author    Lorry <lorry@roadtrain.codethink.co.uk>    2012-08-22 15:47:16 +0100
committer Lorry <lorry@roadtrain.codethink.co.uk>    2012-08-22 15:47:16 +0100
commit    25335618bf8755ce6b116ee14f47f5a1f2c821e9 (patch)
tree      d889d7ab3f9f985d0c54c534cb8052bd2e6d7163 /bzrlib
download  bzr-tarball-25335618bf8755ce6b116ee14f47f5a1f2c821e9.tar.gz
Tarball conversion
Diffstat (limited to 'bzrlib')
-rw-r--r--bzrlib/__init__.py232
-rw-r--r--bzrlib/_annotator_py.py313
-rw-r--r--bzrlib/_annotator_pyx.c1499
-rw-r--r--bzrlib/_annotator_pyx.pyx294
-rw-r--r--bzrlib/_bencode_pyx.c2529
-rw-r--r--bzrlib/_bencode_pyx.h22
-rw-r--r--bzrlib/_bencode_pyx.pyx400
-rw-r--r--bzrlib/_btree_serializer_py.py74
-rw-r--r--bzrlib/_btree_serializer_pyx.c9433
-rw-r--r--bzrlib/_btree_serializer_pyx.pyx982
-rw-r--r--bzrlib/_chk_map_py.py169
-rw-r--r--bzrlib/_chk_map_pyx.c2172
-rw-r--r--bzrlib/_chk_map_pyx.pyx454
-rw-r--r--bzrlib/_chunks_to_lines_py.py59
-rw-r--r--bzrlib/_chunks_to_lines_pyx.c518
-rw-r--r--bzrlib/_chunks_to_lines_pyx.pyx130
-rw-r--r--bzrlib/_dirstate_helpers_py.py319
-rw-r--r--bzrlib/_dirstate_helpers_pyx.c20927
-rw-r--r--bzrlib/_dirstate_helpers_pyx.h17
-rw-r--r--bzrlib/_dirstate_helpers_pyx.pyx2031
-rw-r--r--bzrlib/_export_c_api.h104
-rw-r--r--bzrlib/_groupcompress_py.py468
-rw-r--r--bzrlib/_groupcompress_pyx.c6763
-rw-r--r--bzrlib/_groupcompress_pyx.pyx600
-rw-r--r--bzrlib/_import_c_api.h189
-rw-r--r--bzrlib/_knit_load_data_py.py96
-rw-r--r--bzrlib/_knit_load_data_pyx.c1405
-rw-r--r--bzrlib/_knit_load_data_pyx.pyx304
-rw-r--r--bzrlib/_known_graph_py.py374
-rw-r--r--bzrlib/_known_graph_pyx.c4826
-rw-r--r--bzrlib/_known_graph_pyx.pyx955
-rw-r--r--bzrlib/_patiencediff_c.c1281
-rwxr-xr-xbzrlib/_patiencediff_py.py253
-rw-r--r--bzrlib/_readdir_py.py52
-rw-r--r--bzrlib/_readdir_pyx.c1752
-rw-r--r--bzrlib/_readdir_pyx.pyx379
-rw-r--r--bzrlib/_rio_py.py79
-rw-r--r--bzrlib/_rio_pyx.c1203
-rw-r--r--bzrlib/_rio_pyx.pyx211
-rw-r--r--bzrlib/_simple_set_pyx.c2381
-rw-r--r--bzrlib/_simple_set_pyx.h26
-rw-r--r--bzrlib/_simple_set_pyx.pxd91
-rw-r--r--bzrlib/_simple_set_pyx.pyx606
-rw-r--r--bzrlib/_simple_set_pyx_api.h128
-rw-r--r--bzrlib/_static_tuple_c.c958
-rw-r--r--bzrlib/_static_tuple_c.h117
-rw-r--r--bzrlib/_static_tuple_c.pxd46
-rw-r--r--bzrlib/_static_tuple_py.py82
-rw-r--r--bzrlib/_walkdirs_win32.pyx300
-rw-r--r--bzrlib/add.py139
-rw-r--r--bzrlib/annotate.py445
-rw-r--r--bzrlib/api.py103
-rw-r--r--bzrlib/atomicfile.py114
-rw-r--r--bzrlib/bencode.py27
-rw-r--r--bzrlib/bisect_multi.py66
-rw-r--r--bzrlib/branch.py3407
-rw-r--r--bzrlib/branchbuilder.py303
-rw-r--r--bzrlib/branchfmt/__init__.py25
-rw-r--r--bzrlib/branchfmt/fullhistory.py178
-rw-r--r--bzrlib/breakin.py82
-rw-r--r--bzrlib/btree_index.py1608
-rw-r--r--bzrlib/bugtracker.py328
-rw-r--r--bzrlib/builtins.py6731
-rw-r--r--bzrlib/bundle/__init__.py87
-rw-r--r--bzrlib/bundle/apply_bundle.py80
-rw-r--r--bzrlib/bundle/bundle_data.py794
-rw-r--r--bzrlib/bundle/commands.py105
-rw-r--r--bzrlib/bundle/serializer/__init__.py216
-rw-r--r--bzrlib/bundle/serializer/v08.py554
-rw-r--r--bzrlib/bundle/serializer/v09.py76
-rw-r--r--bzrlib/bundle/serializer/v4.py742
-rw-r--r--bzrlib/bzr_distutils.py118
-rw-r--r--bzrlib/bzrdir.py2315
-rw-r--r--bzrlib/cache_utf8.py119
-rw-r--r--bzrlib/cethread.py156
-rw-r--r--bzrlib/check.py446
-rw-r--r--bzrlib/chk_map.py1764
-rw-r--r--bzrlib/chk_serializer.py254
-rw-r--r--bzrlib/chunk_writer.py278
-rw-r--r--bzrlib/clean_tree.py130
-rw-r--r--bzrlib/cleanup.py199
-rw-r--r--bzrlib/cmd_test_script.py67
-rw-r--r--bzrlib/cmd_version_info.py138
-rw-r--r--bzrlib/cmdline.py166
-rw-r--r--bzrlib/commands.py1303
-rw-r--r--bzrlib/commit.py1019
-rw-r--r--bzrlib/commit_signature_commands.py191
-rw-r--r--bzrlib/config.py4195
-rw-r--r--bzrlib/conflicts.py893
-rw-r--r--bzrlib/controldir.py1460
-rw-r--r--bzrlib/counted_lock.py113
-rw-r--r--bzrlib/crash.py270
-rw-r--r--bzrlib/debug.py59
-rw-r--r--bzrlib/decorators.py359
-rw-r--r--bzrlib/delta.h154
-rw-r--r--bzrlib/delta.py455
-rw-r--r--bzrlib/diff-delta.c1188
-rw-r--r--bzrlib/diff.py1043
-rw-r--r--bzrlib/directory_service.py152
-rw-r--r--bzrlib/dirstate.py4275
-rw-r--r--bzrlib/doc/__init__.py38
-rw-r--r--bzrlib/doc/api/__init__.py53
-rw-r--r--bzrlib/doc/api/branch.txt37
-rw-r--r--bzrlib/doc/api/transport.txt24
-rw-r--r--bzrlib/doc_generate/__init__.py27
-rw-r--r--bzrlib/doc_generate/autodoc_bash_completion.py53
-rw-r--r--bzrlib/doc_generate/autodoc_man.py256
-rw-r--r--bzrlib/doc_generate/autodoc_rstx.py188
-rw-r--r--bzrlib/doc_generate/conf.py217
-rw-r--r--bzrlib/email_message.py209
-rw-r--r--bzrlib/errors.py3342
-rw-r--r--bzrlib/estimate_compressed_size.py70
-rw-r--r--bzrlib/export/__init__.py247
-rw-r--r--bzrlib/export/dir_exporter.py96
-rw-r--r--bzrlib/export/tar_exporter.py228
-rw-r--r--bzrlib/export/zip_exporter.py105
-rw-r--r--bzrlib/export_pot.py322
-rw-r--r--bzrlib/externalcommand.py66
-rw-r--r--bzrlib/fetch.py430
-rw-r--r--bzrlib/fifo_cache.py270
-rw-r--r--bzrlib/filter_tree.py77
-rw-r--r--bzrlib/filters/__init__.py287
-rw-r--r--bzrlib/filters/eol.py73
-rw-r--r--bzrlib/foreign.py342
-rw-r--r--bzrlib/generate_ids.py121
-rw-r--r--bzrlib/globbing.py356
-rw-r--r--bzrlib/gpg.py557
-rw-r--r--bzrlib/graph.py1715
-rw-r--r--bzrlib/groupcompress.py2211
-rw-r--r--bzrlib/hashcache.py320
-rw-r--r--bzrlib/help.py167
-rw-r--r--bzrlib/help_topics/__init__.py943
-rw-r--r--bzrlib/help_topics/en/authentication.txt226
-rw-r--r--bzrlib/help_topics/en/configuration.txt724
-rw-r--r--bzrlib/help_topics/en/conflict-types.txt377
-rw-r--r--bzrlib/help_topics/en/content-filters.txt93
-rw-r--r--bzrlib/help_topics/en/debug-flags.txt45
-rw-r--r--bzrlib/help_topics/en/diverged-branches.txt39
-rw-r--r--bzrlib/help_topics/en/eol.txt122
-rw-r--r--bzrlib/help_topics/en/log-formats.txt34
-rw-r--r--bzrlib/help_topics/en/patterns.txt38
-rw-r--r--bzrlib/help_topics/en/rules.txt40
-rw-r--r--bzrlib/help_topics/en/url-special-chars.txt48
-rw-r--r--bzrlib/help_topics/es/conflict-types.txt185
-rw-r--r--bzrlib/hooks.py446
-rw-r--r--bzrlib/i18n.py206
-rw-r--r--bzrlib/identitymap.py75
-rw-r--r--bzrlib/ignores.py236
-rw-r--r--bzrlib/index.py1868
-rw-r--r--bzrlib/info.py537
-rw-r--r--bzrlib/inspect_for_copy.py76
-rw-r--r--bzrlib/inter.py121
-rw-r--r--bzrlib/intset.py227
-rw-r--r--bzrlib/inventory.py2368
-rw-r--r--bzrlib/inventory_delta.py377
-rw-r--r--bzrlib/iterablefile.py258
-rw-r--r--bzrlib/knit.py3505
-rw-r--r--bzrlib/lazy_import.py410
-rw-r--r--bzrlib/lazy_regex.py133
-rw-r--r--bzrlib/library_state.py117
-rw-r--r--bzrlib/lock.py550
-rw-r--r--bzrlib/lockable_files.py301
-rw-r--r--bzrlib/lockdir.py863
-rw-r--r--bzrlib/log.py2137
-rw-r--r--bzrlib/lru_cache.py316
-rw-r--r--bzrlib/lsprof.py326
-rw-r--r--bzrlib/mail_client.py641
-rw-r--r--bzrlib/memorytree.py324
-rw-r--r--bzrlib/merge.py2467
-rw-r--r--bzrlib/merge3.py482
-rw-r--r--bzrlib/merge_directive.py684
-rw-r--r--bzrlib/mergetools.py127
-rw-r--r--bzrlib/missing.py228
-rw-r--r--bzrlib/msgeditor.py349
-rw-r--r--bzrlib/multiparent.py681
-rw-r--r--bzrlib/mutabletree.py799
-rw-r--r--bzrlib/option.py575
-rw-r--r--bzrlib/osutils.py2587
-rw-r--r--bzrlib/pack.py537
-rw-r--r--bzrlib/patch.py104
-rw-r--r--bzrlib/patches.py474
-rwxr-xr-xbzrlib/patiencediff.py168
-rw-r--r--bzrlib/plugin.py677
-rw-r--r--bzrlib/plugins/__init__.py19
-rw-r--r--bzrlib/plugins/bash_completion/README.txt143
-rw-r--r--bzrlib/plugins/bash_completion/__init__.py41
-rw-r--r--bzrlib/plugins/bash_completion/bashcomp.py482
-rw-r--r--bzrlib/plugins/bash_completion/tests/__init__.py23
-rw-r--r--bzrlib/plugins/bash_completion/tests/test_bashcomp.py332
-rw-r--r--bzrlib/plugins/changelog_merge/__init__.py78
-rw-r--r--bzrlib/plugins/changelog_merge/changelog_merge.py199
-rw-r--r--bzrlib/plugins/changelog_merge/tests/__init__.py24
-rw-r--r--bzrlib/plugins/changelog_merge/tests/test_changelog_merge.py222
-rw-r--r--bzrlib/plugins/launchpad/__init__.py201
-rw-r--r--bzrlib/plugins/launchpad/account.py113
-rw-r--r--bzrlib/plugins/launchpad/cmds.py410
-rw-r--r--bzrlib/plugins/launchpad/lp_api.py313
-rw-r--r--bzrlib/plugins/launchpad/lp_api_lite.py288
-rw-r--r--bzrlib/plugins/launchpad/lp_directory.py209
-rw-r--r--bzrlib/plugins/launchpad/lp_propose.py221
-rw-r--r--bzrlib/plugins/launchpad/lp_registration.py358
-rw-r--r--bzrlib/plugins/launchpad/test_account.py117
-rw-r--r--bzrlib/plugins/launchpad/test_lp_api.py100
-rw-r--r--bzrlib/plugins/launchpad/test_lp_api_lite.py549
-rw-r--r--bzrlib/plugins/launchpad/test_lp_directory.py639
-rw-r--r--bzrlib/plugins/launchpad/test_lp_login.py58
-rw-r--r--bzrlib/plugins/launchpad/test_lp_open.py103
-rw-r--r--bzrlib/plugins/launchpad/test_lp_service.py181
-rw-r--r--bzrlib/plugins/launchpad/test_register.py366
-rw-r--r--bzrlib/plugins/netrc_credential_store/__init__.py75
-rw-r--r--bzrlib/plugins/netrc_credential_store/tests/__init__.py23
-rw-r--r--bzrlib/plugins/netrc_credential_store/tests/test_netrc.py86
-rw-r--r--bzrlib/plugins/news_merge/README7
-rw-r--r--bzrlib/plugins/news_merge/__init__.py58
-rw-r--r--bzrlib/plugins/news_merge/news_merge.py78
-rw-r--r--bzrlib/plugins/news_merge/parser.py71
-rw-r--r--bzrlib/plugins/news_merge/tests/__init__.py23
-rw-r--r--bzrlib/plugins/news_merge/tests/test_news_merge.py27
-rw-r--r--bzrlib/plugins/po_merge/README7
-rw-r--r--bzrlib/plugins/po_merge/__init__.py92
-rw-r--r--bzrlib/plugins/po_merge/po_merge.py174
-rw-r--r--bzrlib/plugins/po_merge/tests/__init__.py23
-rw-r--r--bzrlib/plugins/po_merge/tests/test_po_merge.py451
-rw-r--r--bzrlib/plugins/weave_fmt/__init__.py128
-rw-r--r--bzrlib/plugins/weave_fmt/branch.py219
-rw-r--r--bzrlib/plugins/weave_fmt/bzrdir.py1006
-rw-r--r--bzrlib/plugins/weave_fmt/repository.py883
-rw-r--r--bzrlib/plugins/weave_fmt/test_bzrdir.py584
-rw-r--r--bzrlib/plugins/weave_fmt/test_repository.py331
-rw-r--r--bzrlib/plugins/weave_fmt/test_workingtree.py89
-rw-r--r--bzrlib/plugins/weave_fmt/workingtree.py243
-rw-r--r--bzrlib/plugins/weave_fmt/xml4.py190
-rw-r--r--bzrlib/progress.py248
-rw-r--r--bzrlib/push.py177
-rw-r--r--bzrlib/python-compat.h90
-rw-r--r--bzrlib/pyutils.py91
-rw-r--r--bzrlib/readdir.h22
-rw-r--r--bzrlib/reconcile.py558
-rw-r--r--bzrlib/reconfigure.py385
-rw-r--r--bzrlib/recordcounter.py89
-rw-r--r--bzrlib/registry.py291
-rw-r--r--bzrlib/remote.py4291
-rw-r--r--bzrlib/rename_map.py264
-rw-r--r--bzrlib/repofmt/__init__.py20
-rw-r--r--bzrlib/repofmt/groupcompress_repo.py1426
-rw-r--r--bzrlib/repofmt/knitpack_repo.py1156
-rw-r--r--bzrlib/repofmt/knitrepo.py522
-rw-r--r--bzrlib/repofmt/pack_repo.py2091
-rw-r--r--bzrlib/repository.py1831
-rw-r--r--bzrlib/revision.py234
-rw-r--r--bzrlib/revisionspec.py1009
-rw-r--r--bzrlib/revisiontree.py334
-rw-r--r--bzrlib/rio.py389
-rw-r--r--bzrlib/rules.py165
-rw-r--r--bzrlib/send.py205
-rw-r--r--bzrlib/serializer.py103
-rw-r--r--bzrlib/shelf.py439
-rw-r--r--bzrlib/shelf_ui.py498
-rw-r--r--bzrlib/shellcomplete.py86
-rw-r--r--bzrlib/smart/__init__.py55
-rw-r--r--bzrlib/smart/branch.py448
-rw-r--r--bzrlib/smart/bzrdir.py626
-rw-r--r--bzrlib/smart/client.py352
-rw-r--r--bzrlib/smart/medium.py1193
-rw-r--r--bzrlib/smart/message.py353
-rw-r--r--bzrlib/smart/packrepository.py47
-rw-r--r--bzrlib/smart/protocol.py1385
-rw-r--r--bzrlib/smart/repository.py1304
-rw-r--r--bzrlib/smart/request.py776
-rw-r--r--bzrlib/smart/server.py502
-rw-r--r--bzrlib/smart/signals.py116
-rw-r--r--bzrlib/smart/vfs.py231
-rw-r--r--bzrlib/smtp_connection.py190
-rw-r--r--bzrlib/static_tuple.py58
-rw-r--r--bzrlib/status.py483
-rw-r--r--bzrlib/store/__init__.py323
-rw-r--r--bzrlib/store/text.py127
-rw-r--r--bzrlib/store/versioned/__init__.py243
-rw-r--r--bzrlib/strace.py96
-rw-r--r--bzrlib/switch.py175
-rw-r--r--bzrlib/symbol_versioning.py371
-rw-r--r--bzrlib/tag.py429
-rw-r--r--bzrlib/testament.py245
-rw-r--r--bzrlib/tests/EncodingAdapter.py133
-rw-r--r--bzrlib/tests/TestUtil.py233
-rw-r--r--bzrlib/tests/__init__.py4521
-rw-r--r--bzrlib/tests/blackbox/__init__.py152
-rw-r--r--bzrlib/tests/blackbox/test_add.py267
-rw-r--r--bzrlib/tests/blackbox/test_added.py77
-rw-r--r--bzrlib/tests/blackbox/test_alias.py95
-rw-r--r--bzrlib/tests/blackbox/test_aliases.py73
-rw-r--r--bzrlib/tests/blackbox/test_ancestry.py111
-rw-r--r--bzrlib/tests/blackbox/test_annotate.py333
-rw-r--r--bzrlib/tests/blackbox/test_bound_branches.py386
-rw-r--r--bzrlib/tests/blackbox/test_branch.py657
-rw-r--r--bzrlib/tests/blackbox/test_branches.py93
-rw-r--r--bzrlib/tests/blackbox/test_break_lock.py143
-rw-r--r--bzrlib/tests/blackbox/test_bundle_info.py55
-rw-r--r--bzrlib/tests/blackbox/test_cat.py241
-rw-r--r--bzrlib/tests/blackbox/test_cat_revision.py78
-rw-r--r--bzrlib/tests/blackbox/test_check.py112
-rw-r--r--bzrlib/tests/blackbox/test_checkout.py228
-rw-r--r--bzrlib/tests/blackbox/test_clean_tree.py118
-rw-r--r--bzrlib/tests/blackbox/test_command_encoding.py120
-rw-r--r--bzrlib/tests/blackbox/test_commit.py893
-rw-r--r--bzrlib/tests/blackbox/test_config.py384
-rw-r--r--bzrlib/tests/blackbox/test_conflicts.py126
-rw-r--r--bzrlib/tests/blackbox/test_debug.py60
-rw-r--r--bzrlib/tests/blackbox/test_deleted.py37
-rw-r--r--bzrlib/tests/blackbox/test_diff.py424
-rw-r--r--bzrlib/tests/blackbox/test_dpush.py174
-rw-r--r--bzrlib/tests/blackbox/test_dump_btree.py130
-rw-r--r--bzrlib/tests/blackbox/test_exceptions.py167
-rw-r--r--bzrlib/tests/blackbox/test_export.py453
-rw-r--r--bzrlib/tests/blackbox/test_export_pot.py37
-rw-r--r--bzrlib/tests/blackbox/test_filesystem_cicp.py282
-rw-r--r--bzrlib/tests/blackbox/test_filtered_view_ops.py198
-rw-r--r--bzrlib/tests/blackbox/test_find_merge_base.py39
-rw-r--r--bzrlib/tests/blackbox/test_help.py205
-rw-r--r--bzrlib/tests/blackbox/test_hooks.py75
-rw-r--r--bzrlib/tests/blackbox/test_ignore.py180
-rw-r--r--bzrlib/tests/blackbox/test_ignored.py48
-rw-r--r--bzrlib/tests/blackbox/test_info.py1583
-rw-r--r--bzrlib/tests/blackbox/test_init.py236
-rw-r--r--bzrlib/tests/blackbox/test_inventory.py117
-rw-r--r--bzrlib/tests/blackbox/test_join.py95
-rw-r--r--bzrlib/tests/blackbox/test_locale.py89
-rw-r--r--bzrlib/tests/blackbox/test_log.py1133
-rw-r--r--bzrlib/tests/blackbox/test_logformats.py124
-rw-r--r--bzrlib/tests/blackbox/test_lookup_revision.py31
-rw-r--r--bzrlib/tests/blackbox/test_ls.py262
-rw-r--r--bzrlib/tests/blackbox/test_lsprof.py33
-rw-r--r--bzrlib/tests/blackbox/test_merge.py763
-rw-r--r--bzrlib/tests/blackbox/test_merge_directive.py261
-rw-r--r--bzrlib/tests/blackbox/test_missing.py262
-rw-r--r--bzrlib/tests/blackbox/test_mkdir.py59
-rw-r--r--bzrlib/tests/blackbox/test_modified.py81
-rw-r--r--bzrlib/tests/blackbox/test_mv.py531
-rw-r--r--bzrlib/tests/blackbox/test_nick.py90
-rw-r--r--bzrlib/tests/blackbox/test_non_ascii.py551
-rw-r--r--bzrlib/tests/blackbox/test_outside_wt.py82
-rw-r--r--bzrlib/tests/blackbox/test_pack.py106
-rw-r--r--bzrlib/tests/blackbox/test_pull.py589
-rw-r--r--bzrlib/tests/blackbox/test_push.py919
-rw-r--r--bzrlib/tests/blackbox/test_re_sign.py101
-rw-r--r--bzrlib/tests/blackbox/test_reconcile.py91
-rw-r--r--bzrlib/tests/blackbox/test_reconfigure.py271
-rw-r--r--bzrlib/tests/blackbox/test_reference.py87
-rw-r--r--bzrlib/tests/blackbox/test_remember_option.py208
-rw-r--r--bzrlib/tests/blackbox/test_remerge.py125
-rw-r--r--bzrlib/tests/blackbox/test_remove.py289
-rw-r--r--bzrlib/tests/blackbox/test_remove_tree.py170
-rw-r--r--bzrlib/tests/blackbox/test_repair_workingtree.py97
-rw-r--r--bzrlib/tests/blackbox/test_resolve.py131
-rw-r--r--bzrlib/tests/blackbox/test_revert.py211
-rw-r--r--bzrlib/tests/blackbox/test_revision_history.py76
-rw-r--r--bzrlib/tests/blackbox/test_revision_info.py128
-rw-r--r--bzrlib/tests/blackbox/test_revno.py179
-rw-r--r--bzrlib/tests/blackbox/test_rmbranch.py121
-rw-r--r--bzrlib/tests/blackbox/test_script.py72
-rw-r--r--bzrlib/tests/blackbox/test_selftest.py161
-rw-r--r--bzrlib/tests/blackbox/test_send.py466
-rw-r--r--bzrlib/tests/blackbox/test_serve.py450
-rw-r--r--bzrlib/tests/blackbox/test_shared_repository.py166
-rw-r--r--bzrlib/tests/blackbox/test_shell_complete.py27
-rw-r--r--bzrlib/tests/blackbox/test_shelve.py155
-rw-r--r--bzrlib/tests/blackbox/test_sign_my_commits.py190
-rw-r--r--bzrlib/tests/blackbox/test_split.py64
-rw-r--r--bzrlib/tests/blackbox/test_status.py804
-rw-r--r--bzrlib/tests/blackbox/test_switch.py494
-rw-r--r--bzrlib/tests/blackbox/test_tags.py448
-rw-r--r--bzrlib/tests/blackbox/test_testament.py48
-rw-r--r--bzrlib/tests/blackbox/test_too_much.py622
-rw-r--r--bzrlib/tests/blackbox/test_uncommit.py316
-rw-r--r--bzrlib/tests/blackbox/test_unknowns.py54
-rw-r--r--bzrlib/tests/blackbox/test_update.py482
-rw-r--r--bzrlib/tests/blackbox/test_upgrade.py296
-rw-r--r--bzrlib/tests/blackbox/test_version.py148
-rw-r--r--bzrlib/tests/blackbox/test_version_info.py184
-rw-r--r--bzrlib/tests/blackbox/test_versioning.py176
-rw-r--r--bzrlib/tests/blackbox/test_view.py111
-rw-r--r--bzrlib/tests/blackbox/test_whoami.py151
-rw-r--r--bzrlib/tests/commands/__init__.py49
-rw-r--r--bzrlib/tests/commands/test_branch.py43
-rw-r--r--bzrlib/tests/commands/test_cat.py50
-rw-r--r--bzrlib/tests/commands/test_checkout.py40
-rw-r--r--bzrlib/tests/commands/test_commit.py71
-rw-r--r--bzrlib/tests/commands/test_init.py35
-rw-r--r--bzrlib/tests/commands/test_init_repository.py35
-rw-r--r--bzrlib/tests/commands/test_merge.py38
-rw-r--r--bzrlib/tests/commands/test_missing.py38
-rw-r--r--bzrlib/tests/commands/test_pull.py58
-rw-r--r--bzrlib/tests/commands/test_push.py46
-rw-r--r--bzrlib/tests/commands/test_revert.py61
-rw-r--r--bzrlib/tests/commands/test_update.py45
-rw-r--r--bzrlib/tests/fake_command.py23
-rw-r--r--bzrlib/tests/features.py494
-rw-r--r--bzrlib/tests/file_utils.py47
-rw-r--r--bzrlib/tests/fixtures.py160
-rw-r--r--bzrlib/tests/ftp_server/__init__.py100
-rw-r--r--bzrlib/tests/ftp_server/medusa_based.py294
-rw-r--r--bzrlib/tests/ftp_server/pyftpdlib_based.py208
-rw-r--r--bzrlib/tests/http_server.py516
-rw-r--r--bzrlib/tests/http_utils.py557
-rw-r--r--bzrlib/tests/https_server.py150
-rw-r--r--bzrlib/tests/lock_helpers.py91
-rw-r--r--bzrlib/tests/matchers.py244
-rw-r--r--bzrlib/tests/per_branch/__init__.py182
-rw-r--r--bzrlib/tests/per_branch/test_branch.py1059
-rw-r--r--bzrlib/tests/per_branch/test_break_lock.py99
-rw-r--r--bzrlib/tests/per_branch/test_check.py105
-rw-r--r--bzrlib/tests/per_branch/test_commit.py222
-rw-r--r--bzrlib/tests/per_branch/test_config.py46
-rw-r--r--bzrlib/tests/per_branch/test_create_checkout.py70
-rw-r--r--bzrlib/tests/per_branch/test_create_clone.py154
-rw-r--r--bzrlib/tests/per_branch/test_dotted_revno_to_revision_id.py45
-rw-r--r--bzrlib/tests/per_branch/test_get_revision_id_to_revno_map.py99
-rw-r--r--bzrlib/tests/per_branch/test_hooks.py347
-rw-r--r--bzrlib/tests/per_branch/test_http.py78
-rw-r--r--bzrlib/tests/per_branch/test_iter_merge_sorted_revisions.py305
-rw-r--r--bzrlib/tests/per_branch/test_last_revision_info.py67
-rw-r--r--bzrlib/tests/per_branch/test_locking.py539
-rw-r--r--bzrlib/tests/per_branch/test_parent.py107
-rw-r--r--bzrlib/tests/per_branch/test_permissions.py137
-rw-r--r--bzrlib/tests/per_branch/test_pull.py274
-rw-r--r--bzrlib/tests/per_branch/test_push.py461
-rw-r--r--bzrlib/tests/per_branch/test_reconcile.py90
-rw-r--r--bzrlib/tests/per_branch/test_revision_id_to_dotted_revno.py36
-rw-r--r--bzrlib/tests/per_branch/test_revision_id_to_revno.py64
-rw-r--r--bzrlib/tests/per_branch/test_sprout.py224
-rw-r--r--bzrlib/tests/per_branch/test_stacking.py589
-rw-r--r--bzrlib/tests/per_branch/test_tags.py508
-rw-r--r--bzrlib/tests/per_branch/test_uncommit.py109
-rw-r--r--bzrlib/tests/per_branch/test_update.py91
-rw-r--r--bzrlib/tests/per_bzrdir/__init__.py88
-rw-r--r--bzrlib/tests/per_bzrdir/test_bzrdir.py693
-rw-r--r--bzrlib/tests/per_controldir/__init__.py107
-rw-r--r--bzrlib/tests/per_controldir/test_controldir.py1766
-rw-r--r--bzrlib/tests/per_controldir/test_format.py48
-rw-r--r--bzrlib/tests/per_controldir/test_push.py62
-rw-r--r--bzrlib/tests/per_controldir_colo/__init__.py78
-rw-r--r--bzrlib/tests/per_controldir_colo/test_supported.py158
-rw-r--r--bzrlib/tests/per_controldir_colo/test_unsupported.py83
-rw-r--r--bzrlib/tests/per_foreign_vcs/__init__.py50
-rw-r--r--bzrlib/tests/per_foreign_vcs/test_branch.py145
-rw-r--r--bzrlib/tests/per_foreign_vcs/test_repository.py87
-rw-r--r--bzrlib/tests/per_interbranch/__init__.py191
-rw-r--r--bzrlib/tests/per_interbranch/test_copy_content_into.py47
-rw-r--r--bzrlib/tests/per_interbranch/test_fetch.py95
-rw-r--r--bzrlib/tests/per_interbranch/test_get.py34
-rw-r--r--bzrlib/tests/per_interbranch/test_pull.py210
-rw-r--r--bzrlib/tests/per_interbranch/test_push.py379
-rw-r--r--bzrlib/tests/per_interrepository/__init__.py211
-rw-r--r--bzrlib/tests/per_interrepository/test_fetch.py553
-rw-r--r--bzrlib/tests/per_interrepository/test_interrepository.py205
-rw-r--r--bzrlib/tests/per_intertree/__init__.py184
-rw-r--r--bzrlib/tests/per_intertree/test_compare.py1846
-rw-r--r--bzrlib/tests/per_intertree/test_file_content_matches.py48
-rw-r--r--bzrlib/tests/per_inventory/__init__.py67
-rw-r--r--bzrlib/tests/per_inventory/basics.py343
-rw-r--r--bzrlib/tests/per_lock/__init__.py50
-rw-r--r--bzrlib/tests/per_lock/test_lock.py181
-rw-r--r--bzrlib/tests/per_lock/test_temporary_write_lock.py114
-rw-r--r--bzrlib/tests/per_merger.py417
-rw-r--r--bzrlib/tests/per_pack_repository.py1162
-rw-r--r--bzrlib/tests/per_repository/__init__.py137
-rw-r--r--bzrlib/tests/per_repository/test_add_fallback_repository.py58
-rw-r--r--bzrlib/tests/per_repository/test_break_lock.py51
-rw-r--r--bzrlib/tests/per_repository/test_check.py42
-rw-r--r--bzrlib/tests/per_repository/test_commit_builder.py1443
-rw-r--r--bzrlib/tests/per_repository/test_fetch.py369
-rw-r--r--bzrlib/tests/per_repository/test_file_graph.py37
-rw-r--r--bzrlib/tests/per_repository/test_get_parent_map.py73
-rw-r--r--bzrlib/tests/per_repository/test_has_revisions.py43
-rw-r--r--bzrlib/tests/per_repository/test_has_same_location.py126
-rw-r--r--bzrlib/tests/per_repository/test_locking.py57
-rw-r--r--bzrlib/tests/per_repository/test_pack.py37
-rw-r--r--bzrlib/tests/per_repository/test_reconcile.py31
-rw-r--r--bzrlib/tests/per_repository/test_refresh_data.py61
-rw-r--r--bzrlib/tests/per_repository/test_repository.py1002
-rw-r--r--bzrlib/tests/per_repository/test_revision.py109
-rw-r--r--bzrlib/tests/per_repository/test_signatures.py163
-rw-r--r--bzrlib/tests/per_repository/test_statistics.py61
-rw-r--r--bzrlib/tests/per_repository/test_write_group.py124
-rw-r--r--bzrlib/tests/per_repository_chk/__init__.py80
-rw-r--r--bzrlib/tests/per_repository_chk/test_supported.py395
-rw-r--r--bzrlib/tests/per_repository_chk/test_unsupported.py30
-rw-r--r--bzrlib/tests/per_repository_reference/__init__.py137
-rw-r--r--bzrlib/tests/per_repository_reference/test__make_parents_provider.py44
-rw-r--r--bzrlib/tests/per_repository_reference/test_add_inventory.py55
-rw-r--r--bzrlib/tests/per_repository_reference/test_add_revision.py54
-rw-r--r--bzrlib/tests/per_repository_reference/test_add_signature_text.py53
-rw-r--r--bzrlib/tests/per_repository_reference/test_all_revision_ids.py63
-rw-r--r--bzrlib/tests/per_repository_reference/test_break_lock.py46
-rw-r--r--bzrlib/tests/per_repository_reference/test_check.py41
-rw-r--r--bzrlib/tests/per_repository_reference/test_commit_with_stacking.py220
-rw-r--r--bzrlib/tests/per_repository_reference/test_default_stacking.py34
-rw-r--r--bzrlib/tests/per_repository_reference/test_fetch.py186
-rw-r--r--bzrlib/tests/per_repository_reference/test_get_record_stream.py201
-rw-r--r--bzrlib/tests/per_repository_reference/test_get_rev_id_for_revno.py47
-rw-r--r--bzrlib/tests/per_repository_reference/test_graph.py116
-rw-r--r--bzrlib/tests/per_repository_reference/test_initialize.py55
-rw-r--r--bzrlib/tests/per_repository_reference/test_unlock.py76
-rw-r--r--bzrlib/tests/per_repository_vf/__init__.py55
-rw-r--r--bzrlib/tests/per_repository_vf/helpers.py83
-rw-r--r--bzrlib/tests/per_repository_vf/test__generate_text_key_index.py39
-rw-r--r--bzrlib/tests/per_repository_vf/test_add_inventory_by_delta.py111
-rw-r--r--bzrlib/tests/per_repository_vf/test_check.py140
-rw-r--r--bzrlib/tests/per_repository_vf/test_check_reconcile.py978
-rw-r--r--bzrlib/tests/per_repository_vf/test_fetch.py57
-rw-r--r--bzrlib/tests/per_repository_vf/test_fileid_involved.py432
-rw-r--r--bzrlib/tests/per_repository_vf/test_find_text_key_references.py38
-rw-r--r--bzrlib/tests/per_repository_vf/test_merge_directive.py79
-rw-r--r--bzrlib/tests/per_repository_vf/test_reconcile.py442
-rw-r--r--bzrlib/tests/per_repository_vf/test_refresh_data.py91
-rw-r--r--bzrlib/tests/per_repository_vf/test_repository.py466
-rw-r--r--bzrlib/tests/per_repository_vf/test_write_group.py656
-rw-r--r--bzrlib/tests/per_transport.py1872
-rw-r--r--bzrlib/tests/per_tree/__init__.py407
-rw-r--r--bzrlib/tests/per_tree/test_annotate_iter.py55
-rw-r--r--bzrlib/tests/per_tree/test_export.py98
-rw-r--r--bzrlib/tests/per_tree/test_get_file_mtime.py53
-rw-r--r--bzrlib/tests/per_tree/test_get_file_with_stat.py53
-rw-r--r--bzrlib/tests/per_tree/test_get_root_id.py44
-rw-r--r--bzrlib/tests/per_tree/test_get_symlink_target.py64
-rw-r--r--bzrlib/tests/per_tree/test_ids.py51
-rw-r--r--bzrlib/tests/per_tree/test_inv.py173
-rw-r--r--bzrlib/tests/per_tree/test_is_executable.py39
-rw-r--r--bzrlib/tests/per_tree/test_iter_search_rules.py77
-rw-r--r--bzrlib/tests/per_tree/test_list_files.py116
-rw-r--r--bzrlib/tests/per_tree/test_locking.py28
-rw-r--r--bzrlib/tests/per_tree/test_path_content_summary.py156
-rw-r--r--bzrlib/tests/per_tree/test_revision_tree.py35
-rw-r--r--bzrlib/tests/per_tree/test_test_trees.py307
-rw-r--r--bzrlib/tests/per_tree/test_tree.py362
-rw-r--r--bzrlib/tests/per_tree/test_walkdirs.py109
-rw-r--r--bzrlib/tests/per_uifactory/__init__.py274
-rw-r--r--bzrlib/tests/per_versionedfile.py2858
-rw-r--r--bzrlib/tests/per_workingtree/__init__.py145
-rw-r--r--bzrlib/tests/per_workingtree/test_add.py166
-rw-r--r--bzrlib/tests/per_workingtree/test_add_reference.py111
-rw-r--r--bzrlib/tests/per_workingtree/test_annotate_iter.py181
-rw-r--r--bzrlib/tests/per_workingtree/test_basis_inventory.py83
-rw-r--r--bzrlib/tests/per_workingtree/test_basis_tree.py74
-rw-r--r--bzrlib/tests/per_workingtree/test_break_lock.py69
-rw-r--r--bzrlib/tests/per_workingtree/test_changes_from.py54
-rw-r--r--bzrlib/tests/per_workingtree/test_check.py64
-rw-r--r--bzrlib/tests/per_workingtree/test_check_state.py110
-rw-r--r--bzrlib/tests/per_workingtree/test_commit.py587
-rw-r--r--bzrlib/tests/per_workingtree/test_content_filters.py377
-rw-r--r--bzrlib/tests/per_workingtree/test_eol_conversion.py335
-rw-r--r--bzrlib/tests/per_workingtree/test_executable.py195
-rw-r--r--bzrlib/tests/per_workingtree/test_flush.py79
-rw-r--r--bzrlib/tests/per_workingtree/test_get_file_mtime.py113
-rw-r--r--bzrlib/tests/per_workingtree/test_get_parent_ids.py54
-rw-r--r--bzrlib/tests/per_workingtree/test_inv.py182
-rw-r--r--bzrlib/tests/per_workingtree/test_is_control_filename.py39
-rw-r--r--bzrlib/tests/per_workingtree/test_is_ignored.py224
-rw-r--r--bzrlib/tests/per_workingtree/test_locking.py292
-rw-r--r--bzrlib/tests/per_workingtree/test_merge_from_branch.py254
-rw-r--r--bzrlib/tests/per_workingtree/test_mkdir.py38
-rw-r--r--bzrlib/tests/per_workingtree/test_move.py563
-rw-r--r--bzrlib/tests/per_workingtree/test_nested_specifics.py83
-rw-r--r--bzrlib/tests/per_workingtree/test_parents.py764
-rw-r--r--bzrlib/tests/per_workingtree/test_paths2ids.py201
-rw-r--r--bzrlib/tests/per_workingtree/test_pull.py109
-rw-r--r--bzrlib/tests/per_workingtree/test_put_file.py37
-rw-r--r--bzrlib/tests/per_workingtree/test_read_working_inventory.py56
-rw-r--r--bzrlib/tests/per_workingtree/test_readonly.py116
-rw-r--r--bzrlib/tests/per_workingtree/test_remove.py345
-rw-r--r--bzrlib/tests/per_workingtree/test_rename_one.py420
-rw-r--r--bzrlib/tests/per_workingtree/test_revision_tree.py135
-rw-r--r--bzrlib/tests/per_workingtree/test_set_root_id.py71
-rw-r--r--bzrlib/tests/per_workingtree/test_smart_add.py364
-rw-r--r--bzrlib/tests/per_workingtree/test_symlinks.py180
-rw-r--r--bzrlib/tests/per_workingtree/test_uncommit.py33
-rw-r--r--bzrlib/tests/per_workingtree/test_unversion.py205
-rw-r--r--bzrlib/tests/per_workingtree/test_views.py188
-rw-r--r--bzrlib/tests/per_workingtree/test_walkdirs.py266
-rw-r--r--bzrlib/tests/per_workingtree/test_workingtree.py1234
-rw-r--r--bzrlib/tests/scenarios.py61
-rw-r--r--bzrlib/tests/script.py535
-rw-r--r--bzrlib/tests/ssl_certs/__init__.py29
-rw-r--r--bzrlib/tests/ssl_certs/ca.crt38
-rw-r--r--bzrlib/tests/ssl_certs/ca.key54
-rwxr-xr-xbzrlib/tests/ssl_certs/create_ssls.py268
-rw-r--r--bzrlib/tests/ssl_certs/server.crt33
-rw-r--r--bzrlib/tests/ssl_certs/server.csr29
-rw-r--r--bzrlib/tests/ssl_certs/server_with_pass.key54
-rw-r--r--bzrlib/tests/ssl_certs/server_without_pass.key51
-rw-r--r--bzrlib/tests/stub_sftp.py570
-rw-r--r--bzrlib/tests/test__annotator.py375
-rw-r--r--bzrlib/tests/test__bencode.py217
-rw-r--r--bzrlib/tests/test__btree_serializer.py305
-rw-r--r--bzrlib/tests/test__chk_map.py279
-rw-r--r--bzrlib/tests/test__chunks_to_lines.py110
-rw-r--r--bzrlib/tests/test__dirstate_helpers.py1397
-rw-r--r--bzrlib/tests/test__groupcompress.py582
-rw-r--r--bzrlib/tests/test__known_graph.py915
-rw-r--r--bzrlib/tests/test__rio.py169
-rw-r--r--bzrlib/tests/test__simple_set.py392
-rw-r--r--bzrlib/tests/test__static_tuple.py634
-rw-r--r--bzrlib/tests/test__walkdirs_win32.py117
-rw-r--r--bzrlib/tests/test_ancestry.py36
-rw-r--r--bzrlib/tests/test_annotate.py489
-rw-r--r--bzrlib/tests/test_api.py143
-rw-r--r--bzrlib/tests/test_atomicfile.py126
-rw-r--r--bzrlib/tests/test_bad_files.py95
-rw-r--r--bzrlib/tests/test_bisect_multi.py343
-rw-r--r--bzrlib/tests/test_branch.py724
-rw-r--r--bzrlib/tests/test_branchbuilder.py450
-rw-r--r--bzrlib/tests/test_btree_index.py1545
-rw-r--r--bzrlib/tests/test_bugtracker.py252
-rw-r--r--bzrlib/tests/test_bundle.py1877
-rw-r--r--bzrlib/tests/test_bzrdir.py1572
-rw-r--r--bzrlib/tests/test_cache_utf8.py117
-rw-r--r--bzrlib/tests/test_cethread.py161
-rw-r--r--bzrlib/tests/test_chk_map.py2828
-rw-r--r--bzrlib/tests/test_chk_serializer.py109
-rw-r--r--bzrlib/tests/test_chunk_writer.py113
-rw-r--r--bzrlib/tests/test_clean_tree.py137
-rw-r--r--bzrlib/tests/test_cleanup.py293
-rw-r--r--bzrlib/tests/test_cmdline.py115
-rw-r--r--bzrlib/tests/test_commands.py451
-rw-r--r--bzrlib/tests/test_commit.py841
-rw-r--r--bzrlib/tests/test_commit_merge.py121
-rw-r--r--bzrlib/tests/test_config.py4953
-rw-r--r--bzrlib/tests/test_conflicts.py1198
-rw-r--r--bzrlib/tests/test_controldir.py237
-rw-r--r--bzrlib/tests/test_counted_lock.py219
-rw-r--r--bzrlib/tests/test_crash.py112
-rw-r--r--bzrlib/tests/test_debug.py41
-rw-r--r--bzrlib/tests/test_decorators.py321
-rw-r--r--bzrlib/tests/test_delta.py363
-rw-r--r--bzrlib/tests/test_diff.py1497
-rw-r--r--bzrlib/tests/test_directory_service.py139
-rw-r--r--bzrlib/tests/test_dirstate.py2881
-rw-r--r--bzrlib/tests/test_email_message.py226
-rw-r--r--bzrlib/tests/test_eol_filters.py75
-rw-r--r--bzrlib/tests/test_errors.py765
-rw-r--r--bzrlib/tests/test_estimate_compressed_size.py79
-rw-r--r--bzrlib/tests/test_export.py294
-rw-r--r--bzrlib/tests/test_export_pot.py467
-rw-r--r--bzrlib/tests/test_extract.py80
-rw-r--r--bzrlib/tests/test_features.py137
-rw-r--r--bzrlib/tests/test_fetch.py523
-rw-r--r--bzrlib/tests/test_fifo_cache.py325
-rw-r--r--bzrlib/tests/test_filter_tree.py68
-rw-r--r--bzrlib/tests/test_filters.py157
-rw-r--r--bzrlib/tests/test_fixtures.py28
-rw-r--r--bzrlib/tests/test_foreign.py495
-rw-r--r--bzrlib/tests/test_ftp_transport.py151
-rw-r--r--bzrlib/tests/test_generate_docs.py53
-rw-r--r--bzrlib/tests/test_generate_ids.py157
-rw-r--r--bzrlib/tests/test_globbing.py386
-rw-r--r--bzrlib/tests/test_gpg.py524
-rw-r--r--bzrlib/tests/test_graph.py1743
-rw-r--r--bzrlib/tests/test_groupcompress.py1224
-rw-r--r--bzrlib/tests/test_hashcache.py211
-rw-r--r--bzrlib/tests/test_help.py713
-rw-r--r--bzrlib/tests/test_hooks.py275
-rw-r--r--bzrlib/tests/test_http.py2363
-rw-r--r--bzrlib/tests/test_http_response.py830
-rw-r--r--bzrlib/tests/test_https_ca_bundle.py58
-rw-r--r--bzrlib/tests/test_https_urllib.py100
-rw-r--r--bzrlib/tests/test_i18n.py162
-rw-r--r--bzrlib/tests/test_identitymap.py81
-rw-r--r--bzrlib/tests/test_ignores.py226
-rw-r--r--bzrlib/tests/test_import_tariff.py272
-rw-r--r--bzrlib/tests/test_index.py1766
-rw-r--r--bzrlib/tests/test_info.py336
-rw-r--r--bzrlib/tests/test_inv.py1572
-rw-r--r--bzrlib/tests/test_inventory_delta.py622
-rw-r--r--bzrlib/tests/test_knit.py2644
-rw-r--r--bzrlib/tests/test_lazy_import.py1217
-rw-r--r--bzrlib/tests/test_lazy_regex.py153
-rw-r--r--bzrlib/tests/test_library_state.py51
-rw-r--r--bzrlib/tests/test_lock.py155
-rw-r--r--bzrlib/tests/test_lockable_files.py351
-rw-r--r--bzrlib/tests/test_lockdir.py761
-rw-r--r--bzrlib/tests/test_log.py1638
-rw-r--r--bzrlib/tests/test_lru_cache.py435
-rw-r--r--bzrlib/tests/test_lsprof.py139
-rw-r--r--bzrlib/tests/test_mail_client.py293
-rw-r--r--bzrlib/tests/test_matchers.py204
-rw-r--r--bzrlib/tests/test_memorytree.py223
-rw-r--r--bzrlib/tests/test_merge.py3291
-rw-r--r--bzrlib/tests/test_merge3.py454
-rw-r--r--bzrlib/tests/test_merge_core.py829
-rw-r--r--bzrlib/tests/test_merge_directive.py787
-rw-r--r--bzrlib/tests/test_mergetools.py177
-rw-r--r--bzrlib/tests/test_missing.py281
-rw-r--r--bzrlib/tests/test_msgeditor.py382
-rw-r--r--bzrlib/tests/test_multiparent.py274
-rw-r--r--bzrlib/tests/test_mutabletree.py62
-rw-r--r--bzrlib/tests/test_nonascii.py194
-rw-r--r--bzrlib/tests/test_options.py433
-rw-r--r--bzrlib/tests/test_osutils.py2277
-rw-r--r--bzrlib/tests/test_osutils_encodings.py202
-rw-r--r--bzrlib/tests/test_pack.py741
-rw-r--r--bzrlib/tests/test_patch.py28
-rw-r--r--bzrlib/tests/test_patches.py310
-rw-r--r--bzrlib/tests/test_patches_data/binary-after-normal.patch6
-rw-r--r--bzrlib/tests/test_patches_data/binary.patch6
-rw-r--r--bzrlib/tests/test_patches_data/diff1154
-rw-r--r--bzrlib/tests/test_patches_data/diff-212
-rw-r--r--bzrlib/tests/test_patches_data/diff-37
-rw-r--r--bzrlib/tests/test_patches_data/diff-48
-rw-r--r--bzrlib/tests/test_patches_data/diff-5164
-rw-r--r--bzrlib/tests/test_patches_data/diff-6562
-rw-r--r--bzrlib/tests/test_patches_data/diff-77
-rw-r--r--bzrlib/tests/test_patches_data/insert_top.patch7
-rw-r--r--bzrlib/tests/test_patches_data/mod2681
-rw-r--r--bzrlib/tests/test_patches_data/mod-2560
-rw-r--r--bzrlib/tests/test_patches_data/mod-3561
-rw-r--r--bzrlib/tests/test_patches_data/mod-4558
-rw-r--r--bzrlib/tests/test_patches_data/mod-5403
-rw-r--r--bzrlib/tests/test_patches_data/mod-61
-rw-r--r--bzrlib/tests/test_patches_data/mod-71
-rw-r--r--bzrlib/tests/test_patches_data/orig2789
-rw-r--r--bzrlib/tests/test_patches_data/orig-2558
-rw-r--r--bzrlib/tests/test_patches_data/orig-3560
-rw-r--r--bzrlib/tests/test_patches_data/orig-4558
-rw-r--r--bzrlib/tests/test_patches_data/orig-5558
-rw-r--r--bzrlib/tests/test_patches_data/orig-6558
-rw-r--r--bzrlib/tests/test_patches_data/orig-71
-rw-r--r--bzrlib/tests/test_patches_data/patchtext.patch25
-rw-r--r--bzrlib/tests/test_permissions.py272
-rw-r--r--bzrlib/tests/test_plugins.py985
-rw-r--r--bzrlib/tests/test_progress.py188
-rw-r--r--bzrlib/tests/test_pyutils.py88
-rw-r--r--bzrlib/tests/test_read_bundle.py111
-rw-r--r--bzrlib/tests/test_reconcile.py70
-rw-r--r--bzrlib/tests/test_reconfigure.py457
-rw-r--r--bzrlib/tests/test_registry.py356
-rw-r--r--bzrlib/tests/test_remote.py4294
-rw-r--r--bzrlib/tests/test_rename_map.py202
-rw-r--r--bzrlib/tests/test_repository.py1723
-rw-r--r--bzrlib/tests/test_revert.py161
-rw-r--r--bzrlib/tests/test_revision.py257
-rw-r--r--bzrlib/tests/test_revisionspec.py756
-rw-r--r--bzrlib/tests/test_revisiontree.py80
-rw-r--r--bzrlib/tests/test_rio.py402
-rw-r--r--bzrlib/tests/test_rules.py141
-rw-r--r--bzrlib/tests/test_sampler.py39
-rw-r--r--bzrlib/tests/test_scenarios.py110
-rw-r--r--bzrlib/tests/test_script.py639
-rw-r--r--bzrlib/tests/test_selftest.py3664
-rw-r--r--bzrlib/tests/test_serializer.py45
-rw-r--r--bzrlib/tests/test_server.py743
-rw-r--r--bzrlib/tests/test_setup.py81
-rw-r--r--bzrlib/tests/test_sftp_transport.py499
-rw-r--r--bzrlib/tests/test_shelf.py767
-rw-r--r--bzrlib/tests/test_shelf_ui.py613
-rw-r--r--bzrlib/tests/test_smart.py2712
-rw-r--r--bzrlib/tests/test_smart_add.py161
-rw-r--r--bzrlib/tests/test_smart_request.py272
-rw-r--r--bzrlib/tests/test_smart_signals.py189
-rw-r--r--bzrlib/tests/test_smart_transport.py4299
-rw-r--r--bzrlib/tests/test_smtp_connection.py274
-rw-r--r--bzrlib/tests/test_source.py438
-rw-r--r--bzrlib/tests/test_ssh_transport.py254
-rw-r--r--bzrlib/tests/test_status.py199
-rw-r--r--bzrlib/tests/test_store.py449
-rw-r--r--bzrlib/tests/test_strace.py96
-rw-r--r--bzrlib/tests/test_subsume.py114
-rw-r--r--bzrlib/tests/test_switch.py191
-rw-r--r--bzrlib/tests/test_symbol_versioning.py297
-rw-r--r--bzrlib/tests/test_tag.py192
-rw-r--r--bzrlib/tests/test_test_server.py336
-rw-r--r--bzrlib/tests/test_testament.py563
-rw-r--r--bzrlib/tests/test_textfile.py47
-rw-r--r--bzrlib/tests/test_textmerge.py41
-rw-r--r--bzrlib/tests/test_timestamp.py134
-rw-r--r--bzrlib/tests/test_trace.py443
-rw-r--r--bzrlib/tests/test_transactions.py299
-rw-r--r--bzrlib/tests/test_transform.py3729
-rw-r--r--bzrlib/tests/test_transport.py1147
-rw-r--r--bzrlib/tests/test_transport_log.py76
-rw-r--r--bzrlib/tests/test_tree.py418
-rw-r--r--bzrlib/tests/test_treebuilder.py95
-rw-r--r--bzrlib/tests/test_treeshape.py44
-rw-r--r--bzrlib/tests/test_tsort.py695
-rw-r--r--bzrlib/tests/test_tuned_gzip.py130
-rw-r--r--bzrlib/tests/test_ui.py566
-rw-r--r--bzrlib/tests/test_uncommit.py146
-rw-r--r--bzrlib/tests/test_upgrade.py213
-rw-r--r--bzrlib/tests/test_upgrade_stacked.py90
-rw-r--r--bzrlib/tests/test_url_policy_open.py359
-rw-r--r--bzrlib/tests/test_urlutils.py1045
-rw-r--r--bzrlib/tests/test_utextwrap.py211
-rw-r--r--bzrlib/tests/test_version.py49
-rw-r--r--bzrlib/tests/test_version_info.py421
-rw-r--r--bzrlib/tests/test_versionedfile.py139
-rw-r--r--bzrlib/tests/test_vf_search.py239
-rw-r--r--bzrlib/tests/test_weave.py770
-rw-r--r--bzrlib/tests/test_whitebox.py65
-rw-r--r--bzrlib/tests/test_win32utils.py429
-rw-r--r--bzrlib/tests/test_workingtree.py497
-rw-r--r--bzrlib/tests/test_workingtree_4.py874
-rw-r--r--bzrlib/tests/test_wsgi.py328
-rw-r--r--bzrlib/tests/test_xml.py538
-rw-r--r--bzrlib/tests/testui.py46
-rw-r--r--bzrlib/tests/transport_util.py47
-rw-r--r--bzrlib/tests/treeshape.py82
-rw-r--r--bzrlib/textfile.py55
-rw-r--r--bzrlib/textinv.py93
-rw-r--r--bzrlib/textmerge.py150
-rw-r--r--bzrlib/timestamp.py159
-rw-r--r--bzrlib/trace.py644
-rw-r--r--bzrlib/transactions.py199
-rw-r--r--bzrlib/transform.py3211
-rw-r--r--bzrlib/transport/__init__.py1896
-rw-r--r--bzrlib/transport/brokenrename.py53
-rw-r--r--bzrlib/transport/chroot.py69
-rw-r--r--bzrlib/transport/decorator.py198
-rw-r--r--bzrlib/transport/fakenfs.py70
-rw-r--r--bzrlib/transport/fakevfat.py106
-rw-r--r--bzrlib/transport/ftp/__init__.py642
-rw-r--r--bzrlib/transport/ftp/_gssapi.py128
-rw-r--r--bzrlib/transport/gio_transport.py593
-rw-r--r--bzrlib/transport/http/__init__.py658
-rw-r--r--bzrlib/transport/http/_pycurl.py448
-rw-r--r--bzrlib/transport/http/_urllib.py192
-rw-r--r--bzrlib/transport/http/_urllib2_wrappers.py1851
-rw-r--r--bzrlib/transport/http/ca_bundle.py79
-rw-r--r--bzrlib/transport/http/response.py386
-rw-r--r--bzrlib/transport/http/wsgi.py184
-rw-r--r--bzrlib/transport/local.py585
-rw-r--r--bzrlib/transport/log.py155
-rw-r--r--bzrlib/transport/memory.py328
-rw-r--r--bzrlib/transport/nosmart.py45
-rw-r--r--bzrlib/transport/pathfilter.py182
-rw-r--r--bzrlib/transport/readonly.py86
-rw-r--r--bzrlib/transport/remote.py608
-rw-r--r--bzrlib/transport/sftp.py903
-rw-r--r--bzrlib/transport/ssh.py766
-rw-r--r--bzrlib/transport/trace.py177
-rw-r--r--bzrlib/transport/unlistable.py46
-rw-r--r--bzrlib/tree.py1635
-rw-r--r--bzrlib/treebuilder.py80
-rw-r--r--bzrlib/tsort.py713
-rw-r--r--bzrlib/tuned_gzip.py415
-rw-r--r--bzrlib/ui/__init__.py556
-rw-r--r--bzrlib/ui/text.py673
-rw-r--r--bzrlib/uncommit.py144
-rw-r--r--bzrlib/upgrade.py311
-rw-r--r--bzrlib/url_policy_open.py314
-rw-r--r--bzrlib/urlutils.py969
-rw-r--r--bzrlib/utextwrap.py266
-rw-r--r--bzrlib/util/__init__.py1
-rw-r--r--bzrlib/util/_bencode_py.py174
-rw-r--r--bzrlib/util/configobj/__init__.py1
-rw-r--r--bzrlib/util/configobj/configobj.py2461
-rw-r--r--bzrlib/util/simplemapi.py259
-rw-r--r--bzrlib/util/tests/__init__.py1
-rw-r--r--bzrlib/version.py107
-rw-r--r--bzrlib/version_info_formats/__init__.py207
-rw-r--r--bzrlib/version_info_formats/format_custom.py112
-rw-r--r--bzrlib/version_info_formats/format_python.py107
-rw-r--r--bzrlib/version_info_formats/format_rio.py96
-rw-r--r--bzrlib/versionedfile.py1963
-rw-r--r--bzrlib/vf_repository.py3202
-rw-r--r--bzrlib/vf_search.py511
-rw-r--r--bzrlib/views.py285
-rwxr-xr-xbzrlib/weave.py1032
-rw-r--r--bzrlib/weavefile.py167
-rw-r--r--bzrlib/win32utils.py646
-rw-r--r--bzrlib/workingtree.py3191
-rw-r--r--bzrlib/workingtree_3.py267
-rw-r--r--bzrlib/workingtree_4.py2354
-rw-r--r--bzrlib/xml5.py115
-rw-r--r--bzrlib/xml6.py35
-rw-r--r--bzrlib/xml7.py34
-rw-r--r--bzrlib/xml8.py371
-rw-r--r--bzrlib/xml_serializer.py439
876 files changed, 426862 insertions, 0 deletions
diff --git a/bzrlib/__init__.py b/bzrlib/__init__.py
new file mode 100644
index 0000000..bff40ce
--- /dev/null
+++ b/bzrlib/__init__.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""All of bzr.
+
+Developer documentation is available at
+http://doc.bazaar.canonical.com/bzr.dev/developers/
+
+The project website is at http://bazaar.canonical.com/
+
+Some particularly interesting things in bzrlib are:
+
+ * bzrlib.initialize -- setup the library for use
+ * bzrlib.plugin.load_plugins -- load all installed plugins
+ * bzrlib.branch.Branch.open -- open a branch
+ * bzrlib.workingtree.WorkingTree.open -- open a working tree
+
+We hope you enjoy this library.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+# Keep track of when bzrlib was first imported, so that we can give rough
+# timestamps relative to program start in the log file kept by bzrlib.trace.
+_start_time = time.time()
+
+import codecs
+import sys
+
+
+IGNORE_FILENAME = ".bzrignore"
+
+
+__copyright__ = "Copyright 2005-2012 Canonical Ltd."
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+
+version_info = (2, 6, 0, 'beta', 2)
+
+# API compatibility version
+api_minimum_version = (2, 4, 0)
+
+
+def _format_version_tuple(version_info):
+ """Turn a version number 2, 3 or 5-tuple into a short string.
+
+ This format matches <http://docs.python.org/dist/meta-data.html>
+ and the typical presentation used in Python output.
+
+ This also checks that the version is reasonable: the sub-release must be
+ zero for final releases.
+
+ >>> print _format_version_tuple((1, 0, 0, 'final', 0))
+ 1.0.0
+ >>> print _format_version_tuple((1, 2, 0, 'dev', 0))
+ 1.2.0dev
+ >>> print _format_version_tuple((1, 2, 0, 'dev', 1))
+ 1.2.0dev1
+ >>> print _format_version_tuple((1, 1, 1, 'candidate', 2))
+ 1.1.1rc2
+ >>> print _format_version_tuple((2, 1, 0, 'beta', 1))
+ 2.1b1
+ >>> print _format_version_tuple((1, 4, 0))
+ 1.4.0
+ >>> print _format_version_tuple((1, 4))
+ 1.4
+ >>> print _format_version_tuple((2, 1, 0, 'final', 42))
+ 2.1.0.42
+ >>> print _format_version_tuple((1, 4, 0, 'wibble', 0))
+ 1.4.0.wibble.0
+ """
+ if len(version_info) == 2:
+ main_version = '%d.%d' % version_info[:2]
+ else:
+ main_version = '%d.%d.%d' % version_info[:3]
+ if len(version_info) <= 3:
+ return main_version
+
+ release_type = version_info[3]
+ sub = version_info[4]
+
+ if release_type == 'final' and sub == 0:
+ sub_string = ''
+ elif release_type == 'final':
+ sub_string = '.' + str(sub)
+ elif release_type == 'dev' and sub == 0:
+ sub_string = 'dev'
+ elif release_type == 'dev':
+ sub_string = 'dev' + str(sub)
+ elif release_type in ('alpha', 'beta'):
+ if version_info[2] == 0:
+ main_version = '%d.%d' % version_info[:2]
+ sub_string = release_type[0] + str(sub)
+ elif release_type == 'candidate':
+ sub_string = 'rc' + str(sub)
+ else:
+ return '.'.join(map(str, version_info))
+
+ return main_version + sub_string
+
+
+# lazy_regex import must be done after _format_version_tuple definition
+# to avoid "no attribute '_format_version_tuple'" error when using
+# deprecated_function in the lazy_regex module.
+if getattr(sys, '_bzr_lazy_regex', False):
+ # The 'bzr' executable sets _bzr_lazy_regex. We install the lazy regex
+ # hack as soon as possible so that as much of the standard library can
+ # benefit, including the 'string' module.
+ del sys._bzr_lazy_regex
+ import bzrlib.lazy_regex
+ bzrlib.lazy_regex.install_lazy_compile()
+
+
+__version__ = _format_version_tuple(version_info)
+version_string = __version__
+
+
+def _patch_filesystem_default_encoding(new_enc):
+ """Change the Python process global encoding for filesystem names
+
+ The effect is to change how open() and other builtin functions handle
+ unicode filenames on posix systems. This should only be done near startup.
+
+ The new encoding string passed to this function must survive until process
+ termination, otherwise the interpreter may access uninitialized memory.
+ The use of intern() may defer breakage but is not enough; the string
+ object should be secure against module reloading and during teardown.
+ """
+ try:
+ import ctypes
+ old_ptr = ctypes.c_void_p.in_dll(ctypes.pythonapi,
+ "Py_FileSystemDefaultEncoding")
+ except (ImportError, ValueError):
+ return # No ctypes or not CPython implementation, do nothing
+ new_ptr = ctypes.cast(ctypes.c_char_p(intern(new_enc)), ctypes.c_void_p)
+ old_ptr.value = new_ptr.value
+ if sys.getfilesystemencoding() != new_enc:
+ raise RuntimeError("Failed to change the filesystem default encoding")
+ return new_enc
+
+
+# When running under the bzr script, override bad filesystem default encoding.
+# This is not safe to do for all users of bzrlib, other scripts should instead
+# just ensure a usable locale is set via the $LANG variable on posix systems.
+_fs_enc = sys.getfilesystemencoding()
+if getattr(sys, "_bzr_default_fs_enc", None) is not None:
+ if (_fs_enc is None or codecs.lookup(_fs_enc).name == "ascii"):
+ _fs_enc = _patch_filesystem_default_encoding(sys._bzr_default_fs_enc)
+if _fs_enc is None:
+ _fs_enc = "ascii"
+else:
+ _fs_enc = codecs.lookup(_fs_enc).name
+
+
+# bzr has various bits of global state that are slowly being eliminated.
+# This variable is intended to permit any new state-like things to be attached
+# to a library_state.BzrLibraryState object rather than getting new global
+# variables that need to be hunted down. Accessing the current BzrLibraryState
+# through this variable is not encouraged: it is better to pass it around as
+# part of the context of an operation than to look it up directly, but when
+# that is too hard, it is better to use this variable than to make a brand new
+# global variable.
+# If using this variable by looking it up (because it can't be easily obtained)
+# it is important to store the reference you get, rather than looking it up
+# repeatedly; that way your code will behave properly in the bzrlib test suite
+# and from programs that do use multiple library contexts.
+global_state = None
+
+
+def initialize(setup_ui=True, stdin=None, stdout=None, stderr=None):
+ """Set up everything needed for normal use of bzrlib.
+
+ Most applications that embed bzrlib, including bzr itself, should call
+ this function to initialize various subsystems.
+
+ More options may be added in future so callers should use named arguments.
+
+ The object returned by this function can be used as a context manager
+ through the 'with' statement to automatically shut down when the process
+ is finished with bzrlib. However (from bzr 2.4) it's not necessary to
+ separately enter the context as well as starting bzr: bzrlib is ready to
+ go when this function returns.
+
+ :param setup_ui: If true (default) use a terminal UI; otherwise
+ some other ui_factory must be assigned to `bzrlib.ui.ui_factory` by
+ the caller.
+ :param stdin, stdout, stderr: If provided, use these for terminal IO;
+ otherwise use the files in `sys`.
+ :return: A context manager for the use of bzrlib. The __exit__
+ should be called by the caller before exiting their process or
+ otherwise stopping use of bzrlib. Advanced callers can use
+ BzrLibraryState directly.
+ """
+ from bzrlib import library_state, trace
+ if setup_ui:
+ import bzrlib.ui
+ stdin = stdin or sys.stdin
+ stdout = stdout or sys.stdout
+ stderr = stderr or sys.stderr
+ ui_factory = bzrlib.ui.make_ui_for_terminal(stdin, stdout, stderr)
+ else:
+ ui_factory = None
+ tracer = trace.DefaultConfig()
+ state = library_state.BzrLibraryState(ui=ui_factory, trace=tracer)
+ # Start automatically in case people don't realize this returns a context.
+ state._start()
+ return state
+
+
+def test_suite():
+ import tests
+ return tests.test_suite()
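The initialize() function added above returns a BzrLibraryState object that can be used
as a context manager. A minimal usage sketch for a script embedding bzrlib, based only on
the docstrings in this file (the branch path is a placeholder, and Python 2 is assumed
since that is what this code targets):

    import bzrlib
    from bzrlib.branch import Branch

    # initialize() sets up the terminal UI, trace configuration and library state;
    # the 'with' block ensures __exit__ is called before the process stops using bzrlib.
    with bzrlib.initialize():
        branch = Branch.open('/path/to/branch')  # placeholder path
        print branch.last_revision()

With setup_ui=False, the caller must instead assign its own ui_factory to
bzrlib.ui.ui_factory before performing any operations.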
diff --git a/bzrlib/_annotator_py.py b/bzrlib/_annotator_py.py
new file mode 100644
index 0000000..22e8689
--- /dev/null
+++ b/bzrlib/_annotator_py.py
@@ -0,0 +1,313 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Functionality for doing annotations in the 'optimal' way"""
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ annotate, # Must be lazy to avoid circular importing
+ graph as _mod_graph,
+ patiencediff,
+ )
+""")
+from bzrlib import (
+ errors,
+ osutils,
+ ui,
+ )
+
+
+class Annotator(object):
+ """Class that drives performing annotations."""
+
+ def __init__(self, vf):
+ """Create a new Annotator from a VersionedFile."""
+ self._vf = vf
+ self._parent_map = {}
+ self._text_cache = {}
+ # Map from key => number of nexts that will be built from this key
+ self._num_needed_children = {}
+ self._annotations_cache = {}
+ self._heads_provider = None
+ self._ann_tuple_cache = {}
+
+ def _update_needed_children(self, key, parent_keys):
+ for parent_key in parent_keys:
+ if parent_key in self._num_needed_children:
+ self._num_needed_children[parent_key] += 1
+ else:
+ self._num_needed_children[parent_key] = 1
+
+ def _get_needed_keys(self, key):
+ """Determine the texts we need to get from the backing vf.
+
+ :return: (vf_keys_needed, ann_keys_needed)
+ vf_keys_needed These are keys that we need to get from the vf
+ ann_keys_needed Texts which we have in self._text_cache but we
+ don't have annotations for. We need to yield these
+ in the proper order so that we can get proper
+ annotations.
+ """
+ parent_map = self._parent_map
+ # We need 1 extra copy of the node we will be looking at when we are
+ # done
+ self._num_needed_children[key] = 1
+ vf_keys_needed = set()
+ ann_keys_needed = set()
+ needed_keys = set([key])
+ while needed_keys:
+ parent_lookup = []
+ next_parent_map = {}
+ for key in needed_keys:
+ if key in self._parent_map:
+ # We don't need to lookup this key in the vf
+ if key not in self._text_cache:
+ # Extract this text from the vf
+ vf_keys_needed.add(key)
+ elif key not in self._annotations_cache:
+ # We do need to annotate
+ ann_keys_needed.add(key)
+ next_parent_map[key] = self._parent_map[key]
+ else:
+ parent_lookup.append(key)
+ vf_keys_needed.add(key)
+ needed_keys = set()
+ next_parent_map.update(self._vf.get_parent_map(parent_lookup))
+ for key, parent_keys in next_parent_map.iteritems():
+ if parent_keys is None: # No graph versionedfile
+ parent_keys = ()
+ next_parent_map[key] = ()
+ self._update_needed_children(key, parent_keys)
+ needed_keys.update([key for key in parent_keys
+ if key not in parent_map])
+ parent_map.update(next_parent_map)
+ # _heads_provider does some graph caching, so it is only valid while
+ # self._parent_map hasn't changed
+ self._heads_provider = None
+ return vf_keys_needed, ann_keys_needed
+
+ def _get_needed_texts(self, key, pb=None):
+ """Get the texts we need to properly annotate key.
+
+ :param key: A Key that is present in self._vf
+ :return: Yield (this_key, text, num_lines)
+ 'text' is an opaque object that just has to work with whatever
+ matcher object we are using. Currently it is always 'lines' but
+ future improvements may change this to a simple text string.
+ """
+ keys, ann_keys = self._get_needed_keys(key)
+ if pb is not None:
+ pb.update('getting stream', 0, len(keys))
+ stream = self._vf.get_record_stream(keys, 'topological', True)
+ for idx, record in enumerate(stream):
+ if pb is not None:
+ pb.update('extracting', 0, len(keys))
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(record.key, self._vf)
+ this_key = record.key
+ lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
+ num_lines = len(lines)
+ self._text_cache[this_key] = lines
+ yield this_key, lines, num_lines
+ for key in ann_keys:
+ lines = self._text_cache[key]
+ num_lines = len(lines)
+ yield key, lines, num_lines
+
+ def _get_parent_annotations_and_matches(self, key, text, parent_key):
+ """Get the list of annotations for the parent, and the matching lines.
+
+ :param text: The opaque value given by _get_needed_texts
+ :param parent_key: The key for the parent text
+ :return: (parent_annotations, matching_blocks)
+ parent_annotations is a list as long as the number of lines in
+ parent
+ matching_blocks is a list of (parent_idx, text_idx, len) tuples
+ indicating which lines match between the two texts
+ """
+ parent_lines = self._text_cache[parent_key]
+ parent_annotations = self._annotations_cache[parent_key]
+ # PatienceSequenceMatcher should probably be part of Policy
+ matcher = patiencediff.PatienceSequenceMatcher(None,
+ parent_lines, text)
+ matching_blocks = matcher.get_matching_blocks()
+ return parent_annotations, matching_blocks
+
+ def _update_from_first_parent(self, key, annotations, lines, parent_key):
+ """Reannotate this text relative to its first parent."""
+ (parent_annotations,
+ matching_blocks) = self._get_parent_annotations_and_matches(
+ key, lines, parent_key)
+
+ for parent_idx, lines_idx, match_len in matching_blocks:
+ # For all matching regions we copy across the parent annotations
+ annotations[lines_idx:lines_idx + match_len] = \
+ parent_annotations[parent_idx:parent_idx + match_len]
+
+ def _update_from_other_parents(self, key, annotations, lines,
+ this_annotation, parent_key):
+ """Reannotate this text relative to a second (or more) parent."""
+ (parent_annotations,
+ matching_blocks) = self._get_parent_annotations_and_matches(
+ key, lines, parent_key)
+
+ last_ann = None
+ last_parent = None
+ last_res = None
+ # TODO: consider making all annotations unique and then using 'is'
+ # everywhere. Current results claim that it isn't any faster,
+ # because of the time spent deduping
+ # deduping also saves a bit of memory. For NEWS it saves ~1MB,
+ # but that is out of 200-300MB for extracting everything, so a
+ # fairly trivial amount
+ for parent_idx, lines_idx, match_len in matching_blocks:
+ # For lines which match this parent, we will now resolve whether
+ # this parent wins over the current annotation
+ ann_sub = annotations[lines_idx:lines_idx + match_len]
+ par_sub = parent_annotations[parent_idx:parent_idx + match_len]
+ if ann_sub == par_sub:
+ continue
+ for idx in xrange(match_len):
+ ann = ann_sub[idx]
+ par_ann = par_sub[idx]
+ ann_idx = lines_idx + idx
+ if ann == par_ann:
+ # Nothing to change
+ continue
+ if ann == this_annotation:
+ # Originally claimed 'this', but it was really in this
+ # parent
+ annotations[ann_idx] = par_ann
+ continue
+ # Resolve the fact that both sides have a different value for
+ # last modified
+ if ann == last_ann and par_ann == last_parent:
+ annotations[ann_idx] = last_res
+ else:
+ new_ann = set(ann)
+ new_ann.update(par_ann)
+ new_ann = tuple(sorted(new_ann))
+ annotations[ann_idx] = new_ann
+ last_ann = ann
+ last_parent = par_ann
+ last_res = new_ann
+
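When the two sides disagree about a line, the loop above keeps both claims by merging them into a sorted tuple (and remembers the last merge so identical runs are not rebuilt). The merge step in isolation, with invented keys:

    ann = ('rev-1',)
    par_ann = ('rev-3',)
    new_ann = set(ann)
    new_ann.update(par_ann)
    new_ann = tuple(sorted(new_ann))
    # new_ann == ('rev-1', 'rev-3'): both revisions remain possible sources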
+ def _record_annotation(self, key, parent_keys, annotations):
+ self._annotations_cache[key] = annotations
+ for parent_key in parent_keys:
+ num = self._num_needed_children[parent_key]
+ num -= 1
+ if num == 0:
+ del self._text_cache[parent_key]
+ del self._annotations_cache[parent_key]
+ # Do we want to clean up _num_needed_children at this point as
+ # well?
+ self._num_needed_children[parent_key] = num
+
+ def _annotate_one(self, key, text, num_lines):
+ this_annotation = (key,)
+ # Note: annotations will be mutated by calls to _update_from*
+ annotations = [this_annotation] * num_lines
+ parent_keys = self._parent_map[key]
+ if parent_keys:
+ self._update_from_first_parent(key, annotations, text,
+ parent_keys[0])
+ for parent in parent_keys[1:]:
+ self._update_from_other_parents(key, annotations, text,
+ this_annotation, parent)
+ self._record_annotation(key, parent_keys, annotations)
+
+ def add_special_text(self, key, parent_keys, text):
+ """Add a specific text to the graph.
+
+ This is used to add a text which is not otherwise present in the
+ versioned file (e.g. a WorkingTree injecting 'current:' into the
+ graph to annotate the edited content).
+
+ :param key: The key to use to request this text be annotated
+ :param parent_keys: The parents of this text
+ :param text: A string containing the content of the text
+ """
+ self._parent_map[key] = parent_keys
+ self._text_cache[key] = osutils.split_lines(text)
+ self._heads_provider = None
+
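A hedged usage sketch (the 'current:' key, the parent key and the text are invented; `annotator` is assumed to be an Annotator instance whose versioned file already contains ('rev-2',)):

    current_key = ('current:',)
    annotator.add_special_text(current_key, [('rev-2',)],
                               'line one\nedited line\n')
    annotations, lines = annotator.annotate(current_key)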
+ def annotate(self, key):
+ """Return annotated fulltext for the given key.
+
+ :param key: A tuple defining the text to annotate
+ :return: ([annotations], [lines])
+ annotations is a list of tuples of keys, one for each line in lines;
+ each key is a possible source for the given line.
+ lines is the text of "key" as a list of lines.
+ """
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for text_key, text, num_lines in self._get_needed_texts(key, pb=pb):
+ self._annotate_one(text_key, text, num_lines)
+ finally:
+ pb.finished()
+ try:
+ annotations = self._annotations_cache[key]
+ except KeyError:
+ raise errors.RevisionNotPresent(key, self._vf)
+ return annotations, self._text_cache[key]
+
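Illustrative shape of the return value (keys and text invented): each entry in annotations is a tuple of candidate keys for the corresponding line.

    annotations, lines = annotator.annotate(('rev-2',))
    # annotations might look like:
    #   [(('rev-1',),), (('rev-1',), ('rev-2',)), (('rev-2',),)]
    # lines is the matching list of strings, e.g.
    #   ['unchanged\n', 'ambiguous\n', 'new\n']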
+ def _get_heads_provider(self):
+ if self._heads_provider is None:
+ self._heads_provider = _mod_graph.KnownGraph(self._parent_map)
+ return self._heads_provider
+
+ def _resolve_annotation_tie(self, the_heads, line, tiebreaker):
+ if tiebreaker is None:
+ head = sorted(the_heads)[0]
+ else:
+ # Backwards compatibility, break up the heads into pairs and
+ # resolve the result
+ next_head = iter(the_heads)
+ head = next_head.next()
+ for possible_head in next_head:
+ annotated_lines = ((head, line), (possible_head, line))
+ head = tiebreaker(annotated_lines)[0]
+ return head
+
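Without a custom tiebreaker the smallest head wins; with one, the heads are folded pairwise through it. A small sketch (the keys and the tiebreaker function are made up):

    def prefer_rev_2(annotated_lines):
        # annotated_lines is ((head, line), (possible_head, line))
        for candidate in annotated_lines:
            if candidate[0] == ('rev-2',):
                return candidate
        return annotated_lines[0]

    # sorted() alone would pick ('rev-1',); the tiebreaker picks ('rev-2',).
    head = annotator._resolve_annotation_tie([('rev-1',), ('rev-2',)],
                                             'line\n', prefer_rev_2)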
+ def annotate_flat(self, key):
+ """Determine the single-best-revision to source for each line.
+
+ This is meant as a compatibility thunk to how annotate() used to work.
+ :return: [(ann_key, line)]
+ A list of tuples with a single annotation key for each line.
+ """
+ custom_tiebreaker = annotate._break_annotation_tie
+ annotations, lines = self.annotate(key)
+ out = []
+ heads = self._get_heads_provider().heads
+ append = out.append
+ for annotation, line in zip(annotations, lines):
+ if len(annotation) == 1:
+ head = annotation[0]
+ else:
+ the_heads = heads(annotation)
+ if len(the_heads) == 1:
+ for head in the_heads: break # get the item out of the set
+ else:
+ head = self._resolve_annotation_tie(the_heads, line,
+ custom_tiebreaker)
+ append((head, line))
+ return out
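An end-to-end sketch of the flattened form (revision keys invented): each line comes back with exactly one winning key, resolved through the heads provider and the tie-breaking above.

    flat = annotator.annotate_flat(('rev-2',))
    # e.g. [(('rev-1',), 'unchanged\n'), (('rev-2',), 'new\n')]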
diff --git a/bzrlib/_annotator_pyx.c b/bzrlib/_annotator_pyx.c
new file mode 100644
index 0000000..b671505
--- /dev/null
+++ b/bzrlib/_annotator_pyx.c
@@ -0,0 +1,1499 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:00:55 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Functionality for doing annotations in the \'optimal\' way";
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_UnpackItem(PyObject *); /*proto*/
+static int __Pyx_EndUnpack(PyObject *); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, char *modname); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._annotator_pyx */
+
+
+/* Declarations from implementation of bzrlib._annotator_pyx */
+
+static int __pyx_f_6bzrlib_14_annotator_pyx__check_annotations_are_lists(PyObject *,PyObject *); /*proto*/
+static int __pyx_f_6bzrlib_14_annotator_pyx__check_match_ranges(PyObject *,PyObject *,Py_ssize_t,Py_ssize_t,Py_ssize_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(PyObject *,Py_ssize_t *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx__combine_annotations(PyObject *,PyObject *,PyObject *); /*proto*/
+static int __pyx_f_6bzrlib_14_annotator_pyx__apply_parent_annotations(PyObject *,PyObject *,PyObject *); /*proto*/
+static int __pyx_f_6bzrlib_14_annotator_pyx__merge_annotations(PyObject *,PyObject *,PyObject *,PyObject *,PyObject *); /*proto*/
+
+static char __pyx_k1[] = "annotations must be a list";
+static char __pyx_k2[] = "parent_annotations must be a list";
+static char __pyx_k3[] = "Match length exceeds len of parent_annotations %s > %s";
+static char __pyx_k4[] = "Match length exceeds len of annotations %s > %s";
+static char __pyx_k5[] = "annotations must be tuples";
+static char __pyx_k6[] = "_get_parent_annotations_and_matches";
+static char __pyx_k7[] = "_ann_tuple_cache";
+static char __pyx_k8[] = "bzrlib";
+static char __pyx_k9[] = "annotate";
+static char __pyx_k10[] = "_break_annotation_tie";
+static char __pyx_k11[] = "_get_heads_provider";
+static char __pyx_k12[] = "heads";
+static char __pyx_k13[] = "_resolve_annotation_tie";
+static char __pyx_k14[] = "_annotator_py";
+static char __pyx_k15[] = "Annotator";
+static char __pyx_k16[] = "Class that drives performing annotations.";
+static char __pyx_k17[] = "_update_from_first_parent";
+static char __pyx_k18[] = "_update_from_other_parents";
+static char __pyx_k19[] = "annotate_flat";
+
+static PyObject *__pyx_n_Annotator;
+static PyObject *__pyx_n__ann_tuple_cache;
+static PyObject *__pyx_n__annotator_py;
+static PyObject *__pyx_n__break_annotation_tie;
+static PyObject *__pyx_n__get_heads_provider;
+static PyObject *__pyx_n__resolve_annotation_tie;
+static PyObject *__pyx_n__update_from_first_parent;
+static PyObject *__pyx_n__update_from_other_parents;
+static PyObject *__pyx_n_annotate;
+static PyObject *__pyx_n_annotate_flat;
+static PyObject *__pyx_n_bzrlib;
+static PyObject *__pyx_n_heads;
+
+static PyObject *__pyx_k1p;
+static PyObject *__pyx_k2p;
+static PyObject *__pyx_k3p;
+static PyObject *__pyx_k4p;
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k6p;
+static PyObject *__pyx_k16p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_Annotator, 1, __pyx_k15, sizeof(__pyx_k15)},
+ {&__pyx_n__ann_tuple_cache, 1, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_n__annotator_py, 1, __pyx_k14, sizeof(__pyx_k14)},
+ {&__pyx_n__break_annotation_tie, 1, __pyx_k10, sizeof(__pyx_k10)},
+ {&__pyx_n__get_heads_provider, 1, __pyx_k11, sizeof(__pyx_k11)},
+ {&__pyx_n__resolve_annotation_tie, 1, __pyx_k13, sizeof(__pyx_k13)},
+ {&__pyx_n__update_from_first_parent, 1, __pyx_k17, sizeof(__pyx_k17)},
+ {&__pyx_n__update_from_other_parents, 1, __pyx_k18, sizeof(__pyx_k18)},
+ {&__pyx_n_annotate, 1, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_n_annotate_flat, 1, __pyx_k19, sizeof(__pyx_k19)},
+ {&__pyx_n_bzrlib, 1, __pyx_k8, sizeof(__pyx_k8)},
+ {&__pyx_n_heads, 1, __pyx_k12, sizeof(__pyx_k12)},
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_k2p, 0, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_k3p, 0, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_k4p, 0, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k6p, 0, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_k16p, 0, __pyx_k16, sizeof(__pyx_k16)},
+ {0, 0, 0, 0}
+};
+
+
+
+/* Implementation of bzrlib._annotator_pyx */
+
+static int __pyx_f_6bzrlib_14_annotator_pyx__check_annotations_are_lists(PyObject *__pyx_v_annotations,PyObject *__pyx_v_parent_annotations) {
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_annotations);
+ Py_INCREF(__pyx_v_parent_annotations);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":63 */
+ __pyx_1 = (!PyList_CheckExact(__pyx_v_annotations));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; goto __pyx_L1;}
+ Py_INCREF(__pyx_k1p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k1p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":65 */
+ __pyx_1 = (!PyList_CheckExact(__pyx_v_parent_annotations));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; goto __pyx_L1;}
+ Py_INCREF(__pyx_k2p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k2p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":67 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx._check_annotations_are_lists");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_annotations);
+ Py_DECREF(__pyx_v_parent_annotations);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_14_annotator_pyx__check_match_ranges(PyObject *__pyx_v_parent_annotations,PyObject *__pyx_v_annotations,Py_ssize_t __pyx_v_parent_idx,Py_ssize_t __pyx_v_lines_idx,Py_ssize_t __pyx_v_match_len) {
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_parent_annotations);
+ Py_INCREF(__pyx_v_annotations);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":73 */
+ __pyx_1 = ((__pyx_v_parent_idx + __pyx_v_match_len) > PyList_GET_SIZE(__pyx_v_parent_annotations));
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromSsize_t((__pyx_v_parent_idx + __pyx_v_match_len)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromSsize_t(PyList_GET_SIZE(__pyx_v_parent_annotations)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k3p, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":78 */
+ __pyx_1 = ((__pyx_v_lines_idx + __pyx_v_match_len) > PyList_GET_SIZE(__pyx_v_annotations));
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromSsize_t((__pyx_v_lines_idx + __pyx_v_match_len)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromSsize_t(PyList_GET_SIZE(__pyx_v_annotations)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k4p, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":83 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx._check_match_ranges");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parent_annotations);
+ Py_DECREF(__pyx_v_annotations);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(PyObject *__pyx_v_tpl,Py_ssize_t *__pyx_v_pos) {
+ PyObject *__pyx_r;
+ int __pyx_1;
+ Py_INCREF(__pyx_v_tpl);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":94 */
+ (__pyx_v_pos[0]) = ((__pyx_v_pos[0]) + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":95 */
+ __pyx_1 = ((__pyx_v_pos[0]) >= PyTuple_GET_SIZE(__pyx_v_tpl));
+ if (__pyx_1) {
+ __pyx_r = NULL;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":97 */
+ __pyx_r = PyTuple_GET_ITEM(__pyx_v_tpl,(__pyx_v_pos[0]));
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_tpl);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx__combine_annotations(PyObject *__pyx_v_ann_one,PyObject *__pyx_v_ann_two,PyObject *__pyx_v_cache) {
+ Py_ssize_t __pyx_v_pos_one;
+ Py_ssize_t __pyx_v_pos_two;
+ Py_ssize_t __pyx_v_out_pos;
+ PyObject *__pyx_v_temp;
+ PyObject *__pyx_v_left;
+ PyObject *__pyx_v_right;
+ PyObject *__pyx_v_cache_key;
+ PyObject *__pyx_v_new_ann;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_ann_one);
+ Py_INCREF(__pyx_v_ann_two);
+ Py_INCREF(__pyx_v_cache);
+ __pyx_v_cache_key = Py_None; Py_INCREF(Py_None);
+ __pyx_v_new_ann = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":106 */
+ __pyx_1 = PyObject_RichCompareBool(__pyx_v_ann_one,__pyx_v_ann_two,Py_LT); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; goto __pyx_L1;}
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_ann_one);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_ann_one);
+ Py_INCREF(__pyx_v_ann_two);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_ann_two);
+ Py_DECREF(__pyx_v_cache_key);
+ __pyx_v_cache_key = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_ann_two);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_ann_two);
+ Py_INCREF(__pyx_v_ann_one);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_ann_one);
+ Py_DECREF(__pyx_v_cache_key);
+ __pyx_v_cache_key = __pyx_2;
+ __pyx_2 = 0;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":110 */
+ __pyx_v_temp = PyDict_GetItem(__pyx_v_cache,__pyx_v_cache_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":111 */
+ __pyx_1 = (__pyx_v_temp != NULL);
+ if (__pyx_1) {
+ Py_INCREF(((PyObject *)__pyx_v_temp));
+ __pyx_r = ((PyObject *)__pyx_v_temp);
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":114 */
+ __pyx_1 = (!PyTuple_CheckExact(__pyx_v_ann_one));
+ if (!__pyx_1) {
+ __pyx_1 = (!PyTuple_CheckExact(__pyx_v_ann_two));
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L1;}
+ Py_INCREF(__pyx_k5p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k5p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":118 */
+ __pyx_v_pos_one = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":119 */
+ __pyx_v_pos_two = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":120 */
+ __pyx_v_out_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":121 */
+ __pyx_v_left = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_one,(&__pyx_v_pos_one));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":122 */
+ __pyx_v_right = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_two,(&__pyx_v_pos_two));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":123 */
+ __pyx_2 = PyTuple_New((PyTuple_GET_SIZE(__pyx_v_ann_one) + PyTuple_GET_SIZE(__pyx_v_ann_two))); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_new_ann);
+ __pyx_v_new_ann = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":125 */
+ while (1) {
+ __pyx_1 = (__pyx_v_left != NULL);
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_right != NULL);
+ }
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":129 */
+ __pyx_1 = (__pyx_v_left == __pyx_v_right);
+ if (!__pyx_1) {
+ __pyx_1 = PyObject_RichCompareBool(__pyx_v_left,__pyx_v_right,Py_EQ);
+ }
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":132 */
+ Py_INCREF(__pyx_v_left);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":133 */
+ PyTuple_SET_ITEM(__pyx_v_new_ann,__pyx_v_out_pos,__pyx_v_left);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":134 */
+ __pyx_v_left = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_one,(&__pyx_v_pos_one));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":135 */
+ __pyx_v_right = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_two,(&__pyx_v_pos_two));
+ goto __pyx_L7;
+ }
+ __pyx_1 = PyObject_RichCompareBool(__pyx_v_left,__pyx_v_right,Py_LT);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":138 */
+ Py_INCREF(__pyx_v_left);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":139 */
+ PyTuple_SET_ITEM(__pyx_v_new_ann,__pyx_v_out_pos,__pyx_v_left);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":140 */
+ __pyx_v_left = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_one,(&__pyx_v_pos_one));
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":142 */
+ Py_INCREF(__pyx_v_right);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":143 */
+ PyTuple_SET_ITEM(__pyx_v_new_ann,__pyx_v_out_pos,__pyx_v_right);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":144 */
+ __pyx_v_right = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_two,(&__pyx_v_pos_two));
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":145 */
+ __pyx_v_out_pos = (__pyx_v_out_pos + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":146 */
+ while (1) {
+ __pyx_1 = (__pyx_v_left != NULL);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":147 */
+ Py_INCREF(__pyx_v_left);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":148 */
+ PyTuple_SET_ITEM(__pyx_v_new_ann,__pyx_v_out_pos,__pyx_v_left);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":149 */
+ __pyx_v_left = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_one,(&__pyx_v_pos_one));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":150 */
+ __pyx_v_out_pos = (__pyx_v_out_pos + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":151 */
+ while (1) {
+ __pyx_1 = (__pyx_v_right != NULL);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":152 */
+ Py_INCREF(__pyx_v_right);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":153 */
+ PyTuple_SET_ITEM(__pyx_v_new_ann,__pyx_v_out_pos,__pyx_v_right);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":154 */
+ __pyx_v_right = __pyx_f_6bzrlib_14_annotator_pyx__next_tuple_entry(__pyx_v_ann_two,(&__pyx_v_pos_two));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":155 */
+ __pyx_v_out_pos = (__pyx_v_out_pos + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":156 */
+ __pyx_1 = (__pyx_v_out_pos != PyTuple_GET_SIZE(__pyx_v_new_ann));
+ if (__pyx_1) {
+ __pyx_3 = PySequence_GetSlice(__pyx_v_new_ann, 0, __pyx_v_out_pos); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_new_ann);
+ __pyx_v_new_ann = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":160 */
+ __pyx_1 = PyDict_SetItem(__pyx_v_cache,__pyx_v_cache_key,__pyx_v_new_ann); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":161 */
+ Py_INCREF(__pyx_v_new_ann);
+ __pyx_r = __pyx_v_new_ann;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx._combine_annotations");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_cache_key);
+ Py_DECREF(__pyx_v_new_ann);
+ Py_DECREF(__pyx_v_ann_one);
+ Py_DECREF(__pyx_v_ann_two);
+ Py_DECREF(__pyx_v_cache);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_14_annotator_pyx__apply_parent_annotations(PyObject *__pyx_v_annotations,PyObject *__pyx_v_parent_annotations,PyObject *__pyx_v_matching_blocks) {
+ Py_ssize_t __pyx_v_parent_idx;
+ Py_ssize_t __pyx_v_lines_idx;
+ Py_ssize_t __pyx_v_match_len;
+ Py_ssize_t __pyx_v_idx;
+ PyListObject *__pyx_v_par_list;
+ PyListObject *__pyx_v_ann_list;
+ PyObject **__pyx_v_par_temp;
+ PyObject **__pyx_v_ann_temp;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ Py_INCREF(__pyx_v_annotations);
+ Py_INCREF(__pyx_v_parent_annotations);
+ Py_INCREF(__pyx_v_matching_blocks);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":174 */
+ __pyx_1 = __pyx_f_6bzrlib_14_annotator_pyx__check_annotations_are_lists(__pyx_v_annotations,__pyx_v_parent_annotations); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":175 */
+ __pyx_v_par_list = ((PyListObject *)__pyx_v_parent_annotations);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":176 */
+ __pyx_v_ann_list = ((PyListObject *)__pyx_v_annotations);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":181 */
+ __pyx_2 = PyObject_GetIter(__pyx_v_matching_blocks); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ break;
+ }
+ __pyx_4 = PyObject_GetIter(__pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ __pyx_5 = PyInt_AsSsize_t(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_parent_idx = __pyx_5;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ __pyx_5 = PyInt_AsSsize_t(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_lines_idx = __pyx_5;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ __pyx_5 = PyInt_AsSsize_t(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_match_len = __pyx_5;
+ if (__Pyx_EndUnpack(__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":182 */
+ __pyx_1 = __pyx_f_6bzrlib_14_annotator_pyx__check_match_ranges(__pyx_v_parent_annotations,__pyx_v_annotations,__pyx_v_parent_idx,__pyx_v_lines_idx,__pyx_v_match_len); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":184 */
+ __pyx_v_par_temp = (__pyx_v_par_list->ob_item + __pyx_v_parent_idx);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":185 */
+ __pyx_v_ann_temp = (__pyx_v_ann_list->ob_item + __pyx_v_lines_idx);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":186 */
+ for (__pyx_v_idx = 0; __pyx_v_idx < __pyx_v_match_len; ++__pyx_v_idx) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":187 */
+ Py_INCREF((__pyx_v_par_temp[__pyx_v_idx]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":188 */
+ Py_DECREF((__pyx_v_ann_temp[__pyx_v_idx]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":189 */
+ (__pyx_v_ann_temp[__pyx_v_idx]) = (__pyx_v_par_temp[__pyx_v_idx]);
+ }
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":190 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx._apply_parent_annotations");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_annotations);
+ Py_DECREF(__pyx_v_parent_annotations);
+ Py_DECREF(__pyx_v_matching_blocks);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_14_annotator_pyx__merge_annotations(PyObject *__pyx_v_this_annotation,PyObject *__pyx_v_annotations,PyObject *__pyx_v_parent_annotations,PyObject *__pyx_v_matching_blocks,PyObject *__pyx_v_ann_cache) {
+ Py_ssize_t __pyx_v_parent_idx;
+ Py_ssize_t __pyx_v_ann_idx;
+ Py_ssize_t __pyx_v_lines_idx;
+ Py_ssize_t __pyx_v_match_len;
+ Py_ssize_t __pyx_v_idx;
+ PyObject *__pyx_v_ann_temp;
+ PyObject *__pyx_v_par_temp;
+ PyObject *__pyx_v_last_ann;
+ PyObject *__pyx_v_last_parent;
+ PyObject *__pyx_v_last_res;
+ PyObject *__pyx_v_par_ann;
+ PyObject *__pyx_v_ann;
+ PyObject *__pyx_v_new_ann;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ Py_INCREF(__pyx_v_this_annotation);
+ Py_INCREF(__pyx_v_annotations);
+ Py_INCREF(__pyx_v_parent_annotations);
+ Py_INCREF(__pyx_v_matching_blocks);
+ Py_INCREF(__pyx_v_ann_cache);
+ __pyx_v_last_ann = Py_None; Py_INCREF(Py_None);
+ __pyx_v_last_parent = Py_None; Py_INCREF(Py_None);
+ __pyx_v_last_res = Py_None; Py_INCREF(Py_None);
+ __pyx_v_par_ann = Py_None; Py_INCREF(Py_None);
+ __pyx_v_ann = Py_None; Py_INCREF(Py_None);
+ __pyx_v_new_ann = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":199 */
+ __pyx_1 = __pyx_f_6bzrlib_14_annotator_pyx__check_annotations_are_lists(__pyx_v_annotations,__pyx_v_parent_annotations); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":200 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_last_ann);
+ __pyx_v_last_ann = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":201 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_last_parent);
+ __pyx_v_last_parent = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":202 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_last_res);
+ __pyx_v_last_res = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":203 */
+ __pyx_2 = PyObject_GetIter(__pyx_v_matching_blocks); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ break;
+ }
+ __pyx_4 = PyObject_GetIter(__pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ __pyx_5 = PyInt_AsSsize_t(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_parent_idx = __pyx_5;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ __pyx_5 = PyInt_AsSsize_t(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_lines_idx = __pyx_5;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ __pyx_5 = PyInt_AsSsize_t(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_v_match_len = __pyx_5;
+ if (__Pyx_EndUnpack(__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":204 */
+ __pyx_1 = __pyx_f_6bzrlib_14_annotator_pyx__check_match_ranges(__pyx_v_parent_annotations,__pyx_v_annotations,__pyx_v_parent_idx,__pyx_v_lines_idx,__pyx_v_match_len); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":208 */
+ for (__pyx_v_idx = 0; __pyx_v_idx < __pyx_v_match_len; ++__pyx_v_idx) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":209 */
+ __pyx_v_ann_idx = (__pyx_v_lines_idx + __pyx_v_idx);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":210 */
+ __pyx_v_ann_temp = PyList_GET_ITEM(__pyx_v_annotations,__pyx_v_ann_idx);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":211 */
+ __pyx_v_par_temp = PyList_GET_ITEM(__pyx_v_parent_annotations,(__pyx_v_parent_idx + __pyx_v_idx));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":212 */
+ __pyx_1 = (__pyx_v_ann_temp == __pyx_v_par_temp);
+ if (__pyx_1) {
+ goto __pyx_L4;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":221 */
+ Py_INCREF(((PyObject *)__pyx_v_par_temp));
+ Py_DECREF(__pyx_v_par_ann);
+ __pyx_v_par_ann = ((PyObject *)__pyx_v_par_temp);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":222 */
+ Py_INCREF(((PyObject *)__pyx_v_ann_temp));
+ Py_DECREF(__pyx_v_ann);
+ __pyx_v_ann = ((PyObject *)__pyx_v_ann_temp);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":223 */
+ __pyx_1 = __pyx_v_ann == __pyx_v_this_annotation;
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":226 */
+ Py_INCREF(__pyx_v_par_ann);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":227 */
+ __pyx_1 = PyList_SetItem(__pyx_v_annotations,__pyx_v_ann_idx,__pyx_v_par_ann); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":228 */
+ goto __pyx_L4;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":231 */
+ __pyx_1 = __pyx_v_ann == __pyx_v_last_ann;
+ if (__pyx_1) {
+ __pyx_1 = __pyx_v_par_ann == __pyx_v_last_parent;
+ }
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":232 */
+ Py_INCREF(__pyx_v_last_res);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":233 */
+ __pyx_1 = PyList_SetItem(__pyx_v_annotations,__pyx_v_ann_idx,__pyx_v_last_res); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; goto __pyx_L1;}
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":235 */
+ __pyx_3 = __pyx_f_6bzrlib_14_annotator_pyx__combine_annotations(__pyx_v_ann,__pyx_v_par_ann,__pyx_v_ann_cache); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_new_ann);
+ __pyx_v_new_ann = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":236 */
+ Py_INCREF(__pyx_v_new_ann);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":237 */
+ __pyx_1 = PyList_SetItem(__pyx_v_annotations,__pyx_v_ann_idx,__pyx_v_new_ann); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":238 */
+ Py_INCREF(__pyx_v_ann);
+ Py_DECREF(__pyx_v_last_ann);
+ __pyx_v_last_ann = __pyx_v_ann;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":239 */
+ Py_INCREF(__pyx_v_par_ann);
+ Py_DECREF(__pyx_v_last_parent);
+ __pyx_v_last_parent = __pyx_v_par_ann;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":240 */
+ Py_INCREF(__pyx_v_new_ann);
+ Py_DECREF(__pyx_v_last_res);
+ __pyx_v_last_res = __pyx_v_new_ann;
+ }
+ __pyx_L8:;
+ __pyx_L4:;
+ }
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":241 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx._merge_annotations");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_last_ann);
+ Py_DECREF(__pyx_v_last_parent);
+ Py_DECREF(__pyx_v_last_res);
+ Py_DECREF(__pyx_v_par_ann);
+ Py_DECREF(__pyx_v_ann);
+ Py_DECREF(__pyx_v_new_ann);
+ Py_DECREF(__pyx_v_this_annotation);
+ Py_DECREF(__pyx_v_annotations);
+ Py_DECREF(__pyx_v_parent_annotations);
+ Py_DECREF(__pyx_v_matching_blocks);
+ Py_DECREF(__pyx_v_ann_cache);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent[] = "Reannotate this text relative to its first parent.";
+static PyMethodDef __pyx_mdef_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent = {"_update_from_first_parent", (PyCFunction)__pyx_f_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent};
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_self = 0;
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_v_annotations = 0;
+ PyObject *__pyx_v_lines = 0;
+ PyObject *__pyx_v_parent_key = 0;
+ PyObject *__pyx_v_parent_annotations;
+ PyObject *__pyx_v_matching_blocks;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ static char *__pyx_argnames[] = {"self","key","annotations","lines","parent_key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OOOOO", __pyx_argnames, &__pyx_v_self, &__pyx_v_key, &__pyx_v_annotations, &__pyx_v_lines, &__pyx_v_parent_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ Py_INCREF(__pyx_v_annotations);
+ Py_INCREF(__pyx_v_lines);
+ Py_INCREF(__pyx_v_parent_key);
+ __pyx_v_parent_annotations = Py_None; Py_INCREF(Py_None);
+ __pyx_v_matching_blocks = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":250 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_k6p); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_key);
+ Py_INCREF(__pyx_v_lines);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_lines);
+ Py_INCREF(__pyx_v_parent_key);
+ PyTuple_SET_ITEM(__pyx_2, 2, __pyx_v_parent_key);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyObject_GetIter(__pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 249; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = __Pyx_UnpackItem(__pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 249; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent_annotations);
+ __pyx_v_parent_annotations = __pyx_2;
+ __pyx_2 = 0;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 249; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_matching_blocks);
+ __pyx_v_matching_blocks = __pyx_3;
+ __pyx_3 = 0;
+ if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 249; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":253 */
+ __pyx_4 = __pyx_f_6bzrlib_14_annotator_pyx__apply_parent_annotations(__pyx_v_annotations,__pyx_v_parent_annotations,__pyx_v_matching_blocks); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx.Annotator._update_from_first_parent");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parent_annotations);
+ Py_DECREF(__pyx_v_matching_blocks);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_annotations);
+ Py_DECREF(__pyx_v_lines);
+ Py_DECREF(__pyx_v_parent_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents[] = "Reannotate this text relative to a second (or more) parent.";
+static PyMethodDef __pyx_mdef_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents = {"_update_from_other_parents", (PyCFunction)__pyx_f_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents};
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_self = 0;
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_v_annotations = 0;
+ PyObject *__pyx_v_lines = 0;
+ PyObject *__pyx_v_this_annotation = 0;
+ PyObject *__pyx_v_parent_key = 0;
+ PyObject *__pyx_v_parent_annotations;
+ PyObject *__pyx_v_matching_blocks;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ static char *__pyx_argnames[] = {"self","key","annotations","lines","this_annotation","parent_key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OOOOOO", __pyx_argnames, &__pyx_v_self, &__pyx_v_key, &__pyx_v_annotations, &__pyx_v_lines, &__pyx_v_this_annotation, &__pyx_v_parent_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ Py_INCREF(__pyx_v_annotations);
+ Py_INCREF(__pyx_v_lines);
+ Py_INCREF(__pyx_v_this_annotation);
+ Py_INCREF(__pyx_v_parent_key);
+ __pyx_v_parent_annotations = Py_None; Py_INCREF(Py_None);
+ __pyx_v_matching_blocks = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":260 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_k6p); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_key);
+ Py_INCREF(__pyx_v_lines);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_lines);
+ Py_INCREF(__pyx_v_parent_key);
+ PyTuple_SET_ITEM(__pyx_2, 2, __pyx_v_parent_key);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyObject_GetIter(__pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = __Pyx_UnpackItem(__pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent_annotations);
+ __pyx_v_parent_annotations = __pyx_2;
+ __pyx_2 = 0;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_matching_blocks);
+ __pyx_v_matching_blocks = __pyx_3;
+ __pyx_3 = 0;
+ if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":262 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_self, __pyx_n__ann_tuple_cache); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; goto __pyx_L1;}
+ __pyx_4 = __pyx_f_6bzrlib_14_annotator_pyx__merge_annotations(__pyx_v_this_annotation,__pyx_v_annotations,__pyx_v_parent_annotations,__pyx_v_matching_blocks,__pyx_2); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx.Annotator._update_from_other_parents");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parent_annotations);
+ Py_DECREF(__pyx_v_matching_blocks);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_annotations);
+ Py_DECREF(__pyx_v_lines);
+ Py_DECREF(__pyx_v_this_annotation);
+ Py_DECREF(__pyx_v_parent_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat[] = "Determine the single-best-revision to source for each line.\n\n This is meant as a compatibility thunk to how annotate() used to work.\n ";
+static PyMethodDef __pyx_mdef_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat = {"annotate_flat", (PyCFunction)__pyx_f_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat};
+static PyObject *__pyx_f_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_self = 0;
+ PyObject *__pyx_v_key = 0;
+ Py_ssize_t __pyx_v_pos;
+ Py_ssize_t __pyx_v_num_lines;
+ PyObject *__pyx_v_annotate;
+ PyObject *__pyx_v_custom_tiebreaker;
+ PyObject *__pyx_v_annotations;
+ PyObject *__pyx_v_lines;
+ PyObject *__pyx_v_out;
+ PyObject *__pyx_v_heads;
+ PyObject *__pyx_v_annotation;
+ PyObject *__pyx_v_line;
+ PyObject *__pyx_v_head;
+ PyObject *__pyx_v_the_heads;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_ssize_t __pyx_4;
+ int __pyx_5;
+ static char *__pyx_argnames[] = {"self","key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_self, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_annotate = Py_None; Py_INCREF(Py_None);
+ __pyx_v_custom_tiebreaker = Py_None; Py_INCREF(Py_None);
+ __pyx_v_annotations = Py_None; Py_INCREF(Py_None);
+ __pyx_v_lines = Py_None; Py_INCREF(Py_None);
+ __pyx_v_out = Py_None; Py_INCREF(Py_None);
+ __pyx_v_heads = Py_None; Py_INCREF(Py_None);
+ __pyx_v_annotation = Py_None; Py_INCREF(Py_None);
+ __pyx_v_line = Py_None; Py_INCREF(Py_None);
+ __pyx_v_head = Py_None; Py_INCREF(Py_None);
+ __pyx_v_the_heads = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":272 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_annotate);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_annotate);
+ __pyx_2 = __Pyx_Import(__pyx_n_bzrlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_annotate); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_annotate);
+ __pyx_v_annotate = __pyx_1;
+ __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":274 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_annotate, __pyx_n__break_annotation_tie); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_custom_tiebreaker);
+ __pyx_v_custom_tiebreaker = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":275 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_annotate); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_key);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyObject_GetIter(__pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = __Pyx_UnpackItem(__pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_annotations);
+ __pyx_v_annotations = __pyx_2;
+ __pyx_2 = 0;
+ __pyx_3 = __Pyx_UnpackItem(__pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_lines);
+ __pyx_v_lines = __pyx_3;
+ __pyx_3 = 0;
+ if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":276 */
+ __pyx_4 = PyObject_Length(__pyx_v_lines); if (__pyx_4 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 276; goto __pyx_L1;}
+ __pyx_v_num_lines = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":277 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_out);
+ __pyx_v_out = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":278 */
+ __pyx_3 = PyObject_GetAttr(__pyx_v_self, __pyx_n__get_heads_provider); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; goto __pyx_L1;}
+ __pyx_1 = PyObject_CallObject(__pyx_3, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_heads); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_heads);
+ __pyx_v_heads = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":279 */
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_v_num_lines; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":280 */
+ __pyx_3 = __Pyx_GetItemInt(__pyx_v_annotations, __pyx_v_pos); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 280; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_annotation);
+ __pyx_v_annotation = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":281 */
+ __pyx_1 = __Pyx_GetItemInt(__pyx_v_lines, __pyx_v_pos); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_line);
+ __pyx_v_line = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":282 */
+ __pyx_4 = PyObject_Length(__pyx_v_annotation); if (__pyx_4 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 282; goto __pyx_L1;}
+ __pyx_5 = (__pyx_4 == 1);
+ if (__pyx_5) {
+ __pyx_2 = __Pyx_GetItemInt(__pyx_v_annotation, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_head);
+ __pyx_v_head = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":285 */
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_annotation);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_annotation);
+ __pyx_1 = PyObject_CallObject(__pyx_v_heads, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_the_heads);
+ __pyx_v_the_heads = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":286 */
+ __pyx_4 = PyObject_Length(__pyx_v_the_heads); if (__pyx_4 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; goto __pyx_L1;}
+ __pyx_5 = (__pyx_4 == 1);
+ if (__pyx_5) {
+ __pyx_2 = PyObject_GetIter(__pyx_v_the_heads); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_head);
+ __pyx_v_head = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n__resolve_annotation_tie); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_the_heads);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_the_heads);
+ Py_INCREF(__pyx_v_line);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_line);
+ Py_INCREF(__pyx_v_custom_tiebreaker);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_custom_tiebreaker);
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_head);
+ __pyx_v_head = __pyx_2;
+ __pyx_2 = 0;
+ }
+ __pyx_L5:;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":293 */
+ __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_head);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_head);
+ Py_INCREF(__pyx_v_line);
+ PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_line);
+ __pyx_5 = PyList_Append(__pyx_v_out,__pyx_1); if (__pyx_5 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":294 */
+ Py_INCREF(__pyx_v_out);
+ __pyx_r = __pyx_v_out;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx.Annotator.annotate_flat");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_annotate);
+ Py_DECREF(__pyx_v_custom_tiebreaker);
+ Py_DECREF(__pyx_v_annotations);
+ Py_DECREF(__pyx_v_lines);
+ Py_DECREF(__pyx_v_out);
+ Py_DECREF(__pyx_v_heads);
+ Py_DECREF(__pyx_v_annotation);
+ Py_DECREF(__pyx_v_line);
+ Py_DECREF(__pyx_v_head);
+ Py_DECREF(__pyx_v_the_heads);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static struct PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_annotator_pyx(void); /*proto*/
+PyMODINIT_FUNC init_annotator_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_annotator_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":58 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; goto __pyx_L1;}
+ Py_INCREF(__pyx_n__annotator_py);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n__annotator_py);
+ __pyx_2 = __Pyx_Import(__pyx_n_bzrlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n__annotator_py); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n__annotator_py, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":244 */
+ __pyx_2 = PyDict_New(); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n__annotator_py); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_Annotator); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3);
+ __pyx_3 = 0;
+ if (PyDict_SetItemString(__pyx_2, "__doc__", __pyx_k16p) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ __pyx_3 = __Pyx_CreateClass(__pyx_1, __pyx_2, __pyx_n_Annotator, "bzrlib._annotator_pyx"); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":247 */
+ __pyx_1 = PyCFunction_New(&__pyx_mdef_6bzrlib_14_annotator_pyx_9Annotator__update_from_first_parent, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ __pyx_4 = PyMethod_New(__pyx_1, 0, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (PyObject_SetAttr(__pyx_3, __pyx_n__update_from_first_parent, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":256 */
+ __pyx_1 = PyCFunction_New(&__pyx_mdef_6bzrlib_14_annotator_pyx_9Annotator__update_from_other_parents, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; goto __pyx_L1;}
+ __pyx_4 = PyMethod_New(__pyx_1, 0, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (PyObject_SetAttr(__pyx_3, __pyx_n__update_from_other_parents, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_annotator_pyx.pyx":265 */
+ __pyx_1 = PyCFunction_New(&__pyx_mdef_6bzrlib_14_annotator_pyx_9Annotator_annotate_flat, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; goto __pyx_L1;}
+ __pyx_4 = PyMethod_New(__pyx_1, 0, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (PyObject_SetAttr(__pyx_3, __pyx_n_annotate_flat, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_Annotator, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._annotator_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_annotator_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static void __Pyx_UnpackError(void) {
+ PyErr_SetString(PyExc_ValueError, "unpack sequence of wrong size");
+}
+
+static PyObject *__Pyx_UnpackItem(PyObject *iter) {
+ PyObject *item;
+ if (!(item = PyIter_Next(iter))) {
+ if (!PyErr_Occurred())
+ __Pyx_UnpackError();
+ }
+ return item;
+}
+
+static int __Pyx_EndUnpack(PyObject *iter) {
+ PyObject *item;
+ if ((item = PyIter_Next(iter))) {
+ Py_DECREF(item);
+ __Pyx_UnpackError();
+ return -1;
+ }
+ else if (!PyErr_Occurred())
+ return 0;
+ else
+ return -1;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *__import__ = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+ if (!__import__)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunction(__import__, "OOOO",
+ name, global_dict, empty_dict, list);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(__import__);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+static PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i) {
+ PyTypeObject *t = o->ob_type;
+ PyObject *r;
+ if (t->tp_as_sequence && t->tp_as_sequence->sq_item)
+ r = PySequence_GetItem(o, i);
+ else {
+ PyObject *j = PyInt_FromLong(i);
+ if (!j)
+ return 0;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ }
+ return r;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static PyObject *__Pyx_CreateClass(
+ PyObject *bases, PyObject *dict, PyObject *name, char *modname)
+{
+ PyObject *py_modname;
+ PyObject *result = 0;
+
+ py_modname = PyString_FromString(modname);
+ if (!py_modname)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__module__", py_modname) < 0)
+ goto bad;
+ result = PyClass_New(bases, dict, name);
+bad:
+ Py_XDECREF(py_modname);
+ return result;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
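Before the .pyx source that follows, a brief illustration of what the generated annotate_flat code above boils down to per line: collapse the tuple of candidate revisions to a single "head" revision, using the heads provider and, on a tie, _resolve_annotation_tie. This is a minimal sketch with assumed names (flatten_line, the heads callable and tiebreak argument are illustrative, not part of bzrlib):

    # Sketch only: mirrors the per-line resolution in annotate_flat above.
    def flatten_line(annotation, line, heads, tiebreak=min):
        # annotation: sorted tuple of revision ids that last touched this line
        if len(annotation) == 1:
            head = annotation[0]
        else:
            the_heads = heads(annotation)
            if len(the_heads) == 1:
                (head,) = the_heads          # unambiguous winner
            else:
                head = tiebreak(the_heads)   # stand-in for _resolve_annotation_tie
        return (head, line)

    # A heads provider that cannot narrow the candidates forces the tiebreak:
    print(flatten_line(('rev-a', 'rev-b'), 'some line\n',
                       heads=lambda ann: set(ann)))
    # -> ('rev-a', 'some line\n')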
diff --git a/bzrlib/_annotator_pyx.pyx b/bzrlib/_annotator_pyx.pyx
new file mode 100644
index 0000000..abce4a0
--- /dev/null
+++ b/bzrlib/_annotator_pyx.pyx
@@ -0,0 +1,294 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Functionality for doing annotations in the 'optimal' way"""
+
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t
+ ctypedef struct PyObject:
+ pass
+ ctypedef struct PyListObject:
+ PyObject **ob_item
+ int PyList_CheckExact(object)
+ PyObject *PyList_GET_ITEM(object, Py_ssize_t o)
+ Py_ssize_t PyList_GET_SIZE(object)
+ int PyList_Append(object, object) except -1
+ int PyList_SetItem(object, Py_ssize_t o, object) except -1
+ int PyList_Sort(object) except -1
+
+ int PyTuple_CheckExact(object)
+ object PyTuple_New(Py_ssize_t len)
+ void PyTuple_SET_ITEM(object, Py_ssize_t pos, object)
+ void PyTuple_SET_ITEM_ptr "PyTuple_SET_ITEM" (object, Py_ssize_t,
+ PyObject *)
+ int PyTuple_Resize(PyObject **, Py_ssize_t newlen)
+ PyObject *PyTuple_GET_ITEM(object, Py_ssize_t o)
+ Py_ssize_t PyTuple_GET_SIZE(object)
+
+ PyObject *PyDict_GetItem(object d, object k)
+ int PyDict_SetItem(object d, object k, object v) except -1
+
+ void Py_INCREF(object)
+ void Py_INCREF_ptr "Py_INCREF" (PyObject *)
+ void Py_DECREF_ptr "Py_DECREF" (PyObject *)
+
+ int Py_EQ
+ int Py_LT
+ int PyObject_RichCompareBool(object, object, int opid) except -1
+ int PyObject_RichCompareBool_ptr "PyObject_RichCompareBool" (
+ PyObject *, PyObject *, int opid)
+
+
+from bzrlib import _annotator_py
+
+
+cdef int _check_annotations_are_lists(annotations,
+ parent_annotations) except -1:
+ if not PyList_CheckExact(annotations):
+ raise TypeError('annotations must be a list')
+ if not PyList_CheckExact(parent_annotations):
+ raise TypeError('parent_annotations must be a list')
+ return 0
+
+
+cdef int _check_match_ranges(parent_annotations, annotations,
+ Py_ssize_t parent_idx, Py_ssize_t lines_idx,
+ Py_ssize_t match_len) except -1:
+ if parent_idx + match_len > PyList_GET_SIZE(parent_annotations):
+ raise ValueError('Match length exceeds len of'
+ ' parent_annotations %s > %s'
+ % (parent_idx + match_len,
+ PyList_GET_SIZE(parent_annotations)))
+ if lines_idx + match_len > PyList_GET_SIZE(annotations):
+ raise ValueError('Match length exceeds len of'
+ ' annotations %s > %s'
+ % (lines_idx + match_len,
+ PyList_GET_SIZE(annotations)))
+ return 0
+
+
+cdef PyObject *_next_tuple_entry(object tpl, Py_ssize_t *pos): # cannot_raise
+ """Return the next entry from this tuple.
+
+ :param tpl: The tuple we are investigating, *must* be a PyTuple
+ :param pos: The last item we found. Will be updated to the new position.
+
+ This cannot raise an exception, as it does no error checking.
+ """
+ pos[0] = pos[0] + 1
+ if pos[0] >= PyTuple_GET_SIZE(tpl):
+ return NULL
+ return PyTuple_GET_ITEM(tpl, pos[0])
+
+
+cdef object _combine_annotations(ann_one, ann_two, cache):
+ """Combine the annotations from both sides."""
+ cdef Py_ssize_t pos_one, pos_two, len_one, len_two
+ cdef Py_ssize_t out_pos
+ cdef PyObject *temp, *left, *right
+
+ if (PyObject_RichCompareBool(ann_one, ann_two, Py_LT)):
+ cache_key = (ann_one, ann_two)
+ else:
+ cache_key = (ann_two, ann_one)
+ temp = PyDict_GetItem(cache, cache_key)
+ if temp != NULL:
+ return <object>temp
+
+ if not PyTuple_CheckExact(ann_one) or not PyTuple_CheckExact(ann_two):
+ raise TypeError('annotations must be tuples')
+ # We know that annotations are tuples, and that both sides are already
+ # sorted, so we can just walk and update a new list.
+ pos_one = -1
+ pos_two = -1
+ out_pos = 0
+ left = _next_tuple_entry(ann_one, &pos_one)
+ right = _next_tuple_entry(ann_two, &pos_two)
+ new_ann = PyTuple_New(PyTuple_GET_SIZE(ann_one)
+ + PyTuple_GET_SIZE(ann_two))
+ while left != NULL and right != NULL:
+        # The left == right check is redundant with PyObject_RichCompareBool_ptr,
+        # but it avoids a function call for a very common case. It drops 'time bzr
+        # annotate NEWS' from 7.25s to 7.16s, so it *is* a visible impact.
+ if (left == right
+ or PyObject_RichCompareBool_ptr(left, right, Py_EQ)):
+ # Identical values, step both
+ Py_INCREF_ptr(left)
+ PyTuple_SET_ITEM_ptr(new_ann, out_pos, left)
+ left = _next_tuple_entry(ann_one, &pos_one)
+ right = _next_tuple_entry(ann_two, &pos_two)
+ elif (PyObject_RichCompareBool_ptr(left, right, Py_LT)):
+ # left < right or right == NULL
+ Py_INCREF_ptr(left)
+ PyTuple_SET_ITEM_ptr(new_ann, out_pos, left)
+ left = _next_tuple_entry(ann_one, &pos_one)
+ else: # right < left or left == NULL
+ Py_INCREF_ptr(right)
+ PyTuple_SET_ITEM_ptr(new_ann, out_pos, right)
+ right = _next_tuple_entry(ann_two, &pos_two)
+ out_pos = out_pos + 1
+ while left != NULL:
+ Py_INCREF_ptr(left)
+ PyTuple_SET_ITEM_ptr(new_ann, out_pos, left)
+ left = _next_tuple_entry(ann_one, &pos_one)
+ out_pos = out_pos + 1
+ while right != NULL:
+ Py_INCREF_ptr(right)
+ PyTuple_SET_ITEM_ptr(new_ann, out_pos, right)
+ right = _next_tuple_entry(ann_two, &pos_two)
+ out_pos = out_pos + 1
+ if out_pos != PyTuple_GET_SIZE(new_ann):
+        # Timing _PyTuple_Resize was not significantly faster than slicing
+ # PyTuple_Resize((<PyObject **>new_ann), out_pos)
+ new_ann = new_ann[0:out_pos]
+ PyDict_SetItem(cache, cache_key, new_ann)
+ return new_ann
+
+
+cdef int _apply_parent_annotations(annotations, parent_annotations,
+ matching_blocks) except -1:
+ """Apply the annotations from parent_annotations into annotations.
+
+ matching_blocks defines the ranges that match.
+ """
+ cdef Py_ssize_t parent_idx, lines_idx, match_len, idx
+ cdef PyListObject *par_list, *ann_list
+ cdef PyObject **par_temp, **ann_temp
+
+ _check_annotations_are_lists(annotations, parent_annotations)
+ par_list = <PyListObject *>parent_annotations
+ ann_list = <PyListObject *>annotations
+    # For NEWS and bzrlib/builtins.py, over 99% of the lines are simply copied
+    # across from the parent entry, so this routine is heavily optimized for
+    # that case. It would be nice to use memcpy(), but we have to incref
+    # and decref each pointer.
+ for parent_idx, lines_idx, match_len in matching_blocks:
+ _check_match_ranges(parent_annotations, annotations,
+ parent_idx, lines_idx, match_len)
+ par_temp = par_list.ob_item + parent_idx
+ ann_temp = ann_list.ob_item + lines_idx
+ for idx from 0 <= idx < match_len:
+ Py_INCREF_ptr(par_temp[idx])
+ Py_DECREF_ptr(ann_temp[idx])
+ ann_temp[idx] = par_temp[idx]
+ return 0
+
+
+cdef int _merge_annotations(this_annotation, annotations, parent_annotations,
+ matching_blocks, ann_cache) except -1:
+ cdef Py_ssize_t parent_idx, ann_idx, lines_idx, match_len, idx
+ cdef Py_ssize_t pos
+ cdef PyObject *ann_temp, *par_temp
+
+ _check_annotations_are_lists(annotations, parent_annotations)
+ last_ann = None
+ last_parent = None
+ last_res = None
+ for parent_idx, lines_idx, match_len in matching_blocks:
+ _check_match_ranges(parent_annotations, annotations,
+ parent_idx, lines_idx, match_len)
+ # For lines which match this parent, we will now resolve whether
+ # this parent wins over the current annotation
+ for idx from 0 <= idx < match_len:
+ ann_idx = lines_idx + idx
+ ann_temp = PyList_GET_ITEM(annotations, ann_idx)
+ par_temp = PyList_GET_ITEM(parent_annotations, parent_idx + idx)
+ if (ann_temp == par_temp):
+ # This is parent, do nothing
+ # Pointer comparison is fine here. Value comparison would
+ # be ok, but it will be handled in the final if clause by
+ # merging the two tuples into the same tuple
+ # Avoiding the Py_INCREF and function call to
+ # PyObject_RichCompareBool using pointer comparison drops
+ # timing from 215ms => 125ms
+ continue
+ par_ann = <object>par_temp
+ ann = <object>ann_temp
+ if (ann is this_annotation):
+ # Originally claimed 'this', but it was really in this
+ # parent
+ Py_INCREF(par_ann)
+ PyList_SetItem(annotations, ann_idx, par_ann)
+ continue
+ # Resolve the fact that both sides have a different value for
+ # last modified
+ if (ann is last_ann and par_ann is last_parent):
+ Py_INCREF(last_res)
+ PyList_SetItem(annotations, ann_idx, last_res)
+ else:
+ new_ann = _combine_annotations(ann, par_ann, ann_cache)
+ Py_INCREF(new_ann)
+ PyList_SetItem(annotations, ann_idx, new_ann)
+ last_ann = ann
+ last_parent = par_ann
+ last_res = new_ann
+ return 0
+
+
+class Annotator(_annotator_py.Annotator):
+ """Class that drives performing annotations."""
+
+ def _update_from_first_parent(self, key, annotations, lines, parent_key):
+ """Reannotate this text relative to its first parent."""
+ (parent_annotations,
+ matching_blocks) = self._get_parent_annotations_and_matches(
+ key, lines, parent_key)
+
+ _apply_parent_annotations(annotations, parent_annotations,
+ matching_blocks)
+
+ def _update_from_other_parents(self, key, annotations, lines,
+ this_annotation, parent_key):
+ """Reannotate this text relative to a second (or more) parent."""
+ (parent_annotations,
+ matching_blocks) = self._get_parent_annotations_and_matches(
+ key, lines, parent_key)
+ _merge_annotations(this_annotation, annotations, parent_annotations,
+ matching_blocks, self._ann_tuple_cache)
+
+ def annotate_flat(self, key):
+ """Determine the single-best-revision to source for each line.
+
+ This is meant as a compatibility thunk to how annotate() used to work.
+ """
+ cdef Py_ssize_t pos, num_lines
+
+ from bzrlib import annotate
+
+ custom_tiebreaker = annotate._break_annotation_tie
+ annotations, lines = self.annotate(key)
+ num_lines = len(lines)
+ out = []
+ heads = self._get_heads_provider().heads
+ for pos from 0 <= pos < num_lines:
+ annotation = annotations[pos]
+ line = lines[pos]
+ if len(annotation) == 1:
+ head = annotation[0]
+ else:
+ the_heads = heads(annotation)
+ if len(the_heads) == 1:
+ for head in the_heads: break # get the item out of the set
+ else:
+ # We need to resolve the ambiguity, for now just pick the
+ # sorted smallest
+ head = self._resolve_annotation_tie(the_heads, line,
+ custom_tiebreaker)
+ PyList_Append(out, (head, line))
+ return out
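The tightest loop in the extension above is _combine_annotations, which merges two already-sorted annotation tuples and caches the result under an order-normalised key. A minimal pure-Python sketch of the same behaviour (illustrative only; it assumes each input tuple is already sorted and duplicate-free, and the extension avoids the set/sort overhead by walking both tuples in step):

    # Sketch only: equivalent result to _combine_annotations above.
    def combine_annotations(ann_one, ann_two, cache):
        # Normalise the cache key so (a, b) and (b, a) hit the same entry.
        if ann_one < ann_two:
            cache_key = (ann_one, ann_two)
        else:
            cache_key = (ann_two, ann_one)
        if cache_key in cache:
            return cache[cache_key]
        # Sorted union of the two revision-id tuples, duplicates collapsed.
        new_ann = tuple(sorted(set(ann_one) | set(ann_two)))
        cache[cache_key] = new_ann
        return new_ann

    cache = {}
    print(combine_annotations(('rev-a', 'rev-c'), ('rev-b', 'rev-c'), cache))
    # -> ('rev-a', 'rev-b', 'rev-c')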
diff --git a/bzrlib/_bencode_pyx.c b/bzrlib/_bencode_pyx.c
new file mode 100644
index 0000000..8e815e1
--- /dev/null
+++ b/bzrlib/_bencode_pyx.c
@@ -0,0 +1,2529 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:00:55 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "stddef.h"
+#include "stdlib.h"
+#include "string.h"
+#include "python-compat.h"
+#include "_bencode_pyx.h"
+#include "_static_tuple_c.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Pyrex implementation for bencode coder/decoder";
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, long size); /*proto*/
+
+static PyObject *__Pyx_ImportModule(char *name); /*proto*/
+
+static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, char *modname); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._static_tuple_c */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = 0;
+
+/* Declarations from bzrlib._bencode_pyx */
+
+
+/* Declarations from implementation of bzrlib._bencode_pyx */
+
+struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *__pyx_vtab;
+ char *tail;
+ int size;
+ int _yield_tuples;
+ PyObject *text;
+};
+
+enum {
+ __pyx_e_6bzrlib_12_bencode_pyx_INITSIZE = 1024,
+ __pyx_e_6bzrlib_12_bencode_pyx_INT_BUF_SIZE = 32
+};
+
+struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *__pyx_vtab;
+ char *tail;
+ int size;
+ char *buffer;
+ int maxsize;
+};
+
+
+struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder {
+ PyObject *(*_decode_object)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *);
+ int (*_read_digits)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *,char);
+ PyObject *(*_decode_int)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *);
+ PyObject *(*_decode_string)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *);
+ PyObject *(*_decode_list)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *);
+ PyObject *(*_decode_dict)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *);
+};
+static struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *__pyx_vtabptr_6bzrlib_12_bencode_pyx_Decoder;
+
+
+struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder {
+ int (*_ensure_buffer)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,int);
+ int (*_encode_int)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,int);
+ int (*_encode_long)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,PyObject *);
+ int (*_append_string)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,PyObject *);
+ int (*_encode_string)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,PyObject *);
+ int (*_encode_list)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,PyObject *);
+ int (*_encode_dict)(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *,PyObject *);
+};
+static struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *__pyx_vtabptr_6bzrlib_12_bencode_pyx_Encoder;
+
+static PyTypeObject *__pyx_ptype_6bzrlib_12_bencode_pyx_Decoder = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_12_bencode_pyx_Encoder = 0;
+
+static char __pyx_k1[] = "String required";
+static char __pyx_k2[] = "junk in stream";
+static char __pyx_k3[] = "stream underflow";
+static char __pyx_k4[] = "_decode_object";
+static char __pyx_k5[] = "too deeply nested";
+static char __pyx_k6[] = "unknown object type identifier %r";
+static char __pyx_k7[] = "Stop character %c not found: %c";
+static char __pyx_k8[] = "string len not terminated by \":\"";
+static char __pyx_k9[] = "leading zeros are not allowed";
+static char __pyx_k10[] = "";
+static char __pyx_k11[] = "string size below zero: %d";
+static char __pyx_k12[] = "malformed list";
+static char __pyx_k13[] = "key was not a simple string.";
+static char __pyx_k14[] = "dict keys disordered";
+static char __pyx_k15[] = "malformed dict";
+static char __pyx_k16[] = "decode";
+static char __pyx_k17[] = "bencoded";
+static char __pyx_k18[] = "Not enough memory to allocate buffer for encoder";
+static char __pyx_k19[] = "Cannot realloc buffer for encoder";
+static char __pyx_k20[] = "i%de";
+static char __pyx_k21[] = "int %d too big to encode";
+static char __pyx_k22[] = "join";
+static char __pyx_k23[] = "i";
+static char __pyx_k24[] = "e";
+static char __pyx_k25[] = "%d:";
+static char __pyx_k26[] = "string %s too big to encode";
+static char __pyx_k27[] = "process";
+static char __pyx_k28[] = "keys";
+static char __pyx_k29[] = "sort";
+static char __pyx_k30[] = "key in dict should be string";
+static char __pyx_k31[] = "encode";
+static char __pyx_k32[] = "Bencached";
+static char __pyx_k33[] = "unsupported type %r";
+static char __pyx_k34[] = "object";
+static char __pyx_k35[] = "__slots__";
+static char __pyx_k36[] = "__init__";
+
+static PyObject *__pyx_n_Bencached;
+static PyObject *__pyx_n___init__;
+static PyObject *__pyx_n___slots__;
+static PyObject *__pyx_n_bencoded;
+static PyObject *__pyx_n_decode;
+static PyObject *__pyx_n_e;
+static PyObject *__pyx_n_i;
+static PyObject *__pyx_n_join;
+static PyObject *__pyx_n_keys;
+static PyObject *__pyx_n_object;
+static PyObject *__pyx_n_process;
+static PyObject *__pyx_n_sort;
+
+static PyObject *__pyx_k1p;
+static PyObject *__pyx_k2p;
+static PyObject *__pyx_k3p;
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k6p;
+static PyObject *__pyx_k7p;
+static PyObject *__pyx_k8p;
+static PyObject *__pyx_k9p;
+static PyObject *__pyx_k10p;
+static PyObject *__pyx_k11p;
+static PyObject *__pyx_k12p;
+static PyObject *__pyx_k13p;
+static PyObject *__pyx_k14p;
+static PyObject *__pyx_k15p;
+static PyObject *__pyx_k18p;
+static PyObject *__pyx_k19p;
+static PyObject *__pyx_k21p;
+static PyObject *__pyx_k26p;
+static PyObject *__pyx_k30p;
+static PyObject *__pyx_k33p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_Bencached, 1, __pyx_k32, sizeof(__pyx_k32)},
+ {&__pyx_n___init__, 1, __pyx_k36, sizeof(__pyx_k36)},
+ {&__pyx_n___slots__, 1, __pyx_k35, sizeof(__pyx_k35)},
+ {&__pyx_n_bencoded, 1, __pyx_k17, sizeof(__pyx_k17)},
+ {&__pyx_n_decode, 1, __pyx_k16, sizeof(__pyx_k16)},
+ {&__pyx_n_e, 1, __pyx_k24, sizeof(__pyx_k24)},
+ {&__pyx_n_i, 1, __pyx_k23, sizeof(__pyx_k23)},
+ {&__pyx_n_join, 1, __pyx_k22, sizeof(__pyx_k22)},
+ {&__pyx_n_keys, 1, __pyx_k28, sizeof(__pyx_k28)},
+ {&__pyx_n_object, 1, __pyx_k34, sizeof(__pyx_k34)},
+ {&__pyx_n_process, 1, __pyx_k27, sizeof(__pyx_k27)},
+ {&__pyx_n_sort, 1, __pyx_k29, sizeof(__pyx_k29)},
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_k2p, 0, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_k3p, 0, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k6p, 0, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_k7p, 0, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_k8p, 0, __pyx_k8, sizeof(__pyx_k8)},
+ {&__pyx_k9p, 0, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_k10p, 0, __pyx_k10, sizeof(__pyx_k10)},
+ {&__pyx_k11p, 0, __pyx_k11, sizeof(__pyx_k11)},
+ {&__pyx_k12p, 0, __pyx_k12, sizeof(__pyx_k12)},
+ {&__pyx_k13p, 0, __pyx_k13, sizeof(__pyx_k13)},
+ {&__pyx_k14p, 0, __pyx_k14, sizeof(__pyx_k14)},
+ {&__pyx_k15p, 0, __pyx_k15, sizeof(__pyx_k15)},
+ {&__pyx_k18p, 0, __pyx_k18, sizeof(__pyx_k18)},
+ {&__pyx_k19p, 0, __pyx_k19, sizeof(__pyx_k19)},
+ {&__pyx_k21p, 0, __pyx_k21, sizeof(__pyx_k21)},
+ {&__pyx_k26p, 0, __pyx_k26, sizeof(__pyx_k26)},
+ {&__pyx_k30p, 0, __pyx_k30, sizeof(__pyx_k30)},
+ {&__pyx_k33p, 0, __pyx_k33, sizeof(__pyx_k33)},
+ {0, 0, 0, 0}
+};
+
+static PyObject *__pyx_d1;
+static int __pyx_d2;
+
+
+/* Implementation of bzrlib._bencode_pyx */
+
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Decoder___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Decoder___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_s = 0;
+ PyObject *__pyx_v_yield_tuples = 0;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ char *__pyx_4;
+ Py_ssize_t __pyx_5;
+ static char *__pyx_argnames[] = {"s","yield_tuples",0};
+ __pyx_v_yield_tuples = __pyx_d1;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|O", __pyx_argnames, &__pyx_v_s, &__pyx_v_yield_tuples)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_s);
+ Py_INCREF(__pyx_v_yield_tuples);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":81 */
+ __pyx_1 = (!PyString_CheckExact(__pyx_v_s));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; goto __pyx_L1;}
+ Py_INCREF(__pyx_k1p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k1p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":84 */
+ Py_INCREF(__pyx_v_s);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->text);
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->text = __pyx_v_s;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":85 */
+ __pyx_4 = PyString_AS_STRING(__pyx_v_s); if (__pyx_4 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;}
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->tail = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":86 */
+ __pyx_5 = PyString_GET_SIZE(__pyx_v_s); if (__pyx_5 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; goto __pyx_L1;}
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->size = __pyx_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":87 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_yield_tuples);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_yield_tuples);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyInt_Type)), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_1 = PyInt_AsLong(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->_yield_tuples = __pyx_1;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_s);
+ Py_DECREF(__pyx_v_yield_tuples);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder_decode(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder_decode(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":90 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->__pyx_vtab)->_decode_object(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":91 */
+ __pyx_2 = (((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->size != 0);
+ if (__pyx_2) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; goto __pyx_L1;}
+ Py_INCREF(__pyx_k2p);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_k2p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":93 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder.decode");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder_decode_object(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder_decode_object(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)->__pyx_vtab)->_decode_object(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder.decode_object");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_object(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *__pyx_v_self) {
+ char __pyx_v_ch;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":101 */
+ __pyx_1 = (0 == __pyx_v_self->size);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; goto __pyx_L1;}
+ Py_INCREF(__pyx_k3p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k3p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":104 */
+ __pyx_1 = Py_EnterRecursiveCall(__pyx_k4);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; goto __pyx_L1;}
+ Py_INCREF(__pyx_k5p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k5p);
+ __pyx_3 = PyObject_CallObject(PyExc_RuntimeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":106 */
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":107 */
+ __pyx_v_ch = (__pyx_v_self->tail[0]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":108 */
+ __pyx_1 = '0' <= __pyx_v_ch;
+ if (__pyx_1) {
+ __pyx_1 = __pyx_v_ch <= '9';
+ }
+ if (__pyx_1) {
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_string(__pyx_v_self); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; goto __pyx_L5;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L4;
+ goto __pyx_L7;
+ }
+ __pyx_1 = (__pyx_v_ch == 'l');
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":111 */
+ D_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":112 */
+ __pyx_3 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_list(__pyx_v_self); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; goto __pyx_L5;}
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L4;
+ goto __pyx_L7;
+ }
+ __pyx_1 = (__pyx_v_ch == 'i');
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":114 */
+ D_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":115 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_int(__pyx_v_self); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L5;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L4;
+ goto __pyx_L7;
+ }
+ __pyx_1 = (__pyx_v_ch == 'd');
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":117 */
+ D_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":118 */
+ __pyx_3 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_dict(__pyx_v_self); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L5;}
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L4;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+ __pyx_2 = PyInt_FromLong(__pyx_v_ch); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L5;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k6p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L5;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L5;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L5;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L5;}
+ }
+ __pyx_L7:;
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L6;
+ __pyx_L4: __pyx_why = 3; goto __pyx_L6;
+ __pyx_L5: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_2); __pyx_2 = 0;
+ Py_XDECREF(__pyx_3); __pyx_3 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+ Py_LeaveRecursiveCall();
+ switch (__pyx_why) {
+ case 3: goto __pyx_L0;
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1;
+ }
+ }
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder._decode_object");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Decoder__read_digits(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *__pyx_v_self,char __pyx_v_stop_char) {
+ int __pyx_v_i;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":126 */
+ __pyx_v_i = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":127 */
+ while (1) {
+ __pyx_1 = ((__pyx_v_self->tail[__pyx_v_i]) >= '0');
+ if (__pyx_1) {
+ __pyx_1 = ((__pyx_v_self->tail[__pyx_v_i]) <= '9');
+ }
+ if (!__pyx_1) {
+ __pyx_1 = ((__pyx_v_self->tail[__pyx_v_i]) == '-');
+ }
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_i < __pyx_v_self->size);
+ }
+ if (!__pyx_1) break;
+ __pyx_v_i = (__pyx_v_i + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":131 */
+ __pyx_1 = ((__pyx_v_self->tail[__pyx_v_i]) != __pyx_v_stop_char);
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromLong(__pyx_v_stop_char); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong((__pyx_v_self->tail[__pyx_v_i])); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k7p, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":134 */
+ __pyx_1 = ((__pyx_v_self->tail[0]) == '0');
+ if (!__pyx_1) {
+ __pyx_1 = ((__pyx_v_self->tail[0]) == '-');
+ if (__pyx_1) {
+ __pyx_1 = ((__pyx_v_self->tail[1]) == '0');
+ }
+ }
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_i == 1);
+ if (__pyx_1) {
+ __pyx_r = __pyx_v_i;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+ __Pyx_Raise(PyExc_ValueError, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; goto __pyx_L1;}
+ }
+ __pyx_L6:;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":140 */
+ __pyx_r = __pyx_v_i;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder._read_digits");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_int(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *__pyx_v_self) {
+ int __pyx_v_i;
+ PyObject *__pyx_v_ret;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_ret = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":144 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_read_digits(__pyx_v_self,'e'); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; goto __pyx_L1;}
+ __pyx_v_i = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":145 */
+ (__pyx_v_self->tail[__pyx_v_i]) = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":146 */
+ /*try:*/ {
+ __pyx_2 = PyInt_FromString(__pyx_v_self->tail,NULL,10); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L3;}
+ Py_DECREF(__pyx_v_ret);
+ __pyx_v_ret = __pyx_2;
+ __pyx_2 = 0;
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L4;
+ __pyx_L3: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_2); __pyx_2 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ (__pyx_v_self->tail[__pyx_v_i]) = 'e';
+ switch (__pyx_why) {
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1;
+ }
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":150 */
+ D_UPDATE_TAIL(__pyx_v_self,(__pyx_v_i + 1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":151 */
+ Py_INCREF(__pyx_v_ret);
+ __pyx_r = __pyx_v_ret;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder._decode_int");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_ret);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_string(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *__pyx_v_self) {
+ int __pyx_v_n;
+ char *__pyx_v_next_tail;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":160 */
+ __pyx_v_n = strtol(__pyx_v_self->tail,(&__pyx_v_next_tail),10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":161 */
+ __pyx_1 = (__pyx_v_next_tail == NULL);
+ if (!__pyx_1) {
+ __pyx_1 = ((__pyx_v_next_tail[0]) != ':');
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; goto __pyx_L1;}
+ Py_INCREF(__pyx_k8p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k8p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":164 */
+ __pyx_1 = ((__pyx_v_self->tail[0]) == '0');
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_n != 0);
+ if (!__pyx_1) {
+ __pyx_1 = ((__pyx_v_next_tail - __pyx_v_self->tail) != 1);
+ }
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ Py_INCREF(__pyx_k9p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k9p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":167 */
+ D_UPDATE_TAIL(__pyx_v_self,((__pyx_v_next_tail - __pyx_v_self->tail) + 1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":168 */
+ __pyx_1 = (__pyx_v_n == 0);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_k10p);
+ __pyx_r = __pyx_k10p;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":170 */
+ __pyx_1 = (__pyx_v_n > __pyx_v_self->size);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; goto __pyx_L1;}
+ Py_INCREF(__pyx_k3p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k3p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":172 */
+ __pyx_1 = (__pyx_v_n < 0);
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromLong(__pyx_v_n); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k11p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":175 */
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_self->tail,__pyx_v_n); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":176 */
+ D_UPDATE_TAIL(__pyx_v_self,__pyx_v_n);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":177 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder._decode_string");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_list(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *__pyx_v_self) {
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":180 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":182 */
+ while (1) {
+ __pyx_2 = (__pyx_v_self->size > 0);
+ if (!__pyx_2) break;
+ __pyx_2 = ((__pyx_v_self->tail[0]) == 'e');
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":184 */
+ D_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":185 */
+ __pyx_2 = __pyx_v_self->_yield_tuples;
+ if (__pyx_2) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_result);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_result);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyTuple_Type)), __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ }
+ __pyx_L5:;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_object(__pyx_v_self); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; goto __pyx_L1;}
+ __pyx_2 = PyList_Append(__pyx_v_result,__pyx_1); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":195 */
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; goto __pyx_L1;}
+ Py_INCREF(__pyx_k12p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k12p);
+ __pyx_1 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_1, 0, 0);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 195; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder._decode_list");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_dict(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *__pyx_v_self) {
+ char __pyx_v_ch;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_lastkey;
+ PyObject *__pyx_v_key;
+ PyObject *__pyx_v_value;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+ __pyx_v_lastkey = Py_None; Py_INCREF(Py_None);
+ __pyx_v_key = Py_None; Py_INCREF(Py_None);
+ __pyx_v_value = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":200 */
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":201 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_lastkey);
+ __pyx_v_lastkey = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":203 */
+ while (1) {
+ __pyx_2 = (__pyx_v_self->size > 0);
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":204 */
+ __pyx_v_ch = (__pyx_v_self->tail[0]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":205 */
+ __pyx_2 = (__pyx_v_ch == 'e');
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":206 */
+ D_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":207 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":210 */
+ __pyx_2 = ((__pyx_v_self->tail[0]) < '0');
+ if (!__pyx_2) {
+ __pyx_2 = ((__pyx_v_self->tail[0]) > '9');
+ }
+ if (__pyx_2) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ Py_INCREF(__pyx_k13p);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_k13p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":212 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_string(__pyx_v_self); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_key);
+ __pyx_v_key = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":213 */
+ if (PyObject_Cmp(__pyx_v_lastkey, __pyx_v_key, &__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; goto __pyx_L1;}
+ __pyx_2 = __pyx_2 >= 0;
+ if (__pyx_2) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ Py_INCREF(__pyx_k14p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k14p);
+ __pyx_1 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_1, 0, 0);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ /*else*/ {
+ Py_INCREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_lastkey);
+ __pyx_v_lastkey = __pyx_v_key;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":217 */
+ __pyx_3 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder *)__pyx_v_self->__pyx_vtab)->_decode_object(__pyx_v_self); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":218 */
+ if (PyObject_SetItem(__pyx_v_result, __pyx_v_key, __pyx_v_value) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; goto __pyx_L1;}
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":220 */
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; goto __pyx_L1;}
+ Py_INCREF(__pyx_k15p);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_k15p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Decoder._decode_dict");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_lastkey);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_value);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
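+/* bdecode(s): module-level entry point; builds a Decoder for s and
+   returns the result of its decode() method. */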
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_bdecode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_bencode_pyx_bdecode[] = "Decode string s to Python object";
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_bdecode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_s = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {"s",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_s)) return 0;
+ Py_INCREF(__pyx_v_s);
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_s);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_s);
+ __pyx_2 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_12_bencode_pyx_Decoder), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_decode); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.bdecode");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_s);
+ return __pyx_r;
+}
+
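+/* bdecode_as_tuple(s): like bdecode, but constructs the Decoder with a
+   second (true) argument so containers are returned as tuples. */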
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_bdecode_as_tuple(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_bencode_pyx_bdecode_as_tuple[] = "Decode string s to Python object, using tuples rather than lists.";
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_bdecode_as_tuple(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_s = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {"s",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_s)) return 0;
+ Py_INCREF(__pyx_v_s);
+ __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_s);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_s);
+ Py_INCREF(Py_True);
+ PyTuple_SET_ITEM(__pyx_1, 1, Py_True);
+ __pyx_2 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_12_bencode_pyx_Decoder), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_decode); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.bdecode_as_tuple");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_s);
+ return __pyx_r;
+}
+
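+/* Bencached.__init__: stores an already-encoded string on self.bencoded
+   so Encoder.process() can append it verbatim. */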
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_9Bencached___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_6bzrlib_12_bencode_pyx_9Bencached___init__ = {"__init__", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_9Bencached___init__, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_9Bencached___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_self = 0;
+ PyObject *__pyx_v_s = 0;
+ PyObject *__pyx_r;
+ static char *__pyx_argnames[] = {"self","s",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_self, &__pyx_v_s)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_s);
+ if (PyObject_SetAttr(__pyx_v_self, __pyx_n_bencoded, __pyx_v_s) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Bencached.__init__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_s);
+ return __pyx_r;
+}
+
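+/* Encoder.__init__: mallocs the initial output buffer of maxsize bytes
+   (defaulting to INITSIZE) and raises MemoryError if allocation fails. */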
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ int __pyx_v_maxsize;
+ char *__pyx_v_p;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"maxsize",0};
+ __pyx_v_maxsize = __pyx_d2;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|i", __pyx_argnames, &__pyx_v_maxsize)) return -1;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":259 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->maxsize = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":260 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->size = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":261 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->tail = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":263 */
+ __pyx_v_p = ((char *)malloc(__pyx_v_maxsize));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":264 */
+ __pyx_1 = (__pyx_v_p == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; goto __pyx_L1;}
+ Py_INCREF(__pyx_k18p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k18p);
+ __pyx_3 = PyObject_CallObject(PyExc_MemoryError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":267 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->buffer = __pyx_v_p;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":268 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->maxsize = __pyx_v_maxsize;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":269 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->tail = __pyx_v_p;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static void __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___dealloc__(PyObject *__pyx_v_self) {
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":272 */
+ free(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->buffer);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":273 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->buffer = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":274 */
+ ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->maxsize = 0;
+
+ Py_DECREF(__pyx_v_self);
+}
+
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Encoder___str__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Encoder___str__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = (((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->buffer != NULL);
+ if (__pyx_1) {
+ __pyx_1 = (((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->size != 0);
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyString_FromStringAndSize(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->buffer,((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->size); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ Py_INCREF(__pyx_k10p);
+ __pyx_r = __pyx_k10p;
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder.__str__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
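+/* Encoder._ensure_buffer: returns at once if size + required already fits,
+   otherwise doubles maxsize until it does and reallocs the buffer,
+   raising MemoryError when realloc fails. */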
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__ensure_buffer(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,int __pyx_v_required) {
+ char *__pyx_v_new_buffer;
+ int __pyx_v_new_size;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":290 */
+ __pyx_1 = ((__pyx_v_self->size + __pyx_v_required) < __pyx_v_self->maxsize);
+ if (__pyx_1) {
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":293 */
+ __pyx_v_new_size = __pyx_v_self->maxsize;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":294 */
+ while (1) {
+ __pyx_1 = (__pyx_v_new_size < (__pyx_v_self->size + __pyx_v_required));
+ if (!__pyx_1) break;
+ __pyx_v_new_size = (__pyx_v_new_size * 2);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":296 */
+ __pyx_v_new_buffer = ((char *)realloc(__pyx_v_self->buffer,__pyx_v_new_size));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":297 */
+ __pyx_1 = (__pyx_v_new_buffer == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; goto __pyx_L1;}
+ Py_INCREF(__pyx_k19p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k19p);
+ __pyx_3 = PyObject_CallObject(PyExc_MemoryError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":300 */
+ __pyx_v_self->buffer = __pyx_v_new_buffer;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":301 */
+ __pyx_v_self->maxsize = __pyx_v_new_size;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":302 */
+ __pyx_v_self->tail = (&(__pyx_v_new_buffer[__pyx_v_self->size]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":303 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._ensure_buffer");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
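+/* Encoder._encode_int: snprintfs a C int in bencoded integer form into the
+   tail of the buffer and advances the tail by the bytes written. */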
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_int(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,int __pyx_v_x) {
+ int __pyx_v_n;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":310 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,__pyx_e_6bzrlib_12_bencode_pyx_INT_BUF_SIZE); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 310; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":311 */
+ __pyx_v_n = snprintf(__pyx_v_self->tail,__pyx_e_6bzrlib_12_bencode_pyx_INT_BUF_SIZE,__pyx_k20,__pyx_v_x);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":312 */
+ __pyx_1 = (__pyx_v_n < 0);
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromLong(__pyx_v_x); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k21p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_MemoryError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":314 */
+ E_UPDATE_TAIL(__pyx_v_self,__pyx_v_n);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":315 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._encode_int");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
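+/* Encoder._encode_long: formats an arbitrary-precision integer by joining
+   'i', str(x) and 'e' and handing the result to _append_string. */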
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_long(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,PyObject *__pyx_v_x) {
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_x);
+ __pyx_1 = PyObject_GetAttr(__pyx_k10p, __pyx_n_join); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_x);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_x);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyString_Type)), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_i);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_n_i);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_3);
+ Py_INCREF(__pyx_n_e);
+ PyTuple_SET_ITEM(__pyx_2, 2, __pyx_n_e);
+ __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_4 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_append_string(__pyx_v_self,__pyx_2); if (__pyx_4 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_r = __pyx_4;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._encode_long");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_x);
+ return __pyx_r;
+}
+
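+/* Encoder._append_string: ensures capacity, then memcpys the raw bytes of
+   s onto the end of the buffer. */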
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__append_string(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,PyObject *__pyx_v_s) {
+ Py_ssize_t __pyx_v_n;
+ int __pyx_r;
+ Py_ssize_t __pyx_1;
+ int __pyx_2;
+ char *__pyx_3;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_s);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":322 */
+ __pyx_1 = PyString_GET_SIZE(__pyx_v_s); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ __pyx_v_n = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":323 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,__pyx_v_n); if (__pyx_2 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":324 */
+ __pyx_3 = PyString_AS_STRING(__pyx_v_s); if (__pyx_3 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; goto __pyx_L1;}
+ memcpy(__pyx_v_self->tail,__pyx_3,__pyx_v_n);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":325 */
+ E_UPDATE_TAIL(__pyx_v_self,__pyx_v_n);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":326 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._append_string");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_s);
+ return __pyx_r;
+}
+
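+/* Encoder._encode_string: snprintfs the length prefix, then memcpys the
+   string bytes immediately after it. */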
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_string(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,PyObject *__pyx_v_x) {
+ int __pyx_v_n;
+ Py_ssize_t __pyx_v_x_len;
+ int __pyx_r;
+ Py_ssize_t __pyx_1;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ char *__pyx_5;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_x);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":331 */
+ __pyx_1 = PyString_GET_SIZE(__pyx_v_x); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 331; goto __pyx_L1;}
+ __pyx_v_x_len = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":332 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,(__pyx_v_x_len + __pyx_e_6bzrlib_12_bencode_pyx_INT_BUF_SIZE)); if (__pyx_2 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":333 */
+ __pyx_v_n = snprintf(__pyx_v_self->tail,__pyx_e_6bzrlib_12_bencode_pyx_INT_BUF_SIZE,__pyx_k25,__pyx_v_x_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":334 */
+ __pyx_2 = (__pyx_v_n < 0);
+ if (__pyx_2) {
+ __pyx_3 = PyNumber_Remainder(__pyx_k26p, __pyx_v_x); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_MemoryError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":336 */
+ __pyx_5 = PyString_AS_STRING(__pyx_v_x); if (__pyx_5 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; goto __pyx_L1;}
+ memcpy((__pyx_v_self->tail + __pyx_v_n),__pyx_5,__pyx_v_x_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":337 */
+ E_UPDATE_TAIL(__pyx_v_self,(__pyx_v_n + __pyx_v_x_len));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":338 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._encode_string");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_x);
+ return __pyx_r;
+}
+
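+/* Encoder._encode_list: emits 'l', recursively process()es every element
+   of the sequence, then emits the closing 'e'. */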
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_list(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,PyObject *__pyx_v_x) {
+ PyObject *__pyx_v_i;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_x);
+ __pyx_v_i = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":341 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,1); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":342 */
+ (__pyx_v_self->tail[0]) = 'l';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":343 */
+ E_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":345 */
+ __pyx_2 = PyObject_GetIter(__pyx_v_x); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 345; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 345; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_i);
+ __pyx_v_i = __pyx_3;
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_process); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_i);
+ __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":348 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,1); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 348; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":349 */
+ (__pyx_v_self->tail[0]) = 'e';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":350 */
+ E_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":351 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._encode_list");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_i);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_x);
+ return __pyx_r;
+}
+
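+/* Encoder._encode_dict: emits 'd', sorts the keys, insists every key is a
+   plain str (TypeError otherwise), encodes each key/value pair and closes
+   with 'e'. */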
+static int __pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_dict(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *__pyx_v_self,PyObject *__pyx_v_x) {
+ PyObject *__pyx_v_keys;
+ PyObject *__pyx_v_k;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_x);
+ __pyx_v_keys = Py_None; Py_INCREF(Py_None);
+ __pyx_v_k = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":354 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,1); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":355 */
+ (__pyx_v_self->tail[0]) = 'd';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":356 */
+ E_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":358 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_x, __pyx_n_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; goto __pyx_L1;}
+ __pyx_3 = PyObject_CallObject(__pyx_2, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_keys);
+ __pyx_v_keys = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":359 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_keys, __pyx_n_sort); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; goto __pyx_L1;}
+ __pyx_3 = PyObject_CallObject(__pyx_2, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":360 */
+ __pyx_2 = PyObject_GetIter(__pyx_v_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_k);
+ __pyx_v_k = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":361 */
+ __pyx_1 = (!PyString_CheckExact(__pyx_v_k));
+ if (__pyx_1) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L1;}
+ Py_INCREF(__pyx_k30p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k30p);
+ __pyx_4 = PyObject_CallObject(PyExc_TypeError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":363 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_encode_string(__pyx_v_self,__pyx_v_k); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 363; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":364 */
+ __pyx_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_process); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; goto __pyx_L1;}
+ __pyx_4 = PyObject_GetItem(__pyx_v_x, __pyx_v_k); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_5); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":366 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self->__pyx_vtab)->_ensure_buffer(__pyx_v_self,1); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":367 */
+ (__pyx_v_self->tail[0]) = 'e';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":368 */
+ E_UPDATE_TAIL(__pyx_v_self,1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":369 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder._encode_dict");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_keys);
+ Py_DECREF(__pyx_v_k);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_x);
+ return __pyx_r;
+}
+
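+/* Encoder.process: wrapped in Py_EnterRecursiveCall/Py_LeaveRecursiveCall,
+   dispatches on the exact type of x (str, int, long, list/tuple/StaticTuple,
+   dict, bool, Bencached) and raises TypeError for anything else. */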
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Encoder_process(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_7Encoder_process(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_x = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ static char *__pyx_argnames[] = {"x",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_x)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_x);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":372 */
+ __pyx_1 = Py_EnterRecursiveCall(__pyx_k31);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L1;}
+ Py_INCREF(__pyx_k5p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k5p);
+ __pyx_3 = PyObject_CallObject(PyExc_RuntimeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":374 */
+ /*try:*/ {
+ __pyx_1 = PyString_CheckExact(__pyx_v_x);
+ if (__pyx_1) {
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_encode_string(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_v_x); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; goto __pyx_L4;}
+ goto __pyx_L6;
+ }
+ __pyx_1 = PyInt_CheckExact(__pyx_v_x);
+ if (__pyx_1) {
+ __pyx_1 = PyInt_AsLong(__pyx_v_x); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; goto __pyx_L4;}
+ __pyx_4 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_encode_int(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_1); if (__pyx_4 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; goto __pyx_L4;}
+ goto __pyx_L6;
+ }
+ __pyx_1 = PyLong_CheckExact(__pyx_v_x);
+ if (__pyx_1) {
+ __pyx_4 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_encode_long(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_v_x); if (__pyx_4 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; goto __pyx_L4;}
+ goto __pyx_L6;
+ }
+ __pyx_1 = PyList_CheckExact(__pyx_v_x);
+ if (!__pyx_1) {
+ __pyx_1 = PyTuple_CheckExact(__pyx_v_x);
+ if (!__pyx_1) {
+ __pyx_1 = StaticTuple_CheckExact(__pyx_v_x);
+ }
+ }
+ if (__pyx_1) {
+ __pyx_4 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_encode_list(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_v_x); if (__pyx_4 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 383; goto __pyx_L4;}
+ goto __pyx_L6;
+ }
+ __pyx_1 = PyDict_CheckExact(__pyx_v_x);
+ if (__pyx_1) {
+ __pyx_4 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_encode_dict(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_v_x); if (__pyx_4 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; goto __pyx_L4;}
+ goto __pyx_L6;
+ }
+ __pyx_1 = PyBool_Check(__pyx_v_x);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_x);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_x);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyInt_Type)), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L4;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = PyInt_AsLong(__pyx_3); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L4;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_encode_int(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_4); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L4;}
+ goto __pyx_L6;
+ }
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_Bencached); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; goto __pyx_L4;}
+ __pyx_4 = PyObject_IsInstance(__pyx_v_x,__pyx_2); if (__pyx_4 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; goto __pyx_L4;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ if (__pyx_4) {
+ __pyx_3 = PyObject_GetAttr(__pyx_v_x, __pyx_n_bencoded); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; goto __pyx_L4;}
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder *)((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self)->__pyx_vtab)->_append_string(((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)__pyx_v_self),__pyx_3); if (__pyx_1 == 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; goto __pyx_L4;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+ __pyx_2 = PyNumber_Remainder(__pyx_k33p, __pyx_v_x); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L4;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_TypeError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L4;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L4;}
+ }
+ __pyx_L6:;
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L5;
+ __pyx_L4: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_3); __pyx_3 = 0;
+ Py_XDECREF(__pyx_2); __pyx_2 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+ Py_LeaveRecursiveCall();
+ switch (__pyx_why) {
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1;
+ }
+ }
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.Encoder.process");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_x);
+ return __pyx_r;
+}
+
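+/* bencode(x): creates an Encoder, feeds x through Encoder.process() and
+   returns str(encoder), i.e. the accumulated buffer contents. */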
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_bencode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_bencode_pyx_bencode[] = "Encode Python object x to string";
+static PyObject *__pyx_f_6bzrlib_12_bencode_pyx_bencode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_x = 0;
+ PyObject *__pyx_v_encoder;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"x",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_x)) return 0;
+ Py_INCREF(__pyx_v_x);
+ __pyx_v_encoder = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":398 */
+ __pyx_1 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_12_bencode_pyx_Encoder), 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_encoder);
+ __pyx_v_encoder = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":399 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_encoder, __pyx_n_process); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_x);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_x);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":400 */
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_encoder);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_encoder);
+ __pyx_2 = PyObject_CallObject(((PyObject *)(&PyString_Type)), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx.bencode");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_encoder);
+ Py_DECREF(__pyx_v_x);
+ return __pyx_r;
+}
+static struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder __pyx_vtable_6bzrlib_12_bencode_pyx_Decoder;
+
+static PyObject *__pyx_tp_new_6bzrlib_12_bencode_pyx_Decoder(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Decoder **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_12_bencode_pyx_Decoder;
+ p->text = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_12_bencode_pyx_Decoder(PyObject *o) {
+ struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *p = (struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)o;
+ Py_XDECREF(p->text);
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_12_bencode_pyx_Decoder(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *p = (struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)o;
+ if (p->text) {
+ e = (*v)(p->text, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_12_bencode_pyx_Decoder(PyObject *o) {
+ struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *p = (struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder *)o;
+ PyObject *t;
+ t = p->text;
+ p->text = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_12_bencode_pyx_Decoder[] = {
+ {"decode", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_7Decoder_decode, METH_VARARGS|METH_KEYWORDS, 0},
+ {"decode_object", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_7Decoder_decode_object, METH_VARARGS|METH_KEYWORDS, 0},
+ {0, 0, 0, 0}
+};
+
+static struct PyMemberDef __pyx_members_6bzrlib_12_bencode_pyx_Decoder[] = {
+ {"tail", T_STRING, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder, tail), READONLY, 0},
+ {"size", T_INT, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder, size), READONLY, 0},
+ {"_yield_tuples", T_INT, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder, _yield_tuples), READONLY, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_Decoder = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_Decoder = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_Decoder = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_Decoder = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_12_bencode_pyx_Decoder = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._bencode_pyx.Decoder", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Decoder), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_12_bencode_pyx_Decoder, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_Decoder, /*tp_as_number*/
+ &__pyx_tp_as_sequence_Decoder, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_Decoder, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_Decoder, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "Bencode decoder", /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_12_bencode_pyx_Decoder, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_12_bencode_pyx_Decoder, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_12_bencode_pyx_Decoder, /*tp_methods*/
+ __pyx_members_6bzrlib_12_bencode_pyx_Decoder, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_12_bencode_pyx_7Decoder___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_12_bencode_pyx_Decoder, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+static struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder __pyx_vtable_6bzrlib_12_bencode_pyx_Encoder;
+
+static PyObject *__pyx_tp_new_6bzrlib_12_bencode_pyx_Encoder(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_12_bencode_pyx_Encoder **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_12_bencode_pyx_Encoder;
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_12_bencode_pyx_Encoder(PyObject *o) {
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++o->ob_refcnt;
+ __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --o->ob_refcnt;
+ PyErr_Restore(etype, eval, etb);
+ }
+ (*o->ob_type->tp_free)(o);
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_12_bencode_pyx_Encoder[] = {
+ {"process", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_7Encoder_process, METH_VARARGS|METH_KEYWORDS, 0},
+ {0, 0, 0, 0}
+};
+
+static struct PyMemberDef __pyx_members_6bzrlib_12_bencode_pyx_Encoder[] = {
+ {"tail", T_STRING, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder, tail), READONLY, 0},
+ {"size", T_INT, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder, size), READONLY, 0},
+ {"buffer", T_STRING, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder, buffer), READONLY, 0},
+ {"maxsize", T_INT, offsetof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder, maxsize), READONLY, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_Encoder = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_Encoder = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_Encoder = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_Encoder = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_12_bencode_pyx_Encoder = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._bencode_pyx.Encoder", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_12_bencode_pyx_Encoder), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_12_bencode_pyx_Encoder, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_Encoder, /*tp_as_number*/
+ &__pyx_tp_as_sequence_Encoder, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_Encoder, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___str__, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_Encoder, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "Bencode encoder", /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_12_bencode_pyx_Encoder, /*tp_methods*/
+ __pyx_members_6bzrlib_12_bencode_pyx_Encoder, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_12_bencode_pyx_7Encoder___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_12_bencode_pyx_Encoder, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+
+static struct PyMethodDef __pyx_methods[] = {
+ {"bdecode", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_bdecode, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_bencode_pyx_bdecode},
+ {"bdecode_as_tuple", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_bdecode_as_tuple, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_bencode_pyx_bdecode_as_tuple},
+ {"bencode", (PyCFunction)__pyx_f_6bzrlib_12_bencode_pyx_bencode, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_bencode_pyx_bencode},
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
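+/* Module initialisation: registers the module-level functions, wires the
+   Decoder and Encoder vtables and type objects, imports StaticTuple from
+   bzrlib._static_tuple_c and creates the Bencached helper class. */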
+PyMODINIT_FUNC init_bencode_pyx(void); /*proto*/
+PyMODINIT_FUNC init_bencode_pyx(void) {
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ PyObject *__pyx_6 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_bencode_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ __pyx_vtabptr_6bzrlib_12_bencode_pyx_Decoder = &__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder._decode_object = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_object;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder._read_digits = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__read_digits;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder._decode_int = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_int;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder._decode_string = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_string;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder._decode_list = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_list;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Decoder._decode_dict = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Decoder__decode_dict;
+ __pyx_type_6bzrlib_12_bencode_pyx_Decoder.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_12_bencode_pyx_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_12_bencode_pyx_Decoder.tp_dict, __pyx_vtabptr_6bzrlib_12_bencode_pyx_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "Decoder", (PyObject *)&__pyx_type_6bzrlib_12_bencode_pyx_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_12_bencode_pyx_Decoder = &__pyx_type_6bzrlib_12_bencode_pyx_Decoder;
+ __pyx_vtabptr_6bzrlib_12_bencode_pyx_Encoder = &__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._ensure_buffer = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__ensure_buffer;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._encode_int = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_int;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._encode_long = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_long;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._append_string = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__append_string;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._encode_string = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_string;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._encode_list = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_list;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_bencode_pyx_Encoder._encode_dict = (void(*)(void))__pyx_f_6bzrlib_12_bencode_pyx_7Encoder__encode_dict;
+ if (PyType_Ready(&__pyx_type_6bzrlib_12_bencode_pyx_Encoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_12_bencode_pyx_Encoder.tp_dict, __pyx_vtabptr_6bzrlib_12_bencode_pyx_Encoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "Encoder", (PyObject *)&__pyx_type_6bzrlib_12_bencode_pyx_Encoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_12_bencode_pyx_Encoder = &__pyx_type_6bzrlib_12_bencode_pyx_Encoder;
+ __pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = __Pyx_ImportType("bzrlib._static_tuple_c", "StaticTuple", sizeof(StaticTuple)); if (!__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":66 */
+ __pyx_1 = import_static_tuple_c(); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":77 */
+ __pyx_2 = PyInt_FromLong(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; goto __pyx_L1;}
+ __pyx_d1 = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":233 */
+ __pyx_3 = PyDict_New(); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; goto __pyx_L1;}
+ __pyx_4 = __Pyx_GetName(__pyx_b, __pyx_n_object); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = __Pyx_CreateClass(__pyx_5, __pyx_3, __pyx_n_Bencached, "bzrlib._bencode_pyx"); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":234 */
+ __pyx_5 = PyList_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_bencoded);
+ PyList_SET_ITEM(__pyx_5, 0, __pyx_n_bencoded);
+ if (PyObject_SetAttr(__pyx_4, __pyx_n___slots__, __pyx_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":236 */
+ __pyx_5 = PyCFunction_New(&__pyx_mdef_6bzrlib_12_bencode_pyx_9Bencached___init__, 0); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; goto __pyx_L1;}
+ __pyx_6 = PyMethod_New(__pyx_5, 0, __pyx_4); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ if (PyObject_SetAttr(__pyx_4, __pyx_n___init__, __pyx_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; goto __pyx_L1;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_Bencached, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":253 */
+ __pyx_d2 = __pyx_e_6bzrlib_12_bencode_pyx_INITSIZE;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_bencode_pyx.pyx":396 */
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ Py_XDECREF(__pyx_6);
+ __Pyx_AddTraceback("bzrlib._bencode_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_bencode_pyx.pyx",
+ "_static_tuple_c.pxd",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+ PyObject *pycobj = 0;
+ int result;
+
+ pycobj = PyCObject_FromVoidPtr(vtable, 0);
+ if (!pycobj)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0)
+ goto bad;
+ result = 0;
+ goto done;
+
+bad:
+ result = -1;
+done:
+ Py_XDECREF(pycobj);
+ return result;
+}
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name,
+ long size)
+{
+ PyObject *py_module = 0;
+ PyObject *result = 0;
+
+ py_module = __Pyx_ImportModule(module_name);
+ if (!py_module)
+ goto bad;
+ result = PyObject_GetAttrString(py_module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+ if (((PyTypeObject *)result)->tp_basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s.%s does not appear to be the correct type object",
+ module_name, class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return 0;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(char *name) {
+ PyObject *py_name = 0;
+
+ py_name = PyString_FromString(name);
+ if (!py_name)
+ goto bad;
+ return PyImport_Import(py_name);
+bad:
+ Py_XDECREF(py_name);
+ return 0;
+}
+#endif
+
+static PyObject *__Pyx_CreateClass(
+ PyObject *bases, PyObject *dict, PyObject *name, char *modname)
+{
+ PyObject *py_modname;
+ PyObject *result = 0;
+
+ py_modname = PyString_FromString(modname);
+ if (!py_modname)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__module__", py_modname) < 0)
+ goto bad;
+ result = PyClass_New(bases, dict, name);
+bad:
+ Py_XDECREF(py_modname);
+ return result;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_bencode_pyx.h b/bzrlib/_bencode_pyx.h
new file mode 100644
index 0000000..d956cf1
--- /dev/null
+++ b/bzrlib/_bencode_pyx.h
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Simple header providing some macro definitions for _bencode_pyx.pyx
+ */
+
+#define D_UPDATE_TAIL(self, n) (((self)->size -= (n), (self)->tail += (n)))
+#define E_UPDATE_TAIL(self, n) (((self)->size += (n), (self)->tail += (n)))
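+
+/* In other words: D_UPDATE_TAIL(self, n) consumes n bytes from a Decoder
+ * (the remaining size shrinks and the read position advances), while
+ * E_UPDATE_TAIL(self, n) accounts for n freshly written bytes in an Encoder
+ * (the used size grows and the write position advances).
+ */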
diff --git a/bzrlib/_bencode_pyx.pyx b/bzrlib/_bencode_pyx.pyx
new file mode 100644
index 0000000..b41a1bb
--- /dev/null
+++ b/bzrlib/_bencode_pyx.pyx
@@ -0,0 +1,400 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Pyrex implementation for bencode coder/decoder"""
+
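+# As a quick reference, the bencode forms handled here are (the values are
+# just examples):
+#   integers   i42e                     -> 42
+#   strings    4:spam                   -> 'spam'
+#   lists      l4:spami42ee             -> ['spam', 42]
+#   dicts      d3:agei25e4:name3:bobe   -> {'age': 25, 'name': 'bob'}
+# Dict keys must be plain strings and are emitted in sorted order.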
+
+cdef extern from "stddef.h":
+ ctypedef unsigned int size_t
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t
+ int PyInt_CheckExact(object o)
+ int PyLong_CheckExact(object o)
+ int PyString_CheckExact(object o)
+ int PyTuple_CheckExact(object o)
+ int PyList_CheckExact(object o)
+ int PyDict_CheckExact(object o)
+ int PyBool_Check(object o)
+ object PyString_FromStringAndSize(char *v, Py_ssize_t len)
+ char *PyString_AS_STRING(object o) except NULL
+ Py_ssize_t PyString_GET_SIZE(object o) except -1
+ object PyInt_FromString(char *str, char **pend, int base)
+ int Py_GetRecursionLimit()
+ int Py_EnterRecursiveCall(char *)
+ void Py_LeaveRecursiveCall()
+
+ int PyList_Append(object, object) except -1
+
+cdef extern from "stdlib.h":
+ void free(void *memblock)
+ void *malloc(size_t size)
+ void *realloc(void *memblock, size_t size)
+ long strtol(char *, char **, int)
+
+cdef extern from "string.h":
+ void *memcpy(void *dest, void *src, size_t count)
+
+cdef extern from "python-compat.h":
+ int snprintf(char* buffer, size_t nsize, char* fmt, ...)
+
+cdef class Decoder
+cdef class Encoder
+
+cdef extern from "_bencode_pyx.h":
+ void D_UPDATE_TAIL(Decoder, int n)
+ void E_UPDATE_TAIL(Encoder, int n)
+
+# To maintain compatibility with older versions of pyrex, we have to use the
+# relative import here, rather than 'bzrlib._static_tuple_c'
+from _static_tuple_c cimport StaticTuple, StaticTuple_CheckExact, \
+ import_static_tuple_c
+
+import_static_tuple_c()
+
+
+cdef class Decoder:
+ """Bencode decoder"""
+
+ cdef readonly char *tail
+ cdef readonly int size
+ cdef readonly int _yield_tuples
+ cdef object text
+
+ def __init__(self, s, yield_tuples=0):
+ """Initialize decoder engine.
+ @param s: Python string.
+ """
+ if not PyString_CheckExact(s):
+ raise TypeError("String required")
+
+ self.text = s
+ self.tail = PyString_AS_STRING(s)
+ self.size = PyString_GET_SIZE(s)
+ self._yield_tuples = int(yield_tuples)
+
+ def decode(self):
+ result = self._decode_object()
+ if self.size != 0:
+ raise ValueError('junk in stream')
+ return result
+
+ def decode_object(self):
+ return self._decode_object()
+
+ cdef object _decode_object(self):
+ cdef char ch
+
+ if 0 == self.size:
+ raise ValueError('stream underflow')
+
+ if Py_EnterRecursiveCall("_decode_object"):
+ raise RuntimeError("too deeply nested")
+ try:
+ ch = self.tail[0]
+ if c'0' <= ch <= c'9':
+ return self._decode_string()
+ elif ch == c'l':
+ D_UPDATE_TAIL(self, 1)
+ return self._decode_list()
+ elif ch == c'i':
+ D_UPDATE_TAIL(self, 1)
+ return self._decode_int()
+ elif ch == c'd':
+ D_UPDATE_TAIL(self, 1)
+ return self._decode_dict()
+ else:
+ raise ValueError('unknown object type identifier %r' % ch)
+ finally:
+ Py_LeaveRecursiveCall()
+
+ cdef int _read_digits(self, char stop_char) except -1:
+ cdef int i
+ i = 0
+ while ((self.tail[i] >= c'0' and self.tail[i] <= c'9') or
+ self.tail[i] == c'-') and i < self.size:
+ i = i + 1
+
+ if self.tail[i] != stop_char:
+ raise ValueError("Stop character %c not found: %c" %
+ (stop_char, self.tail[i]))
+ if (self.tail[0] == c'0' or
+ (self.tail[0] == c'-' and self.tail[1] == c'0')):
+ if i == 1:
+ return i
+ else:
+ raise ValueError # leading zeroes are not allowed
+ return i
+
+ cdef object _decode_int(self):
+ cdef int i
+ i = self._read_digits(c'e')
+ self.tail[i] = 0
+ try:
+ ret = PyInt_FromString(self.tail, NULL, 10)
+ finally:
+ self.tail[i] = c'e'
+ D_UPDATE_TAIL(self, i+1)
+ return ret
+
+ cdef object _decode_string(self):
+ cdef int n
+ cdef char *next_tail
+ # strtol allows leading whitespace, negatives, and leading zeros
+ # however, all callers have already checked that '0' <= tail[0] <= '9'
+ # or they wouldn't have called _decode_string
+ # strtol will stop at trailing whitespace, etc
+ n = strtol(self.tail, &next_tail, 10)
+ if next_tail == NULL or next_tail[0] != c':':
+ raise ValueError('string len not terminated by ":"')
+ # strtol allows leading zeros, so validate that we don't have that
+ if (self.tail[0] == c'0'
+ and (n != 0 or (next_tail - self.tail != 1))):
+ raise ValueError('leading zeros are not allowed')
+ D_UPDATE_TAIL(self, next_tail - self.tail + 1)
+ if n == 0:
+ return ''
+ if n > self.size:
+ raise ValueError('stream underflow')
+ if n < 0:
+ raise ValueError('string size below zero: %d' % n)
+
+ result = PyString_FromStringAndSize(self.tail, n)
+ D_UPDATE_TAIL(self, n)
+ return result
+
+ cdef object _decode_list(self):
+ result = []
+
+ while self.size > 0:
+ if self.tail[0] == c'e':
+ D_UPDATE_TAIL(self, 1)
+ if self._yield_tuples:
+ return tuple(result)
+ else:
+ return result
+ else:
+ # As a quick shortcut, check to see if the next object is a
+ # string, since we know that won't be creating recursion
+ # if self.tail[0] >= c'0' and self.tail[0] <= c'9':
+ PyList_Append(result, self._decode_object())
+
+ raise ValueError('malformed list')
+
+ cdef object _decode_dict(self):
+ cdef char ch
+
+ result = {}
+ lastkey = None
+
+ while self.size > 0:
+ ch = self.tail[0]
+ if ch == c'e':
+ D_UPDATE_TAIL(self, 1)
+ return result
+ else:
+ # keys should be strings only
+ if self.tail[0] < c'0' or self.tail[0] > c'9':
+ raise ValueError('key was not a simple string.')
+ key = self._decode_string()
+ if lastkey >= key:
+ raise ValueError('dict keys disordered')
+ else:
+ lastkey = key
+ value = self._decode_object()
+ result[key] = value
+
+ raise ValueError('malformed dict')
+
+
+def bdecode(object s):
+ """Decode string x to Python object"""
+ return Decoder(s).decode()
+
+
+def bdecode_as_tuple(object s):
+ """Decode string x to Python object, using tuples rather than lists."""
+ return Decoder(s, True).decode()
+
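+# For example (illustrative inputs):
+#   bdecode('d1:ali1ei2eee')          returns {'a': [1, 2]}
+#   bdecode_as_tuple('d1:ali1ei2eee') returns {'a': (1, 2)}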
+
+class Bencached(object):
+ __slots__ = ['bencoded']
+
+ def __init__(self, s):
+ self.bencoded = s
+
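+# Bencached wraps bytes that are already bencoded so that Encoder.process()
+# copies them through verbatim instead of re-encoding them, e.g. (big_value
+# is a placeholder): bencode({'key': Bencached(bencode(big_value))}).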
+
+cdef enum:
+ INITSIZE = 1024 # initial size for encoder buffer
+ INT_BUF_SIZE = 32
+
+
+cdef class Encoder:
+ """Bencode encoder"""
+
+ cdef readonly char *tail
+ cdef readonly int size
+ cdef readonly char *buffer
+ cdef readonly int maxsize
+
+ def __init__(self, int maxsize=INITSIZE):
+ """Initialize encoder engine
+ @param maxsize: initial size of internal char buffer
+ """
+ cdef char *p
+
+ self.maxsize = 0
+ self.size = 0
+ self.tail = NULL
+
+ p = <char*>malloc(maxsize)
+ if p == NULL:
+ raise MemoryError('Not enough memory to allocate buffer '
+ 'for encoder')
+ self.buffer = p
+ self.maxsize = maxsize
+ self.tail = p
+
+ def __dealloc__(self):
+ free(self.buffer)
+ self.buffer = NULL
+ self.maxsize = 0
+
+ def __str__(self):
+ if self.buffer != NULL and self.size != 0:
+ return PyString_FromStringAndSize(self.buffer, self.size)
+ else:
+ return ''
+
+ cdef int _ensure_buffer(self, int required) except 0:
+ """Ensure that tail of CharTail buffer has enough size.
+ If buffer is not big enough then function try to
+ realloc buffer.
+ """
+ cdef char *new_buffer
+ cdef int new_size
+
+ if self.size + required < self.maxsize:
+ return 1
+
+ new_size = self.maxsize
+ while new_size < self.size + required:
+ new_size = new_size * 2
+ new_buffer = <char*>realloc(self.buffer, <size_t>new_size)
+ if new_buffer == NULL:
+ raise MemoryError('Cannot realloc buffer for encoder')
+
+ self.buffer = new_buffer
+ self.maxsize = new_size
+ self.tail = &new_buffer[self.size]
+ return 1
+
+ cdef int _encode_int(self, int x) except 0:
+ """Encode int to bencode string iNNNe
+ @param x: value to encode
+ """
+ cdef int n
+ self._ensure_buffer(INT_BUF_SIZE)
+ n = snprintf(self.tail, INT_BUF_SIZE, "i%de", x)
+ if n < 0:
+ raise MemoryError('int %d too big to encode' % x)
+ E_UPDATE_TAIL(self, n)
+ return 1
+
+ cdef int _encode_long(self, x) except 0:
+ return self._append_string(''.join(('i', str(x), 'e')))
+
+ cdef int _append_string(self, s) except 0:
+ cdef Py_ssize_t n
+ n = PyString_GET_SIZE(s)
+ self._ensure_buffer(n)
+ memcpy(self.tail, PyString_AS_STRING(s), n)
+ E_UPDATE_TAIL(self, n)
+ return 1
+
+ cdef int _encode_string(self, x) except 0:
+ cdef int n
+ cdef Py_ssize_t x_len
+ x_len = PyString_GET_SIZE(x)
+ self._ensure_buffer(x_len + INT_BUF_SIZE)
+ n = snprintf(self.tail, INT_BUF_SIZE, '%d:', x_len)
+ if n < 0:
+ raise MemoryError('string %s too big to encode' % x)
+ memcpy(<void *>(self.tail+n), PyString_AS_STRING(x), x_len)
+ E_UPDATE_TAIL(self, n + x_len)
+ return 1
+
+ cdef int _encode_list(self, x) except 0:
+ self._ensure_buffer(1)
+ self.tail[0] = c'l'
+ E_UPDATE_TAIL(self, 1)
+
+ for i in x:
+ self.process(i)
+
+ self._ensure_buffer(1)
+ self.tail[0] = c'e'
+ E_UPDATE_TAIL(self, 1)
+ return 1
+
+ cdef int _encode_dict(self, x) except 0:
+ self._ensure_buffer(1)
+ self.tail[0] = c'd'
+ E_UPDATE_TAIL(self, 1)
+
+ keys = x.keys()
+ keys.sort()
+ for k in keys:
+ if not PyString_CheckExact(k):
+ raise TypeError('key in dict should be string')
+ self._encode_string(k)
+ self.process(x[k])
+
+ self._ensure_buffer(1)
+ self.tail[0] = c'e'
+ E_UPDATE_TAIL(self, 1)
+ return 1
+
+ def process(self, object x):
+ if Py_EnterRecursiveCall("encode"):
+ raise RuntimeError("too deeply nested")
+ try:
+ if PyString_CheckExact(x):
+ self._encode_string(x)
+ elif PyInt_CheckExact(x):
+ self._encode_int(x)
+ elif PyLong_CheckExact(x):
+ self._encode_long(x)
+ elif (PyList_CheckExact(x) or PyTuple_CheckExact(x)
+ or StaticTuple_CheckExact(x)):
+ self._encode_list(x)
+ elif PyDict_CheckExact(x):
+ self._encode_dict(x)
+ elif PyBool_Check(x):
+ self._encode_int(int(x))
+ elif isinstance(x, Bencached):
+ self._append_string(x.bencoded)
+ else:
+ raise TypeError('unsupported type %r' % x)
+ finally:
+ Py_LeaveRecursiveCall()
+
+
+def bencode(x):
+ """Encode Python object x to string"""
+ encoder = Encoder()
+ encoder.process(x)
+ return str(encoder)
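+
+# A round trip through the two entry points (illustrative):
+#   bencode({'spam': ['a', 42]})  -> 'd4:spaml1:ai42eee'
+#   bdecode('d4:spaml1:ai42eee')  -> {'spam': ['a', 42]}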
diff --git a/bzrlib/_btree_serializer_py.py b/bzrlib/_btree_serializer_py.py
new file mode 100644
index 0000000..21d00e6
--- /dev/null
+++ b/bzrlib/_btree_serializer_py.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""B+Tree index parsing."""
+
+from __future__ import absolute_import
+
+from bzrlib import static_tuple
+
+
+def _parse_leaf_lines(bytes, key_length, ref_list_length):
+ lines = bytes.split('\n')
+ nodes = []
+ as_st = static_tuple.StaticTuple.from_sequence
+ stuple = static_tuple.StaticTuple
+ for line in lines[1:]:
+ if line == '':
+ return nodes
+ elements = line.split('\0', key_length)
+ # keys are tuples
+ key = as_st(elements[:key_length]).intern()
+ line = elements[-1]
+ references, value = line.rsplit('\0', 1)
+ if ref_list_length:
+ ref_lists = []
+ for ref_string in references.split('\t'):
+ ref_list = as_st([as_st(ref.split('\0')).intern()
+ for ref in ref_string.split('\r') if ref])
+ ref_lists.append(ref_list)
+ ref_lists = as_st(ref_lists)
+ node_value = stuple(value, ref_lists)
+ else:
+ node_value = stuple(value, stuple())
+ # No need for StaticTuple here as it is put into a dict
+ nodes.append((key, node_value))
+ return nodes
+
+
+def _flatten_node(node, reference_lists):
+ """Convert a node into the serialized form.
+
+ :param node: A node tuple; node[1] is the key tuple, node[2] the value
+ and node[3] the reference lists (node[3] is only read when
+ reference_lists is set)
+ :param reference_lists: Does this index have reference lists?
+ :return: (string_key, flattened)
+ string_key The serialized key for referencing this node
+ flattened A string with the serialized form for the contents
+ """
+ if reference_lists:
+ # TODO: Consider turning this back into the 'unoptimized' nested loop
+ # form. It is probably more obvious for most people, and this is
+ # just a reference implementation.
+ flattened_references = ['\r'.join(['\x00'.join(reference)
+ for reference in ref_list])
+ for ref_list in node[3]]
+ else:
+ flattened_references = []
+ string_key = '\x00'.join(node[1])
+ line = ("%s\x00%s\x00%s\n" % (string_key,
+ '\t'.join(flattened_references), node[2]))
+ return string_key, line
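+
+
+# As an illustration (placeholder values), with reference_lists set a node
+# (index, ('file-id',), 'value', ([('parent-id',)],)) flattens to the line
+#     'file-id\x00parent-id\x00value\n'
+# with '\t' separating reference lists and '\r' separating references inside
+# a single list.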
diff --git a/bzrlib/_btree_serializer_pyx.c b/bzrlib/_btree_serializer_pyx.c
new file mode 100644
index 0000000..2055782
--- /dev/null
+++ b/bzrlib/_btree_serializer_pyx.c
@@ -0,0 +1,9433 @@
+/* Generated by Cython 0.13 on Thu May 26 17:26:28 2011 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#else
+
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+
+#if PY_VERSION_HEX < 0x02040000
+ #define METH_COEXIST 0
+ #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
+ #define PyDict_Contains(d,o) PySequence_Contains(d,o)
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PY_FORMAT_SIZE_T ""
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+ #define PyNumber_Index(o) PyNumber_Int(o)
+ #define PyIndex_Check(o) PyNumber_Check(o)
+ #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+ #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
+ #define PyVarObject_HEAD_INIT(type, size) \
+ PyObject_HEAD_INIT(type) size,
+ #define PyType_Modified(t)
+
+ typedef struct {
+ void *buf;
+ PyObject *obj;
+ Py_ssize_t len;
+ Py_ssize_t itemsize;
+ int readonly;
+ int ndim;
+ char *format;
+ Py_ssize_t *shape;
+ Py_ssize_t *strides;
+ Py_ssize_t *suboffsets;
+ void *internal;
+ } Py_buffer;
+
+ #define PyBUF_SIMPLE 0
+ #define PyBUF_WRITABLE 0x0001
+ #define PyBUF_FORMAT 0x0004
+ #define PyBUF_ND 0x0008
+ #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+ #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+ #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+ #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+ #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+
+#endif
+
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_CHECKTYPES 0
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PyBytesObject PyStringObject
+ #define PyBytes_Type PyString_Type
+ #define PyBytes_Check PyString_Check
+ #define PyBytes_CheckExact PyString_CheckExact
+ #define PyBytes_FromString PyString_FromString
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
+ #define PyBytes_FromFormat PyString_FromFormat
+ #define PyBytes_DecodeEscape PyString_DecodeEscape
+ #define PyBytes_AsString PyString_AsString
+ #define PyBytes_AsStringAndSize PyString_AsStringAndSize
+ #define PyBytes_Size PyString_Size
+ #define PyBytes_AS_STRING PyString_AS_STRING
+ #define PyBytes_GET_SIZE PyString_GET_SIZE
+ #define PyBytes_Repr PyString_Repr
+ #define PyBytes_Concat PyString_Concat
+ #define PyBytes_ConcatAndDel PyString_ConcatAndDel
+ #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
+ #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+
+#ifndef PySet_CheckExact
+# define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
+#else
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_NAMESTR(n) ((char *)(n))
+ #define __Pyx_DOCSTR(n) ((char *)(n))
+#else
+ #define __Pyx_NAMESTR(n) (n)
+ #define __Pyx_DOCSTR(n) (n)
+#endif
+
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE_API__bzrlib___btree_serializer_pyx
+#include "python-compat.h"
+#include "stdlib.h"
+#include "string.h"
+#include "_static_tuple_c.h"
+
+/* inline attribute */
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+/* unused attribute */
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || defined(__INTEL_COMPILER)
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+
+/* Type Conversion Predeclarations */
+
+#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
+#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
+
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
+
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+
+
+#ifdef __GNUC__
+/* Test for GCC > 2.95 */
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* __GNUC__ > 2 ... */
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif /* __GNUC__ > 2 ... */
+#else /* __GNUC__ */
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "_btree_serializer_pyx.pyx",
+ "_static_tuple_c.pxd",
+};
+
+/* Type declarations */
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":337
+ * # One slightly ugly option would be to cache block offsets in a global.
+ * # However, that leads to thread-safety issues, etc.
+ * ctypedef struct gc_chk_sha1_record: # <<<<<<<<<<<<<<
+ * long long block_offset
+ * unsigned int block_length
+ */
+
+typedef struct {
+ PY_LONG_LONG block_offset;
+ unsigned int block_length;
+ unsigned int record_start;
+ unsigned int record_end;
+ char sha1[20];
+} __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record;
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":123
+ *
+ *
+ * cdef class BTreeLeafParser: # <<<<<<<<<<<<<<
+ * """Parse the leaf nodes of a BTree index.
+ *
+ */
+
+struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *__pyx_vtab;
+ PyObject *bytes;
+ int key_length;
+ int ref_list_length;
+ PyObject *keys;
+ char *_cur_str;
+ char *_end_str;
+ char *_start;
+ int _header_found;
+};
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":518
+ *
+ *
+ * cdef class GCCHKSHA1LeafNode: # <<<<<<<<<<<<<<
+ * """Track all the entries for a given leaf node."""
+ *
+ */
+
+struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_vtab;
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *records;
+ PyObject *last_key;
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *last_record;
+ int num_records;
+ unsigned char common_shift;
+ unsigned char offsets[257];
+};
+
+
+struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode {
+ StaticTuple *(*_record_to_value_and_refs)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *);
+ StaticTuple *(*_record_to_item)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *);
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *(*_lookup_record)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *);
+ int (*_count_records)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *, char *);
+ PyObject *(*_parse_bytes)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, PyObject *);
+ char *(*_parse_one_entry)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *, char *, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *);
+ int (*_offset_for_sha1)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *);
+ PyObject *(*_compute_common)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *);
+};
+static struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode;
+
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":123
+ *
+ *
+ * cdef class BTreeLeafParser: # <<<<<<<<<<<<<<
+ * """Parse the leaf nodes of a BTree index.
+ *
+ */
+
+struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser {
+ PyObject *(*extract_key)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *, char *);
+ int (*process_line)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *);
+};
+static struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *__pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser;
+
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct * __Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule((char *)modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+ end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+ }
+ #define __Pyx_RefNannySetupContext(name) void *__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+ #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r);} } while(0)
+#else
+ #define __Pyx_RefNannySetupContext(name)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+#endif /* CYTHON_REFNANNY */
+#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);} } while(0)
+#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r);} } while(0)
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name, PyObject* kw_name); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+
+static CYTHON_INLINE long __Pyx_div_long(long, long); /* proto */
+
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+
+
+#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_List_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) {
+ if (likely(o != Py_None)) {
+ if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) {
+ PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Tuple_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) {
+ if (likely(o != Py_None)) {
+ if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) {
+ PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+
+#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ }
+ else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ }
+ else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) {
+ r = PySequence_GetItem(o, i);
+ }
+ else {
+ r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+ }
+ return r;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
+
+static void __Pyx_WriteUnraisable(const char *name); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, long size, int strict); /*proto*/
+
+static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
+
+static void __Pyx_AddTraceback(const char *funcname); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+/* Module declarations from bzrlib._static_tuple_c */
+
+/* Module declarations from bzrlib._static_tuple_c */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = 0;
+/* Module declarations from bzrlib._btree_serializer_pyx */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode = 0;
+static int __pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[256];
+static char *__pyx_v_6bzrlib_21_btree_serializer_pyx__hexbuf;
+static void *__pyx_f_6bzrlib_21_btree_serializer_pyx__my_memrchr(void *, int, size_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size(char *, Py_ssize_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_safe_interned_string_from_size(char *, Py_ssize_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx__populate_unhexbuf(void); /*proto*/
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx__unhexlify_sha1(char *, char *); /*proto*/
+static void __pyx_f_6bzrlib_21_btree_serializer_pyx__hexlify_sha1(char *, char *); /*proto*/
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx__key_to_sha1(PyObject *, char *); /*proto*/
+static StaticTuple *__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key(char *); /*proto*/
+static unsigned int __pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_uint(char *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx__format_record(__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *); /*proto*/
+#define __Pyx_MODULE_NAME "bzrlib._btree_serializer_pyx"
+int __pyx_module_is_main_bzrlib___btree_serializer_pyx = 0;
+
+/* Implementation of bzrlib._btree_serializer_pyx */
+static PyObject *__pyx_builtin_AssertionError;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_KeyError;
+static PyObject *__pyx_builtin_TypeError;
+static char __pyx_k_1[] = "tried to create a string with an invalid size: %d @0x%x";
+static char __pyx_k_2[] = "invalid key, wanted segment from ";
+static char __pyx_k_3[] = "sha1:";
+static char __pyx_k_4[] = "last < self._start";
+static char __pyx_k_5[] = "type=leaf";
+static char __pyx_k_6[] = "Node did not start with \"type=leaf\": %r";
+static char __pyx_k_7[] = "Failed to find the value area";
+static char __pyx_k_8[] = " 0 0";
+static char __pyx_k_9[] = "invalid key, loop_counter != self.ref_list_length";
+static char __pyx_k_10[] = "unexpected reference data present";
+static char __pyx_k_11[] = "self.bytes is not a string.";
+static char __pyx_k_12[] = "not a 40-byte hex digest";
+static char __pyx_k_13[] = "not a 20-byte binary digest";
+static char __pyx_k_14[] = "sha1_bin must be a str of exactly 20 bytes";
+static char __pyx_k_15[] = "%s %u %u %u";
+static char __pyx_k_16[] = "%lu %u %u %u";
+static char __pyx_k_17[] = "_record_to_value_and_refs";
+static char __pyx_k_18[] = "key %r is not present";
+static char __pyx_k_19[] = "We only support parsing plain 8-bit strings.";
+static char __pyx_k_20[] = "type=leaf\n";
+static char __pyx_k_21[] = "bytes did not start with 'type=leaf\\n': %r";
+static char __pyx_k_22[] = "Something went wrong while parsing.";
+static char __pyx_k_23[] = "line did not start with sha1: %r";
+static char __pyx_k_24[] = "Line did not contain 40 hex bytes";
+static char __pyx_k_25[] = "We failed to unhexlify";
+static char __pyx_k_26[] = "only 1 null, not 2 as expected";
+static char __pyx_k_27[] = "Failed to parse block offset";
+static char __pyx_k_28[] = "Failed to parse block length";
+static char __pyx_k_29[] = "Failed to parse record end";
+static char __pyx_k_30[] = "We expected a tuple() or StaticTuple() for node not: %s";
+static char __pyx_k_31[] = "With ref_lists, we expected 4 entries not: %s";
+static char __pyx_k_32[] = "Without ref_lists, we need at least 3 entries not: %s";
+static char __pyx_k_33[] = "\000";
+static char __pyx_k_34[] = "We expect references to be tuples not: %s";
+static char __pyx_k_35[] = "We expect reference bits to be strings not: %s";
+static char __pyx_k_36[] = "Expected a plain str for value not: %s";
+static char __pyx_k_37[] = "Pyrex extensions to btree node parsing.";
+static char __pyx_k_38[] = "_py_unhexlify (line 386)";
+static char __pyx_k_39[] = "_py_hexlify (line 409)";
+static char __pyx_k_40[] = "_py_key_to_sha1 (line 446)";
+static char __pyx_k_41[] = "_py_sha1_to_key (line 482)";
+static char __pyx_k_42[] = "_parse_into_chk (line 838)";
+static char __pyx_k_43[] = "_flatten_node (line 845)";
+static char __pyx_k__sys[] = "sys";
+static char __pyx_k__join[] = "join";
+static char __pyx_k__keys[] = "keys";
+static char __pyx_k__node[] = "node";
+static char __pyx_k__sha1[] = "sha1";
+static char __pyx_k__bytes[] = "bytes";
+static char __pyx_k__parse[] = "parse";
+static char __pyx_k___start[] = "_start";
+static char __pyx_k__offsets[] = "offsets";
+static char __pyx_k__records[] = "records";
+static char __pyx_k__KeyError[] = "KeyError";
+static char __pyx_k____main__[] = "__main__";
+static char __pyx_k____test__[] = "__test__";
+static char __pyx_k___cur_str[] = "_cur_str";
+static char __pyx_k___end_str[] = "_end_str";
+static char __pyx_k__last_key[] = "last_key";
+static char __pyx_k__TypeError[] = "TypeError";
+static char __pyx_k__ValueError[] = "ValueError";
+static char __pyx_k__key_length[] = "key_length";
+static char __pyx_k__record_end[] = "record_end";
+static char __pyx_k___py_hexlify[] = "_py_hexlify";
+static char __pyx_k__extract_key[] = "extract_key";
+static char __pyx_k__last_record[] = "last_record";
+static char __pyx_k__num_records[] = "num_records";
+static char __pyx_k___parse_bytes[] = "_parse_bytes";
+static char __pyx_k__block_length[] = "block_length";
+static char __pyx_k__block_offset[] = "block_offset";
+static char __pyx_k__common_shift[] = "common_shift";
+static char __pyx_k__process_line[] = "process_line";
+static char __pyx_k__record_start[] = "record_start";
+static char __pyx_k___flatten_node[] = "_flatten_node";
+static char __pyx_k___header_found[] = "_header_found";
+static char __pyx_k___py_unhexlify[] = "_py_unhexlify";
+static char __pyx_k__AssertionError[] = "AssertionError";
+static char __pyx_k___count_records[] = "_count_records";
+static char __pyx_k___lookup_record[] = "_lookup_record";
+static char __pyx_k___compute_common[] = "_compute_common";
+static char __pyx_k___parse_into_chk[] = "_parse_into_chk";
+static char __pyx_k___py_key_to_sha1[] = "_py_key_to_sha1";
+static char __pyx_k___py_sha1_to_key[] = "_py_sha1_to_key";
+static char __pyx_k___record_to_item[] = "_record_to_item";
+static char __pyx_k__ref_list_length[] = "ref_list_length";
+static char __pyx_k__reference_lists[] = "reference_lists";
+static char __pyx_k__0123456789abcdef[] = "0123456789abcdef";
+static char __pyx_k___offset_for_sha1[] = "_offset_for_sha1";
+static char __pyx_k___parse_one_entry[] = "_parse_one_entry";
+static PyObject *__pyx_kp_s_1;
+static PyObject *__pyx_kp_s_10;
+static PyObject *__pyx_kp_s_11;
+static PyObject *__pyx_kp_s_12;
+static PyObject *__pyx_kp_s_13;
+static PyObject *__pyx_kp_s_14;
+static PyObject *__pyx_n_s_17;
+static PyObject *__pyx_kp_s_18;
+static PyObject *__pyx_kp_s_19;
+static PyObject *__pyx_kp_s_2;
+static PyObject *__pyx_kp_s_21;
+static PyObject *__pyx_kp_s_22;
+static PyObject *__pyx_kp_s_23;
+static PyObject *__pyx_kp_s_24;
+static PyObject *__pyx_kp_s_25;
+static PyObject *__pyx_kp_s_26;
+static PyObject *__pyx_kp_s_27;
+static PyObject *__pyx_kp_s_28;
+static PyObject *__pyx_kp_s_29;
+static PyObject *__pyx_kp_s_30;
+static PyObject *__pyx_kp_s_31;
+static PyObject *__pyx_kp_s_32;
+static PyObject *__pyx_kp_s_33;
+static PyObject *__pyx_kp_s_34;
+static PyObject *__pyx_kp_s_35;
+static PyObject *__pyx_kp_s_36;
+static PyObject *__pyx_kp_u_38;
+static PyObject *__pyx_kp_u_39;
+static PyObject *__pyx_kp_s_4;
+static PyObject *__pyx_kp_u_40;
+static PyObject *__pyx_kp_u_41;
+static PyObject *__pyx_kp_u_42;
+static PyObject *__pyx_kp_u_43;
+static PyObject *__pyx_kp_s_6;
+static PyObject *__pyx_kp_s_7;
+static PyObject *__pyx_kp_s_9;
+static PyObject *__pyx_n_s__AssertionError;
+static PyObject *__pyx_n_s__KeyError;
+static PyObject *__pyx_n_s__TypeError;
+static PyObject *__pyx_n_s__ValueError;
+static PyObject *__pyx_n_s____main__;
+static PyObject *__pyx_n_s____test__;
+static PyObject *__pyx_n_s___compute_common;
+static PyObject *__pyx_n_s___count_records;
+static PyObject *__pyx_n_s___cur_str;
+static PyObject *__pyx_n_s___end_str;
+static PyObject *__pyx_n_s___flatten_node;
+static PyObject *__pyx_n_s___header_found;
+static PyObject *__pyx_n_s___lookup_record;
+static PyObject *__pyx_n_s___offset_for_sha1;
+static PyObject *__pyx_n_s___parse_bytes;
+static PyObject *__pyx_n_s___parse_into_chk;
+static PyObject *__pyx_n_s___parse_one_entry;
+static PyObject *__pyx_n_s___py_hexlify;
+static PyObject *__pyx_n_s___py_key_to_sha1;
+static PyObject *__pyx_n_s___py_sha1_to_key;
+static PyObject *__pyx_n_s___py_unhexlify;
+static PyObject *__pyx_n_s___record_to_item;
+static PyObject *__pyx_n_s___start;
+static PyObject *__pyx_n_s__block_length;
+static PyObject *__pyx_n_s__block_offset;
+static PyObject *__pyx_n_s__bytes;
+static PyObject *__pyx_n_s__common_shift;
+static PyObject *__pyx_n_s__extract_key;
+static PyObject *__pyx_n_s__join;
+static PyObject *__pyx_n_s__key_length;
+static PyObject *__pyx_n_s__keys;
+static PyObject *__pyx_n_s__last_key;
+static PyObject *__pyx_n_s__last_record;
+static PyObject *__pyx_n_s__node;
+static PyObject *__pyx_n_s__num_records;
+static PyObject *__pyx_n_s__offsets;
+static PyObject *__pyx_n_s__parse;
+static PyObject *__pyx_n_s__process_line;
+static PyObject *__pyx_n_s__record_end;
+static PyObject *__pyx_n_s__record_start;
+static PyObject *__pyx_n_s__records;
+static PyObject *__pyx_n_s__ref_list_length;
+static PyObject *__pyx_n_s__reference_lists;
+static PyObject *__pyx_n_s__sha1;
+static PyObject *__pyx_n_s__sys;
+static PyObject *__pyx_int_0;
+static PyObject *__pyx_int_1;
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":81
+ *
+ * # TODO: Find some way to import this from _dirstate_helpers
+ * cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise # <<<<<<<<<<<<<<
+ * # memrchr seems to be a GNU extension, so we have to implement it ourselves
+ * # It is not present in any win32 standard library
+ */
+
+static void *__pyx_f_6bzrlib_21_btree_serializer_pyx__my_memrchr(void *__pyx_v_s, int __pyx_v_c, size_t __pyx_v_n) {
+ char *__pyx_v_pos;
+ char *__pyx_v_start;
+ void *__pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_my_memrchr");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":87
+ * cdef char *start
+ *
+ * start = <char*>s # <<<<<<<<<<<<<<
+ * pos = start + n - 1
+ * while pos >= start:
+ */
+ __pyx_v_start = ((char *)__pyx_v_s);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":88
+ *
+ * start = <char*>s
+ * pos = start + n - 1 # <<<<<<<<<<<<<<
+ * while pos >= start:
+ * if pos[0] == c:
+ */
+ __pyx_v_pos = ((__pyx_v_start + __pyx_v_n) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":89
+ * start = <char*>s
+ * pos = start + n - 1
+ * while pos >= start: # <<<<<<<<<<<<<<
+ * if pos[0] == c:
+ * return <void*>pos
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v_pos >= __pyx_v_start);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":90
+ * pos = start + n - 1
+ * while pos >= start:
+ * if pos[0] == c: # <<<<<<<<<<<<<<
+ * return <void*>pos
+ * pos = pos - 1
+ */
+ __pyx_t_1 = ((__pyx_v_pos[0]) == __pyx_v_c);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":91
+ * while pos >= start:
+ * if pos[0] == c:
+ * return <void*>pos # <<<<<<<<<<<<<<
+ * pos = pos - 1
+ * return NULL
+ */
+ __pyx_r = ((void *)__pyx_v_pos);
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":92
+ * if pos[0] == c:
+ * return <void*>pos
+ * pos = pos - 1 # <<<<<<<<<<<<<<
+ * return NULL
+ *
+ */
+ __pyx_v_pos = (__pyx_v_pos - 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":93
+ * return <void*>pos
+ * pos = pos - 1
+ * return NULL # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = NULL;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":97
+ *
+ * # TODO: Import this from _dirstate_helpers when it is merged
+ * cdef object safe_string_from_size(char *s, Py_ssize_t size): # <<<<<<<<<<<<<<
+ * if size < 0:
+ * raise AssertionError(
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size(char *__pyx_v_s, Py_ssize_t __pyx_v_size) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ __Pyx_RefNannySetupContext("safe_string_from_size");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":98
+ * # TODO: Import this from _dirstate_helpers when it is merged
+ * cdef object safe_string_from_size(char *s, Py_ssize_t size):
+ * if size < 0: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * 'tried to create a string with an invalid size: %d @0x%x'
+ */
+ __pyx_t_1 = (__pyx_v_size < 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":101
+ * raise AssertionError(
+ * 'tried to create a string with an invalid size: %d @0x%x'
+ * % (size, <int>s)) # <<<<<<<<<<<<<<
+ * return PyString_FromStringAndSize(s, size)
+ *
+ */
+ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyInt_FromLong(((int)__pyx_v_s)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":102
+ * 'tried to create a string with an invalid size: %d @0x%x'
+ * % (size, <int>s))
+ * return PyString_FromStringAndSize(s, size) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PyString_FromStringAndSize(__pyx_v_s, __pyx_v_size); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.safe_string_from_size");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":105
+ *
+ *
+ * cdef object safe_interned_string_from_size(char *s, Py_ssize_t size): # <<<<<<<<<<<<<<
+ * cdef PyObject *py_str
+ * if size < 0:
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_safe_interned_string_from_size(char *__pyx_v_s, Py_ssize_t __pyx_v_size) {
+ PyObject *__pyx_v_py_str;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ __Pyx_RefNannySetupContext("safe_interned_string_from_size");
+ __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":107
+ * cdef object safe_interned_string_from_size(char *s, Py_ssize_t size):
+ * cdef PyObject *py_str
+ * if size < 0: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * 'tried to create a string with an invalid size: %d @0x%x'
+ */
+ __pyx_t_1 = (__pyx_v_size < 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":110
+ * raise AssertionError(
+ * 'tried to create a string with an invalid size: %d @0x%x'
+ * % (size, <int>s)) # <<<<<<<<<<<<<<
+ * py_str = PyString_FromStringAndSize_ptr(s, size)
+ * PyString_InternInPlace(&py_str)
+ */
+ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyInt_FromLong(((int)__pyx_v_s)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":111
+ * 'tried to create a string with an invalid size: %d @0x%x'
+ * % (size, <int>s))
+ * py_str = PyString_FromStringAndSize_ptr(s, size) # <<<<<<<<<<<<<<
+ * PyString_InternInPlace(&py_str)
+ * result = <object>py_str
+ */
+ __pyx_v_py_str = PyString_FromStringAndSize(__pyx_v_s, __pyx_v_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":112
+ * % (size, <int>s))
+ * py_str = PyString_FromStringAndSize_ptr(s, size)
+ * PyString_InternInPlace(&py_str) # <<<<<<<<<<<<<<
+ * result = <object>py_str
+ * # Casting a PyObject* to an <object> triggers an INCREF from Pyrex, so we
+ */
+ PyString_InternInPlace((&__pyx_v_py_str));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":113
+ * py_str = PyString_FromStringAndSize_ptr(s, size)
+ * PyString_InternInPlace(&py_str)
+ * result = <object>py_str # <<<<<<<<<<<<<<
+ * # Casting a PyObject* to an <object> triggers an INCREF from Pyrex, so we
+ * # DECREF it to avoid getting immortal strings
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_v_py_str));
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = ((PyObject *)__pyx_v_py_str);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":116
+ * # Casting a PyObject* to an <object> triggers an INCREF from Pyrex, so we
+ * # DECREF it to avoid getting immortal strings
+ * Py_DECREF_ptr(py_str) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ Py_DECREF(__pyx_v_py_str);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":117
+ * # DECREF it to avoid getting immortal strings
+ * Py_DECREF_ptr(py_str)
+ * return result # <<<<<<<<<<<<<<
+ *
+ * # This sets up the StaticTuple C_API functionality
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.safe_interned_string_from_size");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
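+
+#if 0
+/* Illustrative sketch only (hypothetical name, Python 2 C API); the generated
+ * helpers above are the authoritative implementation.  It shows what the pair
+ * of helpers boils down to: bounds-check the size, build the string, intern it
+ * in place, and hand the caller exactly one reference.  The extra
+ * INCREF/DECREF pair in the generated code only compensates for Pyrex's cast
+ * semantics; a hand-written version needs no such balancing.
+ */
+static PyObject *
+sketch_safe_interned_string(const char *s, Py_ssize_t size)
+{
+    PyObject *str;
+    if (size < 0) {
+        /* mirrors the AssertionError raised by the generated helpers */
+        PyErr_SetString(PyExc_AssertionError,
+                        "tried to create a string with an invalid size");
+        return NULL;
+    }
+    str = PyString_FromStringAndSize(s, size);  /* new reference, or NULL */
+    if (str == NULL)
+        return NULL;
+    PyString_InternInPlace(&str);  /* swaps str for the shared interned copy */
+    return str;                    /* caller owns exactly one reference */
+}
+#endif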
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":153
+ * cdef int _header_found
+ *
+ * def __init__(self, bytes, key_length, ref_list_length): # <<<<<<<<<<<<<<
+ * self.bytes = bytes
+ * self.key_length = key_length
+ */
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ PyObject *__pyx_v_key_length = 0;
+ PyObject *__pyx_v_ref_list_length = 0;
+ int __pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__bytes,&__pyx_n_s__key_length,&__pyx_n_s__ref_list_length,0};
+ __Pyx_RefNannySetupContext("__init__");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[3] = {0,0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__key_length);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ref_list_length);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_bytes = values[0];
+ __pyx_v_key_length = values[1];
+ __pyx_v_ref_list_length = values[2];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_key_length = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_ref_list_length = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.BTreeLeafParser.__init__");
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":154
+ *
+ * def __init__(self, bytes, key_length, ref_list_length):
+ * self.bytes = bytes # <<<<<<<<<<<<<<
+ * self.key_length = key_length
+ * self.ref_list_length = ref_list_length
+ */
+ __Pyx_INCREF(__pyx_v_bytes);
+ __Pyx_GIVEREF(__pyx_v_bytes);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->bytes);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->bytes);
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->bytes = __pyx_v_bytes;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":155
+ * def __init__(self, bytes, key_length, ref_list_length):
+ * self.bytes = bytes
+ * self.key_length = key_length # <<<<<<<<<<<<<<
+ * self.ref_list_length = ref_list_length
+ * self.keys = []
+ */
+ __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_key_length); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->key_length = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":156
+ * self.bytes = bytes
+ * self.key_length = key_length
+ * self.ref_list_length = ref_list_length # <<<<<<<<<<<<<<
+ * self.keys = []
+ * self._cur_str = NULL
+ */
+ __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_ref_list_length); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->ref_list_length = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":157
+ * self.key_length = key_length
+ * self.ref_list_length = ref_list_length
+ * self.keys = [] # <<<<<<<<<<<<<<
+ * self._cur_str = NULL
+ * self._end_str = NULL
+ */
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->keys);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->keys);
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->keys = ((PyObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":158
+ * self.ref_list_length = ref_list_length
+ * self.keys = []
+ * self._cur_str = NULL # <<<<<<<<<<<<<<
+ * self._end_str = NULL
+ * self._header_found = 0
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_cur_str = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":159
+ * self.keys = []
+ * self._cur_str = NULL
+ * self._end_str = NULL # <<<<<<<<<<<<<<
+ * self._header_found = 0
+ * # keys are tuples
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_end_str = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":160
+ * self._cur_str = NULL
+ * self._end_str = NULL
+ * self._header_found = 0 # <<<<<<<<<<<<<<
+ * # keys are tuples
+ *
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_header_found = 0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.BTreeLeafParser.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":163
+ * # keys are tuples
+ *
+ * cdef extract_key(self, char * last): # <<<<<<<<<<<<<<
+ * """Extract a key.
+ *
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_extract_key(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *__pyx_v_self, char *__pyx_v_last) {
+ char *__pyx_v_temp_ptr;
+ int __pyx_v_loop_counter;
+ StaticTuple *__pyx_v_key;
+ PyObject *__pyx_v_failure_string;
+ PyObject *__pyx_v_key_element;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ __Pyx_RefNannySetupContext("extract_key");
+ __pyx_v_key = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_failure_string = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_key_element = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":173
+ * cdef StaticTuple key
+ *
+ * key = StaticTuple_New(self.key_length) # <<<<<<<<<<<<<<
+ * for loop_counter from 0 <= loop_counter < self.key_length:
+ * # grab a key segment
+ */
+ __pyx_t_1 = ((PyObject *)StaticTuple_New(__pyx_v_self->key_length)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_key));
+ __pyx_v_key = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":174
+ *
+ * key = StaticTuple_New(self.key_length)
+ * for loop_counter from 0 <= loop_counter < self.key_length: # <<<<<<<<<<<<<<
+ * # grab a key segment
+ * temp_ptr = <char*>memchr(self._start, c'\0', last - self._start)
+ */
+ __pyx_t_2 = __pyx_v_self->key_length;
+ for (__pyx_v_loop_counter = 0; __pyx_v_loop_counter < __pyx_t_2; __pyx_v_loop_counter++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":176
+ * for loop_counter from 0 <= loop_counter < self.key_length:
+ * # grab a key segment
+ * temp_ptr = <char*>memchr(self._start, c'\0', last - self._start) # <<<<<<<<<<<<<<
+ * if temp_ptr == NULL:
+ * if loop_counter + 1 == self.key_length:
+ */
+ __pyx_v_temp_ptr = ((char *)memchr(__pyx_v_self->_start, '\x00', (__pyx_v_last - __pyx_v_self->_start)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":177
+ * # grab a key segment
+ * temp_ptr = <char*>memchr(self._start, c'\0', last - self._start)
+ * if temp_ptr == NULL: # <<<<<<<<<<<<<<
+ * if loop_counter + 1 == self.key_length:
+ * # capture to last
+ */
+ __pyx_t_3 = (__pyx_v_temp_ptr == NULL);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":178
+ * temp_ptr = <char*>memchr(self._start, c'\0', last - self._start)
+ * if temp_ptr == NULL:
+ * if loop_counter + 1 == self.key_length: # <<<<<<<<<<<<<<
+ * # capture to last
+ * temp_ptr = last
+ */
+ __pyx_t_3 = ((__pyx_v_loop_counter + 1) == __pyx_v_self->key_length);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":180
+ * if loop_counter + 1 == self.key_length:
+ * # capture to last
+ * temp_ptr = last # <<<<<<<<<<<<<<
+ * else:
+ * # Invalid line
+ */
+ __pyx_v_temp_ptr = __pyx_v_last;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":185
+ * failure_string = ("invalid key, wanted segment from " +
+ * repr(safe_string_from_size(self._start,
+ * last - self._start))) # <<<<<<<<<<<<<<
+ * raise AssertionError(failure_string)
+ * # capture the key string
+ */
+ __pyx_t_1 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size(__pyx_v_self->_start, (__pyx_v_last - __pyx_v_self->_start)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyObject_Repr(__pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyNumber_Add(((PyObject *)__pyx_kp_s_2), __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_failure_string);
+ __pyx_v_failure_string = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":186
+ * repr(safe_string_from_size(self._start,
+ * last - self._start)))
+ * raise AssertionError(failure_string) # <<<<<<<<<<<<<<
+ * # capture the key string
+ * if (self.key_length == 1
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_failure_string);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_failure_string);
+ __Pyx_GIVEREF(__pyx_v_failure_string);
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L6:;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":188
+ * raise AssertionError(failure_string)
+ * # capture the key string
+ * if (self.key_length == 1 # <<<<<<<<<<<<<<
+ * and (temp_ptr - self._start) == 45
+ * and strncmp(self._start, 'sha1:', 5) == 0):
+ */
+ __pyx_t_3 = (__pyx_v_self->key_length == 1);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":189
+ * # capture the key string
+ * if (self.key_length == 1
+ * and (temp_ptr - self._start) == 45 # <<<<<<<<<<<<<<
+ * and strncmp(self._start, 'sha1:', 5) == 0):
+ * key_element = safe_string_from_size(self._start,
+ */
+ __pyx_t_5 = ((__pyx_v_temp_ptr - __pyx_v_self->_start) == 45);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":190
+ * if (self.key_length == 1
+ * and (temp_ptr - self._start) == 45
+ * and strncmp(self._start, 'sha1:', 5) == 0): # <<<<<<<<<<<<<<
+ * key_element = safe_string_from_size(self._start,
+ * temp_ptr - self._start)
+ */
+ __pyx_t_6 = (strncmp(__pyx_v_self->_start, __pyx_k_3, 5) == 0);
+ __pyx_t_7 = __pyx_t_6;
+ } else {
+ __pyx_t_7 = __pyx_t_5;
+ }
+ __pyx_t_5 = __pyx_t_7;
+ } else {
+ __pyx_t_5 = __pyx_t_3;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":192
+ * and strncmp(self._start, 'sha1:', 5) == 0):
+ * key_element = safe_string_from_size(self._start,
+ * temp_ptr - self._start) # <<<<<<<<<<<<<<
+ * else:
+ * key_element = safe_interned_string_from_size(self._start,
+ */
+ __pyx_t_4 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size(__pyx_v_self->_start, (__pyx_v_temp_ptr - __pyx_v_self->_start)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_key_element);
+ __pyx_v_key_element = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":195
+ * else:
+ * key_element = safe_interned_string_from_size(self._start,
+ * temp_ptr - self._start) # <<<<<<<<<<<<<<
+ * # advance our pointer
+ * self._start = temp_ptr + 1
+ */
+ __pyx_t_4 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_interned_string_from_size(__pyx_v_self->_start, (__pyx_v_temp_ptr - __pyx_v_self->_start)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_key_element);
+ __pyx_v_key_element = __pyx_t_4;
+ __pyx_t_4 = 0;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":197
+ * temp_ptr - self._start)
+ * # advance our pointer
+ * self._start = temp_ptr + 1 # <<<<<<<<<<<<<<
+ * Py_INCREF(key_element)
+ * StaticTuple_SET_ITEM(key, loop_counter, key_element)
+ */
+ __pyx_v_self->_start = (__pyx_v_temp_ptr + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":198
+ * # advance our pointer
+ * self._start = temp_ptr + 1
+ * Py_INCREF(key_element) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(key, loop_counter, key_element)
+ * key = StaticTuple_Intern(key)
+ */
+ Py_INCREF(__pyx_v_key_element);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":199
+ * self._start = temp_ptr + 1
+ * Py_INCREF(key_element)
+ * StaticTuple_SET_ITEM(key, loop_counter, key_element) # <<<<<<<<<<<<<<
+ * key = StaticTuple_Intern(key)
+ * return key
+ */
+ StaticTuple_SET_ITEM(__pyx_v_key, __pyx_v_loop_counter, __pyx_v_key_element);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":200
+ * Py_INCREF(key_element)
+ * StaticTuple_SET_ITEM(key, loop_counter, key_element)
+ * key = StaticTuple_Intern(key) # <<<<<<<<<<<<<<
+ * return key
+ *
+ */
+ __pyx_t_4 = ((PyObject *)StaticTuple_Intern(__pyx_v_key)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(((PyObject *)__pyx_v_key));
+ __pyx_v_key = ((StaticTuple *)__pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":201
+ * StaticTuple_SET_ITEM(key, loop_counter, key_element)
+ * key = StaticTuple_Intern(key)
+ * return key # <<<<<<<<<<<<<<
+ *
+ * cdef int process_line(self) except -1:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_key));
+ __pyx_r = ((PyObject *)__pyx_v_key);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.BTreeLeafParser.extract_key");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_key);
+ __Pyx_DECREF(__pyx_v_failure_string);
+ __Pyx_DECREF(__pyx_v_key_element);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
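+
+#if 0
+/* Illustrative sketch only (hypothetical name): the memchr()-based scan that
+ * extract_key() above performs over a '\0'-separated key, shown without the
+ * StaticTuple packing, the interning, or the sha1-key special case.
+ */
+static int
+sketch_count_key_segments(const char *start, const char *last)
+{
+    int segments = 0;
+    while (start < last) {
+        const char *sep = (const char *)memchr(start, '\0', last - start);
+        if (sep == NULL)
+            sep = last;     /* final segment runs to the end of the line */
+        segments++;
+        start = sep + 1;    /* step past the '\0' separator */
+    }
+    return segments;
+}
+#endif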
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":203
+ * return key
+ *
+ * cdef int process_line(self) except -1: # <<<<<<<<<<<<<<
+ * """Process a line in the bytes."""
+ * cdef char *last
+ */
+
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_process_line(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *__pyx_v_self) {
+ char *__pyx_v_last;
+ char *__pyx_v_temp_ptr;
+ char *__pyx_v_ref_ptr;
+ char *__pyx_v_next_start;
+ int __pyx_v_loop_counter;
+ Py_ssize_t __pyx_v_str_len;
+ PyObject *__pyx_v_key;
+ PyObject *__pyx_v_value;
+ StaticTuple *__pyx_v_ref_lists;
+ PyObject *__pyx_v_ref_list;
+ StaticTuple *__pyx_v_node_value;
+ int __pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ __Pyx_RefNannySetupContext("process_line");
+ __pyx_v_key = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_value = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_ref_lists = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_ref_list = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_node_value = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":212
+ * cdef Py_ssize_t str_len
+ *
+ * self._start = self._cur_str # <<<<<<<<<<<<<<
+ * # Find the next newline
+ * last = <char*>memchr(self._start, c'\n', self._end_str - self._start)
+ */
+ __pyx_v_self->_start = __pyx_v_self->_cur_str;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":214
+ * self._start = self._cur_str
+ * # Find the next newline
+ * last = <char*>memchr(self._start, c'\n', self._end_str - self._start) # <<<<<<<<<<<<<<
+ * if last == NULL:
+ * # Process until the end of the file
+ */
+ __pyx_v_last = ((char *)memchr(__pyx_v_self->_start, '\n', (__pyx_v_self->_end_str - __pyx_v_self->_start)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":215
+ * # Find the next newline
+ * last = <char*>memchr(self._start, c'\n', self._end_str - self._start)
+ * if last == NULL: # <<<<<<<<<<<<<<
+ * # Process until the end of the file
+ * last = self._end_str
+ */
+ __pyx_t_1 = (__pyx_v_last == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":217
+ * if last == NULL:
+ * # Process until the end of the file
+ * last = self._end_str # <<<<<<<<<<<<<<
+ * self._cur_str = self._end_str
+ * else:
+ */
+ __pyx_v_last = __pyx_v_self->_end_str;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":218
+ * # Process until the end of the file
+ * last = self._end_str
+ * self._cur_str = self._end_str # <<<<<<<<<<<<<<
+ * else:
+ * # And the next string is right after it
+ */
+ __pyx_v_self->_cur_str = __pyx_v_self->_end_str;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":221
+ * else:
+ * # And the next string is right after it
+ * self._cur_str = last + 1 # <<<<<<<<<<<<<<
+ * # The last character is right before the '\n'
+ *
+ */
+ __pyx_v_self->_cur_str = (__pyx_v_last + 1);
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":224
+ * # The last character is right before the '\n'
+ *
+ * if last == self._start: # <<<<<<<<<<<<<<
+ * # parsed it all.
+ * return 0
+ */
+ __pyx_t_1 = (__pyx_v_last == __pyx_v_self->_start);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":226
+ * if last == self._start:
+ * # parsed it all.
+ * return 0 # <<<<<<<<<<<<<<
+ * if last < self._start:
+ * # Unexpected error condition - fail
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":227
+ * # parsed it all.
+ * return 0
+ * if last < self._start: # <<<<<<<<<<<<<<
+ * # Unexpected error condition - fail
+ * raise AssertionError("last < self._start")
+ */
+ __pyx_t_1 = (__pyx_v_last < __pyx_v_self->_start);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":229
+ * if last < self._start:
+ * # Unexpected error condition - fail
+ * raise AssertionError("last < self._start") # <<<<<<<<<<<<<<
+ * if 0 == self._header_found:
+ * # The first line in a leaf node is the header "type=leaf\n"
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_4));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":230
+ * # Unexpected error condition - fail
+ * raise AssertionError("last < self._start")
+ * if 0 == self._header_found: # <<<<<<<<<<<<<<
+ * # The first line in a leaf node is the header "type=leaf\n"
+ * if strncmp("type=leaf", self._start, last - self._start) == 0:
+ */
+ __pyx_t_1 = (0 == __pyx_v_self->_header_found);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":232
+ * if 0 == self._header_found:
+ * # The first line in a leaf node is the header "type=leaf\n"
+ * if strncmp("type=leaf", self._start, last - self._start) == 0: # <<<<<<<<<<<<<<
+ * self._header_found = 1
+ * return 0
+ */
+ __pyx_t_1 = (strncmp(__pyx_k_5, __pyx_v_self->_start, (__pyx_v_last - __pyx_v_self->_start)) == 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":233
+ * # The first line in a leaf node is the header "type=leaf\n"
+ * if strncmp("type=leaf", self._start, last - self._start) == 0:
+ * self._header_found = 1 # <<<<<<<<<<<<<<
+ * return 0
+ * else:
+ */
+ __pyx_v_self->_header_found = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":234
+ * if strncmp("type=leaf", self._start, last - self._start) == 0:
+ * self._header_found = 1
+ * return 0 # <<<<<<<<<<<<<<
+ * else:
+ * raise AssertionError('Node did not start with "type=leaf": %r'
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":237
+ * else:
+ * raise AssertionError('Node did not start with "type=leaf": %r'
+ * % (safe_string_from_size(self._start, last - self._start))) # <<<<<<<<<<<<<<
+ *
+ * key = self.extract_key(last)
+ */
+ __pyx_t_3 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size(__pyx_v_self->_start, (__pyx_v_last - __pyx_v_self->_start)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_6), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L7:;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":239
+ * % (safe_string_from_size(self._start, last - self._start)))
+ *
+ * key = self.extract_key(last) # <<<<<<<<<<<<<<
+ * # find the value area
+ * temp_ptr = <char*>_my_memrchr(self._start, c'\0', last - self._start)
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self->__pyx_vtab)->extract_key(__pyx_v_self, __pyx_v_last); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_key);
+ __pyx_v_key = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":241
+ * key = self.extract_key(last)
+ * # find the value area
+ * temp_ptr = <char*>_my_memrchr(self._start, c'\0', last - self._start) # <<<<<<<<<<<<<<
+ * if temp_ptr == NULL:
+ * # Invalid line
+ */
+ __pyx_v_temp_ptr = ((char *)__pyx_f_6bzrlib_21_btree_serializer_pyx__my_memrchr(__pyx_v_self->_start, '\x00', (__pyx_v_last - __pyx_v_self->_start)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":242
+ * # find the value area
+ * temp_ptr = <char*>_my_memrchr(self._start, c'\0', last - self._start)
+ * if temp_ptr == NULL: # <<<<<<<<<<<<<<
+ * # Invalid line
+ * raise AssertionError("Failed to find the value area")
+ */
+ __pyx_t_1 = (__pyx_v_temp_ptr == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":244
+ * if temp_ptr == NULL:
+ * # Invalid line
+ * raise AssertionError("Failed to find the value area") # <<<<<<<<<<<<<<
+ * else:
+ * # Because of how conversions were done, we ended up with *lots* of
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_7));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_7));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":252
+ * # something like '12607215 328306 0 0', which ends up consuming 1MB
+ * # of memory, just for those strings.
+ * str_len = last - temp_ptr - 1 # <<<<<<<<<<<<<<
+ * if (str_len > 4
+ * and strncmp(" 0 0", last - 4, 4) == 0):
+ */
+ __pyx_v_str_len = ((__pyx_v_last - __pyx_v_temp_ptr) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":253
+ * # of memory, just for those strings.
+ * str_len = last - temp_ptr - 1
+ * if (str_len > 4 # <<<<<<<<<<<<<<
+ * and strncmp(" 0 0", last - 4, 4) == 0):
+ * # This drops peak mem for bzr.dev from 87.4MB => 86.2MB
+ */
+ __pyx_t_1 = (__pyx_v_str_len > 4);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":254
+ * str_len = last - temp_ptr - 1
+ * if (str_len > 4
+ * and strncmp(" 0 0", last - 4, 4) == 0): # <<<<<<<<<<<<<<
+ * # This drops peak mem for bzr.dev from 87.4MB => 86.2MB
+ * # For Launchpad 236MB => 232MB
+ */
+ __pyx_t_4 = (strncmp(__pyx_k_8, (__pyx_v_last - 4), 4) == 0);
+ __pyx_t_5 = __pyx_t_4;
+ } else {
+ __pyx_t_5 = __pyx_t_1;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":257
+ * # This drops peak mem for bzr.dev from 87.4MB => 86.2MB
+ * # For Launchpad 236MB => 232MB
+ * value = safe_interned_string_from_size(temp_ptr + 1, str_len) # <<<<<<<<<<<<<<
+ * else:
+ * value = safe_string_from_size(temp_ptr + 1, str_len)
+ */
+ __pyx_t_3 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_interned_string_from_size((__pyx_v_temp_ptr + 1), __pyx_v_str_len); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L9;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":259
+ * value = safe_interned_string_from_size(temp_ptr + 1, str_len)
+ * else:
+ * value = safe_string_from_size(temp_ptr + 1, str_len) # <<<<<<<<<<<<<<
+ * # shrink the references end point
+ * last = temp_ptr
+ */
+ __pyx_t_3 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size((__pyx_v_temp_ptr + 1), __pyx_v_str_len); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_t_3;
+ __pyx_t_3 = 0;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":261
+ * value = safe_string_from_size(temp_ptr + 1, str_len)
+ * # shrink the references end point
+ * last = temp_ptr # <<<<<<<<<<<<<<
+ *
+ * if self.ref_list_length:
+ */
+ __pyx_v_last = __pyx_v_temp_ptr;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":263
+ * last = temp_ptr
+ *
+ * if self.ref_list_length: # <<<<<<<<<<<<<<
+ * ref_lists = StaticTuple_New(self.ref_list_length)
+ * loop_counter = 0
+ */
+ if (__pyx_v_self->ref_list_length) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":264
+ *
+ * if self.ref_list_length:
+ * ref_lists = StaticTuple_New(self.ref_list_length) # <<<<<<<<<<<<<<
+ * loop_counter = 0
+ * while loop_counter < self.ref_list_length:
+ */
+ __pyx_t_3 = ((PyObject *)StaticTuple_New(__pyx_v_self->ref_list_length)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_v_ref_lists));
+ __pyx_v_ref_lists = ((StaticTuple *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":265
+ * if self.ref_list_length:
+ * ref_lists = StaticTuple_New(self.ref_list_length)
+ * loop_counter = 0 # <<<<<<<<<<<<<<
+ * while loop_counter < self.ref_list_length:
+ * ref_list = []
+ */
+ __pyx_v_loop_counter = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":266
+ * ref_lists = StaticTuple_New(self.ref_list_length)
+ * loop_counter = 0
+ * while loop_counter < self.ref_list_length: # <<<<<<<<<<<<<<
+ * ref_list = []
+ * # extract a reference list
+ */
+ while (1) {
+ __pyx_t_5 = (__pyx_v_loop_counter < __pyx_v_self->ref_list_length);
+ if (!__pyx_t_5) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":267
+ * loop_counter = 0
+ * while loop_counter < self.ref_list_length:
+ * ref_list = [] # <<<<<<<<<<<<<<
+ * # extract a reference list
+ * loop_counter = loop_counter + 1
+ */
+ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_v_ref_list);
+ __pyx_v_ref_list = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":269
+ * ref_list = []
+ * # extract a reference list
+ * loop_counter = loop_counter + 1 # <<<<<<<<<<<<<<
+ * if last < self._start:
+ * raise AssertionError("last < self._start")
+ */
+ __pyx_v_loop_counter = (__pyx_v_loop_counter + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":270
+ * # extract a reference list
+ * loop_counter = loop_counter + 1
+ * if last < self._start: # <<<<<<<<<<<<<<
+ * raise AssertionError("last < self._start")
+ * # find the next reference list end point:
+ */
+ __pyx_t_5 = (__pyx_v_last < __pyx_v_self->_start);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":271
+ * loop_counter = loop_counter + 1
+ * if last < self._start:
+ * raise AssertionError("last < self._start") # <<<<<<<<<<<<<<
+ * # find the next reference list end point:
+ * temp_ptr = <char*>memchr(self._start, c'\t', last - self._start)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_4));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":273
+ * raise AssertionError("last < self._start")
+ * # find the next reference list end point:
+ * temp_ptr = <char*>memchr(self._start, c'\t', last - self._start) # <<<<<<<<<<<<<<
+ * if temp_ptr == NULL:
+ * # Only valid for the last list
+ */
+ __pyx_v_temp_ptr = ((char *)memchr(__pyx_v_self->_start, '\t', (__pyx_v_last - __pyx_v_self->_start)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":274
+ * # find the next reference list end point:
+ * temp_ptr = <char*>memchr(self._start, c'\t', last - self._start)
+ * if temp_ptr == NULL: # <<<<<<<<<<<<<<
+ * # Only valid for the last list
+ * if loop_counter != self.ref_list_length:
+ */
+ __pyx_t_5 = (__pyx_v_temp_ptr == NULL);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":276
+ * if temp_ptr == NULL:
+ * # Only valid for the last list
+ * if loop_counter != self.ref_list_length: # <<<<<<<<<<<<<<
+ * # Invalid line
+ * raise AssertionError(
+ */
+ __pyx_t_5 = (__pyx_v_loop_counter != __pyx_v_self->ref_list_length);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":278
+ * if loop_counter != self.ref_list_length:
+ * # Invalid line
+ * raise AssertionError( # <<<<<<<<<<<<<<
+ * "invalid key, loop_counter != self.ref_list_length")
+ * else:
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_9));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L15;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":282
+ * else:
+ * # scan to the end of the ref list area
+ * ref_ptr = last # <<<<<<<<<<<<<<
+ * next_start = last
+ * else:
+ */
+ __pyx_v_ref_ptr = __pyx_v_last;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":283
+ * # scan to the end of the ref list area
+ * ref_ptr = last
+ * next_start = last # <<<<<<<<<<<<<<
+ * else:
+ * # scan to the end of this ref list
+ */
+ __pyx_v_next_start = __pyx_v_last;
+ }
+ __pyx_L15:;
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":286
+ * else:
+ * # scan to the end of this ref list
+ * ref_ptr = temp_ptr # <<<<<<<<<<<<<<
+ * next_start = temp_ptr + 1
+ * # Now, there may be multiple keys in the ref list.
+ */
+ __pyx_v_ref_ptr = __pyx_v_temp_ptr;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":287
+ * # scan to the end of this ref list
+ * ref_ptr = temp_ptr
+ * next_start = temp_ptr + 1 # <<<<<<<<<<<<<<
+ * # Now, there may be multiple keys in the ref list.
+ * while self._start < ref_ptr:
+ */
+ __pyx_v_next_start = (__pyx_v_temp_ptr + 1);
+ }
+ __pyx_L14:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":289
+ * next_start = temp_ptr + 1
+ * # Now, there may be multiple keys in the ref list.
+ * while self._start < ref_ptr: # <<<<<<<<<<<<<<
+ * # loop finding keys and extracting them
+ * temp_ptr = <char*>memchr(self._start, c'\r',
+ */
+ while (1) {
+ __pyx_t_5 = (__pyx_v_self->_start < __pyx_v_ref_ptr);
+ if (!__pyx_t_5) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":292
+ * # loop finding keys and extracting them
+ * temp_ptr = <char*>memchr(self._start, c'\r',
+ * ref_ptr - self._start) # <<<<<<<<<<<<<<
+ * if temp_ptr == NULL:
+ * # key runs to the end
+ */
+ __pyx_v_temp_ptr = ((char *)memchr(__pyx_v_self->_start, '\r', (__pyx_v_ref_ptr - __pyx_v_self->_start)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":293
+ * temp_ptr = <char*>memchr(self._start, c'\r',
+ * ref_ptr - self._start)
+ * if temp_ptr == NULL: # <<<<<<<<<<<<<<
+ * # key runs to the end
+ * temp_ptr = ref_ptr
+ */
+ __pyx_t_5 = (__pyx_v_temp_ptr == NULL);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":295
+ * if temp_ptr == NULL:
+ * # key runs to the end
+ * temp_ptr = ref_ptr # <<<<<<<<<<<<<<
+ *
+ * PyList_Append(ref_list, self.extract_key(temp_ptr))
+ */
+ __pyx_v_temp_ptr = __pyx_v_ref_ptr;
+ goto __pyx_L18;
+ }
+ __pyx_L18:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":297
+ * temp_ptr = ref_ptr
+ *
+ * PyList_Append(ref_list, self.extract_key(temp_ptr)) # <<<<<<<<<<<<<<
+ * ref_list = StaticTuple_Intern(StaticTuple(*ref_list))
+ * Py_INCREF(ref_list)
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self->__pyx_vtab)->extract_key(__pyx_v_self, __pyx_v_temp_ptr); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 297; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = PyList_Append(__pyx_v_ref_list, __pyx_t_3); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 297; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":298
+ *
+ * PyList_Append(ref_list, self.extract_key(temp_ptr))
+ * ref_list = StaticTuple_Intern(StaticTuple(*ref_list)) # <<<<<<<<<<<<<<
+ * Py_INCREF(ref_list)
+ * StaticTuple_SET_ITEM(ref_lists, loop_counter - 1, ref_list)
+ */
+ __pyx_t_3 = PySequence_Tuple(__pyx_v_ref_list); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_t_3 = ((PyObject *)StaticTuple_Intern(((StaticTuple *)__pyx_t_2))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_v_ref_list);
+ __pyx_v_ref_list = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":299
+ * PyList_Append(ref_list, self.extract_key(temp_ptr))
+ * ref_list = StaticTuple_Intern(StaticTuple(*ref_list))
+ * Py_INCREF(ref_list) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(ref_lists, loop_counter - 1, ref_list)
+ * # prepare for the next reference list
+ */
+ Py_INCREF(__pyx_v_ref_list);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":300
+ * ref_list = StaticTuple_Intern(StaticTuple(*ref_list))
+ * Py_INCREF(ref_list)
+ * StaticTuple_SET_ITEM(ref_lists, loop_counter - 1, ref_list) # <<<<<<<<<<<<<<
+ * # prepare for the next reference list
+ * self._start = next_start
+ */
+ StaticTuple_SET_ITEM(__pyx_v_ref_lists, (__pyx_v_loop_counter - 1), __pyx_v_ref_list);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":302
+ * StaticTuple_SET_ITEM(ref_lists, loop_counter - 1, ref_list)
+ * # prepare for the next reference list
+ * self._start = next_start # <<<<<<<<<<<<<<
+ * node_value = StaticTuple(value, ref_lists)
+ * else:
+ */
+ __pyx_v_self->_start = __pyx_v_next_start;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":303
+ * # prepare for the next reference list
+ * self._start = next_start
+ * node_value = StaticTuple(value, ref_lists) # <<<<<<<<<<<<<<
+ * else:
+ * if last != self._start:
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_value);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_value);
+ __Pyx_GIVEREF(__pyx_v_value);
+ __Pyx_INCREF(((PyObject *)__pyx_v_ref_lists));
+ PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_ref_lists));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_ref_lists));
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_node_value));
+ __pyx_v_node_value = ((StaticTuple *)__pyx_t_2);
+ __pyx_t_2 = 0;
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":305
+ * node_value = StaticTuple(value, ref_lists)
+ * else:
+ * if last != self._start: # <<<<<<<<<<<<<<
+ * # unexpected reference data present
+ * raise AssertionError("unexpected reference data present")
+ */
+ __pyx_t_5 = (__pyx_v_last != __pyx_v_self->_start);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":307
+ * if last != self._start:
+ * # unexpected reference data present
+ * raise AssertionError("unexpected reference data present") # <<<<<<<<<<<<<<
+ * node_value = StaticTuple(value, StaticTuple())
+ * PyList_Append(self.keys, StaticTuple(key, node_value))
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_10));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_10));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_10));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L19;
+ }
+ __pyx_L19:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":308
+ * # unexpected reference data present
+ * raise AssertionError("unexpected reference data present")
+ * node_value = StaticTuple(value, StaticTuple()) # <<<<<<<<<<<<<<
+ * PyList_Append(self.keys, StaticTuple(key, node_value))
+ * return 0
+ */
+ __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_value);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_value);
+ __Pyx_GIVEREF(__pyx_v_value);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)), __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_node_value));
+ __pyx_v_node_value = ((StaticTuple *)__pyx_t_3);
+ __pyx_t_3 = 0;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":309
+ * raise AssertionError("unexpected reference data present")
+ * node_value = StaticTuple(value, StaticTuple())
+ * PyList_Append(self.keys, StaticTuple(key, node_value)) # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_key);
+ __Pyx_GIVEREF(__pyx_v_key);
+ __Pyx_INCREF(((PyObject *)__pyx_v_node_value));
+ PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_node_value));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_node_value));
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = PyList_Append(__pyx_v_self->keys, __pyx_t_2); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 309; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":310
+ * node_value = StaticTuple(value, StaticTuple())
+ * PyList_Append(self.keys, StaticTuple(key, node_value))
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * def parse(self):
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.BTreeLeafParser.process_line");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_key);
+ __Pyx_DECREF(__pyx_v_value);
+ __Pyx_DECREF((PyObject *)__pyx_v_ref_lists);
+ __Pyx_DECREF(__pyx_v_ref_list);
+ __Pyx_DECREF((PyObject *)__pyx_v_node_value);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":312
+ * return 0
+ *
+ * def parse(self): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t byte_count
+ * if not PyString_CheckExact(self.bytes):
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_parse(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_parse(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ Py_ssize_t __pyx_v_byte_count;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ char *__pyx_t_4;
+ int __pyx_t_5;
+ __Pyx_RefNannySetupContext("parse");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":314
+ * def parse(self):
+ * cdef Py_ssize_t byte_count
+ * if not PyString_CheckExact(self.bytes): # <<<<<<<<<<<<<<
+ * raise AssertionError('self.bytes is not a string.')
+ * byte_count = PyString_Size(self.bytes)
+ */
+ __pyx_t_1 = (!PyString_CheckExact(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->bytes));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":315
+ * cdef Py_ssize_t byte_count
+ * if not PyString_CheckExact(self.bytes):
+ * raise AssertionError('self.bytes is not a string.') # <<<<<<<<<<<<<<
+ * byte_count = PyString_Size(self.bytes)
+ * self._cur_str = PyString_AsString(self.bytes)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_11));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_11));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_11));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":316
+ * if not PyString_CheckExact(self.bytes):
+ * raise AssertionError('self.bytes is not a string.')
+ * byte_count = PyString_Size(self.bytes) # <<<<<<<<<<<<<<
+ * self._cur_str = PyString_AsString(self.bytes)
+ * # This points just past the last character in the string
+ */
+ __pyx_v_byte_count = PyString_Size(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":317
+ * raise AssertionError('self.bytes is not a string.')
+ * byte_count = PyString_Size(self.bytes)
+ * self._cur_str = PyString_AsString(self.bytes) # <<<<<<<<<<<<<<
+ * # This points just past the last character in the string
+ * self._end_str = self._cur_str + byte_count
+ */
+ __pyx_t_4 = PyString_AsString(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->bytes); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_cur_str = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":319
+ * self._cur_str = PyString_AsString(self.bytes)
+ * # This points just past the last character in the string
+ * self._end_str = self._cur_str + byte_count # <<<<<<<<<<<<<<
+ * while self._cur_str < self._end_str:
+ * self.process_line()
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_end_str = (((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_cur_str + __pyx_v_byte_count);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":320
+ * # This points just past the last character in the string
+ * self._end_str = self._cur_str + byte_count
+ * while self._cur_str < self._end_str: # <<<<<<<<<<<<<<
+ * self.process_line()
+ * return self.keys
+ */
+ while (1) {
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_cur_str < ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->_end_str);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":321
+ * self._end_str = self._cur_str + byte_count
+ * while self._cur_str < self._end_str:
+ * self.process_line() # <<<<<<<<<<<<<<
+ * return self.keys
+ *
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->__pyx_vtab)->process_line(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":322
+ * while self._cur_str < self._end_str:
+ * self.process_line()
+ * return self.keys # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->keys);
+ __pyx_r = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_v_self)->keys;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.BTreeLeafParser.parse");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":325
+ *
+ *
+ * def _parse_leaf_lines(bytes, key_length, ref_list_length): # <<<<<<<<<<<<<<
+ * parser = BTreeLeafParser(bytes, key_length, ref_list_length)
+ * return parser.parse()
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__parse_leaf_lines(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__parse_leaf_lines(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ PyObject *__pyx_v_key_length = 0;
+ PyObject *__pyx_v_ref_list_length = 0;
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *__pyx_v_parser;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__bytes,&__pyx_n_s__key_length,&__pyx_n_s__ref_list_length,0};
+ __Pyx_RefNannySetupContext("_parse_leaf_lines");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[3] = {0,0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__key_length);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_parse_leaf_lines", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ref_list_length);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_parse_leaf_lines", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_parse_leaf_lines") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_bytes = values[0];
+ __pyx_v_key_length = values[1];
+ __pyx_v_ref_list_length = values[2];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_key_length = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_ref_list_length = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_parse_leaf_lines", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._parse_leaf_lines");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_v_parser = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":326
+ *
+ * def _parse_leaf_lines(bytes, key_length, ref_list_length):
+ * parser = BTreeLeafParser(bytes, key_length, ref_list_length) # <<<<<<<<<<<<<<
+ * return parser.parse()
+ *
+ */
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_bytes);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_bytes);
+ __Pyx_GIVEREF(__pyx_v_bytes);
+ __Pyx_INCREF(__pyx_v_key_length);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_key_length);
+ __Pyx_GIVEREF(__pyx_v_key_length);
+ __Pyx_INCREF(__pyx_v_ref_list_length);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_ref_list_length);
+ __Pyx_GIVEREF(__pyx_v_ref_list_length);
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_parser));
+ __pyx_v_parser = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":327
+ * def _parse_leaf_lines(bytes, key_length, ref_list_length):
+ * parser = BTreeLeafParser(bytes, key_length, ref_list_length)
+ * return parser.parse() # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_parser), __pyx_n_s__parse); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._parse_leaf_lines");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_parser);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
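+/* Sketch: _parse_leaf_lines() above is the module-level entry point: it runs
+ * BTreeLeafParser.process_line() over every line of the leaf bytes and
+ * returns the accumulated self.keys list, where each entry pairs a key with
+ * a (value, ref_lists) StaticTuple (an empty ref_lists StaticTuple in the
+ * branch shown further up). A rough Python sketch of the intended use; the
+ * variable names are illustrative, not taken from the .pyx source:
+ *
+ *   nodes = _parse_leaf_lines(leaf_bytes, key_length, ref_list_length)
+ *   for key, (value, ref_lists) in nodes:
+ *       ...   # key and ref_lists are StaticTuples, value is a str
+ */
+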
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":349
+ * _hexbuf = '0123456789abcdef'
+ *
+ * cdef _populate_unhexbuf(): # <<<<<<<<<<<<<<
+ * cdef int i
+ * for i from 0 <= i < 256:
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx__populate_unhexbuf(void) {
+ int __pyx_v_i;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("_populate_unhexbuf");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":351
+ * cdef _populate_unhexbuf():
+ * cdef int i
+ * for i from 0 <= i < 256: # <<<<<<<<<<<<<<
+ * _unhexbuf[i] = -1
+ * for i from 0 <= i < 10: # 0123456789 => map to the raw number
+ */
+ for (__pyx_v_i = 0; __pyx_v_i < 256; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":352
+ * cdef int i
+ * for i from 0 <= i < 256:
+ * _unhexbuf[i] = -1 # <<<<<<<<<<<<<<
+ * for i from 0 <= i < 10: # 0123456789 => map to the raw number
+ * _unhexbuf[(i + c'0')] = i
+ */
+ (__pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[__pyx_v_i]) = -1;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":353
+ * for i from 0 <= i < 256:
+ * _unhexbuf[i] = -1
+ * for i from 0 <= i < 10: # 0123456789 => map to the raw number # <<<<<<<<<<<<<<
+ * _unhexbuf[(i + c'0')] = i
+ * for i from 10 <= i < 16: # abcdef => 10, 11, 12, 13, 14, 15
+ */
+ for (__pyx_v_i = 0; __pyx_v_i < 10; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":354
+ * _unhexbuf[i] = -1
+ * for i from 0 <= i < 10: # 0123456789 => map to the raw number
+ * _unhexbuf[(i + c'0')] = i # <<<<<<<<<<<<<<
+ * for i from 10 <= i < 16: # abcdef => 10, 11, 12, 13, 14, 15
+ * _unhexbuf[(i - 10 + c'a')] = i
+ */
+ (__pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[(__pyx_v_i + '0')]) = __pyx_v_i;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":355
+ * for i from 0 <= i < 10: # 0123456789 => map to the raw number
+ * _unhexbuf[(i + c'0')] = i
+ * for i from 10 <= i < 16: # abcdef => 10, 11, 12, 13, 14, 15 # <<<<<<<<<<<<<<
+ * _unhexbuf[(i - 10 + c'a')] = i
+ * for i from 10 <= i < 16: # ABCDEF => 10, 11, 12, 13, 14, 15
+ */
+ for (__pyx_v_i = 10; __pyx_v_i < 16; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":356
+ * _unhexbuf[(i + c'0')] = i
+ * for i from 10 <= i < 16: # abcdef => 10, 11, 12, 13, 14, 15
+ * _unhexbuf[(i - 10 + c'a')] = i # <<<<<<<<<<<<<<
+ * for i from 10 <= i < 16: # ABCDEF => 10, 11, 12, 13, 14, 15
+ * _unhexbuf[(i - 10 + c'A')] = i
+ */
+ (__pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[((__pyx_v_i - 10) + 'a')]) = __pyx_v_i;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":357
+ * for i from 10 <= i < 16: # abcdef => 10, 11, 12, 13, 14, 15
+ * _unhexbuf[(i - 10 + c'a')] = i
+ * for i from 10 <= i < 16: # ABCDEF => 10, 11, 12, 13, 14, 15 # <<<<<<<<<<<<<<
+ * _unhexbuf[(i - 10 + c'A')] = i
+ * _populate_unhexbuf()
+ */
+ for (__pyx_v_i = 10; __pyx_v_i < 16; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":358
+ * _unhexbuf[(i - 10 + c'a')] = i
+ * for i from 10 <= i < 16: # ABCDEF => 10, 11, 12, 13, 14, 15
+ * _unhexbuf[(i - 10 + c'A')] = i # <<<<<<<<<<<<<<
+ * _populate_unhexbuf()
+ *
+ */
+ (__pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[((__pyx_v_i - 10) + 'A')]) = __pyx_v_i;
+ }
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
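+/* Sketch: _populate_unhexbuf() above fills a 256-entry lookup table that maps
+ * the ASCII codes of '0'-'9', 'a'-'f' and 'A'-'F' to their 4-bit values and
+ * every other byte to -1, so the decoder below never has to branch on
+ * character class. A minimal Python equivalent:
+ *
+ *   _unhexbuf = [-1] * 256
+ *   for i, c in enumerate('0123456789'):
+ *       _unhexbuf[ord(c)] = i
+ *   for i, c in enumerate('abcdef'):
+ *       _unhexbuf[ord(c)] = 10 + i
+ *   for i, c in enumerate('ABCDEF'):
+ *       _unhexbuf[ord(c)] = 10 + i
+ *   # _unhexbuf[ord('f')] == 15, _unhexbuf[ord('g')] == -1
+ */
+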
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":362
+ *
+ *
+ * cdef int _unhexlify_sha1(char *as_hex, char *as_bin): # cannot_raise # <<<<<<<<<<<<<<
+ * """Take the hex sha1 in as_hex and make it binary in as_bin
+ *
+ */
+
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx__unhexlify_sha1(char *__pyx_v_as_hex, char *__pyx_v_as_bin) {
+ int __pyx_v_top;
+ int __pyx_v_bot;
+ int __pyx_v_i;
+ int __pyx_v_j;
+ int __pyx_r;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ __Pyx_RefNannySetupContext("_unhexlify_sha1");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":374
+ * # binascii does this using isupper() and tolower() and ?: syntax. I'm
+ * # guessing a simple lookup array should be faster.
+ * j = 0 # <<<<<<<<<<<<<<
+ * for i from 0 <= i < 20:
+ * top = _unhexbuf[<unsigned char>(as_hex[j])]
+ */
+ __pyx_v_j = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":375
+ * # guessing a simple lookup array should be faster.
+ * j = 0
+ * for i from 0 <= i < 20: # <<<<<<<<<<<<<<
+ * top = _unhexbuf[<unsigned char>(as_hex[j])]
+ * j = j + 1
+ */
+ for (__pyx_v_i = 0; __pyx_v_i < 20; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":376
+ * j = 0
+ * for i from 0 <= i < 20:
+ * top = _unhexbuf[<unsigned char>(as_hex[j])] # <<<<<<<<<<<<<<
+ * j = j + 1
+ * bot = _unhexbuf[<unsigned char>(as_hex[j])]
+ */
+ __pyx_v_top = (__pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[((unsigned char)(__pyx_v_as_hex[__pyx_v_j]))]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":377
+ * for i from 0 <= i < 20:
+ * top = _unhexbuf[<unsigned char>(as_hex[j])]
+ * j = j + 1 # <<<<<<<<<<<<<<
+ * bot = _unhexbuf[<unsigned char>(as_hex[j])]
+ * j = j + 1
+ */
+ __pyx_v_j = (__pyx_v_j + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":378
+ * top = _unhexbuf[<unsigned char>(as_hex[j])]
+ * j = j + 1
+ * bot = _unhexbuf[<unsigned char>(as_hex[j])] # <<<<<<<<<<<<<<
+ * j = j + 1
+ * if top == -1 or bot == -1:
+ */
+ __pyx_v_bot = (__pyx_v_6bzrlib_21_btree_serializer_pyx__unhexbuf[((unsigned char)(__pyx_v_as_hex[__pyx_v_j]))]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":379
+ * j = j + 1
+ * bot = _unhexbuf[<unsigned char>(as_hex[j])]
+ * j = j + 1 # <<<<<<<<<<<<<<
+ * if top == -1 or bot == -1:
+ * return 0
+ */
+ __pyx_v_j = (__pyx_v_j + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":380
+ * bot = _unhexbuf[<unsigned char>(as_hex[j])]
+ * j = j + 1
+ * if top == -1 or bot == -1: # <<<<<<<<<<<<<<
+ * return 0
+ * as_bin[i] = <unsigned char>((top << 4) + bot);
+ */
+ __pyx_t_1 = (__pyx_v_top == -1);
+ if (!__pyx_t_1) {
+ __pyx_t_2 = (__pyx_v_bot == -1);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":381
+ * j = j + 1
+ * if top == -1 or bot == -1:
+ * return 0 # <<<<<<<<<<<<<<
+ * as_bin[i] = <unsigned char>((top << 4) + bot);
+ * return 1
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":382
+ * if top == -1 or bot == -1:
+ * return 0
+ * as_bin[i] = <unsigned char>((top << 4) + bot); # <<<<<<<<<<<<<<
+ * return 1
+ *
+ */
+ (__pyx_v_as_bin[__pyx_v_i]) = ((unsigned char)((__pyx_v_top << 4) + __pyx_v_bot));
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":383
+ * return 0
+ * as_bin[i] = <unsigned char>((top << 4) + bot);
+ * return 1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
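+/* Sketch: _unhexlify_sha1() above decodes a 40-character hex digest into 20
+ * raw bytes, two characters per byte, and reports failure as soon as a
+ * non-hex character is seen. A Python sketch using the _unhexbuf table
+ * sketched above (the function name here is illustrative):
+ *
+ *   def unhexlify_sha1(as_hex):
+ *       out = []
+ *       for i in range(20):
+ *           top = _unhexbuf[ord(as_hex[2 * i])]
+ *           bot = _unhexbuf[ord(as_hex[2 * i + 1])]
+ *           if top == -1 or bot == -1:
+ *               return None          # the C version returns 0
+ *           out.append(chr((top << 4) + bot))
+ *       return ''.join(out)
+ */
+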
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":386
+ *
+ *
+ * def _py_unhexlify(as_hex): # <<<<<<<<<<<<<<
+ * """For the test infrastructure, just thunks to _unhexlify_sha1"""
+ * if len(as_hex) != 40 or not PyString_CheckExact(as_hex):
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_unhexlify(PyObject *__pyx_self, PyObject *__pyx_v_as_hex); /*proto*/
+static char __pyx_doc_6bzrlib_21_btree_serializer_pyx__py_unhexlify[] = "For the test infrastructure, just thunks to _unhexlify_sha1";
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_unhexlify(PyObject *__pyx_self, PyObject *__pyx_v_as_hex) {
+ PyObject *__pyx_v_as_bin;
+ PyObject *__pyx_r = NULL;
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_t_7;
+ __Pyx_RefNannySetupContext("_py_unhexlify");
+ __pyx_self = __pyx_self;
+ __pyx_v_as_bin = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":388
+ * def _py_unhexlify(as_hex):
+ * """For the test infrastructure, just thunks to _unhexlify_sha1"""
+ * if len(as_hex) != 40 or not PyString_CheckExact(as_hex): # <<<<<<<<<<<<<<
+ * raise ValueError('not a 40-byte hex digest')
+ * as_bin = PyString_FromStringAndSize(NULL, 20)
+ */
+ __pyx_t_1 = PyObject_Length(__pyx_v_as_hex); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = (__pyx_t_1 != 40);
+ if (!__pyx_t_2) {
+ __pyx_t_3 = (!PyString_CheckExact(__pyx_v_as_hex));
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":389
+ * """For the test infrastructure, just thunks to _unhexlify_sha1"""
+ * if len(as_hex) != 40 or not PyString_CheckExact(as_hex):
+ * raise ValueError('not a 40-byte hex digest') # <<<<<<<<<<<<<<
+ * as_bin = PyString_FromStringAndSize(NULL, 20)
+ * if _unhexlify_sha1(PyString_AS_STRING(as_hex), PyString_AS_STRING(as_bin)):
+ */
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_12));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_kp_s_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_12));
+ __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":390
+ * if len(as_hex) != 40 or not PyString_CheckExact(as_hex):
+ * raise ValueError('not a 40-byte hex digest')
+ * as_bin = PyString_FromStringAndSize(NULL, 20) # <<<<<<<<<<<<<<
+ * if _unhexlify_sha1(PyString_AS_STRING(as_hex), PyString_AS_STRING(as_bin)):
+ * return as_bin
+ */
+ __pyx_t_6 = PyString_FromStringAndSize(NULL, 20); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_v_as_bin);
+ __pyx_v_as_bin = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":391
+ * raise ValueError('not a 40-byte hex digest')
+ * as_bin = PyString_FromStringAndSize(NULL, 20)
+ * if _unhexlify_sha1(PyString_AS_STRING(as_hex), PyString_AS_STRING(as_bin)): # <<<<<<<<<<<<<<
+ * return as_bin
+ * return None
+ */
+ __pyx_t_7 = __pyx_f_6bzrlib_21_btree_serializer_pyx__unhexlify_sha1(PyString_AS_STRING(__pyx_v_as_hex), PyString_AS_STRING(__pyx_v_as_bin));
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":392
+ * as_bin = PyString_FromStringAndSize(NULL, 20)
+ * if _unhexlify_sha1(PyString_AS_STRING(as_hex), PyString_AS_STRING(as_bin)):
+ * return as_bin # <<<<<<<<<<<<<<
+ * return None
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_as_bin);
+ __pyx_r = __pyx_v_as_bin;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":393
+ * if _unhexlify_sha1(PyString_AS_STRING(as_hex), PyString_AS_STRING(as_bin)):
+ * return as_bin
+ * return None # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._py_unhexlify");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_as_bin);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":396
+ *
+ *
+ * cdef void _hexlify_sha1(char *as_bin, char *as_hex): # cannot_raise # <<<<<<<<<<<<<<
+ * cdef int i, j
+ * cdef char c
+ */
+
+static void __pyx_f_6bzrlib_21_btree_serializer_pyx__hexlify_sha1(char *__pyx_v_as_bin, char *__pyx_v_as_hex) {
+ int __pyx_v_i;
+ int __pyx_v_j;
+ char __pyx_v_c;
+ __Pyx_RefNannySetupContext("_hexlify_sha1");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":400
+ * cdef char c
+ *
+ * j = 0 # <<<<<<<<<<<<<<
+ * for i from 0 <= i < 20:
+ * c = as_bin[i]
+ */
+ __pyx_v_j = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":401
+ *
+ * j = 0
+ * for i from 0 <= i < 20: # <<<<<<<<<<<<<<
+ * c = as_bin[i]
+ * as_hex[j] = _hexbuf[(c>>4)&0xf]
+ */
+ for (__pyx_v_i = 0; __pyx_v_i < 20; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":402
+ * j = 0
+ * for i from 0 <= i < 20:
+ * c = as_bin[i] # <<<<<<<<<<<<<<
+ * as_hex[j] = _hexbuf[(c>>4)&0xf]
+ * j = j + 1
+ */
+ __pyx_v_c = (__pyx_v_as_bin[__pyx_v_i]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":403
+ * for i from 0 <= i < 20:
+ * c = as_bin[i]
+ * as_hex[j] = _hexbuf[(c>>4)&0xf] # <<<<<<<<<<<<<<
+ * j = j + 1
+ * as_hex[j] = _hexbuf[(c)&0xf]
+ */
+ (__pyx_v_as_hex[__pyx_v_j]) = (__pyx_v_6bzrlib_21_btree_serializer_pyx__hexbuf[((__pyx_v_c >> 4) & 0xf)]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":404
+ * c = as_bin[i]
+ * as_hex[j] = _hexbuf[(c>>4)&0xf]
+ * j = j + 1 # <<<<<<<<<<<<<<
+ * as_hex[j] = _hexbuf[(c)&0xf]
+ * j = j + 1
+ */
+ __pyx_v_j = (__pyx_v_j + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":405
+ * as_hex[j] = _hexbuf[(c>>4)&0xf]
+ * j = j + 1
+ * as_hex[j] = _hexbuf[(c)&0xf] # <<<<<<<<<<<<<<
+ * j = j + 1
+ *
+ */
+ (__pyx_v_as_hex[__pyx_v_j]) = (__pyx_v_6bzrlib_21_btree_serializer_pyx__hexbuf[(__pyx_v_c & 0xf)]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":406
+ * j = j + 1
+ * as_hex[j] = _hexbuf[(c)&0xf]
+ * j = j + 1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_v_j = (__pyx_v_j + 1);
+ }
+
+ __Pyx_RefNannyFinishContext();
+}
+
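+/* Sketch: _hexlify_sha1() above is the inverse mapping: it expands 20 raw
+ * bytes into 40 lowercase hex characters using the _hexbuf table
+ * ('0123456789abcdef'). A Python sketch, equivalent to binascii.hexlify()
+ * for a 20-byte digest:
+ *
+ *   _hexbuf = '0123456789abcdef'
+ *   def hexlify_sha1(as_bin):
+ *       out = []
+ *       for ch in as_bin:
+ *           b = ord(ch)
+ *           out.append(_hexbuf[(b >> 4) & 0xf])
+ *           out.append(_hexbuf[b & 0xf])
+ *       return ''.join(out)
+ */
+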
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":409
+ *
+ *
+ * def _py_hexlify(as_bin): # <<<<<<<<<<<<<<
+ * """For test infrastructure, thunk to _hexlify_sha1"""
+ * if len(as_bin) != 20 or not PyString_CheckExact(as_bin):
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_hexlify(PyObject *__pyx_self, PyObject *__pyx_v_as_bin); /*proto*/
+static char __pyx_doc_6bzrlib_21_btree_serializer_pyx__py_hexlify[] = "For test infrastructure, thunk to _hexlify_sha1";
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_hexlify(PyObject *__pyx_self, PyObject *__pyx_v_as_bin) {
+ PyObject *__pyx_v_as_hex;
+ PyObject *__pyx_r = NULL;
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ __Pyx_RefNannySetupContext("_py_hexlify");
+ __pyx_self = __pyx_self;
+ __pyx_v_as_hex = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":411
+ * def _py_hexlify(as_bin):
+ * """For test infrastructure, thunk to _hexlify_sha1"""
+ * if len(as_bin) != 20 or not PyString_CheckExact(as_bin): # <<<<<<<<<<<<<<
+ * raise ValueError('not a 20-byte binary digest')
+ * as_hex = PyString_FromStringAndSize(NULL, 40)
+ */
+ __pyx_t_1 = PyObject_Length(__pyx_v_as_bin); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = (__pyx_t_1 != 20);
+ if (!__pyx_t_2) {
+ __pyx_t_3 = (!PyString_CheckExact(__pyx_v_as_bin));
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":412
+ * """For test infrastructure, thunk to _hexlify_sha1"""
+ * if len(as_bin) != 20 or not PyString_CheckExact(as_bin):
+ * raise ValueError('not a 20-byte binary digest') # <<<<<<<<<<<<<<
+ * as_hex = PyString_FromStringAndSize(NULL, 40)
+ * _hexlify_sha1(PyString_AS_STRING(as_bin), PyString_AS_STRING(as_hex))
+ */
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_13));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_kp_s_13));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13));
+ __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":413
+ * if len(as_bin) != 20 or not PyString_CheckExact(as_bin):
+ * raise ValueError('not a 20-byte binary digest')
+ * as_hex = PyString_FromStringAndSize(NULL, 40) # <<<<<<<<<<<<<<
+ * _hexlify_sha1(PyString_AS_STRING(as_bin), PyString_AS_STRING(as_hex))
+ * return as_hex
+ */
+ __pyx_t_6 = PyString_FromStringAndSize(NULL, 40); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_v_as_hex);
+ __pyx_v_as_hex = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":414
+ * raise ValueError('not a 20-byte binary digest')
+ * as_hex = PyString_FromStringAndSize(NULL, 40)
+ * _hexlify_sha1(PyString_AS_STRING(as_bin), PyString_AS_STRING(as_hex)) # <<<<<<<<<<<<<<
+ * return as_hex
+ *
+ */
+ __pyx_f_6bzrlib_21_btree_serializer_pyx__hexlify_sha1(PyString_AS_STRING(__pyx_v_as_bin), PyString_AS_STRING(__pyx_v_as_hex));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":415
+ * as_hex = PyString_FromStringAndSize(NULL, 40)
+ * _hexlify_sha1(PyString_AS_STRING(as_bin), PyString_AS_STRING(as_hex))
+ * return as_hex # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_as_hex);
+ __pyx_r = __pyx_v_as_hex;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._py_hexlify");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_as_hex);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":418
+ *
+ *
+ * cdef int _key_to_sha1(key, char *sha1): # cannot_raise # <<<<<<<<<<<<<<
+ * """Map a key into its sha1 content.
+ *
+ */
+
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx__key_to_sha1(PyObject *__pyx_v_key, char *__pyx_v_sha1) {
+ char *__pyx_v_c_val;
+ PyObject *__pyx_v_p_val;
+ int __pyx_r;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_key_to_sha1");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":428
+ * cdef PyObject *p_val
+ *
+ * if StaticTuple_CheckExact(key) and StaticTuple_GET_SIZE(key) == 1: # <<<<<<<<<<<<<<
+ * p_val = <PyObject *>StaticTuple_GET_ITEM(key, 0)
+ * elif (PyTuple_CheckExact(key) and PyTuple_GET_SIZE(key) == 1):
+ */
+ if (StaticTuple_CheckExact(__pyx_v_key)) {
+ if (!(likely(((__pyx_v_key) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_key, __pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = (StaticTuple_GET_SIZE(((StaticTuple *)__pyx_v_key)) == 1);
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = StaticTuple_CheckExact(__pyx_v_key);
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":429
+ *
+ * if StaticTuple_CheckExact(key) and StaticTuple_GET_SIZE(key) == 1:
+ * p_val = <PyObject *>StaticTuple_GET_ITEM(key, 0) # <<<<<<<<<<<<<<
+ * elif (PyTuple_CheckExact(key) and PyTuple_GET_SIZE(key) == 1):
+ * p_val = PyTuple_GET_ITEM_ptr_object(key, 0)
+ */
+ if (!(likely(((__pyx_v_key) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_key, __pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_p_val = ((PyObject *)StaticTuple_GET_ITEM(((StaticTuple *)__pyx_v_key), 0));
+ goto __pyx_L3;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":430
+ * if StaticTuple_CheckExact(key) and StaticTuple_GET_SIZE(key) == 1:
+ * p_val = <PyObject *>StaticTuple_GET_ITEM(key, 0)
+ * elif (PyTuple_CheckExact(key) and PyTuple_GET_SIZE(key) == 1): # <<<<<<<<<<<<<<
+ * p_val = PyTuple_GET_ITEM_ptr_object(key, 0)
+ * else:
+ */
+ if (PyTuple_CheckExact(__pyx_v_key)) {
+ __pyx_t_2 = (PyTuple_GET_SIZE(__pyx_v_key) == 1);
+ __pyx_t_1 = __pyx_t_2;
+ } else {
+ __pyx_t_1 = PyTuple_CheckExact(__pyx_v_key);
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":431
+ * p_val = <PyObject *>StaticTuple_GET_ITEM(key, 0)
+ * elif (PyTuple_CheckExact(key) and PyTuple_GET_SIZE(key) == 1):
+ * p_val = PyTuple_GET_ITEM_ptr_object(key, 0) # <<<<<<<<<<<<<<
+ * else:
+ * # Not a tuple or a StaticTuple
+ */
+ __pyx_v_p_val = PyTuple_GET_ITEM(__pyx_v_key, 0);
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":434
+ * else:
+ * # Not a tuple or a StaticTuple
+ * return 0 # <<<<<<<<<<<<<<
+ * if (PyString_CheckExact_ptr(p_val) and PyString_GET_SIZE_ptr(p_val) == 45):
+ * c_val = PyString_AS_STRING_ptr(p_val)
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":435
+ * # Not a tuple or a StaticTuple
+ * return 0
+ * if (PyString_CheckExact_ptr(p_val) and PyString_GET_SIZE_ptr(p_val) == 45): # <<<<<<<<<<<<<<
+ * c_val = PyString_AS_STRING_ptr(p_val)
+ * else:
+ */
+ if (PyString_CheckExact(__pyx_v_p_val)) {
+ __pyx_t_1 = (PyString_GET_SIZE(__pyx_v_p_val) == 45);
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = PyString_CheckExact(__pyx_v_p_val);
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":436
+ * return 0
+ * if (PyString_CheckExact_ptr(p_val) and PyString_GET_SIZE_ptr(p_val) == 45):
+ * c_val = PyString_AS_STRING_ptr(p_val) # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_v_c_val = PyString_AS_STRING(__pyx_v_p_val);
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":438
+ * c_val = PyString_AS_STRING_ptr(p_val)
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ * if strncmp(c_val, 'sha1:', 5) != 0:
+ * return 0
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":439
+ * else:
+ * return 0
+ * if strncmp(c_val, 'sha1:', 5) != 0: # <<<<<<<<<<<<<<
+ * return 0
+ * if not _unhexlify_sha1(c_val + 5, sha1):
+ */
+ __pyx_t_2 = (strncmp(__pyx_v_c_val, __pyx_k_3, 5) != 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":440
+ * return 0
+ * if strncmp(c_val, 'sha1:', 5) != 0:
+ * return 0 # <<<<<<<<<<<<<<
+ * if not _unhexlify_sha1(c_val + 5, sha1):
+ * return 0
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":441
+ * if strncmp(c_val, 'sha1:', 5) != 0:
+ * return 0
+ * if not _unhexlify_sha1(c_val + 5, sha1): # <<<<<<<<<<<<<<
+ * return 0
+ * return 1
+ */
+ __pyx_t_2 = (!__pyx_f_6bzrlib_21_btree_serializer_pyx__unhexlify_sha1((__pyx_v_c_val + 5), __pyx_v_sha1));
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":442
+ * return 0
+ * if not _unhexlify_sha1(c_val + 5, sha1):
+ * return 0 # <<<<<<<<<<<<<<
+ * return 1
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":443
+ * if not _unhexlify_sha1(c_val + 5, sha1):
+ * return 0
+ * return 1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_WriteUnraisable("bzrlib._btree_serializer_pyx._key_to_sha1");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
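+/* Sketch: _key_to_sha1() above only accepts keys of the form
+ * ('sha1:<40 hex digits>',), a one-element tuple or StaticTuple whose item is
+ * exactly 45 bytes long; it writes the 20 decoded bytes into sha1 and returns
+ * 1 on success, 0 for anything else. A loose Python sketch that drops the
+ * exact-type checks the C version performs:
+ *
+ *   def key_to_sha1(key):
+ *       if len(key) != 1:
+ *           return None
+ *       val = key[0]
+ *       if not isinstance(val, str) or len(val) != 45:
+ *           return None
+ *       if not val.startswith('sha1:'):
+ *           return None
+ *       return unhexlify_sha1(val[5:])   # 20 bytes, or None if not hex
+ */
+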
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":446
+ *
+ *
+ * def _py_key_to_sha1(key): # <<<<<<<<<<<<<<
+ * """Map a key to a simple sha1 string.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_key_to_sha1(PyObject *__pyx_self, PyObject *__pyx_v_key); /*proto*/
+static char __pyx_doc_6bzrlib_21_btree_serializer_pyx__py_key_to_sha1[] = "Map a key to a simple sha1 string.\n\n This is a testing thunk to the C function.\n ";
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_key_to_sha1(PyObject *__pyx_self, PyObject *__pyx_v_key) {
+ PyObject *__pyx_v_as_bin_sha;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_py_key_to_sha1");
+ __pyx_self = __pyx_self;
+ __pyx_v_as_bin_sha = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":451
+ * This is a testing thunk to the C function.
+ * """
+ * as_bin_sha = PyString_FromStringAndSize(NULL, 20) # <<<<<<<<<<<<<<
+ * if _key_to_sha1(key, PyString_AS_STRING(as_bin_sha)):
+ * return as_bin_sha
+ */
+ __pyx_t_1 = PyString_FromStringAndSize(NULL, 20); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_as_bin_sha);
+ __pyx_v_as_bin_sha = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":452
+ * """
+ * as_bin_sha = PyString_FromStringAndSize(NULL, 20)
+ * if _key_to_sha1(key, PyString_AS_STRING(as_bin_sha)): # <<<<<<<<<<<<<<
+ * return as_bin_sha
+ * return None
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_21_btree_serializer_pyx__key_to_sha1(__pyx_v_key, PyString_AS_STRING(__pyx_v_as_bin_sha));
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":453
+ * as_bin_sha = PyString_FromStringAndSize(NULL, 20)
+ * if _key_to_sha1(key, PyString_AS_STRING(as_bin_sha)):
+ * return as_bin_sha # <<<<<<<<<<<<<<
+ * return None
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_as_bin_sha);
+ __pyx_r = __pyx_v_as_bin_sha;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":454
+ * if _key_to_sha1(key, PyString_AS_STRING(as_bin_sha)):
+ * return as_bin_sha
+ * return None # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._py_key_to_sha1");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_as_bin_sha);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":457
+ *
+ *
+ * cdef StaticTuple _sha1_to_key(char *sha1): # <<<<<<<<<<<<<<
+ * """Compute a ('sha1:abcd',) key for a given sha1."""
+ * cdef StaticTuple key
+ */
+
+static StaticTuple *__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key(char *__pyx_v_sha1) {
+ StaticTuple *__pyx_v_key;
+ PyObject *__pyx_v_hexxed;
+ char *__pyx_v_c_buf;
+ StaticTuple *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("_sha1_to_key");
+ __pyx_v_key = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_hexxed = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":462
+ * cdef object hexxed
+ * cdef char *c_buf
+ * hexxed = PyString_FromStringAndSize(NULL, 45) # <<<<<<<<<<<<<<
+ * c_buf = PyString_AS_STRING(hexxed)
+ * memcpy(c_buf, 'sha1:', 5)
+ */
+ __pyx_t_1 = PyString_FromStringAndSize(NULL, 45); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_hexxed);
+ __pyx_v_hexxed = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":463
+ * cdef char *c_buf
+ * hexxed = PyString_FromStringAndSize(NULL, 45)
+ * c_buf = PyString_AS_STRING(hexxed) # <<<<<<<<<<<<<<
+ * memcpy(c_buf, 'sha1:', 5)
+ * _hexlify_sha1(sha1, c_buf+5)
+ */
+ __pyx_v_c_buf = PyString_AS_STRING(__pyx_v_hexxed);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":464
+ * hexxed = PyString_FromStringAndSize(NULL, 45)
+ * c_buf = PyString_AS_STRING(hexxed)
+ * memcpy(c_buf, 'sha1:', 5) # <<<<<<<<<<<<<<
+ * _hexlify_sha1(sha1, c_buf+5)
+ * key = StaticTuple_New(1)
+ */
+ memcpy(__pyx_v_c_buf, __pyx_k_3, 5);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":465
+ * c_buf = PyString_AS_STRING(hexxed)
+ * memcpy(c_buf, 'sha1:', 5)
+ * _hexlify_sha1(sha1, c_buf+5) # <<<<<<<<<<<<<<
+ * key = StaticTuple_New(1)
+ * Py_INCREF(hexxed)
+ */
+ __pyx_f_6bzrlib_21_btree_serializer_pyx__hexlify_sha1(__pyx_v_sha1, (__pyx_v_c_buf + 5));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":466
+ * memcpy(c_buf, 'sha1:', 5)
+ * _hexlify_sha1(sha1, c_buf+5)
+ * key = StaticTuple_New(1) # <<<<<<<<<<<<<<
+ * Py_INCREF(hexxed)
+ * StaticTuple_SET_ITEM(key, 0, hexxed)
+ */
+ __pyx_t_1 = ((PyObject *)StaticTuple_New(1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 466; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_key));
+ __pyx_v_key = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":467
+ * _hexlify_sha1(sha1, c_buf+5)
+ * key = StaticTuple_New(1)
+ * Py_INCREF(hexxed) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(key, 0, hexxed)
+ * # This is a bit expensive. To parse 120 keys takes 48us, to return them all
+ */
+ Py_INCREF(__pyx_v_hexxed);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":468
+ * key = StaticTuple_New(1)
+ * Py_INCREF(hexxed)
+ * StaticTuple_SET_ITEM(key, 0, hexxed) # <<<<<<<<<<<<<<
+ * # This is a bit expensive. To parse 120 keys takes 48us, to return them all
+ * # can be done in 66.6us (so 18.6us to build them all).
+ */
+ StaticTuple_SET_ITEM(__pyx_v_key, 0, __pyx_v_hexxed);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":478
+ * # We *could* hang the PyObject form off of the gc_chk_sha1_record for ones
+ * # that we have deserialized. Something to think about, at least.
+ * key = StaticTuple_Intern(key) # <<<<<<<<<<<<<<
+ * return key
+ *
+ */
+ __pyx_t_1 = ((PyObject *)StaticTuple_Intern(__pyx_v_key)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_key));
+ __pyx_v_key = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":479
+ * # that we have deserialized. Something to think about, at least.
+ * key = StaticTuple_Intern(key)
+ * return key # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(((PyObject *)__pyx_r));
+ __Pyx_INCREF(((PyObject *)__pyx_v_key));
+ __pyx_r = __pyx_v_key;
+ goto __pyx_L0;
+
+ __pyx_r = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._sha1_to_key");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_key);
+ __Pyx_DECREF(__pyx_v_hexxed);
+ __Pyx_XGIVEREF((PyObject *)__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
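+/* Sketch: _sha1_to_key() above is the reverse mapping: it rebuilds the
+ * 'sha1:<40 hex>' string, wraps it in a one-element StaticTuple and interns
+ * that tuple so repeated lookups of the same sha1 can share one key object.
+ * In plain Python terms, ignoring the interning step:
+ *
+ *   def sha1_to_key(sha1_bin):
+ *       return ('sha1:' + hexlify_sha1(sha1_bin),)
+ */
+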
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":482
+ *
+ *
+ * def _py_sha1_to_key(sha1_bin): # <<<<<<<<<<<<<<
+ * """Test thunk to check the sha1 mapping."""
+ * if not PyString_CheckExact(sha1_bin) or PyString_GET_SIZE(sha1_bin) != 20:
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_sha1_to_key(PyObject *__pyx_self, PyObject *__pyx_v_sha1_bin); /*proto*/
+static char __pyx_doc_6bzrlib_21_btree_serializer_pyx__py_sha1_to_key[] = "Test thunk to check the sha1 mapping.";
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_sha1_to_key(PyObject *__pyx_self, PyObject *__pyx_v_sha1_bin) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ __Pyx_RefNannySetupContext("_py_sha1_to_key");
+ __pyx_self = __pyx_self;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":484
+ * def _py_sha1_to_key(sha1_bin):
+ * """Test thunk to check the sha1 mapping."""
+ * if not PyString_CheckExact(sha1_bin) or PyString_GET_SIZE(sha1_bin) != 20: # <<<<<<<<<<<<<<
+ * raise ValueError('sha1_bin must be a str of exactly 20 bytes')
+ * return _sha1_to_key(PyString_AS_STRING(sha1_bin))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_sha1_bin));
+ if (!__pyx_t_1) {
+ __pyx_t_2 = (PyString_GET_SIZE(__pyx_v_sha1_bin) != 20);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":485
+ * """Test thunk to check the sha1 mapping."""
+ * if not PyString_CheckExact(sha1_bin) or PyString_GET_SIZE(sha1_bin) != 20:
+ * raise ValueError('sha1_bin must be a str of exactly 20 bytes') # <<<<<<<<<<<<<<
+ * return _sha1_to_key(PyString_AS_STRING(sha1_bin))
+ *
+ */
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_14));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_kp_s_14));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_14));
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":486
+ * if not PyString_CheckExact(sha1_bin) or PyString_GET_SIZE(sha1_bin) != 20:
+ * raise ValueError('sha1_bin must be a str of exactly 20 bytes')
+ * return _sha1_to_key(PyString_AS_STRING(sha1_bin)) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_5 = ((PyObject *)__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key(PyString_AS_STRING(__pyx_v_sha1_bin))); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._py_sha1_to_key");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":489
+ *
+ *
+ * cdef unsigned int _sha1_to_uint(char *sha1): # cannot_raise # <<<<<<<<<<<<<<
+ * cdef unsigned int val
+ * # Must be in MSB, because that is how the content is sorted
+ */
+
+static unsigned int __pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_uint(char *__pyx_v_sha1) {
+ unsigned int __pyx_v_val;
+ unsigned int __pyx_r;
+ __Pyx_RefNannySetupContext("_sha1_to_uint");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":495
+ * | ((<unsigned int>(sha1[1]) & 0xff) << 16)
+ * | ((<unsigned int>(sha1[2]) & 0xff) << 8)
+ * | ((<unsigned int>(sha1[3]) & 0xff) << 0)) # <<<<<<<<<<<<<<
+ * return val
+ *
+ */
+ __pyx_v_val = (((((((unsigned int)(__pyx_v_sha1[0])) & 0xff) << 24) | ((((unsigned int)(__pyx_v_sha1[1])) & 0xff) << 16)) | ((((unsigned int)(__pyx_v_sha1[2])) & 0xff) << 8)) | ((((unsigned int)(__pyx_v_sha1[3])) & 0xff) << 0));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":496
+ * | ((<unsigned int>(sha1[2]) & 0xff) << 8)
+ * | ((<unsigned int>(sha1[3]) & 0xff) << 0))
+ * return val # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_v_val;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
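+/* Sketch: _sha1_to_uint() above packs the first four bytes of the binary
+ * digest into an unsigned int, most significant byte first, so that integer
+ * comparisons order the same way as the raw 20-byte digests. A Python
+ * equivalent plus a worked example:
+ *
+ *   import struct
+ *   def sha1_to_uint(sha1_bin):
+ *       return struct.unpack('>I', sha1_bin[:4])[0]
+ *   # sha1_to_uint('\x12\x34\x56\x78' + '\x00' * 16) == 0x12345678
+ */
+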
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":499
+ *
+ *
+ * cdef _format_record(gc_chk_sha1_record *record): # <<<<<<<<<<<<<<
+ * # This is inefficient to go from a logical state back to a
+ * # string, but it makes things work a bit better internally for now.
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx__format_record(__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_record) {
+ PyObject *__pyx_v_block_offset_str;
+ PyObject *__pyx_v_value;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ __Pyx_RefNannySetupContext("_format_record");
+ __pyx_v_block_offset_str = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_value = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":502
+ * # This is inefficient to go from a logical state back to a
+ * # string, but it makes things work a bit better internally for now.
+ * if record.block_offset >= 0xFFFFFFFF: # <<<<<<<<<<<<<<
+ * # %llu is what we really want, but unfortunately it was only added
+ * # in python 2.7... :(
+ */
+ __pyx_t_1 = (__pyx_v_record->block_offset >= 0xFFFFFFFF);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":505
+ * # %llu is what we really want, but unfortunately it was only added
+ * # in python 2.7... :(
+ * block_offset_str = str(record.block_offset) # <<<<<<<<<<<<<<
+ * value = PyString_FromFormat('%s %u %u %u',
+ * PyString_AS_STRING(block_offset_str),
+ */
+ __pyx_t_2 = PyLong_FromLongLong(__pyx_v_record->block_offset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)&PyString_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_block_offset_str);
+ __pyx_v_block_offset_str = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":509
+ * PyString_AS_STRING(block_offset_str),
+ * record.block_length,
+ * record.record_start, record.record_end) # <<<<<<<<<<<<<<
+ * else:
+ * value = PyString_FromFormat('%lu %u %u %u',
+ */
+ __pyx_t_2 = PyString_FromFormat(__pyx_k_15, PyString_AS_STRING(__pyx_v_block_offset_str), __pyx_v_record->block_length, __pyx_v_record->record_start, __pyx_v_record->record_end); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":514
+ * <unsigned long>record.block_offset,
+ * record.block_length,
+ * record.record_start, record.record_end) # <<<<<<<<<<<<<<
+ * return value
+ *
+ */
+ __pyx_t_2 = PyString_FromFormat(__pyx_k_16, ((unsigned long)__pyx_v_record->block_offset), __pyx_v_record->block_length, __pyx_v_record->record_start, __pyx_v_record->record_end); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_t_2;
+ __pyx_t_2 = 0;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":515
+ * record.block_length,
+ * record.record_start, record.record_end)
+ * return value # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_value);
+ __pyx_r = __pyx_v_value;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._format_record");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_block_offset_str);
+ __Pyx_DECREF(__pyx_v_value);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
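+/* Sketch: _format_record() above turns a parsed gc_chk_sha1_record back into
+ * the textual value 'block_offset block_length record_start record_end'.
+ * Offsets that do not fit in 32 bits go through str() first because
+ * PyString_FromFormat() only grew %llu support in Python 2.7. A Python sketch
+ * where record stands in for the C struct's fields:
+ *
+ *   def format_record(record):
+ *       return '%d %d %d %d' % (record.block_offset, record.block_length,
+ *                               record.record_start, record.record_end)
+ */
+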
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":522
+ *
+ * cdef gc_chk_sha1_record *records
+ * cdef public object last_key # <<<<<<<<<<<<<<
+ * cdef gc_chk_sha1_record *last_record
+ * cdef public int num_records
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ __pyx_r = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ __Pyx_RefNannySetupContext("__set__");
+ __Pyx_INCREF(__pyx_v_value);
+ __Pyx_GIVEREF(__pyx_v_value);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key = __pyx_v_value;
+
+ __pyx_r = 0;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___del__(PyObject *__pyx_v_self); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___del__(PyObject *__pyx_v_self) {
+ int __pyx_r;
+ __Pyx_RefNannySetupContext("__del__");
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key = Py_None;
+
+ __pyx_r = 0;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":524
+ * cdef public object last_key
+ * cdef gc_chk_sha1_record *last_record
+ * cdef public int num_records # <<<<<<<<<<<<<<
+ * # This is the number of bits to shift to get to the interesting byte. A
+ * # value of 24 means that the very first byte changes across all keys.
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_11num_records___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_11num_records___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.num_records.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_11num_records___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_11num_records___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__set__");
+ __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records = __pyx_t_1;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.num_records.__set__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":530
+ * # ignore. 0 means that at least the first 3 bytes are identical, though
+ * # that is going to be very rare
+ * cdef public unsigned char common_shift # <<<<<<<<<<<<<<
+ * # This maps an interesting byte to the first record that matches.
+ * # Equivalent to bisect.bisect_left(self.records, sha1), though only taking
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_12common_shift___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_12common_shift___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->common_shift); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.common_shift.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_12common_shift___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_12common_shift___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ unsigned char __pyx_t_1;
+ __Pyx_RefNannySetupContext("__set__");
+ __pyx_t_1 = __Pyx_PyInt_AsUnsignedChar(__pyx_v_value); if (unlikely((__pyx_t_1 == (unsigned char)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->common_shift = __pyx_t_1;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.common_shift.__set__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":536
+ * cdef unsigned char offsets[257]
+ *
+ * def __sizeof__(self): # <<<<<<<<<<<<<<
+ * # :( Why doesn't Pyrex let me do a simple sizeof(GCCHKSHA1LeafNode)
+ * # like Cython? Explicitly enumerating everything here seems to leave my
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___sizeof__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___sizeof__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__sizeof__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":546
+ * # return (sizeof(GCCHKSHA1LeafNode)
+ * # + sizeof(gc_chk_sha1_record)*self.num_records)
+ * return (sizeof(PyObject) + sizeof(void*) + sizeof(int) # <<<<<<<<<<<<<<
+ * + sizeof(gc_chk_sha1_record*) + sizeof(PyObject *)
+ * + sizeof(gc_chk_sha1_record*) + sizeof(char)
+ */
+ __Pyx_XDECREF(__pyx_r);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":550
+ * + sizeof(gc_chk_sha1_record*) + sizeof(char)
+ * + sizeof(unsigned char)*257
+ * + sizeof(gc_chk_sha1_record)*self.num_records) # <<<<<<<<<<<<<<
+ *
+ * def __dealloc__(self):
+ */
+ __pyx_t_1 = __Pyx_PyInt_FromSize_t((((((((((sizeof(PyObject)) + (sizeof(void *))) + (sizeof(int))) + (sizeof(__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *))) + (sizeof(PyObject *))) + (sizeof(__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *))) + (sizeof(char))) + ((sizeof(unsigned char)) * 257)) + ((sizeof(__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record)) * ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.__sizeof__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
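+
+/* A rough Python rendering of the size accounting above (a sketch, not part
+ * of the generated extension). The ctypes sizes stand in for the C field
+ * sizes, and RECORD_SIZE is a hypothetical placeholder for
+ * sizeof(gc_chk_sha1_record), not the real layout:
+ *
+ *   import ctypes
+ *
+ *   RECORD_SIZE = 20 + 4 * ctypes.sizeof(ctypes.c_uint)   # placeholder
+ *
+ *   def leaf_node_sizeof(num_records):
+ *       fixed = (ctypes.sizeof(ctypes.c_void_p)           # PyObject header (approx.)
+ *                + ctypes.sizeof(ctypes.c_void_p)         # vtable pointer
+ *                + ctypes.sizeof(ctypes.c_int)            # num_records
+ *                + 2 * ctypes.sizeof(ctypes.c_void_p)     # records / last_record pointers
+ *                + ctypes.sizeof(ctypes.c_void_p)         # last_key (PyObject *)
+ *                + ctypes.sizeof(ctypes.c_char)           # common_shift
+ *                + 257 * ctypes.sizeof(ctypes.c_ubyte))   # offsets[257]
+ *       return fixed + RECORD_SIZE * num_records
+ */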
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":552
+ * + sizeof(gc_chk_sha1_record)*self.num_records)
+ *
+ * def __dealloc__(self): # <<<<<<<<<<<<<<
+ * if self.records != NULL:
+ * PyMem_Free(self.records)
+ */
+
+static void __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___dealloc__(PyObject *__pyx_v_self) {
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__dealloc__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":553
+ *
+ * def __dealloc__(self):
+ * if self.records != NULL: # <<<<<<<<<<<<<<
+ * PyMem_Free(self.records)
+ * self.records = NULL
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records != NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":554
+ * def __dealloc__(self):
+ * if self.records != NULL:
+ * PyMem_Free(self.records) # <<<<<<<<<<<<<<
+ * self.records = NULL
+ *
+ */
+ PyMem_Free(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":555
+ * if self.records != NULL:
+ * PyMem_Free(self.records)
+ * self.records = NULL # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, bytes):
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records = NULL;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":557
+ * self.records = NULL
+ *
+ * def __init__(self, bytes): # <<<<<<<<<<<<<<
+ * self._parse_bytes(bytes)
+ * self.last_key = None
+ */
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ int __pyx_r;
+ PyObject *__pyx_t_1 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__bytes,0};
+ __Pyx_RefNannySetupContext("__init__");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[1] = {0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_bytes = values[0];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.__init__");
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":558
+ *
+ * def __init__(self, bytes):
+ * self._parse_bytes(bytes) # <<<<<<<<<<<<<<
+ * self.last_key = None
+ * self.last_record = NULL
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->__pyx_vtab)->_parse_bytes(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self), __pyx_v_bytes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":559
+ * def __init__(self, bytes):
+ * self._parse_bytes(bytes)
+ * self.last_key = None # <<<<<<<<<<<<<<
+ * self.last_record = NULL
+ *
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":560
+ * self._parse_bytes(bytes)
+ * self.last_key = None
+ * self.last_record = NULL # <<<<<<<<<<<<<<
+ *
+ * property min_key:
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_record = NULL;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":563
+ *
+ * property min_key:
+ * def __get__(self): # <<<<<<<<<<<<<<
+ * if self.num_records > 0:
+ * return _sha1_to_key(self.records[0].sha1)
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_7min_key___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_7min_key___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":564
+ * property min_key:
+ * def __get__(self):
+ * if self.num_records > 0: # <<<<<<<<<<<<<<
+ * return _sha1_to_key(self.records[0].sha1)
+ * return None
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records > 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":565
+ * def __get__(self):
+ * if self.num_records > 0:
+ * return _sha1_to_key(self.records[0].sha1) # <<<<<<<<<<<<<<
+ * return None
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = ((PyObject *)__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key((((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records[0]).sha1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":566
+ * if self.num_records > 0:
+ * return _sha1_to_key(self.records[0].sha1)
+ * return None # <<<<<<<<<<<<<<
+ *
+ * property max_key:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.min_key.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":569
+ *
+ * property max_key:
+ * def __get__(self): # <<<<<<<<<<<<<<
+ * if self.num_records > 0:
+ * return _sha1_to_key(self.records[self.num_records-1].sha1)
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_7max_key___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_7max_key___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":570
+ * property max_key:
+ * def __get__(self):
+ * if self.num_records > 0: # <<<<<<<<<<<<<<
+ * return _sha1_to_key(self.records[self.num_records-1].sha1)
+ * return None
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records > 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":571
+ * def __get__(self):
+ * if self.num_records > 0:
+ * return _sha1_to_key(self.records[self.num_records-1].sha1) # <<<<<<<<<<<<<<
+ * return None
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = ((PyObject *)__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key((((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records[(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records - 1)]).sha1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":572
+ * if self.num_records > 0:
+ * return _sha1_to_key(self.records[self.num_records-1].sha1)
+ * return None # <<<<<<<<<<<<<<
+ *
+ * cdef StaticTuple _record_to_value_and_refs(self,
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.max_key.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
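+
+/* The min_key/max_key properties simply expose the key for the first and last
+ * sha1 in the sorted record array, or None when the node is empty. A Python
+ * sketch; the ('sha1:<hex>',) key shape mirrors what _sha1_to_key produces
+ * elsewhere in this module and should be read as an assumption here:
+ *
+ *   import binascii
+ *
+ *   def sha1_to_key(sha1_bin):
+ *       return ('sha1:' + binascii.hexlify(sha1_bin).decode('ascii'),)
+ *
+ *   def min_key(sha1s):          # sha1s: sorted list of 20-byte digests
+ *       return sha1_to_key(sha1s[0]) if sha1s else None
+ *
+ *   def max_key(sha1s):
+ *       return sha1_to_key(sha1s[-1]) if sha1s else None
+ */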
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":574
+ * return None
+ *
+ * cdef StaticTuple _record_to_value_and_refs(self, # <<<<<<<<<<<<<<
+ * gc_chk_sha1_record *record):
+ * """Extract the refs and value part of this record."""
+ */
+
+static StaticTuple *__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__record_to_value_and_refs(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_record) {
+ StaticTuple *__pyx_v_value_and_refs;
+ StaticTuple *__pyx_v_empty;
+ PyObject *__pyx_v_value;
+ StaticTuple *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("_record_to_value_and_refs");
+ __pyx_v_value_and_refs = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_empty = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_value = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":579
+ * cdef StaticTuple value_and_refs
+ * cdef StaticTuple empty
+ * value_and_refs = StaticTuple_New(2) # <<<<<<<<<<<<<<
+ * value = _format_record(record)
+ * Py_INCREF(value)
+ */
+ __pyx_t_1 = ((PyObject *)StaticTuple_New(2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_value_and_refs));
+ __pyx_v_value_and_refs = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":580
+ * cdef StaticTuple empty
+ * value_and_refs = StaticTuple_New(2)
+ * value = _format_record(record) # <<<<<<<<<<<<<<
+ * Py_INCREF(value)
+ * StaticTuple_SET_ITEM(value_and_refs, 0, value)
+ */
+ __pyx_t_1 = __pyx_f_6bzrlib_21_btree_serializer_pyx__format_record(__pyx_v_record); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":581
+ * value_and_refs = StaticTuple_New(2)
+ * value = _format_record(record)
+ * Py_INCREF(value) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(value_and_refs, 0, value)
+ * # Always empty refs
+ */
+ Py_INCREF(__pyx_v_value);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":582
+ * value = _format_record(record)
+ * Py_INCREF(value)
+ * StaticTuple_SET_ITEM(value_and_refs, 0, value) # <<<<<<<<<<<<<<
+ * # Always empty refs
+ * empty = StaticTuple_New(0)
+ */
+ StaticTuple_SET_ITEM(__pyx_v_value_and_refs, 0, __pyx_v_value);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":584
+ * StaticTuple_SET_ITEM(value_and_refs, 0, value)
+ * # Always empty refs
+ * empty = StaticTuple_New(0) # <<<<<<<<<<<<<<
+ * Py_INCREF(empty)
+ * StaticTuple_SET_ITEM(value_and_refs, 1, empty)
+ */
+ __pyx_t_1 = ((PyObject *)StaticTuple_New(0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_empty));
+ __pyx_v_empty = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":585
+ * # Always empty refs
+ * empty = StaticTuple_New(0)
+ * Py_INCREF(empty) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(value_and_refs, 1, empty)
+ * return value_and_refs
+ */
+ Py_INCREF(((PyObject *)__pyx_v_empty));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":586
+ * empty = StaticTuple_New(0)
+ * Py_INCREF(empty)
+ * StaticTuple_SET_ITEM(value_and_refs, 1, empty) # <<<<<<<<<<<<<<
+ * return value_and_refs
+ *
+ */
+ StaticTuple_SET_ITEM(__pyx_v_value_and_refs, 1, ((PyObject *)__pyx_v_empty));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":587
+ * Py_INCREF(empty)
+ * StaticTuple_SET_ITEM(value_and_refs, 1, empty)
+ * return value_and_refs # <<<<<<<<<<<<<<
+ *
+ * cdef StaticTuple _record_to_item(self, gc_chk_sha1_record *record):
+ */
+ __Pyx_XDECREF(((PyObject *)__pyx_r));
+ __Pyx_INCREF(((PyObject *)__pyx_v_value_and_refs));
+ __pyx_r = __pyx_v_value_and_refs;
+ goto __pyx_L0;
+
+ __pyx_r = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._record_to_value_and_refs");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_value_and_refs);
+ __Pyx_DECREF((PyObject *)__pyx_v_empty);
+ __Pyx_DECREF(__pyx_v_value);
+ __Pyx_XGIVEREF((PyObject *)__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":589
+ * return value_and_refs
+ *
+ * cdef StaticTuple _record_to_item(self, gc_chk_sha1_record *record): # <<<<<<<<<<<<<<
+ * """Turn a given record back into a fully fledged item.
+ * """
+ */
+
+static StaticTuple *__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__record_to_item(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_record) {
+ StaticTuple *__pyx_v_item;
+ StaticTuple *__pyx_v_key;
+ StaticTuple *__pyx_v_value_and_refs;
+ StaticTuple *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("_record_to_item");
+ __pyx_v_item = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_key = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_value_and_refs = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":596
+ * cdef StaticTuple value_and_refs
+ * cdef object value
+ * key = _sha1_to_key(record.sha1) # <<<<<<<<<<<<<<
+ * item = StaticTuple_New(2)
+ * Py_INCREF(key)
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key(__pyx_v_record->sha1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_key));
+ __pyx_v_key = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":597
+ * cdef object value
+ * key = _sha1_to_key(record.sha1)
+ * item = StaticTuple_New(2) # <<<<<<<<<<<<<<
+ * Py_INCREF(key)
+ * StaticTuple_SET_ITEM(item, 0, key)
+ */
+ __pyx_t_1 = ((PyObject *)StaticTuple_New(2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_item));
+ __pyx_v_item = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":598
+ * key = _sha1_to_key(record.sha1)
+ * item = StaticTuple_New(2)
+ * Py_INCREF(key) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(item, 0, key)
+ * value_and_refs = self._record_to_value_and_refs(record)
+ */
+ Py_INCREF(((PyObject *)__pyx_v_key));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":599
+ * item = StaticTuple_New(2)
+ * Py_INCREF(key)
+ * StaticTuple_SET_ITEM(item, 0, key) # <<<<<<<<<<<<<<
+ * value_and_refs = self._record_to_value_and_refs(record)
+ * Py_INCREF(value_and_refs)
+ */
+ StaticTuple_SET_ITEM(__pyx_v_item, 0, ((PyObject *)__pyx_v_key));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":600
+ * Py_INCREF(key)
+ * StaticTuple_SET_ITEM(item, 0, key)
+ * value_and_refs = self._record_to_value_and_refs(record) # <<<<<<<<<<<<<<
+ * Py_INCREF(value_and_refs)
+ * StaticTuple_SET_ITEM(item, 1, value_and_refs)
+ */
+ __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self->__pyx_vtab)->_record_to_value_and_refs(__pyx_v_self, __pyx_v_record)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_value_and_refs));
+ __pyx_v_value_and_refs = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":601
+ * StaticTuple_SET_ITEM(item, 0, key)
+ * value_and_refs = self._record_to_value_and_refs(record)
+ * Py_INCREF(value_and_refs) # <<<<<<<<<<<<<<
+ * StaticTuple_SET_ITEM(item, 1, value_and_refs)
+ * return item
+ */
+ Py_INCREF(((PyObject *)__pyx_v_value_and_refs));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":602
+ * value_and_refs = self._record_to_value_and_refs(record)
+ * Py_INCREF(value_and_refs)
+ * StaticTuple_SET_ITEM(item, 1, value_and_refs) # <<<<<<<<<<<<<<
+ * return item
+ *
+ */
+ StaticTuple_SET_ITEM(__pyx_v_item, 1, ((PyObject *)__pyx_v_value_and_refs));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":603
+ * Py_INCREF(value_and_refs)
+ * StaticTuple_SET_ITEM(item, 1, value_and_refs)
+ * return item # <<<<<<<<<<<<<<
+ *
+ * cdef gc_chk_sha1_record* _lookup_record(self, char *sha1) except? NULL:
+ */
+ __Pyx_XDECREF(((PyObject *)__pyx_r));
+ __Pyx_INCREF(((PyObject *)__pyx_v_item));
+ __pyx_r = __pyx_v_item;
+ goto __pyx_L0;
+
+ __pyx_r = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._record_to_item");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_item);
+ __Pyx_DECREF((PyObject *)__pyx_v_key);
+ __Pyx_DECREF((PyObject *)__pyx_v_value_and_refs);
+ __Pyx_XGIVEREF((PyObject *)__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
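+
+/* _record_to_value_and_refs and _record_to_item rebuild the public
+ * (key, (value, refs)) shape from a packed C record, with refs always the
+ * empty tuple for these CHK leaf nodes. A sketch using plain tuples in place
+ * of StaticTuple; format_record is a hypothetical stand-in for the module's
+ * _format_record, and sha1_to_key is the helper sketched above min_key:
+ *
+ *   def format_record(record):
+ *       # fields beyond the 20-byte sha1 carry the value payload
+ *       return ' '.join(str(field) for field in record[1:])
+ *
+ *   def record_to_value_and_refs(record):
+ *       return (format_record(record), ())
+ *
+ *   def record_to_item(record, sha1_to_key):
+ *       return (sha1_to_key(record[0]), record_to_value_and_refs(record))
+ */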
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":605
+ * return item
+ *
+ * cdef gc_chk_sha1_record* _lookup_record(self, char *sha1) except? NULL: # <<<<<<<<<<<<<<
+ * """Find a gc_chk_sha1_record that matches the sha1 supplied."""
+ * cdef int lo, hi, mid, the_cmp
+ */
+
+static __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__lookup_record(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, char *__pyx_v_sha1) {
+ int __pyx_v_lo;
+ int __pyx_v_hi;
+ int __pyx_v_mid;
+ int __pyx_v_the_cmp;
+ int __pyx_v_offset;
+ long __pyx_v_local_n_cmp;
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_r;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_lookup_record");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":619
+ * # the offset array dropped us from 23us to 20us and 156 comparisons
+ * # (1.3/key)
+ * offset = self._offset_for_sha1(sha1) # <<<<<<<<<<<<<<
+ * lo = self.offsets[offset]
+ * hi = self.offsets[offset+1]
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self->__pyx_vtab)->_offset_for_sha1(__pyx_v_self, __pyx_v_sha1); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_offset = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":620
+ * # (1.3/key)
+ * offset = self._offset_for_sha1(sha1)
+ * lo = self.offsets[offset] # <<<<<<<<<<<<<<
+ * hi = self.offsets[offset+1]
+ * if hi == 255:
+ */
+ __pyx_v_lo = (__pyx_v_self->offsets[__pyx_v_offset]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":621
+ * offset = self._offset_for_sha1(sha1)
+ * lo = self.offsets[offset]
+ * hi = self.offsets[offset+1] # <<<<<<<<<<<<<<
+ * if hi == 255:
+ * # if hi == 255 that means we potentially ran off the end of the
+ */
+ __pyx_v_hi = (__pyx_v_self->offsets[(__pyx_v_offset + 1)]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":622
+ * lo = self.offsets[offset]
+ * hi = self.offsets[offset+1]
+ * if hi == 255: # <<<<<<<<<<<<<<
+ * # if hi == 255 that means we potentially ran off the end of the
+ * # list, so push it up to num_records
+ */
+ __pyx_t_2 = (__pyx_v_hi == 255);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":627
+ * # note that if 'lo' == 255, that is ok, because we can start
+ * # searching from that part of the list.
+ * hi = self.num_records # <<<<<<<<<<<<<<
+ * local_n_cmp = 0
+ * while lo < hi:
+ */
+ __pyx_v_hi = __pyx_v_self->num_records;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":628
+ * # searching from that part of the list.
+ * hi = self.num_records
+ * local_n_cmp = 0 # <<<<<<<<<<<<<<
+ * while lo < hi:
+ * mid = (lo + hi) / 2
+ */
+ __pyx_v_local_n_cmp = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":629
+ * hi = self.num_records
+ * local_n_cmp = 0
+ * while lo < hi: # <<<<<<<<<<<<<<
+ * mid = (lo + hi) / 2
+ * the_cmp = memcmp(self.records[mid].sha1, sha1, 20)
+ */
+ while (1) {
+ __pyx_t_2 = (__pyx_v_lo < __pyx_v_hi);
+ if (!__pyx_t_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":630
+ * local_n_cmp = 0
+ * while lo < hi:
+ * mid = (lo + hi) / 2 # <<<<<<<<<<<<<<
+ * the_cmp = memcmp(self.records[mid].sha1, sha1, 20)
+ * if the_cmp == 0:
+ */
+ __pyx_v_mid = __Pyx_div_long((__pyx_v_lo + __pyx_v_hi), 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":631
+ * while lo < hi:
+ * mid = (lo + hi) / 2
+ * the_cmp = memcmp(self.records[mid].sha1, sha1, 20) # <<<<<<<<<<<<<<
+ * if the_cmp == 0:
+ * return &self.records[mid]
+ */
+ __pyx_v_the_cmp = memcmp((__pyx_v_self->records[__pyx_v_mid]).sha1, __pyx_v_sha1, 20);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":632
+ * mid = (lo + hi) / 2
+ * the_cmp = memcmp(self.records[mid].sha1, sha1, 20)
+ * if the_cmp == 0: # <<<<<<<<<<<<<<
+ * return &self.records[mid]
+ * elif the_cmp < 0:
+ */
+ __pyx_t_2 = (__pyx_v_the_cmp == 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":633
+ * the_cmp = memcmp(self.records[mid].sha1, sha1, 20)
+ * if the_cmp == 0:
+ * return &self.records[mid] # <<<<<<<<<<<<<<
+ * elif the_cmp < 0:
+ * lo = mid + 1
+ */
+ __pyx_r = (&(__pyx_v_self->records[__pyx_v_mid]));
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":634
+ * if the_cmp == 0:
+ * return &self.records[mid]
+ * elif the_cmp < 0: # <<<<<<<<<<<<<<
+ * lo = mid + 1
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_the_cmp < 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":635
+ * return &self.records[mid]
+ * elif the_cmp < 0:
+ * lo = mid + 1 # <<<<<<<<<<<<<<
+ * else:
+ * hi = mid
+ */
+ __pyx_v_lo = (__pyx_v_mid + 1);
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":637
+ * lo = mid + 1
+ * else:
+ * hi = mid # <<<<<<<<<<<<<<
+ * return NULL
+ *
+ */
+ __pyx_v_hi = __pyx_v_mid;
+ }
+ __pyx_L6:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":638
+ * else:
+ * hi = mid
+ * return NULL # <<<<<<<<<<<<<<
+ *
+ * def __contains__(self, key):
+ */
+ __pyx_r = NULL;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._lookup_record");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
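+
+/* The lookup first narrows the search window with the 257-entry offsets table
+ * (indexed by the "interesting" byte of the sha1, with 255 marking a window
+ * that may run to the end of the record list), then binary-searches with a
+ * byte comparison. A Python sketch; offset_for_sha1 stands in for the C
+ * helper _offset_for_sha1:
+ *
+ *   def lookup_record(sha1s, offsets, offset_for_sha1, target):
+ *       # sha1s: sorted list of 20-byte digests; offsets: list of 257 ints
+ *       bucket = offset_for_sha1(target)
+ *       lo, hi = offsets[bucket], offsets[bucket + 1]
+ *       if hi == 255:              # sentinel: bucket runs to num_records
+ *           hi = len(sha1s)
+ *       while lo < hi:
+ *           mid = (lo + hi) // 2
+ *           if sha1s[mid] == target:
+ *               return mid         # the C code returns &self.records[mid]
+ *           elif sha1s[mid] < target:
+ *               lo = mid + 1
+ *           else:
+ *               hi = mid
+ *       return None
+ */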
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":640
+ * return NULL
+ *
+ * def __contains__(self, key): # <<<<<<<<<<<<<<
+ * cdef char sha1[20]
+ * cdef gc_chk_sha1_record *record
+ */
+
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
+static int __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
+ char __pyx_v_sha1[20];
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_record;
+ int __pyx_r;
+ int __pyx_t_1;
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_t_2;
+ int __pyx_t_3;
+ __Pyx_RefNannySetupContext("__contains__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":643
+ * cdef char sha1[20]
+ * cdef gc_chk_sha1_record *record
+ * if _key_to_sha1(key, sha1): # <<<<<<<<<<<<<<
+ * # If it isn't a sha1 key, then it won't be in this leaf node
+ * record = self._lookup_record(sha1)
+ */
+ __pyx_t_1 = __pyx_f_6bzrlib_21_btree_serializer_pyx__key_to_sha1(__pyx_v_key, __pyx_v_sha1);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":645
+ * if _key_to_sha1(key, sha1):
+ * # If it isn't a sha1 key, then it won't be in this leaf node
+ * record = self._lookup_record(sha1) # <<<<<<<<<<<<<<
+ * if record != NULL:
+ * self.last_key = key
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->__pyx_vtab)->_lookup_record(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self), __pyx_v_sha1); if (unlikely(__pyx_t_2 == NULL && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_record = __pyx_t_2;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":646
+ * # If it isn't a sha1 key, then it won't be in this leaf node
+ * record = self._lookup_record(sha1)
+ * if record != NULL: # <<<<<<<<<<<<<<
+ * self.last_key = key
+ * self.last_record = record
+ */
+ __pyx_t_3 = (__pyx_v_record != NULL);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":647
+ * record = self._lookup_record(sha1)
+ * if record != NULL:
+ * self.last_key = key # <<<<<<<<<<<<<<
+ * self.last_record = record
+ * return True
+ */
+ __Pyx_INCREF(__pyx_v_key);
+ __Pyx_GIVEREF(__pyx_v_key);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key = __pyx_v_key;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":648
+ * if record != NULL:
+ * self.last_key = key
+ * self.last_record = record # <<<<<<<<<<<<<<
+ * return True
+ * return False
+ */
+ ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_record = __pyx_v_record;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":649
+ * self.last_key = key
+ * self.last_record = record
+ * return True # <<<<<<<<<<<<<<
+ * return False
+ *
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":650
+ * self.last_record = record
+ * return True
+ * return False # <<<<<<<<<<<<<<
+ *
+ * def __getitem__(self, key):
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.__contains__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
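+
+/* __contains__ converts the key to a raw 20-byte sha1, does the lookup, and
+ * caches a hit in last_key/last_record so that an immediately following
+ * __getitem__ on the same key object can skip the search. A sketch, with
+ * key_to_sha1 and lookup_record standing in for the C helpers:
+ *
+ *   def contains(node, key, key_to_sha1, lookup_record):
+ *       sha1 = key_to_sha1(key)
+ *       if sha1 is None:           # not a sha1 key: cannot be in this leaf
+ *           return False
+ *       record = lookup_record(sha1)
+ *       if record is None:
+ *           return False
+ *       node.last_key = key
+ *       node.last_record = record
+ *       return True
+ */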
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":652
+ * return False
+ *
+ * def __getitem__(self, key): # <<<<<<<<<<<<<<
+ * cdef char sha1[20]
+ * cdef gc_chk_sha1_record *record
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
+ char __pyx_v_sha1[20];
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_record;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ __Pyx_RefNannySetupContext("__getitem__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":655
+ * cdef char sha1[20]
+ * cdef gc_chk_sha1_record *record
+ * record = NULL # <<<<<<<<<<<<<<
+ * if self.last_record != NULL and key is self.last_key:
+ * record = self.last_record
+ */
+ __pyx_v_record = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":656
+ * cdef gc_chk_sha1_record *record
+ * record = NULL
+ * if self.last_record != NULL and key is self.last_key: # <<<<<<<<<<<<<<
+ * record = self.last_record
+ * elif _key_to_sha1(key, sha1):
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_record != NULL);
+ if (__pyx_t_1) {
+ __pyx_t_2 = (__pyx_v_key == ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_key);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":657
+ * record = NULL
+ * if self.last_record != NULL and key is self.last_key:
+ * record = self.last_record # <<<<<<<<<<<<<<
+ * elif _key_to_sha1(key, sha1):
+ * record = self._lookup_record(sha1)
+ */
+ __pyx_v_record = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->last_record;
+ goto __pyx_L5;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":658
+ * if self.last_record != NULL and key is self.last_key:
+ * record = self.last_record
+ * elif _key_to_sha1(key, sha1): # <<<<<<<<<<<<<<
+ * record = self._lookup_record(sha1)
+ * if record == NULL:
+ */
+ __pyx_t_4 = __pyx_f_6bzrlib_21_btree_serializer_pyx__key_to_sha1(__pyx_v_key, __pyx_v_sha1);
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":659
+ * record = self.last_record
+ * elif _key_to_sha1(key, sha1):
+ * record = self._lookup_record(sha1) # <<<<<<<<<<<<<<
+ * if record == NULL:
+ * raise KeyError('key %r is not present' % (key,))
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->__pyx_vtab)->_lookup_record(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self), __pyx_v_sha1); if (unlikely(__pyx_t_5 == NULL && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_record = __pyx_t_5;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":660
+ * elif _key_to_sha1(key, sha1):
+ * record = self._lookup_record(sha1)
+ * if record == NULL: # <<<<<<<<<<<<<<
+ * raise KeyError('key %r is not present' % (key,))
+ * return self._record_to_value_and_refs(record)
+ */
+ __pyx_t_3 = (__pyx_v_record == NULL);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":661
+ * record = self._lookup_record(sha1)
+ * if record == NULL:
+ * raise KeyError('key %r is not present' % (key,)) # <<<<<<<<<<<<<<
+ * return self._record_to_value_and_refs(record)
+ *
+ */
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_key);
+ __Pyx_GIVEREF(__pyx_v_key);
+ __pyx_t_7 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_18), __pyx_t_6); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_7));
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_7));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_7));
+ __pyx_t_7 = 0;
+ __pyx_t_7 = PyObject_Call(__pyx_builtin_KeyError, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_Raise(__pyx_t_7, 0, 0);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":662
+ * if record == NULL:
+ * raise KeyError('key %r is not present' % (key,))
+ * return self._record_to_value_and_refs(record) # <<<<<<<<<<<<<<
+ *
+ * def __len__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_7 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->__pyx_vtab)->_record_to_value_and_refs(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self), __pyx_v_record)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_r = __pyx_t_7;
+ __pyx_t_7 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.__getitem__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
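+
+/* __getitem__ reuses the record cached by __contains__ when it is handed the
+ * very same key object (an identity check, not equality), otherwise it
+ * repeats the lookup and raises KeyError on a miss. A sketch, reusing the
+ * stand-in helpers from the contains sketch above:
+ *
+ *   def getitem(node, key, key_to_sha1, lookup_record, record_to_value_and_refs):
+ *       record = None
+ *       if node.last_record is not None and key is node.last_key:
+ *           record = node.last_record      # fast path after "key in node"
+ *       else:
+ *           sha1 = key_to_sha1(key)
+ *           if sha1 is not None:
+ *               record = lookup_record(sha1)
+ *       if record is None:
+ *           raise KeyError('key %r is not present' % (key,))
+ *       return record_to_value_and_refs(record)
+ */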
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":664
+ * return self._record_to_value_and_refs(record)
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * return self.num_records
+ *
+ */
+
+static Py_ssize_t __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___len__(PyObject *__pyx_v_self); /*proto*/
+static Py_ssize_t __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___len__(PyObject *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannySetupContext("__len__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":665
+ *
+ * def __len__(self):
+ * return self.num_records # <<<<<<<<<<<<<<
+ *
+ * def all_keys(self):
+ */
+ __pyx_r = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":667
+ * return self.num_records
+ *
+ * def all_keys(self): # <<<<<<<<<<<<<<
+ * cdef int i
+ * result = []
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_all_keys(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_all_keys(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ int __pyx_v_i;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ __Pyx_RefNannySetupContext("all_keys");
+ __pyx_v_result = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":669
+ * def all_keys(self):
+ * cdef int i
+ * result = [] # <<<<<<<<<<<<<<
+ * for i from 0 <= i < self.num_records:
+ * PyList_Append(result, _sha1_to_key(self.records[i].sha1))
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 669; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(((PyObject *)__pyx_v_result));
+ __pyx_v_result = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":670
+ * cdef int i
+ * result = []
+ * for i from 0 <= i < self.num_records: # <<<<<<<<<<<<<<
+ * PyList_Append(result, _sha1_to_key(self.records[i].sha1))
+ * return result
+ */
+ __pyx_t_2 = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_2; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":671
+ * result = []
+ * for i from 0 <= i < self.num_records:
+ * PyList_Append(result, _sha1_to_key(self.records[i].sha1)) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_key((((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records[__pyx_v_i]).sha1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 671; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyList_Append(((PyObject *)__pyx_v_result), __pyx_t_1); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 671; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":672
+ * for i from 0 <= i < self.num_records:
+ * PyList_Append(result, _sha1_to_key(self.records[i].sha1))
+ * return result # <<<<<<<<<<<<<<
+ *
+ * def all_items(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_result));
+ __pyx_r = ((PyObject *)__pyx_v_result);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.all_keys");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":674
+ * return result
+ *
+ * def all_items(self): # <<<<<<<<<<<<<<
+ * cdef int i
+ * result = []
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_all_items(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_all_items(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ int __pyx_v_i;
+ PyObject *__pyx_v_result;
+ StaticTuple *__pyx_v_item;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ __Pyx_RefNannySetupContext("all_items");
+ __pyx_v_result = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_item = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":676
+ * def all_items(self):
+ * cdef int i
+ * result = [] # <<<<<<<<<<<<<<
+ * for i from 0 <= i < self.num_records:
+ * item = self._record_to_item(&self.records[i])
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(((PyObject *)__pyx_v_result));
+ __pyx_v_result = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":677
+ * cdef int i
+ * result = []
+ * for i from 0 <= i < self.num_records: # <<<<<<<<<<<<<<
+ * item = self._record_to_item(&self.records[i])
+ * PyList_Append(result, item)
+ */
+ __pyx_t_2 = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->num_records;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_2; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":678
+ * result = []
+ * for i from 0 <= i < self.num_records:
+ * item = self._record_to_item(&self.records[i]) # <<<<<<<<<<<<<<
+ * PyList_Append(result, item)
+ * return result
+ */
+ __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->__pyx_vtab)->_record_to_item(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self), (&(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->records[__pyx_v_i])))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_v_item));
+ __pyx_v_item = ((StaticTuple *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":679
+ * for i from 0 <= i < self.num_records:
+ * item = self._record_to_item(&self.records[i])
+ * PyList_Append(result, item) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ __pyx_t_3 = PyList_Append(((PyObject *)__pyx_v_result), ((PyObject *)__pyx_v_item)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":680
+ * item = self._record_to_item(&self.records[i])
+ * PyList_Append(result, item)
+ * return result # <<<<<<<<<<<<<<
+ *
+ * cdef int _count_records(self, char *c_content, char *c_end): # cannot_raise
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_result));
+ __pyx_r = ((PyObject *)__pyx_v_result);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode.all_items");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_DECREF((PyObject *)__pyx_v_item);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":682
+ * return result
+ *
+ * cdef int _count_records(self, char *c_content, char *c_end): # cannot_raise # <<<<<<<<<<<<<<
+ * """Count how many records are in this section."""
+ * cdef char *c_cur
+ */
+
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__count_records(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, char *__pyx_v_c_content, char *__pyx_v_c_end) {
+ char *__pyx_v_c_cur;
+ int __pyx_v_num_records;
+ int __pyx_r;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ __Pyx_RefNannySetupContext("_count_records");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":687
+ * cdef int num_records
+ *
+ * c_cur = c_content # <<<<<<<<<<<<<<
+ * num_records = 0
+ * while c_cur != NULL and c_cur < c_end:
+ */
+ __pyx_v_c_cur = __pyx_v_c_content;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":688
+ *
+ * c_cur = c_content
+ * num_records = 0 # <<<<<<<<<<<<<<
+ * while c_cur != NULL and c_cur < c_end:
+ * c_cur = <char *>memchr(c_cur, c'\n', c_end - c_cur);
+ */
+ __pyx_v_num_records = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":689
+ * c_cur = c_content
+ * num_records = 0
+ * while c_cur != NULL and c_cur < c_end: # <<<<<<<<<<<<<<
+ * c_cur = <char *>memchr(c_cur, c'\n', c_end - c_cur);
+ * if c_cur == NULL:
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v_c_cur != NULL);
+ if (__pyx_t_1) {
+ __pyx_t_2 = (__pyx_v_c_cur < __pyx_v_c_end);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (!__pyx_t_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":690
+ * num_records = 0
+ * while c_cur != NULL and c_cur < c_end:
+ * c_cur = <char *>memchr(c_cur, c'\n', c_end - c_cur); # <<<<<<<<<<<<<<
+ * if c_cur == NULL:
+ * break
+ */
+ __pyx_v_c_cur = ((char *)memchr(__pyx_v_c_cur, '\n', (__pyx_v_c_end - __pyx_v_c_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":691
+ * while c_cur != NULL and c_cur < c_end:
+ * c_cur = <char *>memchr(c_cur, c'\n', c_end - c_cur);
+ * if c_cur == NULL: # <<<<<<<<<<<<<<
+ * break
+ * c_cur = c_cur + 1
+ */
+ __pyx_t_3 = (__pyx_v_c_cur == NULL);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":692
+ * c_cur = <char *>memchr(c_cur, c'\n', c_end - c_cur);
+ * if c_cur == NULL:
+ * break # <<<<<<<<<<<<<<
+ * c_cur = c_cur + 1
+ * num_records = num_records + 1
+ */
+ goto __pyx_L4_break;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":693
+ * if c_cur == NULL:
+ * break
+ * c_cur = c_cur + 1 # <<<<<<<<<<<<<<
+ * num_records = num_records + 1
+ * return num_records
+ */
+ __pyx_v_c_cur = (__pyx_v_c_cur + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":694
+ * break
+ * c_cur = c_cur + 1
+ * num_records = num_records + 1 # <<<<<<<<<<<<<<
+ * return num_records
+ *
+ */
+ __pyx_v_num_records = (__pyx_v_num_records + 1);
+ }
+ __pyx_L4_break:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":695
+ * c_cur = c_cur + 1
+ * num_records = num_records + 1
+ * return num_records # <<<<<<<<<<<<<<
+ *
+ * cdef _parse_bytes(self, bytes):
+ */
+ __pyx_r = __pyx_v_num_records;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
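+
+/* _count_records is the first parsing pass: it counts newline-terminated
+ * records in the body so the record array can be allocated up front. The
+ * memchr scan maps to bytes.find in a Python sketch:
+ *
+ *   def count_records(data, start, end):
+ *       count = 0
+ *       pos = start
+ *       while pos < end:
+ *           nl = data.find(b'\n', pos, end)
+ *           if nl == -1:
+ *               break
+ *           pos = nl + 1
+ *           count += 1
+ *       return count
+ */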
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":697
+ * return num_records
+ *
+ * cdef _parse_bytes(self, bytes): # <<<<<<<<<<<<<<
+ * """Parse the string 'bytes' into content."""
+ * cdef char *c_bytes
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__parse_bytes(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, PyObject *__pyx_v_bytes) {
+ char *__pyx_v_c_bytes;
+ char *__pyx_v_c_cur;
+ char *__pyx_v_c_end;
+ Py_ssize_t __pyx_v_n_bytes;
+ int __pyx_v_num_records;
+ int __pyx_v_entry;
+ __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_cur_record;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ char *__pyx_t_8;
+ __Pyx_RefNannySetupContext("_parse_bytes");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":707
+ * cdef gc_chk_sha1_record *cur_record
+ *
+ * if not PyString_CheckExact(bytes): # <<<<<<<<<<<<<<
+ * raise TypeError('We only support parsing plain 8-bit strings.')
+ * # Pass 1, count how many records there will be
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_bytes));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":708
+ *
+ * if not PyString_CheckExact(bytes):
+ * raise TypeError('We only support parsing plain 8-bit strings.') # <<<<<<<<<<<<<<
+ * # Pass 1, count how many records there will be
+ * n_bytes = PyString_GET_SIZE(bytes)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_19));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_19));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_19));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":710
+ * raise TypeError('We only support parsing plain 8-bit strings.')
+ * # Pass 1, count how many records there will be
+ * n_bytes = PyString_GET_SIZE(bytes) # <<<<<<<<<<<<<<
+ * c_bytes = PyString_AS_STRING(bytes)
+ * c_end = c_bytes + n_bytes
+ */
+ __pyx_v_n_bytes = PyString_GET_SIZE(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":711
+ * # Pass 1, count how many records there will be
+ * n_bytes = PyString_GET_SIZE(bytes)
+ * c_bytes = PyString_AS_STRING(bytes) # <<<<<<<<<<<<<<
+ * c_end = c_bytes + n_bytes
+ * if strncmp(c_bytes, 'type=leaf\n', 10):
+ */
+ __pyx_v_c_bytes = PyString_AS_STRING(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":712
+ * n_bytes = PyString_GET_SIZE(bytes)
+ * c_bytes = PyString_AS_STRING(bytes)
+ * c_end = c_bytes + n_bytes # <<<<<<<<<<<<<<
+ * if strncmp(c_bytes, 'type=leaf\n', 10):
+ * raise ValueError("bytes did not start with 'type=leaf\\n': %r"
+ */
+ __pyx_v_c_end = (__pyx_v_c_bytes + __pyx_v_n_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":713
+ * c_bytes = PyString_AS_STRING(bytes)
+ * c_end = c_bytes + n_bytes
+ * if strncmp(c_bytes, 'type=leaf\n', 10): # <<<<<<<<<<<<<<
+ * raise ValueError("bytes did not start with 'type=leaf\\n': %r"
+ * % (bytes[:10],))
+ */
+ __pyx_t_4 = strncmp(__pyx_v_c_bytes, __pyx_k_20, 10);
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":715
+ * if strncmp(c_bytes, 'type=leaf\n', 10):
+ * raise ValueError("bytes did not start with 'type=leaf\\n': %r"
+ * % (bytes[:10],)) # <<<<<<<<<<<<<<
+ * c_cur = c_bytes + 10
+ * num_records = self._count_records(c_cur, c_end)
+ */
+ __pyx_t_3 = PySequence_GetSlice(__pyx_v_bytes, 0, 10); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_21), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":716
+ * raise ValueError("bytes did not start with 'type=leaf\\n': %r"
+ * % (bytes[:10],))
+ * c_cur = c_bytes + 10 # <<<<<<<<<<<<<<
+ * num_records = self._count_records(c_cur, c_end)
+ * # Now allocate the memory for these items, and go to town
+ */
+ __pyx_v_c_cur = (__pyx_v_c_bytes + 10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":717
+ * % (bytes[:10],))
+ * c_cur = c_bytes + 10
+ * num_records = self._count_records(c_cur, c_end) # <<<<<<<<<<<<<<
+ * # Now allocate the memory for these items, and go to town
+ * self.records = <gc_chk_sha1_record*>PyMem_Malloc(num_records *
+ */
+ __pyx_v_num_records = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self->__pyx_vtab)->_count_records(__pyx_v_self, __pyx_v_c_cur, __pyx_v_c_end);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":719
+ * num_records = self._count_records(c_cur, c_end)
+ * # Now allocate the memory for these items, and go to town
+ * self.records = <gc_chk_sha1_record*>PyMem_Malloc(num_records * # <<<<<<<<<<<<<<
+ * (sizeof(unsigned short) + sizeof(gc_chk_sha1_record)))
+ * self.num_records = num_records
+ */
+ __pyx_v_self->records = ((__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *)PyMem_Malloc((__pyx_v_num_records * ((sizeof(unsigned short)) + (sizeof(__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record))))));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":721
+ * self.records = <gc_chk_sha1_record*>PyMem_Malloc(num_records *
+ * (sizeof(unsigned short) + sizeof(gc_chk_sha1_record)))
+ * self.num_records = num_records # <<<<<<<<<<<<<<
+ * cur_record = self.records
+ * entry = 0
+ */
+ __pyx_v_self->num_records = __pyx_v_num_records;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":722
+ * (sizeof(unsigned short) + sizeof(gc_chk_sha1_record)))
+ * self.num_records = num_records
+ * cur_record = self.records # <<<<<<<<<<<<<<
+ * entry = 0
+ * while c_cur != NULL and c_cur < c_end and entry < num_records:
+ */
+ __pyx_v_cur_record = __pyx_v_self->records;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":723
+ * self.num_records = num_records
+ * cur_record = self.records
+ * entry = 0 # <<<<<<<<<<<<<<
+ * while c_cur != NULL and c_cur < c_end and entry < num_records:
+ * c_cur = self._parse_one_entry(c_cur, c_end, cur_record)
+ */
+ __pyx_v_entry = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":724
+ * cur_record = self.records
+ * entry = 0
+ * while c_cur != NULL and c_cur < c_end and entry < num_records: # <<<<<<<<<<<<<<
+ * c_cur = self._parse_one_entry(c_cur, c_end, cur_record)
+ * cur_record = cur_record + 1
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v_c_cur != NULL);
+ if (__pyx_t_1) {
+ __pyx_t_5 = (__pyx_v_c_cur < __pyx_v_c_end);
+ if (__pyx_t_5) {
+ __pyx_t_6 = (__pyx_v_entry < __pyx_v_num_records);
+ __pyx_t_7 = __pyx_t_6;
+ } else {
+ __pyx_t_7 = __pyx_t_5;
+ }
+ __pyx_t_5 = __pyx_t_7;
+ } else {
+ __pyx_t_5 = __pyx_t_1;
+ }
+ if (!__pyx_t_5) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":725
+ * entry = 0
+ * while c_cur != NULL and c_cur < c_end and entry < num_records:
+ * c_cur = self._parse_one_entry(c_cur, c_end, cur_record) # <<<<<<<<<<<<<<
+ * cur_record = cur_record + 1
+ * entry = entry + 1
+ */
+ __pyx_t_8 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self->__pyx_vtab)->_parse_one_entry(__pyx_v_self, __pyx_v_c_cur, __pyx_v_c_end, __pyx_v_cur_record); if (unlikely(__pyx_t_8 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_c_cur = __pyx_t_8;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":726
+ * while c_cur != NULL and c_cur < c_end and entry < num_records:
+ * c_cur = self._parse_one_entry(c_cur, c_end, cur_record)
+ * cur_record = cur_record + 1 # <<<<<<<<<<<<<<
+ * entry = entry + 1
+ * if (entry != self.num_records
+ */
+ __pyx_v_cur_record = (__pyx_v_cur_record + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":727
+ * c_cur = self._parse_one_entry(c_cur, c_end, cur_record)
+ * cur_record = cur_record + 1
+ * entry = entry + 1 # <<<<<<<<<<<<<<
+ * if (entry != self.num_records
+ * or c_cur != c_end
+ */
+ __pyx_v_entry = (__pyx_v_entry + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":728
+ * cur_record = cur_record + 1
+ * entry = entry + 1
+ * if (entry != self.num_records # <<<<<<<<<<<<<<
+ * or c_cur != c_end
+ * or cur_record != self.records + self.num_records):
+ */
+ __pyx_t_5 = (__pyx_v_entry != __pyx_v_self->num_records);
+ if (!__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":730
+ * if (entry != self.num_records
+ * or c_cur != c_end
+ * or cur_record != self.records + self.num_records): # <<<<<<<<<<<<<<
+ * raise ValueError('Something went wrong while parsing.')
+ * # Pass 3: build the offset map
+ */
+ __pyx_t_1 = (__pyx_v_c_cur != __pyx_v_c_end);
+ if (!__pyx_t_1) {
+ __pyx_t_7 = (__pyx_v_cur_record != (__pyx_v_self->records + __pyx_v_self->num_records));
+ __pyx_t_6 = __pyx_t_7;
+ } else {
+ __pyx_t_6 = __pyx_t_1;
+ }
+ __pyx_t_1 = __pyx_t_6;
+ } else {
+ __pyx_t_1 = __pyx_t_5;
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":731
+ * or c_cur != c_end
+ * or cur_record != self.records + self.num_records):
+ * raise ValueError('Something went wrong while parsing.') # <<<<<<<<<<<<<<
+ * # Pass 3: build the offset map
+ * self._compute_common()
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_22));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_22));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_22));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":733
+ * raise ValueError('Something went wrong while parsing.')
+ * # Pass 3: build the offset map
+ * self._compute_common() # <<<<<<<<<<<<<<
+ *
+ * cdef char *_parse_one_entry(self, char *c_cur, char *c_end,
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self->__pyx_vtab)->_compute_common(__pyx_v_self); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._parse_bytes");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
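+/* _parse_one_entry (below) consumes a single record from the leaf body.  As
+ * reconstructed from the checks it performs, each record is laid out as:
+ *
+ *   sha1:<40 hex chars>\0\0<block_offset> <block_length> <record_start> <record_end>\n
+ *
+ * i.e. a 'sha1:' prefix, the hex digest terminated by two NUL bytes, then
+ * four space-separated decimal fields ending with a newline.  The hex digest
+ * is unpacked into cur_record.sha1 via _unhexlify_sha1, block_offset is read
+ * with strtoll and the remaining three fields with strtoul; any deviation
+ * raises ValueError.  The return value is the cursor positioned just past
+ * the trailing newline, ready for the next record.
+ */
+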
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":735
+ * self._compute_common()
+ *
+ * cdef char *_parse_one_entry(self, char *c_cur, char *c_end, # <<<<<<<<<<<<<<
+ * gc_chk_sha1_record *cur_record) except NULL:
+ * """Read a single sha record from the bytes.
+ */
+
+static char *__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__parse_one_entry(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, char *__pyx_v_c_cur, char *__pyx_v_c_end, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *__pyx_v_cur_record) {
+ char *__pyx_v_c_next;
+ char *__pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ __Pyx_RefNannySetupContext("_parse_one_entry");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":742
+ * """
+ * cdef char *c_next
+ * if strncmp(c_cur, 'sha1:', 5): # <<<<<<<<<<<<<<
+ * raise ValueError('line did not start with sha1: %r'
+ * % (safe_string_from_size(c_cur, 10),))
+ */
+ __pyx_t_1 = strncmp(__pyx_v_c_cur, __pyx_k_3, 5);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":744
+ * if strncmp(c_cur, 'sha1:', 5):
+ * raise ValueError('line did not start with sha1: %r'
+ * % (safe_string_from_size(c_cur, 10),)) # <<<<<<<<<<<<<<
+ * c_cur = c_cur + 5
+ * c_next = <char *>memchr(c_cur, c'\0', c_end - c_cur)
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_21_btree_serializer_pyx_safe_string_from_size(__pyx_v_c_cur, 10); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 744; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 744; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_23), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 744; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":745
+ * raise ValueError('line did not start with sha1: %r'
+ * % (safe_string_from_size(c_cur, 10),))
+ * c_cur = c_cur + 5 # <<<<<<<<<<<<<<
+ * c_next = <char *>memchr(c_cur, c'\0', c_end - c_cur)
+ * if c_next == NULL or (c_next - c_cur != 40):
+ */
+ __pyx_v_c_cur = (__pyx_v_c_cur + 5);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":746
+ * % (safe_string_from_size(c_cur, 10),))
+ * c_cur = c_cur + 5
+ * c_next = <char *>memchr(c_cur, c'\0', c_end - c_cur) # <<<<<<<<<<<<<<
+ * if c_next == NULL or (c_next - c_cur != 40):
+ * raise ValueError('Line did not contain 40 hex bytes')
+ */
+ __pyx_v_c_next = ((char *)memchr(__pyx_v_c_cur, '\x00', (__pyx_v_c_end - __pyx_v_c_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":747
+ * c_cur = c_cur + 5
+ * c_next = <char *>memchr(c_cur, c'\0', c_end - c_cur)
+ * if c_next == NULL or (c_next - c_cur != 40): # <<<<<<<<<<<<<<
+ * raise ValueError('Line did not contain 40 hex bytes')
+ * if not _unhexlify_sha1(c_cur, cur_record.sha1):
+ */
+ __pyx_t_4 = (__pyx_v_c_next == NULL);
+ if (!__pyx_t_4) {
+ __pyx_t_5 = ((__pyx_v_c_next - __pyx_v_c_cur) != 40);
+ __pyx_t_6 = __pyx_t_5;
+ } else {
+ __pyx_t_6 = __pyx_t_4;
+ }
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":748
+ * c_next = <char *>memchr(c_cur, c'\0', c_end - c_cur)
+ * if c_next == NULL or (c_next - c_cur != 40):
+ * raise ValueError('Line did not contain 40 hex bytes') # <<<<<<<<<<<<<<
+ * if not _unhexlify_sha1(c_cur, cur_record.sha1):
+ * raise ValueError('We failed to unhexlify')
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_24));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_24));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_24));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":749
+ * if c_next == NULL or (c_next - c_cur != 40):
+ * raise ValueError('Line did not contain 40 hex bytes')
+ * if not _unhexlify_sha1(c_cur, cur_record.sha1): # <<<<<<<<<<<<<<
+ * raise ValueError('We failed to unhexlify')
+ * c_cur = c_next + 1
+ */
+ __pyx_t_6 = (!__pyx_f_6bzrlib_21_btree_serializer_pyx__unhexlify_sha1(__pyx_v_c_cur, __pyx_v_cur_record->sha1));
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":750
+ * raise ValueError('Line did not contain 40 hex bytes')
+ * if not _unhexlify_sha1(c_cur, cur_record.sha1):
+ * raise ValueError('We failed to unhexlify') # <<<<<<<<<<<<<<
+ * c_cur = c_next + 1
+ * if c_cur[0] != c'\0':
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_25));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_25));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_25));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 750; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":751
+ * if not _unhexlify_sha1(c_cur, cur_record.sha1):
+ * raise ValueError('We failed to unhexlify')
+ * c_cur = c_next + 1 # <<<<<<<<<<<<<<
+ * if c_cur[0] != c'\0':
+ * raise ValueError('only 1 null, not 2 as expected')
+ */
+ __pyx_v_c_cur = (__pyx_v_c_next + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":752
+ * raise ValueError('We failed to unhexlify')
+ * c_cur = c_next + 1
+ * if c_cur[0] != c'\0': # <<<<<<<<<<<<<<
+ * raise ValueError('only 1 null, not 2 as expected')
+ * c_cur = c_cur + 1
+ */
+ __pyx_t_6 = ((__pyx_v_c_cur[0]) != '\x00');
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":753
+ * c_cur = c_next + 1
+ * if c_cur[0] != c'\0':
+ * raise ValueError('only 1 null, not 2 as expected') # <<<<<<<<<<<<<<
+ * c_cur = c_cur + 1
+ * cur_record.block_offset = strtoll(c_cur, &c_next, 10)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 753; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_26));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_26));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_26));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 753; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 753; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":754
+ * if c_cur[0] != c'\0':
+ * raise ValueError('only 1 null, not 2 as expected')
+ * c_cur = c_cur + 1 # <<<<<<<<<<<<<<
+ * cur_record.block_offset = strtoll(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ':
+ */
+ __pyx_v_c_cur = (__pyx_v_c_cur + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":755
+ * raise ValueError('only 1 null, not 2 as expected')
+ * c_cur = c_cur + 1
+ * cur_record.block_offset = strtoll(c_cur, &c_next, 10) # <<<<<<<<<<<<<<
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block offset')
+ */
+ __pyx_v_cur_record->block_offset = strtoll(__pyx_v_c_cur, (&__pyx_v_c_next), 10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":756
+ * c_cur = c_cur + 1
+ * cur_record.block_offset = strtoll(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ': # <<<<<<<<<<<<<<
+ * raise ValueError('Failed to parse block offset')
+ * c_cur = c_next + 1
+ */
+ __pyx_t_6 = (__pyx_v_c_cur == __pyx_v_c_next);
+ if (!__pyx_t_6) {
+ __pyx_t_4 = ((__pyx_v_c_next[0]) != ' ');
+ __pyx_t_5 = __pyx_t_4;
+ } else {
+ __pyx_t_5 = __pyx_t_6;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":757
+ * cur_record.block_offset = strtoll(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block offset') # <<<<<<<<<<<<<<
+ * c_cur = c_next + 1
+ * cur_record.block_length = strtoul(c_cur, &c_next, 10)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_27));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_27));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_27));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":758
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block offset')
+ * c_cur = c_next + 1 # <<<<<<<<<<<<<<
+ * cur_record.block_length = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ':
+ */
+ __pyx_v_c_cur = (__pyx_v_c_next + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":759
+ * raise ValueError('Failed to parse block offset')
+ * c_cur = c_next + 1
+ * cur_record.block_length = strtoul(c_cur, &c_next, 10) # <<<<<<<<<<<<<<
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block length')
+ */
+ __pyx_v_cur_record->block_length = strtoul(__pyx_v_c_cur, (&__pyx_v_c_next), 10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":760
+ * c_cur = c_next + 1
+ * cur_record.block_length = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ': # <<<<<<<<<<<<<<
+ * raise ValueError('Failed to parse block length')
+ * c_cur = c_next + 1
+ */
+ __pyx_t_5 = (__pyx_v_c_cur == __pyx_v_c_next);
+ if (!__pyx_t_5) {
+ __pyx_t_6 = ((__pyx_v_c_next[0]) != ' ');
+ __pyx_t_4 = __pyx_t_6;
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":761
+ * cur_record.block_length = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block length') # <<<<<<<<<<<<<<
+ * c_cur = c_next + 1
+ * cur_record.record_start = strtoul(c_cur, &c_next, 10)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_28));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_28));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_28));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":762
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block length')
+ * c_cur = c_next + 1 # <<<<<<<<<<<<<<
+ * cur_record.record_start = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ':
+ */
+ __pyx_v_c_cur = (__pyx_v_c_next + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":763
+ * raise ValueError('Failed to parse block length')
+ * c_cur = c_next + 1
+ * cur_record.record_start = strtoul(c_cur, &c_next, 10) # <<<<<<<<<<<<<<
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block length')
+ */
+ __pyx_v_cur_record->record_start = strtoul(__pyx_v_c_cur, (&__pyx_v_c_next), 10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":764
+ * c_cur = c_next + 1
+ * cur_record.record_start = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ': # <<<<<<<<<<<<<<
+ * raise ValueError('Failed to parse block length')
+ * c_cur = c_next + 1
+ */
+ __pyx_t_4 = (__pyx_v_c_cur == __pyx_v_c_next);
+ if (!__pyx_t_4) {
+ __pyx_t_5 = ((__pyx_v_c_next[0]) != ' ');
+ __pyx_t_6 = __pyx_t_5;
+ } else {
+ __pyx_t_6 = __pyx_t_4;
+ }
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":765
+ * cur_record.record_start = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block length') # <<<<<<<<<<<<<<
+ * c_cur = c_next + 1
+ * cur_record.record_end = strtoul(c_cur, &c_next, 10)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_28));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_28));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_28));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":766
+ * if c_cur == c_next or c_next[0] != c' ':
+ * raise ValueError('Failed to parse block length')
+ * c_cur = c_next + 1 # <<<<<<<<<<<<<<
+ * cur_record.record_end = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c'\n':
+ */
+ __pyx_v_c_cur = (__pyx_v_c_next + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":767
+ * raise ValueError('Failed to parse block length')
+ * c_cur = c_next + 1
+ * cur_record.record_end = strtoul(c_cur, &c_next, 10) # <<<<<<<<<<<<<<
+ * if c_cur == c_next or c_next[0] != c'\n':
+ * raise ValueError('Failed to parse record end')
+ */
+ __pyx_v_cur_record->record_end = strtoul(__pyx_v_c_cur, (&__pyx_v_c_next), 10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":768
+ * c_cur = c_next + 1
+ * cur_record.record_end = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c'\n': # <<<<<<<<<<<<<<
+ * raise ValueError('Failed to parse record end')
+ * c_cur = c_next + 1
+ */
+ __pyx_t_6 = (__pyx_v_c_cur == __pyx_v_c_next);
+ if (!__pyx_t_6) {
+ __pyx_t_4 = ((__pyx_v_c_next[0]) != '\n');
+ __pyx_t_5 = __pyx_t_4;
+ } else {
+ __pyx_t_5 = __pyx_t_6;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":769
+ * cur_record.record_end = strtoul(c_cur, &c_next, 10)
+ * if c_cur == c_next or c_next[0] != c'\n':
+ * raise ValueError('Failed to parse record end') # <<<<<<<<<<<<<<
+ * c_cur = c_next + 1
+ * return c_cur
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_29));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_29));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_29));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":770
+ * if c_cur == c_next or c_next[0] != c'\n':
+ * raise ValueError('Failed to parse record end')
+ * c_cur = c_next + 1 # <<<<<<<<<<<<<<
+ * return c_cur
+ *
+ */
+ __pyx_v_c_cur = (__pyx_v_c_next + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":771
+ * raise ValueError('Failed to parse record end')
+ * c_cur = c_next + 1
+ * return c_cur # <<<<<<<<<<<<<<
+ *
+ * cdef int _offset_for_sha1(self, char *sha1) except -1:
+ */
+ __pyx_r = __pyx_v_c_cur;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._parse_one_entry");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
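+/* _offset_for_sha1 (below) maps a binary sha1 to an 8-bit bucket: it takes
+ * the unsigned-int form of the digest prefix produced by _sha1_to_uint (a
+ * module helper defined earlier in this file), shifts it right by
+ * self.common_shift and masks with 0xFF.  The selected byte is therefore the
+ * 8 bits immediately following the bit prefix shared by every key in this
+ * node (see _compute_common below).
+ */
+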
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":773
+ * return c_cur
+ *
+ * cdef int _offset_for_sha1(self, char *sha1) except -1: # <<<<<<<<<<<<<<
+ * """Find the first interesting 8-bits of this sha1."""
+ * cdef int this_offset
+ */
+
+static int __pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__offset_for_sha1(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self, char *__pyx_v_sha1) {
+ int __pyx_v_this_offset;
+ unsigned int __pyx_v_as_uint;
+ int __pyx_r;
+ __Pyx_RefNannySetupContext("_offset_for_sha1");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":777
+ * cdef int this_offset
+ * cdef unsigned int as_uint
+ * as_uint = _sha1_to_uint(sha1) # <<<<<<<<<<<<<<
+ * this_offset = (as_uint >> self.common_shift) & 0xFF
+ * return this_offset
+ */
+ __pyx_v_as_uint = __pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_uint(__pyx_v_sha1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":778
+ * cdef unsigned int as_uint
+ * as_uint = _sha1_to_uint(sha1)
+ * this_offset = (as_uint >> self.common_shift) & 0xFF # <<<<<<<<<<<<<<
+ * return this_offset
+ *
+ */
+ __pyx_v_this_offset = ((__pyx_v_as_uint >> __pyx_v_self->common_shift) & 0xFF);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":779
+ * as_uint = _sha1_to_uint(sha1)
+ * this_offset = (as_uint >> self.common_shift) & 0xFF
+ * return this_offset # <<<<<<<<<<<<<<
+ *
+ * def _get_offset_for_sha1(self, sha1):
+ */
+ __pyx_r = __pyx_v_this_offset;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
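+/* _get_offset_for_sha1 is the thin Python-visible wrapper around the cdef
+ * _offset_for_sha1 above: it converts the Python string to a char* with
+ * PyString_AS_STRING and boxes the resulting bucket index as a Python int.
+ */
+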
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":781
+ * return this_offset
+ *
+ * def _get_offset_for_sha1(self, sha1): # <<<<<<<<<<<<<<
+ * return self._offset_for_sha1(PyString_AS_STRING(sha1))
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__get_offset_for_sha1(PyObject *__pyx_v_self, PyObject *__pyx_v_sha1); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__get_offset_for_sha1(PyObject *__pyx_v_self, PyObject *__pyx_v_sha1) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannySetupContext("_get_offset_for_sha1");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":782
+ *
+ * def _get_offset_for_sha1(self, sha1):
+ * return self._offset_for_sha1(PyString_AS_STRING(sha1)) # <<<<<<<<<<<<<<
+ *
+ * cdef _compute_common(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->__pyx_vtab)->_offset_for_sha1(((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self), PyString_AS_STRING(__pyx_v_sha1)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 782; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 782; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._get_offset_for_sha1");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
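+/* _compute_common (below) derives the lookup parameters used by
+ * _offset_for_sha1.  With fewer than two records everything is 'common' and
+ * common_shift is simply 24 (the top byte).  Otherwise it XORs the uint form
+ * of every record's sha1 against the first record's, clearing from
+ * common_mask any bit that differs anywhere, then walks common_shift down
+ * from 24 once per leading bit still set in the mask.  It then fills the
+ * 257-entry self.offsets table: walking the records in order, each record
+ * writes its own index into every slot from the current position up to its
+ * bucket, and the remaining slots are filled with max_offset (capped at 255,
+ * so nodes with more than 255 records lump the tail together, as the .pyx
+ * comment notes).
+ */
+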
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":784
+ * return self._offset_for_sha1(PyString_AS_STRING(sha1))
+ *
+ * cdef _compute_common(self): # <<<<<<<<<<<<<<
+ * cdef unsigned int first
+ * cdef unsigned int this
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__compute_common(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *__pyx_v_self) {
+ unsigned int __pyx_v_first;
+ unsigned int __pyx_v_this;
+ unsigned int __pyx_v_common_mask;
+ unsigned char __pyx_v_common_shift;
+ int __pyx_v_i;
+ int __pyx_v_offset;
+ int __pyx_v_this_offset;
+ int __pyx_v_max_offset;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ __Pyx_RefNannySetupContext("_compute_common");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":798
+ * # By XORing the records together, we can determine what bits are set in
+ * # all of them
+ * if self.num_records < 2: # <<<<<<<<<<<<<<
+ * # Everything is in common if you have 0 or 1 leaves
+ * # So we'll always just shift to the first byte
+ */
+ __pyx_t_1 = (__pyx_v_self->num_records < 2);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":801
+ * # Everything is in common if you have 0 or 1 leaves
+ * # So we'll always just shift to the first byte
+ * self.common_shift = 24 # <<<<<<<<<<<<<<
+ * else:
+ * common_mask = 0xFFFFFFFF
+ */
+ __pyx_v_self->common_shift = 24;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":803
+ * self.common_shift = 24
+ * else:
+ * common_mask = 0xFFFFFFFF # <<<<<<<<<<<<<<
+ * first = _sha1_to_uint(self.records[0].sha1)
+ * for i from 0 < i < self.num_records:
+ */
+ __pyx_v_common_mask = 0xFFFFFFFF;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":804
+ * else:
+ * common_mask = 0xFFFFFFFF
+ * first = _sha1_to_uint(self.records[0].sha1) # <<<<<<<<<<<<<<
+ * for i from 0 < i < self.num_records:
+ * this = _sha1_to_uint(self.records[i].sha1)
+ */
+ __pyx_v_first = __pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_uint((__pyx_v_self->records[0]).sha1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":805
+ * common_mask = 0xFFFFFFFF
+ * first = _sha1_to_uint(self.records[0].sha1)
+ * for i from 0 < i < self.num_records: # <<<<<<<<<<<<<<
+ * this = _sha1_to_uint(self.records[i].sha1)
+ * common_mask = (~(first ^ this)) & common_mask
+ */
+ __pyx_t_2 = __pyx_v_self->num_records;
+ for (__pyx_v_i = 0+1; __pyx_v_i < __pyx_t_2; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":806
+ * first = _sha1_to_uint(self.records[0].sha1)
+ * for i from 0 < i < self.num_records:
+ * this = _sha1_to_uint(self.records[i].sha1) # <<<<<<<<<<<<<<
+ * common_mask = (~(first ^ this)) & common_mask
+ * common_shift = 24
+ */
+ __pyx_v_this = __pyx_f_6bzrlib_21_btree_serializer_pyx__sha1_to_uint((__pyx_v_self->records[__pyx_v_i]).sha1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":807
+ * for i from 0 < i < self.num_records:
+ * this = _sha1_to_uint(self.records[i].sha1)
+ * common_mask = (~(first ^ this)) & common_mask # <<<<<<<<<<<<<<
+ * common_shift = 24
+ * while common_mask & 0x80000000 and common_shift > 0:
+ */
+ __pyx_v_common_mask = ((~(__pyx_v_first ^ __pyx_v_this)) & __pyx_v_common_mask);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":808
+ * this = _sha1_to_uint(self.records[i].sha1)
+ * common_mask = (~(first ^ this)) & common_mask
+ * common_shift = 24 # <<<<<<<<<<<<<<
+ * while common_mask & 0x80000000 and common_shift > 0:
+ * common_mask = common_mask << 1
+ */
+ __pyx_v_common_shift = 24;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":809
+ * common_mask = (~(first ^ this)) & common_mask
+ * common_shift = 24
+ * while common_mask & 0x80000000 and common_shift > 0: # <<<<<<<<<<<<<<
+ * common_mask = common_mask << 1
+ * common_shift = common_shift - 1
+ */
+ while (1) {
+ if ((__pyx_v_common_mask & 0x80000000)) {
+ __pyx_t_1 = (__pyx_v_common_shift > 0);
+ __pyx_t_3 = __pyx_t_1;
+ } else {
+ __pyx_t_3 = (__pyx_v_common_mask & 0x80000000);
+ }
+ if (!__pyx_t_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":810
+ * common_shift = 24
+ * while common_mask & 0x80000000 and common_shift > 0:
+ * common_mask = common_mask << 1 # <<<<<<<<<<<<<<
+ * common_shift = common_shift - 1
+ * self.common_shift = common_shift
+ */
+ __pyx_v_common_mask = (__pyx_v_common_mask << 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":811
+ * while common_mask & 0x80000000 and common_shift > 0:
+ * common_mask = common_mask << 1
+ * common_shift = common_shift - 1 # <<<<<<<<<<<<<<
+ * self.common_shift = common_shift
+ * offset = 0
+ */
+ __pyx_v_common_shift = (__pyx_v_common_shift - 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":812
+ * common_mask = common_mask << 1
+ * common_shift = common_shift - 1
+ * self.common_shift = common_shift # <<<<<<<<<<<<<<
+ * offset = 0
+ * max_offset = self.num_records
+ */
+ __pyx_v_self->common_shift = __pyx_v_common_shift;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":813
+ * common_shift = common_shift - 1
+ * self.common_shift = common_shift
+ * offset = 0 # <<<<<<<<<<<<<<
+ * max_offset = self.num_records
+ * # We cap this loop at 254 records. All the other offsets just get
+ */
+ __pyx_v_offset = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":814
+ * self.common_shift = common_shift
+ * offset = 0
+ * max_offset = self.num_records # <<<<<<<<<<<<<<
+ * # We cap this loop at 254 records. All the other offsets just get
+ * # filled with 0xff as the singleton saying 'too many'.
+ */
+ __pyx_v_max_offset = __pyx_v_self->num_records;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":819
+ * # It means that if we have >255 records we have to bisect the second
+ * # half of the list, but this is going to be very rare in practice.
+ * if max_offset > 255: # <<<<<<<<<<<<<<
+ * max_offset = 255
+ * for i from 0 <= i < max_offset:
+ */
+ __pyx_t_3 = (__pyx_v_max_offset > 255);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":820
+ * # half of the list, but this is going to be very rare in practice.
+ * if max_offset > 255:
+ * max_offset = 255 # <<<<<<<<<<<<<<
+ * for i from 0 <= i < max_offset:
+ * this_offset = self._offset_for_sha1(self.records[i].sha1)
+ */
+ __pyx_v_max_offset = 255;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":821
+ * if max_offset > 255:
+ * max_offset = 255
+ * for i from 0 <= i < max_offset: # <<<<<<<<<<<<<<
+ * this_offset = self._offset_for_sha1(self.records[i].sha1)
+ * while offset <= this_offset:
+ */
+ __pyx_t_2 = __pyx_v_max_offset;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_2; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":822
+ * max_offset = 255
+ * for i from 0 <= i < max_offset:
+ * this_offset = self._offset_for_sha1(self.records[i].sha1) # <<<<<<<<<<<<<<
+ * while offset <= this_offset:
+ * self.offsets[offset] = i
+ */
+ __pyx_t_4 = ((struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self->__pyx_vtab)->_offset_for_sha1(__pyx_v_self, (__pyx_v_self->records[__pyx_v_i]).sha1); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_this_offset = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":823
+ * for i from 0 <= i < max_offset:
+ * this_offset = self._offset_for_sha1(self.records[i].sha1)
+ * while offset <= this_offset: # <<<<<<<<<<<<<<
+ * self.offsets[offset] = i
+ * offset = offset + 1
+ */
+ while (1) {
+ __pyx_t_3 = (__pyx_v_offset <= __pyx_v_this_offset);
+ if (!__pyx_t_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":824
+ * this_offset = self._offset_for_sha1(self.records[i].sha1)
+ * while offset <= this_offset:
+ * self.offsets[offset] = i # <<<<<<<<<<<<<<
+ * offset = offset + 1
+ * while offset < 257:
+ */
+ (__pyx_v_self->offsets[__pyx_v_offset]) = __pyx_v_i;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":825
+ * while offset <= this_offset:
+ * self.offsets[offset] = i
+ * offset = offset + 1 # <<<<<<<<<<<<<<
+ * while offset < 257:
+ * self.offsets[offset] = max_offset
+ */
+ __pyx_v_offset = (__pyx_v_offset + 1);
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":826
+ * self.offsets[offset] = i
+ * offset = offset + 1
+ * while offset < 257: # <<<<<<<<<<<<<<
+ * self.offsets[offset] = max_offset
+ * offset = offset + 1
+ */
+ while (1) {
+ __pyx_t_3 = (__pyx_v_offset < 257);
+ if (!__pyx_t_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":827
+ * offset = offset + 1
+ * while offset < 257:
+ * self.offsets[offset] = max_offset # <<<<<<<<<<<<<<
+ * offset = offset + 1
+ *
+ */
+ (__pyx_v_self->offsets[__pyx_v_offset]) = __pyx_v_max_offset;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":828
+ * while offset < 257:
+ * self.offsets[offset] = max_offset
+ * offset = offset + 1 # <<<<<<<<<<<<<<
+ *
+ * def _get_offsets(self):
+ */
+ __pyx_v_offset = (__pyx_v_offset + 1);
+ }
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._compute_common");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
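+/* _get_offsets (below) copies the 257-entry C offsets array into a fresh
+ * Python list of ints and returns it.
+ */
+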
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":830
+ * offset = offset + 1
+ *
+ * def _get_offsets(self): # <<<<<<<<<<<<<<
+ * cdef int i
+ * result = []
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__get_offsets(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__get_offsets(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ int __pyx_v_i;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_get_offsets");
+ __pyx_v_result = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":832
+ * def _get_offsets(self):
+ * cdef int i
+ * result = [] # <<<<<<<<<<<<<<
+ * for i from 0 <= i < 257:
+ * PyList_Append(result, self.offsets[i])
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(((PyObject *)__pyx_v_result));
+ __pyx_v_result = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":833
+ * cdef int i
+ * result = []
+ * for i from 0 <= i < 257: # <<<<<<<<<<<<<<
+ * PyList_Append(result, self.offsets[i])
+ * return result
+ */
+ for (__pyx_v_i = 0; __pyx_v_i < 257; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":834
+ * result = []
+ * for i from 0 <= i < 257:
+ * PyList_Append(result, self.offsets[i]) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ __pyx_t_1 = PyInt_FromLong((((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)__pyx_v_self)->offsets[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyList_Append(((PyObject *)__pyx_v_result), __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":835
+ * for i from 0 <= i < 257:
+ * PyList_Append(result, self.offsets[i])
+ * return result # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_result));
+ __pyx_r = ((PyObject *)__pyx_v_result);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode._get_offsets");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
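+/* _parse_into_chk (below) is the module-level entry point for this leaf
+ * format: it asserts key_length == 1 and ref_list_length == 0 (both checks
+ * compile away when PYREX_WITHOUT_ASSERTIONS is defined) and then simply
+ * constructs GCCHKSHA1LeafNode(bytes).
+ */
+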
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":838
+ *
+ *
+ * def _parse_into_chk(bytes, key_length, ref_list_length): # <<<<<<<<<<<<<<
+ * """Parse into a format optimized for chk records."""
+ * assert key_length == 1
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__parse_into_chk(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_btree_serializer_pyx__parse_into_chk[] = "Parse into a format optimized for chk records.";
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__parse_into_chk(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ PyObject *__pyx_v_key_length = 0;
+ PyObject *__pyx_v_ref_list_length = 0;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__bytes,&__pyx_n_s__key_length,&__pyx_n_s__ref_list_length,0};
+ __Pyx_RefNannySetupContext("_parse_into_chk");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[3] = {0,0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__key_length);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_parse_into_chk", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ref_list_length);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_parse_into_chk", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_parse_into_chk") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_bytes = values[0];
+ __pyx_v_key_length = values[1];
+ __pyx_v_ref_list_length = values[2];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_key_length = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_ref_list_length = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_parse_into_chk", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._parse_into_chk");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":840
+ * def _parse_into_chk(bytes, key_length, ref_list_length):
+ * """Parse into a format optimized for chk records."""
+ * assert key_length == 1 # <<<<<<<<<<<<<<
+ * assert ref_list_length == 0
+ * return GCCHKSHA1LeafNode(bytes)
+ */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_key_length, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (unlikely(!__pyx_t_2)) {
+ PyErr_SetNone(PyExc_AssertionError);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":841
+ * """Parse into a format optimized for chk records."""
+ * assert key_length == 1
+ * assert ref_list_length == 0 # <<<<<<<<<<<<<<
+ * return GCCHKSHA1LeafNode(bytes)
+ *
+ */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_ref_list_length, __pyx_int_0, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (unlikely(!__pyx_t_2)) {
+ PyErr_SetNone(PyExc_AssertionError);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":842
+ * assert key_length == 1
+ * assert ref_list_length == 0
+ * return GCCHKSHA1LeafNode(bytes) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_bytes);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_bytes);
+ __Pyx_GIVEREF(__pyx_v_bytes);
+ __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._parse_into_chk");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
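+/* _flatten_node (below) is the serializer counterpart: per its docstring it
+ * turns a node tuple (index, key_tuple, value, references) into
+ * (string_key, flattened), accepting either a plain tuple() or a
+ * StaticTuple().  The block that follows is standard Cython
+ * positional/keyword argument unpacking; the body proper begins with the
+ * type check on 'node'.
+ */
+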
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":845
+ *
+ *
+ * def _flatten_node(node, reference_lists): # <<<<<<<<<<<<<<
+ * """Convert a node into the serialized form.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__flatten_node(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_btree_serializer_pyx__flatten_node[] = "Convert a node into the serialized form.\n\n :param node: A tuple representing a node:\n (index, key_tuple, value, references)\n :param reference_lists: Does this index have reference lists?\n :return: (string_key, flattened)\n string_key The serialized key for referencing this node\n flattened A string with the serialized form for the contents\n ";
+static PyObject *__pyx_pf_6bzrlib_21_btree_serializer_pyx__flatten_node(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_node = 0;
+ PyObject *__pyx_v_reference_lists = 0;
+ int __pyx_v_have_reference_lists;
+ Py_ssize_t __pyx_v_flat_len;
+ Py_ssize_t __pyx_v_key_len;
+ Py_ssize_t __pyx_v_node_len;
+ char *__pyx_v_value;
+ Py_ssize_t __pyx_v_value_len;
+ char *__pyx_v_out;
+ Py_ssize_t __pyx_v_refs_len;
+ Py_ssize_t __pyx_v_next_len;
+ int __pyx_v_first_ref_list;
+ int __pyx_v_first_reference;
+ int __pyx_v_i;
+ Py_ssize_t __pyx_v_ref_bit_len;
+ PyObject *__pyx_v_string_key;
+ PyObject *__pyx_v_ref_lists;
+ PyObject *__pyx_v_ref_list;
+ PyObject *__pyx_v_reference;
+ PyObject *__pyx_v_ref_bit;
+ PyObject *__pyx_v_val;
+ PyObject *__pyx_v_line;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ Py_ssize_t __pyx_t_6;
+ int __pyx_t_7;
+ PyObject *__pyx_t_8 = NULL;
+ Py_ssize_t __pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ Py_ssize_t __pyx_t_11;
+ PyObject *__pyx_t_12 = NULL;
+ char *__pyx_t_13;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__node,&__pyx_n_s__reference_lists,0};
+ __Pyx_RefNannySetupContext("_flatten_node");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__node);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__reference_lists);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_flatten_node", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_flatten_node") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_node = values[0];
+ __pyx_v_reference_lists = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_node = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_reference_lists = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_flatten_node", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._flatten_node");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_v_string_key = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_ref_lists = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_ref_list = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_reference = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_ref_bit = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_val = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_line = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":869
+ * cdef Py_ssize_t ref_bit_len
+ *
+ * if not PyTuple_CheckExact(node) and not StaticTuple_CheckExact(node): # <<<<<<<<<<<<<<
+ * raise TypeError('We expected a tuple() or StaticTuple() for node not: %s'
+ * % type(node))
+ */
+ __pyx_t_1 = (!PyTuple_CheckExact(__pyx_v_node));
+ if (__pyx_t_1) {
+ __pyx_t_2 = (!StaticTuple_CheckExact(__pyx_v_node));
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":871
+ * if not PyTuple_CheckExact(node) and not StaticTuple_CheckExact(node):
+ * raise TypeError('We expected a tuple() or StaticTuple() for node not: %s'
+ * % type(node)) # <<<<<<<<<<<<<<
+ * node_len = len(node)
+ * have_reference_lists = reference_lists
+ */
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_30), ((PyObject *)Py_TYPE(__pyx_v_node))); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":872
+ * raise TypeError('We expected a tuple() or StaticTuple() for node not: %s'
+ * % type(node))
+ * node_len = len(node) # <<<<<<<<<<<<<<
+ * have_reference_lists = reference_lists
+ * if have_reference_lists:
+ */
+ __pyx_t_6 = PyObject_Length(__pyx_v_node); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_node_len = __pyx_t_6;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":873
+ * % type(node))
+ * node_len = len(node)
+ * have_reference_lists = reference_lists # <<<<<<<<<<<<<<
+ * if have_reference_lists:
+ * if node_len != 4:
+ */
+ __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_v_reference_lists); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_have_reference_lists = __pyx_t_7;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":874
+ * node_len = len(node)
+ * have_reference_lists = reference_lists
+ * if have_reference_lists: # <<<<<<<<<<<<<<
+ * if node_len != 4:
+ * raise ValueError('With ref_lists, we expected 4 entries not: %s'
+ */
+ if (__pyx_v_have_reference_lists) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":875
+ * have_reference_lists = reference_lists
+ * if have_reference_lists:
+ * if node_len != 4: # <<<<<<<<<<<<<<
+ * raise ValueError('With ref_lists, we expected 4 entries not: %s'
+ * % len(node))
+ */
+ __pyx_t_3 = (__pyx_v_node_len != 4);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":877
+ * if node_len != 4:
+ * raise ValueError('With ref_lists, we expected 4 entries not: %s'
+ * % len(node)) # <<<<<<<<<<<<<<
+ * elif node_len < 3:
+ * raise ValueError('Without ref_lists, we need at least 3 entries not: %s'
+ */
+ __pyx_t_6 = PyObject_Length(__pyx_v_node); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyInt_FromSsize_t(__pyx_t_6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_31), __pyx_t_4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+ goto __pyx_L7;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":878
+ * raise ValueError('With ref_lists, we expected 4 entries not: %s'
+ * % len(node))
+ * elif node_len < 3: # <<<<<<<<<<<<<<
+ * raise ValueError('Without ref_lists, we need at least 3 entries not: %s'
+ * % len(node))
+ */
+ __pyx_t_3 = (__pyx_v_node_len < 3);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":880
+ * elif node_len < 3:
+ * raise ValueError('Without ref_lists, we need at least 3 entries not: %s'
+ * % len(node)) # <<<<<<<<<<<<<<
+ * # TODO: We can probably do better than string.join(), namely
+ * # when key has only 1 item, we can just grab that string
+ */
+ __pyx_t_6 = PyObject_Length(__pyx_v_node); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 880; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyInt_FromSsize_t(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 880; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_32), __pyx_t_5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 880; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 879; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 879; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 879; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":891
+ * # We *could* do more work on our own, and grab the actual items
+ * # lists. For now, just ask people to use a better compiler. :)
+ * string_key = '\0'.join(node[1]) # <<<<<<<<<<<<<<
+ *
+ * # TODO: instead of using string joins, precompute the final string length,
+ */
+ __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_33), __pyx_n_s__join); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_node, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_v_string_key);
+ __pyx_v_string_key = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":905
+ * # ref := BYTES (NULL BYTES)*
+ * # value := BYTES
+ * refs_len = 0 # <<<<<<<<<<<<<<
+ * if have_reference_lists:
+ * # Figure out how many bytes it will take to store the references
+ */
+ __pyx_v_refs_len = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":906
+ * # value := BYTES
+ * refs_len = 0
+ * if have_reference_lists: # <<<<<<<<<<<<<<
+ * # Figure out how many bytes it will take to store the references
+ * ref_lists = node[3]
+ */
+ if (__pyx_v_have_reference_lists) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":908
+ * if have_reference_lists:
+ * # Figure out how many bytes it will take to store the references
+ * ref_lists = node[3] # <<<<<<<<<<<<<<
+ * next_len = len(ref_lists) # TODO: use a Py function
+ * if next_len > 0:
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_node, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_ref_lists);
+ __pyx_v_ref_lists = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":909
+ * # Figure out how many bytes it will take to store the references
+ * ref_lists = node[3]
+ * next_len = len(ref_lists) # TODO: use a Py function # <<<<<<<<<<<<<<
+ * if next_len > 0:
+ * # If there are no nodes, we don't need to do any work
+ */
+ __pyx_t_6 = PyObject_Length(__pyx_v_ref_lists); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 909; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_next_len = __pyx_t_6;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":910
+ * ref_lists = node[3]
+ * next_len = len(ref_lists) # TODO: use a Py function
+ * if next_len > 0: # <<<<<<<<<<<<<<
+ * # If there are no nodes, we don't need to do any work
+ * # Otherwise we will need (len - 1) '\t' characters to separate
+ */
+ __pyx_t_3 = (__pyx_v_next_len > 0);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":914
+ * # Otherwise we will need (len - 1) '\t' characters to separate
+ * # the reference lists
+ * refs_len = refs_len + (next_len - 1) # <<<<<<<<<<<<<<
+ * for ref_list in ref_lists:
+ * next_len = len(ref_list)
+ */
+ __pyx_v_refs_len = (__pyx_v_refs_len + (__pyx_v_next_len - 1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":915
+ * # the reference lists
+ * refs_len = refs_len + (next_len - 1)
+ * for ref_list in ref_lists: # <<<<<<<<<<<<<<
+ * next_len = len(ref_list)
+ * if next_len > 0:
+ */
+ if (PyList_CheckExact(__pyx_v_ref_lists) || PyTuple_CheckExact(__pyx_v_ref_lists)) {
+ __pyx_t_6 = 0; __pyx_t_5 = __pyx_v_ref_lists; __Pyx_INCREF(__pyx_t_5);
+ } else {
+ __pyx_t_6 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_ref_lists); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ }
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_5))) {
+ if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_5)) break;
+ __pyx_t_8 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_5))) {
+ if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
+ __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++;
+ } else {
+ __pyx_t_8 = PyIter_Next(__pyx_t_5);
+ if (!__pyx_t_8) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_8);
+ }
+ __Pyx_DECREF(__pyx_v_ref_list);
+ __pyx_v_ref_list = __pyx_t_8;
+ __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":916
+ * refs_len = refs_len + (next_len - 1)
+ * for ref_list in ref_lists:
+ * next_len = len(ref_list) # <<<<<<<<<<<<<<
+ * if next_len > 0:
+ * # We will need (len - 1) '\r' characters to separate the
+ */
+ __pyx_t_9 = PyObject_Length(__pyx_v_ref_list); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_next_len = __pyx_t_9;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":917
+ * for ref_list in ref_lists:
+ * next_len = len(ref_list)
+ * if next_len > 0: # <<<<<<<<<<<<<<
+ * # We will need (len - 1) '\r' characters to separate the
+ * # references
+ */
+ __pyx_t_3 = (__pyx_v_next_len > 0);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":920
+ * # We will need (len - 1) '\r' characters to separate the
+ * # references
+ * refs_len = refs_len + (next_len - 1) # <<<<<<<<<<<<<<
+ * for reference in ref_list:
+ * if (not PyTuple_CheckExact(reference)
+ */
+ __pyx_v_refs_len = (__pyx_v_refs_len + (__pyx_v_next_len - 1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":921
+ * # references
+ * refs_len = refs_len + (next_len - 1)
+ * for reference in ref_list: # <<<<<<<<<<<<<<
+ * if (not PyTuple_CheckExact(reference)
+ * and not StaticTuple_CheckExact(reference)):
+ */
+ if (PyList_CheckExact(__pyx_v_ref_list) || PyTuple_CheckExact(__pyx_v_ref_list)) {
+ __pyx_t_9 = 0; __pyx_t_8 = __pyx_v_ref_list; __Pyx_INCREF(__pyx_t_8);
+ } else {
+ __pyx_t_9 = -1; __pyx_t_8 = PyObject_GetIter(__pyx_v_ref_list); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ }
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_8))) {
+ if (__pyx_t_9 >= PyList_GET_SIZE(__pyx_t_8)) break;
+ __pyx_t_4 = PyList_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_4); __pyx_t_9++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_8))) {
+ if (__pyx_t_9 >= PyTuple_GET_SIZE(__pyx_t_8)) break;
+ __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_4); __pyx_t_9++;
+ } else {
+ __pyx_t_4 = PyIter_Next(__pyx_t_8);
+ if (!__pyx_t_4) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_4);
+ }
+ __Pyx_DECREF(__pyx_v_reference);
+ __pyx_v_reference = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":922
+ * refs_len = refs_len + (next_len - 1)
+ * for reference in ref_list:
+ * if (not PyTuple_CheckExact(reference) # <<<<<<<<<<<<<<
+ * and not StaticTuple_CheckExact(reference)):
+ * raise TypeError(
+ */
+ __pyx_t_3 = (!PyTuple_CheckExact(__pyx_v_reference));
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":923
+ * for reference in ref_list:
+ * if (not PyTuple_CheckExact(reference)
+ * and not StaticTuple_CheckExact(reference)): # <<<<<<<<<<<<<<
+ * raise TypeError(
+ * 'We expect references to be tuples not: %s'
+ */
+ __pyx_t_1 = (!StaticTuple_CheckExact(__pyx_v_reference));
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = __pyx_t_3;
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":926
+ * raise TypeError(
+ * 'We expect references to be tuples not: %s'
+ * % type(reference)) # <<<<<<<<<<<<<<
+ * next_len = len(reference)
+ * if next_len > 0:
+ */
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_34), ((PyObject *)Py_TYPE(__pyx_v_reference))); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 926; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)__pyx_t_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":927
+ * 'We expect references to be tuples not: %s'
+ * % type(reference))
+ * next_len = len(reference) # <<<<<<<<<<<<<<
+ * if next_len > 0:
+ * # We will need (len - 1) '\x00' characters to
+ */
+ __pyx_t_11 = PyObject_Length(__pyx_v_reference); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 927; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_next_len = __pyx_t_11;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":928
+ * % type(reference))
+ * next_len = len(reference)
+ * if next_len > 0: # <<<<<<<<<<<<<<
+ * # We will need (len - 1) '\x00' characters to
+ * # separate the reference key
+ */
+ __pyx_t_2 = (__pyx_v_next_len > 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":931
+ * # We will need (len - 1) '\x00' characters to
+ * # separate the reference key
+ * refs_len = refs_len + (next_len - 1) # <<<<<<<<<<<<<<
+ * for ref_bit in reference:
+ * if not PyString_CheckExact(ref_bit):
+ */
+ __pyx_v_refs_len = (__pyx_v_refs_len + (__pyx_v_next_len - 1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":932
+ * # separate the reference key
+ * refs_len = refs_len + (next_len - 1)
+ * for ref_bit in reference: # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(ref_bit):
+ * raise TypeError('We expect reference bits'
+ */
+ if (PyList_CheckExact(__pyx_v_reference) || PyTuple_CheckExact(__pyx_v_reference)) {
+ __pyx_t_11 = 0; __pyx_t_4 = __pyx_v_reference; __Pyx_INCREF(__pyx_t_4);
+ } else {
+ __pyx_t_11 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_reference); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ }
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_4))) {
+ if (__pyx_t_11 >= PyList_GET_SIZE(__pyx_t_4)) break;
+ __pyx_t_10 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_11); __Pyx_INCREF(__pyx_t_10); __pyx_t_11++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_4))) {
+ if (__pyx_t_11 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
+ __pyx_t_10 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_11); __Pyx_INCREF(__pyx_t_10); __pyx_t_11++;
+ } else {
+ __pyx_t_10 = PyIter_Next(__pyx_t_4);
+ if (!__pyx_t_10) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_10);
+ }
+ __Pyx_DECREF(__pyx_v_ref_bit);
+ __pyx_v_ref_bit = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":933
+ * refs_len = refs_len + (next_len - 1)
+ * for ref_bit in reference:
+ * if not PyString_CheckExact(ref_bit): # <<<<<<<<<<<<<<
+ * raise TypeError('We expect reference bits'
+ * ' to be strings not: %s'
+ */
+ __pyx_t_2 = (!PyString_CheckExact(__pyx_v_ref_bit));
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":936
+ * raise TypeError('We expect reference bits'
+ * ' to be strings not: %s'
+ * % type(<object>ref_bit)) # <<<<<<<<<<<<<<
+ * refs_len = refs_len + PyString_GET_SIZE(ref_bit)
+ *
+ */
+ __pyx_t_10 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_35), ((PyObject *)Py_TYPE(__pyx_v_ref_bit))); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_10));
+ __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 934; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, ((PyObject *)__pyx_t_10));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_10));
+ __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_12, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 934; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_Raise(__pyx_t_10, 0, 0);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 934; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L20;
+ }
+ __pyx_L20:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":937
+ * ' to be strings not: %s'
+ * % type(<object>ref_bit))
+ * refs_len = refs_len + PyString_GET_SIZE(ref_bit) # <<<<<<<<<<<<<<
+ *
+ * # So we have the (key NULL refs NULL value LF)
+ */
+ __pyx_v_refs_len = (__pyx_v_refs_len + PyString_GET_SIZE(__pyx_v_ref_bit));
+ }
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+ }
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+ }
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":940
+ *
+ * # So we have the (key NULL refs NULL value LF)
+ * key_len = PyString_Size(string_key) # <<<<<<<<<<<<<<
+ * val = node[2]
+ * if not PyString_CheckExact(val):
+ */
+ __pyx_v_key_len = PyString_Size(__pyx_v_string_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":941
+ * # So we have the (key NULL refs NULL value LF)
+ * key_len = PyString_Size(string_key)
+ * val = node[2] # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(val):
+ * raise TypeError('Expected a plain str for value not: %s'
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_node, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_val);
+ __pyx_v_val = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":942
+ * key_len = PyString_Size(string_key)
+ * val = node[2]
+ * if not PyString_CheckExact(val): # <<<<<<<<<<<<<<
+ * raise TypeError('Expected a plain str for value not: %s'
+ * % type(val))
+ */
+ __pyx_t_2 = (!PyString_CheckExact(__pyx_v_val));
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":944
+ * if not PyString_CheckExact(val):
+ * raise TypeError('Expected a plain str for value not: %s'
+ * % type(val)) # <<<<<<<<<<<<<<
+ * value = PyString_AS_STRING(val)
+ * value_len = PyString_GET_SIZE(val)
+ */
+ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_36), ((PyObject *)Py_TYPE(__pyx_v_val))); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L21;
+ }
+ __pyx_L21:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":945
+ * raise TypeError('Expected a plain str for value not: %s'
+ * % type(val))
+ * value = PyString_AS_STRING(val) # <<<<<<<<<<<<<<
+ * value_len = PyString_GET_SIZE(val)
+ * flat_len = (key_len + 1 + refs_len + 1 + value_len + 1)
+ */
+ __pyx_v_value = PyString_AS_STRING(__pyx_v_val);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":946
+ * % type(val))
+ * value = PyString_AS_STRING(val)
+ * value_len = PyString_GET_SIZE(val) # <<<<<<<<<<<<<<
+ * flat_len = (key_len + 1 + refs_len + 1 + value_len + 1)
+ * line = PyString_FromStringAndSize(NULL, flat_len)
+ */
+ __pyx_v_value_len = PyString_GET_SIZE(__pyx_v_val);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":947
+ * value = PyString_AS_STRING(val)
+ * value_len = PyString_GET_SIZE(val)
+ * flat_len = (key_len + 1 + refs_len + 1 + value_len + 1) # <<<<<<<<<<<<<<
+ * line = PyString_FromStringAndSize(NULL, flat_len)
+ * # Get a pointer to the new buffer
+ */
+ __pyx_v_flat_len = (((((__pyx_v_key_len + 1) + __pyx_v_refs_len) + 1) + __pyx_v_value_len) + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":948
+ * value_len = PyString_GET_SIZE(val)
+ * flat_len = (key_len + 1 + refs_len + 1 + value_len + 1)
+ * line = PyString_FromStringAndSize(NULL, flat_len) # <<<<<<<<<<<<<<
+ * # Get a pointer to the new buffer
+ * out = PyString_AsString(line)
+ */
+ __pyx_t_5 = PyString_FromStringAndSize(NULL, __pyx_v_flat_len); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_line);
+ __pyx_v_line = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":950
+ * line = PyString_FromStringAndSize(NULL, flat_len)
+ * # Get a pointer to the new buffer
+ * out = PyString_AsString(line) # <<<<<<<<<<<<<<
+ * memcpy(out, PyString_AsString(string_key), key_len)
+ * out = out + key_len
+ */
+ __pyx_t_13 = PyString_AsString(__pyx_v_line); if (unlikely(__pyx_t_13 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_out = __pyx_t_13;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":951
+ * # Get a pointer to the new buffer
+ * out = PyString_AsString(line)
+ * memcpy(out, PyString_AsString(string_key), key_len) # <<<<<<<<<<<<<<
+ * out = out + key_len
+ * out[0] = c'\0'
+ */
+ __pyx_t_13 = PyString_AsString(__pyx_v_string_key); if (unlikely(__pyx_t_13 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 951; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ memcpy(__pyx_v_out, __pyx_t_13, __pyx_v_key_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":952
+ * out = PyString_AsString(line)
+ * memcpy(out, PyString_AsString(string_key), key_len)
+ * out = out + key_len # <<<<<<<<<<<<<<
+ * out[0] = c'\0'
+ * out = out + 1
+ */
+ __pyx_v_out = (__pyx_v_out + __pyx_v_key_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":953
+ * memcpy(out, PyString_AsString(string_key), key_len)
+ * out = out + key_len
+ * out[0] = c'\0' # <<<<<<<<<<<<<<
+ * out = out + 1
+ * if refs_len > 0:
+ */
+ (__pyx_v_out[0]) = '\x00';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":954
+ * out = out + key_len
+ * out[0] = c'\0'
+ * out = out + 1 # <<<<<<<<<<<<<<
+ * if refs_len > 0:
+ * first_ref_list = 1
+ */
+ __pyx_v_out = (__pyx_v_out + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":955
+ * out[0] = c'\0'
+ * out = out + 1
+ * if refs_len > 0: # <<<<<<<<<<<<<<
+ * first_ref_list = 1
+ * for ref_list in ref_lists:
+ */
+ __pyx_t_2 = (__pyx_v_refs_len > 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":956
+ * out = out + 1
+ * if refs_len > 0:
+ * first_ref_list = 1 # <<<<<<<<<<<<<<
+ * for ref_list in ref_lists:
+ * if first_ref_list == 0:
+ */
+ __pyx_v_first_ref_list = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":957
+ * if refs_len > 0:
+ * first_ref_list = 1
+ * for ref_list in ref_lists: # <<<<<<<<<<<<<<
+ * if first_ref_list == 0:
+ * out[0] = c'\t'
+ */
+ if (PyList_CheckExact(__pyx_v_ref_lists) || PyTuple_CheckExact(__pyx_v_ref_lists)) {
+ __pyx_t_6 = 0; __pyx_t_5 = __pyx_v_ref_lists; __Pyx_INCREF(__pyx_t_5);
+ } else {
+ __pyx_t_6 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_ref_lists); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ }
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_5))) {
+ if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_5)) break;
+ __pyx_t_8 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_5))) {
+ if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
+ __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++;
+ } else {
+ __pyx_t_8 = PyIter_Next(__pyx_t_5);
+ if (!__pyx_t_8) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_8);
+ }
+ __Pyx_DECREF(__pyx_v_ref_list);
+ __pyx_v_ref_list = __pyx_t_8;
+ __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":958
+ * first_ref_list = 1
+ * for ref_list in ref_lists:
+ * if first_ref_list == 0: # <<<<<<<<<<<<<<
+ * out[0] = c'\t'
+ * out = out + 1
+ */
+ __pyx_t_2 = (__pyx_v_first_ref_list == 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":959
+ * for ref_list in ref_lists:
+ * if first_ref_list == 0:
+ * out[0] = c'\t' # <<<<<<<<<<<<<<
+ * out = out + 1
+ * first_ref_list = 0
+ */
+ (__pyx_v_out[0]) = '\t';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":960
+ * if first_ref_list == 0:
+ * out[0] = c'\t'
+ * out = out + 1 # <<<<<<<<<<<<<<
+ * first_ref_list = 0
+ * first_reference = 1
+ */
+ __pyx_v_out = (__pyx_v_out + 1);
+ goto __pyx_L25;
+ }
+ __pyx_L25:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":961
+ * out[0] = c'\t'
+ * out = out + 1
+ * first_ref_list = 0 # <<<<<<<<<<<<<<
+ * first_reference = 1
+ * for reference in ref_list:
+ */
+ __pyx_v_first_ref_list = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":962
+ * out = out + 1
+ * first_ref_list = 0
+ * first_reference = 1 # <<<<<<<<<<<<<<
+ * for reference in ref_list:
+ * if first_reference == 0:
+ */
+ __pyx_v_first_reference = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":963
+ * first_ref_list = 0
+ * first_reference = 1
+ * for reference in ref_list: # <<<<<<<<<<<<<<
+ * if first_reference == 0:
+ * out[0] = c'\r'
+ */
+ if (PyList_CheckExact(__pyx_v_ref_list) || PyTuple_CheckExact(__pyx_v_ref_list)) {
+ __pyx_t_9 = 0; __pyx_t_8 = __pyx_v_ref_list; __Pyx_INCREF(__pyx_t_8);
+ } else {
+ __pyx_t_9 = -1; __pyx_t_8 = PyObject_GetIter(__pyx_v_ref_list); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 963; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ }
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_8))) {
+ if (__pyx_t_9 >= PyList_GET_SIZE(__pyx_t_8)) break;
+ __pyx_t_4 = PyList_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_4); __pyx_t_9++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_8))) {
+ if (__pyx_t_9 >= PyTuple_GET_SIZE(__pyx_t_8)) break;
+ __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_4); __pyx_t_9++;
+ } else {
+ __pyx_t_4 = PyIter_Next(__pyx_t_8);
+ if (!__pyx_t_4) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 963; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_4);
+ }
+ __Pyx_DECREF(__pyx_v_reference);
+ __pyx_v_reference = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":964
+ * first_reference = 1
+ * for reference in ref_list:
+ * if first_reference == 0: # <<<<<<<<<<<<<<
+ * out[0] = c'\r'
+ * out = out + 1
+ */
+ __pyx_t_2 = (__pyx_v_first_reference == 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":965
+ * for reference in ref_list:
+ * if first_reference == 0:
+ * out[0] = c'\r' # <<<<<<<<<<<<<<
+ * out = out + 1
+ * first_reference = 0
+ */
+ (__pyx_v_out[0]) = '\r';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":966
+ * if first_reference == 0:
+ * out[0] = c'\r'
+ * out = out + 1 # <<<<<<<<<<<<<<
+ * first_reference = 0
+ * next_len = len(reference)
+ */
+ __pyx_v_out = (__pyx_v_out + 1);
+ goto __pyx_L28;
+ }
+ __pyx_L28:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":967
+ * out[0] = c'\r'
+ * out = out + 1
+ * first_reference = 0 # <<<<<<<<<<<<<<
+ * next_len = len(reference)
+ * for i from 0 <= i < next_len:
+ */
+ __pyx_v_first_reference = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":968
+ * out = out + 1
+ * first_reference = 0
+ * next_len = len(reference) # <<<<<<<<<<<<<<
+ * for i from 0 <= i < next_len:
+ * if i != 0:
+ */
+ __pyx_t_11 = PyObject_Length(__pyx_v_reference); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 968; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_next_len = __pyx_t_11;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":969
+ * first_reference = 0
+ * next_len = len(reference)
+ * for i from 0 <= i < next_len: # <<<<<<<<<<<<<<
+ * if i != 0:
+ * out[0] = c'\x00'
+ */
+ __pyx_t_11 = __pyx_v_next_len;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_11; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":970
+ * next_len = len(reference)
+ * for i from 0 <= i < next_len:
+ * if i != 0: # <<<<<<<<<<<<<<
+ * out[0] = c'\x00'
+ * out = out + 1
+ */
+ __pyx_t_2 = (__pyx_v_i != 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":971
+ * for i from 0 <= i < next_len:
+ * if i != 0:
+ * out[0] = c'\x00' # <<<<<<<<<<<<<<
+ * out = out + 1
+ * ref_bit = reference[i]
+ */
+ (__pyx_v_out[0]) = '\x00';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":972
+ * if i != 0:
+ * out[0] = c'\x00'
+ * out = out + 1 # <<<<<<<<<<<<<<
+ * ref_bit = reference[i]
+ * ref_bit_len = PyString_GET_SIZE(ref_bit)
+ */
+ __pyx_v_out = (__pyx_v_out + 1);
+ goto __pyx_L31;
+ }
+ __pyx_L31:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":973
+ * out[0] = c'\x00'
+ * out = out + 1
+ * ref_bit = reference[i] # <<<<<<<<<<<<<<
+ * ref_bit_len = PyString_GET_SIZE(ref_bit)
+ * memcpy(out, PyString_AS_STRING(ref_bit), ref_bit_len)
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_reference, __pyx_v_i, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 973; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_ref_bit);
+ __pyx_v_ref_bit = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":974
+ * out = out + 1
+ * ref_bit = reference[i]
+ * ref_bit_len = PyString_GET_SIZE(ref_bit) # <<<<<<<<<<<<<<
+ * memcpy(out, PyString_AS_STRING(ref_bit), ref_bit_len)
+ * out = out + ref_bit_len
+ */
+ __pyx_v_ref_bit_len = PyString_GET_SIZE(__pyx_v_ref_bit);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":975
+ * ref_bit = reference[i]
+ * ref_bit_len = PyString_GET_SIZE(ref_bit)
+ * memcpy(out, PyString_AS_STRING(ref_bit), ref_bit_len) # <<<<<<<<<<<<<<
+ * out = out + ref_bit_len
+ * out[0] = c'\0'
+ */
+ memcpy(__pyx_v_out, PyString_AS_STRING(__pyx_v_ref_bit), __pyx_v_ref_bit_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":976
+ * ref_bit_len = PyString_GET_SIZE(ref_bit)
+ * memcpy(out, PyString_AS_STRING(ref_bit), ref_bit_len)
+ * out = out + ref_bit_len # <<<<<<<<<<<<<<
+ * out[0] = c'\0'
+ * out = out + 1
+ */
+ __pyx_v_out = (__pyx_v_out + __pyx_v_ref_bit_len);
+ }
+ }
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ goto __pyx_L22;
+ }
+ __pyx_L22:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":977
+ * memcpy(out, PyString_AS_STRING(ref_bit), ref_bit_len)
+ * out = out + ref_bit_len
+ * out[0] = c'\0' # <<<<<<<<<<<<<<
+ * out = out + 1
+ * memcpy(out, value, value_len)
+ */
+ (__pyx_v_out[0]) = '\x00';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":978
+ * out = out + ref_bit_len
+ * out[0] = c'\0'
+ * out = out + 1 # <<<<<<<<<<<<<<
+ * memcpy(out, value, value_len)
+ * out = out + value_len
+ */
+ __pyx_v_out = (__pyx_v_out + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":979
+ * out[0] = c'\0'
+ * out = out + 1
+ * memcpy(out, value, value_len) # <<<<<<<<<<<<<<
+ * out = out + value_len
+ * out[0] = c'\n'
+ */
+ memcpy(__pyx_v_out, __pyx_v_value, __pyx_v_value_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":980
+ * out = out + 1
+ * memcpy(out, value, value_len)
+ * out = out + value_len # <<<<<<<<<<<<<<
+ * out[0] = c'\n'
+ * return string_key, line
+ */
+ __pyx_v_out = (__pyx_v_out + __pyx_v_value_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":981
+ * memcpy(out, value, value_len)
+ * out = out + value_len
+ * out[0] = c'\n' # <<<<<<<<<<<<<<
+ * return string_key, line
+ */
+ (__pyx_v_out[0]) = '\n';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":982
+ * out = out + value_len
+ * out[0] = c'\n'
+ * return string_key, line # <<<<<<<<<<<<<<
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_string_key);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_string_key);
+ __Pyx_GIVEREF(__pyx_v_string_key);
+ __Pyx_INCREF(__pyx_v_line);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_line);
+ __Pyx_GIVEREF(__pyx_v_line);
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_AddTraceback("bzrlib._btree_serializer_pyx._flatten_node");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_string_key);
+ __Pyx_DECREF(__pyx_v_ref_lists);
+ __Pyx_DECREF(__pyx_v_ref_list);
+ __Pyx_DECREF(__pyx_v_reference);
+ __Pyx_DECREF(__pyx_v_ref_bit);
+ __Pyx_DECREF(__pyx_v_val);
+ __Pyx_DECREF(__pyx_v_line);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
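
For reference, the flattening that the generated function above performs can be summarized in a short pure-Python sketch. This is illustrative only (derived from the logic and layout comments in the C code above, "key NULL refs NULL value LF"), not the module's actual fallback implementation:

    def flatten_node(node, reference_lists):
        """Pure-Python sketch of the flattening done by the C code above.

        node is (index, key_tuple, value, references) as described in the
        _flatten_node docstring; the index slot is not used here.
        """
        string_key = '\0'.join(node[1])
        if reference_lists:
            # Reference lists are separated by '\t', references within a
            # list by '\r', and the parts of a reference key by '\x00'.
            refs = '\t'.join('\r'.join('\x00'.join(reference)
                                       for reference in ref_list)
                             for ref_list in node[3])
        else:
            refs = ''
        # Layout: key NULL refs NULL value LF
        return string_key, '%s\0%s\0%s\n' % (string_key, refs, node[2])
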
+static struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser __pyx_vtable_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser;
+
+static PyObject *__pyx_tp_new_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)o);
+ p->__pyx_vtab = __pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser;
+ p->bytes = Py_None; Py_INCREF(Py_None);
+ p->keys = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *p = (struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)o;
+ Py_XDECREF(p->bytes);
+ Py_XDECREF(p->keys);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *p = (struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)o;
+ if (p->bytes) {
+ e = (*v)(p->bytes, a); if (e) return e;
+ }
+ if (p->keys) {
+ e = (*v)(p->keys, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *p = (struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->bytes);
+ p->bytes = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->keys);
+ p->keys = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyMethodDef __pyx_methods_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser[] = {
+ {__Pyx_NAMESTR("parse"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_parse, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_BTreeLeafParser = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_BTreeLeafParser = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_BTreeLeafParser = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_BTreeLeafParser = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+PyTypeObject __pyx_type_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("bzrlib._btree_serializer_pyx.BTreeLeafParser"), /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_BTreeLeafParser, /*tp_as_number*/
+ &__pyx_tp_as_sequence_BTreeLeafParser, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_BTreeLeafParser, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_BTreeLeafParser, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ __Pyx_DOCSTR("Parse the leaf nodes of a BTree index.\n\n :ivar bytes: The PyString object containing the uncompressed text for the\n node.\n :ivar key_length: An integer describing how many pieces the keys have for\n this index.\n :ivar ref_list_length: An integer describing how many references this index\n contains.\n :ivar keys: A PyList of keys found in this node.\n\n :ivar _cur_str: A pointer to the start of the next line to parse\n :ivar _end_str: A pointer to the end of bytes\n :ivar _start: Pointer to the location within the current line while\n parsing.\n :ivar _header_found: True when we have parsed the header for this node\n "), /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+static struct __pyx_vtabstruct_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode;
+
+static PyObject *__pyx_tp_new_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)o);
+ p->__pyx_vtab = __pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode;
+ p->last_key = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *p = (struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)o;
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++Py_REFCNT(o);
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --Py_REFCNT(o);
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_XDECREF(p->last_key);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *p = (struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)o;
+ if (p->last_key) {
+ e = (*v)(p->last_key, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *p = (struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->last_key);
+ p->last_key = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+static PyObject *__pyx_sq_item_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
+ r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
+ Py_DECREF(x);
+ return r;
+}
+
+static PyObject *__pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_min_key(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_7min_key___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_max_key(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_7max_key___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_last_key(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___get__(o);
+}
+
+static int __pyx_setprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_last_key(PyObject *o, PyObject *v, void *x) {
+ if (v) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___set__(o, v);
+ }
+ else {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_8last_key___del__(o);
+ }
+}
+
+static PyObject *__pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_num_records(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_11num_records___get__(o);
+}
+
+static int __pyx_setprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_num_records(PyObject *o, PyObject *v, void *x) {
+ if (v) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_11num_records___set__(o, v);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError, "__del__");
+ return -1;
+ }
+}
+
+static PyObject *__pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_common_shift(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_12common_shift___get__(o);
+}
+
+static int __pyx_setprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_common_shift(PyObject *o, PyObject *v, void *x) {
+ if (v) {
+ return __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_12common_shift___set__(o, v);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError, "__del__");
+ return -1;
+ }
+}
+
+static PyMethodDef __pyx_methods_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode[] = {
+ {__Pyx_NAMESTR("__sizeof__"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___sizeof__, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("all_keys"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_all_keys, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("all_items"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_all_items, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("_get_offset_for_sha1"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__get_offset_for_sha1, METH_O, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("_get_offsets"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__get_offsets, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode[] = {
+ {(char *)"min_key", __pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_min_key, 0, 0, 0},
+ {(char *)"max_key", __pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_max_key, 0, 0, 0},
+ {(char *)"last_key", __pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_last_key, __pyx_setprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_last_key, 0, 0},
+ {(char *)"num_records", __pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_num_records, __pyx_setprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_num_records, 0, 0},
+ {(char *)"common_shift", __pyx_getprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_common_shift, __pyx_setprop_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode_common_shift, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_GCCHKSHA1LeafNode = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_GCCHKSHA1LeafNode = {
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___len__, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ __pyx_sq_item_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___contains__, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_GCCHKSHA1LeafNode = {
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___len__, /*mp_length*/
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___getitem__, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_GCCHKSHA1LeafNode = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+PyTypeObject __pyx_type_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("bzrlib._btree_serializer_pyx.GCCHKSHA1LeafNode"), /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_GCCHKSHA1LeafNode, /*tp_as_number*/
+ &__pyx_tp_as_sequence_GCCHKSHA1LeafNode, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_GCCHKSHA1LeafNode, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_GCCHKSHA1LeafNode, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ __Pyx_DOCSTR("Track all the entries for a given leaf node."), /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+
+static PyMethodDef __pyx_methods[] = {
+ {__Pyx_NAMESTR("_parse_leaf_lines"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__parse_leaf_lines, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("_py_unhexlify"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_unhexlify, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_btree_serializer_pyx__py_unhexlify)},
+ {__Pyx_NAMESTR("_py_hexlify"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_hexlify, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_btree_serializer_pyx__py_hexlify)},
+ {__Pyx_NAMESTR("_py_key_to_sha1"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_key_to_sha1, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_btree_serializer_pyx__py_key_to_sha1)},
+ {__Pyx_NAMESTR("_py_sha1_to_key"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__py_sha1_to_key, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_btree_serializer_pyx__py_sha1_to_key)},
+ {__Pyx_NAMESTR("_parse_into_chk"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__parse_into_chk, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_btree_serializer_pyx__parse_into_chk)},
+ {__Pyx_NAMESTR("_flatten_node"), (PyCFunction)__pyx_pf_6bzrlib_21_btree_serializer_pyx__flatten_node, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_btree_serializer_pyx__flatten_node)},
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ __Pyx_NAMESTR("_btree_serializer_pyx"),
+ __Pyx_DOCSTR(__pyx_k_37), /* m_doc */
+ -1, /* m_size */
+ __pyx_methods /* m_methods */,
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
+ {&__pyx_kp_s_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 0, 1, 0},
+ {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0},
+ {&__pyx_kp_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 0},
+ {&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 0},
+ {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0},
+ {&__pyx_n_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 1},
+ {&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0},
+ {&__pyx_kp_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 0},
+ {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0},
+ {&__pyx_kp_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 0},
+ {&__pyx_kp_s_22, __pyx_k_22, sizeof(__pyx_k_22), 0, 0, 1, 0},
+ {&__pyx_kp_s_23, __pyx_k_23, sizeof(__pyx_k_23), 0, 0, 1, 0},
+ {&__pyx_kp_s_24, __pyx_k_24, sizeof(__pyx_k_24), 0, 0, 1, 0},
+ {&__pyx_kp_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 0},
+ {&__pyx_kp_s_26, __pyx_k_26, sizeof(__pyx_k_26), 0, 0, 1, 0},
+ {&__pyx_kp_s_27, __pyx_k_27, sizeof(__pyx_k_27), 0, 0, 1, 0},
+ {&__pyx_kp_s_28, __pyx_k_28, sizeof(__pyx_k_28), 0, 0, 1, 0},
+ {&__pyx_kp_s_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 0, 1, 0},
+ {&__pyx_kp_s_30, __pyx_k_30, sizeof(__pyx_k_30), 0, 0, 1, 0},
+ {&__pyx_kp_s_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 0, 1, 0},
+ {&__pyx_kp_s_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 0, 1, 0},
+ {&__pyx_kp_s_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 0, 1, 0},
+ {&__pyx_kp_s_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 0, 1, 0},
+ {&__pyx_kp_s_35, __pyx_k_35, sizeof(__pyx_k_35), 0, 0, 1, 0},
+ {&__pyx_kp_s_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 0, 1, 0},
+ {&__pyx_kp_u_38, __pyx_k_38, sizeof(__pyx_k_38), 0, 1, 0, 0},
+ {&__pyx_kp_u_39, __pyx_k_39, sizeof(__pyx_k_39), 0, 1, 0, 0},
+ {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0},
+ {&__pyx_kp_u_40, __pyx_k_40, sizeof(__pyx_k_40), 0, 1, 0, 0},
+ {&__pyx_kp_u_41, __pyx_k_41, sizeof(__pyx_k_41), 0, 1, 0, 0},
+ {&__pyx_kp_u_42, __pyx_k_42, sizeof(__pyx_k_42), 0, 1, 0, 0},
+ {&__pyx_kp_u_43, __pyx_k_43, sizeof(__pyx_k_43), 0, 1, 0, 0},
+ {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0},
+ {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0},
+ {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0},
+ {&__pyx_n_s__AssertionError, __pyx_k__AssertionError, sizeof(__pyx_k__AssertionError), 0, 0, 1, 1},
+ {&__pyx_n_s__KeyError, __pyx_k__KeyError, sizeof(__pyx_k__KeyError), 0, 0, 1, 1},
+ {&__pyx_n_s__TypeError, __pyx_k__TypeError, sizeof(__pyx_k__TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
+ {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
+ {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
+ {&__pyx_n_s___compute_common, __pyx_k___compute_common, sizeof(__pyx_k___compute_common), 0, 0, 1, 1},
+ {&__pyx_n_s___count_records, __pyx_k___count_records, sizeof(__pyx_k___count_records), 0, 0, 1, 1},
+ {&__pyx_n_s___cur_str, __pyx_k___cur_str, sizeof(__pyx_k___cur_str), 0, 0, 1, 1},
+ {&__pyx_n_s___end_str, __pyx_k___end_str, sizeof(__pyx_k___end_str), 0, 0, 1, 1},
+ {&__pyx_n_s___flatten_node, __pyx_k___flatten_node, sizeof(__pyx_k___flatten_node), 0, 0, 1, 1},
+ {&__pyx_n_s___header_found, __pyx_k___header_found, sizeof(__pyx_k___header_found), 0, 0, 1, 1},
+ {&__pyx_n_s___lookup_record, __pyx_k___lookup_record, sizeof(__pyx_k___lookup_record), 0, 0, 1, 1},
+ {&__pyx_n_s___offset_for_sha1, __pyx_k___offset_for_sha1, sizeof(__pyx_k___offset_for_sha1), 0, 0, 1, 1},
+ {&__pyx_n_s___parse_bytes, __pyx_k___parse_bytes, sizeof(__pyx_k___parse_bytes), 0, 0, 1, 1},
+ {&__pyx_n_s___parse_into_chk, __pyx_k___parse_into_chk, sizeof(__pyx_k___parse_into_chk), 0, 0, 1, 1},
+ {&__pyx_n_s___parse_one_entry, __pyx_k___parse_one_entry, sizeof(__pyx_k___parse_one_entry), 0, 0, 1, 1},
+ {&__pyx_n_s___py_hexlify, __pyx_k___py_hexlify, sizeof(__pyx_k___py_hexlify), 0, 0, 1, 1},
+ {&__pyx_n_s___py_key_to_sha1, __pyx_k___py_key_to_sha1, sizeof(__pyx_k___py_key_to_sha1), 0, 0, 1, 1},
+ {&__pyx_n_s___py_sha1_to_key, __pyx_k___py_sha1_to_key, sizeof(__pyx_k___py_sha1_to_key), 0, 0, 1, 1},
+ {&__pyx_n_s___py_unhexlify, __pyx_k___py_unhexlify, sizeof(__pyx_k___py_unhexlify), 0, 0, 1, 1},
+ {&__pyx_n_s___record_to_item, __pyx_k___record_to_item, sizeof(__pyx_k___record_to_item), 0, 0, 1, 1},
+ {&__pyx_n_s___start, __pyx_k___start, sizeof(__pyx_k___start), 0, 0, 1, 1},
+ {&__pyx_n_s__block_length, __pyx_k__block_length, sizeof(__pyx_k__block_length), 0, 0, 1, 1},
+ {&__pyx_n_s__block_offset, __pyx_k__block_offset, sizeof(__pyx_k__block_offset), 0, 0, 1, 1},
+ {&__pyx_n_s__bytes, __pyx_k__bytes, sizeof(__pyx_k__bytes), 0, 0, 1, 1},
+ {&__pyx_n_s__common_shift, __pyx_k__common_shift, sizeof(__pyx_k__common_shift), 0, 0, 1, 1},
+ {&__pyx_n_s__extract_key, __pyx_k__extract_key, sizeof(__pyx_k__extract_key), 0, 0, 1, 1},
+ {&__pyx_n_s__join, __pyx_k__join, sizeof(__pyx_k__join), 0, 0, 1, 1},
+ {&__pyx_n_s__key_length, __pyx_k__key_length, sizeof(__pyx_k__key_length), 0, 0, 1, 1},
+ {&__pyx_n_s__keys, __pyx_k__keys, sizeof(__pyx_k__keys), 0, 0, 1, 1},
+ {&__pyx_n_s__last_key, __pyx_k__last_key, sizeof(__pyx_k__last_key), 0, 0, 1, 1},
+ {&__pyx_n_s__last_record, __pyx_k__last_record, sizeof(__pyx_k__last_record), 0, 0, 1, 1},
+ {&__pyx_n_s__node, __pyx_k__node, sizeof(__pyx_k__node), 0, 0, 1, 1},
+ {&__pyx_n_s__num_records, __pyx_k__num_records, sizeof(__pyx_k__num_records), 0, 0, 1, 1},
+ {&__pyx_n_s__offsets, __pyx_k__offsets, sizeof(__pyx_k__offsets), 0, 0, 1, 1},
+ {&__pyx_n_s__parse, __pyx_k__parse, sizeof(__pyx_k__parse), 0, 0, 1, 1},
+ {&__pyx_n_s__process_line, __pyx_k__process_line, sizeof(__pyx_k__process_line), 0, 0, 1, 1},
+ {&__pyx_n_s__record_end, __pyx_k__record_end, sizeof(__pyx_k__record_end), 0, 0, 1, 1},
+ {&__pyx_n_s__record_start, __pyx_k__record_start, sizeof(__pyx_k__record_start), 0, 0, 1, 1},
+ {&__pyx_n_s__records, __pyx_k__records, sizeof(__pyx_k__records), 0, 0, 1, 1},
+ {&__pyx_n_s__ref_list_length, __pyx_k__ref_list_length, sizeof(__pyx_k__ref_list_length), 0, 0, 1, 1},
+ {&__pyx_n_s__reference_lists, __pyx_k__reference_lists, sizeof(__pyx_k__reference_lists), 0, 0, 1, 1},
+ {&__pyx_n_s__sha1, __pyx_k__sha1, sizeof(__pyx_k__sha1), 0, 0, 1, 1},
+ {&__pyx_n_s__sys, __pyx_k__sys, sizeof(__pyx_k__sys), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+static int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_AssertionError = __Pyx_GetName(__pyx_b, __pyx_n_s__AssertionError); if (!__pyx_builtin_AssertionError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_KeyError = __Pyx_GetName(__pyx_b, __pyx_n_s__KeyError); if (!__pyx_builtin_KeyError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init_btree_serializer_pyx(void); /*proto*/
+PyMODINIT_FUNC init_btree_serializer_pyx(void)
+#else
+PyMODINIT_FUNC PyInit__btree_serializer_pyx(void); /*proto*/
+PyMODINIT_FUNC PyInit__btree_serializer_pyx(void)
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ #if CYTHON_REFNANNY
+ void* __pyx_refnanny = NULL;
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+ }
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext("PyMODINIT_FUNC PyInit__btree_serializer_pyx(void)", __LINE__, __FILE__);
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef __pyx_binding_PyCFunctionType_USED
+ if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_btree_serializer_pyx"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_37), 0, PYTHON_API_VERSION);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ #if PY_MAJOR_VERSION < 3
+ Py_INCREF(__pyx_m);
+ #endif
+ __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ /*--- Initialize various global constants etc. ---*/
+ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_module_is_main_bzrlib___btree_serializer_pyx) {
+ if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ }
+ /*--- Builtin init code ---*/
+ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Global init code ---*/
+ /*--- Function export code ---*/
+ /*--- Type init code ---*/
+ __pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser = &__pyx_vtable_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser;
+ #if PY_MAJOR_VERSION >= 3
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser.extract_key = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *, char *))__pyx_f_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_extract_key;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser.process_line = (int (*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser *))__pyx_f_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_process_line;
+ #else
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser.extract_key = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_extract_key;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser.process_line = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_15BTreeLeafParser_process_line;
+ #endif
+ if (PyType_Ready(&__pyx_type_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser.tp_dict, __pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "BTreeLeafParser", (PyObject *)&__pyx_type_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser = &__pyx_type_6bzrlib_21_btree_serializer_pyx_BTreeLeafParser;
+ __pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode = &__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode;
+ #if PY_MAJOR_VERSION >= 3
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._record_to_value_and_refs = (StaticTuple *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__record_to_value_and_refs;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._record_to_item = (StaticTuple *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__record_to_item;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._lookup_record = (__pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__lookup_record;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._count_records = (int (*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *, char *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__count_records;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._parse_bytes = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, PyObject *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__parse_bytes;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._parse_one_entry = (char *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *, char *, __pyx_t_6bzrlib_21_btree_serializer_pyx_gc_chk_sha1_record *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__parse_one_entry;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._offset_for_sha1 = (int (*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *, char *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__offset_for_sha1;
+ __pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._compute_common = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode *))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__compute_common;
+ #else
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._record_to_value_and_refs = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__record_to_value_and_refs;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._record_to_item = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__record_to_item;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._lookup_record = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__lookup_record;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._count_records = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__count_records;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._parse_bytes = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__parse_bytes;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._parse_one_entry = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__parse_one_entry;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._offset_for_sha1 = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__offset_for_sha1;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode._compute_common = (void(*)(void))__pyx_f_6bzrlib_21_btree_serializer_pyx_17GCCHKSHA1LeafNode__compute_common;
+ #endif
+ if (PyType_Ready(&__pyx_type_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode.tp_dict, __pyx_vtabptr_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "GCCHKSHA1LeafNode", (PyObject *)&__pyx_type_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode = &__pyx_type_6bzrlib_21_btree_serializer_pyx_GCCHKSHA1LeafNode;
+ /*--- Type import code ---*/
+ __pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = __Pyx_ImportType("bzrlib._static_tuple_c", "StaticTuple", sizeof(StaticTuple), 0); if (unlikely(!__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Function import code ---*/
+ /*--- Execution code ---*/
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":77
+ * # have to worry about exception checking.
+ * ## extern cdef class StaticTuple
+ * import sys # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__sys), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__sys, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":120
+ *
+ * # This sets up the StaticTuple C_API functionality
+ * import_static_tuple_c() # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_2 = import_static_tuple_c(); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":347
+ * cdef int _unhexbuf[256]
+ * cdef char *_hexbuf
+ * _hexbuf = '0123456789abcdef' # <<<<<<<<<<<<<<
+ *
+ * cdef _populate_unhexbuf():
+ */
+ __pyx_v_6bzrlib_21_btree_serializer_pyx__hexbuf = __pyx_k__0123456789abcdef;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":359
+ * for i from 10 <= i < 16: # ABCDEF => 10, 11, 12, 13, 14, 15, 16
+ * _unhexbuf[(i - 10 + c'A')] = i
+ * _populate_unhexbuf() # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = __pyx_f_6bzrlib_21_btree_serializer_pyx__populate_unhexbuf(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_btree_serializer_pyx.pyx":1
+ * # Copyright (C) 2008, 2009, 2010 Canonical Ltd # <<<<<<<<<<<<<<
+ * #
+ * # This program is free software; you can redistribute it and/or modify
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_t_3 = PyObject_GetAttr(__pyx_m, __pyx_n_s___py_unhexlify); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_38), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s___py_hexlify); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_39), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_GetAttr(__pyx_m, __pyx_n_s___py_key_to_sha1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_40), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s___py_sha1_to_key); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_41), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_GetAttr(__pyx_m, __pyx_n_s___parse_into_chk); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_42), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s___flatten_node); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_43), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ if (__pyx_m) {
+ __Pyx_AddTraceback("init bzrlib._btree_serializer_pyx");
+ Py_DECREF(__pyx_m); __pyx_m = 0;
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init bzrlib._btree_serializer_pyx");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if PY_MAJOR_VERSION < 3
+ return;
+ #else
+ return __pyx_m;
+ #endif
+}
+
+/* Runtime support code */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AS_STRING(kw_name));
+ #endif
+}
+
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *number, *more_or_less;
+
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ number = (num_expected == 1) ? "" : "s";
+ PyErr_Format(PyExc_TypeError,
+ #if PY_VERSION_HEX < 0x02050000
+ "%s() takes %s %d positional argument%s (%d given)",
+ #else
+ "%s() takes %s %zd positional argument%s (%zd given)",
+ #endif
+ func_name, more_or_less, num_expected, number, num_found);
+}
+
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
+ #else
+ if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
+ #endif
+ goto invalid_keyword_type;
+ } else {
+ for (name = first_kw_arg; *name; name++) {
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) break;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) break;
+ #endif
+ }
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ /* unexpected keyword found */
+ for (name=argnames; name != first_kw_arg; name++) {
+ if (**name == key) goto arg_passed_twice;
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) goto arg_passed_twice;
+ #endif
+ }
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, **name);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%s() got an unexpected keyword argument '%s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+ if (unlikely(!type)) {
+ PyErr_Format(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ if (likely(PyObject_TypeCheck(obj, type)))
+ return 1;
+ PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+ Py_TYPE(obj)->tp_name, type->tp_name);
+ return 0;
+}
+
+static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
+ long q = a / b;
+ long r = a - q*b;
+ q -= ((r != 0) & ((r ^ b) < 0));
+ return q;
+}
+
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *py_import = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
+ if (!py_import)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, NULL);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(py_import);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ type = 0;
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+#else /* Python 3+ */
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (!PyExceptionClass_Check(type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+
+ PyErr_SetObject(type, value);
+
+ if (tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+ }
+
+bad:
+ return;
+}
+#endif
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
+ const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned char" :
+ "value too large to convert to unsigned char");
+ }
+ return (unsigned char)-1;
+ }
+ return (unsigned char)val;
+ }
+ return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
+ const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned short" :
+ "value too large to convert to unsigned short");
+ }
+ return (unsigned short)-1;
+ }
+ return (unsigned short)val;
+ }
+ return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
+ const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned int" :
+ "value too large to convert to unsigned int");
+ }
+ return (unsigned int)-1;
+ }
+ return (unsigned int)val;
+ }
+ return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
+ const char neg_one = (char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to char" :
+ "value too large to convert to char");
+ }
+ return (char)-1;
+ }
+ return (char)val;
+ }
+ return (char)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
+ const short neg_one = (short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to short" :
+ "value too large to convert to short");
+ }
+ return (short)-1;
+ }
+ return (short)val;
+ }
+ return (short)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
+ const signed char neg_one = (signed char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed char" :
+ "value too large to convert to signed char");
+ }
+ return (signed char)-1;
+ }
+ return (signed char)val;
+ }
+ return (signed char)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
+ const signed short neg_one = (signed short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed short" :
+ "value too large to convert to signed short");
+ }
+ return (signed short)-1;
+ }
+ return (signed short)val;
+ }
+ return (signed short)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
+ const signed int neg_one = (signed int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed int" :
+ "value too large to convert to signed int");
+ }
+ return (signed int)-1;
+ }
+ return (signed int)val;
+ }
+ return (signed int)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
+ const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ unsigned long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned long)-1;
+ val = __Pyx_PyInt_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
+ const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ unsigned PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
+ const long neg_one = (long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (long)-1;
+ val = __Pyx_PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
+ const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
+ const signed long neg_one = (signed long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ signed long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed long)-1;
+ val = __Pyx_PyInt_AsSignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
+ const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ signed PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsSignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static void __Pyx_WriteUnraisable(const char *name) {
+ PyObject *old_exc, *old_val, *old_tb;
+ PyObject *ctx;
+ __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
+ #if PY_MAJOR_VERSION < 3
+ ctx = PyString_FromString(name);
+ #else
+ ctx = PyUnicode_FromString(name);
+ #endif
+ __Pyx_ErrRestore(old_exc, old_val, old_tb);
+ if (!ctx) {
+ PyErr_WriteUnraisable(Py_None);
+ } else {
+ PyErr_WriteUnraisable(ctx);
+ Py_DECREF(ctx);
+ }
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
+ long size, int strict)
+{
+ PyObject *py_module = 0;
+ PyObject *result = 0;
+ PyObject *py_name = 0;
+ char warning[200];
+
+ py_module = __Pyx_ImportModule(module_name);
+ if (!py_module)
+ goto bad;
+ #if PY_MAJOR_VERSION < 3
+ py_name = PyString_FromString(class_name);
+ #else
+ py_name = PyUnicode_FromString(class_name);
+ #endif
+ if (!py_name)
+ goto bad;
+ result = PyObject_GetAttr(py_module, py_name);
+ Py_DECREF(py_name);
+ py_name = 0;
+ Py_DECREF(py_module);
+ py_module = 0;
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+ if (!strict && ((PyTypeObject *)result)->tp_basicsize > size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility",
+ module_name, class_name);
+ #if PY_VERSION_HEX < 0x02050000
+ PyErr_Warn(NULL, warning);
+ #else
+ PyErr_WarnEx(NULL, warning, 0);
+ #endif
+ }
+ else if (((PyTypeObject *)result)->tp_basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s.%s has the wrong size, try recompiling",
+ module_name, class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(py_module);
+ Py_XDECREF(result);
+ return 0;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(const char *name) {
+ PyObject *py_name = 0;
+ PyObject *py_module = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_name = PyString_FromString(name);
+ #else
+ py_name = PyUnicode_FromString(name);
+ #endif
+ if (!py_name)
+ goto bad;
+ py_module = PyImport_Import(py_name);
+ Py_DECREF(py_name);
+ return py_module;
+bad:
+ Py_XDECREF(py_name);
+ return 0;
+}
+#endif
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(const char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(__pyx_filename);
+ #else
+ py_srcfile = PyUnicode_FromString(__pyx_filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (__pyx_clineno) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*int kwonlyargcount,*/
+ #endif
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_GET(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else /* Python 3+ has unicode identifiers */
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+/* Type Conversion Functions */
+
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
+ PyNumberMethods *m;
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_VERSION_HEX < 0x03000000
+ if (PyInt_Check(x) || PyLong_Check(x))
+#else
+ if (PyLong_Check(x))
+#endif
+ return Py_INCREF(x), x;
+ m = Py_TYPE(x)->tp_as_number;
+#if PY_VERSION_HEX < 0x03000000
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = PyNumber_Long(x);
+ }
+#else
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Long(x);
+ }
+#endif
+ if (res) {
+#if PY_VERSION_HEX < 0x03000000
+ if (!PyInt_Check(res) && !PyLong_Check(res)) {
+#else
+ if (!PyLong_Check(res)) {
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%s__ returned non-%s (type %.200s)",
+ name, name, Py_TYPE(res)->tp_name);
+ Py_DECREF(res);
+ return NULL;
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject* x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+#if PY_VERSION_HEX < 0x02050000
+ if (ival <= LONG_MAX)
+ return PyInt_FromLong((long)ival);
+ else {
+ unsigned char *bytes = (unsigned char *) &ival;
+ int one = 1; int little = (int)*(unsigned char*)&one;
+ return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+ }
+#else
+ return PyInt_FromSize_t(ival);
+#endif
+}
+
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
+ unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
+ if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
+ return (size_t)-1;
+ } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
+ return (size_t)-1;
+ }
+ return (size_t)val;
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/bzrlib/_btree_serializer_pyx.pyx b/bzrlib/_btree_serializer_pyx.pyx
new file mode 100644
index 0000000..2d30177
--- /dev/null
+++ b/bzrlib/_btree_serializer_pyx.pyx
@@ -0,0 +1,982 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Pyrex extensions to btree node parsing."""
+
+#python2.4 support
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from "stdlib.h":
+ ctypedef unsigned size_t
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ ctypedef struct PyObject:
+ pass
+ int PyList_Append(object lst, object item) except -1
+
+ char *PyString_AsString(object p) except NULL
+ object PyString_FromStringAndSize(char *, Py_ssize_t)
+ PyObject *PyString_FromStringAndSize_ptr "PyString_FromStringAndSize" (char *, Py_ssize_t)
+ object PyString_FromFormat(char *, ...)
+ int PyString_CheckExact(object s)
+ int PyString_CheckExact_ptr "PyString_CheckExact" (PyObject *)
+ Py_ssize_t PyString_Size(object p)
+ Py_ssize_t PyString_GET_SIZE_ptr "PyString_GET_SIZE" (PyObject *)
+ char * PyString_AS_STRING_ptr "PyString_AS_STRING" (PyObject *)
+ char * PyString_AS_STRING(object)
+ Py_ssize_t PyString_GET_SIZE(object)
+ int PyString_AsStringAndSize_ptr(PyObject *, char **buf, Py_ssize_t *len)
+ void PyString_InternInPlace(PyObject **)
+ int PyTuple_CheckExact(object t)
+ object PyTuple_New(Py_ssize_t n_entries)
+ void PyTuple_SET_ITEM(object, Py_ssize_t offset, object) # steals the ref
+ Py_ssize_t PyTuple_GET_SIZE(object t)
+ PyObject *PyTuple_GET_ITEM_ptr_object "PyTuple_GET_ITEM" (object tpl, int index)
+ void Py_INCREF(object)
+ void Py_DECREF_ptr "Py_DECREF" (PyObject *)
+ void *PyMem_Malloc(size_t nbytes)
+ void PyMem_Free(void *)
+ void memset(void *, int, size_t)
+
+cdef extern from "string.h":
+ void *memcpy(void *dest, void *src, size_t n)
+ void *memchr(void *s, int c, size_t n)
+ int memcmp(void *s1, void *s2, size_t n)
+ # GNU extension
+ # void *memrchr(void *s, int c, size_t n)
+ int strncmp(char *s1, char *s2, size_t n)
+ unsigned long strtoul(char *s1, char **out, int base)
+ long long strtoll(char *s1, char **out, int base)
+
+
+# It seems we need to import the definitions so that the pyrex compiler has
+# local names to access them.
+from _static_tuple_c cimport StaticTuple, \
+ import_static_tuple_c, StaticTuple_New, \
+ StaticTuple_Intern, StaticTuple_SET_ITEM, StaticTuple_CheckExact, \
+ StaticTuple_GET_SIZE, StaticTuple_GET_ITEM
+# This tells the test infrastructure that StaticTuple is a class, so we don't
+# have to worry about exception checking.
+## extern cdef class StaticTuple
+import sys
+
+
+# TODO: Find some way to import this from _dirstate_helpers
+cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise
+ # memrchr seems to be a GNU extension, so we have to implement it ourselves
+ # It is not present in any win32 standard library
+ cdef char *pos
+ cdef char *start
+
+ start = <char*>s
+ pos = start + n - 1
+ while pos >= start:
+ if pos[0] == c:
+ return <void*>pos
+ pos = pos - 1
+ return NULL
+
+
+# TODO: Import this from _dirstate_helpers when it is merged
+cdef object safe_string_from_size(char *s, Py_ssize_t size):
+ if size < 0:
+ raise AssertionError(
+ 'tried to create a string with an invalid size: %d @0x%x'
+ % (size, <int>s))
+ return PyString_FromStringAndSize(s, size)
+
+
+cdef object safe_interned_string_from_size(char *s, Py_ssize_t size):
+ cdef PyObject *py_str
+ if size < 0:
+ raise AssertionError(
+ 'tried to create a string with an invalid size: %d @0x%x'
+ % (size, <int>s))
+ py_str = PyString_FromStringAndSize_ptr(s, size)
+ PyString_InternInPlace(&py_str)
+ result = <object>py_str
+ # Casting a PyObject* to an <object> triggers an INCREF from Pyrex, so we
+ # DECREF it to avoid getting immortal strings
+ Py_DECREF_ptr(py_str)
+ return result
+
+# This sets up the StaticTuple C_API functionality
+import_static_tuple_c()
+
+
+cdef class BTreeLeafParser:
+ """Parse the leaf nodes of a BTree index.
+
+ :ivar bytes: The PyString object containing the uncompressed text for the
+ node.
+ :ivar key_length: An integer describing how many pieces the keys have for
+ this index.
+ :ivar ref_list_length: An integer describing how many reference lists each
+ key in this index has.
+ :ivar keys: A PyList of keys found in this node.
+
+ :ivar _cur_str: A pointer to the start of the next line to parse
+ :ivar _end_str: A pointer to the end of bytes
+ :ivar _start: Pointer to the location within the current line while
+ parsing.
+ :ivar _header_found: True when we have parsed the header for this node
+ """
+
+ cdef object bytes
+ cdef int key_length
+ cdef int ref_list_length
+ cdef object keys
+
+ cdef char * _cur_str
+ cdef char * _end_str
+ # The current start point for parsing
+ cdef char * _start
+
+ cdef int _header_found
+
+ def __init__(self, bytes, key_length, ref_list_length):
+ self.bytes = bytes
+ self.key_length = key_length
+ self.ref_list_length = ref_list_length
+ self.keys = []
+ self._cur_str = NULL
+ self._end_str = NULL
+ self._header_found = 0
+ # keys are tuples
+
+ cdef extract_key(self, char * last):
+ """Extract a key.
+
+ :param last: points at the byte after the last byte permitted for the
+ key.
+ """
+ cdef char *temp_ptr
+ cdef int loop_counter
+ cdef StaticTuple key
+
+ key = StaticTuple_New(self.key_length)
+ for loop_counter from 0 <= loop_counter < self.key_length:
+ # grab a key segment
+ temp_ptr = <char*>memchr(self._start, c'\0', last - self._start)
+ if temp_ptr == NULL:
+ if loop_counter + 1 == self.key_length:
+ # capture to last
+ temp_ptr = last
+ else:
+ # Invalid line
+ failure_string = ("invalid key, wanted segment from " +
+ repr(safe_string_from_size(self._start,
+ last - self._start)))
+ raise AssertionError(failure_string)
+ # capture the key string
+ if (self.key_length == 1
+ and (temp_ptr - self._start) == 45
+ and strncmp(self._start, 'sha1:', 5) == 0):
+ key_element = safe_string_from_size(self._start,
+ temp_ptr - self._start)
+ else:
+ key_element = safe_interned_string_from_size(self._start,
+ temp_ptr - self._start)
+ # advance our pointer
+ self._start = temp_ptr + 1
+ Py_INCREF(key_element)
+ StaticTuple_SET_ITEM(key, loop_counter, key_element)
+ key = StaticTuple_Intern(key)
+ return key
+
+ cdef int process_line(self) except -1:
+ """Process a line in the bytes."""
+ cdef char *last
+ cdef char *temp_ptr
+ cdef char *ref_ptr
+ cdef char *next_start
+ cdef int loop_counter
+ cdef Py_ssize_t str_len
+
+ self._start = self._cur_str
+ # Find the next newline
+ last = <char*>memchr(self._start, c'\n', self._end_str - self._start)
+ if last == NULL:
+ # Process until the end of the file
+ last = self._end_str
+ self._cur_str = self._end_str
+ else:
+ # And the next string is right after it
+ self._cur_str = last + 1
+ # The last character is right before the '\n'
+
+ if last == self._start:
+ # parsed it all.
+ return 0
+ if last < self._start:
+ # Unexpected error condition - fail
+ raise AssertionError("last < self._start")
+ if 0 == self._header_found:
+ # The first line in a leaf node is the header "type=leaf\n"
+ if strncmp("type=leaf", self._start, last - self._start) == 0:
+ self._header_found = 1
+ return 0
+ else:
+ raise AssertionError('Node did not start with "type=leaf": %r'
+ % (safe_string_from_size(self._start, last - self._start)))
+
+ key = self.extract_key(last)
+ # find the value area
+ temp_ptr = <char*>_my_memrchr(self._start, c'\0', last - self._start)
+ if temp_ptr == NULL:
+ # Invalid line
+ raise AssertionError("Failed to find the value area")
+ else:
+ # Because of how conversions were done, we ended up with *lots* of
+ # values that are identical. These are all of the 0-length nodes
+ # that are referred to by the TREE_ROOT (and likely some other
+ # directory nodes.) For example, bzr has 25k references to
+ # something like '12607215 328306 0 0', which ends up consuming 1MB
+ # of memory, just for those strings.
+ str_len = last - temp_ptr - 1
+ if (str_len > 4
+ and strncmp(" 0 0", last - 4, 4) == 0):
+ # This drops peak mem for bzr.dev from 87.4MB => 86.2MB
+ # For Launchpad 236MB => 232MB
+ value = safe_interned_string_from_size(temp_ptr + 1, str_len)
+ else:
+ value = safe_string_from_size(temp_ptr + 1, str_len)
+ # shrink the references end point
+ last = temp_ptr
+
+ if self.ref_list_length:
+ ref_lists = StaticTuple_New(self.ref_list_length)
+ loop_counter = 0
+ while loop_counter < self.ref_list_length:
+ ref_list = []
+ # extract a reference list
+ loop_counter = loop_counter + 1
+ if last < self._start:
+ raise AssertionError("last < self._start")
+ # find the next reference list end point:
+ temp_ptr = <char*>memchr(self._start, c'\t', last - self._start)
+ if temp_ptr == NULL:
+ # Only valid for the last list
+ if loop_counter != self.ref_list_length:
+ # Invalid line
+ raise AssertionError(
+ "invalid key, loop_counter != self.ref_list_length")
+ else:
+ # scan to the end of the ref list area
+ ref_ptr = last
+ next_start = last
+ else:
+ # scan to the end of this ref list
+ ref_ptr = temp_ptr
+ next_start = temp_ptr + 1
+ # Now, there may be multiple keys in the ref list.
+ while self._start < ref_ptr:
+ # loop finding keys and extracting them
+ temp_ptr = <char*>memchr(self._start, c'\r',
+ ref_ptr - self._start)
+ if temp_ptr == NULL:
+ # key runs to the end
+ temp_ptr = ref_ptr
+
+ PyList_Append(ref_list, self.extract_key(temp_ptr))
+ ref_list = StaticTuple_Intern(StaticTuple(*ref_list))
+ Py_INCREF(ref_list)
+ StaticTuple_SET_ITEM(ref_lists, loop_counter - 1, ref_list)
+ # prepare for the next reference list
+ self._start = next_start
+ node_value = StaticTuple(value, ref_lists)
+ else:
+ if last != self._start:
+ # unexpected reference data present
+ raise AssertionError("unexpected reference data present")
+ node_value = StaticTuple(value, StaticTuple())
+ PyList_Append(self.keys, StaticTuple(key, node_value))
+ return 0
+
+ def parse(self):
+ cdef Py_ssize_t byte_count
+ if not PyString_CheckExact(self.bytes):
+ raise AssertionError('self.bytes is not a string.')
+ byte_count = PyString_Size(self.bytes)
+ self._cur_str = PyString_AsString(self.bytes)
+ # This points just past the last character in the string
+ self._end_str = self._cur_str + byte_count
+ while self._cur_str < self._end_str:
+ self.process_line()
+ return self.keys
+
+
+def _parse_leaf_lines(bytes, key_length, ref_list_length):
+ parser = BTreeLeafParser(bytes, key_length, ref_list_length)
+ return parser.parse()
+
+
+# TODO: We can go from 8 byte offset + 4 byte length to a simple lookup,
+# because the block_offset + length is likely to be repeated. However,
+# the big win there is to cache across pages, and not just one page
+# Though if we did cache in a page, we could certainly use a short int.
+# And this goes from 40 bytes to 30 bytes.
+# One slightly ugly option would be to cache block offsets in a global.
+# However, that leads to thread-safety issues, etc.
+ctypedef struct gc_chk_sha1_record:
+ long long block_offset
+ unsigned int block_length
+ unsigned int record_start
+ unsigned int record_end
+ char sha1[20]
+
+
+cdef int _unhexbuf[256]
+cdef char *_hexbuf
+_hexbuf = '0123456789abcdef'
+
+cdef _populate_unhexbuf():
+ cdef int i
+ for i from 0 <= i < 256:
+ _unhexbuf[i] = -1
+ for i from 0 <= i < 10: # 0123456789 => map to the raw number
+ _unhexbuf[(i + c'0')] = i
+ for i from 10 <= i < 16: # abcdef => 10, 11, 12, 13, 14, 15
+ _unhexbuf[(i - 10 + c'a')] = i
+ for i from 10 <= i < 16: # ABCDEF => 10, 11, 12, 13, 14, 15
+ _unhexbuf[(i - 10 + c'A')] = i
+_populate_unhexbuf()
+
+
+cdef int _unhexlify_sha1(char *as_hex, char *as_bin): # cannot_raise
+ """Take the hex sha1 in as_hex and make it binary in as_bin
+
+ Same as binascii.unhexlify, but working on C strings, not Python objects.
+ """
+ cdef int top
+ cdef int bot
+ cdef int i, j
+ cdef char *cur
+
+ # binascii does this using isupper() and tolower() and ?: syntax. I'm
+ # guessing a simple lookup array should be faster.
+ j = 0
+ for i from 0 <= i < 20:
+ top = _unhexbuf[<unsigned char>(as_hex[j])]
+ j = j + 1
+ bot = _unhexbuf[<unsigned char>(as_hex[j])]
+ j = j + 1
+ if top == -1 or bot == -1:
+ return 0
+ as_bin[i] = <unsigned char>((top << 4) + bot);
+ return 1
+
+
+def _py_unhexlify(as_hex):
+ """For the test infrastructure, just thunks to _unhexlify_sha1"""
+ if len(as_hex) != 40 or not PyString_CheckExact(as_hex):
+ raise ValueError('not a 40-byte hex digest')
+ as_bin = PyString_FromStringAndSize(NULL, 20)
+ if _unhexlify_sha1(PyString_AS_STRING(as_hex), PyString_AS_STRING(as_bin)):
+ return as_bin
+ return None
+
+
+cdef void _hexlify_sha1(char *as_bin, char *as_hex): # cannot_raise
+ cdef int i, j
+ cdef char c
+
+ j = 0
+ for i from 0 <= i < 20:
+ c = as_bin[i]
+ as_hex[j] = _hexbuf[(c>>4)&0xf]
+ j = j + 1
+ as_hex[j] = _hexbuf[(c)&0xf]
+ j = j + 1
+
+
+def _py_hexlify(as_bin):
+ """For test infrastructure, thunk to _hexlify_sha1"""
+ if len(as_bin) != 20 or not PyString_CheckExact(as_bin):
+ raise ValueError('not a 20-byte binary digest')
+ as_hex = PyString_FromStringAndSize(NULL, 40)
+ _hexlify_sha1(PyString_AS_STRING(as_bin), PyString_AS_STRING(as_hex))
+ return as_hex
+
+
+cdef int _key_to_sha1(key, char *sha1): # cannot_raise
+ """Map a key into its sha1 content.
+
+ :param key: A tuple of style ('sha1:abcd...',)
+ :param sha1: A char buffer of 20 bytes
+ :return: 1 if this could be converted, 0 otherwise
+ """
+ cdef char *c_val
+ cdef PyObject *p_val
+
+ if StaticTuple_CheckExact(key) and StaticTuple_GET_SIZE(key) == 1:
+ p_val = <PyObject *>StaticTuple_GET_ITEM(key, 0)
+ elif (PyTuple_CheckExact(key) and PyTuple_GET_SIZE(key) == 1):
+ p_val = PyTuple_GET_ITEM_ptr_object(key, 0)
+ else:
+ # Not a tuple or a StaticTuple
+ return 0
+ if (PyString_CheckExact_ptr(p_val) and PyString_GET_SIZE_ptr(p_val) == 45):
+ c_val = PyString_AS_STRING_ptr(p_val)
+ else:
+ return 0
+ if strncmp(c_val, 'sha1:', 5) != 0:
+ return 0
+ if not _unhexlify_sha1(c_val + 5, sha1):
+ return 0
+ return 1
+
+
+def _py_key_to_sha1(key):
+ """Map a key to a simple sha1 string.
+
+ This is a testing thunk to the C function.
+ """
+ as_bin_sha = PyString_FromStringAndSize(NULL, 20)
+ if _key_to_sha1(key, PyString_AS_STRING(as_bin_sha)):
+ return as_bin_sha
+ return None
+
+
+cdef StaticTuple _sha1_to_key(char *sha1):
+ """Compute a ('sha1:abcd',) key for a given sha1."""
+ cdef StaticTuple key
+ cdef object hexxed
+ cdef char *c_buf
+ hexxed = PyString_FromStringAndSize(NULL, 45)
+ c_buf = PyString_AS_STRING(hexxed)
+ memcpy(c_buf, 'sha1:', 5)
+ _hexlify_sha1(sha1, c_buf+5)
+ key = StaticTuple_New(1)
+ Py_INCREF(hexxed)
+ StaticTuple_SET_ITEM(key, 0, hexxed)
+ # This is a bit expensive. To parse 120 keys takes 48us, to return them all
+ # can be done in 66.6us (so 18.6us to build them all).
+ # Adding simple hash() here brings it to 76.6us (so computing the hash
+ # value of 120 keys is 10us), Intern is 86.9us (another 10us to look and add
+ # them to the intern structure.)
+ # However, since we only intern keys that are in active use, it is probably
+ # a win, since they would have been read from elsewhere anyway.
+ # We *could* hang the PyObject form off of the gc_chk_sha1_record for ones
+ # that we have deserialized. Something to think about, at least.
+ key = StaticTuple_Intern(key)
+ return key
+
+
+def _py_sha1_to_key(sha1_bin):
+ """Test thunk to check the sha1 mapping."""
+ if not PyString_CheckExact(sha1_bin) or PyString_GET_SIZE(sha1_bin) != 20:
+ raise ValueError('sha1_bin must be a str of exactly 20 bytes')
+ return _sha1_to_key(PyString_AS_STRING(sha1_bin))
+
+
+cdef unsigned int _sha1_to_uint(char *sha1): # cannot_raise
+ cdef unsigned int val
+ # Must be in MSB, because that is how the content is sorted
+ val = (((<unsigned int>(sha1[0]) & 0xff) << 24)
+ | ((<unsigned int>(sha1[1]) & 0xff) << 16)
+ | ((<unsigned int>(sha1[2]) & 0xff) << 8)
+ | ((<unsigned int>(sha1[3]) & 0xff) << 0))
+ return val
+
+
+cdef _format_record(gc_chk_sha1_record *record):
+ # This is inefficient to go from a logical state back to a
+ # string, but it makes things work a bit better internally for now.
+ if record.block_offset >= 0xFFFFFFFF:
+ # %llu is what we really want, but unfortunately it was only added
+ # in python 2.7... :(
+ block_offset_str = str(record.block_offset)
+ value = PyString_FromFormat('%s %u %u %u',
+ PyString_AS_STRING(block_offset_str),
+ record.block_length,
+ record.record_start, record.record_end)
+ else:
+ value = PyString_FromFormat('%lu %u %u %u',
+ <unsigned long>record.block_offset,
+ record.block_length,
+ record.record_start, record.record_end)
+ return value
+
+
+cdef class GCCHKSHA1LeafNode:
+ """Track all the entries for a given leaf node."""
+
+ cdef gc_chk_sha1_record *records
+ cdef public object last_key
+ cdef gc_chk_sha1_record *last_record
+ cdef public int num_records
+ # This is the number of bits to shift to get to the interesting byte. A
+ # value of 24 means that the very first byte changes across all keys.
+ # Anything else means that there is a common prefix of bits that we can
+ # ignore. 0 means that at least the first 3 bytes are identical, though
+ # that is going to be very rare
+ cdef public unsigned char common_shift
+ # This maps an interesting byte to the first record that matches.
+ # Equivalent to bisect.bisect_left(self.records, sha1), though only taking
+ # into account that one byte.
+ cdef unsigned char offsets[257]
+
+ def __sizeof__(self):
+ # :( Why doesn't Pyrex let me do a simple sizeof(GCCHKSHA1LeafNode)
+ # like Cython? Explicitly enumerating everything here seems to leave my
+ # size off by 2 (286 bytes vs 288 bytes actual). I'm guessing it is an
+ # alignment/padding issue. Oh well- at least we scale properly with
+ # num_records and are very close to correct, which is what I care
+ # about.
+ # If we ever decide to require cython:
+ # return (sizeof(GCCHKSHA1LeafNode)
+ # + sizeof(gc_chk_sha1_record)*self.num_records)
+ return (sizeof(PyObject) + sizeof(void*) + sizeof(int)
+ + sizeof(gc_chk_sha1_record*) + sizeof(PyObject *)
+ + sizeof(gc_chk_sha1_record*) + sizeof(char)
+ + sizeof(unsigned char)*257
+ + sizeof(gc_chk_sha1_record)*self.num_records)
+
+ def __dealloc__(self):
+ if self.records != NULL:
+ PyMem_Free(self.records)
+ self.records = NULL
+
+ def __init__(self, bytes):
+ self._parse_bytes(bytes)
+ self.last_key = None
+ self.last_record = NULL
+
+ property min_key:
+ def __get__(self):
+ if self.num_records > 0:
+ return _sha1_to_key(self.records[0].sha1)
+ return None
+
+ property max_key:
+ def __get__(self):
+ if self.num_records > 0:
+ return _sha1_to_key(self.records[self.num_records-1].sha1)
+ return None
+
+ cdef StaticTuple _record_to_value_and_refs(self,
+ gc_chk_sha1_record *record):
+ """Extract the refs and value part of this record."""
+ cdef StaticTuple value_and_refs
+ cdef StaticTuple empty
+ value_and_refs = StaticTuple_New(2)
+ value = _format_record(record)
+ Py_INCREF(value)
+ StaticTuple_SET_ITEM(value_and_refs, 0, value)
+ # Always empty refs
+ empty = StaticTuple_New(0)
+ Py_INCREF(empty)
+ StaticTuple_SET_ITEM(value_and_refs, 1, empty)
+ return value_and_refs
+
+ cdef StaticTuple _record_to_item(self, gc_chk_sha1_record *record):
+ """Turn a given record back into a fully fledged item.
+ """
+ cdef StaticTuple item
+ cdef StaticTuple key
+ cdef StaticTuple value_and_refs
+ cdef object value
+ key = _sha1_to_key(record.sha1)
+ item = StaticTuple_New(2)
+ Py_INCREF(key)
+ StaticTuple_SET_ITEM(item, 0, key)
+ value_and_refs = self._record_to_value_and_refs(record)
+ Py_INCREF(value_and_refs)
+ StaticTuple_SET_ITEM(item, 1, value_and_refs)
+ return item
+
+ cdef gc_chk_sha1_record* _lookup_record(self, char *sha1) except? NULL:
+ """Find a gc_chk_sha1_record that matches the sha1 supplied."""
+ cdef int lo, hi, mid, the_cmp
+ cdef int offset
+
+ # TODO: We can speed up misses by comparing this sha1 to the common
+ # bits, and seeing if the common prefix matches, if not, we don't
+ # need to search for anything because it cannot match
+ # Use the offset array to find the closest fit for this entry
+ # follow that up with bisecting, since multiple keys can be in one
+ # spot
+ # Bisecting dropped us from 7000 comparisons to 582 (4.8/key), using
+ # the offset array dropped us from 23us to 20us and 156 comparisons
+ # (1.3/key)
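+ # offsets[b] holds the index of the first record whose interesting byte is
+ # >= b, so the slice [offsets[b], offsets[b+1]) brackets every candidate
+ # record before we bisect within it.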
+ offset = self._offset_for_sha1(sha1)
+ lo = self.offsets[offset]
+ hi = self.offsets[offset+1]
+ if hi == 255:
+ # if hi == 255 that means we potentially ran off the end of the
+ # list, so push it up to num_records
+ # note that if 'lo' == 255, that is ok, because we can start
+ # searching from that part of the list.
+ hi = self.num_records
+ local_n_cmp = 0
+ while lo < hi:
+ mid = (lo + hi) / 2
+ the_cmp = memcmp(self.records[mid].sha1, sha1, 20)
+ if the_cmp == 0:
+ return &self.records[mid]
+ elif the_cmp < 0:
+ lo = mid + 1
+ else:
+ hi = mid
+ return NULL
+
+ def __contains__(self, key):
+ cdef char sha1[20]
+ cdef gc_chk_sha1_record *record
+ if _key_to_sha1(key, sha1):
+ # If it isn't a sha1 key, then it won't be in this leaf node
+ record = self._lookup_record(sha1)
+ if record != NULL:
+ self.last_key = key
+ self.last_record = record
+ return True
+ return False
+
+ def __getitem__(self, key):
+ cdef char sha1[20]
+ cdef gc_chk_sha1_record *record
+ record = NULL
+ if self.last_record != NULL and key is self.last_key:
+ record = self.last_record
+ elif _key_to_sha1(key, sha1):
+ record = self._lookup_record(sha1)
+ if record == NULL:
+ raise KeyError('key %r is not present' % (key,))
+ return self._record_to_value_and_refs(record)
+
+ def __len__(self):
+ return self.num_records
+
+ def all_keys(self):
+ cdef int i
+ result = []
+ for i from 0 <= i < self.num_records:
+ PyList_Append(result, _sha1_to_key(self.records[i].sha1))
+ return result
+
+ def all_items(self):
+ cdef int i
+ result = []
+ for i from 0 <= i < self.num_records:
+ item = self._record_to_item(&self.records[i])
+ PyList_Append(result, item)
+ return result
+
+ cdef int _count_records(self, char *c_content, char *c_end): # cannot_raise
+ """Count how many records are in this section."""
+ cdef char *c_cur
+ cdef int num_records
+
+ c_cur = c_content
+ num_records = 0
+ while c_cur != NULL and c_cur < c_end:
+ c_cur = <char *>memchr(c_cur, c'\n', c_end - c_cur);
+ if c_cur == NULL:
+ break
+ c_cur = c_cur + 1
+ num_records = num_records + 1
+ return num_records
+
+ cdef _parse_bytes(self, bytes):
+ """Parse the string 'bytes' into content."""
+ cdef char *c_bytes
+ cdef char *c_cur
+ cdef char *c_end
+ cdef Py_ssize_t n_bytes
+ cdef int num_records
+ cdef int entry
+ cdef gc_chk_sha1_record *cur_record
+
+ if not PyString_CheckExact(bytes):
+ raise TypeError('We only support parsing plain 8-bit strings.')
+ # Pass 1, count how many records there will be
+ n_bytes = PyString_GET_SIZE(bytes)
+ c_bytes = PyString_AS_STRING(bytes)
+ c_end = c_bytes + n_bytes
+ if strncmp(c_bytes, 'type=leaf\n', 10):
+ raise ValueError("bytes did not start with 'type=leaf\\n': %r"
+ % (bytes[:10],))
+ c_cur = c_bytes + 10
+ num_records = self._count_records(c_cur, c_end)
+ # Now allocate the memory for these items, and go to town
+ self.records = <gc_chk_sha1_record*>PyMem_Malloc(num_records *
+ (sizeof(unsigned short) + sizeof(gc_chk_sha1_record)))
+ self.num_records = num_records
+ cur_record = self.records
+ entry = 0
+ while c_cur != NULL and c_cur < c_end and entry < num_records:
+ c_cur = self._parse_one_entry(c_cur, c_end, cur_record)
+ cur_record = cur_record + 1
+ entry = entry + 1
+ if (entry != self.num_records
+ or c_cur != c_end
+ or cur_record != self.records + self.num_records):
+ raise ValueError('Something went wrong while parsing.')
+ # Pass 3: build the offset map
+ self._compute_common()
+
+ cdef char *_parse_one_entry(self, char *c_cur, char *c_end,
+ gc_chk_sha1_record *cur_record) except NULL:
+ """Read a single sha record from the bytes.
+ :param c_cur: The pointer to the start of bytes
+ :param cur_record:
+ """
+ cdef char *c_next
+ if strncmp(c_cur, 'sha1:', 5):
+ raise ValueError('line did not start with sha1: %r'
+ % (safe_string_from_size(c_cur, 10),))
+ c_cur = c_cur + 5
+ c_next = <char *>memchr(c_cur, c'\0', c_end - c_cur)
+ if c_next == NULL or (c_next - c_cur != 40):
+ raise ValueError('Line did not contain 40 hex bytes')
+ if not _unhexlify_sha1(c_cur, cur_record.sha1):
+ raise ValueError('We failed to unhexlify')
+ c_cur = c_next + 1
+ if c_cur[0] != c'\0':
+ raise ValueError('only 1 null, not 2 as expected')
+ c_cur = c_cur + 1
+ cur_record.block_offset = strtoll(c_cur, &c_next, 10)
+ if c_cur == c_next or c_next[0] != c' ':
+ raise ValueError('Failed to parse block offset')
+ c_cur = c_next + 1
+ cur_record.block_length = strtoul(c_cur, &c_next, 10)
+ if c_cur == c_next or c_next[0] != c' ':
+ raise ValueError('Failed to parse block length')
+ c_cur = c_next + 1
+ cur_record.record_start = strtoul(c_cur, &c_next, 10)
+ if c_cur == c_next or c_next[0] != c' ':
+ raise ValueError('Failed to parse record start')
+ c_cur = c_next + 1
+ cur_record.record_end = strtoul(c_cur, &c_next, 10)
+ if c_cur == c_next or c_next[0] != c'\n':
+ raise ValueError('Failed to parse record end')
+ c_cur = c_next + 1
+ return c_cur
+
+ cdef int _offset_for_sha1(self, char *sha1) except -1:
+ """Find the first interesting 8-bits of this sha1."""
+ cdef int this_offset
+ cdef unsigned int as_uint
+ as_uint = _sha1_to_uint(sha1)
+ this_offset = (as_uint >> self.common_shift) & 0xFF
+ return this_offset
+
+ def _get_offset_for_sha1(self, sha1):
+ return self._offset_for_sha1(PyString_AS_STRING(sha1))
+
+ cdef _compute_common(self):
+ cdef unsigned int first
+ cdef unsigned int this
+ cdef unsigned int common_mask
+ cdef unsigned char common_shift
+ cdef int i
+ cdef int offset, this_offset
+ cdef int max_offset
+ # The idea with the offset map is that we should be able to quickly
+ # jump to the key that matches a given sha1. We know that the keys are
+ # in sorted order, and we know that a lot of the prefix is going to be
+ # the same across them.
+ # By XORing the records together, we can determine which bits are
+ # identical across all of them
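+ # Illustrative example (hypothetical sha1s): if every record starts with
+ # the same first byte but differs in the second, the top 8 bits are common,
+ # so common_shift ends up as 16 and the offsets table indexes on byte two.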
+ if self.num_records < 2:
+ # Everything is in common if you have 0 or 1 leaves
+ # So we'll always just shift to the first byte
+ self.common_shift = 24
+ else:
+ common_mask = 0xFFFFFFFF
+ first = _sha1_to_uint(self.records[0].sha1)
+ for i from 0 < i < self.num_records:
+ this = _sha1_to_uint(self.records[i].sha1)
+ common_mask = (~(first ^ this)) & common_mask
+ common_shift = 24
+ while common_mask & 0x80000000 and common_shift > 0:
+ common_mask = common_mask << 1
+ common_shift = common_shift - 1
+ self.common_shift = common_shift
+ offset = 0
+ max_offset = self.num_records
+ # We cap this loop at 255 records (indices 0 through 254). All the other
+ # offsets just get filled with 0xff as the sentinel saying 'too many'.
+ # It means that if we have >255 records we have to bisect the second
+ # half of the list, but this is going to be very rare in practice.
+ if max_offset > 255:
+ max_offset = 255
+ for i from 0 <= i < max_offset:
+ this_offset = self._offset_for_sha1(self.records[i].sha1)
+ while offset <= this_offset:
+ self.offsets[offset] = i
+ offset = offset + 1
+ while offset < 257:
+ self.offsets[offset] = max_offset
+ offset = offset + 1
+
+ def _get_offsets(self):
+ cdef int i
+ result = []
+ for i from 0 <= i < 257:
+ PyList_Append(result, self.offsets[i])
+ return result
+
+
+def _parse_into_chk(bytes, key_length, ref_list_length):
+ """Parse into a format optimized for chk records."""
+ assert key_length == 1
+ assert ref_list_length == 0
+ return GCCHKSHA1LeafNode(bytes)
+
+
+def _flatten_node(node, reference_lists):
+ """Convert a node into the serialized form.
+
+ :param node: A tuple representing a node:
+ (index, key_tuple, value, references)
+ :param reference_lists: Does this index have reference lists?
+ :return: (string_key, flattened)
+ string_key The serialized key for referencing this node
+ flattened A string with the serialized form for the contents
+ """
+ cdef int have_reference_lists
+ cdef Py_ssize_t flat_len
+ cdef Py_ssize_t key_len
+ cdef Py_ssize_t node_len
+ cdef char * value
+ cdef Py_ssize_t value_len
+ cdef char * out
+ cdef Py_ssize_t refs_len
+ cdef Py_ssize_t next_len
+ cdef int first_ref_list
+ cdef int first_reference
+ cdef int i
+ cdef Py_ssize_t ref_bit_len
+
+ if not PyTuple_CheckExact(node) and not StaticTuple_CheckExact(node):
+ raise TypeError('We expected a tuple() or StaticTuple() for node not: %s'
+ % type(node))
+ node_len = len(node)
+ have_reference_lists = reference_lists
+ if have_reference_lists:
+ if node_len != 4:
+ raise ValueError('With ref_lists, we expected 4 entries not: %s'
+ % len(node))
+ elif node_len < 3:
+ raise ValueError('Without ref_lists, we need at least 3 entries not: %s'
+ % len(node))
+ # TODO: We can probably do better than string.join(), namely
+ # when key has only 1 item, we can just grab that string
+ # And when there are 2 items, we could do a single malloc + len() + 1
+ # also, doing .join() requires a PyObject_GetAttrString call, which
+ # we could also avoid.
+ # TODO: Note that pyrex 0.9.6 generates fairly crummy code here, using the
+ # python object interface, versus 0.9.8+ which uses a helper that
+ # checks if this supports the sequence interface.
+ # We *could* do more work on our own, and grab the actual items
+ # lists. For now, just ask people to use a better compiler. :)
+ string_key = '\0'.join(node[1])
+
+ # TODO: instead of using string joins, precompute the final string length,
+ # and then malloc a single string and copy everything in.
+
+ # TODO: We probably want to use PySequenceFast, because we have lists and
+ # tuples, but we aren't sure which we will get.
+
+ # line := string_key NULL flat_refs NULL value LF
+ # string_key := BYTES (NULL BYTES)*
+ # flat_refs := ref_list (TAB ref_list)*
+ # ref_list := ref (CR ref)*
+ # ref := BYTES (NULL BYTES)*
+ # value := BYTES
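+ # Illustrative example (hypothetical data): a node with key ('foo', 'bar'),
+ # no references and value '1 2 3 4' flattens to the single line
+ # 'foo\x00bar\x00\x001 2 3 4\n' (the flat_refs between the two NULLs is
+ # empty).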
+ refs_len = 0
+ if have_reference_lists:
+ # Figure out how many bytes it will take to store the references
+ ref_lists = node[3]
+ next_len = len(ref_lists) # TODO: use a Py function
+ if next_len > 0:
+ # If there are no nodes, we don't need to do any work
+ # Otherwise we will need (len - 1) '\t' characters to separate
+ # the reference lists
+ refs_len = refs_len + (next_len - 1)
+ for ref_list in ref_lists:
+ next_len = len(ref_list)
+ if next_len > 0:
+ # We will need (len - 1) '\r' characters to separate the
+ # references
+ refs_len = refs_len + (next_len - 1)
+ for reference in ref_list:
+ if (not PyTuple_CheckExact(reference)
+ and not StaticTuple_CheckExact(reference)):
+ raise TypeError(
+ 'We expect references to be tuples not: %s'
+ % type(reference))
+ next_len = len(reference)
+ if next_len > 0:
+ # We will need (len - 1) '\x00' characters to
+ # separate the reference key
+ refs_len = refs_len + (next_len - 1)
+ for ref_bit in reference:
+ if not PyString_CheckExact(ref_bit):
+ raise TypeError('We expect reference bits'
+ ' to be strings not: %s'
+ % type(<object>ref_bit))
+ refs_len = refs_len + PyString_GET_SIZE(ref_bit)
+
+ # So we have the (key NULL refs NULL value LF)
+ key_len = PyString_Size(string_key)
+ val = node[2]
+ if not PyString_CheckExact(val):
+ raise TypeError('Expected a plain str for value not: %s'
+ % type(val))
+ value = PyString_AS_STRING(val)
+ value_len = PyString_GET_SIZE(val)
+ flat_len = (key_len + 1 + refs_len + 1 + value_len + 1)
+ line = PyString_FromStringAndSize(NULL, flat_len)
+ # Get a pointer to the new buffer
+ out = PyString_AsString(line)
+ memcpy(out, PyString_AsString(string_key), key_len)
+ out = out + key_len
+ out[0] = c'\0'
+ out = out + 1
+ if refs_len > 0:
+ first_ref_list = 1
+ for ref_list in ref_lists:
+ if first_ref_list == 0:
+ out[0] = c'\t'
+ out = out + 1
+ first_ref_list = 0
+ first_reference = 1
+ for reference in ref_list:
+ if first_reference == 0:
+ out[0] = c'\r'
+ out = out + 1
+ first_reference = 0
+ next_len = len(reference)
+ for i from 0 <= i < next_len:
+ if i != 0:
+ out[0] = c'\x00'
+ out = out + 1
+ ref_bit = reference[i]
+ ref_bit_len = PyString_GET_SIZE(ref_bit)
+ memcpy(out, PyString_AS_STRING(ref_bit), ref_bit_len)
+ out = out + ref_bit_len
+ out[0] = c'\0'
+ out = out + 1
+ memcpy(out, value, value_len)
+ out = out + value_len
+ out[0] = c'\n'
+ return string_key, line
diff --git a/bzrlib/_chk_map_py.py b/bzrlib/_chk_map_py.py
new file mode 100644
index 0000000..211736e
--- /dev/null
+++ b/bzrlib/_chk_map_py.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Python implementation of _search_key functions, etc."""
+
+from __future__ import absolute_import
+
+import zlib
+import struct
+
+from bzrlib.static_tuple import StaticTuple
+
+_LeafNode = None
+_InternalNode = None
+_unknown = None
+
+def _crc32(bit):
+ # Depending on python version and platform, zlib.crc32 will return either a
+ # signed or an unsigned value.
+ # http://docs.python.org/library/zlib.html recommends using a mask to force
+ # an unsigned value to ensure the same numeric value (unsigned) is obtained
+ # across all python versions and platforms.
+ # Note: However, on 32-bit platforms this causes an upcast to PyLong, which
+ # are generally slower than PyInts. However, if performance becomes
+ # critical, we should probably write the whole thing as an extension
+ # anyway.
+ # Though we really don't need that 32nd bit of accuracy. (even 2**24
+ # is probably enough node fan out for realistic trees.)
+ return zlib.crc32(bit)&0xFFFFFFFF
+
+
+def _search_key_16(key):
+ """Map the key tuple into a search key string which has 16-way fan out."""
+ return '\x00'.join(['%08X' % _crc32(bit) for bit in key])
+
+
+def _search_key_255(key):
+ """Map the key tuple into a search key string which has 255-way fan out.
+
+ We use 255-way because '\n' is used as a delimiter, and causes problems
+ while parsing.
+ """
+ bytes = '\x00'.join([struct.pack('>L', _crc32(bit)) for bit in key])
+ return bytes.replace('\n', '_')
+
+
+def _deserialise_leaf_node(bytes, key, search_key_func=None):
+ """Deserialise bytes, with key key, into a LeafNode.
+
+ :param bytes: The bytes of the node.
+ :param key: The key that the serialised node has.
+ """
+ global _unknown, _LeafNode, _InternalNode
+ if _LeafNode is None:
+ from bzrlib import chk_map
+ _unknown = chk_map._unknown
+ _LeafNode = chk_map.LeafNode
+ _InternalNode = chk_map.InternalNode
+ result = _LeafNode(search_key_func=search_key_func)
+ # Splitlines can split on '\r' so don't use it, split('\n') adds an
+ # extra '' if the bytes ends in a final newline.
+ lines = bytes.split('\n')
+ trailing = lines.pop()
+ if trailing != '':
+ raise AssertionError('We did not have a final newline for %s'
+ % (key,))
+ items = {}
+ if lines[0] != 'chkleaf:':
+ raise ValueError("not a serialised leaf node: %r" % bytes)
+ maximum_size = int(lines[1])
+ width = int(lines[2])
+ length = int(lines[3])
+ prefix = lines[4]
+ pos = 5
+ while pos < len(lines):
+ line = prefix + lines[pos]
+ elements = line.split('\x00')
+ pos += 1
+ if len(elements) != width + 1:
+ raise AssertionError(
+ 'Incorrect number of elements (%d vs %d) for: %r'
+ % (len(elements), width + 1, line))
+ num_value_lines = int(elements[-1])
+ value_lines = lines[pos:pos+num_value_lines]
+ pos += num_value_lines
+ value = '\n'.join(value_lines)
+ items[StaticTuple.from_sequence(elements[:-1])] = value
+ if len(items) != length:
+ raise AssertionError("item count (%d) mismatch for key %s,"
+ " bytes %r" % (length, key, bytes))
+ result._items = items
+ result._len = length
+ result._maximum_size = maximum_size
+ result._key = key
+ result._key_width = width
+ result._raw_size = (sum(map(len, lines[5:])) # the length of the suffix
+ + (length)*(len(prefix))
+ + (len(lines)-5))
+ if not items:
+ result._search_prefix = None
+ result._common_serialised_prefix = None
+ else:
+ result._search_prefix = _unknown
+ result._common_serialised_prefix = prefix
+ if len(bytes) != result._current_size():
+ raise AssertionError('_current_size computed incorrectly')
+ return result
+
+
+def _deserialise_internal_node(bytes, key, search_key_func=None):
+ global _unknown, _LeafNode, _InternalNode
+ if _InternalNode is None:
+ from bzrlib import chk_map
+ _unknown = chk_map._unknown
+ _LeafNode = chk_map.LeafNode
+ _InternalNode = chk_map.InternalNode
+ result = _InternalNode(search_key_func=search_key_func)
+ # Splitlines can split on '\r' so don't use it, remove the extra ''
+ # from the result of split('\n') because we should have a trailing
+ # newline
+ lines = bytes.split('\n')
+ if lines[-1] != '':
+ raise ValueError("last line must be ''")
+ lines.pop(-1)
+ items = {}
+ if lines[0] != 'chknode:':
+ raise ValueError("not a serialised internal node: %r" % bytes)
+ maximum_size = int(lines[1])
+ width = int(lines[2])
+ length = int(lines[3])
+ common_prefix = lines[4]
+ for line in lines[5:]:
+ line = common_prefix + line
+ prefix, flat_key = line.rsplit('\x00', 1)
+ items[prefix] = StaticTuple(flat_key,)
+ if len(items) == 0:
+ raise AssertionError("We didn't find any item for %s" % key)
+ result._items = items
+ result._len = length
+ result._maximum_size = maximum_size
+ result._key = key
+ result._key_width = width
+ # XXX: InternalNodes don't really care about their size, and this will
+ # change if we add prefix compression
+ result._raw_size = None # len(bytes)
+ result._node_width = len(prefix)
+ result._search_prefix = common_prefix
+ return result
+
+
+def _bytes_to_text_key(bytes):
+ """Take a CHKInventory value string and return a (file_id, rev_id) tuple"""
+ sections = bytes.split('\n')
+ kind, file_id = sections[0].split(': ')
+ return (intern(file_id), intern(sections[3]))
+
diff --git a/bzrlib/_chk_map_pyx.c b/bzrlib/_chk_map_pyx.c
new file mode 100644
index 0000000..d73d418
--- /dev/null
+++ b/bzrlib/_chk_map_pyx.c
@@ -0,0 +1,2172 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:01:05 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+#include "_static_tuple_c.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, long size); /*proto*/
+
+static PyObject *__Pyx_ImportModule(char *name); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._static_tuple_c */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = 0;
+
+/* Declarations from bzrlib._chk_map_pyx */
+
+
+/* Declarations from implementation of bzrlib._chk_map_pyx */
+
+static PyObject *__pyx_v_6bzrlib_12_chk_map_pyx_crc32;
+static PyObject *__pyx_v_6bzrlib_12_chk_map_pyx__LeafNode;
+static PyObject *__pyx_v_6bzrlib_12_chk_map_pyx__InternalNode;
+static PyObject *__pyx_v_6bzrlib_12_chk_map_pyx__unknown;
+static void *__pyx_f_6bzrlib_12_chk_map_pyx__my_memrchr(void *,int,size_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx_safe_interned_string_from_size(char *,Py_ssize_t); /*proto*/
+static int __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line(char **,char *,char *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__import_globals(void); /*proto*/
+
+static char __pyx_k1[] = "tried to create a string with an invalid size: %d @0x%x";
+static char __pyx_k2[] = "%08X";
+static char __pyx_k3[] = "Missing %s line\n";
+static char __pyx_k4[] = "%s line not a proper int\n";
+static char __pyx_k5[] = "bzrlib";
+static char __pyx_k6[] = "chk_map";
+static char __pyx_k7[] = "LeafNode";
+static char __pyx_k8[] = "InternalNode";
+static char __pyx_k9[] = "_unknown";
+static char __pyx_k10[] = "search_key_func";
+static char __pyx_k11[] = "bytes must be a plain string not %s";
+static char __pyx_k12[] = "chkleaf:\n";
+static char __pyx_k13[] = "not a serialised leaf node: %r";
+static char __pyx_k14[] = "bytes does not end in a newline";
+static char __pyx_k15[] = "maximum_size";
+static char __pyx_k16[] = "width";
+static char __pyx_k17[] = "length";
+static char __pyx_k18[] = "Missing the prefix line\n";
+static char __pyx_k19[] = "append";
+static char __pyx_k20[] = "Prefix has too many nulls versus width";
+static char __pyx_k21[] = "null line\n";
+static char __pyx_k22[] = "fail to find the num value lines null";
+static char __pyx_k23[] = "num value lines";
+static char __pyx_k24[] = "missing trailing newline";
+static char __pyx_k25[] = "bad no null, bad";
+static char __pyx_k26[] = "Too many bits for entry";
+static char __pyx_k27[] = "bad no null";
+static char __pyx_k28[] = "Incorrect number of elements (%d vs %d)";
+static char __pyx_k29[] = "item count (%d) mismatch for key %s, bytes %r";
+static char __pyx_k30[] = "_items";
+static char __pyx_k31[] = "_len";
+static char __pyx_k32[] = "_maximum_size";
+static char __pyx_k33[] = "_key";
+static char __pyx_k34[] = "_key_width";
+static char __pyx_k35[] = "_raw_size";
+static char __pyx_k36[] = "_search_prefix";
+static char __pyx_k37[] = "_common_serialised_prefix";
+static char __pyx_k38[] = "_current_size";
+static char __pyx_k39[] = "_current_size computed incorrectly %d != %d";
+static char __pyx_k40[] = "key %r is not a StaticTuple";
+static char __pyx_k41[] = "chknode:\n";
+static char __pyx_k42[] = "not a serialised internal node: %r";
+static char __pyx_k43[] = "intern";
+static char __pyx_k44[] = "_node_width";
+static char __pyx_k45[] = "bytes must be a string, got %r";
+static char __pyx_k46[] = "No kind section found.";
+static char __pyx_k47[] = "Kind section should end with \": \", got %r";
+static char __pyx_k48[] = "no newline after file-id";
+static char __pyx_k49[] = "no newline after parent_str";
+static char __pyx_k50[] = "no newline after name str";
+static char __pyx_k51[] = "zlib";
+static char __pyx_k52[] = "crc32";
+
+static PyObject *__pyx_n_InternalNode;
+static PyObject *__pyx_n_LeafNode;
+static PyObject *__pyx_n__common_serialised_prefix;
+static PyObject *__pyx_n__current_size;
+static PyObject *__pyx_n__items;
+static PyObject *__pyx_n__key;
+static PyObject *__pyx_n__key_width;
+static PyObject *__pyx_n__len;
+static PyObject *__pyx_n__maximum_size;
+static PyObject *__pyx_n__node_width;
+static PyObject *__pyx_n__raw_size;
+static PyObject *__pyx_n__search_prefix;
+static PyObject *__pyx_n__unknown;
+static PyObject *__pyx_n_append;
+static PyObject *__pyx_n_bzrlib;
+static PyObject *__pyx_n_chk_map;
+static PyObject *__pyx_n_crc32;
+static PyObject *__pyx_n_intern;
+static PyObject *__pyx_n_search_key_func;
+static PyObject *__pyx_n_zlib;
+
+static PyObject *__pyx_k1p;
+static PyObject *__pyx_k3p;
+static PyObject *__pyx_k4p;
+static PyObject *__pyx_k11p;
+static PyObject *__pyx_k13p;
+static PyObject *__pyx_k14p;
+static PyObject *__pyx_k18p;
+static PyObject *__pyx_k20p;
+static PyObject *__pyx_k21p;
+static PyObject *__pyx_k22p;
+static PyObject *__pyx_k24p;
+static PyObject *__pyx_k25p;
+static PyObject *__pyx_k26p;
+static PyObject *__pyx_k27p;
+static PyObject *__pyx_k28p;
+static PyObject *__pyx_k29p;
+static PyObject *__pyx_k39p;
+static PyObject *__pyx_k40p;
+static PyObject *__pyx_k42p;
+static PyObject *__pyx_k45p;
+static PyObject *__pyx_k46p;
+static PyObject *__pyx_k47p;
+static PyObject *__pyx_k48p;
+static PyObject *__pyx_k49p;
+static PyObject *__pyx_k50p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_InternalNode, 1, __pyx_k8, sizeof(__pyx_k8)},
+ {&__pyx_n_LeafNode, 1, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_n__common_serialised_prefix, 1, __pyx_k37, sizeof(__pyx_k37)},
+ {&__pyx_n__current_size, 1, __pyx_k38, sizeof(__pyx_k38)},
+ {&__pyx_n__items, 1, __pyx_k30, sizeof(__pyx_k30)},
+ {&__pyx_n__key, 1, __pyx_k33, sizeof(__pyx_k33)},
+ {&__pyx_n__key_width, 1, __pyx_k34, sizeof(__pyx_k34)},
+ {&__pyx_n__len, 1, __pyx_k31, sizeof(__pyx_k31)},
+ {&__pyx_n__maximum_size, 1, __pyx_k32, sizeof(__pyx_k32)},
+ {&__pyx_n__node_width, 1, __pyx_k44, sizeof(__pyx_k44)},
+ {&__pyx_n__raw_size, 1, __pyx_k35, sizeof(__pyx_k35)},
+ {&__pyx_n__search_prefix, 1, __pyx_k36, sizeof(__pyx_k36)},
+ {&__pyx_n__unknown, 1, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_n_append, 1, __pyx_k19, sizeof(__pyx_k19)},
+ {&__pyx_n_bzrlib, 1, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_n_chk_map, 1, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_n_crc32, 1, __pyx_k52, sizeof(__pyx_k52)},
+ {&__pyx_n_intern, 1, __pyx_k43, sizeof(__pyx_k43)},
+ {&__pyx_n_search_key_func, 1, __pyx_k10, sizeof(__pyx_k10)},
+ {&__pyx_n_zlib, 1, __pyx_k51, sizeof(__pyx_k51)},
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_k3p, 0, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_k4p, 0, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_k11p, 0, __pyx_k11, sizeof(__pyx_k11)},
+ {&__pyx_k13p, 0, __pyx_k13, sizeof(__pyx_k13)},
+ {&__pyx_k14p, 0, __pyx_k14, sizeof(__pyx_k14)},
+ {&__pyx_k18p, 0, __pyx_k18, sizeof(__pyx_k18)},
+ {&__pyx_k20p, 0, __pyx_k20, sizeof(__pyx_k20)},
+ {&__pyx_k21p, 0, __pyx_k21, sizeof(__pyx_k21)},
+ {&__pyx_k22p, 0, __pyx_k22, sizeof(__pyx_k22)},
+ {&__pyx_k24p, 0, __pyx_k24, sizeof(__pyx_k24)},
+ {&__pyx_k25p, 0, __pyx_k25, sizeof(__pyx_k25)},
+ {&__pyx_k26p, 0, __pyx_k26, sizeof(__pyx_k26)},
+ {&__pyx_k27p, 0, __pyx_k27, sizeof(__pyx_k27)},
+ {&__pyx_k28p, 0, __pyx_k28, sizeof(__pyx_k28)},
+ {&__pyx_k29p, 0, __pyx_k29, sizeof(__pyx_k29)},
+ {&__pyx_k39p, 0, __pyx_k39, sizeof(__pyx_k39)},
+ {&__pyx_k40p, 0, __pyx_k40, sizeof(__pyx_k40)},
+ {&__pyx_k42p, 0, __pyx_k42, sizeof(__pyx_k42)},
+ {&__pyx_k45p, 0, __pyx_k45, sizeof(__pyx_k45)},
+ {&__pyx_k46p, 0, __pyx_k46, sizeof(__pyx_k46)},
+ {&__pyx_k47p, 0, __pyx_k47, sizeof(__pyx_k47)},
+ {&__pyx_k48p, 0, __pyx_k48, sizeof(__pyx_k48)},
+ {&__pyx_k49p, 0, __pyx_k49, sizeof(__pyx_k49)},
+ {&__pyx_k50p, 0, __pyx_k50, sizeof(__pyx_k50)},
+ {0, 0, 0, 0}
+};
+
+static PyObject *__pyx_d1;
+static PyObject *__pyx_d2;
+
+
+/* Implementation of bzrlib._chk_map_pyx */
+
+static void *__pyx_f_6bzrlib_12_chk_map_pyx__my_memrchr(void *__pyx_v_s,int __pyx_v_c,size_t __pyx_v_n) {
+ char *__pyx_v_pos;
+ char *__pyx_v_start;
+ void *__pyx_r;
+ int __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":76 */
+ __pyx_v_start = ((char *)__pyx_v_s);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":77 */
+ __pyx_v_pos = ((__pyx_v_start + __pyx_v_n) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":78 */
+ while (1) {
+ __pyx_1 = (__pyx_v_pos >= __pyx_v_start);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":79 */
+ __pyx_1 = ((__pyx_v_pos[0]) == __pyx_v_c);
+ if (__pyx_1) {
+ __pyx_r = __pyx_v_pos;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":81 */
+ __pyx_v_pos = (__pyx_v_pos - 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":82 */
+ __pyx_r = NULL;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx_safe_interned_string_from_size(char *__pyx_v_s,Py_ssize_t __pyx_v_size) {
+ PyObject *__pyx_v_py_str;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":87 */
+ __pyx_1 = (__pyx_v_size < 0);
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromSsize_t(__pyx_v_size); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(((int)__pyx_v_s)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k1p, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_AssertionError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":91 */
+ __pyx_v_py_str = PyString_FromStringAndSize(__pyx_v_s,__pyx_v_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":92 */
+ PyString_InternInPlace((&__pyx_v_py_str));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":93 */
+ Py_INCREF(((PyObject *)__pyx_v_py_str));
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = ((PyObject *)__pyx_v_py_str);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":96 */
+ Py_DECREF(__pyx_v_py_str);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":97 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx.safe_interned_string_from_size");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_result);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__search_key_16(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_chk_map_pyx__search_key_16[] = "See chk_map._search_key_16.";
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__search_key_16(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ Py_ssize_t __pyx_v_num_bits;
+ Py_ssize_t __pyx_v_i;
+ Py_ssize_t __pyx_v_num_out_bytes;
+ unsigned long __pyx_v_crc_val;
+ char *__pyx_v_c_out;
+ PyObject *__pyx_v_out;
+ PyObject *__pyx_r;
+ Py_ssize_t __pyx_1;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_out = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":109 */
+ __pyx_1 = PyObject_Length(__pyx_v_key); if (__pyx_1 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; goto __pyx_L1;}
+ __pyx_v_num_bits = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":111 */
+ __pyx_v_num_out_bytes = ((9 * __pyx_v_num_bits) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":112 */
+ __pyx_2 = PyString_FromStringAndSize(NULL,__pyx_v_num_out_bytes); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_out);
+ __pyx_v_out = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":113 */
+ __pyx_v_c_out = PyString_AS_STRING(__pyx_v_out);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":114 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_num_bits; ++__pyx_v_i) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":115 */
+ __pyx_3 = (__pyx_v_i > 0);
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":116 */
+ (__pyx_v_c_out[0]) = '\x000';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":117 */
+ __pyx_v_c_out = (__pyx_v_c_out + 1);
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":118 */
+ __pyx_2 = __Pyx_GetItemInt(__pyx_v_key, __pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_v_6bzrlib_12_chk_map_pyx_crc32, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_v_crc_val = PyInt_AS_LONG(__pyx_2);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":120 */
+ sprintf(__pyx_v_c_out,__pyx_k2,__pyx_v_crc_val);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":121 */
+ __pyx_v_c_out = (__pyx_v_c_out + 8);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":122 */
+ Py_INCREF(__pyx_v_out);
+ __pyx_r = __pyx_v_out;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._search_key_16");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_out);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__search_key_255(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_chk_map_pyx__search_key_255[] = "See chk_map._search_key_255.";
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__search_key_255(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ Py_ssize_t __pyx_v_num_bits;
+ Py_ssize_t __pyx_v_i;
+ Py_ssize_t __pyx_v_j;
+ Py_ssize_t __pyx_v_num_out_bytes;
+ unsigned long __pyx_v_crc_val;
+ char *__pyx_v_c_out;
+ PyObject *__pyx_v_out;
+ PyObject *__pyx_r;
+ Py_ssize_t __pyx_1;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_out = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":134 */
+ __pyx_1 = PyObject_Length(__pyx_v_key); if (__pyx_1 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; goto __pyx_L1;}
+ __pyx_v_num_bits = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":136 */
+ __pyx_v_num_out_bytes = ((5 * __pyx_v_num_bits) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":137 */
+ __pyx_2 = PyString_FromStringAndSize(NULL,__pyx_v_num_out_bytes); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_out);
+ __pyx_v_out = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":138 */
+ __pyx_v_c_out = PyString_AS_STRING(__pyx_v_out);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":139 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_num_bits; ++__pyx_v_i) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":140 */
+ __pyx_3 = (__pyx_v_i > 0);
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":141 */
+ (__pyx_v_c_out[0]) = '\x000';
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":142 */
+ __pyx_v_c_out = (__pyx_v_c_out + 1);
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":143 */
+ __pyx_2 = __Pyx_GetItemInt(__pyx_v_key, __pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_v_6bzrlib_12_chk_map_pyx_crc32, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_v_crc_val = PyInt_AS_LONG(__pyx_2);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":145 */
+ (__pyx_v_c_out[0]) = ((__pyx_v_crc_val >> 24) & 0xFF);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":146 */
+ (__pyx_v_c_out[1]) = ((__pyx_v_crc_val >> 16) & 0xFF);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":147 */
+ (__pyx_v_c_out[2]) = ((__pyx_v_crc_val >> 8) & 0xFF);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":148 */
+ (__pyx_v_c_out[3]) = ((__pyx_v_crc_val >> 0) & 0xFF);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":149 */
+ for (__pyx_v_j = 0; __pyx_v_j < 4; ++__pyx_v_j) {
+ __pyx_3 = ((__pyx_v_c_out[__pyx_v_j]) == '\n');
+ if (__pyx_3) {
+ (__pyx_v_c_out[__pyx_v_j]) = '_';
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":152 */
+ __pyx_v_c_out = (__pyx_v_c_out + 4);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":153 */
+ Py_INCREF(__pyx_v_out);
+ __pyx_r = __pyx_v_out;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._search_key_255");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_out);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
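+/* _get_int_from_line: helper shared by the deserialisers below.  It reads one
+ * newline-terminated base-10 integer starting at *cur, raising ValueError
+ * (built from the caller-supplied message) if no '\n' is found before end or
+ * if strtol() stops short of the newline, then advances *cur past the newline
+ * and returns the value; -1 signals that a Python exception has been set. */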
+static int __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line(char **__pyx_v_cur,char *__pyx_v_end,char *__pyx_v_message) {
+ int __pyx_v_value;
+ char *__pyx_v_next_line;
+ char *__pyx_v_next;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":167 */
+ __pyx_v_next_line = ((char *)memchr((__pyx_v_cur[0]),'\n',(__pyx_v_end - (__pyx_v_cur[0]))));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":168 */
+ __pyx_1 = (__pyx_v_next_line == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyString_FromString(__pyx_v_message); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k3p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":171 */
+ __pyx_v_value = strtol((__pyx_v_cur[0]),(&__pyx_v_next),10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":172 */
+ __pyx_1 = (__pyx_v_next != __pyx_v_next_line);
+ if (__pyx_1) {
+ __pyx_2 = PyString_FromString(__pyx_v_message); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k4p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":174 */
+ (__pyx_v_cur[0]) = (__pyx_v_next_line + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":175 */
+ __pyx_r = __pyx_v_value;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._get_int_from_line");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ return __pyx_r;
+}
+
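+/* _import_globals: lazily imports bzrlib.chk_map and caches chk_map.LeafNode,
+ * chk_map.InternalNode and chk_map._unknown in the module-level slots used by
+ * the deserialisers below, presumably to avoid a circular import at module
+ * load time.  Returns None on success, NULL if the import or an attribute
+ * lookup fails. */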
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__import_globals(void) {
+ PyObject *__pyx_v_chk_map;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_v_chk_map = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":182 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_chk_map);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_chk_map);
+ __pyx_2 = __Pyx_Import(__pyx_n_bzrlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_chk_map); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_chk_map);
+ __pyx_v_chk_map = __pyx_1;
+ __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":183 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_chk_map, __pyx_n_LeafNode); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx__LeafNode);
+ __pyx_v_6bzrlib_12_chk_map_pyx__LeafNode = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":184 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_chk_map, __pyx_n_InternalNode); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx__InternalNode);
+ __pyx_v_6bzrlib_12_chk_map_pyx__InternalNode = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":185 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_chk_map, __pyx_n__unknown); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx__unknown);
+ __pyx_v_6bzrlib_12_chk_map_pyx__unknown = __pyx_2;
+ __pyx_2 = 0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._import_globals");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_chk_map);
+ return __pyx_r;
+}
+
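+/* _deserialise_leaf_node: parses the serialised form of a leaf node.  bytes
+ * must be a plain str that ends in '\n' and starts with the 9-byte signature
+ * compared against __pyx_k12 (defined elsewhere in this file, presumably
+ * "chkleaf:\n").  The expected layout is roughly:
+ *
+ *     <signature>maximum_size\nkey_width\nlen\ncommon prefix\n
+ *     key tail[\x00key tail ...]\x00num_value_lines\nvalue line\n ...
+ *
+ * A _LeafNode(search_key_func=...) instance is created up front and filled in
+ * as the entries are parsed. */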
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__deserialise_leaf_node(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_chk_map_pyx__deserialise_leaf_node[] = "Deserialise bytes, with key key, into a LeafNode.\n\n :param bytes: The bytes of the node.\n :param key: The key that the serialised node has.\n ";
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__deserialise_leaf_node(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_v_search_key_func = 0;
+ char *__pyx_v_c_bytes;
+ char *__pyx_v_cur;
+ char *__pyx_v_end;
+ char *__pyx_v_next_line;
+ Py_ssize_t __pyx_v_c_bytes_len;
+ Py_ssize_t __pyx_v_prefix_length;
+ Py_ssize_t __pyx_v_items_length;
+ int __pyx_v_maximum_size;
+ int __pyx_v_width;
+ int __pyx_v_length;
+ int __pyx_v_i;
+ int __pyx_v_prefix_tail_len;
+ int __pyx_v_num_value_lines;
+ int __pyx_v_num_prefix_bits;
+ char *__pyx_v_prefix;
+ char *__pyx_v_value_start;
+ char *__pyx_v_prefix_tail;
+ char *__pyx_v_next_null;
+ char *__pyx_v_last_null;
+ char *__pyx_v_line_start;
+ char *__pyx_v_c_entry;
+ char *__pyx_v_entry_start;
+ StaticTuple *__pyx_v_entry_bits;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_prefix_bits;
+ PyObject *__pyx_v_items;
+ PyObject *__pyx_v_entry;
+ PyObject *__pyx_v_value;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ static char *__pyx_argnames[] = {"bytes","key","search_key_func",0};
+ __pyx_v_search_key_func = __pyx_d1;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO|O", __pyx_argnames, &__pyx_v_bytes, &__pyx_v_key, &__pyx_v_search_key_func)) return 0;
+ Py_INCREF(__pyx_v_bytes);
+ Py_INCREF(__pyx_v_key);
+ Py_INCREF(__pyx_v_search_key_func);
+ __pyx_v_entry_bits = ((StaticTuple *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+ __pyx_v_prefix_bits = Py_None; Py_INCREF(Py_None);
+ __pyx_v_items = Py_None; Py_INCREF(Py_None);
+ __pyx_v_entry = Py_None; Py_INCREF(Py_None);
+ __pyx_v_value = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":204 */
+ __pyx_1 = __pyx_v_6bzrlib_12_chk_map_pyx__LeafNode == Py_None;
+ if (__pyx_1) {
+ __pyx_2 = __pyx_f_6bzrlib_12_chk_map_pyx__import_globals(); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":207 */
+ __pyx_2 = PyTuple_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L1;}
+ __pyx_3 = PyDict_New(); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L1;}
+ if (PyDict_SetItem(__pyx_3, __pyx_n_search_key_func, __pyx_v_search_key_func) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L1;}
+ __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_v_6bzrlib_12_chk_map_pyx__LeafNode, __pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_4;
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":210 */
+ __pyx_1 = (!PyString_CheckExact(__pyx_v_bytes));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_bytes);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_bytes);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyType_Type)), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k11p, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_TypeError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":213 */
+ __pyx_v_c_bytes = PyString_AS_STRING(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":214 */
+ __pyx_v_c_bytes_len = PyString_GET_SIZE(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":216 */
+ __pyx_1 = (__pyx_v_c_bytes_len < 9);
+ if (!__pyx_1) {
+ __pyx_1 = (memcmp(__pyx_v_c_bytes,__pyx_k12,9) != 0);
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyNumber_Remainder(__pyx_k13p, __pyx_v_bytes); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":218 */
+ __pyx_1 = ((__pyx_v_c_bytes[(__pyx_v_c_bytes_len - 1)]) != '\n');
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; goto __pyx_L1;}
+ Py_INCREF(__pyx_k14p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k14p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":221 */
+ __pyx_v_end = (__pyx_v_c_bytes + __pyx_v_c_bytes_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":222 */
+ __pyx_v_cur = (__pyx_v_c_bytes + 9);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":223 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_cur),__pyx_v_end,__pyx_k15); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; goto __pyx_L1;}
+ __pyx_v_maximum_size = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":224 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_cur),__pyx_v_end,__pyx_k16); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; goto __pyx_L1;}
+ __pyx_v_width = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":225 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_cur),__pyx_v_end,__pyx_k17); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ __pyx_v_length = __pyx_1;
+
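+ /* The three header integers (maximum size, key width, entry count) have been
+  * read via _get_int_from_line; the code below locates the common serialised
+  * prefix line, splits it on NUL bytes into prefix_bits with prefix_tail
+  * holding the trailing fragment, and raises ValueError if the prefix already
+  * holds as many NUL-separated bits as the key width allows. */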
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":227 */
+ __pyx_v_next_line = ((char *)memchr(__pyx_v_cur,'\n',(__pyx_v_end - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":228 */
+ __pyx_1 = (__pyx_v_next_line == NULL);
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ Py_INCREF(__pyx_k18p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k18p);
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":230 */
+ __pyx_v_prefix = __pyx_v_cur;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":231 */
+ __pyx_v_prefix_length = (__pyx_v_next_line - __pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":232 */
+ __pyx_v_cur = (__pyx_v_next_line + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":234 */
+ __pyx_3 = PyList_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_prefix_bits);
+ __pyx_v_prefix_bits = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":235 */
+ __pyx_v_prefix_tail = __pyx_v_prefix;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":236 */
+ __pyx_v_num_prefix_bits = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":237 */
+ __pyx_v_next_null = ((char *)memchr(__pyx_v_prefix,'\0',__pyx_v_prefix_length));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":238 */
+ while (1) {
+ __pyx_1 = (__pyx_v_next_null != NULL);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":239 */
+ __pyx_v_num_prefix_bits = (__pyx_v_num_prefix_bits + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":240 */
+ __pyx_4 = PyObject_GetAttr(__pyx_v_prefix_bits, __pyx_n_append); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; goto __pyx_L1;}
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_prefix_tail,(__pyx_v_next_null - __pyx_v_prefix_tail)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_4, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":242 */
+ __pyx_v_prefix_tail = (__pyx_v_next_null + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":243 */
+ __pyx_v_next_null = ((char *)memchr(__pyx_v_prefix_tail,'\0',(__pyx_v_next_line - __pyx_v_prefix_tail)));
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":244 */
+ __pyx_v_prefix_tail_len = (__pyx_v_next_line - __pyx_v_prefix_tail);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":246 */
+ __pyx_1 = (__pyx_v_num_prefix_bits >= __pyx_v_width);
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_INCREF(__pyx_k20p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k20p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":249 */
+ __pyx_v_items_length = (__pyx_v_end - __pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":250 */
+ __pyx_2 = PyDict_New(); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_items);
+ __pyx_v_items = __pyx_2;
+ __pyx_2 = 0;
+
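+ /* Main entry loop: each line up to 'end' carries the NUL-separated key tails
+  * followed by a final NUL and the number of value lines; the value lines are
+  * gathered into a single value string, the full key is assembled from
+  * prefix_bits plus the tails as an interned StaticTuple of key_width
+  * elements, and items[key] = value is recorded. */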
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":251 */
+ while (1) {
+ __pyx_1 = (__pyx_v_cur < __pyx_v_end);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":252 */
+ __pyx_v_line_start = __pyx_v_cur;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":253 */
+ __pyx_v_next_line = ((char *)memchr(__pyx_v_cur,'\n',(__pyx_v_end - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":254 */
+ __pyx_1 = (__pyx_v_next_line == NULL);
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; goto __pyx_L1;}
+ Py_INCREF(__pyx_k21p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k21p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; goto __pyx_L1;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":256 */
+ __pyx_v_last_null = ((char *)__pyx_f_6bzrlib_12_chk_map_pyx__my_memrchr(__pyx_v_cur,'\0',(__pyx_v_next_line - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":257 */
+ __pyx_1 = (__pyx_v_last_null == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 258; goto __pyx_L1;}
+ Py_INCREF(__pyx_k22p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k22p);
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 258; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 258; goto __pyx_L1;}
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":259 */
+ __pyx_v_next_null = (__pyx_v_last_null + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":260 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_next_null),(__pyx_v_next_line + 1),__pyx_k23); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; goto __pyx_L1;}
+ __pyx_v_num_value_lines = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":262 */
+ __pyx_v_cur = (__pyx_v_next_line + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":263 */
+ __pyx_v_value_start = __pyx_v_cur;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":265 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_num_value_lines; ++__pyx_v_i) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":266 */
+ __pyx_v_next_line = ((char *)memchr(__pyx_v_cur,'\n',(__pyx_v_end - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":267 */
+ __pyx_1 = (__pyx_v_next_line == NULL);
+ if (__pyx_1) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; goto __pyx_L1;}
+ Py_INCREF(__pyx_k24p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k24p);
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; goto __pyx_L1;}
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":269 */
+ __pyx_v_cur = (__pyx_v_next_line + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":270 */
+ __pyx_4 = ((PyObject *)StaticTuple_New(__pyx_v_width)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_entry_bits));
+ __pyx_v_entry_bits = ((StaticTuple *)__pyx_4);
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":271 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_num_prefix_bits; ++__pyx_v_i) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":274 */
+ __pyx_3 = __Pyx_GetItemInt(__pyx_v_prefix_bits, __pyx_v_i); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":276 */
+ Py_INCREF(__pyx_v_entry);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":277 */
+ StaticTuple_SET_ITEM(__pyx_v_entry_bits,__pyx_v_i,__pyx_v_entry);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":278 */
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_value_start,(__pyx_v_next_line - __pyx_v_value_start)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_value);
+ __pyx_v_value = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":281 */
+ __pyx_v_entry_start = __pyx_v_line_start;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":282 */
+ __pyx_v_next_null = ((char *)memchr(__pyx_v_entry_start,'\0',((__pyx_v_last_null - __pyx_v_entry_start) + 1)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":284 */
+ __pyx_1 = (__pyx_v_next_null == NULL);
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; goto __pyx_L1;}
+ Py_INCREF(__pyx_k25p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k25p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; goto __pyx_L1;}
+ goto __pyx_L19;
+ }
+ __pyx_L19:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":286 */
+ __pyx_2 = PyString_FromStringAndSize(NULL,((__pyx_v_prefix_tail_len + __pyx_v_next_null) - __pyx_v_line_start)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":288 */
+ __pyx_v_c_entry = PyString_AS_STRING(__pyx_v_entry);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":289 */
+ __pyx_1 = (__pyx_v_prefix_tail_len > 0);
+ if (__pyx_1) {
+ memcpy(__pyx_v_c_entry,__pyx_v_prefix_tail,__pyx_v_prefix_tail_len);
+ goto __pyx_L20;
+ }
+ __pyx_L20:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":291 */
+ __pyx_1 = ((__pyx_v_next_null - __pyx_v_line_start) > 0);
+ if (__pyx_1) {
+ memcpy((__pyx_v_c_entry + __pyx_v_prefix_tail_len),__pyx_v_line_start,(__pyx_v_next_null - __pyx_v_line_start));
+ goto __pyx_L21;
+ }
+ __pyx_L21:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":293 */
+ Py_INCREF(__pyx_v_entry);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":294 */
+ __pyx_v_i = __pyx_v_num_prefix_bits;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":295 */
+ StaticTuple_SET_ITEM(__pyx_v_entry_bits,__pyx_v_i,__pyx_v_entry);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":296 */
+ while (1) {
+ __pyx_1 = (__pyx_v_next_null != __pyx_v_last_null);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":297 */
+ __pyx_v_i = (__pyx_v_i + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":298 */
+ __pyx_1 = (__pyx_v_i > __pyx_v_width);
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; goto __pyx_L1;}
+ Py_INCREF(__pyx_k26p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k26p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; goto __pyx_L1;}
+ goto __pyx_L24;
+ }
+ __pyx_L24:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":300 */
+ __pyx_v_entry_start = (__pyx_v_next_null + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":301 */
+ __pyx_v_next_null = ((char *)memchr(__pyx_v_entry_start,'\0',((__pyx_v_last_null - __pyx_v_entry_start) + 1)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":303 */
+ __pyx_1 = (__pyx_v_next_null == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; goto __pyx_L1;}
+ Py_INCREF(__pyx_k27p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k27p);
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; goto __pyx_L1;}
+ goto __pyx_L25;
+ }
+ __pyx_L25:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":305 */
+ __pyx_3 = PyString_FromStringAndSize(__pyx_v_entry_start,(__pyx_v_next_null - __pyx_v_entry_start)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 305; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":307 */
+ Py_INCREF(__pyx_v_entry);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":308 */
+ StaticTuple_SET_ITEM(__pyx_v_entry_bits,__pyx_v_i,__pyx_v_entry);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":309 */
+ __pyx_1 = (StaticTuple_GET_SIZE(__pyx_v_entry_bits) != __pyx_v_width);
+ if (__pyx_1) {
+ __pyx_5 = PyObject_Length(((PyObject *)__pyx_v_entry_bits)); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; goto __pyx_L1;}
+ __pyx_2 = PyInt_FromSsize_t((__pyx_5 + 1)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; goto __pyx_L1;}
+ __pyx_4 = PyInt_FromLong((__pyx_v_width + 1)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_4);
+ __pyx_2 = 0;
+ __pyx_4 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k28p, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 310; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_AssertionError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 310; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 310; goto __pyx_L1;}
+ goto __pyx_L26;
+ }
+ __pyx_L26:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":313 */
+ __pyx_2 = ((PyObject *)StaticTuple_Intern(__pyx_v_entry_bits)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_entry_bits));
+ __pyx_v_entry_bits = ((StaticTuple *)__pyx_2);
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":314 */
+ __pyx_1 = PyDict_SetItem(__pyx_v_items,((PyObject *)__pyx_v_entry_bits),__pyx_v_value); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 314; goto __pyx_L1;}
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":315 */
+ __pyx_5 = PyObject_Length(__pyx_v_items); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; goto __pyx_L1;}
+ __pyx_1 = (__pyx_5 != __pyx_v_length);
+ if (__pyx_1) {
+ __pyx_4 = PyInt_FromLong(__pyx_v_length); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_4);
+ Py_INCREF(((PyObject *)__pyx_v_entry_bits));
+ PyTuple_SET_ITEM(__pyx_3, 1, ((PyObject *)__pyx_v_entry_bits));
+ Py_INCREF(__pyx_v_bytes);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_bytes);
+ __pyx_4 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k29p, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; goto __pyx_L1;}
+ goto __pyx_L27;
+ }
+ __pyx_L27:;
+
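+ /* All entries parsed; attach the results to the LeafNode instance: _items,
+  * _len, _maximum_size, _key, _key_width, _raw_size (items_length plus
+  * length * prefix_length), and _search_prefix / _common_serialised_prefix
+  * (None for an empty node, otherwise _unknown and the raw prefix).  Finally
+  * the node's _current_size() is cross-checked against the input length. */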
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":318 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__items, __pyx_v_items) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":319 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_length); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__len, __pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":320 */
+ __pyx_4 = PyInt_FromLong(__pyx_v_maximum_size); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__maximum_size, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":321 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__key, __pyx_v_key) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 321; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":322 */
+ __pyx_3 = PyInt_FromLong(__pyx_v_width); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__key_width, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":323 */
+ __pyx_2 = PyInt_FromSsize_t((__pyx_v_items_length + (__pyx_v_length * __pyx_v_prefix_length))); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__raw_size, __pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":324 */
+ __pyx_1 = (__pyx_v_length == 0);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":325 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__search_prefix, Py_None) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":326 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__common_serialised_prefix, Py_None) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; goto __pyx_L1;}
+ goto __pyx_L28;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":328 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__search_prefix, __pyx_v_6bzrlib_12_chk_map_pyx__unknown) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":329 */
+ __pyx_4 = PyString_FromStringAndSize(__pyx_v_prefix,__pyx_v_prefix_length); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__common_serialised_prefix, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ }
+ __pyx_L28:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":331 */
+ __pyx_3 = PyInt_FromSsize_t(__pyx_v_c_bytes_len); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 331; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_v_result, __pyx_n__current_size); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 331; goto __pyx_L1;}
+ __pyx_4 = PyObject_CallObject(__pyx_2, 0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 331; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ if (PyObject_Cmp(__pyx_3, __pyx_4, &__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 331; goto __pyx_L1;}
+ __pyx_1 = __pyx_1 != 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromSsize_t(__pyx_v_c_bytes_len); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_v_result, __pyx_n__current_size); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; goto __pyx_L1;}
+ __pyx_4 = PyObject_CallObject(__pyx_3, 0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; goto __pyx_L1;}
+ Py_INCREF(__pyx_k39p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k39p);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_4);
+ __pyx_2 = 0;
+ __pyx_4 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_AssertionError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; goto __pyx_L1;}
+ goto __pyx_L29;
+ }
+ __pyx_L29:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":334 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._deserialise_leaf_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_entry_bits);
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_prefix_bits);
+ Py_DECREF(__pyx_v_items);
+ Py_DECREF(__pyx_v_entry);
+ Py_DECREF(__pyx_v_value);
+ Py_DECREF(__pyx_v_bytes);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_search_key_func);
+ return __pyx_r;
+}
+
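+/* _deserialise_internal_node: parses the serialised form of an internal node.
+ * key must already be a StaticTuple, and bytes a plain str that ends in '\n'
+ * and starts with the 9-byte signature compared against __pyx_k41 (defined
+ * elsewhere in this file, presumably "chknode:\n").  The expected layout is
+ * roughly:
+ *
+ *     <signature>maximum_size\nkey_width\nlen\ncommon prefix\n
+ *     prefix tail\x00child node key\n ...
+ */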
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__deserialise_internal_node(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__deserialise_internal_node(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_v_search_key_func = 0;
+ char *__pyx_v_c_bytes;
+ char *__pyx_v_cur;
+ char *__pyx_v_end;
+ char *__pyx_v_next_line;
+ Py_ssize_t __pyx_v_c_bytes_len;
+ Py_ssize_t __pyx_v_prefix_length;
+ int __pyx_v_maximum_size;
+ int __pyx_v_width;
+ int __pyx_v_length;
+ char *__pyx_v_prefix;
+ char *__pyx_v_next_null;
+ char *__pyx_v_c_item_prefix;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_items;
+ PyObject *__pyx_v_item_prefix;
+ PyObject *__pyx_v_flat_key;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ static char *__pyx_argnames[] = {"bytes","key","search_key_func",0};
+ __pyx_v_search_key_func = __pyx_d2;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO|O", __pyx_argnames, &__pyx_v_bytes, &__pyx_v_key, &__pyx_v_search_key_func)) return 0;
+ Py_INCREF(__pyx_v_bytes);
+ Py_INCREF(__pyx_v_key);
+ Py_INCREF(__pyx_v_search_key_func);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+ __pyx_v_items = Py_None; Py_INCREF(Py_None);
+ __pyx_v_item_prefix = Py_None; Py_INCREF(Py_None);
+ __pyx_v_flat_key = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":344 */
+ __pyx_1 = __pyx_v_6bzrlib_12_chk_map_pyx__InternalNode == Py_None;
+ if (__pyx_1) {
+ __pyx_2 = __pyx_f_6bzrlib_12_chk_map_pyx__import_globals(); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 345; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":346 */
+ __pyx_2 = PyTuple_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ __pyx_3 = PyDict_New(); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ if (PyDict_SetItem(__pyx_3, __pyx_n_search_key_func, __pyx_v_search_key_func) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ __pyx_4 = PyEval_CallObjectWithKeywords(__pyx_v_6bzrlib_12_chk_map_pyx__InternalNode, __pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_4;
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":348 */
+ __pyx_1 = (!StaticTuple_CheckExact(__pyx_v_key));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_key);
+ __pyx_3 = PyNumber_Remainder(__pyx_k40p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_TypeError, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":350 */
+ __pyx_1 = (!PyString_CheckExact(__pyx_v_bytes));
+ if (__pyx_1) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_bytes);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_bytes);
+ __pyx_4 = PyObject_CallObject(((PyObject *)(&PyType_Type)), __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_3 = PyNumber_Remainder(__pyx_k11p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_TypeError, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":353 */
+ __pyx_v_c_bytes = PyString_AS_STRING(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":354 */
+ __pyx_v_c_bytes_len = PyString_GET_SIZE(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":356 */
+ __pyx_1 = (__pyx_v_c_bytes_len < 9);
+ if (!__pyx_1) {
+ __pyx_1 = (memcmp(__pyx_v_c_bytes,__pyx_k41,9) != 0);
+ }
+ if (__pyx_1) {
+ __pyx_3 = PyNumber_Remainder(__pyx_k42p, __pyx_v_bytes); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":358 */
+ __pyx_1 = ((__pyx_v_c_bytes[(__pyx_v_c_bytes_len - 1)]) != '\n');
+ if (__pyx_1) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; goto __pyx_L1;}
+ Py_INCREF(__pyx_k14p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k14p);
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":361 */
+ __pyx_2 = PyDict_New(); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_items);
+ __pyx_v_items = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":362 */
+ __pyx_v_cur = (__pyx_v_c_bytes + 9);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":363 */
+ __pyx_v_end = (__pyx_v_c_bytes + __pyx_v_c_bytes_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":364 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_cur),__pyx_v_end,__pyx_k15); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; goto __pyx_L1;}
+ __pyx_v_maximum_size = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":365 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_cur),__pyx_v_end,__pyx_k16); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; goto __pyx_L1;}
+ __pyx_v_width = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":366 */
+ __pyx_1 = __pyx_f_6bzrlib_12_chk_map_pyx__get_int_from_line((&__pyx_v_cur),__pyx_v_end,__pyx_k17); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; goto __pyx_L1;}
+ __pyx_v_length = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":368 */
+ __pyx_v_next_line = ((char *)memchr(__pyx_v_cur,'\n',(__pyx_v_end - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":369 */
+ __pyx_1 = (__pyx_v_next_line == NULL);
+ if (__pyx_1) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ Py_INCREF(__pyx_k18p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k18p);
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":371 */
+ __pyx_v_prefix = __pyx_v_cur;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":372 */
+ __pyx_v_prefix_length = (__pyx_v_next_line - __pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":373 */
+ __pyx_v_cur = (__pyx_v_next_line + 1);
+
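+ /* Per-child loop: each remaining line is split at its last NUL (found with
+  * _my_memrchr); the common prefix plus the leading fragment becomes the
+  * item's search prefix, the trailing fragment becomes the child key, which
+  * is wrapped in a one-element interned StaticTuple, and
+  * items[item_prefix] = flat_key is recorded. */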
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":375 */
+ while (1) {
+ __pyx_1 = (__pyx_v_cur < __pyx_v_end);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":377 */
+ __pyx_v_next_line = ((char *)memchr(__pyx_v_cur,'\n',(__pyx_v_end - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":378 */
+ __pyx_1 = (__pyx_v_next_line == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; goto __pyx_L1;}
+ Py_INCREF(__pyx_k24p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k24p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; goto __pyx_L1;}
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":380 */
+ __pyx_v_next_null = ((char *)__pyx_f_6bzrlib_12_chk_map_pyx__my_memrchr(__pyx_v_cur,'\0',(__pyx_v_next_line - __pyx_v_cur)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":381 */
+ __pyx_1 = (__pyx_v_next_null == NULL);
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; goto __pyx_L1;}
+ Py_INCREF(__pyx_k27p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k27p);
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_4); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; goto __pyx_L1;}
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":383 */
+ __pyx_3 = PyString_FromStringAndSize(NULL,((__pyx_v_prefix_length + __pyx_v_next_null) - __pyx_v_cur)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 383; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_item_prefix);
+ __pyx_v_item_prefix = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":385 */
+ __pyx_v_c_item_prefix = PyString_AS_STRING(__pyx_v_item_prefix);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":386 */
+ __pyx_5 = __pyx_v_prefix_length;
+ if (__pyx_5) {
+ memcpy(__pyx_v_c_item_prefix,__pyx_v_prefix,__pyx_v_prefix_length);
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":388 */
+ memcpy((__pyx_v_c_item_prefix + __pyx_v_prefix_length),__pyx_v_cur,(__pyx_v_next_null - __pyx_v_cur));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":389 */
+ __pyx_4 = PyString_FromStringAndSize((__pyx_v_next_null + 1),((__pyx_v_next_line - __pyx_v_next_null) - 1)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_flat_key);
+ __pyx_v_flat_key = __pyx_4;
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":391 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_flat_key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_flat_key);
+ __pyx_3 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = PyObject_GetAttr(__pyx_3, __pyx_n_intern); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyObject_CallObject(__pyx_4, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_v_flat_key);
+ __pyx_v_flat_key = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":392 */
+ __pyx_1 = PyDict_SetItem(__pyx_v_items,__pyx_v_item_prefix,__pyx_v_flat_key); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":393 */
+ __pyx_v_cur = (__pyx_v_next_line + 1);
+ }
+
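+ /* After asserting that at least one child was parsed, the header values and
+  * the parsed dict are attached to the _InternalNode: _items, _len,
+  * _maximum_size, _key, _key_width, _raw_size (left as None), _node_width
+  * (the length of the last item_prefix) and _search_prefix (the common
+  * prefix). */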
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":394 */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ __pyx_5 = PyObject_Length(__pyx_v_items); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; goto __pyx_L1;}
+ if (!(__pyx_5 > 0)) {
+ PyErr_SetNone(PyExc_AssertionError);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; goto __pyx_L1;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":395 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__items, __pyx_v_items) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":396 */
+ __pyx_3 = PyInt_FromLong(__pyx_v_length); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__len, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":397 */
+ __pyx_4 = PyInt_FromLong(__pyx_v_maximum_size); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__maximum_size, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":398 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__key, __pyx_v_key) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":399 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_width); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__key_width, __pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":402 */
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__raw_size, Py_None) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":403 */
+ __pyx_5 = PyObject_Length(__pyx_v_item_prefix); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromSsize_t(__pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__node_width, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":404 */
+ __pyx_4 = PyString_FromStringAndSize(__pyx_v_prefix,__pyx_v_prefix_length); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 404; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_v_result, __pyx_n__search_prefix, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 404; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":405 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._deserialise_internal_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_items);
+ Py_DECREF(__pyx_v_item_prefix);
+ Py_DECREF(__pyx_v_flat_key);
+ Py_DECREF(__pyx_v_bytes);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_search_key_func);
+ return __pyx_r;
+}
+
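+/* _bytes_to_text_key: per its docstring, turns a CHKInventory entry value
+ * string into a (file_id, rev_id) tuple.  The checks below reject non-str
+ * input with a TypeError and require the value to contain a ':' (raising
+ * ValueError when none is found); the character after the ':' is then
+ * expected to be a space. */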
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__bytes_to_text_key(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_chk_map_pyx__bytes_to_text_key[] = "Take a CHKInventory value string and return a (file_id, rev_id) tuple";
+static PyObject *__pyx_f_6bzrlib_12_chk_map_pyx__bytes_to_text_key(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_bytes = 0;
+ StaticTuple *__pyx_v_key;
+ char *__pyx_v_byte_str;
+ char *__pyx_v_cur_end;
+ char *__pyx_v_file_id_str;
+ char *__pyx_v_byte_end;
+ char *__pyx_v_revision_str;
+ Py_ssize_t __pyx_v_byte_size;
+ PyObject *__pyx_v_file_id;
+ PyObject *__pyx_v_revision;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"bytes",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_bytes)) return 0;
+ Py_INCREF(__pyx_v_bytes);
+ __pyx_v_key = ((StaticTuple *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_file_id = Py_None; Py_INCREF(Py_None);
+ __pyx_v_revision = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":415 */
+ __pyx_1 = (!PyString_CheckExact(__pyx_v_bytes));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_bytes);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_bytes);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyType_Type)), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyNumber_Remainder(__pyx_k45p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":417 */
+ __pyx_v_byte_str = PyString_AS_STRING(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":418 */
+ __pyx_v_byte_size = PyString_GET_SIZE(__pyx_v_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":419 */
+ __pyx_v_byte_end = (__pyx_v_byte_str + __pyx_v_byte_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":420 */
+ __pyx_v_cur_end = ((char *)memchr(__pyx_v_byte_str,':',__pyx_v_byte_size));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":421 */
+ __pyx_1 = (__pyx_v_cur_end == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; goto __pyx_L1;}
+ Py_INCREF(__pyx_k46p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k46p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":423 */
+ __pyx_1 = ((__pyx_v_cur_end[1]) != ' ');
+ if (__pyx_1) {
+ __pyx_2 = PyString_FromString(__pyx_v_cur_end); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; goto __pyx_L1;}
+ __pyx_3 = PySequence_GetSlice(__pyx_2, 0, 2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyString_Type)), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k47p, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":426 */
+ __pyx_v_file_id_str = (__pyx_v_cur_end + 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":428 */
+ __pyx_v_cur_end = ((char *)memchr(__pyx_v_file_id_str,'\n',(__pyx_v_byte_end - __pyx_v_file_id_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":429 */
+ __pyx_1 = (__pyx_v_cur_end == NULL);
+ if (__pyx_1) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ Py_INCREF(__pyx_k48p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k48p);
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":431 */
+ __pyx_3 = __pyx_f_6bzrlib_12_chk_map_pyx_safe_interned_string_from_size(__pyx_v_file_id_str,(__pyx_v_cur_end - __pyx_v_file_id_str)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 431; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_file_id);
+ __pyx_v_file_id = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":434 */
+ __pyx_v_cur_end = ((char *)memchr((__pyx_v_cur_end + 1),'\n',((__pyx_v_byte_end - __pyx_v_cur_end) - 1)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":435 */
+ __pyx_1 = (__pyx_v_cur_end == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 436; goto __pyx_L1;}
+ Py_INCREF(__pyx_k49p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k49p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 436; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 436; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":438 */
+ __pyx_v_cur_end = ((char *)memchr((__pyx_v_cur_end + 1),'\n',((__pyx_v_byte_end - __pyx_v_cur_end) - 1)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":439 */
+ __pyx_1 = (__pyx_v_cur_end == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 440; goto __pyx_L1;}
+ Py_INCREF(__pyx_k50p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k50p);
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 440; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 440; goto __pyx_L1;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":442 */
+ __pyx_v_revision_str = (__pyx_v_cur_end + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":443 */
+ __pyx_v_cur_end = ((char *)memchr((__pyx_v_cur_end + 1),'\n',((__pyx_v_byte_end - __pyx_v_cur_end) - 1)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":444 */
+ __pyx_1 = (__pyx_v_cur_end == NULL);
+ if (__pyx_1) {
+ __pyx_v_cur_end = __pyx_v_byte_end;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":447 */
+ __pyx_2 = __pyx_f_6bzrlib_12_chk_map_pyx_safe_interned_string_from_size(__pyx_v_revision_str,(__pyx_v_cur_end - __pyx_v_revision_str)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 447; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_revision);
+ __pyx_v_revision = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":449 */
+ __pyx_3 = ((PyObject *)StaticTuple_New(2)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 449; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_key));
+ __pyx_v_key = ((StaticTuple *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":450 */
+ Py_INCREF(__pyx_v_file_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":451 */
+ StaticTuple_SET_ITEM(__pyx_v_key,0,__pyx_v_file_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":452 */
+ Py_INCREF(__pyx_v_revision);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":453 */
+ StaticTuple_SET_ITEM(__pyx_v_key,1,__pyx_v_revision);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":454 */
+ __pyx_2 = ((PyObject *)StaticTuple_Intern(__pyx_v_key)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx._bytes_to_text_key");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_file_id);
+ Py_DECREF(__pyx_v_revision);
+ Py_DECREF(__pyx_v_bytes);
+ return __pyx_r;
+}
+
+static struct PyMethodDef __pyx_methods[] = {
+ {"_search_key_16", (PyCFunction)__pyx_f_6bzrlib_12_chk_map_pyx__search_key_16, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_chk_map_pyx__search_key_16},
+ {"_search_key_255", (PyCFunction)__pyx_f_6bzrlib_12_chk_map_pyx__search_key_255, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_chk_map_pyx__search_key_255},
+ {"_deserialise_leaf_node", (PyCFunction)__pyx_f_6bzrlib_12_chk_map_pyx__deserialise_leaf_node, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_chk_map_pyx__deserialise_leaf_node},
+ {"_deserialise_internal_node", (PyCFunction)__pyx_f_6bzrlib_12_chk_map_pyx__deserialise_internal_node, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_bytes_to_text_key", (PyCFunction)__pyx_f_6bzrlib_12_chk_map_pyx__bytes_to_text_key, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_chk_map_pyx__bytes_to_text_key},
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_chk_map_pyx(void); /*proto*/
+PyMODINIT_FUNC init_chk_map_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_chk_map_pyx", __pyx_methods, 0, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; goto __pyx_L1;};
+ __pyx_v_6bzrlib_12_chk_map_pyx_crc32 = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_chk_map_pyx__LeafNode = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_chk_map_pyx__InternalNode = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_chk_map_pyx__unknown = Py_None; Py_INCREF(Py_None);
+ __pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = __Pyx_ImportType("bzrlib._static_tuple_c", "StaticTuple", sizeof(StaticTuple)); if (!__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":57 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_crc32);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_crc32);
+ __pyx_2 = __Pyx_Import(__pyx_n_zlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_crc32); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx_crc32);
+ __pyx_v_6bzrlib_12_chk_map_pyx_crc32 = __pyx_1;
+ __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":61 */
+ __pyx_3 = import_static_tuple_c(); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":64 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx__LeafNode);
+ __pyx_v_6bzrlib_12_chk_map_pyx__LeafNode = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":66 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx__InternalNode);
+ __pyx_v_6bzrlib_12_chk_map_pyx__InternalNode = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":68 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_6bzrlib_12_chk_map_pyx__unknown);
+ __pyx_v_6bzrlib_12_chk_map_pyx__unknown = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":188 */
+ Py_INCREF(Py_None);
+ __pyx_d1 = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":337 */
+ Py_INCREF(Py_None);
+ __pyx_d2 = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chk_map_pyx.pyx":408 */
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._chk_map_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_chk_map_pyx.pyx",
+ "_static_tuple_c.pxd",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i) {
+ PyTypeObject *t = o->ob_type;
+ PyObject *r;
+ if (t->tp_as_sequence && t->tp_as_sequence->sq_item)
+ r = PySequence_GetItem(o, i);
+ else {
+ PyObject *j = PyInt_FromLong(i);
+ if (!j)
+ return 0;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ }
+ return r;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *__import__ = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+ if (!__import__)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunction(__import__, "OOOO",
+ name, global_dict, empty_dict, list);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(__import__);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name,
+ long size)
+{
+ PyObject *py_module = 0;
+ PyObject *result = 0;
+
+ py_module = __Pyx_ImportModule(module_name);
+ if (!py_module)
+ goto bad;
+ result = PyObject_GetAttrString(py_module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+ if (((PyTypeObject *)result)->tp_basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s.%s does not appear to be the correct type object",
+ module_name, class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return 0;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(char *name) {
+ PyObject *py_name = 0;
+
+ py_name = PyString_FromString(name);
+ if (!py_name)
+ goto bad;
+ return PyImport_Import(py_name);
+bad:
+ Py_XDECREF(py_name);
+ return 0;
+}
+#endif
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_chk_map_pyx.pyx b/bzrlib/_chk_map_pyx.pyx
new file mode 100644
index 0000000..2c1d9a2
--- /dev/null
+++ b/bzrlib/_chk_map_pyx.pyx
@@ -0,0 +1,454 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+#python2.4 support
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from *:
+ ctypedef unsigned int size_t
+ int memcmp(void *, void*, size_t)
+ void memcpy(void *, void*, size_t)
+ void *memchr(void *s, int c, size_t len)
+ long strtol(char *, char **, int)
+ void sprintf(char *, char *, ...)
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ ctypedef struct PyObject:
+ pass
+ int PyTuple_CheckExact(object p)
+ Py_ssize_t PyTuple_GET_SIZE(object t)
+ int PyString_CheckExact(object)
+ char *PyString_AS_STRING(object s)
+ PyObject *PyString_FromStringAndSize_ptr "PyString_FromStringAndSize" (char *, Py_ssize_t)
+ Py_ssize_t PyString_GET_SIZE(object)
+ void PyString_InternInPlace(PyObject **)
+ long PyInt_AS_LONG(object)
+
+ int PyDict_SetItem(object d, object k, object v) except -1
+
+ void Py_INCREF(object)
+ void Py_DECREF_ptr "Py_DECREF" (PyObject *)
+
+ object PyString_FromStringAndSize(char*, Py_ssize_t)
+
+# cimport all of the definitions we will need to access
+from _static_tuple_c cimport StaticTuple,\
+ import_static_tuple_c, StaticTuple_New, \
+ StaticTuple_Intern, StaticTuple_SET_ITEM, StaticTuple_CheckExact, \
+ StaticTuple_GET_SIZE
+
+cdef object crc32
+from zlib import crc32
+
+
+# Set up the StaticTuple C_API functionality
+import_static_tuple_c()
+
+cdef object _LeafNode
+_LeafNode = None
+cdef object _InternalNode
+_InternalNode = None
+cdef object _unknown
+_unknown = None
+
+# We shouldn't just copy this from _dirstate_helpers_pyx
+cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise
+ # memrchr seems to be a GNU extension, so we have to implement it ourselves
+ cdef char *pos
+ cdef char *start
+
+ start = <char*>s
+ pos = start + n - 1
+ while pos >= start:
+ if pos[0] == c:
+ return <void*>pos
+ pos = pos - 1
+ return NULL
+
+
+cdef object safe_interned_string_from_size(char *s, Py_ssize_t size):
+ cdef PyObject *py_str
+ if size < 0:
+ raise AssertionError(
+ 'tried to create a string with an invalid size: %d @0x%x'
+ % (size, <int>s))
+ py_str = PyString_FromStringAndSize_ptr(s, size)
+ PyString_InternInPlace(&py_str)
+ result = <object>py_str
+ # Casting a PyObject* to an <object> triggers an INCREF from Pyrex, so we
+    # DECREF it to avoid getting immortal strings
+ Py_DECREF_ptr(py_str)
+ return result
+
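At the Python level, the helper above behaves roughly like the sketch below (illustrative only: the cdef version works from a raw char pointer and drops the extra reference Pyrex adds when casting back to an object):

    def interned_slice_sketch(s, start, size):
        # Same observable result: intern a size-byte slice of s (Python 2).
        if size < 0:
            raise AssertionError(
                'tried to create a string with an invalid size: %d' % size)
        return intern(s[start:start + size])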
+
+def _search_key_16(key):
+ """See chk_map._search_key_16."""
+ cdef Py_ssize_t num_bits
+ cdef Py_ssize_t i, j
+ cdef Py_ssize_t num_out_bytes
+ cdef unsigned long crc_val
+ cdef Py_ssize_t out_off
+ cdef char *c_out
+
+ num_bits = len(key)
+    # 8 hex digits per crc32 ('%08X'), and another 1 byte between bits
+ num_out_bytes = (9 * num_bits) - 1
+ out = PyString_FromStringAndSize(NULL, num_out_bytes)
+ c_out = PyString_AS_STRING(out)
+ for i from 0 <= i < num_bits:
+ if i > 0:
+ c_out[0] = c'\x00'
+ c_out = c_out + 1
+ crc_val = PyInt_AS_LONG(crc32(key[i]))
+ # Hex(val) order
+ sprintf(c_out, '%08X', crc_val)
+ c_out = c_out + 8
+ return out
+
+
+def _search_key_255(key):
+ """See chk_map._search_key_255."""
+ cdef Py_ssize_t num_bits
+ cdef Py_ssize_t i, j
+ cdef Py_ssize_t num_out_bytes
+ cdef unsigned long crc_val
+ cdef Py_ssize_t out_off
+ cdef char *c_out
+
+ num_bits = len(key)
+ # 4 bytes per crc32, and another 1 byte between bits
+ num_out_bytes = (5 * num_bits) - 1
+ out = PyString_FromStringAndSize(NULL, num_out_bytes)
+ c_out = PyString_AS_STRING(out)
+ for i from 0 <= i < num_bits:
+ if i > 0:
+ c_out[0] = c'\x00'
+ c_out = c_out + 1
+ crc_val = PyInt_AS_LONG(crc32(key[i]))
+ # MSB order
+ c_out[0] = (crc_val >> 24) & 0xFF
+ c_out[1] = (crc_val >> 16) & 0xFF
+ c_out[2] = (crc_val >> 8) & 0xFF
+ c_out[3] = (crc_val >> 0) & 0xFF
+ for j from 0 <= j < 4:
+ if c_out[j] == c'\n':
+ c_out[j] = c'_'
+ c_out = c_out + 4
+ return out
+
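For reference, a rough pure-Python sketch of the search-key layout the two functions above produce (helper names are invented for illustration; the canonical pure-Python versions live in bzrlib.chk_map):

    import struct
    from zlib import crc32

    def search_key_16_sketch(key):
        # one 8-digit uppercase-hex crc32 per key element, '\x00' separated
        return '\x00'.join(['%08X' % (crc32(bit) & 0xFFFFFFFF) for bit in key])

    def search_key_255_sketch(key):
        # one 4-byte big-endian crc32 per key element, '\x00' separated, with
        # any '\n' byte rewritten to '_' so the key never embeds a newline
        packed = '\x00'.join([struct.pack('>L', crc32(bit) & 0xFFFFFFFF)
                              for bit in key])
        return packed.replace('\n', '_')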
+
+cdef int _get_int_from_line(char **cur, char *end, char *message) except -1:
+ """Read a positive integer from the data stream.
+
+ :param cur: The start of the data, this will be moved to after the
+ trailing newline when done.
+ :param end: Do not parse any data past this byte.
+ :return: The integer stored in those bytes
+ """
+ cdef int value
+ cdef char *next_line, *next
+
+ next_line = <char *>memchr(cur[0], c'\n', end - cur[0])
+ if next_line == NULL:
+ raise ValueError("Missing %s line\n" % message)
+
+ value = strtol(cur[0], &next, 10)
+ if next != next_line:
+ raise ValueError("%s line not a proper int\n" % message)
+ cur[0] = next_line + 1
+ return value
+
+
+cdef _import_globals():
+ """Set the global attributes. Done lazy to avoid recursive import loops."""
+ global _LeafNode, _InternalNode, _unknown
+
+ from bzrlib import chk_map
+ _LeafNode = chk_map.LeafNode
+ _InternalNode = chk_map.InternalNode
+ _unknown = chk_map._unknown
+
+
+def _deserialise_leaf_node(bytes, key, search_key_func=None):
+ """Deserialise bytes, with key key, into a LeafNode.
+
+ :param bytes: The bytes of the node.
+ :param key: The key that the serialised node has.
+ """
+ cdef char *c_bytes, *cur, *next, *end
+ cdef char *next_line
+ cdef Py_ssize_t c_bytes_len, prefix_length, items_length
+ cdef int maximum_size, width, length, i, prefix_tail_len
+ cdef int num_value_lines, num_prefix_bits
+ cdef char *prefix, *value_start, *prefix_tail
+ cdef char *next_null, *last_null, *line_start
+ cdef char *c_entry, *entry_start
+ cdef StaticTuple entry_bits
+
+ if _LeafNode is None:
+ _import_globals()
+
+ result = _LeafNode(search_key_func=search_key_func)
+    # str.splitlines() can split on '\r', so don't use it; split('\n') adds an
+    # extra '' if the bytes end in a final newline.
+ if not PyString_CheckExact(bytes):
+ raise TypeError('bytes must be a plain string not %s' % (type(bytes),))
+
+ c_bytes = PyString_AS_STRING(bytes)
+ c_bytes_len = PyString_GET_SIZE(bytes)
+
+ if c_bytes_len < 9 or memcmp(c_bytes, "chkleaf:\n", 9) != 0:
+ raise ValueError("not a serialised leaf node: %r" % bytes)
+ if c_bytes[c_bytes_len - 1] != c'\n':
+ raise ValueError("bytes does not end in a newline")
+
+ end = c_bytes + c_bytes_len
+ cur = c_bytes + 9
+ maximum_size = _get_int_from_line(&cur, end, "maximum_size")
+ width = _get_int_from_line(&cur, end, "width")
+ length = _get_int_from_line(&cur, end, "length")
+
+ next_line = <char *>memchr(cur, c'\n', end - cur)
+ if next_line == NULL:
+ raise ValueError('Missing the prefix line\n')
+ prefix = cur
+ prefix_length = next_line - cur
+ cur = next_line + 1
+
+ prefix_bits = []
+ prefix_tail = prefix
+ num_prefix_bits = 0
+ next_null = <char *>memchr(prefix, c'\0', prefix_length)
+ while next_null != NULL:
+ num_prefix_bits = num_prefix_bits + 1
+ prefix_bits.append(
+ PyString_FromStringAndSize(prefix_tail, next_null - prefix_tail))
+ prefix_tail = next_null + 1
+ next_null = <char *>memchr(prefix_tail, c'\0', next_line - prefix_tail)
+ prefix_tail_len = next_line - prefix_tail
+
+ if num_prefix_bits >= width:
+ raise ValueError('Prefix has too many nulls versus width')
+
+ items_length = end - cur
+ items = {}
+ while cur < end:
+ line_start = cur
+ next_line = <char *>memchr(cur, c'\n', end - cur)
+ if next_line == NULL:
+ raise ValueError('null line\n')
+ last_null = <char *>_my_memrchr(cur, c'\0', next_line - cur)
+ if last_null == NULL:
+ raise ValueError('fail to find the num value lines null')
+ next_null = last_null + 1 # move past NULL
+ num_value_lines = _get_int_from_line(&next_null, next_line + 1,
+ "num value lines")
+ cur = next_line + 1
+ value_start = cur
+ # Walk num_value_lines forward
+ for i from 0 <= i < num_value_lines:
+ next_line = <char *>memchr(cur, c'\n', end - cur)
+ if next_line == NULL:
+ raise ValueError('missing trailing newline')
+ cur = next_line + 1
+ entry_bits = StaticTuple_New(width)
+ for i from 0 <= i < num_prefix_bits:
+ # TODO: Use PyList_GetItem, or turn prefix_bits into a
+ # tuple/StaticTuple
+ entry = prefix_bits[i]
+ # SET_ITEM 'steals' a reference
+ Py_INCREF(entry)
+ StaticTuple_SET_ITEM(entry_bits, i, entry)
+ value = PyString_FromStringAndSize(value_start, next_line - value_start)
+ # The next entry bit needs the 'tail' from the prefix, and first part
+ # of the line
+ entry_start = line_start
+ next_null = <char *>memchr(entry_start, c'\0',
+ last_null - entry_start + 1)
+ if next_null == NULL:
+ raise ValueError('bad no null, bad')
+ entry = PyString_FromStringAndSize(NULL,
+ prefix_tail_len + next_null - line_start)
+ c_entry = PyString_AS_STRING(entry)
+ if prefix_tail_len > 0:
+ memcpy(c_entry, prefix_tail, prefix_tail_len)
+ if next_null - line_start > 0:
+ memcpy(c_entry + prefix_tail_len, line_start, next_null - line_start)
+ Py_INCREF(entry)
+ i = num_prefix_bits
+ StaticTuple_SET_ITEM(entry_bits, i, entry)
+ while next_null != last_null: # We have remaining bits
+ i = i + 1
+ if i > width:
+ raise ValueError("Too many bits for entry")
+ entry_start = next_null + 1
+ next_null = <char *>memchr(entry_start, c'\0',
+ last_null - entry_start + 1)
+ if next_null == NULL:
+ raise ValueError('bad no null')
+ entry = PyString_FromStringAndSize(entry_start,
+ next_null - entry_start)
+ Py_INCREF(entry)
+ StaticTuple_SET_ITEM(entry_bits, i, entry)
+ if StaticTuple_GET_SIZE(entry_bits) != width:
+ raise AssertionError(
+ 'Incorrect number of elements (%d vs %d)'
+ % (len(entry_bits)+1, width + 1))
+ entry_bits = StaticTuple_Intern(entry_bits)
+ PyDict_SetItem(items, entry_bits, value)
+ if len(items) != length:
+ raise ValueError("item count (%d) mismatch for key %s,"
+ " bytes %r" % (length, entry_bits, bytes))
+ result._items = items
+ result._len = length
+ result._maximum_size = maximum_size
+ result._key = key
+ result._key_width = width
+ result._raw_size = items_length + length * prefix_length
+ if length == 0:
+ result._search_prefix = None
+ result._common_serialised_prefix = None
+ else:
+ result._search_prefix = _unknown
+ result._common_serialised_prefix = PyString_FromStringAndSize(prefix,
+ prefix_length)
+ if c_bytes_len != result._current_size():
+        raise AssertionError('_current_size computed incorrectly %d != %d'
+            % (c_bytes_len, result._current_size()))
+ return result
+
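The wire format handled above is easiest to see in a hand-written example (all values are invented; a real node would also have to satisfy the LeafNode._current_size() check at the end):

    # Illustrative 'chkleaf:' payload: a single key ('foo',) whose value
    # spans two lines. Each item line is
    #   '<key tail bits, \x00 separated>\x00<num value lines>\n<value lines>'
    serialised = (
        'chkleaf:\n'
        '4096\n'      # maximum_size
        '1\n'         # key width
        '1\n'         # length: number of items in the node
        '\n'          # common serialised prefix (empty here)
        'foo\x002\n'  # key tail 'foo', value spans 2 lines
        'line one\n'
        'line two\n'
    )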
+
+def _deserialise_internal_node(bytes, key, search_key_func=None):
+ cdef char *c_bytes, *cur, *next, *end
+ cdef char *next_line
+ cdef Py_ssize_t c_bytes_len, prefix_length
+ cdef int maximum_size, width, length, i, prefix_tail_len
+ cdef char *prefix, *line_prefix, *next_null, *c_item_prefix
+
+ if _InternalNode is None:
+ _import_globals()
+ result = _InternalNode(search_key_func=search_key_func)
+
+ if not StaticTuple_CheckExact(key):
+ raise TypeError('key %r is not a StaticTuple' % (key,))
+ if not PyString_CheckExact(bytes):
+ raise TypeError('bytes must be a plain string not %s' % (type(bytes),))
+
+ c_bytes = PyString_AS_STRING(bytes)
+ c_bytes_len = PyString_GET_SIZE(bytes)
+
+ if c_bytes_len < 9 or memcmp(c_bytes, "chknode:\n", 9) != 0:
+ raise ValueError("not a serialised internal node: %r" % bytes)
+ if c_bytes[c_bytes_len - 1] != c'\n':
+ raise ValueError("bytes does not end in a newline")
+
+ items = {}
+ cur = c_bytes + 9
+ end = c_bytes + c_bytes_len
+ maximum_size = _get_int_from_line(&cur, end, "maximum_size")
+ width = _get_int_from_line(&cur, end, "width")
+ length = _get_int_from_line(&cur, end, "length")
+
+ next_line = <char *>memchr(cur, c'\n', end - cur)
+ if next_line == NULL:
+ raise ValueError('Missing the prefix line\n')
+ prefix = cur
+ prefix_length = next_line - cur
+ cur = next_line + 1
+
+ while cur < end:
+ # Find the null separator
+ next_line = <char *>memchr(cur, c'\n', end - cur)
+ if next_line == NULL:
+ raise ValueError('missing trailing newline')
+ next_null = <char *>_my_memrchr(cur, c'\0', next_line - cur)
+ if next_null == NULL:
+ raise ValueError('bad no null')
+ item_prefix = PyString_FromStringAndSize(NULL,
+ prefix_length + next_null - cur)
+ c_item_prefix = PyString_AS_STRING(item_prefix)
+ if prefix_length:
+ memcpy(c_item_prefix, prefix, prefix_length)
+ memcpy(c_item_prefix + prefix_length, cur, next_null - cur)
+ flat_key = PyString_FromStringAndSize(next_null + 1,
+ next_line - next_null - 1)
+ flat_key = StaticTuple(flat_key).intern()
+ PyDict_SetItem(items, item_prefix, flat_key)
+ cur = next_line + 1
+ assert len(items) > 0
+ result._items = items
+ result._len = length
+ result._maximum_size = maximum_size
+ result._key = key
+ result._key_width = width
+ # XXX: InternalNodes don't really care about their size, and this will
+ # change if we add prefix compression
+ result._raw_size = None # len(bytes)
+ result._node_width = len(item_prefix)
+ result._search_prefix = PyString_FromStringAndSize(prefix, prefix_length)
+ return result
+
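Similarly, the serialised internal node walked by the loop above looks roughly like this (sizes, prefixes and child keys are made up):

    # Illustrative 'chknode:' payload with two child pointers. Each item
    # line is '<prefix tail>\x00<child node key>\n'; the parser prepends the
    # common prefix, so the first item's key becomes 'F' + '0' == 'F0'.
    serialised = (
        'chknode:\n'
        '4096\n'   # maximum_size
        '1\n'      # key width
        '2\n'      # length: total keys below this node
        'F\n'      # common search-key prefix shared by every item
        '0\x00sha1:1111111111111111111111111111111111111111\n'
        '8\x00sha1:2222222222222222222222222222222222222222\n'
    )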
+
+def _bytes_to_text_key(bytes):
+ """Take a CHKInventory value string and return a (file_id, rev_id) tuple"""
+ cdef StaticTuple key
+ cdef char *byte_str, *cur_end, *file_id_str, *byte_end
+ cdef char *revision_str
+ cdef Py_ssize_t byte_size, pos, file_id_len
+
+ if not PyString_CheckExact(bytes):
+ raise TypeError('bytes must be a string, got %r' % (type(bytes),))
+ byte_str = PyString_AS_STRING(bytes)
+ byte_size = PyString_GET_SIZE(bytes)
+ byte_end = byte_str + byte_size
+ cur_end = <char*>memchr(byte_str, c':', byte_size)
+ if cur_end == NULL:
+ raise ValueError('No kind section found.')
+ if cur_end[1] != c' ':
+ raise ValueError(
+ 'Kind section should end with ": ", got %r' % str(cur_end[:2],))
+ file_id_str = cur_end + 2
+ # file_id is now the data up until the next newline
+ cur_end = <char*>memchr(file_id_str, c'\n', byte_end - file_id_str)
+ if cur_end == NULL:
+ raise ValueError('no newline after file-id')
+ file_id = safe_interned_string_from_size(file_id_str,
+ cur_end - file_id_str)
+ # this is the end of the parent_str
+ cur_end = <char*>memchr(cur_end + 1, c'\n', byte_end - cur_end - 1)
+ if cur_end == NULL:
+ raise ValueError('no newline after parent_str')
+ # end of the name str
+ cur_end = <char*>memchr(cur_end + 1, c'\n', byte_end - cur_end - 1)
+ if cur_end == NULL:
+ raise ValueError('no newline after name str')
+ # the next section is the revision info
+ revision_str = cur_end + 1
+ cur_end = <char*>memchr(cur_end + 1, c'\n', byte_end - cur_end - 1)
+ if cur_end == NULL:
+ # This is probably a dir: entry, which has revision as the last item
+ cur_end = byte_end
+ revision = safe_interned_string_from_size(revision_str,
+ cur_end - revision_str)
+ key = StaticTuple_New(2)
+ Py_INCREF(file_id)
+ StaticTuple_SET_ITEM(key, 0, file_id)
+ Py_INCREF(revision)
+ StaticTuple_SET_ITEM(key, 1, revision)
+ return StaticTuple_Intern(key)
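The parsing above is easier to follow against a concrete value string; the one below only mimics the shape the parser expects (a kind section ending in ': ', a file-id line, two lines that are skipped, then the revision), with invented field contents:

    value = ('file: file-id-1234\n'    # kind ': ' file_id
             'parent-dir-id\n'         # parent_str (skipped)
             'hello.txt\n'             # name (skipped)
             'rev-id-5678\n'           # revision
             '99\nxxxx\nN\n')          # trailing fields are not examined
    # _bytes_to_text_key(value) would return the interned StaticTuple
    # ('file-id-1234', 'rev-id-5678')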
diff --git a/bzrlib/_chunks_to_lines_py.py b/bzrlib/_chunks_to_lines_py.py
new file mode 100644
index 0000000..4cadcc2
--- /dev/null
+++ b/bzrlib/_chunks_to_lines_py.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""The python implementation of chunks_to_lines"""
+
+from __future__ import absolute_import
+
+
+def chunks_to_lines(chunks):
+ """Re-split chunks into simple lines.
+
+    Each entry in the result should contain a single newline at the end, except
+    for the last entry, which may not have a final newline. If chunks is already
+    a simple list of lines, we return it directly.
+
+    :param chunks: A list/tuple of strings. If chunks is already a list of
+        lines, then we will return it as-is.
+ :return: A list of strings.
+ """
+ # Optimize for a very common case when chunks are already lines
+ last_no_newline = False
+ for chunk in chunks:
+ if last_no_newline:
+            # Only the last chunk is allowed to lack a trailing newline.
+            # Getting here means the previous chunk had no newline, and
+            # another chunk follows it, so these are not simple lines.
+ break
+ if not chunk:
+ # Empty strings are never valid lines
+ break
+ elif '\n' in chunk[:-1]:
+ # This chunk has an extra '\n', so we will have to split it
+ break
+ elif chunk[-1] != '\n':
+ # This chunk does not have a trailing newline
+ last_no_newline = True
+ else:
+        # All of the lines (except possibly the last) have a single newline at the
+ # end of the string.
+ # For the last one, we allow it to not have a trailing newline, but it
+ # is not allowed to be an empty string.
+ return chunks
+
+    # These aren't simple lines; just join and split again.
+ from bzrlib import osutils
+ return osutils._split_lines(''.join(chunks))
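A quick illustration of the contract described in the docstring (hypothetical calls against the pure-Python implementation above):

    from bzrlib._chunks_to_lines_py import chunks_to_lines

    # Already simple lines: the input list is returned unchanged.
    chunks_to_lines(['foo\n', 'bar\n'])   # -> ['foo\n', 'bar\n']

    # Chunks that straddle line boundaries are re-split; only the final
    # entry may lack a trailing newline.
    chunks_to_lines(['so', 'me\ntext\nwithout a trailing', ' newline'])
    # -> ['some\n', 'text\n', 'without a trailing newline']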
diff --git a/bzrlib/_chunks_to_lines_pyx.c b/bzrlib/_chunks_to_lines_pyx.c
new file mode 100644
index 0000000..26de242
--- /dev/null
+++ b/bzrlib/_chunks_to_lines_pyx.c
@@ -0,0 +1,518 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:00:56 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+#include "stdlib.h"
+#include "string.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Pyrex extensions for converting chunks to lines.";
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._chunks_to_lines_pyx */
+
+
+/* Declarations from implementation of bzrlib._chunks_to_lines_pyx */
+
+
+static char __pyx_k1[] = "chunk is not a string";
+
+
+static PyObject *__pyx_k1p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {0, 0, 0, 0}
+};
+
+
+
+/* Implementation of bzrlib._chunks_to_lines_pyx */
+
+static PyObject *__pyx_f_6bzrlib_20_chunks_to_lines_pyx_chunks_to_lines(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_20_chunks_to_lines_pyx_chunks_to_lines[] = "Re-split chunks into simple lines.\n\n    Each entry in the result should contain a single newline at the end, except\n    for the last entry, which may not have a final newline. If chunks is already\n    a simple list of lines, we return it directly.\n\n    :param chunks: A list/tuple of strings. If chunks is already a list of\n        lines, then we will return it as-is.\n    :return: A list of strings.\n    ";
+static PyObject *__pyx_f_6bzrlib_20_chunks_to_lines_pyx_chunks_to_lines(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_chunks = 0;
+ char *__pyx_v_c_str;
+ char *__pyx_v_newline;
+ char *__pyx_v_c_last;
+ Py_ssize_t __pyx_v_the_len;
+ int __pyx_v_last_no_newline;
+ PyObject *__pyx_v_chunk;
+ PyObject *__pyx_v_lines;
+ PyObject *__pyx_v_tail;
+ PyObject *__pyx_v_line;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"chunks",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_chunks)) return 0;
+ Py_INCREF(__pyx_v_chunks);
+ __pyx_v_chunk = Py_None; Py_INCREF(Py_None);
+ __pyx_v_lines = Py_None; Py_INCREF(Py_None);
+ __pyx_v_tail = Py_None; Py_INCREF(Py_None);
+ __pyx_v_line = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":60 */
+ __pyx_v_last_no_newline = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":61 */
+ __pyx_1 = PyObject_GetIter(__pyx_v_chunks); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; goto __pyx_L1;}
+ for (;;) {
+ __pyx_2 = PyIter_Next(__pyx_1);
+ if (!__pyx_2) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_chunk);
+ __pyx_v_chunk = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":62 */
+ __pyx_3 = __pyx_v_last_no_newline;
+ if (__pyx_3) {
+ goto __pyx_L3;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":70 */
+ __pyx_3 = (!PyString_CheckExact(__pyx_v_chunk));
+ if (__pyx_3) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; goto __pyx_L1;}
+ Py_INCREF(__pyx_k1p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k1p);
+ __pyx_4 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":72 */
+ __pyx_v_the_len = PyString_GET_SIZE(__pyx_v_chunk);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":73 */
+ __pyx_3 = (__pyx_v_the_len == 0);
+ if (__pyx_3) {
+ goto __pyx_L3;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":76 */
+ __pyx_v_c_str = PyString_AS_STRING(__pyx_v_chunk);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":77 */
+ __pyx_v_c_last = ((__pyx_v_c_str + __pyx_v_the_len) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":78 */
+ __pyx_v_newline = ((char *)memchr(__pyx_v_c_str,'\n',__pyx_v_the_len));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":79 */
+ __pyx_3 = (__pyx_v_newline != __pyx_v_c_last);
+ if (__pyx_3) {
+ __pyx_3 = (__pyx_v_newline == NULL);
+ if (__pyx_3) {
+ __pyx_v_last_no_newline = 1;
+ goto __pyx_L8;
+ }
+ /*else*/ {
+ goto __pyx_L3;
+ }
+ __pyx_L8:;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ }
+ /*else*/ {
+ Py_INCREF(__pyx_v_chunks);
+ __pyx_r = __pyx_v_chunks;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L3:;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":91 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_lines);
+ __pyx_v_lines = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":92 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_tail);
+ __pyx_v_tail = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":93 */
+ __pyx_4 = PyObject_GetIter(__pyx_v_chunks); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;}
+ for (;;) {
+ __pyx_1 = PyIter_Next(__pyx_4);
+ if (!__pyx_1) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_chunk);
+ __pyx_v_chunk = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":94 */
+ __pyx_3 = __pyx_v_tail != Py_None;
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":95 */
+ __pyx_2 = PyNumber_Add(__pyx_v_tail, __pyx_v_chunk); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_chunk);
+ __pyx_v_chunk = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":96 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_tail);
+ __pyx_v_tail = Py_None;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":97 */
+ __pyx_3 = (!PyString_CheckExact(__pyx_v_chunk));
+ if (__pyx_3) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; goto __pyx_L1;}
+ Py_INCREF(__pyx_k1p);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_k1p);
+ __pyx_2 = PyObject_CallObject(PyExc_TypeError, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; goto __pyx_L1;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":99 */
+ __pyx_v_the_len = PyString_GET_SIZE(__pyx_v_chunk);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":100 */
+ __pyx_3 = (__pyx_v_the_len == 0);
+ if (__pyx_3) {
+ goto __pyx_L9;
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":104 */
+ __pyx_v_c_str = PyString_AS_STRING(__pyx_v_chunk);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":105 */
+ __pyx_v_c_last = ((__pyx_v_c_str + __pyx_v_the_len) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":106 */
+ __pyx_v_newline = ((char *)memchr(__pyx_v_c_str,'\n',__pyx_v_the_len));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":107 */
+ __pyx_3 = (__pyx_v_newline == __pyx_v_c_last);
+ if (__pyx_3) {
+ __pyx_3 = PyList_Append(__pyx_v_lines,__pyx_v_chunk); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; goto __pyx_L1;}
+ goto __pyx_L14;
+ }
+ __pyx_3 = (__pyx_v_newline == NULL);
+ if (__pyx_3) {
+ Py_INCREF(__pyx_v_chunk);
+ Py_DECREF(__pyx_v_tail);
+ __pyx_v_tail = __pyx_v_chunk;
+ goto __pyx_L14;
+ }
+ /*else*/ {
+ while (1) {
+ __pyx_3 = (__pyx_v_newline != NULL);
+ if (!__pyx_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":118 */
+ __pyx_1 = PyString_FromStringAndSize(__pyx_v_c_str,((__pyx_v_newline - __pyx_v_c_str) + 1)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_line);
+ __pyx_v_line = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":119 */
+ __pyx_3 = PyList_Append(__pyx_v_lines,__pyx_v_line); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":120 */
+ __pyx_v_c_str = (__pyx_v_newline + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":121 */
+ __pyx_3 = (__pyx_v_c_str > __pyx_v_c_last);
+ if (__pyx_3) {
+ goto __pyx_L16;
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":123 */
+ __pyx_v_the_len = ((__pyx_v_c_last - __pyx_v_c_str) + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":124 */
+ __pyx_v_newline = ((char *)memchr(__pyx_v_c_str,'\n',__pyx_v_the_len));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":125 */
+ __pyx_3 = (__pyx_v_newline == NULL);
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":126 */
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_c_str,__pyx_v_the_len); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_tail);
+ __pyx_v_tail = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":127 */
+ goto __pyx_L16;
+ goto __pyx_L18;
+ }
+ __pyx_L18:;
+ }
+ __pyx_L16:;
+ }
+ __pyx_L14:;
+ __pyx_L9:;
+ }
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":128 */
+ __pyx_3 = __pyx_v_tail != Py_None;
+ if (__pyx_3) {
+ __pyx_3 = PyList_Append(__pyx_v_lines,__pyx_v_tail); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; goto __pyx_L1;}
+ goto __pyx_L19;
+ }
+ __pyx_L19:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":130 */
+ Py_INCREF(__pyx_v_lines);
+ __pyx_r = __pyx_v_lines;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._chunks_to_lines_pyx.chunks_to_lines");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_chunk);
+ Py_DECREF(__pyx_v_lines);
+ Py_DECREF(__pyx_v_tail);
+ Py_DECREF(__pyx_v_line);
+ Py_DECREF(__pyx_v_chunks);
+ return __pyx_r;
+}
+
+static struct PyMethodDef __pyx_methods[] = {
+ {"chunks_to_lines", (PyCFunction)__pyx_f_6bzrlib_20_chunks_to_lines_pyx_chunks_to_lines, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_20_chunks_to_lines_pyx_chunks_to_lines},
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_chunks_to_lines_pyx(void); /*proto*/
+PyMODINIT_FUNC init_chunks_to_lines_pyx(void) {
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_chunks_to_lines_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; goto __pyx_L1;};
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_chunks_to_lines_pyx.pyx":42 */
+ return;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._chunks_to_lines_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_chunks_to_lines_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_chunks_to_lines_pyx.pyx b/bzrlib/_chunks_to_lines_pyx.pyx
new file mode 100644
index 0000000..a6119a7
--- /dev/null
+++ b/bzrlib/_chunks_to_lines_pyx.pyx
@@ -0,0 +1,130 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Pyrex extensions for converting chunks to lines."""
+
+#python2.4 support
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from "stdlib.h":
+ ctypedef unsigned size_t
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ ctypedef struct PyObject:
+ pass
+ int PyList_Append(object lst, object item) except -1
+
+ int PyString_CheckExact(object p)
+ char *PyString_AS_STRING(object p)
+ Py_ssize_t PyString_GET_SIZE(object p)
+ object PyString_FromStringAndSize(char *c_str, Py_ssize_t len)
+
+cdef extern from "string.h":
+ void *memchr(void *s, int c, size_t n)
+
+
+def chunks_to_lines(chunks):
+ """Re-split chunks into simple lines.
+
+    Each entry in the result should contain a single newline at the end, except
+    for the last entry, which may not have a final newline. If chunks is already
+    a simple list of lines, we return it directly.
+
+    :param chunks: A list/tuple of strings. If chunks is already a list of
+        lines, then we will return it as-is.
+ :return: A list of strings.
+ """
+ cdef char *c_str
+ cdef char *newline
+ cdef char *c_last
+ cdef Py_ssize_t the_len
+ cdef int last_no_newline
+
+ # Check to see if the chunks are already lines
+ last_no_newline = 0
+ for chunk in chunks:
+ if last_no_newline:
+ # We have a chunk which followed a chunk without a newline, so this
+ # is not a simple list of lines.
+ break
+ # Switching from PyString_AsStringAndSize to PyString_CheckExact and
+ # then the macros GET_SIZE and AS_STRING saved us 40us / 470us.
+ # It seems PyString_AsStringAndSize can actually trigger a conversion,
+ # which we don't want anyway.
+ if not PyString_CheckExact(chunk):
+ raise TypeError('chunk is not a string')
+ the_len = PyString_GET_SIZE(chunk)
+ if the_len == 0:
+ # An empty string is never a valid line
+ break
+ c_str = PyString_AS_STRING(chunk)
+ c_last = c_str + the_len - 1
+ newline = <char *>memchr(c_str, c'\n', the_len)
+ if newline != c_last:
+ if newline == NULL:
+ # Missing a newline. Only valid as the last line
+ last_no_newline = 1
+ else:
+ # There is a newline in the middle, we must resplit
+ break
+ else:
+ # Everything was already a list of lines
+ return chunks
+
+ # We know we need to create a new list of lines
+ lines = []
+ tail = None # Any remainder from the previous chunk
+ for chunk in chunks:
+ if tail is not None:
+ chunk = tail + chunk
+ tail = None
+ if not PyString_CheckExact(chunk):
+ raise TypeError('chunk is not a string')
+ the_len = PyString_GET_SIZE(chunk)
+ if the_len == 0:
+ # An empty string is never a valid line, and we don't need to
+ # append anything
+ continue
+ c_str = PyString_AS_STRING(chunk)
+ c_last = c_str + the_len - 1
+ newline = <char *>memchr(c_str, c'\n', the_len)
+ if newline == c_last:
+ # A simple line
+ PyList_Append(lines, chunk)
+ elif newline == NULL:
+ # A chunk without a newline, if this is the last entry, then we
+ # allow it
+ tail = chunk
+ else:
+ # We have a newline in the middle, loop until we've consumed all
+ # lines
+ while newline != NULL:
+ line = PyString_FromStringAndSize(c_str, newline - c_str + 1)
+ PyList_Append(lines, line)
+ c_str = newline + 1
+ if c_str > c_last: # We are done
+ break
+ the_len = c_last - c_str + 1
+ newline = <char *>memchr(c_str, c'\n', the_len)
+ if newline == NULL:
+ tail = PyString_FromStringAndSize(c_str, the_len)
+ break
+ if tail is not None:
+ PyList_Append(lines, tail)
+ return lines
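+
+# Illustrative sketch, not part of the original module: the behaviour described
+# in the chunks_to_lines() docstring above, on hypothetical inputs. Written as
+# doctest-style comments so the module itself is unchanged.
+#
+#   >>> chunks_to_lines(['foo\nbar\n'])
+#   ['foo\n', 'bar\n']
+#   >>> chunks_to_lines(['foo\n', 'bar\n'])   # already simple lines, returned as-is
+#   ['foo\n', 'bar\n']
+#   >>> chunks_to_lines(['foo', 'bar\nbaz'])  # trailing fragment joined with the next chunk
+#   ['foobar\n', 'baz']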
diff --git a/bzrlib/_dirstate_helpers_py.py b/bzrlib/_dirstate_helpers_py.py
new file mode 100644
index 0000000..1bf1e43
--- /dev/null
+++ b/bzrlib/_dirstate_helpers_py.py
@@ -0,0 +1,319 @@
+# Copyright (C) 2007, 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Python implementations of Dirstate Helper functions."""
+
+from __future__ import absolute_import
+
+import binascii
+import os
+import struct
+
+# We cannot import the dirstate module, because it loads this module
+# All we really need is the IN_MEMORY_MODIFIED constant
+from bzrlib import errors
+from bzrlib.dirstate import DirState
+
+
+def pack_stat(st, _b64=binascii.b2a_base64, _pack=struct.Struct('>6L').pack):
+ """Convert stat values into a packed representation
+
+ Not all of the fields from the stat included are strictly needed, and by
+ just encoding the mtime and mode a slight speed increase could be gained.
+ However, using the pyrex version instead is a bigger win.
+ """
+ # base64 encoding always adds a final newline, so strip it off
+ return _b64(_pack(st.st_size & 0xFFFFFFFF, int(st.st_mtime) & 0xFFFFFFFF,
+ int(st.st_ctime) & 0xFFFFFFFF, st.st_dev & 0xFFFFFFFF,
+ st.st_ino & 0xFFFFFFFF, st.st_mode))[:-1]
+
+
+def _unpack_stat(packed_stat):
+ """Turn a packed_stat back into the stat fields.
+
+ This is meant as a debugging tool and should not be used in real code.
+ """
+ (st_size, st_mtime, st_ctime, st_dev, st_ino,
+ st_mode) = struct.unpack('>6L', binascii.a2b_base64(packed_stat))
+ return dict(st_size=st_size, st_mtime=st_mtime, st_ctime=st_ctime,
+ st_dev=st_dev, st_ino=st_ino, st_mode=st_mode)
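+
+# Illustrative sketch, not part of the original module: a pack_stat/_unpack_stat
+# round-trip on a hypothetical stat result. Only the low 32 bits of each field
+# survive the packing, matching the masks used above.
+#
+#   >>> import os
+#   >>> st = os.stat('.')
+#   >>> fields = _unpack_stat(pack_stat(st))
+#   >>> fields['st_size'] == (st.st_size & 0xFFFFFFFF)
+#   True
+#   >>> fields['st_mtime'] == (int(st.st_mtime) & 0xFFFFFFFF)
+#   True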
+
+
+def _bisect_path_left(paths, path):
+ """Return the index where to insert path into paths.
+
+ This uses the dirblock sorting. So all children in a directory come before
+ the children of children. For example::
+
+ a/
+ b/
+ c
+ d/
+ e
+ b-c
+ d-e
+ a-a
+ a=c
+
+ Will be sorted as::
+
+ a
+ a-a
+ a=c
+ a/b
+ a/b-c
+ a/d
+ a/d-e
+ a/b/c
+ a/d/e
+
+ :param paths: A list of paths to search through
+ :param path: A single path to insert
+ :return: An offset where 'path' can be inserted.
+ :seealso: bisect.bisect_left
+ """
+ hi = len(paths)
+ lo = 0
+ while lo < hi:
+ mid = (lo + hi) // 2
+ # Grab the dirname for the current dirblock
+ cur = paths[mid]
+ if _cmp_path_by_dirblock(cur, path) < 0:
+ lo = mid + 1
+ else:
+ hi = mid
+ return lo
+
+
+def _bisect_path_right(paths, path):
+ """Return the index where to insert path into paths.
+
+ This uses a path-wise comparison so we get::
+
+ a
+ a-b
+ a=b
+ a/b
+
+ Rather than::
+
+ a
+ a-b
+ a/b
+ a=b
+
+ :param paths: A list of paths to search through
+ :param path: A single path to insert
+ :return: An offset where 'path' can be inserted.
+ :seealso: bisect.bisect_right
+ """
+ hi = len(paths)
+ lo = 0
+ while lo < hi:
+ mid = (lo+hi)//2
+ # Grab the dirname for the current dirblock
+ cur = paths[mid]
+ if _cmp_path_by_dirblock(path, cur) < 0:
+ hi = mid
+ else:
+ lo = mid + 1
+ return lo
+
+
+def bisect_dirblock(dirblocks, dirname, lo=0, hi=None, cache={}):
+ """Return the index where to insert dirname into the dirblocks.
+
+ The return value idx is such that all directory blocks in dirblocks[:idx]
+ have names < dirname, and all blocks in dirblocks[idx:] have names >=
+ dirname.
+
+ Optional args lo (default 0) and hi (default len(dirblocks)) bound the
+ slice of dirblocks to be searched.
+ """
+ if hi is None:
+ hi = len(dirblocks)
+ try:
+ dirname_split = cache[dirname]
+ except KeyError:
+ dirname_split = dirname.split('/')
+ cache[dirname] = dirname_split
+ while lo < hi:
+ mid = (lo + hi) // 2
+ # Grab the dirname for the current dirblock
+ cur = dirblocks[mid][0]
+ try:
+ cur_split = cache[cur]
+ except KeyError:
+ cur_split = cur.split('/')
+ cache[cur] = cur_split
+ if cur_split < dirname_split: lo = mid + 1
+ else: hi = mid
+ return lo
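+
+# Illustrative sketch, not part of the original module: bisect_dirblock() on a
+# hypothetical, already-sorted dirblocks list. Blocks are ordered by dirname
+# split on '/', so 'a/b' sorts before 'a-c' even though a plain string compare
+# would say otherwise.
+#
+#   >>> blocks = [('', []), ('a', []), ('a/b', []), ('a-c', [])]
+#   >>> bisect_dirblock(blocks, 'a/b')
+#   2
+#   >>> bisect_dirblock(blocks, 'a-c')
+#   3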
+
+
+def cmp_by_dirs(path1, path2):
+ """Compare two paths directory by directory.
+
+ This is equivalent to doing::
+
+ cmp(path1.split('/'), path2.split('/'))
+
+ The idea is that you should compare path components separately. This
+ differs from plain ``cmp(path1, path2)`` for paths like ``'a-b'`` and
+ ``'a/b'``. "a-b" comes after "a" but would come before "a/b" lexically.
+
+ :param path1: first path
+ :param path2: second path
+ :return: negative number if ``path1`` comes first,
+ 0 if paths are equal,
+ and positive number if ``path2`` sorts first
+ """
+ if not isinstance(path1, str):
+ raise TypeError("'path1' must be a plain string, not %s: %r"
+ % (type(path1), path1))
+ if not isinstance(path2, str):
+ raise TypeError("'path2' must be a plain string, not %s: %r"
+ % (type(path2), path2))
+ return cmp(path1.split('/'), path2.split('/'))
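+
+# Illustrative sketch, not part of the original module: how cmp_by_dirs differs
+# from a plain lexical cmp for the hypothetical paths discussed above.
+#
+#   >>> cmp_by_dirs('a-b', 'a/b') > 0   # by components, 'a/b' sorts first
+#   True
+#   >>> cmp('a-b', 'a/b') < 0           # lexically, 'a-b' would sort first
+#   True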
+
+
+def _cmp_path_by_dirblock(path1, path2):
+ """Compare two paths based on what directory they are in.
+
+ This generates a sort order, such that all children of a directory are
+ sorted together, and grandchildren are in the same order as the
+ children appear. But all grandchildren come after all children.
+
+ :param path1: first path
+ :param path2: the second path
+ :return: negative number if ``path1`` comes first,
+ 0 if paths are equal
+ and a positive number if ``path2`` sorts first
+ """
+ if not isinstance(path1, str):
+ raise TypeError("'path1' must be a plain string, not %s: %r"
+ % (type(path1), path1))
+ if not isinstance(path2, str):
+ raise TypeError("'path2' must be a plain string, not %s: %r"
+ % (type(path2), path2))
+ dirname1, basename1 = os.path.split(path1)
+ key1 = (dirname1.split('/'), basename1)
+ dirname2, basename2 = os.path.split(path2)
+ key2 = (dirname2.split('/'), basename2)
+ return cmp(key1, key2)
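+
+# Illustrative sketch, not part of the original module: the dirblock ordering
+# produced by _cmp_path_by_dirblock for two hypothetical paths. 'a=c' lives in
+# the root directory, so it sorts before 'a/b' even though plain cmp() would
+# put 'a/b' first.
+#
+#   >>> _cmp_path_by_dirblock('a/b', 'a=c') > 0
+#   True
+#   >>> cmp('a/b', 'a=c') < 0
+#   True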
+
+
+def _read_dirblocks(state):
+ """Read in the dirblocks for the given DirState object.
+
+ This is tightly bound to the DirState internal representation. It should be
+ thought of as a member function, which is only separated out so that we can
+ re-write it in pyrex.
+
+ :param state: A DirState object.
+ :return: None
+ """
+ state._state_file.seek(state._end_of_header)
+ text = state._state_file.read()
+ # TODO: check the crc checksums. crc_measured = zlib.crc32(text)
+
+ fields = text.split('\0')
+ # Remove the last blank entry
+ trailing = fields.pop()
+ if trailing != '':
+ raise errors.DirstateCorrupt(state,
+ 'trailing garbage: %r' % (trailing,))
+ # consider turning fields into a tuple.
+
+ # skip the first field which is the trailing null from the header.
+ cur = 1
+ # Each line now has an extra '\n' field which is not used
+ # so we just skip over it
+ # entry size:
+ # 3 fields for the key
+ # + number of fields per tree_data (5) * tree count
+ # + newline
+ num_present_parents = state._num_present_parents()
+ tree_count = 1 + num_present_parents
+ entry_size = state._fields_per_entry()
+ expected_field_count = entry_size * state._num_entries
+ field_count = len(fields)
+ # this checks our adjustment, and also catches file too short.
+ if field_count - cur != expected_field_count:
+ raise errors.DirstateCorrupt(state,
+ 'field count incorrect %s != %s, entry_size=%s, '\
+ 'num_entries=%s fields=%r' % (
+ field_count - cur, expected_field_count, entry_size,
+ state._num_entries, fields))
+
+ if num_present_parents == 1:
+ # Bind external functions to local names
+ _int = int
+ # We access all fields in order, so we can just iterate over
+ # them. Grab a straight iterator over the fields. (We use an
+ # iterator because we don't want to do a lot of additions, nor
+ # do we want to do a lot of slicing)
+ next = iter(fields).next
+ # Move the iterator to the current position
+ for x in xrange(cur):
+ next()
+ # The two blocks here are deliberate: the root block and the
+ # contents-of-root block.
+ state._dirblocks = [('', []), ('', [])]
+ current_block = state._dirblocks[0][1]
+ current_dirname = ''
+ append_entry = current_block.append
+ for count in xrange(state._num_entries):
+ dirname = next()
+ name = next()
+ file_id = next()
+ if dirname != current_dirname:
+ # new block - different dirname
+ current_block = []
+ current_dirname = dirname
+ state._dirblocks.append((current_dirname, current_block))
+ append_entry = current_block.append
+ # we know current_dirname == dirname, so re-use it to avoid
+ # creating new strings
+ entry = ((current_dirname, name, file_id),
+ [(# Current Tree
+ next(), # minikind
+ next(), # fingerprint
+ _int(next()), # size
+ next() == 'y', # executable
+ next(), # packed_stat or revision_id
+ ),
+ ( # Parent 1
+ next(), # minikind
+ next(), # fingerprint
+ _int(next()), # size
+ next() == 'y', # executable
+ next(), # packed_stat or revision_id
+ ),
+ ])
+ trailing = next()
+ if trailing != '\n':
+ raise ValueError("trailing garbage in dirstate: %r" % trailing)
+ # append the entry to the current block
+ append_entry(entry)
+ state._split_root_dirblock_into_contents()
+ else:
+ fields_to_entry = state._get_fields_to_entry()
+ entries = [fields_to_entry(fields[pos:pos+entry_size])
+ for pos in xrange(cur, field_count, entry_size)]
+ state._entries_to_current_state(entries)
+ # To convert from format 2 => format 3
+ # state._dirblocks = sorted(state._dirblocks,
+ # key=lambda blk:blk[0].split('/'))
+ # To convert from format 3 => format 2
+ # state._dirblocks = sorted(state._dirblocks)
+ state._dirblock_state = DirState.IN_MEMORY_UNMODIFIED
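+
+# Illustrative note, not part of the original module: with the layout described
+# in the comments above, an entry with one present parent (tree_count == 2)
+# occupies 3 key fields + 5 fields per tree * 2 trees + 1 trailing '\n' field,
+# i.e. entry_size == 14, and the body holds entry_size * _num_entries fields.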
diff --git a/bzrlib/_dirstate_helpers_pyx.c b/bzrlib/_dirstate_helpers_pyx.c
new file mode 100644
index 0000000..119076e
--- /dev/null
+++ b/bzrlib/_dirstate_helpers_pyx.c
@@ -0,0 +1,20927 @@
+/* Generated by Cython 0.13 on Thu Oct 6 10:22:35 2011 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#else
+
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+
+#if PY_VERSION_HEX < 0x02040000
+ #define METH_COEXIST 0
+ #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
+ #define PyDict_Contains(d,o) PySequence_Contains(d,o)
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PY_FORMAT_SIZE_T ""
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+ #define PyNumber_Index(o) PyNumber_Int(o)
+ #define PyIndex_Check(o) PyNumber_Check(o)
+ #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+ #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
+ #define PyVarObject_HEAD_INIT(type, size) \
+ PyObject_HEAD_INIT(type) size,
+ #define PyType_Modified(t)
+
+ typedef struct {
+ void *buf;
+ PyObject *obj;
+ Py_ssize_t len;
+ Py_ssize_t itemsize;
+ int readonly;
+ int ndim;
+ char *format;
+ Py_ssize_t *shape;
+ Py_ssize_t *strides;
+ Py_ssize_t *suboffsets;
+ void *internal;
+ } Py_buffer;
+
+ #define PyBUF_SIMPLE 0
+ #define PyBUF_WRITABLE 0x0001
+ #define PyBUF_FORMAT 0x0004
+ #define PyBUF_ND 0x0008
+ #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+ #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+ #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+ #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+ #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+
+#endif
+
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_CHECKTYPES 0
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PyBytesObject PyStringObject
+ #define PyBytes_Type PyString_Type
+ #define PyBytes_Check PyString_Check
+ #define PyBytes_CheckExact PyString_CheckExact
+ #define PyBytes_FromString PyString_FromString
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
+ #define PyBytes_FromFormat PyString_FromFormat
+ #define PyBytes_DecodeEscape PyString_DecodeEscape
+ #define PyBytes_AsString PyString_AsString
+ #define PyBytes_AsStringAndSize PyString_AsStringAndSize
+ #define PyBytes_Size PyString_Size
+ #define PyBytes_AS_STRING PyString_AS_STRING
+ #define PyBytes_GET_SIZE PyString_GET_SIZE
+ #define PyBytes_Repr PyString_Repr
+ #define PyBytes_Concat PyString_Concat
+ #define PyBytes_ConcatAndDel PyString_ConcatAndDel
+ #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
+ #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+
+#ifndef PySet_CheckExact
+# define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
+#else
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_NAMESTR(n) ((char *)(n))
+ #define __Pyx_DOCSTR(n) ((char *)(n))
+#else
+ #define __Pyx_NAMESTR(n) (n)
+ #define __Pyx_DOCSTR(n) (n)
+#endif
+
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE_API__bzrlib___dirstate_helpers_pyx
+#include "python-compat.h"
+#include "_dirstate_helpers_pyx.h"
+#include "stdlib.h"
+#include "sys/stat.h"
+#include "string.h"
+#include "_static_tuple_c.h"
+
+/* inline attribute */
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+/* unused attribute */
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || defined(__INTEL_COMPILER)
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+
+/* Type Conversion Predeclarations */
+
+#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
+#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
+
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
+
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+
+
+#ifdef __GNUC__
+/* Test for GCC > 2.95 */
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* __GNUC__ > 2 ... */
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif /* __GNUC__ > 2 ... */
+#else /* __GNUC__ */
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "_dirstate_helpers_pyx.pyx",
+ "_static_tuple_c.pxd",
+};
+
+/* Type declarations */
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1020
+ *
+ *
+ * cdef class ProcessEntryC: # <<<<<<<<<<<<<<
+ *
+ * cdef int doing_consistency_expansion
+ */
+
+struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_vtab;
+ int doing_consistency_expansion;
+ PyObject *old_dirname_to_file_id;
+ PyObject *new_dirname_to_file_id;
+ PyObject *last_source_parent;
+ PyObject *last_target_parent;
+ int include_unchanged;
+ int partial;
+ PyObject *use_filesystem_for_exec;
+ PyObject *utf8_decode;
+ PyObject *searched_specific_files;
+ PyObject *searched_exact_paths;
+ PyObject *search_specific_files;
+ PyObject *search_specific_file_parents;
+ PyObject *state;
+ PyObject *current_root;
+ PyObject *current_root_unicode;
+ PyObject *root_entries;
+ int root_entries_pos;
+ int root_entries_len;
+ PyObject *root_abspath;
+ int source_index;
+ int target_index;
+ int want_unversioned;
+ PyObject *tree;
+ PyObject *dir_iterator;
+ int block_index;
+ PyObject *current_block;
+ int current_block_pos;
+ PyObject *current_block_list;
+ PyObject *current_dir_info;
+ PyObject *current_dir_list;
+ PyObject *_pending_consistent_entries;
+ int path_index;
+ PyObject *root_dir_info;
+ PyObject *bisect_left;
+ PyObject *pathjoin;
+ PyObject *fstat;
+ PyObject *seen_ids;
+ PyObject *sha_file;
+};
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":532
+ *
+ *
+ * cdef class Reader: # <<<<<<<<<<<<<<
+ * """Maintain the current location, and return fields as you parse them."""
+ *
+ */
+
+struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_vtab;
+ PyObject *state;
+ PyObject *text;
+ char *text_cstr;
+ int text_size;
+ char *end_cstr;
+ char *cur_cstr;
+ char *next;
+};
+
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1020
+ *
+ *
+ * cdef class ProcessEntryC: # <<<<<<<<<<<<<<
+ *
+ * cdef int doing_consistency_expansion
+ */
+
+struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC {
+ PyObject *(*_process_entry)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *, PyObject *);
+ int (*_gather_result_for_consistency)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *);
+ int (*_update_current_block)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *);
+ PyObject *(*_iter_next)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *);
+ PyObject *(*_maybe_tree_ref)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *);
+ PyObject *(*_loop_one_block)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *);
+ PyObject *(*_next_consistent_entries)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *);
+ PyObject *(*_path_info)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *, PyObject *);
+};
+static struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC;
+
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":532
+ *
+ *
+ * cdef class Reader: # <<<<<<<<<<<<<<
+ * """Maintain the current location, and return fields as you parse them."""
+ *
+ */
+
+struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader {
+ char *(*get_next)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *, int *);
+ PyObject *(*get_next_str)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *);
+ int (*_init)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *);
+ PyObject *(*_get_entry)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *, int, void **, int *);
+};
+static struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_Reader;
+
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct * __Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule((char *)modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+ end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+ }
+ #define __Pyx_RefNannySetupContext(name) void *__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+ #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r);} } while(0)
+#else
+ #define __Pyx_RefNannySetupContext(name)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+#endif /* CYTHON_REFNANNY */
+#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);} } while(0)
+#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r);} } while(0)
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name, PyObject* kw_name); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
+
+static CYTHON_INLINE long __Pyx_div_long(long, long); /* proto */
+
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+
+
+#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_List_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) {
+ if (likely(o != Py_None)) {
+ if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) {
+ PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Tuple_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) {
+ if (likely(o != Py_None)) {
+ if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) {
+ PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+
+#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ }
+ else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ }
+ else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) {
+ r = PySequence_GetItem(o, i);
+ }
+ else {
+ r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+ }
+ return r;
+}
+
+#define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_SetItemInt_Fast(o, i, v) : \
+ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))
+
+static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
+ int r;
+ if (!j) return -1;
+ r = PyObject_SetItem(o, j, v);
+ Py_DECREF(j);
+ return r;
+}
+
+static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) {
+ if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ Py_INCREF(v);
+ Py_DECREF(PyList_GET_ITEM(o, i));
+ PyList_SET_ITEM(o, i, v);
+ return 1;
+ }
+ else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && (likely(i >= 0)))
+ return PySequence_SetItem(o, i, v);
+ else {
+ PyObject *j = PyInt_FromSsize_t(i);
+ return __Pyx_SetItemInt_Generic(o, j, v);
+ }
+}
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+static PyObject *__Pyx_UnpackItem(PyObject *, Py_ssize_t index); /*proto*/
+static int __Pyx_EndUnpack(PyObject *, Py_ssize_t expected); /*proto*/
+
+#if PY_VERSION_HEX < 0x02050000
+#ifndef PyAnySet_CheckExact
+
+#define PyAnySet_CheckExact(ob) \
+ ((ob)->ob_type == &PySet_Type || \
+ (ob)->ob_type == &PyFrozenSet_Type)
+
+#define PySet_New(iterable) \
+ PyObject_CallFunctionObjArgs((PyObject *)&PySet_Type, (iterable), NULL)
+
+#define Pyx_PyFrozenSet_New(iterable) \
+ PyObject_CallFunctionObjArgs((PyObject *)&PyFrozenSet_Type, (iterable), NULL)
+
+#define PySet_Size(anyset) \
+ PyObject_Size((anyset))
+
+#define PySet_Contains(anyset, key) \
+ PySequence_Contains((anyset), (key))
+
+#define PySet_Pop(set) \
+ PyObject_CallMethod(set, (char *)"pop", NULL)
+
+static CYTHON_INLINE int PySet_Clear(PyObject *set) {
+ PyObject *ret = PyObject_CallMethod(set, (char *)"clear", NULL);
+ if (!ret) return -1;
+ Py_DECREF(ret); return 0;
+}
+
+static CYTHON_INLINE int PySet_Discard(PyObject *set, PyObject *key) {
+ PyObject *ret = PyObject_CallMethod(set, (char *)"discard", (char *)"O", key);
+ if (!ret) return -1;
+ Py_DECREF(ret); return 0;
+}
+
+static CYTHON_INLINE int PySet_Add(PyObject *set, PyObject *key) {
+ PyObject *ret = PyObject_CallMethod(set, (char *)"add", (char *)"O", key);
+ if (!ret) return -1;
+ Py_DECREF(ret); return 0;
+}
+
+#endif /* PyAnySet_CheckExact (<= Py2.4) */
+
+#if PY_VERSION_HEX < 0x02040000
+#ifndef Py_SETOBJECT_H
+#define Py_SETOBJECT_H
+
+static PyTypeObject *__Pyx_PySet_Type = NULL;
+static PyTypeObject *__Pyx_PyFrozenSet_Type = NULL;
+
+#define PySet_Type (*__Pyx_PySet_Type)
+#define PyFrozenSet_Type (*__Pyx_PyFrozenSet_Type)
+
+#define PyAnySet_Check(ob) \
+ (PyAnySet_CheckExact(ob) || \
+ PyType_IsSubtype((ob)->ob_type, &PySet_Type) || \
+ PyType_IsSubtype((ob)->ob_type, &PyFrozenSet_Type))
+
+#define PyFrozenSet_CheckExact(ob) ((ob)->ob_type == &PyFrozenSet_Type)
+
+static int __Pyx_Py23SetsImport(void) {
+ PyObject *sets=0, *Set=0, *ImmutableSet=0;
+
+ sets = PyImport_ImportModule((char *)"sets");
+ if (!sets) goto bad;
+ Set = PyObject_GetAttrString(sets, (char *)"Set");
+ if (!Set) goto bad;
+ ImmutableSet = PyObject_GetAttrString(sets, (char *)"ImmutableSet");
+ if (!ImmutableSet) goto bad;
+ Py_DECREF(sets);
+
+ __Pyx_PySet_Type = (PyTypeObject*) Set;
+ __Pyx_PyFrozenSet_Type = (PyTypeObject*) ImmutableSet;
+
+ return 0;
+
+ bad:
+ Py_XDECREF(sets);
+ Py_XDECREF(Set);
+ Py_XDECREF(ImmutableSet);
+ return -1;
+}
+
+#else
+static int __Pyx_Py23SetsImport(void) { return 0; }
+#endif /* !Py_SETOBJECT_H */
+#endif /* < Py2.4 */
+#endif /* < Py2.5 */
+
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Pop(PyObject* L) {
+ PyObject *r, *m;
+#if PY_VERSION_HEX >= 0x02040000
+ if (likely(PyList_CheckExact(L))
+ /* Check that both the size is positive and no reallocation shrinking needs to be done. */
+ && likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) {
+ Py_SIZE(L) -= 1;
+ return PyList_GET_ITEM(L, PyList_GET_SIZE(L));
+ }
+#endif
+ m = __Pyx_GetAttrString(L, "pop");
+ if (!m) return NULL;
+ r = PyObject_CallObject(m, NULL);
+ Py_DECREF(m);
+ return r;
+}
+
+static CYTHON_INLINE long __Pyx_NegateNonNeg(long b) { return unlikely(b < 0) ? b : !b; }
+static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) {
+ return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b);
+}
+
+#define __Pyx_DelItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_DelItemInt_Fast(o, i) : \
+ __Pyx_DelItem_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) {
+ int r;
+ if (!j) return -1;
+ r = PyObject_DelItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+
+static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i) {
+ if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && likely(i >= 0))
+ return PySequence_DelItem(o, i);
+ else {
+ PyObject *j = PyInt_FromSsize_t(i);
+ return __Pyx_DelItem_Generic(o, j);
+ }
+}
+
+static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
+
+static void __Pyx_WriteUnraisable(const char *name); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, long size, int strict); /*proto*/
+
+static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
+
+static void __Pyx_AddTraceback(const char *funcname); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+/* Module declarations from bzrlib._static_tuple_c */
+
+/* Module declarations from bzrlib._static_tuple_c */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = 0;
+/* Module declarations from bzrlib._dirstate_helpers_pyx */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_21_dirstate_helpers_pyx_Reader = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC = 0;
+static int __pyx_v_6bzrlib_21_dirstate_helpers_pyx_ERROR_PATH_NOT_FOUND;
+static int __pyx_v_6bzrlib_21_dirstate_helpers_pyx_ERROR_DIRECTORY;
+static PyObject *__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent = 0;
+static PyObject *__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file = 0;
+static PyObject *__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory = 0;
+static PyObject *__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink = 0;
+static PyObject *__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated = 0;
+static PyObject *__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference = 0;
+static void *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__my_memrchr(void *, int, size_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_safe_string_from_size(char *, Py_ssize_t); /*proto*/
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__is_aligned(void *); /*proto*/
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_by_dirs(char *, int, char *, int); /*proto*/
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock_intern(char *, int, char *, int); /*proto*/
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx_minikind_from_mode(int); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__pack_stat(PyObject *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__update_entry(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/
+static char __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(PyObject *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_to_kind(char); /*proto*/
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(char); /*proto*/
+#define __Pyx_MODULE_NAME "bzrlib._dirstate_helpers_pyx"
+int __pyx_module_is_main_bzrlib___dirstate_helpers_pyx = 0;
+
+/* Implementation of bzrlib._dirstate_helpers_pyx */
+static PyObject *__pyx_builtin_AssertionError;
+static PyObject *__pyx_builtin_TypeError;
+static PyObject *__pyx_builtin_KeyError;
+static PyObject *__pyx_builtin_StopIteration;
+static PyObject *__pyx_builtin_OSError;
+static PyObject *__pyx_builtin_AttributeError;
+static PyObject *__pyx_builtin_cmp;
+static PyObject *__pyx_builtin_UnicodeDecodeError;
+static char __pyx_k_1[] = "Must be a single character string, not %s";
+static char __pyx_k_2[] = "tried to create a string with an invalid size: %d";
+static char __pyx_k_3[] = "'path1' must be a plain string, not %s: %r";
+static char __pyx_k_4[] = "'path2' must be a plain string, not %s: %r";
+static char __pyx_k_5[] = "";
+static char __pyx_k_6[] = "you must pass a python list for 'paths' not: %s %r";
+static char __pyx_k_7[] = "you must pass a string for 'path' not: %s %r";
+static char __pyx_k_8[] = "you must pass a python list for 'dirblocks' not: %s %r";
+static char __pyx_k_9[] = "you must pass a string for dirname not: %s %r";
+static char __pyx_k_10[] = "get_next() called when cur_str is NULL";
+static char __pyx_k_11[] = "get_next() called when there are no chars left";
+static char __pyx_k_12[] = "failed to find trailing NULL (\\0). Trailing garbage: %r";
+static char __pyx_k_13[] = "First character should be null not: %s";
+static char __pyx_k_14[] = "Bad parse, we expected to end on \\n, not: %d %s: %s";
+static char __pyx_k_15[] = "_num_present_parents";
+static char __pyx_k_16[] = "We read the wrong number of entries. We expected to read %s, but read %s";
+static char __pyx_k_17[] = "_split_root_dirblock_into_contents";
+static char __pyx_k_18[] = "IN_MEMORY_UNMODIFIED";
+static char __pyx_k_19[] = "_get_block_entry_index";
+static char __pyx_k_20[] = "searched_specific_files";
+static char __pyx_k_21[] = "searched_exact_paths";
+static char __pyx_k_22[] = "use_filesystem_for_exec";
+static char __pyx_k_23[] = "search_specific_files";
+static char __pyx_k_24[] = "doing_consistency_expansion";
+static char __pyx_k_25[] = "old_dirname_to_file_id";
+static char __pyx_k_26[] = "new_dirname_to_file_id";
+static char __pyx_k_27[] = "search_specific_file_parents";
+static char __pyx_k_28[] = "current_root_unicode";
+static char __pyx_k_29[] = "_pending_consistent_entries";
+static char __pyx_k_30[] = "unsupported target index";
+static char __pyx_k_31[] = "Unsupported target index %d";
+static char __pyx_k_32[] = "entry '%s/%s' is considered renamed from %r but source does not exist\nentry: %s";
+static char __pyx_k_33[] = "tree-reference";
+static char __pyx_k_34[] = "Could not find target parent in wt: %s\nparent of: %s";
+static char __pyx_k_35[] = "We could not find the parent entry in index %d for the entry: %s";
+static char __pyx_k_36[] = "don't know how to compare source_minikind=%r, target_minikind=%r";
+static char __pyx_k_37[] = "_gather_result_for_consistency";
+static char __pyx_k_38[] = "file_kind_from_stat_mode";
+static char __pyx_k_39[] = "_directory_is_tree_reference";
+static char __pyx_k_40[] = ".bzr";
+static char __pyx_k_41[] = "_find_block_index_from_key";
+static char __pyx_k_42[] = "_update_current_block";
+static char __pyx_k_43[] = "_next_consistent_entries";
+static char __pyx_k_44[] = "result is not None: %r";
+static char __pyx_k_45[] = "Missing entry for specific path parent %r, %r";
+static char __pyx_k_46[] = "Got entry<->path mismatch for specific path %r entry %r path_info %r ";
+static char __pyx_k_47[] = "/";
+static char __pyx_k_48[] = "Helper functions for DirState.\n\nThis is the python implementation for DirState functions.\n";
+static char __pyx_k_49[] = "bzrlib.dirstate";
+static char __pyx_k_50[] = "bzrlib.osutils";
+static char __pyx_k_51[] = "_py_memrchr (line 142)";
+static char __pyx_k_52[] = "cmp_by_dirs (line 246)";
+static char __pyx_k_53[] = "_cmp_path_by_dirblock (line 275)";
+static char __pyx_k_54[] = "_cmp_path_by_dirblock";
+static char __pyx_k_55[] = "_bisect_path_left (line 377)";
+static char __pyx_k_56[] = "_bisect_path_right (line 430)";
+static char __pyx_k_57[] = "bisect_dirblock (line 483)";
+static char __pyx_k_58[] = "Reader._parse_dirblocks (line 731)";
+static char __pyx_k_59[] = "_read_dirblocks (line 779)";
+static char __pyx_k_60[] = "pack_stat (line 835)";
+static char __pyx_k_61[] = "update_entry (line 840)";
+static char __pyx_k__a[] = "a";
+static char __pyx_k__c[] = "c";
+static char __pyx_k__d[] = "d";
+static char __pyx_k__f[] = "f";
+static char __pyx_k__l[] = "l";
+static char __pyx_k__r[] = "r";
+static char __pyx_k__s[] = "s";
+static char __pyx_k__ar[] = "ar";
+static char __pyx_k__hi[] = "hi";
+static char __pyx_k__lo[] = "lo";
+static char __pyx_k__os[] = "os";
+static char __pyx_k__add[] = "add";
+static char __pyx_k__cmp[] = "cmp";
+static char __pyx_k__sys[] = "sys";
+static char __pyx_k__file[] = "file";
+static char __pyx_k__next[] = "next";
+static char __pyx_k__path[] = "path";
+static char __pyx_k__read[] = "read";
+static char __pyx_k__seek[] = "seek";
+static char __pyx_k__self[] = "self";
+static char __pyx_k__stat[] = "stat";
+static char __pyx_k__text[] = "text";
+static char __pyx_k__tree[] = "tree";
+static char __pyx_k__utf8[] = "utf8";
+static char __pyx_k___init[] = "_init";
+static char __pyx_k__cache[] = "cache";
+static char __pyx_k__entry[] = "entry";
+static char __pyx_k__errno[] = "errno";
+static char __pyx_k__fstat[] = "fstat";
+static char __pyx_k__lstat[] = "lstat";
+static char __pyx_k__path1[] = "path1";
+static char __pyx_k__path2[] = "path2";
+static char __pyx_k__paths[] = "paths";
+static char __pyx_k__split[] = "split";
+static char __pyx_k__state[] = "state";
+static char __pyx_k__win32[] = "win32";
+static char __pyx_k__EINVAL[] = "EINVAL";
+static char __pyx_k__ENOENT[] = "ENOENT";
+static char __pyx_k__Reader[] = "Reader";
+static char __pyx_k____ne__[] = "__ne__";
+static char __pyx_k__absent[] = "absent";
+static char __pyx_k__bisect[] = "bisect";
+static char __pyx_k__bzrlib[] = "bzrlib";
+static char __pyx_k__decode[] = "decode";
+static char __pyx_k__encode[] = "encode";
+static char __pyx_k__errors[] = "errors";
+static char __pyx_k__prefix[] = "prefix";
+static char __pyx_k__rsplit[] = "rsplit";
+static char __pyx_k__st_dev[] = "st_dev";
+static char __pyx_k__st_ino[] = "st_ino";
+static char __pyx_k__update[] = "update";
+static char __pyx_k__ENOTDIR[] = "ENOTDIR";
+static char __pyx_k__OSError[] = "OSError";
+static char __pyx_k__S_IEXEC[] = "S_IEXEC";
+static char __pyx_k__S_ISREG[] = "S_ISREG";
+static char __pyx_k___encode[] = "_encode";
+static char __pyx_k___fs_enc[] = "_fs_enc";
+static char __pyx_k__abspath[] = "abspath";
+static char __pyx_k__dirname[] = "dirname";
+static char __pyx_k__osutils[] = "osutils";
+static char __pyx_k__partial[] = "partial";
+static char __pyx_k__st_mode[] = "st_mode";
+static char __pyx_k__st_size[] = "st_size";
+static char __pyx_k__symlink[] = "symlink";
+static char __pyx_k__BzrError[] = "BzrError";
+static char __pyx_k__DirState[] = "DirState";
+static char __pyx_k__KeyError[] = "KeyError";
+static char __pyx_k__NULLSTAT[] = "NULLSTAT";
+static char __pyx_k____main__[] = "__main__";
+static char __pyx_k____test__[] = "__test__";
+static char __pyx_k__binascii[] = "binascii";
+static char __pyx_k__cur_cstr[] = "cur_cstr";
+static char __pyx_k__end_cstr[] = "end_cstr";
+static char __pyx_k__get_next[] = "get_next";
+static char __pyx_k__pathjoin[] = "pathjoin";
+static char __pyx_k__platform[] = "platform";
+static char __pyx_k__seen_ids[] = "seen_ids";
+static char __pyx_k__sha_file[] = "sha_file";
+static char __pyx_k__st_ctime[] = "st_ctime";
+static char __pyx_k__st_mtime[] = "st_mtime";
+static char __pyx_k__winerror[] = "winerror";
+static char __pyx_k__TypeError[] = "TypeError";
+static char __pyx_k___filename[] = "_filename";
+static char __pyx_k__dirblocks[] = "dirblocks";
+static char __pyx_k__directory[] = "directory";
+static char __pyx_k__is_inside[] = "is_inside";
+static char __pyx_k__pack_stat[] = "pack_stat";
+static char __pyx_k__path_utf8[] = "path_utf8";
+static char __pyx_k__relocated[] = "relocated";
+static char __pyx_k__splitpath[] = "splitpath";
+static char __pyx_k__text_cstr[] = "text_cstr";
+static char __pyx_k__text_size[] = "text_size";
+static char __pyx_k___dirblocks[] = "_dirblocks";
+static char __pyx_k___get_entry[] = "_get_entry";
+static char __pyx_k___iter_next[] = "_iter_next";
+static char __pyx_k___path_info[] = "_path_info";
+static char __pyx_k___read_link[] = "_read_link";
+static char __pyx_k___sha1_file[] = "_sha1_file";
+static char __pyx_k__b2a_base64[] = "b2a_base64";
+static char __pyx_k__cache_utf8[] = "cache_utf8";
+static char __pyx_k__path_index[] = "path_index";
+static char __pyx_k__stat_value[] = "stat_value";
+static char __pyx_k___py_memrchr[] = "_py_memrchr";
+static char __pyx_k___state_file[] = "_state_file";
+static char __pyx_k__bisect_left[] = "bisect_left";
+static char __pyx_k__block_index[] = "block_index";
+static char __pyx_k__cmp_by_dirs[] = "cmp_by_dirs";
+static char __pyx_k__utf8_decode[] = "utf8_decode";
+static char __pyx_k___cutoff_time[] = "_cutoff_time";
+static char __pyx_k___num_entries[] = "_num_entries";
+static char __pyx_k___utf8_decode[] = "_utf8_decode";
+static char __pyx_k__current_root[] = "current_root";
+static char __pyx_k__dir_iterator[] = "dir_iterator";
+static char __pyx_k__get_next_str[] = "get_next_str";
+static char __pyx_k__root_abspath[] = "root_abspath";
+static char __pyx_k__root_entries[] = "root_entries";
+static char __pyx_k__source_index[] = "source_index";
+static char __pyx_k__target_index[] = "target_index";
+static char __pyx_k__update_entry[] = "update_entry";
+static char __pyx_k__StopIteration[] = "StopIteration";
+static char __pyx_k___ensure_block[] = "_ensure_block";
+static char __pyx_k__current_block[] = "current_block";
+static char __pyx_k__is_inside_any[] = "is_inside_any";
+static char __pyx_k__root_dir_info[] = "root_dir_info";
+static char __pyx_k__stat_and_sha1[] = "stat_and_sha1";
+static char __pyx_k__AssertionError[] = "AssertionError";
+static char __pyx_k__AttributeError[] = "AttributeError";
+static char __pyx_k___end_of_header[] = "_end_of_header";
+static char __pyx_k___is_executable[] = "_is_executable";
+static char __pyx_k___mark_modified[] = "_mark_modified";
+static char __pyx_k___observed_sha1[] = "_observed_sha1";
+static char __pyx_k___process_entry[] = "_process_entry";
+static char __pyx_k___sha1_provider[] = "_sha1_provider";
+static char __pyx_k___walkdirs_utf8[] = "_walkdirs_utf8";
+static char __pyx_k__CorruptDirstate[] = "CorruptDirstate";
+static char __pyx_k__DirstateCorrupt[] = "DirstateCorrupt";
+static char __pyx_k___dirblock_state[] = "_dirblock_state";
+static char __pyx_k___loop_one_block[] = "_loop_one_block";
+static char __pyx_k___maybe_tree_ref[] = "_maybe_tree_ref";
+static char __pyx_k___read_dirblocks[] = "_read_dirblocks";
+static char __pyx_k__bisect_dirblock[] = "bisect_dirblock";
+static char __pyx_k__BadFileKindError[] = "BadFileKindError";
+static char __pyx_k___parse_dirblocks[] = "_parse_dirblocks";
+static char __pyx_k___sha_cutoff_time[] = "_sha_cutoff_time";
+static char __pyx_k__current_dir_info[] = "current_dir_info";
+static char __pyx_k__current_dir_list[] = "current_dir_list";
+static char __pyx_k__root_entries_len[] = "root_entries_len";
+static char __pyx_k__root_entries_pos[] = "root_entries_pos";
+static char __pyx_k__want_unversioned[] = "want_unversioned";
+static char __pyx_k___bisect_path_left[] = "_bisect_path_left";
+static char __pyx_k___entries_for_path[] = "_entries_for_path";
+static char __pyx_k__current_block_pos[] = "current_block_pos";
+static char __pyx_k__include_unchanged[] = "include_unchanged";
+static char __pyx_k__UnicodeDecodeError[] = "UnicodeDecodeError";
+static char __pyx_k___bisect_path_right[] = "_bisect_path_right";
+static char __pyx_k__current_block_list[] = "current_block_list";
+static char __pyx_k__last_source_parent[] = "last_source_parent";
+static char __pyx_k__last_target_parent[] = "last_target_parent";
+static char __pyx_k__parent_directories[] = "parent_directories";
+static char __pyx_k__BadFilenameEncoding[] = "BadFilenameEncoding";
+static char __pyx_k__NULL_PARENT_DETAILS[] = "NULL_PARENT_DETAILS";
+static PyObject *__pyx_kp_s_1;
+static PyObject *__pyx_kp_s_10;
+static PyObject *__pyx_kp_s_11;
+static PyObject *__pyx_kp_s_12;
+static PyObject *__pyx_kp_s_13;
+static PyObject *__pyx_kp_s_14;
+static PyObject *__pyx_n_s_15;
+static PyObject *__pyx_kp_s_16;
+static PyObject *__pyx_n_s_17;
+static PyObject *__pyx_n_s_18;
+static PyObject *__pyx_n_s_19;
+static PyObject *__pyx_kp_s_2;
+static PyObject *__pyx_n_s_20;
+static PyObject *__pyx_n_s_21;
+static PyObject *__pyx_n_s_22;
+static PyObject *__pyx_n_s_23;
+static PyObject *__pyx_n_s_24;
+static PyObject *__pyx_n_s_25;
+static PyObject *__pyx_n_s_26;
+static PyObject *__pyx_n_s_27;
+static PyObject *__pyx_n_s_28;
+static PyObject *__pyx_n_s_29;
+static PyObject *__pyx_kp_s_3;
+static PyObject *__pyx_kp_s_30;
+static PyObject *__pyx_kp_s_31;
+static PyObject *__pyx_kp_s_32;
+static PyObject *__pyx_kp_s_33;
+static PyObject *__pyx_kp_s_34;
+static PyObject *__pyx_kp_s_35;
+static PyObject *__pyx_kp_s_36;
+static PyObject *__pyx_n_s_37;
+static PyObject *__pyx_n_s_38;
+static PyObject *__pyx_n_s_39;
+static PyObject *__pyx_kp_s_4;
+static PyObject *__pyx_kp_s_40;
+static PyObject *__pyx_n_s_41;
+static PyObject *__pyx_n_s_42;
+static PyObject *__pyx_n_s_43;
+static PyObject *__pyx_kp_s_44;
+static PyObject *__pyx_kp_s_45;
+static PyObject *__pyx_kp_s_46;
+static PyObject *__pyx_kp_s_47;
+static PyObject *__pyx_n_s_49;
+static PyObject *__pyx_kp_s_5;
+static PyObject *__pyx_n_s_50;
+static PyObject *__pyx_kp_u_51;
+static PyObject *__pyx_kp_u_52;
+static PyObject *__pyx_kp_u_53;
+static PyObject *__pyx_n_s_54;
+static PyObject *__pyx_kp_u_55;
+static PyObject *__pyx_kp_u_56;
+static PyObject *__pyx_kp_u_57;
+static PyObject *__pyx_kp_u_58;
+static PyObject *__pyx_kp_u_59;
+static PyObject *__pyx_kp_s_6;
+static PyObject *__pyx_kp_u_60;
+static PyObject *__pyx_kp_u_61;
+static PyObject *__pyx_kp_s_7;
+static PyObject *__pyx_kp_s_8;
+static PyObject *__pyx_kp_s_9;
+static PyObject *__pyx_n_s__AssertionError;
+static PyObject *__pyx_n_s__AttributeError;
+static PyObject *__pyx_n_s__BadFileKindError;
+static PyObject *__pyx_n_s__BadFilenameEncoding;
+static PyObject *__pyx_n_s__BzrError;
+static PyObject *__pyx_n_s__CorruptDirstate;
+static PyObject *__pyx_n_s__DirState;
+static PyObject *__pyx_n_s__DirstateCorrupt;
+static PyObject *__pyx_n_s__EINVAL;
+static PyObject *__pyx_n_s__ENOENT;
+static PyObject *__pyx_n_s__ENOTDIR;
+static PyObject *__pyx_n_s__KeyError;
+static PyObject *__pyx_n_s__NULLSTAT;
+static PyObject *__pyx_n_s__NULL_PARENT_DETAILS;
+static PyObject *__pyx_n_s__OSError;
+static PyObject *__pyx_n_s__Reader;
+static PyObject *__pyx_n_s__S_IEXEC;
+static PyObject *__pyx_n_s__S_ISREG;
+static PyObject *__pyx_n_s__StopIteration;
+static PyObject *__pyx_n_s__TypeError;
+static PyObject *__pyx_n_s__UnicodeDecodeError;
+static PyObject *__pyx_n_s____main__;
+static PyObject *__pyx_n_s____ne__;
+static PyObject *__pyx_n_s____test__;
+static PyObject *__pyx_n_s___bisect_path_left;
+static PyObject *__pyx_n_s___bisect_path_right;
+static PyObject *__pyx_n_s___cutoff_time;
+static PyObject *__pyx_n_s___dirblock_state;
+static PyObject *__pyx_n_s___dirblocks;
+static PyObject *__pyx_n_s___encode;
+static PyObject *__pyx_n_s___end_of_header;
+static PyObject *__pyx_n_s___ensure_block;
+static PyObject *__pyx_n_s___entries_for_path;
+static PyObject *__pyx_n_s___filename;
+static PyObject *__pyx_n_s___fs_enc;
+static PyObject *__pyx_n_s___get_entry;
+static PyObject *__pyx_n_s___init;
+static PyObject *__pyx_n_s___is_executable;
+static PyObject *__pyx_n_s___iter_next;
+static PyObject *__pyx_n_s___loop_one_block;
+static PyObject *__pyx_n_s___mark_modified;
+static PyObject *__pyx_n_s___maybe_tree_ref;
+static PyObject *__pyx_n_s___num_entries;
+static PyObject *__pyx_n_s___observed_sha1;
+static PyObject *__pyx_n_s___parse_dirblocks;
+static PyObject *__pyx_n_s___path_info;
+static PyObject *__pyx_n_s___process_entry;
+static PyObject *__pyx_n_s___py_memrchr;
+static PyObject *__pyx_n_s___read_dirblocks;
+static PyObject *__pyx_n_s___read_link;
+static PyObject *__pyx_n_s___sha1_file;
+static PyObject *__pyx_n_s___sha1_provider;
+static PyObject *__pyx_n_s___sha_cutoff_time;
+static PyObject *__pyx_n_s___state_file;
+static PyObject *__pyx_n_s___utf8_decode;
+static PyObject *__pyx_n_s___walkdirs_utf8;
+static PyObject *__pyx_n_s__a;
+static PyObject *__pyx_n_s__absent;
+static PyObject *__pyx_n_s__abspath;
+static PyObject *__pyx_n_s__add;
+static PyObject *__pyx_n_s__ar;
+static PyObject *__pyx_n_s__b2a_base64;
+static PyObject *__pyx_n_s__binascii;
+static PyObject *__pyx_n_s__bisect;
+static PyObject *__pyx_n_s__bisect_dirblock;
+static PyObject *__pyx_n_s__bisect_left;
+static PyObject *__pyx_n_s__block_index;
+static PyObject *__pyx_n_s__bzrlib;
+static PyObject *__pyx_n_s__c;
+static PyObject *__pyx_n_s__cache;
+static PyObject *__pyx_n_s__cache_utf8;
+static PyObject *__pyx_n_s__cmp;
+static PyObject *__pyx_n_s__cmp_by_dirs;
+static PyObject *__pyx_n_s__cur_cstr;
+static PyObject *__pyx_n_s__current_block;
+static PyObject *__pyx_n_s__current_block_list;
+static PyObject *__pyx_n_s__current_block_pos;
+static PyObject *__pyx_n_s__current_dir_info;
+static PyObject *__pyx_n_s__current_dir_list;
+static PyObject *__pyx_n_s__current_root;
+static PyObject *__pyx_n_s__d;
+static PyObject *__pyx_n_s__decode;
+static PyObject *__pyx_n_s__dir_iterator;
+static PyObject *__pyx_n_s__dirblocks;
+static PyObject *__pyx_n_s__directory;
+static PyObject *__pyx_n_s__dirname;
+static PyObject *__pyx_n_s__encode;
+static PyObject *__pyx_n_s__end_cstr;
+static PyObject *__pyx_n_s__entry;
+static PyObject *__pyx_n_s__errno;
+static PyObject *__pyx_n_s__errors;
+static PyObject *__pyx_n_s__f;
+static PyObject *__pyx_n_s__file;
+static PyObject *__pyx_n_s__fstat;
+static PyObject *__pyx_n_s__get_next;
+static PyObject *__pyx_n_s__get_next_str;
+static PyObject *__pyx_n_s__hi;
+static PyObject *__pyx_n_s__include_unchanged;
+static PyObject *__pyx_n_s__is_inside;
+static PyObject *__pyx_n_s__is_inside_any;
+static PyObject *__pyx_n_s__l;
+static PyObject *__pyx_n_s__last_source_parent;
+static PyObject *__pyx_n_s__last_target_parent;
+static PyObject *__pyx_n_s__lo;
+static PyObject *__pyx_n_s__lstat;
+static PyObject *__pyx_n_s__next;
+static PyObject *__pyx_n_s__os;
+static PyObject *__pyx_n_s__osutils;
+static PyObject *__pyx_n_s__pack_stat;
+static PyObject *__pyx_n_s__parent_directories;
+static PyObject *__pyx_n_s__partial;
+static PyObject *__pyx_n_s__path;
+static PyObject *__pyx_n_s__path1;
+static PyObject *__pyx_n_s__path2;
+static PyObject *__pyx_n_s__path_index;
+static PyObject *__pyx_n_s__path_utf8;
+static PyObject *__pyx_n_s__pathjoin;
+static PyObject *__pyx_n_s__paths;
+static PyObject *__pyx_n_s__platform;
+static PyObject *__pyx_n_s__prefix;
+static PyObject *__pyx_n_s__r;
+static PyObject *__pyx_n_s__read;
+static PyObject *__pyx_n_s__relocated;
+static PyObject *__pyx_n_s__root_abspath;
+static PyObject *__pyx_n_s__root_dir_info;
+static PyObject *__pyx_n_s__root_entries;
+static PyObject *__pyx_n_s__root_entries_len;
+static PyObject *__pyx_n_s__root_entries_pos;
+static PyObject *__pyx_n_s__rsplit;
+static PyObject *__pyx_n_s__s;
+static PyObject *__pyx_n_s__seek;
+static PyObject *__pyx_n_s__seen_ids;
+static PyObject *__pyx_n_s__self;
+static PyObject *__pyx_n_s__sha_file;
+static PyObject *__pyx_n_s__source_index;
+static PyObject *__pyx_n_s__split;
+static PyObject *__pyx_n_s__splitpath;
+static PyObject *__pyx_n_s__st_ctime;
+static PyObject *__pyx_n_s__st_dev;
+static PyObject *__pyx_n_s__st_ino;
+static PyObject *__pyx_n_s__st_mode;
+static PyObject *__pyx_n_s__st_mtime;
+static PyObject *__pyx_n_s__st_size;
+static PyObject *__pyx_n_s__stat;
+static PyObject *__pyx_n_s__stat_and_sha1;
+static PyObject *__pyx_n_s__stat_value;
+static PyObject *__pyx_n_s__state;
+static PyObject *__pyx_n_s__symlink;
+static PyObject *__pyx_n_s__sys;
+static PyObject *__pyx_n_s__target_index;
+static PyObject *__pyx_n_s__text;
+static PyObject *__pyx_n_s__text_cstr;
+static PyObject *__pyx_n_s__text_size;
+static PyObject *__pyx_n_s__tree;
+static PyObject *__pyx_n_s__update;
+static PyObject *__pyx_n_s__update_entry;
+static PyObject *__pyx_n_s__utf8;
+static PyObject *__pyx_n_s__utf8_decode;
+static PyObject *__pyx_n_s__want_unversioned;
+static PyObject *__pyx_n_s__win32;
+static PyObject *__pyx_n_s__winerror;
+static PyObject *__pyx_int_0;
+static PyObject *__pyx_int_1;
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":128
+ * import_static_tuple_c()
+ *
+ * cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise # <<<<<<<<<<<<<<
+ * # memrchr seems to be a GNU extension, so we have to implement it ourselves
+ * cdef char *pos
+ */
+
+static void *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__my_memrchr(void *__pyx_v_s, int __pyx_v_c, size_t __pyx_v_n) {
+ char *__pyx_v_pos;
+ char *__pyx_v_start;
+ void *__pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_my_memrchr");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":133
+ * cdef char *start
+ *
+ * start = <char*>s # <<<<<<<<<<<<<<
+ * pos = start + n - 1
+ * while pos >= start:
+ */
+ __pyx_v_start = ((char *)__pyx_v_s);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":134
+ *
+ * start = <char*>s
+ * pos = start + n - 1 # <<<<<<<<<<<<<<
+ * while pos >= start:
+ * if pos[0] == c:
+ */
+ __pyx_v_pos = ((__pyx_v_start + __pyx_v_n) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":135
+ * start = <char*>s
+ * pos = start + n - 1
+ * while pos >= start: # <<<<<<<<<<<<<<
+ * if pos[0] == c:
+ * return <void*>pos
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v_pos >= __pyx_v_start);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":136
+ * pos = start + n - 1
+ * while pos >= start:
+ * if pos[0] == c: # <<<<<<<<<<<<<<
+ * return <void*>pos
+ * pos = pos - 1
+ */
+ __pyx_t_1 = ((__pyx_v_pos[0]) == __pyx_v_c);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":137
+ * while pos >= start:
+ * if pos[0] == c:
+ * return <void*>pos # <<<<<<<<<<<<<<
+ * pos = pos - 1
+ * return NULL
+ */
+ __pyx_r = ((void *)__pyx_v_pos);
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":138
+ * if pos[0] == c:
+ * return <void*>pos
+ * pos = pos - 1 # <<<<<<<<<<<<<<
+ * return NULL
+ *
+ */
+ __pyx_v_pos = (__pyx_v_pos - 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":139
+ * return <void*>pos
+ * pos = pos - 1
+ * return NULL # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = NULL;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
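+
+/* A minimal sketch of how _my_memrchr behaves, assuming glibc-style
+ * memrchr(3) semantics (a GNU extension, hence the local copy above).  It
+ * scans the last `n` bytes of `s` backwards and returns a pointer to the
+ * last occurrence of `c`, or NULL when `c` is absent:
+ *
+ *     const char *s = "a/b/c";
+ *     _my_memrchr((void *)s, '/', 5);   // == s + 3, the last '/'
+ *     _my_memrchr((void *)s, 'x', 5);   // == NULL, 'x' does not occur
+ */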
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":142
+ *
+ *
+ * def _py_memrchr(s, c): # <<<<<<<<<<<<<<
+ * """Just to expose _my_memrchr for testing.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__py_memrchr(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx__py_memrchr[] = "Just to expose _my_memrchr for testing.\n\n :param s: The Python string to search\n :param c: The character to search for\n :return: The offset to the last instance of 'c' in s\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__py_memrchr(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_s = 0;
+ PyObject *__pyx_v_c = 0;
+ void *__pyx_v__s;
+ void *__pyx_v_found;
+ int __pyx_v_length;
+ char *__pyx_v__c;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__s,&__pyx_n_s__c,0};
+ __Pyx_RefNannySetupContext("_py_memrchr");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__s);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__c);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_py_memrchr", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_py_memrchr") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_s = values[0];
+ __pyx_v_c = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_s = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_c = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_py_memrchr", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._py_memrchr");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":154
+ * cdef char *_c
+ *
+ * _s = PyString_AsString(s) # <<<<<<<<<<<<<<
+ * length = PyString_Size(s)
+ *
+ */
+ __pyx_v__s = PyString_AsString(__pyx_v_s);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":155
+ *
+ * _s = PyString_AsString(s)
+ * length = PyString_Size(s) # <<<<<<<<<<<<<<
+ *
+ * _c = PyString_AsString(c)
+ */
+ __pyx_v_length = PyString_Size(__pyx_v_s);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":157
+ * length = PyString_Size(s)
+ *
+ * _c = PyString_AsString(c) # <<<<<<<<<<<<<<
+ * assert PyString_Size(c) == 1,\
+ * 'Must be a single character string, not %s' % (c,)
+ */
+ __pyx_v__c = PyString_AsString(__pyx_v_c);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":158
+ *
+ * _c = PyString_AsString(c)
+ * assert PyString_Size(c) == 1,\ # <<<<<<<<<<<<<<
+ * 'Must be a single character string, not %s' % (c,)
+ * found = _my_memrchr(_s, _c[0], length)
+ */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ if (unlikely(!(PyString_Size(__pyx_v_c) == 1))) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":159
+ * _c = PyString_AsString(c)
+ * assert PyString_Size(c) == 1,\
+ * 'Must be a single character string, not %s' % (c,) # <<<<<<<<<<<<<<
+ * found = _my_memrchr(_s, _c[0], length)
+ * if found == NULL:
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_c);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_c);
+ __Pyx_GIVEREF(__pyx_v_c);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":160
+ * assert PyString_Size(c) == 1,\
+ * 'Must be a single character string, not %s' % (c,)
+ * found = _my_memrchr(_s, _c[0], length) # <<<<<<<<<<<<<<
+ * if found == NULL:
+ * return None
+ */
+ __pyx_v_found = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__my_memrchr(__pyx_v__s, (__pyx_v__c[0]), __pyx_v_length);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":161
+ * 'Must be a single character string, not %s' % (c,)
+ * found = _my_memrchr(_s, _c[0], length)
+ * if found == NULL: # <<<<<<<<<<<<<<
+ * return None
+ * return <char*>found - <char*>_s
+ */
+ __pyx_t_3 = (__pyx_v_found == NULL);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":162
+ * found = _my_memrchr(_s, _c[0], length)
+ * if found == NULL:
+ * return None # <<<<<<<<<<<<<<
+ * return <char*>found - <char*>_s
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":163
+ * if found == NULL:
+ * return None
+ * return <char*>found - <char*>_s # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyInt_FromLong((((char *)__pyx_v_found) - ((char *)__pyx_v__s))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._py_memrchr");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
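+
+/* Usage sketch for _py_memrchr, derived from the wrapper above (values are
+ * illustrative).  Called from Python:
+ *
+ *     _py_memrchr('a/b/c', '/')   # -> 3, the offset of the last '/'
+ *     _py_memrchr('abc', '/')     # -> None, character not present
+ *
+ * `c` must be a single-character string, otherwise the assert above raises
+ * AssertionError.
+ */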
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":166
+ *
+ *
+ * cdef object safe_string_from_size(char *s, Py_ssize_t size): # <<<<<<<<<<<<<<
+ * if size < 0:
+ * raise AssertionError(
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_safe_string_from_size(char *__pyx_v_s, Py_ssize_t __pyx_v_size) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ __Pyx_RefNannySetupContext("safe_string_from_size");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":167
+ *
+ * cdef object safe_string_from_size(char *s, Py_ssize_t size):
+ * if size < 0: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * 'tried to create a string with an invalid size: %d'
+ */
+ __pyx_t_1 = (__pyx_v_size < 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":170
+ * raise AssertionError(
+ * 'tried to create a string with an invalid size: %d'
+ * % (size)) # <<<<<<<<<<<<<<
+ * return PyString_FromStringAndSize(s, size)
+ *
+ */
+ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_2), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":171
+ * 'tried to create a string with an invalid size: %d'
+ * % (size))
+ * return PyString_FromStringAndSize(s, size) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PyString_FromStringAndSize(__pyx_v_s, __pyx_v_size); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.safe_string_from_size");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
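+
+/* Note: safe_string_from_size is a guarded PyString_FromStringAndSize; a
+ * negative size raises AssertionError instead of silently building a bogus
+ * string.  Illustratively, safe_string_from_size(buf, 3) returns a new
+ * 3-byte string copied from buf, which may legitimately contain NUL bytes.
+ */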
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":174
+ *
+ *
+ * cdef int _is_aligned(void *ptr): # cannot_raise # <<<<<<<<<<<<<<
+ * """Is this pointer aligned to an integer size offset?
+ *
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__is_aligned(void *__pyx_v_ptr) {
+ int __pyx_r;
+ __Pyx_RefNannySetupContext("_is_aligned");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":179
+ * :return: 1 if this pointer is aligned, 0 otherwise.
+ * """
+ * return ((<intptr_t>ptr) & ((sizeof(int))-1)) == 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = ((((intptr_t)__pyx_v_ptr) & ((sizeof(int)) - 1)) == 0);
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
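+
+/* Illustrative example, assuming sizeof(int) == 4: _is_aligned only tests
+ * the low bits of the pointer value, so
+ *
+ *     _is_aligned((void *)0x1000)   // -> 1, multiple of 4
+ *     _is_aligned((void *)0x1002)   // -> 0, not int-aligned
+ *
+ * It is used below to decide whether the word-at-a-time fast path in
+ * _cmp_by_dirs can be taken.
+ */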
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":182
+ *
+ *
+ * cdef int _cmp_by_dirs(char *path1, int size1, char *path2, int size2): # cannot_raise # <<<<<<<<<<<<<<
+ * cdef unsigned char *cur1
+ * cdef unsigned char *cur2
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_by_dirs(char *__pyx_v_path1, int __pyx_v_size1, char *__pyx_v_path2, int __pyx_v_size2) {
+ unsigned char *__pyx_v_cur1;
+ unsigned char *__pyx_v_cur2;
+ unsigned char *__pyx_v_end1;
+ unsigned char *__pyx_v_end2;
+ int *__pyx_v_cur_int1;
+ int *__pyx_v_cur_int2;
+ int *__pyx_v_end_int1;
+ int *__pyx_v_end_int2;
+ int __pyx_r;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ size_t __pyx_t_6;
+ __Pyx_RefNannySetupContext("_cmp_by_dirs");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":192
+ * cdef int *end_int2
+ *
+ * if path1 == path2 and size1 == size2: # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_1 = (__pyx_v_path1 == __pyx_v_path2);
+ if (__pyx_t_1) {
+ __pyx_t_2 = (__pyx_v_size1 == __pyx_v_size2);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":193
+ *
+ * if path1 == path2 and size1 == size2:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * end1 = <unsigned char*>path1+size1
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":195
+ * return 0
+ *
+ * end1 = <unsigned char*>path1+size1 # <<<<<<<<<<<<<<
+ * end2 = <unsigned char*>path2+size2
+ *
+ */
+ __pyx_v_end1 = (((unsigned char *)__pyx_v_path1) + __pyx_v_size1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":196
+ *
+ * end1 = <unsigned char*>path1+size1
+ * end2 = <unsigned char*>path2+size2 # <<<<<<<<<<<<<<
+ *
+ * # Use 32-bit comparisons for the matching portion of the string.
+ */
+ __pyx_v_end2 = (((unsigned char *)__pyx_v_path2) + __pyx_v_size2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":203
+ * # 99% of the time, these will be aligned, but in case they aren't just skip
+ * # this loop
+ * if _is_aligned(path1) and _is_aligned(path2): # <<<<<<<<<<<<<<
+ * cur_int1 = <int*>path1
+ * cur_int2 = <int*>path2
+ */
+ __pyx_t_4 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__is_aligned(__pyx_v_path1);
+ if (__pyx_t_4) {
+ __pyx_t_5 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__is_aligned(__pyx_v_path2);
+ __pyx_t_3 = __pyx_t_5;
+ } else {
+ __pyx_t_3 = __pyx_t_4;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":204
+ * # this loop
+ * if _is_aligned(path1) and _is_aligned(path2):
+ * cur_int1 = <int*>path1 # <<<<<<<<<<<<<<
+ * cur_int2 = <int*>path2
+ * end_int1 = <int*>(path1 + size1 - (size1 % sizeof(int)))
+ */
+ __pyx_v_cur_int1 = ((int *)__pyx_v_path1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":205
+ * if _is_aligned(path1) and _is_aligned(path2):
+ * cur_int1 = <int*>path1
+ * cur_int2 = <int*>path2 # <<<<<<<<<<<<<<
+ * end_int1 = <int*>(path1 + size1 - (size1 % sizeof(int)))
+ * end_int2 = <int*>(path2 + size2 - (size2 % sizeof(int)))
+ */
+ __pyx_v_cur_int2 = ((int *)__pyx_v_path2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":206
+ * cur_int1 = <int*>path1
+ * cur_int2 = <int*>path2
+ * end_int1 = <int*>(path1 + size1 - (size1 % sizeof(int))) # <<<<<<<<<<<<<<
+ * end_int2 = <int*>(path2 + size2 - (size2 % sizeof(int)))
+ *
+ */
+ __pyx_t_6 = (sizeof(int));
+ if (unlikely(__pyx_t_6 == 0)) {
+ PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero");
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_v_end_int1 = ((int *)((__pyx_v_path1 + __pyx_v_size1) - (__pyx_v_size1 % __pyx_t_6)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":207
+ * cur_int2 = <int*>path2
+ * end_int1 = <int*>(path1 + size1 - (size1 % sizeof(int)))
+ * end_int2 = <int*>(path2 + size2 - (size2 % sizeof(int))) # <<<<<<<<<<<<<<
+ *
+ * while cur_int1 < end_int1 and cur_int2 < end_int2:
+ */
+ __pyx_t_6 = (sizeof(int));
+ if (unlikely(__pyx_t_6 == 0)) {
+ PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero");
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_v_end_int2 = ((int *)((__pyx_v_path2 + __pyx_v_size2) - (__pyx_v_size2 % __pyx_t_6)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":209
+ * end_int2 = <int*>(path2 + size2 - (size2 % sizeof(int)))
+ *
+ * while cur_int1 < end_int1 and cur_int2 < end_int2: # <<<<<<<<<<<<<<
+ * if cur_int1[0] != cur_int2[0]:
+ * break
+ */
+ while (1) {
+ __pyx_t_3 = (__pyx_v_cur_int1 < __pyx_v_end_int1);
+ if (__pyx_t_3) {
+ __pyx_t_1 = (__pyx_v_cur_int2 < __pyx_v_end_int2);
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = __pyx_t_3;
+ }
+ if (!__pyx_t_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":210
+ *
+ * while cur_int1 < end_int1 and cur_int2 < end_int2:
+ * if cur_int1[0] != cur_int2[0]: # <<<<<<<<<<<<<<
+ * break
+ * cur_int1 = cur_int1 + 1
+ */
+ __pyx_t_2 = ((__pyx_v_cur_int1[0]) != (__pyx_v_cur_int2[0]));
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":211
+ * while cur_int1 < end_int1 and cur_int2 < end_int2:
+ * if cur_int1[0] != cur_int2[0]:
+ * break # <<<<<<<<<<<<<<
+ * cur_int1 = cur_int1 + 1
+ * cur_int2 = cur_int2 + 1
+ */
+ goto __pyx_L6_break;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":212
+ * if cur_int1[0] != cur_int2[0]:
+ * break
+ * cur_int1 = cur_int1 + 1 # <<<<<<<<<<<<<<
+ * cur_int2 = cur_int2 + 1
+ *
+ */
+ __pyx_v_cur_int1 = (__pyx_v_cur_int1 + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":213
+ * break
+ * cur_int1 = cur_int1 + 1
+ * cur_int2 = cur_int2 + 1 # <<<<<<<<<<<<<<
+ *
+ * cur1 = <unsigned char*>cur_int1
+ */
+ __pyx_v_cur_int2 = (__pyx_v_cur_int2 + 1);
+ }
+ __pyx_L6_break:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":215
+ * cur_int2 = cur_int2 + 1
+ *
+ * cur1 = <unsigned char*>cur_int1 # <<<<<<<<<<<<<<
+ * cur2 = <unsigned char*>cur_int2
+ * else:
+ */
+ __pyx_v_cur1 = ((unsigned char *)__pyx_v_cur_int1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":216
+ *
+ * cur1 = <unsigned char*>cur_int1
+ * cur2 = <unsigned char*>cur_int2 # <<<<<<<<<<<<<<
+ * else:
+ * cur1 = <unsigned char*>path1
+ */
+ __pyx_v_cur2 = ((unsigned char *)__pyx_v_cur_int2);
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":218
+ * cur2 = <unsigned char*>cur_int2
+ * else:
+ * cur1 = <unsigned char*>path1 # <<<<<<<<<<<<<<
+ * cur2 = <unsigned char*>path2
+ *
+ */
+ __pyx_v_cur1 = ((unsigned char *)__pyx_v_path1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":219
+ * else:
+ * cur1 = <unsigned char*>path1
+ * cur2 = <unsigned char*>path2 # <<<<<<<<<<<<<<
+ *
+ * while cur1 < end1 and cur2 < end2:
+ */
+ __pyx_v_cur2 = ((unsigned char *)__pyx_v_path2);
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":221
+ * cur2 = <unsigned char*>path2
+ *
+ * while cur1 < end1 and cur2 < end2: # <<<<<<<<<<<<<<
+ * if cur1[0] == cur2[0]:
+ * # This character matches, just go to the next one
+ */
+ while (1) {
+ __pyx_t_2 = (__pyx_v_cur1 < __pyx_v_end1);
+ if (__pyx_t_2) {
+ __pyx_t_3 = (__pyx_v_cur2 < __pyx_v_end2);
+ __pyx_t_1 = __pyx_t_3;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ }
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":222
+ *
+ * while cur1 < end1 and cur2 < end2:
+ * if cur1[0] == cur2[0]: # <<<<<<<<<<<<<<
+ * # This character matches, just go to the next one
+ * cur1 = cur1 + 1
+ */
+ __pyx_t_1 = ((__pyx_v_cur1[0]) == (__pyx_v_cur2[0]));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":224
+ * if cur1[0] == cur2[0]:
+ * # This character matches, just go to the next one
+ * cur1 = cur1 + 1 # <<<<<<<<<<<<<<
+ * cur2 = cur2 + 1
+ * continue
+ */
+ __pyx_v_cur1 = (__pyx_v_cur1 + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":225
+ * # This character matches, just go to the next one
+ * cur1 = cur1 + 1
+ * cur2 = cur2 + 1 # <<<<<<<<<<<<<<
+ * continue
+ * # The current characters do not match
+ */
+ __pyx_v_cur2 = (__pyx_v_cur2 + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":226
+ * cur1 = cur1 + 1
+ * cur2 = cur2 + 1
+ * continue # <<<<<<<<<<<<<<
+ * # The current characters do not match
+ * if cur1[0] == c'/':
+ */
+ goto __pyx_L8_continue;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":228
+ * continue
+ * # The current characters do not match
+ * if cur1[0] == c'/': # <<<<<<<<<<<<<<
+ * return -1 # Reached the end of path1 segment first
+ * elif cur2[0] == c'/':
+ */
+ __pyx_t_1 = ((__pyx_v_cur1[0]) == '/');
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":229
+ * # The current characters do not match
+ * if cur1[0] == c'/':
+ * return -1 # Reached the end of path1 segment first # <<<<<<<<<<<<<<
+ * elif cur2[0] == c'/':
+ * return 1 # Reached the end of path2 segment first
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+ goto __pyx_L11;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":230
+ * if cur1[0] == c'/':
+ * return -1 # Reached the end of path1 segment first
+ * elif cur2[0] == c'/': # <<<<<<<<<<<<<<
+ * return 1 # Reached the end of path2 segment first
+ * elif cur1[0] < cur2[0]:
+ */
+ __pyx_t_1 = ((__pyx_v_cur2[0]) == '/');
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":231
+ * return -1 # Reached the end of path1 segment first
+ * elif cur2[0] == c'/':
+ * return 1 # Reached the end of path2 segment first # <<<<<<<<<<<<<<
+ * elif cur1[0] < cur2[0]:
+ * return -1
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L11;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":232
+ * elif cur2[0] == c'/':
+ * return 1 # Reached the end of path2 segment first
+ * elif cur1[0] < cur2[0]: # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __pyx_t_1 = ((__pyx_v_cur1[0]) < (__pyx_v_cur2[0]));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":233
+ * return 1 # Reached the end of path2 segment first
+ * elif cur1[0] < cur2[0]:
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 1
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+ goto __pyx_L11;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":235
+ * return -1
+ * else:
+ * return 1 # <<<<<<<<<<<<<<
+ *
+ * # We reached the end of at least one of the strings
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+ }
+ __pyx_L11:;
+ __pyx_L8_continue:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":238
+ *
+ * # We reached the end of at least one of the strings
+ * if cur1 < end1: # <<<<<<<<<<<<<<
+ * return 1 # Not at the end of cur1, must be at the end of cur2
+ * if cur2 < end2:
+ */
+ __pyx_t_1 = (__pyx_v_cur1 < __pyx_v_end1);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":239
+ * # We reached the end of at least one of the strings
+ * if cur1 < end1:
+ * return 1 # Not at the end of cur1, must be at the end of cur2 # <<<<<<<<<<<<<<
+ * if cur2 < end2:
+ * return -1 # At the end of cur1, but not at cur2
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":240
+ * if cur1 < end1:
+ * return 1 # Not at the end of cur1, must be at the end of cur2
+ * if cur2 < end2: # <<<<<<<<<<<<<<
+ * return -1 # At the end of cur1, but not at cur2
+ * # We reached the end of both strings
+ */
+ __pyx_t_1 = (__pyx_v_cur2 < __pyx_v_end2);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":241
+ * return 1 # Not at the end of cur1, must be at the end of cur2
+ * if cur2 < end2:
+ * return -1 # At the end of cur1, but not at cur2 # <<<<<<<<<<<<<<
+ * # We reached the end of both strings
+ * return 0
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":243
+ * return -1 # At the end of cur1, but not at cur2
+ * # We reached the end of both strings
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_WriteUnraisable("bzrlib._dirstate_helpers_pyx._cmp_by_dirs");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
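+
+/* Sketch of the ordering implemented above: matching prefixes are skipped
+ * an int at a time when both pointers are aligned, then the first differing
+ * byte decides, with '/' sorting before every other byte.  For example,
+ *
+ *     _cmp_by_dirs("a/b", 3, "a-b", 3)   // negative: "a/b" sorts first
+ *
+ * even though '-' (0x2d) is below '/' (0x2f) in a plain byte comparison.
+ */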
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":246
+ *
+ *
+ * def cmp_by_dirs(path1, path2): # <<<<<<<<<<<<<<
+ * """Compare two paths directory by directory.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_cmp_by_dirs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx_cmp_by_dirs[] = "Compare two paths directory by directory.\n\n This is equivalent to doing::\n\n cmp(path1.split('/'), path2.split('/'))\n\n The idea is that you should compare path components separately. This\n differs from plain ``cmp(path1, path2)`` for paths like ``'a-b'`` and\n ``a/b``. \"a-b\" comes after \"a\" but would come before \"a/b\" lexically.\n\n :param path1: first path\n :param path2: second path\n :return: negative number if ``path1`` comes first,\n 0 if paths are equal,\n and positive number if ``path2`` sorts first\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_cmp_by_dirs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_path1 = 0;
+ PyObject *__pyx_v_path2 = 0;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__path1,&__pyx_n_s__path2,0};
+ __Pyx_RefNannySetupContext("cmp_by_dirs");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__path1);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__path2);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("cmp_by_dirs", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "cmp_by_dirs") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_path1 = values[0];
+ __pyx_v_path2 = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_path1 = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_path2 = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("cmp_by_dirs", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.cmp_by_dirs");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":263
+ * and positive number if ``path2`` sorts first
+ * """
+ * if not PyString_CheckExact(path1): # <<<<<<<<<<<<<<
+ * raise TypeError("'path1' must be a plain string, not %s: %r"
+ * % (type(path1), path1))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_path1));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":265
+ * if not PyString_CheckExact(path1):
+ * raise TypeError("'path1' must be a plain string, not %s: %r"
+ * % (type(path1), path1)) # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(path2):
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ */
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_path1)));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(__pyx_v_path1)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_path1)));
+ __Pyx_INCREF(__pyx_v_path1);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_path1);
+ __Pyx_GIVEREF(__pyx_v_path1);
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_3), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":266
+ * raise TypeError("'path1' must be a plain string, not %s: %r"
+ * % (type(path1), path1))
+ * if not PyString_CheckExact(path2): # <<<<<<<<<<<<<<
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ * % (type(path2), path2))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_path2));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":268
+ * if not PyString_CheckExact(path2):
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ * % (type(path2), path2)) # <<<<<<<<<<<<<<
+ * return _cmp_by_dirs(PyString_AsString(path1),
+ * PyString_Size(path1),
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_path2)));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(__pyx_v_path2)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_path2)));
+ __Pyx_INCREF(__pyx_v_path2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_path2);
+ __Pyx_GIVEREF(__pyx_v_path2);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_4), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":269
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ * % (type(path2), path2))
+ * return _cmp_by_dirs(PyString_AsString(path1), # <<<<<<<<<<<<<<
+ * PyString_Size(path1),
+ * PyString_AsString(path2),
+ */
+ __Pyx_XDECREF(__pyx_r);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":272
+ * PyString_Size(path1),
+ * PyString_AsString(path2),
+ * PyString_Size(path2)) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_by_dirs(PyString_AsString(__pyx_v_path1), PyString_Size(__pyx_v_path1), PyString_AsString(__pyx_v_path2), PyString_Size(__pyx_v_path2))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.cmp_by_dirs");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
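+
+/* Usage sketch for cmp_by_dirs, matching the split('/') semantics from the
+ * docstring (illustrative values).  Called from Python:
+ *
+ *     cmp_by_dirs('a/b', 'a-b')   # negative: ['a', 'b'] < ['a-b']
+ *     cmp_by_dirs('a/b', 'a/b')   # 0
+ *     cmp_by_dirs('b', 'a/c')     # positive: ['b'] > ['a', 'c']
+ *
+ * Non-string arguments raise TypeError, as checked above.
+ */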
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":275
+ *
+ *
+ * def _cmp_path_by_dirblock(path1, path2): # <<<<<<<<<<<<<<
+ * """Compare two paths based on what directory they are in.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock[] = "Compare two paths based on what directory they are in.\n\n    This generates a sort order, such that all children of a directory are\n    sorted together, and grandchildren are in the same order as the\n    children appear. But all grandchildren come after all children.\n\n    In other words, all entries in a directory are sorted together, and\n    directories are sorted in cmp_by_dirs order.\n\n    :param path1: first path\n    :param path2: the second path\n    :return: negative number if ``path1`` comes first,\n        0 if paths are equal\n        and a positive number if ``path2`` sorts first\n    ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_path1 = 0;
+ PyObject *__pyx_v_path2 = 0;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__path1,&__pyx_n_s__path2,0};
+ __Pyx_RefNannySetupContext("_cmp_path_by_dirblock");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__path1);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__path2);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_cmp_path_by_dirblock", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_cmp_path_by_dirblock") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_path1 = values[0];
+ __pyx_v_path2 = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_path1 = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_path2 = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_cmp_path_by_dirblock", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._cmp_path_by_dirblock");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":291
+ * and a positive number if ``path2`` sorts first
+ * """
+ * if not PyString_CheckExact(path1): # <<<<<<<<<<<<<<
+ * raise TypeError("'path1' must be a plain string, not %s: %r"
+ * % (type(path1), path1))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_path1));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":293
+ * if not PyString_CheckExact(path1):
+ * raise TypeError("'path1' must be a plain string, not %s: %r"
+ * % (type(path1), path1)) # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(path2):
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ */
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_path1)));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(__pyx_v_path1)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_path1)));
+ __Pyx_INCREF(__pyx_v_path1);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_path1);
+ __Pyx_GIVEREF(__pyx_v_path1);
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_3), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":294
+ * raise TypeError("'path1' must be a plain string, not %s: %r"
+ * % (type(path1), path1))
+ * if not PyString_CheckExact(path2): # <<<<<<<<<<<<<<
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ * % (type(path2), path2))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_path2));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":296
+ * if not PyString_CheckExact(path2):
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ * % (type(path2), path2)) # <<<<<<<<<<<<<<
+ * return _cmp_path_by_dirblock_intern(PyString_AsString(path1),
+ * PyString_Size(path1),
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_path2)));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(__pyx_v_path2)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_path2)));
+ __Pyx_INCREF(__pyx_v_path2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_path2);
+ __Pyx_GIVEREF(__pyx_v_path2);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_4), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":297
+ * raise TypeError("'path2' must be a plain string, not %s: %r"
+ * % (type(path2), path2))
+ * return _cmp_path_by_dirblock_intern(PyString_AsString(path1), # <<<<<<<<<<<<<<
+ * PyString_Size(path1),
+ * PyString_AsString(path2),
+ */
+ __Pyx_XDECREF(__pyx_r);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":300
+ * PyString_Size(path1),
+ * PyString_AsString(path2),
+ * PyString_Size(path2)) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock_intern(PyString_AsString(__pyx_v_path1), PyString_Size(__pyx_v_path1), PyString_AsString(__pyx_v_path2), PyString_Size(__pyx_v_path2))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 297; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._cmp_path_by_dirblock");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
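+
+/* Usage sketch for _cmp_path_by_dirblock (illustrative values): paths are
+ * ordered by containing directory first, then by basename, so all direct
+ * children of a directory sort before any grandchildren.  From Python:
+ *
+ *     _cmp_path_by_dirblock('a/c', 'a/b/d')   # negative: dir 'a' < dir 'a/b'
+ *     _cmp_path_by_dirblock('a/b', 'a/c')     # negative: same dir, 'b' < 'c'
+ */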
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":303
+ *
+ *
+ * cdef int _cmp_path_by_dirblock_intern(char *path1, int path1_len, # <<<<<<<<<<<<<<
+ * char *path2, int path2_len): # cannot_raise
+ * """Compare two paths by what directory they are in.
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock_intern(char *__pyx_v_path1, int __pyx_v_path1_len, char *__pyx_v_path2, int __pyx_v_path2_len) {
+ char *__pyx_v_dirname1;
+ int __pyx_v_dirname1_len;
+ char *__pyx_v_dirname2;
+ int __pyx_v_dirname2_len;
+ char *__pyx_v_basename1;
+ int __pyx_v_basename1_len;
+ char *__pyx_v_basename2;
+ int __pyx_v_basename2_len;
+ int __pyx_v_cur_len;
+ int __pyx_v_cmp_val;
+ int __pyx_r;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ __Pyx_RefNannySetupContext("_cmp_path_by_dirblock_intern");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":320
+ * cdef int cmp_val
+ *
+ * if path1_len == 0 and path2_len == 0: # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_1 = (__pyx_v_path1_len == 0);
+ if (__pyx_t_1) {
+ __pyx_t_2 = (__pyx_v_path2_len == 0);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":321
+ *
+ * if path1_len == 0 and path2_len == 0:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * if path1 == path2 and path1_len == path2_len:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":323
+ * return 0
+ *
+ * if path1 == path2 and path1_len == path2_len: # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_3 = (__pyx_v_path1 == __pyx_v_path2);
+ if (__pyx_t_3) {
+ __pyx_t_1 = (__pyx_v_path1_len == __pyx_v_path2_len);
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = __pyx_t_3;
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":324
+ *
+ * if path1 == path2 and path1_len == path2_len:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * if path1_len == 0:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":326
+ * return 0
+ *
+ * if path1_len == 0: # <<<<<<<<<<<<<<
+ * return -1
+ *
+ */
+ __pyx_t_2 = (__pyx_v_path1_len == 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":327
+ *
+ * if path1_len == 0:
+ * return -1 # <<<<<<<<<<<<<<
+ *
+ * if path2_len == 0:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":329
+ * return -1
+ *
+ * if path2_len == 0: # <<<<<<<<<<<<<<
+ * return 1
+ *
+ */
+ __pyx_t_2 = (__pyx_v_path2_len == 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":330
+ *
+ * if path2_len == 0:
+ * return 1 # <<<<<<<<<<<<<<
+ *
+ * basename1 = <char*>_my_memrchr(path1, c'/', path1_len)
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":332
+ * return 1
+ *
+ * basename1 = <char*>_my_memrchr(path1, c'/', path1_len) # <<<<<<<<<<<<<<
+ *
+ * if basename1 == NULL:
+ */
+ __pyx_v_basename1 = ((char *)__pyx_f_6bzrlib_21_dirstate_helpers_pyx__my_memrchr(__pyx_v_path1, '/', __pyx_v_path1_len));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":334
+ * basename1 = <char*>_my_memrchr(path1, c'/', path1_len)
+ *
+ * if basename1 == NULL: # <<<<<<<<<<<<<<
+ * basename1 = path1
+ * basename1_len = path1_len
+ */
+ __pyx_t_2 = (__pyx_v_basename1 == NULL);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":335
+ *
+ * if basename1 == NULL:
+ * basename1 = path1 # <<<<<<<<<<<<<<
+ * basename1_len = path1_len
+ * dirname1 = ''
+ */
+ __pyx_v_basename1 = __pyx_v_path1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":336
+ * if basename1 == NULL:
+ * basename1 = path1
+ * basename1_len = path1_len # <<<<<<<<<<<<<<
+ * dirname1 = ''
+ * dirname1_len = 0
+ */
+ __pyx_v_basename1_len = __pyx_v_path1_len;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":337
+ * basename1 = path1
+ * basename1_len = path1_len
+ * dirname1 = '' # <<<<<<<<<<<<<<
+ * dirname1_len = 0
+ * else:
+ */
+ __pyx_v_dirname1 = __pyx_k_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":338
+ * basename1_len = path1_len
+ * dirname1 = ''
+ * dirname1_len = 0 # <<<<<<<<<<<<<<
+ * else:
+ * dirname1 = path1
+ */
+ __pyx_v_dirname1_len = 0;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":340
+ * dirname1_len = 0
+ * else:
+ * dirname1 = path1 # <<<<<<<<<<<<<<
+ * dirname1_len = basename1 - path1
+ * basename1 = basename1 + 1
+ */
+ __pyx_v_dirname1 = __pyx_v_path1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":341
+ * else:
+ * dirname1 = path1
+ * dirname1_len = basename1 - path1 # <<<<<<<<<<<<<<
+ * basename1 = basename1 + 1
+ * basename1_len = path1_len - dirname1_len - 1
+ */
+ __pyx_v_dirname1_len = (__pyx_v_basename1 - __pyx_v_path1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":342
+ * dirname1 = path1
+ * dirname1_len = basename1 - path1
+ * basename1 = basename1 + 1 # <<<<<<<<<<<<<<
+ * basename1_len = path1_len - dirname1_len - 1
+ *
+ */
+ __pyx_v_basename1 = (__pyx_v_basename1 + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":343
+ * dirname1_len = basename1 - path1
+ * basename1 = basename1 + 1
+ * basename1_len = path1_len - dirname1_len - 1 # <<<<<<<<<<<<<<
+ *
+ * basename2 = <char*>_my_memrchr(path2, c'/', path2_len)
+ */
+ __pyx_v_basename1_len = ((__pyx_v_path1_len - __pyx_v_dirname1_len) - 1);
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":345
+ * basename1_len = path1_len - dirname1_len - 1
+ *
+ * basename2 = <char*>_my_memrchr(path2, c'/', path2_len) # <<<<<<<<<<<<<<
+ *
+ * if basename2 == NULL:
+ */
+ __pyx_v_basename2 = ((char *)__pyx_f_6bzrlib_21_dirstate_helpers_pyx__my_memrchr(__pyx_v_path2, '/', __pyx_v_path2_len));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":347
+ * basename2 = <char*>_my_memrchr(path2, c'/', path2_len)
+ *
+ * if basename2 == NULL: # <<<<<<<<<<<<<<
+ * basename2 = path2
+ * basename2_len = path2_len
+ */
+ __pyx_t_2 = (__pyx_v_basename2 == NULL);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":348
+ *
+ * if basename2 == NULL:
+ * basename2 = path2 # <<<<<<<<<<<<<<
+ * basename2_len = path2_len
+ * dirname2 = ''
+ */
+ __pyx_v_basename2 = __pyx_v_path2;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":349
+ * if basename2 == NULL:
+ * basename2 = path2
+ * basename2_len = path2_len # <<<<<<<<<<<<<<
+ * dirname2 = ''
+ * dirname2_len = 0
+ */
+ __pyx_v_basename2_len = __pyx_v_path2_len;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":350
+ * basename2 = path2
+ * basename2_len = path2_len
+ * dirname2 = '' # <<<<<<<<<<<<<<
+ * dirname2_len = 0
+ * else:
+ */
+ __pyx_v_dirname2 = __pyx_k_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":351
+ * basename2_len = path2_len
+ * dirname2 = ''
+ * dirname2_len = 0 # <<<<<<<<<<<<<<
+ * else:
+ * dirname2 = path2
+ */
+ __pyx_v_dirname2_len = 0;
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":353
+ * dirname2_len = 0
+ * else:
+ * dirname2 = path2 # <<<<<<<<<<<<<<
+ * dirname2_len = basename2 - path2
+ * basename2 = basename2 + 1
+ */
+ __pyx_v_dirname2 = __pyx_v_path2;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":354
+ * else:
+ * dirname2 = path2
+ * dirname2_len = basename2 - path2 # <<<<<<<<<<<<<<
+ * basename2 = basename2 + 1
+ * basename2_len = path2_len - dirname2_len - 1
+ */
+ __pyx_v_dirname2_len = (__pyx_v_basename2 - __pyx_v_path2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":355
+ * dirname2 = path2
+ * dirname2_len = basename2 - path2
+ * basename2 = basename2 + 1 # <<<<<<<<<<<<<<
+ * basename2_len = path2_len - dirname2_len - 1
+ *
+ */
+ __pyx_v_basename2 = (__pyx_v_basename2 + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":356
+ * dirname2_len = basename2 - path2
+ * basename2 = basename2 + 1
+ * basename2_len = path2_len - dirname2_len - 1 # <<<<<<<<<<<<<<
+ *
+ * cmp_val = _cmp_by_dirs(dirname1, dirname1_len,
+ */
+ __pyx_v_basename2_len = ((__pyx_v_path2_len - __pyx_v_dirname2_len) - 1);
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":359
+ *
+ * cmp_val = _cmp_by_dirs(dirname1, dirname1_len,
+ * dirname2, dirname2_len) # <<<<<<<<<<<<<<
+ * if cmp_val != 0:
+ * return cmp_val
+ */
+ __pyx_v_cmp_val = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_by_dirs(__pyx_v_dirname1, __pyx_v_dirname1_len, __pyx_v_dirname2, __pyx_v_dirname2_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":360
+ * cmp_val = _cmp_by_dirs(dirname1, dirname1_len,
+ * dirname2, dirname2_len)
+ * if cmp_val != 0: # <<<<<<<<<<<<<<
+ * return cmp_val
+ *
+ */
+ __pyx_t_2 = (__pyx_v_cmp_val != 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":361
+ * dirname2, dirname2_len)
+ * if cmp_val != 0:
+ * return cmp_val # <<<<<<<<<<<<<<
+ *
+ * cur_len = basename1_len
+ */
+ __pyx_r = __pyx_v_cmp_val;
+ goto __pyx_L0;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":363
+ * return cmp_val
+ *
+ * cur_len = basename1_len # <<<<<<<<<<<<<<
+ * if basename2_len < basename1_len:
+ * cur_len = basename2_len
+ */
+ __pyx_v_cur_len = __pyx_v_basename1_len;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":364
+ *
+ * cur_len = basename1_len
+ * if basename2_len < basename1_len: # <<<<<<<<<<<<<<
+ * cur_len = basename2_len
+ *
+ */
+ __pyx_t_2 = (__pyx_v_basename2_len < __pyx_v_basename1_len);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":365
+ * cur_len = basename1_len
+ * if basename2_len < basename1_len:
+ * cur_len = basename2_len # <<<<<<<<<<<<<<
+ *
+ * cmp_val = memcmp(basename1, basename2, cur_len)
+ */
+ __pyx_v_cur_len = __pyx_v_basename2_len;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":367
+ * cur_len = basename2_len
+ *
+ * cmp_val = memcmp(basename1, basename2, cur_len) # <<<<<<<<<<<<<<
+ * if cmp_val != 0:
+ * return cmp_val
+ */
+ __pyx_v_cmp_val = memcmp(__pyx_v_basename1, __pyx_v_basename2, __pyx_v_cur_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":368
+ *
+ * cmp_val = memcmp(basename1, basename2, cur_len)
+ * if cmp_val != 0: # <<<<<<<<<<<<<<
+ * return cmp_val
+ * if basename1_len == basename2_len:
+ */
+ __pyx_t_2 = (__pyx_v_cmp_val != 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":369
+ * cmp_val = memcmp(basename1, basename2, cur_len)
+ * if cmp_val != 0:
+ * return cmp_val # <<<<<<<<<<<<<<
+ * if basename1_len == basename2_len:
+ * return 0
+ */
+ __pyx_r = __pyx_v_cmp_val;
+ goto __pyx_L0;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":370
+ * if cmp_val != 0:
+ * return cmp_val
+ * if basename1_len == basename2_len: # <<<<<<<<<<<<<<
+ * return 0
+ * if basename1_len < basename2_len:
+ */
+ __pyx_t_2 = (__pyx_v_basename1_len == __pyx_v_basename2_len);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":371
+ * return cmp_val
+ * if basename1_len == basename2_len:
+ * return 0 # <<<<<<<<<<<<<<
+ * if basename1_len < basename2_len:
+ * return -1
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":372
+ * if basename1_len == basename2_len:
+ * return 0
+ * if basename1_len < basename2_len: # <<<<<<<<<<<<<<
+ * return -1
+ * return 1
+ */
+ __pyx_t_2 = (__pyx_v_basename1_len < __pyx_v_basename2_len);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":373
+ * return 0
+ * if basename1_len < basename2_len:
+ * return -1 # <<<<<<<<<<<<<<
+ * return 1
+ *
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":374
+ * if basename1_len < basename2_len:
+ * return -1
+ * return 1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
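+ /* A rough pure-Python rendering of the comparison finished above, following the
+  * .pyx source quoted in these comments; the string-level helper call and Python 2
+  * cmp() are stand-ins here for the char*/length form used in the quoted source:
+  *
+  *     def _cmp_path_by_dirblock_sketch(path1, path2):
+  *         dirname1, _, basename1 = path1.rpartition('/')   # dirname is '' when no '/'
+  *         dirname2, _, basename2 = path2.rpartition('/')
+  *         cmp_val = _cmp_by_dirs(dirname1, dirname2)        # directory parts first
+  *         if cmp_val != 0:
+  *             return cmp_val
+  *         return cmp(basename1, basename2)                  # then bytewise basenames
+  *
+  * so entries are grouped by containing directory before being ordered by name.
+  */
+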
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":377
+ *
+ *
+ * def _bisect_path_left(paths, path): # <<<<<<<<<<<<<<
+ * """Return the index where to insert path into paths.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__bisect_path_left(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx__bisect_path_left[] = "Return the index where to insert path into paths.\n\n This uses a path-wise comparison so we get::\n a\n a-b\n a=b\n a/b\n Rather than::\n a\n a-b\n a/b\n a=b\n :param paths: A list of paths to search through\n :param path: A single path to insert\n :return: An offset where 'path' can be inserted.\n :seealso: bisect.bisect_left\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__bisect_path_left(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_paths = 0;
+ PyObject *__pyx_v_path = 0;
+ int __pyx_v__lo;
+ int __pyx_v__hi;
+ int __pyx_v__mid;
+ char *__pyx_v_path_cstr;
+ int __pyx_v_path_size;
+ char *__pyx_v_cur_cstr;
+ int __pyx_v_cur_size;
+ void *__pyx_v_cur;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__paths,&__pyx_n_s__path,0};
+ __Pyx_RefNannySetupContext("_bisect_path_left");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__paths);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__path);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_bisect_path_left", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_bisect_path_left") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_paths = values[0];
+ __pyx_v_path = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_paths = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_path = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_bisect_path_left", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._bisect_path_left");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":404
+ * cdef void *cur
+ *
+ * if not PyList_CheckExact(paths): # <<<<<<<<<<<<<<
+ * raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ * % (type(paths), paths))
+ */
+ __pyx_t_1 = (!PyList_CheckExact(__pyx_v_paths));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":406
+ * if not PyList_CheckExact(paths):
+ * raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ * % (type(paths), paths)) # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(path):
+ * raise TypeError("you must pass a string for 'path' not: %s %r"
+ */
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 406; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_paths)));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(__pyx_v_paths)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_paths)));
+ __Pyx_INCREF(__pyx_v_paths);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_paths);
+ __Pyx_GIVEREF(__pyx_v_paths);
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_6), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 406; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":407
+ * raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ * % (type(paths), paths))
+ * if not PyString_CheckExact(path): # <<<<<<<<<<<<<<
+ * raise TypeError("you must pass a string for 'path' not: %s %r"
+ * % (type(path), path))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_path));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":409
+ * if not PyString_CheckExact(path):
+ * raise TypeError("you must pass a string for 'path' not: %s %r"
+ * % (type(path), path)) # <<<<<<<<<<<<<<
+ *
+ * _hi = len(paths)
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_path)));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(__pyx_v_path)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_path)));
+ __Pyx_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_7), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":411
+ * % (type(path), path))
+ *
+ * _hi = len(paths) # <<<<<<<<<<<<<<
+ * _lo = 0
+ *
+ */
+ __pyx_t_4 = PyObject_Length(__pyx_v_paths); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v__hi = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":412
+ *
+ * _hi = len(paths)
+ * _lo = 0 # <<<<<<<<<<<<<<
+ *
+ * path_cstr = PyString_AsString(path)
+ */
+ __pyx_v__lo = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":414
+ * _lo = 0
+ *
+ * path_cstr = PyString_AsString(path) # <<<<<<<<<<<<<<
+ * path_size = PyString_Size(path)
+ *
+ */
+ __pyx_v_path_cstr = PyString_AsString(__pyx_v_path);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":415
+ *
+ * path_cstr = PyString_AsString(path)
+ * path_size = PyString_Size(path) # <<<<<<<<<<<<<<
+ *
+ * while _lo < _hi:
+ */
+ __pyx_v_path_size = PyString_Size(__pyx_v_path);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":417
+ * path_size = PyString_Size(path)
+ *
+ * while _lo < _hi: # <<<<<<<<<<<<<<
+ * _mid = (_lo + _hi) / 2
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v__lo < __pyx_v__hi);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":418
+ *
+ * while _lo < _hi:
+ * _mid = (_lo + _hi) / 2 # <<<<<<<<<<<<<<
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ */
+ __pyx_v__mid = __Pyx_div_long((__pyx_v__lo + __pyx_v__hi), 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":419
+ * while _lo < _hi:
+ * _mid = (_lo + _hi) / 2
+ * cur = PyList_GetItem_object_void(paths, _mid) # <<<<<<<<<<<<<<
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur)
+ */
+ __pyx_v_cur = PyList_GET_ITEM(__pyx_v_paths, __pyx_v__mid);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":420
+ * _mid = (_lo + _hi) / 2
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ * cur_cstr = PyString_AS_STRING_void(cur) # <<<<<<<<<<<<<<
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_path_by_dirblock_intern(cur_cstr, cur_size,
+ */
+ __pyx_v_cur_cstr = PyString_AS_STRING(__pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":421
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur) # <<<<<<<<<<<<<<
+ * if _cmp_path_by_dirblock_intern(cur_cstr, cur_size,
+ * path_cstr, path_size) < 0:
+ */
+ __pyx_v_cur_size = PyString_GET_SIZE(__pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":423
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_path_by_dirblock_intern(cur_cstr, cur_size,
+ * path_cstr, path_size) < 0: # <<<<<<<<<<<<<<
+ * _lo = _mid + 1
+ * else:
+ */
+ __pyx_t_1 = (__pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock_intern(__pyx_v_cur_cstr, __pyx_v_cur_size, __pyx_v_path_cstr, __pyx_v_path_size) < 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":424
+ * if _cmp_path_by_dirblock_intern(cur_cstr, cur_size,
+ * path_cstr, path_size) < 0:
+ * _lo = _mid + 1 # <<<<<<<<<<<<<<
+ * else:
+ * _hi = _mid
+ */
+ __pyx_v__lo = (__pyx_v__mid + 1);
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":426
+ * _lo = _mid + 1
+ * else:
+ * _hi = _mid # <<<<<<<<<<<<<<
+ * return _lo
+ *
+ */
+ __pyx_v__hi = __pyx_v__mid;
+ }
+ __pyx_L10:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":427
+ * else:
+ * _hi = _mid
+ * return _lo # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyInt_FromLong(__pyx_v__lo); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._bisect_path_left");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
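+ /* A rough pure-Python rendering of the bisection above, following the .pyx source
+  * quoted in these comments; cmp_path_by_dirblock stands for the string-level form
+  * of the _cmp_path_by_dirblock_intern call shown in that source:
+  *
+  *     _lo, _hi = 0, len(paths)
+  *     while _lo < _hi:
+  *         _mid = (_lo + _hi) // 2
+  *         if cmp_path_by_dirblock(paths[_mid], path) < 0:
+  *             _lo = _mid + 1      # everything up to _mid sorts before path
+  *         else:
+  *             _hi = _mid
+  *     return _lo                  # first index whose entry does not sort before path
+  *
+  * This mirrors bisect.bisect_left with path-wise ordering, so for the docstring's
+  * example the list is kept as a, a-b, a=b, a/b rather than a, a-b, a/b, a=b.
+  */
+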
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":430
+ *
+ *
+ * def _bisect_path_right(paths, path): # <<<<<<<<<<<<<<
+ * """Return the index where to insert path into paths.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__bisect_path_right(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx__bisect_path_right[] = "Return the index where to insert path into paths.\n\n This uses a path-wise comparison so we get::\n a\n a-b\n a=b\n a/b\n Rather than::\n a\n a-b\n a/b\n a=b\n :param paths: A list of paths to search through\n :param path: A single path to insert\n :return: An offset where 'path' can be inserted.\n :seealso: bisect.bisect_right\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__bisect_path_right(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_paths = 0;
+ PyObject *__pyx_v_path = 0;
+ int __pyx_v__lo;
+ int __pyx_v__hi;
+ int __pyx_v__mid;
+ char *__pyx_v_path_cstr;
+ int __pyx_v_path_size;
+ char *__pyx_v_cur_cstr;
+ int __pyx_v_cur_size;
+ void *__pyx_v_cur;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__paths,&__pyx_n_s__path,0};
+ __Pyx_RefNannySetupContext("_bisect_path_right");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__paths);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__path);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_bisect_path_right", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_bisect_path_right") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_paths = values[0];
+ __pyx_v_path = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_paths = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_path = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_bisect_path_right", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._bisect_path_right");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":457
+ * cdef void *cur
+ *
+ * if not PyList_CheckExact(paths): # <<<<<<<<<<<<<<
+ * raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ * % (type(paths), paths))
+ */
+ __pyx_t_1 = (!PyList_CheckExact(__pyx_v_paths));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":459
+ * if not PyList_CheckExact(paths):
+ * raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ * % (type(paths), paths)) # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(path):
+ * raise TypeError("you must pass a string for 'path' not: %s %r"
+ */
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_paths)));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(__pyx_v_paths)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_paths)));
+ __Pyx_INCREF(__pyx_v_paths);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_paths);
+ __Pyx_GIVEREF(__pyx_v_paths);
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_6), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":460
+ * raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ * % (type(paths), paths))
+ * if not PyString_CheckExact(path): # <<<<<<<<<<<<<<
+ * raise TypeError("you must pass a string for 'path' not: %s %r"
+ * % (type(path), path))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_path));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":462
+ * if not PyString_CheckExact(path):
+ * raise TypeError("you must pass a string for 'path' not: %s %r"
+ * % (type(path), path)) # <<<<<<<<<<<<<<
+ *
+ * _hi = len(paths)
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_path)));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(__pyx_v_path)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_path)));
+ __Pyx_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_7), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 461; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 461; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 461; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":464
+ * % (type(path), path))
+ *
+ * _hi = len(paths) # <<<<<<<<<<<<<<
+ * _lo = 0
+ *
+ */
+ __pyx_t_4 = PyObject_Length(__pyx_v_paths); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v__hi = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":465
+ *
+ * _hi = len(paths)
+ * _lo = 0 # <<<<<<<<<<<<<<
+ *
+ * path_cstr = PyString_AsString(path)
+ */
+ __pyx_v__lo = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":467
+ * _lo = 0
+ *
+ * path_cstr = PyString_AsString(path) # <<<<<<<<<<<<<<
+ * path_size = PyString_Size(path)
+ *
+ */
+ __pyx_v_path_cstr = PyString_AsString(__pyx_v_path);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":468
+ *
+ * path_cstr = PyString_AsString(path)
+ * path_size = PyString_Size(path) # <<<<<<<<<<<<<<
+ *
+ * while _lo < _hi:
+ */
+ __pyx_v_path_size = PyString_Size(__pyx_v_path);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":470
+ * path_size = PyString_Size(path)
+ *
+ * while _lo < _hi: # <<<<<<<<<<<<<<
+ * _mid = (_lo + _hi) / 2
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v__lo < __pyx_v__hi);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":471
+ *
+ * while _lo < _hi:
+ * _mid = (_lo + _hi) / 2 # <<<<<<<<<<<<<<
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ */
+ __pyx_v__mid = __Pyx_div_long((__pyx_v__lo + __pyx_v__hi), 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":472
+ * while _lo < _hi:
+ * _mid = (_lo + _hi) / 2
+ * cur = PyList_GetItem_object_void(paths, _mid) # <<<<<<<<<<<<<<
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur)
+ */
+ __pyx_v_cur = PyList_GET_ITEM(__pyx_v_paths, __pyx_v__mid);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":473
+ * _mid = (_lo + _hi) / 2
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ * cur_cstr = PyString_AS_STRING_void(cur) # <<<<<<<<<<<<<<
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_path_by_dirblock_intern(path_cstr, path_size,
+ */
+ __pyx_v_cur_cstr = PyString_AS_STRING(__pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":474
+ * cur = PyList_GetItem_object_void(paths, _mid)
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur) # <<<<<<<<<<<<<<
+ * if _cmp_path_by_dirblock_intern(path_cstr, path_size,
+ * cur_cstr, cur_size) < 0:
+ */
+ __pyx_v_cur_size = PyString_GET_SIZE(__pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":476
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_path_by_dirblock_intern(path_cstr, path_size,
+ * cur_cstr, cur_size) < 0: # <<<<<<<<<<<<<<
+ * _hi = _mid
+ * else:
+ */
+ __pyx_t_1 = (__pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock_intern(__pyx_v_path_cstr, __pyx_v_path_size, __pyx_v_cur_cstr, __pyx_v_cur_size) < 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":477
+ * if _cmp_path_by_dirblock_intern(path_cstr, path_size,
+ * cur_cstr, cur_size) < 0:
+ * _hi = _mid # <<<<<<<<<<<<<<
+ * else:
+ * _lo = _mid + 1
+ */
+ __pyx_v__hi = __pyx_v__mid;
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":479
+ * _hi = _mid
+ * else:
+ * _lo = _mid + 1 # <<<<<<<<<<<<<<
+ * return _lo
+ *
+ */
+ __pyx_v__lo = (__pyx_v__mid + 1);
+ }
+ __pyx_L10:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":480
+ * else:
+ * _lo = _mid + 1
+ * return _lo # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyInt_FromLong(__pyx_v__lo); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._bisect_path_right");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
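+ /* The right-hand variant above differs from _bisect_path_left only in which side
+  * equal entries fall on; in rough pure-Python form (same stand-in helper name as
+  * in the sketch for the left variant):
+  *
+  *     if cmp_path_by_dirblock(path, paths[_mid]) < 0:
+  *         _hi = _mid              # path sorts strictly before the probe
+  *     else:
+  *         _lo = _mid + 1          # equal entries stay to the left of the answer
+  *
+  * so the returned index falls after any entries equal to path (bisect.bisect_right).
+  */
+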
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":483
+ *
+ *
+ * def bisect_dirblock(dirblocks, dirname, lo=0, hi=None, cache=None): # <<<<<<<<<<<<<<
+ * """Return the index where to insert dirname into the dirblocks.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_bisect_dirblock(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx_bisect_dirblock[] = "Return the index where to insert dirname into the dirblocks.\n\n The return value idx is such that all directories blocks in dirblock[:idx]\n have names < dirname, and all blocks in dirblock[idx:] have names >=\n dirname.\n\n Optional args lo (default 0) and hi (default len(dirblocks)) bound the\n slice of a to be searched.\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_bisect_dirblock(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_dirblocks = 0;
+ PyObject *__pyx_v_dirname = 0;
+ PyObject *__pyx_v_lo = 0;
+ PyObject *__pyx_v_hi = 0;
+ PyObject *__pyx_v_cache = 0;
+ int __pyx_v__lo;
+ int __pyx_v__hi;
+ int __pyx_v__mid;
+ char *__pyx_v_dirname_cstr;
+ int __pyx_v_dirname_size;
+ char *__pyx_v_cur_cstr;
+ int __pyx_v_cur_size;
+ void *__pyx_v_cur;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ int __pyx_t_5;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__dirblocks,&__pyx_n_s__dirname,&__pyx_n_s__lo,&__pyx_n_s__hi,&__pyx_n_s__cache,0};
+ __Pyx_RefNannySetupContext("bisect_dirblock");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[5] = {0,0,0,0,0};
+ values[2] = ((PyObject *)__pyx_int_0);
+ values[3] = ((PyObject *)Py_None);
+ values[4] = ((PyObject *)Py_None);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dirblocks);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dirname);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("bisect_dirblock", 0, 2, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__lo);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__hi);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__cache);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "bisect_dirblock") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_dirblocks = values[0];
+ __pyx_v_dirname = values[1];
+ __pyx_v_lo = values[2];
+ __pyx_v_hi = values[3];
+ __pyx_v_cache = values[4];
+ } else {
+ __pyx_v_lo = ((PyObject *)__pyx_int_0);
+ __pyx_v_hi = ((PyObject *)Py_None);
+ __pyx_v_cache = ((PyObject *)Py_None);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5:
+ __pyx_v_cache = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4:
+ __pyx_v_hi = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3:
+ __pyx_v_lo = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2:
+ __pyx_v_dirname = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_dirblocks = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("bisect_dirblock", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.bisect_dirblock");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":502
+ * cdef void *cur
+ *
+ * if not PyList_CheckExact(dirblocks): # <<<<<<<<<<<<<<
+ * raise TypeError("you must pass a python list for 'dirblocks' not: %s %r"
+ * % (type(dirblocks), dirblocks))
+ */
+ __pyx_t_1 = (!PyList_CheckExact(__pyx_v_dirblocks));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":504
+ * if not PyList_CheckExact(dirblocks):
+ * raise TypeError("you must pass a python list for 'dirblocks' not: %s %r"
+ * % (type(dirblocks), dirblocks)) # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(dirname):
+ * raise TypeError("you must pass a string for dirname not: %s %r"
+ */
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_dirblocks)));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(__pyx_v_dirblocks)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_dirblocks)));
+ __Pyx_INCREF(__pyx_v_dirblocks);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_dirblocks);
+ __Pyx_GIVEREF(__pyx_v_dirblocks);
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_8), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":505
+ * raise TypeError("you must pass a python list for 'dirblocks' not: %s %r"
+ * % (type(dirblocks), dirblocks))
+ * if not PyString_CheckExact(dirname): # <<<<<<<<<<<<<<
+ * raise TypeError("you must pass a string for dirname not: %s %r"
+ * % (type(dirname), dirname))
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_dirname));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":507
+ * if not PyString_CheckExact(dirname):
+ * raise TypeError("you must pass a string for dirname not: %s %r"
+ * % (type(dirname), dirname)) # <<<<<<<<<<<<<<
+ * if hi is None:
+ * _hi = len(dirblocks)
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_dirname)));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)Py_TYPE(__pyx_v_dirname)));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(__pyx_v_dirname)));
+ __Pyx_INCREF(__pyx_v_dirname);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_dirname);
+ __Pyx_GIVEREF(__pyx_v_dirname);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_9), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":508
+ * raise TypeError("you must pass a string for dirname not: %s %r"
+ * % (type(dirname), dirname))
+ * if hi is None: # <<<<<<<<<<<<<<
+ * _hi = len(dirblocks)
+ * else:
+ */
+ __pyx_t_1 = (__pyx_v_hi == Py_None);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":509
+ * % (type(dirname), dirname))
+ * if hi is None:
+ * _hi = len(dirblocks) # <<<<<<<<<<<<<<
+ * else:
+ * _hi = hi
+ */
+ __pyx_t_4 = PyObject_Length(__pyx_v_dirblocks); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v__hi = __pyx_t_4;
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":511
+ * _hi = len(dirblocks)
+ * else:
+ * _hi = hi # <<<<<<<<<<<<<<
+ *
+ * _lo = lo
+ */
+ __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_v_hi); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v__hi = __pyx_t_5;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":513
+ * _hi = hi
+ *
+ * _lo = lo # <<<<<<<<<<<<<<
+ * dirname_cstr = PyString_AsString(dirname)
+ * dirname_size = PyString_Size(dirname)
+ */
+ __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_v_lo); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v__lo = __pyx_t_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":514
+ *
+ * _lo = lo
+ * dirname_cstr = PyString_AsString(dirname) # <<<<<<<<<<<<<<
+ * dirname_size = PyString_Size(dirname)
+ *
+ */
+ __pyx_v_dirname_cstr = PyString_AsString(__pyx_v_dirname);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":515
+ * _lo = lo
+ * dirname_cstr = PyString_AsString(dirname)
+ * dirname_size = PyString_Size(dirname) # <<<<<<<<<<<<<<
+ *
+ * while _lo < _hi:
+ */
+ __pyx_v_dirname_size = PyString_Size(__pyx_v_dirname);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":517
+ * dirname_size = PyString_Size(dirname)
+ *
+ * while _lo < _hi: # <<<<<<<<<<<<<<
+ * _mid = (_lo + _hi) / 2
+ * # Grab the dirname for the current dirblock
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v__lo < __pyx_v__hi);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":518
+ *
+ * while _lo < _hi:
+ * _mid = (_lo + _hi) / 2 # <<<<<<<<<<<<<<
+ * # Grab the dirname for the current dirblock
+ * # cur = dirblocks[_mid][0]
+ */
+ __pyx_v__mid = __Pyx_div_long((__pyx_v__lo + __pyx_v__hi), 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":522
+ * # cur = dirblocks[_mid][0]
+ * cur = PyTuple_GetItem_void_void(
+ * PyList_GetItem_object_void(dirblocks, _mid), 0) # <<<<<<<<<<<<<<
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur)
+ */
+ __pyx_v_cur = PyTuple_GET_ITEM(PyList_GET_ITEM(__pyx_v_dirblocks, __pyx_v__mid), 0);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":523
+ * cur = PyTuple_GetItem_void_void(
+ * PyList_GetItem_object_void(dirblocks, _mid), 0)
+ * cur_cstr = PyString_AS_STRING_void(cur) # <<<<<<<<<<<<<<
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_by_dirs(cur_cstr, cur_size, dirname_cstr, dirname_size) < 0:
+ */
+ __pyx_v_cur_cstr = PyString_AS_STRING(__pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":524
+ * PyList_GetItem_object_void(dirblocks, _mid), 0)
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur) # <<<<<<<<<<<<<<
+ * if _cmp_by_dirs(cur_cstr, cur_size, dirname_cstr, dirname_size) < 0:
+ * _lo = _mid + 1
+ */
+ __pyx_v_cur_size = PyString_GET_SIZE(__pyx_v_cur);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":525
+ * cur_cstr = PyString_AS_STRING_void(cur)
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_by_dirs(cur_cstr, cur_size, dirname_cstr, dirname_size) < 0: # <<<<<<<<<<<<<<
+ * _lo = _mid + 1
+ * else:
+ */
+ __pyx_t_1 = (__pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_by_dirs(__pyx_v_cur_cstr, __pyx_v_cur_size, __pyx_v_dirname_cstr, __pyx_v_dirname_size) < 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":526
+ * cur_size = PyString_GET_SIZE_void(cur)
+ * if _cmp_by_dirs(cur_cstr, cur_size, dirname_cstr, dirname_size) < 0:
+ * _lo = _mid + 1 # <<<<<<<<<<<<<<
+ * else:
+ * _hi = _mid
+ */
+ __pyx_v__lo = (__pyx_v__mid + 1);
+ goto __pyx_L11;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":528
+ * _lo = _mid + 1
+ * else:
+ * _hi = _mid # <<<<<<<<<<<<<<
+ * return _lo
+ *
+ */
+ __pyx_v__hi = __pyx_v__mid;
+ }
+ __pyx_L11:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":529
+ * else:
+ * _hi = _mid
+ * return _lo # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyInt_FromLong(__pyx_v__lo); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.bisect_dirblock");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
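+ /* A rough pure-Python rendering of bisect_dirblock above, following the quoted
+  * .pyx source: each dirblock is keyed by its first element (the dirname), lo/hi
+  * default to 0 and len(dirblocks), and _cmp_by_dirs, taken at string level here,
+  * supplies the directory ordering:
+  *
+  *     _lo = lo
+  *     _hi = len(dirblocks) if hi is None else hi
+  *     while _lo < _hi:
+  *         _mid = (_lo + _hi) // 2
+  *         cur = dirblocks[_mid][0]            # dirname of the current dirblock
+  *         if _cmp_by_dirs(cur, dirname) < 0:
+  *             _lo = _mid + 1
+  *         else:
+  *             _hi = _mid
+  *     return _lo
+  */
+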
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":544
+ * cdef char *next # Pointer to the end of this record
+ *
+ * def __init__(self, text, state): # <<<<<<<<<<<<<<
+ * self.state = state
+ * self.text = text
+ */
+
+static int __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_6Reader___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_6Reader___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_text = 0;
+ PyObject *__pyx_v_state = 0;
+ int __pyx_r;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__text,&__pyx_n_s__state,0};
+ __Pyx_RefNannySetupContext("__init__");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__text);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__state);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_text = values[0];
+ __pyx_v_state = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_text = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_state = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.Reader.__init__");
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":545
+ *
+ * def __init__(self, text, state):
+ * self.state = state # <<<<<<<<<<<<<<
+ * self.text = text
+ * self.text_cstr = PyString_AsString(text)
+ */
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state = __pyx_v_state;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":546
+ * def __init__(self, text, state):
+ * self.state = state
+ * self.text = text # <<<<<<<<<<<<<<
+ * self.text_cstr = PyString_AsString(text)
+ * self.text_size = PyString_Size(text)
+ */
+ __Pyx_INCREF(__pyx_v_text);
+ __Pyx_GIVEREF(__pyx_v_text);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text = __pyx_v_text;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":547
+ * self.state = state
+ * self.text = text
+ * self.text_cstr = PyString_AsString(text) # <<<<<<<<<<<<<<
+ * self.text_size = PyString_Size(text)
+ * self.end_cstr = self.text_cstr + self.text_size
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text_cstr = PyString_AsString(__pyx_v_text);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":548
+ * self.text = text
+ * self.text_cstr = PyString_AsString(text)
+ * self.text_size = PyString_Size(text) # <<<<<<<<<<<<<<
+ * self.end_cstr = self.text_cstr + self.text_size
+ * self.cur_cstr = self.text_cstr
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text_size = PyString_Size(__pyx_v_text);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":549
+ * self.text_cstr = PyString_AsString(text)
+ * self.text_size = PyString_Size(text)
+ * self.end_cstr = self.text_cstr + self.text_size # <<<<<<<<<<<<<<
+ * self.cur_cstr = self.text_cstr
+ *
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->end_cstr = (((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text_cstr + ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":550
+ * self.text_size = PyString_Size(text)
+ * self.end_cstr = self.text_cstr + self.text_size
+ * self.cur_cstr = self.text_cstr # <<<<<<<<<<<<<<
+ *
+ * cdef char *get_next(self, int *size) except NULL:
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->cur_cstr = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->text_cstr;
+
+ __pyx_r = 0;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":552
+ * self.cur_cstr = self.text_cstr
+ *
+ * cdef char *get_next(self, int *size) except NULL: # <<<<<<<<<<<<<<
+ * """Return a pointer to the start of the next field."""
+ * cdef char *next
+ */
+
+static char *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader_get_next(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_v_self, int *__pyx_v_size) {
+ char *__pyx_v_next;
+ Py_ssize_t __pyx_v_extra_len;
+ char *__pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ __Pyx_RefNannySetupContext("get_next");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":557
+ * cdef Py_ssize_t extra_len
+ *
+ * if self.cur_cstr == NULL: # <<<<<<<<<<<<<<
+ * raise AssertionError('get_next() called when cur_str is NULL')
+ * elif self.cur_cstr >= self.end_cstr:
+ */
+ __pyx_t_1 = (__pyx_v_self->cur_cstr == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":558
+ *
+ * if self.cur_cstr == NULL:
+ * raise AssertionError('get_next() called when cur_str is NULL') # <<<<<<<<<<<<<<
+ * elif self.cur_cstr >= self.end_cstr:
+ * raise AssertionError('get_next() called when there are no chars'
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_10));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_10));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_10));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":559
+ * if self.cur_cstr == NULL:
+ * raise AssertionError('get_next() called when cur_str is NULL')
+ * elif self.cur_cstr >= self.end_cstr: # <<<<<<<<<<<<<<
+ * raise AssertionError('get_next() called when there are no chars'
+ * ' left')
+ */
+ __pyx_t_1 = (__pyx_v_self->cur_cstr >= __pyx_v_self->end_cstr);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":560
+ * raise AssertionError('get_next() called when cur_str is NULL')
+ * elif self.cur_cstr >= self.end_cstr:
+ * raise AssertionError('get_next() called when there are no chars' # <<<<<<<<<<<<<<
+ * ' left')
+ * next = self.cur_cstr
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_11));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_11));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_11));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":562
+ * raise AssertionError('get_next() called when there are no chars'
+ * ' left')
+ * next = self.cur_cstr # <<<<<<<<<<<<<<
+ * self.cur_cstr = <char*>memchr(next, c'\0', self.end_cstr - next)
+ * if self.cur_cstr == NULL:
+ */
+ __pyx_v_next = __pyx_v_self->cur_cstr;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":563
+ * ' left')
+ * next = self.cur_cstr
+ * self.cur_cstr = <char*>memchr(next, c'\0', self.end_cstr - next) # <<<<<<<<<<<<<<
+ * if self.cur_cstr == NULL:
+ * extra_len = self.end_cstr - next
+ */
+ __pyx_v_self->cur_cstr = ((char *)memchr(__pyx_v_next, '\x00', (__pyx_v_self->end_cstr - __pyx_v_next)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":564
+ * next = self.cur_cstr
+ * self.cur_cstr = <char*>memchr(next, c'\0', self.end_cstr - next)
+ * if self.cur_cstr == NULL: # <<<<<<<<<<<<<<
+ * extra_len = self.end_cstr - next
+ * raise errors.DirstateCorrupt(self.state,
+ */
+ __pyx_t_1 = (__pyx_v_self->cur_cstr == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":565
+ * self.cur_cstr = <char*>memchr(next, c'\0', self.end_cstr - next)
+ * if self.cur_cstr == NULL:
+ * extra_len = self.end_cstr - next # <<<<<<<<<<<<<<
+ * raise errors.DirstateCorrupt(self.state,
+ * 'failed to find trailing NULL (\\0).'
+ */
+ __pyx_v_extra_len = (__pyx_v_self->end_cstr - __pyx_v_next);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":566
+ * if self.cur_cstr == NULL:
+ * extra_len = self.end_cstr - next
+ * raise errors.DirstateCorrupt(self.state, # <<<<<<<<<<<<<<
+ * 'failed to find trailing NULL (\\0).'
+ * ' Trailing garbage: %r'
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__DirstateCorrupt); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":569
+ * 'failed to find trailing NULL (\\0).'
+ * ' Trailing garbage: %r'
+ * % safe_string_from_size(next, extra_len)) # <<<<<<<<<<<<<<
+ * size[0] = self.cur_cstr - next
+ * self.cur_cstr = self.cur_cstr + 1
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx_safe_string_from_size(__pyx_v_next, __pyx_v_extra_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_12), __pyx_t_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_self->state);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_self->state);
+ __Pyx_GIVEREF(__pyx_v_self->state);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_t_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":570
+ * ' Trailing garbage: %r'
+ * % safe_string_from_size(next, extra_len))
+ * size[0] = self.cur_cstr - next # <<<<<<<<<<<<<<
+ * self.cur_cstr = self.cur_cstr + 1
+ * return next
+ */
+ (__pyx_v_size[0]) = (__pyx_v_self->cur_cstr - __pyx_v_next);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":571
+ * % safe_string_from_size(next, extra_len))
+ * size[0] = self.cur_cstr - next
+ * self.cur_cstr = self.cur_cstr + 1 # <<<<<<<<<<<<<<
+ * return next
+ *
+ */
+ __pyx_v_self->cur_cstr = (__pyx_v_self->cur_cstr + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":572
+ * size[0] = self.cur_cstr - next
+ * self.cur_cstr = self.cur_cstr + 1
+ * return next # <<<<<<<<<<<<<<
+ *
+ * cdef object get_next_str(self):
+ */
+ __pyx_r = __pyx_v_next;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.Reader.get_next");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":574
+ * return next
+ *
+ * cdef object get_next_str(self): # <<<<<<<<<<<<<<
+ * """Get the next field as a Python string."""
+ * cdef int size
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader_get_next_str(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_v_self) {
+ int __pyx_v_size;
+ char *__pyx_v_next;
+ PyObject *__pyx_r = NULL;
+ char *__pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannySetupContext("get_next_str");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":578
+ * cdef int size
+ * cdef char *next
+ * next = self.get_next(&size) # <<<<<<<<<<<<<<
+ * return safe_string_from_size(next, size)
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next(__pyx_v_self, (&__pyx_v_size)); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 578; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_next = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":579
+ * cdef char *next
+ * next = self.get_next(&size)
+ * return safe_string_from_size(next, size) # <<<<<<<<<<<<<<
+ *
+ * cdef int _init(self) except -1:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx_safe_string_from_size(__pyx_v_next, __pyx_v_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.Reader.get_next_str");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":581
+ * return safe_string_from_size(next, size)
+ *
+ * cdef int _init(self) except -1: # <<<<<<<<<<<<<<
+ * """Get the pointer ready.
+ *
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader__init(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_v_self) {
+ char *__pyx_v_first;
+ int __pyx_v_size;
+ int __pyx_r;
+ char *__pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ __Pyx_RefNannySetupContext("_init");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":592
+ * cdef int size
+ * # The first field should be an empty string left over from the Header
+ * first = self.get_next(&size) # <<<<<<<<<<<<<<
+ * if first[0] != c'\0' and size == 0:
+ * raise AssertionError('First character should be null not: %s'
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next(__pyx_v_self, (&__pyx_v_size)); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_first = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":593
+ * # The first field should be an empty string left over from the Header
+ * first = self.get_next(&size)
+ * if first[0] != c'\0' and size == 0: # <<<<<<<<<<<<<<
+ * raise AssertionError('First character should be null not: %s'
+ * % (first,))
+ */
+ __pyx_t_2 = ((__pyx_v_first[0]) != '\x00');
+ if (__pyx_t_2) {
+ __pyx_t_3 = (__pyx_v_size == 0);
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":595
+ * if first[0] != c'\0' and size == 0:
+ * raise AssertionError('First character should be null not: %s'
+ * % (first,)) # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_5 = PyBytes_FromString(__pyx_v_first); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_13), __pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":596
+ * raise AssertionError('First character should be null not: %s'
+ * % (first,))
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * cdef object _get_entry(self, int num_trees, void **p_current_dirname,
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.Reader._init");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":598
+ * return 0
+ *
+ * cdef object _get_entry(self, int num_trees, void **p_current_dirname, # <<<<<<<<<<<<<<
+ * int *new_block):
+ * """Extract the next entry.
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader__get_entry(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_v_self, int __pyx_v_num_trees, void **__pyx_v_p_current_dirname, int *__pyx_v_new_block) {
+ StaticTuple *__pyx_v_path_name_file_id_key;
+ StaticTuple *__pyx_v_tmp;
+ char *__pyx_v_entry_size_cstr;
+ unsigned long __pyx_v_entry_size;
+ char *__pyx_v_executable_cstr;
+ int __pyx_v_is_executable;
+ char *__pyx_v_dirname_cstr;
+ char *__pyx_v_trailing;
+ int __pyx_v_cur_size;
+ int __pyx_v_i;
+ PyObject *__pyx_v_minikind;
+ PyObject *__pyx_v_fingerprint;
+ PyObject *__pyx_v_info;
+ PyObject *__pyx_v_dirname;
+ PyObject *__pyx_v_cur_dirname;
+ PyObject *__pyx_v_cur_basename;
+ PyObject *__pyx_v_cur_file_id;
+ PyObject *__pyx_v_trees;
+ PyObject *__pyx_v_ret;
+ PyObject *__pyx_r = NULL;
+ char *__pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ void *__pyx_t_6;
+ int __pyx_t_7;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ PyObject *__pyx_t_11 = NULL;
+ __Pyx_RefNannySetupContext("_get_entry");
+ __pyx_v_path_name_file_id_key = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_tmp = ((StaticTuple *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_minikind = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_fingerprint = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_info = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_dirname = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_cur_dirname = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_cur_basename = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_cur_file_id = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_trees = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_ret = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":634
+ *
+ * # Read the 'key' information (dirname, name, file_id)
+ * dirname_cstr = self.get_next(&cur_size) # <<<<<<<<<<<<<<
+ * # Check to see if we have started a new directory block.
+ * # If so, then we need to create a new dirname PyString, so that it can
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next(__pyx_v_self, (&__pyx_v_cur_size)); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_dirname_cstr = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":642
+ * # Do the cheap 'length of string' check first. If the string is a
+ * # different length, then we *have* to be a different directory.
+ * if (cur_size != PyString_GET_SIZE_void(p_current_dirname[0]) # <<<<<<<<<<<<<<
+ * or strncmp(dirname_cstr,
+ * # Extract the char* from our current dirname string. We
+ */
+ __pyx_t_2 = (__pyx_v_cur_size != PyString_GET_SIZE((__pyx_v_p_current_dirname[0])));
+ if (!__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":650
+ * # <object>
+ * PyString_AS_STRING_void(p_current_dirname[0]),
+ * cur_size+1) != 0): # <<<<<<<<<<<<<<
+ * dirname = safe_string_from_size(dirname_cstr, cur_size)
+ * p_current_dirname[0] = <void*>dirname
+ */
+ __pyx_t_3 = (strncmp(__pyx_v_dirname_cstr, PyString_AS_STRING((__pyx_v_p_current_dirname[0])), (__pyx_v_cur_size + 1)) != 0);
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":651
+ * PyString_AS_STRING_void(p_current_dirname[0]),
+ * cur_size+1) != 0):
+ * dirname = safe_string_from_size(dirname_cstr, cur_size) # <<<<<<<<<<<<<<
+ * p_current_dirname[0] = <void*>dirname
+ * new_block[0] = 1
+ */
+ __pyx_t_5 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx_safe_string_from_size(__pyx_v_dirname_cstr, __pyx_v_cur_size); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 651; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_dirname);
+ __pyx_v_dirname = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":652
+ * cur_size+1) != 0):
+ * dirname = safe_string_from_size(dirname_cstr, cur_size)
+ * p_current_dirname[0] = <void*>dirname # <<<<<<<<<<<<<<
+ * new_block[0] = 1
+ * else:
+ */
+ (__pyx_v_p_current_dirname[0]) = ((void *)__pyx_v_dirname);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":653
+ * dirname = safe_string_from_size(dirname_cstr, cur_size)
+ * p_current_dirname[0] = <void*>dirname
+ * new_block[0] = 1 # <<<<<<<<<<<<<<
+ * else:
+ * new_block[0] = 0
+ */
+ (__pyx_v_new_block[0]) = 1;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":655
+ * new_block[0] = 1
+ * else:
+ * new_block[0] = 0 # <<<<<<<<<<<<<<
+ *
+ * # Build up the key that will be used.
+ */
+ (__pyx_v_new_block[0]) = 0;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":660
+ * # By using <object>(void *) Pyrex will automatically handle the
+ * # Py_INCREF that we need.
+ * cur_dirname = <object>p_current_dirname[0] # <<<<<<<<<<<<<<
+ * # Use StaticTuple_New to pre-allocate, rather than creating a regular
+ * # tuple and passing it to the StaticTuple constructor.
+ */
+ __pyx_t_6 = (__pyx_v_p_current_dirname[0]);
+ __Pyx_INCREF(((PyObject *)__pyx_t_6));
+ __Pyx_DECREF(__pyx_v_cur_dirname);
+ __pyx_v_cur_dirname = ((PyObject *)__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":667
+ * # self.get_next_str(),
+ * # )
+ * tmp = StaticTuple_New(3) # <<<<<<<<<<<<<<
+ * Py_INCREF(cur_dirname); StaticTuple_SET_ITEM(tmp, 0, cur_dirname)
+ * cur_basename = self.get_next_str()
+ */
+ __pyx_t_5 = ((PyObject *)StaticTuple_New(3)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(((PyObject *)__pyx_v_tmp));
+ __pyx_v_tmp = ((StaticTuple *)__pyx_t_5);
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":668
+ * # )
+ * tmp = StaticTuple_New(3)
+ * Py_INCREF(cur_dirname); StaticTuple_SET_ITEM(tmp, 0, cur_dirname) # <<<<<<<<<<<<<<
+ * cur_basename = self.get_next_str()
+ * cur_file_id = self.get_next_str()
+ */
+ Py_INCREF(__pyx_v_cur_dirname);
+ StaticTuple_SET_ITEM(__pyx_v_tmp, 0, __pyx_v_cur_dirname);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":669
+ * tmp = StaticTuple_New(3)
+ * Py_INCREF(cur_dirname); StaticTuple_SET_ITEM(tmp, 0, cur_dirname)
+ * cur_basename = self.get_next_str() # <<<<<<<<<<<<<<
+ * cur_file_id = self.get_next_str()
+ * Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename)
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next_str(__pyx_v_self); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 669; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_cur_basename);
+ __pyx_v_cur_basename = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":670
+ * Py_INCREF(cur_dirname); StaticTuple_SET_ITEM(tmp, 0, cur_dirname)
+ * cur_basename = self.get_next_str()
+ * cur_file_id = self.get_next_str() # <<<<<<<<<<<<<<
+ * Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename)
+ * Py_INCREF(cur_file_id); StaticTuple_SET_ITEM(tmp, 2, cur_file_id)
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next_str(__pyx_v_self); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 670; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_cur_file_id);
+ __pyx_v_cur_file_id = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":671
+ * cur_basename = self.get_next_str()
+ * cur_file_id = self.get_next_str()
+ * Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename) # <<<<<<<<<<<<<<
+ * Py_INCREF(cur_file_id); StaticTuple_SET_ITEM(tmp, 2, cur_file_id)
+ * path_name_file_id_key = tmp
+ */
+ Py_INCREF(__pyx_v_cur_basename);
+ StaticTuple_SET_ITEM(__pyx_v_tmp, 1, __pyx_v_cur_basename);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":672
+ * cur_file_id = self.get_next_str()
+ * Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename)
+ * Py_INCREF(cur_file_id); StaticTuple_SET_ITEM(tmp, 2, cur_file_id) # <<<<<<<<<<<<<<
+ * path_name_file_id_key = tmp
+ *
+ */
+ Py_INCREF(__pyx_v_cur_file_id);
+ StaticTuple_SET_ITEM(__pyx_v_tmp, 2, __pyx_v_cur_file_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":673
+ * Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename)
+ * Py_INCREF(cur_file_id); StaticTuple_SET_ITEM(tmp, 2, cur_file_id)
+ * path_name_file_id_key = tmp # <<<<<<<<<<<<<<
+ *
+ * # Parse all of the per-tree information. current has the information in
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_v_tmp));
+ __Pyx_DECREF(((PyObject *)__pyx_v_path_name_file_id_key));
+ __pyx_v_path_name_file_id_key = __pyx_v_tmp;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":688
+ * # Especially since this code is pretty much fixed at a max of
+ * # 4GB.
+ * trees = [] # <<<<<<<<<<<<<<
+ * for i from 0 <= i < num_trees:
+ * minikind = self.get_next_str()
+ */
+ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __Pyx_DECREF(((PyObject *)__pyx_v_trees));
+ __pyx_v_trees = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":689
+ * # 4GB.
+ * trees = []
+ * for i from 0 <= i < num_trees: # <<<<<<<<<<<<<<
+ * minikind = self.get_next_str()
+ * fingerprint = self.get_next_str()
+ */
+ __pyx_t_7 = __pyx_v_num_trees;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_7; __pyx_v_i++) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":690
+ * trees = []
+ * for i from 0 <= i < num_trees:
+ * minikind = self.get_next_str() # <<<<<<<<<<<<<<
+ * fingerprint = self.get_next_str()
+ * entry_size_cstr = self.get_next(&cur_size)
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next_str(__pyx_v_self); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_minikind);
+ __pyx_v_minikind = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":691
+ * for i from 0 <= i < num_trees:
+ * minikind = self.get_next_str()
+ * fingerprint = self.get_next_str() # <<<<<<<<<<<<<<
+ * entry_size_cstr = self.get_next(&cur_size)
+ * entry_size = strtoul(entry_size_cstr, NULL, 10)
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next_str(__pyx_v_self); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_fingerprint);
+ __pyx_v_fingerprint = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":692
+ * minikind = self.get_next_str()
+ * fingerprint = self.get_next_str()
+ * entry_size_cstr = self.get_next(&cur_size) # <<<<<<<<<<<<<<
+ * entry_size = strtoul(entry_size_cstr, NULL, 10)
+ * executable_cstr = self.get_next(&cur_size)
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next(__pyx_v_self, (&__pyx_v_cur_size)); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_entry_size_cstr = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":693
+ * fingerprint = self.get_next_str()
+ * entry_size_cstr = self.get_next(&cur_size)
+ * entry_size = strtoul(entry_size_cstr, NULL, 10) # <<<<<<<<<<<<<<
+ * executable_cstr = self.get_next(&cur_size)
+ * is_executable = (executable_cstr[0] == c'y')
+ */
+ __pyx_v_entry_size = strtoul(__pyx_v_entry_size_cstr, NULL, 10);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":694
+ * entry_size_cstr = self.get_next(&cur_size)
+ * entry_size = strtoul(entry_size_cstr, NULL, 10)
+ * executable_cstr = self.get_next(&cur_size) # <<<<<<<<<<<<<<
+ * is_executable = (executable_cstr[0] == c'y')
+ * info = self.get_next_str()
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next(__pyx_v_self, (&__pyx_v_cur_size)); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_executable_cstr = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":695
+ * entry_size = strtoul(entry_size_cstr, NULL, 10)
+ * executable_cstr = self.get_next(&cur_size)
+ * is_executable = (executable_cstr[0] == c'y') # <<<<<<<<<<<<<<
+ * info = self.get_next_str()
+ * # TODO: If we want to use StaticTuple_New here we need to be pretty
+ */
+ __pyx_v_is_executable = ((__pyx_v_executable_cstr[0]) == 'y');
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":696
+ * executable_cstr = self.get_next(&cur_size)
+ * is_executable = (executable_cstr[0] == c'y')
+ * info = self.get_next_str() # <<<<<<<<<<<<<<
+ * # TODO: If we want to use StaticTuple_New here we need to be pretty
+ * # careful. We are relying on a bit of Pyrex
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next_str(__pyx_v_self); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_info);
+ __pyx_v_info = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":713
+ * minikind, # minikind
+ * fingerprint, # fingerprint
+ * entry_size, # size # <<<<<<<<<<<<<<
+ * is_executable,# executable
+ * info, # packed_stat or revision_id
+ */
+ __pyx_t_5 = PyLong_FromUnsignedLong(__pyx_v_entry_size); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 713; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":714
+ * fingerprint, # fingerprint
+ * entry_size, # size
+ * is_executable,# executable # <<<<<<<<<<<<<<
+ * info, # packed_stat or revision_id
+ * ))
+ */
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_is_executable); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":715
+ * entry_size, # size
+ * is_executable,# executable
+ * info, # packed_stat or revision_id # <<<<<<<<<<<<<<
+ * ))
+ *
+ */
+ __pyx_t_9 = PyTuple_New(5); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_v_minikind);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_minikind);
+ __Pyx_GIVEREF(__pyx_v_minikind);
+ __Pyx_INCREF(__pyx_v_fingerprint);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_fingerprint);
+ __Pyx_GIVEREF(__pyx_v_fingerprint);
+ PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_9, 3, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __Pyx_INCREF(__pyx_v_info);
+ PyTuple_SET_ITEM(__pyx_t_9, 4, __pyx_v_info);
+ __Pyx_GIVEREF(__pyx_v_info);
+ __pyx_t_5 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)), __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_10 = PyList_Append(((PyObject *)__pyx_v_trees), __pyx_t_8); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":719
+ *
+ * # The returned tuple is (key, [trees])
+ * ret = (path_name_file_id_key, trees) # <<<<<<<<<<<<<<
+ * # Ignore the trailing newline, but assert that it does exist, this
+ * # ensures that we always finish parsing a line on an end-of-entry
+ */
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_INCREF(((PyObject *)__pyx_v_path_name_file_id_key));
+ PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_path_name_file_id_key));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_path_name_file_id_key));
+ __Pyx_INCREF(((PyObject *)__pyx_v_trees));
+ PyTuple_SET_ITEM(__pyx_t_8, 1, ((PyObject *)__pyx_v_trees));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_trees));
+ if (!(likely(PyTuple_CheckExact(__pyx_t_8))||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_8)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_v_ret));
+ __pyx_v_ret = ((PyObject *)__pyx_t_8);
+ __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":723
+ * # ensures that we always finish parsing a line on an end-of-entry
+ * # marker.
+ * trailing = self.get_next(&cur_size) # <<<<<<<<<<<<<<
+ * if cur_size != 1 or trailing[0] != c'\n':
+ * raise errors.DirstateCorrupt(self.state,
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self->__pyx_vtab)->get_next(__pyx_v_self, (&__pyx_v_cur_size)); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_trailing = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":724
+ * # marker.
+ * trailing = self.get_next(&cur_size)
+ * if cur_size != 1 or trailing[0] != c'\n': # <<<<<<<<<<<<<<
+ * raise errors.DirstateCorrupt(self.state,
+ * 'Bad parse, we expected to end on \\n, not: %d %s: %s'
+ */
+ __pyx_t_4 = (__pyx_v_cur_size != 1);
+ if (!__pyx_t_4) {
+ __pyx_t_2 = ((__pyx_v_trailing[0]) != '\n');
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_4;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":725
+ * trailing = self.get_next(&cur_size)
+ * if cur_size != 1 or trailing[0] != c'\n':
+ * raise errors.DirstateCorrupt(self.state, # <<<<<<<<<<<<<<
+ * 'Bad parse, we expected to end on \\n, not: %d %s: %s'
+ * % (cur_size, safe_string_from_size(trailing, cur_size),
+ */
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__DirstateCorrupt); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":727
+ * raise errors.DirstateCorrupt(self.state,
+ * 'Bad parse, we expected to end on \\n, not: %d %s: %s'
+ * % (cur_size, safe_string_from_size(trailing, cur_size), # <<<<<<<<<<<<<<
+ * ret))
+ * return ret
+ */
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_cur_size); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_5 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx_safe_string_from_size(__pyx_v_trailing, __pyx_v_cur_size); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":728
+ * 'Bad parse, we expected to end on \\n, not: %d %s: %s'
+ * % (cur_size, safe_string_from_size(trailing, cur_size),
+ * ret)) # <<<<<<<<<<<<<<
+ * return ret
+ *
+ */
+ __pyx_t_11 = PyTuple_New(3); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)__pyx_v_ret));
+ PyTuple_SET_ITEM(__pyx_t_11, 2, ((PyObject *)__pyx_v_ret));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_ret));
+ __pyx_t_8 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_14), __pyx_t_11); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_INCREF(__pyx_v_self->state);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_v_self->state);
+ __Pyx_GIVEREF(__pyx_v_self->state);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_t_9, __pyx_t_11, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":729
+ * % (cur_size, safe_string_from_size(trailing, cur_size),
+ * ret))
+ * return ret # <<<<<<<<<<<<<<
+ *
+ * def _parse_dirblocks(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_ret));
+ __pyx_r = ((PyObject *)__pyx_v_ret);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.Reader._get_entry");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_path_name_file_id_key);
+ __Pyx_DECREF((PyObject *)__pyx_v_tmp);
+ __Pyx_DECREF(__pyx_v_minikind);
+ __Pyx_DECREF(__pyx_v_fingerprint);
+ __Pyx_DECREF(__pyx_v_info);
+ __Pyx_DECREF(__pyx_v_dirname);
+ __Pyx_DECREF(__pyx_v_cur_dirname);
+ __Pyx_DECREF(__pyx_v_cur_basename);
+ __Pyx_DECREF(__pyx_v_cur_file_id);
+ __Pyx_DECREF(__pyx_v_trees);
+ __Pyx_DECREF(__pyx_v_ret);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":731
+ * return ret
+ *
+ * def _parse_dirblocks(self): # <<<<<<<<<<<<<<
+ * """Parse all dirblocks in the state file."""
+ * cdef int num_trees
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_6Reader__parse_dirblocks(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx_6Reader__parse_dirblocks[] = "Parse all dirblocks in the state file.";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_6Reader__parse_dirblocks(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ int __pyx_v_num_trees;
+ PyObject *__pyx_v_current_block;
+ PyObject *__pyx_v_entry;
+ void *__pyx_v_current_dirname;
+ int __pyx_v_new_block;
+ int __pyx_v_expected_entry_count;
+ int __pyx_v_entry_count;
+ PyObject *__pyx_v_dirblocks;
+ PyObject *__pyx_v_obj;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ __Pyx_RefNannySetupContext("_parse_dirblocks");
+ __pyx_v_current_block = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_dirblocks = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_obj = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":741
+ * cdef int entry_count
+ *
+ * num_trees = self.state._num_present_parents() + 1 # <<<<<<<<<<<<<<
+ * expected_entry_count = self.state._num_entries
+ *
+ */
+ __pyx_t_1 = PyObject_GetAttr(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state, __pyx_n_s_15); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 741; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_num_trees = __pyx_t_3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":742
+ *
+ * num_trees = self.state._num_present_parents() + 1
+ * expected_entry_count = self.state._num_entries # <<<<<<<<<<<<<<
+ *
+ * # Ignore the first record
+ */
+ __pyx_t_1 = PyObject_GetAttr(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state, __pyx_n_s___num_entries); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 742; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 742; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_expected_entry_count = __pyx_t_3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":745
+ *
+ * # Ignore the first record
+ * self._init() # <<<<<<<<<<<<<<
+ *
+ * current_block = []
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->__pyx_vtab)->_init(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 745; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":747
+ * self._init()
+ *
+ * current_block = [] # <<<<<<<<<<<<<<
+ * dirblocks = [('', current_block), ('', [])]
+ * self.state._dirblocks = dirblocks
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 747; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(__pyx_v_current_block);
+ __pyx_v_current_block = ((PyObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":748
+ *
+ * current_block = []
+ * dirblocks = [('', current_block), ('', [])] # <<<<<<<<<<<<<<
+ * self.state._dirblocks = dirblocks
+ * obj = ''
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __Pyx_INCREF(__pyx_v_current_block);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_current_block);
+ __Pyx_GIVEREF(__pyx_v_current_block);
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_1 = 0;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_dirblocks));
+ __pyx_v_dirblocks = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":749
+ * current_block = []
+ * dirblocks = [('', current_block), ('', [])]
+ * self.state._dirblocks = dirblocks # <<<<<<<<<<<<<<
+ * obj = ''
+ * current_dirname = <void*>obj
+ */
+ if (PyObject_SetAttr(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state, __pyx_n_s___dirblocks, ((PyObject *)__pyx_v_dirblocks)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 749; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":750
+ * dirblocks = [('', current_block), ('', [])]
+ * self.state._dirblocks = dirblocks
+ * obj = '' # <<<<<<<<<<<<<<
+ * current_dirname = <void*>obj
+ * new_block = 0
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ __Pyx_DECREF(__pyx_v_obj);
+ __pyx_v_obj = ((PyObject *)__pyx_kp_s_5);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":751
+ * self.state._dirblocks = dirblocks
+ * obj = ''
+ * current_dirname = <void*>obj # <<<<<<<<<<<<<<
+ * new_block = 0
+ * entry_count = 0
+ */
+ __pyx_v_current_dirname = ((void *)__pyx_v_obj);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":752
+ * obj = ''
+ * current_dirname = <void*>obj
+ * new_block = 0 # <<<<<<<<<<<<<<
+ * entry_count = 0
+ *
+ */
+ __pyx_v_new_block = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":753
+ * current_dirname = <void*>obj
+ * new_block = 0
+ * entry_count = 0 # <<<<<<<<<<<<<<
+ *
+ * # TODO: jam 2007-05-07 Consider pre-allocating some space for the
+ */
+ __pyx_v_entry_count = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":762
+ * # so), and then truncate. That would give us a malloc + realloc,
+ * # rather than lots of reallocs.
+ * while self.cur_cstr < self.end_cstr: # <<<<<<<<<<<<<<
+ * entry = self._get_entry(num_trees, &current_dirname, &new_block)
+ * if new_block:
+ */
+ while (1) {
+ __pyx_t_5 = (((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->cur_cstr < ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->end_cstr);
+ if (!__pyx_t_5) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":763
+ * # rather than lots of reallocs.
+ * while self.cur_cstr < self.end_cstr:
+ * entry = self._get_entry(num_trees, &current_dirname, &new_block) # <<<<<<<<<<<<<<
+ * if new_block:
+ * # new block - different dirname
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader *)((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->__pyx_vtab)->_get_entry(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self), __pyx_v_num_trees, (&__pyx_v_current_dirname), (&__pyx_v_new_block)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 763; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":764
+ * while self.cur_cstr < self.end_cstr:
+ * entry = self._get_entry(num_trees, &current_dirname, &new_block)
+ * if new_block: # <<<<<<<<<<<<<<
+ * # new block - different dirname
+ * current_block = []
+ */
+ if (__pyx_v_new_block) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":766
+ * if new_block:
+ * # new block - different dirname
+ * current_block = [] # <<<<<<<<<<<<<<
+ * PyList_Append(dirblocks,
+ * (<object>current_dirname, current_block))
+ */
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_v_current_block);
+ __pyx_v_current_block = ((PyObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":768
+ * current_block = []
+ * PyList_Append(dirblocks,
+ * (<object>current_dirname, current_block)) # <<<<<<<<<<<<<<
+ * PyList_Append(current_block, entry)
+ * entry_count = entry_count + 1
+ */
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_v_current_dirname));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_current_dirname));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_current_dirname));
+ __Pyx_INCREF(__pyx_v_current_block);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_current_block);
+ __Pyx_GIVEREF(__pyx_v_current_block);
+ __pyx_t_3 = PyList_Append(((PyObject *)__pyx_v_dirblocks), __pyx_t_2); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":769
+ * PyList_Append(dirblocks,
+ * (<object>current_dirname, current_block))
+ * PyList_Append(current_block, entry) # <<<<<<<<<<<<<<
+ * entry_count = entry_count + 1
+ * if entry_count != expected_entry_count:
+ */
+ __pyx_t_3 = PyList_Append(__pyx_v_current_block, __pyx_v_entry); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":770
+ * (<object>current_dirname, current_block))
+ * PyList_Append(current_block, entry)
+ * entry_count = entry_count + 1 # <<<<<<<<<<<<<<
+ * if entry_count != expected_entry_count:
+ * raise errors.DirstateCorrupt(self.state,
+ */
+ __pyx_v_entry_count = (__pyx_v_entry_count + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":771
+ * PyList_Append(current_block, entry)
+ * entry_count = entry_count + 1
+ * if entry_count != expected_entry_count: # <<<<<<<<<<<<<<
+ * raise errors.DirstateCorrupt(self.state,
+ * 'We read the wrong number of entries.'
+ */
+ __pyx_t_5 = (__pyx_v_entry_count != __pyx_v_expected_entry_count);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":772
+ * entry_count = entry_count + 1
+ * if entry_count != expected_entry_count:
+ * raise errors.DirstateCorrupt(self.state, # <<<<<<<<<<<<<<
+ * 'We read the wrong number of entries.'
+ * ' We expected to read %s, but read %s'
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__DirstateCorrupt); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":775
+ * 'We read the wrong number of entries.'
+ * ' We expected to read %s, but read %s'
+ * % (expected_entry_count, entry_count)) # <<<<<<<<<<<<<<
+ * self.state._split_root_dirblock_into_contents()
+ *
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_expected_entry_count); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyInt_FromLong(__pyx_v_entry_count); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_2 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_16), __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state);
+ __Pyx_GIVEREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":776
+ * ' We expected to read %s, but read %s'
+ * % (expected_entry_count, entry_count))
+ * self.state._split_root_dirblock_into_contents() # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = PyObject_GetAttr(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_v_self)->state, __pyx_n_s_17); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.Reader._parse_dirblocks");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_current_block);
+ __Pyx_DECREF(__pyx_v_entry);
+ __Pyx_DECREF(__pyx_v_dirblocks);
+ __Pyx_DECREF(__pyx_v_obj);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":779
+ *
+ *
+ * def _read_dirblocks(state): # <<<<<<<<<<<<<<
+ * """Read in the dirblocks for the given DirState object.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__read_dirblocks(PyObject *__pyx_self, PyObject *__pyx_v_state); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx__read_dirblocks[] = "Read in the dirblocks for the given DirState object.\n\n This is tightly bound to the DirState internal representation. It should be\n thought of as a member function, which is only separated out so that we can\n re-write it in pyrex.\n\n :param state: A DirState object.\n :return: None\n :postcondition: The dirblocks will be loaded into the appropriate fields in\n the DirState object.\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__read_dirblocks(PyObject *__pyx_self, PyObject *__pyx_v_state) {
+ PyObject *__pyx_v_text;
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *__pyx_v_reader;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ __Pyx_RefNannySetupContext("_read_dirblocks");
+ __pyx_self = __pyx_self;
+ __pyx_v_text = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_reader = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":791
+ * the DirState object.
+ * """
+ * state._state_file.seek(state._end_of_header) # <<<<<<<<<<<<<<
+ * text = state._state_file.read()
+ * # TODO: check the crc checksums. crc_measured = zlib.crc32(text)
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_state, __pyx_n_s___state_file); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__seek); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_state, __pyx_n_s___end_of_header); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":792
+ * """
+ * state._state_file.seek(state._end_of_header)
+ * text = state._state_file.read() # <<<<<<<<<<<<<<
+ * # TODO: check the crc checksums. crc_measured = zlib.crc32(text)
+ *
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_state, __pyx_n_s___state_file); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__read); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_text);
+ __pyx_v_text = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":795
+ * # TODO: check the crc checksums. crc_measured = zlib.crc32(text)
+ *
+ * reader = Reader(text, state) # <<<<<<<<<<<<<<
+ *
+ * reader._parse_dirblocks()
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_text);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_text);
+ __Pyx_GIVEREF(__pyx_v_text);
+ __Pyx_INCREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_21_dirstate_helpers_pyx_Reader)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_reader));
+ __pyx_v_reader = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":797
+ * reader = Reader(text, state)
+ *
+ * reader._parse_dirblocks() # <<<<<<<<<<<<<<
+ * state._dirblock_state = DirState.IN_MEMORY_UNMODIFIED
+ *
+ */
+ __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_reader), __pyx_n_s___parse_dirblocks); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":798
+ *
+ * reader._parse_dirblocks()
+ * state._dirblock_state = DirState.IN_MEMORY_UNMODIFIED # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__DirState); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s_18); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (PyObject_SetAttr(__pyx_v_state, __pyx_n_s___dirblock_state, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._read_dirblocks");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_text);
+ __Pyx_DECREF((PyObject *)__pyx_v_reader);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":801
+ *
+ *
+ * cdef int minikind_from_mode(int mode): # cannot_raise # <<<<<<<<<<<<<<
+ * # in order of frequency:
+ * if S_ISREG(mode):
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx_minikind_from_mode(int __pyx_v_mode) {
+ int __pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("minikind_from_mode");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":803
+ * cdef int minikind_from_mode(int mode): # cannot_raise
+ * # in order of frequency:
+ * if S_ISREG(mode): # <<<<<<<<<<<<<<
+ * return c"f"
+ * if S_ISDIR(mode):
+ */
+ __pyx_t_1 = S_ISREG(__pyx_v_mode);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":804
+ * # in order of frequency:
+ * if S_ISREG(mode):
+ * return c"f" # <<<<<<<<<<<<<<
+ * if S_ISDIR(mode):
+ * return c"d"
+ */
+ __pyx_r = 'f';
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":805
+ * if S_ISREG(mode):
+ * return c"f"
+ * if S_ISDIR(mode): # <<<<<<<<<<<<<<
+ * return c"d"
+ * if S_ISLNK(mode):
+ */
+ __pyx_t_1 = S_ISDIR(__pyx_v_mode);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":806
+ * return c"f"
+ * if S_ISDIR(mode):
+ * return c"d" # <<<<<<<<<<<<<<
+ * if S_ISLNK(mode):
+ * return c"l"
+ */
+ __pyx_r = 'd';
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":807
+ * if S_ISDIR(mode):
+ * return c"d"
+ * if S_ISLNK(mode): # <<<<<<<<<<<<<<
+ * return c"l"
+ * return 0
+ */
+ __pyx_t_1 = S_ISLNK(__pyx_v_mode);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":808
+ * return c"d"
+ * if S_ISLNK(mode):
+ * return c"l" # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_r = 'l';
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":809
+ * if S_ISLNK(mode):
+ * return c"l"
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":815
+ *
+ *
+ * cdef _pack_stat(stat_value): # <<<<<<<<<<<<<<
+ * """return a string representing the stat value's key fields.
+ *
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__pack_stat(PyObject *__pyx_v_stat_value) {
+ char __pyx_v_result[(6 * 4)];
+ int *__pyx_v_aliased;
+ PyObject *__pyx_v_packed;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ unsigned long __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ __Pyx_RefNannySetupContext("_pack_stat");
+ __pyx_v_packed = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":823
+ * cdef char result[6*4] # 6 long ints
+ * cdef int *aliased
+ * aliased = <int *>result # <<<<<<<<<<<<<<
+ * aliased[0] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_size))
+ * # mtime and ctime will often be floats but get converted to PyInt within
+ */
+ __pyx_v_aliased = ((int *)__pyx_v_result);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":824
+ * cdef int *aliased
+ * aliased = <int *>result
+ * aliased[0] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_size)) # <<<<<<<<<<<<<<
+ * # mtime and ctime will often be floats but get converted to PyInt within
+ * aliased[1] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mtime))
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyInt_AsUnsignedLongMask(__pyx_t_1); if (unlikely(__pyx_t_2 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ (__pyx_v_aliased[0]) = htonl(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":826
+ * aliased[0] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_size))
+ * # mtime and ctime will often be floats but get converted to PyInt within
+ * aliased[1] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mtime)) # <<<<<<<<<<<<<<
+ * aliased[2] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ctime))
+ * aliased[3] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_dev))
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_mtime); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyInt_AsUnsignedLongMask(__pyx_t_1); if (unlikely(__pyx_t_2 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ (__pyx_v_aliased[1]) = htonl(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":827
+ * # mtime and ctime will often be floats but get converted to PyInt within
+ * aliased[1] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mtime))
+ * aliased[2] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ctime)) # <<<<<<<<<<<<<<
+ * aliased[3] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_dev))
+ * aliased[4] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ino))
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_ctime); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyInt_AsUnsignedLongMask(__pyx_t_1); if (unlikely(__pyx_t_2 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ (__pyx_v_aliased[2]) = htonl(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":828
+ * aliased[1] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mtime))
+ * aliased[2] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ctime))
+ * aliased[3] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_dev)) # <<<<<<<<<<<<<<
+ * aliased[4] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ino))
+ * aliased[5] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mode))
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_dev); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyInt_AsUnsignedLongMask(__pyx_t_1); if (unlikely(__pyx_t_2 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ (__pyx_v_aliased[3]) = htonl(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":829
+ * aliased[2] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ctime))
+ * aliased[3] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_dev))
+ * aliased[4] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ino)) # <<<<<<<<<<<<<<
+ * aliased[5] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mode))
+ * packed = PyString_FromStringAndSize(result, 6*4)
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_ino); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyInt_AsUnsignedLongMask(__pyx_t_1); if (unlikely(__pyx_t_2 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ (__pyx_v_aliased[4]) = htonl(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":830
+ * aliased[3] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_dev))
+ * aliased[4] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ino))
+ * aliased[5] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mode)) # <<<<<<<<<<<<<<
+ * packed = PyString_FromStringAndSize(result, 6*4)
+ * return _encode(packed)[:-1]
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyInt_AsUnsignedLongMask(__pyx_t_1); if (unlikely(__pyx_t_2 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ (__pyx_v_aliased[5]) = htonl(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":831
+ * aliased[4] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ino))
+ * aliased[5] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mode))
+ * packed = PyString_FromStringAndSize(result, 6*4) # <<<<<<<<<<<<<<
+ * return _encode(packed)[:-1]
+ *
+ */
+ __pyx_t_1 = PyString_FromStringAndSize(__pyx_v_result, 24); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_packed);
+ __pyx_v_packed = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":832
+ * aliased[5] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mode))
+ * packed = PyString_FromStringAndSize(result, 6*4)
+ * return _encode(packed)[:-1] # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___encode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_packed);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_packed);
+ __Pyx_GIVEREF(__pyx_v_packed);
+ __pyx_t_4 = PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PySequence_GetSlice(__pyx_t_4, 0, -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._pack_stat");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_packed);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":835
+ *
+ *
+ * def pack_stat(stat_value): # <<<<<<<<<<<<<<
+ * """Convert stat value into a packed representation quickly with pyrex"""
+ * return _pack_stat(stat_value)
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_pack_stat(PyObject *__pyx_self, PyObject *__pyx_v_stat_value); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx_pack_stat[] = "Convert stat value into a packed representation quickly with pyrex";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_pack_stat(PyObject *__pyx_self, PyObject *__pyx_v_stat_value) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("pack_stat");
+ __pyx_self = __pyx_self;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":837
+ * def pack_stat(stat_value):
+ * """Convert stat value into a packed representation quickly with pyrex"""
+ * return _pack_stat(stat_value) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__pack_stat(__pyx_v_stat_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.pack_stat");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":840
+ *
+ *
+ * def update_entry(self, entry, abspath, stat_value): # <<<<<<<<<<<<<<
+ * """Update the entry based on what is actually on disk.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_update_entry(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_21_dirstate_helpers_pyx_update_entry[] = "Update the entry based on what is actually on disk.\n\n This function only calculates the sha if it needs to - if the entry is\n uncachable, or clearly different to the first parent's entry, no sha\n is calculated, and None is returned.\n\n :param entry: This is the dirblock entry for the file in question.\n :param abspath: The path on disk for this file.\n :param stat_value: (optional) if we already have done a stat on the\n file, re-use it.\n :return: None, or The sha1 hexdigest of the file (40 bytes) or link\n target of a symlink.\n ";
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_update_entry(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_self = 0;
+ PyObject *__pyx_v_entry = 0;
+ PyObject *__pyx_v_abspath = 0;
+ PyObject *__pyx_v_stat_value = 0;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__entry,&__pyx_n_s__abspath,&__pyx_n_s__stat_value,0};
+ __Pyx_RefNannySetupContext("update_entry");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[4] = {0,0,0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__entry);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("update_entry", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__abspath);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("update_entry", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 3:
+ values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stat_value);
+ if (likely(values[3])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("update_entry", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "update_entry") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_self = values[0];
+ __pyx_v_entry = values[1];
+ __pyx_v_abspath = values[2];
+ __pyx_v_stat_value = values[3];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_entry = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_abspath = PyTuple_GET_ITEM(__pyx_args, 2);
+ __pyx_v_stat_value = PyTuple_GET_ITEM(__pyx_args, 3);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("update_entry", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.update_entry");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":854
+ * target of a symlink.
+ * """
+ * return _update_entry(self, entry, abspath, stat_value) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__update_entry(__pyx_v_self, __pyx_v_entry, __pyx_v_abspath, __pyx_v_stat_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 854; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.update_entry");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":857
+ *
+ *
+ * cdef _update_entry(self, entry, abspath, stat_value): # <<<<<<<<<<<<<<
+ * """Update the entry based on what is actually on disk.
+ *
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__update_entry(PyObject *__pyx_v_self, PyObject *__pyx_v_entry, PyObject *__pyx_v_abspath, PyObject *__pyx_v_stat_value) {
+ int __pyx_v_minikind;
+ int __pyx_v_saved_minikind;
+ void *__pyx_v_details;
+ int __pyx_v_worth_saving;
+ PyObject *__pyx_v_packed_stat;
+ PyObject *__pyx_v_saved_link_or_sha1;
+ PyObject *__pyx_v_saved_file_size;
+ PyObject *__pyx_v_saved_executable;
+ PyObject *__pyx_v_saved_packed_stat;
+ PyObject *__pyx_v_link_or_sha1;
+ PyObject *__pyx_v_executable;
+ PyObject *__pyx_v_block_index;
+ PyObject *__pyx_v_entry_index;
+ PyObject *__pyx_v_dir_present;
+ PyObject *__pyx_v_file_present;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ Py_ssize_t __pyx_t_8;
+ int __pyx_t_9;
+ int __pyx_t_10;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ __Pyx_RefNannySetupContext("_update_entry");
+ __pyx_v_packed_stat = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_saved_link_or_sha1 = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_saved_file_size = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_saved_executable = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_saved_packed_stat = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_link_or_sha1 = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_executable = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_block_index = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_entry_index = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_dir_present = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_file_present = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":876
+ * cdef void * details
+ * cdef int worth_saving
+ * minikind = minikind_from_mode(stat_value.st_mode) # <<<<<<<<<<<<<<
+ * if 0 == minikind:
+ * return None
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_minikind = __pyx_f_6bzrlib_21_dirstate_helpers_pyx_minikind_from_mode(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":877
+ * cdef int worth_saving
+ * minikind = minikind_from_mode(stat_value.st_mode)
+ * if 0 == minikind: # <<<<<<<<<<<<<<
+ * return None
+ * packed_stat = _pack_stat(stat_value)
+ */
+ __pyx_t_3 = (0 == __pyx_v_minikind);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":878
+ * minikind = minikind_from_mode(stat_value.st_mode)
+ * if 0 == minikind:
+ * return None # <<<<<<<<<<<<<<
+ * packed_stat = _pack_stat(stat_value)
+ * details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":879
+ * if 0 == minikind:
+ * return None
+ * packed_stat = _pack_stat(stat_value) # <<<<<<<<<<<<<<
+ * details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0)
+ * saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0]
+ */
+ __pyx_t_1 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__pack_stat(__pyx_v_stat_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 879; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_packed_stat);
+ __pyx_v_packed_stat = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":880
+ * return None
+ * packed_stat = _pack_stat(stat_value)
+ * details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0) # <<<<<<<<<<<<<<
+ * saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0]
+ * if minikind == c'd' and saved_minikind == c't':
+ */
+ __pyx_v_details = PyList_GET_ITEM(PyTuple_GET_ITEM(((void *)__pyx_v_entry), 1), 0);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":881
+ * packed_stat = _pack_stat(stat_value)
+ * details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0)
+ * saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0] # <<<<<<<<<<<<<<
+ * if minikind == c'd' and saved_minikind == c't':
+ * minikind = c't'
+ */
+ __pyx_v_saved_minikind = (PyString_AsString(((PyObject *)PyTuple_GET_ITEM(__pyx_v_details, 0)))[0]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":882
+ * details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0)
+ * saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0]
+ * if minikind == c'd' and saved_minikind == c't': # <<<<<<<<<<<<<<
+ * minikind = c't'
+ * saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1)
+ */
+ __pyx_t_3 = (__pyx_v_minikind == 'd');
+ if (__pyx_t_3) {
+ __pyx_t_4 = (__pyx_v_saved_minikind == 't');
+ __pyx_t_5 = __pyx_t_4;
+ } else {
+ __pyx_t_5 = __pyx_t_3;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":883
+ * saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0]
+ * if minikind == c'd' and saved_minikind == c't':
+ * minikind = c't' # <<<<<<<<<<<<<<
+ * saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1)
+ * saved_file_size = PyTuple_GetItem_void_object(details, 2)
+ */
+ __pyx_v_minikind = 't';
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":884
+ * if minikind == c'd' and saved_minikind == c't':
+ * minikind = c't'
+ * saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1) # <<<<<<<<<<<<<<
+ * saved_file_size = PyTuple_GetItem_void_object(details, 2)
+ * saved_executable = PyTuple_GetItem_void_object(details, 3)
+ */
+ __pyx_t_1 = PyTuple_GET_ITEM(__pyx_v_details, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_saved_link_or_sha1);
+ __pyx_v_saved_link_or_sha1 = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":885
+ * minikind = c't'
+ * saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1)
+ * saved_file_size = PyTuple_GetItem_void_object(details, 2) # <<<<<<<<<<<<<<
+ * saved_executable = PyTuple_GetItem_void_object(details, 3)
+ * saved_packed_stat = PyTuple_GetItem_void_object(details, 4)
+ */
+ __pyx_t_1 = PyTuple_GET_ITEM(__pyx_v_details, 2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_saved_file_size);
+ __pyx_v_saved_file_size = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":886
+ * saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1)
+ * saved_file_size = PyTuple_GetItem_void_object(details, 2)
+ * saved_executable = PyTuple_GetItem_void_object(details, 3) # <<<<<<<<<<<<<<
+ * saved_packed_stat = PyTuple_GetItem_void_object(details, 4)
+ * # Deal with pyrex decrefing the objects
+ */
+ __pyx_t_1 = PyTuple_GET_ITEM(__pyx_v_details, 3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_saved_executable);
+ __pyx_v_saved_executable = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":887
+ * saved_file_size = PyTuple_GetItem_void_object(details, 2)
+ * saved_executable = PyTuple_GetItem_void_object(details, 3)
+ * saved_packed_stat = PyTuple_GetItem_void_object(details, 4) # <<<<<<<<<<<<<<
+ * # Deal with pyrex decrefing the objects
+ * Py_INCREF(saved_link_or_sha1)
+ */
+ __pyx_t_1 = PyTuple_GET_ITEM(__pyx_v_details, 4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_saved_packed_stat);
+ __pyx_v_saved_packed_stat = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":889
+ * saved_packed_stat = PyTuple_GetItem_void_object(details, 4)
+ * # Deal with pyrex decrefing the objects
+ * Py_INCREF(saved_link_or_sha1) # <<<<<<<<<<<<<<
+ * Py_INCREF(saved_file_size)
+ * Py_INCREF(saved_executable)
+ */
+ Py_INCREF(__pyx_v_saved_link_or_sha1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":890
+ * # Deal with pyrex decrefing the objects
+ * Py_INCREF(saved_link_or_sha1)
+ * Py_INCREF(saved_file_size) # <<<<<<<<<<<<<<
+ * Py_INCREF(saved_executable)
+ * Py_INCREF(saved_packed_stat)
+ */
+ Py_INCREF(__pyx_v_saved_file_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":891
+ * Py_INCREF(saved_link_or_sha1)
+ * Py_INCREF(saved_file_size)
+ * Py_INCREF(saved_executable) # <<<<<<<<<<<<<<
+ * Py_INCREF(saved_packed_stat)
+ * #(saved_minikind, saved_link_or_sha1, saved_file_size,
+ */
+ Py_INCREF(__pyx_v_saved_executable);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":892
+ * Py_INCREF(saved_file_size)
+ * Py_INCREF(saved_executable)
+ * Py_INCREF(saved_packed_stat) # <<<<<<<<<<<<<<
+ * #(saved_minikind, saved_link_or_sha1, saved_file_size,
+ * # saved_executable, saved_packed_stat) = entry[1][0]
+ */
+ Py_INCREF(__pyx_v_saved_packed_stat);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":897
+ *
+ * if (minikind == saved_minikind
+ * and packed_stat == saved_packed_stat): # <<<<<<<<<<<<<<
+ * # The stat hasn't changed since we saved, so we can re-use the
+ * # saved sha hash.
+ */
+ __pyx_t_5 = (__pyx_v_minikind == __pyx_v_saved_minikind);
+ if (__pyx_t_5) {
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_packed_stat, __pyx_v_saved_packed_stat, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":900
+ * # The stat hasn't changed since we saved, so we can re-use the
+ * # saved sha hash.
+ * if minikind == c'd': # <<<<<<<<<<<<<<
+ * return None
+ *
+ */
+ __pyx_t_4 = (__pyx_v_minikind == 'd');
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":901
+ * # saved sha hash.
+ * if minikind == c'd':
+ * return None # <<<<<<<<<<<<<<
+ *
+ * # size should also be in packed_stat
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":904
+ *
+ * # size should also be in packed_stat
+ * if saved_file_size == stat_value.st_size: # <<<<<<<<<<<<<<
+ * return saved_link_or_sha1
+ *
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_6 = PyObject_RichCompare(__pyx_v_saved_file_size, __pyx_t_1, Py_EQ); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":905
+ * # size should also be in packed_stat
+ * if saved_file_size == stat_value.st_size:
+ * return saved_link_or_sha1 # <<<<<<<<<<<<<<
+ *
+ * # If we have gotten this far, that means that we need to actually
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_saved_link_or_sha1);
+ __pyx_r = __pyx_v_saved_link_or_sha1;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":909
+ * # If we have gotten this far, that means that we need to actually
+ * # process this entry.
+ * link_or_sha1 = None # <<<<<<<<<<<<<<
+ * worth_saving = 1
+ * if minikind == c'f':
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":910
+ * # process this entry.
+ * link_or_sha1 = None
+ * worth_saving = 1 # <<<<<<<<<<<<<<
+ * if minikind == c'f':
+ * executable = self._is_executable(stat_value.st_mode,
+ */
+ __pyx_v_worth_saving = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":911
+ * link_or_sha1 = None
+ * worth_saving = 1
+ * if minikind == c'f': # <<<<<<<<<<<<<<
+ * executable = self._is_executable(stat_value.st_mode,
+ * saved_executable)
+ */
+ switch (__pyx_v_minikind) {
+ case 'f':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":912
+ * worth_saving = 1
+ * if minikind == c'f':
+ * executable = self._is_executable(stat_value.st_mode, # <<<<<<<<<<<<<<
+ * saved_executable)
+ * if self._cutoff_time is None:
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___is_executable); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 912; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 912; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":913
+ * if minikind == c'f':
+ * executable = self._is_executable(stat_value.st_mode,
+ * saved_executable) # <<<<<<<<<<<<<<
+ * if self._cutoff_time is None:
+ * self._sha_cutoff_time()
+ */
+ __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 912; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_saved_executable);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_saved_executable);
+ __Pyx_GIVEREF(__pyx_v_saved_executable);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 912; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_executable);
+ __pyx_v_executable = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":914
+ * executable = self._is_executable(stat_value.st_mode,
+ * saved_executable)
+ * if self._cutoff_time is None: # <<<<<<<<<<<<<<
+ * self._sha_cutoff_time()
+ * if (stat_value.st_mtime < self._cutoff_time
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___cutoff_time); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = (__pyx_t_1 == Py_None);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":915
+ * saved_executable)
+ * if self._cutoff_time is None:
+ * self._sha_cutoff_time() # <<<<<<<<<<<<<<
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___sha_cutoff_time); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":916
+ * if self._cutoff_time is None:
+ * self._sha_cutoff_time()
+ * if (stat_value.st_mtime < self._cutoff_time # <<<<<<<<<<<<<<
+ * and stat_value.st_ctime < self._cutoff_time
+ * and len(entry[1]) > 1
+ */
+ __pyx_t_7 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_mtime); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___cutoff_time); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_6 = PyObject_RichCompare(__pyx_t_7, __pyx_t_1, Py_LT); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":917
+ * self._sha_cutoff_time()
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time # <<<<<<<<<<<<<<
+ * and len(entry[1]) > 1
+ * and entry[1][1][0] != 'a'):
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_ctime); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___cutoff_time); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = PyObject_RichCompare(__pyx_t_6, __pyx_t_1, Py_LT); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":918
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time
+ * and len(entry[1]) > 1 # <<<<<<<<<<<<<<
+ * and entry[1][1][0] != 'a'):
+ * # Could check for size changes for further optimised
+ */
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = PyObject_Length(__pyx_t_7); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_3 = (__pyx_t_8 > 1);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":919
+ * and stat_value.st_ctime < self._cutoff_time
+ * and len(entry[1]) > 1
+ * and entry[1][1][0] != 'a'): # <<<<<<<<<<<<<<
+ * # Could check for size changes for further optimised
+ * # avoidance of sha1's. However the most prominent case of
+ */
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_7, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_7, ((PyObject *)__pyx_n_s__a), Py_NE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_10 = __pyx_t_9;
+ } else {
+ __pyx_t_10 = __pyx_t_3;
+ }
+ __pyx_t_3 = __pyx_t_10;
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ }
+ __pyx_t_5 = __pyx_t_3;
+ } else {
+ __pyx_t_5 = __pyx_t_4;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":923
+ * # avoidance of sha1's. However the most prominent case of
+ * # over-shaing is during initial add, which this catches.
+ * link_or_sha1 = self._sha1_file(abspath) # <<<<<<<<<<<<<<
+ * entry[1][0] = ('f', link_or_sha1, stat_value.st_size,
+ * executable, packed_stat)
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___sha1_file); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_v_abspath);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_abspath);
+ __Pyx_GIVEREF(__pyx_v_abspath);
+ __pyx_t_6 = PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":924
+ * # over-shaing is during initial add, which this catches.
+ * link_or_sha1 = self._sha1_file(abspath)
+ * entry[1][0] = ('f', link_or_sha1, stat_value.st_size, # <<<<<<<<<<<<<<
+ * executable, packed_stat)
+ * else:
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_size); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":925
+ * link_or_sha1 = self._sha1_file(abspath)
+ * entry[1][0] = ('f', link_or_sha1, stat_value.st_size,
+ * executable, packed_stat) # <<<<<<<<<<<<<<
+ * else:
+ * # This file is not worth caching the sha1. Either it is too new, or
+ */
+ __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__f));
+ PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_n_s__f));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__f));
+ __Pyx_INCREF(__pyx_v_link_or_sha1);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_link_or_sha1);
+ __Pyx_GIVEREF(__pyx_v_link_or_sha1);
+ PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __Pyx_INCREF(__pyx_v_executable);
+ PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_v_executable);
+ __Pyx_GIVEREF(__pyx_v_executable);
+ __Pyx_INCREF(__pyx_v_packed_stat);
+ PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_v_packed_stat);
+ __Pyx_GIVEREF(__pyx_v_packed_stat);
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":924
+ * # over-shaing is during initial add, which this catches.
+ * link_or_sha1 = self._sha1_file(abspath)
+ * entry[1][0] = ('f', link_or_sha1, stat_value.st_size, # <<<<<<<<<<<<<<
+ * executable, packed_stat)
+ * else:
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__Pyx_SetItemInt(__pyx_t_6, 0, __pyx_t_7, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L9;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":932
+ * # *not* set the IN_MEMORY_MODIFIED flag. (But we'll save the
+ * # updated values if there is *other* data worth saving.)
+ * entry[1][0] = ('f', '', stat_value.st_size, executable, # <<<<<<<<<<<<<<
+ * DirState.NULLSTAT)
+ * worth_saving = 0
+ */
+ __pyx_t_7 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_size); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":933
+ * # updated values if there is *other* data worth saving.)
+ * entry[1][0] = ('f', '', stat_value.st_size, executable,
+ * DirState.NULLSTAT) # <<<<<<<<<<<<<<
+ * worth_saving = 0
+ * elif minikind == c'd':
+ */
+ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__DirState); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 933; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__NULLSTAT); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 933; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__f));
+ PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_n_s__f));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__f));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_v_executable);
+ PyTuple_SET_ITEM(__pyx_t_6, 3, __pyx_v_executable);
+ __Pyx_GIVEREF(__pyx_v_executable);
+ PyTuple_SET_ITEM(__pyx_t_6, 4, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_7 = 0;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":932
+ * # *not* set the IN_MEMORY_MODIFIED flag. (But we'll save the
+ * # updated values if there is *other* data worth saving.)
+ * entry[1][0] = ('f', '', stat_value.st_size, executable, # <<<<<<<<<<<<<<
+ * DirState.NULLSTAT)
+ * worth_saving = 0
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (__Pyx_SetItemInt(__pyx_t_1, 0, __pyx_t_6, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 932; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":934
+ * entry[1][0] = ('f', '', stat_value.st_size, executable,
+ * DirState.NULLSTAT)
+ * worth_saving = 0 # <<<<<<<<<<<<<<
+ * elif minikind == c'd':
+ * entry[1][0] = ('d', '', 0, False, packed_stat)
+ */
+ __pyx_v_worth_saving = 0;
+ }
+ __pyx_L9:;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":935
+ * DirState.NULLSTAT)
+ * worth_saving = 0
+ * elif minikind == c'd': # <<<<<<<<<<<<<<
+ * entry[1][0] = ('d', '', 0, False, packed_stat)
+ * if saved_minikind != c'd':
+ */
+ case 'd':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":936
+ * worth_saving = 0
+ * elif minikind == c'd':
+ * entry[1][0] = ('d', '', 0, False, packed_stat) # <<<<<<<<<<<<<<
+ * if saved_minikind != c'd':
+ * # This changed from something into a directory. Make sure we
+ */
+ __pyx_t_6 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyTuple_New(5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__d));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__d));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __Pyx_INCREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __Pyx_INCREF(__pyx_v_packed_stat);
+ PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_v_packed_stat);
+ __Pyx_GIVEREF(__pyx_v_packed_stat);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__Pyx_SetItemInt(__pyx_t_6, 0, __pyx_t_1, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":937
+ * elif minikind == c'd':
+ * entry[1][0] = ('d', '', 0, False, packed_stat)
+ * if saved_minikind != c'd': # <<<<<<<<<<<<<<
+ * # This changed from something into a directory. Make sure we
+ * # have a directory block for it. This doesn't happen very
+ */
+ __pyx_t_5 = (__pyx_v_saved_minikind != 'd');
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":942
+ * # often, so this doesn't have to be super fast.
+ * block_index, entry_index, dir_present, file_present = \
+ * self._get_block_entry_index(entry[0][0], entry[0][1], 0) # <<<<<<<<<<<<<<
+ * self._ensure_block(block_index, entry_index,
+ * pathjoin(entry[0][0], entry[0][1]))
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s_19); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_11 = __Pyx_GetItemInt(__pyx_t_6, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_11) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_INCREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ __pyx_t_7 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_11 = PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (PyTuple_CheckExact(__pyx_t_11) && likely(PyTuple_GET_SIZE(__pyx_t_11) == 4)) {
+ PyObject* tuple = __pyx_t_11;
+ __pyx_t_6 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_6);
+ __pyx_t_1 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_1);
+ __pyx_t_7 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_7);
+ __pyx_t_12 = PyTuple_GET_ITEM(tuple, 3); __Pyx_INCREF(__pyx_t_12);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":941
+ * # have a directory block for it. This doesn't happen very
+ * # often, so this doesn't have to be super fast.
+ * block_index, entry_index, dir_present, file_present = \ # <<<<<<<<<<<<<<
+ * self._get_block_entry_index(entry[0][0], entry[0][1], 0)
+ * self._ensure_block(block_index, entry_index,
+ */
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(__pyx_v_block_index);
+ __pyx_v_block_index = __pyx_t_6;
+ __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_entry_index);
+ __pyx_v_entry_index = __pyx_t_1;
+ __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_dir_present);
+ __pyx_v_dir_present = __pyx_t_7;
+ __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_file_present);
+ __pyx_v_file_present = __pyx_t_12;
+ __pyx_t_12 = 0;
+ } else {
+ __pyx_t_13 = PyObject_GetIter(__pyx_t_11); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_6 = __Pyx_UnpackItem(__pyx_t_13, 0); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = __Pyx_UnpackItem(__pyx_t_13, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = __Pyx_UnpackItem(__pyx_t_13, 2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_12 = __Pyx_UnpackItem(__pyx_t_13, 3); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ if (__Pyx_EndUnpack(__pyx_t_13, 4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_DECREF(__pyx_v_block_index);
+ __pyx_v_block_index = __pyx_t_6;
+ __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_entry_index);
+ __pyx_v_entry_index = __pyx_t_1;
+ __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_dir_present);
+ __pyx_v_dir_present = __pyx_t_7;
+ __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_file_present);
+ __pyx_v_file_present = __pyx_t_12;
+ __pyx_t_12 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":943
+ * block_index, entry_index, dir_present, file_present = \
+ * self._get_block_entry_index(entry[0][0], entry[0][1], 0)
+ * self._ensure_block(block_index, entry_index, # <<<<<<<<<<<<<<
+ * pathjoin(entry[0][0], entry[0][1]))
+ * else:
+ */
+ __pyx_t_11 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___ensure_block); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":944
+ * self._get_block_entry_index(entry[0][0], entry[0][1], 0)
+ * self._ensure_block(block_index, entry_index,
+ * pathjoin(entry[0][0], entry[0][1])) # <<<<<<<<<<<<<<
+ * else:
+ * # Any changes are derived trivially from the stat object, not worth
+ */
+ __pyx_t_12 = __Pyx_GetName(__pyx_m, __pyx_n_s__pathjoin); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_7, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_7, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_1 = 0;
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_Call(__pyx_t_12, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_v_block_index);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_block_index);
+ __Pyx_GIVEREF(__pyx_v_block_index);
+ __Pyx_INCREF(__pyx_v_entry_index);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_entry_index);
+ __Pyx_GIVEREF(__pyx_v_entry_index);
+ PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_Call(__pyx_t_11, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":948
+ * # Any changes are derived trivially from the stat object, not worth
+ * # re-writing a dirstate for just this
+ * worth_saving = 0 # <<<<<<<<<<<<<<
+ * elif minikind == c'l':
+ * if saved_minikind == c'l':
+ */
+ __pyx_v_worth_saving = 0;
+ }
+ __pyx_L10:;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":949
+ * # re-writing a dirstate for just this
+ * worth_saving = 0
+ * elif minikind == c'l': # <<<<<<<<<<<<<<
+ * if saved_minikind == c'l':
+ * # If the object hasn't changed kind, it isn't worth saving the
+ */
+ case 'l':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":950
+ * worth_saving = 0
+ * elif minikind == c'l':
+ * if saved_minikind == c'l': # <<<<<<<<<<<<<<
+ * # If the object hasn't changed kind, it isn't worth saving the
+ * # dirstate just for a symlink. The default is 'fast symlinks' which
+ */
+ __pyx_t_5 = (__pyx_v_saved_minikind == 'l');
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":955
+ * # save the target in the inode entry, rather than separately. So to
+ * # stat, we've already read everything off disk.
+ * worth_saving = 0 # <<<<<<<<<<<<<<
+ * link_or_sha1 = self._read_link(abspath, saved_link_or_sha1)
+ * if self._cutoff_time is None:
+ */
+ __pyx_v_worth_saving = 0;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":956
+ * # stat, we've already read everything off disk.
+ * worth_saving = 0
+ * link_or_sha1 = self._read_link(abspath, saved_link_or_sha1) # <<<<<<<<<<<<<<
+ * if self._cutoff_time is None:
+ * self._sha_cutoff_time()
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___read_link); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_v_abspath);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_abspath);
+ __Pyx_GIVEREF(__pyx_v_abspath);
+ __Pyx_INCREF(__pyx_v_saved_link_or_sha1);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_saved_link_or_sha1);
+ __Pyx_GIVEREF(__pyx_v_saved_link_or_sha1);
+ __pyx_t_11 = PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":957
+ * worth_saving = 0
+ * link_or_sha1 = self._read_link(abspath, saved_link_or_sha1)
+ * if self._cutoff_time is None: # <<<<<<<<<<<<<<
+ * self._sha_cutoff_time()
+ * if (stat_value.st_mtime < self._cutoff_time
+ */
+ __pyx_t_11 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___cutoff_time); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_5 = (__pyx_t_11 == Py_None);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":958
+ * link_or_sha1 = self._read_link(abspath, saved_link_or_sha1)
+ * if self._cutoff_time is None:
+ * self._sha_cutoff_time() # <<<<<<<<<<<<<<
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time):
+ */
+ __pyx_t_11 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___sha_cutoff_time); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_7 = PyObject_Call(__pyx_t_11, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":959
+ * if self._cutoff_time is None:
+ * self._sha_cutoff_time()
+ * if (stat_value.st_mtime < self._cutoff_time # <<<<<<<<<<<<<<
+ * and stat_value.st_ctime < self._cutoff_time):
+ * entry[1][0] = ('l', link_or_sha1, stat_value.st_size,
+ */
+ __pyx_t_7 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_mtime); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_11 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___cutoff_time); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_6 = PyObject_RichCompare(__pyx_t_7, __pyx_t_11, Py_LT); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 959; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":960
+ * self._sha_cutoff_time()
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time): # <<<<<<<<<<<<<<
+ * entry[1][0] = ('l', link_or_sha1, stat_value.st_size,
+ * False, packed_stat)
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_ctime); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_11 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___cutoff_time); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_7 = PyObject_RichCompare(__pyx_t_6, __pyx_t_11, Py_LT); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_3 = __pyx_t_4;
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":961
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time):
+ * entry[1][0] = ('l', link_or_sha1, stat_value.st_size, # <<<<<<<<<<<<<<
+ * False, packed_stat)
+ * else:
+ */
+ __pyx_t_7 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_size); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":962
+ * and stat_value.st_ctime < self._cutoff_time):
+ * entry[1][0] = ('l', link_or_sha1, stat_value.st_size,
+ * False, packed_stat) # <<<<<<<<<<<<<<
+ * else:
+ * entry[1][0] = ('l', '', stat_value.st_size,
+ */
+ __pyx_t_11 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_6 = PyTuple_New(5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__l));
+ PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_n_s__l));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l));
+ __Pyx_INCREF(__pyx_v_link_or_sha1);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_link_or_sha1);
+ __Pyx_GIVEREF(__pyx_v_link_or_sha1);
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_6, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_INCREF(__pyx_v_packed_stat);
+ PyTuple_SET_ITEM(__pyx_t_6, 4, __pyx_v_packed_stat);
+ __Pyx_GIVEREF(__pyx_v_packed_stat);
+ __pyx_t_7 = 0;
+ __pyx_t_11 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":961
+ * if (stat_value.st_mtime < self._cutoff_time
+ * and stat_value.st_ctime < self._cutoff_time):
+ * entry[1][0] = ('l', link_or_sha1, stat_value.st_size, # <<<<<<<<<<<<<<
+ * False, packed_stat)
+ * else:
+ */
+ __pyx_t_11 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_11) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ if (__Pyx_SetItemInt(__pyx_t_11, 0, __pyx_t_6, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ goto __pyx_L13;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":964
+ * False, packed_stat)
+ * else:
+ * entry[1][0] = ('l', '', stat_value.st_size, # <<<<<<<<<<<<<<
+ * False, DirState.NULLSTAT)
+ * if worth_saving:
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_stat_value, __pyx_n_s__st_size); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":965
+ * else:
+ * entry[1][0] = ('l', '', stat_value.st_size,
+ * False, DirState.NULLSTAT) # <<<<<<<<<<<<<<
+ * if worth_saving:
+ * # Note, even though _mark_modified will only set
+ */
+ __pyx_t_11 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__DirState); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_12 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__NULLSTAT); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 965; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__l));
+ PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_n_s__l));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_6 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":964
+ * False, packed_stat)
+ * else:
+ * entry[1][0] = ('l', '', stat_value.st_size, # <<<<<<<<<<<<<<
+ * False, DirState.NULLSTAT)
+ * if worth_saving:
+ */
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ if (__Pyx_SetItemInt(__pyx_t_12, 0, __pyx_t_7, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ }
+ __pyx_L13:;
+ break;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":966
+ * entry[1][0] = ('l', '', stat_value.st_size,
+ * False, DirState.NULLSTAT)
+ * if worth_saving: # <<<<<<<<<<<<<<
+ * # Note, even though _mark_modified will only set
+ * # IN_MEMORY_HASH_MODIFIED, it still isn't worth
+ */
+ if (__pyx_v_worth_saving) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":969
+ * # Note, even though _mark_modified will only set
+ * # IN_MEMORY_HASH_MODIFIED, it still isn't worth
+ * self._mark_modified([entry]) # <<<<<<<<<<<<<<
+ * return link_or_sha1
+ *
+ */
+ __pyx_t_7 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s___mark_modified); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 969; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_12 = PyList_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 969; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_12));
+ __Pyx_INCREF(__pyx_v_entry);
+ PyList_SET_ITEM(__pyx_t_12, 0, __pyx_v_entry);
+ __Pyx_GIVEREF(__pyx_v_entry);
+ __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 969; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, ((PyObject *)__pyx_t_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_12));
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyObject_Call(__pyx_t_7, __pyx_t_11, NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 969; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":970
+ * # IN_MEMORY_HASH_MODIFIED, it still isn't worth
+ * self._mark_modified([entry])
+ * return link_or_sha1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_link_or_sha1);
+ __pyx_r = __pyx_v_link_or_sha1;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._update_entry");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_packed_stat);
+ __Pyx_DECREF(__pyx_v_saved_link_or_sha1);
+ __Pyx_DECREF(__pyx_v_saved_file_size);
+ __Pyx_DECREF(__pyx_v_saved_executable);
+ __Pyx_DECREF(__pyx_v_saved_packed_stat);
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __Pyx_DECREF(__pyx_v_executable);
+ __Pyx_DECREF(__pyx_v_block_index);
+ __Pyx_DECREF(__pyx_v_entry_index);
+ __Pyx_DECREF(__pyx_v_dir_present);
+ __Pyx_DECREF(__pyx_v_file_present);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":974
+ *
+ * # TODO: Do we want to worry about exceptions here?
+ * cdef char _minikind_from_string(object string) except? -1: # <<<<<<<<<<<<<<
+ * """Convert a python string to a char."""
+ * return PyString_AsString(string)[0]
+ */
+
+static char __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(PyObject *__pyx_v_string) {
+ char __pyx_r;
+ __Pyx_RefNannySetupContext("_minikind_from_string");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":976
+ * cdef char _minikind_from_string(object string) except? -1:
+ * """Convert a python string to a char."""
+ * return PyString_AsString(string)[0] # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = (PyString_AsString(__pyx_v_string)[0]);
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":993
+ *
+ *
+ * cdef object _minikind_to_kind(char minikind): # <<<<<<<<<<<<<<
+ * """Create a string kind for minikind."""
+ * cdef char _minikind[1]
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_to_kind(char __pyx_v_minikind) {
+ char __pyx_v__minikind[1];
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannySetupContext("_minikind_to_kind");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":996
+ * """Create a string kind for minikind."""
+ * cdef char _minikind[1]
+ * if minikind == c'f': # <<<<<<<<<<<<<<
+ * return _kind_file
+ * elif minikind == c'd':
+ */
+ switch (__pyx_v_minikind) {
+ case 'f':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":997
+ * cdef char _minikind[1]
+ * if minikind == c'f':
+ * return _kind_file # <<<<<<<<<<<<<<
+ * elif minikind == c'd':
+ * return _kind_directory
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file);
+ __pyx_r = __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file;
+ goto __pyx_L0;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":998
+ * if minikind == c'f':
+ * return _kind_file
+ * elif minikind == c'd': # <<<<<<<<<<<<<<
+ * return _kind_directory
+ * elif minikind == c'a':
+ */
+ case 'd':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":999
+ * return _kind_file
+ * elif minikind == c'd':
+ * return _kind_directory # <<<<<<<<<<<<<<
+ * elif minikind == c'a':
+ * return _kind_absent
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory);
+ __pyx_r = __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory;
+ goto __pyx_L0;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1000
+ * elif minikind == c'd':
+ * return _kind_directory
+ * elif minikind == c'a': # <<<<<<<<<<<<<<
+ * return _kind_absent
+ * elif minikind == c'r':
+ */
+ case 'a':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1001
+ * return _kind_directory
+ * elif minikind == c'a':
+ * return _kind_absent # <<<<<<<<<<<<<<
+ * elif minikind == c'r':
+ * return _kind_relocated
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent);
+ __pyx_r = __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent;
+ goto __pyx_L0;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1002
+ * elif minikind == c'a':
+ * return _kind_absent
+ * elif minikind == c'r': # <<<<<<<<<<<<<<
+ * return _kind_relocated
+ * elif minikind == c'l':
+ */
+ case 'r':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1003
+ * return _kind_absent
+ * elif minikind == c'r':
+ * return _kind_relocated # <<<<<<<<<<<<<<
+ * elif minikind == c'l':
+ * return _kind_symlink
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated);
+ __pyx_r = __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated;
+ goto __pyx_L0;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1004
+ * elif minikind == c'r':
+ * return _kind_relocated
+ * elif minikind == c'l': # <<<<<<<<<<<<<<
+ * return _kind_symlink
+ * elif minikind == c't':
+ */
+ case 'l':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1005
+ * return _kind_relocated
+ * elif minikind == c'l':
+ * return _kind_symlink # <<<<<<<<<<<<<<
+ * elif minikind == c't':
+ * return _kind_tree_reference
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink);
+ __pyx_r = __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink;
+ goto __pyx_L0;
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1006
+ * elif minikind == c'l':
+ * return _kind_symlink
+ * elif minikind == c't': # <<<<<<<<<<<<<<
+ * return _kind_tree_reference
+ * _minikind[0] = minikind
+ */
+ case 't':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1007
+ * return _kind_symlink
+ * elif minikind == c't':
+ * return _kind_tree_reference # <<<<<<<<<<<<<<
+ * _minikind[0] = minikind
+ * raise KeyError(PyString_FromStringAndSize(_minikind, 1))
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference);
+ __pyx_r = __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference;
+ goto __pyx_L0;
+ break;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1008
+ * elif minikind == c't':
+ * return _kind_tree_reference
+ * _minikind[0] = minikind # <<<<<<<<<<<<<<
+ * raise KeyError(PyString_FromStringAndSize(_minikind, 1))
+ *
+ */
+ (__pyx_v__minikind[0]) = __pyx_v_minikind;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1009
+ * return _kind_tree_reference
+ * _minikind[0] = minikind
+ * raise KeyError(PyString_FromStringAndSize(_minikind, 1)) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = PyString_FromStringAndSize(__pyx_v__minikind, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_KeyError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx._minikind_to_kind");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1012
+ *
+ *
+ * cdef int _versioned_minikind(char minikind): # cannot_raise # <<<<<<<<<<<<<<
+ * """Return non-zero if minikind is in fltd"""
+ * return (minikind == c'f' or
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(char __pyx_v_minikind) {
+ int __pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_versioned_minikind");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1014
+ * cdef int _versioned_minikind(char minikind): # cannot_raise
+ * """Return non-zero if minikind is in fltd"""
+ * return (minikind == c'f' or # <<<<<<<<<<<<<<
+ * minikind == c'd' or
+ * minikind == c'l' or
+ */
+ switch (__pyx_v_minikind) {
+ case 'f':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1015
+ * """Return non-zero if minikind is in fltd"""
+ * return (minikind == c'f' or
+ * minikind == c'd' or # <<<<<<<<<<<<<<
+ * minikind == c'l' or
+ * minikind == c't')
+ */
+ case 'd':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1016
+ * return (minikind == c'f' or
+ * minikind == c'd' or
+ * minikind == c'l' or # <<<<<<<<<<<<<<
+ * minikind == c't')
+ *
+ */
+ case 'l':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1017
+ * minikind == c'd' or
+ * minikind == c'l' or
+ * minikind == c't') # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ case 't':
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1014
+ * cdef int _versioned_minikind(char minikind): # cannot_raise
+ * """Return non-zero if minikind is in fltd"""
+ * return (minikind == c'f' or # <<<<<<<<<<<<<<
+ * minikind == c'd' or
+ * minikind == c'l' or
+ */
+ __pyx_t_1 = 1;
+ break;
+ default:
+ __pyx_t_1 = 0;
+ break;
+ }
+ __pyx_r = __pyx_t_1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1031
+ * cdef object use_filesystem_for_exec
+ * cdef object utf8_decode
+ * cdef readonly object searched_specific_files # <<<<<<<<<<<<<<
+ * cdef readonly object searched_exact_paths
+ * cdef object search_specific_files
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_23searched_specific_files___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_23searched_specific_files___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_specific_files);
+ __pyx_r = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_specific_files;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1032
+ * cdef object utf8_decode
+ * cdef readonly object searched_specific_files
+ * cdef readonly object searched_exact_paths # <<<<<<<<<<<<<<
+ * cdef object search_specific_files
+ * # The parents up to the root of the paths we are searching.
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_20searched_exact_paths___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_20searched_exact_paths___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_exact_paths);
+ __pyx_r = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_exact_paths;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1064
+ * cdef object sha_file
+ *
+ * def __init__(self, include_unchanged, use_filesystem_for_exec, # <<<<<<<<<<<<<<
+ * search_specific_files, state, source_index, target_index,
+ * want_unversioned, tree):
+ */
+
+static int __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_include_unchanged = 0;
+ PyObject *__pyx_v_use_filesystem_for_exec = 0;
+ PyObject *__pyx_v_search_specific_files = 0;
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v_source_index = 0;
+ PyObject *__pyx_v_target_index = 0;
+ PyObject *__pyx_v_want_unversioned = 0;
+ PyObject *__pyx_v_tree = 0;
+ int __pyx_r;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__include_unchanged,&__pyx_n_s_22,&__pyx_n_s_23,&__pyx_n_s__state,&__pyx_n_s__source_index,&__pyx_n_s__target_index,&__pyx_n_s__want_unversioned,&__pyx_n_s__tree,0};
+ __Pyx_RefNannySetupContext("__init__");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[8] = {0,0,0,0,0,0,0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__include_unchanged);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_22);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_23);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 3:
+ values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__state);
+ if (likely(values[3])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 4:
+ values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__source_index);
+ if (likely(values[4])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 5:
+ values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__target_index);
+ if (likely(values[5])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 6:
+ values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__want_unversioned);
+ if (likely(values[6])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 7:
+ values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__tree);
+ if (likely(values[7])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_include_unchanged = values[0];
+ __pyx_v_use_filesystem_for_exec = values[1];
+ __pyx_v_search_specific_files = values[2];
+ __pyx_v_state = values[3];
+ __pyx_v_source_index = values[4];
+ __pyx_v_target_index = values[5];
+ __pyx_v_want_unversioned = values[6];
+ __pyx_v_tree = values[7];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 8) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_include_unchanged = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_use_filesystem_for_exec = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_search_specific_files = PyTuple_GET_ITEM(__pyx_args, 2);
+ __pyx_v_state = PyTuple_GET_ITEM(__pyx_args, 3);
+ __pyx_v_source_index = PyTuple_GET_ITEM(__pyx_args, 4);
+ __pyx_v_target_index = PyTuple_GET_ITEM(__pyx_args, 5);
+ __pyx_v_want_unversioned = PyTuple_GET_ITEM(__pyx_args, 6);
+ __pyx_v_tree = PyTuple_GET_ITEM(__pyx_args, 7);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 8, 8, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC.__init__");
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1067
+ * search_specific_files, state, source_index, target_index,
+ * want_unversioned, tree):
+ * self.doing_consistency_expansion = 0 # <<<<<<<<<<<<<<
+ * self.old_dirname_to_file_id = {}
+ * self.new_dirname_to_file_id = {}
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->doing_consistency_expansion = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1068
+ * want_unversioned, tree):
+ * self.doing_consistency_expansion = 0
+ * self.old_dirname_to_file_id = {} # <<<<<<<<<<<<<<
+ * self.new_dirname_to_file_id = {}
+ * # Are we doing a partial iter_changes?
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1068; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->old_dirname_to_file_id);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->old_dirname_to_file_id);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->old_dirname_to_file_id = ((PyObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1069
+ * self.doing_consistency_expansion = 0
+ * self.old_dirname_to_file_id = {}
+ * self.new_dirname_to_file_id = {} # <<<<<<<<<<<<<<
+ * # Are we doing a partial iter_changes?
+ * self.partial = set(['']).__ne__(search_specific_files)
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1069; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->new_dirname_to_file_id);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->new_dirname_to_file_id);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->new_dirname_to_file_id = ((PyObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1071
+ * self.new_dirname_to_file_id = {}
+ * # Are we doing a partial iter_changes?
+ * self.partial = set(['']).__ne__(search_specific_files) # <<<<<<<<<<<<<<
+ * # Using a list so that we can access the values and change them in
+ * # nested scope. Each one is [path, file_id, entry]
+ */
+ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)&PySet_Type)), __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s____ne__); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_search_specific_files);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_search_specific_files);
+ __Pyx_GIVEREF(__pyx_v_search_specific_files);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->partial = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1074
+ * # Using a list so that we can access the values and change them in
+ * # nested scope. Each one is [path, file_id, entry]
+ * self.last_source_parent = [None, None] # <<<<<<<<<<<<<<
+ * self.last_target_parent = [None, None]
+ * if include_unchanged is None:
+ */
+ __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1074; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_INCREF(Py_None);
+ PyList_SET_ITEM(__pyx_t_3, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyList_SET_ITEM(__pyx_t_3, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->last_source_parent);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->last_source_parent);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->last_source_parent = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1075
+ * # nested scope. Each one is [path, file_id, entry]
+ * self.last_source_parent = [None, None]
+ * self.last_target_parent = [None, None] # <<<<<<<<<<<<<<
+ * if include_unchanged is None:
+ * self.include_unchanged = False
+ */
+ __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_INCREF(Py_None);
+ PyList_SET_ITEM(__pyx_t_3, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyList_SET_ITEM(__pyx_t_3, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->last_target_parent);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->last_target_parent);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->last_target_parent = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1076
+ * self.last_source_parent = [None, None]
+ * self.last_target_parent = [None, None]
+ * if include_unchanged is None: # <<<<<<<<<<<<<<
+ * self.include_unchanged = False
+ * else:
+ */
+ __pyx_t_5 = (__pyx_v_include_unchanged == Py_None);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1077
+ * self.last_target_parent = [None, None]
+ * if include_unchanged is None:
+ * self.include_unchanged = False # <<<<<<<<<<<<<<
+ * else:
+ * self.include_unchanged = int(include_unchanged)
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->include_unchanged = 0;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1079
+ * self.include_unchanged = False
+ * else:
+ * self.include_unchanged = int(include_unchanged) # <<<<<<<<<<<<<<
+ * self.use_filesystem_for_exec = use_filesystem_for_exec
+ * self.utf8_decode = cache_utf8._utf8_decode
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_include_unchanged);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_include_unchanged);
+ __Pyx_GIVEREF(__pyx_v_include_unchanged);
+ __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)&PyInt_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->include_unchanged = __pyx_t_4;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1080
+ * else:
+ * self.include_unchanged = int(include_unchanged)
+ * self.use_filesystem_for_exec = use_filesystem_for_exec # <<<<<<<<<<<<<<
+ * self.utf8_decode = cache_utf8._utf8_decode
+ * # for all search_indexs in each path at or under each element of
+ */
+ __Pyx_INCREF(__pyx_v_use_filesystem_for_exec);
+ __Pyx_GIVEREF(__pyx_v_use_filesystem_for_exec);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->use_filesystem_for_exec);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->use_filesystem_for_exec);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->use_filesystem_for_exec = __pyx_v_use_filesystem_for_exec;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1081
+ * self.include_unchanged = int(include_unchanged)
+ * self.use_filesystem_for_exec = use_filesystem_for_exec
+ * self.utf8_decode = cache_utf8._utf8_decode # <<<<<<<<<<<<<<
+ * # for all search_indexs in each path at or under each element of
+ * # search_specific_files, if the detail is relocated: add the id, and
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__cache_utf8); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1081; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s___utf8_decode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1081; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->utf8_decode);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->utf8_decode);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->utf8_decode = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1086
+ * # add the relocated path as one to search if its not searched already.
+ * # If the detail is not relocated, add the id.
+ * self.searched_specific_files = set() # <<<<<<<<<<<<<<
+ * # When we search exact paths without expanding downwards, we record
+ * # that here.
+ */
+ __pyx_t_3 = PySet_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1086; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_specific_files);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_specific_files);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_specific_files = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1089
+ * # When we search exact paths without expanding downwards, we record
+ * # that here.
+ * self.searched_exact_paths = set() # <<<<<<<<<<<<<<
+ * self.search_specific_files = search_specific_files
+ * # The parents up to the root of the paths we are searching.
+ */
+ __pyx_t_3 = PySet_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1089; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_exact_paths);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_exact_paths);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->searched_exact_paths = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1090
+ * # that here.
+ * self.searched_exact_paths = set()
+ * self.search_specific_files = search_specific_files # <<<<<<<<<<<<<<
+ * # The parents up to the root of the paths we are searching.
+ * # After all normal paths are returned, these specific items are returned.
+ */
+ __Pyx_INCREF(__pyx_v_search_specific_files);
+ __Pyx_GIVEREF(__pyx_v_search_specific_files);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->search_specific_files);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->search_specific_files);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->search_specific_files = __pyx_v_search_specific_files;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1093
+ * # The parents up to the root of the paths we are searching.
+ * # After all normal paths are returned, these specific items are returned.
+ * self.search_specific_file_parents = set() # <<<<<<<<<<<<<<
+ * # The ids we've sent out in the delta.
+ * self.seen_ids = set()
+ */
+ __pyx_t_3 = PySet_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1093; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->search_specific_file_parents);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->search_specific_file_parents);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->search_specific_file_parents = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1095
+ * self.search_specific_file_parents = set()
+ * # The ids we've sent out in the delta.
+ * self.seen_ids = set() # <<<<<<<<<<<<<<
+ * self.state = state
+ * self.current_root = None
+ */
+ __pyx_t_3 = PySet_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1095; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->seen_ids);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->seen_ids);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->seen_ids = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1096
+ * # The ids we've sent out in the delta.
+ * self.seen_ids = set()
+ * self.state = state # <<<<<<<<<<<<<<
+ * self.current_root = None
+ * self.current_root_unicode = None
+ */
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->state);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->state);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->state = __pyx_v_state;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1097
+ * self.seen_ids = set()
+ * self.state = state
+ * self.current_root = None # <<<<<<<<<<<<<<
+ * self.current_root_unicode = None
+ * self.root_entries = None
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_root);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_root);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_root = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1098
+ * self.state = state
+ * self.current_root = None
+ * self.current_root_unicode = None # <<<<<<<<<<<<<<
+ * self.root_entries = None
+ * self.root_entries_pos = 0
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_root_unicode);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_root_unicode);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_root_unicode = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1099
+ * self.current_root = None
+ * self.current_root_unicode = None
+ * self.root_entries = None # <<<<<<<<<<<<<<
+ * self.root_entries_pos = 0
+ * self.root_entries_len = 0
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_entries);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_entries);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_entries = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1100
+ * self.current_root_unicode = None
+ * self.root_entries = None
+ * self.root_entries_pos = 0 # <<<<<<<<<<<<<<
+ * self.root_entries_len = 0
+ * self.root_abspath = None
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_entries_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1101
+ * self.root_entries = None
+ * self.root_entries_pos = 0
+ * self.root_entries_len = 0 # <<<<<<<<<<<<<<
+ * self.root_abspath = None
+ * if source_index is None:
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_entries_len = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1102
+ * self.root_entries_pos = 0
+ * self.root_entries_len = 0
+ * self.root_abspath = None # <<<<<<<<<<<<<<
+ * if source_index is None:
+ * self.source_index = -1
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_abspath);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_abspath);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_abspath = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1103
+ * self.root_entries_len = 0
+ * self.root_abspath = None
+ * if source_index is None: # <<<<<<<<<<<<<<
+ * self.source_index = -1
+ * else:
+ */
+ __pyx_t_5 = (__pyx_v_source_index == Py_None);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1104
+ * self.root_abspath = None
+ * if source_index is None:
+ * self.source_index = -1 # <<<<<<<<<<<<<<
+ * else:
+ * self.source_index = source_index
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->source_index = -1;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1106
+ * self.source_index = -1
+ * else:
+ * self.source_index = source_index # <<<<<<<<<<<<<<
+ * self.target_index = target_index
+ * self.want_unversioned = want_unversioned
+ */
+ __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_v_source_index); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1106; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->source_index = __pyx_t_4;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1107
+ * else:
+ * self.source_index = source_index
+ * self.target_index = target_index # <<<<<<<<<<<<<<
+ * self.want_unversioned = want_unversioned
+ * self.tree = tree
+ */
+ __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_v_target_index); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->target_index = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1108
+ * self.source_index = source_index
+ * self.target_index = target_index
+ * self.want_unversioned = want_unversioned # <<<<<<<<<<<<<<
+ * self.tree = tree
+ * self.dir_iterator = None
+ */
+ __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_v_want_unversioned); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->want_unversioned = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1109
+ * self.target_index = target_index
+ * self.want_unversioned = want_unversioned
+ * self.tree = tree # <<<<<<<<<<<<<<
+ * self.dir_iterator = None
+ * self.block_index = -1
+ */
+ __Pyx_INCREF(__pyx_v_tree);
+ __Pyx_GIVEREF(__pyx_v_tree);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->tree);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->tree);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->tree = __pyx_v_tree;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1110
+ * self.want_unversioned = want_unversioned
+ * self.tree = tree
+ * self.dir_iterator = None # <<<<<<<<<<<<<<
+ * self.block_index = -1
+ * self.current_block = None
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->dir_iterator);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->dir_iterator);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->dir_iterator = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1111
+ * self.tree = tree
+ * self.dir_iterator = None
+ * self.block_index = -1 # <<<<<<<<<<<<<<
+ * self.current_block = None
+ * self.current_block_list = None
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->block_index = -1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1112
+ * self.dir_iterator = None
+ * self.block_index = -1
+ * self.current_block = None # <<<<<<<<<<<<<<
+ * self.current_block_list = None
+ * self.current_block_pos = -1
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1113
+ * self.block_index = -1
+ * self.current_block = None
+ * self.current_block_list = None # <<<<<<<<<<<<<<
+ * self.current_block_pos = -1
+ * self.current_dir_info = None
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block_list);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block_list);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block_list = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1114
+ * self.current_block = None
+ * self.current_block_list = None
+ * self.current_block_pos = -1 # <<<<<<<<<<<<<<
+ * self.current_dir_info = None
+ * self.current_dir_list = None
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_block_pos = -1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1115
+ * self.current_block_list = None
+ * self.current_block_pos = -1
+ * self.current_dir_info = None # <<<<<<<<<<<<<<
+ * self.current_dir_list = None
+ * self._pending_consistent_entries = []
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_dir_info);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_dir_info);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_dir_info = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1116
+ * self.current_block_pos = -1
+ * self.current_dir_info = None
+ * self.current_dir_list = None # <<<<<<<<<<<<<<
+ * self._pending_consistent_entries = []
+ * self.path_index = 0
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_dir_list);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_dir_list);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->current_dir_list = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1117
+ * self.current_dir_info = None
+ * self.current_dir_list = None
+ * self._pending_consistent_entries = [] # <<<<<<<<<<<<<<
+ * self.path_index = 0
+ * self.root_dir_info = None
+ */
+ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1117; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->_pending_consistent_entries);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->_pending_consistent_entries);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->_pending_consistent_entries = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1118
+ * self.current_dir_list = None
+ * self._pending_consistent_entries = []
+ * self.path_index = 0 # <<<<<<<<<<<<<<
+ * self.root_dir_info = None
+ * self.bisect_left = bisect.bisect_left
+ */
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->path_index = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1119
+ * self._pending_consistent_entries = []
+ * self.path_index = 0
+ * self.root_dir_info = None # <<<<<<<<<<<<<<
+ * self.bisect_left = bisect.bisect_left
+ * self.pathjoin = osutils.pathjoin
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_dir_info);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_dir_info);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->root_dir_info = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1120
+ * self.path_index = 0
+ * self.root_dir_info = None
+ * self.bisect_left = bisect.bisect_left # <<<<<<<<<<<<<<
+ * self.pathjoin = osutils.pathjoin
+ * self.fstat = os.fstat
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__bisect); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__bisect_left); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->bisect_left);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->bisect_left);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->bisect_left = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1121
+ * self.root_dir_info = None
+ * self.bisect_left = bisect.bisect_left
+ * self.pathjoin = osutils.pathjoin # <<<<<<<<<<<<<<
+ * self.fstat = os.fstat
+ * self.sha_file = osutils.sha_file
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__pathjoin); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->pathjoin);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->pathjoin);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->pathjoin = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1122
+ * self.bisect_left = bisect.bisect_left
+ * self.pathjoin = osutils.pathjoin
+ * self.fstat = os.fstat # <<<<<<<<<<<<<<
+ * self.sha_file = osutils.sha_file
+ * if target_index != 0:
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__os); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__fstat); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->fstat);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->fstat);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->fstat = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1123
+ * self.pathjoin = osutils.pathjoin
+ * self.fstat = os.fstat
+ * self.sha_file = osutils.sha_file # <<<<<<<<<<<<<<
+ * if target_index != 0:
+ * # A lot of code in here depends on target_index == 0
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__sha_file); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->sha_file);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->sha_file);
+ ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->sha_file = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1124
+ * self.fstat = os.fstat
+ * self.sha_file = osutils.sha_file
+ * if target_index != 0: # <<<<<<<<<<<<<<
+ * # A lot of code in here depends on target_index == 0
+ * raise errors.BzrError('unsupported target index')
+ */
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_target_index, __pyx_int_0, Py_NE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1126
+ * if target_index != 0:
+ * # A lot of code in here depends on target_index == 0
+ * raise errors.BzrError('unsupported target index') # <<<<<<<<<<<<<<
+ *
+ * cdef _process_entry(self, entry, path_info):
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__BzrError); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_30));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_30));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_30));
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
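+/* Summary of the generated ProcessEntryC.__init__ above: it stores the
+ * source/target indices and the want_unversioned flag, resets the block and
+ * directory iteration state (dir_iterator/current_block/current_dir_* to
+ * None, block_index/current_block_pos to -1, path_index to 0), caches
+ * bisect.bisect_left, osutils.pathjoin, os.fstat and osutils.sha_file as
+ * instance attributes, and raises errors.BzrError for any target_index
+ * other than 0. */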
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1128
+ * raise errors.BzrError('unsupported target index')
+ *
+ * cdef _process_entry(self, entry, path_info): # <<<<<<<<<<<<<<
+ * """Compare an entry and real disk to generate delta information.
+ *
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__process_entry(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self, PyObject *__pyx_v_entry, PyObject *__pyx_v_path_info) {
+ char __pyx_v_target_minikind;
+ char __pyx_v_source_minikind;
+ PyObject *__pyx_v_file_id;
+ int __pyx_v_content_change;
+ PyObject *__pyx_v_details_list;
+ PyObject *__pyx_v_source_details;
+ PyObject *__pyx_v_target_details;
+ PyObject *__pyx_v_link_or_sha1;
+ PyObject *__pyx_v_old_dirname;
+ PyObject *__pyx_v_old_basename;
+ PyObject *__pyx_v_old_path;
+ PyObject *__pyx_v_path;
+ PyObject *__pyx_v_old_entry;
+ PyObject *__pyx_v_target_kind;
+ PyObject *__pyx_v_target_exec;
+ PyObject *__pyx_v_statvalue;
+ PyObject *__pyx_v_source_parent_id;
+ PyObject *__pyx_v__;
+ PyObject *__pyx_v_source_parent_entry;
+ PyObject *__pyx_v_new_dirname;
+ PyObject *__pyx_v_target_parent_id;
+ PyObject *__pyx_v_target_parent_entry;
+ PyObject *__pyx_v_source_exec;
+ PyObject *__pyx_v_changed;
+ PyObject *__pyx_v_old_path_u;
+ PyObject *__pyx_v_path_u;
+ PyObject *__pyx_v_source_kind;
+ PyObject *__pyx_v_parent_entry;
+ PyObject *__pyx_v_parent_id;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ char __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_t_7;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *__pyx_t_14 = NULL;
+ __Pyx_RefNannySetupContext("_process_entry");
+ __pyx_v_file_id = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_details_list = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_source_details = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_target_details = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_link_or_sha1 = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_old_dirname = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_old_basename = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_old_path = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_path = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_old_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_target_kind = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_target_exec = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_statvalue = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_source_parent_id = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v__ = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_source_parent_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_new_dirname = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_target_parent_id = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_target_parent_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_source_exec = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_changed = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_old_path_u = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_path_u = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_source_kind = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_parent_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_parent_id = Py_None; __Pyx_INCREF(Py_None);
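+ /* Every object-typed local above starts as an owned reference to Py_None,
+  * so the assignments that follow can DECREF the previous value
+  * unconditionally before storing a new reference. */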
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1150
+ * cdef int content_change
+ * cdef object details_list
+ * file_id = None # <<<<<<<<<<<<<<
+ * details_list = entry[1]
+ * if -1 == self.source_index:
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_file_id);
+ __pyx_v_file_id = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1151
+ * cdef object details_list
+ * file_id = None
+ * details_list = entry[1] # <<<<<<<<<<<<<<
+ * if -1 == self.source_index:
+ * source_details = DirState.NULL_PARENT_DETAILS
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_details_list);
+ __pyx_v_details_list = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1152
+ * file_id = None
+ * details_list = entry[1]
+ * if -1 == self.source_index: # <<<<<<<<<<<<<<
+ * source_details = DirState.NULL_PARENT_DETAILS
+ * else:
+ */
+ __pyx_t_2 = (-1 == __pyx_v_self->source_index);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1153
+ * details_list = entry[1]
+ * if -1 == self.source_index:
+ * source_details = DirState.NULL_PARENT_DETAILS # <<<<<<<<<<<<<<
+ * else:
+ * source_details = details_list[self.source_index]
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__DirState); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__NULL_PARENT_DETAILS); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_source_details);
+ __pyx_v_source_details = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1155
+ * source_details = DirState.NULL_PARENT_DETAILS
+ * else:
+ * source_details = details_list[self.source_index] # <<<<<<<<<<<<<<
+ * target_details = details_list[self.target_index]
+ * target_minikind = _minikind_from_string(target_details[0])
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_details_list, __pyx_v_self->source_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_source_details);
+ __pyx_v_source_details = __pyx_t_3;
+ __pyx_t_3 = 0;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1156
+ * else:
+ * source_details = details_list[self.source_index]
+ * target_details = details_list[self.target_index] # <<<<<<<<<<<<<<
+ * target_minikind = _minikind_from_string(target_details[0])
+ * if path_info is not None and _versioned_minikind(target_minikind):
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_details_list, __pyx_v_self->target_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_target_details);
+ __pyx_v_target_details = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1157
+ * source_details = details_list[self.source_index]
+ * target_details = details_list[self.target_index]
+ * target_minikind = _minikind_from_string(target_details[0]) # <<<<<<<<<<<<<<
+ * if path_info is not None and _versioned_minikind(target_minikind):
+ * if self.target_index != 0:
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_target_details, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(__pyx_t_3); if (unlikely(__pyx_t_4 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_target_minikind = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1158
+ * target_details = details_list[self.target_index]
+ * target_minikind = _minikind_from_string(target_details[0])
+ * if path_info is not None and _versioned_minikind(target_minikind): # <<<<<<<<<<<<<<
+ * if self.target_index != 0:
+ * raise AssertionError("Unsupported target index %d" %
+ */
+ __pyx_t_2 = (__pyx_v_path_info != Py_None);
+ if (__pyx_t_2) {
+ __pyx_t_5 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_target_minikind);
+ } else {
+ __pyx_t_5 = __pyx_t_2;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1159
+ * target_minikind = _minikind_from_string(target_details[0])
+ * if path_info is not None and _versioned_minikind(target_minikind):
+ * if self.target_index != 0: # <<<<<<<<<<<<<<
+ * raise AssertionError("Unsupported target index %d" %
+ * self.target_index)
+ */
+ __pyx_t_5 = (__pyx_v_self->target_index != 0);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1161
+ * if self.target_index != 0:
+ * raise AssertionError("Unsupported target index %d" %
+ * self.target_index) # <<<<<<<<<<<<<<
+ * link_or_sha1 = _update_entry(self.state, entry, path_info[4], path_info[3])
+ * # The entry may have been modified by update_entry
+ */
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_self->target_index); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1161; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_31), __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1162
+ * raise AssertionError("Unsupported target index %d" %
+ * self.target_index)
+ * link_or_sha1 = _update_entry(self.state, entry, path_info[4], path_info[3]) # <<<<<<<<<<<<<<
+ * # The entry may have been modified by update_entry
+ * target_details = details_list[self.target_index]
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_path_info, 4, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__update_entry(__pyx_v_self->state, __pyx_v_entry, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1164
+ * link_or_sha1 = _update_entry(self.state, entry, path_info[4], path_info[3])
+ * # The entry may have been modified by update_entry
+ * target_details = details_list[self.target_index] # <<<<<<<<<<<<<<
+ * target_minikind = _minikind_from_string(target_details[0])
+ * else:
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_details_list, __pyx_v_self->target_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_v_target_details);
+ __pyx_v_target_details = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1165
+ * # The entry may have been modified by update_entry
+ * target_details = details_list[self.target_index]
+ * target_minikind = _minikind_from_string(target_details[0]) # <<<<<<<<<<<<<<
+ * else:
+ * link_or_sha1 = None
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_target_details, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_4 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(__pyx_t_6); if (unlikely(__pyx_t_4 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_v_target_minikind = __pyx_t_4;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1167
+ * target_minikind = _minikind_from_string(target_details[0])
+ * else:
+ * link_or_sha1 = None # <<<<<<<<<<<<<<
+ * # the rest of this function is 0.3 seconds on 50K paths, or
+ * # 0.000006 seconds per call.
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = Py_None;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1170
+ * # the rest of this function is 0.3 seconds on 50K paths, or
+ * # 0.000006 seconds per call.
+ * source_minikind = _minikind_from_string(source_details[0]) # <<<<<<<<<<<<<<
+ * if ((_versioned_minikind(source_minikind) or source_minikind == c'r')
+ * and _versioned_minikind(target_minikind)):
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_source_details, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_4 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(__pyx_t_6); if (unlikely(__pyx_t_4 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_v_source_minikind = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1171
+ * # 0.000006 seconds per call.
+ * source_minikind = _minikind_from_string(source_details[0])
+ * if ((_versioned_minikind(source_minikind) or source_minikind == c'r') # <<<<<<<<<<<<<<
+ * and _versioned_minikind(target_minikind)):
+ * # claimed content in both: diff
+ */
+ if (!__pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_source_minikind)) {
+ __pyx_t_5 = (__pyx_v_source_minikind == 'r');
+ __pyx_t_2 = __pyx_t_5;
+ } else {
+ __pyx_t_2 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_source_minikind);
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1172
+ * source_minikind = _minikind_from_string(source_details[0])
+ * if ((_versioned_minikind(source_minikind) or source_minikind == c'r')
+ * and _versioned_minikind(target_minikind)): # <<<<<<<<<<<<<<
+ * # claimed content in both: diff
+ * # r | fdlt | | add source to search, add id path move and perform
+ */
+ __pyx_t_5 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_target_minikind);
+ } else {
+ __pyx_t_5 = __pyx_t_2;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1178
+ * # r | fdlt | a | dangling file that was present in the basis.
+ * # | | | ???
+ * if source_minikind != c'r': # <<<<<<<<<<<<<<
+ * old_dirname = entry[0][0]
+ * old_basename = entry[0][1]
+ */
+ __pyx_t_5 = (__pyx_v_source_minikind != 'r');
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1179
+ * # | | | ???
+ * if source_minikind != c'r':
+ * old_dirname = entry[0][0] # <<<<<<<<<<<<<<
+ * old_basename = entry[0][1]
+ * old_path = path = None
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_old_dirname);
+ __pyx_v_old_dirname = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1180
+ * if source_minikind != c'r':
+ * old_dirname = entry[0][0]
+ * old_basename = entry[0][1] # <<<<<<<<<<<<<<
+ * old_path = path = None
+ * else:
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_old_basename);
+ __pyx_v_old_basename = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1181
+ * old_dirname = entry[0][0]
+ * old_basename = entry[0][1]
+ * old_path = path = None # <<<<<<<<<<<<<<
+ * else:
+ * # add the source to the search path to find any children it
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_old_path);
+ __pyx_v_old_path = Py_None;
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = Py_None;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1185
+ * # add the source to the search path to find any children it
+ * # has. TODO ? : only add if it is a container ?
+ * if (not self.doing_consistency_expansion and # <<<<<<<<<<<<<<
+ * not osutils.is_inside_any(self.searched_specific_files,
+ * source_details[1])):
+ */
+ __pyx_t_5 = (!__pyx_v_self->doing_consistency_expansion);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1186
+ * # has. TODO ? : only add if it is a container ?
+ * if (not self.doing_consistency_expansion and
+ * not osutils.is_inside_any(self.searched_specific_files, # <<<<<<<<<<<<<<
+ * source_details[1])):
+ * self.search_specific_files.add(source_details[1])
+ */
+ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__is_inside_any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1187
+ * if (not self.doing_consistency_expansion and
+ * not osutils.is_inside_any(self.searched_specific_files,
+ * source_details[1])): # <<<<<<<<<<<<<<
+ * self.search_specific_files.add(source_details[1])
+ * # expanding from a user requested path, parent expansion
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_source_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1187; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_self->searched_specific_files);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->searched_specific_files);
+ __Pyx_GIVEREF(__pyx_v_self->searched_specific_files);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_Call(__pyx_t_3, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_7 = (!__pyx_t_2);
+ __pyx_t_2 = __pyx_t_7;
+ } else {
+ __pyx_t_2 = __pyx_t_5;
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1188
+ * not osutils.is_inside_any(self.searched_specific_files,
+ * source_details[1])):
+ * self.search_specific_files.add(source_details[1]) # <<<<<<<<<<<<<<
+ * # expanding from a user requested path, parent expansion
+ * # for delta consistency happens later.
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_self->search_specific_files, __pyx_n_s__add); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_source_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_6, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1193
+ * # generate the old path; this is needed for stating later
+ * # as well.
+ * old_path = source_details[1] # <<<<<<<<<<<<<<
+ * old_dirname, old_basename = os.path.split(old_path)
+ * path = self.pathjoin(entry[0][0], entry[0][1])
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_source_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1193; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_old_path);
+ __pyx_v_old_path = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1194
+ * # as well.
+ * old_path = source_details[1]
+ * old_dirname, old_basename = os.path.split(old_path) # <<<<<<<<<<<<<<
+ * path = self.pathjoin(entry[0][0], entry[0][1])
+ * old_entry = self.state._get_entry(self.source_index,
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__os); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__path); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__split); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_old_path);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_old_path);
+ __Pyx_GIVEREF(__pyx_v_old_path);
+ __pyx_t_6 = PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyTuple_CheckExact(__pyx_t_6) && likely(PyTuple_GET_SIZE(__pyx_t_6) == 2)) {
+ PyObject* tuple = __pyx_t_6;
+ __pyx_t_3 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_1 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_old_dirname);
+ __pyx_v_old_dirname = __pyx_t_3;
+ __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_old_basename);
+ __pyx_v_old_basename = __pyx_t_1;
+ __pyx_t_1 = 0;
+ } else {
+ __pyx_t_8 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_8, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_UnpackItem(__pyx_t_8, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (__Pyx_EndUnpack(__pyx_t_8, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_v_old_dirname);
+ __pyx_v_old_dirname = __pyx_t_3;
+ __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_old_basename);
+ __pyx_v_old_basename = __pyx_t_1;
+ __pyx_t_1 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1195
+ * old_path = source_details[1]
+ * old_dirname, old_basename = os.path.split(old_path)
+ * path = self.pathjoin(entry[0][0], entry[0][1]) # <<<<<<<<<<<<<<
+ * old_entry = self.state._get_entry(self.source_index,
+ * path_utf8=old_path)
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_6, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_1 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1196
+ * old_dirname, old_basename = os.path.split(old_path)
+ * path = self.pathjoin(entry[0][0], entry[0][1])
+ * old_entry = self.state._get_entry(self.source_index, # <<<<<<<<<<<<<<
+ * path_utf8=old_path)
+ * # update the source details variable to be the real
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___get_entry); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = PyInt_FromLong(__pyx_v_self->source_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_6));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1197
+ * path = self.pathjoin(entry[0][0], entry[0][1])
+ * old_entry = self.state._get_entry(self.source_index,
+ * path_utf8=old_path) # <<<<<<<<<<<<<<
+ * # update the source details variable to be the real
+ * # location.
+ */
+ if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__path_utf8), __pyx_v_old_path) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = PyEval_CallObjectWithKeywords(__pyx_t_3, __pyx_t_1, ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_old_entry);
+ __pyx_v_old_entry = __pyx_t_8;
+ __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1200
+ * # update the source details variable to be the real
+ * # location.
+ * if old_entry == (None, None): # <<<<<<<<<<<<<<
+ * raise errors.CorruptDirstate(self.state._filename,
+ * "entry '%s/%s' is considered renamed from %r"
+ */
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_t_6 = PyObject_RichCompare(__pyx_v_old_entry, __pyx_t_8, Py_EQ); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1201
+ * # location.
+ * if old_entry == (None, None):
+ * raise errors.CorruptDirstate(self.state._filename, # <<<<<<<<<<<<<<
+ * "entry '%s/%s' is considered renamed from %r"
+ * " but source does not exist\n"
+ */
+ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_8 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__CorruptDirstate); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___filename); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1204
+ * "entry '%s/%s' is considered renamed from %r"
+ * " but source does not exist\n"
+ * "entry: %s" % (entry[0][0], entry[0][1], old_path, entry)) # <<<<<<<<<<<<<<
+ * source_details = old_entry[1][self.source_index]
+ * source_minikind = _minikind_from_string(source_details[0])
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_v_old_path);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_old_path);
+ __Pyx_GIVEREF(__pyx_v_old_path);
+ __Pyx_INCREF(__pyx_v_entry);
+ PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_entry);
+ __Pyx_GIVEREF(__pyx_v_entry);
+ __pyx_t_3 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_32), __pyx_t_1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_t_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_9));
+ __pyx_t_6 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_Call(__pyx_t_8, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_Raise(__pyx_t_9, 0, 0);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1205
+ * " but source does not exist\n"
+ * "entry: %s" % (entry[0][0], entry[0][1], old_path, entry))
+ * source_details = old_entry[1][self.source_index] # <<<<<<<<<<<<<<
+ * source_minikind = _minikind_from_string(source_details[0])
+ * if path_info is None:
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_old_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, __pyx_v_self->source_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_source_details);
+ __pyx_v_source_details = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1206
+ * "entry: %s" % (entry[0][0], entry[0][1], old_path, entry))
+ * source_details = old_entry[1][self.source_index]
+ * source_minikind = _minikind_from_string(source_details[0]) # <<<<<<<<<<<<<<
+ * if path_info is None:
+ * # the file is missing on disk, show as removed.
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_source_details, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(__pyx_t_1); if (unlikely(__pyx_t_4 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_source_minikind = __pyx_t_4;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1207
+ * source_details = old_entry[1][self.source_index]
+ * source_minikind = _minikind_from_string(source_details[0])
+ * if path_info is None: # <<<<<<<<<<<<<<
+ * # the file is missing on disk, show as removed.
+ * content_change = 1
+ */
+ __pyx_t_2 = (__pyx_v_path_info == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1209
+ * if path_info is None:
+ * # the file is missing on disk, show as removed.
+ * content_change = 1 # <<<<<<<<<<<<<<
+ * target_kind = None
+ * target_exec = False
+ */
+ __pyx_v_content_change = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1210
+ * # the file is missing on disk, show as removed.
+ * content_change = 1
+ * target_kind = None # <<<<<<<<<<<<<<
+ * target_exec = False
+ * else:
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_target_kind);
+ __pyx_v_target_kind = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1211
+ * content_change = 1
+ * target_kind = None
+ * target_exec = False # <<<<<<<<<<<<<<
+ * else:
+ * # source and target are both versioned and disk file is present.
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1214
+ * else:
+ * # source and target are both versioned and disk file is present.
+ * target_kind = path_info[2] # <<<<<<<<<<<<<<
+ * if target_kind == 'directory':
+ * if path is None:
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_target_kind);
+ __pyx_v_target_kind = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1215
+ * # source and target are both versioned and disk file is present.
+ * target_kind = path_info[2]
+ * if target_kind == 'directory': # <<<<<<<<<<<<<<
+ * if path is None:
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ */
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_target_kind, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1216
+ * target_kind = path_info[2]
+ * if target_kind == 'directory':
+ * if path is None: # <<<<<<<<<<<<<<
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ * file_id = entry[0][2]
+ */
+ __pyx_t_2 = (__pyx_v_path == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1217
+ * if target_kind == 'directory':
+ * if path is None:
+ * old_path = path = self.pathjoin(old_dirname, old_basename) # <<<<<<<<<<<<<<
+ * file_id = entry[0][2]
+ * self.new_dirname_to_file_id[path] = file_id
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_old_dirname);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_old_dirname);
+ __Pyx_GIVEREF(__pyx_v_old_dirname);
+ __Pyx_INCREF(__pyx_v_old_basename);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_old_basename);
+ __Pyx_GIVEREF(__pyx_v_old_basename);
+ __pyx_t_9 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_old_path);
+ __pyx_v_old_path = __pyx_t_9;
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = __pyx_t_9;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1218
+ * if path is None:
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ * file_id = entry[0][2] # <<<<<<<<<<<<<<
+ * self.new_dirname_to_file_id[path] = file_id
+ * if source_minikind != c'd':
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_file_id);
+ __pyx_v_file_id = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1219
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ * file_id = entry[0][2]
+ * self.new_dirname_to_file_id[path] = file_id # <<<<<<<<<<<<<<
+ * if source_minikind != c'd':
+ * content_change = 1
+ */
+ if (PyObject_SetItem(__pyx_v_self->new_dirname_to_file_id, __pyx_v_path, __pyx_v_file_id) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1220
+ * file_id = entry[0][2]
+ * self.new_dirname_to_file_id[path] = file_id
+ * if source_minikind != c'd': # <<<<<<<<<<<<<<
+ * content_change = 1
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_source_minikind != 'd');
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1221
+ * self.new_dirname_to_file_id[path] = file_id
+ * if source_minikind != c'd':
+ * content_change = 1 # <<<<<<<<<<<<<<
+ * else:
+ * # directories have no fingerprint
+ */
+ __pyx_v_content_change = 1;
+ goto __pyx_L13;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1224
+ * else:
+ * # directories have no fingerprint
+ * content_change = 0 # <<<<<<<<<<<<<<
+ * target_exec = False
+ * elif target_kind == 'file':
+ */
+ __pyx_v_content_change = 0;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1225
+ * # directories have no fingerprint
+ * content_change = 0
+ * target_exec = False # <<<<<<<<<<<<<<
+ * elif target_kind == 'file':
+ * if source_minikind != c'f':
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L11;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1226
+ * content_change = 0
+ * target_exec = False
+ * elif target_kind == 'file': # <<<<<<<<<<<<<<
+ * if source_minikind != c'f':
+ * content_change = 1
+ */
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_target_kind, ((PyObject *)__pyx_n_s__file), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1227
+ * target_exec = False
+ * elif target_kind == 'file':
+ * if source_minikind != c'f': # <<<<<<<<<<<<<<
+ * content_change = 1
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_source_minikind != 'f');
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1228
+ * elif target_kind == 'file':
+ * if source_minikind != c'f':
+ * content_change = 1 # <<<<<<<<<<<<<<
+ * else:
+ * # Check the sha. We can't just rely on the size as
+ */
+ __pyx_v_content_change = 1;
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1233
+ * # content filtering may mean differing sizes actually
+ * # map to the same content
+ * if link_or_sha1 is None: # <<<<<<<<<<<<<<
+ * # Stat cache miss:
+ * statvalue, link_or_sha1 = \
+ */
+ __pyx_t_2 = (__pyx_v_link_or_sha1 == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1236
+ * # Stat cache miss:
+ * statvalue, link_or_sha1 = \
+ * self.state._sha1_provider.stat_and_sha1( # <<<<<<<<<<<<<<
+ * path_info[4])
+ * self.state._observed_sha1(entry, link_or_sha1,
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___sha1_provider); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__stat_and_sha1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1237
+ * statvalue, link_or_sha1 = \
+ * self.state._sha1_provider.stat_and_sha1(
+ * path_info[4]) # <<<<<<<<<<<<<<
+ * self.state._observed_sha1(entry, link_or_sha1,
+ * statvalue)
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_path_info, 4, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1237; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_9, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ if (PyTuple_CheckExact(__pyx_t_1) && likely(PyTuple_GET_SIZE(__pyx_t_1) == 2)) {
+ PyObject* tuple = __pyx_t_1;
+ __pyx_t_8 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_8);
+ __pyx_t_9 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_9);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1235
+ * if link_or_sha1 is None:
+ * # Stat cache miss:
+ * statvalue, link_or_sha1 = \ # <<<<<<<<<<<<<<
+ * self.state._sha1_provider.stat_and_sha1(
+ * path_info[4])
+ */
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_statvalue);
+ __pyx_v_statvalue = __pyx_t_8;
+ __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = __pyx_t_9;
+ __pyx_t_9 = 0;
+ } else {
+ __pyx_t_6 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __Pyx_UnpackItem(__pyx_t_6, 0); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = __Pyx_UnpackItem(__pyx_t_6, 1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ if (__Pyx_EndUnpack(__pyx_t_6, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_statvalue);
+ __pyx_v_statvalue = __pyx_t_8;
+ __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __pyx_v_link_or_sha1 = __pyx_t_9;
+ __pyx_t_9 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1238
+ * self.state._sha1_provider.stat_and_sha1(
+ * path_info[4])
+ * self.state._observed_sha1(entry, link_or_sha1, # <<<<<<<<<<<<<<
+ * statvalue)
+ * content_change = (link_or_sha1 != source_details[1])
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___observed_sha1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1238; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1239
+ * path_info[4])
+ * self.state._observed_sha1(entry, link_or_sha1,
+ * statvalue) # <<<<<<<<<<<<<<
+ * content_change = (link_or_sha1 != source_details[1])
+ * # Target details is updated at update_entry time
+ */
+ __pyx_t_9 = PyTuple_New(3); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1238; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_v_entry);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_entry);
+ __Pyx_GIVEREF(__pyx_v_entry);
+ __Pyx_INCREF(__pyx_v_link_or_sha1);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_link_or_sha1);
+ __Pyx_GIVEREF(__pyx_v_link_or_sha1);
+ __Pyx_INCREF(__pyx_v_statvalue);
+ PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_v_statvalue);
+ __Pyx_GIVEREF(__pyx_v_statvalue);
+ __pyx_t_8 = PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1238; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1240
+ * self.state._observed_sha1(entry, link_or_sha1,
+ * statvalue)
+ * content_change = (link_or_sha1 != source_details[1]) # <<<<<<<<<<<<<<
+ * # Target details is updated at update_entry time
+ * if self.use_filesystem_for_exec:
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_source_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyObject_RichCompare(__pyx_v_link_or_sha1, __pyx_t_8, Py_NE); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_10 = __Pyx_PyInt_AsInt(__pyx_t_9); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_v_content_change = __pyx_t_10;
+ }
+ __pyx_L14:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1242
+ * content_change = (link_or_sha1 != source_details[1])
+ * # Target details is updated at update_entry time
+ * if self.use_filesystem_for_exec: # <<<<<<<<<<<<<<
+ * # We don't need S_ISREG here, because we are sure
+ * # we are dealing with a file.
+ */
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_self->use_filesystem_for_exec); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1242; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1245
+ * # We don't need S_ISREG here, because we are sure
+ * # we are dealing with a file.
+ * target_exec = bool(S_IXUSR & path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * else:
+ * target_exec = target_details[3]
+ */
+ __pyx_t_9 = PyInt_FromLong(S_IXUSR); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyNumber_And(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = __Pyx_PyBool_FromLong(__pyx_t_2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_8;
+ __pyx_t_8 = 0;
+ goto __pyx_L16;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1247
+ * target_exec = bool(S_IXUSR & path_info[3].st_mode)
+ * else:
+ * target_exec = target_details[3] # <<<<<<<<<<<<<<
+ * elif target_kind == 'symlink':
+ * if source_minikind != c'l':
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_target_details, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_8;
+ __pyx_t_8 = 0;
+ }
+ __pyx_L16:;
+ goto __pyx_L11;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1248
+ * else:
+ * target_exec = target_details[3]
+ * elif target_kind == 'symlink': # <<<<<<<<<<<<<<
+ * if source_minikind != c'l':
+ * content_change = 1
+ */
+ __pyx_t_8 = PyObject_RichCompare(__pyx_v_target_kind, ((PyObject *)__pyx_n_s__symlink), Py_EQ); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1248; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1248; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1249
+ * target_exec = target_details[3]
+ * elif target_kind == 'symlink':
+ * if source_minikind != c'l': # <<<<<<<<<<<<<<
+ * content_change = 1
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_source_minikind != 'l');
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1250
+ * elif target_kind == 'symlink':
+ * if source_minikind != c'l':
+ * content_change = 1 # <<<<<<<<<<<<<<
+ * else:
+ * content_change = (link_or_sha1 != source_details[1])
+ */
+ __pyx_v_content_change = 1;
+ goto __pyx_L17;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1252
+ * content_change = 1
+ * else:
+ * content_change = (link_or_sha1 != source_details[1]) # <<<<<<<<<<<<<<
+ * target_exec = False
+ * elif target_kind == 'tree-reference':
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_source_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1252; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_link_or_sha1, __pyx_t_8, Py_NE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1252; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_10 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1252; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_content_change = __pyx_t_10;
+ }
+ __pyx_L17:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1253
+ * else:
+ * content_change = (link_or_sha1 != source_details[1])
+ * target_exec = False # <<<<<<<<<<<<<<
+ * elif target_kind == 'tree-reference':
+ * if source_minikind != c't':
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L11;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1254
+ * content_change = (link_or_sha1 != source_details[1])
+ * target_exec = False
+ * elif target_kind == 'tree-reference': # <<<<<<<<<<<<<<
+ * if source_minikind != c't':
+ * content_change = 1
+ */
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_target_kind, ((PyObject *)__pyx_kp_s_33), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1255
+ * target_exec = False
+ * elif target_kind == 'tree-reference':
+ * if source_minikind != c't': # <<<<<<<<<<<<<<
+ * content_change = 1
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_source_minikind != 't');
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1256
+ * elif target_kind == 'tree-reference':
+ * if source_minikind != c't':
+ * content_change = 1 # <<<<<<<<<<<<<<
+ * else:
+ * content_change = 0
+ */
+ __pyx_v_content_change = 1;
+ goto __pyx_L18;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1258
+ * content_change = 1
+ * else:
+ * content_change = 0 # <<<<<<<<<<<<<<
+ * target_exec = False
+ * else:
+ */
+ __pyx_v_content_change = 0;
+ }
+ __pyx_L18:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1259
+ * else:
+ * content_change = 0
+ * target_exec = False # <<<<<<<<<<<<<<
+ * else:
+ * if path is None:
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L11;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1261
+ * target_exec = False
+ * else:
+ * if path is None: # <<<<<<<<<<<<<<
+ * path = self.pathjoin(old_dirname, old_basename)
+ * raise errors.BadFileKindError(path, path_info[2])
+ */
+ __pyx_t_2 = (__pyx_v_path == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1262
+ * else:
+ * if path is None:
+ * path = self.pathjoin(old_dirname, old_basename) # <<<<<<<<<<<<<<
+ * raise errors.BadFileKindError(path, path_info[2])
+ * if source_minikind == c'd':
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1262; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_old_dirname);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_old_dirname);
+ __Pyx_GIVEREF(__pyx_v_old_dirname);
+ __Pyx_INCREF(__pyx_v_old_basename);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_old_basename);
+ __Pyx_GIVEREF(__pyx_v_old_basename);
+ __pyx_t_8 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_1, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1262; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = __pyx_t_8;
+ __pyx_t_8 = 0;
+ goto __pyx_L19;
+ }
+ __pyx_L19:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1263
+ * if path is None:
+ * path = self.pathjoin(old_dirname, old_basename)
+ * raise errors.BadFileKindError(path, path_info[2]) # <<<<<<<<<<<<<<
+ * if source_minikind == c'd':
+ * if path is None:
+ */
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__BadFileKindError); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_Raise(__pyx_t_8, 0, 0);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L11:;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1264
+ * path = self.pathjoin(old_dirname, old_basename)
+ * raise errors.BadFileKindError(path, path_info[2])
+ * if source_minikind == c'd': # <<<<<<<<<<<<<<
+ * if path is None:
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ */
+ __pyx_t_2 = (__pyx_v_source_minikind == 'd');
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1265
+ * raise errors.BadFileKindError(path, path_info[2])
+ * if source_minikind == c'd':
+ * if path is None: # <<<<<<<<<<<<<<
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ * if file_id is None:
+ */
+ __pyx_t_2 = (__pyx_v_path == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1266
+ * if source_minikind == c'd':
+ * if path is None:
+ * old_path = path = self.pathjoin(old_dirname, old_basename) # <<<<<<<<<<<<<<
+ * if file_id is None:
+ * file_id = entry[0][2]
+ */
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1266; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_INCREF(__pyx_v_old_dirname);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_old_dirname);
+ __Pyx_GIVEREF(__pyx_v_old_dirname);
+ __Pyx_INCREF(__pyx_v_old_basename);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_old_basename);
+ __Pyx_GIVEREF(__pyx_v_old_basename);
+ __pyx_t_9 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_8, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1266; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_old_path);
+ __pyx_v_old_path = __pyx_t_9;
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = __pyx_t_9;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L21;
+ }
+ __pyx_L21:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1267
+ * if path is None:
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ * if file_id is None: # <<<<<<<<<<<<<<
+ * file_id = entry[0][2]
+ * self.old_dirname_to_file_id[old_path] = file_id
+ */
+ __pyx_t_2 = (__pyx_v_file_id == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1268
+ * old_path = path = self.pathjoin(old_dirname, old_basename)
+ * if file_id is None:
+ * file_id = entry[0][2] # <<<<<<<<<<<<<<
+ * self.old_dirname_to_file_id[old_path] = file_id
+ * # parent id is the entry for the path in the target tree
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1268; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_9, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1268; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_file_id);
+ __pyx_v_file_id = __pyx_t_8;
+ __pyx_t_8 = 0;
+ goto __pyx_L22;
+ }
+ __pyx_L22:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1269
+ * if file_id is None:
+ * file_id = entry[0][2]
+ * self.old_dirname_to_file_id[old_path] = file_id # <<<<<<<<<<<<<<
+ * # parent id is the entry for the path in the target tree
+ * if old_basename and old_dirname == self.last_source_parent[0]:
+ */
+ if (PyObject_SetItem(__pyx_v_self->old_dirname_to_file_id, __pyx_v_old_path, __pyx_v_file_id) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1269; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L20;
+ }
+ __pyx_L20:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1271
+ * self.old_dirname_to_file_id[old_path] = file_id
+ * # parent id is the entry for the path in the target tree
+ * if old_basename and old_dirname == self.last_source_parent[0]: # <<<<<<<<<<<<<<
+ * # use a cached hit for non-root source entries.
+ * source_parent_id = self.last_source_parent[1]
+ */
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_old_basename); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_2) {
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_self->last_source_parent, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyObject_RichCompare(__pyx_v_old_dirname, __pyx_t_8, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_7 = __pyx_t_5;
+ } else {
+ __pyx_t_7 = __pyx_t_2;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1273
+ * if old_basename and old_dirname == self.last_source_parent[0]:
+ * # use a cached hit for non-root source entries.
+ * source_parent_id = self.last_source_parent[1] # <<<<<<<<<<<<<<
+ * else:
+ * try:
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_self->last_source_parent, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1273; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_source_parent_id);
+ __pyx_v_source_parent_id = __pyx_t_9;
+ __pyx_t_9 = 0;
+ goto __pyx_L23;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1275
+ * source_parent_id = self.last_source_parent[1]
+ * else:
+ * try: # <<<<<<<<<<<<<<
+ * source_parent_id = self.old_dirname_to_file_id[old_dirname]
+ * except KeyError, _:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1276
+ * else:
+ * try:
+ * source_parent_id = self.old_dirname_to_file_id[old_dirname] # <<<<<<<<<<<<<<
+ * except KeyError, _:
+ * source_parent_entry = self.state._get_entry(self.source_index,
+ */
+ __pyx_t_9 = PyObject_GetItem(__pyx_v_self->old_dirname_to_file_id, __pyx_v_old_dirname); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1276; __pyx_clineno = __LINE__; goto __pyx_L24_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_source_parent_id);
+ __pyx_v_source_parent_id = __pyx_t_9;
+ __pyx_t_9 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L31_try_end;
+ __pyx_L24_error:;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1277
+ * try:
+ * source_parent_id = self.old_dirname_to_file_id[old_dirname]
+ * except KeyError, _: # <<<<<<<<<<<<<<
+ * source_parent_entry = self.state._get_entry(self.source_index,
+ * path_utf8=old_dirname)
+ */
+ __pyx_t_10 = PyErr_ExceptionMatches(__pyx_builtin_KeyError);
+ if (__pyx_t_10) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._process_entry");
+ if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_8, &__pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1277; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_8;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1278
+ * source_parent_id = self.old_dirname_to_file_id[old_dirname]
+ * except KeyError, _:
+ * source_parent_entry = self.state._get_entry(self.source_index, # <<<<<<<<<<<<<<
+ * path_utf8=old_dirname)
+ * source_parent_id = source_parent_entry[0][2]
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___get_entry); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_self->source_index); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1279
+ * except KeyError, _:
+ * source_parent_entry = self.state._get_entry(self.source_index,
+ * path_utf8=old_dirname) # <<<<<<<<<<<<<<
+ * source_parent_id = source_parent_entry[0][2]
+ * if source_parent_id == entry[0][2]:
+ */
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__path_utf8), __pyx_v_old_dirname) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __pyx_t_12 = PyEval_CallObjectWithKeywords(__pyx_t_6, __pyx_t_11, ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_source_parent_entry);
+ __pyx_v_source_parent_entry = __pyx_t_12;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1280
+ * source_parent_entry = self.state._get_entry(self.source_index,
+ * path_utf8=old_dirname)
+ * source_parent_id = source_parent_entry[0][2] # <<<<<<<<<<<<<<
+ * if source_parent_id == entry[0][2]:
+ * # This is the root, so the parent is None
+ */
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_source_parent_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1280; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_12, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1280; __pyx_clineno = __LINE__; goto __pyx_L26_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_v_source_parent_id);
+ __pyx_v_source_parent_id = __pyx_t_3;
+ __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ goto __pyx_L25_exception_handled;
+ }
+ __pyx_L26_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L25_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L31_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1281
+ * path_utf8=old_dirname)
+ * source_parent_id = source_parent_entry[0][2]
+ * if source_parent_id == entry[0][2]: # <<<<<<<<<<<<<<
+ * # This is the root, so the parent is None
+ * source_parent_id = None
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_1, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_source_parent_id, __pyx_t_8, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1283
+ * if source_parent_id == entry[0][2]:
+ * # This is the root, so the parent is None
+ * source_parent_id = None # <<<<<<<<<<<<<<
+ * else:
+ * self.last_source_parent[0] = old_dirname
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_source_parent_id);
+ __pyx_v_source_parent_id = Py_None;
+ goto __pyx_L34;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1285
+ * source_parent_id = None
+ * else:
+ * self.last_source_parent[0] = old_dirname # <<<<<<<<<<<<<<
+ * self.last_source_parent[1] = source_parent_id
+ * new_dirname = entry[0][0]
+ */
+ if (__Pyx_SetItemInt(__pyx_v_self->last_source_parent, 0, __pyx_v_old_dirname, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1286
+ * else:
+ * self.last_source_parent[0] = old_dirname
+ * self.last_source_parent[1] = source_parent_id # <<<<<<<<<<<<<<
+ * new_dirname = entry[0][0]
+ * if entry[0][1] and new_dirname == self.last_target_parent[0]:
+ */
+ if (__Pyx_SetItemInt(__pyx_v_self->last_source_parent, 1, __pyx_v_source_parent_id, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L34:;
+ }
+ __pyx_L23:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1287
+ * self.last_source_parent[0] = old_dirname
+ * self.last_source_parent[1] = source_parent_id
+ * new_dirname = entry[0][0] # <<<<<<<<<<<<<<
+ * if entry[0][1] and new_dirname == self.last_target_parent[0]:
+ * # use a cached hit for non-root target entries.
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_new_dirname);
+ __pyx_v_new_dirname = __pyx_t_8;
+ __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1288
+ * self.last_source_parent[1] = source_parent_id
+ * new_dirname = entry[0][0]
+ * if entry[0][1] and new_dirname == self.last_target_parent[0]: # <<<<<<<<<<<<<<
+ * # use a cached hit for non-root target entries.
+ * target_parent_id = self.last_target_parent[1]
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_8, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_7) {
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_self->last_target_parent, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_8 = PyObject_RichCompare(__pyx_v_new_dirname, __pyx_t_1, Py_EQ); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_5 = __pyx_t_2;
+ } else {
+ __pyx_t_5 = __pyx_t_7;
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1290
+ * if entry[0][1] and new_dirname == self.last_target_parent[0]:
+ * # use a cached hit for non-root target entries.
+ * target_parent_id = self.last_target_parent[1] # <<<<<<<<<<<<<<
+ * else:
+ * try:
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_self->last_target_parent, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_v_target_parent_id);
+ __pyx_v_target_parent_id = __pyx_t_8;
+ __pyx_t_8 = 0;
+ goto __pyx_L35;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1292
+ * target_parent_id = self.last_target_parent[1]
+ * else:
+ * try: # <<<<<<<<<<<<<<
+ * target_parent_id = self.new_dirname_to_file_id[new_dirname]
+ * except KeyError, _:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1293
+ * else:
+ * try:
+ * target_parent_id = self.new_dirname_to_file_id[new_dirname] # <<<<<<<<<<<<<<
+ * except KeyError, _:
+ * # TODO: We don't always need to do the lookup, because the
+ */
+ __pyx_t_8 = PyObject_GetItem(__pyx_v_self->new_dirname_to_file_id, __pyx_v_new_dirname); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1293; __pyx_clineno = __LINE__; goto __pyx_L36_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_v_target_parent_id);
+ __pyx_v_target_parent_id = __pyx_t_8;
+ __pyx_t_8 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L43_try_end;
+ __pyx_L36_error:;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1294
+ * try:
+ * target_parent_id = self.new_dirname_to_file_id[new_dirname]
+ * except KeyError, _: # <<<<<<<<<<<<<<
+ * # TODO: We don't always need to do the lookup, because the
+ * # parent entry will be the same as the source entry.
+ */
+ __pyx_t_10 = PyErr_ExceptionMatches(__pyx_builtin_KeyError);
+ if (__pyx_t_10) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._process_entry");
+ if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_1, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1294; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1297
+ * # TODO: We don't always need to do the lookup, because the
+ * # parent entry will be the same as the source entry.
+ * target_parent_entry = self.state._get_entry(self.target_index, # <<<<<<<<<<<<<<
+ * path_utf8=new_dirname)
+ * if target_parent_entry == (None, None):
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___get_entry); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1297; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_12 = PyInt_FromLong(__pyx_v_self->target_index); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1297; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1297; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyDict_New(); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1297; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_12));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1298
+ * # parent entry will be the same as the source entry.
+ * target_parent_entry = self.state._get_entry(self.target_index,
+ * path_utf8=new_dirname) # <<<<<<<<<<<<<<
+ * if target_parent_entry == (None, None):
+ * raise AssertionError(
+ */
+ if (PyDict_SetItem(__pyx_t_12, ((PyObject *)__pyx_n_s__path_utf8), __pyx_v_new_dirname) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1297; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __pyx_t_6 = PyEval_CallObjectWithKeywords(__pyx_t_3, __pyx_t_11, ((PyObject *)__pyx_t_12)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1297; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_12)); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_v_target_parent_entry);
+ __pyx_v_target_parent_entry = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1299
+ * target_parent_entry = self.state._get_entry(self.target_index,
+ * path_utf8=new_dirname)
+ * if target_parent_entry == (None, None): # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * "Could not find target parent in wt: %s\nparent of: %s"
+ */
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1299; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_t_12 = PyObject_RichCompare(__pyx_v_target_parent_entry, __pyx_t_6, Py_EQ); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1299; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1299; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1302
+ * raise AssertionError(
+ * "Could not find target parent in wt: %s\nparent of: %s"
+ * % (new_dirname, entry)) # <<<<<<<<<<<<<<
+ * target_parent_id = target_parent_entry[0][2]
+ * if target_parent_id == entry[0][2]:
+ */
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1302; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(__pyx_v_new_dirname);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_v_new_dirname);
+ __Pyx_GIVEREF(__pyx_v_new_dirname);
+ __Pyx_INCREF(__pyx_v_entry);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_v_entry);
+ __Pyx_GIVEREF(__pyx_v_entry);
+ __pyx_t_6 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_34), __pyx_t_12); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1302; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_6));
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1300; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, ((PyObject *)__pyx_t_6));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_6));
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_12, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1300; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1300; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ goto __pyx_L46;
+ }
+ __pyx_L46:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1303
+ * "Could not find target parent in wt: %s\nparent of: %s"
+ * % (new_dirname, entry))
+ * target_parent_id = target_parent_entry[0][2] # <<<<<<<<<<<<<<
+ * if target_parent_id == entry[0][2]:
+ * # This is the root, so the parent is None
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_target_parent_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1303; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_6, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1303; __pyx_clineno = __LINE__; goto __pyx_L38_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_target_parent_id);
+ __pyx_v_target_parent_id = __pyx_t_12;
+ __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L37_exception_handled;
+ }
+ __pyx_L38_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L37_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L43_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1304
+ * % (new_dirname, entry))
+ * target_parent_id = target_parent_entry[0][2]
+ * if target_parent_id == entry[0][2]: # <<<<<<<<<<<<<<
+ * # This is the root, so the parent is None
+ * target_parent_id = None
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_RichCompare(__pyx_v_target_parent_id, __pyx_t_1, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1306
+ * if target_parent_id == entry[0][2]:
+ * # This is the root, so the parent is None
+ * target_parent_id = None # <<<<<<<<<<<<<<
+ * else:
+ * self.last_target_parent[0] = new_dirname
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_target_parent_id);
+ __pyx_v_target_parent_id = Py_None;
+ goto __pyx_L47;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1308
+ * target_parent_id = None
+ * else:
+ * self.last_target_parent[0] = new_dirname # <<<<<<<<<<<<<<
+ * self.last_target_parent[1] = target_parent_id
+ *
+ */
+ if (__Pyx_SetItemInt(__pyx_v_self->last_target_parent, 0, __pyx_v_new_dirname, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1309
+ * else:
+ * self.last_target_parent[0] = new_dirname
+ * self.last_target_parent[1] = target_parent_id # <<<<<<<<<<<<<<
+ *
+ * source_exec = source_details[3]
+ */
+ if (__Pyx_SetItemInt(__pyx_v_self->last_target_parent, 1, __pyx_v_target_parent_id, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1309; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L47:;
+ }
+ __pyx_L35:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1311
+ * self.last_target_parent[1] = target_parent_id
+ *
+ * source_exec = source_details[3] # <<<<<<<<<<<<<<
+ * changed = (content_change
+ * or source_parent_id != target_parent_id
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_source_details, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1311; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_source_exec);
+ __pyx_v_source_exec = __pyx_t_9;
+ __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1313
+ * source_exec = source_details[3]
+ * changed = (content_change
+ * or source_parent_id != target_parent_id # <<<<<<<<<<<<<<
+ * or old_basename != entry[0][1]
+ * or source_exec != target_exec
+ */
+ __pyx_t_9 = PyInt_FromLong(__pyx_v_content_change); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_5) {
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1314
+ * changed = (content_change
+ * or source_parent_id != target_parent_id
+ * or old_basename != entry[0][1] # <<<<<<<<<<<<<<
+ * or source_exec != target_exec
+ * )
+ */
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_source_parent_id, __pyx_v_target_parent_id, Py_NE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_5) {
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1315
+ * or source_parent_id != target_parent_id
+ * or old_basename != entry[0][1]
+ * or source_exec != target_exec # <<<<<<<<<<<<<<
+ * )
+ * if not changed and not self.include_unchanged:
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_8, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_RichCompare(__pyx_v_old_basename, __pyx_t_12, Py_NE); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_5) {
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1316
+ * or old_basename != entry[0][1]
+ * or source_exec != target_exec
+ * ) # <<<<<<<<<<<<<<
+ * if not changed and not self.include_unchanged:
+ * return None, False
+ */
+ __pyx_t_12 = PyObject_RichCompare(__pyx_v_source_exec, __pyx_v_target_exec, Py_NE); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_6 = __pyx_t_12;
+ __pyx_t_12 = 0;
+ } else {
+ __pyx_t_6 = __pyx_t_8;
+ __pyx_t_8 = 0;
+ }
+ __pyx_t_8 = __pyx_t_6;
+ __pyx_t_6 = 0;
+ } else {
+ __pyx_t_8 = __pyx_t_1;
+ __pyx_t_1 = 0;
+ }
+ __pyx_t_1 = __pyx_t_8;
+ __pyx_t_8 = 0;
+ } else {
+ __pyx_t_1 = __pyx_t_9;
+ __pyx_t_9 = 0;
+ }
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1317
+ * or source_exec != target_exec
+ * )
+ * if not changed and not self.include_unchanged: # <<<<<<<<<<<<<<
+ * return None, False
+ * else:
+ */
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = (!__pyx_t_5);
+ if (__pyx_t_7) {
+ __pyx_t_5 = (!__pyx_v_self->include_unchanged);
+ __pyx_t_2 = __pyx_t_5;
+ } else {
+ __pyx_t_2 = __pyx_t_7;
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1318
+ * )
+ * if not changed and not self.include_unchanged:
+ * return None, False # <<<<<<<<<<<<<<
+ * else:
+ * if old_path is None:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_9;
+ __pyx_t_9 = 0;
+ goto __pyx_L0;
+ goto __pyx_L48;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1320
+ * return None, False
+ * else:
+ * if old_path is None: # <<<<<<<<<<<<<<
+ * path = self.pathjoin(old_dirname, old_basename)
+ * old_path = path
+ */
+ __pyx_t_2 = (__pyx_v_old_path == Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1321
+ * else:
+ * if old_path is None:
+ * path = self.pathjoin(old_dirname, old_basename) # <<<<<<<<<<<<<<
+ * old_path = path
+ * old_path_u = self.utf8_decode(old_path)[0]
+ */
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1321; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_v_old_dirname);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_old_dirname);
+ __Pyx_GIVEREF(__pyx_v_old_dirname);
+ __Pyx_INCREF(__pyx_v_old_basename);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_old_basename);
+ __Pyx_GIVEREF(__pyx_v_old_basename);
+ __pyx_t_1 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1321; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1322
+ * if old_path is None:
+ * path = self.pathjoin(old_dirname, old_basename)
+ * old_path = path # <<<<<<<<<<<<<<
+ * old_path_u = self.utf8_decode(old_path)[0]
+ * path_u = old_path_u
+ */
+ __Pyx_INCREF(__pyx_v_path);
+ __Pyx_DECREF(__pyx_v_old_path);
+ __pyx_v_old_path = __pyx_v_path;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1323
+ * path = self.pathjoin(old_dirname, old_basename)
+ * old_path = path
+ * old_path_u = self.utf8_decode(old_path)[0] # <<<<<<<<<<<<<<
+ * path_u = old_path_u
+ * else:
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_old_path);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_old_path);
+ __Pyx_GIVEREF(__pyx_v_old_path);
+ __pyx_t_9 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_old_path_u);
+ __pyx_v_old_path_u = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1324
+ * old_path = path
+ * old_path_u = self.utf8_decode(old_path)[0]
+ * path_u = old_path_u # <<<<<<<<<<<<<<
+ * else:
+ * old_path_u = self.utf8_decode(old_path)[0]
+ */
+ __Pyx_INCREF(__pyx_v_old_path_u);
+ __Pyx_DECREF(__pyx_v_path_u);
+ __pyx_v_path_u = __pyx_v_old_path_u;
+ goto __pyx_L49;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1326
+ * path_u = old_path_u
+ * else:
+ * old_path_u = self.utf8_decode(old_path)[0] # <<<<<<<<<<<<<<
+ * if old_path == path:
+ * path_u = old_path_u
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_old_path);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_old_path);
+ __Pyx_GIVEREF(__pyx_v_old_path);
+ __pyx_t_9 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_old_path_u);
+ __pyx_v_old_path_u = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1327
+ * else:
+ * old_path_u = self.utf8_decode(old_path)[0]
+ * if old_path == path: # <<<<<<<<<<<<<<
+ * path_u = old_path_u
+ * else:
+ */
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_old_path, __pyx_v_path, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1327; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1327; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1328
+ * old_path_u = self.utf8_decode(old_path)[0]
+ * if old_path == path:
+ * path_u = old_path_u # <<<<<<<<<<<<<<
+ * else:
+ * path_u = self.utf8_decode(path)[0]
+ */
+ __Pyx_INCREF(__pyx_v_old_path_u);
+ __Pyx_DECREF(__pyx_v_path_u);
+ __pyx_v_path_u = __pyx_v_old_path_u;
+ goto __pyx_L50;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1330
+ * path_u = old_path_u
+ * else:
+ * path_u = self.utf8_decode(path)[0] # <<<<<<<<<<<<<<
+ * source_kind = _minikind_to_kind(source_minikind)
+ * return (entry[0][2],
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ __pyx_t_9 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_path_u);
+ __pyx_v_path_u = __pyx_t_1;
+ __pyx_t_1 = 0;
+ }
+ __pyx_L50:;
+ }
+ __pyx_L49:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1331
+ * else:
+ * path_u = self.utf8_decode(path)[0]
+ * source_kind = _minikind_to_kind(source_minikind) # <<<<<<<<<<<<<<
+ * return (entry[0][2],
+ * (old_path_u, path_u),
+ */
+ __pyx_t_1 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_to_kind(__pyx_v_source_minikind); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_source_kind);
+ __pyx_v_source_kind = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1332
+ * path_u = self.utf8_decode(path)[0]
+ * source_kind = _minikind_to_kind(source_minikind)
+ * return (entry[0][2], # <<<<<<<<<<<<<<
+ * (old_path_u, path_u),
+ * content_change,
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1333
+ * source_kind = _minikind_to_kind(source_minikind)
+ * return (entry[0][2],
+ * (old_path_u, path_u), # <<<<<<<<<<<<<<
+ * content_change,
+ * (True, True),
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_old_path_u);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_old_path_u);
+ __Pyx_GIVEREF(__pyx_v_old_path_u);
+ __Pyx_INCREF(__pyx_v_path_u);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_path_u);
+ __Pyx_GIVEREF(__pyx_v_path_u);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1334
+ * return (entry[0][2],
+ * (old_path_u, path_u),
+ * content_change, # <<<<<<<<<<<<<<
+ * (True, True),
+ * (source_parent_id, target_parent_id),
+ */
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_content_change); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1335
+ * (old_path_u, path_u),
+ * content_change,
+ * (True, True), # <<<<<<<<<<<<<<
+ * (source_parent_id, target_parent_id),
+ * (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
+ */
+ __pyx_t_6 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_12 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_6 = 0;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1336
+ * content_change,
+ * (True, True),
+ * (source_parent_id, target_parent_id), # <<<<<<<<<<<<<<
+ * (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
+ * (source_kind, target_kind),
+ */
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(__pyx_v_source_parent_id);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_v_source_parent_id);
+ __Pyx_GIVEREF(__pyx_v_source_parent_id);
+ __Pyx_INCREF(__pyx_v_target_parent_id);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_v_target_parent_id);
+ __Pyx_GIVEREF(__pyx_v_target_parent_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1337
+ * (True, True),
+ * (source_parent_id, target_parent_id),
+ * (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]), # <<<<<<<<<<<<<<
+ * (source_kind, target_kind),
+ * (source_exec, target_exec)), changed
+ */
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(__pyx_v_old_basename);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_old_basename);
+ __Pyx_GIVEREF(__pyx_v_old_basename);
+ __pyx_t_3 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_t_3, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ __pyx_t_13 = 0;
+ __pyx_t_13 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_3, NULL); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_13, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_13 = PyTuple_New(2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_6 = 0;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1338
+ * (source_parent_id, target_parent_id),
+ * (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
+ * (source_kind, target_kind), # <<<<<<<<<<<<<<
+ * (source_exec, target_exec)), changed
+ * elif source_minikind == c'a' and _versioned_minikind(target_minikind):
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1338; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_source_kind);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_source_kind);
+ __Pyx_GIVEREF(__pyx_v_source_kind);
+ __Pyx_INCREF(__pyx_v_target_kind);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_target_kind);
+ __Pyx_GIVEREF(__pyx_v_target_kind);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1339
+ * (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
+ * (source_kind, target_kind),
+ * (source_exec, target_exec)), changed # <<<<<<<<<<<<<<
+ * elif source_minikind == c'a' and _versioned_minikind(target_minikind):
+ * # looks like a new file
+ */
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(__pyx_v_source_exec);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_source_exec);
+ __Pyx_GIVEREF(__pyx_v_source_exec);
+ __Pyx_INCREF(__pyx_v_target_exec);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_target_exec);
+ __Pyx_GIVEREF(__pyx_v_target_exec);
+ __pyx_t_14 = PyTuple_New(8); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_14, 2, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_14, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_14, 4, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_14, 5, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_14, 6, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_14, 7, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_9 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_13 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __Pyx_INCREF(__pyx_v_changed);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_changed);
+ __Pyx_GIVEREF(__pyx_v_changed);
+ __pyx_t_14 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L48:;
+ goto __pyx_L6;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1340
+ * (source_kind, target_kind),
+ * (source_exec, target_exec)), changed
+ * elif source_minikind == c'a' and _versioned_minikind(target_minikind): # <<<<<<<<<<<<<<
+ * # looks like a new file
+ * path = self.pathjoin(entry[0][0], entry[0][1])
+ */
+ __pyx_t_2 = (__pyx_v_source_minikind == 'a');
+ if (__pyx_t_2) {
+ __pyx_t_7 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_target_minikind);
+ } else {
+ __pyx_t_7 = __pyx_t_2;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1342
+ * elif source_minikind == c'a' and _versioned_minikind(target_minikind):
+ * # looks like a new file
+ * path = self.pathjoin(entry[0][0], entry[0][1]) # <<<<<<<<<<<<<<
+ * # parent id is the entry for the path in the target tree
+ * # TODO: these are the same for an entire directory: cache em.
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_6, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_14 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_path);
+ __pyx_v_path = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1345
+ * # parent id is the entry for the path in the target tree
+ * # TODO: these are the same for an entire directory: cache em.
+ * parent_entry = self.state._get_entry(self.target_index, # <<<<<<<<<<<<<<
+ * path_utf8=entry[0][0])
+ * if parent_entry is None:
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___get_entry); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1345; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = PyInt_FromLong(__pyx_v_self->target_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1345; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1345; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1345; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_6));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1346
+ * # TODO: these are the same for an entire directory: cache em.
+ * parent_entry = self.state._get_entry(self.target_index,
+ * path_utf8=entry[0][0]) # <<<<<<<<<<<<<<
+ * if parent_entry is None:
+ * raise errors.DirstateCorrupt(self.state,
+ */
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1346; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_13, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1346; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__path_utf8), __pyx_t_12) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1345; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = PyEval_CallObjectWithKeywords(__pyx_t_3, __pyx_t_14, ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1345; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_parent_entry);
+ __pyx_v_parent_entry = __pyx_t_12;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1347
+ * parent_entry = self.state._get_entry(self.target_index,
+ * path_utf8=entry[0][0])
+ * if parent_entry is None: # <<<<<<<<<<<<<<
+ * raise errors.DirstateCorrupt(self.state,
+ * "We could not find the parent entry in index %d"
+ */
+ __pyx_t_7 = (__pyx_v_parent_entry == Py_None);
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1348
+ * path_utf8=entry[0][0])
+ * if parent_entry is None:
+ * raise errors.DirstateCorrupt(self.state, # <<<<<<<<<<<<<<
+ * "We could not find the parent entry in index %d"
+ * " for the entry: %s"
+ */
+ __pyx_t_12 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1348; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_6 = PyObject_GetAttr(__pyx_t_12, __pyx_n_s__DirstateCorrupt); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1348; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1351
+ * "We could not find the parent entry in index %d"
+ * " for the entry: %s"
+ * % (self.target_index, entry[0])) # <<<<<<<<<<<<<<
+ * parent_id = parent_entry[0][2]
+ * if parent_id == entry[0][2]:
+ */
+ __pyx_t_12 = PyInt_FromLong(__pyx_v_self->target_index); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __pyx_t_12 = 0;
+ __pyx_t_14 = 0;
+ __pyx_t_14 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_35), __pyx_t_3); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_14));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1348; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_self->state);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->state);
+ __Pyx_GIVEREF(__pyx_v_self->state);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_t_14));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_14));
+ __pyx_t_14 = 0;
+ __pyx_t_14 = PyObject_Call(__pyx_t_6, __pyx_t_3, NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1348; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_14, 0, 0);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1348; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L51;
+ }
+ __pyx_L51:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1352
+ * " for the entry: %s"
+ * % (self.target_index, entry[0]))
+ * parent_id = parent_entry[0][2] # <<<<<<<<<<<<<<
+ * if parent_id == entry[0][2]:
+ * parent_id = None
+ */
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_parent_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_14, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_DECREF(__pyx_v_parent_id);
+ __pyx_v_parent_id = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1353
+ * % (self.target_index, entry[0]))
+ * parent_id = parent_entry[0][2]
+ * if parent_id == entry[0][2]: # <<<<<<<<<<<<<<
+ * parent_id = None
+ * if path_info is not None:
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_t_3, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_parent_id, __pyx_t_14, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1354
+ * parent_id = parent_entry[0][2]
+ * if parent_id == entry[0][2]:
+ * parent_id = None # <<<<<<<<<<<<<<
+ * if path_info is not None:
+ * # Present on disk:
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_parent_id);
+ __pyx_v_parent_id = Py_None;
+ goto __pyx_L52;
+ }
+ __pyx_L52:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1355
+ * if parent_id == entry[0][2]:
+ * parent_id = None
+ * if path_info is not None: # <<<<<<<<<<<<<<
+ * # Present on disk:
+ * if self.use_filesystem_for_exec:
+ */
+ __pyx_t_7 = (__pyx_v_path_info != Py_None);
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1357
+ * if path_info is not None:
+ * # Present on disk:
+ * if self.use_filesystem_for_exec: # <<<<<<<<<<<<<<
+ * # We need S_ISREG here, because we aren't sure if this
+ * # is a file or not.
+ */
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_self->use_filesystem_for_exec); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1361
+ * # is a file or not.
+ * target_exec = bool(
+ * S_ISREG(path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * and S_IXUSR & path_info[3].st_mode)
+ * else:
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1361; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_14 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1361; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_10 = __Pyx_PyInt_AsInt(__pyx_t_14); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1361; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_14 = PyInt_FromLong(S_ISREG(__pyx_t_10)); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1361; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1361; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1362
+ * target_exec = bool(
+ * S_ISREG(path_info[3].st_mode)
+ * and S_IXUSR & path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * else:
+ * target_exec = target_details[3]
+ */
+ __pyx_t_14 = PyInt_FromLong(S_IXUSR); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_And(__pyx_t_14, __pyx_t_6); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_5 = __pyx_t_2;
+ } else {
+ __pyx_t_5 = __pyx_t_7;
+ }
+ __pyx_t_3 = __Pyx_PyBool_FromLong(__pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L54;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1364
+ * and S_IXUSR & path_info[3].st_mode)
+ * else:
+ * target_exec = target_details[3] # <<<<<<<<<<<<<<
+ * return (entry[0][2],
+ * (None, self.utf8_decode(path)[0]),
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_target_details, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1364; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __pyx_v_target_exec = __pyx_t_3;
+ __pyx_t_3 = 0;
+ }
+ __pyx_L54:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1365
+ * else:
+ * target_exec = target_details[3]
+ * return (entry[0][2], # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(path)[0]),
+ * True,
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1366
+ * target_exec = target_details[3]
+ * return (entry[0][2],
+ * (None, self.utf8_decode(path)[0]), # <<<<<<<<<<<<<<
+ * True,
+ * (False, True),
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ __pyx_t_14 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_3, NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_14, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_14 = PyTuple_New(2); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1367
+ * return (entry[0][2],
+ * (None, self.utf8_decode(path)[0]),
+ * True, # <<<<<<<<<<<<<<
+ * (False, True),
+ * (None, parent_id),
+ */
+ __pyx_t_3 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1368
+ * (None, self.utf8_decode(path)[0]),
+ * True,
+ * (False, True), # <<<<<<<<<<<<<<
+ * (None, parent_id),
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ */
+ __pyx_t_12 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_13 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ __pyx_t_12 = 0;
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1369
+ * True,
+ * (False, True),
+ * (None, parent_id), # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ * (None, path_info[2]),
+ */
+ __pyx_t_13 = PyTuple_New(2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_parent_id);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, __pyx_v_parent_id);
+ __Pyx_GIVEREF(__pyx_v_parent_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1370
+ * (False, True),
+ * (None, parent_id),
+ * (None, self.utf8_decode(entry[0][1])[0]), # <<<<<<<<<<<<<<
+ * (None, path_info[2]),
+ * (None, target_exec)), True
+ */
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_12, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_12, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_8, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1371
+ * (None, parent_id),
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ * (None, path_info[2]), # <<<<<<<<<<<<<<
+ * (None, target_exec)), True
+ * else:
+ */
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1372
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ * (None, path_info[2]),
+ * (None, target_exec)), True # <<<<<<<<<<<<<<
+ * else:
+ * # It's a missing file, report it as such.
+ */
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_target_exec);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_v_target_exec);
+ __Pyx_GIVEREF(__pyx_v_target_exec);
+ __pyx_t_9 = PyTuple_New(8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_9, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_9, 4, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_9, 5, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_9, 6, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_9, 7, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_6 = 0;
+ __pyx_t_14 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_13 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_12 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_9 = 0;
+ __pyx_t_12 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+ goto __pyx_L53;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1375
+ * else:
+ * # It's a missing file, report it as such.
+ * return (entry[0][2], # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(path)[0]),
+ * False,
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_1, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1376
+ * # Its a missing file, report it as such.
+ * return (entry[0][2],
+ * (None, self.utf8_decode(path)[0]), # <<<<<<<<<<<<<<
+ * False,
+ * (False, True),
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ __pyx_t_9 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1377
+ * return (entry[0][2],
+ * (None, self.utf8_decode(path)[0]),
+ * False, # <<<<<<<<<<<<<<
+ * (False, True),
+ * (None, parent_id),
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1378
+ * (None, self.utf8_decode(path)[0]),
+ * False,
+ * (False, True), # <<<<<<<<<<<<<<
+ * (None, parent_id),
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ */
+ __pyx_t_8 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_13 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ __pyx_t_8 = 0;
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1379
+ * False,
+ * (False, True),
+ * (None, parent_id), # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ * (None, None),
+ */
+ __pyx_t_13 = PyTuple_New(2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_parent_id);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, __pyx_v_parent_id);
+ __Pyx_GIVEREF(__pyx_v_parent_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1380
+ * (False, True),
+ * (None, parent_id),
+ * (None, self.utf8_decode(entry[0][1])[0]), # <<<<<<<<<<<<<<
+ * (None, None),
+ * (None, False)), True
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_8, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1381
+ * (None, parent_id),
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ * (None, None), # <<<<<<<<<<<<<<
+ * (None, False)), True
+ * elif _versioned_minikind(source_minikind) and target_minikind == c'a':
+ */
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1382
+ * (None, self.utf8_decode(entry[0][1])[0]),
+ * (None, None),
+ * (None, False)), True # <<<<<<<<<<<<<<
+ * elif _versioned_minikind(source_minikind) and target_minikind == c'a':
+ * # unversioned, possibly, or possibly not deleted: we don't care.
+ */
+ __pyx_t_14 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1382; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1382; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __pyx_t_14 = 0;
+ __pyx_t_14 = PyTuple_New(8); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_14, 2, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_14, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_14, 4, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_14, 5, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_14, 6, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_14, 7, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_12 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_13 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1382; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_14 = 0;
+ __pyx_t_6 = 0;
+ __pyx_r = __pyx_t_8;
+ __pyx_t_8 = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L53:;
+ goto __pyx_L6;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1383
+ * (None, None),
+ * (None, False)), True
+ * elif _versioned_minikind(source_minikind) and target_minikind == c'a': # <<<<<<<<<<<<<<
+ * # unversioned, possibly, or possibly not deleted: we don't care.
+ * # if it's still on disk, *and* there's no other entry at this
+ */
+ if (__pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_source_minikind)) {
+ __pyx_t_5 = (__pyx_v_target_minikind == 'a');
+ __pyx_t_7 = __pyx_t_5;
+ } else {
+ __pyx_t_7 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_source_minikind);
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1388
+ * # path [we don't know this in this routine at the moment -
+ * # perhaps we should change this - then it would be an unknown.
+ * old_path = self.pathjoin(entry[0][0], entry[0][1]) # <<<<<<<<<<<<<<
+ * # parent id is the entry for the path in the target tree
+ * parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2]
+ */
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_8, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_t_8, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __pyx_t_6 = 0;
+ __pyx_t_14 = 0;
+ __pyx_t_14 = PyObject_Call(__pyx_v_self->pathjoin, __pyx_t_8, NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_v_old_path);
+ __pyx_v_old_path = __pyx_t_14;
+ __pyx_t_14 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1390
+ * old_path = self.pathjoin(entry[0][0], entry[0][1])
+ * # parent id is the entry for the path in the target tree
+ * parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2] # <<<<<<<<<<<<<<
+ * if parent_id == entry[0][2]:
+ * parent_id = None
+ */
+ __pyx_t_14 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___get_entry); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_self->source_index); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyDict_New(); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_8));
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_8, ((PyObject *)__pyx_n_s__path_utf8), __pyx_t_13) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_13 = PyEval_CallObjectWithKeywords(__pyx_t_14, __pyx_t_6, ((PyObject *)__pyx_t_8)); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_13, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_t_8, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_v_parent_id);
+ __pyx_v_parent_id = __pyx_t_13;
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1391
+ * # parent id is the entry for the path in the target tree
+ * parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2]
+ * if parent_id == entry[0][2]: # <<<<<<<<<<<<<<
+ * parent_id = None
+ * return (entry[0][2],
+ */
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_13, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_13 = PyObject_RichCompare(__pyx_v_parent_id, __pyx_t_8, Py_EQ); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1392
+ * parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2]
+ * if parent_id == entry[0][2]:
+ * parent_id = None # <<<<<<<<<<<<<<
+ * return (entry[0][2],
+ * (self.utf8_decode(old_path)[0], None),
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_parent_id);
+ __pyx_v_parent_id = Py_None;
+ goto __pyx_L55;
+ }
+ __pyx_L55:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1393
+ * if parent_id == entry[0][2]:
+ * parent_id = None
+ * return (entry[0][2], # <<<<<<<<<<<<<<
+ * (self.utf8_decode(old_path)[0], None),
+ * True,
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_13, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1394
+ * parent_id = None
+ * return (entry[0][2],
+ * (self.utf8_decode(old_path)[0], None), # <<<<<<<<<<<<<<
+ * True,
+ * (True, False),
+ */
+ __pyx_t_13 = PyTuple_New(1); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(__pyx_v_old_path);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_v_old_path);
+ __Pyx_GIVEREF(__pyx_v_old_path);
+ __pyx_t_6 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_13, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1395
+ * return (entry[0][2],
+ * (self.utf8_decode(old_path)[0], None),
+ * True, # <<<<<<<<<<<<<<
+ * (True, False),
+ * (parent_id, None),
+ */
+ __pyx_t_13 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1396
+ * (self.utf8_decode(old_path)[0], None),
+ * True,
+ * (True, False), # <<<<<<<<<<<<<<
+ * (parent_id, None),
+ * (self.utf8_decode(entry[0][1])[0], None),
+ */
+ __pyx_t_14 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_3 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_14 = 0;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1397
+ * True,
+ * (True, False),
+ * (parent_id, None), # <<<<<<<<<<<<<<
+ * (self.utf8_decode(entry[0][1])[0], None),
+ * (_minikind_to_kind(source_minikind), None),
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_parent_id);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_parent_id);
+ __Pyx_GIVEREF(__pyx_v_parent_id);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1398
+ * (True, False),
+ * (parent_id, None),
+ * (self.utf8_decode(entry[0][1])[0], None), # <<<<<<<<<<<<<<
+ * (_minikind_to_kind(source_minikind), None),
+ * (source_details[3], None)), True
+ */
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_14, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_14, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_t_14 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1399
+ * (parent_id, None),
+ * (self.utf8_decode(entry[0][1])[0], None),
+ * (_minikind_to_kind(source_minikind), None), # <<<<<<<<<<<<<<
+ * (source_details[3], None)), True
+ * elif _versioned_minikind(source_minikind) and target_minikind == c'r':
+ */
+ __pyx_t_14 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_to_kind(__pyx_v_source_minikind); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_t_14 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1400
+ * (self.utf8_decode(entry[0][1])[0], None),
+ * (_minikind_to_kind(source_minikind), None),
+ * (source_details[3], None)), True # <<<<<<<<<<<<<<
+ * elif _versioned_minikind(source_minikind) and target_minikind == c'r':
+ * # a rename; could be a true rename, or a rename inherited from
+ */
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_source_details, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_t_14 = 0;
+ __pyx_t_14 = PyTuple_New(8); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_14, 2, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_14, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_14, 4, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_14, 5, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_14, 6, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_14, 7, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_8 = 0;
+ __pyx_t_6 = 0;
+ __pyx_t_13 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_12 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_14 = 0;
+ __pyx_t_12 = 0;
+ __pyx_r = __pyx_t_9;
+ __pyx_t_9 = 0;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1401
+ * (_minikind_to_kind(source_minikind), None),
+ * (source_details[3], None)), True
+ * elif _versioned_minikind(source_minikind) and target_minikind == c'r': # <<<<<<<<<<<<<<
+ * # a rename; could be a true rename, or a rename inherited from
+ * # a renamed parent. TODO: handle this efficiently. Its not
+ */
+ if (__pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_source_minikind)) {
+ __pyx_t_7 = (__pyx_v_target_minikind == 'r');
+ __pyx_t_5 = __pyx_t_7;
+ } else {
+ __pyx_t_5 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__versioned_minikind(__pyx_v_source_minikind);
+ }
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1406
+ * # common case to rename dirs though, so a correct but slow
+ * # implementation will do.
+ * if (not self.doing_consistency_expansion and # <<<<<<<<<<<<<<
+ * not osutils.is_inside_any(self.searched_specific_files,
+ * target_details[1])):
+ */
+ __pyx_t_5 = (!__pyx_v_self->doing_consistency_expansion);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1407
+ * # implementation will do.
+ * if (not self.doing_consistency_expansion and
+ * not osutils.is_inside_any(self.searched_specific_files, # <<<<<<<<<<<<<<
+ * target_details[1])):
+ * self.search_specific_files.add(target_details[1])
+ */
+ __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_12 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__is_inside_any); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1408
+ * if (not self.doing_consistency_expansion and
+ * not osutils.is_inside_any(self.searched_specific_files,
+ * target_details[1])): # <<<<<<<<<<<<<<
+ * self.search_specific_files.add(target_details[1])
+ * # We don't expand the specific files parents list here as
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_target_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_14 = PyTuple_New(2); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_INCREF(__pyx_v_self->searched_specific_files);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_v_self->searched_specific_files);
+ __Pyx_GIVEREF(__pyx_v_self->searched_specific_files);
+ PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_Call(__pyx_t_12, __pyx_t_14, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_2 = (!__pyx_t_7);
+ __pyx_t_7 = __pyx_t_2;
+ } else {
+ __pyx_t_7 = __pyx_t_5;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1409
+ * not osutils.is_inside_any(self.searched_specific_files,
+ * target_details[1])):
+ * self.search_specific_files.add(target_details[1]) # <<<<<<<<<<<<<<
+ * # We don't expand the specific files parents list here as
+ * # the path is absent in target and won't create a delta with
+ */
+ __pyx_t_9 = PyObject_GetAttr(__pyx_v_self->search_specific_files, __pyx_n_s__add); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_target_details, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ __pyx_t_14 = 0;
+ __pyx_t_14 = PyObject_Call(__pyx_t_9, __pyx_t_12, NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ goto __pyx_L56;
+ }
+ __pyx_L56:;
+ goto __pyx_L6;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1413
+ * # the path is absent in target and won't create a delta with
+ * # missing parent.
+ * elif ((source_minikind == c'r' or source_minikind == c'a') and # <<<<<<<<<<<<<<
+ * (target_minikind == c'r' or target_minikind == c'a')):
+ * # neither of the selected trees contain this path,
+ */
+ switch (__pyx_v_source_minikind) {
+ case 'r':
+ case 'a':
+ __pyx_t_7 = 1;
+ break;
+ default:
+ __pyx_t_7 = 0;
+ break;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1414
+ * # missing parent.
+ * elif ((source_minikind == c'r' or source_minikind == c'a') and
+ * (target_minikind == c'r' or target_minikind == c'a')): # <<<<<<<<<<<<<<
+ * # neither of the selected trees contain this path,
+ * # so skip over it. This is not currently directly tested, but
+ */
+ switch (__pyx_v_target_minikind) {
+ case 'r':
+ case 'a':
+ __pyx_t_5 = 1;
+ break;
+ default:
+ __pyx_t_5 = 0;
+ break;
+ }
+ __pyx_t_2 = __pyx_t_5;
+ } else {
+ __pyx_t_2 = __pyx_t_7;
+ }
+ if (__pyx_t_2) {
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1422
+ * raise AssertionError("don't know how to compare "
+ * "source_minikind=%r, target_minikind=%r"
+ * % (source_minikind, target_minikind)) # <<<<<<<<<<<<<<
+ * ## import pdb;pdb.set_trace()
+ * return None, None
+ */
+ __pyx_t_14 = PyInt_FromLong(__pyx_v_source_minikind); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_12 = PyInt_FromLong(__pyx_v_target_minikind); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_14 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_36), __pyx_t_9); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_12));
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1420; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)__pyx_t_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_12));
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1420; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_Raise(__pyx_t_12, 0, 0);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1420; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1424
+ * % (source_minikind, target_minikind))
+ * ## import pdb;pdb.set_trace()
+ * return None, None # <<<<<<<<<<<<<<
+ *
+ * def __iter__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1424; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __pyx_r = __pyx_t_12;
+ __pyx_t_12 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_XDECREF(__pyx_t_14);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._process_entry");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_file_id);
+ __Pyx_DECREF(__pyx_v_details_list);
+ __Pyx_DECREF(__pyx_v_source_details);
+ __Pyx_DECREF(__pyx_v_target_details);
+ __Pyx_DECREF(__pyx_v_link_or_sha1);
+ __Pyx_DECREF(__pyx_v_old_dirname);
+ __Pyx_DECREF(__pyx_v_old_basename);
+ __Pyx_DECREF(__pyx_v_old_path);
+ __Pyx_DECREF(__pyx_v_path);
+ __Pyx_DECREF(__pyx_v_old_entry);
+ __Pyx_DECREF(__pyx_v_target_kind);
+ __Pyx_DECREF(__pyx_v_target_exec);
+ __Pyx_DECREF(__pyx_v_statvalue);
+ __Pyx_DECREF(__pyx_v_source_parent_id);
+ __Pyx_DECREF(__pyx_v__);
+ __Pyx_DECREF(__pyx_v_source_parent_entry);
+ __Pyx_DECREF(__pyx_v_new_dirname);
+ __Pyx_DECREF(__pyx_v_target_parent_id);
+ __Pyx_DECREF(__pyx_v_target_parent_entry);
+ __Pyx_DECREF(__pyx_v_source_exec);
+ __Pyx_DECREF(__pyx_v_changed);
+ __Pyx_DECREF(__pyx_v_old_path_u);
+ __Pyx_DECREF(__pyx_v_path_u);
+ __Pyx_DECREF(__pyx_v_source_kind);
+ __Pyx_DECREF(__pyx_v_parent_entry);
+ __Pyx_DECREF(__pyx_v_parent_id);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1426
+ * return None, None
+ *
+ * def __iter__(self): # <<<<<<<<<<<<<<
+ * return self
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___iter__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___iter__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("__iter__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1427
+ *
+ * def __iter__(self):
+ * return self # <<<<<<<<<<<<<<
+ *
+ * def iter_changes(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self);
+ __pyx_r = __pyx_v_self;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1429
+ * return self
+ *
+ * def iter_changes(self): # <<<<<<<<<<<<<<
+ * return self
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_iter_changes(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_iter_changes(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("iter_changes");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1430
+ *
+ * def iter_changes(self):
+ * return self # <<<<<<<<<<<<<<
+ *
+ * cdef int _gather_result_for_consistency(self, result) except -1:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self);
+ __pyx_r = __pyx_v_self;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1432
+ * return self
+ *
+ * cdef int _gather_result_for_consistency(self, result) except -1: # <<<<<<<<<<<<<<
+ * """Check a result we will yield to make sure we are consistent later.
+ *
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__gather_result_for_consistency(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self, PyObject *__pyx_v_result) {
+ PyObject *__pyx_v_new_path;
+ int __pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ __Pyx_RefNannySetupContext("_gather_result_for_consistency");
+ __pyx_v_new_path = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1439
+ * :param result: A result tuple.
+ * """
+ * if not self.partial or not result[0]: # <<<<<<<<<<<<<<
+ * return 0
+ * self.seen_ids.add(result[0])
+ */
+ __pyx_t_1 = (!__pyx_v_self->partial);
+ if (!__pyx_t_1) {
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_result, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1439; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1439; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_4 = (!__pyx_t_3);
+ __pyx_t_3 = __pyx_t_4;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1440
+ * """
+ * if not self.partial or not result[0]:
+ * return 0 # <<<<<<<<<<<<<<
+ * self.seen_ids.add(result[0])
+ * new_path = result[1][1]
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1441
+ * if not self.partial or not result[0]:
+ * return 0
+ * self.seen_ids.add(result[0]) # <<<<<<<<<<<<<<
+ * new_path = result[1][1]
+ * if new_path:
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_self->seen_ids, __pyx_n_s__add); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_result, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1442
+ * return 0
+ * self.seen_ids.add(result[0])
+ * new_path = result[1][1] # <<<<<<<<<<<<<<
+ * if new_path:
+ * # Not the root and not a delete: queue up the parents of the path.
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_result, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1442; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_5, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1442; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_new_path);
+ __pyx_v_new_path = __pyx_t_6;
+ __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1443
+ * self.seen_ids.add(result[0])
+ * new_path = result[1][1]
+ * if new_path: # <<<<<<<<<<<<<<
+ * # Not the root and not a delete: queue up the parents of the path.
+ * self.search_specific_file_parents.update(
+ */
+ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_new_path); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1443; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1445
+ * if new_path:
+ * # Not the root and not a delete: queue up the parents of the path.
+ * self.search_specific_file_parents.update( # <<<<<<<<<<<<<<
+ * osutils.parent_directories(new_path.encode('utf8')))
+ * # Add the root directory which parent_directories does not
+ */
+ __pyx_t_6 = PyObject_GetAttr(__pyx_v_self->search_specific_file_parents, __pyx_n_s__update); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1445; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1446
+ * # Not the root and not a delete: queue up the parents of the path.
+ * self.search_specific_file_parents.update(
+ * osutils.parent_directories(new_path.encode('utf8'))) # <<<<<<<<<<<<<<
+ * # Add the root directory which parent_directories does not
+ * # provide.
+ */
+ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__parent_directories); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_GetAttr(__pyx_v_new_path, __pyx_n_s__encode); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8));
+ PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_n_s__utf8));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8));
+ __pyx_t_8 = PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1445; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1445; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1449
+ * # Add the root directory which parent_directories does not
+ * # provide.
+ * self.search_specific_file_parents.add('') # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_8 = PyObject_GetAttr(__pyx_v_self->search_specific_file_parents, __pyx_n_s__add); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1449; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1449; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __pyx_t_6 = PyObject_Call(__pyx_t_8, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1449; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1450
+ * # provide.
+ * self.search_specific_file_parents.add('')
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * cdef int _update_current_block(self) except -1:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._gather_result_for_consistency");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_new_path);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1452
+ * return 0
+ *
+ * cdef int _update_current_block(self) except -1: # <<<<<<<<<<<<<<
+ * if (self.block_index < len(self.state._dirblocks) and
+ * osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])):
+ */
+
+static int __pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__update_current_block(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self) {
+ int __pyx_r;
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ __Pyx_RefNannySetupContext("_update_current_block");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1453
+ *
+ * cdef int _update_current_block(self) except -1:
+ * if (self.block_index < len(self.state._dirblocks) and # <<<<<<<<<<<<<<
+ * osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])):
+ * self.current_block = self.state._dirblocks[self.block_index]
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___dirblocks); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1453; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_Length(__pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1453; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_3 = (__pyx_v_self->block_index < __pyx_t_2);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1454
+ * cdef int _update_current_block(self) except -1:
+ * if (self.block_index < len(self.state._dirblocks) and
+ * osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])): # <<<<<<<<<<<<<<
+ * self.current_block = self.state._dirblocks[self.block_index]
+ * self.current_block_list = self.current_block[1]
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__is_inside); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___dirblocks); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->block_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_5, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_self->current_root);
+ __Pyx_GIVEREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_7 = __pyx_t_6;
+ } else {
+ __pyx_t_7 = __pyx_t_3;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1455
+ * if (self.block_index < len(self.state._dirblocks) and
+ * osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])):
+ * self.current_block = self.state._dirblocks[self.block_index] # <<<<<<<<<<<<<<
+ * self.current_block_list = self.current_block[1]
+ * self.current_block_pos = 0
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___dirblocks); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->block_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_5);
+ __Pyx_GOTREF(__pyx_v_self->current_block);
+ __Pyx_DECREF(__pyx_v_self->current_block);
+ __pyx_v_self->current_block = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1456
+ * osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])):
+ * self.current_block = self.state._dirblocks[self.block_index]
+ * self.current_block_list = self.current_block[1] # <<<<<<<<<<<<<<
+ * self.current_block_pos = 0
+ * else:
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_self->current_block, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __Pyx_GOTREF(__pyx_v_self->current_block_list);
+ __Pyx_DECREF(__pyx_v_self->current_block_list);
+ __pyx_v_self->current_block_list = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1457
+ * self.current_block = self.state._dirblocks[self.block_index]
+ * self.current_block_list = self.current_block[1]
+ * self.current_block_pos = 0 # <<<<<<<<<<<<<<
+ * else:
+ * self.current_block = None
+ */
+ __pyx_v_self->current_block_pos = 0;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1459
+ * self.current_block_pos = 0
+ * else:
+ * self.current_block = None # <<<<<<<<<<<<<<
+ * self.current_block_list = None
+ * return 0
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_block);
+ __Pyx_DECREF(__pyx_v_self->current_block);
+ __pyx_v_self->current_block = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1460
+ * else:
+ * self.current_block = None
+ * self.current_block_list = None # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_block_list);
+ __Pyx_DECREF(__pyx_v_self->current_block_list);
+ __pyx_v_self->current_block_list = Py_None;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1461
+ * self.current_block = None
+ * self.current_block_list = None
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * def __next__(self):
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._update_current_block");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1463
+ * return 0
+ *
+ * def __next__(self): # <<<<<<<<<<<<<<
+ * # Simple thunk to allow tail recursion without pyrex confusion
+ * return self._iter_next()
+ */
+
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___next__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___next__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__next__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1465
+ * def __next__(self):
+ * # Simple thunk to allow tail recursion without pyrex confusion
+ * return self._iter_next() # <<<<<<<<<<<<<<
+ *
+ * cdef _iter_next(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)->__pyx_vtab)->_iter_next(((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1465; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC.__next__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1467
+ * return self._iter_next()
+ *
+ * cdef _iter_next(self): # <<<<<<<<<<<<<<
+ * """Iterate over the changes."""
+ * # This function single steps through an iterator. As such while loops
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__iter_next(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self) {
+ PyObject *__pyx_v_current_dirname;
+ PyObject *__pyx_v_current_blockname;
+ char *__pyx_v_current_dirname_c;
+ char *__pyx_v_current_blockname_c;
+ int __pyx_v_path_handled;
+ PyObject *__pyx_v_searched_specific_files;
+ PyObject *__pyx_v_entry;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_changed;
+ PyObject *__pyx_v__;
+ PyObject *__pyx_v_root_stat;
+ PyObject *__pyx_v_e;
+ PyObject *__pyx_v_new_executable;
+ PyObject *__pyx_v_e_winerror;
+ PyObject *__pyx_v_win_errors;
+ PyObject *__pyx_v_bzr_index;
+ PyObject *__pyx_v_initial_key;
+ PyObject *__pyx_v_current_path_info;
+ PyObject *__pyx_v_current_entry;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ Py_ssize_t __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ int __pyx_t_11;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ int __pyx_t_14;
+ __Pyx_RefNannySetupContext("_iter_next");
+ __pyx_v_current_dirname = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_current_blockname = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_searched_specific_files = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_changed = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v__ = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_root_stat = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_e = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_new_executable = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_e_winerror = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_win_errors = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_bzr_index = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_initial_key = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_current_path_info = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_current_entry = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1519
+ * cdef int advance_entry, advance_path
+ * cdef int path_handled
+ * searched_specific_files = self.searched_specific_files # <<<<<<<<<<<<<<
+ * # Are we walking a root?
+ * while self.root_entries_pos < self.root_entries_len:
+ */
+ __Pyx_INCREF(__pyx_v_self->searched_specific_files);
+ __Pyx_DECREF(__pyx_v_searched_specific_files);
+ __pyx_v_searched_specific_files = __pyx_v_self->searched_specific_files;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1521
+ * searched_specific_files = self.searched_specific_files
+ * # Are we walking a root?
+ * while self.root_entries_pos < self.root_entries_len: # <<<<<<<<<<<<<<
+ * entry = self.root_entries[self.root_entries_pos]
+ * self.root_entries_pos = self.root_entries_pos + 1
+ */
+ while (1) {
+ __pyx_t_1 = (__pyx_v_self->root_entries_pos < __pyx_v_self->root_entries_len);
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1522
+ * # Are we walking a root?
+ * while self.root_entries_pos < self.root_entries_len:
+ * entry = self.root_entries[self.root_entries_pos] # <<<<<<<<<<<<<<
+ * self.root_entries_pos = self.root_entries_pos + 1
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->root_entries, __pyx_v_self->root_entries_pos, sizeof(int), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1523
+ * while self.root_entries_pos < self.root_entries_len:
+ * entry = self.root_entries[self.root_entries_pos]
+ * self.root_entries_pos = self.root_entries_pos + 1 # <<<<<<<<<<<<<<
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ * if changed is not None:
+ */
+ __pyx_v_self->root_entries_pos = (__pyx_v_self->root_entries_pos + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1524
+ * entry = self.root_entries[self.root_entries_pos]
+ * self.root_entries_pos = self.root_entries_pos + 1
+ * result, changed = self._process_entry(entry, self.root_dir_info) # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * if changed:
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_entry, __pyx_v_self->root_dir_info); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 2)) {
+ PyObject* tuple = __pyx_t_2;
+ __pyx_t_3 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ } else {
+ __pyx_t_5 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (__Pyx_EndUnpack(__pyx_t_5, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1525
+ * self.root_entries_pos = self.root_entries_pos + 1
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ */
+ __pyx_t_1 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1526
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ * if changed is not None:
+ * if changed: # <<<<<<<<<<<<<<
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1526; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1527
+ * if changed is not None:
+ * if changed:
+ * self._gather_result_for_consistency(result) # <<<<<<<<<<<<<<
+ * if changed or self.include_unchanged:
+ * return result
+ */
+ __pyx_t_6 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_gather_result_for_consistency(__pyx_v_self, __pyx_v_result); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1527; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1528
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged: # <<<<<<<<<<<<<<
+ * return result
+ * # Have we finished the prior root, or never started one ?
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1528; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_1) {
+ __pyx_t_7 = __pyx_v_self->include_unchanged;
+ } else {
+ __pyx_t_7 = __pyx_t_1;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1529
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ * return result # <<<<<<<<<<<<<<
+ * # Have we finished the prior root, or never started one ?
+ * if self.current_root is None:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1531
+ * return result
+ * # Have we finished the prior root, or never started one ?
+ * if self.current_root is None: # <<<<<<<<<<<<<<
+ * # TODO: the pending list should be lexically sorted? the
+ * # interface doesn't require it.
+ */
+ __pyx_t_7 = (__pyx_v_self->current_root == Py_None);
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1534
+ * # TODO: the pending list should be lexically sorted? the
+ * # interface doesn't require it.
+ * try: # <<<<<<<<<<<<<<
+ * self.current_root = self.search_specific_files.pop()
+ * except KeyError, _:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1535
+ * # interface doesn't require it.
+ * try:
+ * self.current_root = self.search_specific_files.pop() # <<<<<<<<<<<<<<
+ * except KeyError, _:
+ * raise StopIteration()
+ */
+ __pyx_t_2 = __Pyx_PyObject_Pop(__pyx_v_self->search_specific_files); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1535; __pyx_clineno = __LINE__; goto __pyx_L9_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->current_root);
+ __Pyx_DECREF(__pyx_v_self->current_root);
+ __pyx_v_self->current_root = __pyx_t_2;
+ __pyx_t_2 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L16_try_end;
+ __pyx_L9_error:;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1536
+ * try:
+ * self.current_root = self.search_specific_files.pop()
+ * except KeyError, _: # <<<<<<<<<<<<<<
+ * raise StopIteration()
+ * self.searched_specific_files.add(self.current_root)
+ */
+ __pyx_t_6 = PyErr_ExceptionMatches(__pyx_builtin_KeyError);
+ if (__pyx_t_6) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._iter_next");
+ if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_4, &__pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1536; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1537
+ * self.current_root = self.search_specific_files.pop()
+ * except KeyError, _:
+ * raise StopIteration() # <<<<<<<<<<<<<<
+ * self.searched_specific_files.add(self.current_root)
+ * # process the entries for this containing directory: the rest will be
+ */
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_StopIteration, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1537; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1537; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L10_exception_handled;
+ }
+ __pyx_L11_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L10_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L16_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1538
+ * except KeyError, _:
+ * raise StopIteration()
+ * self.searched_specific_files.add(self.current_root) # <<<<<<<<<<<<<<
+ * # process the entries for this containing directory: the rest will be
+ * # found by their parents recursively.
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->searched_specific_files, __pyx_n_s__add); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_self->current_root);
+ __Pyx_GIVEREF(__pyx_v_self->current_root);
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1541
+ * # process the entries for this containing directory: the rest will be
+ * # found by their parents recursively.
+ * self.root_entries = self.state._entries_for_path(self.current_root) # <<<<<<<<<<<<<<
+ * self.root_entries_len = len(self.root_entries)
+ * self.current_root_unicode = self.current_root.decode('utf8')
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___entries_for_path); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_self->current_root);
+ __Pyx_GIVEREF(__pyx_v_self->current_root);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->root_entries);
+ __Pyx_DECREF(__pyx_v_self->root_entries);
+ __pyx_v_self->root_entries = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1542
+ * # found by their parents recursively.
+ * self.root_entries = self.state._entries_for_path(self.current_root)
+ * self.root_entries_len = len(self.root_entries) # <<<<<<<<<<<<<<
+ * self.current_root_unicode = self.current_root.decode('utf8')
+ * self.root_abspath = self.tree.abspath(self.current_root_unicode)
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_self->root_entries); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1542; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_self->root_entries_len = __pyx_t_8;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1543
+ * self.root_entries = self.state._entries_for_path(self.current_root)
+ * self.root_entries_len = len(self.root_entries)
+ * self.current_root_unicode = self.current_root.decode('utf8') # <<<<<<<<<<<<<<
+ * self.root_abspath = self.tree.abspath(self.current_root_unicode)
+ * try:
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->current_root, __pyx_n_s__decode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_n_s__utf8));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8));
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->current_root_unicode);
+ __Pyx_DECREF(__pyx_v_self->current_root_unicode);
+ __pyx_v_self->current_root_unicode = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1544
+ * self.root_entries_len = len(self.root_entries)
+ * self.current_root_unicode = self.current_root.decode('utf8')
+ * self.root_abspath = self.tree.abspath(self.current_root_unicode) # <<<<<<<<<<<<<<
+ * try:
+ * root_stat = os.lstat(self.root_abspath)
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_self->tree, __pyx_n_s__abspath); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_self->current_root_unicode);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_self->current_root_unicode);
+ __Pyx_GIVEREF(__pyx_v_self->current_root_unicode);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->root_abspath);
+ __Pyx_DECREF(__pyx_v_self->root_abspath);
+ __pyx_v_self->root_abspath = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1545
+ * self.current_root_unicode = self.current_root.decode('utf8')
+ * self.root_abspath = self.tree.abspath(self.current_root_unicode)
+ * try: # <<<<<<<<<<<<<<
+ * root_stat = os.lstat(self.root_abspath)
+ * except OSError, e:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1546
+ * self.root_abspath = self.tree.abspath(self.current_root_unicode)
+ * try:
+ * root_stat = os.lstat(self.root_abspath) # <<<<<<<<<<<<<<
+ * except OSError, e:
+ * if e.errno == errno.ENOENT:
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__os); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L19_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__lstat); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L19_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L19_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_self->root_abspath);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->root_abspath);
+ __Pyx_GIVEREF(__pyx_v_self->root_abspath);
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L19_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_root_stat);
+ __pyx_v_root_stat = __pyx_t_2;
+ __pyx_t_2 = 0;
+ }
+ /*else:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1556
+ * else:
+ * self.root_dir_info = ('', self.current_root,
+ * osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat, # <<<<<<<<<<<<<<
+ * self.root_abspath)
+ * if self.root_dir_info[2] == 'directory':
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1556; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s_38); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1556; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_root_stat, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1556; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1556; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1556; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1557
+ * self.root_dir_info = ('', self.current_root,
+ * osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
+ * self.root_abspath) # <<<<<<<<<<<<<<
+ * if self.root_dir_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference(
+ */
+ __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1555; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __Pyx_INCREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_self->current_root);
+ __Pyx_GIVEREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_root_stat);
+ PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_root_stat);
+ __Pyx_GIVEREF(__pyx_v_root_stat);
+ __Pyx_INCREF(__pyx_v_self->root_abspath);
+ PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_v_self->root_abspath);
+ __Pyx_GIVEREF(__pyx_v_self->root_abspath);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1555
+ * raise
+ * else:
+ * self.root_dir_info = ('', self.current_root, # <<<<<<<<<<<<<<
+ * osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
+ * self.root_abspath)
+ */
+ __Pyx_GIVEREF(__pyx_t_4);
+ __Pyx_GOTREF(__pyx_v_self->root_dir_info);
+ __Pyx_DECREF(__pyx_v_self->root_dir_info);
+ __pyx_v_self->root_dir_info = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1558
+ * osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
+ * self.root_abspath)
+ * if self.root_dir_info[2] == 'directory': # <<<<<<<<<<<<<<
+ * if self.tree._directory_is_tree_reference(
+ * self.current_root_unicode):
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_self->root_dir_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1558; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1558; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1558; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1559
+ * self.root_abspath)
+ * if self.root_dir_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference( # <<<<<<<<<<<<<<
+ * self.current_root_unicode):
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_self->tree, __pyx_n_s_39); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1559; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1560
+ * if self.root_dir_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference(
+ * self.current_root_unicode): # <<<<<<<<<<<<<<
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ * ('tree-reference',) + self.root_dir_info[3:]
+ */
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1559; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_self->current_root_unicode);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_self->current_root_unicode);
+ __Pyx_GIVEREF(__pyx_v_self->current_root_unicode);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1559; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1559; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1561
+ * if self.tree._directory_is_tree_reference(
+ * self.current_root_unicode):
+ * self.root_dir_info = self.root_dir_info[:2] + \ # <<<<<<<<<<<<<<
+ * ('tree-reference',) + self.root_dir_info[3:]
+ * if not self.root_entries and not self.root_dir_info:
+ */
+ __pyx_t_3 = PySequence_GetSlice(__pyx_v_self->root_dir_info, 0, 2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1561; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1562
+ * self.current_root_unicode):
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ * ('tree-reference',) + self.root_dir_info[3:] # <<<<<<<<<<<<<<
+ * if not self.root_entries and not self.root_dir_info:
+ * # this specified path is not present at all, skip it.
+ */
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1562; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_33));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_kp_s_33));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33));
+ __pyx_t_2 = PyNumber_Add(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1561; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PySequence_GetSlice(__pyx_v_self->root_dir_info, 3, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1562; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1562; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1561
+ * if self.tree._directory_is_tree_reference(
+ * self.current_root_unicode):
+ * self.root_dir_info = self.root_dir_info[:2] + \ # <<<<<<<<<<<<<<
+ * ('tree-reference',) + self.root_dir_info[3:]
+ * if not self.root_entries and not self.root_dir_info:
+ */
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->root_dir_info);
+ __Pyx_DECREF(__pyx_v_self->root_dir_info);
+ __pyx_v_self->root_dir_info = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L28;
+ }
+ __pyx_L28:;
+ goto __pyx_L27;
+ }
+ __pyx_L27:;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L26_try_end;
+ __pyx_L19_error:;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1547
+ * try:
+ * root_stat = os.lstat(self.root_abspath)
+ * except OSError, e: # <<<<<<<<<<<<<<
+ * if e.errno == errno.ENOENT:
+ * # the path does not exist: let _process_entry know that.
+ */
+ __pyx_t_6 = PyErr_ExceptionMatches(__pyx_builtin_OSError);
+ if (__pyx_t_6) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._iter_next");
+ if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_4, &__pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1547; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_e);
+ __pyx_v_e = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1548
+ * root_stat = os.lstat(self.root_abspath)
+ * except OSError, e:
+ * if e.errno == errno.ENOENT: # <<<<<<<<<<<<<<
+ * # the path does not exist: let _process_entry know that.
+ * self.root_dir_info = None
+ */
+ __pyx_t_5 = PyObject_GetAttr(__pyx_v_e, __pyx_n_s__errno); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1548; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__errno); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1548; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__ENOENT); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1548; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_RichCompare(__pyx_t_5, __pyx_t_10, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1548; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1548; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1550
+ * if e.errno == errno.ENOENT:
+ * # the path does not exist: let _process_entry know that.
+ * self.root_dir_info = None # <<<<<<<<<<<<<<
+ * else:
+ * # some other random error: hand it up.
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->root_dir_info);
+ __Pyx_DECREF(__pyx_v_self->root_dir_info);
+ __pyx_v_self->root_dir_info = Py_None;
+ goto __pyx_L31;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1553
+ * else:
+ * # some other random error: hand it up.
+ * raise # <<<<<<<<<<<<<<
+ * else:
+ * self.root_dir_info = ('', self.current_root,
+ */
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_ErrRestore(__pyx_t_3, __pyx_t_4, __pyx_t_2);
+ __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1553; __pyx_clineno = __LINE__; goto __pyx_L21_except_error;}
+ }
+ __pyx_L31:;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L20_exception_handled;
+ }
+ __pyx_L21_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L20_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L26_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1563
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ * ('tree-reference',) + self.root_dir_info[3:]
+ * if not self.root_entries and not self.root_dir_info: # <<<<<<<<<<<<<<
+ * # this specified path is not present at all, skip it.
+ * # (tail recursion, can do a loop once the full structure is
+ */
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_self->root_entries); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = (!__pyx_t_7);
+ if (__pyx_t_1) {
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_self->root_dir_info); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = (!__pyx_t_7);
+ __pyx_t_7 = __pyx_t_11;
+ } else {
+ __pyx_t_7 = __pyx_t_1;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1567
+ * # (tail recursion, can do a loop once the full structure is
+ * # known).
+ * return self._iter_next() # <<<<<<<<<<<<<<
+ * path_handled = 0
+ * self.root_entries_pos = 0
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_iter_next(__pyx_v_self); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1567; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L32;
+ }
+ __pyx_L32:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1568
+ * # known).
+ * return self._iter_next()
+ * path_handled = 0 # <<<<<<<<<<<<<<
+ * self.root_entries_pos = 0
+ * # XXX Clarity: This loop is duplicated a out the self.current_root
+ */
+ __pyx_v_path_handled = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1569
+ * return self._iter_next()
+ * path_handled = 0
+ * self.root_entries_pos = 0 # <<<<<<<<<<<<<<
+ * # XXX Clarity: This loop is duplicated a out the self.current_root
+ * # is None guard above: if we return from it, it completes there
+ */
+ __pyx_v_self->root_entries_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1574
+ * # (and the following if block cannot trigger because
+ * # path_handled must be true, so the if block is not # duplicated.
+ * while self.root_entries_pos < self.root_entries_len: # <<<<<<<<<<<<<<
+ * entry = self.root_entries[self.root_entries_pos]
+ * self.root_entries_pos = self.root_entries_pos + 1
+ */
+ while (1) {
+ __pyx_t_7 = (__pyx_v_self->root_entries_pos < __pyx_v_self->root_entries_len);
+ if (!__pyx_t_7) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1575
+ * # path_handled must be true, so the if block is not # duplicated.
+ * while self.root_entries_pos < self.root_entries_len:
+ * entry = self.root_entries[self.root_entries_pos] # <<<<<<<<<<<<<<
+ * self.root_entries_pos = self.root_entries_pos + 1
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->root_entries, __pyx_v_self->root_entries_pos, sizeof(int), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1576
+ * while self.root_entries_pos < self.root_entries_len:
+ * entry = self.root_entries[self.root_entries_pos]
+ * self.root_entries_pos = self.root_entries_pos + 1 # <<<<<<<<<<<<<<
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ * if changed is not None:
+ */
+ __pyx_v_self->root_entries_pos = (__pyx_v_self->root_entries_pos + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1577
+ * entry = self.root_entries[self.root_entries_pos]
+ * self.root_entries_pos = self.root_entries_pos + 1
+ * result, changed = self._process_entry(entry, self.root_dir_info) # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * path_handled = -1
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_entry, __pyx_v_self->root_dir_info); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 2)) {
+ PyObject* tuple = __pyx_t_2;
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_4);
+ __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_3;
+ __pyx_t_3 = 0;
+ } else {
+ __pyx_t_9 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_9, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_9, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ if (__Pyx_EndUnpack(__pyx_t_9, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_3;
+ __pyx_t_3 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1578
+ * self.root_entries_pos = self.root_entries_pos + 1
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * path_handled = -1
+ * if changed:
+ */
+ __pyx_t_7 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1579
+ * result, changed = self._process_entry(entry, self.root_dir_info)
+ * if changed is not None:
+ * path_handled = -1 # <<<<<<<<<<<<<<
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ */
+ __pyx_v_path_handled = -1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1580
+ * if changed is not None:
+ * path_handled = -1
+ * if changed: # <<<<<<<<<<<<<<
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ */
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1581
+ * path_handled = -1
+ * if changed:
+ * self._gather_result_for_consistency(result) # <<<<<<<<<<<<<<
+ * if changed or self.include_unchanged:
+ * return result
+ */
+ __pyx_t_6 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_gather_result_for_consistency(__pyx_v_self, __pyx_v_result); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1581; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L36;
+ }
+ __pyx_L36:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1582
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged: # <<<<<<<<<<<<<<
+ * return result
+ * # handle unversioned specified paths:
+ */
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_7) {
+ __pyx_t_1 = __pyx_v_self->include_unchanged;
+ } else {
+ __pyx_t_1 = __pyx_t_7;
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1583
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ * return result # <<<<<<<<<<<<<<
+ * # handle unversioned specified paths:
+ * if self.want_unversioned and not path_handled and self.root_dir_info:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L37;
+ }
+ __pyx_L37:;
+ goto __pyx_L35;
+ }
+ __pyx_L35:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1585
+ * return result
+ * # handle unversioned specified paths:
+ * if self.want_unversioned and not path_handled and self.root_dir_info: # <<<<<<<<<<<<<<
+ * new_executable = bool(
+ * stat.S_ISREG(self.root_dir_info[3].st_mode)
+ */
+ if (__pyx_v_self->want_unversioned) {
+ __pyx_t_1 = (!__pyx_v_path_handled);
+ if (__pyx_t_1) {
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_self->root_dir_info); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1585; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = __pyx_t_7;
+ } else {
+ __pyx_t_11 = __pyx_t_1;
+ }
+ __pyx_t_1 = __pyx_t_11;
+ } else {
+ __pyx_t_1 = __pyx_v_self->want_unversioned;
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1587
+ * if self.want_unversioned and not path_handled and self.root_dir_info:
+ * new_executable = bool(
+ * stat.S_ISREG(self.root_dir_info[3].st_mode) # <<<<<<<<<<<<<<
+ * and stat.S_IEXEC & self.root_dir_info[3].st_mode)
+ * return (None,
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__stat); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__S_ISREG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->root_dir_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1588
+ * new_executable = bool(
+ * stat.S_ISREG(self.root_dir_info[3].st_mode)
+ * and stat.S_IEXEC & self.root_dir_info[3].st_mode) # <<<<<<<<<<<<<<
+ * return (None,
+ * (None, self.current_root_unicode),
+ */
+ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__stat); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__S_IEXEC); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_self->root_dir_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyNumber_And(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = __pyx_t_11;
+ } else {
+ __pyx_t_7 = __pyx_t_1;
+ }
+ __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_t_7); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_new_executable);
+ __pyx_v_new_executable = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1589
+ * stat.S_ISREG(self.root_dir_info[3].st_mode)
+ * and stat.S_IEXEC & self.root_dir_info[3].st_mode)
+ * return (None, # <<<<<<<<<<<<<<
+ * (None, self.current_root_unicode),
+ * True,
+ */
+ __Pyx_XDECREF(__pyx_r);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1590
+ * and stat.S_IEXEC & self.root_dir_info[3].st_mode)
+ * return (None,
+ * (None, self.current_root_unicode), # <<<<<<<<<<<<<<
+ * True,
+ * (False, False),
+ */
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_self->current_root_unicode);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_self->current_root_unicode);
+ __Pyx_GIVEREF(__pyx_v_self->current_root_unicode);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1591
+ * return (None,
+ * (None, self.current_root_unicode),
+ * True, # <<<<<<<<<<<<<<
+ * (False, False),
+ * (None, None),
+ */
+ __pyx_t_3 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1592
+ * (None, self.current_root_unicode),
+ * True,
+ * (False, False), # <<<<<<<<<<<<<<
+ * (None, None),
+ * (None, splitpath(self.current_root_unicode)[-1]),
+ */
+ __pyx_t_2 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_9 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __pyx_t_2 = 0;
+ __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1593
+ * True,
+ * (False, False),
+ * (None, None), # <<<<<<<<<<<<<<
+ * (None, splitpath(self.current_root_unicode)[-1]),
+ * (None, self.root_dir_info[2]),
+ */
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1593; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1594
+ * (False, False),
+ * (None, None),
+ * (None, splitpath(self.current_root_unicode)[-1]), # <<<<<<<<<<<<<<
+ * (None, self.root_dir_info[2]),
+ * (None, new_executable)
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__splitpath); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_self->current_root_unicode);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_self->current_root_unicode);
+ __Pyx_GIVEREF(__pyx_v_self->current_root_unicode);
+ __pyx_t_12 = PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_12, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1595
+ * (None, None),
+ * (None, splitpath(self.current_root_unicode)[-1]),
+ * (None, self.root_dir_info[2]), # <<<<<<<<<<<<<<
+ * (None, new_executable)
+ * )
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_self->root_dir_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1596
+ * (None, splitpath(self.current_root_unicode)[-1]),
+ * (None, self.root_dir_info[2]),
+ * (None, new_executable) # <<<<<<<<<<<<<<
+ * )
+ * # If we reach here, the outer flow continues, which enters into the
+ */
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_new_executable);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_new_executable);
+ __Pyx_GIVEREF(__pyx_v_new_executable);
+ __pyx_t_13 = PyTuple_New(8); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_13, 2, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_13, 3, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_13, 4, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_13, 5, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_13, 6, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_13, 7, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_4 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_10 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_5 = 0;
+ __pyx_r = __pyx_t_13;
+ __pyx_t_13 = 0;
+ goto __pyx_L0;
+ goto __pyx_L38;
+ }
+ __pyx_L38:;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1600
+ * # If we reach here, the outer flow continues, which enters into the
+ * # per-root setup logic.
+ * if (self.current_dir_info is None and self.current_block is None and not # <<<<<<<<<<<<<<
+ * self.doing_consistency_expansion):
+ * # setup iteration of this root:
+ */
+ __pyx_t_7 = (__pyx_v_self->current_dir_info == Py_None);
+ if (__pyx_t_7) {
+ __pyx_t_1 = (__pyx_v_self->current_block == Py_None);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1601
+ * # per-root setup logic.
+ * if (self.current_dir_info is None and self.current_block is None and not
+ * self.doing_consistency_expansion): # <<<<<<<<<<<<<<
+ * # setup iteration of this root:
+ * self.current_dir_list = None
+ */
+ __pyx_t_11 = (!__pyx_v_self->doing_consistency_expansion);
+ __pyx_t_14 = __pyx_t_11;
+ } else {
+ __pyx_t_14 = __pyx_t_1;
+ }
+ __pyx_t_1 = __pyx_t_14;
+ } else {
+ __pyx_t_1 = __pyx_t_7;
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1603
+ * self.doing_consistency_expansion):
+ * # setup iteration of this root:
+ * self.current_dir_list = None # <<<<<<<<<<<<<<
+ * if self.root_dir_info and self.root_dir_info[2] == 'tree-reference':
+ * self.current_dir_info = None
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_list);
+ __Pyx_DECREF(__pyx_v_self->current_dir_list);
+ __pyx_v_self->current_dir_list = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1604
+ * # setup iteration of this root:
+ * self.current_dir_list = None
+ * if self.root_dir_info and self.root_dir_info[2] == 'tree-reference': # <<<<<<<<<<<<<<
+ * self.current_dir_info = None
+ * else:
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_self->root_dir_info); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_v_self->root_dir_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_5 = PyObject_RichCompare(__pyx_t_13, ((PyObject *)__pyx_kp_s_33), Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_14 = __pyx_t_7;
+ } else {
+ __pyx_t_14 = __pyx_t_1;
+ }
+ if (__pyx_t_14) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1605
+ * self.current_dir_list = None
+ * if self.root_dir_info and self.root_dir_info[2] == 'tree-reference':
+ * self.current_dir_info = None # <<<<<<<<<<<<<<
+ * else:
+ * self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath,
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = Py_None;
+ goto __pyx_L40;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1607
+ * self.current_dir_info = None
+ * else:
+ * self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath, # <<<<<<<<<<<<<<
+ * prefix=self.current_root)
+ * self.path_index = 0
+ */
+ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_13 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s___walkdirs_utf8); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_self->root_abspath);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_self->root_abspath);
+ __Pyx_GIVEREF(__pyx_v_self->root_abspath);
+ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1608
+ * else:
+ * self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath,
+ * prefix=self.current_root) # <<<<<<<<<<<<<<
+ * self.path_index = 0
+ * try:
+ */
+ if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__prefix), __pyx_v_self->current_root) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_12 = PyEval_CallObjectWithKeywords(__pyx_t_13, __pyx_t_5, ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1607
+ * self.current_dir_info = None
+ * else:
+ * self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath, # <<<<<<<<<<<<<<
+ * prefix=self.current_root)
+ * self.path_index = 0
+ */
+ __Pyx_GIVEREF(__pyx_t_12);
+ __Pyx_GOTREF(__pyx_v_self->dir_iterator);
+ __Pyx_DECREF(__pyx_v_self->dir_iterator);
+ __pyx_v_self->dir_iterator = __pyx_t_12;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1609
+ * self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath,
+ * prefix=self.current_root)
+ * self.path_index = 0 # <<<<<<<<<<<<<<
+ * try:
+ * self.current_dir_info = self.dir_iterator.next()
+ */
+ __pyx_v_self->path_index = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1610
+ * prefix=self.current_root)
+ * self.path_index = 0
+ * try: # <<<<<<<<<<<<<<
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1]
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1611
+ * self.path_index = 0
+ * try:
+ * self.current_dir_info = self.dir_iterator.next() # <<<<<<<<<<<<<<
+ * self.current_dir_list = self.current_dir_info[1]
+ * except OSError, e:
+ */
+ __pyx_t_12 = PyObject_GetAttr(__pyx_v_self->dir_iterator, __pyx_n_s__next); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1611; __pyx_clineno = __LINE__; goto __pyx_L41_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_2 = PyObject_Call(__pyx_t_12, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1611; __pyx_clineno = __LINE__; goto __pyx_L41_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1612
+ * try:
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1] # <<<<<<<<<<<<<<
+ * except OSError, e:
+ * # there may be directories in the inventory even though
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->current_dir_info, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1612; __pyx_clineno = __LINE__; goto __pyx_L41_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_list);
+ __Pyx_DECREF(__pyx_v_self->current_dir_list);
+ __pyx_v_self->current_dir_list = __pyx_t_2;
+ __pyx_t_2 = 0;
+ }
+ /*else:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1636
+ * raise
+ * else:
+ * if self.current_dir_info[0][0] == '': # <<<<<<<<<<<<<<
+ * # remove .bzr from iteration
+ * bzr_index = self.bisect_left(self.current_dir_list, ('.bzr',))
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->current_dir_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1636; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1636; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_RichCompare(__pyx_t_12, ((PyObject *)__pyx_kp_s_5), Py_EQ); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1636; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1636; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_14) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1638
+ * if self.current_dir_info[0][0] == '':
+ * # remove .bzr from iteration
+ * bzr_index = self.bisect_left(self.current_dir_list, ('.bzr',)) # <<<<<<<<<<<<<<
+ * if self.current_dir_list[bzr_index][0] != '.bzr':
+ * raise AssertionError()
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1638; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_40));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_40));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_40));
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1638; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(__pyx_v_self->current_dir_list);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_v_self->current_dir_list);
+ __Pyx_GIVEREF(__pyx_v_self->current_dir_list);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_v_self->bisect_left, __pyx_t_12, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1638; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_v_bzr_index);
+ __pyx_v_bzr_index = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1639
+ * # remove .bzr from iteration
+ * bzr_index = self.bisect_left(self.current_dir_list, ('.bzr',))
+ * if self.current_dir_list[bzr_index][0] != '.bzr': # <<<<<<<<<<<<<<
+ * raise AssertionError()
+ * del self.current_dir_list[bzr_index]
+ */
+ __pyx_t_2 = PyObject_GetItem(__pyx_v_self->current_dir_list, __pyx_v_bzr_index); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1639; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1639; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_RichCompare(__pyx_t_12, ((PyObject *)__pyx_kp_s_40), Py_NE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1639; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1639; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_14) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1640
+ * bzr_index = self.bisect_left(self.current_dir_list, ('.bzr',))
+ * if self.current_dir_list[bzr_index][0] != '.bzr':
+ * raise AssertionError() # <<<<<<<<<<<<<<
+ * del self.current_dir_list[bzr_index]
+ * initial_key = (self.current_root, '', '')
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_AssertionError, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1640; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1640; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ goto __pyx_L50;
+ }
+ __pyx_L50:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1641
+ * if self.current_dir_list[bzr_index][0] != '.bzr':
+ * raise AssertionError()
+ * del self.current_dir_list[bzr_index] # <<<<<<<<<<<<<<
+ * initial_key = (self.current_root, '', '')
+ * self.block_index, _ = self.state._find_block_index_from_key(initial_key)
+ */
+ if (PyObject_DelItem(__pyx_v_self->current_dir_list, __pyx_v_bzr_index) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1641; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ goto __pyx_L49;
+ }
+ __pyx_L49:;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L48_try_end;
+ __pyx_L41_error:;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1613
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1]
+ * except OSError, e: # <<<<<<<<<<<<<<
+ * # there may be directories in the inventory even though
+ * # this path is not a file on disk: so mark it as end of
+ */
+ __pyx_t_6 = PyErr_ExceptionMatches(__pyx_builtin_OSError);
+ if (__pyx_t_6) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._iter_next");
+ if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_12, &__pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1613; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_v_e);
+ __pyx_v_e = __pyx_t_12;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1617
+ * # this path is not a file on disk: so mark it as end of
+ * # iterator
+ * if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): # <<<<<<<<<<<<<<
+ * self.current_dir_info = None
+ * elif sys.platform == 'win32':
+ */
+ __pyx_t_13 = PyObject_GetAttr(__pyx_v_e, __pyx_n_s__errno); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__errno); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__ENOENT); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_RichCompare(__pyx_t_13, __pyx_t_10, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_14 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_1 = __pyx_t_14;
+ if (!__pyx_t_1) {
+ __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__errno); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__ENOTDIR); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_RichCompare(__pyx_t_13, __pyx_t_10, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_14 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_7 = __pyx_t_14;
+ __pyx_t_14 = __pyx_t_7;
+ } else {
+ __pyx_t_14 = __pyx_t_1;
+ }
+ if (!__pyx_t_14) {
+ __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__errno); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__EINVAL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_RichCompare(__pyx_t_13, __pyx_t_10, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1617; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_7 = __pyx_t_1;
+ __pyx_t_1 = __pyx_t_7;
+ } else {
+ __pyx_t_1 = __pyx_t_14;
+ }
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_14 = __pyx_t_1;
+ if (__pyx_t_14) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1618
+ * # iterator
+ * if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
+ * self.current_dir_info = None # <<<<<<<<<<<<<<
+ * elif sys.platform == 'win32':
+ * # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = Py_None;
+ goto __pyx_L53;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1619
+ * if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
+ * self.current_dir_info = None
+ * elif sys.platform == 'win32': # <<<<<<<<<<<<<<
+ * # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but
+ * # python 2.5 has e.errno == EINVAL,
+ */
+ __pyx_t_13 = __Pyx_GetName(__pyx_m, __pyx_n_s__sys); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1619; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_9 = PyObject_GetAttr(__pyx_t_13, __pyx_n_s__platform); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1619; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_13 = PyObject_RichCompare(__pyx_t_9, ((PyObject *)__pyx_n_s__win32), Py_EQ); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1619; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1619; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (__pyx_t_14) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1623
+ * # python 2.5 has e.errno == EINVAL,
+ * # and e.winerror == ERROR_DIRECTORY
+ * try: # <<<<<<<<<<<<<<
+ * e_winerror = e.winerror
+ * except AttributeError, _:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1624
+ * # and e.winerror == ERROR_DIRECTORY
+ * try:
+ * e_winerror = e.winerror # <<<<<<<<<<<<<<
+ * except AttributeError, _:
+ * e_winerror = None
+ */
+ __pyx_t_13 = PyObject_GetAttr(__pyx_v_e, __pyx_n_s__winerror); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1624; __pyx_clineno = __LINE__; goto __pyx_L54_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_v_e_winerror);
+ __pyx_v_e_winerror = __pyx_t_13;
+ __pyx_t_13 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L61_try_end;
+ __pyx_L54_error:;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1625
+ * try:
+ * e_winerror = e.winerror
+ * except AttributeError, _: # <<<<<<<<<<<<<<
+ * e_winerror = None
+ * win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
+ */
+ __pyx_t_6 = PyErr_ExceptionMatches(__pyx_builtin_AttributeError);
+ if (__pyx_t_6) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._iter_next");
+ if (__Pyx_GetException(&__pyx_t_13, &__pyx_t_9, &__pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1625; __pyx_clineno = __LINE__; goto __pyx_L56_except_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_9;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1626
+ * e_winerror = e.winerror
+ * except AttributeError, _:
+ * e_winerror = None # <<<<<<<<<<<<<<
+ * win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
+ * if (e.errno in win_errors or e_winerror in win_errors):
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_e_winerror);
+ __pyx_v_e_winerror = Py_None;
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ goto __pyx_L55_exception_handled;
+ }
+ __pyx_L56_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L43_except_error;
+ __pyx_L55_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L61_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1627
+ * except AttributeError, _:
+ * e_winerror = None
+ * win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND) # <<<<<<<<<<<<<<
+ * if (e.errno in win_errors or e_winerror in win_errors):
+ * self.current_dir_info = None
+ */
+ __pyx_t_10 = PyInt_FromLong(__pyx_v_6bzrlib_21_dirstate_helpers_pyx_ERROR_DIRECTORY); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1627; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_9 = PyInt_FromLong(__pyx_v_6bzrlib_21_dirstate_helpers_pyx_ERROR_PATH_NOT_FOUND); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1627; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_13 = PyTuple_New(2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1627; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __pyx_t_10 = 0;
+ __pyx_t_9 = 0;
+ if (!(likely(PyTuple_CheckExact(__pyx_t_13))||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_13)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1627; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_v_win_errors));
+ __pyx_v_win_errors = ((PyObject *)__pyx_t_13);
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1628
+ * e_winerror = None
+ * win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
+ * if (e.errno in win_errors or e_winerror in win_errors): # <<<<<<<<<<<<<<
+ * self.current_dir_info = None
+ * else:
+ */
+ __pyx_t_13 = PyObject_GetAttr(__pyx_v_e, __pyx_n_s__errno); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1628; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_14 = ((PySequence_Contains(((PyObject *)__pyx_v_win_errors), __pyx_t_13))); if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1628; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (!__pyx_t_14) {
+ __pyx_t_1 = ((PySequence_Contains(((PyObject *)__pyx_v_win_errors), __pyx_v_e_winerror))); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1628; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ __pyx_t_7 = __pyx_t_1;
+ } else {
+ __pyx_t_7 = __pyx_t_14;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1629
+ * win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
+ * if (e.errno in win_errors or e_winerror in win_errors):
+ * self.current_dir_info = None # <<<<<<<<<<<<<<
+ * else:
+ * # Will this really raise the right exception ?
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = Py_None;
+ goto __pyx_L64;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1632
+ * else:
+ * # Will this really raise the right exception ?
+ * raise # <<<<<<<<<<<<<<
+ * else:
+ * raise
+ */
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __Pyx_ErrRestore(__pyx_t_2, __pyx_t_12, __pyx_t_5);
+ __pyx_t_2 = 0; __pyx_t_12 = 0; __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1632; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ }
+ __pyx_L64:;
+ goto __pyx_L53;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1634
+ * raise
+ * else:
+ * raise # <<<<<<<<<<<<<<
+ * else:
+ * if self.current_dir_info[0][0] == '':
+ */
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __Pyx_ErrRestore(__pyx_t_2, __pyx_t_12, __pyx_t_5);
+ __pyx_t_2 = 0; __pyx_t_12 = 0; __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1634; __pyx_clineno = __LINE__; goto __pyx_L43_except_error;}
+ }
+ __pyx_L53:;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ goto __pyx_L42_exception_handled;
+ }
+ __pyx_L43_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L42_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L48_try_end:;
+ }
+ }
+ __pyx_L40:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1642
+ * raise AssertionError()
+ * del self.current_dir_list[bzr_index]
+ * initial_key = (self.current_root, '', '') # <<<<<<<<<<<<<<
+ * self.block_index, _ = self.state._find_block_index_from_key(initial_key)
+ * if self.block_index == 0:
+ */
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1642; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_self->current_root);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_self->current_root);
+ __Pyx_GIVEREF(__pyx_v_self->current_root);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_5, 2, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ if (!(likely(PyTuple_CheckExact(__pyx_t_5))||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_5)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1642; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_v_initial_key));
+ __pyx_v_initial_key = ((PyObject *)__pyx_t_5);
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1643
+ * del self.current_dir_list[bzr_index]
+ * initial_key = (self.current_root, '', '')
+ * self.block_index, _ = self.state._find_block_index_from_key(initial_key) # <<<<<<<<<<<<<<
+ * if self.block_index == 0:
+ * # we have processed the total root already, but because the
+ */
+ __pyx_t_5 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s_41); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(((PyObject *)__pyx_v_initial_key));
+ PyTuple_SET_ITEM(__pyx_t_12, 0, ((PyObject *)__pyx_v_initial_key));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_initial_key));
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, __pyx_t_12, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 2)) {
+ PyObject* tuple = __pyx_t_2;
+ __pyx_t_12 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_12);
+ __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_12); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_5 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_self->block_index = __pyx_t_6;
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_5;
+ __pyx_t_5 = 0;
+ } else {
+ __pyx_t_13 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_12 = __Pyx_UnpackItem(__pyx_t_13, 0); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_12); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_5 = __Pyx_UnpackItem(__pyx_t_13, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ if (__Pyx_EndUnpack(__pyx_t_13, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_v_self->block_index = __pyx_t_6;
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_5;
+ __pyx_t_5 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1644
+ * initial_key = (self.current_root, '', '')
+ * self.block_index, _ = self.state._find_block_index_from_key(initial_key)
+ * if self.block_index == 0: # <<<<<<<<<<<<<<
+ * # we have processed the total root already, but because the
+ * # initial key matched it we should skip it here.
+ */
+ __pyx_t_7 = (__pyx_v_self->block_index == 0);
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1647
+ * # we have processed the total root already, but because the
+ * # initial key matched it we should skip it here.
+ * self.block_index = self.block_index + 1 # <<<<<<<<<<<<<<
+ * self._update_current_block()
+ * # walk until both the directory listing and the versioned metadata
+ */
+ __pyx_v_self->block_index = (__pyx_v_self->block_index + 1);
+ goto __pyx_L65;
+ }
+ __pyx_L65:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1648
+ * # initial key matched it we should skip it here.
+ * self.block_index = self.block_index + 1
+ * self._update_current_block() # <<<<<<<<<<<<<<
+ * # walk until both the directory listing and the versioned metadata
+ * # are exhausted.
+ */
+ __pyx_t_6 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_update_current_block(__pyx_v_self); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L39;
+ }
+ __pyx_L39:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1651
+ * # walk until both the directory listing and the versioned metadata
+ * # are exhausted.
+ * while (self.current_dir_info is not None # <<<<<<<<<<<<<<
+ * or self.current_block is not None):
+ * # Uncommon case - a missing directory or an unversioned directory:
+ */
+ while (1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1652
+ * # are exhausted.
+ * while (self.current_dir_info is not None
+ * or self.current_block is not None): # <<<<<<<<<<<<<<
+ * # Uncommon case - a missing directory or an unversioned directory:
+ * if (self.current_dir_info and self.current_block
+ */
+ __pyx_t_7 = (__pyx_v_self->current_dir_info != Py_None);
+ if (!__pyx_t_7) {
+ __pyx_t_14 = (__pyx_v_self->current_block != Py_None);
+ __pyx_t_1 = __pyx_t_14;
+ } else {
+ __pyx_t_1 = __pyx_t_7;
+ }
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1654
+ * or self.current_block is not None):
+ * # Uncommon case - a missing directory or an unversioned directory:
+ * if (self.current_dir_info and self.current_block # <<<<<<<<<<<<<<
+ * and self.current_dir_info[0][0] != self.current_block[0]):
+ * # Work around pyrex broken heuristic - current_dirname has
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_self->current_dir_info); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1655
+ * # Uncommon case - a missing directory or an unversioned directory:
+ * if (self.current_dir_info and self.current_block
+ * and self.current_dir_info[0][0] != self.current_block[0]): # <<<<<<<<<<<<<<
+ * # Work around pyrex broken heuristic - current_dirname has
+ * # the same scope as current_dirname_c
+ */
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_self->current_block); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_7) {
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->current_dir_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1655; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1655; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->current_block, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1655; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_12 = PyObject_RichCompare(__pyx_t_5, __pyx_t_2, Py_NE); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1655; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1655; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_11 = __pyx_t_14;
+ } else {
+ __pyx_t_11 = __pyx_t_7;
+ }
+ __pyx_t_7 = __pyx_t_11;
+ } else {
+ __pyx_t_7 = __pyx_t_1;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1658
+ * # Work around pyrex broken heuristic - current_dirname has
+ * # the same scope as current_dirname_c
+ * current_dirname = self.current_dir_info[0][0] # <<<<<<<<<<<<<<
+ * current_dirname_c = PyString_AS_STRING_void(
+ * <void *>current_dirname)
+ */
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_self->current_dir_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1658; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_12, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1658; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_v_current_dirname);
+ __pyx_v_current_dirname = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1660
+ * current_dirname = self.current_dir_info[0][0]
+ * current_dirname_c = PyString_AS_STRING_void(
+ * <void *>current_dirname) # <<<<<<<<<<<<<<
+ * current_blockname = self.current_block[0]
+ * current_blockname_c = PyString_AS_STRING_void(
+ */
+ __pyx_v_current_dirname_c = PyString_AS_STRING(((void *)__pyx_v_current_dirname));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1661
+ * current_dirname_c = PyString_AS_STRING_void(
+ * <void *>current_dirname)
+ * current_blockname = self.current_block[0] # <<<<<<<<<<<<<<
+ * current_blockname_c = PyString_AS_STRING_void(
+ * <void *>current_blockname)
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->current_block, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_current_blockname);
+ __pyx_v_current_blockname = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1663
+ * current_blockname = self.current_block[0]
+ * current_blockname_c = PyString_AS_STRING_void(
+ * <void *>current_blockname) # <<<<<<<<<<<<<<
+ * # In the python generator we evaluate this if block once per
+ * # dir+block; because we reenter in the pyrex version its being
+ */
+ __pyx_v_current_blockname_c = PyString_AS_STRING(((void *)__pyx_v_current_blockname));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1671
+ * PyString_Size(current_dirname),
+ * current_blockname_c,
+ * PyString_Size(current_blockname)) < 0: # <<<<<<<<<<<<<<
+ * # filesystem data refers to paths not covered by the
+ * # dirblock. this has two possibilities:
+ */
+ __pyx_t_7 = (__pyx_f_6bzrlib_21_dirstate_helpers_pyx__cmp_by_dirs(__pyx_v_current_dirname_c, PyString_Size(__pyx_v_current_dirname), __pyx_v_current_blockname_c, PyString_Size(__pyx_v_current_blockname)) < 0);
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1682
+ * # recurse into unknown directories.
+ * # We are doing a loop
+ * while self.path_index < len(self.current_dir_list): # <<<<<<<<<<<<<<
+ * current_path_info = self.current_dir_list[self.path_index]
+ * # dont descend into this unversioned path if it is
+ */
+ while (1) {
+ __pyx_t_8 = PyObject_Length(__pyx_v_self->current_dir_list); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = (__pyx_v_self->path_index < __pyx_t_8);
+ if (!__pyx_t_7) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1683
+ * # We are doing a loop
+ * while self.path_index < len(self.current_dir_list):
+ * current_path_info = self.current_dir_list[self.path_index] # <<<<<<<<<<<<<<
+ * # dont descend into this unversioned path if it is
+ * # a dir
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_self->current_dir_list, __pyx_v_self->path_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1683; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1686
+ * # dont descend into this unversioned path if it is
+ * # a dir
+ * if current_path_info[2] in ('directory', # <<<<<<<<<<<<<<
+ * 'tree-reference'):
+ * del self.current_dir_list[self.path_index]
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1686; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_12 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1686; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1686; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_1 = __pyx_t_7;
+ if (!__pyx_t_1) {
+ __pyx_t_12 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_kp_s_33), Py_EQ); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1686; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1686; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_11 = __pyx_t_7;
+ __pyx_t_7 = __pyx_t_11;
+ } else {
+ __pyx_t_7 = __pyx_t_1;
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __pyx_t_7;
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1688
+ * if current_path_info[2] in ('directory',
+ * 'tree-reference'):
+ * del self.current_dir_list[self.path_index] # <<<<<<<<<<<<<<
+ * self.path_index = self.path_index - 1
+ * self.path_index = self.path_index + 1
+ */
+ if (__Pyx_DelItemInt(__pyx_v_self->current_dir_list, __pyx_v_self->path_index, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1689
+ * 'tree-reference'):
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1 # <<<<<<<<<<<<<<
+ * self.path_index = self.path_index + 1
+ * if self.want_unversioned:
+ */
+ __pyx_v_self->path_index = (__pyx_v_self->path_index - 1);
+ goto __pyx_L72;
+ }
+ __pyx_L72:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1690
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1
+ * self.path_index = self.path_index + 1 # <<<<<<<<<<<<<<
+ * if self.want_unversioned:
+ * if current_path_info[2] == 'directory':
+ */
+ __pyx_v_self->path_index = (__pyx_v_self->path_index + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1691
+ * self.path_index = self.path_index - 1
+ * self.path_index = self.path_index + 1
+ * if self.want_unversioned: # <<<<<<<<<<<<<<
+ * if current_path_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference(
+ */
+ if (__pyx_v_self->want_unversioned) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1692
+ * self.path_index = self.path_index + 1
+ * if self.want_unversioned:
+ * if current_path_info[2] == 'directory': # <<<<<<<<<<<<<<
+ * if self.tree._directory_is_tree_reference(
+ * self.utf8_decode(current_path_info[0])[0]):
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_12 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1693
+ * if self.want_unversioned:
+ * if current_path_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference( # <<<<<<<<<<<<<<
+ * self.utf8_decode(current_path_info[0])[0]):
+ * current_path_info = current_path_info[:2] + \
+ */
+ __pyx_t_12 = PyObject_GetAttr(__pyx_v_self->tree, __pyx_n_s_39); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1694
+ * if current_path_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference(
+ * self.utf8_decode(current_path_info[0])[0]): # <<<<<<<<<<<<<<
+ * current_path_info = current_path_info[:2] + \
+ * ('tree-reference',) + current_path_info[3:]
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_current_path_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_t_12, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1695
+ * if self.tree._directory_is_tree_reference(
+ * self.utf8_decode(current_path_info[0])[0]):
+ * current_path_info = current_path_info[:2] + \ # <<<<<<<<<<<<<<
+ * ('tree-reference',) + current_path_info[3:]
+ * new_executable = bool(
+ */
+ __pyx_t_5 = PySequence_GetSlice(__pyx_v_current_path_info, 0, 2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1696
+ * self.utf8_decode(current_path_info[0])[0]):
+ * current_path_info = current_path_info[:2] + \
+ * ('tree-reference',) + current_path_info[3:] # <<<<<<<<<<<<<<
+ * new_executable = bool(
+ * stat.S_ISREG(current_path_info[3].st_mode)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_33));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_33));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33));
+ __pyx_t_12 = PyNumber_Add(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PySequence_GetSlice(__pyx_v_current_path_info, 3, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = PyNumber_Add(__pyx_t_12, __pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L75;
+ }
+ __pyx_L75:;
+ goto __pyx_L74;
+ }
+ __pyx_L74:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1698
+ * ('tree-reference',) + current_path_info[3:]
+ * new_executable = bool(
+ * stat.S_ISREG(current_path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * and stat.S_IEXEC & current_path_info[3].st_mode)
+ * return (None,
+ */
+ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__stat); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__S_ISREG); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_current_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_12 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1699
+ * new_executable = bool(
+ * stat.S_ISREG(current_path_info[3].st_mode)
+ * and stat.S_IEXEC & current_path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * return (None,
+ * (None, self.utf8_decode(current_path_info[0])[0]),
+ */
+ __pyx_t_12 = __Pyx_GetName(__pyx_m, __pyx_n_s__stat); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_5 = PyObject_GetAttr(__pyx_t_12, __pyx_n_s__S_IEXEC); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_current_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_12, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = PyNumber_And(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_11 = __pyx_t_7;
+ } else {
+ __pyx_t_11 = __pyx_t_1;
+ }
+ __pyx_t_12 = __Pyx_PyBool_FromLong(__pyx_t_11); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_v_new_executable);
+ __pyx_v_new_executable = __pyx_t_12;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1700
+ * stat.S_ISREG(current_path_info[3].st_mode)
+ * and stat.S_IEXEC & current_path_info[3].st_mode)
+ * return (None, # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(current_path_info[0])[0]),
+ * True,
+ */
+ __Pyx_XDECREF(__pyx_r);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1701
+ * and stat.S_IEXEC & current_path_info[3].st_mode)
+ * return (None,
+ * (None, self.utf8_decode(current_path_info[0])[0]), # <<<<<<<<<<<<<<
+ * True,
+ * (False, False),
+ */
+ __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_current_path_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_2, NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_12, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1702
+ * return (None,
+ * (None, self.utf8_decode(current_path_info[0])[0]),
+ * True, # <<<<<<<<<<<<<<
+ * (False, False),
+ * (None, None),
+ */
+ __pyx_t_2 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1703
+ * (None, self.utf8_decode(current_path_info[0])[0]),
+ * True,
+ * (False, False), # <<<<<<<<<<<<<<
+ * (None, None),
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ */
+ __pyx_t_5 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1703; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_13 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1703; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1703; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ __pyx_t_5 = 0;
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1704
+ * True,
+ * (False, False),
+ * (None, None), # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ * (None, current_path_info[2]),
+ */
+ __pyx_t_13 = PyTuple_New(2); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1705
+ * (False, False),
+ * (None, None),
+ * (None, self.utf8_decode(current_path_info[1])[0]), # <<<<<<<<<<<<<<
+ * (None, current_path_info[2]),
+ * (None, new_executable))
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_current_path_info, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_10, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_t_5, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1706
+ * (None, None),
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ * (None, current_path_info[2]), # <<<<<<<<<<<<<<
+ * (None, new_executable))
+ * # This dir info has been handled, go to the next
+ */
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1707
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ * (None, current_path_info[2]),
+ * (None, new_executable)) # <<<<<<<<<<<<<<
+ * # This dir info has been handled, go to the next
+ * self.path_index = 0
+ */
+ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_new_executable);
+ PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_v_new_executable);
+ __Pyx_GIVEREF(__pyx_v_new_executable);
+ __pyx_t_4 = PyTuple_New(8); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1700; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_4, 5, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_4, 6, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_4, 7, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_12 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_13 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_10 = 0;
+ __pyx_r = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+ goto __pyx_L73;
+ }
+ __pyx_L73:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1709
+ * (None, new_executable))
+ * # This dir info has been handled, go to the next
+ * self.path_index = 0 # <<<<<<<<<<<<<<
+ * self.current_dir_list = None
+ * try:
+ */
+ __pyx_v_self->path_index = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1710
+ * # This dir info has been handled, go to the next
+ * self.path_index = 0
+ * self.current_dir_list = None # <<<<<<<<<<<<<<
+ * try:
+ * self.current_dir_info = self.dir_iterator.next()
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_list);
+ __Pyx_DECREF(__pyx_v_self->current_dir_list);
+ __pyx_v_self->current_dir_list = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1711
+ * self.path_index = 0
+ * self.current_dir_list = None
+ * try: # <<<<<<<<<<<<<<
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1]
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1712
+ * self.current_dir_list = None
+ * try:
+ * self.current_dir_info = self.dir_iterator.next() # <<<<<<<<<<<<<<
+ * self.current_dir_list = self.current_dir_info[1]
+ * except StopIteration, _:
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_self->dir_iterator, __pyx_n_s__next); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1712; __pyx_clineno = __LINE__; goto __pyx_L76_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_10 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1712; __pyx_clineno = __LINE__; goto __pyx_L76_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GIVEREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1713
+ * try:
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1] # <<<<<<<<<<<<<<
+ * except StopIteration, _:
+ * self.current_dir_info = None
+ */
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_self->current_dir_info, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1713; __pyx_clineno = __LINE__; goto __pyx_L76_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_list);
+ __Pyx_DECREF(__pyx_v_self->current_dir_list);
+ __pyx_v_self->current_dir_list = __pyx_t_10;
+ __pyx_t_10 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L83_try_end;
+ __pyx_L76_error:;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1714
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1]
+ * except StopIteration, _: # <<<<<<<<<<<<<<
+ * self.current_dir_info = None
+ * else: #(dircmp > 0)
+ */
+ __pyx_t_6 = PyErr_ExceptionMatches(__pyx_builtin_StopIteration);
+ if (__pyx_t_6) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._iter_next");
+ if (__Pyx_GetException(&__pyx_t_10, &__pyx_t_4, &__pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1714; __pyx_clineno = __LINE__; goto __pyx_L78_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1715
+ * self.current_dir_list = self.current_dir_info[1]
+ * except StopIteration, _:
+ * self.current_dir_info = None # <<<<<<<<<<<<<<
+ * else: #(dircmp > 0)
+ * # We have a dirblock entry for this location, but there
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = Py_None;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L77_exception_handled;
+ }
+ __pyx_L78_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L77_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L83_try_end:;
+ }
+ goto __pyx_L69;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1724
+ * # need to handle all of the files that are contained
+ * # within.
+ * while self.current_block_pos < len(self.current_block_list): # <<<<<<<<<<<<<<
+ * current_entry = self.current_block_list[self.current_block_pos]
+ * self.current_block_pos = self.current_block_pos + 1
+ */
+ while (1) {
+ __pyx_t_8 = PyObject_Length(__pyx_v_self->current_block_list); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = (__pyx_v_self->current_block_pos < __pyx_t_8);
+ if (!__pyx_t_11) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1725
+ * # within.
+ * while self.current_block_pos < len(self.current_block_list):
+ * current_entry = self.current_block_list[self.current_block_pos] # <<<<<<<<<<<<<<
+ * self.current_block_pos = self.current_block_pos + 1
+ * # entry referring to file not present on disk.
+ */
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_self->current_block_list, __pyx_v_self->current_block_pos, sizeof(int), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __pyx_v_current_entry = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1726
+ * while self.current_block_pos < len(self.current_block_list):
+ * current_entry = self.current_block_list[self.current_block_pos]
+ * self.current_block_pos = self.current_block_pos + 1 # <<<<<<<<<<<<<<
+ * # entry referring to file not present on disk.
+ * # advance the entry only, after processing.
+ */
+ __pyx_v_self->current_block_pos = (__pyx_v_self->current_block_pos + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1729
+ * # entry referring to file not present on disk.
+ * # advance the entry only, after processing.
+ * result, changed = self._process_entry(current_entry, None) # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * if changed:
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_current_entry, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ if (PyTuple_CheckExact(__pyx_t_3) && likely(PyTuple_GET_SIZE(__pyx_t_3) == 2)) {
+ PyObject* tuple = __pyx_t_3;
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_4);
+ __pyx_t_10 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_10;
+ __pyx_t_10 = 0;
+ } else {
+ __pyx_t_5 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_10 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ if (__Pyx_EndUnpack(__pyx_t_5, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_10;
+ __pyx_t_10 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1730
+ * # advance the entry only, after processing.
+ * result, changed = self._process_entry(current_entry, None)
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ */
+ __pyx_t_11 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_11) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1731
+ * result, changed = self._process_entry(current_entry, None)
+ * if changed is not None:
+ * if changed: # <<<<<<<<<<<<<<
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ */
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_11) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1732
+ * if changed is not None:
+ * if changed:
+ * self._gather_result_for_consistency(result) # <<<<<<<<<<<<<<
+ * if changed or self.include_unchanged:
+ * return result
+ */
+ __pyx_t_6 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_gather_result_for_consistency(__pyx_v_self, __pyx_v_result); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1732; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L89;
+ }
+ __pyx_L89:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1733
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged: # <<<<<<<<<<<<<<
+ * return result
+ * self.block_index = self.block_index + 1
+ */
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_11) {
+ __pyx_t_1 = __pyx_v_self->include_unchanged;
+ } else {
+ __pyx_t_1 = __pyx_t_11;
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1734
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ * return result # <<<<<<<<<<<<<<
+ * self.block_index = self.block_index + 1
+ * self._update_current_block()
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L90;
+ }
+ __pyx_L90:;
+ goto __pyx_L88;
+ }
+ __pyx_L88:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1735
+ * if changed or self.include_unchanged:
+ * return result
+ * self.block_index = self.block_index + 1 # <<<<<<<<<<<<<<
+ * self._update_current_block()
+ * continue # next loop-on-block/dir
+ */
+ __pyx_v_self->block_index = (__pyx_v_self->block_index + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1736
+ * return result
+ * self.block_index = self.block_index + 1
+ * self._update_current_block() # <<<<<<<<<<<<<<
+ * continue # next loop-on-block/dir
+ * result = self._loop_one_block()
+ */
+ __pyx_t_6 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_update_current_block(__pyx_v_self); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L69:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1737
+ * self.block_index = self.block_index + 1
+ * self._update_current_block()
+ * continue # next loop-on-block/dir # <<<<<<<<<<<<<<
+ * result = self._loop_one_block()
+ * if result is not None:
+ */
+ goto __pyx_L66_continue;
+ goto __pyx_L68;
+ }
+ __pyx_L68:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1738
+ * self._update_current_block()
+ * continue # next loop-on-block/dir
+ * result = self._loop_one_block() # <<<<<<<<<<<<<<
+ * if result is not None:
+ * return result
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_loop_one_block(__pyx_v_self); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1738; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1739
+ * continue # next loop-on-block/dir
+ * result = self._loop_one_block()
+ * if result is not None: # <<<<<<<<<<<<<<
+ * return result
+ * if len(self.search_specific_files):
+ */
+ __pyx_t_1 = (__pyx_v_result != Py_None);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1740
+ * result = self._loop_one_block()
+ * if result is not None:
+ * return result # <<<<<<<<<<<<<<
+ * if len(self.search_specific_files):
+ * # More supplied paths to process
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L91;
+ }
+ __pyx_L91:;
+ __pyx_L66_continue:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1741
+ * if result is not None:
+ * return result
+ * if len(self.search_specific_files): # <<<<<<<<<<<<<<
+ * # More supplied paths to process
+ * self.current_root = None
+ */
+ __pyx_t_8 = PyObject_Length(__pyx_v_self->search_specific_files); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1741; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1743
+ * if len(self.search_specific_files):
+ * # More supplied paths to process
+ * self.current_root = None # <<<<<<<<<<<<<<
+ * return self._iter_next()
+ * # Start expanding more conservatively, adding paths the user may not
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_root);
+ __Pyx_DECREF(__pyx_v_self->current_root);
+ __pyx_v_self->current_root = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1744
+ * # More supplied paths to process
+ * self.current_root = None
+ * return self._iter_next() # <<<<<<<<<<<<<<
+ * # Start expanding more conservatively, adding paths the user may not
+ * # have intended but required for consistent deltas.
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_iter_next(__pyx_v_self); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1744; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L92;
+ }
+ __pyx_L92:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1747
+ * # Start expanding more conservatively, adding paths the user may not
+ * # have intended but required for consistent deltas.
+ * self.doing_consistency_expansion = 1 # <<<<<<<<<<<<<<
+ * if not self._pending_consistent_entries:
+ * self._pending_consistent_entries = self._next_consistent_entries()
+ */
+ __pyx_v_self->doing_consistency_expansion = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1748
+ * # have intended but required for consistent deltas.
+ * self.doing_consistency_expansion = 1
+ * if not self._pending_consistent_entries: # <<<<<<<<<<<<<<
+ * self._pending_consistent_entries = self._next_consistent_entries()
+ * while self._pending_consistent_entries:
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_self->_pending_consistent_entries); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1748; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = (!__pyx_t_1);
+ if (__pyx_t_11) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1749
+ * self.doing_consistency_expansion = 1
+ * if not self._pending_consistent_entries:
+ * self._pending_consistent_entries = self._next_consistent_entries() # <<<<<<<<<<<<<<
+ * while self._pending_consistent_entries:
+ * result, changed = self._pending_consistent_entries.pop()
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_next_consistent_entries(__pyx_v_self); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1749; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->_pending_consistent_entries);
+ __Pyx_DECREF(__pyx_v_self->_pending_consistent_entries);
+ __pyx_v_self->_pending_consistent_entries = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L93;
+ }
+ __pyx_L93:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1750
+ * if not self._pending_consistent_entries:
+ * self._pending_consistent_entries = self._next_consistent_entries()
+ * while self._pending_consistent_entries: # <<<<<<<<<<<<<<
+ * result, changed = self._pending_consistent_entries.pop()
+ * if changed is not None:
+ */
+ while (1) {
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_self->_pending_consistent_entries); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1750; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_11) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1751
+ * self._pending_consistent_entries = self._next_consistent_entries()
+ * while self._pending_consistent_entries:
+ * result, changed = self._pending_consistent_entries.pop() # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * return result
+ */
+ __pyx_t_3 = __Pyx_PyObject_Pop(__pyx_v_self->_pending_consistent_entries); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1751; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ if (PyTuple_CheckExact(__pyx_t_3) && likely(PyTuple_GET_SIZE(__pyx_t_3) == 2)) {
+ PyObject* tuple = __pyx_t_3;
+ __pyx_t_10 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_10);
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_10;
+ __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ } else {
+ __pyx_t_5 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1751; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_10 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1751; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1751; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (__Pyx_EndUnpack(__pyx_t_5, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1751; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_10;
+ __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1752
+ * while self._pending_consistent_entries:
+ * result, changed = self._pending_consistent_entries.pop()
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * return result
+ * raise StopIteration()
+ */
+ __pyx_t_11 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_11) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1753
+ * result, changed = self._pending_consistent_entries.pop()
+ * if changed is not None:
+ * return result # <<<<<<<<<<<<<<
+ * raise StopIteration()
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L96;
+ }
+ __pyx_L96:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1754
+ * if changed is not None:
+ * return result
+ * raise StopIteration() # <<<<<<<<<<<<<<
+ *
+ * cdef object _maybe_tree_ref(self, current_path_info):
+ */
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_StopIteration, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1754; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1754; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._iter_next");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_current_dirname);
+ __Pyx_DECREF(__pyx_v_current_blockname);
+ __Pyx_DECREF(__pyx_v_searched_specific_files);
+ __Pyx_DECREF(__pyx_v_entry);
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_DECREF(__pyx_v_changed);
+ __Pyx_DECREF(__pyx_v__);
+ __Pyx_DECREF(__pyx_v_root_stat);
+ __Pyx_DECREF(__pyx_v_e);
+ __Pyx_DECREF(__pyx_v_new_executable);
+ __Pyx_DECREF(__pyx_v_e_winerror);
+ __Pyx_DECREF(__pyx_v_win_errors);
+ __Pyx_DECREF(__pyx_v_bzr_index);
+ __Pyx_DECREF(__pyx_v_initial_key);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1756
+ * raise StopIteration()
+ *
+ * cdef object _maybe_tree_ref(self, current_path_info): # <<<<<<<<<<<<<<
+ * if self.tree._directory_is_tree_reference(
+ * self.utf8_decode(current_path_info[0])[0]):
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__maybe_tree_ref(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self, PyObject *__pyx_v_current_path_info) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ __Pyx_RefNannySetupContext("_maybe_tree_ref");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1757
+ *
+ * cdef object _maybe_tree_ref(self, current_path_info):
+ * if self.tree._directory_is_tree_reference( # <<<<<<<<<<<<<<
+ * self.utf8_decode(current_path_info[0])[0]):
+ * return current_path_info[:2] + \
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->tree, __pyx_n_s_39); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1758
+ * cdef object _maybe_tree_ref(self, current_path_info):
+ * if self.tree._directory_is_tree_reference(
+ * self.utf8_decode(current_path_info[0])[0]): # <<<<<<<<<<<<<<
+ * return current_path_info[:2] + \
+ * ('tree-reference',) + current_path_info[3:]
+ */
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_current_path_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1758; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1758; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1758; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1758; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1757; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1759
+ * if self.tree._directory_is_tree_reference(
+ * self.utf8_decode(current_path_info[0])[0]):
+ * return current_path_info[:2] + \ # <<<<<<<<<<<<<<
+ * ('tree-reference',) + current_path_info[3:]
+ * else:
+ */
+ __Pyx_XDECREF(__pyx_r);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1760
+ * self.utf8_decode(current_path_info[0])[0]):
+ * return current_path_info[:2] + \
+ * ('tree-reference',) + current_path_info[3:] # <<<<<<<<<<<<<<
+ * else:
+ * return current_path_info
+ */
+ __pyx_t_3 = PySequence_GetSlice(__pyx_v_current_path_info, 0, 2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1759; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1760; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_33));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_33));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33));
+ __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1759; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PySequence_GetSlice(__pyx_v_current_path_info, 3, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1760; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1760; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1762
+ * ('tree-reference',) + current_path_info[3:]
+ * else:
+ * return current_path_info # <<<<<<<<<<<<<<
+ *
+ * cdef object _loop_one_block(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_current_path_info);
+ __pyx_r = __pyx_v_current_path_info;
+ goto __pyx_L0;
+ }
+ __pyx_L3:;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._maybe_tree_ref");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1764
+ * return current_path_info
+ *
+ * cdef object _loop_one_block(self): # <<<<<<<<<<<<<<
+ * # current_dir_info and current_block refer to the same directory -
+ * # this is the common case code.
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__loop_one_block(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self) {
+ PyObject *__pyx_v_current_entry;
+ PyObject *__pyx_v_current_path_info;
+ int __pyx_v_path_handled;
+ char __pyx_v_minikind;
+ int __pyx_v_cmp_result;
+ PyObject *__pyx_v_disk_kind;
+ long __pyx_v_advance_entry;
+ long __pyx_v_advance_path;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_changed;
+ PyObject *__pyx_v_new_executable;
+ PyObject *__pyx_v_relpath_unicode;
+ PyObject *__pyx_v__;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ char __pyx_t_8;
+ int __pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ Py_ssize_t __pyx_t_14;
+ __Pyx_RefNannySetupContext("_loop_one_block");
+ __pyx_v_current_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_current_path_info = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_disk_kind = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_changed = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_new_executable = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_relpath_unicode = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v__ = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1777
+ * # PyString_AsStringAndSize(disk_kind, &temp_str, &temp_str_length)
+ * # if not strncmp(temp_str, "directory", temp_str_length):
+ * if (self.current_block is not None and # <<<<<<<<<<<<<<
+ * self.current_block_pos < PyList_GET_SIZE(self.current_block_list)):
+ * current_entry = PyList_GET_ITEM(self.current_block_list,
+ */
+ __pyx_t_1 = (__pyx_v_self->current_block != Py_None);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1778
+ * # if not strncmp(temp_str, "directory", temp_str_length):
+ * if (self.current_block is not None and
+ * self.current_block_pos < PyList_GET_SIZE(self.current_block_list)): # <<<<<<<<<<<<<<
+ * current_entry = PyList_GET_ITEM(self.current_block_list,
+ * self.current_block_pos)
+ */
+ __pyx_t_2 = (__pyx_v_self->current_block_pos < PyList_GET_SIZE(__pyx_v_self->current_block_list));
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1780
+ * self.current_block_pos < PyList_GET_SIZE(self.current_block_list)):
+ * current_entry = PyList_GET_ITEM(self.current_block_list,
+ * self.current_block_pos) # <<<<<<<<<<<<<<
+ * # accomodate pyrex
+ * Py_INCREF(current_entry)
+ */
+ __pyx_t_4 = PyList_GET_ITEM(__pyx_v_self->current_block_list, __pyx_v_self->current_block_pos); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __pyx_v_current_entry = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1782
+ * self.current_block_pos)
+ * # accomodate pyrex
+ * Py_INCREF(current_entry) # <<<<<<<<<<<<<<
+ * else:
+ * current_entry = None
+ */
+ Py_INCREF(__pyx_v_current_entry);
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1784
+ * Py_INCREF(current_entry)
+ * else:
+ * current_entry = None # <<<<<<<<<<<<<<
+ * if (self.current_dir_info is not None and
+ * self.path_index < PyList_GET_SIZE(self.current_dir_list)):
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __pyx_v_current_entry = Py_None;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1785
+ * else:
+ * current_entry = None
+ * if (self.current_dir_info is not None and # <<<<<<<<<<<<<<
+ * self.path_index < PyList_GET_SIZE(self.current_dir_list)):
+ * current_path_info = PyList_GET_ITEM(self.current_dir_list,
+ */
+ __pyx_t_3 = (__pyx_v_self->current_dir_info != Py_None);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1786
+ * current_entry = None
+ * if (self.current_dir_info is not None and
+ * self.path_index < PyList_GET_SIZE(self.current_dir_list)): # <<<<<<<<<<<<<<
+ * current_path_info = PyList_GET_ITEM(self.current_dir_list,
+ * self.path_index)
+ */
+ __pyx_t_1 = (__pyx_v_self->path_index < PyList_GET_SIZE(__pyx_v_self->current_dir_list));
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = __pyx_t_3;
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1788
+ * self.path_index < PyList_GET_SIZE(self.current_dir_list)):
+ * current_path_info = PyList_GET_ITEM(self.current_dir_list,
+ * self.path_index) # <<<<<<<<<<<<<<
+ * # accomodate pyrex
+ * Py_INCREF(current_path_info)
+ */
+ __pyx_t_4 = PyList_GET_ITEM(__pyx_v_self->current_dir_list, __pyx_v_self->path_index); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1790
+ * self.path_index)
+ * # accomodate pyrex
+ * Py_INCREF(current_path_info) # <<<<<<<<<<<<<<
+ * disk_kind = PyTuple_GET_ITEM(current_path_info, 2)
+ * # accomodate pyrex
+ */
+ Py_INCREF(__pyx_v_current_path_info);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1791
+ * # accomodate pyrex
+ * Py_INCREF(current_path_info)
+ * disk_kind = PyTuple_GET_ITEM(current_path_info, 2) # <<<<<<<<<<<<<<
+ * # accomodate pyrex
+ * Py_INCREF(disk_kind)
+ */
+ __pyx_t_4 = PyTuple_GET_ITEM(__pyx_v_current_path_info, 2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_disk_kind);
+ __pyx_v_disk_kind = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1793
+ * disk_kind = PyTuple_GET_ITEM(current_path_info, 2)
+ * # accomodate pyrex
+ * Py_INCREF(disk_kind) # <<<<<<<<<<<<<<
+ * if disk_kind == "directory":
+ * current_path_info = self._maybe_tree_ref(current_path_info)
+ */
+ Py_INCREF(__pyx_v_disk_kind);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1794
+ * # accomodate pyrex
+ * Py_INCREF(disk_kind)
+ * if disk_kind == "directory": # <<<<<<<<<<<<<<
+ * current_path_info = self._maybe_tree_ref(current_path_info)
+ * else:
+ */
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_disk_kind, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1795
+ * Py_INCREF(disk_kind)
+ * if disk_kind == "directory":
+ * current_path_info = self._maybe_tree_ref(current_path_info) # <<<<<<<<<<<<<<
+ * else:
+ * current_path_info = None
+ */
+ __pyx_t_4 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_maybe_tree_ref(__pyx_v_self, __pyx_v_current_path_info); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1797
+ * current_path_info = self._maybe_tree_ref(current_path_info)
+ * else:
+ * current_path_info = None # <<<<<<<<<<<<<<
+ * while (current_entry is not None or current_path_info is not None):
+ * advance_entry = -1
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = Py_None;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1798
+ * else:
+ * current_path_info = None
+ * while (current_entry is not None or current_path_info is not None): # <<<<<<<<<<<<<<
+ * advance_entry = -1
+ * advance_path = -1
+ */
+ while (1) {
+ __pyx_t_2 = (__pyx_v_current_entry != Py_None);
+ if (!__pyx_t_2) {
+ __pyx_t_3 = (__pyx_v_current_path_info != Py_None);
+ __pyx_t_1 = __pyx_t_3;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ }
+ if (!__pyx_t_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1799
+ * current_path_info = None
+ * while (current_entry is not None or current_path_info is not None):
+ * advance_entry = -1 # <<<<<<<<<<<<<<
+ * advance_path = -1
+ * result = None
+ */
+ __pyx_v_advance_entry = -1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1800
+ * while (current_entry is not None or current_path_info is not None):
+ * advance_entry = -1
+ * advance_path = -1 # <<<<<<<<<<<<<<
+ * result = None
+ * changed = None
+ */
+ __pyx_v_advance_path = -1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1801
+ * advance_entry = -1
+ * advance_path = -1
+ * result = None # <<<<<<<<<<<<<<
+ * changed = None
+ * path_handled = 0
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1802
+ * advance_path = -1
+ * result = None
+ * changed = None # <<<<<<<<<<<<<<
+ * path_handled = 0
+ * if current_entry is None:
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1803
+ * result = None
+ * changed = None
+ * path_handled = 0 # <<<<<<<<<<<<<<
+ * if current_entry is None:
+ * # unversioned - the check for path_handled when the path
+ */
+ __pyx_v_path_handled = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1804
+ * changed = None
+ * path_handled = 0
+ * if current_entry is None: # <<<<<<<<<<<<<<
+ * # unversioned - the check for path_handled when the path
+ * # is advanced will yield this path if needed.
+ */
+ __pyx_t_1 = (__pyx_v_current_entry == Py_None);
+ if (__pyx_t_1) {
+ goto __pyx_L8;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1808
+ * # is advanced will yield this path if needed.
+ * pass
+ * elif current_path_info is None: # <<<<<<<<<<<<<<
+ * # no path is fine: the per entry code will handle it.
+ * result, changed = self._process_entry(current_entry,
+ */
+ __pyx_t_1 = (__pyx_v_current_path_info == Py_None);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1811
+ * # no path is fine: the per entry code will handle it.
+ * result, changed = self._process_entry(current_entry,
+ * current_path_info) # <<<<<<<<<<<<<<
+ * else:
+ * minikind = _minikind_from_string(
+ */
+ __pyx_t_4 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_current_entry, __pyx_v_current_path_info); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1810; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (PyTuple_CheckExact(__pyx_t_4) && likely(PyTuple_GET_SIZE(__pyx_t_4) == 2)) {
+ PyObject* tuple = __pyx_t_4;
+ __pyx_t_5 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_5);
+ __pyx_t_6 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1810
+ * elif current_path_info is None:
+ * # no path is fine: the per entry code will handle it.
+ * result, changed = self._process_entry(current_entry, # <<<<<<<<<<<<<<
+ * current_path_info)
+ * else:
+ */
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_5;
+ __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_6;
+ __pyx_t_6 = 0;
+ } else {
+ __pyx_t_7 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1810; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_5 = __Pyx_UnpackItem(__pyx_t_7, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1810; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = __Pyx_UnpackItem(__pyx_t_7, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1810; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__Pyx_EndUnpack(__pyx_t_7, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1810; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_5;
+ __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_6;
+ __pyx_t_6 = 0;
+ }
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1814
+ * else:
+ * minikind = _minikind_from_string(
+ * current_entry[1][self.target_index][0]) # <<<<<<<<<<<<<<
+ * cmp_result = cmp(current_path_info[1], current_entry[0][1])
+ * if (cmp_result or minikind == c'a' or minikind == c'r'):
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_current_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1814; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_4, __pyx_v_self->target_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1814; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1814; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_8 = __pyx_f_6bzrlib_21_dirstate_helpers_pyx__minikind_from_string(__pyx_t_4); if (unlikely(__pyx_t_8 == -1 && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_minikind = __pyx_t_8;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1815
+ * minikind = _minikind_from_string(
+ * current_entry[1][self.target_index][0])
+ * cmp_result = cmp(current_path_info[1], current_entry[0][1]) # <<<<<<<<<<<<<<
+ * if (cmp_result or minikind == c'a' or minikind == c'r'):
+ * # The current path on disk doesn't match the dirblock
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_current_path_info, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_current_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_6, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_4 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_cmp, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_5); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_v_cmp_result = __pyx_t_9;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1816
+ * current_entry[1][self.target_index][0])
+ * cmp_result = cmp(current_path_info[1], current_entry[0][1])
+ * if (cmp_result or minikind == c'a' or minikind == c'r'): # <<<<<<<<<<<<<<
+ * # The current path on disk doesn't match the dirblock
+ * # record. Either the dirblock record is marked as
+ */
+ if (!__pyx_v_cmp_result) {
+ switch (__pyx_v_minikind) {
+ case 'a':
+ case 'r':
+ __pyx_t_1 = 1;
+ break;
+ default:
+ __pyx_t_1 = 0;
+ break;
+ }
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = __pyx_v_cmp_result;
+ }
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1825
+ * # Compare the basename for these files to determine
+ * # which comes first
+ * if cmp_result < 0: # <<<<<<<<<<<<<<
+ * # extra file on disk: pass for now, but only
+ * # increment the path, not the entry
+ */
+ __pyx_t_2 = (__pyx_v_cmp_result < 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1828
+ * # extra file on disk: pass for now, but only
+ * # increment the path, not the entry
+ * advance_entry = 0 # <<<<<<<<<<<<<<
+ * else:
+ * # entry referring to file not present on disk.
+ */
+ __pyx_v_advance_entry = 0;
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1833
+ * # advance the entry only, after processing.
+ * result, changed = self._process_entry(current_entry,
+ * None) # <<<<<<<<<<<<<<
+ * advance_path = 0
+ * else:
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_current_entry, Py_None); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ if (PyTuple_CheckExact(__pyx_t_5) && likely(PyTuple_GET_SIZE(__pyx_t_5) == 2)) {
+ PyObject* tuple = __pyx_t_5;
+ __pyx_t_6 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_6);
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1832
+ * # entry referring to file not present on disk.
+ * # advance the entry only, after processing.
+ * result, changed = self._process_entry(current_entry, # <<<<<<<<<<<<<<
+ * None)
+ * advance_path = 0
+ */
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_6;
+ __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ } else {
+ __pyx_t_7 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_6 = __Pyx_UnpackItem(__pyx_t_7, 0); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_7, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (__Pyx_EndUnpack(__pyx_t_7, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_6;
+ __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1834
+ * result, changed = self._process_entry(current_entry,
+ * None)
+ * advance_path = 0 # <<<<<<<<<<<<<<
+ * else:
+ * # paths are the same,and the dirstate entry is not
+ */
+ __pyx_v_advance_path = 0;
+ }
+ __pyx_L10:;
+ goto __pyx_L9;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1839
+ * # absent or renamed.
+ * result, changed = self._process_entry(current_entry,
+ * current_path_info) # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * path_handled = -1
+ */
+ __pyx_t_5 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_current_entry, __pyx_v_current_path_info); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ if (PyTuple_CheckExact(__pyx_t_5) && likely(PyTuple_GET_SIZE(__pyx_t_5) == 2)) {
+ PyObject* tuple = __pyx_t_5;
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_4);
+ __pyx_t_6 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_6);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1838
+ * # paths are the same,and the dirstate entry is not
+ * # absent or renamed.
+ * result, changed = self._process_entry(current_entry, # <<<<<<<<<<<<<<
+ * current_path_info)
+ * if changed is not None:
+ */
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_6;
+ __pyx_t_6 = 0;
+ } else {
+ __pyx_t_7 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_7, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_6 = __Pyx_UnpackItem(__pyx_t_7, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__Pyx_EndUnpack(__pyx_t_7, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_6;
+ __pyx_t_6 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1840
+ * result, changed = self._process_entry(current_entry,
+ * current_path_info)
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * path_handled = -1
+ * if not changed and not self.include_unchanged:
+ */
+ __pyx_t_2 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1841
+ * current_path_info)
+ * if changed is not None:
+ * path_handled = -1 # <<<<<<<<<<<<<<
+ * if not changed and not self.include_unchanged:
+ * changed = None
+ */
+ __pyx_v_path_handled = -1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1842
+ * if changed is not None:
+ * path_handled = -1
+ * if not changed and not self.include_unchanged: # <<<<<<<<<<<<<<
+ * changed = None
+ * # >- loop control starts here:
+ */
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = (!__pyx_t_2);
+ if (__pyx_t_1) {
+ __pyx_t_2 = (!__pyx_v_self->include_unchanged);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1843
+ * path_handled = -1
+ * if not changed and not self.include_unchanged:
+ * changed = None # <<<<<<<<<<<<<<
+ * # >- loop control starts here:
+ * # >- entry
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = Py_None;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+ }
+ __pyx_L9:;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1846
+ * # >- loop control starts here:
+ * # >- entry
+ * if advance_entry and current_entry is not None: # <<<<<<<<<<<<<<
+ * self.current_block_pos = self.current_block_pos + 1
+ * if self.current_block_pos < PyList_GET_SIZE(self.current_block_list):
+ */
+ if (__pyx_v_advance_entry) {
+ __pyx_t_3 = (__pyx_v_current_entry != Py_None);
+ __pyx_t_1 = __pyx_t_3;
+ } else {
+ __pyx_t_1 = __pyx_v_advance_entry;
+ }
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1847
+ * # >- entry
+ * if advance_entry and current_entry is not None:
+ * self.current_block_pos = self.current_block_pos + 1 # <<<<<<<<<<<<<<
+ * if self.current_block_pos < PyList_GET_SIZE(self.current_block_list):
+ * current_entry = self.current_block_list[self.current_block_pos]
+ */
+ __pyx_v_self->current_block_pos = (__pyx_v_self->current_block_pos + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1848
+ * if advance_entry and current_entry is not None:
+ * self.current_block_pos = self.current_block_pos + 1
+ * if self.current_block_pos < PyList_GET_SIZE(self.current_block_list): # <<<<<<<<<<<<<<
+ * current_entry = self.current_block_list[self.current_block_pos]
+ * else:
+ */
+ __pyx_t_1 = (__pyx_v_self->current_block_pos < PyList_GET_SIZE(__pyx_v_self->current_block_list));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1849
+ * self.current_block_pos = self.current_block_pos + 1
+ * if self.current_block_pos < PyList_GET_SIZE(self.current_block_list):
+ * current_entry = self.current_block_list[self.current_block_pos] # <<<<<<<<<<<<<<
+ * else:
+ * current_entry = None
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_self->current_block_list, __pyx_v_self->current_block_pos, sizeof(int), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __pyx_v_current_entry = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1851
+ * current_entry = self.current_block_list[self.current_block_pos]
+ * else:
+ * current_entry = None # <<<<<<<<<<<<<<
+ * # >- path
+ * if advance_path and current_path_info is not None:
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __pyx_v_current_entry = Py_None;
+ }
+ __pyx_L14:;
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1853
+ * current_entry = None
+ * # >- path
+ * if advance_path and current_path_info is not None: # <<<<<<<<<<<<<<
+ * if not path_handled:
+ * # unversioned in all regards
+ */
+ if (__pyx_v_advance_path) {
+ __pyx_t_1 = (__pyx_v_current_path_info != Py_None);
+ __pyx_t_3 = __pyx_t_1;
+ } else {
+ __pyx_t_3 = __pyx_v_advance_path;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1854
+ * # >- path
+ * if advance_path and current_path_info is not None:
+ * if not path_handled: # <<<<<<<<<<<<<<
+ * # unversioned in all regards
+ * if self.want_unversioned:
+ */
+ __pyx_t_3 = (!__pyx_v_path_handled);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1856
+ * if not path_handled:
+ * # unversioned in all regards
+ * if self.want_unversioned: # <<<<<<<<<<<<<<
+ * new_executable = bool(
+ * stat.S_ISREG(current_path_info[3].st_mode)
+ */
+ if (__pyx_v_self->want_unversioned) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1858
+ * if self.want_unversioned:
+ * new_executable = bool(
+ * stat.S_ISREG(current_path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * and stat.S_IEXEC & current_path_info[3].st_mode)
+ * try:
+ */
+ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__stat); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__S_ISREG); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_current_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1858; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1859
+ * new_executable = bool(
+ * stat.S_ISREG(current_path_info[3].st_mode)
+ * and stat.S_IEXEC & current_path_info[3].st_mode) # <<<<<<<<<<<<<<
+ * try:
+ * relpath_unicode = self.utf8_decode(current_path_info[0])[0]
+ */
+ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__stat); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__S_IEXEC); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_current_path_info, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_6 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyNumber_And(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_2 = __pyx_t_1;
+ } else {
+ __pyx_t_2 = __pyx_t_3;
+ }
+ __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_t_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1859; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_new_executable);
+ __pyx_v_new_executable = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1860
+ * stat.S_ISREG(current_path_info[3].st_mode)
+ * and stat.S_IEXEC & current_path_info[3].st_mode)
+ * try: # <<<<<<<<<<<<<<
+ * relpath_unicode = self.utf8_decode(current_path_info[0])[0]
+ * except UnicodeDecodeError, _:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1861
+ * and stat.S_IEXEC & current_path_info[3].st_mode)
+ * try:
+ * relpath_unicode = self.utf8_decode(current_path_info[0])[0] # <<<<<<<<<<<<<<
+ * except UnicodeDecodeError, _:
+ * raise errors.BadFilenameEncoding(
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_current_path_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L18_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L18_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L18_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_4, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L18_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_relpath_unicode);
+ __pyx_v_relpath_unicode = __pyx_t_6;
+ __pyx_t_6 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L25_try_end;
+ __pyx_L18_error:;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1862
+ * try:
+ * relpath_unicode = self.utf8_decode(current_path_info[0])[0]
+ * except UnicodeDecodeError, _: # <<<<<<<<<<<<<<
+ * raise errors.BadFilenameEncoding(
+ * current_path_info[0], osutils._fs_enc)
+ */
+ __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_UnicodeDecodeError);
+ if (__pyx_t_9) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._loop_one_block");
+ if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_4, &__pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1862; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1863
+ * relpath_unicode = self.utf8_decode(current_path_info[0])[0]
+ * except UnicodeDecodeError, _:
+ * raise errors.BadFilenameEncoding( # <<<<<<<<<<<<<<
+ * current_path_info[0], osutils._fs_enc)
+ * if changed is not None:
+ */
+ __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__errors); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_10 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__BadFilenameEncoding); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1864
+ * except UnicodeDecodeError, _:
+ * raise errors.BadFilenameEncoding(
+ * current_path_info[0], osutils._fs_enc) # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * raise AssertionError(
+ */
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_current_path_info, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1864; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_11 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1864; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_12 = PyObject_GetAttr(__pyx_t_11, __pyx_n_s___fs_enc); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1864; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_7 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_12 = PyObject_Call(__pyx_t_10, __pyx_t_11, NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_Raise(__pyx_t_12, 0, 0);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L20_except_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ goto __pyx_L19_exception_handled;
+ }
+ __pyx_L20_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L19_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L25_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1865
+ * raise errors.BadFilenameEncoding(
+ * current_path_info[0], osutils._fs_enc)
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * "result is not None: %r" % result)
+ */
+ __pyx_t_2 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1867
+ * if changed is not None:
+ * raise AssertionError(
+ * "result is not None: %r" % result) # <<<<<<<<<<<<<<
+ * result = (None,
+ * (None, relpath_unicode),
+ */
+ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_44), __pyx_v_result); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1867; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1866; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1866; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_5, 0, 0);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1866; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L28;
+ }
+ __pyx_L28:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1869
+ * "result is not None: %r" % result)
+ * result = (None,
+ * (None, relpath_unicode), # <<<<<<<<<<<<<<
+ * True,
+ * (False, False),
+ */
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1869; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_relpath_unicode);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_relpath_unicode);
+ __Pyx_GIVEREF(__pyx_v_relpath_unicode);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1870
+ * result = (None,
+ * (None, relpath_unicode),
+ * True, # <<<<<<<<<<<<<<
+ * (False, False),
+ * (None, None),
+ */
+ __pyx_t_4 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1871
+ * (None, relpath_unicode),
+ * True,
+ * (False, False), # <<<<<<<<<<<<<<
+ * (None, None),
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ */
+ __pyx_t_6 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_12 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ __pyx_t_6 = 0;
+ __pyx_t_12 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1872
+ * True,
+ * (False, False),
+ * (None, None), # <<<<<<<<<<<<<<
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ * (None, current_path_info[2]),
+ */
+ __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_12, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1873
+ * (False, False),
+ * (None, None),
+ * (None, self.utf8_decode(current_path_info[1])[0]), # <<<<<<<<<<<<<<
+ * (None, current_path_info[2]),
+ * (None, new_executable))
+ */
+ __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_current_path_info, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_Call(__pyx_v_self->utf8_decode, __pyx_t_10, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_t_6, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1874
+ * (None, None),
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ * (None, current_path_info[2]), # <<<<<<<<<<<<<<
+ * (None, new_executable))
+ * changed = True
+ */
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1874; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1874; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1875
+ * (None, self.utf8_decode(current_path_info[1])[0]),
+ * (None, current_path_info[2]),
+ * (None, new_executable)) # <<<<<<<<<<<<<<
+ * changed = True
+ * # dont descend into this unversioned path if it is
+ */
+ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1875; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_new_executable);
+ PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_v_new_executable);
+ __Pyx_GIVEREF(__pyx_v_new_executable);
+ __pyx_t_13 = PyTuple_New(8); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1868; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_13, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_13, 2, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_13, 3, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_13, 4, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_13, 5, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_13, 6, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_13, 7, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_5 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_6 = 0;
+ __pyx_t_7 = 0;
+ __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_13;
+ __pyx_t_13 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1876
+ * (None, current_path_info[2]),
+ * (None, new_executable))
+ * changed = True # <<<<<<<<<<<<<<
+ * # dont descend into this unversioned path if it is
+ * # a dir
+ */
+ __pyx_t_13 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_13;
+ __pyx_t_13 = 0;
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1879
+ * # dont descend into this unversioned path if it is
+ * # a dir
+ * if current_path_info[2] in ('directory'): # <<<<<<<<<<<<<<
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1
+ */
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1879; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_2 = ((PySequence_Contains(((PyObject *)__pyx_n_s__directory), __pyx_t_13))); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1879; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1880
+ * # a dir
+ * if current_path_info[2] in ('directory'):
+ * del self.current_dir_list[self.path_index] # <<<<<<<<<<<<<<
+ * self.path_index = self.path_index - 1
+ * # dont descend the disk iterator into any tree
+ */
+ if (__Pyx_DelItemInt(__pyx_v_self->current_dir_list, __pyx_v_self->path_index, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1880; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1881
+ * if current_path_info[2] in ('directory'):
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1 # <<<<<<<<<<<<<<
+ * # dont descend the disk iterator into any tree
+ * # paths.
+ */
+ __pyx_v_self->path_index = (__pyx_v_self->path_index - 1);
+ goto __pyx_L29;
+ }
+ __pyx_L29:;
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1884
+ * # dont descend the disk iterator into any tree
+ * # paths.
+ * if current_path_info[2] == 'tree-reference': # <<<<<<<<<<<<<<
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1
+ */
+ __pyx_t_13 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_13) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_10 = PyObject_RichCompare(__pyx_t_13, ((PyObject *)__pyx_kp_s_33), Py_EQ); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1885
+ * # paths.
+ * if current_path_info[2] == 'tree-reference':
+ * del self.current_dir_list[self.path_index] # <<<<<<<<<<<<<<
+ * self.path_index = self.path_index - 1
+ * self.path_index = self.path_index + 1
+ */
+ if (__Pyx_DelItemInt(__pyx_v_self->current_dir_list, __pyx_v_self->path_index, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1885; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1886
+ * if current_path_info[2] == 'tree-reference':
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1 # <<<<<<<<<<<<<<
+ * self.path_index = self.path_index + 1
+ * if self.path_index < len(self.current_dir_list):
+ */
+ __pyx_v_self->path_index = (__pyx_v_self->path_index - 1);
+ goto __pyx_L30;
+ }
+ __pyx_L30:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1887
+ * del self.current_dir_list[self.path_index]
+ * self.path_index = self.path_index - 1
+ * self.path_index = self.path_index + 1 # <<<<<<<<<<<<<<
+ * if self.path_index < len(self.current_dir_list):
+ * current_path_info = self.current_dir_list[self.path_index]
+ */
+ __pyx_v_self->path_index = (__pyx_v_self->path_index + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1888
+ * self.path_index = self.path_index - 1
+ * self.path_index = self.path_index + 1
+ * if self.path_index < len(self.current_dir_list): # <<<<<<<<<<<<<<
+ * current_path_info = self.current_dir_list[self.path_index]
+ * if current_path_info[2] == 'directory':
+ */
+ __pyx_t_14 = PyObject_Length(__pyx_v_self->current_dir_list); if (unlikely(__pyx_t_14 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1888; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = (__pyx_v_self->path_index < __pyx_t_14);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1889
+ * self.path_index = self.path_index + 1
+ * if self.path_index < len(self.current_dir_list):
+ * current_path_info = self.current_dir_list[self.path_index] # <<<<<<<<<<<<<<
+ * if current_path_info[2] == 'directory':
+ * current_path_info = self._maybe_tree_ref(
+ */
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_self->current_dir_list, __pyx_v_self->path_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1889; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1890
+ * if self.path_index < len(self.current_dir_list):
+ * current_path_info = self.current_dir_list[self.path_index]
+ * if current_path_info[2] == 'directory': # <<<<<<<<<<<<<<
+ * current_path_info = self._maybe_tree_ref(
+ * current_path_info)
+ */
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_current_path_info, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1890; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_13 = PyObject_RichCompare(__pyx_t_10, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1890; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1890; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1892
+ * if current_path_info[2] == 'directory':
+ * current_path_info = self._maybe_tree_ref(
+ * current_path_info) # <<<<<<<<<<<<<<
+ * else:
+ * current_path_info = None
+ */
+ __pyx_t_13 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_maybe_tree_ref(__pyx_v_self, __pyx_v_current_path_info); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = __pyx_t_13;
+ __pyx_t_13 = 0;
+ goto __pyx_L32;
+ }
+ __pyx_L32:;
+ goto __pyx_L31;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1894
+ * current_path_info)
+ * else:
+ * current_path_info = None # <<<<<<<<<<<<<<
+ * if changed is not None:
+ * # Found a result on this pass, yield it
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __pyx_v_current_path_info = Py_None;
+ }
+ __pyx_L31:;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1895
+ * else:
+ * current_path_info = None
+ * if changed is not None: # <<<<<<<<<<<<<<
+ * # Found a result on this pass, yield it
+ * if changed:
+ */
+ __pyx_t_2 = (__pyx_v_changed != Py_None);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1897
+ * if changed is not None:
+ * # Found a result on this pass, yield it
+ * if changed: # <<<<<<<<<<<<<<
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ */
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1897; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1898
+ * # Found a result on this pass, yield it
+ * if changed:
+ * self._gather_result_for_consistency(result) # <<<<<<<<<<<<<<
+ * if changed or self.include_unchanged:
+ * return result
+ */
+ __pyx_t_9 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_gather_result_for_consistency(__pyx_v_self, __pyx_v_result); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1898; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L34;
+ }
+ __pyx_L34:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1899
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged: # <<<<<<<<<<<<<<
+ * return result
+ * if self.current_block is not None:
+ */
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1899; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_2) {
+ __pyx_t_3 = __pyx_v_self->include_unchanged;
+ } else {
+ __pyx_t_3 = __pyx_t_2;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1900
+ * self._gather_result_for_consistency(result)
+ * if changed or self.include_unchanged:
+ * return result # <<<<<<<<<<<<<<
+ * if self.current_block is not None:
+ * self.block_index = self.block_index + 1
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+ goto __pyx_L35;
+ }
+ __pyx_L35:;
+ goto __pyx_L33;
+ }
+ __pyx_L33:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1901
+ * if changed or self.include_unchanged:
+ * return result
+ * if self.current_block is not None: # <<<<<<<<<<<<<<
+ * self.block_index = self.block_index + 1
+ * self._update_current_block()
+ */
+ __pyx_t_3 = (__pyx_v_self->current_block != Py_None);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1902
+ * return result
+ * if self.current_block is not None:
+ * self.block_index = self.block_index + 1 # <<<<<<<<<<<<<<
+ * self._update_current_block()
+ * if self.current_dir_info is not None:
+ */
+ __pyx_v_self->block_index = (__pyx_v_self->block_index + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1903
+ * if self.current_block is not None:
+ * self.block_index = self.block_index + 1
+ * self._update_current_block() # <<<<<<<<<<<<<<
+ * if self.current_dir_info is not None:
+ * self.path_index = 0
+ */
+ __pyx_t_9 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_update_current_block(__pyx_v_self); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L36;
+ }
+ __pyx_L36:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1904
+ * self.block_index = self.block_index + 1
+ * self._update_current_block()
+ * if self.current_dir_info is not None: # <<<<<<<<<<<<<<
+ * self.path_index = 0
+ * self.current_dir_list = None
+ */
+ __pyx_t_3 = (__pyx_v_self->current_dir_info != Py_None);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1905
+ * self._update_current_block()
+ * if self.current_dir_info is not None:
+ * self.path_index = 0 # <<<<<<<<<<<<<<
+ * self.current_dir_list = None
+ * try:
+ */
+ __pyx_v_self->path_index = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1906
+ * if self.current_dir_info is not None:
+ * self.path_index = 0
+ * self.current_dir_list = None # <<<<<<<<<<<<<<
+ * try:
+ * self.current_dir_info = self.dir_iterator.next()
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_list);
+ __Pyx_DECREF(__pyx_v_self->current_dir_list);
+ __pyx_v_self->current_dir_list = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1907
+ * self.path_index = 0
+ * self.current_dir_list = None
+ * try: # <<<<<<<<<<<<<<
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1]
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1908
+ * self.current_dir_list = None
+ * try:
+ * self.current_dir_info = self.dir_iterator.next() # <<<<<<<<<<<<<<
+ * self.current_dir_list = self.current_dir_info[1]
+ * except StopIteration, _:
+ */
+ __pyx_t_13 = PyObject_GetAttr(__pyx_v_self->dir_iterator, __pyx_n_s__next); if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1908; __pyx_clineno = __LINE__; goto __pyx_L38_error;}
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_10 = PyObject_Call(__pyx_t_13, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1908; __pyx_clineno = __LINE__; goto __pyx_L38_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_GIVEREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1909
+ * try:
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1] # <<<<<<<<<<<<<<
+ * except StopIteration, _:
+ * self.current_dir_info = None
+ */
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_self->current_dir_info, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1909; __pyx_clineno = __LINE__; goto __pyx_L38_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_list);
+ __Pyx_DECREF(__pyx_v_self->current_dir_list);
+ __pyx_v_self->current_dir_list = __pyx_t_10;
+ __pyx_t_10 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L45_try_end;
+ __pyx_L38_error:;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1910
+ * self.current_dir_info = self.dir_iterator.next()
+ * self.current_dir_list = self.current_dir_info[1]
+ * except StopIteration, _: # <<<<<<<<<<<<<<
+ * self.current_dir_info = None
+ *
+ */
+ __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_StopIteration);
+ if (__pyx_t_9) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._loop_one_block");
+ if (__Pyx_GetException(&__pyx_t_10, &__pyx_t_13, &__pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1910; __pyx_clineno = __LINE__; goto __pyx_L40_except_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_13;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1911
+ * self.current_dir_list = self.current_dir_info[1]
+ * except StopIteration, _:
+ * self.current_dir_info = None # <<<<<<<<<<<<<<
+ *
+ * cdef object _next_consistent_entries(self):
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->current_dir_info);
+ __Pyx_DECREF(__pyx_v_self->current_dir_info);
+ __pyx_v_self->current_dir_info = Py_None;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L39_exception_handled;
+ }
+ __pyx_L40_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L39_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L45_try_end:;
+ }
+ goto __pyx_L37;
+ }
+ __pyx_L37:;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._loop_one_block");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_current_entry);
+ __Pyx_DECREF(__pyx_v_current_path_info);
+ __Pyx_DECREF(__pyx_v_disk_kind);
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_DECREF(__pyx_v_changed);
+ __Pyx_DECREF(__pyx_v_new_executable);
+ __Pyx_DECREF(__pyx_v_relpath_unicode);
+ __Pyx_DECREF(__pyx_v__);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1913
+ * self.current_dir_info = None
+ *
+ * cdef object _next_consistent_entries(self): # <<<<<<<<<<<<<<
+ * """Grabs the next specific file parent case to consider.
+ *
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__next_consistent_entries(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self) {
+ PyObject *__pyx_v_results;
+ PyObject *__pyx_v_path_utf8;
+ PyObject *__pyx_v_path_entries;
+ PyObject *__pyx_v_selected_entries;
+ int __pyx_v_found_item;
+ PyObject *__pyx_v_candidate_entry;
+ PyObject *__pyx_v_path_info;
+ PyObject *__pyx_v_entry;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_changed;
+ PyObject *__pyx_v_entry_path_utf8;
+ PyObject *__pyx_v_initial_key;
+ PyObject *__pyx_v_block_index;
+ PyObject *__pyx_v__;
+ PyObject *__pyx_v_current_block;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ Py_ssize_t __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ int __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ Py_ssize_t __pyx_t_11;
+ __Pyx_RefNannySetupContext("_next_consistent_entries");
+ __pyx_v_results = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_path_utf8 = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_path_entries = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_selected_entries = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_candidate_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_path_info = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_entry = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_changed = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_entry_path_utf8 = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_initial_key = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_block_index = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v__ = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_current_block = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1918
+ * :return: A list of the results, each of which is as for _process_entry.
+ * """
+ * results = [] # <<<<<<<<<<<<<<
+ * while self.search_specific_file_parents:
+ * # Process the parent directories for the paths we were iterating.
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(((PyObject *)__pyx_v_results));
+ __pyx_v_results = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1919
+ * """
+ * results = []
+ * while self.search_specific_file_parents: # <<<<<<<<<<<<<<
+ * # Process the parent directories for the paths we were iterating.
+ * # Even in extremely large trees this should be modest, so currently
+ */
+ while (1) {
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_self->search_specific_file_parents); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1923
+ * # Even in extremely large trees this should be modest, so currently
+ * # no attempt is made to optimise.
+ * path_utf8 = self.search_specific_file_parents.pop() # <<<<<<<<<<<<<<
+ * if path_utf8 in self.searched_exact_paths:
+ * # We've examined this path.
+ */
+ __pyx_t_1 = __Pyx_PyObject_Pop(__pyx_v_self->search_specific_file_parents); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1923; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_path_utf8);
+ __pyx_v_path_utf8 = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1924
+ * # no attempt is made to optimise.
+ * path_utf8 = self.search_specific_file_parents.pop()
+ * if path_utf8 in self.searched_exact_paths: # <<<<<<<<<<<<<<
+ * # We've examined this path.
+ * continue
+ */
+ __pyx_t_2 = ((PySequence_Contains(__pyx_v_self->searched_exact_paths, __pyx_v_path_utf8))); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1924; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1926
+ * if path_utf8 in self.searched_exact_paths:
+ * # We've examined this path.
+ * continue # <<<<<<<<<<<<<<
+ * if osutils.is_inside_any(self.searched_specific_files, path_utf8):
+ * # We've examined this path.
+ */
+ goto __pyx_L3_continue;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1927
+ * # We've examined this path.
+ * continue
+ * if osutils.is_inside_any(self.searched_specific_files, path_utf8): # <<<<<<<<<<<<<<
+ * # We've examined this path.
+ * continue
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1927; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__is_inside_any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1927; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1927; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_self->searched_specific_files);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->searched_specific_files);
+ __Pyx_GIVEREF(__pyx_v_self->searched_specific_files);
+ __Pyx_INCREF(__pyx_v_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_path_utf8);
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1927; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1927; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1929
+ * if osutils.is_inside_any(self.searched_specific_files, path_utf8):
+ * # We've examined this path.
+ * continue # <<<<<<<<<<<<<<
+ * path_entries = self.state._entries_for_path(path_utf8)
+ * # We need either one or two entries. If the path in
+ */
+ goto __pyx_L3_continue;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1930
+ * # We've examined this path.
+ * continue
+ * path_entries = self.state._entries_for_path(path_utf8) # <<<<<<<<<<<<<<
+ * # We need either one or two entries. If the path in
+ * # self.target_index has moved (so the entry in source_index is in
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___entries_for_path); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_path_utf8);
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_path_entries);
+ __pyx_v_path_entries = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1935
+ * # 'ar') then we need to also look for the entry for this path in
+ * # self.source_index, to output the appropriate delete-or-rename.
+ * selected_entries = [] # <<<<<<<<<<<<<<
+ * found_item = False
+ * for candidate_entry in path_entries:
+ */
+ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1935; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(((PyObject *)__pyx_v_selected_entries));
+ __pyx_v_selected_entries = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1936
+ * # self.source_index, to output the appropriate delete-or-rename.
+ * selected_entries = []
+ * found_item = False # <<<<<<<<<<<<<<
+ * for candidate_entry in path_entries:
+ * # Find entries present in target at this path:
+ */
+ __pyx_v_found_item = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1937
+ * selected_entries = []
+ * found_item = False
+ * for candidate_entry in path_entries: # <<<<<<<<<<<<<<
+ * # Find entries present in target at this path:
+ * if candidate_entry[1][self.target_index][0] not in 'ar':
+ */
+ if (PyList_CheckExact(__pyx_v_path_entries) || PyTuple_CheckExact(__pyx_v_path_entries)) {
+ __pyx_t_5 = 0; __pyx_t_3 = __pyx_v_path_entries; __Pyx_INCREF(__pyx_t_3);
+ } else {
+ __pyx_t_5 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_path_entries); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1937; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ }
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_3))) {
+ if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_3)) break;
+ __pyx_t_1 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_1); __pyx_t_5++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_3))) {
+ if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
+ __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_1); __pyx_t_5++;
+ } else {
+ __pyx_t_1 = PyIter_Next(__pyx_t_3);
+ if (!__pyx_t_1) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1937; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_1);
+ }
+ __Pyx_DECREF(__pyx_v_candidate_entry);
+ __pyx_v_candidate_entry = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1939
+ * for candidate_entry in path_entries:
+ * # Find entries present in target at this path:
+ * if candidate_entry[1][self.target_index][0] not in 'ar': # <<<<<<<<<<<<<<
+ * found_item = True
+ * selected_entries.append(candidate_entry)
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_candidate_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1939; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->target_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1939; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1939; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_2 = (__Pyx_NegateNonNeg(PySequence_Contains(((PyObject *)__pyx_n_s__ar), __pyx_t_1))); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1939; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1940
+ * # Find entries present in target at this path:
+ * if candidate_entry[1][self.target_index][0] not in 'ar':
+ * found_item = True # <<<<<<<<<<<<<<
+ * selected_entries.append(candidate_entry)
+ * # Find entries present in source at this path:
+ */
+ __pyx_v_found_item = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1941
+ * if candidate_entry[1][self.target_index][0] not in 'ar':
+ * found_item = True
+ * selected_entries.append(candidate_entry) # <<<<<<<<<<<<<<
+ * # Find entries present in source at this path:
+ * elif (self.source_index is not None and
+ */
+ if (unlikely(__pyx_v_selected_entries == Py_None)) {
+ PyErr_SetString(PyExc_AttributeError, "'NoneType' object has no attribute 'append'"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_6 = PyList_Append(((PyObject *)__pyx_v_selected_entries), __pyx_v_candidate_entry); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L9;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1943
+ * selected_entries.append(candidate_entry)
+ * # Find entries present in source at this path:
+ * elif (self.source_index is not None and # <<<<<<<<<<<<<<
+ * candidate_entry[1][self.source_index][0] not in 'ar'):
+ * found_item = True
+ */
+ __pyx_t_1 = PyInt_FromLong(__pyx_v_self->source_index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = (__pyx_t_1 != Py_None);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1944
+ * # Find entries present in source at this path:
+ * elif (self.source_index is not None and
+ * candidate_entry[1][self.source_index][0] not in 'ar'): # <<<<<<<<<<<<<<
+ * found_item = True
+ * if candidate_entry[1][self.target_index][0] == 'a':
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_candidate_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->source_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = (__Pyx_NegateNonNeg(PySequence_Contains(((PyObject *)__pyx_n_s__ar), __pyx_t_1))); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __pyx_t_7;
+ } else {
+ __pyx_t_8 = __pyx_t_2;
+ }
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1945
+ * elif (self.source_index is not None and
+ * candidate_entry[1][self.source_index][0] not in 'ar'):
+ * found_item = True # <<<<<<<<<<<<<<
+ * if candidate_entry[1][self.target_index][0] == 'a':
+ * # Deleted, emit it here.
+ */
+ __pyx_v_found_item = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1946
+ * candidate_entry[1][self.source_index][0] not in 'ar'):
+ * found_item = True
+ * if candidate_entry[1][self.target_index][0] == 'a': # <<<<<<<<<<<<<<
+ * # Deleted, emit it here.
+ * selected_entries.append(candidate_entry)
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_candidate_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->target_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, ((PyObject *)__pyx_n_s__a), Py_EQ); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1948
+ * if candidate_entry[1][self.target_index][0] == 'a':
+ * # Deleted, emit it here.
+ * selected_entries.append(candidate_entry) # <<<<<<<<<<<<<<
+ * else:
+ * # renamed, emit it when we process the directory it
+ */
+ if (unlikely(__pyx_v_selected_entries == Py_None)) {
+ PyErr_SetString(PyExc_AttributeError, "'NoneType' object has no attribute 'append'"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_6 = PyList_Append(((PyObject *)__pyx_v_selected_entries), __pyx_v_candidate_entry); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L10;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1952
+ * # renamed, emit it when we process the directory it
+ * # ended up at.
+ * self.search_specific_file_parents.add( # <<<<<<<<<<<<<<
+ * candidate_entry[1][self.target_index][1])
+ * if not found_item:
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_self->search_specific_file_parents, __pyx_n_s__add); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1953
+ * # ended up at.
+ * self.search_specific_file_parents.add(
+ * candidate_entry[1][self.target_index][1]) # <<<<<<<<<<<<<<
+ * if not found_item:
+ * raise AssertionError(
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_candidate_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1953; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->target_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1953; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1953; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
+ __pyx_L10:;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+ }
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1954
+ * self.search_specific_file_parents.add(
+ * candidate_entry[1][self.target_index][1])
+ * if not found_item: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * "Missing entry for specific path parent %r, %r" % (
+ */
+ __pyx_t_8 = (!__pyx_v_found_item);
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1957
+ * raise AssertionError(
+ * "Missing entry for specific path parent %r, %r" % (
+ * path_utf8, path_entries)) # <<<<<<<<<<<<<<
+ * path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
+ * for entry in selected_entries:
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1957; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_path_utf8);
+ __Pyx_INCREF(__pyx_v_path_entries);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_path_entries);
+ __Pyx_GIVEREF(__pyx_v_path_entries);
+ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_45), __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1956; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1955; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1955; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1955; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1958
+ * "Missing entry for specific path parent %r, %r" % (
+ * path_utf8, path_entries))
+ * path_info = self._path_info(path_utf8, path_utf8.decode('utf8')) # <<<<<<<<<<<<<<
+ * for entry in selected_entries:
+ * if entry[0][2] in self.seen_ids:
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_path_utf8, __pyx_n_s__decode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_n_s__utf8));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8));
+ __pyx_t_9 = PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_path_info(__pyx_v_self, __pyx_v_path_utf8, __pyx_t_9); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_path_info);
+ __pyx_v_path_info = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1959
+ * path_utf8, path_entries))
+ * path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
+ * for entry in selected_entries: # <<<<<<<<<<<<<<
+ * if entry[0][2] in self.seen_ids:
+ * continue
+ */
+ if (likely(((PyObject *)__pyx_v_selected_entries) != Py_None)) {
+ __pyx_t_5 = 0; __pyx_t_3 = ((PyObject *)__pyx_v_selected_entries); __Pyx_INCREF(__pyx_t_3);
+ } else {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1959; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ for (;;) {
+ if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_3)) break;
+ __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_9); __pyx_t_5++;
+ __Pyx_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_t_9;
+ __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1960
+ * path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
+ * for entry in selected_entries:
+ * if entry[0][2] in self.seen_ids: # <<<<<<<<<<<<<<
+ * continue
+ * result, changed = self._process_entry(entry, path_info)
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_8 = ((PySequence_Contains(__pyx_v_self->seen_ids, __pyx_t_1))); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1961
+ * for entry in selected_entries:
+ * if entry[0][2] in self.seen_ids:
+ * continue # <<<<<<<<<<<<<<
+ * result, changed = self._process_entry(entry, path_info)
+ * if changed is None:
+ */
+ goto __pyx_L12_continue;
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1962
+ * if entry[0][2] in self.seen_ids:
+ * continue
+ * result, changed = self._process_entry(entry, path_info) # <<<<<<<<<<<<<<
+ * if changed is None:
+ * raise AssertionError(
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_process_entry(__pyx_v_self, __pyx_v_entry, __pyx_v_path_info); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyTuple_CheckExact(__pyx_t_1) && likely(PyTuple_GET_SIZE(__pyx_t_1) == 2)) {
+ PyObject* tuple = __pyx_t_1;
+ __pyx_t_9 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_9);
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_9;
+ __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ } else {
+ __pyx_t_10 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_9 = __Pyx_UnpackItem(__pyx_t_10, 0); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_10, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (__Pyx_EndUnpack(__pyx_t_10, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_9;
+ __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_changed);
+ __pyx_v_changed = __pyx_t_4;
+ __pyx_t_4 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1963
+ * continue
+ * result, changed = self._process_entry(entry, path_info)
+ * if changed is None: # <<<<<<<<<<<<<<
+ * raise AssertionError(
+ * "Got entry<->path mismatch for specific path "
+ */
+ __pyx_t_8 = (__pyx_v_changed == Py_None);
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1967
+ * "Got entry<->path mismatch for specific path "
+ * "%r entry %r path_info %r " % (
+ * path_utf8, entry, path_info)) # <<<<<<<<<<<<<<
+ * # Only include changes - we're outside the users requested
+ * # expansion.
+ */
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1967; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_path_utf8);
+ __Pyx_INCREF(__pyx_v_entry);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_entry);
+ __Pyx_GIVEREF(__pyx_v_entry);
+ __Pyx_INCREF(__pyx_v_path_info);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_path_info);
+ __Pyx_GIVEREF(__pyx_v_path_info);
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_46), __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1970
+ * # Only include changes - we're outside the users requested
+ * # expansion.
+ * if changed: # <<<<<<<<<<<<<<
+ * self._gather_result_for_consistency(result)
+ * if (result[6][0] == 'directory' and
+ */
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1971
+ * # expansion.
+ * if changed:
+ * self._gather_result_for_consistency(result) # <<<<<<<<<<<<<<
+ * if (result[6][0] == 'directory' and
+ * result[6][1] != 'directory'):
+ */
+ __pyx_t_6 = ((struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)__pyx_v_self->__pyx_vtab)->_gather_result_for_consistency(__pyx_v_self, __pyx_v_result); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1971; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1972
+ * if changed:
+ * self._gather_result_for_consistency(result)
+ * if (result[6][0] == 'directory' and # <<<<<<<<<<<<<<
+ * result[6][1] != 'directory'):
+ * # This stopped being a directory, the old children have
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_result, 6, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1972; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1972; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1972; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1972; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1973
+ * self._gather_result_for_consistency(result)
+ * if (result[6][0] == 'directory' and
+ * result[6][1] != 'directory'): # <<<<<<<<<<<<<<
+ * # This stopped being a directory, the old children have
+ * # to be included.
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_result, 6, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1973; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1973; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, ((PyObject *)__pyx_n_s__directory), Py_NE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1973; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1973; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = __pyx_t_2;
+ } else {
+ __pyx_t_7 = __pyx_t_8;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1976
+ * # This stopped being a directory, the old children have
+ * # to be included.
+ * if entry[1][self.source_index][0] == 'r': # <<<<<<<<<<<<<<
+ * # renamed, take the source path
+ * entry_path_utf8 = entry[1][self.source_index][1]
+ */
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1976; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, __pyx_v_self->source_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1976; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1976; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_4, ((PyObject *)__pyx_n_s__r), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1976; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1976; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1978
+ * if entry[1][self.source_index][0] == 'r':
+ * # renamed, take the source path
+ * entry_path_utf8 = entry[1][self.source_index][1] # <<<<<<<<<<<<<<
+ * else:
+ * entry_path_utf8 = path_utf8
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1978; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_self->source_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1978; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1978; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_entry_path_utf8);
+ __pyx_v_entry_path_utf8 = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L18;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1980
+ * entry_path_utf8 = entry[1][self.source_index][1]
+ * else:
+ * entry_path_utf8 = path_utf8 # <<<<<<<<<<<<<<
+ * initial_key = (entry_path_utf8, '', '')
+ * block_index, _ = self.state._find_block_index_from_key(
+ */
+ __Pyx_INCREF(__pyx_v_path_utf8);
+ __Pyx_DECREF(__pyx_v_entry_path_utf8);
+ __pyx_v_entry_path_utf8 = __pyx_v_path_utf8;
+ }
+ __pyx_L18:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1981
+ * else:
+ * entry_path_utf8 = path_utf8
+ * initial_key = (entry_path_utf8, '', '') # <<<<<<<<<<<<<<
+ * block_index, _ = self.state._find_block_index_from_key(
+ * initial_key)
+ */
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1981; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_entry_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_entry_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_entry_path_utf8);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_1, 2, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ if (!(likely(PyTuple_CheckExact(__pyx_t_1))||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1981; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_v_initial_key));
+ __pyx_v_initial_key = ((PyObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1982
+ * entry_path_utf8 = path_utf8
+ * initial_key = (entry_path_utf8, '', '')
+ * block_index, _ = self.state._find_block_index_from_key( # <<<<<<<<<<<<<<
+ * initial_key)
+ * if block_index == 0:
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s_41); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1983
+ * initial_key = (entry_path_utf8, '', '')
+ * block_index, _ = self.state._find_block_index_from_key(
+ * initial_key) # <<<<<<<<<<<<<<
+ * if block_index == 0:
+ * # The children of the root are in block index 1.
+ */
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)__pyx_v_initial_key));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_initial_key));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_initial_key));
+ __pyx_t_9 = PyObject_Call(__pyx_t_1, __pyx_t_4, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyTuple_CheckExact(__pyx_t_9) && likely(PyTuple_GET_SIZE(__pyx_t_9) == 2)) {
+ PyObject* tuple = __pyx_t_9;
+ __pyx_t_4 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_4);
+ __pyx_t_1 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1982
+ * entry_path_utf8 = path_utf8
+ * initial_key = (entry_path_utf8, '', '')
+ * block_index, _ = self.state._find_block_index_from_key( # <<<<<<<<<<<<<<
+ * initial_key)
+ * if block_index == 0:
+ */
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_v_block_index);
+ __pyx_v_block_index = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_1;
+ __pyx_t_1 = 0;
+ } else {
+ __pyx_t_10 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_10, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = __Pyx_UnpackItem(__pyx_t_10, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (__Pyx_EndUnpack(__pyx_t_10, 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_v_block_index);
+ __pyx_v_block_index = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_1;
+ __pyx_t_1 = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1984
+ * block_index, _ = self.state._find_block_index_from_key(
+ * initial_key)
+ * if block_index == 0: # <<<<<<<<<<<<<<
+ * # The children of the root are in block index 1.
+ * block_index = block_index + 1
+ */
+ __pyx_t_9 = PyObject_RichCompare(__pyx_v_block_index, __pyx_int_0, Py_EQ); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1984; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1984; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1986
+ * if block_index == 0:
+ * # The children of the root are in block index 1.
+ * block_index = block_index + 1 # <<<<<<<<<<<<<<
+ * current_block = None
+ * if block_index < len(self.state._dirblocks):
+ */
+ __pyx_t_9 = PyNumber_Add(__pyx_v_block_index, __pyx_int_1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1986; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_v_block_index);
+ __pyx_v_block_index = __pyx_t_9;
+ __pyx_t_9 = 0;
+ goto __pyx_L19;
+ }
+ __pyx_L19:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1987
+ * # The children of the root are in block index 1.
+ * block_index = block_index + 1
+ * current_block = None # <<<<<<<<<<<<<<
+ * if block_index < len(self.state._dirblocks):
+ * current_block = self.state._dirblocks[block_index]
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_current_block);
+ __pyx_v_current_block = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1988
+ * block_index = block_index + 1
+ * current_block = None
+ * if block_index < len(self.state._dirblocks): # <<<<<<<<<<<<<<
+ * current_block = self.state._dirblocks[block_index]
+ * if not osutils.is_inside(
+ */
+ __pyx_t_9 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___dirblocks); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1988; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_11 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1988; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = PyInt_FromSsize_t(__pyx_t_11); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1988; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_block_index, __pyx_t_9, Py_LT); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1988; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1988; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1989
+ * current_block = None
+ * if block_index < len(self.state._dirblocks):
+ * current_block = self.state._dirblocks[block_index] # <<<<<<<<<<<<<<
+ * if not osutils.is_inside(
+ * entry_path_utf8, current_block[0]):
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->state, __pyx_n_s___dirblocks); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1989; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = PyObject_GetItem(__pyx_t_1, __pyx_v_block_index); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1989; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_current_block);
+ __pyx_v_current_block = __pyx_t_9;
+ __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1990
+ * if block_index < len(self.state._dirblocks):
+ * current_block = self.state._dirblocks[block_index]
+ * if not osutils.is_inside( # <<<<<<<<<<<<<<
+ * entry_path_utf8, current_block[0]):
+ * # No entries for this directory at all.
+ */
+ __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__is_inside); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1991
+ * current_block = self.state._dirblocks[block_index]
+ * if not osutils.is_inside(
+ * entry_path_utf8, current_block[0]): # <<<<<<<<<<<<<<
+ * # No entries for this directory at all.
+ * current_block = None
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_current_block, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1991; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_entry_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_entry_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_entry_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_Call(__pyx_t_1, __pyx_t_4, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_8 = (!__pyx_t_7);
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1993
+ * entry_path_utf8, current_block[0]):
+ * # No entries for this directory at all.
+ * current_block = None # <<<<<<<<<<<<<<
+ * if current_block is not None:
+ * for entry in current_block[1]:
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_current_block);
+ __pyx_v_current_block = Py_None;
+ goto __pyx_L21;
+ }
+ __pyx_L21:;
+ goto __pyx_L20;
+ }
+ __pyx_L20:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1994
+ * # No entries for this directory at all.
+ * current_block = None
+ * if current_block is not None: # <<<<<<<<<<<<<<
+ * for entry in current_block[1]:
+ * if entry[1][self.source_index][0] in 'ar':
+ */
+ __pyx_t_8 = (__pyx_v_current_block != Py_None);
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1995
+ * current_block = None
+ * if current_block is not None:
+ * for entry in current_block[1]: # <<<<<<<<<<<<<<
+ * if entry[1][self.source_index][0] in 'ar':
+ * # Not in the source tree, so doesn't have to be
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_current_block, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1995; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ if (PyList_CheckExact(__pyx_t_9) || PyTuple_CheckExact(__pyx_t_9)) {
+ __pyx_t_11 = 0; __pyx_t_4 = __pyx_t_9; __Pyx_INCREF(__pyx_t_4);
+ } else {
+ __pyx_t_11 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1995; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ }
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ for (;;) {
+ if (likely(PyList_CheckExact(__pyx_t_4))) {
+ if (__pyx_t_11 >= PyList_GET_SIZE(__pyx_t_4)) break;
+ __pyx_t_9 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_11); __Pyx_INCREF(__pyx_t_9); __pyx_t_11++;
+ } else if (likely(PyTuple_CheckExact(__pyx_t_4))) {
+ if (__pyx_t_11 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
+ __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_11); __Pyx_INCREF(__pyx_t_9); __pyx_t_11++;
+ } else {
+ __pyx_t_9 = PyIter_Next(__pyx_t_4);
+ if (!__pyx_t_9) {
+ if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1995; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_9);
+ }
+ __Pyx_DECREF(__pyx_v_entry);
+ __pyx_v_entry = __pyx_t_9;
+ __pyx_t_9 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1996
+ * if current_block is not None:
+ * for entry in current_block[1]:
+ * if entry[1][self.source_index][0] in 'ar': # <<<<<<<<<<<<<<
+ * # Not in the source tree, so doesn't have to be
+ * # included.
+ */
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_entry, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_9, __pyx_v_self->source_index, sizeof(int), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = ((PySequence_Contains(((PyObject *)__pyx_n_s__ar), __pyx_t_9))); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1999
+ * # Not in the source tree, so doesn't have to be
+ * # included.
+ * continue # <<<<<<<<<<<<<<
+ * # Path of the entry itself.
+ * self.search_specific_file_parents.add(
+ */
+ goto __pyx_L23_continue;
+ goto __pyx_L25;
+ }
+ __pyx_L25:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2001
+ * continue
+ * # Path of the entry itself.
+ * self.search_specific_file_parents.add( # <<<<<<<<<<<<<<
+ * self.pathjoin(*entry[0][:2]))
+ * if changed or self.include_unchanged:
+ */
+ __pyx_t_9 = PyObject_GetAttr(__pyx_v_self->search_specific_file_parents, __pyx_n_s__add); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2001; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2002
+ * # Path of the entry itself.
+ * self.search_specific_file_parents.add(
+ * self.pathjoin(*entry[0][:2])) # <<<<<<<<<<<<<<
+ * if changed or self.include_unchanged:
+ * results.append((result, changed))
+ */
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_entry, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_10 = PySequence_GetSlice(__pyx_t_1, 0, 2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PySequence_Tuple(__pyx_t_10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_v_self->pathjoin, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2001; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_t_9, __pyx_t_1, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2001; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_L23_continue:;
+ }
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ goto __pyx_L22;
+ }
+ __pyx_L22:;
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
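+ 
+  /* Summary of the block above: when a changed entry's kind moved away from
+   * 'directory' (result[6] goes from 'directory' to something else), the old
+   * children's dirblock is located via _find_block_index_from_key(), and every
+   * child still present in the source tree (minikind not in 'ar') is queued on
+   * search_specific_file_parents for later processing. */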
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2003
+ * self.search_specific_file_parents.add(
+ * self.pathjoin(*entry[0][:2]))
+ * if changed or self.include_unchanged: # <<<<<<<<<<<<<<
+ * results.append((result, changed))
+ * self.searched_exact_paths.add(path_utf8)
+ */
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_v_changed); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2003; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!__pyx_t_8) {
+ __pyx_t_7 = __pyx_v_self->include_unchanged;
+ } else {
+ __pyx_t_7 = __pyx_t_8;
+ }
+ if (__pyx_t_7) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2004
+ * self.pathjoin(*entry[0][:2]))
+ * if changed or self.include_unchanged:
+ * results.append((result, changed)) # <<<<<<<<<<<<<<
+ * self.searched_exact_paths.add(path_utf8)
+ * return results
+ */
+ if (unlikely(__pyx_v_results == Py_None)) {
+ PyErr_SetString(PyExc_AttributeError, "'NoneType' object has no attribute 'append'"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_result);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_result);
+ __Pyx_GIVEREF(__pyx_v_result);
+ __Pyx_INCREF(__pyx_v_changed);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_changed);
+ __Pyx_GIVEREF(__pyx_v_changed);
+ __pyx_t_6 = PyList_Append(((PyObject *)__pyx_v_results), __pyx_t_4); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ goto __pyx_L26;
+ }
+ __pyx_L26:;
+ __pyx_L12_continue:;
+ }
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2005
+ * if changed or self.include_unchanged:
+ * results.append((result, changed))
+ * self.searched_exact_paths.add(path_utf8) # <<<<<<<<<<<<<<
+ * return results
+ *
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->searched_exact_paths, __pyx_n_s__add); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_path_utf8);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_path_utf8);
+ __Pyx_GIVEREF(__pyx_v_path_utf8);
+ __pyx_t_10 = PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_L3_continue:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2006
+ * results.append((result, changed))
+ * self.searched_exact_paths.add(path_utf8)
+ * return results # <<<<<<<<<<<<<<
+ *
+ * cdef object _path_info(self, utf8_path, unicode_path):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_results));
+ __pyx_r = ((PyObject *)__pyx_v_results);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._next_consistent_entries");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_results);
+ __Pyx_DECREF(__pyx_v_path_utf8);
+ __Pyx_DECREF(__pyx_v_path_entries);
+ __Pyx_DECREF(__pyx_v_selected_entries);
+ __Pyx_DECREF(__pyx_v_candidate_entry);
+ __Pyx_DECREF(__pyx_v_path_info);
+ __Pyx_DECREF(__pyx_v_entry);
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_DECREF(__pyx_v_changed);
+ __Pyx_DECREF(__pyx_v_entry_path_utf8);
+ __Pyx_DECREF(__pyx_v_initial_key);
+ __Pyx_DECREF(__pyx_v_block_index);
+ __Pyx_DECREF(__pyx_v__);
+ __Pyx_DECREF(__pyx_v_current_block);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
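+ /* The function ending above, ProcessEntryC._next_consistent_entries, handles
+  * pending specific-path parents: for each path it builds path_info, re-runs
+  * _process_entry() on every selected dirstate entry, collects (result,
+  * changed) pairs when the entry changed (or include_unchanged is set), queues
+  * the children of directories that stopped being directories, records the
+  * path in searched_exact_paths and returns the accumulated results list. */
+ 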
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2008
+ * return results
+ *
+ * cdef object _path_info(self, utf8_path, unicode_path): # <<<<<<<<<<<<<<
+ * """Generate path_info for unicode_path.
+ *
+ */
+
+static PyObject *__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__path_info(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *__pyx_v_self, PyObject *__pyx_v_utf8_path, PyObject *__pyx_v_unicode_path) {
+ PyObject *__pyx_v_abspath;
+ PyObject *__pyx_v_stat;
+ PyObject *__pyx_v_e;
+ PyObject *__pyx_v_utf8_basename;
+ PyObject *__pyx_v_dir_info;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ __Pyx_RefNannySetupContext("_path_info");
+ __pyx_v_abspath = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_stat = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_e = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_utf8_basename = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_v_dir_info = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2013
+ * :return: None if unicode_path does not exist, or a path_info tuple.
+ * """
+ * abspath = self.tree.abspath(unicode_path) # <<<<<<<<<<<<<<
+ * try:
+ * stat = os.lstat(abspath)
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self->tree, __pyx_n_s__abspath); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_unicode_path);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_unicode_path);
+ __Pyx_GIVEREF(__pyx_v_unicode_path);
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_v_abspath);
+ __pyx_v_abspath = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2014
+ * """
+ * abspath = self.tree.abspath(unicode_path)
+ * try: # <<<<<<<<<<<<<<
+ * stat = os.lstat(abspath)
+ * except OSError, e:
+ */
+ {
+ PyObject *__pyx_save_exc_type, *__pyx_save_exc_value, *__pyx_save_exc_tb;
+ __Pyx_ExceptionSave(&__pyx_save_exc_type, &__pyx_save_exc_value, &__pyx_save_exc_tb);
+ __Pyx_XGOTREF(__pyx_save_exc_type);
+ __Pyx_XGOTREF(__pyx_save_exc_value);
+ __Pyx_XGOTREF(__pyx_save_exc_tb);
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2015
+ * abspath = self.tree.abspath(unicode_path)
+ * try:
+ * stat = os.lstat(abspath) # <<<<<<<<<<<<<<
+ * except OSError, e:
+ * if e.errno == errno.ENOENT:
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__os); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2015; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__lstat); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2015; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2015; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_abspath);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_abspath);
+ __Pyx_GIVEREF(__pyx_v_abspath);
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2015; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_stat);
+ __pyx_v_stat = __pyx_t_1;
+ __pyx_t_1 = 0;
+ }
+ __Pyx_XDECREF(__pyx_save_exc_type); __pyx_save_exc_type = 0;
+ __Pyx_XDECREF(__pyx_save_exc_value); __pyx_save_exc_value = 0;
+ __Pyx_XDECREF(__pyx_save_exc_tb); __pyx_save_exc_tb = 0;
+ goto __pyx_L10_try_end;
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2016
+ * try:
+ * stat = os.lstat(abspath)
+ * except OSError, e: # <<<<<<<<<<<<<<
+ * if e.errno == errno.ENOENT:
+ * # the path does not exist.
+ */
+ __pyx_t_4 = PyErr_ExceptionMatches(__pyx_builtin_OSError);
+ if (__pyx_t_4) {
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._path_info");
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_3, &__pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2016; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_v_e);
+ __pyx_v_e = __pyx_t_3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2017
+ * stat = os.lstat(abspath)
+ * except OSError, e:
+ * if e.errno == errno.ENOENT: # <<<<<<<<<<<<<<
+ * # the path does not exist.
+ * return None
+ */
+ __pyx_t_5 = PyObject_GetAttr(__pyx_v_e, __pyx_n_s__errno); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2017; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__errno); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2017; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__ENOENT); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2017; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_7, Py_EQ); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2017; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2017; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2019
+ * if e.errno == errno.ENOENT:
+ * # the path does not exist.
+ * return None # <<<<<<<<<<<<<<
+ * else:
+ * raise
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L6_except_return;
+ goto __pyx_L13;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2021
+ * return None
+ * else:
+ * raise # <<<<<<<<<<<<<<
+ * utf8_basename = utf8_path.rsplit('/', 1)[-1]
+ * dir_info = (utf8_path, utf8_basename,
+ */
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_ErrRestore(__pyx_t_1, __pyx_t_3, __pyx_t_2);
+ __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2021; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
+ }
+ __pyx_L13:;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L4_exception_handled;
+ }
+ __pyx_L5_except_error:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ goto __pyx_L0;
+ __pyx_L4_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_save_exc_type);
+ __Pyx_XGIVEREF(__pyx_save_exc_value);
+ __Pyx_XGIVEREF(__pyx_save_exc_tb);
+ __Pyx_ExceptionReset(__pyx_save_exc_type, __pyx_save_exc_value, __pyx_save_exc_tb);
+ __pyx_L10_try_end:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2022
+ * else:
+ * raise
+ * utf8_basename = utf8_path.rsplit('/', 1)[-1] # <<<<<<<<<<<<<<
+ * dir_info = (utf8_path, utf8_basename,
+ * osutils.file_kind_from_stat_mode(stat.st_mode), stat,
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_utf8_path, __pyx_n_s__rsplit); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_47));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_47));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_47));
+ __Pyx_INCREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_v_utf8_basename);
+ __pyx_v_utf8_basename = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2024
+ * utf8_basename = utf8_path.rsplit('/', 1)[-1]
+ * dir_info = (utf8_path, utf8_basename,
+ * osutils.file_kind_from_stat_mode(stat.st_mode), stat, # <<<<<<<<<<<<<<
+ * abspath)
+ * if dir_info[2] == 'directory':
+ */
+ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__osutils); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s_38); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_stat, __pyx_n_s__st_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2025
+ * dir_info = (utf8_path, utf8_basename,
+ * osutils.file_kind_from_stat_mode(stat.st_mode), stat,
+ * abspath) # <<<<<<<<<<<<<<
+ * if dir_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference(
+ */
+ __pyx_t_2 = PyTuple_New(5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_utf8_path);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_utf8_path);
+ __Pyx_GIVEREF(__pyx_v_utf8_path);
+ __Pyx_INCREF(__pyx_v_utf8_basename);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_utf8_basename);
+ __Pyx_GIVEREF(__pyx_v_utf8_basename);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_stat);
+ PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_stat);
+ __Pyx_GIVEREF(__pyx_v_stat);
+ __Pyx_INCREF(__pyx_v_abspath);
+ PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_v_abspath);
+ __Pyx_GIVEREF(__pyx_v_abspath);
+ __pyx_t_3 = 0;
+ if (!(likely(PyTuple_CheckExact(__pyx_t_2))||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_2)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_v_dir_info));
+ __pyx_v_dir_info = ((PyObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2026
+ * osutils.file_kind_from_stat_mode(stat.st_mode), stat,
+ * abspath)
+ * if dir_info[2] == 'directory': # <<<<<<<<<<<<<<
+ * if self.tree._directory_is_tree_reference(
+ * unicode_path):
+ */
+ __pyx_t_2 = __Pyx_GetItemInt_Tuple(((PyObject *)__pyx_v_dir_info), 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__directory), Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2027
+ * abspath)
+ * if dir_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference( # <<<<<<<<<<<<<<
+ * unicode_path):
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ */
+ __pyx_t_3 = PyObject_GetAttr(__pyx_v_self->tree, __pyx_n_s_39); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2028
+ * if dir_info[2] == 'directory':
+ * if self.tree._directory_is_tree_reference(
+ * unicode_path): # <<<<<<<<<<<<<<
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ * ('tree-reference',) + self.root_dir_info[3:]
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_unicode_path);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_unicode_path);
+ __Pyx_GIVEREF(__pyx_v_unicode_path);
+ __pyx_t_1 = PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_8) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2029
+ * if self.tree._directory_is_tree_reference(
+ * unicode_path):
+ * self.root_dir_info = self.root_dir_info[:2] + \ # <<<<<<<<<<<<<<
+ * ('tree-reference',) + self.root_dir_info[3:]
+ * return dir_info
+ */
+ __pyx_t_1 = PySequence_GetSlice(__pyx_v_self->root_dir_info, 0, 2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2029; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2030
+ * unicode_path):
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ * ('tree-reference',) + self.root_dir_info[3:] # <<<<<<<<<<<<<<
+ * return dir_info
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2030; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_33));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_33));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33));
+ __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2029; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PySequence_GetSlice(__pyx_v_self->root_dir_info, 3, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2030; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2030; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2029
+ * if self.tree._directory_is_tree_reference(
+ * unicode_path):
+ * self.root_dir_info = self.root_dir_info[:2] + \ # <<<<<<<<<<<<<<
+ * ('tree-reference',) + self.root_dir_info[3:]
+ * return dir_info
+ */
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->root_dir_info);
+ __Pyx_DECREF(__pyx_v_self->root_dir_info);
+ __pyx_v_self->root_dir_info = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":2031
+ * self.root_dir_info = self.root_dir_info[:2] + \
+ * ('tree-reference',) + self.root_dir_info[3:]
+ * return dir_info # <<<<<<<<<<<<<<
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_dir_info));
+ __pyx_r = ((PyObject *)__pyx_v_dir_info);
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("bzrlib._dirstate_helpers_pyx.ProcessEntryC._path_info");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_abspath);
+ __Pyx_DECREF(__pyx_v_stat);
+ __Pyx_DECREF(__pyx_v_e);
+ __Pyx_DECREF(__pyx_v_utf8_basename);
+ __Pyx_DECREF(__pyx_v_dir_info);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
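+ /* _path_info (generated from the .pyx source quoted above): lstat()s
+  * tree.abspath(unicode_path), returning None when the path is missing
+  * (ENOENT) and re-raising any other OSError; otherwise it returns the
+  * path_info tuple (utf8_path, utf8_basename, kind, stat, abspath), where
+  * kind comes from osutils.file_kind_from_stat_mode().  For directories that
+  * are tree references, self.root_dir_info is additionally rewritten to carry
+  * a 'tree-reference' kind. */
+ 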
+static struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_Reader __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader;
+
+static PyObject *__pyx_tp_new_6bzrlib_21_dirstate_helpers_pyx_Reader(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)o);
+ p->__pyx_vtab = __pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_Reader;
+ p->state = Py_None; Py_INCREF(Py_None);
+ p->text = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_21_dirstate_helpers_pyx_Reader(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *p = (struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)o;
+ Py_XDECREF(p->state);
+ Py_XDECREF(p->text);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_21_dirstate_helpers_pyx_Reader(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *p = (struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)o;
+ if (p->state) {
+ e = (*v)(p->state, a); if (e) return e;
+ }
+ if (p->text) {
+ e = (*v)(p->text, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_21_dirstate_helpers_pyx_Reader(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *p = (struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->state);
+ p->state = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->text);
+ p->text = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyMethodDef __pyx_methods_6bzrlib_21_dirstate_helpers_pyx_Reader[] = {
+ {__Pyx_NAMESTR("_parse_dirblocks"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_6Reader__parse_dirblocks, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx_6Reader__parse_dirblocks)},
+ {0, 0, 0, 0}
+};
+
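+/* The protocol slot tables below (number, sequence, mapping, buffer) are all
+ * zero: Reader implements none of these protocols, and the tables exist only
+ * so the static PyTypeObject further down can point at them. */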
+static PyNumberMethods __pyx_tp_as_number_Reader = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_Reader = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_Reader = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_Reader = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+PyTypeObject __pyx_type_6bzrlib_21_dirstate_helpers_pyx_Reader = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("bzrlib._dirstate_helpers_pyx.Reader"), /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_21_dirstate_helpers_pyx_Reader, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_Reader, /*tp_as_number*/
+ &__pyx_tp_as_sequence_Reader, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_Reader, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_Reader, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ __Pyx_DOCSTR("Maintain the current location, and return fields as you parse them."), /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_21_dirstate_helpers_pyx_Reader, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_21_dirstate_helpers_pyx_Reader, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_21_dirstate_helpers_pyx_Reader, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_6Reader___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_21_dirstate_helpers_pyx_Reader, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
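+
+/* The same boilerplate is now repeated for ProcessEntryC, the type that
+ * exposes iter_changes/__next__ further below: tp_new sets the 28
+ * object-typed attributes to Py_None, tp_dealloc drops them again, and
+ * tp_traverse/tp_clear walk them all for the cyclic garbage collector. */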
+static struct __pyx_vtabstruct_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC;
+
+static PyObject *__pyx_tp_new_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)o);
+ p->__pyx_vtab = __pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC;
+ p->old_dirname_to_file_id = Py_None; Py_INCREF(Py_None);
+ p->new_dirname_to_file_id = Py_None; Py_INCREF(Py_None);
+ p->last_source_parent = Py_None; Py_INCREF(Py_None);
+ p->last_target_parent = Py_None; Py_INCREF(Py_None);
+ p->use_filesystem_for_exec = Py_None; Py_INCREF(Py_None);
+ p->utf8_decode = Py_None; Py_INCREF(Py_None);
+ p->searched_specific_files = Py_None; Py_INCREF(Py_None);
+ p->searched_exact_paths = Py_None; Py_INCREF(Py_None);
+ p->search_specific_files = Py_None; Py_INCREF(Py_None);
+ p->search_specific_file_parents = Py_None; Py_INCREF(Py_None);
+ p->state = Py_None; Py_INCREF(Py_None);
+ p->current_root = Py_None; Py_INCREF(Py_None);
+ p->current_root_unicode = Py_None; Py_INCREF(Py_None);
+ p->root_entries = Py_None; Py_INCREF(Py_None);
+ p->root_abspath = Py_None; Py_INCREF(Py_None);
+ p->tree = Py_None; Py_INCREF(Py_None);
+ p->dir_iterator = Py_None; Py_INCREF(Py_None);
+ p->current_block = Py_None; Py_INCREF(Py_None);
+ p->current_block_list = Py_None; Py_INCREF(Py_None);
+ p->current_dir_info = Py_None; Py_INCREF(Py_None);
+ p->current_dir_list = Py_None; Py_INCREF(Py_None);
+ p->_pending_consistent_entries = Py_None; Py_INCREF(Py_None);
+ p->root_dir_info = Py_None; Py_INCREF(Py_None);
+ p->bisect_left = Py_None; Py_INCREF(Py_None);
+ p->pathjoin = Py_None; Py_INCREF(Py_None);
+ p->fstat = Py_None; Py_INCREF(Py_None);
+ p->seen_ids = Py_None; Py_INCREF(Py_None);
+ p->sha_file = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *p = (struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)o;
+ Py_XDECREF(p->old_dirname_to_file_id);
+ Py_XDECREF(p->new_dirname_to_file_id);
+ Py_XDECREF(p->last_source_parent);
+ Py_XDECREF(p->last_target_parent);
+ Py_XDECREF(p->use_filesystem_for_exec);
+ Py_XDECREF(p->utf8_decode);
+ Py_XDECREF(p->searched_specific_files);
+ Py_XDECREF(p->searched_exact_paths);
+ Py_XDECREF(p->search_specific_files);
+ Py_XDECREF(p->search_specific_file_parents);
+ Py_XDECREF(p->state);
+ Py_XDECREF(p->current_root);
+ Py_XDECREF(p->current_root_unicode);
+ Py_XDECREF(p->root_entries);
+ Py_XDECREF(p->root_abspath);
+ Py_XDECREF(p->tree);
+ Py_XDECREF(p->dir_iterator);
+ Py_XDECREF(p->current_block);
+ Py_XDECREF(p->current_block_list);
+ Py_XDECREF(p->current_dir_info);
+ Py_XDECREF(p->current_dir_list);
+ Py_XDECREF(p->_pending_consistent_entries);
+ Py_XDECREF(p->root_dir_info);
+ Py_XDECREF(p->bisect_left);
+ Py_XDECREF(p->pathjoin);
+ Py_XDECREF(p->fstat);
+ Py_XDECREF(p->seen_ids);
+ Py_XDECREF(p->sha_file);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *p = (struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)o;
+ if (p->old_dirname_to_file_id) {
+ e = (*v)(p->old_dirname_to_file_id, a); if (e) return e;
+ }
+ if (p->new_dirname_to_file_id) {
+ e = (*v)(p->new_dirname_to_file_id, a); if (e) return e;
+ }
+ if (p->last_source_parent) {
+ e = (*v)(p->last_source_parent, a); if (e) return e;
+ }
+ if (p->last_target_parent) {
+ e = (*v)(p->last_target_parent, a); if (e) return e;
+ }
+ if (p->use_filesystem_for_exec) {
+ e = (*v)(p->use_filesystem_for_exec, a); if (e) return e;
+ }
+ if (p->utf8_decode) {
+ e = (*v)(p->utf8_decode, a); if (e) return e;
+ }
+ if (p->searched_specific_files) {
+ e = (*v)(p->searched_specific_files, a); if (e) return e;
+ }
+ if (p->searched_exact_paths) {
+ e = (*v)(p->searched_exact_paths, a); if (e) return e;
+ }
+ if (p->search_specific_files) {
+ e = (*v)(p->search_specific_files, a); if (e) return e;
+ }
+ if (p->search_specific_file_parents) {
+ e = (*v)(p->search_specific_file_parents, a); if (e) return e;
+ }
+ if (p->state) {
+ e = (*v)(p->state, a); if (e) return e;
+ }
+ if (p->current_root) {
+ e = (*v)(p->current_root, a); if (e) return e;
+ }
+ if (p->current_root_unicode) {
+ e = (*v)(p->current_root_unicode, a); if (e) return e;
+ }
+ if (p->root_entries) {
+ e = (*v)(p->root_entries, a); if (e) return e;
+ }
+ if (p->root_abspath) {
+ e = (*v)(p->root_abspath, a); if (e) return e;
+ }
+ if (p->tree) {
+ e = (*v)(p->tree, a); if (e) return e;
+ }
+ if (p->dir_iterator) {
+ e = (*v)(p->dir_iterator, a); if (e) return e;
+ }
+ if (p->current_block) {
+ e = (*v)(p->current_block, a); if (e) return e;
+ }
+ if (p->current_block_list) {
+ e = (*v)(p->current_block_list, a); if (e) return e;
+ }
+ if (p->current_dir_info) {
+ e = (*v)(p->current_dir_info, a); if (e) return e;
+ }
+ if (p->current_dir_list) {
+ e = (*v)(p->current_dir_list, a); if (e) return e;
+ }
+ if (p->_pending_consistent_entries) {
+ e = (*v)(p->_pending_consistent_entries, a); if (e) return e;
+ }
+ if (p->root_dir_info) {
+ e = (*v)(p->root_dir_info, a); if (e) return e;
+ }
+ if (p->bisect_left) {
+ e = (*v)(p->bisect_left, a); if (e) return e;
+ }
+ if (p->pathjoin) {
+ e = (*v)(p->pathjoin, a); if (e) return e;
+ }
+ if (p->fstat) {
+ e = (*v)(p->fstat, a); if (e) return e;
+ }
+ if (p->seen_ids) {
+ e = (*v)(p->seen_ids, a); if (e) return e;
+ }
+ if (p->sha_file) {
+ e = (*v)(p->sha_file, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC(PyObject *o) {
+ struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *p = (struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->old_dirname_to_file_id);
+ p->old_dirname_to_file_id = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->new_dirname_to_file_id);
+ p->new_dirname_to_file_id = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->last_source_parent);
+ p->last_source_parent = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->last_target_parent);
+ p->last_target_parent = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->use_filesystem_for_exec);
+ p->use_filesystem_for_exec = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->utf8_decode);
+ p->utf8_decode = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->searched_specific_files);
+ p->searched_specific_files = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->searched_exact_paths);
+ p->searched_exact_paths = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->search_specific_files);
+ p->search_specific_files = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->search_specific_file_parents);
+ p->search_specific_file_parents = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->state);
+ p->state = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->current_root);
+ p->current_root = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->current_root_unicode);
+ p->current_root_unicode = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->root_entries);
+ p->root_entries = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->root_abspath);
+ p->root_abspath = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->tree);
+ p->tree = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->dir_iterator);
+ p->dir_iterator = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->current_block);
+ p->current_block = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->current_block_list);
+ p->current_block_list = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->current_dir_info);
+ p->current_dir_info = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->current_dir_list);
+ p->current_dir_list = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_pending_consistent_entries);
+ p->_pending_consistent_entries = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->root_dir_info);
+ p->root_dir_info = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->bisect_left);
+ p->bisect_left = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->pathjoin);
+ p->pathjoin = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->fstat);
+ p->fstat = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->seen_ids);
+ p->seen_ids = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->sha_file);
+ p->sha_file = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
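+/* Python-level interface of ProcessEntryC: searched_specific_files and
+ * searched_exact_paths are exposed as read-only properties (getter only),
+ * while iter_changes and __next__ are no-argument methods; __next__ also
+ * backs tp_iternext in the type object below. */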
+static PyObject *__pyx_getprop_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_searched_specific_files(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_23searched_specific_files___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_searched_exact_paths(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_20searched_exact_paths___get__(o);
+}
+
+static PyMethodDef __pyx_methods_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC[] = {
+ {__Pyx_NAMESTR("iter_changes"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_iter_changes, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("__next__"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___next__, METH_NOARGS|METH_COEXIST, __Pyx_DOCSTR(0)},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC[] = {
+ {(char *)"searched_specific_files", __pyx_getprop_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_searched_specific_files, 0, 0, 0},
+ {(char *)"searched_exact_paths", __pyx_getprop_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC_searched_exact_paths, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_ProcessEntryC = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_ProcessEntryC = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_ProcessEntryC = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_ProcessEntryC = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+PyTypeObject __pyx_type_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("bzrlib._dirstate_helpers_pyx.ProcessEntryC"), /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_ProcessEntryC, /*tp_as_number*/
+ &__pyx_tp_as_sequence_ProcessEntryC, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_ProcessEntryC, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_ProcessEntryC, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___iter__, /*tp_iter*/
+ __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___next__, /*tp_iternext*/
+ __pyx_methods_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+
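+/* Module-level method table: the functions the extension exports to Python
+ * (_py_memrchr, cmp_by_dirs, _cmp_path_by_dirblock, _bisect_path_left,
+ * _bisect_path_right, bisect_dirblock, _read_dirblocks, pack_stat and
+ * update_entry). */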
+static PyMethodDef __pyx_methods[] = {
+ {__Pyx_NAMESTR("_py_memrchr"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__py_memrchr, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx__py_memrchr)},
+ {__Pyx_NAMESTR("cmp_by_dirs"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_cmp_by_dirs, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx_cmp_by_dirs)},
+ {__Pyx_NAMESTR("_cmp_path_by_dirblock"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx__cmp_path_by_dirblock)},
+ {__Pyx_NAMESTR("_bisect_path_left"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__bisect_path_left, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx__bisect_path_left)},
+ {__Pyx_NAMESTR("_bisect_path_right"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__bisect_path_right, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx__bisect_path_right)},
+ {__Pyx_NAMESTR("bisect_dirblock"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_bisect_dirblock, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx_bisect_dirblock)},
+ {__Pyx_NAMESTR("_read_dirblocks"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx__read_dirblocks, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx__read_dirblocks)},
+ {__Pyx_NAMESTR("pack_stat"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_pack_stat, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx_pack_stat)},
+ {__Pyx_NAMESTR("update_entry"), (PyCFunction)__pyx_pf_6bzrlib_21_dirstate_helpers_pyx_update_entry, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_21_dirstate_helpers_pyx_update_entry)},
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ __Pyx_NAMESTR("_dirstate_helpers_pyx"),
+ __Pyx_DOCSTR(__pyx_k_48), /* m_doc */
+ -1, /* m_size */
+ __pyx_methods /* m_methods */,
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+
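+/* Interned-string table: the string constants and identifier names used by
+ * the module are created once during __Pyx_InitStrings() and referenced
+ * afterwards through these __pyx_kp_* / __pyx_n_* pointers. */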
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
+ {&__pyx_kp_s_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 0, 1, 0},
+ {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0},
+ {&__pyx_kp_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 0},
+ {&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 0},
+ {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0},
+ {&__pyx_n_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 1},
+ {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0},
+ {&__pyx_n_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 1},
+ {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1},
+ {&__pyx_n_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 1},
+ {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0},
+ {&__pyx_n_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 1},
+ {&__pyx_n_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 1},
+ {&__pyx_n_s_22, __pyx_k_22, sizeof(__pyx_k_22), 0, 0, 1, 1},
+ {&__pyx_n_s_23, __pyx_k_23, sizeof(__pyx_k_23), 0, 0, 1, 1},
+ {&__pyx_n_s_24, __pyx_k_24, sizeof(__pyx_k_24), 0, 0, 1, 1},
+ {&__pyx_n_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 1},
+ {&__pyx_n_s_26, __pyx_k_26, sizeof(__pyx_k_26), 0, 0, 1, 1},
+ {&__pyx_n_s_27, __pyx_k_27, sizeof(__pyx_k_27), 0, 0, 1, 1},
+ {&__pyx_n_s_28, __pyx_k_28, sizeof(__pyx_k_28), 0, 0, 1, 1},
+ {&__pyx_n_s_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 0, 1, 1},
+ {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0},
+ {&__pyx_kp_s_30, __pyx_k_30, sizeof(__pyx_k_30), 0, 0, 1, 0},
+ {&__pyx_kp_s_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 0, 1, 0},
+ {&__pyx_kp_s_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 0, 1, 0},
+ {&__pyx_kp_s_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 0, 1, 0},
+ {&__pyx_kp_s_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 0, 1, 0},
+ {&__pyx_kp_s_35, __pyx_k_35, sizeof(__pyx_k_35), 0, 0, 1, 0},
+ {&__pyx_kp_s_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 0, 1, 0},
+ {&__pyx_n_s_37, __pyx_k_37, sizeof(__pyx_k_37), 0, 0, 1, 1},
+ {&__pyx_n_s_38, __pyx_k_38, sizeof(__pyx_k_38), 0, 0, 1, 1},
+ {&__pyx_n_s_39, __pyx_k_39, sizeof(__pyx_k_39), 0, 0, 1, 1},
+ {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0},
+ {&__pyx_kp_s_40, __pyx_k_40, sizeof(__pyx_k_40), 0, 0, 1, 0},
+ {&__pyx_n_s_41, __pyx_k_41, sizeof(__pyx_k_41), 0, 0, 1, 1},
+ {&__pyx_n_s_42, __pyx_k_42, sizeof(__pyx_k_42), 0, 0, 1, 1},
+ {&__pyx_n_s_43, __pyx_k_43, sizeof(__pyx_k_43), 0, 0, 1, 1},
+ {&__pyx_kp_s_44, __pyx_k_44, sizeof(__pyx_k_44), 0, 0, 1, 0},
+ {&__pyx_kp_s_45, __pyx_k_45, sizeof(__pyx_k_45), 0, 0, 1, 0},
+ {&__pyx_kp_s_46, __pyx_k_46, sizeof(__pyx_k_46), 0, 0, 1, 0},
+ {&__pyx_kp_s_47, __pyx_k_47, sizeof(__pyx_k_47), 0, 0, 1, 0},
+ {&__pyx_n_s_49, __pyx_k_49, sizeof(__pyx_k_49), 0, 0, 1, 1},
+ {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0},
+ {&__pyx_n_s_50, __pyx_k_50, sizeof(__pyx_k_50), 0, 0, 1, 1},
+ {&__pyx_kp_u_51, __pyx_k_51, sizeof(__pyx_k_51), 0, 1, 0, 0},
+ {&__pyx_kp_u_52, __pyx_k_52, sizeof(__pyx_k_52), 0, 1, 0, 0},
+ {&__pyx_kp_u_53, __pyx_k_53, sizeof(__pyx_k_53), 0, 1, 0, 0},
+ {&__pyx_n_s_54, __pyx_k_54, sizeof(__pyx_k_54), 0, 0, 1, 1},
+ {&__pyx_kp_u_55, __pyx_k_55, sizeof(__pyx_k_55), 0, 1, 0, 0},
+ {&__pyx_kp_u_56, __pyx_k_56, sizeof(__pyx_k_56), 0, 1, 0, 0},
+ {&__pyx_kp_u_57, __pyx_k_57, sizeof(__pyx_k_57), 0, 1, 0, 0},
+ {&__pyx_kp_u_58, __pyx_k_58, sizeof(__pyx_k_58), 0, 1, 0, 0},
+ {&__pyx_kp_u_59, __pyx_k_59, sizeof(__pyx_k_59), 0, 1, 0, 0},
+ {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0},
+ {&__pyx_kp_u_60, __pyx_k_60, sizeof(__pyx_k_60), 0, 1, 0, 0},
+ {&__pyx_kp_u_61, __pyx_k_61, sizeof(__pyx_k_61), 0, 1, 0, 0},
+ {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0},
+ {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0},
+ {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0},
+ {&__pyx_n_s__AssertionError, __pyx_k__AssertionError, sizeof(__pyx_k__AssertionError), 0, 0, 1, 1},
+ {&__pyx_n_s__AttributeError, __pyx_k__AttributeError, sizeof(__pyx_k__AttributeError), 0, 0, 1, 1},
+ {&__pyx_n_s__BadFileKindError, __pyx_k__BadFileKindError, sizeof(__pyx_k__BadFileKindError), 0, 0, 1, 1},
+ {&__pyx_n_s__BadFilenameEncoding, __pyx_k__BadFilenameEncoding, sizeof(__pyx_k__BadFilenameEncoding), 0, 0, 1, 1},
+ {&__pyx_n_s__BzrError, __pyx_k__BzrError, sizeof(__pyx_k__BzrError), 0, 0, 1, 1},
+ {&__pyx_n_s__CorruptDirstate, __pyx_k__CorruptDirstate, sizeof(__pyx_k__CorruptDirstate), 0, 0, 1, 1},
+ {&__pyx_n_s__DirState, __pyx_k__DirState, sizeof(__pyx_k__DirState), 0, 0, 1, 1},
+ {&__pyx_n_s__DirstateCorrupt, __pyx_k__DirstateCorrupt, sizeof(__pyx_k__DirstateCorrupt), 0, 0, 1, 1},
+ {&__pyx_n_s__EINVAL, __pyx_k__EINVAL, sizeof(__pyx_k__EINVAL), 0, 0, 1, 1},
+ {&__pyx_n_s__ENOENT, __pyx_k__ENOENT, sizeof(__pyx_k__ENOENT), 0, 0, 1, 1},
+ {&__pyx_n_s__ENOTDIR, __pyx_k__ENOTDIR, sizeof(__pyx_k__ENOTDIR), 0, 0, 1, 1},
+ {&__pyx_n_s__KeyError, __pyx_k__KeyError, sizeof(__pyx_k__KeyError), 0, 0, 1, 1},
+ {&__pyx_n_s__NULLSTAT, __pyx_k__NULLSTAT, sizeof(__pyx_k__NULLSTAT), 0, 0, 1, 1},
+ {&__pyx_n_s__NULL_PARENT_DETAILS, __pyx_k__NULL_PARENT_DETAILS, sizeof(__pyx_k__NULL_PARENT_DETAILS), 0, 0, 1, 1},
+ {&__pyx_n_s__OSError, __pyx_k__OSError, sizeof(__pyx_k__OSError), 0, 0, 1, 1},
+ {&__pyx_n_s__Reader, __pyx_k__Reader, sizeof(__pyx_k__Reader), 0, 0, 1, 1},
+ {&__pyx_n_s__S_IEXEC, __pyx_k__S_IEXEC, sizeof(__pyx_k__S_IEXEC), 0, 0, 1, 1},
+ {&__pyx_n_s__S_ISREG, __pyx_k__S_ISREG, sizeof(__pyx_k__S_ISREG), 0, 0, 1, 1},
+ {&__pyx_n_s__StopIteration, __pyx_k__StopIteration, sizeof(__pyx_k__StopIteration), 0, 0, 1, 1},
+ {&__pyx_n_s__TypeError, __pyx_k__TypeError, sizeof(__pyx_k__TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s__UnicodeDecodeError, __pyx_k__UnicodeDecodeError, sizeof(__pyx_k__UnicodeDecodeError), 0, 0, 1, 1},
+ {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
+ {&__pyx_n_s____ne__, __pyx_k____ne__, sizeof(__pyx_k____ne__), 0, 0, 1, 1},
+ {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
+ {&__pyx_n_s___bisect_path_left, __pyx_k___bisect_path_left, sizeof(__pyx_k___bisect_path_left), 0, 0, 1, 1},
+ {&__pyx_n_s___bisect_path_right, __pyx_k___bisect_path_right, sizeof(__pyx_k___bisect_path_right), 0, 0, 1, 1},
+ {&__pyx_n_s___cutoff_time, __pyx_k___cutoff_time, sizeof(__pyx_k___cutoff_time), 0, 0, 1, 1},
+ {&__pyx_n_s___dirblock_state, __pyx_k___dirblock_state, sizeof(__pyx_k___dirblock_state), 0, 0, 1, 1},
+ {&__pyx_n_s___dirblocks, __pyx_k___dirblocks, sizeof(__pyx_k___dirblocks), 0, 0, 1, 1},
+ {&__pyx_n_s___encode, __pyx_k___encode, sizeof(__pyx_k___encode), 0, 0, 1, 1},
+ {&__pyx_n_s___end_of_header, __pyx_k___end_of_header, sizeof(__pyx_k___end_of_header), 0, 0, 1, 1},
+ {&__pyx_n_s___ensure_block, __pyx_k___ensure_block, sizeof(__pyx_k___ensure_block), 0, 0, 1, 1},
+ {&__pyx_n_s___entries_for_path, __pyx_k___entries_for_path, sizeof(__pyx_k___entries_for_path), 0, 0, 1, 1},
+ {&__pyx_n_s___filename, __pyx_k___filename, sizeof(__pyx_k___filename), 0, 0, 1, 1},
+ {&__pyx_n_s___fs_enc, __pyx_k___fs_enc, sizeof(__pyx_k___fs_enc), 0, 0, 1, 1},
+ {&__pyx_n_s___get_entry, __pyx_k___get_entry, sizeof(__pyx_k___get_entry), 0, 0, 1, 1},
+ {&__pyx_n_s___init, __pyx_k___init, sizeof(__pyx_k___init), 0, 0, 1, 1},
+ {&__pyx_n_s___is_executable, __pyx_k___is_executable, sizeof(__pyx_k___is_executable), 0, 0, 1, 1},
+ {&__pyx_n_s___iter_next, __pyx_k___iter_next, sizeof(__pyx_k___iter_next), 0, 0, 1, 1},
+ {&__pyx_n_s___loop_one_block, __pyx_k___loop_one_block, sizeof(__pyx_k___loop_one_block), 0, 0, 1, 1},
+ {&__pyx_n_s___mark_modified, __pyx_k___mark_modified, sizeof(__pyx_k___mark_modified), 0, 0, 1, 1},
+ {&__pyx_n_s___maybe_tree_ref, __pyx_k___maybe_tree_ref, sizeof(__pyx_k___maybe_tree_ref), 0, 0, 1, 1},
+ {&__pyx_n_s___num_entries, __pyx_k___num_entries, sizeof(__pyx_k___num_entries), 0, 0, 1, 1},
+ {&__pyx_n_s___observed_sha1, __pyx_k___observed_sha1, sizeof(__pyx_k___observed_sha1), 0, 0, 1, 1},
+ {&__pyx_n_s___parse_dirblocks, __pyx_k___parse_dirblocks, sizeof(__pyx_k___parse_dirblocks), 0, 0, 1, 1},
+ {&__pyx_n_s___path_info, __pyx_k___path_info, sizeof(__pyx_k___path_info), 0, 0, 1, 1},
+ {&__pyx_n_s___process_entry, __pyx_k___process_entry, sizeof(__pyx_k___process_entry), 0, 0, 1, 1},
+ {&__pyx_n_s___py_memrchr, __pyx_k___py_memrchr, sizeof(__pyx_k___py_memrchr), 0, 0, 1, 1},
+ {&__pyx_n_s___read_dirblocks, __pyx_k___read_dirblocks, sizeof(__pyx_k___read_dirblocks), 0, 0, 1, 1},
+ {&__pyx_n_s___read_link, __pyx_k___read_link, sizeof(__pyx_k___read_link), 0, 0, 1, 1},
+ {&__pyx_n_s___sha1_file, __pyx_k___sha1_file, sizeof(__pyx_k___sha1_file), 0, 0, 1, 1},
+ {&__pyx_n_s___sha1_provider, __pyx_k___sha1_provider, sizeof(__pyx_k___sha1_provider), 0, 0, 1, 1},
+ {&__pyx_n_s___sha_cutoff_time, __pyx_k___sha_cutoff_time, sizeof(__pyx_k___sha_cutoff_time), 0, 0, 1, 1},
+ {&__pyx_n_s___state_file, __pyx_k___state_file, sizeof(__pyx_k___state_file), 0, 0, 1, 1},
+ {&__pyx_n_s___utf8_decode, __pyx_k___utf8_decode, sizeof(__pyx_k___utf8_decode), 0, 0, 1, 1},
+ {&__pyx_n_s___walkdirs_utf8, __pyx_k___walkdirs_utf8, sizeof(__pyx_k___walkdirs_utf8), 0, 0, 1, 1},
+ {&__pyx_n_s__a, __pyx_k__a, sizeof(__pyx_k__a), 0, 0, 1, 1},
+ {&__pyx_n_s__absent, __pyx_k__absent, sizeof(__pyx_k__absent), 0, 0, 1, 1},
+ {&__pyx_n_s__abspath, __pyx_k__abspath, sizeof(__pyx_k__abspath), 0, 0, 1, 1},
+ {&__pyx_n_s__add, __pyx_k__add, sizeof(__pyx_k__add), 0, 0, 1, 1},
+ {&__pyx_n_s__ar, __pyx_k__ar, sizeof(__pyx_k__ar), 0, 0, 1, 1},
+ {&__pyx_n_s__b2a_base64, __pyx_k__b2a_base64, sizeof(__pyx_k__b2a_base64), 0, 0, 1, 1},
+ {&__pyx_n_s__binascii, __pyx_k__binascii, sizeof(__pyx_k__binascii), 0, 0, 1, 1},
+ {&__pyx_n_s__bisect, __pyx_k__bisect, sizeof(__pyx_k__bisect), 0, 0, 1, 1},
+ {&__pyx_n_s__bisect_dirblock, __pyx_k__bisect_dirblock, sizeof(__pyx_k__bisect_dirblock), 0, 0, 1, 1},
+ {&__pyx_n_s__bisect_left, __pyx_k__bisect_left, sizeof(__pyx_k__bisect_left), 0, 0, 1, 1},
+ {&__pyx_n_s__block_index, __pyx_k__block_index, sizeof(__pyx_k__block_index), 0, 0, 1, 1},
+ {&__pyx_n_s__bzrlib, __pyx_k__bzrlib, sizeof(__pyx_k__bzrlib), 0, 0, 1, 1},
+ {&__pyx_n_s__c, __pyx_k__c, sizeof(__pyx_k__c), 0, 0, 1, 1},
+ {&__pyx_n_s__cache, __pyx_k__cache, sizeof(__pyx_k__cache), 0, 0, 1, 1},
+ {&__pyx_n_s__cache_utf8, __pyx_k__cache_utf8, sizeof(__pyx_k__cache_utf8), 0, 0, 1, 1},
+ {&__pyx_n_s__cmp, __pyx_k__cmp, sizeof(__pyx_k__cmp), 0, 0, 1, 1},
+ {&__pyx_n_s__cmp_by_dirs, __pyx_k__cmp_by_dirs, sizeof(__pyx_k__cmp_by_dirs), 0, 0, 1, 1},
+ {&__pyx_n_s__cur_cstr, __pyx_k__cur_cstr, sizeof(__pyx_k__cur_cstr), 0, 0, 1, 1},
+ {&__pyx_n_s__current_block, __pyx_k__current_block, sizeof(__pyx_k__current_block), 0, 0, 1, 1},
+ {&__pyx_n_s__current_block_list, __pyx_k__current_block_list, sizeof(__pyx_k__current_block_list), 0, 0, 1, 1},
+ {&__pyx_n_s__current_block_pos, __pyx_k__current_block_pos, sizeof(__pyx_k__current_block_pos), 0, 0, 1, 1},
+ {&__pyx_n_s__current_dir_info, __pyx_k__current_dir_info, sizeof(__pyx_k__current_dir_info), 0, 0, 1, 1},
+ {&__pyx_n_s__current_dir_list, __pyx_k__current_dir_list, sizeof(__pyx_k__current_dir_list), 0, 0, 1, 1},
+ {&__pyx_n_s__current_root, __pyx_k__current_root, sizeof(__pyx_k__current_root), 0, 0, 1, 1},
+ {&__pyx_n_s__d, __pyx_k__d, sizeof(__pyx_k__d), 0, 0, 1, 1},
+ {&__pyx_n_s__decode, __pyx_k__decode, sizeof(__pyx_k__decode), 0, 0, 1, 1},
+ {&__pyx_n_s__dir_iterator, __pyx_k__dir_iterator, sizeof(__pyx_k__dir_iterator), 0, 0, 1, 1},
+ {&__pyx_n_s__dirblocks, __pyx_k__dirblocks, sizeof(__pyx_k__dirblocks), 0, 0, 1, 1},
+ {&__pyx_n_s__directory, __pyx_k__directory, sizeof(__pyx_k__directory), 0, 0, 1, 1},
+ {&__pyx_n_s__dirname, __pyx_k__dirname, sizeof(__pyx_k__dirname), 0, 0, 1, 1},
+ {&__pyx_n_s__encode, __pyx_k__encode, sizeof(__pyx_k__encode), 0, 0, 1, 1},
+ {&__pyx_n_s__end_cstr, __pyx_k__end_cstr, sizeof(__pyx_k__end_cstr), 0, 0, 1, 1},
+ {&__pyx_n_s__entry, __pyx_k__entry, sizeof(__pyx_k__entry), 0, 0, 1, 1},
+ {&__pyx_n_s__errno, __pyx_k__errno, sizeof(__pyx_k__errno), 0, 0, 1, 1},
+ {&__pyx_n_s__errors, __pyx_k__errors, sizeof(__pyx_k__errors), 0, 0, 1, 1},
+ {&__pyx_n_s__f, __pyx_k__f, sizeof(__pyx_k__f), 0, 0, 1, 1},
+ {&__pyx_n_s__file, __pyx_k__file, sizeof(__pyx_k__file), 0, 0, 1, 1},
+ {&__pyx_n_s__fstat, __pyx_k__fstat, sizeof(__pyx_k__fstat), 0, 0, 1, 1},
+ {&__pyx_n_s__get_next, __pyx_k__get_next, sizeof(__pyx_k__get_next), 0, 0, 1, 1},
+ {&__pyx_n_s__get_next_str, __pyx_k__get_next_str, sizeof(__pyx_k__get_next_str), 0, 0, 1, 1},
+ {&__pyx_n_s__hi, __pyx_k__hi, sizeof(__pyx_k__hi), 0, 0, 1, 1},
+ {&__pyx_n_s__include_unchanged, __pyx_k__include_unchanged, sizeof(__pyx_k__include_unchanged), 0, 0, 1, 1},
+ {&__pyx_n_s__is_inside, __pyx_k__is_inside, sizeof(__pyx_k__is_inside), 0, 0, 1, 1},
+ {&__pyx_n_s__is_inside_any, __pyx_k__is_inside_any, sizeof(__pyx_k__is_inside_any), 0, 0, 1, 1},
+ {&__pyx_n_s__l, __pyx_k__l, sizeof(__pyx_k__l), 0, 0, 1, 1},
+ {&__pyx_n_s__last_source_parent, __pyx_k__last_source_parent, sizeof(__pyx_k__last_source_parent), 0, 0, 1, 1},
+ {&__pyx_n_s__last_target_parent, __pyx_k__last_target_parent, sizeof(__pyx_k__last_target_parent), 0, 0, 1, 1},
+ {&__pyx_n_s__lo, __pyx_k__lo, sizeof(__pyx_k__lo), 0, 0, 1, 1},
+ {&__pyx_n_s__lstat, __pyx_k__lstat, sizeof(__pyx_k__lstat), 0, 0, 1, 1},
+ {&__pyx_n_s__next, __pyx_k__next, sizeof(__pyx_k__next), 0, 0, 1, 1},
+ {&__pyx_n_s__os, __pyx_k__os, sizeof(__pyx_k__os), 0, 0, 1, 1},
+ {&__pyx_n_s__osutils, __pyx_k__osutils, sizeof(__pyx_k__osutils), 0, 0, 1, 1},
+ {&__pyx_n_s__pack_stat, __pyx_k__pack_stat, sizeof(__pyx_k__pack_stat), 0, 0, 1, 1},
+ {&__pyx_n_s__parent_directories, __pyx_k__parent_directories, sizeof(__pyx_k__parent_directories), 0, 0, 1, 1},
+ {&__pyx_n_s__partial, __pyx_k__partial, sizeof(__pyx_k__partial), 0, 0, 1, 1},
+ {&__pyx_n_s__path, __pyx_k__path, sizeof(__pyx_k__path), 0, 0, 1, 1},
+ {&__pyx_n_s__path1, __pyx_k__path1, sizeof(__pyx_k__path1), 0, 0, 1, 1},
+ {&__pyx_n_s__path2, __pyx_k__path2, sizeof(__pyx_k__path2), 0, 0, 1, 1},
+ {&__pyx_n_s__path_index, __pyx_k__path_index, sizeof(__pyx_k__path_index), 0, 0, 1, 1},
+ {&__pyx_n_s__path_utf8, __pyx_k__path_utf8, sizeof(__pyx_k__path_utf8), 0, 0, 1, 1},
+ {&__pyx_n_s__pathjoin, __pyx_k__pathjoin, sizeof(__pyx_k__pathjoin), 0, 0, 1, 1},
+ {&__pyx_n_s__paths, __pyx_k__paths, sizeof(__pyx_k__paths), 0, 0, 1, 1},
+ {&__pyx_n_s__platform, __pyx_k__platform, sizeof(__pyx_k__platform), 0, 0, 1, 1},
+ {&__pyx_n_s__prefix, __pyx_k__prefix, sizeof(__pyx_k__prefix), 0, 0, 1, 1},
+ {&__pyx_n_s__r, __pyx_k__r, sizeof(__pyx_k__r), 0, 0, 1, 1},
+ {&__pyx_n_s__read, __pyx_k__read, sizeof(__pyx_k__read), 0, 0, 1, 1},
+ {&__pyx_n_s__relocated, __pyx_k__relocated, sizeof(__pyx_k__relocated), 0, 0, 1, 1},
+ {&__pyx_n_s__root_abspath, __pyx_k__root_abspath, sizeof(__pyx_k__root_abspath), 0, 0, 1, 1},
+ {&__pyx_n_s__root_dir_info, __pyx_k__root_dir_info, sizeof(__pyx_k__root_dir_info), 0, 0, 1, 1},
+ {&__pyx_n_s__root_entries, __pyx_k__root_entries, sizeof(__pyx_k__root_entries), 0, 0, 1, 1},
+ {&__pyx_n_s__root_entries_len, __pyx_k__root_entries_len, sizeof(__pyx_k__root_entries_len), 0, 0, 1, 1},
+ {&__pyx_n_s__root_entries_pos, __pyx_k__root_entries_pos, sizeof(__pyx_k__root_entries_pos), 0, 0, 1, 1},
+ {&__pyx_n_s__rsplit, __pyx_k__rsplit, sizeof(__pyx_k__rsplit), 0, 0, 1, 1},
+ {&__pyx_n_s__s, __pyx_k__s, sizeof(__pyx_k__s), 0, 0, 1, 1},
+ {&__pyx_n_s__seek, __pyx_k__seek, sizeof(__pyx_k__seek), 0, 0, 1, 1},
+ {&__pyx_n_s__seen_ids, __pyx_k__seen_ids, sizeof(__pyx_k__seen_ids), 0, 0, 1, 1},
+ {&__pyx_n_s__self, __pyx_k__self, sizeof(__pyx_k__self), 0, 0, 1, 1},
+ {&__pyx_n_s__sha_file, __pyx_k__sha_file, sizeof(__pyx_k__sha_file), 0, 0, 1, 1},
+ {&__pyx_n_s__source_index, __pyx_k__source_index, sizeof(__pyx_k__source_index), 0, 0, 1, 1},
+ {&__pyx_n_s__split, __pyx_k__split, sizeof(__pyx_k__split), 0, 0, 1, 1},
+ {&__pyx_n_s__splitpath, __pyx_k__splitpath, sizeof(__pyx_k__splitpath), 0, 0, 1, 1},
+ {&__pyx_n_s__st_ctime, __pyx_k__st_ctime, sizeof(__pyx_k__st_ctime), 0, 0, 1, 1},
+ {&__pyx_n_s__st_dev, __pyx_k__st_dev, sizeof(__pyx_k__st_dev), 0, 0, 1, 1},
+ {&__pyx_n_s__st_ino, __pyx_k__st_ino, sizeof(__pyx_k__st_ino), 0, 0, 1, 1},
+ {&__pyx_n_s__st_mode, __pyx_k__st_mode, sizeof(__pyx_k__st_mode), 0, 0, 1, 1},
+ {&__pyx_n_s__st_mtime, __pyx_k__st_mtime, sizeof(__pyx_k__st_mtime), 0, 0, 1, 1},
+ {&__pyx_n_s__st_size, __pyx_k__st_size, sizeof(__pyx_k__st_size), 0, 0, 1, 1},
+ {&__pyx_n_s__stat, __pyx_k__stat, sizeof(__pyx_k__stat), 0, 0, 1, 1},
+ {&__pyx_n_s__stat_and_sha1, __pyx_k__stat_and_sha1, sizeof(__pyx_k__stat_and_sha1), 0, 0, 1, 1},
+ {&__pyx_n_s__stat_value, __pyx_k__stat_value, sizeof(__pyx_k__stat_value), 0, 0, 1, 1},
+ {&__pyx_n_s__state, __pyx_k__state, sizeof(__pyx_k__state), 0, 0, 1, 1},
+ {&__pyx_n_s__symlink, __pyx_k__symlink, sizeof(__pyx_k__symlink), 0, 0, 1, 1},
+ {&__pyx_n_s__sys, __pyx_k__sys, sizeof(__pyx_k__sys), 0, 0, 1, 1},
+ {&__pyx_n_s__target_index, __pyx_k__target_index, sizeof(__pyx_k__target_index), 0, 0, 1, 1},
+ {&__pyx_n_s__text, __pyx_k__text, sizeof(__pyx_k__text), 0, 0, 1, 1},
+ {&__pyx_n_s__text_cstr, __pyx_k__text_cstr, sizeof(__pyx_k__text_cstr), 0, 0, 1, 1},
+ {&__pyx_n_s__text_size, __pyx_k__text_size, sizeof(__pyx_k__text_size), 0, 0, 1, 1},
+ {&__pyx_n_s__tree, __pyx_k__tree, sizeof(__pyx_k__tree), 0, 0, 1, 1},
+ {&__pyx_n_s__update, __pyx_k__update, sizeof(__pyx_k__update), 0, 0, 1, 1},
+ {&__pyx_n_s__update_entry, __pyx_k__update_entry, sizeof(__pyx_k__update_entry), 0, 0, 1, 1},
+ {&__pyx_n_s__utf8, __pyx_k__utf8, sizeof(__pyx_k__utf8), 0, 0, 1, 1},
+ {&__pyx_n_s__utf8_decode, __pyx_k__utf8_decode, sizeof(__pyx_k__utf8_decode), 0, 0, 1, 1},
+ {&__pyx_n_s__want_unversioned, __pyx_k__want_unversioned, sizeof(__pyx_k__want_unversioned), 0, 0, 1, 1},
+ {&__pyx_n_s__win32, __pyx_k__win32, sizeof(__pyx_k__win32), 0, 0, 1, 1},
+ {&__pyx_n_s__winerror, __pyx_k__winerror, sizeof(__pyx_k__winerror), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
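+
+/* __Pyx_InitCachedBuiltins looks up the builtins the module uses
+ * (AssertionError, TypeError, KeyError, StopIteration, OSError,
+ * AttributeError, cmp, UnicodeDecodeError) a single time at import; the
+ * __pyx_lineno values recorded on a failed lookup appear to refer back to
+ * the .pyx source lines that use each builtin. */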
+static int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_AssertionError = __Pyx_GetName(__pyx_b, __pyx_n_s__AssertionError); if (!__pyx_builtin_AssertionError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_KeyError = __Pyx_GetName(__pyx_b, __pyx_n_s__KeyError); if (!__pyx_builtin_KeyError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_StopIteration = __Pyx_GetName(__pyx_b, __pyx_n_s__StopIteration); if (!__pyx_builtin_StopIteration) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_OSError = __Pyx_GetName(__pyx_b, __pyx_n_s__OSError); if (!__pyx_builtin_OSError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1547; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_AttributeError = __Pyx_GetName(__pyx_b, __pyx_n_s__AttributeError); if (!__pyx_builtin_AttributeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1625; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_cmp = __Pyx_GetName(__pyx_b, __pyx_n_s__cmp); if (!__pyx_builtin_cmp) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1815; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_UnicodeDecodeError = __Pyx_GetName(__pyx_b, __pyx_n_s__UnicodeDecodeError); if (!__pyx_builtin_UnicodeDecodeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1862; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static int __Pyx_InitGlobals(void) {
+ #if PY_VERSION_HEX < 0x02040000
+ if (unlikely(__Pyx_Py23SetsImport() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
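+/* Module initialization entry point: Python 2 looks for
+ * init_dirstate_helpers_pyx, Python 3 for PyInit__dirstate_helpers_pyx.
+ * Both run the same body: create the module object, intern the string table
+ * and small ints, cache the builtins above, ready the Reader and
+ * ProcessEntryC types (wiring up their vtables), import StaticTuple from
+ * bzrlib._static_tuple_c, and finally execute the compiled module body,
+ * i.e. the imports that follow. */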
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init_dirstate_helpers_pyx(void); /*proto*/
+PyMODINIT_FUNC init_dirstate_helpers_pyx(void)
+#else
+PyMODINIT_FUNC PyInit__dirstate_helpers_pyx(void); /*proto*/
+PyMODINIT_FUNC PyInit__dirstate_helpers_pyx(void)
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ #if CYTHON_REFNANNY
+ void* __pyx_refnanny = NULL;
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+ }
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext("PyMODINIT_FUNC PyInit__dirstate_helpers_pyx(void)", __LINE__, __FILE__);
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef __pyx_binding_PyCFunctionType_USED
+ if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_dirstate_helpers_pyx"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_48), 0, PYTHON_API_VERSION);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ #if PY_MAJOR_VERSION < 3
+ Py_INCREF(__pyx_m);
+ #endif
+ __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ /*--- Initialize various global constants etc. ---*/
+ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_module_is_main_bzrlib___dirstate_helpers_pyx) {
+ if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ }
+ /*--- Builtin init code ---*/
+ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Global init code ---*/
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference = Py_None; Py_INCREF(Py_None);
+ /*--- Function export code ---*/
+ /*--- Type init code ---*/
+ __pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_Reader = &__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader;
+ #if PY_MAJOR_VERSION >= 3
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader.get_next = (char *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *, int *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader_get_next;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader.get_next_str = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader_get_next_str;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader._init = (int (*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader__init;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader._get_entry = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_Reader *, int, void **, int *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader__get_entry;
+ #else
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader.get_next = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader_get_next;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader.get_next_str = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader_get_next_str;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader._init = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader__init;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_Reader._get_entry = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_6Reader__get_entry;
+ #endif
+ if (PyType_Ready(&__pyx_type_6bzrlib_21_dirstate_helpers_pyx_Reader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_21_dirstate_helpers_pyx_Reader.tp_dict, __pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_Reader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "Reader", (PyObject *)&__pyx_type_6bzrlib_21_dirstate_helpers_pyx_Reader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_6bzrlib_21_dirstate_helpers_pyx_Reader = &__pyx_type_6bzrlib_21_dirstate_helpers_pyx_Reader;
+ __pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC = &__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC;
+ #if PY_MAJOR_VERSION >= 3
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._process_entry = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *, PyObject *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__process_entry;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._gather_result_for_consistency = (int (*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__gather_result_for_consistency;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._update_current_block = (int (*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__update_current_block;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._iter_next = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__iter_next;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._maybe_tree_ref = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__maybe_tree_ref;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._loop_one_block = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__loop_one_block;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._next_consistent_entries = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__next_consistent_entries;
+ __pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._path_info = (PyObject *(*)(struct __pyx_obj_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC *, PyObject *, PyObject *))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__path_info;
+ #else
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._process_entry = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__process_entry;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._gather_result_for_consistency = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__gather_result_for_consistency;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._update_current_block = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__update_current_block;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._iter_next = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__iter_next;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._maybe_tree_ref = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__maybe_tree_ref;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._loop_one_block = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__loop_one_block;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._next_consistent_entries = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__next_consistent_entries;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC._path_info = (void(*)(void))__pyx_f_6bzrlib_21_dirstate_helpers_pyx_13ProcessEntryC__path_info;
+ #endif
+ if (PyType_Ready(&__pyx_type_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC.tp_dict, __pyx_vtabptr_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "ProcessEntryC", (PyObject *)&__pyx_type_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC = &__pyx_type_6bzrlib_21_dirstate_helpers_pyx_ProcessEntryC;
+ /*--- Type import code ---*/
+ __pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple = __Pyx_ImportType("bzrlib._static_tuple_c", "StaticTuple", sizeof(StaticTuple), 0); if (unlikely(!__pyx_ptype_6bzrlib_15_static_tuple_c_StaticTuple)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Function import code ---*/
+ /*--- Execution code ---*/
+
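+ /* From here on the generated code replays the top-level statements of the
+ * .pyx module: each `import x` becomes a __Pyx_Import() call followed by a
+ * PyObject_SetAttr() on the module, and each `from pkg import a, b` builds
+ * the from-list, imports the package once, then copies out the requested
+ * attributes. */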
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":22
+ * """
+ *
+ * import binascii # <<<<<<<<<<<<<<
+ * import bisect
+ * import errno
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__binascii), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__binascii, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":23
+ *
+ * import binascii
+ * import bisect # <<<<<<<<<<<<<<
+ * import errno
+ * import os
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__bisect), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__bisect, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":24
+ * import binascii
+ * import bisect
+ * import errno # <<<<<<<<<<<<<<
+ * import os
+ * import stat
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__errno), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__errno, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":25
+ * import bisect
+ * import errno
+ * import os # <<<<<<<<<<<<<<
+ * import stat
+ * import sys
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__os), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__os, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":26
+ * import errno
+ * import os
+ * import stat # <<<<<<<<<<<<<<
+ * import sys
+ *
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__stat), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stat, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":27
+ * import os
+ * import stat
+ * import sys # <<<<<<<<<<<<<<
+ *
+ * from bzrlib import cache_utf8, errors, osutils
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__sys), 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__sys, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":29
+ * import sys
+ *
+ * from bzrlib import cache_utf8, errors, osutils # <<<<<<<<<<<<<<
+ * from bzrlib.dirstate import DirState
+ * from bzrlib.osutils import parent_directories, pathjoin, splitpath
+ */
+ __pyx_t_1 = PyList_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__cache_utf8));
+ PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__cache_utf8));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__cache_utf8));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__errors));
+ PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__errors));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__errors));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__osutils));
+ PyList_SET_ITEM(__pyx_t_1, 2, ((PyObject *)__pyx_n_s__osutils));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__osutils));
+ __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__bzrlib), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__cache_utf8); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__cache_utf8, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__errors); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__errors, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__osutils); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__osutils, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":30
+ *
+ * from bzrlib import cache_utf8, errors, osutils
+ * from bzrlib.dirstate import DirState # <<<<<<<<<<<<<<
+ * from bzrlib.osutils import parent_directories, pathjoin, splitpath
+ *
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__DirState));
+ PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s__DirState));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__DirState));
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s_49), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__DirState); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__DirState, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":31
+ * from bzrlib import cache_utf8, errors, osutils
+ * from bzrlib.dirstate import DirState
+ * from bzrlib.osutils import parent_directories, pathjoin, splitpath # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = PyList_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__parent_directories));
+ PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__parent_directories));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__parent_directories));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__pathjoin));
+ PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__pathjoin));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pathjoin));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__splitpath));
+ PyList_SET_ITEM(__pyx_t_1, 2, ((PyObject *)__pyx_n_s__splitpath));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__splitpath));
+ __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_50), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__parent_directories); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__parent_directories, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__pathjoin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pathjoin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__splitpath); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__splitpath, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":39
+ * # XXX: Perhaps we could get it from a windows header ?
+ * cdef int ERROR_PATH_NOT_FOUND
+ * ERROR_PATH_NOT_FOUND = 3 # <<<<<<<<<<<<<<
+ * cdef int ERROR_DIRECTORY
+ * ERROR_DIRECTORY = 267
+ */
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx_ERROR_PATH_NOT_FOUND = 3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":41
+ * ERROR_PATH_NOT_FOUND = 3
+ * cdef int ERROR_DIRECTORY
+ * ERROR_DIRECTORY = 267 # <<<<<<<<<<<<<<
+ *
+ * #python2.4 support, and other platform-dependent includes
+ */
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx_ERROR_DIRECTORY = 267;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":126
+ * StaticTuple_New, StaticTuple_SET_ITEM
+ *
+ * import_static_tuple_c() # <<<<<<<<<<<<<<
+ *
+ * cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise
+ */
+ __pyx_t_3 = import_static_tuple_c(); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":812
+ *
+ *
+ * _encode = binascii.b2a_base64 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__binascii); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__b2a_base64); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s___encode, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":985
+ * cdef object _kind_relocated
+ * cdef object _kind_tree_reference
+ * _kind_absent = "absent" # <<<<<<<<<<<<<<
+ * _kind_file = "file"
+ * _kind_directory = "directory"
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__absent));
+ __Pyx_GOTREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent);
+ __Pyx_DECREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent);
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__absent));
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_absent = ((PyObject *)__pyx_n_s__absent);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":986
+ * cdef object _kind_tree_reference
+ * _kind_absent = "absent"
+ * _kind_file = "file" # <<<<<<<<<<<<<<
+ * _kind_directory = "directory"
+ * _kind_symlink = "symlink"
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__file));
+ __Pyx_GOTREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file);
+ __Pyx_DECREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file);
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__file));
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_file = ((PyObject *)__pyx_n_s__file);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":987
+ * _kind_absent = "absent"
+ * _kind_file = "file"
+ * _kind_directory = "directory" # <<<<<<<<<<<<<<
+ * _kind_symlink = "symlink"
+ * _kind_relocated = "relocated"
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__directory));
+ __Pyx_GOTREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory);
+ __Pyx_DECREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory);
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__directory));
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_directory = ((PyObject *)__pyx_n_s__directory);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":988
+ * _kind_file = "file"
+ * _kind_directory = "directory"
+ * _kind_symlink = "symlink" # <<<<<<<<<<<<<<
+ * _kind_relocated = "relocated"
+ * _kind_tree_reference = "tree-reference"
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__symlink));
+ __Pyx_GOTREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink);
+ __Pyx_DECREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink);
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__symlink));
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_symlink = ((PyObject *)__pyx_n_s__symlink);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":989
+ * _kind_directory = "directory"
+ * _kind_symlink = "symlink"
+ * _kind_relocated = "relocated" # <<<<<<<<<<<<<<
+ * _kind_tree_reference = "tree-reference"
+ *
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__relocated));
+ __Pyx_GOTREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated);
+ __Pyx_DECREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated);
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__relocated));
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_relocated = ((PyObject *)__pyx_n_s__relocated);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":990
+ * _kind_symlink = "symlink"
+ * _kind_relocated = "relocated"
+ * _kind_tree_reference = "tree-reference" # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_33));
+ __Pyx_GOTREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference);
+ __Pyx_DECREF(__pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference);
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33));
+ __pyx_v_6bzrlib_21_dirstate_helpers_pyx__kind_tree_reference = ((PyObject *)__pyx_kp_s_33);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_dirstate_helpers_pyx.pyx":1
+ * # Copyright (C) 2007-2010 Canonical Ltd # <<<<<<<<<<<<<<
+ * #
+ * # This program is free software; you can redistribute it and/or modify
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s___py_memrchr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_51), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s__cmp_by_dirs); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_52), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s_54); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_53), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s___bisect_path_left); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_55), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s___bisect_path_right); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_56), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s__bisect_dirblock); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_57), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__Reader); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s___parse_dirblocks); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_58), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s___read_dirblocks); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_59), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_GetAttr(__pyx_m, __pyx_n_s__pack_stat); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_4, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_60), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__update_entry); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_61), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_4);
+ if (__pyx_m) {
+ __Pyx_AddTraceback("init bzrlib._dirstate_helpers_pyx");
+ Py_DECREF(__pyx_m); __pyx_m = 0;
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init bzrlib._dirstate_helpers_pyx");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if PY_MAJOR_VERSION < 3
+ return;
+ #else
+ return __pyx_m;
+ #endif
+}
+
+/* Runtime support code */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AS_STRING(kw_name));
+ #endif
+}
+
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *number, *more_or_less;
+
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ number = (num_expected == 1) ? "" : "s";
+ PyErr_Format(PyExc_TypeError,
+ #if PY_VERSION_HEX < 0x02050000
+ "%s() takes %s %d positional argument%s (%d given)",
+ #else
+ "%s() takes %s %zd positional argument%s (%zd given)",
+ #endif
+ func_name, more_or_less, num_expected, number, num_found);
+}
+
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
+ #else
+ if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
+ #endif
+ goto invalid_keyword_type;
+ } else {
+ for (name = first_kw_arg; *name; name++) {
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) break;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) break;
+ #endif
+ }
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ /* unexpected keyword found */
+ for (name=argnames; name != first_kw_arg; name++) {
+ if (**name == key) goto arg_passed_twice;
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) goto arg_passed_twice;
+ #endif
+ }
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, **name);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%s() got an unexpected keyword argument '%s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
+ long q = a / b;
+ long r = a - q*b;
+ q -= ((r != 0) & ((r ^ b) < 0));
+ return q;
+}
+
+
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+ PyErr_Format(PyExc_ValueError,
+ #if PY_VERSION_HEX < 0x02050000
+ "need more than %d value%s to unpack", (int)index,
+ #else
+ "need more than %zd value%s to unpack", index,
+ #endif
+ (index == 1) ? "" : "s");
+}
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+ PyErr_Format(PyExc_ValueError,
+ #if PY_VERSION_HEX < 0x02050000
+ "too many values to unpack (expected %d)", (int)expected);
+ #else
+ "too many values to unpack (expected %zd)", expected);
+ #endif
+}
+
+static PyObject *__Pyx_UnpackItem(PyObject *iter, Py_ssize_t index) {
+ PyObject *item;
+ if (!(item = PyIter_Next(iter))) {
+ if (!PyErr_Occurred()) {
+ __Pyx_RaiseNeedMoreValuesError(index);
+ }
+ }
+ return item;
+}
+
+static int __Pyx_EndUnpack(PyObject *iter, Py_ssize_t expected) {
+ PyObject *item;
+ if ((item = PyIter_Next(iter))) {
+ Py_DECREF(item);
+ __Pyx_RaiseTooManyValuesError(expected);
+ return -1;
+ }
+ else if (!PyErr_Occurred())
+ return 0;
+ else
+ return -1;
+}
+
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *local_type, *local_value, *local_tb;
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+ PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+ if (unlikely(tstate->curexc_type))
+ goto bad;
+ #if PY_MAJOR_VERSION >= 3
+ if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+ goto bad;
+ #endif
+ *type = local_type;
+ *value = local_value;
+ *tb = local_tb;
+ Py_INCREF(local_type);
+ Py_INCREF(local_value);
+ Py_INCREF(local_tb);
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = local_type;
+ tstate->exc_value = local_value;
+ tstate->exc_traceback = local_tb;
+ /* Make sure tstate is in a consistent state when we XDECREF
+ these objects (XDECREF may run arbitrary code). */
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+ return 0;
+bad:
+ *type = 0;
+ *value = 0;
+ *tb = 0;
+ Py_XDECREF(local_type);
+ Py_XDECREF(local_value);
+ Py_XDECREF(local_tb);
+ return -1;
+}
+
+
+
+static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+
+static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *py_import = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
+ if (!py_import)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, NULL);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(py_import);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ type = 0;
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+#else /* Python 3+ */
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (!PyExceptionClass_Check(type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+
+ PyErr_SetObject(type, value);
+
+ if (tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+ }
+
+bad:
+ return;
+}
+#endif
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
+ const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned char" :
+ "value too large to convert to unsigned char");
+ }
+ return (unsigned char)-1;
+ }
+ return (unsigned char)val;
+ }
+ return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
+ const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned short" :
+ "value too large to convert to unsigned short");
+ }
+ return (unsigned short)-1;
+ }
+ return (unsigned short)val;
+ }
+ return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
+ const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned int" :
+ "value too large to convert to unsigned int");
+ }
+ return (unsigned int)-1;
+ }
+ return (unsigned int)val;
+ }
+ return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
+ const char neg_one = (char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to char" :
+ "value too large to convert to char");
+ }
+ return (char)-1;
+ }
+ return (char)val;
+ }
+ return (char)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
+ const short neg_one = (short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to short" :
+ "value too large to convert to short");
+ }
+ return (short)-1;
+ }
+ return (short)val;
+ }
+ return (short)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
+ const signed char neg_one = (signed char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed char" :
+ "value too large to convert to signed char");
+ }
+ return (signed char)-1;
+ }
+ return (signed char)val;
+ }
+ return (signed char)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
+ const signed short neg_one = (signed short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed short" :
+ "value too large to convert to signed short");
+ }
+ return (signed short)-1;
+ }
+ return (signed short)val;
+ }
+ return (signed short)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
+ const signed int neg_one = (signed int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed int" :
+ "value too large to convert to signed int");
+ }
+ return (signed int)-1;
+ }
+ return (signed int)val;
+ }
+ return (signed int)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
+ const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ unsigned long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned long)-1;
+ val = __Pyx_PyInt_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
+ const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ unsigned PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
+ const long neg_one = (long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (long)-1;
+ val = __Pyx_PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
+ const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
+ const signed long neg_one = (signed long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ signed long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed long)-1;
+ val = __Pyx_PyInt_AsSignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
+ const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ signed PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsSignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static void __Pyx_WriteUnraisable(const char *name) {
+ PyObject *old_exc, *old_val, *old_tb;
+ PyObject *ctx;
+ __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
+ #if PY_MAJOR_VERSION < 3
+ ctx = PyString_FromString(name);
+ #else
+ ctx = PyUnicode_FromString(name);
+ #endif
+ __Pyx_ErrRestore(old_exc, old_val, old_tb);
+ if (!ctx) {
+ PyErr_WriteUnraisable(Py_None);
+ } else {
+ PyErr_WriteUnraisable(ctx);
+ Py_DECREF(ctx);
+ }
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
+ long size, int strict)
+{
+ PyObject *py_module = 0;
+ PyObject *result = 0;
+ PyObject *py_name = 0;
+ char warning[200];
+
+ py_module = __Pyx_ImportModule(module_name);
+ if (!py_module)
+ goto bad;
+ #if PY_MAJOR_VERSION < 3
+ py_name = PyString_FromString(class_name);
+ #else
+ py_name = PyUnicode_FromString(class_name);
+ #endif
+ if (!py_name)
+ goto bad;
+ result = PyObject_GetAttr(py_module, py_name);
+ Py_DECREF(py_name);
+ py_name = 0;
+ Py_DECREF(py_module);
+ py_module = 0;
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+ if (!strict && ((PyTypeObject *)result)->tp_basicsize > size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility",
+ module_name, class_name);
+ #if PY_VERSION_HEX < 0x02050000
+ PyErr_Warn(NULL, warning);
+ #else
+ PyErr_WarnEx(NULL, warning, 0);
+ #endif
+ }
+ else if (((PyTypeObject *)result)->tp_basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s.%s has the wrong size, try recompiling",
+ module_name, class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(py_module);
+ Py_XDECREF(result);
+ return 0;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(const char *name) {
+ PyObject *py_name = 0;
+ PyObject *py_module = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_name = PyString_FromString(name);
+ #else
+ py_name = PyUnicode_FromString(name);
+ #endif
+ if (!py_name)
+ goto bad;
+ py_module = PyImport_Import(py_name);
+ Py_DECREF(py_name);
+ return py_module;
+bad:
+ Py_XDECREF(py_name);
+ return 0;
+}
+#endif
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(const char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(__pyx_filename);
+ #else
+ py_srcfile = PyUnicode_FromString(__pyx_filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (__pyx_clineno) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*int kwonlyargcount,*/
+ #endif
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_GET(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else /* Python 3+ has unicode identifiers */
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+/* Type Conversion Functions */
+
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
+ PyNumberMethods *m;
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_VERSION_HEX < 0x03000000
+ if (PyInt_Check(x) || PyLong_Check(x))
+#else
+ if (PyLong_Check(x))
+#endif
+ return Py_INCREF(x), x;
+ m = Py_TYPE(x)->tp_as_number;
+#if PY_VERSION_HEX < 0x03000000
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = PyNumber_Long(x);
+ }
+#else
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Long(x);
+ }
+#endif
+ if (res) {
+#if PY_VERSION_HEX < 0x03000000
+ if (!PyInt_Check(res) && !PyLong_Check(res)) {
+#else
+ if (!PyLong_Check(res)) {
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%s__ returned non-%s (type %.200s)",
+ name, name, Py_TYPE(res)->tp_name);
+ Py_DECREF(res);
+ return NULL;
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject* x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+#if PY_VERSION_HEX < 0x02050000
+ if (ival <= LONG_MAX)
+ return PyInt_FromLong((long)ival);
+ else {
+ unsigned char *bytes = (unsigned char *) &ival;
+ int one = 1; int little = (int)*(unsigned char*)&one;
+ return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+ }
+#else
+ return PyInt_FromSize_t(ival);
+#endif
+}
+
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
+ unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
+ if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
+ return (size_t)-1;
+ } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
+ return (size_t)-1;
+ }
+ return (size_t)val;
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/bzrlib/_dirstate_helpers_pyx.h b/bzrlib/_dirstate_helpers_pyx.h
new file mode 100644
index 0000000..e12a036
--- /dev/null
+++ b/bzrlib/_dirstate_helpers_pyx.h
@@ -0,0 +1,17 @@
+#ifndef _DIRSTATE_HELPERS_PYX_H
+#define _DIRSTATE_HELPERS_PYX_H
+
+/* for intptr_t */
+#ifdef _MSC_VER
+#include <io.h>
+#else
+
+#if defined(__SVR4) && defined(__sun)
+#include <inttypes.h>
+#else
+#include <stdint.h>
+#endif
+
+#endif
+
+#endif
diff --git a/bzrlib/_dirstate_helpers_pyx.pyx b/bzrlib/_dirstate_helpers_pyx.pyx
new file mode 100644
index 0000000..76847b4
--- /dev/null
+++ b/bzrlib/_dirstate_helpers_pyx.pyx
@@ -0,0 +1,2031 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Helper functions for DirState.
+
+This is the Pyrex (compiled) implementation of the DirState helper functions.
+"""
+
+import binascii
+import bisect
+import errno
+import os
+import stat
+import sys
+
+from bzrlib import cache_utf8, errors, osutils
+from bzrlib.dirstate import DirState
+from bzrlib.osutils import parent_directories, pathjoin, splitpath
+
+
+# This is the Windows equivalent of ENOTDIR
+# It is defined in pywin32.winerror, but we don't want a strong dependency for
+# just an error code.
+# XXX: Perhaps we could get it from a windows header ?
+cdef int ERROR_PATH_NOT_FOUND
+ERROR_PATH_NOT_FOUND = 3
+cdef int ERROR_DIRECTORY
+ERROR_DIRECTORY = 267
+
+#python2.4 support, and other platform-dependent includes
+cdef extern from "python-compat.h":
+ unsigned long htonl(unsigned long)
+
+# Give Pyrex some function definitions for it to understand.
+# All of these are just hints to Pyrex, so that it can try to convert Python
+# objects into similar C objects (such as PyInt => int).
+# In anything defined 'cdef extern from XXX' the real C header will be
+# imported, and the real definition will be used from there. So these are just
+# hints, and do not need to match the C definitions exactly.
+
+cdef extern from *:
+ ctypedef unsigned long size_t
+
+cdef extern from "_dirstate_helpers_pyx.h":
+ ctypedef int intptr_t
+
+
+
+cdef extern from "stdlib.h":
+ unsigned long int strtoul(char *nptr, char **endptr, int base)
+
+
+cdef extern from 'sys/stat.h':
+ int S_ISDIR(int mode)
+ int S_ISREG(int mode)
+ # On win32, this actually comes from "python-compat.h"
+ int S_ISLNK(int mode)
+ int S_IXUSR
+
+# These functions give us access to a bit of the 'bare metal' of Python
+# objects, rather than going through the object abstraction (for example,
+# PyList_Append, rather than getting the 'append' attribute of the object,
+# creating a tuple, and then using PyObject_CallObject).
+# Functions that return (or take) a void* are meant to grab a C PyObject*. This
+# differs from the Pyrex 'object'. If you declare a variable as 'object' Pyrex
+# will automatically Py_INCREF and Py_DECREF when appropriate. But for some
+# inner loops, we don't need to do that at all, as the reference only lasts for
+# a very short time.
+# Note that the C API GetItem calls borrow references, so Pyrex does the wrong
+# thing if you declare e.g. object PyList_GetItem(object lst, int index) - you
+# need to Py_INCREF the result manually.
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t
+ ctypedef struct PyObject:
+ pass
+ int PyList_Append(object lst, object item) except -1
+ void *PyList_GetItem_object_void "PyList_GET_ITEM" (object lst, int index)
+ void *PyList_GetItem_void_void "PyList_GET_ITEM" (void * lst, int index)
+ object PyList_GET_ITEM(object lst, Py_ssize_t index)
+ int PyList_CheckExact(object)
+ Py_ssize_t PyList_GET_SIZE (object p)
+
+ void *PyTuple_GetItem_void_void "PyTuple_GET_ITEM" (void* tpl, int index)
+ object PyTuple_GetItem_void_object "PyTuple_GET_ITEM" (void* tpl, int index)
+ object PyTuple_GET_ITEM(object tpl, Py_ssize_t index)
+
+ unsigned long PyInt_AsUnsignedLongMask(object number) except? -1
+
+ char *PyString_AsString(object p)
+ char *PyString_AsString_obj "PyString_AsString" (PyObject *string)
+ char *PyString_AS_STRING_void "PyString_AS_STRING" (void *p)
+ int PyString_AsStringAndSize(object str, char **buffer, Py_ssize_t *length) except -1
+ object PyString_FromString(char *)
+ object PyString_FromStringAndSize(char *, Py_ssize_t)
+ int PyString_Size(object p)
+ int PyString_GET_SIZE_void "PyString_GET_SIZE" (void *p)
+ int PyString_CheckExact(object p)
+ void Py_INCREF(object o)
+ void Py_DECREF(object o)
+
+
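+# A minimal illustrative sketch (a hypothetical helper, not used anywhere else
+# in this module) of the pattern described above: the void*-returning GET_ITEM
+# alias hands back a *borrowed* C PyObject*, so the inner loop does no
+# refcounting at all; the list itself keeps the items alive for the duration.
+cdef int _count_empty_strings(object lst): # illustrative only
+    cdef int i, n, count
+    cdef void *item
+    count = 0
+    n = PyList_GET_SIZE(lst)
+    for i from 0 <= i < n:
+        # borrowed reference; no Py_INCREF needed since it is never stored
+        item = PyList_GetItem_object_void(lst, i)
+        # assumes every element is a plain str (as in the dirstate lists)
+        if PyString_GET_SIZE_void(item) == 0:
+            count = count + 1
+    return count
+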
+cdef extern from "string.h":
+ int strncmp(char *s1, char *s2, int len)
+ void *memchr(void *s, int c, size_t len)
+ int memcmp(void *b1, void *b2, size_t len)
+ # ??? memrchr is a GNU extension :(
+ # void *memrchr(void *s, int c, size_t len)
+
+# cimport all of the definitions we will need to access
+from _static_tuple_c cimport import_static_tuple_c, StaticTuple, \
+ StaticTuple_New, StaticTuple_SET_ITEM
+
+import_static_tuple_c()
+
+cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise
+ # memrchr seems to be a GNU extension, so we have to implement it ourselves
+ cdef char *pos
+ cdef char *start
+
+ start = <char*>s
+ pos = start + n - 1
+ while pos >= start:
+ if pos[0] == c:
+ return <void*>pos
+ pos = pos - 1
+ return NULL
+
+
+def _py_memrchr(s, c):
+ """Just to expose _my_memrchr for testing.
+
+ :param s: The Python string to search
+ :param c: The character to search for
+ :return: The offset to the last instance of 'c' in s
+ """
+ cdef void *_s
+ cdef void *found
+ cdef int length
+ cdef char *_c
+
+ _s = PyString_AsString(s)
+ length = PyString_Size(s)
+
+ _c = PyString_AsString(c)
+ assert PyString_Size(c) == 1,\
+ 'Must be a single character string, not %s' % (c,)
+ found = _my_memrchr(_s, _c[0], length)
+ if found == NULL:
+ return None
+ return <char*>found - <char*>_s
+
+
+cdef object safe_string_from_size(char *s, Py_ssize_t size):
+ if size < 0:
+ raise AssertionError(
+ 'tried to create a string with an invalid size: %d'
+ % (size))
+ return PyString_FromStringAndSize(s, size)
+
+
+cdef int _is_aligned(void *ptr): # cannot_raise
+ """Is this pointer aligned to an integer size offset?
+
+ :return: 1 if this pointer is aligned, 0 otherwise.
+ """
+ return ((<intptr_t>ptr) & ((sizeof(int))-1)) == 0
+
+
+cdef int _cmp_by_dirs(char *path1, int size1, char *path2, int size2): # cannot_raise
+ cdef unsigned char *cur1
+ cdef unsigned char *cur2
+ cdef unsigned char *end1
+ cdef unsigned char *end2
+ cdef int *cur_int1
+ cdef int *cur_int2
+ cdef int *end_int1
+ cdef int *end_int2
+
+ if path1 == path2 and size1 == size2:
+ return 0
+
+ end1 = <unsigned char*>path1+size1
+ end2 = <unsigned char*>path2+size2
+
+ # Use 32-bit comparisons for the matching portion of the string.
+    # Almost all CPUs are faster at loading and comparing 32-bit integers
+    # than they are at 8-bit integers.
+    # 99% of the time these will be aligned, but in case they aren't, just
+    # skip this loop.
+ if _is_aligned(path1) and _is_aligned(path2):
+ cur_int1 = <int*>path1
+ cur_int2 = <int*>path2
+ end_int1 = <int*>(path1 + size1 - (size1 % sizeof(int)))
+ end_int2 = <int*>(path2 + size2 - (size2 % sizeof(int)))
+
+ while cur_int1 < end_int1 and cur_int2 < end_int2:
+ if cur_int1[0] != cur_int2[0]:
+ break
+ cur_int1 = cur_int1 + 1
+ cur_int2 = cur_int2 + 1
+
+ cur1 = <unsigned char*>cur_int1
+ cur2 = <unsigned char*>cur_int2
+ else:
+ cur1 = <unsigned char*>path1
+ cur2 = <unsigned char*>path2
+
+ while cur1 < end1 and cur2 < end2:
+ if cur1[0] == cur2[0]:
+ # This character matches, just go to the next one
+ cur1 = cur1 + 1
+ cur2 = cur2 + 1
+ continue
+ # The current characters do not match
+ if cur1[0] == c'/':
+ return -1 # Reached the end of path1 segment first
+ elif cur2[0] == c'/':
+ return 1 # Reached the end of path2 segment first
+ elif cur1[0] < cur2[0]:
+ return -1
+ else:
+ return 1
+
+ # We reached the end of at least one of the strings
+ if cur1 < end1:
+ return 1 # Not at the end of cur1, must be at the end of cur2
+ if cur2 < end2:
+ return -1 # At the end of cur1, but not at cur2
+ # We reached the end of both strings
+ return 0
+
+
+def cmp_by_dirs(path1, path2):
+ """Compare two paths directory by directory.
+
+ This is equivalent to doing::
+
+ cmp(path1.split('/'), path2.split('/'))
+
+    The idea is that you should compare path components separately. This
+    differs from plain ``cmp(path1, path2)`` for paths like ``'a-b'`` and
+    ``'a/b'``: "a-b" comes after "a" in both orderings, but lexically it would
+    come before "a/b", whereas this comparison sorts it after "a/b".
+
+ :param path1: first path
+ :param path2: second path
+ :return: negative number if ``path1`` comes first,
+ 0 if paths are equal,
+ and positive number if ``path2`` sorts first
+ """
+ if not PyString_CheckExact(path1):
+ raise TypeError("'path1' must be a plain string, not %s: %r"
+ % (type(path1), path1))
+ if not PyString_CheckExact(path2):
+ raise TypeError("'path2' must be a plain string, not %s: %r"
+ % (type(path2), path2))
+ return _cmp_by_dirs(PyString_AsString(path1),
+ PyString_Size(path1),
+ PyString_AsString(path2),
+ PyString_Size(path2))
+
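+# For illustration only (a sketch of the ordering, not code from the original
+# module): component-wise comparison sorts 'a/b' before 'a-b', even though a
+# plain byte comparison would put 'a-b' first ('-' sorts before '/'):
+#
+#   cmp_by_dirs('a', 'a/b') < 0     # a parent sorts before its children
+#   cmp_by_dirs('a-b', 'a/b') > 0   # ['a-b'] > ['a', 'b'] component-wise
+#   cmp('a-b', 'a/b') < 0           # plain comparison: '-' (0x2d) < '/' (0x2f)
+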
+
+def _cmp_path_by_dirblock(path1, path2):
+ """Compare two paths based on what directory they are in.
+
+ This generates a sort order, such that all children of a directory are
+ sorted together, and grandchildren are in the same order as the
+ children appear. But all grandchildren come after all children.
+
+ In other words, all entries in a directory are sorted together, and
+    directories are sorted in cmp_by_dirs order.
+
+ :param path1: first path
+ :param path2: the second path
+ :return: negative number if ``path1`` comes first,
+ 0 if paths are equal
+ and a positive number if ``path2`` sorts first
+ """
+ if not PyString_CheckExact(path1):
+ raise TypeError("'path1' must be a plain string, not %s: %r"
+ % (type(path1), path1))
+ if not PyString_CheckExact(path2):
+ raise TypeError("'path2' must be a plain string, not %s: %r"
+ % (type(path2), path2))
+ return _cmp_path_by_dirblock_intern(PyString_AsString(path1),
+ PyString_Size(path1),
+ PyString_AsString(path2),
+ PyString_Size(path2))
+
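+# Illustrative examples of the dirblock ordering (a sketch, not code from the
+# original module): entries are grouped by containing directory, so direct
+# children of a directory sort before any of its grandchildren:
+#
+#   _cmp_path_by_dirblock('a-b', 'a/b') < 0    # ('', 'a-b') vs ('a', 'b')
+#   _cmp_path_by_dirblock('a/z', 'a/b/c') < 0  # ('a', 'z') vs ('a/b', 'c')
+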
+
+cdef int _cmp_path_by_dirblock_intern(char *path1, int path1_len,
+ char *path2, int path2_len): # cannot_raise
+ """Compare two paths by what directory they are in.
+
+ see ``_cmp_path_by_dirblock`` for details.
+ """
+ cdef char *dirname1
+ cdef int dirname1_len
+ cdef char *dirname2
+ cdef int dirname2_len
+ cdef char *basename1
+ cdef int basename1_len
+ cdef char *basename2
+ cdef int basename2_len
+ cdef int cur_len
+ cdef int cmp_val
+
+ if path1_len == 0 and path2_len == 0:
+ return 0
+
+ if path1 == path2 and path1_len == path2_len:
+ return 0
+
+ if path1_len == 0:
+ return -1
+
+ if path2_len == 0:
+ return 1
+
+ basename1 = <char*>_my_memrchr(path1, c'/', path1_len)
+
+ if basename1 == NULL:
+ basename1 = path1
+ basename1_len = path1_len
+ dirname1 = ''
+ dirname1_len = 0
+ else:
+ dirname1 = path1
+ dirname1_len = basename1 - path1
+ basename1 = basename1 + 1
+ basename1_len = path1_len - dirname1_len - 1
+
+ basename2 = <char*>_my_memrchr(path2, c'/', path2_len)
+
+ if basename2 == NULL:
+ basename2 = path2
+ basename2_len = path2_len
+ dirname2 = ''
+ dirname2_len = 0
+ else:
+ dirname2 = path2
+ dirname2_len = basename2 - path2
+ basename2 = basename2 + 1
+ basename2_len = path2_len - dirname2_len - 1
+
+ cmp_val = _cmp_by_dirs(dirname1, dirname1_len,
+ dirname2, dirname2_len)
+ if cmp_val != 0:
+ return cmp_val
+
+ cur_len = basename1_len
+ if basename2_len < basename1_len:
+ cur_len = basename2_len
+
+ cmp_val = memcmp(basename1, basename2, cur_len)
+ if cmp_val != 0:
+ return cmp_val
+ if basename1_len == basename2_len:
+ return 0
+ if basename1_len < basename2_len:
+ return -1
+ return 1
+
+
+def _bisect_path_left(paths, path):
+ """Return the index where to insert path into paths.
+
+ This uses a path-wise comparison so we get::
+ a
+ a-b
+ a=b
+ a/b
+ Rather than::
+ a
+ a-b
+ a/b
+ a=b
+ :param paths: A list of paths to search through
+ :param path: A single path to insert
+ :return: An offset where 'path' can be inserted.
+ :seealso: bisect.bisect_left
+ """
+ cdef int _lo
+ cdef int _hi
+ cdef int _mid
+ cdef char *path_cstr
+ cdef int path_size
+ cdef char *cur_cstr
+ cdef int cur_size
+ cdef void *cur
+
+ if not PyList_CheckExact(paths):
+ raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ % (type(paths), paths))
+ if not PyString_CheckExact(path):
+ raise TypeError("you must pass a string for 'path' not: %s %r"
+ % (type(path), path))
+
+ _hi = len(paths)
+ _lo = 0
+
+ path_cstr = PyString_AsString(path)
+ path_size = PyString_Size(path)
+
+ while _lo < _hi:
+ _mid = (_lo + _hi) / 2
+ cur = PyList_GetItem_object_void(paths, _mid)
+ cur_cstr = PyString_AS_STRING_void(cur)
+ cur_size = PyString_GET_SIZE_void(cur)
+ if _cmp_path_by_dirblock_intern(cur_cstr, cur_size,
+ path_cstr, path_size) < 0:
+ _lo = _mid + 1
+ else:
+ _hi = _mid
+ return _lo
+
+
+def _bisect_path_right(paths, path):
+ """Return the index where to insert path into paths.
+
+ This uses a path-wise comparison so we get::
+ a
+ a-b
+ a=b
+ a/b
+ Rather than::
+ a
+ a-b
+ a/b
+ a=b
+ :param paths: A list of paths to search through
+ :param path: A single path to insert
+ :return: An offset where 'path' can be inserted.
+ :seealso: bisect.bisect_right
+ """
+ cdef int _lo
+ cdef int _hi
+ cdef int _mid
+ cdef char *path_cstr
+ cdef int path_size
+ cdef char *cur_cstr
+ cdef int cur_size
+ cdef void *cur
+
+ if not PyList_CheckExact(paths):
+ raise TypeError("you must pass a python list for 'paths' not: %s %r"
+ % (type(paths), paths))
+ if not PyString_CheckExact(path):
+ raise TypeError("you must pass a string for 'path' not: %s %r"
+ % (type(path), path))
+
+ _hi = len(paths)
+ _lo = 0
+
+ path_cstr = PyString_AsString(path)
+ path_size = PyString_Size(path)
+
+ while _lo < _hi:
+ _mid = (_lo + _hi) / 2
+ cur = PyList_GetItem_object_void(paths, _mid)
+ cur_cstr = PyString_AS_STRING_void(cur)
+ cur_size = PyString_GET_SIZE_void(cur)
+ if _cmp_path_by_dirblock_intern(path_cstr, path_size,
+ cur_cstr, cur_size) < 0:
+ _hi = _mid
+ else:
+ _lo = _mid + 1
+ return _lo
+
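+# Usage sketch (values assumed for illustration; 'paths' must already be in
+# the dirblock order described above):
+#
+#   paths = ['a', 'a-b', 'a=b', 'a/b']
+#   _bisect_path_left(paths, 'a=b')    # -> 2, leftmost insertion point
+#   _bisect_path_right(paths, 'a=b')   # -> 3, rightmost insertion point
+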
+
+def bisect_dirblock(dirblocks, dirname, lo=0, hi=None, cache=None):
+ """Return the index where to insert dirname into the dirblocks.
+
+    The return value idx is such that all directory blocks in dirblocks[:idx]
+    have names < dirname, and all blocks in dirblocks[idx:] have names >=
+    dirname.
+
+    Optional args lo (default 0) and hi (default len(dirblocks)) bound the
+    slice of dirblocks to be searched.
+ """
+ cdef int _lo
+ cdef int _hi
+ cdef int _mid
+ cdef char *dirname_cstr
+ cdef int dirname_size
+ cdef char *cur_cstr
+ cdef int cur_size
+ cdef void *cur
+
+ if not PyList_CheckExact(dirblocks):
+ raise TypeError("you must pass a python list for 'dirblocks' not: %s %r"
+ % (type(dirblocks), dirblocks))
+ if not PyString_CheckExact(dirname):
+ raise TypeError("you must pass a string for dirname not: %s %r"
+ % (type(dirname), dirname))
+ if hi is None:
+ _hi = len(dirblocks)
+ else:
+ _hi = hi
+
+ _lo = lo
+ dirname_cstr = PyString_AsString(dirname)
+ dirname_size = PyString_Size(dirname)
+
+ while _lo < _hi:
+ _mid = (_lo + _hi) / 2
+ # Grab the dirname for the current dirblock
+ # cur = dirblocks[_mid][0]
+ cur = PyTuple_GetItem_void_void(
+ PyList_GetItem_object_void(dirblocks, _mid), 0)
+ cur_cstr = PyString_AS_STRING_void(cur)
+ cur_size = PyString_GET_SIZE_void(cur)
+ if _cmp_by_dirs(cur_cstr, cur_size, dirname_cstr, dirname_size) < 0:
+ _lo = _mid + 1
+ else:
+ _hi = _mid
+ return _lo
+
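+# Usage sketch (block contents elided; dirnames assumed for illustration):
+#
+#   dirblocks = [('', [...]), ('a', [...]), ('a/b', [...])]
+#   bisect_dirblock(dirblocks, 'a/b')  # -> 2, the index of the 'a/b' block
+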
+
+cdef class Reader:
+ """Maintain the current location, and return fields as you parse them."""
+
+ cdef object state # The DirState object
+ cdef object text # The overall string object
+ cdef char *text_cstr # Pointer to the beginning of text
+ cdef int text_size # Length of text
+
+ cdef char *end_cstr # End of text
+ cdef char *cur_cstr # Pointer to the current record
+ cdef char *next # Pointer to the end of this record
+
+ def __init__(self, text, state):
+ self.state = state
+ self.text = text
+ self.text_cstr = PyString_AsString(text)
+ self.text_size = PyString_Size(text)
+ self.end_cstr = self.text_cstr + self.text_size
+ self.cur_cstr = self.text_cstr
+
+ cdef char *get_next(self, int *size) except NULL:
+ """Return a pointer to the start of the next field."""
+ cdef char *next
+ cdef Py_ssize_t extra_len
+
+ if self.cur_cstr == NULL:
+            raise AssertionError('get_next() called when cur_cstr is NULL')
+ elif self.cur_cstr >= self.end_cstr:
+ raise AssertionError('get_next() called when there are no chars'
+ ' left')
+ next = self.cur_cstr
+ self.cur_cstr = <char*>memchr(next, c'\0', self.end_cstr - next)
+ if self.cur_cstr == NULL:
+ extra_len = self.end_cstr - next
+ raise errors.DirstateCorrupt(self.state,
+ 'failed to find trailing NULL (\\0).'
+ ' Trailing garbage: %r'
+ % safe_string_from_size(next, extra_len))
+ size[0] = self.cur_cstr - next
+ self.cur_cstr = self.cur_cstr + 1
+ return next
+
+ cdef object get_next_str(self):
+ """Get the next field as a Python string."""
+ cdef int size
+ cdef char *next
+ next = self.get_next(&size)
+ return safe_string_from_size(next, size)
+
+ cdef int _init(self) except -1:
+ """Get the pointer ready.
+
+ This assumes that the dirstate header has already been read, and we
+ already have the dirblock string loaded into memory.
+ This just initializes our memory pointers, etc for parsing of the
+ dirblock string.
+ """
+ cdef char *first
+ cdef int size
+ # The first field should be an empty string left over from the Header
+ first = self.get_next(&size)
+        if size != 0 or first[0] != c'\0':
+ raise AssertionError('First character should be null not: %s'
+ % (first,))
+ return 0
+
+ cdef object _get_entry(self, int num_trees, void **p_current_dirname,
+ int *new_block):
+ """Extract the next entry.
+
+ This parses the next entry based on the current location in
+ ``self.cur_cstr``.
+ Each entry can be considered a "row" in the total table. And each row
+ has a fixed number of columns. It is generally broken up into "key"
+ columns, then "current" columns, and then "parent" columns.
+
+ :param num_trees: How many parent trees need to be parsed
+ :param p_current_dirname: A pointer to the current PyString
+ representing the directory name.
+ We pass this in as a void * so that pyrex doesn't have to
+ increment/decrement the PyObject reference counter for each
+ _get_entry call.
+ We use a pointer so that _get_entry can update it with the new
+ value.
+ :param new_block: This is to let the caller know that it needs to
+ create a new directory block to store the next entry.
+ """
+ cdef StaticTuple path_name_file_id_key
+ cdef StaticTuple tmp
+ cdef char *entry_size_cstr
+ cdef unsigned long int entry_size
+ cdef char* executable_cstr
+ cdef int is_executable
+ cdef char* dirname_cstr
+ cdef char* trailing
+ cdef int cur_size
+ cdef int i
+ cdef object minikind
+ cdef object fingerprint
+ cdef object info
+
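+        # Illustrative layout of one serialized row, inferred from the
+        # get_next() calls below (field values are invented):
+        #   dirname \0 basename \0 file-id \0
+        #   [minikind \0 fingerprint \0 size \0 y|n \0 packed_stat-or-revid \0] * num_trees
+        #   \n \0
+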
+ # Read the 'key' information (dirname, name, file_id)
+ dirname_cstr = self.get_next(&cur_size)
+ # Check to see if we have started a new directory block.
+ # If so, then we need to create a new dirname PyString, so that it can
+ # be used in all of the tuples. This saves time and memory, by re-using
+ # the same object repeatedly.
+
+ # Do the cheap 'length of string' check first. If the string is a
+ # different length, then we *have* to be a different directory.
+ if (cur_size != PyString_GET_SIZE_void(p_current_dirname[0])
+ or strncmp(dirname_cstr,
+ # Extract the char* from our current dirname string. We
+ # know it is a PyString, so we can use
+ # PyString_AS_STRING, we use the _void version because
+ # we are tricking Pyrex by using a void* rather than an
+ # <object>
+ PyString_AS_STRING_void(p_current_dirname[0]),
+ cur_size+1) != 0):
+ dirname = safe_string_from_size(dirname_cstr, cur_size)
+ p_current_dirname[0] = <void*>dirname
+ new_block[0] = 1
+ else:
+ new_block[0] = 0
+
+ # Build up the key that will be used.
+ # By using <object>(void *) Pyrex will automatically handle the
+ # Py_INCREF that we need.
+ cur_dirname = <object>p_current_dirname[0]
+ # Use StaticTuple_New to pre-allocate, rather than creating a regular
+ # tuple and passing it to the StaticTuple constructor.
+ # path_name_file_id_key = StaticTuple(<object>p_current_dirname[0],
+ # self.get_next_str(),
+ # self.get_next_str(),
+ # )
+ tmp = StaticTuple_New(3)
+ Py_INCREF(cur_dirname); StaticTuple_SET_ITEM(tmp, 0, cur_dirname)
+ cur_basename = self.get_next_str()
+ cur_file_id = self.get_next_str()
+ Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename)
+ Py_INCREF(cur_file_id); StaticTuple_SET_ITEM(tmp, 2, cur_file_id)
+ path_name_file_id_key = tmp
+
+ # Parse all of the per-tree information. current has the information in
+ # the same location as parent trees. The only difference is that 'info'
+ # is a 'packed_stat' for current, while it is a 'revision_id' for
+ # parent trees.
+ # minikind, fingerprint, and info will be returned as regular python
+ # strings
+ # entry_size and is_executable will be parsed into a python Long and
+ # python Boolean, respectively.
+        # TODO: jam 20070718 Consider changing the entry_size conversion to
+ # prefer python Int when possible. They are generally faster to
+ # work with, and it will be rare that we have a file >2GB.
+ # Especially since this code is pretty much fixed at a max of
+ # 4GB.
+ trees = []
+ for i from 0 <= i < num_trees:
+ minikind = self.get_next_str()
+ fingerprint = self.get_next_str()
+ entry_size_cstr = self.get_next(&cur_size)
+ entry_size = strtoul(entry_size_cstr, NULL, 10)
+ executable_cstr = self.get_next(&cur_size)
+ is_executable = (executable_cstr[0] == c'y')
+ info = self.get_next_str()
+ # TODO: If we want to use StaticTuple_New here we need to be pretty
+ # careful. We are relying on a bit of Pyrex
+ # automatic-conversion from 'int' to PyInt, and that doesn't
+ # play well with the StaticTuple_SET_ITEM macro.
+            #       Timing doesn't (yet) show a worthwhile improvement in speed
+ # versus complexity and maintainability.
+ # tmp = StaticTuple_New(5)
+ # Py_INCREF(minikind); StaticTuple_SET_ITEM(tmp, 0, minikind)
+ # Py_INCREF(fingerprint); StaticTuple_SET_ITEM(tmp, 1, fingerprint)
+ # Py_INCREF(entry_size); StaticTuple_SET_ITEM(tmp, 2, entry_size)
+ # Py_INCREF(is_executable); StaticTuple_SET_ITEM(tmp, 3, is_executable)
+ # Py_INCREF(info); StaticTuple_SET_ITEM(tmp, 4, info)
+ # PyList_Append(trees, tmp)
+ PyList_Append(trees, StaticTuple(
+ minikind, # minikind
+ fingerprint, # fingerprint
+ entry_size, # size
+ is_executable,# executable
+ info, # packed_stat or revision_id
+ ))
+
+ # The returned tuple is (key, [trees])
+ ret = (path_name_file_id_key, trees)
+        # Ignore the trailing newline, but assert that it does exist; this
+ # ensures that we always finish parsing a line on an end-of-entry
+ # marker.
+ trailing = self.get_next(&cur_size)
+ if cur_size != 1 or trailing[0] != c'\n':
+ raise errors.DirstateCorrupt(self.state,
+ 'Bad parse, we expected to end on \\n, not: %d %s: %s'
+ % (cur_size, safe_string_from_size(trailing, cur_size),
+ ret))
+ return ret
+
+ def _parse_dirblocks(self):
+ """Parse all dirblocks in the state file."""
+ cdef int num_trees
+ cdef object current_block
+ cdef object entry
+ cdef void * current_dirname
+ cdef int new_block
+ cdef int expected_entry_count
+ cdef int entry_count
+
+ num_trees = self.state._num_present_parents() + 1
+ expected_entry_count = self.state._num_entries
+
+ # Ignore the first record
+ self._init()
+
+ current_block = []
+ dirblocks = [('', current_block), ('', [])]
+ self.state._dirblocks = dirblocks
+ obj = ''
+ current_dirname = <void*>obj
+ new_block = 0
+ entry_count = 0
+
+ # TODO: jam 2007-05-07 Consider pre-allocating some space for the
+ # members, and then growing and shrinking from there. If most
+ # directories have close to 10 entries in them, it would save a
+ # few mallocs if we default our list size to something
+ # reasonable. Or we could malloc it to something large (100 or
+ # so), and then truncate. That would give us a malloc + realloc,
+ # rather than lots of reallocs.
+ while self.cur_cstr < self.end_cstr:
+ entry = self._get_entry(num_trees, &current_dirname, &new_block)
+ if new_block:
+ # new block - different dirname
+ current_block = []
+ PyList_Append(dirblocks,
+ (<object>current_dirname, current_block))
+ PyList_Append(current_block, entry)
+ entry_count = entry_count + 1
+ if entry_count != expected_entry_count:
+ raise errors.DirstateCorrupt(self.state,
+ 'We read the wrong number of entries.'
+ ' We expected to read %s, but read %s'
+ % (expected_entry_count, entry_count))
+ self.state._split_root_dirblock_into_contents()
+
+
+def _read_dirblocks(state):
+ """Read in the dirblocks for the given DirState object.
+
+ This is tightly bound to the DirState internal representation. It should be
+ thought of as a member function, which is only separated out so that we can
+ re-write it in pyrex.
+
+ :param state: A DirState object.
+ :return: None
+ :postcondition: The dirblocks will be loaded into the appropriate fields in
+ the DirState object.
+ """
+ state._state_file.seek(state._end_of_header)
+ text = state._state_file.read()
+ # TODO: check the crc checksums. crc_measured = zlib.crc32(text)
+
+ reader = Reader(text, state)
+
+ reader._parse_dirblocks()
+ state._dirblock_state = DirState.IN_MEMORY_UNMODIFIED
+
+
+cdef int minikind_from_mode(int mode): # cannot_raise
+ # in order of frequency:
+ if S_ISREG(mode):
+ return c"f"
+ if S_ISDIR(mode):
+ return c"d"
+ if S_ISLNK(mode):
+ return c"l"
+ return 0
+
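+# Minikind codes used throughout this module (enumerated in _minikind_to_kind
+# below): 'f' file, 'd' directory, 'l' symlink, 'a' absent, 'r' relocated,
+# 't' tree-reference.
+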
+
+_encode = binascii.b2a_base64
+
+
+cdef _pack_stat(stat_value):
+ """return a string representing the stat value's key fields.
+
+    :param stat_value: A stat object with st_size, st_mtime, st_ctime, st_dev,
+ st_ino and st_mode fields.
+ """
+ cdef char result[6*4] # 6 long ints
+ cdef int *aliased
+ aliased = <int *>result
+ aliased[0] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_size))
+    # mtime and ctime will often be floats, but PyInt_AsUnsignedLongMask
+    # truncates them to integers for us
+ aliased[1] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mtime))
+ aliased[2] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ctime))
+ aliased[3] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_dev))
+ aliased[4] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_ino))
+ aliased[5] = htonl(PyInt_AsUnsignedLongMask(stat_value.st_mode))
+ packed = PyString_FromStringAndSize(result, 6*4)
+ return _encode(packed)[:-1]
+
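+# A rough pure-Python equivalent of _pack_stat (an illustrative sketch only,
+# assuming 32-bit fields in the same order; not code from this module):
+#
+#   import struct, binascii
+#   def _pack_stat_sketch(st):
+#       packed = struct.pack('>6L',
+#           st.st_size & 0xFFFFFFFF, int(st.st_mtime) & 0xFFFFFFFF,
+#           int(st.st_ctime) & 0xFFFFFFFF, st.st_dev & 0xFFFFFFFF,
+#           st.st_ino & 0xFFFFFFFF, st.st_mode & 0xFFFFFFFF)
+#       return binascii.b2a_base64(packed)[:-1]
+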
+
+def pack_stat(stat_value):
+ """Convert stat value into a packed representation quickly with pyrex"""
+ return _pack_stat(stat_value)
+
+
+def update_entry(self, entry, abspath, stat_value):
+ """Update the entry based on what is actually on disk.
+
+ This function only calculates the sha if it needs to - if the entry is
+ uncachable, or clearly different to the first parent's entry, no sha
+ is calculated, and None is returned.
+
+ :param entry: This is the dirblock entry for the file in question.
+ :param abspath: The path on disk for this file.
+ :param stat_value: (optional) if we already have done a stat on the
+ file, re-use it.
+    :return: None, or the sha1 hexdigest of the file (40 bytes), or the link
+        target of a symlink.
+ """
+ return _update_entry(self, entry, abspath, stat_value)
+
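+# Usage sketch (names assumed for illustration): given a DirState ``state`` and
+# one of its entries, refresh the cached details from what is on disk:
+#
+#   st = os.lstat(abspath)
+#   link_or_sha1 = update_entry(state, entry, abspath, st)
+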
+
+cdef _update_entry(self, entry, abspath, stat_value):
+ """Update the entry based on what is actually on disk.
+
+ This function only calculates the sha if it needs to - if the entry is
+ uncachable, or clearly different to the first parent's entry, no sha
+ is calculated, and None is returned.
+
+ :param self: The dirstate object this is operating on.
+ :param entry: This is the dirblock entry for the file in question.
+ :param abspath: The path on disk for this file.
+ :param stat_value: The stat value done on the path.
+    :return: None, or the sha1 hexdigest of the file (40 bytes), or the link
+        target of a symlink.
+ """
+ # TODO - require pyrex 0.9.8, then use a pyd file to define access to the
+ # _st mode of the compiled stat objects.
+ cdef int minikind, saved_minikind
+ cdef void * details
+ cdef int worth_saving
+ minikind = minikind_from_mode(stat_value.st_mode)
+ if 0 == minikind:
+ return None
+ packed_stat = _pack_stat(stat_value)
+ details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0)
+ saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0]
+ if minikind == c'd' and saved_minikind == c't':
+ minikind = c't'
+ saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1)
+ saved_file_size = PyTuple_GetItem_void_object(details, 2)
+ saved_executable = PyTuple_GetItem_void_object(details, 3)
+ saved_packed_stat = PyTuple_GetItem_void_object(details, 4)
+ # Deal with pyrex decrefing the objects
+ Py_INCREF(saved_link_or_sha1)
+ Py_INCREF(saved_file_size)
+ Py_INCREF(saved_executable)
+ Py_INCREF(saved_packed_stat)
+ #(saved_minikind, saved_link_or_sha1, saved_file_size,
+ # saved_executable, saved_packed_stat) = entry[1][0]
+
+ if (minikind == saved_minikind
+ and packed_stat == saved_packed_stat):
+ # The stat hasn't changed since we saved, so we can re-use the
+ # saved sha hash.
+ if minikind == c'd':
+ return None
+
+ # size should also be in packed_stat
+ if saved_file_size == stat_value.st_size:
+ return saved_link_or_sha1
+
+ # If we have gotten this far, that means that we need to actually
+ # process this entry.
+ link_or_sha1 = None
+ worth_saving = 1
+ if minikind == c'f':
+ executable = self._is_executable(stat_value.st_mode,
+ saved_executable)
+ if self._cutoff_time is None:
+ self._sha_cutoff_time()
+ if (stat_value.st_mtime < self._cutoff_time
+ and stat_value.st_ctime < self._cutoff_time
+ and len(entry[1]) > 1
+ and entry[1][1][0] != 'a'):
+            # Could check for size changes to further avoid unnecessary
+            # sha1 computation. However, the most prominent case of
+            # over-hashing is during the initial add, which this catches.
+ link_or_sha1 = self._sha1_file(abspath)
+ entry[1][0] = ('f', link_or_sha1, stat_value.st_size,
+ executable, packed_stat)
+ else:
+            # It is not worth caching the sha1 for this file: either it is
+            # too new, or it is newly added. Regardless, the only things we
+            # are changing are derived from the stat, and so are not worth
+            # caching. So we do *not* set the IN_MEMORY_MODIFIED flag. (But
+            # we'll save the updated values if there is *other* data worth
+            # saving.)
+ entry[1][0] = ('f', '', stat_value.st_size, executable,
+ DirState.NULLSTAT)
+ worth_saving = 0
+ elif minikind == c'd':
+ entry[1][0] = ('d', '', 0, False, packed_stat)
+ if saved_minikind != c'd':
+ # This changed from something into a directory. Make sure we
+ # have a directory block for it. This doesn't happen very
+ # often, so this doesn't have to be super fast.
+ block_index, entry_index, dir_present, file_present = \
+ self._get_block_entry_index(entry[0][0], entry[0][1], 0)
+ self._ensure_block(block_index, entry_index,
+ pathjoin(entry[0][0], entry[0][1]))
+ else:
+ # Any changes are derived trivially from the stat object, not worth
+ # re-writing a dirstate for just this
+ worth_saving = 0
+ elif minikind == c'l':
+ if saved_minikind == c'l':
+            # If the object hasn't changed kind, it isn't worth saving the
+            # dirstate just for a symlink. The default is 'fast symlinks',
+            # which store the target in the inode entry rather than
+            # separately, so by the time we have stat'ed the link we have
+            # already read everything we need off disk.
+ worth_saving = 0
+ link_or_sha1 = self._read_link(abspath, saved_link_or_sha1)
+ if self._cutoff_time is None:
+ self._sha_cutoff_time()
+ if (stat_value.st_mtime < self._cutoff_time
+ and stat_value.st_ctime < self._cutoff_time):
+ entry[1][0] = ('l', link_or_sha1, stat_value.st_size,
+ False, packed_stat)
+ else:
+ entry[1][0] = ('l', '', stat_value.st_size,
+ False, DirState.NULLSTAT)
+ if worth_saving:
+        # Note: _mark_modified will only set IN_MEMORY_HASH_MODIFIED here,
+        # and we only call it when there is something worth saving.
+ self._mark_modified([entry])
+ return link_or_sha1
+
+
+# TODO: Do we want to worry about exceptions here?
+cdef char _minikind_from_string(object string) except? -1:
+ """Convert a python string to a char."""
+ return PyString_AsString(string)[0]
+
+
+cdef object _kind_absent
+cdef object _kind_file
+cdef object _kind_directory
+cdef object _kind_symlink
+cdef object _kind_relocated
+cdef object _kind_tree_reference
+_kind_absent = "absent"
+_kind_file = "file"
+_kind_directory = "directory"
+_kind_symlink = "symlink"
+_kind_relocated = "relocated"
+_kind_tree_reference = "tree-reference"
+
+
+cdef object _minikind_to_kind(char minikind):
+ """Create a string kind for minikind."""
+ cdef char _minikind[1]
+ if minikind == c'f':
+ return _kind_file
+ elif minikind == c'd':
+ return _kind_directory
+ elif minikind == c'a':
+ return _kind_absent
+ elif minikind == c'r':
+ return _kind_relocated
+ elif minikind == c'l':
+ return _kind_symlink
+ elif minikind == c't':
+ return _kind_tree_reference
+ _minikind[0] = minikind
+ raise KeyError(PyString_FromStringAndSize(_minikind, 1))
+
+
+cdef int _versioned_minikind(char minikind): # cannot_raise
+ """Return non-zero if minikind is in fltd"""
+ return (minikind == c'f' or
+ minikind == c'd' or
+ minikind == c'l' or
+ minikind == c't')
+
+
+cdef class ProcessEntryC:
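+    """Compiled implementation of the dirstate change iteration.
+
+    This compares the entries recorded in a DirState (between source_index
+    and target_index) with what is actually on disk, yielding
+    iter_changes-style result tuples.
+    """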
+
+ cdef int doing_consistency_expansion
+ cdef object old_dirname_to_file_id # dict
+ cdef object new_dirname_to_file_id # dict
+ cdef object last_source_parent
+ cdef object last_target_parent
+ cdef int include_unchanged
+ cdef int partial
+ cdef object use_filesystem_for_exec
+ cdef object utf8_decode
+ cdef readonly object searched_specific_files
+ cdef readonly object searched_exact_paths
+ cdef object search_specific_files
+ # The parents up to the root of the paths we are searching.
+ # After all normal paths are returned, these specific items are returned.
+ cdef object search_specific_file_parents
+ cdef object state
+ # Current iteration variables:
+ cdef object current_root
+ cdef object current_root_unicode
+ cdef object root_entries
+ cdef int root_entries_pos, root_entries_len
+ cdef object root_abspath
+ cdef int source_index, target_index
+ cdef int want_unversioned
+ cdef object tree
+ cdef object dir_iterator
+ cdef int block_index
+ cdef object current_block
+ cdef int current_block_pos
+ cdef object current_block_list
+ cdef object current_dir_info
+ cdef object current_dir_list
+ cdef object _pending_consistent_entries # list
+ cdef int path_index
+ cdef object root_dir_info
+ cdef object bisect_left
+ cdef object pathjoin
+ cdef object fstat
+ # A set of the ids we've output when doing partial output.
+ cdef object seen_ids
+ cdef object sha_file
+
+ def __init__(self, include_unchanged, use_filesystem_for_exec,
+ search_specific_files, state, source_index, target_index,
+ want_unversioned, tree):
+ self.doing_consistency_expansion = 0
+ self.old_dirname_to_file_id = {}
+ self.new_dirname_to_file_id = {}
+ # Are we doing a partial iter_changes?
+ self.partial = set(['']).__ne__(search_specific_files)
+        # Using a list so that we can access the values and change them in
+        # nested scope. Each one is [path, file_id]
+ self.last_source_parent = [None, None]
+ self.last_target_parent = [None, None]
+ if include_unchanged is None:
+ self.include_unchanged = False
+ else:
+ self.include_unchanged = int(include_unchanged)
+ self.use_filesystem_for_exec = use_filesystem_for_exec
+ self.utf8_decode = cache_utf8._utf8_decode
+        # For all search indexes in each path at or under each element of
+        # search_specific_files: if the detail is relocated, add the id, and
+        # add the relocated path as one to search if it's not searched already.
+        # If the detail is not relocated, add the id.
+ self.searched_specific_files = set()
+ # When we search exact paths without expanding downwards, we record
+ # that here.
+ self.searched_exact_paths = set()
+ self.search_specific_files = search_specific_files
+ # The parents up to the root of the paths we are searching.
+ # After all normal paths are returned, these specific items are returned.
+ self.search_specific_file_parents = set()
+ # The ids we've sent out in the delta.
+ self.seen_ids = set()
+ self.state = state
+ self.current_root = None
+ self.current_root_unicode = None
+ self.root_entries = None
+ self.root_entries_pos = 0
+ self.root_entries_len = 0
+ self.root_abspath = None
+ if source_index is None:
+ self.source_index = -1
+ else:
+ self.source_index = source_index
+ self.target_index = target_index
+ self.want_unversioned = want_unversioned
+ self.tree = tree
+ self.dir_iterator = None
+ self.block_index = -1
+ self.current_block = None
+ self.current_block_list = None
+ self.current_block_pos = -1
+ self.current_dir_info = None
+ self.current_dir_list = None
+ self._pending_consistent_entries = []
+ self.path_index = 0
+ self.root_dir_info = None
+ self.bisect_left = bisect.bisect_left
+ self.pathjoin = osutils.pathjoin
+ self.fstat = os.fstat
+ self.sha_file = osutils.sha_file
+ if target_index != 0:
+ # A lot of code in here depends on target_index == 0
+ raise errors.BzrError('unsupported target index')
+
+ cdef _process_entry(self, entry, path_info):
+ """Compare an entry and real disk to generate delta information.
+
+ :param path_info: top_relpath, basename, kind, lstat, abspath for
+ the path of entry. If None, then the path is considered absent in
+            the target (Perhaps we should pass in a concrete entry for this?)
+ Basename is returned as a utf8 string because we expect this
+ tuple will be ignored, and don't want to take the time to
+ decode.
+ :return: (iter_changes_result, changed). If the entry has not been
+ handled then changed is None. Otherwise it is False if no content
+            or metadata changes have occurred, and True if any content or
+ metadata change has occurred. If self.include_unchanged is True then
+ if changed is not None, iter_changes_result will always be a result
+ tuple. Otherwise, iter_changes_result is None unless changed is
+ True.
+ """
+ cdef char target_minikind
+ cdef char source_minikind
+ cdef object file_id
+ cdef int content_change
+ cdef object details_list
+ file_id = None
+ details_list = entry[1]
+ if -1 == self.source_index:
+ source_details = DirState.NULL_PARENT_DETAILS
+ else:
+ source_details = details_list[self.source_index]
+ target_details = details_list[self.target_index]
+ target_minikind = _minikind_from_string(target_details[0])
+ if path_info is not None and _versioned_minikind(target_minikind):
+ if self.target_index != 0:
+ raise AssertionError("Unsupported target index %d" %
+ self.target_index)
+ link_or_sha1 = _update_entry(self.state, entry, path_info[4], path_info[3])
+ # The entry may have been modified by update_entry
+ target_details = details_list[self.target_index]
+ target_minikind = _minikind_from_string(target_details[0])
+ else:
+ link_or_sha1 = None
+ # the rest of this function is 0.3 seconds on 50K paths, or
+ # 0.000006 seconds per call.
+ source_minikind = _minikind_from_string(source_details[0])
+ if ((_versioned_minikind(source_minikind) or source_minikind == c'r')
+ and _versioned_minikind(target_minikind)):
+ # claimed content in both: diff
+            #   r    |  fdlt  |      | add source to search, add id path move and perform
+            #        |        |      | diff check on source-target
+            #   r    |  fdlt  |  a   | dangling file that was present in the basis.
+            #        |        |      | ???
+ if source_minikind != c'r':
+ old_dirname = entry[0][0]
+ old_basename = entry[0][1]
+ old_path = path = None
+ else:
+ # add the source to the search path to find any children it
+                # has. TODO: only add if it is a container?
+ if (not self.doing_consistency_expansion and
+ not osutils.is_inside_any(self.searched_specific_files,
+ source_details[1])):
+ self.search_specific_files.add(source_details[1])
+ # expanding from a user requested path, parent expansion
+ # for delta consistency happens later.
+ # generate the old path; this is needed for stating later
+ # as well.
+ old_path = source_details[1]
+ old_dirname, old_basename = os.path.split(old_path)
+ path = self.pathjoin(entry[0][0], entry[0][1])
+ old_entry = self.state._get_entry(self.source_index,
+ path_utf8=old_path)
+ # update the source details variable to be the real
+ # location.
+ if old_entry == (None, None):
+ raise errors.CorruptDirstate(self.state._filename,
+ "entry '%s/%s' is considered renamed from %r"
+ " but source does not exist\n"
+ "entry: %s" % (entry[0][0], entry[0][1], old_path, entry))
+ source_details = old_entry[1][self.source_index]
+ source_minikind = _minikind_from_string(source_details[0])
+ if path_info is None:
+ # the file is missing on disk, show as removed.
+ content_change = 1
+ target_kind = None
+ target_exec = False
+ else:
+ # source and target are both versioned and disk file is present.
+ target_kind = path_info[2]
+ if target_kind == 'directory':
+ if path is None:
+ old_path = path = self.pathjoin(old_dirname, old_basename)
+ file_id = entry[0][2]
+ self.new_dirname_to_file_id[path] = file_id
+ if source_minikind != c'd':
+ content_change = 1
+ else:
+ # directories have no fingerprint
+ content_change = 0
+ target_exec = False
+ elif target_kind == 'file':
+ if source_minikind != c'f':
+ content_change = 1
+ else:
+ # Check the sha. We can't just rely on the size as
+                        # content filtering may mean different sizes actually
+ # map to the same content
+ if link_or_sha1 is None:
+ # Stat cache miss:
+ statvalue, link_or_sha1 = \
+ self.state._sha1_provider.stat_and_sha1(
+ path_info[4])
+ self.state._observed_sha1(entry, link_or_sha1,
+ statvalue)
+ content_change = (link_or_sha1 != source_details[1])
+ # Target details is updated at update_entry time
+ if self.use_filesystem_for_exec:
+ # We don't need S_ISREG here, because we are sure
+ # we are dealing with a file.
+ target_exec = bool(S_IXUSR & path_info[3].st_mode)
+ else:
+ target_exec = target_details[3]
+ elif target_kind == 'symlink':
+ if source_minikind != c'l':
+ content_change = 1
+ else:
+ content_change = (link_or_sha1 != source_details[1])
+ target_exec = False
+ elif target_kind == 'tree-reference':
+ if source_minikind != c't':
+ content_change = 1
+ else:
+ content_change = 0
+ target_exec = False
+ else:
+ if path is None:
+ path = self.pathjoin(old_dirname, old_basename)
+ raise errors.BadFileKindError(path, path_info[2])
+ if source_minikind == c'd':
+ if path is None:
+ old_path = path = self.pathjoin(old_dirname, old_basename)
+ if file_id is None:
+ file_id = entry[0][2]
+ self.old_dirname_to_file_id[old_path] = file_id
+ # parent id is the entry for the path in the target tree
+ if old_basename and old_dirname == self.last_source_parent[0]:
+ # use a cached hit for non-root source entries.
+ source_parent_id = self.last_source_parent[1]
+ else:
+ try:
+ source_parent_id = self.old_dirname_to_file_id[old_dirname]
+ except KeyError, _:
+ source_parent_entry = self.state._get_entry(self.source_index,
+ path_utf8=old_dirname)
+ source_parent_id = source_parent_entry[0][2]
+ if source_parent_id == entry[0][2]:
+ # This is the root, so the parent is None
+ source_parent_id = None
+ else:
+ self.last_source_parent[0] = old_dirname
+ self.last_source_parent[1] = source_parent_id
+ new_dirname = entry[0][0]
+ if entry[0][1] and new_dirname == self.last_target_parent[0]:
+ # use a cached hit for non-root target entries.
+ target_parent_id = self.last_target_parent[1]
+ else:
+ try:
+ target_parent_id = self.new_dirname_to_file_id[new_dirname]
+ except KeyError, _:
+ # TODO: We don't always need to do the lookup, because the
+ # parent entry will be the same as the source entry.
+ target_parent_entry = self.state._get_entry(self.target_index,
+ path_utf8=new_dirname)
+ if target_parent_entry == (None, None):
+ raise AssertionError(
+ "Could not find target parent in wt: %s\nparent of: %s"
+ % (new_dirname, entry))
+ target_parent_id = target_parent_entry[0][2]
+ if target_parent_id == entry[0][2]:
+ # This is the root, so the parent is None
+ target_parent_id = None
+ else:
+ self.last_target_parent[0] = new_dirname
+ self.last_target_parent[1] = target_parent_id
+
+ source_exec = source_details[3]
+ changed = (content_change
+ or source_parent_id != target_parent_id
+ or old_basename != entry[0][1]
+ or source_exec != target_exec
+ )
+ if not changed and not self.include_unchanged:
+ return None, False
+ else:
+ if old_path is None:
+ path = self.pathjoin(old_dirname, old_basename)
+ old_path = path
+ old_path_u = self.utf8_decode(old_path)[0]
+ path_u = old_path_u
+ else:
+ old_path_u = self.utf8_decode(old_path)[0]
+ if old_path == path:
+ path_u = old_path_u
+ else:
+ path_u = self.utf8_decode(path)[0]
+ source_kind = _minikind_to_kind(source_minikind)
+ return (entry[0][2],
+ (old_path_u, path_u),
+ content_change,
+ (True, True),
+ (source_parent_id, target_parent_id),
+ (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
+ (source_kind, target_kind),
+ (source_exec, target_exec)), changed
+ elif source_minikind == c'a' and _versioned_minikind(target_minikind):
+ # looks like a new file
+ path = self.pathjoin(entry[0][0], entry[0][1])
+ # parent id is the entry for the path in the target tree
+ # TODO: these are the same for an entire directory: cache em.
+ parent_entry = self.state._get_entry(self.target_index,
+ path_utf8=entry[0][0])
+ if parent_entry is None:
+ raise errors.DirstateCorrupt(self.state,
+ "We could not find the parent entry in index %d"
+ " for the entry: %s"
+ % (self.target_index, entry[0]))
+ parent_id = parent_entry[0][2]
+ if parent_id == entry[0][2]:
+ parent_id = None
+ if path_info is not None:
+ # Present on disk:
+ if self.use_filesystem_for_exec:
+ # We need S_ISREG here, because we aren't sure if this
+ # is a file or not.
+ target_exec = bool(
+ S_ISREG(path_info[3].st_mode)
+ and S_IXUSR & path_info[3].st_mode)
+ else:
+ target_exec = target_details[3]
+ return (entry[0][2],
+ (None, self.utf8_decode(path)[0]),
+ True,
+ (False, True),
+ (None, parent_id),
+ (None, self.utf8_decode(entry[0][1])[0]),
+ (None, path_info[2]),
+ (None, target_exec)), True
+ else:
+                # It's a missing file, report it as such.
+ return (entry[0][2],
+ (None, self.utf8_decode(path)[0]),
+ False,
+ (False, True),
+ (None, parent_id),
+ (None, self.utf8_decode(entry[0][1])[0]),
+ (None, None),
+ (None, False)), True
+ elif _versioned_minikind(source_minikind) and target_minikind == c'a':
+            # unversioned, possibly, or possibly not deleted: we don't care.
+            # If it's still on disk, *and* there's no other entry at this
+            # path [we don't know this in this routine at the moment -
+            # perhaps we should change this], then it would be an unknown.
+ old_path = self.pathjoin(entry[0][0], entry[0][1])
+ # parent id is the entry for the path in the target tree
+ parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2]
+ if parent_id == entry[0][2]:
+ parent_id = None
+ return (entry[0][2],
+ (self.utf8_decode(old_path)[0], None),
+ True,
+ (True, False),
+ (parent_id, None),
+ (self.utf8_decode(entry[0][1])[0], None),
+ (_minikind_to_kind(source_minikind), None),
+ (source_details[3], None)), True
+ elif _versioned_minikind(source_minikind) and target_minikind == c'r':
+ # a rename; could be a true rename, or a rename inherited from
+            # a renamed parent. TODO: handle this efficiently. It's not a
+            # common case to rename dirs though, so a correct but slow
+ # implementation will do.
+ if (not self.doing_consistency_expansion and
+ not osutils.is_inside_any(self.searched_specific_files,
+ target_details[1])):
+ self.search_specific_files.add(target_details[1])
+ # We don't expand the specific files parents list here as
+ # the path is absent in target and won't create a delta with
+ # missing parent.
+ elif ((source_minikind == c'r' or source_minikind == c'a') and
+ (target_minikind == c'r' or target_minikind == c'a')):
+            # neither of the selected trees contains this path,
+ # so skip over it. This is not currently directly tested, but
+ # is indirectly via test_too_much.TestCommands.test_conflicts.
+ pass
+ else:
+ raise AssertionError("don't know how to compare "
+ "source_minikind=%r, target_minikind=%r"
+ % (source_minikind, target_minikind))
+ ## import pdb;pdb.set_trace()
+ return None, None
+
+ def __iter__(self):
+ return self
+
+ def iter_changes(self):
+ return self
+
+ cdef int _gather_result_for_consistency(self, result) except -1:
+ """Check a result we will yield to make sure we are consistent later.
+
+ This gathers result's parents into a set to output later.
+
+ :param result: A result tuple.
+ """
+ if not self.partial or not result[0]:
+ return 0
+ self.seen_ids.add(result[0])
+ new_path = result[1][1]
+ if new_path:
+ # Not the root and not a delete: queue up the parents of the path.
+ self.search_specific_file_parents.update(
+ osutils.parent_directories(new_path.encode('utf8')))
+ # Add the root directory which parent_directories does not
+ # provide.
+ self.search_specific_file_parents.add('')
+ return 0
+
+ cdef int _update_current_block(self) except -1:
+ if (self.block_index < len(self.state._dirblocks) and
+ osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])):
+ self.current_block = self.state._dirblocks[self.block_index]
+ self.current_block_list = self.current_block[1]
+ self.current_block_pos = 0
+ else:
+ self.current_block = None
+ self.current_block_list = None
+ return 0
+
+ def __next__(self):
+ # Simple thunk to allow tail recursion without pyrex confusion
+ return self._iter_next()
+
+ cdef _iter_next(self):
+ """Iterate over the changes."""
+        # This function single-steps through an iterator. As such, while loops
+ # are often exited by 'return' - the code is structured so that the
+ # next call into the function will return to the same while loop. Note
+ # that all flow control needed to re-reach that step is reexecuted,
+ # which can be a performance problem. It has not yet been tuned to
+ # minimise this; a state machine is probably the simplest restructuring
+ # to both minimise this overhead and make the code considerably more
+ # understandable.
+
+ # sketch:
+ # compare source_index and target_index at or under each element of search_specific_files.
+        # use the following comparison table. Note that we only want to do diff operations when
+        # the target is fdl because that's when the walkdirs logic will have exposed the pathinfo
+ # for the target.
+ # cases:
+ #
+        # Source | Target | disk | action
+        #   r    |  fdlt  |      | add source to search, add id path move and perform
+        #        |        |      | diff check on source-target
+        #   r    |  fdlt  |  a   | dangling file that was present in the basis.
+        #        |        |      | ???
+        #   r    |   a    |      | add source to search
+        #   r    |   a    |  a   |
+        #   r    |   r    |      | this path is present in a non-examined tree, skip.
+        #   r    |   r    |  a   | this path is present in a non-examined tree, skip.
+        #   a    |  fdlt  |      | add new id
+        #   a    |  fdlt  |  a   | dangling locally added file, skip
+        #   a    |   a    |      | not present in either tree, skip
+        #   a    |   a    |  a   | not present in any tree, skip
+        #   a    |   r    |      | not present in either tree at this path, skip as it
+        #        |        |      | may not be selected by the user's list of paths.
+        #   a    |   r    |  a   | not present in either tree at this path, skip as it
+        #        |        |      | may not be selected by the user's list of paths.
+        #  fdlt  |  fdlt  |      | content in both: diff them
+        #  fdlt  |  fdlt  |  a   | deleted locally, but not unversioned - show as deleted?
+        #  fdlt  |   a    |      | unversioned: output deleted id for now
+        #  fdlt  |   a    |  a   | unversioned and deleted: output deleted id
+        #  fdlt  |   r    |      | relocated in this tree, so add target to search.
+        #        |        |      | Don't diff, we will see an r,fd; pair when we reach
+        #        |        |      | this id at the other path.
+        #  fdlt  |   r    |  a   | relocated in this tree, so add target to search.
+        #        |        |      | Don't diff, we will see an r,fd; pair when we reach
+        #        |        |      | this id at the other path.
+
+ # TODO: jam 20070516 - Avoid the _get_entry lookup overhead by
+ # keeping a cache of directories that we have seen.
+ cdef object current_dirname, current_blockname
+ cdef char * current_dirname_c, * current_blockname_c
+ cdef int advance_entry, advance_path
+ cdef int path_handled
+ searched_specific_files = self.searched_specific_files
+ # Are we walking a root?
+ while self.root_entries_pos < self.root_entries_len:
+ entry = self.root_entries[self.root_entries_pos]
+ self.root_entries_pos = self.root_entries_pos + 1
+ result, changed = self._process_entry(entry, self.root_dir_info)
+ if changed is not None:
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ return result
+ # Have we finished the prior root, or never started one ?
+ if self.current_root is None:
+            # TODO: the pending list should be lexically sorted? The
+ # interface doesn't require it.
+ try:
+ self.current_root = self.search_specific_files.pop()
+ except KeyError, _:
+ raise StopIteration()
+ self.searched_specific_files.add(self.current_root)
+ # process the entries for this containing directory: the rest will be
+ # found by their parents recursively.
+ self.root_entries = self.state._entries_for_path(self.current_root)
+ self.root_entries_len = len(self.root_entries)
+ self.current_root_unicode = self.current_root.decode('utf8')
+ self.root_abspath = self.tree.abspath(self.current_root_unicode)
+ try:
+ root_stat = os.lstat(self.root_abspath)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # the path does not exist: let _process_entry know that.
+ self.root_dir_info = None
+ else:
+ # some other random error: hand it up.
+ raise
+ else:
+ self.root_dir_info = ('', self.current_root,
+ osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
+ self.root_abspath)
+ if self.root_dir_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ self.current_root_unicode):
+ self.root_dir_info = self.root_dir_info[:2] + \
+ ('tree-reference',) + self.root_dir_info[3:]
+ if not self.root_entries and not self.root_dir_info:
+ # this specified path is not present at all, skip it.
+ # (tail recursion, can do a loop once the full structure is
+ # known).
+ return self._iter_next()
+ path_handled = 0
+ self.root_entries_pos = 0
+        # XXX Clarity: This loop duplicates the one inside the
+        # self.current_root is None guard above: if we return from it, it
+        # completes there (and the following if block cannot trigger
+        # because path_handled must be true), so the if block is not
+        # duplicated.
+ while self.root_entries_pos < self.root_entries_len:
+ entry = self.root_entries[self.root_entries_pos]
+ self.root_entries_pos = self.root_entries_pos + 1
+ result, changed = self._process_entry(entry, self.root_dir_info)
+ if changed is not None:
+ path_handled = -1
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ return result
+ # handle unversioned specified paths:
+ if self.want_unversioned and not path_handled and self.root_dir_info:
+ new_executable = bool(
+ stat.S_ISREG(self.root_dir_info[3].st_mode)
+ and stat.S_IEXEC & self.root_dir_info[3].st_mode)
+ return (None,
+ (None, self.current_root_unicode),
+ True,
+ (False, False),
+ (None, None),
+ (None, splitpath(self.current_root_unicode)[-1]),
+ (None, self.root_dir_info[2]),
+ (None, new_executable)
+ )
+ # If we reach here, the outer flow continues, which enters into the
+ # per-root setup logic.
+ if (self.current_dir_info is None and self.current_block is None and not
+ self.doing_consistency_expansion):
+ # setup iteration of this root:
+ self.current_dir_list = None
+ if self.root_dir_info and self.root_dir_info[2] == 'tree-reference':
+ self.current_dir_info = None
+ else:
+ self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath,
+ prefix=self.current_root)
+ self.path_index = 0
+ try:
+ self.current_dir_info = self.dir_iterator.next()
+ self.current_dir_list = self.current_dir_info[1]
+ except OSError, e:
+ # there may be directories in the inventory even though
+ # this path is not a file on disk: so mark it as end of
+ # iterator
+ if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
+ self.current_dir_info = None
+ elif sys.platform == 'win32':
+ # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but
+ # python 2.5 has e.errno == EINVAL,
+ # and e.winerror == ERROR_DIRECTORY
+ try:
+ e_winerror = e.winerror
+ except AttributeError, _:
+ e_winerror = None
+ win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
+ if (e.errno in win_errors or e_winerror in win_errors):
+ self.current_dir_info = None
+ else:
+                        # Will this really raise the right exception?
+ raise
+ else:
+ raise
+ else:
+ if self.current_dir_info[0][0] == '':
+ # remove .bzr from iteration
+ bzr_index = self.bisect_left(self.current_dir_list, ('.bzr',))
+ if self.current_dir_list[bzr_index][0] != '.bzr':
+ raise AssertionError()
+ del self.current_dir_list[bzr_index]
+ initial_key = (self.current_root, '', '')
+ self.block_index, _ = self.state._find_block_index_from_key(initial_key)
+ if self.block_index == 0:
+                # we have already processed the root, but because the
+                # initial key matched it we should skip it here.
+ self.block_index = self.block_index + 1
+ self._update_current_block()
+ # walk until both the directory listing and the versioned metadata
+ # are exhausted.
+ while (self.current_dir_info is not None
+ or self.current_block is not None):
+ # Uncommon case - a missing directory or an unversioned directory:
+ if (self.current_dir_info and self.current_block
+ and self.current_dir_info[0][0] != self.current_block[0]):
+ # Work around pyrex broken heuristic - current_dirname has
+ # the same scope as current_dirname_c
+ current_dirname = self.current_dir_info[0][0]
+ current_dirname_c = PyString_AS_STRING_void(
+ <void *>current_dirname)
+ current_blockname = self.current_block[0]
+ current_blockname_c = PyString_AS_STRING_void(
+ <void *>current_blockname)
+ # In the python generator we evaluate this if block once per
+                # dir+block; because we reenter in the pyrex version it's being
+ # evaluated once per path: we could cache the result before
+ # doing the while loop and probably save time.
+ if _cmp_by_dirs(current_dirname_c,
+ PyString_Size(current_dirname),
+ current_blockname_c,
+ PyString_Size(current_blockname)) < 0:
+ # filesystem data refers to paths not covered by the
+                    # dirblock. This has two possibilities:
+ # A) it is versioned but empty, so there is no block for it
+ # B) it is not versioned.
+
+ # if (A) then we need to recurse into it to check for
+ # new unknown files or directories.
+ # if (B) then we should ignore it, because we don't
+ # recurse into unknown directories.
+ # We are doing a loop
+ while self.path_index < len(self.current_dir_list):
+ current_path_info = self.current_dir_list[self.path_index]
+                        # don't descend into this unversioned path if it is
+ # a dir
+ if current_path_info[2] in ('directory',
+ 'tree-reference'):
+ del self.current_dir_list[self.path_index]
+ self.path_index = self.path_index - 1
+ self.path_index = self.path_index + 1
+ if self.want_unversioned:
+ if current_path_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ self.utf8_decode(current_path_info[0])[0]):
+ current_path_info = current_path_info[:2] + \
+ ('tree-reference',) + current_path_info[3:]
+ new_executable = bool(
+ stat.S_ISREG(current_path_info[3].st_mode)
+ and stat.S_IEXEC & current_path_info[3].st_mode)
+ return (None,
+ (None, self.utf8_decode(current_path_info[0])[0]),
+ True,
+ (False, False),
+ (None, None),
+ (None, self.utf8_decode(current_path_info[1])[0]),
+ (None, current_path_info[2]),
+ (None, new_executable))
+ # This dir info has been handled, go to the next
+ self.path_index = 0
+ self.current_dir_list = None
+ try:
+ self.current_dir_info = self.dir_iterator.next()
+ self.current_dir_list = self.current_dir_info[1]
+ except StopIteration, _:
+ self.current_dir_info = None
+ else: #(dircmp > 0)
+ # We have a dirblock entry for this location, but there
+ # is no filesystem path for this. This is most likely
+ # because a directory was removed from the disk.
+ # We don't have to report the missing directory,
+ # because that should have already been handled, but we
+ # need to handle all of the files that are contained
+ # within.
+ while self.current_block_pos < len(self.current_block_list):
+ current_entry = self.current_block_list[self.current_block_pos]
+ self.current_block_pos = self.current_block_pos + 1
+ # entry referring to file not present on disk.
+ # advance the entry only, after processing.
+ result, changed = self._process_entry(current_entry, None)
+ if changed is not None:
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ return result
+ self.block_index = self.block_index + 1
+ self._update_current_block()
+ continue # next loop-on-block/dir
+ result = self._loop_one_block()
+ if result is not None:
+ return result
+ if len(self.search_specific_files):
+ # More supplied paths to process
+ self.current_root = None
+ return self._iter_next()
+ # Start expanding more conservatively, adding paths the user may not
+ # have intended but required for consistent deltas.
+ self.doing_consistency_expansion = 1
+ if not self._pending_consistent_entries:
+ self._pending_consistent_entries = self._next_consistent_entries()
+ while self._pending_consistent_entries:
+ result, changed = self._pending_consistent_entries.pop()
+ if changed is not None:
+ return result
+ raise StopIteration()
+
+ cdef object _maybe_tree_ref(self, current_path_info):
+ if self.tree._directory_is_tree_reference(
+ self.utf8_decode(current_path_info[0])[0]):
+ return current_path_info[:2] + \
+ ('tree-reference',) + current_path_info[3:]
+ else:
+ return current_path_info
+
+ cdef object _loop_one_block(self):
+ # current_dir_info and current_block refer to the same directory -
+ # this is the common case code.
+ # Assign local variables for current path and entry:
+ cdef object current_entry
+ cdef object current_path_info
+ cdef int path_handled
+ cdef char minikind
+ cdef int cmp_result
+ # cdef char * temp_str
+ # cdef Py_ssize_t temp_str_length
+ # PyString_AsStringAndSize(disk_kind, &temp_str, &temp_str_length)
+ # if not strncmp(temp_str, "directory", temp_str_length):
+ if (self.current_block is not None and
+ self.current_block_pos < PyList_GET_SIZE(self.current_block_list)):
+ current_entry = PyList_GET_ITEM(self.current_block_list,
+ self.current_block_pos)
+            # accommodate pyrex
+ Py_INCREF(current_entry)
+ else:
+ current_entry = None
+ if (self.current_dir_info is not None and
+ self.path_index < PyList_GET_SIZE(self.current_dir_list)):
+ current_path_info = PyList_GET_ITEM(self.current_dir_list,
+ self.path_index)
+                # accommodate pyrex
+ Py_INCREF(current_path_info)
+ disk_kind = PyTuple_GET_ITEM(current_path_info, 2)
+                # accommodate pyrex
+ Py_INCREF(disk_kind)
+ if disk_kind == "directory":
+ current_path_info = self._maybe_tree_ref(current_path_info)
+ else:
+ current_path_info = None
+ while (current_entry is not None or current_path_info is not None):
+ advance_entry = -1
+ advance_path = -1
+ result = None
+ changed = None
+ path_handled = 0
+ if current_entry is None:
+ # unversioned - the check for path_handled when the path
+ # is advanced will yield this path if needed.
+ pass
+ elif current_path_info is None:
+ # no path is fine: the per entry code will handle it.
+ result, changed = self._process_entry(current_entry,
+ current_path_info)
+ else:
+ minikind = _minikind_from_string(
+ current_entry[1][self.target_index][0])
+ cmp_result = cmp(current_path_info[1], current_entry[0][1])
+ if (cmp_result or minikind == c'a' or minikind == c'r'):
+ # The current path on disk doesn't match the dirblock
+ # record. Either the dirblock record is marked as
+ # absent/renamed, or the file on disk is not present at all
+ # in the dirblock. Either way, report about the dirblock
+ # entry, and let other code handle the filesystem one.
+
+ # Compare the basename for these files to determine
+ # which comes first
+ if cmp_result < 0:
+ # extra file on disk: pass for now, but only
+ # increment the path, not the entry
+ advance_entry = 0
+ else:
+ # entry referring to file not present on disk.
+ # advance the entry only, after processing.
+ result, changed = self._process_entry(current_entry,
+ None)
+ advance_path = 0
+ else:
+                    # paths are the same, and the dirstate entry is not
+ # absent or renamed.
+ result, changed = self._process_entry(current_entry,
+ current_path_info)
+ if changed is not None:
+ path_handled = -1
+ if not changed and not self.include_unchanged:
+ changed = None
+ # >- loop control starts here:
+ # >- entry
+ if advance_entry and current_entry is not None:
+ self.current_block_pos = self.current_block_pos + 1
+ if self.current_block_pos < PyList_GET_SIZE(self.current_block_list):
+ current_entry = self.current_block_list[self.current_block_pos]
+ else:
+ current_entry = None
+ # >- path
+ if advance_path and current_path_info is not None:
+ if not path_handled:
+ # unversioned in all regards
+ if self.want_unversioned:
+ new_executable = bool(
+ stat.S_ISREG(current_path_info[3].st_mode)
+ and stat.S_IEXEC & current_path_info[3].st_mode)
+ try:
+ relpath_unicode = self.utf8_decode(current_path_info[0])[0]
+ except UnicodeDecodeError, _:
+ raise errors.BadFilenameEncoding(
+ current_path_info[0], osutils._fs_enc)
+ if changed is not None:
+ raise AssertionError(
+ "result is not None: %r" % result)
+ result = (None,
+ (None, relpath_unicode),
+ True,
+ (False, False),
+ (None, None),
+ (None, self.utf8_decode(current_path_info[1])[0]),
+ (None, current_path_info[2]),
+ (None, new_executable))
+ changed = True
+                    # don't descend into this unversioned path if it is
+ # a dir
+ if current_path_info[2] in ('directory'):
+ del self.current_dir_list[self.path_index]
+ self.path_index = self.path_index - 1
+                    # don't descend the disk iterator into any tree
+ # paths.
+ if current_path_info[2] == 'tree-reference':
+ del self.current_dir_list[self.path_index]
+ self.path_index = self.path_index - 1
+ self.path_index = self.path_index + 1
+ if self.path_index < len(self.current_dir_list):
+ current_path_info = self.current_dir_list[self.path_index]
+ if current_path_info[2] == 'directory':
+ current_path_info = self._maybe_tree_ref(
+ current_path_info)
+ else:
+ current_path_info = None
+ if changed is not None:
+ # Found a result on this pass, yield it
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ return result
+ if self.current_block is not None:
+ self.block_index = self.block_index + 1
+ self._update_current_block()
+ if self.current_dir_info is not None:
+ self.path_index = 0
+ self.current_dir_list = None
+ try:
+ self.current_dir_info = self.dir_iterator.next()
+ self.current_dir_list = self.current_dir_info[1]
+ except StopIteration, _:
+ self.current_dir_info = None
+
+ cdef object _next_consistent_entries(self):
+ """Grabs the next specific file parent case to consider.
+
+ :return: A list of the results, each of which is as for _process_entry.
+ """
+ results = []
+ while self.search_specific_file_parents:
+ # Process the parent directories for the paths we were iterating.
+ # Even in extremely large trees this should be modest, so currently
+ # no attempt is made to optimise.
+ path_utf8 = self.search_specific_file_parents.pop()
+ if path_utf8 in self.searched_exact_paths:
+ # We've examined this path.
+ continue
+ if osutils.is_inside_any(self.searched_specific_files, path_utf8):
+ # We've examined this path.
+ continue
+ path_entries = self.state._entries_for_path(path_utf8)
+ # We need either one or two entries. If the path in
+ # self.target_index has moved (so the entry in source_index is in
+ # 'ar') then we need to also look for the entry for this path in
+ # self.source_index, to output the appropriate delete-or-rename.
+ selected_entries = []
+ found_item = False
+ for candidate_entry in path_entries:
+ # Find entries present in target at this path:
+ if candidate_entry[1][self.target_index][0] not in 'ar':
+ found_item = True
+ selected_entries.append(candidate_entry)
+ # Find entries present in source at this path:
+ elif (self.source_index is not None and
+ candidate_entry[1][self.source_index][0] not in 'ar'):
+ found_item = True
+ if candidate_entry[1][self.target_index][0] == 'a':
+ # Deleted, emit it here.
+ selected_entries.append(candidate_entry)
+ else:
+ # renamed, emit it when we process the directory it
+ # ended up at.
+ self.search_specific_file_parents.add(
+ candidate_entry[1][self.target_index][1])
+ if not found_item:
+ raise AssertionError(
+ "Missing entry for specific path parent %r, %r" % (
+ path_utf8, path_entries))
+ path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
+ for entry in selected_entries:
+ if entry[0][2] in self.seen_ids:
+ continue
+ result, changed = self._process_entry(entry, path_info)
+ if changed is None:
+ raise AssertionError(
+ "Got entry<->path mismatch for specific path "
+ "%r entry %r path_info %r " % (
+ path_utf8, entry, path_info))
+                # Only include changes - we're outside the user's requested
+ # expansion.
+ if changed:
+ self._gather_result_for_consistency(result)
+ if (result[6][0] == 'directory' and
+ result[6][1] != 'directory'):
+ # This stopped being a directory, the old children have
+ # to be included.
+ if entry[1][self.source_index][0] == 'r':
+ # renamed, take the source path
+ entry_path_utf8 = entry[1][self.source_index][1]
+ else:
+ entry_path_utf8 = path_utf8
+ initial_key = (entry_path_utf8, '', '')
+ block_index, _ = self.state._find_block_index_from_key(
+ initial_key)
+ if block_index == 0:
+ # The children of the root are in block index 1.
+ block_index = block_index + 1
+ current_block = None
+ if block_index < len(self.state._dirblocks):
+ current_block = self.state._dirblocks[block_index]
+ if not osutils.is_inside(
+ entry_path_utf8, current_block[0]):
+ # No entries for this directory at all.
+ current_block = None
+ if current_block is not None:
+ for entry in current_block[1]:
+ if entry[1][self.source_index][0] in 'ar':
+ # Not in the source tree, so doesn't have to be
+ # included.
+ continue
+ # Path of the entry itself.
+ self.search_specific_file_parents.add(
+ self.pathjoin(*entry[0][:2]))
+ if changed or self.include_unchanged:
+ results.append((result, changed))
+ self.searched_exact_paths.add(path_utf8)
+ return results
+
+ cdef object _path_info(self, utf8_path, unicode_path):
+ """Generate path_info for unicode_path.
+
+ :return: None if unicode_path does not exist, or a path_info tuple.
+ """
+ abspath = self.tree.abspath(unicode_path)
+ try:
+ stat = os.lstat(abspath)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # the path does not exist.
+ return None
+ else:
+ raise
+ utf8_basename = utf8_path.rsplit('/', 1)[-1]
+ dir_info = (utf8_path, utf8_basename,
+ osutils.file_kind_from_stat_mode(stat.st_mode), stat,
+ abspath)
+ if dir_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ unicode_path):
+ self.root_dir_info = self.root_dir_info[:2] + \
+ ('tree-reference',) + self.root_dir_info[3:]
+ return dir_info
diff --git a/bzrlib/_export_c_api.h b/bzrlib/_export_c_api.h
new file mode 100644
index 0000000..4e8625f
--- /dev/null
+++ b/bzrlib/_export_c_api.h
@@ -0,0 +1,104 @@
+/* Copyright (C) 2009 Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/* This file contains helper functions for exporting a C API for a CPython
+ * extension module.
+ */
+
+#ifndef _EXPORT_C_API_H_
+#define _EXPORT_C_API_H_
+
+static const char *_C_API_NAME = "_C_API";
+
+/**
+ * Add a C function to the module's _C_API
+ * This wraps the function in a PyCObject, and inserts that into a dict.
+ * The key of the dict is the function name, and the description is the
+ * signature of the function.
+ * This is generally called during a module's init_MODULE function.
+ *
+ * @param module A Python module (the one being initialized)
+ * @param funcname The name of the function being exported
+ * @param func A pointer to the function
+ * @param signature The C signature of the function
+ * @return 0 if everything is successful, -1 if there is a problem. An
+ * exception should also be set
+ */
+static int
+_export_function(PyObject *module, char *funcname, void *func, char *signature)
+{
+ PyObject *d = NULL;
+ PyObject *c_obj = NULL;
+
+    /* The (char *) cast is because Python 2.4 declares this API as 'char *'
+     * rather than the 'const char *' it really is.
+ */
+ d = PyObject_GetAttrString(module, (char *)_C_API_NAME);
+ if (!d) {
+ PyErr_Clear();
+ d = PyDict_New();
+ if (!d)
+ goto bad;
+ Py_INCREF(d);
+ if (PyModule_AddObject(module, (char *)_C_API_NAME, d) < 0)
+ goto bad;
+ }
+ c_obj = PyCObject_FromVoidPtrAndDesc(func, signature, 0);
+ if (!c_obj)
+ goto bad;
+ if (PyDict_SetItemString(d, funcname, c_obj) < 0)
+ goto bad;
+ Py_DECREF(d);
+ return 0;
+bad:
+ Py_XDECREF(c_obj);
+ Py_XDECREF(d);
+ return -1;
+}
+
+/* Note:
+ * It feels like more could be done here. Specifically, if you look at
+ * _static_tuple_c.h you can see some boilerplate where we have:
+ * #ifdef STATIC_TUPLE_MODULE // are we exporting or importing
+ * static RETVAL FUNCNAME PROTO;
+ * #else
+ * static RETVAL (*FUNCNAME) PROTO;
+ * #endif
+ *
+ * And then in _static_tuple_c.c we have
+ * int setup_c_api()
+ * {
+ * _export_function(module, #FUNCNAME, FUNCNAME, #PROTO);
+ * }
+ *
+ * And then in _static_tuple_c.h import_##MODULE
+ * struct function_definition functions[] = {
+ * {#FUNCNAME, (void **)&FUNCNAME, #RETVAL #PROTO},
+ * ...
+ * {NULL}};
+ *
+ * And some similar stuff for types. However, this would mean that we would
+ * need a way for the C preprocessor to build up a list of definitions to be
+ * generated, and then expand that list at the appropriate time.
+ * I would guess there would be a way to do this, but probably not without a
+ * lot of magic, and the end result probably wouldn't be very pretty to
+ * maintain. Perhaps python's dynamic nature has left me jaded about writing
+ * boilerplate....
+ */
+
+#endif // _EXPORT_C_API_H_
diff --git a/bzrlib/_groupcompress_py.py b/bzrlib/_groupcompress_py.py
new file mode 100644
index 0000000..a9b7799
--- /dev/null
+++ b/bzrlib/_groupcompress_py.py
@@ -0,0 +1,468 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Python version of compiled extensions for doing compression.
+
+We separate the implementation from groupcompress.py to avoid importing
+useless stuff.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import osutils
+
+
+class _OutputHandler(object):
+ """A simple class which just tracks how to split up an insert request."""
+
+ def __init__(self, out_lines, index_lines, min_len_to_index):
+ self.out_lines = out_lines
+ self.index_lines = index_lines
+ self.min_len_to_index = min_len_to_index
+ self.cur_insert_lines = []
+ self.cur_insert_len = 0
+
+ def add_copy(self, start_byte, end_byte):
+ # The data stream allows >64kB in a copy, but to match the compiled
+ # code, we will also limit it to a 64kB copy
+ for start_byte in xrange(start_byte, end_byte, 64*1024):
+ num_bytes = min(64*1024, end_byte - start_byte)
+ copy_bytes = encode_copy_instruction(start_byte, num_bytes)
+ self.out_lines.append(copy_bytes)
+ self.index_lines.append(False)
+
+ def _flush_insert(self):
+ if not self.cur_insert_lines:
+ return
+ if self.cur_insert_len > 127:
+ raise AssertionError('We cannot insert more than 127 bytes'
+ ' at a time.')
+ self.out_lines.append(chr(self.cur_insert_len))
+ self.index_lines.append(False)
+ self.out_lines.extend(self.cur_insert_lines)
+ if self.cur_insert_len < self.min_len_to_index:
+ self.index_lines.extend([False]*len(self.cur_insert_lines))
+ else:
+ self.index_lines.extend([True]*len(self.cur_insert_lines))
+ self.cur_insert_lines = []
+ self.cur_insert_len = 0
+
+ def _insert_long_line(self, line):
+ # Flush out anything pending
+ self._flush_insert()
+ line_len = len(line)
+ for start_index in xrange(0, line_len, 127):
+ next_len = min(127, line_len - start_index)
+ self.out_lines.append(chr(next_len))
+ self.index_lines.append(False)
+ self.out_lines.append(line[start_index:start_index+next_len])
+ # We don't index long lines, because we won't be able to match
+            # a line split across multiple inserts anyway
+ self.index_lines.append(False)
+
+ def add_insert(self, lines):
+ if self.cur_insert_lines != []:
+ raise AssertionError('self.cur_insert_lines must be empty when'
+ ' adding a new insert')
+ for line in lines:
+ if len(line) > 127:
+ self._insert_long_line(line)
+ else:
+ next_len = len(line) + self.cur_insert_len
+ if next_len > 127:
+ # Adding this line would overflow, so flush, and start over
+ self._flush_insert()
+ self.cur_insert_lines = [line]
+ self.cur_insert_len = len(line)
+ else:
+ self.cur_insert_lines.append(line)
+ self.cur_insert_len = next_len
+ self._flush_insert()
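A rough, hand-checked sketch of how the class above splits up its work (illustrative only, not part of the module; it assumes bzrlib._groupcompress_py is importable and uses made-up data):

    from bzrlib._groupcompress_py import _OutputHandler

    out_lines, index_lines = [], []
    handler = _OutputHandler(out_lines, index_lines, min_len_to_index=10)
    handler.add_insert(['short line\n'])   # one length byte, then the line
    handler.add_copy(0, 200 * 1024)        # split into four <=64KiB copy records
    # out_lines now holds the insert instruction, the inserted line and four
    # copy instructions; index_lines marks only the inserted line (>=10 bytes)
    # as worth indexing.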
+
+
+class LinesDeltaIndex(object):
+ """This class indexes matches between strings.
+
+ :ivar lines: The 'static' lines that will be preserved between runs.
+ :ivar _matching_lines: A dict of {line:[matching offsets]}
+ :ivar line_offsets: The byte offset for the end of each line, used to
+ quickly map between a matching line number and the byte location
+    :ivar endpoint: The total number of bytes in self.lines
+ """
+
+ _MIN_MATCH_BYTES = 10
+ _SOFT_MIN_MATCH_BYTES = 200
+
+ def __init__(self, lines):
+ self.lines = []
+ self.line_offsets = []
+ self.endpoint = 0
+ self._matching_lines = {}
+ self.extend_lines(lines, [True]*len(lines))
+
+ def _update_matching_lines(self, new_lines, index):
+ matches = self._matching_lines
+ start_idx = len(self.lines)
+ if len(new_lines) != len(index):
+ raise AssertionError('The number of lines to be indexed does'
+ ' not match the index/don\'t index flags: %d != %d'
+ % (len(new_lines), len(index)))
+ for idx, do_index in enumerate(index):
+ if not do_index:
+ continue
+ line = new_lines[idx]
+ try:
+ matches[line].add(start_idx + idx)
+ except KeyError:
+ matches[line] = set([start_idx + idx])
+
+ def get_matches(self, line):
+        """Return the set of offsets in self.lines which match line, or None."""
+ try:
+ return self._matching_lines[line]
+ except KeyError:
+ return None
+
+ def _get_longest_match(self, lines, pos):
+ """Look at all matches for the current line, return the longest.
+
+ :param lines: The lines we are matching against
+ :param pos: The current location we care about
+        :return: ((start_in_self, start_in_lines, num_lines), new_pos)
+            All values are offsets into the respective lists (line numbers).
+            If the first element is None, this line has no match and should
+            be inserted in the target; new_pos is the next position to
+            examine.
+ """
+ range_start = pos
+ range_len = 0
+ prev_locations = None
+ max_pos = len(lines)
+ matching = self._matching_lines
+ while pos < max_pos:
+ try:
+ locations = matching[lines[pos]]
+ except KeyError:
+ # No more matches, just return whatever we have, but we know
+ # that this last position is not going to match anything
+ pos += 1
+ break
+ # We have a match
+ if prev_locations is None:
+ # This is the first match in a range
+ prev_locations = locations
+ range_len = 1
+ locations = None # Consumed
+ else:
+ # We have a match started, compare to see if any of the
+ # current matches can be continued
+ next_locations = locations.intersection([loc + 1 for loc
+ in prev_locations])
+ if next_locations:
+ # At least one of the regions continues to match
+ prev_locations = set(next_locations)
+ range_len += 1
+ locations = None # Consumed
+ else:
+ # All current regions no longer match.
+ # This line does still match something, just not at the
+ # end of the previous matches. We will return locations
+ # so that we can avoid another _matching_lines lookup.
+ break
+ pos += 1
+ if prev_locations is None:
+ # We have no matches, this is a pure insert
+ return None, pos
+ smallest = min(prev_locations)
+ return (smallest - range_len + 1, range_start, range_len), pos
+
+ def get_matching_blocks(self, lines, soft=False):
+ """Return the ranges in lines which match self.lines.
+
+ :param lines: lines to compress
+ :return: A list of (old_start, new_start, length) tuples which reflect
+ a region in self.lines that is present in lines. The last element
+            of the list is always (old_len, new_len, 0) to provide an end point
+ for generating instructions from the matching blocks list.
+ """
+ # In this code, we iterate over multiple _get_longest_match calls, to
+ # find the next longest copy, and possible insert regions. We then
+ # convert that to the simple matching_blocks representation, since
+ # otherwise inserting 10 lines in a row would show up as 10
+ # instructions.
+ result = []
+ pos = 0
+ max_pos = len(lines)
+ result_append = result.append
+ min_match_bytes = self._MIN_MATCH_BYTES
+ if soft:
+ min_match_bytes = self._SOFT_MIN_MATCH_BYTES
+ while pos < max_pos:
+ block, pos = self._get_longest_match(lines, pos)
+ if block is not None:
+                # Check whether we match fewer than min_match_bytes; if so,
+                # we will turn this into a pure 'insert' rather than a copy.
+                # block[-1] is the number of lines matched. A quick check: if
+                # we matched more lines than min_match_bytes, then we know we
+                # have enough bytes.
+ if block[-1] < min_match_bytes:
+ # This block may be a 'short' block, check
+ old_start, new_start, range_len = block
+ matched_bytes = sum(map(len,
+ lines[new_start:new_start + range_len]))
+ if matched_bytes < min_match_bytes:
+ block = None
+ if block is not None:
+ result_append(block)
+ result_append((len(self.lines), len(lines), 0))
+ return result
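A small, hand-checked sketch of the matching behaviour described in the docstring above (illustrative only; the lines are made long enough that the match is not discarded by the _MIN_MATCH_BYTES threshold):

    from bzrlib._groupcompress_py import LinesDeltaIndex

    index = LinesDeltaIndex(['first line\n', 'second line\n', 'third line\n'])
    blocks = index.get_matching_blocks(
        ['first line\n', 'second line\n', 'changed line\n'])
    # blocks == [(0, 0, 2), (3, 3, 0)]: the two leading lines can be copied
    # from self.lines; the final (old_len, new_len, 0) entry is the sentinel.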
+
+ def extend_lines(self, lines, index):
+ """Add more lines to the left-lines list.
+
+ :param lines: A list of lines to add
+ :param index: A True/False for each node to define if it should be
+ indexed.
+ """
+ self._update_matching_lines(lines, index)
+ self.lines.extend(lines)
+ endpoint = self.endpoint
+ for line in lines:
+ endpoint += len(line)
+ self.line_offsets.append(endpoint)
+ if len(self.line_offsets) != len(self.lines):
+ raise AssertionError('Somehow the line offset indicator'
+ ' got out of sync with the line counter.')
+ self.endpoint = endpoint
+
+ def _flush_insert(self, start_linenum, end_linenum,
+ new_lines, out_lines, index_lines):
+ """Add an 'insert' request to the data stream."""
+ bytes_to_insert = ''.join(new_lines[start_linenum:end_linenum])
+ insert_length = len(bytes_to_insert)
+ # Each insert instruction is at most 127 bytes long
+ for start_byte in xrange(0, insert_length, 127):
+ insert_count = min(insert_length - start_byte, 127)
+ out_lines.append(chr(insert_count))
+ # Don't index the 'insert' instruction
+ index_lines.append(False)
+ insert = bytes_to_insert[start_byte:start_byte+insert_count]
+ as_lines = osutils.split_lines(insert)
+ out_lines.extend(as_lines)
+ index_lines.extend([True]*len(as_lines))
+
+ def _flush_copy(self, old_start_linenum, num_lines,
+ out_lines, index_lines):
+ if old_start_linenum == 0:
+ first_byte = 0
+ else:
+ first_byte = self.line_offsets[old_start_linenum - 1]
+ stop_byte = self.line_offsets[old_start_linenum + num_lines - 1]
+ num_bytes = stop_byte - first_byte
+ # The data stream allows >64kB in a copy, but to match the compiled
+ # code, we will also limit it to a 64kB copy
+ for start_byte in xrange(first_byte, stop_byte, 64*1024):
+ num_bytes = min(64*1024, stop_byte - start_byte)
+ copy_bytes = encode_copy_instruction(start_byte, num_bytes)
+ out_lines.append(copy_bytes)
+ index_lines.append(False)
+
+ def make_delta(self, new_lines, bytes_length=None, soft=False):
+ """Compute the delta for this content versus the original content."""
+ if bytes_length is None:
+ bytes_length = sum(map(len, new_lines))
+ # reserved for content type, content length
+ out_lines = ['', '', encode_base128_int(bytes_length)]
+ index_lines = [False, False, False]
+ output_handler = _OutputHandler(out_lines, index_lines,
+ self._MIN_MATCH_BYTES)
+ blocks = self.get_matching_blocks(new_lines, soft=soft)
+ current_line_num = 0
+        # We either copy a range (while there are reusable lines) or we
+        # insert new lines. To find reusable lines we traverse the matching
+        # blocks computed above.
+ for old_start, new_start, range_len in blocks:
+ if new_start != current_line_num:
+ # non-matching region, insert the content
+ output_handler.add_insert(new_lines[current_line_num:new_start])
+ current_line_num = new_start + range_len
+ if range_len:
+ # Convert the line based offsets into byte based offsets
+ if old_start == 0:
+ first_byte = 0
+ else:
+ first_byte = self.line_offsets[old_start - 1]
+ last_byte = self.line_offsets[old_start + range_len - 1]
+ output_handler.add_copy(first_byte, last_byte)
+ return out_lines, index_lines
+
+
+def encode_base128_int(val):
+ """Convert an integer into a 7-bit lsb encoding."""
+ bytes = []
+ count = 0
+ while val >= 0x80:
+ bytes.append(chr((val | 0x80) & 0xFF))
+ val >>= 7
+ bytes.append(chr(val))
+ return ''.join(bytes)
+
+
+def decode_base128_int(bytes):
+ """Decode an integer from a 7-bit lsb encoding."""
+ offset = 0
+ val = 0
+ shift = 0
+ bval = ord(bytes[offset])
+ while bval >= 0x80:
+ val |= (bval & 0x7F) << shift
+ shift += 7
+ offset += 1
+ bval = ord(bytes[offset])
+ val |= bval << shift
+ offset += 1
+ return val, offset
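A hand-checked round trip through the two varint helpers above (illustrative only; any small integer behaves the same way):

    from bzrlib._groupcompress_py import encode_base128_int, decode_base128_int

    encoded = encode_base128_int(130)            # '\x82\x01': low 7 bits first
    value, bytes_consumed = decode_base128_int(encoded)
    assert (value, bytes_consumed) == (130, 2)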
+
+
+def encode_copy_instruction(offset, length):
+    """Convert an (offset, length) pair into a copy control code and bytes."""
+ copy_command = 0x80
+ copy_bytes = [None]
+
+ for copy_bit in (0x01, 0x02, 0x04, 0x08):
+ base_byte = offset & 0xff
+ if base_byte:
+ copy_command |= copy_bit
+ copy_bytes.append(chr(base_byte))
+ offset >>= 8
+ if length is None:
+ raise ValueError("cannot supply a length of None")
+ if length > 0x10000:
+ raise ValueError("we don't emit copy records for lengths > 64KiB")
+ if length == 0:
+ raise ValueError("We cannot emit a copy of length 0")
+ if length != 0x10000:
+ # A copy of length exactly 64*1024 == 0x10000 is sent as a length of 0,
+ # since that saves bytes for large chained copies
+ for copy_bit in (0x10, 0x20):
+ base_byte = length & 0xff
+ if base_byte:
+ copy_command |= copy_bit
+ copy_bytes.append(chr(base_byte))
+ length >>= 8
+ copy_bytes[0] = chr(copy_command)
+ return ''.join(copy_bytes)
+
+
+def decode_copy_instruction(bytes, cmd, pos):
+ """Decode a copy instruction from the next few bytes.
+
+ A copy instruction is a variable number of bytes, so we will parse the
+ bytes we care about, and return the new position, as well as the offset and
+ length referred to in the bytes.
+
+ :param bytes: A string of bytes
+ :param cmd: The command code
+ :param pos: The position in bytes right after the copy command
+ :return: (offset, length, newpos)
+ The offset of the copy start, the number of bytes to copy, and the
+ position after the last byte of the copy
+ """
+ if cmd & 0x80 != 0x80:
+ raise ValueError('copy instructions must have bit 0x80 set')
+ offset = 0
+ length = 0
+ if (cmd & 0x01):
+ offset = ord(bytes[pos])
+ pos += 1
+ if (cmd & 0x02):
+ offset = offset | (ord(bytes[pos]) << 8)
+ pos += 1
+ if (cmd & 0x04):
+ offset = offset | (ord(bytes[pos]) << 16)
+ pos += 1
+ if (cmd & 0x08):
+ offset = offset | (ord(bytes[pos]) << 24)
+ pos += 1
+ if (cmd & 0x10):
+ length = ord(bytes[pos])
+ pos += 1
+ if (cmd & 0x20):
+ length = length | (ord(bytes[pos]) << 8)
+ pos += 1
+ if (cmd & 0x40):
+ length = length | (ord(bytes[pos]) << 16)
+ pos += 1
+ if length == 0:
+ length = 65536
+ return (offset, length, pos)
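A hand-checked round trip through the copy-instruction helpers above (illustrative only; decoding starts at position 1, just past the command byte):

    from bzrlib._groupcompress_py import (
        encode_copy_instruction, decode_copy_instruction)

    raw = encode_copy_instruction(0x1234, 300)   # '\xb3\x34\x12\x2c\x01'
    offset, length, newpos = decode_copy_instruction(raw, ord(raw[0]), 1)
    assert (offset, length, newpos) == (0x1234, 300, len(raw))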
+
+
+def make_delta(source_bytes, target_bytes):
+ """Create a delta from source to target."""
+ if type(source_bytes) is not str:
+ raise TypeError('source is not a str')
+ if type(target_bytes) is not str:
+ raise TypeError('target is not a str')
+ line_locations = LinesDeltaIndex(osutils.split_lines(source_bytes))
+ delta, _ = line_locations.make_delta(osutils.split_lines(target_bytes),
+ bytes_length=len(target_bytes))
+ return ''.join(delta)
+
+
+def apply_delta(basis, delta):
+    """Apply delta to basis and return the resulting bytes."""
+ if type(basis) is not str:
+ raise TypeError('basis is not a str')
+ if type(delta) is not str:
+ raise TypeError('delta is not a str')
+ target_length, pos = decode_base128_int(delta)
+ lines = []
+ len_delta = len(delta)
+ while pos < len_delta:
+ cmd = ord(delta[pos])
+ pos += 1
+ if cmd & 0x80:
+ offset, length, pos = decode_copy_instruction(delta, cmd, pos)
+ last = offset + length
+ if last > len(basis):
+                raise ValueError('data would copy bytes past the '
+ 'end of source')
+ lines.append(basis[offset:last])
+ else: # Insert of 'cmd' bytes
+ if cmd == 0:
+ raise ValueError('Command == 0 not supported yet')
+ lines.append(delta[pos:pos+cmd])
+ pos += cmd
+ bytes = ''.join(lines)
+ if len(bytes) != target_length:
+ raise ValueError('Delta claimed to be %d long, but ended up'
+ ' %d long' % (target_length, len(bytes)))
+ return bytes
+
+
+def apply_delta_to_source(source, delta_start, delta_end):
+ """Extract a delta from source bytes, and apply it."""
+ source_size = len(source)
+ if delta_start >= source_size:
+ raise ValueError('delta starts after source')
+ if delta_end > source_size:
+ raise ValueError('delta ends after source')
+ if delta_start >= delta_end:
+ raise ValueError('delta starts after it ends')
+ delta_bytes = source[delta_start:delta_end]
+ return apply_delta(source, delta_bytes)
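Tying the module together, a minimal end-to-end sketch (illustrative only, Python 2 like the module itself; the source/target strings are arbitrary test data):

    from bzrlib._groupcompress_py import make_delta, apply_delta

    source = 'common line\nsecond line\n'
    target = 'common line\nsecond line\nnew line\n'
    delta = make_delta(source, target)   # base128 length header, one copy, one insert
    assert apply_delta(source, delta) == target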
diff --git a/bzrlib/_groupcompress_pyx.c b/bzrlib/_groupcompress_pyx.c
new file mode 100644
index 0000000..96cf75a
--- /dev/null
+++ b/bzrlib/_groupcompress_pyx.c
@@ -0,0 +1,6763 @@
+/* Generated by Cython 0.13 on Thu May 26 17:26:10 2011 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#else
+
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+
+#if PY_VERSION_HEX < 0x02040000
+ #define METH_COEXIST 0
+ #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
+ #define PyDict_Contains(d,o) PySequence_Contains(d,o)
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PY_FORMAT_SIZE_T ""
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+ #define PyNumber_Index(o) PyNumber_Int(o)
+ #define PyIndex_Check(o) PyNumber_Check(o)
+ #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+ #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
+ #define PyVarObject_HEAD_INIT(type, size) \
+ PyObject_HEAD_INIT(type) size,
+ #define PyType_Modified(t)
+
+ typedef struct {
+ void *buf;
+ PyObject *obj;
+ Py_ssize_t len;
+ Py_ssize_t itemsize;
+ int readonly;
+ int ndim;
+ char *format;
+ Py_ssize_t *shape;
+ Py_ssize_t *strides;
+ Py_ssize_t *suboffsets;
+ void *internal;
+ } Py_buffer;
+
+ #define PyBUF_SIMPLE 0
+ #define PyBUF_WRITABLE 0x0001
+ #define PyBUF_FORMAT 0x0004
+ #define PyBUF_ND 0x0008
+ #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+ #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+ #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+ #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+ #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+
+#endif
+
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_CHECKTYPES 0
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PyBytesObject PyStringObject
+ #define PyBytes_Type PyString_Type
+ #define PyBytes_Check PyString_Check
+ #define PyBytes_CheckExact PyString_CheckExact
+ #define PyBytes_FromString PyString_FromString
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
+ #define PyBytes_FromFormat PyString_FromFormat
+ #define PyBytes_DecodeEscape PyString_DecodeEscape
+ #define PyBytes_AsString PyString_AsString
+ #define PyBytes_AsStringAndSize PyString_AsStringAndSize
+ #define PyBytes_Size PyString_Size
+ #define PyBytes_AS_STRING PyString_AS_STRING
+ #define PyBytes_GET_SIZE PyString_GET_SIZE
+ #define PyBytes_Repr PyString_Repr
+ #define PyBytes_Concat PyString_Concat
+ #define PyBytes_ConcatAndDel PyString_ConcatAndDel
+ #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
+ #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+
+#ifndef PySet_CheckExact
+# define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
+#else
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_NAMESTR(n) ((char *)(n))
+ #define __Pyx_DOCSTR(n) ((char *)(n))
+#else
+ #define __Pyx_NAMESTR(n) (n)
+ #define __Pyx_DOCSTR(n) (n)
+#endif
+
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE_API__bzrlib___groupcompress_pyx
+#include "python-compat.h"
+#include "delta.h"
+
+/* inline attribute */
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+/* unused attribute */
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || defined(__INTEL_COMPILER)
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+
+/* Type Conversion Predeclarations */
+
+#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
+#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
+
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
+
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+
+
+#ifdef __GNUC__
+/* Test for GCC > 2.95 */
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* __GNUC__ > 2 ... */
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif /* __GNUC__ > 2 ... */
+#else /* __GNUC__ */
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "_groupcompress_pyx.pyx",
+};
+
+/* Type declarations */
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":130
+ *
+ *
+ * cdef class DeltaIndex: # <<<<<<<<<<<<<<
+ *
+ * # We need Pyrex 0.9.8+ to understand a 'list' definition, and this object
+ */
+
+struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex *__pyx_vtab;
+ PyObject *_sources;
+ struct source_info *_source_infos;
+ struct delta_index *_index;
+ unsigned long _source_offset;
+ unsigned int _max_num_sources;
+ int _max_bytes_to_index;
+};
+
+
+struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex {
+ PyObject *(*_populate_first_index)(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *);
+ PyObject *(*_expand_sources)(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *);
+};
+static struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex *__pyx_vtabptr_6bzrlib_18_groupcompress_pyx_DeltaIndex;
+
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct * __Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule((char *)modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+ end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+ }
+ #define __Pyx_RefNannySetupContext(name) void *__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+ #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r);} } while(0)
+#else
+ #define __Pyx_RefNannySetupContext(name)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+#endif /* CYTHON_REFNANNY */
+#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);} } while(0)
+#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r);} } while(0)
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name, PyObject* kw_name); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) {
+ if (likely(PyList_CheckExact(L))) {
+ if (PyList_Append(L, x) < 0) return NULL;
+ Py_INCREF(Py_None);
+ return Py_None; /* this is just to have an accurate signature */
+ }
+ else {
+ PyObject *r, *m;
+ m = __Pyx_GetAttrString(L, "append");
+ if (!m) return NULL;
+ r = PyObject_CallFunctionObjArgs(m, x, NULL);
+ Py_DECREF(m);
+ return r;
+ }
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+#ifndef __PYX_FORCE_INIT_THREADS
+ #if PY_VERSION_HEX < 0x02040200
+ #define __PYX_FORCE_INIT_THREADS 1
+ #else
+ #define __PYX_FORCE_INIT_THREADS 0
+ #endif
+#endif
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static void __Pyx_AddTraceback(const char *funcname); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+/* Module declarations from bzrlib._groupcompress_pyx */
+
+static PyTypeObject *__pyx_ptype_6bzrlib_18_groupcompress_pyx_DeltaIndex = 0;
+static void *__pyx_f_6bzrlib_18_groupcompress_pyx_safe_malloc(size_t); /*proto*/
+static void *__pyx_f_6bzrlib_18_groupcompress_pyx_safe_realloc(void *, size_t); /*proto*/
+static int __pyx_f_6bzrlib_18_groupcompress_pyx_safe_free(void **); /*proto*/
+static PyObject *__pyx_f_6bzrlib_18_groupcompress_pyx__translate_delta_failure(delta_result); /*proto*/
+static unsigned char *__pyx_f_6bzrlib_18_groupcompress_pyx__decode_copy_instruction(unsigned char *, unsigned char, unsigned int *, unsigned int *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_18_groupcompress_pyx__apply_delta(char *, Py_ssize_t, char *, Py_ssize_t); /*proto*/
+#define __Pyx_MODULE_NAME "bzrlib._groupcompress_pyx"
+int __pyx_module_is_main_bzrlib___groupcompress_pyx = 0;
+
+/* Implementation of bzrlib._groupcompress_pyx */
+static PyObject *__pyx_builtin_MemoryError;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_RuntimeError;
+static PyObject *__pyx_builtin_AssertionError;
+static PyObject *__pyx_builtin_map;
+static PyObject *__pyx_builtin_TypeError;
+static char __pyx_k_1[] = "Failed to allocate %d bytes of memory";
+static char __pyx_k_2[] = "Failed to reallocate to %d bytes of memory";
+static char __pyx_k_3[] = "Delta function failed to allocate memory";
+static char __pyx_k_4[] = "Delta function requires delta_index param";
+static char __pyx_k_5[] = "Delta function given empty source_info param";
+static char __pyx_k_6[] = "Delta function given invalid source_info param";
+static char __pyx_k_7[] = "Delta function given empty buffer params";
+static char __pyx_k_8[] = "Unrecognised delta result code: %d";
+static char __pyx_k_9[] = "content must be a string";
+static char __pyx_k_10[] = "content must be at least 16 bytes long";
+static char __pyx_k_11[] = "%s(%d, %d)";
+static char __pyx_k_12[] = "delta is not a str";
+static char __pyx_k_13[] = "source is not a str";
+static char __pyx_k_14[] = "_populate_first_index";
+static char __pyx_k_15[] = "_populate_first_index should only be called when we have a single source and no index yet";
+static char __pyx_k_16[] = "if we move self._source_infos, then we need to change all of the index pointers as well.";
+static char __pyx_k_17[] = "target is not a str";
+static char __pyx_k_18[] = "delta_size %d smaller than min delta size %d";
+static char __pyx_k_19[] = "Something wrong with: cp_off = %s, cp_size = %s source_size = %s, size = %s";
+static char __pyx_k_20[] = "Got delta opcode: 0, not supported";
+static char __pyx_k_21[] = "Insert instruction longer than remaining bytes: %d > %d";
+static char __pyx_k_22[] = "Did not extract the number of bytes we expected we were left with %d bytes in \"size\", and top - data = %d";
+static char __pyx_k_23[] = "Number of bytes extracted did not match the size encoded in the delta header.";
+static char __pyx_k_24[] = "delta starts after source";
+static char __pyx_k_25[] = "delta ends after source";
+static char __pyx_k_26[] = "delta starts after it ends";
+static char __pyx_k_27[] = "encode_base128_int overflowed the buffer";
+static char __pyx_k_28[] = "bytes is not a string";
+static char __pyx_k_29[] = "Data not properly formatted, we ran out of bytes before 0x80 stopped being set.";
+static char __pyx_k_30[] = "Compiled extensions for doing compression.";
+static char __pyx_k_31[] = "DeltaIndex._dump_index (line 185)";
+static char __pyx_k_32[] = "DeltaIndex.add_delta_source (line 223)";
+static char __pyx_k_33[] = "DeltaIndex.add_source (line 260)";
+static char __pyx_k_34[] = "DeltaIndex.make_delta (line 333)";
+static char __pyx_k_35[] = "make_delta (line 370)";
+static char __pyx_k_36[] = "apply_delta (line 376)";
+static char __pyx_k_37[] = "apply_delta_to_source (line 521)";
+static char __pyx_k_38[] = "apply_delta_to_source";
+static char __pyx_k_39[] = "encode_base128_int (line 549)";
+static char __pyx_k_40[] = "decode_base128_int (line 569)";
+static char __pyx_k__buf[] = "buf";
+static char __pyx_k__map[] = "map";
+static char __pyx_k__size[] = "size";
+static char __pyx_k__delta[] = "delta";
+static char __pyx_k___index[] = "_index";
+static char __pyx_k__source[] = "source";
+static char __pyx_k____main__[] = "__main__";
+static char __pyx_k____name__[] = "__name__";
+static char __pyx_k____test__[] = "__test__";
+static char __pyx_k___sources[] = "_sources";
+static char __pyx_k__TypeError[] = "TypeError";
+static char __pyx_k____class__[] = "__class__";
+static char __pyx_k__delta_end[] = "delta_end";
+static char __pyx_k__DeltaIndex[] = "DeltaIndex";
+static char __pyx_k__ValueError[] = "ValueError";
+static char __pyx_k__add_source[] = "add_source";
+static char __pyx_k__agg_offset[] = "agg_offset";
+static char __pyx_k__make_delta[] = "make_delta";
+static char __pyx_k__MemoryError[] = "MemoryError";
+static char __pyx_k___dump_index[] = "_dump_index";
+static char __pyx_k__apply_delta[] = "apply_delta";
+static char __pyx_k__delta_bytes[] = "delta_bytes";
+static char __pyx_k__delta_start[] = "delta_start";
+static char __pyx_k__RuntimeError[] = "RuntimeError";
+static char __pyx_k__source_bytes[] = "source_bytes";
+static char __pyx_k__target_bytes[] = "target_bytes";
+static char __pyx_k___source_infos[] = "_source_infos";
+static char __pyx_k__unadded_bytes[] = "unadded_bytes";
+static char __pyx_k__AssertionError[] = "AssertionError";
+static char __pyx_k___source_offset[] = "_source_offset";
+static char __pyx_k__max_delta_size[] = "max_delta_size";
+static char __pyx_k___expand_sources[] = "_expand_sources";
+static char __pyx_k___max_num_sources[] = "_max_num_sources";
+static char __pyx_k__add_delta_source[] = "add_delta_source";
+static char __pyx_k__decode_base128_int[] = "decode_base128_int";
+static char __pyx_k__encode_base128_int[] = "encode_base128_int";
+static char __pyx_k__max_bytes_to_index[] = "max_bytes_to_index";
+static char __pyx_k___max_bytes_to_index[] = "_max_bytes_to_index";
+static PyObject *__pyx_kp_s_1;
+static PyObject *__pyx_kp_s_10;
+static PyObject *__pyx_kp_s_11;
+static PyObject *__pyx_kp_s_12;
+static PyObject *__pyx_kp_s_13;
+static PyObject *__pyx_n_s_14;
+static PyObject *__pyx_kp_s_15;
+static PyObject *__pyx_kp_s_16;
+static PyObject *__pyx_kp_s_17;
+static PyObject *__pyx_kp_s_18;
+static PyObject *__pyx_kp_s_19;
+static PyObject *__pyx_kp_s_2;
+static PyObject *__pyx_kp_s_20;
+static PyObject *__pyx_kp_s_21;
+static PyObject *__pyx_kp_s_22;
+static PyObject *__pyx_kp_s_23;
+static PyObject *__pyx_kp_s_24;
+static PyObject *__pyx_kp_s_25;
+static PyObject *__pyx_kp_s_26;
+static PyObject *__pyx_kp_s_27;
+static PyObject *__pyx_kp_s_28;
+static PyObject *__pyx_kp_s_29;
+static PyObject *__pyx_kp_s_3;
+static PyObject *__pyx_kp_u_31;
+static PyObject *__pyx_kp_u_32;
+static PyObject *__pyx_kp_u_33;
+static PyObject *__pyx_kp_u_34;
+static PyObject *__pyx_kp_u_35;
+static PyObject *__pyx_kp_u_36;
+static PyObject *__pyx_kp_u_37;
+static PyObject *__pyx_n_s_38;
+static PyObject *__pyx_kp_u_39;
+static PyObject *__pyx_kp_s_4;
+static PyObject *__pyx_kp_u_40;
+static PyObject *__pyx_kp_s_5;
+static PyObject *__pyx_kp_s_6;
+static PyObject *__pyx_kp_s_7;
+static PyObject *__pyx_kp_s_8;
+static PyObject *__pyx_kp_s_9;
+static PyObject *__pyx_n_s__AssertionError;
+static PyObject *__pyx_n_s__DeltaIndex;
+static PyObject *__pyx_n_s__MemoryError;
+static PyObject *__pyx_n_s__RuntimeError;
+static PyObject *__pyx_n_s__TypeError;
+static PyObject *__pyx_n_s__ValueError;
+static PyObject *__pyx_n_s____class__;
+static PyObject *__pyx_n_s____main__;
+static PyObject *__pyx_n_s____name__;
+static PyObject *__pyx_n_s____test__;
+static PyObject *__pyx_n_s___dump_index;
+static PyObject *__pyx_n_s___expand_sources;
+static PyObject *__pyx_n_s___index;
+static PyObject *__pyx_n_s___max_bytes_to_index;
+static PyObject *__pyx_n_s___max_num_sources;
+static PyObject *__pyx_n_s___source_infos;
+static PyObject *__pyx_n_s___source_offset;
+static PyObject *__pyx_n_s___sources;
+static PyObject *__pyx_n_s__add_delta_source;
+static PyObject *__pyx_n_s__add_source;
+static PyObject *__pyx_n_s__agg_offset;
+static PyObject *__pyx_n_s__apply_delta;
+static PyObject *__pyx_n_s__buf;
+static PyObject *__pyx_n_s__decode_base128_int;
+static PyObject *__pyx_n_s__delta;
+static PyObject *__pyx_n_s__delta_bytes;
+static PyObject *__pyx_n_s__delta_end;
+static PyObject *__pyx_n_s__delta_start;
+static PyObject *__pyx_n_s__encode_base128_int;
+static PyObject *__pyx_n_s__make_delta;
+static PyObject *__pyx_n_s__map;
+static PyObject *__pyx_n_s__max_bytes_to_index;
+static PyObject *__pyx_n_s__max_delta_size;
+static PyObject *__pyx_n_s__size;
+static PyObject *__pyx_n_s__source;
+static PyObject *__pyx_n_s__source_bytes;
+static PyObject *__pyx_n_s__target_bytes;
+static PyObject *__pyx_n_s__unadded_bytes;
+static PyObject *__pyx_int_0;
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":80
+ *
+ *
+ * cdef void *safe_malloc(size_t count) except NULL: # <<<<<<<<<<<<<<
+ * cdef void *result
+ * result = malloc(count)
+ */
+
+static void *__pyx_f_6bzrlib_18_groupcompress_pyx_safe_malloc(size_t __pyx_v_count) {
+ void *__pyx_v_result;
+ void *__pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ __Pyx_RefNannySetupContext("safe_malloc");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":82
+ * cdef void *safe_malloc(size_t count) except NULL:
+ * cdef void *result
+ * result = malloc(count) # <<<<<<<<<<<<<<
+ * if result == NULL:
+ * raise MemoryError('Failed to allocate %d bytes of memory' % (count,))
+ */
+ __pyx_v_result = malloc(__pyx_v_count);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":83
+ * cdef void *result
+ * result = malloc(count)
+ * if result == NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError('Failed to allocate %d bytes of memory' % (count,))
+ * return result
+ */
+ __pyx_t_1 = (__pyx_v_result == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":84
+ * result = malloc(count)
+ * if result == NULL:
+ * raise MemoryError('Failed to allocate %d bytes of memory' % (count,)) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_count); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_MemoryError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":85
+ * if result == NULL:
+ * raise MemoryError('Failed to allocate %d bytes of memory' % (count,))
+ * return result # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.safe_malloc");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":88
+ *
+ *
+ * cdef void *safe_realloc(void * old, size_t count) except NULL: # <<<<<<<<<<<<<<
+ * cdef void *result
+ * result = realloc(old, count)
+ */
+
+static void *__pyx_f_6bzrlib_18_groupcompress_pyx_safe_realloc(void *__pyx_v_old, size_t __pyx_v_count) {
+ void *__pyx_v_result;
+ void *__pyx_r;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ __Pyx_RefNannySetupContext("safe_realloc");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":90
+ * cdef void *safe_realloc(void * old, size_t count) except NULL:
+ * cdef void *result
+ * result = realloc(old, count) # <<<<<<<<<<<<<<
+ * if result == NULL:
+ * raise MemoryError('Failed to reallocate to %d bytes of memory'
+ */
+ __pyx_v_result = realloc(__pyx_v_old, __pyx_v_count);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":91
+ * cdef void *result
+ * result = realloc(old, count)
+ * if result == NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError('Failed to reallocate to %d bytes of memory'
+ * % (count,))
+ */
+ __pyx_t_1 = (__pyx_v_result == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":93
+ * if result == NULL:
+ * raise MemoryError('Failed to reallocate to %d bytes of memory'
+ * % (count,)) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_count); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_2), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_MemoryError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":94
+ * raise MemoryError('Failed to reallocate to %d bytes of memory'
+ * % (count,))
+ * return result # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.safe_realloc");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":97
+ *
+ *
+ * cdef int safe_free(void **val) except -1: # <<<<<<<<<<<<<<
+ * assert val != NULL
+ * if val[0] != NULL:
+ */
+
+static int __pyx_f_6bzrlib_18_groupcompress_pyx_safe_free(void **__pyx_v_val) {
+ int __pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("safe_free");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":98
+ *
+ * cdef int safe_free(void **val) except -1:
+ * assert val != NULL # <<<<<<<<<<<<<<
+ * if val[0] != NULL:
+ * free(val[0])
+ */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ if (unlikely(!(__pyx_v_val != NULL))) {
+ PyErr_SetNone(PyExc_AssertionError);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":99
+ * cdef int safe_free(void **val) except -1:
+ * assert val != NULL
+ * if val[0] != NULL: # <<<<<<<<<<<<<<
+ * free(val[0])
+ * val[0] = NULL
+ */
+ __pyx_t_1 = ((__pyx_v_val[0]) != NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":100
+ * assert val != NULL
+ * if val[0] != NULL:
+ * free(val[0]) # <<<<<<<<<<<<<<
+ * val[0] = NULL
+ *
+ */
+ free((__pyx_v_val[0]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":101
+ * if val[0] != NULL:
+ * free(val[0])
+ * val[0] = NULL # <<<<<<<<<<<<<<
+ *
+ * def make_delta_index(source):
+ */
+ (__pyx_v_val[0]) = NULL;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.safe_free");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
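+/* Editorial note (not part of the committed file): the generated helpers
+ * above implement safe_malloc/safe_realloc/safe_free from the excerpt
+ * comments.  As a reading aid only, a minimal hand-written C sketch of the
+ * same pattern (hypothetical name; assumes Python.h and stdlib.h, which
+ * this module already includes) might look like:
+ *
+ *   static void *sketch_safe_malloc(size_t count)
+ *   {
+ *       void *result = malloc(count);
+ *       if (result == NULL) {
+ *           PyErr_Format(PyExc_MemoryError,
+ *                        "Failed to allocate %lu bytes of memory",
+ *                        (unsigned long)count);
+ *       }
+ *       return result;
+ *   }
+ *
+ * The NULL return doubles as the 'except NULL' error sentinel that callers
+ * test for.  safe_realloc follows the same shape around realloc(), and
+ * safe_free() frees *val and resets it to NULL so a repeated call is
+ * harmless.
+ */
+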
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":103
+ * val[0] = NULL
+ *
+ * def make_delta_index(source): # <<<<<<<<<<<<<<
+ * return DeltaIndex(source)
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_make_delta_index(PyObject *__pyx_self, PyObject *__pyx_v_source); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_make_delta_index(PyObject *__pyx_self, PyObject *__pyx_v_source) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannySetupContext("make_delta_index");
+ __pyx_self = __pyx_self;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":104
+ *
+ * def make_delta_index(source):
+ * return DeltaIndex(source) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_source);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_source);
+ __Pyx_GIVEREF(__pyx_v_source);
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_18_groupcompress_pyx_DeltaIndex)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.make_delta_index");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":107
+ *
+ *
+ * cdef object _translate_delta_failure(delta_result result): # <<<<<<<<<<<<<<
+ * if result == DELTA_OUT_OF_MEMORY:
+ * return MemoryError("Delta function failed to allocate memory")
+ */
+
+static PyObject *__pyx_f_6bzrlib_18_groupcompress_pyx__translate_delta_failure(delta_result __pyx_v_result) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ __Pyx_RefNannySetupContext("_translate_delta_failure");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":108
+ *
+ * cdef object _translate_delta_failure(delta_result result):
+ * if result == DELTA_OUT_OF_MEMORY: # <<<<<<<<<<<<<<
+ * return MemoryError("Delta function failed to allocate memory")
+ * elif result == DELTA_INDEX_NEEDED:
+ */
+ __pyx_t_1 = (__pyx_v_result == DELTA_OUT_OF_MEMORY);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":109
+ * cdef object _translate_delta_failure(delta_result result):
+ * if result == DELTA_OUT_OF_MEMORY:
+ * return MemoryError("Delta function failed to allocate memory") # <<<<<<<<<<<<<<
+ * elif result == DELTA_INDEX_NEEDED:
+ * return ValueError("Delta function requires delta_index param")
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_3));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_3));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_MemoryError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":110
+ * if result == DELTA_OUT_OF_MEMORY:
+ * return MemoryError("Delta function failed to allocate memory")
+ * elif result == DELTA_INDEX_NEEDED: # <<<<<<<<<<<<<<
+ * return ValueError("Delta function requires delta_index param")
+ * elif result == DELTA_SOURCE_EMPTY:
+ */
+ __pyx_t_1 = (__pyx_v_result == DELTA_INDEX_NEEDED);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":111
+ * return MemoryError("Delta function failed to allocate memory")
+ * elif result == DELTA_INDEX_NEEDED:
+ * return ValueError("Delta function requires delta_index param") # <<<<<<<<<<<<<<
+ * elif result == DELTA_SOURCE_EMPTY:
+ * return ValueError("Delta function given empty source_info param")
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_4));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":112
+ * elif result == DELTA_INDEX_NEEDED:
+ * return ValueError("Delta function requires delta_index param")
+ * elif result == DELTA_SOURCE_EMPTY: # <<<<<<<<<<<<<<
+ * return ValueError("Delta function given empty source_info param")
+ * elif result == DELTA_SOURCE_BAD:
+ */
+ __pyx_t_1 = (__pyx_v_result == DELTA_SOURCE_EMPTY);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":113
+ * return ValueError("Delta function requires delta_index param")
+ * elif result == DELTA_SOURCE_EMPTY:
+ * return ValueError("Delta function given empty source_info param") # <<<<<<<<<<<<<<
+ * elif result == DELTA_SOURCE_BAD:
+ * return RuntimeError("Delta function given invalid source_info param")
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":114
+ * elif result == DELTA_SOURCE_EMPTY:
+ * return ValueError("Delta function given empty source_info param")
+ * elif result == DELTA_SOURCE_BAD: # <<<<<<<<<<<<<<
+ * return RuntimeError("Delta function given invalid source_info param")
+ * elif result == DELTA_BUFFER_EMPTY:
+ */
+ __pyx_t_1 = (__pyx_v_result == DELTA_SOURCE_BAD);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":115
+ * return ValueError("Delta function given empty source_info param")
+ * elif result == DELTA_SOURCE_BAD:
+ * return RuntimeError("Delta function given invalid source_info param") # <<<<<<<<<<<<<<
+ * elif result == DELTA_BUFFER_EMPTY:
+ * return ValueError("Delta function given empty buffer params")
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_6));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_6));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_6));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":116
+ * elif result == DELTA_SOURCE_BAD:
+ * return RuntimeError("Delta function given invalid source_info param")
+ * elif result == DELTA_BUFFER_EMPTY: # <<<<<<<<<<<<<<
+ * return ValueError("Delta function given empty buffer params")
+ * return AssertionError("Unrecognised delta result code: %d" % result)
+ */
+ __pyx_t_1 = (__pyx_v_result == DELTA_BUFFER_EMPTY);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":117
+ * return RuntimeError("Delta function given invalid source_info param")
+ * elif result == DELTA_BUFFER_EMPTY:
+ * return ValueError("Delta function given empty buffer params") # <<<<<<<<<<<<<<
+ * return AssertionError("Unrecognised delta result code: %d" % result)
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_7));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_7));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":118
+ * elif result == DELTA_BUFFER_EMPTY:
+ * return ValueError("Delta function given empty buffer params")
+ * return AssertionError("Unrecognised delta result code: %d" % result) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_result); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_8), __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx._translate_delta_failure");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
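+/* Editorial note (not part of the committed file): the if/elif chain above
+ * is _translate_delta_failure from the excerpt comments.  It builds and
+ * returns an exception instance rather than raising it, so callers decide
+ * where to raise.  A condensed C sketch of the same mapping (hypothetical
+ * name; message text as quoted in the excerpts):
+ *
+ *   static PyObject *sketch_translate_delta_failure(delta_result result)
+ *   {
+ *       switch (result) {
+ *       case DELTA_OUT_OF_MEMORY:
+ *           return PyObject_CallFunction(PyExc_MemoryError, "s",
+ *               "Delta function failed to allocate memory");
+ *       case DELTA_INDEX_NEEDED:
+ *           return PyObject_CallFunction(PyExc_ValueError, "s",
+ *               "Delta function requires delta_index param");
+ *       case DELTA_SOURCE_EMPTY:
+ *           return PyObject_CallFunction(PyExc_ValueError, "s",
+ *               "Delta function given empty source_info param");
+ *       case DELTA_SOURCE_BAD:
+ *           return PyObject_CallFunction(PyExc_RuntimeError, "s",
+ *               "Delta function given invalid source_info param");
+ *       case DELTA_BUFFER_EMPTY:
+ *           return PyObject_CallFunction(PyExc_ValueError, "s",
+ *               "Delta function given empty buffer params");
+ *       default:
+ *           return PyObject_CallFunction(PyExc_AssertionError, "s",
+ *               "Unrecognised delta result code");
+ *       }
+ *   }
+ *
+ * The original additionally interpolates the numeric code into the
+ * AssertionError message.
+ */
+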
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":121
+ *
+ *
+ * def _rabin_hash(content): # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(content):
+ * raise ValueError('content must be a string')
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx__rabin_hash(PyObject *__pyx_self, PyObject *__pyx_v_content); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx__rabin_hash(PyObject *__pyx_self, PyObject *__pyx_v_content) {
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ __Pyx_RefNannySetupContext("_rabin_hash");
+ __pyx_self = __pyx_self;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":122
+ *
+ * def _rabin_hash(content):
+ * if not PyString_CheckExact(content): # <<<<<<<<<<<<<<
+ * raise ValueError('content must be a string')
+ * if len(content) < 16:
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_content));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":123
+ * def _rabin_hash(content):
+ * if not PyString_CheckExact(content):
+ * raise ValueError('content must be a string') # <<<<<<<<<<<<<<
+ * if len(content) < 16:
+ * raise ValueError('content must be at least 16 bytes long')
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_9));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":124
+ * if not PyString_CheckExact(content):
+ * raise ValueError('content must be a string')
+ * if len(content) < 16: # <<<<<<<<<<<<<<
+ * raise ValueError('content must be at least 16 bytes long')
+ * # Try to cast it to an int, if it can fit
+ */
+ __pyx_t_4 = PyObject_Length(__pyx_v_content); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = (__pyx_t_4 < 16);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":125
+ * raise ValueError('content must be a string')
+ * if len(content) < 16:
+ * raise ValueError('content must be at least 16 bytes long') # <<<<<<<<<<<<<<
+ * # Try to cast it to an int, if it can fit
+ * return int(rabin_hash(<unsigned char*>(PyString_AS_STRING(content))))
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_10));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_10));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_10));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":127
+ * raise ValueError('content must be at least 16 bytes long')
+ * # Try to cast it to an int, if it can fit
+ * return int(rabin_hash(<unsigned char*>(PyString_AS_STRING(content)))) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyLong_FromUnsignedLong(rabin_hash(((unsigned char *)PyString_AS_STRING(__pyx_v_content)))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)&PyInt_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx._rabin_hash");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
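+/* Editorial note (not part of the committed file): the generated function
+ * above is _rabin_hash from the excerpt comments: it insists on a plain str
+ * of at least 16 bytes before handing the buffer to rabin_hash().  A short
+ * C sketch of the same checks (hypothetical name):
+ *
+ *   static PyObject *sketch_rabin_hash(PyObject *content)
+ *   {
+ *       if (!PyString_CheckExact(content))
+ *           return PyErr_Format(PyExc_ValueError, "content must be a string");
+ *       if (PyString_GET_SIZE(content) < 16)
+ *           return PyErr_Format(PyExc_ValueError,
+ *                               "content must be at least 16 bytes long");
+ *       return PyLong_FromUnsignedLong(
+ *           rabin_hash((unsigned char *)PyString_AS_STRING(content)));
+ *   }
+ *
+ * The original additionally wraps the result in int() so values that fit
+ * come back as a plain int rather than a long.
+ */
+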
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":135
+ * # isn't performance critical
+ * # cdef readonly list _sources
+ * cdef readonly object _sources # <<<<<<<<<<<<<<
+ * cdef source_info *_source_infos
+ * cdef delta_index *_index
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_8_sources___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_8_sources___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources);
+ __pyx_r = ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":138
+ * cdef source_info *_source_infos
+ * cdef delta_index *_index
+ * cdef public unsigned long _source_offset # <<<<<<<<<<<<<<
+ * cdef readonly unsigned int _max_num_sources
+ * cdef public int _max_bytes_to_index
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_14_source_offset___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_14_source_offset___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyLong_FromUnsignedLong(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._source_offset.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_14_source_offset___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_14_source_offset___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ unsigned long __pyx_t_1;
+ __Pyx_RefNannySetupContext("__set__");
+ __pyx_t_1 = __Pyx_PyInt_AsUnsignedLong(__pyx_v_value); if (unlikely((__pyx_t_1 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset = __pyx_t_1;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._source_offset.__set__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":139
+ * cdef delta_index *_index
+ * cdef public unsigned long _source_offset
+ * cdef readonly unsigned int _max_num_sources # <<<<<<<<<<<<<<
+ * cdef public int _max_bytes_to_index
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_16_max_num_sources___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_16_max_num_sources___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyLong_FromUnsignedLong(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_num_sources); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._max_num_sources.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":140
+ * cdef public unsigned long _source_offset
+ * cdef readonly unsigned int _max_num_sources
+ * cdef public int _max_bytes_to_index # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, source=None, max_bytes_to_index=None):
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_19_max_bytes_to_index___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_19_max_bytes_to_index___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__get__");
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_bytes_to_index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._max_bytes_to_index.__get__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_19_max_bytes_to_index___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_19_max_bytes_to_index___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__set__");
+ __pyx_t_1 = __Pyx_PyInt_AsInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_bytes_to_index = __pyx_t_1;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._max_bytes_to_index.__set__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":142
+ * cdef public int _max_bytes_to_index
+ *
+ * def __init__(self, source=None, max_bytes_to_index=None): # <<<<<<<<<<<<<<
+ * self._sources = []
+ * self._index = NULL
+ */
+
+static int __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_source = 0;
+ PyObject *__pyx_v_max_bytes_to_index = 0;
+ int __pyx_r;
+ PyObject *__pyx_t_1 = NULL;
+ void *__pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__source,&__pyx_n_s__max_bytes_to_index,0};
+ __Pyx_RefNannySetupContext("__init__");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ values[0] = ((PyObject *)Py_None);
+ values[1] = ((PyObject *)Py_None);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__source);
+ if (value) { values[0] = value; kw_args--; }
+ }
+ case 1:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__max_bytes_to_index);
+ if (value) { values[1] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_source = values[0];
+ __pyx_v_max_bytes_to_index = values[1];
+ } else {
+ __pyx_v_source = ((PyObject *)Py_None);
+ __pyx_v_max_bytes_to_index = ((PyObject *)Py_None);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: __pyx_v_max_bytes_to_index = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: __pyx_v_source = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.__init__");
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":143
+ *
+ * def __init__(self, source=None, max_bytes_to_index=None):
+ * self._sources = [] # <<<<<<<<<<<<<<
+ * self._index = NULL
+ * self._max_num_sources = 65000
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __Pyx_GOTREF(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources);
+ __Pyx_DECREF(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources);
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources = ((PyObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":144
+ * def __init__(self, source=None, max_bytes_to_index=None):
+ * self._sources = []
+ * self._index = NULL # <<<<<<<<<<<<<<
+ * self._max_num_sources = 65000
+ * self._source_infos = <source_info *>safe_malloc(sizeof(source_info)
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":145
+ * self._sources = []
+ * self._index = NULL
+ * self._max_num_sources = 65000 # <<<<<<<<<<<<<<
+ * self._source_infos = <source_info *>safe_malloc(sizeof(source_info)
+ * * self._max_num_sources)
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_num_sources = 65000;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":147
+ * self._max_num_sources = 65000
+ * self._source_infos = <source_info *>safe_malloc(sizeof(source_info)
+ * * self._max_num_sources) # <<<<<<<<<<<<<<
+ * self._source_offset = 0
+ * self._max_bytes_to_index = 0
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_18_groupcompress_pyx_safe_malloc(((sizeof(struct source_info)) * ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_num_sources)); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":146
+ * self._index = NULL
+ * self._max_num_sources = 65000
+ * self._source_infos = <source_info *>safe_malloc(sizeof(source_info) # <<<<<<<<<<<<<<
+ * * self._max_num_sources)
+ * self._source_offset = 0
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_infos = ((struct source_info *)__pyx_t_2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":148
+ * self._source_infos = <source_info *>safe_malloc(sizeof(source_info)
+ * * self._max_num_sources)
+ * self._source_offset = 0 # <<<<<<<<<<<<<<
+ * self._max_bytes_to_index = 0
+ * if max_bytes_to_index is not None:
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":149
+ * * self._max_num_sources)
+ * self._source_offset = 0
+ * self._max_bytes_to_index = 0 # <<<<<<<<<<<<<<
+ * if max_bytes_to_index is not None:
+ * self._max_bytes_to_index = max_bytes_to_index
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_bytes_to_index = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":150
+ * self._source_offset = 0
+ * self._max_bytes_to_index = 0
+ * if max_bytes_to_index is not None: # <<<<<<<<<<<<<<
+ * self._max_bytes_to_index = max_bytes_to_index
+ *
+ */
+ __pyx_t_3 = (__pyx_v_max_bytes_to_index != Py_None);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":151
+ * self._max_bytes_to_index = 0
+ * if max_bytes_to_index is not None:
+ * self._max_bytes_to_index = max_bytes_to_index # <<<<<<<<<<<<<<
+ *
+ * if source is not None:
+ */
+ __pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_v_max_bytes_to_index); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_bytes_to_index = __pyx_t_4;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":153
+ * self._max_bytes_to_index = max_bytes_to_index
+ *
+ * if source is not None: # <<<<<<<<<<<<<<
+ * self.add_source(source, 0)
+ *
+ */
+ __pyx_t_3 = (__pyx_v_source != Py_None);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":154
+ *
+ * if source is not None:
+ * self.add_source(source, 0) # <<<<<<<<<<<<<<
+ *
+ * def __sizeof__(self):
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s__add_source); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_source);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_source);
+ __Pyx_GIVEREF(__pyx_v_source);
+ __Pyx_INCREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ __pyx_t_6 = PyObject_Call(__pyx_t_1, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
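+/* Editorial note (not part of the committed file): per the excerpt comments,
+ * DeltaIndex.__init__ pre-allocates a fixed table of 65000 source_info
+ * slots via safe_malloc and defers indexing to add_source().  Condensed,
+ * the generated body above amounts to (member access abridged):
+ *
+ *   self->_sources            = PyList_New(0);
+ *   self->_index              = NULL;
+ *   self->_max_num_sources    = 65000;
+ *   self->_source_infos       = (struct source_info *)
+ *       safe_malloc(sizeof(struct source_info) * self->_max_num_sources);
+ *   self->_source_offset      = 0;
+ *   self->_max_bytes_to_index = 0;
+ *
+ * after which _max_bytes_to_index is overwritten when a max_bytes_to_index
+ * argument is supplied, and self.add_source(source, 0) is called when a
+ * source is supplied.
+ */
+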
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":156
+ * self.add_source(source, 0)
+ *
+ * def __sizeof__(self): # <<<<<<<<<<<<<<
+ * # We want to track the _source_infos allocations, but the referenced
+ * # void* are actually tracked in _sources itself.
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___sizeof__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___sizeof__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ Py_ssize_t __pyx_v_size;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("__sizeof__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":169
+ * + sizeof(unsigned int))
+ * + (sizeof(source_info) * self._max_num_sources)
+ * + sizeof_delta_index(self._index)) # <<<<<<<<<<<<<<
+ * return size
+ *
+ */
+ __pyx_v_size = (((((((sizeof(PyObject)) + (sizeof(void *))) + (3 * (sizeof(PyObject *)))) + (sizeof(unsigned long))) + (sizeof(unsigned int))) + ((sizeof(struct source_info)) * ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_num_sources)) + sizeof_delta_index(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":170
+ * + (sizeof(source_info) * self._max_num_sources)
+ * + sizeof_delta_index(self._index))
+ * return size # <<<<<<<<<<<<<<
+ *
+ * def __repr__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.__sizeof__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":172
+ * return size
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return '%s(%d, %d)' % (self.__class__.__name__,
+ * len(self._sources), self._source_offset)
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ Py_ssize_t __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ __Pyx_RefNannySetupContext("__repr__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":173
+ *
+ * def __repr__(self):
+ * return '%s(%d, %d)' % (self.__class__.__name__, # <<<<<<<<<<<<<<
+ * len(self._sources), self._source_offset)
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_s____class__); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s____name__); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":174
+ * def __repr__(self):
+ * return '%s(%d, %d)' % (self.__class__.__name__,
+ * len(self._sources), self._source_offset) # <<<<<<<<<<<<<<
+ *
+ * def __dealloc__(self):
+ */
+ __pyx_t_3 = PyObject_Length(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyLong_FromUnsignedLong(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_2 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_11), __pyx_t_5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_r = ((PyObject *)__pyx_t_4);
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.__repr__");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":176
+ * len(self._sources), self._source_offset)
+ *
+ * def __dealloc__(self): # <<<<<<<<<<<<<<
+ * if self._index != NULL:
+ * free_delta_index(self._index)
+ */
+
+static void __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___dealloc__(PyObject *__pyx_v_self) {
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("__dealloc__");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":177
+ *
+ * def __dealloc__(self):
+ * if self._index != NULL: # <<<<<<<<<<<<<<
+ * free_delta_index(self._index)
+ * self._index = NULL
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index != NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":178
+ * def __dealloc__(self):
+ * if self._index != NULL:
+ * free_delta_index(self._index) # <<<<<<<<<<<<<<
+ * self._index = NULL
+ * safe_free(<void **>&self._source_infos)
+ */
+ free_delta_index(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":179
+ * if self._index != NULL:
+ * free_delta_index(self._index)
+ * self._index = NULL # <<<<<<<<<<<<<<
+ * safe_free(<void **>&self._source_infos)
+ *
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index = NULL;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":180
+ * free_delta_index(self._index)
+ * self._index = NULL
+ * safe_free(<void **>&self._source_infos) # <<<<<<<<<<<<<<
+ *
+ * def _has_index(self):
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_18_groupcompress_pyx_safe_free(((void **)(&((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_infos))); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.__dealloc__");
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":182
+ * safe_free(<void **>&self._source_infos)
+ *
+ * def _has_index(self): # <<<<<<<<<<<<<<
+ * return (self._index != NULL)
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex__has_index(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex__has_index(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("_has_index");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":183
+ *
+ * def _has_index(self):
+ * return (self._index != NULL) # <<<<<<<<<<<<<<
+ *
+ * def _dump_index(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyBool_FromLong((((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index != NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._has_index");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":185
+ * return (self._index != NULL)
+ *
+ * def _dump_index(self): # <<<<<<<<<<<<<<
+ * """Dump the pointers in the index.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex__dump_index(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex__dump_index[] = "Dump the pointers in the index.\n\n This is an arbitrary layout, used for testing. It is not meant to be\n used in production code.\n\n :return: (hash_list, entry_list)\n hash_list A list of offsets, so hash[i] points to the 'hash\n bucket' starting at the given offset and going until\n hash[i+1]\n entry_list A list of (text_offset, hash_val). text_offset is the\n offset in the \"source\" texts, and hash_val is the RABIN\n hash for that offset.\n Note that the entry should be in the hash bucket\n defined by\n hash[(hash_val & mask)] && hash[(hash_val & mask) + 1]\n ";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex__dump_index(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ int __pyx_v_pos;
+ unsigned int __pyx_v_text_offset;
+ unsigned int __pyx_v_hash_val;
+ unsigned int __pyx_v_hash_offset;
+ PyObject *__pyx_v_hash_list;
+ PyObject *__pyx_v_entry_list;
+ PyObject *__pyx_v_val;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ __Pyx_RefNannySetupContext("_dump_index");
+ __pyx_v_hash_list = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_entry_list = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+ __pyx_v_val = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":206
+ * cdef unsigned int hash_val
+ * cdef unsigned int hash_offset
+ * if self._index == NULL: # <<<<<<<<<<<<<<
+ * return None
+ * hash_list = []
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":207
+ * cdef unsigned int hash_offset
+ * if self._index == NULL:
+ * return None # <<<<<<<<<<<<<<
+ * hash_list = []
+ * pos = 0
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":208
+ * if self._index == NULL:
+ * return None
+ * hash_list = [] # <<<<<<<<<<<<<<
+ * pos = 0
+ * while get_hash_offset(self._index, pos, &hash_offset):
+ */
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(((PyObject *)__pyx_v_hash_list));
+ __pyx_v_hash_list = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":209
+ * return None
+ * hash_list = []
+ * pos = 0 # <<<<<<<<<<<<<<
+ * while get_hash_offset(self._index, pos, &hash_offset):
+ * hash_list.append(int(hash_offset))
+ */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":210
+ * hash_list = []
+ * pos = 0
+ * while get_hash_offset(self._index, pos, &hash_offset): # <<<<<<<<<<<<<<
+ * hash_list.append(int(hash_offset))
+ * pos += 1
+ */
+ while (1) {
+ __pyx_t_3 = get_hash_offset(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index, __pyx_v_pos, (&__pyx_v_hash_offset));
+ if (!__pyx_t_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":211
+ * pos = 0
+ * while get_hash_offset(self._index, pos, &hash_offset):
+ * hash_list.append(int(hash_offset)) # <<<<<<<<<<<<<<
+ * pos += 1
+ * entry_list = []
+ */
+ if (unlikely(__pyx_v_hash_list == Py_None)) {
+ PyErr_SetString(PyExc_AttributeError, "'NoneType' object has no attribute 'append'"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_2 = PyLong_FromUnsignedLong(__pyx_v_hash_offset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)&PyInt_Type)), __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_3 = PyList_Append(((PyObject *)__pyx_v_hash_list), __pyx_t_2); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":212
+ * while get_hash_offset(self._index, pos, &hash_offset):
+ * hash_list.append(int(hash_offset))
+ * pos += 1 # <<<<<<<<<<<<<<
+ * entry_list = []
+ * pos = 0
+ */
+ __pyx_v_pos += 1;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":213
+ * hash_list.append(int(hash_offset))
+ * pos += 1
+ * entry_list = [] # <<<<<<<<<<<<<<
+ * pos = 0
+ * while get_entry_summary(self._index, pos, &text_offset, &hash_val):
+ */
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_DECREF(((PyObject *)__pyx_v_entry_list));
+ __pyx_v_entry_list = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":214
+ * pos += 1
+ * entry_list = []
+ * pos = 0 # <<<<<<<<<<<<<<
+ * while get_entry_summary(self._index, pos, &text_offset, &hash_val):
+ * # Map back using 'int' so that we don't get Long everywhere, when
+ */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":215
+ * entry_list = []
+ * pos = 0
+ * while get_entry_summary(self._index, pos, &text_offset, &hash_val): # <<<<<<<<<<<<<<
+ * # Map back using 'int' so that we don't get Long everywhere, when
+ * # almost everything is <2**31.
+ */
+ while (1) {
+ __pyx_t_3 = get_entry_summary(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index, __pyx_v_pos, (&__pyx_v_text_offset), (&__pyx_v_hash_val));
+ if (!__pyx_t_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":218
+ * # Map back using 'int' so that we don't get Long everywhere, when
+ * # almost everything is <2**31.
+ * val = tuple(map(int, [text_offset, hash_val])) # <<<<<<<<<<<<<<
+ * entry_list.append(val)
+ * pos += 1
+ */
+ __pyx_t_2 = PyLong_FromUnsignedLong(__pyx_v_text_offset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyLong_FromUnsignedLong(__pyx_v_hash_val); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_5));
+ PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_2 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_INCREF(((PyObject *)((PyObject*)&PyInt_Type)));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)((PyObject*)&PyInt_Type)));
+ __Pyx_GIVEREF(((PyObject *)((PyObject*)&PyInt_Type)));
+ PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_t_5));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_5));
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_map, __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_Call(((PyObject *)((PyObject*)&PyTuple_Type)), __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_val));
+ __pyx_v_val = ((PyObject *)__pyx_t_5);
+ __pyx_t_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":219
+ * # almost everything is <2**31.
+ * val = tuple(map(int, [text_offset, hash_val]))
+ * entry_list.append(val) # <<<<<<<<<<<<<<
+ * pos += 1
+ * return hash_list, entry_list
+ */
+ if (unlikely(__pyx_v_entry_list == Py_None)) {
+ PyErr_SetString(PyExc_AttributeError, "'NoneType' object has no attribute 'append'"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_3 = PyList_Append(((PyObject *)__pyx_v_entry_list), ((PyObject *)__pyx_v_val)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":220
+ * val = tuple(map(int, [text_offset, hash_val]))
+ * entry_list.append(val)
+ * pos += 1 # <<<<<<<<<<<<<<
+ * return hash_list, entry_list
+ *
+ */
+ __pyx_v_pos += 1;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":221
+ * entry_list.append(val)
+ * pos += 1
+ * return hash_list, entry_list # <<<<<<<<<<<<<<
+ *
+ * def add_delta_source(self, delta, unadded_bytes):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)__pyx_v_hash_list));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_hash_list));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_hash_list));
+ __Pyx_INCREF(((PyObject *)__pyx_v_entry_list));
+ PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_entry_list));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_entry_list));
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._dump_index");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_hash_list);
+ __Pyx_DECREF(__pyx_v_entry_list);
+ __Pyx_DECREF(__pyx_v_val);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
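+
+ /* Note on the tuple(map(int, [text_offset, hash_val])) pattern above:
+ * PyLong_FromUnsignedLong always yields Python longs; mapping the pair
+ * through int() demotes each value that fits back to a plain Python 2 int,
+ * which is the common case per the "almost everything is <2**31" comment
+ * in the .pyx source, so the dumped entries stay ordinary int tuples.
+ */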
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":223
+ * return hash_list, entry_list
+ *
+ * def add_delta_source(self, delta, unadded_bytes): # <<<<<<<<<<<<<<
+ * """Add a new delta to the source texts.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_delta_source(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_delta_source[] = "Add a new delta to the source texts.\n\n :param delta: The text of the delta, this must be a byte string.\n :param unadded_bytes: Number of bytes that were added to the source\n that were not indexed.\n ";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_delta_source(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_delta = 0;
+ PyObject *__pyx_v_unadded_bytes = 0;
+ char *__pyx_v_c_delta;
+ Py_ssize_t __pyx_v_c_delta_size;
+ struct delta_index *__pyx_v_index;
+ delta_result __pyx_v_res;
+ unsigned int __pyx_v_source_location;
+ struct source_info *__pyx_v_src;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ unsigned long __pyx_t_5;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__delta,&__pyx_n_s__unadded_bytes,0};
+ __Pyx_RefNannySetupContext("add_delta_source");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__delta);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unadded_bytes);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("add_delta_source", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "add_delta_source") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_delta = values[0];
+ __pyx_v_unadded_bytes = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_delta = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_unadded_bytes = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("add_delta_source", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.add_delta_source");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":238
+ * cdef unsigned int num_indexes
+ *
+ * if not PyString_CheckExact(delta): # <<<<<<<<<<<<<<
+ * raise TypeError('delta is not a str')
+ *
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_delta));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":239
+ *
+ * if not PyString_CheckExact(delta):
+ * raise TypeError('delta is not a str') # <<<<<<<<<<<<<<
+ *
+ * source_location = len(self._sources)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_12));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_12));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":241
+ * raise TypeError('delta is not a str')
+ *
+ * source_location = len(self._sources) # <<<<<<<<<<<<<<
+ * if source_location >= self._max_num_sources:
+ * self._expand_sources()
+ */
+ __pyx_t_4 = PyObject_Length(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_source_location = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":242
+ *
+ * source_location = len(self._sources)
+ * if source_location >= self._max_num_sources: # <<<<<<<<<<<<<<
+ * self._expand_sources()
+ * self._sources.append(delta)
+ */
+ __pyx_t_1 = (__pyx_v_source_location >= ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_num_sources);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":243
+ * source_location = len(self._sources)
+ * if source_location >= self._max_num_sources:
+ * self._expand_sources() # <<<<<<<<<<<<<<
+ * self._sources.append(delta)
+ * c_delta = PyString_AS_STRING(delta)
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex *)((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->__pyx_vtab)->_expand_sources(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":244
+ * if source_location >= self._max_num_sources:
+ * self._expand_sources()
+ * self._sources.append(delta) # <<<<<<<<<<<<<<
+ * c_delta = PyString_AS_STRING(delta)
+ * c_delta_size = PyString_GET_SIZE(delta)
+ */
+ __pyx_t_3 = __Pyx_PyObject_Append(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources, __pyx_v_delta); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":245
+ * self._expand_sources()
+ * self._sources.append(delta)
+ * c_delta = PyString_AS_STRING(delta) # <<<<<<<<<<<<<<
+ * c_delta_size = PyString_GET_SIZE(delta)
+ * src = self._source_infos + source_location
+ */
+ __pyx_v_c_delta = PyString_AS_STRING(__pyx_v_delta);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":246
+ * self._sources.append(delta)
+ * c_delta = PyString_AS_STRING(delta)
+ * c_delta_size = PyString_GET_SIZE(delta) # <<<<<<<<<<<<<<
+ * src = self._source_infos + source_location
+ * src.buf = c_delta
+ */
+ __pyx_v_c_delta_size = PyString_GET_SIZE(__pyx_v_delta);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":247
+ * c_delta = PyString_AS_STRING(delta)
+ * c_delta_size = PyString_GET_SIZE(delta)
+ * src = self._source_infos + source_location # <<<<<<<<<<<<<<
+ * src.buf = c_delta
+ * src.size = c_delta_size
+ */
+ __pyx_v_src = (((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_infos + __pyx_v_source_location);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":248
+ * c_delta_size = PyString_GET_SIZE(delta)
+ * src = self._source_infos + source_location
+ * src.buf = c_delta # <<<<<<<<<<<<<<
+ * src.size = c_delta_size
+ * src.agg_offset = self._source_offset + unadded_bytes
+ */
+ __pyx_v_src->buf = __pyx_v_c_delta;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":249
+ * src = self._source_infos + source_location
+ * src.buf = c_delta
+ * src.size = c_delta_size # <<<<<<<<<<<<<<
+ * src.agg_offset = self._source_offset + unadded_bytes
+ * with nogil:
+ */
+ __pyx_v_src->size = __pyx_v_c_delta_size;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":250
+ * src.buf = c_delta
+ * src.size = c_delta_size
+ * src.agg_offset = self._source_offset + unadded_bytes # <<<<<<<<<<<<<<
+ * with nogil:
+ * res = create_delta_index_from_delta(src, self._index, &index)
+ */
+ __pyx_t_3 = PyLong_FromUnsignedLong(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyNumber_Add(__pyx_t_3, __pyx_v_unadded_bytes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_5 = __Pyx_PyInt_AsUnsignedLong(__pyx_t_2); if (unlikely((__pyx_t_5 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_src->agg_offset = __pyx_t_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":251
+ * src.size = c_delta_size
+ * src.agg_offset = self._source_offset + unadded_bytes
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta_index_from_delta(src, self._index, &index)
+ * if res != DELTA_OK:
+ */
+ { PyThreadState *_save;
+ Py_UNBLOCK_THREADS
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":252
+ * src.agg_offset = self._source_offset + unadded_bytes
+ * with nogil:
+ * res = create_delta_index_from_delta(src, self._index, &index) # <<<<<<<<<<<<<<
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res)
+ */
+ __pyx_v_res = create_delta_index_from_delta(__pyx_v_src, ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index, (&__pyx_v_index));
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":251
+ * src.size = c_delta_size
+ * src.agg_offset = self._source_offset + unadded_bytes
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta_index_from_delta(src, self._index, &index)
+ * if res != DELTA_OK:
+ */
+ /*finally:*/ {
+ Py_BLOCK_THREADS
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":253
+ * with nogil:
+ * res = create_delta_index_from_delta(src, self._index, &index)
+ * if res != DELTA_OK: # <<<<<<<<<<<<<<
+ * raise _translate_delta_failure(res)
+ * self._source_offset = src.agg_offset + src.size
+ */
+ __pyx_t_1 = (__pyx_v_res != DELTA_OK);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":254
+ * res = create_delta_index_from_delta(src, self._index, &index)
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res) # <<<<<<<<<<<<<<
+ * self._source_offset = src.agg_offset + src.size
+ * if index != self._index:
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_18_groupcompress_pyx__translate_delta_failure(__pyx_v_res); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":255
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res)
+ * self._source_offset = src.agg_offset + src.size # <<<<<<<<<<<<<<
+ * if index != self._index:
+ * free_delta_index(self._index)
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset = (__pyx_v_src->agg_offset + __pyx_v_src->size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":256
+ * raise _translate_delta_failure(res)
+ * self._source_offset = src.agg_offset + src.size
+ * if index != self._index: # <<<<<<<<<<<<<<
+ * free_delta_index(self._index)
+ * self._index = index
+ */
+ __pyx_t_1 = (__pyx_v_index != ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":257
+ * self._source_offset = src.agg_offset + src.size
+ * if index != self._index:
+ * free_delta_index(self._index) # <<<<<<<<<<<<<<
+ * self._index = index
+ *
+ */
+ free_delta_index(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":258
+ * if index != self._index:
+ * free_delta_index(self._index)
+ * self._index = index # <<<<<<<<<<<<<<
+ *
+ * def add_source(self, source, unadded_bytes):
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index = __pyx_v_index;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.add_delta_source");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":260
+ * self._index = index
+ *
+ * def add_source(self, source, unadded_bytes): # <<<<<<<<<<<<<<
+ * """Add a new bit of source text to the delta indexes.
+ *
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_source(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_source[] = "Add a new bit of source text to the delta indexes.\n\n :param source: The text in question, this must be a byte string\n :param unadded_bytes: Assume there are this many bytes that didn't get\n added between this source and the end of the previous source.\n :param max_pointers: Add no more than this many entries to the index.\n By default, we sample every 16 bytes, if that would require more\n than max_entries, we will reduce the sampling rate.\n A value of 0 means unlimited, None means use the default limit.\n ";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_source(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_source = 0;
+ PyObject *__pyx_v_unadded_bytes = 0;
+ char *__pyx_v_c_source;
+ Py_ssize_t __pyx_v_c_source_size;
+ struct delta_index *__pyx_v_index;
+ delta_result __pyx_v_res;
+ unsigned int __pyx_v_source_location;
+ struct source_info *__pyx_v_src;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ unsigned long __pyx_t_7;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__source,&__pyx_n_s__unadded_bytes,0};
+ __Pyx_RefNannySetupContext("add_source");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__source);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__unadded_bytes);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("add_source", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "add_source") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_source = values[0];
+ __pyx_v_unadded_bytes = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_source = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_unadded_bytes = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("add_source", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.add_source");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":280
+ * cdef int max_num_entries
+ *
+ * if not PyString_CheckExact(source): # <<<<<<<<<<<<<<
+ * raise TypeError('source is not a str')
+ *
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_source));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":281
+ *
+ * if not PyString_CheckExact(source):
+ * raise TypeError('source is not a str') # <<<<<<<<<<<<<<
+ *
+ * source_location = len(self._sources)
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_13));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_13));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":283
+ * raise TypeError('source is not a str')
+ *
+ * source_location = len(self._sources) # <<<<<<<<<<<<<<
+ * if source_location >= self._max_num_sources:
+ * self._expand_sources()
+ */
+ __pyx_t_4 = PyObject_Length(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_source_location = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":284
+ *
+ * source_location = len(self._sources)
+ * if source_location >= self._max_num_sources: # <<<<<<<<<<<<<<
+ * self._expand_sources()
+ * if source_location != 0 and self._index == NULL:
+ */
+ __pyx_t_1 = (__pyx_v_source_location >= ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_num_sources);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":285
+ * source_location = len(self._sources)
+ * if source_location >= self._max_num_sources:
+ * self._expand_sources() # <<<<<<<<<<<<<<
+ * if source_location != 0 and self._index == NULL:
+ * # We were lazy about populating the index, create it now
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex *)((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->__pyx_vtab)->_expand_sources(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":286
+ * if source_location >= self._max_num_sources:
+ * self._expand_sources()
+ * if source_location != 0 and self._index == NULL: # <<<<<<<<<<<<<<
+ * # We were lazy about populating the index, create it now
+ * self._populate_first_index()
+ */
+ __pyx_t_1 = (__pyx_v_source_location != 0);
+ if (__pyx_t_1) {
+ __pyx_t_5 = (((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index == NULL);
+ __pyx_t_6 = __pyx_t_5;
+ } else {
+ __pyx_t_6 = __pyx_t_1;
+ }
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":288
+ * if source_location != 0 and self._index == NULL:
+ * # We were lazy about populating the index, create it now
+ * self._populate_first_index() # <<<<<<<<<<<<<<
+ * self._sources.append(source)
+ * c_source = PyString_AS_STRING(source)
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex *)((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->__pyx_vtab)->_populate_first_index(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":289
+ * # We were lazy about populating the index, create it now
+ * self._populate_first_index()
+ * self._sources.append(source) # <<<<<<<<<<<<<<
+ * c_source = PyString_AS_STRING(source)
+ * c_source_size = PyString_GET_SIZE(source)
+ */
+ __pyx_t_3 = __Pyx_PyObject_Append(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources, __pyx_v_source); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":290
+ * self._populate_first_index()
+ * self._sources.append(source)
+ * c_source = PyString_AS_STRING(source) # <<<<<<<<<<<<<<
+ * c_source_size = PyString_GET_SIZE(source)
+ * src = self._source_infos + source_location
+ */
+ __pyx_v_c_source = PyString_AS_STRING(__pyx_v_source);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":291
+ * self._sources.append(source)
+ * c_source = PyString_AS_STRING(source)
+ * c_source_size = PyString_GET_SIZE(source) # <<<<<<<<<<<<<<
+ * src = self._source_infos + source_location
+ * src.buf = c_source
+ */
+ __pyx_v_c_source_size = PyString_GET_SIZE(__pyx_v_source);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":292
+ * c_source = PyString_AS_STRING(source)
+ * c_source_size = PyString_GET_SIZE(source)
+ * src = self._source_infos + source_location # <<<<<<<<<<<<<<
+ * src.buf = c_source
+ * src.size = c_source_size
+ */
+ __pyx_v_src = (((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_infos + __pyx_v_source_location);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":293
+ * c_source_size = PyString_GET_SIZE(source)
+ * src = self._source_infos + source_location
+ * src.buf = c_source # <<<<<<<<<<<<<<
+ * src.size = c_source_size
+ *
+ */
+ __pyx_v_src->buf = __pyx_v_c_source;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":294
+ * src = self._source_infos + source_location
+ * src.buf = c_source
+ * src.size = c_source_size # <<<<<<<<<<<<<<
+ *
+ * src.agg_offset = self._source_offset + unadded_bytes
+ */
+ __pyx_v_src->size = __pyx_v_c_source_size;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":296
+ * src.size = c_source_size
+ *
+ * src.agg_offset = self._source_offset + unadded_bytes # <<<<<<<<<<<<<<
+ * self._source_offset = src.agg_offset + src.size
+ * # We delay creating the index on the first insert
+ */
+ __pyx_t_3 = PyLong_FromUnsignedLong(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyNumber_Add(__pyx_t_3, __pyx_v_unadded_bytes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_7 = __Pyx_PyInt_AsUnsignedLong(__pyx_t_2); if (unlikely((__pyx_t_7 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_src->agg_offset = __pyx_t_7;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":297
+ *
+ * src.agg_offset = self._source_offset + unadded_bytes
+ * self._source_offset = src.agg_offset + src.size # <<<<<<<<<<<<<<
+ * # We delay creating the index on the first insert
+ * if source_location != 0:
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_source_offset = (__pyx_v_src->agg_offset + __pyx_v_src->size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":299
+ * self._source_offset = src.agg_offset + src.size
+ * # We delay creating the index on the first insert
+ * if source_location != 0: # <<<<<<<<<<<<<<
+ * with nogil:
+ * res = create_delta_index(src, self._index, &index,
+ */
+ __pyx_t_6 = (__pyx_v_source_location != 0);
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":300
+ * # We delay creating the index on the first insert
+ * if source_location != 0:
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta_index(src, self._index, &index,
+ * self._max_bytes_to_index)
+ */
+ { PyThreadState *_save;
+ Py_UNBLOCK_THREADS
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":302
+ * with nogil:
+ * res = create_delta_index(src, self._index, &index,
+ * self._max_bytes_to_index) # <<<<<<<<<<<<<<
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res)
+ */
+ __pyx_v_res = create_delta_index(__pyx_v_src, ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index, (&__pyx_v_index), ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_max_bytes_to_index);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":300
+ * # We delay creating the index on the first insert
+ * if source_location != 0:
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta_index(src, self._index, &index,
+ * self._max_bytes_to_index)
+ */
+ /*finally:*/ {
+ Py_BLOCK_THREADS
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":303
+ * res = create_delta_index(src, self._index, &index,
+ * self._max_bytes_to_index)
+ * if res != DELTA_OK: # <<<<<<<<<<<<<<
+ * raise _translate_delta_failure(res)
+ * if index != self._index:
+ */
+ __pyx_t_6 = (__pyx_v_res != DELTA_OK);
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":304
+ * self._max_bytes_to_index)
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res) # <<<<<<<<<<<<<<
+ * if index != self._index:
+ * free_delta_index(self._index)
+ */
+ __pyx_t_2 = __pyx_f_6bzrlib_18_groupcompress_pyx__translate_delta_failure(__pyx_v_res); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":305
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res)
+ * if index != self._index: # <<<<<<<<<<<<<<
+ * free_delta_index(self._index)
+ * self._index = index
+ */
+ __pyx_t_6 = (__pyx_v_index != ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index);
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":306
+ * raise _translate_delta_failure(res)
+ * if index != self._index:
+ * free_delta_index(self._index) # <<<<<<<<<<<<<<
+ * self._index = index
+ *
+ */
+ free_delta_index(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":307
+ * if index != self._index:
+ * free_delta_index(self._index)
+ * self._index = index # <<<<<<<<<<<<<<
+ *
+ * cdef _populate_first_index(self):
+ */
+ ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index = __pyx_v_index;
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.add_source");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
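+
+ /* A minimal Python-level sketch of how the DeltaIndex methods generated in
+ * this file fit together (illustrative only; it assumes the compiled
+ * bzrlib._groupcompress_pyx extension is importable, and the byte strings
+ * are made up for the example):
+ *
+ *   from bzrlib._groupcompress_pyx import DeltaIndex
+ *
+ *   di = DeltaIndex('base text\n' * 100)   # first source, indexed lazily
+ *   di.add_source('more text\n' * 50, 0)   # unadded_bytes=0: no gap before it
+ *   delta = di.make_delta('base text\n' * 100 + 'changed tail\n')
+ *
+ * add_delta_source() is the same idea for bytes that are themselves a delta
+ * produced earlier, as its docstring above describes.
+ */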
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":309
+ * self._index = index
+ *
+ * cdef _populate_first_index(self): # <<<<<<<<<<<<<<
+ * cdef delta_index *index
+ * cdef delta_result res
+ */
+
+static PyObject *__pyx_f_6bzrlib_18_groupcompress_pyx_10DeltaIndex__populate_first_index(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *__pyx_v_self) {
+ struct delta_index *__pyx_v_index;
+ delta_result __pyx_v_res;
+ PyObject *__pyx_r = NULL;
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ __Pyx_RefNannySetupContext("_populate_first_index");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":312
+ * cdef delta_index *index
+ * cdef delta_result res
+ * if len(self._sources) != 1 or self._index != NULL: # <<<<<<<<<<<<<<
+ * raise AssertionError('_populate_first_index should only be'
+ * ' called when we have a single source and no index yet')
+ */
+ __pyx_t_1 = PyObject_Length(__pyx_v_self->_sources); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = (__pyx_t_1 != 1);
+ if (!__pyx_t_2) {
+ __pyx_t_3 = (__pyx_v_self->_index != NULL);
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":313
+ * cdef delta_result res
+ * if len(self._sources) != 1 or self._index != NULL:
+ * raise AssertionError('_populate_first_index should only be' # <<<<<<<<<<<<<<
+ * ' called when we have a single source and no index yet')
+ *
+ */
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_15));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_kp_s_15));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_15));
+ __pyx_t_6 = PyObject_Call(__pyx_builtin_AssertionError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":318
+ * # We know that self._index is already NULL, so create_delta_index
+ * # will always create a new index unless there's a malloc failure
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta_index(&self._source_infos[0], NULL, &index,
+ * self._max_bytes_to_index)
+ */
+ { PyThreadState *_save;
+ Py_UNBLOCK_THREADS
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":320
+ * with nogil:
+ * res = create_delta_index(&self._source_infos[0], NULL, &index,
+ * self._max_bytes_to_index) # <<<<<<<<<<<<<<
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res)
+ */
+ __pyx_v_res = create_delta_index((&(__pyx_v_self->_source_infos[0])), NULL, (&__pyx_v_index), __pyx_v_self->_max_bytes_to_index);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":318
+ * # We know that self._index is already NULL, so create_delta_index
+ * # will always create a new index unless there's a malloc failure
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta_index(&self._source_infos[0], NULL, &index,
+ * self._max_bytes_to_index)
+ */
+ /*finally:*/ {
+ Py_BLOCK_THREADS
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":321
+ * res = create_delta_index(&self._source_infos[0], NULL, &index,
+ * self._max_bytes_to_index)
+ * if res != DELTA_OK: # <<<<<<<<<<<<<<
+ * raise _translate_delta_failure(res)
+ * self._index = index
+ */
+ __pyx_t_4 = (__pyx_v_res != DELTA_OK);
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":322
+ * self._max_bytes_to_index)
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res) # <<<<<<<<<<<<<<
+ * self._index = index
+ *
+ */
+ __pyx_t_6 = __pyx_f_6bzrlib_18_groupcompress_pyx__translate_delta_failure(__pyx_v_res); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_Raise(__pyx_t_6, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":323
+ * if res != DELTA_OK:
+ * raise _translate_delta_failure(res)
+ * self._index = index # <<<<<<<<<<<<<<
+ *
+ * cdef _expand_sources(self):
+ */
+ __pyx_v_self->_index = __pyx_v_index;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._populate_first_index");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":325
+ * self._index = index
+ *
+ * cdef _expand_sources(self): # <<<<<<<<<<<<<<
+ * raise RuntimeError('if we move self._source_infos, then we need to'
+ * ' change all of the index pointers as well.')
+ */
+
+static PyObject *__pyx_f_6bzrlib_18_groupcompress_pyx_10DeltaIndex__expand_sources(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ void *__pyx_t_3;
+ __Pyx_RefNannySetupContext("_expand_sources");
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":326
+ *
+ * cdef _expand_sources(self):
+ * raise RuntimeError('if we move self._source_infos, then we need to' # <<<<<<<<<<<<<<
+ * ' change all of the index pointers as well.')
+ * self._max_num_sources = self._max_num_sources * 2
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_16));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_kp_s_16));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_16));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":328
+ * raise RuntimeError('if we move self._source_infos, then we need to'
+ * ' change all of the index pointers as well.')
+ * self._max_num_sources = self._max_num_sources * 2 # <<<<<<<<<<<<<<
+ * self._source_infos = <source_info *>safe_realloc(self._source_infos,
+ * sizeof(source_info)
+ */
+ __pyx_v_self->_max_num_sources = (__pyx_v_self->_max_num_sources * 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":331
+ * self._source_infos = <source_info *>safe_realloc(self._source_infos,
+ * sizeof(source_info)
+ * * self._max_num_sources) # <<<<<<<<<<<<<<
+ *
+ * def make_delta(self, target_bytes, max_delta_size=0):
+ */
+ __pyx_t_3 = __pyx_f_6bzrlib_18_groupcompress_pyx_safe_realloc(__pyx_v_self->_source_infos, ((sizeof(struct source_info)) * __pyx_v_self->_max_num_sources)); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":329
+ * ' change all of the index pointers as well.')
+ * self._max_num_sources = self._max_num_sources * 2
+ * self._source_infos = <source_info *>safe_realloc(self._source_infos, # <<<<<<<<<<<<<<
+ * sizeof(source_info)
+ * * self._max_num_sources)
+ */
+ __pyx_v_self->_source_infos = ((struct source_info *)__pyx_t_3);
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex._expand_sources");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":333
+ * * self._max_num_sources)
+ *
+ * def make_delta(self, target_bytes, max_delta_size=0): # <<<<<<<<<<<<<<
+ * """Create a delta from the current source to the target bytes."""
+ * cdef char *target
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_make_delta(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex_make_delta[] = "Create a delta from the current source to the target bytes.";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_make_delta(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_target_bytes = 0;
+ PyObject *__pyx_v_max_delta_size = 0;
+ char *__pyx_v_target;
+ Py_ssize_t __pyx_v_target_size;
+ void *__pyx_v_delta;
+ unsigned long __pyx_v_delta_size;
+ unsigned long __pyx_v_c_max_delta_size;
+ delta_result __pyx_v_res;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ Py_ssize_t __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ unsigned long __pyx_t_5;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__target_bytes,&__pyx_n_s__max_delta_size,0};
+ __Pyx_RefNannySetupContext("make_delta");
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ values[1] = ((PyObject *)__pyx_int_0);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__target_bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (kw_args > 0) {
+ PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__max_delta_size);
+ if (value) { values[1] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "make_delta") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_target_bytes = values[0];
+ __pyx_v_max_delta_size = values[1];
+ } else {
+ __pyx_v_max_delta_size = ((PyObject *)__pyx_int_0);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: __pyx_v_max_delta_size = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: __pyx_v_target_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("make_delta", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.make_delta");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":342
+ * cdef delta_result res
+ *
+ * if self._index == NULL: # <<<<<<<<<<<<<<
+ * if len(self._sources) == 0:
+ * return None
+ */
+ __pyx_t_1 = (((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index == NULL);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":343
+ *
+ * if self._index == NULL:
+ * if len(self._sources) == 0: # <<<<<<<<<<<<<<
+ * return None
+ * # We were just lazy about generating the index
+ */
+ __pyx_t_2 = PyObject_Length(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_sources); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = (__pyx_t_2 == 0);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":344
+ * if self._index == NULL:
+ * if len(self._sources) == 0:
+ * return None # <<<<<<<<<<<<<<
+ * # We were just lazy about generating the index
+ * self._populate_first_index()
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":346
+ * return None
+ * # We were just lazy about generating the index
+ * self._populate_first_index() # <<<<<<<<<<<<<<
+ *
+ * if not PyString_CheckExact(target_bytes):
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex *)((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->__pyx_vtab)->_populate_first_index(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 346; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":348
+ * self._populate_first_index()
+ *
+ * if not PyString_CheckExact(target_bytes): # <<<<<<<<<<<<<<
+ * raise TypeError('target is not a str')
+ *
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_target_bytes));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":349
+ *
+ * if not PyString_CheckExact(target_bytes):
+ * raise TypeError('target is not a str') # <<<<<<<<<<<<<<
+ *
+ * target = PyString_AS_STRING(target_bytes)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_17));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_17));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_17));
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":351
+ * raise TypeError('target is not a str')
+ *
+ * target = PyString_AS_STRING(target_bytes) # <<<<<<<<<<<<<<
+ * target_size = PyString_GET_SIZE(target_bytes)
+ *
+ */
+ __pyx_v_target = PyString_AS_STRING(__pyx_v_target_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":352
+ *
+ * target = PyString_AS_STRING(target_bytes)
+ * target_size = PyString_GET_SIZE(target_bytes) # <<<<<<<<<<<<<<
+ *
+ * # TODO: inline some of create_delta so we at least don't have to double
+ */
+ __pyx_v_target_size = PyString_GET_SIZE(__pyx_v_target_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":357
+ * # malloc, and can instead use PyString_FromStringAndSize, to
+ * # allocate the bytes into the final string
+ * c_max_delta_size = max_delta_size # <<<<<<<<<<<<<<
+ * with nogil:
+ * res = create_delta(self._index, target, target_size,
+ */
+ __pyx_t_5 = __Pyx_PyInt_AsUnsignedLong(__pyx_v_max_delta_size); if (unlikely((__pyx_t_5 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_c_max_delta_size = __pyx_t_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":358
+ * # allocate the bytes into the final string
+ * c_max_delta_size = max_delta_size
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta(self._index, target, target_size,
+ * &delta_size, c_max_delta_size, &delta)
+ */
+ { PyThreadState *_save;
+ Py_UNBLOCK_THREADS
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":360
+ * with nogil:
+ * res = create_delta(self._index, target, target_size,
+ * &delta_size, c_max_delta_size, &delta) # <<<<<<<<<<<<<<
+ * result = None
+ * if res == DELTA_OK:
+ */
+ __pyx_v_res = create_delta(((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_v_self)->_index, __pyx_v_target, __pyx_v_target_size, (&__pyx_v_delta_size), __pyx_v_c_max_delta_size, (&__pyx_v_delta));
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":358
+ * # allocate the bytes into the final string
+ * c_max_delta_size = max_delta_size
+ * with nogil: # <<<<<<<<<<<<<<
+ * res = create_delta(self._index, target, target_size,
+ * &delta_size, c_max_delta_size, &delta)
+ */
+ /*finally:*/ {
+ Py_BLOCK_THREADS
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":361
+ * res = create_delta(self._index, target, target_size,
+ * &delta_size, c_max_delta_size, &delta)
+ * result = None # <<<<<<<<<<<<<<
+ * if res == DELTA_OK:
+ * result = PyString_FromStringAndSize(<char *>delta, delta_size)
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":362
+ * &delta_size, c_max_delta_size, &delta)
+ * result = None
+ * if res == DELTA_OK: # <<<<<<<<<<<<<<
+ * result = PyString_FromStringAndSize(<char *>delta, delta_size)
+ * free(delta)
+ */
+ __pyx_t_1 = (__pyx_v_res == DELTA_OK);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":363
+ * result = None
+ * if res == DELTA_OK:
+ * result = PyString_FromStringAndSize(<char *>delta, delta_size) # <<<<<<<<<<<<<<
+ * free(delta)
+ * elif res != DELTA_SIZE_TOO_BIG:
+ */
+ __pyx_t_4 = PyString_FromStringAndSize(((char *)__pyx_v_delta), __pyx_v_delta_size); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":364
+ * if res == DELTA_OK:
+ * result = PyString_FromStringAndSize(<char *>delta, delta_size)
+ * free(delta) # <<<<<<<<<<<<<<
+ * elif res != DELTA_SIZE_TOO_BIG:
+ * raise _translate_delta_failure(res)
+ */
+ free(__pyx_v_delta);
+ goto __pyx_L12;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":365
+ * result = PyString_FromStringAndSize(<char *>delta, delta_size)
+ * free(delta)
+ * elif res != DELTA_SIZE_TOO_BIG: # <<<<<<<<<<<<<<
+ * raise _translate_delta_failure(res)
+ * return result
+ */
+ __pyx_t_1 = (__pyx_v_res != DELTA_SIZE_TOO_BIG);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":366
+ * free(delta)
+ * elif res != DELTA_SIZE_TOO_BIG:
+ * raise _translate_delta_failure(res) # <<<<<<<<<<<<<<
+ * return result
+ *
+ */
+ __pyx_t_4 = __pyx_f_6bzrlib_18_groupcompress_pyx__translate_delta_failure(__pyx_v_res); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_Raise(__pyx_t_4, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":367
+ * elif res != DELTA_SIZE_TOO_BIG:
+ * raise _translate_delta_failure(res)
+ * return result # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.DeltaIndex.make_delta");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
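+
+ /* make_delta's max_delta_size argument (default 0) is forwarded to
+ * create_delta() as c_max_delta_size; when the C layer reports
+ * DELTA_SIZE_TOO_BIG the method returns None instead of raising, so callers
+ * can treat "no delta small enough" as an ordinary outcome.
+ */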
+
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":370
+ *
+ *
+ * def make_delta(source_bytes, target_bytes): # <<<<<<<<<<<<<<
+ * """Create a delta, this is a wrapper around DeltaIndex.make_delta."""
+ * di = DeltaIndex(source_bytes)
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_make_delta(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_make_delta[] = "Create a delta, this is a wrapper around DeltaIndex.make_delta.";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_make_delta(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_source_bytes = 0;
+ PyObject *__pyx_v_target_bytes = 0;
+ struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *__pyx_v_di;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__source_bytes,&__pyx_n_s__target_bytes,0};
+ __Pyx_RefNannySetupContext("make_delta");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__source_bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__target_bytes);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("make_delta", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "make_delta") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_source_bytes = values[0];
+ __pyx_v_target_bytes = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_source_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_target_bytes = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("make_delta", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.make_delta");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_v_di = ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)Py_None); __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":372
+ * def make_delta(source_bytes, target_bytes):
+ * """Create a delta, this is a wrapper around DeltaIndex.make_delta."""
+ * di = DeltaIndex(source_bytes) # <<<<<<<<<<<<<<
+ * return di.make_delta(target_bytes)
+ *
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_source_bytes);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_source_bytes);
+ __Pyx_GIVEREF(__pyx_v_source_bytes);
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6bzrlib_18_groupcompress_pyx_DeltaIndex)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_v_di));
+ __pyx_v_di = ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":373
+ * """Create a delta, this is a wrapper around DeltaIndex.make_delta."""
+ * di = DeltaIndex(source_bytes)
+ * return di.make_delta(target_bytes) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_di), __pyx_n_s__make_delta); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_target_bytes);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_target_bytes);
+ __Pyx_GIVEREF(__pyx_v_target_bytes);
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.make_delta");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_DECREF((PyObject *)__pyx_v_di);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
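As the quoted .pyx source shows, make_delta is only a convenience wrapper around DeltaIndex. A minimal round-trip sketch of the two module-level functions, assuming the compiled bzrlib._groupcompress_pyx extension is importable and using the Python 2 str-based API this file targets:

    # Illustrative only: round-trips make_delta/apply_delta through the compiled
    # extension (assumes bzrlib._groupcompress_pyx has been built and is importable).
    from bzrlib import _groupcompress_pyx as gc_pyx

    source = 'the quick brown fox jumps over the lazy dog\n' * 50
    target = source + 'and a new trailing line\n'

    delta = gc_pyx.make_delta(source, target)    # DeltaIndex(source).make_delta(target)
    if delta is not None:                         # make_delta may decline to produce a delta
        assert gc_pyx.apply_delta(source, delta) == target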
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":376
+ *
+ *
+ * def apply_delta(source_bytes, delta_bytes): # <<<<<<<<<<<<<<
+ * """Apply a delta generated by make_delta to source_bytes."""
+ * cdef char *source
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_apply_delta(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_apply_delta[] = "Apply a delta generated by make_delta to source_bytes.";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_apply_delta(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_source_bytes = 0;
+ PyObject *__pyx_v_delta_bytes = 0;
+ char *__pyx_v_source;
+ Py_ssize_t __pyx_v_source_size;
+ char *__pyx_v_delta;
+ Py_ssize_t __pyx_v_delta_size;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__source_bytes,&__pyx_n_s__delta_bytes,0};
+ __Pyx_RefNannySetupContext("apply_delta");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[2] = {0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__source_bytes);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__delta_bytes);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("apply_delta", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "apply_delta") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_source_bytes = values[0];
+ __pyx_v_delta_bytes = values[1];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_source_bytes = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_delta_bytes = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("apply_delta", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.apply_delta");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":383
+ * cdef Py_ssize_t delta_size
+ *
+ * if not PyString_CheckExact(source_bytes): # <<<<<<<<<<<<<<
+ * raise TypeError('source is not a str')
+ * if not PyString_CheckExact(delta_bytes):
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_source_bytes));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":384
+ *
+ * if not PyString_CheckExact(source_bytes):
+ * raise TypeError('source is not a str') # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(delta_bytes):
+ * raise TypeError('delta is not a str')
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_13));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_13));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":385
+ * if not PyString_CheckExact(source_bytes):
+ * raise TypeError('source is not a str')
+ * if not PyString_CheckExact(delta_bytes): # <<<<<<<<<<<<<<
+ * raise TypeError('delta is not a str')
+ * source = PyString_AS_STRING(source_bytes)
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_delta_bytes));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":386
+ * raise TypeError('source is not a str')
+ * if not PyString_CheckExact(delta_bytes):
+ * raise TypeError('delta is not a str') # <<<<<<<<<<<<<<
+ * source = PyString_AS_STRING(source_bytes)
+ * source_size = PyString_GET_SIZE(source_bytes)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_12));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_12));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":387
+ * if not PyString_CheckExact(delta_bytes):
+ * raise TypeError('delta is not a str')
+ * source = PyString_AS_STRING(source_bytes) # <<<<<<<<<<<<<<
+ * source_size = PyString_GET_SIZE(source_bytes)
+ * delta = PyString_AS_STRING(delta_bytes)
+ */
+ __pyx_v_source = PyString_AS_STRING(__pyx_v_source_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":388
+ * raise TypeError('delta is not a str')
+ * source = PyString_AS_STRING(source_bytes)
+ * source_size = PyString_GET_SIZE(source_bytes) # <<<<<<<<<<<<<<
+ * delta = PyString_AS_STRING(delta_bytes)
+ * delta_size = PyString_GET_SIZE(delta_bytes)
+ */
+ __pyx_v_source_size = PyString_GET_SIZE(__pyx_v_source_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":389
+ * source = PyString_AS_STRING(source_bytes)
+ * source_size = PyString_GET_SIZE(source_bytes)
+ * delta = PyString_AS_STRING(delta_bytes) # <<<<<<<<<<<<<<
+ * delta_size = PyString_GET_SIZE(delta_bytes)
+ * # Code taken from patch-delta.c, only brought here to give better error
+ */
+ __pyx_v_delta = PyString_AS_STRING(__pyx_v_delta_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":390
+ * source_size = PyString_GET_SIZE(source_bytes)
+ * delta = PyString_AS_STRING(delta_bytes)
+ * delta_size = PyString_GET_SIZE(delta_bytes) # <<<<<<<<<<<<<<
+ * # Code taken from patch-delta.c, only brought here to give better error
+ * # handling, and to avoid double allocating memory
+ */
+ __pyx_v_delta_size = PyString_GET_SIZE(__pyx_v_delta_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":393
+ * # Code taken from patch-delta.c, only brought here to give better error
+ * # handling, and to avoid double allocating memory
+ * if (delta_size < DELTA_SIZE_MIN): # <<<<<<<<<<<<<<
+ * # XXX: Invalid delta block
+ * raise RuntimeError('delta_size %d smaller than min delta size %d'
+ */
+ __pyx_t_1 = (__pyx_v_delta_size < DELTA_SIZE_MIN);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":396
+ * # XXX: Invalid delta block
+ * raise RuntimeError('delta_size %d smaller than min delta size %d'
+ * % (delta_size, DELTA_SIZE_MIN)) # <<<<<<<<<<<<<<
+ *
+ * return _apply_delta(source, source_size, delta, delta_size)
+ */
+ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_delta_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyInt_FromSsize_t(DELTA_SIZE_MIN); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_18), __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":398
+ * % (delta_size, DELTA_SIZE_MIN))
+ *
+ * return _apply_delta(source, source_size, delta, delta_size) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = __pyx_f_6bzrlib_18_groupcompress_pyx__apply_delta(__pyx_v_source, __pyx_v_source_size, __pyx_v_delta, __pyx_v_delta_size); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.apply_delta");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
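Before dispatching to _apply_delta, apply_delta only validates its arguments. A pure-Python rendering of that contract, with DELTA_SIZE_MIN assumed to be 4 (the constant lives in the git-derived delta headers, not in this file):

    # Sketch of apply_delta's argument checks only; DELTA_SIZE_MIN = 4 is an assumption.
    DELTA_SIZE_MIN = 4

    def check_apply_delta_args(source_bytes, delta_bytes):
        if not isinstance(source_bytes, str):
            raise TypeError('source is not a str')
        if not isinstance(delta_bytes, str):
            raise TypeError('delta is not a str')
        if len(delta_bytes) < DELTA_SIZE_MIN:
            raise RuntimeError('delta_size %d smaller than min delta size %d'
                               % (len(delta_bytes), DELTA_SIZE_MIN))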
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":401
+ *
+ *
+ * cdef unsigned char *_decode_copy_instruction(unsigned char *bytes, # <<<<<<<<<<<<<<
+ * unsigned char cmd, unsigned int *offset,
+ * unsigned int *length) nogil: # cannot_raise
+ */
+
+static unsigned char *__pyx_f_6bzrlib_18_groupcompress_pyx__decode_copy_instruction(unsigned char *__pyx_v_bytes, unsigned char __pyx_v_cmd, unsigned int *__pyx_v_offset, unsigned int *__pyx_v_length) {
+ unsigned int __pyx_v_off;
+ unsigned int __pyx_v_size;
+ unsigned int __pyx_v_count;
+ unsigned char *__pyx_r;
+ long __pyx_t_1;
+ int __pyx_t_2;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":415
+ * """
+ * cdef unsigned int off, size, count
+ * off = 0 # <<<<<<<<<<<<<<
+ * size = 0
+ * count = 0
+ */
+ __pyx_v_off = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":416
+ * cdef unsigned int off, size, count
+ * off = 0
+ * size = 0 # <<<<<<<<<<<<<<
+ * count = 0
+ * if (cmd & 0x01):
+ */
+ __pyx_v_size = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":417
+ * off = 0
+ * size = 0
+ * count = 0 # <<<<<<<<<<<<<<
+ * if (cmd & 0x01):
+ * off = bytes[count]
+ */
+ __pyx_v_count = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":418
+ * size = 0
+ * count = 0
+ * if (cmd & 0x01): # <<<<<<<<<<<<<<
+ * off = bytes[count]
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x01);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":419
+ * count = 0
+ * if (cmd & 0x01):
+ * off = bytes[count] # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (cmd & 0x02):
+ */
+ __pyx_v_off = (__pyx_v_bytes[__pyx_v_count]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":420
+ * if (cmd & 0x01):
+ * off = bytes[count]
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x02):
+ * off = off | (bytes[count] << 8)
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":421
+ * off = bytes[count]
+ * count = count + 1
+ * if (cmd & 0x02): # <<<<<<<<<<<<<<
+ * off = off | (bytes[count] << 8)
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x02);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":422
+ * count = count + 1
+ * if (cmd & 0x02):
+ * off = off | (bytes[count] << 8) # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (cmd & 0x04):
+ */
+ __pyx_v_off = (__pyx_v_off | ((__pyx_v_bytes[__pyx_v_count]) << 8));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":423
+ * if (cmd & 0x02):
+ * off = off | (bytes[count] << 8)
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x04):
+ * off = off | (bytes[count] << 16)
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":424
+ * off = off | (bytes[count] << 8)
+ * count = count + 1
+ * if (cmd & 0x04): # <<<<<<<<<<<<<<
+ * off = off | (bytes[count] << 16)
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x04);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":425
+ * count = count + 1
+ * if (cmd & 0x04):
+ * off = off | (bytes[count] << 16) # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (cmd & 0x08):
+ */
+ __pyx_v_off = (__pyx_v_off | ((__pyx_v_bytes[__pyx_v_count]) << 16));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":426
+ * if (cmd & 0x04):
+ * off = off | (bytes[count] << 16)
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x08):
+ * off = off | (bytes[count] << 24)
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":427
+ * off = off | (bytes[count] << 16)
+ * count = count + 1
+ * if (cmd & 0x08): # <<<<<<<<<<<<<<
+ * off = off | (bytes[count] << 24)
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x08);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":428
+ * count = count + 1
+ * if (cmd & 0x08):
+ * off = off | (bytes[count] << 24) # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (cmd & 0x10):
+ */
+ __pyx_v_off = (__pyx_v_off | ((__pyx_v_bytes[__pyx_v_count]) << 24));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":429
+ * if (cmd & 0x08):
+ * off = off | (bytes[count] << 24)
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x10):
+ * size = bytes[count]
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":430
+ * off = off | (bytes[count] << 24)
+ * count = count + 1
+ * if (cmd & 0x10): # <<<<<<<<<<<<<<
+ * size = bytes[count]
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x10);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":431
+ * count = count + 1
+ * if (cmd & 0x10):
+ * size = bytes[count] # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (cmd & 0x20):
+ */
+ __pyx_v_size = (__pyx_v_bytes[__pyx_v_count]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":432
+ * if (cmd & 0x10):
+ * size = bytes[count]
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x20):
+ * size = size | (bytes[count] << 8)
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":433
+ * size = bytes[count]
+ * count = count + 1
+ * if (cmd & 0x20): # <<<<<<<<<<<<<<
+ * size = size | (bytes[count] << 8)
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x20);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":434
+ * count = count + 1
+ * if (cmd & 0x20):
+ * size = size | (bytes[count] << 8) # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (cmd & 0x40):
+ */
+ __pyx_v_size = (__pyx_v_size | ((__pyx_v_bytes[__pyx_v_count]) << 8));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":435
+ * if (cmd & 0x20):
+ * size = size | (bytes[count] << 8)
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x40):
+ * size = size | (bytes[count] << 16)
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":436
+ * size = size | (bytes[count] << 8)
+ * count = count + 1
+ * if (cmd & 0x40): # <<<<<<<<<<<<<<
+ * size = size | (bytes[count] << 16)
+ * count = count + 1
+ */
+ __pyx_t_1 = (__pyx_v_cmd & 0x40);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":437
+ * count = count + 1
+ * if (cmd & 0x40):
+ * size = size | (bytes[count] << 16) # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if (size == 0):
+ */
+ __pyx_v_size = (__pyx_v_size | ((__pyx_v_bytes[__pyx_v_count]) << 16));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":438
+ * if (cmd & 0x40):
+ * size = size | (bytes[count] << 16)
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if (size == 0):
+ * size = 0x10000
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":439
+ * size = size | (bytes[count] << 16)
+ * count = count + 1
+ * if (size == 0): # <<<<<<<<<<<<<<
+ * size = 0x10000
+ * offset[0] = off
+ */
+ __pyx_t_2 = (__pyx_v_size == 0);
+ if (__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":440
+ * count = count + 1
+ * if (size == 0):
+ * size = 0x10000 # <<<<<<<<<<<<<<
+ * offset[0] = off
+ * length[0] = size
+ */
+ __pyx_v_size = 0x10000;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":441
+ * if (size == 0):
+ * size = 0x10000
+ * offset[0] = off # <<<<<<<<<<<<<<
+ * length[0] = size
+ * return bytes + count
+ */
+ (__pyx_v_offset[0]) = __pyx_v_off;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":442
+ * size = 0x10000
+ * offset[0] = off
+ * length[0] = size # <<<<<<<<<<<<<<
+ * return bytes + count
+ *
+ */
+ (__pyx_v_length[0]) = __pyx_v_size;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":443
+ * offset[0] = off
+ * length[0] = size
+ * return bytes + count # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = (__pyx_v_bytes + __pyx_v_count);
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
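The copy instruction decoded above follows the git pack-delta convention: bits 0x01 to 0x08 of cmd select which little-endian offset bytes follow, bits 0x10 to 0x40 select the size bytes, and an encoded size of zero means 0x10000. A pure-Python sketch of the same decoding, illustrative only and not part of the generated module:

    def decode_copy_instruction(data, pos, cmd):
        """Return (offset, length, new_pos) for the copy command byte `cmd`.

        `data` is a Python 2 str holding the delta; `pos` points just past cmd.
        """
        off = size = 0
        if cmd & 0x01: off = ord(data[pos]); pos += 1
        if cmd & 0x02: off |= ord(data[pos]) << 8; pos += 1
        if cmd & 0x04: off |= ord(data[pos]) << 16; pos += 1
        if cmd & 0x08: off |= ord(data[pos]) << 24; pos += 1
        if cmd & 0x10: size = ord(data[pos]); pos += 1
        if cmd & 0x20: size |= ord(data[pos]) << 8; pos += 1
        if cmd & 0x40: size |= ord(data[pos]) << 16; pos += 1
        if size == 0:          # a stored size of 0 encodes the maximum, 64KiB
            size = 0x10000
        return off, size, pos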
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":446
+ *
+ *
+ * cdef object _apply_delta(char *source, Py_ssize_t source_size, # <<<<<<<<<<<<<<
+ * char *delta, Py_ssize_t delta_size):
+ * """common functionality between apply_delta and apply_delta_to_source."""
+ */
+
+static PyObject *__pyx_f_6bzrlib_18_groupcompress_pyx__apply_delta(char *__pyx_v_source, Py_ssize_t __pyx_v_source_size, char *__pyx_v_delta, Py_ssize_t __pyx_v_delta_size) {
+ unsigned char *__pyx_v_data;
+ unsigned char *__pyx_v_top;
+ unsigned char *__pyx_v_dst_buf;
+ unsigned char *__pyx_v_out;
+ unsigned char __pyx_v_cmd;
+ Py_ssize_t __pyx_v_size;
+ unsigned int __pyx_v_cp_off;
+ unsigned int __pyx_v_cp_size;
+ int __pyx_v_failed;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ long __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ __Pyx_RefNannySetupContext("_apply_delta");
+ __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":455
+ * cdef int failed
+ *
+ * data = <unsigned char *>delta # <<<<<<<<<<<<<<
+ * top = data + delta_size
+ *
+ */
+ __pyx_v_data = ((unsigned char *)__pyx_v_delta);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":456
+ *
+ * data = <unsigned char *>delta
+ * top = data + delta_size # <<<<<<<<<<<<<<
+ *
+ * # now the result size
+ */
+ __pyx_v_top = (__pyx_v_data + __pyx_v_delta_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":459
+ *
+ * # now the result size
+ * size = get_delta_hdr_size(&data, top) # <<<<<<<<<<<<<<
+ * result = PyString_FromStringAndSize(NULL, size)
+ * dst_buf = <unsigned char*>PyString_AS_STRING(result)
+ */
+ __pyx_v_size = get_delta_hdr_size((&__pyx_v_data), __pyx_v_top);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":460
+ * # now the result size
+ * size = get_delta_hdr_size(&data, top)
+ * result = PyString_FromStringAndSize(NULL, size) # <<<<<<<<<<<<<<
+ * dst_buf = <unsigned char*>PyString_AS_STRING(result)
+ *
+ */
+ __pyx_t_1 = PyString_FromStringAndSize(NULL, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":461
+ * size = get_delta_hdr_size(&data, top)
+ * result = PyString_FromStringAndSize(NULL, size)
+ * dst_buf = <unsigned char*>PyString_AS_STRING(result) # <<<<<<<<<<<<<<
+ *
+ * failed = 0
+ */
+ __pyx_v_dst_buf = ((unsigned char *)PyString_AS_STRING(__pyx_v_result));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":463
+ * dst_buf = <unsigned char*>PyString_AS_STRING(result)
+ *
+ * failed = 0 # <<<<<<<<<<<<<<
+ * with nogil:
+ * out = dst_buf
+ */
+ __pyx_v_failed = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":464
+ *
+ * failed = 0
+ * with nogil: # <<<<<<<<<<<<<<
+ * out = dst_buf
+ * while (data < top):
+ */
+ { PyThreadState *_save;
+ Py_UNBLOCK_THREADS
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":465
+ * failed = 0
+ * with nogil:
+ * out = dst_buf # <<<<<<<<<<<<<<
+ * while (data < top):
+ * cmd = data[0]
+ */
+ __pyx_v_out = __pyx_v_dst_buf;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":466
+ * with nogil:
+ * out = dst_buf
+ * while (data < top): # <<<<<<<<<<<<<<
+ * cmd = data[0]
+ * data = data + 1
+ */
+ while (1) {
+ __pyx_t_2 = (__pyx_v_data < __pyx_v_top);
+ if (!__pyx_t_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":467
+ * out = dst_buf
+ * while (data < top):
+ * cmd = data[0] # <<<<<<<<<<<<<<
+ * data = data + 1
+ * if (cmd & 0x80):
+ */
+ __pyx_v_cmd = (__pyx_v_data[0]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":468
+ * while (data < top):
+ * cmd = data[0]
+ * data = data + 1 # <<<<<<<<<<<<<<
+ * if (cmd & 0x80):
+ * # Copy instruction
+ */
+ __pyx_v_data = (__pyx_v_data + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":469
+ * cmd = data[0]
+ * data = data + 1
+ * if (cmd & 0x80): # <<<<<<<<<<<<<<
+ * # Copy instruction
+ * data = _decode_copy_instruction(data, cmd, &cp_off, &cp_size)
+ */
+ __pyx_t_3 = (__pyx_v_cmd & 0x80);
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":471
+ * if (cmd & 0x80):
+ * # Copy instruction
+ * data = _decode_copy_instruction(data, cmd, &cp_off, &cp_size) # <<<<<<<<<<<<<<
+ * if (cp_off + cp_size < cp_size or
+ * cp_off + cp_size > <unsigned int>source_size or
+ */
+ __pyx_v_data = __pyx_f_6bzrlib_18_groupcompress_pyx__decode_copy_instruction(__pyx_v_data, __pyx_v_cmd, (&__pyx_v_cp_off), (&__pyx_v_cp_size));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":472
+ * # Copy instruction
+ * data = _decode_copy_instruction(data, cmd, &cp_off, &cp_size)
+ * if (cp_off + cp_size < cp_size or # <<<<<<<<<<<<<<
+ * cp_off + cp_size > <unsigned int>source_size or
+ * cp_size > <unsigned int>size):
+ */
+ __pyx_t_2 = ((__pyx_v_cp_off + __pyx_v_cp_size) < __pyx_v_cp_size);
+ if (!__pyx_t_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":473
+ * data = _decode_copy_instruction(data, cmd, &cp_off, &cp_size)
+ * if (cp_off + cp_size < cp_size or
+ * cp_off + cp_size > <unsigned int>source_size or # <<<<<<<<<<<<<<
+ * cp_size > <unsigned int>size):
+ * failed = 1
+ */
+ __pyx_t_4 = ((__pyx_v_cp_off + __pyx_v_cp_size) > ((unsigned int)__pyx_v_source_size));
+ if (!__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":474
+ * if (cp_off + cp_size < cp_size or
+ * cp_off + cp_size > <unsigned int>source_size or
+ * cp_size > <unsigned int>size): # <<<<<<<<<<<<<<
+ * failed = 1
+ * break
+ */
+ __pyx_t_5 = (__pyx_v_cp_size > ((unsigned int)__pyx_v_size));
+ __pyx_t_6 = __pyx_t_5;
+ } else {
+ __pyx_t_6 = __pyx_t_4;
+ }
+ __pyx_t_4 = __pyx_t_6;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":475
+ * cp_off + cp_size > <unsigned int>source_size or
+ * cp_size > <unsigned int>size):
+ * failed = 1 # <<<<<<<<<<<<<<
+ * break
+ * memcpy(out, source + cp_off, cp_size)
+ */
+ __pyx_v_failed = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":476
+ * cp_size > <unsigned int>size):
+ * failed = 1
+ * break # <<<<<<<<<<<<<<
+ * memcpy(out, source + cp_off, cp_size)
+ * out = out + cp_size
+ */
+ goto __pyx_L7_break;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":477
+ * failed = 1
+ * break
+ * memcpy(out, source + cp_off, cp_size) # <<<<<<<<<<<<<<
+ * out = out + cp_size
+ * size = size - cp_size
+ */
+ memcpy(__pyx_v_out, (__pyx_v_source + __pyx_v_cp_off), __pyx_v_cp_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":478
+ * break
+ * memcpy(out, source + cp_off, cp_size)
+ * out = out + cp_size # <<<<<<<<<<<<<<
+ * size = size - cp_size
+ * else:
+ */
+ __pyx_v_out = (__pyx_v_out + __pyx_v_cp_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":479
+ * memcpy(out, source + cp_off, cp_size)
+ * out = out + cp_size
+ * size = size - cp_size # <<<<<<<<<<<<<<
+ * else:
+ * # Insert instruction
+ */
+ __pyx_v_size = (__pyx_v_size - __pyx_v_cp_size);
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":482
+ * else:
+ * # Insert instruction
+ * if cmd == 0: # <<<<<<<<<<<<<<
+ * # cmd == 0 is reserved for future encoding
+ * # extensions. In the meantime we must fail when
+ */
+ __pyx_t_4 = (__pyx_v_cmd == 0);
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":486
+ * # extensions. In the mean time we must fail when
+ * # encountering them (might be data corruption).
+ * failed = 2 # <<<<<<<<<<<<<<
+ * break
+ * if cmd > size:
+ */
+ __pyx_v_failed = 2;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":487
+ * # encountering them (might be data corruption).
+ * failed = 2
+ * break # <<<<<<<<<<<<<<
+ * if cmd > size:
+ * failed = 3
+ */
+ goto __pyx_L7_break;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":488
+ * failed = 2
+ * break
+ * if cmd > size: # <<<<<<<<<<<<<<
+ * failed = 3
+ * break
+ */
+ __pyx_t_4 = (__pyx_v_cmd > __pyx_v_size);
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":489
+ * break
+ * if cmd > size:
+ * failed = 3 # <<<<<<<<<<<<<<
+ * break
+ * memcpy(out, data, cmd)
+ */
+ __pyx_v_failed = 3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":490
+ * if cmd > size:
+ * failed = 3
+ * break # <<<<<<<<<<<<<<
+ * memcpy(out, data, cmd)
+ * out = out + cmd
+ */
+ goto __pyx_L7_break;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":491
+ * failed = 3
+ * break
+ * memcpy(out, data, cmd) # <<<<<<<<<<<<<<
+ * out = out + cmd
+ * data = data + cmd
+ */
+ memcpy(__pyx_v_out, __pyx_v_data, __pyx_v_cmd);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":492
+ * break
+ * memcpy(out, data, cmd)
+ * out = out + cmd # <<<<<<<<<<<<<<
+ * data = data + cmd
+ * size = size - cmd
+ */
+ __pyx_v_out = (__pyx_v_out + __pyx_v_cmd);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":493
+ * memcpy(out, data, cmd)
+ * out = out + cmd
+ * data = data + cmd # <<<<<<<<<<<<<<
+ * size = size - cmd
+ * if failed:
+ */
+ __pyx_v_data = (__pyx_v_data + __pyx_v_cmd);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":494
+ * out = out + cmd
+ * data = data + cmd
+ * size = size - cmd # <<<<<<<<<<<<<<
+ * if failed:
+ * if failed == 1:
+ */
+ __pyx_v_size = (__pyx_v_size - __pyx_v_cmd);
+ }
+ __pyx_L8:;
+ }
+ __pyx_L7_break:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":464
+ *
+ * failed = 0
+ * with nogil: # <<<<<<<<<<<<<<
+ * out = dst_buf
+ * while (data < top):
+ */
+ /*finally:*/ {
+ Py_BLOCK_THREADS
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":495
+ * data = data + cmd
+ * size = size - cmd
+ * if failed: # <<<<<<<<<<<<<<
+ * if failed == 1:
+ * raise ValueError('Something wrong with:'
+ */
+ if (__pyx_v_failed) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":496
+ * size = size - cmd
+ * if failed:
+ * if failed == 1: # <<<<<<<<<<<<<<
+ * raise ValueError('Something wrong with:'
+ * ' cp_off = %s, cp_size = %s'
+ */
+ switch (__pyx_v_failed) {
+ case 1:
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":500
+ * ' cp_off = %s, cp_size = %s'
+ * ' source_size = %s, size = %s'
+ * % (cp_off, cp_size, source_size, size)) # <<<<<<<<<<<<<<
+ * elif failed == 2:
+ * raise ValueError('Got delta opcode: 0, not supported')
+ */
+ __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_cp_off); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = PyLong_FromUnsignedLong(__pyx_v_cp_size); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = PyInt_FromSsize_t(__pyx_v_source_size); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = PyTuple_New(4); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_10, 2, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_10, 3, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __pyx_t_1 = 0;
+ __pyx_t_7 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_19), __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)__pyx_t_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_9));
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_Raise(__pyx_t_9, 0, 0);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":501
+ * ' source_size = %s, size = %s'
+ * % (cp_off, cp_size, source_size, size))
+ * elif failed == 2: # <<<<<<<<<<<<<<
+ * raise ValueError('Got delta opcode: 0, not supported')
+ * elif failed == 3:
+ */
+ case 2:
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":502
+ * % (cp_off, cp_size, source_size, size))
+ * elif failed == 2:
+ * raise ValueError('Got delta opcode: 0, not supported') # <<<<<<<<<<<<<<
+ * elif failed == 3:
+ * raise ValueError('Insert instruction longer than remaining'
+ */
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_20));
+ PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)__pyx_kp_s_20));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_20));
+ __pyx_t_10 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_Raise(__pyx_t_10, 0, 0);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":503
+ * elif failed == 2:
+ * raise ValueError('Got delta opcode: 0, not supported')
+ * elif failed == 3: # <<<<<<<<<<<<<<
+ * raise ValueError('Insert instruction longer than remaining'
+ * ' bytes: %d > %d' % (cmd, size))
+ */
+ case 3:
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":505
+ * elif failed == 3:
+ * raise ValueError('Insert instruction longer than remaining'
+ * ' bytes: %d > %d' % (cmd, size)) # <<<<<<<<<<<<<<
+ *
+ * # sanity check
+ */
+ __pyx_t_10 = PyInt_FromLong(__pyx_v_cmd); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ __pyx_t_10 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_21), __pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_9));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_9));
+ __pyx_t_9 = 0;
+ __pyx_t_9 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_8, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_Raise(__pyx_t_9, 0, 0);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":508
+ *
+ * # sanity check
+ * if (data != top or size != 0): # <<<<<<<<<<<<<<
+ * raise RuntimeError('Did not extract the number of bytes we expected'
+ * ' we were left with %d bytes in "size", and top - data = %d'
+ */
+ __pyx_t_4 = (__pyx_v_data != __pyx_v_top);
+ if (!__pyx_t_4) {
+ __pyx_t_2 = (__pyx_v_size != 0);
+ __pyx_t_6 = __pyx_t_2;
+ } else {
+ __pyx_t_6 = __pyx_t_4;
+ }
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":511
+ * raise RuntimeError('Did not extract the number of bytes we expected'
+ * ' we were left with %d bytes in "size", and top - data = %d'
+ * % (size, <int>(top - data))) # <<<<<<<<<<<<<<
+ * return None
+ *
+ */
+ __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_8 = PyInt_FromLong((__pyx_v_top - __pyx_v_data)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_9 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_22), __pyx_t_10); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_8));
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)__pyx_t_8));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_8));
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_Raise(__pyx_t_8, 0, 0);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":512
+ * ' we were left with %d bytes in "size", and top - data = %d'
+ * % (size, <int>(top - data)))
+ * return None # <<<<<<<<<<<<<<
+ *
+ * # *dst_size = out - dst_buf;
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":515
+ *
+ * # *dst_size = out - dst_buf;
+ * if (out - dst_buf) != PyString_GET_SIZE(result): # <<<<<<<<<<<<<<
+ * raise RuntimeError('Number of bytes extracted did not match the'
+ * ' size encoded in the delta header.')
+ */
+ __pyx_t_6 = ((__pyx_v_out - __pyx_v_dst_buf) != PyString_GET_SIZE(__pyx_v_result));
+ if (__pyx_t_6) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":516
+ * # *dst_size = out - dst_buf;
+ * if (out - dst_buf) != PyString_GET_SIZE(result):
+ * raise RuntimeError('Number of bytes extracted did not match the' # <<<<<<<<<<<<<<
+ * ' size encoded in the delta header.')
+ * return result
+ */
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_23));
+ PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_kp_s_23));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_23));
+ __pyx_t_10 = PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_8, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_Raise(__pyx_t_10, 0, 0);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":518
+ * raise RuntimeError('Number of bytes extracted did not match the'
+ * ' size encoded in the delta header.')
+ * return result # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx._apply_delta");
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_DECREF(__pyx_v_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
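_apply_delta is the heart of the module: it reads the target length from a base128 header (the generated code calls get_delta_hdr_size once, so the header appears to carry only the target size), then walks the instruction stream, copying from source for commands with the high bit set and inserting literal bytes otherwise. A compact pure-Python sketch, reusing the decode_copy_instruction helper sketched after _decode_copy_instruction above; illustrative only, without the C version's speed or exact error messages:

    def decode_base128_int(data, pos):
        """Decode a 7-bits-per-byte little-endian integer; return (value, new_pos)."""
        val = shift = 0
        while True:
            b = ord(data[pos]); pos += 1
            val |= (b & 0x7F) << shift
            shift += 7
            if not b & 0x80:
                return val, pos

    def py_apply_delta(source, delta):
        """Slow, illustrative equivalent of _apply_delta."""
        target_size, pos = decode_base128_int(delta, 0)
        out = []
        while pos < len(delta):
            cmd = ord(delta[pos]); pos += 1
            if cmd & 0x80:
                # copy instruction: take `size` bytes from `source` at offset `off`
                off, size, pos = decode_copy_instruction(delta, pos, cmd)
                if off + size > len(source):
                    raise ValueError('Something wrong with: cp_off = %s, cp_size = %s'
                                     % (off, size))
                out.append(source[off:off + size])
            elif cmd:
                # insert instruction: the next `cmd` bytes are literal target data
                out.append(delta[pos:pos + cmd]); pos += cmd
            else:
                raise ValueError('Got delta opcode: 0, not supported')
        result = ''.join(out)
        if len(result) != target_size:
            raise RuntimeError('Number of bytes extracted did not match the'
                               ' size encoded in the delta header.')
        return result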
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":521
+ *
+ *
+ * def apply_delta_to_source(source, delta_start, delta_end): # <<<<<<<<<<<<<<
+ * """Extract a delta from source bytes, and apply it."""
+ * cdef char *c_source
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_apply_delta_to_source(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_apply_delta_to_source[] = "Extract a delta from source bytes, and apply it.";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_apply_delta_to_source(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_source = 0;
+ PyObject *__pyx_v_delta_start = 0;
+ PyObject *__pyx_v_delta_end = 0;
+ char *__pyx_v_c_source;
+ Py_ssize_t __pyx_v_c_source_size;
+ char *__pyx_v_c_delta;
+ Py_ssize_t __pyx_v_c_delta_size;
+ Py_ssize_t __pyx_v_c_delta_start;
+ Py_ssize_t __pyx_v_c_delta_end;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__source,&__pyx_n_s__delta_start,&__pyx_n_s__delta_end,0};
+ __Pyx_RefNannySetupContext("apply_delta_to_source");
+ __pyx_self = __pyx_self;
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
+ PyObject* values[3] = {0,0,0};
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__source);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__delta_start);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("apply_delta_to_source", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__delta_end);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("apply_delta_to_source", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "apply_delta_to_source") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ __pyx_v_source = values[0];
+ __pyx_v_delta_start = values[1];
+ __pyx_v_delta_end = values[2];
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ __pyx_v_source = PyTuple_GET_ITEM(__pyx_args, 0);
+ __pyx_v_delta_start = PyTuple_GET_ITEM(__pyx_args, 1);
+ __pyx_v_delta_end = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("apply_delta_to_source", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.apply_delta_to_source");
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":529
+ * cdef Py_ssize_t c_delta_start, c_delta_end
+ *
+ * if not PyString_CheckExact(source): # <<<<<<<<<<<<<<
+ * raise TypeError('source is not a str')
+ * c_source_size = PyString_GET_SIZE(source)
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_source));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":530
+ *
+ * if not PyString_CheckExact(source):
+ * raise TypeError('source is not a str') # <<<<<<<<<<<<<<
+ * c_source_size = PyString_GET_SIZE(source)
+ * c_delta_start = delta_start
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_13));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_13));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":531
+ * if not PyString_CheckExact(source):
+ * raise TypeError('source is not a str')
+ * c_source_size = PyString_GET_SIZE(source) # <<<<<<<<<<<<<<
+ * c_delta_start = delta_start
+ * c_delta_end = delta_end
+ */
+ __pyx_v_c_source_size = PyString_GET_SIZE(__pyx_v_source);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":532
+ * raise TypeError('source is not a str')
+ * c_source_size = PyString_GET_SIZE(source)
+ * c_delta_start = delta_start # <<<<<<<<<<<<<<
+ * c_delta_end = delta_end
+ * if c_delta_start >= c_source_size:
+ */
+ __pyx_t_4 = __Pyx_PyIndex_AsSsize_t(__pyx_v_delta_start); if (unlikely((__pyx_t_4 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_c_delta_start = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":533
+ * c_source_size = PyString_GET_SIZE(source)
+ * c_delta_start = delta_start
+ * c_delta_end = delta_end # <<<<<<<<<<<<<<
+ * if c_delta_start >= c_source_size:
+ * raise ValueError('delta starts after source')
+ */
+ __pyx_t_4 = __Pyx_PyIndex_AsSsize_t(__pyx_v_delta_end); if (unlikely((__pyx_t_4 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_c_delta_end = __pyx_t_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":534
+ * c_delta_start = delta_start
+ * c_delta_end = delta_end
+ * if c_delta_start >= c_source_size: # <<<<<<<<<<<<<<
+ * raise ValueError('delta starts after source')
+ * if c_delta_end > c_source_size:
+ */
+ __pyx_t_1 = (__pyx_v_c_delta_start >= __pyx_v_c_source_size);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":535
+ * c_delta_end = delta_end
+ * if c_delta_start >= c_source_size:
+ * raise ValueError('delta starts after source') # <<<<<<<<<<<<<<
+ * if c_delta_end > c_source_size:
+ * raise ValueError('delta ends after source')
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 535; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_24));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_24));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_24));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 535; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 535; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":536
+ * if c_delta_start >= c_source_size:
+ * raise ValueError('delta starts after source')
+ * if c_delta_end > c_source_size: # <<<<<<<<<<<<<<
+ * raise ValueError('delta ends after source')
+ * if c_delta_start >= c_delta_end:
+ */
+ __pyx_t_1 = (__pyx_v_c_delta_end > __pyx_v_c_source_size);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":537
+ * raise ValueError('delta starts after source')
+ * if c_delta_end > c_source_size:
+ * raise ValueError('delta ends after source') # <<<<<<<<<<<<<<
+ * if c_delta_start >= c_delta_end:
+ * raise ValueError('delta starts after it ends')
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_25));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_25));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_25));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":538
+ * if c_delta_end > c_source_size:
+ * raise ValueError('delta ends after source')
+ * if c_delta_start >= c_delta_end: # <<<<<<<<<<<<<<
+ * raise ValueError('delta starts after it ends')
+ *
+ */
+ __pyx_t_1 = (__pyx_v_c_delta_start >= __pyx_v_c_delta_end);
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":539
+ * raise ValueError('delta ends after source')
+ * if c_delta_start >= c_delta_end:
+ * raise ValueError('delta starts after it ends') # <<<<<<<<<<<<<<
+ *
+ * c_delta_size = c_delta_end - c_delta_start
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_26));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_26));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_26));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":541
+ * raise ValueError('delta starts after it ends')
+ *
+ * c_delta_size = c_delta_end - c_delta_start # <<<<<<<<<<<<<<
+ * c_source = PyString_AS_STRING(source)
+ * c_delta = c_source + c_delta_start
+ */
+ __pyx_v_c_delta_size = (__pyx_v_c_delta_end - __pyx_v_c_delta_start);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":542
+ *
+ * c_delta_size = c_delta_end - c_delta_start
+ * c_source = PyString_AS_STRING(source) # <<<<<<<<<<<<<<
+ * c_delta = c_source + c_delta_start
+ * # We don't use source_size, because we know the delta should not refer to
+ */
+ __pyx_v_c_source = PyString_AS_STRING(__pyx_v_source);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":543
+ * c_delta_size = c_delta_end - c_delta_start
+ * c_source = PyString_AS_STRING(source)
+ * c_delta = c_source + c_delta_start # <<<<<<<<<<<<<<
+ * # We don't use source_size, because we know the delta should not refer to
+ * # any bytes after it starts
+ */
+ __pyx_v_c_delta = (__pyx_v_c_source + __pyx_v_c_delta_start);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":546
+ * # We don't use source_size, because we know the delta should not refer to
+ * # any bytes after it starts
+ * return _apply_delta(c_source, c_delta_start, c_delta, c_delta_size) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __pyx_f_6bzrlib_18_groupcompress_pyx__apply_delta(__pyx_v_c_source, __pyx_v_c_delta_start, __pyx_v_c_delta, __pyx_v_c_delta_size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.apply_delta_to_source");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
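apply_delta_to_source exists because groupcompress stores a delta immediately after its source text in one string; after the bounds checks it applies source[delta_start:delta_end] as the delta, passing delta_start as the usable source size. A hypothetical pure-Python equivalent, built on the py_apply_delta sketch above:

    def py_apply_delta_to_source(source, delta_start, delta_end):
        """Illustrative only: extract the delta slice and apply it to the text before it."""
        if not isinstance(source, str):
            raise TypeError('source is not a str')
        if delta_start >= len(source):
            raise ValueError('delta starts after source')
        if delta_end > len(source):
            raise ValueError('delta ends after source')
        if delta_start >= delta_end:
            raise ValueError('delta starts after it ends')
        # Only the bytes before the delta are legal copy targets, which is why
        # the C version passes delta_start as the source size.
        return py_apply_delta(source[:delta_start], source[delta_start:delta_end])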
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":549
+ *
+ *
+ * def encode_base128_int(val): # <<<<<<<<<<<<<<
+ * """Convert an integer into a 7-bit lsb encoding."""
+ * cdef unsigned int c_val
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_encode_base128_int(PyObject *__pyx_self, PyObject *__pyx_v_val); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_encode_base128_int[] = "Convert an integer into a 7-bit lsb encoding.";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_encode_base128_int(PyObject *__pyx_self, PyObject *__pyx_v_val) {
+ unsigned int __pyx_v_c_val;
+ Py_ssize_t __pyx_v_count;
+ unsigned char __pyx_v_c_bytes[8];
+ PyObject *__pyx_r = NULL;
+ unsigned int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ __Pyx_RefNannySetupContext("encode_base128_int");
+ __pyx_self = __pyx_self;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":556
+ * cdef unsigned char c_bytes[8] # max size for 32-bit int is 5 bytes
+ *
+ * c_val = val # <<<<<<<<<<<<<<
+ * count = 0
+ * while c_val >= 0x80 and count < 8:
+ */
+ __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_v_val); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_c_val = __pyx_t_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":557
+ *
+ * c_val = val
+ * count = 0 # <<<<<<<<<<<<<<
+ * while c_val >= 0x80 and count < 8:
+ * c_bytes[count] = <unsigned char>((c_val | 0x80) & 0xFF)
+ */
+ __pyx_v_count = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":558
+ * c_val = val
+ * count = 0
+ * while c_val >= 0x80 and count < 8: # <<<<<<<<<<<<<<
+ * c_bytes[count] = <unsigned char>((c_val | 0x80) & 0xFF)
+ * c_val = c_val >> 7
+ */
+ while (1) {
+ __pyx_t_2 = (__pyx_v_c_val >= 0x80);
+ if (__pyx_t_2) {
+ __pyx_t_3 = (__pyx_v_count < 8);
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_2;
+ }
+ if (!__pyx_t_4) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":559
+ * count = 0
+ * while c_val >= 0x80 and count < 8:
+ * c_bytes[count] = <unsigned char>((c_val | 0x80) & 0xFF) # <<<<<<<<<<<<<<
+ * c_val = c_val >> 7
+ * count = count + 1
+ */
+ (__pyx_v_c_bytes[__pyx_v_count]) = ((unsigned char)((__pyx_v_c_val | 0x80) & 0xFF));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":560
+ * while c_val >= 0x80 and count < 8:
+ * c_bytes[count] = <unsigned char>((c_val | 0x80) & 0xFF)
+ * c_val = c_val >> 7 # <<<<<<<<<<<<<<
+ * count = count + 1
+ * if count >= 8 or c_val >= 0x80:
+ */
+ __pyx_v_c_val = (__pyx_v_c_val >> 7);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":561
+ * c_bytes[count] = <unsigned char>((c_val | 0x80) & 0xFF)
+ * c_val = c_val >> 7
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * if count >= 8 or c_val >= 0x80:
+ * raise ValueError('encode_base128_int overflowed the buffer')
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":562
+ * c_val = c_val >> 7
+ * count = count + 1
+ * if count >= 8 or c_val >= 0x80: # <<<<<<<<<<<<<<
+ * raise ValueError('encode_base128_int overflowed the buffer')
+ * c_bytes[count] = <unsigned char>(c_val & 0xFF)
+ */
+ __pyx_t_4 = (__pyx_v_count >= 8);
+ if (!__pyx_t_4) {
+ __pyx_t_2 = (__pyx_v_c_val >= 0x80);
+ __pyx_t_3 = __pyx_t_2;
+ } else {
+ __pyx_t_3 = __pyx_t_4;
+ }
+ if (__pyx_t_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":563
+ * count = count + 1
+ * if count >= 8 or c_val >= 0x80:
+ * raise ValueError('encode_base128_int overflowed the buffer') # <<<<<<<<<<<<<<
+ * c_bytes[count] = <unsigned char>(c_val & 0xFF)
+ * count = count + 1
+ */
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_27));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_kp_s_27));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_27));
+ __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":564
+ * if count >= 8 or c_val >= 0x80:
+ * raise ValueError('encode_base128_int overflowed the buffer')
+ * c_bytes[count] = <unsigned char>(c_val & 0xFF) # <<<<<<<<<<<<<<
+ * count = count + 1
+ * return PyString_FromStringAndSize(<char *>c_bytes, count)
+ */
+ (__pyx_v_c_bytes[__pyx_v_count]) = ((unsigned char)(__pyx_v_c_val & 0xFF));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":565
+ * raise ValueError('encode_base128_int overflowed the buffer')
+ * c_bytes[count] = <unsigned char>(c_val & 0xFF)
+ * count = count + 1 # <<<<<<<<<<<<<<
+ * return PyString_FromStringAndSize(<char *>c_bytes, count)
+ *
+ */
+ __pyx_v_count = (__pyx_v_count + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":566
+ * c_bytes[count] = <unsigned char>(c_val & 0xFF)
+ * count = count + 1
+ * return PyString_FromStringAndSize(<char *>c_bytes, count) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_6 = PyString_FromStringAndSize(((char *)__pyx_v_c_bytes), __pyx_v_count); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.encode_base128_int");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
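
As an aside, the 7-bit lsb (base-128) scheme that encode_base128_int implements above can be shown as a small standalone C sketch: each output byte carries seven low-order bits of the value, least significant group first, with the high bit set on every byte except the last. The helper name encode_base128_uint is hypothetical; the ValueError the real function raises on overflow is replaced here by the fixed 5-byte bound sufficient for 32-bit values.

/* Hypothetical standalone sketch of the 7-bit lsb encoding; illustrative only. */
#include <stdio.h>

static size_t encode_base128_uint(unsigned int val, unsigned char out[5])
{
    size_t count = 0;
    while (val >= 0x80) {
        out[count++] = (unsigned char)((val | 0x80) & 0xFF);  /* low 7 bits, continuation bit set */
        val >>= 7;
    }
    out[count++] = (unsigned char)(val & 0xFF);               /* final byte, high bit clear */
    return count;                                             /* at most 5 bytes for a 32-bit value */
}

int main(void)
{
    unsigned char buf[5];
    size_t n = encode_base128_uint(300, buf);                 /* 300 encodes as 0xAC 0x02 */
    for (size_t i = 0; i < n; i++)
        printf("%02x ", buf[i]);
    printf("\n");
    return 0;
}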
+/* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":569
+ *
+ *
+ * def decode_base128_int(bytes): # <<<<<<<<<<<<<<
+ * """Decode an integer from a 7-bit lsb encoding."""
+ * cdef int offset
+ */
+
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_decode_base128_int(PyObject *__pyx_self, PyObject *__pyx_v_bytes); /*proto*/
+static char __pyx_doc_6bzrlib_18_groupcompress_pyx_decode_base128_int[] = "Decode an integer from a 7-bit lsb encoding.";
+static PyObject *__pyx_pf_6bzrlib_18_groupcompress_pyx_decode_base128_int(PyObject *__pyx_self, PyObject *__pyx_v_bytes) {
+ int __pyx_v_offset;
+ int __pyx_v_val;
+ unsigned int __pyx_v_uval;
+ int __pyx_v_shift;
+ Py_ssize_t __pyx_v_num_low_bytes;
+ unsigned char *__pyx_v_c_bytes;
+ PyObject *__pyx_r = NULL;
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ long __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ __Pyx_RefNannySetupContext("decode_base128_int");
+ __pyx_self = __pyx_self;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":578
+ * cdef unsigned char *c_bytes
+ *
+ * offset = 0 # <<<<<<<<<<<<<<
+ * val = 0
+ * shift = 0
+ */
+ __pyx_v_offset = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":579
+ *
+ * offset = 0
+ * val = 0 # <<<<<<<<<<<<<<
+ * shift = 0
+ * if not PyString_CheckExact(bytes):
+ */
+ __pyx_v_val = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":580
+ * offset = 0
+ * val = 0
+ * shift = 0 # <<<<<<<<<<<<<<
+ * if not PyString_CheckExact(bytes):
+ * raise TypeError('bytes is not a string')
+ */
+ __pyx_v_shift = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":581
+ * val = 0
+ * shift = 0
+ * if not PyString_CheckExact(bytes): # <<<<<<<<<<<<<<
+ * raise TypeError('bytes is not a string')
+ * c_bytes = <unsigned char*>PyString_AS_STRING(bytes)
+ */
+ __pyx_t_1 = (!PyString_CheckExact(__pyx_v_bytes));
+ if (__pyx_t_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":582
+ * shift = 0
+ * if not PyString_CheckExact(bytes):
+ * raise TypeError('bytes is not a string') # <<<<<<<<<<<<<<
+ * c_bytes = <unsigned char*>PyString_AS_STRING(bytes)
+ * # We take off 1, because we have to be able to decode the non-expanded byte
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_28));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_28));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_28));
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":583
+ * if not PyString_CheckExact(bytes):
+ * raise TypeError('bytes is not a string')
+ * c_bytes = <unsigned char*>PyString_AS_STRING(bytes) # <<<<<<<<<<<<<<
+ * # We take off 1, because we have to be able to decode the non-expanded byte
+ * num_low_bytes = PyString_GET_SIZE(bytes) - 1
+ */
+ __pyx_v_c_bytes = ((unsigned char *)PyString_AS_STRING(__pyx_v_bytes));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":585
+ * c_bytes = <unsigned char*>PyString_AS_STRING(bytes)
+ * # We take off 1, because we have to be able to decode the non-expanded byte
+ * num_low_bytes = PyString_GET_SIZE(bytes) - 1 # <<<<<<<<<<<<<<
+ * while (c_bytes[offset] & 0x80) and offset < num_low_bytes:
+ * val = val | ((c_bytes[offset] & 0x7F) << shift)
+ */
+ __pyx_v_num_low_bytes = (PyString_GET_SIZE(__pyx_v_bytes) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":586
+ * # We take off 1, because we have to be able to decode the non-expanded byte
+ * num_low_bytes = PyString_GET_SIZE(bytes) - 1
+ * while (c_bytes[offset] & 0x80) and offset < num_low_bytes: # <<<<<<<<<<<<<<
+ * val = val | ((c_bytes[offset] & 0x7F) << shift)
+ * shift = shift + 7
+ */
+ while (1) {
+ if (((__pyx_v_c_bytes[__pyx_v_offset]) & 0x80)) {
+ __pyx_t_1 = (__pyx_v_offset < __pyx_v_num_low_bytes);
+ __pyx_t_4 = __pyx_t_1;
+ } else {
+ __pyx_t_4 = ((__pyx_v_c_bytes[__pyx_v_offset]) & 0x80);
+ }
+ if (!__pyx_t_4) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":587
+ * num_low_bytes = PyString_GET_SIZE(bytes) - 1
+ * while (c_bytes[offset] & 0x80) and offset < num_low_bytes:
+ * val = val | ((c_bytes[offset] & 0x7F) << shift) # <<<<<<<<<<<<<<
+ * shift = shift + 7
+ * offset = offset + 1
+ */
+ __pyx_v_val = (__pyx_v_val | (((__pyx_v_c_bytes[__pyx_v_offset]) & 0x7F) << __pyx_v_shift));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":588
+ * while (c_bytes[offset] & 0x80) and offset < num_low_bytes:
+ * val = val | ((c_bytes[offset] & 0x7F) << shift)
+ * shift = shift + 7 # <<<<<<<<<<<<<<
+ * offset = offset + 1
+ * if c_bytes[offset] & 0x80:
+ */
+ __pyx_v_shift = (__pyx_v_shift + 7);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":589
+ * val = val | ((c_bytes[offset] & 0x7F) << shift)
+ * shift = shift + 7
+ * offset = offset + 1 # <<<<<<<<<<<<<<
+ * if c_bytes[offset] & 0x80:
+ * raise ValueError('Data not properly formatted, we ran out of'
+ */
+ __pyx_v_offset = (__pyx_v_offset + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":590
+ * shift = shift + 7
+ * offset = offset + 1
+ * if c_bytes[offset] & 0x80: # <<<<<<<<<<<<<<
+ * raise ValueError('Data not properly formatted, we ran out of'
+ * ' bytes before 0x80 stopped being set.')
+ */
+ __pyx_t_5 = ((__pyx_v_c_bytes[__pyx_v_offset]) & 0x80);
+ if (__pyx_t_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":591
+ * offset = offset + 1
+ * if c_bytes[offset] & 0x80:
+ * raise ValueError('Data not properly formatted, we ran out of' # <<<<<<<<<<<<<<
+ * ' bytes before 0x80 stopped being set.')
+ * val = val | (c_bytes[offset] << shift)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_29));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_29));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_29));
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":593
+ * raise ValueError('Data not properly formatted, we ran out of'
+ * ' bytes before 0x80 stopped being set.')
+ * val = val | (c_bytes[offset] << shift) # <<<<<<<<<<<<<<
+ * offset = offset + 1
+ * if val < 0:
+ */
+ __pyx_v_val = (__pyx_v_val | ((__pyx_v_c_bytes[__pyx_v_offset]) << __pyx_v_shift));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":594
+ * ' bytes before 0x80 stopped being set.')
+ * val = val | (c_bytes[offset] << shift)
+ * offset = offset + 1 # <<<<<<<<<<<<<<
+ * if val < 0:
+ * uval = <unsigned int> val
+ */
+ __pyx_v_offset = (__pyx_v_offset + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":595
+ * val = val | (c_bytes[offset] << shift)
+ * offset = offset + 1
+ * if val < 0: # <<<<<<<<<<<<<<
+ * uval = <unsigned int> val
+ * return uval, offset
+ */
+ __pyx_t_4 = (__pyx_v_val < 0);
+ if (__pyx_t_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":596
+ * offset = offset + 1
+ * if val < 0:
+ * uval = <unsigned int> val # <<<<<<<<<<<<<<
+ * return uval, offset
+ * return val, offset
+ */
+ __pyx_v_uval = ((unsigned int)__pyx_v_val);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":597
+ * if val < 0:
+ * uval = <unsigned int> val
+ * return uval, offset # <<<<<<<<<<<<<<
+ * return val, offset
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyLong_FromUnsignedLong(__pyx_v_uval); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_offset); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":598
+ * uval = <unsigned int> val
+ * return uval, offset
+ * return val, offset # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_6 = PyInt_FromLong(__pyx_v_val); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_offset); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_6 = 0;
+ __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("bzrlib._groupcompress_pyx.decode_base128_int");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
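
The matching decoder, again as a hedged standalone sketch rather than the generated module's code: bytes with the high bit set contribute seven bits each, low groups first, and the first byte with the high bit clear terminates the value. The helper name decode_base128_uint is hypothetical, and the truncation check that lets the real decode_base128_int raise ValueError on malformed input is omitted for brevity.

/* Hypothetical standalone sketch of the 7-bit lsb decoder; illustrative only. */
#include <stdio.h>

static unsigned int decode_base128_uint(const unsigned char *bytes, size_t *used)
{
    unsigned int val = 0;
    int shift = 0;
    size_t i = 0;
    while (bytes[i] & 0x80) {                                  /* continuation bit set */
        val |= (unsigned int)(bytes[i] & 0x7F) << shift;       /* accumulate low 7 bits */
        shift += 7;
        i++;
    }
    val |= (unsigned int)bytes[i] << shift;                    /* final byte */
    *used = i + 1;                                             /* bytes consumed */
    return val;
}

int main(void)
{
    const unsigned char buf[] = { 0xAC, 0x02 };                /* encodes 300 */
    size_t used = 0;
    printf("%u (%zu bytes)\n", decode_base128_uint(buf, &used), used);
    return 0;
}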
+static struct __pyx_vtabstruct_6bzrlib_18_groupcompress_pyx_DeltaIndex __pyx_vtable_6bzrlib_18_groupcompress_pyx_DeltaIndex;
+
+static PyObject *__pyx_tp_new_6bzrlib_18_groupcompress_pyx_DeltaIndex(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)o);
+ p->__pyx_vtab = __pyx_vtabptr_6bzrlib_18_groupcompress_pyx_DeltaIndex;
+ p->_sources = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_18_groupcompress_pyx_DeltaIndex(PyObject *o) {
+ struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *p = (struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)o;
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++Py_REFCNT(o);
+ __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --Py_REFCNT(o);
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_XDECREF(p->_sources);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_18_groupcompress_pyx_DeltaIndex(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *p = (struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)o;
+ if (p->_sources) {
+ e = (*v)(p->_sources, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_18_groupcompress_pyx_DeltaIndex(PyObject *o) {
+ struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *p = (struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *)o;
+ PyObject* tmp;
+ tmp = ((PyObject*)p->_sources);
+ p->_sources = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyObject *__pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__sources(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_8_sources___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__source_offset(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_14_source_offset___get__(o);
+}
+
+static int __pyx_setprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__source_offset(PyObject *o, PyObject *v, void *x) {
+ if (v) {
+ return __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_14_source_offset___set__(o, v);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError, "__del__");
+ return -1;
+ }
+}
+
+static PyObject *__pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__max_num_sources(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_16_max_num_sources___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__max_bytes_to_index(PyObject *o, void *x) {
+ return __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_19_max_bytes_to_index___get__(o);
+}
+
+static int __pyx_setprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__max_bytes_to_index(PyObject *o, PyObject *v, void *x) {
+ if (v) {
+ return __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_19_max_bytes_to_index___set__(o, v);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError, "__del__");
+ return -1;
+ }
+}
+
+static PyMethodDef __pyx_methods_6bzrlib_18_groupcompress_pyx_DeltaIndex[] = {
+ {__Pyx_NAMESTR("__sizeof__"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___sizeof__, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("_has_index"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex__has_index, METH_NOARGS, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("_dump_index"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex__dump_index, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex__dump_index)},
+ {__Pyx_NAMESTR("add_delta_source"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_delta_source, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_delta_source)},
+ {__Pyx_NAMESTR("add_source"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_source, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex_add_source)},
+ {__Pyx_NAMESTR("make_delta"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex_make_delta, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_10DeltaIndex_make_delta)},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_18_groupcompress_pyx_DeltaIndex[] = {
+ {(char *)"_sources", __pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__sources, 0, 0, 0},
+ {(char *)"_source_offset", __pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__source_offset, __pyx_setprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__source_offset, 0, 0},
+ {(char *)"_max_num_sources", __pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__max_num_sources, 0, 0, 0},
+ {(char *)"_max_bytes_to_index", __pyx_getprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__max_bytes_to_index, __pyx_setprop_6bzrlib_18_groupcompress_pyx_10DeltaIndex__max_bytes_to_index, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_DeltaIndex = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_DeltaIndex = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_DeltaIndex = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_DeltaIndex = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+PyTypeObject __pyx_type_6bzrlib_18_groupcompress_pyx_DeltaIndex = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("bzrlib._groupcompress_pyx.DeltaIndex"), /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_18_groupcompress_pyx_DeltaIndex, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___repr__, /*tp_repr*/
+ &__pyx_tp_as_number_DeltaIndex, /*tp_as_number*/
+ &__pyx_tp_as_sequence_DeltaIndex, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_DeltaIndex, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_DeltaIndex, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_18_groupcompress_pyx_DeltaIndex, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_18_groupcompress_pyx_DeltaIndex, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_18_groupcompress_pyx_DeltaIndex, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_6bzrlib_18_groupcompress_pyx_DeltaIndex, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pf_6bzrlib_18_groupcompress_pyx_10DeltaIndex___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_18_groupcompress_pyx_DeltaIndex, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+
+static PyMethodDef __pyx_methods[] = {
+ {__Pyx_NAMESTR("make_delta_index"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_make_delta_index, METH_O, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("_rabin_hash"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx__rabin_hash, METH_O, __Pyx_DOCSTR(0)},
+ {__Pyx_NAMESTR("make_delta"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_make_delta, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_make_delta)},
+ {__Pyx_NAMESTR("apply_delta"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_apply_delta, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_apply_delta)},
+ {__Pyx_NAMESTR("apply_delta_to_source"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_apply_delta_to_source, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_apply_delta_to_source)},
+ {__Pyx_NAMESTR("encode_base128_int"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_encode_base128_int, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_encode_base128_int)},
+ {__Pyx_NAMESTR("decode_base128_int"), (PyCFunction)__pyx_pf_6bzrlib_18_groupcompress_pyx_decode_base128_int, METH_O, __Pyx_DOCSTR(__pyx_doc_6bzrlib_18_groupcompress_pyx_decode_base128_int)},
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ __Pyx_NAMESTR("_groupcompress_pyx"),
+ __Pyx_DOCSTR(__pyx_k_30), /* m_doc */
+ -1, /* m_size */
+ __pyx_methods /* m_methods */,
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
+ {&__pyx_kp_s_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 0, 1, 0},
+ {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0},
+ {&__pyx_kp_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 0},
+ {&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 0},
+ {&__pyx_n_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 1},
+ {&__pyx_kp_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 0},
+ {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0},
+ {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0},
+ {&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0},
+ {&__pyx_kp_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 0},
+ {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0},
+ {&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0},
+ {&__pyx_kp_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 0},
+ {&__pyx_kp_s_22, __pyx_k_22, sizeof(__pyx_k_22), 0, 0, 1, 0},
+ {&__pyx_kp_s_23, __pyx_k_23, sizeof(__pyx_k_23), 0, 0, 1, 0},
+ {&__pyx_kp_s_24, __pyx_k_24, sizeof(__pyx_k_24), 0, 0, 1, 0},
+ {&__pyx_kp_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 0},
+ {&__pyx_kp_s_26, __pyx_k_26, sizeof(__pyx_k_26), 0, 0, 1, 0},
+ {&__pyx_kp_s_27, __pyx_k_27, sizeof(__pyx_k_27), 0, 0, 1, 0},
+ {&__pyx_kp_s_28, __pyx_k_28, sizeof(__pyx_k_28), 0, 0, 1, 0},
+ {&__pyx_kp_s_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 0, 1, 0},
+ {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0},
+ {&__pyx_kp_u_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 1, 0, 0},
+ {&__pyx_kp_u_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 1, 0, 0},
+ {&__pyx_kp_u_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 1, 0, 0},
+ {&__pyx_kp_u_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 1, 0, 0},
+ {&__pyx_kp_u_35, __pyx_k_35, sizeof(__pyx_k_35), 0, 1, 0, 0},
+ {&__pyx_kp_u_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 1, 0, 0},
+ {&__pyx_kp_u_37, __pyx_k_37, sizeof(__pyx_k_37), 0, 1, 0, 0},
+ {&__pyx_n_s_38, __pyx_k_38, sizeof(__pyx_k_38), 0, 0, 1, 1},
+ {&__pyx_kp_u_39, __pyx_k_39, sizeof(__pyx_k_39), 0, 1, 0, 0},
+ {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0},
+ {&__pyx_kp_u_40, __pyx_k_40, sizeof(__pyx_k_40), 0, 1, 0, 0},
+ {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0},
+ {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0},
+ {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0},
+ {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0},
+ {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0},
+ {&__pyx_n_s__AssertionError, __pyx_k__AssertionError, sizeof(__pyx_k__AssertionError), 0, 0, 1, 1},
+ {&__pyx_n_s__DeltaIndex, __pyx_k__DeltaIndex, sizeof(__pyx_k__DeltaIndex), 0, 0, 1, 1},
+ {&__pyx_n_s__MemoryError, __pyx_k__MemoryError, sizeof(__pyx_k__MemoryError), 0, 0, 1, 1},
+ {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1},
+ {&__pyx_n_s__TypeError, __pyx_k__TypeError, sizeof(__pyx_k__TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
+ {&__pyx_n_s____class__, __pyx_k____class__, sizeof(__pyx_k____class__), 0, 0, 1, 1},
+ {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
+ {&__pyx_n_s____name__, __pyx_k____name__, sizeof(__pyx_k____name__), 0, 0, 1, 1},
+ {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
+ {&__pyx_n_s___dump_index, __pyx_k___dump_index, sizeof(__pyx_k___dump_index), 0, 0, 1, 1},
+ {&__pyx_n_s___expand_sources, __pyx_k___expand_sources, sizeof(__pyx_k___expand_sources), 0, 0, 1, 1},
+ {&__pyx_n_s___index, __pyx_k___index, sizeof(__pyx_k___index), 0, 0, 1, 1},
+ {&__pyx_n_s___max_bytes_to_index, __pyx_k___max_bytes_to_index, sizeof(__pyx_k___max_bytes_to_index), 0, 0, 1, 1},
+ {&__pyx_n_s___max_num_sources, __pyx_k___max_num_sources, sizeof(__pyx_k___max_num_sources), 0, 0, 1, 1},
+ {&__pyx_n_s___source_infos, __pyx_k___source_infos, sizeof(__pyx_k___source_infos), 0, 0, 1, 1},
+ {&__pyx_n_s___source_offset, __pyx_k___source_offset, sizeof(__pyx_k___source_offset), 0, 0, 1, 1},
+ {&__pyx_n_s___sources, __pyx_k___sources, sizeof(__pyx_k___sources), 0, 0, 1, 1},
+ {&__pyx_n_s__add_delta_source, __pyx_k__add_delta_source, sizeof(__pyx_k__add_delta_source), 0, 0, 1, 1},
+ {&__pyx_n_s__add_source, __pyx_k__add_source, sizeof(__pyx_k__add_source), 0, 0, 1, 1},
+ {&__pyx_n_s__agg_offset, __pyx_k__agg_offset, sizeof(__pyx_k__agg_offset), 0, 0, 1, 1},
+ {&__pyx_n_s__apply_delta, __pyx_k__apply_delta, sizeof(__pyx_k__apply_delta), 0, 0, 1, 1},
+ {&__pyx_n_s__buf, __pyx_k__buf, sizeof(__pyx_k__buf), 0, 0, 1, 1},
+ {&__pyx_n_s__decode_base128_int, __pyx_k__decode_base128_int, sizeof(__pyx_k__decode_base128_int), 0, 0, 1, 1},
+ {&__pyx_n_s__delta, __pyx_k__delta, sizeof(__pyx_k__delta), 0, 0, 1, 1},
+ {&__pyx_n_s__delta_bytes, __pyx_k__delta_bytes, sizeof(__pyx_k__delta_bytes), 0, 0, 1, 1},
+ {&__pyx_n_s__delta_end, __pyx_k__delta_end, sizeof(__pyx_k__delta_end), 0, 0, 1, 1},
+ {&__pyx_n_s__delta_start, __pyx_k__delta_start, sizeof(__pyx_k__delta_start), 0, 0, 1, 1},
+ {&__pyx_n_s__encode_base128_int, __pyx_k__encode_base128_int, sizeof(__pyx_k__encode_base128_int), 0, 0, 1, 1},
+ {&__pyx_n_s__make_delta, __pyx_k__make_delta, sizeof(__pyx_k__make_delta), 0, 0, 1, 1},
+ {&__pyx_n_s__map, __pyx_k__map, sizeof(__pyx_k__map), 0, 0, 1, 1},
+ {&__pyx_n_s__max_bytes_to_index, __pyx_k__max_bytes_to_index, sizeof(__pyx_k__max_bytes_to_index), 0, 0, 1, 1},
+ {&__pyx_n_s__max_delta_size, __pyx_k__max_delta_size, sizeof(__pyx_k__max_delta_size), 0, 0, 1, 1},
+ {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1},
+ {&__pyx_n_s__source, __pyx_k__source, sizeof(__pyx_k__source), 0, 0, 1, 1},
+ {&__pyx_n_s__source_bytes, __pyx_k__source_bytes, sizeof(__pyx_k__source_bytes), 0, 0, 1, 1},
+ {&__pyx_n_s__target_bytes, __pyx_k__target_bytes, sizeof(__pyx_k__target_bytes), 0, 0, 1, 1},
+ {&__pyx_n_s__unadded_bytes, __pyx_k__unadded_bytes, sizeof(__pyx_k__unadded_bytes), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+static int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_MemoryError = __Pyx_GetName(__pyx_b, __pyx_n_s__MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_AssertionError = __Pyx_GetName(__pyx_b, __pyx_n_s__AssertionError); if (!__pyx_builtin_AssertionError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_map = __Pyx_GetName(__pyx_b, __pyx_n_s__map); if (!__pyx_builtin_map) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init_groupcompress_pyx(void); /*proto*/
+PyMODINIT_FUNC init_groupcompress_pyx(void)
+#else
+PyMODINIT_FUNC PyInit__groupcompress_pyx(void); /*proto*/
+PyMODINIT_FUNC PyInit__groupcompress_pyx(void)
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ #if CYTHON_REFNANNY
+ void* __pyx_refnanny = NULL;
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+ }
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext("PyMODINIT_FUNC PyInit__groupcompress_pyx(void)", __LINE__, __FILE__);
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef __pyx_binding_PyCFunctionType_USED
+ if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_groupcompress_pyx"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_30), 0, PYTHON_API_VERSION);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ #if PY_MAJOR_VERSION < 3
+ Py_INCREF(__pyx_m);
+ #endif
+ __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ /*--- Initialize various global constants etc. ---*/
+ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_module_is_main_bzrlib___groupcompress_pyx) {
+ if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ }
+ /*--- Builtin init code ---*/
+ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Global init code ---*/
+ /*--- Function export code ---*/
+ /*--- Type init code ---*/
+ __pyx_vtabptr_6bzrlib_18_groupcompress_pyx_DeltaIndex = &__pyx_vtable_6bzrlib_18_groupcompress_pyx_DeltaIndex;
+ #if PY_MAJOR_VERSION >= 3
+ __pyx_vtable_6bzrlib_18_groupcompress_pyx_DeltaIndex._populate_first_index = (PyObject *(*)(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *))__pyx_f_6bzrlib_18_groupcompress_pyx_10DeltaIndex__populate_first_index;
+ __pyx_vtable_6bzrlib_18_groupcompress_pyx_DeltaIndex._expand_sources = (PyObject *(*)(struct __pyx_obj_6bzrlib_18_groupcompress_pyx_DeltaIndex *))__pyx_f_6bzrlib_18_groupcompress_pyx_10DeltaIndex__expand_sources;
+ #else
+ *(void(**)(void))&__pyx_vtable_6bzrlib_18_groupcompress_pyx_DeltaIndex._populate_first_index = (void(*)(void))__pyx_f_6bzrlib_18_groupcompress_pyx_10DeltaIndex__populate_first_index;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_18_groupcompress_pyx_DeltaIndex._expand_sources = (void(*)(void))__pyx_f_6bzrlib_18_groupcompress_pyx_10DeltaIndex__expand_sources;
+ #endif
+ if (PyType_Ready(&__pyx_type_6bzrlib_18_groupcompress_pyx_DeltaIndex) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_18_groupcompress_pyx_DeltaIndex.tp_dict, __pyx_vtabptr_6bzrlib_18_groupcompress_pyx_DeltaIndex) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "DeltaIndex", (PyObject *)&__pyx_type_6bzrlib_18_groupcompress_pyx_DeltaIndex) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_6bzrlib_18_groupcompress_pyx_DeltaIndex = &__pyx_type_6bzrlib_18_groupcompress_pyx_DeltaIndex;
+ /*--- Type import code ---*/
+ /*--- Function import code ---*/
+ /*--- Execution code ---*/
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_groupcompress_pyx.pyx":1
+ * # Copyright (C) 2008, 2009, 2010 Canonical Ltd # <<<<<<<<<<<<<<
+ * #
+ * # This program is free software; you can redistribute it and/or modify
+ */
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__DeltaIndex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s___dump_index); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_31), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__DeltaIndex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__add_delta_source); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_32), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__DeltaIndex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__add_source); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_33), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__DeltaIndex); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__make_delta); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_34), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__make_delta); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_35), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_GetAttr(__pyx_m, __pyx_n_s__apply_delta); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_36), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s_38); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_37), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_GetAttr(__pyx_m, __pyx_n_s__encode_base128_int); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_GetAttrString(__pyx_t_3, "__doc__"); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_39), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_GetAttr(__pyx_m, __pyx_n_s__decode_base128_int); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_2, "__doc__"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_40), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ if (__pyx_m) {
+ __Pyx_AddTraceback("init bzrlib._groupcompress_pyx");
+ Py_DECREF(__pyx_m); __pyx_m = 0;
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init bzrlib._groupcompress_pyx");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if PY_MAJOR_VERSION < 3
+ return;
+ #else
+ return __pyx_m;
+ #endif
+}
+
+/* Runtime support code */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AS_STRING(kw_name));
+ #endif
+}
+
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *number, *more_or_less;
+
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ number = (num_expected == 1) ? "" : "s";
+ PyErr_Format(PyExc_TypeError,
+ #if PY_VERSION_HEX < 0x02050000
+ "%s() takes %s %d positional argument%s (%d given)",
+ #else
+ "%s() takes %s %zd positional argument%s (%zd given)",
+ #endif
+ func_name, more_or_less, num_expected, number, num_found);
+}
+
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
+ #else
+ if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
+ #endif
+ goto invalid_keyword_type;
+ } else {
+ for (name = first_kw_arg; *name; name++) {
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) break;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) break;
+ #endif
+ }
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ /* unexpected keyword found */
+ for (name=argnames; name != first_kw_arg; name++) {
+ if (**name == key) goto arg_passed_twice;
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) goto arg_passed_twice;
+ #endif
+ }
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, **name);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%s() got an unexpected keyword argument '%s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ type = 0;
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+#else /* Python 3+ */
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (!PyExceptionClass_Check(type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+
+ PyErr_SetObject(type, value);
+
+ if (tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+ }
+
+bad:
+ return;
+}
+#endif
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
+ const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned char" :
+ "value too large to convert to unsigned char");
+ }
+ return (unsigned char)-1;
+ }
+ return (unsigned char)val;
+ }
+ return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
+ const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned short" :
+ "value too large to convert to unsigned short");
+ }
+ return (unsigned short)-1;
+ }
+ return (unsigned short)val;
+ }
+ return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
+ const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned int" :
+ "value too large to convert to unsigned int");
+ }
+ return (unsigned int)-1;
+ }
+ return (unsigned int)val;
+ }
+ return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
+ const char neg_one = (char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to char" :
+ "value too large to convert to char");
+ }
+ return (char)-1;
+ }
+ return (char)val;
+ }
+ return (char)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
+ const short neg_one = (short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to short" :
+ "value too large to convert to short");
+ }
+ return (short)-1;
+ }
+ return (short)val;
+ }
+ return (short)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
+ const signed char neg_one = (signed char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed char" :
+ "value too large to convert to signed char");
+ }
+ return (signed char)-1;
+ }
+ return (signed char)val;
+ }
+ return (signed char)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
+ const signed short neg_one = (signed short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed short" :
+ "value too large to convert to signed short");
+ }
+ return (signed short)-1;
+ }
+ return (signed short)val;
+ }
+ return (signed short)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
+ const signed int neg_one = (signed int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed int" :
+ "value too large to convert to signed int");
+ }
+ return (signed int)-1;
+ }
+ return (signed int)val;
+ }
+ return (signed int)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
+ const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ unsigned long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned long)-1;
+ val = __Pyx_PyInt_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
+ const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ unsigned PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
+ const long neg_one = (long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (long)-1;
+ val = __Pyx_PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
+ const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
+ const signed long neg_one = (signed long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return PyLong_AsUnsignedLong(x);
+ } else {
+ return PyLong_AsLong(x);
+ }
+ } else {
+ signed long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed long)-1;
+ val = __Pyx_PyInt_AsSignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
+ const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return PyLong_AsUnsignedLongLong(x);
+ } else {
+ return PyLong_AsLongLong(x);
+ }
+ } else {
+ signed PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsSignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(const char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(__pyx_filename);
+ #else
+ py_srcfile = PyUnicode_FromString(__pyx_filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (__pyx_clineno) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*int kwonlyargcount,*/
+ #endif
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_GET(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else /* Python 3+ has unicode identifiers */
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+/* Type Conversion Functions */
+
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
+ PyNumberMethods *m;
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_VERSION_HEX < 0x03000000
+ if (PyInt_Check(x) || PyLong_Check(x))
+#else
+ if (PyLong_Check(x))
+#endif
+ return Py_INCREF(x), x;
+ m = Py_TYPE(x)->tp_as_number;
+#if PY_VERSION_HEX < 0x03000000
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = PyNumber_Long(x);
+ }
+#else
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Long(x);
+ }
+#endif
+ if (res) {
+#if PY_VERSION_HEX < 0x03000000
+ if (!PyInt_Check(res) && !PyLong_Check(res)) {
+#else
+ if (!PyLong_Check(res)) {
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%s__ returned non-%s (type %.200s)",
+ name, name, Py_TYPE(res)->tp_name);
+ Py_DECREF(res);
+ return NULL;
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject* x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+#if PY_VERSION_HEX < 0x02050000
+ if (ival <= LONG_MAX)
+ return PyInt_FromLong((long)ival);
+ else {
+ unsigned char *bytes = (unsigned char *) &ival;
+ int one = 1; int little = (int)*(unsigned char*)&one;
+ return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+ }
+#else
+ return PyInt_FromSize_t(ival);
+#endif
+}
+
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
+ unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
+ if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
+ return (size_t)-1;
+ } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
+ return (size_t)-1;
+ }
+ return (size_t)val;
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/bzrlib/_groupcompress_pyx.pyx b/bzrlib/_groupcompress_pyx.pyx
new file mode 100644
index 0000000..a283554
--- /dev/null
+++ b/bzrlib/_groupcompress_pyx.pyx
@@ -0,0 +1,600 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Compiled extensions for doing compression."""
+
+#python2.4 support
+cdef extern from "python-compat.h":
+ pass
+
+
+cdef extern from "Python.h":
+ ctypedef struct PyObject:
+ pass
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ int PyString_CheckExact(object)
+ char * PyString_AS_STRING(object)
+ Py_ssize_t PyString_GET_SIZE(object)
+ object PyString_FromStringAndSize(char *, Py_ssize_t)
+
+
+cdef extern from *:
+ ctypedef unsigned long size_t
+ void * malloc(size_t) nogil
+ void * realloc(void *, size_t) nogil
+ void free(void *) nogil
+ void memcpy(void *, void *, size_t) nogil
+
+
+cdef extern from "delta.h":
+ struct source_info:
+ void *buf
+ unsigned long size
+ unsigned long agg_offset
+ struct delta_index:
+ pass
+ ctypedef enum delta_result:
+ DELTA_OK
+ DELTA_OUT_OF_MEMORY
+ DELTA_INDEX_NEEDED
+ DELTA_SOURCE_EMPTY
+ DELTA_SOURCE_BAD
+ DELTA_BUFFER_EMPTY
+ DELTA_SIZE_TOO_BIG
+ delta_result create_delta_index(source_info *src,
+ delta_index *old,
+ delta_index **fresh,
+ int max_entries) nogil
+ delta_result create_delta_index_from_delta(source_info *delta,
+ delta_index *old,
+ delta_index **fresh) nogil
+ void free_delta_index(delta_index *index) nogil
+ delta_result create_delta(delta_index *indexes,
+ void *buf, unsigned long bufsize,
+ unsigned long *delta_size,
+ unsigned long max_delta_size,
+ void **delta_data) nogil
+ unsigned long get_delta_hdr_size(unsigned char **datap,
+ unsigned char *top) nogil
+ unsigned long sizeof_delta_index(delta_index *index)
+ Py_ssize_t DELTA_SIZE_MIN
+ int get_hash_offset(delta_index *index, int pos, unsigned int *hash_offset)
+ int get_entry_summary(delta_index *index, int pos,
+ unsigned int *global_offset, unsigned int *hash_val)
+ unsigned int rabin_hash (unsigned char *data)
+
+
+cdef void *safe_malloc(size_t count) except NULL:
+ cdef void *result
+ result = malloc(count)
+ if result == NULL:
+ raise MemoryError('Failed to allocate %d bytes of memory' % (count,))
+ return result
+
+
+cdef void *safe_realloc(void * old, size_t count) except NULL:
+ cdef void *result
+ result = realloc(old, count)
+ if result == NULL:
+ raise MemoryError('Failed to reallocate to %d bytes of memory'
+ % (count,))
+ return result
+
+
+cdef int safe_free(void **val) except -1:
+ assert val != NULL
+ if val[0] != NULL:
+ free(val[0])
+ val[0] = NULL
+
+def make_delta_index(source):
+ return DeltaIndex(source)
+
+
+cdef object _translate_delta_failure(delta_result result):
+ if result == DELTA_OUT_OF_MEMORY:
+ return MemoryError("Delta function failed to allocate memory")
+ elif result == DELTA_INDEX_NEEDED:
+ return ValueError("Delta function requires delta_index param")
+ elif result == DELTA_SOURCE_EMPTY:
+ return ValueError("Delta function given empty source_info param")
+ elif result == DELTA_SOURCE_BAD:
+ return RuntimeError("Delta function given invalid source_info param")
+ elif result == DELTA_BUFFER_EMPTY:
+ return ValueError("Delta function given empty buffer params")
+ return AssertionError("Unrecognised delta result code: %d" % result)
+
+
+def _rabin_hash(content):
+ if not PyString_CheckExact(content):
+ raise ValueError('content must be a string')
+ if len(content) < 16:
+ raise ValueError('content must be at least 16 bytes long')
+ # Try to cast it to an int, if it can fit
+ return int(rabin_hash(<unsigned char*>(PyString_AS_STRING(content))))
+
+
+cdef class DeltaIndex:
+
+ # We need Pyrex 0.9.8+ to understand a 'list' definition, and this object
+ # isn't performance critical
+ # cdef readonly list _sources
+ cdef readonly object _sources
+ cdef source_info *_source_infos
+ cdef delta_index *_index
+ cdef public unsigned long _source_offset
+ cdef readonly unsigned int _max_num_sources
+ cdef public int _max_bytes_to_index
+
+ def __init__(self, source=None, max_bytes_to_index=None):
+ self._sources = []
+ self._index = NULL
+ self._max_num_sources = 65000
+ self._source_infos = <source_info *>safe_malloc(sizeof(source_info)
+ * self._max_num_sources)
+ self._source_offset = 0
+ self._max_bytes_to_index = 0
+ if max_bytes_to_index is not None:
+ self._max_bytes_to_index = max_bytes_to_index
+
+ if source is not None:
+ self.add_source(source, 0)
+
+ def __sizeof__(self):
+ # We want to track the _source_infos allocations, but the referenced
+ # void* are actually tracked in _sources itself.
+ # XXX: Cython is capable of doing sizeof(class) and returning the size
+ # of the underlying struct. Pyrex (<= 0.9.9) refuses, so we need
+ # to do it manually. *sigh* Note that we might get it wrong
+ # because of alignment issues.
+ cdef Py_ssize_t size
+ # PyObject start, vtable *, 3 object pointers, 2 C ints
+ size = ((sizeof(PyObject) + sizeof(void*) + 3*sizeof(PyObject*)
+ + sizeof(unsigned long)
+ + sizeof(unsigned int))
+ + (sizeof(source_info) * self._max_num_sources)
+ + sizeof_delta_index(self._index))
+ return size
+
+ def __repr__(self):
+ return '%s(%d, %d)' % (self.__class__.__name__,
+ len(self._sources), self._source_offset)
+
+ def __dealloc__(self):
+ if self._index != NULL:
+ free_delta_index(self._index)
+ self._index = NULL
+ safe_free(<void **>&self._source_infos)
+
+ def _has_index(self):
+ return (self._index != NULL)
+
+ def _dump_index(self):
+ """Dump the pointers in the index.
+
+ This is an arbitrary layout, used for testing. It is not meant to be
+ used in production code.
+
+ :return: (hash_list, entry_list)
+ hash_list A list of offsets, so hash[i] points to the 'hash
+ bucket' starting at the given offset and going until
+ hash[i+1]
+ entry_list A list of (text_offset, hash_val). text_offset is the
+ offset in the "source" texts, and hash_val is the RABIN
+ hash for that offset.
+ Note that the entry should be in the hash bucket
+ defined by
+ hash[(hash_val & mask)] && hash[(hash_val & mask) + 1]
+ """
+ cdef int pos
+ cdef unsigned int text_offset
+ cdef unsigned int hash_val
+ cdef unsigned int hash_offset
+ if self._index == NULL:
+ return None
+ hash_list = []
+ pos = 0
+ while get_hash_offset(self._index, pos, &hash_offset):
+ hash_list.append(int(hash_offset))
+ pos += 1
+ entry_list = []
+ pos = 0
+ while get_entry_summary(self._index, pos, &text_offset, &hash_val):
+ # Map back using 'int' so that we don't get Long everywhere, when
+ # almost everything is <2**31.
+ val = tuple(map(int, [text_offset, hash_val]))
+ entry_list.append(val)
+ pos += 1
+ return hash_list, entry_list
+
+ def add_delta_source(self, delta, unadded_bytes):
+ """Add a new delta to the source texts.
+
+ :param delta: The text of the delta, this must be a byte string.
+ :param unadded_bytes: Number of bytes that were added to the source
+ that were not indexed.
+ """
+ cdef char *c_delta
+ cdef Py_ssize_t c_delta_size
+ cdef delta_index *index
+ cdef delta_result res
+ cdef unsigned int source_location
+ cdef source_info *src
+ cdef unsigned int num_indexes
+
+ if not PyString_CheckExact(delta):
+ raise TypeError('delta is not a str')
+
+ source_location = len(self._sources)
+ if source_location >= self._max_num_sources:
+ self._expand_sources()
+ self._sources.append(delta)
+ c_delta = PyString_AS_STRING(delta)
+ c_delta_size = PyString_GET_SIZE(delta)
+ src = self._source_infos + source_location
+ src.buf = c_delta
+ src.size = c_delta_size
+ src.agg_offset = self._source_offset + unadded_bytes
+ with nogil:
+ res = create_delta_index_from_delta(src, self._index, &index)
+ if res != DELTA_OK:
+ raise _translate_delta_failure(res)
+ self._source_offset = src.agg_offset + src.size
+ if index != self._index:
+ free_delta_index(self._index)
+ self._index = index
+
+ def add_source(self, source, unadded_bytes):
+ """Add a new bit of source text to the delta indexes.
+
+ :param source: The text in question, this must be a byte string
+ :param unadded_bytes: Assume there are this many bytes that didn't get
+ added between this source and the end of the previous source.
+ :param max_pointers: Add no more than this many entries to the index.
+ By default, we sample every 16 bytes, if that would require more
+ than max_entries, we will reduce the sampling rate.
+ A value of 0 means unlimited, None means use the default limit.
+ """
+ cdef char *c_source
+ cdef Py_ssize_t c_source_size
+ cdef delta_index *index
+ cdef delta_result res
+ cdef unsigned int source_location
+ cdef source_info *src
+ cdef unsigned int num_indexes
+ cdef int max_num_entries
+
+ if not PyString_CheckExact(source):
+ raise TypeError('source is not a str')
+
+ source_location = len(self._sources)
+ if source_location >= self._max_num_sources:
+ self._expand_sources()
+ if source_location != 0 and self._index == NULL:
+ # We were lazy about populating the index, create it now
+ self._populate_first_index()
+ self._sources.append(source)
+ c_source = PyString_AS_STRING(source)
+ c_source_size = PyString_GET_SIZE(source)
+ src = self._source_infos + source_location
+ src.buf = c_source
+ src.size = c_source_size
+
+ src.agg_offset = self._source_offset + unadded_bytes
+ self._source_offset = src.agg_offset + src.size
+ # We delay creating the index on the first insert
+ if source_location != 0:
+ with nogil:
+ res = create_delta_index(src, self._index, &index,
+ self._max_bytes_to_index)
+ if res != DELTA_OK:
+ raise _translate_delta_failure(res)
+ if index != self._index:
+ free_delta_index(self._index)
+ self._index = index
+
+ cdef _populate_first_index(self):
+ cdef delta_index *index
+ cdef delta_result res
+ if len(self._sources) != 1 or self._index != NULL:
+ raise AssertionError('_populate_first_index should only be'
+ ' called when we have a single source and no index yet')
+
+ # We know that self._index is already NULL, so create_delta_index
+ # will always create a new index unless there's a malloc failure
+ with nogil:
+ res = create_delta_index(&self._source_infos[0], NULL, &index,
+ self._max_bytes_to_index)
+ if res != DELTA_OK:
+ raise _translate_delta_failure(res)
+ self._index = index
+
+ cdef _expand_sources(self):
+ raise RuntimeError('if we move self._source_infos, then we need to'
+ ' change all of the index pointers as well.')
+ self._max_num_sources = self._max_num_sources * 2
+ self._source_infos = <source_info *>safe_realloc(self._source_infos,
+ sizeof(source_info)
+ * self._max_num_sources)
+
+ def make_delta(self, target_bytes, max_delta_size=0):
+ """Create a delta from the current source to the target bytes."""
+ cdef char *target
+ cdef Py_ssize_t target_size
+ cdef void * delta
+ cdef unsigned long delta_size
+ cdef unsigned long c_max_delta_size
+ cdef delta_result res
+
+ if self._index == NULL:
+ if len(self._sources) == 0:
+ return None
+ # We were just lazy about generating the index
+ self._populate_first_index()
+
+ if not PyString_CheckExact(target_bytes):
+ raise TypeError('target is not a str')
+
+ target = PyString_AS_STRING(target_bytes)
+ target_size = PyString_GET_SIZE(target_bytes)
+
+ # TODO: inline some of create_delta so we at least don't have to double
+ # malloc, and can instead use PyString_FromStringAndSize, to
+ # allocate the bytes into the final string
+ c_max_delta_size = max_delta_size
+ with nogil:
+ res = create_delta(self._index, target, target_size,
+ &delta_size, c_max_delta_size, &delta)
+ result = None
+ if res == DELTA_OK:
+ result = PyString_FromStringAndSize(<char *>delta, delta_size)
+ free(delta)
+ elif res != DELTA_SIZE_TOO_BIG:
+ raise _translate_delta_failure(res)
+ return result
+
+
+def make_delta(source_bytes, target_bytes):
+ """Create a delta, this is a wrapper around DeltaIndex.make_delta."""
+ di = DeltaIndex(source_bytes)
+ return di.make_delta(target_bytes)
+
+
+def apply_delta(source_bytes, delta_bytes):
+ """Apply a delta generated by make_delta to source_bytes."""
+ cdef char *source
+ cdef Py_ssize_t source_size
+ cdef char *delta
+ cdef Py_ssize_t delta_size
+
+ if not PyString_CheckExact(source_bytes):
+ raise TypeError('source is not a str')
+ if not PyString_CheckExact(delta_bytes):
+ raise TypeError('delta is not a str')
+ source = PyString_AS_STRING(source_bytes)
+ source_size = PyString_GET_SIZE(source_bytes)
+ delta = PyString_AS_STRING(delta_bytes)
+ delta_size = PyString_GET_SIZE(delta_bytes)
+ # Code taken from patch-delta.c, only brought here to give better error
+ # handling, and to avoid double allocating memory
+ if (delta_size < DELTA_SIZE_MIN):
+ # XXX: Invalid delta block
+ raise RuntimeError('delta_size %d smaller than min delta size %d'
+ % (delta_size, DELTA_SIZE_MIN))
+
+ return _apply_delta(source, source_size, delta, delta_size)
+
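+# A minimal round-trip sketch (hedged; the variable names are hypothetical
+# and byte strings are assumed, as the type checks above require):
+#   delta = make_delta(source_text, target_text)
+#   assert apply_delta(source_text, delta) == target_text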
+
+cdef unsigned char *_decode_copy_instruction(unsigned char *bytes,
+ unsigned char cmd, unsigned int *offset,
+ unsigned int *length) nogil: # cannot_raise
+ """Decode a copy instruction from the next few bytes.
+
+ A copy instruction is a variable number of bytes, so we will parse the
+ bytes we care about, and return the new position, as well as the offset and
+ length referred to in the bytes.
+
+ :param bytes: Pointer to the start of bytes after cmd
+ :param cmd: The command code
+ :return: Pointer to the bytes just after the last decode byte
+ """
+ cdef unsigned int off, size, count
+ off = 0
+ size = 0
+ count = 0
+ if (cmd & 0x01):
+ off = bytes[count]
+ count = count + 1
+ if (cmd & 0x02):
+ off = off | (bytes[count] << 8)
+ count = count + 1
+ if (cmd & 0x04):
+ off = off | (bytes[count] << 16)
+ count = count + 1
+ if (cmd & 0x08):
+ off = off | (bytes[count] << 24)
+ count = count + 1
+ if (cmd & 0x10):
+ size = bytes[count]
+ count = count + 1
+ if (cmd & 0x20):
+ size = size | (bytes[count] << 8)
+ count = count + 1
+ if (cmd & 0x40):
+ size = size | (bytes[count] << 16)
+ count = count + 1
+ if (size == 0):
+ size = 0x10000
+ offset[0] = off
+ length[0] = size
+ return bytes + count
+
+
+cdef object _apply_delta(char *source, Py_ssize_t source_size,
+ char *delta, Py_ssize_t delta_size):
+ """common functionality between apply_delta and apply_delta_to_source."""
+ cdef unsigned char *data, *top
+ cdef unsigned char *dst_buf, *out, cmd
+ cdef Py_ssize_t size
+ cdef unsigned int cp_off, cp_size
+ cdef int failed
+
+ data = <unsigned char *>delta
+ top = data + delta_size
+
+ # now the result size
+ size = get_delta_hdr_size(&data, top)
+ result = PyString_FromStringAndSize(NULL, size)
+ dst_buf = <unsigned char*>PyString_AS_STRING(result)
+
+ failed = 0
+ with nogil:
+ out = dst_buf
+ while (data < top):
+ cmd = data[0]
+ data = data + 1
+ if (cmd & 0x80):
+ # Copy instruction
+ data = _decode_copy_instruction(data, cmd, &cp_off, &cp_size)
+ if (cp_off + cp_size < cp_size or
+ cp_off + cp_size > <unsigned int>source_size or
+ cp_size > <unsigned int>size):
+ failed = 1
+ break
+ memcpy(out, source + cp_off, cp_size)
+ out = out + cp_size
+ size = size - cp_size
+ else:
+ # Insert instruction
+ if cmd == 0:
+ # cmd == 0 is reserved for future encoding
+                    # extensions. In the meantime we must fail when
+ # encountering them (might be data corruption).
+ failed = 2
+ break
+ if cmd > size:
+ failed = 3
+ break
+ memcpy(out, data, cmd)
+ out = out + cmd
+ data = data + cmd
+ size = size - cmd
+ if failed:
+ if failed == 1:
+ raise ValueError('Something wrong with:'
+ ' cp_off = %s, cp_size = %s'
+ ' source_size = %s, size = %s'
+ % (cp_off, cp_size, source_size, size))
+ elif failed == 2:
+ raise ValueError('Got delta opcode: 0, not supported')
+ elif failed == 3:
+ raise ValueError('Insert instruction longer than remaining'
+ ' bytes: %d > %d' % (cmd, size))
+
+ # sanity check
+ if (data != top or size != 0):
+ raise RuntimeError('Did not extract the number of bytes we expected'
+ ' we were left with %d bytes in "size", and top - data = %d'
+ % (size, <int>(top - data)))
+ return None
+
+ # *dst_size = out - dst_buf;
+ if (out - dst_buf) != PyString_GET_SIZE(result):
+ raise RuntimeError('Number of bytes extracted did not match the'
+ ' size encoded in the delta header.')
+ return result
+
+
+def apply_delta_to_source(source, delta_start, delta_end):
+ """Extract a delta from source bytes, and apply it."""
+ cdef char *c_source
+ cdef Py_ssize_t c_source_size
+ cdef char *c_delta
+ cdef Py_ssize_t c_delta_size
+ cdef Py_ssize_t c_delta_start, c_delta_end
+
+ if not PyString_CheckExact(source):
+ raise TypeError('source is not a str')
+ c_source_size = PyString_GET_SIZE(source)
+ c_delta_start = delta_start
+ c_delta_end = delta_end
+ if c_delta_start >= c_source_size:
+ raise ValueError('delta starts after source')
+ if c_delta_end > c_source_size:
+ raise ValueError('delta ends after source')
+ if c_delta_start >= c_delta_end:
+ raise ValueError('delta starts after it ends')
+
+ c_delta_size = c_delta_end - c_delta_start
+ c_source = PyString_AS_STRING(source)
+ c_delta = c_source + c_delta_start
+ # We don't use source_size, because we know the delta should not refer to
+ # any bytes after it starts
+ return _apply_delta(c_source, c_delta_start, c_delta, c_delta_size)
+
+
+def encode_base128_int(val):
+ """Convert an integer into a 7-bit lsb encoding."""
+ cdef unsigned int c_val
+ cdef Py_ssize_t count
+ cdef unsigned int num_bytes
+ cdef unsigned char c_bytes[8] # max size for 32-bit int is 5 bytes
+
+ c_val = val
+ count = 0
+ while c_val >= 0x80 and count < 8:
+ c_bytes[count] = <unsigned char>((c_val | 0x80) & 0xFF)
+ c_val = c_val >> 7
+ count = count + 1
+ if count >= 8 or c_val >= 0x80:
+ raise ValueError('encode_base128_int overflowed the buffer')
+ c_bytes[count] = <unsigned char>(c_val & 0xFF)
+ count = count + 1
+ return PyString_FromStringAndSize(<char *>c_bytes, count)
+
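+# Worked example for the encoder above (values chosen for illustration):
+# 300 encodes as '\xac\x02' -- the low seven bits 0x2c with the continuation
+# bit 0x80 set, followed by the remaining value 2.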
+
+def decode_base128_int(bytes):
+ """Decode an integer from a 7-bit lsb encoding."""
+ cdef int offset
+ cdef int val
+ cdef unsigned int uval
+ cdef int shift
+ cdef Py_ssize_t num_low_bytes
+ cdef unsigned char *c_bytes
+
+ offset = 0
+ val = 0
+ shift = 0
+ if not PyString_CheckExact(bytes):
+ raise TypeError('bytes is not a string')
+ c_bytes = <unsigned char*>PyString_AS_STRING(bytes)
+ # We take off 1, because we have to be able to decode the non-expanded byte
+ num_low_bytes = PyString_GET_SIZE(bytes) - 1
+ while (c_bytes[offset] & 0x80) and offset < num_low_bytes:
+ val = val | ((c_bytes[offset] & 0x7F) << shift)
+ shift = shift + 7
+ offset = offset + 1
+ if c_bytes[offset] & 0x80:
+ raise ValueError('Data not properly formatted, we ran out of'
+ ' bytes before 0x80 stopped being set.')
+ val = val | (c_bytes[offset] << shift)
+ offset = offset + 1
+ if val < 0:
+ uval = <unsigned int> val
+ return uval, offset
+ return val, offset
+
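+# Round-trip sketch for the pair of functions above (hypothetical values):
+# decode_base128_int('\xac\x02') should return (300, 2), i.e. the decoded
+# integer and the number of bytes consumed, matching encode_base128_int(300).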
+
diff --git a/bzrlib/_import_c_api.h b/bzrlib/_import_c_api.h
new file mode 100644
index 0000000..3ca2242
--- /dev/null
+++ b/bzrlib/_import_c_api.h
@@ -0,0 +1,189 @@
+/* Copyright (C) 2009 Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _IMPORT_C_API_H_
+#define _IMPORT_C_API_H_
+
+/**
+ * Helper functions to eliminate some of the boilerplate when importing a C API
+ * from a CPython extension module.
+ *
+ * For more information see _export_c_api.h
+ */
+
+static const char *_C_API_NAME = "_C_API";
+
+/**
+ * Import a function from the _C_API_NAME dict that is part of module.
+ *
+ * @param module The Python module we are importing from
+ * the attribute _C_API_NAME will be used as a dictionary
+ * containing the function pointer we are looking for.
+ * @param funcname Name of the function we want to import
+ * @param func A pointer to the function handle where we will store the
+ * function.
+ * @param signature The C signature of the function. This is validated
+ * against the signature stored in the C api, to make sure
+ * there is no versioning skew.
+ */
+static int _import_function(PyObject *module, const char *funcname,
+ void **func, const char *signature)
+{
+ PyObject *d = NULL;
+ PyObject *c_obj = NULL;
+ const char *desc = NULL;
+
+ /* (char *) because Python2.4 defines this as (char *) rather than
+ * (const char *)
+ */
+ d = PyObject_GetAttrString(module, (char *)_C_API_NAME);
+ if (!d) {
+ // PyObject_GetAttrString sets an appropriate exception
+ goto bad;
+ }
+ c_obj = PyDict_GetItemString(d, funcname);
+ if (!c_obj) {
+ // PyDict_GetItemString does not set an exception
+ PyErr_Format(PyExc_AttributeError,
+ "Module %s did not export a function named %s\n",
+ PyModule_GetName(module), funcname);
+ goto bad;
+ }
+ desc = (char *)PyCObject_GetDesc(c_obj);
+ if (!desc || strcmp(desc, signature) != 0) {
+ if (desc == NULL) {
+ desc = "<null>";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "C function %s.%s has wrong signature (expected %s, got %s)",
+ PyModule_GetName(module), funcname, signature, desc);
+ goto bad;
+ }
+ *func = PyCObject_AsVoidPtr(c_obj);
+ Py_DECREF(d);
+ return 0;
+bad:
+ Py_XDECREF(d);
+ return -1;
+}
+
+
+/**
+ * Get a pointer to an exported PyTypeObject.
+ *
+ * @param module The Python module we are importing from
+ * @param class_name Attribute of the module that should reference the
+ * Type object. Note that a PyTypeObject is the python
+ * description of the type, not the raw C structure.
+ * @return A Pointer to the requested type object. On error NULL will be
+ * returned and an exception will be set.
+ */
+static PyTypeObject *
+_import_type(PyObject *module, const char *class_name)
+{
+ PyObject *type = NULL;
+
+ type = PyObject_GetAttrString(module, (char *)class_name);
+ if (!type) {
+ goto bad;
+ }
+ if (!PyType_Check(type)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ PyModule_GetName(module), class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)type;
+bad:
+ Py_XDECREF(type);
+ return NULL;
+}
+
+
+struct function_description
+{
+ const char *name;
+ void **pointer;
+ const char *signature;
+};
+
+struct type_description
+{
+ const char *name;
+ PyTypeObject **pointer;
+};
+
+/**
+ * Helper for importing several functions and types in a data-driven manner.
+ *
+ * @param module_name The name of the module we will be importing
+ * @param functions A list of function_description objects, describing the
+ * functions being imported.
+ * The list should be terminated with {NULL} to indicate
+ * there are no more functions to import.
+ * @param types A list of type_description objects describing type
+ * objects that we want to import. The list should be
+ * terminated with {NULL} to indicate there are no more
+ * types to import.
+ * @return 0 on success, -1 on error and an exception should be set.
+ */
+
+static int
+_import_extension_module(const char *module_name,
+ struct function_description *functions,
+ struct type_description *types)
+{
+ PyObject *module = NULL;
+ struct function_description *cur_func;
+ struct type_description *cur_type;
+ int ret_code;
+
+ module = PyImport_ImportModule((char *)module_name);
+ if (!module)
+ goto bad;
+ if (functions != NULL) {
+ cur_func = functions;
+ while (cur_func->name != NULL) {
+ ret_code = _import_function(module, cur_func->name,
+ cur_func->pointer,
+ cur_func->signature);
+ if (ret_code < 0)
+ goto bad;
+ cur_func++;
+ }
+ }
+ if (types != NULL) {
+ PyTypeObject *type_p = NULL;
+ cur_type = types;
+ while (cur_type->name != NULL) {
+ type_p = _import_type(module, cur_type->name);
+ if (type_p == NULL)
+ goto bad;
+ *(cur_type->pointer) = type_p;
+ cur_type++;
+ }
+ }
+
+ Py_XDECREF(module);
+ return 0;
+bad:
+ Py_XDECREF(module);
+ return -1;
+}
+
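+/* Hedged usage sketch -- my_func, MyType and the module name below are
+ * hypothetical, not part of this API:
+ *
+ *   static int (*my_func)(int) = NULL;
+ *   static PyTypeObject *MyType = NULL;
+ *   struct function_description functions[] = {
+ *       {"my_func", (void **)&my_func, "int (int)"},
+ *       {NULL}};
+ *   struct type_description types[] = {{"MyType", &MyType}, {NULL}};
+ *   if (_import_extension_module("bzrlib._hypothetical_pyx",
+ *                                 functions, types) < 0)
+ *       return -1;   -- an exception is already set
+ *
+ * The {NULL} entries terminate each list, as documented above.
+ */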
+
+#endif // _IMPORT_C_API_H_
diff --git a/bzrlib/_knit_load_data_py.py b/bzrlib/_knit_load_data_py.py
new file mode 100644
index 0000000..7e781f8
--- /dev/null
+++ b/bzrlib/_knit_load_data_py.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import errors
+
+
+def _load_data_py(kndx, fp):
+ """Read in a knit index."""
+ cache = kndx._cache
+ history = kndx._history
+
+ kndx.check_header(fp)
+    # readlines reads the whole file at once:
+    # bad for transports like http, good for local disk.
+    # We save 60 ms with this one change, from calling
+    # readline each time to calling readlines once.
+    # What we probably want for nice behaviour on http is
+    # an incremental readlines that yields, or a check for
+    # local vs non-local indexes.
+ history_top = len(history) - 1
+ for line in fp.readlines():
+ rec = line.split()
+ if len(rec) < 5 or rec[-1] != ':':
+ # corrupt line.
+            # FIXME: in the future we should determine if it's a
+            #        short write, and ignore it, or a different
+            #        failure, and raise. RBC 20060407
+ continue
+
+ try:
+ parents = []
+ for value in rec[4:-1]:
+ if value[0] == '.':
+ # uncompressed reference
+ parent_id = value[1:]
+ else:
+ parent_id = history[int(value)]
+ parents.append(parent_id)
+ except (IndexError, ValueError), e:
+            # The parent could not be decoded to get its parent row. This
+            # at a minimum will cause this row to have wrong parents, or
+            # even to apply a delta to the wrong base and decode
+            # incorrectly. It's therefore not usable, and because we have
+            # encountered a situation where a new knit index had this
+            # corruption we can't assume that no other rows referring to
+            # the index of this record actually mean the subsequent
+            # uncorrupt one, so we error.
+ raise errors.KnitCorrupt(kndx._filename, "line %r: %s" % (rec, e))
+
+ version_id, options, pos, size = rec[:4]
+ version_id = version_id
+ try:
+ pos = int(pos)
+ except ValueError, e:
+ raise errors.KnitCorrupt(kndx._filename,
+ "invalid position on line %r: %s"
+ % (rec, e))
+ try:
+ size = int(size)
+ except ValueError, e:
+ raise errors.KnitCorrupt(kndx._filename,
+ "invalid size on line %r: %s"
+ % (rec, e))
+
+ # See kndx._cache_version
+ # only want the _history index to reference the 1st
+ # index entry for version_id
+ if version_id not in cache:
+ history_top += 1
+ index = history_top
+ history.append(version_id)
+ else:
+ index = cache[version_id][5]
+ cache[version_id] = (version_id,
+ options.split(','),
+ pos,
+ size,
+ tuple(parents),
+ index)
+ # end kndx._cache_version
diff --git a/bzrlib/_knit_load_data_pyx.c b/bzrlib/_knit_load_data_pyx.c
new file mode 100644
index 0000000..85a8234
--- /dev/null
+++ b/bzrlib/_knit_load_data_pyx.c
@@ -0,0 +1,1405 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:00:57 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "stdlib.h"
+#include "string.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Pyrex extensions to knit parsing.";
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._knit_load_data_pyx */
+
+
+/* Declarations from implementation of bzrlib._knit_load_data_pyx */
+
+struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_vtab;
+ PyObject *kndx;
+ PyObject *fp;
+ PyObject *cache;
+ PyObject *history;
+ char *cur_str;
+ char *end_str;
+ int history_len;
+};
+
+
+struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader {
+ int (*validate)(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *);
+ PyObject *(*process_options)(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *,char *,char *);
+ PyObject *(*process_parents)(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *,char *,char *);
+ int (*process_one_record)(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *,char *,char *);
+ int (*process_next_record)(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *);
+};
+static struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_vtabptr_6bzrlib_19_knit_load_data_pyx_KnitIndexReader;
+
+static PyTypeObject *__pyx_ptype_6bzrlib_19_knit_load_data_pyx_KnitIndexReader = 0;
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_string_to_int_safe(char *,char *,int *); /*proto*/
+
+static char __pyx_k1[] = "%r is not a valid integer";
+static char __pyx_k2[] = "_cache";
+static char __pyx_k3[] = "_history";
+static char __pyx_k4[] = "kndx._cache must be a python dict";
+static char __pyx_k5[] = "kndx._history must be a python list";
+static char __pyx_k6[] = "Parent index refers to a revision which does not exist yet. %d > %d";
+static char __pyx_k7[] = "errors";
+static char __pyx_k8[] = "KnitCorrupt";
+static char __pyx_k9[] = "_filename";
+static char __pyx_k10[] = "line %r: %s";
+static char __pyx_k11[] = "check_header";
+static char __pyx_k12[] = "read";
+static char __pyx_k13[] = "sys";
+static char __pyx_k14[] = "bzrlib";
+
+static PyObject *__pyx_n_KnitCorrupt;
+static PyObject *__pyx_n__cache;
+static PyObject *__pyx_n__filename;
+static PyObject *__pyx_n__history;
+static PyObject *__pyx_n_bzrlib;
+static PyObject *__pyx_n_check_header;
+static PyObject *__pyx_n_errors;
+static PyObject *__pyx_n_read;
+static PyObject *__pyx_n_sys;
+
+static PyObject *__pyx_k1p;
+static PyObject *__pyx_k4p;
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k6p;
+static PyObject *__pyx_k10p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_KnitCorrupt, 1, __pyx_k8, sizeof(__pyx_k8)},
+ {&__pyx_n__cache, 1, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_n__filename, 1, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_n__history, 1, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_n_bzrlib, 1, __pyx_k14, sizeof(__pyx_k14)},
+ {&__pyx_n_check_header, 1, __pyx_k11, sizeof(__pyx_k11)},
+ {&__pyx_n_errors, 1, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_n_read, 1, __pyx_k12, sizeof(__pyx_k12)},
+ {&__pyx_n_sys, 1, __pyx_k13, sizeof(__pyx_k13)},
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_k4p, 0, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k6p, 0, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_k10p, 0, __pyx_k10, sizeof(__pyx_k10)},
+ {0, 0, 0, 0}
+};
+
+
+
+/* Implementation of bzrlib._knit_load_data_pyx */
+
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_string_to_int_safe(char *__pyx_v_s,char *__pyx_v_end,int *__pyx_v_out) {
+ char *__pyx_v_integer_end;
+ PyObject *__pyx_v_py_s;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ __pyx_v_py_s = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":69 */
+ (__pyx_v_out[0]) = ((int)strtol(__pyx_v_s,(&__pyx_v_integer_end),10));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":70 */
+ __pyx_1 = (__pyx_v_integer_end != __pyx_v_end);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":71 */
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_s,(__pyx_v_end - __pyx_v_s)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_py_s);
+ __pyx_v_py_s = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":72 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_py_s);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_py_s);
+ __pyx_3 = PyNumber_Remainder(__pyx_k1p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":73 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.string_to_int_safe");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_py_s);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_kndx = 0;
+ PyObject *__pyx_v_fp = 0;
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"kndx","fp",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_kndx, &__pyx_v_fp)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_kndx);
+ Py_INCREF(__pyx_v_fp);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":90 */
+ Py_INCREF(__pyx_v_kndx);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->kndx);
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->kndx = __pyx_v_kndx;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":91 */
+ Py_INCREF(__pyx_v_fp);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->fp);
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->fp = __pyx_v_fp;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":93 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_kndx, __pyx_n__cache); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->cache);
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->cache = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":94 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_kndx, __pyx_n__history); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->history);
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->history = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":96 */
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->cur_str = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":97 */
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->end_str = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":98 */
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->history_len = 0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_kndx);
+ Py_DECREF(__pyx_v_fp);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_validate(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_v_self) {
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":101 */
+ __pyx_1 = (!PyDict_CheckExact(__pyx_v_self->cache));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; goto __pyx_L1;}
+ Py_INCREF(__pyx_k4p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k4p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":103 */
+ __pyx_1 = (!PyList_CheckExact(__pyx_v_self->history));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; goto __pyx_L1;}
+ Py_INCREF(__pyx_k5p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k5p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":105 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.validate");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_options(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_v_self,char *__pyx_v_option_str,char *__pyx_v_end) {
+ char *__pyx_v_next;
+ PyObject *__pyx_v_final_options;
+ PyObject *__pyx_v_next_option;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_final_options = Py_None; Py_INCREF(Py_None);
+ __pyx_v_next_option = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":119 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_final_options);
+ __pyx_v_final_options = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":121 */
+ while (1) {
+ __pyx_2 = (__pyx_v_option_str < __pyx_v_end);
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":122 */
+ __pyx_v_next = ((char *)memchr(__pyx_v_option_str,',',(__pyx_v_end - __pyx_v_option_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":123 */
+ __pyx_2 = (__pyx_v_next == NULL);
+ if (__pyx_2) {
+ __pyx_v_next = __pyx_v_end;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":125 */
+ __pyx_1 = PyString_FromStringAndSize(__pyx_v_option_str,(__pyx_v_next - __pyx_v_option_str)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_next_option);
+ __pyx_v_next_option = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":127 */
+ __pyx_2 = PyList_Append(__pyx_v_final_options,__pyx_v_next_option); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":130 */
+ __pyx_v_option_str = (__pyx_v_next + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":132 */
+ Py_INCREF(__pyx_v_final_options);
+ __pyx_r = __pyx_v_final_options;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.process_options");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_final_options);
+ Py_DECREF(__pyx_v_next_option);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_parents(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_v_self,char *__pyx_v_parent_str,char *__pyx_v_end) {
+ char *__pyx_v_next;
+ int __pyx_v_int_parent;
+ PyObject *__pyx_v_parents;
+ PyObject *__pyx_v_parent;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_parents = Py_None; Py_INCREF(Py_None);
+ __pyx_v_parent = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":151 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parents);
+ __pyx_v_parents = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":152 */
+ while (1) {
+ __pyx_2 = (__pyx_v_parent_str <= __pyx_v_end);
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":153 */
+ __pyx_v_next = ((char *)memchr(__pyx_v_parent_str,' ',(__pyx_v_end - __pyx_v_parent_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":154 */
+ __pyx_2 = (__pyx_v_next == NULL);
+ if (!__pyx_2) {
+ __pyx_2 = (__pyx_v_next >= __pyx_v_end);
+ if (!__pyx_2) {
+ __pyx_2 = (__pyx_v_next == __pyx_v_parent_str);
+ }
+ }
+ if (__pyx_2) {
+ goto __pyx_L3;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":157 */
+ __pyx_2 = ((__pyx_v_parent_str[0]) == '.');
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":159 */
+ __pyx_v_parent_str = (__pyx_v_parent_str + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":160 */
+ __pyx_1 = PyString_FromStringAndSize(__pyx_v_parent_str,(__pyx_v_next - __pyx_v_parent_str)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent);
+ __pyx_v_parent = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":164 */
+ __pyx_2 = __pyx_f_6bzrlib_19_knit_load_data_pyx_string_to_int_safe(__pyx_v_parent_str,__pyx_v_next,(&__pyx_v_int_parent)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 164; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":166 */
+ __pyx_2 = (__pyx_v_int_parent >= __pyx_v_self->history_len);
+ if (__pyx_2) {
+ __pyx_1 = PyInt_FromLong(__pyx_v_int_parent); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(__pyx_v_self->history_len); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_1);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_1 = 0;
+ __pyx_3 = 0;
+ __pyx_1 = PyNumber_Remainder(__pyx_k6p, __pyx_4); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_1);
+ __pyx_1 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_IndexError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":170 */
+ __pyx_1 = PyList_GET_ITEM(__pyx_v_self->history,__pyx_v_int_parent); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 170; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent);
+ __pyx_v_parent = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":172 */
+ Py_INCREF(__pyx_v_parent);
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":173 */
+ __pyx_2 = PyList_Append(__pyx_v_parents,__pyx_v_parent); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":174 */
+ __pyx_v_parent_str = (__pyx_v_next + 1);
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":175 */
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_parents);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_parents);
+ __pyx_4 = PyObject_CallObject(((PyObject *)(&PyTuple_Type)), __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_4;
+ __pyx_4 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.process_parents");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parents);
+ Py_DECREF(__pyx_v_parent);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_one_record(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_v_self,char *__pyx_v_start,char *__pyx_v_end) {
+ char *__pyx_v_version_id_str;
+ int __pyx_v_version_id_size;
+ char *__pyx_v_option_str;
+ char *__pyx_v_option_end;
+ char *__pyx_v_pos_str;
+ int __pyx_v_pos;
+ char *__pyx_v_size_str;
+ int __pyx_v_size;
+ char *__pyx_v_parent_str;
+ void *__pyx_v_cache_entry;
+ PyObject *__pyx_v_version_id;
+ PyObject *__pyx_v_options;
+ PyObject *__pyx_v_parents;
+ PyObject *__pyx_v_e;
+ PyObject *__pyx_v_py_line;
+ PyObject *__pyx_v_index;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ PyObject *__pyx_6 = 0;
+ PyObject *__pyx_7 = 0;
+ PyObject *__pyx_8 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_version_id = Py_None; Py_INCREF(Py_None);
+ __pyx_v_options = Py_None; Py_INCREF(Py_None);
+ __pyx_v_parents = Py_None; Py_INCREF(Py_None);
+ __pyx_v_e = Py_None; Py_INCREF(Py_None);
+ __pyx_v_py_line = Py_None; Py_INCREF(Py_None);
+ __pyx_v_index = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":191 */
+ __pyx_v_version_id_str = __pyx_v_start;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":192 */
+ __pyx_v_option_str = ((char *)memchr(__pyx_v_version_id_str,' ',(__pyx_v_end - __pyx_v_version_id_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":193 */
+ __pyx_1 = (__pyx_v_option_str == NULL);
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_option_str >= __pyx_v_end);
+ }
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":196 */
+ __pyx_v_version_id_size = (__pyx_v_option_str - __pyx_v_version_id_str);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":198 */
+ __pyx_v_option_str = (__pyx_v_option_str + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":200 */
+ __pyx_v_pos_str = ((char *)memchr(__pyx_v_option_str,' ',(__pyx_v_end - __pyx_v_option_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":201 */
+ __pyx_1 = (__pyx_v_pos_str == NULL);
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_pos_str >= __pyx_v_end);
+ }
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":204 */
+ __pyx_v_option_end = __pyx_v_pos_str;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":205 */
+ __pyx_v_pos_str = (__pyx_v_pos_str + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":207 */
+ __pyx_v_size_str = ((char *)memchr(__pyx_v_pos_str,' ',(__pyx_v_end - __pyx_v_pos_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":208 */
+ __pyx_1 = (__pyx_v_size_str == NULL);
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_size_str >= __pyx_v_end);
+ }
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":211 */
+ __pyx_v_size_str = (__pyx_v_size_str + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":213 */
+ __pyx_v_parent_str = ((char *)memchr(__pyx_v_size_str,' ',(__pyx_v_end - __pyx_v_size_str)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":214 */
+ __pyx_1 = (__pyx_v_parent_str == NULL);
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_parent_str >= __pyx_v_end);
+ }
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":217 */
+ __pyx_v_parent_str = (__pyx_v_parent_str + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":219 */
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_version_id_str,__pyx_v_version_id_size); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_version_id);
+ __pyx_v_version_id = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":221 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self->__pyx_vtab)->process_options(__pyx_v_self,__pyx_v_option_str,__pyx_v_option_end); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_options);
+ __pyx_v_options = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":223 */
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":224 */
+ __pyx_1 = __pyx_f_6bzrlib_19_knit_load_data_pyx_string_to_int_safe(__pyx_v_pos_str,(__pyx_v_size_str - 1),(&__pyx_v_pos)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; goto __pyx_L6;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":225 */
+ __pyx_1 = __pyx_f_6bzrlib_19_knit_load_data_pyx_string_to_int_safe(__pyx_v_size_str,(__pyx_v_parent_str - 1),(&__pyx_v_size)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L6;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":226 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self->__pyx_vtab)->process_parents(__pyx_v_self,__pyx_v_parent_str,__pyx_v_end); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; goto __pyx_L6;}
+ Py_DECREF(__pyx_v_parents);
+ __pyx_v_parents = __pyx_2;
+ __pyx_2 = 0;
+ }
+ goto __pyx_L7;
+ __pyx_L6:;
+ Py_XDECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":227 */
+ __pyx_2 = PyTuple_New(2); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; goto __pyx_L1;}
+ Py_INCREF(PyExc_ValueError);
+ PyTuple_SET_ITEM(__pyx_2, 0, PyExc_ValueError);
+ Py_INCREF(PyExc_IndexError);
+ PyTuple_SET_ITEM(__pyx_2, 1, PyExc_IndexError);
+ __pyx_1 = PyErr_ExceptionMatches(__pyx_2);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ if (__pyx_1) {
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.process_one_record");
+ if (__Pyx_GetException(&__pyx_2, &__pyx_3, &__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; goto __pyx_L1;}
+ Py_INCREF(__pyx_3);
+ Py_DECREF(__pyx_v_e);
+ __pyx_v_e = __pyx_3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":228 */
+ __pyx_5 = PyString_FromStringAndSize(__pyx_v_start,(__pyx_v_end - __pyx_v_start)); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_py_line);
+ __pyx_v_py_line = __pyx_5;
+ __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":229 */
+ __pyx_5 = __Pyx_GetName(__pyx_m, __pyx_n_errors); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ __pyx_6 = PyObject_GetAttr(__pyx_5, __pyx_n_KnitCorrupt); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_5 = PyObject_GetAttr(__pyx_v_self->kndx, __pyx_n__filename); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ __pyx_7 = PyTuple_New(2); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_py_line);
+ PyTuple_SET_ITEM(__pyx_7, 0, __pyx_v_py_line);
+ Py_INCREF(__pyx_v_e);
+ PyTuple_SET_ITEM(__pyx_7, 1, __pyx_v_e);
+ __pyx_8 = PyNumber_Remainder(__pyx_k10p, __pyx_7); if (!__pyx_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_DECREF(__pyx_7); __pyx_7 = 0;
+ __pyx_7 = PyTuple_New(2); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_7, 0, __pyx_5);
+ PyTuple_SET_ITEM(__pyx_7, 1, __pyx_8);
+ __pyx_5 = 0;
+ __pyx_8 = 0;
+ __pyx_5 = PyObject_CallObject(__pyx_6, __pyx_7); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ Py_DECREF(__pyx_7); __pyx_7 = 0;
+ __Pyx_Raise(__pyx_5, 0, 0);
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ goto __pyx_L7;
+ }
+ goto __pyx_L1;
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":232 */
+ __pyx_v_cache_entry = PyDict_GetItem(__pyx_v_self->cache,__pyx_v_version_id);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":233 */
+ __pyx_1 = (__pyx_v_cache_entry == NULL);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":234 */
+ __pyx_1 = PyList_Append(__pyx_v_self->history,__pyx_v_version_id); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":235 */
+ __pyx_8 = PyInt_FromLong(__pyx_v_self->history_len); if (!__pyx_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_index);
+ __pyx_v_index = __pyx_8;
+ __pyx_8 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":236 */
+ __pyx_v_self->history_len = (__pyx_v_self->history_len + 1);
+ goto __pyx_L8;
+ }
+ /*else*/ {
+ Py_INCREF(((PyObject *)PyTuple_GET_ITEM(__pyx_v_cache_entry,5)));
+ Py_DECREF(__pyx_v_index);
+ __pyx_v_index = ((PyObject *)PyTuple_GET_ITEM(__pyx_v_cache_entry,5));
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":242 */
+ __pyx_6 = PyInt_FromLong(__pyx_v_pos); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; goto __pyx_L1;}
+ __pyx_7 = PyInt_FromLong(__pyx_v_size); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(6); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_version_id);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_version_id);
+ Py_INCREF(__pyx_v_options);
+ PyTuple_SET_ITEM(__pyx_5, 1, __pyx_v_options);
+ PyTuple_SET_ITEM(__pyx_5, 2, __pyx_6);
+ PyTuple_SET_ITEM(__pyx_5, 3, __pyx_7);
+ Py_INCREF(__pyx_v_parents);
+ PyTuple_SET_ITEM(__pyx_5, 4, __pyx_v_parents);
+ Py_INCREF(__pyx_v_index);
+ PyTuple_SET_ITEM(__pyx_5, 5, __pyx_v_index);
+ __pyx_6 = 0;
+ __pyx_7 = 0;
+ __pyx_1 = PyDict_SetItem(__pyx_v_self->cache,__pyx_v_version_id,__pyx_5); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":250 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ Py_XDECREF(__pyx_6);
+ Py_XDECREF(__pyx_7);
+ Py_XDECREF(__pyx_8);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.process_one_record");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_version_id);
+ Py_DECREF(__pyx_v_options);
+ Py_DECREF(__pyx_v_parents);
+ Py_DECREF(__pyx_v_e);
+ Py_DECREF(__pyx_v_py_line);
+ Py_DECREF(__pyx_v_index);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_next_record(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *__pyx_v_self) {
+ char *__pyx_v_last;
+ char *__pyx_v_start;
+ int __pyx_r;
+ int __pyx_1;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":257 */
+ __pyx_v_start = __pyx_v_self->cur_str;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":259 */
+ __pyx_v_last = ((char *)memchr(__pyx_v_start,'\n',(__pyx_v_self->end_str - __pyx_v_start)));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":260 */
+ __pyx_1 = (__pyx_v_last == NULL);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":262 */
+ __pyx_v_last = (__pyx_v_self->end_str - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":263 */
+ __pyx_v_self->cur_str = __pyx_v_self->end_str;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":267 */
+ __pyx_v_self->cur_str = (__pyx_v_last + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":268 */
+ __pyx_v_last = (__pyx_v_last - 1);
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":270 */
+ __pyx_1 = (__pyx_v_last <= __pyx_v_start);
+ if (!__pyx_1) {
+ __pyx_1 = ((__pyx_v_last[0]) != ':');
+ }
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":274 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self->__pyx_vtab)->process_one_record(__pyx_v_self,__pyx_v_start,__pyx_v_last); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.process_next_record");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_read(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_read(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ int __pyx_v_text_size;
+ PyObject *__pyx_v_text;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_text = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":279 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->__pyx_vtab)->validate(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":281 */
+ __pyx_2 = PyObject_GetAttr(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->kndx, __pyx_n_check_header); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; goto __pyx_L1;}
+ Py_INCREF(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->fp);
+ PyTuple_SET_ITEM(__pyx_3, 0, ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->fp);
+ __pyx_4 = PyObject_CallObject(__pyx_2, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":291 */
+ __pyx_2 = PyObject_GetAttr(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->fp, __pyx_n_read); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; goto __pyx_L1;}
+ __pyx_3 = PyObject_CallObject(__pyx_2, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_text);
+ __pyx_v_text = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":292 */
+ __pyx_v_text_size = PyString_Size(__pyx_v_text);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":293 */
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->cur_str = PyString_AsString(__pyx_v_text);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":295 */
+ ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->end_str = (((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->cur_str + __pyx_v_text_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":297 */
+ while (1) {
+ __pyx_1 = (((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->cur_str < ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->end_str);
+ if (!__pyx_1) break;
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)->__pyx_vtab)->process_next_record(((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)__pyx_v_self)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; goto __pyx_L1;}
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx.KnitIndexReader.read");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_text);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_19_knit_load_data_pyx__load_data_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_19_knit_load_data_pyx__load_data_c[] = "Load the knit index file into memory.";
+static PyObject *__pyx_f_6bzrlib_19_knit_load_data_pyx__load_data_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_kndx = 0;
+ PyObject *__pyx_v_fp = 0;
+ PyObject *__pyx_v_reader;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {"kndx","fp",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_kndx, &__pyx_v_fp)) return 0;
+ Py_INCREF(__pyx_v_kndx);
+ Py_INCREF(__pyx_v_fp);
+ __pyx_v_reader = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":303 */
+ __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_kndx);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_kndx);
+ Py_INCREF(__pyx_v_fp);
+ PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_fp);
+ __pyx_2 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_19_knit_load_data_pyx_KnitIndexReader), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_reader);
+ __pyx_v_reader = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":304 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_reader, __pyx_n_read); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx._load_data_c");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_reader);
+ Py_DECREF(__pyx_v_kndx);
+ Py_DECREF(__pyx_v_fp);
+ return __pyx_r;
+}
+static struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader __pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader;
+
+static PyObject *__pyx_tp_new_6bzrlib_19_knit_load_data_pyx_KnitIndexReader(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_19_knit_load_data_pyx_KnitIndexReader **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_19_knit_load_data_pyx_KnitIndexReader;
+ p->kndx = Py_None; Py_INCREF(Py_None);
+ p->fp = Py_None; Py_INCREF(Py_None);
+ p->cache = Py_None; Py_INCREF(Py_None);
+ p->history = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_19_knit_load_data_pyx_KnitIndexReader(PyObject *o) {
+ struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *p = (struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)o;
+ Py_XDECREF(p->kndx);
+ Py_XDECREF(p->fp);
+ Py_XDECREF(p->cache);
+ Py_XDECREF(p->history);
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_19_knit_load_data_pyx_KnitIndexReader(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *p = (struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)o;
+ if (p->kndx) {
+ e = (*v)(p->kndx, a); if (e) return e;
+ }
+ if (p->fp) {
+ e = (*v)(p->fp, a); if (e) return e;
+ }
+ if (p->cache) {
+ e = (*v)(p->cache, a); if (e) return e;
+ }
+ if (p->history) {
+ e = (*v)(p->history, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_19_knit_load_data_pyx_KnitIndexReader(PyObject *o) {
+ struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *p = (struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader *)o;
+ PyObject *t;
+ t = p->kndx;
+ p->kndx = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->fp;
+ p->fp = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->cache;
+ p->cache = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->history;
+ p->history = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_19_knit_load_data_pyx_KnitIndexReader[] = {
+ {"read", (PyCFunction)__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_read, METH_VARARGS|METH_KEYWORDS, 0},
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_KnitIndexReader = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_KnitIndexReader = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_KnitIndexReader = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_KnitIndexReader = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_19_knit_load_data_pyx_KnitIndexReader = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._knit_load_data_pyx.KnitIndexReader", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_19_knit_load_data_pyx_KnitIndexReader), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_19_knit_load_data_pyx_KnitIndexReader, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_KnitIndexReader, /*tp_as_number*/
+ &__pyx_tp_as_sequence_KnitIndexReader, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_KnitIndexReader, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_KnitIndexReader, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_19_knit_load_data_pyx_KnitIndexReader, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_19_knit_load_data_pyx_KnitIndexReader, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_19_knit_load_data_pyx_KnitIndexReader, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_19_knit_load_data_pyx_KnitIndexReader, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+
+static struct PyMethodDef __pyx_methods[] = {
+ {"_load_data_c", (PyCFunction)__pyx_f_6bzrlib_19_knit_load_data_pyx__load_data_c, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_19_knit_load_data_pyx__load_data_c},
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_knit_load_data_pyx(void); /*proto*/
+PyMODINIT_FUNC init_knit_load_data_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_knit_load_data_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ __pyx_vtabptr_6bzrlib_19_knit_load_data_pyx_KnitIndexReader = &__pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.validate = (void(*)(void))__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_validate;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.process_options = (void(*)(void))__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_options;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.process_parents = (void(*)(void))__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_parents;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.process_one_record = (void(*)(void))__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_one_record;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.process_next_record = (void(*)(void))__pyx_f_6bzrlib_19_knit_load_data_pyx_15KnitIndexReader_process_next_record;
+ __pyx_type_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_19_knit_load_data_pyx_KnitIndexReader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_19_knit_load_data_pyx_KnitIndexReader.tp_dict, __pyx_vtabptr_6bzrlib_19_knit_load_data_pyx_KnitIndexReader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "KnitIndexReader", (PyObject *)&__pyx_type_6bzrlib_19_knit_load_data_pyx_KnitIndexReader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_19_knit_load_data_pyx_KnitIndexReader = &__pyx_type_6bzrlib_19_knit_load_data_pyx_KnitIndexReader;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":19 */
+ __pyx_1 = __Pyx_Import(__pyx_n_sys, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_sys, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":21 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_errors);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_errors);
+ __pyx_2 = __Pyx_Import(__pyx_n_bzrlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_errors); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_errors, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_knit_load_data_pyx.pyx":301 */
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._knit_load_data_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_knit_load_data_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_Get();
+ PyErr_Fetch(type, value, tb);
+ PyErr_NormalizeException(type, value, tb);
+ if (PyErr_Occurred())
+ goto bad;
+ Py_INCREF(*type);
+ Py_INCREF(*value);
+ Py_INCREF(*tb);
+ Py_XDECREF(tstate->exc_type);
+ Py_XDECREF(tstate->exc_value);
+ Py_XDECREF(tstate->exc_traceback);
+ tstate->exc_type = *type;
+ tstate->exc_value = *value;
+ tstate->exc_traceback = *tb;
+ return 0;
+bad:
+ Py_XDECREF(*type);
+ Py_XDECREF(*value);
+ Py_XDECREF(*tb);
+ return -1;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+ PyObject *pycobj = 0;
+ int result;
+
+ pycobj = PyCObject_FromVoidPtr(vtable, 0);
+ if (!pycobj)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0)
+ goto bad;
+ result = 0;
+ goto done;
+
+bad:
+ result = -1;
+done:
+ Py_XDECREF(pycobj);
+ return result;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *__import__ = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+ if (!__import__)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunction(__import__, "OOOO",
+ name, global_dict, empty_dict, list);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(__import__);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_knit_load_data_pyx.pyx b/bzrlib/_knit_load_data_pyx.pyx
new file mode 100644
index 0000000..746366d
--- /dev/null
+++ b/bzrlib/_knit_load_data_pyx.pyx
@@ -0,0 +1,304 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Pyrex extensions to knit parsing."""
+
+import sys
+
+from bzrlib import errors
+
+
+cdef extern from "stdlib.h":
+ ctypedef unsigned size_t
+ long int strtol(char *nptr, char **endptr, int base)
+
+
+cdef extern from "Python.h":
+ int PyDict_CheckExact(object)
+ void *PyDict_GetItem_void "PyDict_GetItem" (object p, object key)
+ int PyDict_SetItem(object p, object key, object val) except -1
+
+ int PyList_Append(object lst, object item) except -1
+ object PyList_GET_ITEM(object lst, int index)
+ int PyList_CheckExact(object)
+
+ void *PyTuple_GetItem_void_void "PyTuple_GET_ITEM" (void* tpl, int index)
+
+ char *PyString_AsString(object p)
+ object PyString_FromStringAndSize(char *, int)
+ int PyString_Size(object p)
+
+ void Py_INCREF(object)
+
+
+cdef extern from "string.h":
+ void *memchr(void *s, int c, size_t n)
+
+
+cdef int string_to_int_safe(char *s, char *end, int *out) except -1:
+ """Convert a base10 string to an integer.
+
+ This makes sure the whole string is consumed, or it raises ValueError.
+ This is similar to how int(s) works, except you don't need a Python
+ String object.
+
+ :param s: The string to convert
+ :param end: The character after the integer. So if the string is '12\0',
+ this should be pointing at the '\0'. If the string was '12 ' then this
+ should point at the ' '.
+ :param out: The converted integer is stored here (an output parameter)
+ :return: -1 if an exception is raised. 0 otherwise
+ """
+ cdef char *integer_end
+
+ # We can't just return the integer because of how pyrex determines when
+ # there is an exception.
+ out[0] = <int>strtol(s, &integer_end, 10)
+ if integer_end != end:
+ py_s = PyString_FromStringAndSize(s, end-s)
+ raise ValueError('%r is not a valid integer' % (py_s,))
+ return 0
+
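+# Worked example (illustrative values): given a buffer holding "12 rest",
+# string_to_int_safe(s, s + 2, &out) stores 12 in out and returns 0. If the
+# first two characters were "1x" instead, strtol would stop at the 'x', so
+# integer_end != end and ValueError("'1x' is not a valid integer") is
+# raised, which the 'except -1' declaration turns into a -1 return.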
+
+cdef class KnitIndexReader:
+
+ cdef object kndx
+ cdef object fp
+
+ cdef object cache
+ cdef object history
+
+ cdef char * cur_str
+ cdef char * end_str
+
+ cdef int history_len
+
+ def __init__(self, kndx, fp):
+ self.kndx = kndx
+ self.fp = fp
+
+ self.cache = kndx._cache
+ self.history = kndx._history
+
+ self.cur_str = NULL
+ self.end_str = NULL
+ self.history_len = 0
+
+ cdef int validate(self) except -1:
+ if not PyDict_CheckExact(self.cache):
+ raise TypeError('kndx._cache must be a python dict')
+ if not PyList_CheckExact(self.history):
+ raise TypeError('kndx._history must be a python list')
+ return 0
+
+ cdef object process_options(self, char *option_str, char *end):
+ """Process the options string into a list."""
+ cdef char *next
+
+ # This is alternative code which creates a python string and splits it.
+ # It is "correct" and more obvious, but slower than the following code.
+ # It can be uncommented to switch in case the other code is seen as
+ # suspect.
+ # options = PyString_FromStringAndSize(option_str,
+ # end - option_str)
+ # return options.split(',')
+
+ final_options = []
+
+ while option_str < end:
+ next = <char*>memchr(option_str, c',', end - option_str)
+ if next == NULL:
+ next = end
+ next_option = PyString_FromStringAndSize(option_str,
+ next - option_str)
+ PyList_Append(final_options, next_option)
+
+ # Move past the ','
+ option_str = next+1
+
+ return final_options
+
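+ # For example (option names purely illustrative): an options field such as
+ # "fulltext,no-eol" is split by the loop above into ['fulltext', 'no-eol'],
+ # matching what the commented-out .split(',') variant would produce.
+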
+ cdef object process_parents(self, char *parent_str, char *end):
+ cdef char *next
+ cdef int int_parent
+ cdef char *parent_end
+
+ # Alternative, correct but slower code.
+ #
+ # parents = PyString_FromStringAndSize(parent_str,
+ # end - parent_str)
+ # real_parents = []
+ # for parent in parents.split():
+ # if parent[0].startswith('.'):
+ # real_parents.append(parent[1:])
+ # else:
+ # real_parents.append(self.history[int(parent)])
+ # return real_parents
+
+ parents = []
+ while parent_str <= end:
+ next = <char*>memchr(parent_str, c' ', end - parent_str)
+ if next == NULL or next >= end or next == parent_str:
+ break
+
+ if parent_str[0] == c'.':
+ # This is an explicit revision id
+ parent_str = parent_str + 1
+ parent = PyString_FromStringAndSize(parent_str,
+ next - parent_str)
+ else:
+ # This is an integer offset into the original history list
+ string_to_int_safe(parent_str, next, &int_parent)
+
+ if int_parent >= self.history_len:
+ raise IndexError('Parent index refers to a revision which'
+ ' does not exist yet.'
+ ' %d > %d' % (int_parent, self.history_len))
+ parent = PyList_GET_ITEM(self.history, int_parent)
+ # PyList_GET_ITEM returns a borrowed reference, so take our own
+ Py_INCREF(parent)
+ PyList_Append(parents, parent)
+ parent_str = next + 1
+ return tuple(parents)
+
+ cdef int process_one_record(self, char *start, char *end) except -1:
+ """Take a simple string and split it into an index record."""
+ cdef char *version_id_str
+ cdef int version_id_size
+ cdef char *option_str
+ cdef char *option_end
+ cdef char *pos_str
+ cdef int pos
+ cdef char *size_str
+ cdef int size
+ cdef char *parent_str
+ cdef int parent_size
+ cdef void *cache_entry
+
+ version_id_str = start
+ option_str = <char*>memchr(version_id_str, c' ', end - version_id_str)
+ if option_str == NULL or option_str >= end:
+ # Short entry
+ return 0
+ version_id_size = <int>(option_str - version_id_str)
+ # Move past the space character
+ option_str = option_str + 1
+
+ pos_str = <char*>memchr(option_str, c' ', end - option_str)
+ if pos_str == NULL or pos_str >= end:
+ # Short entry
+ return 0
+ option_end = pos_str
+ pos_str = pos_str + 1
+
+ size_str = <char*>memchr(pos_str, c' ', end - pos_str)
+ if size_str == NULL or size_str >= end:
+ # Short entry
+ return 0
+ size_str = size_str + 1
+
+ parent_str = <char*>memchr(size_str, c' ', end - size_str)
+ if parent_str == NULL or parent_str >= end:
+ # Missing parents
+ return 0
+ parent_str = parent_str + 1
+
+ version_id = PyString_FromStringAndSize(version_id_str,
+ version_id_size)
+ options = self.process_options(option_str, option_end)
+
+ try:
+ string_to_int_safe(pos_str, size_str - 1, &pos)
+ string_to_int_safe(size_str, parent_str - 1, &size)
+ parents = self.process_parents(parent_str, end)
+ except (ValueError, IndexError), e:
+ py_line = PyString_FromStringAndSize(start, end - start)
+ raise errors.KnitCorrupt(self.kndx._filename,
+ "line %r: %s" % (py_line, e))
+
+ cache_entry = PyDict_GetItem_void(self.cache, version_id)
+ if cache_entry == NULL:
+ PyList_Append(self.history, version_id)
+ index = self.history_len
+ self.history_len = self.history_len + 1
+ else:
+ # PyTuple_GetItem_void_void does *not* increment the reference
+ # counter, but casting to <object> does.
+ index = <object>PyTuple_GetItem_void_void(cache_entry, 5)
+
+ PyDict_SetItem(self.cache, version_id,
+ (version_id,
+ options,
+ pos,
+ size,
+ parents,
+ index,
+ ))
+ return 1
+
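+ # A rough picture of the index line handled above, inferred from the
+ # parsing (illustrative, not a format specification):
+ #
+ #   version-id option[,option...] pos size parent [parent ...] :
+ #
+ # where each parent is either '.<revision-id>' (a literal revision id) or
+ # a plain integer indexing into self.history.
+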
+ cdef int process_next_record(self) except -1:
+ """Process the next record in the file."""
+ cdef char *last
+ cdef char *start
+
+ start = self.cur_str
+ # Find the next newline
+ last = <char*>memchr(start, c'\n', self.end_str - start)
+ if last == NULL:
+ # Process until the end of the file
+ last = self.end_str - 1
+ self.cur_str = self.end_str
+ else:
+ # The record's last character is right before the '\n',
+ # and the next record starts right after it
+ self.cur_str = last + 1
+ last = last - 1
+
+ if last <= start or last[0] != c':':
+ # Incomplete record
+ return 0
+
+ return self.process_one_record(start, last)
+
+ def read(self):
+ cdef int text_size
+
+ self.validate()
+
+ self.kndx.check_header(self.fp)
+
+ # We read the whole thing at once
+ # TODO: jam 2007-05-09 Consider reading incrementally rather than
+ # having to have the whole thing read up front.
+ # we already know that calling f.readlines() once is faster than
+ # making lots of f.readline() calls.
+ # The other possibility is to avoid a Python String here
+ # completely. However, self.fp may be a 'file-like' object;
+ # it is not guaranteed to be a real file.
+ text = self.fp.read()
+ text_size = PyString_Size(text)
+ self.cur_str = PyString_AsString(text)
+ # This points just past the last character in the string
+ self.end_str = self.cur_str + text_size
+
+ while self.cur_str < self.end_str:
+ self.process_next_record()
+
+
+def _load_data_c(kndx, fp):
+ """Load the knit index file into memory."""
+ reader = KnitIndexReader(kndx, fp)
+ reader.read()
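+
+# Interface sketch (illustrative; the actual caller lives elsewhere in
+# bzrlib): kndx must provide _cache (a real dict), _history (a real list),
+# _filename and check_header(fp), and fp must provide read(). Calling
+# _load_data_c(kndx, fp) then fills kndx._cache and kndx._history in place.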
diff --git a/bzrlib/_known_graph_py.py b/bzrlib/_known_graph_py.py
new file mode 100644
index 0000000..821f70c
--- /dev/null
+++ b/bzrlib/_known_graph_py.py
@@ -0,0 +1,374 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Graph algorithms when we have already loaded everything.
+"""
+
+from __future__ import absolute_import
+
+from collections import deque
+from bzrlib import (
+ errors,
+ revision,
+ )
+
+
+class _KnownGraphNode(object):
+ """Represents a single object in the known graph."""
+
+ __slots__ = ('key', 'parent_keys', 'child_keys', 'gdfo')
+
+ def __init__(self, key, parent_keys):
+ self.key = key
+ self.parent_keys = parent_keys
+ self.child_keys = []
+ # Greatest distance from origin
+ self.gdfo = None
+
+ def __repr__(self):
+ return '%s(%s gdfo:%s par:%s child:%s)' % (
+ self.__class__.__name__, self.key, self.gdfo,
+ self.parent_keys, self.child_keys)
+
+
+class _MergeSortNode(object):
+ """Information about a specific node in the merge graph."""
+
+ __slots__ = ('key', 'merge_depth', 'revno', 'end_of_merge')
+
+ def __init__(self, key, merge_depth, revno, end_of_merge):
+ self.key = key
+ self.merge_depth = merge_depth
+ self.revno = revno
+ self.end_of_merge = end_of_merge
+
+
+class KnownGraph(object):
+ """This is a class which assumes we already know the full graph."""
+
+ def __init__(self, parent_map, do_cache=True):
+ """Create a new KnownGraph instance.
+
+ :param parent_map: A dictionary mapping key => parent_keys
+ """
+ self._nodes = {}
+ # Maps {frozenset(revision_id, revision_id): heads}
+ self._known_heads = {}
+ self.do_cache = do_cache
+ self._initialize_nodes(parent_map)
+ self._find_gdfo()
+
+ def _initialize_nodes(self, parent_map):
+ """Populate self._nodes.
+
+ After this has finished:
+ - self._nodes will have an entry for every entry in parent_map.
+ - ghosts will have parent_keys set to None,
+ - all nodes found will also have .child_keys populated with all known
+ child_keys.
+ """
+ nodes = self._nodes
+ for key, parent_keys in parent_map.iteritems():
+ if key in nodes:
+ node = nodes[key]
+ node.parent_keys = parent_keys
+ else:
+ node = _KnownGraphNode(key, parent_keys)
+ nodes[key] = node
+ for parent_key in parent_keys:
+ try:
+ parent_node = nodes[parent_key]
+ except KeyError:
+ parent_node = _KnownGraphNode(parent_key, None)
+ nodes[parent_key] = parent_node
+ parent_node.child_keys.append(key)
+
+ def _find_tails(self):
+ return [node for node in self._nodes.itervalues()
+ if not node.parent_keys]
+
+ def _find_tips(self):
+ return [node for node in self._nodes.itervalues()
+ if not node.child_keys]
+
+ def _find_gdfo(self):
+ nodes = self._nodes
+ known_parent_gdfos = {}
+ pending = []
+
+ for node in self._find_tails():
+ node.gdfo = 1
+ pending.append(node)
+
+ while pending:
+ node = pending.pop()
+ for child_key in node.child_keys:
+ child = nodes[child_key]
+ if child_key in known_parent_gdfos:
+ known_gdfo = known_parent_gdfos[child_key] + 1
+ present = True
+ else:
+ known_gdfo = 1
+ present = False
+ if child.gdfo is None or node.gdfo + 1 > child.gdfo:
+ child.gdfo = node.gdfo + 1
+ if known_gdfo == len(child.parent_keys):
+ # We are the last parent updating that node, we can
+ # continue from there
+ pending.append(child)
+ if present:
+ del known_parent_gdfos[child_key]
+ else:
+ # Update known_parent_gdfos for a key we couldn't process
+ known_parent_gdfos[child_key] = known_gdfo
+
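+ # Illustrative example: for parent_map {'B': ('A',), 'C': ('B',),
+ # 'D': ('B',)} the only tail is the ghost 'A', so 'A' gets gdfo 1, 'B'
+ # gets 2, and 'C' and 'D' both get 3. Every ancestor therefore has a
+ # strictly smaller gdfo than its descendants, which is what heads() below
+ # relies on to stop walking early.
+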
+ def add_node(self, key, parent_keys):
+ """Add a new node to the graph.
+
+ If this fills in a ghost, then the gdfos of all children will be
+ updated accordingly.
+
+ :param key: The node being added. If this is a duplicate, this is a
+ no-op.
+ :param parent_keys: The parents of the given node.
+ :return: None (should we return if this was a ghost, etc?)
+ """
+ nodes = self._nodes
+ if key in nodes:
+ node = nodes[key]
+ if node.parent_keys is None:
+ node.parent_keys = parent_keys
+ # A ghost is being added, we can no longer trust the heads
+ # cache, so clear it
+ self._known_heads.clear()
+ else:
+ # Make sure we compare a list to a list, as tuple != list.
+ parent_keys = list(parent_keys)
+ existing_parent_keys = list(node.parent_keys)
+ if parent_keys == existing_parent_keys:
+ return # Identical content
+ else:
+ raise ValueError('Parent key mismatch, existing node %s'
+ ' has parents of %s not %s'
+ % (key, existing_parent_keys, parent_keys))
+ else:
+ node = _KnownGraphNode(key, parent_keys)
+ nodes[key] = node
+ parent_gdfo = 0
+ for parent_key in parent_keys:
+ try:
+ parent_node = nodes[parent_key]
+ except KeyError:
+ parent_node = _KnownGraphNode(parent_key, None)
+ # Ghosts and roots have gdfo 1
+ parent_node.gdfo = 1
+ nodes[parent_key] = parent_node
+ if parent_gdfo < parent_node.gdfo:
+ parent_gdfo = parent_node.gdfo
+ parent_node.child_keys.append(key)
+ node.gdfo = parent_gdfo + 1
+ # Now propagate the gdfo to all children.
+ # Note that this loop is slightly inefficient, in that we may visit the
+ # same child (and its descendants) more than once; however, it is
+ # 'efficient' in that we only walk to nodes that would be updated,
+ # rather than all nodes.
+ # We use a deque rather than a simple list stack, so we do BFS rather
+ # than DFS. That way, if a longer path is possible, we walk it before we
+ # get to the final child.
+ pending = deque([node])
+ while pending:
+ node = pending.popleft()
+ next_gdfo = node.gdfo + 1
+ for child_key in node.child_keys:
+ child = nodes[child_key]
+ if child.gdfo < next_gdfo:
+ # This child is being updated, we need to check its
+ # children
+ child.gdfo = next_gdfo
+ pending.append(child)
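+ # Editor's sketch of the ghost-filling case described above (toy keys
+ # assumed): adding a key that was previously only known as a ghost
+ # parent fills in its parents and bumps the gdfo of its descendants.
+ # >>> kg = KnownGraph({'rev2': ('rev1',)}) # 'rev1' starts as a ghost
+ # >>> kg.add_node('rev1', ('rev0',)) # 'rev0' becomes the new ghost
+ # >>> kg.get_parent_keys('rev1')
+ # ('rev0',)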
+
+ def heads(self, keys):
+ """Return the heads from amongst keys.
+
+ This is done by searching the ancestries of each key. Any key that is
+ reachable from another key is not returned; all the others are.
+
+ This operation scales with the relative depth between any two keys. It
+ uses gdfo to avoid walking all ancestry.
+
+ :param keys: An iterable of keys.
+ :return: A set of the heads. Note that as a set there is no ordering
+ information. Callers will need to filter their input to create
+ order if they need it.
+ """
+ candidate_nodes = dict((key, self._nodes[key]) for key in keys)
+ if revision.NULL_REVISION in candidate_nodes:
+ # NULL_REVISION is only a head if it is the only entry
+ candidate_nodes.pop(revision.NULL_REVISION)
+ if not candidate_nodes:
+ return frozenset([revision.NULL_REVISION])
+ if len(candidate_nodes) < 2:
+ # No or only one candidate
+ return frozenset(candidate_nodes)
+ heads_key = frozenset(candidate_nodes)
+ # Do we have a cached result?
+ try:
+ heads = self._known_heads[heads_key]
+ return heads
+ except KeyError:
+ pass
+ # Let's compute the heads
+ seen = set()
+ pending = []
+ min_gdfo = None
+ for node in candidate_nodes.values():
+ if node.parent_keys:
+ pending.extend(node.parent_keys)
+ if min_gdfo is None or node.gdfo < min_gdfo:
+ min_gdfo = node.gdfo
+ nodes = self._nodes
+ while pending:
+ node_key = pending.pop()
+ if node_key in seen:
+ # node already appears in some ancestry
+ continue
+ seen.add(node_key)
+ node = nodes[node_key]
+ if node.gdfo <= min_gdfo:
+ continue
+ if node.parent_keys:
+ pending.extend(node.parent_keys)
+ heads = heads_key.difference(seen)
+ if self.do_cache:
+ self._known_heads[heads_key] = heads
+ return heads
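+ # Editor's worked example (assumed linear toy history): the walk stops
+ # as soon as it reaches nodes at or below the smallest candidate gdfo.
+ # >>> kg = KnownGraph({'A': (), 'B': ('A',), 'C': ('B',)})
+ # >>> kg.heads(['A', 'C']) # 'A' is an ancestor of 'C'
+ # frozenset(['C'])
+ # >>> kg.heads(['B', 'C'])
+ # frozenset(['C'])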
+
+ def topo_sort(self):
+ """Return the nodes in topological order.
+
+ All parents must occur before all children.
+ """
+ for node in self._nodes.itervalues():
+ if node.gdfo is None:
+ raise errors.GraphCycleError(self._nodes)
+ pending = self._find_tails()
+ pending_pop = pending.pop
+ pending_append = pending.append
+
+ topo_order = []
+ topo_order_append = topo_order.append
+
+ num_seen_parents = dict.fromkeys(self._nodes, 0)
+ while pending:
+ node = pending_pop()
+ if node.parent_keys is not None:
+ # We don't include ghost parents
+ topo_order_append(node.key)
+ for child_key in node.child_keys:
+ child_node = self._nodes[child_key]
+ seen_parents = num_seen_parents[child_key] + 1
+ if seen_parents == len(child_node.parent_keys):
+ # All parents have been processed, enqueue this child
+ pending_append(child_node)
+ # This has been queued up, stop tracking it
+ del num_seen_parents[child_key]
+ else:
+ num_seen_parents[child_key] = seen_parents
+ # We started from the parents, so we don't need to do any more work
+ return topo_order
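+ # Editor's note (same assumed toy history): topo_sort() lists every
+ # parent before any of its children and silently omits ghost keys, e.g.
+ # KnownGraph({'A': (), 'B': ('A',), 'C': ('B',)}).topo_sort()
+ # returns ['A', 'B', 'C'].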
+
+ def gc_sort(self):
+ """Return a reverse topological ordering which is 'stable'.
+
+ There are a few constraints:
+ 1) Reverse topological (all children before all parents)
+ 2) Grouped by prefix
+ 3) 'stable' sorting, so that we get the same result independent of
+ machine or extra data.
+ To do this, we use the same basic algorithm as topo_sort, but when we
+ aren't sure which node to visit next, we sort them lexicographically.
+ """
+ tips = self._find_tips()
+ # Split the tips based on prefix
+ prefix_tips = {}
+ for node in tips:
+ if node.key.__class__ is str or len(node.key) == 1:
+ prefix = ''
+ else:
+ prefix = node.key[0]
+ prefix_tips.setdefault(prefix, []).append(node)
+
+ num_seen_children = dict.fromkeys(self._nodes, 0)
+
+ result = []
+ for prefix in sorted(prefix_tips):
+ pending = sorted(prefix_tips[prefix], key=lambda n:n.key,
+ reverse=True)
+ while pending:
+ node = pending.pop()
+ if node.parent_keys is None:
+ # Ghost node, skip it
+ continue
+ result.append(node.key)
+ for parent_key in sorted(node.parent_keys, reverse=True):
+ parent_node = self._nodes[parent_key]
+ seen_children = num_seen_children[parent_key] + 1
+ if seen_children == len(parent_node.child_keys):
+ # All children have been processed, enqueue this parent
+ pending.append(parent_node)
+ # This has been queued up, stop tracking it
+ del num_seen_children[parent_key]
+ else:
+ num_seen_children[parent_key] = seen_children
+ return result
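+ # Editor's sketch, assuming plain string keys (so everything lands in
+ # the '' prefix group): for {'A': (), 'B': ('A',), 'C': ('B',)},
+ # gc_sort() returns ['C', 'B', 'A']: children strictly before parents,
+ # with ties broken lexicographically so the output is stable.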
+
+ def merge_sort(self, tip_key):
+ """Compute the merge sorted graph output."""
+ from bzrlib import tsort
+ as_parent_map = dict((node.key, node.parent_keys)
+ for node in self._nodes.itervalues()
+ if node.parent_keys is not None)
+ # We intentionally always generate revnos and never force the
+ # mainline_revisions
+ # Strip the sequence_number that merge_sort generates
+ return [_MergeSortNode(key, merge_depth, revno, end_of_merge)
+ for _, key, merge_depth, revno, end_of_merge
+ in tsort.merge_sort(as_parent_map, tip_key,
+ mainline_revisions=None,
+ generate_revno=True)]
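+ # Editor's note (hypothetical two-revision history {'A': (), 'B': ('A',)}
+ # with tip 'B'): merge_sort('B') yields _MergeSortNode objects exposing
+ # .key, .merge_depth, .revno (a dotted-revno tuple such as (2,) or (1,))
+ # and .end_of_merge, in the order produced by tsort.merge_sort.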
+
+ def get_parent_keys(self, key):
+ """Get the parents for a key
+
+ Returns a list containing the parent keys. If the key is a ghost,
+ None is returned. A KeyError will be raised if the key is not in
+ the graph.
+
+ :param key: Key to check (e.g. a revision_id)
+ :return: A list of parents
+ """
+ return self._nodes[key].parent_keys
+
+ def get_child_keys(self, key):
+ """Get the children for a key
+
+ Returns a list containing the child keys. A KeyError will be raised
+ if the key is not in the graph.
+
+ :param key: Key to check (e.g. a revision_id)
+ :return: A list of children
+ """
+ return self._nodes[key].child_keys
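+ # Editor's usage sketch for the two accessors above (toy graph assumed):
+ # >>> kg = KnownGraph({'B': ('A',), 'C': ('B',)})
+ # >>> kg.get_parent_keys('C')
+ # ('B',)
+ # >>> kg.get_child_keys('A') # 'A' is a ghost, but its children are tracked
+ # ['B']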
diff --git a/bzrlib/_known_graph_pyx.c b/bzrlib/_known_graph_pyx.c
new file mode 100644
index 0000000..f8232f3
--- /dev/null
+++ b/bzrlib/_known_graph_pyx.c
@@ -0,0 +1,4826 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:00:57 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Implementation of Graph algorithms when we have already loaded everything.\n";
+
+static int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i); /*proto*/
+
+static void __Pyx_WriteUnraisable(char *name); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._known_graph_pyx */
+
+
+/* Declarations from implementation of bzrlib._known_graph_pyx */
+
+struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_vtab;
+ PyObject *key;
+ PyObject *parents;
+ PyObject *children;
+ long gdfo;
+ int seen;
+ PyObject *extra;
+};
+
+struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *__pyx_vtab;
+ PyObject *_nodes;
+ PyObject *_known_heads;
+ int do_cache;
+};
+
+struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_vtab;
+ PyObject *key;
+ long merge_depth;
+ PyObject *end_of_merge;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *left_parent;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *left_pending_parent;
+ PyObject *pending_parents;
+ long _revno_first;
+ long _revno_second;
+ long _revno_last;
+ int is_first_child;
+ int seen_by_child;
+ int completed;
+};
+
+struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_vtab;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *graph;
+ PyObject *_depth_first_stack;
+ Py_ssize_t _last_stack_item;
+ PyObject *_revno_to_branch_count;
+ PyObject *_scheduled_nodes;
+};
+
+
+struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__KnownGraphNode {
+ PyObject *(*clear_references)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *);
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_vtabptr_6bzrlib_16_known_graph_pyx__KnownGraphNode;
+
+
+struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *(*_get_ms_node)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *,struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *);
+ PyObject *(*_push_node)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *,struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *,long);
+ PyObject *(*_pop_node)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *);
+ PyObject *(*_schedule_stack)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *);
+ PyObject *(*topo_order)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *);
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSorter;
+
+
+struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *(*_get_or_create_node)(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *,PyObject *);
+ PyObject *(*_populate_parents)(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *,struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *,PyObject *);
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *__pyx_vtabptr_6bzrlib_16_known_graph_pyx_KnownGraph;
+
+
+struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode {
+ int (*has_pending_parents)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *);
+ PyObject *(*_revno)(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *);
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSortNode;
+
+static PyTypeObject *__pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_16_known_graph_pyx__MergeSorter = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_16_known_graph_pyx_KnownGraph = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_16_known_graph_pyx__MergeSortNode = 0;
+static PyObject *__pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION;
+static struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(PyObject *,Py_ssize_t); /*proto*/
+static struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(PyObject *,Py_ssize_t); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx__sort_list_nodes(PyObject *,int); /*proto*/
+
+static char __pyx_k1[] = "append";
+static char __pyx_k2[] = "%s(%s gdfo:%s par:%s child:%s)";
+static char __pyx_k3[] = "__class__";
+static char __pyx_k4[] = "__name__";
+static char __pyx_k5[] = "lst_or_tpl must be a list or tuple.";
+static char __pyx_k6[] = "sort";
+static char __pyx_k7[] = "key";
+static char __pyx_k8[] = "get_key";
+static char __pyx_k9[] = "reverse";
+static char __pyx_k10[] = "_initialize_nodes";
+static char __pyx_k11[] = "_find_gdfo";
+static char __pyx_k12[] = "parent_map should be a dict of {key:parent_keys}";
+static char __pyx_k13[] = "_find_tails";
+static char __pyx_k14[] = "clear";
+static char __pyx_k15[] = "Parent key mismatch, existing node %s has parents of %s not %s";
+static char __pyx_k16[] = "deque";
+static char __pyx_k17[] = "popleft";
+static char __pyx_k18[] = "frozenset";
+static char __pyx_k19[] = "key %s not in nodes";
+static char __pyx_k20[] = "pop";
+static char __pyx_k21[] = "extend";
+static char __pyx_k22[] = "errors";
+static char __pyx_k23[] = "GraphCycleError";
+static char __pyx_k24[] = "_find_tips";
+static char __pyx_k25[] = "";
+static char __pyx_k26[] = "sorted";
+static char __pyx_k27[] = "parent_keys";
+static char __pyx_k28[] = "child_keys";
+static char __pyx_k29[] = "%s(%s depth:%s rev:%s,%s,%s first:%s seen:%s)";
+static char __pyx_k30[] = "Something wrong with: %s";
+static char __pyx_k31[] = "ghost nodes should not be pushed onto the stack: %s";
+static char __pyx_k32[] = "collections";
+static char __pyx_k33[] = "gc";
+static char __pyx_k34[] = "bzrlib";
+static char __pyx_k35[] = "revision";
+static char __pyx_k36[] = "NULL_REVISION";
+
+static PyObject *__pyx_n_GraphCycleError;
+static PyObject *__pyx_n_NULL_REVISION;
+static PyObject *__pyx_n___class__;
+static PyObject *__pyx_n___name__;
+static PyObject *__pyx_n__find_gdfo;
+static PyObject *__pyx_n__find_tails;
+static PyObject *__pyx_n__find_tips;
+static PyObject *__pyx_n__initialize_nodes;
+static PyObject *__pyx_n_append;
+static PyObject *__pyx_n_bzrlib;
+static PyObject *__pyx_n_child_keys;
+static PyObject *__pyx_n_clear;
+static PyObject *__pyx_n_collections;
+static PyObject *__pyx_n_deque;
+static PyObject *__pyx_n_errors;
+static PyObject *__pyx_n_extend;
+static PyObject *__pyx_n_frozenset;
+static PyObject *__pyx_n_gc;
+static PyObject *__pyx_n_get_key;
+static PyObject *__pyx_n_key;
+static PyObject *__pyx_n_parent_keys;
+static PyObject *__pyx_n_pop;
+static PyObject *__pyx_n_popleft;
+static PyObject *__pyx_n_reverse;
+static PyObject *__pyx_n_revision;
+static PyObject *__pyx_n_sort;
+static PyObject *__pyx_n_sorted;
+
+static PyObject *__pyx_k2p;
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k12p;
+static PyObject *__pyx_k15p;
+static PyObject *__pyx_k19p;
+static PyObject *__pyx_k25p;
+static PyObject *__pyx_k29p;
+static PyObject *__pyx_k30p;
+static PyObject *__pyx_k31p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_GraphCycleError, 1, __pyx_k23, sizeof(__pyx_k23)},
+ {&__pyx_n_NULL_REVISION, 1, __pyx_k36, sizeof(__pyx_k36)},
+ {&__pyx_n___class__, 1, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_n___name__, 1, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_n__find_gdfo, 1, __pyx_k11, sizeof(__pyx_k11)},
+ {&__pyx_n__find_tails, 1, __pyx_k13, sizeof(__pyx_k13)},
+ {&__pyx_n__find_tips, 1, __pyx_k24, sizeof(__pyx_k24)},
+ {&__pyx_n__initialize_nodes, 1, __pyx_k10, sizeof(__pyx_k10)},
+ {&__pyx_n_append, 1, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_n_bzrlib, 1, __pyx_k34, sizeof(__pyx_k34)},
+ {&__pyx_n_child_keys, 1, __pyx_k28, sizeof(__pyx_k28)},
+ {&__pyx_n_clear, 1, __pyx_k14, sizeof(__pyx_k14)},
+ {&__pyx_n_collections, 1, __pyx_k32, sizeof(__pyx_k32)},
+ {&__pyx_n_deque, 1, __pyx_k16, sizeof(__pyx_k16)},
+ {&__pyx_n_errors, 1, __pyx_k22, sizeof(__pyx_k22)},
+ {&__pyx_n_extend, 1, __pyx_k21, sizeof(__pyx_k21)},
+ {&__pyx_n_frozenset, 1, __pyx_k18, sizeof(__pyx_k18)},
+ {&__pyx_n_gc, 1, __pyx_k33, sizeof(__pyx_k33)},
+ {&__pyx_n_get_key, 1, __pyx_k8, sizeof(__pyx_k8)},
+ {&__pyx_n_key, 1, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_n_parent_keys, 1, __pyx_k27, sizeof(__pyx_k27)},
+ {&__pyx_n_pop, 1, __pyx_k20, sizeof(__pyx_k20)},
+ {&__pyx_n_popleft, 1, __pyx_k17, sizeof(__pyx_k17)},
+ {&__pyx_n_reverse, 1, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_n_revision, 1, __pyx_k35, sizeof(__pyx_k35)},
+ {&__pyx_n_sort, 1, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_n_sorted, 1, __pyx_k26, sizeof(__pyx_k26)},
+ {&__pyx_k2p, 0, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k12p, 0, __pyx_k12, sizeof(__pyx_k12)},
+ {&__pyx_k15p, 0, __pyx_k15, sizeof(__pyx_k15)},
+ {&__pyx_k19p, 0, __pyx_k19, sizeof(__pyx_k19)},
+ {&__pyx_k25p, 0, __pyx_k25, sizeof(__pyx_k25)},
+ {&__pyx_k29p, 0, __pyx_k29, sizeof(__pyx_k29)},
+ {&__pyx_k30p, 0, __pyx_k30, sizeof(__pyx_k30)},
+ {&__pyx_k31p, 0, __pyx_k31, sizeof(__pyx_k31)},
+ {0, 0, 0, 0}
+};
+
+static PyObject *__pyx_d1;
+
+
+/* Implementation of bzrlib._known_graph_pyx */
+
+static int __pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":74 */
+ Py_INCREF(__pyx_v_key);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->key);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->key = __pyx_v_key;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":75 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->parents);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->parents = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":77 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->children);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->children = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":79 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->gdfo = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":80 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->seen = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":81 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->extra);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->extra = Py_None;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._KnownGraphNode.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_10child_keys___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_10child_keys___get__(PyObject *__pyx_v_self) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_child;
+ PyObject *__pyx_v_keys;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_keys = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":87 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_keys);
+ __pyx_v_keys = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":88 */
+ __pyx_1 = PyObject_GetIter(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->children); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ for (;;) {
+ __pyx_2 = PyIter_Next(__pyx_1);
+ if (!__pyx_2) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_2, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_child));
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_2);
+ __pyx_2 = 0;
+ __pyx_3 = PyList_Append(__pyx_v_keys,__pyx_v_child->key); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; goto __pyx_L1;}
+ }
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":90 */
+ Py_INCREF(__pyx_v_keys);
+ __pyx_r = __pyx_v_keys;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._KnownGraphNode.child_keys.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_child);
+ Py_DECREF(__pyx_v_keys);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_11parent_keys___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_11parent_keys___get__(PyObject *__pyx_v_self) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_parent;
+ PyObject *__pyx_v_keys;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_keys = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":94 */
+ __pyx_1 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->parents == Py_None;
+ if (__pyx_1) {
+ Py_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":99 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_keys);
+ __pyx_v_keys = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":100 */
+ __pyx_2 = PyObject_GetIter(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->parents); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_3, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent));
+ __pyx_v_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+ __pyx_1 = PyList_Append(__pyx_v_keys,__pyx_v_parent->key); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; goto __pyx_L1;}
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":102 */
+ Py_INCREF(__pyx_v_keys);
+ __pyx_r = __pyx_v_keys;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._KnownGraphNode.parent_keys.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parent);
+ Py_DECREF(__pyx_v_keys);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_clear_references(struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_self) {
+ PyObject *__pyx_r;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":105 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_self->parents);
+ __pyx_v_self->parents = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":106 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_self->children);
+ __pyx_v_self->children = Py_None;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode___repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode___repr__(PyObject *__pyx_v_self) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ PyObject *__pyx_v_parent_keys;
+ PyObject *__pyx_v_child_keys;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_parent_keys = Py_None; Py_INCREF(Py_None);
+ __pyx_v_child_keys = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":111 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent_keys);
+ __pyx_v_parent_keys = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":112 */
+ __pyx_2 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->parents != Py_None;
+ if (__pyx_2) {
+ __pyx_1 = PyObject_GetIter(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->parents); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_1);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_3, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_GetAttr(__pyx_v_parent_keys, __pyx_n_append); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_node->key);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_node->key);
+ __pyx_5 = PyObject_CallObject(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ }
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":115 */
+ __pyx_3 = PyList_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_child_keys);
+ __pyx_v_child_keys = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":116 */
+ __pyx_2 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->children != Py_None;
+ if (__pyx_2) {
+ __pyx_4 = PyObject_GetIter(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->children); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; goto __pyx_L1;}
+ for (;;) {
+ __pyx_5 = PyIter_Next(__pyx_4);
+ if (!__pyx_5) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_5, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_5);
+ __pyx_5 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_v_child_keys, __pyx_n_append); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_node->key);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_node->key);
+ __pyx_5 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ }
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":119 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n___class__); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n___name__); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_5 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->gdfo); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(5); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ Py_INCREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->key);
+ PyTuple_SET_ITEM(__pyx_4, 1, ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_self)->key);
+ PyTuple_SET_ITEM(__pyx_4, 2, __pyx_5);
+ Py_INCREF(__pyx_v_parent_keys);
+ PyTuple_SET_ITEM(__pyx_4, 3, __pyx_v_parent_keys);
+ Py_INCREF(__pyx_v_child_keys);
+ PyTuple_SET_ITEM(__pyx_4, 4, __pyx_v_child_keys);
+ __pyx_3 = 0;
+ __pyx_5 = 0;
+ __pyx_1 = PyNumber_Remainder(__pyx_k2p, __pyx_4); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._KnownGraphNode.__repr__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_parent_keys);
+ Py_DECREF(__pyx_v_child_keys);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(PyObject *__pyx_v_lst,Py_ssize_t __pyx_v_pos) {
+ PyObject *__pyx_v_temp_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_r;
+ Py_INCREF(__pyx_v_lst);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":127 */
+ __pyx_v_temp_node = PyList_GET_ITEM(__pyx_v_lst,__pyx_v_pos);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":128 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ __pyx_r = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+ goto __pyx_L0;
+
+ __pyx_r = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_lst);
+ return __pyx_r;
+}
+
+static struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(PyObject *__pyx_v_tpl,Py_ssize_t __pyx_v_pos) {
+ PyObject *__pyx_v_temp_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_r;
+ Py_INCREF(__pyx_v_tpl);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":134 */
+ __pyx_v_temp_node = PyTuple_GET_ITEM(__pyx_v_tpl,__pyx_v_pos);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":135 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ __pyx_r = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+ goto __pyx_L0;
+
+ __pyx_r = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_tpl);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_get_key(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_get_key(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_node = 0;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_real_node;
+ PyObject *__pyx_r;
+ static char *__pyx_argnames[] = {"node",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_node)) return 0;
+ Py_INCREF(__pyx_v_node);
+ __pyx_v_real_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":140 */
+ if (!__Pyx_TypeTest(__pyx_v_node, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_node);
+ Py_DECREF(((PyObject *)__pyx_v_real_node));
+ __pyx_v_real_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":141 */
+ Py_INCREF(__pyx_v_real_node->key);
+ __pyx_r = __pyx_v_real_node->key;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.get_key");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_real_node);
+ Py_DECREF(__pyx_v_node);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx__sort_list_nodes(PyObject *__pyx_v_lst_or_tpl,int __pyx_v_reverse) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node1;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node2;
+ int __pyx_v_do_swap;
+ int __pyx_v_is_tuple;
+ Py_ssize_t __pyx_v_length;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ PyObject *__pyx_6 = 0;
+ PyObject *__pyx_7 = 0;
+ Py_INCREF(__pyx_v_lst_or_tpl);
+ __pyx_v_node1 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_node2 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":154 */
+ __pyx_v_is_tuple = PyTuple_CheckExact(__pyx_v_lst_or_tpl);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":155 */
+ __pyx_1 = __pyx_v_is_tuple;
+ if (!__pyx_1) {
+ __pyx_1 = PyList_CheckExact(__pyx_v_lst_or_tpl);
+ }
+ __pyx_2 = (!__pyx_1);
+ if (__pyx_2) {
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; goto __pyx_L1;}
+ Py_INCREF(__pyx_k5p);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_k5p);
+ __pyx_4 = PyObject_CallObject(PyExc_TypeError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":157 */
+ __pyx_5 = PyObject_Length(__pyx_v_lst_or_tpl); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; goto __pyx_L1;}
+ __pyx_v_length = __pyx_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":158 */
+ __pyx_1 = (__pyx_v_length == 0);
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_length == 1);
+ }
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_lst_or_tpl);
+ __pyx_r = __pyx_v_lst_or_tpl;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":160 */
+ __pyx_2 = (__pyx_v_length == 2);
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":161 */
+ __pyx_1 = __pyx_v_is_tuple;
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":162 */
+ __pyx_3 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(__pyx_v_lst_or_tpl,0)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node1));
+ __pyx_v_node1 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":163 */
+ __pyx_4 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(__pyx_v_lst_or_tpl,1)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node2));
+ __pyx_v_node2 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_4);
+ __pyx_4 = 0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":165 */
+ __pyx_3 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_lst_or_tpl,0)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node1));
+ __pyx_v_node1 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":166 */
+ __pyx_4 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_lst_or_tpl,1)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node2));
+ __pyx_v_node2 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_4);
+ __pyx_4 = 0;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":167 */
+ __pyx_2 = __pyx_v_reverse;
+ if (__pyx_2) {
+ __pyx_v_do_swap = PyObject_RichCompareBool(__pyx_v_node1->key,__pyx_v_node2->key,Py_LT);
+ goto __pyx_L6;
+ }
+ /*else*/ {
+ __pyx_v_do_swap = PyObject_RichCompareBool(__pyx_v_node2->key,__pyx_v_node1->key,Py_LT);
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":171 */
+ __pyx_1 = (!__pyx_v_do_swap);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_lst_or_tpl);
+ __pyx_r = __pyx_v_lst_or_tpl;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":173 */
+ __pyx_2 = __pyx_v_is_tuple;
+ if (__pyx_2) {
+ __pyx_3 = PyTuple_New(2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_v_node2));
+ PyTuple_SET_ITEM(__pyx_3, 0, ((PyObject *)__pyx_v_node2));
+ Py_INCREF(((PyObject *)__pyx_v_node1));
+ PyTuple_SET_ITEM(__pyx_3, 1, ((PyObject *)__pyx_v_node1));
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":177 */
+ Py_INCREF(((PyObject *)__pyx_v_node1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":178 */
+ __pyx_1 = PyList_SetItem(__pyx_v_lst_or_tpl,1,((PyObject *)__pyx_v_node1)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":179 */
+ Py_INCREF(((PyObject *)__pyx_v_node2));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":180 */
+ __pyx_2 = PyList_SetItem(__pyx_v_lst_or_tpl,0,((PyObject *)__pyx_v_node2)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":181 */
+ Py_INCREF(__pyx_v_lst_or_tpl);
+ __pyx_r = __pyx_v_lst_or_tpl;
+ goto __pyx_L0;
+ }
+ __pyx_L8:;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":183 */
+ __pyx_1 = __pyx_v_is_tuple;
+ if (__pyx_1) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_lst_or_tpl);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_lst_or_tpl);
+ __pyx_3 = PyObject_CallObject(((PyObject *)(&PyList_Type)), __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_v_lst_or_tpl);
+ __pyx_v_lst_or_tpl = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":186 */
+ __pyx_4 = PyObject_GetAttr(__pyx_v_lst_or_tpl, __pyx_n_sort); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ __pyx_6 = PyDict_New(); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ __pyx_7 = __Pyx_GetName(__pyx_m, __pyx_n_get_key); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ if (PyDict_SetItem(__pyx_6, __pyx_n_key, __pyx_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ Py_DECREF(__pyx_7); __pyx_7 = 0;
+ __pyx_7 = PyInt_FromLong(__pyx_v_reverse); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ if (PyDict_SetItem(__pyx_6, __pyx_n_reverse, __pyx_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ Py_DECREF(__pyx_7); __pyx_7 = 0;
+ __pyx_7 = PyEval_CallObjectWithKeywords(__pyx_4, __pyx_3, __pyx_6); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ Py_DECREF(__pyx_7); __pyx_7 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":187 */
+ Py_INCREF(__pyx_v_lst_or_tpl);
+ __pyx_r = __pyx_v_lst_or_tpl;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_6);
+ Py_XDECREF(__pyx_7);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._sort_list_nodes");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node1);
+ Py_DECREF(__pyx_v_node2);
+ Py_DECREF(__pyx_v_lst_or_tpl);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_parent_map = 0;
+ PyObject *__pyx_v_do_cache = 0;
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"parent_map","do_cache",0};
+ __pyx_v_do_cache = __pyx_d1;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|O", __pyx_argnames, &__pyx_v_parent_map, &__pyx_v_do_cache)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_parent_map);
+ Py_INCREF(__pyx_v_do_cache);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":205 */
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":207 */
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_known_heads);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_known_heads = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":208 */
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_do_cache);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_do_cache);
+ __pyx_2 = PyObject_CallObject(((PyObject *)(&PyInt_Type)), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_3 = PyInt_AsLong(__pyx_2); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->do_cache = __pyx_3;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":212 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n__initialize_nodes); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_parent_map);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_parent_map);
+ __pyx_4 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 212; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":213 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n__find_gdfo); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_parent_map);
+ Py_DECREF(__pyx_v_do_cache);
+ return __pyx_r;
+}
+
+static void __pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph___dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph___dealloc__(PyObject *__pyx_v_self) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_child;
+ Py_ssize_t __pyx_v_pos;
+ PyObject *__pyx_v_temp_node;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":220 */
+ while (1) {
+ __pyx_1 = PyDict_Next(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes,(&__pyx_v_pos),NULL,(&__pyx_v_temp_node));
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":221 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ Py_DECREF(((PyObject *)__pyx_v_child));
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":222 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_child->__pyx_vtab)->clear_references(__pyx_v_child); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ }
+
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.__dealloc__");
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_child);
+ Py_DECREF(__pyx_v_self);
+}
+
+static struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__get_or_create_node(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject *__pyx_v_temp_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":228 */
+ __pyx_v_temp_node = PyDict_GetItem(__pyx_v_self->_nodes,__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":229 */
+ __pyx_1 = (__pyx_v_temp_node == NULL);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":230 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_key);
+ __pyx_3 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":231 */
+ __pyx_1 = PyDict_SetItem(__pyx_v_self->_nodes,__pyx_v_key,((PyObject *)__pyx_v_node)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":234 */
+ Py_INCREF(((PyObject *)__pyx_v_node));
+ __pyx_r = __pyx_v_node;
+ goto __pyx_L0;
+
+ __pyx_r = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph._get_or_create_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__populate_parents(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *__pyx_v_self,struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node,PyObject *__pyx_v_parent_keys) {
+ Py_ssize_t __pyx_v_num_parent_keys;
+ Py_ssize_t __pyx_v_pos;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_parent_node;
+ PyObject *__pyx_v_parent_nodes;
+ PyObject *__pyx_r;
+ Py_ssize_t __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_node);
+ Py_INCREF(__pyx_v_parent_keys);
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_parent_nodes = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":240 */
+ __pyx_1 = PyObject_Length(__pyx_v_parent_keys); if (__pyx_1 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; goto __pyx_L1;}
+ __pyx_v_num_parent_keys = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":242 */
+ __pyx_2 = PyTuple_New(__pyx_v_num_parent_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent_nodes);
+ __pyx_v_parent_nodes = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":243 */
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_v_num_parent_keys; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":251 */
+ __pyx_2 = __Pyx_GetItemInt(__pyx_v_parent_keys, __pyx_v_pos); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; goto __pyx_L1;}
+ __pyx_3 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self->__pyx_vtab)->_get_or_create_node(__pyx_v_self,__pyx_2)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":253 */
+ Py_INCREF(((PyObject *)__pyx_v_parent_node));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":254 */
+ PyTuple_SET_ITEM(__pyx_v_parent_nodes,__pyx_v_pos,((PyObject *)__pyx_v_parent_node));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":255 */
+ __pyx_4 = PyList_Append(__pyx_v_parent_node->children,((PyObject *)__pyx_v_node)); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; goto __pyx_L1;}
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":256 */
+ Py_INCREF(__pyx_v_parent_nodes);
+ Py_DECREF(__pyx_v_node->parents);
+ __pyx_v_node->parents = __pyx_v_parent_nodes;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph._populate_parents");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parent_node);
+ Py_DECREF(__pyx_v_parent_nodes);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_parent_keys);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__initialize_nodes(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph__initialize_nodes[] = "Populate self._nodes.\n\n After this has finished:\n - self._nodes will have an entry for every entry in parent_map.\n - ghosts will have a parent_keys = None,\n - all nodes found will also have child_keys populated with all known\n child keys,\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__initialize_nodes(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_parent_map = 0;
+ PyObject *__pyx_v_temp_key;
+ PyObject *__pyx_v_temp_parent_keys;
+ Py_ssize_t __pyx_v_pos;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ PyObject *__pyx_v_key;
+ PyObject *__pyx_v_parent_keys;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"parent_map",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_parent_map)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_parent_map);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_key = Py_None; Py_INCREF(Py_None);
+ __pyx_v_parent_keys = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":272 */
+ __pyx_1 = (!PyDict_CheckExact(__pyx_v_parent_map));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; goto __pyx_L1;}
+ Py_INCREF(__pyx_k12p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k12p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":275 */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":276 */
+ while (1) {
+ __pyx_1 = PyDict_Next(__pyx_v_parent_map,(&__pyx_v_pos),(&__pyx_v_temp_key),(&__pyx_v_temp_parent_keys));
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":277 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_key));
+ Py_DECREF(__pyx_v_key);
+ __pyx_v_key = ((PyObject *)__pyx_v_temp_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":278 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_parent_keys));
+ Py_DECREF(__pyx_v_parent_keys);
+ __pyx_v_parent_keys = ((PyObject *)__pyx_v_temp_parent_keys);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":279 */
+ __pyx_2 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->__pyx_vtab)->_get_or_create_node(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self),__pyx_v_key)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_2);
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":280 */
+ __pyx_3 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->__pyx_vtab)->_populate_parents(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self),__pyx_v_node,__pyx_v_parent_keys); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 280; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph._initialize_nodes");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_parent_keys);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_parent_map);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_tails(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_tails(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_temp_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ Py_ssize_t __pyx_v_pos;
+ PyObject *__pyx_v_tails;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_tails = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":287 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_tails);
+ __pyx_v_tails = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":288 */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":289 */
+ while (1) {
+ __pyx_2 = PyDict_Next(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes,(&__pyx_v_pos),NULL,(&__pyx_v_temp_node));
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":290 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":291 */
+ __pyx_2 = __pyx_v_node->parents == Py_None;
+ if (!__pyx_2) {
+ __pyx_2 = (PyTuple_GET_SIZE(__pyx_v_node->parents) == 0);
+ }
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":292 */
+ __pyx_v_node->gdfo = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":293 */
+ __pyx_2 = PyList_Append(__pyx_v_tails,((PyObject *)__pyx_v_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":294 */
+ Py_INCREF(__pyx_v_tails);
+ __pyx_r = __pyx_v_tails;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph._find_tails");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_tails);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_tips(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_tips(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_temp_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ Py_ssize_t __pyx_v_pos;
+ PyObject *__pyx_v_tips;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_tips = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":301 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_tips);
+ __pyx_v_tips = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":302 */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":303 */
+ while (1) {
+ __pyx_2 = PyDict_Next(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes,(&__pyx_v_pos),NULL,(&__pyx_v_temp_node));
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":304 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":305 */
+ __pyx_2 = (PyList_GET_SIZE(__pyx_v_node->children) == 0);
+ if (__pyx_2) {
+ __pyx_2 = PyList_Append(__pyx_v_tips,((PyObject *)__pyx_v_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":307 */
+ Py_INCREF(__pyx_v_tips);
+ __pyx_r = __pyx_v_tips;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph._find_tips");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_tips);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_gdfo(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_gdfo(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_child;
+ Py_ssize_t __pyx_v_pos;
+ Py_ssize_t __pyx_v_last_item;
+ long __pyx_v_next_gdfo;
+ PyObject *__pyx_v_pending;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ Py_ssize_t __pyx_4;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_pending = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":318 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n__find_tails); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_pending);
+ __pyx_v_pending = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":320 */
+ __pyx_v_last_item = (PyList_GET_SIZE(__pyx_v_pending) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":321 */
+ while (1) {
+ __pyx_3 = (__pyx_v_last_item >= 0);
+ if (!__pyx_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":324 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_pending,__pyx_v_last_item)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":325 */
+ __pyx_v_last_item = (__pyx_v_last_item - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":326 */
+ __pyx_v_next_gdfo = (__pyx_v_node->gdfo + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":327 */
+ __pyx_4 = PyList_GET_SIZE(__pyx_v_node->children);
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_4; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":328 */
+ __pyx_2 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_node->children,__pyx_v_pos)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_child));
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_2);
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":329 */
+ __pyx_3 = (__pyx_v_next_gdfo > __pyx_v_child->gdfo);
+ if (__pyx_3) {
+ __pyx_v_child->gdfo = __pyx_v_next_gdfo;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":331 */
+ __pyx_v_child->seen = (__pyx_v_child->seen + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":332 */
+ __pyx_3 = (__pyx_v_child->seen == PyTuple_GET_SIZE(__pyx_v_child->parents));
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":334 */
+ __pyx_v_last_item = (__pyx_v_last_item + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":335 */
+ __pyx_3 = (__pyx_v_last_item < PyList_GET_SIZE(__pyx_v_pending));
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":336 */
+ Py_INCREF(((PyObject *)__pyx_v_child));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":337 */
+ __pyx_3 = PyList_SetItem(__pyx_v_pending,__pyx_v_last_item,((PyObject *)__pyx_v_child)); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; goto __pyx_L1;}
+ goto __pyx_L8;
+ }
+ /*else*/ {
+ __pyx_3 = PyList_Append(__pyx_v_pending,((PyObject *)__pyx_v_child)); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; goto __pyx_L1;}
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":342 */
+ __pyx_v_child->seen = 0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ }
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph._find_gdfo");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_child);
+ Py_DECREF(__pyx_v_pending);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_add_node(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_add_node[] = "Add a new node to the graph.\n\n If this fills in a ghost, then the gdfos of all children will be\n updated accordingly.\n \n :param key: The node being added. If this is a duplicate, this is a\n no-op.\n :param parent_keys: The parents of the given node.\n :return: None (should we return if this was a ghost, etc?)\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_add_node(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_v_parent_keys = 0;
+ PyObject *__pyx_v_maybe_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_parent_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_child_node;
+ long __pyx_v_parent_gdfo;
+ long __pyx_v_next_gdfo;
+ PyObject *__pyx_v_existing_parent_keys;
+ PyObject *__pyx_v_pending;
+ PyObject *__pyx_v_pending_popleft;
+ PyObject *__pyx_v_pending_append;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ static char *__pyx_argnames[] = {"key","parent_keys",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_key, &__pyx_v_parent_keys)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ Py_INCREF(__pyx_v_parent_keys);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_child_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_existing_parent_keys = Py_None; Py_INCREF(Py_None);
+ __pyx_v_pending = Py_None; Py_INCREF(Py_None);
+ __pyx_v_pending_popleft = Py_None; Py_INCREF(Py_None);
+ __pyx_v_pending_append = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":359 */
+ __pyx_v_maybe_node = PyDict_GetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes,__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":360 */
+ __pyx_1 = (__pyx_v_maybe_node != NULL);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":361 */
+ Py_INCREF(((PyObject *)__pyx_v_maybe_node));
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_maybe_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":362 */
+ __pyx_1 = __pyx_v_node->parents == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":364 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->__pyx_vtab)->_populate_parents(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self),__pyx_v_node,__pyx_v_parent_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":366 */
+ __pyx_2 = PyObject_GetAttr(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_known_heads, __pyx_n_clear); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; goto __pyx_L1;}
+ __pyx_3 = PyObject_CallObject(__pyx_2, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":368 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 368; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_existing_parent_keys);
+ __pyx_v_existing_parent_keys = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":369 */
+ __pyx_3 = PyObject_GetIter(__pyx_v_node->parents); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; goto __pyx_L1;}
+ for (;;) {
+ __pyx_2 = PyIter_Next(__pyx_3);
+ if (!__pyx_2) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_2, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_GetAttr(__pyx_v_existing_parent_keys, __pyx_n_append); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_parent_node->key);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_parent_node->key);
+ __pyx_5 = PyObject_CallObject(__pyx_2, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ }
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":373 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_parent_keys);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_parent_keys);
+ __pyx_4 = PyObject_CallObject(((PyObject *)(&PyList_Type)), __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_parent_keys);
+ __pyx_v_parent_keys = __pyx_4;
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":374 */
+ if (PyObject_Cmp(__pyx_v_existing_parent_keys, __pyx_v_parent_keys, &__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; goto __pyx_L1;}
+ __pyx_1 = __pyx_1 == 0;
+ if (__pyx_1) {
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+ __pyx_5 = PyTuple_New(3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_key);
+ Py_INCREF(__pyx_v_existing_parent_keys);
+ PyTuple_SET_ITEM(__pyx_5, 1, __pyx_v_existing_parent_keys);
+ Py_INCREF(__pyx_v_parent_keys);
+ PyTuple_SET_ITEM(__pyx_5, 2, __pyx_v_parent_keys);
+ __pyx_3 = PyNumber_Remainder(__pyx_k15p, __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; goto __pyx_L1;}
+ }
+ __pyx_L6:;
+ }
+ __pyx_L3:;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":382 */
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_v_key);
+ __pyx_3 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode), __pyx_5); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; goto __pyx_L1;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":383 */
+ __pyx_1 = PyDict_SetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes,__pyx_v_key,((PyObject *)__pyx_v_node)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 383; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":384 */
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->__pyx_vtab)->_populate_parents(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self),__pyx_v_node,__pyx_v_parent_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":385 */
+ __pyx_v_parent_gdfo = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":386 */
+ __pyx_4 = PyObject_GetIter(__pyx_v_node->parents); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; goto __pyx_L1;}
+ for (;;) {
+ __pyx_5 = PyIter_Next(__pyx_4);
+ if (!__pyx_5) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_5, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_5);
+ __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":387 */
+ __pyx_1 = (__pyx_v_parent_node->gdfo == (-1));
+ if (__pyx_1) {
+ __pyx_v_parent_node->gdfo = 1;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":390 */
+ __pyx_1 = (__pyx_v_parent_gdfo < __pyx_v_parent_node->gdfo);
+ if (__pyx_1) {
+ __pyx_v_parent_gdfo = __pyx_v_parent_node->gdfo;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+ }
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":392 */
+ __pyx_v_node->gdfo = (__pyx_v_parent_gdfo + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":401 */
+ __pyx_3 = __Pyx_GetName(__pyx_m, __pyx_n_deque); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; goto __pyx_L1;}
+ __pyx_2 = PyList_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_v_node));
+ PyList_SET_ITEM(__pyx_2, 0, ((PyObject *)__pyx_v_node));
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_5); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ Py_DECREF(__pyx_v_pending);
+ __pyx_v_pending = __pyx_4;
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":402 */
+ __pyx_2 = PyObject_GetAttr(__pyx_v_pending, __pyx_n_popleft); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_pending_popleft);
+ __pyx_v_pending_popleft = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":403 */
+ __pyx_3 = PyObject_GetAttr(__pyx_v_pending, __pyx_n_append); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_pending_append);
+ __pyx_v_pending_append = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":404 */
+ while (1) {
+ __pyx_1 = PyObject_IsTrue(__pyx_v_pending); if (__pyx_1 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 404; goto __pyx_L1;}
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":405 */
+ __pyx_5 = PyObject_CallObject(__pyx_v_pending_popleft, 0); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; goto __pyx_L1;}
+ if (!__Pyx_TypeTest(__pyx_5, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_5);
+ __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":406 */
+ __pyx_v_next_gdfo = (__pyx_v_node->gdfo + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":407 */
+ __pyx_4 = PyObject_GetIter(__pyx_v_node->children); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; goto __pyx_L1;}
+ for (;;) {
+ __pyx_2 = PyIter_Next(__pyx_4);
+ if (!__pyx_2) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; goto __pyx_L1;}
+ break;
+ }
+ if (!__Pyx_TypeTest(__pyx_2, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_child_node));
+ __pyx_v_child_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_2);
+ __pyx_2 = 0;
+ __pyx_1 = (__pyx_v_child_node->gdfo < __pyx_v_next_gdfo);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":411 */
+ __pyx_v_child_node->gdfo = __pyx_v_next_gdfo;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":412 */
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_v_child_node));
+ PyTuple_SET_ITEM(__pyx_3, 0, ((PyObject *)__pyx_v_child_node));
+ __pyx_5 = PyObject_CallObject(__pyx_v_pending_append, __pyx_3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+ }
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.add_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_parent_node);
+ Py_DECREF(__pyx_v_child_node);
+ Py_DECREF(__pyx_v_existing_parent_keys);
+ Py_DECREF(__pyx_v_pending);
+ Py_DECREF(__pyx_v_pending_popleft);
+ Py_DECREF(__pyx_v_pending_append);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_parent_keys);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_heads(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_heads[] = "Return the heads from amongst keys.\n\n This is done by searching the ancestries of each key. Any key that is\n reachable from another key is not returned; all the others are.\n\n This operation scales with the relative depth between any two keys. It\n uses gdfo to avoid walking all ancestry.\n\n :param keys: An iterable of keys.\n :return: A set of the heads. Note that as a set there is no ordering\n information. Callers will need to filter their input to create\n order if they need it.\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_heads(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_keys = 0;
+ PyObject *__pyx_v_maybe_node;
+ PyObject *__pyx_v_maybe_heads;
+ PyObject *__pyx_v_temp_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ Py_ssize_t __pyx_v_pos;
+ Py_ssize_t __pyx_v_last_item;
+ long __pyx_v_min_gdfo;
+ PyObject *__pyx_v_heads_key;
+ PyObject *__pyx_v_candidate_nodes;
+ PyObject *__pyx_v_key;
+ PyObject *__pyx_v_cleanup;
+ PyObject *__pyx_v_pending;
+ PyObject *__pyx_v_parent_node;
+ PyObject *__pyx_v_heads;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ int __pyx_5;
+ Py_ssize_t __pyx_6;
+ static char *__pyx_argnames[] = {"keys",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_keys)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_keys);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_heads_key = Py_None; Py_INCREF(Py_None);
+ __pyx_v_candidate_nodes = Py_None; Py_INCREF(Py_None);
+ __pyx_v_key = Py_None; Py_INCREF(Py_None);
+ __pyx_v_cleanup = Py_None; Py_INCREF(Py_None);
+ __pyx_v_pending = Py_None; Py_INCREF(Py_None);
+ __pyx_v_parent_node = Py_None; Py_INCREF(Py_None);
+ __pyx_v_heads = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":435 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_frozenset); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_keys);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_keys);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_heads_key);
+ __pyx_v_heads_key = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":436 */
+ __pyx_v_maybe_heads = PyDict_GetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_known_heads,__pyx_v_heads_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":437 */
+ __pyx_4 = (__pyx_v_maybe_heads != NULL);
+ if (__pyx_4) {
+ Py_INCREF(((PyObject *)__pyx_v_maybe_heads));
+ __pyx_r = ((PyObject *)__pyx_v_maybe_heads);
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":440 */
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 440; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_candidate_nodes);
+ __pyx_v_candidate_nodes = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":441 */
+ __pyx_2 = PyObject_GetIter(__pyx_v_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_2);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_key);
+ __pyx_v_key = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":442 */
+ __pyx_v_maybe_node = PyDict_GetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes,__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":443 */
+ __pyx_4 = (__pyx_v_maybe_node == NULL);
+ if (__pyx_4) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_key);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_key);
+ __pyx_3 = PyNumber_Remainder(__pyx_k19p, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_KeyError, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":445 */
+ __pyx_4 = PyDict_SetItem(__pyx_v_candidate_nodes,__pyx_v_key,((PyObject *)__pyx_v_maybe_node)); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 445; goto __pyx_L1;}
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":446 */
+ __pyx_v_maybe_node = PyDict_GetItem(__pyx_v_candidate_nodes,__pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":447 */
+ __pyx_4 = (__pyx_v_maybe_node != NULL);
+ if (__pyx_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":449 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_candidate_nodes, __pyx_n_pop); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 449; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 449; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 449; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":450 */
+ __pyx_4 = PyObject_IsTrue(__pyx_v_candidate_nodes); if (__pyx_4 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 450; goto __pyx_L1;}
+ __pyx_5 = (!__pyx_4);
+ if (__pyx_5) {
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_frozenset); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; goto __pyx_L1;}
+ __pyx_3 = PyList_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ PyList_SET_ITEM(__pyx_3, 0, __pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":453 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_frozenset); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; goto __pyx_L1;}
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_candidate_nodes);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_candidate_nodes);
+ __pyx_3 = PyObject_CallObject(__pyx_1, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_heads_key);
+ __pyx_v_heads_key = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":454 */
+ __pyx_6 = PyDict_Size(__pyx_v_candidate_nodes); if (__pyx_6 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; goto __pyx_L1;}
+ __pyx_4 = (__pyx_6 < 2);
+ if (__pyx_4) {
+ Py_INCREF(__pyx_v_heads_key);
+ __pyx_r = __pyx_v_heads_key;
+ goto __pyx_L0;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":457 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_cleanup);
+ __pyx_v_cleanup = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":458 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_pending);
+ __pyx_v_pending = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":460 */
+ __pyx_6 = PyDict_Size(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes); if (__pyx_6 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; goto __pyx_L1;}
+ __pyx_v_min_gdfo = (__pyx_6 + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":463 */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":464 */
+ while (1) {
+ __pyx_5 = PyDict_Next(__pyx_v_candidate_nodes,(&__pyx_v_pos),NULL,(&__pyx_v_temp_node));
+ if (!__pyx_5) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":465 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":466 */
+ __pyx_4 = __pyx_v_node->parents != Py_None;
+ if (__pyx_4) {
+ __pyx_3 = PyObject_GetAttr(__pyx_v_pending, __pyx_n_extend); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 467; goto __pyx_L1;}
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 467; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_node->parents);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_node->parents);
+ __pyx_2 = PyObject_CallObject(__pyx_3, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 467; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":468 */
+ __pyx_5 = (__pyx_v_node->gdfo < __pyx_v_min_gdfo);
+ if (__pyx_5) {
+ __pyx_v_min_gdfo = __pyx_v_node->gdfo;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":472 */
+ __pyx_v_last_item = (PyList_GET_SIZE(__pyx_v_pending) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":473 */
+ while (1) {
+ __pyx_4 = (__pyx_v_last_item >= 0);
+ if (!__pyx_4) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":474 */
+ __pyx_3 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_pending,__pyx_v_last_item)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":475 */
+ __pyx_v_last_item = (__pyx_v_last_item - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":476 */
+ __pyx_5 = __pyx_v_node->seen;
+ if (__pyx_5) {
+ goto __pyx_L13;
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":479 */
+ __pyx_4 = PyList_Append(__pyx_v_cleanup,((PyObject *)__pyx_v_node)); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 479; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":480 */
+ __pyx_v_node->seen = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":481 */
+ __pyx_5 = (__pyx_v_node->gdfo <= __pyx_v_min_gdfo);
+ if (__pyx_5) {
+ goto __pyx_L13;
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":483 */
+ __pyx_4 = __pyx_v_node->parents != Py_None;
+ if (__pyx_4) {
+ __pyx_4 = (PyTuple_GET_SIZE(__pyx_v_node->parents) > 0);
+ }
+ if (__pyx_4) {
+ __pyx_6 = PyTuple_GET_SIZE(__pyx_v_node->parents);
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_6; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":485 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(__pyx_v_node->parents,__pyx_v_pos)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parent_node);
+ __pyx_v_parent_node = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":486 */
+ __pyx_v_last_item = (__pyx_v_last_item + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":487 */
+ __pyx_5 = (__pyx_v_last_item < PyList_GET_SIZE(__pyx_v_pending));
+ if (__pyx_5) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":488 */
+ Py_INCREF(__pyx_v_parent_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":489 */
+ __pyx_4 = PyList_SetItem(__pyx_v_pending,__pyx_v_last_item,__pyx_v_parent_node); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; goto __pyx_L1;}
+ goto __pyx_L20;
+ }
+ /*else*/ {
+ __pyx_5 = PyList_Append(__pyx_v_pending,__pyx_v_parent_node); if (__pyx_5 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 491; goto __pyx_L1;}
+ }
+ __pyx_L20:;
+ }
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+ __pyx_L13:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":492 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_heads);
+ __pyx_v_heads = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":493 */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":494 */
+ while (1) {
+ __pyx_4 = PyDict_Next(__pyx_v_candidate_nodes,(&__pyx_v_pos),NULL,(&__pyx_v_temp_node));
+ if (!__pyx_4) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":495 */
+ Py_INCREF(((PyObject *)__pyx_v_temp_node));
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_v_temp_node);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":496 */
+ __pyx_5 = (!__pyx_v_node->seen);
+ if (__pyx_5) {
+ __pyx_4 = PyList_Append(__pyx_v_heads,__pyx_v_node->key); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 497; goto __pyx_L1;}
+ goto __pyx_L23;
+ }
+ __pyx_L23:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":498 */
+ __pyx_3 = __Pyx_GetName(__pyx_b, __pyx_n_frozenset); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; goto __pyx_L1;}
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_heads);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_heads);
+ __pyx_2 = PyObject_CallObject(__pyx_3, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_heads);
+ __pyx_v_heads = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":499 */
+ __pyx_6 = PyList_GET_SIZE(__pyx_v_cleanup);
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_6; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":500 */
+ __pyx_3 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_cleanup,__pyx_v_pos)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 500; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":501 */
+ __pyx_v_node->seen = 0;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":502 */
+ __pyx_5 = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->do_cache;
+ if (__pyx_5) {
+ __pyx_4 = PyDict_SetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_known_heads,__pyx_v_heads_key,__pyx_v_heads); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; goto __pyx_L1;}
+ goto __pyx_L26;
+ }
+ __pyx_L26:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":504 */
+ Py_INCREF(__pyx_v_heads);
+ __pyx_r = __pyx_v_heads;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.heads");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_heads_key);
+ Py_DECREF(__pyx_v_candidate_nodes);
+ Py_DECREF(__pyx_v_key);
+ Py_DECREF(__pyx_v_cleanup);
+ Py_DECREF(__pyx_v_pending);
+ Py_DECREF(__pyx_v_parent_node);
+ Py_DECREF(__pyx_v_heads);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_keys);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_topo_sort(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_topo_sort[] = "Return the nodes in topological order.\n\n All parents must occur before all children.\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_topo_sort(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_child;
+ Py_ssize_t __pyx_v_pos;
+ Py_ssize_t __pyx_v_last_item;
+ PyObject *__pyx_v_pending;
+ PyObject *__pyx_v_topo_order;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ Py_ssize_t __pyx_4;
+ PyObject *__pyx_5 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_pending = Py_None; Py_INCREF(Py_None);
+ __pyx_v_topo_order = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":523 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n__find_tails); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 523; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 523; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_pending);
+ __pyx_v_pending = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":524 */
+ __pyx_3 = (PyList_GET_SIZE(__pyx_v_pending) == 0);
+ if (__pyx_3) {
+ __pyx_4 = PyObject_Length(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes); if (__pyx_4 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; goto __pyx_L1;}
+ __pyx_3 = (__pyx_4 > 0);
+ }
+ if (__pyx_3) {
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_errors); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_GraphCycleError); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ Py_INCREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes);
+ PyTuple_SET_ITEM(__pyx_1, 0, ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes);
+ __pyx_5 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_5, 0, 0);
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":527 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 527; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_topo_order);
+ __pyx_v_topo_order = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":529 */
+ __pyx_v_last_item = (PyList_GET_SIZE(__pyx_v_pending) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":530 */
+ while (1) {
+ __pyx_3 = (__pyx_v_last_item >= 0);
+ if (!__pyx_3) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":533 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_pending,__pyx_v_last_item)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 533; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":534 */
+ __pyx_v_last_item = (__pyx_v_last_item - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":535 */
+ __pyx_3 = __pyx_v_node->parents != Py_None;
+ if (__pyx_3) {
+ __pyx_3 = PyList_Append(__pyx_v_topo_order,__pyx_v_node->key); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 537; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":538 */
+ __pyx_4 = PyList_GET_SIZE(__pyx_v_node->children);
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_4; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":539 */
+ __pyx_5 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_node->children,__pyx_v_pos)); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 539; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_child));
+ __pyx_v_child = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_5);
+ __pyx_5 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":540 */
+ __pyx_3 = (__pyx_v_child->gdfo == (-1));
+ if (__pyx_3) {
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_errors); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_GraphCycleError); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_5 = PyTuple_New(1); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ Py_INCREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes);
+ PyTuple_SET_ITEM(__pyx_5, 0, ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes);
+ __pyx_2 = PyObject_CallObject(__pyx_1, __pyx_5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; goto __pyx_L1;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":544 */
+ __pyx_v_child->seen = (__pyx_v_child->seen + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":545 */
+ __pyx_3 = (__pyx_v_child->seen == PyTuple_GET_SIZE(__pyx_v_child->parents));
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":548 */
+ __pyx_v_last_item = (__pyx_v_last_item + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":549 */
+ __pyx_3 = (__pyx_v_last_item < PyList_GET_SIZE(__pyx_v_pending));
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":550 */
+ Py_INCREF(((PyObject *)__pyx_v_child));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":551 */
+ __pyx_3 = PyList_SetItem(__pyx_v_pending,__pyx_v_last_item,((PyObject *)__pyx_v_child)); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 551; goto __pyx_L1;}
+ goto __pyx_L10;
+ }
+ /*else*/ {
+ __pyx_3 = PyList_Append(__pyx_v_pending,((PyObject *)__pyx_v_child)); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; goto __pyx_L1;}
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":556 */
+ __pyx_v_child->seen = 0;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":558 */
+ Py_INCREF(__pyx_v_topo_order);
+ __pyx_r = __pyx_v_topo_order;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.topo_sort");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_child);
+ Py_DECREF(__pyx_v_pending);
+ Py_DECREF(__pyx_v_topo_order);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_gc_sort(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_gc_sort[] = "Return a reverse topological ordering which is \'stable\'.\n\n There are a few constraints:\n 1) Reverse topological (all children before all parents)\n 2) Grouped by prefix\n 3) \'stable\' sorting, so that we get the same result, independent of\n machine, or extra data.\n To do this, we use the same basic algorithm as topo_sort, but when we\n aren\'t sure what node to access next, we sort them lexicographically.\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_gc_sort(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_temp;
+ Py_ssize_t __pyx_v_pos;
+ Py_ssize_t __pyx_v_last_item;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_parent_node;
+ PyObject *__pyx_v_tips;
+ PyObject *__pyx_v_prefix_tips;
+ PyObject *__pyx_v_prefix;
+ PyObject *__pyx_v_tip_nodes;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_pending;
+ PyObject *__pyx_v_parents;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ Py_ssize_t __pyx_3;
+ int __pyx_4;
+ Py_ssize_t __pyx_5;
+ PyObject *__pyx_6 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_tips = Py_None; Py_INCREF(Py_None);
+ __pyx_v_prefix_tips = Py_None; Py_INCREF(Py_None);
+ __pyx_v_prefix = Py_None; Py_INCREF(Py_None);
+ __pyx_v_tip_nodes = Py_None; Py_INCREF(Py_None);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+ __pyx_v_pending = Py_None; Py_INCREF(Py_None);
+ __pyx_v_parents = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":575 */
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n__find_tips); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_tips);
+ __pyx_v_tips = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":577 */
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_prefix_tips);
+ __pyx_v_prefix_tips = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":578 */
+ __pyx_3 = PyList_GET_SIZE(__pyx_v_tips);
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_3; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":579 */
+ __pyx_2 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_tips,__pyx_v_pos)); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_2);
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":580 */
+ __pyx_4 = PyString_CheckExact(__pyx_v_node->key);
+ if (!__pyx_4) {
+ __pyx_5 = PyObject_Length(__pyx_v_node->key); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; goto __pyx_L1;}
+ __pyx_4 = (__pyx_5 == 1);
+ }
+ if (__pyx_4) {
+ Py_INCREF(__pyx_k25p);
+ Py_DECREF(__pyx_v_prefix);
+ __pyx_v_prefix = __pyx_k25p;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+ __pyx_1 = __Pyx_GetItemInt(__pyx_v_node->key, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_prefix);
+ __pyx_v_prefix = __pyx_1;
+ __pyx_1 = 0;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":584 */
+ __pyx_v_temp = PyDict_GetItem(__pyx_v_prefix_tips,__pyx_v_prefix);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":585 */
+ __pyx_4 = (__pyx_v_temp == NULL);
+ if (__pyx_4) {
+ __pyx_2 = PyList_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_v_node));
+ PyList_SET_ITEM(__pyx_2, 0, ((PyObject *)__pyx_v_node));
+ if (PyObject_SetItem(__pyx_v_prefix_tips, __pyx_v_prefix, __pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":588 */
+ Py_INCREF(((PyObject *)__pyx_v_temp));
+ Py_DECREF(__pyx_v_tip_nodes);
+ __pyx_v_tip_nodes = ((PyObject *)__pyx_v_temp);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":589 */
+ __pyx_4 = PyList_Append(__pyx_v_tip_nodes,((PyObject *)__pyx_v_node)); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; goto __pyx_L1;}
+ }
+ __pyx_L5:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":591 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":592 */
+ __pyx_2 = __Pyx_GetName(__pyx_b, __pyx_n_sorted); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; goto __pyx_L1;}
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_prefix_tips);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_prefix_tips);
+ __pyx_6 = PyObject_CallObject(__pyx_2, __pyx_1); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_2 = PyObject_GetIter(__pyx_6); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; goto __pyx_L1;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ for (;;) {
+ __pyx_1 = PyIter_Next(__pyx_2);
+ if (!__pyx_1) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; goto __pyx_L1;}
+ break;
+ }
+ Py_DECREF(__pyx_v_prefix);
+ __pyx_v_prefix = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":593 */
+ __pyx_v_temp = PyDict_GetItem(__pyx_v_prefix_tips,__pyx_v_prefix);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":594 */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ if (!(__pyx_v_temp != NULL)) {
+ PyErr_SetNone(PyExc_AssertionError);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; goto __pyx_L1;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":595 */
+ Py_INCREF(((PyObject *)__pyx_v_temp));
+ Py_DECREF(__pyx_v_tip_nodes);
+ __pyx_v_tip_nodes = ((PyObject *)__pyx_v_temp);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":596 */
+ __pyx_6 = __pyx_f_6bzrlib_16_known_graph_pyx__sort_list_nodes(__pyx_v_tip_nodes,1); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_pending);
+ __pyx_v_pending = __pyx_6;
+ __pyx_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":597 */
+ __pyx_v_last_item = (PyList_GET_SIZE(__pyx_v_pending) - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":598 */
+ while (1) {
+ __pyx_4 = (__pyx_v_last_item >= 0);
+ if (!__pyx_4) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":599 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_pending,__pyx_v_last_item)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":600 */
+ __pyx_v_last_item = (__pyx_v_last_item - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":601 */
+ __pyx_4 = __pyx_v_node->parents == Py_None;
+ if (__pyx_4) {
+ goto __pyx_L8;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":604 */
+ __pyx_4 = PyList_Append(__pyx_v_result,__pyx_v_node->key); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":611 */
+ __pyx_6 = __pyx_f_6bzrlib_16_known_graph_pyx__sort_list_nodes(__pyx_v_node->parents,1); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 611; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_parents);
+ __pyx_v_parents = __pyx_6;
+ __pyx_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":612 */
+ __pyx_5 = PyObject_Length(__pyx_v_parents); if (__pyx_5 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 612; goto __pyx_L1;}
+ for (__pyx_v_pos = 0; __pyx_v_pos < __pyx_5; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":613 */
+ __pyx_4 = PyTuple_CheckExact(__pyx_v_parents);
+ if (__pyx_4) {
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(__pyx_v_parents,__pyx_v_pos)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+ goto __pyx_L13;
+ }
+ /*else*/ {
+ __pyx_6 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_parents,__pyx_v_pos)); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_6);
+ __pyx_6 = 0;
+ }
+ __pyx_L13:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":618 */
+ __pyx_v_parent_node->seen = (__pyx_v_parent_node->seen + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":619 */
+ __pyx_4 = (__pyx_v_parent_node->seen == PyList_GET_SIZE(__pyx_v_parent_node->children));
+ if (__pyx_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":623 */
+ __pyx_v_last_item = (__pyx_v_last_item + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":624 */
+ __pyx_4 = (__pyx_v_last_item < PyList_GET_SIZE(__pyx_v_pending));
+ if (__pyx_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":625 */
+ Py_INCREF(((PyObject *)__pyx_v_parent_node));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":626 */
+ __pyx_4 = PyList_SetItem(__pyx_v_pending,__pyx_v_last_item,((PyObject *)__pyx_v_parent_node)); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; goto __pyx_L1;}
+ goto __pyx_L15;
+ }
+ /*else*/ {
+ __pyx_4 = PyList_Append(__pyx_v_pending,((PyObject *)__pyx_v_parent_node)); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 628; goto __pyx_L1;}
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":629 */
+ __pyx_v_parent_node->seen = 0;
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+ }
+ __pyx_L8:;
+ }
+ }
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":630 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_6);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.gc_sort");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_parent_node);
+ Py_DECREF(__pyx_v_tips);
+ Py_DECREF(__pyx_v_prefix_tips);
+ Py_DECREF(__pyx_v_prefix);
+ Py_DECREF(__pyx_v_tip_nodes);
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_pending);
+ Py_DECREF(__pyx_v_parents);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_merge_sort(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_merge_sort[] = "Compute the merge sorted graph output.";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_merge_sort(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_tip_key = 0;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_v_sorter;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {"tip_key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_tip_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_tip_key);
+ __pyx_v_sorter = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":639 */
+ __pyx_1 = PyTuple_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 639; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_self);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_self);
+ Py_INCREF(__pyx_v_tip_key);
+ PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_tip_key);
+ __pyx_2 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_16_known_graph_pyx__MergeSorter), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 639; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(((PyObject *)__pyx_v_sorter));
+ __pyx_v_sorter = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_2);
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":640 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_sorter->__pyx_vtab)->topo_order(__pyx_v_sorter); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 640; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.merge_sort");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_sorter);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_tip_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_get_parent_keys(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_get_parent_keys[] = "Get the parents for a key\n \n Returns a list containing the parent keys. If the key is a ghost,\n None is returned. A KeyError will be raised if the key is not in\n the graph.\n \n :param key: Key to check (eg revision_id)\n :return: A list of parents\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_get_parent_keys(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = PyObject_GetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes, __pyx_v_key); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 652; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_parent_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 652; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.get_parent_keys");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_get_child_keys(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_get_child_keys[] = "Get the children for a key\n \n Returns a list containing the child keys. A KeyError will be raised\n if the key is not in the graph.\n \n :param key: Key to check (eg revision_id)\n :return: A list of children\n ";
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_get_child_keys(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = PyObject_GetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_self)->_nodes, __pyx_v_key); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n_child_keys); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx.KnownGraph.get_child_keys");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ int __pyx_r;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":687 */
+ Py_INCREF(__pyx_v_key);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->key);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->key = __pyx_v_key;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":688 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->merge_depth = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":689 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((PyObject *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->left_parent));
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->left_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":690 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((PyObject *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->left_pending_parent));
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->left_pending_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":691 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->pending_parents);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->pending_parents = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":692 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->_revno_first = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":693 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->_revno_second = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":694 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->_revno_last = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":695 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->is_first_child = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":696 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->seen_by_child = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":697 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->completed = 0;
+
+ __pyx_r = 0;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode___repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode___repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ PyObject *__pyx_6 = 0;
+ PyObject *__pyx_7 = 0;
+ PyObject *__pyx_8 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n___class__); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; goto __pyx_L1;}
+ __pyx_2 = PyObject_GetAttr(__pyx_1, __pyx_n___name__); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->merge_depth); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 702; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->_revno_first); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; goto __pyx_L1;}
+ __pyx_4 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->_revno_second); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; goto __pyx_L1;}
+ __pyx_5 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->_revno_last); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; goto __pyx_L1;}
+ __pyx_6 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->is_first_child); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; goto __pyx_L1;}
+ __pyx_7 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->seen_by_child); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; goto __pyx_L1;}
+ __pyx_8 = PyTuple_New(8); if (!__pyx_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_8, 0, __pyx_2);
+ Py_INCREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->key);
+ PyTuple_SET_ITEM(__pyx_8, 1, ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->key);
+ PyTuple_SET_ITEM(__pyx_8, 2, __pyx_1);
+ PyTuple_SET_ITEM(__pyx_8, 3, __pyx_3);
+ PyTuple_SET_ITEM(__pyx_8, 4, __pyx_4);
+ PyTuple_SET_ITEM(__pyx_8, 5, __pyx_5);
+ PyTuple_SET_ITEM(__pyx_8, 6, __pyx_6);
+ PyTuple_SET_ITEM(__pyx_8, 7, __pyx_7);
+ __pyx_2 = 0;
+ __pyx_1 = 0;
+ __pyx_3 = 0;
+ __pyx_4 = 0;
+ __pyx_5 = 0;
+ __pyx_6 = 0;
+ __pyx_7 = 0;
+ __pyx_2 = PyNumber_Remainder(__pyx_k29p, __pyx_8); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; goto __pyx_L1;}
+ Py_DECREF(__pyx_8); __pyx_8 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ Py_XDECREF(__pyx_6);
+ Py_XDECREF(__pyx_7);
+ Py_XDECREF(__pyx_8);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSortNode.__repr__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode_has_pending_parents(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_self) {
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":707 */
+ __pyx_2 = ((PyObject *)__pyx_v_self->left_pending_parent) != Py_None;
+ __pyx_1 = PyInt_FromLong(__pyx_2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; goto __pyx_L1;}
+ __pyx_2 = PyObject_IsTrue(__pyx_1); if (__pyx_2 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; goto __pyx_L1;}
+ if (!__pyx_2) {
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = __pyx_v_self->pending_parents;
+ Py_INCREF(__pyx_1);
+ }
+ __pyx_2 = PyObject_IsTrue(__pyx_1); if (__pyx_2 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (__pyx_2) {
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":709 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_WriteUnraisable("bzrlib._known_graph_pyx._MergeSortNode.has_pending_parents");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode__revno(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_self) {
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = (__pyx_v_self->_revno_first == (-1));
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":713 */
+ __pyx_1 = (__pyx_v_self->_revno_second != (-1));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_v_self));
+ PyTuple_SET_ITEM(__pyx_2, 0, ((PyObject *)__pyx_v_self));
+ __pyx_3 = PyNumber_Remainder(__pyx_k30p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_RuntimeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 714; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":715 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_self->_revno_last); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; goto __pyx_L1;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ __pyx_2 = PyInt_FromLong(__pyx_v_self->_revno_first); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(__pyx_v_self->_revno_second); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ __pyx_4 = PyInt_FromLong(__pyx_v_self->_revno_last); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ __pyx_5 = PyTuple_New(3); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_5, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_5, 1, __pyx_3);
+ PyTuple_SET_ITEM(__pyx_5, 2, __pyx_4);
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_4 = 0;
+ __pyx_r = __pyx_5;
+ __pyx_5 = 0;
+ goto __pyx_L0;
+ }
+ __pyx_L2:;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSortNode._revno");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode_5revno___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode_5revno___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)->__pyx_vtab)->_revno(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSortNode.revno.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_known_graph = 0;
+ PyObject *__pyx_v_tip_key = 0;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ static char *__pyx_argnames[] = {"known_graph","tip_key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_known_graph, &__pyx_v_tip_key)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_known_graph);
+ Py_INCREF(__pyx_v_tip_key);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":746 */
+ if (!__Pyx_TypeTest(__pyx_v_known_graph, __pyx_ptype_6bzrlib_16_known_graph_pyx_KnownGraph)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 746; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_known_graph);
+ Py_DECREF(((PyObject *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->graph));
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->graph = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)__pyx_v_known_graph);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":748 */
+ __pyx_1 = PyDict_New(); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 748; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_revno_to_branch_count);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_revno_to_branch_count = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":749 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 749; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_depth_first_stack);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_depth_first_stack = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":750 */
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_last_stack_item = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":751 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 751; goto __pyx_L1;}
+ Py_DECREF(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_scheduled_nodes);
+ ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->_scheduled_nodes = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":752 */
+ __pyx_2 = __pyx_v_tip_key != Py_None;
+ if (__pyx_2) {
+ if (PyObject_Cmp(__pyx_v_tip_key, __pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION, &__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 752; goto __pyx_L1;}
+ __pyx_2 = __pyx_2 != 0;
+ if (__pyx_2) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 753; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ if (PyObject_Cmp(__pyx_v_tip_key, __pyx_1, &__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 753; goto __pyx_L1;}
+ __pyx_2 = __pyx_2 != 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ }
+ }
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":754 */
+ __pyx_1 = PyObject_GetItem(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->graph->_nodes, __pyx_v_tip_key); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 754; goto __pyx_L1;}
+ if (!__Pyx_TypeTest(__pyx_1, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 754; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":755 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self)->__pyx_vtab)->_push_node(((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self),__pyx_v_node,0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 755; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSorter.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_known_graph);
+ Py_DECREF(__pyx_v_tip_key);
+ return __pyx_r;
+}
+
+static struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__get_ms_node(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_v_self,struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_node);
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":761 */
+ __pyx_1 = __pyx_v_node->extra == Py_None;
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":762 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 762; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_node->key);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_node->key);
+ __pyx_3 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_16_known_graph_pyx__MergeSortNode), __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 762; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(((PyObject *)__pyx_v_ms_node));
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":763 */
+ Py_INCREF(((PyObject *)__pyx_v_ms_node));
+ Py_DECREF(__pyx_v_node->extra);
+ __pyx_v_node->extra = ((PyObject *)__pyx_v_ms_node);
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ Py_INCREF(__pyx_v_node->extra);
+ Py_DECREF(((PyObject *)__pyx_v_ms_node));
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_node->extra);
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":766 */
+ Py_INCREF(((PyObject *)__pyx_v_ms_node));
+ __pyx_r = __pyx_v_ms_node;
+ goto __pyx_L0;
+
+ __pyx_r = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSorter._get_ms_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_ms_node);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_node);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__push_node(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_v_self,struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node,long __pyx_v_merge_depth) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_parent_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_parent_node;
+ Py_ssize_t __pyx_v_pos;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ Py_ssize_t __pyx_4;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_node);
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ms_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":773 */
+ __pyx_1 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self->__pyx_vtab)->_get_ms_node(__pyx_v_self,__pyx_v_node)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 773; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_ms_node));
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":774 */
+ __pyx_v_ms_node->merge_depth = __pyx_v_merge_depth;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":775 */
+ __pyx_2 = __pyx_v_node->parents == Py_None;
+ if (__pyx_2) {
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; goto __pyx_L1;}
+ Py_INCREF(((PyObject *)__pyx_v_node));
+ PyTuple_SET_ITEM(__pyx_1, 0, ((PyObject *)__pyx_v_node));
+ __pyx_3 = PyNumber_Remainder(__pyx_k31p, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_RuntimeError, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":778 */
+ __pyx_2 = (PyTuple_GET_SIZE(__pyx_v_node->parents) > 0);
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":779 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(__pyx_v_node->parents,0)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":780 */
+ Py_INCREF(((PyObject *)__pyx_v_parent_node));
+ Py_DECREF(((PyObject *)__pyx_v_ms_node->left_parent));
+ __pyx_v_ms_node->left_parent = __pyx_v_parent_node;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":781 */
+ __pyx_2 = __pyx_v_parent_node->parents == Py_None;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":782 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((PyObject *)__pyx_v_ms_node->left_pending_parent));
+ __pyx_v_ms_node->left_pending_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":783 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((PyObject *)__pyx_v_ms_node->left_parent));
+ __pyx_v_ms_node->left_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None);
+ goto __pyx_L4;
+ }
+ /*else*/ {
+ Py_INCREF(((PyObject *)__pyx_v_parent_node));
+ Py_DECREF(((PyObject *)__pyx_v_ms_node->left_pending_parent));
+ __pyx_v_ms_node->left_pending_parent = __pyx_v_parent_node;
+ }
+ __pyx_L4:;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":786 */
+ __pyx_2 = (PyTuple_GET_SIZE(__pyx_v_node->parents) > 1);
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":787 */
+ __pyx_3 = PyList_New(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_ms_node->pending_parents);
+ __pyx_v_ms_node->pending_parents = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":788 */
+ __pyx_4 = PyTuple_GET_SIZE(__pyx_v_node->parents);
+ for (__pyx_v_pos = 1; __pyx_v_pos < __pyx_4; ++__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":789 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_tuple_node(__pyx_v_node->parents,__pyx_v_pos)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_parent_node));
+ __pyx_v_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":790 */
+ __pyx_2 = __pyx_v_parent_node->parents == Py_None;
+ if (__pyx_2) {
+ goto __pyx_L6;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":792 */
+ __pyx_2 = PyList_Append(__pyx_v_ms_node->pending_parents,((PyObject *)__pyx_v_parent_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; goto __pyx_L1;}
+ __pyx_L6:;
+ }
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":794 */
+ __pyx_v_ms_node->is_first_child = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":795 */
+ __pyx_2 = ((PyObject *)__pyx_v_ms_node->left_parent) != Py_None;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":796 */
+ __pyx_3 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self->__pyx_vtab)->_get_ms_node(__pyx_v_self,__pyx_v_ms_node->left_parent)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 796; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_ms_parent_node));
+ __pyx_v_ms_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":797 */
+ __pyx_2 = __pyx_v_ms_parent_node->seen_by_child;
+ if (__pyx_2) {
+ __pyx_v_ms_node->is_first_child = 0;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":799 */
+ __pyx_v_ms_parent_node->seen_by_child = 1;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":800 */
+ __pyx_v_self->_last_stack_item = (__pyx_v_self->_last_stack_item + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":801 */
+ __pyx_2 = (__pyx_v_self->_last_stack_item < PyList_GET_SIZE(__pyx_v_self->_depth_first_stack));
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":802 */
+ Py_INCREF(((PyObject *)__pyx_v_node));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":803 */
+ __pyx_2 = PyList_SetItem(__pyx_v_self->_depth_first_stack,__pyx_v_self->_last_stack_item,((PyObject *)__pyx_v_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; goto __pyx_L1;}
+ goto __pyx_L11;
+ }
+ /*else*/ {
+ __pyx_2 = PyList_Append(__pyx_v_self->_depth_first_stack,((PyObject *)__pyx_v_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 806; goto __pyx_L1;}
+ }
+ __pyx_L11:;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSorter._push_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_parent_node);
+ Py_DECREF(__pyx_v_ms_node);
+ Py_DECREF(__pyx_v_ms_parent_node);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_node);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__pop_node(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_v_self) {
+ PyObject *__pyx_v_temp;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_parent_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_prev_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_prev_node;
+ PyObject *__pyx_v_base_revno;
+ PyObject *__pyx_v_branch_count;
+ PyObject *__pyx_v_root_count;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ long __pyx_4;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ms_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ms_prev_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_prev_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_base_revno = Py_None; Py_INCREF(Py_None);
+ __pyx_v_branch_count = Py_None; Py_INCREF(Py_None);
+ __pyx_v_root_count = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":813 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_self->_depth_first_stack,__pyx_v_self->_last_stack_item)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":814 */
+ Py_INCREF(__pyx_v_node->extra);
+ Py_DECREF(((PyObject *)__pyx_v_ms_node));
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_node->extra);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":815 */
+ __pyx_v_self->_last_stack_item = (__pyx_v_self->_last_stack_item - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":816 */
+ __pyx_2 = ((PyObject *)__pyx_v_ms_node->left_parent) != Py_None;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":818 */
+ Py_INCREF(__pyx_v_ms_node->left_parent->extra);
+ Py_DECREF(((PyObject *)__pyx_v_ms_parent_node));
+ __pyx_v_ms_parent_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_ms_node->left_parent->extra);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":819 */
+ __pyx_2 = __pyx_v_ms_node->is_first_child;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":821 */
+ __pyx_v_ms_node->_revno_first = __pyx_v_ms_parent_node->_revno_first;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":822 */
+ __pyx_v_ms_node->_revno_second = __pyx_v_ms_parent_node->_revno_second;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":823 */
+ __pyx_v_ms_node->_revno_last = (__pyx_v_ms_parent_node->_revno_last + 1);
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":827 */
+ __pyx_2 = (__pyx_v_ms_parent_node->_revno_first == (-1));
+ if (__pyx_2) {
+ __pyx_1 = PyInt_FromLong(__pyx_v_ms_parent_node->_revno_last); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 829; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_base_revno);
+ __pyx_v_base_revno = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+ __pyx_1 = PyInt_FromLong(__pyx_v_ms_parent_node->_revno_first); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 831; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_base_revno);
+ __pyx_v_base_revno = __pyx_1;
+ __pyx_1 = 0;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":832 */
+ __pyx_v_temp = PyDict_GetItem(__pyx_v_self->_revno_to_branch_count,__pyx_v_base_revno);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":834 */
+ __pyx_2 = (__pyx_v_temp == NULL);
+ if (__pyx_2) {
+ __pyx_1 = PyInt_FromLong(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 835; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_branch_count);
+ __pyx_v_branch_count = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+ __pyx_1 = PyInt_FromLong(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Add(((PyObject *)__pyx_v_temp), __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_branch_count);
+ __pyx_v_branch_count = __pyx_3;
+ __pyx_3 = 0;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":838 */
+ __pyx_2 = PyDict_SetItem(__pyx_v_self->_revno_to_branch_count,__pyx_v_base_revno,__pyx_v_branch_count); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":840 */
+ __pyx_4 = PyInt_AsLong(__pyx_v_base_revno); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; goto __pyx_L1;}
+ __pyx_v_ms_node->_revno_first = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":841 */
+ __pyx_4 = PyInt_AsLong(__pyx_v_branch_count); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; goto __pyx_L1;}
+ __pyx_v_ms_node->_revno_second = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":842 */
+ __pyx_v_ms_node->_revno_last = 1;
+ }
+ __pyx_L3:;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":844 */
+ __pyx_1 = PyInt_FromLong(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; goto __pyx_L1;}
+ __pyx_v_temp = PyDict_GetItem(__pyx_v_self->_revno_to_branch_count,__pyx_1);
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":845 */
+ __pyx_2 = (__pyx_v_temp == NULL);
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":847 */
+ __pyx_3 = PyInt_FromLong(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 847; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_root_count);
+ __pyx_v_root_count = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":848 */
+ __pyx_v_ms_node->_revno_first = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":849 */
+ __pyx_v_ms_node->_revno_second = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":850 */
+ __pyx_v_ms_node->_revno_last = 1;
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":852 */
+ __pyx_1 = PyInt_FromLong(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Add(((PyObject *)__pyx_v_temp), __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_root_count);
+ __pyx_v_root_count = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":853 */
+ __pyx_v_ms_node->_revno_first = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":854 */
+ __pyx_4 = PyInt_AsLong(__pyx_v_root_count); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 854; goto __pyx_L1;}
+ __pyx_v_ms_node->_revno_second = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":855 */
+ __pyx_v_ms_node->_revno_last = 1;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":856 */
+ __pyx_1 = PyInt_FromLong(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 856; goto __pyx_L1;}
+ __pyx_2 = PyDict_SetItem(__pyx_v_self->_revno_to_branch_count,__pyx_1,__pyx_v_root_count); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 856; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":857 */
+ __pyx_v_ms_node->completed = 1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":858 */
+ __pyx_2 = (PyList_GET_SIZE(__pyx_v_self->_scheduled_nodes) == 0);
+ if (__pyx_2) {
+ Py_INCREF(Py_True);
+ Py_DECREF(__pyx_v_ms_node->end_of_merge);
+ __pyx_v_ms_node->end_of_merge = Py_True;
+ goto __pyx_L7;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":862 */
+ __pyx_3 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_self->_scheduled_nodes,(PyList_GET_SIZE(__pyx_v_self->_scheduled_nodes) - 1))); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 862; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_prev_node));
+ __pyx_v_prev_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":864 */
+ Py_INCREF(__pyx_v_prev_node->extra);
+ Py_DECREF(((PyObject *)__pyx_v_ms_prev_node));
+ __pyx_v_ms_prev_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_prev_node->extra);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":865 */
+ __pyx_2 = (__pyx_v_ms_prev_node->merge_depth < __pyx_v_ms_node->merge_depth);
+ if (__pyx_2) {
+ Py_INCREF(Py_True);
+ Py_DECREF(__pyx_v_ms_node->end_of_merge);
+ __pyx_v_ms_node->end_of_merge = Py_True;
+ goto __pyx_L8;
+ }
+ __pyx_2 = (__pyx_v_ms_prev_node->merge_depth == __pyx_v_ms_node->merge_depth);
+ if (__pyx_2) {
+ __pyx_2 = PySequence_Contains(__pyx_v_node->parents, ((PyObject *)__pyx_v_prev_node)); if (__pyx_2 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; goto __pyx_L1;}
+ __pyx_2 = !__pyx_2;
+ }
+ if (__pyx_2) {
+ Py_INCREF(Py_True);
+ Py_DECREF(__pyx_v_ms_node->end_of_merge);
+ __pyx_v_ms_node->end_of_merge = Py_True;
+ goto __pyx_L8;
+ }
+ /*else*/ {
+ Py_INCREF(Py_False);
+ Py_DECREF(__pyx_v_ms_node->end_of_merge);
+ __pyx_v_ms_node->end_of_merge = Py_False;
+ }
+ __pyx_L8:;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":875 */
+ __pyx_2 = PyList_Append(__pyx_v_self->_scheduled_nodes,((PyObject *)__pyx_v_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 875; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSorter._pop_node");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_ms_node);
+ Py_DECREF(__pyx_v_ms_parent_node);
+ Py_DECREF(__pyx_v_ms_prev_node);
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_prev_node);
+ Py_DECREF(__pyx_v_base_revno);
+ Py_DECREF(__pyx_v_branch_count);
+ Py_DECREF(__pyx_v_root_count);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__schedule_stack(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_v_self) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_last_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_next_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_last_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_next_node;
+ long __pyx_v_next_merge_depth;
+ PyObject *__pyx_v_ordered;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_last_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_next_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ms_last_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ms_next_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ordered = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":881 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 881; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_ordered);
+ __pyx_v_ordered = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":882 */
+ while (1) {
+ __pyx_2 = (__pyx_v_self->_last_stack_item >= 0);
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":884 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_self->_depth_first_stack,__pyx_v_self->_last_stack_item)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_last_node));
+ __pyx_v_last_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":886 */
+ __pyx_2 = (__pyx_v_last_node->gdfo == (-1));
+ if (__pyx_2) {
+ __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_n_errors); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; goto __pyx_L1;}
+ __pyx_3 = PyObject_GetAttr(__pyx_1, __pyx_n_GraphCycleError); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_self->graph->_nodes);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_self->graph->_nodes);
+ __pyx_4 = PyObject_CallObject(__pyx_3, __pyx_1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":890 */
+ Py_INCREF(__pyx_v_last_node->extra);
+ Py_DECREF(((PyObject *)__pyx_v_ms_last_node));
+ __pyx_v_ms_last_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_last_node->extra);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":891 */
+ __pyx_2 = (!((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_ms_last_node->__pyx_vtab)->has_pending_parents(__pyx_v_ms_last_node));
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":893 */
+ __pyx_3 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self->__pyx_vtab)->_pop_node(__pyx_v_self); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 893; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":894 */
+ goto __pyx_L2;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":895 */
+ while (1) {
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_ms_last_node->__pyx_vtab)->has_pending_parents(__pyx_v_ms_last_node);
+ if (!__pyx_2) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":896 */
+ __pyx_2 = ((PyObject *)__pyx_v_ms_last_node->left_pending_parent) != Py_None;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":898 */
+ Py_INCREF(((PyObject *)__pyx_v_ms_last_node->left_pending_parent));
+ Py_DECREF(((PyObject *)__pyx_v_next_node));
+ __pyx_v_next_node = __pyx_v_ms_last_node->left_pending_parent;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":899 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((PyObject *)__pyx_v_ms_last_node->left_pending_parent));
+ __pyx_v_ms_last_node->left_pending_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None);
+ goto __pyx_L8;
+ }
+ /*else*/ {
+ __pyx_1 = PyObject_GetAttr(__pyx_v_ms_last_node->pending_parents, __pyx_n_pop); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
+ __pyx_4 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ if (!__Pyx_TypeTest(__pyx_4, __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 908; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_next_node));
+ __pyx_v_next_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_4);
+ __pyx_4 = 0;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":909 */
+ __pyx_3 = ((PyObject *)((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self->__pyx_vtab)->_get_ms_node(__pyx_v_self,__pyx_v_next_node)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 909; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_ms_next_node));
+ __pyx_v_ms_next_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_3);
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":910 */
+ __pyx_2 = __pyx_v_ms_next_node->completed;
+ if (__pyx_2) {
+ goto __pyx_L6;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":917 */
+ __pyx_2 = __pyx_v_next_node == __pyx_v_ms_last_node->left_parent;
+ if (__pyx_2) {
+ __pyx_v_next_merge_depth = __pyx_v_ms_last_node->merge_depth;
+ goto __pyx_L10;
+ }
+ /*else*/ {
+ __pyx_v_next_merge_depth = (__pyx_v_ms_last_node->merge_depth + 1);
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":921 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self->__pyx_vtab)->_push_node(__pyx_v_self,__pyx_v_next_node,__pyx_v_next_merge_depth); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 921; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":924 */
+ goto __pyx_L7;
+ __pyx_L6:;
+ }
+ __pyx_L7:;
+ __pyx_L2:;
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSorter._schedule_stack");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_last_node);
+ Py_DECREF(__pyx_v_next_node);
+ Py_DECREF(__pyx_v_ms_last_node);
+ Py_DECREF(__pyx_v_ms_next_node);
+ Py_DECREF(__pyx_v_ordered);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter_topo_order(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *__pyx_v_self) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *__pyx_v_ms_node;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *__pyx_v_node;
+ Py_ssize_t __pyx_v_pos;
+ PyObject *__pyx_v_ordered;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_ordered = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":936 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter *)__pyx_v_self->__pyx_vtab)->_schedule_stack(__pyx_v_self); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":946 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 946; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_ordered);
+ __pyx_v_ordered = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":948 */
+ for (__pyx_v_pos = PyList_GET_SIZE(__pyx_v_self->_scheduled_nodes)-1; __pyx_v_pos >= 0; --__pyx_v_pos) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":949 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_16_known_graph_pyx__get_list_node(__pyx_v_self->_scheduled_nodes,__pyx_v_pos)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 949; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_node));
+ __pyx_v_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":950 */
+ Py_INCREF(__pyx_v_node->extra);
+ Py_DECREF(((PyObject *)__pyx_v_ms_node));
+ __pyx_v_ms_node = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)__pyx_v_node->extra);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":951 */
+ __pyx_2 = PyList_Append(__pyx_v_ordered,((PyObject *)__pyx_v_ms_node)); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 951; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":952 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_node->extra);
+ __pyx_v_node->extra = Py_None;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":954 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 954; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_self->_scheduled_nodes);
+ __pyx_v_self->_scheduled_nodes = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":955 */
+ Py_INCREF(__pyx_v_ordered);
+ __pyx_r = __pyx_v_ordered;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx._MergeSorter.topo_order");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_ms_node);
+ Py_DECREF(__pyx_v_node);
+ Py_DECREF(__pyx_v_ordered);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__KnownGraphNode __pyx_vtable_6bzrlib_16_known_graph_pyx__KnownGraphNode;
+
+static PyObject *__pyx_tp_new_6bzrlib_16_known_graph_pyx__KnownGraphNode(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__KnownGraphNode **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_16_known_graph_pyx__KnownGraphNode;
+ p->key = Py_None; Py_INCREF(Py_None);
+ p->parents = Py_None; Py_INCREF(Py_None);
+ p->children = Py_None; Py_INCREF(Py_None);
+ p->extra = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx__KnownGraphNode(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)o;
+ Py_XDECREF(p->key);
+ Py_XDECREF(p->parents);
+ Py_XDECREF(p->children);
+ Py_XDECREF(p->extra);
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_16_known_graph_pyx__KnownGraphNode(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)o;
+ if (p->key) {
+ e = (*v)(p->key, a); if (e) return e;
+ }
+ if (p->parents) {
+ e = (*v)(p->parents, a); if (e) return e;
+ }
+ if (p->children) {
+ e = (*v)(p->children, a); if (e) return e;
+ }
+ if (p->extra) {
+ e = (*v)(p->extra, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_16_known_graph_pyx__KnownGraphNode(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)o;
+ PyObject *t;
+ t = p->key;
+ p->key = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->parents;
+ p->parents = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->children;
+ p->children = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->extra;
+ p->extra = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static PyObject *__pyx_getprop_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_child_keys(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_10child_keys___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_parent_keys(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_11parent_keys___get__(o);
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_16_known_graph_pyx__KnownGraphNode[] = {
+ {0, 0, 0, 0}
+};
+
+static struct PyMemberDef __pyx_members_6bzrlib_16_known_graph_pyx__KnownGraphNode[] = {
+ {"gdfo", T_LONG, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode, gdfo), 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_16_known_graph_pyx__KnownGraphNode[] = {
+ {"child_keys", __pyx_getprop_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_child_keys, 0, 0, 0},
+ {"parent_keys", __pyx_getprop_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_parent_keys, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number__KnownGraphNode = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence__KnownGraphNode = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping__KnownGraphNode = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer__KnownGraphNode = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_16_known_graph_pyx__KnownGraphNode = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._known_graph_pyx._KnownGraphNode", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ __pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode___repr__, /*tp_repr*/
+ &__pyx_tp_as_number__KnownGraphNode, /*tp_as_number*/
+ &__pyx_tp_as_sequence__KnownGraphNode, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping__KnownGraphNode, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer__KnownGraphNode, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "Represents a single object in the known graph.", /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_methods*/
+ __pyx_members_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_members*/
+ __pyx_getsets_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_16_known_graph_pyx__KnownGraphNode, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter __pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter;
+
+static PyObject *__pyx_tp_new_6bzrlib_16_known_graph_pyx__MergeSorter(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSorter **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSorter;
+ p->graph = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)Py_None); Py_INCREF(Py_None);
+ p->_depth_first_stack = Py_None; Py_INCREF(Py_None);
+ p->_revno_to_branch_count = Py_None; Py_INCREF(Py_None);
+ p->_scheduled_nodes = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx__MergeSorter(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)o;
+ Py_XDECREF(((PyObject *)p->graph));
+ Py_XDECREF(p->_depth_first_stack);
+ Py_XDECREF(p->_revno_to_branch_count);
+ Py_XDECREF(p->_scheduled_nodes);
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_16_known_graph_pyx__MergeSorter(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)o;
+ if (p->graph) {
+ e = (*v)(((PyObject*)p->graph), a); if (e) return e;
+ }
+ if (p->_depth_first_stack) {
+ e = (*v)(p->_depth_first_stack, a); if (e) return e;
+ }
+ if (p->_revno_to_branch_count) {
+ e = (*v)(p->_revno_to_branch_count, a); if (e) return e;
+ }
+ if (p->_scheduled_nodes) {
+ e = (*v)(p->_scheduled_nodes, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_16_known_graph_pyx__MergeSorter(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter *)o;
+ PyObject *t;
+ t = ((PyObject *)p->graph);
+ p->graph = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->_depth_first_stack;
+ p->_depth_first_stack = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->_revno_to_branch_count;
+ p->_revno_to_branch_count = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->_scheduled_nodes;
+ p->_scheduled_nodes = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_16_known_graph_pyx__MergeSorter[] = {
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number__MergeSorter = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence__MergeSorter = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping__MergeSorter = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer__MergeSorter = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_16_known_graph_pyx__MergeSorter = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._known_graph_pyx._MergeSorter", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSorter), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx__MergeSorter, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number__MergeSorter, /*tp_as_number*/
+ &__pyx_tp_as_sequence__MergeSorter, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping__MergeSorter, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer__MergeSorter, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "This class does the work of computing the merge_sort ordering.\n\n We have some small advantages, in that we get all the extra information\n that KnownGraph knows, like knowing the child lists, etc.\n ", /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_16_known_graph_pyx__MergeSorter, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_16_known_graph_pyx__MergeSorter, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_16_known_graph_pyx__MergeSorter, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_16_known_graph_pyx__MergeSorter, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph __pyx_vtable_6bzrlib_16_known_graph_pyx_KnownGraph;
+
+static PyObject *__pyx_tp_new_6bzrlib_16_known_graph_pyx_KnownGraph(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx_KnownGraph **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_16_known_graph_pyx_KnownGraph;
+ p->_nodes = Py_None; Py_INCREF(Py_None);
+ p->_known_heads = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx_KnownGraph(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)o;
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++o->ob_refcnt;
+ __pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph___dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --o->ob_refcnt;
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_XDECREF(p->_nodes);
+ Py_XDECREF(p->_known_heads);
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_16_known_graph_pyx_KnownGraph(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)o;
+ if (p->_nodes) {
+ e = (*v)(p->_nodes, a); if (e) return e;
+ }
+ if (p->_known_heads) {
+ e = (*v)(p->_known_heads, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_16_known_graph_pyx_KnownGraph(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph *)o;
+ PyObject *t;
+ t = p->_nodes;
+ p->_nodes = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->_known_heads;
+ p->_known_heads = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_16_known_graph_pyx_KnownGraph[] = {
+ {"_initialize_nodes", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__initialize_nodes, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph__initialize_nodes},
+ {"_find_tails", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_tails, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_find_tips", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_tips, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_find_gdfo", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__find_gdfo, METH_VARARGS|METH_KEYWORDS, 0},
+ {"add_node", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_add_node, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_add_node},
+ {"heads", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_heads, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_heads},
+ {"topo_sort", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_topo_sort, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_topo_sort},
+ {"gc_sort", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_gc_sort, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_gc_sort},
+ {"merge_sort", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_merge_sort, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_merge_sort},
+ {"get_parent_keys", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_get_parent_keys, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_get_parent_keys},
+ {"get_child_keys", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph_get_child_keys, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_16_known_graph_pyx_10KnownGraph_get_child_keys},
+ {0, 0, 0, 0}
+};
+
+static struct PyMemberDef __pyx_members_6bzrlib_16_known_graph_pyx_KnownGraph[] = {
+ {"_nodes", T_OBJECT, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph, _nodes), 0, 0},
+ {"_known_heads", T_OBJECT, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph, _known_heads), 0, 0},
+ {"do_cache", T_INT, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph, do_cache), 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_KnownGraph = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_KnownGraph = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_KnownGraph = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_KnownGraph = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_16_known_graph_pyx_KnownGraph = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._known_graph_pyx.KnownGraph", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_16_known_graph_pyx_KnownGraph), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx_KnownGraph, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_KnownGraph, /*tp_as_number*/
+ &__pyx_tp_as_sequence_KnownGraph, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_KnownGraph, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_KnownGraph, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "This is a class which assumes we already know the full graph.", /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_16_known_graph_pyx_KnownGraph, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_16_known_graph_pyx_KnownGraph, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_16_known_graph_pyx_KnownGraph, /*tp_methods*/
+ __pyx_members_6bzrlib_16_known_graph_pyx_KnownGraph, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_16_known_graph_pyx_KnownGraph, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+static struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode __pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSortNode;
+
+static PyObject *__pyx_tp_new_6bzrlib_16_known_graph_pyx__MergeSortNode(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_16_known_graph_pyx__MergeSortNode **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSortNode;
+ p->key = Py_None; Py_INCREF(Py_None);
+ p->end_of_merge = Py_None; Py_INCREF(Py_None);
+ p->left_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ p->left_pending_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ p->pending_parents = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx__MergeSortNode(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)o;
+ Py_XDECREF(p->key);
+ Py_XDECREF(p->end_of_merge);
+ Py_XDECREF(((PyObject *)p->left_parent));
+ Py_XDECREF(((PyObject *)p->left_pending_parent));
+ Py_XDECREF(p->pending_parents);
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_16_known_graph_pyx__MergeSortNode(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)o;
+ if (p->key) {
+ e = (*v)(p->key, a); if (e) return e;
+ }
+ if (p->end_of_merge) {
+ e = (*v)(p->end_of_merge, a); if (e) return e;
+ }
+ if (p->left_parent) {
+ e = (*v)(((PyObject*)p->left_parent), a); if (e) return e;
+ }
+ if (p->left_pending_parent) {
+ e = (*v)(((PyObject*)p->left_pending_parent), a); if (e) return e;
+ }
+ if (p->pending_parents) {
+ e = (*v)(p->pending_parents, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_16_known_graph_pyx__MergeSortNode(PyObject *o) {
+ struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *p = (struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode *)o;
+ PyObject *t;
+ t = p->key;
+ p->key = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->end_of_merge;
+ p->end_of_merge = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = ((PyObject *)p->left_parent);
+ p->left_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = ((PyObject *)p->left_pending_parent);
+ p->left_pending_parent = ((struct __pyx_obj_6bzrlib_16_known_graph_pyx__KnownGraphNode *)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ t = p->pending_parents;
+ p->pending_parents = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static PyObject *__pyx_getprop_6bzrlib_16_known_graph_pyx_14_MergeSortNode_revno(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode_5revno___get__(o);
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_16_known_graph_pyx__MergeSortNode[] = {
+ {0, 0, 0, 0}
+};
+
+static struct PyMemberDef __pyx_members_6bzrlib_16_known_graph_pyx__MergeSortNode[] = {
+ {"key", T_OBJECT, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode, key), 0, 0},
+ {"merge_depth", T_LONG, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode, merge_depth), 0, 0},
+ {"end_of_merge", T_OBJECT, offsetof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode, end_of_merge), 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_16_known_graph_pyx__MergeSortNode[] = {
+ {"revno", __pyx_getprop_6bzrlib_16_known_graph_pyx_14_MergeSortNode_revno, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number__MergeSortNode = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence__MergeSortNode = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping__MergeSortNode = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer__MergeSortNode = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_16_known_graph_pyx__MergeSortNode = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._known_graph_pyx._MergeSortNode", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_16_known_graph_pyx__MergeSortNode), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ __pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode___repr__, /*tp_repr*/
+ &__pyx_tp_as_number__MergeSortNode, /*tp_as_number*/
+ &__pyx_tp_as_sequence__MergeSortNode, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping__MergeSortNode, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer__MergeSortNode, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "Tracks information about a node during the merge_sort operation.", /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_methods*/
+ __pyx_members_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_members*/
+ __pyx_getsets_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_16_known_graph_pyx__MergeSortNode, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+
+static struct PyMethodDef __pyx_methods[] = {
+ {"get_key", (PyCFunction)__pyx_f_6bzrlib_16_known_graph_pyx_get_key, METH_VARARGS|METH_KEYWORDS, 0},
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_known_graph_pyx(void); /*proto*/
+PyMODINIT_FUNC init_known_graph_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_known_graph_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ __pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION = Py_None; Py_INCREF(Py_None);
+ __pyx_vtabptr_6bzrlib_16_known_graph_pyx__KnownGraphNode = &__pyx_vtable_6bzrlib_16_known_graph_pyx__KnownGraphNode;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__KnownGraphNode.clear_references = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_15_KnownGraphNode_clear_references;
+ __pyx_type_6bzrlib_16_known_graph_pyx__KnownGraphNode.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_16_known_graph_pyx__KnownGraphNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_16_known_graph_pyx__KnownGraphNode.tp_dict, __pyx_vtabptr_6bzrlib_16_known_graph_pyx__KnownGraphNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "_KnownGraphNode", (PyObject *)&__pyx_type_6bzrlib_16_known_graph_pyx__KnownGraphNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_16_known_graph_pyx__KnownGraphNode = &__pyx_type_6bzrlib_16_known_graph_pyx__KnownGraphNode;
+ __pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSorter = &__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter._get_ms_node = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__get_ms_node;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter._push_node = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__push_node;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter._pop_node = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__pop_node;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter._schedule_stack = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter__schedule_stack;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSorter.topo_order = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_12_MergeSorter_topo_order;
+ __pyx_type_6bzrlib_16_known_graph_pyx__MergeSorter.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_16_known_graph_pyx__MergeSorter) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_16_known_graph_pyx__MergeSorter.tp_dict, __pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSorter) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "_MergeSorter", (PyObject *)&__pyx_type_6bzrlib_16_known_graph_pyx__MergeSorter) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_16_known_graph_pyx__MergeSorter = &__pyx_type_6bzrlib_16_known_graph_pyx__MergeSorter;
+ __pyx_vtabptr_6bzrlib_16_known_graph_pyx_KnownGraph = &__pyx_vtable_6bzrlib_16_known_graph_pyx_KnownGraph;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx_KnownGraph._get_or_create_node = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__get_or_create_node;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx_KnownGraph._populate_parents = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_10KnownGraph__populate_parents;
+ __pyx_type_6bzrlib_16_known_graph_pyx_KnownGraph.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_16_known_graph_pyx_KnownGraph) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_16_known_graph_pyx_KnownGraph.tp_dict, __pyx_vtabptr_6bzrlib_16_known_graph_pyx_KnownGraph) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "KnownGraph", (PyObject *)&__pyx_type_6bzrlib_16_known_graph_pyx_KnownGraph) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_16_known_graph_pyx_KnownGraph = &__pyx_type_6bzrlib_16_known_graph_pyx_KnownGraph;
+ __pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSortNode = &__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSortNode;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSortNode.has_pending_parents = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode_has_pending_parents;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_16_known_graph_pyx__MergeSortNode._revno = (void(*)(void))__pyx_f_6bzrlib_16_known_graph_pyx_14_MergeSortNode__revno;
+ __pyx_type_6bzrlib_16_known_graph_pyx__MergeSortNode.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_16_known_graph_pyx__MergeSortNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_16_known_graph_pyx__MergeSortNode.tp_dict, __pyx_vtabptr_6bzrlib_16_known_graph_pyx__MergeSortNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "_MergeSortNode", (PyObject *)&__pyx_type_6bzrlib_16_known_graph_pyx__MergeSortNode) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_16_known_graph_pyx__MergeSortNode = &__pyx_type_6bzrlib_16_known_graph_pyx__MergeSortNode;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":54 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_deque);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_deque);
+ __pyx_2 = __Pyx_Import(__pyx_n_collections, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_deque); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_deque, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":55 */
+ __pyx_2 = __Pyx_Import(__pyx_n_gc, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_gc, __pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":57 */
+ __pyx_1 = PyList_New(2); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_errors);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_errors);
+ Py_INCREF(__pyx_n_revision);
+ PyList_SET_ITEM(__pyx_1, 1, __pyx_n_revision);
+ __pyx_2 = __Pyx_Import(__pyx_n_bzrlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_errors); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_errors, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_revision); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_revision, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":60 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_revision); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_NULL_REVISION); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION);
+ __pyx_v_6bzrlib_16_known_graph_pyx_NULL_REVISION = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":199 */
+ Py_INCREF(Py_True);
+ __pyx_d1 = Py_True;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_known_graph_pyx.pyx":926 */
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._known_graph_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_known_graph_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+ if (!type) {
+ PyErr_Format(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ if (obj == Py_None || PyObject_TypeCheck(obj, type))
+ return 1;
+ PyErr_Format(PyExc_TypeError, "Cannot convert %s to %s",
+ obj->ob_type->tp_name, type->tp_name);
+ return 0;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i) {
+ PyTypeObject *t = o->ob_type;
+ PyObject *r;
+ if (t->tp_as_sequence && t->tp_as_sequence->sq_item)
+ r = PySequence_GetItem(o, i);
+ else {
+ PyObject *j = PyInt_FromLong(i);
+ if (!j)
+ return 0;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ }
+ return r;
+}
+
+static void __Pyx_WriteUnraisable(char *name) {
+ PyObject *old_exc, *old_val, *old_tb;
+ PyObject *ctx;
+ PyErr_Fetch(&old_exc, &old_val, &old_tb);
+ ctx = PyString_FromString(name);
+ PyErr_Restore(old_exc, old_val, old_tb);
+ if (!ctx)
+ ctx = Py_None;
+ PyErr_WriteUnraisable(ctx);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+ PyObject *pycobj = 0;
+ int result;
+
+ pycobj = PyCObject_FromVoidPtr(vtable, 0);
+ if (!pycobj)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0)
+ goto bad;
+ result = 0;
+ goto done;
+
+bad:
+ result = -1;
+done:
+ Py_XDECREF(pycobj);
+ return result;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *__import__ = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+ if (!__import__)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunction(__import__, "OOOO",
+ name, global_dict, empty_dict, list);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(__import__);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_known_graph_pyx.pyx b/bzrlib/_known_graph_pyx.pyx
new file mode 100644
index 0000000..a9e6d13
--- /dev/null
+++ b/bzrlib/_known_graph_pyx.pyx
@@ -0,0 +1,955 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Graph algorithms when we have already loaded everything.
+"""
+
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t
+ ctypedef struct PyObject:
+ pass
+
+ int PyString_CheckExact(object)
+
+ int PyObject_RichCompareBool(object, object, int)
+ int Py_LT
+
+ int PyTuple_CheckExact(object)
+ object PyTuple_New(Py_ssize_t n)
+ Py_ssize_t PyTuple_GET_SIZE(object t)
+ PyObject * PyTuple_GET_ITEM(object t, Py_ssize_t o)
+ void PyTuple_SET_ITEM(object t, Py_ssize_t o, object v)
+
+ int PyList_CheckExact(object)
+ Py_ssize_t PyList_GET_SIZE(object l)
+ PyObject * PyList_GET_ITEM(object l, Py_ssize_t o)
+ int PyList_SetItem(object l, Py_ssize_t o, object l) except -1
+ int PyList_Append(object l, object v) except -1
+
+ int PyDict_CheckExact(object d)
+ Py_ssize_t PyDict_Size(object d) except -1
+ PyObject * PyDict_GetItem(object d, object k)
+ int PyDict_SetItem(object d, object k, object v) except -1
+ int PyDict_DelItem(object d, object k) except -1
+ int PyDict_Next(object d, Py_ssize_t *pos, PyObject **k, PyObject **v)
+
+ void Py_INCREF(object)
+
+from collections import deque
+import gc
+
+from bzrlib import errors, revision
+
+cdef object NULL_REVISION
+NULL_REVISION = revision.NULL_REVISION
+
+
+cdef class _KnownGraphNode:
+ """Represents a single object in the known graph."""
+
+ cdef object key
+ cdef object parents
+ cdef object children
+ cdef public long gdfo
+ cdef int seen
+ cdef object extra
+
+ def __init__(self, key):
+ self.key = key
+ self.parents = None
+
+ self.children = []
+ # Greatest distance from origin
+ self.gdfo = -1
+ self.seen = 0
+ self.extra = None
+
+ property child_keys:
+ def __get__(self):
+ cdef _KnownGraphNode child
+
+ keys = []
+ for child in self.children:
+ PyList_Append(keys, child.key)
+ return keys
+
+ property parent_keys:
+ def __get__(self):
+ if self.parents is None:
+ return None
+
+ cdef _KnownGraphNode parent
+
+ keys = []
+ for parent in self.parents:
+ PyList_Append(keys, parent.key)
+ return keys
+
+ cdef clear_references(self):
+ self.parents = None
+ self.children = None
+
+ def __repr__(self):
+ cdef _KnownGraphNode node
+
+ parent_keys = []
+ if self.parents is not None:
+ for node in self.parents:
+ parent_keys.append(node.key)
+ child_keys = []
+ if self.children is not None:
+ for node in self.children:
+ child_keys.append(node.key)
+ return '%s(%s gdfo:%s par:%s child:%s)' % (
+ self.__class__.__name__, self.key, self.gdfo,
+ parent_keys, child_keys)
+
+
+cdef _KnownGraphNode _get_list_node(lst, Py_ssize_t pos):
+ cdef PyObject *temp_node
+
+ temp_node = PyList_GET_ITEM(lst, pos)
+ return <_KnownGraphNode>temp_node
+
+
+cdef _KnownGraphNode _get_tuple_node(tpl, Py_ssize_t pos):
+ cdef PyObject *temp_node
+
+ temp_node = PyTuple_GET_ITEM(tpl, pos)
+ return <_KnownGraphNode>temp_node
+
+
+def get_key(node):
+ cdef _KnownGraphNode real_node
+ real_node = node
+ return real_node.key
+
+
+cdef object _sort_list_nodes(object lst_or_tpl, int reverse):
+ """Sort a list of _KnownGraphNode objects.
+
+ If lst_or_tpl is a list, it may be mutated in place. It may also
+ just return the input list if everything is already sorted.
+ """
+ cdef _KnownGraphNode node1, node2
+ cdef int do_swap, is_tuple
+ cdef Py_ssize_t length
+
+ is_tuple = PyTuple_CheckExact(lst_or_tpl)
+ if not (is_tuple or PyList_CheckExact(lst_or_tpl)):
+ raise TypeError('lst_or_tpl must be a list or tuple.')
+ length = len(lst_or_tpl)
+ if length == 0 or length == 1:
+ return lst_or_tpl
+ if length == 2:
+ if is_tuple:
+ node1 = _get_tuple_node(lst_or_tpl, 0)
+ node2 = _get_tuple_node(lst_or_tpl, 1)
+ else:
+ node1 = _get_list_node(lst_or_tpl, 0)
+ node2 = _get_list_node(lst_or_tpl, 1)
+ if reverse:
+ do_swap = PyObject_RichCompareBool(node1.key, node2.key, Py_LT)
+ else:
+ do_swap = PyObject_RichCompareBool(node2.key, node1.key, Py_LT)
+ if not do_swap:
+ return lst_or_tpl
+ if is_tuple:
+ return (node2, node1)
+ else:
+ # Swap 'in-place', since lists are mutable
+ Py_INCREF(node1)
+ PyList_SetItem(lst_or_tpl, 1, node1)
+ Py_INCREF(node2)
+ PyList_SetItem(lst_or_tpl, 0, node2)
+ return lst_or_tpl
+ # For all other sizes, we just use 'sorted()'
+ if is_tuple:
+ # Note that sorted() is just list(iterable).sort()
+ lst_or_tpl = list(lst_or_tpl)
+ lst_or_tpl.sort(key=get_key, reverse=reverse)
+ return lst_or_tpl
+
+
+cdef class _MergeSorter
+
+cdef class KnownGraph:
+ """This is a class which assumes we already know the full graph."""
+
+ cdef public object _nodes
+ cdef public object _known_heads
+ cdef public int do_cache
+
+ def __init__(self, parent_map, do_cache=True):
+ """Create a new KnownGraph instance.
+
+ :param parent_map: A dictionary mapping key => parent_keys
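+
+ An illustrative sketch (the revision ids below are made up):
+
+ kg = KnownGraph({'rev-1': (), 'rev-2': ('rev-1',)})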
+ """
+ # tests showed that pre-allocating the node dict actually slowed things down
+ self._nodes = {}
+ # Maps {sorted(revision_id, revision_id): heads}
+ self._known_heads = {}
+ self.do_cache = int(do_cache)
+ # TODO: consider disabling gc since we are allocating a lot of nodes
+ # that won't be collectable anyway. Real-world testing has not
+ # shown a specific impact yet.
+ self._initialize_nodes(parent_map)
+ self._find_gdfo()
+
+ def __dealloc__(self):
+ cdef _KnownGraphNode child
+ cdef Py_ssize_t pos
+ cdef PyObject *temp_node
+
+ while PyDict_Next(self._nodes, &pos, NULL, &temp_node):
+ child = <_KnownGraphNode>temp_node
+ child.clear_references()
+
+ cdef _KnownGraphNode _get_or_create_node(self, key):
+ cdef PyObject *temp_node
+ cdef _KnownGraphNode node
+
+ temp_node = PyDict_GetItem(self._nodes, key)
+ if temp_node == NULL:
+ node = _KnownGraphNode(key)
+ PyDict_SetItem(self._nodes, key, node)
+ else:
+ node = <_KnownGraphNode>temp_node
+ return node
+
+ cdef _populate_parents(self, _KnownGraphNode node, parent_keys):
+ cdef Py_ssize_t num_parent_keys, pos
+ cdef _KnownGraphNode parent_node
+
+ num_parent_keys = len(parent_keys)
+ # We know how many parents there are, so we pre-allocate the tuple
+ parent_nodes = PyTuple_New(num_parent_keys)
+ for pos from 0 <= pos < num_parent_keys:
+ # Note: it costs us 10ms out of 40ms to look up all of these
+ # parents; it doesn't seem to be an allocation overhead,
+ # but rather a lookup overhead. There doesn't seem to be
+ # a way around it, and that is one reason why
+ # KnownGraphNode maintains a direct pointer to the parent
+ # node.
+ # We use [] because parent_keys may be a tuple or list
+ parent_node = self._get_or_create_node(parent_keys[pos])
+ # PyTuple_SET_ITEM will steal a reference, so INCREF first
+ Py_INCREF(parent_node)
+ PyTuple_SET_ITEM(parent_nodes, pos, parent_node)
+ PyList_Append(parent_node.children, node)
+ node.parents = parent_nodes
+
+ def _initialize_nodes(self, parent_map):
+ """Populate self._nodes.
+
+ After this has finished:
+ - self._nodes will have an entry for every entry in parent_map.
+ - ghosts will have parent_keys = None (see the illustrative note below),
+ - all nodes found will also have child_keys populated with all known
+ child keys.
+ """
+ cdef PyObject *temp_key, *temp_parent_keys, *temp_node
+ cdef Py_ssize_t pos
+ cdef _KnownGraphNode node
+ cdef _KnownGraphNode parent_node
+
+ if not PyDict_CheckExact(parent_map):
+ raise TypeError('parent_map should be a dict of {key:parent_keys}')
+ # for key, parent_keys in parent_map.iteritems():
+ pos = 0
+ while PyDict_Next(parent_map, &pos, &temp_key, &temp_parent_keys):
+ key = <object>temp_key
+ parent_keys = <object>temp_parent_keys
+ node = self._get_or_create_node(key)
+ self._populate_parents(node, parent_keys)
+
+ def _find_tails(self):
+ cdef PyObject *temp_node
+ cdef _KnownGraphNode node
+ cdef Py_ssize_t pos
+
+ tails = []
+ pos = 0
+ while PyDict_Next(self._nodes, &pos, NULL, &temp_node):
+ node = <_KnownGraphNode>temp_node
+ if node.parents is None or PyTuple_GET_SIZE(node.parents) == 0:
+ node.gdfo = 1
+ PyList_Append(tails, node)
+ return tails
+
+ def _find_tips(self):
+ cdef PyObject *temp_node
+ cdef _KnownGraphNode node
+ cdef Py_ssize_t pos
+
+ tips = []
+ pos = 0
+ while PyDict_Next(self._nodes, &pos, NULL, &temp_node):
+ node = <_KnownGraphNode>temp_node
+ if PyList_GET_SIZE(node.children) == 0:
+ PyList_Append(tips, node)
+ return tips
+
+ def _find_gdfo(self):
+ cdef _KnownGraphNode node
+ cdef _KnownGraphNode child
+ cdef PyObject *temp
+ cdef Py_ssize_t pos
+ cdef int replace
+ cdef Py_ssize_t last_item
+ cdef long next_gdfo
+
+ pending = self._find_tails()
+
+ last_item = PyList_GET_SIZE(pending) - 1
+ while last_item >= 0:
+ # Avoid pop followed by push; instead, peek and replace.
+ # Timing shows this is 930ms => 770ms for OOo
+ node = _get_list_node(pending, last_item)
+ last_item = last_item - 1
+ next_gdfo = node.gdfo + 1
+ for pos from 0 <= pos < PyList_GET_SIZE(node.children):
+ child = _get_list_node(node.children, pos)
+ if next_gdfo > child.gdfo:
+ child.gdfo = next_gdfo
+ child.seen = child.seen + 1
+ if child.seen == PyTuple_GET_SIZE(child.parents):
+ # This child is populated, queue it to be walked
+ last_item = last_item + 1
+ if last_item < PyList_GET_SIZE(pending):
+ Py_INCREF(child) # SetItem steals a ref
+ PyList_SetItem(pending, last_item, child)
+ else:
+ PyList_Append(pending, child)
+ # We have queued this node, we don't need to track it
+ # anymore
+ child.seen = 0
+
+ def add_node(self, key, parent_keys):
+ """Add a new node to the graph.
+
+ If this fills in a ghost, then the gdfos of all children will be
+ updated accordingly.
+
+ :param key: The node being added. If this is a duplicate, this is a
+ no-op.
+ :param parent_keys: The parents of the given node.
+ :return: None (should we return if this was a ghost, etc?)
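+
+ An illustrative usage sketch (made-up ids), extending the graph below
+ an existing revision:
+
+ kg.add_node('rev-3', ['rev-2'])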
+ """
+ cdef PyObject *maybe_node
+ cdef _KnownGraphNode node, parent_node, child_node
+ cdef long parent_gdfo, next_gdfo
+
+ maybe_node = PyDict_GetItem(self._nodes, key)
+ if maybe_node != NULL:
+ node = <_KnownGraphNode>maybe_node
+ if node.parents is None:
+ # We are filling in a ghost
+ self._populate_parents(node, parent_keys)
+ # We can't trust cached heads anymore
+ self._known_heads.clear()
+ else: # Ensure that the parent_key list matches
+ existing_parent_keys = []
+ for parent_node in node.parents:
+ existing_parent_keys.append(parent_node.key)
+ # Make sure we use a list for the comparison, in case it was a
+ # tuple, etc
+ parent_keys = list(parent_keys)
+ if existing_parent_keys == parent_keys:
+ # Exact match, nothing more to do
+ return
+ else:
+ raise ValueError('Parent key mismatch, existing node %s'
+ ' has parents of %s not %s'
+ % (key, existing_parent_keys, parent_keys))
+ else:
+ node = _KnownGraphNode(key)
+ PyDict_SetItem(self._nodes, key, node)
+ self._populate_parents(node, parent_keys)
+ parent_gdfo = 0
+ for parent_node in node.parents:
+ if parent_node.gdfo == -1:
+ # This is a newly introduced ghost, so it gets gdfo of 1
+ parent_node.gdfo = 1
+ if parent_gdfo < parent_node.gdfo:
+ parent_gdfo = parent_node.gdfo
+ node.gdfo = parent_gdfo + 1
+ # Now fill the gdfo to all children
+ # Note that this loop is slightly inefficient, in that we may visit the
+ # same child (and its descendants) more than once; however, it is
+ # 'efficient' in that we only walk to nodes that would be updated,
+ # rather than all nodes.
+ # We use a deque rather than a simple list stack, to do BFS rather
+ # than DFS, so that if a longer path is possible, we walk it before we
+ # get to the final child.
+ pending = deque([node])
+ pending_popleft = pending.popleft
+ pending_append = pending.append
+ while pending:
+ node = pending_popleft()
+ next_gdfo = node.gdfo + 1
+ for child_node in node.children:
+ if child_node.gdfo < next_gdfo:
+ # This child is being updated, we need to check its
+ # children
+ child_node.gdfo = next_gdfo
+ pending_append(child_node)
+
+ def heads(self, keys):
+ """Return the heads from amongst keys.
+
+ This is done by searching the ancestries of each key. Any key that is
+ reachable from another key is not returned; all the others are.
+
+ This operation scales with the relative depth between any two keys. It
+ uses gdfo to avoid walking all ancestry.
+
+ :param keys: An iterable of keys.
+ :return: A set of the heads. Note that as a set there is no ordering
+ information. Callers will need to filter their input to create
+ order if they need it.
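+
+ An illustrative sketch (made-up ids); 'rev-1' is an ancestor of
+ 'rev-2', so only 'rev-2' is a head:
+
+ kg = KnownGraph({'rev-1': (), 'rev-2': ('rev-1',)})
+ kg.heads(['rev-1', 'rev-2']) # frozenset(['rev-2'])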
+ """
+ cdef PyObject *maybe_node
+ cdef PyObject *maybe_heads
+ cdef PyObject *temp_node
+ cdef _KnownGraphNode node
+ cdef Py_ssize_t pos, last_item
+ cdef long min_gdfo
+
+ heads_key = frozenset(keys)
+ maybe_heads = PyDict_GetItem(self._known_heads, heads_key)
+ if maybe_heads != NULL:
+ return <object>maybe_heads
+ # Not cached, compute it ourselves
+ candidate_nodes = {}
+ for key in keys:
+ maybe_node = PyDict_GetItem(self._nodes, key)
+ if maybe_node == NULL:
+ raise KeyError('key %s not in nodes' % (key,))
+ PyDict_SetItem(candidate_nodes, key, <object>maybe_node)
+ maybe_node = PyDict_GetItem(candidate_nodes, NULL_REVISION)
+ if maybe_node != NULL:
+ # NULL_REVISION is only a head if it is the only entry
+ candidate_nodes.pop(NULL_REVISION)
+ if not candidate_nodes:
+ return frozenset([NULL_REVISION])
+ # The keys changed, so recalculate heads_key
+ heads_key = frozenset(candidate_nodes)
+ if PyDict_Size(candidate_nodes) < 2:
+ return heads_key
+
+ cleanup = []
+ pending = []
+ # we know a gdfo cannot be longer than a linear chain of all nodes
+ min_gdfo = PyDict_Size(self._nodes) + 1
+ # Build up nodes that need to be walked; note that starting nodes are
+ # not marked as seen
+ pos = 0
+ while PyDict_Next(candidate_nodes, &pos, NULL, &temp_node):
+ node = <_KnownGraphNode>temp_node
+ if node.parents is not None:
+ pending.extend(node.parents)
+ if node.gdfo < min_gdfo:
+ min_gdfo = node.gdfo
+
+ # Now do all the real work
+ last_item = PyList_GET_SIZE(pending) - 1
+ while last_item >= 0:
+ node = _get_list_node(pending, last_item)
+ last_item = last_item - 1
+ if node.seen:
+ # node already appears in some ancestry
+ continue
+ PyList_Append(cleanup, node)
+ node.seen = 1
+ if node.gdfo <= min_gdfo:
+ continue
+ if node.parents is not None and PyTuple_GET_SIZE(node.parents) > 0:
+ for pos from 0 <= pos < PyTuple_GET_SIZE(node.parents):
+ parent_node = _get_tuple_node(node.parents, pos)
+ last_item = last_item + 1
+ if last_item < PyList_GET_SIZE(pending):
+ Py_INCREF(parent_node) # SetItem steals a ref
+ PyList_SetItem(pending, last_item, parent_node)
+ else:
+ PyList_Append(pending, parent_node)
+ heads = []
+ pos = 0
+ while PyDict_Next(candidate_nodes, &pos, NULL, &temp_node):
+ node = <_KnownGraphNode>temp_node
+ if not node.seen:
+ PyList_Append(heads, node.key)
+ heads = frozenset(heads)
+ for pos from 0 <= pos < PyList_GET_SIZE(cleanup):
+ node = _get_list_node(cleanup, pos)
+ node.seen = 0
+ if self.do_cache:
+ PyDict_SetItem(self._known_heads, heads_key, heads)
+ return heads
+
+ def topo_sort(self):
+ """Return the nodes in topological order.
+
+ All parents must occur before all children.
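+
+ An illustrative sketch (made-up ids):
+
+ kg = KnownGraph({'rev-1': (), 'rev-2': ('rev-1',)})
+ kg.topo_sort() # ['rev-1', 'rev-2'] -- parent before child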
+ """
+ # This is, for the most part, the same iteration order that we used for
+ # _find_gdfo, consider finding a way to remove the duplication
+ # In general, we find the 'tails' (nodes with no parents), and then
+ # walk to the children. For children that have all of their parents
+ # yielded, we queue up the child to be yielded as well.
+ cdef _KnownGraphNode node
+ cdef _KnownGraphNode child
+ cdef PyObject *temp
+ cdef Py_ssize_t pos
+ cdef int replace
+ cdef Py_ssize_t last_item
+
+ pending = self._find_tails()
+ if PyList_GET_SIZE(pending) == 0 and len(self._nodes) > 0:
+ raise errors.GraphCycleError(self._nodes)
+
+ topo_order = []
+
+ last_item = PyList_GET_SIZE(pending) - 1
+ while last_item >= 0:
+ # Avoid pop followed by push; instead, peek and replace.
+ # Timing shows this is 930ms => 770ms for OOo
+ node = _get_list_node(pending, last_item)
+ last_item = last_item - 1
+ if node.parents is not None:
+ # We don't include ghost parents
+ PyList_Append(topo_order, node.key)
+ for pos from 0 <= pos < PyList_GET_SIZE(node.children):
+ child = _get_list_node(node.children, pos)
+ if child.gdfo == -1:
+ # We know we have a graph cycle because a node has a parent
+ # which we couldn't find
+ raise errors.GraphCycleError(self._nodes)
+ child.seen = child.seen + 1
+ if child.seen == PyTuple_GET_SIZE(child.parents):
+ # All parents of this child have been yielded, queue this
+ # one to be yielded as well
+ last_item = last_item + 1
+ if last_item < PyList_GET_SIZE(pending):
+ Py_INCREF(child) # SetItem steals a ref
+ PyList_SetItem(pending, last_item, child)
+ else:
+ PyList_Append(pending, child)
+ # We have queued this node, we don't need to track it
+ # anymore
+ child.seen = 0
+ # We started from the parents, so we don't need to do any more work
+ return topo_order
+
+ def gc_sort(self):
+ """Return a reverse topological ordering which is 'stable'.
+
+ There are a few constraints:
+ 1) Reverse topological (all children before all parents)
+ 2) Grouped by prefix
+ 3) 'stable' sorting, so that we get the same result independent of
+ machine or extra data.
+
+ To do this, we use the same basic algorithm as topo_sort, but when we
+ aren't sure what node to access next, we sort them lexicographically.
+ """
+ cdef PyObject *temp
+ cdef Py_ssize_t pos, last_item
+ cdef _KnownGraphNode node, node2, parent_node
+
+ tips = self._find_tips()
+ # Split the tips based on prefix
+ prefix_tips = {}
+ for pos from 0 <= pos < PyList_GET_SIZE(tips):
+ node = _get_list_node(tips, pos)
+ if PyString_CheckExact(node.key) or len(node.key) == 1:
+ prefix = ''
+ else:
+ prefix = node.key[0]
+ temp = PyDict_GetItem(prefix_tips, prefix)
+ if temp == NULL:
+ prefix_tips[prefix] = [node]
+ else:
+ tip_nodes = <object>temp
+ PyList_Append(tip_nodes, node)
+
+ result = []
+ for prefix in sorted(prefix_tips):
+ temp = PyDict_GetItem(prefix_tips, prefix)
+ assert temp != NULL
+ tip_nodes = <object>temp
+ pending = _sort_list_nodes(tip_nodes, 1)
+ last_item = PyList_GET_SIZE(pending) - 1
+ while last_item >= 0:
+ node = _get_list_node(pending, last_item)
+ last_item = last_item - 1
+ if node.parents is None:
+ # Ghost
+ continue
+ PyList_Append(result, node.key)
+ # Sorting the parent keys isn't strictly necessary for stable
+ # sorting of a given graph. But it does help minimize the
+ # differences between graphs
+ # For bzr.dev ancestry:
+ # 4.73ms no sort
+ # 7.73ms RichCompareBool sort
+ parents = _sort_list_nodes(node.parents, 1)
+ for pos from 0 <= pos < len(parents):
+ if PyTuple_CheckExact(parents):
+ parent_node = _get_tuple_node(parents, pos)
+ else:
+ parent_node = _get_list_node(parents, pos)
+ # TODO: GraphCycle detection
+ parent_node.seen = parent_node.seen + 1
+ if (parent_node.seen
+ == PyList_GET_SIZE(parent_node.children)):
+ # All children have been processed, queue up this
+ # parent
+ last_item = last_item + 1
+ if last_item < PyList_GET_SIZE(pending):
+ Py_INCREF(parent_node) # SetItem steals a ref
+ PyList_SetItem(pending, last_item, parent_node)
+ else:
+ PyList_Append(pending, parent_node)
+ parent_node.seen = 0
+ return result
+
+ def merge_sort(self, tip_key):
+ """Compute the merge sorted graph output."""
+ cdef _MergeSorter sorter
+
+ # TODO: consider disabling gc since we are allocating a lot of nodes
+ # that won't be collectable anyway. Real-world testing has not
+ # shown a specific impact yet.
+ sorter = _MergeSorter(self, tip_key)
+ return sorter.topo_order()
+
+ def get_parent_keys(self, key):
+ """Get the parents for a key
+
+ Returns a list containg the parents keys. If the key is a ghost,
+ None is returned. A KeyError will be raised if the key is not in
+ the graph.
+
+ :param keys: Key to check (eg revision_id)
+ :return: A list of parents
+ """
+ return self._nodes[key].parent_keys
+
+ def get_child_keys(self, key):
+ """Get the children for a key
+
+ Returns a list containg the children keys. A KeyError will be raised
+ if the key is not in the graph.
+
+ :param keys: Key to check (eg revision_id)
+ :return: A list of children
+ """
+ return self._nodes[key].child_keys
+
+
+cdef class _MergeSortNode:
+ """Tracks information about a node during the merge_sort operation."""
+
+ # Public api
+ cdef public object key
+ cdef public long merge_depth
+ cdef public object end_of_merge # True/False: is this the end of the current merge?
+
+ # Private api, used while computing the information
+ cdef _KnownGraphNode left_parent
+ cdef _KnownGraphNode left_pending_parent
+ cdef object pending_parents # list of _KnownGraphNode for non-left parents
+ cdef long _revno_first
+ cdef long _revno_second
+ cdef long _revno_last
+ # TODO: turn these into flag/bit fields rather than individual members
+ cdef int is_first_child # Is this the first child?
+ cdef int seen_by_child # A child node has seen this parent
+ cdef int completed # Fully Processed
+
+ def __init__(self, key):
+ self.key = key
+ self.merge_depth = -1
+ self.left_parent = None
+ self.left_pending_parent = None
+ self.pending_parents = None
+ self._revno_first = -1
+ self._revno_second = -1
+ self._revno_last = -1
+ self.is_first_child = 0
+ self.seen_by_child = 0
+ self.completed = 0
+
+ def __repr__(self):
+ return '%s(%s depth:%s rev:%s,%s,%s first:%s seen:%s)' % (
+ self.__class__.__name__, self.key,
+ self.merge_depth,
+ self._revno_first, self._revno_second, self._revno_last,
+ self.is_first_child, self.seen_by_child)
+
+ cdef int has_pending_parents(self): # cannot_raise
+ if self.left_pending_parent is not None or self.pending_parents:
+ return 1
+ return 0
+
+ cdef object _revno(self):
+ if self._revno_first == -1:
+ if self._revno_second != -1:
+ raise RuntimeError('Something wrong with: %s' % (self,))
+ return (self._revno_last,)
+ else:
+ return (self._revno_first, self._revno_second, self._revno_last)
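+ # Note (added): mainline revisions end up with a one-element revno such
+ # as (3,); merged revisions get a three-element revno such as (3, 1, 2),
+ # i.e. the 2nd revision on the 1st branch forked from mainline revision 3.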
+
+ property revno:
+ def __get__(self):
+ return self._revno()
+
+
+cdef class _MergeSorter:
+ """This class does the work of computing the merge_sort ordering.
+
+ We have some small advantages, in that we get all the extra information
+ that KnownGraph knows, like knowing the child lists, etc.
+ """
+
+ # Current performance numbers for merge_sort(bzr_dev_parent_map):
+ # 302ms tsort.merge_sort()
+ # 91ms graph.KnownGraph().merge_sort()
+ # 40ms kg.merge_sort()
+
+ cdef KnownGraph graph
+ cdef object _depth_first_stack # list
+ cdef Py_ssize_t _last_stack_item # offset to last item on stack
+ # cdef object _ms_nodes # dict of key => _MergeSortNode
+ cdef object _revno_to_branch_count # {revno => num child branches}
+ cdef object _scheduled_nodes # List of nodes ready to be yielded
+
+ def __init__(self, known_graph, tip_key):
+ cdef _KnownGraphNode node
+
+ self.graph = known_graph
+ # self._ms_nodes = {}
+ self._revno_to_branch_count = {}
+ self._depth_first_stack = []
+ self._last_stack_item = -1
+ self._scheduled_nodes = []
+ if (tip_key is not None and tip_key != NULL_REVISION
+ and tip_key != (NULL_REVISION,)):
+ node = self.graph._nodes[tip_key]
+ self._push_node(node, 0)
+
+ cdef _MergeSortNode _get_ms_node(self, _KnownGraphNode node):
+ cdef PyObject *temp_node
+ cdef _MergeSortNode ms_node
+
+ if node.extra is None:
+ ms_node = _MergeSortNode(node.key)
+ node.extra = ms_node
+ else:
+ ms_node = <_MergeSortNode>node.extra
+ return ms_node
+
+ cdef _push_node(self, _KnownGraphNode node, long merge_depth):
+ cdef _KnownGraphNode parent_node
+ cdef _MergeSortNode ms_node, ms_parent_node
+ cdef Py_ssize_t pos
+
+ ms_node = self._get_ms_node(node)
+ ms_node.merge_depth = merge_depth
+ if node.parents is None:
+ raise RuntimeError('ghost nodes should not be pushed'
+ ' onto the stack: %s' % (node,))
+ if PyTuple_GET_SIZE(node.parents) > 0:
+ parent_node = _get_tuple_node(node.parents, 0)
+ ms_node.left_parent = parent_node
+ if parent_node.parents is None: # left-hand ghost
+ ms_node.left_pending_parent = None
+ ms_node.left_parent = None
+ else:
+ ms_node.left_pending_parent = parent_node
+ if PyTuple_GET_SIZE(node.parents) > 1:
+ ms_node.pending_parents = []
+ for pos from 1 <= pos < PyTuple_GET_SIZE(node.parents):
+ parent_node = _get_tuple_node(node.parents, pos)
+ if parent_node.parents is None: # ghost
+ continue
+ PyList_Append(ms_node.pending_parents, parent_node)
+
+ ms_node.is_first_child = 1
+ if ms_node.left_parent is not None:
+ ms_parent_node = self._get_ms_node(ms_node.left_parent)
+ if ms_parent_node.seen_by_child:
+ ms_node.is_first_child = 0
+ ms_parent_node.seen_by_child = 1
+ self._last_stack_item = self._last_stack_item + 1
+ if self._last_stack_item < PyList_GET_SIZE(self._depth_first_stack):
+ Py_INCREF(node) # SetItem steals a ref
+ PyList_SetItem(self._depth_first_stack, self._last_stack_item,
+ node)
+ else:
+ PyList_Append(self._depth_first_stack, node)
+
+ cdef _pop_node(self):
+ cdef PyObject *temp
+ cdef _MergeSortNode ms_node, ms_parent_node, ms_prev_node
+ cdef _KnownGraphNode node, parent_node, prev_node
+
+ node = _get_list_node(self._depth_first_stack, self._last_stack_item)
+ ms_node = <_MergeSortNode>node.extra
+ self._last_stack_item = self._last_stack_item - 1
+ if ms_node.left_parent is not None:
+ # Assign the revision number from the left-hand parent
+ ms_parent_node = <_MergeSortNode>ms_node.left_parent.extra
+ if ms_node.is_first_child:
+ # First child just increments the final digit
+ ms_node._revno_first = ms_parent_node._revno_first
+ ms_node._revno_second = ms_parent_node._revno_second
+ ms_node._revno_last = ms_parent_node._revno_last + 1
+ else:
+ # Not the first child, make a new branch
+ # (mainline_revno, branch_count, 1)
+ if ms_parent_node._revno_first == -1:
+ # Mainline ancestor, the increment is on the last digit
+ base_revno = ms_parent_node._revno_last
+ else:
+ base_revno = ms_parent_node._revno_first
+ temp = PyDict_GetItem(self._revno_to_branch_count,
+ base_revno)
+ if temp == NULL:
+ branch_count = 1
+ else:
+ branch_count = (<object>temp) + 1
+ PyDict_SetItem(self._revno_to_branch_count, base_revno,
+ branch_count)
+ ms_node._revno_first = base_revno
+ ms_node._revno_second = branch_count
+ ms_node._revno_last = 1
+ else:
+ temp = PyDict_GetItem(self._revno_to_branch_count, 0)
+ if temp == NULL:
+ # The first root node doesn't have a 3-digit revno
+ root_count = 0
+ ms_node._revno_first = -1
+ ms_node._revno_second = -1
+ ms_node._revno_last = 1
+ else:
+ root_count = (<object>temp) + 1
+ ms_node._revno_first = 0
+ ms_node._revno_second = root_count
+ ms_node._revno_last = 1
+ PyDict_SetItem(self._revno_to_branch_count, 0, root_count)
+ ms_node.completed = 1
+ if PyList_GET_SIZE(self._scheduled_nodes) == 0:
+ # The first scheduled node is always the end of merge
+ ms_node.end_of_merge = True
+ else:
+ prev_node = _get_list_node(self._scheduled_nodes,
+ PyList_GET_SIZE(self._scheduled_nodes) - 1)
+ ms_prev_node = <_MergeSortNode>prev_node.extra
+ if ms_prev_node.merge_depth < ms_node.merge_depth:
+ # The previously pushed node is to our left, so this is the end
+ # of this right-hand chain
+ ms_node.end_of_merge = True
+ elif (ms_prev_node.merge_depth == ms_node.merge_depth
+ and prev_node not in node.parents):
+ # The next node is not a direct parent of this node
+ ms_node.end_of_merge = True
+ else:
+ ms_node.end_of_merge = False
+ PyList_Append(self._scheduled_nodes, node)
+
+ cdef _schedule_stack(self):
+ cdef _KnownGraphNode last_node, next_node
+ cdef _MergeSortNode ms_node, ms_last_node, ms_next_node
+ cdef long next_merge_depth
+ ordered = []
+ while self._last_stack_item >= 0:
+ # Peek at the last item on the stack
+ last_node = _get_list_node(self._depth_first_stack,
+ self._last_stack_item)
+ if last_node.gdfo == -1:
+ # if _find_gdfo skipped a node, that means there is a graph
+ # cycle, error out now
+ raise errors.GraphCycleError(self.graph._nodes)
+ ms_last_node = <_MergeSortNode>last_node.extra
+ if not ms_last_node.has_pending_parents():
+ # Processed all parents, pop this node
+ self._pop_node()
+ continue
+ while ms_last_node.has_pending_parents():
+ if ms_last_node.left_pending_parent is not None:
+ # recurse depth first into the primary parent
+ next_node = ms_last_node.left_pending_parent
+ ms_last_node.left_pending_parent = None
+ else:
+ # place any merges in right-to-left order for scheduling
+ # which gives us left-to-right order after we reverse
+ # the scheduled queue.
+ # Note: This has the effect of allocating common-new
+ # revisions to the right-most subtree rather than the
+ # left most, which will display nicely (you get
+ # smaller trees at the top of the combined merge).
+ next_node = ms_last_node.pending_parents.pop()
+ ms_next_node = self._get_ms_node(next_node)
+ if ms_next_node.completed:
+ # this parent was completed by a child on the
+ # call stack. skip it.
+ continue
+ # otherwise transfer it from the source graph into the
+ # top of the current depth first search stack.
+
+ if next_node is ms_last_node.left_parent:
+ next_merge_depth = ms_last_node.merge_depth
+ else:
+ next_merge_depth = ms_last_node.merge_depth + 1
+ self._push_node(next_node, next_merge_depth)
+ # and do not continue processing parents until this 'call'
+ # has recursed.
+ break
+
+ cdef topo_order(self):
+ cdef _MergeSortNode ms_node
+ cdef _KnownGraphNode node
+ cdef Py_ssize_t pos
+ cdef PyObject *temp_key, *temp_node
+
+ # Note: allocating a _MergeSortNode and deallocating it for all nodes
+ # costs approx 8.52ms (21%) of the total runtime
+ # We might consider moving the attributes into the base
+ # KnownGraph object.
+ self._schedule_stack()
+
+ # We've set up the basic schedule, now we can continue processing the
+ # output.
+ # Note: This final loop costs us 40.0ms => 28.8ms (11ms, 25%) on
+ # bzr.dev, to convert the internal Object representation into a
+ # Tuple representation...
+ # 2ms is walking the data and computing revno tuples
+ # 7ms is computing the return tuple
+ # 4ms is PyList_Append()
+ ordered = []
+ # output the result in reverse order, and separate the generated info
+ for pos from PyList_GET_SIZE(self._scheduled_nodes) > pos >= 0:
+ node = _get_list_node(self._scheduled_nodes, pos)
+ ms_node = <_MergeSortNode>node.extra
+ PyList_Append(ordered, ms_node)
+ node.extra = None
+ # Clear out the scheduled nodes now that we're done
+ self._scheduled_nodes = []
+ return ordered
diff --git a/bzrlib/_patiencediff_c.c b/bzrlib/_patiencediff_c.c
new file mode 100644
index 0000000..4de410d
--- /dev/null
+++ b/bzrlib/_patiencediff_c.c
@@ -0,0 +1,1281 @@
+/*
+ Copyright (C) 2007, 2010 Canonical Ltd
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+ Function equate_lines based on bdiff.c from Mercurial.
+ Copyright (C) 2005, 2006 Matt Mackall <mpm@selenic.com>
+
+ Functions unique_lcs/recurse_matches based on _patiencediff_py.py.
+ Copyright (C) 2005 Bram Cohen, Copyright (C) 2005, 2006 Canonical Ltd
+*/
+
+
+#include <stdlib.h>
+#include <string.h>
+#include <Python.h>
+
+#include "python-compat.h"
+
+
+#if defined(__GNUC__)
+# define inline __inline__
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline
+#endif
+
+
+#define MIN(a, b) (((a) > (b)) ? (b) : (a))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+
+#define SENTINEL -1
+
+
+/* malloc returns NULL on some platforms if you try to allocate nothing,
+ * causing <https://bugs.launchpad.net/bzr/+bug/511267> and
+ * <https://bugs.launchpad.net/bzr/+bug/331095>. On glibc it passes, but
+ * let's make it fail to aid testing. */
+#define guarded_malloc(x) ( (x) ? malloc(x) : NULL )
+
+enum {
+ OP_EQUAL = 0,
+ OP_INSERT,
+ OP_DELETE,
+ OP_REPLACE
+};
+
+
+/* values from this array need to correspond to the order of the enum above */
+static char *opcode_names[] = {
+ "equal",
+ "insert",
+ "delete",
+ "replace",
+};
+
+
+struct line {
+ long hash; /* hash code of the string/object */
+ Py_ssize_t next; /* next line from the same equivalence class */
+ Py_ssize_t equiv; /* equivalence class */
+ PyObject *data;
+};
+
+
+struct bucket {
+ Py_ssize_t a_head; /* first item in `a` from this equivalence class */
+ Py_ssize_t a_count;
+ Py_ssize_t b_head; /* first item in `b` from this equivalence class */
+ Py_ssize_t b_count;
+ Py_ssize_t a_pos;
+ Py_ssize_t b_pos;
+};
+
+
+struct hashtable {
+ Py_ssize_t last_a_pos;
+ Py_ssize_t last_b_pos;
+ Py_ssize_t size;
+ struct bucket *table;
+};
+
+struct matching_line {
+ Py_ssize_t a; /* index of the line in `a` */
+ Py_ssize_t b; /* index of the line in `b` */
+};
+
+
+struct matching_block {
+ Py_ssize_t a; /* index of the first line in `a` */
+ Py_ssize_t b; /* index of the first line in `b` */
+ Py_ssize_t len; /* length of the block */
+};
+
+
+struct matching_blocks {
+ struct matching_block *matches;
+ Py_ssize_t count;
+};
+
+
+struct opcode {
+ int tag;
+ Py_ssize_t i1;
+ Py_ssize_t i2;
+ Py_ssize_t j1;
+ Py_ssize_t j2;
+};
+
+
+typedef struct {
+ PyObject_HEAD
+ Py_ssize_t asize;
+ Py_ssize_t bsize;
+ struct line *a;
+ struct line *b;
+ struct hashtable hashtable;
+ Py_ssize_t *backpointers;
+} PatienceSequenceMatcher;
+
+
+static inline Py_ssize_t
+bisect_left(Py_ssize_t *list, Py_ssize_t item, Py_ssize_t lo, Py_ssize_t hi)
+{
+ while (lo < hi) {
+ Py_ssize_t mid = lo / 2 + hi / 2 + (lo % 2 + hi % 2) / 2;
+ if (list[mid] < item)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+ return lo;
+}
+
+
+static inline int
+compare_lines(struct line *a, struct line *b)
+{
+ return ((a->hash != b->hash)
+ || PyObject_Compare(a->data, b->data));
+}
+
+
+static inline int
+find_equivalence_class(struct bucket *hashtable, Py_ssize_t hsize,
+ struct line *lines, struct line *ref_lines,
+ Py_ssize_t i)
+{
+ Py_ssize_t j;
+ for (j = lines[i].hash & hsize; hashtable[j].b_head != SENTINEL; j = (j + 1) & hsize) {
+ if (!compare_lines(lines + i, ref_lines + hashtable[j].b_head)) {
+ break;
+ }
+ }
+ return j;
+}
+
+
+static int
+equate_lines(struct hashtable *result,
+ struct line *lines_a, struct line *lines_b,
+ Py_ssize_t asize, Py_ssize_t bsize)
+{
+ Py_ssize_t i, j, hsize;
+ struct bucket *hashtable;
+
+ /* check for overflow, we need the table to be at least bsize+1 */
+ if (bsize == PY_SSIZE_T_MAX) {
+ PyErr_SetNone(PyExc_OverflowError);
+ return 0;
+ }
+
+ /* build a hash table of the next highest power of 2 */
+ hsize = 1;
+ while (hsize < bsize + 1)
+ hsize *= 2;
+
+ /* can't be 0 */
+ hashtable = (struct bucket *) guarded_malloc(sizeof(struct bucket) * hsize);
+ if (hashtable == NULL) {
+ PyErr_NoMemory();
+ return 0;
+ }
+
+ /* initialise the hashtable */
+ for (i = 0; i < hsize; i++) {
+ hashtable[i].a_count = 0;
+ hashtable[i].b_count = 0;
+ hashtable[i].a_head = SENTINEL;
+ hashtable[i].b_head = SENTINEL;
+ }
+ hsize--;
+
+ /* add lines from lines_b to the hash table chains, iterating
+ backwards so the matching lines are sorted into the linked list
+ by line number (because we are adding new lines to the
+ head of the list) */
+ for (i = bsize - 1; i >= 0; i--) {
+ /* find the first hashtable entry, which is either empty or contains
+ the same line as lines_b[i] */
+ j = find_equivalence_class(hashtable, hsize, lines_b, lines_b, i);
+
+ /* set the equivalence class */
+ lines_b[i].equiv = j;
+
+ /* add to the head of the equivalence class */
+ lines_b[i].next = hashtable[j].b_head;
+ hashtable[j].b_head = i;
+ hashtable[j].b_count++;
+ }
+
+ /* match items from lines_a to their equivalence class in lines_b.
+ again, iterating backwards for the right order of the linked lists */
+ for (i = asize - 1; i >= 0; i--) {
+ /* find the first hash entry, which is either empty or contains
+ the same line as lines_a[i] */
+ j = find_equivalence_class(hashtable, hsize, lines_a, lines_b, i);
+
+ /* set the equivalence class, even if we are not interested in this
+ line, because the values are not pre-filled */
+ lines_a[i].equiv = j;
+
+ /* we are not interested in lines which are not also in lines_b */
+ if (hashtable[j].b_head == SENTINEL)
+ continue;
+
+ /* add to the head of the equivalence class */
+ lines_a[i].next = hashtable[j].a_head;
+ hashtable[j].a_head = i;
+ hashtable[j].a_count++;
+ }
+
+ result->last_a_pos = -1;
+ result->last_b_pos = -1;
+ result->size = hsize + 1;
+ result->table = hashtable;
+
+ return 1;
+}
+
+
+
+/* Finds longest common subsequence of unique lines in a[alo:ahi] and
+ b[blo:bhi].
+ Parameter backpointers must have allocated memory for at least
+ 4 * (bhi - blo) ints. */
+Py_ssize_t
+unique_lcs(struct matching_line *answer,
+ struct hashtable *hashtable, Py_ssize_t *backpointers,
+ struct line *lines_a, struct line *lines_b,
+ Py_ssize_t alo, Py_ssize_t blo, Py_ssize_t ahi, Py_ssize_t bhi)
+{
+ Py_ssize_t i, k, equiv, apos, bpos, norm_apos, norm_bpos, bsize, stacksize;
+ Py_ssize_t *stacks, *lasts, *btoa;
+ struct bucket *h;
+
+ k = 0;
+ stacksize = 0;
+ bsize = bhi - blo;
+ h = hashtable->table;
+
+ /* "unpack" the allocated memory */
+ stacks = backpointers + bsize;
+ lasts = stacks + bsize;
+ btoa = lasts + bsize;
+
+ /* initialise the backpointers */
+ for (i = 0; i < bsize; i++)
+ backpointers[i] = SENTINEL;
+
+ if (hashtable->last_a_pos == -1 || hashtable->last_a_pos > alo)
+ for (i = 0; i < hashtable->size; i++)
+ h[i].a_pos = h[i].a_head;
+ hashtable->last_a_pos = alo;
+
+ if (hashtable->last_b_pos == -1 || hashtable->last_b_pos > blo)
+ for (i = 0; i < hashtable->size; i++)
+ h[i].b_pos = h[i].b_head;
+ hashtable->last_b_pos = blo;
+
+ for (bpos = blo; bpos < bhi; bpos++) {
+ equiv = lines_b[bpos].equiv;
+
+ /* no lines in a or b */
+ if (h[equiv].a_count == 0 || h[equiv].b_count == 0)
+ continue;
+
+ /* find a unique line in lines_a that matches lines_b[bpos];
+ if we find more than one line within the range alo:ahi,
+ jump to the next line from lines_b immediately */
+ apos = SENTINEL;
+ /* loop through all lines in the linked list */
+ for (i = h[equiv].a_pos; i != SENTINEL; i = lines_a[i].next) {
+ /* the index is lower than alo, continue to the next line */
+ if (i < alo) {
+ h[equiv].a_pos = i;
+ continue;
+ }
+ /* the index is higher than ahi, stop searching */
+ if (i >= ahi)
+ break;
+ /* if the line is within our range, check if it's a duplicate */
+ if (apos != SENTINEL)
+ goto nextb;
+ /* save index to the line */
+ apos = i;
+ }
+ /* this line has no equivalent in lines_a[alo:ahi] */
+ if (apos == SENTINEL)
+ goto nextb;
+
+ /* check for duplicates of this line in lines_b[blo:bhi] */
+ /* loop through all lines in the linked list */
+ for (i = h[equiv].b_pos; i != SENTINEL; i = lines_b[i].next) {
+ /* the index is lower than blo, continue to the next line */
+ if (i < blo) {
+ h[equiv].b_pos = i;
+ continue;
+ }
+ /* the index is higher than bhi, stop searching */
+ if (i >= bhi)
+ break;
+ /* if this isn't the line we started with and it's within
+ our range, it's a duplicate */
+ if (i != bpos)
+ goto nextb;
+ }
+
+ /* use normalised indexes ([0,ahi-alo) instead of [alo,ahi))
+ for the patience sorting algorithm */
+ norm_bpos = bpos - blo;
+ norm_apos = apos - alo;
+ btoa[norm_bpos] = norm_apos;
+
+ /*
+ Ok, how does this work...
+
+ We have a list of matching lines from two lists, a and b. These
+ matches are stored in variable `btoa`. As we are iterating over this
+ table by bpos, the lines from b already form an increasing sequence.
+ We need to "sort" also the lines from a using the patience sorting
+ algorithm, ignoring the lines which would need to be swapped.
+
+ http://en.wikipedia.org/wiki/Patience_sorting
+
+ For each pair of lines, we need to place the line from a on either
+ an existing pile that has higher value on the top or create a new
+ pile. Variable `stacks` represents the tops of these piles and in
+ variable `lasts` we store the lines from b, that correspond to the
+ lines from a in `stacks`.
+
+ Whenever we place a new line on top of a pile, we store a
+ backpointer to the line (b) from the top of the previous pile. This means
+ that after the loop, variable `backpointers` will contain an index
+ to the previous matching lines that forms an increasing sequence
+ (over both indexes a and b) with the current matching lines. If
+ either index a or b of the previous matching lines would be higher
+ than indexes of the current one or if the indexes of the current
+ one are 0, it will contain SENTINEL.
+
+ To construct the LCS, we will just need to follow these backpointers
+ from the top of the last pile and stop when we reach SENTINEL.
+ */
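+
+ /* A small worked example (added note): if btoa were [0, 3, 1, 2], the
+ pile tops in `stacks` would evolve as [0] -> [0, 3] -> [0, 1] ->
+ [0, 1, 2], and following the backpointers from the top of the last
+ pile recovers the matches (a=0, b=0), (a=1, b=2), (a=2, b=3), which
+ increase in both indexes. */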
+
+ /* as an optimization, check if the next line comes at the end,
+ because it usually does */
+ if (stacksize && stacks[stacksize - 1] < norm_apos)
+ k = stacksize;
+ /* as an optimization, check if the next line comes right after
+ the previous line, because usually it does */
+ else if (stacksize && (stacks[k] < norm_apos) &&
+ (k == stacksize - 1 || stacks[k + 1] > norm_apos))
+ k += 1;
+ else
+ k = bisect_left(stacks, norm_apos, 0, stacksize);
+
+ if (k > 0)
+ backpointers[norm_bpos] = lasts[k - 1];
+
+ if (k < stacksize) {
+ stacks[k] = norm_apos;
+ lasts[k] = norm_bpos;
+ }
+ else {
+ stacks[stacksize] = norm_apos;
+ lasts[stacksize] = norm_bpos;
+ stacksize += 1;
+ }
+
+
+nextb:
+ ;
+ }
+
+ if (stacksize == 0)
+ return 0;
+
+ /* backtrace the structures to find the LCS */
+ i = 0;
+ k = lasts[stacksize - 1];
+ while (k != SENTINEL) {
+ answer[i].a = btoa[k];
+ answer[i].b = k;
+ k = backpointers[k];
+ i++;
+ }
+
+ return i;
+}
+
+/* Adds a new line to the list of matching blocks, either extending the
+ current block or adding a new one. */
+static inline void
+add_matching_line(struct matching_blocks *answer, Py_ssize_t a, Py_ssize_t b)
+{
+ Py_ssize_t last_index = answer->count - 1;
+ if ((last_index >= 0) &&
+ (a == answer->matches[last_index].a +
+ answer->matches[last_index].len) &&
+ (b == answer->matches[last_index].b +
+ answer->matches[last_index].len)) {
+ /* enlarge the last block */
+ answer->matches[last_index].len++;
+ }
+ else {
+ /* create a new block */
+ last_index++;
+ answer->matches[last_index].a = a;
+ answer->matches[last_index].b = b;
+ answer->matches[last_index].len = 1;
+ answer->count++;
+ }
+}
+
+
+static int
+recurse_matches(struct matching_blocks *answer, struct hashtable *hashtable,
+ Py_ssize_t *backpointers, struct line *a, struct line *b,
+ Py_ssize_t alo, Py_ssize_t blo, Py_ssize_t ahi, Py_ssize_t bhi,
+ int maxrecursion)
+{
+ int res;
+ Py_ssize_t new, last_a_pos, last_b_pos, lcs_size, nahi, nbhi, i, apos, bpos;
+ struct matching_line *lcs;
+
+ if (maxrecursion < 0)
+ return 1;
+
+ if (alo == ahi || blo == bhi)
+ return 1;
+
+ new = 0;
+ last_a_pos = alo - 1;
+ last_b_pos = blo - 1;
+
+ lcs = (struct matching_line *)guarded_malloc(sizeof(struct matching_line) * (bhi - blo));
+ if (lcs == NULL)
+ return 0;
+
+ lcs_size = unique_lcs(lcs, hashtable, backpointers, a, b, alo, blo, ahi, bhi);
+
+ /* recurse between lines which are unique in each file and match */
+ for (i = lcs_size - 1; i >= 0; i--) {
+ apos = alo + lcs[i].a;
+ bpos = blo + lcs[i].b;
+ if (last_a_pos + 1 != apos || last_b_pos + 1 != bpos) {
+ res = recurse_matches(answer, hashtable,
+ backpointers, a, b,
+ last_a_pos + 1, last_b_pos + 1,
+ apos, bpos, maxrecursion - 1);
+ if (!res)
+ goto error;
+ }
+ last_a_pos = apos;
+ last_b_pos = bpos;
+ add_matching_line(answer, apos, bpos);
+ new = 1;
+ }
+
+ free(lcs);
+ lcs = NULL;
+
+ /* find matches between the last match and the end */
+ if (new > 0) {
+ res = recurse_matches(answer, hashtable,
+ backpointers, a, b,
+ last_a_pos + 1, last_b_pos + 1,
+ ahi, bhi, maxrecursion - 1);
+ if (!res)
+ goto error;
+ }
+
+
+ /* find matching lines at the very beginning */
+ else if (a[alo].equiv == b[blo].equiv) {
+ while (alo < ahi && blo < bhi && a[alo].equiv == b[blo].equiv)
+ add_matching_line(answer, alo++, blo++);
+ res = recurse_matches(answer, hashtable,
+ backpointers, a, b,
+ alo, blo, ahi, bhi, maxrecursion - 1);
+ if (!res)
+ goto error;
+ }
+
+ /* find matching lines at the very end */
+ else if (a[ahi - 1].equiv == b[bhi - 1].equiv) {
+ nahi = ahi - 1;
+ nbhi = bhi - 1;
+ while (nahi > alo && nbhi > blo && a[nahi - 1].equiv == b[nbhi - 1].equiv) {
+ nahi--;
+ nbhi--;
+ }
+ res = recurse_matches(answer, hashtable,
+ backpointers, a, b,
+ last_a_pos + 1, last_b_pos + 1,
+ nahi, nbhi, maxrecursion - 1);
+ if (!res)
+ goto error;
+ for (i = 0; i < ahi - nahi; i++)
+ add_matching_line(answer, nahi + i, nbhi + i);
+ }
+
+ return 1;
+
+error:
+ free(lcs);
+ return 0;
+}
+
+
+static void
+delete_lines(struct line *lines, Py_ssize_t size)
+{
+ struct line *line = lines;
+ while (size-- > 0) {
+ Py_XDECREF(line->data);
+ line++;
+ }
+ free(lines);
+}
+
+
+static Py_ssize_t
+load_lines(PyObject *orig, struct line **lines)
+{
+ Py_ssize_t size, i;
+ struct line *line;
+ PyObject *seq, *item;
+
+ seq = PySequence_Fast(orig, "sequence expected");
+ if (seq == NULL) {
+ return -1;
+ }
+
+ size = PySequence_Fast_GET_SIZE(seq);
+ if (size == 0) {
+ Py_DECREF(seq);
+ return 0;
+ }
+
+ /* Allocate a memory block for line data, initialized to 0 */
+ line = *lines = (struct line *)calloc(size, sizeof(struct line));
+ if (line == NULL) {
+ PyErr_NoMemory();
+ Py_DECREF(seq);
+ return -1;
+ }
+
+ for (i = 0; i < size; i++) {
+ item = PySequence_Fast_GET_ITEM(seq, i);
+ Py_INCREF(item);
+ line->data = item;
+ line->hash = PyObject_Hash(item);
+ if (line->hash == (-1)) {
+ /* Propagate the hash exception */
+ size = -1;
+ goto cleanup;
+ }
+ line->next = SENTINEL;
+ line++;
+ }
+
+ cleanup:
+ Py_DECREF(seq);
+ if (size == -1) {
+ /* Error -- cleanup unused object references */
+ delete_lines(*lines, i);
+ *lines = NULL;
+ }
+ return size;
+}
+
+
+static PyObject *
+py_unique_lcs(PyObject *self, PyObject *args)
+{
+ PyObject *aseq, *bseq, *res, *item;
+ Py_ssize_t asize, bsize, i, nmatches, *backpointers = NULL;
+ struct line *a = NULL, *b = NULL;
+ struct matching_line *matches = NULL;
+ struct hashtable hashtable;
+
+ if (!PyArg_ParseTuple(args, "OO", &aseq, &bseq))
+ return NULL;
+
+ hashtable.table = NULL;
+
+ asize = load_lines(aseq, &a);
+ bsize = load_lines(bseq, &b);
+ if (asize == -1 || bsize == -1)
+ goto error;
+
+ if (!equate_lines(&hashtable, a, b, asize, bsize))
+ goto error;
+
+ if (bsize > 0) {
+ matches = (struct matching_line *)guarded_malloc(sizeof(struct matching_line) * bsize);
+ if (matches == NULL)
+ goto error;
+
+ backpointers = (Py_ssize_t *)guarded_malloc(sizeof(Py_ssize_t) * bsize * 4);
+ if (backpointers == NULL)
+ goto error;
+ }
+
+ nmatches = unique_lcs(matches, &hashtable, backpointers, a, b, 0, 0, asize, bsize);
+
+ res = PyList_New(nmatches);
+ for (i = 0; i < nmatches; i++) {
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("ii", matches[nmatches - i - 1].a, matches[nmatches - i - 1].b);
+#else
+ item = Py_BuildValue("nn", matches[nmatches - i - 1].a, matches[nmatches - i - 1].b);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_SetItem(res, i, item) != 0)
+ goto error;
+ }
+
+ free(backpointers);
+ free(matches);
+ free(hashtable.table);
+ delete_lines(b, bsize);
+ delete_lines(a, asize);
+ return res;
+
+error:
+ free(backpointers);
+ free(matches);
+ free(hashtable.table);
+ delete_lines(b, bsize);
+ delete_lines(a, asize);
+ return NULL;
+}
+
+
+static PyObject *
+py_recurse_matches(PyObject *self, PyObject *args)
+{
+ PyObject *aseq, *bseq, *item, *answer;
+ int maxrecursion, res;
+ Py_ssize_t i, j, asize, bsize, alo, blo, ahi, bhi;
+ Py_ssize_t *backpointers = NULL;
+ struct line *a = NULL, *b = NULL;
+ struct hashtable hashtable;
+ struct matching_blocks matches;
+
+#if PY_VERSION_HEX < 0x02050000
+ if (!PyArg_ParseTuple(args, "OOiiiiOi", &aseq, &bseq, &alo, &blo,
+ &ahi, &bhi, &answer, &maxrecursion))
+#else
+ if (!PyArg_ParseTuple(args, "OOnnnnOi", &aseq, &bseq, &alo, &blo,
+ &ahi, &bhi, &answer, &maxrecursion))
+#endif
+ return NULL;
+
+ hashtable.table = NULL;
+ matches.matches = NULL;
+
+ asize = load_lines(aseq, &a);
+ bsize = load_lines(bseq, &b);
+ if (asize == -1 || bsize == -1)
+ goto error;
+
+ if (!equate_lines(&hashtable, a, b, asize, bsize))
+ goto error;
+
+ matches.count = 0;
+
+ if (bsize > 0) {
+ matches.matches = (struct matching_block *)guarded_malloc(sizeof(struct matching_block) * bsize);
+ if (matches.matches == NULL)
+ goto error;
+
+ backpointers = (Py_ssize_t *)guarded_malloc(sizeof(Py_ssize_t) * bsize * 4);
+ if (backpointers == NULL)
+ goto error;
+ } else {
+ matches.matches = NULL;
+ backpointers = NULL;
+ }
+
+ res = recurse_matches(&matches, &hashtable, backpointers,
+ a, b, alo, blo, ahi, bhi, maxrecursion);
+ if (!res)
+ goto error;
+
+ for (i = 0; i < matches.count; i++) {
+ for (j = 0; j < matches.matches[i].len; j++) {
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("ii", matches.matches[i].a + j,
+ matches.matches[i].b + j);
+#else
+ item = Py_BuildValue("nn", matches.matches[i].a + j,
+ matches.matches[i].b + j);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_Append(answer, item) != 0)
+ goto error;
+ }
+ }
+
+ free(backpointers);
+ free(matches.matches);
+ free(hashtable.table);
+ delete_lines(b, bsize);
+ delete_lines(a, asize);
+ Py_RETURN_NONE;
+
+error:
+ free(backpointers);
+ free(matches.matches);
+ free(hashtable.table);
+ delete_lines(b, bsize);
+ delete_lines(a, asize);
+ return NULL;
+}
+
+
+static PyObject *
+PatienceSequenceMatcher_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyObject *junk, *a, *b;
+ PatienceSequenceMatcher *self;
+
+ self = (PatienceSequenceMatcher *)type->tp_alloc(type, 0);
+ if (self != NULL) {
+
+ if (!PyArg_ParseTuple(args, "OOO", &junk, &a, &b)) {
+ Py_DECREF(self);
+ return NULL;
+ }
+
+ self->asize = load_lines(a, &(self->a));
+ self->bsize = load_lines(b, &(self->b));
+
+ if (self->asize == -1 || self->bsize == -1) {
+ Py_DECREF(self);
+ return NULL;
+ }
+
+ if (!equate_lines(&self->hashtable, self->a, self->b, self->asize, self->bsize)) {
+ Py_DECREF(self);
+ return NULL;
+ }
+
+ if (self->bsize > 0) {
+ self->backpointers = (Py_ssize_t *)guarded_malloc(sizeof(Py_ssize_t) * self->bsize * 4);
+ if (self->backpointers == NULL) {
+ Py_DECREF(self);
+ PyErr_NoMemory();
+ return NULL;
+ }
+ } else {
+ self->backpointers = NULL;
+ }
+
+ }
+
+ return (PyObject *)self;
+}
+
+
+static void
+PatienceSequenceMatcher_dealloc(PatienceSequenceMatcher* self)
+{
+ free(self->backpointers);
+ free(self->hashtable.table);
+ delete_lines(self->b, self->bsize);
+ delete_lines(self->a, self->asize);
+ self->ob_type->tp_free((PyObject *)self);
+}
+
+
+static char PatienceSequenceMatcher_get_matching_blocks_doc[] =
+ "Return list of triples describing matching subsequences.\n"
+ "\n"
+ "Each triple is of the form (i, j, n), and means that\n"
+ "a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in\n"
+ "i and in j.\n"
+ "\n"
+ "The last triple is a dummy, (len(a), len(b), 0), and is the only\n"
+ "triple with n==0.\n"
+ "\n"
+ ">>> s = PatienceSequenceMatcher(None, \"abxcd\", \"abcd\")\n"
+ ">>> s.get_matching_blocks()\n"
+ "[(0, 0, 2), (3, 2, 2), (5, 4, 0)]\n";
+
+static PyObject *
+PatienceSequenceMatcher_get_matching_blocks(PatienceSequenceMatcher* self)
+{
+ PyObject *answer, *item;
+ int res;
+ Py_ssize_t i;
+ struct matching_blocks matches;
+
+ matches.count = 0;
+ if (self->bsize > 0) {
+ matches.matches = (struct matching_block *)
+ guarded_malloc(sizeof(struct matching_block) * self->bsize);
+ if (matches.matches == NULL)
+ return PyErr_NoMemory();
+ } else
+ matches.matches = NULL;
+
+ res = recurse_matches(&matches, &self->hashtable, self->backpointers,
+ self->a, self->b, 0, 0,
+ self->asize, self->bsize, 10);
+ if (!res) {
+ free(matches.matches);
+ return PyErr_NoMemory();
+ }
+
+ answer = PyList_New(matches.count + 1);
+ if (answer == NULL) {
+ free(matches.matches);
+ return NULL;
+ }
+
+ for (i = 0; i < matches.count; i++) {
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("iii", matches.matches[i].a,
+ matches.matches[i].b, matches.matches[i].len);
+#else
+ item = Py_BuildValue("nnn", matches.matches[i].a,
+ matches.matches[i].b, matches.matches[i].len);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_SetItem(answer, i, item) != 0)
+ goto error;
+ }
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("iii", self->asize, self->bsize, 0);
+#else
+ item = Py_BuildValue("nnn", self->asize, self->bsize, 0);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_SetItem(answer, i, item) != 0)
+ goto error;
+
+ free(matches.matches);
+ return answer;
+
+error:
+ free(matches.matches);
+ Py_DECREF(answer);
+ return NULL;
+}
+
+
+static char PatienceSequenceMatcher_get_opcodes_doc[] =
+ "Return list of 5-tuples describing how to turn a into b.\n"
+ "\n"
+ "Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple\n"
+ "has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the\n"
+ "tuple preceding it, and likewise for j1 == the previous j2.\n"
+ "\n"
+ "The tags are strings, with these meanings:\n"
+ "\n"
+ "'replace': a[i1:i2] should be replaced by b[j1:j2]\n"
+ "'delete': a[i1:i2] should be deleted.\n"
+ " Note that j1==j2 in this case.\n"
+ "'insert': b[j1:j2] should be inserted at a[i1:i1].\n"
+ " Note that i1==i2 in this case.\n"
+ "'equal': a[i1:i2] == b[j1:j2]\n"
+ "\n"
+ ">>> a = \"qabxcd\"\n"
+ ">>> b = \"abycdf\"\n"
+ ">>> s = PatienceSequenceMatcher(None, a, b)\n"
+ ">>> for tag, i1, i2, j1, j2 in s.get_opcodes():\n"
+ "... print (\"%7s a[%d:%d] (%s) b[%d:%d] (%s)\" %\n"
+ "... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))\n"
+ " delete a[0:1] (q) b[0:0] ()\n"
+ " equal a[1:3] (ab) b[0:2] (ab)\n"
+ "replace a[3:4] (x) b[2:3] (y)\n"
+ " equal a[4:6] (cd) b[3:5] (cd)\n"
+ " insert a[6:6] () b[5:6] (f)\n";
+
+static PyObject *
+PatienceSequenceMatcher_get_opcodes(PatienceSequenceMatcher* self)
+{
+ PyObject *answer, *item;
+ Py_ssize_t i, j, k, ai, bj;
+ int tag, res;
+ struct matching_blocks matches;
+
+ matches.count = 0;
+ matches.matches = (struct matching_block *)guarded_malloc(sizeof(struct matching_block) * (self->bsize + 1));
+ if (matches.matches == NULL)
+ return PyErr_NoMemory();
+
+ res = recurse_matches(&matches, &self->hashtable, self->backpointers,
+ self->a, self->b, 0, 0,
+ self->asize, self->bsize, 10);
+ if (!res) {
+ free(matches.matches);
+ return PyErr_NoMemory();
+ }
+
+ matches.matches[matches.count].a = self->asize;
+ matches.matches[matches.count].b = self->bsize;
+ matches.matches[matches.count].len = 0;
+ matches.count++;
+
+ answer = PyList_New(0);
+ if (answer == NULL) {
+ free(matches.matches);
+ return NULL;
+ }
+
+ i = j = 0;
+ for (k = 0; k < matches.count; k++) {
+ ai = matches.matches[k].a;
+ bj = matches.matches[k].b;
+
+ tag = -1;
+ if (i < ai && j < bj)
+ tag = OP_REPLACE;
+ else if (i < ai)
+ tag = OP_DELETE;
+ else if (j < bj)
+ tag = OP_INSERT;
+
+ if (tag != -1) {
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("siiii", opcode_names[tag], i, ai, j, bj);
+#else
+ item = Py_BuildValue("snnnn", opcode_names[tag], i, ai, j, bj);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_Append(answer, item) != 0)
+ goto error;
+ }
+
+ i = ai + matches.matches[k].len;
+ j = bj + matches.matches[k].len;
+
+ if (matches.matches[k].len > 0) {
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("siiii", opcode_names[OP_EQUAL], ai, i, bj, j);
+#else
+ item = Py_BuildValue("snnnn", opcode_names[OP_EQUAL], ai, i, bj, j);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_Append(answer, item) != 0)
+ goto error;
+ }
+ }
+
+ free(matches.matches);
+ return answer;
+
+error:
+ free(matches.matches);
+ Py_DECREF(answer);
+ return NULL;
+}
+
+
+static char PatienceSequenceMatcher_get_grouped_opcodes_doc[] =
+ "Isolate change clusters by eliminating ranges with no changes.\n"
+ "\n"
+ "Return a list of groups with upto n lines of context.\n"
+ "Each group is in the same format as returned by get_opcodes().\n"
+ "\n"
+ ">>> from pprint import pprint\n"
+ ">>> a = map(str, range(1,40))\n"
+ ">>> b = a[:]\n"
+ ">>> b[8:8] = ['i'] # Make an insertion\n"
+ ">>> b[20] += 'x' # Make a replacement\n"
+ ">>> b[23:28] = [] # Make a deletion\n"
+ ">>> b[30] += 'y' # Make another replacement\n"
+ ">>> pprint(PatienceSequenceMatcher(None,a,b).get_grouped_opcodes())\n"
+ "[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],\n"
+ " [('equal', 16, 19, 17, 20),\n"
+ " ('replace', 19, 20, 20, 21),\n"
+ " ('equal', 20, 22, 21, 23),\n"
+ " ('delete', 22, 27, 23, 23),\n"
+ " ('equal', 27, 30, 23, 26)],\n"
+ " [('equal', 31, 34, 27, 30),\n"
+ " ('replace', 34, 35, 30, 31),\n"
+ " ('equal', 35, 38, 31, 34)]]\n";
+
+static PyObject *
+PatienceSequenceMatcher_get_grouped_opcodes(PatienceSequenceMatcher* self,
+ PyObject *args)
+{
+ PyObject *answer, *group, *item;
+ Py_ssize_t i, j, k, ai, bj, size, ncodes, tag;
+ Py_ssize_t i1, i2, j1, j2;
+ int n = 3, nn, res;
+ struct matching_blocks matches;
+ struct opcode *codes;
+
+ if (!PyArg_ParseTuple(args, "|i", &n))
+ return NULL;
+
+ matches.count = 0;
+ matches.matches = (struct matching_block *)guarded_malloc(sizeof(struct matching_block) * (self->bsize + 1));
+ if (matches.matches == NULL)
+ return PyErr_NoMemory();
+
+ res = recurse_matches(&matches, &self->hashtable, self->backpointers,
+ self->a, self->b, 0, 0,
+ self->asize, self->bsize, 10);
+ if (!res) {
+ free(matches.matches);
+ return PyErr_NoMemory();
+ }
+
+ matches.matches[matches.count].a = self->asize;
+ matches.matches[matches.count].b = self->bsize;
+ matches.matches[matches.count].len = 0;
+ matches.count++;
+
+ ncodes = 0;
+ codes = (struct opcode *)guarded_malloc(sizeof(struct opcode) * matches.count * 2);
+ if (codes == NULL) {
+ free(matches.matches);
+ return PyErr_NoMemory();
+ }
+
+ i = j = 0;
+ for (k = 0; k < matches.count; k++) {
+ ai = matches.matches[k].a;
+ bj = matches.matches[k].b;
+
+ tag = -1;
+ if (i < ai && j < bj)
+ tag = OP_REPLACE;
+ else if (i < ai)
+ tag = OP_DELETE;
+ else if (j < bj)
+ tag = OP_INSERT;
+
+ if (tag != -1) {
+ codes[ncodes].tag = tag;
+ codes[ncodes].i1 = i;
+ codes[ncodes].i2 = ai;
+ codes[ncodes].j1 = j;
+ codes[ncodes].j2 = bj;
+ ncodes++;
+ }
+
+ i = ai + matches.matches[k].len;
+ j = bj + matches.matches[k].len;
+
+ if (matches.matches[k].len > 0) {
+ codes[ncodes].tag = OP_EQUAL;
+ codes[ncodes].i1 = ai;
+ codes[ncodes].i2 = i;
+ codes[ncodes].j1 = bj;
+ codes[ncodes].j2 = j;
+ ncodes++;
+ }
+ }
+
+ if (ncodes == 0) {
+ codes[ncodes].tag = OP_EQUAL;
+ codes[ncodes].i1 = 0;
+ codes[ncodes].i2 = 1;
+ codes[ncodes].j1 = 0;
+ codes[ncodes].j2 = 1;
+ ncodes++;
+ }
+
+ /* fixup leading and trailing groups if they show no changes. */
+ if (codes[0].tag == OP_EQUAL) {
+ codes[0].i1 = MAX(codes[0].i1, codes[0].i2 - n);
+ codes[0].j1 = MAX(codes[0].j1, codes[0].j2 - n);
+ }
+ if (codes[ncodes - 1].tag == OP_EQUAL) {
+ codes[ncodes - 1].i2 = MIN(codes[ncodes - 1].i2,
+ codes[ncodes - 1].i1 + n);
+ codes[ncodes - 1].j2 = MIN(codes[ncodes - 1].j2,
+ codes[ncodes - 1].j1 + n);
+ }
+
+ group = NULL;
+
+ answer = PyList_New(0);
+ if (answer == NULL)
+ goto error;
+
+ group = PyList_New(0);
+ if (group == NULL)
+ goto error;
+
+ nn = n + n;
+ tag = -1;
+ for (i = 0; i < ncodes; i++) {
+ tag = codes[i].tag;
+ i1 = codes[i].i1;
+ i2 = codes[i].i2;
+ j1 = codes[i].j1;
+ j2 = codes[i].j2;
+ /* end the current group and start a new one whenever
+ there is a large range with no changes. */
+ if (tag == OP_EQUAL && i2 - i1 > nn) {
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("siiii", opcode_names[tag],
+ i1, MIN(i2, i1 + n), j1, MIN(j2, j1 + n));
+#else
+ item = Py_BuildValue("snnnn", opcode_names[tag],
+ i1, MIN(i2, i1 + n), j1, MIN(j2, j1 + n));
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_Append(group, item) != 0)
+ goto error;
+ if (PyList_Append(answer, group) != 0)
+ goto error;
+ group = PyList_New(0);
+ if (group == NULL)
+ goto error;
+ i1 = MAX(i1, i2 - n);
+ j1 = MAX(j1, j2 - n);
+ }
+#if PY_VERSION_HEX < 0x02050000
+ item = Py_BuildValue("siiii", opcode_names[tag], i1, i2, j1 ,j2);
+#else
+ item = Py_BuildValue("snnnn", opcode_names[tag], i1, i2, j1 ,j2);
+#endif
+ if (item == NULL)
+ goto error;
+ if (PyList_Append(group, item) != 0)
+ goto error;
+ }
+ size = PyList_Size(group);
+ if (size > 0 && !(size == 1 && tag == OP_EQUAL)) {
+ if (PyList_Append(answer, group) != 0)
+ goto error;
+ }
+ else
+ Py_DECREF(group);
+
+ free(codes);
+ free(matches.matches);
+ return answer;
+
+error:
+ free(codes);
+ free(matches.matches);
+ Py_DECREF(group);
+ Py_DECREF(answer);
+ return NULL;
+}
+
+
+static PyMethodDef PatienceSequenceMatcher_methods[] = {
+ {"get_matching_blocks",
+ (PyCFunction)PatienceSequenceMatcher_get_matching_blocks,
+ METH_NOARGS,
+ PatienceSequenceMatcher_get_matching_blocks_doc},
+ {"get_opcodes",
+ (PyCFunction)PatienceSequenceMatcher_get_opcodes,
+ METH_NOARGS,
+ PatienceSequenceMatcher_get_opcodes_doc},
+ {"get_grouped_opcodes",
+ (PyCFunction)PatienceSequenceMatcher_get_grouped_opcodes,
+ METH_VARARGS,
+ PatienceSequenceMatcher_get_grouped_opcodes_doc},
+ {NULL}
+};
+
+
+static char PatienceSequenceMatcher_doc[] =
+ "C implementation of PatienceSequenceMatcher";
+
+
+static PyTypeObject PatienceSequenceMatcherType = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+ "PatienceSequenceMatcher", /* tp_name */
+ sizeof(PatienceSequenceMatcher), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)PatienceSequenceMatcher_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags*/
+ PatienceSequenceMatcher_doc, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ PatienceSequenceMatcher_methods, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ PatienceSequenceMatcher_new, /* tp_new */
+};
+
+
+static PyMethodDef cpatiencediff_methods[] = {
+ {"unique_lcs_c", py_unique_lcs, METH_VARARGS},
+ {"recurse_matches_c", py_recurse_matches, METH_VARARGS},
+ {NULL, NULL}
+};
+
+
+PyMODINIT_FUNC
+init_patiencediff_c(void)
+{
+ PyObject* m;
+
+ if (PyType_Ready(&PatienceSequenceMatcherType) < 0)
+ return;
+
+ m = Py_InitModule3("_patiencediff_c", cpatiencediff_methods,
+ "C implementation of PatienceSequenceMatcher");
+ if (m == NULL)
+ return;
+
+ Py_INCREF(&PatienceSequenceMatcherType);
+ PyModule_AddObject(m, "PatienceSequenceMatcher_c",
+ (PyObject *)&PatienceSequenceMatcherType);
+}
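+
+/* Illustrative usage sketch (not part of this file): callers typically load
+ * this extension with a pure-Python fallback, along the lines of:
+ *
+ *     try:
+ *         from bzrlib._patiencediff_c import (
+ *             PatienceSequenceMatcher_c as PatienceSequenceMatcher)
+ *     except ImportError:
+ *         from bzrlib._patiencediff_py import (
+ *             PatienceSequenceMatcher_py as PatienceSequenceMatcher)
+ */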
+
+
+/* vim: sw=4 et
+ */
diff --git a/bzrlib/_patiencediff_py.py b/bzrlib/_patiencediff_py.py
new file mode 100755
index 0000000..7bc9525
--- /dev/null
+++ b/bzrlib/_patiencediff_py.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python
+# Copyright (C) 2005 Bram Cohen, Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bisect import bisect
+import difflib
+
+from bzrlib.trace import mutter
+
+
+__all__ = ['PatienceSequenceMatcher', 'unified_diff', 'unified_diff_files']
+
+
+def unique_lcs_py(a, b):
+ """Find the longest common subset for unique lines.
+
+ :param a: An indexable object (such as string or list of strings)
+ :param b: Another indexable object (such as string or list of strings)
+ :return: A list of tuples, one for each line which is matched.
+ [(line_in_a, line_in_b), ...]
+
+ This only matches lines which are unique on both sides.
+    This helps prevent common lines from over-influencing match
+    results.
+    The longest common subsequence is found using the Patience Sorting
+    algorithm: http://en.wikipedia.org/wiki/Patience_sorting
+ """
+ # set index[line in a] = position of line in a unless
+ # a is a duplicate, in which case it's set to None
+ index = {}
+ for i in xrange(len(a)):
+ line = a[i]
+ if line in index:
+ index[line] = None
+ else:
+            index[line] = i
+ # make btoa[i] = position of line i in a, unless
+ # that line doesn't occur exactly once in both,
+ # in which case it's set to None
+ btoa = [None] * len(b)
+ index2 = {}
+ for pos, line in enumerate(b):
+ next = index.get(line)
+ if next is not None:
+ if line in index2:
+ # unset the previous mapping, which we now know to
+ # be invalid because the line isn't unique
+ btoa[index2[line]] = None
+ del index[line]
+ else:
+ index2[line] = pos
+ btoa[pos] = next
+ # this is the Patience sorting algorithm
+ # see http://en.wikipedia.org/wiki/Patience_sorting
+ backpointers = [None] * len(b)
+ stacks = []
+ lasts = []
+ k = 0
+ for bpos, apos in enumerate(btoa):
+ if apos is None:
+ continue
+ # as an optimization, check if the next line comes at the end,
+ # because it usually does
+ if stacks and stacks[-1] < apos:
+ k = len(stacks)
+ # as an optimization, check if the next line comes right after
+ # the previous line, because usually it does
+ elif stacks and stacks[k] < apos and (k == len(stacks) - 1 or
+ stacks[k+1] > apos):
+ k += 1
+ else:
+ k = bisect(stacks, apos)
+ if k > 0:
+ backpointers[bpos] = lasts[k-1]
+ if k < len(stacks):
+ stacks[k] = apos
+ lasts[k] = bpos
+ else:
+ stacks.append(apos)
+ lasts.append(bpos)
+ if len(lasts) == 0:
+ return []
+ result = []
+ k = lasts[-1]
+ while k is not None:
+ result.append((btoa[k], k))
+ k = backpointers[k]
+ result.reverse()
+ return result
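+
+# Illustrative, hand-checked example (not part of the original module): with
+# a='abcde' and b='cdeab' every line is unique on both sides, but the patience
+# stacks keep only the longest increasing run of positions in a, which is the
+# 'c', 'd', 'e' block:
+#
+#   >>> unique_lcs_py('abcde', 'cdeab')
+#   [(2, 0), (3, 1), (4, 2)]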
+
+
+def recurse_matches_py(a, b, alo, blo, ahi, bhi, answer, maxrecursion):
+ """Find all of the matching text in the lines of a and b.
+
+ :param a: A sequence
+ :param b: Another sequence
+ :param alo: The start location of a to check, typically 0
+    :param blo: The start location of b to check, typically 0
+ :param ahi: The maximum length of a to check, typically len(a)
+ :param bhi: The maximum length of b to check, typically len(b)
+ :param answer: The return array. Will be filled with tuples
+ indicating [(line_in_a, line_in_b)]
+ :param maxrecursion: The maximum depth to recurse.
+ Must be a positive integer.
+ :return: None, the return value is in the parameter answer, which
+ should be a list
+
+ """
+ if maxrecursion < 0:
+ mutter('max recursion depth reached')
+        # this will never happen normally; this check is to prevent DoS attacks
+ return
+ oldlength = len(answer)
+ if alo == ahi or blo == bhi:
+ return
+ last_a_pos = alo-1
+ last_b_pos = blo-1
+ for apos, bpos in unique_lcs_py(a[alo:ahi], b[blo:bhi]):
+ # recurse between lines which are unique in each file and match
+ apos += alo
+ bpos += blo
+ # Most of the time, you will have a sequence of similar entries
+ if last_a_pos+1 != apos or last_b_pos+1 != bpos:
+ recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
+ apos, bpos, answer, maxrecursion - 1)
+ last_a_pos = apos
+ last_b_pos = bpos
+ answer.append((apos, bpos))
+ if len(answer) > oldlength:
+ # find matches between the last match and the end
+ recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
+ ahi, bhi, answer, maxrecursion - 1)
+ elif a[alo] == b[blo]:
+ # find matching lines at the very beginning
+ while alo < ahi and blo < bhi and a[alo] == b[blo]:
+ answer.append((alo, blo))
+ alo += 1
+ blo += 1
+ recurse_matches_py(a, b, alo, blo,
+ ahi, bhi, answer, maxrecursion - 1)
+ elif a[ahi - 1] == b[bhi - 1]:
+ # find matching lines at the very end
+ nahi = ahi - 1
+ nbhi = bhi - 1
+ while nahi > alo and nbhi > blo and a[nahi - 1] == b[nbhi - 1]:
+ nahi -= 1
+ nbhi -= 1
+ recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
+ nahi, nbhi, answer, maxrecursion - 1)
+ for i in xrange(ahi - nahi):
+ answer.append((nahi + i, nbhi + i))
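+
+# Illustrative, hand-checked example (not part of the original module): for
+# a='abc' and b='xabcy' the unique-lcs pass pairs up 'a', 'b' and 'c', and the
+# recursion finds nothing extra before the first match or after the last one:
+#
+#   >>> matches = []
+#   >>> recurse_matches_py('abc', 'xabcy', 0, 0, 3, 5, matches, 10)
+#   >>> matches
+#   [(0, 1), (1, 2), (2, 3)]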
+
+
+def _collapse_sequences(matches):
+ """Find sequences of lines.
+
+    Given a sequence of [(line_in_a, line_in_b), ...] pairs, find regions
+    where both indices increment together.
+ """
+ answer = []
+ start_a = start_b = None
+ length = 0
+ for i_a, i_b in matches:
+ if (start_a is not None
+ and (i_a == start_a + length)
+ and (i_b == start_b + length)):
+ length += 1
+ else:
+ if start_a is not None:
+ answer.append((start_a, start_b, length))
+ start_a = i_a
+ start_b = i_b
+ length = 1
+
+ if length != 0:
+ answer.append((start_a, start_b, length))
+
+ return answer
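+
+# Illustrative, hand-checked example (not part of the original module):
+# consecutive pairs collapse into (start_a, start_b, length) runs:
+#
+#   >>> _collapse_sequences([(0, 0), (1, 1), (2, 2), (5, 4)])
+#   [(0, 0, 3), (5, 4, 1)]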
+
+
+def _check_consistency(answer):
+    # For consistency's sake, make sure all matches are monotonically increasing
+ next_a = -1
+ next_b = -1
+ for (a, b, match_len) in answer:
+ if a < next_a:
+ raise ValueError('Non increasing matches for a')
+ if b < next_b:
+ raise ValueError('Non increasing matches for b')
+ next_a = a + match_len
+ next_b = b + match_len
+
+
+class PatienceSequenceMatcher_py(difflib.SequenceMatcher):
+ """Compare a pair of sequences using longest common subset."""
+
+ _do_check_consistency = True
+
+ def __init__(self, isjunk=None, a='', b=''):
+ if isjunk is not None:
+ raise NotImplementedError('Currently we do not support'
+ ' isjunk for sequence matching')
+ difflib.SequenceMatcher.__init__(self, isjunk, a, b)
+
+ def get_matching_blocks(self):
+ """Return list of triples describing matching subsequences.
+
+ Each triple is of the form (i, j, n), and means that
+ a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+ i and in j.
+
+ The last triple is a dummy, (len(a), len(b), 0), and is the only
+ triple with n==0.
+
+ >>> s = PatienceSequenceMatcher(None, "abxcd", "abcd")
+ >>> s.get_matching_blocks()
+ [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
+ """
+ # jam 20060525 This is the python 2.4.1 difflib get_matching_blocks
+        # implementation which uses __helper. 2.4.3 got rid of __helper in
+        # favour of doing it inline with a queue.
+ # We should consider doing the same for recurse_matches
+
+ if self.matching_blocks is not None:
+ return self.matching_blocks
+
+ matches = []
+ recurse_matches_py(self.a, self.b, 0, 0,
+ len(self.a), len(self.b), matches, 10)
+ # Matches now has individual line pairs of
+ # line A matches line B, at the given offsets
+ self.matching_blocks = _collapse_sequences(matches)
+ self.matching_blocks.append( (len(self.a), len(self.b), 0) )
+ if PatienceSequenceMatcher_py._do_check_consistency:
+ if __debug__:
+ _check_consistency(self.matching_blocks)
+
+ return self.matching_blocks
diff --git a/bzrlib/_readdir_py.py b/bzrlib/_readdir_py.py
new file mode 100644
index 0000000..870b2b5
--- /dev/null
+++ b/bzrlib/_readdir_py.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2006, 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Python implementation of readdir interface."""
+
+from __future__ import absolute_import
+
+import stat
+
+
+_directory = 'directory'
+_chardev = 'chardev'
+_block = 'block'
+_file = 'file'
+_fifo = 'fifo'
+_symlink = 'symlink'
+_socket = 'socket'
+_unknown = 'unknown'
+
+_formats = {
+ stat.S_IFDIR:'directory',
+ stat.S_IFCHR:'chardev',
+ stat.S_IFBLK:'block',
+ stat.S_IFREG:'file',
+ stat.S_IFIFO:'fifo',
+ stat.S_IFLNK:'symlink',
+ stat.S_IFSOCK:'socket',
+}
+
+
+def _kind_from_mode(stat_mode, _formats=_formats, _unknown='unknown'):
+ """Generate a file kind from a stat mode. This is used in walkdirs.
+
+    Its performance is critical: do not mutate without careful benchmarking.
+ """
+ try:
+ return _formats[stat_mode & 0170000]
+ except KeyError:
+ return _unknown
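+
+# Illustrative, hand-checked example (not part of the original module): masking
+# with 0170000 keeps only the file-type bits of the mode, so permission bits do
+# not affect the lookup:
+#
+#   >>> _kind_from_mode(stat.S_IFREG | 0644)
+#   'file'
+#   >>> _kind_from_mode(stat.S_IFDIR | 0755)
+#   'directory'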
diff --git a/bzrlib/_readdir_pyx.c b/bzrlib/_readdir_pyx.c
new file mode 100644
index 0000000..543e928
--- /dev/null
+++ b/bzrlib/_readdir_pyx.c
@@ -0,0 +1,1752 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:01:04 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+#include "errno.h"
+#include "unistd.h"
+#include "stdlib.h"
+#include "sys/types.h"
+#include "sys/stat.h"
+#include "fcntl.h"
+#include "dirent.h"
+#include "readdir.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Wrapper for readdir which returns files ordered by inode.";
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._readdir_pyx */
+
+
+/* Declarations from implementation of bzrlib._readdir_pyx */
+
+struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat {
+ PyObject_HEAD
+ struct stat _st;
+};
+
+struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader *__pyx_vtab;
+};
+
+
+
+struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader {
+ PyObject *(*_kind_from_mode)(struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *,int);
+};
+static struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader *__pyx_vtabptr_6bzrlib_12_readdir_pyx_UTF8DirReader;
+
+static PyTypeObject *__pyx_ptype_6bzrlib_12_readdir_pyx__Stat = 0;
+static PyTypeObject *__pyx_ptype_6bzrlib_12_readdir_pyx_UTF8DirReader = 0;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__directory;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__chardev;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__block;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__file;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__fifo;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__symlink;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__socket;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__unknown;
+static PyObject *__pyx_v_6bzrlib_12_readdir_pyx__safe_utf8;
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(int,char *,PyObject *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx__read_dir(PyObject *); /*proto*/
+
+static char __pyx_k1[] = "st_mode";
+static char __pyx_k2[] = "st_size";
+static char __pyx_k3[] = "st_mtime";
+static char __pyx_k4[] = "st_ctime";
+static char __pyx_k5[] = "/";
+static char __pyx_k6[] = "";
+static char __pyx_k7[] = "failed to strcat";
+static char __pyx_k8[] = ".";
+static char __pyx_k9[] = "open: ";
+static char __pyx_k10[] = "chdir: ";
+static char __pyx_k11[] = "opendir: ";
+static char __pyx_k12[] = "readdir: ";
+static char __pyx_k13[] = "lstat: ";
+static char __pyx_k14[] = "closedir: ";
+static char __pyx_k15[] = "return to orig_dir: ";
+static char __pyx_k16[] = "os";
+static char __pyx_k17[] = "sys";
+static char __pyx_k18[] = "directory";
+static char __pyx_k19[] = "chardev";
+static char __pyx_k20[] = "block";
+static char __pyx_k21[] = "file";
+static char __pyx_k22[] = "fifo";
+static char __pyx_k23[] = "symlink";
+static char __pyx_k24[] = "socket";
+static char __pyx_k25[] = "unknown";
+static char __pyx_k26[] = "bzrlib";
+static char __pyx_k27[] = "osutils";
+static char __pyx_k28[] = "safe_utf8";
+
+static PyObject *__pyx_n_block;
+static PyObject *__pyx_n_bzrlib;
+static PyObject *__pyx_n_chardev;
+static PyObject *__pyx_n_directory;
+static PyObject *__pyx_n_fifo;
+static PyObject *__pyx_n_file;
+static PyObject *__pyx_n_os;
+static PyObject *__pyx_n_osutils;
+static PyObject *__pyx_n_safe_utf8;
+static PyObject *__pyx_n_socket;
+static PyObject *__pyx_n_st_ctime;
+static PyObject *__pyx_n_st_mode;
+static PyObject *__pyx_n_st_mtime;
+static PyObject *__pyx_n_st_size;
+static PyObject *__pyx_n_symlink;
+static PyObject *__pyx_n_sys;
+static PyObject *__pyx_n_unknown;
+
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k6p;
+static PyObject *__pyx_k7p;
+static PyObject *__pyx_k8p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_block, 1, __pyx_k20, sizeof(__pyx_k20)},
+ {&__pyx_n_bzrlib, 1, __pyx_k26, sizeof(__pyx_k26)},
+ {&__pyx_n_chardev, 1, __pyx_k19, sizeof(__pyx_k19)},
+ {&__pyx_n_directory, 1, __pyx_k18, sizeof(__pyx_k18)},
+ {&__pyx_n_fifo, 1, __pyx_k22, sizeof(__pyx_k22)},
+ {&__pyx_n_file, 1, __pyx_k21, sizeof(__pyx_k21)},
+ {&__pyx_n_os, 1, __pyx_k16, sizeof(__pyx_k16)},
+ {&__pyx_n_osutils, 1, __pyx_k27, sizeof(__pyx_k27)},
+ {&__pyx_n_safe_utf8, 1, __pyx_k28, sizeof(__pyx_k28)},
+ {&__pyx_n_socket, 1, __pyx_k24, sizeof(__pyx_k24)},
+ {&__pyx_n_st_ctime, 1, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_n_st_mode, 1, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_n_st_mtime, 1, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_n_st_size, 1, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_n_symlink, 1, __pyx_k23, sizeof(__pyx_k23)},
+ {&__pyx_n_sys, 1, __pyx_k17, sizeof(__pyx_k17)},
+ {&__pyx_n_unknown, 1, __pyx_k25, sizeof(__pyx_k25)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k6p, 0, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_k7p, 0, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_k8p, 0, __pyx_k8, sizeof(__pyx_k8)},
+ {0, 0, 0, 0}
+};
+
+static PyObject *__pyx_d1;
+
+
+/* Implementation of bzrlib._readdir_pyx */
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_6st_dev___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_6st_dev___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_v_self)->_st.st_dev); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.st_dev.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_6st_ino___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_6st_ino___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyLong_FromUnsignedLong(((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_v_self)->_st.st_ino); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.st_ino.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_7st_mode___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_7st_mode___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_v_self)->_st.st_mode); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.st_mode.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_8st_ctime___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_8st_ctime___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_v_self)->_st.st_ctime); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.st_ctime.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_8st_mtime___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_8st_mtime___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromLong(((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_v_self)->_st.st_mtime); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.st_mtime.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_7st_size___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat_7st_size___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyLong_FromUnsignedLongLong(((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_v_self)->_st.st_size); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.st_size.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat___repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_5_Stat___repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ PyObject *__pyx_6 = 0;
+ PyObject *__pyx_7 = 0;
+ PyObject *__pyx_8 = 0;
+ PyObject *__pyx_9 = 0;
+ PyObject *__pyx_10 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyObject_GetAttr(__pyx_v_self, __pyx_n_st_mode); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_2 = PyInt_FromLong(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromLong(0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_4 = PyInt_FromLong(0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_5 = PyInt_FromLong(0); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_6 = PyInt_FromLong(0); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_7 = PyObject_GetAttr(__pyx_v_self, __pyx_n_st_size); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ __pyx_8 = PyObject_GetAttr(__pyx_v_self, __pyx_n_st_mtime); if (!__pyx_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ __pyx_9 = PyObject_GetAttr(__pyx_v_self, __pyx_n_st_ctime); if (!__pyx_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ __pyx_10 = PyTuple_New(10); if (!__pyx_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_10, 0, __pyx_1);
+ PyTuple_SET_ITEM(__pyx_10, 1, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_10, 2, __pyx_3);
+ PyTuple_SET_ITEM(__pyx_10, 3, __pyx_4);
+ PyTuple_SET_ITEM(__pyx_10, 4, __pyx_5);
+ PyTuple_SET_ITEM(__pyx_10, 5, __pyx_6);
+ PyTuple_SET_ITEM(__pyx_10, 6, __pyx_7);
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_10, 7, Py_None);
+ PyTuple_SET_ITEM(__pyx_10, 8, __pyx_8);
+ PyTuple_SET_ITEM(__pyx_10, 9, __pyx_9);
+ __pyx_1 = 0;
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_4 = 0;
+ __pyx_5 = 0;
+ __pyx_6 = 0;
+ __pyx_7 = 0;
+ __pyx_8 = 0;
+ __pyx_9 = 0;
+ __pyx_1 = PyObject_Repr(__pyx_10); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; goto __pyx_L1;}
+ Py_DECREF(__pyx_10); __pyx_10 = 0;
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ Py_XDECREF(__pyx_6);
+ Py_XDECREF(__pyx_7);
+ Py_XDECREF(__pyx_8);
+ Py_XDECREF(__pyx_9);
+ Py_XDECREF(__pyx_10);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._Stat.__repr__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_kind_from_mode(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_readdir_pyx_13UTF8DirReader_kind_from_mode[] = "Get the kind of a path from a mode status.";
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_kind_from_mode(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ int __pyx_v_mode;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"mode",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "i", __pyx_argnames, &__pyx_v_mode)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader *)((struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *)__pyx_v_self)->__pyx_vtab)->_kind_from_mode(((struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *)__pyx_v_self),__pyx_v_mode); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx.UTF8DirReader.kind_from_mode");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader__kind_from_mode(struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *__pyx_v_self,int __pyx_v_mode) {
+ PyObject *__pyx_r;
+ int __pyx_1;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":183 */
+ __pyx_1 = S_ISREG(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__file);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__file;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":185 */
+ __pyx_1 = S_ISDIR(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__directory);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__directory;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":187 */
+ __pyx_1 = S_ISCHR(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__chardev);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__chardev;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":189 */
+ __pyx_1 = S_ISBLK(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__block);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__block;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":191 */
+ __pyx_1 = S_ISLNK(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__symlink);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__symlink;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":193 */
+ __pyx_1 = S_ISFIFO(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__fifo);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__fifo;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":195 */
+ __pyx_1 = S_ISSOCK(__pyx_v_mode);
+ if (__pyx_1) {
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__socket);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__socket;
+ goto __pyx_L0;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":197 */
+ Py_INCREF(__pyx_v_6bzrlib_12_readdir_pyx__unknown);
+ __pyx_r = __pyx_v_6bzrlib_12_readdir_pyx__unknown;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_top_prefix_to_starting_dir(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_readdir_pyx_13UTF8DirReader_top_prefix_to_starting_dir[] = "See DirReader.top_prefix_to_starting_dir.";
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_top_prefix_to_starting_dir(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_top = 0;
+ PyObject *__pyx_v_prefix = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"top","prefix",0};
+ __pyx_v_prefix = __pyx_d1;
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|O", __pyx_argnames, &__pyx_v_top, &__pyx_v_prefix)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_top);
+ Py_INCREF(__pyx_v_prefix);
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_prefix);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_prefix);
+ __pyx_2 = PyObject_CallObject(__pyx_v_6bzrlib_12_readdir_pyx__safe_utf8, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_top);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_top);
+ __pyx_3 = PyObject_CallObject(__pyx_v_6bzrlib_12_readdir_pyx__safe_utf8, __pyx_1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyTuple_New(5); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_2);
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_1, 1, Py_None);
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_1, 2, Py_None);
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_1, 3, Py_None);
+ PyTuple_SET_ITEM(__pyx_1, 4, __pyx_3);
+ __pyx_2 = 0;
+ __pyx_3 = 0;
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx.UTF8DirReader.top_prefix_to_starting_dir");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_top);
+ Py_DECREF(__pyx_v_prefix);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_read_dir(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_12_readdir_pyx_13UTF8DirReader_read_dir[] = "Read a single directory from a utf8 file system.\n\n All paths in and out are utf8.\n\n This sub-function is called when we know the filesystem is already in utf8\n encoding. So we don\'t need to transcode filenames.\n\n See DirReader.read_dir for details.\n ";
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_read_dir(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_prefix = 0;
+ PyObject *__pyx_v_top = 0;
+ PyObject *__pyx_v_newval;
+ int __pyx_v_index;
+ int __pyx_v_length;
+ void *__pyx_v_atuple;
+ PyObject *__pyx_v_name;
+ PyObject *__pyx_v_new_val_obj;
+ PyObject *__pyx_v_relprefix;
+ PyObject *__pyx_v_top_slash;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_r;
+ Py_ssize_t __pyx_1;
+ PyObject *__pyx_2 = 0;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"prefix","top",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OO", __pyx_argnames, &__pyx_v_prefix, &__pyx_v_top)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_prefix);
+ Py_INCREF(__pyx_v_top);
+ __pyx_v_newval = Py_None; Py_INCREF(Py_None);
+ __pyx_v_name = Py_None; Py_INCREF(Py_None);
+ __pyx_v_relprefix = Py_None; Py_INCREF(Py_None);
+ __pyx_v_top_slash = Py_None; Py_INCREF(Py_None);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":223 */
+ __pyx_1 = PyString_Size(__pyx_v_prefix);
+ if (__pyx_1) {
+ __pyx_2 = PyNumber_Add(__pyx_v_prefix, __pyx_k5p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_relprefix);
+ __pyx_v_relprefix = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ Py_INCREF(__pyx_k6p);
+ Py_DECREF(__pyx_v_relprefix);
+ __pyx_v_relprefix = __pyx_k6p;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":227 */
+ __pyx_2 = PyNumber_Add(__pyx_v_top, __pyx_k5p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_top_slash);
+ __pyx_v_top_slash = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":231 */
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx__read_dir(__pyx_v_top); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":232 */
+ __pyx_1 = PyObject_Length(__pyx_v_result); if (__pyx_1 == -1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; goto __pyx_L1;}
+ __pyx_v_length = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":234 */
+ for (__pyx_v_index = 0; __pyx_v_index < __pyx_v_length; ++__pyx_v_index) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":235 */
+ __pyx_v_atuple = PyList_GET_ITEM(__pyx_v_result,__pyx_v_index);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":236 */
+ Py_INCREF(((PyObject *)PyTuple_GET_ITEM(__pyx_v_atuple,1)));
+ Py_DECREF(__pyx_v_name);
+ __pyx_v_name = ((PyObject *)PyTuple_GET_ITEM(__pyx_v_atuple,1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":241 */
+ __pyx_v_new_val_obj = ((PyObject *)__pyx_v_relprefix);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":242 */
+ Py_INCREF(__pyx_v_relprefix);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":243 */
+ PyString_Concat((&__pyx_v_new_val_obj),__pyx_v_name);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":244 */
+ __pyx_3 = (NULL == __pyx_v_new_val_obj);
+ if (__pyx_3) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_INCREF(__pyx_k7p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k7p);
+ __pyx_4 = PyObject_CallObject(PyExc_Exception, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":248 */
+ __pyx_3 = PyTuple_SetItem(__pyx_v_atuple,0,__pyx_v_new_val_obj); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":250 */
+ __pyx_2 = PyObject_GetAttr(((PyObject *)PyTuple_GET_ITEM(__pyx_v_atuple,3)), __pyx_n_st_mode); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; goto __pyx_L1;}
+ __pyx_3 = PyInt_AsLong(__pyx_2); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_4 = ((struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader *)((struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *)__pyx_v_self)->__pyx_vtab)->_kind_from_mode(((struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *)__pyx_v_self),__pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_newval);
+ __pyx_v_newval = __pyx_4;
+ __pyx_4 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":252 */
+ Py_INCREF(__pyx_v_newval);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":253 */
+ __pyx_3 = PyTuple_SetItem(__pyx_v_atuple,2,__pyx_v_newval); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; goto __pyx_L1;}
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":257 */
+ __pyx_v_new_val_obj = ((PyObject *)__pyx_v_top_slash);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":258 */
+ Py_INCREF(__pyx_v_top_slash);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":259 */
+ PyString_Concat((&__pyx_v_new_val_obj),__pyx_v_name);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":260 */
+ __pyx_3 = (NULL == __pyx_v_new_val_obj);
+ if (__pyx_3) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; goto __pyx_L1;}
+ Py_INCREF(__pyx_k7p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k7p);
+ __pyx_4 = PyObject_CallObject(PyExc_Exception, __pyx_2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; goto __pyx_L1;}
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":264 */
+ __pyx_3 = PyTuple_SetItem(__pyx_v_atuple,4,__pyx_v_new_val_obj); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; goto __pyx_L1;}
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":265 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx.UTF8DirReader.read_dir");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_newval);
+ Py_DECREF(__pyx_v_name);
+ Py_DECREF(__pyx_v_relprefix);
+ Py_DECREF(__pyx_v_top_slash);
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_prefix);
+ Py_DECREF(__pyx_v_top);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(int __pyx_v_errnum,char *__pyx_v_msg_prefix,PyObject *__pyx_v_path) {
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_path);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":269 */
+ __pyx_1 = (__pyx_v_errnum == EINTR);
+ if (__pyx_1) {
+ __pyx_1 = PyErr_CheckSignals(); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":271 */
+ __pyx_2 = PyInt_FromLong(__pyx_v_errnum); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+ __pyx_3 = PyString_FromString(__pyx_v_msg_prefix); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+ __pyx_4 = PyString_FromString(strerror(__pyx_v_errnum)); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+ __pyx_5 = PyNumber_Add(__pyx_3, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ PyTuple_SET_ITEM(__pyx_3, 1, __pyx_5);
+ Py_INCREF(__pyx_v_path);
+ PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_path);
+ __pyx_2 = 0;
+ __pyx_5 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_OSError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx.raise_os_error");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_path);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_12_readdir_pyx__read_dir(PyObject *__pyx_v_path) {
+ DIR *__pyx_v_the_dir;
+ dirent *__pyx_v_entry;
+ dirent __pyx_v_sentinel;
+ char *__pyx_v_name;
+ int __pyx_v_stat_result;
+ struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *__pyx_v_statvalue;
+ int __pyx_v_orig_dir_fd;
+ PyObject *__pyx_v_result;
+ PyObject *__pyx_v_failed;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ char *__pyx_3;
+ int __pyx_4;
+ PyObject *__pyx_5 = 0;
+ PyObject *__pyx_6 = 0;
+ Py_INCREF(__pyx_v_path);
+ __pyx_v_statvalue = ((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)Py_None); Py_INCREF(Py_None);
+ __pyx_v_result = Py_None; Py_INCREF(Py_None);
+ __pyx_v_failed = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":295 */
+ if (PyObject_Cmp(__pyx_v_path, __pyx_k6p, &__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; goto __pyx_L1;}
+ __pyx_1 = __pyx_1 != 0;
+ if (__pyx_1) {
+ if (PyObject_Cmp(__pyx_v_path, __pyx_k8p, &__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; goto __pyx_L1;}
+ __pyx_1 = __pyx_1 != 0;
+ }
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":299 */
+ __pyx_v_orig_dir_fd = open(__pyx_k8,O_RDONLY,0);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":300 */
+ __pyx_1 = (__pyx_v_orig_dir_fd == (-1));
+ if (__pyx_1) {
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k9,__pyx_k8p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":302 */
+ __pyx_3 = PyString_AsString(__pyx_v_path); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; goto __pyx_L1;}
+ __pyx_1 = ((-1) == chdir(__pyx_3));
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":305 */
+ close(__pyx_v_orig_dir_fd);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":306 */
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k10,__pyx_v_path); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ __pyx_v_orig_dir_fd = (-1);
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":310 */
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":311 */
+ __pyx_v_the_dir = opendir(__pyx_k8);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":312 */
+ __pyx_1 = (NULL == __pyx_v_the_dir);
+ if (__pyx_1) {
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k11,__pyx_v_path); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; goto __pyx_L6;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":314 */
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":315 */
+ __pyx_2 = PyList_New(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; goto __pyx_L10;}
+ Py_DECREF(__pyx_v_result);
+ __pyx_v_result = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":316 */
+ __pyx_v_entry = (&__pyx_v_sentinel);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":317 */
+ while (1) {
+ __pyx_1 = (__pyx_v_entry != NULL);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":321 */
+ while (1) {
+ __pyx_1 = PyObject_IsTrue(Py_True); if (__pyx_1 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 321; goto __pyx_L10;}
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":322 */
+ errno = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":323 */
+ __pyx_v_entry = readdir(__pyx_v_the_dir);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":324 */
+ __pyx_1 = (__pyx_v_entry == NULL);
+ if (__pyx_1) {
+ __pyx_1 = (errno == EAGAIN);
+ if (!__pyx_1) {
+ __pyx_1 = (errno == EINTR);
+ }
+ }
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":325 */
+ __pyx_1 = (errno == EINTR);
+ if (__pyx_1) {
+ __pyx_1 = PyErr_CheckSignals(); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; goto __pyx_L10;}
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":328 */
+ goto __pyx_L14;
+ goto __pyx_L16;
+ }
+ /*else*/ {
+ goto __pyx_L15;
+ }
+ __pyx_L16:;
+ __pyx_L14:;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":331 */
+ __pyx_1 = (__pyx_v_entry == NULL);
+ if (__pyx_1) {
+ __pyx_1 = (errno == ENOTDIR);
+ if (!__pyx_1) {
+ __pyx_1 = (errno == 0);
+ }
+ if (__pyx_1) {
+ goto __pyx_L12;
+ goto __pyx_L19;
+ }
+ /*else*/ {
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k12,__pyx_v_path); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; goto __pyx_L10;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ }
+ __pyx_L19:;
+ goto __pyx_L18;
+ }
+ __pyx_L18:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":339 */
+ __pyx_v_name = __pyx_v_entry->d_name;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":340 */
+ __pyx_1 = ((__pyx_v_name[0]) == '.');
+ if (__pyx_1) {
+ __pyx_1 = ((__pyx_v_name[1]) == 0);
+ if (!__pyx_1) {
+ __pyx_1 = ((__pyx_v_name[1]) == '.');
+ if (__pyx_1) {
+ __pyx_1 = ((__pyx_v_name[2]) == 0);
+ }
+ }
+ }
+ __pyx_4 = (!__pyx_1);
+ if (__pyx_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":344 */
+ __pyx_2 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_12_readdir_pyx__Stat), 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 344; goto __pyx_L10;}
+ Py_DECREF(((PyObject *)__pyx_v_statvalue));
+ __pyx_v_statvalue = ((struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat *)__pyx_2);
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":345 */
+ __pyx_v_stat_result = lstat(__pyx_v_entry->d_name,(&__pyx_v_statvalue->_st));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":346 */
+ __pyx_1 = (__pyx_v_stat_result != 0);
+ if (__pyx_1) {
+ __pyx_4 = (errno != ENOENT);
+ if (__pyx_4) {
+ __pyx_2 = PyNumber_Add(__pyx_v_path, __pyx_k5p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L10;}
+ __pyx_5 = PyString_FromString(__pyx_v_entry->d_name); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L10;}
+ __pyx_6 = PyNumber_Add(__pyx_2, __pyx_5); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; goto __pyx_L10;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k13,__pyx_6); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 348; goto __pyx_L10;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L22;
+ }
+ /*else*/ {
+ goto __pyx_L12;
+ }
+ __pyx_L22:;
+ goto __pyx_L21;
+ }
+ __pyx_L21:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":362 */
+ __pyx_5 = PyLong_FromUnsignedLong(__pyx_v_entry->d_ino); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L10;}
+ __pyx_6 = PyString_FromString(__pyx_v_entry->d_name); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L10;}
+ __pyx_2 = PyTuple_New(5); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L10;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_5);
+ PyTuple_SET_ITEM(__pyx_2, 1, __pyx_6);
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_2, 2, Py_None);
+ Py_INCREF(((PyObject *)__pyx_v_statvalue));
+ PyTuple_SET_ITEM(__pyx_2, 3, ((PyObject *)__pyx_v_statvalue));
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_2, 4, Py_None);
+ __pyx_5 = 0;
+ __pyx_6 = 0;
+ __pyx_1 = PyList_Append(__pyx_v_result,__pyx_2); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; goto __pyx_L10;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L20;
+ }
+ __pyx_L20:;
+ __pyx_L12:;
+ }
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L11;
+ __pyx_L10: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_5); __pyx_5 = 0;
+ Py_XDECREF(__pyx_6); __pyx_6 = 0;
+ Py_XDECREF(__pyx_2); __pyx_2 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+ __pyx_4 = ((-1) == closedir(__pyx_v_the_dir));
+ if (__pyx_4) {
+ __pyx_5 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k14,__pyx_v_path); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; goto __pyx_L23;}
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ goto __pyx_L24;
+ }
+ __pyx_L24:;
+ goto __pyx_L25;
+ __pyx_L23:;
+ if (__pyx_why == 4) {
+ Py_XDECREF(__pyx_exc_type);
+ Py_XDECREF(__pyx_exc_value);
+ Py_XDECREF(__pyx_exc_tb);
+ }
+ goto __pyx_L6;
+ __pyx_L25:;
+ switch (__pyx_why) {
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L6;
+ }
+ }
+ }
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L7;
+ __pyx_L6: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_6); __pyx_6 = 0;
+ Py_XDECREF(__pyx_2); __pyx_2 = 0;
+ Py_XDECREF(__pyx_5); __pyx_5 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+ __pyx_1 = ((-1) != __pyx_v_orig_dir_fd);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":369 */
+ Py_INCREF(Py_False);
+ Py_DECREF(__pyx_v_failed);
+ __pyx_v_failed = Py_False;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":370 */
+ __pyx_4 = ((-1) == fchdir(__pyx_v_orig_dir_fd));
+ if (__pyx_4) {
+ Py_INCREF(Py_True);
+ Py_DECREF(__pyx_v_failed);
+ __pyx_v_failed = Py_True;
+ goto __pyx_L28;
+ }
+ __pyx_L28:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":373 */
+ __pyx_6 = PyInt_FromLong(((-1) == close(__pyx_v_orig_dir_fd))); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L26;}
+ __pyx_1 = PyObject_IsTrue(__pyx_6); if (__pyx_1 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L26;}
+ if (!__pyx_1) {
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ __pyx_6 = __pyx_v_failed;
+ Py_INCREF(__pyx_6);
+ }
+ __pyx_4 = PyObject_IsTrue(__pyx_6); if (__pyx_4 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; goto __pyx_L26;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ if (__pyx_4) {
+ __pyx_2 = __pyx_f_6bzrlib_12_readdir_pyx_raise_os_error(errno,__pyx_k15,__pyx_k6p); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; goto __pyx_L26;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ goto __pyx_L29;
+ }
+ __pyx_L29:;
+ goto __pyx_L27;
+ }
+ __pyx_L27:;
+ goto __pyx_L30;
+ __pyx_L26:;
+ if (__pyx_why == 4) {
+ Py_XDECREF(__pyx_exc_type);
+ Py_XDECREF(__pyx_exc_value);
+ Py_XDECREF(__pyx_exc_tb);
+ }
+ goto __pyx_L1;
+ __pyx_L30:;
+ switch (__pyx_why) {
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1;
+ }
+ }
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":376 */
+ Py_INCREF(__pyx_v_result);
+ __pyx_r = __pyx_v_result;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_5);
+ Py_XDECREF(__pyx_6);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx._read_dir");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_statvalue);
+ Py_DECREF(__pyx_v_result);
+ Py_DECREF(__pyx_v_failed);
+ Py_DECREF(__pyx_v_path);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_tp_new_6bzrlib_12_readdir_pyx__Stat(PyTypeObject *t, PyObject *a, PyObject *k) {
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_12_readdir_pyx__Stat(PyObject *o) {
+ (*o->ob_type->tp_free)(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_dev(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_12_readdir_pyx_5_Stat_6st_dev___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_ino(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_12_readdir_pyx_5_Stat_6st_ino___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_mode(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_12_readdir_pyx_5_Stat_7st_mode___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_ctime(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_12_readdir_pyx_5_Stat_8st_ctime___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_mtime(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_12_readdir_pyx_5_Stat_8st_mtime___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_size(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_12_readdir_pyx_5_Stat_7st_size___get__(o);
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_12_readdir_pyx__Stat[] = {
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_12_readdir_pyx__Stat[] = {
+ {"st_dev", __pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_dev, 0, 0, 0},
+ {"st_ino", __pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_ino, 0, 0, 0},
+ {"st_mode", __pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_mode, 0, 0, 0},
+ {"st_ctime", __pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_ctime, 0, 0, 0},
+ {"st_mtime", __pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_mtime, 0, 0, 0},
+ {"st_size", __pyx_getprop_6bzrlib_12_readdir_pyx_5_Stat_st_size, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number__Stat = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence__Stat = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping__Stat = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer__Stat = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_12_readdir_pyx__Stat = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._readdir_pyx._Stat", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_12_readdir_pyx__Stat), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_12_readdir_pyx__Stat, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ __pyx_f_6bzrlib_12_readdir_pyx_5_Stat___repr__, /*tp_repr*/
+ &__pyx_tp_as_number__Stat, /*tp_as_number*/
+ &__pyx_tp_as_sequence__Stat, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping__Stat, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer__Stat, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "Represent a \'stat\' result.", /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_12_readdir_pyx__Stat, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_6bzrlib_12_readdir_pyx__Stat, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_12_readdir_pyx__Stat, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+static struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader __pyx_vtable_6bzrlib_12_readdir_pyx_UTF8DirReader;
+
+static PyObject *__pyx_tp_new_6bzrlib_12_readdir_pyx_UTF8DirReader(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_12_readdir_pyx_UTF8DirReader **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_12_readdir_pyx_UTF8DirReader;
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_12_readdir_pyx_UTF8DirReader(PyObject *o) {
+ (*o->ob_type->tp_free)(o);
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_12_readdir_pyx_UTF8DirReader[] = {
+ {"kind_from_mode", (PyCFunction)__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_kind_from_mode, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_readdir_pyx_13UTF8DirReader_kind_from_mode},
+ {"top_prefix_to_starting_dir", (PyCFunction)__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_top_prefix_to_starting_dir, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_readdir_pyx_13UTF8DirReader_top_prefix_to_starting_dir},
+ {"read_dir", (PyCFunction)__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader_read_dir, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_12_readdir_pyx_13UTF8DirReader_read_dir},
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_UTF8DirReader = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_UTF8DirReader = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_UTF8DirReader = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_UTF8DirReader = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_12_readdir_pyx_UTF8DirReader = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._readdir_pyx.UTF8DirReader", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_12_readdir_pyx_UTF8DirReader), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_12_readdir_pyx_UTF8DirReader, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_UTF8DirReader, /*tp_as_number*/
+ &__pyx_tp_as_sequence_UTF8DirReader, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_UTF8DirReader, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_UTF8DirReader, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "A dir reader for utf8 file systems.", /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_12_readdir_pyx_UTF8DirReader, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_12_readdir_pyx_UTF8DirReader, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+
+static struct PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_readdir_pyx(void); /*proto*/
+PyMODINIT_FUNC init_readdir_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_readdir_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ __pyx_v_6bzrlib_12_readdir_pyx__directory = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__chardev = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__block = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__file = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__fifo = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__symlink = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__socket = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__unknown = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_12_readdir_pyx__safe_utf8 = Py_None; Py_INCREF(Py_None);
+ if (PyType_Ready(&__pyx_type_6bzrlib_12_readdir_pyx__Stat) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "_Stat", (PyObject *)&__pyx_type_6bzrlib_12_readdir_pyx__Stat) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_12_readdir_pyx__Stat = &__pyx_type_6bzrlib_12_readdir_pyx__Stat;
+ __pyx_vtabptr_6bzrlib_12_readdir_pyx_UTF8DirReader = &__pyx_vtable_6bzrlib_12_readdir_pyx_UTF8DirReader;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_12_readdir_pyx_UTF8DirReader._kind_from_mode = (void(*)(void))__pyx_f_6bzrlib_12_readdir_pyx_13UTF8DirReader__kind_from_mode;
+ if (PyType_Ready(&__pyx_type_6bzrlib_12_readdir_pyx_UTF8DirReader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; goto __pyx_L1;}
+ if (__Pyx_SetVtable(__pyx_type_6bzrlib_12_readdir_pyx_UTF8DirReader.tp_dict, __pyx_vtabptr_6bzrlib_12_readdir_pyx_UTF8DirReader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "UTF8DirReader", (PyObject *)&__pyx_type_6bzrlib_12_readdir_pyx_UTF8DirReader) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_12_readdir_pyx_UTF8DirReader = &__pyx_type_6bzrlib_12_readdir_pyx_UTF8DirReader;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":20 */
+ __pyx_1 = __Pyx_Import(__pyx_n_os, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_os, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":21 */
+ __pyx_1 = __Pyx_Import(__pyx_n_sys, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_sys, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":110 */
+ Py_INCREF(__pyx_n_directory);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__directory);
+ __pyx_v_6bzrlib_12_readdir_pyx__directory = __pyx_n_directory;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":112 */
+ Py_INCREF(__pyx_n_chardev);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__chardev);
+ __pyx_v_6bzrlib_12_readdir_pyx__chardev = __pyx_n_chardev;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":114 */
+ Py_INCREF(__pyx_n_block);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__block);
+ __pyx_v_6bzrlib_12_readdir_pyx__block = __pyx_n_block;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":116 */
+ Py_INCREF(__pyx_n_file);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__file);
+ __pyx_v_6bzrlib_12_readdir_pyx__file = __pyx_n_file;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":118 */
+ Py_INCREF(__pyx_n_fifo);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__fifo);
+ __pyx_v_6bzrlib_12_readdir_pyx__fifo = __pyx_n_fifo;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":120 */
+ Py_INCREF(__pyx_n_symlink);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__symlink);
+ __pyx_v_6bzrlib_12_readdir_pyx__symlink = __pyx_n_symlink;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":122 */
+ Py_INCREF(__pyx_n_socket);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__socket);
+ __pyx_v_6bzrlib_12_readdir_pyx__socket = __pyx_n_socket;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":124 */
+ Py_INCREF(__pyx_n_unknown);
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__unknown);
+ __pyx_v_6bzrlib_12_readdir_pyx__unknown = __pyx_n_unknown;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":169 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_osutils);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_osutils);
+ __pyx_2 = __Pyx_Import(__pyx_n_bzrlib, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_osutils); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_osutils, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":172 */
+ __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_n_osutils); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; goto __pyx_L1;}
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_safe_utf8); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ Py_DECREF(__pyx_v_6bzrlib_12_readdir_pyx__safe_utf8);
+ __pyx_v_6bzrlib_12_readdir_pyx__safe_utf8 = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":199 */
+ Py_INCREF(__pyx_k6p);
+ __pyx_d1 = __pyx_k6p;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_readdir_pyx.pyx":274 */
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._readdir_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_readdir_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+ PyObject *pycobj = 0;
+ int result;
+
+ pycobj = PyCObject_FromVoidPtr(vtable, 0);
+ if (!pycobj)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0)
+ goto bad;
+ result = 0;
+ goto done;
+
+bad:
+ result = -1;
+done:
+ Py_XDECREF(pycobj);
+ return result;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *__import__ = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+ if (!__import__)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunction(__import__, "OOOO",
+ name, global_dict, empty_dict, list);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(__import__);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_readdir_pyx.pyx b/bzrlib/_readdir_pyx.pyx
new file mode 100644
index 0000000..a6f93d9
--- /dev/null
+++ b/bzrlib/_readdir_pyx.pyx
@@ -0,0 +1,379 @@
+# Copyright (C) 2006, 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Wrapper for readdir which returns files ordered by inode."""
+
+
+import os
+import sys
+
+#python2.4 support
+cdef extern from "python-compat.h":
+ pass
+
+
+cdef extern from 'errno.h':
+ int ENOENT
+ int ENOTDIR
+ int EAGAIN
+ int EINTR
+ char *strerror(int errno)
+ # not necessarily a real variable, but this should be close enough
+ int errno
+
+cdef extern from 'unistd.h':
+ int chdir(char *path)
+ int close(int fd)
+ int fchdir(int fd)
+ char *getcwd(char *, int size)
+
+cdef extern from 'stdlib.h':
+ void *malloc(int)
+ void free(void *)
+
+
+cdef extern from 'sys/types.h':
+ ctypedef long ssize_t
+ ctypedef unsigned long size_t
+ ctypedef long time_t
+ ctypedef unsigned long ino_t
+ ctypedef unsigned long long off_t
+ ctypedef int mode_t
+
+
+cdef extern from 'sys/stat.h':
+ cdef struct stat:
+ int st_mode
+ off_t st_size
+ int st_dev
+ ino_t st_ino
+ int st_mtime
+ int st_ctime
+ int lstat(char *path, stat *buf)
+ int S_ISDIR(int mode)
+ int S_ISCHR(int mode)
+ int S_ISBLK(int mode)
+ int S_ISREG(int mode)
+ int S_ISFIFO(int mode)
+ int S_ISLNK(int mode)
+ int S_ISSOCK(int mode)
+
+
+cdef extern from 'fcntl.h':
+ int O_RDONLY
+ int open(char *pathname, int flags, mode_t mode)
+
+
+cdef extern from 'Python.h':
+ int PyErr_CheckSignals() except -1
+ char * PyString_AS_STRING(object)
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ ctypedef struct PyObject:
+ pass
+ Py_ssize_t PyString_Size(object s)
+ object PyList_GetItem(object lst, Py_ssize_t index)
+ void *PyList_GetItem_object_void "PyList_GET_ITEM" (object lst, int index)
+ int PyList_Append(object lst, object item) except -1
+ void *PyTuple_GetItem_void_void "PyTuple_GET_ITEM" (void* tpl, int index)
+ int PyTuple_SetItem(void *, Py_ssize_t pos, object item) except -1
+ int PyTuple_SetItem_obj "PyTuple_SetItem" (void *, Py_ssize_t pos, PyObject * item) except -1
+ void Py_INCREF(object o)
+ void Py_DECREF(object o)
+ void PyString_Concat(PyObject **string, object newpart)
+
+
+cdef extern from 'dirent.h':
+ ctypedef struct dirent:
+ char d_name[256]
+ ino_t d_ino
+ # the opaque C library DIR type.
+ ctypedef struct DIR
+ # should be DIR *, pyrex barfs.
+ DIR * opendir(char * name)
+ int closedir(DIR * dir)
+ dirent *readdir(DIR *dir)
+
+cdef object _directory
+_directory = 'directory'
+cdef object _chardev
+_chardev = 'chardev'
+cdef object _block
+_block = 'block'
+cdef object _file
+_file = 'file'
+cdef object _fifo
+_fifo = 'fifo'
+cdef object _symlink
+_symlink = 'symlink'
+cdef object _socket
+_socket = 'socket'
+cdef object _unknown
+_unknown = 'unknown'
+
+# add a typedef struct dirent dirent to work around pyrex
+cdef extern from 'readdir.h':
+ pass
+
+
+cdef class _Stat:
+ """Represent a 'stat' result."""
+
+ cdef stat _st
+
+ property st_dev:
+ def __get__(self):
+ return self._st.st_dev
+
+ property st_ino:
+ def __get__(self):
+ return self._st.st_ino
+
+ property st_mode:
+ def __get__(self):
+ return self._st.st_mode
+
+ property st_ctime:
+ def __get__(self):
+ return self._st.st_ctime
+
+ property st_mtime:
+ def __get__(self):
+ return self._st.st_mtime
+
+ property st_size:
+ def __get__(self):
+ return self._st.st_size
+
+ def __repr__(self):
+ """Repr is the same as a Stat object.
+
+ (mode, ino, dev, nlink, uid, gid, size, None(atime), mtime, ctime)
+ """
+ return repr((self.st_mode, 0, 0, 0, 0, 0, self.st_size, None,
+ self.st_mtime, self.st_ctime))
+
+
+from bzrlib import osutils
+
+cdef object _safe_utf8
+_safe_utf8 = osutils.safe_utf8
+
+cdef class UTF8DirReader:
+ """A dir reader for utf8 file systems."""
+
+ def kind_from_mode(self, int mode):
+ """Get the kind of a path from a mode status."""
+ return self._kind_from_mode(mode)
+
+ cdef _kind_from_mode(self, int mode):
+ # Files and directories are the most common - check them first.
+ if S_ISREG(mode):
+ return _file
+ if S_ISDIR(mode):
+ return _directory
+ if S_ISCHR(mode):
+ return _chardev
+ if S_ISBLK(mode):
+ return _block
+ if S_ISLNK(mode):
+ return _symlink
+ if S_ISFIFO(mode):
+ return _fifo
+ if S_ISSOCK(mode):
+ return _socket
+ return _unknown
+
+ def top_prefix_to_starting_dir(self, top, prefix=""):
+ """See DirReader.top_prefix_to_starting_dir."""
+ return (_safe_utf8(prefix), None, None, None, _safe_utf8(top))
+
+ def read_dir(self, prefix, top):
+ """Read a single directory from a utf8 file system.
+
+ All paths in and out are utf8.
+
+ This sub-function is called when we know the filesystem is already in utf8
+ encoding. So we don't need to transcode filenames.
+
+ See DirReader.read_dir for details.
+ """
+ #cdef char *_prefix = prefix
+ #cdef char *_top = top
+ # Use C accelerated directory listing.
+ cdef object newval
+ cdef int index
+ cdef int length
+ cdef void * atuple
+ cdef object name
+ cdef PyObject * new_val_obj
+
+ if PyString_Size(prefix):
+ relprefix = prefix + '/'
+ else:
+ relprefix = ''
+ top_slash = top + '/'
+
+ # _read_dir supplies entries in should-stat order.
+ # for _, name in sorted(_listdir(top)):
+ result = _read_dir(top)
+ length = len(result)
+ # result.sort()
+ for index from 0 <= index < length:
+ atuple = PyList_GetItem_object_void(result, index)
+ name = <object>PyTuple_GetItem_void_void(atuple, 1)
+ # We have a tuple with (inode, name, None, statvalue, None)
+ # Now edit it:
+ # inode -> path_from_top
+ # direct concat - faster than operator +.
+ new_val_obj = <PyObject *>relprefix
+ Py_INCREF(relprefix)
+ PyString_Concat(&new_val_obj, name)
+ if NULL == new_val_obj:
+ # PyString_Concat will have set up an exception, but how to get
+ # at it?
+ raise Exception("failed to strcat")
+ PyTuple_SetItem_obj(atuple, 0, new_val_obj)
+ # 1st None -> kind
+ newval = self._kind_from_mode(
+ (<_Stat>PyTuple_GetItem_void_void(atuple, 3)).st_mode)
+ Py_INCREF(newval)
+ PyTuple_SetItem(atuple, 2, newval)
+ # 2nd None -> abspath # for all - the caller may need to stat files
+ # etc.
+ # direct concat - faster than operator +.
+ new_val_obj = <PyObject *>top_slash
+ Py_INCREF(top_slash)
+ PyString_Concat(&new_val_obj, name)
+ if NULL == new_val_obj:
+ # PyString_Concat will have set up an exception, but how to get
+ # at it?
+ raise Exception("failed to strcat")
+ PyTuple_SetItem_obj(atuple, 4, new_val_obj)
+ return result
+
+
+cdef raise_os_error(int errnum, char *msg_prefix, path):
+ if errnum == EINTR:
+ PyErr_CheckSignals()
+ raise OSError(errnum, msg_prefix + strerror(errnum), path)
+
+
+cdef _read_dir(path):
+ """Like os.listdir, this reads the contents of a directory.
+
+ :param path: the directory to list.
+ :return: a list of tuples owned solely by the list, ready to be edited in
+ place into the result tuples walkdirs needs to yield. They contain
+ (inode, name, None, statvalue, None).
+ """
+ cdef DIR *the_dir
+ # currently this needs a fixup - the C code says 'dirent' but should say
+ # 'struct dirent'
+ cdef dirent * entry
+ cdef dirent sentinel
+ cdef char *name
+ cdef int stat_result
+ cdef _Stat statvalue
+ global errno
+ cdef int orig_dir_fd
+
+ # Avoid chdir('') because it causes problems on Sun OS, and avoid this if
+ # staying in .
+ if path != "" and path != '.':
+ # we change into the requested directory before reading, and back at the
+ # end, because that turns out to make the stat calls measurably faster than
+ # passing full paths every time.
+ orig_dir_fd = open(".", O_RDONLY, 0)
+ if orig_dir_fd == -1:
+ raise_os_error(errno, "open: ", ".")
+ if -1 == chdir(path):
+ # Ignore the return value, because we are already raising an
+ # exception
+ close(orig_dir_fd)
+ raise_os_error(errno, "chdir: ", path)
+ else:
+ orig_dir_fd = -1
+
+ try:
+ the_dir = opendir(".")
+ if NULL == the_dir:
+ raise_os_error(errno, "opendir: ", path)
+ try:
+ result = []
+ entry = &sentinel
+ while entry != NULL:
+ # Unlike most libc functions, readdir needs errno set to 0
+ # beforehand so that eof can be distinguished from errors. See
+ # <https://bugs.launchpad.net/bzr/+bug/279381>
+ while True:
+ errno = 0
+ entry = readdir(the_dir)
+ if entry == NULL and (errno == EAGAIN or errno == EINTR):
+ if errno == EINTR:
+ PyErr_CheckSignals()
+ # try again
+ continue
+ else:
+ break
+ if entry == NULL:
+ if errno == ENOTDIR or errno == 0:
+ # We see ENOTDIR at the end of a normal directory.
+ # As ENOTDIR for read_dir(file) is triggered on opendir,
+ # we consider ENOTDIR to be 'no error'.
+ continue
+ else:
+ raise_os_error(errno, "readdir: ", path)
+ name = entry.d_name
+ if not (name[0] == c"." and (
+ (name[1] == 0) or
+ (name[1] == c"." and name[2] == 0))
+ ):
+ statvalue = _Stat()
+ stat_result = lstat(entry.d_name, &statvalue._st)
+ if stat_result != 0:
+ if errno != ENOENT:
+ raise_os_error(errno, "lstat: ",
+ path + "/" + entry.d_name)
+ else:
+ # the file seems to have disappeared after being
+ # seen by readdir - perhaps a transient temporary
+ # file. there's no point returning it.
+ continue
+ # We append a 5-tuple that can be modified in-place by the C
+ # api:
+ # inode to sort on (to replace with top_path)
+ # name (to keep)
+ # kind (None, to set)
+ # statvalue (to keep)
+ # abspath (None, to set)
+ PyList_Append(result, (entry.d_ino, entry.d_name, None,
+ statvalue, None))
+ finally:
+ if -1 == closedir(the_dir):
+ raise_os_error(errno, "closedir: ", path)
+ finally:
+ if -1 != orig_dir_fd:
+ failed = False
+ if -1 == fchdir(orig_dir_fd):
+ # try to close the original directory anyhow
+ failed = True
+ if -1 == close(orig_dir_fd) or failed:
+ raise_os_error(errno, "return to orig_dir: ", "")
+
+ return result
+
+
+# vim: tw=79 ai expandtab sw=4 sts=4
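
For readers skimming the patch: the 5-tuples that UTF8DirReader.read_dir returns behave roughly like the pure-Python sketch below. The name read_dir_sketch and the use of os.listdir/os.lstat are illustrative assumptions only, not part of bzrlib; the extension above instead chdir()s into the directory, calls readdir()/lstat() directly, and edits the tuples in place, which is what makes it faster.

    import os
    import stat

    def read_dir_sketch(prefix, top):
        # Rough, unoptimised equivalent of UTF8DirReader.read_dir: one
        # (relpath, name, kind, statvalue, abspath) tuple per directory entry.
        if prefix:
            relprefix = prefix + '/'
        else:
            relprefix = ''
        top_slash = top + '/'
        result = []
        for name in os.listdir(top):
            st = os.lstat(top_slash + name)
            if stat.S_ISREG(st.st_mode):
                kind = 'file'
            elif stat.S_ISDIR(st.st_mode):
                kind = 'directory'
            elif stat.S_ISLNK(st.st_mode):
                kind = 'symlink'
            else:
                # the extension also distinguishes chardev/block/fifo/socket
                kind = 'unknown'
            result.append((relprefix + name, name, kind, st, top_slash + name))
        return result

The real reader additionally expects prefix and top as utf8 byte strings and returns entries in should-stat order rather than listdir order.
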
diff --git a/bzrlib/_rio_py.py b/bzrlib/_rio_py.py
new file mode 100644
index 0000000..8364e6b
--- /dev/null
+++ b/bzrlib/_rio_py.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Python implementation of _read_stanza_*."""
+
+from __future__ import absolute_import
+
+import re
+
+from bzrlib.rio import (
+ Stanza,
+ )
+
+_tag_re = re.compile(r'^[-a-zA-Z0-9_]+$')
+def _valid_tag(tag):
+ if type(tag) != str:
+ raise TypeError(tag)
+ return bool(_tag_re.match(tag))
+
+
+def _read_stanza_utf8(line_iter):
+ def iter_unicode_lines():
+ for line in line_iter:
+ if type(line) != str:
+ raise TypeError(line)
+ yield line.decode('utf-8')
+ return _read_stanza_unicode(iter_unicode_lines())
+
+
+def _read_stanza_unicode(unicode_iter):
+ stanza = Stanza()
+ tag = None
+ accum_value = None
+
+ # TODO: jam 20060922 This code should raise real errors rather than
+ # using 'assert' to process user input, or raising ValueError
+ # rather than a more specific error.
+
+ for line in unicode_iter:
+ if line is None or line == u'':
+ break # end of file
+ if line == u'\n':
+ break # end of stanza
+ real_l = line
+ if line[0] == u'\t': # continues previous value
+ if tag is None:
+ raise ValueError('invalid continuation line %r' % real_l)
+ accum_value.append(u'\n' + line[1:-1])
+ else: # new tag:value line
+ if tag is not None:
+ stanza.add(tag, u''.join(accum_value))
+ try:
+ colon_index = line.index(u': ')
+ except ValueError:
+ raise ValueError('tag/value separator not found in line %r'
+ % real_l)
+ tag = str(line[:colon_index])
+ if not _valid_tag(tag):
+ raise ValueError("invalid rio tag %r" % (tag,))
+ accum_value = [line[colon_index+2:-1]]
+
+ if tag is not None: # add last tag-value
+ stanza.add(tag, u''.join(accum_value))
+ return stanza
+ else: # didn't see any content
+ return None
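
As a quick illustration of the stanza format these readers parse ('tag: value' lines, a leading tab continuing the previous value, a blank line or end of input closing the stanza), here is a minimal usage sketch; the sample lines are assumptions for illustration only.

    from bzrlib._rio_py import _read_stanza_utf8

    sample = [
        'name: example\n',
        'description: first line\n',
        '\tsecond line\n',   # tab-prefixed line continues the previous value
        '\n',                # blank line ends the stanza
    ]
    stanza = _read_stanza_utf8(iter(sample))
    # stanza is a bzrlib.rio.Stanza; its 'description' value is the two
    # pieces joined with a newline: u'first line\nsecond line'.
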
diff --git a/bzrlib/_rio_pyx.c b/bzrlib/_rio_pyx.c
new file mode 100644
index 0000000..884e126
--- /dev/null
+++ b/bzrlib/_rio_pyx.c
@@ -0,0 +1,1203 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:00:59 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+#include "stdlib.h"
+#include "string.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Pyrex implementation of _read_stanza_*.";
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._rio_pyx */
+
+
+/* Declarations from implementation of bzrlib._rio_pyx */
+
+static int __pyx_f_6bzrlib_8_rio_pyx__valid_tag_char(char); /*proto*/
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__split_first_line_utf8(char *,int,char *,Py_ssize_t *); /*proto*/
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__split_first_line_unicode(Py_UNICODE *,int,Py_UNICODE *,Py_ssize_t *); /*proto*/
+
+static char __pyx_k1[] = "invalid tag in line %r";
+static char __pyx_k2[] = "tag/value separator not found in line %r";
+static char __pyx_k3[] = "strict";
+static char __pyx_k4[] = "%r is not a plain string";
+static char __pyx_k5[] = "invalid continuation line %r";
+static char __pyx_k6[] = "_valid_tag";
+static char __pyx_k7[] = "invalid rio tag %r";
+static char __pyx_k8[] = "Stanza";
+static char __pyx_k9[] = "from_pairs";
+static char __pyx_k10[] = "%r is not a unicode string";
+static char __pyx_k11[] = "bzrlib.rio";
+
+static PyObject *__pyx_n_Stanza;
+static PyObject *__pyx_n__valid_tag;
+static PyObject *__pyx_n_from_pairs;
+
+static PyObject *__pyx_k1p;
+static PyObject *__pyx_k2p;
+static PyObject *__pyx_k4p;
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k7p;
+static PyObject *__pyx_k10p;
+static PyObject *__pyx_k11p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_Stanza, 1, __pyx_k8, sizeof(__pyx_k8)},
+ {&__pyx_n__valid_tag, 1, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_n_from_pairs, 1, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_k2p, 0, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_k4p, 0, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k7p, 0, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_k10p, 0, __pyx_k10, sizeof(__pyx_k10)},
+ {&__pyx_k11p, 0, __pyx_k11, sizeof(__pyx_k11)},
+ {0, 0, 0, 0}
+};
+
+
+
+/* Implementation of bzrlib._rio_pyx */
+
+static int __pyx_f_6bzrlib_8_rio_pyx__valid_tag_char(char __pyx_v_c) {
+ int __pyx_r;
+ int __pyx_1;
+ __pyx_1 = (__pyx_v_c == '_');
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_c == '-');
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_c >= 'a');
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_c <= 'z');
+ }
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_c >= 'A');
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_c <= 'Z');
+ }
+ if (!__pyx_1) {
+ __pyx_1 = (__pyx_v_c >= '0');
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_c <= '9');
+ }
+ }
+ }
+ }
+ }
+ __pyx_r = __pyx_1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__valid_tag(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__valid_tag(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_tag = 0;
+ char *__pyx_v_c_tag;
+ Py_ssize_t __pyx_v_c_len;
+ int __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_ssize_t __pyx_4;
+ static char *__pyx_argnames[] = {"tag",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_tag)) return 0;
+ Py_INCREF(__pyx_v_tag);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":63 */
+ __pyx_1 = (!PyString_CheckExact(__pyx_v_tag));
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_tag);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":65 */
+ __pyx_v_c_tag = PyString_AS_STRING(__pyx_v_tag);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":66 */
+ __pyx_4 = PyString_GET_SIZE(__pyx_v_tag); if (__pyx_4 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; goto __pyx_L1;}
+ __pyx_v_c_len = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":67 */
+ __pyx_1 = (__pyx_v_c_len < 1);
+ if (__pyx_1) {
+ Py_INCREF(Py_False);
+ __pyx_r = Py_False;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":69 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_c_len; ++__pyx_v_i) {
+ __pyx_1 = (!__pyx_f_6bzrlib_8_rio_pyx__valid_tag_char((__pyx_v_c_tag[__pyx_v_i])));
+ if (__pyx_1) {
+ Py_INCREF(Py_False);
+ __pyx_r = Py_False;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":72 */
+ Py_INCREF(Py_True);
+ __pyx_r = Py_True;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._rio_pyx._valid_tag");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_tag);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__split_first_line_utf8(char *__pyx_v_line,int __pyx_v_len,char *__pyx_v_value,Py_ssize_t *__pyx_v_value_len) {
+ int __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":78 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_len; ++__pyx_v_i) {
+ __pyx_1 = ((__pyx_v_line[__pyx_v_i]) == ':');
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":80 */
+ __pyx_1 = ((__pyx_v_line[(__pyx_v_i + 1)]) != ' ');
+ if (__pyx_1) {
+ __pyx_2 = PyString_FromString(__pyx_v_line); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k1p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":82 */
+ memcpy(__pyx_v_value,((__pyx_v_line + __pyx_v_i) + 2),((__pyx_v_len - __pyx_v_i) - 2));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":83 */
+ (__pyx_v_value_len[0]) = ((__pyx_v_len - __pyx_v_i) - 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":84 */
+ __pyx_2 = PyString_FromStringAndSize(__pyx_v_line,__pyx_v_i); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":85 */
+ __pyx_3 = PyString_FromString(__pyx_v_line); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;}
+ __pyx_2 = PyNumber_Remainder(__pyx_k2p, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._rio_pyx._split_first_line_utf8");
+ __pyx_r = 0;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__split_first_line_unicode(Py_UNICODE *__pyx_v_line,int __pyx_v_len,Py_UNICODE *__pyx_v_value,Py_ssize_t *__pyx_v_value_len) {
+ int __pyx_v_i;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":91 */
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_len; ++__pyx_v_i) {
+ __pyx_1 = ((__pyx_v_line[__pyx_v_i]) == ':');
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":93 */
+ __pyx_1 = ((__pyx_v_line[(__pyx_v_i + 1)]) != ' ');
+ if (__pyx_1) {
+ __pyx_2 = PyUnicode_FromUnicode(__pyx_v_line,__pyx_v_len); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; goto __pyx_L1;}
+ __pyx_3 = PyNumber_Remainder(__pyx_k1p, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_ValueError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":96 */
+ memcpy(__pyx_v_value,(&(__pyx_v_line[(__pyx_v_i + 2)])),(((__pyx_v_len - __pyx_v_i) - 2) * (sizeof(Py_UNICODE))));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":97 */
+ (__pyx_v_value_len[0]) = ((__pyx_v_len - __pyx_v_i) - 2);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":98 */
+ __pyx_2 = PyUnicode_EncodeASCII(__pyx_v_line,__pyx_v_i,__pyx_k3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":99 */
+ __pyx_3 = PyUnicode_FromUnicode(__pyx_v_line,__pyx_v_len); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; goto __pyx_L1;}
+ __pyx_2 = PyNumber_Remainder(__pyx_k2p, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2);
+ __pyx_2 = 0;
+ __pyx_2 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; goto __pyx_L1;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; goto __pyx_L1;}
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._rio_pyx._split_first_line_unicode");
+ __pyx_r = 0;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__read_stanza_utf8(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__read_stanza_utf8(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_line_iter = 0;
+ char *__pyx_v_c_line;
+ Py_ssize_t __pyx_v_c_len;
+ char *__pyx_v_accum_value;
+ char *__pyx_v_new_accum_value;
+ Py_ssize_t __pyx_v_accum_len;
+ Py_ssize_t __pyx_v_accum_size;
+ PyObject *__pyx_v_pairs;
+ PyObject *__pyx_v_tag;
+ PyObject *__pyx_v_line;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ PyObject *__pyx_6 = 0;
+ int __pyx_7;
+ static char *__pyx_argnames[] = {"line_iter",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_line_iter)) return 0;
+ Py_INCREF(__pyx_v_line_iter);
+ __pyx_v_pairs = Py_None; Py_INCREF(Py_None);
+ __pyx_v_tag = Py_None; Py_INCREF(Py_None);
+ __pyx_v_line = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":108 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_pairs);
+ __pyx_v_pairs = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":109 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_tag);
+ __pyx_v_tag = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":110 */
+ __pyx_v_accum_len = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":111 */
+ __pyx_v_accum_size = 4096;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":112 */
+ __pyx_v_accum_value = ((char *)malloc(__pyx_v_accum_size));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":113 */
+ __pyx_2 = (__pyx_v_accum_value == NULL);
+ if (__pyx_2) {
+ __Pyx_Raise(PyExc_MemoryError, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":115 */
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":116 */
+ __pyx_1 = PyObject_GetIter(__pyx_v_line_iter); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; goto __pyx_L4;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_1);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; goto __pyx_L4;}
+ break;
+ }
+ Py_DECREF(__pyx_v_line);
+ __pyx_v_line = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":117 */
+ __pyx_2 = __pyx_v_line == Py_None;
+ if (__pyx_2) {
+ goto __pyx_L7;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":119 */
+ __pyx_2 = (!PyString_CheckExact(__pyx_v_line));
+ if (__pyx_2) {
+ __pyx_3 = PyNumber_Remainder(__pyx_k4p, __pyx_v_line); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L4;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; goto __pyx_L4;}
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":121 */
+ __pyx_v_c_line = PyString_AS_STRING(__pyx_v_line);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":122 */
+ __pyx_5 = PyString_GET_SIZE(__pyx_v_line); if (__pyx_5 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; goto __pyx_L4;}
+ __pyx_v_c_len = __pyx_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":123 */
+ __pyx_2 = (__pyx_v_c_len < 1);
+ if (__pyx_2) {
+ goto __pyx_L7;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":125 */
+ __pyx_2 = (__pyx_v_c_len == 1);
+ if (__pyx_2) {
+ __pyx_2 = ((__pyx_v_c_line[0]) == '\n');
+ }
+ if (__pyx_2) {
+ goto __pyx_L7;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":127 */
+ __pyx_2 = ((__pyx_v_accum_len + __pyx_v_c_len) > __pyx_v_accum_size);
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":128 */
+ __pyx_v_accum_size = (__pyx_v_accum_len + __pyx_v_c_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":129 */
+ __pyx_v_new_accum_value = ((char *)realloc(__pyx_v_accum_value,__pyx_v_accum_size));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":130 */
+ __pyx_2 = (__pyx_v_new_accum_value == NULL);
+ if (__pyx_2) {
+ __Pyx_Raise(PyExc_MemoryError, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; goto __pyx_L4;}
+ goto __pyx_L13;
+ }
+ /*else*/ {
+ __pyx_v_accum_value = __pyx_v_new_accum_value;
+ }
+ __pyx_L13:;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":134 */
+ __pyx_2 = ((__pyx_v_c_line[0]) == '\t');
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":135 */
+ __pyx_2 = __pyx_v_tag == Py_None;
+ if (__pyx_2) {
+ __pyx_4 = PyNumber_Remainder(__pyx_k5p, __pyx_v_line); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; goto __pyx_L4;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; goto __pyx_L4;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; goto __pyx_L4;}
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":137 */
+ memcpy((__pyx_v_accum_value + __pyx_v_accum_len),(__pyx_v_c_line + 1),(__pyx_v_c_len - 1));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":138 */
+ __pyx_v_accum_len = ((__pyx_v_accum_len + __pyx_v_c_len) - 1);
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":140 */
+ __pyx_2 = __pyx_v_tag != Py_None;
+ if (__pyx_2) {
+ __pyx_3 = PyUnicode_DecodeUTF8(__pyx_v_accum_value,(__pyx_v_accum_len - 1),__pyx_k3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; goto __pyx_L4;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_2 = PyList_Append(__pyx_v_pairs,__pyx_4); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":144 */
+ __pyx_3 = __pyx_f_6bzrlib_8_rio_pyx__split_first_line_utf8(__pyx_v_c_line,__pyx_v_c_len,__pyx_v_accum_value,(&__pyx_v_accum_len)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; goto __pyx_L4;}
+ Py_DECREF(__pyx_v_tag);
+ __pyx_v_tag = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":146 */
+ __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n__valid_tag); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; goto __pyx_L4;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_tag);
+ __pyx_6 = PyObject_CallObject(__pyx_4, __pyx_3); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyObject_IsTrue(__pyx_6); if (__pyx_2 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; goto __pyx_L4;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ __pyx_7 = (!__pyx_2);
+ if (__pyx_7) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_tag);
+ __pyx_3 = PyNumber_Remainder(__pyx_k7p, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_6 = PyTuple_New(1); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_6, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_6); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L4;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L4;}
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+ }
+ __pyx_L14:;
+ }
+ __pyx_L7:;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":148 */
+ __pyx_2 = __pyx_v_tag != Py_None;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":149 */
+ __pyx_3 = PyUnicode_DecodeUTF8(__pyx_v_accum_value,(__pyx_v_accum_len - 1),__pyx_k3); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; goto __pyx_L4;}
+ __pyx_6 = PyTuple_New(2); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_6, 0, __pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_6, 1, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_7 = PyList_Append(__pyx_v_pairs,__pyx_6); if (__pyx_7 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; goto __pyx_L4;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":151 */
+ __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_Stanza); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; goto __pyx_L4;}
+ __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_from_pairs); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_pairs);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_pairs);
+ __pyx_6 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; goto __pyx_L4;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_6;
+ __pyx_6 = 0;
+ goto __pyx_L3;
+ goto __pyx_L18;
+ }
+ /*else*/ {
+ Py_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L3;
+ }
+ __pyx_L18:;
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L5;
+ __pyx_L3: __pyx_why = 3; goto __pyx_L5;
+ __pyx_L4: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_4); __pyx_4 = 0;
+ Py_XDECREF(__pyx_1); __pyx_1 = 0;
+ Py_XDECREF(__pyx_3); __pyx_3 = 0;
+ Py_XDECREF(__pyx_6); __pyx_6 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+ free(__pyx_v_accum_value);
+ switch (__pyx_why) {
+ case 3: goto __pyx_L0;
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1;
+ }
+ }
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_6);
+ __Pyx_AddTraceback("bzrlib._rio_pyx._read_stanza_utf8");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_pairs);
+ Py_DECREF(__pyx_v_tag);
+ Py_DECREF(__pyx_v_line);
+ Py_DECREF(__pyx_v_line_iter);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__read_stanza_unicode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_8_rio_pyx__read_stanza_unicode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_unicode_iter = 0;
+ Py_UNICODE *__pyx_v_c_line;
+ int __pyx_v_c_len;
+ Py_UNICODE *__pyx_v_accum_value;
+ Py_UNICODE *__pyx_v_new_accum_value;
+ Py_ssize_t __pyx_v_accum_len;
+ Py_ssize_t __pyx_v_accum_size;
+ PyObject *__pyx_v_pairs;
+ PyObject *__pyx_v_tag;
+ PyObject *__pyx_v_line;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_ssize_t __pyx_5;
+ PyObject *__pyx_6 = 0;
+ int __pyx_7;
+ static char *__pyx_argnames[] = {"unicode_iter",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_unicode_iter)) return 0;
+ Py_INCREF(__pyx_v_unicode_iter);
+ __pyx_v_pairs = Py_None; Py_INCREF(Py_None);
+ __pyx_v_tag = Py_None; Py_INCREF(Py_None);
+ __pyx_v_line = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":163 */
+ __pyx_1 = PyList_New(0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_pairs);
+ __pyx_v_pairs = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":164 */
+ Py_INCREF(Py_None);
+ Py_DECREF(__pyx_v_tag);
+ __pyx_v_tag = Py_None;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":165 */
+ __pyx_v_accum_len = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":166 */
+ __pyx_v_accum_size = 4096;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":167 */
+ __pyx_v_accum_value = ((Py_UNICODE *)malloc((__pyx_v_accum_size * (sizeof(Py_UNICODE)))));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":168 */
+ __pyx_2 = (__pyx_v_accum_value == NULL);
+ if (__pyx_2) {
+ __Pyx_Raise(PyExc_MemoryError, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":170 */
+ /*try:*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":171 */
+ __pyx_1 = PyObject_GetIter(__pyx_v_unicode_iter); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; goto __pyx_L4;}
+ for (;;) {
+ __pyx_3 = PyIter_Next(__pyx_1);
+ if (!__pyx_3) {
+ if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; goto __pyx_L4;}
+ break;
+ }
+ Py_DECREF(__pyx_v_line);
+ __pyx_v_line = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":172 */
+ __pyx_2 = __pyx_v_line == Py_None;
+ if (__pyx_2) {
+ goto __pyx_L7;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":174 */
+ __pyx_2 = (!PyUnicode_CheckExact(__pyx_v_line));
+ if (__pyx_2) {
+ __pyx_3 = PyNumber_Remainder(__pyx_k10p, __pyx_v_line); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L4;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L4;}
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":176 */
+ __pyx_v_c_line = PyUnicode_AS_UNICODE(__pyx_v_line);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":177 */
+ __pyx_5 = PyUnicode_GET_SIZE(__pyx_v_line); if (__pyx_5 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; goto __pyx_L4;}
+ __pyx_v_c_len = __pyx_5;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":178 */
+ __pyx_2 = (__pyx_v_c_len < 1);
+ if (__pyx_2) {
+ goto __pyx_L7;
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":180 */
+ __pyx_2 = Py_UNICODE_ISLINEBREAK((__pyx_v_c_line[0]));
+ if (__pyx_2) {
+ goto __pyx_L7;
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":182 */
+ __pyx_2 = ((__pyx_v_accum_len + __pyx_v_c_len) > __pyx_v_accum_size);
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":183 */
+ __pyx_v_accum_size = (__pyx_v_accum_len + __pyx_v_c_len);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":184 */
+ __pyx_v_new_accum_value = ((Py_UNICODE *)realloc(__pyx_v_accum_value,(__pyx_v_accum_size * (sizeof(Py_UNICODE)))));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":186 */
+ __pyx_2 = (__pyx_v_new_accum_value == NULL);
+ if (__pyx_2) {
+ __Pyx_Raise(PyExc_MemoryError, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; goto __pyx_L4;}
+ goto __pyx_L13;
+ }
+ /*else*/ {
+ __pyx_v_accum_value = __pyx_v_new_accum_value;
+ }
+ __pyx_L13:;
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":190 */
+ __pyx_2 = ((__pyx_v_c_line[0]) == '\t');
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":191 */
+ __pyx_2 = __pyx_v_tag == Py_None;
+ if (__pyx_2) {
+ __pyx_4 = PyNumber_Remainder(__pyx_k5p, __pyx_v_line); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L4;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_4);
+ __pyx_4 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_3); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L4;}
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L4;}
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":193 */
+ memcpy((&(__pyx_v_accum_value[__pyx_v_accum_len])),(&(__pyx_v_c_line[1])),((__pyx_v_c_len - 1) * (sizeof(Py_UNICODE))));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":195 */
+ __pyx_v_accum_len = (__pyx_v_accum_len + (__pyx_v_c_len - 1));
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":197 */
+ __pyx_2 = __pyx_v_tag != Py_None;
+ if (__pyx_2) {
+ __pyx_3 = PyUnicode_FromUnicode(__pyx_v_accum_value,(__pyx_v_accum_len - 1)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; goto __pyx_L4;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_2 = PyList_Append(__pyx_v_pairs,__pyx_4); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":200 */
+ __pyx_3 = __pyx_f_6bzrlib_8_rio_pyx__split_first_line_unicode(__pyx_v_c_line,__pyx_v_c_len,__pyx_v_accum_value,(&__pyx_v_accum_len)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; goto __pyx_L4;}
+ Py_DECREF(__pyx_v_tag);
+ __pyx_v_tag = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":202 */
+ __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n__valid_tag); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; goto __pyx_L4;}
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_tag);
+ __pyx_6 = PyObject_CallObject(__pyx_4, __pyx_3); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_2 = PyObject_IsTrue(__pyx_6); if (__pyx_2 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; goto __pyx_L4;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ __pyx_7 = (!__pyx_2);
+ if (__pyx_7) {
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_v_tag);
+ __pyx_3 = PyNumber_Remainder(__pyx_k7p, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_6 = PyTuple_New(1); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L4;}
+ PyTuple_SET_ITEM(__pyx_6, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_4 = PyObject_CallObject(PyExc_ValueError, __pyx_6); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L4;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+ __Pyx_Raise(__pyx_4, 0, 0);
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; goto __pyx_L4;}
+ goto __pyx_L17;
+ }
+ __pyx_L17:;
+ }
+ __pyx_L14:;
+ }
+ __pyx_L7:;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":204 */
+ __pyx_2 = __pyx_v_tag != Py_None;
+ if (__pyx_2) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":205 */
+ __pyx_3 = PyUnicode_FromUnicode(__pyx_v_accum_value,(__pyx_v_accum_len - 1)); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; goto __pyx_L4;}
+ __pyx_6 = PyTuple_New(2); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_6, 0, __pyx_v_tag);
+ PyTuple_SET_ITEM(__pyx_6, 1, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_7 = PyList_Append(__pyx_v_pairs,__pyx_6); if (__pyx_7 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; goto __pyx_L4;}
+ Py_DECREF(__pyx_6); __pyx_6 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":207 */
+ __pyx_4 = __Pyx_GetName(__pyx_m, __pyx_n_Stanza); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L4;}
+ __pyx_1 = PyObject_GetAttr(__pyx_4, __pyx_n_from_pairs); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L4;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __pyx_3 = PyTuple_New(1); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L4;}
+ Py_INCREF(__pyx_v_pairs);
+ PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_pairs);
+ __pyx_6 = PyObject_CallObject(__pyx_1, __pyx_3); if (!__pyx_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; goto __pyx_L4;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ __pyx_r = __pyx_6;
+ __pyx_6 = 0;
+ goto __pyx_L3;
+ goto __pyx_L18;
+ }
+ /*else*/ {
+ Py_INCREF(Py_None);
+ __pyx_r = Py_None;
+ goto __pyx_L3;
+ }
+ __pyx_L18:;
+ }
+ /*finally:*/ {
+ int __pyx_why;
+ PyObject *__pyx_exc_type, *__pyx_exc_value, *__pyx_exc_tb;
+ int __pyx_exc_lineno;
+ __pyx_why = 0; goto __pyx_L5;
+ __pyx_L3: __pyx_why = 3; goto __pyx_L5;
+ __pyx_L4: {
+ __pyx_why = 4;
+ Py_XDECREF(__pyx_4); __pyx_4 = 0;
+ Py_XDECREF(__pyx_1); __pyx_1 = 0;
+ Py_XDECREF(__pyx_3); __pyx_3 = 0;
+ Py_XDECREF(__pyx_6); __pyx_6 = 0;
+ PyErr_Fetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb);
+ __pyx_exc_lineno = __pyx_lineno;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+ free(__pyx_v_accum_value);
+ switch (__pyx_why) {
+ case 3: goto __pyx_L0;
+ case 4: {
+ PyErr_Restore(__pyx_exc_type, __pyx_exc_value, __pyx_exc_tb);
+ __pyx_lineno = __pyx_exc_lineno;
+ __pyx_exc_type = 0;
+ __pyx_exc_value = 0;
+ __pyx_exc_tb = 0;
+ goto __pyx_L1;
+ }
+ }
+ }
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_6);
+ __Pyx_AddTraceback("bzrlib._rio_pyx._read_stanza_unicode");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_pairs);
+ Py_DECREF(__pyx_v_tag);
+ Py_DECREF(__pyx_v_line);
+ Py_DECREF(__pyx_v_unicode_iter);
+ return __pyx_r;
+}
+
+static struct PyMethodDef __pyx_methods[] = {
+ {"_valid_tag", (PyCFunction)__pyx_f_6bzrlib_8_rio_pyx__valid_tag, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_read_stanza_utf8", (PyCFunction)__pyx_f_6bzrlib_8_rio_pyx__read_stanza_utf8, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_read_stanza_unicode", (PyCFunction)__pyx_f_6bzrlib_8_rio_pyx__read_stanza_unicode, METH_VARARGS|METH_KEYWORDS, 0},
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
+PyMODINIT_FUNC init_rio_pyx(void); /*proto*/
+PyMODINIT_FUNC init_rio_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_rio_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":50 */
+ __pyx_1 = PyList_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; goto __pyx_L1;}
+ Py_INCREF(__pyx_n_Stanza);
+ PyList_SET_ITEM(__pyx_1, 0, __pyx_n_Stanza);
+ __pyx_2 = __Pyx_Import(__pyx_k11p, __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_n_Stanza); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; goto __pyx_L1;}
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_Stanza, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_rio_pyx.pyx":158 */
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._rio_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_rio_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
+ PyObject *__import__ = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ __import__ = PyObject_GetAttrString(__pyx_b, "__import__");
+ if (!__import__)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ module = PyObject_CallFunction(__import__, "OOOO",
+ name, global_dict, empty_dict, list);
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(__import__);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_rio_pyx.pyx b/bzrlib/_rio_pyx.pyx
new file mode 100644
index 0000000..76c3f61
--- /dev/null
+++ b/bzrlib/_rio_pyx.pyx
@@ -0,0 +1,211 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Pyrex implementation of _read_stanza_*."""
+
+# python2.4 support
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from "stdlib.h":
+ void *malloc(int)
+ void *realloc(void *, int)
+ void free(void *)
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ ctypedef int Py_UNICODE
+ char *PyString_AS_STRING(object s)
+ Py_ssize_t PyString_GET_SIZE(object t) except -1
+ object PyUnicode_DecodeUTF8(char *string, Py_ssize_t length, char *errors)
+ object PyString_FromStringAndSize(char *s, Py_ssize_t len)
+ int PyString_CheckExact(object)
+ int PyUnicode_CheckExact(object)
+ object PyUnicode_Join(object, object)
+ object PyUnicode_EncodeASCII(Py_UNICODE *, int, char *)
+ Py_UNICODE *PyUnicode_AS_UNICODE(object)
+ Py_UNICODE *PyUnicode_AsUnicode(object)
+ Py_ssize_t PyUnicode_GET_SIZE(object) except -1
+ int PyList_Append(object, object) except -1
+ int Py_UNICODE_ISLINEBREAK(Py_UNICODE)
+ object PyUnicode_FromUnicode(Py_UNICODE *, int)
+ void *Py_UNICODE_COPY(Py_UNICODE *, Py_UNICODE *, int)
+
+cdef extern from "string.h":
+ void *memcpy(void *, void *, int)
+
+from bzrlib.rio import Stanza
+
+cdef int _valid_tag_char(char c): # cannot_raise
+ return (c == c'_' or c == c'-' or
+ (c >= c'a' and c <= c'z') or
+ (c >= c'A' and c <= c'Z') or
+ (c >= c'0' and c <= c'9'))
+
+
+def _valid_tag(tag):
+ cdef char *c_tag
+ cdef Py_ssize_t c_len
+ cdef int i
+ if not PyString_CheckExact(tag):
+ raise TypeError(tag)
+ c_tag = PyString_AS_STRING(tag)
+ c_len = PyString_GET_SIZE(tag)
+ if c_len < 1:
+ return False
+ for i from 0 <= i < c_len:
+ if not _valid_tag_char(c_tag[i]):
+ return False
+ return True
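+
+# Example behaviour (illustrative only, values not taken from the original
+# source): tags built from letters, digits, '-' and '_' such as "committer"
+# or "branch-nick_1" pass the character test above, while "" and "bad tag!"
+# do not, and a unicode object raises TypeError because only plain byte
+# strings are accepted here.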
+
+
+cdef object _split_first_line_utf8(char *line, int len,
+ char *value, Py_ssize_t *value_len):
+ cdef int i
+ for i from 0 <= i < len:
+ if line[i] == c':':
+ if line[i+1] != c' ':
+ raise ValueError("invalid tag in line %r" % line)
+ memcpy(value, line+i+2, len-i-2)
+ value_len[0] = len-i-2
+ return PyString_FromStringAndSize(line, i)
+ raise ValueError('tag/value separator not found in line %r' % line)
+
+
+cdef object _split_first_line_unicode(Py_UNICODE *line, int len,
+ Py_UNICODE *value, Py_ssize_t *value_len):
+ cdef int i
+ for i from 0 <= i < len:
+ if line[i] == c':':
+ if line[i+1] != c' ':
+ raise ValueError("invalid tag in line %r" %
+ PyUnicode_FromUnicode(line, len))
+ memcpy(value, &line[i+2], (len-i-2) * sizeof(Py_UNICODE))
+ value_len[0] = len-i-2
+ return PyUnicode_EncodeASCII(line, i, "strict")
+ raise ValueError("tag/value separator not found in line %r" %
+ PyUnicode_FromUnicode(line, len))
+
+
+def _read_stanza_utf8(line_iter):
+ cdef char *c_line
+ cdef Py_ssize_t c_len
+ cdef char *accum_value, *new_accum_value
+ cdef Py_ssize_t accum_len, accum_size
+ pairs = []
+ tag = None
+ accum_len = 0
+ accum_size = 4096
+ accum_value = <char *>malloc(accum_size)
+ if accum_value == NULL:
+ raise MemoryError
+ try:
+ for line in line_iter:
+ if line is None:
+ break # end of file
+ if not PyString_CheckExact(line):
+ raise TypeError("%r is not a plain string" % line)
+ c_line = PyString_AS_STRING(line)
+ c_len = PyString_GET_SIZE(line)
+ if c_len < 1:
+ break # end of file
+ if c_len == 1 and c_line[0] == c"\n":
+ break # end of stanza
+ if accum_len + c_len > accum_size:
+ accum_size = (accum_len + c_len)
+ new_accum_value = <char *>realloc(accum_value, accum_size)
+ if new_accum_value == NULL:
+ raise MemoryError
+ else:
+ accum_value = new_accum_value
+ if c_line[0] == c'\t': # continues previous value
+ if tag is None:
+ raise ValueError('invalid continuation line %r' % line)
+ memcpy(accum_value+accum_len, c_line+1, c_len-1)
+ accum_len = accum_len + c_len-1
+ else: # new tag:value line
+ if tag is not None:
+ PyList_Append(pairs,
+ (tag, PyUnicode_DecodeUTF8(accum_value, accum_len-1,
+ "strict")))
+ tag = _split_first_line_utf8(c_line, c_len, accum_value,
+ &accum_len)
+ if not _valid_tag(tag):
+ raise ValueError("invalid rio tag %r" % (tag,))
+ if tag is not None: # add last tag-value
+ PyList_Append(pairs,
+ (tag, PyUnicode_DecodeUTF8(accum_value, accum_len-1, "strict")))
+ return Stanza.from_pairs(pairs)
+ else: # didn't see any content
+ return None
+ finally:
+ free(accum_value)
+
+
+def _read_stanza_unicode(unicode_iter):
+ cdef Py_UNICODE *c_line
+ cdef int c_len
+ cdef Py_UNICODE *accum_value, *new_accum_value
+ cdef Py_ssize_t accum_len, accum_size
+ pairs = []
+ tag = None
+ accum_len = 0
+ accum_size = 4096
+ accum_value = <Py_UNICODE *>malloc(accum_size*sizeof(Py_UNICODE))
+ if accum_value == NULL:
+ raise MemoryError
+ try:
+ for line in unicode_iter:
+ if line is None:
+ break # end of file
+ if not PyUnicode_CheckExact(line):
+ raise TypeError("%r is not a unicode string" % line)
+ c_line = PyUnicode_AS_UNICODE(line)
+ c_len = PyUnicode_GET_SIZE(line)
+ if c_len < 1:
+ break # end of file
+ if Py_UNICODE_ISLINEBREAK(c_line[0]):
+ break # end of stanza
+ if accum_len + c_len > accum_size:
+ accum_size = accum_len + c_len
+ new_accum_value = <Py_UNICODE *>realloc(accum_value,
+ accum_size*sizeof(Py_UNICODE))
+ if new_accum_value == NULL:
+ raise MemoryError
+ else:
+ accum_value = new_accum_value
+ if c_line[0] == c'\t': # continues previous value
+ if tag is None:
+ raise ValueError('invalid continuation line %r' % line)
+ memcpy(&accum_value[accum_len], &c_line[1],
+ (c_len-1)*sizeof(Py_UNICODE))
+ accum_len = accum_len + (c_len-1)
+ else: # new tag:value line
+ if tag is not None:
+ PyList_Append(pairs,
+ (tag, PyUnicode_FromUnicode(accum_value, accum_len-1)))
+ tag = _split_first_line_unicode(c_line, c_len, accum_value,
+ &accum_len)
+ if not _valid_tag(tag):
+ raise ValueError("invalid rio tag %r" % (tag,))
+ if tag is not None: # add last tag-value
+ PyList_Append(pairs,
+ (tag, PyUnicode_FromUnicode(accum_value, accum_len-1)))
+ return Stanza.from_pairs(pairs)
+ else: # didn't see any content
+ return None
+ finally:
+ free(accum_value)
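+
+# A minimal usage sketch (illustrative, assuming the rio line format handled
+# above): feeding UTF-8 encoded lines to _read_stanza_utf8 yields a Stanza
+# built from (tag, unicode value) pairs, with tab-prefixed lines folded into
+# the previous value, e.g.
+#   _read_stanza_utf8(iter(['name: value\n', '\tmore\n', '\n']))
+# would return Stanza.from_pairs([('name', u'value\nmore')]);
+# _read_stanza_unicode behaves broadly the same for unicode input lines.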
diff --git a/bzrlib/_simple_set_pyx.c b/bzrlib/_simple_set_pyx.c
new file mode 100644
index 0000000..99f3b82
--- /dev/null
+++ b/bzrlib/_simple_set_pyx.c
@@ -0,0 +1,2381 @@
+/* Generated by Pyrex 0.9.8.5 on Fri Oct 8 14:01:06 2010 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#include "structmember.h"
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+#endif
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+#include <math.h>
+#include "python-compat.h"
+
+
+typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static int __pyx_lineno;
+static char *__pyx_filename;
+static char **__pyx_f;
+
+static char __pyx_mdoc[] = "Interface definition of a class like PySet but without caching the hash.\n\nThis is generally useful when you want to \'intern\' objects, etc. Note that this\ndiffers from Set in that we:\n 1) Don\'t have all of the .intersection, .difference, etc functions\n 2) Do return the object from the set via queries\n eg. SimpleSet.add(key) => saved_key and SimpleSet[key] => saved_key\n\nDefinition of a class that is similar to Set with some small changes.";
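+
+/* Illustrative sketch of the Python-level behaviour described in the
+ * docstring above (the snippet itself is hypothetical, not part of the
+ * generated code):
+ *
+ *     s = SimpleSet()
+ *     stored = s.add(key)            # returns the object actually kept in the set
+ *     assert s[equal_key] is stored  # queries hand back the interned object
+ */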
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+static int __Pyx_ExportFunction(char *n, void *f, char *s); /*proto*/
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static void __Pyx_AddTraceback(char *funcname); /*proto*/
+
+/* Declarations from bzrlib._simple_set_pyx */
+
+struct SimpleSetObject {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *__pyx_vtab;
+ Py_ssize_t _used;
+ Py_ssize_t _fill;
+ Py_ssize_t _mask;
+ PyObject **_table;
+};
+
+__PYX_EXTERN_C DL_EXPORT(PyTypeObject) SimpleSet_Type;
+
+struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet {
+ PyObject *(*_get)(struct SimpleSetObject *,PyObject *);
+ PyObject *(*_add)(struct SimpleSetObject *,PyObject *);
+ int (*_discard)(struct SimpleSetObject *,PyObject *);
+ int (*_insert_clean)(struct SimpleSetObject *,PyObject *);
+ Py_ssize_t (*_resize)(struct SimpleSetObject *,Py_ssize_t);
+};
+static struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *__pyx_vtabptr_6bzrlib_15_simple_set_pyx_SimpleSet;
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet = 0;
+static struct SimpleSetObject *SimpleSet_New(void); /*proto*/
+static PyObject *SimpleSet_Add(PyObject *,PyObject *); /*proto*/
+static int SimpleSet_Contains(PyObject *,PyObject *); /*proto*/
+static int SimpleSet_Discard(PyObject *,PyObject *); /*proto*/
+static PyObject *SimpleSet_Get(struct SimpleSetObject *,PyObject *); /*proto*/
+static Py_ssize_t SimpleSet_Size(PyObject *); /*proto*/
+static int SimpleSet_Next(PyObject *,Py_ssize_t *,PyObject **); /*proto*/
+
+/* Declarations from implementation of bzrlib._simple_set_pyx */
+
+struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator {
+ PyObject_HEAD
+ Py_ssize_t pos;
+ struct SimpleSetObject *set;
+ Py_ssize_t _used;
+ Py_ssize_t len;
+};
+
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_simple_set_pyx__SimpleSet_iterator = 0;
+static PyObject *__pyx_v_6bzrlib_15_simple_set_pyx__dummy_obj;
+static PyObject *__pyx_v_6bzrlib_15_simple_set_pyx__dummy;
+static PyObject *__pyx_v_6bzrlib_15_simple_set_pyx__NotImplemented;
+static int __pyx_f_6bzrlib_15_simple_set_pyx__is_equal(PyObject *,long,PyObject *); /*proto*/
+static struct SimpleSetObject *__pyx_f_6bzrlib_15_simple_set_pyx__check_self(PyObject *); /*proto*/
+static PyObject **__pyx_f_6bzrlib_15_simple_set_pyx__lookup(struct SimpleSetObject *,PyObject *); /*proto*/
+static PyObject **_SimpleSet_Lookup(PyObject *,PyObject *); /*proto*/
+static int __pyx_f_6bzrlib_15_simple_set_pyx_SimpleSet_traverse(struct SimpleSetObject *,visitproc,void *); /*proto*/
+
+static char __pyx_k1[] = "<null>";
+static char __pyx_k2[] = "<dummy>";
+static char __pyx_k3[] = "Key %s is not present";
+static char __pyx_k4[] = "ran out of slots.";
+static char __pyx_k5[] = "Types added to SimpleSet must implement both tp_richcompare and tp_hash";
+static char __pyx_k6[] = "Set size changed during iteration";
+static char __pyx_k7[] = "self must not be None";
+static char __pyx_k8[] = "should never get here";
+static char __pyx_k9[] = "object";
+static char __pyx_k10[] = "NotImplemented";
+
+static PyObject *__pyx_n_NotImplemented;
+static PyObject *__pyx_n_object;
+
+static PyObject *__pyx_k1p;
+static PyObject *__pyx_k2p;
+static PyObject *__pyx_k3p;
+static PyObject *__pyx_k4p;
+static PyObject *__pyx_k5p;
+static PyObject *__pyx_k6p;
+static PyObject *__pyx_k7p;
+static PyObject *__pyx_k8p;
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_NotImplemented, 1, __pyx_k10, sizeof(__pyx_k10)},
+ {&__pyx_n_object, 1, __pyx_k9, sizeof(__pyx_k9)},
+ {&__pyx_k1p, 0, __pyx_k1, sizeof(__pyx_k1)},
+ {&__pyx_k2p, 0, __pyx_k2, sizeof(__pyx_k2)},
+ {&__pyx_k3p, 0, __pyx_k3, sizeof(__pyx_k3)},
+ {&__pyx_k4p, 0, __pyx_k4, sizeof(__pyx_k4)},
+ {&__pyx_k5p, 0, __pyx_k5, sizeof(__pyx_k5)},
+ {&__pyx_k6p, 0, __pyx_k6, sizeof(__pyx_k6)},
+ {&__pyx_k7p, 0, __pyx_k7, sizeof(__pyx_k7)},
+ {&__pyx_k8p, 0, __pyx_k8, sizeof(__pyx_k8)},
+ {0, 0, 0, 0}
+};
+
+
+
+/* Implementation of bzrlib._simple_set_pyx */
+
+static int __pyx_f_6bzrlib_15_simple_set_pyx__is_equal(PyObject *__pyx_v_this,long __pyx_v_this_hash,PyObject *__pyx_v_other) {
+ long __pyx_v_other_hash;
+ PyObject *__pyx_v_res;
+ int __pyx_r;
+ int __pyx_1;
+ long __pyx_2;
+ PyObject *__pyx_3 = 0;
+ __pyx_v_res = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":65 */
+ __pyx_1 = (__pyx_v_this == __pyx_v_other);
+ if (__pyx_1) {
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":67 */
+ __pyx_2 = PyObject_Hash(__pyx_v_other); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; goto __pyx_L1;}
+ __pyx_v_other_hash = __pyx_2;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":68 */
+ __pyx_1 = (__pyx_v_other_hash != __pyx_v_this_hash);
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":78 */
+ __pyx_3 = Py_TYPE(__pyx_v_this)->tp_richcompare(__pyx_v_this,__pyx_v_other,Py_EQ); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_res);
+ __pyx_v_res = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":79 */
+ __pyx_1 = __pyx_v_res == __pyx_v_6bzrlib_15_simple_set_pyx__NotImplemented;
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":80 */
+ __pyx_3 = Py_TYPE(__pyx_v_other)->tp_richcompare(__pyx_v_other,__pyx_v_this,Py_EQ); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_res);
+ __pyx_v_res = __pyx_3;
+ __pyx_3 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":81 */
+ __pyx_1 = __pyx_v_res == __pyx_v_6bzrlib_15_simple_set_pyx__NotImplemented;
+ if (__pyx_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":83 */
+ __pyx_1 = PyObject_IsTrue(__pyx_v_res); if (__pyx_1 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; goto __pyx_L1;}
+ if (__pyx_1) {
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":85 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._is_equal");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_res);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ Py_ssize_t __pyx_v_size;
+ Py_ssize_t __pyx_v_n_bytes;
+ int __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return -1;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":108 */
+ __pyx_v_size = 1024;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":109 */
+ ((struct SimpleSetObject *)__pyx_v_self)->_mask = (__pyx_v_size - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":110 */
+ ((struct SimpleSetObject *)__pyx_v_self)->_used = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":111 */
+ ((struct SimpleSetObject *)__pyx_v_self)->_fill = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":112 */
+ __pyx_v_n_bytes = ((sizeof(PyObject *)) * __pyx_v_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":113 */
+ ((struct SimpleSetObject *)__pyx_v_self)->_table = ((PyObject **)PyMem_Malloc(__pyx_v_n_bytes));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":114 */
+ __pyx_1 = (((struct SimpleSetObject *)__pyx_v_self)->_table == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyObject_CallObject(PyExc_MemoryError, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L1;}
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":116 */
+ memset(((struct SimpleSetObject *)__pyx_v_self)->_table,0,__pyx_v_n_bytes);
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___sizeof__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___sizeof__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromSsize_t((((((sizeof(PyObject)) + (sizeof(void *))) + (3 * (sizeof(Py_ssize_t)))) + (sizeof(PyObject **))) + ((((struct SimpleSetObject *)__pyx_v_self)->_mask + 1) * (sizeof(PyObject *))))); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.__sizeof__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static void __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___dealloc__(PyObject *__pyx_v_self) {
+ int __pyx_1;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = (((struct SimpleSetObject *)__pyx_v_self)->_table != NULL);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":134 */
+ PyMem_Free(((struct SimpleSetObject *)__pyx_v_self)->_table);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":135 */
+ ((struct SimpleSetObject *)__pyx_v_self)->_table = NULL;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ Py_DECREF(__pyx_v_self);
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4used___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4used___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromSsize_t(((struct SimpleSetObject *)__pyx_v_self)->_used); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.used.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4fill___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4fill___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromSsize_t(((struct SimpleSetObject *)__pyx_v_self)->_fill); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.fill.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4mask___get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4mask___get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromSsize_t(((struct SimpleSetObject *)__pyx_v_self)->_mask); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.mask.__get__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__memory_size(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet__memory_size[] = "Return the number of bytes of memory consumed by this class.";
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__memory_size(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyInt_FromSsize_t(((sizeof(__pyx_v_self)) + ((sizeof(PyObject *)) * (((struct SimpleSetObject *)__pyx_v_self)->_mask + 1)))); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._memory_size");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static Py_ssize_t __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___len__(PyObject *__pyx_v_self); /*proto*/
+static Py_ssize_t __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___len__(PyObject *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ Py_INCREF(__pyx_v_self);
+ __pyx_r = ((struct SimpleSetObject *)__pyx_v_self)->_used;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__test_lookup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__test_lookup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ PyObject **__pyx_v_slot;
+ PyObject *__pyx_v_res;
+ PyObject *__pyx_r;
+ PyObject **__pyx_1;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_res = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":159 */
+ __pyx_1 = __pyx_f_6bzrlib_15_simple_set_pyx__lookup(((struct SimpleSetObject *)__pyx_v_self),__pyx_v_key); if (__pyx_1 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; goto __pyx_L1;}
+ __pyx_v_slot = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":160 */
+ __pyx_2 = ((__pyx_v_slot[0]) == NULL);
+ if (__pyx_2) {
+ Py_INCREF(__pyx_k1p);
+ Py_DECREF(__pyx_v_res);
+ __pyx_v_res = __pyx_k1p;
+ goto __pyx_L2;
+ }
+ __pyx_2 = ((__pyx_v_slot[0]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ if (__pyx_2) {
+ Py_INCREF(__pyx_k2p);
+ Py_DECREF(__pyx_v_res);
+ __pyx_v_res = __pyx_k2p;
+ goto __pyx_L2;
+ }
+ /*else*/ {
+ Py_INCREF(((PyObject *)(__pyx_v_slot[0])));
+ Py_DECREF(__pyx_v_res);
+ __pyx_v_res = ((PyObject *)(__pyx_v_slot[0]));
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":166 */
+ __pyx_3 = PyInt_FromLong(((int)(__pyx_v_slot - ((struct SimpleSetObject *)__pyx_v_self)->_table))); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(2); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ Py_INCREF(__pyx_v_res);
+ PyTuple_SET_ITEM(__pyx_4, 1, __pyx_v_res);
+ __pyx_3 = 0;
+ __pyx_r = __pyx_4;
+ __pyx_4 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._test_lookup");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_res);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
+static int __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
+ PyObject **__pyx_v_slot;
+ int __pyx_r;
+ PyObject **__pyx_1;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":172 */
+ __pyx_1 = __pyx_f_6bzrlib_15_simple_set_pyx__lookup(((struct SimpleSetObject *)__pyx_v_self),__pyx_v_key); if (__pyx_1 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; goto __pyx_L1;}
+ __pyx_v_slot = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":173 */
+ __pyx_2 = ((__pyx_v_slot[0]) == NULL);
+ if (!__pyx_2) {
+ __pyx_2 = ((__pyx_v_slot[0]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ }
+ if (__pyx_2) {
+ __pyx_2 = PyInt_AsLong(Py_False); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":175 */
+ __pyx_2 = PyInt_AsLong(Py_True); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.__contains__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__get(struct SimpleSetObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject **__pyx_v_slot;
+ PyObject *__pyx_r;
+ PyObject **__pyx_1;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":181 */
+ __pyx_1 = __pyx_f_6bzrlib_15_simple_set_pyx__lookup(__pyx_v_self,__pyx_v_key); if (__pyx_1 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; goto __pyx_L1;}
+ __pyx_v_slot = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":182 */
+ __pyx_2 = ((__pyx_v_slot[0]) == NULL);
+ if (!__pyx_2) {
+ __pyx_2 = ((__pyx_v_slot[0]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ }
+ if (__pyx_2) {
+ __pyx_r = NULL;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":184 */
+ __pyx_r = (__pyx_v_slot[0]);
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._get");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
+ PyObject *__pyx_v_py_val;
+ PyObject *__pyx_v_val;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1;
+ int __pyx_2;
+ PyObject *__pyx_3 = 0;
+ PyObject *__pyx_4 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_val = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":190 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_v_self)->__pyx_vtab)->_get(((struct SimpleSetObject *)__pyx_v_self),__pyx_v_key); if (__pyx_1 == NULL && PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; goto __pyx_L1;}
+ __pyx_v_py_val = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":191 */
+ __pyx_2 = (__pyx_v_py_val == NULL);
+ if (__pyx_2) {
+ __pyx_3 = PyNumber_Remainder(__pyx_k3p, __pyx_v_key); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_3);
+ __pyx_3 = 0;
+ __pyx_3 = PyObject_CallObject(PyExc_KeyError, __pyx_4); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":193 */
+ Py_INCREF(((PyObject *)__pyx_v_py_val));
+ Py_DECREF(__pyx_v_val);
+ __pyx_v_val = ((PyObject *)__pyx_v_py_val);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":194 */
+ Py_INCREF(__pyx_v_val);
+ __pyx_r = __pyx_v_val;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ Py_XDECREF(__pyx_4);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.__getitem__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_val);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__insert_clean(struct SimpleSetObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ size_t __pyx_v_i;
+ size_t __pyx_v_n_lookup;
+ long __pyx_v_the_hash;
+ PyObject **__pyx_v_table;
+ PyObject **__pyx_v_slot;
+ Py_ssize_t __pyx_v_mask;
+ int __pyx_r;
+ long __pyx_1;
+ size_t __pyx_2;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":208 */
+ __pyx_v_mask = __pyx_v_self->_mask;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":209 */
+ __pyx_v_table = __pyx_v_self->_table;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":211 */
+ __pyx_1 = PyObject_Hash(__pyx_v_key); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; goto __pyx_L1;}
+ __pyx_v_the_hash = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":212 */
+ __pyx_v_i = __pyx_v_the_hash;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":213 */
+ __pyx_2 = __pyx_v_mask;
+ for (__pyx_v_n_lookup = 0; __pyx_v_n_lookup <= __pyx_2; ++__pyx_v_n_lookup) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":214 */
+ __pyx_v_slot = (&(__pyx_v_table[(__pyx_v_i & __pyx_v_mask)]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":215 */
+ __pyx_3 = ((__pyx_v_slot[0]) == NULL);
+ if (__pyx_3) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":216 */
+ (__pyx_v_slot[0]) = __pyx_v_key;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":217 */
+ __pyx_v_self->_fill = (__pyx_v_self->_fill + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":218 */
+ __pyx_v_self->_used = (__pyx_v_self->_used + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":219 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":220 */
+ __pyx_v_i = ((__pyx_v_i + 1) + __pyx_v_n_lookup);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":221 */
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; goto __pyx_L1;}
+ Py_INCREF(__pyx_k4p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k4p);
+ __pyx_5 = PyObject_CallObject(PyExc_RuntimeError, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_5, 0, 0);
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; goto __pyx_L1;}
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._insert_clean");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__py_resize(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet__py_resize[] = "Do not use this directly, it is only exposed for testing.";
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__py_resize(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_min_used = 0;
+ PyObject *__pyx_r;
+ Py_ssize_t __pyx_1;
+ Py_ssize_t __pyx_2;
+ PyObject *__pyx_3 = 0;
+ static char *__pyx_argnames[] = {"min_used",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_min_used)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_min_used);
+ __pyx_1 = PyInt_AsSsize_t(__pyx_v_min_used); if (PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_v_self)->__pyx_vtab)->_resize(((struct SimpleSetObject *)__pyx_v_self),__pyx_1); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ __pyx_3 = PyInt_FromSsize_t(__pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; goto __pyx_L1;}
+ __pyx_r = __pyx_3;
+ __pyx_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._py_resize");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_min_used);
+ return __pyx_r;
+}
+
+static Py_ssize_t __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__resize(struct SimpleSetObject *__pyx_v_self,Py_ssize_t __pyx_v_min_used) {
+ Py_ssize_t __pyx_v_new_size;
+ Py_ssize_t __pyx_v_n_bytes;
+ Py_ssize_t __pyx_v_remaining;
+ PyObject **__pyx_v_new_table;
+ PyObject **__pyx_v_old_table;
+ PyObject **__pyx_v_slot;
+ Py_ssize_t __pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":239 */
+ __pyx_v_new_size = 1024;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":240 */
+ while (1) {
+ __pyx_1 = (__pyx_v_new_size <= __pyx_v_min_used);
+ if (__pyx_1) {
+ __pyx_1 = (__pyx_v_new_size > 0);
+ }
+ if (!__pyx_1) break;
+ __pyx_v_new_size = (__pyx_v_new_size << 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":243 */
+ __pyx_1 = (__pyx_v_new_size <= 0);
+ if (__pyx_1) {
+ __pyx_2 = PyObject_CallObject(PyExc_MemoryError, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":252 */
+ __pyx_v_n_bytes = ((sizeof(PyObject *)) * __pyx_v_new_size);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":253 */
+ __pyx_v_new_table = ((PyObject **)PyMem_Malloc(__pyx_v_n_bytes));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":254 */
+ __pyx_1 = (__pyx_v_new_table == NULL);
+ if (__pyx_1) {
+ __pyx_2 = PyObject_CallObject(PyExc_MemoryError, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; goto __pyx_L1;}
+ __Pyx_Raise(__pyx_2, 0, 0);
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; goto __pyx_L1;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":257 */
+ __pyx_v_old_table = __pyx_v_self->_table;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":258 */
+ __pyx_v_self->_table = __pyx_v_new_table;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":259 */
+ memset(__pyx_v_self->_table,0,__pyx_v_n_bytes);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":260 */
+ __pyx_v_self->_mask = (__pyx_v_new_size - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":261 */
+ __pyx_v_self->_used = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":262 */
+ __pyx_v_remaining = __pyx_v_self->_fill;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":263 */
+ __pyx_v_self->_fill = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":267 */
+ __pyx_v_slot = __pyx_v_old_table;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":268 */
+ while (1) {
+ __pyx_1 = (__pyx_v_remaining > 0);
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":269 */
+ __pyx_1 = ((__pyx_v_slot[0]) == NULL);
+ if (__pyx_1) {
+ goto __pyx_L8;
+ }
+ __pyx_1 = ((__pyx_v_slot[0]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ if (__pyx_1) {
+ __pyx_v_remaining = (__pyx_v_remaining - 1);
+ goto __pyx_L8;
+ }
+ /*else*/ {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":274 */
+ __pyx_v_remaining = (__pyx_v_remaining - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":275 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)__pyx_v_self->__pyx_vtab)->_insert_clean(__pyx_v_self,(__pyx_v_slot[0])); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; goto __pyx_L1;}
+ }
+ __pyx_L8:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":276 */
+ __pyx_v_slot = (__pyx_v_slot + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":277 */
+ PyMem_Free(__pyx_v_old_table);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":278 */
+ __pyx_r = __pyx_v_new_size;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._resize");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_add(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet_add[] = "Similar to set.add(), start tracking this key.\n \n There is one small difference, which is that we return the object that\n is stored at the given location. (which is closer to the\n dict.setdefault() functionality.)\n ";
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_add(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_v_self)->__pyx_vtab)->_add(((struct SimpleSetObject *)__pyx_v_self),__pyx_v_key); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; goto __pyx_L1;}
+ __pyx_r = __pyx_1;
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.add");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
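+/* Generated body of SimpleSet._add(): the key must define both tp_hash and
+ * tp_richcompare, otherwise TypeError is raised.  _lookup() locates the
+ * slot; an empty or dummy slot takes a new reference to the key, while an
+ * occupied slot leaves the previously stored (canonical) object untouched.
+ * The stored object is returned (dict.setdefault()-style), and after a new
+ * entry is stored the table is resized to 2 * used once the fill count
+ * (live + dummy entries) reaches two thirds of the table size. */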
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__add(struct SimpleSetObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject **__pyx_v_slot;
+ PyObject *__pyx_v_py_key;
+ int __pyx_v_added;
+ PyObject *__pyx_v_retval;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ PyObject **__pyx_4;
+ Py_ssize_t __pyx_5;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_v_retval = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":293 */
+ __pyx_v_py_key = ((PyObject *)__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":294 */
+ __pyx_1 = (Py_TYPE(__pyx_v_py_key)->tp_richcompare == NULL);
+ if (!__pyx_1) {
+ __pyx_1 = (Py_TYPE(__pyx_v_py_key)->tp_hash == NULL);
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; goto __pyx_L1;}
+ Py_INCREF(__pyx_k5p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k5p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":298 */
+ __pyx_v_added = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":300 */
+ #ifndef PYREX_WITHOUT_ASSERTIONS
+ if (!(__pyx_v_self->_used < __pyx_v_self->_mask)) {
+ PyErr_SetNone(PyExc_AssertionError);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; goto __pyx_L1;}
+ }
+ #endif
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":301 */
+ __pyx_4 = __pyx_f_6bzrlib_15_simple_set_pyx__lookup(__pyx_v_self,__pyx_v_key); if (__pyx_4 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; goto __pyx_L1;}
+ __pyx_v_slot = __pyx_4;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":302 */
+ __pyx_1 = ((__pyx_v_slot[0]) == NULL);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":303 */
+ Py_INCREF(__pyx_v_py_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":304 */
+ __pyx_v_self->_fill = (__pyx_v_self->_fill + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":305 */
+ __pyx_v_self->_used = (__pyx_v_self->_used + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":306 */
+ (__pyx_v_slot[0]) = __pyx_v_py_key;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":307 */
+ __pyx_v_added = 1;
+ goto __pyx_L3;
+ }
+ __pyx_1 = ((__pyx_v_slot[0]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":309 */
+ Py_INCREF(__pyx_v_py_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":310 */
+ __pyx_v_self->_used = (__pyx_v_self->_used + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":311 */
+ (__pyx_v_slot[0]) = __pyx_v_py_key;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":312 */
+ __pyx_v_added = 1;
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":315 */
+ Py_INCREF(((PyObject *)(__pyx_v_slot[0])));
+ Py_DECREF(__pyx_v_retval);
+ __pyx_v_retval = ((PyObject *)(__pyx_v_slot[0]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":317 */
+ __pyx_1 = __pyx_v_added;
+ if (__pyx_1) {
+ __pyx_1 = ((__pyx_v_self->_fill * 3) >= ((__pyx_v_self->_mask + 1) * 2));
+ }
+ if (__pyx_1) {
+ __pyx_5 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)__pyx_v_self->__pyx_vtab)->_resize(__pyx_v_self,(__pyx_v_self->_used * 2)); if (__pyx_5 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":323 */
+ Py_INCREF(__pyx_v_retval);
+ __pyx_r = __pyx_v_retval;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._add");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_retval);
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_discard(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet_discard[] = "Remove key from the set, whether it exists or not.\n\n :return: False if the item did not exist, True if it did\n ";
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_discard(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ static char *__pyx_argnames[] = {"key",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_key)) return 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":330 */
+ __pyx_1 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_v_self)->__pyx_vtab)->_discard(((struct SimpleSetObject *)__pyx_v_self),__pyx_v_key); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 330; goto __pyx_L1;}
+ if (__pyx_1) {
+ Py_INCREF(Py_True);
+ __pyx_r = Py_True;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":332 */
+ Py_INCREF(Py_False);
+ __pyx_r = Py_False;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.discard");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
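+/* Generated body of SimpleSet._discard(): returns 0 if the key was not
+ * present, 1 if it was removed.  A removed entry is replaced by the shared
+ * _dummy marker so later probes keep walking past the slot; once dummies
+ * (fill - used) exceed one fifth of the table, the table is rebuilt at
+ * 2 * used to shed them. */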
+static int __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__discard(struct SimpleSetObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject **__pyx_v_slot;
+ int __pyx_r;
+ PyObject **__pyx_1;
+ int __pyx_2;
+ Py_ssize_t __pyx_3;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":337 */
+ __pyx_1 = __pyx_f_6bzrlib_15_simple_set_pyx__lookup(__pyx_v_self,__pyx_v_key); if (__pyx_1 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; goto __pyx_L1;}
+ __pyx_v_slot = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":338 */
+ __pyx_2 = ((__pyx_v_slot[0]) == NULL);
+ if (!__pyx_2) {
+ __pyx_2 = ((__pyx_v_slot[0]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ }
+ if (__pyx_2) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":340 */
+ __pyx_v_self->_used = (__pyx_v_self->_used - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":341 */
+ Py_DECREF((__pyx_v_slot[0]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":342 */
+ (__pyx_v_slot[0]) = __pyx_v_6bzrlib_15_simple_set_pyx__dummy;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":353 */
+ __pyx_2 = (((__pyx_v_self->_fill - __pyx_v_self->_used) * 5) > __pyx_v_self->_mask);
+ if (__pyx_2) {
+ __pyx_3 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)__pyx_v_self->__pyx_vtab)->_resize(__pyx_v_self,(__pyx_v_self->_used * 2)); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":355 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet._discard");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___iter__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___iter__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = PyTuple_New(1); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_self);
+ PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_self);
+ __pyx_2 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_15_simple_set_pyx__SimpleSet_iterator), __pyx_1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet.__iter__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static int __pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_obj = 0;
+ int __pyx_r;
+ static char *__pyx_argnames[] = {"obj",0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O", __pyx_argnames, &__pyx_v_obj)) return -1;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_obj);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":370 */
+ if (!__Pyx_TypeTest(__pyx_v_obj, __pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_obj);
+ Py_DECREF(((PyObject *)((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set));
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set = ((struct SimpleSetObject *)__pyx_v_obj);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":371 */
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":372 */
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->_used = ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set->_used;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":373 */
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->len = ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set->_used;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._SimpleSet_iterator.__init__");
+ __pyx_r = -1;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_obj);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___iter__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___iter__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_self);
+ __pyx_r = __pyx_v_self;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
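+/* Generated body of _SimpleSet_iterator.__next__(): raises StopIteration
+ * once the set is exhausted (the iterator then drops its reference to the
+ * set), and raises RuntimeError if the set's _used count differs from the
+ * snapshot taken in __init__, i.e. the set was mutated during iteration.
+ * Advancing the cursor is delegated to SimpleSet_Next(). */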
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___next__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___next__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_v_key;
+ PyObject *__pyx_v_the_key;
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ int __pyx_4;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_the_key = Py_None; Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":382 */
+ __pyx_1 = ((PyObject *)((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set) == Py_None;
+ if (__pyx_1) {
+ __Pyx_Raise(PyExc_StopIteration, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 383; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":384 */
+ __pyx_1 = (((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set->_used != ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->_used);
+ if (__pyx_1) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":386 */
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->_used = (-1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":387 */
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L1;}
+ Py_INCREF(__pyx_k6p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k6p);
+ __pyx_3 = PyObject_CallObject(PyExc_RuntimeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; goto __pyx_L1;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":388 */
+ __pyx_1 = SimpleSet_Next(((PyObject *)((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set),(&((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->pos),(&__pyx_v_key)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; goto __pyx_L1;}
+ __pyx_4 = (!__pyx_1);
+ if (__pyx_4) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":389 */
+ Py_INCREF(Py_None);
+ Py_DECREF(((PyObject *)((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set));
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set = ((struct SimpleSetObject *)Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":390 */
+ __Pyx_Raise(PyExc_StopIteration, 0, 0);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; goto __pyx_L1;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":392 */
+ Py_INCREF(((PyObject *)__pyx_v_key));
+ Py_DECREF(__pyx_v_the_key);
+ __pyx_v_the_key = ((PyObject *)__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":393 */
+ ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->len = (((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->len - 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":394 */
+ Py_INCREF(__pyx_v_the_key);
+ __pyx_r = __pyx_v_the_key;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._SimpleSet_iterator.__next__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_the_key);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___length_hint__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___length_hint__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ static char *__pyx_argnames[] = {0};
+ if (!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "", __pyx_argnames)) return 0;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":397 */
+ __pyx_1 = ((PyObject *)((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set) != Py_None;
+ if (__pyx_1) {
+ __pyx_1 = (((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->_used == ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->set->_used);
+ }
+ if (__pyx_1) {
+ __pyx_2 = PyInt_FromSsize_t(((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)__pyx_v_self)->len); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":399 */
+ __pyx_2 = PyInt_FromLong(0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; goto __pyx_L1;}
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._SimpleSet_iterator.__length_hint__");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
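+/* The functions below (SimpleSet_New/Add/Contains/Discard/Get/Size/Next and
+ * _SimpleSet_Lookup) make up the C API of this module.  They are published
+ * through __Pyx_ExportFunction() in init_simple_set_pyx() (the module's
+ * __pyx_capi__ dictionary) so other compiled modules can call them without
+ * Python-level dispatch; apart from SimpleSet_New, each validates its first
+ * argument with _check_self(). */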
+static struct SimpleSetObject *SimpleSet_New(void) {
+ struct SimpleSetObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ __pyx_1 = PyObject_CallObject(((PyObject *)__pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet), 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; goto __pyx_L1;}
+ __pyx_r = ((struct SimpleSetObject *)__pyx_1);
+ __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = ((struct SimpleSetObject *)Py_None); Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_New");
+ __pyx_r = 0;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+static struct SimpleSetObject *__pyx_f_6bzrlib_15_simple_set_pyx__check_self(PyObject *__pyx_v_self) {
+ struct SimpleSetObject *__pyx_v_true_self;
+ struct SimpleSetObject *__pyx_r;
+ int __pyx_1;
+ PyObject *__pyx_2 = 0;
+ PyObject *__pyx_3 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_true_self = ((struct SimpleSetObject *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":417 */
+ __pyx_1 = __pyx_v_self == Py_None;
+ if (__pyx_1) {
+ __pyx_2 = PyTuple_New(1); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; goto __pyx_L1;}
+ Py_INCREF(__pyx_k7p);
+ PyTuple_SET_ITEM(__pyx_2, 0, __pyx_k7p);
+ __pyx_3 = PyObject_CallObject(PyExc_TypeError, __pyx_2); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; goto __pyx_L1;}
+ Py_DECREF(__pyx_2); __pyx_2 = 0;
+ __Pyx_Raise(__pyx_3, 0, 0);
+ Py_DECREF(__pyx_3); __pyx_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; goto __pyx_L1;}
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":419 */
+ if (!__Pyx_TypeTest(__pyx_v_self, __pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 419; goto __pyx_L1;}
+ Py_INCREF(__pyx_v_self);
+ Py_DECREF(((PyObject *)__pyx_v_true_self));
+ __pyx_v_true_self = ((struct SimpleSetObject *)__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":420 */
+ Py_INCREF(((PyObject *)__pyx_v_true_self));
+ __pyx_r = __pyx_v_true_self;
+ goto __pyx_L0;
+
+ __pyx_r = ((struct SimpleSetObject *)Py_None); Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_2);
+ Py_XDECREF(__pyx_3);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._check_self");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_true_self);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
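+/* Generated body of the module-level _lookup() helper: an open-addressing
+ * probe over the table.  The probe starts at hash(key) & mask and advances
+ * by i = i + 1 + n_lookup after each collision.  It returns the slot holding
+ * an equal key (identity first, then _is_equal()), or the first reusable
+ * slot for a missing key (a dummy seen along the way, otherwise the
+ * terminating NULL slot); NULL is returned with an exception set if hashing
+ * or comparison fails, and an AssertionError is raised if the probe somehow
+ * exhausts the table. */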
+static PyObject **__pyx_f_6bzrlib_15_simple_set_pyx__lookup(struct SimpleSetObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ size_t __pyx_v_i;
+ size_t __pyx_v_n_lookup;
+ Py_ssize_t __pyx_v_mask;
+ long __pyx_v_key_hash;
+ PyObject **__pyx_v_table;
+ PyObject **__pyx_v_slot;
+ PyObject *__pyx_v_cur;
+ PyObject **__pyx_v_free_slot;
+ PyObject *__pyx_v_py_key;
+ PyObject **__pyx_r;
+ long __pyx_1;
+ size_t __pyx_2;
+ int __pyx_3;
+ PyObject *__pyx_4 = 0;
+ PyObject *__pyx_5 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":464 */
+ __pyx_v_py_key = ((PyObject *)__pyx_v_key);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":467 */
+ __pyx_1 = PyObject_Hash(__pyx_v_py_key); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 467; goto __pyx_L1;}
+ __pyx_v_key_hash = __pyx_1;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":468 */
+ __pyx_v_i = __pyx_v_key_hash;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":469 */
+ __pyx_v_mask = __pyx_v_self->_mask;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":470 */
+ __pyx_v_table = __pyx_v_self->_table;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":471 */
+ __pyx_v_free_slot = NULL;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":472 */
+ __pyx_2 = __pyx_v_mask;
+ for (__pyx_v_n_lookup = 0; __pyx_v_n_lookup <= __pyx_2; ++__pyx_v_n_lookup) {
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":473 */
+ __pyx_v_slot = (&(__pyx_v_table[(__pyx_v_i & __pyx_v_mask)]));
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":474 */
+ __pyx_v_cur = (__pyx_v_slot[0]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":475 */
+ __pyx_3 = (__pyx_v_cur == NULL);
+ if (__pyx_3) {
+ __pyx_3 = (__pyx_v_free_slot != NULL);
+ if (__pyx_3) {
+ __pyx_r = __pyx_v_free_slot;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ /*else*/ {
+ __pyx_r = __pyx_v_slot;
+ goto __pyx_L0;
+ }
+ __pyx_L5:;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":482 */
+ __pyx_3 = (__pyx_v_cur == __pyx_v_py_key);
+ if (__pyx_3) {
+ __pyx_r = __pyx_v_slot;
+ goto __pyx_L0;
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":485 */
+ __pyx_3 = (__pyx_v_cur == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ if (__pyx_3) {
+ __pyx_3 = (__pyx_v_free_slot == NULL);
+ if (__pyx_3) {
+ __pyx_v_free_slot = __pyx_v_slot;
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+ goto __pyx_L7;
+ }
+ __pyx_3 = __pyx_f_6bzrlib_15_simple_set_pyx__is_equal(__pyx_v_py_key,__pyx_v_key_hash,__pyx_v_cur); if (__pyx_3 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; goto __pyx_L1;}
+ if (__pyx_3) {
+ __pyx_r = __pyx_v_slot;
+ goto __pyx_L0;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":491 */
+ __pyx_v_i = ((__pyx_v_i + 1) + __pyx_v_n_lookup);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":492 */
+ __pyx_4 = PyTuple_New(1); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ Py_INCREF(__pyx_k8p);
+ PyTuple_SET_ITEM(__pyx_4, 0, __pyx_k8p);
+ __pyx_5 = PyObject_CallObject(PyExc_AssertionError, __pyx_4); if (!__pyx_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+ Py_DECREF(__pyx_4); __pyx_4 = 0;
+ __Pyx_Raise(__pyx_5, 0, 0);
+ Py_DECREF(__pyx_5); __pyx_5 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; goto __pyx_L1;}
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_4);
+ Py_XDECREF(__pyx_5);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._lookup");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject **_SimpleSet_Lookup(PyObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject **__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject **__pyx_2;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; goto __pyx_L1;}
+ __pyx_2 = __pyx_f_6bzrlib_15_simple_set_pyx__lookup(((struct SimpleSetObject *)__pyx_1),__pyx_v_key); if (__pyx_2 == NULL) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx._SimpleSet_Lookup");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *SimpleSet_Add(PyObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; goto __pyx_L1;}
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_1)->__pyx_vtab)->_add(((struct SimpleSetObject *)__pyx_1),__pyx_v_key); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ __pyx_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; Py_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_Add");
+ __pyx_r = 0;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static int SimpleSet_Contains(PyObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ __pyx_2 = PySequence_Contains(__pyx_1, __pyx_v_key); if (__pyx_2 < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_Contains");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static int SimpleSet_Discard(PyObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 536; goto __pyx_L1;}
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_1)->__pyx_vtab)->_discard(((struct SimpleSetObject *)__pyx_1),__pyx_v_key); if (__pyx_2 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 536; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_Discard");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static PyObject *SimpleSet_Get(struct SimpleSetObject *__pyx_v_self,PyObject *__pyx_v_key) {
+ PyObject *__pyx_r;
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2;
+ Py_INCREF(__pyx_v_self);
+ Py_INCREF(__pyx_v_key);
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(((PyObject *)__pyx_v_self))); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; goto __pyx_L1;}
+ __pyx_2 = ((struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *)((struct SimpleSetObject *)__pyx_1)->__pyx_vtab)->_get(((struct SimpleSetObject *)__pyx_1),__pyx_v_key); if (__pyx_2 == NULL && PyErr_Occurred()) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ __pyx_r = __pyx_2;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_Get");
+ __pyx_r = NULL;
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ Py_DECREF(__pyx_v_key);
+ return __pyx_r;
+}
+
+static Py_ssize_t SimpleSet_Size(PyObject *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ PyObject *__pyx_1 = 0;
+ Py_INCREF(__pyx_v_self);
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; goto __pyx_L1;}
+ __pyx_r = ((struct SimpleSetObject *)__pyx_1)->_used;
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_Size");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
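+/* Generated body of SimpleSet_Next(): the C-level iteration primitive, in
+ * the style of PyDict_Next().  *pos is a cursor owned by the caller and
+ * should start at 0; each call skips NULL and dummy slots, stores a borrowed
+ * reference to the next key in *key, advances *pos past it and returns 1.
+ * It returns 0 once the table is exhausted and -1 on error.  A typical
+ * caller loop (a sketch of the pattern used by SimpleSet_traverse() below):
+ *
+ *     Py_ssize_t pos = 0;
+ *     PyObject *key;
+ *     int res;
+ *     while ((res = SimpleSet_Next(set, &pos, &key)) > 0) {
+ *         ... use the borrowed reference in key ...
+ *     }
+ *     if (res < 0) { ... an exception has been set ... }
+ */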
+static int SimpleSet_Next(PyObject *__pyx_v_self,Py_ssize_t *__pyx_v_pos,PyObject **__pyx_v_key) {
+ Py_ssize_t __pyx_v_i;
+ Py_ssize_t __pyx_v_mask;
+ struct SimpleSetObject *__pyx_v_true_self;
+ PyObject **__pyx_v_table;
+ int __pyx_r;
+ PyObject *__pyx_1 = 0;
+ int __pyx_2;
+ Py_INCREF(__pyx_v_self);
+ __pyx_v_true_self = ((struct SimpleSetObject *)Py_None); Py_INCREF(Py_None);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":569 */
+ __pyx_1 = ((PyObject *)__pyx_f_6bzrlib_15_simple_set_pyx__check_self(__pyx_v_self)); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; goto __pyx_L1;}
+ Py_DECREF(((PyObject *)__pyx_v_true_self));
+ __pyx_v_true_self = ((struct SimpleSetObject *)__pyx_1);
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":570 */
+ __pyx_v_i = (__pyx_v_pos[0]);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":571 */
+ __pyx_2 = (__pyx_v_i < 0);
+ if (__pyx_2) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L2;
+ }
+ __pyx_L2:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":573 */
+ __pyx_v_mask = __pyx_v_true_self->_mask;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":574 */
+ __pyx_v_table = __pyx_v_true_self->_table;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":575 */
+ while (1) {
+ __pyx_2 = (__pyx_v_i <= __pyx_v_mask);
+ if (__pyx_2) {
+ __pyx_2 = ((__pyx_v_table[__pyx_v_i]) == NULL);
+ if (!__pyx_2) {
+ __pyx_2 = ((__pyx_v_table[__pyx_v_i]) == __pyx_v_6bzrlib_15_simple_set_pyx__dummy);
+ }
+ }
+ if (!__pyx_2) break;
+ __pyx_v_i = (__pyx_v_i + 1);
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":577 */
+ (__pyx_v_pos[0]) = (__pyx_v_i + 1);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":578 */
+ __pyx_2 = (__pyx_v_i > __pyx_v_mask);
+ if (__pyx_2) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":580 */
+ __pyx_2 = (__pyx_v_key != NULL);
+ if (__pyx_2) {
+ (__pyx_v_key[0]) = (__pyx_v_table[__pyx_v_i]);
+ goto __pyx_L6;
+ }
+ __pyx_L6:;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":582 */
+ __pyx_r = 1;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_Next");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_true_self);
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+
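+/* Generated body of SimpleSet_traverse(): the tp_traverse hook installed at
+ * the end of init_simple_set_pyx().  It walks every live key with
+ * SimpleSet_Next() and reports each one to the garbage collector's visit
+ * callback. */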
+static int __pyx_f_6bzrlib_15_simple_set_pyx_SimpleSet_traverse(struct SimpleSetObject *__pyx_v_self,visitproc __pyx_v_visit,void *__pyx_v_arg) {
+ Py_ssize_t __pyx_v_pos;
+ PyObject *__pyx_v_next_key;
+ int __pyx_v_ret;
+ int __pyx_r;
+ int __pyx_1;
+ Py_INCREF(__pyx_v_self);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":597 */
+ __pyx_v_pos = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":598 */
+ while (1) {
+ __pyx_1 = SimpleSet_Next(((PyObject *)__pyx_v_self),(&__pyx_v_pos),(&__pyx_v_next_key)); if (__pyx_1 == (-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; goto __pyx_L1;}
+ if (!__pyx_1) break;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":599 */
+ __pyx_v_ret = __pyx_v_visit(__pyx_v_next_key,__pyx_v_arg);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":600 */
+ __pyx_1 = __pyx_v_ret;
+ if (__pyx_1) {
+ __pyx_r = __pyx_v_ret;
+ goto __pyx_L0;
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+ }
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":602 */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1:;
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx.SimpleSet_traverse");
+ __pyx_r = (-1);
+ __pyx_L0:;
+ Py_DECREF(__pyx_v_self);
+ return __pyx_r;
+}
+static struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet __pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet;
+
+static PyObject *__pyx_tp_new_6bzrlib_15_simple_set_pyx_SimpleSet(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct SimpleSetObject *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct SimpleSetObject *)o);
+ *(struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet **)&p->__pyx_vtab = __pyx_vtabptr_6bzrlib_15_simple_set_pyx_SimpleSet;
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_15_simple_set_pyx_SimpleSet(PyObject *o) {
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ ++o->ob_refcnt;
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___dealloc__(o);
+ if (PyErr_Occurred()) PyErr_WriteUnraisable(o);
+ --o->ob_refcnt;
+ PyErr_Restore(etype, eval, etb);
+ }
+ (*o->ob_type->tp_free)(o);
+}
+static PyObject *__pyx_sq_item_6bzrlib_15_simple_set_pyx_SimpleSet(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
+ r = o->ob_type->tp_as_mapping->mp_subscript(o, x);
+ Py_DECREF(x);
+ return r;
+}
+
+static PyObject *__pyx_getprop_6bzrlib_15_simple_set_pyx_9SimpleSet_used(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4used___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_15_simple_set_pyx_9SimpleSet_fill(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4fill___get__(o);
+}
+
+static PyObject *__pyx_getprop_6bzrlib_15_simple_set_pyx_9SimpleSet_mask(PyObject *o, void *x) {
+ return __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_4mask___get__(o);
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_15_simple_set_pyx_SimpleSet[] = {
+ {"__sizeof__", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___sizeof__, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_memory_size", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__memory_size, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet__memory_size},
+ {"_test_lookup", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__test_lookup, METH_VARARGS|METH_KEYWORDS, 0},
+ {"_py_resize", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__py_resize, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet__py_resize},
+ {"add", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_add, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet_add},
+ {"discard", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet_discard, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6bzrlib_15_simple_set_pyx_9SimpleSet_discard},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_6bzrlib_15_simple_set_pyx_SimpleSet[] = {
+ {"used", __pyx_getprop_6bzrlib_15_simple_set_pyx_9SimpleSet_used, 0, 0, 0},
+ {"fill", __pyx_getprop_6bzrlib_15_simple_set_pyx_9SimpleSet_fill, 0, 0, 0},
+ {"mask", __pyx_getprop_6bzrlib_15_simple_set_pyx_9SimpleSet_mask, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_SimpleSet = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_SimpleSet = {
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___len__, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ __pyx_sq_item_6bzrlib_15_simple_set_pyx_SimpleSet, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___contains__, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_SimpleSet = {
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___len__, /*mp_length*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___getitem__, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer_SimpleSet = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+DL_EXPORT(PyTypeObject) SimpleSet_Type = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._simple_set_pyx.SimpleSet", /*tp_name*/
+ sizeof(struct SimpleSetObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_15_simple_set_pyx_SimpleSet, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number_SimpleSet, /*tp_as_number*/
+ &__pyx_tp_as_sequence_SimpleSet, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_SimpleSet, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer_SimpleSet, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "This class can be used to track canonical forms for objects.\n\n It is similar in function to the interned dictionary that is used by\n strings. However:\n\n 1) It assumes that hash(obj) is cheap, so does not need to inline a copy\n of it\n 2) It only stores one reference to the object, rather than 2 (key vs\n key:value)\n\n As such, it uses 1/3rd the amount of memory to store a pointer to the\n interned object.\n ", /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___iter__, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_6bzrlib_15_simple_set_pyx_SimpleSet, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_6bzrlib_15_simple_set_pyx_SimpleSet, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_15_simple_set_pyx_SimpleSet, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
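+
+/* Illustrative Python-level use of the SimpleSet type defined above (an
+ * editorial sketch, not part of the generated module):
+ *
+ *     s = SimpleSet()
+ *     canonical = s.add(key)       # stored object equal to key
+ *     key in s                     # __contains__
+ *     for k in s: ...              # __iter__ / _SimpleSet_iterator
+ *     s.discard(key)               # True if key was present
+ *     s.used, s.fill, s.mask       # read-only table statistics
+ */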
+
+static PyObject *__pyx_tp_new_6bzrlib_15_simple_set_pyx__SimpleSet_iterator(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *p;
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ p = ((struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)o);
+ p->set = ((struct SimpleSetObject *)Py_None); Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_6bzrlib_15_simple_set_pyx__SimpleSet_iterator(PyObject *o) {
+ struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *p = (struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)o;
+ Py_XDECREF(((PyObject *)p->set));
+ (*o->ob_type->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_6bzrlib_15_simple_set_pyx__SimpleSet_iterator(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *p = (struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)o;
+ if (p->set) {
+ e = (*v)(((PyObject*)p->set), a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_6bzrlib_15_simple_set_pyx__SimpleSet_iterator(PyObject *o) {
+ struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *p = (struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator *)o;
+ PyObject *t;
+ t = ((PyObject *)p->set);
+ p->set = ((struct SimpleSetObject *)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(t);
+ return 0;
+}
+
+static struct PyMethodDef __pyx_methods_6bzrlib_15_simple_set_pyx__SimpleSet_iterator[] = {
+ {"__length_hint__", (PyCFunction)__pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___length_hint__, METH_VARARGS|METH_KEYWORDS, 0},
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number__SimpleSet_iterator = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ 0, /*nb_divide*/
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ 0, /*nb_coerce*/
+ 0, /*nb_int*/
+ 0, /*nb_long*/
+ 0, /*nb_float*/
+ 0, /*nb_oct*/
+ 0, /*nb_hex*/
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ 0, /*nb_inplace_divide*/
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence__SimpleSet_iterator = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping__SimpleSet_iterator = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer__SimpleSet_iterator = {
+ 0, /*bf_getreadbuffer*/
+ 0, /*bf_getwritebuffer*/
+ 0, /*bf_getsegcount*/
+ 0, /*bf_getcharbuffer*/
+};
+
+PyTypeObject __pyx_type_6bzrlib_15_simple_set_pyx__SimpleSet_iterator = {
+ PyObject_HEAD_INIT(0)
+ 0, /*ob_size*/
+ "bzrlib._simple_set_pyx._SimpleSet_iterator", /*tp_name*/
+ sizeof(struct __pyx_obj_6bzrlib_15_simple_set_pyx__SimpleSet_iterator), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_6bzrlib_15_simple_set_pyx__SimpleSet_iterator, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number__SimpleSet_iterator, /*tp_as_number*/
+ &__pyx_tp_as_sequence__SimpleSet_iterator, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping__SimpleSet_iterator, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer__SimpleSet_iterator, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "Iterator over the SimpleSet structure.", /*tp_doc*/
+ __pyx_tp_traverse_6bzrlib_15_simple_set_pyx__SimpleSet_iterator, /*tp_traverse*/
+ __pyx_tp_clear_6bzrlib_15_simple_set_pyx__SimpleSet_iterator, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___iter__, /*tp_iter*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___next__, /*tp_iternext*/
+ __pyx_methods_6bzrlib_15_simple_set_pyx__SimpleSet_iterator, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_f_6bzrlib_15_simple_set_pyx_19_SimpleSet_iterator___init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_6bzrlib_15_simple_set_pyx__SimpleSet_iterator, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+};
+
+static struct PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+static void __pyx_init_filenames(void); /*proto*/
+
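+/* Module initialisation: init_simple_set_pyx() creates the module object,
+ * interns the string table, exports the C API above via
+ * __Pyx_ExportFunction(), fills in the SimpleSet vtable, readies both
+ * extension types, builds the shared _dummy sentinel (a plain object()) used
+ * to mark deleted slots, and finally installs SimpleSet_traverse as
+ * SimpleSet's tp_traverse hook. */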
+PyMODINIT_FUNC init_simple_set_pyx(void); /*proto*/
+PyMODINIT_FUNC init_simple_set_pyx(void) {
+ PyObject *__pyx_1 = 0;
+ PyObject *__pyx_2 = 0;
+ __pyx_init_filenames();
+ __pyx_m = Py_InitModule4("_simple_set_pyx", __pyx_methods, __pyx_mdoc, 0, PYTHON_API_VERSION);
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ Py_INCREF(__pyx_m);
+ __pyx_b = PyImport_AddModule("__builtin__");
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;};
+ __pyx_v_6bzrlib_15_simple_set_pyx__dummy_obj = Py_None; Py_INCREF(Py_None);
+ __pyx_v_6bzrlib_15_simple_set_pyx__NotImplemented = Py_None; Py_INCREF(Py_None);
+ if (__Pyx_ExportFunction("SimpleSet_New", (void*)SimpleSet_New, "struct SimpleSetObject *(void)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("SimpleSet_Add", (void*)SimpleSet_Add, "PyObject *(PyObject *,PyObject *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("SimpleSet_Contains", (void*)SimpleSet_Contains, "int (PyObject *,PyObject *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("SimpleSet_Discard", (void*)SimpleSet_Discard, "int (PyObject *,PyObject *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("SimpleSet_Get", (void*)SimpleSet_Get, "PyObject *(struct SimpleSetObject *,PyObject *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("SimpleSet_Size", (void*)SimpleSet_Size, "Py_ssize_t (PyObject *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("SimpleSet_Next", (void*)SimpleSet_Next, "int (PyObject *,Py_ssize_t *,PyObject **)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ if (__Pyx_ExportFunction("_SimpleSet_Lookup", (void*)_SimpleSet_Lookup, "PyObject **(PyObject *,PyObject *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; goto __pyx_L1;}
+ __pyx_vtabptr_6bzrlib_15_simple_set_pyx_SimpleSet = &__pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet._get = (void(*)(void))__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__get;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet._add = (void(*)(void))__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__add;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet._discard = (void(*)(void))__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__discard;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet._insert_clean = (void(*)(void))__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__insert_clean;
+ *(void(**)(void))&__pyx_vtable_6bzrlib_15_simple_set_pyx_SimpleSet._resize = (void(*)(void))__pyx_f_6bzrlib_15_simple_set_pyx_9SimpleSet__resize;
+ if (PyType_Ready(&SimpleSet_Type) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ if (__Pyx_SetVtable(SimpleSet_Type.tp_dict, __pyx_vtabptr_6bzrlib_15_simple_set_pyx_SimpleSet) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "SimpleSet", (PyObject *)&SimpleSet_Type) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet = &SimpleSet_Type;
+ __pyx_type_6bzrlib_15_simple_set_pyx__SimpleSet_iterator.tp_free = _PyObject_GC_Del;
+ if (PyType_Ready(&__pyx_type_6bzrlib_15_simple_set_pyx__SimpleSet_iterator) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; goto __pyx_L1;}
+ if (PyObject_SetAttrString(__pyx_m, "_SimpleSet_iterator", (PyObject *)&__pyx_type_6bzrlib_15_simple_set_pyx__SimpleSet_iterator) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; goto __pyx_L1;}
+ __pyx_ptype_6bzrlib_15_simple_set_pyx__SimpleSet_iterator = &__pyx_type_6bzrlib_15_simple_set_pyx__SimpleSet_iterator;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":54 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_object); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; goto __pyx_L1;}
+ __pyx_2 = PyObject_CallObject(__pyx_1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; goto __pyx_L1;}
+ Py_DECREF(__pyx_1); __pyx_1 = 0;
+ Py_DECREF(__pyx_v_6bzrlib_15_simple_set_pyx__dummy_obj);
+ __pyx_v_6bzrlib_15_simple_set_pyx__dummy_obj = __pyx_2;
+ __pyx_2 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":55 */
+ __pyx_v_6bzrlib_15_simple_set_pyx__dummy = ((PyObject *)__pyx_v_6bzrlib_15_simple_set_pyx__dummy_obj);
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":59 */
+ __pyx_1 = __Pyx_GetName(__pyx_b, __pyx_n_NotImplemented); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; goto __pyx_L1;}
+ Py_DECREF(__pyx_v_6bzrlib_15_simple_set_pyx__NotImplemented);
+ __pyx_v_6bzrlib_15_simple_set_pyx__NotImplemented = __pyx_1;
+ __pyx_1 = 0;
+
+ /* "/home/vila/src/bzr/integration/trunk/bzrlib/_simple_set_pyx.pyx":606 */
+ ((PyTypeObject *)__pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet)->tp_traverse = ((traverseproc)__pyx_f_6bzrlib_15_simple_set_pyx_SimpleSet_traverse);
+ return;
+ __pyx_L1:;
+ Py_XDECREF(__pyx_1);
+ Py_XDECREF(__pyx_2);
+ __Pyx_AddTraceback("bzrlib._simple_set_pyx");
+}
+
+static char *__pyx_filenames[] = {
+ "_simple_set_pyx.pyx",
+};
+
+/* Runtime support code */
+
+static void __pyx_init_filenames(void) {
+ __pyx_f = __pyx_filenames;
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) type->ob_type;
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+ PyErr_Restore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+static int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+ if (!type) {
+ PyErr_Format(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ if (obj == Py_None || PyObject_TypeCheck(obj, type))
+ return 1;
+ PyErr_Format(PyExc_TypeError, "Cannot convert %s to %s",
+ obj->ob_type->tp_name, type->tp_name);
+ return 0;
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ if (!*t->p)
+ return -1;
+ if (t->i)
+ PyString_InternInPlace(t->p);
+ ++t;
+ }
+ return 0;
+}
+
+static int __Pyx_ExportFunction(char *n, void *f, char *s) {
+ PyObject *d = 0;
+ PyObject *p = 0;
+ d = PyObject_GetAttrString(__pyx_m, "__pyx_capi__");
+ if (!d) {
+ PyErr_Clear();
+ d = PyDict_New();
+ if (!d)
+ goto bad;
+ Py_INCREF(d);
+ if (PyModule_AddObject(__pyx_m, "__pyx_capi__", d) < 0)
+ goto bad;
+ }
+ p = PyCObject_FromVoidPtrAndDesc(f, s, 0);
+ if (!p)
+ goto bad;
+ if (PyDict_SetItemString(d, n, p) < 0)
+ goto bad;
+ Py_DECREF(d);
+ return 0;
+bad:
+ Py_XDECREF(p);
+ Py_XDECREF(d);
+ return -1;
+}
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+ PyObject *pycobj = 0;
+ int result;
+
+ pycobj = PyCObject_FromVoidPtr(vtable, 0);
+ if (!pycobj)
+ goto bad;
+ if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0)
+ goto bad;
+ result = 0;
+ goto done;
+
+bad:
+ result = -1;
+done:
+ Py_XDECREF(pycobj);
+ return result;
+}
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result)
+ PyErr_SetObject(PyExc_NameError, name);
+ return result;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(char *funcname) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyObject *empty_tuple = 0;
+ PyObject *empty_string = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ py_srcfile = PyString_FromString(__pyx_filename);
+ if (!py_srcfile) goto bad;
+ py_funcname = PyString_FromString(funcname);
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ empty_tuple = PyTuple_New(0);
+ if (!empty_tuple) goto bad;
+ empty_string = PyString_FromString("");
+ if (!empty_string) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ empty_string, /*PyObject *code,*/
+ empty_tuple, /*PyObject *consts,*/
+ empty_tuple, /*PyObject *names,*/
+ empty_tuple, /*PyObject *varnames,*/
+ empty_tuple, /*PyObject *freevars,*/
+ empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ empty_string /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_Get(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(empty_tuple);
+ Py_XDECREF(empty_string);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
diff --git a/bzrlib/_simple_set_pyx.h b/bzrlib/_simple_set_pyx.h
new file mode 100644
index 0000000..2f63225
--- /dev/null
+++ b/bzrlib/_simple_set_pyx.h
@@ -0,0 +1,26 @@
+#ifndef __PYX_HAVE__bzrlib___simple_set_pyx
+#define __PYX_HAVE__bzrlib___simple_set_pyx
+#ifdef __cplusplus
+#define __PYX_EXTERN_C extern "C"
+#else
+#define __PYX_EXTERN_C extern
+#endif
+
+struct SimpleSetObject {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_6bzrlib_15_simple_set_pyx_SimpleSet *__pyx_vtab;
+ Py_ssize_t _used;
+ Py_ssize_t _fill;
+ Py_ssize_t _mask;
+ PyObject **_table;
+};
+
+#ifndef __PYX_HAVE_API__bzrlib___simple_set_pyx
+
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) SimpleSet_Type;
+
+#endif
+
+PyMODINIT_FUNC init_simple_set_pyx(void);
+
+#endif
diff --git a/bzrlib/_simple_set_pyx.pxd b/bzrlib/_simple_set_pyx.pxd
new file mode 100644
index 0000000..fc81894
--- /dev/null
+++ b/bzrlib/_simple_set_pyx.pxd
@@ -0,0 +1,91 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Interface definition of a class like PySet but without caching the hash.
+
+This is generally useful when you want to 'intern' objects, etc. Note that this
+differs from Set in that we:
+ 1) Don't have all of the .intersection, .difference, etc functions
+ 2) Do return the object from the set via queries
+ eg. SimpleSet.add(key) => saved_key and SimpleSet[key] => saved_key
+"""
+
+cdef extern from "Python.h":
+ ctypedef struct PyObject:
+ pass
+
+
+cdef public api class SimpleSet [object SimpleSetObject, type SimpleSet_Type]:
+ """A class similar to PySet, but with simpler implementation.
+
+ The main advantage is that this class uses only 2N memory to store N
+ objects rather than 4N memory. The main trade-off is that we do not cache
+ the hash value of saved objects. As such, it is assumed that computing the
+ hash will be cheap (such as strings or tuples of strings, etc.)
+
+ This also differs in that you can get back the objects that are stored
+ (like a dict), but we also don't implement the complete list of 'set'
+ operations (difference, intersection, etc).
+ """
+ # Data structure definition:
+ # This is a basic hash table using open addressing.
+ # http://en.wikipedia.org/wiki/Open_addressing
+ # Basically that means we keep an array of pointers to Python objects
+ # (called a table). Each location in the array is called a 'slot'.
+ #
+ # An empty slot holds a NULL pointer, a slot where there was an item
+ # which was then deleted will hold a pointer to _dummy, and a filled slot
+ # points at the actual object which fills that slot.
+ #
+ # The table is always a power of two, and the default location where an
+ # object is inserted is at hash(object) & (table_size - 1)
+ #
+ # If there is a collision, then we search for another location. The
+ # specific algorithm is in _lookup. We search until we:
+ # find the object
+ # find an equivalent object (by tp_richcompare(obj1, obj2, Py_EQ))
+ # find a NULL slot
+ #
+    # When an object is deleted, we set its slot to _dummy. This way we don't
+ # have to track whether there was a collision, and find the corresponding
+ # keys. (The collision resolution algorithm makes that nearly impossible
+ # anyway, because it depends on the upper bits of the hash.)
+ # The main effect of this, is that if we find _dummy, then we can insert
+ # an object there, but we have to keep searching until we find NULL to
+ # know that the object is not present elsewhere.
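+    #
+    # Illustrative slot states (a sketch, not part of the implementation):
+    # with an 8-slot table and hash(A) & 7 == hash(B) & 7 == 3, inserting A
+    # and then B gives
+    #     [3] = A, [4] = B, everything else NULL
+    # (the first fallback slot is home+1, see _lookup). Discarding A leaves
+    #     [3] = _dummy, [4] = B
+    # so a later lookup for B probes slot 3, sees _dummy, keeps searching, and
+    # finds B at slot 4; hitting NULL instead would mean "not present".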
+
+ cdef Py_ssize_t _used # active
+ cdef Py_ssize_t _fill # active + dummy
+ cdef Py_ssize_t _mask # Table contains (mask+1) slots, a power of 2
+    cdef PyObject **_table # Pyrex/Cython doesn't support arrays of 'object',
+ # so we manage it manually
+
+ cdef PyObject *_get(self, object key) except? NULL
+ cdef object _add(self, key)
+ cdef int _discard(self, key) except -1
+ cdef int _insert_clean(self, PyObject *key) except -1
+ cdef Py_ssize_t _resize(self, Py_ssize_t min_unused) except -1
+
+
+# TODO: might want to export the C api here, though it is all available from
+# the class object...
+cdef api SimpleSet SimpleSet_New()
+cdef api object SimpleSet_Add(object self, object key)
+cdef api int SimpleSet_Contains(object self, object key) except -1
+cdef api int SimpleSet_Discard(object self, object key) except -1
+cdef api PyObject *SimpleSet_Get(SimpleSet self, object key) except? NULL
+cdef api Py_ssize_t SimpleSet_Size(object self) except -1
+cdef api int SimpleSet_Next(object self, Py_ssize_t *pos, PyObject **key) except -1
diff --git a/bzrlib/_simple_set_pyx.pyx b/bzrlib/_simple_set_pyx.pyx
new file mode 100644
index 0000000..a9cee98
--- /dev/null
+++ b/bzrlib/_simple_set_pyx.pyx
@@ -0,0 +1,606 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Definition of a class that is similar to Set with some small changes."""
+
+cdef extern from "python-compat.h":
+ pass
+
+cdef extern from "Python.h":
+ ctypedef unsigned long size_t
+ ctypedef long (*hashfunc)(PyObject*) except -1
+ ctypedef object (*richcmpfunc)(PyObject *, PyObject *, int)
+ ctypedef int (*visitproc)(PyObject *, void *)
+ ctypedef int (*traverseproc)(PyObject *, visitproc, void *)
+ int Py_EQ
+ void Py_INCREF(PyObject *)
+ void Py_DECREF(PyObject *)
+ ctypedef struct PyTypeObject:
+ hashfunc tp_hash
+ richcmpfunc tp_richcompare
+ traverseproc tp_traverse
+
+ PyTypeObject *Py_TYPE(PyObject *)
+ # Note: *Don't* use hash(), Pyrex 0.9.8.5 thinks it returns an 'int', and
+ # thus silently truncates to 32-bits on 64-bit machines.
+ long PyObject_Hash(PyObject *) except -1
+
+ void *PyMem_Malloc(size_t nbytes)
+ void PyMem_Free(void *)
+ void memset(void *, int, size_t)
+
+
+# Dummy is an object used to mark nodes that have been deleted. Since
+# collisions require us to move a node to an alternative location, if we just
+# set an entry to NULL on delete, we won't find any relocated nodes.
+# We have to use _dummy_obj because we need to keep a refcount to it, but we
+# also use _dummy as a pointer, because it avoids having to put <PyObject*> all
+# over the code base.
+cdef object _dummy_obj
+cdef PyObject *_dummy
+_dummy_obj = object()
+_dummy = <PyObject *>_dummy_obj
+
+
+cdef object _NotImplemented
+_NotImplemented = NotImplemented
+
+
+cdef int _is_equal(PyObject *this, long this_hash, PyObject *other) except -1:
+ cdef long other_hash
+
+ if this == other:
+ return 1
+ other_hash = PyObject_Hash(other)
+ if other_hash != this_hash:
+ return 0
+
+ # This implements a subset of the PyObject_RichCompareBool functionality.
+ # Namely it:
+ # 1) Doesn't try to do anything with old-style classes
+ # 2) Assumes that both objects have a tp_richcompare implementation, and
+ # that if that is not enough to compare equal, then they are not
+ # equal. (It doesn't try to cast them both to some intermediate form
+ # that would compare equal.)
+ res = Py_TYPE(this).tp_richcompare(this, other, Py_EQ)
+ if res is _NotImplemented:
+ res = Py_TYPE(other).tp_richcompare(other, this, Py_EQ)
+ if res is _NotImplemented:
+ return 0
+ if res:
+ return 1
+ return 0
+
+
+cdef public api class SimpleSet [object SimpleSetObject, type SimpleSet_Type]:
+ """This class can be used to track canonical forms for objects.
+
+ It is similar in function to the interned dictionary that is used by
+ strings. However:
+
+ 1) It assumes that hash(obj) is cheap, so does not need to inline a copy
+ of it
+ 2) It only stores one reference to the object, rather than 2 (key vs
+ key:value)
+
+ As such, it uses 1/3rd the amount of memory to store a pointer to the
+ interned object.
+ """
+ # Attributes are defined in the .pxd file
+ DEF DEFAULT_SIZE=1024
+
+ def __init__(self):
+ cdef Py_ssize_t size, n_bytes
+
+ size = DEFAULT_SIZE
+ self._mask = size - 1
+ self._used = 0
+ self._fill = 0
+ n_bytes = sizeof(PyObject*) * size;
+ self._table = <PyObject **>PyMem_Malloc(n_bytes)
+ if self._table == NULL:
+ raise MemoryError()
+ memset(self._table, 0, n_bytes)
+
+ def __sizeof__(self):
+ # Note: Pyrex doesn't allow sizeof(class) so we re-implement it here.
+ # Bits are:
+ # 1: PyObject
+ # 2: vtable *
+ # 3: 3 Py_ssize_t
+ # 4: PyObject**
+ # Note that we might get alignment, etc, wrong, but at least this is
+ # better than no estimate at all
+ # return sizeof(SimpleSet) + (self._mask + 1) * (sizeof(PyObject*))
+ return (sizeof(PyObject) + sizeof(void*)
+ + 3*sizeof(Py_ssize_t) + sizeof(PyObject**)
+ + (self._mask + 1) * sizeof(PyObject*))
+
+ def __dealloc__(self):
+ if self._table != NULL:
+ PyMem_Free(self._table)
+ self._table = NULL
+
+ property used:
+ def __get__(self):
+ return self._used
+
+ property fill:
+ def __get__(self):
+ return self._fill
+
+ property mask:
+ def __get__(self):
+ return self._mask
+
+ def _memory_size(self):
+ """Return the number of bytes of memory consumed by this class."""
+ return sizeof(self) + (sizeof(PyObject*)*(self._mask + 1))
+
+ def __len__(self):
+ return self._used
+
+ def _test_lookup(self, key):
+ cdef PyObject **slot
+
+ slot = _lookup(self, key)
+ if slot[0] == NULL:
+ res = '<null>'
+ elif slot[0] == _dummy:
+ res = '<dummy>'
+ else:
+ res = <object>slot[0]
+ return <int>(slot - self._table), res
+
+ def __contains__(self, key):
+ """Is key present in this SimpleSet."""
+ cdef PyObject **slot
+
+ slot = _lookup(self, key)
+ if slot[0] == NULL or slot[0] == _dummy:
+ return False
+ return True
+
+ cdef PyObject *_get(self, object key) except? NULL:
+ """Return the object (or nothing) define at the given location."""
+ cdef PyObject **slot
+
+ slot = _lookup(self, key)
+ if slot[0] == NULL or slot[0] == _dummy:
+ return NULL
+ return slot[0]
+
+ def __getitem__(self, key):
+ """Return a stored item that is equivalent to key."""
+ cdef PyObject *py_val
+
+ py_val = self._get(key)
+ if py_val == NULL:
+ raise KeyError("Key %s is not present" % key)
+ val = <object>(py_val)
+ return val
+
+ cdef int _insert_clean(self, PyObject *key) except -1:
+ """Insert a key into self.table.
+
+ This is only meant to be used during times like '_resize',
+        as it makes a lot of assumptions about keys not already being present,
+ and there being no dummy entries.
+ """
+ cdef size_t i, n_lookup
+ cdef long the_hash
+ cdef PyObject **table, **slot
+ cdef Py_ssize_t mask
+
+ mask = self._mask
+ table = self._table
+
+ the_hash = PyObject_Hash(key)
+ i = the_hash
+ for n_lookup from 0 <= n_lookup <= <size_t>mask: # Don't loop forever
+ slot = &table[i & mask]
+ if slot[0] == NULL:
+ slot[0] = key
+ self._fill = self._fill + 1
+ self._used = self._used + 1
+ return 1
+ i = i + 1 + n_lookup
+ raise RuntimeError('ran out of slots.')
+
+ def _py_resize(self, min_used):
+ """Do not use this directly, it is only exposed for testing."""
+ return self._resize(min_used)
+
+ cdef Py_ssize_t _resize(self, Py_ssize_t min_used) except -1:
+ """Resize the internal table.
+
+ The final table will be big enough to hold at least min_used entries.
+ We will copy the data from the existing table over, leaving out dummy
+ entries.
+
+ :return: The new size of the internal table
+ """
+ cdef Py_ssize_t new_size, n_bytes, remaining
+ cdef PyObject **new_table, **old_table, **slot
+
+ new_size = DEFAULT_SIZE
+ while new_size <= min_used and new_size > 0:
+ new_size = new_size << 1
+ # We rolled over our signed size field
+ if new_size <= 0:
+ raise MemoryError()
+ # Even if min_used == self._mask + 1, and we aren't changing the actual
+ # size, we will still run the algorithm so that dummy entries are
+ # removed
+ # TODO: Test this
+ # if new_size < self._used:
+ # raise RuntimeError('cannot shrink SimpleSet to something'
+ # ' smaller than the number of used slots.')
+ n_bytes = sizeof(PyObject*) * new_size;
+ new_table = <PyObject **>PyMem_Malloc(n_bytes)
+ if new_table == NULL:
+ raise MemoryError()
+
+ old_table = self._table
+ self._table = new_table
+ memset(self._table, 0, n_bytes)
+ self._mask = new_size - 1
+ self._used = 0
+ remaining = self._fill
+ self._fill = 0
+
+ # Moving everything to the other table is refcount neutral, so we don't
+ # worry about it.
+ slot = old_table
+ while remaining > 0:
+ if slot[0] == NULL: # unused slot
+ pass
+ elif slot[0] == _dummy: # dummy slot
+ remaining = remaining - 1
+ else: # active slot
+ remaining = remaining - 1
+ self._insert_clean(slot[0])
+ slot = slot + 1
+ PyMem_Free(old_table)
+ return new_size
+
+ def add(self, key):
+ """Similar to set.add(), start tracking this key.
+
+ There is one small difference, which is that we return the object that
+ is stored at the given location. (which is closer to the
+ dict.setdefault() functionality.)
+ """
+ return self._add(key)
+
+ cdef object _add(self, key):
+ cdef PyObject **slot, *py_key
+ cdef int added
+
+ py_key = <PyObject *>key
+ if (Py_TYPE(py_key).tp_richcompare == NULL
+ or Py_TYPE(py_key).tp_hash == NULL):
+ raise TypeError('Types added to SimpleSet must implement'
+ ' both tp_richcompare and tp_hash')
+ added = 0
+ # We need at least one empty slot
+ assert self._used < self._mask
+ slot = _lookup(self, key)
+ if (slot[0] == NULL):
+ Py_INCREF(py_key)
+ self._fill = self._fill + 1
+ self._used = self._used + 1
+ slot[0] = py_key
+ added = 1
+ elif (slot[0] == _dummy):
+ Py_INCREF(py_key)
+ self._used = self._used + 1
+ slot[0] = py_key
+ added = 1
+ # No else: clause. If _lookup returns a pointer to
+ # a live object, then we already have a value at this location.
+ retval = <object>(slot[0])
+        # PySet and PyDict use a 2/3rds-full heuristic; we'll follow suit
+ if added and (self._fill * 3) >= ((self._mask + 1) * 2):
+            # However, we always resize to a 2:1 slots-to-entries ratio
+ self._resize(self._used * 2)
+ # Even if we resized and ended up moving retval into a different slot,
+ # it is still the value that is held at the slot equivalent to 'key',
+ # so we can still return it
+ return retval
+
+ def discard(self, key):
+ """Remove key from the set, whether it exists or not.
+
+ :return: False if the item did not exist, True if it did
+ """
+ if self._discard(key):
+ return True
+ return False
+
+ cdef int _discard(self, key) except -1:
+ cdef PyObject **slot, *py_key
+
+ slot = _lookup(self, key)
+ if slot[0] == NULL or slot[0] == _dummy:
+ return 0
+ self._used = self._used - 1
+ Py_DECREF(slot[0])
+ slot[0] = _dummy
+ # PySet uses the heuristic: If more than 1/5 are dummies, then resize
+ # them away
+        #     if ((so->_fill - so->_used) * 5 > so->mask)
+ # However, we are planning on using this as an interning structure, in
+ # which we will be putting a lot of objects. And we expect that large
+ # groups of them are going to have the same lifetime.
+ # Dummy entries hurt a little bit because they cause the lookup to keep
+ # searching, but resizing is also rather expensive
+ # For now, we'll just use their algorithm, but we may want to revisit
+ # it
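+        # Worked example (illustrative): with the default 1024-slot table
+        # (mask == 1023), the resize below triggers once (fill - used) exceeds
+        # mask / 5, i.e. once roughly 205 slots hold _dummy.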
+ if ((self._fill - self._used) * 5 > self._mask):
+ self._resize(self._used * 2)
+ return 1
+
+ def __iter__(self):
+ return _SimpleSet_iterator(self)
+
+
+cdef class _SimpleSet_iterator:
+ """Iterator over the SimpleSet structure."""
+
+ cdef Py_ssize_t pos
+ cdef SimpleSet set
+ cdef Py_ssize_t _used # track if things have been mutated while iterating
+ cdef Py_ssize_t len # number of entries left
+
+ def __init__(self, obj):
+ self.set = obj
+ self.pos = 0
+ self._used = self.set._used
+ self.len = self.set._used
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ cdef Py_ssize_t mask, i
+ cdef PyObject *key
+
+ if self.set is None:
+ raise StopIteration
+ if self.set._used != self._used:
+ # Force this exception to continue to be raised
+ self._used = -1
+ raise RuntimeError("Set size changed during iteration")
+ if not SimpleSet_Next(self.set, &self.pos, &key):
+ self.set = None
+ raise StopIteration
+ # we found something
+ the_key = <object>key # INCREF
+ self.len = self.len - 1
+ return the_key
+
+ def __length_hint__(self):
+ if self.set is not None and self._used == self.set._used:
+ return self.len
+ return 0
+
+
+
+cdef api SimpleSet SimpleSet_New():
+ """Create a new SimpleSet object."""
+ return SimpleSet()
+
+
+cdef SimpleSet _check_self(object self):
+ """Check that the parameter is not None.
+
+ Pyrex/Cython will do type checking, but only to ensure that an object is
+ either the right type or None. You can say "object foo not None" for pure
+ python functions, but not for C functions.
+ So this is just a helper for all the apis that need to do the check.
+ """
+ cdef SimpleSet true_self
+ if self is None:
+ raise TypeError('self must not be None')
+ true_self = self
+ return true_self
+
+
+cdef PyObject **_lookup(SimpleSet self, object key) except NULL:
+ """Find the slot where 'key' would fit.
+
+    This is the same as a dict's 'lookup' function.
+
+ :param key: An object we are looking up
+ :param hash: The hash for key
+ :return: The location in self.table where key should be put.
+ location == NULL is an exception, but (*location) == NULL just
+ indicates the slot is empty and can be used.
+ """
+ # This uses Quadratic Probing:
+ # http://en.wikipedia.org/wiki/Quadratic_probing
+ # with c1 = c2 = 1/2
+ # This leads to probe locations at:
+ # h0 = hash(k1)
+ # h1 = h0 + 1
+ # h2 = h0 + 3 = h1 + 1 + 1
+ # h3 = h0 + 6 = h2 + 1 + 2
+    #  h4 = h0 + 10 = h3 + 1 + 3
+ # Note that all of these are '& mask', but that is computed *after* the
+ # offset.
+    # This differs from the algorithm used by Set and Dict, which, effectively,
+    # use double-hashing, and a step size that starts large but dwindles to
+    # stepping one-by-one.
+ # This gives more 'locality' in that if you have a collision at offset X,
+ # the first fallback is X+1, which is fast to check. However, that means
+ # that an object w/ hash X+1 will also check there, and then X+2 next.
+ # However, for objects with differing hashes, their chains are different.
+ # The former checks X, X+1, X+3, ... the latter checks X+1, X+2, X+4, ...
+ # So different hashes diverge quickly.
+    # A bigger problem is that we *only* ever use the lowest bits of the hash.
+ # So all integers (x + SIZE*N) will resolve into the same bucket, and all
+ # use the same collision resolution. We may want to try to find a way to
+ # incorporate the upper bits of the hash with quadratic probing. (For
+ # example, X, X+1, X+3+some_upper_bits, X+6+more_upper_bits, etc.)
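+    # Illustrative probe trace (a sketch, never executed): with an 8-slot
+    # table (mask == 7) and key_hash == 11, the loop below visits
+    #     n_lookup=0: i=11 -> slot 11 & 7 == 3   (the 'home' slot)
+    #     n_lookup=1: i=12 -> slot 4             (home + 1)
+    #     n_lookup=2: i=14 -> slot 6             (home + 3)
+    #     n_lookup=3: i=17 -> slot 1             (home + 6, wrapped by & mask)
+    # matching the h1, h2, h3 offsets listed above.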
+ cdef size_t i, n_lookup
+ cdef Py_ssize_t mask
+ cdef long key_hash
+ cdef PyObject **table, **slot, *cur, **free_slot, *py_key
+
+ py_key = <PyObject *>key
+ # Note: avoid using hash(obj) because of a bug w/ pyrex 0.9.8.5 and 64-bit
+ # (it treats hash() as returning an 'int' rather than a 'long')
+ key_hash = PyObject_Hash(py_key)
+ i = <size_t>key_hash
+ mask = self._mask
+ table = self._table
+ free_slot = NULL
+ for n_lookup from 0 <= n_lookup <= <size_t>mask: # Don't loop forever
+ slot = &table[i & mask]
+ cur = slot[0]
+ if cur == NULL:
+ # Found a blank spot
+ if free_slot != NULL:
+ # Did we find an earlier _dummy entry?
+ return free_slot
+ else:
+ return slot
+ if cur == py_key:
+ # Found an exact pointer to the key
+ return slot
+ if cur == _dummy:
+ if free_slot == NULL:
+ free_slot = slot
+ elif _is_equal(py_key, key_hash, cur):
+ # Both py_key and cur belong in this slot, return it
+ return slot
+ i = i + 1 + n_lookup
+ raise AssertionError('should never get here')
+
+
+cdef api PyObject **_SimpleSet_Lookup(object self, object key) except NULL:
+ """Find the slot where 'key' would fit.
+
+    This is the same as a dict's 'lookup' function. This is a private
+    api because mutating what you get without maintaining the other invariants
+ is a 'bad thing'.
+
+ :param key: An object we are looking up
+ :param hash: The hash for key
+    :return: The location in self._table where key should be put. The returned
+        pointer should never be NULL, but it may reference a NULL (PyObject*)
+ """
+ return _lookup(_check_self(self), key)
+
+
+cdef api object SimpleSet_Add(object self, object key):
+ """Add a key to the SimpleSet (set).
+
+ :param self: The SimpleSet to add the key to.
+ :param key: The key to be added. If the key is already present,
+ self will not be modified
+ :return: The current key stored at the location defined by 'key'.
+ This may be the same object, or it may be an equivalent object.
+ (consider dict.setdefault(key, key))
+ """
+ return _check_self(self)._add(key)
+
+
+cdef api int SimpleSet_Contains(object self, object key) except -1:
+ """Is key present in self?"""
+ return (key in _check_self(self))
+
+
+cdef api int SimpleSet_Discard(object self, object key) except -1:
+ """Remove the object referenced at location 'key'.
+
+ :param self: The SimpleSet being modified
+ :param key: The key we are checking on
+ :return: 1 if there was an object present, 0 if there was not, and -1 on
+ error.
+ """
+ return _check_self(self)._discard(key)
+
+
+cdef api PyObject *SimpleSet_Get(SimpleSet self, object key) except? NULL:
+ """Get a pointer to the object present at location 'key'.
+
+ This returns an object which is equal to key which was previously added to
+ self. This returns a borrowed reference, as it may also return NULL if no
+ value is present at that location.
+
+ :param key: The value we are looking for
+ :return: The object present at that location
+ """
+ return _check_self(self)._get(key)
+
+
+cdef api Py_ssize_t SimpleSet_Size(object self) except -1:
+ """Get the number of active entries in 'self'"""
+ return _check_self(self)._used
+
+
+cdef api int SimpleSet_Next(object self, Py_ssize_t *pos,
+ PyObject **key) except -1:
+ """Walk over items in a SimpleSet.
+
+ :param pos: should be initialized to 0 by the caller, and will be updated
+ by this function
+ :param key: Will return a borrowed reference to key
+ :return: 0 if nothing left, 1 if we are returning a new value
+ """
+ cdef Py_ssize_t i, mask
+ cdef SimpleSet true_self
+ cdef PyObject **table
+ true_self = _check_self(self)
+ i = pos[0]
+ if (i < 0):
+ return 0
+ mask = true_self._mask
+    table = true_self._table
+ while (i <= mask and (table[i] == NULL or table[i] == _dummy)):
+ i = i + 1
+ pos[0] = i + 1
+ if (i > mask):
+ return 0 # All done
+ if (key != NULL):
+ key[0] = table[i]
+ return 1
+
+
+cdef int SimpleSet_traverse(SimpleSet self, visitproc visit,
+ void *arg) except -1:
+ """This is an implementation of 'tp_traverse' that hits the whole table.
+
+ Cython/Pyrex don't seem to let you define a tp_traverse, and they only
+ define one for you if you have an 'object' attribute. Since they don't
+ support C arrays of objects, we access the PyObject * directly.
+ """
+ cdef Py_ssize_t pos
+ cdef PyObject *next_key
+ cdef int ret
+
+ pos = 0
+ while SimpleSet_Next(self, &pos, &next_key):
+ ret = visit(next_key, arg)
+ if ret:
+ return ret
+ return 0
+
+# It is a little bit ugly to do this, but it works, and means that Meliae can
+# dump the total memory consumed by all child objects.
+(<PyTypeObject *>SimpleSet).tp_traverse = <traverseproc>SimpleSet_traverse
diff --git a/bzrlib/_simple_set_pyx_api.h b/bzrlib/_simple_set_pyx_api.h
new file mode 100644
index 0000000..536764b
--- /dev/null
+++ b/bzrlib/_simple_set_pyx_api.h
@@ -0,0 +1,128 @@
+#ifndef __PYX_HAVE_API__bzrlib___simple_set_pyx
+#define __PYX_HAVE_API__bzrlib___simple_set_pyx
+#include "Python.h"
+#include "_simple_set_pyx.h"
+
+static PyTypeObject *__pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet;
+#define SimpleSet_Type (*__pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet)
+
+static struct SimpleSetObject *(*SimpleSet_New)(void);
+static PyObject *(*SimpleSet_Add)(PyObject *,PyObject *);
+static int (*SimpleSet_Contains)(PyObject *,PyObject *);
+static int (*SimpleSet_Discard)(PyObject *,PyObject *);
+static PyObject *(*SimpleSet_Get)(struct SimpleSetObject *,PyObject *);
+static Py_ssize_t (*SimpleSet_Size)(PyObject *);
+static int (*SimpleSet_Next)(PyObject *,Py_ssize_t *,PyObject **);
+static PyObject **(*_SimpleSet_Lookup)(PyObject *,PyObject *);
+
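+/* Hedged usage sketch (not part of the generated header): a consumer such as
+ * bzrlib/_static_tuple_c.c below includes this header, calls
+ * import_bzrlib___simple_set_pyx() once during module init (it returns -1
+ * with an exception set on failure), and only then goes through the function
+ * pointers above, e.g.
+ *
+ *     if (import_bzrlib___simple_set_pyx() == -1)
+ *         return;
+ *     set = (PyObject *)SimpleSet_New();
+ *     canonical = SimpleSet_Add(set, key);
+ */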
+#ifndef __PYX_HAVE_API_FUNC_import_module
+#define __PYX_HAVE_API_FUNC_import_module
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(char *name) {
+ PyObject *py_name = 0;
+
+ py_name = PyString_FromString(name);
+ if (!py_name)
+ goto bad;
+ return PyImport_Import(py_name);
+bad:
+ Py_XDECREF(py_name);
+ return 0;
+}
+#endif
+
+#endif
+
+
+#ifndef __PYX_HAVE_RT_ImportFunction
+#define __PYX_HAVE_RT_ImportFunction
+static int __Pyx_ImportFunction(PyObject *module, char *funcname, void **f, char *sig) {
+ PyObject *d = 0;
+ PyObject *cobj = 0;
+ char *desc;
+
+ d = PyObject_GetAttrString(module, "__pyx_capi__");
+ if (!d)
+ goto bad;
+ cobj = PyDict_GetItemString(d, funcname);
+ if (!cobj) {
+ PyErr_Format(PyExc_ImportError,
+ "%s does not export expected C function %s",
+ PyModule_GetName(module), funcname);
+ goto bad;
+ }
+ desc = (char *)PyCObject_GetDesc(cobj);
+ if (!desc)
+ goto bad;
+ if (strcmp(desc, sig) != 0) {
+ PyErr_Format(PyExc_TypeError,
+ "C function %s.%s has wrong signature (expected %s, got %s)",
+ PyModule_GetName(module), funcname, sig, desc);
+ goto bad;
+ }
+ *f = PyCObject_AsVoidPtr(cobj);
+ Py_DECREF(d);
+ return 0;
+bad:
+ Py_XDECREF(d);
+ return -1;
+}
+#endif
+
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name,
+ long size)
+{
+ PyObject *py_module = 0;
+ PyObject *result = 0;
+
+ py_module = __Pyx_ImportModule(module_name);
+ if (!py_module)
+ goto bad;
+ result = PyObject_GetAttrString(py_module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s.%s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+ if (((PyTypeObject *)result)->tp_basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s.%s does not appear to be the correct type object",
+ module_name, class_name);
+ goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return 0;
+}
+#endif
+
+static int import_bzrlib___simple_set_pyx(void) {
+ PyObject *module = 0;
+ module = __Pyx_ImportModule("bzrlib._simple_set_pyx");
+ if (!module) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_New", (void**)&SimpleSet_New, "struct SimpleSetObject *(void)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_Add", (void**)&SimpleSet_Add, "PyObject *(PyObject *,PyObject *)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_Contains", (void**)&SimpleSet_Contains, "int (PyObject *,PyObject *)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_Discard", (void**)&SimpleSet_Discard, "int (PyObject *,PyObject *)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_Get", (void**)&SimpleSet_Get, "PyObject *(struct SimpleSetObject *,PyObject *)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_Size", (void**)&SimpleSet_Size, "Py_ssize_t (PyObject *)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "SimpleSet_Next", (void**)&SimpleSet_Next, "int (PyObject *,Py_ssize_t *,PyObject **)") < 0) goto bad;
+ if (__Pyx_ImportFunction(module, "_SimpleSet_Lookup", (void**)&_SimpleSet_Lookup, "PyObject **(PyObject *,PyObject *)") < 0) goto bad;
+ Py_DECREF(module); module = 0;
+ __pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet = __Pyx_ImportType("bzrlib._simple_set_pyx", "SimpleSet", sizeof(struct SimpleSetObject)); if (!__pyx_ptype_6bzrlib_15_simple_set_pyx_SimpleSet) goto bad;
+ return 0;
+ bad:
+ Py_XDECREF(module);
+ return -1;
+}
+
+#endif
diff --git a/bzrlib/_static_tuple_c.c b/bzrlib/_static_tuple_c.c
new file mode 100644
index 0000000..1a88269
--- /dev/null
+++ b/bzrlib/_static_tuple_c.c
@@ -0,0 +1,958 @@
+/* Copyright (C) 2009, 2010 Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Must be defined before importing _static_tuple_c.h so that we get the right
+ * linkage.
+ */
+#define STATIC_TUPLE_MODULE
+
+#include <Python.h>
+#include "python-compat.h"
+
+#include "_static_tuple_c.h"
+#include "_export_c_api.h"
+
+/* Pyrex 0.9.6.4 exports _simple_set_pyx_api as
+ * import__simple_set_pyx(), while Pyrex 0.9.8.5 and Cython 0.11.3 export them
+ * as import_bzrlib___simple_set_pyx(). As such, we just #define one to be
+ * equivalent to the other in our internal code.
+ */
+#define import__simple_set_pyx import_bzrlib___simple_set_pyx
+#include "_simple_set_pyx_api.h"
+
+#if defined(__GNUC__)
+# define inline __inline__
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline
+#endif
+
+
+/* The one and only StaticTuple with no values */
+static StaticTuple *_empty_tuple = NULL;
+static PyObject *_interned_tuples = NULL;
+
+
+static inline int
+_StaticTuple_is_interned(StaticTuple *self)
+{
+ return self->flags & STATIC_TUPLE_INTERNED_FLAG;
+}
+
+
+
+static PyObject *
+StaticTuple_as_tuple(StaticTuple *self)
+{
+ PyObject *tpl = NULL, *obj = NULL;
+ int i, len;
+
+ len = self->size;
+ tpl = PyTuple_New(len);
+ if (!tpl) {
+ /* Malloc failure */
+ return NULL;
+ }
+ for (i = 0; i < len; ++i) {
+ obj = (PyObject *)self->items[i];
+ Py_INCREF(obj);
+ PyTuple_SET_ITEM(tpl, i, obj);
+ }
+ return tpl;
+}
+
+
+static char StaticTuple_as_tuple_doc[] = "as_tuple() => tuple";
+
+static StaticTuple *
+StaticTuple_Intern(StaticTuple *self)
+{
+ PyObject *canonical_tuple = NULL;
+
+ if (_interned_tuples == NULL || _StaticTuple_is_interned(self)) {
+ Py_INCREF(self);
+ return self;
+ }
+ /* SimpleSet_Add returns whatever object is present at self
+ * or the new object if it needs to add it.
+ */
+ canonical_tuple = SimpleSet_Add(_interned_tuples, (PyObject *)self);
+ if (!canonical_tuple) {
+        // Some sort of exception, propagate it.
+ return NULL;
+ }
+ if (canonical_tuple != (PyObject *)self) {
+ // There was already a tuple with that value
+ return (StaticTuple *)canonical_tuple;
+ }
+ self->flags |= STATIC_TUPLE_INTERNED_FLAG;
+ // The two references in the dict do not count, so that the StaticTuple
+ // object does not become immortal just because it was interned.
+ Py_REFCNT(self) -= 1;
+ return self;
+}
+
+static char StaticTuple_Intern_doc[] = "intern() => unique StaticTuple\n"
+ "Return a 'canonical' StaticTuple object.\n"
+ "Similar to intern() for strings, this makes sure there\n"
+ "is only one StaticTuple object for a given value\n."
+ "Common usage is:\n"
+ " key = StaticTuple('foo', 'bar').intern()\n";
+
+
+static void
+StaticTuple_dealloc(StaticTuple *self)
+{
+ int i, len;
+
+ if (_StaticTuple_is_interned(self)) {
+ /* revive dead object temporarily for Discard */
+ Py_REFCNT(self) = 2;
+ if (SimpleSet_Discard(_interned_tuples, (PyObject*)self) != 1)
+ Py_FatalError("deletion of interned StaticTuple failed");
+ self->flags &= ~STATIC_TUPLE_INTERNED_FLAG;
+ }
+ len = self->size;
+ for (i = 0; i < len; ++i) {
+ Py_XDECREF(self->items[i]);
+ }
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+
+/* Similar to PyTuple_New() */
+static StaticTuple *
+StaticTuple_New(Py_ssize_t size)
+{
+ StaticTuple *stuple;
+
+ if (size < 0 || size > 255) {
+ /* Too big or too small */
+ PyErr_SetString(PyExc_ValueError, "StaticTuple(...)"
+ " takes from 0 to 255 items");
+ return NULL;
+ }
+ if (size == 0 && _empty_tuple != NULL) {
+ Py_INCREF(_empty_tuple);
+ return _empty_tuple;
+ }
+ /* Note that we use PyObject_NewVar because we want to allocate a variable
+ * width entry. However we *aren't* truly a PyVarObject because we don't
+ * use a long for ob_size. Instead we use a plain 'size' that is an int,
+ * and will be overloaded with flags in the future.
+ * As such we do the alloc, and then have to clean up anything it does
+ * incorrectly.
+ */
+ stuple = PyObject_NewVar(StaticTuple, &StaticTuple_Type, size);
+ if (stuple == NULL) {
+ return NULL;
+ }
+ stuple->size = size;
+ stuple->flags = 0;
+ stuple->_unused0 = 0;
+ stuple->_unused1 = 0;
+ if (size > 0) {
+ memset(stuple->items, 0, sizeof(PyObject *) * size);
+ }
+#if STATIC_TUPLE_HAS_HASH
+ stuple->hash = -1;
+#endif
+ return stuple;
+}
+
+
+static StaticTuple *
+StaticTuple_FromSequence(PyObject *sequence)
+{
+ StaticTuple *new = NULL;
+ PyObject *as_tuple = NULL;
+ PyObject *item;
+ Py_ssize_t i, size;
+
+ if (StaticTuple_CheckExact(sequence)) {
+ Py_INCREF(sequence);
+ return (StaticTuple *)sequence;
+ }
+ if (!PySequence_Check(sequence)) {
+ as_tuple = PySequence_Tuple(sequence);
+ if (as_tuple == NULL)
+ goto done;
+ sequence = as_tuple;
+ }
+ size = PySequence_Size(sequence);
+ if (size == -1) {
+ goto done;
+ }
+ new = StaticTuple_New(size);
+ if (new == NULL) {
+ goto done;
+ }
+ for (i = 0; i < size; ++i) {
+ // This returns a new reference, which we then 'steal' with
+ // StaticTuple_SET_ITEM
+ item = PySequence_GetItem(sequence, i);
+ if (item == NULL) {
+ Py_DECREF(new);
+ new = NULL;
+ goto done;
+ }
+ StaticTuple_SET_ITEM(new, i, item);
+ }
+done:
+ Py_XDECREF(as_tuple);
+ return (StaticTuple *)new;
+}
+
+static StaticTuple *
+StaticTuple_from_sequence(PyObject *self, PyObject *args, PyObject *kwargs)
+{
+ PyObject *sequence;
+ if (!PyArg_ParseTuple(args, "O", &sequence))
+ return NULL;
+ return StaticTuple_FromSequence(sequence);
+}
+
+
+/* Check that all items we point to are 'valid' */
+static int
+StaticTuple_check_items(StaticTuple *self)
+{
+ int i;
+ PyObject *obj;
+
+ for (i = 0; i < self->size; ++i) {
+ obj = self->items[i];
+ if (obj == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "StaticTuple(...)"
+ " should not have a NULL entry.");
+ return 0;
+ }
+ if (PyString_CheckExact(obj)
+ || StaticTuple_CheckExact(obj)
+ || obj == Py_None
+ || PyBool_Check(obj)
+ || PyInt_CheckExact(obj)
+ || PyLong_CheckExact(obj)
+ || PyFloat_CheckExact(obj)
+ || PyUnicode_CheckExact(obj)
+ ) continue;
+ PyErr_Format(PyExc_TypeError, "StaticTuple(...)"
+ " requires that all items are one of"
+ " str, StaticTuple, None, bool, int, long, float, or unicode"
+ " not %s.", Py_TYPE(obj)->tp_name);
+ return 0;
+ }
+ return 1;
+}
+
+static PyObject *
+StaticTuple_new_constructor(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ StaticTuple *self;
+ PyObject *obj = NULL;
+ Py_ssize_t i, len = 0;
+
+ if (type != &StaticTuple_Type) {
+ PyErr_SetString(PyExc_TypeError, "we only support creating StaticTuple");
+ return NULL;
+ }
+ if (!PyTuple_CheckExact(args)) {
+ PyErr_SetString(PyExc_TypeError, "args must be a tuple");
+ return NULL;
+ }
+ len = PyTuple_GET_SIZE(args);
+ if (len < 0 || len > 255) {
+ /* Check the length here so we can raise a TypeError instead of
+ * StaticTuple_New's ValueError.
+ */
+ PyErr_SetString(PyExc_TypeError, "StaticTuple(...)"
+ " takes from 0 to 255 items");
+ return NULL;
+ }
+ self = (StaticTuple *)StaticTuple_New(len);
+ if (self == NULL) {
+ return NULL;
+ }
+ for (i = 0; i < len; ++i) {
+ obj = PyTuple_GET_ITEM(args, i);
+ Py_INCREF(obj);
+ self->items[i] = obj;
+ }
+ if (!StaticTuple_check_items(self)) {
+ type->tp_dealloc((PyObject *)self);
+ return NULL;
+ }
+ return (PyObject *)self;
+}
+
+static PyObject *
+StaticTuple_repr(StaticTuple *self)
+{
+ PyObject *as_tuple, *tuple_repr, *result;
+
+ as_tuple = StaticTuple_as_tuple(self);
+ if (as_tuple == NULL) {
+ return NULL;
+ }
+ tuple_repr = PyObject_Repr(as_tuple);
+ Py_DECREF(as_tuple);
+ if (tuple_repr == NULL) {
+ return NULL;
+ }
+ result = PyString_FromFormat("StaticTuple%s",
+ PyString_AsString(tuple_repr));
+ return result;
+}
+
+static long
+StaticTuple_hash(StaticTuple *self)
+{
+    /* Adapted from tuplehash(). Is the specific hash value considered
+     * 'stable'?
+ */
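+    /* Informally, the recurrence implemented below (same shape as CPython's
+     * tuplehash) is:
+     *     x_0     = 0x345678
+     *     x_(i+1) = (x_i ^ hash(item_i)) * mult_i
+     *     mult_(i+1) = mult_i + 82520 + 2 * (items remaining after item_i)
+     * with mult_0 = 1000003, a final x += 97531, and -1 remapped to -2
+     * because -1 signals an error from a hash function.
+     */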
+ register long x, y;
+ Py_ssize_t len = self->size;
+ PyObject **p;
+ long mult = 1000003L;
+
+#if STATIC_TUPLE_HAS_HASH
+ if (self->hash != -1) {
+ return self->hash;
+ }
+#endif
+ x = 0x345678L;
+ p = self->items;
+ // TODO: We could set specific flags if we know that, for example, all the
+ // items are strings. I haven't seen a real-world benefit to that
+ // yet, though.
+ while (--len >= 0) {
+ y = PyObject_Hash(*p++);
+ if (y == -1) /* failure */
+ return -1;
+ x = (x ^ y) * mult;
+ /* the cast might truncate len; that doesn't change hash stability */
+ mult += (long)(82520L + len + len);
+ }
+ x += 97531L;
+ if (x == -1)
+ x = -2;
+#if STATIC_TUPLE_HAS_HASH
+ self->hash = x;
+#endif
+ return x;
+}
+
+static PyObject *
+StaticTuple_richcompare_to_tuple(StaticTuple *v, PyObject *wt, int op)
+{
+ PyObject *vt;
+ PyObject *result = NULL;
+
+ vt = StaticTuple_as_tuple((StaticTuple *)v);
+ if (vt == NULL) {
+ goto done;
+ }
+ if (!PyTuple_Check(wt)) {
+ PyErr_BadInternalCall();
+ goto done;
+ }
+ /* Now we have 2 tuples to compare, do it */
+ result = PyTuple_Type.tp_richcompare(vt, wt, op);
+done:
+ Py_XDECREF(vt);
+ return result;
+}
+
+/** Compare two objects to determine if they are equivalent.
+ * The basic flow is as follows
+ * 1) First make sure that both objects are StaticTuple instances. If they
+ * aren't then cast self to a tuple, and have the tuple do the comparison.
+ * 2) Special case comparison to Py_None, because it happens to occur fairly
+ * often in the test suite.
+ * 3) Special case when v and w are the same pointer. As we know the answer to
+ * all queries without walking individual items.
+ * 4) For all operations, we then walk the items to find the first paired
+ * items that are not equal.
+ * 5) If all items found are equal, we then check the length of self and
+ * other to determine equality.
+ * 6) If an item differs, then we apply "op" to those last two items. (eg.
+ * StaticTuple(A, B) > StaticTuple(A, C) iff B > C)
+ */
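+/* Illustrative Python-level expectations for the rules above (sketch only):
+ *     StaticTuple('a', 'b') == StaticTuple('a', 'b')   -> True   (rules 4/5)
+ *     StaticTuple('a', 'b') == ('a', 'b')              -> True   (rule 1)
+ *     StaticTuple('a') == None                         -> False  (rule 2)
+ *     StaticTuple('a', 'b') > StaticTuple('a', 'a')    -> True   (rule 6)
+ */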
+
+static PyObject *
+StaticTuple_richcompare(PyObject *v, PyObject *w, int op)
+{
+ StaticTuple *v_st, *w_st;
+ Py_ssize_t vlen, wlen, min_len, i;
+ PyObject *v_obj, *w_obj;
+ richcmpfunc string_richcompare;
+
+ if (!StaticTuple_CheckExact(v)) {
+        /* This has never triggered; according to python-dev it seems this
+         * might trigger in the sort of case where '__op__' is defined but
+         * '__rop__' is not, such as "None == StaticTuple()".
+ */
+ fprintf(stderr, "self is not StaticTuple\n");
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ v_st = (StaticTuple *)v;
+ if (StaticTuple_CheckExact(w)) {
+ /* The most common case */
+ w_st = (StaticTuple*)w;
+ } else if (PyTuple_Check(w)) {
+ /* One of v or w is a tuple, so we go the 'slow' route and cast up to
+ * tuples to compare.
+ */
+ /* TODO: This seems to be triggering more than I thought it would...
+ * We probably want to optimize comparing self to other when
+ * other is a tuple.
+ */
+ return StaticTuple_richcompare_to_tuple(v_st, w, op);
+ } else if (w == Py_None) {
+ // None is always less than the object
+ switch (op) {
+ case Py_NE:case Py_GT:case Py_GE:
+ Py_INCREF(Py_True);
+ return Py_True;
+ case Py_EQ:case Py_LT:case Py_LE:
+ Py_INCREF(Py_False);
+ return Py_False;
+        default: // Should never happen
+            Py_INCREF(Py_NotImplemented);
+            return Py_NotImplemented;
+ }
+ } else {
+ /* We don't special case this comparison, we just let python handle
+ * it.
+ */
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ /* Now we know that we have 2 StaticTuple objects, so let's compare them.
+ * This code is inspired from tuplerichcompare, except we know our
+ * objects are limited in scope, so we can inline some comparisons.
+ */
+ if (v == w) {
+ /* Identical pointers, we can shortcut this easily. */
+ switch (op) {
+ case Py_EQ:case Py_LE:case Py_GE:
+ Py_INCREF(Py_True);
+ return Py_True;
+ case Py_NE:case Py_LT:case Py_GT:
+ Py_INCREF(Py_False);
+ return Py_False;
+ }
+ }
+ if (op == Py_EQ
+ && _StaticTuple_is_interned(v_st)
+ && _StaticTuple_is_interned(w_st))
+ {
+ /* If both objects are interned, we know they are different if the
+ * pointer is not the same, which would have been handled by the
+ * previous if. No need to compare the entries.
+ */
+ Py_INCREF(Py_False);
+ return Py_False;
+ }
+
+ /* The only time we are likely to compare items of different lengths is in
+ * something like the interned_keys set. However, the hash is good enough
+ * that it is rare. Note that 'tuple_richcompare' also does not compare
+ * lengths here.
+ */
+ vlen = v_st->size;
+ wlen = w_st->size;
+ min_len = (vlen < wlen) ? vlen : wlen;
+ string_richcompare = PyString_Type.tp_richcompare;
+ for (i = 0; i < min_len; i++) {
+ PyObject *result = NULL;
+ v_obj = StaticTuple_GET_ITEM(v_st, i);
+ w_obj = StaticTuple_GET_ITEM(w_st, i);
+ if (v_obj == w_obj) {
+ /* Shortcut case, these must be identical */
+ continue;
+ }
+ if (PyString_CheckExact(v_obj) && PyString_CheckExact(w_obj)) {
+ result = string_richcompare(v_obj, w_obj, Py_EQ);
+ } else if (StaticTuple_CheckExact(v_obj) &&
+ StaticTuple_CheckExact(w_obj))
+ {
+ /* Both are StaticTuple types, so recurse */
+ result = StaticTuple_richcompare(v_obj, w_obj, Py_EQ);
+ } else {
+ /* Fall back to generic richcompare */
+ result = PyObject_RichCompare(v_obj, w_obj, Py_EQ);
+ }
+ if (result == NULL) {
+ return NULL; /* There seems to be an error */
+ }
+ if (result == Py_False) {
+            // This entry is not identical; shortcut for Py_EQ
+ if (op == Py_EQ) {
+ return result;
+ }
+ Py_DECREF(result);
+ break;
+ }
+ if (result != Py_True) {
+ /* We don't know *what* richcompare is returning, but it
+ * isn't something we recognize
+ */
+ PyErr_BadInternalCall();
+ Py_DECREF(result);
+ return NULL;
+ }
+ Py_DECREF(result);
+ }
+ if (i >= min_len) {
+ /* We walked off one of the lists, but everything compared equal so
+ * far. Just compare the size.
+ */
+ int cmp;
+ PyObject *res;
+ switch (op) {
+ case Py_LT: cmp = vlen < wlen; break;
+ case Py_LE: cmp = vlen <= wlen; break;
+ case Py_EQ: cmp = vlen == wlen; break;
+ case Py_NE: cmp = vlen != wlen; break;
+ case Py_GT: cmp = vlen > wlen; break;
+ case Py_GE: cmp = vlen >= wlen; break;
+ default: return NULL; /* cannot happen */
+ }
+ if (cmp)
+ res = Py_True;
+ else
+ res = Py_False;
+ Py_INCREF(res);
+ return res;
+ }
+ /* The last item differs, shortcut the Py_NE case */
+ if (op == Py_NE) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+ /* It is some other comparison, go ahead and do the real check. */
+ if (PyString_CheckExact(v_obj) && PyString_CheckExact(w_obj))
+ {
+ return string_richcompare(v_obj, w_obj, op);
+ } else if (StaticTuple_CheckExact(v_obj) &&
+ StaticTuple_CheckExact(w_obj))
+ {
+ /* Both are StaticTuple types, so recurse */
+ return StaticTuple_richcompare(v_obj, w_obj, op);
+ } else {
+ return PyObject_RichCompare(v_obj, w_obj, op);
+ }
+}
+
+
+static Py_ssize_t
+StaticTuple_length(StaticTuple *self)
+{
+ return self->size;
+}
+
+
+static PyObject *
+StaticTuple__is_interned(StaticTuple *self)
+{
+ if (_StaticTuple_is_interned(self)) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+ Py_INCREF(Py_False);
+ return Py_False;
+}
+
+static char StaticTuple__is_interned_doc[] = "_is_interned() => True/False\n"
+ "Check to see if this tuple has been interned.\n";
+
+
+static PyObject *
+StaticTuple_reduce(StaticTuple *self)
+{
+ PyObject *result = NULL, *as_tuple = NULL;
+
+ result = PyTuple_New(2);
+ if (!result) {
+ return NULL;
+ }
+ as_tuple = StaticTuple_as_tuple(self);
+ if (as_tuple == NULL) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ Py_INCREF(&StaticTuple_Type);
+ PyTuple_SET_ITEM(result, 0, (PyObject *)&StaticTuple_Type);
+ PyTuple_SET_ITEM(result, 1, as_tuple);
+ return result;
+}
+
+static char StaticTuple_reduce_doc[] = "__reduce__() => tuple\n";
+
+
+static PyObject *
+StaticTuple_add(PyObject *v, PyObject *w)
+{
+ Py_ssize_t i, len_v, len_w;
+ PyObject *item;
+ StaticTuple *result;
+ /* StaticTuples and plain tuples may be added (concatenated) to
+ * StaticTuples.
+ */
+ if (StaticTuple_CheckExact(v)) {
+ len_v = ((StaticTuple*)v)->size;
+ } else if (PyTuple_Check(v)) {
+ len_v = PyTuple_GET_SIZE(v);
+ } else {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ if (StaticTuple_CheckExact(w)) {
+ len_w = ((StaticTuple*)w)->size;
+ } else if (PyTuple_Check(w)) {
+ len_w = PyTuple_GET_SIZE(w);
+ } else {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ result = StaticTuple_New(len_v + len_w);
+ if (result == NULL)
+ return NULL;
+ for (i = 0; i < len_v; ++i) {
+ // This returns a new reference, which we then 'steal' with
+ // StaticTuple_SET_ITEM
+ item = PySequence_GetItem(v, i);
+ if (item == NULL) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ StaticTuple_SET_ITEM(result, i, item);
+ }
+ for (i = 0; i < len_w; ++i) {
+ item = PySequence_GetItem(w, i);
+ if (item == NULL) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ StaticTuple_SET_ITEM(result, i+len_v, item);
+ }
+ if (!StaticTuple_check_items(result)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return (PyObject *)result;
+}
+
+static PyObject *
+StaticTuple_item(StaticTuple *self, Py_ssize_t offset)
+{
+ PyObject *obj;
+ /* We cast to (int) to avoid worrying about whether Py_ssize_t is a
+ * long long, etc. offsets should never be >2**31 anyway.
+ */
+    if (offset < 0) {
+        PyErr_Format(PyExc_IndexError, "StaticTuple_item does not support"
+            " negative indices: %d\n", (int)offset);
+        return NULL;
+    } else if (offset >= self->size) {
+ PyErr_Format(PyExc_IndexError, "StaticTuple index out of range"
+ " %d >= %d", (int)offset, (int)self->size);
+ return NULL;
+ }
+ obj = (PyObject *)self->items[offset];
+ Py_INCREF(obj);
+ return obj;
+}
+
+static PyObject *
+StaticTuple_slice(StaticTuple *self, Py_ssize_t ilow, Py_ssize_t ihigh)
+{
+ PyObject *as_tuple, *result;
+
+ as_tuple = StaticTuple_as_tuple(self);
+ if (as_tuple == NULL) {
+ return NULL;
+ }
+ result = PyTuple_Type.tp_as_sequence->sq_slice(as_tuple, ilow, ihigh);
+ Py_DECREF(as_tuple);
+ return result;
+}
+
+static int
+StaticTuple_traverse(StaticTuple *self, visitproc visit, void *arg)
+{
+ Py_ssize_t i;
+ for (i = self->size; --i >= 0;) {
+ Py_VISIT(self->items[i]);
+ }
+ return 0;
+}
+
+
+static PyObject *
+StaticTuple_sizeof(StaticTuple *self)
+{
+ Py_ssize_t res;
+
+ res = _PyObject_SIZE(&StaticTuple_Type) + (int)self->size * sizeof(void*);
+ return PyInt_FromSsize_t(res);
+}
+
+
+
+static char StaticTuple_doc[] =
+ "C implementation of a StaticTuple structure."
+ "\n This is used as StaticTuple(item1, item2, item3)"
+ "\n This is similar to tuple, less flexible in what it"
+ "\n supports, but also lighter memory consumption."
+ "\n Note that the constructor mimics the () form of tuples"
+ "\n Rather than the 'tuple()' constructor."
+ "\n eg. StaticTuple(a, b) == (a, b) == tuple((a, b))";
+
+static PyMethodDef StaticTuple_methods[] = {
+ {"as_tuple", (PyCFunction)StaticTuple_as_tuple, METH_NOARGS, StaticTuple_as_tuple_doc},
+ {"intern", (PyCFunction)StaticTuple_Intern, METH_NOARGS, StaticTuple_Intern_doc},
+ {"_is_interned", (PyCFunction)StaticTuple__is_interned, METH_NOARGS,
+ StaticTuple__is_interned_doc},
+ {"from_sequence", (PyCFunction)StaticTuple_from_sequence,
+ METH_STATIC | METH_VARARGS,
+ "Create a StaticTuple from a given sequence. This functions"
+ " the same as the tuple() constructor."},
+ {"__reduce__", (PyCFunction)StaticTuple_reduce, METH_NOARGS, StaticTuple_reduce_doc},
+ {"__sizeof__", (PyCFunction)StaticTuple_sizeof, METH_NOARGS},
+ {NULL, NULL} /* sentinel */
+};
+
+
+static PyNumberMethods StaticTuple_as_number = {
+ (binaryfunc) StaticTuple_add, /* nb_add */
+ 0, /* nb_subtract */
+ 0, /* nb_multiply */
+ 0, /* nb_divide */
+ 0, /* nb_remainder */
+ 0, /* nb_divmod */
+ 0, /* nb_power */
+ 0, /* nb_negative */
+ 0, /* nb_positive */
+ 0, /* nb_absolute */
+ 0, /* nb_nonzero */
+ 0, /* nb_invert */
+ 0, /* nb_lshift */
+ 0, /* nb_rshift */
+ 0, /* nb_and */
+ 0, /* nb_xor */
+ 0, /* nb_or */
+ 0, /* nb_coerce */
+};
+
+
+static PySequenceMethods StaticTuple_as_sequence = {
+ (lenfunc)StaticTuple_length, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ (ssizeargfunc)StaticTuple_item, /* sq_item */
+ (ssizessizeargfunc)StaticTuple_slice, /* sq_slice */
+ 0, /* sq_ass_item */
+ 0, /* sq_ass_slice */
+ 0, /* sq_contains */
+};
+
+/* TODO: Implement StaticTuple_as_mapping.
+ * The only thing we really want to support from there is mp_subscript,
+ * so that we could support extended slicing (foo[::2]). Not worth it
+ * yet, though.
+ */
+
+
+PyTypeObject StaticTuple_Type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+ "bzrlib._static_tuple_c.StaticTuple", /* tp_name */
+ sizeof(StaticTuple), /* tp_basicsize */
+ sizeof(PyObject *), /* tp_itemsize */
+ (destructor)StaticTuple_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc)StaticTuple_repr, /* tp_repr */
+ &StaticTuple_as_number, /* tp_as_number */
+ &StaticTuple_as_sequence, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ (hashfunc)StaticTuple_hash, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+    /* Py_TPFLAGS_CHECKTYPES tells the number operations that they shouldn't
+     * try to 'coerce', but instead operations like 'add' will check their
+     * arguments.
+     */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, /* tp_flags*/
+ StaticTuple_doc, /* tp_doc */
+ /* gc.get_referents checks the IS_GC flag before it calls tp_traverse
+ * And we don't include this object in the garbage collector because we
+ * know it doesn't create cycles. However, 'meliae' will follow
+ * tp_traverse, even if the object isn't GC, and we want that.
+ */
+ (traverseproc)StaticTuple_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ StaticTuple_richcompare, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ // without implementing tp_iter, Python will fall back to PySequence*
+ // which seems to work ok, we may need something faster/lighter in the
+ // future.
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ StaticTuple_methods, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ StaticTuple_new_constructor, /* tp_new */
+};
+
+
+static PyMethodDef static_tuple_c_methods[] = {
+ {NULL, NULL}
+};
+
+
+static void
+setup_interned_tuples(PyObject *m)
+{
+ _interned_tuples = (PyObject *)SimpleSet_New();
+ if (_interned_tuples != NULL) {
+ Py_INCREF(_interned_tuples);
+ PyModule_AddObject(m, "_interned_tuples", _interned_tuples);
+ }
+}
+
+
+static void
+setup_empty_tuple(PyObject *m)
+{
+ StaticTuple *stuple;
+ if (_interned_tuples == NULL) {
+ fprintf(stderr, "You need to call setup_interned_tuples() before"
+ " setup_empty_tuple, because we intern it.\n");
+ }
+ // We need to create the empty tuple
+ stuple = (StaticTuple *)StaticTuple_New(0);
+ _empty_tuple = StaticTuple_Intern(stuple);
+ assert(_empty_tuple == stuple);
+ // At this point, refcnt is 2: 1 from New(), and 1 from the return from
+ // intern(). We will keep 1 for the _empty_tuple global, and use the other
+ // for the module reference.
+ PyModule_AddObject(m, "_empty_tuple", (PyObject *)_empty_tuple);
+}
+
+static int
+_StaticTuple_CheckExact(PyObject *obj)
+{
+ return StaticTuple_CheckExact(obj);
+}
+
+static void
+setup_c_api(PyObject *m)
+{
+ _export_function(m, "StaticTuple_New", StaticTuple_New,
+ "StaticTuple *(Py_ssize_t)");
+ _export_function(m, "StaticTuple_Intern", StaticTuple_Intern,
+ "StaticTuple *(StaticTuple *)");
+ _export_function(m, "StaticTuple_FromSequence", StaticTuple_FromSequence,
+ "StaticTuple *(PyObject *)");
+ _export_function(m, "_StaticTuple_CheckExact", _StaticTuple_CheckExact,
+ "int(PyObject *)");
+}
+
+
+static int
+_workaround_pyrex_096(void)
+{
+ /* Work around an incompatibility in how pyrex 0.9.6 exports a module,
+ * versus how pyrex 0.9.8 and cython 0.11 export it.
+ * Namely 0.9.6 exports import__simple_set_pyx and tries to
+ * "import _simple_set_pyx" but it is available only as
+ * "import bzrlib._simple_set_pyx"
+ * It is a shame to hack up sys.modules, but that is what we've got to do.
+ */
+ PyObject *sys_module = NULL, *modules = NULL, *set_module = NULL;
+ int retval = -1;
+
+ /* Clear out the current ImportError exception, and try again. */
+ PyErr_Clear();
+ /* Note that this only seems to work if somewhere else imports
+ * bzrlib._simple_set_pyx before importing bzrlib._static_tuple_c
+ */
+ set_module = PyImport_ImportModule("bzrlib._simple_set_pyx");
+ if (set_module == NULL) {
+ goto end;
+ }
+ /* Add the _simple_set_pyx into sys.modules at the appropriate location. */
+ sys_module = PyImport_ImportModule("sys");
+ if (sys_module == NULL) {
+ goto end;
+ }
+ modules = PyObject_GetAttrString(sys_module, "modules");
+ if (modules == NULL || !PyDict_Check(modules)) {
+ goto end;
+ }
+ PyDict_SetItemString(modules, "_simple_set_pyx", set_module);
+ /* Now that we have hacked it in, try the import again. */
+ retval = import_bzrlib___simple_set_pyx();
+end:
+ Py_XDECREF(set_module);
+ Py_XDECREF(sys_module);
+ Py_XDECREF(modules);
+ return retval;
+}
+
+
+PyMODINIT_FUNC
+init_static_tuple_c(void)
+{
+ PyObject* m;
+
+ StaticTuple_Type.tp_getattro = PyObject_GenericGetAttr;
+ if (PyType_Ready(&StaticTuple_Type) < 0)
+ return;
+
+ m = Py_InitModule3("_static_tuple_c", static_tuple_c_methods,
+ "C implementation of a StaticTuple structure");
+ if (m == NULL)
+ return;
+
+ Py_INCREF(&StaticTuple_Type);
+ PyModule_AddObject(m, "StaticTuple", (PyObject *)&StaticTuple_Type);
+ if (import_bzrlib___simple_set_pyx() == -1
+ && _workaround_pyrex_096() == -1)
+ {
+ return;
+ }
+ setup_interned_tuples(m);
+ setup_empty_tuple(m);
+ setup_c_api(m);
+}
+
+// vim: tabstop=4 sw=4 expandtab
diff --git a/bzrlib/_static_tuple_c.h b/bzrlib/_static_tuple_c.h
new file mode 100644
index 0000000..93261bf
--- /dev/null
+++ b/bzrlib/_static_tuple_c.h
@@ -0,0 +1,117 @@
+/* Copyright (C) 2009, 2010 Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _STATIC_TUPLE_H_
+#define _STATIC_TUPLE_H_
+#include <Python.h>
+#include <string.h>
+
+#define STATIC_TUPLE_HAS_HASH 0
+/* Caching the hash adds memory, but allows us to save a little time during
+ * lookups. TIMEIT hash(key) shows it as
+ * 0.108usec w/ hash
+ * 0.160usec w/o hash
+ * Note that the entries themselves are strings, which already cache their
+ * hashes. So while there is a 1.5:1 difference in the time for hash(), it is
+ * already a function which is quite fast. Probably the only reason we might
+ * want to do so, is if we customized SimpleSet to the point that the item
+ * pointers were exactly certain types, and then accessed table[i]->hash
+ * directly. So far StaticTuple_hash() is fast enough to not warrant the memory
+ * difference.
+ */
+
+/* This defines a single variable-width key.
+ * It is basically the same as a tuple, but
+ * 1) Lighter weight in memory
+ * 2) Only supports strings or other static types (that don't reference other
+ * objects.)
+ */
+
+#define STATIC_TUPLE_INTERNED_FLAG 0x01
+typedef struct {
+ PyObject_HEAD
+ // We could go with unsigned short here, and support 64k width tuples
+ // without any memory impact; it might be worthwhile
+ unsigned char size;
+ unsigned char flags;
+ unsigned char _unused0;
+ unsigned char _unused1;
+ // Note that on 64-bit, we actually have 4-more unused bytes
+ // because items will always be aligned to a 64-bit boundary
+#if STATIC_TUPLE_HAS_HASH
+ long hash;
+#endif
+ PyObject *items[0];
+} StaticTuple;
+extern PyTypeObject StaticTuple_Type;
+
+typedef struct {
+ PyObject_VAR_HEAD
+ PyObject *table[0];
+} KeyIntern;
+
+#define StaticTuple_SET_ITEM(key, offset, val) \
+ ((((StaticTuple*)(key))->items[(offset)]) = ((PyObject *)(val)))
+#define StaticTuple_GET_ITEM(key, offset) (((StaticTuple*)key)->items[offset])
+#define StaticTuple_GET_SIZE(key) (((StaticTuple*)key)->size)
+
+
+#ifdef STATIC_TUPLE_MODULE
+/* Used when compiling _static_tuple_c.c */
+
+static StaticTuple * StaticTuple_New(Py_ssize_t);
+static StaticTuple * StaticTuple_Intern(StaticTuple *self);
+static StaticTuple * StaticTuple_FromSequence(PyObject *);
+#define StaticTuple_CheckExact(op) (Py_TYPE(op) == &StaticTuple_Type)
+
+#else
+/* Used as the foreign api */
+
+#include "_import_c_api.h"
+
+static StaticTuple *(*StaticTuple_New)(Py_ssize_t);
+static StaticTuple *(*StaticTuple_Intern)(StaticTuple *);
+static StaticTuple *(*StaticTuple_FromSequence)(PyObject *);
+static PyTypeObject *_p_StaticTuple_Type;
+
+#define StaticTuple_CheckExact(op) (Py_TYPE(op) == _p_StaticTuple_Type)
+static int (*_StaticTuple_CheckExact)(PyObject *);
+
+
+/* Return -1 and set exception on error, 0 on success */
+static int
+import_static_tuple_c(void)
+{
+ struct function_description functions[] = {
+ {"StaticTuple_New", (void **)&StaticTuple_New,
+ "StaticTuple *(Py_ssize_t)"},
+ {"StaticTuple_Intern", (void **)&StaticTuple_Intern,
+ "StaticTuple *(StaticTuple *)"},
+ {"StaticTuple_FromSequence", (void **)&StaticTuple_FromSequence,
+ "StaticTuple *(PyObject *)"},
+ {"_StaticTuple_CheckExact", (void **)&_StaticTuple_CheckExact,
+ "int(PyObject *)"},
+ {NULL}};
+ struct type_description types[] = {
+ {"StaticTuple", &_p_StaticTuple_Type},
+ {NULL}};
+ return _import_extension_module("bzrlib._static_tuple_c",
+ functions, types);
+}
+
+#endif // !STATIC_TUPLE_MODULE
+#endif // !_STATIC_TUPLE_H_
diff --git a/bzrlib/_static_tuple_c.pxd b/bzrlib/_static_tuple_c.pxd
new file mode 100644
index 0000000..a0d219b
--- /dev/null
+++ b/bzrlib/_static_tuple_c.pxd
@@ -0,0 +1,46 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""The interface definition file for the StaticTuple class."""
+
+
+cdef extern from "Python.h":
+ ctypedef int Py_ssize_t # Required for older pyrex versions
+ ctypedef struct PyObject:
+ pass
+
+cdef extern from "_static_tuple_c.h":
+ ctypedef class bzrlib._static_tuple_c.StaticTuple [object StaticTuple]:
+ cdef unsigned char size
+ cdef unsigned char flags
+ cdef PyObject *items[0]
+
+ # Must be called before using any of the C api, as it sets the function
+ # pointers in memory.
+ int import_static_tuple_c() except -1
+ StaticTuple StaticTuple_New(Py_ssize_t)
+ StaticTuple StaticTuple_Intern(StaticTuple)
+ StaticTuple StaticTuple_FromSequence(object)
+
+ # Steals a reference and val must be a valid type, no checking is done
+ void StaticTuple_SET_ITEM(StaticTuple key, Py_ssize_t offset, object val)
+ # We would normally use PyObject * here. However it seems that cython/pyrex
+ # treat the PyObject defined in this header as something different than one
+ # defined in a .pyx file. And since we don't INCREF, we need a raw pointer,
+ # not an 'object' return value.
+ void *StaticTuple_GET_ITEM(StaticTuple key, Py_ssize_t offset)
+ int StaticTuple_CheckExact(object)
+ Py_ssize_t StaticTuple_GET_SIZE(StaticTuple key)
diff --git a/bzrlib/_static_tuple_py.py b/bzrlib/_static_tuple_py.py
new file mode 100644
index 0000000..a00592a
--- /dev/null
+++ b/bzrlib/_static_tuple_py.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""The pure-python implementation of the StaticTuple type.
+
+Note that it is generally just implemented using tuples of tuples of
+strings.
+"""
+
+from __future__ import absolute_import
+
+
+class StaticTuple(tuple):
+ """A static type, similar to a tuple of strings."""
+
+ __slots__ = ()
+
+ def __new__(cls, *args):
+ # Make the empty StaticTuple a singleton
+ if not args and _empty_tuple is not None:
+ return _empty_tuple
+ return tuple.__new__(cls, args)
+
+ def __init__(self, *args):
+ """Create a new 'StaticTuple'"""
+ num_keys = len(args)
+ if num_keys < 0 or num_keys > 255:
+ raise TypeError('StaticTuple(...) takes from 0 to 255 items')
+ for bit in args:
+ if type(bit) not in (str, StaticTuple, unicode, int, long, float,
+ None.__class__, bool):
+ raise TypeError('StaticTuple can only point to'
+ ' StaticTuple, str, unicode, int, long, float, bool, or'
+ ' None not %s' % (type(bit),))
+ # We don't need to pass args to tuple.__init__, because that was
+ # already handled in __new__.
+ tuple.__init__(self)
+
+ def __repr__(self):
+ return '%s%s' % (self.__class__.__name__, tuple.__repr__(self))
+
+ def __reduce__(self):
+ return (StaticTuple, tuple(self))
+
+ def __add__(self, other):
+ """Concatenate self with other"""
+ return StaticTuple.from_sequence(tuple.__add__(self,other))
+
+ def as_tuple(self):
+ return tuple(self)
+
+ def intern(self):
+ return _interned_tuples.setdefault(self, self)
+
+ @staticmethod
+ def from_sequence(seq):
+ """Convert a sequence object into a StaticTuple instance."""
+ if isinstance(seq, StaticTuple):
+ # it already is
+ return seq
+ return StaticTuple(*seq)
+
+
+
+# Have to set it to None first, so that __new__ can determine whether
+# the _empty_tuple singleton has been created yet or not.
+_empty_tuple = None
+_empty_tuple = StaticTuple()
+_interned_tuples = {}
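
As a quick illustration of the interface shared by the C and pure-Python implementations, here is a minimal sketch using the fallback class defined above; the key strings are placeholders:

    from bzrlib._static_tuple_py import StaticTuple

    # Build a key from individual items; only simple, non-referencing types
    # (str, unicode, int, None, other StaticTuples, ...) are accepted.
    key = StaticTuple('file-id', 'revision-id')
    assert key.as_tuple() == ('file-id', 'revision-id')

    # from_sequence() converts an existing iterable instead of taking *args.
    same = StaticTuple.from_sequence(['file-id', 'revision-id'])

    # intern() returns a canonical instance, so equal keys share one object.
    assert same.intern() is key.intern()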
diff --git a/bzrlib/_walkdirs_win32.pyx b/bzrlib/_walkdirs_win32.pyx
new file mode 100644
index 0000000..a32229f
--- /dev/null
+++ b/bzrlib/_walkdirs_win32.pyx
@@ -0,0 +1,300 @@
+# Copyright (C) 2008-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Helper functions for Walkdirs on win32."""
+
+
+cdef extern from "python-compat.h":
+ struct _HANDLE:
+ pass
+ ctypedef _HANDLE *HANDLE
+ ctypedef unsigned long DWORD
+ ctypedef long long __int64
+ ctypedef unsigned short WCHAR
+ struct _FILETIME:
+ DWORD dwHighDateTime
+ DWORD dwLowDateTime
+ ctypedef _FILETIME FILETIME
+
+ struct _WIN32_FIND_DATAW:
+ DWORD dwFileAttributes
+ FILETIME ftCreationTime
+ FILETIME ftLastAccessTime
+ FILETIME ftLastWriteTime
+ DWORD nFileSizeHigh
+ DWORD nFileSizeLow
+ # Some reserved stuff here
+ WCHAR cFileName[260] # MAX_PATH
+ WCHAR cAlternateFilename[14]
+
+ # We have to use the typedef trick, otherwise pyrex uses:
+ # struct WIN32_FIND_DATAW
+ # which fails due to 'incomplete type'
+ ctypedef _WIN32_FIND_DATAW WIN32_FIND_DATAW
+
+ HANDLE INVALID_HANDLE_VALUE
+ HANDLE FindFirstFileW(WCHAR *path, WIN32_FIND_DATAW *data)
+ int FindNextFileW(HANDLE search, WIN32_FIND_DATAW *data)
+ int FindClose(HANDLE search)
+
+ DWORD FILE_ATTRIBUTE_READONLY
+ DWORD FILE_ATTRIBUTE_DIRECTORY
+ int ERROR_NO_MORE_FILES
+
+ int GetLastError()
+
+ # Wide character functions
+ DWORD wcslen(WCHAR *)
+
+
+cdef extern from "Python.h":
+ WCHAR *PyUnicode_AS_UNICODE(object)
+ Py_ssize_t PyUnicode_GET_SIZE(object)
+ object PyUnicode_FromUnicode(WCHAR *, Py_ssize_t)
+ int PyList_Append(object, object) except -1
+ object PyUnicode_AsUTF8String(object)
+
+
+import operator
+import os
+import stat
+
+from bzrlib import _readdir_py
+
+cdef object osutils
+osutils = None
+
+
+cdef class _Win32Stat:
+ """Represent a 'stat' result generated from WIN32_FIND_DATA"""
+
+ cdef readonly int st_mode
+ cdef readonly double st_ctime
+ cdef readonly double st_mtime
+ cdef readonly double st_atime
+ # We can't just declare this as 'readonly' because python2.4 doesn't define
+ # T_LONGLONG as a structure member. So instead we just use a property that
+ # will convert it correctly anyway.
+ cdef __int64 _st_size
+
+ property st_size:
+ def __get__(self):
+ return self._st_size
+
+ # os.stat always returns 0 for these fields on win32, so we hard code it here
+ property st_dev:
+ def __get__(self):
+ return 0
+ property st_ino:
+ def __get__(self):
+ return 0
+ # st_uid and st_gid are required for some external tools like bzr-git & dulwich
+ property st_uid:
+ def __get__(self):
+ return 0
+ property st_gid:
+ def __get__(self):
+ return 0
+
+ def __repr__(self):
+ """Repr is the same as a Stat object.
+
+ (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime)
+ """
+ return repr((self.st_mode, 0, 0, 0, 0, 0, self.st_size, self.st_atime,
+ self.st_mtime, self.st_ctime))
+
+
+cdef object _get_name(WIN32_FIND_DATAW *data):
+ """Extract the Unicode name for this file/dir."""
+ return PyUnicode_FromUnicode(data.cFileName,
+ wcslen(data.cFileName))
+
+
+cdef int _get_mode_bits(WIN32_FIND_DATAW *data): # cannot_raise
+ cdef int mode_bits
+
+ mode_bits = 0100666 # writeable file, the most common
+ if data.dwFileAttributes & FILE_ATTRIBUTE_READONLY == FILE_ATTRIBUTE_READONLY:
+ mode_bits = mode_bits ^ 0222 # remove the write bits
+ if data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY == FILE_ATTRIBUTE_DIRECTORY:
+ # Remove the FILE bit, set the DIR bit, and set the EXEC bits
+ mode_bits = mode_bits ^ 0140111
+ return mode_bits
+
+
+cdef __int64 _get_size(WIN32_FIND_DATAW *data): # cannot_raise
+ # Pyrex casts a DWORD into a PyLong anyway, so it is safe to do << 32
+ # on a DWORD
+ return ((<__int64>data.nFileSizeHigh) << 32) + data.nFileSizeLow
+
+
+cdef double _ftime_to_timestamp(FILETIME *ft): # cannot_raise
+ """Convert from a FILETIME struct into a floating point timestamp.
+
+ The fields of a FILETIME structure are the hi and lo part
+ of a 64-bit value expressed in 100 nanosecond units.
+ 1e7 is one second in such units; 1e-7 the inverse.
+ 429.4967296 is 2**32 / 1e7 or 2**32 * 1e-7.
+ It also uses the epoch 1601-01-01 rather than 1970-01-01
+ (taken from posixmodule.c)
+ """
+ cdef __int64 val
+ # NB: This gives slightly different results versus casting to a 64-bit
+ # integer and doing integer math before casting into a floating
+ # point number. But the difference is in the sub millisecond range,
+ # which doesn't seem critical here.
+ # secs between epochs: 11,644,473,600
+ val = ((<__int64>ft.dwHighDateTime) << 32) + ft.dwLowDateTime
+ return (val * 1.0e-7) - 11644473600.0
+
+
+cdef int _should_skip(WIN32_FIND_DATAW *data): # cannot_raise
+ """Is this '.' or '..' so we should skip it?"""
+ if (data.cFileName[0] != c'.'):
+ return 0
+ if data.cFileName[1] == c'\0':
+ return 1
+ if data.cFileName[1] == c'.' and data.cFileName[2] == c'\0':
+ return 1
+ return 0
+
+
+cdef class Win32ReadDir:
+ """Read directories on win32."""
+
+ cdef object _directory_kind
+ cdef object _file_kind
+
+ def __init__(self):
+ self._directory_kind = _readdir_py._directory
+ self._file_kind = _readdir_py._file
+
+ def top_prefix_to_starting_dir(self, top, prefix=""):
+ """See DirReader.top_prefix_to_starting_dir."""
+ global osutils
+ if osutils is None:
+ from bzrlib import osutils
+ return (osutils.safe_utf8(prefix), None, None, None,
+ osutils.safe_unicode(top))
+
+ cdef object _get_kind(self, WIN32_FIND_DATAW *data):
+ if data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY:
+ return self._directory_kind
+ return self._file_kind
+
+ cdef _Win32Stat _get_stat_value(self, WIN32_FIND_DATAW *data):
+ """Get the filename and the stat information."""
+ cdef _Win32Stat statvalue
+
+ statvalue = _Win32Stat()
+ statvalue.st_mode = _get_mode_bits(data)
+ statvalue.st_ctime = _ftime_to_timestamp(&data.ftCreationTime)
+ statvalue.st_mtime = _ftime_to_timestamp(&data.ftLastWriteTime)
+ statvalue.st_atime = _ftime_to_timestamp(&data.ftLastAccessTime)
+ statvalue._st_size = _get_size(data)
+ return statvalue
+
+ def read_dir(self, prefix, top):
+ """Win32 implementation of DirReader.read_dir.
+
+ :seealso: DirReader.read_dir
+ """
+ cdef WIN32_FIND_DATAW search_data
+ cdef HANDLE hFindFile
+ cdef int last_err
+ cdef WCHAR *query
+ cdef int result
+
+ if prefix:
+ relprefix = prefix + '/'
+ else:
+ relprefix = ''
+ top_slash = top + '/'
+
+ top_star = top_slash + '*'
+
+ dirblock = []
+
+ query = PyUnicode_AS_UNICODE(top_star)
+ hFindFile = FindFirstFileW(query, &search_data)
+ if hFindFile == INVALID_HANDLE_VALUE:
+ # Raise an exception? This path doesn't seem to exist
+ raise WindowsError(GetLastError(), top_star)
+
+ try:
+ result = 1
+ while result:
+ # Skip '.' and '..'
+ if _should_skip(&search_data):
+ result = FindNextFileW(hFindFile, &search_data)
+ continue
+ name_unicode = _get_name(&search_data)
+ name_utf8 = PyUnicode_AsUTF8String(name_unicode)
+ PyList_Append(dirblock,
+ (relprefix + name_utf8, name_utf8,
+ self._get_kind(&search_data),
+ self._get_stat_value(&search_data),
+ top_slash + name_unicode))
+
+ result = FindNextFileW(hFindFile, &search_data)
+ # FindNextFileW sets GetLastError() == ERROR_NO_MORE_FILES when it
+ # actually finishes. If we have anything else, then we have a
+ # genuine problem
+ last_err = GetLastError()
+ if last_err != ERROR_NO_MORE_FILES:
+ raise WindowsError(last_err)
+ finally:
+ result = FindClose(hFindFile)
+ if result == 0:
+ last_err = GetLastError()
+ # TODO: We should probably raise an exception if FindClose
+ # returns an error; however, I don't want to suppress an
+ # earlier exception, so for now I'm ignoring this
+ dirblock.sort(key=operator.itemgetter(1))
+ return dirblock
+
+
+def lstat(path):
+ """Equivalent to os.lstat, except match Win32ReadDir._get_stat_value.
+ """
+ return wrap_stat(os.lstat(path))
+
+
+def fstat(fd):
+ """Like os.fstat, except match Win32ReadDir._get_stat_value
+
+ :seealso: wrap_stat
+ """
+ return wrap_stat(os.fstat(fd))
+
+
+def wrap_stat(st):
+ """Return a _Win32Stat object, based on the given stat result.
+
+ On Windows, os.fstat(open(fname).fileno()) != os.lstat(fname). This is
+ generally because os.lstat and os.fstat differ in what they put into st_ino
+ and st_dev. What gets set where seems to also be dependent on the python
+ version. So we always set it to 0 to avoid worrying about it.
+ """
+ cdef _Win32Stat statvalue
+ statvalue = _Win32Stat()
+ statvalue.st_mode = st.st_mode
+ statvalue.st_ctime = st.st_ctime
+ statvalue.st_mtime = st.st_mtime
+ statvalue.st_atime = st.st_atime
+ statvalue._st_size = st.st_size
+ return statvalue
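
A usage sketch, assuming the compiled extension is importable on a Windows host; the directory and file names below are placeholders:

    from bzrlib._walkdirs_win32 import Win32ReadDir, lstat

    reader = Win32ReadDir()
    # read_dir() returns (relpath_utf8, name_utf8, kind, stat, abspath) tuples,
    # sorted by name, with the '.' and '..' entries skipped.
    for relpath, name, kind, st, abspath in reader.read_dir('', u'C:/example'):
        print kind, name, st.st_size

    # lstat()/fstat() wrap the os results into the same _Win32Stat shape,
    # with st_dev and st_ino pinned to 0.
    st = lstat(u'C:/example/file.txt')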
diff --git a/bzrlib/add.py b/bzrlib/add.py
new file mode 100644
index 0000000..118dc1e
--- /dev/null
+++ b/bzrlib/add.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Helper functions for adding files to working trees."""
+
+from __future__ import absolute_import
+
+import sys
+import os
+
+from bzrlib import (
+ osutils,
+ ui,
+ )
+from bzrlib.i18n import gettext
+
+class AddAction(object):
+ """A class which defines what action to take when adding a file."""
+
+ def __init__(self, to_file=None, should_print=None):
+ """Initialize an action which prints added files to an output stream.
+
+ :param to_file: The stream to write into. This is expected to take
+ Unicode paths. If not supplied, it will default to ``sys.stdout``.
+ :param should_print: If False, printing will be suppressed.
+ """
+ self._to_file = to_file
+ if to_file is None:
+ self._to_file = sys.stdout
+ self.should_print = False
+ if should_print is not None:
+ self.should_print = should_print
+
+ def __call__(self, inv, parent_ie, path, kind, _quote=osutils.quotefn):
+ """Add path to inventory.
+
+ The default action does nothing.
+
+ :param inv: The inventory we are working with.
+ :param path: The FastPath being added
+ :param kind: The kind of the object being added.
+ """
+ if self.should_print:
+ self._to_file.write('adding %s\n' % _quote(path))
+ return None
+
+ def skip_file(self, tree, path, kind, stat_value = None):
+ """Test whether the given file should be skipped or not.
+
+ The default action never skips. Note this is only called during
+ recursive adds.
+
+ :param tree: The tree we are working in
+ :param path: The path being added
+ :param kind: The kind of object being added.
+ :param stat_value: Stat result for this file, if available already
+ :return: True if the file should be skipped (not added)
+ """
+ return False
+
+
+class AddWithSkipLargeAction(AddAction):
+ """A class that can decide to skip a file if it's considered too large"""
+
+ _maxSize = None
+
+ def skip_file(self, tree, path, kind, stat_value = None):
+ if kind != 'file':
+ return False
+ opt_name = 'add.maximum_file_size'
+ if self._maxSize is None:
+ config = tree.get_config_stack()
+ self._maxSize = config.get(opt_name)
+ if stat_value is None:
+ file_size = os.path.getsize(path)
+ else:
+ file_size = stat_value.st_size
+ if self._maxSize > 0 and file_size > self._maxSize:
+ ui.ui_factory.show_warning(gettext(
+ "skipping {0} (larger than {1} of {2} bytes)").format(
+ path, opt_name, self._maxSize))
+ return True
+ return False
+
+
+class AddFromBaseAction(AddAction):
+ """This class will try to extract file ids from another tree."""
+
+ def __init__(self, base_tree, base_path, to_file=None, should_print=None):
+ super(AddFromBaseAction, self).__init__(to_file=to_file,
+ should_print=should_print)
+ self.base_tree = base_tree
+ self.base_path = base_path
+
+ def __call__(self, inv, parent_ie, path, kind):
+ # Place the parent call
+ # Now check to see if we can extract an id for this file
+ file_id, base_path = self._get_base_file_id(path, parent_ie)
+ if file_id is not None:
+ if self.should_print:
+ self._to_file.write('adding %s w/ file id from %s\n'
+ % (path, base_path))
+ else:
+ # we aren't doing anything special, so let the default
+ # reporter happen
+ file_id = super(AddFromBaseAction, self).__call__(
+ inv, parent_ie, path, kind)
+ return file_id
+
+ def _get_base_file_id(self, path, parent_ie):
+ """Look for a file id in the base branch.
+
+ First, if the base tree has the parent directory,
+ we look for a file with the same name in that directory.
+ Else, we look for an entry in the base tree with the same path.
+ """
+ if self.base_tree.has_id(parent_ie.file_id):
+ base_path = osutils.pathjoin(
+ self.base_tree.id2path(parent_ie.file_id),
+ osutils.basename(path))
+ base_id = self.base_tree.path2id(base_path)
+ if base_id is not None:
+ return (base_id, base_path)
+ full_base_path = osutils.pathjoin(self.base_path, path)
+ # This may return None, but it is our last attempt
+ return self.base_tree.path2id(full_base_path), full_base_path
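
A short sketch of how an add action is driven; the real caller (smart_add) lives elsewhere in bzrlib, so the inventory and parent-entry arguments are passed as None here, which the default action ignores. The path is a placeholder:

    import sys
    from bzrlib.add import AddAction

    action = AddAction(to_file=sys.stdout, should_print=True)
    # The default __call__ only reports the path and returns None, so the
    # caller will allocate a fresh file id.
    action(None, None, 'doc/README.txt', 'file')
    # skip_file() is consulted during recursive adds; the base class never skips.
    assert action.skip_file(None, 'doc/README.txt', 'file') is False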
diff --git a/bzrlib/annotate.py b/bzrlib/annotate.py
new file mode 100644
index 0000000..4ca9dbc
--- /dev/null
+++ b/bzrlib/annotate.py
@@ -0,0 +1,445 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""File annotate based on weave storage"""
+
+from __future__ import absolute_import
+
+# TODO: Choice of more or less verbose formats:
+#
+# interposed: show more details between blocks of modified lines
+
+# TODO: Show which revision caused a line to merge into the parent
+
+# TODO: perhaps abbreviate timescales depending on how recent they are
+# e.g. "3:12 Tue", "13 Oct", "Oct 2005", etc.
+
+import sys
+import time
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ patiencediff,
+ tsort,
+ )
+""")
+from bzrlib import (
+ errors,
+ osutils,
+ )
+from bzrlib.config import extract_email_address
+from bzrlib.repository import _strip_NULL_ghosts
+from bzrlib.revision import (
+ CURRENT_REVISION,
+ Revision,
+ )
+
+
+def annotate_file_tree(tree, file_id, to_file, verbose=False, full=False,
+ show_ids=False, branch=None):
+ """Annotate file_id in a tree.
+
+ The tree should already be read_locked() when annotate_file_tree is called.
+
+ :param tree: The tree to look for revision numbers and history from.
+ :param file_id: The file_id to annotate.
+ :param to_file: The file to output the annotation to.
+ :param verbose: Show all details rather than truncating to ensure
+ reasonable text width.
+ :param full: XXXX Not sure what this does.
+ :param show_ids: Show revision ids in the annotation output.
+ :param branch: Branch to use for revision revno lookups
+ """
+ if branch is None:
+ branch = tree.branch
+ if to_file is None:
+ to_file = sys.stdout
+
+ # Handle the show_ids case
+ annotations = list(tree.annotate_iter(file_id))
+ if show_ids:
+ return _show_id_annotations(annotations, to_file, full)
+
+ if not getattr(tree, "get_revision_id", False):
+ # Create a virtual revision to represent the current tree state.
+ # Should get some more pending commit attributes, like pending tags,
+ # bugfixes etc.
+ current_rev = Revision(CURRENT_REVISION)
+ current_rev.parent_ids = tree.get_parent_ids()
+ try:
+ current_rev.committer = branch.get_config_stack().get('email')
+ except errors.NoWhoami:
+ current_rev.committer = 'local user'
+ current_rev.message = "?"
+ current_rev.timestamp = round(time.time(), 3)
+ current_rev.timezone = osutils.local_time_offset()
+ else:
+ current_rev = None
+ annotation = list(_expand_annotations(annotations, branch,
+ current_rev))
+ _print_annotations(annotation, verbose, to_file, full)
+
+
+def _print_annotations(annotation, verbose, to_file, full):
+ """Print annotations to to_file.
+
+ :param to_file: The file to output the annotation to.
+ :param verbose: Show all details rather than truncating to ensure
+ reasonable text width.
+ :param full: XXXX Not sure what this does.
+ """
+ if len(annotation) == 0:
+ max_origin_len = max_revno_len = max_revid_len = 0
+ else:
+ max_origin_len = max(len(x[1]) for x in annotation)
+ max_revno_len = max(len(x[0]) for x in annotation)
+ max_revid_len = max(len(x[3]) for x in annotation)
+ if not verbose:
+ max_revno_len = min(max_revno_len, 12)
+ max_revno_len = max(max_revno_len, 3)
+
+ # Output the annotations
+ prevanno = ''
+ encoding = getattr(to_file, 'encoding', None) or \
+ osutils.get_terminal_encoding()
+ for (revno_str, author, date_str, line_rev_id, text) in annotation:
+ if verbose:
+ anno = '%-*s %-*s %8s ' % (max_revno_len, revno_str,
+ max_origin_len, author, date_str)
+ else:
+ if len(revno_str) > max_revno_len:
+ revno_str = revno_str[:max_revno_len-1] + '>'
+ anno = "%-*s %-7s " % (max_revno_len, revno_str, author[:7])
+ if anno.lstrip() == "" and full:
+ anno = prevanno
+ try:
+ to_file.write(anno)
+ except UnicodeEncodeError:
+ # cmd_annotate should be passing in an 'exact' object, which means
+ # we have a direct handle to sys.stdout or equivalent. It may not
+ # be able to handle the exact Unicode characters, but 'annotate' is
+ # a user function (non-scripting), so shouldn't die because of
+ # unrepresentable annotation characters. So encode using 'replace',
+ # and write them again.
+ to_file.write(anno.encode(encoding, 'replace'))
+ to_file.write('| %s\n' % (text,))
+ prevanno = anno
+
+
+def _show_id_annotations(annotations, to_file, full):
+ if not annotations:
+ return
+ last_rev_id = None
+ max_origin_len = max(len(origin) for origin, text in annotations)
+ for origin, text in annotations:
+ if full or last_rev_id != origin:
+ this = origin
+ else:
+ this = ''
+ to_file.write('%*s | %s' % (max_origin_len, this, text))
+ last_rev_id = origin
+ return
+
+
+def _expand_annotations(annotations, branch, current_rev=None):
+ """Expand a file's annotations into command line UI ready tuples.
+
+ Each tuple includes detailed information, such as the author name and date
+ string for the commit, rather than just the revision id.
+
+ :param annotations: The annotations to expand.
+ :param branch: A locked branch to query for revision details.
+ :param current_rev: A Revision for the current (uncommitted) tree state, or None.
+ """
+ repository = branch.repository
+ if current_rev is not None:
+ # This can probably become a function on MutableTree, get_revno_map
+ # there, or something.
+ last_revision = current_rev.revision_id
+ # XXX: Partially Cloned from branch, uses the old_get_graph, eep.
+ # XXX: The main difficulty is that we need to inject a single new node
+ # (current_rev) into the graph before it gets numbered, etc.
+ # Once KnownGraph gets an 'add_node()' function, we can use
+ # VF.get_known_graph_ancestry().
+ graph = repository.get_graph()
+ revision_graph = dict(((key, value) for key, value in
+ graph.iter_ancestry(current_rev.parent_ids) if value is not None))
+ revision_graph = _strip_NULL_ghosts(revision_graph)
+ revision_graph[last_revision] = current_rev.parent_ids
+ merge_sorted_revisions = tsort.merge_sort(
+ revision_graph,
+ last_revision,
+ None,
+ generate_revno=True)
+ revision_id_to_revno = dict((rev_id, revno)
+ for seq_num, rev_id, depth, revno, end_of_merge in
+ merge_sorted_revisions)
+ else:
+ revision_id_to_revno = branch.get_revision_id_to_revno_map()
+ last_origin = None
+ revision_ids = set(o for o, t in annotations)
+ revisions = {}
+ if CURRENT_REVISION in revision_ids:
+ revision_id_to_revno[CURRENT_REVISION] = (
+ "%d?" % (branch.revno() + 1),)
+ revisions[CURRENT_REVISION] = current_rev
+ revision_ids = [o for o in revision_ids if
+ repository.has_revision(o)]
+ revisions.update((r.revision_id, r) for r in
+ repository.get_revisions(revision_ids))
+ for origin, text in annotations:
+ text = text.rstrip('\r\n')
+ if origin == last_origin:
+ (revno_str, author, date_str) = ('','','')
+ else:
+ last_origin = origin
+ if origin not in revisions:
+ (revno_str, author, date_str) = ('?','?','?')
+ else:
+ revno_str = '.'.join(str(i) for i in
+ revision_id_to_revno[origin])
+ rev = revisions[origin]
+ tz = rev.timezone or 0
+ date_str = time.strftime('%Y%m%d',
+ time.gmtime(rev.timestamp + tz))
+ # a lazy way to get something like the email address
+ # TODO: Get real email address
+ author = rev.get_apparent_authors()[0]
+ try:
+ author = extract_email_address(author)
+ except errors.NoEmailInUsername:
+ pass # use the whole name
+ yield (revno_str, author, date_str, origin, text)
+
+
+def reannotate(parents_lines, new_lines, new_revision_id,
+ _left_matching_blocks=None,
+ heads_provider=None):
+ """Create a new annotated version from new lines and parent annotations.
+
+ :param parents_lines: List of annotated lines for all parents
+ :param new_lines: The un-annotated new lines
+ :param new_revision_id: The revision-id to associate with new lines
+ (will often be CURRENT_REVISION)
+ :param _left_matching_blocks: a hint about which areas are common
+ between the text and its left-hand-parent. The format is
+ the SequenceMatcher.get_matching_blocks format
+ (start_left, start_right, length_of_match).
+ :param heads_provider: An object which provides a .heads() call to resolve
+ if any revision ids are children of others.
+ If None, then any ancestry disputes will be resolved with
+ new_revision_id
+ """
+ if len(parents_lines) == 0:
+ lines = [(new_revision_id, line) for line in new_lines]
+ elif len(parents_lines) == 1:
+ lines = _reannotate(parents_lines[0], new_lines, new_revision_id,
+ _left_matching_blocks)
+ elif len(parents_lines) == 2:
+ left = _reannotate(parents_lines[0], new_lines, new_revision_id,
+ _left_matching_blocks)
+ lines = _reannotate_annotated(parents_lines[1], new_lines,
+ new_revision_id, left,
+ heads_provider)
+ else:
+ reannotations = [_reannotate(parents_lines[0], new_lines,
+ new_revision_id, _left_matching_blocks)]
+ reannotations.extend(_reannotate(p, new_lines, new_revision_id)
+ for p in parents_lines[1:])
+ lines = []
+ for annos in zip(*reannotations):
+ origins = set(a for a, l in annos)
+ if len(origins) == 1:
+ # All the parents agree, so just return the first one
+ lines.append(annos[0])
+ else:
+ line = annos[0][1]
+ if len(origins) == 2 and new_revision_id in origins:
+ origins.remove(new_revision_id)
+ if len(origins) == 1:
+ lines.append((origins.pop(), line))
+ else:
+ lines.append((new_revision_id, line))
+ return lines
+
+
+def _reannotate(parent_lines, new_lines, new_revision_id,
+ matching_blocks=None):
+ new_cur = 0
+ if matching_blocks is None:
+ plain_parent_lines = [l for r, l in parent_lines]
+ matcher = patiencediff.PatienceSequenceMatcher(None,
+ plain_parent_lines, new_lines)
+ matching_blocks = matcher.get_matching_blocks()
+ lines = []
+ for i, j, n in matching_blocks:
+ for line in new_lines[new_cur:j]:
+ lines.append((new_revision_id, line))
+ lines.extend(parent_lines[i:i+n])
+ new_cur = j + n
+ return lines
+
+
+def _get_matching_blocks(old, new):
+ matcher = patiencediff.PatienceSequenceMatcher(None, old, new)
+ return matcher.get_matching_blocks()
+
+
+_break_annotation_tie = None
+
+def _old_break_annotation_tie(annotated_lines):
+ """Chose an attribution between several possible ones.
+
+ :param annotated_lines: A list of tuples ((file_id, rev_id), line) where
+ the lines are identical but the revids differ and no parent
+ relation exists between them
+
+ :return: The "winning" line. This must be one with a revid that
+ guarantees that further criss-cross merges will converge. Failing to
+ do so has performance implications.
+ """
+ # sort lexicographically so that we always get a stable result.
+
+ # TODO: while 'sort' is the easiest (and nearly the only possible solution)
+ # with the current implementation, choosing the oldest revision is known to
+ # provide better results (as in matching user expectations). The most
+ # common use case being manual cherry-pick from an already existing
+ # revision.
+ return sorted(annotated_lines)[0]
+
+
+def _find_matching_unannotated_lines(output_lines, plain_child_lines,
+ child_lines, start_child, end_child,
+ right_lines, start_right, end_right,
+ heads_provider, revision_id):
+ """Find lines in plain_right_lines that match the existing lines.
+
+ :param output_lines: Append final annotated lines to this list
+ :param plain_child_lines: The unannotated new lines for the child text
+ :param child_lines: Lines for the child text which have been annotated
+ for the left parent
+
+ :param start_child: Position in plain_child_lines and child_lines to start
+ the match searching
+ :param end_child: Last position in plain_child_lines and child_lines to
+ search for a match
+ :param right_lines: The annotated lines for the whole text for the right
+ parent
+ :param start_right: Position in right_lines to start the match
+ :param end_right: Last position in right_lines to search for a match
+ :param heads_provider: When parents disagree on the lineage of a line, we
+ need to check if one side supersedes the other
+ :param revision_id: The label to give if a line should be labeled 'tip'
+ """
+ output_extend = output_lines.extend
+ output_append = output_lines.append
+ # We need to see if any of the unannotated lines match
+ plain_right_subset = [l for a,l in right_lines[start_right:end_right]]
+ plain_child_subset = plain_child_lines[start_child:end_child]
+ match_blocks = _get_matching_blocks(plain_right_subset, plain_child_subset)
+
+ last_child_idx = 0
+
+ for right_idx, child_idx, match_len in match_blocks:
+ # All the lines that don't match are just passed along
+ if child_idx > last_child_idx:
+ output_extend(child_lines[start_child + last_child_idx
+ :start_child + child_idx])
+ for offset in xrange(match_len):
+ left = child_lines[start_child+child_idx+offset]
+ right = right_lines[start_right+right_idx+offset]
+ if left[0] == right[0]:
+ # The annotations match, just return the left one
+ output_append(left)
+ elif left[0] == revision_id:
+ # The left parent marked this as unmatched, so let the
+ # right parent claim it
+ output_append(right)
+ else:
+ # Left and Right both claim this line
+ if heads_provider is None:
+ output_append((revision_id, left[1]))
+ else:
+ heads = heads_provider.heads((left[0], right[0]))
+ if len(heads) == 1:
+ output_append((iter(heads).next(), left[1]))
+ else:
+ # Both claim different origins, get a stable result.
+ # If the result is not stable, there is a risk a
+ # performance degradation as criss-cross merges will
+ # flip-flop the attribution.
+ if _break_annotation_tie is None:
+ output_append(
+ _old_break_annotation_tie([left, right]))
+ else:
+ output_append(_break_annotation_tie([left, right]))
+ last_child_idx = child_idx + match_len
+
+
+def _reannotate_annotated(right_parent_lines, new_lines, new_revision_id,
+ annotated_lines, heads_provider):
+ """Update the annotations for a node based on another parent.
+
+ :param right_parent_lines: A list of annotated lines for the right-hand
+ parent.
+ :param new_lines: The unannotated new lines.
+ :param new_revision_id: The revision_id to attribute to lines which are not
+ present in either parent.
+ :param annotated_lines: A list of annotated lines. This should be the
+ annotation of new_lines based on parents seen so far.
+ :param heads_provider: When parents disagree on the lineage of a line, we
+ need to check if one side supersedes the other.
+ """
+ if len(new_lines) != len(annotated_lines):
+ raise AssertionError("mismatched new_lines and annotated_lines")
+ # First compare the newly annotated lines with the right annotated lines.
+ # Lines which were not changed in left or right should match. This tends to
+ # be the bulk of the lines, and they will need no further processing.
+ lines = []
+ lines_extend = lines.extend
+ last_right_idx = 0 # The line just after the last match from the right side
+ last_left_idx = 0
+ matching_left_and_right = _get_matching_blocks(right_parent_lines,
+ annotated_lines)
+ for right_idx, left_idx, match_len in matching_left_and_right:
+ # annotated lines from last_left_idx to left_idx did not match the
+ # lines from last_right_idx to right_idx, the raw lines should be
+ # compared to determine what annotations need to be updated
+ if last_right_idx == right_idx or last_left_idx == left_idx:
+ # One of the sides is empty, so this is a pure insertion
+ lines_extend(annotated_lines[last_left_idx:left_idx])
+ else:
+ # We need to see if any of the unannotated lines match
+ _find_matching_unannotated_lines(lines,
+ new_lines, annotated_lines,
+ last_left_idx, left_idx,
+ right_parent_lines,
+ last_right_idx, right_idx,
+ heads_provider,
+ new_revision_id)
+ last_right_idx = right_idx + match_len
+ last_left_idx = left_idx + match_len
+ # If left and right agree on a range, just push that into the output
+ lines_extend(annotated_lines[left_idx:left_idx + match_len])
+ return lines
+
+
+try:
+ from bzrlib._annotator_pyx import Annotator
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib._annotator_py import Annotator
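
The merge of parent annotations in reannotate() is easiest to see on a tiny input. A minimal sketch with made-up revision ids and a single parent:

    from bzrlib.annotate import reannotate

    parent = [('rev-1', 'a\n'), ('rev-1', 'b\n')]
    new_lines = ['a\n', 'b\n', 'c\n']
    # Lines matched against the parent keep its annotation; the new trailing
    # line is attributed to the new revision id.
    result = reannotate([parent], new_lines, 'rev-2')
    assert result == [('rev-1', 'a\n'), ('rev-1', 'b\n'), ('rev-2', 'c\n')]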
diff --git a/bzrlib/api.py b/bzrlib/api.py
new file mode 100644
index 0000000..d7c075c
--- /dev/null
+++ b/bzrlib/api.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2007, 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Library API versioning support.
+
+Added in bzrlib 0.18, this allows export of compatibility information about
+bzrlib. Please see doc/developers/api-versioning.txt for design details and
+examples.
+"""
+
+from __future__ import absolute_import
+
+import bzrlib
+from bzrlib.errors import IncompatibleAPI
+
+
+def get_current_api_version(object_with_api):
+ """Return the API version tuple for object_with_api.
+
+ :param object_with_api: An object to look for an API version on. If the
+ object has an api_current_version attribute, that is used. Otherwise, if
+ there is a version_info attribute, its first three elements are used.
+ Finally, if there is no version_info attribute, the current api version
+ of bzrlib itself is used.
+
+ Added in bzrlib 0.18.
+ """
+ try:
+ return object_with_api.api_current_version
+ except AttributeError:
+ try:
+ return object_with_api.version_info[0:3]
+ except AttributeError:
+ return get_current_api_version(bzrlib)
+
+
+def get_minimum_api_version(object_with_api):
+ """Return the minimum API version supported by object_with_api.
+
+ :param object_with_api: An object to look for an API version on. If the
+ object has an api_minimum_version attribute, that is used. Otherwise the
+ minimum api version of bzrlib itself is used.
+
+ Added in bzrlib 0.18.
+ """
+ try:
+ return object_with_api.api_minimum_version
+ except AttributeError:
+ return get_minimum_api_version(bzrlib)
+
+
+def require_api(object_with_api, wanted_api):
+ """Check if object_with_api supports the api version wanted_api.
+
+ :param object_with_api: An object which exports an API minimum and current
+ version. See get_minimum_api_version and get_current_api_version for
+ details.
+ :param wanted_api: The API version for which support is required.
+ :return: None
+ :raises IncompatibleAPI: When the wanted_api is not supported by
+ object_with_api.
+
+ Added in bzrlib 0.18.
+ """
+ current = get_current_api_version(object_with_api)
+ minimum = get_minimum_api_version(object_with_api)
+ if wanted_api < minimum or wanted_api > current:
+ raise IncompatibleAPI(object_with_api, wanted_api, minimum, current)
+
+
+def require_any_api(object_with_api, wanted_api_list):
+ """Check if object_with_api supports the api version wanted_api.
+
+ :param object_with_api: An object which exports an API minimum and current
+ version. See get_minimum_api_version and get_current_api_version for
+ details.
+ :param wanted_api_list: A list of API versions, any one of which being
+ supported is sufficient.
+ :return: None
+ :raises IncompatibleAPI: When the wanted_api is not supported by
+ object_with_api.
+
+ Added in bzrlib 1.9.
+ """
+ for api in wanted_api_list[:-1]:
+ try:
+ return require_api(object_with_api, api)
+ except IncompatibleAPI:
+ pass
+ require_api(object_with_api, wanted_api_list[-1])
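
A sketch of the version checks using a stand-in object rather than a real plugin; the attribute names are the ones the helpers above look for:

    from bzrlib.api import require_api, require_any_api
    from bzrlib.errors import IncompatibleAPI

    class FakeLibrary(object):
        api_minimum_version = (1, 6, 0)
        api_current_version = (1, 9, 0)

    require_api(FakeLibrary, (1, 7, 0))      # inside [minimum, current]: fine
    try:
        require_api(FakeLibrary, (2, 0, 0))  # newer than current: rejected
    except IncompatibleAPI:
        pass
    # require_any_api() succeeds as soon as one candidate version is supported.
    require_any_api(FakeLibrary, [(2, 0, 0), (1, 8, 0)])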
diff --git a/bzrlib/atomicfile.py b/bzrlib/atomicfile.py
new file mode 100644
index 0000000..92e1505
--- /dev/null
+++ b/bzrlib/atomicfile.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2005, 2006, 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import stat
+import warnings
+
+from bzrlib import (
+ errors,
+ osutils,
+ symbol_versioning,
+ )
+""")
+
+# not forksafe - but we don't fork.
+_pid = os.getpid()
+_hostname = None
+
+
+class AtomicFile(object):
+ """A file that does an atomic-rename to move into place.
+
+ This also causes hardlinks to break when it's written out.
+
+ Open this as for a regular file, then use commit() to move into
+ place or abort() to cancel.
+ """
+
+ __slots__ = ['tmpfilename', 'realfilename', '_fd']
+
+ def __init__(self, filename, mode='wb', new_mode=None):
+ global _hostname
+
+ self._fd = None
+
+ if _hostname is None:
+ _hostname = osutils.get_host_name()
+
+ self.tmpfilename = '%s.%d.%s.%s.tmp' % (filename, _pid, _hostname,
+ osutils.rand_chars(10))
+
+ self.realfilename = filename
+
+ flags = os.O_EXCL | os.O_CREAT | os.O_WRONLY | osutils.O_NOINHERIT
+ if mode == 'wb':
+ flags |= osutils.O_BINARY
+ elif mode != 'wt':
+ raise ValueError("invalid AtomicFile mode %r" % mode)
+
+ if new_mode is not None:
+ local_mode = new_mode
+ else:
+ local_mode = 0666
+
+ # Use a low level fd operation to avoid chmodding later.
+ # This may not succeed, but it should help most of the time
+ self._fd = os.open(self.tmpfilename, flags, local_mode)
+
+ if new_mode is not None:
+ # Because of umask issues, we may need to chmod anyway
+ # the common case is that we won't, though.
+ st = os.fstat(self._fd)
+ if stat.S_IMODE(st.st_mode) != new_mode:
+ osutils.chmod_if_possible(self.tmpfilename, new_mode)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__,
+ self.realfilename)
+
+ def write(self, data):
+ """Write some data to the file. Like file.write()"""
+ os.write(self._fd, data)
+
+ def _close_tmpfile(self, func_name):
+ """Close the local temp file in preparation for commit or abort"""
+ if self._fd is None:
+ raise errors.AtomicFileAlreadyClosed(path=self.realfilename,
+ function=func_name)
+ fd = self._fd
+ self._fd = None
+ os.close(fd)
+
+ def commit(self):
+ """Close the file and move to final name."""
+ self._close_tmpfile('commit')
+ osutils.rename(self.tmpfilename, self.realfilename)
+
+ def abort(self):
+ """Discard temporary file without committing changes."""
+ self._close_tmpfile('abort')
+ os.remove(self.tmpfilename)
+
+ def close(self):
+ """Discard the file unless already committed."""
+ if self._fd is not None:
+ self.abort()
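
A minimal usage sketch; 'example.conf' is a placeholder filename in the current directory:

    from bzrlib.atomicfile import AtomicFile

    f = AtomicFile('example.conf', mode='wb')
    try:
        f.write('key = value\n')
        f.commit()      # atomically renames the temp file over example.conf
    finally:
        f.close()       # aborts (removes the temp file) if commit never ran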
diff --git a/bzrlib/bencode.py b/bzrlib/bencode.py
new file mode 100644
index 0000000..d280b31
--- /dev/null
+++ b/bzrlib/bencode.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2007,2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Wrapper around the bencode pyrex and python implementation"""
+
+from __future__ import absolute_import
+
+from bzrlib import osutils
+
+try:
+ from bzrlib._bencode_pyx import bdecode, bdecode_as_tuple, bencode, Bencached
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib.util._bencode_py import bdecode, bdecode_as_tuple, bencode, Bencached
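
Whichever implementation gets imported, the wrapper exposes the same bencode/bdecode pair. A quick round-trip sketch:

    from bzrlib.bencode import bencode, bdecode

    # Strings are length-prefixed, integers wrapped in 'i...e', lists in
    # 'l...e' and dicts in 'd...e'.
    encoded = bencode(['spam', 42, {'rev': 'abc'}])
    assert encoded == 'l4:spami42ed3:rev3:abcee'
    assert bdecode(encoded) == ['spam', 42, {'rev': 'abc'}]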
diff --git a/bzrlib/bisect_multi.py b/bzrlib/bisect_multi.py
new file mode 100644
index 0000000..c74f91a
--- /dev/null
+++ b/bzrlib/bisect_multi.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Bisection lookup multiple keys."""
+
+from __future__ import absolute_import
+
+__all__ = [
+ 'bisect_multi_bytes',
+ ]
+
+
+def bisect_multi_bytes(content_lookup, size, keys):
+ """Perform bisection lookups for keys using byte based addressing.
+
+ The keys are looked up via the content_lookup routine. The content_lookup
+ routine gives bisect_multi_bytes information about where to keep looking
+ to find the data for the key, and bisect_multi_bytes feeds this back into
+ the lookup function until the search is complete. The search is complete
+ when the list of keys which have returned something other than -1 or +1 is
+ empty. Keys which are not found are not returned to the caller.
+
+ :param content_lookup: A callable that takes a list of (offset, key) pairs
+ and returns a list of result tuples ((offset, key), result). Each
+ result can be one of:
+ -1: The key comes earlier in the content.
+ False: The key is not present in the content.
+ +1: The key comes later in the content.
+ Any other value: A final result to return to the caller.
+ :param size: The length of the content.
+ :param keys: The keys to bisect for.
+ :return: An iterator of the results.
+ """
+ # possibly make this a generator, but a list meets the contract for now.
+ result = []
+ delta = size // 2
+ search_keys = [(delta, key) for key in keys]
+ while search_keys:
+ search_results = content_lookup(search_keys)
+ if delta > 1:
+ delta = delta // 2
+ search_keys = []
+ for (location, key), status in search_results:
+ if status == -1:
+ search_keys.append((location - delta, key))
+ elif status == 1:
+ search_keys.append((location + delta, key))
+ elif status == False:
+ # not present, stop searching
+ continue
+ else:
+ result.append((key, status))
+ return result
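
The content_lookup contract is easier to see against a toy content object. A sketch over an in-memory string of single-byte records; note that a real lookup function must also return False for keys it can prove are absent, which this toy version does not attempt, so only keys that are present are asked for:

    from bzrlib.bisect_multi import bisect_multi_bytes

    content = 'abcdefgh'  # eight fixed-width, one-byte records

    def content_lookup(offset_keys):
        results = []
        for offset, key in offset_keys:
            found = content[offset]
            if key == found:
                # Anything other than -1, +1 or False is a final result.
                results.append(((offset, key), 'record at byte %d' % offset))
            elif key < found:
                results.append(((offset, key), -1))  # key lies earlier
            else:
                results.append(((offset, key), 1))   # key lies later
        return results

    hits = bisect_multi_bytes(content_lookup, len(content), ['a', 'c', 'g'])
    assert sorted(hits) == [('a', 'record at byte 0'),
                            ('c', 'record at byte 2'),
                            ('g', 'record at byte 6')]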
diff --git a/bzrlib/branch.py b/bzrlib/branch.py
new file mode 100644
index 0000000..a5cc67e
--- /dev/null
+++ b/bzrlib/branch.py
@@ -0,0 +1,3407 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import bzrlib.bzrdir
+
+from cStringIO import StringIO
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import itertools
+from bzrlib import (
+ bzrdir,
+ controldir,
+ cache_utf8,
+ cleanup,
+ config as _mod_config,
+ debug,
+ errors,
+ fetch,
+ graph as _mod_graph,
+ lockdir,
+ lockable_files,
+ remote,
+ repository,
+ revision as _mod_revision,
+ rio,
+ tag as _mod_tag,
+ transport,
+ ui,
+ urlutils,
+ vf_search,
+ )
+from bzrlib.i18n import gettext, ngettext
+""")
+
+# Explicitly import bzrlib.bzrdir so that the BzrProber
+# is guaranteed to be registered.
+import bzrlib.bzrdir
+
+from bzrlib import (
+ bzrdir,
+ controldir,
+ )
+from bzrlib.decorators import (
+ needs_read_lock,
+ needs_write_lock,
+ only_raises,
+ )
+from bzrlib.hooks import Hooks
+from bzrlib.inter import InterObject
+from bzrlib.lock import _RelockDebugMixin, LogicalLockResult
+from bzrlib import registry
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+from bzrlib.trace import mutter, mutter_callsite, note, is_quiet
+
+
+class Branch(controldir.ControlComponent):
+ """Branch holding a history of revisions.
+
+ :ivar base:
+ Base directory/url of the branch; using control_url and
+ control_transport is more standardized.
+ :ivar hooks: An instance of BranchHooks.
+ :ivar _master_branch_cache: cached result of get_master_branch, see
+ _clear_cached_state.
+ """
+ # this is really an instance variable - FIXME move it there
+ # - RBC 20060112
+ base = None
+
+ @property
+ def control_transport(self):
+ return self._transport
+
+ @property
+ def user_transport(self):
+ return self.bzrdir.user_transport
+
+ def __init__(self, possible_transports=None):
+ self.tags = self._format.make_tags(self)
+ self._revision_history_cache = None
+ self._revision_id_to_revno_cache = None
+ self._partial_revision_id_to_revno_cache = {}
+ self._partial_revision_history_cache = []
+ self._tags_bytes = None
+ self._last_revision_info_cache = None
+ self._master_branch_cache = None
+ self._merge_sorted_revisions_cache = None
+ self._open_hook(possible_transports)
+ hooks = Branch.hooks['open']
+ for hook in hooks:
+ hook(self)
+
+ def _open_hook(self, possible_transports):
+ """Called by init to allow simpler extension of the base class."""
+
+ def _activate_fallback_location(self, url, possible_transports):
+ """Activate the branch/repository from url as a fallback repository."""
+ for existing_fallback_repo in self.repository._fallback_repositories:
+ if existing_fallback_repo.user_url == url:
+ # This fallback is already configured. This probably only
+ # happens because ControlDir.sprout is a horrible mess. To avoid
+ # confusing _unstack we don't add this a second time.
+ mutter('duplicate activation of fallback %r on %r', url, self)
+ return
+ repo = self._get_fallback_repository(url, possible_transports)
+ if repo.has_same_location(self.repository):
+ raise errors.UnstackableLocationError(self.user_url, url)
+ self.repository.add_fallback_repository(repo)
+
+ def break_lock(self):
+ """Break a lock if one is present from another instance.
+
+ Uses the ui factory to ask for confirmation if the lock may be from
+ an active process.
+
+ This will probe the repository for its lock as well.
+ """
+ self.control_files.break_lock()
+ self.repository.break_lock()
+ master = self.get_master_branch()
+ if master is not None:
+ master.break_lock()
+
+ def _check_stackable_repo(self):
+ if not self.repository._format.supports_external_lookups:
+ raise errors.UnstackableRepositoryFormat(self.repository._format,
+ self.repository.base)
+
+ def _extend_partial_history(self, stop_index=None, stop_revision=None):
+ """Extend the partial history to include a given index
+
+ If a stop_index is supplied, stop when that index has been reached.
+ If a stop_revision is supplied, stop when that revision is
+ encountered. Otherwise, stop when the beginning of history is
+ reached.
+
+ :param stop_index: The index which should be present. When it is
+ present, history extension will stop.
+ :param stop_revision: The revision id which should be present. When
+ it is encountered, history extension will stop.
+ """
+ if len(self._partial_revision_history_cache) == 0:
+ self._partial_revision_history_cache = [self.last_revision()]
+ repository._iter_for_revno(
+ self.repository, self._partial_revision_history_cache,
+ stop_index=stop_index, stop_revision=stop_revision)
+ if self._partial_revision_history_cache[-1] == _mod_revision.NULL_REVISION:
+ self._partial_revision_history_cache.pop()
+
+ def _get_check_refs(self):
+ """Get the references needed for check().
+
+ See bzrlib.check.
+ """
+ revid = self.last_revision()
+ return [('revision-existence', revid), ('lefthand-distance', revid)]
+
+ @staticmethod
+ def open(base, _unsupported=False, possible_transports=None):
+ """Open the branch rooted at base.
+
+ For instance, if the branch is at URL/.bzr/branch,
+ Branch.open(URL) -> a Branch instance.
+ """
+ control = controldir.ControlDir.open(base,
+ possible_transports=possible_transports, _unsupported=_unsupported)
+ return control.open_branch(unsupported=_unsupported,
+ possible_transports=possible_transports)
+
+ @staticmethod
+ def open_from_transport(transport, name=None, _unsupported=False,
+ possible_transports=None):
+ """Open the branch rooted at transport"""
+ control = controldir.ControlDir.open_from_transport(transport, _unsupported)
+ return control.open_branch(name=name, unsupported=_unsupported,
+ possible_transports=possible_transports)
+
+ @staticmethod
+ def open_containing(url, possible_transports=None):
+ """Open an existing branch which contains url.
+
+ This probes for a branch at url, and searches upwards from there.
+
+ Basically we keep looking up until we find a control directory or
+ run into the root. If no branch is found, NotBranchError is raised.
+ If a branch is found but its format is unrecognised or unsupported,
+ UnknownFormatError or UnsupportedFormatError is raised. Otherwise the
+ branch is returned, along with the unused portion of url.
+ """
+ control, relpath = controldir.ControlDir.open_containing(url,
+ possible_transports)
+ branch = control.open_branch(possible_transports=possible_transports)
+ return (branch, relpath)
+
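+ # Editor's sketch, not part of the original source: minimal client-side use
+ # of Branch.open and Branch.open_containing above. The URL and path are
+ # hypothetical.
+ #
+ #   from bzrlib.branch import Branch
+ #
+ #   # Open a branch whose root location is known exactly.
+ #   branch = Branch.open('http://example.com/code/trunk')
+ #
+ #   # Probe upwards from a path inside the tree; relpath is the part of
+ #   # the location that was not consumed while finding the control dir.
+ #   branch, relpath = Branch.open_containing('/home/user/trunk/src/module')
+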
+ def _push_should_merge_tags(self):
+ """Should _basic_push merge this branch's tags into the target?
+
+ The default implementation returns False if this branch has no tags,
+ and True the rest of the time. Subclasses may override this.
+ """
+ return self.supports_tags() and self.tags.get_tag_dict()
+
+ def get_config(self):
+ """Get a bzrlib.config.BranchConfig for this Branch.
+
+ This can then be used to get and set configuration options for the
+ branch.
+
+ :return: A bzrlib.config.BranchConfig.
+ """
+ return _mod_config.BranchConfig(self)
+
+ def get_config_stack(self):
+ """Get a bzrlib.config.BranchStack for this Branch.
+
+ This can then be used to get and set configuration options for the
+ branch.
+
+ :return: A bzrlib.config.BranchStack.
+ """
+ return _mod_config.BranchStack(self)
+
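+ # Editor's sketch, not part of the original source: reading and writing a
+ # branch option through the stack returned by get_config_stack(). The
+ # option name 'append_revisions_only' is the one consulted by
+ # get_append_revisions_only() further down; the rest is illustrative.
+ #
+ #   conf = branch.get_config_stack()
+ #   only_append = conf.get('append_revisions_only')
+ #   conf.set('append_revisions_only', True)
+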
+ def _get_config(self):
+ """Get the concrete config for just the config in this branch.
+
+ This is not intended for client use; see Branch.get_config for the
+ public API.
+
+ Added in 1.14.
+
+ :return: An object supporting get_option and set_option.
+ """
+ raise NotImplementedError(self._get_config)
+
+ def _get_fallback_repository(self, url, possible_transports):
+ """Get the repository we fallback to at url."""
+ url = urlutils.join(self.base, url)
+ a_branch = Branch.open(url, possible_transports=possible_transports)
+ return a_branch.repository
+
+ @needs_read_lock
+ def _get_tags_bytes(self):
+ """Get the bytes of a serialised tags dict.
+
+ Note that not all branches support tags, nor do all use the same tags
+ logic: this method is specific to BasicTags. Other tag implementations
+ may use the same method name and behave differently, safely, because
+ of the double-dispatch via
+ format.make_tags->tags_instance->get_tags_dict.
+
+ :return: The bytes of the tags file.
+ :seealso: Branch._set_tags_bytes.
+ """
+ if self._tags_bytes is None:
+ self._tags_bytes = self._transport.get_bytes('tags')
+ return self._tags_bytes
+
+ def _get_nick(self, local=False, possible_transports=None):
+ config = self.get_config()
+ # explicit overrides master, but don't look for master if local is True
+ if not local and not config.has_explicit_nickname():
+ try:
+ master = self.get_master_branch(possible_transports)
+ if master and self.user_url == master.user_url:
+ raise errors.RecursiveBind(self.user_url)
+ if master is not None:
+ # return the master branch value
+ return master.nick
+ except errors.RecursiveBind, e:
+ raise e
+ except errors.BzrError, e:
+ # Silently fall back to local implicit nick if the master is
+ # unavailable
+ mutter("Could not connect to bound branch, "
+ "falling back to local nick.\n " + str(e))
+ return config.get_nickname()
+
+ def _set_nick(self, nick):
+ self.get_config().set_user_option('nickname', nick, warn_masked=True)
+
+ nick = property(_get_nick, _set_nick)
+
+ def is_locked(self):
+ raise NotImplementedError(self.is_locked)
+
+ def _lefthand_history(self, revision_id, last_rev=None,
+ other_branch=None):
+ if 'evil' in debug.debug_flags:
+ mutter_callsite(4, "_lefthand_history scales with history.")
+ # stop_revision must be a descendant of last_revision
+ graph = self.repository.get_graph()
+ if last_rev is not None:
+ if not graph.is_ancestor(last_rev, revision_id):
+ # our previous tip is not merged into stop_revision
+ raise errors.DivergedBranches(self, other_branch)
+ # make a new revision history from the graph
+ parents_map = graph.get_parent_map([revision_id])
+ if revision_id not in parents_map:
+ raise errors.NoSuchRevision(self, revision_id)
+ current_rev_id = revision_id
+ new_history = []
+ check_not_reserved_id = _mod_revision.check_not_reserved_id
+ # Do not include ghosts or graph origin in revision_history
+ while (current_rev_id in parents_map and
+ len(parents_map[current_rev_id]) > 0):
+ check_not_reserved_id(current_rev_id)
+ new_history.append(current_rev_id)
+ current_rev_id = parents_map[current_rev_id][0]
+ parents_map = graph.get_parent_map([current_rev_id])
+ new_history.reverse()
+ return new_history
+
+ def lock_write(self, token=None):
+ """Lock the branch for write operations.
+
+ :param token: A token to permit reacquiring a previously held and
+ preserved lock.
+ :return: A BranchWriteLockResult.
+ """
+ raise NotImplementedError(self.lock_write)
+
+ def lock_read(self):
+ """Lock the branch for read operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ raise NotImplementedError(self.lock_read)
+
+ def unlock(self):
+ raise NotImplementedError(self.unlock)
+
+ def peek_lock_mode(self):
+ """Return lock mode for the Branch: 'r', 'w' or None"""
+ raise NotImplementedError(self.peek_lock_mode)
+
+ def get_physical_lock_status(self):
+ raise NotImplementedError(self.get_physical_lock_status)
+
+ @needs_read_lock
+ def dotted_revno_to_revision_id(self, revno, _cache_reverse=False):
+ """Return the revision_id for a dotted revno.
+
+ :param revno: a tuple like (1,) or (1,1,2)
+ :param _cache_reverse: a private parameter enabling storage
+ of the reverse mapping in a top level cache. (This should
+ only be done in selective circumstances as we want to
+ avoid having the mapping cached multiple times.)
+ :return: the revision_id
+ :raises errors.NoSuchRevision: if the revno doesn't exist
+ """
+ rev_id = self._do_dotted_revno_to_revision_id(revno)
+ if _cache_reverse:
+ self._partial_revision_id_to_revno_cache[rev_id] = revno
+ return rev_id
+
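+ # Editor's sketch, not part of the original source: dotted revno lookup in
+ # both directions (see revision_id_to_dotted_revno below). The tuple
+ # (3, 1, 2) corresponds to the revno "3.1.2" shown by bzr log for merged
+ # revisions; the revision is assumed to exist.
+ #
+ #   rev_id = branch.dotted_revno_to_revision_id((3, 1, 2))
+ #   assert branch.revision_id_to_dotted_revno(rev_id) == (3, 1, 2)
+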
+ def _do_dotted_revno_to_revision_id(self, revno):
+ """Worker function for dotted_revno_to_revision_id.
+
+ Subclasses should override this if they wish to
+ provide a more efficient implementation.
+ """
+ if len(revno) == 1:
+ return self.get_rev_id(revno[0])
+ revision_id_to_revno = self.get_revision_id_to_revno_map()
+ revision_ids = [revision_id for revision_id, this_revno
+ in revision_id_to_revno.iteritems()
+ if revno == this_revno]
+ if len(revision_ids) == 1:
+ return revision_ids[0]
+ else:
+ revno_str = '.'.join(map(str, revno))
+ raise errors.NoSuchRevision(self, revno_str)
+
+ @needs_read_lock
+ def revision_id_to_dotted_revno(self, revision_id):
+ """Given a revision id, return its dotted revno.
+
+ :return: a tuple like (1,) or (400,1,3).
+ """
+ return self._do_revision_id_to_dotted_revno(revision_id)
+
+ def _do_revision_id_to_dotted_revno(self, revision_id):
+ """Worker function for revision_id_to_revno."""
+ # Try the caches if they are loaded
+ result = self._partial_revision_id_to_revno_cache.get(revision_id)
+ if result is not None:
+ return result
+ if self._revision_id_to_revno_cache:
+ result = self._revision_id_to_revno_cache.get(revision_id)
+ if result is None:
+ raise errors.NoSuchRevision(self, revision_id)
+ # Try the mainline as it's optimised
+ try:
+ revno = self.revision_id_to_revno(revision_id)
+ return (revno,)
+ except errors.NoSuchRevision:
+ # We need to load and use the full revno map after all
+ result = self.get_revision_id_to_revno_map().get(revision_id)
+ if result is None:
+ raise errors.NoSuchRevision(self, revision_id)
+ return result
+
+ @needs_read_lock
+ def get_revision_id_to_revno_map(self):
+ """Return the revision_id => dotted revno map.
+
+ This will be regenerated on demand, but will be cached.
+
+ :return: A dictionary mapping revision_id => dotted revno.
+ This dictionary should not be modified by the caller.
+ """
+ if self._revision_id_to_revno_cache is not None:
+ mapping = self._revision_id_to_revno_cache
+ else:
+ mapping = self._gen_revno_map()
+ self._cache_revision_id_to_revno(mapping)
+ # TODO: jam 20070417 Since this is being cached, should we be returning
+ # a copy?
+ # I would rather not, and instead just declare that users should not
+ # modify the return value.
+ return mapping
+
+ def _gen_revno_map(self):
+ """Create a new mapping from revision ids to dotted revnos.
+
+ Dotted revnos are generated based on the current tip in the revision
+ history.
+ This is the worker function for get_revision_id_to_revno_map, which
+ just caches the return value.
+
+ :return: A dictionary mapping revision_id => dotted revno.
+ """
+ revision_id_to_revno = dict((rev_id, revno)
+ for rev_id, depth, revno, end_of_merge
+ in self.iter_merge_sorted_revisions())
+ return revision_id_to_revno
+
+ @needs_read_lock
+ def iter_merge_sorted_revisions(self, start_revision_id=None,
+ stop_revision_id=None, stop_rule='exclude', direction='reverse'):
+ """Walk the revisions for a branch in merge sorted order.
+
+ Merge sorted order is the output from a merge-aware,
+ topological sort, i.e. all parents come before their
+ children going forward; the opposite for reverse.
+
+ :param start_revision_id: the revision_id to begin walking from.
+ If None, the branch tip is used.
+ :param stop_revision_id: the revision_id to terminate the walk
+ after. If None, the rest of history is included.
+ :param stop_rule: if stop_revision_id is not None, the precise rule
+ to use for termination:
+
+ * 'exclude' - leave the stop revision out of the result (default)
+ * 'include' - the stop revision is the last item in the result
+ * 'with-merges' - include the stop revision and all of its
+ merged revisions in the result
+ * 'with-merges-without-common-ancestry' - filter out revisions
+ that are in both ancestries
+ :param direction: either 'reverse' or 'forward':
+
+ * reverse means return the start_revision_id first, i.e.
+ start at the most recent revision and go backwards in history
+ * forward returns tuples in the opposite order to reverse.
+ Note in particular that forward does *not* do any intelligent
+ ordering w.r.t. depth as some clients of this API may like.
+ (If required, that ought to be done at higher layers.)
+
+ :return: an iterator over (revision_id, depth, revno, end_of_merge)
+ tuples where:
+
+ * revision_id: the unique id of the revision
+ * depth: How many levels of merging deep this node has been
+ found.
+ * revno: the dotted revno for the revision. The format is
+ (REVNO,) for mainline revisions and (REVNO, BRANCHNUM,
+ BRANCHREVNO) for merged revisions, where BRANCHNUM is the
+ number of the branch the revision is on and BRANCHREVNO is
+ its sequence number within that branch.
+ * end_of_merge: When True the next node (earlier in history) is
+ part of a different merge.
+ """
+ # Note: depth and revno values are in the context of the branch so
+ # we need the full graph to get stable numbers, regardless of the
+ # start_revision_id.
+ if self._merge_sorted_revisions_cache is None:
+ last_revision = self.last_revision()
+ known_graph = self.repository.get_known_graph_ancestry(
+ [last_revision])
+ self._merge_sorted_revisions_cache = known_graph.merge_sort(
+ last_revision)
+ filtered = self._filter_merge_sorted_revisions(
+ self._merge_sorted_revisions_cache, start_revision_id,
+ stop_revision_id, stop_rule)
+ # Make sure we don't return revisions that are not part of the
+ # start_revision_id ancestry.
+ filtered = self._filter_start_non_ancestors(filtered)
+ if direction == 'reverse':
+ return filtered
+ if direction == 'forward':
+ return reversed(list(filtered))
+ else:
+ raise ValueError('invalid direction %r' % direction)
+
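+ # Editor's sketch, not part of the original source: walking the merge
+ # sorted graph described in the docstring above. 'some_revid' is a
+ # placeholder revision id.
+ #
+ #   for rev_id, depth, revno, end_of_merge in \
+ #           branch.iter_merge_sorted_revisions(
+ #               stop_revision_id=some_revid, stop_rule='include'):
+ #       # depth 0 is the mainline; larger depths are merged revisions.
+ #       # revno is a dotted revno tuple such as (3,) or (3, 1, 2).
+ #       pass
+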
+ def _filter_merge_sorted_revisions(self, merge_sorted_revisions,
+ start_revision_id, stop_revision_id, stop_rule):
+ """Iterate over an inclusive range of sorted revisions."""
+ rev_iter = iter(merge_sorted_revisions)
+ if start_revision_id is not None:
+ for node in rev_iter:
+ rev_id = node.key
+ if rev_id != start_revision_id:
+ continue
+ else:
+ # The decision to include the start or not
+ # depends on the stop_rule if a stop is provided
+ # so pop this node back into the iterator
+ rev_iter = itertools.chain(iter([node]), rev_iter)
+ break
+ if stop_revision_id is None:
+ # Yield everything
+ for node in rev_iter:
+ rev_id = node.key
+ yield (rev_id, node.merge_depth, node.revno,
+ node.end_of_merge)
+ elif stop_rule == 'exclude':
+ for node in rev_iter:
+ rev_id = node.key
+ if rev_id == stop_revision_id:
+ return
+ yield (rev_id, node.merge_depth, node.revno,
+ node.end_of_merge)
+ elif stop_rule == 'include':
+ for node in rev_iter:
+ rev_id = node.key
+ yield (rev_id, node.merge_depth, node.revno,
+ node.end_of_merge)
+ if rev_id == stop_revision_id:
+ return
+ elif stop_rule == 'with-merges-without-common-ancestry':
+ # We want to exclude all revisions that are already part of the
+ # stop_revision_id ancestry.
+ graph = self.repository.get_graph()
+ ancestors = graph.find_unique_ancestors(start_revision_id,
+ [stop_revision_id])
+ for node in rev_iter:
+ rev_id = node.key
+ if rev_id not in ancestors:
+ continue
+ yield (rev_id, node.merge_depth, node.revno,
+ node.end_of_merge)
+ elif stop_rule == 'with-merges':
+ stop_rev = self.repository.get_revision(stop_revision_id)
+ if stop_rev.parent_ids:
+ left_parent = stop_rev.parent_ids[0]
+ else:
+ left_parent = _mod_revision.NULL_REVISION
+ # left_parent is the actual revision we want to stop logging at,
+ # since we want to show the merged revisions after the stop_rev too
+ reached_stop_revision_id = False
+ revision_id_whitelist = []
+ for node in rev_iter:
+ rev_id = node.key
+ if rev_id == left_parent:
+ # reached the left parent after the stop_revision
+ return
+ if (not reached_stop_revision_id or
+ rev_id in revision_id_whitelist):
+ yield (rev_id, node.merge_depth, node.revno,
+ node.end_of_merge)
+ if reached_stop_revision_id or rev_id == stop_revision_id:
+ # only do the merged revs of rev_id from now on
+ rev = self.repository.get_revision(rev_id)
+ if rev.parent_ids:
+ reached_stop_revision_id = True
+ revision_id_whitelist.extend(rev.parent_ids)
+ else:
+ raise ValueError('invalid stop_rule %r' % stop_rule)
+
+ def _filter_start_non_ancestors(self, rev_iter):
+ # If we started from a dotted revno, we want to consider it as a tip
+ # and don't want to yield revisions that are not part of its
+ # ancestry. Given the order guaranteed by the merge sort, we will see
+ # uninteresting descendants of the first parent of our tip before the
+ # tip itself.
+ first = rev_iter.next()
+ (rev_id, merge_depth, revno, end_of_merge) = first
+ yield first
+ if not merge_depth:
+ # We start at a mainline revision so by definition, all other
+ # revisions in rev_iter are ancestors
+ for node in rev_iter:
+ yield node
+
+ clean = False
+ whitelist = set()
+ pmap = self.repository.get_parent_map([rev_id])
+ parents = pmap.get(rev_id, [])
+ if parents:
+ whitelist.update(parents)
+ else:
+ # If there are no parents, there is nothing of interest left
+
+ # FIXME: It's hard to test this scenario here as this code is never
+ # called in that case. -- vila 20100322
+ return
+
+ for (rev_id, merge_depth, revno, end_of_merge) in rev_iter:
+ if not clean:
+ if rev_id in whitelist:
+ pmap = self.repository.get_parent_map([rev_id])
+ parents = pmap.get(rev_id, [])
+ whitelist.remove(rev_id)
+ whitelist.update(parents)
+ if merge_depth == 0:
+ # We've reached the mainline, there is nothing left to
+ # filter
+ clean = True
+ else:
+ # A revision that is not part of the ancestry of our
+ # starting revision.
+ continue
+ yield (rev_id, merge_depth, revno, end_of_merge)
+
+ def leave_lock_in_place(self):
+ """Tell this branch object not to release the physical lock when this
+ object is unlocked.
+
+ If lock_write doesn't return a token, then this method is not supported.
+ """
+ self.control_files.leave_in_place()
+
+ def dont_leave_lock_in_place(self):
+ """Tell this branch object to release the physical lock when this
+ object is unlocked, even if it didn't originally acquire it.
+
+ If lock_write doesn't return a token, then this method is not supported.
+ """
+ self.control_files.dont_leave_in_place()
+
+ def bind(self, other):
+ """Bind the local branch the other branch.
+
+ :param other: The branch to bind to
+ :type other: Branch
+ """
+ raise errors.UpgradeRequired(self.user_url)
+
+ def get_append_revisions_only(self):
+ """Whether it is only possible to append revisions to the history.
+ """
+ if not self._format.supports_set_append_revisions_only():
+ return False
+ return self.get_config_stack().get('append_revisions_only')
+
+ def set_append_revisions_only(self, enabled):
+ if not self._format.supports_set_append_revisions_only():
+ raise errors.UpgradeRequired(self.user_url)
+ self.get_config_stack().set('append_revisions_only', enabled)
+
+ def set_reference_info(self, file_id, tree_path, branch_location):
+ """Set the branch location to use for a tree reference."""
+ raise errors.UnsupportedOperation(self.set_reference_info, self)
+
+ def get_reference_info(self, file_id):
+ """Get the tree_path and branch_location for a tree reference."""
+ raise errors.UnsupportedOperation(self.get_reference_info, self)
+
+ @needs_write_lock
+ def fetch(self, from_branch, last_revision=None, limit=None):
+ """Copy revisions from from_branch into this branch.
+
+ :param from_branch: Where to copy from.
+ :param last_revision: What revision to stop at (None for the tip
+ of the branch).
+ :param limit: Optional rough limit of revisions to fetch
+ :return: None
+ """
+ return InterBranch.get(from_branch, self).fetch(last_revision, limit=limit)
+
+ def get_bound_location(self):
+ """Return the URL of the branch we are bound to.
+
+ Older format branches cannot bind; please be sure to use a metadir
+ branch.
+ """
+ return None
+
+ def get_old_bound_location(self):
+ """Return the URL of the branch we used to be bound to
+ """
+ raise errors.UpgradeRequired(self.user_url)
+
+ def get_commit_builder(self, parents, config_stack=None, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ """Obtain a CommitBuilder for this branch.
+
+ :param parents: Revision ids of the parents of the new revision.
+ :param config_stack: Optional configuration stack to use.
+ :param timestamp: Optional timestamp recorded for commit.
+ :param timezone: Optional timezone for timestamp.
+ :param committer: Optional committer to set for commit.
+ :param revprops: Optional dictionary of revision properties.
+ :param revision_id: Optional revision id.
+ :param lossy: Whether to discard data that can not be natively
+ represented, when pushing to a foreign VCS.
+ """
+
+ if config_stack is None:
+ config_stack = self.get_config_stack()
+
+ return self.repository.get_commit_builder(self, parents, config_stack,
+ timestamp, timezone, committer, revprops, revision_id,
+ lossy)
+
+ def get_master_branch(self, possible_transports=None):
+ """Return the branch we are bound to.
+
+ :return: Either a Branch, or None
+ """
+ return None
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def get_revision_delta(self, revno):
+ """Return the delta for one revision.
+
+ The delta is relative to its mainline predecessor, or the
+ empty tree for revision 1.
+ """
+ try:
+ revid = self.get_rev_id(revno)
+ except errors.NoSuchRevision:
+ raise errors.InvalidRevisionNumber(revno)
+ return self.repository.get_revision_delta(revid)
+
+ def get_stacked_on_url(self):
+ """Get the URL this branch is stacked against.
+
+ :raises NotStacked: If the branch is not stacked.
+ :raises UnstackableBranchFormat: If the branch does not support
+ stacking.
+ """
+ raise NotImplementedError(self.get_stacked_on_url)
+
+ def print_file(self, file, revision_id):
+ """Print `file` to stdout."""
+ raise NotImplementedError(self.print_file)
+
+ @needs_write_lock
+ def set_last_revision_info(self, revno, revision_id):
+ """Set the last revision of this branch.
+
+ The caller is responsible for checking that the revno is correct
+ for this revision id.
+
+ It may be possible to set the branch last revision to an id not
+ present in the repository. However, branches can also be
+ configured to check constraints on history, in which case this may not
+ be permitted.
+ """
+ raise NotImplementedError(self.set_last_revision_info)
+
+ @needs_write_lock
+ def generate_revision_history(self, revision_id, last_rev=None,
+ other_branch=None):
+ """See Branch.generate_revision_history"""
+ graph = self.repository.get_graph()
+ (last_revno, last_revid) = self.last_revision_info()
+ known_revision_ids = [
+ (last_revid, last_revno),
+ (_mod_revision.NULL_REVISION, 0),
+ ]
+ if last_rev is not None:
+ if not graph.is_ancestor(last_rev, revision_id):
+ # our previous tip is not merged into stop_revision
+ raise errors.DivergedBranches(self, other_branch)
+ revno = graph.find_distance_to_null(revision_id, known_revision_ids)
+ self.set_last_revision_info(revno, revision_id)
+
+ @needs_write_lock
+ def set_parent(self, url):
+ """See Branch.set_parent."""
+ # TODO: Maybe delete old location files?
+ # URLs should never be unicode, even on the local fs,
+ # FIXUP this and get_parent in a future branch format bump:
+ # read and rewrite the file. RBC 20060125
+ if url is not None:
+ if isinstance(url, unicode):
+ try:
+ url = url.encode('ascii')
+ except UnicodeEncodeError:
+ raise errors.InvalidURL(url,
+ "Urls must be 7-bit ascii, "
+ "use bzrlib.urlutils.escape")
+ url = urlutils.relative_url(self.base, url)
+ self._set_parent_location(url)
+
+ @needs_write_lock
+ def set_stacked_on_url(self, url):
+ """Set the URL this branch is stacked against.
+
+ :raises UnstackableBranchFormat: If the branch does not support
+ stacking.
+ :raises UnstackableRepositoryFormat: If the repository does not support
+ stacking.
+ """
+ if not self._format.supports_stacking():
+ raise errors.UnstackableBranchFormat(self._format, self.user_url)
+ # XXX: Changing from one fallback repository to another does not check
+ # that all the data you need is present in the new fallback.
+ # Possibly it should.
+ self._check_stackable_repo()
+ if not url:
+ try:
+ old_url = self.get_stacked_on_url()
+ except (errors.NotStacked, errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat):
+ return
+ self._unstack()
+ else:
+ self._activate_fallback_location(url,
+ possible_transports=[self.bzrdir.root_transport])
+ # write this out after the repository is stacked to avoid setting a
+ # stacked config that doesn't work.
+ self._set_config_location('stacked_on_location', url)
+
+ def _unstack(self):
+ """Change a branch to be unstacked, copying data as needed.
+
+ Don't call this directly, use set_stacked_on_url(None).
+ """
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ pb.update(gettext("Unstacking"))
+ # The basic approach here is to fetch the tip of the branch,
+ # including all available ghosts, from the existing stacked
+ # repository into a new repository object without the fallbacks.
+ #
+ # XXX: See <https://launchpad.net/bugs/397286> - this may not be
+ # correct for CHKMap repositories
+ old_repository = self.repository
+ if len(old_repository._fallback_repositories) != 1:
+ raise AssertionError("can't cope with fallback repositories "
+ "of %r (fallbacks: %r)" % (old_repository,
+ old_repository._fallback_repositories))
+ # Open the new repository object.
+ # Repositories don't offer an interface to remove fallback
+ # repositories today; take the conceptually simpler option and just
+ # reopen it. We reopen it starting from the URL so that we
+ # get a separate connection for RemoteRepositories and can
+ # stream from one of them to the other. This does mean doing
+ # separate SSH connection setup, but unstacking is not a
+ # common operation so it's tolerable.
+ new_bzrdir = controldir.ControlDir.open(
+ self.bzrdir.root_transport.base)
+ new_repository = new_bzrdir.find_repository()
+ if new_repository._fallback_repositories:
+ raise AssertionError("didn't expect %r to have "
+ "fallback_repositories"
+ % (self.repository,))
+ # Replace self.repository with the new repository.
+ # Do our best to transfer the lock state (i.e. lock-tokens and
+ # lock count) of self.repository to the new repository.
+ lock_token = old_repository.lock_write().repository_token
+ self.repository = new_repository
+ if isinstance(self, remote.RemoteBranch):
+ # Remote branches can have a second reference to the old
+ # repository that needs to be replaced.
+ if self._real_branch is not None:
+ self._real_branch.repository = new_repository
+ self.repository.lock_write(token=lock_token)
+ if lock_token is not None:
+ old_repository.leave_lock_in_place()
+ old_repository.unlock()
+ if lock_token is not None:
+ # XXX: self.repository.leave_lock_in_place() before this
+ # function will not be preserved. Fortunately that doesn't
+ # affect the current default format (2a), and would be a
+ # corner-case anyway.
+ # - Andrew Bennetts, 2010/06/30
+ self.repository.dont_leave_lock_in_place()
+ old_lock_count = 0
+ while True:
+ try:
+ old_repository.unlock()
+ except errors.LockNotHeld:
+ break
+ old_lock_count += 1
+ if old_lock_count == 0:
+ raise AssertionError(
+ 'old_repository should have been locked at least once.')
+ for i in range(old_lock_count-1):
+ self.repository.lock_write()
+ # Fetch from the old repository into the new.
+ old_repository.lock_read()
+ try:
+ # XXX: If you unstack a branch while it has a working tree
+ # with a pending merge, the pending-merged revisions will no
+ # longer be present. You can (probably) revert and remerge.
+ try:
+ tags_to_fetch = set(self.tags.get_reverse_tag_dict())
+ except errors.TagsNotSupported:
+ tags_to_fetch = set()
+ fetch_spec = vf_search.NotInOtherForRevs(self.repository,
+ old_repository, required_ids=[self.last_revision()],
+ if_present_ids=tags_to_fetch, find_ghosts=True).execute()
+ self.repository.fetch(old_repository, fetch_spec=fetch_spec)
+ finally:
+ old_repository.unlock()
+ finally:
+ pb.finished()
+
+ def _set_tags_bytes(self, bytes):
+ """Mirror method for _get_tags_bytes.
+
+ :seealso: Branch._get_tags_bytes.
+ """
+ op = cleanup.OperationWithCleanups(self._set_tags_bytes_locked)
+ op.add_cleanup(self.lock_write().unlock)
+ return op.run_simple(bytes)
+
+ def _set_tags_bytes_locked(self, bytes):
+ self._tags_bytes = bytes
+ return self._transport.put_bytes('tags', bytes)
+
+ def _cache_revision_history(self, rev_history):
+ """Set the cached revision history to rev_history.
+
+ The revision_history method will use this cache to avoid regenerating
+ the revision history.
+
+ This API is semi-public; it is only for use by subclasses, all other
+ code should consider it to be private.
+ """
+ self._revision_history_cache = rev_history
+
+ def _cache_revision_id_to_revno(self, revision_id_to_revno):
+ """Set the cached revision_id => revno map to revision_id_to_revno.
+
+ This API is semi-public; it is only for use by subclasses, all other
+ code should consider it to be private.
+ """
+ self._revision_id_to_revno_cache = revision_id_to_revno
+
+ def _clear_cached_state(self):
+ """Clear any cached data on this branch, e.g. cached revision history.
+
+ This means the next call to revision_history will need to call
+ _gen_revision_history.
+
+ This API is semi-public; it is only for use by subclasses, all other
+ code should consider it to be private.
+ """
+ self._revision_history_cache = None
+ self._revision_id_to_revno_cache = None
+ self._last_revision_info_cache = None
+ self._master_branch_cache = None
+ self._merge_sorted_revisions_cache = None
+ self._partial_revision_history_cache = []
+ self._partial_revision_id_to_revno_cache = {}
+ self._tags_bytes = None
+
+ def _gen_revision_history(self):
+ """Return sequence of revision hashes on to this branch.
+
+ Unlike revision_history, this method always regenerates or rereads the
+ revision history, i.e. it does not cache the result, so repeated calls
+ may be expensive.
+
+ Concrete subclasses should override this instead of revision_history so
+ that subclasses do not need to deal with caching logic.
+
+ This API is semi-public; it is only for use by subclasses, all other
+ code should consider it to be private.
+ """
+ raise NotImplementedError(self._gen_revision_history)
+
+ def _revision_history(self):
+ if 'evil' in debug.debug_flags:
+ mutter_callsite(3, "revision_history scales with history.")
+ if self._revision_history_cache is not None:
+ history = self._revision_history_cache
+ else:
+ history = self._gen_revision_history()
+ self._cache_revision_history(history)
+ return list(history)
+
+ def revno(self):
+ """Return current revision number for this branch.
+
+ That is equivalent to the number of revisions committed to
+ this branch.
+ """
+ return self.last_revision_info()[0]
+
+ def unbind(self):
+ """Older format branches cannot bind or unbind."""
+ raise errors.UpgradeRequired(self.user_url)
+
+ def last_revision(self):
+ """Return last revision id, or NULL_REVISION."""
+ return self.last_revision_info()[1]
+
+ @needs_read_lock
+ def last_revision_info(self):
+ """Return information about the last revision.
+
+ :return: A tuple (revno, revision_id).
+ """
+ if self._last_revision_info_cache is None:
+ self._last_revision_info_cache = self._read_last_revision_info()
+ return self._last_revision_info_cache
+
+ def _read_last_revision_info(self):
+ raise NotImplementedError(self._read_last_revision_info)
+
+ def import_last_revision_info_and_tags(self, source, revno, revid,
+ lossy=False):
+ """Set the last revision info, importing from another repo if necessary.
+
+ This is used by the bound branch code to upload a revision to
+ the master branch first before updating the tip of the local branch.
+ Revisions referenced by source's tags are also transferred.
+
+ :param source: Source branch to optionally fetch from
+ :param revno: Revision number of the new tip
+ :param revid: Revision id of the new tip
+ :param lossy: Whether to discard metadata that can not be
+ natively represented
+ :return: Tuple with the new revision number and revision id
+ (should only be different from the arguments when lossy=True)
+ """
+ if not self.repository.has_same_location(source.repository):
+ self.fetch(source, revid)
+ self.set_last_revision_info(revno, revid)
+ return (revno, revid)
+
+ def revision_id_to_revno(self, revision_id):
+ """Given a revision id, return its revno"""
+ if _mod_revision.is_null(revision_id):
+ return 0
+ history = self._revision_history()
+ try:
+ return history.index(revision_id) + 1
+ except ValueError:
+ raise errors.NoSuchRevision(self, revision_id)
+
+ @needs_read_lock
+ def get_rev_id(self, revno, history=None):
+ """Find the revision id of the specified revno."""
+ if revno == 0:
+ return _mod_revision.NULL_REVISION
+ last_revno, last_revid = self.last_revision_info()
+ if revno == last_revno:
+ return last_revid
+ if revno <= 0 or revno > last_revno:
+ raise errors.NoSuchRevision(self, revno)
+ distance_from_last = last_revno - revno
+ if len(self._partial_revision_history_cache) <= distance_from_last:
+ self._extend_partial_history(distance_from_last)
+ return self._partial_revision_history_cache[distance_from_last]
+
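+ # Editor's sketch, not part of the original source: the index arithmetic
+ # used by get_rev_id above, with made-up numbers. With a tip revno of 10,
+ # asking for revno 7 gives distance_from_last = 10 - 7 = 3; the partial
+ # history cache (which starts at the tip) is extended to at least four
+ # entries and entry [3] is the answer.
+ #
+ #   last_revno, last_revid = branch.last_revision_info()   # e.g. (10, ...)
+ #   rev_id = branch.get_rev_id(7)
+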
+ def pull(self, source, overwrite=False, stop_revision=None,
+ possible_transports=None, *args, **kwargs):
+ """Mirror source into this branch.
+
+ This branch is considered to be 'local', having low latency.
+
+ :returns: PullResult instance
+ """
+ return InterBranch.get(source, self).pull(overwrite=overwrite,
+ stop_revision=stop_revision,
+ possible_transports=possible_transports, *args, **kwargs)
+
+ def push(self, target, overwrite=False, stop_revision=None, lossy=False,
+ *args, **kwargs):
+ """Mirror this branch into target.
+
+ This branch is considered to be 'local', having low latency.
+ """
+ return InterBranch.get(self, target).push(overwrite, stop_revision,
+ lossy, *args, **kwargs)
+
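+ # Editor's sketch, not part of the original source: mirroring with the two
+ # methods above. Locations are hypothetical. pull treats self as the
+ # local, low-latency end; push treats the target as the remote end.
+ #
+ #   local = Branch.open('/home/user/trunk')
+ #   upstream = Branch.open('http://example.com/code/trunk')
+ #   local.pull(upstream)                    # returns a PullResult
+ #   local.push(upstream, overwrite=False)   # returns a push result object
+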
+ def basis_tree(self):
+ """Return `Tree` object for last revision."""
+ return self.repository.revision_tree(self.last_revision())
+
+ def get_parent(self):
+ """Return the parent location of the branch.
+
+ This is the default location for pull/missing. The usual
+ pattern is that the user can override it by specifying a
+ location.
+ """
+ parent = self._get_parent_location()
+ if parent is None:
+ return parent
+ # This is an old-format absolute path to a local branch
+ # turn it into a url
+ if parent.startswith('/'):
+ parent = urlutils.local_path_to_url(parent.decode('utf8'))
+ try:
+ return urlutils.join(self.base[:-1], parent)
+ except errors.InvalidURLJoin, e:
+ raise errors.InaccessibleParent(parent, self.user_url)
+
+ def _get_parent_location(self):
+ raise NotImplementedError(self._get_parent_location)
+
+ def _set_config_location(self, name, url, config=None,
+ make_relative=False):
+ if config is None:
+ config = self.get_config_stack()
+ if url is None:
+ url = ''
+ elif make_relative:
+ url = urlutils.relative_url(self.base, url)
+ config.set(name, url)
+
+ def _get_config_location(self, name, config=None):
+ if config is None:
+ config = self.get_config_stack()
+ location = config.get(name)
+ if location == '':
+ location = None
+ return location
+
+ def get_child_submit_format(self):
+ """Return the preferred format of submissions to this branch."""
+ return self.get_config_stack().get('child_submit_format')
+
+ def get_submit_branch(self):
+ """Return the submit location of the branch.
+
+ This is the default location for bundle. The usual
+ pattern is that the user can override it by specifying a
+ location.
+ """
+ return self.get_config_stack().get('submit_branch')
+
+ def set_submit_branch(self, location):
+ """Return the submit location of the branch.
+
+ This is the default location for bundle. The usual
+ pattern is that the user can override it by specifying a
+ location.
+ """
+ self.get_config_stack().set('submit_branch', location)
+
+ def get_public_branch(self):
+ """Return the public location of the branch.
+
+ This is used by merge directives.
+ """
+ return self._get_config_location('public_branch')
+
+ def set_public_branch(self, location):
+ """Return the submit location of the branch.
+
+ This is the default location for bundle. The usual
+ pattern is that the user can override it by specifying a
+ location.
+ """
+ self._set_config_location('public_branch', location)
+
+ def get_push_location(self):
+ """Return None or the location to push this branch to."""
+ return self.get_config_stack().get('push_location')
+
+ def set_push_location(self, location):
+ """Set a new push location for this branch."""
+ raise NotImplementedError(self.set_push_location)
+
+ def _run_post_change_branch_tip_hooks(self, old_revno, old_revid):
+ """Run the post_change_branch_tip hooks."""
+ hooks = Branch.hooks['post_change_branch_tip']
+ if not hooks:
+ return
+ new_revno, new_revid = self.last_revision_info()
+ params = ChangeBranchTipParams(
+ self, old_revno, new_revno, old_revid, new_revid)
+ for hook in hooks:
+ hook(params)
+
+ def _run_pre_change_branch_tip_hooks(self, new_revno, new_revid):
+ """Run the pre_change_branch_tip hooks."""
+ hooks = Branch.hooks['pre_change_branch_tip']
+ if not hooks:
+ return
+ old_revno, old_revid = self.last_revision_info()
+ params = ChangeBranchTipParams(
+ self, old_revno, new_revno, old_revid, new_revid)
+ for hook in hooks:
+ hook(params)
+
+ @needs_write_lock
+ def update(self):
+ """Synchronise this branch with the master branch if any.
+
+ :return: None or the last_revision pivoted out during the update.
+ """
+ return None
+
+ def check_revno(self, revno):
+ """\
+ Check whether a revno corresponds to any revision.
+ Zero (the NULL revision) is considered valid.
+ """
+ if revno != 0:
+ self.check_real_revno(revno)
+
+ def check_real_revno(self, revno):
+ """\
+ Check whether a revno corresponds to a real revision.
+ Zero (the NULL revision) is considered invalid
+ """
+ if revno < 1 or revno > self.revno():
+ raise errors.InvalidRevisionNumber(revno)
+
+ @needs_read_lock
+ def clone(self, to_bzrdir, revision_id=None, repository_policy=None):
+ """Clone this branch into to_bzrdir preserving all semantic values.
+
+ Most API users will want 'create_clone_on_transport', which creates a
+ new bzrdir and branch on the fly.
+
+ revision_id: if not None, the revision history in the new branch will
+ be truncated to end with revision_id.
+ """
+ result = to_bzrdir.create_branch()
+ result.lock_write()
+ try:
+ if repository_policy is not None:
+ repository_policy.configure_branch(result)
+ self.copy_content_into(result, revision_id=revision_id)
+ finally:
+ result.unlock()
+ return result
+
+ @needs_read_lock
+ def sprout(self, to_bzrdir, revision_id=None, repository_policy=None,
+ repository=None):
+ """Create a new line of development from the branch, into to_bzrdir.
+
+ to_bzrdir controls the branch format.
+
+ revision_id: if not None, the revision history in the new branch will
+ be truncated to end with revision_id.
+ """
+ if (repository_policy is not None and
+ repository_policy.requires_stacking()):
+ to_bzrdir._format.require_stacking(_skip_repo=True)
+ result = to_bzrdir.create_branch(repository=repository)
+ result.lock_write()
+ try:
+ if repository_policy is not None:
+ repository_policy.configure_branch(result)
+ self.copy_content_into(result, revision_id=revision_id)
+ master_url = self.get_bound_location()
+ if master_url is None:
+ result.set_parent(self.bzrdir.root_transport.base)
+ else:
+ result.set_parent(master_url)
+ finally:
+ result.unlock()
+ return result
+
+ def _synchronize_history(self, destination, revision_id):
+ """Synchronize last revision and revision history between branches.
+
+ This version is most efficient when the destination is also a
+ BzrBranch6, but works for BzrBranch5, as long as the destination's
+ repository contains all the lefthand ancestors of the intended
+ last_revision. If not, set_last_revision_info will fail.
+
+ :param destination: The branch to copy the history into
+ :param revision_id: The revision-id to truncate history at. May
+ be None to copy complete history.
+ """
+ source_revno, source_revision_id = self.last_revision_info()
+ if revision_id is None:
+ revno, revision_id = source_revno, source_revision_id
+ else:
+ graph = self.repository.get_graph()
+ try:
+ revno = graph.find_distance_to_null(revision_id,
+ [(source_revision_id, source_revno)])
+ except errors.GhostRevisionsHaveNoRevno:
+ # Default to 1, if we can't find anything else
+ revno = 1
+ destination.set_last_revision_info(revno, revision_id)
+
+ def copy_content_into(self, destination, revision_id=None):
+ """Copy the content of self into destination.
+
+ revision_id: if not None, the revision history in the new branch will
+ be truncated to end with revision_id.
+ """
+ return InterBranch.get(self, destination).copy_content_into(
+ revision_id=revision_id)
+
+ def update_references(self, target):
+ if not getattr(self._format, 'supports_reference_locations', False):
+ return
+ reference_dict = self._get_all_reference_info()
+ if len(reference_dict) == 0:
+ return
+ old_base = self.base
+ new_base = target.base
+ target_reference_dict = target._get_all_reference_info()
+ for file_id, (tree_path, branch_location) in (
+ reference_dict.items()):
+ branch_location = urlutils.rebase_url(branch_location,
+ old_base, new_base)
+ target_reference_dict.setdefault(
+ file_id, (tree_path, branch_location))
+ target._set_all_reference_info(target_reference_dict)
+
+ @needs_read_lock
+ def check(self, refs):
+ """Check consistency of the branch.
+
+ In particular this checks that revisions given in the revision-history
+ do actually match up in the revision graph, and that they're all
+ present in the repository.
+
+ Callers will typically also want to check the repository.
+
+ :param refs: Calculated refs for this branch as specified by
+ branch._get_check_refs()
+ :return: A BranchCheckResult.
+ """
+ result = BranchCheckResult(self)
+ last_revno, last_revision_id = self.last_revision_info()
+ actual_revno = refs[('lefthand-distance', last_revision_id)]
+ if actual_revno != last_revno:
+ result.errors.append(errors.BzrCheckError(
+ 'revno does not match len(mainline) %s != %s' % (
+ last_revno, actual_revno)))
+ # TODO: We should probably also check that self.revision_history
+ # matches the repository for older branch formats.
+ # If looking for the code that cross-checks repository parents against
+ # the Graph.iter_lefthand_ancestry output, that is now a repository
+ # specific check.
+ return result
+
+ def _get_checkout_format(self, lightweight=False):
+ """Return the most suitable metadir for a checkout of this branch.
+ Weaves are used if this branch's repository uses weaves.
+ """
+ format = self.repository.bzrdir.checkout_metadir()
+ format.set_branch_format(self._format)
+ return format
+
+ def create_clone_on_transport(self, to_transport, revision_id=None,
+ stacked_on=None, create_prefix=False, use_existing_dir=False,
+ no_tree=None):
+ """Create a clone of this branch and its bzrdir.
+
+ :param to_transport: The transport to clone onto.
+ :param revision_id: The revision id to use as tip in the new branch.
+ If None the tip is obtained from this branch.
+ :param stacked_on: An optional URL to stack the clone on.
+ :param create_prefix: Create any missing directories leading up to
+ to_transport.
+ :param use_existing_dir: Use an existing directory if one exists.
+ """
+ # XXX: Fix the bzrdir API to allow getting the branch back from the
+ # clone call. Or something. 20090224 RBC/spiv.
+ # XXX: Should this perhaps clone colocated branches as well,
+ # rather than just the default branch? 20100319 JRV
+ if revision_id is None:
+ revision_id = self.last_revision()
+ dir_to = self.bzrdir.clone_on_transport(to_transport,
+ revision_id=revision_id, stacked_on=stacked_on,
+ create_prefix=create_prefix, use_existing_dir=use_existing_dir,
+ no_tree=no_tree)
+ return dir_to.open_branch()
+
+ def create_checkout(self, to_location, revision_id=None,
+ lightweight=False, accelerator_tree=None,
+ hardlink=False):
+ """Create a checkout of a branch.
+
+ :param to_location: The url to produce the checkout at
+ :param revision_id: The revision to check out
+ :param lightweight: If True, produce a lightweight checkout, otherwise,
+ produce a bound branch (heavyweight checkout)
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ :param hardlink: If true, hard-link files from accelerator_tree,
+ where possible.
+ :return: The tree of the created checkout
+ """
+ t = transport.get_transport(to_location)
+ t.ensure_base()
+ format = self._get_checkout_format(lightweight=lightweight)
+ try:
+ checkout = format.initialize_on_transport(t)
+ except errors.AlreadyControlDirError:
+ # It's fine if the control directory already exists,
+ # as long as there is no existing branch and working tree.
+ checkout = controldir.ControlDir.open_from_transport(t)
+ try:
+ checkout.open_branch()
+ except errors.NotBranchError:
+ pass
+ else:
+ raise errors.AlreadyControlDirError(t.base)
+ if checkout.control_transport.base == self.bzrdir.control_transport.base:
+ # When checking out to the same control directory,
+ # always create a lightweight checkout
+ lightweight = True
+
+ if lightweight:
+ from_branch = checkout.set_branch_reference(target_branch=self)
+ else:
+ policy = checkout.determine_repository_policy()
+ repo = policy.acquire_repository()[0]
+ checkout_branch = checkout.create_branch()
+ checkout_branch.bind(self)
+ # pull up to the specified revision_id to set the initial
+ # branch tip correctly, and seed it with history.
+ checkout_branch.pull(self, stop_revision=revision_id)
+ from_branch = None
+ tree = checkout.create_workingtree(revision_id,
+ from_branch=from_branch,
+ accelerator_tree=accelerator_tree,
+ hardlink=hardlink)
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ try:
+ for path, file_id in basis_tree.iter_references():
+ reference_parent = self.reference_parent(file_id, path)
+ reference_parent.create_checkout(tree.abspath(path),
+ basis_tree.get_reference_revision(file_id, path),
+ lightweight)
+ finally:
+ basis_tree.unlock()
+ return tree
+
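+ # Editor's sketch, not part of the original source: the two checkout
+ # flavours supported by create_checkout above. Paths are hypothetical.
+ #
+ #   # Heavyweight: a new branch bound to this one, with a working tree.
+ #   tree = branch.create_checkout('/home/user/heavy-co')
+ #
+ #   # Lightweight: a working tree whose branch is just a reference back
+ #   # to this branch.
+ #   tree = branch.create_checkout('/home/user/light-co', lightweight=True)
+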
+ @needs_write_lock
+ def reconcile(self, thorough=True):
+ """Make sure the data stored in this branch is consistent."""
+ from bzrlib.reconcile import BranchReconciler
+ reconciler = BranchReconciler(self, thorough=thorough)
+ reconciler.reconcile()
+ return reconciler
+
+ def reference_parent(self, file_id, path, possible_transports=None):
+ """Return the parent branch for a tree-reference file_id
+
+ :param file_id: The file_id of the tree reference
+ :param path: The path of the file_id in the tree
+ :return: A branch associated with the file_id
+ """
+ # FIXME should provide multiple branches, based on config
+ return Branch.open(self.bzrdir.root_transport.clone(path).base,
+ possible_transports=possible_transports)
+
+ def supports_tags(self):
+ return self._format.supports_tags()
+
+ def automatic_tag_name(self, revision_id):
+ """Try to automatically find the tag name for a revision.
+
+ :param revision_id: Revision id of the revision.
+ :return: A tag name or None if no tag name could be determined.
+ """
+ for hook in Branch.hooks['automatic_tag_name']:
+ ret = hook(self, revision_id)
+ if ret is not None:
+ return ret
+ return None
+
+ def _check_if_descendant_or_diverged(self, revision_a, revision_b, graph,
+ other_branch):
+ """Ensure that revision_b is a descendant of revision_a.
+
+ This is a helper function for update_revisions.
+
+ :raises: DivergedBranches if revision_b has diverged from revision_a.
+ :returns: True if revision_b is a descendant of revision_a.
+ """
+ relation = self._revision_relations(revision_a, revision_b, graph)
+ if relation == 'b_descends_from_a':
+ return True
+ elif relation == 'diverged':
+ raise errors.DivergedBranches(self, other_branch)
+ elif relation == 'a_descends_from_b':
+ return False
+ else:
+ raise AssertionError("invalid relation: %r" % (relation,))
+
+ def _revision_relations(self, revision_a, revision_b, graph):
+ """Determine the relationship between two revisions.
+
+ :returns: One of: 'a_descends_from_b', 'b_descends_from_a', 'diverged'
+ """
+ heads = graph.heads([revision_a, revision_b])
+ if heads == set([revision_b]):
+ return 'b_descends_from_a'
+ elif heads == set([revision_a, revision_b]):
+ # These branches have diverged
+ return 'diverged'
+ elif heads == set([revision_a]):
+ return 'a_descends_from_b'
+ else:
+ raise AssertionError("invalid heads: %r" % (heads,))
+
+ def heads_to_fetch(self):
+ """Return the heads that must and that should be fetched to copy this
+ branch into another repo.
+
+ :returns: a 2-tuple of (must_fetch, if_present_fetch). must_fetch is a
+ set of heads that must be fetched. if_present_fetch is a set of
+ heads that must be fetched if present, but no error is necessary if
+ they are not present.
+ """
+ # For bzr native formats must_fetch is just the tip, and
+ # if_present_fetch are the tags.
+ must_fetch = set([self.last_revision()])
+ if_present_fetch = set()
+ if self.get_config_stack().get('branch.fetch_tags'):
+ try:
+ if_present_fetch = set(self.tags.get_reverse_tag_dict())
+ except errors.TagsNotSupported:
+ pass
+ must_fetch.discard(_mod_revision.NULL_REVISION)
+ if_present_fetch.discard(_mod_revision.NULL_REVISION)
+ return must_fetch, if_present_fetch
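+
+ # Editor's sketch, not part of the original source: what heads_to_fetch
+ # reports for a typical native branch. Values shown are illustrative.
+ #
+ #   must_fetch, if_present_fetch = branch.heads_to_fetch()
+ #   # must_fetch == set([branch.last_revision()])
+ #   # if_present_fetch holds the tagged revision ids when the
+ #   # 'branch.fetch_tags' option is enabled, otherwise it is empty.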
+
+
+class BranchFormat(controldir.ControlComponentFormat):
+ """An encapsulation of the initialization and open routines for a format.
+
+ Formats provide three things:
+ * an initialization routine,
+ * a format description,
+ * an open routine.
+
+ Formats are placed in a dict by their format string for reference
+ during branch opening. It's not required that these be instances, they
+ can be classes themselves with class methods - it simply depends on
+ whether state is needed for a given format or not.
+
+ Once a format is deprecated, just deprecate the initialize and open
+ methods on the format class. Do not deprecate the object, as the
+ object will be created every time regardless.
+ """
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def get_reference(self, controldir, name=None):
+ """Get the target reference of the branch in controldir.
+
+ Format probing must have been completed before calling
+ this method - it is assumed that the format of the branch
+ in controldir is correct.
+
+ :param controldir: The controldir to get the branch data from.
+ :param name: Name of the colocated branch to fetch
+ :return: None if the branch is not a reference branch.
+ """
+ return None
+
+ @classmethod
+ def set_reference(self, controldir, name, to_branch):
+ """Set the target reference of the branch in controldir.
+
+ Format probing must have been completed before calling
+ this method - it is assumed that the format of the branch
+ in controldir is correct.
+
+ :param controldir: The controldir to set the branch reference for.
+ :param name: Name of colocated branch to set, None for default
+ :param to_branch: branch that the checkout is to reference
+ """
+ raise NotImplementedError(self.set_reference)
+
+ def get_format_description(self):
+ """Return the short format description for this format."""
+ raise NotImplementedError(self.get_format_description)
+
+ def _run_post_branch_init_hooks(self, controldir, name, branch):
+ hooks = Branch.hooks['post_branch_init']
+ if not hooks:
+ return
+ params = BranchInitHookParams(self, controldir, name, branch)
+ for hook in hooks:
+ hook(params)
+
+ def initialize(self, controldir, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch of this format in controldir.
+
+ :param name: Name of the colocated branch to create.
+ """
+ raise NotImplementedError(self.initialize)
+
+ def is_supported(self):
+ """Is this format supported?
+
+ Supported formats can be initialized and opened.
+ Unsupported formats may not support initialization or committing or
+ some other features depending on the reason for not being supported.
+ """
+ return True
+
+ def make_tags(self, branch):
+ """Create a tags object for branch.
+
+ This method is on BranchFormat, because BranchFormats are reflected
+ over the wire via network_name(), whereas full Branch instances require
+ multiple VFS method calls to operate at all.
+
+ The default implementation returns a disabled-tags instance.
+
+ Note that it is normal for branch to be a RemoteBranch when using tags
+ on a RemoteBranch.
+ """
+ return _mod_tag.DisabledTags(branch)
+
+ def network_name(self):
+ """A simple byte string uniquely identifying this format for RPC calls.
+
+ MetaDir branch formats use their disk format string to identify the
+ repository over the wire. All-in-one formats such as bzr < 0.8, and
+ foreign formats like svn/git and hg should use some marker which is
+ unique and immutable.
+ """
+ raise NotImplementedError(self.network_name)
+
+ def open(self, controldir, name=None, _found=False, ignore_fallbacks=False,
+ found_repository=None, possible_transports=None):
+ """Return the branch object for controldir.
+
+ :param controldir: A ControlDir that contains a branch.
+ :param name: Name of colocated branch to open
+ :param _found: a private parameter, do not use it. It is used to
+ indicate if format probing has already been done.
+ :param ignore_fallbacks: when set, no fallback branches will be opened
+ (if there are any). Default is to open fallbacks.
+ """
+ raise NotImplementedError(self.open)
+
+ def supports_set_append_revisions_only(self):
+ """True if this format supports set_append_revisions_only."""
+ return False
+
+ def supports_stacking(self):
+ """True if this format records a stacked-on branch."""
+ return False
+
+ def supports_leaving_lock(self):
+ """True if this format supports leaving locks in place."""
+ return False # by default
+
+ def __str__(self):
+ return self.get_format_description().rstrip()
+
+ def supports_tags(self):
+ """True if this format supports tags stored in the branch"""
+ return False # by default
+
+ def tags_are_versioned(self):
+ """Whether the tag container for this branch versions tags."""
+ return False
+
+ def supports_tags_referencing_ghosts(self):
+ """True if tags can reference ghost revisions."""
+ return True
+
+
+class MetaDirBranchFormatFactory(registry._LazyObjectGetter):
+ """A factory for a BranchFormat object, permitting simple lazy registration.
+
+ While none of the built in BranchFormats are lazy registered yet,
+ bzrlib.tests.test_branch.TestMetaDirBranchFormatFactory demonstrates how to
+ use it, and the bzr-loom plugin uses it as well (see
+ bzrlib.plugins.loom.formats).
+ """
+
+ def __init__(self, format_string, module_name, member_name):
+ """Create a MetaDirBranchFormatFactory.
+
+ :param format_string: The format string the format has.
+ :param module_name: Module to load the format class from.
+ :param member_name: Attribute name within the module for the format class.
+ """
+ registry._LazyObjectGetter.__init__(self, module_name, member_name)
+ self._format_string = format_string
+
+ def get_format_string(self):
+ """See BranchFormat.get_format_string."""
+ return self._format_string
+
+ def __call__(self):
+ """Used for network_format_registry support."""
+ return self.get_obj()()
+
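+# Editor's sketch, not part of the original source: constructing a lazy
+# format factory. The format string, module and attribute names are made
+# up; calling the factory imports the module and instantiates the real
+# BranchFormat subclass.
+#
+#   factory = MetaDirBranchFormatFactory(
+#       "Example Branch Format 1 (editor's example)\n",
+#       'myplugin.formats', 'ExampleBranchFormat')
+#   fmt = factory()   # lazily loads myplugin.formats.ExampleBranchFormat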
+
+class BranchHooks(Hooks):
+ """A dictionary mapping hook name to a list of callables for branch hooks.
+
+ e.g. ['post_push'] is the list of items to be called when the
+ push function is invoked.
+ """
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially, because by default nothing should get
+ notified.
+ """
+ Hooks.__init__(self, "bzrlib.branch", "Branch.hooks")
+ self.add_hook('open',
+ "Called with the Branch object that has been opened after a "
+ "branch is opened.", (1, 8))
+ self.add_hook('post_push',
+ "Called after a push operation completes. post_push is called "
+ "with a bzrlib.branch.BranchPushResult object and only runs in the "
+ "bzr client.", (0, 15))
+ self.add_hook('post_pull',
+ "Called after a pull operation completes. post_pull is called "
+ "with a bzrlib.branch.PullResult object and only runs in the "
+ "bzr client.", (0, 15))
+ self.add_hook('pre_commit',
+ "Called after a commit is calculated but before it is "
+ "completed. pre_commit is called with (local, master, old_revno, "
+ "old_revid, future_revno, future_revid, tree_delta, future_tree"
+ "). old_revid is NULL_REVISION for the first commit to a branch, "
+ "tree_delta is a TreeDelta object describing changes from the "
+ "basis revision. hooks MUST NOT modify this delta. "
+ " future_tree is an in-memory tree obtained from "
+ "CommitBuilder.revision_tree() and hooks MUST NOT modify this "
+ "tree.", (0,91))
+ self.add_hook('post_commit',
+ "Called in the bzr client after a commit has completed. "
+ "post_commit is called with (local, master, old_revno, old_revid, "
+ "new_revno, new_revid). old_revid is NULL_REVISION for the first "
+ "commit to a branch.", (0, 15))
+ self.add_hook('post_uncommit',
+ "Called in the bzr client after an uncommit completes. "
+ "post_uncommit is called with (local, master, old_revno, "
+ "old_revid, new_revno, new_revid) where local is the local branch "
+ "or None, master is the target branch, and an empty branch "
+ "receives new_revno of 0, new_revid of None.", (0, 15))
+ self.add_hook('pre_change_branch_tip',
+ "Called in bzr client and server before a change to the tip of a "
+ "branch is made. pre_change_branch_tip is called with a "
+ "bzrlib.branch.ChangeBranchTipParams. Note that push, pull, "
+ "commit, uncommit will all trigger this hook.", (1, 6))
+ self.add_hook('post_change_branch_tip',
+ "Called in bzr client and server after a change to the tip of a "
+ "branch is made. post_change_branch_tip is called with a "
+ "bzrlib.branch.ChangeBranchTipParams. Note that push, pull, "
+ "commit, uncommit will all trigger this hook.", (1, 4))
+ self.add_hook('transform_fallback_location',
+ "Called when a stacked branch is activating its fallback "
+ "locations. transform_fallback_location is called with (branch, "
+ "url), and should return a new url. Returning the same url "
+ "allows it to be used as-is, returning a different one can be "
+ "used to cause the branch to stack on a closer copy of that "
+ "fallback_location. Note that the branch cannot have history "
+ "accessing methods called on it during this hook because the "
+ "fallback locations have not been activated. When there are "
+ "multiple hooks installed for transform_fallback_location, "
+ "all are called with the url returned from the previous hook."
+ "The order is however undefined.", (1, 9))
+ self.add_hook('automatic_tag_name',
+ "Called to determine an automatic tag name for a revision. "
+ "automatic_tag_name is called with (branch, revision_id) and "
+ "should return a tag name or None if no tag name could be "
+ "determined. The first non-None tag name returned will be used.",
+ (2, 2))
+ self.add_hook('post_branch_init',
+ "Called after new branch initialization completes. "
+ "post_branch_init is called with a "
+ "bzrlib.branch.BranchInitHookParams. "
+ "Note that init, branch and checkout (both heavyweight and "
+ "lightweight) will all trigger this hook.", (2, 2))
+ self.add_hook('post_switch',
+ "Called after a checkout switches branch. "
+ "post_switch is called with a "
+ "bzrlib.branch.SwitchHookParams.", (2, 2))
+
+
+
+# install the default hooks into the Branch class.
+Branch.hooks = BranchHooks()
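+
+# Example (sketch): plugins normally add to these hooks through the Hooks
+# API; 'report_push' below is a hypothetical callable that accepts a
+# bzrlib.branch.BranchPushResult.
+#
+#   Branch.hooks.install_named_hook('post_push', report_push, 'push reporter')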
+
+
+class ChangeBranchTipParams(object):
+ """Object holding parameters passed to `*_change_branch_tip` hooks.
+
+ There are 5 fields that hooks may wish to access:
+
+ :ivar branch: the branch being changed
+ :ivar old_revno: revision number before the change
+ :ivar new_revno: revision number after the change
+ :ivar old_revid: revision id before the change
+ :ivar new_revid: revision id after the change
+
+ The revid fields are strings. The revno fields are integers.
+ """
+
+ def __init__(self, branch, old_revno, new_revno, old_revid, new_revid):
+ """Create a group of ChangeBranchTip parameters.
+
+ :param branch: The branch being changed.
+ :param old_revno: Revision number before the change.
+ :param new_revno: Revision number after the change.
+ :param old_revid: Tip revision id before the change.
+ :param new_revid: Tip revision id after the change.
+ """
+ self.branch = branch
+ self.old_revno = old_revno
+ self.new_revno = new_revno
+ self.old_revid = old_revid
+ self.new_revid = new_revid
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ return "<%s of %s from (%s, %s) to (%s, %s)>" % (
+ self.__class__.__name__, self.branch,
+ self.old_revno, self.old_revid, self.new_revno, self.new_revid)
+
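+# Example (sketch): a pre_change_branch_tip hook receives one of these
+# parameter objects and may veto the change; bzrlib.errors.TipChangeRejected
+# is the exception used to reject a tip change. Illustrative only, not a
+# tested hook.
+#
+#   def forbid_rewind(params):
+#       if params.new_revno < params.old_revno:
+#           raise errors.TipChangeRejected("shortening history is not allowed")
+#
+#   Branch.hooks.install_named_hook(
+#       'pre_change_branch_tip', forbid_rewind, 'no rewind')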
+
+class BranchInitHookParams(object):
+ """Object holding parameters passed to `*_branch_init` hooks.
+
+ There are 4 fields that hooks may wish to access:
+
+ :ivar format: the branch format
+ :ivar bzrdir: the ControlDir where the branch will be/has been initialized
+ :ivar name: name of colocated branch, if any (or None)
+ :ivar branch: the branch created
+
+ Note that for lightweight checkouts, the bzrdir and format fields refer to
+ the checkout, hence they are different from the corresponding fields in
+ branch, which refer to the original branch.
+ """
+
+ def __init__(self, format, controldir, name, branch):
+ """Create a group of BranchInitHook parameters.
+
+ :param format: the branch format
+ :param controldir: the ControlDir where the branch will be/has been
+ initialized
+ :param name: name of colocated branch, if any (or None)
+ :param branch: the branch created
+
+ Note that for lightweight checkouts, the bzrdir and format fields refer
+ to the checkout, hence they are different from the corresponding fields
+ in branch, which refer to the original branch.
+ """
+ self.format = format
+ self.bzrdir = controldir
+ self.name = name
+ self.branch = branch
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ return "<%s of %s>" % (self.__class__.__name__, self.branch)
+
+
+class SwitchHookParams(object):
+ """Object holding parameters passed to `*_switch` hooks.
+
+ There are 4 fields that hooks may wish to access:
+
+ :ivar control_dir: ControlDir of the checkout to change
+ :ivar to_branch: branch that the checkout is to reference
+ :ivar force: skip the check for local commits in a heavy checkout
+ :ivar revision_id: revision ID to switch to (or None)
+ """
+
+ def __init__(self, control_dir, to_branch, force, revision_id):
+ """Create a group of SwitchHook parameters.
+
+ :param control_dir: ControlDir of the checkout to change
+ :param to_branch: branch that the checkout is to reference
+ :param force: skip the check for local commits in a heavy checkout
+ :param revision_id: revision ID to switch to (or None)
+ """
+ self.control_dir = control_dir
+ self.to_branch = to_branch
+ self.force = force
+ self.revision_id = revision_id
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ return "<%s for %s to (%s, %s)>" % (self.__class__.__name__,
+ self.control_dir, self.to_branch,
+ self.revision_id)
+
+
+class BranchFormatMetadir(bzrdir.BzrFormat, BranchFormat):
+ """Base class for branch formats that live in meta directories.
+ """
+
+ def __init__(self):
+ BranchFormat.__init__(self)
+ bzrdir.BzrFormat.__init__(self)
+
+ @classmethod
+ def find_format(klass, controldir, name=None):
+ """Return the format for the branch object in controldir."""
+ try:
+ transport = controldir.get_branch_transport(None, name=name)
+ except errors.NoSuchFile:
+ raise errors.NotBranchError(path=name, bzrdir=controldir)
+ try:
+ format_string = transport.get_bytes("format")
+ except errors.NoSuchFile:
+ raise errors.NotBranchError(path=transport.base, bzrdir=controldir)
+ return klass._find_format(format_registry, 'branch', format_string)
+
+ def _branch_class(self):
+ """What class to instantiate on open calls."""
+ raise NotImplementedError(self._branch_class)
+
+ def _get_initial_config(self, append_revisions_only=None):
+ if append_revisions_only:
+ return "append_revisions_only = True\n"
+ else:
+ # Avoid writing anything if append_revisions_only is disabled,
+ # as that is the default.
+ return ""
+
+ def _initialize_helper(self, a_bzrdir, utf8_files, name=None,
+ repository=None):
+ """Initialize a branch in a control dir, with specified files
+
+ :param a_bzrdir: The bzrdir to initialize the branch in
+ :param utf8_files: The files to create as a list of
+ (filename, content) tuples
+ :param name: Name of colocated branch to create, if any
+ :return: a branch in this format
+ """
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ mutter('creating branch %r in %s', self, a_bzrdir.user_url)
+ branch_transport = a_bzrdir.get_branch_transport(self, name=name)
+ control_files = lockable_files.LockableFiles(branch_transport,
+ 'lock', lockdir.LockDir)
+ control_files.create_lock()
+ control_files.lock_write()
+ try:
+ utf8_files += [('format', self.as_string())]
+ for (filename, content) in utf8_files:
+ branch_transport.put_bytes(
+ filename, content,
+ mode=a_bzrdir._get_file_mode())
+ finally:
+ control_files.unlock()
+ branch = self.open(a_bzrdir, name, _found=True,
+ found_repository=repository)
+ self._run_post_branch_init_hooks(a_bzrdir, name, branch)
+ return branch
+
+ def open(self, a_bzrdir, name=None, _found=False, ignore_fallbacks=False,
+ found_repository=None, possible_transports=None):
+ """See BranchFormat.open()."""
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ if not _found:
+ format = BranchFormatMetadir.find_format(a_bzrdir, name=name)
+ if format.__class__ != self.__class__:
+ raise AssertionError("wrong format %r found for %r" %
+ (format, self))
+ transport = a_bzrdir.get_branch_transport(None, name=name)
+ try:
+ control_files = lockable_files.LockableFiles(transport, 'lock',
+ lockdir.LockDir)
+ if found_repository is None:
+ found_repository = a_bzrdir.find_repository()
+ return self._branch_class()(_format=self,
+ _control_files=control_files,
+ name=name,
+ a_bzrdir=a_bzrdir,
+ _repository=found_repository,
+ ignore_fallbacks=ignore_fallbacks,
+ possible_transports=possible_transports)
+ except errors.NoSuchFile:
+ raise errors.NotBranchError(path=transport.base, bzrdir=a_bzrdir)
+
+ @property
+ def _matchingbzrdir(self):
+ ret = bzrdir.BzrDirMetaFormat1()
+ ret.set_branch_format(self)
+ return ret
+
+ def supports_tags(self):
+ return True
+
+ def supports_leaving_lock(self):
+ return True
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ BranchFormat.check_support_status(self,
+ allow_unsupported=allow_unsupported, recommend_upgrade=recommend_upgrade,
+ basedir=basedir)
+ bzrdir.BzrFormat.check_support_status(self, allow_unsupported=allow_unsupported,
+ recommend_upgrade=recommend_upgrade, basedir=basedir)
+
+
+class BzrBranchFormat6(BranchFormatMetadir):
+ """Branch format with last-revision and tags.
+
+ Unlike previous formats, this has no explicit revision history. Instead,
+ it just stores the last revision, and the left-hand history leading up to
+ it is taken to be the branch history.
+
+ This format was introduced in bzr 0.15 and became the default in bzr 0.91.
+ """
+
+ def _branch_class(self):
+ return BzrBranch6
+
+ @classmethod
+ def get_format_string(cls):
+ """See BranchFormat.get_format_string()."""
+ return "Bazaar Branch Format 6 (bzr 0.15)\n"
+
+ def get_format_description(self):
+ """See BranchFormat.get_format_description()."""
+ return "Branch format 6"
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch of this format in a_bzrdir."""
+ utf8_files = [('last-revision', '0 null:\n'),
+ ('branch.conf',
+ self._get_initial_config(append_revisions_only)),
+ ('tags', ''),
+ ]
+ return self._initialize_helper(a_bzrdir, utf8_files, name, repository)
+
+ def make_tags(self, branch):
+ """See bzrlib.branch.BranchFormat.make_tags()."""
+ return _mod_tag.BasicTags(branch)
+
+ def supports_set_append_revisions_only(self):
+ return True
+
+
+class BzrBranchFormat8(BranchFormatMetadir):
+ """Metadir format supporting storing locations of subtree branches."""
+
+ def _branch_class(self):
+ return BzrBranch8
+
+ @classmethod
+ def get_format_string(cls):
+ """See BranchFormat.get_format_string()."""
+ return "Bazaar Branch Format 8 (needs bzr 1.15)\n"
+
+ def get_format_description(self):
+ """See BranchFormat.get_format_description()."""
+ return "Branch format 8"
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch of this format in a_bzrdir."""
+ utf8_files = [('last-revision', '0 null:\n'),
+ ('branch.conf',
+ self._get_initial_config(append_revisions_only)),
+ ('tags', ''),
+ ('references', '')
+ ]
+ return self._initialize_helper(a_bzrdir, utf8_files, name, repository)
+
+ def make_tags(self, branch):
+ """See bzrlib.branch.BranchFormat.make_tags()."""
+ return _mod_tag.BasicTags(branch)
+
+ def supports_set_append_revisions_only(self):
+ return True
+
+ def supports_stacking(self):
+ return True
+
+ supports_reference_locations = True
+
+
+class BzrBranchFormat7(BranchFormatMetadir):
+ """Branch format with last-revision, tags, and a stacked location pointer.
+
+ The stacked location pointer is passed down to the repository and requires
+ a repository format with supports_external_lookups = True.
+
+ This format was introduced in bzr 1.6.
+ """
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch of this format in a_bzrdir."""
+ utf8_files = [('last-revision', '0 null:\n'),
+ ('branch.conf',
+ self._get_initial_config(append_revisions_only)),
+ ('tags', ''),
+ ]
+ return self._initialize_helper(a_bzrdir, utf8_files, name, repository)
+
+ def _branch_class(self):
+ return BzrBranch7
+
+ @classmethod
+ def get_format_string(cls):
+ """See BranchFormat.get_format_string()."""
+ return "Bazaar Branch Format 7 (needs bzr 1.6)\n"
+
+ def get_format_description(self):
+ """See BranchFormat.get_format_description()."""
+ return "Branch format 7"
+
+ def supports_set_append_revisions_only(self):
+ return True
+
+ def supports_stacking(self):
+ return True
+
+ def make_tags(self, branch):
+ """See bzrlib.branch.BranchFormat.make_tags()."""
+ return _mod_tag.BasicTags(branch)
+
+ supports_reference_locations = False
+
+
+class BranchReferenceFormat(BranchFormatMetadir):
+ """Bzr branch reference format.
+
+ Branch references are used in implementing checkouts; they
+ act as an alias to the real branch, which is at some other URL.
+
+ This format has:
+ - a location file
+ - a format string
+ """
+
+ @classmethod
+ def get_format_string(cls):
+ """See BranchFormat.get_format_string()."""
+ return "Bazaar-NG Branch Reference Format 1\n"
+
+ def get_format_description(self):
+ """See BranchFormat.get_format_description()."""
+ return "Checkout reference format 1"
+
+ def get_reference(self, a_bzrdir, name=None):
+ """See BranchFormat.get_reference()."""
+ transport = a_bzrdir.get_branch_transport(None, name=name)
+ return transport.get_bytes('location')
+
+ def set_reference(self, a_bzrdir, name, to_branch):
+ """See BranchFormat.set_reference()."""
+ transport = a_bzrdir.get_branch_transport(None, name=name)
+ location = transport.put_bytes('location', to_branch.base)
+
+ def initialize(self, a_bzrdir, name=None, target_branch=None,
+ repository=None, append_revisions_only=None):
+ """Create a branch of this format in a_bzrdir."""
+ if target_branch is None:
+ # this format does not implement branch itself, thus the implicit
+ # creation contract must see it as uninitializable
+ raise errors.UninitializableFormat(self)
+ mutter('creating branch reference in %s', a_bzrdir.user_url)
+ if a_bzrdir._format.fixed_components:
+ raise errors.IncompatibleFormat(self, a_bzrdir._format)
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ branch_transport = a_bzrdir.get_branch_transport(self, name=name)
+ branch_transport.put_bytes('location',
+ target_branch.user_url)
+ branch_transport.put_bytes('format', self.as_string())
+ branch = self.open(a_bzrdir, name, _found=True,
+ possible_transports=[target_branch.bzrdir.root_transport])
+ self._run_post_branch_init_hooks(a_bzrdir, name, branch)
+ return branch
+
+ def _make_reference_clone_function(format, a_branch):
+ """Create a clone() routine for a branch dynamically."""
+ def clone(to_bzrdir, revision_id=None,
+ repository_policy=None):
+ """See Branch.clone()."""
+ return format.initialize(to_bzrdir, target_branch=a_branch)
+ # cannot obey revision_id limits when cloning a reference ...
+ # FIXME RBC 20060210 either nuke revision_id for clone, or
+ # emit some sort of warning/error to the caller ?!
+ return clone
+
+ def open(self, a_bzrdir, name=None, _found=False, location=None,
+ possible_transports=None, ignore_fallbacks=False,
+ found_repository=None):
+ """Return the branch that the branch reference in a_bzrdir points at.
+
+ :param a_bzrdir: A BzrDir that contains a branch.
+ :param name: Name of colocated branch to open, if any
+ :param _found: a private parameter, do not use it. It is used to
+ indicate if format probing has already been done.
+ :param ignore_fallbacks: when set, no fallback branches will be opened
+ (if there are any). Default is to open fallbacks.
+ :param location: The location of the referenced branch. If
+ unspecified, this will be determined from the branch reference in
+ a_bzrdir.
+ :param possible_transports: An optional reusable transports list.
+ """
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ if not _found:
+ format = BranchFormatMetadir.find_format(a_bzrdir, name=name)
+ if format.__class__ != self.__class__:
+ raise AssertionError("wrong format %r found for %r" %
+ (format, self))
+ if location is None:
+ location = self.get_reference(a_bzrdir, name)
+ real_bzrdir = controldir.ControlDir.open(
+ location, possible_transports=possible_transports)
+ result = real_bzrdir.open_branch(ignore_fallbacks=ignore_fallbacks,
+ possible_transports=possible_transports)
+ # this changes the behaviour of result.clone to create a new reference
+ # rather than a copy of the content of the branch.
+ # I did not use a proxy object because that needs much more extensive
+ # testing, and we are only changing one behaviour at the moment.
+ # If we decide to alter more behaviours - i.e. the implicit nickname
+ # then this should be refactored to introduce a tested proxy branch
+ # and a subclass of that for use in overriding clone() and ....
+ # - RBC 20060210
+ result.clone = self._make_reference_clone_function(result)
+ return result
+
+
+class BranchFormatRegistry(controldir.ControlComponentFormatRegistry):
+ """Branch format registry."""
+
+ def __init__(self, other_registry=None):
+ super(BranchFormatRegistry, self).__init__(other_registry)
+ self._default_format = None
+
+ def set_default(self, format):
+ self._default_format = format
+
+ def get_default(self):
+ return self._default_format
+
+
+network_format_registry = registry.FormatRegistry()
+"""Registry of formats indexed by their network name.
+
+The network name for a branch format is an identifier that can be used when
+referring to formats with smart server operations. See
+BranchFormat.network_name() for more detail.
+"""
+
+format_registry = BranchFormatRegistry(network_format_registry)
+
+
+# formats which have no format string are not discoverable
+# and not independently creatable, so are not registered.
+__format6 = BzrBranchFormat6()
+__format7 = BzrBranchFormat7()
+__format8 = BzrBranchFormat8()
+format_registry.register_lazy(
+ "Bazaar-NG branch format 5\n", "bzrlib.branchfmt.fullhistory", "BzrBranchFormat5")
+format_registry.register(BranchReferenceFormat())
+format_registry.register(__format6)
+format_registry.register(__format7)
+format_registry.register(__format8)
+format_registry.set_default(__format7)
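+# Formats registered above can later be retrieved by their format string,
+# e.g. (sketch) format_registry.get(BzrBranchFormat7.get_format_string());
+# format_registry.get_default() returns the default format (format 7 here).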
+
+
+class BranchWriteLockResult(LogicalLockResult):
+ """The result of write locking a branch.
+
+ :ivar branch_token: The token obtained from the underlying branch lock, or
+ None.
+ :ivar unlock: A callable which will unlock the lock.
+ """
+
+ def __init__(self, unlock, branch_token):
+ LogicalLockResult.__init__(self, unlock)
+ self.branch_token = branch_token
+
+ def __repr__(self):
+ return "BranchWriteLockResult(%s, %s)" % (self.branch_token,
+ self.unlock)
+
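+# Typical usage (sketch), based on BzrBranch.lock_write() below returning a
+# BranchWriteLockResult:
+#
+#   result = branch.lock_write()
+#   try:
+#       token = result.branch_token  # may be None
+#       # ... perform write operations ...
+#   finally:
+#       result.unlock()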
+
+class BzrBranch(Branch, _RelockDebugMixin):
+ """A branch stored in the actual filesystem.
+
+ Note that it's "local" in the context of the filesystem; it doesn't
+ really matter if it's on an nfs/smb/afs/coda/... share, as long as
+ it's writable, and can be accessed via the normal filesystem API.
+
+ :ivar _transport: Transport for file operations on this branch's
+ control files, typically pointing to the .bzr/branch directory.
+ :ivar repository: Repository for this branch.
+ :ivar base: The url of the base directory for this branch; the one
+ containing the .bzr directory.
+ :ivar name: Optional colocated branch name as it exists in the control
+ directory.
+ """
+
+ def __init__(self, _format=None,
+ _control_files=None, a_bzrdir=None, name=None,
+ _repository=None, ignore_fallbacks=False,
+ possible_transports=None):
+ """Create new branch object at a particular location."""
+ if a_bzrdir is None:
+ raise ValueError('a_bzrdir must be supplied')
+ if name is None:
+ raise ValueError('name must be supplied')
+ self.bzrdir = a_bzrdir
+ self._user_transport = self.bzrdir.transport.clone('..')
+ if name != "":
+ self._user_transport.set_segment_parameter(
+ "branch", urlutils.escape(name))
+ self._base = self._user_transport.base
+ self.name = name
+ self._format = _format
+ if _control_files is None:
+ raise ValueError('BzrBranch _control_files is None')
+ self.control_files = _control_files
+ self._transport = _control_files._transport
+ self.repository = _repository
+ self.conf_store = None
+ Branch.__init__(self, possible_transports)
+
+ def __str__(self):
+ return '%s(%s)' % (self.__class__.__name__, self.user_url)
+
+ __repr__ = __str__
+
+ def _get_base(self):
+ """Returns the directory containing the control directory."""
+ return self._base
+
+ base = property(_get_base, doc="The URL for the root of this branch.")
+
+ @property
+ def user_transport(self):
+ return self._user_transport
+
+ def _get_config(self):
+ return _mod_config.TransportConfig(self._transport, 'branch.conf')
+
+ def _get_config_store(self):
+ if self.conf_store is None:
+ self.conf_store = _mod_config.BranchStore(self)
+ return self.conf_store
+
+ def is_locked(self):
+ return self.control_files.is_locked()
+
+ def lock_write(self, token=None):
+ """Lock the branch for write operations.
+
+ :param token: A token to permit reacquiring a previously held and
+ preserved lock.
+ :return: A BranchWriteLockResult.
+ """
+ if not self.is_locked():
+ self._note_lock('w')
+ self.repository._warn_if_deprecated(self)
+ self.repository.lock_write()
+ took_lock = True
+ else:
+ took_lock = False
+ try:
+ return BranchWriteLockResult(self.unlock,
+ self.control_files.lock_write(token=token))
+ except:
+ if took_lock:
+ self.repository.unlock()
+ raise
+
+ def lock_read(self):
+ """Lock the branch for read operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ if not self.is_locked():
+ self._note_lock('r')
+ self.repository._warn_if_deprecated(self)
+ self.repository.lock_read()
+ took_lock = True
+ else:
+ took_lock = False
+ try:
+ self.control_files.lock_read()
+ return LogicalLockResult(self.unlock)
+ except:
+ if took_lock:
+ self.repository.unlock()
+ raise
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ if self.control_files._lock_count == 1 and self.conf_store is not None:
+ self.conf_store.save_changes()
+ try:
+ self.control_files.unlock()
+ finally:
+ if not self.control_files.is_locked():
+ self.repository.unlock()
+ # we just released the lock
+ self._clear_cached_state()
+
+ def peek_lock_mode(self):
+ if self.control_files._lock_count == 0:
+ return None
+ else:
+ return self.control_files._lock_mode
+
+ def get_physical_lock_status(self):
+ return self.control_files.get_physical_lock_status()
+
+ @needs_read_lock
+ def print_file(self, file, revision_id):
+ """See Branch.print_file."""
+ return self.repository.print_file(file, revision_id)
+
+ @needs_write_lock
+ def set_last_revision_info(self, revno, revision_id):
+ if not revision_id or not isinstance(revision_id, basestring):
+ raise errors.InvalidRevisionId(revision_id=revision_id, branch=self)
+ revision_id = _mod_revision.ensure_null(revision_id)
+ old_revno, old_revid = self.last_revision_info()
+ if self.get_append_revisions_only():
+ self._check_history_violation(revision_id)
+ self._run_pre_change_branch_tip_hooks(revno, revision_id)
+ self._write_last_revision_info(revno, revision_id)
+ self._clear_cached_state()
+ self._last_revision_info_cache = revno, revision_id
+ self._run_post_change_branch_tip_hooks(old_revno, old_revid)
+
+ def basis_tree(self):
+ """See Branch.basis_tree."""
+ return self.repository.revision_tree(self.last_revision())
+
+ def _get_parent_location(self):
+ _locs = ['parent', 'pull', 'x-pull']
+ for l in _locs:
+ try:
+ return self._transport.get_bytes(l).strip('\n')
+ except errors.NoSuchFile:
+ pass
+ return None
+
+ def get_stacked_on_url(self):
+ raise errors.UnstackableBranchFormat(self._format, self.user_url)
+
+ def set_push_location(self, location):
+ """See Branch.set_push_location."""
+ self.get_config().set_user_option(
+ 'push_location', location,
+ store=_mod_config.STORE_LOCATION_NORECURSE)
+
+ def _set_parent_location(self, url):
+ if url is None:
+ self._transport.delete('parent')
+ else:
+ self._transport.put_bytes('parent', url + '\n',
+ mode=self.bzrdir._get_file_mode())
+
+ @needs_write_lock
+ def unbind(self):
+ """If bound, unbind"""
+ return self.set_bound_location(None)
+
+ @needs_write_lock
+ def bind(self, other):
+ """Bind this branch to the branch other.
+
+ This does not push or pull data between the branches, though it does
+ check for divergence to raise an error when the branches are not
+ either the same, or one a prefix of the other. That behaviour may not
+ be useful, so that check may be removed in future.
+
+ :param other: The branch to bind to
+ :type other: Branch
+ """
+ # TODO: jam 20051230 Consider checking if the target is bound
+ # It is debatable whether you should be able to bind to
+ # a branch which is itself bound.
+ # Committing is obviously forbidden,
+ # but binding itself may not be.
+ # Since we *have* to check at commit time, we don't
+ # *need* to check here
+
+ # we want to raise diverged if:
+ # last_rev is not in the other_last_rev history, AND
+ # other_last_rev is not in our history, and do it without pulling
+ # history around
+ self.set_bound_location(other.base)
+
+ def get_bound_location(self):
+ try:
+ return self._transport.get_bytes('bound')[:-1]
+ except errors.NoSuchFile:
+ return None
+
+ @needs_read_lock
+ def get_master_branch(self, possible_transports=None):
+ """Return the branch we are bound to.
+
+ :return: Either a Branch, or None
+ """
+ if self._master_branch_cache is None:
+ self._master_branch_cache = self._get_master_branch(
+ possible_transports)
+ return self._master_branch_cache
+
+ def _get_master_branch(self, possible_transports):
+ bound_loc = self.get_bound_location()
+ if not bound_loc:
+ return None
+ try:
+ return Branch.open(bound_loc,
+ possible_transports=possible_transports)
+ except (errors.NotBranchError, errors.ConnectionError), e:
+ raise errors.BoundBranchConnectionFailure(
+ self, bound_loc, e)
+
+ @needs_write_lock
+ def set_bound_location(self, location):
+ """Set the target where this branch is bound to.
+
+ :param location: URL to the target branch
+ """
+ self._master_branch_cache = None
+ if location:
+ self._transport.put_bytes('bound', location+'\n',
+ mode=self.bzrdir._get_file_mode())
+ else:
+ try:
+ self._transport.delete('bound')
+ except errors.NoSuchFile:
+ return False
+ return True
+
+ @needs_write_lock
+ def update(self, possible_transports=None):
+ """Synchronise this branch with the master branch if any.
+
+ :return: None or the last_revision that was pivoted out during the
+ update.
+ """
+ master = self.get_master_branch(possible_transports)
+ if master is not None:
+ old_tip = _mod_revision.ensure_null(self.last_revision())
+ self.pull(master, overwrite=True)
+ if self.repository.get_graph().is_ancestor(old_tip,
+ _mod_revision.ensure_null(self.last_revision())):
+ return None
+ return old_tip
+ return None
+
+ def _read_last_revision_info(self):
+ revision_string = self._transport.get_bytes('last-revision')
+ revno, revision_id = revision_string.rstrip('\n').split(' ', 1)
+ revision_id = cache_utf8.get_cached_utf8(revision_id)
+ revno = int(revno)
+ return revno, revision_id
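+ # The 'last-revision' file holds "<revno> <revision_id>\n"; a freshly
+ # initialized branch stores '0 null:\n' (see the initialize() methods of
+ # the formats above), which parses to (0, 'null:').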
+
+ def _write_last_revision_info(self, revno, revision_id):
+ """Simply write out the revision id, with no checks.
+
+ Use set_last_revision_info to perform this safely.
+
+ Does not update the revision_history cache.
+ """
+ revision_id = _mod_revision.ensure_null(revision_id)
+ out_string = '%d %s\n' % (revno, revision_id)
+ self._transport.put_bytes('last-revision', out_string,
+ mode=self.bzrdir._get_file_mode())
+
+ @needs_write_lock
+ def update_feature_flags(self, updated_flags):
+ """Update the feature flags for this branch.
+
+ :param updated_flags: Dictionary mapping feature names to necessities.
+ A necessity can be None to indicate the feature should be removed.
+ """
+ self._format._update_feature_flags(updated_flags)
+ self.control_transport.put_bytes('format', self._format.as_string())
+
+
+class BzrBranch8(BzrBranch):
+ """A branch that stores tree-reference locations."""
+
+ def _open_hook(self, possible_transports=None):
+ if self._ignore_fallbacks:
+ return
+ if possible_transports is None:
+ possible_transports = [self.bzrdir.root_transport]
+ try:
+ url = self.get_stacked_on_url()
+ except (errors.UnstackableRepositoryFormat, errors.NotStacked,
+ errors.UnstackableBranchFormat):
+ pass
+ else:
+ for hook in Branch.hooks['transform_fallback_location']:
+ url = hook(self, url)
+ if url is None:
+ hook_name = Branch.hooks.get_hook_name(hook)
+ raise AssertionError(
+ "'transform_fallback_location' hook %s returned "
+ "None, not a URL." % hook_name)
+ self._activate_fallback_location(url,
+ possible_transports=possible_transports)
+
+ def __init__(self, *args, **kwargs):
+ self._ignore_fallbacks = kwargs.get('ignore_fallbacks', False)
+ super(BzrBranch8, self).__init__(*args, **kwargs)
+ self._last_revision_info_cache = None
+ self._reference_info = None
+
+ def _clear_cached_state(self):
+ super(BzrBranch8, self)._clear_cached_state()
+ self._last_revision_info_cache = None
+ self._reference_info = None
+
+ def _check_history_violation(self, revision_id):
+ current_revid = self.last_revision()
+ last_revision = _mod_revision.ensure_null(current_revid)
+ if _mod_revision.is_null(last_revision):
+ return
+ graph = self.repository.get_graph()
+ for lh_ancestor in graph.iter_lefthand_ancestry(revision_id):
+ if lh_ancestor == current_revid:
+ return
+ raise errors.AppendRevisionsOnlyViolation(self.user_url)
+
+ def _gen_revision_history(self):
+ """Generate the revision history from last revision
+ """
+ last_revno, last_revision = self.last_revision_info()
+ self._extend_partial_history(stop_index=last_revno-1)
+ return list(reversed(self._partial_revision_history_cache))
+
+ @needs_write_lock
+ def _set_parent_location(self, url):
+ """Set the parent branch"""
+ self._set_config_location('parent_location', url, make_relative=True)
+
+ @needs_read_lock
+ def _get_parent_location(self):
+ """Set the parent branch"""
+ return self._get_config_location('parent_location')
+
+ @needs_write_lock
+ def _set_all_reference_info(self, info_dict):
+ """Replace all reference info stored in a branch.
+
+ :param info_dict: A dict of {file_id: (tree_path, branch_location)}
+ """
+ s = StringIO()
+ writer = rio.RioWriter(s)
+ for key, (tree_path, branch_location) in info_dict.iteritems():
+ stanza = rio.Stanza(file_id=key, tree_path=tree_path,
+ branch_location=branch_location)
+ writer.write_stanza(stanza)
+ self._transport.put_bytes('references', s.getvalue())
+ self._reference_info = info_dict
+
+ @needs_read_lock
+ def _get_all_reference_info(self):
+ """Return all the reference info stored in a branch.
+
+ :return: A dict of {file_id: (tree_path, branch_location)}
+ """
+ if self._reference_info is not None:
+ return self._reference_info
+ rio_file = self._transport.get('references')
+ try:
+ stanzas = rio.read_stanzas(rio_file)
+ info_dict = dict((s['file_id'], (s['tree_path'],
+ s['branch_location'])) for s in stanzas)
+ finally:
+ rio_file.close()
+ self._reference_info = info_dict
+ return info_dict
+
+ def set_reference_info(self, file_id, tree_path, branch_location):
+ """Set the branch location to use for a tree reference.
+
+ :param file_id: The file-id of the tree reference.
+ :param tree_path: The path of the tree reference in the tree.
+ :param branch_location: The location of the branch to retrieve tree
+ references from.
+ """
+ info_dict = self._get_all_reference_info()
+ info_dict[file_id] = (tree_path, branch_location)
+ if None in (tree_path, branch_location):
+ if tree_path is not None:
+ raise ValueError('tree_path must be None when branch_location'
+ ' is None.')
+ if branch_location is not None:
+ raise ValueError('branch_location must be None when tree_path'
+ ' is None.')
+ del info_dict[file_id]
+ self._set_all_reference_info(info_dict)
+
+ def get_reference_info(self, file_id):
+ """Get the tree_path and branch_location for a tree reference.
+
+ :return: a tuple of (tree_path, branch_location)
+ """
+ return self._get_all_reference_info().get(file_id, (None, None))
+
+ def reference_parent(self, file_id, path, possible_transports=None):
+ """Return the parent branch for a tree-reference file_id.
+
+ :param file_id: The file_id of the tree reference
+ :param path: The path of the file_id in the tree
+ :return: A branch associated with the file_id
+ """
+ branch_location = self.get_reference_info(file_id)[1]
+ if branch_location is None:
+ return Branch.reference_parent(self, file_id, path,
+ possible_transports)
+ branch_location = urlutils.join(self.user_url, branch_location)
+ return Branch.open(branch_location,
+ possible_transports=possible_transports)
+
+ def set_push_location(self, location):
+ """See Branch.set_push_location."""
+ self._set_config_location('push_location', location)
+
+ def set_bound_location(self, location):
+ """See Branch.set_push_location."""
+ self._master_branch_cache = None
+ result = None
+ conf = self.get_config_stack()
+ if location is None:
+ if not conf.get('bound'):
+ return False
+ else:
+ conf.set('bound', 'False')
+ return True
+ else:
+ self._set_config_location('bound_location', location,
+ config=conf)
+ conf.set('bound', 'True')
+ return True
+
+ def _get_bound_location(self, bound):
+ """Return the bound location in the config file.
+
+ Return None if the bound parameter does not match"""
+ conf = self.get_config_stack()
+ if conf.get('bound') != bound:
+ return None
+ return self._get_config_location('bound_location', config=conf)
+
+ def get_bound_location(self):
+ """See Branch.get_bound_location."""
+ return self._get_bound_location(True)
+
+ def get_old_bound_location(self):
+ """See Branch.get_old_bound_location"""
+ return self._get_bound_location(False)
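+ # In other words: while the 'bound' option is true, get_bound_location()
+ # returns the stored 'bound_location' and get_old_bound_location() returns
+ # None; after unbinding (bound false) the two swap roles.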
+
+ def get_stacked_on_url(self):
+ # you can always ask for the URL; but you might not be able to use it
+ # if the repo can't support stacking.
+ ## self._check_stackable_repo()
+ # stacked_on_location is only ever defined in branch.conf, so don't
+ # waste effort reading the whole stack of config files.
+ conf = _mod_config.BranchOnlyStack(self)
+ stacked_url = self._get_config_location('stacked_on_location',
+ config=conf)
+ if stacked_url is None:
+ raise errors.NotStacked(self)
+ return stacked_url.encode('utf-8')
+
+ @needs_read_lock
+ def get_rev_id(self, revno, history=None):
+ """Find the revision id of the specified revno."""
+ if revno == 0:
+ return _mod_revision.NULL_REVISION
+
+ last_revno, last_revision_id = self.last_revision_info()
+ if revno <= 0 or revno > last_revno:
+ raise errors.NoSuchRevision(self, revno)
+
+ if history is not None:
+ return history[revno - 1]
+
+ index = last_revno - revno
+ if len(self._partial_revision_history_cache) <= index:
+ self._extend_partial_history(stop_index=index)
+ if len(self._partial_revision_history_cache) > index:
+ return self._partial_revision_history_cache[index]
+ else:
+ raise errors.NoSuchRevision(self, revno)
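+ # For example, with last_revno == 10: get_rev_id(10) reads index 0 of the
+ # partial revision history cache (the tip) and get_rev_id(1) reads index 9,
+ # extending the cache on demand via _extend_partial_history().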
+
+ @needs_read_lock
+ def revision_id_to_revno(self, revision_id):
+ """Given a revision id, return its revno"""
+ if _mod_revision.is_null(revision_id):
+ return 0
+ try:
+ index = self._partial_revision_history_cache.index(revision_id)
+ except ValueError:
+ try:
+ self._extend_partial_history(stop_revision=revision_id)
+ except errors.RevisionNotPresent, e:
+ raise errors.GhostRevisionsHaveNoRevno(revision_id, e.revision_id)
+ index = len(self._partial_revision_history_cache) - 1
+ if index < 0:
+ raise errors.NoSuchRevision(self, revision_id)
+ if self._partial_revision_history_cache[index] != revision_id:
+ raise errors.NoSuchRevision(self, revision_id)
+ return self.revno() - index
+
+
+class BzrBranch7(BzrBranch8):
+ """A branch with support for a fallback repository."""
+
+ def set_reference_info(self, file_id, tree_path, branch_location):
+ Branch.set_reference_info(self, file_id, tree_path, branch_location)
+
+ def get_reference_info(self, file_id):
+ return Branch.get_reference_info(self, file_id)
+
+ def reference_parent(self, file_id, path, possible_transports=None):
+ return Branch.reference_parent(self, file_id, path,
+ possible_transports)
+
+
+class BzrBranch6(BzrBranch7):
+ """See BzrBranchFormat6 for the capabilities of this branch.
+
+ This subclass of BzrBranch7 disables the new features BzrBranch7 added,
+ i.e. stacking.
+ """
+
+ def get_stacked_on_url(self):
+ raise errors.UnstackableBranchFormat(self._format, self.user_url)
+
+
+######################################################################
+# results of operations
+
+
+class _Result(object):
+
+ def _show_tag_conficts(self, to_file):
+ if not getattr(self, 'tag_conflicts', None):
+ return
+ to_file.write('Conflicting tags:\n')
+ for name, value1, value2 in self.tag_conflicts:
+ to_file.write(' %s\n' % (name, ))
+
+
+class PullResult(_Result):
+ """Result of a Branch.pull operation.
+
+ :ivar old_revno: Revision number before pull.
+ :ivar new_revno: Revision number after pull.
+ :ivar old_revid: Tip revision id before pull.
+ :ivar new_revid: Tip revision id after pull.
+ :ivar source_branch: Source (local) branch object. (read locked)
+ :ivar master_branch: Master branch of the target, or the target itself
+ if it has no master.
+ :ivar local_branch: The target branch if there is a master, else None.
+ :ivar target_branch: Target/destination branch object. (write locked)
+ :ivar tag_conflicts: A list of tag conflicts, see BasicTags.merge_to
+ :ivar tag_updates: A dict with new tags, see BasicTags.merge_to
+ """
+
+ def report(self, to_file):
+ tag_conflicts = getattr(self, "tag_conflicts", None)
+ tag_updates = getattr(self, "tag_updates", None)
+ if not is_quiet():
+ if self.old_revid != self.new_revid:
+ to_file.write('Now on revision %d.\n' % self.new_revno)
+ if tag_updates:
+ to_file.write('%d tag(s) updated.\n' % len(tag_updates))
+ if self.old_revid == self.new_revid and not tag_updates:
+ if not tag_conflicts:
+ to_file.write('No revisions or tags to pull.\n')
+ else:
+ to_file.write('No revisions to pull.\n')
+ self._show_tag_conficts(to_file)
+
+
+class BranchPushResult(_Result):
+ """Result of a Branch.push operation.
+
+ :ivar old_revno: Revision number (eg 10) of the target before push.
+ :ivar new_revno: Revision number (eg 12) of the target after push.
+ :ivar old_revid: Tip revision id (eg joe@foo.com-1234234-aoeua34) of target
+ before the push.
+ :ivar new_revid: Tip revision id (eg joe@foo.com-5676566-boa234a) of target
+ after the push.
+ :ivar source_branch: Source branch object that the push was from. This is
+ read locked, and generally is a local (and thus low latency) branch.
+ :ivar master_branch: If target is a bound branch, the master branch of
+ target, or target itself. Always write locked.
+ :ivar target_branch: The direct Branch where data is being sent (write
+ locked).
+ :ivar local_branch: If the target is a bound branch this will be the
+ target, otherwise it will be None.
+ """
+
+ def report(self, to_file):
+ # TODO: This function gets passed a to_file, but then
+ # ignores it and calls note() instead. This is also
+ # inconsistent with PullResult(), which writes to stdout.
+ # -- JRV20110901, bug #838853
+ tag_conflicts = getattr(self, "tag_conflicts", None)
+ tag_updates = getattr(self, "tag_updates", None)
+ if not is_quiet():
+ if self.old_revid != self.new_revid:
+ note(gettext('Pushed up to revision %d.') % self.new_revno)
+ if tag_updates:
+ note(ngettext('%d tag updated.', '%d tags updated.', len(tag_updates)) % len(tag_updates))
+ if self.old_revid == self.new_revid and not tag_updates:
+ if not tag_conflicts:
+ note(gettext('No new revisions or tags to push.'))
+ else:
+ note(gettext('No new revisions to push.'))
+ self._show_tag_conficts(to_file)
+
+
+class BranchCheckResult(object):
+ """Results of checking branch consistency.
+
+ :see: Branch.check
+ """
+
+ def __init__(self, branch):
+ self.branch = branch
+ self.errors = []
+
+ def report_results(self, verbose):
+ """Report the check results via trace.note.
+
+ :param verbose: Requests more detailed display of what was checked,
+ if any.
+ """
+ note(gettext('checked branch {0} format {1}').format(
+ self.branch.user_url, self.branch._format))
+ for error in self.errors:
+ note(gettext('found error:%s'), error)
+
+
+class Converter5to6(object):
+ """Perform an in-place upgrade of format 5 to format 6"""
+
+ def convert(self, branch):
+ # Data for 5 and 6 can peacefully coexist.
+ format = BzrBranchFormat6()
+ new_branch = format.open(branch.bzrdir, _found=True)
+
+ # Copy source data into target
+ new_branch._write_last_revision_info(*branch.last_revision_info())
+ new_branch.lock_write()
+ try:
+ new_branch.set_parent(branch.get_parent())
+ new_branch.set_bound_location(branch.get_bound_location())
+ new_branch.set_push_location(branch.get_push_location())
+ finally:
+ new_branch.unlock()
+
+ # New branch has no tags by default
+ new_branch.tags._set_tag_dict({})
+
+ # Copying done; now update target format
+ new_branch._transport.put_bytes('format',
+ format.as_string(),
+ mode=new_branch.bzrdir._get_file_mode())
+
+ # Clean up old files
+ new_branch._transport.delete('revision-history')
+ branch.lock_write()
+ try:
+ try:
+ branch.set_parent(None)
+ except errors.NoSuchFile:
+ pass
+ branch.set_bound_location(None)
+ finally:
+ branch.unlock()
+
+
+class Converter6to7(object):
+ """Perform an in-place upgrade of format 6 to format 7"""
+
+ def convert(self, branch):
+ format = BzrBranchFormat7()
+ branch._set_config_location('stacked_on_location', '')
+ # update target format
+ branch._transport.put_bytes('format', format.as_string())
+
+
+class Converter7to8(object):
+ """Perform an in-place upgrade of format 7 to format 8"""
+
+ def convert(self, branch):
+ format = BzrBranchFormat8()
+ branch._transport.put_bytes('references', '')
+ # update target format
+ branch._transport.put_bytes('format', format.as_string())
+
+
+class InterBranch(InterObject):
+ """This class represents operations taking place between two branches.
+
+ Its instances have methods like pull() and push() and contain
+ references to the source and target repositories these operations
+ can be carried out on.
+ """
+
+ _optimisers = []
+ """The available optimised InterBranch types."""
+
+ @classmethod
+ def _get_branch_formats_to_test(klass):
+ """Return an iterable of format tuples for testing.
+
+ :return: An iterable of (from_format, to_format) to use when testing
+ this InterBranch class. Each InterBranch class should define this
+ method itself.
+ """
+ raise NotImplementedError(klass._get_branch_formats_to_test)
+
+ @needs_write_lock
+ def pull(self, overwrite=False, stop_revision=None,
+ possible_transports=None, local=False):
+ """Mirror source into target branch.
+
+ The target branch is considered to be 'local', having low latency.
+
+ :returns: PullResult instance
+ """
+ raise NotImplementedError(self.pull)
+
+ @needs_write_lock
+ def push(self, overwrite=False, stop_revision=None, lossy=False,
+ _override_hook_source_branch=None):
+ """Mirror the source branch into the target branch.
+
+ The source branch is considered to be 'local', having low latency.
+ """
+ raise NotImplementedError(self.push)
+
+ @needs_write_lock
+ def copy_content_into(self, revision_id=None):
+ """Copy the content of source into target
+
+ revision_id: if not None, the revision history in the new branch will
+ be truncated to end with revision_id.
+ """
+ raise NotImplementedError(self.copy_content_into)
+
+ @needs_write_lock
+ def fetch(self, stop_revision=None, limit=None):
+ """Fetch revisions.
+
+ :param stop_revision: Last revision to fetch
+ :param limit: Optional rough limit of revisions to fetch
+ """
+ raise NotImplementedError(self.fetch)
+
+
+def _fix_overwrite_type(overwrite):
+ if isinstance(overwrite, bool):
+ if overwrite:
+ return ["history", "tags"]
+ else:
+ return []
+ return overwrite
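+# For example: _fix_overwrite_type(True) -> ["history", "tags"],
+# _fix_overwrite_type(False) -> [], and a list such as ["tags"] is passed
+# through unchanged, so callers can simply test '"history" in overwrite'.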
+
+
+class GenericInterBranch(InterBranch):
+ """InterBranch implementation that uses public Branch functions."""
+
+ @classmethod
+ def is_compatible(klass, source, target):
+ # GenericInterBranch uses the public Branch API, so it is always compatible
+ return True
+
+ @classmethod
+ def _get_branch_formats_to_test(klass):
+ return [(format_registry.get_default(), format_registry.get_default())]
+
+ @classmethod
+ def unwrap_format(klass, format):
+ if isinstance(format, remote.RemoteBranchFormat):
+ format._ensure_real()
+ return format._custom_format
+ return format
+
+ @needs_write_lock
+ def copy_content_into(self, revision_id=None):
+ """Copy the content of source into target
+
+ revision_id: if not None, the revision history in the new branch will
+ be truncated to end with revision_id.
+ """
+ self.source.update_references(self.target)
+ self.source._synchronize_history(self.target, revision_id)
+ try:
+ parent = self.source.get_parent()
+ except errors.InaccessibleParent, e:
+ mutter('parent was not accessible to copy: %s', e)
+ else:
+ if parent:
+ self.target.set_parent(parent)
+ if self.source._push_should_merge_tags():
+ self.source.tags.merge_to(self.target.tags)
+
+ @needs_write_lock
+ def fetch(self, stop_revision=None, limit=None):
+ if self.target.base == self.source.base:
+ return (0, [])
+ self.source.lock_read()
+ try:
+ fetch_spec_factory = fetch.FetchSpecFactory()
+ fetch_spec_factory.source_branch = self.source
+ fetch_spec_factory.source_branch_stop_revision_id = stop_revision
+ fetch_spec_factory.source_repo = self.source.repository
+ fetch_spec_factory.target_repo = self.target.repository
+ fetch_spec_factory.target_repo_kind = fetch.TargetRepoKinds.PREEXISTING
+ fetch_spec_factory.limit = limit
+ fetch_spec = fetch_spec_factory.make_fetch_spec()
+ return self.target.repository.fetch(self.source.repository,
+ fetch_spec=fetch_spec)
+ finally:
+ self.source.unlock()
+
+ @needs_write_lock
+ def _update_revisions(self, stop_revision=None, overwrite=False,
+ graph=None):
+ other_revno, other_last_revision = self.source.last_revision_info()
+ stop_revno = None # unknown
+ if stop_revision is None:
+ stop_revision = other_last_revision
+ if _mod_revision.is_null(stop_revision):
+ # if there are no commits, we're done.
+ return
+ stop_revno = other_revno
+
+ # what's the current last revision, before we fetch [and change it
+ # possibly]
+ last_rev = _mod_revision.ensure_null(self.target.last_revision())
+ # we fetch here so that we don't process data twice in the common
+ # case of having something to pull, and so that the check for
+ # already merged can operate on the just fetched graph, which will
+ # be cached in memory.
+ self.fetch(stop_revision=stop_revision)
+ # Check to see if one is an ancestor of the other
+ if not overwrite:
+ if graph is None:
+ graph = self.target.repository.get_graph()
+ if self.target._check_if_descendant_or_diverged(
+ stop_revision, last_rev, graph, self.source):
+ # stop_revision is a descendant of last_rev, but we aren't
+ # overwriting, so we're done.
+ return
+ if stop_revno is None:
+ if graph is None:
+ graph = self.target.repository.get_graph()
+ this_revno, this_last_revision = \
+ self.target.last_revision_info()
+ stop_revno = graph.find_distance_to_null(stop_revision,
+ [(other_last_revision, other_revno),
+ (this_last_revision, this_revno)])
+ self.target.set_last_revision_info(stop_revno, stop_revision)
+
+ @needs_write_lock
+ def pull(self, overwrite=False, stop_revision=None,
+ possible_transports=None, run_hooks=True,
+ _override_hook_target=None, local=False):
+ """Pull from source into self, updating my master if any.
+
+ :param run_hooks: Private parameter - if false, this branch
+ is being called because it's the master of the primary branch,
+ so it should not run its hooks.
+ """
+ bound_location = self.target.get_bound_location()
+ if local and not bound_location:
+ raise errors.LocalRequiresBoundBranch()
+ master_branch = None
+ source_is_master = False
+ if bound_location:
+ # bound_location comes from a config file, some care has to be
+ # taken to relate it to source.user_url
+ normalized = urlutils.normalize_url(bound_location)
+ try:
+ relpath = self.source.user_transport.relpath(normalized)
+ source_is_master = (relpath == '')
+ except (errors.PathNotChild, errors.InvalidURL):
+ source_is_master = False
+ if not local and bound_location and not source_is_master:
+ # not pulling from master, so we need to update master.
+ master_branch = self.target.get_master_branch(possible_transports)
+ master_branch.lock_write()
+ try:
+ if master_branch:
+ # pull from source into master.
+ master_branch.pull(self.source, overwrite, stop_revision,
+ run_hooks=False)
+ return self._pull(overwrite,
+ stop_revision, _hook_master=master_branch,
+ run_hooks=run_hooks,
+ _override_hook_target=_override_hook_target,
+ merge_tags_to_master=not source_is_master)
+ finally:
+ if master_branch:
+ master_branch.unlock()
+
+ def push(self, overwrite=False, stop_revision=None, lossy=False,
+ _override_hook_source_branch=None):
+ """See InterBranch.push.
+
+ This is the basic concrete implementation of push()
+
+ :param _override_hook_source_branch: If specified, run the hooks
+ passing this Branch as the source, rather than self. This is for
+ use of RemoteBranch, where push is delegated to the underlying
+ vfs-based Branch.
+ """
+ if lossy:
+ raise errors.LossyPushToSameVCS(self.source, self.target)
+ # TODO: Public option to disable running hooks - should be trivial but
+ # needs tests.
+
+ op = cleanup.OperationWithCleanups(self._push_with_bound_branches)
+ op.add_cleanup(self.source.lock_read().unlock)
+ op.add_cleanup(self.target.lock_write().unlock)
+ return op.run(overwrite, stop_revision,
+ _override_hook_source_branch=_override_hook_source_branch)
+
+ def _basic_push(self, overwrite, stop_revision):
+ """Basic implementation of push without bound branches or hooks.
+
+ Must be called with source read locked and target write locked.
+ """
+ result = BranchPushResult()
+ result.source_branch = self.source
+ result.target_branch = self.target
+ result.old_revno, result.old_revid = self.target.last_revision_info()
+ self.source.update_references(self.target)
+ overwrite = _fix_overwrite_type(overwrite)
+ if result.old_revid != stop_revision:
+ # We assume that during 'push' this repository is closer than
+ # the target.
+ graph = self.source.repository.get_graph(self.target.repository)
+ self._update_revisions(stop_revision,
+ overwrite=("history" in overwrite),
+ graph=graph)
+ if self.source._push_should_merge_tags():
+ result.tag_updates, result.tag_conflicts = (
+ self.source.tags.merge_to(
+ self.target.tags, "tags" in overwrite))
+ result.new_revno, result.new_revid = self.target.last_revision_info()
+ return result
+
+ def _push_with_bound_branches(self, operation, overwrite, stop_revision,
+ _override_hook_source_branch=None):
+ """Push from source into target, and into target's master if any.
+ """
+ def _run_hooks():
+ if _override_hook_source_branch:
+ result.source_branch = _override_hook_source_branch
+ for hook in Branch.hooks['post_push']:
+ hook(result)
+
+ bound_location = self.target.get_bound_location()
+ if bound_location and self.target.base != bound_location:
+ # there is a master branch.
+ #
+ # XXX: Why the second check? Is it even supported for a branch to
+ # be bound to itself? -- mbp 20070507
+ master_branch = self.target.get_master_branch()
+ master_branch.lock_write()
+ operation.add_cleanup(master_branch.unlock)
+ # push into the master from the source branch.
+ master_inter = InterBranch.get(self.source, master_branch)
+ master_inter._basic_push(overwrite, stop_revision)
+ # and push into the target branch from the source. Note that
+ # we push from the source branch again, because it's considered
+ # the highest bandwidth repository.
+ result = self._basic_push(overwrite, stop_revision)
+ result.master_branch = master_branch
+ result.local_branch = self.target
+ else:
+ master_branch = None
+ # no master branch
+ result = self._basic_push(overwrite, stop_revision)
+ # TODO: Why set master_branch and local_branch if there's no
+ # binding? Maybe cleaner to just leave them unset? -- mbp
+ # 20070504
+ result.master_branch = self.target
+ result.local_branch = None
+ _run_hooks()
+ return result
+
+ def _pull(self, overwrite=False, stop_revision=None,
+ possible_transports=None, _hook_master=None, run_hooks=True,
+ _override_hook_target=None, local=False,
+ merge_tags_to_master=True):
+ """See Branch.pull.
+
+ This function is the core worker, used by GenericInterBranch.pull to
+ avoid duplication when pulling source->master and source->local.
+
+ :param _hook_master: Private parameter - set the branch to
+ be supplied as the master to pull hooks.
+ :param run_hooks: Private parameter - if false, this branch
+ is being called because it's the master of the primary branch,
+ so it should not run its hooks.
+ :param _override_hook_target: Private parameter - set the branch to be
+ supplied as the target_branch to pull hooks.
+ :param local: Only update the local branch, and not the bound branch.
+ """
+ # This type of branch can't be bound.
+ if local:
+ raise errors.LocalRequiresBoundBranch()
+ result = PullResult()
+ result.source_branch = self.source
+ if _override_hook_target is None:
+ result.target_branch = self.target
+ else:
+ result.target_branch = _override_hook_target
+ self.source.lock_read()
+ try:
+ # We assume that during 'pull' the target repository is closer than
+ # the source one.
+ self.source.update_references(self.target)
+ graph = self.target.repository.get_graph(self.source.repository)
+ # TODO: Branch formats should have a flag that indicates
+ # that revno's are expensive, and pull() should honor that flag.
+ # -- JRV20090506
+ result.old_revno, result.old_revid = \
+ self.target.last_revision_info()
+ overwrite = _fix_overwrite_type(overwrite)
+ self._update_revisions(stop_revision,
+ overwrite=("history" in overwrite),
+ graph=graph)
+ # TODO: The old revid should be specified when merging tags,
+ # so a tags implementation that versions tags can only
+ # pull in the most recent changes. -- JRV20090506
+ result.tag_updates, result.tag_conflicts = (
+ self.source.tags.merge_to(self.target.tags,
+ "tags" in overwrite,
+ ignore_master=not merge_tags_to_master))
+ result.new_revno, result.new_revid = self.target.last_revision_info()
+ if _hook_master:
+ result.master_branch = _hook_master
+ result.local_branch = result.target_branch
+ else:
+ result.master_branch = result.target_branch
+ result.local_branch = None
+ if run_hooks:
+ for hook in Branch.hooks['post_pull']:
+ hook(result)
+ finally:
+ self.source.unlock()
+ return result
+
+
+InterBranch.register_optimiser(GenericInterBranch)
diff --git a/bzrlib/branchbuilder.py b/bzrlib/branchbuilder.py
new file mode 100644
index 0000000..ecb996f
--- /dev/null
+++ b/bzrlib/branchbuilder.py
@@ -0,0 +1,303 @@
+# Copyright (C) 2007, 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Utility for create branches with particular contents."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ controldir,
+ commit,
+ errors,
+ memorytree,
+ revision,
+ )
+
+
+class BranchBuilder(object):
+ r"""A BranchBuilder aids creating Branches with particular shapes.
+
+ The expected way to use BranchBuilder is to construct a
+ BranchBuilder on the transport you want your branch on, and then call
+ appropriate build_ methods on it to get the shape of history you want.
+
+ This is meant as a helper for the test suite, not as a general class for
+ real data.
+
+ For instance:
+
+ >>> from bzrlib.transport.memory import MemoryTransport
+ >>> builder = BranchBuilder(MemoryTransport("memory:///"))
+ >>> builder.start_series()
+ >>> builder.build_snapshot('rev-id', None, [
+ ... ('add', ('', 'root-id', 'directory', '')),
+ ... ('add', ('filename', 'f-id', 'file', 'content\n'))])
+ 'rev-id'
+ >>> builder.build_snapshot('rev2-id', ['rev-id'],
+ ... [('modify', ('f-id', 'new-content\n'))])
+ 'rev2-id'
+ >>> builder.finish_series()
+ >>> branch = builder.get_branch()
+
+ :ivar _tree: This is a private member which is not meant to be modified by
+ users of this class. While a 'series' is in progress, it should hold a
+ MemoryTree with the contents of the last commit (ready to be modified
+ by the next build_snapshot command) with a held write lock. Outside of
+ a series in progress, it should be None.
+ """
+
+ def __init__(self, transport=None, format=None, branch=None):
+ """Construct a BranchBuilder on transport.
+
+ :param transport: The transport the branch should be created on.
+ If the path of the transport does not exist but its parent does,
+ it will be created.
+ :param format: Either a BzrDirFormat, or the name of a format in the
+ controldir format registry for the branch to be built.
+ :param branch: An already constructed branch to use. This param is
+ mutually exclusive with the transport and format params.
+ """
+ if branch is not None:
+ if format is not None:
+ raise AssertionError(
+ "branch and format kwargs are mutually exclusive")
+ if transport is not None:
+ raise AssertionError(
+ "branch and transport kwargs are mutually exclusive")
+ self._branch = branch
+ else:
+ if not transport.has('.'):
+ transport.mkdir('.')
+ if format is None:
+ format = 'default'
+ if isinstance(format, str):
+ format = controldir.format_registry.make_bzrdir(format)
+ self._branch = controldir.ControlDir.create_branch_convenience(
+ transport.base, format=format, force_new_tree=False)
+ self._tree = None
+
+ def build_commit(self, parent_ids=None, allow_leftmost_as_ghost=False,
+ **commit_kwargs):
+ """Build a commit on the branch.
+
+ This makes a commit with no real file content for when you only want
+ to look at the revision graph structure.
+
+ :param commit_kwargs: Arguments to pass through to commit, such as
+ timestamp.
+ """
+ if parent_ids is not None:
+ if len(parent_ids) == 0:
+ base_id = revision.NULL_REVISION
+ else:
+ base_id = parent_ids[0]
+ if base_id != self._branch.last_revision():
+ self._move_branch_pointer(base_id,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+ tree = memorytree.MemoryTree.create_on_branch(self._branch)
+ tree.lock_write()
+ try:
+ if parent_ids is not None:
+ tree.set_parent_ids(parent_ids,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+ tree.add('')
+ return self._do_commit(tree, **commit_kwargs)
+ finally:
+ tree.unlock()
+
+ def _do_commit(self, tree, message=None, message_callback=None, **kwargs):
+ reporter = commit.NullCommitReporter()
+ if message is None and message_callback is None:
+ message = u'commit %d' % (self._branch.revno() + 1,)
+ return tree.commit(message, message_callback=message_callback,
+ reporter=reporter,
+ **kwargs)
+
+ def _move_branch_pointer(self, new_revision_id,
+ allow_leftmost_as_ghost=False):
+ """Point self._branch to a different revision id."""
+ self._branch.lock_write()
+ try:
+ # We don't seem to have a simple set_last_revision(), so we
+ # implement it here.
+ cur_revno, cur_revision_id = self._branch.last_revision_info()
+ try:
+ g = self._branch.repository.get_graph()
+ new_revno = g.find_distance_to_null(new_revision_id,
+ [(cur_revision_id, cur_revno)])
+ self._branch.set_last_revision_info(new_revno, new_revision_id)
+ except errors.GhostRevisionsHaveNoRevno:
+ if not allow_leftmost_as_ghost:
+ raise
+ new_revno = 1
+ finally:
+ self._branch.unlock()
+ if self._tree is not None:
+ # We are currently processing a series, but when switching branch
+ # pointers, it is easiest to just create a new memory tree.
+ # That way we are sure to have the right files-on-disk
+ # We are cheating a little bit here, and locking the new tree
+ # before the old tree is unlocked. But that way the branch stays
+ # locked throughout.
+ new_tree = memorytree.MemoryTree.create_on_branch(self._branch)
+ new_tree.lock_write()
+ self._tree.unlock()
+ self._tree = new_tree
+
+ def start_series(self):
+ """We will be creating a series of commits.
+
+ This allows us to hold open the locks while we are processing.
+
+ Make sure to call 'finish_series' when you are done.
+ """
+ if self._tree is not None:
+ raise AssertionError('You cannot start a new series while a'
+ ' series is already going.')
+ self._tree = memorytree.MemoryTree.create_on_branch(self._branch)
+ self._tree.lock_write()
+
+ def finish_series(self):
+ """Call this after start_series to unlock the various objects."""
+ self._tree.unlock()
+ self._tree = None
+
+ def build_snapshot(self, revision_id, parent_ids, actions,
+ message=None, timestamp=None, allow_leftmost_as_ghost=False,
+ committer=None, timezone=None, message_callback=None):
+ """Build a commit, shaped in a specific way.
+
+ Most of the actions are self-explanatory. 'flush' is a special action to
+ break a series of actions into discrete steps so that complex changes
+ (such as unversioning a file-id and re-adding it with a different kind)
+ can be expressed in a way that will clearly work.
+
+ :param revision_id: The handle for the new commit, can be None
+ :param parent_ids: A list of parent_ids to use for the commit.
+ It can be None, which indicates to use the last commit.
+ :param actions: A list of actions to perform. Supported actions are:
+ ('add', ('path', 'file-id', 'kind', 'content' or None))
+ ('modify', ('file-id', 'new-content'))
+ ('unversion', 'file-id')
+ ('rename', ('orig-path', 'new-path'))
+ ('flush', None)
+ :param message: An optional commit message, if not supplied, a default
+ commit message will be written.
+ :param message_callback: A message callback to use for the commit, as
+ per mutabletree.commit.
+ :param timestamp: If non-None, set the timestamp of the commit to this
+ value.
+ :param timezone: An optional timezone for timestamp.
+ :param committer: An optional username to use for commit
+ :param allow_leftmost_as_ghost: True if the leftmost parent should be
+ permitted to be a ghost.
+ :return: The revision_id of the new commit
+ """
+ if parent_ids is not None:
+ if len(parent_ids) == 0:
+ base_id = revision.NULL_REVISION
+ else:
+ base_id = parent_ids[0]
+ if base_id != self._branch.last_revision():
+ self._move_branch_pointer(base_id,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+
+ if self._tree is not None:
+ tree = self._tree
+ else:
+ tree = memorytree.MemoryTree.create_on_branch(self._branch)
+ tree.lock_write()
+ try:
+ if parent_ids is not None:
+ tree.set_parent_ids(parent_ids,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+ # Unfortunately, MemoryTree.add(directory) just creates an
+ # inventory entry. And the only public function to create a
+ # directory is MemoryTree.mkdir() which creates the directory, but
+ # also always adds it. So we have to use a multi-pass setup.
+ pending = _PendingActions()
+ for action, info in actions:
+ if action == 'add':
+ path, file_id, kind, content = info
+ if kind == 'directory':
+ pending.to_add_directories.append((path, file_id))
+ else:
+ pending.to_add_files.append(path)
+ pending.to_add_file_ids.append(file_id)
+ pending.to_add_kinds.append(kind)
+ if content is not None:
+ pending.new_contents[file_id] = content
+ elif action == 'modify':
+ file_id, content = info
+ pending.new_contents[file_id] = content
+ elif action == 'unversion':
+ pending.to_unversion_ids.add(info)
+ elif action == 'rename':
+ from_relpath, to_relpath = info
+ pending.to_rename.append((from_relpath, to_relpath))
+ elif action == 'flush':
+ self._flush_pending(tree, pending)
+ pending = _PendingActions()
+ else:
+ raise ValueError('Unknown build action: "%s"' % (action,))
+ self._flush_pending(tree, pending)
+ return self._do_commit(tree, message=message, rev_id=revision_id,
+ timestamp=timestamp, timezone=timezone, committer=committer,
+ message_callback=message_callback)
+ finally:
+ tree.unlock()
+
+ def _flush_pending(self, tree, pending):
+ """Flush the pending actions in 'pending', i.e. apply them to 'tree'."""
+ for path, file_id in pending.to_add_directories:
+ if path == '':
+ old_id = tree.path2id(path)
+ if old_id is not None and old_id in pending.to_unversion_ids:
+ # We're overwriting this path, no need to unversion
+ pending.to_unversion_ids.discard(old_id)
+ # Special case, because the path already exists
+ tree.add([path], [file_id], ['directory'])
+ else:
+ tree.mkdir(path, file_id)
+ for from_relpath, to_relpath in pending.to_rename:
+ tree.rename_one(from_relpath, to_relpath)
+ if pending.to_unversion_ids:
+ tree.unversion(pending.to_unversion_ids)
+ tree.add(pending.to_add_files, pending.to_add_file_ids, pending.to_add_kinds)
+ for file_id, content in pending.new_contents.iteritems():
+ tree.put_file_bytes_non_atomic(file_id, content)
+
+ def get_branch(self):
+ """Return the branch created by the builder."""
+ return self._branch
+
+
+class _PendingActions(object):
+ """Pending actions for build_snapshot to take.
+
+ This is just a simple class to hold a bunch of the intermediate state of
+ build_snapshot in a single object.
+ """
+
+ def __init__(self):
+ self.to_add_directories = []
+ self.to_add_files = []
+ self.to_add_file_ids = []
+ self.to_add_kinds = []
+ self.new_contents = {}
+ self.to_unversion_ids = set()
+ self.to_rename = []
+
diff --git a/bzrlib/branchfmt/__init__.py b/bzrlib/branchfmt/__init__.py
new file mode 100644
index 0000000..c620e85
--- /dev/null
+++ b/bzrlib/branchfmt/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Branch formats.
+
+This package contains various branch format implementations. Ideally
+all specific format implementations will be moved out of bzrlib.branch
+into this package.
+"""
+
+from __future__ import absolute_import
+
diff --git a/bzrlib/branchfmt/fullhistory.py b/bzrlib/branchfmt/fullhistory.py
new file mode 100644
index 0000000..4d0364d
--- /dev/null
+++ b/bzrlib/branchfmt/fullhistory.py
@@ -0,0 +1,178 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Full history branch formats."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ debug,
+ errors,
+ revision as _mod_revision,
+ )
+
+from bzrlib.branch import (
+ Branch,
+ BranchFormatMetadir,
+ BzrBranch,
+ )
+
+from bzrlib.decorators import (
+ needs_write_lock,
+ )
+from bzrlib.trace import mutter_callsite
+
+
+class FullHistoryBzrBranch(BzrBranch):
+ """Bzr branch which contains the full revision history."""
+
+ @needs_write_lock
+ def set_last_revision_info(self, revno, revision_id):
+ if not revision_id or not isinstance(revision_id, basestring):
+ raise errors.InvalidRevisionId(revision_id=revision_id, branch=self)
+ revision_id = _mod_revision.ensure_null(revision_id)
+ # this old format stores the full history, but this api doesn't
+ # provide it, so we must generate, and might as well check it's
+ # correct
+ history = self._lefthand_history(revision_id)
+ if len(history) != revno:
+ raise AssertionError('%d != %d' % (len(history), revno))
+ self._set_revision_history(history)
+
+ def _read_last_revision_info(self):
+ rh = self._revision_history()
+ revno = len(rh)
+ if revno:
+ return (revno, rh[-1])
+ else:
+ return (0, _mod_revision.NULL_REVISION)
+
+ def _set_revision_history(self, rev_history):
+ if 'evil' in debug.debug_flags:
+ mutter_callsite(3, "set_revision_history scales with history.")
+ check_not_reserved_id = _mod_revision.check_not_reserved_id
+ for rev_id in rev_history:
+ check_not_reserved_id(rev_id)
+ if Branch.hooks['post_change_branch_tip']:
+ # Don't calculate the last_revision_info() if there are no hooks
+ # that will use it.
+ old_revno, old_revid = self.last_revision_info()
+ if len(rev_history) == 0:
+ revid = _mod_revision.NULL_REVISION
+ else:
+ revid = rev_history[-1]
+ self._run_pre_change_branch_tip_hooks(len(rev_history), revid)
+ self._write_revision_history(rev_history)
+ self._clear_cached_state()
+ self._cache_revision_history(rev_history)
+ if Branch.hooks['post_change_branch_tip']:
+ self._run_post_change_branch_tip_hooks(old_revno, old_revid)
+
+ def _write_revision_history(self, history):
+ """Factored out of set_revision_history.
+
+ This performs the actual writing to disk.
+ It is intended to be called by set_revision_history."""
+ self._transport.put_bytes(
+ 'revision-history', '\n'.join(history),
+ mode=self.bzrdir._get_file_mode())
+
+ def _gen_revision_history(self):
+ history = self._transport.get_bytes('revision-history').split('\n')
+ if history[-1:] == ['']:
+ # There shouldn't be a trailing newline, but just in case.
+ history.pop()
+ return history
+
+ def _synchronize_history(self, destination, revision_id):
+ if not isinstance(destination, FullHistoryBzrBranch):
+ super(BzrBranch, self)._synchronize_history(
+ destination, revision_id)
+ return
+ if revision_id == _mod_revision.NULL_REVISION:
+ new_history = []
+ else:
+ new_history = self._revision_history()
+ if revision_id is not None and new_history != []:
+ try:
+ new_history = new_history[:new_history.index(revision_id) + 1]
+ except ValueError:
+ rev = self.repository.get_revision(revision_id)
+ new_history = rev.get_history(self.repository)[1:]
+ destination._set_revision_history(new_history)
+
+ @needs_write_lock
+ def generate_revision_history(self, revision_id, last_rev=None,
+ other_branch=None):
+ """Create a new revision history that will finish with revision_id.
+
+ :param revision_id: the new tip to use.
+ :param last_rev: The previous last_revision. If not None, then this
+ must be an ancestor of revision_id, or DivergedBranches is raised.
+ :param other_branch: The other branch that DivergedBranches should
+ raise with respect to.
+ """
+ self._set_revision_history(self._lefthand_history(revision_id,
+ last_rev, other_branch))
+
+
+class BzrBranch5(FullHistoryBzrBranch):
+ """A format 5 branch. This supports new features over plain branches.
+
+ It has support for a master_branch which is the data for bound branches.
+ """
+
+
+class BzrBranchFormat5(BranchFormatMetadir):
+ """Bzr branch format 5.
+
+ This format has:
+ - a revision-history file.
+ - a format string
+ - a lock dir guarding the branch itself
+ - all of this stored in a branch/ subdirectory
+ - works with shared repositories.
+
+ This format is new in bzr 0.8.
+ """
+
+ def _branch_class(self):
+ return BzrBranch5
+
+ @classmethod
+ def get_format_string(cls):
+ """See BranchFormat.get_format_string()."""
+ return "Bazaar-NG branch format 5\n"
+
+ def get_format_description(self):
+ """See BranchFormat.get_format_description()."""
+ return "Branch format 5"
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch of this format in a_bzrdir."""
+ if append_revisions_only:
+ raise errors.UpgradeRequired(a_bzrdir.user_url)
+ utf8_files = [('revision-history', ''),
+ ('branch-name', ''),
+ ]
+ return self._initialize_helper(a_bzrdir, utf8_files, name, repository)
+
+ def supports_tags(self):
+ return False
+
+
+
diff --git a/bzrlib/breakin.py b/bzrlib/breakin.py
new file mode 100644
index 0000000..d55baa0
--- /dev/null
+++ b/bzrlib/breakin.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import os
+import signal
+
+
+_breakin_signal_number = None
+_breakin_signal_name = None
+
+
+def _debug(signal_number, interrupted_frame):
+ import pdb
+ import sys
+ sys.stderr.write("** %s received, entering debugger\n"
+ "** Type 'c' to continue or 'q' to stop the process\n"
+ "** Or %s again to quit (and possibly dump core)\n"
+ % (_breakin_signal_name, _breakin_signal_name))
+ # It seems that on Windows, when sys.stderr is to a PIPE, then we need to
+ # flush. Not sure why it is buffered, but that seems to be the case.
+ sys.stderr.flush()
+ # restore default meaning so that you can kill the process by hitting it
+ # twice
+ signal.signal(_breakin_signal_number, signal.SIG_DFL)
+ try:
+ pdb.set_trace()
+ finally:
+ signal.signal(_breakin_signal_number, _debug)
+
+
+def determine_signal():
+ global _breakin_signal_number
+ global _breakin_signal_name
+ if _breakin_signal_number is not None:
+ return _breakin_signal_number
+ # Note: As near as I can tell, Windows is the only one to define SIGBREAK,
+ # and other platforms define SIGQUIT. There doesn't seem to be a
+ # platform that defines both.
+ # -- jam 2009-07-30
+ sigquit = getattr(signal, 'SIGQUIT', None)
+ sigbreak = getattr(signal, 'SIGBREAK', None)
+ if sigquit is not None:
+ _breakin_signal_number = sigquit
+ _breakin_signal_name = 'SIGQUIT'
+ elif sigbreak is not None:
+ _breakin_signal_number = sigbreak
+ _breakin_signal_name = 'SIGBREAK'
+
+ return _breakin_signal_number
+
+
+def hook_debugger_to_signal():
+ """Add a signal handler so we drop into the debugger.
+
+ On Unix, this is hooked into SIGQUIT (C-\\), and on Windows, this is
+ hooked into SIGBREAK (C-Pause).
+ """
+
+ # when sigquit (C-\) or sigbreak (C-Pause) is received go into pdb
+ if os.environ.get('BZR_SIGQUIT_PDB', '1') == '0':
+ # User explicitly requested we don't support this
+ return
+ sig = determine_signal()
+ if sig is None:
+ return
+ # print 'hooking into %s' % (_breakin_signal_name,)
+ signal.signal(sig, _debug)
diff --git a/bzrlib/btree_index.py b/bzrlib/btree_index.py
new file mode 100644
index 0000000..6d60490
--- /dev/null
+++ b/bzrlib/btree_index.py
@@ -0,0 +1,1608 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""B+Tree indices"""
+
+from __future__ import absolute_import
+
+import cStringIO
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import bisect
+import math
+import tempfile
+import zlib
+""")
+
+from bzrlib import (
+ chunk_writer,
+ debug,
+ errors,
+ fifo_cache,
+ index,
+ lru_cache,
+ osutils,
+ static_tuple,
+ trace,
+ transport,
+ )
+from bzrlib.index import _OPTION_NODE_REFS, _OPTION_KEY_ELEMENTS, _OPTION_LEN
+
+
+_BTSIGNATURE = "B+Tree Graph Index 2\n"
+_OPTION_ROW_LENGTHS = "row_lengths="
+_LEAF_FLAG = "type=leaf\n"
+_INTERNAL_FLAG = "type=internal\n"
+_INTERNAL_OFFSET = "offset="
+
+_RESERVED_HEADER_BYTES = 120
+_PAGE_SIZE = 4096
+
+# 4K per page: 4MB - 1000 entries
+_NODE_CACHE_SIZE = 1000
+
+
+class _BuilderRow(object):
+ """The stored state accumulated while writing out a row in the index.
+
+ :ivar spool: A temporary file used to accumulate nodes for this row
+ in the tree.
+ :ivar nodes: The count of nodes emitted so far.
+ """
+
+ def __init__(self):
+ """Create a _BuilderRow."""
+ self.nodes = 0
+ self.spool = None  # tempfile.TemporaryFile(prefix='bzr-index-row-')
+ self.writer = None
+
+ def finish_node(self, pad=True):
+ byte_lines, _, padding = self.writer.finish()
+ if self.nodes == 0:
+ self.spool = cStringIO.StringIO()
+ # padded note:
+ self.spool.write("\x00" * _RESERVED_HEADER_BYTES)
+ elif self.nodes == 1:
+ # We got bigger than 1 node, switch to a temp file
+ spool = tempfile.TemporaryFile(prefix='bzr-index-row-')
+ spool.write(self.spool.getvalue())
+ self.spool = spool
+ skipped_bytes = 0
+ if not pad and padding:
+ del byte_lines[-1]
+ skipped_bytes = padding
+ self.spool.writelines(byte_lines)
+ remainder = (self.spool.tell() + skipped_bytes) % _PAGE_SIZE
+ if remainder != 0:
+ raise AssertionError("incorrect node length: %d, %d"
+ % (self.spool.tell(), remainder))
+ self.nodes += 1
+ self.writer = None
+
+
+class _InternalBuilderRow(_BuilderRow):
+ """The stored state accumulated while writing out internal rows."""
+
+ def finish_node(self, pad=True):
+ if not pad:
+ raise AssertionError("Must pad internal nodes only.")
+ _BuilderRow.finish_node(self)
+
+
+class _LeafBuilderRow(_BuilderRow):
+ """The stored state accumulated while writing out a leaf rows."""
+
+
+class BTreeBuilder(index.GraphIndexBuilder):
+ """A Builder for B+Tree based Graph indices.
+
+ The resulting graph has the structure:
+
+ _SIGNATURE OPTIONS NODES
+ _SIGNATURE := 'B+Tree Graph Index 1' NEWLINE
+ OPTIONS := REF_LISTS KEY_ELEMENTS LENGTH
+ REF_LISTS := 'node_ref_lists=' DIGITS NEWLINE
+ KEY_ELEMENTS := 'key_elements=' DIGITS NEWLINE
+ LENGTH := 'len=' DIGITS NEWLINE
+ ROW_LENGTHS := 'row_lengths' DIGITS (COMMA DIGITS)*
+ NODES := NODE_COMPRESSED*
+ NODE_COMPRESSED:= COMPRESSED_BYTES{4096}
+ NODE_RAW := INTERNAL | LEAF
+ INTERNAL := INTERNAL_FLAG POINTERS
+ LEAF := LEAF_FLAG ROWS
+ KEY_ELEMENT := Not-whitespace-utf8
+ KEY := KEY_ELEMENT (NULL KEY_ELEMENT)*
+ ROWS := ROW*
+ ROW := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
+ ABSENT := 'a'
+ REFERENCES := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
+ REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
+ REFERENCE := KEY
+ VALUE := no-newline-no-null-bytes
+ """
+
+ def __init__(self, reference_lists=0, key_elements=1, spill_at=100000):
+ """See GraphIndexBuilder.__init__.
+
+ :param spill_at: Optional parameter controlling the maximum number
+ of nodes that BTreeBuilder will hold in memory.
+ """
+ index.GraphIndexBuilder.__init__(self, reference_lists=reference_lists,
+ key_elements=key_elements)
+ self._spill_at = spill_at
+ self._backing_indices = []
+ # A map of {key: (node_refs, value)}
+ self._nodes = {}
+ # Indicate it hasn't been built yet
+ self._nodes_by_key = None
+ self._optimize_for_size = False
+
+ def add_node(self, key, value, references=()):
+ """Add a node to the index.
+
+ If adding the node causes the builder to reach its spill_at threshold,
+ disk spilling will be triggered.
+
+ :param key: The key. keys are non-empty tuples containing
+ as many whitespace-free utf8 bytestrings as the key length
+ defined for this index.
+ :param references: An iterable of iterables of keys. Each is a
+ reference to another key.
+ :param value: The value to associate with the key. It may be any
+ bytes as long as it does not contain \\0 or \\n.
+ """
+ # Ensure that 'key' is a StaticTuple
+ key = static_tuple.StaticTuple.from_sequence(key).intern()
+ # we don't care about absent_references
+ node_refs, _ = self._check_key_ref_value(key, references, value)
+ if key in self._nodes:
+ raise errors.BadIndexDuplicateKey(key, self)
+ self._nodes[key] = static_tuple.StaticTuple(node_refs, value)
+ if self._nodes_by_key is not None and self._key_length > 1:
+ self._update_nodes_by_key(key, value, node_refs)
+ if len(self._nodes) < self._spill_at:
+ return
+ self._spill_mem_keys_to_disk()
+
+ def _spill_mem_keys_to_disk(self):
+ """Write the in memory keys down to disk to cap memory consumption.
+
+ If we already have some keys written to disk, we will combine them so
+ as to preserve the sorted order. The algorithm for combining uses
+ powers of two. So on the first spill, write all mem nodes into a
+ single index. On the second spill, combine the mem nodes with the nodes
+ on disk to create a 2x sized disk index and get rid of the first index.
+ On the third spill, create a single new disk index, which will contain
+ the mem nodes, and preserve the existing 2x sized index. On the fourth,
+ combine mem with the first and second indexes, creating a new one of
+ size 4x. On the fifth create a single new one, etc.
+ """
+ if self._combine_backing_indices:
+ (new_backing_file, size,
+ backing_pos) = self._spill_mem_keys_and_combine()
+ else:
+ new_backing_file, size = self._spill_mem_keys_without_combining()
+ # Note: The transport here isn't strictly needed, because we will use
+ # direct access to the new_backing._file object
+ new_backing = BTreeGraphIndex(transport.get_transport_from_path('.'),
+ '<temp>', size)
+ # GC will clean up the file
+ new_backing._file = new_backing_file
+ if self._combine_backing_indices:
+ if len(self._backing_indices) == backing_pos:
+ self._backing_indices.append(None)
+ self._backing_indices[backing_pos] = new_backing
+ for backing_pos in range(backing_pos):
+ self._backing_indices[backing_pos] = None
+ else:
+ self._backing_indices.append(new_backing)
+ self._nodes = {}
+ self._nodes_by_key = None
+
+ def _spill_mem_keys_without_combining(self):
+ return self._write_nodes(self._iter_mem_nodes(), allow_optimize=False)
+
+ def _spill_mem_keys_and_combine(self):
+ iterators_to_combine = [self._iter_mem_nodes()]
+ pos = -1
+ for pos, backing in enumerate(self._backing_indices):
+ if backing is None:
+ pos -= 1
+ break
+ iterators_to_combine.append(backing.iter_all_entries())
+ backing_pos = pos + 1
+ new_backing_file, size = \
+ self._write_nodes(self._iter_smallest(iterators_to_combine),
+ allow_optimize=False)
+ return new_backing_file, size, backing_pos
+
+ def add_nodes(self, nodes):
+ """Add nodes to the index.
+
+ :param nodes: An iterable of (key, node_refs, value) entries to add.
+ """
+ if self.reference_lists:
+ for (key, value, node_refs) in nodes:
+ self.add_node(key, value, node_refs)
+ else:
+ for (key, value) in nodes:
+ self.add_node(key, value)
+
+ def _iter_mem_nodes(self):
+ """Iterate over the nodes held in memory."""
+ nodes = self._nodes
+ if self.reference_lists:
+ for key in sorted(nodes):
+ references, value = nodes[key]
+ yield self, key, value, references
+ else:
+ for key in sorted(nodes):
+ references, value = nodes[key]
+ yield self, key, value
+
+ def _iter_smallest(self, iterators_to_combine):
+ if len(iterators_to_combine) == 1:
+ for value in iterators_to_combine[0]:
+ yield value
+ return
+ current_values = []
+ for iterator in iterators_to_combine:
+ try:
+ current_values.append(iterator.next())
+ except StopIteration:
+ current_values.append(None)
+ last = None
+ while True:
+ # Decorate candidates with the value to allow 2.4's min to be used.
+ candidates = [(item[1][1], item) for item
+ in enumerate(current_values) if item[1] is not None]
+ if not len(candidates):
+ return
+ selected = min(candidates)
+ # undecorate back to (pos, node)
+ selected = selected[1]
+ if last == selected[1][1]:
+ raise errors.BadIndexDuplicateKey(last, self)
+ last = selected[1][1]
+ # Yield, with self as the index
+ yield (self,) + selected[1][1:]
+ pos = selected[0]
+ try:
+ current_values[pos] = iterators_to_combine[pos].next()
+ except StopIteration:
+ current_values[pos] = None
+
+ def _add_key(self, string_key, line, rows, allow_optimize=True):
+ """Add a key to the current chunk.
+
+ :param string_key: The key to add.
+ :param line: The fully serialised key and value.
+ :param allow_optimize: If set to False, prevent setting the optimize
+ flag when writing out. This is used by the _spill_mem_keys_to_disk
+ functionality.
+ """
+ new_leaf = False
+ if rows[-1].writer is None:
+ # opening a new leaf chunk;
+ new_leaf = True
+ for pos, internal_row in enumerate(rows[:-1]):
+ # flesh out any internal nodes that are needed to
+ # preserve the height of the tree
+ if internal_row.writer is None:
+ length = _PAGE_SIZE
+ if internal_row.nodes == 0:
+ length -= _RESERVED_HEADER_BYTES # padded
+ if allow_optimize:
+ optimize_for_size = self._optimize_for_size
+ else:
+ optimize_for_size = False
+ internal_row.writer = chunk_writer.ChunkWriter(length, 0,
+ optimize_for_size=optimize_for_size)
+ internal_row.writer.write(_INTERNAL_FLAG)
+ internal_row.writer.write(_INTERNAL_OFFSET +
+ str(rows[pos + 1].nodes) + "\n")
+ # add a new leaf
+ length = _PAGE_SIZE
+ if rows[-1].nodes == 0:
+ length -= _RESERVED_HEADER_BYTES # padded
+ rows[-1].writer = chunk_writer.ChunkWriter(length,
+ optimize_for_size=self._optimize_for_size)
+ rows[-1].writer.write(_LEAF_FLAG)
+ if rows[-1].writer.write(line):
+ # if we failed to write, despite having an empty page to write to,
+ # then line is too big. raising the error avoids infinite recursion
+ # searching for a suitably large page that will not be found.
+ if new_leaf:
+ raise errors.BadIndexKey(string_key)
+ # this key did not fit in the node:
+ rows[-1].finish_node()
+ key_line = string_key + "\n"
+ new_row = True
+ for row in reversed(rows[:-1]):
+ # Mark the start of the next node in the node above. If it
+ # doesn't fit then propagate upwards until we find one that
+ # it does fit into.
+ if row.writer.write(key_line):
+ row.finish_node()
+ else:
+ # We've found a node that can handle the pointer.
+ new_row = False
+ break
+ # If we reached the current root without being able to mark the
+ # division point, then we need a new root:
+ if new_row:
+ # We need a new row
+ if 'index' in debug.debug_flags:
+ trace.mutter('Inserting new global row.')
+ new_row = _InternalBuilderRow()
+ reserved_bytes = 0
+ rows.insert(0, new_row)
+ # This will be padded, hence subtracting _RESERVED_HEADER_BYTES
+ new_row.writer = chunk_writer.ChunkWriter(
+ _PAGE_SIZE - _RESERVED_HEADER_BYTES,
+ reserved_bytes,
+ optimize_for_size=self._optimize_for_size)
+ new_row.writer.write(_INTERNAL_FLAG)
+ new_row.writer.write(_INTERNAL_OFFSET +
+ str(rows[1].nodes - 1) + "\n")
+ new_row.writer.write(key_line)
+ self._add_key(string_key, line, rows, allow_optimize=allow_optimize)
+
+ def _write_nodes(self, node_iterator, allow_optimize=True):
+ """Write node_iterator out as a B+Tree.
+
+ :param node_iterator: An iterator of sorted nodes. Each node should
+ match the output given by iter_all_entries.
+ :param allow_optimize: If set to False, prevent setting the optimize
+ flag when writing out. This is used by the _spill_mem_keys_to_disk
+ functionality.
+ :return: A file handle for a temporary file containing a B+Tree for
+ the nodes.
+ """
+ # The index rows - rows[0] is the root, rows[1] is the layer under it
+ # etc.
+ rows = []
+ # forward sorted by key. In future we may consider topological sorting,
+ # at the cost of table scans for direct lookup, or a second index for
+ # direct lookup
+ key_count = 0
+ # A stack with the number of nodes of each size. 0 is the root node
+ # and must always be 1 (if there are any nodes in the tree).
+ self.row_lengths = []
+ # Loop over all nodes adding them to the bottom row
+ # (rows[-1]). When we finish a chunk in a row,
+ # propagate the key that didn't fit (comes after the chunk) to the
+ # row above, transitively.
+ for node in node_iterator:
+ if key_count == 0:
+ # First key triggers the first row
+ rows.append(_LeafBuilderRow())
+ key_count += 1
+ string_key, line = _btree_serializer._flatten_node(node,
+ self.reference_lists)
+ self._add_key(string_key, line, rows, allow_optimize=allow_optimize)
+ for row in reversed(rows):
+ pad = (type(row) != _LeafBuilderRow)
+ row.finish_node(pad=pad)
+ lines = [_BTSIGNATURE]
+ lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
+ lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
+ lines.append(_OPTION_LEN + str(key_count) + '\n')
+ row_lengths = [row.nodes for row in rows]
+ lines.append(_OPTION_ROW_LENGTHS + ','.join(map(str, row_lengths)) + '\n')
+ if row_lengths and row_lengths[-1] > 1:
+ result = tempfile.NamedTemporaryFile(prefix='bzr-index-')
+ else:
+ result = cStringIO.StringIO()
+ result.writelines(lines)
+ position = sum(map(len, lines))
+ root_row = True
+ if position > _RESERVED_HEADER_BYTES:
+ raise AssertionError("Could not fit the header in the"
+ " reserved space: %d > %d"
+ % (position, _RESERVED_HEADER_BYTES))
+ # write the rows out:
+ for row in rows:
+ reserved = _RESERVED_HEADER_BYTES # reserved space for first node
+ row.spool.flush()
+ row.spool.seek(0)
+ # copy nodes to the finalised file.
+ # Special case the first node as it may be prefixed
+ node = row.spool.read(_PAGE_SIZE)
+ result.write(node[reserved:])
+ if len(node) == _PAGE_SIZE:
+ result.write("\x00" * (reserved - position))
+ position = 0 # Only the root row actually has an offset
+ copied_len = osutils.pumpfile(row.spool, result)
+ if copied_len != (row.nodes - 1) * _PAGE_SIZE:
+ if type(row) != _LeafBuilderRow:
+ raise AssertionError("Incorrect amount of data copied"
+ " expected: %d, got: %d"
+ % ((row.nodes - 1) * _PAGE_SIZE,
+ copied_len))
+ result.flush()
+ size = result.tell()
+ result.seek(0)
+ return result, size
+
+ def finish(self):
+ """Finalise the index.
+
+ :return: A file handle for a temporary file containing the nodes added
+ to the index.
+ """
+ return self._write_nodes(self.iter_all_entries())[0]
+
+ def iter_all_entries(self):
+ """Iterate over all keys within the index
+
+ :return: An iterable of (index, key, value, reference_lists). There is
+ no defined order for the result iteration - it will be in the most
+ efficient order for the index (in this case dictionary hash order).
+ """
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(3,
+ "iter_all_entries scales with size of history.")
+ # Doing serial rather than ordered would be faster; but this shouldn't
+ # be getting called routinely anyway.
+ iterators = [self._iter_mem_nodes()]
+ for backing in self._backing_indices:
+ if backing is not None:
+ iterators.append(backing.iter_all_entries())
+ if len(iterators) == 1:
+ return iterators[0]
+ return self._iter_smallest(iterators)
+
+ def iter_entries(self, keys):
+ """Iterate over keys within the index.
+
+ :param keys: An iterable providing the keys to be retrieved.
+ :return: An iterable of (index, key, value, reference_lists). There is no
+ defined order for the result iteration - it will be in the most
+ efficient order for the index (keys iteration order in this case).
+ """
+ keys = set(keys)
+ # Note: We don't use keys.intersection() here. If you read the C api,
+ # set.intersection(other) special cases when other is a set and
+ # will iterate the smaller of the two and lookup in the other.
+ # It does *not* do this for any other type (even dict, unlike
+ # some other set functions.) Since we expect keys is generally <<
+ # self._nodes, it is faster to iterate over it in a list
+ # comprehension
+ nodes = self._nodes
+ local_keys = [key for key in keys if key in nodes]
+ if self.reference_lists:
+ for key in local_keys:
+ node = nodes[key]
+ yield self, key, node[1], node[0]
+ else:
+ for key in local_keys:
+ node = nodes[key]
+ yield self, key, node[1]
+ # Find things that are in backing indices that have not been handled
+ # yet.
+ if not self._backing_indices:
+ return # We won't find anything there either
+ # Remove all of the keys that we found locally
+ keys.difference_update(local_keys)
+ for backing in self._backing_indices:
+ if backing is None:
+ continue
+ if not keys:
+ return
+ for node in backing.iter_entries(keys):
+ keys.remove(node[1])
+ yield (self,) + node[1:]
+
+ def iter_entries_prefix(self, keys):
+ """Iterate over keys within the index using prefix matching.
+
+ Prefix matching is applied within the tuple of a key, not to within
+ the bytestring of each key element. e.g. if you have the keys ('foo',
+ 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
+ only the former key is returned.
+
+ :param keys: An iterable providing the key prefixes to be retrieved.
+ Each key prefix takes the form of a tuple the length of a key, but
+ with the last N elements 'None' rather than a regular bytestring.
+ The first element cannot be 'None'.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys with a matching prefix to those supplied. No additional keys
+ will be returned, and every match that is in the index will be
+ returned.
+ """
+ # XXX: Too much duplication with the GraphIndex class; consider finding
+ # a good place to pull out the actual common logic.
+ keys = set(keys)
+ if not keys:
+ return
+ for backing in self._backing_indices:
+ if backing is None:
+ continue
+ for node in backing.iter_entries_prefix(keys):
+ yield (self,) + node[1:]
+ if self._key_length == 1:
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ try:
+ node = self._nodes[key]
+ except KeyError:
+ continue
+ if self.reference_lists:
+ yield self, key, node[1], node[0]
+ else:
+ yield self, key, node[1]
+ return
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ # find what it refers to:
+ key_dict = self._get_nodes_by_key()
+ elements = list(key)
+ # find the subdict to return
+ try:
+ while len(elements) and elements[0] is not None:
+ key_dict = key_dict[elements[0]]
+ elements.pop(0)
+ except KeyError:
+ # a non-existent lookup.
+ continue
+ if len(elements):
+ dicts = [key_dict]
+ while dicts:
+ key_dict = dicts.pop(-1)
+ # can't be empty or would not exist
+ item, value = key_dict.iteritems().next()
+ if type(value) == dict:
+ # push keys
+ dicts.extend(key_dict.itervalues())
+ else:
+ # yield keys
+ for value in key_dict.itervalues():
+ yield (self, ) + tuple(value)
+ else:
+ yield (self, ) + key_dict
+
+ def _get_nodes_by_key(self):
+ if self._nodes_by_key is None:
+ nodes_by_key = {}
+ if self.reference_lists:
+ for key, (references, value) in self._nodes.iteritems():
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key, value, references
+ else:
+ for key, (references, value) in self._nodes.iteritems():
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key, value
+ self._nodes_by_key = nodes_by_key
+ return self._nodes_by_key
+
+ def key_count(self):
+ """Return an estimate of the number of keys in this index.
+
+ For InMemoryGraphIndex the estimate is exact.
+ """
+ return len(self._nodes) + sum(backing.key_count() for backing in
+ self._backing_indices if backing is not None)
+
+ def validate(self):
+ """In memory index's have no known corruption at the moment."""
+
+
+class _LeafNode(dict):
+ """A leaf node for a serialised B+Tree index."""
+
+ __slots__ = ('min_key', 'max_key', '_keys')
+
+ def __init__(self, bytes, key_length, ref_list_length):
+ """Parse bytes to create a leaf node object."""
+ # splitlines mangles the \r delimiters.. don't use it.
+ key_list = _btree_serializer._parse_leaf_lines(bytes,
+ key_length, ref_list_length)
+ if key_list:
+ self.min_key = key_list[0][0]
+ self.max_key = key_list[-1][0]
+ else:
+ self.min_key = self.max_key = None
+ super(_LeafNode, self).__init__(key_list)
+ self._keys = dict(self)
+
+ def all_items(self):
+ """Return a sorted list of (key, (value, refs)) items"""
+ items = self.items()
+ items.sort()
+ return items
+
+ def all_keys(self):
+ """Return a sorted list of all keys."""
+ keys = self.keys()
+ keys.sort()
+ return keys
+
+
+class _InternalNode(object):
+ """An internal node for a serialised B+Tree index."""
+
+ __slots__ = ('keys', 'offset')
+
+ def __init__(self, bytes):
+ """Parse bytes to create an internal node object."""
+ # splitlines mangles the \r delimiters.. don't use it.
+ self.keys = self._parse_lines(bytes.split('\n'))
+
+ def _parse_lines(self, lines):
+ nodes = []
+ self.offset = int(lines[1][7:])
+ as_st = static_tuple.StaticTuple.from_sequence
+ for line in lines[2:]:
+ if line == '':
+ break
+ nodes.append(as_st(map(intern, line.split('\0'))).intern())
+ return nodes
+
+
+class BTreeGraphIndex(object):
+ """Access to nodes via the standard GraphIndex interface for B+Tree's.
+
+ Individual nodes are held in a LRU cache. This holds the root node in
+ memory except when very large walks are done.
+ """
+
+ def __init__(self, transport, name, size, unlimited_cache=False,
+ offset=0):
+ """Create a B+Tree index object on the index name.
+
+ :param transport: The transport to read data for the index from.
+ :param name: The file name of the index on transport.
+ :param size: Optional size of the index in bytes. This allows
+ compatibility with the GraphIndex API, as well as ensuring that
+ the initial read (to read the root node header) can be done
+ without over-reading even on empty indices, and on small indices
+ allows single-IO to read the entire index.
+ :param unlimited_cache: If set to True, then instead of using an
+ LRUCache with size _NODE_CACHE_SIZE, we will use a dict and always
+ cache all leaf nodes.
+ :param offset: The start of the btree index data isn't byte 0 of the
+ file. Instead it starts at some point later.
+ """
+ self._transport = transport
+ self._name = name
+ self._size = size
+ self._file = None
+ self._recommended_pages = self._compute_recommended_pages()
+ self._root_node = None
+ self._base_offset = offset
+ self._leaf_factory = _LeafNode
+ # Default max size is 100,000 leaf values
+ self._leaf_value_cache = None # lru_cache.LRUCache(100*1000)
+ if unlimited_cache:
+ self._leaf_node_cache = {}
+ self._internal_node_cache = {}
+ else:
+ self._leaf_node_cache = lru_cache.LRUCache(_NODE_CACHE_SIZE)
+ # We use a FIFO here just to prevent possible blowout. However, a
+ # 300k record btree has only 3k leaf nodes, and only 20 internal
+ # nodes. A value of 100 scales to ~100*100*100 = 1M records.
+ self._internal_node_cache = fifo_cache.FIFOCache(100)
+ self._key_count = None
+ self._row_lengths = None
+ self._row_offsets = None # Start of each row, [-1] is the end
+
+ def __eq__(self, other):
+ """Equal when self and other were created with the same parameters."""
+ return (
+ type(self) == type(other) and
+ self._transport == other._transport and
+ self._name == other._name and
+ self._size == other._size)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _get_and_cache_nodes(self, nodes):
+ """Read nodes and cache them in the lru.
+
+ The nodes list supplied is sorted and then read from disk, each node
+ being inserted into the _node_cache.
+
+ Note: Asking for more nodes than the _node_cache can contain will
+ result in some of the results being immediately discarded, to prevent
+ this an assertion is raised if more nodes are asked for than are
+ cachable.
+
+ :return: A dict of {node_pos: node}
+ """
+ found = {}
+ start_of_leaves = None
+ for node_pos, node in self._read_nodes(sorted(nodes)):
+ if node_pos == 0: # Special case
+ self._root_node = node
+ else:
+ if start_of_leaves is None:
+ start_of_leaves = self._row_offsets[-2]
+ if node_pos < start_of_leaves:
+ self._internal_node_cache[node_pos] = node
+ else:
+ self._leaf_node_cache[node_pos] = node
+ found[node_pos] = node
+ return found
+
+ def _compute_recommended_pages(self):
+ """Convert transport's recommended_page_size into btree pages.
+
+ recommended_page_size is in bytes, we want to know how many _PAGE_SIZE
+ pages fit in that length.
+ """
+ recommended_read = self._transport.recommended_page_size()
+ recommended_pages = int(math.ceil(recommended_read /
+ float(_PAGE_SIZE)))
+ return recommended_pages
+
+ def _compute_total_pages_in_index(self):
+ """How many pages are in the index.
+
+ If we have read the header we will use the value stored there.
+ Otherwise it will be computed based on the length of the index.
+ """
+ if self._size is None:
+ raise AssertionError('_compute_total_pages_in_index should not be'
+ ' called when self._size is None')
+ if self._root_node is not None:
+ # This is the number of pages as defined by the header
+ return self._row_offsets[-1]
+ # This is the number of pages as defined by the size of the index. They
+ # should be identical.
+ total_pages = int(math.ceil(self._size / float(_PAGE_SIZE)))
+ return total_pages
+
+ def _expand_offsets(self, offsets):
+ """Find extra pages to download.
+
+ The idea is that we always want to make big-enough requests (like 64kB
+ for http), so that we don't waste round trips. So given the entries
+ that we already have cached and the new pages being downloaded figure
+ out what other pages we might want to read.
+
+ See also doc/developers/btree_index_prefetch.txt for more details.
+
+ :param offsets: The offsets to be read
+ :return: A list of offsets to download
+ """
+ if 'index' in debug.debug_flags:
+ trace.mutter('expanding: %s\toffsets: %s', self._name, offsets)
+
+ if len(offsets) >= self._recommended_pages:
+ # Don't add more, we are already requesting more than enough
+ if 'index' in debug.debug_flags:
+ trace.mutter(' not expanding large request (%s >= %s)',
+ len(offsets), self._recommended_pages)
+ return offsets
+ if self._size is None:
+ # Don't try anything, because we don't know where the file ends
+ if 'index' in debug.debug_flags:
+ trace.mutter(' not expanding without knowing index size')
+ return offsets
+ total_pages = self._compute_total_pages_in_index()
+ cached_offsets = self._get_offsets_to_cached_pages()
+ # If reading recommended_pages would read the rest of the index, just
+ # do so.
+ if total_pages - len(cached_offsets) <= self._recommended_pages:
+ # Read whatever is left
+ if cached_offsets:
+ expanded = [x for x in xrange(total_pages)
+ if x not in cached_offsets]
+ else:
+ expanded = range(total_pages)
+ if 'index' in debug.debug_flags:
+ trace.mutter(' reading all unread pages: %s', expanded)
+ return expanded
+
+ if self._root_node is None:
+ # ATM on the first read of the root node of a large index, we don't
+ # bother pre-reading any other pages. This is because the
+ # likelihood of actually reading interesting pages is very low.
+ # See doc/developers/btree_index_prefetch.txt for a discussion, and
+ # a possible implementation when we are guessing that the second
+ # layer index is small
+ final_offsets = offsets
+ else:
+ tree_depth = len(self._row_lengths)
+ if len(cached_offsets) < tree_depth and len(offsets) == 1:
+ # We haven't read enough to justify expansion
+ # If we are only going to read the root node, and 1 leaf node,
+ # then it isn't worth expanding our request. Once we've read at
+ # least 2 nodes, then we are probably doing a search, and we
+ # start expanding our requests.
+ if 'index' in debug.debug_flags:
+ trace.mutter(' not expanding on first reads')
+ return offsets
+ final_offsets = self._expand_to_neighbors(offsets, cached_offsets,
+ total_pages)
+
+ final_offsets = sorted(final_offsets)
+ if 'index' in debug.debug_flags:
+ trace.mutter('expanded: %s', final_offsets)
+ return final_offsets
+
+ def _expand_to_neighbors(self, offsets, cached_offsets, total_pages):
+ """Expand requests to neighbors until we have enough pages.
+
+ This is called from _expand_offsets after policy has determined that we
+ want to expand.
+ We only want to expand requests within a given layer. We cheat a little
+ bit and assume all requests will be in the same layer. This is true
+ given the current design, but if it changes this algorithm may perform
+ oddly.
+
+ :param offsets: requested offsets
+ :param cached_offsets: offsets for pages we currently have cached
+ :return: A set() of offsets after expansion
+ """
+ final_offsets = set(offsets)
+ first = end = None
+ new_tips = set(final_offsets)
+ while len(final_offsets) < self._recommended_pages and new_tips:
+ next_tips = set()
+ for pos in new_tips:
+ if first is None:
+ first, end = self._find_layer_first_and_end(pos)
+ previous = pos - 1
+ if (previous > 0
+ and previous not in cached_offsets
+ and previous not in final_offsets
+ and previous >= first):
+ next_tips.add(previous)
+ after = pos + 1
+ if (after < total_pages
+ and after not in cached_offsets
+ and after not in final_offsets
+ and after < end):
+ next_tips.add(after)
+ # This would keep us from going bigger than
+ # recommended_pages by only expanding the first offsets.
+ # However, if we are making a 'wide' request, it is
+ # reasonable to expand all points equally.
+ # if len(final_offsets) > recommended_pages:
+ # break
+ final_offsets.update(next_tips)
+ new_tips = next_tips
+ return final_offsets
+
+ def clear_cache(self):
+ """Clear out any cached/memoized values.
+
+ This can be called at any time, but generally it is used when we have
+ extracted some information, but don't expect to be requesting any more
+ from this index.
+ """
+ # Note that we don't touch self._root_node or self._internal_node_cache
+ # We don't expect either of those to be big, and it can save
+ # round-trips in the future. We may re-evaluate this if InternalNode
+ # memory starts to be an issue.
+ self._leaf_node_cache.clear()
+
+ def external_references(self, ref_list_num):
+ if self._root_node is None:
+ self._get_root_node()
+ if ref_list_num + 1 > self.node_ref_lists:
+ raise ValueError('No ref list %d, index has %d ref lists'
+ % (ref_list_num, self.node_ref_lists))
+ keys = set()
+ refs = set()
+ for node in self.iter_all_entries():
+ keys.add(node[1])
+ refs.update(node[3][ref_list_num])
+ return refs - keys
+
+ def _find_layer_first_and_end(self, offset):
+ """Find the start/stop nodes for the layer corresponding to offset.
+
+ :return: (first, end)
+ first is the first node in this layer
+ end is the first node of the next layer
+ """
+ first = end = 0
+ for roffset in self._row_offsets:
+ first = end
+ end = roffset
+ if offset < roffset:
+ break
+ return first, end
+
+ def _get_offsets_to_cached_pages(self):
+ """Determine what nodes we already have cached."""
+ cached_offsets = set(self._internal_node_cache.keys())
+ cached_offsets.update(self._leaf_node_cache.keys())
+ if self._root_node is not None:
+ cached_offsets.add(0)
+ return cached_offsets
+
+ def _get_root_node(self):
+ if self._root_node is None:
+ # We may not have a root node yet
+ self._get_internal_nodes([0])
+ return self._root_node
+
+ def _get_nodes(self, cache, node_indexes):
+ found = {}
+ needed = []
+ for idx in node_indexes:
+ if idx == 0 and self._root_node is not None:
+ found[0] = self._root_node
+ continue
+ try:
+ found[idx] = cache[idx]
+ except KeyError:
+ needed.append(idx)
+ if not needed:
+ return found
+ needed = self._expand_offsets(needed)
+ found.update(self._get_and_cache_nodes(needed))
+ return found
+
+ def _get_internal_nodes(self, node_indexes):
+ """Get a node, from cache or disk.
+
+ After getting it, the node will be cached.
+ """
+ return self._get_nodes(self._internal_node_cache, node_indexes)
+
+ def _cache_leaf_values(self, nodes):
+ """Cache directly from key => value, skipping the btree."""
+ if self._leaf_value_cache is not None:
+ for node in nodes.itervalues():
+ for key, value in node.all_items():
+ if key in self._leaf_value_cache:
+ # Don't add the rest of the keys, we've seen this node
+ # before.
+ break
+ self._leaf_value_cache[key] = value
+
+ def _get_leaf_nodes(self, node_indexes):
+ """Get a bunch of nodes, from cache or disk."""
+ found = self._get_nodes(self._leaf_node_cache, node_indexes)
+ self._cache_leaf_values(found)
+ return found
+
+ def iter_all_entries(self):
+ """Iterate over all keys within the index.
+
+ :return: An iterable of (index, key, value) or (index, key, value, reference_lists).
+ The former tuple is used when there are no reference lists in the
+ index, making the API compatible with simple key:value index types.
+ There is no defined order for the result iteration - it will be in
+ the most efficient order for the index.
+ """
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(3,
+ "iter_all_entries scales with size of history.")
+ if not self.key_count():
+ return
+ if self._row_offsets[-1] == 1:
+ # There is only the root node, and we read that via key_count()
+ if self.node_ref_lists:
+ for key, (value, refs) in self._root_node.all_items():
+ yield (self, key, value, refs)
+ else:
+ for key, (value, refs) in self._root_node.all_items():
+ yield (self, key, value)
+ return
+ start_of_leaves = self._row_offsets[-2]
+ end_of_leaves = self._row_offsets[-1]
+ needed_offsets = range(start_of_leaves, end_of_leaves)
+ if needed_offsets == [0]:
+ # Special case when we only have a root node, as we have already
+ # read everything
+ nodes = [(0, self._root_node)]
+ else:
+ nodes = self._read_nodes(needed_offsets)
+ # We iterate strictly in-order so that we can use this function
+ # for spilling index builds to disk.
+ if self.node_ref_lists:
+ for _, node in nodes:
+ for key, (value, refs) in node.all_items():
+ yield (self, key, value, refs)
+ else:
+ for _, node in nodes:
+ for key, (value, refs) in node.all_items():
+ yield (self, key, value)
+
+ @staticmethod
+ def _multi_bisect_right(in_keys, fixed_keys):
+ """Find the positions where each 'in_key' would fit in fixed_keys.
+
+ This is equivalent to doing "bisect_right" on each in_key into
+ fixed_keys
+
+ :param in_keys: A sorted list of keys to match with fixed_keys
+ :param fixed_keys: A sorted list of keys to match against
+ :return: A list of (integer position, [key list]) tuples.
+ """
+ if not in_keys:
+ return []
+ if not fixed_keys:
+ # no pointers in the fixed_keys list, which means everything must
+ # fall to the left.
+ return [(0, in_keys)]
+
+ # TODO: Iterating both lists will generally take M + N steps
+ # Bisecting each key will generally take M * log2 N steps.
+ # If we had an efficient way to compare, we could pick the method
+ # based on which has the fewer number of steps.
+ # There is also the argument that bisect_right is a compiled
+ # function, so there is even more to be gained.
+ # iter_steps = len(in_keys) + len(fixed_keys)
+ # bisect_steps = len(in_keys) * math.log(len(fixed_keys), 2)
+ if len(in_keys) == 1: # Bisect will always be faster for M = 1
+ return [(bisect.bisect_right(fixed_keys, in_keys[0]), in_keys)]
+ # elif bisect_steps < iter_steps:
+ # offsets = {}
+ # for key in in_keys:
+ # offsets.setdefault(bisect_right(fixed_keys, key),
+ # []).append(key)
+ # return [(o, offsets[o]) for o in sorted(offsets)]
+ in_keys_iter = iter(in_keys)
+ fixed_keys_iter = enumerate(fixed_keys)
+ cur_in_key = in_keys_iter.next()
+ cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next()
+
+ class InputDone(Exception): pass
+ class FixedDone(Exception): pass
+
+ output = []
+ cur_out = []
+
+ # TODO: Another possibility is that rather than iterating on each side,
+ # we could use a combination of bisecting and iterating. For
+ # example, while cur_in_key < fixed_key, bisect to find its
+ # point, then iterate all matching keys, then bisect (restricted
+ # to only the remainder) for the next one, etc.
+ try:
+ while True:
+ if cur_in_key < cur_fixed_key:
+ cur_keys = []
+ cur_out = (cur_fixed_offset, cur_keys)
+ output.append(cur_out)
+ while cur_in_key < cur_fixed_key:
+ cur_keys.append(cur_in_key)
+ try:
+ cur_in_key = in_keys_iter.next()
+ except StopIteration:
+ raise InputDone
+ # At this point cur_in_key must be >= cur_fixed_key
+ # step the cur_fixed_key until we pass the cur key, or walk off
+ # the end
+ while cur_in_key >= cur_fixed_key:
+ try:
+ cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next()
+ except StopIteration:
+ raise FixedDone
+ except InputDone:
+ # We consumed all of the input, nothing more to do
+ pass
+ except FixedDone:
+ # There was some input left, but we consumed all of fixed, so we
+ # have to add one more for the tail
+ cur_keys = [cur_in_key]
+ cur_keys.extend(in_keys_iter)
+ cur_out = (len(fixed_keys), cur_keys)
+ output.append(cur_out)
+ return output
+
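+ # A small worked example of the grouping performed above (keys shown as
+ # plain strings for brevity; real keys are tuples):
+ # _multi_bisect_right(['a', 'c', 'd', 'f'], ['b', 'e'])
+ # => [(0, ['a']), (1, ['c', 'd']), (2, ['f'])]
+ # 'a' sorts before 'b', 'c' and 'd' fall between 'b' and 'e', and 'f'
+ # falls after the last fixed key.
+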
+ def _walk_through_internal_nodes(self, keys):
+ """Take the given set of keys, and find the corresponding LeafNodes.
+
+ :param keys: An unsorted iterable of keys to search for
+ :return: (nodes, keys_at_index)
+ nodes is a dict mapping {index: LeafNode}
+ keys_at_index is a list of tuples of [(index, [keys for Leaf])]
+ """
+ # 6 seconds spent in miss_torture using the sorted() line.
+ # Even with out of order disk IO it seems faster not to sort it when
+ # large queries are being made.
+ keys_at_index = [(0, sorted(keys))]
+
+ for row_pos, next_row_start in enumerate(self._row_offsets[1:-1]):
+ node_indexes = [idx for idx, s_keys in keys_at_index]
+ nodes = self._get_internal_nodes(node_indexes)
+
+ next_nodes_and_keys = []
+ for node_index, sub_keys in keys_at_index:
+ node = nodes[node_index]
+ positions = self._multi_bisect_right(sub_keys, node.keys)
+ node_offset = next_row_start + node.offset
+ next_nodes_and_keys.extend([(node_offset + pos, s_keys)
+ for pos, s_keys in positions])
+ keys_at_index = next_nodes_and_keys
+ # We should now be at the _LeafNodes
+ node_indexes = [idx for idx, s_keys in keys_at_index]
+
+ # TODO: We may *not* want to always read all the nodes in one
+ # big go. Consider setting a max size on this.
+ nodes = self._get_leaf_nodes(node_indexes)
+ return nodes, keys_at_index
+
+ def iter_entries(self, keys):
+ """Iterate over keys within the index.
+
+ :param keys: An iterable providing the keys to be retrieved.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys supplied. No additional keys will be returned, and every
+ key supplied that is in the index will be returned.
+ """
+ # 6 seconds spent in miss_torture using the sorted() line.
+ # Even with out of order disk IO it seems faster not to sort it when
+ # large queries are being made.
+ # However, now that we are doing multi-way bisecting, we need the keys
+ # in sorted order anyway. We could change the multi-way code to not
+ # require sorted order. (For example, it bisects for the first node,
+ # does an in-order search until a key comes before the current point,
+ # which it then bisects for, etc.)
+ keys = frozenset(keys)
+ if not keys:
+ return
+
+ if not self.key_count():
+ return
+
+ needed_keys = []
+ if self._leaf_value_cache is None:
+ needed_keys = keys
+ else:
+ for key in keys:
+ value = self._leaf_value_cache.get(key, None)
+ if value is not None:
+ # This key was found in the leaf value cache, yield it directly
+ value, refs = value
+ if self.node_ref_lists:
+ yield (self, key, value, refs)
+ else:
+ yield (self, key, value)
+ else:
+ needed_keys.append(key)
+
+ last_key = None
+ needed_keys = keys
+ if not needed_keys:
+ return
+ nodes, nodes_and_keys = self._walk_through_internal_nodes(needed_keys)
+ for node_index, sub_keys in nodes_and_keys:
+ if not sub_keys:
+ continue
+ node = nodes[node_index]
+ for next_sub_key in sub_keys:
+ if next_sub_key in node:
+ value, refs = node[next_sub_key]
+ if self.node_ref_lists:
+ yield (self, next_sub_key, value, refs)
+ else:
+ yield (self, next_sub_key, value)
+
+ def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
+ """Find the parent_map information for the set of keys.
+
+ This populates the parent_map dict and missing_keys set based on the
+ queried keys. It also can fill out an arbitrary number of parents that
+ it finds while searching for the supplied keys.
+
+ It is unlikely that you want to call this directly. See
+ "CombinedGraphIndex.find_ancestry()" for a more appropriate API.
+
+ :param keys: The keys whose ancestry we want to return.
+ Every key will either end up in 'parent_map' or 'missing_keys'.
+ :param ref_list_num: This index in the ref_lists is the parents we
+ care about.
+ :param parent_map: {key: parent_keys} for keys that are present in this
+ index. This may contain more entries than were in 'keys', that are
+ reachable ancestors of the keys requested.
+ :param missing_keys: keys which are known to be missing in this index.
+ This may include parents that were not directly requested, but we
+ were able to determine that they are not present in this index.
+ :return: search_keys parents that were found but not queried to know
+ if they are missing or present. Callers can re-query this index for
+ those keys, and they will be placed into parent_map or missing_keys
+ """
+ if not self.key_count():
+ # We use key_count() to trigger reading the root node and
+ # determining info about this BTreeGraphIndex
+ # If we don't have any keys, then everything is missing
+ missing_keys.update(keys)
+ return set()
+ if ref_list_num >= self.node_ref_lists:
+ raise ValueError('No ref list %d, index has %d ref lists'
+ % (ref_list_num, self.node_ref_lists))
+
+ # The main trick we are trying to accomplish is that when we find a
+ # key listing its parents, we expect that the parent key is also likely
+ # to sit on the same page, allowing us to expand parents quickly
+ # without suffering the full stack of bisecting, etc.
+ nodes, nodes_and_keys = self._walk_through_internal_nodes(keys)
+
+ # These are parent keys which could not be immediately resolved on the
+ # page where the child was present. Note that we may already be
+ # searching for that key, and it may actually be present [or known
+ # missing] on one of the other pages we are reading.
+ # TODO:
+ # We could try searching for them in the immediate previous or next
+ # page. If they occur "later" we could put them in a pending lookup
+ # set, and then for each node we read thereafter we could check to
+ # see if they are present.
+ # However, we don't know the impact of keeping this list of things
+ # that I'm going to search for every node I come across from here on
+ # out.
+ # It doesn't handle the case when the parent key is missing on a
+ # page that we *don't* read. So we already have to handle being
+ # re-entrant for that.
+ # Since most keys contain a date string, they are more likely to be
+ # found earlier in the file than later, but we would know that right
+ # away (key < min_key), and wouldn't keep searching it on every other
+ # page that we read.
+ # Mostly, it is an idea, one which should be benchmarked.
+ parents_not_on_page = set()
+
+ for node_index, sub_keys in nodes_and_keys:
+ if not sub_keys:
+ continue
+ # sub_keys is all of the keys we are looking for that should exist
+ # on this page; if they aren't here, then they won't be found
+ node = nodes[node_index]
+ parents_to_check = set()
+ for next_sub_key in sub_keys:
+ if next_sub_key not in node:
+ # This one is just not present in the index at all
+ missing_keys.add(next_sub_key)
+ else:
+ value, refs = node[next_sub_key]
+ parent_keys = refs[ref_list_num]
+ parent_map[next_sub_key] = parent_keys
+ parents_to_check.update(parent_keys)
+ # Don't look for things we've already found
+ parents_to_check = parents_to_check.difference(parent_map)
+ # this can be used to test the benefit of having the check loop
+ # inlined.
+ # parents_not_on_page.update(parents_to_check)
+ # continue
+ while parents_to_check:
+ next_parents_to_check = set()
+ for key in parents_to_check:
+ if key in node:
+ value, refs = node[key]
+ parent_keys = refs[ref_list_num]
+ parent_map[key] = parent_keys
+ next_parents_to_check.update(parent_keys)
+ else:
+ # This parent either is genuinely missing, or should be
+ # found on another page. Perf test whether it is better
+ # to check if this node should fit on this page or not.
+ # in the 'everything-in-one-pack' scenario, this *not*
+ # doing the check is 237ms vs 243ms.
+ # So slightly better, but I assume the standard 'lots
+ # of packs' is going to show a reasonable improvement
+ # from the check, because it avoids 'going around
+ # again' for everything that is in another index
+ # parents_not_on_page.add(key)
+ # Missing for some reason
+ if key < node.min_key:
+ # in the case of bzr.dev, 3.4k/5.3k misses are
+ # 'earlier' misses (65%)
+ parents_not_on_page.add(key)
+ elif key > node.max_key:
+ # This parent key would be present on a different
+ # LeafNode
+ parents_not_on_page.add(key)
+ else:
+ # assert key != node.min_key and key != node.max_key
+ # If it was going to be present, it would be on
+ # *this* page, so mark it missing.
+ missing_keys.add(key)
+ parents_to_check = next_parents_to_check.difference(parent_map)
+ # Might want to do another .difference() from missing_keys
+ # parents_not_on_page could have been found on a different page, or be
+ # known to be missing. So cull out everything that has already been
+ # found.
+ search_keys = parents_not_on_page.difference(
+ parent_map).difference(missing_keys)
+ return search_keys
+
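+ # A sketch of the intended calling pattern on a BTreeGraphIndex 'index'
+ # (normally driven by CombinedGraphIndex.find_ancestry rather than called
+ # directly); 'tip_keys' stands in for whatever keys are being queried:
+ # parent_map = {}
+ # missing_keys = set()
+ # search_keys = index._find_ancestors(tip_keys, 0, parent_map,
+ # missing_keys)
+ # while search_keys:
+ # search_keys = index._find_ancestors(search_keys, 0, parent_map,
+ # missing_keys)
+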
+ def iter_entries_prefix(self, keys):
+ """Iterate over keys within the index using prefix matching.
+
+ Prefix matching is applied within the tuple of a key, not to within
+ the bytestring of each key element. e.g. if you have the keys ('foo',
+ 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
+ only the former key is returned.
+
+ WARNING: Note that this method currently causes a full index parse
+ unconditionally (which is reasonably appropriate as it is a means for
+ thunking many small indices into one larger one and still supplies
+ iter_all_entries at the thunk layer).
+
+ :param keys: An iterable providing the key prefixes to be retrieved.
+ Each key prefix takes the form of a tuple the length of a key, but
+ with the last N elements 'None' rather than a regular bytestring.
+ The first element cannot be 'None'.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys with a matching prefix to those supplied. No additional keys
+ will be returned, and every match that is in the index will be
+ returned.
+ """
+ keys = sorted(set(keys))
+ if not keys:
+ return
+ # Load if needed to check key lengths
+ if self._key_count is None:
+ self._get_root_node()
+ # TODO: only access nodes that can satisfy the prefixes we are looking
+ # for. For now, to meet API usage (as this function is not used by
+ # current bzrlib) just suck the entire index and iterate in memory.
+ nodes = {}
+ if self.node_ref_lists:
+ if self._key_length == 1:
+ for _1, key, value, refs in self.iter_all_entries():
+ nodes[key] = value, refs
+ else:
+ nodes_by_key = {}
+ for _1, key, value, refs in self.iter_all_entries():
+ key_value = key, value, refs
+ # For a key of (foo, bar, baz) create
+ # _nodes_by_key[foo][bar][baz] = key_value
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key_value
+ else:
+ if self._key_length == 1:
+ for _1, key, value in self.iter_all_entries():
+ nodes[key] = value
+ else:
+ nodes_by_key = {}
+ for _1, key, value in self.iter_all_entries():
+ key_value = key, value
+ # For a key of (foo, bar, baz) create
+ # _nodes_by_key[foo][bar][baz] = key_value
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key_value
+ if self._key_length == 1:
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ try:
+ if self.node_ref_lists:
+ value, node_refs = nodes[key]
+ yield self, key, value, node_refs
+ else:
+ yield self, key, nodes[key]
+ except KeyError:
+ pass
+ return
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ # find what it refers to:
+ key_dict = nodes_by_key
+ elements = list(key)
+ # find the subdict whose contents should be returned.
+ try:
+ while len(elements) and elements[0] is not None:
+ key_dict = key_dict[elements[0]]
+ elements.pop(0)
+ except KeyError:
+ # a non-existent lookup.
+ continue
+ if len(elements):
+ dicts = [key_dict]
+ while dicts:
+ key_dict = dicts.pop(-1)
+ # can't be empty or would not exist
+ item, value = key_dict.iteritems().next()
+ if type(value) == dict:
+ # push keys
+ dicts.extend(key_dict.itervalues())
+ else:
+ # yield keys
+ for value in key_dict.itervalues():
+ # each value is the key:value:node refs tuple
+ # ready to yield.
+ yield (self, ) + value
+ else:
+ # the last thing looked up was a terminal element
+ yield (self, ) + key_dict
+
+ def key_count(self):
+ """Return an estimate of the number of keys in this index.
+
+ For BTreeGraphIndex the estimate is exact as it is contained in the
+ header.
+ """
+ if self._key_count is None:
+ self._get_root_node()
+ return self._key_count
+
+ def _compute_row_offsets(self):
+ """Fill out the _row_offsets attribute based on _row_lengths."""
+ offsets = []
+ row_offset = 0
+ for row in self._row_lengths:
+ offsets.append(row_offset)
+ row_offset += row
+ offsets.append(row_offset)
+ self._row_offsets = offsets
+
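+ # For example, _row_lengths of [1, 4, 60] produce _row_offsets of
+ # [0, 1, 5, 65]: each row starts where the previous one ended, and the
+ # final entry is the total number of nodes.
+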
+ def _parse_header_from_bytes(self, bytes):
+ """Parse the header from a region of bytes.
+
+ :param bytes: The data to parse.
+ :return: An offset, data tuple such as readv yields, for the unparsed
+ data (which may be of length 0).
+ """
+ signature = bytes[0:len(self._signature())]
+ if not signature == self._signature():
+ raise errors.BadIndexFormatSignature(self._name, BTreeGraphIndex)
+ lines = bytes[len(self._signature()):].splitlines()
+ options_line = lines[0]
+ if not options_line.startswith(_OPTION_NODE_REFS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = lines[1]
+ if not options_line.startswith(_OPTION_KEY_ELEMENTS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = lines[2]
+ if not options_line.startswith(_OPTION_LEN):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._key_count = int(options_line[len(_OPTION_LEN):])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = lines[3]
+ if not options_line.startswith(_OPTION_ROW_LENGTHS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._row_lengths = map(int, [length for length in
+ options_line[len(_OPTION_ROW_LENGTHS):].split(',')
+ if len(length)])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ self._compute_row_offsets()
+
+ # calculate the bytes we have processed
+ header_end = (len(signature) + sum(map(len, lines[0:4])) + 4)
+ return header_end, bytes[header_end:]
+
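+ # Roughly, the uncompressed header parsed above looks like the following
+ # (values are illustrative):
+ # B+Tree Graph Index 2
+ # node_ref_lists=1
+ # key_elements=1
+ # len=100
+ # row_lengths=1,3
+ # with the remainder of the first page holding (compressed) root node data.
+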
+ def _read_nodes(self, nodes):
+ """Read some nodes from disk into the LRU cache.
+
+ This performs a readv to get the node data into memory, and parses each
+ node, then yields it to the caller. The nodes are requested in the
+ supplied order. If possible, doing sort() on the list before requesting
+ a read may improve performance.
+
+ :param nodes: The nodes to read. 0 - first node, 1 - second node etc.
+ :return: None
+ """
+ # may be the byte string of the whole file
+ bytes = None
+ # list of (offset, length) regions of the file that should, eventually,
+ # be read in to data_ranges, either from 'bytes' or from the transport
+ ranges = []
+ base_offset = self._base_offset
+ for index in nodes:
+ offset = (index * _PAGE_SIZE)
+ size = _PAGE_SIZE
+ if index == 0:
+ # Root node - special case
+ if self._size:
+ size = min(_PAGE_SIZE, self._size)
+ else:
+ # The only case where we don't know the size, is for very
+ # small indexes. So we read the whole thing
+ bytes = self._transport.get_bytes(self._name)
+ num_bytes = len(bytes)
+ self._size = num_bytes - base_offset
+ # the whole thing should be parsed out of 'bytes'
+ ranges = [(start, min(_PAGE_SIZE, num_bytes - start))
+ for start in xrange(base_offset, num_bytes, _PAGE_SIZE)]
+ break
+ else:
+ if offset > self._size:
+ raise AssertionError('tried to read past the end'
+ ' of the file %s > %s'
+ % (offset, self._size))
+ size = min(size, self._size - offset)
+ ranges.append((base_offset + offset, size))
+ if not ranges:
+ return
+ elif bytes is not None:
+ # already have the whole file
+ data_ranges = [(start, bytes[start:start+size])
+ for start, size in ranges]
+ elif self._file is None:
+ data_ranges = self._transport.readv(self._name, ranges)
+ else:
+ data_ranges = []
+ for offset, size in ranges:
+ self._file.seek(offset)
+ data_ranges.append((offset, self._file.read(size)))
+ for offset, data in data_ranges:
+ offset -= base_offset
+ if offset == 0:
+ # extract the header
+ offset, data = self._parse_header_from_bytes(data)
+ if len(data) == 0:
+ continue
+ bytes = zlib.decompress(data)
+ if bytes.startswith(_LEAF_FLAG):
+ node = self._leaf_factory(bytes, self._key_length,
+ self.node_ref_lists)
+ elif bytes.startswith(_INTERNAL_FLAG):
+ node = _InternalNode(bytes)
+ else:
+ raise AssertionError("Unknown node type for %r" % bytes)
+ yield offset / _PAGE_SIZE, node
+
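+ # For example, _read_nodes([0, 2]) would yield (0, <root node>) and
+ # (2, <node from the third _PAGE_SIZE page>), stripping the uncompressed
+ # header from page 0 before decompressing each page's body.
+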
+ def _signature(self):
+ """The file signature for this index type."""
+ return _BTSIGNATURE
+
+ def validate(self):
+ """Validate that everything in the index can be accessed."""
+ # just read and parse every node.
+ self._get_root_node()
+ if len(self._row_lengths) > 1:
+ start_node = self._row_offsets[1]
+ else:
+ # We shouldn't be reading anything anyway
+ start_node = 1
+ node_end = self._row_offsets[-1]
+ for node in self._read_nodes(range(start_node, node_end)):
+ pass
+
+
+_gcchk_factory = _LeafNode
+
+try:
+ from bzrlib import _btree_serializer_pyx as _btree_serializer
+ _gcchk_factory = _btree_serializer._parse_into_chk
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib import _btree_serializer_py as _btree_serializer
diff --git a/bzrlib/bugtracker.py b/bzrlib/bugtracker.py
new file mode 100644
index 0000000..7ab0a8e
--- /dev/null
+++ b/bzrlib/bugtracker.py
@@ -0,0 +1,328 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import registry
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import errors, urlutils
+""")
+
+
+"""Provides a shorthand for referring to bugs on a variety of bug trackers.
+
+'commit --fixes' stores references to bugs as a <bug_url> -> <bug_status>
+mapping in the properties for that revision.
+
+However, it's inconvenient to type out full URLs for bugs on the command line,
+particularly given that many users will be using only a single bug tracker per
+branch.
+
+Thus, this module provides a registry of types of bug tracker (e.g. Launchpad,
+Trac). Given an abbreviated name (e.g. 'lp', 'twisted') and a branch with
+configuration information, these tracker types can return an instance capable
+of converting bug IDs into URLs.
+"""
+
+
+_bugs_help = \
+"""When making a commit, metadata about bugs fixed by that change can be
+recorded by using the ``--fixes`` option. For each bug marked as fixed, an
+entry is included in the 'bugs' revision property stating '<url> <status>'.
+(The only ``status`` value currently supported is ``fixed``.)
+
+The ``--fixes`` option allows you to specify a bug tracker and a bug identifier
+rather than a full URL. This looks like::
+
+ bzr commit --fixes <tracker>:<id>
+
+or::
+
+ bzr commit --fixes <id>
+
+where "<tracker>" is an identifier for the bug tracker, and "<id>" is the
+identifier for that bug within the bugtracker, usually the bug number.
+If "<tracker>" is not specified the ``bugtracker`` set in the branch
+or global configuration is used.
+
+Bazaar knows about a few bug trackers that have many users. If
+you use one of these bug trackers then there is no setup required to
+use this feature, you just need to know the tracker identifier to use.
+These are the bugtrackers that are built in:
+
+ ============================ ============ ============
+ URL Abbreviation Example
+ ============================ ============ ============
+ https://bugs.launchpad.net/ lp lp:12345
+ http://bugs.debian.org/ deb deb:12345
+ http://bugzilla.gnome.org/ gnome gnome:12345
+ ============================ ============ ============
+
+For the bug trackers not listed above, configuration is required.
+Support for generating the URLs for any project using Bugzilla or Trac
+is built in, along with a template mechanism for other bugtrackers with
+simple URL schemes. If your bug tracker can't be described by one
+of the schemes described below then you can write a plugin to support
+it.
+
+If you use Bugzilla or Trac, then you only need to set a configuration
+variable which contains the base URL of the bug tracker. These options
+can go into ``bazaar.conf``, ``branch.conf`` or into a branch-specific
+configuration section in ``locations.conf``. You can set up these values
+for each of the projects you work on.
+
+Note: Since you provide a short name for each tracker, you can specify one or
+more bugs in one or more trackers at commit time if you wish.
+
+Launchpad
+---------
+
+Use ``bzr commit --fixes lp:2`` to record that this commit fixes bug 2.
+
+bugzilla_<tracker>_url
+----------------------
+
+If present, the location of the Bugzilla bug tracker referred to by
+<tracker>. This option can then be used together with ``bzr commit
+--fixes`` to mark bugs in that tracker as being fixed by that commit. For
+example::
+
+ bugzilla_squid_url = http://bugs.squid-cache.org
+
+would allow ``bzr commit --fixes squid:1234`` to mark Squid's bug 1234 as
+fixed.
+
+trac_<tracker>_url
+------------------
+
+If present, the location of the Trac instance referred to by
+<tracker>. This option can then be used together with ``bzr commit
+--fixes`` to mark bugs in that tracker as being fixed by that commit. For
+example::
+
+ trac_twisted_url = http://www.twistedmatrix.com/trac
+
+would allow ``bzr commit --fixes twisted:1234`` to mark Twisted's bug 1234 as
+fixed.
+
+bugtracker_<tracker>_url
+------------------------
+
+If present, the location of a generic bug tracker instance referred to by
+<tracker>. The location must contain an ``{id}`` placeholder,
+which will be replaced by a specific bug ID. This option can then be used
+together with ``bzr commit --fixes`` to mark bugs in that tracker as being
+fixed by that commit. For example::
+
+ bugtracker_python_url = http://bugs.python.org/issue{id}
+
+would allow ``bzr commit --fixes python:1234`` to mark bug 1234 in Python's
+Roundup bug tracker as fixed, or::
+
+ bugtracker_cpan_url = http://rt.cpan.org/Public/Bug/Display.html?id={id}
+
+would allow ``bzr commit --fixes cpan:1234`` to mark bug 1234 in CPAN's
+RT bug tracker as fixed, or::
+
+ bugtracker_hudson_url = http://issues.hudson-ci.org/browse/{id}
+
+would allow ``bzr commit --fixes hudson:HUDSON-1234`` to mark bug HUDSON-1234
+in Hudson's JIRA bug tracker as fixed.
+"""
+
+
+def get_bug_url(abbreviated_bugtracker_name, branch, bug_id):
+ """Return a URL pointing to the canonical web page of the bug identified by
+ 'bug_id'.
+ """
+ tracker = tracker_registry.get_tracker(abbreviated_bugtracker_name, branch)
+ return tracker.get_bug_url(bug_id)
+
+
+class TrackerRegistry(registry.Registry):
+ """Registry of bug tracker types."""
+
+ def get_tracker(self, abbreviated_bugtracker_name, branch):
+ """Return the first registered tracker that understands
+ 'abbreviated_bugtracker_name'.
+
+ If no such tracker is found, raise KeyError.
+ """
+ for tracker_name in self.keys():
+ tracker_type = self.get(tracker_name)
+ tracker = tracker_type.get(abbreviated_bugtracker_name, branch)
+ if tracker is not None:
+ return tracker
+ raise errors.UnknownBugTrackerAbbreviation(abbreviated_bugtracker_name,
+ branch)
+
+ def help_topic(self, topic):
+ return _bugs_help
+
+
+tracker_registry = TrackerRegistry()
+"""Registry of bug trackers."""
+
+
+class BugTracker(object):
+ """Base class for bug trackers."""
+
+ def check_bug_id(self, bug_id):
+ """Check that the bug_id is valid.
+
+ The base implementation assumes that all bug_ids are valid.
+ """
+
+ def get_bug_url(self, bug_id):
+ """Return the URL for bug_id. Raise an error if bug ID is malformed."""
+ self.check_bug_id(bug_id)
+ return self._get_bug_url(bug_id)
+
+ def _get_bug_url(self, bug_id):
+ """Given a validated bug_id, return the bug's web page's URL."""
+
+
+class IntegerBugTracker(BugTracker):
+ """A bug tracker that only allows integer bug IDs."""
+
+ def check_bug_id(self, bug_id):
+ try:
+ int(bug_id)
+ except ValueError:
+ raise errors.MalformedBugIdentifier(bug_id, "Must be an integer")
+
+
+class UniqueIntegerBugTracker(IntegerBugTracker):
+ """A style of bug tracker that exists in one place only, such as Launchpad.
+
+ If you have one of these trackers then register an instance passing in an
+ abbreviated name for the bug tracker and a base URL. The bug ids are
+ appended directly to the URL.
+ """
+
+ def __init__(self, abbreviated_bugtracker_name, base_url):
+ self.abbreviation = abbreviated_bugtracker_name
+ self.base_url = base_url
+
+ def get(self, abbreviated_bugtracker_name, branch):
+ """Returns the tracker if the abbreviation matches. Returns None
+ otherwise."""
+ if abbreviated_bugtracker_name != self.abbreviation:
+ return None
+ return self
+
+ def _get_bug_url(self, bug_id):
+ """Return the URL for bug_id."""
+ return self.base_url + bug_id
+
+
+tracker_registry.register(
+ 'launchpad', UniqueIntegerBugTracker('lp', 'https://launchpad.net/bugs/'))
+
+
+tracker_registry.register(
+ 'debian', UniqueIntegerBugTracker('deb', 'http://bugs.debian.org/'))
+
+
+tracker_registry.register('gnome',
+ UniqueIntegerBugTracker('gnome',
+ 'http://bugzilla.gnome.org/show_bug.cgi?id='))
+
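+# With the registrations above, a rough sketch of a lookup ('branch' stands
+# in for any Branch object; the Launchpad tracker does not consult it):
+# tracker = tracker_registry.get_tracker('lp', branch)
+# tracker.get_bug_url('12345')
+# => 'https://launchpad.net/bugs/12345'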
+
+class URLParametrizedBugTracker(BugTracker):
+ """A type of bug tracker that can be found on a variety of different sites,
+ and thus needs to have the base URL configured.
+
+ Looks for a config setting in the form '<type_name>_<abbreviation>_url'.
+ `type_name` is the name of the type of tracker and `abbreviation`
+ is a short name for the particular instance.
+ """
+
+ def get(self, abbreviation, branch):
+ config = branch.get_config()
+ url = config.get_user_option(
+ "%s_%s_url" % (self.type_name, abbreviation), expand=False)
+ if url is None:
+ return None
+ self._base_url = url
+ return self
+
+ def __init__(self, type_name, bug_area):
+ self.type_name = type_name
+ self._bug_area = bug_area
+
+ def _get_bug_url(self, bug_id):
+ """Return a URL for a bug on this Trac instance."""
+ return urlutils.join(self._base_url, self._bug_area) + str(bug_id)
+
+
+class URLParametrizedIntegerBugTracker(IntegerBugTracker,
+ URLParametrizedBugTracker):
+ """A type of bug tracker that only allows integer bug IDs.
+
+ This can be found on a variety of different sites, and thus needs to have
+ the base URL configured.
+
+ Looks for a config setting in the form '<type_name>_<abbreviation>_url'.
+ `type_name` is the name of the type of tracker (e.g. 'bugzilla' or 'trac')
+ and `abbreviation` is a short name for the particular instance (e.g.
+ 'squid' or 'apache').
+ """
+
+tracker_registry.register(
+ 'trac', URLParametrizedIntegerBugTracker('trac', 'ticket/'))
+
+tracker_registry.register(
+ 'bugzilla',
+ URLParametrizedIntegerBugTracker('bugzilla', 'show_bug.cgi?id='))
+
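+# For example, with a branch configuration containing (a hypothetical entry)
+# trac_twisted_url = http://www.twistedmatrix.com/trac
+# get_bug_url('twisted', branch, '1234') resolves through the 'trac' tracker
+# above to http://www.twistedmatrix.com/trac/ticket/1234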
+
+class GenericBugTracker(URLParametrizedBugTracker):
+ """Generic bug tracker specified by an URL template."""
+
+ def __init__(self):
+ super(GenericBugTracker, self).__init__('bugtracker', None)
+
+ def get(self, abbreviation, branch):
+ self._abbreviation = abbreviation
+ return super(GenericBugTracker, self).get(abbreviation, branch)
+
+ def _get_bug_url(self, bug_id):
+ """Given a validated bug_id, return the bug's web page's URL."""
+ if '{id}' not in self._base_url:
+ raise errors.InvalidBugTrackerURL(self._abbreviation,
+ self._base_url)
+ return self._base_url.replace('{id}', str(bug_id))
+
+
+tracker_registry.register('generic', GenericBugTracker())
+
+
+FIXED = 'fixed'
+
+ALLOWED_BUG_STATUSES = set([FIXED])
+
+
+def encode_fixes_bug_urls(bug_urls):
+ """Get the revision property value for a commit that fixes bugs.
+
+ :param bug_urls: An iterable of escaped URLs to bugs. These normally
+ come from `get_bug_url`.
+ :return: A string that will be set as the 'bugs' property of a revision
+ as part of a commit.
+ """
+ return '\n'.join(('%s %s' % (url, FIXED)) for url in bug_urls)
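+
+# For example:
+# encode_fixes_bug_urls(['https://launchpad.net/bugs/1234',
+# 'http://bugs.debian.org/5678'])
+# => 'https://launchpad.net/bugs/1234 fixed\nhttp://bugs.debian.org/5678 fixed'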
diff --git a/bzrlib/builtins.py b/bzrlib/builtins.py
new file mode 100644
index 0000000..d41c8fd
--- /dev/null
+++ b/bzrlib/builtins.py
@@ -0,0 +1,6731 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""builtin bzr commands"""
+
+from __future__ import absolute_import
+
+import os
+
+import bzrlib.bzrdir
+
+from bzrlib import lazy_import
+lazy_import.lazy_import(globals(), """
+import cStringIO
+import errno
+import sys
+import time
+
+import bzrlib
+from bzrlib import (
+ bugtracker,
+ bundle,
+ btree_index,
+ controldir,
+ directory_service,
+ delta,
+ config as _mod_config,
+ errors,
+ globbing,
+ hooks,
+ log,
+ merge as _mod_merge,
+ merge_directive,
+ osutils,
+ reconfigure,
+ rename_map,
+ revision as _mod_revision,
+ static_tuple,
+ timestamp,
+ transport,
+ ui,
+ urlutils,
+ views,
+ gpg,
+ )
+from bzrlib.branch import Branch
+from bzrlib.conflicts import ConflictList
+from bzrlib.transport import memory
+from bzrlib.revisionspec import RevisionSpec, RevisionInfo
+from bzrlib.smtp_connection import SMTPConnection
+from bzrlib.workingtree import WorkingTree
+from bzrlib.i18n import gettext, ngettext
+""")
+
+from bzrlib.commands import (
+ Command,
+ builtin_command_registry,
+ display_command,
+ )
+from bzrlib.option import (
+ ListOption,
+ Option,
+ RegistryOption,
+ custom_help,
+ _parse_revision_str,
+ )
+from bzrlib.trace import mutter, note, warning, is_quiet, get_verbosity_level
+from bzrlib import (
+ symbol_versioning,
+ )
+
+
+def _get_branch_location(control_dir, possible_transports=None):
+ """Return location of branch for this control dir."""
+ try:
+ target = control_dir.get_branch_reference()
+ except errors.NotBranchError:
+ return control_dir.root_transport.base
+ if target is not None:
+ return target
+ this_branch = control_dir.open_branch(
+ possible_transports=possible_transports)
+ # This may be a heavy checkout, where we want the master branch
+ master_location = this_branch.get_bound_location()
+ if master_location is not None:
+ return master_location
+ # If not, use a local sibling
+ return this_branch.base
+
+
+def _is_colocated(control_dir, possible_transports=None):
+ """Check if the branch in control_dir is colocated.
+
+ :param control_dir: Control directory
+ :return: Tuple with boolean indicating whether the branch is colocated
+ and the full URL to the actual branch
+ """
+ # This path is meant to be relative to the existing branch
+ this_url = _get_branch_location(control_dir,
+ possible_transports=possible_transports)
+ # Perhaps the target control dir supports colocated branches?
+ try:
+ root = controldir.ControlDir.open(this_url,
+ possible_transports=possible_transports)
+ except errors.NotBranchError:
+ return (False, this_url)
+ else:
+ try:
+ wt = control_dir.open_workingtree()
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ return (False, this_url)
+ else:
+ return (
+ root._format.colocated_branches and
+ control_dir.control_url == root.control_url,
+ this_url)
+
+
+def lookup_new_sibling_branch(control_dir, location, possible_transports=None):
+ """Lookup the location for a new sibling branch.
+
+ :param control_dir: Control directory to find sibling branches from
+ :param location: Name of the new branch
+ :return: Full location to the new branch
+ """
+ location = directory_service.directories.dereference(location)
+ if '/' not in location and '\\' not in location:
+ (colocated, this_url) = _is_colocated(control_dir, possible_transports)
+
+ if colocated:
+ return urlutils.join_segment_parameters(this_url,
+ {"branch": urlutils.escape(location)})
+ else:
+ return urlutils.join(this_url, '..', urlutils.escape(location))
+ return location
+
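+# As a rough example (with a made-up URL): for a non-colocated branch whose
+# location resolves to 'http://example.com/repo/trunk',
+# lookup_new_sibling_branch(control_dir, 'feature-x') returns
+# 'http://example.com/repo/feature-x', while a location containing '/' or
+# '\\' is returned unchanged apart from directory-service dereferencing.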
+
+def open_sibling_branch(control_dir, location, possible_transports=None):
+ """Open a branch, possibly a sibling of another.
+
+ :param control_dir: Control directory relative to which to lookup the
+ location.
+ :param location: Location to look up
+ :return: branch to open
+ """
+ try:
+ # Perhaps it's a colocated branch?
+ return control_dir.open_branch(location,
+ possible_transports=possible_transports)
+ except (errors.NotBranchError, errors.NoColocatedBranchSupport):
+ this_url = _get_branch_location(control_dir)
+ return Branch.open(
+ urlutils.join(
+ this_url, '..', urlutils.escape(location)))
+
+
+def open_nearby_branch(near=None, location=None, possible_transports=None):
+ """Open a nearby branch.
+
+ :param near: Optional location of container from which to open branch
+ :param location: Location of the branch
+ :return: Branch instance
+ """
+ if near is None:
+ if location is None:
+ location = "."
+ try:
+ return Branch.open(location,
+ possible_transports=possible_transports)
+ except errors.NotBranchError:
+ near = "."
+ cdir = controldir.ControlDir.open(near,
+ possible_transports=possible_transports)
+ return open_sibling_branch(cdir, location,
+ possible_transports=possible_transports)
+
+
+def iter_sibling_branches(control_dir, possible_transports=None):
+ """Iterate over the siblings of a branch.
+
+ :param control_dir: Control directory for which to look up the siblings
+ :return: Iterator over tuples with branch name and branch object
+ """
+ seen_urls = set()
+ try:
+ reference = control_dir.get_branch_reference()
+ except errors.NotBranchError:
+ # There is no active branch, just return the colocated branches.
+ for name, branch in control_dir.get_branches().iteritems():
+ yield name, branch
+ return
+ if reference is not None:
+ ref_branch = Branch.open(reference,
+ possible_transports=possible_transports)
+ else:
+ ref_branch = None
+ if ref_branch is None or ref_branch.name:
+ if ref_branch is not None:
+ control_dir = ref_branch.bzrdir
+ for name, branch in control_dir.get_branches().iteritems():
+ yield name, branch
+ else:
+ repo = ref_branch.bzrdir.find_repository()
+ for branch in repo.find_branches(using=True):
+ name = urlutils.relative_url(repo.user_url,
+ branch.user_url).rstrip("/")
+ yield name, branch
+
+
+def tree_files_for_add(file_list):
+ """
+ Return a tree and list of absolute paths from a file list.
+
+ Similar to tree_files, but add handles files a bit differently, so it has
+ a custom implementation. In particular, MutableTree.smart_add expects
+ absolute paths, which it immediately converts to relative paths.
+ """
+ # FIXME Would be nice to just return the relative paths like
+ # internal_tree_files does, but there are a large number of unit tests
+ # that assume the current interface to mutabletree.smart_add
+ if file_list:
+ tree, relpath = WorkingTree.open_containing(file_list[0])
+ if tree.supports_views():
+ view_files = tree.views.lookup_view()
+ if view_files:
+ for filename in file_list:
+ if not osutils.is_inside_any(view_files, filename):
+ raise errors.FileOutsideView(filename, view_files)
+ file_list = file_list[:]
+ file_list[0] = tree.abspath(relpath)
+ else:
+ tree = WorkingTree.open_containing(u'.')[0]
+ if tree.supports_views():
+ view_files = tree.views.lookup_view()
+ if view_files:
+ file_list = view_files
+ view_str = views.view_display_str(view_files)
+ note(gettext("Ignoring files outside view. View is %s") % view_str)
+ return tree, file_list
+
+
+def _get_one_revision(command_name, revisions):
+ if revisions is None:
+ return None
+ if len(revisions) != 1:
+ raise errors.BzrCommandError(gettext(
+ 'bzr %s --revision takes exactly one revision identifier') % (
+ command_name,))
+ return revisions[0]
+
+
+def _get_one_revision_tree(command_name, revisions, branch=None, tree=None):
+ """Get a revision tree. Not suitable for commands that change the tree.
+
+ Specifically, the basis tree in dirstate trees is coupled to the dirstate
+ and doing a commit/uncommit/pull will at best fail due to changing the
+ basis revision data.
+
+ If tree is passed in, it should be already locked, for lifetime management
+ of the trees internal cached state.
+ """
+ if branch is None:
+ branch = tree.branch
+ if revisions is None:
+ if tree is not None:
+ rev_tree = tree.basis_tree()
+ else:
+ rev_tree = branch.basis_tree()
+ else:
+ revision = _get_one_revision(command_name, revisions)
+ rev_tree = revision.as_tree(branch)
+ return rev_tree
+
+
+def _get_view_info_for_change_reporter(tree):
+ """Get the view information from a tree for change reporting."""
+ view_info = None
+ try:
+ current_view = tree.views.get_view_info()[0]
+ if current_view is not None:
+ view_info = (current_view, tree.views.lookup_view())
+ except errors.ViewsNotSupported:
+ pass
+ return view_info
+
+
+def _open_directory_or_containing_tree_or_branch(filename, directory):
+ """Open the tree or branch containing the specified file, unless
+ the --directory option is used to specify a different branch."""
+ if directory is not None:
+ return (None, Branch.open(directory), filename)
+ return controldir.ControlDir.open_containing_tree_or_branch(filename)
+
+
+# TODO: Make sure no commands unconditionally use the working directory as a
+# branch. If a filename argument is used, the first of them should be used to
+# specify the branch. (Perhaps this can be factored out into some kind of
+# Argument class, representing a file in a branch, where the first occurrence
+# opens the branch?)
+
+class cmd_status(Command):
+ __doc__ = """Display status summary.
+
+ This reports on versioned and unknown files, reporting them
+ grouped by state. Possible states are:
+
+ added
+ Versioned in the working copy but not in the previous revision.
+
+ removed
+ Versioned in the previous revision but removed or deleted
+ in the working copy.
+
+ renamed
+ Path of this file changed from the previous revision;
+ the text may also have changed. This includes files whose
+ parent directory was renamed.
+
+ modified
+ Text has changed since the previous revision.
+
+ kind changed
+ File kind has been changed (e.g. from file to directory).
+
+ unknown
+ Not versioned and not matching an ignore pattern.
+
+ Additionally for directories, symlinks and files with a changed
+ executable bit, Bazaar indicates their type using a trailing
+ character: '/', '@' or '*' respectively. These decorations can be
+ disabled using the '--no-classify' option.
+
+ To see ignored files use 'bzr ignored'. For details on the
+ changes to file texts, use 'bzr diff'.
+
+ Note that --short or -S gives status flags for each item, similar
+ to Subversion's status command. To get output similar to svn -q,
+ use bzr status -SV.
+
+ If no arguments are specified, the status of the entire working
+ directory is shown. Otherwise, only the status of the specified
+ files or directories is reported. If a directory is given, status
+ is reported for everything inside that directory.
+
+ Before merges are committed, the pending merge tip revisions are
+ shown. To see all pending merge revisions, use the -v option.
+ To skip the display of pending merge information altogether, use
+ the no-pending option or specify a file/directory.
+
+ To compare the working directory to a specific revision, pass a
+ single revision to the revision argument.
+
+ To see which files have changed in a specific revision, or between
+ two revisions, pass a revision range to the revision argument.
+ This will produce the same results as calling 'bzr diff --summarize'.
+ """
+
+ # TODO: --no-recurse/-N, --recurse options
+
+ takes_args = ['file*']
+ takes_options = ['show-ids', 'revision', 'change', 'verbose',
+ Option('short', help='Use short status indicators.',
+ short_name='S'),
+ Option('versioned', help='Only show versioned files.',
+ short_name='V'),
+ Option('no-pending', help='Don\'t show pending merges.',
+ ),
+ Option('no-classify',
+ help='Do not mark object type using indicator.',
+ ),
+ ]
+ aliases = ['st', 'stat']
+
+ encoding_type = 'replace'
+ _see_also = ['diff', 'revert', 'status-flags']
+
+ @display_command
+ def run(self, show_ids=False, file_list=None, revision=None, short=False,
+ versioned=False, no_pending=False, verbose=False,
+ no_classify=False):
+ from bzrlib.status import show_tree_status
+
+ if revision and len(revision) > 2:
+ raise errors.BzrCommandError(gettext('bzr status --revision takes exactly'
+ ' one or two revision specifiers'))
+
+ tree, relfile_list = WorkingTree.open_containing_paths(file_list)
+ # Avoid asking for specific files when that is not needed.
+ if relfile_list == ['']:
+ relfile_list = None
+ # Don't disable pending merges for full trees other than '.'.
+ if file_list == ['.']:
+ no_pending = True
+ # A specific path within a tree was given.
+ elif relfile_list is not None:
+ no_pending = True
+ show_tree_status(tree, show_ids=show_ids,
+ specific_files=relfile_list, revision=revision,
+ to_file=self.outf, short=short, versioned=versioned,
+ show_pending=(not no_pending), verbose=verbose,
+ classify=not no_classify)
+
+
+class cmd_cat_revision(Command):
+ __doc__ = """Write out metadata for a revision.
+
+ The revision to print can either be specified by a specific
+ revision identifier, or you can use --revision.
+ """
+
+ hidden = True
+ takes_args = ['revision_id?']
+ takes_options = ['directory', 'revision']
+ # cat-revision is more for frontends so should be exact
+ encoding = 'strict'
+
+ def print_revision(self, revisions, revid):
+ stream = revisions.get_record_stream([(revid,)], 'unordered', True)
+ record = stream.next()
+ if record.storage_kind == 'absent':
+ raise errors.NoSuchRevision(revisions, revid)
+ revtext = record.get_bytes_as('fulltext')
+ self.outf.write(revtext.decode('utf-8'))
+
+ @display_command
+ def run(self, revision_id=None, revision=None, directory=u'.'):
+ if revision_id is not None and revision is not None:
+ raise errors.BzrCommandError(gettext('You can only supply one of'
+ ' revision_id or --revision'))
+ if revision_id is None and revision is None:
+ raise errors.BzrCommandError(gettext('You must supply either'
+ ' --revision or a revision_id'))
+
+ b = controldir.ControlDir.open_containing_tree_or_branch(directory)[1]
+
+ revisions = b.repository.revisions
+ if revisions is None:
+ raise errors.BzrCommandError(gettext('Repository %r does not support '
+ 'access to raw revision texts'))
+
+ b.repository.lock_read()
+ try:
+ # TODO: jam 20060112 should cat-revision always output utf-8?
+ if revision_id is not None:
+ revision_id = osutils.safe_revision_id(revision_id, warn=False)
+ try:
+ self.print_revision(revisions, revision_id)
+ except errors.NoSuchRevision:
+ msg = gettext("The repository {0} contains no revision {1}.").format(
+ b.repository.base, revision_id)
+ raise errors.BzrCommandError(msg)
+ elif revision is not None:
+ for rev in revision:
+ if rev is None:
+ raise errors.BzrCommandError(
+ gettext('You cannot specify a NULL revision.'))
+ rev_id = rev.as_revision_id(b)
+ self.print_revision(revisions, rev_id)
+ finally:
+ b.repository.unlock()
+
+
+class cmd_dump_btree(Command):
+ __doc__ = """Dump the contents of a btree index file to stdout.
+
+ PATH is a btree index file; it can be any URL. This includes things like
+ .bzr/repository/pack-names, or .bzr/repository/indices/a34b3a...ca4a4.iix
+
+ By default, the tuples stored in the index file will be displayed. With
+ --raw, we will uncompress the pages, but otherwise display the raw bytes
+ stored in the index.
+ """
+
+ # TODO: Do we want to dump the internal nodes as well?
+ # TODO: It would be nice to be able to dump the un-parsed information,
+ # rather than only going through iter_all_entries. However, this is
+ # good enough for a start
+ hidden = True
+ encoding_type = 'exact'
+ takes_args = ['path']
+ takes_options = [Option('raw', help='Write the uncompressed bytes out,'
+ ' rather than the parsed tuples.'),
+ ]
+
+ def run(self, path, raw=False):
+ dirname, basename = osutils.split(path)
+ t = transport.get_transport(dirname)
+ if raw:
+ self._dump_raw_bytes(t, basename)
+ else:
+ self._dump_entries(t, basename)
+
+ def _get_index_and_bytes(self, trans, basename):
+ """Create a BTreeGraphIndex and raw bytes."""
+ bt = btree_index.BTreeGraphIndex(trans, basename, None)
+ bytes = trans.get_bytes(basename)
+ bt._file = cStringIO.StringIO(bytes)
+ bt._size = len(bytes)
+ return bt, bytes
+
+ def _dump_raw_bytes(self, trans, basename):
+ import zlib
+
+ # We need to parse at least the root node.
+ # This is because the first page of every row starts with an
+ # uncompressed header.
+ bt, bytes = self._get_index_and_bytes(trans, basename)
+ for page_idx, page_start in enumerate(xrange(0, len(bytes),
+ btree_index._PAGE_SIZE)):
+ page_end = min(page_start + btree_index._PAGE_SIZE, len(bytes))
+ page_bytes = bytes[page_start:page_end]
+ if page_idx == 0:
+ self.outf.write('Root node:\n')
+ header_end, data = bt._parse_header_from_bytes(page_bytes)
+ self.outf.write(page_bytes[:header_end])
+ page_bytes = data
+ self.outf.write('\nPage %d\n' % (page_idx,))
+ if len(page_bytes) == 0:
+ self.outf.write('(empty)\n')
+ else:
+ decomp_bytes = zlib.decompress(page_bytes)
+ self.outf.write(decomp_bytes)
+ self.outf.write('\n')
+
+ def _dump_entries(self, trans, basename):
+ try:
+ st = trans.stat(basename)
+ except errors.TransportNotPossible:
+ # We can't stat, so we'll fake it because we have to do the 'get()'
+ # anyway.
+ bt, _ = self._get_index_and_bytes(trans, basename)
+ else:
+ bt = btree_index.BTreeGraphIndex(trans, basename, st.st_size)
+ for node in bt.iter_all_entries():
+ # Node is made up of:
+ # (index, key, value, [references])
+ try:
+ refs = node[3]
+ except IndexError:
+ refs_as_tuples = None
+ else:
+ refs_as_tuples = static_tuple.as_tuples(refs)
+ as_tuple = (tuple(node[1]), node[2], refs_as_tuples)
+ self.outf.write('%s\n' % (as_tuple,))
+
+
+class cmd_remove_tree(Command):
+ __doc__ = """Remove the working tree from a given branch/checkout.
+
+ Since a lightweight checkout is little more than a working tree,
+ this will refuse to run against one.
+
+ To re-create the working tree, use "bzr checkout".
+ """
+ _see_also = ['checkout', 'working-trees']
+ takes_args = ['location*']
+ takes_options = [
+ Option('force',
+ help='Remove the working tree even if it has '
+ 'uncommitted or shelved changes.'),
+ ]
+
+ def run(self, location_list, force=False):
+ if not location_list:
+ location_list=['.']
+
+ for location in location_list:
+ d = controldir.ControlDir.open(location)
+
+ try:
+ working = d.open_workingtree()
+ except errors.NoWorkingTree:
+ raise errors.BzrCommandError(gettext("No working tree to remove"))
+ except errors.NotLocalUrl:
+ raise errors.BzrCommandError(gettext("You cannot remove the working tree"
+ " of a remote path"))
+ if not force:
+ if (working.has_changes()):
+ raise errors.UncommittedChanges(working)
+ if working.get_shelf_manager().last_shelf() is not None:
+ raise errors.ShelvedChanges(working)
+
+ if working.user_url != working.branch.user_url:
+ raise errors.BzrCommandError(gettext("You cannot remove the working tree"
+ " from a lightweight checkout"))
+
+ d.destroy_workingtree()
+
+
+class cmd_repair_workingtree(Command):
+ __doc__ = """Reset the working tree state file.
+
+ This is not meant to be used normally, but more as a way to recover from
+ filesystem corruption, etc. This rebuilds the working inventory back to a
+ 'known good' state. Any new modifications (adding a file, renaming, etc)
+ will be lost, though modified files will still be detected as such.
+
+ Most users will want something more like "bzr revert" or "bzr update"
+ unless the state file has become corrupted.
+
+ By default this attempts to recover the current state by looking at the
+ headers of the state file. If the state file is too corrupted to even do
+ that, you can supply --revision to force the state of the tree.
+ """
+
+ takes_options = ['revision', 'directory',
+ Option('force',
+ help='Reset the tree even if it doesn\'t appear to be'
+ ' corrupted.'),
+ ]
+ hidden = True
+
+ def run(self, revision=None, directory='.', force=False):
+ tree, _ = WorkingTree.open_containing(directory)
+ self.add_cleanup(tree.lock_tree_write().unlock)
+ if not force:
+ try:
+ tree.check_state()
+ except errors.BzrError:
+ pass # There seems to be a real error here, so we'll reset
+ else:
+ # Refuse
+ raise errors.BzrCommandError(gettext(
+ 'The tree does not appear to be corrupt. You probably'
+ ' want "bzr revert" instead. Use "--force" if you are'
+ ' sure you want to reset the working tree.'))
+ if revision is None:
+ revision_ids = None
+ else:
+ revision_ids = [r.as_revision_id(tree.branch) for r in revision]
+ try:
+ tree.reset_state(revision_ids)
+ except errors.BzrError, e:
+ if revision_ids is None:
+ extra = (gettext(', the header appears corrupt, try passing -r -1'
+ ' to set the state to the last commit'))
+ else:
+ extra = ''
+ raise errors.BzrCommandError(gettext('failed to reset the tree state{0}').format(extra))
+
+
+class cmd_revno(Command):
+ __doc__ = """Show current revision number.
+
+ This is equal to the number of revisions on this branch.
+ """
+
+ _see_also = ['info']
+ takes_args = ['location?']
+ takes_options = [
+ Option('tree', help='Show revno of working tree.'),
+ 'revision',
+ ]
+
+ @display_command
+ def run(self, tree=False, location=u'.', revision=None):
+ if revision is not None and tree:
+ raise errors.BzrCommandError(gettext("--tree and --revision can "
+ "not be used together"))
+
+ if tree:
+ try:
+ wt = WorkingTree.open_containing(location)[0]
+ self.add_cleanup(wt.lock_read().unlock)
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ raise errors.NoWorkingTree(location)
+ b = wt.branch
+ revid = wt.last_revision()
+ else:
+ b = Branch.open_containing(location)[0]
+ self.add_cleanup(b.lock_read().unlock)
+ if revision:
+ if len(revision) != 1:
+ raise errors.BzrCommandError(gettext(
+ "Tags can only be placed on a single revision, "
+ "not on a range"))
+ revid = revision[0].as_revision_id(b)
+ else:
+ revid = b.last_revision()
+ try:
+ revno_t = b.revision_id_to_dotted_revno(revid)
+ except errors.NoSuchRevision:
+ revno_t = ('???',)
+ revno = ".".join(str(n) for n in revno_t)
+ self.cleanup_now()
+ self.outf.write(revno + '\n')
+
+
+class cmd_revision_info(Command):
+ __doc__ = """Show revision number and revision id for a given revision identifier.
+ """
+ hidden = True
+ takes_args = ['revision_info*']
+ takes_options = [
+ 'revision',
+ custom_help('directory',
+ help='Branch to examine, '
+ 'rather than the one containing the working directory.'),
+ Option('tree', help='Show revno of working tree.'),
+ ]
+
+ @display_command
+ def run(self, revision=None, directory=u'.', tree=False,
+ revision_info_list=[]):
+
+ try:
+ wt = WorkingTree.open_containing(directory)[0]
+ b = wt.branch
+ self.add_cleanup(wt.lock_read().unlock)
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ wt = None
+ b = Branch.open_containing(directory)[0]
+ self.add_cleanup(b.lock_read().unlock)
+ revision_ids = []
+ if revision is not None:
+ revision_ids.extend(rev.as_revision_id(b) for rev in revision)
+ if revision_info_list is not None:
+ for rev_str in revision_info_list:
+ rev_spec = RevisionSpec.from_string(rev_str)
+ revision_ids.append(rev_spec.as_revision_id(b))
+ # No arguments supplied, default to the last revision
+ if len(revision_ids) == 0:
+ if tree:
+ if wt is None:
+ raise errors.NoWorkingTree(directory)
+ revision_ids.append(wt.last_revision())
+ else:
+ revision_ids.append(b.last_revision())
+
+ revinfos = []
+ maxlen = 0
+ for revision_id in revision_ids:
+ try:
+ dotted_revno = b.revision_id_to_dotted_revno(revision_id)
+ revno = '.'.join(str(i) for i in dotted_revno)
+ except errors.NoSuchRevision:
+ revno = '???'
+ maxlen = max(maxlen, len(revno))
+ revinfos.append([revno, revision_id])
+
+ self.cleanup_now()
+ for ri in revinfos:
+ self.outf.write('%*s %s\n' % (maxlen, ri[0], ri[1]))
+
+
+class cmd_add(Command):
+ __doc__ = """Add specified files or directories.
+
+ In non-recursive mode, all the named items are added, regardless
+ of whether they were previously ignored. A warning is given if
+ any of the named files are already versioned.
+
+ In recursive mode (the default), files are treated the same way
+ but the behaviour for directories is different. Directories that
+ are already versioned do not give a warning. All directories,
+ whether already versioned or not, are searched for files or
+ subdirectories that are neither versioned nor ignored, and these
+ are added. This search proceeds recursively into versioned
+ directories. If no names are given '.' is assumed.
+
+ A warning will be printed when nested trees are encountered,
+ unless they are explicitly ignored.
+
+ Therefore simply saying 'bzr add' will version all files that
+ are currently unknown.
+
+ Adding a file whose parent directory is not versioned will
+ implicitly add the parent, and so on up to the root. This means
+ you should never need to explicitly add a directory, they'll just
+ get added when you add a file in the directory.
+
+ --dry-run will show which files would be added, but not actually
+ add them.
+
+ --file-ids-from will try to use the file ids from the supplied path.
+ It looks up ids trying to find a matching parent directory with the
+ same filename, and then by pure path. This option is rarely needed
+ but can be useful when adding the same logical file into two
+ branches that will be merged later (without showing the two different
+ adds as a conflict). It is also useful when merging another project
+ into a subdirectory of this one.
+
+ Any files matching patterns in the ignore list will not be added
+ unless they are explicitly mentioned.
+
+ In recursive mode, files larger than the configuration option
+ add.maximum_file_size will be skipped. Named items are never skipped due
+ to file size.
+ """
+ takes_args = ['file*']
+ takes_options = [
+ Option('no-recurse',
+ help="Don't recursively add the contents of directories.",
+ short_name='N'),
+ Option('dry-run',
+ help="Show what would be done, but don't actually do anything."),
+ 'verbose',
+ Option('file-ids-from',
+ type=unicode,
+ help='Lookup file ids from this tree.'),
+ ]
+ encoding_type = 'replace'
+ _see_also = ['remove', 'ignore']
+
+ def run(self, file_list, no_recurse=False, dry_run=False, verbose=False,
+ file_ids_from=None):
+ import bzrlib.add
+
+ base_tree = None
+ if file_ids_from is not None:
+ try:
+ base_tree, base_path = WorkingTree.open_containing(
+ file_ids_from)
+ except errors.NoWorkingTree:
+ base_branch, base_path = Branch.open_containing(
+ file_ids_from)
+ base_tree = base_branch.basis_tree()
+
+ action = bzrlib.add.AddFromBaseAction(base_tree, base_path,
+ to_file=self.outf, should_print=(not is_quiet()))
+ else:
+ action = bzrlib.add.AddWithSkipLargeAction(to_file=self.outf,
+ should_print=(not is_quiet()))
+
+ if base_tree:
+ self.add_cleanup(base_tree.lock_read().unlock)
+ tree, file_list = tree_files_for_add(file_list)
+ added, ignored = tree.smart_add(file_list, not
+ no_recurse, action=action, save=not dry_run)
+ self.cleanup_now()
+ if len(ignored) > 0:
+ if verbose:
+ for glob in sorted(ignored.keys()):
+ for path in ignored[glob]:
+ self.outf.write(
+ gettext("ignored {0} matching \"{1}\"\n").format(
+ path, glob))
+
+
+class cmd_mkdir(Command):
+ __doc__ = """Create a new versioned directory.
+
+ This is equivalent to creating the directory and then adding it.
+ """
+
+ takes_args = ['dir+']
+ takes_options = [
+ Option(
+ 'parents',
+ help='No error if existing, make parent directories as needed.',
+ short_name='p'
+ )
+ ]
+ encoding_type = 'replace'
+
+ @classmethod
+ def add_file_with_parents(cls, wt, relpath):
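+ # Stop at the first ancestor that is already versioned; otherwise
+ # version the parent directory before adding this path.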
+ if wt.path2id(relpath) is not None:
+ return
+ cls.add_file_with_parents(wt, osutils.dirname(relpath))
+ wt.add([relpath])
+
+ @classmethod
+ def add_file_single(cls, wt, relpath):
+ wt.add([relpath])
+
+ def run(self, dir_list, parents=False):
+ if parents:
+ add_file = self.add_file_with_parents
+ else:
+ add_file = self.add_file_single
+ for dir in dir_list:
+ wt, relpath = WorkingTree.open_containing(dir)
+ if parents:
+ try:
+ os.makedirs(dir)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ else:
+ os.mkdir(dir)
+ add_file(wt, relpath)
+ if not is_quiet():
+ self.outf.write(gettext('added %s\n') % dir)
+
+
+class cmd_relpath(Command):
+ __doc__ = """Show path of a file relative to root"""
+
+ takes_args = ['filename']
+ hidden = True
+
+ @display_command
+ def run(self, filename):
+ # TODO: jam 20050106 Can relpath return a munged path if
+ # sys.stdout encoding cannot represent it?
+ tree, relpath = WorkingTree.open_containing(filename)
+ self.outf.write(relpath)
+ self.outf.write('\n')
+
+
+class cmd_inventory(Command):
+ __doc__ = """Show inventory of the current working copy or a revision.
+
+ It is possible to limit the output to a particular entry
+ type using the --kind option. For example: --kind file.
+
+ It is also possible to restrict the list of files to a specific
+ set. For example: bzr inventory --show-ids this/file
+ """
+
+ hidden = True
+ _see_also = ['ls']
+ takes_options = [
+ 'revision',
+ 'show-ids',
+ Option('kind',
+ help='List entries of a particular kind: file, directory, symlink.',
+ type=unicode),
+ ]
+ takes_args = ['file*']
+
+ @display_command
+ def run(self, revision=None, show_ids=False, kind=None, file_list=None):
+ if kind and kind not in ['file', 'directory', 'symlink']:
+ raise errors.BzrCommandError(gettext('invalid kind %r specified') % (kind,))
+
+ revision = _get_one_revision('inventory', revision)
+ work_tree, file_list = WorkingTree.open_containing_paths(file_list)
+ self.add_cleanup(work_tree.lock_read().unlock)
+ if revision is not None:
+ tree = revision.as_tree(work_tree.branch)
+
+ extra_trees = [work_tree]
+ self.add_cleanup(tree.lock_read().unlock)
+ else:
+ tree = work_tree
+ extra_trees = []
+
+ self.add_cleanup(tree.lock_read().unlock)
+ if file_list is not None:
+ file_ids = tree.paths2ids(file_list, trees=extra_trees,
+ require_versioned=True)
+ # find_ids_across_trees may include some paths that don't
+ # exist in 'tree'.
+ entries = tree.iter_entries_by_dir(specific_file_ids=file_ids)
+ else:
+ entries = tree.iter_entries_by_dir()
+
+ for path, entry in sorted(entries):
+ if kind and kind != entry.kind:
+ continue
+ if path == "":
+ continue
+ if show_ids:
+ self.outf.write('%-50s %s\n' % (path, entry.file_id))
+ else:
+ self.outf.write(path)
+ self.outf.write('\n')
+
+
+class cmd_mv(Command):
+ __doc__ = """Move or rename a file.
+
+ :Usage:
+ bzr mv OLDNAME NEWNAME
+
+ bzr mv SOURCE... DESTINATION
+
+ If the last argument is a versioned directory, all the other names
+ are moved into it. Otherwise, there must be exactly two arguments
+ and the file is changed to a new name.
+
+ If OLDNAME does not exist on the filesystem but is versioned and
+ NEWNAME does exist on the filesystem but is not versioned, mv
+ assumes that the file has been manually moved and only updates
+ its internal inventory to reflect that change.
+ The same applies when moving many SOURCE files to a DESTINATION.
+
+ Files cannot be moved between branches.
+ """
+
+ takes_args = ['names*']
+ takes_options = [Option("after", help="Move only the bzr identifier"
+ " of the file, because the file has already been moved."),
+ Option('auto', help='Automatically guess renames.'),
+ Option('dry-run', help='Avoid making changes when guessing renames.'),
+ ]
+ aliases = ['move', 'rename']
+ encoding_type = 'replace'
+
+ def run(self, names_list, after=False, auto=False, dry_run=False):
+ if auto:
+ return self.run_auto(names_list, after, dry_run)
+ elif dry_run:
+ raise errors.BzrCommandError(gettext('--dry-run requires --auto.'))
+ if names_list is None:
+ names_list = []
+ if len(names_list) < 2:
+ raise errors.BzrCommandError(gettext("missing file argument"))
+ tree, rel_names = WorkingTree.open_containing_paths(names_list, canonicalize=False)
+ for file_name in rel_names[0:-1]:
+ if file_name == '':
+ raise errors.BzrCommandError(gettext("can not move root of branch"))
+ self.add_cleanup(tree.lock_tree_write().unlock)
+ self._run(tree, names_list, rel_names, after)
+
+ def run_auto(self, names_list, after, dry_run):
+ if names_list is not None and len(names_list) > 1:
+ raise errors.BzrCommandError(gettext('Only one path may be specified to'
+ ' --auto.'))
+ if after:
+ raise errors.BzrCommandError(gettext('--after cannot be specified with'
+ ' --auto.'))
+ work_tree, file_list = WorkingTree.open_containing_paths(
+ names_list, default_directory='.')
+ self.add_cleanup(work_tree.lock_tree_write().unlock)
+ rename_map.RenameMap.guess_renames(work_tree, dry_run)
+
+ def _run(self, tree, names_list, rel_names, after):
+ into_existing = osutils.isdir(names_list[-1])
+ if into_existing and len(names_list) == 2:
+ # special cases:
+ # a. case-insensitive filesystem and change case of dir
+ # b. move directory after the fact (if the source used to be
+ # a directory, but now doesn't exist in the working tree
+ # and the target is an existing directory, just rename it)
+ if (not tree.case_sensitive
+ and rel_names[0].lower() == rel_names[1].lower()):
+ into_existing = False
+ else:
+ # 'fix' the case of a potential 'from'
+ from_id = tree.path2id(
+ tree.get_canonical_inventory_path(rel_names[0]))
+ if (not osutils.lexists(names_list[0]) and
+ from_id and tree.stored_kind(from_id) == "directory"):
+ into_existing = False
+ # move/rename
+ if into_existing:
+ # move into existing directory
+ # All entries reference existing inventory items, so fix them up
+ # for cicp file-systems.
+ rel_names = tree.get_canonical_inventory_paths(rel_names)
+ for src, dest in tree.move(rel_names[:-1], rel_names[-1], after=after):
+ if not is_quiet():
+ self.outf.write("%s => %s\n" % (src, dest))
+ else:
+ if len(names_list) != 2:
+ raise errors.BzrCommandError(gettext('to mv multiple files the'
+ ' destination must be a versioned'
+ ' directory'))
+
+ # for cicp file-systems: the src references an existing inventory
+ # item:
+ src = tree.get_canonical_inventory_path(rel_names[0])
+ # Find the canonical version of the destination: In all cases, the
+ # parent of the target must be in the inventory, so we fetch the
+ # canonical version from there (we do not always *use* the
+ # canonicalized tail portion - we may be attempting to rename the
+ # case of the tail)
+ canon_dest = tree.get_canonical_inventory_path(rel_names[1])
+ dest_parent = osutils.dirname(canon_dest)
+ spec_tail = osutils.basename(rel_names[1])
+ # For a CICP file-system, we need to avoid creating 2 inventory
+ # entries that differ only by case. So regardless of the case
+ # we *want* to use (ie, specified by the user or the file-system),
+ # we must always choose to use the case of any existing inventory
+ # items. The only exception to this is when we are attempting a
+ # case-only rename (ie, canonical versions of src and dest are
+ # the same)
+ dest_id = tree.path2id(canon_dest)
+ if dest_id is None or tree.path2id(src) == dest_id:
+ # No existing item we care about, so work out what case we
+ # are actually going to use.
+ if after:
+ # If 'after' is specified, the tail must refer to a file on disk.
+ if dest_parent:
+ dest_parent_fq = osutils.pathjoin(tree.basedir, dest_parent)
+ else:
+ # pathjoin with an empty tail adds a slash, which breaks
+ # relpath :(
+ dest_parent_fq = tree.basedir
+
+ dest_tail = osutils.canonical_relpath(
+ dest_parent_fq,
+ osutils.pathjoin(dest_parent_fq, spec_tail))
+ else:
+ # not 'after', so case as specified is used
+ dest_tail = spec_tail
+ else:
+ # Use the existing item so 'mv' fails with AlreadyVersioned.
+ dest_tail = os.path.basename(canon_dest)
+ dest = osutils.pathjoin(dest_parent, dest_tail)
+ mutter("attempting to move %s => %s", src, dest)
+ tree.rename_one(src, dest, after=after)
+ if not is_quiet():
+ self.outf.write("%s => %s\n" % (src, dest))
+
+
+class cmd_pull(Command):
+ __doc__ = """Turn this branch into a mirror of another branch.
+
+ By default, this command only works on branches that have not diverged.
+ Branches are considered diverged if the destination branch's most recent
+ commit is one that has not been merged (directly or indirectly) into the
+ parent.
+
+ If branches have diverged, you can use 'bzr merge' to integrate the changes
+ from one into the other. Once one branch has merged, the other should
+ be able to pull it again.
+
+ If you want to replace your local changes and just want your branch to
+ match the remote one, use pull --overwrite. This will work even if the two
+ branches have diverged.
+
+ If there is no default location set, the first pull will set it (use
+ --no-remember to avoid setting it). After that, you can omit the
+ location to use the default. To change the default, use --remember. The
+ value will only be saved if the remote location can be accessed.
+
+ The --verbose option will display the revisions pulled using the log_format
+ configuration option. You can use a different format by overriding it with
+ -Olog_format=<other_format>.
+
+ Note: The location can be specified either in the form of a branch,
+ or in the form of a path to a file containing a merge directive generated
+ with bzr send.
+ """
+
+ _see_also = ['push', 'update', 'status-flags', 'send']
+ takes_options = ['remember', 'overwrite', 'revision',
+ custom_help('verbose',
+ help='Show logs of pulled revisions.'),
+ custom_help('directory',
+ help='Branch to pull into, '
+ 'rather than the one containing the working directory.'),
+ Option('local',
+ help="Perform a local pull in a bound "
+ "branch. Local pulls are not applied to "
+ "the master branch."
+ ),
+ Option('show-base',
+ help="Show base revision text in conflicts."),
+ Option('overwrite-tags',
+ help="Overwrite tags only."),
+ ]
+ takes_args = ['location?']
+ encoding_type = 'replace'
+
+ def run(self, location=None, remember=None, overwrite=False,
+ revision=None, verbose=False,
+ directory=None, local=False,
+ show_base=False, overwrite_tags=False):
+
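+ # Express --overwrite / --overwrite-tags as the list of domains
+ # ('history', 'tags') that is passed on to pull().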
+ if overwrite:
+ overwrite = ["history", "tags"]
+ elif overwrite_tags:
+ overwrite = ["tags"]
+ else:
+ overwrite = []
+ # FIXME: too much stuff is in the command class
+ revision_id = None
+ mergeable = None
+ if directory is None:
+ directory = u'.'
+ try:
+ tree_to = WorkingTree.open_containing(directory)[0]
+ branch_to = tree_to.branch
+ self.add_cleanup(tree_to.lock_write().unlock)
+ except errors.NoWorkingTree:
+ tree_to = None
+ branch_to = Branch.open_containing(directory)[0]
+ self.add_cleanup(branch_to.lock_write().unlock)
+
+ if tree_to is None and show_base:
+ raise errors.BzrCommandError(gettext("Need working tree for --show-base."))
+
+ if local and not branch_to.get_bound_location():
+ raise errors.LocalRequiresBoundBranch()
+
+ possible_transports = []
+ if location is not None:
+ try:
+ mergeable = bundle.read_mergeable_from_url(location,
+ possible_transports=possible_transports)
+ except errors.NotABundle:
+ mergeable = None
+
+ stored_loc = branch_to.get_parent()
+ if location is None:
+ if stored_loc is None:
+ raise errors.BzrCommandError(gettext("No pull location known or"
+ " specified."))
+ else:
+ display_url = urlutils.unescape_for_display(stored_loc,
+ self.outf.encoding)
+ if not is_quiet():
+ self.outf.write(gettext("Using saved parent location: %s\n") % display_url)
+ location = stored_loc
+
+ revision = _get_one_revision('pull', revision)
+ if mergeable is not None:
+ if revision is not None:
+ raise errors.BzrCommandError(gettext(
+ 'Cannot use -r with merge directives or bundles'))
+ mergeable.install_revisions(branch_to.repository)
+ base_revision_id, revision_id, verified = \
+ mergeable.get_merge_request(branch_to.repository)
+ branch_from = branch_to
+ else:
+ branch_from = Branch.open(location,
+ possible_transports=possible_transports)
+ self.add_cleanup(branch_from.lock_read().unlock)
+ # Remembers if asked explicitly or no previous location is set
+ if (remember
+ or (remember is None and branch_to.get_parent() is None)):
+ # FIXME: This shouldn't be done before the pull
+ # succeeds... -- vila 2012-01-02
+ branch_to.set_parent(branch_from.base)
+
+ if revision is not None:
+ revision_id = revision.as_revision_id(branch_from)
+
+ if tree_to is not None:
+ view_info = _get_view_info_for_change_reporter(tree_to)
+ change_reporter = delta._ChangeReporter(
+ unversioned_filter=tree_to.is_ignored,
+ view_info=view_info)
+ result = tree_to.pull(
+ branch_from, overwrite, revision_id, change_reporter,
+ local=local, show_base=show_base)
+ else:
+ result = branch_to.pull(
+ branch_from, overwrite, revision_id, local=local)
+
+ result.report(self.outf)
+ if verbose and result.old_revid != result.new_revid:
+ log.show_branch_change(
+ branch_to, self.outf, result.old_revno,
+ result.old_revid)
+ if getattr(result, 'tag_conflicts', None):
+ return 1
+ else:
+ return 0
+
+
+class cmd_push(Command):
+ __doc__ = """Update a mirror of this branch.
+
+ The target branch will not have its working tree populated because this
+ is both expensive, and is not supported on remote file systems.
+
+ Some smart servers or protocols *may* put the working tree in place in
+ the future.
+
+ This command only works on branches that have not diverged. Branches are
+ considered diverged if the destination branch's most recent commit is one
+ that has not been merged (directly or indirectly) by the source branch.
+
+ If branches have diverged, you can use 'bzr push --overwrite' to replace
+ the other branch completely, discarding its unmerged changes.
+
+ If you want to ensure you have the different changes in the other branch,
+ do a merge (see bzr help merge) from the other branch, and commit that.
+ After that you will be able to do a push without '--overwrite'.
+
+ If there is no default push location set, the first push will set it (use
+ --no-remember to avoid setting it). After that, you can omit the
+ location to use the default. To change the default, use --remember. The
+ value will only be saved if the remote location can be accessed.
+
+ The --verbose option will display the revisions pushed using the log_format
+ configuration option. You can use a different format by overriding it with
+ -Olog_format=<other_format>.
+ """
+
+ _see_also = ['pull', 'update', 'working-trees']
+ takes_options = ['remember', 'overwrite', 'verbose', 'revision',
+ Option('create-prefix',
+ help='Create the path leading up to the branch '
+ 'if it does not already exist.'),
+ custom_help('directory',
+ help='Branch to push from, '
+ 'rather than the one containing the working directory.'),
+ Option('use-existing-dir',
+ help='By default push will fail if the target'
+ ' directory exists, but does not already'
+ ' have a control directory. This flag will'
+ ' allow push to proceed.'),
+ Option('stacked',
+ help='Create a stacked branch that references the public location '
+ 'of the parent branch.'),
+ Option('stacked-on',
+ help='Create a stacked branch that refers to another branch '
+ 'for the commit history. Only the work not present in the '
+ 'referenced branch is included in the branch created.',
+ type=unicode),
+ Option('strict',
+ help='Refuse to push if there are uncommitted changes in'
+ ' the working tree, --no-strict disables the check.'),
+ Option('no-tree',
+ help="Don't populate the working tree, even for protocols"
+ " that support it."),
+ Option('overwrite-tags',
+ help="Overwrite tags only."),
+ ]
+ takes_args = ['location?']
+ encoding_type = 'replace'
+
+ def run(self, location=None, remember=None, overwrite=False,
+ create_prefix=False, verbose=False, revision=None,
+ use_existing_dir=False, directory=None, stacked_on=None,
+ stacked=False, strict=None, no_tree=False,
+ overwrite_tags=False):
+ from bzrlib.push import _show_push_branch
+
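+ # As for pull, express the overwrite flags as a list of domains
+ # ('history', 'tags') passed on to _show_push_branch().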
+ if overwrite:
+ overwrite = ["history", "tags"]
+ elif overwrite_tags:
+ overwrite = ["tags"]
+ else:
+ overwrite = []
+
+ if directory is None:
+ directory = '.'
+ # Get the source branch
+ (tree, br_from,
+ _unused) = controldir.ControlDir.open_containing_tree_or_branch(directory)
+ # Get the tip's revision_id
+ revision = _get_one_revision('push', revision)
+ if revision is not None:
+ revision_id = revision.in_history(br_from).rev_id
+ else:
+ revision_id = None
+ if tree is not None and revision_id is None:
+ tree.check_changed_or_out_of_date(
+ strict, 'push_strict',
+ more_error='Use --no-strict to force the push.',
+ more_warning='Uncommitted changes will not be pushed.')
+ # Get the stacked_on branch, if any
+ if stacked_on is not None:
+ stacked_on = urlutils.normalize_url(stacked_on)
+ elif stacked:
+ parent_url = br_from.get_parent()
+ if parent_url:
+ parent = Branch.open(parent_url)
+ stacked_on = parent.get_public_branch()
+ if not stacked_on:
+ # I considered excluding non-http URLs here, thus forcing
+ # 'public' branches only, but that only works for some
+ # users, so it's best to just depend on the user spotting an
+ # error by the feedback given to them. RBC 20080227.
+ stacked_on = parent_url
+ if not stacked_on:
+ raise errors.BzrCommandError(gettext(
+ "Could not determine branch to refer to."))
+
+ # Get the destination location
+ if location is None:
+ stored_loc = br_from.get_push_location()
+ if stored_loc is None:
+ parent_loc = br_from.get_parent()
+ if parent_loc:
+ raise errors.BzrCommandError(gettext(
+ "No push location known or specified. To push to the "
+ "parent branch (at %s), use 'bzr push :parent'.") %
+ urlutils.unescape_for_display(parent_loc,
+ self.outf.encoding))
+ else:
+ raise errors.BzrCommandError(gettext(
+ "No push location known or specified."))
+ else:
+ display_url = urlutils.unescape_for_display(stored_loc,
+ self.outf.encoding)
+ note(gettext("Using saved push location: %s") % display_url)
+ location = stored_loc
+
+ _show_push_branch(br_from, revision_id, location, self.outf,
+ verbose=verbose, overwrite=overwrite, remember=remember,
+ stacked_on=stacked_on, create_prefix=create_prefix,
+ use_existing_dir=use_existing_dir, no_tree=no_tree)
+
+
+class cmd_branch(Command):
+ __doc__ = """Create a new branch that is a copy of an existing branch.
+
+ If the TO_LOCATION is omitted, the last component of the FROM_LOCATION will
+ be used. In other words, "branch ../foo/bar" will attempt to create ./bar.
+ If the FROM_LOCATION has no / or path separator embedded, the TO_LOCATION
+ is derived from the FROM_LOCATION by stripping a leading scheme or drive
+ identifier, if any. For example, "branch lp:foo-bar" will attempt to
+ create ./foo-bar.
+
+ To retrieve the branch as of a particular revision, supply the --revision
+ parameter, as in "branch foo/bar -r 5".
+
+ The synonyms 'clone' and 'get' for this command are deprecated.
+ """
+
+ _see_also = ['checkout']
+ takes_args = ['from_location', 'to_location?']
+ takes_options = ['revision',
+ Option('hardlink', help='Hard-link working tree files where possible.'),
+ Option('files-from', type=str,
+ help="Get file contents from this tree."),
+ Option('no-tree',
+ help="Create a branch without a working-tree."),
+ Option('switch',
+ help="Switch the checkout in the current directory "
+ "to the new branch."),
+ Option('stacked',
+ help='Create a stacked branch referring to the source branch. '
+ 'The new branch will depend on the availability of the source '
+ 'branch for all operations.'),
+ Option('standalone',
+ help='Do not use a shared repository, even if available.'),
+ Option('use-existing-dir',
+ help='By default branch will fail if the target'
+ ' directory exists, but does not already'
+ ' have a control directory. This flag will'
+ ' allow branch to proceed.'),
+ Option('bind',
+ help="Bind new branch to from location."),
+ ]
+ aliases = ['get', 'clone']
+
+ def run(self, from_location, to_location=None, revision=None,
+ hardlink=False, stacked=False, standalone=False, no_tree=False,
+ use_existing_dir=False, switch=False, bind=False,
+ files_from=None):
+ from bzrlib import switch as _mod_switch
+ from bzrlib.tag import _merge_tags_if_possible
+ if self.invoked_as in ['get', 'clone']:
+ ui.ui_factory.show_user_warning(
+ 'deprecated_command',
+ deprecated_name=self.invoked_as,
+ recommended_name='branch',
+ deprecated_in_version='2.4')
+ accelerator_tree, br_from = controldir.ControlDir.open_tree_or_branch(
+ from_location)
+ if not (hardlink or files_from):
+ # accelerator_tree is usually slower because you have to read N
+ # files (no readahead, lots of seeks, etc), but allow the user to
+ # explicitly request it
+ accelerator_tree = None
+ if files_from is not None and files_from != from_location:
+ accelerator_tree = WorkingTree.open(files_from)
+ revision = _get_one_revision('branch', revision)
+ self.add_cleanup(br_from.lock_read().unlock)
+ if revision is not None:
+ revision_id = revision.as_revision_id(br_from)
+ else:
+ # FIXME - wt.last_revision, fallback to branch, fall back to
+ # None or perhaps NULL_REVISION to mean copy nothing
+ # RBC 20060209
+ revision_id = br_from.last_revision()
+ if to_location is None:
+ to_location = getattr(br_from, "name", None)
+ if not to_location:
+ to_location = urlutils.derive_to_location(from_location)
+ to_transport = transport.get_transport(to_location)
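+ # Create the target directory. A pre-existing plain directory needs
+ # --use-existing-dir; a pre-existing branch is always an error.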
+ try:
+ to_transport.mkdir('.')
+ except errors.FileExists:
+ try:
+ to_dir = controldir.ControlDir.open_from_transport(
+ to_transport)
+ except errors.NotBranchError:
+ if not use_existing_dir:
+ raise errors.BzrCommandError(gettext('Target directory "%s" '
+ 'already exists.') % to_location)
+ else:
+ to_dir = None
+ else:
+ try:
+ to_dir.open_branch()
+ except errors.NotBranchError:
+ pass
+ else:
+ raise errors.AlreadyBranchError(to_location)
+ except errors.NoSuchFile:
+ raise errors.BzrCommandError(gettext('Parent of "%s" does not exist.')
+ % to_location)
+ else:
+ to_dir = None
+ if to_dir is None:
+ try:
+ # preserve whatever source format we have.
+ to_dir = br_from.bzrdir.sprout(to_transport.base, revision_id,
+ possible_transports=[to_transport],
+ accelerator_tree=accelerator_tree,
+ hardlink=hardlink, stacked=stacked,
+ force_new_repo=standalone,
+ create_tree_if_local=not no_tree,
+ source_branch=br_from)
+ branch = to_dir.open_branch(
+ possible_transports=[
+ br_from.bzrdir.root_transport, to_transport])
+ except errors.NoSuchRevision:
+ to_transport.delete_tree('.')
+ msg = gettext("The branch {0} has no revision {1}.").format(
+ from_location, revision)
+ raise errors.BzrCommandError(msg)
+ else:
+ try:
+ to_repo = to_dir.open_repository()
+ except errors.NoRepositoryPresent:
+ to_repo = to_dir.create_repository()
+ to_repo.fetch(br_from.repository, revision_id=revision_id)
+ branch = br_from.sprout(to_dir, revision_id=revision_id)
+ _merge_tags_if_possible(br_from, branch)
+ # If the source branch is stacked, the new branch may
+ # be stacked whether we asked for that explicitly or not.
+ # We therefore need a try/except here and not just 'if stacked:'
+ try:
+ note(gettext('Created new stacked branch referring to %s.') %
+ branch.get_stacked_on_url())
+ except (errors.NotStacked, errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat), e:
+ note(ngettext('Branched %d revision.', 'Branched %d revisions.', branch.revno()) % branch.revno())
+ if bind:
+ # Bind to the parent
+ parent_branch = Branch.open(from_location)
+ branch.bind(parent_branch)
+ note(gettext('New branch bound to %s') % from_location)
+ if switch:
+ # Switch to the new branch
+ wt, _ = WorkingTree.open_containing('.')
+ _mod_switch.switch(wt.bzrdir, branch)
+ note(gettext('Switched to branch: %s'),
+ urlutils.unescape_for_display(branch.base, 'utf-8'))
+
+
+class cmd_branches(Command):
+ __doc__ = """List the branches available at the current location.
+
+ This command will print the names of all the branches at the current
+ location.
+ """
+
+ takes_args = ['location?']
+ takes_options = [
+ Option('recursive', short_name='R',
+ help='Recursively scan for branches rather than '
+ 'just looking in the specified location.')]
+
+ def run(self, location=".", recursive=False):
+ if recursive:
+ t = transport.get_transport(location)
+ if not t.listable():
+ raise errors.BzrCommandError(
+ "Can't scan this type of location.")
+ for b in controldir.ControlDir.find_branches(t):
+ self.outf.write("%s\n" % urlutils.unescape_for_display(
+ urlutils.relative_url(t.base, b.base),
+ self.outf.encoding).rstrip("/"))
+ else:
+ dir = controldir.ControlDir.open_containing(location)[0]
+ try:
+ active_branch = dir.open_branch(name="")
+ except errors.NotBranchError:
+ active_branch = None
+ names = {}
+ for name, branch in iter_sibling_branches(dir):
+ if name == "":
+ continue
+ active = (active_branch is not None and
+ active_branch.base == branch.base)
+ names[name] = active
+ # Only mention the current branch explicitly if it's not
+ # one of the colocated branches
+ if not any(names.values()) and active_branch is not None:
+ self.outf.write("* %s\n" % gettext("(default)"))
+ for name in sorted(names.keys()):
+ active = names[name]
+ if active:
+ prefix = "*"
+ else:
+ prefix = " "
+ self.outf.write("%s %s\n" % (
+ prefix, name.encode(self.outf.encoding)))
+
+
+class cmd_checkout(Command):
+ __doc__ = """Create a new checkout of an existing branch.
+
+ If BRANCH_LOCATION is omitted, checkout will reconstitute a working tree for
+ the branch found in '.'. This is useful if you have removed the working tree
+ or if it was never created - i.e. if you pushed the branch to its current
+ location using SFTP.
+
+ If the TO_LOCATION is omitted, the last component of the BRANCH_LOCATION will
+ be used. In other words, "checkout ../foo/bar" will attempt to create ./bar.
+ If the BRANCH_LOCATION has no / or path separator embedded, the TO_LOCATION
+ is derived from the BRANCH_LOCATION by stripping a leading scheme or drive
+ identifier, if any. For example, "checkout lp:foo-bar" will attempt to
+ create ./foo-bar.
+
+ To retrieve the branch as of a particular revision, supply the --revision
+ parameter, as in "checkout foo/bar -r 5". Note that this will be immediately
+ out of date [so you cannot commit] but it may be useful (e.g. to examine old
+ code).
+ """
+
+ _see_also = ['checkouts', 'branch']
+ takes_args = ['branch_location?', 'to_location?']
+ takes_options = ['revision',
+ Option('lightweight',
+ help="Perform a lightweight checkout. Lightweight "
+ "checkouts depend on access to the branch for "
+ "every operation. Normal checkouts can perform "
+ "common operations like diff and status without "
+ "such access, and also support local commits."
+ ),
+ Option('files-from', type=str,
+ help="Get file contents from this tree."),
+ Option('hardlink',
+ help='Hard-link working tree files where possible.'
+ ),
+ ]
+ aliases = ['co']
+
+ def run(self, branch_location=None, to_location=None, revision=None,
+ lightweight=False, files_from=None, hardlink=False):
+ if branch_location is None:
+ branch_location = osutils.getcwd()
+ to_location = branch_location
+ accelerator_tree, source = controldir.ControlDir.open_tree_or_branch(
+ branch_location)
+ if not (hardlink or files_from):
+ # accelerator_tree is usually slower because you have to read N
+ # files (no readahead, lots of seeks, etc), but allow the user to
+ # explicitly request it
+ accelerator_tree = None
+ revision = _get_one_revision('checkout', revision)
+ if files_from is not None and files_from != branch_location:
+ accelerator_tree = WorkingTree.open(files_from)
+ if revision is not None:
+ revision_id = revision.as_revision_id(source)
+ else:
+ revision_id = None
+ if to_location is None:
+ to_location = urlutils.derive_to_location(branch_location)
+ # if the source and to_location are the same,
+ # and there is no working tree,
+ # then reconstitute a branch
+ if (osutils.abspath(to_location) ==
+ osutils.abspath(branch_location)):
+ try:
+ source.bzrdir.open_workingtree()
+ except errors.NoWorkingTree:
+ source.bzrdir.create_workingtree(revision_id)
+ return
+ source.create_checkout(to_location, revision_id, lightweight,
+ accelerator_tree, hardlink)
+
+
+class cmd_renames(Command):
+ __doc__ = """Show list of renamed files.
+ """
+ # TODO: Option to show renames between two historical versions.
+
+ # TODO: Only show renames under dir, rather than in the whole branch.
+ _see_also = ['status']
+ takes_args = ['dir?']
+
+ @display_command
+ def run(self, dir=u'.'):
+ tree = WorkingTree.open_containing(dir)[0]
+ self.add_cleanup(tree.lock_read().unlock)
+ old_tree = tree.basis_tree()
+ self.add_cleanup(old_tree.lock_read().unlock)
+ renames = []
+ iterator = tree.iter_changes(old_tree, include_unchanged=True)
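+ # A rename is a change whose old and new paths are both present and differ.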
+ for f, paths, c, v, p, n, k, e in iterator:
+ if paths[0] == paths[1]:
+ continue
+ if None in (paths):
+ continue
+ renames.append(paths)
+ renames.sort()
+ for old_name, new_name in renames:
+ self.outf.write("%s => %s\n" % (old_name, new_name))
+
+
+class cmd_update(Command):
+ __doc__ = """Update a working tree to a new revision.
+
+ This will perform a merge of the destination revision (the tip of the
+ branch, or the specified revision) into the working tree, and then make
+ that revision the basis revision for the working tree.
+
+ You can use this to visit an older revision, or to update a working tree
+ that is out of date from its branch.
+
+ If there are any uncommitted changes in the tree, they will be carried
+ across and remain as uncommitted changes after the update. To discard
+ these changes, use 'bzr revert'. The uncommitted changes may conflict
+ with the changes brought in by the change in basis revision.
+
+ If the tree's branch is bound to a master branch, bzr will also update
+ the branch from the master.
+
+ You cannot update just a single file or directory, because each Bazaar
+ working tree has just a single basis revision. If you want to restore a
+ file that has been removed locally, use 'bzr revert' instead of 'bzr
+ update'. If you want to restore a file to its state in a previous
+ revision, use 'bzr revert' with a '-r' option, or use 'bzr cat' to write
+ out the old content of that file to a new location.
+
+ The 'dir' argument, if given, must be the location of the root of a
+ working tree to update. By default, the working tree that contains the
+ current working directory is used.
+ """
+
+ _see_also = ['pull', 'working-trees', 'status-flags']
+ takes_args = ['dir?']
+ takes_options = ['revision',
+ Option('show-base',
+ help="Show base revision text in conflicts."),
+ ]
+ aliases = ['up']
+
+ def run(self, dir=None, revision=None, show_base=None):
+ if revision is not None and len(revision) != 1:
+ raise errors.BzrCommandError(gettext(
+ "bzr update --revision takes exactly one revision"))
+ if dir is None:
+ tree = WorkingTree.open_containing('.')[0]
+ else:
+ tree, relpath = WorkingTree.open_containing(dir)
+ if relpath:
+ # See bug 557886.
+ raise errors.BzrCommandError(gettext(
+ "bzr update can only update a whole tree, "
+ "not a file or subdirectory"))
+ branch = tree.branch
+ possible_transports = []
+ master = branch.get_master_branch(
+ possible_transports=possible_transports)
+ if master is not None:
+ branch_location = master.base
+ tree.lock_write()
+ else:
+ branch_location = tree.branch.base
+ tree.lock_tree_write()
+ self.add_cleanup(tree.unlock)
+ # get rid of the final '/' and be ready for display
+ branch_location = urlutils.unescape_for_display(
+ branch_location.rstrip('/'),
+ self.outf.encoding)
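+ # Record the pending merges before updating so that newly appearing
+ # ones can be reported afterwards.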
+ existing_pending_merges = tree.get_parent_ids()[1:]
+ if master is None:
+ old_tip = None
+ else:
+ # may need to fetch data into a heavyweight checkout
+ # XXX: this may take some time, maybe we should display a
+ # message
+ old_tip = branch.update(possible_transports)
+ if revision is not None:
+ revision_id = revision[0].as_revision_id(branch)
+ else:
+ revision_id = branch.last_revision()
+ if revision_id == _mod_revision.ensure_null(tree.last_revision()):
+ revno = branch.revision_id_to_dotted_revno(revision_id)
+ note(gettext("Tree is up to date at revision {0} of branch {1}"
+ ).format('.'.join(map(str, revno)), branch_location))
+ return 0
+ view_info = _get_view_info_for_change_reporter(tree)
+ change_reporter = delta._ChangeReporter(
+ unversioned_filter=tree.is_ignored,
+ view_info=view_info)
+ try:
+ conflicts = tree.update(
+ change_reporter,
+ possible_transports=possible_transports,
+ revision=revision_id,
+ old_tip=old_tip,
+ show_base=show_base)
+ except errors.NoSuchRevision, e:
+ raise errors.BzrCommandError(gettext(
+ "branch has no revision %s\n"
+ "bzr update --revision only works"
+ " for a revision in the branch history")
+ % (e.revision))
+ revno = tree.branch.revision_id_to_dotted_revno(
+ _mod_revision.ensure_null(tree.last_revision()))
+ note(gettext('Updated to revision {0} of branch {1}').format(
+ '.'.join(map(str, revno)), branch_location))
+ parent_ids = tree.get_parent_ids()
+ if parent_ids[1:] and parent_ids[1:] != existing_pending_merges:
+ note(gettext('Your local commits will now show as pending merges with '
+ "'bzr status', and can be committed with 'bzr commit'."))
+ if conflicts != 0:
+ return 1
+ else:
+ return 0
+
+
+class cmd_info(Command):
+ __doc__ = """Show information about a working tree, branch or repository.
+
+ This command will show all known locations and formats associated with the
+ tree, branch or repository.
+
+ In verbose mode, statistical information is included with each report.
+ To see extended statistic information, use a verbosity level of 2 or
+ higher by specifying the verbose option multiple times, e.g. -vv.
+
+ Branches and working trees will also report any missing revisions.
+
+ :Examples:
+
+ Display information on the format and related locations:
+
+ bzr info
+
+ Display the above together with extended format information and
+ basic statistics (like the number of files in the working tree and
+ number of revisions in the branch and repository):
+
+ bzr info -v
+
+ Display the above together with number of committers to the branch:
+
+ bzr info -vv
+ """
+ _see_also = ['revno', 'working-trees', 'repositories']
+ takes_args = ['location?']
+ takes_options = ['verbose']
+ encoding_type = 'replace'
+
+ @display_command
+ def run(self, location=None, verbose=False):
+ if verbose:
+ noise_level = get_verbosity_level()
+ else:
+ noise_level = 0
+ from bzrlib.info import show_bzrdir_info
+ show_bzrdir_info(controldir.ControlDir.open_containing(location)[0],
+ verbose=noise_level, outfile=self.outf)
+
+
+class cmd_remove(Command):
+ __doc__ = """Remove files or directories.
+
+ This makes Bazaar stop tracking changes to the specified files. Bazaar will
+ delete them if they can easily be recovered using revert; otherwise they
+ will be backed up (adding an extension of the form .~#~). If no options or
+ parameters are given, Bazaar will scan for files that are being tracked by
+ Bazaar but missing in your tree and stop tracking them for you.
+ """
+ takes_args = ['file*']
+ takes_options = ['verbose',
+ Option('new', help='Only remove files that have never been committed.'),
+ RegistryOption.from_kwargs('file-deletion-strategy',
+ 'The file deletion mode to be used.',
+ title='Deletion Strategy', value_switches=True, enum_switch=False,
+ safe='Backup changed files (default).',
+ keep='Delete from bzr but leave the working copy.',
+ no_backup='Don\'t backup changed files.',
+ force='Delete all the specified files, even if they can not be '
+ 'recovered and even if they are non-empty directories. '
+ '(deprecated, use no-backup)')]
+ aliases = ['rm', 'del']
+ encoding_type = 'replace'
+
+ def run(self, file_list, verbose=False, new=False,
+ file_deletion_strategy='safe'):
+ if file_deletion_strategy == 'force':
+ note(gettext("(The --force option is deprecated, rather use --no-backup "
+ "in future.)"))
+ file_deletion_strategy = 'no-backup'
+
+ tree, file_list = WorkingTree.open_containing_paths(file_list)
+
+ if file_list is not None:
+ file_list = [f for f in file_list]
+
+ self.add_cleanup(tree.lock_write().unlock)
+ # Heuristics should probably all move into tree.remove_smart or
+ # some such?
+ if new:
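+ # With --new, only remove files added since the basis tree, deepest paths first.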
+ added = tree.changes_from(tree.basis_tree(),
+ specific_files=file_list).added
+ file_list = sorted([f[0] for f in added], reverse=True)
+ if len(file_list) == 0:
+ raise errors.BzrCommandError(gettext('No matching files.'))
+ elif file_list is None:
+ # missing files show up in iter_changes(basis) as
+ # versioned-with-no-kind.
+ missing = []
+ for change in tree.iter_changes(tree.basis_tree()):
+ # Find paths in the working tree that have no kind:
+ if change[1][1] is not None and change[6][1] is None:
+ missing.append(change[1][1])
+ file_list = sorted(missing, reverse=True)
+ file_deletion_strategy = 'keep'
+ tree.remove(file_list, verbose=verbose, to_file=self.outf,
+ keep_files=file_deletion_strategy=='keep',
+ force=(file_deletion_strategy=='no-backup'))
+
+
+class cmd_file_id(Command):
+ __doc__ = """Print file_id of a particular file or directory.
+
+ The file_id is assigned when the file is first added and remains the
+ same through all revisions where the file exists, even when it is
+ moved or renamed.
+ """
+
+ hidden = True
+ _see_also = ['inventory', 'ls']
+ takes_args = ['filename']
+
+ @display_command
+ def run(self, filename):
+ tree, relpath = WorkingTree.open_containing(filename)
+ i = tree.path2id(relpath)
+ if i is None:
+ raise errors.NotVersionedError(filename)
+ else:
+ self.outf.write(i + '\n')
+
+
+class cmd_file_path(Command):
+ __doc__ = """Print path of file_ids to a file or directory.
+
+ This prints one line for each directory down to the target,
+ starting at the branch root.
+ """
+
+ hidden = True
+ takes_args = ['filename']
+
+ @display_command
+ def run(self, filename):
+ tree, relpath = WorkingTree.open_containing(filename)
+ fid = tree.path2id(relpath)
+ if fid is None:
+ raise errors.NotVersionedError(filename)
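+ # Write the file id of each path element from the branch root down to the target itself.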
+ segments = osutils.splitpath(relpath)
+ for pos in range(1, len(segments) + 1):
+ path = osutils.joinpath(segments[:pos])
+ self.outf.write("%s\n" % tree.path2id(path))
+
+
+class cmd_reconcile(Command):
+ __doc__ = """Reconcile bzr metadata in a branch.
+
+ This can correct data mismatches that may have been caused by
+ previous ghost operations or bzr upgrades. You should only
+ need to run this command if 'bzr check' or a bzr developer
+ advises you to run it.
+
+ If a second branch is provided, cross-branch reconciliation is
+ also attempted, which will check that data like the tree root
+ id, which was not present in very early bzr versions, is represented
+ correctly in both branches.
+
+ When it is run, it may also recompress data, resulting in
+ a potential saving of disk space or a performance gain.
+
+ The branch *MUST* be on a listable system such as local disk or sftp.
+ """
+
+ _see_also = ['check']
+ takes_args = ['branch?']
+ takes_options = [
+ Option('canonicalize-chks',
+ help='Make sure CHKs are in canonical form (repairs '
+ 'bug 522637).',
+ hidden=True),
+ ]
+
+ def run(self, branch=".", canonicalize_chks=False):
+ from bzrlib.reconcile import reconcile
+ dir = controldir.ControlDir.open(branch)
+ reconcile(dir, canonicalize_chks=canonicalize_chks)
+
+
+class cmd_revision_history(Command):
+ __doc__ = """Display the list of revision ids on a branch."""
+
+ _see_also = ['log']
+ takes_args = ['location?']
+
+ hidden = True
+
+ @display_command
+ def run(self, location="."):
+ branch = Branch.open_containing(location)[0]
+ self.add_cleanup(branch.lock_read().unlock)
+ graph = branch.repository.get_graph()
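+ # Walk the left-hand (mainline) ancestry back from the branch tip, then print it oldest first.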
+ history = list(graph.iter_lefthand_ancestry(branch.last_revision(),
+ [_mod_revision.NULL_REVISION]))
+ for revid in reversed(history):
+ self.outf.write(revid)
+ self.outf.write('\n')
+
+
+class cmd_ancestry(Command):
+ __doc__ = """List all revisions merged into this branch."""
+
+ _see_also = ['log', 'revision-history']
+ takes_args = ['location?']
+
+ hidden = True
+
+ @display_command
+ def run(self, location="."):
+ try:
+ wt = WorkingTree.open_containing(location)[0]
+ except errors.NoWorkingTree:
+ b = Branch.open(location)
+ last_revision = b.last_revision()
+ else:
+ b = wt.branch
+ last_revision = wt.last_revision()
+
+ self.add_cleanup(b.repository.lock_read().unlock)
+ graph = b.repository.get_graph()
+ revisions = [revid for revid, parents in
+ graph.iter_ancestry([last_revision])]
+ for revision_id in reversed(revisions):
+ if _mod_revision.is_null(revision_id):
+ continue
+ self.outf.write(revision_id + '\n')
+
+
+class cmd_init(Command):
+ __doc__ = """Make a directory into a versioned branch.
+
+ Use this to create an empty branch, or before importing an
+ existing project.
+
+ If there is a repository in a parent directory of the location, then
+ the history of the branch will be stored in the repository. Otherwise
+ init creates a standalone branch which carries its own history
+ in the .bzr directory.
+
+ If there is already a branch at the location but it has no working tree,
+ the tree can be populated with 'bzr checkout'.
+
+ Recipe for importing a tree of files::
+
+ cd ~/project
+ bzr init
+ bzr add .
+ bzr status
+ bzr commit -m "imported project"
+ """
+
+ _see_also = ['init-repository', 'branch', 'checkout']
+ takes_args = ['location?']
+ takes_options = [
+ Option('create-prefix',
+ help='Create the path leading up to the branch '
+ 'if it does not already exist.'),
+ RegistryOption('format',
+ help='Specify a format for this branch. '
+ 'See "help formats".',
+ lazy_registry=('bzrlib.controldir', 'format_registry'),
+ converter=lambda name: controldir.format_registry.make_bzrdir(name),
+ value_switches=True,
+ title="Branch format",
+ ),
+ Option('append-revisions-only',
+ help='Never change revnos or the existing log.'
+ ' Append revisions to it only.'),
+ Option('no-tree',
+ 'Create a branch without a working tree.')
+ ]
+ def run(self, location=None, format=None, append_revisions_only=False,
+ create_prefix=False, no_tree=False):
+ if format is None:
+ format = controldir.format_registry.make_bzrdir('default')
+ if location is None:
+ location = u'.'
+
+ to_transport = transport.get_transport(location)
+
+ # The path has to exist to initialize a
+ # branch inside of it.
+ # Just using os.mkdir, since I don't
+ # believe that we want to create a bunch of
+ # locations if the user supplies an extended path
+ try:
+ to_transport.ensure_base()
+ except errors.NoSuchFile:
+ if not create_prefix:
+ raise errors.BzrCommandError(gettext("Parent directory of %s"
+ " does not exist."
+ "\nYou may supply --create-prefix to create all"
+ " leading parent directories.")
+ % location)
+ to_transport.create_prefix()
+
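+ # Reuse an existing control directory if there is one; otherwise create
+ # the branch (and, unless --no-tree, a working tree) in one step.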
+ try:
+ a_bzrdir = controldir.ControlDir.open_from_transport(to_transport)
+ except errors.NotBranchError:
+ # really a NotBzrDir error...
+ create_branch = controldir.ControlDir.create_branch_convenience
+ if no_tree:
+ force_new_tree = False
+ else:
+ force_new_tree = None
+ branch = create_branch(to_transport.base, format=format,
+ possible_transports=[to_transport],
+ force_new_tree=force_new_tree)
+ a_bzrdir = branch.bzrdir
+ else:
+ from bzrlib.transport.local import LocalTransport
+ if a_bzrdir.has_branch():
+ if (isinstance(to_transport, LocalTransport)
+ and not a_bzrdir.has_workingtree()):
+ raise errors.BranchExistsWithoutWorkingTree(location)
+ raise errors.AlreadyBranchError(location)
+ branch = a_bzrdir.create_branch()
+ if not no_tree and not a_bzrdir.has_workingtree():
+ a_bzrdir.create_workingtree()
+ if append_revisions_only:
+ try:
+ branch.set_append_revisions_only(True)
+ except errors.UpgradeRequired:
+ raise errors.BzrCommandError(gettext('This branch format cannot be set'
+ ' to append-revisions-only. Try --default.'))
+ if not is_quiet():
+ from bzrlib.info import describe_layout, describe_format
+ try:
+ tree = a_bzrdir.open_workingtree(recommend_upgrade=False)
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ tree = None
+ repository = branch.repository
+ layout = describe_layout(repository, branch, tree).lower()
+ format = describe_format(a_bzrdir, repository, branch, tree)
+ self.outf.write(gettext("Created a {0} (format: {1})\n").format(
+ layout, format))
+ if repository.is_shared():
+ #XXX: maybe this can be refactored into transport.path_or_url()
+ url = repository.bzrdir.root_transport.external_url()
+ try:
+ url = urlutils.local_path_from_url(url)
+ except errors.InvalidURL:
+ pass
+ self.outf.write(gettext("Using shared repository: %s\n") % url)
+
+
+class cmd_init_repository(Command):
+ __doc__ = """Create a shared repository for branches to share storage space.
+
+ New branches created under the repository directory will store their
+ revisions in the repository, not in the branch directory. For branches
+ with shared history, this reduces the amount of storage needed and
+ speeds up the creation of new branches.
+
+ If the --no-trees option is given then the branches in the repository
+ will not have working trees by default. They will still exist as
+ directories on disk, but they will not have separate copies of the
+ files at a certain revision. This can be useful for repositories that
+ store branches which are interacted with through checkouts or remote
+ branches, such as on a server.
+
+ :Examples:
+ Create a shared repository holding just branches::
+
+ bzr init-repo --no-trees repo
+ bzr init repo/trunk
+
+ Make a lightweight checkout elsewhere::
+
+ bzr checkout --lightweight repo/trunk trunk-checkout
+ cd trunk-checkout
+ (add files here)
+ """
+
+ _see_also = ['init', 'branch', 'checkout', 'repositories']
+ takes_args = ["location"]
+ takes_options = [RegistryOption('format',
+ help='Specify a format for this repository. See'
+ ' "bzr help formats" for details.',
+ lazy_registry=('bzrlib.controldir', 'format_registry'),
+ converter=lambda name: controldir.format_registry.make_bzrdir(name),
+ value_switches=True, title='Repository format'),
+ Option('no-trees',
+ help='Branches in the repository will default to'
+ ' not having a working tree.'),
+ ]
+ aliases = ["init-repo"]
+
+ def run(self, location, format=None, no_trees=False):
+ if format is None:
+ format = controldir.format_registry.make_bzrdir('default')
+
+ if location is None:
+ location = '.'
+
+ to_transport = transport.get_transport(location)
+
+ (repo, newdir, require_stacking, repository_policy) = (
+ format.initialize_on_transport_ex(to_transport,
+ create_prefix=True, make_working_trees=not no_trees,
+ shared_repo=True, force_new_repo=True,
+ use_existing_dir=True,
+ repo_format_name=format.repository_format.get_format_string()))
+ if not is_quiet():
+ from bzrlib.info import show_bzrdir_info
+ show_bzrdir_info(newdir, verbose=0, outfile=self.outf)
+
+
+class cmd_diff(Command):
+ __doc__ = """Show differences in the working tree, between revisions or branches.
+
+ If no arguments are given, all changes for the current tree are listed.
+ If files are given, only the changes in those files are listed.
+ Remote and multiple branches can be compared by using the --old and
+ --new options. If not provided, the default for both is derived from
+ the first argument, if any, or the current tree if no arguments are
+ given.
+
+ "bzr diff -p1" is equivalent to "bzr diff --prefix old/:new/", and
+ produces patches suitable for "patch -p1".
+
+ Note that when using the -r argument with a range of revisions, the
+ differences are computed between the two specified revisions. That
+ is, the command does not show the changes introduced by the first
+ revision in the range. This differs from the interpretation of
+ revision ranges used by "bzr log" which includes the first revision
+ in the range.
+
+ :Exit values:
+ 1 - changed
+ 2 - unrepresentable changes
+ 3 - error
+ 0 - no change
+
+ :Examples:
+ Shows the difference in the working tree versus the last commit::
+
+ bzr diff
+
+ Difference between the working tree and revision 1::
+
+ bzr diff -r1
+
+ Difference between revision 3 and revision 1::
+
+ bzr diff -r1..3
+
+ Difference between revision 3 and revision 1 for branch xxx::
+
+ bzr diff -r1..3 xxx
+
+ The changes introduced by revision 2 (equivalent to -r1..2)::
+
+ bzr diff -c2
+
+ To see the changes introduced by revision X::
+
+ bzr diff -cX
+
+ Note that in the case of a merge, the -c option shows the changes
+ compared to the left hand parent. To see the changes against
+ another parent, use::
+
+ bzr diff -r<chosen_parent>..X
+
+ The changes between the current revision and the previous revision
+ (equivalent to -c-1 and -r-2..-1)::
+
+ bzr diff -r-2..
+
+ Show just the differences for file NEWS::
+
+ bzr diff NEWS
+
+ Show the differences in working tree xxx for file NEWS::
+
+ bzr diff xxx/NEWS
+
+ Show the differences from branch xxx to this working tree::
+
+ bzr diff --old xxx
+
+ Show the differences between two branches for file NEWS::
+
+ bzr diff --old xxx --new yyy NEWS
+
+ Same as 'bzr diff' but prefix paths with old/ and new/::
+
+ bzr diff --prefix old/:new/
+
+ Show the differences using a custom diff program with options::
+
+ bzr diff --using /usr/bin/diff --diff-options -wu
+ """
+ _see_also = ['status']
+ takes_args = ['file*']
+ takes_options = [
+ Option('diff-options', type=str,
+ help='Pass these options to the external diff program.'),
+ Option('prefix', type=str,
+ short_name='p',
+ help='Set prefixes added to old and new filenames, as '
+ 'two values separated by a colon. (eg "old/:new/").'),
+ Option('old',
+ help='Branch/tree to compare from.',
+ type=unicode,
+ ),
+ Option('new',
+ help='Branch/tree to compare to.',
+ type=unicode,
+ ),
+ 'revision',
+ 'change',
+ Option('using',
+ help='Use this command to compare files.',
+ type=unicode,
+ ),
+ RegistryOption('format',
+ short_name='F',
+ help='Diff format to use.',
+ lazy_registry=('bzrlib.diff', 'format_registry'),
+ title='Diff format'),
+ ]
+ aliases = ['di', 'dif']
+ encoding_type = 'exact'
+
+ @display_command
+ def run(self, revision=None, file_list=None, diff_options=None,
+ prefix=None, old=None, new=None, using=None, format=None):
+ from bzrlib.diff import (get_trees_and_branches_to_diff_locked,
+ show_diff_trees)
+
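+ # Interpret --prefix: unset or '0' means no prefixes, '1' means old/:new/,
+ # anything else must be two values of the form OLD:NEW.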
+ if (prefix is None) or (prefix == '0'):
+ # diff -p0 format
+ old_label = ''
+ new_label = ''
+ elif prefix == '1':
+ old_label = 'old/'
+ new_label = 'new/'
+ elif ':' in prefix:
+ old_label, new_label = prefix.split(":")
+ else:
+ raise errors.BzrCommandError(gettext(
+ '--prefix expects two values separated by a colon'
+ ' (eg "old/:new/")'))
+
+ if revision and len(revision) > 2:
+ raise errors.BzrCommandError(gettext('bzr diff --revision takes exactly'
+ ' one or two revision specifiers'))
+
+ if using is not None and format is not None:
+ raise errors.BzrCommandError(gettext(
+ '{0} and {1} are mutually exclusive').format(
+ '--using', '--format'))
+
+ (old_tree, new_tree,
+ old_branch, new_branch,
+ specific_files, extra_trees) = get_trees_and_branches_to_diff_locked(
+ file_list, revision, old, new, self.add_cleanup, apply_view=True)
+ # GNU diff on Windows uses ANSI encoding for filenames
+ path_encoding = osutils.get_diff_header_encoding()
+ return show_diff_trees(old_tree, new_tree, sys.stdout,
+ specific_files=specific_files,
+ external_diff_options=diff_options,
+ old_label=old_label, new_label=new_label,
+ extra_trees=extra_trees,
+ path_encoding=path_encoding,
+ using=using,
+ format_cls=format)
+
+
+class cmd_deleted(Command):
+ __doc__ = """List files deleted in the working tree.
+ """
+ # TODO: Show files deleted since a previous revision, or
+ # between two revisions.
+ # TODO: Much more efficient way to do this: read in new
+ # directories with readdir, rather than stating each one. Same
+ # level of effort but possibly much less IO. (Or possibly not,
+ # if the directories are very large...)
+ _see_also = ['status', 'ls']
+ takes_options = ['directory', 'show-ids']
+
+ @display_command
+ def run(self, show_ids=False, directory=u'.'):
+ tree = WorkingTree.open_containing(directory)[0]
+ self.add_cleanup(tree.lock_read().unlock)
+ old = tree.basis_tree()
+ self.add_cleanup(old.lock_read().unlock)
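+ # List basis-tree entries whose file ids no longer exist in the working tree.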
+ for path, ie in old.iter_entries_by_dir():
+ if not tree.has_id(ie.file_id):
+ self.outf.write(path)
+ if show_ids:
+ self.outf.write(' ')
+ self.outf.write(ie.file_id)
+ self.outf.write('\n')
+
+
+class cmd_modified(Command):
+ __doc__ = """List files modified in working tree.
+ """
+
+ hidden = True
+ _see_also = ['status', 'ls']
+ takes_options = ['directory', 'null']
+
+ @display_command
+ def run(self, null=False, directory=u'.'):
+ tree = WorkingTree.open_containing(directory)[0]
+ self.add_cleanup(tree.lock_read().unlock)
+ td = tree.changes_from(tree.basis_tree())
+ self.cleanup_now()
+ for path, id, kind, text_modified, meta_modified in td.modified:
+ if null:
+ self.outf.write(path + '\0')
+ else:
+ self.outf.write(osutils.quotefn(path) + '\n')
+
+
+class cmd_added(Command):
+ __doc__ = """List files added in working tree.
+ """
+
+ hidden = True
+ _see_also = ['status', 'ls']
+ takes_options = ['directory', 'null']
+
+ @display_command
+ def run(self, null=False, directory=u'.'):
+ wt = WorkingTree.open_containing(directory)[0]
+ self.add_cleanup(wt.lock_read().unlock)
+ basis = wt.basis_tree()
+ self.add_cleanup(basis.lock_read().unlock)
+ root_id = wt.get_root_id()
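+ # A path counts as added when it is versioned here, absent from the basis
+ # tree, not the tree root, and still present on disk.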
+ for file_id in wt.all_file_ids():
+ if basis.has_id(file_id):
+ continue
+ if root_id == file_id:
+ continue
+ path = wt.id2path(file_id)
+ if not os.access(osutils.pathjoin(wt.basedir, path), os.F_OK):
+ continue
+ if null:
+ self.outf.write(path + '\0')
+ else:
+ self.outf.write(osutils.quotefn(path) + '\n')
+
+
+class cmd_root(Command):
+ __doc__ = """Show the tree root directory.
+
+ The root is the nearest enclosing directory with a .bzr control
+ directory."""
+
+ takes_args = ['filename?']
+ @display_command
+ def run(self, filename=None):
+ """Print the branch root."""
+ tree = WorkingTree.open_containing(filename)[0]
+ self.outf.write(tree.basedir + '\n')
+
+
+def _parse_limit(limitstring):
+ try:
+ return int(limitstring)
+ except ValueError:
+ msg = gettext("The limit argument must be an integer.")
+ raise errors.BzrCommandError(msg)
+
+
+def _parse_levels(s):
+ try:
+ return int(s)
+ except ValueError:
+ msg = gettext("The levels argument must be an integer.")
+ raise errors.BzrCommandError(msg)
+
+
+class cmd_log(Command):
+ __doc__ = """Show historical log for a branch or subset of a branch.
+
+ log is bzr's default tool for exploring the history of a branch.
+ The branch to use is taken from the first parameter. If no parameters
+ are given, the branch containing the working directory is logged.
+ Here are some simple examples::
+
+ bzr log log the current branch
+ bzr log foo.py log a file in its branch
+ bzr log http://server/branch log a branch on a server
+
+ The filtering, ordering and information shown for each revision can
+ be controlled as explained below. By default, all revisions are
+ shown sorted (topologically) so that newer revisions appear before
+ older ones and descendants always appear before ancestors. If displayed,
+ merged revisions are shown indented under the revision in which they
+ were merged.
+
+ :Output control:
+
+ The log format controls how information about each revision is
+ displayed. The standard log formats are called ``long``, ``short``
+ and ``line``. The default is long. See ``bzr help log-formats``
+ for more details on log formats.
+
+ The following options can be used to control what information is
+ displayed::
+
+ -l N display a maximum of N revisions
+ -n N display N levels of revisions (0 for all, 1 for collapsed)
+ -v display a status summary (delta) for each revision
+ -p display a diff (patch) for each revision
+ --show-ids display revision-ids (and file-ids), not just revnos
+
+ Note that the default number of levels to display is a function of the
+ log format. If the -n option is not used, the standard log formats show
+ just the top level (mainline).
+
+ Status summaries are shown using status flags like A, M, etc. To see
+ the changes explained using words like ``added`` and ``modified``
+ instead, use the -vv option.
+
+ :Ordering control:
+
+ To display revisions from oldest to newest, use the --forward option.
+ In most cases, using this option will have little impact on the total
+ time taken to produce a log, though --forward does not incrementally
+ display revisions like --reverse does when it can.
+
+ :Revision filtering:
+
+ The -r option can be used to specify what revision or range of revisions
+ to filter against. The various forms are shown below::
+
+ -rX display revision X
+ -rX.. display revision X and later
+ -r..Y display up to and including revision Y
+ -rX..Y display from X to Y inclusive
+
+ See ``bzr help revisionspec`` for details on how to specify X and Y.
+ Some common examples are given below::
+
+ -r-1 show just the tip
+ -r-10.. show the last 10 mainline revisions
+ -rsubmit:.. show what's new on this branch
+ -rancestor:path.. show changes since the common ancestor of this
+ branch and the one at location path
+ -rdate:yesterday.. show changes since yesterday
+
+ When logging a range of revisions using -rX..Y, log starts at
+ revision Y and searches back in history through the primary
+ ("left-hand") parents until it finds X. When logging just the
+ top level (using -n1), an error is reported if X is not found
+ along the way. If multi-level logging is used (-n0), X may be
+ a nested merge revision and the log will be truncated accordingly.
+
+ :Path filtering:
+
+ If parameters are given and the first one is not a branch, the log
+ will be filtered to show only those revisions that changed the
+ nominated files or directories.
+
+ Filenames are interpreted within their historical context. To log a
+ deleted file, specify a revision range so that the file existed at
+ the end or start of the range.
+
+ Historical context is also important when interpreting pathnames of
+ renamed files/directories. Consider the following example:
+
+ * revision 1: add tutorial.txt
+ * revision 2: modify tutorial.txt
+ * revision 3: rename tutorial.txt to guide.txt; add tutorial.txt
+
+ In this case:
+
+ * ``bzr log guide.txt`` will log the file added in revision 1
+
+ * ``bzr log tutorial.txt`` will log the new file added in revision 3
+
+ * ``bzr log -r2 -p tutorial.txt`` will show the changes made to
+ the original file in revision 2.
+
+ * ``bzr log -r2 -p guide.txt`` will display an error message as there
+ was no file called guide.txt in revision 2.
+
+ Renames are always followed by log. By design, there is no need to
+ explicitly ask for this (and no way to stop log from following a
+ file's history back through its renames).
+
+ :Other filtering:
+
+ The --match option can be used for finding revisions that match a
+ regular expression in a commit message, committer, author or bug.
+ Specifying the option several times will match any of the supplied
+ expressions. --match-author, --match-bugs, --match-committer and
+ --match-message can be used to only match a specific field.
+
+ :Tips & tricks:
+
+ GUI tools and IDEs are often better at exploring history than command
+ line tools: you may prefer qlog or viz from qbzr or bzr-gtk, the
+ bzr-explorer shell, or the Loggerhead web interface. See the Plugin
+ Guide <http://doc.bazaar.canonical.com/plugins/en/> and
+ <http://wiki.bazaar.canonical.com/IDEIntegration>.
+
+ You may find it useful to add the aliases below to ``bazaar.conf``::
+
+ [ALIASES]
+ tip = log -r-1
+ top = log -l10 --line
+ show = log -v -p
+
+ ``bzr tip`` will then show the latest revision while ``bzr top``
+ will show the last 10 mainline revisions. To see the details of a
+ particular revision X, ``bzr show -rX``.
+
+ If you are interested in looking deeper into a particular merge X,
+ use ``bzr log -n0 -rX``.
+
+ ``bzr log -v`` on a branch with lots of history is currently
+ very slow. A fix for this issue is under development.
+ With or without that fix, it is recommended that a revision range
+ be given when using the -v option.
+
+ bzr has a generic full-text matching plugin, bzr-search, that can be
+ used to find revisions matching user names, commit messages, etc.
+ Among other features, this plugin can find all revisions containing
+ a list of words but not others.
+
+ When exploring non-mainline history on large projects with deep
+ history, the performance of log can be greatly improved by installing
+ the historycache plugin. This plugin buffers historical information
+ trading disk space for faster speed.
+ """
+ takes_args = ['file*']
+ _see_also = ['log-formats', 'revisionspec']
+ takes_options = [
+ Option('forward',
+ help='Show from oldest to newest.'),
+ 'timezone',
+ custom_help('verbose',
+ help='Show files changed in each revision.'),
+ 'show-ids',
+ 'revision',
+ Option('change',
+ type=bzrlib.option._parse_revision_str,
+ short_name='c',
+ help='Show just the specified revision.'
+ ' See also "help revisionspec".'),
+ 'log-format',
+ RegistryOption('authors',
+ 'What names to list as authors - first, all or committer.',
+ title='Authors',
+ lazy_registry=('bzrlib.log', 'author_list_registry'),
+ ),
+ Option('levels',
+ short_name='n',
+ help='Number of levels to display - 0 for all, 1 for flat.',
+ argname='N',
+ type=_parse_levels),
+ Option('message',
+ help='Show revisions whose message matches this '
+ 'regular expression.',
+ type=str,
+ hidden=True),
+ Option('limit',
+ short_name='l',
+ help='Limit the output to the first N revisions.',
+ argname='N',
+ type=_parse_limit),
+ Option('show-diff',
+ short_name='p',
+ help='Show changes made in each revision as a patch.'),
+ Option('include-merged',
+ help='Show merged revisions like --levels 0 does.'),
+ Option('include-merges', hidden=True,
+ help='Historical alias for --include-merged.'),
+ Option('omit-merges',
+ help='Do not report commits with more than one parent.'),
+ Option('exclude-common-ancestry',
+ help='Display only the revisions that are not part'
+ ' of both ancestries (requires -rX..Y).'
+ ),
+ Option('signatures',
+ help='Show digital signature validity.'),
+ ListOption('match',
+ short_name='m',
+ help='Show revisions whose properties match this '
+ 'expression.',
+ type=str),
+ ListOption('match-message',
+ help='Show revisions whose message matches this '
+ 'expression.',
+ type=str),
+ ListOption('match-committer',
+ help='Show revisions whose committer matches this '
+ 'expression.',
+ type=str),
+ ListOption('match-author',
+ help='Show revisions whose authors match this '
+ 'expression.',
+ type=str),
+ ListOption('match-bugs',
+ help='Show revisions whose bugs match this '
+ 'expression.',
+ type=str)
+ ]
+ encoding_type = 'replace'
+
+ @display_command
+ def run(self, file_list=None, timezone='original',
+ verbose=False,
+ show_ids=False,
+ forward=False,
+ revision=None,
+ change=None,
+ log_format=None,
+ levels=None,
+ message=None,
+ limit=None,
+ show_diff=False,
+ include_merged=None,
+ authors=None,
+ exclude_common_ancestry=False,
+ signatures=False,
+ match=None,
+ match_message=None,
+ match_committer=None,
+ match_author=None,
+ match_bugs=None,
+ omit_merges=False,
+ include_merges=symbol_versioning.DEPRECATED_PARAMETER,
+ ):
+ from bzrlib.log import (
+ Logger,
+ make_log_request_dict,
+ _get_info_for_log_files,
+ )
+ direction = (forward and 'forward') or 'reverse'
+ if symbol_versioning.deprecated_passed(include_merges):
+ ui.ui_factory.show_user_warning(
+ 'deprecated_command_option',
+ deprecated_name='--include-merges',
+ recommended_name='--include-merged',
+ deprecated_in_version='2.5',
+ command=self.invoked_as)
+ if include_merged is None:
+ include_merged = include_merges
+ else:
+ raise errors.BzrCommandError(gettext(
+ '{0} and {1} are mutually exclusive').format(
+ '--include-merges', '--include-merged'))
+ if include_merged is None:
+ include_merged = False
+ if (exclude_common_ancestry
+ and (revision is None or len(revision) != 2)):
+ raise errors.BzrCommandError(gettext(
+ '--exclude-common-ancestry requires -r with two revisions'))
+ if include_merged:
+ if levels is None:
+ levels = 0
+ else:
+ raise errors.BzrCommandError(gettext(
+ '{0} and {1} are mutually exclusive').format(
+ '--levels', '--include-merged'))
+
+ if change is not None:
+ if len(change) > 1:
+ raise errors.RangeInChangeOption()
+ if revision is not None:
+ raise errors.BzrCommandError(gettext(
+ '{0} and {1} are mutually exclusive').format(
+ '--revision', '--change'))
+ else:
+ revision = change
+
+ file_ids = []
+ filter_by_dir = False
+ if file_list:
+ # find the file ids to log and check for directory filtering
+ b, file_info_list, rev1, rev2 = _get_info_for_log_files(
+ revision, file_list, self.add_cleanup)
+ for relpath, file_id, kind in file_info_list:
+ if file_id is None:
+ raise errors.BzrCommandError(gettext(
+ "Path unknown at end or start of revision range: %s") %
+ relpath)
+ # If the relpath is the top of the tree, we log everything
+ if relpath == '':
+ file_ids = []
+ break
+ else:
+ file_ids.append(file_id)
+ filter_by_dir = filter_by_dir or (
+ kind in ['directory', 'tree-reference'])
+ else:
+ # log everything
+ # FIXME ? log the current subdir only RBC 20060203
+ if revision is not None \
+ and len(revision) > 0 and revision[0].get_branch():
+ location = revision[0].get_branch()
+ else:
+ location = '.'
+ dir, relpath = controldir.ControlDir.open_containing(location)
+ b = dir.open_branch()
+ self.add_cleanup(b.lock_read().unlock)
+ rev1, rev2 = _get_revision_range(revision, b, self.name())
+
+ if b.get_config_stack().get('validate_signatures_in_log'):
+ signatures = True
+
+ if signatures:
+ if not gpg.GPGStrategy.verify_signatures_available():
+ raise errors.GpgmeNotInstalled(None)
+
+ # Decide on the type of delta & diff filtering to use
+ # TODO: add an --all-files option to make this configurable & consistent
+ if not verbose:
+ delta_type = None
+ else:
+ delta_type = 'full'
+ if not show_diff:
+ diff_type = None
+ elif file_ids:
+ diff_type = 'partial'
+ else:
+ diff_type = 'full'
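+ # For example, a plain "bzr log" uses neither deltas nor diffs, while
+ # "bzr log -v -p foo.py" asks for a full delta and a diff restricted to
+ # the selected file ('partial'), since file_ids is non-empty.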
+
+ # Build the log formatter
+ if log_format is None:
+ log_format = log.log_formatter_registry.get_default(b)
+ # Make a non-encoding output to include the diffs - bug 328007
+ unencoded_output = ui.ui_factory.make_output_stream(encoding_type='exact')
+ lf = log_format(show_ids=show_ids, to_file=self.outf,
+ to_exact_file=unencoded_output,
+ show_timezone=timezone,
+ delta_format=get_verbosity_level(),
+ levels=levels,
+ show_advice=levels is None,
+ author_list_handler=authors)
+
+ # Choose the algorithm for doing the logging. It's annoying
+ # having multiple code paths like this but necessary until
+ # the underlying repository format is faster at generating
+ # deltas or can provide everything we need from the indices.
+ # The default algorithm - match-using-deltas - works for
+ # multiple files and directories and is faster for small
+ # amounts of history (200 revisions say). However, it's too
+ # slow for logging a single file in a repository with deep
+ # history, i.e. > 10K revisions. In the spirit of "do no
+ # evil when adding features", we continue to use the
+ # original algorithm - per-file-graph - for the "single
+ # file that isn't a directory without showing a delta" case.
+ partial_history = revision and b.repository._format.supports_chks
+ match_using_deltas = (len(file_ids) != 1 or filter_by_dir
+ or delta_type or partial_history)
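+ # For example, "bzr log foo.py" without -v/-p and without -r ends up with
+ # a single non-directory file id, no delta type and no partial history,
+ # so the per-file-graph algorithm is used; adding -v switches
+ # match_using_deltas to True.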
+
+ match_dict = {}
+ if match:
+ match_dict[''] = match
+ if match_message:
+ match_dict['message'] = match_message
+ if match_committer:
+ match_dict['committer'] = match_committer
+ if match_author:
+ match_dict['author'] = match_author
+ if match_bugs:
+ match_dict['bugs'] = match_bugs
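+ # The empty-string key carries the generic --match expressions, which are
+ # applied across all supported properties (message, committer, author and
+ # bugs); the named keys restrict matching to a single field, mirroring the
+ # --match-* options above.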
+
+ # Build the LogRequest and execute it
+ if len(file_ids) == 0:
+ file_ids = None
+ rqst = make_log_request_dict(
+ direction=direction, specific_fileids=file_ids,
+ start_revision=rev1, end_revision=rev2, limit=limit,
+ message_search=message, delta_type=delta_type,
+ diff_type=diff_type, _match_using_deltas=match_using_deltas,
+ exclude_common_ancestry=exclude_common_ancestry, match=match_dict,
+ signature=signatures, omit_merges=omit_merges,
+ )
+ Logger(b, rqst).show(lf)
+
+
+def _get_revision_range(revisionspec_list, branch, command_name):
+ """Take the input of a revision option and turn it into a revision range.
+
+ It returns RevisionInfo objects which can be used to obtain the rev_id's
+ of the desired revisions. It does some user input validations.
+ """
+ if revisionspec_list is None:
+ rev1 = None
+ rev2 = None
+ elif len(revisionspec_list) == 1:
+ rev1 = rev2 = revisionspec_list[0].in_history(branch)
+ elif len(revisionspec_list) == 2:
+ start_spec = revisionspec_list[0]
+ end_spec = revisionspec_list[1]
+ if end_spec.get_branch() != start_spec.get_branch():
+ # b is taken from revision[0].get_branch(), and
+ # show_log will use its revision_history. Having
+ # different branches will lead to weird behaviors.
+ raise errors.BzrCommandError(gettext(
+ "bzr %s doesn't accept two revisions in different"
+ " branches.") % command_name)
+ if start_spec.spec is None:
+ # Avoid loading all the history.
+ rev1 = RevisionInfo(branch, None, None)
+ else:
+ rev1 = start_spec.in_history(branch)
+ # Avoid loading all of history when we know a missing
+ # end of range means the last revision ...
+ if end_spec.spec is None:
+ last_revno, last_revision_id = branch.last_revision_info()
+ rev2 = RevisionInfo(branch, last_revno, last_revision_id)
+ else:
+ rev2 = end_spec.in_history(branch)
+ else:
+ raise errors.BzrCommandError(gettext(
+ 'bzr %s --revision takes one or two values.') % command_name)
+ return rev1, rev2
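+ # Note that a range such as -r-10.. arrives here as two specs with an
+ # empty end spec, so the last_revision_info() shortcut above is used
+ # instead of walking the whole history.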
+
+
+def _revision_range_to_revid_range(revision_range):
+ """Turn a (start, end) pair of RevisionInfo objects into revision ids."""
+ rev_id1 = None
+ rev_id2 = None
+ if revision_range[0] is not None:
+ rev_id1 = revision_range[0].rev_id
+ if revision_range[1] is not None:
+ rev_id2 = revision_range[1].rev_id
+ return rev_id1, rev_id2
+
+
+def get_log_format(long=False, short=False, line=False, default='long'):
+ """Return the name of the selected log format.
+
+ When several flags are set, 'line' takes precedence over 'short',
+ which takes precedence over 'long'; with no flags the default is used.
+ """
+ log_format = default
+ if long:
+ log_format = 'long'
+ if short:
+ log_format = 'short'
+ if line:
+ log_format = 'line'
+ return log_format
+
+
+class cmd_touching_revisions(Command):
+ __doc__ = """Return revision-ids which affected a particular file.
+
+ A more user-friendly interface is "bzr log FILE".
+ """
+
+ hidden = True
+ takes_args = ["filename"]
+
+ @display_command
+ def run(self, filename):
+ tree, relpath = WorkingTree.open_containing(filename)
+ file_id = tree.path2id(relpath)
+ b = tree.branch
+ self.add_cleanup(b.lock_read().unlock)
+ touching_revs = log.find_touching_revisions(b, file_id)
+ for revno, revision_id, what in touching_revs:
+ self.outf.write("%6d %s\n" % (revno, what))
+
+
+class cmd_ls(Command):
+ __doc__ = """List files in a tree.
+ """
+
+ _see_also = ['status', 'cat']
+ takes_args = ['path?']
+ takes_options = [
+ 'verbose',
+ 'revision',
+ Option('recursive', short_name='R',
+ help='Recurse into subdirectories.'),
+ Option('from-root',
+ help='Print paths relative to the root of the branch.'),
+ Option('unknown', short_name='u',
+ help='Print unknown files.'),
+ Option('versioned', help='Print versioned files.',
+ short_name='V'),
+ Option('ignored', short_name='i',
+ help='Print ignored files.'),
+ Option('kind', short_name='k',
+ help='List entries of a particular kind: file, directory, symlink.',
+ type=unicode),
+ 'null',
+ 'show-ids',
+ 'directory',
+ ]
+ @display_command
+ def run(self, revision=None, verbose=False,
+ recursive=False, from_root=False,
+ unknown=False, versioned=False, ignored=False,
+ null=False, kind=None, show_ids=False, path=None, directory=None):
+
+ if kind and kind not in ('file', 'directory', 'symlink'):
+ raise errors.BzrCommandError(gettext('invalid kind specified'))
+
+ if verbose and null:
+ raise errors.BzrCommandError(gettext('Cannot set both --verbose and --null'))
+ all = not (unknown or versioned or ignored)
+
+ selection = {'I':ignored, '?':unknown, 'V':versioned}
+
+ if path is None:
+ fs_path = '.'
+ else:
+ if from_root:
+ raise errors.BzrCommandError(gettext('cannot specify both --from-root'
+ ' and PATH'))
+ fs_path = path
+ tree, branch, relpath = \
+ _open_directory_or_containing_tree_or_branch(fs_path, directory)
+
+ # Calculate the prefix to use
+ prefix = None
+ if from_root:
+ if relpath:
+ prefix = relpath + '/'
+ elif fs_path != '.' and not fs_path.endswith('/'):
+ prefix = fs_path + '/'
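+ # e.g. running "bzr ls --from-root" from a subdirectory "src" makes
+ # relpath "src", so every listed path below is printed as "src/...".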
+
+ if revision is not None or tree is None:
+ tree = _get_one_revision_tree('ls', revision, branch=branch)
+
+ apply_view = False
+ if isinstance(tree, WorkingTree) and tree.supports_views():
+ view_files = tree.views.lookup_view()
+ if view_files:
+ apply_view = True
+ view_str = views.view_display_str(view_files)
+ note(gettext("Ignoring files outside view. View is %s") % view_str)
+
+ self.add_cleanup(tree.lock_read().unlock)
+ for fp, fc, fkind, fid, entry in tree.list_files(include_root=False,
+ from_dir=relpath, recursive=recursive):
+ # Apply additional masking
+ if not all and not selection[fc]:
+ continue
+ if kind is not None and fkind != kind:
+ continue
+ if apply_view:
+ try:
+ if relpath:
+ fullpath = osutils.pathjoin(relpath, fp)
+ else:
+ fullpath = fp
+ views.check_path_in_view(tree, fullpath)
+ except errors.FileOutsideView:
+ continue
+
+ # Output the entry
+ if prefix:
+ fp = osutils.pathjoin(prefix, fp)
+ kindch = entry.kind_character()
+ outstring = fp + kindch
+ ui.ui_factory.clear_term()
+ if verbose:
+ outstring = '%-8s %s' % (fc, outstring)
+ if show_ids and fid is not None:
+ outstring = "%-50s %s" % (outstring, fid)
+ self.outf.write(outstring + '\n')
+ elif null:
+ self.outf.write(fp + '\0')
+ if show_ids:
+ if fid is not None:
+ self.outf.write(fid)
+ self.outf.write('\0')
+ self.outf.flush()
+ else:
+ if show_ids:
+ if fid is not None:
+ my_id = fid
+ else:
+ my_id = ''
+ self.outf.write('%-50s %s\n' % (outstring, my_id))
+ else:
+ self.outf.write(outstring + '\n')
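+ # With --null, entries are NUL-terminated (the path and, with --show-ids,
+ # the file id), which makes the output safe to pipe into "xargs -0".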
+
+
+class cmd_unknowns(Command):
+ __doc__ = """List unknown files.
+ """
+
+ hidden = True
+ _see_also = ['ls']
+ takes_options = ['directory']
+
+ @display_command
+ def run(self, directory=u'.'):
+ for f in WorkingTree.open_containing(directory)[0].unknowns():
+ self.outf.write(osutils.quotefn(f) + '\n')
+
+
+class cmd_ignore(Command):
+ __doc__ = """Ignore specified files or patterns.
+
+ See ``bzr help patterns`` for details on the syntax of patterns.
+
+ If a .bzrignore file does not exist, the ignore command
+ will create one and add the specified files or patterns to the newly
+ created file. The ignore command will also automatically add the
+ .bzrignore file to be versioned. Creating a .bzrignore file without
+ the use of the ignore command will require an explicit add command.
+
+ To remove patterns from the ignore list, edit the .bzrignore file.
+ After adding, editing or deleting that file either indirectly by
+ using this command or directly by using an editor, be sure to commit
+ it.
+
+ Bazaar also supports a global ignore file ~/.bazaar/ignore. On Windows
+ the global ignore file can be found in the application data directory as
+ C:\\Documents and Settings\\<user>\\Application Data\\Bazaar\\2.0\\ignore.
+ Global ignores are not touched by this command. The global ignore file
+ can be edited directly using an editor.
+
+ Patterns prefixed with '!' are exceptions to ignore patterns and take
+ precedence over regular ignores. Such exceptions are used to specify
+ files that should be versioned which would otherwise be ignored.
+
+ Patterns prefixed with '!!' act as regular ignore patterns, but have
+ precedence over the '!' exception patterns.
+
+ :Notes:
+
+ * Ignore patterns containing shell wildcards must be quoted from
+ the shell on Unix.
+
+ * Ignore patterns starting with "#" act as comments in the ignore file.
+ To ignore patterns that begin with that character, use the "RE:" prefix.
+
+ :Examples:
+ Ignore the top level Makefile::
+
+ bzr ignore ./Makefile
+
+ Ignore .class files in all directories...::
+
+ bzr ignore "*.class"
+
+ ...but do not ignore "special.class"::
+
+ bzr ignore "!special.class"
+
+ Ignore files whose name begins with the "#" character::
+
+ bzr ignore "RE:^#"
+
+ Ignore .o files under the lib directory::
+
+ bzr ignore "lib/**/*.o"
+
+ Ignore .o files under the lib directory, using a regular expression instead::
+
+ bzr ignore "RE:lib/.*\.o"
+
+ Ignore everything but the "debian" toplevel directory::
+
+ bzr ignore "RE:(?!debian/).*"
+
+ Ignore everything except the "local" toplevel directory,
+ but always ignore autosave files ending in ~, even under local/::
+
+ bzr ignore "*"
+ bzr ignore "!./local"
+ bzr ignore "!!*~"
+ """
+
+ _see_also = ['status', 'ignored', 'patterns']
+ takes_args = ['name_pattern*']
+ takes_options = ['directory',
+ Option('default-rules',
+ help='Display the default ignore rules that bzr uses.')
+ ]
+
+ def run(self, name_pattern_list=None, default_rules=None,
+ directory=u'.'):
+ from bzrlib import ignores
+ if default_rules is not None:
+ # dump the default rules and exit
+ for pattern in ignores.USER_DEFAULTS:
+ self.outf.write("%s\n" % pattern)
+ return
+ if not name_pattern_list:
+ raise errors.BzrCommandError(gettext("ignore requires at least one "
+ "NAME_PATTERN or --default-rules."))
+ name_pattern_list = [globbing.normalize_pattern(p)
+ for p in name_pattern_list]
+ bad_patterns = ''
+ bad_patterns_count = 0
+ for p in name_pattern_list:
+ if not globbing.Globster.is_pattern_valid(p):
+ bad_patterns_count += 1
+ bad_patterns += ('\n %s' % p)
+ if bad_patterns:
+ msg = (ngettext('Invalid ignore pattern found. %s',
+ 'Invalid ignore patterns found. %s',
+ bad_patterns_count) % bad_patterns)
+ ui.ui_factory.show_error(msg)
+ raise errors.InvalidPattern('')
+ for name_pattern in name_pattern_list:
+ if (name_pattern[0] == '/' or
+ (len(name_pattern) > 1 and name_pattern[1] == ':')):
+ raise errors.BzrCommandError(gettext(
+ "NAME_PATTERN should not be an absolute path"))
+ tree, relpath = WorkingTree.open_containing(directory)
+ ignores.tree_ignores_add_patterns(tree, name_pattern_list)
+ ignored = globbing.Globster(name_pattern_list)
+ matches = []
+ self.add_cleanup(tree.lock_read().unlock)
+ for entry in tree.list_files():
+ id = entry[3]
+ if id is not None:
+ filename = entry[0]
+ if ignored.match(filename):
+ matches.append(filename)
+ if len(matches) > 0:
+ self.outf.write(gettext("Warning: the following files are version "
+ "controlled and match your ignore pattern:\n%s"
+ "\nThese files will continue to be version controlled"
+ " unless you 'bzr remove' them.\n") % ("\n".join(matches),))
+
+
+class cmd_ignored(Command):
+ __doc__ = """List ignored files and the patterns that matched them.
+
+ List all the ignored files and the ignore pattern that caused the file to
+ be ignored.
+
+ Alternatively, to list just the files::
+
+ bzr ls --ignored
+ """
+
+ encoding_type = 'replace'
+ _see_also = ['ignore', 'ls']
+ takes_options = ['directory']
+
+ @display_command
+ def run(self, directory=u'.'):
+ tree = WorkingTree.open_containing(directory)[0]
+ self.add_cleanup(tree.lock_read().unlock)
+ for path, file_class, kind, file_id, entry in tree.list_files():
+ if file_class != 'I':
+ continue
+ ## XXX: Slightly inefficient since this was already calculated
+ pat = tree.is_ignored(path)
+ self.outf.write('%-50s %s\n' % (path, pat))
+
+
+class cmd_lookup_revision(Command):
+ __doc__ = """Lookup the revision-id from a revision-number
+
+ :Examples:
+ bzr lookup-revision 33
+ """
+ hidden = True
+ takes_args = ['revno']
+ takes_options = ['directory']
+
+ @display_command
+ def run(self, revno, directory=u'.'):
+ try:
+ revno = int(revno)
+ except ValueError:
+ raise errors.BzrCommandError(gettext("not a valid revision-number: %r")
+ % revno)
+ revid = WorkingTree.open_containing(directory)[0].branch.get_rev_id(revno)
+ self.outf.write("%s\n" % revid)
+
+
+class cmd_export(Command):
+ __doc__ = """Export current or past revision to a destination directory or archive.
+
+ If no revision is specified this exports the last committed revision.
+
+ Format may be an "exporter" name, such as tar, tgz, tbz2. If none is
+ given, try to find the format with the extension. If no extension
+ is found exports to a directory (equivalent to --format=dir).
+
+ If root is supplied, it will be used as the root directory inside
+ container formats (tar, zip, etc). If it is not supplied it will default
+ to the exported filename. The root option has no effect for 'dir' format.
+
+ If branch is omitted then the branch containing the current working
+ directory will be used.
+
+ Note: Export of tree with non-ASCII filenames to zip is not supported.
+
+ ================= =========================
+ Supported formats Autodetected by extension
+ ================= =========================
+ dir (none)
+ tar .tar
+ tbz2 .tar.bz2, .tbz2
+ tgz .tar.gz, .tgz
+ zip .zip
+ ================= =========================
+ """
+ encoding = 'exact'
+ takes_args = ['dest', 'branch_or_subdir?']
+ takes_options = ['directory',
+ Option('format',
+ help="Type of file to export to.",
+ type=unicode),
+ 'revision',
+ Option('filters', help='Apply content filters to export the '
+ 'convenient form.'),
+ Option('root',
+ type=str,
+ help="Name of the root directory inside the exported file."),
+ Option('per-file-timestamps',
+ help='Set modification time of files to that of the last '
+ 'revision in which it was changed.'),
+ Option('uncommitted',
+ help='Export the working tree contents rather than that of the '
+ 'last revision.'),
+ ]
+ def run(self, dest, branch_or_subdir=None, revision=None, format=None,
+ root=None, filters=False, per_file_timestamps=False, uncommitted=False,
+ directory=u'.'):
+ from bzrlib.export import export
+
+ if branch_or_subdir is None:
+ branch_or_subdir = directory
+
+ (tree, b, subdir) = controldir.ControlDir.open_containing_tree_or_branch(
+ branch_or_subdir)
+ if tree is not None:
+ self.add_cleanup(tree.lock_read().unlock)
+
+ if uncommitted:
+ if tree is None:
+ raise errors.BzrCommandError(
+ gettext("--uncommitted requires a working tree"))
+ export_tree = tree
+ else:
+ export_tree = _get_one_revision_tree('export', revision, branch=b, tree=tree)
+ try:
+ export(export_tree, dest, format, root, subdir, filtered=filters,
+ per_file_timestamps=per_file_timestamps)
+ except errors.NoSuchExportFormat, e:
+ raise errors.BzrCommandError(
+ gettext('Unsupported export format: %s') % e.format)
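+ # As documented above, when no --format is given the exporter is chosen
+ # from the destination's extension: for example a destination named
+ # "../proj.tar.gz" produces a gzipped tarball, while a name without a
+ # recognised extension is exported as a plain directory.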
+
+
+class cmd_cat(Command):
+ __doc__ = """Write the contents of a file as of a given revision to standard output.
+
+ If no revision is nominated, the last revision is used.
+
+ Note: Take care to redirect standard output when using this command on a
+ binary file.
+ """
+
+ _see_also = ['ls']
+ takes_options = ['directory',
+ Option('name-from-revision', help='The path name in the old tree.'),
+ Option('filters', help='Apply content filters to display the '
+ 'convenience form.'),
+ 'revision',
+ ]
+ takes_args = ['filename']
+ encoding_type = 'exact'
+
+ @display_command
+ def run(self, filename, revision=None, name_from_revision=False,
+ filters=False, directory=None):
+ if revision is not None and len(revision) != 1:
+ raise errors.BzrCommandError(gettext("bzr cat --revision takes exactly"
+ " one revision specifier"))
+ tree, branch, relpath = \
+ _open_directory_or_containing_tree_or_branch(filename, directory)
+ self.add_cleanup(branch.lock_read().unlock)
+ return self._run(tree, branch, relpath, filename, revision,
+ name_from_revision, filters)
+
+ def _run(self, tree, b, relpath, filename, revision, name_from_revision,
+ filtered):
+ if tree is None:
+ tree = b.basis_tree()
+ rev_tree = _get_one_revision_tree('cat', revision, branch=b)
+ self.add_cleanup(rev_tree.lock_read().unlock)
+
+ old_file_id = rev_tree.path2id(relpath)
+
+ # TODO: Split out this code to something that generically finds the
+ # best id for a path across one or more trees; it's like
+ # find_ids_across_trees but restricted to find just one. -- mbp
+ # 20110705.
+ if name_from_revision:
+ # Try in revision if requested
+ if old_file_id is None:
+ raise errors.BzrCommandError(gettext(
+ "{0!r} is not present in revision {1}").format(
+ filename, rev_tree.get_revision_id()))
+ else:
+ actual_file_id = old_file_id
+ else:
+ cur_file_id = tree.path2id(relpath)
+ if cur_file_id is not None and rev_tree.has_id(cur_file_id):
+ actual_file_id = cur_file_id
+ elif old_file_id is not None:
+ actual_file_id = old_file_id
+ else:
+ raise errors.BzrCommandError(gettext(
+ "{0!r} is not present in revision {1}").format(
+ filename, rev_tree.get_revision_id()))
+ if filtered:
+ from bzrlib.filter_tree import ContentFilterTree
+ filter_tree = ContentFilterTree(rev_tree,
+ rev_tree._content_filter_stack)
+ content = filter_tree.get_file_text(actual_file_id)
+ else:
+ content = rev_tree.get_file_text(actual_file_id)
+ self.cleanup_now()
+ self.outf.write(content)
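+ # Path resolution summary: by default the path is looked up in the
+ # current tree first and only then in the old tree, so a file renamed
+ # since the requested revision is still found under its current name;
+ # with --name-from-revision the path is interpreted in the old tree only.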
+
+
+class cmd_local_time_offset(Command):
+ __doc__ = """Show the offset in seconds from GMT to local time."""
+ hidden = True
+ @display_command
+ def run(self):
+ self.outf.write("%s\n" % osutils.local_time_offset())
+
+
+class cmd_commit(Command):
+ __doc__ = """Commit changes into a new revision.
+
+ An explanatory message needs to be given for each commit. This is
+ often done by using the --message option (getting the message from the
+ command line) or by using the --file option (getting the message from
+ a file). If neither of these options is given, an editor is opened for
+ the user to enter the message. To see the changed files in the
+ boilerplate text loaded into the editor, use the --show-diff option.
+
+ By default, the entire tree is committed and the person doing the
+ commit is assumed to be the author. These defaults can be overridden
+ as explained below.
+
+ :Selective commits:
+
+ If selected files are specified, only changes to those files are
+ committed. If a directory is specified then the directory and
+ everything within it is committed.
+
+ When excludes are given, they take precedence over selected files.
+ For example, to commit only changes within foo, but not changes
+ within foo/bar::
+
+ bzr commit foo -x foo/bar
+
+ A selective commit after a merge is not yet supported.
+
+ :Custom authors:
+
+ If the author of the change is not the same person as the committer,
+ you can specify the author's name using the --author option. The
+ name should be in the same format as a committer-id, e.g.
+ "John Doe <jdoe@example.com>". If there is more than one author of
+ the change you can specify the option multiple times, once for each
+ author.
+
+ :Checks:
+
+ A common mistake is to forget to add a new file or directory before
+ running the commit command. The --strict option checks for unknown
+ files and aborts the commit if any are found. More advanced pre-commit
+ checks can be implemented by defining hooks. See ``bzr help hooks``
+ for details.
+
+ :Things to note:
+
+ If you accidentally commit the wrong changes or, say, make a spelling
+ mistake in the commit message, you can use the uncommit command
+ to undo it. See ``bzr help uncommit`` for details.
+
+ Hooks can also be configured to run after a commit. This allows you
+ to trigger updates to external systems like bug trackers. The --fixes
+ option can be used to record the association between a revision and
+ one or more bugs. See ``bzr help bugs`` for details.
+ """
+
+ _see_also = ['add', 'bugs', 'hooks', 'uncommit']
+ takes_args = ['selected*']
+ takes_options = [
+ ListOption('exclude', type=str, short_name='x',
+ help="Do not consider changes made to a given path."),
+ Option('message', type=unicode,
+ short_name='m',
+ help="Description of the new revision."),
+ 'verbose',
+ Option('unchanged',
+ help='Commit even if nothing has changed.'),
+ Option('file', type=str,
+ short_name='F',
+ argname='msgfile',
+ help='Take commit message from this file.'),
+ Option('strict',
+ help="Refuse to commit if there are unknown "
+ "files in the working tree."),
+ Option('commit-time', type=str,
+ help="Manually set a commit time using commit date "
+ "format, e.g. '2009-10-10 08:00:00 +0100'."),
+ ListOption('fixes', type=str,
+ help="Mark a bug as being fixed by this revision "
+ "(see \"bzr help bugs\")."),
+ ListOption('author', type=unicode,
+ help="Set the author's name, if it's different "
+ "from the committer."),
+ Option('local',
+ help="Perform a local commit in a bound "
+ "branch. Local commits are not pushed to "
+ "the master branch until a normal commit "
+ "is performed."
+ ),
+ Option('show-diff', short_name='p',
+ help='When no message is supplied, show the diff along'
+ ' with the status summary in the message editor.'),
+ Option('lossy',
+ help='When committing to a foreign version control '
+ 'system do not push data that can not be natively '
+ 'represented.'),
+ ]
+ aliases = ['ci', 'checkin']
+
+ def _iter_bug_fix_urls(self, fixes, branch):
+ default_bugtracker = None
+ # Configure the properties for bug fixing attributes.
+ for fixed_bug in fixes:
+ tokens = fixed_bug.split(':')
+ if len(tokens) == 1:
+ if default_bugtracker is None:
+ branch_config = branch.get_config_stack()
+ default_bugtracker = branch_config.get(
+ "bugtracker")
+ if default_bugtracker is None:
+ raise errors.BzrCommandError(gettext(
+ "No tracker specified for bug %s. Use the form "
+ "'tracker:id' or specify a default bug tracker "
+ "using the `bugtracker` option.\nSee "
+ "\"bzr help bugs\" for more information on this "
+ "feature. Commit refused.") % fixed_bug)
+ tag = default_bugtracker
+ bug_id = tokens[0]
+ elif len(tokens) != 2:
+ raise errors.BzrCommandError(gettext(
+ "Invalid bug %s. Must be in the form of 'tracker:id'. "
+ "See \"bzr help bugs\" for more information on this "
+ "feature.\nCommit refused.") % fixed_bug)
+ else:
+ tag, bug_id = tokens
+ try:
+ yield bugtracker.get_bug_url(tag, branch, bug_id)
+ except errors.UnknownBugTrackerAbbreviation:
+ raise errors.BzrCommandError(gettext(
+ 'Unrecognized bug %s. Commit refused.') % fixed_bug)
+ except errors.MalformedBugIdentifier, e:
+ raise errors.BzrCommandError(gettext(
+ "%s\nCommit refused.") % (str(e),))
+
+ def run(self, message=None, file=None, verbose=False, selected_list=None,
+ unchanged=False, strict=False, local=False, fixes=None,
+ author=None, show_diff=False, exclude=None, commit_time=None,
+ lossy=False):
+ from bzrlib.errors import (
+ PointlessCommit,
+ ConflictsInTree,
+ StrictCommitFailed
+ )
+ from bzrlib.msgeditor import (
+ edit_commit_message_encoded,
+ generate_commit_message_template,
+ make_commit_message_template_encoded,
+ set_commit_message,
+ )
+
+ commit_stamp = offset = None
+ if commit_time is not None:
+ try:
+ commit_stamp, offset = timestamp.parse_patch_date(commit_time)
+ except ValueError, e:
+ raise errors.BzrCommandError(gettext(
+ "Could not parse --commit-time: " + str(e)))
+
+ properties = {}
+
+ tree, selected_list = WorkingTree.open_containing_paths(selected_list)
+ if selected_list == ['']:
+ # workaround - commit of root of tree should be exactly the same
+ # as just default commit in that tree, and succeed even though
+ # selected-file merge commit is not done yet
+ selected_list = []
+
+ if fixes is None:
+ fixes = []
+ bug_property = bugtracker.encode_fixes_bug_urls(
+ self._iter_bug_fix_urls(fixes, tree.branch))
+ if bug_property:
+ properties['bugs'] = bug_property
+
+ if local and not tree.branch.get_bound_location():
+ raise errors.LocalRequiresBoundBranch()
+
+ if message is not None:
+ try:
+ file_exists = osutils.lexists(message)
+ except UnicodeError:
+ # The commit message contains unicode characters that can't be
+ # represented in the filesystem encoding, so that can't be a
+ # file.
+ file_exists = False
+ if file_exists:
+ warning_msg = (
+ 'The commit message is a file name: "%(f)s".\n'
+ '(use --file "%(f)s" to take commit message from that file)'
+ % { 'f': message })
+ ui.ui_factory.show_warning(warning_msg)
+ if '\r' in message:
+ message = message.replace('\r\n', '\n')
+ message = message.replace('\r', '\n')
+ if file:
+ raise errors.BzrCommandError(gettext(
+ "please specify either --message or --file"))
+
+ def get_message(commit_obj):
+ """Callback to get commit message"""
+ if file:
+ f = open(file)
+ try:
+ my_message = f.read().decode(osutils.get_user_encoding())
+ finally:
+ f.close()
+ elif message is not None:
+ my_message = message
+ else:
+ # No message supplied: make one up.
+ # text is the status of the tree
+ text = make_commit_message_template_encoded(tree,
+ selected_list, diff=show_diff,
+ output_encoding=osutils.get_user_encoding())
+ # start_message is the template generated from hooks
+ # XXX: Warning - looks like hooks return unicode,
+ # make_commit_message_template_encoded returns user encoding.
+ # We probably want to be using edit_commit_message instead to
+ # avoid this.
+ my_message = set_commit_message(commit_obj)
+ if my_message is None:
+ start_message = generate_commit_message_template(commit_obj)
+ my_message = edit_commit_message_encoded(text,
+ start_message=start_message)
+ if my_message is None:
+ raise errors.BzrCommandError(gettext("please specify a commit"
+ " message with either --message or --file"))
+ if my_message == "":
+ raise errors.BzrCommandError(gettext("Empty commit message specified."
+ " Please specify a commit message with either"
+ " --message or --file or leave a blank message"
+ " with --message \"\"."))
+ return my_message
+
+ # The API permits a commit with a filter of [] to mean 'select nothing'
+ # but the command line should not do that.
+ if not selected_list:
+ selected_list = None
+ try:
+ tree.commit(message_callback=get_message,
+ specific_files=selected_list,
+ allow_pointless=unchanged, strict=strict, local=local,
+ reporter=None, verbose=verbose, revprops=properties,
+ authors=author, timestamp=commit_stamp,
+ timezone=offset,
+ exclude=tree.safe_relpath_files(exclude),
+ lossy=lossy)
+ except PointlessCommit:
+ raise errors.BzrCommandError(gettext("No changes to commit."
+ " Please 'bzr add' the files you want to commit, or use"
+ " --unchanged to force an empty commit."))
+ except ConflictsInTree:
+ raise errors.BzrCommandError(gettext('Conflicts detected in working '
+ 'tree. Use "bzr conflicts" to list, "bzr resolve FILE" to'
+ ' resolve.'))
+ except StrictCommitFailed:
+ raise errors.BzrCommandError(gettext("Commit refused because there are"
+ " unknown files in the working tree."))
+ except errors.BoundBranchOutOfDate, e:
+ e.extra_help = (gettext("\n"
+ 'To commit to master branch, run update and then commit.\n'
+ 'You can also pass --local to commit to continue working '
+ 'disconnected.'))
+ raise
+
+
+class cmd_check(Command):
+ __doc__ = """Validate working tree structure, branch consistency and repository history.
+
+ This command checks various invariants about branch and repository storage
+ to detect data corruption or bzr bugs.
+
+ The working tree and branch checks will only give output if a problem is
+ detected. The output fields of the repository check are:
+
+ revisions
+ This is just the number of revisions checked. It doesn't
+ indicate a problem.
+
+ versionedfiles
+ This is just the number of versionedfiles checked. It
+ doesn't indicate a problem.
+
+ unreferenced ancestors
+ Texts that are ancestors of other texts, but
+ are not properly referenced by the revision ancestry. This is a
+ subtle problem that Bazaar can work around.
+
+ unique file texts
+ This is the total number of unique file contents
+ seen in the checked revisions. It does not indicate a problem.
+
+ repeated file texts
+ This is the total number of repeated texts seen
+ in the checked revisions. Texts can be repeated when their file
+ entries are modified, but the file contents are not. It does not
+ indicate a problem.
+
+ If no restrictions are specified, all Bazaar data that is found at the given
+ location will be checked.
+
+ :Examples:
+
+ Check the tree and branch at 'foo'::
+
+ bzr check --tree --branch foo
+
+ Check only the repository at 'bar'::
+
+ bzr check --repo bar
+
+ Check everything at 'baz'::
+
+ bzr check baz
+ """
+
+ _see_also = ['reconcile']
+ takes_args = ['path?']
+ takes_options = ['verbose',
+ Option('branch', help="Check the branch related to the"
+ " current directory."),
+ Option('repo', help="Check the repository related to the"
+ " current directory."),
+ Option('tree', help="Check the working tree related to"
+ " the current directory.")]
+
+ def run(self, path=None, verbose=False, branch=False, repo=False,
+ tree=False):
+ from bzrlib.check import check_dwim
+ if path is None:
+ path = '.'
+ if not branch and not repo and not tree:
+ branch = repo = tree = True
+ check_dwim(path, verbose, do_branch=branch, do_repo=repo, do_tree=tree)
+
+
+class cmd_upgrade(Command):
+ __doc__ = """Upgrade a repository, branch or working tree to a newer format.
+
+ When the default format has changed after a major new release of
+ Bazaar, you may be informed during certain operations that you
+ should upgrade. Upgrading to a newer format may improve performance
+ or make new features available. It may however limit interoperability
+ with older repositories or with older versions of Bazaar.
+
+ If you wish to upgrade to a particular format rather than the
+ current default, that can be specified using the --format option.
+ As a consequence, you can use the upgrade command this way to
+ "downgrade" to an earlier format, though some conversions are
+ a one-way process (e.g. changing from the 1.x default to the
+ 2.x default) so downgrading is not always possible.
+
+ A backup.bzr.~#~ directory is created at the start of the conversion
+ process (where # is a number). By default, this is left there on
+ completion. If the conversion fails, delete the new .bzr directory
+ and rename this one back in its place. Use the --clean option to ask
+ for the backup.bzr directory to be removed on successful conversion.
+ Alternatively, you can delete it by hand if everything looks good
+ afterwards.
+
+ If the location given is a shared repository, dependent branches
+ are also converted provided the repository converts successfully.
+ If the conversion of a branch fails, remaining branches are still
+ tried.
+
+ For more information on upgrades, see the Bazaar Upgrade Guide,
+ http://doc.bazaar.canonical.com/latest/en/upgrade-guide/.
+ """
+
+ _see_also = ['check', 'reconcile', 'formats']
+ takes_args = ['url?']
+ takes_options = [
+ RegistryOption('format',
+ help='Upgrade to a specific format. See "bzr help'
+ ' formats" for details.',
+ lazy_registry=('bzrlib.controldir', 'format_registry'),
+ converter=lambda name: controldir.format_registry.make_bzrdir(name),
+ value_switches=True, title='Branch format'),
+ Option('clean',
+ help='Remove the backup.bzr directory if successful.'),
+ Option('dry-run',
+ help="Show what would be done, but don't actually do anything."),
+ ]
+
+ def run(self, url='.', format=None, clean=False, dry_run=False):
+ from bzrlib.upgrade import upgrade
+ exceptions = upgrade(url, format, clean_up=clean, dry_run=dry_run)
+ if exceptions:
+ if len(exceptions) == 1:
+ # Compatibility with historical behavior
+ raise exceptions[0]
+ else:
+ return 3
+
+
+class cmd_whoami(Command):
+ __doc__ = """Show or set bzr user id.
+
+ :Examples:
+ Show the email of the current user::
+
+ bzr whoami --email
+
+ Set the current user::
+
+ bzr whoami "Frank Chu <fchu@example.com>"
+ """
+ takes_options = [ 'directory',
+ Option('email',
+ help='Display email address only.'),
+ Option('branch',
+ help='Set identity for the current branch instead of '
+ 'globally.'),
+ ]
+ takes_args = ['name?']
+ encoding_type = 'replace'
+
+ @display_command
+ def run(self, email=False, branch=False, name=None, directory=None):
+ if name is None:
+ if directory is None:
+ # use branch if we're inside one; otherwise global config
+ try:
+ c = Branch.open_containing(u'.')[0].get_config_stack()
+ except errors.NotBranchError:
+ c = _mod_config.GlobalStack()
+ else:
+ c = Branch.open(directory).get_config_stack()
+ identity = c.get('email')
+ if email:
+ self.outf.write(_mod_config.extract_email_address(identity)
+ + '\n')
+ else:
+ self.outf.write(identity + '\n')
+ return
+
+ if email:
+ raise errors.BzrCommandError(gettext("--email can only be used to display existing "
+ "identity"))
+
+ # display a warning if an email address isn't included in the given name.
+ try:
+ _mod_config.extract_email_address(name)
+ except errors.NoEmailInUsername, e:
+ warning('"%s" does not seem to contain an email address. '
+ 'This is allowed, but not recommended.', name)
+
+ # use global config unless --branch given
+ if branch:
+ if directory is None:
+ c = Branch.open_containing(u'.')[0].get_config_stack()
+ else:
+ b = Branch.open(directory)
+ self.add_cleanup(b.lock_write().unlock)
+ c = b.get_config_stack()
+ else:
+ c = _mod_config.GlobalStack()
+ c.set('email', name)
+
+
+class cmd_nick(Command):
+ __doc__ = """Print or set the branch nickname.
+
+ If unset, the colocated branch name is used for colocated branches, and
+ the branch directory name is used for other branches. To print the
+ current nickname, execute with no argument.
+
+ Bound branches use the nickname of their master branch unless it is set
+ locally.
+ """
+
+ _see_also = ['info']
+ takes_args = ['nickname?']
+ takes_options = ['directory']
+ def run(self, nickname=None, directory=u'.'):
+ branch = Branch.open_containing(directory)[0]
+ if nickname is None:
+ self.printme(branch)
+ else:
+ branch.nick = nickname
+
+ @display_command
+ def printme(self, branch):
+ self.outf.write('%s\n' % branch.nick)
+
+
+class cmd_alias(Command):
+ __doc__ = """Set/unset and display aliases.
+
+ :Examples:
+ Show the current aliases::
+
+ bzr alias
+
+ Show the alias specified for 'll'::
+
+ bzr alias ll
+
+ Set an alias for 'll'::
+
+ bzr alias ll="log --line -r-10..-1"
+
+ To remove an alias for 'll'::
+
+ bzr alias --remove ll
+
+ """
+ takes_args = ['name?']
+ takes_options = [
+ Option('remove', help='Remove the alias.'),
+ ]
+
+ def run(self, name=None, remove=False):
+ if remove:
+ self.remove_alias(name)
+ elif name is None:
+ self.print_aliases()
+ else:
+ equal_pos = name.find('=')
+ if equal_pos == -1:
+ self.print_alias(name)
+ else:
+ self.set_alias(name[:equal_pos], name[equal_pos+1:])
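+ # Splitting on the first '=' lets the alias value itself contain '='
+ # characters, e.g. bzr alias mdiff="diff --using=meld".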
+
+ def remove_alias(self, alias_name):
+ if alias_name is None:
+ raise errors.BzrCommandError(gettext(
+ 'bzr alias --remove expects an alias to remove.'))
+ # If alias is not found, print something like:
+ # unalias: foo: not found
+ c = _mod_config.GlobalConfig()
+ c.unset_alias(alias_name)
+
+ @display_command
+ def print_aliases(self):
+ """Print out the defined aliases in a similar format to bash."""
+ aliases = _mod_config.GlobalConfig().get_aliases()
+ for key, value in sorted(aliases.iteritems()):
+ self.outf.write('bzr alias %s="%s"\n' % (key, value))
+
+ @display_command
+ def print_alias(self, alias_name):
+ from bzrlib.commands import get_alias
+ alias = get_alias(alias_name)
+ if alias is None:
+ self.outf.write("bzr alias: %s: not found\n" % alias_name)
+ else:
+ self.outf.write(
+ 'bzr alias %s="%s"\n' % (alias_name, ' '.join(alias)))
+
+ def set_alias(self, alias_name, alias_command):
+ """Save the alias in the global config."""
+ c = _mod_config.GlobalConfig()
+ c.set_alias(alias_name, alias_command)
+
+
+class cmd_selftest(Command):
+ __doc__ = """Run internal test suite.
+
+ If arguments are given, they are regular expressions that say which tests
+ should run. Tests matching any expression are run, and other tests are
+ not run.
+
+ Alternatively if --first is given, matching tests are run first and then
+ all other tests are run. This is useful if you have been working in a
+ particular area, but want to make sure nothing else was broken.
+
+ If --exclude is given, tests that match that regular expression are
+ excluded, regardless of whether they match --first or not.
+
+ To help catch accidental dependencies between tests, the --randomize
+ option is useful. In most cases, the argument used is the word 'now'.
+ Note that the seed used for the random number generator is displayed
+ when this option is used. The seed can be explicitly passed as the
+ argument to this option if required. This enables reproduction of the
+ actual ordering used if and when an order sensitive problem is encountered.
+
+ If --list-only is given, the tests that would be run are listed. This is
+ useful when combined with --first, --exclude and/or --randomize to
+ understand their impact. The test harness reports "Listed nn tests in ..."
+ instead of "Ran nn tests in ..." when list mode is enabled.
+
+ If the global option '--no-plugins' is given, plugins are not loaded
+ before running the selftests. This has two effects: features provided or
+ modified by plugins will not be tested, and tests provided by plugins will
+ not be run.
+
+ Tests that need working space on disk use a common temporary directory,
+ typically inside $TMPDIR or /tmp.
+
+ If you set BZR_TEST_PDB=1 when running selftest, failing tests will drop
+ into a pdb postmortem session.
+
+ The --coverage=DIRNAME global option produces a report with covered code
+ indicated.
+
+ :Examples:
+ Run only tests relating to 'ignore'::
+
+ bzr selftest ignore
+
+ Disable plugins and list tests as they're run::
+
+ bzr --no-plugins selftest -v
+ """
+ # NB: this is used from the class without creating an instance, which is
+ # why it does not have a self parameter.
+ def get_transport_type(typestring):
+ """Parse and return a transport specifier."""
+ if typestring == "sftp":
+ from bzrlib.tests import stub_sftp
+ return stub_sftp.SFTPAbsoluteServer
+ elif typestring == "memory":
+ from bzrlib.transport import memory
+ return memory.MemoryServer
+ elif typestring == "fakenfs":
+ from bzrlib.tests import test_server
+ return test_server.FakeNFSServer
+ msg = "No known transport type %s. Supported types are: sftp\n" %\
+ (typestring)
+ raise errors.BzrCommandError(msg)
+
+ hidden = True
+ takes_args = ['testspecs*']
+ takes_options = ['verbose',
+ Option('one',
+ help='Stop when one test fails.',
+ short_name='1',
+ ),
+ Option('transport',
+ help='Use a different transport by default '
+ 'throughout the test suite.',
+ type=get_transport_type),
+ Option('benchmark',
+ help='Run the benchmarks rather than selftests.',
+ hidden=True),
+ Option('lsprof-timed',
+ help='Generate lsprof output for benchmarked'
+ ' sections of code.'),
+ Option('lsprof-tests',
+ help='Generate lsprof output for each test.'),
+ Option('first',
+ help='Run all tests, but run specified tests first.',
+ short_name='f',
+ ),
+ Option('list-only',
+ help='List the tests instead of running them.'),
+ RegistryOption('parallel',
+ help="Run the test suite in parallel.",
+ lazy_registry=('bzrlib.tests', 'parallel_registry'),
+ value_switches=False,
+ ),
+ Option('randomize', type=str, argname="SEED",
+ help='Randomize the order of tests using the given'
+ ' seed or "now" for the current time.'),
+ ListOption('exclude', type=str, argname="PATTERN",
+ short_name='x',
+ help='Exclude tests that match this regular'
+ ' expression.'),
+ Option('subunit',
+ help='Output test progress via subunit.'),
+ Option('strict', help='Fail on missing dependencies or '
+ 'known failures.'),
+ Option('load-list', type=str, argname='TESTLISTFILE',
+ help='Load a test id list from a text file.'),
+ ListOption('debugflag', type=str, short_name='E',
+ help='Turn on a selftest debug flag.'),
+ ListOption('starting-with', type=str, argname='TESTID',
+ param_name='starting_with', short_name='s',
+ help=
+ 'Load only the tests starting with TESTID.'),
+ Option('sync',
+ help="By default we disable fsync and fdatasync"
+ " while running the test suite.")
+ ]
+ encoding_type = 'replace'
+
+ def __init__(self):
+ Command.__init__(self)
+ self.additional_selftest_args = {}
+
+ def run(self, testspecs_list=None, verbose=False, one=False,
+ transport=None, benchmark=None,
+ lsprof_timed=None,
+ first=False, list_only=False,
+ randomize=None, exclude=None, strict=False,
+ load_list=None, debugflag=None, starting_with=None, subunit=False,
+ parallel=None, lsprof_tests=False,
+ sync=False):
+
+ # During selftest, disallow proxying, as it can cause severe
+ # performance penalties and is only needed for thread
+ # safety. The selftest command is assumed to not use threads
+ # too heavily. The call should be as early as possible, as
+ # error reporting for past duplicate imports won't have useful
+ # backtraces.
+ lazy_import.disallow_proxying()
+
+ from bzrlib import tests
+
+ if testspecs_list is not None:
+ pattern = '|'.join(testspecs_list)
+ else:
+ pattern = ".*"
+ if subunit:
+ try:
+ from bzrlib.tests import SubUnitBzrRunner
+ except ImportError:
+ raise errors.BzrCommandError(gettext("subunit not available. subunit "
+ "needs to be installed to use --subunit."))
+ self.additional_selftest_args['runner_class'] = SubUnitBzrRunner
+ # On Windows, disable automatic conversion of '\n' to '\r\n' in
+ # stdout, which would corrupt the subunit stream.
+ # FIXME: This has been fixed in subunit trunk (>0.0.5) so the
+ # following code can be deleted when it's sufficiently deployed
+ # -- vila/mgz 20100514
+ if (sys.platform == "win32"
+ and getattr(sys.stdout, 'fileno', None) is not None):
+ import msvcrt
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ if parallel:
+ self.additional_selftest_args.setdefault(
+ 'suite_decorators', []).append(parallel)
+ if benchmark:
+ raise errors.BzrCommandError(gettext(
+ "--benchmark is no longer supported from bzr 2.2; "
+ "use bzr-usertest instead"))
+ test_suite_factory = None
+ if not exclude:
+ exclude_pattern = None
+ else:
+ exclude_pattern = '(' + '|'.join(exclude) + ')'
+ if not sync:
+ self._disable_fsync()
+ selftest_kwargs = {"verbose": verbose,
+ "pattern": pattern,
+ "stop_on_failure": one,
+ "transport": transport,
+ "test_suite_factory": test_suite_factory,
+ "lsprof_timed": lsprof_timed,
+ "lsprof_tests": lsprof_tests,
+ "matching_tests_first": first,
+ "list_only": list_only,
+ "random_seed": randomize,
+ "exclude_pattern": exclude_pattern,
+ "strict": strict,
+ "load_list": load_list,
+ "debug_flags": debugflag,
+ "starting_with": starting_with
+ }
+ selftest_kwargs.update(self.additional_selftest_args)
+
+ # Make deprecation warnings visible, unless -Werror is set
+ cleanup = symbol_versioning.activate_deprecation_warnings(
+ override=False)
+ try:
+ result = tests.selftest(**selftest_kwargs)
+ finally:
+ cleanup()
+ return int(not result)
+
+ def _disable_fsync(self):
+ """Change the 'os' functionality to not synchronize."""
+ self._orig_fsync = getattr(os, 'fsync', None)
+ if self._orig_fsync is not None:
+ os.fsync = lambda filedes: None
+ self._orig_fdatasync = getattr(os, 'fdatasync', None)
+ if self._orig_fdatasync is not None:
+ os.fdatasync = lambda filedes: None
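+ # The originals are saved on the command object but not restored by this
+ # method, so fsync/fdatasync stay disabled for the rest of the process.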
+
+
+class cmd_version(Command):
+ __doc__ = """Show version of bzr."""
+
+ encoding_type = 'replace'
+ takes_options = [
+ Option("short", help="Print just the version number."),
+ ]
+
+ @display_command
+ def run(self, short=False):
+ from bzrlib.version import show_version
+ if short:
+ self.outf.write(bzrlib.version_string + '\n')
+ else:
+ show_version(to_file=self.outf)
+
+
+class cmd_rocks(Command):
+ __doc__ = """Statement of optimism."""
+
+ hidden = True
+
+ @display_command
+ def run(self):
+ self.outf.write(gettext("It sure does!\n"))
+
+
+class cmd_find_merge_base(Command):
+ __doc__ = """Find and print a base revision for merging two branches."""
+ # TODO: Options to specify revisions on either side, as if
+ # merging only part of the history.
+ takes_args = ['branch', 'other']
+ hidden = True
+
+ @display_command
+ def run(self, branch, other):
+ from bzrlib.revision import ensure_null
+
+ branch1 = Branch.open_containing(branch)[0]
+ branch2 = Branch.open_containing(other)[0]
+ self.add_cleanup(branch1.lock_read().unlock)
+ self.add_cleanup(branch2.lock_read().unlock)
+ last1 = ensure_null(branch1.last_revision())
+ last2 = ensure_null(branch2.last_revision())
+
+ graph = branch1.repository.get_graph(branch2.repository)
+ base_rev_id = graph.find_unique_lca(last1, last2)
+
+ self.outf.write(gettext('merge base is revision %s\n') % base_rev_id)
+
+
+class cmd_merge(Command):
+ __doc__ = """Perform a three-way merge.
+
+ The source of the merge can be specified either in the form of a branch,
+ or in the form of a path to a file containing a merge directive generated
+ with bzr send. If neither is specified, the default is the upstream branch
+ or the branch most recently merged using --remember. The source of the
+ merge may also be specified in the form of a path to a file in another
+ branch: in this case, only the modifications to that file are merged into
+ the current working tree.
+
+ When merging from a branch, by default bzr will try to merge in all new
+ work from the other branch, automatically determining an appropriate base
+ revision. If this fails, you may need to give an explicit base.
+
+ To pick a different ending revision, pass "--revision OTHER". bzr will
+ try to merge in all new work up to and including revision OTHER.
+
+ If you specify two values, "--revision BASE..OTHER", only revisions BASE
+ through OTHER, excluding BASE but including OTHER, will be merged. If this
+ causes some revisions to be skipped, i.e. if the destination branch does
+ not already contain revision BASE, such a merge is commonly referred to as
+ a "cherrypick". Unlike a normal merge, Bazaar does not currently track
+ cherrypicks. The changes look like a normal commit, and the history of the
+ changes from the other branch is not stored in the commit.
+
+ Revision numbers are always relative to the source branch.
+
+ Merge will do its best to combine the changes in two branches, but there
+ are some kinds of problems only a human can fix. When it encounters those,
+ it will mark a conflict. A conflict means that you need to fix something,
+ before you can commit.
+
+ Use bzr resolve when you have fixed a problem. See also bzr conflicts.
+
+ If there is no default branch set, the first merge will set it (use
+ --no-remember to avoid setting it). After that, you can omit the branch
+ to use the default. To change the default, use --remember. The value will
+ only be saved if the remote location can be accessed.
+
+ The results of the merge are placed into the destination working
+ directory, where they can be reviewed (with bzr diff), tested, and then
+ committed to record the result of the merge.
+
+ merge refuses to run if there are any uncommitted changes, unless
+ --force is given. If --force is given, then the changes from the source
+ will be merged with the current working tree, including any uncommitted
+ changes in the tree. The --force option can also be used to create a
+ merge revision which has more than two parents.
+
+ If one would like to merge changes from the working tree of the other
+ branch without merging any committed revisions, the --uncommitted option
+ can be given.
+
+ To select only some changes to merge, use "merge -i", which will prompt
+ you to apply each diff hunk and file change, similar to "shelve".
+
+ :Examples:
+ To merge all new revisions from bzr.dev::
+
+ bzr merge ../bzr.dev
+
+ To merge changes up to and including revision 82 from bzr.dev::
+
+ bzr merge -r 82 ../bzr.dev
+
+ To merge the changes introduced by 82, without previous changes::
+
+ bzr merge -r 81..82 ../bzr.dev
+
+ To apply a merge directive contained in /tmp/merge::
+
+ bzr merge /tmp/merge
+
+ To create a merge revision with three parents from two branches
+ feature1a and feature1b:
+
+ bzr merge ../feature1a
+ bzr merge ../feature1b --force
+ bzr commit -m 'revision with three parents'
+ """
+
+ encoding_type = 'exact'
+ _see_also = ['update', 'remerge', 'status-flags', 'send']
+ takes_args = ['location?']
+ takes_options = [
+ 'change',
+ 'revision',
+ Option('force',
+ help='Merge even if the destination tree has uncommitted changes.'),
+ 'merge-type',
+ 'reprocess',
+ 'remember',
+ Option('show-base', help="Show base revision text in "
+ "conflicts."),
+ Option('uncommitted', help='Apply uncommitted changes'
+ ' from a working copy, instead of branch changes.'),
+ Option('pull', help='If the destination is already'
+ ' completely merged into the source, pull from the'
+ ' source rather than merging. When this happens,'
+ ' you do not need to commit the result.'),
+ custom_help('directory',
+ help='Branch to merge into, '
+ 'rather than the one containing the working directory.'),
+ Option('preview', help='Instead of merging, show a diff of the'
+ ' merge.'),
+ Option('interactive', help='Select changes interactively.',
+ short_name='i')
+ ]
+
+ def run(self, location=None, revision=None, force=False,
+ merge_type=None, show_base=False, reprocess=None, remember=None,
+ uncommitted=False, pull=False,
+ directory=None,
+ preview=False,
+ interactive=False,
+ ):
+ if merge_type is None:
+ merge_type = _mod_merge.Merge3Merger
+
+        if directory is None:
+            directory = u'.'
+ possible_transports = []
+ merger = None
+ allow_pending = True
+ verified = 'inapplicable'
+
+ tree = WorkingTree.open_containing(directory)[0]
+ if tree.branch.revno() == 0:
+ raise errors.BzrCommandError(gettext('Merging into empty branches not currently supported, '
+ 'https://bugs.launchpad.net/bzr/+bug/308562'))
+
+ try:
+ basis_tree = tree.revision_tree(tree.last_revision())
+ except errors.NoSuchRevision:
+ basis_tree = tree.basis_tree()
+
+ # die as quickly as possible if there are uncommitted changes
+ if not force:
+ if tree.has_changes():
+ raise errors.UncommittedChanges(tree)
+
+ view_info = _get_view_info_for_change_reporter(tree)
+ change_reporter = delta._ChangeReporter(
+ unversioned_filter=tree.is_ignored, view_info=view_info)
+ pb = ui.ui_factory.nested_progress_bar()
+ self.add_cleanup(pb.finished)
+ self.add_cleanup(tree.lock_write().unlock)
+ if location is not None:
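+            # First try to interpret the location as a merge directive or
+            # bundle; if that fails, fall back to treating it as a branch.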
+ try:
+ mergeable = bundle.read_mergeable_from_url(location,
+ possible_transports=possible_transports)
+ except errors.NotABundle:
+ mergeable = None
+ else:
+ if uncommitted:
+ raise errors.BzrCommandError(gettext('Cannot use --uncommitted'
+ ' with bundles or merge directives.'))
+
+ if revision is not None:
+ raise errors.BzrCommandError(gettext(
+ 'Cannot use -r with merge directives or bundles'))
+ merger, verified = _mod_merge.Merger.from_mergeable(tree,
+ mergeable, None)
+
+ if merger is None and uncommitted:
+ if revision is not None and len(revision) > 0:
+ raise errors.BzrCommandError(gettext('Cannot use --uncommitted and'
+ ' --revision at the same time.'))
+ merger = self.get_merger_from_uncommitted(tree, location, None)
+ allow_pending = False
+
+ if merger is None:
+ merger, allow_pending = self._get_merger_from_branch(tree,
+ location, revision, remember, possible_transports, None)
+
+ merger.merge_type = merge_type
+ merger.reprocess = reprocess
+ merger.show_base = show_base
+ self.sanity_check_merger(merger)
+ if (merger.base_rev_id == merger.other_rev_id and
+ merger.other_rev_id is not None):
+ # check if location is a nonexistent file (and not a branch) to
+ # disambiguate the 'Nothing to do'
+ if merger.interesting_files:
+ if not merger.other_tree.has_filename(
+ merger.interesting_files[0]):
+ note(gettext("merger: ") + str(merger))
+ raise errors.PathsDoNotExist([location])
+ note(gettext('Nothing to do.'))
+ return 0
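+        # If the destination is already fully merged into the source, --pull
+        # lets us update by pulling instead of creating a merge revision.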
+ if pull and not preview:
+ if merger.interesting_files is not None:
+ raise errors.BzrCommandError(gettext('Cannot pull individual files'))
+ if (merger.base_rev_id == tree.last_revision()):
+ result = tree.pull(merger.other_branch, False,
+ merger.other_rev_id)
+ result.report(self.outf)
+ return 0
+ if merger.this_basis is None:
+ raise errors.BzrCommandError(gettext(
+ "This branch has no commits."
+ " (perhaps you would prefer 'bzr pull')"))
+ if preview:
+ return self._do_preview(merger)
+ elif interactive:
+ return self._do_interactive(merger)
+ else:
+ return self._do_merge(merger, change_reporter, allow_pending,
+ verified)
+
+ def _get_preview(self, merger):
+ tree_merger = merger.make_merger()
+ tt = tree_merger.make_preview_transform()
+ self.add_cleanup(tt.finalize)
+ result_tree = tt.get_preview_tree()
+ return result_tree
+
+ def _do_preview(self, merger):
+ from bzrlib.diff import show_diff_trees
+ result_tree = self._get_preview(merger)
+ path_encoding = osutils.get_diff_header_encoding()
+ show_diff_trees(merger.this_tree, result_tree, self.outf,
+ old_label='', new_label='',
+ path_encoding=path_encoding)
+
+ def _do_merge(self, merger, change_reporter, allow_pending, verified):
+ merger.change_reporter = change_reporter
+ conflict_count = merger.do_merge()
+ if allow_pending:
+ merger.set_pending()
+ if verified == 'failed':
+ warning('Preview patch does not match changes')
+ if conflict_count != 0:
+ return 1
+ else:
+ return 0
+
+ def _do_interactive(self, merger):
+ """Perform an interactive merge.
+
+ This works by generating a preview tree of the merge, then using
+ Shelver to selectively remove the differences between the working tree
+ and the preview tree.
+ """
+ from bzrlib import shelf_ui
+ result_tree = self._get_preview(merger)
+ writer = bzrlib.option.diff_writer_registry.get()
+ shelver = shelf_ui.Shelver(merger.this_tree, result_tree, destroy=True,
+ reporter=shelf_ui.ApplyReporter(),
+ diff_writer=writer(sys.stdout))
+ try:
+ shelver.run()
+ finally:
+ shelver.finalize()
+
+ def sanity_check_merger(self, merger):
+ if (merger.show_base and
+ not merger.merge_type is _mod_merge.Merge3Merger):
+ raise errors.BzrCommandError(gettext("Show-base is not supported for this"
+ " merge type. %s") % merger.merge_type)
+ if merger.reprocess is None:
+ if merger.show_base:
+ merger.reprocess = False
+ else:
+ # Use reprocess if the merger supports it
+ merger.reprocess = merger.merge_type.supports_reprocess
+ if merger.reprocess and not merger.merge_type.supports_reprocess:
+ raise errors.BzrCommandError(gettext("Conflict reduction is not supported"
+ " for merge type %s.") %
+ merger.merge_type)
+ if merger.reprocess and merger.show_base:
+ raise errors.BzrCommandError(gettext("Cannot do conflict reduction and"
+ " show base."))
+
+ def _get_merger_from_branch(self, tree, location, revision, remember,
+ possible_transports, pb):
+ """Produce a merger from a location, assuming it refers to a branch."""
+ from bzrlib.tag import _merge_tags_if_possible
+ # find the branch locations
+ other_loc, user_location = self._select_branch_location(tree, location,
+ revision, -1)
+ if revision is not None and len(revision) == 2:
+ base_loc, _unused = self._select_branch_location(tree,
+ location, revision, 0)
+ else:
+ base_loc = other_loc
+ # Open the branches
+ other_branch, other_path = Branch.open_containing(other_loc,
+ possible_transports)
+ if base_loc == other_loc:
+ base_branch = other_branch
+ else:
+ base_branch, base_path = Branch.open_containing(base_loc,
+ possible_transports)
+ # Find the revision ids
+ other_revision_id = None
+ base_revision_id = None
+ if revision is not None:
+ if len(revision) >= 1:
+ other_revision_id = revision[-1].as_revision_id(other_branch)
+ if len(revision) == 2:
+ base_revision_id = revision[0].as_revision_id(base_branch)
+ if other_revision_id is None:
+ other_revision_id = _mod_revision.ensure_null(
+ other_branch.last_revision())
+ # Remember where we merge from. We need to remember if:
+        # - the user specified a location (and we don't merge from the parent
+        #   branch)
+        # - the user asked to remember, or there is no previous location set
+        #   to merge from and the user didn't ask to *not* remember
+ if (user_location is not None
+ and ((remember
+ or (remember is None
+ and tree.branch.get_submit_branch() is None)))):
+ tree.branch.set_submit_branch(other_branch.base)
+ # Merge tags (but don't set them in the master branch yet, the user
+ # might revert this merge). Commit will propagate them.
+ _merge_tags_if_possible(other_branch, tree.branch, ignore_master=True)
+ merger = _mod_merge.Merger.from_revision_ids(pb, tree,
+ other_revision_id, base_revision_id, other_branch, base_branch)
+ if other_path != '':
+ allow_pending = False
+ merger.interesting_files = [other_path]
+ else:
+ allow_pending = True
+ return merger, allow_pending
+
+ def get_merger_from_uncommitted(self, tree, location, pb):
+ """Get a merger for uncommitted changes.
+
+ :param tree: The tree the merger should apply to.
+ :param location: The location containing uncommitted changes.
+ :param pb: The progress bar to use for showing progress.
+ """
+ location = self._select_branch_location(tree, location)[0]
+ other_tree, other_path = WorkingTree.open_containing(location)
+ merger = _mod_merge.Merger.from_uncommitted(tree, other_tree, pb)
+ if other_path != '':
+ merger.interesting_files = [other_path]
+ return merger
+
+ def _select_branch_location(self, tree, user_location, revision=None,
+ index=None):
+ """Select a branch location, according to possible inputs.
+
+ If provided, branches from ``revision`` are preferred. (Both
+ ``revision`` and ``index`` must be supplied.)
+
+ Otherwise, the ``location`` parameter is used. If it is None, then the
+ ``submit`` or ``parent`` location is used, and a note is printed.
+
+ :param tree: The working tree to select a branch for merging into
+ :param location: The location entered by the user
+ :param revision: The revision parameter to the command
+ :param index: The index to use for the revision parameter. Negative
+ indices are permitted.
+ :return: (selected_location, user_location). The default location
+ will be the user-entered location.
+ """
+ if (revision is not None and index is not None
+ and revision[index] is not None):
+ branch = revision[index].get_branch()
+ if branch is not None:
+ return branch, branch
+ if user_location is None:
+ location = self._get_remembered(tree, 'Merging from')
+ else:
+ location = user_location
+ return location, user_location
+
+ def _get_remembered(self, tree, verb_string):
+ """Use tree.branch's parent if none was supplied.
+
+ Report if the remembered location was used.
+ """
+ stored_location = tree.branch.get_submit_branch()
+ stored_location_type = "submit"
+ if stored_location is None:
+ stored_location = tree.branch.get_parent()
+ stored_location_type = "parent"
+ mutter("%s", stored_location)
+ if stored_location is None:
+ raise errors.BzrCommandError(gettext("No location specified or remembered"))
+ display_url = urlutils.unescape_for_display(stored_location, 'utf-8')
+ note(gettext("{0} remembered {1} location {2}").format(verb_string,
+ stored_location_type, display_url))
+ return stored_location
+
+
+class cmd_remerge(Command):
+ __doc__ = """Redo a merge.
+
+ Use this if you want to try a different merge technique while resolving
+ conflicts. Some merge techniques are better than others, and remerge
+ lets you try different ones on different files.
+
+ The options for remerge have the same meaning and defaults as the ones for
+ merge. The difference is that remerge can (only) be run when there is a
+ pending merge, and it lets you specify particular files.
+
+ :Examples:
+ Re-do the merge of all conflicted files, and show the base text in
+ conflict regions, in addition to the usual THIS and OTHER texts::
+
+ bzr remerge --show-base
+
+ Re-do the merge of "foobar", using the weave merge algorithm, with
+ additional processing to reduce the size of conflict regions::
+
+ bzr remerge --merge-type weave --reprocess foobar
+ """
+ takes_args = ['file*']
+ takes_options = [
+ 'merge-type',
+ 'reprocess',
+ Option('show-base',
+ help="Show base revision text in conflicts."),
+ ]
+
+ def run(self, file_list=None, merge_type=None, show_base=False,
+ reprocess=False):
+ from bzrlib.conflicts import restore
+ if merge_type is None:
+ merge_type = _mod_merge.Merge3Merger
+ tree, file_list = WorkingTree.open_containing_paths(file_list)
+ self.add_cleanup(tree.lock_write().unlock)
+ parents = tree.get_parent_ids()
+ if len(parents) != 2:
+ raise errors.BzrCommandError(gettext("Sorry, remerge only works after normal"
+ " merges. Not cherrypicking or"
+ " multi-merges."))
+ repository = tree.branch.repository
+ interesting_ids = None
+ new_conflicts = []
+ conflicts = tree.conflicts()
+ if file_list is not None:
+ interesting_ids = set()
+ for filename in file_list:
+ file_id = tree.path2id(filename)
+ if file_id is None:
+ raise errors.NotVersionedError(filename)
+ interesting_ids.add(file_id)
+ if tree.kind(file_id) != "directory":
+ continue
+
+ # FIXME: Support nested trees
+ for name, ie in tree.root_inventory.iter_entries(file_id):
+ interesting_ids.add(ie.file_id)
+ new_conflicts = conflicts.select_conflicts(tree, file_list)[0]
+ else:
+ # Remerge only supports resolving contents conflicts
+ allowed_conflicts = ('text conflict', 'contents conflict')
+ restore_files = [c.path for c in conflicts
+ if c.typestring in allowed_conflicts]
+ _mod_merge.transform_tree(tree, tree.basis_tree(), interesting_ids)
+ tree.set_conflicts(ConflictList(new_conflicts))
+ if file_list is not None:
+ restore_files = file_list
+ for filename in restore_files:
+ try:
+ restore(tree.abspath(filename))
+ except errors.NotConflicted:
+ pass
+ # Disable pending merges, because the file texts we are remerging
+ # have not had those merges performed. If we use the wrong parents
+ # list, we imply that the working tree text has seen and rejected
+ # all the changes from the other tree, when in fact those changes
+ # have not yet been seen.
+ tree.set_parent_ids(parents[:1])
+ try:
+ merger = _mod_merge.Merger.from_revision_ids(None, tree, parents[1])
+ merger.interesting_ids = interesting_ids
+ merger.merge_type = merge_type
+ merger.show_base = show_base
+ merger.reprocess = reprocess
+ conflicts = merger.do_merge()
+ finally:
+ tree.set_parent_ids(parents)
+ if conflicts > 0:
+ return 1
+ else:
+ return 0
+
+
+class cmd_revert(Command):
+ __doc__ = """\
+ Set files in the working tree back to the contents of a previous revision.
+
+ Giving a list of files will revert only those files. Otherwise, all files
+ will be reverted. If the revision is not specified with '--revision', the
+ working tree basis revision is used. A revert operation affects only the
+ working tree, not any revision history like the branch and repository or
+ the working tree basis revision.
+
+ To remove only some changes, without reverting to a prior version, use
+ merge instead. For example, "merge . -r -2..-3" (don't forget the ".")
+ will remove the changes introduced by the second last commit (-2), without
+ affecting the changes introduced by the last commit (-1). To remove
+ certain changes on a hunk-by-hunk basis, see the shelve command.
+ To update the branch to a specific revision or the latest revision and
+ update the working tree accordingly while preserving local changes, see the
+ update command.
+
+ Uncommitted changes to files that are reverted will be discarded.
+    However, by default, any files that have been manually changed will be
+ backed up first. (Files changed only by merge are not backed up.) Backup
+ files have '.~#~' appended to their name, where # is a number.
+
+ When you provide files, you can use their current pathname or the pathname
+ from the target revision. So you can use revert to "undelete" a file by
+ name. If you name a directory, all the contents of that directory will be
+ reverted.
+
+ If you have newly added files since the target revision, they will be
+ removed. If the files to be removed have been changed, backups will be
+ created as above. Directories containing unknown files will not be
+ deleted.
+
+ The working tree contains a list of revisions that have been merged but
+ not yet committed. These revisions will be included as additional parents
+ of the next commit. Normally, using revert clears that list as well as
+ reverting the files. If any files are specified, revert leaves the list
+ of uncommitted merges alone and reverts only the files. Use ``bzr revert
+ .`` in the tree root to revert all files but keep the recorded merges,
+ and ``bzr revert --forget-merges`` to clear the pending merge list without
+ reverting any files.
+
+ Using "bzr revert --forget-merges", it is possible to apply all of the
+ changes from a branch in a single revision. To do this, perform the merge
+ as desired. Then doing revert with the "--forget-merges" option will keep
+ the content of the tree as it was, but it will clear the list of pending
+ merges. The next commit will then contain all of the changes that are
+ present in the other branch, but without any other parent revisions.
+ Because this technique forgets where these changes originated, it may
+ cause additional conflicts on later merges involving the same source and
+ target branches.
+ """
+
+ _see_also = ['cat', 'export', 'merge', 'shelve']
+ takes_options = [
+ 'revision',
+ Option('no-backup', "Do not save backups of reverted files."),
+ Option('forget-merges',
+ 'Remove pending merge marker, without changing any files.'),
+ ]
+ takes_args = ['file*']
+
+ def run(self, revision=None, no_backup=False, file_list=None,
+ forget_merges=None):
+ tree, file_list = WorkingTree.open_containing_paths(file_list)
+ self.add_cleanup(tree.lock_tree_write().unlock)
+ if forget_merges:
+ tree.set_parent_ids(tree.get_parent_ids()[:1])
+ else:
+ self._revert_tree_to_revision(tree, revision, file_list, no_backup)
+
+ @staticmethod
+ def _revert_tree_to_revision(tree, revision, file_list, no_backup):
+ rev_tree = _get_one_revision_tree('revert', revision, tree=tree)
+ tree.revert(file_list, rev_tree, not no_backup, None,
+ report_changes=True)
+
+
+class cmd_assert_fail(Command):
+ __doc__ = """Test reporting of assertion failures"""
+ # intended just for use in testing
+
+ hidden = True
+
+ def run(self):
+ raise AssertionError("always fails")
+
+
+class cmd_help(Command):
+ __doc__ = """Show help on a command or other topic.
+ """
+
+ _see_also = ['topics']
+ takes_options = [
+ Option('long', 'Show help on all commands.'),
+ ]
+ takes_args = ['topic?']
+ aliases = ['?', '--help', '-?', '-h']
+
+ @display_command
+ def run(self, topic=None, long=False):
+ import bzrlib.help
+ if topic is None and long:
+ topic = "commands"
+ bzrlib.help.help(topic)
+
+
+class cmd_shell_complete(Command):
+ __doc__ = """Show appropriate completions for context.
+
+ For a list of all available commands, say 'bzr shell-complete'.
+ """
+ takes_args = ['context?']
+ aliases = ['s-c']
+ hidden = True
+
+ @display_command
+ def run(self, context=None):
+ from bzrlib import shellcomplete
+ shellcomplete.shellcomplete(context)
+
+
+class cmd_missing(Command):
+ __doc__ = """Show unmerged/unpulled revisions between two branches.
+
+ OTHER_BRANCH may be local or remote.
+
+    To filter on a range of revisions, you can use the command -r begin..end.
+    -r revision requests a specific revision; -r ..end or -r begin.. are
+    also valid.
+
+ :Exit values:
+ 1 - some missing revisions
+ 0 - no missing revisions
+
+ :Examples:
+
+ Determine the missing revisions between this and the branch at the
+ remembered pull location::
+
+ bzr missing
+
+ Determine the missing revisions between this and another branch::
+
+ bzr missing http://server/branch
+
+ Determine the missing revisions up to a specific revision on the other
+ branch::
+
+ bzr missing -r ..-10
+
+ Determine the missing revisions up to a specific revision on this
+ branch::
+
+ bzr missing --my-revision ..-10
+ """
+
+ _see_also = ['merge', 'pull']
+ takes_args = ['other_branch?']
+ takes_options = [
+ 'directory',
+ Option('reverse', 'Reverse the order of revisions.'),
+ Option('mine-only',
+ 'Display changes in the local branch only.'),
+        Option('this', 'Same as --mine-only.'),
+ Option('theirs-only',
+ 'Display changes in the remote branch only.'),
+ Option('other', 'Same as --theirs-only.'),
+ 'log-format',
+ 'show-ids',
+ 'verbose',
+ custom_help('revision',
+ help='Filter on other branch revisions (inclusive). '
+ 'See "help revisionspec" for details.'),
+ Option('my-revision',
+ type=_parse_revision_str,
+ help='Filter on local branch revisions (inclusive). '
+ 'See "help revisionspec" for details.'),
+ Option('include-merged',
+ 'Show all revisions in addition to the mainline ones.'),
+ Option('include-merges', hidden=True,
+ help='Historical alias for --include-merged.'),
+ ]
+ encoding_type = 'replace'
+
+ @display_command
+ def run(self, other_branch=None, reverse=False, mine_only=False,
+ theirs_only=False,
+ log_format=None, long=False, short=False, line=False,
+ show_ids=False, verbose=False, this=False, other=False,
+ include_merged=None, revision=None, my_revision=None,
+ directory=u'.',
+ include_merges=symbol_versioning.DEPRECATED_PARAMETER):
+ from bzrlib.missing import find_unmerged, iter_log_revisions
+ def message(s):
+ if not is_quiet():
+ self.outf.write(s)
+
+ if symbol_versioning.deprecated_passed(include_merges):
+ ui.ui_factory.show_user_warning(
+ 'deprecated_command_option',
+ deprecated_name='--include-merges',
+ recommended_name='--include-merged',
+ deprecated_in_version='2.5',
+ command=self.invoked_as)
+ if include_merged is None:
+ include_merged = include_merges
+ else:
+ raise errors.BzrCommandError(gettext(
+ '{0} and {1} are mutually exclusive').format(
+ '--include-merges', '--include-merged'))
+ if include_merged is None:
+ include_merged = False
+ if this:
+ mine_only = this
+ if other:
+ theirs_only = other
+ # TODO: We should probably check that we don't have mine-only and
+ # theirs-only set, but it gets complicated because we also have
+ # this and other which could be used.
+ restrict = 'all'
+ if mine_only:
+ restrict = 'local'
+ elif theirs_only:
+ restrict = 'remote'
+
+ local_branch = Branch.open_containing(directory)[0]
+ self.add_cleanup(local_branch.lock_read().unlock)
+
+ parent = local_branch.get_parent()
+ if other_branch is None:
+ other_branch = parent
+ if other_branch is None:
+ raise errors.BzrCommandError(gettext("No peer location known"
+ " or specified."))
+ display_url = urlutils.unescape_for_display(parent,
+ self.outf.encoding)
+ message(gettext("Using saved parent location: {0}\n").format(
+ display_url))
+
+ remote_branch = Branch.open(other_branch)
+ if remote_branch.base == local_branch.base:
+ remote_branch = local_branch
+ else:
+ self.add_cleanup(remote_branch.lock_read().unlock)
+
+ local_revid_range = _revision_range_to_revid_range(
+ _get_revision_range(my_revision, local_branch,
+ self.name()))
+
+ remote_revid_range = _revision_range_to_revid_range(
+ _get_revision_range(revision,
+ remote_branch, self.name()))
+
+ local_extra, remote_extra = find_unmerged(
+ local_branch, remote_branch, restrict,
+ backward=not reverse,
+ include_merged=include_merged,
+ local_revid_range=local_revid_range,
+ remote_revid_range=remote_revid_range)
+
+ if log_format is None:
+ registry = log.log_formatter_registry
+ log_format = registry.get_default(local_branch)
+ lf = log_format(to_file=self.outf,
+ show_ids=show_ids,
+ show_timezone='original')
+
+ status_code = 0
+ if local_extra and not theirs_only:
+ message(ngettext("You have %d extra revision:\n",
+ "You have %d extra revisions:\n",
+ len(local_extra)) %
+ len(local_extra))
+ rev_tag_dict = {}
+ if local_branch.supports_tags():
+ rev_tag_dict = local_branch.tags.get_reverse_tag_dict()
+ for revision in iter_log_revisions(local_extra,
+ local_branch.repository,
+ verbose,
+ rev_tag_dict):
+ lf.log_revision(revision)
+ printed_local = True
+ status_code = 1
+ else:
+ printed_local = False
+
+ if remote_extra and not mine_only:
+ if printed_local is True:
+ message("\n\n\n")
+ message(ngettext("You are missing %d revision:\n",
+ "You are missing %d revisions:\n",
+ len(remote_extra)) %
+ len(remote_extra))
+ if remote_branch.supports_tags():
+ rev_tag_dict = remote_branch.tags.get_reverse_tag_dict()
+ for revision in iter_log_revisions(remote_extra,
+ remote_branch.repository,
+ verbose,
+ rev_tag_dict):
+ lf.log_revision(revision)
+ status_code = 1
+
+ if mine_only and not local_extra:
+ # We checked local, and found nothing extra
+ message(gettext('This branch has no new revisions.\n'))
+ elif theirs_only and not remote_extra:
+ # We checked remote, and found nothing extra
+ message(gettext('Other branch has no new revisions.\n'))
+ elif not (mine_only or theirs_only or local_extra or
+ remote_extra):
+ # We checked both branches, and neither one had extra
+ # revisions
+ message(gettext("Branches are up to date.\n"))
+ self.cleanup_now()
+ if not status_code and parent is None and other_branch is not None:
+ self.add_cleanup(local_branch.lock_write().unlock)
+ # handle race conditions - a parent might be set while we run.
+ if local_branch.get_parent() is None:
+ local_branch.set_parent(remote_branch.base)
+ return status_code
+
+
+class cmd_pack(Command):
+ __doc__ = """Compress the data within a repository.
+
+    This operation compresses the data within a bazaar repository. As
+    bazaar supports automatic packing of repositories, this operation
+    normally does not need to be run manually.
+
+ During the pack operation, bazaar takes a backup of existing repository
+ data, i.e. pack files. This backup is eventually removed by bazaar
+ automatically when it is safe to do so. To save disk space by removing
+ the backed up pack files, the --clean-obsolete-packs option may be
+ used.
+
+ Warning: If you use --clean-obsolete-packs and your machine crashes
+ during or immediately after repacking, you may be left with a state
+ where the deletion has been written to disk but the new packs have not
+ been. In this case the repository may be unusable.
+ """
+
+ _see_also = ['repositories']
+ takes_args = ['branch_or_repo?']
+ takes_options = [
+ Option('clean-obsolete-packs', 'Delete obsolete packs to save disk space.'),
+ ]
+
+ def run(self, branch_or_repo='.', clean_obsolete_packs=False):
+ dir = controldir.ControlDir.open_containing(branch_or_repo)[0]
+ try:
+ branch = dir.open_branch()
+ repository = branch.repository
+ except errors.NotBranchError:
+ repository = dir.open_repository()
+ repository.pack(clean_obsolete_packs=clean_obsolete_packs)
+
+
+class cmd_plugins(Command):
+ __doc__ = """List the installed plugins.
+
+ This command displays the list of installed plugins including
+ version of plugin and a short description of each.
+
+ --verbose shows the path where each plugin is located.
+
+ A plugin is an external component for Bazaar that extends the
+ revision control system, by adding or replacing code in Bazaar.
+ Plugins can do a variety of things, including overriding commands,
+ adding new commands, providing additional network transports and
+ customizing log output.
+
+ See the Bazaar Plugin Guide <http://doc.bazaar.canonical.com/plugins/en/>
+ for further information on plugins including where to find them and how to
+ install them. Instructions are also provided there on how to write new
+ plugins using the Python programming language.
+ """
+ takes_options = ['verbose']
+
+ @display_command
+ def run(self, verbose=False):
+ from bzrlib import plugin
+ # Don't give writelines a generator as some codecs don't like that
+ self.outf.writelines(
+ list(plugin.describe_plugins(show_paths=verbose)))
+
+
+class cmd_testament(Command):
+ __doc__ = """Show testament (signing-form) of a revision."""
+ takes_options = [
+ 'revision',
+ Option('long', help='Produce long-format testament.'),
+ Option('strict',
+ help='Produce a strict-format testament.')]
+ takes_args = ['branch?']
+ @display_command
+ def run(self, branch=u'.', revision=None, long=False, strict=False):
+ from bzrlib.testament import Testament, StrictTestament
+ if strict is True:
+ testament_class = StrictTestament
+ else:
+ testament_class = Testament
+ if branch == '.':
+ b = Branch.open_containing(branch)[0]
+ else:
+ b = Branch.open(branch)
+ self.add_cleanup(b.lock_read().unlock)
+ if revision is None:
+ rev_id = b.last_revision()
+ else:
+ rev_id = revision[0].as_revision_id(b)
+ t = testament_class.from_revision(b.repository, rev_id)
+ if long:
+ sys.stdout.writelines(t.as_text_lines())
+ else:
+ sys.stdout.write(t.as_short_text())
+
+
+class cmd_annotate(Command):
+ __doc__ = """Show the origin of each line in a file.
+
+ This prints out the given file with an annotation on the left side
+ indicating which revision, author and date introduced the change.
+
+ If the origin is the same for a run of consecutive lines, it is
+ shown only at the top, unless the --all option is given.
+ """
+ # TODO: annotate directories; showing when each file was last changed
+ # TODO: if the working copy is modified, show annotations on that
+ # with new uncommitted lines marked
+ aliases = ['ann', 'blame', 'praise']
+ takes_args = ['filename']
+ takes_options = [Option('all', help='Show annotations on all lines.'),
+ Option('long', help='Show commit date in annotations.'),
+ 'revision',
+ 'show-ids',
+ 'directory',
+ ]
+ encoding_type = 'exact'
+
+ @display_command
+ def run(self, filename, all=False, long=False, revision=None,
+ show_ids=False, directory=None):
+ from bzrlib.annotate import (
+ annotate_file_tree,
+ )
+ wt, branch, relpath = \
+ _open_directory_or_containing_tree_or_branch(filename, directory)
+ if wt is not None:
+ self.add_cleanup(wt.lock_read().unlock)
+ else:
+ self.add_cleanup(branch.lock_read().unlock)
+ tree = _get_one_revision_tree('annotate', revision, branch=branch)
+ self.add_cleanup(tree.lock_read().unlock)
+ if wt is not None and revision is None:
+ file_id = wt.path2id(relpath)
+ else:
+ file_id = tree.path2id(relpath)
+ if file_id is None:
+ raise errors.NotVersionedError(filename)
+ if wt is not None and revision is None:
+ # If there is a tree and we're not annotating historical
+ # versions, annotate the working tree's content.
+ annotate_file_tree(wt, file_id, self.outf, long, all,
+ show_ids=show_ids)
+ else:
+ annotate_file_tree(tree, file_id, self.outf, long, all,
+ show_ids=show_ids, branch=branch)
+
+
+class cmd_re_sign(Command):
+ __doc__ = """Create a digital signature for an existing revision."""
+ # TODO be able to replace existing ones.
+
+ hidden = True # is this right ?
+ takes_args = ['revision_id*']
+ takes_options = ['directory', 'revision']
+
+ def run(self, revision_id_list=None, revision=None, directory=u'.'):
+ if revision_id_list is not None and revision is not None:
+ raise errors.BzrCommandError(gettext('You can only supply one of revision_id or --revision'))
+ if revision_id_list is None and revision is None:
+ raise errors.BzrCommandError(gettext('You must supply either --revision or a revision_id'))
+ b = WorkingTree.open_containing(directory)[0].branch
+ self.add_cleanup(b.lock_write().unlock)
+ return self._run(b, revision_id_list, revision)
+
+ def _run(self, b, revision_id_list, revision):
+ import bzrlib.gpg as gpg
+ gpg_strategy = gpg.GPGStrategy(b.get_config_stack())
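+        # Signing writes to the repository, so each batch of sign_revision
+        # calls happens inside a write group that is committed on success and
+        # aborted if anything raises.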
+ if revision_id_list is not None:
+ b.repository.start_write_group()
+ try:
+ for revision_id in revision_id_list:
+ b.repository.sign_revision(revision_id, gpg_strategy)
+ except:
+ b.repository.abort_write_group()
+ raise
+ else:
+ b.repository.commit_write_group()
+ elif revision is not None:
+ if len(revision) == 1:
+ revno, rev_id = revision[0].in_history(b)
+ b.repository.start_write_group()
+ try:
+ b.repository.sign_revision(rev_id, gpg_strategy)
+ except:
+ b.repository.abort_write_group()
+ raise
+ else:
+ b.repository.commit_write_group()
+ elif len(revision) == 2:
+ # are they both on rh- if so we can walk between them
+ # might be nice to have a range helper for arbitrary
+ # revision paths. hmm.
+ from_revno, from_revid = revision[0].in_history(b)
+ to_revno, to_revid = revision[1].in_history(b)
+ if to_revid is None:
+ to_revno = b.revno()
+ if from_revno is None or to_revno is None:
+ raise errors.BzrCommandError(gettext('Cannot sign a range of non-revision-history revisions'))
+ b.repository.start_write_group()
+ try:
+ for revno in range(from_revno, to_revno + 1):
+ b.repository.sign_revision(b.get_rev_id(revno),
+ gpg_strategy)
+ except:
+ b.repository.abort_write_group()
+ raise
+ else:
+ b.repository.commit_write_group()
+ else:
+ raise errors.BzrCommandError(gettext('Please supply either one revision, or a range.'))
+
+
+class cmd_bind(Command):
+ __doc__ = """Convert the current branch into a checkout of the supplied branch.
+ If no branch is supplied, rebind to the last bound location.
+
+ Once converted into a checkout, commits must succeed on the master branch
+ before they will be applied to the local branch.
+
+    Bound branches use the nickname of their master branch unless it is set
+ locally, in which case binding will update the local nickname to be
+ that of the master.
+ """
+
+ _see_also = ['checkouts', 'unbind']
+ takes_args = ['location?']
+ takes_options = ['directory']
+
+ def run(self, location=None, directory=u'.'):
+ b, relpath = Branch.open_containing(directory)
+ if location is None:
+ try:
+ location = b.get_old_bound_location()
+ except errors.UpgradeRequired:
+ raise errors.BzrCommandError(gettext('No location supplied. '
+ 'This format does not remember old locations.'))
+ else:
+ if location is None:
+ if b.get_bound_location() is not None:
+ raise errors.BzrCommandError(
+ gettext('Branch is already bound'))
+ else:
+ raise errors.BzrCommandError(
+ gettext('No location supplied'
+ ' and no previous location known'))
+ b_other = Branch.open(location)
+ try:
+ b.bind(b_other)
+ except errors.DivergedBranches:
+ raise errors.BzrCommandError(gettext('These branches have diverged.'
+ ' Try merging, and then bind again.'))
+ if b.get_config().has_explicit_nickname():
+ b.nick = b_other.nick
+
+
+class cmd_unbind(Command):
+ __doc__ = """Convert the current checkout into a regular branch.
+
+ After unbinding, the local branch is considered independent and subsequent
+ commits will be local only.
+ """
+
+ _see_also = ['checkouts', 'bind']
+ takes_args = []
+ takes_options = ['directory']
+
+ def run(self, directory=u'.'):
+ b, relpath = Branch.open_containing(directory)
+ if not b.unbind():
+ raise errors.BzrCommandError(gettext('Local branch is not bound'))
+
+
+class cmd_uncommit(Command):
+ __doc__ = """Remove the last committed revision.
+
+ --verbose will print out what is being removed.
+ --dry-run will go through all the motions, but not actually
+ remove anything.
+
+ If --revision is specified, uncommit revisions to leave the branch at the
+ specified revision. For example, "bzr uncommit -r 15" will leave the
+ branch at revision 15.
+
+ Uncommit leaves the working tree ready for a new commit. The only change
+ it may make is to restore any pending merges that were present before
+ the commit.
+ """
+
+ # TODO: jam 20060108 Add an option to allow uncommit to remove
+ # unreferenced information in 'branch-as-repository' branches.
+ # TODO: jam 20060108 Add the ability for uncommit to remove unreferenced
+ # information in shared branches as well.
+ _see_also = ['commit']
+ takes_options = ['verbose', 'revision',
+ Option('dry-run', help='Don\'t actually make changes.'),
+ Option('force', help='Say yes to all questions.'),
+ Option('keep-tags',
+ help='Keep tags that point to removed revisions.'),
+ Option('local',
+ help="Only remove the commits from the local branch"
+ " when in a checkout."
+ ),
+ ]
+ takes_args = ['location?']
+ aliases = []
+ encoding_type = 'replace'
+
+ def run(self, location=None, dry_run=False, verbose=False,
+ revision=None, force=False, local=False, keep_tags=False):
+ if location is None:
+ location = u'.'
+ control, relpath = controldir.ControlDir.open_containing(location)
+ try:
+ tree = control.open_workingtree()
+ b = tree.branch
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ tree = None
+ b = control.open_branch()
+
+ if tree is not None:
+ self.add_cleanup(tree.lock_write().unlock)
+ else:
+ self.add_cleanup(b.lock_write().unlock)
+ return self._run(b, tree, dry_run, verbose, revision, force,
+ local, keep_tags)
+
+ def _run(self, b, tree, dry_run, verbose, revision, force, local,
+ keep_tags):
+ from bzrlib.log import log_formatter, show_log
+ from bzrlib.uncommit import uncommit
+
+ last_revno, last_rev_id = b.last_revision_info()
+
+ rev_id = None
+ if revision is None:
+ revno = last_revno
+ rev_id = last_rev_id
+ else:
+ # 'bzr uncommit -r 10' actually means uncommit
+ # so that the final tree is at revno 10.
+ # but bzrlib.uncommit.uncommit() actually uncommits
+ # the revisions that are supplied.
+ # So we need to offset it by one
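+            # For example, with last_revno == 15, 'bzr uncommit -r 10' gives
+            # revno == 11, so revisions 11..15 are removed and the branch is
+            # left at revision 10.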
+ revno = revision[0].in_history(b).revno + 1
+ if revno <= last_revno:
+ rev_id = b.get_rev_id(revno)
+
+ if rev_id is None or _mod_revision.is_null(rev_id):
+ self.outf.write(gettext('No revisions to uncommit.\n'))
+ return 1
+
+ lf = log_formatter('short',
+ to_file=self.outf,
+ show_timezone='original')
+
+ show_log(b,
+ lf,
+ verbose=False,
+ direction='forward',
+ start_revision=revno,
+ end_revision=last_revno)
+
+ if dry_run:
+ self.outf.write(gettext('Dry-run, pretending to remove'
+ ' the above revisions.\n'))
+ else:
+ self.outf.write(gettext('The above revision(s) will be removed.\n'))
+
+ if not force:
+ if not ui.ui_factory.confirm_action(
+ gettext(u'Uncommit these revisions'),
+ 'bzrlib.builtins.uncommit',
+ {}):
+ self.outf.write(gettext('Canceled\n'))
+ return 0
+
+ mutter('Uncommitting from {%s} to {%s}',
+ last_rev_id, rev_id)
+ uncommit(b, tree=tree, dry_run=dry_run, verbose=verbose,
+ revno=revno, local=local, keep_tags=keep_tags)
+ self.outf.write(gettext('You can restore the old tip by running:\n'
+ ' bzr pull . -r revid:%s\n') % last_rev_id)
+
+
+class cmd_break_lock(Command):
+ __doc__ = """Break a dead lock.
+
+ This command breaks a lock on a repository, branch, working directory or
+ config file.
+
+ CAUTION: Locks should only be broken when you are sure that the process
+ holding the lock has been stopped.
+
+ You can get information on what locks are open via the 'bzr info
+ [location]' command.
+
+ :Examples:
+ bzr break-lock
+ bzr break-lock bzr+ssh://example.com/bzr/foo
+ bzr break-lock --conf ~/.bazaar
+ """
+
+ takes_args = ['location?']
+ takes_options = [
+ Option('config',
+ help='LOCATION is the directory where the config lock is.'),
+ Option('force',
+ help='Do not ask for confirmation before breaking the lock.'),
+ ]
+
+ def run(self, location=None, config=False, force=False):
+ if location is None:
+ location = u'.'
+ if force:
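+            # Pre-answer the break-lock confirmation so the user is not
+            # prompted before the lock is broken.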
+ ui.ui_factory = ui.ConfirmationUserInterfacePolicy(ui.ui_factory,
+ None,
+ {'bzrlib.lockdir.break': True})
+ if config:
+ conf = _mod_config.LockableConfig(file_name=location)
+ conf.break_lock()
+ else:
+ control, relpath = controldir.ControlDir.open_containing(location)
+ try:
+ control.break_lock()
+ except NotImplementedError:
+ pass
+
+
+class cmd_wait_until_signalled(Command):
+ __doc__ = """Test helper for test_start_and_stop_bzr_subprocess_send_signal.
+
+ This just prints a line to signal when it is ready, then blocks on stdin.
+ """
+
+ hidden = True
+
+ def run(self):
+ sys.stdout.write("running\n")
+ sys.stdout.flush()
+ sys.stdin.readline()
+
+
+class cmd_serve(Command):
+ __doc__ = """Run the bzr server."""
+
+ aliases = ['server']
+
+ takes_options = [
+ Option('inet',
+ help='Serve on stdin/out for use from inetd or sshd.'),
+ RegistryOption('protocol',
+ help="Protocol to serve.",
+ lazy_registry=('bzrlib.transport', 'transport_server_registry'),
+ value_switches=True),
+ Option('listen',
+ help='Listen for connections on nominated address.', type=str),
+ Option('port',
+ help='Listen for connections on nominated port. Passing 0 as '
+ 'the port number will result in a dynamically allocated '
+ 'port. The default port depends on the protocol.',
+ type=int),
+ custom_help('directory',
+ help='Serve contents of this directory.'),
+ Option('allow-writes',
+ help='By default the server is a readonly server. Supplying '
+ '--allow-writes enables write access to the contents of '
+ 'the served directory and below. Note that ``bzr serve`` '
+ 'does not perform authentication, so unless some form of '
+ 'external authentication is arranged supplying this '
+ 'option leads to global uncontrolled write access to your '
+ 'file system.'
+ ),
+ Option('client-timeout', type=float,
+ help='Override the default idle client timeout (5min).'),
+ ]
+
+ def run(self, listen=None, port=None, inet=False, directory=None,
+ allow_writes=False, protocol=None, client_timeout=None):
+ from bzrlib import transport
+ if directory is None:
+ directory = os.getcwd()
+ if protocol is None:
+ protocol = transport.transport_server_registry.get()
+ url = transport.location_to_url(directory)
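+        # Unless writes were explicitly allowed, wrap the transport with the
+        # 'readonly+' decorator so the served directory cannot be modified.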
+ if not allow_writes:
+ url = 'readonly+' + url
+ t = transport.get_transport_from_url(url)
+ protocol(t, listen, port, inet, client_timeout)
+
+
+class cmd_join(Command):
+ __doc__ = """Combine a tree into its containing tree.
+
+ This command requires the target tree to be in a rich-root format.
+
+ The TREE argument should be an independent tree, inside another tree, but
+ not part of it. (Such trees can be produced by "bzr split", but also by
+ running "bzr branch" with the target inside a tree.)
+
+ The result is a combined tree, with the subtree no longer an independent
+ part. This is marked as a merge of the subtree into the containing tree,
+ and all history is preserved.
+ """
+
+ _see_also = ['split']
+ takes_args = ['tree']
+ takes_options = [
+ Option('reference', help='Join by reference.', hidden=True),
+ ]
+
+ def run(self, tree, reference=False):
+ sub_tree = WorkingTree.open(tree)
+ parent_dir = osutils.dirname(sub_tree.basedir)
+ containing_tree = WorkingTree.open_containing(parent_dir)[0]
+ repo = containing_tree.branch.repository
+ if not repo.supports_rich_root():
+ raise errors.BzrCommandError(gettext(
+ "Can't join trees because %s doesn't support rich root data.\n"
+ "You can use bzr upgrade on the repository.")
+ % (repo,))
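+        # --reference records the subtree as a nested tree reference instead
+        # of subsuming its contents and history into the containing tree.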
+ if reference:
+ try:
+ containing_tree.add_reference(sub_tree)
+ except errors.BadReferenceTarget, e:
+ # XXX: Would be better to just raise a nicely printable
+ # exception from the real origin. Also below. mbp 20070306
+ raise errors.BzrCommandError(
+ gettext("Cannot join {0}. {1}").format(tree, e.reason))
+ else:
+ try:
+ containing_tree.subsume(sub_tree)
+ except errors.BadSubsumeSource, e:
+ raise errors.BzrCommandError(
+ gettext("Cannot join {0}. {1}").format(tree, e.reason))
+
+
+class cmd_split(Command):
+ __doc__ = """Split a subdirectory of a tree into a separate tree.
+
+ This command will produce a target tree in a format that supports
+ rich roots, like 'rich-root' or 'rich-root-pack'. These formats cannot be
+ converted into earlier formats like 'dirstate-tags'.
+
+ The TREE argument should be a subdirectory of a working tree. That
+ subdirectory will be converted into an independent tree, with its own
+ branch. Commits in the top-level tree will not apply to the new subtree.
+ """
+
+ _see_also = ['join']
+ takes_args = ['tree']
+
+ def run(self, tree):
+ containing_tree, subdir = WorkingTree.open_containing(tree)
+ sub_id = containing_tree.path2id(subdir)
+ if sub_id is None:
+ raise errors.NotVersionedError(subdir)
+ try:
+ containing_tree.extract(sub_id)
+ except errors.RootNotRich:
+ raise errors.RichRootUpgradeRequired(containing_tree.branch.base)
+
+
+class cmd_merge_directive(Command):
+ __doc__ = """Generate a merge directive for auto-merge tools.
+
+ A directive requests a merge to be performed, and also provides all the
+ information necessary to do so. This means it must either include a
+ revision bundle, or the location of a branch containing the desired
+ revision.
+
+ A submit branch (the location to merge into) must be supplied the first
+ time the command is issued. After it has been supplied once, it will
+ be remembered as the default.
+
+ A public branch is optional if a revision bundle is supplied, but required
+ if --diff or --plain is specified. It will be remembered as the default
+ after the first use.
+ """
+
+ takes_args = ['submit_branch?', 'public_branch?']
+
+ hidden = True
+
+ _see_also = ['send']
+
+ takes_options = [
+ 'directory',
+ RegistryOption.from_kwargs('patch-type',
+ 'The type of patch to include in the directive.',
+ title='Patch type',
+ value_switches=True,
+ enum_switch=False,
+ bundle='Bazaar revision bundle (default).',
+ diff='Normal unified diff.',
+ plain='No patch, just directive.'),
+ Option('sign', help='GPG-sign the directive.'), 'revision',
+ Option('mail-to', type=str,
+ help='Instead of printing the directive, email to this address.'),
+ Option('message', type=str, short_name='m',
+ help='Message to use when committing this merge.')
+ ]
+
+ encoding_type = 'exact'
+
+ def run(self, submit_branch=None, public_branch=None, patch_type='bundle',
+ sign=False, revision=None, mail_to=None, message=None,
+ directory=u'.'):
+ from bzrlib.revision import ensure_null, NULL_REVISION
+ include_patch, include_bundle = {
+ 'plain': (False, False),
+ 'diff': (True, False),
+ 'bundle': (True, True),
+ }[patch_type]
+ branch = Branch.open(directory)
+ stored_submit_branch = branch.get_submit_branch()
+ if submit_branch is None:
+ submit_branch = stored_submit_branch
+ else:
+ if stored_submit_branch is None:
+ branch.set_submit_branch(submit_branch)
+ if submit_branch is None:
+ submit_branch = branch.get_parent()
+ if submit_branch is None:
+ raise errors.BzrCommandError(gettext('No submit branch specified or known'))
+
+ stored_public_branch = branch.get_public_branch()
+ if public_branch is None:
+ public_branch = stored_public_branch
+ elif stored_public_branch is None:
+ # FIXME: Should be done only if we succeed ? -- vila 2012-01-03
+ branch.set_public_branch(public_branch)
+ if not include_bundle and public_branch is None:
+ raise errors.BzrCommandError(gettext('No public branch specified or'
+ ' known'))
+ base_revision_id = None
+ if revision is not None:
+ if len(revision) > 2:
+ raise errors.BzrCommandError(gettext('bzr merge-directive takes '
+                    'at most two revision identifiers'))
+ revision_id = revision[-1].as_revision_id(branch)
+ if len(revision) == 2:
+ base_revision_id = revision[0].as_revision_id(branch)
+ else:
+ revision_id = branch.last_revision()
+ revision_id = ensure_null(revision_id)
+ if revision_id == NULL_REVISION:
+ raise errors.BzrCommandError(gettext('No revisions to bundle.'))
+ directive = merge_directive.MergeDirective2.from_objects(
+ branch.repository, revision_id, time.time(),
+ osutils.local_time_offset(), submit_branch,
+ public_branch=public_branch, include_patch=include_patch,
+ include_bundle=include_bundle, message=message,
+ base_revision_id=base_revision_id)
+ if mail_to is None:
+ if sign:
+ self.outf.write(directive.to_signed(branch))
+ else:
+ self.outf.writelines(directive.to_lines())
+ else:
+ message = directive.to_email(mail_to, branch, sign)
+ s = SMTPConnection(branch.get_config_stack())
+ s.send_email(message)
+
+
+class cmd_send(Command):
+ __doc__ = """Mail or create a merge-directive for submitting changes.
+
+ A merge directive provides many things needed for requesting merges:
+
+ * A machine-readable description of the merge to perform
+
+ * An optional patch that is a preview of the changes requested
+
+ * An optional bundle of revision data, so that the changes can be applied
+ directly from the merge directive, without retrieving data from a
+ branch.
+
+ `bzr send` creates a compact data set that, when applied using bzr
+ merge, has the same effect as merging from the source branch.
+
+ By default the merge directive is self-contained and can be applied to any
+    branch containing submit_branch in its ancestry without needing access to
+ the source branch.
+
+ If --no-bundle is specified, then Bazaar doesn't send the contents of the
+ revisions, but only a structured request to merge from the
+ public_location. In that case the public_branch is needed and it must be
+ up-to-date and accessible to the recipient. The public_branch is always
+ included if known, so that people can check it later.
+
+ The submit branch defaults to the parent of the source branch, but can be
+ overridden. Both submit branch and public branch will be remembered in
+ branch.conf the first time they are used for a particular branch. The
+ source branch defaults to that containing the working directory, but can
+ be changed using --from.
+
+ Both the submit branch and the public branch follow the usual behavior with
+ respect to --remember: If there is no default location set, the first send
+ will set it (use --no-remember to avoid setting it). After that, you can
+ omit the location to use the default. To change the default, use
+ --remember. The value will only be saved if the location can be accessed.
+
+ In order to calculate those changes, bzr must analyse the submit branch.
+ Therefore it is most efficient for the submit branch to be a local mirror.
+ If a public location is known for the submit_branch, that location is used
+ in the merge directive.
+
+ The default behaviour is to send the merge directive by mail, unless -o is
+ given, in which case it is sent to a file.
+
+ Mail is sent using your preferred mail program. This should be transparent
+ on Windows (it uses MAPI). On Unix, it requires the xdg-email utility.
+ If the preferred client can't be found (or used), your editor will be used.
+
+ To use a specific mail program, set the mail_client configuration option.
+ (For Thunderbird 1.5, this works around some bugs.) Supported values for
+ specific clients are "claws", "evolution", "kmail", "mail.app" (MacOS X's
+ Mail.app), "mutt", and "thunderbird"; generic options are "default",
+ "editor", "emacsclient", "mapi", and "xdg-email". Plugins may also add
+ supported clients.
+
+ If mail is being sent, a to address is required. This can be supplied
+ either on the commandline, by setting the submit_to configuration
+ option in the branch itself or the child_submit_to configuration option
+ in the submit branch.
+
+ Two formats are currently supported: "4" uses revision bundle format 4 and
+ merge directive format 2. It is significantly faster and smaller than
+ older formats. It is compatible with Bazaar 0.19 and later. It is the
+ default. "0.9" uses revision bundle format 0.9 and merge directive
+ format 1. It is compatible with Bazaar 0.12 - 0.18.
+
+ The merge directives created by bzr send may be applied using bzr merge or
+ bzr pull by specifying a file containing a merge directive as the location.
+
+ bzr send makes extensive use of public locations to map local locations into
+ URLs that can be used by other people. See `bzr help configuration` to
+ set them, and use `bzr info` to display them.
+ """
+
+ encoding_type = 'exact'
+
+ _see_also = ['merge', 'pull']
+
+ takes_args = ['submit_branch?', 'public_branch?']
+
+ takes_options = [
+ Option('no-bundle',
+ help='Do not include a bundle in the merge directive.'),
+ Option('no-patch', help='Do not include a preview patch in the merge'
+ ' directive.'),
+ Option('remember',
+ help='Remember submit and public branch.'),
+ Option('from',
+ help='Branch to generate the submission from, '
+ 'rather than the one containing the working directory.',
+ short_name='f',
+ type=unicode),
+ Option('output', short_name='o',
+ help='Write merge directive to this file or directory; '
+ 'use - for stdout.',
+ type=unicode),
+ Option('strict',
+ help='Refuse to send if there are uncommitted changes in'
+ ' the working tree, --no-strict disables the check.'),
+ Option('mail-to', help='Mail the request to this address.',
+ type=unicode),
+ 'revision',
+ 'message',
+ Option('body', help='Body for the email.', type=unicode),
+ RegistryOption('format',
+ help='Use the specified output format.',
+ lazy_registry=('bzrlib.send', 'format_registry')),
+ ]
+
+ def run(self, submit_branch=None, public_branch=None, no_bundle=False,
+ no_patch=False, revision=None, remember=None, output=None,
+ format=None, mail_to=None, message=None, body=None,
+ strict=None, **kwargs):
+ from bzrlib.send import send
+ return send(submit_branch, revision, public_branch, remember,
+ format, no_bundle, no_patch, output,
+ kwargs.get('from', '.'), mail_to, message, body,
+ self.outf,
+ strict=strict)
+
+
+class cmd_bundle_revisions(cmd_send):
+ __doc__ = """Create a merge-directive for submitting changes.
+
+ A merge directive provides many things needed for requesting merges:
+
+ * A machine-readable description of the merge to perform
+
+ * An optional patch that is a preview of the changes requested
+
+ * An optional bundle of revision data, so that the changes can be applied
+ directly from the merge directive, without retrieving data from a
+ branch.
+
+ If --no-bundle is specified, then public_branch is needed (and must be
+ up-to-date), so that the receiver can perform the merge using the
+ public_branch. The public_branch is always included if known, so that
+ people can check it later.
+
+ The submit branch defaults to the parent, but can be overridden. Both
+ submit branch and public branch will be remembered if supplied.
+
+ If a public_branch is known for the submit_branch, that public submit
+ branch is used in the merge instructions. This means that a local mirror
+ can be used as your actual submit branch, once you have set public_branch
+ for that mirror.
+
+ Two formats are currently supported: "4" uses revision bundle format 4 and
+ merge directive format 2. It is significantly faster and smaller than
+ older formats. It is compatible with Bazaar 0.19 and later. It is the
+ default. "0.9" uses revision bundle format 0.9 and merge directive
+ format 1. It is compatible with Bazaar 0.12 - 0.18.
+ """
+
+ takes_options = [
+ Option('no-bundle',
+ help='Do not include a bundle in the merge directive.'),
+ Option('no-patch', help='Do not include a preview patch in the merge'
+ ' directive.'),
+ Option('remember',
+ help='Remember submit and public branch.'),
+ Option('from',
+ help='Branch to generate the submission from, '
+ 'rather than the one containing the working directory.',
+ short_name='f',
+ type=unicode),
+ Option('output', short_name='o', help='Write directive to this file.',
+ type=unicode),
+ Option('strict',
+ help='Refuse to bundle revisions if there are uncommitted'
+ ' changes in the working tree, --no-strict disables the check.'),
+ 'revision',
+ RegistryOption('format',
+ help='Use the specified output format.',
+ lazy_registry=('bzrlib.send', 'format_registry')),
+ ]
+ aliases = ['bundle']
+
+ _see_also = ['send', 'merge']
+
+ hidden = True
+
+ def run(self, submit_branch=None, public_branch=None, no_bundle=False,
+ no_patch=False, revision=None, remember=False, output=None,
+ format=None, strict=None, **kwargs):
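+ # Unlike cmd_send, bundle-revisions never mails the directive: output
+ # defaults to stdout and the mail-related arguments are passed to
+ # send() as None.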
+ if output is None:
+ output = '-'
+ from bzrlib.send import send
+ return send(submit_branch, revision, public_branch, remember,
+ format, no_bundle, no_patch, output,
+ kwargs.get('from', '.'), None, None, None,
+ self.outf, strict=strict)
+
+
+class cmd_tag(Command):
+ __doc__ = """Create, remove or modify a tag naming a revision.
+
+ Tags give human-meaningful names to revisions. Commands that take a -r
+ (--revision) option can be given -rtag:X, where X is any previously
+ created tag.
+
+ Tags are stored in the branch. Tags are copied from one branch to another
+ when you branch, push, pull or merge.
+
+ It is an error to give a tag name that already exists unless you pass
+ --force, in which case the tag is moved to point to the new revision.
+
+ To rename a tag (change the name but keep it on the same revision), run ``bzr
+ tag new-name -r tag:old-name`` and then ``bzr tag --delete oldname``.
+
+ If no tag name is specified it will be determined through the
+ 'automatic_tag_name' hook. This can e.g. be used to automatically tag
+ upstream releases by reading configure.ac. See ``bzr help hooks`` for
+ details.
+ """
+
+ _see_also = ['commit', 'tags']
+ takes_args = ['tag_name?']
+ takes_options = [
+ Option('delete',
+ help='Delete this tag rather than placing it.',
+ ),
+ custom_help('directory',
+ help='Branch in which to place the tag.'),
+ Option('force',
+ help='Replace existing tags.',
+ ),
+ 'revision',
+ ]
+
+ def run(self, tag_name=None,
+ delete=None,
+ directory='.',
+ force=None,
+ revision=None,
+ ):
+ branch, relpath = Branch.open_containing(directory)
+ self.add_cleanup(branch.lock_write().unlock)
+ if delete:
+ if tag_name is None:
+ raise errors.BzrCommandError(gettext("No tag specified to delete."))
+ branch.tags.delete_tag(tag_name)
+ note(gettext('Deleted tag %s.') % tag_name)
+ else:
+ if revision:
+ if len(revision) != 1:
+ raise errors.BzrCommandError(gettext(
+ "Tags can only be placed on a single revision, "
+ "not on a range"))
+ revision_id = revision[0].as_revision_id(branch)
+ else:
+ revision_id = branch.last_revision()
+ if tag_name is None:
+ tag_name = branch.automatic_tag_name(revision_id)
+ if tag_name is None:
+ raise errors.BzrCommandError(gettext(
+ "Please specify a tag name."))
+ try:
+ existing_target = branch.tags.lookup_tag(tag_name)
+ except errors.NoSuchTag:
+ existing_target = None
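+ # Re-tagging the same revision is harmless; anything else requires
+ # --force so an existing tag is not silently moved.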
+ if not force and existing_target not in (None, revision_id):
+ raise errors.TagAlreadyExists(tag_name)
+ if existing_target == revision_id:
+ note(gettext('Tag %s already exists for that revision.') % tag_name)
+ else:
+ branch.tags.set_tag(tag_name, revision_id)
+ if existing_target is None:
+ note(gettext('Created tag %s.') % tag_name)
+ else:
+ note(gettext('Updated tag %s.') % tag_name)
+
+
+class cmd_tags(Command):
+ __doc__ = """List tags.
+
+ This command shows a table of tag names and the revisions they reference.
+ """
+
+ _see_also = ['tag']
+ takes_options = [
+ custom_help('directory',
+ help='Branch whose tags should be displayed.'),
+ RegistryOption('sort',
+ 'Sort tags by different criteria.', title='Sorting',
+ lazy_registry=('bzrlib.tag', 'tag_sort_methods')
+ ),
+ 'show-ids',
+ 'revision',
+ ]
+
+ @display_command
+ def run(self, directory='.', sort=None, show_ids=False, revision=None):
+ from bzrlib.tag import tag_sort_methods
+ branch, relpath = Branch.open_containing(directory)
+
+ tags = branch.tags.get_tag_dict().items()
+ if not tags:
+ return
+
+ self.add_cleanup(branch.lock_read().unlock)
+ if revision:
+ # Restrict to the specified range
+ tags = self._tags_for_range(branch, revision)
+ if sort is None:
+ sort = tag_sort_methods.get()
+ sort(branch, tags)
+ if not show_ids:
+ # [ (tag, revid), ... ] -> [ (tag, dotted_revno), ... ]
+ for index, (tag, revid) in enumerate(tags):
+ try:
+ revno = branch.revision_id_to_dotted_revno(revid)
+ if isinstance(revno, tuple):
+ revno = '.'.join(map(str, revno))
+ except (errors.NoSuchRevision,
+ errors.GhostRevisionsHaveNoRevno,
+ errors.UnsupportedOperation):
+ # Bad tag data/merges can lead to tagged revisions
+ # which are not in this branch. Fail gracefully ...
+ revno = '?'
+ tags[index] = (tag, revno)
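+ # All revision lookups are done; release the branch lock before
+ # writing the output.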
+ self.cleanup_now()
+ for tag, revspec in tags:
+ self.outf.write('%-20s %s\n' % (tag, revspec))
+
+ def _tags_for_range(self, branch, revision):
+ range_valid = True
+ rev1, rev2 = _get_revision_range(revision, branch, self.name())
+ revid1, revid2 = rev1.rev_id, rev2.rev_id
+ # _get_revision_range will always set revid2 if it's not specified.
+ # If revid1 is None, it means we want to start from the branch
+ # origin which is always a valid ancestor. If revid1 == revid2, the
+ # ancestry check is useless.
+ if revid1 and revid1 != revid2:
+ # FIXME: We really want to use the same graph as
+ # branch.iter_merge_sorted_revisions below, but this is not
+ # easily available -- vila 2011-09-23
+ if branch.repository.get_graph().is_ancestor(revid2, revid1):
+ # We don't want to output anything in this case...
+ return []
+ # only show revisions between revid1 and revid2 (inclusive)
+ tagged_revids = branch.tags.get_reverse_tag_dict()
+ found = []
+ for r in branch.iter_merge_sorted_revisions(
+ start_revision_id=revid2, stop_revision_id=revid1,
+ stop_rule='include'):
+ revid_tags = tagged_revids.get(r[0], None)
+ if revid_tags:
+ found.extend([(tag, r[0]) for tag in revid_tags])
+ return found
+
+
+class cmd_reconfigure(Command):
+ __doc__ = """Reconfigure the type of a bzr directory.
+
+ A target configuration must be specified.
+
+ For checkouts, the bind-to location will be auto-detected if not specified.
+ The order of preference is:
+ 1. For a lightweight checkout, the current bound location.
+ 2. For branches that used to be checkouts, the previously-bound location.
+ 3. The push location.
+ 4. The parent location.
+ If none of these is available, --bind-to must be specified.
+ """
+
+ _see_also = ['branches', 'checkouts', 'standalone-trees', 'working-trees']
+ takes_args = ['location?']
+ takes_options = [
+ RegistryOption.from_kwargs(
+ 'tree_type',
+ title='Tree type',
+ help='The relation between branch and tree.',
+ value_switches=True, enum_switch=False,
+ branch='Reconfigure to be an unbound branch with no working tree.',
+ tree='Reconfigure to be an unbound branch with a working tree.',
+ checkout='Reconfigure to be a bound branch with a working tree.',
+ lightweight_checkout='Reconfigure to be a lightweight'
+ ' checkout (with no local history).',
+ ),
+ RegistryOption.from_kwargs(
+ 'repository_type',
+ title='Repository type',
+ help='Location of the repository.',
+ value_switches=True, enum_switch=False,
+ standalone='Reconfigure to be a standalone branch '
+ '(i.e. stop using shared repository).',
+ use_shared='Reconfigure to use a shared repository.',
+ ),
+ RegistryOption.from_kwargs(
+ 'repository_trees',
+ title='Trees in Repository',
+ help='Whether new branches in the repository have trees.',
+ value_switches=True, enum_switch=False,
+ with_trees='Reconfigure repository to create '
+ 'working trees on branches by default.',
+ with_no_trees='Reconfigure repository to not create '
+ 'working trees on branches by default.'
+ ),
+ Option('bind-to', help='Branch to bind checkout to.', type=str),
+ Option('force',
+ help='Perform reconfiguration even if local changes'
+ ' will be lost.'),
+ Option('stacked-on',
+ help='Reconfigure a branch to be stacked on another branch.',
+ type=unicode,
+ ),
+ Option('unstacked',
+ help='Reconfigure a branch to be unstacked. This '
+ 'may require copying substantial data into it.',
+ ),
+ ]
+
+ def run(self, location=None, bind_to=None, force=False,
+ tree_type=None, repository_type=None, repository_trees=None,
+ stacked_on=None, unstacked=None):
+ directory = controldir.ControlDir.open(location)
+ if stacked_on and unstacked:
+ raise errors.BzrCommandError(gettext("Can't use both --stacked-on and --unstacked"))
+ elif stacked_on is not None:
+ reconfigure.ReconfigureStackedOn().apply(directory, stacked_on)
+ elif unstacked:
+ reconfigure.ReconfigureUnstacked().apply(directory)
+ # At the moment you can use --stacked-on and a different
+ # reconfiguration shape at the same time; there seems no good reason
+ # to ban it.
+ if (tree_type is None and
+ repository_type is None and
+ repository_trees is None):
+ if stacked_on or unstacked:
+ return
+ else:
+ raise errors.BzrCommandError(gettext('No target configuration '
+ 'specified'))
+ reconfiguration = None
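+ # Each requested aspect (tree type, repository type, repository trees)
+ # is applied as its own reconfiguration, so the options can be
+ # combined in a single invocation.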
+ if tree_type == 'branch':
+ reconfiguration = reconfigure.Reconfigure.to_branch(directory)
+ elif tree_type == 'tree':
+ reconfiguration = reconfigure.Reconfigure.to_tree(directory)
+ elif tree_type == 'checkout':
+ reconfiguration = reconfigure.Reconfigure.to_checkout(
+ directory, bind_to)
+ elif tree_type == 'lightweight-checkout':
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ directory, bind_to)
+ if reconfiguration:
+ reconfiguration.apply(force)
+ reconfiguration = None
+ if repository_type == 'use-shared':
+ reconfiguration = reconfigure.Reconfigure.to_use_shared(directory)
+ elif repository_type == 'standalone':
+ reconfiguration = reconfigure.Reconfigure.to_standalone(directory)
+ if reconfiguration:
+ reconfiguration.apply(force)
+ reconfiguration = None
+ if repository_trees == 'with-trees':
+ reconfiguration = reconfigure.Reconfigure.set_repository_trees(
+ directory, True)
+ elif repository_trees == 'with-no-trees':
+ reconfiguration = reconfigure.Reconfigure.set_repository_trees(
+ directory, False)
+ if reconfiguration:
+ reconfiguration.apply(force)
+ reconfiguration = None
+
+
+class cmd_switch(Command):
+ __doc__ = """Set the branch of a checkout and update.
+
+ For lightweight checkouts, this changes the branch being referenced.
+ For heavyweight checkouts, this checks that there are no local commits
+ versus the current bound branch, then it makes the local branch a mirror
+ of the new location and binds to it.
+
+ In both cases, the working tree is updated and uncommitted changes
+ are merged. The user can commit or revert these as they desire.
+
+ Pending merges need to be committed or reverted before using switch.
+
+ The path to the branch to switch to can be specified relative to the parent
+ directory of the current branch. For example, if you are currently in a
+ checkout of /path/to/branch, specifying 'newbranch' will find a branch at
+ /path/to/newbranch.
+
+ A bound branch uses the nickname of its master branch unless the nickname
+ is set locally, in which case switching will update the local nickname to
+ be that of the master.
+ """
+
+ takes_args = ['to_location?']
+ takes_options = ['directory',
+ Option('force',
+ help='Switch even if local commits will be lost.'),
+ 'revision',
+ Option('create-branch', short_name='b',
+ help='Create the target branch from this one before'
+ ' switching to it.'),
+ ]
+
+ def run(self, to_location=None, force=False, create_branch=False,
+ revision=None, directory=u'.'):
+ from bzrlib import switch
+ tree_location = directory
+ revision = _get_one_revision('switch', revision)
+ possible_transports = []
+ control_dir = controldir.ControlDir.open_containing(tree_location,
+ possible_transports=possible_transports)[0]
+ if to_location is None:
+ if revision is None:
+ raise errors.BzrCommandError(gettext('You must supply either a'
+ ' revision or a location'))
+ to_location = tree_location
+ try:
+ branch = control_dir.open_branch(
+ possible_transports=possible_transports)
+ had_explicit_nick = branch.get_config().has_explicit_nickname()
+ except errors.NotBranchError:
+ branch = None
+ had_explicit_nick = False
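+ # With --create-branch, sprout a new sibling branch next to the
+ # current one and switch to it; otherwise open the requested branch,
+ # falling back to a sibling lookup for relative names.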
+ if create_branch:
+ if branch is None:
+ raise errors.BzrCommandError(
+ gettext('cannot create branch without source branch'))
+ to_location = lookup_new_sibling_branch(control_dir, to_location,
+ possible_transports=possible_transports)
+ to_branch = branch.bzrdir.sprout(to_location,
+ possible_transports=possible_transports,
+ source_branch=branch).open_branch()
+ else:
+ try:
+ to_branch = Branch.open(to_location,
+ possible_transports=possible_transports)
+ except errors.NotBranchError:
+ to_branch = open_sibling_branch(control_dir, to_location,
+ possible_transports=possible_transports)
+ if revision is not None:
+ revision = revision.as_revision_id(to_branch)
+ switch.switch(control_dir, to_branch, force, revision_id=revision)
+ if had_explicit_nick:
+ branch = control_dir.open_branch() #get the new branch!
+ branch.nick = to_branch.nick
+ note(gettext('Switched to branch: %s'),
+ urlutils.unescape_for_display(to_branch.base, 'utf-8'))
+
+
+
+class cmd_view(Command):
+ __doc__ = """Manage filtered views.
+
+ Views provide a mask over the tree so that users can focus on
+ a subset of a tree when doing their work. After creating a view,
+ commands that support a list of files - status, diff, commit, etc -
+ effectively have that list of files implicitly given each time.
+ An explicit list of files can still be given but those files
+ must be within the current view.
+
+ In most cases, a view has a short life-span: it is created to make
+ a selected change and is deleted once that change is committed.
+ At other times, you may wish to create one or more named views
+ and switch between them.
+
+ To disable the current view without deleting it, you can switch to
+ the pseudo view called ``off``. This can be useful when you need
+ to see the whole tree for an operation or two (e.g. merge) but
+ want to switch back to your view after that.
+
+ :Examples:
+ To define the current view::
+
+ bzr view file1 dir1 ...
+
+ To list the current view::
+
+ bzr view
+
+ To delete the current view::
+
+ bzr view --delete
+
+ To disable the current view without deleting it::
+
+ bzr view --switch off
+
+ To define a named view and switch to it::
+
+ bzr view --name view-name file1 dir1 ...
+
+ To list a named view::
+
+ bzr view --name view-name
+
+ To delete a named view::
+
+ bzr view --name view-name --delete
+
+ To switch to a named view::
+
+ bzr view --switch view-name
+
+ To list all views defined::
+
+ bzr view --all
+
+ To delete all views::
+
+ bzr view --delete --all
+ """
+
+ _see_also = []
+ takes_args = ['file*']
+ takes_options = [
+ Option('all',
+ help='Apply list or delete action to all views.',
+ ),
+ Option('delete',
+ help='Delete the view.',
+ ),
+ Option('name',
+ help='Name of the view to define, list or delete.',
+ type=unicode,
+ ),
+ Option('switch',
+ help='Name of the view to switch to.',
+ type=unicode,
+ ),
+ ]
+
+ def run(self, file_list,
+ all=False,
+ delete=False,
+ name=None,
+ switch=None,
+ ):
+ tree, file_list = WorkingTree.open_containing_paths(file_list,
+ apply_view=False)
+ current_view, view_dict = tree.views.get_view_info()
+ if name is None:
+ name = current_view
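+ # Dispatch on the requested action: --delete, --switch, --all,
+ # defining a view from a file list, or displaying the current view.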
+ if delete:
+ if file_list:
+ raise errors.BzrCommandError(gettext(
+ "Both --delete and a file list specified"))
+ elif switch:
+ raise errors.BzrCommandError(gettext(
+ "Both --delete and --switch specified"))
+ elif all:
+ tree.views.set_view_info(None, {})
+ self.outf.write(gettext("Deleted all views.\n"))
+ elif name is None:
+ raise errors.BzrCommandError(gettext("No current view to delete"))
+ else:
+ tree.views.delete_view(name)
+ self.outf.write(gettext("Deleted '%s' view.\n") % name)
+ elif switch:
+ if file_list:
+ raise errors.BzrCommandError(gettext(
+ "Both --switch and a file list specified"))
+ elif all:
+ raise errors.BzrCommandError(gettext(
+ "Both --switch and --all specified"))
+ elif switch == 'off':
+ if current_view is None:
+ raise errors.BzrCommandError(gettext("No current view to disable"))
+ tree.views.set_view_info(None, view_dict)
+ self.outf.write(gettext("Disabled '%s' view.\n") % (current_view))
+ else:
+ tree.views.set_view_info(switch, view_dict)
+ view_str = views.view_display_str(tree.views.lookup_view())
+ self.outf.write(gettext("Using '{0}' view: {1}\n").format(switch, view_str))
+ elif all:
+ if view_dict:
+ self.outf.write(gettext('Views defined:\n'))
+ for view in sorted(view_dict):
+ if view == current_view:
+ active = "=>"
+ else:
+ active = " "
+ view_str = views.view_display_str(view_dict[view])
+ self.outf.write('%s %-20s %s\n' % (active, view, view_str))
+ else:
+ self.outf.write(gettext('No views defined.\n'))
+ elif file_list:
+ if name is None:
+ # No name given and no current view set
+ name = 'my'
+ elif name == 'off':
+ raise errors.BzrCommandError(gettext(
+ "Cannot change the 'off' pseudo view"))
+ tree.views.set_view(name, sorted(file_list))
+ view_str = views.view_display_str(tree.views.lookup_view())
+ self.outf.write(gettext("Using '{0}' view: {1}\n").format(name, view_str))
+ else:
+ # list the files
+ if name is None:
+ # No name given and no current view set
+ self.outf.write(gettext('No current view.\n'))
+ else:
+ view_str = views.view_display_str(tree.views.lookup_view(name))
+ self.outf.write(gettext("'{0}' view is: {1}\n").format(name, view_str))
+
+
+class cmd_hooks(Command):
+ __doc__ = """Show hooks."""
+
+ hidden = True
+
+ def run(self):
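+ # Walk the global registry of hook points and show which callbacks,
+ # if any, are installed on each.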
+ for hook_key in sorted(hooks.known_hooks.keys()):
+ some_hooks = hooks.known_hooks_key_to_object(hook_key)
+ self.outf.write("%s:\n" % type(some_hooks).__name__)
+ for hook_name, hook_point in sorted(some_hooks.items()):
+ self.outf.write(" %s:\n" % (hook_name,))
+ found_hooks = list(hook_point)
+ if found_hooks:
+ for hook in found_hooks:
+ self.outf.write(" %s\n" %
+ (some_hooks.get_hook_name(hook),))
+ else:
+ self.outf.write(gettext(" <no hooks installed>\n"))
+
+
+class cmd_remove_branch(Command):
+ __doc__ = """Remove a branch.
+
+ This will remove the branch from the specified location but
+ will keep any working tree or repository in place.
+
+ :Examples:
+
+ Remove the branch at repo/trunk::
+
+ bzr remove-branch repo/trunk
+
+ """
+
+ takes_args = ["location?"]
+
+ takes_options = ['directory',
+ Option('force', help='Remove branch even if it is the active branch.')]
+
+ aliases = ["rmbranch"]
+
+ def run(self, directory=None, location=None, force=False):
+ br = open_nearby_branch(near=directory, location=location)
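+ # Refuse to delete the branch that the working tree is currently
+ # using, unless --force was given.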
+ if not force and br.bzrdir.has_workingtree():
+ try:
+ active_branch = br.bzrdir.open_branch(name="")
+ except errors.NotBranchError:
+ active_branch = None
+ if (active_branch is not None and
+ br.control_url == active_branch.control_url):
+ raise errors.BzrCommandError(
+ gettext("Branch is active. Use --force to remove it."))
+ br.bzrdir.destroy_branch(br.name)
+
+
+class cmd_shelve(Command):
+ __doc__ = """Temporarily set aside some changes from the current tree.
+
+ Shelve allows you to temporarily put changes you've made "on the shelf",
+ ie. out of the way, until a later time when you can bring them back from
+ the shelf with the 'unshelve' command. The changes are stored alongside
+ your working tree, and so they aren't propagated along with your branch nor
+ will they survive its deletion.
+
+ If shelve --list is specified, previously-shelved changes are listed.
+
+ Shelve is intended to help separate several sets of changes that have
+ been inappropriately mingled. If you just want to get rid of all changes
+ and you don't need to restore them later, use revert. If you want to
+ shelve all text changes at once, use shelve --all.
+
+ If filenames are specified, only the changes to those files will be
+ shelved. Other files will be left untouched.
+
+ If a revision is specified, changes since that revision will be shelved.
+
+ You can put multiple items on the shelf, and by default, 'unshelve' will
+ restore the most recently shelved changes.
+
+ For complicated changes, it is possible to edit the changes in a separate
+ editor program to decide what the file remaining in the working copy
+ should look like. To do this, add the configuration option
+
+ change_editor = PROGRAM @new_path @old_path
+
+ where @new_path is replaced with the path of the new version of the
+ file and @old_path is replaced with the path of the old version of
+ the file. The PROGRAM should save the new file with the desired
+ contents of the file in the working tree.
+
+ """
+
+ takes_args = ['file*']
+
+ takes_options = [
+ 'directory',
+ 'revision',
+ Option('all', help='Shelve all changes.'),
+ 'message',
+ RegistryOption('writer', 'Method to use for writing diffs.',
+ bzrlib.option.diff_writer_registry,
+ value_switches=True, enum_switch=False),
+
+ Option('list', help='List shelved changes.'),
+ Option('destroy',
+ help='Destroy removed changes instead of shelving them.'),
+ ]
+ _see_also = ['unshelve', 'configuration']
+
+ def run(self, revision=None, all=False, file_list=None, message=None,
+ writer=None, list=False, destroy=False, directory=None):
+ if list:
+ return self.run_for_list(directory=directory)
+ from bzrlib.shelf_ui import Shelver
+ if writer is None:
+ writer = bzrlib.option.diff_writer_registry.get()
+ try:
+ shelver = Shelver.from_args(writer(sys.stdout), revision, all,
+ file_list, message, destroy=destroy, directory=directory)
+ try:
+ shelver.run()
+ finally:
+ shelver.finalize()
+ except errors.UserAbort:
+ return 0
+
+ def run_for_list(self, directory=None):
+ if directory is None:
+ directory = u'.'
+ tree = WorkingTree.open_containing(directory)[0]
+ self.add_cleanup(tree.lock_read().unlock)
+ manager = tree.get_shelf_manager()
+ shelves = manager.active_shelves()
+ if len(shelves) == 0:
+ note(gettext('No shelved changes.'))
+ return 0
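+ # Show the most recently shelved changes first; the non-zero return
+ # value indicates that shelved changes exist.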
+ for shelf_id in reversed(shelves):
+ message = manager.get_metadata(shelf_id).get('message')
+ if message is None:
+ message = '<no message>'
+ self.outf.write('%3d: %s\n' % (shelf_id, message))
+ return 1
+
+
+class cmd_unshelve(Command):
+ __doc__ = """Restore shelved changes.
+
+ By default, the most recently shelved changes are restored. However if you
+ specify a shelf by id those changes will be restored instead. This works
+ best when the changes don't depend on each other.
+ """
+
+ takes_args = ['shelf_id?']
+ takes_options = [
+ 'directory',
+ RegistryOption.from_kwargs(
+ 'action', help="The action to perform.",
+ enum_switch=False, value_switches=True,
+ apply="Apply changes and remove from the shelf.",
+ dry_run="Show changes, but do not apply or remove them.",
+ preview="Instead of unshelving the changes, show the diff that "
+ "would result from unshelving.",
+ delete_only="Delete changes without applying them.",
+ keep="Apply changes but don't delete them.",
+ )
+ ]
+ _see_also = ['shelve']
+
+ def run(self, shelf_id=None, action='apply', directory=u'.'):
+ from bzrlib.shelf_ui import Unshelver
+ unshelver = Unshelver.from_args(shelf_id, action, directory=directory)
+ try:
+ unshelver.run()
+ finally:
+ unshelver.tree.unlock()
+
+
+class cmd_clean_tree(Command):
+ __doc__ = """Remove unwanted files from working tree.
+
+ By default, only unknown files, not ignored files, are deleted. Versioned
+ files are never deleted.
+
+ Another class is 'detritus', which includes files emitted by bzr during
+ normal operations and selftests. (The value of these files decreases with
+ time.)
+
+ If no options are specified, unknown files are deleted. Otherwise, option
+ flags are respected, and may be combined.
+
+ To check what clean-tree will do, use --dry-run.
+ """
+ takes_options = ['directory',
+ Option('ignored', help='Delete all ignored files.'),
+ Option('detritus', help='Delete conflict files, merge and revert'
+ ' backups, and failed selftest dirs.'),
+ Option('unknown',
+ help='Delete files unknown to bzr (default).'),
+ Option('dry-run', help='Show files to delete instead of'
+ ' deleting them.'),
+ Option('force', help='Do not prompt before deleting.')]
+ def run(self, unknown=False, ignored=False, detritus=False, dry_run=False,
+ force=False, directory=u'.'):
+ from bzrlib.clean_tree import clean_tree
+ if not (unknown or ignored or detritus):
+ unknown = True
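+ # A dry run never deletes anything, so there is no point prompting.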
+ if dry_run:
+ force = True
+ clean_tree(directory, unknown=unknown, ignored=ignored,
+ detritus=detritus, dry_run=dry_run, no_prompt=force)
+
+
+class cmd_reference(Command):
+ __doc__ = """list, view and set branch locations for nested trees.
+
+ If no arguments are provided, lists the branch locations for nested trees.
+ If one argument is provided, display the branch location for that tree.
+ If two arguments are provided, set the branch location for that tree.
+ """
+
+ hidden = True
+
+ takes_args = ['path?', 'location?']
+
+ def run(self, path=None, location=None):
+ branchdir = '.'
+ if path is not None:
+ branchdir = path
+ tree, branch, relpath =(
+ controldir.ControlDir.open_containing_tree_or_branch(branchdir))
+ if path is not None:
+ path = relpath
+ if tree is None:
+ tree = branch.basis_tree()
+ if path is None:
+ info = branch._get_all_reference_info().iteritems()
+ self._display_reference_info(tree, branch, info)
+ else:
+ file_id = tree.path2id(path)
+ if file_id is None:
+ raise errors.NotVersionedError(path)
+ if location is None:
+ info = [(file_id, branch.get_reference_info(file_id))]
+ self._display_reference_info(tree, branch, info)
+ else:
+ branch.set_reference_info(file_id, path, location)
+
+ def _display_reference_info(self, tree, branch, info):
+ ref_list = []
+ for file_id, (path, location) in info:
+ try:
+ path = tree.id2path(file_id)
+ except errors.NoSuchId:
+ pass
+ ref_list.append((path, location))
+ for path, location in sorted(ref_list):
+ self.outf.write('%s %s\n' % (path, location))
+
+
+class cmd_export_pot(Command):
+ __doc__ = """Export command helps and error messages in po format."""
+
+ hidden = True
+ takes_options = [Option('plugin',
+ help='Export help text from named command '\
+ '(defaults to all built-in commands).',
+ type=str),
+ Option('include-duplicates',
+ help='Output multiple copies of the same msgid '
+ 'string if it appears more than once.'),
+ ]
+
+ def run(self, plugin=None, include_duplicates=False):
+ from bzrlib.export_pot import export_pot
+ export_pot(self.outf, plugin, include_duplicates)
+
+
+def _register_lazy_builtins():
+ # register lazy builtins from other modules; called at startup and should
+ # be only called once.
+ for (name, aliases, module_name) in [
+ ('cmd_bundle_info', [], 'bzrlib.bundle.commands'),
+ ('cmd_config', [], 'bzrlib.config'),
+ ('cmd_dpush', [], 'bzrlib.foreign'),
+ ('cmd_version_info', [], 'bzrlib.cmd_version_info'),
+ ('cmd_resolve', ['resolved'], 'bzrlib.conflicts'),
+ ('cmd_conflicts', [], 'bzrlib.conflicts'),
+ ('cmd_sign_my_commits', [], 'bzrlib.commit_signature_commands'),
+ ('cmd_verify_signatures', [], 'bzrlib.commit_signature_commands'),
+ ('cmd_test_script', [], 'bzrlib.cmd_test_script'),
+ ]:
+ builtin_command_registry.register_lazy(name, aliases, module_name)
diff --git a/bzrlib/bundle/__init__.py b/bzrlib/bundle/__init__.py
new file mode 100644
index 0000000..7f8a143
--- /dev/null
+++ b/bzrlib/bundle/__init__.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ errors,
+ transport as _mod_transport,
+ urlutils,
+ )
+from bzrlib.bundle import serializer as _serializer
+from bzrlib.merge_directive import MergeDirective
+from bzrlib.i18n import gettext
+""")
+from bzrlib.trace import note
+
+
+def read_mergeable_from_url(url, _do_directive=True, possible_transports=None):
+ """Read mergable object from a given URL.
+
+ :return: An object supporting get_target_revision. Raises NotABundle if
+ the target is not a mergeable type.
+ """
+ child_transport = _mod_transport.get_transport(url,
+ possible_transports=possible_transports)
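+ # Split the URL into its containing directory and filename so that
+ # redirections can be followed relative to the directory.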
+ transport = child_transport.clone('..')
+ filename = transport.relpath(child_transport.base)
+ mergeable, transport = read_mergeable_from_transport(transport, filename,
+ _do_directive)
+ return mergeable
+
+
+def read_mergeable_from_transport(transport, filename, _do_directive=True):
+ def get_bundle(transport):
+ return StringIO(transport.get_bytes(filename)), transport
+
+ def redirected_transport(transport, exception, redirection_notice):
+ note(redirection_notice)
+ url, filename = urlutils.split(exception.target,
+ exclude_trailing_slash=False)
+ if not filename:
+ raise errors.NotABundle(gettext('A directory cannot be a bundle'))
+ return _mod_transport.get_transport_from_url(url)
+
+ try:
+ bytef, transport = _mod_transport.do_catching_redirections(
+ get_bundle, transport, redirected_transport)
+ except errors.TooManyRedirections:
+ raise errors.NotABundle(transport.clone(filename).base)
+ except (errors.ConnectionReset, errors.ConnectionError), e:
+ raise
+ except (errors.TransportError, errors.PathError), e:
+ raise errors.NotABundle(str(e))
+ except (IOError,), e:
+ # jam 20060707
+ # Abstraction leakage, SFTPTransport.get('directory')
+ # doesn't always fail at get() time. Sometimes it fails
+ # during read. And that raises a generic IOError with
+ # just the string 'Failure'
+ # StubSFTPServer does fail during get() (because of prefetch)
+ # so it has an opportunity to translate the error.
+ raise errors.NotABundle(str(e))
+
+ if _do_directive:
+ try:
+ return MergeDirective.from_lines(bytef), transport
+ except errors.NotAMergeDirective:
+ bytef.seek(0)
+
+ return _serializer.read_bundle(bytef), transport
diff --git a/bzrlib/bundle/apply_bundle.py b/bzrlib/bundle/apply_bundle.py
new file mode 100644
index 0000000..514eab3
--- /dev/null
+++ b/bzrlib/bundle/apply_bundle.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""\
+This contains functionality for installing bundles into repositories
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import errors, ui
+from bzrlib.i18n import gettext
+from bzrlib.merge import Merger
+from bzrlib.progress import ProgressPhase
+from bzrlib.trace import note
+from bzrlib.vf_repository import install_revision
+
+
+def install_bundle(repository, bundle_reader):
+ custom_install = getattr(bundle_reader, 'install', None)
+ if custom_install is not None:
+ return custom_install(repository)
+ pb = ui.ui_factory.nested_progress_bar()
+ repository.lock_write()
+ try:
+ real_revisions = bundle_reader.real_revisions
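+ # real_revisions lists the newest revision first; install in reverse
+ # order so that parents are present before their descendants.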
+ for i, revision in enumerate(reversed(real_revisions)):
+ pb.update(gettext("Install revisions"),i, len(real_revisions))
+ if repository.has_revision(revision.revision_id):
+ continue
+ cset_tree = bundle_reader.revision_tree(repository,
+ revision.revision_id)
+ install_revision(repository, revision, cset_tree)
+ finally:
+ repository.unlock()
+ pb.finished()
+
+
+def merge_bundle(reader, tree, check_clean, merge_type,
+ reprocess, show_base, change_reporter=None):
+ """Merge a revision bundle into the current tree."""
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ pp = ProgressPhase("Merge phase", 6, pb)
+ pp.next_phase()
+ install_bundle(tree.branch.repository, reader)
+ merger = Merger(tree.branch, this_tree=tree,
+ change_reporter=change_reporter)
+ merger.pp = pp
+ merger.pp.next_phase()
+ if check_clean and tree.has_changes():
+ raise errors.UncommittedChanges(tree)
+ merger.other_rev_id = reader.target
+ merger.other_tree = merger.revision_tree(reader.target)
+ merger.other_basis = reader.target
+ merger.pp.next_phase()
+ merger.find_base()
+ if merger.base_rev_id == merger.other_rev_id:
+ note(gettext("Nothing to do."))
+ return 0
+ merger.merge_type = merge_type
+ merger.show_base = show_base
+ merger.reprocess = reprocess
+ conflicts = merger.do_merge()
+ merger.set_pending()
+ finally:
+ pb.clear()
+ return conflicts
diff --git a/bzrlib/bundle/bundle_data.py b/bzrlib/bundle/bundle_data.py
new file mode 100644
index 0000000..3b1a4e4
--- /dev/null
+++ b/bzrlib/bundle/bundle_data.py
@@ -0,0 +1,794 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Read in a bundle stream, and process it into a BundleReader object."""
+
+from __future__ import absolute_import
+
+import base64
+from cStringIO import StringIO
+import os
+import pprint
+
+from bzrlib import (
+ osutils,
+ timestamp,
+ )
+from bzrlib.bundle import apply_bundle
+from bzrlib.errors import (
+ TestamentMismatch,
+ BzrError,
+ )
+from bzrlib.inventory import (
+ Inventory,
+ InventoryDirectory,
+ InventoryFile,
+ InventoryLink,
+ )
+from bzrlib.osutils import sha_string, pathjoin
+from bzrlib.revision import Revision, NULL_REVISION
+from bzrlib.testament import StrictTestament
+from bzrlib.trace import mutter, warning
+from bzrlib.tree import Tree
+from bzrlib.xml5 import serializer_v5
+
+
+class RevisionInfo(object):
+ """Gets filled out for each revision object that is read.
+ """
+ def __init__(self, revision_id):
+ self.revision_id = revision_id
+ self.sha1 = None
+ self.committer = None
+ self.date = None
+ self.timestamp = None
+ self.timezone = None
+ self.inventory_sha1 = None
+
+ self.parent_ids = None
+ self.base_id = None
+ self.message = None
+ self.properties = None
+ self.tree_actions = None
+
+ def __str__(self):
+ return pprint.pformat(self.__dict__)
+
+ def as_revision(self):
+ rev = Revision(revision_id=self.revision_id,
+ committer=self.committer,
+ timestamp=float(self.timestamp),
+ timezone=int(self.timezone),
+ inventory_sha1=self.inventory_sha1,
+ message='\n'.join(self.message))
+
+ if self.parent_ids:
+ rev.parent_ids.extend(self.parent_ids)
+
+ if self.properties:
+ for property in self.properties:
+ key_end = property.find(': ')
+ if key_end == -1:
+ if not property.endswith(':'):
+ raise ValueError(property)
+ key = str(property[:-1])
+ value = ''
+ else:
+ key = str(property[:key_end])
+ value = property[key_end+2:]
+ rev.properties[key] = value
+
+ return rev
+
+ @staticmethod
+ def from_revision(revision):
+ revision_info = RevisionInfo(revision.revision_id)
+ date = timestamp.format_highres_date(revision.timestamp,
+ revision.timezone)
+ revision_info.date = date
+ revision_info.timezone = revision.timezone
+ revision_info.timestamp = revision.timestamp
+ revision_info.message = revision.message.split('\n')
+ revision_info.properties = [': '.join(p) for p in
+ revision.properties.iteritems()]
+ return revision_info
+
+
+class BundleInfo(object):
+ """This contains the meta information. Stuff that allows you to
+ recreate the revision or inventory XML.
+ """
+ def __init__(self, bundle_format=None):
+ self.bundle_format = None
+ self.committer = None
+ self.date = None
+ self.message = None
+
+ # A list of RevisionInfo objects
+ self.revisions = []
+
+ # The next entries are created during complete_info() and
+ # other post-read functions.
+
+ # A list of real Revision objects
+ self.real_revisions = []
+
+ self.timestamp = None
+ self.timezone = None
+
+ # Have we checked the repository yet?
+ self._validated_revisions_against_repo = False
+
+ def __str__(self):
+ return pprint.pformat(self.__dict__)
+
+ def complete_info(self):
+ """This makes sure that all information is properly
+ split up, based on the assumptions that can be made
+ when information is missing.
+ """
+ from bzrlib.timestamp import unpack_highres_date
+ # Put in all of the guessable information.
+ if not self.timestamp and self.date:
+ self.timestamp, self.timezone = unpack_highres_date(self.date)
+
+ self.real_revisions = []
+ for rev in self.revisions:
+ if rev.timestamp is None:
+ if rev.date is not None:
+ rev.timestamp, rev.timezone = \
+ unpack_highres_date(rev.date)
+ else:
+ rev.timestamp = self.timestamp
+ rev.timezone = self.timezone
+ if rev.message is None and self.message:
+ rev.message = self.message
+ if rev.committer is None and self.committer:
+ rev.committer = self.committer
+ self.real_revisions.append(rev.as_revision())
+
+ def get_base(self, revision):
+ revision_info = self.get_revision_info(revision.revision_id)
+ if revision_info.base_id is not None:
+ return revision_info.base_id
+ if len(revision.parent_ids) == 0:
+ # There is no base listed, and
+ # the lowest revision doesn't have a parent
+ # so this is probably against the empty tree
+ # and thus base truly is NULL_REVISION
+ return NULL_REVISION
+ else:
+ return revision.parent_ids[-1]
+
+ def _get_target(self):
+ """Return the target revision."""
+ if len(self.real_revisions) > 0:
+ return self.real_revisions[0].revision_id
+ elif len(self.revisions) > 0:
+ return self.revisions[0].revision_id
+ return None
+
+ target = property(_get_target, doc='The target revision id')
+
+ def get_revision(self, revision_id):
+ for r in self.real_revisions:
+ if r.revision_id == revision_id:
+ return r
+ raise KeyError(revision_id)
+
+ def get_revision_info(self, revision_id):
+ for r in self.revisions:
+ if r.revision_id == revision_id:
+ return r
+ raise KeyError(revision_id)
+
+ def revision_tree(self, repository, revision_id, base=None):
+ revision = self.get_revision(revision_id)
+ base = self.get_base(revision)
+ if base == revision_id:
+ raise AssertionError()
+ if not self._validated_revisions_against_repo:
+ self._validate_references_from_repository(repository)
+ revision_info = self.get_revision_info(revision_id)
+ inventory_revision_id = revision_id
+ bundle_tree = BundleTree(repository.revision_tree(base),
+ inventory_revision_id)
+ self._update_tree(bundle_tree, revision_id)
+
+ inv = bundle_tree.inventory
+ self._validate_inventory(inv, revision_id)
+ self._validate_revision(bundle_tree, revision_id)
+
+ return bundle_tree
+
+ def _validate_references_from_repository(self, repository):
+ """Now that we have a repository which should have some of the
+ revisions we care about, go through and validate all of them
+ that we can.
+ """
+ rev_to_sha = {}
+ inv_to_sha = {}
+ def add_sha(d, revision_id, sha1):
+ if revision_id is None:
+ if sha1 is not None:
+ raise BzrError('A Null revision should always'
+ ' have a null sha1 hash')
+ return
+ if revision_id in d:
+ # This really should have been validated as part
+ # of _validate_revisions but lets do it again
+ if sha1 != d[revision_id]:
+ raise BzrError('** Revision %r referenced with 2 different'
+ ' sha hashes %s != %s' % (revision_id,
+ sha1, d[revision_id]))
+ else:
+ d[revision_id] = sha1
+
+ # All of the contained revisions were checked
+ # in _validate_revisions
+ checked = {}
+ for rev_info in self.revisions:
+ checked[rev_info.revision_id] = True
+ add_sha(rev_to_sha, rev_info.revision_id, rev_info.sha1)
+
+ for (rev, rev_info) in zip(self.real_revisions, self.revisions):
+ add_sha(inv_to_sha, rev_info.revision_id, rev_info.inventory_sha1)
+
+ count = 0
+ missing = {}
+ for revision_id, sha1 in rev_to_sha.iteritems():
+ if repository.has_revision(revision_id):
+ testament = StrictTestament.from_revision(repository,
+ revision_id)
+ local_sha1 = self._testament_sha1_from_revision(repository,
+ revision_id)
+ if sha1 != local_sha1:
+ raise BzrError('sha1 mismatch. For revision id {%s}'
+ ' local: %s, bundle: %s' % (revision_id, local_sha1, sha1))
+ else:
+ count += 1
+ elif revision_id not in checked:
+ missing[revision_id] = sha1
+
+ if len(missing) > 0:
+ # I don't know if this is an error yet
+ warning('Not all revision hashes could be validated.'
+ ' Unable to validate %d hashes' % len(missing))
+ mutter('Verified %d sha hashes for the bundle.' % count)
+ self._validated_revisions_against_repo = True
+
+ def _validate_inventory(self, inv, revision_id):
+ """At this point we should have generated the BundleTree,
+ so build up an inventory, and make sure the hashes match.
+ """
+ # Now we should have a complete inventory entry.
+ s = serializer_v5.write_inventory_to_string(inv)
+ sha1 = sha_string(s)
+ # Target revision is the last entry in the real_revisions list
+ rev = self.get_revision(revision_id)
+ if rev.revision_id != revision_id:
+ raise AssertionError()
+ if sha1 != rev.inventory_sha1:
+ f = open(',,bogus-inv', 'wb')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+ warning('Inventory sha hash mismatch for revision %s. %s'
+ ' != %s' % (revision_id, sha1, rev.inventory_sha1))
+
+ def _validate_revision(self, tree, revision_id):
+ """Make sure all revision entries match their checksum."""
+
+ # This is a mapping from each revision id to its sha hash
+ rev_to_sha1 = {}
+
+ rev = self.get_revision(revision_id)
+ rev_info = self.get_revision_info(revision_id)
+ if not (rev.revision_id == rev_info.revision_id):
+ raise AssertionError()
+ if not (rev.revision_id == revision_id):
+ raise AssertionError()
+ sha1 = self._testament_sha1(rev, tree)
+ if sha1 != rev_info.sha1:
+ raise TestamentMismatch(rev.revision_id, rev_info.sha1, sha1)
+ if rev.revision_id in rev_to_sha1:
+ raise BzrError('Revision {%s} given twice in the list'
+ % (rev.revision_id))
+ rev_to_sha1[rev.revision_id] = sha1
+
+ def _update_tree(self, bundle_tree, revision_id):
+ """This fills out a BundleTree based on the information
+ that was read in.
+
+ :param bundle_tree: A BundleTree to update with the new information.
+ """
+
+ def get_rev_id(last_changed, path, kind):
+ if last_changed is not None:
+ # last_changed will be a Unicode string because of how it was
+ # read. Convert it back to utf8.
+ changed_revision_id = osutils.safe_revision_id(last_changed,
+ warn=False)
+ else:
+ changed_revision_id = revision_id
+ bundle_tree.note_last_changed(path, changed_revision_id)
+ return changed_revision_id
+
+ def extra_info(info, new_path):
+ last_changed = None
+ encoding = None
+ for info_item in info:
+ try:
+ name, value = info_item.split(':', 1)
+ except ValueError:
+ raise ValueError('Value %r has no colon' % info_item)
+ if name == 'last-changed':
+ last_changed = value
+ elif name == 'executable':
+ val = (value == 'yes')
+ bundle_tree.note_executable(new_path, val)
+ elif name == 'target':
+ bundle_tree.note_target(new_path, value)
+ elif name == 'encoding':
+ encoding = value
+ return last_changed, encoding
+
+ def do_patch(path, lines, encoding):
+ if encoding == 'base64':
+ patch = base64.decodestring(''.join(lines))
+ elif encoding is None:
+ patch = ''.join(lines)
+ else:
+ raise ValueError(encoding)
+ bundle_tree.note_patch(path, patch)
+
+ def renamed(kind, extra, lines):
+ info = extra.split(' // ')
+ if len(info) < 2:
+ raise BzrError('renamed action lines need both a from and to'
+ ': %r' % extra)
+ old_path = info[0]
+ if info[1].startswith('=> '):
+ new_path = info[1][3:]
+ else:
+ new_path = info[1]
+
+ bundle_tree.note_rename(old_path, new_path)
+ last_modified, encoding = extra_info(info[2:], new_path)
+ revision = get_rev_id(last_modified, new_path, kind)
+ if lines:
+ do_patch(new_path, lines, encoding)
+
+ def removed(kind, extra, lines):
+ info = extra.split(' // ')
+ if len(info) > 1:
+ # TODO: in the future we might allow file ids to be
+ # given for removed entries
+ raise BzrError('removed action lines should only have the path'
+ ': %r' % extra)
+ path = info[0]
+ bundle_tree.note_deletion(path)
+
+ def added(kind, extra, lines):
+ info = extra.split(' // ')
+ if len(info) <= 1:
+ raise BzrError('add action lines require the path and file id'
+ ': %r' % extra)
+ elif len(info) > 5:
+ raise BzrError('add action lines have at most 5 entries'
+ ': %r' % extra)
+ path = info[0]
+ if not info[1].startswith('file-id:'):
+ raise BzrError('The file-id should follow the path for an add'
+ ': %r' % extra)
+ # This will be Unicode because of how the stream is read. Turn it
+ # back into a utf8 file_id
+ file_id = osutils.safe_file_id(info[1][8:], warn=False)
+
+ bundle_tree.note_id(file_id, path, kind)
+ # this will be overridden in extra_info if executable is specified.
+ bundle_tree.note_executable(path, False)
+ last_changed, encoding = extra_info(info[2:], path)
+ revision = get_rev_id(last_changed, path, kind)
+ if kind == 'directory':
+ return
+ do_patch(path, lines, encoding)
+
+ def modified(kind, extra, lines):
+ info = extra.split(' // ')
+ if len(info) < 1:
+ raise BzrError('modified action lines have at least'
+ ' the path in them: %r' % extra)
+ path = info[0]
+
+ last_modified, encoding = extra_info(info[1:], path)
+ revision = get_rev_id(last_modified, path, kind)
+ if lines:
+ do_patch(path, lines, encoding)
+
+ valid_actions = {
+ 'renamed':renamed,
+ 'removed':removed,
+ 'added':added,
+ 'modified':modified
+ }
+ for action_line, lines in \
+ self.get_revision_info(revision_id).tree_actions:
+ first = action_line.find(' ')
+ if first == -1:
+ raise BzrError('Bogus action line'
+ ' (no opening space): %r' % action_line)
+ second = action_line.find(' ', first+1)
+ if second == -1:
+ raise BzrError('Bogus action line'
+ ' (missing second space): %r' % action_line)
+ action = action_line[:first]
+ kind = action_line[first+1:second]
+ if kind not in ('file', 'directory', 'symlink'):
+ raise BzrError('Bogus action line'
+ ' (invalid object kind %r): %r' % (kind, action_line))
+ extra = action_line[second+1:]
+
+ if action not in valid_actions:
+ raise BzrError('Bogus action line'
+ ' (unrecognized action): %r' % action_line)
+ valid_actions[action](kind, extra, lines)
+
+ def install_revisions(self, target_repo, stream_input=True):
+ """Install revisions and return the target revision
+
+ :param target_repo: The repository to install into
+ :param stream_input: Ignored by this implementation.
+ """
+ apply_bundle.install_bundle(target_repo, self)
+ return self.target
+
+ def get_merge_request(self, target_repo):
+ """Provide data for performing a merge
+
+ Returns suggested base, suggested target, and patch verification status
+ """
+ return None, self.target, 'inapplicable'
+
+
+class BundleTree(Tree):
+
+ def __init__(self, base_tree, revision_id):
+ self.base_tree = base_tree
+ self._renamed = {} # Mapping from old_path => new_path
+ self._renamed_r = {} # new_path => old_path
+ self._new_id = {} # new_path => new_id
+ self._new_id_r = {} # new_id => new_path
+ self._kinds = {} # new_id => kind
+ self._last_changed = {} # new_id => revision_id
+ self._executable = {} # new_id => executable value
+ self.patches = {}
+ self._targets = {} # new path => new symlink target
+ self.deleted = []
+ self.contents_by_id = True
+ self.revision_id = revision_id
+ self._inventory = None
+
+ def __str__(self):
+ return pprint.pformat(self.__dict__)
+
+ def note_rename(self, old_path, new_path):
+ """A file/directory has been renamed from old_path => new_path"""
+ if new_path in self._renamed:
+ raise AssertionError(new_path)
+ if old_path in self._renamed_r:
+ raise AssertionError(old_path)
+ self._renamed[new_path] = old_path
+ self._renamed_r[old_path] = new_path
+
+ def note_id(self, new_id, new_path, kind='file'):
+ """Files that don't exist in base need a new id."""
+ self._new_id[new_path] = new_id
+ self._new_id_r[new_id] = new_path
+ self._kinds[new_id] = kind
+
+ def note_last_changed(self, file_id, revision_id):
+ if (file_id in self._last_changed
+ and self._last_changed[file_id] != revision_id):
+ raise BzrError('Mismatched last-changed revision for file_id {%s}'
+ ': %s != %s' % (file_id,
+ self._last_changed[file_id],
+ revision_id))
+ self._last_changed[file_id] = revision_id
+
+ def note_patch(self, new_path, patch):
+ """There is a patch for a given filename."""
+ self.patches[new_path] = patch
+
+ def note_target(self, new_path, target):
+ """The symlink at the new path has the given target"""
+ self._targets[new_path] = target
+
+ def note_deletion(self, old_path):
+ """The file at old_path has been deleted."""
+ self.deleted.append(old_path)
+
+ def note_executable(self, new_path, executable):
+ self._executable[new_path] = executable
+
+ def old_path(self, new_path):
+ """Get the old_path (path in the base_tree) for the file at new_path"""
+ if new_path[:1] in ('\\', '/'):
+ raise ValueError(new_path)
+ old_path = self._renamed.get(new_path)
+ if old_path is not None:
+ return old_path
+ dirname, basename = os.path.split(new_path)
+ # "dirname is not ''" doesn't work, because dirname may be a unicode
+ # string, and "is" requires the objects to be identical
+ if dirname != '':
+ old_dir = self.old_path(dirname)
+ if old_dir is None:
+ old_path = None
+ else:
+ old_path = pathjoin(old_dir, basename)
+ else:
+ old_path = new_path
+ # If the new path wasn't in renamed, the old one shouldn't be in
+ # renamed_r
+ if old_path in self._renamed_r:
+ return None
+ return old_path
+
+ def new_path(self, old_path):
+ """Get the new_path (path in the target_tree) for the file at old_path
+ in the base tree.
+ """
+ if old_path[:1] in ('\\', '/'):
+ raise ValueError(old_path)
+ new_path = self._renamed_r.get(old_path)
+ if new_path is not None:
+ return new_path
+ if new_path in self._renamed:
+ return None
+ dirname, basename = os.path.split(old_path)
+ if dirname != '':
+ new_dir = self.new_path(dirname)
+ if new_dir is None:
+ new_path = None
+ else:
+ new_path = pathjoin(new_dir, basename)
+ else:
+ new_path = old_path
+ # If the old path wasn't in renamed, the new one shouldn't be in
+ # renamed_r
+ if new_path in self._renamed:
+ return None
+ return new_path
+
+ def get_root_id(self):
+ return self.path2id('')
+
+ def path2id(self, path):
+ """Return the id of the file present at path in the target tree."""
+ file_id = self._new_id.get(path)
+ if file_id is not None:
+ return file_id
+ old_path = self.old_path(path)
+ if old_path is None:
+ return None
+ if old_path in self.deleted:
+ return None
+ return self.base_tree.path2id(old_path)
+
+ def id2path(self, file_id):
+ """Return the new path in the target tree of the file with id file_id"""
+ path = self._new_id_r.get(file_id)
+ if path is not None:
+ return path
+ old_path = self.base_tree.id2path(file_id)
+ if old_path is None:
+ return None
+ if old_path in self.deleted:
+ return None
+ return self.new_path(old_path)
+
+ def old_contents_id(self, file_id):
+ """Return the id in the base_tree for the given file_id.
+ Return None if the file did not exist in base.
+ """
+ if self.contents_by_id:
+ if self.base_tree.has_id(file_id):
+ return file_id
+ else:
+ return None
+ new_path = self.id2path(file_id)
+ return self.base_tree.path2id(new_path)
+
+ def get_file(self, file_id):
+ """Return a file-like object containing the new contents of the
+ file given by file_id.
+
+ TODO: It might be nice if this actually generated an entry
+ in the text-store, so that the file contents would
+ then be cached.
+ """
+ base_id = self.old_contents_id(file_id)
+ if (base_id is not None and
+ base_id != self.base_tree.get_root_id()):
+ patch_original = self.base_tree.get_file(base_id)
+ else:
+ patch_original = None
+ file_patch = self.patches.get(self.id2path(file_id))
+ if file_patch is None:
+ if (patch_original is None and
+ self.kind(file_id) == 'directory'):
+ return StringIO()
+ if patch_original is None:
+ raise AssertionError("None: %s" % file_id)
+ return patch_original
+
+ if file_patch.startswith('\\'):
+ raise ValueError(
+ 'Malformed patch for %s, %r' % (file_id, file_patch))
+ return patched_file(file_patch, patch_original)
+
+ def get_symlink_target(self, file_id, path=None):
+ if path is None:
+ path = self.id2path(file_id)
+ try:
+ return self._targets[path]
+ except KeyError:
+ return self.base_tree.get_symlink_target(file_id)
+
+ def kind(self, file_id):
+ if file_id in self._kinds:
+ return self._kinds[file_id]
+ return self.base_tree.kind(file_id)
+
+ def get_file_revision(self, file_id):
+ path = self.id2path(file_id)
+ if path in self._last_changed:
+ return self._last_changed[path]
+ else:
+ return self.base_tree.get_file_revision(file_id)
+
+ def is_executable(self, file_id):
+ path = self.id2path(file_id)
+ if path in self._executable:
+ return self._executable[path]
+ else:
+ return self.base_tree.is_executable(file_id)
+
+ def get_last_changed(self, file_id):
+ path = self.id2path(file_id)
+ if path in self._last_changed:
+ return self._last_changed[path]
+ return self.base_tree.get_file_revision(file_id)
+
+ def get_size_and_sha1(self, file_id):
+ """Return the size and sha1 hash of the given file id.
+ If the file was not locally modified, this is extracted
+ from the base_tree rather than re-reading the file.
+ """
+ new_path = self.id2path(file_id)
+ if new_path is None:
+ return None, None
+ if new_path not in self.patches:
+ # If the entry does not have a patch, then the
+ # contents must be the same as in the base_tree
+ text_size = self.base_tree.get_file_size(file_id)
+ text_sha1 = self.base_tree.get_file_sha1(file_id)
+ return text_size, text_sha1
+ fileobj = self.get_file(file_id)
+ content = fileobj.read()
+ return len(content), sha_string(content)
+
+ def _get_inventory(self):
+ """Build up the inventory entry for the BundleTree.
+
+ This needs to be called before ever accessing self.inventory
+ """
+ from os.path import dirname, basename
+ inv = Inventory(None, self.revision_id)
+
+ def add_entry(file_id):
+ path = self.id2path(file_id)
+ if path is None:
+ return
+ if path == '':
+ parent_id = None
+ else:
+ parent_path = dirname(path)
+ parent_id = self.path2id(parent_path)
+
+ kind = self.kind(file_id)
+ revision_id = self.get_last_changed(file_id)
+
+ name = basename(path)
+ if kind == 'directory':
+ ie = InventoryDirectory(file_id, name, parent_id)
+ elif kind == 'file':
+ ie = InventoryFile(file_id, name, parent_id)
+ ie.executable = self.is_executable(file_id)
+ elif kind == 'symlink':
+ ie = InventoryLink(file_id, name, parent_id)
+ ie.symlink_target = self.get_symlink_target(file_id, path)
+ ie.revision = revision_id
+
+ if kind == 'file':
+ ie.text_size, ie.text_sha1 = self.get_size_and_sha1(file_id)
+ if ie.text_size is None:
+ raise BzrError(
+ 'Got a text_size of None for file_id %r' % file_id)
+ inv.add(ie)
+
+ sorted_entries = self.sorted_path_id()
+ for path, file_id in sorted_entries:
+ add_entry(file_id)
+
+ return inv
+
+    # Have to redeclare the inherited inventory property, because
+    # otherwise only the parent's _get_inventory would ever be called:
+    # property() objects do not use method overriding; they keep a
+    # reference to the function exactly as it was defined at that instant.
+ inventory = property(_get_inventory)
+
+ root_inventory = property(_get_inventory)
+
+ def all_file_ids(self):
+ return set(
+ [entry.file_id for path, entry in self.inventory.iter_entries()])
+
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ # The only files returned by this are those from the version
+ inv = self.inventory
+ if from_dir is None:
+ from_dir_id = None
+ else:
+ from_dir_id = inv.path2id(from_dir)
+ if from_dir_id is None:
+ # Directory not versioned
+ return
+ entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive)
+ if inv.root is not None and not include_root and from_dir is None:
+            # skip the root for compatibility with the current APIs.
+ entries.next()
+ for path, entry in entries:
+ yield path, 'V', entry.kind, entry.file_id, entry
+
+ def sorted_path_id(self):
+ paths = []
+ for result in self._new_id.iteritems():
+ paths.append(result)
+ for id in self.base_tree.all_file_ids():
+ path = self.id2path(id)
+ if path is None:
+ continue
+ paths.append((path, id))
+ paths.sort()
+ return paths
+
+
+def patched_file(file_patch, original):
+ """Produce a file-like object with the patched version of a text"""
+ from bzrlib.patches import iter_patched
+ from bzrlib.iterablefile import IterableFile
+ if file_patch == "":
+ return IterableFile(())
+ # string.splitlines(True) also splits on '\r', but the iter_patched code
+ # only expects to iterate over '\n' style lines
+ return IterableFile(iter_patched(original,
+ StringIO(file_patch).readlines()))
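A minimal usage sketch of patched_file() as defined above; the diff text and
original lines here are invented for illustration:

    original_lines = ['one\n', 'two\n', 'three\n']
    diff_text = ('--- old/example.txt\n'
                 '+++ new/example.txt\n'
                 '@@ -1,3 +1,3 @@\n'
                 ' one\n'
                 '-two\n'
                 '+TWO\n'
                 ' three\n')
    # patched_file() feeds the diff through bzrlib.patches.iter_patched and
    # wraps the result in an IterableFile, so callers can read() it like a
    # normal file.  Here the result should read back 'one\nTWO\nthree\n'.
    patched = patched_file(diff_text, original_lines)
    text = patched.read()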
diff --git a/bzrlib/bundle/commands.py b/bzrlib/bundle/commands.py
new file mode 100644
index 0000000..da83712
--- /dev/null
+++ b/bzrlib/bundle/commands.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""\
+This is an attempt to take the internal delta object and represent
+it as a single-file text-only changeset.
+This should have commands both for generating a changeset
+and for applying a changeset.
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ branch,
+ errors,
+ merge_directive,
+ revision as _mod_revision,
+ urlutils,
+ transport,
+ )
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.commands import Command
+
+
+class cmd_bundle_info(Command):
+ __doc__ = """Output interesting stats about a bundle"""
+
+ hidden = True
+ takes_args = ['location']
+ takes_options = ['verbose']
+ encoding_type = 'exact'
+
+ def run(self, location, verbose=False):
+ from bzrlib.bundle.serializer import read_bundle
+ from bzrlib.bundle import read_mergeable_from_url
+ from bzrlib import osutils
+ term_encoding = osutils.get_terminal_encoding()
+ bundle_info = read_mergeable_from_url(location)
+ if isinstance(bundle_info, merge_directive.BaseMergeDirective):
+ bundle_file = StringIO(bundle_info.get_raw_bundle())
+ bundle_info = read_bundle(bundle_file)
+ else:
+ if verbose:
+ raise errors.BzrCommandError(gettext(
+ '--verbose requires a merge directive'))
+ reader_method = getattr(bundle_info, 'get_bundle_reader', None)
+ if reader_method is None:
+ raise errors.BzrCommandError(gettext('Bundle format not supported'))
+
+ by_kind = {}
+ file_ids = set()
+ for bytes, parents, repo_kind, revision_id, file_id\
+ in reader_method().iter_records():
+ by_kind.setdefault(repo_kind, []).append(
+ (bytes, parents, repo_kind, revision_id, file_id))
+ if file_id is not None:
+ file_ids.add(file_id)
+ self.outf.write(gettext('Records\n'))
+ for kind, records in sorted(by_kind.iteritems()):
+ multiparent = sum(1 for b, m, k, r, f in records if
+ len(m.get('parents', [])) > 1)
+ self.outf.write(gettext('{0}: {1} ({2} multiparent)\n').format(
+ kind, len(records), multiparent))
+ self.outf.write(gettext('unique files: %d\n') % len(file_ids))
+ self.outf.write('\n')
+ nicks = set()
+ committers = set()
+ for revision in bundle_info.real_revisions:
+ if 'branch-nick' in revision.properties:
+ nicks.add(revision.properties['branch-nick'])
+ committers.add(revision.committer)
+
+ self.outf.write(gettext('Revisions\n'))
+ self.outf.write((gettext('nicks: %s\n')
+ % ', '.join(sorted(nicks))).encode(term_encoding, 'replace'))
+ self.outf.write((gettext('committers: \n%s\n') %
+ '\n'.join(sorted(committers)).encode(term_encoding, 'replace')))
+ if verbose:
+ self.outf.write('\n')
+ bundle_file.seek(0)
+ line = bundle_file.readline()
+ line = bundle_file.readline()
+ content = bundle_file.read().decode('bz2')
+ self.outf.write(gettext("Decoded contents\n"))
+ self.outf.write(content)
+ self.outf.write('\n')
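The command above boils down to reading a mergeable object from a URL and,
for plain bundles, walking its revisions and records. A minimal programmatic
sketch of the same flow, where the bundle URL is an invented placeholder:

    from cStringIO import StringIO
    from bzrlib.bundle import read_mergeable_from_url
    from bzrlib.bundle.serializer import read_bundle

    info = read_mergeable_from_url('file:///tmp/mybundle.patch')
    # Merge directives carry an embedded bundle; plain bundles come back
    # as bundle info objects directly.
    if hasattr(info, 'get_raw_bundle'):
        info = read_bundle(StringIO(info.get_raw_bundle()))
    for rev in info.real_revisions:
        committer = rev.committer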
diff --git a/bzrlib/bundle/serializer/__init__.py b/bzrlib/bundle/serializer/__init__.py
new file mode 100644
index 0000000..35f024a
--- /dev/null
+++ b/bzrlib/bundle/serializer/__init__.py
@@ -0,0 +1,216 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Serializer factory for reading and writing bundles.
+"""
+
+from __future__ import absolute_import
+
+import base64
+from StringIO import StringIO
+import re
+
+from bzrlib import (
+ errors,
+ pyutils,
+ )
+from bzrlib.diff import internal_diff
+from bzrlib.revision import NULL_REVISION
+# For backwards-compatibility
+from bzrlib.timestamp import unpack_highres_date, format_highres_date
+
+
+# New bundles should try to use this header format
+BUNDLE_HEADER = '# Bazaar revision bundle v'
+BUNDLE_HEADER_RE = re.compile(
+ r'^# Bazaar revision bundle v(?P<version>\d+[\w.]*)(?P<lineending>\r?)\n$')
+CHANGESET_OLD_HEADER_RE = re.compile(
+ r'^# Bazaar-NG changeset v(?P<version>\d+[\w.]*)(?P<lineending>\r?)\n$')
+
+
+_serializers = {}
+
+v4_string = '4'
+
+def _get_bundle_header(version):
+ return '%s%s\n' % (BUNDLE_HEADER, version)
+
+def _get_filename(f):
+ return getattr(f, 'name', '<unknown>')
+
+
+def read_bundle(f):
+ """Read in a bundle from a filelike object.
+
+ :param f: A file-like object
+ :return: A list of Bundle objects
+ """
+ version = None
+ for line in f:
+ m = BUNDLE_HEADER_RE.match(line)
+ if m:
+ if m.group('lineending') != '':
+ raise errors.UnsupportedEOLMarker()
+ version = m.group('version')
+ break
+ elif line.startswith(BUNDLE_HEADER):
+ raise errors.MalformedHeader(
+ 'Extra characters after version number')
+ m = CHANGESET_OLD_HEADER_RE.match(line)
+ if m:
+ version = m.group('version')
+ raise errors.BundleNotSupported(version,
+ 'old format bundles not supported')
+
+ if version is None:
+ raise errors.NotABundle('Did not find an opening header')
+
+ # Now we have a version, to figure out how to read the bundle
+ if version not in _serializers:
+ raise errors.BundleNotSupported(version,
+ 'version not listed in known versions')
+
+ serializer = _serializers[version](version)
+
+ return serializer.read(f)
+
+
+def get_serializer(version):
+ try:
+ return _serializers[version](version)
+ except KeyError:
+ raise errors.BundleNotSupported(version, 'unknown bundle format')
+
+
+def write(source, revision_ids, f, version=None, forced_bases={}):
+ """Serialize a list of bundles to a filelike object.
+
+ :param source: A source for revision information
+ :param revision_ids: The list of revision ids to serialize
+ :param f: The file to output to
+ :param version: [optional] target serialization version
+ """
+
+ source.lock_read()
+ try:
+ return get_serializer(version).write(source, revision_ids,
+ forced_bases, f)
+ finally:
+ source.unlock()
+
+
+def write_bundle(repository, revision_id, base_revision_id, out, format=None):
+ """Write a bundle of revisions.
+
+ :param repository: Repository containing revisions to serialize.
+ :param revision_id: Head revision_id of the bundle.
+ :param base_revision_id: Revision assumed to be present in repositories
+ applying the bundle.
+ :param out: Output file.
+ """
+ repository.lock_read()
+ try:
+ return get_serializer(format).write_bundle(repository, revision_id,
+ base_revision_id, out)
+ finally:
+ repository.unlock()
+
+
+class BundleSerializer(object):
+ """The base class for Serializers.
+
+ Common functionality should be included here.
+ """
+ def __init__(self, version):
+ self.version = version
+
+ def read(self, f):
+ """Read the rest of the bundles from the supplied file.
+
+ :param f: The file to read from
+ :return: A list of bundle trees
+ """
+ raise NotImplementedError
+
+ def write_bundle(self, repository, target, base, fileobj):
+ """Write the bundle to the supplied file.
+
+ :param repository: The repository to retrieve revision data from
+ :param target: The revision to provide data for
+    :param base: The most recent ancestor of the revision that does not
+ need to be included in the bundle
+ :param fileobj: The file to output to
+ """
+ raise NotImplementedError
+
+ def _write_bundle(self, repository, revision_id, base_revision_id, out):
+ """Helper function for translating write_bundle to write"""
+ forced_bases = {revision_id:base_revision_id}
+ if base_revision_id is NULL_REVISION:
+ base_revision_id = None
+ graph = repository.get_graph()
+ revision_ids = graph.find_unique_ancestors(revision_id,
+ [base_revision_id])
+ revision_ids = list(repository.get_graph().iter_topo_order(
+ revision_ids))
+ revision_ids.reverse()
+ self.write(repository, revision_ids, forced_bases, out)
+ return revision_ids
+
+
+def register(version, klass, overwrite=False):
+ """Register a BundleSerializer version.
+
+ :param version: The version associated with this format
+ :param klass: The class to instantiate, which must take a version argument
+ """
+ global _serializers
+ if overwrite:
+ _serializers[version] = klass
+ return
+
+ if version not in _serializers:
+ _serializers[version] = klass
+
+
+def register_lazy(version, module, classname, overwrite=False):
+ """Register lazy-loaded bundle serializer.
+
+ :param version: The version associated with this reader
+ :param module: String indicating what module should be loaded
+ :param classname: Name of the class that will be instantiated
+ :param overwrite: Should this version override a default
+ """
+ def _loader(version):
+ klass = pyutils.get_named_object(module, classname)
+ return klass(version)
+ register(version, _loader, overwrite=overwrite)
+
+
+def binary_diff(old_filename, old_lines, new_filename, new_lines, to_file):
+ temp = StringIO()
+ internal_diff(old_filename, old_lines, new_filename, new_lines, temp,
+ allow_binary=True)
+ temp.seek(0)
+ base64.encode(temp, to_file)
+ to_file.write('\n')
+
+register_lazy('0.8', 'bzrlib.bundle.serializer.v08', 'BundleSerializerV08')
+register_lazy('0.9', 'bzrlib.bundle.serializer.v09', 'BundleSerializerV09')
+register_lazy(v4_string, 'bzrlib.bundle.serializer.v4',
+ 'BundleSerializerV4')
+register_lazy(None, 'bzrlib.bundle.serializer.v4', 'BundleSerializerV4')
+
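A rough sketch of how the registry and header helpers above fit together; the
version string '9.9' and the DemoSerializer class are invented for
illustration only:

    class DemoSerializer(BundleSerializer):
        def read(self, f):
            return None   # a real serializer would return bundle info here

    register('9.9', DemoSerializer)
    serializer = get_serializer('9.9')     # -> DemoSerializer('9.9')
    header = _get_bundle_header('9.9')     # '# Bazaar revision bundle v9.9\n'
    # read_bundle() recognises exactly this header line before dispatching
    # to the serializer registered for the parsed version.
    assert BUNDLE_HEADER_RE.match(header)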
diff --git a/bzrlib/bundle/serializer/v08.py b/bzrlib/bundle/serializer/v08.py
new file mode 100644
index 0000000..77e3cd4
--- /dev/null
+++ b/bzrlib/bundle/serializer/v08.py
@@ -0,0 +1,554 @@
+# Copyright (C) 2005, 2006, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Serializer factory for reading and writing bundles.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ ui,
+ )
+from bzrlib.bundle.serializer import (BundleSerializer,
+ _get_bundle_header,
+ )
+from bzrlib.bundle.serializer import binary_diff
+from bzrlib.bundle.bundle_data import (RevisionInfo, BundleInfo)
+from bzrlib.diff import internal_diff
+from bzrlib.revision import NULL_REVISION
+from bzrlib.testament import StrictTestament
+from bzrlib.timestamp import (
+ format_highres_date,
+ )
+from bzrlib.textfile import text_file
+from bzrlib.trace import mutter
+
+bool_text = {True: 'yes', False: 'no'}
+
+
+class Action(object):
+ """Represent an action"""
+
+ def __init__(self, name, parameters=None, properties=None):
+ self.name = name
+ if parameters is None:
+ self.parameters = []
+ else:
+ self.parameters = parameters
+ if properties is None:
+ self.properties = []
+ else:
+ self.properties = properties
+
+ def add_utf8_property(self, name, value):
+ """Add a property whose value is currently utf8 to the action."""
+ self.properties.append((name, value.decode('utf8')))
+
+ def add_property(self, name, value):
+ """Add a property to the action"""
+ self.properties.append((name, value))
+
+ def add_bool_property(self, name, value):
+ """Add a boolean property to the action"""
+ self.add_property(name, bool_text[value])
+
+ def write(self, to_file):
+ """Write action as to a file"""
+ p_texts = [' '.join([self.name]+self.parameters)]
+ for prop in self.properties:
+ if len(prop) == 1:
+ p_texts.append(prop[0])
+ else:
+ try:
+ p_texts.append('%s:%s' % prop)
+                except:
+                    raise ValueError('invalid property: %r' % (prop,))
+ text = ['=== ']
+ text.append(' // '.join(p_texts))
+ text_line = ''.join(text).encode('utf-8')
+ available = 79
+ while len(text_line) > available:
+ to_file.write(text_line[:available])
+ text_line = text_line[available:]
+ to_file.write('\n... ')
+ available = 79 - len('... ')
+ to_file.write(text_line+'\n')
+
+
+class BundleSerializerV08(BundleSerializer):
+ def read(self, f):
+ """Read the rest of the bundles from the supplied file.
+
+ :param f: The file to read from
+ :return: A list of bundles
+ """
+ return BundleReader(f).info
+
+ def check_compatible(self):
+ if self.source.supports_rich_root():
+ raise errors.IncompatibleBundleFormat('0.8', repr(self.source))
+
+ def write(self, source, revision_ids, forced_bases, f):
+ """Write the bundless to the supplied files.
+
+ :param source: A source for revision information
+ :param revision_ids: The list of revision ids to serialize
+ :param forced_bases: A dict of revision -> base that overrides default
+ :param f: The file to output to
+ """
+ self.source = source
+ self.revision_ids = revision_ids
+ self.forced_bases = forced_bases
+ self.to_file = f
+ self.check_compatible()
+ source.lock_read()
+ try:
+ self._write_main_header()
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ self._write_revisions(pb)
+ finally:
+ pb.finished()
+ finally:
+ source.unlock()
+
+ def write_bundle(self, repository, target, base, fileobj):
+ return self._write_bundle(repository, target, base, fileobj)
+
+ def _write_main_header(self):
+ """Write the header for the changes"""
+ f = self.to_file
+ f.write(_get_bundle_header('0.8'))
+ f.write('#\n')
+
+ def _write(self, key, value, indent=1, trailing_space_when_empty=False):
+ """Write out meta information, with proper indenting, etc.
+
+ :param trailing_space_when_empty: To work around a bug in earlier
+ bundle readers, when writing an empty property, we use "prop: \n"
+ rather than writing "prop:\n".
+ If this parameter is True, and value is the empty string, we will
+ write an extra space.
+ """
+ if indent < 1:
+ raise ValueError('indentation must be greater than 0')
+ f = self.to_file
+ f.write('#' + (' ' * indent))
+ f.write(key.encode('utf-8'))
+ if not value:
+ if trailing_space_when_empty and value == '':
+ f.write(': \n')
+ else:
+ f.write(':\n')
+ elif isinstance(value, str):
+ f.write(': ')
+ f.write(value)
+ f.write('\n')
+ elif isinstance(value, unicode):
+ f.write(': ')
+ f.write(value.encode('utf-8'))
+ f.write('\n')
+ else:
+ f.write(':\n')
+ for entry in value:
+ f.write('#' + (' ' * (indent+2)))
+ if isinstance(entry, str):
+ f.write(entry)
+ else:
+ f.write(entry.encode('utf-8'))
+ f.write('\n')
+
+ def _write_revisions(self, pb):
+ """Write the information for all of the revisions."""
+
+ # Optimize for the case of revisions in order
+ last_rev_id = None
+ last_rev_tree = None
+
+ i_max = len(self.revision_ids)
+ for i, rev_id in enumerate(self.revision_ids):
+ pb.update("Generating revision data", i, i_max)
+ rev = self.source.get_revision(rev_id)
+ if rev_id == last_rev_id:
+ rev_tree = last_rev_tree
+ else:
+ rev_tree = self.source.revision_tree(rev_id)
+ if rev_id in self.forced_bases:
+ explicit_base = True
+ base_id = self.forced_bases[rev_id]
+ if base_id is None:
+ base_id = NULL_REVISION
+ else:
+ explicit_base = False
+ if rev.parent_ids:
+ base_id = rev.parent_ids[-1]
+ else:
+ base_id = NULL_REVISION
+
+ if base_id == last_rev_id:
+ base_tree = last_rev_tree
+ else:
+ base_tree = self.source.revision_tree(base_id)
+ force_binary = (i != 0)
+ self._write_revision(rev, rev_tree, base_id, base_tree,
+ explicit_base, force_binary)
+
+ last_rev_id = base_id
+ last_rev_tree = base_tree
+
+ def _testament_sha1(self, revision_id):
+ return StrictTestament.from_revision(self.source,
+ revision_id).as_sha1()
+
+ def _write_revision(self, rev, rev_tree, base_rev, base_tree,
+ explicit_base, force_binary):
+ """Write out the information for a revision."""
+ def w(key, value):
+ self._write(key, value, indent=1)
+
+ w('message', rev.message.split('\n'))
+ w('committer', rev.committer)
+ w('date', format_highres_date(rev.timestamp, rev.timezone))
+ self.to_file.write('\n')
+
+ self._write_delta(rev_tree, base_tree, rev.revision_id, force_binary)
+
+ w('revision id', rev.revision_id)
+ w('sha1', self._testament_sha1(rev.revision_id))
+ w('inventory sha1', rev.inventory_sha1)
+ if rev.parent_ids:
+ w('parent ids', rev.parent_ids)
+ if explicit_base:
+ w('base id', base_rev)
+ if rev.properties:
+ self._write('properties', None, indent=1)
+ for name, value in sorted(rev.properties.items()):
+ self._write(name, value, indent=3,
+ trailing_space_when_empty=True)
+
+        # Add an extra blank line at the end
+ self.to_file.write('\n')
+
+ def _write_action(self, name, parameters, properties=None):
+ if properties is None:
+ properties = []
+ p_texts = ['%s:%s' % v for v in properties]
+ self.to_file.write('=== ')
+ self.to_file.write(' '.join([name]+parameters).encode('utf-8'))
+ self.to_file.write(' // '.join(p_texts).encode('utf-8'))
+ self.to_file.write('\n')
+
+ def _write_delta(self, new_tree, old_tree, default_revision_id,
+ force_binary):
+ """Write out the changes between the trees."""
+ DEVNULL = '/dev/null'
+ old_label = ''
+ new_label = ''
+
+ def do_diff(file_id, old_path, new_path, action, force_binary):
+ def tree_lines(tree, require_text=False):
+ if tree.has_id(file_id):
+ tree_file = tree.get_file(file_id)
+ if require_text is True:
+ tree_file = text_file(tree_file)
+ return tree_file.readlines()
+ else:
+ return []
+
+ try:
+ if force_binary:
+ raise errors.BinaryFile()
+ old_lines = tree_lines(old_tree, require_text=True)
+ new_lines = tree_lines(new_tree, require_text=True)
+ action.write(self.to_file)
+ internal_diff(old_path, old_lines, new_path, new_lines,
+ self.to_file)
+ except errors.BinaryFile:
+ old_lines = tree_lines(old_tree, require_text=False)
+ new_lines = tree_lines(new_tree, require_text=False)
+ action.add_property('encoding', 'base64')
+ action.write(self.to_file)
+ binary_diff(old_path, old_lines, new_path, new_lines,
+ self.to_file)
+
+ def finish_action(action, file_id, kind, meta_modified, text_modified,
+ old_path, new_path):
+ entry = new_tree.root_inventory[file_id]
+ if entry.revision != default_revision_id:
+ action.add_utf8_property('last-changed', entry.revision)
+ if meta_modified:
+ action.add_bool_property('executable', entry.executable)
+ if text_modified and kind == "symlink":
+ action.add_property('target', entry.symlink_target)
+ if text_modified and kind == "file":
+ do_diff(file_id, old_path, new_path, action, force_binary)
+ else:
+ action.write(self.to_file)
+
+ delta = new_tree.changes_from(old_tree, want_unchanged=True,
+ include_root=True)
+ for path, file_id, kind in delta.removed:
+            Action('removed', [kind, path]).write(self.to_file)
+
+ for path, file_id, kind in delta.added:
+ action = Action('added', [kind, path], [('file-id', file_id)])
+ meta_modified = (kind=='file' and
+ new_tree.is_executable(file_id))
+ finish_action(action, file_id, kind, meta_modified, True,
+ DEVNULL, path)
+
+ for (old_path, new_path, file_id, kind,
+ text_modified, meta_modified) in delta.renamed:
+ action = Action('renamed', [kind, old_path], [(new_path,)])
+ finish_action(action, file_id, kind, meta_modified, text_modified,
+ old_path, new_path)
+
+ for (path, file_id, kind,
+ text_modified, meta_modified) in delta.modified:
+ action = Action('modified', [kind, path])
+ finish_action(action, file_id, kind, meta_modified, text_modified,
+ path, path)
+
+ for path, file_id, kind in delta.unchanged:
+ new_rev = new_tree.get_file_revision(file_id)
+ if new_rev is None:
+ continue
+ old_rev = old_tree.get_file_revision(file_id)
+ if new_rev != old_rev:
+ action = Action('modified', [new_tree.kind(file_id),
+ new_tree.id2path(file_id)])
+ action.add_utf8_property('last-changed', new_rev)
+ action.write(self.to_file)
+
+
+class BundleReader(object):
+ """This class reads in a bundle from a file, and returns
+ a Bundle object, which can then be applied against a tree.
+ """
+ def __init__(self, from_file):
+ """Read in the bundle from the file.
+
+ :param from_file: A file-like object (must have iterator support).
+ """
+ object.__init__(self)
+ self.from_file = iter(from_file)
+ self._next_line = None
+
+ self.info = self._get_info()
+ # We put the actual inventory ids in the footer, so that the patch
+ # is easier to read for humans.
+ # Unfortunately, that means we need to read everything before we
+ # can create a proper bundle.
+ self._read()
+ self._validate()
+
+ def _get_info(self):
+ return BundleInfo08()
+
+ def _read(self):
+ self._next().next()
+ while self._next_line is not None:
+ if not self._read_revision_header():
+ break
+ if self._next_line is None:
+ break
+ self._read_patches()
+ self._read_footer()
+
+ def _validate(self):
+ """Make sure that the information read in makes sense
+ and passes appropriate checksums.
+ """
+ # Fill in all the missing blanks for the revisions
+ # and generate the real_revisions list.
+ self.info.complete_info()
+
+ def _next(self):
+ """yield the next line, but secretly
+ keep 1 extra line for peeking.
+ """
+ for line in self.from_file:
+ last = self._next_line
+ self._next_line = line
+ if last is not None:
+ #mutter('yielding line: %r' % last)
+ yield last
+ last = self._next_line
+ self._next_line = None
+ #mutter('yielding line: %r' % last)
+ yield last
+
+ def _read_revision_header(self):
+ found_something = False
+ self.info.revisions.append(RevisionInfo(None))
+ for line in self._next():
+ # The bzr header is terminated with a blank line
+ # which does not start with '#'
+ if line is None or line == '\n':
+ break
+ if not line.startswith('#'):
+ continue
+ found_something = True
+ self._handle_next(line)
+ if not found_something:
+ # Nothing was there, so remove the added revision
+ self.info.revisions.pop()
+ return found_something
+
+ def _read_next_entry(self, line, indent=1):
+ """Read in a key-value pair
+ """
+ if not line.startswith('#'):
+ raise errors.MalformedHeader('Bzr header did not start with #')
+ line = line[1:-1].decode('utf-8') # Remove the '#' and '\n'
+ if line[:indent] == ' '*indent:
+ line = line[indent:]
+ if not line:
+            return None, None  # Ignore blank lines
+
+ loc = line.find(': ')
+ if loc != -1:
+ key = line[:loc]
+ value = line[loc+2:]
+ if not value:
+ value = self._read_many(indent=indent+2)
+ elif line[-1:] == ':':
+ key = line[:-1]
+ value = self._read_many(indent=indent+2)
+ else:
+ raise errors.MalformedHeader('While looking for key: value pairs,'
+ ' did not find the colon %r' % (line))
+
+ key = key.replace(' ', '_')
+ #mutter('found %s: %s' % (key, value))
+ return key, value
+
+ def _handle_next(self, line):
+ if line is None:
+ return
+ key, value = self._read_next_entry(line, indent=1)
+ mutter('_handle_next %r => %r' % (key, value))
+ if key is None:
+ return
+
+ revision_info = self.info.revisions[-1]
+ if key in revision_info.__dict__:
+ if getattr(revision_info, key) is None:
+ if key in ('file_id', 'revision_id', 'base_id'):
+ value = value.encode('utf8')
+                elif key in ('parent_ids',):
+ value = [v.encode('utf8') for v in value]
+ setattr(revision_info, key, value)
+ else:
+ raise errors.MalformedHeader('Duplicated Key: %s' % key)
+ else:
+ # What do we do with a key we don't recognize
+ raise errors.MalformedHeader('Unknown Key: "%s"' % key)
+
+ def _read_many(self, indent):
+ """If a line ends with no entry, that means that it should be
+ followed with multiple lines of values.
+
+ This detects the end of the list, because it will be a line that
+ does not start properly indented.
+ """
+ values = []
+ start = '#' + (' '*indent)
+
+ if self._next_line is None or self._next_line[:len(start)] != start:
+ return values
+
+ for line in self._next():
+ values.append(line[len(start):-1].decode('utf-8'))
+ if self._next_line is None or self._next_line[:len(start)] != start:
+ break
+ return values
+
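To make the layout these readers expect concrete, a revision header in a 0.8
bundle looks roughly like the following (all values invented): a '# key: value'
line carries a single value, while a bare '# key:' line is followed by
more-indented continuation lines that _read_many() collects, and the header
ends at the first blank line that does not start with '#'.

    # message:
    #   Fix the frobnicator
    # committer: Jane Doe <jane@example.com>
    # date: Wed 2012-08-22 15:47:16.000000000 +0100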
+ def _read_one_patch(self):
+ """Read in one patch, return the complete patch, along with
+ the next line.
+
+ :return: action, lines, do_continue
+ """
+ #mutter('_read_one_patch: %r' % self._next_line)
+ # Peek and see if there are no patches
+ if self._next_line is None or self._next_line.startswith('#'):
+ return None, [], False
+
+ first = True
+ lines = []
+ for line in self._next():
+ if first:
+ if not line.startswith('==='):
+ raise errors.MalformedPatches('The first line of all patches'
+ ' should be a bzr meta line "==="'
+ ': %r' % line)
+ action = line[4:-1].decode('utf-8')
+ elif line.startswith('... '):
+ action += line[len('... '):-1].decode('utf-8')
+
+ if (self._next_line is not None and
+ self._next_line.startswith('===')):
+ return action, lines, True
+ elif self._next_line is None or self._next_line.startswith('#'):
+ return action, lines, False
+
+ if first:
+ first = False
+ elif not line.startswith('... '):
+ lines.append(line)
+
+ return action, lines, False
+
+ def _read_patches(self):
+ do_continue = True
+ revision_actions = []
+ while do_continue:
+ action, lines, do_continue = self._read_one_patch()
+ if action is not None:
+ revision_actions.append((action, lines))
+ if self.info.revisions[-1].tree_actions is not None:
+ raise AssertionError()
+ self.info.revisions[-1].tree_actions = revision_actions
+
+ def _read_footer(self):
+ """Read the rest of the meta information.
+
+ :param first_line: The previous step iterates past what it
+ can handle. That extra line is given here.
+ """
+ for line in self._next():
+ self._handle_next(line)
+ if self._next_line is None:
+ break
+ if not self._next_line.startswith('#'):
+ # Consume the trailing \n and stop processing
+ self._next().next()
+ break
+
+class BundleInfo08(BundleInfo):
+
+ def _update_tree(self, bundle_tree, revision_id):
+ bundle_tree.note_last_changed('', revision_id)
+ BundleInfo._update_tree(self, bundle_tree, revision_id)
+
+ def _testament_sha1_from_revision(self, repository, revision_id):
+ testament = StrictTestament.from_revision(repository, revision_id)
+ return testament.as_sha1()
+
+ def _testament_sha1(self, revision, tree):
+ return StrictTestament(revision, tree).as_sha1()
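As a usage sketch, this 0.8 serializer is normally reached through the
write_bundle() factory shown earlier in bzrlib/bundle/serializer/__init__.py;
the branch location and revision ids below are invented placeholders:

    from cStringIO import StringIO
    from bzrlib.branch import Branch
    from bzrlib.bundle.serializer import write_bundle

    b = Branch.open('/path/to/branch')    # invented location
    out = StringIO()
    # Everything between base-rev-id (exclusive) and head-rev-id (inclusive)
    # is written as a version 0.8 bundle; both ids are placeholders.
    write_bundle(b.repository, 'head-rev-id', 'base-rev-id', out,
                 format='0.8')
    bundle_text = out.getvalue()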
diff --git a/bzrlib/bundle/serializer/v09.py b/bzrlib/bundle/serializer/v09.py
new file mode 100644
index 0000000..34f71b9
--- /dev/null
+++ b/bzrlib/bundle/serializer/v09.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.bundle.serializer import _get_bundle_header
+from bzrlib.bundle.serializer.v08 import BundleSerializerV08, BundleReader
+from bzrlib.testament import StrictTestament3
+from bzrlib.bundle.bundle_data import BundleInfo
+
+
+"""Serializer for bundle format 0.9"""
+
+
+class BundleSerializerV09(BundleSerializerV08):
+ """Serializer for bzr bundle format 0.9
+
+ This format supports rich root data, for the nested-trees work, but also
+ supports repositories that don't have rich root data. It cannot be
+ used to transfer from a knit2 repo into a knit1 repo, because that would
+ be lossy.
+ """
+
+ def check_compatible(self):
+ pass
+
+ def _write_main_header(self):
+ """Write the header for the changes"""
+ f = self.to_file
+ f.write(_get_bundle_header('0.9') + '#\n')
+
+ def _testament_sha1(self, revision_id):
+ return StrictTestament3.from_revision(self.source,
+ revision_id).as_sha1()
+
+ def read(self, f):
+ """Read the rest of the bundles from the supplied file.
+
+ :param f: The file to read from
+ :return: A list of bundles
+ """
+ return BundleReaderV09(f).info
+
+
+class BundleInfo09(BundleInfo):
+ """BundleInfo that uses StrictTestament3
+
+ This means that the root data is included in the testament.
+ """
+
+ def _testament_sha1_from_revision(self, repository, revision_id):
+ testament = StrictTestament3.from_revision(repository, revision_id)
+ return testament.as_sha1()
+
+ def _testament_sha1(self, revision, tree):
+ return StrictTestament3(revision, tree).as_sha1()
+
+
+class BundleReaderV09(BundleReader):
+ """BundleReader for 0.9 bundles"""
+
+ def _get_info(self):
+ return BundleInfo09()
diff --git a/bzrlib/bundle/serializer/v4.py b/bzrlib/bundle/serializer/v4.py
new file mode 100644
index 0000000..0d7f254
--- /dev/null
+++ b/bzrlib/bundle/serializer/v4.py
@@ -0,0 +1,742 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import bz2
+import re
+
+from bzrlib import (
+ errors,
+ iterablefile,
+ lru_cache,
+ multiparent,
+ osutils,
+ pack,
+ revision as _mod_revision,
+ serializer,
+ trace,
+ ui,
+ versionedfile as _mod_versionedfile,
+ )
+from bzrlib.bundle import bundle_data, serializer as bundle_serializer
+from bzrlib.i18n import ngettext
+from bzrlib import bencode
+
+
+class _MPDiffInventoryGenerator(_mod_versionedfile._MPDiffGenerator):
+ """Generate Inventory diffs serialized inventories."""
+
+ def __init__(self, repo, inventory_keys):
+ super(_MPDiffInventoryGenerator, self).__init__(repo.inventories,
+ inventory_keys)
+ self.repo = repo
+ self.sha1s = {}
+
+ def iter_diffs(self):
+ """Compute the diffs one at a time."""
+        # This is used instead of compute_diffs() because we guarantee the
+        # ordering of inventories, so we don't have to do any buffering.
+ self._find_needed_keys()
+ # We actually use a slightly different ordering. We grab all of the
+ # parents first, and then grab the ordered requests.
+ needed_ids = [k[-1] for k in self.present_parents]
+ needed_ids.extend([k[-1] for k in self.ordered_keys])
+ inv_to_str = self.repo._serializer.write_inventory_to_string
+ for inv in self.repo.iter_inventories(needed_ids):
+ revision_id = inv.revision_id
+ key = (revision_id,)
+ if key in self.present_parents:
+                # Not a key we will transmit, which is a shame, since it
+                # means bundles don't work with stacked branches.
+ parent_ids = None
+ else:
+ parent_ids = [k[-1] for k in self.parent_map[key]]
+ as_bytes = inv_to_str(inv)
+ self._process_one_record(key, (as_bytes,))
+ if parent_ids is None:
+ continue
+ diff = self.diffs.pop(key)
+ sha1 = osutils.sha_string(as_bytes)
+ yield revision_id, parent_ids, sha1, diff
+
+
+class BundleWriter(object):
+ """Writer for bundle-format files.
+
+    This serves roughly the same purpose as ContainerWriter, but acts as a
+    layer on top of it.
+
+    Provides ways of writing the specific record types supported by this
+    bundle format.
+ """
+
+ def __init__(self, fileobj):
+ self._container = pack.ContainerWriter(self._write_encoded)
+ self._fileobj = fileobj
+ self._compressor = bz2.BZ2Compressor()
+
+ def _write_encoded(self, bytes):
+ """Write bzip2-encoded bytes to the file"""
+ self._fileobj.write(self._compressor.compress(bytes))
+
+ def begin(self):
+ """Start writing the bundle"""
+ self._fileobj.write(bundle_serializer._get_bundle_header(
+ bundle_serializer.v4_string))
+ self._fileobj.write('#\n')
+ self._container.begin()
+
+ def end(self):
+ """Finish writing the bundle"""
+ self._container.end()
+ self._fileobj.write(self._compressor.flush())
+
+ def add_multiparent_record(self, mp_bytes, sha1, parents, repo_kind,
+ revision_id, file_id):
+ """Add a record for a multi-parent diff
+
+ :mp_bytes: A multi-parent diff, as a bytestring
+ :sha1: The sha1 hash of the fulltext
+ :parents: a list of revision-ids of the parents
+ :repo_kind: The kind of object in the repository. May be 'file' or
+ 'inventory'
+ :revision_id: The revision id of the mpdiff being added.
+ :file_id: The file-id of the file, or None for inventories.
+ """
+ metadata = {'parents': parents,
+ 'storage_kind': 'mpdiff',
+ 'sha1': sha1}
+ self._add_record(mp_bytes, metadata, repo_kind, revision_id, file_id)
+
+ def add_fulltext_record(self, bytes, parents, repo_kind, revision_id):
+ """Add a record for a fulltext
+
+ :bytes: The fulltext, as a bytestring
+ :parents: a list of revision-ids of the parents
+ :repo_kind: The kind of object in the repository. May be 'revision' or
+ 'signature'
+ :revision_id: The revision id of the fulltext being added.
+ """
+        metadata = {'parents': parents,
+                    'storage_kind': 'fulltext'}
+        self._add_record(bytes, metadata, repo_kind, revision_id, None)
+
+ def add_info_record(self, **kwargs):
+ """Add an info record to the bundle
+
+ Any parameters may be supplied, except 'self' and 'storage_kind'.
+ Values must be lists, strings, integers, dicts, or a combination.
+ """
+ kwargs['storage_kind'] = 'header'
+ self._add_record(None, kwargs, 'info', None, None)
+
+ @staticmethod
+ def encode_name(content_kind, revision_id, file_id=None):
+ """Encode semantic ids as a container name"""
+ if content_kind not in ('revision', 'file', 'inventory', 'signature',
+ 'info'):
+ raise ValueError(content_kind)
+ if content_kind == 'file':
+ if file_id is None:
+ raise AssertionError()
+ else:
+ if file_id is not None:
+ raise AssertionError()
+ if content_kind == 'info':
+ if revision_id is not None:
+ raise AssertionError()
+ elif revision_id is None:
+ raise AssertionError()
+ names = [n.replace('/', '//') for n in
+ (content_kind, revision_id, file_id) if n is not None]
+ return '/'.join(names)
+
+ def _add_record(self, bytes, metadata, repo_kind, revision_id, file_id):
+ """Add a bundle record to the container.
+
+ Most bundle records are recorded as header/body pairs, with the
+ body being nameless. Records with storage_kind 'header' have no
+ body.
+ """
+ name = self.encode_name(repo_kind, revision_id, file_id)
+ encoded_metadata = bencode.bencode(metadata)
+ self._container.add_bytes_record(encoded_metadata, [(name, )])
+ if metadata['storage_kind'] != 'header':
+ self._container.add_bytes_record(bytes, [])
+
+
+class BundleReader(object):
+ """Reader for bundle-format files.
+
+ This serves roughly the same purpose as ContainerReader, but acts as a
+ layer on top of it, providing metadata, a semantic name, and a record
+ body
+ """
+
+ def __init__(self, fileobj, stream_input=True):
+ """Constructor
+
+        :param fileobj: a file containing a bzip2-encoded container
+        :param stream_input: If True, the BundleReader streams input rather than
+ reading it all into memory at once. Reading it into memory all at
+ once is (currently) faster.
+ """
+ line = fileobj.readline()
+ if line != '\n':
+ fileobj.readline()
+ self.patch_lines = []
+ if stream_input:
+ source_file = iterablefile.IterableFile(self.iter_decode(fileobj))
+ else:
+ source_file = StringIO(bz2.decompress(fileobj.read()))
+ self._container_file = source_file
+
+ @staticmethod
+ def iter_decode(fileobj):
+ """Iterate through decoded fragments of the file"""
+ decompressor = bz2.BZ2Decompressor()
+ for line in fileobj:
+ try:
+ yield decompressor.decompress(line)
+ except EOFError:
+ return
+
+ @staticmethod
+ def decode_name(name):
+ """Decode a name from its container form into a semantic form
+
+ :retval: content_kind, revision_id, file_id
+ """
+ segments = re.split('(//?)', name)
+ names = ['']
+ for segment in segments:
+ if segment == '//':
+ names[-1] += '/'
+ elif segment == '/':
+ names.append('')
+ else:
+ names[-1] += segment
+ content_kind = names[0]
+ revision_id = None
+ file_id = None
+ if len(names) > 1:
+ revision_id = names[1]
+ if len(names) > 2:
+ file_id = names[2]
+ return content_kind, revision_id, file_id
+
+ def iter_records(self):
+ """Iterate through bundle records
+
+ :return: a generator of (bytes, metadata, content_kind, revision_id,
+ file_id)
+ """
+ iterator = pack.iter_records_from_file(self._container_file)
+ for names, bytes in iterator:
+ if len(names) != 1:
+ raise errors.BadBundle('Record has %d names instead of 1'
+ % len(names))
+ metadata = bencode.bdecode(bytes)
+ if metadata['storage_kind'] == 'header':
+ bytes = None
+ else:
+ _unused, bytes = iterator.next()
+ yield (bytes, metadata) + self.decode_name(names[0][0])
+
+
+class BundleSerializerV4(bundle_serializer.BundleSerializer):
+ """Implement the high-level bundle interface"""
+
+ def write(self, repository, revision_ids, forced_bases, fileobj):
+ """Write a bundle to a file-like object
+
+ For backwards-compatibility only
+ """
+ write_op = BundleWriteOperation.from_old_args(repository, revision_ids,
+ forced_bases, fileobj)
+ return write_op.do_write()
+
+ def write_bundle(self, repository, target, base, fileobj):
+ """Write a bundle to a file object
+
+ :param repository: The repository to retrieve revision data from
+ :param target: The head revision to include ancestors of
+        :param base: The ancestor of the target to stop including ancestors
+ at.
+ :param fileobj: The file-like object to write to
+ """
+ write_op = BundleWriteOperation(base, target, repository, fileobj)
+ return write_op.do_write()
+
+ def read(self, file):
+ """return a reader object for a given file"""
+ bundle = BundleInfoV4(file, self)
+ return bundle
+
+ @staticmethod
+ def get_source_serializer(info):
+ """Retrieve the serializer for a given info object"""
+ return serializer.format_registry.get(info['serializer'])
+
+
+class BundleWriteOperation(object):
+ """Perform the operation of writing revisions to a bundle"""
+
+ @classmethod
+ def from_old_args(cls, repository, revision_ids, forced_bases, fileobj):
+ """Create a BundleWriteOperation from old-style arguments"""
+ base, target = cls.get_base_target(revision_ids, forced_bases,
+ repository)
+ return BundleWriteOperation(base, target, repository, fileobj,
+ revision_ids)
+
+ def __init__(self, base, target, repository, fileobj, revision_ids=None):
+ self.base = base
+ self.target = target
+ self.repository = repository
+ bundle = BundleWriter(fileobj)
+ self.bundle = bundle
+ if revision_ids is not None:
+ self.revision_ids = revision_ids
+ else:
+ graph = repository.get_graph()
+ revision_ids = graph.find_unique_ancestors(target, [base])
+ # Strip ghosts
+ parents = graph.get_parent_map(revision_ids)
+ self.revision_ids = [r for r in revision_ids if r in parents]
+ self.revision_keys = set([(revid,) for revid in self.revision_ids])
+
+ def do_write(self):
+ """Write all data to the bundle"""
+ trace.note(ngettext('Bundling %d revision.', 'Bundling %d revisions.',
+ len(self.revision_ids)), len(self.revision_ids))
+ self.repository.lock_read()
+ try:
+ self.bundle.begin()
+ self.write_info()
+ self.write_files()
+ self.write_revisions()
+ self.bundle.end()
+ finally:
+ self.repository.unlock()
+ return self.revision_ids
+
+ def write_info(self):
+ """Write format info"""
+ serializer_format = self.repository.get_serializer_format()
+ supports_rich_root = {True: 1, False: 0}[
+ self.repository.supports_rich_root()]
+ self.bundle.add_info_record(serializer=serializer_format,
+ supports_rich_root=supports_rich_root)
+
+ def write_files(self):
+ """Write bundle records for all revisions of all files"""
+ text_keys = []
+ altered_fileids = self.repository.fileids_altered_by_revision_ids(
+ self.revision_ids)
+ for file_id, revision_ids in altered_fileids.iteritems():
+ for revision_id in revision_ids:
+ text_keys.append((file_id, revision_id))
+ self._add_mp_records_keys('file', self.repository.texts, text_keys)
+
+ def write_revisions(self):
+ """Write bundle records for all revisions and signatures"""
+ inv_vf = self.repository.inventories
+ topological_order = [key[-1] for key in multiparent.topo_iter_keys(
+ inv_vf, self.revision_keys)]
+ revision_order = topological_order
+ if self.target is not None and self.target in self.revision_ids:
+ # Make sure the target revision is always the last entry
+ revision_order = list(topological_order)
+ revision_order.remove(self.target)
+ revision_order.append(self.target)
+ if self.repository._serializer.support_altered_by_hack:
+ # Repositories that support_altered_by_hack means that
+ # inventories.make_mpdiffs() contains all the data about the tree
+ # shape. Formats without support_altered_by_hack require
+ # chk_bytes/etc, so we use a different code path.
+ self._add_mp_records_keys('inventory', inv_vf,
+ [(revid,) for revid in topological_order])
+ else:
+ # Inventories should always be added in pure-topological order, so
+ # that we can apply the mpdiff for the child to the parent texts.
+ self._add_inventory_mpdiffs_from_serializer(topological_order)
+ self._add_revision_texts(revision_order)
+
+ def _add_inventory_mpdiffs_from_serializer(self, revision_order):
+ """Generate mpdiffs by serializing inventories.
+
+ The current repository only has part of the tree shape information in
+ the 'inventories' vf. So we use serializer.write_inventory_to_string to
+ get a 'full' representation of the tree shape, and then generate
+ mpdiffs on that data stream. This stream can then be reconstructed on
+ the other side.
+ """
+ inventory_key_order = [(r,) for r in revision_order]
+ generator = _MPDiffInventoryGenerator(self.repository,
+ inventory_key_order)
+ for revision_id, parent_ids, sha1, diff in generator.iter_diffs():
+ text = ''.join(diff.to_patch())
+ self.bundle.add_multiparent_record(text, sha1, parent_ids,
+ 'inventory', revision_id, None)
+
+ def _add_revision_texts(self, revision_order):
+ parent_map = self.repository.get_parent_map(revision_order)
+ revision_to_str = self.repository._serializer.write_revision_to_string
+ revisions = self.repository.get_revisions(revision_order)
+ for revision in revisions:
+ revision_id = revision.revision_id
+ parents = parent_map.get(revision_id, None)
+ revision_text = revision_to_str(revision)
+ self.bundle.add_fulltext_record(revision_text, parents,
+ 'revision', revision_id)
+ try:
+ self.bundle.add_fulltext_record(
+ self.repository.get_signature_text(
+ revision_id), parents, 'signature', revision_id)
+ except errors.NoSuchRevision:
+ pass
+
+ @staticmethod
+ def get_base_target(revision_ids, forced_bases, repository):
+ """Determine the base and target from old-style revision ids"""
+ if len(revision_ids) == 0:
+ return None, None
+ target = revision_ids[0]
+ base = forced_bases.get(target)
+ if base is None:
+ parents = repository.get_revision(target).parent_ids
+ if len(parents) == 0:
+ base = _mod_revision.NULL_REVISION
+ else:
+ base = parents[0]
+ return base, target
+
+ def _add_mp_records_keys(self, repo_kind, vf, keys):
+ """Add multi-parent diff records to a bundle"""
+ ordered_keys = list(multiparent.topo_iter_keys(vf, keys))
+ mpdiffs = vf.make_mpdiffs(ordered_keys)
+ sha1s = vf.get_sha1s(ordered_keys)
+ parent_map = vf.get_parent_map(ordered_keys)
+ for mpdiff, item_key, in zip(mpdiffs, ordered_keys):
+ sha1 = sha1s[item_key]
+ parents = [key[-1] for key in parent_map[item_key]]
+ text = ''.join(mpdiff.to_patch())
+ # Infer file id records as appropriate.
+ if len(item_key) == 2:
+ file_id = item_key[0]
+ else:
+ file_id = None
+ self.bundle.add_multiparent_record(text, sha1, parents, repo_kind,
+ item_key[-1], file_id)
+
+
+class BundleInfoV4(object):
+    """Provide (most of) the BundleInfo interface"""
+ def __init__(self, fileobj, serializer):
+ self._fileobj = fileobj
+ self._serializer = serializer
+ self.__real_revisions = None
+ self.__revisions = None
+
+ def install(self, repository):
+ return self.install_revisions(repository)
+
+ def install_revisions(self, repository, stream_input=True):
+ """Install this bundle's revisions into the specified repository
+
+        :param repository: The repository to install into
+ :param stream_input: If True, will stream input rather than reading it
+ all into memory at once. Reading it into memory all at once is
+ (currently) faster.
+ """
+ repository.lock_write()
+ try:
+ ri = RevisionInstaller(self.get_bundle_reader(stream_input),
+ self._serializer, repository)
+ return ri.install()
+ finally:
+ repository.unlock()
+
+ def get_merge_request(self, target_repo):
+ """Provide data for performing a merge
+
+ Returns suggested base, suggested target, and patch verification status
+ """
+ return None, self.target, 'inapplicable'
+
+ def get_bundle_reader(self, stream_input=True):
+ """Return a new BundleReader for the associated bundle
+
+        :param stream_input: If True, the BundleReader streams input rather than
+ reading it all into memory at once. Reading it into memory all at
+ once is (currently) faster.
+ """
+ self._fileobj.seek(0)
+ return BundleReader(self._fileobj, stream_input)
+
+ def _get_real_revisions(self):
+ if self.__real_revisions is None:
+ self.__real_revisions = []
+ bundle_reader = self.get_bundle_reader()
+ for bytes, metadata, repo_kind, revision_id, file_id in \
+ bundle_reader.iter_records():
+ if repo_kind == 'info':
+ serializer =\
+ self._serializer.get_source_serializer(metadata)
+ if repo_kind == 'revision':
+ rev = serializer.read_revision_from_string(bytes)
+ self.__real_revisions.append(rev)
+ return self.__real_revisions
+ real_revisions = property(_get_real_revisions)
+
+ def _get_revisions(self):
+ if self.__revisions is None:
+ self.__revisions = []
+ for revision in self.real_revisions:
+ self.__revisions.append(
+ bundle_data.RevisionInfo.from_revision(revision))
+ return self.__revisions
+
+ revisions = property(_get_revisions)
+
+ def _get_target(self):
+ return self.revisions[-1].revision_id
+
+ target = property(_get_target)
+
+
+class RevisionInstaller(object):
+ """Installs revisions into a repository"""
+
+ def __init__(self, container, serializer, repository):
+ self._container = container
+ self._serializer = serializer
+ self._repository = repository
+ self._info = None
+
+ def install(self):
+ """Perform the installation.
+
+ Must be called with the Repository locked.
+ """
+ self._repository.start_write_group()
+ try:
+ result = self._install_in_write_group()
+ except:
+ self._repository.abort_write_group()
+ raise
+ self._repository.commit_write_group()
+ return result
+
+ def _install_in_write_group(self):
+ current_file = None
+ current_versionedfile = None
+ pending_file_records = []
+ inventory_vf = None
+ pending_inventory_records = []
+ added_inv = set()
+ target_revision = None
+ for bytes, metadata, repo_kind, revision_id, file_id in\
+ self._container.iter_records():
+ if repo_kind == 'info':
+ if self._info is not None:
+ raise AssertionError()
+ self._handle_info(metadata)
+ if (pending_file_records and
+ (repo_kind, file_id) != ('file', current_file)):
+ # Flush the data for a single file - prevents memory
+ # spiking due to buffering all files in memory.
+ self._install_mp_records_keys(self._repository.texts,
+ pending_file_records)
+ current_file = None
+ del pending_file_records[:]
+ if len(pending_inventory_records) > 0 and repo_kind != 'inventory':
+ self._install_inventory_records(pending_inventory_records)
+ pending_inventory_records = []
+ if repo_kind == 'inventory':
+ pending_inventory_records.append(((revision_id,), metadata, bytes))
+ if repo_kind == 'revision':
+ target_revision = revision_id
+ self._install_revision(revision_id, metadata, bytes)
+ if repo_kind == 'signature':
+ self._install_signature(revision_id, metadata, bytes)
+ if repo_kind == 'file':
+ current_file = file_id
+ pending_file_records.append(((file_id, revision_id), metadata, bytes))
+ self._install_mp_records_keys(self._repository.texts, pending_file_records)
+ return target_revision
+
+ def _handle_info(self, info):
+ """Extract data from an info record"""
+ self._info = info
+ self._source_serializer = self._serializer.get_source_serializer(info)
+ if (info['supports_rich_root'] == 0 and
+ self._repository.supports_rich_root()):
+ self.update_root = True
+ else:
+ self.update_root = False
+
+ def _install_mp_records(self, versionedfile, records):
+ if len(records) == 0:
+ return
+ d_func = multiparent.MultiParent.from_patch
+ vf_records = [(r, m['parents'], m['sha1'], d_func(t)) for r, m, t in
+ records if r not in versionedfile]
+ versionedfile.add_mpdiffs(vf_records)
+
+ def _install_mp_records_keys(self, versionedfile, records):
+ d_func = multiparent.MultiParent.from_patch
+ vf_records = []
+ for key, meta, text in records:
+ # Adapt to tuple interface: A length two key is a file_id,
+ # revision_id pair, a length 1 key is a
+ # revision/signature/inventory. We need to do this because
+ # the metadata extraction from the bundle has not yet been updated
+ # to use the consistent tuple interface itself.
+ if len(key) == 2:
+ prefix = key[:1]
+ else:
+ prefix = ()
+ parents = [prefix + (parent,) for parent in meta['parents']]
+ vf_records.append((key, parents, meta['sha1'], d_func(text)))
+ versionedfile.add_mpdiffs(vf_records)
+
+ def _get_parent_inventory_texts(self, inventory_text_cache,
+ inventory_cache, parent_ids):
+ cached_parent_texts = {}
+ remaining_parent_ids = []
+ for parent_id in parent_ids:
+ p_text = inventory_text_cache.get(parent_id, None)
+ if p_text is None:
+ remaining_parent_ids.append(parent_id)
+ else:
+ cached_parent_texts[parent_id] = p_text
+ ghosts = ()
+ # TODO: Use inventory_cache to grab inventories we already have in
+ # memory
+ if remaining_parent_ids:
+ # first determine what keys are actually present in the local
+ # inventories object (don't use revisions as they haven't been
+ # installed yet.)
+ parent_keys = [(r,) for r in remaining_parent_ids]
+ present_parent_map = self._repository.inventories.get_parent_map(
+ parent_keys)
+ present_parent_ids = []
+ ghosts = set()
+ for p_id in remaining_parent_ids:
+ if (p_id,) in present_parent_map:
+ present_parent_ids.append(p_id)
+ else:
+ ghosts.add(p_id)
+ to_string = self._source_serializer.write_inventory_to_string
+ for parent_inv in self._repository.iter_inventories(
+ present_parent_ids):
+ p_text = to_string(parent_inv)
+ inventory_cache[parent_inv.revision_id] = parent_inv
+ cached_parent_texts[parent_inv.revision_id] = p_text
+ inventory_text_cache[parent_inv.revision_id] = p_text
+
+ parent_texts = [cached_parent_texts[parent_id]
+ for parent_id in parent_ids
+ if parent_id not in ghosts]
+ return parent_texts
+
+ def _install_inventory_records(self, records):
+ if (self._info['serializer'] == self._repository._serializer.format_num
+ and self._repository._serializer.support_altered_by_hack):
+ return self._install_mp_records_keys(self._repository.inventories,
+ records)
+ # Use a 10MB text cache, since these are string xml inventories. Note
+ # that 10MB is fairly small for large projects (a single inventory can
+ # be >5MB). Another possibility is to cache 10-20 inventory texts
+ # instead
+ inventory_text_cache = lru_cache.LRUSizeCache(10*1024*1024)
+ # Also cache the in-memory representation. This allows us to create
+ # inventory deltas to apply rather than calling add_inventory from
+ # scratch each time.
+ inventory_cache = lru_cache.LRUCache(10)
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ num_records = len(records)
+ for idx, (key, metadata, bytes) in enumerate(records):
+ pb.update('installing inventory', idx, num_records)
+ revision_id = key[-1]
+ parent_ids = metadata['parents']
+ # Note: This assumes the local ghosts are identical to the
+ # ghosts in the source, as the Bundle serialization
+ # format doesn't record ghosts.
+ p_texts = self._get_parent_inventory_texts(inventory_text_cache,
+ inventory_cache,
+ parent_ids)
+                # Why does to_lines() take strings as the source? It seems
+                # that it would have to cast them to a list of lines, which
+                # we then get back as lines and join back into a string.
+ target_lines = multiparent.MultiParent.from_patch(bytes
+ ).to_lines(p_texts)
+ inv_text = ''.join(target_lines)
+ del target_lines
+ sha1 = osutils.sha_string(inv_text)
+ if sha1 != metadata['sha1']:
+ raise errors.BadBundle("Can't convert to target format")
+ # Add this to the cache so we don't have to extract it again.
+ inventory_text_cache[revision_id] = inv_text
+ target_inv = self._source_serializer.read_inventory_from_string(
+ inv_text)
+ self._handle_root(target_inv, parent_ids)
+ parent_inv = None
+ if parent_ids:
+ parent_inv = inventory_cache.get(parent_ids[0], None)
+ try:
+ if parent_inv is None:
+ self._repository.add_inventory(revision_id, target_inv,
+ parent_ids)
+ else:
+ delta = target_inv._make_delta(parent_inv)
+ self._repository.add_inventory_by_delta(parent_ids[0],
+ delta, revision_id, parent_ids)
+ except errors.UnsupportedInventoryKind:
+ raise errors.IncompatibleRevision(repr(self._repository))
+ inventory_cache[revision_id] = target_inv
+ finally:
+ pb.finished()
+
+ def _handle_root(self, target_inv, parent_ids):
+ revision_id = target_inv.revision_id
+ if self.update_root:
+ text_key = (target_inv.root.file_id, revision_id)
+ parent_keys = [(target_inv.root.file_id, parent) for
+ parent in parent_ids]
+ self._repository.texts.add_lines(text_key, parent_keys, [])
+ elif not self._repository.supports_rich_root():
+ if target_inv.root.revision != revision_id:
+ raise errors.IncompatibleRevision(repr(self._repository))
+
+ def _install_revision(self, revision_id, metadata, text):
+ if self._repository.has_revision(revision_id):
+ return
+ revision = self._source_serializer.read_revision_from_string(text)
+ self._repository.add_revision(revision.revision_id, revision)
+
+ def _install_signature(self, revision_id, metadata, text):
+ transaction = self._repository.get_transaction()
+ if self._repository.has_signature_for_revision_id(revision_id):
+ return
+ self._repository.add_signature_text(revision_id, text)
diff --git a/bzrlib/bzr_distutils.py b/bzrlib/bzr_distutils.py
new file mode 100644
index 0000000..3b7f8ed
--- /dev/null
+++ b/bzrlib/bzr_distutils.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2007,2009,2011 Canonical Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This code is from bzr-explorer and modified for bzr.
+
+"""build_mo command for setup.py"""
+
+from __future__ import absolute_import
+
+from distutils import log
+from distutils.core import Command
+from distutils.dep_util import newer
+from distutils.spawn import find_executable
+import os
+import re
+
+
+class build_mo(Command):
+ """Subcommand of build command: build_mo"""
+
+ description = 'compile po files to mo files'
+
+ # List of options:
+ # - long name,
+ # - short name (None if no short name),
+ # - help string.
+ user_options = [('build-dir=', 'd', 'Directory to build locale files'),
+ ('output-base=', 'o', 'mo-files base name'),
+ ('source-dir=', None, 'Directory with sources po files'),
+ ('force', 'f', 'Force creation of mo files'),
+ ('lang=', None, 'Comma-separated list of languages '
+ 'to process'),
+ ]
+
+ boolean_options = ['force']
+
+ def initialize_options(self):
+ self.build_dir = None
+ self.output_base = None
+ self.source_dir = None
+ self.force = None
+ self.lang = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build', ('force', 'force'))
+ self.prj_name = self.distribution.get_name()
+ if self.build_dir is None:
+ self.build_dir = 'bzrlib/locale'
+ if not self.output_base:
+ self.output_base = self.prj_name or 'messages'
+ if self.source_dir is None:
+ self.source_dir = 'po'
+ if self.lang is None:
+ re_po = re.compile(r'^([a-zA-Z_]+)\.po$')
+ self.lang = []
+ for i in os.listdir(self.source_dir):
+ mo = re_po.match(i)
+ if mo:
+ self.lang.append(mo.group(1))
+ else:
+ self.lang = [i.strip() for i in self.lang.split(',') if i.strip()]
+
+ def run(self):
+ """Run msgfmt for each language"""
+ if not self.lang:
+ return
+
+ if find_executable('msgfmt') is None:
+ log.warn("GNU gettext msgfmt utility not found!")
+ log.warn("Skip compiling po files.")
+ return
+
+ if 'en' in self.lang:
+ if find_executable('msginit') is None:
+ log.warn("GNU gettext msginit utility not found!")
+ log.warn("Skip creating English PO file.")
+ else:
+ log.info('Creating English PO file...')
+ pot = (self.prj_name or 'messages') + '.pot'
+ en_po = 'en.po'
+ self.spawn(['msginit',
+ '--no-translator',
+ '-l', 'en',
+ '-i', os.path.join(self.source_dir, pot),
+ '-o', os.path.join(self.source_dir, en_po),
+ ])
+
+ basename = self.output_base
+ if not basename.endswith('.mo'):
+ basename += '.mo'
+
+ for lang in self.lang:
+ po = os.path.join(self.source_dir, lang + '.po')
+ if not os.path.isfile(po):
+ po = os.path.join('po', lang + '.po')
+ dir_ = os.path.join(self.build_dir, lang, 'LC_MESSAGES')
+ self.mkpath(dir_)
+ mo = os.path.join(dir_, basename)
+ if self.force or newer(po, mo):
+ log.info('Compile: %s -> %s' % (po, mo))
+ self.spawn(['msgfmt', '-o', mo, po])
+
+
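+# Usage sketch (illustrative; the setup.py wiring below is an assumption, not
+# part of this module): a project's setup.py can register the command via
+# cmdclass so that "python setup.py build_mo" compiles po/<lang>.po into
+# <build_dir>/<lang>/LC_MESSAGES/<output_base>.mo:
+#
+#   from distutils.core import setup
+#   from bzrlib.bzr_distutils import build_mo
+#
+#   setup(name='bzr',
+#         cmdclass={'build_mo': build_mo})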
diff --git a/bzrlib/bzrdir.py b/bzrlib/bzrdir.py
new file mode 100644
index 0000000..8e782cd
--- /dev/null
+++ b/bzrlib/bzrdir.py
@@ -0,0 +1,2315 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""BzrDir logic. The BzrDir is the basic control directory used by bzr.
+
+At format 7 this was split out into Branch, Repository and Checkout control
+directories.
+
+Note: This module has a lot of ``open`` functions/methods that return
+references to in-memory objects. As a rule, there are no matching ``close``
+methods. To free any associated resources, simply stop referencing the
+objects returned.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import bzrlib
+from bzrlib import (
+ branch as _mod_branch,
+ cleanup,
+ errors,
+ fetch,
+ graph,
+ lockable_files,
+ lockdir,
+ osutils,
+ pyutils,
+ remote,
+ repository,
+ revision as _mod_revision,
+ transport as _mod_transport,
+ ui,
+ urlutils,
+ vf_search,
+ win32utils,
+ workingtree_3,
+ workingtree_4,
+ )
+from bzrlib.branchfmt import fullhistory as fullhistorybranch
+from bzrlib.repofmt import knitpack_repo
+from bzrlib.transport import (
+ do_catching_redirections,
+ local,
+ )
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.trace import (
+ mutter,
+ note,
+ )
+
+from bzrlib import (
+ config,
+ controldir,
+ registry,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+
+
+class BzrDir(controldir.ControlDir):
+ """A .bzr control diretory.
+
+ BzrDir instances let you create or open any of the things that can be
+ found within .bzr - checkouts, branches and repositories.
+
+ :ivar transport:
+ the transport which this bzr dir is rooted at (i.e. file:///.../.bzr/)
+ :ivar root_transport:
+ a transport connected to the directory this bzr was opened from
+ (i.e. the parent directory holding the .bzr directory).
+
+ Everything in the bzrdir should have the same file permissions.
+
+ :cvar hooks: An instance of BzrDirHooks.
+ """
+
+ def break_lock(self):
+ """Invoke break_lock on the first object in the bzrdir.
+
+ If there is a tree, the tree is opened and break_lock() called.
+ Otherwise, branch is tried, and finally repository.
+ """
+ # XXX: This seems more like a UI function than something that really
+ # belongs in this class.
+ try:
+ thing_to_unlock = self.open_workingtree()
+ except (errors.NotLocalUrl, errors.NoWorkingTree):
+ try:
+ thing_to_unlock = self.open_branch()
+ except errors.NotBranchError:
+ try:
+ thing_to_unlock = self.open_repository()
+ except errors.NoRepositoryPresent:
+ return
+ thing_to_unlock.break_lock()
+
+ def check_conversion_target(self, target_format):
+ """Check that a bzrdir as a whole can be converted to a new format."""
+ # The only current restriction is that the repository content can be
+ # fetched compatibly with the target.
+ target_repo_format = target_format.repository_format
+ try:
+ self.open_repository()._format.check_conversion_target(
+ target_repo_format)
+ except errors.NoRepositoryPresent:
+ # No repo, no problem.
+ pass
+
+ def clone_on_transport(self, transport, revision_id=None,
+ force_new_repo=False, preserve_stacking=False, stacked_on=None,
+ create_prefix=False, use_existing_dir=True, no_tree=False):
+ """Clone this bzrdir and its contents to transport verbatim.
+
+ :param transport: The transport for the location to produce the clone
+ at. If the target directory does not exist, it will be created.
+ :param revision_id: The tip revision-id to use for any branch or
+ working tree. If not None, then the clone operation may tune
+ itself to download less data.
+ :param force_new_repo: Do not use a shared repository for the target,
+ even if one is available.
+ :param preserve_stacking: When cloning a stacked branch, stack the
+ new branch on top of the other branch's stacked-on branch.
+ :param create_prefix: Create any missing directories leading up to
+ to_transport.
+ :param use_existing_dir: Use an existing directory if one exists.
+ :param no_tree: If set to true prevents creation of a working tree.
+ """
+ # Overview: put together a broad description of what we want to end up
+ # with; then make as few api calls as possible to do it.
+
+ # We may want to create a repo/branch/tree, if we do so what format
+ # would we want for each:
+ require_stacking = (stacked_on is not None)
+ format = self.cloning_metadir(require_stacking)
+
+ # Figure out what objects we want:
+ try:
+ local_repo = self.find_repository()
+ except errors.NoRepositoryPresent:
+ local_repo = None
+ try:
+ local_branch = self.open_branch()
+ except errors.NotBranchError:
+ local_branch = None
+ else:
+ # enable fallbacks when branch is not a branch reference
+ if local_branch.repository.has_same_location(local_repo):
+ local_repo = local_branch.repository
+ if preserve_stacking:
+ try:
+ stacked_on = local_branch.get_stacked_on_url()
+ except (errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat,
+ errors.NotStacked):
+ pass
+ # Bug: We create a metadir without knowing if it can support stacking,
+ # we should look up the policy needs first, or just use it as a hint,
+ # or something.
+ if local_repo:
+ make_working_trees = local_repo.make_working_trees() and not no_tree
+ want_shared = local_repo.is_shared()
+ repo_format_name = format.repository_format.network_name()
+ else:
+ make_working_trees = False
+ want_shared = False
+ repo_format_name = None
+
+ result_repo, result, require_stacking, repository_policy = \
+ format.initialize_on_transport_ex(transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=self.root_transport.base,
+ repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=want_shared)
+ if repo_format_name:
+ try:
+ # If the result repository is in the same place as the
+ # resulting bzr dir, it will have no content, further if the
+ # result is not stacked then we know all content should be
+ # copied, and finally if we are copying up to a specific
+ # revision_id then we can use the pending-ancestry-result which
+ # does not require traversing all of history to describe it.
+ if (result_repo.user_url == result.user_url
+ and not require_stacking and
+ revision_id is not None):
+ fetch_spec = vf_search.PendingAncestryResult(
+ [revision_id], local_repo)
+ result_repo.fetch(local_repo, fetch_spec=fetch_spec)
+ else:
+ result_repo.fetch(local_repo, revision_id=revision_id)
+ finally:
+ result_repo.unlock()
+ else:
+ if result_repo is not None:
+ raise AssertionError('result_repo not None(%r)' % result_repo)
+ # 1. If there is a branch present, make sure its content is available
+ # in the target repository, then clone it.
+ if local_branch is not None:
+ result_branch = local_branch.clone(result, revision_id=revision_id,
+ repository_policy=repository_policy)
+ try:
+ # Cheaper to check if the target is not local, than to try making
+ # the tree and fail.
+ result.root_transport.local_abspath('.')
+ if result_repo is None or result_repo.make_working_trees():
+ self.open_workingtree().clone(result, revision_id=revision_id)
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ pass
+ return result
+
+ # TODO: This should be given a Transport, and should chdir up; otherwise
+ # this will open a new connection.
+ def _make_tail(self, url):
+ t = _mod_transport.get_transport(url)
+ t.ensure_base()
+
+ def determine_repository_policy(self, force_new_repo=False, stack_on=None,
+ stack_on_pwd=None, require_stacking=False):
+ """Return an object representing a policy to use.
+
+ This controls whether a new repository is created, and the format of
+ that repository, or some existing shared repository used instead.
+
+ If stack_on is supplied, will not seek a containing shared repo.
+
+ :param force_new_repo: If True, require a new repository to be created.
+ :param stack_on: If supplied, the location to stack on. If not
+ supplied, a default_stack_on location may be used.
+ :param stack_on_pwd: If stack_on is relative, the location it is
+ relative to.
+ """
+ def repository_policy(found_bzrdir):
+ stack_on = None
+ stack_on_pwd = None
+ config = found_bzrdir.get_config()
+ stop = False
+ stack_on = config.get_default_stack_on()
+ if stack_on is not None:
+ stack_on_pwd = found_bzrdir.user_url
+ stop = True
+ # does it have a repository ?
+ try:
+ repository = found_bzrdir.open_repository()
+ except errors.NoRepositoryPresent:
+ repository = None
+ else:
+ if (found_bzrdir.user_url != self.user_url
+ and not repository.is_shared()):
+ # Don't look higher, can't use a higher shared repo.
+ repository = None
+ stop = True
+ else:
+ stop = True
+ if not stop:
+ return None, False
+ if repository:
+ return UseExistingRepository(repository, stack_on,
+ stack_on_pwd, require_stacking=require_stacking), True
+ else:
+ return CreateRepository(self, stack_on, stack_on_pwd,
+ require_stacking=require_stacking), True
+
+ if not force_new_repo:
+ if stack_on is None:
+ policy = self._find_containing(repository_policy)
+ if policy is not None:
+ return policy
+ else:
+ try:
+ return UseExistingRepository(self.open_repository(),
+ stack_on, stack_on_pwd,
+ require_stacking=require_stacking)
+ except errors.NoRepositoryPresent:
+ pass
+ return CreateRepository(self, stack_on, stack_on_pwd,
+ require_stacking=require_stacking)
+
+ def _find_or_create_repository(self, force_new_repo):
+ """Create a new repository if needed, returning the repository."""
+ policy = self.determine_repository_policy(force_new_repo)
+ return policy.acquire_repository()[0]
+
+ def _find_source_repo(self, add_cleanup, source_branch):
+ """Find the source branch and repo for a sprout operation.
+
+ This is a helper intended for use by _sprout.
+
+ :returns: (source_branch, source_repository). Either or both may be
+ None. If not None, they will be read-locked (and their unlock(s)
+ scheduled via the add_cleanup param).
+ """
+ if source_branch is not None:
+ add_cleanup(source_branch.lock_read().unlock)
+ return source_branch, source_branch.repository
+ try:
+ source_branch = self.open_branch()
+ source_repository = source_branch.repository
+ except errors.NotBranchError:
+ source_branch = None
+ try:
+ source_repository = self.open_repository()
+ except errors.NoRepositoryPresent:
+ source_repository = None
+ else:
+ add_cleanup(source_repository.lock_read().unlock)
+ else:
+ add_cleanup(source_branch.lock_read().unlock)
+ return source_branch, source_repository
+
+ def sprout(self, url, revision_id=None, force_new_repo=False,
+ recurse='down', possible_transports=None,
+ accelerator_tree=None, hardlink=False, stacked=False,
+ source_branch=None, create_tree_if_local=True):
+ """Create a copy of this controldir prepared for use as a new line of
+ development.
+
+ If url's last component does not exist, it will be created.
+
+ Attributes related to the identity of the source branch, such as the
+ branch nickname, will be cleaned; a working tree is created whether
+ one existed before or not; and a local branch is always
+ created.
+
+ if revision_id is not None, then the clone operation may tune
+ itself to download less data.
+
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ :param hardlink: If true, hard-link files from accelerator_tree,
+ where possible.
+ :param stacked: If true, create a stacked branch referring to the
+ location of this control directory.
+ :param create_tree_if_local: If true, a working-tree will be created
+ when working locally.
+ :return: The created control directory
+ """
+ operation = cleanup.OperationWithCleanups(self._sprout)
+ return operation.run(url, revision_id=revision_id,
+ force_new_repo=force_new_repo, recurse=recurse,
+ possible_transports=possible_transports,
+ accelerator_tree=accelerator_tree, hardlink=hardlink,
+ stacked=stacked, source_branch=source_branch,
+ create_tree_if_local=create_tree_if_local)
+
+ def _sprout(self, op, url, revision_id=None, force_new_repo=False,
+ recurse='down', possible_transports=None,
+ accelerator_tree=None, hardlink=False, stacked=False,
+ source_branch=None, create_tree_if_local=True):
+ add_cleanup = op.add_cleanup
+ fetch_spec_factory = fetch.FetchSpecFactory()
+ if revision_id is not None:
+ fetch_spec_factory.add_revision_ids([revision_id])
+ fetch_spec_factory.source_branch_stop_revision_id = revision_id
+ if possible_transports is None:
+ possible_transports = []
+ else:
+ possible_transports = list(possible_transports) + [
+ self.root_transport]
+ target_transport = _mod_transport.get_transport(url,
+ possible_transports)
+ target_transport.ensure_base()
+ cloning_format = self.cloning_metadir(stacked)
+ # Create/update the result branch
+ try:
+ result = controldir.ControlDir.open_from_transport(target_transport)
+ except errors.NotBranchError:
+ result = cloning_format.initialize_on_transport(target_transport)
+ source_branch, source_repository = self._find_source_repo(
+ add_cleanup, source_branch)
+ fetch_spec_factory.source_branch = source_branch
+ # if a stacked branch wasn't requested, we don't create one
+ # even if the origin was stacked
+ if stacked and source_branch is not None:
+ stacked_branch_url = self.root_transport.base
+ else:
+ stacked_branch_url = None
+ repository_policy = result.determine_repository_policy(
+ force_new_repo, stacked_branch_url, require_stacking=stacked)
+ result_repo, is_new_repo = repository_policy.acquire_repository(
+ possible_transports=possible_transports)
+ add_cleanup(result_repo.lock_write().unlock)
+ fetch_spec_factory.source_repo = source_repository
+ fetch_spec_factory.target_repo = result_repo
+ if stacked or (len(result_repo._fallback_repositories) != 0):
+ target_repo_kind = fetch.TargetRepoKinds.STACKED
+ elif is_new_repo:
+ target_repo_kind = fetch.TargetRepoKinds.EMPTY
+ else:
+ target_repo_kind = fetch.TargetRepoKinds.PREEXISTING
+ fetch_spec_factory.target_repo_kind = target_repo_kind
+ if source_repository is not None:
+ fetch_spec = fetch_spec_factory.make_fetch_spec()
+ result_repo.fetch(source_repository, fetch_spec=fetch_spec)
+
+ if source_branch is None:
+ # this is for sprouting a controldir without a branch; is that
+ # actually useful?
+ # Not especially, but it's part of the contract.
+ result_branch = result.create_branch()
+ else:
+ result_branch = source_branch.sprout(result,
+ revision_id=revision_id, repository_policy=repository_policy,
+ repository=result_repo)
+ mutter("created new branch %r" % (result_branch,))
+
+ # Create/update the result working tree
+ if (create_tree_if_local and not result.has_workingtree() and
+ isinstance(target_transport, local.LocalTransport) and
+ (result_repo is None or result_repo.make_working_trees())):
+ wt = result.create_workingtree(accelerator_tree=accelerator_tree,
+ hardlink=hardlink, from_branch=result_branch)
+ wt.lock_write()
+ try:
+ if wt.path2id('') is None:
+ try:
+ wt.set_root_id(self.open_workingtree().get_root_id())
+ except errors.NoWorkingTree:
+ pass
+ finally:
+ wt.unlock()
+ else:
+ wt = None
+ if recurse == 'down':
+ basis = None
+ if wt is not None:
+ basis = wt.basis_tree()
+ elif result_branch is not None:
+ basis = result_branch.basis_tree()
+ elif source_branch is not None:
+ basis = source_branch.basis_tree()
+ if basis is not None:
+ add_cleanup(basis.lock_read().unlock)
+ subtrees = basis.iter_references()
+ else:
+ subtrees = []
+ for path, file_id in subtrees:
+ target = urlutils.join(url, urlutils.escape(path))
+ sublocation = source_branch.reference_parent(file_id, path)
+ sublocation.bzrdir.sprout(target,
+ basis.get_reference_revision(file_id, path),
+ force_new_repo=force_new_repo, recurse=recurse,
+ stacked=stacked)
+ return result
+
+ def _available_backup_name(self, base):
+ """Find a non-existing backup file name based on base.
+
+ See bzrlib.osutils.available_backup_name about race conditions.
+ """
+ return osutils.available_backup_name(base, self.root_transport.has)
+
+ def backup_bzrdir(self):
+ """Backup this bzr control directory.
+
+ :return: Tuple with old path name and new path name
+ """
+
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ old_path = self.root_transport.abspath('.bzr')
+ backup_dir = self._available_backup_name('backup.bzr')
+ new_path = self.root_transport.abspath(backup_dir)
+ ui.ui_factory.note(gettext('making backup of {0}\n to {1}').format(
+ urlutils.unescape_for_display(old_path, 'utf-8'),
+ urlutils.unescape_for_display(new_path, 'utf-8')))
+ self.root_transport.copy_tree('.bzr', backup_dir)
+ return (old_path, new_path)
+ finally:
+ pb.finished()
+
+ def retire_bzrdir(self, limit=10000):
+ """Permanently disable the bzrdir.
+
+ This is done by renaming it to give the user some ability to recover
+ if there was a problem.
+
+ This will have horrible consequences if anyone has anything locked or
+ in use.
+
+ :param limit: number of times to retry
+ """
+ i = 0
+ while True:
+ try:
+ to_path = '.bzr.retired.%d' % i
+ self.root_transport.rename('.bzr', to_path)
+ note(gettext("renamed {0} to {1}").format(
+ self.root_transport.abspath('.bzr'), to_path))
+ return
+ except (errors.TransportError, IOError, errors.PathError):
+ i += 1
+ if i > limit:
+ raise
+ else:
+ pass
+
+ def _find_containing(self, evaluate):
+ """Find something in a containing control directory.
+
+ This method will scan containing control dirs, until it finds what
+ it is looking for, decides that it will never find it, or runs out
+ of containing control directories to check.
+
+ It is used to implement find_repository and
+ determine_repository_policy.
+
+ :param evaluate: A function returning (value, stop). If stop is True,
+ the value will be returned.
+ """
+ found_bzrdir = self
+ while True:
+ result, stop = evaluate(found_bzrdir)
+ if stop:
+ return result
+ next_transport = found_bzrdir.root_transport.clone('..')
+ if (found_bzrdir.user_url == next_transport.base):
+ # top of the file system
+ return None
+ # find the next containing bzrdir
+ try:
+ found_bzrdir = self.open_containing_from_transport(
+ next_transport)[0]
+ except errors.NotBranchError:
+ return None
+
+ def find_repository(self):
+ """Find the repository that should be used.
+
+ This does not require a branch as we use it to find the repo for
+ new branches as well as to hook existing branches up to their
+ repository.
+ """
+ def usable_repository(found_bzrdir):
+ # does it have a repository ?
+ try:
+ repository = found_bzrdir.open_repository()
+ except errors.NoRepositoryPresent:
+ return None, False
+ if found_bzrdir.user_url == self.user_url:
+ return repository, True
+ elif repository.is_shared():
+ return repository, True
+ else:
+ return None, True
+
+ found_repo = self._find_containing(usable_repository)
+ if found_repo is None:
+ raise errors.NoRepositoryPresent(self)
+ return found_repo
+
+ def _find_creation_modes(self):
+ """Determine the appropriate modes for files and directories.
+
+ They're always set to be consistent with the base directory,
+ assuming that this transport allows setting modes.
+ """
+ # TODO: Do we need or want an option (maybe a config setting) to turn
+ # this off or override it for particular locations? -- mbp 20080512
+ if self._mode_check_done:
+ return
+ self._mode_check_done = True
+ try:
+ st = self.transport.stat('.')
+ except errors.TransportNotPossible:
+ self._dir_mode = None
+ self._file_mode = None
+ else:
+ # Check the directory mode, but also make sure the created
+ # directories and files are read-write for this user. This is
+ # mostly a workaround for filesystems which lie about being able to
+ # write to a directory (cygwin & win32)
+ if (st.st_mode & 07777 == 00000):
+ # FTP allows stat but does not return dir/file modes
+ self._dir_mode = None
+ self._file_mode = None
+ else:
+ self._dir_mode = (st.st_mode & 07777) | 00700
+ # Remove the sticky and execute bits for files
+ self._file_mode = self._dir_mode & ~07111
+
+ def _get_file_mode(self):
+ """Return Unix mode for newly created files, or None.
+ """
+ if not self._mode_check_done:
+ self._find_creation_modes()
+ return self._file_mode
+
+ def _get_dir_mode(self):
+ """Return Unix mode for newly created directories, or None.
+ """
+ if not self._mode_check_done:
+ self._find_creation_modes()
+ return self._dir_mode
+
+ def get_config(self):
+ """Get configuration for this BzrDir."""
+ return config.BzrDirConfig(self)
+
+ def _get_config(self):
+ """By default, no configuration is available."""
+ return None
+
+ def __init__(self, _transport, _format):
+ """Initialize a Bzr control dir object.
+
+ Only really common logic should reside here; concrete classes should
+ be made with varying behaviours.
+
+ :param _format: the format that is creating this BzrDir instance.
+ :param _transport: the transport this dir is based at.
+ """
+ self._format = _format
+ # these are also under the more standard names of
+ # control_transport and user_transport
+ self.transport = _transport.clone('.bzr')
+ self.root_transport = _transport
+ self._mode_check_done = False
+
+ @property
+ def user_transport(self):
+ return self.root_transport
+
+ @property
+ def control_transport(self):
+ return self.transport
+
+ def is_control_filename(self, filename):
+ """True if filename is the name of a path which is reserved for bzrdir's.
+
+ :param filename: A filename within the root transport of this bzrdir.
+
+ This is true IF and ONLY IF the filename is part of the namespace reserved
+ for bzr control dirs. Currently this is the '.bzr' directory in the root
+ of the root_transport.
+ """
+ # this might be better on the BzrDirFormat class because it refers to
+ # all the possible bzrdir disk formats.
+ # This method is tested via the workingtree is_control_filename tests -
+ # it was extracted from WorkingTree.is_control_filename. If the method's
+ # contract is extended beyond the current trivial implementation, please
+ # add new tests for it to the appropriate place.
+ return filename == '.bzr' or filename.startswith('.bzr/')
+
+ def _cloning_metadir(self):
+ """Produce a metadir suitable for cloning with.
+
+ :returns: (destination_bzrdir_format, source_repository)
+ """
+ result_format = self._format.__class__()
+ try:
+ try:
+ branch = self.open_branch(ignore_fallbacks=True)
+ source_repository = branch.repository
+ result_format._branch_format = branch._format
+ except errors.NotBranchError:
+ source_branch = None
+ source_repository = self.open_repository()
+ except errors.NoRepositoryPresent:
+ source_repository = None
+ else:
+ # XXX TODO: This isinstance is here because we have not implemented
+ # the fix recommended in bug # 103195 - to delegate this choice to the
+ # repository itself.
+ repo_format = source_repository._format
+ if isinstance(repo_format, remote.RemoteRepositoryFormat):
+ source_repository._ensure_real()
+ repo_format = source_repository._real_repository._format
+ result_format.repository_format = repo_format
+ try:
+ # TODO: Couldn't we just probe for the format in these cases,
+ # rather than opening the whole tree? It would be a little
+ # faster. mbp 20070401
+ tree = self.open_workingtree(recommend_upgrade=False)
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ result_format.workingtree_format = None
+ else:
+ result_format.workingtree_format = tree._format.__class__()
+ return result_format, source_repository
+
+ def cloning_metadir(self, require_stacking=False):
+ """Produce a metadir suitable for cloning or sprouting with.
+
+ These operations may produce workingtrees (yes, even though they're
+ "cloning" something that doesn't have a tree), so a viable workingtree
+ format must be selected.
+
+ :param require_stacking: If True, non-stackable formats will be upgraded
+ to similar stackable formats.
+ :returns: a ControlDirFormat with all component formats either set
+ appropriately or set to None if that component should not be
+ created.
+ """
+ format, repository = self._cloning_metadir()
+ if format._workingtree_format is None:
+ # No tree in self.
+ if repository is None:
+ # No repository either
+ return format
+ # We have a repository, so set a working tree? (Why? This seems to
+ # contradict the stated return value in the docstring).
+ tree_format = repository._format._matchingbzrdir.workingtree_format
+ format.workingtree_format = tree_format.__class__()
+ if require_stacking:
+ format.require_stacking()
+ return format
+
+ def get_branch_transport(self, branch_format, name=None):
+ """Get the transport for use by branch format in this BzrDir.
+
+ Note that bzr dirs that do not support format strings will raise
+ IncompatibleFormat if the branch format they are given has
+ a format string, and vice versa.
+
+ If branch_format is None, the transport is returned with no
+ checking. If it is not None, then the returned transport is
+ guaranteed to point to an existing directory ready for use.
+ """
+ raise NotImplementedError(self.get_branch_transport)
+
+ def get_repository_transport(self, repository_format):
+ """Get the transport for use by repository format in this BzrDir.
+
+ Note that bzr dirs that do not support format strings will raise
+ IncompatibleFormat if the repository format they are given has
+ a format string, and vice versa.
+
+ If repository_format is None, the transport is returned with no
+ checking. If it is not None, then the returned transport is
+ guaranteed to point to an existing directory ready for use.
+ """
+ raise NotImplementedError(self.get_repository_transport)
+
+ def get_workingtree_transport(self, tree_format):
+ """Get the transport for use by workingtree format in this BzrDir.
+
+ Note that bzr dirs that do not support format strings will raise
+ IncompatibleFormat if the workingtree format they are given has a
+ format string, and vice versa.
+
+ If workingtree_format is None, the transport is returned with no
+ checking. If it is not None, then the returned transport is
+ guaranteed to point to an existing directory ready for use.
+ """
+ raise NotImplementedError(self.get_workingtree_transport)
+
+ @classmethod
+ def create(cls, base, format=None, possible_transports=None):
+ """Create a new BzrDir at the url 'base'.
+
+ :param format: If supplied, the format of branch to create. If not
+ supplied, the default is used.
+ :param possible_transports: If supplied, a list of transports that
+ can be reused to share a remote connection.
+ """
+ if cls is not BzrDir:
+ raise AssertionError("BzrDir.create always creates the "
+ "default format, not one of %r" % cls)
+ return controldir.ControlDir.create(base, format=format,
+ possible_transports=possible_transports)
+
+ def __repr__(self):
+ return "<%s at %r>" % (self.__class__.__name__, self.user_url)
+
+ def update_feature_flags(self, updated_flags):
+ """Update the features required by this bzrdir.
+
+ :param updated_flags: Dictionary mapping feature names to necessities
+ A necessity can be None to indicate the feature should be removed
+ """
+ self.control_files.lock_write()
+ try:
+ self._format._update_feature_flags(updated_flags)
+ self.transport.put_bytes('branch-format', self._format.as_string())
+ finally:
+ self.control_files.unlock()
+
+
+class BzrDirMeta1(BzrDir):
+ """A .bzr meta version 1 control object.
+
+ This is the first control object where the
+ individual aspects are really split out: there are separate repository,
+ workingtree and branch subdirectories and any subset of the three can be
+ present within a BzrDir.
+ """
+
+ def _get_branch_path(self, name):
+ """Obtain the branch path to use.
+
+ This uses the API specified branch name first, and then falls back to
+ the branch name specified in the URL. If neither of those is specified,
+ it uses the default branch.
+
+ :param name: Optional branch name to use
+ :return: Relative path to branch
+ """
+ if name == "":
+ return 'branch'
+ return urlutils.join('branches', name.encode("utf-8"))
+
+ def _read_branch_list(self):
+ """Read the branch list.
+
+ :return: List of utf-8 encoded branch names.
+ """
+ try:
+ f = self.control_transport.get('branch-list')
+ except errors.NoSuchFile:
+ return []
+
+ ret = []
+ try:
+ for name in f:
+ ret.append(name.rstrip("\n"))
+ finally:
+ f.close()
+ return ret
+
+ def _write_branch_list(self, branches):
+ """Write out the branch list.
+
+ :param branches: List of utf-8 branch names to write
+ """
+ self.transport.put_bytes('branch-list',
+ "".join([name+"\n" for name in branches]))
+
+ def __init__(self, _transport, _format):
+ super(BzrDirMeta1, self).__init__(_transport, _format)
+ self.control_files = lockable_files.LockableFiles(
+ self.control_transport, self._format._lock_file_name,
+ self._format._lock_class)
+
+ def can_convert_format(self):
+ """See BzrDir.can_convert_format()."""
+ return True
+
+ def create_branch(self, name=None, repository=None,
+ append_revisions_only=None):
+ """See ControlDir.create_branch."""
+ if name is None:
+ name = self._get_selected_branch()
+ return self._format.get_branch_format().initialize(self, name=name,
+ repository=repository,
+ append_revisions_only=append_revisions_only)
+
+ def destroy_branch(self, name=None):
+ """See ControlDir.destroy_branch."""
+ if name is None:
+ name = self._get_selected_branch()
+ path = self._get_branch_path(name)
+ if name != "":
+ self.control_files.lock_write()
+ try:
+ branches = self._read_branch_list()
+ try:
+ branches.remove(name.encode("utf-8"))
+ except ValueError:
+ raise errors.NotBranchError(name)
+ self._write_branch_list(branches)
+ finally:
+ self.control_files.unlock()
+ try:
+ self.transport.delete_tree(path)
+ except errors.NoSuchFile:
+ raise errors.NotBranchError(path=urlutils.join(self.transport.base,
+ path), bzrdir=self)
+
+ def create_repository(self, shared=False):
+ """See BzrDir.create_repository."""
+ return self._format.repository_format.initialize(self, shared)
+
+ def destroy_repository(self):
+ """See BzrDir.destroy_repository."""
+ try:
+ self.transport.delete_tree('repository')
+ except errors.NoSuchFile:
+ raise errors.NoRepositoryPresent(self)
+
+ def create_workingtree(self, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """See BzrDir.create_workingtree."""
+ return self._format.workingtree_format.initialize(
+ self, revision_id, from_branch=from_branch,
+ accelerator_tree=accelerator_tree, hardlink=hardlink)
+
+ def destroy_workingtree(self):
+ """See BzrDir.destroy_workingtree."""
+ wt = self.open_workingtree(recommend_upgrade=False)
+ repository = wt.branch.repository
+ empty = repository.revision_tree(_mod_revision.NULL_REVISION)
+ # We ignore the conflicts returned by wt.revert since we're about to
+ # delete the wt metadata anyway, all that should be left here are
+ # detritus. But see bug #634470 about subtree .bzr dirs.
+ conflicts = wt.revert(old_tree=empty)
+ self.destroy_workingtree_metadata()
+
+ def destroy_workingtree_metadata(self):
+ self.transport.delete_tree('checkout')
+
+ def find_branch_format(self, name=None):
+ """Find the branch 'format' for this bzrdir.
+
+ This might be a synthetic object for e.g. RemoteBranch and SVN.
+ """
+ from bzrlib.branch import BranchFormatMetadir
+ return BranchFormatMetadir.find_format(self, name=name)
+
+ def _get_mkdir_mode(self):
+ """Figure out the mode to use when creating a bzrdir subdir."""
+ temp_control = lockable_files.LockableFiles(self.transport, '',
+ lockable_files.TransportLock)
+ return temp_control._dir_mode
+
+ def get_branch_reference(self, name=None):
+ """See BzrDir.get_branch_reference()."""
+ from bzrlib.branch import BranchFormatMetadir
+ format = BranchFormatMetadir.find_format(self, name=name)
+ return format.get_reference(self, name=name)
+
+ def set_branch_reference(self, target_branch, name=None):
+ format = _mod_branch.BranchReferenceFormat()
+ return format.initialize(self, target_branch=target_branch, name=name)
+
+ def get_branch_transport(self, branch_format, name=None):
+ """See BzrDir.get_branch_transport()."""
+ if name is None:
+ name = self._get_selected_branch()
+ path = self._get_branch_path(name)
+ # XXX: this shouldn't implicitly create the directory if it's just
+ # promising to get a transport -- mbp 20090727
+ if branch_format is None:
+ return self.transport.clone(path)
+ try:
+ branch_format.get_format_string()
+ except NotImplementedError:
+ raise errors.IncompatibleFormat(branch_format, self._format)
+ if name != "":
+ branches = self._read_branch_list()
+ utf8_name = name.encode("utf-8")
+ if utf8_name not in branches:
+ self.control_files.lock_write()
+ try:
+ branches = self._read_branch_list()
+ dirname = urlutils.dirname(utf8_name)
+ if dirname != "" and dirname in branches:
+ raise errors.ParentBranchExists(name)
+ child_branches = [
+ b.startswith(utf8_name+"/") for b in branches]
+ if any(child_branches):
+ raise errors.AlreadyBranchError(name)
+ branches.append(utf8_name)
+ self._write_branch_list(branches)
+ finally:
+ self.control_files.unlock()
+ branch_transport = self.transport.clone(path)
+ mode = self._get_mkdir_mode()
+ branch_transport.create_prefix(mode=mode)
+ try:
+ self.transport.mkdir(path, mode=mode)
+ except errors.FileExists:
+ pass
+ return self.transport.clone(path)
+
+ def get_repository_transport(self, repository_format):
+ """See BzrDir.get_repository_transport()."""
+ if repository_format is None:
+ return self.transport.clone('repository')
+ try:
+ repository_format.get_format_string()
+ except NotImplementedError:
+ raise errors.IncompatibleFormat(repository_format, self._format)
+ try:
+ self.transport.mkdir('repository', mode=self._get_mkdir_mode())
+ except errors.FileExists:
+ pass
+ return self.transport.clone('repository')
+
+ def get_workingtree_transport(self, workingtree_format):
+ """See BzrDir.get_workingtree_transport()."""
+ if workingtree_format is None:
+ return self.transport.clone('checkout')
+ try:
+ workingtree_format.get_format_string()
+ except NotImplementedError:
+ raise errors.IncompatibleFormat(workingtree_format, self._format)
+ try:
+ self.transport.mkdir('checkout', mode=self._get_mkdir_mode())
+ except errors.FileExists:
+ pass
+ return self.transport.clone('checkout')
+
+ def get_branches(self):
+ """See ControlDir.get_branches."""
+ ret = {}
+ try:
+ ret[""] = self.open_branch(name="")
+ except (errors.NotBranchError, errors.NoRepositoryPresent):
+ pass
+
+ for name in self._read_branch_list():
+ ret[name] = self.open_branch(name=name.decode('utf-8'))
+
+ return ret
+
+ def has_workingtree(self):
+ """Tell if this bzrdir contains a working tree.
+
+ Note: if you're going to open the working tree, you should just go
+ ahead and try, and not ask permission first.
+ """
+ from bzrlib.workingtree import WorkingTreeFormatMetaDir
+ try:
+ WorkingTreeFormatMetaDir.find_format_string(self)
+ except errors.NoWorkingTree:
+ return False
+ return True
+
+ def needs_format_conversion(self, format):
+ """See BzrDir.needs_format_conversion()."""
+ if (not isinstance(self._format, format.__class__) or
+ self._format.get_format_string() != format.get_format_string()):
+ # it is not a meta dir format, conversion is needed.
+ return True
+ # we might want to push this down to the repository?
+ try:
+ if not isinstance(self.open_repository()._format,
+ format.repository_format.__class__):
+ # the repository needs an upgrade.
+ return True
+ except errors.NoRepositoryPresent:
+ pass
+ for branch in self.list_branches():
+ if not isinstance(branch._format,
+ format.get_branch_format().__class__):
+ # the branch needs an upgrade.
+ return True
+ try:
+ my_wt = self.open_workingtree(recommend_upgrade=False)
+ if not isinstance(my_wt._format,
+ format.workingtree_format.__class__):
+ # the workingtree needs an upgrade.
+ return True
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ pass
+ return False
+
+ def open_branch(self, name=None, unsupported=False,
+ ignore_fallbacks=False, possible_transports=None):
+ """See ControlDir.open_branch."""
+ if name is None:
+ name = self._get_selected_branch()
+ format = self.find_branch_format(name=name)
+ format.check_support_status(unsupported)
+ return format.open(self, name=name,
+ _found=True, ignore_fallbacks=ignore_fallbacks,
+ possible_transports=possible_transports)
+
+ def open_repository(self, unsupported=False):
+ """See BzrDir.open_repository."""
+ from bzrlib.repository import RepositoryFormatMetaDir
+ format = RepositoryFormatMetaDir.find_format(self)
+ format.check_support_status(unsupported)
+ return format.open(self, _found=True)
+
+ def open_workingtree(self, unsupported=False,
+ recommend_upgrade=True):
+ """See BzrDir.open_workingtree."""
+ from bzrlib.workingtree import WorkingTreeFormatMetaDir
+ format = WorkingTreeFormatMetaDir.find_format(self)
+ format.check_support_status(unsupported, recommend_upgrade,
+ basedir=self.root_transport.base)
+ return format.open(self, _found=True)
+
+ def _get_config(self):
+ return config.TransportConfig(self.transport, 'control.conf')
+
+
+class BzrFormat(object):
+ """Base class for all formats of things living in metadirs.
+
+ This class manages the format string that is stored in the 'format'
+ or 'branch-format' file.
+
+ All classes for (branch-, repository-, workingtree-) formats that
+ live in meta directories and have their own 'format' file
+ (i.e. different from .bzr/branch-format) derive from this class,
+ as well as the relevant base class for their kind
+ (BranchFormat, WorkingTreeFormat, RepositoryFormat).
+
+ Each format is identified by a "format" or "branch-format" file with a
+ single line containing the base format name and then an optional list of
+ feature flags.
+
+ Feature flags are supported as of bzr 2.5. Setting feature flags on formats
+ will render them inaccessible to older versions of bzr.
+
+ :ivar features: Dictionary mapping feature names to their necessity
+ """
+
+ _present_features = set()
+
+ def __init__(self):
+ self.features = {}
+
+ @classmethod
+ def register_feature(cls, name):
+ """Register a feature as being present.
+
+ :param name: Name of the feature
+ """
+ if " " in name:
+ raise ValueError("spaces are not allowed in feature names")
+ if name in cls._present_features:
+ raise errors.FeatureAlreadyRegistered(name)
+ cls._present_features.add(name)
+
+ @classmethod
+ def unregister_feature(cls, name):
+ """Unregister a feature."""
+ cls._present_features.remove(name)
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ for name, necessity in self.features.iteritems():
+ if name in self._present_features:
+ continue
+ if necessity == "optional":
+ mutter("ignoring optional missing feature %s", name)
+ continue
+ elif necessity == "required":
+ raise errors.MissingFeature(name)
+ else:
+ mutter("treating unknown necessity as require for %s",
+ name)
+ raise errors.MissingFeature(name)
+
+ @classmethod
+ def get_format_string(cls):
+ """Return the ASCII format string that identifies this format."""
+ raise NotImplementedError(cls.get_format_string)
+
+ @classmethod
+ def from_string(cls, text):
+ format_string = cls.get_format_string()
+ if not text.startswith(format_string):
+ raise AssertionError("Invalid format header %r for %r" % (text, cls))
+ lines = text[len(format_string):].splitlines()
+ ret = cls()
+ for lineno, line in enumerate(lines):
+ try:
+ (necessity, feature) = line.split(" ", 1)
+ except ValueError:
+ raise errors.ParseFormatError(format=cls, lineno=lineno+2,
+ line=line, text=text)
+ ret.features[feature] = necessity
+ return ret
+
+ def as_string(self):
+ """Return the string representation of this format.
+ """
+ lines = [self.get_format_string()]
+ lines.extend([("%s %s\n" % (item[1], item[0])) for item in
+ self.features.iteritems()])
+ return "".join(lines)
+
+ @classmethod
+ def _find_format(klass, registry, kind, format_string):
+ try:
+ first_line = format_string[:format_string.index("\n")+1]
+ except ValueError:
+ first_line = format_string
+ try:
+ cls = registry.get(first_line)
+ except KeyError:
+ raise errors.UnknownFormatError(format=first_line, kind=kind)
+ return cls.from_string(format_string)
+
+ def network_name(self):
+ """A simple byte string uniquely identifying this format for RPC calls.
+
+ Metadir branch formats use their format string.
+ """
+ return self.as_string()
+
+ def __eq__(self, other):
+ return (self.__class__ is other.__class__ and
+ self.features == other.features)
+
+ def _update_feature_flags(self, updated_flags):
+ """Update the feature flags in this format.
+
+ :param updated_flags: Updated feature flags
+ """
+ for name, necessity in updated_flags.iteritems():
+ if necessity is None:
+ try:
+ del self.features[name]
+ except KeyError:
+ pass
+ else:
+ self.features[name] = necessity
+
+
+class BzrProber(controldir.Prober):
+ """Prober for formats that use a .bzr/ control directory."""
+
+ formats = registry.FormatRegistry(controldir.network_format_registry)
+ """The known .bzr formats."""
+
+ @classmethod
+ def probe_transport(klass, transport):
+ """Return the .bzrdir style format present in a directory."""
+ try:
+ format_string = transport.get_bytes(".bzr/branch-format")
+ except errors.NoSuchFile:
+ raise errors.NotBranchError(path=transport.base)
+ try:
+ first_line = format_string[:format_string.index("\n")+1]
+ except ValueError:
+ first_line = format_string
+ try:
+ cls = klass.formats.get(first_line)
+ except KeyError:
+ raise errors.UnknownFormatError(format=first_line, kind='bzrdir')
+ return cls.from_string(format_string)
+
+ @classmethod
+ def known_formats(cls):
+ result = set()
+ for name, format in cls.formats.iteritems():
+ if callable(format):
+ format = format()
+ result.add(format)
+ return result
+
+
+controldir.ControlDirFormat.register_prober(BzrProber)
+
+
+class RemoteBzrProber(controldir.Prober):
+ """Prober for remote servers that provide a Bazaar smart server."""
+
+ @classmethod
+ def probe_transport(klass, transport):
+ """Return a RemoteBzrDirFormat object if it looks possible."""
+ try:
+ medium = transport.get_smart_medium()
+ except (NotImplementedError, AttributeError,
+ errors.TransportNotPossible, errors.NoSmartMedium,
+ errors.SmartProtocolError):
+ # no smart server, so not a branch for this format type.
+ raise errors.NotBranchError(path=transport.base)
+ else:
+ # Decline to open it if the server doesn't support our required
+ # version (3) so that the VFS-based transport will do it.
+ if medium.should_probe():
+ try:
+ server_version = medium.protocol_version()
+ except errors.SmartProtocolError:
+ # Apparently there's no usable smart server there, even though
+ # the medium supports the smart protocol.
+ raise errors.NotBranchError(path=transport.base)
+ if server_version != '2':
+ raise errors.NotBranchError(path=transport.base)
+ from bzrlib.remote import RemoteBzrDirFormat
+ return RemoteBzrDirFormat()
+
+ @classmethod
+ def known_formats(cls):
+ from bzrlib.remote import RemoteBzrDirFormat
+ return set([RemoteBzrDirFormat()])
+
+
+class BzrDirFormat(BzrFormat, controldir.ControlDirFormat):
+ """ControlDirFormat base class for .bzr/ directories.
+
+ Formats are placed in a dict by their format string for reference
+ during bzrdir opening. These should be subclasses of BzrDirFormat
+ for consistency.
+
+ Once a format is deprecated, just deprecate the initialize and open
+ methods on the format class. Do not deprecate the object, as the
+ object will be created on every system load.
+ """
+
+ _lock_file_name = 'branch-lock'
+
+ # _lock_class must be set in subclasses to the lock type, typically
+ # TransportLock or LockDir.
+
+ def initialize_on_transport(self, transport):
+ """Initialize a new bzrdir in the base directory of a Transport."""
+ try:
+ # can we hand off the request to the smart server rather than using
+ # vfs calls?
+ client_medium = transport.get_smart_medium()
+ except errors.NoSmartMedium:
+ return self._initialize_on_transport_vfs(transport)
+ else:
+ # Current RPC's only know how to create bzr metadir1 instances, so
+ # we still delegate to vfs methods if the requested format is not a
+ # metadir1
+ if type(self) != BzrDirMetaFormat1:
+ return self._initialize_on_transport_vfs(transport)
+ from bzrlib.remote import RemoteBzrDirFormat
+ remote_format = RemoteBzrDirFormat()
+ self._supply_sub_formats_to(remote_format)
+ return remote_format.initialize_on_transport(transport)
+
+ def initialize_on_transport_ex(self, transport, use_existing_dir=False,
+ create_prefix=False, force_new_repo=False, stacked_on=None,
+ stack_on_pwd=None, repo_format_name=None, make_working_trees=None,
+ shared_repo=False, vfs_only=False):
+ """Create this format on transport.
+
+ The directory to initialize will be created.
+
+ :param force_new_repo: Do not use a shared repository for the target,
+ even if one is available.
+ :param create_prefix: Create any missing directories leading up to
+ to_transport.
+ :param use_existing_dir: Use an existing directory if one exists.
+ :param stacked_on: A url to stack any created branch on, None to follow
+ any target stacking policy.
+ :param stack_on_pwd: If stack_on is relative, the location it is
+ relative to.
+ :param repo_format_name: If non-None, a repository will be
+ made-or-found. Should none be found, or if force_new_repo is True
+ the repo_format_name is used to select the format of repository to
+ create.
+ :param make_working_trees: Control the setting of make_working_trees
+ for a new shared repository when one is made. None to use whatever
+ default the format has.
+ :param shared_repo: Control whether made repositories are shared or
+ not.
+ :param vfs_only: If True do not attempt to use a smart server
+ :return: repo, controldir, require_stacking, repository_policy. repo is
+ None if none was created or found, bzrdir is always valid.
+ require_stacking is the result of examining the stacked_on
+ parameter and any stacking policy found for the target.
+ """
+ if not vfs_only:
+ # Try to hand off to a smart server
+ try:
+ client_medium = transport.get_smart_medium()
+ except errors.NoSmartMedium:
+ pass
+ else:
+ from bzrlib.remote import RemoteBzrDirFormat
+ # TODO: lookup the local format from a server hint.
+ remote_dir_format = RemoteBzrDirFormat()
+ remote_dir_format._network_name = self.network_name()
+ self._supply_sub_formats_to(remote_dir_format)
+ return remote_dir_format.initialize_on_transport_ex(transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=stack_on_pwd, repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=shared_repo)
+ # XXX: Refactor the create_prefix/no_create_prefix code into a
+ # common helper function
+ # The destination may not exist - if so make it according to policy.
+ def make_directory(transport):
+ transport.mkdir('.')
+ return transport
+ def redirected(transport, e, redirection_notice):
+ note(redirection_notice)
+ return transport._redirected_to(e.source, e.target)
+ try:
+ transport = do_catching_redirections(make_directory, transport,
+ redirected)
+ except errors.FileExists:
+ if not use_existing_dir:
+ raise
+ except errors.NoSuchFile:
+ if not create_prefix:
+ raise
+ transport.create_prefix()
+
+ require_stacking = (stacked_on is not None)
+ # Now the target directory exists, but doesn't have a .bzr
+ # directory. So we need to create it, along with any work to create
+ # all of the dependent branches, etc.
+
+ result = self.initialize_on_transport(transport)
+ if repo_format_name:
+ try:
+ # use a custom format
+ result._format.repository_format = \
+ repository.network_format_registry.get(repo_format_name)
+ except AttributeError:
+ # The format didn't permit it to be set.
+ pass
+ # A repository is desired, either in-place or shared.
+ repository_policy = result.determine_repository_policy(
+ force_new_repo, stacked_on, stack_on_pwd,
+ require_stacking=require_stacking)
+ result_repo, is_new_repo = repository_policy.acquire_repository(
+ make_working_trees, shared_repo)
+ if not require_stacking and repository_policy._require_stacking:
+ require_stacking = True
+ result._format.require_stacking()
+ result_repo.lock_write()
+ else:
+ result_repo = None
+ repository_policy = None
+ return result_repo, result, require_stacking, repository_policy
+
+ def _initialize_on_transport_vfs(self, transport):
+ """Initialize a new bzrdir using VFS calls.
+
+ :param transport: The transport to create the .bzr directory in.
+ :return: A BzrDir for the newly initialized control directory.
+ """
+ # Since we are creating a .bzr directory, inherit the
+ # mode from the root directory
+ temp_control = lockable_files.LockableFiles(transport,
+ '', lockable_files.TransportLock)
+ try:
+ temp_control._transport.mkdir('.bzr',
+ # FIXME: RBC 20060121 don't peek under
+ # the covers
+ mode=temp_control._dir_mode)
+ except errors.FileExists:
+ raise errors.AlreadyControlDirError(transport.base)
+ if sys.platform == 'win32' and isinstance(transport, local.LocalTransport):
+ win32utils.set_file_attr_hidden(transport._abspath('.bzr'))
+ file_mode = temp_control._file_mode
+ del temp_control
+ bzrdir_transport = transport.clone('.bzr')
+ utf8_files = [('README',
+ "This is a Bazaar control directory.\n"
+ "Do not change any files in this directory.\n"
+ "See http://bazaar.canonical.com/ for more information about Bazaar.\n"),
+ ('branch-format', self.as_string()),
+ ]
+ # NB: no need to escape relative paths that are url safe.
+ control_files = lockable_files.LockableFiles(bzrdir_transport,
+ self._lock_file_name, self._lock_class)
+ control_files.create_lock()
+ control_files.lock_write()
+ try:
+ for (filename, content) in utf8_files:
+ bzrdir_transport.put_bytes(filename, content,
+ mode=file_mode)
+ finally:
+ control_files.unlock()
+ return self.open(transport, _found=True)
+
+ def open(self, transport, _found=False):
+ """Return an instance of this format for the dir transport points at.
+
+ _found is a private parameter, do not use it.
+ """
+ if not _found:
+ found_format = controldir.ControlDirFormat.find_format(transport)
+ if not isinstance(found_format, self.__class__):
+ raise AssertionError("%s was asked to open %s, but it seems to need "
+ "format %s"
+ % (self, transport, found_format))
+ # Allow subclasses - use the found format.
+ self._supply_sub_formats_to(found_format)
+ return found_format._open(transport)
+ return self._open(transport)
+
+ def _open(self, transport):
+ """Template method helper for opening BzrDirectories.
+
+ This performs the actual open and any additional logic or parameter
+ passing.
+ """
+ raise NotImplementedError(self._open)
+
+ def _supply_sub_formats_to(self, other_format):
+ """Give other_format the same values for sub formats as this has.
+
+ This method is expected to be used when parameterising a
+ RemoteBzrDirFormat instance with the parameters from a
+ BzrDirMetaFormat1 instance.
+
+ :param other_format: other_format is a format which should be
+ compatible with whatever sub formats are supported by self.
+ :return: None.
+ """
+ other_format.features = dict(self.features)
+
+ def supports_transport(self, transport):
+ # bzr formats can be opened over all known transports
+ return True
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ controldir.ControlDirFormat.check_support_status(self,
+ allow_unsupported=allow_unsupported, recommend_upgrade=recommend_upgrade,
+ basedir=basedir)
+ BzrFormat.check_support_status(self, allow_unsupported=allow_unsupported,
+ recommend_upgrade=recommend_upgrade, basedir=basedir)
+
+
+class BzrDirMetaFormat1(BzrDirFormat):
+ """Bzr meta control format 1
+
+ This is the first format with split out working tree, branch and repository
+ disk storage.
+
+ It has:
+
+ - Format 3 working trees [optional]
+ - Format 5 branches [optional]
+ - Format 7 repositories [optional]
+ """
+
+ _lock_class = lockdir.LockDir
+
+ fixed_components = False
+
+ colocated_branches = True
+
+ def __init__(self):
+ BzrDirFormat.__init__(self)
+ self._workingtree_format = None
+ self._branch_format = None
+ self._repository_format = None
+
+ def __eq__(self, other):
+ if other.__class__ is not self.__class__:
+ return False
+ if other.repository_format != self.repository_format:
+ return False
+ if other.workingtree_format != self.workingtree_format:
+ return False
+ if other.features != self.features:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self == other
+
+ def get_branch_format(self):
+ if self._branch_format is None:
+ from bzrlib.branch import format_registry as branch_format_registry
+ self._branch_format = branch_format_registry.get_default()
+ return self._branch_format
+
+ def set_branch_format(self, format):
+ self._branch_format = format
+
+ def require_stacking(self, stack_on=None, possible_transports=None,
+ _skip_repo=False):
+ """We have a request to stack, try to ensure the formats support it.
+
+ :param stack_on: If supplied, it is the URL to a branch that we want to
+ stack on. Check to see if that format supports stacking before
+ forcing an upgrade.
+ """
+        # Stacking is desired, as requested by the target, but does the place
+        # it points at support stacking? If it doesn't, then we should not
+        # implicitly upgrade. We check this here.
+ new_repo_format = None
+ new_branch_format = None
+
+ # a bit of state for get_target_branch so that we don't try to open it
+ # 2 times, for both repo *and* branch
+ target = [None, False, None] # target_branch, checked, upgrade anyway
+ def get_target_branch():
+ if target[1]:
+ # We've checked, don't check again
+ return target
+ if stack_on is None:
+ # No target format, that means we want to force upgrading
+ target[:] = [None, True, True]
+ return target
+ try:
+ target_dir = BzrDir.open(stack_on,
+ possible_transports=possible_transports)
+ except errors.NotBranchError:
+ # Nothing there, don't change formats
+ target[:] = [None, True, False]
+ return target
+ except errors.JailBreak:
+ # JailBreak, JFDI and upgrade anyway
+ target[:] = [None, True, True]
+ return target
+ try:
+ target_branch = target_dir.open_branch()
+ except errors.NotBranchError:
+ # No branch, don't upgrade formats
+ target[:] = [None, True, False]
+ return target
+ target[:] = [target_branch, True, False]
+ return target
+
+ if (not _skip_repo and
+ not self.repository_format.supports_external_lookups):
+ # We need to upgrade the Repository.
+ target_branch, _, do_upgrade = get_target_branch()
+ if target_branch is None:
+ # We don't have a target branch, should we upgrade anyway?
+ if do_upgrade:
+ # stack_on is inaccessible, JFDI.
+ # TODO: bad monkey, hard-coded formats...
+ if self.repository_format.rich_root_data:
+ new_repo_format = knitpack_repo.RepositoryFormatKnitPack5RichRoot()
+ else:
+ new_repo_format = knitpack_repo.RepositoryFormatKnitPack5()
+ else:
+ # If the target already supports stacking, then we know the
+ # project is already able to use stacking, so auto-upgrade
+ # for them
+ new_repo_format = target_branch.repository._format
+ if not new_repo_format.supports_external_lookups:
+ # target doesn't, source doesn't, so don't auto upgrade
+ # repo
+ new_repo_format = None
+ if new_repo_format is not None:
+ self.repository_format = new_repo_format
+ note(gettext('Source repository format does not support stacking,'
+ ' using format:\n %s'),
+ new_repo_format.get_format_description())
+
+ if not self.get_branch_format().supports_stacking():
+            # We just checked the repo; now let's check whether we need to
+            # upgrade the branch format.
+ target_branch, _, do_upgrade = get_target_branch()
+ if target_branch is None:
+ if do_upgrade:
+ # TODO: bad monkey, hard-coded formats...
+ from bzrlib.branch import BzrBranchFormat7
+ new_branch_format = BzrBranchFormat7()
+ else:
+ new_branch_format = target_branch._format
+ if not new_branch_format.supports_stacking():
+ new_branch_format = None
+ if new_branch_format is not None:
+ # Does support stacking, use its format.
+ self.set_branch_format(new_branch_format)
+ note(gettext('Source branch format does not support stacking,'
+ ' using format:\n %s'),
+ new_branch_format.get_format_description())
+
+ def get_converter(self, format=None):
+ """See BzrDirFormat.get_converter()."""
+ if format is None:
+ format = BzrDirFormat.get_default_format()
+ if (type(self) is BzrDirMetaFormat1 and
+ type(format) is BzrDirMetaFormat1Colo):
+ return ConvertMetaToColo(format)
+ if (type(self) is BzrDirMetaFormat1Colo and
+ type(format) is BzrDirMetaFormat1):
+            return ConvertColoToMeta(format)
+ if not isinstance(self, format.__class__):
+ # converting away from metadir is not implemented
+ raise NotImplementedError(self.get_converter)
+ return ConvertMetaToMeta(format)
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrDirFormat.get_format_string()."""
+ return "Bazaar-NG meta directory, format 1\n"
+
+ def get_format_description(self):
+ """See BzrDirFormat.get_format_description()."""
+ return "Meta directory format 1"
+
+ def _open(self, transport):
+ """See BzrDirFormat._open."""
+        # Create a new format instance, because otherwise initialisation of
+        # new metadirs would share the global default format object, leading
+        # to aliasing problems.
+ format = BzrDirMetaFormat1()
+ self._supply_sub_formats_to(format)
+ return BzrDirMeta1(transport, format)
+
+ def __return_repository_format(self):
+ """Circular import protection."""
+ if self._repository_format:
+ return self._repository_format
+ from bzrlib.repository import format_registry
+ return format_registry.get_default()
+
+ def _set_repository_format(self, value):
+ """Allow changing the repository format for metadir formats."""
+ self._repository_format = value
+
+ repository_format = property(__return_repository_format,
+ _set_repository_format)
+
+ def _supply_sub_formats_to(self, other_format):
+ """Give other_format the same values for sub formats as this has.
+
+ This method is expected to be used when parameterising a
+ RemoteBzrDirFormat instance with the parameters from a
+ BzrDirMetaFormat1 instance.
+
+ :param other_format: other_format is a format which should be
+ compatible with whatever sub formats are supported by self.
+ :return: None.
+ """
+ super(BzrDirMetaFormat1, self)._supply_sub_formats_to(other_format)
+ if getattr(self, '_repository_format', None) is not None:
+ other_format.repository_format = self.repository_format
+ if self._branch_format is not None:
+ other_format._branch_format = self._branch_format
+ if self._workingtree_format is not None:
+ other_format.workingtree_format = self.workingtree_format
+
+ def __get_workingtree_format(self):
+ if self._workingtree_format is None:
+ from bzrlib.workingtree import (
+ format_registry as wt_format_registry,
+ )
+ self._workingtree_format = wt_format_registry.get_default()
+ return self._workingtree_format
+
+ def __set_workingtree_format(self, wt_format):
+ self._workingtree_format = wt_format
+
+ def __repr__(self):
+ return "<%r>" % (self.__class__.__name__,)
+
+ workingtree_format = property(__get_workingtree_format,
+ __set_workingtree_format)
+
+
+# Register bzr formats
+BzrProber.formats.register(BzrDirMetaFormat1.get_format_string(),
+ BzrDirMetaFormat1)
+controldir.ControlDirFormat._default_format = BzrDirMetaFormat1()
+
+
+class BzrDirMetaFormat1Colo(BzrDirMetaFormat1):
+ """BzrDirMeta1 format with support for colocated branches."""
+
+ colocated_branches = True
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrDirFormat.get_format_string()."""
+ return "Bazaar meta directory, format 1 (with colocated branches)\n"
+
+ def get_format_description(self):
+ """See BzrDirFormat.get_format_description()."""
+ return "Meta directory format 1 with support for colocated branches"
+
+ def _open(self, transport):
+ """See BzrDirFormat._open."""
+        # Create a new format instance, because otherwise initialisation of
+        # new metadirs would share the global default format object, leading
+        # to aliasing problems.
+ format = BzrDirMetaFormat1Colo()
+ self._supply_sub_formats_to(format)
+ return BzrDirMeta1(transport, format)
+
+
+BzrProber.formats.register(BzrDirMetaFormat1Colo.get_format_string(),
+ BzrDirMetaFormat1Colo)
+
+
+class ConvertMetaToMeta(controldir.Converter):
+ """Converts the components of metadirs."""
+
+ def __init__(self, target_format):
+ """Create a metadir to metadir converter.
+
+ :param target_format: The final metadir format that is desired.
+ """
+ self.target_format = target_format
+
+ def convert(self, to_convert, pb):
+ """See Converter.convert()."""
+ self.bzrdir = to_convert
+ self.pb = ui.ui_factory.nested_progress_bar()
+ self.count = 0
+ self.total = 1
+ self.step('checking repository format')
+ try:
+ repo = self.bzrdir.open_repository()
+ except errors.NoRepositoryPresent:
+ pass
+ else:
+ if not isinstance(repo._format, self.target_format.repository_format.__class__):
+ from bzrlib.repository import CopyConverter
+ ui.ui_factory.note(gettext('starting repository conversion'))
+ converter = CopyConverter(self.target_format.repository_format)
+ converter.convert(repo, pb)
+ for branch in self.bzrdir.list_branches():
+ # TODO: conversions of Branch and Tree should be done by
+ # InterXFormat lookups/some sort of registry.
+ # Avoid circular imports
+ old = branch._format.__class__
+ new = self.target_format.get_branch_format().__class__
+ while old != new:
+ if (old == fullhistorybranch.BzrBranchFormat5 and
+ new in (_mod_branch.BzrBranchFormat6,
+ _mod_branch.BzrBranchFormat7,
+ _mod_branch.BzrBranchFormat8)):
+ branch_converter = _mod_branch.Converter5to6()
+ elif (old == _mod_branch.BzrBranchFormat6 and
+ new in (_mod_branch.BzrBranchFormat7,
+ _mod_branch.BzrBranchFormat8)):
+ branch_converter = _mod_branch.Converter6to7()
+ elif (old == _mod_branch.BzrBranchFormat7 and
+ new is _mod_branch.BzrBranchFormat8):
+ branch_converter = _mod_branch.Converter7to8()
+ else:
+ raise errors.BadConversionTarget("No converter", new,
+ branch._format)
+ branch_converter.convert(branch)
+ branch = self.bzrdir.open_branch()
+ old = branch._format.__class__
+ try:
+ tree = self.bzrdir.open_workingtree(recommend_upgrade=False)
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ pass
+ else:
+ # TODO: conversions of Branch and Tree should be done by
+ # InterXFormat lookups
+ if (isinstance(tree, workingtree_3.WorkingTree3) and
+ not isinstance(tree, workingtree_4.DirStateWorkingTree) and
+ isinstance(self.target_format.workingtree_format,
+ workingtree_4.DirStateWorkingTreeFormat)):
+ workingtree_4.Converter3to4().convert(tree)
+ if (isinstance(tree, workingtree_4.DirStateWorkingTree) and
+ not isinstance(tree, workingtree_4.WorkingTree5) and
+ isinstance(self.target_format.workingtree_format,
+ workingtree_4.WorkingTreeFormat5)):
+ workingtree_4.Converter4to5().convert(tree)
+ if (isinstance(tree, workingtree_4.DirStateWorkingTree) and
+ not isinstance(tree, workingtree_4.WorkingTree6) and
+ isinstance(self.target_format.workingtree_format,
+ workingtree_4.WorkingTreeFormat6)):
+ workingtree_4.Converter4or5to6().convert(tree)
+ self.pb.finished()
+ return to_convert
+
+
+class ConvertMetaToColo(controldir.Converter):
+ """Add colocated branch support."""
+
+ def __init__(self, target_format):
+ """Create a converter.that upgrades a metadir to the colo format.
+
+ :param target_format: The final metadir format that is desired.
+ """
+ self.target_format = target_format
+
+ def convert(self, to_convert, pb):
+ """See Converter.convert()."""
+ to_convert.transport.put_bytes('branch-format',
+ self.target_format.as_string())
+ return BzrDir.open_from_transport(to_convert.root_transport)
+
+
+class ConvertColoToMeta(controldir.Converter):
+ """Convert a 'development-colo' bzrdir to a '2a' bzrdir."""
+
+ def __init__(self, target_format):
+ """Create a converter that converts a 'development-colo' metadir
+ to a '2a' metadir.
+
+ :param target_format: The final metadir format that is desired.
+ """
+ self.target_format = target_format
+
+ def convert(self, to_convert, pb):
+ """See Converter.convert()."""
+ to_convert.transport.put_bytes('branch-format',
+ self.target_format.as_string())
+ return BzrDir.open_from_transport(to_convert.root_transport)
+
+
+controldir.ControlDirFormat.register_server_prober(RemoteBzrProber)
+
+
+class RepositoryAcquisitionPolicy(object):
+ """Abstract base class for repository acquisition policies.
+
+ A repository acquisition policy decides how a BzrDir acquires a repository
+ for a branch that is being created. The most basic policy decision is
+ whether to create a new repository or use an existing one.
+ """
+ def __init__(self, stack_on, stack_on_pwd, require_stacking):
+ """Constructor.
+
+ :param stack_on: A location to stack on
+ :param stack_on_pwd: If stack_on is relative, the location it is
+ relative to.
+ :param require_stacking: If True, it is a failure to not stack.
+ """
+ self._stack_on = stack_on
+ self._stack_on_pwd = stack_on_pwd
+ self._require_stacking = require_stacking
+
+ def configure_branch(self, branch):
+ """Apply any configuration data from this policy to the branch.
+
+ Default implementation sets repository stacking.
+ """
+ if self._stack_on is None:
+ return
+ if self._stack_on_pwd is None:
+ stack_on = self._stack_on
+ else:
+ try:
+ stack_on = urlutils.rebase_url(self._stack_on,
+ self._stack_on_pwd,
+ branch.user_url)
+ except errors.InvalidRebaseURLs:
+ stack_on = self._get_full_stack_on()
+ try:
+ branch.set_stacked_on_url(stack_on)
+ except (errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat):
+ if self._require_stacking:
+ raise
+
+ def requires_stacking(self):
+ """Return True if this policy requires stacking."""
+ return self._stack_on is not None and self._require_stacking
+
+ def _get_full_stack_on(self):
+ """Get a fully-qualified URL for the stack_on location."""
+ if self._stack_on is None:
+ return None
+ if self._stack_on_pwd is None:
+ return self._stack_on
+ else:
+ return urlutils.join(self._stack_on_pwd, self._stack_on)
+
+ def _add_fallback(self, repository, possible_transports=None):
+ """Add a fallback to the supplied repository, if stacking is set."""
+ stack_on = self._get_full_stack_on()
+ if stack_on is None:
+ return
+ try:
+ stacked_dir = BzrDir.open(stack_on,
+ possible_transports=possible_transports)
+ except errors.JailBreak:
+ # We keep the stacking details, but we are in the server code so
+ # actually stacking is not needed.
+ return
+ try:
+ stacked_repo = stacked_dir.open_branch().repository
+ except errors.NotBranchError:
+ stacked_repo = stacked_dir.open_repository()
+ try:
+ repository.add_fallback_repository(stacked_repo)
+ except errors.UnstackableRepositoryFormat:
+ if self._require_stacking:
+ raise
+ else:
+ self._require_stacking = True
+
+ def acquire_repository(self, make_working_trees=None, shared=False,
+ possible_transports=None):
+ """Acquire a repository for this bzrdir.
+
+        Implementations may create a new repository or use a pre-existing
+ repository.
+
+ :param make_working_trees: If creating a repository, set
+ make_working_trees to this value (if non-None)
+ :param shared: If creating a repository, make it shared if True
+ :return: A repository, is_new_flag (True if the repository was
+ created).
+ """
+ raise NotImplementedError(RepositoryAcquisitionPolicy.acquire_repository)
+
+
+class CreateRepository(RepositoryAcquisitionPolicy):
+ """A policy of creating a new repository"""
+
+ def __init__(self, bzrdir, stack_on=None, stack_on_pwd=None,
+ require_stacking=False):
+ """Constructor.
+
+ :param bzrdir: The bzrdir to create the repository on.
+ :param stack_on: A location to stack on
+ :param stack_on_pwd: If stack_on is relative, the location it is
+ relative to.
+ """
+ RepositoryAcquisitionPolicy.__init__(self, stack_on, stack_on_pwd,
+ require_stacking)
+ self._bzrdir = bzrdir
+
+ def acquire_repository(self, make_working_trees=None, shared=False,
+ possible_transports=None):
+ """Implementation of RepositoryAcquisitionPolicy.acquire_repository
+
+ Creates the desired repository in the bzrdir we already have.
+ """
+ if possible_transports is None:
+ possible_transports = []
+ else:
+ possible_transports = list(possible_transports)
+ possible_transports.append(self._bzrdir.root_transport)
+ stack_on = self._get_full_stack_on()
+ if stack_on:
+ format = self._bzrdir._format
+ format.require_stacking(stack_on=stack_on,
+ possible_transports=possible_transports)
+ if not self._require_stacking:
+ # We have picked up automatic stacking somewhere.
+ note(gettext('Using default stacking branch {0} at {1}').format(
+ self._stack_on, self._stack_on_pwd))
+ repository = self._bzrdir.create_repository(shared=shared)
+ self._add_fallback(repository,
+ possible_transports=possible_transports)
+ if make_working_trees is not None:
+ repository.set_make_working_trees(make_working_trees)
+ return repository, True
+
+
+class UseExistingRepository(RepositoryAcquisitionPolicy):
+ """A policy of reusing an existing repository"""
+
+ def __init__(self, repository, stack_on=None, stack_on_pwd=None,
+ require_stacking=False):
+ """Constructor.
+
+ :param repository: The repository to use.
+ :param stack_on: A location to stack on
+ :param stack_on_pwd: If stack_on is relative, the location it is
+ relative to.
+ """
+ RepositoryAcquisitionPolicy.__init__(self, stack_on, stack_on_pwd,
+ require_stacking)
+ self._repository = repository
+
+ def acquire_repository(self, make_working_trees=None, shared=False,
+ possible_transports=None):
+ """Implementation of RepositoryAcquisitionPolicy.acquire_repository
+
+ Returns an existing repository to use.
+ """
+ if possible_transports is None:
+ possible_transports = []
+ else:
+ possible_transports = list(possible_transports)
+ possible_transports.append(self._repository.bzrdir.transport)
+ self._add_fallback(self._repository,
+ possible_transports=possible_transports)
+ return self._repository, False
+
+
+def register_metadir(registry, key,
+ repository_format, help, native=True, deprecated=False,
+ branch_format=None,
+ tree_format=None,
+ hidden=False,
+ experimental=False,
+ alias=False, bzrdir_format=None):
+ """Register a metadir subformat.
+
+ These all use a meta bzrdir, but can be parameterized by the
+    Repository/Branch/WorkingTree formats.
+
+ :param repository_format: The fully-qualified repository format class
+ name as a string.
+ :param branch_format: Fully-qualified branch format class name as
+ a string.
+ :param tree_format: Fully-qualified tree format class name as
+ a string.
+ """
+ if bzrdir_format is None:
+ bzrdir_format = BzrDirMetaFormat1
+ # This should be expanded to support setting WorkingTree and Branch
+ # formats, once the API supports that.
+ def _load(full_name):
+ mod_name, factory_name = full_name.rsplit('.', 1)
+ try:
+ factory = pyutils.get_named_object(mod_name, factory_name)
+ except ImportError, e:
+ raise ImportError('failed to load %s: %s' % (full_name, e))
+ except AttributeError:
+ raise AttributeError('no factory %s in module %r'
+ % (full_name, sys.modules[mod_name]))
+ return factory()
+
+ def helper():
+ bd = bzrdir_format()
+ if branch_format is not None:
+ bd.set_branch_format(_load(branch_format))
+ if tree_format is not None:
+ bd.workingtree_format = _load(tree_format)
+ if repository_format is not None:
+ bd.repository_format = _load(repository_format)
+ return bd
+ registry.register(key, helper, help, native, deprecated, hidden,
+ experimental, alias)
+
+register_metadir(controldir.format_registry, 'knit',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
+ 'Format using knits. Recommended for interoperation with bzr <= 0.14.',
+ branch_format='bzrlib.branchfmt.fullhistory.BzrBranchFormat5',
+ tree_format='bzrlib.workingtree_3.WorkingTreeFormat3',
+ hidden=True,
+ deprecated=True)
+register_metadir(controldir.format_registry, 'dirstate',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
+ help='Format using dirstate for working trees. '
+ 'Compatible with bzr 0.8 and '
+ 'above when accessed over the network. Introduced in bzr 0.15.',
+ branch_format='bzrlib.branchfmt.fullhistory.BzrBranchFormat5',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True)
+register_metadir(controldir.format_registry, 'dirstate-tags',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
+ help='Variant of dirstate with support for tags. '
+ 'Introduced in bzr 0.15.',
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True)
+register_metadir(controldir.format_registry, 'rich-root',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit4',
+ help='Variant of dirstate with better handling of tree roots. '
+ 'Introduced in bzr 1.0',
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True)
+register_metadir(controldir.format_registry, 'dirstate-with-subtree',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit3',
+ help='Variant of dirstate with support for nested trees. '
+ 'Introduced in 0.15.',
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ experimental=True,
+ hidden=True,
+ )
+register_metadir(controldir.format_registry, 'pack-0.92',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack1',
+ help='Pack-based format used in 1.x series. Introduced in 0.92. '
+ 'Interoperates with bzr repositories before 0.92 but cannot be '
+ 'read by bzr < 0.92. '
+ ,
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, 'pack-0.92-subtree',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack3',
+ help='Pack-based format used in 1.x series, with subtree support. '
+ 'Introduced in 0.92. Interoperates with '
+ 'bzr repositories before 0.92 but cannot be read by bzr < 0.92. '
+ ,
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True,
+ experimental=True,
+ )
+register_metadir(controldir.format_registry, 'rich-root-pack',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack4',
+ help='A variant of pack-0.92 that supports rich-root data '
+ '(needed for bzr-svn and bzr-git). Introduced in 1.0.',
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, '1.6',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack5',
+ help='A format that allows a branch to indicate that there is another '
+ '(stacked) repository that should be used to access data that is '
+ 'not present locally.',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, '1.6.1-rich-root',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack5RichRoot',
+ help='A variant of 1.6 that supports rich-root data '
+ '(needed for bzr-svn and bzr-git).',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, '1.9',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack6',
+ help='A repository format using B+tree indexes. These indexes '
+ 'are smaller in size, have smarter caching and provide faster '
+ 'performance for most operations.',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, '1.9-rich-root',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack6RichRoot',
+ help='A variant of 1.9 that supports rich-root data '
+ '(needed for bzr-svn and bzr-git).',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat4',
+ hidden=True,
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, '1.14',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack6',
+ help='A working-tree format that supports content filtering.',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat5',
+ hidden=True,
+ deprecated=True,
+ )
+register_metadir(controldir.format_registry, '1.14-rich-root',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatKnitPack6RichRoot',
+ help='A variant of 1.14 that supports rich-root data '
+ '(needed for bzr-svn and bzr-git).',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat5',
+ hidden=True,
+ deprecated=True,
+ )
+# The following un-numbered 'development' formats should always just be aliases.
+register_metadir(controldir.format_registry, 'development-subtree',
+ 'bzrlib.repofmt.groupcompress_repo.RepositoryFormat2aSubtree',
+ help='Current development format, subtree variant. Can convert data to and '
+ 'from pack-0.92-subtree (and anything compatible with '
+ 'pack-0.92-subtree) format repositories. Repositories and branches in '
+ 'this format can only be read by bzr.dev. Please read '
+ 'http://doc.bazaar.canonical.com/latest/developers/development-repo.html '
+ 'before use.',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat6',
+ experimental=True,
+ hidden=True,
+ alias=False, # Restore to being an alias when an actual development subtree format is added
+ # This current non-alias status is simply because we did not introduce a
+ # chk based subtree format.
+ )
+register_metadir(controldir.format_registry, 'development5-subtree',
+ 'bzrlib.repofmt.knitpack_repo.RepositoryFormatPackDevelopment2Subtree',
+ help='Development format, subtree variant. Can convert data to and '
+ 'from pack-0.92-subtree (and anything compatible with '
+ 'pack-0.92-subtree) format repositories. Repositories and branches in '
+ 'this format can only be read by bzr.dev. Please read '
+ 'http://doc.bazaar.canonical.com/latest/developers/development-repo.html '
+ 'before use.',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat6',
+ experimental=True,
+ hidden=True,
+ alias=False,
+ )
+
+register_metadir(controldir.format_registry, 'development-colo',
+ 'bzrlib.repofmt.groupcompress_repo.RepositoryFormat2a',
+ help='The 2a format with experimental support for colocated branches.\n',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat6',
+ experimental=True,
+ bzrdir_format=BzrDirMetaFormat1Colo,
+ )
+
+
+# And the development formats above will have aliased one of the following:
+
+# Finally, the current format.
+register_metadir(controldir.format_registry, '2a',
+ 'bzrlib.repofmt.groupcompress_repo.RepositoryFormat2a',
+ help='Format for the bzr 2.0 series.\n'
+ 'Uses group-compress storage.\n'
+ 'Provides rich roots which are a one-way transition.\n',
+ # 'storage in packs, 255-way hashed CHK inventory, bencode revision, group compress, '
+ # 'rich roots. Supported by bzr 1.16 and later.',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat6',
+ experimental=False,
+ )
+
+# The following format should be an alias for the rich root equivalent
+# of the default format
+register_metadir(controldir.format_registry, 'default-rich-root',
+ 'bzrlib.repofmt.groupcompress_repo.RepositoryFormat2a',
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat6',
+ alias=True,
+ hidden=True,
+ help='Same as 2a.')
+
+# The current format that is made on 'bzr init'.
+format_name = config.GlobalStack().get('default_format')
+controldir.format_registry.set_default(format_name)
+
+# XXX 2010-08-20 JRV: There is still a lot of code relying on
+# bzrlib.bzrdir.format_registry existing. When BzrDir.create/BzrDir.open/etc
+# get changed to ControlDir.create/ControlDir.open/etc this should be removed.
+format_registry = controldir.format_registry
diff --git a/bzrlib/cache_utf8.py b/bzrlib/cache_utf8.py
new file mode 100644
index 0000000..830ff2a
--- /dev/null
+++ b/bzrlib/cache_utf8.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# TODO: Some kind of command-line display of revision properties:
+# perhaps show them in log -v and allow them as options to the commit command.
+
+"""Some functions to enable caching the conversion between unicode to utf8"""
+
+from __future__ import absolute_import
+
+import codecs
+
+_utf8_encode = codecs.utf_8_encode
+_utf8_decode = codecs.utf_8_decode
+def _utf8_decode_with_None(bytestring, _utf8_decode=_utf8_decode):
+ """wrap _utf8_decode to support None->None for optional strings.
+
+ Also, only return the Unicode portion, since we don't care about the second
+ return value.
+ """
+ if bytestring is None:
+ return None
+ else:
+ return _utf8_decode(bytestring)[0]
+
+# Map revisions from and to utf8 encoding
+# Whenever we do an encode/decode operation, we save the result, so that
+# we don't have to do it again.
+_unicode_to_utf8_map = {}
+_utf8_to_unicode_map = {}
+
+
+def encode(unicode_str,
+ _uni_to_utf8=_unicode_to_utf8_map,
+ _utf8_to_uni=_utf8_to_unicode_map,
+ _utf8_encode=_utf8_encode):
+ """Take this unicode revision id, and get a unicode version"""
+ # If the key is in the cache try/KeyError is 50% faster than
+ # val = dict.get(key), if val is None:
+ # On jam's machine the difference is
+ # try/KeyError: 900ms
+ # if None: 1250ms
+ # Since these are primarily used when iterating over a knit entry
+ # *most* of the time the key will already be in the cache, so use the
+ # fast path
+ try:
+ return _uni_to_utf8[unicode_str]
+ except KeyError:
+ _uni_to_utf8[unicode_str] = utf8_str = _utf8_encode(unicode_str)[0]
+ _utf8_to_uni[utf8_str] = unicode_str
+ return utf8_str
+
+
+def decode(utf8_str,
+ _uni_to_utf8=_unicode_to_utf8_map,
+ _utf8_to_uni=_utf8_to_unicode_map,
+ _utf8_decode=_utf8_decode):
+ """Take a utf8 revision id, and decode it, but cache the result"""
+ try:
+ return _utf8_to_uni[utf8_str]
+ except KeyError:
+ unicode_str = _utf8_decode(utf8_str)[0]
+ _utf8_to_uni[utf8_str] = unicode_str
+ _uni_to_utf8[unicode_str] = utf8_str
+ return unicode_str
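+
+# Illustrative round trip (Python 2 semantics, not a doctest): after the first
+# call, both directions are answered from the module-level caches above.
+#   encode(u'r\xe9vision')     # -> 'r\xc3\xa9vision'
+#   decode('r\xc3\xa9vision')  # -> u'r\xe9vision'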
+
+
+def get_cached_unicode(unicode_str):
+ """Return a cached version of the unicode string.
+
+ This has a similar idea to that of intern() in that it tries
+ to return a singleton string. Only it works for unicode strings.
+ """
+    # This might return the same object, or it might return the cached one;
+    # the decode() should just be a hash lookup, because the encode() side
+    # should have added the entry to the maps.
+ return decode(encode(unicode_str))
+
+
+def get_cached_utf8(utf8_str):
+ """Return a cached version of the utf-8 string.
+
+ Get a cached version of this string (similar to intern()).
+ At present, this will be decoded to ensure it is a utf-8 string. In the
+ future this might change to simply caching the string.
+ """
+ return encode(decode(utf8_str))
+
+
+def get_cached_ascii(ascii_str,
+ _uni_to_utf8=_unicode_to_utf8_map,
+ _utf8_to_uni=_utf8_to_unicode_map):
+ """This is a string which is identical in utf-8 and unicode."""
+ # We don't need to do any encoding, but we want _utf8_to_uni to return a
+ # real Unicode string. Unicode and plain strings of this type will have the
+ # same hash, so we can just use it as the key in _uni_to_utf8, but we need
+ # the return value to be different in _utf8_to_uni
+ ascii_str = _uni_to_utf8.setdefault(ascii_str, ascii_str)
+ _utf8_to_uni.setdefault(ascii_str, unicode(ascii_str))
+ return ascii_str
+
+
+def clear_encoding_cache():
+ """Clear the encoding and decoding caches"""
+ _unicode_to_utf8_map.clear()
+ _utf8_to_unicode_map.clear()
diff --git a/bzrlib/cethread.py b/bzrlib/cethread.py
new file mode 100644
index 0000000..9d5972e
--- /dev/null
+++ b/bzrlib/cethread.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import sys
+import threading
+
+
+class CatchingExceptionThread(threading.Thread):
+ """A thread that keeps track of exceptions.
+
+ If an exception occurs during the thread execution, it's caught and
+ re-raised when the thread is joined().
+ """
+
+ def __init__(self, *args, **kwargs):
+ # There are cases where the calling thread must wait, yet, if an
+ # exception occurs, the event should be set so the caller is not
+        # blocked. The main example is a calling thread that wants to wait for
+ # the called thread to be in a given state before continuing.
+ try:
+ sync_event = kwargs.pop('sync_event')
+ except KeyError:
+ # If the caller didn't pass a specific event, create our own
+ sync_event = threading.Event()
+ super(CatchingExceptionThread, self).__init__(*args, **kwargs)
+ self.set_sync_event(sync_event)
+ self.exception = None
+ self.ignored_exceptions = None # see set_ignored_exceptions
+ self.lock = threading.Lock()
+
+ # compatibility thunk for python-2.4 and python-2.5...
+ if sys.version_info < (2, 6):
+ name = property(threading.Thread.getName, threading.Thread.setName)
+
+ def set_sync_event(self, event):
+ """Set the ``sync_event`` event used to synchronize exception catching.
+
+ When the thread uses an event to synchronize itself with another thread
+ (setting it when the other thread can wake up from a ``wait`` call),
+ the event must be set after catching an exception or the other thread
+ will hang.
+
+ Some threads require multiple events and should set the relevant one
+ when appropriate.
+
+        Note that the event should be initially cleared so the caller can
+        wait() on it and be released when the thread sets the event.
+
+        Also note that the thread can use multiple events, setting them as it
+        progresses, while the caller can choose to wait on any of them. What
+ matters is that there is always one event set so that the caller is
+ always released when an exception is caught. Re-using the same event is
+ therefore risky as the thread itself has no idea about which event the
+ caller is waiting on. If the caller has already been released then a
+ cleared event won't guarantee that the caller is still waiting on it.
+ """
+ self.sync_event = event
+
+ def switch_and_set(self, new):
+ """Switch to a new ``sync_event`` and set the current one.
+
+ Using this method protects against race conditions while setting a new
+ ``sync_event``.
+
+ Note that this allows a caller to wait either on the old or the new
+        event depending on whether it wants fine control over what is happening
+ inside a thread.
+
+ :param new: The event that will become ``sync_event``
+ """
+ cur = self.sync_event
+ self.lock.acquire()
+ try: # Always release the lock
+ try:
+ self.set_sync_event(new)
+ # From now on, any exception will be synced with the new event
+ except:
+ # Unlucky, we couldn't set the new sync event, try restoring a
+ # safe state
+ self.set_sync_event(cur)
+ raise
+ # Setting the current ``sync_event`` will release callers waiting
+ # on it, note that it will also be set in run() if an exception is
+ # raised
+ cur.set()
+ finally:
+ self.lock.release()
+
+ def set_ignored_exceptions(self, ignored):
+ """Declare which exceptions will be ignored.
+
+ :param ignored: Can be either:
+
+ - None: all exceptions will be raised,
+ - an exception class: the instances of this class will be ignored,
+ - a tuple of exception classes: the instances of any class of the
+ list will be ignored,
+ - a callable: that will be passed the exception object
+ and should return True if the exception should be ignored
+ """
+ if ignored is None:
+ self.ignored_exceptions = None
+ elif isinstance(ignored, (Exception, tuple)):
+ self.ignored_exceptions = lambda e: isinstance(e, ignored)
+ else:
+ self.ignored_exceptions = ignored
+
+ def run(self):
+ """Overrides Thread.run to capture any exception."""
+ self.sync_event.clear()
+ try:
+ try:
+ super(CatchingExceptionThread, self).run()
+ except:
+ self.exception = sys.exc_info()
+ finally:
+ # Make sure the calling thread is released
+ self.sync_event.set()
+
+
+ def join(self, timeout=None):
+ """Overrides Thread.join to raise any exception caught.
+
+ Calling join(timeout=0) will raise the caught exception or return None
+ if the thread is still alive.
+ """
+ super(CatchingExceptionThread, self).join(timeout)
+ if self.exception is not None:
+ exc_class, exc_value, exc_tb = self.exception
+ self.exception = None # The exception should be raised only once
+ if (self.ignored_exceptions is None
+ or not self.ignored_exceptions(exc_value)):
+ # Raise non ignored exceptions
+ raise exc_class, exc_value, exc_tb
+
+ def pending_exception(self):
+ """Raise the caught exception.
+
+ This does nothing if no exception occurred.
+ """
+ self.join(timeout=0)
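+
+
+# Illustrative usage, not part of the class API: run a callable in a thread
+# and have any exception re-raised in the caller when joining.
+#   def boom():
+#       raise ValueError('whoops')
+#   t = CatchingExceptionThread(target=boom)
+#   t.start()
+#   t.join()                # re-raises the ValueError caught in the thread
+# A caller synchronising on ``sync_event`` can also poll for failures early
+# with t.pending_exception(), which joins with timeout=0.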
diff --git a/bzrlib/check.py b/bzrlib/check.py
new file mode 100644
index 0000000..48d9ace
--- /dev/null
+++ b/bzrlib/check.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# TODO: Check ancestries are correct for every revision: includes
+# every committed so far, and in a reasonable order.
+
+# TODO: Also check non-mainline revisions mentioned as parents.
+
+# TODO: Check for extra files in the control directory.
+
+# TODO: Check revision, inventory and entry objects have all
+# required fields.
+
+# TODO: Get every revision in the revision-store even if they're not
+# referenced by history and make sure they're all valid.
+
+# TODO: Perhaps have a way to record errors other than by raising exceptions;
+# would perhaps be enough to accumulate exception objects in a list without
+# raising them. If there's more than one exception it'd be good to see them
+# all.
+
+"""Checking of bzr objects.
+
+check_refs is a concept used for optimising check. Objects that depend on other
+objects (e.g. tree on repository) can list the objects they would be requesting
+so that when the dependent object is checked, matches can be pulled out and
+evaluated in-line rather than re-reading the same data many times.
+check_refs are tuples (kind, value). Currently defined kinds are:
+
+* 'trees', where value is a revid and the looked up objects are revision trees.
+* 'lefthand-distance', where value is a revid and the looked up objects are the
+ distance along the lefthand path to NULL for that revid.
+* 'revision-existence', where value is a revid, and the result is True or False
+ indicating that the revision was found/not found.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ ui,
+ )
+from bzrlib.branch import Branch
+from bzrlib.controldir import ControlDir
+from bzrlib.revision import NULL_REVISION
+from bzrlib.trace import note
+from bzrlib.workingtree import WorkingTree
+from bzrlib.i18n import gettext
+
+class Check(object):
+ """Check a repository"""
+
+ def __init__(self, repository, check_repo=True):
+ self.repository = repository
+
+ def report_results(self, verbose):
+ raise NotImplementedError(self.report_results)
+
+
+class VersionedFileCheck(Check):
+ """Check a versioned file repository"""
+
+ # The Check object interacts with InventoryEntry.check, etc.
+
+ def __init__(self, repository, check_repo=True):
+ self.repository = repository
+ self.checked_rev_cnt = 0
+ self.ghosts = set()
+ self.missing_parent_links = {}
+ self.missing_inventory_sha_cnt = 0
+ self.missing_revision_cnt = 0
+ self.checked_weaves = set()
+ self.unreferenced_versions = set()
+ self.inconsistent_parents = []
+ self.rich_roots = repository.supports_rich_root()
+ self.text_key_references = {}
+ self.check_repo = check_repo
+ self.other_results = []
+ # Plain text lines to include in the report
+ self._report_items = []
+ # Keys we are looking for; may be large and need spilling to disk.
+ # key->(type(revision/inventory/text/signature/map), sha1, first-referer)
+ self.pending_keys = {}
+        # Ancestors map for all of the revisions being checked; it is large,
+        # but the helper functions we call would create it anyway, so it is
+        # better to build it once and keep it.
+ self.ancestors = {}
+
+ def check(self, callback_refs=None, check_repo=True):
+ if callback_refs is None:
+ callback_refs = {}
+ self.repository.lock_read()
+ self.progress = ui.ui_factory.nested_progress_bar()
+ try:
+ self.progress.update(gettext('check'), 0, 4)
+ if self.check_repo:
+ self.progress.update(gettext('checking revisions'), 0)
+ self.check_revisions()
+ self.progress.update(gettext('checking commit contents'), 1)
+ self.repository._check_inventories(self)
+ self.progress.update(gettext('checking file graphs'), 2)
+ # check_weaves is done after the revision scan so that
+ # revision index is known to be valid.
+ self.check_weaves()
+ self.progress.update(gettext('checking branches and trees'), 3)
+ if callback_refs:
+ repo = self.repository
+ # calculate all refs, and callback the objects requesting them.
+ refs = {}
+ wanting_items = set()
+ # Current crude version calculates everything and calls
+ # everything at once. Doing a queue and popping as things are
+ # satisfied would be cheaper on memory [but few people have
+ # huge numbers of working trees today. TODO: fix before
+ # landing].
+ distances = set()
+ existences = set()
+ for ref, wantlist in callback_refs.iteritems():
+ wanting_items.update(wantlist)
+ kind, value = ref
+ if kind == 'trees':
+ refs[ref] = repo.revision_tree(value)
+ elif kind == 'lefthand-distance':
+ distances.add(value)
+ elif kind == 'revision-existence':
+ existences.add(value)
+ else:
+ raise AssertionError(
+ 'unknown ref kind for ref %s' % ref)
+ node_distances = repo.get_graph().find_lefthand_distances(distances)
+ for key, distance in node_distances.iteritems():
+ refs[('lefthand-distance', key)] = distance
+ if key in existences and distance > 0:
+ refs[('revision-existence', key)] = True
+ existences.remove(key)
+ parent_map = repo.get_graph().get_parent_map(existences)
+ for key in parent_map:
+ refs[('revision-existence', key)] = True
+ existences.remove(key)
+ for key in existences:
+ refs[('revision-existence', key)] = False
+ for item in wanting_items:
+ if isinstance(item, WorkingTree):
+ item._check(refs)
+ if isinstance(item, Branch):
+ self.other_results.append(item.check(refs))
+ finally:
+ self.progress.finished()
+ self.repository.unlock()
+
+ def _check_revisions(self, revisions_iterator):
+ """Check revision objects by decorating a generator.
+
+        :param revisions_iterator: An iterator of (revid, Revision-or-None).
+ :return: A generator of the contents of revisions_iterator.
+ """
+ self.planned_revisions = set()
+ for revid, revision in revisions_iterator:
+ yield revid, revision
+ self._check_one_rev(revid, revision)
+ # Flatten the revisions we found to guarantee consistent later
+ # iteration.
+ self.planned_revisions = list(self.planned_revisions)
+ # TODO: extract digital signatures as items to callback on too.
+
+ def check_revisions(self):
+ """Scan revisions, checking data directly available as we go."""
+ revision_iterator = self.repository._iter_revisions(None)
+ revision_iterator = self._check_revisions(revision_iterator)
+        # We read all the revisions here:
+ # - doing this allows later code to depend on the revision index.
+ # - we can fill out existence flags at this point
+ # - we can read the revision inventory sha at this point
+ # - we can check properties and serialisers etc.
+ if not self.repository._format.revision_graph_can_have_wrong_parents:
+ # The check against the index isn't needed.
+ self.revs_with_bad_parents_in_index = None
+ for thing in revision_iterator:
+ pass
+ else:
+ bad_revisions = self.repository._find_inconsistent_revision_parents(
+ revision_iterator)
+ self.revs_with_bad_parents_in_index = list(bad_revisions)
+
+ def report_results(self, verbose):
+ if self.check_repo:
+ self._report_repo_results(verbose)
+ for result in self.other_results:
+ result.report_results(verbose)
+
+ def _report_repo_results(self, verbose):
+ note(gettext('checked repository {0} format {1}').format(
+ self.repository.user_url,
+ self.repository._format))
+ note(gettext('%6d revisions'), self.checked_rev_cnt)
+ note(gettext('%6d file-ids'), len(self.checked_weaves))
+ if verbose:
+ note(gettext('%6d unreferenced text versions'),
+ len(self.unreferenced_versions))
+ if verbose and len(self.unreferenced_versions):
+ for file_id, revision_id in self.unreferenced_versions:
+ note(gettext('unreferenced version: {{{0}}} in {1}').format(revision_id,
+ file_id))
+ if self.missing_inventory_sha_cnt:
+ note(gettext('%6d revisions are missing inventory_sha1'),
+ self.missing_inventory_sha_cnt)
+ if self.missing_revision_cnt:
+ note(gettext('%6d revisions are mentioned but not present'),
+ self.missing_revision_cnt)
+ if len(self.ghosts):
+ note(gettext('%6d ghost revisions'), len(self.ghosts))
+ if verbose:
+ for ghost in self.ghosts:
+ note(' %s', ghost)
+ if len(self.missing_parent_links):
+ note(gettext('%6d revisions missing parents in ancestry'),
+ len(self.missing_parent_links))
+ if verbose:
+ for link, linkers in self.missing_parent_links.items():
+ note(gettext(' %s should be in the ancestry for:'), link)
+ for linker in linkers:
+ note(' * %s', linker)
+ if len(self.inconsistent_parents):
+ note(gettext('%6d inconsistent parents'), len(self.inconsistent_parents))
+ if verbose:
+ for info in self.inconsistent_parents:
+ revision_id, file_id, found_parents, correct_parents = info
+ note(gettext(' * {0} version {1} has parents {2!r} '
+ 'but should have {3!r}').format(
+ file_id, revision_id, found_parents,
+ correct_parents))
+ if self.revs_with_bad_parents_in_index:
+ note(gettext(
+ '%6d revisions have incorrect parents in the revision index'),
+ len(self.revs_with_bad_parents_in_index))
+ if verbose:
+ for item in self.revs_with_bad_parents_in_index:
+ revision_id, index_parents, actual_parents = item
+ note(gettext(
+ ' {0} has wrong parents in index: '
+ '{1!r} should be {2!r}').format(
+ revision_id, index_parents, actual_parents))
+ for item in self._report_items:
+ note(item)
+
+ def _check_one_rev(self, rev_id, rev):
+ """Cross-check one revision.
+
+ :param rev_id: A revision id to check.
+ :param rev: A revision or None to indicate a missing revision.
+ """
+ if rev.revision_id != rev_id:
+ self._report_items.append(gettext(
+ 'Mismatched internal revid {{{0}}} and index revid {{{1}}}').format(
+ rev.revision_id, rev_id))
+ rev_id = rev.revision_id
+ # Check this revision tree etc, and count as seen when we encounter a
+ # reference to it.
+ self.planned_revisions.add(rev_id)
+ # It is not a ghost
+ self.ghosts.discard(rev_id)
+ # Count all parents as ghosts if we haven't seen them yet.
+ for parent in rev.parent_ids:
+ if not parent in self.planned_revisions:
+ self.ghosts.add(parent)
+
+ self.ancestors[rev_id] = tuple(rev.parent_ids) or (NULL_REVISION,)
+ self.add_pending_item(rev_id, ('inventories', rev_id), 'inventory',
+ rev.inventory_sha1)
+ self.checked_rev_cnt += 1
+
+ def add_pending_item(self, referer, key, kind, sha1):
+ """Add a reference to a sha1 to be cross checked against a key.
+
+ :param referer: The referer that expects key to have sha1.
+ :param key: A storage key e.g. ('texts', 'foo@bar-20040504-1234')
+ :param kind: revision/inventory/text/map/signature
+ :param sha1: A hex sha1 or None if no sha1 is known.
+ """
+ existing = self.pending_keys.get(key)
+ if existing:
+ if sha1 != existing[1]:
+ self._report_items.append(gettext('Multiple expected sha1s for {0}. {{{1}}}'
+ ' expects {{{2}}}, {{{3}}} expects {{{4}}}').format(
+ key, referer, sha1, existing[1], existing[0]))
+ else:
+ self.pending_keys[key] = (kind, sha1, referer)
+
+ def check_weaves(self):
+ """Check all the weaves we can get our hands on.
+ """
+ weave_ids = []
+ storebar = ui.ui_factory.nested_progress_bar()
+ try:
+ self._check_weaves(storebar)
+ finally:
+ storebar.finished()
+
+ def _check_weaves(self, storebar):
+ storebar.update('text-index', 0, 2)
+ if self.repository._format.fast_deltas:
+ # We haven't considered every fileid instance so far.
+ weave_checker = self.repository._get_versioned_file_checker(
+ ancestors=self.ancestors)
+ else:
+ weave_checker = self.repository._get_versioned_file_checker(
+ text_key_references=self.text_key_references,
+ ancestors=self.ancestors)
+ storebar.update('file-graph', 1)
+ result = weave_checker.check_file_version_parents(
+ self.repository.texts)
+ self.checked_weaves = weave_checker.file_ids
+ bad_parents, unused_versions = result
+ bad_parents = bad_parents.items()
+ for text_key, (stored_parents, correct_parents) in bad_parents:
+ # XXX not ready for id join/split operations.
+ weave_id = text_key[0]
+ revision_id = text_key[-1]
+ weave_parents = tuple([parent[-1] for parent in stored_parents])
+ correct_parents = tuple([parent[-1] for parent in correct_parents])
+ self.inconsistent_parents.append(
+ (revision_id, weave_id, weave_parents, correct_parents))
+ self.unreferenced_versions.update(unused_versions)
+
+ def _add_entry_to_text_key_references(self, inv, entry):
+ if not self.rich_roots and entry.name == '':
+ return
+ key = (entry.file_id, entry.revision)
+ self.text_key_references.setdefault(key, False)
+ if entry.revision == inv.revision_id:
+ self.text_key_references[key] = True
+
+
+def scan_branch(branch, needed_refs, to_unlock):
+ """Scan a branch for refs.
+
+ :param branch: The branch to schedule for checking.
+ :param needed_refs: Refs we are accumulating.
+ :param to_unlock: The unlock list accumulating.
+ """
+ note(gettext("Checking branch at '%s'.") % (branch.base,))
+ branch.lock_read()
+ to_unlock.append(branch)
+ branch_refs = branch._get_check_refs()
+ for ref in branch_refs:
+ reflist = needed_refs.setdefault(ref, [])
+ reflist.append(branch)
+
+
+def scan_tree(base_tree, tree, needed_refs, to_unlock):
+ """Scan a tree for refs.
+
+    :param base_tree: The original tree that the check opened, used to detect
+        duplicate tree checks.
+ :param tree: The tree to schedule for checking.
+ :param needed_refs: Refs we are accumulating.
+ :param to_unlock: The unlock list accumulating.
+ """
+ if base_tree is not None and tree.basedir == base_tree.basedir:
+ return
+ note(gettext("Checking working tree at '%s'.") % (tree.basedir,))
+ tree.lock_read()
+ to_unlock.append(tree)
+ tree_refs = tree._get_check_refs()
+ for ref in tree_refs:
+ reflist = needed_refs.setdefault(ref, [])
+ reflist.append(tree)
+
+
+def check_dwim(path, verbose, do_branch=False, do_repo=False, do_tree=False):
+ """Check multiple objects.
+
+ If errors occur they are accumulated and reported as far as possible, and
+ an exception raised at the end of the process.
+ """
+ try:
+ base_tree, branch, repo, relpath = \
+ ControlDir.open_containing_tree_branch_or_repository(path)
+ except errors.NotBranchError:
+ base_tree = branch = repo = None
+
+ to_unlock = []
+    needed_refs = {}
+ try:
+ if base_tree is not None:
+ # If the tree is a lightweight checkout we won't see it in
+ # repo.find_branches - add now.
+ if do_tree:
+ scan_tree(None, base_tree, needed_refs, to_unlock)
+ branch = base_tree.branch
+ if branch is not None:
+ # We have a branch
+ if repo is None:
+ # The branch is in a shared repository
+ repo = branch.repository
+ if repo is not None:
+ repo.lock_read()
+ to_unlock.append(repo)
+ branches = repo.find_branches(using=True)
+ saw_tree = False
+ if do_branch or do_tree:
+ for branch in branches:
+ if do_tree:
+ try:
+ tree = branch.bzrdir.open_workingtree()
+ saw_tree = True
+ except (errors.NotLocalUrl, errors.NoWorkingTree):
+ pass
+ else:
+ scan_tree(base_tree, tree, needed_refs, to_unlock)
+ if do_branch:
+ scan_branch(branch, needed_refs, to_unlock)
+ if do_branch and not branches:
+ note(gettext("No branch found at specified location."))
+ if do_tree and base_tree is None and not saw_tree:
+ note(gettext("No working tree found at specified location."))
+ if do_repo or do_branch or do_tree:
+ if do_repo:
+ note(gettext("Checking repository at '%s'.")
+ % (repo.user_url,))
+ result = repo.check(None, callback_refs=needed_refs,
+ check_repo=do_repo)
+ result.report_results(verbose)
+ else:
+ if do_tree:
+ note(gettext("No working tree found at specified location."))
+ if do_branch:
+ note(gettext("No branch found at specified location."))
+ if do_repo:
+ note(gettext("No repository found at specified location."))
+ finally:
+ for thing in to_unlock:
+ thing.unlock()
diff --git a/bzrlib/chk_map.py b/bzrlib/chk_map.py
new file mode 100644
index 0000000..749e435
--- /dev/null
+++ b/bzrlib/chk_map.py
@@ -0,0 +1,1764 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Persistent maps from tuple_of_strings->string using CHK stores.
+
+Overview and current status:
+
+The CHKMap class implements a dict from tuple_of_strings->string by using a trie
+with internal nodes of 8-bit fan out; the key tuples are mapped to strings by
+joining them with \x00, and padding shorter keys with \x00 out to the length of
+the longest key. Leaf nodes are packed as densely as possible, and internal
+nodes are each an additional 8 bits wide, leading to a sparse upper tree.
+
+Updates to a CHKMap are done preferentially via the apply_delta method, to
+allow optimisation of the update operation; but individual map/unmap calls are
+possible and supported. Individual changes via map/unmap are buffered in memory
+until the _save method is called to force serialisation of the tree.
+apply_delta records its changes immediately by performing an implicit _save.
+
+TODO:
+-----
+
+Densely packed upper nodes.
+
+"""
+
+from __future__ import absolute_import
+
+import heapq
+import threading
+
+from bzrlib import lazy_import
+lazy_import.lazy_import(globals(), """
+from bzrlib import (
+ errors,
+ )
+""")
+from bzrlib import (
+ errors,
+ lru_cache,
+ osutils,
+ registry,
+ static_tuple,
+ trace,
+ )
+from bzrlib.static_tuple import StaticTuple
+
+# approx 4MB
+# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
+# out, it takes 3.1MB to cache the layer.
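+# (That is 255 pages * 255 entries per page * 50 bytes ~= 3.25 million bytes,
+# roughly 3.1MB.)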
+_PAGE_CACHE_SIZE = 4*1024*1024
+# Per thread caches for 2 reasons:
+# - in the server we may be serving very different content, so we get less
+# cache thrashing.
+# - we avoid locking on every cache lookup.
+_thread_caches = threading.local()
+# The page cache.
+_thread_caches.page_cache = None
+
+def _get_cache():
+ """Get the per-thread page cache.
+
+ We need a function to do this because in a new thread the _thread_caches
+ threading.local object does not have the cache initialized yet.
+ """
+ page_cache = getattr(_thread_caches, 'page_cache', None)
+ if page_cache is None:
+ # We are caching bytes so len(value) is perfectly accurate
+ page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)
+ _thread_caches.page_cache = page_cache
+ return page_cache
+
+
+def clear_cache():
+ _get_cache().clear()
+
+
+# If a ChildNode falls below this many bytes, we check for a remap
+_INTERESTING_NEW_SIZE = 50
+# If a ChildNode shrinks by more than this amount, we check for a remap
+_INTERESTING_SHRINKAGE_LIMIT = 20
+
+
+def _search_key_plain(key):
+ """Map the key tuple into a search string that just uses the key bytes."""
+ return '\x00'.join(key)
+
+
+search_key_registry = registry.Registry()
+search_key_registry.register('plain', _search_key_plain)
+
+
+class CHKMap(object):
+ """A persistent map from string to string backed by a CHK store."""
+
+ __slots__ = ('_store', '_root_node', '_search_key_func')
+
+ def __init__(self, store, root_key, search_key_func=None):
+ """Create a CHKMap object.
+
+ :param store: The store the CHKMap is stored in.
+ :param root_key: The root key of the map. None to create an empty
+ CHKMap.
+ :param search_key_func: A function mapping a key => bytes. These bytes
+ are then used by the internal nodes to split up leaf nodes into
+ multiple pages.
+ """
+ self._store = store
+ if search_key_func is None:
+ search_key_func = _search_key_plain
+ self._search_key_func = search_key_func
+ if root_key is None:
+ self._root_node = LeafNode(search_key_func=search_key_func)
+ else:
+ self._root_node = self._node_key(root_key)
+
+ def apply_delta(self, delta):
+ """Apply a delta to the map.
+
+ :param delta: An iterable of old_key, new_key, new_value tuples.
+ If new_key is not None, then new_key->new_value is inserted
+ into the map; if old_key is not None, then the old mapping
+ of old_key is removed.
+ """
+ has_deletes = False
+ # Check preconditions first.
+ as_st = StaticTuple.from_sequence
+ new_items = set([as_st(key) for (old, key, value) in delta
+ if key is not None and old is None])
+ existing_new = list(self.iteritems(key_filter=new_items))
+ if existing_new:
+ raise errors.InconsistentDeltaDelta(delta,
+ "New items are already in the map %r." % existing_new)
+ # Now apply changes.
+ for old, new, value in delta:
+ if old is not None and old != new:
+ self.unmap(old, check_remap=False)
+ has_deletes = True
+ for old, new, value in delta:
+ if new is not None:
+ self.map(new, value)
+ if has_deletes:
+ self._check_remap()
+ return self._save()
+
+ def _ensure_root(self):
+ """Ensure that the root node is an object not a key."""
+ if type(self._root_node) is StaticTuple:
+ # Demand-load the root
+ self._root_node = self._get_node(self._root_node)
+
+ def _get_node(self, node):
+ """Get a node.
+
+ Note that this does not update the _items dict in objects containing a
+ reference to this node. As such it does not prevent subsequent IO being
+ performed.
+
+ :param node: A tuple key or node object.
+ :return: A node object.
+ """
+ if type(node) is StaticTuple:
+ bytes = self._read_bytes(node)
+ return _deserialise(bytes, node,
+ search_key_func=self._search_key_func)
+ else:
+ return node
+
+ def _read_bytes(self, key):
+ try:
+ return _get_cache()[key]
+ except KeyError:
+ stream = self._store.get_record_stream([key], 'unordered', True)
+ bytes = stream.next().get_bytes_as('fulltext')
+ _get_cache()[key] = bytes
+ return bytes
+
+ def _dump_tree(self, include_keys=False):
+ """Return the tree in a string representation."""
+ self._ensure_root()
+ res = self._dump_tree_node(self._root_node, prefix='', indent='',
+ include_keys=include_keys)
+ res.append('') # Give a trailing '\n'
+ return '\n'.join(res)
+
+ def _dump_tree_node(self, node, prefix, indent, include_keys=True):
+ """For this node and all children, generate a string representation."""
+ result = []
+ if not include_keys:
+ key_str = ''
+ else:
+ node_key = node.key()
+ if node_key is not None:
+ key_str = ' %s' % (node_key[0],)
+ else:
+ key_str = ' None'
+ result.append('%s%r %s%s' % (indent, prefix, node.__class__.__name__,
+ key_str))
+ if type(node) is InternalNode:
+ # Trigger all child nodes to get loaded
+ list(node._iter_nodes(self._store))
+ for prefix, sub in sorted(node._items.iteritems()):
+ result.extend(self._dump_tree_node(sub, prefix, indent + ' ',
+ include_keys=include_keys))
+ else:
+ for key, value in sorted(node._items.iteritems()):
+ # Don't use prefix nor indent here to line up when used in
+ # tests in conjunction with assertEqualDiff
+ result.append(' %r %r' % (tuple(key), value))
+ return result
+
+ @classmethod
+ def from_dict(klass, store, initial_value, maximum_size=0, key_width=1,
+ search_key_func=None):
+ """Create a CHKMap in store with initial_value as the content.
+
+ :param store: The store to record initial_value in, a VersionedFiles
+ object with 1-tuple keys supporting CHK key generation.
+ :param initial_value: A dict to store in store. Its keys and values
+ must be bytestrings.
+ :param maximum_size: The maximum_size rule to apply to nodes. This
+ determines the size at which no new data is added to a single node.
+ :param key_width: The number of elements in each key_tuple being stored
+ in this map.
+ :param search_key_func: A function mapping a key => bytes. These bytes
+ are then used by the internal nodes to split up leaf nodes into
+ multiple pages.
+ :return: The root chk of the resulting CHKMap.
+ """
+ root_key = klass._create_directly(store, initial_value,
+ maximum_size=maximum_size, key_width=key_width,
+ search_key_func=search_key_func)
+ if type(root_key) is not StaticTuple:
+ raise AssertionError('we got a %s instead of a StaticTuple'
+ % (type(root_key),))
+ return root_key
+
+ @classmethod
+ def _create_via_map(klass, store, initial_value, maximum_size=0,
+ key_width=1, search_key_func=None):
+ result = klass(store, None, search_key_func=search_key_func)
+ result._root_node.set_maximum_size(maximum_size)
+ result._root_node._key_width = key_width
+ delta = []
+ for key, value in initial_value.items():
+ delta.append((None, key, value))
+ root_key = result.apply_delta(delta)
+ return root_key
+
+ @classmethod
+ def _create_directly(klass, store, initial_value, maximum_size=0,
+ key_width=1, search_key_func=None):
+ node = LeafNode(search_key_func=search_key_func)
+ node.set_maximum_size(maximum_size)
+ node._key_width = key_width
+ as_st = StaticTuple.from_sequence
+ node._items = dict([(as_st(key), val) for key, val
+ in initial_value.iteritems()])
+ node._raw_size = sum([node._key_value_len(key, value)
+ for key,value in node._items.iteritems()])
+ node._len = len(node._items)
+ node._compute_search_prefix()
+ node._compute_serialised_prefix()
+ if (node._len > 1
+ and maximum_size
+ and node._current_size() > maximum_size):
+ prefix, node_details = node._split(store)
+ if len(node_details) == 1:
+ raise AssertionError('Failed to split using node._split')
+ node = InternalNode(prefix, search_key_func=search_key_func)
+ node.set_maximum_size(maximum_size)
+ node._key_width = key_width
+ for split, subnode in node_details:
+ node.add_node(split, subnode)
+ keys = list(node.serialise(store))
+ return keys[-1]
+
+ def iter_changes(self, basis):
+ """Iterate over the changes between basis and self.
+
+ :return: An iterator of tuples: (key, old_value, new_value). Old_value
+ is None for keys only in self; new_value is None for keys only in
+ basis.
+ """
+ # Overview:
+        # Read both trees in lexicographic, highest-first order.
+ # Any identical nodes we skip
+ # Any unique prefixes we output immediately.
+ # values in a leaf node are treated as single-value nodes in the tree
+ # which allows them to be not-special-cased. We know to output them
+ # because their value is a string, not a key(tuple) or node.
+ #
+ # corner cases to beware of when considering this function:
+ # *) common references are at different heights.
+ # consider two trees:
+ # {'a': LeafNode={'aaa':'foo', 'aab':'bar'}, 'b': LeafNode={'b'}}
+ # {'a': InternalNode={'aa':LeafNode={'aaa':'foo', 'aab':'bar'},
+ # 'ab':LeafNode={'ab':'bar'}}
+ # 'b': LeafNode={'b'}}
+ # the node with aaa/aab will only be encountered in the second tree
+ # after reading the 'a' subtree, but it is encountered in the first
+ # tree immediately. Variations on this may have read internal nodes
+        #    like this. We want to cut the entire pending subtree when we
+ # realise we have a common node. For this we use a list of keys -
+ # the path to a node - and check the entire path is clean as we
+ # process each item.
+ if self._node_key(self._root_node) == self._node_key(basis._root_node):
+ return
+ self._ensure_root()
+ basis._ensure_root()
+ excluded_keys = set()
+ self_node = self._root_node
+ basis_node = basis._root_node
+ # A heap, each element is prefix, node(tuple/NodeObject/string),
+ # key_path (a list of tuples, tail-sharing down the tree.)
+ self_pending = []
+ basis_pending = []
+ def process_node(node, path, a_map, pending):
+ # take a node and expand it
+ node = a_map._get_node(node)
+ if type(node) == LeafNode:
+ path = (node._key, path)
+ for key, value in node._items.items():
+ # For a LeafNode, the key is a serialized_key, rather than
+ # a search_key, but the heap is using search_keys
+ search_key = node._search_key_func(key)
+ heapq.heappush(pending, (search_key, key, value, path))
+ else:
+ # type(node) == InternalNode
+ path = (node._key, path)
+ for prefix, child in node._items.items():
+ heapq.heappush(pending, (prefix, None, child, path))
+ def process_common_internal_nodes(self_node, basis_node):
+ self_items = set(self_node._items.items())
+ basis_items = set(basis_node._items.items())
+ path = (self_node._key, None)
+ for prefix, child in self_items - basis_items:
+ heapq.heappush(self_pending, (prefix, None, child, path))
+ path = (basis_node._key, None)
+ for prefix, child in basis_items - self_items:
+ heapq.heappush(basis_pending, (prefix, None, child, path))
+ def process_common_leaf_nodes(self_node, basis_node):
+ self_items = set(self_node._items.items())
+ basis_items = set(basis_node._items.items())
+ path = (self_node._key, None)
+ for key, value in self_items - basis_items:
+ prefix = self._search_key_func(key)
+ heapq.heappush(self_pending, (prefix, key, value, path))
+ path = (basis_node._key, None)
+ for key, value in basis_items - self_items:
+ prefix = basis._search_key_func(key)
+ heapq.heappush(basis_pending, (prefix, key, value, path))
+ def process_common_prefix_nodes(self_node, self_path,
+ basis_node, basis_path):
+ # Would it be more efficient if we could request both at the same
+ # time?
+ self_node = self._get_node(self_node)
+ basis_node = basis._get_node(basis_node)
+ if (type(self_node) == InternalNode
+ and type(basis_node) == InternalNode):
+ # Matching internal nodes
+ process_common_internal_nodes(self_node, basis_node)
+ elif (type(self_node) == LeafNode
+ and type(basis_node) == LeafNode):
+ process_common_leaf_nodes(self_node, basis_node)
+ else:
+ process_node(self_node, self_path, self, self_pending)
+ process_node(basis_node, basis_path, basis, basis_pending)
+ process_common_prefix_nodes(self_node, None, basis_node, None)
+ self_seen = set()
+ basis_seen = set()
+ excluded_keys = set()
+ def check_excluded(key_path):
+            # Note that this is N^2; it depends on us trimming trees
+            # aggressively so it does not become slow.
+ # A better implementation would probably have a reverse map
+ # back to the children of a node, and jump straight to it when
+            # a common node is detected, then proceed to remove the already
+ # pending children. bzrlib.graph has a searcher module with a
+ # similar problem.
+ while key_path is not None:
+ key, key_path = key_path
+ if key in excluded_keys:
+ return True
+ return False
+
+ loop_counter = 0
+ while self_pending or basis_pending:
+ loop_counter += 1
+ if not self_pending:
+ # self is exhausted: output remainder of basis
+ for prefix, key, node, path in basis_pending:
+ if check_excluded(path):
+ continue
+ node = basis._get_node(node)
+ if key is not None:
+ # a value
+ yield (key, node, None)
+ else:
+ # subtree - fastpath the entire thing.
+ for key, value in node.iteritems(basis._store):
+ yield (key, value, None)
+ return
+ elif not basis_pending:
+ # basis is exhausted: output remainder of self.
+ for prefix, key, node, path in self_pending:
+ if check_excluded(path):
+ continue
+ node = self._get_node(node)
+ if key is not None:
+ # a value
+ yield (key, None, node)
+ else:
+ # subtree - fastpath the entire thing.
+ for key, value in node.iteritems(self._store):
+ yield (key, None, value)
+ return
+ else:
+ # XXX: future optimisation - yield the smaller items
+ # immediately rather than pushing everything on/off the
+ # heaps. Applies to both internal nodes and leafnodes.
+ if self_pending[0][0] < basis_pending[0][0]:
+ # expand self
+ prefix, key, node, path = heapq.heappop(self_pending)
+ if check_excluded(path):
+ continue
+ if key is not None:
+ # a value
+ yield (key, None, node)
+ else:
+ process_node(node, path, self, self_pending)
+ continue
+ elif self_pending[0][0] > basis_pending[0][0]:
+ # expand basis
+ prefix, key, node, path = heapq.heappop(basis_pending)
+ if check_excluded(path):
+ continue
+ if key is not None:
+ # a value
+ yield (key, node, None)
+ else:
+ process_node(node, path, basis, basis_pending)
+ continue
+ else:
+ # common prefix: possibly expand both
+ if self_pending[0][1] is None:
+ # process next self
+ read_self = True
+ else:
+ read_self = False
+ if basis_pending[0][1] is None:
+ # process next basis
+ read_basis = True
+ else:
+ read_basis = False
+ if not read_self and not read_basis:
+ # compare a common value
+ self_details = heapq.heappop(self_pending)
+ basis_details = heapq.heappop(basis_pending)
+ if self_details[2] != basis_details[2]:
+ yield (self_details[1],
+ basis_details[2], self_details[2])
+ continue
+ # At least one side wasn't a simple value
+ if (self._node_key(self_pending[0][2]) ==
+ self._node_key(basis_pending[0][2])):
+ # Identical pointers, skip (and don't bother adding to
+                        # excluded, it won't turn up again.)
+ heapq.heappop(self_pending)
+ heapq.heappop(basis_pending)
+ continue
+ # Now we need to expand this node before we can continue
+ if read_self and read_basis:
+ # Both sides start with the same prefix, so process
+ # them in parallel
+ self_prefix, _, self_node, self_path = heapq.heappop(
+ self_pending)
+ basis_prefix, _, basis_node, basis_path = heapq.heappop(
+ basis_pending)
+ if self_prefix != basis_prefix:
+ raise AssertionError(
+ '%r != %r' % (self_prefix, basis_prefix))
+ process_common_prefix_nodes(
+ self_node, self_path,
+ basis_node, basis_path)
+ continue
+ if read_self:
+ prefix, key, node, path = heapq.heappop(self_pending)
+ if check_excluded(path):
+ continue
+ process_node(node, path, self, self_pending)
+ if read_basis:
+ prefix, key, node, path = heapq.heappop(basis_pending)
+ if check_excluded(path):
+ continue
+ process_node(node, path, basis, basis_pending)
+ # print loop_counter
+
+ def iteritems(self, key_filter=None):
+ """Iterate over the entire CHKMap's contents."""
+ self._ensure_root()
+ if key_filter is not None:
+ as_st = StaticTuple.from_sequence
+ key_filter = [as_st(key) for key in key_filter]
+ return self._root_node.iteritems(self._store, key_filter=key_filter)
+
+ def key(self):
+ """Return the key for this map."""
+ if type(self._root_node) is StaticTuple:
+ return self._root_node
+ else:
+ return self._root_node._key
+
+ def __len__(self):
+ self._ensure_root()
+ return len(self._root_node)
+
+ def map(self, key, value):
+ """Map a key tuple to value.
+
+ :param key: A key to map.
+ :param value: The value to assign to key.
+ """
+ key = StaticTuple.from_sequence(key)
+ # Need a root object.
+ self._ensure_root()
+ prefix, node_details = self._root_node.map(self._store, key, value)
+ if len(node_details) == 1:
+ self._root_node = node_details[0][1]
+ else:
+ self._root_node = InternalNode(prefix,
+ search_key_func=self._search_key_func)
+ self._root_node.set_maximum_size(node_details[0][1].maximum_size)
+ self._root_node._key_width = node_details[0][1]._key_width
+ for split, node in node_details:
+ self._root_node.add_node(split, node)
+
+ def _node_key(self, node):
+ """Get the key for a node whether it's a tuple or node."""
+ if type(node) is tuple:
+ node = StaticTuple.from_sequence(node)
+ if type(node) is StaticTuple:
+ return node
+ else:
+ return node._key
+
+ def unmap(self, key, check_remap=True):
+ """remove key from the map."""
+ key = StaticTuple.from_sequence(key)
+ self._ensure_root()
+ if type(self._root_node) is InternalNode:
+ unmapped = self._root_node.unmap(self._store, key,
+ check_remap=check_remap)
+ else:
+ unmapped = self._root_node.unmap(self._store, key)
+ self._root_node = unmapped
+
+ def _check_remap(self):
+ """Check if nodes can be collapsed."""
+ self._ensure_root()
+ if type(self._root_node) is InternalNode:
+ self._root_node = self._root_node._check_remap(self._store)
+
+ def _save(self):
+ """Save the map completely.
+
+ :return: The key of the root node.
+ """
+ if type(self._root_node) is StaticTuple:
+ # Already saved.
+ return self._root_node
+ keys = list(self._root_node.serialise(self._store))
+ return keys[-1]
+
+
+class Node(object):
+ """Base class defining the protocol for CHK Map nodes.
+
+ :ivar _raw_size: The total size of the serialized key:value data, before
+ adding the header bytes, and without prefix compression.
+ """
+
+ __slots__ = ('_key', '_len', '_maximum_size', '_key_width',
+ '_raw_size', '_items', '_search_prefix', '_search_key_func'
+ )
+
+ def __init__(self, key_width=1):
+ """Create a node.
+
+ :param key_width: The width of keys for this node.
+ """
+ self._key = None
+ # Current number of elements
+ self._len = 0
+ self._maximum_size = 0
+ self._key_width = key_width
+ # current size in bytes
+ self._raw_size = 0
+ # The pointers/values this node has - meaning defined by child classes.
+ self._items = {}
+ # The common search prefix
+ self._search_prefix = None
+
+ def __repr__(self):
+ items_str = str(sorted(self._items))
+ if len(items_str) > 20:
+ items_str = items_str[:16] + '...]'
+ return '%s(key:%s len:%s size:%s max:%s prefix:%s items:%s)' % (
+ self.__class__.__name__, self._key, self._len, self._raw_size,
+ self._maximum_size, self._search_prefix, items_str)
+
+ def key(self):
+ return self._key
+
+ def __len__(self):
+ return self._len
+
+ @property
+ def maximum_size(self):
+ """What is the upper limit for adding references to a node."""
+ return self._maximum_size
+
+ def set_maximum_size(self, new_size):
+ """Set the size threshold for nodes.
+
+ :param new_size: The size at which no data is added to a node. 0 for
+ unlimited.
+ """
+ self._maximum_size = new_size
+
+ @classmethod
+ def common_prefix(cls, prefix, key):
+ """Given 2 strings, return the longest prefix common to both.
+
+ :param prefix: This has been the common prefix for other keys, so it is
+ more likely to be the common prefix in this case as well.
+ :param key: Another string to compare to
+ """
+ if key.startswith(prefix):
+ return prefix
+ pos = -1
+ # Is there a better way to do this?
+ for pos, (left, right) in enumerate(zip(prefix, key)):
+ if left != right:
+ pos -= 1
+ break
+ common = prefix[:pos+1]
+ return common
+
+ @classmethod
+ def common_prefix_for_keys(cls, keys):
+ """Given a list of keys, find their common prefix.
+
+ :param keys: An iterable of strings.
+ :return: The longest common prefix of all keys.
+ """
+ common_prefix = None
+ for key in keys:
+ if common_prefix is None:
+ common_prefix = key
+ continue
+ common_prefix = cls.common_prefix(common_prefix, key)
+ if not common_prefix:
+ # if common_prefix is the empty string, then we know it won't
+ # change further
+ return ''
+ return common_prefix
+
+
+# Singleton indicating we have not computed _search_prefix yet
+_unknown = object()
+
+class LeafNode(Node):
+ """A node containing actual key:value pairs.
+
+ :ivar _items: A dict of key->value items. The key is in tuple form.
+ :ivar _size: The number of bytes that would be used by serializing all of
+ the key/value pairs.
+ """
+
+ __slots__ = ('_common_serialised_prefix',)
+
+ def __init__(self, search_key_func=None):
+ Node.__init__(self)
+ # All of the keys in this leaf node share this common prefix
+ self._common_serialised_prefix = None
+ if search_key_func is None:
+ self._search_key_func = _search_key_plain
+ else:
+ self._search_key_func = search_key_func
+
+ def __repr__(self):
+ items_str = str(sorted(self._items))
+ if len(items_str) > 20:
+ items_str = items_str[:16] + '...]'
+ return \
+ '%s(key:%s len:%s size:%s max:%s prefix:%s keywidth:%s items:%s)' \
+ % (self.__class__.__name__, self._key, self._len, self._raw_size,
+ self._maximum_size, self._search_prefix, self._key_width, items_str)
+
+ def _current_size(self):
+ """Answer the current serialised size of this node.
+
+ This differs from self._raw_size in that it includes the bytes used for
+ the header.
+ """
+ if self._common_serialised_prefix is None:
+ bytes_for_items = 0
+ prefix_len = 0
+ else:
+ # We will store a single string with the common prefix
+ # And then that common prefix will not be stored in any of the
+ # entry lines
+ prefix_len = len(self._common_serialised_prefix)
+ bytes_for_items = (self._raw_size - (prefix_len * self._len))
+ return (9 # 'chkleaf:\n'
+ + len(str(self._maximum_size)) + 1
+ + len(str(self._key_width)) + 1
+ + len(str(self._len)) + 1
+ + prefix_len + 1
+ + bytes_for_items)
+
+ @classmethod
+ def deserialise(klass, bytes, key, search_key_func=None):
+ """Deserialise bytes, with key key, into a LeafNode.
+
+ :param bytes: The bytes of the node.
+ :param key: The key that the serialised node has.
+ """
+ key = static_tuple.expect_static_tuple(key)
+ return _deserialise_leaf_node(bytes, key,
+ search_key_func=search_key_func)
+
+ def iteritems(self, store, key_filter=None):
+ """Iterate over items in the node.
+
+ :param key_filter: A filter to apply to the node. It should be a
+ list/set/dict or similar repeatedly iterable container.
+ """
+ if key_filter is not None:
+ # Adjust the filter - short elements go to a prefix filter. All
+ # other items are looked up directly.
+ # XXX: perhaps defaultdict? Profiling<rinse and repeat>
+ filters = {}
+ for key in key_filter:
+ if len(key) == self._key_width:
+ # This filter is meant to match exactly one key, yield it
+ # if we have it.
+ try:
+ yield key, self._items[key]
+ except KeyError:
+ # This key is not present in this map, continue
+ pass
+ else:
+ # Short items, we need to match based on a prefix
+ length_filter = filters.setdefault(len(key), set())
+ length_filter.add(key)
+ if filters:
+ filters = filters.items()
+ for item in self._items.iteritems():
+ for length, length_filter in filters:
+ if item[0][:length] in length_filter:
+ yield item
+ break
+ else:
+ for item in self._items.iteritems():
+ yield item
+
+ def _key_value_len(self, key, value):
+ # TODO: Should probably be done without actually joining the key, but
+ # then that can be done via the C extension
+ return (len(self._serialise_key(key)) + 1
+ + len(str(value.count('\n'))) + 1
+ + len(value) + 1)
+
+ def _search_key(self, key):
+ return self._search_key_func(key)
+
+ def _map_no_split(self, key, value):
+ """Map a key to a value.
+
+ This assumes either the key does not already exist, or you have already
+ removed its size and length from self.
+
+ :return: True if adding this node should cause us to split.
+ """
+ self._items[key] = value
+ self._raw_size += self._key_value_len(key, value)
+ self._len += 1
+ serialised_key = self._serialise_key(key)
+ if self._common_serialised_prefix is None:
+ self._common_serialised_prefix = serialised_key
+ else:
+ self._common_serialised_prefix = self.common_prefix(
+ self._common_serialised_prefix, serialised_key)
+ search_key = self._search_key(key)
+ if self._search_prefix is _unknown:
+ self._compute_search_prefix()
+ if self._search_prefix is None:
+ self._search_prefix = search_key
+ else:
+ self._search_prefix = self.common_prefix(
+ self._search_prefix, search_key)
+ if (self._len > 1
+ and self._maximum_size
+ and self._current_size() > self._maximum_size):
+ # Check to see if all of the search_keys for this node are
+ # identical. We allow the node to grow under that circumstance
+ # (we could track this as common state, but it is infrequent)
+ if (search_key != self._search_prefix
+ or not self._are_search_keys_identical()):
+ return True
+ return False
+
+ def _split(self, store):
+ """We have overflowed.
+
+ Split this node into multiple LeafNodes, return it up the stack so that
+ the next layer creates a new InternalNode and references the new nodes.
+
+ :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
+ """
+ if self._search_prefix is _unknown:
+ raise AssertionError('Search prefix must be known')
+ common_prefix = self._search_prefix
+ split_at = len(common_prefix) + 1
+ result = {}
+ for key, value in self._items.iteritems():
+ search_key = self._search_key(key)
+ prefix = search_key[:split_at]
+ # TODO: Generally only 1 key can be exactly the right length,
+ # which means we can only have 1 key in the node pointed
+ # at by the 'prefix\0' key. We might want to consider
+ # folding it into the containing InternalNode rather than
+ # having a fixed length-1 node.
+ # Note this is probably not true for hash keys, as they
+            #       may get a '\x00' node anywhere, but won't have keys of
+ # different lengths.
+ if len(prefix) < split_at:
+ prefix += '\x00'*(split_at - len(prefix))
+ if prefix not in result:
+ node = LeafNode(search_key_func=self._search_key_func)
+ node.set_maximum_size(self._maximum_size)
+ node._key_width = self._key_width
+ result[prefix] = node
+ else:
+ node = result[prefix]
+ sub_prefix, node_details = node.map(store, key, value)
+ if len(node_details) > 1:
+ if prefix != sub_prefix:
+ # This node has been split and is now found via a different
+ # path
+ result.pop(prefix)
+ new_node = InternalNode(sub_prefix,
+ search_key_func=self._search_key_func)
+ new_node.set_maximum_size(self._maximum_size)
+ new_node._key_width = self._key_width
+ for split, node in node_details:
+ new_node.add_node(split, node)
+ result[prefix] = new_node
+ return common_prefix, result.items()
+
+ def map(self, store, key, value):
+ """Map key to value."""
+ if key in self._items:
+ self._raw_size -= self._key_value_len(key, self._items[key])
+ self._len -= 1
+ self._key = None
+ if self._map_no_split(key, value):
+ return self._split(store)
+ else:
+ if self._search_prefix is _unknown:
+ raise AssertionError('%r must be known' % self._search_prefix)
+ return self._search_prefix, [("", self)]
+
+ _serialise_key = '\x00'.join
+
+ def serialise(self, store):
+ """Serialise the LeafNode to store.
+
+ :param store: A VersionedFiles honouring the CHK extensions.
+ :return: An iterable of the keys inserted by this operation.
+ """
+ lines = ["chkleaf:\n"]
+ lines.append("%d\n" % self._maximum_size)
+ lines.append("%d\n" % self._key_width)
+ lines.append("%d\n" % self._len)
+ if self._common_serialised_prefix is None:
+ lines.append('\n')
+ if len(self._items) != 0:
+ raise AssertionError('If _common_serialised_prefix is None'
+ ' we should have no items')
+ else:
+ lines.append('%s\n' % (self._common_serialised_prefix,))
+ prefix_len = len(self._common_serialised_prefix)
+ for key, value in sorted(self._items.items()):
+ # Always add a final newline
+ value_lines = osutils.chunks_to_lines([value + '\n'])
+ serialized = "%s\x00%s\n" % (self._serialise_key(key),
+ len(value_lines))
+ if not serialized.startswith(self._common_serialised_prefix):
+ raise AssertionError('We thought the common prefix was %r'
+ ' but entry %r does not have it in common'
+ % (self._common_serialised_prefix, serialized))
+ lines.append(serialized[prefix_len:])
+ lines.extend(value_lines)
+ sha1, _, _ = store.add_lines((None,), (), lines)
+ self._key = StaticTuple("sha1:" + sha1,).intern()
+ bytes = ''.join(lines)
+ if len(bytes) != self._current_size():
+ raise AssertionError('Invalid _current_size')
+ _get_cache()[self._key] = bytes
+ return [self._key]
+
+ def refs(self):
+ """Return the references to other CHK's held by this node."""
+ return []
+
+ def _compute_search_prefix(self):
+ """Determine the common search prefix for all keys in this node.
+
+ :return: A bytestring of the longest search key prefix that is
+ unique within this node.
+ """
+ search_keys = [self._search_key_func(key) for key in self._items]
+ self._search_prefix = self.common_prefix_for_keys(search_keys)
+ return self._search_prefix
+
+ def _are_search_keys_identical(self):
+ """Check to see if the search keys for all entries are the same.
+
+ When using a hash as the search_key it is possible for non-identical
+        keys to collide. If that happens enough, we may try to overflow a
+        LeafNode, but as all the entries are collisions, we must not split.
+ """
+ common_search_key = None
+ for key in self._items:
+ search_key = self._search_key(key)
+ if common_search_key is None:
+ common_search_key = search_key
+ elif search_key != common_search_key:
+ return False
+ return True
+
+ def _compute_serialised_prefix(self):
+ """Determine the common prefix for serialised keys in this node.
+
+ :return: A bytestring of the longest serialised key prefix that is
+ unique within this node.
+ """
+ serialised_keys = [self._serialise_key(key) for key in self._items]
+ self._common_serialised_prefix = self.common_prefix_for_keys(
+ serialised_keys)
+ return self._common_serialised_prefix
+
+ def unmap(self, store, key):
+ """Unmap key from the node."""
+ try:
+ self._raw_size -= self._key_value_len(key, self._items[key])
+ except KeyError:
+ trace.mutter("key %s not found in %r", key, self._items)
+ raise
+ self._len -= 1
+ del self._items[key]
+ self._key = None
+ # Recompute from scratch
+ self._compute_search_prefix()
+ self._compute_serialised_prefix()
+ return self
+
+
+class InternalNode(Node):
+ """A node that contains references to other nodes.
+
+ An InternalNode is responsible for mapping search key prefixes to child
+ nodes.
+
+ :ivar _items: serialised_key => node dictionary. node may be a tuple,
+ LeafNode or InternalNode.
+ """
+
+ __slots__ = ('_node_width',)
+
+ def __init__(self, prefix='', search_key_func=None):
+ Node.__init__(self)
+        # How many octets wide the key prefixes within this node are.
+ self._node_width = 0
+ self._search_prefix = prefix
+ if search_key_func is None:
+ self._search_key_func = _search_key_plain
+ else:
+ self._search_key_func = search_key_func
+
+ def add_node(self, prefix, node):
+ """Add a child node with prefix prefix, and node node.
+
+ :param prefix: The search key prefix for node.
+ :param node: The node being added.
+ """
+ if self._search_prefix is None:
+ raise AssertionError("_search_prefix should not be None")
+ if not prefix.startswith(self._search_prefix):
+ raise AssertionError("prefixes mismatch: %s must start with %s"
+                % (prefix, self._search_prefix))
+ if len(prefix) != len(self._search_prefix) + 1:
+ raise AssertionError("prefix wrong length: len(%s) is not %d" %
+ (prefix, len(self._search_prefix) + 1))
+ self._len += len(node)
+ if not len(self._items):
+ self._node_width = len(prefix)
+ if self._node_width != len(self._search_prefix) + 1:
+ raise AssertionError("node width mismatch: %d is not %d" %
+ (self._node_width, len(self._search_prefix) + 1))
+ self._items[prefix] = node
+ self._key = None
+
+ def _current_size(self):
+ """Answer the current serialised size of this node."""
+ return (self._raw_size + len(str(self._len)) + len(str(self._key_width)) +
+ len(str(self._maximum_size)))
+
+ @classmethod
+ def deserialise(klass, bytes, key, search_key_func=None):
+ """Deserialise bytes to an InternalNode, with key key.
+
+ :param bytes: The bytes of the node.
+ :param key: The key that the serialised node has.
+ :return: An InternalNode instance.
+ """
+ key = static_tuple.expect_static_tuple(key)
+ return _deserialise_internal_node(bytes, key,
+ search_key_func=search_key_func)
+
+ def iteritems(self, store, key_filter=None):
+ for node, node_filter in self._iter_nodes(store, key_filter=key_filter):
+ for item in node.iteritems(store, key_filter=node_filter):
+ yield item
+
+ def _iter_nodes(self, store, key_filter=None, batch_size=None):
+ """Iterate over node objects which match key_filter.
+
+ :param store: A store to use for accessing content.
+ :param key_filter: A key filter to filter nodes. Only nodes that might
+ contain a key in key_filter will be returned.
+ :param batch_size: If not None, then we will return the nodes that had
+ to be read using get_record_stream in batches, rather than reading
+ them all at once.
+ :return: An iterable of nodes. This function does not have to be fully
+ consumed. (There will be no pending I/O when items are being returned.)
+ """
+ # Map from chk key ('sha1:...',) to (prefix, key_filter)
+ # prefix is the key in self._items to use, key_filter is the key_filter
+ # entries that would match this node
+ keys = {}
+ shortcut = False
+ if key_filter is None:
+ # yielding all nodes, yield whatever we have, and queue up a read
+ # for whatever we are missing
+ shortcut = True
+ for prefix, node in self._items.iteritems():
+ if node.__class__ is StaticTuple:
+ keys[node] = (prefix, None)
+ else:
+ yield node, None
+ elif len(key_filter) == 1:
+            # Technically, this path could also be handled by the
+            # 'self._node_width in length_filters' check below. However, we can
+            # handle this case without spending any time building up the
+            # prefix_to_keys, etc state.
+
+ # This is a bit ugly, but TIMEIT showed it to be by far the fastest
+ # 0.626us list(key_filter)[0]
+ # is a func() for list(), 2 mallocs, and a getitem
+ # 0.489us [k for k in key_filter][0]
+ # still has the mallocs, avoids the func() call
+ # 0.350us iter(key_filter).next()
+ # has a func() call, and mallocs an iterator
+ # 0.125us for key in key_filter: pass
+ # no func() overhead, might malloc an iterator
+ # 0.105us for key in key_filter: break
+ # no func() overhead, might malloc an iterator, probably
+ # avoids checking an 'else' clause as part of the for
+ for key in key_filter:
+ break
+ search_prefix = self._search_prefix_filter(key)
+ if len(search_prefix) == self._node_width:
+ # This item will match exactly, so just do a dict lookup, and
+ # see what we can return
+ shortcut = True
+ try:
+ node = self._items[search_prefix]
+ except KeyError:
+                    # A given key can only match 1 child node; if it isn't
+                    # there, then we can just return nothing
+ return
+ if node.__class__ is StaticTuple:
+ keys[node] = (search_prefix, [key])
+ else:
+ # This is loaded, and the only thing that can match,
+ # return
+ yield node, [key]
+ return
+ if not shortcut:
+ # First, convert all keys into a list of search prefixes
+ # Aggregate common prefixes, and track the keys they come from
+ prefix_to_keys = {}
+ length_filters = {}
+ for key in key_filter:
+ search_prefix = self._search_prefix_filter(key)
+ length_filter = length_filters.setdefault(
+ len(search_prefix), set())
+ length_filter.add(search_prefix)
+ prefix_to_keys.setdefault(search_prefix, []).append(key)
+
+ if (self._node_width in length_filters
+ and len(length_filters) == 1):
+ # all of the search prefixes match exactly _node_width. This
+ # means that everything is an exact match, and we can do a
+ # lookup into self._items, rather than iterating over the items
+ # dict.
+ search_prefixes = length_filters[self._node_width]
+ for search_prefix in search_prefixes:
+ try:
+ node = self._items[search_prefix]
+ except KeyError:
+ # We can ignore this one
+ continue
+ node_key_filter = prefix_to_keys[search_prefix]
+ if node.__class__ is StaticTuple:
+ keys[node] = (search_prefix, node_key_filter)
+ else:
+ yield node, node_key_filter
+ else:
+ # The slow way. We walk every item in self._items, and check to
+ # see if there are any matches
+ length_filters = length_filters.items()
+ for prefix, node in self._items.iteritems():
+ node_key_filter = []
+ for length, length_filter in length_filters:
+ sub_prefix = prefix[:length]
+ if sub_prefix in length_filter:
+ node_key_filter.extend(prefix_to_keys[sub_prefix])
+ if node_key_filter: # this key matched something, yield it
+ if node.__class__ is StaticTuple:
+ keys[node] = (prefix, node_key_filter)
+ else:
+ yield node, node_key_filter
+ if keys:
+ # Look in the page cache for some more bytes
+ found_keys = set()
+ for key in keys:
+ try:
+ bytes = _get_cache()[key]
+ except KeyError:
+ continue
+ else:
+ node = _deserialise(bytes, key,
+ search_key_func=self._search_key_func)
+ prefix, node_key_filter = keys[key]
+ self._items[prefix] = node
+ found_keys.add(key)
+ yield node, node_key_filter
+ for key in found_keys:
+ del keys[key]
+ if keys:
+ # demand load some pages.
+ if batch_size is None:
+ # Read all the keys in
+ batch_size = len(keys)
+ key_order = list(keys)
+ for batch_start in range(0, len(key_order), batch_size):
+ batch = key_order[batch_start:batch_start + batch_size]
+ # We have to fully consume the stream so there is no pending
+ # I/O, so we buffer the nodes for now.
+ stream = store.get_record_stream(batch, 'unordered', True)
+ node_and_filters = []
+ for record in stream:
+ bytes = record.get_bytes_as('fulltext')
+ node = _deserialise(bytes, record.key,
+ search_key_func=self._search_key_func)
+ prefix, node_key_filter = keys[record.key]
+ node_and_filters.append((node, node_key_filter))
+ self._items[prefix] = node
+ _get_cache()[record.key] = bytes
+ for info in node_and_filters:
+ yield info
+
+ def map(self, store, key, value):
+ """Map key to value."""
+ if not len(self._items):
+ raise AssertionError("can't map in an empty InternalNode.")
+ search_key = self._search_key(key)
+ if self._node_width != len(self._search_prefix) + 1:
+ raise AssertionError("node width mismatch: %d is not %d" %
+ (self._node_width, len(self._search_prefix) + 1))
+ if not search_key.startswith(self._search_prefix):
+ # This key doesn't fit in this index, so we need to split at the
+ # point where it would fit, insert self into that internal node,
+ # and then map this key into that node.
+ new_prefix = self.common_prefix(self._search_prefix,
+ search_key)
+ new_parent = InternalNode(new_prefix,
+ search_key_func=self._search_key_func)
+ new_parent.set_maximum_size(self._maximum_size)
+ new_parent._key_width = self._key_width
+ new_parent.add_node(self._search_prefix[:len(new_prefix)+1],
+ self)
+ return new_parent.map(store, key, value)
+ children = [node for node, _
+ in self._iter_nodes(store, key_filter=[key])]
+ if children:
+ child = children[0]
+ else:
+ # new child needed:
+ child = self._new_child(search_key, LeafNode)
+ old_len = len(child)
+ if type(child) is LeafNode:
+ old_size = child._current_size()
+ else:
+ old_size = None
+ prefix, node_details = child.map(store, key, value)
+ if len(node_details) == 1:
+ # child may have shrunk, or might be a new node
+ child = node_details[0][1]
+ self._len = self._len - old_len + len(child)
+ self._items[search_key] = child
+ self._key = None
+ new_node = self
+ if type(child) is LeafNode:
+ if old_size is None:
+ # The old node was an InternalNode which means it has now
+ # collapsed, so we need to check if it will chain to a
+ # collapse at this level.
+ trace.mutter("checking remap as InternalNode -> LeafNode")
+ new_node = self._check_remap(store)
+ else:
+ # If the LeafNode has shrunk in size, we may want to run
+ # a remap check. Checking for a remap is expensive though
+ # and the frequency of a successful remap is very low.
+ # Shrinkage by small amounts is common, so we only do the
+ # remap check if the new_size is low or the shrinkage
+ # amount is over a configurable limit.
+ new_size = child._current_size()
+ shrinkage = old_size - new_size
+ if (shrinkage > 0 and new_size < _INTERESTING_NEW_SIZE
+ or shrinkage > _INTERESTING_SHRINKAGE_LIMIT):
+ trace.mutter(
+ "checking remap as size shrunk by %d to be %d",
+ shrinkage, new_size)
+ new_node = self._check_remap(store)
+ if new_node._search_prefix is None:
+ raise AssertionError("_search_prefix should not be None")
+ return new_node._search_prefix, [('', new_node)]
+        # child has overflowed - create a new intermediate node.
+ # XXX: This is where we might want to try and expand our depth
+ # to refer to more bytes of every child (which would give us
+ # multiple pointers to child nodes, but less intermediate nodes)
+ child = self._new_child(search_key, InternalNode)
+ child._search_prefix = prefix
+ for split, node in node_details:
+ child.add_node(split, node)
+ self._len = self._len - old_len + len(child)
+ self._key = None
+ return self._search_prefix, [("", self)]
+
+ def _new_child(self, search_key, klass):
+ """Create a new child node of type klass."""
+ child = klass()
+ child.set_maximum_size(self._maximum_size)
+ child._key_width = self._key_width
+ child._search_key_func = self._search_key_func
+ self._items[search_key] = child
+ return child
+
+ def serialise(self, store):
+ """Serialise the node to store.
+
+ :param store: A VersionedFiles honouring the CHK extensions.
+ :return: An iterable of the keys inserted by this operation.
+ """
+ for node in self._items.itervalues():
+ if type(node) is StaticTuple:
+ # Never deserialised.
+ continue
+ if node._key is not None:
+ # Never altered
+ continue
+ for key in node.serialise(store):
+ yield key
+ lines = ["chknode:\n"]
+ lines.append("%d\n" % self._maximum_size)
+ lines.append("%d\n" % self._key_width)
+ lines.append("%d\n" % self._len)
+ if self._search_prefix is None:
+ raise AssertionError("_search_prefix should not be None")
+ lines.append('%s\n' % (self._search_prefix,))
+ prefix_len = len(self._search_prefix)
+ for prefix, node in sorted(self._items.items()):
+ if type(node) is StaticTuple:
+ key = node[0]
+ else:
+ key = node._key[0]
+ serialised = "%s\x00%s\n" % (prefix, key)
+ if not serialised.startswith(self._search_prefix):
+ raise AssertionError("prefixes mismatch: %s must start with %s"
+ % (serialised, self._search_prefix))
+ lines.append(serialised[prefix_len:])
+ sha1, _, _ = store.add_lines((None,), (), lines)
+ self._key = StaticTuple("sha1:" + sha1,).intern()
+ _get_cache()[self._key] = ''.join(lines)
+ yield self._key
+
+ def _search_key(self, key):
+ """Return the serialised key for key in this node."""
+ # search keys are fixed width. All will be self._node_width wide, so we
+ # pad as necessary.
+ return (self._search_key_func(key) + '\x00'*self._node_width)[:self._node_width]
+
+ def _search_prefix_filter(self, key):
+ """Serialise key for use as a prefix filter in iteritems."""
+ return self._search_key_func(key)[:self._node_width]
+
+ def _split(self, offset):
+ """Split this node into smaller nodes starting at offset.
+
+ :param offset: The offset to start the new child nodes at.
+ :return: An iterable of (prefix, node) tuples. prefix is a byte
+ prefix for reaching node.
+ """
+ if offset >= self._node_width:
+ for node in self._items.values():
+ for result in node._split(offset):
+ yield result
+ return
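+        # Note: splitting at an offset narrower than this node's width appears
+        # to be unimplemented; the loop below is a stub and yields nothing.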
+ for key, node in self._items.items():
+ pass
+
+ def refs(self):
+ """Return the references to other CHK's held by this node."""
+ if self._key is None:
+ raise AssertionError("unserialised nodes have no refs.")
+ refs = []
+ for value in self._items.itervalues():
+ if type(value) is StaticTuple:
+ refs.append(value)
+ else:
+ refs.append(value.key())
+ return refs
+
+ def _compute_search_prefix(self, extra_key=None):
+ """Return the unique key prefix for this node.
+
+ :return: A bytestring of the longest search key prefix that is
+ unique within this node.
+ """
+ self._search_prefix = self.common_prefix_for_keys(self._items)
+ return self._search_prefix
+
+ def unmap(self, store, key, check_remap=True):
+ """Remove key from this node and its children."""
+ if not len(self._items):
+ raise AssertionError("can't unmap in an empty InternalNode.")
+ children = [node for node, _
+ in self._iter_nodes(store, key_filter=[key])]
+ if children:
+ child = children[0]
+ else:
+ raise KeyError(key)
+ self._len -= 1
+ unmapped = child.unmap(store, key)
+ self._key = None
+ search_key = self._search_key(key)
+ if len(unmapped) == 0:
+ # All child nodes are gone, remove the child:
+ del self._items[search_key]
+ unmapped = None
+ else:
+ # Stash the returned node
+ self._items[search_key] = unmapped
+ if len(self._items) == 1:
+ # this node is no longer needed:
+ return self._items.values()[0]
+ if type(unmapped) is InternalNode:
+ return self
+ if check_remap:
+ return self._check_remap(store)
+ else:
+ return self
+
+ def _check_remap(self, store):
+ """Check if all keys contained by children fit in a single LeafNode.
+
+ :param store: A store to use for reading more nodes
+ :return: Either self, or a new LeafNode which should replace self.
+ """
+ # Logic for how we determine when we need to rebuild
+ # 1) Implicitly unmap() is removing a key which means that the child
+ # nodes are going to be shrinking by some extent.
+ # 2) If all children are LeafNodes, it is possible that they could be
+ # combined into a single LeafNode, which can then completely replace
+ # this internal node with a single LeafNode
+ # 3) If *one* child is an InternalNode, we assume it has already done
+ # all the work to determine that its children cannot collapse, and
+ # we can then assume that those nodes *plus* the current nodes don't
+ # have a chance of collapsing either.
+ # So a very cheap check is to just say if 'unmapped' is an
+ # InternalNode, we don't have to check further.
+
+ # TODO: Another alternative is to check the total size of all known
+ # LeafNodes. If there is some formula we can use to determine the
+ # final size without actually having to read in any more
+ # children, it would be nice to have. However, we have to be
+ # careful with stuff like nodes that pull out the common prefix
+ # of each key, as adding a new key can change the common prefix
+ # and cause size changes greater than the length of one key.
+ # So for now, we just add everything to a new Leaf until it
+ # splits, as we know that will give the right answer
+ new_leaf = LeafNode(search_key_func=self._search_key_func)
+ new_leaf.set_maximum_size(self._maximum_size)
+ new_leaf._key_width = self._key_width
+ # A batch_size of 16 was chosen because:
+ # a) In testing, a 4k page held 14 times. So if we have more than 16
+ # leaf nodes we are unlikely to hold them in a single new leaf
+ # node. This still allows for 1 round trip
+ # b) With 16-way fan out, we can still do a single round trip
+ # c) With 255-way fan out, we don't want to read all 255 and destroy
+ # the page cache, just to determine that we really don't need it.
+ for node, _ in self._iter_nodes(store, batch_size=16):
+ if type(node) is InternalNode:
+ # Without looking at any leaf nodes, we are sure
+ return self
+ for key, value in node._items.iteritems():
+ if new_leaf._map_no_split(key, value):
+ return self
+ trace.mutter("remap generated a new LeafNode")
+ return new_leaf
+
+
+def _deserialise(bytes, key, search_key_func):
+ """Helper for repositorydetails - convert bytes to a node."""
+ if bytes.startswith("chkleaf:\n"):
+ node = LeafNode.deserialise(bytes, key, search_key_func=search_key_func)
+ elif bytes.startswith("chknode:\n"):
+ node = InternalNode.deserialise(bytes, key,
+ search_key_func=search_key_func)
+ else:
+ raise AssertionError("Unknown node type.")
+ return node
+
+
+class CHKMapDifference(object):
+ """Iterate the stored pages and key,value pairs for (new - old).
+
+ This class provides a generator over the stored CHK pages and the
+ (key, value) pairs that are in any of the new maps and not in any of the
+ old maps.
+
+ Note that it may yield chk pages that are common (especially root nodes),
+ but it won't yield (key,value) pairs that are common.
+ """
+
+ def __init__(self, store, new_root_keys, old_root_keys,
+ search_key_func, pb=None):
+ # TODO: Should we add a StaticTuple barrier here? It would be nice to
+ # force callers to use StaticTuple, because there will often be
+ # lots of keys passed in here. And even if we cast it locally,
+        #       that just means that we will have *both* a StaticTuple and a
+ # tuple() in memory, referring to the same object. (so a net
+ # increase in memory, not a decrease.)
+ self._store = store
+ self._new_root_keys = new_root_keys
+ self._old_root_keys = old_root_keys
+ self._pb = pb
+ # All uninteresting chks that we have seen. By the time they are added
+ # here, they should be either fully ignored, or queued up for
+ # processing
+ # TODO: This might grow to a large size if there are lots of merge
+ # parents, etc. However, it probably doesn't scale to O(history)
+ # like _processed_new_refs does.
+ self._all_old_chks = set(self._old_root_keys)
+ # All items that we have seen from the old_root_keys
+ self._all_old_items = set()
+ # These are interesting items which were either read, or already in the
+ # interesting queue (so we don't need to walk them again)
+ # TODO: processed_new_refs becomes O(all_chks), consider switching to
+ # SimpleSet here.
+ self._processed_new_refs = set()
+ self._search_key_func = search_key_func
+
+ # The uninteresting and interesting nodes to be searched
+ self._old_queue = []
+ self._new_queue = []
+ # Holds the (key, value) items found when processing the root nodes,
+ # waiting for the uninteresting nodes to be walked
+ self._new_item_queue = []
+ self._state = None
+
+ def _read_nodes_from_store(self, keys):
+ # We chose not to use _get_cache(), because we think in
+ # terms of records to be yielded. Also, we expect to touch each page
+ # only 1 time during this code. (We may want to evaluate saving the
+ # raw bytes into the page cache, which would allow a working tree
+ # update after the fetch to not have to read the bytes again.)
+ as_st = StaticTuple.from_sequence
+ stream = self._store.get_record_stream(keys, 'unordered', True)
+ for record in stream:
+ if self._pb is not None:
+ self._pb.tick()
+ if record.storage_kind == 'absent':
+ raise errors.NoSuchRevision(self._store, record.key)
+ bytes = record.get_bytes_as('fulltext')
+ node = _deserialise(bytes, record.key,
+ search_key_func=self._search_key_func)
+ if type(node) is InternalNode:
+ # Note we don't have to do node.refs() because we know that
+ # there are no children that have been pushed into this node
+ # Note: Using as_st() here seemed to save 1.2MB, which would
+ # indicate that we keep 100k prefix_refs around while
+ # processing. They *should* be shorter lived than that...
+ # It does cost us ~10s of processing time
+ #prefix_refs = [as_st(item) for item in node._items.iteritems()]
+ prefix_refs = node._items.items()
+ items = []
+ else:
+ prefix_refs = []
+ # Note: We don't use a StaticTuple here. Profiling showed a
+ # minor memory improvement (0.8MB out of 335MB peak 0.2%)
+ # But a significant slowdown (15s / 145s, or 10%)
+ items = node._items.items()
+ yield record, node, prefix_refs, items
+
+ def _read_old_roots(self):
+ old_chks_to_enqueue = []
+ all_old_chks = self._all_old_chks
+ for record, node, prefix_refs, items in \
+ self._read_nodes_from_store(self._old_root_keys):
+ # Uninteresting node
+ prefix_refs = [p_r for p_r in prefix_refs
+ if p_r[1] not in all_old_chks]
+ new_refs = [p_r[1] for p_r in prefix_refs]
+ all_old_chks.update(new_refs)
+ # TODO: This might be a good time to turn items into StaticTuple
+ # instances and possibly intern them. However, this does not
+ # impact 'initial branch' performance, so I'm not worrying
+ # about this yet
+ self._all_old_items.update(items)
+ # Queue up the uninteresting references
+ # Don't actually put them in the 'to-read' queue until we have
+ # finished checking the interesting references
+ old_chks_to_enqueue.extend(prefix_refs)
+ return old_chks_to_enqueue
+
+ def _enqueue_old(self, new_prefixes, old_chks_to_enqueue):
+ # At this point, we have read all the uninteresting and interesting
+ # items, so we can queue up the uninteresting stuff, knowing that we've
+ # handled the interesting ones
+ for prefix, ref in old_chks_to_enqueue:
+ not_interesting = True
+ for i in xrange(len(prefix), 0, -1):
+ if prefix[:i] in new_prefixes:
+ not_interesting = False
+ break
+ if not_interesting:
+ # This prefix is not part of the remaining 'interesting set'
+ continue
+ self._old_queue.append(ref)
+
+ def _read_all_roots(self):
+ """Read the root pages.
+
+ This is structured as a generator, so that the root records can be
+ yielded up to whoever needs them without any buffering.
+ """
+ # This is the bootstrap phase
+ if not self._old_root_keys:
+ # With no old_root_keys we can just shortcut and be ready
+ # for _flush_new_queue
+ self._new_queue = list(self._new_root_keys)
+ return
+ old_chks_to_enqueue = self._read_old_roots()
+ # filter out any root keys that are already known to be uninteresting
+ new_keys = set(self._new_root_keys).difference(self._all_old_chks)
+ # These are prefixes that are present in new_keys that we are
+ # thinking to yield
+ new_prefixes = set()
+ # We are about to yield all of these, so we don't want them getting
+ # added a second time
+ processed_new_refs = self._processed_new_refs
+ processed_new_refs.update(new_keys)
+ for record, node, prefix_refs, items in \
+ self._read_nodes_from_store(new_keys):
+ # At this level, we now know all the uninteresting references
+ # So we filter and queue up whatever is remaining
+ prefix_refs = [p_r for p_r in prefix_refs
+ if p_r[1] not in self._all_old_chks
+ and p_r[1] not in processed_new_refs]
+ refs = [p_r[1] for p_r in prefix_refs]
+ new_prefixes.update([p_r[0] for p_r in prefix_refs])
+ self._new_queue.extend(refs)
+ # TODO: We can potentially get multiple items here, however the
+ # current design allows for this, as callers will do the work
+ # to make the results unique. We might profile whether we
+ # gain anything by ensuring unique return values for items
+ # TODO: This might be a good time to cast to StaticTuple, as
+ # self._new_item_queue will hold the contents of multiple
+ # records for an extended lifetime
+ new_items = [item for item in items
+ if item not in self._all_old_items]
+ self._new_item_queue.extend(new_items)
+ new_prefixes.update([self._search_key_func(item[0])
+ for item in new_items])
+ processed_new_refs.update(refs)
+ yield record
+ # For new_prefixes we have the full length prefixes queued up.
+ # However, we also need possible prefixes. (If we have a known ref to
+ # 'ab', then we also need to include 'a'.) So expand the
+ # new_prefixes to include all shorter prefixes
+ for prefix in list(new_prefixes):
+ new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
+ self._enqueue_old(new_prefixes, old_chks_to_enqueue)
+
+ def _flush_new_queue(self):
+ # No need to maintain the heap invariant anymore, just pull things out
+ # and process them
+ refs = set(self._new_queue)
+ self._new_queue = []
+ # First pass, flush all interesting items and convert to using direct refs
+ all_old_chks = self._all_old_chks
+ processed_new_refs = self._processed_new_refs
+ all_old_items = self._all_old_items
+ new_items = [item for item in self._new_item_queue
+ if item not in all_old_items]
+ self._new_item_queue = []
+ if new_items:
+ yield None, new_items
+ refs = refs.difference(all_old_chks)
+ processed_new_refs.update(refs)
+ while refs:
+ # TODO: Using a SimpleSet for self._processed_new_refs saved as much
+ # as 10MB of peak memory. However, it requires implementing a
+ # non-pyrex version.
+ next_refs = set()
+ next_refs_update = next_refs.update
+ # Inlining _read_nodes_from_store improves 'bzr branch bzr.dev'
+ # from 1m54s to 1m51s. Consider it.
+ for record, _, p_refs, items in self._read_nodes_from_store(refs):
+ if all_old_items:
+ # using the 'if' check reduces the time from about 145s to 141s
+ # when streaming the initial branch of Launchpad data.
+ items = [item for item in items
+ if item not in all_old_items]
+ yield record, items
+ next_refs_update([p_r[1] for p_r in p_refs])
+ del p_refs
+ # set1.difference(set/dict) walks all of set1, and checks if it
+ # exists in 'other'.
+ # set1.difference(iterable) walks all of iterable, and does a
+ # 'difference_update' on a clone of set1. Pick wisely based on the
+ # expected sizes of objects.
+ # in our case it is expected that 'new_refs' will always be quite
+ # small.
+ next_refs = next_refs.difference(all_old_chks)
+ next_refs = next_refs.difference(processed_new_refs)
+ processed_new_refs.update(next_refs)
+ refs = next_refs
+
+ def _process_next_old(self):
+ # Since we don't filter uninteresting any further than during
+ # _read_all_roots, process the whole queue in a single pass.
+ refs = self._old_queue
+ self._old_queue = []
+ all_old_chks = self._all_old_chks
+ for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
+ # TODO: Use StaticTuple here?
+ self._all_old_items.update(items)
+ refs = [r for _,r in prefix_refs if r not in all_old_chks]
+ self._old_queue.extend(refs)
+ all_old_chks.update(refs)
+
+ def _process_queues(self):
+ while self._old_queue:
+ self._process_next_old()
+ return self._flush_new_queue()
+
+ def process(self):
+ for record in self._read_all_roots():
+ yield record, []
+ for record, items in self._process_queues():
+ yield record, items
+
+
+def iter_interesting_nodes(store, interesting_root_keys,
+ uninteresting_root_keys, pb=None):
+ """Given root keys, find interesting nodes.
+
+ Evaluate nodes referenced by interesting_root_keys. Ones that are also
+ referenced from uninteresting_root_keys are not considered interesting.
+
+ :param interesting_root_keys: keys which should be part of the
+ "interesting" nodes (which will be yielded)
+ :param uninteresting_root_keys: keys which should be filtered out of the
+ result set.
+ :return: Yield
+ (interesting record, {interesting key:values})
+ """
+ iterator = CHKMapDifference(store, interesting_root_keys,
+ uninteresting_root_keys,
+ search_key_func=store._search_key_func,
+ pb=pb)
+ return iterator.process()
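+
+
+def _example_iter_interesting_nodes(store, new_root_key, old_root_key):
+    # Illustrative sketch only (not part of the original bzrlib code): shows
+    # the shape of what iter_interesting_nodes() yields.  'store' is assumed
+    # to be a CHK VersionedFiles (e.g. a repository's chk_bytes) and the two
+    # root keys are hypothetical CHKMap root keys.
+    new_pages = []
+    new_items = []
+    for record, items in iter_interesting_nodes(store, [new_root_key],
+                                                [old_root_key]):
+        if record is not None:
+            # a CHK page reachable only from the new root
+            new_pages.append(record.key)
+        # (key, value) leaf items present only in the new map
+        new_items.extend(items)
+    return new_pages, new_items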
+
+
+try:
+ from bzrlib._chk_map_pyx import (
+ _bytes_to_text_key,
+ _search_key_16,
+ _search_key_255,
+ _deserialise_leaf_node,
+ _deserialise_internal_node,
+ )
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib._chk_map_py import (
+ _bytes_to_text_key,
+ _search_key_16,
+ _search_key_255,
+ _deserialise_leaf_node,
+ _deserialise_internal_node,
+ )
+search_key_registry.register('hash-16-way', _search_key_16)
+search_key_registry.register('hash-255-way', _search_key_255)
+
+
+def _check_key(key):
+ """Helper function to assert that a key is properly formatted.
+
+ This generally shouldn't be used in production code, but it can be helpful
+ to debug problems.
+ """
+ if type(key) is not StaticTuple:
+ raise TypeError('key %r is not StaticTuple but %s' % (key, type(key)))
+ if len(key) != 1:
+ raise ValueError('key %r should have length 1, not %d' % (key, len(key),))
+ if type(key[0]) is not str:
+ raise TypeError('key %r should hold a str, not %r'
+ % (key, type(key[0])))
+ if not key[0].startswith('sha1:'):
+ raise ValueError('key %r should point to a sha1:' % (key,))
+
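+
+
+def _example_check_key():
+    # Illustrative sketch only (not part of the original bzrlib code): a
+    # well-formed CHK key is a one-element StaticTuple holding a 'sha1:...'
+    # string; anything else makes _check_key() raise.
+    good = StaticTuple('sha1:' + '0' * 40)
+    _check_key(good)                        # passes silently
+    try:
+        _check_key(('sha1:' + '0' * 40,))   # a plain tuple is rejected
+    except TypeError:
+        return True
+    return False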
+
diff --git a/bzrlib/chk_serializer.py b/bzrlib/chk_serializer.py
new file mode 100644
index 0000000..c00e22d
--- /dev/null
+++ b/bzrlib/chk_serializer.py
@@ -0,0 +1,254 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Serializer object for CHK based inventory storage."""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib import lazy_import
+lazy_import.lazy_import(globals(),
+"""
+from bzrlib import (
+ xml_serializer,
+ )
+""")
+from bzrlib import (
+ bencode,
+ cache_utf8,
+ errors,
+ revision as _mod_revision,
+ serializer,
+ )
+
+
+def _validate_properties(props, _decode=cache_utf8._utf8_decode):
+ # TODO: we really want an 'isascii' check for key
+ # Cast the utf8 properties into Unicode 'in place'
+ for key, value in props.iteritems():
+ props[key] = _decode(value)[0]
+ return props
+
+
+def _is_format_10(value):
+ if value != 10:
+ raise ValueError('Format number was not recognized, expected 10 got %d'
+ % (value,))
+ return 10
+
+
+class BEncodeRevisionSerializer1(object):
+ """Simple revision serializer based around bencode.
+ """
+
+ squashes_xml_invalid_characters = False
+
+ # Maps {key:(Revision attribute, bencode_type, validator)}
+ # This tells us what kind we expect bdecode to create, what variable on
+ # Revision we should be using, and a function to call to validate/transform
+ # the type.
+ # TODO: add a 'validate_utf8' for things like revision_id and file_id
+ # and a validator for parent-ids
+ _schema = {'format': (None, int, _is_format_10),
+ 'committer': ('committer', str, cache_utf8.decode),
+ 'timezone': ('timezone', int, None),
+ 'timestamp': ('timestamp', str, float),
+ 'revision-id': ('revision_id', str, None),
+ 'parent-ids': ('parent_ids', list, None),
+ 'inventory-sha1': ('inventory_sha1', str, None),
+ 'message': ('message', str, cache_utf8.decode),
+ 'properties': ('properties', dict, _validate_properties),
+ }
+
+ def write_revision_to_string(self, rev):
+ encode_utf8 = cache_utf8._utf8_encode
+ # Use a list of tuples rather than a dict
+ # This lets us control the ordering, so that we are able to create
+ # smaller deltas
+ ret = [
+ ("format", 10),
+ ("committer", encode_utf8(rev.committer)[0]),
+ ]
+ if rev.timezone is not None:
+ ret.append(("timezone", rev.timezone))
+ # For bzr revisions, the most common property is just 'branch-nick'
+ # which changes infrequently.
+ revprops = {}
+ for key, value in rev.properties.iteritems():
+ revprops[key] = encode_utf8(value)[0]
+ ret.append(('properties', revprops))
+ ret.extend([
+ ("timestamp", "%.3f" % rev.timestamp),
+ ("revision-id", rev.revision_id),
+ ("parent-ids", rev.parent_ids),
+ ("inventory-sha1", rev.inventory_sha1),
+ ("message", encode_utf8(rev.message)[0]),
+ ])
+ return bencode.bencode(ret)
+
+ def write_revision(self, rev, f):
+ f.write(self.write_revision_to_string(rev))
+
+ def read_revision_from_string(self, text):
+ # TODO: consider writing a Revision decoder, rather than using the
+ # generic bencode decoder
+ # However, to decode all 25k revisions of bzr takes approx 1.3s
+ # If we remove all extra validation that goes down to about 1.2s.
+ # Of that time, probably 0.6s is spent in bencode.bdecode().
+ # Regardless, 'time bzr log' of everything is 7+s, so 1.3s to
+ # extract revision texts isn't a majority of the time.
+ ret = bencode.bdecode(text)
+ if not isinstance(ret, list):
+ raise ValueError("invalid revision text")
+ schema = self._schema
+ # timezone is allowed to be missing, but should be set
+ bits = {'timezone': None}
+ for key, value in ret:
+ # Will raise KeyError if not a valid part of the schema, or an
+ # entry is given 2 times.
+ var_name, expected_type, validator = schema[key]
+ if value.__class__ is not expected_type:
+ raise ValueError('key %s did not conform to the expected type'
+ ' %s, but was %s'
+ % (key, expected_type, type(value)))
+ if validator is not None:
+ value = validator(value)
+ bits[var_name] = value
+ if len(bits) != len(schema):
+ missing = [key for key, (var_name, _, _) in schema.iteritems()
+ if var_name not in bits]
+ raise ValueError('Revision text was missing expected keys %s.'
+ ' text %r' % (missing, text))
+ del bits[None] # Get rid of 'format' since it doesn't get mapped
+ rev = _mod_revision.Revision(**bits)
+ return rev
+
+ def read_revision(self, f):
+ return self.read_revision_from_string(f.read())
+
+
+class CHKSerializer(serializer.Serializer):
+ """A CHKInventory based serializer with 'plain' behaviour."""
+
+ format_num = '9'
+ revision_format_num = None
+ support_altered_by_hack = False
+ supported_kinds = set(['file', 'directory', 'symlink', 'tree-reference'])
+
+ def __init__(self, node_size, search_key_name):
+ self.maximum_size = node_size
+ self.search_key_name = search_key_name
+
+ def _unpack_inventory(self, elt, revision_id=None, entry_cache=None,
+ return_from_cache=False):
+ """Construct from XML Element"""
+ inv = xml_serializer.unpack_inventory_flat(elt, self.format_num,
+ xml_serializer.unpack_inventory_entry, entry_cache,
+ return_from_cache)
+ return inv
+
+ def read_inventory_from_string(self, xml_string, revision_id=None,
+ entry_cache=None, return_from_cache=False):
+ """Read xml_string into an inventory object.
+
+ :param xml_string: The xml to read.
+ :param revision_id: If not-None, the expected revision id of the
+ inventory.
+ :param entry_cache: An optional cache of InventoryEntry objects. If
+ supplied we will look up entries via (file_id, revision_id) which
+ should map to a valid InventoryEntry (File/Directory/etc) object.
+ :param return_from_cache: Return entries directly from the cache,
+ rather than copying them first. This is only safe if the caller
+ promises not to mutate the returned inventory entries, but it can
+ make some operations significantly faster.
+ """
+ try:
+ return self._unpack_inventory(
+ xml_serializer.fromstring(xml_string), revision_id,
+ entry_cache=entry_cache,
+ return_from_cache=return_from_cache)
+ except xml_serializer.ParseError, e:
+ raise errors.UnexpectedInventoryFormat(e)
+
+ def read_inventory(self, f, revision_id=None):
+ """Read an inventory from a file-like object."""
+ try:
+ try:
+ return self._unpack_inventory(self._read_element(f),
+ revision_id=None)
+ finally:
+ f.close()
+ except xml_serializer.ParseError, e:
+ raise errors.UnexpectedInventoryFormat(e)
+
+ def write_inventory_to_lines(self, inv):
+ """Return a list of lines with the encoded inventory."""
+ return self.write_inventory(inv, None)
+
+ def write_inventory_to_string(self, inv, working=False):
+ """Just call write_inventory with a StringIO and return the value.
+
+ :param working: If True skip history data - text_sha1, text_size,
+ reference_revision, symlink_target.
+ """
+ sio = StringIO()
+ self.write_inventory(inv, sio, working)
+ return sio.getvalue()
+
+ def write_inventory(self, inv, f, working=False):
+ """Write inventory to a file.
+
+ :param inv: the inventory to write.
+ :param f: the file to write. (May be None if the lines are the desired
+ output).
+ :param working: If True skip history data - text_sha1, text_size,
+ reference_revision, symlink_target.
+ :return: The inventory as a list of lines.
+ """
+ output = []
+ append = output.append
+ if inv.revision_id is not None:
+ revid1 = ' revision_id="'
+ revid2 = xml_serializer.encode_and_escape(inv.revision_id)
+ else:
+ revid1 = ""
+ revid2 = ""
+ append('<inventory format="%s"%s%s>\n' % (
+ self.format_num, revid1, revid2))
+ append('<directory file_id="%s name="%s revision="%s />\n' % (
+ xml_serializer.encode_and_escape(inv.root.file_id),
+ xml_serializer.encode_and_escape(inv.root.name),
+ xml_serializer.encode_and_escape(inv.root.revision)))
+ xml_serializer.serialize_inventory_flat(inv,
+ append,
+ root_id=None, supported_kinds=self.supported_kinds,
+ working=working)
+ if f is not None:
+ f.writelines(output)
+ return output
+
+
+chk_serializer_255_bigpage = CHKSerializer(65536, 'hash-255-way')
+
+
+class CHKBEncodeSerializer(BEncodeRevisionSerializer1, CHKSerializer):
+ """A CHKInventory and BEncode based serializer with 'plain' behaviour."""
+
+ format_num = '10'
+
+
+chk_bencode_serializer = CHKBEncodeSerializer(65536, 'hash-255-way')
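+
+
+def _example_bencode_revision_roundtrip():
+    # Illustrative sketch only (not part of the original bzrlib code):
+    # serialise a small Revision with the bencode-based serializer and read it
+    # back.  The field values here are made up for the example.
+    rev = _mod_revision.Revision(
+        'example-rev-id-1',
+        committer=u'Jane Doe <jane@example.com>',
+        timestamp=1234567890.123, timezone=0,
+        message=u'initial import',
+        parent_ids=[], inventory_sha1='0' * 40, properties={})
+    text = chk_bencode_serializer.write_revision_to_string(rev)
+    rev2 = chk_bencode_serializer.read_revision_from_string(text)
+    return rev2.revision_id == rev.revision_id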
diff --git a/bzrlib/chunk_writer.py b/bzrlib/chunk_writer.py
new file mode 100644
index 0000000..a9f3b85
--- /dev/null
+++ b/bzrlib/chunk_writer.py
@@ -0,0 +1,278 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""ChunkWriter: write compressed data out with a fixed upper bound."""
+
+from __future__ import absolute_import
+
+import zlib
+from zlib import Z_FINISH, Z_SYNC_FLUSH
+
+
+class ChunkWriter(object):
+ """ChunkWriter allows writing of compressed data with a fixed size.
+
+ If less data is supplied than fills a chunk, the chunk is padded with
+ NULL bytes. If more data is supplied, then the writer packs as much
+ in as it can, but never splits any item it was given.
+
+ The algorithm for packing is open to improvement! Currently it is:
+ - write the bytes given
+ - if the total seen bytes so far exceeds the chunk size, flush.
+
+ :cvar _max_repack: To fit the maximum number of entries into a node, we
+ will sometimes start over and compress the whole list to get tighter
+ packing. We get diminishing returns after a while, so this limits the
+ number of times we will try.
+ The default is to try to avoid recompressing entirely, but setting this
+ to something like 20 will give maximum compression.
+
+ :cvar _max_zsync: Another tunable knob. If _max_repack is set to 0, then you
+ can limit the number of times we will try to pack more data into a
+ node. This allows us to do a single compression pass, rather than
+ trying until we overflow, and then recompressing again.
+ """
+ # In testing, some values for bzr.dev::
+ # repack time MB max full
+ # 1 7.5 4.6 1140 0
+ # 2 8.4 4.2 1036 1
+ # 3 9.8 4.1 1012 278
+ # 4 10.8 4.1 728 945
+ # 20 11.1 4.1 0 1012
+ # repack = 0
+ # zsync time MB repack stop_for_z
+ # 0 5.0 24.7 0 6270
+ # 1 4.3 13.2 0 3342
+ # 2 4.9 9.6 0 2414
+ # 5 4.8 6.2 0 1549
+ # 6 4.8 5.8 1 1435
+ # 7 4.8 5.5 19 1337
+ # 8 4.4 5.3 81 1220
+ # 10 5.3 5.0 260 967
+ # 11 5.3 4.9 366 839
+ # 12 5.1 4.8 454 731
+ # 15 5.8 4.7 704 450
+ # 20 5.8 4.6 1133 7
+
+ # In testing, some values for mysql-unpacked::
+ # next_bytes estim
+ # repack time MB full stop_for_repack
+ # 1 15.4 0 3913
+ # 2 35.4 13.7 0 346
+ # 20 46.7 13.4 3380 0
+ # repack=0
+ # zsync stop_for_z
+ # 0 29.5 116.5 0 29782
+ # 1 27.8 60.2 0 15356
+ # 2 27.8 42.4 0 10822
+ # 5 26.8 25.5 0 6491
+ # 6 27.3 23.2 13 5896
+ # 7 27.5 21.6 29 5451
+ # 8 27.1 20.3 52 5108
+ # 10 29.4 18.6 195 4526
+ # 11 29.2 18.0 421 4143
+ # 12 28.0 17.5 702 3738
+ # 15 28.9 16.5 1223 2969
+ # 20 29.6 15.7 2182 1810
+ # 30 31.4 15.4 3891 23
+
+ # Tuple of (num_repack_attempts, num_zsync_attempts)
+ # num_zsync_attempts only has meaning if num_repack_attempts is 0.
+ _repack_opts_for_speed = (0, 8)
+ _repack_opts_for_size = (20, 0)
+
+ def __init__(self, chunk_size, reserved=0, optimize_for_size=False):
+ """Create a ChunkWriter to write chunk_size chunks.
+
+ :param chunk_size: The total byte count to emit at the end of the
+ chunk.
+ :param reserved: How many bytes to allow for reserved data. Reserved
+ data space can only be written to via write(..., reserved=True).
+ """
+ self.chunk_size = chunk_size
+ self.compressor = zlib.compressobj()
+ self.bytes_in = []
+ self.bytes_list = []
+ self.bytes_out_len = 0
+ # bytes that have been seen, but not included in a flush to out yet
+ self.unflushed_in_bytes = 0
+ self.num_repack = 0
+ self.num_zsync = 0
+ self.unused_bytes = None
+ self.reserved_size = reserved
+ # Default is to make building fast rather than compact
+ self.set_optimize(for_size=optimize_for_size)
+
+ def finish(self):
+ """Finish the chunk.
+
+ This returns the final compressed chunk, and either None, or the
+ bytes that did not fit in the chunk.
+
+ :return: (compressed_bytes, unused_bytes, num_nulls_needed)
+
+ * compressed_bytes: a list of bytes that were output from the
+ compressor. If the compressed length was not exactly chunk_size,
+ the final string will be a string of all null bytes to pad this
+ to chunk_size
+ * unused_bytes: None, or the last bytes that were added, which we
+ could not fit.
+ * num_nulls_needed: How many nulls are padded at the end
+ """
+ self.bytes_in = None # Free the data cached so far, we don't need it
+ out = self.compressor.flush(Z_FINISH)
+ self.bytes_list.append(out)
+ self.bytes_out_len += len(out)
+
+ if self.bytes_out_len > self.chunk_size:
+ raise AssertionError('Somehow we ended up with too much'
+ ' compressed data, %d > %d'
+ % (self.bytes_out_len, self.chunk_size))
+ nulls_needed = self.chunk_size - self.bytes_out_len
+ if nulls_needed:
+ self.bytes_list.append("\x00" * nulls_needed)
+ return self.bytes_list, self.unused_bytes, nulls_needed
+
+ def set_optimize(self, for_size=True):
+ """Change how we optimize our writes.
+
+ :param for_size: If True, optimize for minimum space usage, otherwise
+ optimize for fastest writing speed.
+ :return: None
+ """
+ if for_size:
+ opts = ChunkWriter._repack_opts_for_size
+ else:
+ opts = ChunkWriter._repack_opts_for_speed
+ self._max_repack, self._max_zsync = opts
+
+ def _recompress_all_bytes_in(self, extra_bytes=None):
+ """Recompress the current bytes_in, and optionally more.
+
+ :param extra_bytes: Optional, if supplied we will add it with
+ Z_SYNC_FLUSH
+ :return: (bytes_out, bytes_out_len, alt_compressed)
+
+ * bytes_out: is the compressed bytes returned from the compressor
+ * bytes_out_len: the length of the compressed output
+ * compressor: An object with everything packed in so far, and
+ Z_SYNC_FLUSH called.
+ """
+ compressor = zlib.compressobj()
+ bytes_out = []
+ append = bytes_out.append
+ compress = compressor.compress
+ for accepted_bytes in self.bytes_in:
+ out = compress(accepted_bytes)
+ if out:
+ append(out)
+ if extra_bytes:
+ out = compress(extra_bytes)
+ out += compressor.flush(Z_SYNC_FLUSH)
+ append(out)
+ bytes_out_len = sum(map(len, bytes_out))
+ return bytes_out, bytes_out_len, compressor
+
+ def write(self, bytes, reserved=False):
+ """Write some bytes to the chunk.
+
+ If the bytes fit, False is returned. Otherwise True is returned
+ and the bytes have not been added to the chunk.
+
+ :param bytes: The bytes to include
+ :param reserved: If True, we can use the space reserved in the
+ constructor.
+ """
+ if self.num_repack > self._max_repack and not reserved:
+ self.unused_bytes = bytes
+ return True
+ if reserved:
+ capacity = self.chunk_size
+ else:
+ capacity = self.chunk_size - self.reserved_size
+ comp = self.compressor
+
+ # Check to see if the currently unflushed bytes would fit with a bit of
+ # room to spare, assuming no compression.
+ next_unflushed = self.unflushed_in_bytes + len(bytes)
+ remaining_capacity = capacity - self.bytes_out_len - 10
+ if (next_unflushed < remaining_capacity):
+ # looks like it will fit
+ out = comp.compress(bytes)
+ if out:
+ self.bytes_list.append(out)
+ self.bytes_out_len += len(out)
+ self.bytes_in.append(bytes)
+ self.unflushed_in_bytes += len(bytes)
+ else:
+ # This may or may not fit, try to add it with Z_SYNC_FLUSH
+ # Note: It is tempting to do this as a look-ahead pass, and to
+ # 'copy()' the compressor before flushing. However, that ends up
+ # being the same thing as increasing repack: similar cost, same
+ # benefit. And this way we still have the 'repack' knob that can
+ # be adjusted, and we don't depend on a platform-specific
+ # 'copy()' function.
+ self.num_zsync += 1
+ if self._max_repack == 0 and self.num_zsync > self._max_zsync:
+ self.num_repack += 1
+ self.unused_bytes = bytes
+ return True
+ out = comp.compress(bytes)
+ out += comp.flush(Z_SYNC_FLUSH)
+ self.unflushed_in_bytes = 0
+ if out:
+ self.bytes_list.append(out)
+ self.bytes_out_len += len(out)
+
+ # We are a bit extra conservative, because it seems that you *can*
+ # get better compression with Z_SYNC_FLUSH than a full compress. It
+ # is probably very rare, but we were able to trigger it.
+ if self.num_repack == 0:
+ safety_margin = 100
+ else:
+ safety_margin = 10
+ if self.bytes_out_len + safety_margin <= capacity:
+ # It fit, so mark it added
+ self.bytes_in.append(bytes)
+ else:
+ # We are over budget, try to squeeze this in without any
+ # Z_SYNC_FLUSH calls
+ self.num_repack += 1
+ (bytes_out, this_len,
+ compressor) = self._recompress_all_bytes_in(bytes)
+ if self.num_repack >= self._max_repack:
+ # When we get *to* _max_repack, bump over so that the
+ # earlier > _max_repack will be triggered.
+ self.num_repack += 1
+ if this_len + 10 > capacity:
+ (bytes_out, this_len,
+ compressor) = self._recompress_all_bytes_in()
+ self.compressor = compressor
+ # Force us to not allow more data
+ self.num_repack = self._max_repack + 1
+ self.bytes_list = bytes_out
+ self.bytes_out_len = this_len
+ self.unused_bytes = bytes
+ return True
+ else:
+ # This fits when we pack it tighter, so use the new packing
+ self.compressor = compressor
+ self.bytes_in.append(bytes)
+ self.bytes_list = bytes_out
+ self.bytes_out_len = this_len
+ return False
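+
+
+def _example_chunk_writer_usage(lines):
+    # Illustrative sketch only (not part of the original bzrlib code): pack as
+    # many of 'lines' (byte strings) as fit into a single 4096-byte chunk.
+    # write() returns True for the first string that does not fit; everything
+    # from that point on is left for the caller's next chunk.
+    writer = ChunkWriter(4096)
+    overflow = []
+    for pos, line in enumerate(lines):
+        if writer.write(line):
+            overflow = lines[pos:]
+            break
+    compressed_chunks, unused, num_nulls = writer.finish()
+    return ''.join(compressed_chunks), overflow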
+
diff --git a/bzrlib/clean_tree.py b/bzrlib/clean_tree.py
new file mode 100644
index 0000000..e005169
--- /dev/null
+++ b/bzrlib/clean_tree.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import errno
+import os
+import shutil
+
+from bzrlib import (
+ controldir,
+ errors,
+ ui,
+ )
+from bzrlib.osutils import isdir
+from bzrlib.trace import note
+from bzrlib.workingtree import WorkingTree
+from bzrlib.i18n import gettext
+
+def is_detritus(subp):
+ """Return True if the supplied path is detritus, False otherwise"""
+ return subp.endswith('.THIS') or subp.endswith('.BASE') or\
+ subp.endswith('.OTHER') or subp.endswith('~') or subp.endswith('.tmp')
+
+
+def iter_deletables(tree, unknown=False, ignored=False, detritus=False):
+ """Iterate through files that may be deleted"""
+ for subp in tree.extras():
+ if detritus and is_detritus(subp):
+ yield tree.abspath(subp), subp
+ continue
+ if tree.is_ignored(subp):
+ if ignored:
+ yield tree.abspath(subp), subp
+ else:
+ if unknown:
+ yield tree.abspath(subp), subp
+
+
+def clean_tree(directory, unknown=False, ignored=False, detritus=False,
+ dry_run=False, no_prompt=False):
+ """Remove files in the specified classes from the tree"""
+ tree = WorkingTree.open_containing(directory)[0]
+ tree.lock_read()
+ try:
+ deletables = list(iter_deletables(tree, unknown=unknown,
+ ignored=ignored, detritus=detritus))
+ deletables = _filter_out_nested_bzrdirs(deletables)
+ if len(deletables) == 0:
+ note(gettext('Nothing to delete.'))
+ return 0
+ if not no_prompt:
+ for path, subp in deletables:
+ ui.ui_factory.note(subp)
+ prompt = gettext('Are you sure you wish to delete these')
+ if not ui.ui_factory.get_boolean(prompt):
+ ui.ui_factory.note(gettext('Canceled'))
+ return 0
+ delete_items(deletables, dry_run=dry_run)
+ finally:
+ tree.unlock()
+
+
+def _filter_out_nested_bzrdirs(deletables):
+ result = []
+ for path, subp in deletables:
+ # bzr won't recurse into unknowns/ignored directories by default
+ # so we don't pay a penalty for checking subdirs of path for nested
+ # bzrdir.
+ # That said, we won't detect a branch in a subdirectory of a
+ # non-branch directory, and will therefore delete it. (Worth a FIXME?)
+ if isdir(path):
+ try:
+ controldir.ControlDir.open(path)
+ except errors.NotBranchError:
+ result.append((path,subp))
+ else:
+ # TODO: maybe we should notify the user about skipped directories?
+ pass
+ else:
+ result.append((path,subp))
+ return result
+
+
+def delete_items(deletables, dry_run=False):
+ """Delete files in the deletables iterable"""
+ def onerror(function, path, excinfo):
+ """Show warning for errors seen by rmtree.
+ """
+ # Handle only permission error while removing files.
+ # Other errors are re-raised.
+ if function is not os.remove or excinfo[1].errno != errno.EACCES:
+ raise
+ ui.ui_factory.show_warning(gettext('unable to remove %s') % path)
+ has_deleted = False
+ for path, subp in deletables:
+ if not has_deleted:
+ note(gettext("deleting paths:"))
+ has_deleted = True
+ if not dry_run:
+ if isdir(path):
+ shutil.rmtree(path, onerror=onerror)
+ else:
+ try:
+ os.unlink(path)
+ note(' ' + subp)
+ except OSError, e:
+ # We handle only permission error here
+ if e.errno != errno.EACCES:
+ raise
+ ui.ui_factory.show_warning(gettext(
+ 'unable to remove "{0}": {1}.').format(
+ path, e.strerror))
+ else:
+ note(' ' + subp)
+ if not has_deleted:
+ note(gettext("No files deleted."))
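+
+
+def _example_clean_tree_dry_run(path):
+    # Illustrative sketch only (not part of the original bzrlib code): list,
+    # but do not actually delete, unknown files and merge/backup detritus in
+    # the working tree containing 'path'.
+    return clean_tree(path, unknown=True, detritus=True,
+                      dry_run=True, no_prompt=True)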
diff --git a/bzrlib/cleanup.py b/bzrlib/cleanup.py
new file mode 100644
index 0000000..b5ed578
--- /dev/null
+++ b/bzrlib/cleanup.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Helpers for managing cleanup functions and the errors they might raise.
+
+The usual way to run cleanup code in Python is::
+
+ try:
+ do_something()
+ finally:
+ cleanup_something()
+
+However if both `do_something` and `cleanup_something` raise an exception
+Python will forget the original exception and propagate the one from
+cleanup_something. Unfortunately, this is almost always much less useful than
+the original exception.
+
+If you want to be certain that the first, and only the first, error is raised,
+then use::
+
+ operation = OperationWithCleanups(do_something)
+ operation.add_cleanup(cleanup_something)
+ operation.run_simple()
+
+This is more inconvenient (because you need to make every try block a
+function), but will ensure that the first error encountered is the one raised,
+while also ensuring all cleanups are run. See OperationWithCleanups for more
+details.
+"""
+
+from __future__ import absolute_import
+
+from collections import deque
+import sys
+from bzrlib import (
+ debug,
+ trace,
+ )
+
+def _log_cleanup_error(exc):
+ trace.mutter('Cleanup failed:')
+ trace.log_exception_quietly()
+ if 'cleanup' in debug.debug_flags:
+ trace.warning('bzr: warning: Cleanup failed: %s', exc)
+
+
+def _run_cleanup(func, *args, **kwargs):
+ """Run func(*args, **kwargs), logging but not propagating any error it
+ raises.
+
+ :returns: True if func raised no errors, else False.
+ """
+ try:
+ func(*args, **kwargs)
+ except KeyboardInterrupt:
+ raise
+ except Exception, exc:
+ _log_cleanup_error(exc)
+ return False
+ return True
+
+
+def _run_cleanups(funcs):
+ """Run a series of cleanup functions."""
+ for func, args, kwargs in funcs:
+ _run_cleanup(func, *args, **kwargs)
+
+
+class ObjectWithCleanups(object):
+ """A mixin for objects that hold a cleanup list.
+
+ Subclass or client code can call add_cleanup and then later `cleanup_now`.
+ """
+ def __init__(self):
+ self.cleanups = deque()
+
+ def add_cleanup(self, cleanup_func, *args, **kwargs):
+ """Add a cleanup to run.
+
+ Cleanups may be added at any time.
+ Cleanups will be executed in LIFO order.
+ """
+ self.cleanups.appendleft((cleanup_func, args, kwargs))
+
+ def cleanup_now(self):
+ _run_cleanups(self.cleanups)
+ self.cleanups.clear()
+
+
+class OperationWithCleanups(ObjectWithCleanups):
+ """A way to run some code with a dynamic cleanup list.
+
+ This provides a way to add cleanups while the function-with-cleanups is
+ running.
+
+ Typical use::
+
+ operation = OperationWithCleanups(some_func)
+ operation.run(args...)
+
+ where `some_func` is::
+
+ def some_func(operation, args, ...):
+ do_something()
+ operation.add_cleanup(something)
+ # etc
+
+ Note that the first argument passed to `some_func` will be the
+ OperationWithCleanups object. To invoke `some_func` without that, use
+ `run_simple` instead of `run`.
+ """
+
+ def __init__(self, func):
+ super(OperationWithCleanups, self).__init__()
+ self.func = func
+
+ def run(self, *args, **kwargs):
+ return _do_with_cleanups(
+ self.cleanups, self.func, self, *args, **kwargs)
+
+ def run_simple(self, *args, **kwargs):
+ return _do_with_cleanups(
+ self.cleanups, self.func, *args, **kwargs)
+
+
+def _do_with_cleanups(cleanup_funcs, func, *args, **kwargs):
+ """Run `func`, then call all the cleanup_funcs.
+
+ All the cleanup_funcs are guaranteed to be run. The first exception raised
+ by func or any of the cleanup_funcs is the one that will be propagated by
+ this function (subsequent errors are caught and logged).
+
+ Conceptually similar to::
+
+ try:
+ return func(*args, **kwargs)
+ finally:
+ for cleanup, cargs, ckwargs in cleanup_funcs:
+ cleanup(*cargs, **ckwargs)
+
+ It avoids several problems with using try/finally directly:
+ * an exception from func will not be obscured by a subsequent exception
+ from a cleanup.
+ * an exception from a cleanup will not prevent other cleanups from
+ running (but the first exception encountered is still the one
+ propagated).
+
+ Unlike `_run_cleanup`, `_do_with_cleanups` can propagate an exception from a
+ cleanup, but only if there is no exception from func.
+ """
+ # As correct as Python 2.4 allows.
+ try:
+ result = func(*args, **kwargs)
+ except:
+ # We have an exception from func already, so suppress cleanup errors.
+ _run_cleanups(cleanup_funcs)
+ raise
+ else:
+ # No exception from func, so allow the first exception from
+ # cleanup_funcs to propagate if one occurs (but only after running all
+ # of them).
+ exc_info = None
+ for cleanup, c_args, c_kwargs in cleanup_funcs:
+ # XXX: Hmm, if KeyboardInterrupt arrives at exactly this line, we
+ # won't run all cleanups... perhaps we should temporarily install a
+ # SIGINT handler?
+ if exc_info is None:
+ try:
+ cleanup(*c_args, **c_kwargs)
+ except:
+ # This is the first cleanup to fail, so remember its
+ # details.
+ exc_info = sys.exc_info()
+ else:
+ # We already have an exception to propagate, so log any errors
+ # but don't propagate them.
+ _run_cleanup(cleanup, *c_args, **c_kwargs)
+ if exc_info is not None:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ del exc_info
+ # No error, so we can return the result
+ return result
+
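+
+def _example_operation_with_cleanups(branch):
+    # Illustrative sketch only (not part of the original bzrlib code):
+    # 'branch' is assumed to be any object with lock_read()/unlock() and
+    # last_revision().  run() passes the operation itself as the first
+    # argument, so the body can register cleanups as it acquires resources.
+    def _body(op, b):
+        b.lock_read()
+        op.add_cleanup(b.unlock)
+        return b.last_revision()
+    return OperationWithCleanups(_body).run(branch)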
+
diff --git a/bzrlib/cmd_test_script.py b/bzrlib/cmd_test_script.py
new file mode 100644
index 0000000..d2d8cd4
--- /dev/null
+++ b/bzrlib/cmd_test_script.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Front-end command for shell-like test scripts.
+
+See doc/developers/testing.txt for more explanations.
+ This module should be importable even if testtools isn't available.
+"""
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib import (
+ commands,
+ option,
+ )
+
+
+class cmd_test_script(commands.Command):
+ """Run a shell-like test from a file."""
+
+ hidden = True
+ takes_args = ['infile']
+ takes_options = [
+ option.Option('null-output',
+ help='Null command outputs match any output.'),
+ ]
+
+ @commands.display_command
+ def run(self, infile, null_output=False):
+ # local imports to defer testtools dependency
+ from bzrlib import tests
+ from bzrlib.tests.script import TestCaseWithTransportAndScript
+
+ f = open(infile)
+ try:
+ script = f.read()
+ finally:
+ f.close()
+
+ class Test(TestCaseWithTransportAndScript):
+
+ script = None # Set before running
+
+ def test_it(self):
+ self.run_script(script,
+ null_output_matches_anything=null_output)
+
+ runner = tests.TextTestRunner(stream=self.outf)
+ test = Test('test_it')
+ test.path = os.path.realpath(infile)
+ res = runner.run(test)
+ return len(res.errors) + len(res.failures)
diff --git a/bzrlib/cmd_version_info.py b/bzrlib/cmd_version_info.py
new file mode 100644
index 0000000..1db4684
--- /dev/null
+++ b/bzrlib/cmd_version_info.py
@@ -0,0 +1,138 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Commands for generating snapshot information about a bzr tree."""
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+
+lazy_import(globals(), """
+from bzrlib import (
+ branch,
+ errors,
+ version_info_formats,
+ workingtree,
+ )
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.commands import Command
+from bzrlib.option import Option, RegistryOption
+
+
+def _parse_version_info_format(format):
+ """Convert a string passed by the user into a VersionInfoFormat.
+
+ This looks in the version info format registry, and if the format
+ cannot be found, generates a useful error exception.
+ """
+ try:
+ return version_info_formats.get_builder(format)
+ except KeyError:
+ formats = version_info_formats.get_builder_formats()
+ raise errors.BzrCommandError(gettext('No known version info format {0}.'
+ ' Supported types are: {1}').format(
+ format, formats))
+
+
+class cmd_version_info(Command):
+ __doc__ = """Show version information about this tree.
+
+ You can use this command to add version information to the
+ source code of an application. The output can be in one of the
+ supported formats or in a custom format based on a template.
+
+ For example::
+
+ bzr version-info --custom \\
+ --template="#define VERSION_INFO \\"Project 1.2.3 (r{revno})\\"\\n"
+
+ will produce a C header file with a formatted string containing the
+ current revision number. Other supported variables in templates are:
+
+ * {date} - date of the last revision
+ * {build_date} - current date
+ * {revno} - revision number
+ * {revision_id} - revision id
+ * {branch_nick} - branch nickname
+ * {clean} - 0 if the source tree contains uncommitted changes,
+ otherwise 1
+ """
+
+ takes_options = [RegistryOption('format',
+ 'Select the output format.',
+ value_switches=True,
+ lazy_registry=('bzrlib.version_info_formats',
+ 'format_registry')),
+ Option('all', help='Include all possible information.'),
+ Option('check-clean', help='Check if tree is clean.'),
+ Option('include-history',
+ help='Include the revision-history.'),
+ Option('include-file-revisions',
+ help='Include the last revision for each file.'),
+ Option('template', type=str, help='Template for the output.'),
+ 'revision',
+ ]
+ takes_args = ['location?']
+
+ encoding_type = 'exact'
+
+ def run(self, location=None, format=None,
+ all=False, check_clean=False, include_history=False,
+ include_file_revisions=False, template=None,
+ revision=None):
+
+ if revision and len(revision) > 1:
+ raise errors.BzrCommandError(
+ gettext('bzr version-info --revision takes exactly'
+ ' one revision specifier'))
+
+ if location is None:
+ location = '.'
+
+ if format is None:
+ format = version_info_formats.format_registry.get()
+
+ try:
+ wt = workingtree.WorkingTree.open_containing(location)[0]
+ except errors.NoWorkingTree:
+ b = branch.Branch.open(location)
+ wt = None
+ else:
+ b = wt.branch
+
+ if all:
+ include_history = True
+ check_clean = True
+ include_file_revisions = True
+ if template:
+ include_history = True
+ include_file_revisions = True
+ if '{clean}' in template:
+ check_clean = True
+
+ if revision is not None:
+ revision_id = revision[0].as_revision_id(b)
+ else:
+ revision_id = None
+
+ builder = format(b, working_tree=wt,
+ check_for_clean=check_clean,
+ include_revision_history=include_history,
+ include_file_revisions=include_file_revisions,
+ template=template, revision_id=revision_id)
+ builder.generate(self.outf)
diff --git a/bzrlib/cmdline.py b/bzrlib/cmdline.py
new file mode 100644
index 0000000..2cb4983
--- /dev/null
+++ b/bzrlib/cmdline.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2010-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Unicode-compatible command-line splitter for all platforms.
+
+The user-visible behaviour of this module is described in
+configuring_bazaar.txt.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+
+_whitespace_match = re.compile(u'\s', re.UNICODE).match
+
+
+class _PushbackSequence(object):
+ def __init__(self, orig):
+ self._iter = iter(orig)
+ self._pushback_buffer = []
+
+ def next(self):
+ if len(self._pushback_buffer) > 0:
+ return self._pushback_buffer.pop()
+ else:
+ return self._iter.next()
+
+ def pushback(self, char):
+ self._pushback_buffer.append(char)
+
+ def __iter__(self):
+ return self
+
+
+class _Whitespace(object):
+ def process(self, next_char, context):
+ if _whitespace_match(next_char):
+ if len(context.token) > 0:
+ return None
+ else:
+ return self
+ elif next_char in context.allowed_quote_chars:
+ context.quoted = True
+ return _Quotes(next_char, self)
+ elif next_char == u'\\':
+ return _Backslash(self)
+ else:
+ context.token.append(next_char)
+ return _Word()
+
+
+class _Quotes(object):
+ def __init__(self, quote_char, exit_state):
+ self.quote_char = quote_char
+ self.exit_state = exit_state
+
+ def process(self, next_char, context):
+ if next_char == u'\\':
+ return _Backslash(self)
+ elif next_char == self.quote_char:
+ return self.exit_state
+ else:
+ context.token.append(next_char)
+ return self
+
+
+class _Backslash(object):
+ # See http://msdn.microsoft.com/en-us/library/bb776391(VS.85).aspx
+ def __init__(self, exit_state):
+ self.exit_state = exit_state
+ self.count = 1
+
+ def process(self, next_char, context):
+ if next_char == u'\\':
+ self.count += 1
+ return self
+ elif next_char in context.allowed_quote_chars:
+ # 2N backslashes followed by a quote are N backslashes
+ context.token.append(u'\\' * (self.count/2))
+ # 2N+1 backslashes followed by a quote are N backslashes followed by
+ # the quote which should not be processed as the start or end of
+ # the quoted arg
+ if self.count % 2 == 1:
+ # odd number of \ escapes the quote
+ context.token.append(next_char)
+ else:
+ # let exit_state handle next_char
+ context.seq.pushback(next_char)
+ self.count = 0
+ return self.exit_state
+ else:
+ # N backslashes not followed by a quote are just N backslashes
+ if self.count > 0:
+ context.token.append(u'\\' * self.count)
+ self.count = 0
+ # let exit_state handle next_char
+ context.seq.pushback(next_char)
+ return self.exit_state
+
+ def finish(self, context):
+ if self.count > 0:
+ context.token.append(u'\\' * self.count)
+
+
+class _Word(object):
+ def process(self, next_char, context):
+ if _whitespace_match(next_char):
+ return None
+ elif next_char in context.allowed_quote_chars:
+ return _Quotes(next_char, self)
+ elif next_char == u'\\':
+ return _Backslash(self)
+ else:
+ context.token.append(next_char)
+ return self
+
+
+class Splitter(object):
+ def __init__(self, command_line, single_quotes_allowed):
+ self.seq = _PushbackSequence(command_line)
+ self.allowed_quote_chars = u'"'
+ if single_quotes_allowed:
+ self.allowed_quote_chars += u"'"
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ quoted, token = self._get_token()
+ if token is None:
+ raise StopIteration
+ return quoted, token
+
+ def _get_token(self):
+ self.quoted = False
+ self.token = []
+ state = _Whitespace()
+ for next_char in self.seq:
+ state = state.process(next_char, self)
+ if state is None:
+ break
+ if state is not None and getattr(state, 'finish', None) is not None:
+ state.finish(self)
+ result = u''.join(self.token)
+ if not self.quoted and result == '':
+ result = None
+ return self.quoted, result
+
+
+def split(unsplit, single_quotes_allowed=True):
+ splitter = Splitter(unsplit, single_quotes_allowed=single_quotes_allowed)
+ return [arg for quoted, arg in splitter]
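+
+
+def _example_split():
+    # Illustrative sketch only (not part of the original bzrlib code): the
+    # splitter honours double quotes (and single quotes by default) and
+    # Windows-style backslash escaping of quote characters.
+    args = split(u'bzr commit -m "fix \\"quoting\\" bug" file1')
+    # args == [u'bzr', u'commit', u'-m', u'fix "quoting" bug', u'file1']
+    return args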
diff --git a/bzrlib/commands.py b/bzrlib/commands.py
new file mode 100644
index 0000000..4c2b389
--- /dev/null
+++ b/bzrlib/commands.py
@@ -0,0 +1,1303 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# TODO: Define arguments by objects, rather than just using names.
+# Those objects can specify the expected type of the argument, which
+# would help with validation and shell completion. They could also provide
+# help/explanation for that argument in a structured way.
+
+# TODO: Specific "examples" property on commands for consistent formatting.
+
+import os
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+import threading
+
+import bzrlib
+from bzrlib import (
+ config,
+ cleanup,
+ cmdline,
+ debug,
+ errors,
+ i18n,
+ option,
+ osutils,
+ trace,
+ ui,
+ )
+""")
+
+from bzrlib.hooks import Hooks
+from bzrlib.i18n import gettext
+# Compatibility - Option used to be in commands.
+from bzrlib.option import Option
+from bzrlib.plugin import disable_plugins, load_plugins
+from bzrlib import registry
+
+
+class CommandInfo(object):
+ """Information about a command."""
+
+ def __init__(self, aliases):
+ """The list of aliases for the command."""
+ self.aliases = aliases
+
+ @classmethod
+ def from_command(klass, command):
+ """Factory to construct a CommandInfo from a command."""
+ return klass(command.aliases)
+
+
+class CommandRegistry(registry.Registry):
+ """Special registry mapping command names to command classes.
+
+ :ivar overridden_registry: Look in this registry for commands being
+ overridden by this registry. This can be used to tell plugin commands
+ about the builtin they're decorating.
+ """
+
+ def __init__(self):
+ registry.Registry.__init__(self)
+ self.overridden_registry = None
+ # map from aliases to the real command that implements the name
+ self._alias_dict = {}
+
+ def get(self, command_name):
+ real_name = self._alias_dict.get(command_name, command_name)
+ return registry.Registry.get(self, real_name)
+
+ @staticmethod
+ def _get_name(command_name):
+ if command_name.startswith("cmd_"):
+ return _unsquish_command_name(command_name)
+ else:
+ return command_name
+
+ def register(self, cmd, decorate=False):
+ """Utility function to help register a command
+
+ :param cmd: Command subclass to register
+ :param decorate: If true, allow overriding an existing command
+ of the same name; the old command is returned by this function.
+ Otherwise it is an error to try to override an existing command.
+ """
+ k = cmd.__name__
+ k_unsquished = self._get_name(k)
+ try:
+ previous = self.get(k_unsquished)
+ except KeyError:
+ previous = None
+ if self.overridden_registry:
+ try:
+ previous = self.overridden_registry.get(k_unsquished)
+ except KeyError:
+ pass
+ info = CommandInfo.from_command(cmd)
+ try:
+ registry.Registry.register(self, k_unsquished, cmd,
+ override_existing=decorate, info=info)
+ except KeyError:
+ trace.warning('Two plugins defined the same command: %r' % k)
+ trace.warning('Not loading the one in %r' %
+ sys.modules[cmd.__module__])
+ trace.warning('Previously this command was registered from %r' %
+ sys.modules[previous.__module__])
+ for a in cmd.aliases:
+ self._alias_dict[a] = k_unsquished
+ return previous
+
+ def register_lazy(self, command_name, aliases, module_name):
+ """Register a command without loading its module.
+
+ :param command_name: The primary name of the command.
+ :param aliases: A list of aliases for the command.
+ :param module_name: The module that the command lives in.
+ """
+ key = self._get_name(command_name)
+ registry.Registry.register_lazy(self, key, module_name, command_name,
+ info=CommandInfo(aliases))
+ for a in aliases:
+ self._alias_dict[a] = key
+
+
+plugin_cmds = CommandRegistry()
+builtin_command_registry = CommandRegistry()
+plugin_cmds.overridden_registry = builtin_command_registry
+
+
+def register_command(cmd, decorate=False):
+ """Register a plugin command.
+
+ Should generally be avoided in favor of lazy registration.
+ """
+ global plugin_cmds
+ return plugin_cmds.register(cmd, decorate)
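+
+
+def _example_register_plugin_command():
+    # Illustrative sketch only (not part of the original bzrlib code): plugins
+    # normally register their commands lazily, so the implementing module is
+    # only imported when the command is actually run.  'cmd_example' and
+    # 'myplugin.commands' are hypothetical names.
+    plugin_cmds.register_lazy('cmd_example', ['ex'], 'myplugin.commands')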
+
+
+def _squish_command_name(cmd):
+ return 'cmd_' + cmd.replace('-', '_')
+
+
+def _unsquish_command_name(cmd):
+ return cmd[4:].replace('_','-')
+
+
+def _register_builtin_commands():
+ if builtin_command_registry.keys():
+ # only load once
+ return
+ import bzrlib.builtins
+ for cmd_class in _scan_module_for_commands(bzrlib.builtins).values():
+ builtin_command_registry.register(cmd_class)
+ bzrlib.builtins._register_lazy_builtins()
+
+
+def _scan_module_for_commands(module):
+ r = {}
+ for name, obj in module.__dict__.iteritems():
+ if name.startswith("cmd_"):
+ real_name = _unsquish_command_name(name)
+ r[real_name] = obj
+ return r
+
+
+def _list_bzr_commands(names):
+ """Find commands from bzr's core and plugins.
+
+ This is not the public interface, just the default hook called by all_command_names.
+ """
+ # to eliminate duplicates
+ names.update(builtin_command_names())
+ names.update(plugin_command_names())
+ return names
+
+
+def all_command_names():
+ """Return a set of all command names."""
+ names = set()
+ for hook in Command.hooks['list_commands']:
+ names = hook(names)
+ if names is None:
+ raise AssertionError(
+ 'hook %s returned None' % Command.hooks.get_hook_name(hook))
+ return names
+
+
+def builtin_command_names():
+ """Return list of builtin command names.
+
+ Use of all_command_names() is encouraged rather than builtin_command_names
+ and/or plugin_command_names.
+ """
+ _register_builtin_commands()
+ return builtin_command_registry.keys()
+
+
+def plugin_command_names():
+ """Returns command names from commands registered by plugins."""
+ return plugin_cmds.keys()
+
+
+def get_cmd_object(cmd_name, plugins_override=True):
+ """Return the command object for a command.
+
+ plugins_override
+ If true, plugin commands can override builtins.
+ """
+ try:
+ return _get_cmd_object(cmd_name, plugins_override)
+ except KeyError:
+ raise errors.BzrCommandError(gettext('unknown command "%s"') % cmd_name)
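+
+
+def _example_run_command_by_name(cmd_name, argv):
+    # Illustrative sketch only (not part of the original bzrlib code): look up
+    # a command object by name and run it with a list of command-line
+    # arguments, e.g. _example_run_command_by_name('status', []).
+    # run_argv_aliases() is assumed from the Command class defined below.
+    cmd = get_cmd_object(cmd_name)
+    return cmd.run_argv_aliases(argv)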
+
+
+def _get_cmd_object(cmd_name, plugins_override=True, check_missing=True):
+ """Get a command object.
+
+ :param cmd_name: The name of the command.
+ :param plugins_override: Allow plugins to override builtins.
+ :param check_missing: Look up commands not found in the regular index via
+ the get_missing_command hook.
+ :return: A Command object instance
+ :raises KeyError: If no command is found.
+ """
+ # We want only 'ascii' command names, but the user may have typed
+ # in a Unicode name. In that case, they should just get a
+ # 'command not found' error later.
+ # In the future, we may actually support Unicode command names.
+ cmd = None
+ # Get a command
+ for hook in Command.hooks['get_command']:
+ cmd = hook(cmd, cmd_name)
+ if cmd is not None and not plugins_override and not cmd.plugin_name():
+ # We've found a non-plugin command, don't permit it to be
+ # overridden.
+ break
+ if cmd is None and check_missing:
+ for hook in Command.hooks['get_missing_command']:
+ cmd = hook(cmd_name)
+ if cmd is not None:
+ break
+ if cmd is None:
+ # No command found.
+ raise KeyError
+ # Allow plugins to extend commands
+ for hook in Command.hooks['extend_command']:
+ hook(cmd)
+ if getattr(cmd, 'invoked_as', None) is None:
+ cmd.invoked_as = cmd_name
+ return cmd
+
+
+def _try_plugin_provider(cmd_name):
+ """Probe for a plugin provider having cmd_name."""
+ try:
+ plugin_metadata, provider = probe_for_provider(cmd_name)
+ raise errors.CommandAvailableInPlugin(cmd_name,
+ plugin_metadata, provider)
+ except errors.NoPluginAvailable:
+ pass
+
+
+def probe_for_provider(cmd_name):
+ """Look for a provider for cmd_name.
+
+ :param cmd_name: The command name.
+ :return: plugin_metadata, provider for getting cmd_name.
+ :raises NoPluginAvailable: When no provider can supply the plugin.
+ """
+ # look for providers that provide this command but aren't installed
+ for provider in command_providers_registry:
+ try:
+ return provider.plugin_for_command(cmd_name), provider
+ except errors.NoPluginAvailable:
+ pass
+ raise errors.NoPluginAvailable(cmd_name)
+
+
+def _get_bzr_command(cmd_or_None, cmd_name):
+ """Get a command from bzr's core."""
+ try:
+ cmd_class = builtin_command_registry.get(cmd_name)
+ except KeyError:
+ pass
+ else:
+ return cmd_class()
+ return cmd_or_None
+
+
+def _get_external_command(cmd_or_None, cmd_name):
+ """Lookup a command that is a shell script."""
+ # Only do external command lookups when no command is found so far.
+ if cmd_or_None is not None:
+ return cmd_or_None
+ from bzrlib.externalcommand import ExternalCommand
+ cmd_obj = ExternalCommand.find_command(cmd_name)
+ if cmd_obj:
+ return cmd_obj
+
+
+def _get_plugin_command(cmd_or_None, cmd_name):
+ """Get a command from bzr's plugins."""
+ try:
+ return plugin_cmds.get(cmd_name)()
+ except KeyError:
+ pass
+ for key in plugin_cmds.keys():
+ info = plugin_cmds.get_info(key)
+ if cmd_name in info.aliases:
+ return plugin_cmds.get(key)()
+ return cmd_or_None
+
+
+class Command(object):
+ """Base class for commands.
+
+ Commands are the heart of the command-line bzr interface.
+
+ The command object mostly handles the mapping of command-line
+ parameters into one or more bzrlib operations, and of the results
+ into textual output.
+
+ Commands normally don't have any state. All their arguments are
+ passed in to the run method. (Subclasses may take a different
+ policy if the behaviour of the instance needs to depend on e.g. a
+ shell plugin and not just its Python class.)
+
+ The docstring for an actual command should give a single-line
+ summary, then a complete description of the command. A grammar
+ description will be inserted.
+
+ :cvar aliases: Other accepted names for this command.
+
+ :cvar takes_args: List of argument forms, marked with whether they are
+ optional, repeated, etc. Examples::
+
+ ['to_location', 'from_branch?', 'file*']
+
+ * 'to_location' is required
+ * 'from_branch' is optional
+ * 'file' can be specified 0 or more times
+
+ :cvar takes_options: List of options that may be given for this command.
+ These can be either strings, referring to globally-defined options, or
+ option objects. Retrieve through options().
+
+ :cvar hidden: If true, this command isn't advertised. This is typically
+ for commands intended for expert users.
+
+ :cvar encoding_type: Command objects will get a 'outf' attribute, which has
+ been setup to properly handle encoding of unicode strings.
+ encoding_type determines what will happen when characters cannot be
+ encoded:
+
+ * strict - abort if we cannot decode
+ * replace - put in a bogus character (typically '?')
+ * exact - do not encode sys.stdout
+
+ NOTE: by default on Windows, sys.stdout is opened as a text stream,
+ therefore LF line-endings are converted to CRLF. When a command uses
+ encoding_type = 'exact', then sys.stdout is forced to be a binary
+ stream, and line-endings will not be mangled.
+
+ :cvar invoked_as:
+ A string indicating the real name under which this command was
+ invoked, before expansion of aliases.
+ (This may be None if the command was constructed and run in-process.)
+
+ :cvar hooks: An instance of CommandHooks.
+
+ :cvar __doc__: The help shown by 'bzr help command' for this command.
+ This is set by assigning explicitly to __doc__ so that -OO can
+ be used::
+
+ class Foo(Command):
+ __doc__ = "My help goes here"
+ """
+ aliases = []
+ takes_args = []
+ takes_options = []
+ encoding_type = 'strict'
+ invoked_as = None
+ l10n = True
+
+ hidden = False
+
+ def __init__(self):
+ """Construct an instance of this command."""
+ # List of standard options directly supported
+ self.supported_std_options = []
+ self._setup_run()
+
+ def add_cleanup(self, cleanup_func, *args, **kwargs):
+ """Register a function to call after self.run returns or raises.
+
+ Functions will be called in LIFO order.
+ """
+ self._operation.add_cleanup(cleanup_func, *args, **kwargs)
+
+ def cleanup_now(self):
+ """Execute and empty pending cleanup functions immediately.
+
+ After cleanup_now all registered cleanups are forgotten. add_cleanup
+ may be called again after cleanup_now; these cleanups will be called
+ after self.run returns or raises (or when cleanup_now is next called).
+
+ This is useful for releasing expensive or contentious resources (such
+ as write locks) before doing further work that does not require those
+ resources (such as writing results to self.outf). Note though, that
+ as it releases all resources, this may release locks that the command
+ wants to hold, so it should be used with care.
+ """
+ self._operation.cleanup_now()
+
+ def _usage(self):
+ """Return single-line grammar for this command.
+
+ Only describes arguments, not options.
+ """
+ s = 'bzr ' + self.name() + ' '
+ for aname in self.takes_args:
+ aname = aname.upper()
+ if aname[-1] in ['$', '+']:
+ aname = aname[:-1] + '...'
+ elif aname[-1] == '?':
+ aname = '[' + aname[:-1] + ']'
+ elif aname[-1] == '*':
+ aname = '[' + aname[:-1] + '...]'
+ s += aname + ' '
+ s = s[:-1] # remove last space
+ return s
+
+ def get_help_text(self, additional_see_also=None, plain=True,
+ see_also_as_links=False, verbose=True):
+ """Return a text string with help for this command.
+
+ :param additional_see_also: Additional help topics to be
+ cross-referenced.
+ :param plain: if False, raw help (reStructuredText) is
+ returned instead of plain text.
+ :param see_also_as_links: if True, convert items in 'See also'
+ list to internal links (used by bzr_man rstx generator)
+ :param verbose: if True, display the full help, otherwise
+ leave out the descriptive sections and just display
+ usage help (e.g. Purpose, Usage, Options) with a
+ message explaining how to obtain full help.
+ """
+ if self.l10n:
+ i18n.install() # Install i18n only for get_help_text for now.
+ doc = self.help()
+ if doc:
+ # Note: If self.gettext() translates ':Usage:\n', the section will
+ # be shown after the "Description" section and we don't want to
+ # translate the usage string.
+ # However, bzr export-pot doesn't export the :Usage: section and it must
+ # not be translated.
+ doc = self.gettext(doc)
+ else:
+ doc = gettext("No help for this command.")
+
+ # Extract the summary (purpose) and sections out from the text
+ purpose,sections,order = self._get_help_parts(doc)
+
+ # If a custom usage section was provided, use it
+ if sections.has_key('Usage'):
+ usage = sections.pop('Usage')
+ else:
+ usage = self._usage()
+
+ # The header is the purpose and usage
+ result = ""
+ result += gettext(':Purpose: %s\n') % (purpose,)
+ if usage.find('\n') >= 0:
+ result += gettext(':Usage:\n%s\n') % (usage,)
+ else:
+ result += gettext(':Usage: %s\n') % (usage,)
+ result += '\n'
+
+ # Add the options
+ #
+ # XXX: optparse implicitly rewraps the help, and not always perfectly,
+ # so we get <https://bugs.launchpad.net/bzr/+bug/249908>. -- mbp
+ # 20090319
+ parser = option.get_optparser(self.options())
+ options = parser.format_option_help()
+ # FIXME: According to the spec, ReST option lists actually don't
+ # support options like --1.14 so that causes syntax errors (in Sphinx
+ # at least). As that pattern always appears in the commands that
+ # break, we trap on that and then format that block of 'format' options
+ # as a literal block. We use the most recent format still listed so we
+ # don't have to do that too often -- vila 20110514
+ if not plain and options.find(' --1.14 ') != -1:
+ options = options.replace(' format:\n', ' format::\n\n', 1)
+ if options.startswith('Options:'):
+ result += gettext(':Options:%s') % (options[len('options:'):],)
+ else:
+ result += options
+ result += '\n'
+
+ if verbose:
+ # Add the description, indenting it 2 spaces
+ # to match the indentation of the options
+ if sections.has_key(None):
+ text = sections.pop(None)
+ text = '\n '.join(text.splitlines())
+ result += gettext(':Description:\n %s\n\n') % (text,)
+
+ # Add the custom sections (e.g. Examples). Note that there's no need
+ # to indent these as they must be indented already in the source.
+ if sections:
+ for label in order:
+ if label in sections:
+ result += ':%s:\n%s\n' % (label, sections[label])
+ result += '\n'
+ else:
+ result += (gettext("See bzr help %s for more details and examples.\n\n")
+ % self.name())
+
+ # Add the aliases, source (plug-in) and see also links, if any
+ if self.aliases:
+ result += gettext(':Aliases: ')
+ result += ', '.join(self.aliases) + '\n'
+ plugin_name = self.plugin_name()
+ if plugin_name is not None:
+ result += gettext(':From: plugin "%s"\n') % plugin_name
+ see_also = self.get_see_also(additional_see_also)
+ if see_also:
+ if not plain and see_also_as_links:
+ see_also_links = []
+ for item in see_also:
+ if item == 'topics':
+ # topics doesn't have an independent section
+ # so don't create a real link
+ see_also_links.append(item)
+ else:
+ # Use a Sphinx link for this entry
+ link_text = gettext(":doc:`{0} <{1}-help>`").format(
+ item, item)
+ see_also_links.append(link_text)
+ see_also = see_also_links
+ result += gettext(':See also: %s') % ', '.join(see_also) + '\n'
+
+ # If this will be rendered as plain text, convert it
+ if plain:
+ import bzrlib.help_topics
+ result = bzrlib.help_topics.help_as_plain_text(result)
+ return result
+
+ @staticmethod
+ def _get_help_parts(text):
+ """Split help text into a summary and named sections.
+
+ :return: (summary,sections,order) where summary is the top line and
+ sections is a dictionary of the rest indexed by section name.
+ order is the order in which the sections appear in the text.
+ A section starts with a heading line of the form ":xxx:".
+ Indented text on following lines is the section value.
+ All text found outside a named section is assigned to the
+ default section which is given the key of None.
+ """
+ def save_section(sections, order, label, section):
+ if len(section) > 0:
+ if sections.has_key(label):
+ sections[label] += '\n' + section
+ else:
+ order.append(label)
+ sections[label] = section
+
+ lines = text.rstrip().splitlines()
+ summary = lines.pop(0)
+ sections = {}
+ order = []
+ label,section = None,''
+ for line in lines:
+ if line.startswith(':') and line.endswith(':') and len(line) > 2:
+ save_section(sections, order, label, section)
+ label,section = line[1:-1],''
+ elif (label is not None) and len(line) > 1 and not line[0].isspace():
+ save_section(sections, order, label, section)
+ label,section = None,line
+ else:
+ if len(section) > 0:
+ section += '\n' + line
+ else:
+ section = line
+ save_section(sections, order, label, section)
+ return summary, sections, order
+
+ def get_help_topic(self):
+ """Return the commands help topic - its name."""
+ return self.name()
+
+ def get_see_also(self, additional_terms=None):
+ """Return a list of help topics that are related to this command.
+
+ The list is derived from the content of the _see_also attribute. Any
+ duplicates are removed and the result is in lexical order.
+ :param additional_terms: Additional help topics to cross-reference.
+ :return: A list of help topics.
+ """
+ see_also = set(getattr(self, '_see_also', []))
+ if additional_terms:
+ see_also.update(additional_terms)
+ return sorted(see_also)
+
+ def options(self):
+ """Return dict of valid options for this command.
+
+ Maps from long option name to option object."""
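+ # For example, takes_options = ['revision', 'verbose'] maps the global
+ # 'revision' and 'verbose' Option objects into the result alongside the
+ # standard options, and marks 'verbose' as a supported standard option.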
+ r = Option.STD_OPTIONS.copy()
+ std_names = r.keys()
+ for o in self.takes_options:
+ if isinstance(o, basestring):
+ o = option.Option.OPTIONS[o]
+ r[o.name] = o
+ if o.name in std_names:
+ self.supported_std_options.append(o.name)
+ return r
+
+ def _setup_outf(self):
+ """Return a file linked to stdout, which has proper encoding."""
+ self.outf = ui.ui_factory.make_output_stream(
+ encoding_type=self.encoding_type)
+
+ def run_argv_aliases(self, argv, alias_argv=None):
+ """Parse the command line and run with extra aliases in alias_argv."""
+ args, opts = parse_args(self, argv, alias_argv)
+ self._setup_outf()
+
+ # Process the standard options
+ if 'help' in opts: # e.g. bzr add --help
+ self.outf.write(self.get_help_text())
+ return 0
+ if 'usage' in opts: # e.g. bzr add --usage
+ self.outf.write(self.get_help_text(verbose=False))
+ return 0
+ trace.set_verbosity_level(option._verbosity_level)
+ if 'verbose' in self.supported_std_options:
+ opts['verbose'] = trace.is_verbose()
+ elif opts.has_key('verbose'):
+ del opts['verbose']
+ if 'quiet' in self.supported_std_options:
+ opts['quiet'] = trace.is_quiet()
+ elif opts.has_key('quiet'):
+ del opts['quiet']
+ # mix arguments and options into one dictionary
+ cmdargs = _match_argform(self.name(), self.takes_args, args)
+ cmdopts = {}
+ for k, v in opts.items():
+ cmdopts[k.replace('-', '_')] = v
+
+ all_cmd_args = cmdargs.copy()
+ all_cmd_args.update(cmdopts)
+
+ try:
+ return self.run(**all_cmd_args)
+ finally:
+ # reset it, so that other commands run in the same process won't
+ # inherit state. Before we reset it, log any activity, so that it
+ # gets properly tracked.
+ ui.ui_factory.log_transport_activity(
+ display=('bytes' in debug.debug_flags))
+ trace.set_verbosity_level(0)
+
+ def _setup_run(self):
+ """Wrap the defined run method on self with a cleanup.
+
+ This is called by __init__ so that the Command can be run
+ by just calling run(), as it could be before cleanups were added.
+
+ If a different form of cleanups are in use by your Command subclass,
+ you can override this method.
+ """
+ class_run = self.run
+ def run(*args, **kwargs):
+ for hook in Command.hooks['pre_command']:
+ hook(self)
+ self._operation = cleanup.OperationWithCleanups(class_run)
+ try:
+ return self._operation.run_simple(*args, **kwargs)
+ finally:
+ del self._operation
+ for hook in Command.hooks['post_command']:
+ hook(self)
+ self.run = run
+
+ def run(self):
+ """Actually run the command.
+
+ This is invoked with the options and arguments bound to
+ keyword parameters.
+
+ Return 0 or None if the command was successful, or a non-zero
+ shell error code if not. It's OK for this method to allow
+ an exception to raise up.
+
+ This method is automatically wrapped by Command.__init__ with a
+ cleanup operation, stored as self._operation. This can be used
+ via self.add_cleanup to perform automatic cleanups at the end of
+ run().
+
+ The arguments for run are assembled by introspection. So for instance,
+ if your command takes an argument files, you would declare::
+
+ def run(self, files=None):
+ pass
+ """
+ raise NotImplementedError('no implementation of command %r'
+ % self.name())
+
+ def help(self):
+ """Return help message for this class."""
+ from inspect import getdoc
+ if self.__doc__ is Command.__doc__:
+ return None
+ return getdoc(self)
+
+ def gettext(self, message):
+ """Returns the gettext function used to translate this command's help.
+
+ Commands provided by plugins should override this to use their
+ own i18n system.
+ """
+ return i18n.gettext_per_paragraph(message)
+
+ def name(self):
+ """Return the canonical name for this command.
+
+ The name under which it was actually invoked is available in invoked_as.
+ """
+ return _unsquish_command_name(self.__class__.__name__)
+
+ def plugin_name(self):
+ """Get the name of the plugin that provides this command.
+
+ :return: The name of the plugin or None if the command is builtin.
+ """
+ mod_parts = self.__module__.split('.')
+ if len(mod_parts) >= 3 and mod_parts[1] == 'plugins':
+ return mod_parts[2]
+ else:
+ return None
+
+
+class CommandHooks(Hooks):
+ """Hooks related to Command object creation/enumeration."""
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially, because by default nothing should get
+ notified.
+ """
+ Hooks.__init__(self, "bzrlib.commands", "Command.hooks")
+ self.add_hook('extend_command',
+ "Called after creating a command object to allow modifications "
+ "such as adding or removing options, docs etc. Called with the "
+ "new bzrlib.commands.Command object.", (1, 13))
+ self.add_hook('get_command',
+ "Called when creating a single command. Called with "
+ "(cmd_or_None, command_name). get_command should either return "
+ "the cmd_or_None parameter, or a replacement Command object that "
+ "should be used for the command. Note that the Command.hooks "
+ "hooks are core infrastructure. Many users will prefer to use "
+ "bzrlib.commands.register_command or plugin_cmds.register_lazy.",
+ (1, 17))
+ self.add_hook('get_missing_command',
+ "Called when creating a single command if no command could be "
+ "found. Called with (command_name). get_missing_command should "
+ "either return None, or a Command object to be used for the "
+ "command.", (1, 17))
+ self.add_hook('list_commands',
+ "Called when enumerating commands. Called with a set of "
+ "cmd_name strings for all the commands found so far. This set "
+ " is safe to mutate - e.g. to remove a command. "
+ "list_commands should return the updated set of command names.",
+ (1, 17))
+ self.add_hook('pre_command',
+ "Called prior to executing a command. Called with the command "
+ "object.", (2, 6))
+ self.add_hook('post_command',
+ "Called after executing a command. Called with the command "
+ "object.", (2, 6))
+
+Command.hooks = CommandHooks()
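+ # Plugins typically hook in via install_named_hook; a purely illustrative
+ # sketch (the hook function below is hypothetical):
+ #
+ #     def announce(cmd):
+ #         trace.mutter('created command %s', cmd.name())
+ #     Command.hooks.install_named_hook('extend_command', announce,
+ #         'announce command creation')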
+
+
+def parse_args(command, argv, alias_argv=None):
+ """Parse command line.
+
+ Arguments and options are parsed at this level before being passed
+ down to specific command handlers. This routine knows, from a
+ lookup table, something about the available options, what optargs
+ they take, and which commands will accept them.
+ """
+ # TODO: make it a method of the Command?
+ parser = option.get_optparser(command.options())
+ if alias_argv is not None:
+ args = alias_argv + argv
+ else:
+ args = argv
+
+ # for python 2.5 and later, optparse raises this exception if a non-ascii
+ # option name is given. See http://bugs.python.org/issue2931
+ try:
+ options, args = parser.parse_args(args)
+ except UnicodeEncodeError,e:
+ raise errors.BzrCommandError(
+ gettext('Only ASCII permitted in option names'))
+
+ opts = dict([(k, v) for k, v in options.__dict__.iteritems() if
+ v is not option.OptionParser.DEFAULT_VALUE])
+ return args, opts
+
+
+def _match_argform(cmd, takes_args, args):
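+ # Map positional args onto the declared argument forms; for example, with
+ # takes_args == ['file*'] and args == ['a', 'b'] this returns
+ # {'file_list': ['a', 'b']}.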
+ argdict = {}
+
+ # step through args and takes_args, allowing appropriate 0-many matches
+ for ap in takes_args:
+ argname = ap[:-1]
+ if ap[-1] == '?':
+ if args:
+ argdict[argname] = args.pop(0)
+ elif ap[-1] == '*': # all remaining arguments
+ if args:
+ argdict[argname + '_list'] = args[:]
+ args = []
+ else:
+ argdict[argname + '_list'] = None
+ elif ap[-1] == '+':
+ if not args:
+ raise errors.BzrCommandError(gettext(
+ "command {0!r} needs one or more {1}").format(
+ cmd, argname.upper()))
+ else:
+ argdict[argname + '_list'] = args[:]
+ args = []
+ elif ap[-1] == '$': # all but one
+ if len(args) < 2:
+ raise errors.BzrCommandError(
+ gettext("command {0!r} needs one or more {1}").format(
+ cmd, argname.upper()))
+ argdict[argname + '_list'] = args[:-1]
+ args[:-1] = []
+ else:
+ # just a plain arg
+ argname = ap
+ if not args:
+ raise errors.BzrCommandError(
+ gettext("command {0!r} requires argument {1}").format(
+ cmd, argname.upper()))
+ else:
+ argdict[argname] = args.pop(0)
+
+ if args:
+ raise errors.BzrCommandError( gettext(
+ "extra argument to command {0}: {1}").format(
+ cmd, args[0]) )
+
+ return argdict
+
+def apply_coveraged(dirname, the_callable, *args, **kwargs):
+ # Cannot use "import trace", as that would import bzrlib.trace instead of
+ # the standard library's trace.
+ trace = __import__('trace')
+
+ tracer = trace.Trace(count=1, trace=0)
+ sys.settrace(tracer.globaltrace)
+ threading.settrace(tracer.globaltrace)
+
+ try:
+ return exception_to_return_code(the_callable, *args, **kwargs)
+ finally:
+ sys.settrace(None)
+ results = tracer.results()
+ results.write_results(show_missing=1, summary=False,
+ coverdir=dirname)
+
+
+def apply_profiled(the_callable, *args, **kwargs):
+ import hotshot
+ import tempfile
+ import hotshot.stats
+ pffileno, pfname = tempfile.mkstemp()
+ try:
+ prof = hotshot.Profile(pfname)
+ try:
+ ret = prof.runcall(exception_to_return_code, the_callable, *args,
+ **kwargs) or 0
+ finally:
+ prof.close()
+ stats = hotshot.stats.load(pfname)
+ stats.strip_dirs()
+ stats.sort_stats('cum') # 'time'
+ ## XXX: Might like to write to stderr or the trace file instead but
+ ## print_stats seems hardcoded to stdout
+ stats.print_stats(20)
+ return ret
+ finally:
+ os.close(pffileno)
+ os.remove(pfname)
+
+
+def exception_to_return_code(the_callable, *args, **kwargs):
+ """UI level helper for profiling and coverage.
+
+ This transforms exceptions into a return value of 3. As such it's only
+ relevant to the UI layer, and should never be called where catching
+ exceptions may be desirable.
+ """
+ try:
+ return the_callable(*args, **kwargs)
+ except (KeyboardInterrupt, Exception), e:
+ # used to handle AssertionError and KeyboardInterrupt
+ # specially here, but hopefully they're handled ok by the logger now
+ exc_info = sys.exc_info()
+ exitcode = trace.report_exception(exc_info, sys.stderr)
+ if os.environ.get('BZR_PDB'):
+ print '**** entering debugger'
+ tb = exc_info[2]
+ import pdb
+ if sys.version_info[:2] < (2, 6):
+ # XXX: we want to do
+ # pdb.post_mortem(tb)
+ # but because pdb.post_mortem gives bad results for tracebacks
+ # from inside generators, we do it manually.
+ # (http://bugs.python.org/issue4150, fixed in Python 2.6)
+
+ # Setup pdb on the traceback
+ p = pdb.Pdb()
+ p.reset()
+ p.setup(tb.tb_frame, tb)
+ # Point the debugger at the deepest frame of the stack
+ p.curindex = len(p.stack) - 1
+ p.curframe = p.stack[p.curindex][0]
+ # Start the pdb prompt.
+ p.print_stack_entry(p.stack[p.curindex])
+ p.execRcLines()
+ p.cmdloop()
+ else:
+ pdb.post_mortem(tb)
+ return exitcode
+
+
+def apply_lsprofiled(filename, the_callable, *args, **kwargs):
+ from bzrlib.lsprof import profile
+ ret, stats = profile(exception_to_return_code, the_callable,
+ *args, **kwargs)
+ stats.sort()
+ if filename is None:
+ stats.pprint()
+ else:
+ stats.save(filename)
+ trace.note(gettext('Profile data written to "%s".'), filename)
+ return ret
+
+
+def get_alias(cmd, config=None):
+ """Return an expanded alias, or None if no alias exists.
+
+ cmd
+ Command to be checked for an alias.
+ config
+ Used to specify an alternative config to use,
+ which is especially useful for testing.
+ If it is unspecified, the global config will be used.
+ """
+ if config is None:
+ import bzrlib.config
+ config = bzrlib.config.GlobalConfig()
+ alias = config.get_alias(cmd)
+ if (alias):
+ return cmdline.split(alias)
+ return None
+
+
+def run_bzr(argv, load_plugins=load_plugins, disable_plugins=disable_plugins):
+ """Execute a command.
+
+ :param argv: The command-line arguments, without the program name from
+ argv[0] These should already be decoded. All library/test code calling
+ run_bzr should be passing valid strings (don't need decoding).
+ :param load_plugins: What function to call when triggering plugin loading.
+ This function should take no arguments and cause all plugins to be
+ loaded.
+ :param disable_plugins: What function to call when disabling plugin
+ loading. This function should take no arguments and cause all plugin
+ loading to be prohibited (so that code paths in your application that
+ know about some plugins possibly being present will fail to import
+ those plugins even if they are installed.)
+ :return: Returns a command exit code or raises an exception.
+
+ Special master options: these must come before the command because
+ they control how the command is interpreted.
+
+ --no-plugins
+ Do not load plugin modules at all
+
+ --no-aliases
+ Do not allow aliases
+
+ --builtin
+ Only use builtin commands. (Plugins are still allowed to change
+ other behaviour.)
+
+ --profile
+ Run under the Python hotshot profiler.
+
+ --lsprof
+ Run under the Python lsprof profiler.
+
+ --coverage
+ Generate line coverage report in the specified directory.
+
+ --concurrency
+ Specify the number of processes that can be run concurrently (selftest).
+ """
+ trace.mutter("bazaar version: " + bzrlib.__version__)
+ argv = _specified_or_unicode_argv(argv)
+ trace.mutter("bzr arguments: %r", argv)
+
+ opt_lsprof = opt_profile = opt_no_plugins = opt_builtin = \
+ opt_no_l10n = opt_no_aliases = False
+ opt_lsprof_file = opt_coverage_dir = None
+
+ # --no-plugins is handled specially at a very early stage. We need
+ # to load plugins before doing other command parsing so that they
+ # can override commands, but this needs to happen first.
+
+ argv_copy = []
+ i = 0
+ override_config = []
+ while i < len(argv):
+ a = argv[i]
+ if a == '--profile':
+ opt_profile = True
+ elif a == '--lsprof':
+ opt_lsprof = True
+ elif a == '--lsprof-file':
+ opt_lsprof = True
+ opt_lsprof_file = argv[i + 1]
+ i += 1
+ elif a == '--no-plugins':
+ opt_no_plugins = True
+ elif a == '--no-aliases':
+ opt_no_aliases = True
+ elif a == '--no-l10n':
+ opt_no_l10n = True
+ elif a == '--builtin':
+ opt_builtin = True
+ elif a == '--concurrency':
+ os.environ['BZR_CONCURRENCY'] = argv[i + 1]
+ i += 1
+ elif a == '--coverage':
+ opt_coverage_dir = argv[i + 1]
+ i += 1
+ elif a == '--profile-imports':
+ pass # already handled in startup script Bug #588277
+ elif a.startswith('-D'):
+ debug.debug_flags.add(a[2:])
+ elif a.startswith('-O'):
+ override_config.append(a[2:])
+ else:
+ argv_copy.append(a)
+ i += 1
+
+ if bzrlib.global_state is None:
+ # FIXME: Workaround for users that imported bzrlib but didn't call
+ # bzrlib.initialize -- vila 2012-01-19
+ cmdline_overrides = config.CommandLineStore()
+ else:
+ cmdline_overrides = bzrlib.global_state.cmdline_overrides
+ cmdline_overrides._from_cmdline(override_config)
+
+ debug.set_debug_flags_from_config()
+
+ if not opt_no_plugins:
+ load_plugins()
+ else:
+ disable_plugins()
+
+ argv = argv_copy
+ if (not argv):
+ get_cmd_object('help').run_argv_aliases([])
+ return 0
+
+ if argv[0] == '--version':
+ get_cmd_object('version').run_argv_aliases([])
+ return 0
+
+ alias_argv = None
+
+ if not opt_no_aliases:
+ alias_argv = get_alias(argv[0])
+ if alias_argv:
+ argv[0] = alias_argv.pop(0)
+
+ cmd = argv.pop(0)
+ cmd_obj = get_cmd_object(cmd, plugins_override=not opt_builtin)
+ if opt_no_l10n:
+ cmd.l10n = False
+ run = cmd_obj.run_argv_aliases
+ run_argv = [argv, alias_argv]
+
+ try:
+ # We can be called recursively (tests for example), but we don't want
+ # the verbosity level to propagate.
+ saved_verbosity_level = option._verbosity_level
+ option._verbosity_level = 0
+ if opt_lsprof:
+ if opt_coverage_dir:
+ trace.warning(
+ '--coverage ignored, because --lsprof is in use.')
+ ret = apply_lsprofiled(opt_lsprof_file, run, *run_argv)
+ elif opt_profile:
+ if opt_coverage_dir:
+ trace.warning(
+ '--coverage ignored, because --profile is in use.')
+ ret = apply_profiled(run, *run_argv)
+ elif opt_coverage_dir:
+ ret = apply_coveraged(opt_coverage_dir, run, *run_argv)
+ else:
+ ret = run(*run_argv)
+ return ret or 0
+ finally:
+ # reset, in case we may do other commands later within the same
+ # process. Commands that want to execute sub-commands must propagate
+ # --verbose in their own way.
+ if 'memory' in debug.debug_flags:
+ trace.debug_memory('Process status after command:', short=False)
+ option._verbosity_level = saved_verbosity_level
+ # Reset the overrides
+ cmdline_overrides._reset()
+
+
+def display_command(func):
+ """Decorator that suppresses pipe/interrupt errors."""
+ def ignore_pipe(*args, **kwargs):
+ try:
+ result = func(*args, **kwargs)
+ sys.stdout.flush()
+ return result
+ except IOError, e:
+ if getattr(e, 'errno', None) is None:
+ raise
+ if e.errno != errno.EPIPE:
+ # Win32 raises IOError with errno=0 on a broken pipe
+ if sys.platform != 'win32' or (e.errno not in (0, errno.EINVAL)):
+ raise
+ pass
+ except KeyboardInterrupt:
+ pass
+ return ignore_pipe
+
+
+def install_bzr_command_hooks():
+ """Install the hooks to supply bzr's own commands."""
+ if _list_bzr_commands in Command.hooks["list_commands"]:
+ return
+ Command.hooks.install_named_hook("list_commands", _list_bzr_commands,
+ "bzr commands")
+ Command.hooks.install_named_hook("get_command", _get_bzr_command,
+ "bzr commands")
+ Command.hooks.install_named_hook("get_command", _get_plugin_command,
+ "bzr plugin commands")
+ Command.hooks.install_named_hook("get_command", _get_external_command,
+ "bzr external command lookup")
+ Command.hooks.install_named_hook("get_missing_command",
+ _try_plugin_provider,
+ "bzr plugin-provider-db check")
+
+
+
+def _specified_or_unicode_argv(argv):
+ # For internal or testing use, argv can be passed. Otherwise, get it from
+ # the process arguments in a unicode-safe way.
+ if argv is None:
+ return osutils.get_unicode_argv()
+ else:
+ new_argv = []
+ try:
+ # ensure all arguments are unicode strings
+ for a in argv:
+ if isinstance(a, unicode):
+ new_argv.append(a)
+ else:
+ new_argv.append(a.decode('ascii'))
+ except UnicodeDecodeError:
+ raise errors.BzrError("argv should be list of unicode strings.")
+ return new_argv
+
+
+def main(argv=None):
+ """Main entry point of command-line interface.
+
+ Typically `bzrlib.initialize` should be called first.
+
+ :param argv: list of unicode command-line arguments similar to sys.argv.
+ argv[0] is script name usually, it will be ignored.
+ Don't pass here sys.argv because this list contains plain strings
+ and not unicode; pass None instead.
+
+ :return: exit code of bzr command.
+ """
+ if argv is not None:
+ argv = argv[1:]
+ _register_builtin_commands()
+ ret = run_bzr_catch_errors(argv)
+ trace.mutter("return code %d", ret)
+ return ret
+
+
+def run_bzr_catch_errors(argv):
+ """Run a bzr command with parameters as described by argv.
+
+ This function assumes that the UI layer is set up, that symbol deprecations
+ are already applied, and that unicode decoding has already been performed on argv.
+ """
+ # done here so that they're covered for every test run
+ install_bzr_command_hooks()
+ return exception_to_return_code(run_bzr, argv)
+
+
+def run_bzr_catch_user_errors(argv):
+ """Run bzr and report user errors, but let internal errors propagate.
+
+ This is used for the test suite, and might be useful for other programs
+ that want to wrap the commandline interface.
+ """
+ # done here so that they're covered for every test run
+ install_bzr_command_hooks()
+ try:
+ return run_bzr(argv)
+ except Exception, e:
+ if (isinstance(e, (OSError, IOError))
+ or not getattr(e, 'internal_error', True)):
+ trace.report_exception(sys.exc_info(), sys.stderr)
+ return 3
+ else:
+ raise
+
+
+class HelpCommandIndex(object):
+ """A index for bzr help that returns commands."""
+
+ def __init__(self):
+ self.prefix = 'commands/'
+
+ def get_topics(self, topic):
+ """Search for topic amongst commands.
+
+ :param topic: A topic to search for.
+ :return: A list which is either empty or contains a single
+ Command entry.
+ """
+ if topic and topic.startswith(self.prefix):
+ topic = topic[len(self.prefix):]
+ try:
+ cmd = _get_cmd_object(topic, check_missing=False)
+ except KeyError:
+ return []
+ else:
+ return [cmd]
+
+
+class Provider(object):
+ """Generic class to be overriden by plugins"""
+
+ def plugin_for_command(self, cmd_name):
+ """Takes a command and returns the information for that plugin
+
+ :return: A dictionary with all the available information
+ for the requested plugin
+ """
+ raise NotImplementedError
+
+
+class ProvidersRegistry(registry.Registry):
+ """This registry exists to allow other providers to exist"""
+
+ def __iter__(self):
+ for key, provider in self.iteritems():
+ yield provider
+
+command_providers_registry = ProvidersRegistry()
diff --git a/bzrlib/commit.py b/bzrlib/commit.py
new file mode 100644
index 0000000..5bb5566
--- /dev/null
+++ b/bzrlib/commit.py
@@ -0,0 +1,1019 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# The newly committed revision is going to have a shape corresponding
+# to that of the working tree. Files that are not in the
+# working tree and that were in the predecessor are reported as
+# removed --- this can include files that were either removed from the
+# inventory or deleted in the working tree. If they were only
+# deleted from disk, they are removed from the working inventory.
+
+# We then consider the remaining entries, which will be in the new
+# version. Directory entries are simply copied across. File entries
+# must be checked to see if a new version of the file should be
+# recorded. For each parent revision tree, we check to see what
+# version of the file was present. If the file was present in at
+# least one tree, and if it was the same version in all the trees,
+# then we can just refer to that version. Otherwise, a new version
+# representing the merger of the file versions must be added.
+
+# TODO: Update hashcache before and after - or does the WorkingTree
+# look after that?
+
+# TODO: Rather than mashing together the ancestry and storing it back,
+# perhaps the weave should have single method which does it all in one
+# go, avoiding a lot of redundant work.
+
+# TODO: Perhaps give a warning if one of the revisions marked as
+# merged is already in the ancestry, and then don't record it as a
+# distinct parent.
+
+# TODO: If the file is newly merged but unchanged from the version it
+# merges from, then it should still be reported as newly added
+# relative to the basis revision.
+
+# TODO: Change the parameter 'rev_id' to 'revision_id' to be consistent with
+# the rest of the code; add a deprecation of the old name.
+
+from bzrlib import (
+ debug,
+ errors,
+ trace,
+ tree,
+ ui,
+ )
+from bzrlib.branch import Branch
+from bzrlib.cleanup import OperationWithCleanups
+import bzrlib.config
+from bzrlib.errors import (BzrError, PointlessCommit,
+ ConflictsInTree,
+ StrictCommitFailed
+ )
+from bzrlib.osutils import (get_user_encoding,
+ is_inside_any,
+ minimum_path_selection,
+ splitpath,
+ )
+from bzrlib.trace import mutter, note, is_quiet
+from bzrlib.inventory import Inventory, InventoryEntry, make_entry
+from bzrlib import symbol_versioning
+from bzrlib.urlutils import unescape_for_display
+from bzrlib.i18n import gettext
+
+class NullCommitReporter(object):
+ """I report on progress of a commit."""
+
+ def started(self, revno, revid, location=None):
+ if location is None:
+ symbol_versioning.warn("As of bzr 1.0 you must pass a location "
+ "to started.", DeprecationWarning,
+ stacklevel=2)
+ pass
+
+ def snapshot_change(self, change, path):
+ pass
+
+ def completed(self, revno, rev_id):
+ pass
+
+ def deleted(self, path):
+ pass
+
+ def missing(self, path):
+ pass
+
+ def renamed(self, change, old_path, new_path):
+ pass
+
+ def is_verbose(self):
+ return False
+
+
+class ReportCommitToLog(NullCommitReporter):
+
+ def _note(self, format, *args):
+ """Output a message.
+
+ Subclasses may choose to override this method.
+ """
+ note(format, *args)
+
+ def snapshot_change(self, change, path):
+ if path == '' and change in (gettext('added'), gettext('modified')):
+ return
+ self._note("%s %s", change, path)
+
+ def started(self, revno, rev_id, location=None):
+ if location is not None:
+ location = ' to: ' + unescape_for_display(location, 'utf-8')
+ else:
+ # When started was added, location was only made optional by
+ # accident. Matt Nordhoff 20071129
+ symbol_versioning.warn("As of bzr 1.0 you must pass a location "
+ "to started.", DeprecationWarning,
+ stacklevel=2)
+ location = ''
+ self._note(gettext('Committing%s'), location)
+
+ def completed(self, revno, rev_id):
+ self._note(gettext('Committed revision %d.'), revno)
+ # self._note goes to the console too; so while we want to log the
+ # rev_id, we can't trivially only log it. (See bug 526425). Long
+ # term we should rearrange the reporting structure, but for now
+ # we just mutter separately. We mutter the revid and revno together
+ # so that concurrent bzr invocations won't lead to confusion.
+ mutter('Committed revid %s as revno %d.', rev_id, revno)
+
+ def deleted(self, path):
+ self._note(gettext('deleted %s'), path)
+
+ def missing(self, path):
+ self._note(gettext('missing %s'), path)
+
+ def renamed(self, change, old_path, new_path):
+ self._note('%s %s => %s', change, old_path, new_path)
+
+ def is_verbose(self):
+ return True
+
+
+class Commit(object):
+ """Task of committing a new revision.
+
+ This is a MethodObject: it accumulates state as the commit is
+ prepared, and then it is discarded. It doesn't represent
+ historical revisions, just the act of recording a new one.
+
+ missing_ids
+ Modified to hold a list of files that have been deleted from
+ the working directory; these should be removed from the
+ working inventory.
+ """
+ def __init__(self,
+ reporter=None,
+ config_stack=None):
+ """Create a Commit object.
+
+ :param reporter: the default reporter to use or None to decide later
+ """
+ self.reporter = reporter
+ self.config_stack = config_stack
+
+ @staticmethod
+ def update_revprops(revprops, branch, authors=None, author=None,
+ local=False, possible_master_transports=None):
+ if revprops is None:
+ revprops = {}
+ if possible_master_transports is None:
+ possible_master_transports = []
+ if not 'branch-nick' in revprops:
+ revprops['branch-nick'] = branch._get_nick(
+ local,
+ possible_master_transports)
+ if authors is not None:
+ if author is not None:
+ raise AssertionError('Specifying both author and authors '
+ 'is not allowed. Specify just authors instead')
+ if 'author' in revprops or 'authors' in revprops:
+ # XXX: maybe we should just accept one of them?
+ raise AssertionError('author property given twice')
+ if authors:
+ for individual in authors:
+ if '\n' in individual:
+ raise AssertionError('\\n is not a valid character '
+ 'in an author identity')
+ revprops['authors'] = '\n'.join(authors)
+ if author is not None:
+ symbol_versioning.warn('The parameter author was deprecated'
+ ' in version 1.13. Use authors instead',
+ DeprecationWarning)
+ if 'author' in revprops or 'authors' in revprops:
+ # XXX: maybe we should just accept one of them?
+ raise AssertionError('author property given twice')
+ if '\n' in author:
+ raise AssertionError('\\n is not a valid character '
+ 'in an author identity')
+ revprops['authors'] = author
+ return revprops
+
+ def commit(self,
+ message=None,
+ timestamp=None,
+ timezone=None,
+ committer=None,
+ specific_files=None,
+ rev_id=None,
+ allow_pointless=True,
+ strict=False,
+ verbose=False,
+ revprops=None,
+ working_tree=None,
+ local=False,
+ reporter=None,
+ config=None,
+ message_callback=None,
+ recursive='down',
+ exclude=None,
+ possible_master_transports=None,
+ lossy=False):
+ """Commit working copy as a new revision.
+
+ :param message: the commit message (it or message_callback is required)
+ :param message_callback: A callback: message = message_callback(cmt_obj)
+
+ :param timestamp: if not None, seconds-since-epoch for a
+ postdated/predated commit.
+
+ :param specific_files: If not None, commit only those files. An empty
+ list means 'commit no files'.
+
+ :param rev_id: If set, use this as the new revision id.
+ Useful for test or import commands that need to tightly
+ control what revisions are assigned. If you duplicate
+ a revision id that exists elsewhere it is your own fault.
+ If null (default), a time/random revision id is generated.
+
+ :param allow_pointless: If true (default), commit even if nothing
+ has changed and no merges are recorded.
+
+ :param strict: If true, don't allow a commit if the working tree
+ contains unknown files.
+
+ :param revprops: Properties for new revision
+ :param local: Perform a local only commit.
+ :param reporter: the reporter to use or None for the default
+ :param verbose: if True and the reporter is not None, report everything
+ :param recursive: If set to 'down', commit in any subtrees that have
+ pending changes of any sort during this commit.
+ :param exclude: None or a list of relative paths to exclude from the
+ commit. Pending changes to excluded files will be ignored by the
+ commit.
+ :param lossy: When committing to a foreign VCS, ignore any
+ data that can not be natively represented.
+ """
+ operation = OperationWithCleanups(self._commit)
+ self.revprops = revprops or {}
+ # XXX: Can be set on __init__ or passed in - this is a bit ugly.
+ self.config_stack = config or self.config_stack
+ return operation.run(
+ message=message,
+ timestamp=timestamp,
+ timezone=timezone,
+ committer=committer,
+ specific_files=specific_files,
+ rev_id=rev_id,
+ allow_pointless=allow_pointless,
+ strict=strict,
+ verbose=verbose,
+ working_tree=working_tree,
+ local=local,
+ reporter=reporter,
+ message_callback=message_callback,
+ recursive=recursive,
+ exclude=exclude,
+ possible_master_transports=possible_master_transports,
+ lossy=lossy)
+
+ def _commit(self, operation, message, timestamp, timezone, committer,
+ specific_files, rev_id, allow_pointless, strict, verbose,
+ working_tree, local, reporter, message_callback, recursive,
+ exclude, possible_master_transports, lossy):
+ mutter('preparing to commit')
+
+ if working_tree is None:
+ raise BzrError("working_tree must be passed into commit().")
+ else:
+ self.work_tree = working_tree
+ self.branch = self.work_tree.branch
+ if getattr(self.work_tree, 'requires_rich_root', lambda: False)():
+ if not self.branch.repository.supports_rich_root():
+ raise errors.RootNotRich()
+ if message_callback is None:
+ if message is not None:
+ if isinstance(message, str):
+ message = message.decode(get_user_encoding())
+ message_callback = lambda x: message
+ else:
+ raise BzrError("The message or message_callback keyword"
+ " parameter is required for commit().")
+
+ self.bound_branch = None
+ self.any_entries_deleted = False
+ if exclude is not None:
+ self.exclude = sorted(
+ minimum_path_selection(exclude))
+ else:
+ self.exclude = []
+ self.local = local
+ self.master_branch = None
+ self.recursive = recursive
+ self.rev_id = None
+ # self.specific_files is None to indicate no filter, or any iterable to
+ # indicate a filter - [] means no files at all, as per iter_changes.
+ if specific_files is not None:
+ self.specific_files = sorted(
+ minimum_path_selection(specific_files))
+ else:
+ self.specific_files = None
+
+ self.allow_pointless = allow_pointless
+ self.message_callback = message_callback
+ self.timestamp = timestamp
+ self.timezone = timezone
+ self.committer = committer
+ self.strict = strict
+ self.verbose = verbose
+
+ self.work_tree.lock_write()
+ operation.add_cleanup(self.work_tree.unlock)
+ self.parents = self.work_tree.get_parent_ids()
+ # We can use record_iter_changes IFF iter_changes is compatible with
+ # the command line parameters, and the repository has fast delta
+ # generation. See bug 347649.
+ self.use_record_iter_changes = (
+ not self.exclude and
+ not self.branch.repository._format.supports_tree_reference and
+ (self.branch.repository._format.fast_deltas or
+ len(self.parents) < 2))
+ self.pb = ui.ui_factory.nested_progress_bar()
+ operation.add_cleanup(self.pb.finished)
+ self.basis_revid = self.work_tree.last_revision()
+ self.basis_tree = self.work_tree.basis_tree()
+ self.basis_tree.lock_read()
+ operation.add_cleanup(self.basis_tree.unlock)
+ # Cannot commit with conflicts present.
+ if len(self.work_tree.conflicts()) > 0:
+ raise ConflictsInTree
+
+ # Setup the bound branch variables as needed.
+ self._check_bound_branch(operation, possible_master_transports)
+
+ # Check that the working tree is up to date
+ old_revno, old_revid, new_revno = self._check_out_of_date_tree()
+
+ # Complete configuration setup
+ if reporter is not None:
+ self.reporter = reporter
+ elif self.reporter is None:
+ self.reporter = self._select_reporter()
+ if self.config_stack is None:
+ self.config_stack = self.work_tree.get_config_stack()
+
+ self._set_specific_file_ids()
+
+ # Setup the progress bar. As the number of files that need to be
+ # committed is unknown, progress is reported as stages.
+ # We keep track of entries separately though and include that
+ # information in the progress bar during the relevant stages.
+ self.pb_stage_name = ""
+ self.pb_stage_count = 0
+ self.pb_stage_total = 5
+ if self.bound_branch:
+ # 2 extra stages: "Uploading data to master branch" and "Merging
+ # tags to master branch"
+ self.pb_stage_total += 2
+ self.pb.show_pct = False
+ self.pb.show_spinner = False
+ self.pb.show_eta = False
+ self.pb.show_count = True
+ self.pb.show_bar = True
+
+ self._gather_parents()
+ # After a merge, a selected file commit is not supported.
+ # See 'bzr help merge' for an explanation as to why.
+ if len(self.parents) > 1 and self.specific_files is not None:
+ raise errors.CannotCommitSelectedFileMerge(self.specific_files)
+ # Excludes are a form of selected file commit.
+ if len(self.parents) > 1 and self.exclude:
+ raise errors.CannotCommitSelectedFileMerge(self.exclude)
+
+ # Collect the changes
+ self._set_progress_stage("Collecting changes", counter=True)
+ self._lossy = lossy
+ self.builder = self.branch.get_commit_builder(self.parents,
+ self.config_stack, timestamp, timezone, committer, self.revprops,
+ rev_id, lossy=lossy)
+ if not self.builder.supports_record_entry_contents and self.exclude:
+ self.builder.abort()
+ raise errors.ExcludesUnsupported(self.branch.repository)
+
+ if self.builder.updates_branch and self.bound_branch:
+ self.builder.abort()
+ raise AssertionError(
+ "bound branches not supported for commit builders "
+ "that update the branch")
+
+ try:
+ self.builder.will_record_deletes()
+ # find the location being committed to
+ if self.bound_branch:
+ master_location = self.master_branch.base
+ else:
+ master_location = self.branch.base
+
+ # report the start of the commit
+ self.reporter.started(new_revno, self.rev_id, master_location)
+
+ self._update_builder_with_changes()
+ self._check_pointless()
+
+ # TODO: Now the new inventory is known, check for conflicts.
+ # ADHB 2006-08-08: If this is done, populate_new_inv should not add
+ # weave lines, because nothing should be recorded until it is known
+ # that commit will succeed.
+ self._set_progress_stage("Saving data locally")
+ self.builder.finish_inventory()
+
+ # Prompt the user for a commit message if none provided
+ message = message_callback(self)
+ self.message = message
+
+ # Add revision data to the local branch
+ self.rev_id = self.builder.commit(self.message)
+
+ except Exception, e:
+ mutter("aborting commit write group because of exception:")
+ trace.log_exception_quietly()
+ self.builder.abort()
+ raise
+
+ self._update_branches(old_revno, old_revid, new_revno)
+
+ # Make the working tree be up to date with the branch. This
+ # includes automatic changes scheduled to be made to the tree, such
+ # as updating its basis and unversioning paths that were missing.
+ self.work_tree.unversion(self.deleted_ids)
+ self._set_progress_stage("Updating the working tree")
+ self.work_tree.update_basis_by_delta(self.rev_id,
+ self.builder.get_basis_delta())
+ self.reporter.completed(new_revno, self.rev_id)
+ self._process_post_hooks(old_revno, new_revno)
+ return self.rev_id
+
+ def _update_branches(self, old_revno, old_revid, new_revno):
+ """Update the master and local branch to the new revision.
+
+ This will try to make sure that the master branch is updated
+ before the local branch.
+
+ :param old_revno: Revision number of master branch before the
+ commit
+ :param old_revid: Tip of master branch before the commit
+ :param new_revno: Revision number of the new commit
+ """
+ if not self.builder.updates_branch:
+ self._process_pre_hooks(old_revno, new_revno)
+
+ # Upload revision data to the master.
+ # this will propagate merged revisions too if needed.
+ if self.bound_branch:
+ self._set_progress_stage("Uploading data to master branch")
+ # 'commit' to the master first so a timeout here causes the
+ # local branch to be out of date
+ (new_revno, self.rev_id) = self.master_branch.import_last_revision_info_and_tags(
+ self.branch, new_revno, self.rev_id, lossy=self._lossy)
+ if self._lossy:
+ self.branch.fetch(self.master_branch, self.rev_id)
+
+ # and now do the commit locally.
+ self.branch.set_last_revision_info(new_revno, self.rev_id)
+ else:
+ try:
+ self._process_pre_hooks(old_revno, new_revno)
+ except:
+ # The commit builder will already have updated the branch,
+ # revert it.
+ self.branch.set_last_revision_info(old_revno, old_revid)
+ raise
+
+ # Merge local tags to remote
+ if self.bound_branch:
+ self._set_progress_stage("Merging tags to master branch")
+ tag_updates, tag_conflicts = self.branch.tags.merge_to(
+ self.master_branch.tags)
+ if tag_conflicts:
+ warning_lines = [' ' + name for name, _, _ in tag_conflicts]
+ note( gettext("Conflicting tags in bound branch:\n{0}".format(
+ "\n".join(warning_lines))) )
+
+ def _select_reporter(self):
+ """Select the CommitReporter to use."""
+ if is_quiet():
+ return NullCommitReporter()
+ return ReportCommitToLog()
+
+ def _check_pointless(self):
+ if self.allow_pointless:
+ return
+ # A merge with no effect on files
+ if len(self.parents) > 1:
+ return
+ if self.builder.any_changes():
+ return
+ raise PointlessCommit()
+
+ def _check_bound_branch(self, operation, possible_master_transports=None):
+ """Check to see if the local branch is bound.
+
+ If it is bound, then most of the commit will actually be
+ done using the remote branch as the target branch.
+ Only at the end will the local branch be updated.
+ """
+ if self.local and not self.branch.get_bound_location():
+ raise errors.LocalRequiresBoundBranch()
+
+ if not self.local:
+ self.master_branch = self.branch.get_master_branch(
+ possible_master_transports)
+
+ if not self.master_branch:
+ # make this branch the reference branch for out of date checks.
+ self.master_branch = self.branch
+ return
+
+ # If the master branch is bound, we must fail
+ master_bound_location = self.master_branch.get_bound_location()
+ if master_bound_location:
+ raise errors.CommitToDoubleBoundBranch(self.branch,
+ self.master_branch, master_bound_location)
+
+ # TODO: jam 20051230 We could automatically push local
+ # commits to the remote branch if they would fit.
+ # But for now, just require remote to be identical
+ # to local.
+
+ # Make sure the local branch is identical to the master
+ master_info = self.master_branch.last_revision_info()
+ local_info = self.branch.last_revision_info()
+ if local_info != master_info:
+ raise errors.BoundBranchOutOfDate(self.branch,
+ self.master_branch)
+
+ # Now things are ready to change the master branch
+ # so grab the lock
+ self.bound_branch = self.branch
+ self.master_branch.lock_write()
+ operation.add_cleanup(self.master_branch.unlock)
+
+ def _check_out_of_date_tree(self):
+ """Check that the working tree is up to date.
+
+ :return: old_revision_number, old_revision_id, new_revision_number
+ tuple
+ """
+ try:
+ first_tree_parent = self.work_tree.get_parent_ids()[0]
+ except IndexError:
+ # if there are no parents, treat our parent as 'None'
+ # this is so that we still consider the master branch
+ # - in a checkout scenario the tree may have no
+ # parents but the branch may do.
+ first_tree_parent = bzrlib.revision.NULL_REVISION
+ old_revno, master_last = self.master_branch.last_revision_info()
+ if master_last != first_tree_parent:
+ if master_last != bzrlib.revision.NULL_REVISION:
+ raise errors.OutOfDateTree(self.work_tree)
+ if self.branch.repository.has_revision(first_tree_parent):
+ new_revno = old_revno + 1
+ else:
+ # ghost parents never appear in revision history.
+ new_revno = 1
+ return old_revno, master_last, new_revno
+
+ def _process_pre_hooks(self, old_revno, new_revno):
+ """Process any registered pre commit hooks."""
+ self._set_progress_stage("Running pre_commit hooks")
+ self._process_hooks("pre_commit", old_revno, new_revno)
+
+ def _process_post_hooks(self, old_revno, new_revno):
+ """Process any registered post commit hooks."""
+ # Process the post commit hooks, if any
+ self._set_progress_stage("Running post_commit hooks")
+ # old style commit hooks - should be deprecated ? (obsoleted in
+ # 0.15^H^H^H^H 2.5.0)
+ post_commit = self.config_stack.get('post_commit')
+ if post_commit is not None:
+ hooks = post_commit.split(' ')
+ # this would be nicer with twisted.python.reflect.namedAny
+ for hook in hooks:
+ result = eval(hook + '(branch, rev_id)',
+ {'branch':self.branch,
+ 'bzrlib':bzrlib,
+ 'rev_id':self.rev_id})
+ # process new style post commit hooks
+ self._process_hooks("post_commit", old_revno, new_revno)
+
+ def _process_hooks(self, hook_name, old_revno, new_revno):
+ if not Branch.hooks[hook_name]:
+ return
+
+ # new style commit hooks:
+ if not self.bound_branch:
+ hook_master = self.branch
+ hook_local = None
+ else:
+ hook_master = self.master_branch
+ hook_local = self.branch
+ # With bound branches, when the master is behind the local branch,
+ # the 'old_revno' and old_revid values here are incorrect.
+ # XXX: FIXME ^. RBC 20060206
+ if self.parents:
+ old_revid = self.parents[0]
+ else:
+ old_revid = bzrlib.revision.NULL_REVISION
+
+ if hook_name == "pre_commit":
+ future_tree = self.builder.revision_tree()
+ tree_delta = future_tree.changes_from(self.basis_tree,
+ include_root=True)
+
+ for hook in Branch.hooks[hook_name]:
+ # show the running hook in the progress bar. As hooks may
+ # end up doing nothing (e.g. because they are not configured by
+ # the user) this is still showing progress, not showing overall
+ # actions - it's up to each plugin to show a UI if it wants to
+ # (such as 'Emailing diff to foo@example.com').
+ self.pb_stage_name = "Running %s hooks [%s]" % \
+ (hook_name, Branch.hooks.get_hook_name(hook))
+ self._emit_progress()
+ if 'hooks' in debug.debug_flags:
+ mutter("Invoking commit hook: %r", hook)
+ if hook_name == "post_commit":
+ hook(hook_local, hook_master, old_revno, old_revid, new_revno,
+ self.rev_id)
+ elif hook_name == "pre_commit":
+ hook(hook_local, hook_master,
+ old_revno, old_revid, new_revno, self.rev_id,
+ tree_delta, future_tree)
+
+ def _gather_parents(self):
+ """Record the parents of a merge for merge detection."""
+ # TODO: Make sure that this list doesn't contain duplicate
+ # entries and the order is preserved when doing this.
+ if self.use_record_iter_changes:
+ return
+ self.basis_inv = self.basis_tree.root_inventory
+ self.parent_invs = [self.basis_inv]
+ for revision in self.parents[1:]:
+ if self.branch.repository.has_revision(revision):
+ mutter('commit parent revision {%s}', revision)
+ inventory = self.branch.repository.get_inventory(revision)
+ self.parent_invs.append(inventory)
+ else:
+ mutter('commit parent ghost revision {%s}', revision)
+
+ def _update_builder_with_changes(self):
+ """Update the commit builder with the data about what has changed.
+ """
+ exclude = self.exclude
+ specific_files = self.specific_files
+ mutter("Selecting files for commit with filter %s", specific_files)
+
+ self._check_strict()
+ if self.use_record_iter_changes:
+ iter_changes = self.work_tree.iter_changes(self.basis_tree,
+ specific_files=specific_files)
+ iter_changes = self._filter_iter_changes(iter_changes)
+ for file_id, path, fs_hash in self.builder.record_iter_changes(
+ self.work_tree, self.basis_revid, iter_changes):
+ self.work_tree._observed_sha1(file_id, path, fs_hash)
+ else:
+ # Build the new inventory
+ self._populate_from_inventory()
+ self._record_unselected()
+ self._report_and_accumulate_deletes()
+
+ def _filter_iter_changes(self, iter_changes):
+ """Process iter_changes.
+
+ This method reports on the changes in iter_changes to the user, and
+ converts 'missing' entries in the iter_changes iterator to 'deleted'
+ entries. 'missing' entries have their new path set to None and their
+ new versioned flag set to False.
+
+ :param iter_changes: An iter_changes to process.
+ :return: A generator of changes.
+ """
+ reporter = self.reporter
+ report_changes = reporter.is_verbose()
+ deleted_ids = []
+ for change in iter_changes:
+ if report_changes:
+ old_path = change[1][0]
+ new_path = change[1][1]
+ versioned = change[3][1]
+ kind = change[6][1]
+ versioned = change[3][1]
+ if kind is None and versioned:
+ # 'missing' path
+ if report_changes:
+ reporter.missing(new_path)
+ deleted_ids.append(change[0])
+ # Reset the new path (None) and new versioned flag (False)
+ change = (change[0], (change[1][0], None), change[2],
+ (change[3][0], False)) + change[4:]
+ new_path = change[1][1]
+ versioned = False
+ elif kind == 'tree-reference':
+ if self.recursive == 'down':
+ self._commit_nested_tree(change[0], change[1][1])
+ if change[3][0] or change[3][1]:
+ yield change
+ if report_changes:
+ if new_path is None:
+ reporter.deleted(old_path)
+ elif old_path is None:
+ reporter.snapshot_change(gettext('added'), new_path)
+ elif old_path != new_path:
+ reporter.renamed(gettext('renamed'), old_path, new_path)
+ else:
+ if (new_path or
+ self.work_tree.branch.repository._format.rich_root_data):
+ # Don't report on changes to '' in non rich root
+ # repositories.
+ reporter.snapshot_change(gettext('modified'), new_path)
+ self._next_progress_entry()
+ # Unversion IDs that were found to be deleted
+ self.deleted_ids = deleted_ids
+
+ def _record_unselected(self):
+ # If specific files are selected, then all un-selected files must be
+ # recorded in their previous state. For more details, see
+ # https://lists.ubuntu.com/archives/bazaar/2007q3/028476.html.
+ if self.specific_files or self.exclude:
+ specific_files = self.specific_files or []
+ for path, old_ie in self.basis_inv.iter_entries():
+ if self.builder.new_inventory.has_id(old_ie.file_id):
+ # already added - skip.
+ continue
+ if (is_inside_any(specific_files, path)
+ and not is_inside_any(self.exclude, path)):
+ # was inside the selected path, and not excluded - if not
+ # present it has been deleted so skip.
+ continue
+ # From here down it was either not selected, or was excluded:
+ # We preserve the entry unaltered.
+ ie = old_ie.copy()
+ # Note: specific file commits after a merge are currently
+ # prohibited. This test is for sanity/safety in case it's
+ # required after that changes.
+ if len(self.parents) > 1:
+ ie.revision = None
+ self.builder.record_entry_contents(ie, self.parent_invs, path,
+ self.basis_tree, None)
+
+ def _report_and_accumulate_deletes(self):
+ if (isinstance(self.basis_inv, Inventory)
+ and isinstance(self.builder.new_inventory, Inventory)):
+ # the older Inventory classes provide a _byid dict, and building a
+ # set from the keys of this dict is substantially faster than even
+ # getting a set of ids from the inventory
+ #
+ # <lifeless> set(dict) is roughly the same speed as
+ # set(iter(dict)) and both are significantly slower than
+ # set(dict.keys())
+ deleted_ids = set(self.basis_inv._byid.keys()) - \
+ set(self.builder.new_inventory._byid.keys())
+ else:
+ deleted_ids = set(self.basis_inv) - set(self.builder.new_inventory)
+ if deleted_ids:
+ self.any_entries_deleted = True
+ deleted = [(self.basis_tree.id2path(file_id), file_id)
+ for file_id in deleted_ids]
+ deleted.sort()
+ # XXX: this is not quite directory-order sorting
+ for path, file_id in deleted:
+ self.builder.record_delete(path, file_id)
+ self.reporter.deleted(path)
+
+ def _check_strict(self):
+ # XXX: when we use iter_changes this would likely be faster if
+ # iter_changes would check for us (even in the presence of
+ # selected_files).
+ if self.strict:
+ # raise an exception as soon as we find a single unknown.
+ for unknown in self.work_tree.unknowns():
+ raise StrictCommitFailed()
+
+ def _populate_from_inventory(self):
+ """Populate the CommitBuilder by walking the working tree inventory."""
+ # Build the revision inventory.
+ #
+ # This starts by creating a new empty inventory. Depending on
+ # which files are selected for commit, and what is present in the
+ # current tree, the new inventory is populated. inventory entries
+ # which are candidates for modification have their revision set to
+ # None; inventory entries that are carried over untouched have their
+ # revision set to their prior value.
+ #
+ # ESEPARATIONOFCONCERNS: this function is diffing and using the diff
+ # results to create a new inventory at the same time, which results
+ # in bugs like #46635. Any reason not to use/enhance Tree.changes_from?
+ # ADHB 11-07-2006
+
+ specific_files = self.specific_files
+ exclude = self.exclude
+ report_changes = self.reporter.is_verbose()
+ deleted_ids = []
+ # A tree of paths that have been deleted. E.g. if foo/bar has been
+ # deleted, then we have {'foo':{'bar':{}}}
+ deleted_paths = {}
+ # XXX: Note that entries may have the wrong kind because the entry does
+ # not reflect the status on disk.
+ # NB: entries will include entries within the excluded ids/paths
+ # because iter_entries_by_dir has no 'exclude' facility today.
+ entries = self.work_tree.iter_entries_by_dir(
+ specific_file_ids=self.specific_file_ids, yield_parents=True)
+ for path, existing_ie in entries:
+ file_id = existing_ie.file_id
+ name = existing_ie.name
+ parent_id = existing_ie.parent_id
+ kind = existing_ie.kind
+ # Skip files that have been deleted from the working tree.
+ # The deleted path ids are also recorded so they can be explicitly
+ # unversioned later.
+ if deleted_paths:
+ path_segments = splitpath(path)
+ deleted_dict = deleted_paths
+ for segment in path_segments:
+ deleted_dict = deleted_dict.get(segment, None)
+ if not deleted_dict:
+ # We either took a path not present in the dict
+ # (deleted_dict was None), or we've reached an empty
+ # child dir in the dict, so are now a sub-path.
+ break
+ else:
+ deleted_dict = None
+ if deleted_dict is not None:
+ # the path has a deleted parent, do not add it.
+ continue
+ if exclude and is_inside_any(exclude, path):
+ # Skip excluded paths. Excluded paths are processed by
+ # _update_builder_with_changes.
+ continue
+ content_summary = self.work_tree.path_content_summary(path)
+ kind = content_summary[0]
+ # Note that when a filter of specific files is given, we must only
+ # skip/record deleted files matching that filter.
+ if not specific_files or is_inside_any(specific_files, path):
+ if kind == 'missing':
+ if not deleted_paths:
+ # path won't have been split yet.
+ path_segments = splitpath(path)
+ deleted_dict = deleted_paths
+ for segment in path_segments:
+ deleted_dict = deleted_dict.setdefault(segment, {})
+ self.reporter.missing(path)
+ self._next_progress_entry()
+ deleted_ids.append(file_id)
+ continue
+ # TODO: have the builder do the nested commit just-in-time IF and
+ # only if needed.
+ if kind == 'tree-reference':
+ # enforce repository nested tree policy.
+ if (not self.work_tree.supports_tree_reference() or
+ # repository does not support it either.
+ not self.branch.repository._format.supports_tree_reference):
+ kind = 'directory'
+ content_summary = (kind, None, None, None)
+ elif self.recursive == 'down':
+ nested_revision_id = self._commit_nested_tree(
+ file_id, path)
+ content_summary = (kind, None, None, nested_revision_id)
+ else:
+ nested_revision_id = self.work_tree.get_reference_revision(file_id)
+ content_summary = (kind, None, None, nested_revision_id)
+
+ # Record an entry for this item
+ # Note: I don't particularly want to have the existing_ie
+ # parameter but the test suite currently (28-Jun-07) breaks
+ # without it thanks to a unicode normalisation issue. :-(
+ definitely_changed = kind != existing_ie.kind
+ self._record_entry(path, file_id, specific_files, kind, name,
+ parent_id, definitely_changed, existing_ie, report_changes,
+ content_summary)
+
+ # Unversion IDs that were found to be deleted
+ self.deleted_ids = deleted_ids
+
+ def _commit_nested_tree(self, file_id, path):
+ """Commit a nested tree."""
+ sub_tree = self.work_tree.get_nested_tree(file_id, path)
+ # FIXME: be more comprehensive here:
+ # this works when both trees are in --trees repository,
+ # but when both are bound to a different repository,
+ # it fails; a better way of approaching this is to
+ # finally implement the explicit-caches approach designed
+ # a while back - RBC 20070306.
+ if sub_tree.branch.repository.has_same_location(
+ self.work_tree.branch.repository):
+ sub_tree.branch.repository = \
+ self.work_tree.branch.repository
+ try:
+ return sub_tree.commit(message=None, revprops=self.revprops,
+ recursive=self.recursive,
+ message_callback=self.message_callback,
+ timestamp=self.timestamp, timezone=self.timezone,
+ committer=self.committer,
+ allow_pointless=self.allow_pointless,
+ strict=self.strict, verbose=self.verbose,
+ local=self.local, reporter=self.reporter)
+ except errors.PointlessCommit:
+ return self.work_tree.get_reference_revision(file_id)
+
+ def _record_entry(self, path, file_id, specific_files, kind, name,
+ parent_id, definitely_changed, existing_ie, report_changes,
+ content_summary):
+ """Record the new inventory entry for a path, if any."""
+ # mutter('check %s {%s}', path, file_id)
+ # mutter('%s selected for commit', path)
+ if definitely_changed or existing_ie is None:
+ ie = make_entry(kind, name, parent_id, file_id)
+ else:
+ ie = existing_ie.copy()
+ ie.revision = None
+ # For carried over entries we don't care about the fs hash - the repo
+ # isn't generating a sha, so we're not saving computation time.
+ _, _, fs_hash = self.builder.record_entry_contents(
+ ie, self.parent_invs, path, self.work_tree, content_summary)
+ if report_changes:
+ self._report_change(ie, path)
+ if fs_hash:
+ self.work_tree._observed_sha1(ie.file_id, path, fs_hash)
+ return ie
+
+ def _report_change(self, ie, path):
+ """Report a change to the user.
+
+ The change that has occurred is described relative to the basis
+ inventory.
+ """
+ if (self.basis_inv.has_id(ie.file_id)):
+ basis_ie = self.basis_inv[ie.file_id]
+ else:
+ basis_ie = None
+ change = ie.describe_change(basis_ie, ie)
+ if change in (InventoryEntry.RENAMED,
+ InventoryEntry.MODIFIED_AND_RENAMED):
+ old_path = self.basis_inv.id2path(ie.file_id)
+ self.reporter.renamed(change, old_path, path)
+ self._next_progress_entry()
+ else:
+ if change == gettext('unchanged'):
+ return
+ self.reporter.snapshot_change(change, path)
+ self._next_progress_entry()
+
+ def _set_progress_stage(self, name, counter=False):
+ """Set the progress stage and emit an update to the progress bar."""
+ self.pb_stage_name = name
+ self.pb_stage_count += 1
+ if counter:
+ self.pb_entries_count = 0
+ else:
+ self.pb_entries_count = None
+ self._emit_progress()
+
+ def _next_progress_entry(self):
+ """Emit an update to the progress bar and increment the entry count."""
+ self.pb_entries_count += 1
+ self._emit_progress()
+
+ def _emit_progress(self):
+ if self.pb_entries_count is not None:
+ text = gettext("{0} [{1}] - Stage").format(self.pb_stage_name,
+ self.pb_entries_count)
+ else:
+ text = gettext("%s - Stage") % (self.pb_stage_name, )
+ self.pb.update(text, self.pb_stage_count, self.pb_stage_total)
+
+ def _set_specific_file_ids(self):
+ """Populate self.specific_file_ids if we will use it."""
+ if not self.use_record_iter_changes:
+ # If provided, ensure the specified files are versioned
+ if self.specific_files is not None:
+ # Note: This routine is being called because it raises
+ # PathNotVersionedError as a side effect of finding the IDs. We
+ # later use the ids we found as input to the working tree
+ # inventory iterator, so we only consider those ids rather than
+ # examining the whole tree again.
+ # XXX: Don't we have filter_unversioned to do this more
+ # cheaply?
+ self.specific_file_ids = tree.find_ids_across_trees(
+ self.specific_files, [self.basis_tree, self.work_tree])
+ else:
+ self.specific_file_ids = None
diff --git a/bzrlib/commit_signature_commands.py b/bzrlib/commit_signature_commands.py
new file mode 100644
index 0000000..7dde294
--- /dev/null
+++ b/bzrlib/commit_signature_commands.py
@@ -0,0 +1,191 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Command which looks for unsigned commits by the current user, and signs them.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ controldir,
+ errors,
+ gpg,
+ revision as _mod_revision,
+ )
+from bzrlib.commands import Command
+from bzrlib.option import Option
+from bzrlib.i18n import gettext, ngettext
+
+
+class cmd_sign_my_commits(Command):
+ __doc__ = """Sign all commits by a given committer.
+
+ If location is not specified the local tree is used.
+ If committer is not specified the default committer is used.
+
+ This does not sign commits that already have signatures.
+ """
+ # Note that this signs everything in the branch's ancestry
+ # (both mainline and merged), but not other revisions that may be in the
+ # repository
+
+ takes_options = [
+ Option('dry-run',
+ help='Don\'t actually sign anything, just print'
+ ' the revisions that would be signed.'),
+ ]
+ takes_args = ['location?', 'committer?']
+
+ def run(self, location=None, committer=None, dry_run=False):
+ if location is None:
+ bzrdir = controldir.ControlDir.open_containing('.')[0]
+ else:
+ # Passed in locations should be exact
+ bzrdir = controldir.ControlDir.open(location)
+ branch = bzrdir.open_branch()
+ repo = branch.repository
+ branch_config = branch.get_config_stack()
+
+ if committer is None:
+ committer = branch_config.get('email')
+ gpg_strategy = gpg.GPGStrategy(branch_config)
+
+ count = 0
+ repo.lock_write()
+ try:
+ graph = repo.get_graph()
+ repo.start_write_group()
+ try:
+ for rev_id, parents in graph.iter_ancestry(
+ [branch.last_revision()]):
+ if _mod_revision.is_null(rev_id):
+ continue
+ if parents is None:
+ # Ignore ghosts
+ continue
+ if repo.has_signature_for_revision_id(rev_id):
+ continue
+ rev = repo.get_revision(rev_id)
+ if rev.committer != committer:
+ continue
+ # We have an unsigned revision with a matching committer,
+ # so sign it
+ self.outf.write("%s\n" % rev_id)
+ count += 1
+ if not dry_run:
+ repo.sign_revision(rev_id, gpg_strategy)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ finally:
+ repo.unlock()
+ self.outf.write(
+ ngettext('Signed %d revision.\n', 'Signed %d revisions.\n', count) %
+ count)
+
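+# Illustrative usage (an editor's sketch, not part of the original module):
+# the class above is exposed as ``bzr sign-my-commits``; the calls below only
+# use the options and arguments declared in takes_options/takes_args, and the
+# branch URL and committer are made-up examples.
+#
+#   bzr sign-my-commits --dry-run
+#   bzr sign-my-commits http://example.com/branch "Jane Doe <jane@example.com>"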
+
+class cmd_verify_signatures(Command):
+ __doc__ = """Verify all commit signatures.
+
+ Verifies that all commits in the branch are signed by known GnuPG keys.
+ """
+
+ takes_options = [
+ Option('acceptable-keys',
+ help='Comma separated list of GPG key patterns which are'
+ ' acceptable for verification.',
+ short_name='k',
+ type=str,),
+ 'revision',
+ 'verbose',
+ ]
+ takes_args = ['location?']
+
+ def run(self, acceptable_keys=None, revision=None, verbose=None,
+ location=u'.'):
+ bzrdir = controldir.ControlDir.open_containing(location)[0]
+ branch = bzrdir.open_branch()
+ repo = branch.repository
+ branch_config = branch.get_config_stack()
+ gpg_strategy = gpg.GPGStrategy(branch_config)
+
+ gpg_strategy.set_acceptable_keys(acceptable_keys)
+
+ def write(string):
+ self.outf.write(string + "\n")
+ def write_verbose(string):
+ self.outf.write(" " + string + "\n")
+
+ self.add_cleanup(repo.lock_read().unlock)
+ # get our list of revisions
+ revisions = []
+ if revision is not None:
+ if len(revision) == 1:
+ revno, rev_id = revision[0].in_history(branch)
+ revisions.append(rev_id)
+ elif len(revision) == 2:
+ from_revno, from_revid = revision[0].in_history(branch)
+ to_revno, to_revid = revision[1].in_history(branch)
+ if to_revid is None:
+ to_revno = branch.revno()
+ if from_revno is None or to_revno is None:
+ raise errors.BzrCommandError(gettext(
+ 'Cannot verify a range of non-revision-history revisions'))
+ for revno in range(from_revno, to_revno + 1):
+ revisions.append(branch.get_rev_id(revno))
+ else:
+ # all revisions by default, including merges
+ graph = repo.get_graph()
+ revisions = []
+ for rev_id, parents in graph.iter_ancestry(
+ [branch.last_revision()]):
+ if _mod_revision.is_null(rev_id):
+ continue
+ if parents is None:
+ # Ignore ghosts
+ continue
+ revisions.append(rev_id)
+ count, result, all_verifiable =\
+ gpg.bulk_verify_signatures(repo, revisions, gpg_strategy)
+ if all_verifiable:
+ write(gettext("All commits signed with verifiable keys"))
+ if verbose:
+ write(gpg.verbose_valid_message(result))
+ return 0
+ else:
+ write(gpg.valid_commits_message(count))
+ if verbose:
+ for message in gpg.verbose_valid_message(result):
+ write_verbose(message)
+ write(gpg.expired_commit_message(count))
+ if verbose:
+ for message in gpg.verbose_expired_key_message(result, repo):
+ write_verbose(message)
+ write(gpg.unknown_key_message(count))
+ if verbose:
+ for message in gpg.verbose_missing_key_message(result):
+ write_verbose(message)
+ write(gpg.commit_not_valid_message(count))
+ if verbose:
+ for message in gpg.verbose_not_valid_message(result, repo):
+ write_verbose(message)
+ write(gpg.commit_not_signed_message(count))
+ if verbose:
+ for message in gpg.verbose_not_signed_message(result, repo):
+ write_verbose(message)
+ return 1
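+
+# Illustrative usage (an editor's sketch, not part of the original module):
+# the class above is exposed as ``bzr verify-signatures``; only options
+# declared above are used, and the key pattern is a made-up example.
+#
+#   bzr verify-signatures --verbose
+#   bzr verify-signatures -k jane@example.com -r 10..20 /path/to/branch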
diff --git a/bzrlib/config.py b/bzrlib/config.py
new file mode 100644
index 0000000..779ae80
--- /dev/null
+++ b/bzrlib/config.py
@@ -0,0 +1,4195 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# and others
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Configuration that affects the behaviour of Bazaar.
+
+Currently this configuration resides in ~/.bazaar/bazaar.conf
+and ~/.bazaar/locations.conf, which is written to by bzr.
+
+In bazaar.conf the following options may be set:
+[DEFAULT]
+editor=name-of-program
+email=Your Name <your@email.address>
+check_signatures=require|ignore|check-available(default)
+create_signatures=always|never|when-required(default)
+gpg_signing_command=name-of-program
+log_format=name-of-format
+validate_signatures_in_log=true|false(default)
+acceptable_keys=pattern1,pattern2
+gpg_signing_key=amy@example.com
+
+in locations.conf, you specify the url of a branch and options for it.
+Wildcards may be used - * and ? as normal in shell completion. Options
+set in both bazaar.conf and locations.conf are overridden by the locations.conf
+setting.
+[/home/robertc/source]
+recurse=False|True(default)
+email= as above
+check_signatures= as above
+create_signatures= as above.
+validate_signatures_in_log=as above
+acceptable_keys=as above
+
+explanation of options
+----------------------
+editor - this option sets the pop up editor to use during commits.
+email - this option sets the user id bzr will use when committing.
+check_signatures - this option will control whether bzr will require good gpg
+ signatures, ignore them, or check them if they are
+ present. Currently it is unused except that check_signatures
+ turns on create_signatures.
+create_signatures - this option controls whether bzr will always create
+ gpg signatures on commits or not. There is a currently
+ unused value which in future is expected to create
+ signatures when branch settings require them.
+log_format - this option sets the default log format. Possible values are
+ long, short, line, or a plugin can register new formats.
+validate_signatures_in_log - show GPG signature validity in log output
+acceptable_keys - comma separated list of key patterns acceptable for
+ verify-signatures command
+
+In bazaar.conf you can also define aliases in the ALIASES sections, example
+
+[ALIASES]
+lastlog=log --line -r-10..-1
+ll=log --line -r-10..-1
+h=help
+up=pull
+"""
+
+from __future__ import absolute_import
+from cStringIO import StringIO
+import os
+import sys
+
+import bzrlib
+from bzrlib.decorators import needs_write_lock
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import fnmatch
+import re
+
+from bzrlib import (
+ atomicfile,
+ controldir,
+ debug,
+ errors,
+ lazy_regex,
+ library_state,
+ lockdir,
+ mergetools,
+ osutils,
+ symbol_versioning,
+ trace,
+ transport,
+ ui,
+ urlutils,
+ win32utils,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.util.configobj import configobj
+""")
+from bzrlib import (
+ commands,
+ hooks,
+ lazy_regex,
+ registry,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+
+
+CHECK_IF_POSSIBLE=0
+CHECK_ALWAYS=1
+CHECK_NEVER=2
+
+
+SIGN_WHEN_REQUIRED=0
+SIGN_ALWAYS=1
+SIGN_NEVER=2
+
+
+POLICY_NONE = 0
+POLICY_NORECURSE = 1
+POLICY_APPENDPATH = 2
+
+_policy_name = {
+ POLICY_NONE: None,
+ POLICY_NORECURSE: 'norecurse',
+ POLICY_APPENDPATH: 'appendpath',
+ }
+_policy_value = {
+ None: POLICY_NONE,
+ 'none': POLICY_NONE,
+ 'norecurse': POLICY_NORECURSE,
+ 'appendpath': POLICY_APPENDPATH,
+ }
+
+
+STORE_LOCATION = POLICY_NONE
+STORE_LOCATION_NORECURSE = POLICY_NORECURSE
+STORE_LOCATION_APPENDPATH = POLICY_APPENDPATH
+STORE_BRANCH = 3
+STORE_GLOBAL = 4
+
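+# Editor's note (an illustrative sketch, not part of the original module): in
+# locations.conf a per-option policy is stored under a companion
+# ``<option>:policy`` key; with POLICY_APPENDPATH the extra path components
+# between the section name and the queried location are appended to the value
+# when it is read back. The path and option value below are made up:
+#
+#   [/home/user/project]
+#   push_location = sftp://example.com/srv/project
+#   push_location:policy = appendpath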
+
+def signature_policy_from_unicode(signature_string):
+ """Convert a string to a signature checking policy."""
+ if signature_string.lower() == 'check-available':
+ return CHECK_IF_POSSIBLE
+ if signature_string.lower() == 'ignore':
+ return CHECK_NEVER
+ if signature_string.lower() == 'require':
+ return CHECK_ALWAYS
+ raise ValueError("Invalid signatures policy '%s'"
+ % signature_string)
+
+
+def signing_policy_from_unicode(signature_string):
+ """Convert a string to a signing policy."""
+ if signature_string.lower() == 'when-required':
+ return SIGN_WHEN_REQUIRED
+ if signature_string.lower() == 'never':
+ return SIGN_NEVER
+ if signature_string.lower() == 'always':
+ return SIGN_ALWAYS
+ raise ValueError("Invalid signing policy '%s'"
+ % signature_string)
+
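+# Quick reference (an editor's sketch, not part of the original module): the
+# two converters above map configuration strings to the module constants.
+#
+#   signature_policy_from_unicode('check-available') == CHECK_IF_POSSIBLE
+#   signature_policy_from_unicode('require') == CHECK_ALWAYS
+#   signing_policy_from_unicode('when-required') == SIGN_WHEN_REQUIRED
+#   signing_policy_from_unicode('always') == SIGN_ALWAYS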
+
+class ConfigObj(configobj.ConfigObj):
+
+ def __init__(self, infile=None, **kwargs):
+ # We define our own interpolation mechanism calling it option expansion
+ super(ConfigObj, self).__init__(infile=infile,
+ interpolation=False,
+ **kwargs)
+
+ def get_bool(self, section, key):
+ return self[section].as_bool(key)
+
+ def get_value(self, section, name):
+ # Try [] for the old DEFAULT section.
+ if section == "DEFAULT":
+ try:
+ return self[name]
+ except KeyError:
+ pass
+ return self[section][name]
+
+
+class Config(object):
+ """A configuration policy - what username, editor, gpg needs etc."""
+
+ def __init__(self):
+ super(Config, self).__init__()
+
+ def config_id(self):
+ """Returns a unique ID for the config."""
+ raise NotImplementedError(self.config_id)
+
+ def get_change_editor(self, old_tree, new_tree):
+ from bzrlib import diff
+ cmd = self._get_change_editor()
+ if cmd is None:
+ return None
+ return diff.DiffFromTool.from_string(cmd, old_tree, new_tree,
+ sys.stdout)
+
+ def _get_signature_checking(self):
+ """Template method to override signature checking policy."""
+
+ def _get_signing_policy(self):
+ """Template method to override signature creation policy."""
+
+ option_ref_re = None
+
+ def expand_options(self, string, env=None):
+ """Expand option references in the string in the configuration context.
+
+ :param string: The string containing option to expand.
+
+ :param env: An option dict defining additional configuration options or
+ overriding existing ones.
+
+ :returns: The expanded string.
+ """
+ return self._expand_options_in_string(string, env)
+
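+ # Illustrative example (an editor's sketch, not part of the original class):
+ # expansion of ``{name}`` references works on any Config instance when the
+ # referenced options are supplied through ``env``, because env values take
+ # precedence over the store. The names below are made up:
+ #
+ #   c = Config()
+ #   c.expand_options('{dir}/{file}', env={'dir': 'tmp', 'file': 'a.txt'})
+ #   # -> 'tmp/a.txt'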
+ def _expand_options_in_list(self, slist, env=None, _ref_stack=None):
+ """Expand options in a list of strings in the configuration context.
+
+ :param slist: A list of strings.
+
+ :param env: An option dict defining additional configuration options or
+ overriding existing ones.
+
+ :param _ref_stack: Private list containing the options being
+ expanded to detect loops.
+
+ :returns: The flattened list of expanded strings.
+ """
+ # expand options in each value separately flattening lists
+ result = []
+ for s in slist:
+ value = self._expand_options_in_string(s, env, _ref_stack)
+ if isinstance(value, list):
+ result.extend(value)
+ else:
+ result.append(value)
+ return result
+
+ def _expand_options_in_string(self, string, env=None, _ref_stack=None):
+ """Expand options in the string in the configuration context.
+
+ :param string: The string to be expanded.
+
+ :param env: An option dict defining additional configuration options or
+ overriding existing ones.
+
+ :param _ref_stack: Private list containing the options being
+ expanded to detect loops.
+
+ :returns: The expanded string.
+ """
+ if string is None:
+ # Not much to expand there
+ return None
+ if _ref_stack is None:
+ # What references are currently resolved (to detect loops)
+ _ref_stack = []
+ if self.option_ref_re is None:
+ # We want to match the most embedded reference first (i.e. for
+ # '{{foo}}' we will get '{foo}',
+ # for '{bar{baz}}' we will get '{baz}'
+ self.option_ref_re = re.compile('({[^{}]+})')
+ result = string
+ # We need to iterate until no more refs appear ({{foo}} will need two
+ # iterations for example).
+ while True:
+ raw_chunks = self.option_ref_re.split(result)
+ if len(raw_chunks) == 1:
+ # Shortcut the trivial case: no refs
+ return result
+ chunks = []
+ list_value = False
+ # Split will isolate refs so that every other chunk is a ref
+ chunk_is_ref = False
+ for chunk in raw_chunks:
+ if not chunk_is_ref:
+ if chunk:
+ # Keep only non-empty strings (or we get bogus empty
+ # slots when a list value is involved).
+ chunks.append(chunk)
+ chunk_is_ref = True
+ else:
+ name = chunk[1:-1]
+ if name in _ref_stack:
+ raise errors.OptionExpansionLoop(string, _ref_stack)
+ _ref_stack.append(name)
+ value = self._expand_option(name, env, _ref_stack)
+ if value is None:
+ raise errors.ExpandingUnknownOption(name, string)
+ if isinstance(value, list):
+ list_value = True
+ chunks.extend(value)
+ else:
+ chunks.append(value)
+ _ref_stack.pop()
+ chunk_is_ref = False
+ if list_value:
+ # Once a list appears as the result of an expansion, all
+ # callers will get a list result. This allows a consistent
+ # behavior even when some options in the expansion chain are
+ # defined as strings (no comma in their value) but their
+ # expanded value is a list.
+ return self._expand_options_in_list(chunks, env, _ref_stack)
+ else:
+ result = ''.join(chunks)
+ return result
+
+ def _expand_option(self, name, env, _ref_stack):
+ if env is not None and name in env:
+ # Special case: values provided in env take precedence over
+ # anything else
+ value = env[name]
+ else:
+ # FIXME: This is a limited implementation, what we really need is a
+ # way to query the bzr config for the value of an option,
+ # respecting the scope rules (That is, once we implement fallback
+ # configs, getting the option value should restart from the top
+ # config, not the current one) -- vila 20101222
+ value = self.get_user_option(name, expand=False)
+ if isinstance(value, list):
+ value = self._expand_options_in_list(value, env, _ref_stack)
+ else:
+ value = self._expand_options_in_string(value, env, _ref_stack)
+ return value
+
+ def _get_user_option(self, option_name):
+ """Template method to provide a user option."""
+ return None
+
+ def get_user_option(self, option_name, expand=True):
+ """Get a generic option - no special process, no default.
+
+ :param option_name: The queried option.
+
+ :param expand: Whether options references should be expanded.
+
+ :returns: The value of the option.
+ """
+ value = self._get_user_option(option_name)
+ if expand:
+ if isinstance(value, list):
+ value = self._expand_options_in_list(value)
+ elif isinstance(value, dict):
+ trace.warning('Cannot expand "%s":'
+ ' Dicts do not support option expansion'
+ % (option_name,))
+ else:
+ value = self._expand_options_in_string(value)
+ for hook in OldConfigHooks['get']:
+ hook(self, option_name, value)
+ return value
+
+ def get_user_option_as_bool(self, option_name, expand=None, default=None):
+ """Get a generic option as a boolean.
+
+ :param expand: Allow expanding references to other config values.
+ :param default: Default value if nothing is configured
+ :return: None if the option doesn't exist or its value can't be
+ interpreted as a boolean. Returns True or False otherwise.
+ """
+ s = self.get_user_option(option_name, expand=expand)
+ if s is None:
+ # The option doesn't exist
+ return default
+ val = ui.bool_from_string(s)
+ if val is None:
+ # The value can't be interpreted as a boolean
+ trace.warning('Value "%s" is not a boolean for "%s"',
+ s, option_name)
+ return val
+
+ def get_user_option_as_list(self, option_name, expand=None):
+ """Get a generic option as a list - no special process, no default.
+
+ :return: None if the option doesn't exist. Returns the value as a list
+ otherwise.
+ """
+ l = self.get_user_option(option_name, expand=expand)
+ if isinstance(l, (str, unicode)):
+ # A single value, most probably the user forgot (or didn't care to
+ # add) the final ','
+ l = [l]
+ return l
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def get_user_option_as_int_from_SI(self, option_name, default=None):
+ """Get a generic option from a human-readable size in SI units, e.g. 10MB.
+
+ Accepted suffixes are K,M,G. It is case-insensitive and may be followed
+ by a trailing b (i.e. Kb, MB). This is intended to be practical and not
+ pedantic.
+
+ :return: Integer, expanded to its base-10 value if a proper SI unit is
+ found. If the option doesn't exist, or isn't a value in
+ SI units, return default (which defaults to None)
+ """
+ val = self.get_user_option(option_name)
+ if isinstance(val, list):
+ val = val[0]
+ if val is None:
+ val = default
+ else:
+ p = re.compile(r"^(\d+)([kmg])*b*$", re.IGNORECASE)
+ try:
+ m = p.match(val)
+ if m is not None:
+ val = int(m.group(1))
+ if m.group(2) is not None:
+ if m.group(2).lower() == 'k':
+ val *= 10**3
+ elif m.group(2).lower() == 'm':
+ val *= 10**6
+ elif m.group(2).lower() == 'g':
+ val *= 10**9
+ else:
+ ui.ui_factory.show_warning(gettext('Invalid config value for "{0}" '
+ ' value {1!r} is not an SI unit.').format(
+ option_name, val))
+ val = default
+ except TypeError:
+ val = default
+ return val
+
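+ # Worked example (an editor's sketch, not part of the original class): for a
+ # configured value of '20MB' the regexp above captures ('20', 'M'), so the
+ # method returns 20 * 10**6 = 20000000; '512k' gives 512 * 10**3 = 512000. A
+ # value that does not match, such as 'twenty', triggers the warning above
+ # and the default is returned.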
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def gpg_signing_command(self):
+ """What program should be used to sign signatures?"""
+ result = self._gpg_signing_command()
+ if result is None:
+ result = "gpg"
+ return result
+
+ def _gpg_signing_command(self):
+ """See gpg_signing_command()."""
+ return None
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def log_format(self):
+ """What log format should be used"""
+ result = self._log_format()
+ if result is None:
+ result = "long"
+ return result
+
+ def _log_format(self):
+ """See log_format()."""
+ return None
+
+ def validate_signatures_in_log(self):
+ """Show GPG signature validity in log"""
+ result = self._validate_signatures_in_log()
+ if result == "true":
+ result = True
+ else:
+ result = False
+ return result
+
+ def _validate_signatures_in_log(self):
+ """See validate_signatures_in_log()."""
+ return None
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def acceptable_keys(self):
+ """Comma separated list of key patterns acceptable to
+ verify-signatures command"""
+ result = self._acceptable_keys()
+ return result
+
+ def _acceptable_keys(self):
+ """See acceptable_keys()."""
+ return None
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def post_commit(self):
+ """An ordered list of python functions to call.
+
+ Each function takes branch, rev_id as parameters.
+ """
+ return self._post_commit()
+
+ def _post_commit(self):
+ """See Config.post_commit."""
+ return None
+
+ def user_email(self):
+ """Return just the email component of a username."""
+ return extract_email_address(self.username())
+
+ def username(self):
+ """Return email-style username.
+
+ Something similar to 'Martin Pool <mbp@sourcefrog.net>'
+
+ $BZR_EMAIL can be set to override this, then
+ the concrete policy type is checked, and finally
+ $EMAIL is examined.
+ If no username can be found, errors.NoWhoami exception is raised.
+ """
+ v = os.environ.get('BZR_EMAIL')
+ if v:
+ return v.decode(osutils.get_user_encoding())
+ v = self._get_user_id()
+ if v:
+ return v
+ return default_email()
+
+ def ensure_username(self):
+ """Raise errors.NoWhoami if username is not set.
+
+ This method relies on the username() function raising the error.
+ """
+ self.username()
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def signature_checking(self):
+ """What is the current policy for signature checking?"""
+ policy = self._get_signature_checking()
+ if policy is not None:
+ return policy
+ return CHECK_IF_POSSIBLE
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def signing_policy(self):
+ """What is the current policy for signature creation?"""
+ policy = self._get_signing_policy()
+ if policy is not None:
+ return policy
+ return SIGN_WHEN_REQUIRED
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def signature_needed(self):
+ """Is a signature needed when committing?"""
+ policy = self._get_signing_policy()
+ if policy is None:
+ policy = self._get_signature_checking()
+ if policy is not None:
+ # this warning should go away once check_signatures is
+ # implemented (if not before)
+ trace.warning("Please use create_signatures,"
+ " not check_signatures to set signing policy.")
+ elif policy == SIGN_ALWAYS:
+ return True
+ return False
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def gpg_signing_key(self):
+ """GPG user-id to sign commits"""
+ key = self.get_user_option('gpg_signing_key')
+ if key == "default" or key is None:
+ return self.user_email()
+ else:
+ return key
+
+ def get_alias(self, value):
+ return self._get_alias(value)
+
+ def _get_alias(self, value):
+ pass
+
+ def get_nickname(self):
+ return self._get_nickname()
+
+ def _get_nickname(self):
+ return None
+
+ def get_bzr_remote_path(self):
+ try:
+ return os.environ['BZR_REMOTE_PATH']
+ except KeyError:
+ path = self.get_user_option("bzr_remote_path")
+ if path is None:
+ path = 'bzr'
+ return path
+
+ def suppress_warning(self, warning):
+ """Should the warning be suppressed or emitted?
+
+ :param warning: The name of the warning being tested.
+
+ :returns: True if the warning should be suppressed, False otherwise.
+ """
+ warnings = self.get_user_option_as_list('suppress_warnings')
+ if warnings is None or warning not in warnings:
+ return False
+ else:
+ return True
+
+ def get_merge_tools(self):
+ tools = {}
+ for (oname, value, section, conf_id, parser) in self._get_options():
+ if oname.startswith('bzr.mergetool.'):
+ tool_name = oname[len('bzr.mergetool.'):]
+ tools[tool_name] = self.get_user_option(oname, False)
+ trace.mutter('loaded merge tools: %r' % tools)
+ return tools
+
+ def find_merge_tool(self, name):
+ # We fake a defaults mechanism here by checking if the given name can
+ # be found in the known_merge_tools if it's not found in the config.
+ # This should be done through the proposed config defaults mechanism
+ # when it becomes available in the future.
+ command_line = (self.get_user_option('bzr.mergetool.%s' % name,
+ expand=False)
+ or mergetools.known_merge_tools.get(name, None))
+ return command_line
+
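+# Illustrative configuration (an editor's sketch, not part of the original
+# module): merge tools are plain options whose names start with
+# ``bzr.mergetool.``, so a bazaar.conf stanza such as the one below (the tool
+# name and its command line are invented) is returned by get_merge_tools()
+# above as {'mydiff': 'mydiff {base} {this} {other} -o {result}'}.
+#
+#   [DEFAULT]
+#   bzr.mergetool.mydiff = mydiff {base} {this} {other} -o {result}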
+
+class _ConfigHooks(hooks.Hooks):
+ """A dict mapping hook names to lists of callables for configs."""
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially, because by default nothing should get
+ notified.
+ """
+ super(_ConfigHooks, self).__init__('bzrlib.config', 'ConfigHooks')
+ self.add_hook('load',
+ 'Invoked when a config store is loaded.'
+ ' The signature is (store).',
+ (2, 4))
+ self.add_hook('save',
+ 'Invoked when a config store is saved.'
+ ' The signature is (store).',
+ (2, 4))
+ # The hooks for config options
+ self.add_hook('get',
+ 'Invoked when a config option is read.'
+ ' The signature is (stack, name, value).',
+ (2, 4))
+ self.add_hook('set',
+ 'Invoked when a config option is set.'
+ ' The signature is (stack, name, value).',
+ (2, 4))
+ self.add_hook('remove',
+ 'Invoked when a config option is removed.'
+ ' The signature is (stack, name).',
+ (2, 4))
+ConfigHooks = _ConfigHooks()
+
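+# Illustrative use (an editor's sketch, not part of the original module): a
+# plugin could subscribe to the 'set' hook point defined above through the
+# Hooks.install_named_hook API; the callback below is a made-up example.
+#
+#   def _log_config_set(stack, name, value):
+#       trace.mutter('config option %s set to %r', name, value)
+#
+#   ConfigHooks.install_named_hook('set', _log_config_set, 'my-plugin')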
+
+class _OldConfigHooks(hooks.Hooks):
+ """A dict mapping hook names to lists of callables for configs."""
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially, because by default nothing should get
+ notified.
+ """
+ super(_OldConfigHooks, self).__init__('bzrlib.config', 'OldConfigHooks')
+ self.add_hook('load',
+ 'Invoked when a config store is loaded.'
+ ' The signature is (config).',
+ (2, 4))
+ self.add_hook('save',
+ 'Invoked when a config store is saved.'
+ ' The signature is (config).',
+ (2, 4))
+ # The hooks for config options
+ self.add_hook('get',
+ 'Invoked when a config option is read.'
+ ' The signature is (config, name, value).',
+ (2, 4))
+ self.add_hook('set',
+ 'Invoked when a config option is set.'
+ ' The signature is (config, name, value).',
+ (2, 4))
+ self.add_hook('remove',
+ 'Invoked when a config option is removed.'
+ ' The signature is (config, name).',
+ (2, 4))
+OldConfigHooks = _OldConfigHooks()
+
+
+class IniBasedConfig(Config):
+ """A configuration policy that draws from ini files."""
+
+ def __init__(self, get_filename=symbol_versioning.DEPRECATED_PARAMETER,
+ file_name=None):
+ """Base class for configuration files using an ini-like syntax.
+
+ :param file_name: The configuration file path.
+ """
+ super(IniBasedConfig, self).__init__()
+ self.file_name = file_name
+ if symbol_versioning.deprecated_passed(get_filename):
+ symbol_versioning.warn(
+ 'IniBasedConfig.__init__(get_filename) was deprecated in 2.3.'
+ ' Use file_name instead.',
+ DeprecationWarning,
+ stacklevel=2)
+ if get_filename is not None:
+ self.file_name = get_filename()
+ else:
+ self.file_name = file_name
+ self._content = None
+ self._parser = None
+
+ @classmethod
+ def from_string(cls, str_or_unicode, file_name=None, save=False):
+ """Create a config object from a string.
+
+ :param str_or_unicode: A string representing the file content. This will
+ be utf-8 encoded.
+
+ :param file_name: The configuration file path.
+
+ :param save: Whether the file should be saved upon creation.
+ """
+ conf = cls(file_name=file_name)
+ conf._create_from_string(str_or_unicode, save)
+ return conf
+
+ def _create_from_string(self, str_or_unicode, save):
+ self._content = StringIO(str_or_unicode.encode('utf-8'))
+ # Some tests use in-memory configs, some others always need the config
+ # file to exist on disk.
+ if save:
+ self._write_config_file()
+
+ def _get_parser(self, file=symbol_versioning.DEPRECATED_PARAMETER):
+ if self._parser is not None:
+ return self._parser
+ if symbol_versioning.deprecated_passed(file):
+ symbol_versioning.warn(
+ 'IniBasedConfig._get_parser(file=xxx) was deprecated in 2.3.'
+ ' Use IniBasedConfig(_content=xxx) instead.',
+ DeprecationWarning,
+ stacklevel=2)
+ if self._content is not None:
+ co_input = self._content
+ elif self.file_name is None:
+ raise AssertionError('We have no content to create the config')
+ else:
+ co_input = self.file_name
+ try:
+ self._parser = ConfigObj(co_input, encoding='utf-8')
+ except configobj.ConfigObjError, e:
+ raise errors.ParseConfigError(e.errors, e.config.filename)
+ except UnicodeDecodeError:
+ raise errors.ConfigContentError(self.file_name)
+ # Make sure self.reload() will use the right file name
+ self._parser.filename = self.file_name
+ for hook in OldConfigHooks['load']:
+ hook(self)
+ return self._parser
+
+ def reload(self):
+ """Reload the config file from disk."""
+ if self.file_name is None:
+ raise AssertionError('We need a file name to reload the config')
+ if self._parser is not None:
+ self._parser.reload()
+ for hook in ConfigHooks['load']:
+ hook(self)
+
+ def _get_matching_sections(self):
+ """Return an ordered list of (section_name, extra_path) pairs.
+
+ If the section contains inherited configuration, extra_path is
+ a string containing the additional path components.
+ """
+ section = self._get_section()
+ if section is not None:
+ return [(section, '')]
+ else:
+ return []
+
+ def _get_section(self):
+ """Override this to define the section used by the config."""
+ return "DEFAULT"
+
+ def _get_sections(self, name=None):
+ """Returns an iterator of the sections specified by ``name``.
+
+ :param name: The section name. If None is supplied, the default
+ configurations are yielded.
+
+ :return: An iterator of (name, section, config_id) tuples for all
+ sections that will be walked by user_get_option() in the 'right'
+ order. The first one is where set_user_option() will update the value.
+ """
+ parser = self._get_parser()
+ if name is not None:
+ yield (name, parser[name], self.config_id())
+ else:
+ # No section name has been given so we fallback to the configobj
+ # itself which holds the variables defined outside of any section.
+ yield (None, parser, self.config_id())
+
+ def _get_options(self, sections=None):
+ """Return an ordered list of (name, value, section, config_id) tuples.
+
+ All options are returned with their associated value and the section
+ they appeared in. ``config_id`` is a unique identifier for the
+ configuration file the option is defined in.
+
+ :param sections: Default to ``_get_matching_sections`` if not
+ specified. This gives a better control to daughter classes about
+ which sections should be searched. This is a list of (name,
+ configobj) tuples.
+ """
+ opts = []
+ if sections is None:
+ parser = self._get_parser()
+ sections = []
+ for (section_name, _) in self._get_matching_sections():
+ try:
+ section = parser[section_name]
+ except KeyError:
+ # This could happen for an empty file for which we define a
+ # DEFAULT section. FIXME: Force callers to provide sections
+ # instead ? -- vila 20100930
+ continue
+ sections.append((section_name, section))
+ config_id = self.config_id()
+ for (section_name, section) in sections:
+ for (name, value) in section.iteritems():
+ yield (name, parser._quote(value), section_name,
+ config_id, parser)
+
+ def _get_option_policy(self, section, option_name):
+ """Return the policy for the given (section, option_name) pair."""
+ return POLICY_NONE
+
+ def _get_change_editor(self):
+ return self.get_user_option('change_editor')
+
+ def _get_signature_checking(self):
+ """See Config._get_signature_checking."""
+ policy = self._get_user_option('check_signatures')
+ if policy:
+ return signature_policy_from_unicode(policy)
+
+ def _get_signing_policy(self):
+ """See Config._get_signing_policy"""
+ policy = self._get_user_option('create_signatures')
+ if policy:
+ return signing_policy_from_unicode(policy)
+
+ def _get_user_id(self):
+ """Get the user id from the 'email' key in the current section."""
+ return self._get_user_option('email')
+
+ def _get_user_option(self, option_name):
+ """See Config._get_user_option."""
+ for (section, extra_path) in self._get_matching_sections():
+ try:
+ value = self._get_parser().get_value(section, option_name)
+ except KeyError:
+ continue
+ policy = self._get_option_policy(section, option_name)
+ if policy == POLICY_NONE:
+ return value
+ elif policy == POLICY_NORECURSE:
+ # norecurse items only apply to the exact path
+ if extra_path:
+ continue
+ else:
+ return value
+ elif policy == POLICY_APPENDPATH:
+ if extra_path:
+ value = urlutils.join(value, extra_path)
+ return value
+ else:
+ raise AssertionError('Unexpected config policy %r' % policy)
+ else:
+ return None
+
+ def _gpg_signing_command(self):
+ """See Config.gpg_signing_command."""
+ return self._get_user_option('gpg_signing_command')
+
+ def _log_format(self):
+ """See Config.log_format."""
+ return self._get_user_option('log_format')
+
+ def _validate_signatures_in_log(self):
+ """See Config.validate_signatures_in_log."""
+ return self._get_user_option('validate_signatures_in_log')
+
+ def _acceptable_keys(self):
+ """See Config.acceptable_keys."""
+ return self._get_user_option('acceptable_keys')
+
+ def _post_commit(self):
+ """See Config.post_commit."""
+ return self._get_user_option('post_commit')
+
+ def _get_alias(self, value):
+ try:
+ return self._get_parser().get_value("ALIASES",
+ value)
+ except KeyError:
+ pass
+
+ def _get_nickname(self):
+ return self.get_user_option('nickname')
+
+ def remove_user_option(self, option_name, section_name=None):
+ """Remove a user option and save the configuration file.
+
+ :param option_name: The option to be removed.
+
+ :param section_name: The section the option is defined in, default to
+ the default section.
+ """
+ self.reload()
+ parser = self._get_parser()
+ if section_name is None:
+ section = parser
+ else:
+ section = parser[section_name]
+ try:
+ del section[option_name]
+ except KeyError:
+ raise errors.NoSuchConfigOption(option_name)
+ self._write_config_file()
+ for hook in OldConfigHooks['remove']:
+ hook(self, option_name)
+
+ def _write_config_file(self):
+ if self.file_name is None:
+ raise AssertionError('We cannot save, self.file_name is None')
+ conf_dir = os.path.dirname(self.file_name)
+ ensure_config_dir_exists(conf_dir)
+ atomic_file = atomicfile.AtomicFile(self.file_name)
+ self._get_parser().write(atomic_file)
+ atomic_file.commit()
+ atomic_file.close()
+ osutils.copy_ownership_from_path(self.file_name)
+ for hook in OldConfigHooks['save']:
+ hook(self)
+
+
+class LockableConfig(IniBasedConfig):
+ """A configuration needing explicit locking for access.
+
+ If several processes try to write the config file, the accesses need to be
+ serialized.
+
+ Daughter classes should decorate all methods that update a config (i.e.
+ that call, directly or indirectly, the ``_write_config_file()`` method)
+ with the ``@needs_write_lock`` decorator. These methods (typically
+ ``set_option()`` and variants) must reload the config file from disk
+ before calling ``_write_config_file()``; this can be achieved by calling
+ the ``self.reload()`` method. Note that the lock scope should cover both
+ the reading and the writing of the config file, which is why the
+ decorator can't be applied to ``_write_config_file()`` only.
+
+ This should be enough to implement the following logic:
+ - lock for exclusive write access,
+ - reload the config file from disk,
+ - set the new value
+ - unlock
+
+ This logic guarantees that a writer can update a value without erasing an
+ update made by another writer.
+ """
+
+ lock_name = 'lock'
+
+ def __init__(self, file_name):
+ super(LockableConfig, self).__init__(file_name=file_name)
+ self.dir = osutils.dirname(osutils.safe_unicode(self.file_name))
+ # FIXME: It doesn't matter that we don't provide possible_transports
+ # below since this is currently used only for local config files ;
+ # local transports are not shared. But if/when we start using
+ # LockableConfig for other kind of transports, we will need to reuse
+ # whatever connection is already established -- vila 20100929
+ self.transport = transport.get_transport_from_path(self.dir)
+ self._lock = lockdir.LockDir(self.transport, self.lock_name)
+
+ def _create_from_string(self, unicode_bytes, save):
+ super(LockableConfig, self)._create_from_string(unicode_bytes, False)
+ if save:
+ # We need to handle the saving here (as opposed to IniBasedConfig)
+ # to be able to lock
+ self.lock_write()
+ self._write_config_file()
+ self.unlock()
+
+ def lock_write(self, token=None):
+ """Takes a write lock in the directory containing the config file.
+
+ If the directory doesn't exist it is created.
+ """
+ ensure_config_dir_exists(self.dir)
+ return self._lock.lock_write(token)
+
+ def unlock(self):
+ self._lock.unlock()
+
+ def break_lock(self):
+ self._lock.break_lock()
+
+ @needs_write_lock
+ def remove_user_option(self, option_name, section_name=None):
+ super(LockableConfig, self).remove_user_option(option_name,
+ section_name)
+
+ def _write_config_file(self):
+ if self._lock is None or not self._lock.is_held:
+ # NB: if the following exception is raised it probably means a
+ # missing @needs_write_lock decorator on one of the callers.
+ raise errors.ObjectNotLocked(self)
+ super(LockableConfig, self)._write_config_file()
+
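+# Editor's note (an illustrative sketch, not part of the original module): the
+# GlobalConfig subclass defined below decorates set_user_option() with
+# @needs_write_lock, so a plain call already follows the lock / reload / set /
+# unlock sequence described in the LockableConfig docstring; the option name
+# and value below are made up.
+#
+#   conf = GlobalConfig()
+#   conf.set_user_option('some_option', 'value')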
+
+class GlobalConfig(LockableConfig):
+ """The user-wide (global) configuration."""
+
+ def __init__(self):
+ super(GlobalConfig, self).__init__(file_name=config_filename())
+
+ def config_id(self):
+ return 'bazaar'
+
+ @classmethod
+ def from_string(cls, str_or_unicode, save=False):
+ """Create a config object from a string.
+
+ :param str_or_unicode: A string representing the file content. This
+ will be utf-8 encoded.
+
+ :param save: Whether the file should be saved upon creation.
+ """
+ conf = cls()
+ conf._create_from_string(str_or_unicode, save)
+ return conf
+
+ @needs_write_lock
+ def set_user_option(self, option, value):
+ """Save option and its value in the configuration."""
+ self._set_option(option, value, 'DEFAULT')
+
+ def get_aliases(self):
+ """Return the aliases section."""
+ if 'ALIASES' in self._get_parser():
+ return self._get_parser()['ALIASES']
+ else:
+ return {}
+
+ @needs_write_lock
+ def set_alias(self, alias_name, alias_command):
+ """Save the alias in the configuration."""
+ self._set_option(alias_name, alias_command, 'ALIASES')
+
+ @needs_write_lock
+ def unset_alias(self, alias_name):
+ """Unset an existing alias."""
+ self.reload()
+ aliases = self._get_parser().get('ALIASES')
+ if not aliases or alias_name not in aliases:
+ raise errors.NoSuchAlias(alias_name)
+ del aliases[alias_name]
+ self._write_config_file()
+
+ def _set_option(self, option, value, section):
+ self.reload()
+ self._get_parser().setdefault(section, {})[option] = value
+ self._write_config_file()
+ for hook in OldConfigHooks['set']:
+ hook(self, option, value)
+
+ def _get_sections(self, name=None):
+ """See IniBasedConfig._get_sections()."""
+ parser = self._get_parser()
+ # We don't give access to options defined outside of any section; we
+ # use the DEFAULT section by... default.
+ if name in (None, 'DEFAULT'):
+ # This could happen for an empty file where the DEFAULT section
+ # doesn't exist yet. So we force DEFAULT when yielding
+ name = 'DEFAULT'
+ if 'DEFAULT' not in parser:
+ parser['DEFAULT'] = {}
+ yield (name, parser[name], self.config_id())
+
+ @needs_write_lock
+ def remove_user_option(self, option_name, section_name=None):
+ if section_name is None:
+ # We need to force the default section.
+ section_name = 'DEFAULT'
+ # We need to avoid the LockableConfig implementation or we'll lock
+ # twice
+ super(LockableConfig, self).remove_user_option(option_name,
+ section_name)
+
+def _iter_for_location_by_parts(sections, location):
+ """Keep only the sections matching the specified location.
+
+ :param sections: An iterable of section names.
+
+ :param location: An url or a local path to match against.
+
+ :returns: An iterator of (section, extra_path, nb_parts) where nb_parts
+ is the number of path components in the section name, section is the
+ section name and extra_path is the difference between location and the
+ section name.
+
+ ``location`` will always be a local path and never a 'file://' url but the
+ section names themselves can be in either form.
+ """
+ location_parts = location.rstrip('/').split('/')
+
+ for section in sections:
+ # location is a local path if possible, so we need to convert 'file://'
+ # urls in section names to local paths if necessary.
+
+ # This also avoids having file:///path be a more exact
+ # match than '/path'.
+
+ # FIXME: This still raises an issue if a user defines both file:///path
+ # *and* /path. Should we raise an error in this case -- vila 20110505
+
+ if section.startswith('file://'):
+ section_path = urlutils.local_path_from_url(section)
+ else:
+ section_path = section
+ section_parts = section_path.rstrip('/').split('/')
+
+ matched = True
+ if len(section_parts) > len(location_parts):
+ # More path components in the section, they can't match
+ matched = False
+ else:
+ # Rely on zip truncating in length to the length of the shortest
+ # argument sequence.
+ names = zip(location_parts, section_parts)
+ for name in names:
+ if not fnmatch.fnmatch(name[0], name[1]):
+ matched = False
+ break
+ if not matched:
+ continue
+ # build the path difference between the section and the location
+ extra_path = '/'.join(location_parts[len(section_parts):])
+ yield section, extra_path, len(section_parts)
+
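+# Illustrative behaviour (an editor's sketch, not part of the original
+# module) of the helper above; the paths are made up. Note that the part
+# count includes the empty leading component produced by splitting an
+# absolute path on '/'.
+#
+#   list(_iter_for_location_by_parts(
+#       ['/home/user/project', 'file:///home/user'], '/home/user/project/sub'))
+#   # -> [('/home/user/project', 'sub', 4),
+#   #     ('file:///home/user', 'project/sub', 3)]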
+
+class LocationConfig(LockableConfig):
+ """A configuration object that gives the policy for a location."""
+
+ def __init__(self, location):
+ super(LocationConfig, self).__init__(
+ file_name=locations_config_filename())
+ # local file locations are looked up by local path, rather than
+ # by file url. This is because the config file is a user
+ # file, and we would rather not expose the user to file urls.
+ if location.startswith('file://'):
+ location = urlutils.local_path_from_url(location)
+ self.location = location
+
+ def config_id(self):
+ return 'locations'
+
+ @classmethod
+ def from_string(cls, str_or_unicode, location, save=False):
+ """Create a config object from a string.
+
+ :param str_or_unicode: A string representing the file content. This will
+ be utf-8 encoded.
+
+ :param location: The location url to filter the configuration.
+
+ :param save: Whether the file should be saved upon creation.
+ """
+ conf = cls(location)
+ conf._create_from_string(str_or_unicode, save)
+ return conf
+
+ def _get_matching_sections(self):
+ """Return an ordered list of section names matching this location."""
+ matches = list(_iter_for_location_by_parts(self._get_parser(),
+ self.location))
+ # put the longest (aka more specific) locations first
+ matches.sort(
+ key=lambda (section, extra_path, length): (length, section),
+ reverse=True)
+ for (section, extra_path, length) in matches:
+ yield section, extra_path
+ # should we stop looking for parent configs here?
+ try:
+ if self._get_parser()[section].as_bool('ignore_parents'):
+ break
+ except KeyError:
+ pass
+
+ def _get_sections(self, name=None):
+ """See IniBasedConfig._get_sections()."""
+ # We ignore the name here as the only sections handled are named with
+ # the location path and we don't expose embedded sections either.
+ parser = self._get_parser()
+ for name, extra_path in self._get_matching_sections():
+ yield (name, parser[name], self.config_id())
+
+ def _get_option_policy(self, section, option_name):
+ """Return the policy for the given (section, option_name) pair."""
+ # check for the old 'recurse=False' flag
+ try:
+ recurse = self._get_parser()[section].as_bool('recurse')
+ except KeyError:
+ recurse = True
+ if not recurse:
+ return POLICY_NORECURSE
+
+ policy_key = option_name + ':policy'
+ try:
+ policy_name = self._get_parser()[section][policy_key]
+ except KeyError:
+ policy_name = None
+
+ return _policy_value[policy_name]
+
+ def _set_option_policy(self, section, option_name, option_policy):
+ """Set the policy for the given option name in the given section."""
+ # The old recurse=False option affects all options in the
+ # section. To handle multiple policies in the section, we
+ # need to convert it to a policy_norecurse key.
+ try:
+ recurse = self._get_parser()[section].as_bool('recurse')
+ except KeyError:
+ pass
+ else:
+ symbol_versioning.warn(
+ 'The recurse option is deprecated as of 0.14. '
+ 'The section "%s" has been converted to use policies.'
+ % section,
+ DeprecationWarning)
+ del self._get_parser()[section]['recurse']
+ if not recurse:
+ for key in self._get_parser()[section].keys():
+ if not key.endswith(':policy'):
+ self._get_parser()[section][key +
+ ':policy'] = 'norecurse'
+
+ policy_key = option_name + ':policy'
+ policy_name = _policy_name[option_policy]
+ if policy_name is not None:
+ self._get_parser()[section][policy_key] = policy_name
+ else:
+ if policy_key in self._get_parser()[section]:
+ del self._get_parser()[section][policy_key]
+
+ @needs_write_lock
+ def set_user_option(self, option, value, store=STORE_LOCATION):
+ """Save option and its value in the configuration."""
+ if store not in [STORE_LOCATION,
+ STORE_LOCATION_NORECURSE,
+ STORE_LOCATION_APPENDPATH]:
+ raise ValueError('bad storage policy %r for %r' %
+ (store, option))
+ self.reload()
+ location = self.location
+ if location.endswith('/'):
+ location = location[:-1]
+ parser = self._get_parser()
+ if not location in parser and not location + '/' in parser:
+ parser[location] = {}
+ elif location + '/' in parser:
+ location = location + '/'
+ parser[location][option] = value
+ # the allowed values of store match the config policies
+ self._set_option_policy(location, option, store)
+ self._write_config_file()
+ for hook in OldConfigHooks['set']:
+ hook(self, option, value)
+
+
+class BranchConfig(Config):
+ """A configuration object giving the policy for a branch."""
+
+ def __init__(self, branch):
+ super(BranchConfig, self).__init__()
+ self._location_config = None
+ self._branch_data_config = None
+ self._global_config = None
+ self.branch = branch
+ self.option_sources = (self._get_location_config,
+ self._get_branch_data_config,
+ self._get_global_config)
+
+ def config_id(self):
+ return 'branch'
+
+ def _get_branch_data_config(self):
+ if self._branch_data_config is None:
+ self._branch_data_config = TreeConfig(self.branch)
+ self._branch_data_config.config_id = self.config_id
+ return self._branch_data_config
+
+ def _get_location_config(self):
+ if self._location_config is None:
+ self._location_config = LocationConfig(self.branch.base)
+ return self._location_config
+
+ def _get_global_config(self):
+ if self._global_config is None:
+ self._global_config = GlobalConfig()
+ return self._global_config
+
+ def _get_best_value(self, option_name):
+ """This returns a user option from local, tree or global config.
+
+ They are tried in that order. Use get_safe_value if trusted values
+ are necessary.
+ """
+ for source in self.option_sources:
+ value = getattr(source(), option_name)()
+ if value is not None:
+ return value
+ return None
+
+ def _get_safe_value(self, option_name):
+ """This variant of get_best_value never returns untrusted values.
+
+ It does not return values from the branch data, because the branch may
+ not be controlled by the user.
+
+ We may wish to allow locations.conf to control whether branches are
+ trusted in the future.
+ """
+ for source in (self._get_location_config, self._get_global_config):
+ value = getattr(source(), option_name)()
+ if value is not None:
+ return value
+ return None
+
+ def _get_user_id(self):
+ """Return the full user id for the branch.
+
+ e.g. "John Hacker <jhacker@example.com>"
+ This is looked up in the email controlfile for the branch.
+ """
+ return self._get_best_value('_get_user_id')
+
+ def _get_change_editor(self):
+ return self._get_best_value('_get_change_editor')
+
+ def _get_signature_checking(self):
+ """See Config._get_signature_checking."""
+ return self._get_best_value('_get_signature_checking')
+
+ def _get_signing_policy(self):
+ """See Config._get_signing_policy."""
+ return self._get_best_value('_get_signing_policy')
+
+ def _get_user_option(self, option_name):
+ """See Config._get_user_option."""
+ for source in self.option_sources:
+ value = source()._get_user_option(option_name)
+ if value is not None:
+ return value
+ return None
+
+ def _get_sections(self, name=None):
+ """See IniBasedConfig.get_sections()."""
+ for source in self.option_sources:
+ for section in source()._get_sections(name):
+ yield section
+
+ def _get_options(self, sections=None):
+ opts = []
+ # First the locations options
+ for option in self._get_location_config()._get_options():
+ yield option
+ # Then the branch options
+ branch_config = self._get_branch_data_config()
+ if sections is None:
+ sections = [('DEFAULT', branch_config._get_parser())]
+ # FIXME: We shouldn't have to duplicate the code in IniBasedConfig but
+ # Config itself has no notion of sections :( -- vila 20101001
+ config_id = self.config_id()
+ for (section_name, section) in sections:
+ for (name, value) in section.iteritems():
+ yield (name, value, section_name,
+ config_id, branch_config._get_parser())
+ # Then the global options
+ for option in self._get_global_config()._get_options():
+ yield option
+
+ def set_user_option(self, name, value, store=STORE_BRANCH,
+ warn_masked=False):
+ if store == STORE_BRANCH:
+ self._get_branch_data_config().set_option(value, name)
+ elif store == STORE_GLOBAL:
+ self._get_global_config().set_user_option(name, value)
+ else:
+ self._get_location_config().set_user_option(name, value, store)
+ if not warn_masked:
+ return
+ if store in (STORE_GLOBAL, STORE_BRANCH):
+ mask_value = self._get_location_config().get_user_option(name)
+ if mask_value is not None:
+ trace.warning('Value "%s" is masked by "%s" from'
+ ' locations.conf', value, mask_value)
+ else:
+ if store == STORE_GLOBAL:
+ branch_config = self._get_branch_data_config()
+ mask_value = branch_config.get_user_option(name)
+ if mask_value is not None:
+ trace.warning('Value "%s" is masked by "%s" from'
+ ' branch.conf', value, mask_value)
+
+ def remove_user_option(self, option_name, section_name=None):
+ self._get_branch_data_config().remove_option(option_name, section_name)
+
+ def _gpg_signing_command(self):
+ """See Config.gpg_signing_command."""
+ return self._get_safe_value('_gpg_signing_command')
+
+ def _post_commit(self):
+ """See Config.post_commit."""
+ return self._get_safe_value('_post_commit')
+
+ def _get_nickname(self):
+ value = self._get_explicit_nickname()
+ if value is not None:
+ return value
+ if self.branch.name:
+ return self.branch.name
+ return urlutils.unescape(self.branch.base.split('/')[-2])
+
+ def has_explicit_nickname(self):
+ """Return true if a nickname has been explicitly assigned."""
+ return self._get_explicit_nickname() is not None
+
+ def _get_explicit_nickname(self):
+ return self._get_best_value('_get_nickname')
+
+ def _log_format(self):
+ """See Config.log_format."""
+ return self._get_best_value('_log_format')
+
+ def _validate_signatures_in_log(self):
+ """See Config.validate_signatures_in_log."""
+ return self._get_best_value('_validate_signatures_in_log')
+
+ def _acceptable_keys(self):
+ """See Config.acceptable_keys."""
+ return self._get_best_value('_acceptable_keys')
+
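+# Illustrative sketch (not part of the original code, assuming the usual
+# Branch.get_config() accessor): a BranchConfig resolves options by falling
+# back from locations.conf to branch.conf to bazaar.conf, e.g.
+#
+#   config = a_branch.get_config()             # a BranchConfig instance
+#   nick = config.get_nickname()               # explicit nick if set, else derived from the branch
+#   editor = config.get_user_option('editor')  # first source that defines it wins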
+
+def ensure_config_dir_exists(path=None):
+ """Make sure a configuration directory exists.
+ This makes sure that the directory exists.
+ On windows, since configuration directories are 2 levels deep,
+ it makes sure both the directory and the parent directory exists.
+ """
+ if path is None:
+ path = config_dir()
+ if not os.path.isdir(path):
+ if sys.platform == 'win32':
+ parent_dir = os.path.dirname(path)
+ if not os.path.isdir(parent_dir):
+ trace.mutter('creating config parent directory: %r', parent_dir)
+ os.mkdir(parent_dir)
+ trace.mutter('creating config directory: %r', path)
+ os.mkdir(path)
+ osutils.copy_ownership_from_path(path)
+
+
+def config_dir():
+ """Return per-user configuration directory as unicode string
+
+ By default this is %APPDATA%/bazaar/2.0 on Windows, ~/.bazaar on Mac OS X
+ and Linux. On Linux, if there is a $XDG_CONFIG_HOME/bazaar directory,
+ that will be used instead.
+
+ TODO: Global option --config-dir to override this.
+ """
+ base = osutils.path_from_environ('BZR_HOME')
+ if sys.platform == 'win32':
+ if base is None:
+ base = win32utils.get_appdata_location()
+ if base is None:
+ base = win32utils.get_home_location()
+ # GZ 2012-02-01: Really the two level subdirs only make sense inside
+ # APPDATA, but hard to move. See bug 348640 for more.
+ return osutils.pathjoin(base, 'bazaar', '2.0')
+ if base is None:
+ # GZ 2012-02-01: What should OSX use instead of XDG if anything?
+ if sys.platform != 'darwin':
+ xdg_dir = osutils.path_from_environ('XDG_CONFIG_HOME')
+ if xdg_dir is None:
+ xdg_dir = osutils.pathjoin(osutils._get_home_dir(), ".config")
+ xdg_dir = osutils.pathjoin(xdg_dir, 'bazaar')
+ if osutils.isdir(xdg_dir):
+ trace.mutter(
+ "Using configuration in XDG directory %s." % xdg_dir)
+ return xdg_dir
+ base = osutils._get_home_dir()
+ return osutils.pathjoin(base, ".bazaar")
+
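+# Resolution order implemented above (non-Windows, summarising the code):
+#
+#   $BZR_HOME/.bazaar           if BZR_HOME is set
+#   $XDG_CONFIG_HOME/bazaar     if that directory already exists (not on OS X)
+#   ~/.config/bazaar            if it already exists and XDG_CONFIG_HOME is unset
+#   ~/.bazaar                   otherwise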
+
+def config_filename():
+ """Return per-user configuration ini file filename."""
+ return osutils.pathjoin(config_dir(), 'bazaar.conf')
+
+
+def locations_config_filename():
+ """Return per-user configuration ini file filename."""
+ return osutils.pathjoin(config_dir(), 'locations.conf')
+
+
+def authentication_config_filename():
+ """Return per-user authentication ini file filename."""
+ return osutils.pathjoin(config_dir(), 'authentication.conf')
+
+
+def user_ignore_config_filename():
+ """Return the user default ignore filename"""
+ return osutils.pathjoin(config_dir(), 'ignore')
+
+
+def crash_dir():
+ """Return the directory name to store crash files.
+
+ This doesn't implicitly create it.
+
+ On Windows it's in the config directory; elsewhere it's /var/crash
+ which may be monitored by apport. It can be overridden by
+ $APPORT_CRASH_DIR.
+ """
+ if sys.platform == 'win32':
+ return osutils.pathjoin(config_dir(), 'Crash')
+ else:
+ # XXX: hardcoded in apport_python_hook.py; therefore here too -- mbp
+ # 2010-01-31
+ return os.environ.get('APPORT_CRASH_DIR', '/var/crash')
+
+
+def xdg_cache_dir():
+ # See http://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+ # Possibly this should be different on Windows?
+ e = os.environ.get('XDG_CACHE_DIR', None)
+ if e:
+ return e
+ else:
+ return os.path.expanduser('~/.cache')
+
+
+def _get_default_mail_domain():
+ """If possible, return the assumed default email domain.
+
+ :returns: string mail domain, or None.
+ """
+ if sys.platform == 'win32':
+ # No implementation yet; patches welcome
+ return None
+ try:
+ f = open('/etc/mailname')
+ except (IOError, OSError), e:
+ return None
+ try:
+ domain = f.read().strip()
+ return domain
+ finally:
+ f.close()
+
+
+def default_email():
+ v = os.environ.get('BZR_EMAIL')
+ if v:
+ return v.decode(osutils.get_user_encoding())
+ v = os.environ.get('EMAIL')
+ if v:
+ return v.decode(osutils.get_user_encoding())
+ name, email = _auto_user_id()
+ if name and email:
+ return u'%s <%s>' % (name, email)
+ elif email:
+ return email
+ raise errors.NoWhoami()
+
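+# Summary of the precedence implemented above: BZR_EMAIL, then EMAIL, then an
+# identity derived from the passwd entry and /etc/mailname (see
+# _auto_user_id() below); errors.NoWhoami is raised if none of these apply.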
+
+def _auto_user_id():
+ """Calculate automatic user identification.
+
+ :returns: (realname, email), either of which may be None if they can't be
+ determined.
+
+ Only used when none is set in the environment or the id file.
+
+ This only returns an email address if we can be fairly sure the
+ address is reasonable, ie if /etc/mailname is set on unix.
+
+ This doesn't use the FQDN as the default domain because that may be
+ slow, and it doesn't use the hostname alone because that's not normally
+ a reasonable address.
+ """
+ if sys.platform == 'win32':
+ # No implementation to reliably determine Windows default mail
+ # address; please add one.
+ return None, None
+
+ default_mail_domain = _get_default_mail_domain()
+ if not default_mail_domain:
+ return None, None
+
+ import pwd
+ uid = os.getuid()
+ try:
+ w = pwd.getpwuid(uid)
+ except KeyError:
+ trace.mutter('no passwd entry for uid %d?' % uid)
+ return None, None
+
+ # we try utf-8 first, because on many variants (like Linux),
+ # /etc/passwd "should" be in utf-8, and because it's unlikely to give
+ # false positives. (many users will have their user encoding set to
+ # latin-1, which cannot raise UnicodeError.)
+ try:
+ gecos = w.pw_gecos.decode('utf-8')
+ encoding = 'utf-8'
+ except UnicodeError:
+ try:
+ encoding = osutils.get_user_encoding()
+ gecos = w.pw_gecos.decode(encoding)
+ except UnicodeError, e:
+ trace.mutter("cannot decode passwd entry %s" % w)
+ return None, None
+ try:
+ username = w.pw_name.decode(encoding)
+ except UnicodeError, e:
+ trace.mutter("cannot decode passwd entry %s" % w)
+ return None, None
+
+ comma = gecos.find(',')
+ if comma == -1:
+ realname = gecos
+ else:
+ realname = gecos[:comma]
+
+ return realname, (username + '@' + default_mail_domain)
+
+
+def parse_username(username):
+ """Parse e-mail username and return a (name, address) tuple."""
+ match = re.match(r'(.*?)\s*<?([\w+.-]+@[\w+.-]+)>?', username)
+ if match is None:
+ return (username, '')
+ else:
+ return (match.group(1), match.group(2))
+
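+# Illustrative examples of the parsing above (expected results, not doctests):
+#
+#   parse_username('Jane Tester <jane@test.com>') -> ('Jane Tester', 'jane@test.com')
+#   parse_username('jane@test.com')               -> ('', 'jane@test.com')
+#   parse_username('Jane Tester')                 -> ('Jane Tester', '')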
+
+def extract_email_address(e):
+ """Return just the address part of an email string.
+
+ That is just the user@domain part, nothing else.
+ This part is required to contain only ascii characters.
+ If it can't be extracted, raises an error.
+
+ >>> extract_email_address('Jane Tester <jane@test.com>')
+ "jane@test.com"
+ """
+ name, email = parse_username(e)
+ if not email:
+ raise errors.NoEmailInUsername(e)
+ return email
+
+
+class TreeConfig(IniBasedConfig):
+ """Branch configuration data associated with its contents, not location"""
+
+ # XXX: Really needs a better name, as this is not part of the tree! -- mbp 20080507
+
+ def __init__(self, branch):
+ self._config = branch._get_config()
+ self.branch = branch
+
+ def _get_parser(self, file=None):
+ if file is not None:
+ return IniBasedConfig._get_parser(file)
+ return self._config._get_configobj()
+
+ def get_option(self, name, section=None, default=None):
+ self.branch.lock_read()
+ try:
+ return self._config.get_option(name, section, default)
+ finally:
+ self.branch.unlock()
+
+ def set_option(self, value, name, section=None):
+ """Set a per-branch configuration option"""
+ # FIXME: We shouldn't need to lock explicitly here but rather rely on
+ # higher levels providing the right lock -- vila 20101004
+ self.branch.lock_write()
+ try:
+ self._config.set_option(value, name, section)
+ finally:
+ self.branch.unlock()
+
+ def remove_option(self, option_name, section_name=None):
+ # FIXME: We shouldn't need to lock explicitly here but rather rely on
+ # higher levels providing the right lock -- vila 20101004
+ self.branch.lock_write()
+ try:
+ self._config.remove_option(option_name, section_name)
+ finally:
+ self.branch.unlock()
+
+
+class AuthenticationConfig(object):
+ """The authentication configuration file based on a ini file.
+
+ Implements the authentication.conf file described in
+ doc/developers/authentication-ring.txt.
+ """
+
+ def __init__(self, _file=None):
+ self._config = None # The ConfigObj
+ if _file is None:
+ self._input = self._filename = authentication_config_filename()
+ else:
+ # Tests can provide a string as _file
+ self._filename = None
+ self._input = _file
+
+ def _get_config(self):
+ if self._config is not None:
+ return self._config
+ try:
+ # FIXME: Should we validate something here ? Includes: empty
+ # sections are useless, at least one of
+ # user/password/password_encoding should be defined, etc.
+
+ # Note: the encoding below declares that the file itself is utf-8
+ # encoded, but the values in the ConfigObj are always Unicode.
+ self._config = ConfigObj(self._input, encoding='utf-8')
+ except configobj.ConfigObjError, e:
+ raise errors.ParseConfigError(e.errors, e.config.filename)
+ except UnicodeError:
+ raise errors.ConfigContentError(self._filename)
+ return self._config
+
+ def _save(self):
+ """Save the config file, only tests should use it for now."""
+ conf_dir = os.path.dirname(self._filename)
+ ensure_config_dir_exists(conf_dir)
+ f = file(self._filename, 'wb')
+ try:
+ self._get_config().write(f)
+ finally:
+ f.close()
+
+ def _set_option(self, section_name, option_name, value):
+ """Set an authentication configuration option"""
+ conf = self._get_config()
+ section = conf.get(section_name)
+ if section is None:
+ conf[section_name] = {}
+ section = conf[section_name]
+ section[option_name] = value
+ self._save()
+
+ def get_credentials(self, scheme, host, port=None, user=None, path=None,
+ realm=None):
+ """Returns the matching credentials from authentication.conf file.
+
+ :param scheme: protocol
+
+ :param host: the server address
+
+ :param port: the associated port (optional)
+
+ :param user: login (optional)
+
+ :param path: the absolute path on the server (optional)
+
+ :param realm: the http authentication realm (optional)
+
+ :return: A dict containing the matching credentials or None.
+ This includes:
+ - name: the section name of the credentials in the
+ authentication.conf file,
+ - user: can't be different from the provided user if any,
+ - scheme: the server protocol,
+ - host: the server address,
+ - port: the server port (can be None),
+ - path: the absolute server path (can be None),
+ - realm: the http specific authentication realm (can be None),
+ - password: the decoded password, could be None if the credential
+ defines only the user
+ - verify_certificates: https specific, True if the server
+ certificate should be verified, False otherwise.
+ """
+ credentials = None
+ for auth_def_name, auth_def in self._get_config().items():
+ if type(auth_def) is not configobj.Section:
+ raise ValueError("%s defined outside a section" % auth_def_name)
+
+ a_scheme, a_host, a_user, a_path = map(
+ auth_def.get, ['scheme', 'host', 'user', 'path'])
+
+ try:
+ a_port = auth_def.as_int('port')
+ except KeyError:
+ a_port = None
+ except ValueError:
+ raise ValueError("'port' not numeric in %s" % auth_def_name)
+ try:
+ a_verify_certificates = auth_def.as_bool('verify_certificates')
+ except KeyError:
+ a_verify_certificates = True
+ except ValueError:
+ raise ValueError(
+ "'verify_certificates' not boolean in %s" % auth_def_name)
+
+ # Attempt matching
+ if a_scheme is not None and scheme != a_scheme:
+ continue
+ if a_host is not None:
+ if not (host == a_host
+ or (a_host.startswith('.') and host.endswith(a_host))):
+ continue
+ if a_port is not None and port != a_port:
+ continue
+ if (a_path is not None and path is not None
+ and not path.startswith(a_path)):
+ continue
+ if (a_user is not None and user is not None
+ and a_user != user):
+ # Never contradict the caller about the user to be used
+ continue
+ if a_user is None:
+ # Can't find a user
+ continue
+ # Prepare a credentials dictionary with additional keys
+ # for the credential providers
+ credentials = dict(name=auth_def_name,
+ user=a_user,
+ scheme=a_scheme,
+ host=host,
+ port=port,
+ path=path,
+ realm=realm,
+ password=auth_def.get('password', None),
+ verify_certificates=a_verify_certificates)
+ # Decode the password in the credentials (or get one)
+ self.decode_password(credentials,
+ auth_def.get('password_encoding', None))
+ if 'auth' in debug.debug_flags:
+ trace.mutter("Using authentication section: %r", auth_def_name)
+ break
+
+ if credentials is None:
+ # No credentials were found in authentication.conf, try the fallback
+ # credentials stores.
+ credentials = credential_store_registry.get_fallback_credentials(
+ scheme, host, port, user, path, realm)
+
+ return credentials
+
+ def set_credentials(self, name, host, user, scheme=None, password=None,
+ port=None, path=None, verify_certificates=None,
+ realm=None):
+ """Set authentication credentials for a host.
+
+ Any existing credentials with matching scheme, host, port and path
+ will be deleted, regardless of name.
+
+ :param name: An arbitrary name to describe this set of credentials.
+ :param host: Name of the host that accepts these credentials.
+ :param user: The username portion of these credentials.
+ :param scheme: The URL scheme (e.g. ssh, http) the credentials apply
+ to.
+ :param password: Password portion of these credentials.
+ :param port: The IP port on the host that these credentials apply to.
+ :param path: A filesystem path on the host that these credentials
+ apply to.
+ :param verify_certificates: On https, verify server certificates if
+ True.
+ :param realm: The http authentication realm (optional).
+ """
+ values = {'host': host, 'user': user}
+ if password is not None:
+ values['password'] = password
+ if scheme is not None:
+ values['scheme'] = scheme
+ if port is not None:
+ values['port'] = '%d' % port
+ if path is not None:
+ values['path'] = path
+ if verify_certificates is not None:
+ values['verify_certificates'] = str(verify_certificates)
+ if realm is not None:
+ values['realm'] = realm
+ config = self._get_config()
+ for section, existing_values in config.items():
+ for key in ('scheme', 'host', 'port', 'path', 'realm'):
+ if existing_values.get(key) != values.get(key):
+ break
+ else:
+ del config[section]
+ config.update({name: values})
+ self._save()
+
+ def get_user(self, scheme, host, port=None, realm=None, path=None,
+ prompt=None, ask=False, default=None):
+ """Get a user from authentication file.
+
+ :param scheme: protocol
+
+ :param host: the server address
+
+ :param port: the associated port (optional)
+
+ :param realm: the realm sent by the server (optional)
+
+ :param path: the absolute path on the server (optional)
+
+ :param ask: Ask the user if there is no explicitly configured username
+ (optional)
+
+ :param default: The username returned if none is defined (optional).
+
+ :return: The found user.
+ """
+ credentials = self.get_credentials(scheme, host, port, user=None,
+ path=path, realm=realm)
+ if credentials is not None:
+ user = credentials['user']
+ else:
+ user = None
+ if user is None:
+ if ask:
+ if prompt is None:
+ # Create a default prompt suitable for most cases
+ prompt = u'%s' % (scheme.upper(),) + u' %(host)s username'
+ # Special handling for optional fields in the prompt
+ if port is not None:
+ prompt_host = '%s:%d' % (host, port)
+ else:
+ prompt_host = host
+ user = ui.ui_factory.get_username(prompt, host=prompt_host)
+ else:
+ user = default
+ return user
+
+ def get_password(self, scheme, host, user, port=None,
+ realm=None, path=None, prompt=None):
+ """Get a password from authentication file or prompt the user for one.
+
+ :param scheme: protocol
+
+ :param host: the server address
+
+ :param port: the associated port (optional)
+
+ :param user: login
+
+ :param realm: the realm sent by the server (optional)
+
+ :param path: the absolute path on the server (optional)
+
+ :return: The found password or the one entered by the user.
+ """
+ credentials = self.get_credentials(scheme, host, port, user, path,
+ realm)
+ if credentials is not None:
+ password = credentials['password']
+ if password is not None and scheme == 'ssh':
+ trace.warning('password ignored in section [%s],'
+ ' use an ssh agent instead'
+ % credentials['name'])
+ password = None
+ else:
+ password = None
+ # Prompt user only if we couldn't find a password
+ if password is None:
+ if prompt is None:
+ # Create a default prompt suitable for most cases
+ prompt = u'%s' % scheme.upper() + u' %(user)s@%(host)s password'
+ # Special handling for optional fields in the prompt
+ if port is not None:
+ prompt_host = '%s:%d' % (host, port)
+ else:
+ prompt_host = host
+ password = ui.ui_factory.get_password(prompt,
+ host=prompt_host, user=user)
+ return password
+
+ def decode_password(self, credentials, encoding):
+ try:
+ cs = credential_store_registry.get_credential_store(encoding)
+ except KeyError:
+ raise ValueError('%r is not a known password_encoding' % encoding)
+ credentials['password'] = cs.decode_password(credentials)
+ return credentials
+
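+# Illustrative example (assumed file contents): a minimal authentication.conf
+# section that get_credentials() above would match for scheme='http' and
+# host='bazaar.example.com':
+#
+#   [example]
+#   scheme=http
+#   host=bazaar.example.com
+#   user=jane
+#   password=secret
+#   password_encoding=plain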
+
+class CredentialStoreRegistry(registry.Registry):
+ """A class that registers credential stores.
+
+ A credential store provides access to credentials via the password_encoding
+ field in authentication.conf sections.
+
+ Except for stores provided by bzr itself, most stores are expected to be
+ provided by plugins that will therefore use
+ register_lazy(password_encoding, module_name, member_name, help=help,
+ fallback=fallback) to install themselves.
+
+ A fallback credential store is one that is queried if no credentials can be
+ found via authentication.conf.
+ """
+
+ def get_credential_store(self, encoding=None):
+ cs = self.get(encoding)
+ if callable(cs):
+ cs = cs()
+ return cs
+
+ def is_fallback(self, name):
+ """Check if the named credentials store should be used as fallback."""
+ return self.get_info(name)
+
+ def get_fallback_credentials(self, scheme, host, port=None, user=None,
+ path=None, realm=None):
+ """Request credentials from all fallback credentials stores.
+
+ The first credentials store that can provide credentials wins.
+ """
+ credentials = None
+ for name in self.keys():
+ if not self.is_fallback(name):
+ continue
+ cs = self.get_credential_store(name)
+ credentials = cs.get_credentials(scheme, host, port, user,
+ path, realm)
+ if credentials is not None:
+ # We found some credentials
+ break
+ return credentials
+
+ def register(self, key, obj, help=None, override_existing=False,
+ fallback=False):
+ """Register a new object to a name.
+
+ :param key: This is the key to use to request the object later.
+ :param obj: The object to register.
+ :param help: Help text for this entry. This may be a string or
+ a callable. If it is a callable, it should take two
+ parameters (registry, key): this registry and the key that
+ the help was registered under.
+ :param override_existing: Raise KeyError if False and something has
+ already been registered for that key. If True, ignore if there
+ is an existing key (always register the new value).
+ :param fallback: Whether this credential store should be
+ used as fallback.
+ """
+ return super(CredentialStoreRegistry,
+ self).register(key, obj, help, info=fallback,
+ override_existing=override_existing)
+
+ def register_lazy(self, key, module_name, member_name,
+ help=None, override_existing=False,
+ fallback=False):
+ """Register a new credential store to be loaded on request.
+
+ :param module_name: The python path to the module. Such as 'os.path'.
+ :param member_name: The member of the module to return. If empty or
+ None, get() will return the module itself.
+ :param help: Help text for this entry. This may be a string or
+ a callable.
+ :param override_existing: If True, replace the existing object
+ with the new one. If False, if there is already something
+ registered with the same key, raise a KeyError
+ :param fallback: Whether this credential store should be
+ used as fallback.
+ """
+ return super(CredentialStoreRegistry, self).register_lazy(
+ key, module_name, member_name, help,
+ info=fallback, override_existing=override_existing)
+
+
+credential_store_registry = CredentialStoreRegistry()
+
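+# Illustrative sketch (hypothetical module and class names): a plugin providing
+# an external password store would typically register itself lazily, e.g.
+#
+#   credential_store_registry.register_lazy(
+#       'my-keyring', 'bzrlib.plugins.my_keyring', 'MyKeyringCredentialStore',
+#       help='Obtain passwords from a keyring.', fallback=True)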
+
+class CredentialStore(object):
+ """An abstract class to implement storage for credentials"""
+
+ def decode_password(self, credentials):
+ """Returns a clear text password for the provided credentials."""
+ raise NotImplementedError(self.decode_password)
+
+ def get_credentials(self, scheme, host, port=None, user=None, path=None,
+ realm=None):
+ """Return the matching credentials from this credential store.
+
+ This method is only called on fallback credential stores.
+ """
+ raise NotImplementedError(self.get_credentials)
+
+
+class PlainTextCredentialStore(CredentialStore):
+ __doc__ = """Plain text credential store for the authentication.conf file"""
+
+ def decode_password(self, credentials):
+ """See CredentialStore.decode_password."""
+ return credentials['password']
+
+
+credential_store_registry.register('plain', PlainTextCredentialStore,
+ help=PlainTextCredentialStore.__doc__)
+credential_store_registry.default_key = 'plain'
+
+
+class BzrDirConfig(object):
+
+ def __init__(self, bzrdir):
+ self._bzrdir = bzrdir
+ self._config = bzrdir._get_config()
+
+ def set_default_stack_on(self, value):
+ """Set the default stacking location.
+
+ It may be set to a location, or None.
+
+ This policy affects all branches contained by this control dir, except
+ for those under repositories.
+ """
+ if self._config is None:
+ raise errors.BzrError("Cannot set configuration in %s" % self._bzrdir)
+ if value is None:
+ self._config.set_option('', 'default_stack_on')
+ else:
+ self._config.set_option(value, 'default_stack_on')
+
+ def get_default_stack_on(self):
+ """Return the default stacking location.
+
+ This will either be a location, or None.
+
+ This policy affects all branches contained by this control dir, except
+ for those under repositories.
+ """
+ if self._config is None:
+ return None
+ value = self._config.get_option('default_stack_on')
+ if value == '':
+ value = None
+ return value
+
+
+class TransportConfig(object):
+ """A Config that reads/writes a config file on a Transport.
+
+ It is a low-level object that considers config data to be name/value pairs
+ that may be associated with a section. Assigning meaning to these values
+ is done at higher levels like TreeConfig.
+ """
+
+ def __init__(self, transport, filename):
+ self._transport = transport
+ self._filename = filename
+
+ def get_option(self, name, section=None, default=None):
+ """Return the value associated with a named option.
+
+ :param name: The name of the value
+ :param section: The section the option is in (if any)
+ :param default: The value to return if the value is not set
+ :return: The value or default value
+ """
+ configobj = self._get_configobj()
+ if section is None:
+ section_obj = configobj
+ else:
+ try:
+ section_obj = configobj[section]
+ except KeyError:
+ return default
+ value = section_obj.get(name, default)
+ for hook in OldConfigHooks['get']:
+ hook(self, name, value)
+ return value
+
+ def set_option(self, value, name, section=None):
+ """Set the value associated with a named option.
+
+ :param value: The value to set
+ :param name: The name of the value to set
+ :param section: The section the option is in (if any)
+ """
+ configobj = self._get_configobj()
+ if section is None:
+ configobj[name] = value
+ else:
+ configobj.setdefault(section, {})[name] = value
+ for hook in OldConfigHooks['set']:
+ hook(self, name, value)
+ self._set_configobj(configobj)
+
+ def remove_option(self, option_name, section_name=None):
+ configobj = self._get_configobj()
+ if section_name is None:
+ del configobj[option_name]
+ else:
+ del configobj[section_name][option_name]
+ for hook in OldConfigHooks['remove']:
+ hook(self, option_name)
+ self._set_configobj(configobj)
+
+ def _get_config_file(self):
+ try:
+ f = StringIO(self._transport.get_bytes(self._filename))
+ for hook in OldConfigHooks['load']:
+ hook(self)
+ return f
+ except errors.NoSuchFile:
+ return StringIO()
+ except errors.PermissionDenied, e:
+ trace.warning("Permission denied while trying to open "
+ "configuration file %s.", urlutils.unescape_for_display(
+ urlutils.join(self._transport.base, self._filename), "utf-8"))
+ return StringIO()
+
+ def _external_url(self):
+ return urlutils.join(self._transport.external_url(), self._filename)
+
+ def _get_configobj(self):
+ f = self._get_config_file()
+ try:
+ try:
+ conf = ConfigObj(f, encoding='utf-8')
+ except configobj.ConfigObjError, e:
+ raise errors.ParseConfigError(e.errors, self._external_url())
+ except UnicodeDecodeError:
+ raise errors.ConfigContentError(self._external_url())
+ finally:
+ f.close()
+ return conf
+
+ def _set_configobj(self, configobj):
+ out_file = StringIO()
+ configobj.write(out_file)
+ out_file.seek(0)
+ self._transport.put_file(self._filename, out_file)
+ for hook in OldConfigHooks['save']:
+ hook(self)
+
+
+class Option(object):
+ """An option definition.
+
+ The option *values* are stored in config files and found in sections.
+
+ Here we define various properties about the option itself, its default
+ value, how to convert it from stores, what to do when invalid values are
+ encountered, in which config files it can be stored.
+ """
+
+ def __init__(self, name, override_from_env=None,
+ default=None, default_from_env=None,
+ help=None, from_unicode=None, invalid=None, unquote=True):
+ """Build an option definition.
+
+ :param name: the name used to refer to the option.
+
+ :param override_from_env: A list of environment variables which can
+ override any configuration setting.
+
+ :param default: the default value to use when none exist in the config
+ stores. This is either a string that ``from_unicode`` will convert
+ into the proper type, a callable returning a unicode string so that
+ ``from_unicode`` can be used on the return value, or a python
+ object that can be stringified (so only the empty list is supported
+ for example).
+
+ :param default_from_env: A list of environment variables which can
+ provide a default value. 'default' will be used only if none of the
+ variables specified here are set in the environment.
+
+ :param help: a doc string to explain the option to the user.
+
+ :param from_unicode: a callable to convert the unicode string
+ representing the option value in a store. This is not called for
+ the default value.
+
+ :param invalid: the action to be taken when an invalid value is
+ encountered in a store. This is called only when from_unicode is
+ invoked to convert a string and returns None or raises ValueError or
+ TypeError. Accepted values are: None (ignore invalid values),
+ 'warning' (emit a warning), 'error' (emit an error message and
+ terminate).
+
+ :param unquote: should the unicode value be unquoted before conversion.
+ This should be used only when the store providing the values cannot
+ safely unquote them (see http://pad.lv/906897). It is provided so
+ daughter classes can handle the quoting themselves.
+ """
+ if override_from_env is None:
+ override_from_env = []
+ if default_from_env is None:
+ default_from_env = []
+ self.name = name
+ self.override_from_env = override_from_env
+ # Convert the default value to a unicode string so all values are
+ # strings internally before conversion (via from_unicode) is attempted.
+ if default is None:
+ self.default = None
+ elif isinstance(default, list):
+ # Only the empty list is supported
+ if default:
+ raise AssertionError(
+ 'Only empty lists are supported as default values')
+ self.default = u','
+ elif isinstance(default, (str, unicode, bool, int, float)):
+ # Rely on python to convert strings, booleans and integers
+ self.default = u'%s' % (default,)
+ elif callable(default):
+ self.default = default
+ else:
+ # other python objects are not expected
+ raise AssertionError('%r is not supported as a default value'
+ % (default,))
+ self.default_from_env = default_from_env
+ self._help = help
+ self.from_unicode = from_unicode
+ self.unquote = unquote
+ if invalid and invalid not in ('warning', 'error'):
+ raise AssertionError("%s not supported for 'invalid'" % (invalid,))
+ self.invalid = invalid
+
+ @property
+ def help(self):
+ return self._help
+
+ def convert_from_unicode(self, store, unicode_value):
+ if self.unquote and store is not None and unicode_value is not None:
+ unicode_value = store.unquote(unicode_value)
+ if self.from_unicode is None or unicode_value is None:
+ # Don't convert or nothing to convert
+ return unicode_value
+ try:
+ converted = self.from_unicode(unicode_value)
+ except (ValueError, TypeError):
+ # Invalid values are ignored
+ converted = None
+ if converted is None and self.invalid is not None:
+ # The conversion failed
+ if self.invalid == 'warning':
+ trace.warning('Value "%s" is not valid for "%s"',
+ unicode_value, self.name)
+ elif self.invalid == 'error':
+ raise errors.ConfigOptionValueError(self.name, unicode_value)
+ return converted
+
+ def get_override(self):
+ value = None
+ for var in self.override_from_env:
+ try:
+ # If the env variable is defined, its value takes precedence
+ value = os.environ[var].decode(osutils.get_user_encoding())
+ break
+ except KeyError:
+ continue
+ return value
+
+ def get_default(self):
+ value = None
+ for var in self.default_from_env:
+ try:
+ # If the env variable is defined, its value is the default one
+ value = os.environ[var].decode(osutils.get_user_encoding())
+ break
+ except KeyError:
+ continue
+ if value is None:
+ # Otherwise, fallback to the value defined at registration
+ if callable(self.default):
+ value = self.default()
+ if not isinstance(value, unicode):
+ raise AssertionError(
+ "Callable default value for '%s' should be unicode"
+ % (self.name))
+ else:
+ value = self.default
+ return value
+
+ def get_help_topic(self):
+ return self.name
+
+ def get_help_text(self, additional_see_also=None, plain=True):
+ result = self.help
+ from bzrlib import help_topics
+ result += help_topics._format_see_also(additional_see_also)
+ if plain:
+ result = help_topics.help_as_plain_text(result)
+ return result
+
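+# Illustrative sketch (hypothetical option name): a definition typically
+# combines a default, an environment fallback and a converter, e.g.
+#
+#   opt = Option('example.timeout', default=u'30',
+#                default_from_env=['EXAMPLE_TIMEOUT'],
+#                from_unicode=int_from_store, invalid='warning',
+#                help='Hypothetical timeout, in seconds.')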
+
+# Predefined converters to get proper values from store
+
+def bool_from_store(unicode_str):
+ return ui.bool_from_string(unicode_str)
+
+
+def int_from_store(unicode_str):
+ return int(unicode_str)
+
+
+_unit_suffixes = dict(K=10**3, M=10**6, G=10**9)
+
+def int_SI_from_store(unicode_str):
+ """Convert a human readable size in SI units, e.g 10MB into an integer.
+
+ Accepted suffixes are K,M,G. It is case-insensitive and may be followed
+ by a trailing b (i.e. Kb, MB). This is intended to be practical and not
+ pedantic.
+
+ :return: Integer, expanded to its base-10 value if a proper SI unit is
+ found, None otherwise.
+ """
+ regexp = "^(\d+)(([" + ''.join(_unit_suffixes) + "])b?)?$"
+ p = re.compile(regexp, re.IGNORECASE)
+ m = p.match(unicode_str)
+ val = None
+ if m is not None:
+ val, _, unit = m.groups()
+ val = int(val)
+ if unit:
+ try:
+ coeff = _unit_suffixes[unit.upper()]
+ except KeyError:
+ raise ValueError(gettext('{0} is not an SI unit.').format(unit))
+ val *= coeff
+ return val
+
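+# Illustrative conversions performed by int_SI_from_store() above:
+#
+#   int_SI_from_store(u'20MB')  -> 20 * 10**6
+#   int_SI_from_store(u'1g')    -> 10**9
+#   int_SI_from_store(u'42')    -> 42
+#   int_SI_from_store(u'fifty') -> None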
+
+def float_from_store(unicode_str):
+ return float(unicode_str)
+
+
+# Use an empty dict to initialize an empty configobj avoiding all
+# parsing and encoding checks
+_list_converter_config = configobj.ConfigObj(
+ {}, encoding='utf-8', list_values=True, interpolation=False)
+
+
+class ListOption(Option):
+
+ def __init__(self, name, default=None, default_from_env=None,
+ help=None, invalid=None):
+ """A list Option definition.
+
+ This overrides the base class so the conversion from a unicode string
+ can take quoting into account.
+ """
+ super(ListOption, self).__init__(
+ name, default=default, default_from_env=default_from_env,
+ from_unicode=self.from_unicode, help=help,
+ invalid=invalid, unquote=False)
+
+ def from_unicode(self, unicode_str):
+ if not isinstance(unicode_str, basestring):
+ raise TypeError
+ # Now inject our string directly as unicode. All callers got their
+ # value from configobj, so values that need to be quoted are already
+ # properly quoted.
+ _list_converter_config.reset()
+ _list_converter_config._parse([u"list=%s" % (unicode_str,)])
+ maybe_list = _list_converter_config['list']
+ if isinstance(maybe_list, basestring):
+ if maybe_list:
+ # A single value, most probably the user forgot (or didn't care
+ # to add) the final ','
+ l = [maybe_list]
+ else:
+ # The empty string, convert to empty list
+ l = []
+ else:
+ # We rely on ConfigObj providing us with a list already
+ l = maybe_list
+ return l
+
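+# Illustrative conversions performed by ListOption.from_unicode() above
+# (using a hypothetical option named 'foo'):
+#
+#   ListOption('foo').from_unicode(u'a,b,c') -> ['a', 'b', 'c']
+#   ListOption('foo').from_unicode(u'a')     -> ['a']
+#   ListOption('foo').from_unicode(u'')      -> []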
+
+class RegistryOption(Option):
+ """Option for a choice from a registry."""
+
+ def __init__(self, name, registry, default_from_env=None,
+ help=None, invalid=None):
+ """A registry based Option definition.
+
+ This overrides the base class so the conversion from a unicode string
+ can take quoting into account.
+ """
+ super(RegistryOption, self).__init__(
+ name, default=lambda: unicode(registry.default_key),
+ default_from_env=default_from_env,
+ from_unicode=self.from_unicode, help=help,
+ invalid=invalid, unquote=False)
+ self.registry = registry
+
+ def from_unicode(self, unicode_str):
+ if not isinstance(unicode_str, basestring):
+ raise TypeError
+ try:
+ return self.registry.get(unicode_str)
+ except KeyError:
+ raise ValueError(
+ "Invalid value %s for %s."
+ "See help for a list of possible values." % (unicode_str,
+ self.name))
+
+ @property
+ def help(self):
+ ret = [self._help, "\n\nThe following values are supported:\n"]
+ for key in self.registry.keys():
+ ret.append(" %s - %s\n" % (key, self.registry.get_help(key)))
+ return "".join(ret)
+
+
+class OptionRegistry(registry.Registry):
+ """Register config options by their name.
+
+ This overrides ``registry.Registry`` to simplify registration by acquiring
+ some information from the option object itself.
+ """
+
+ def register(self, option):
+ """Register a new option to its name.
+
+ :param option: The option to register. Its name is used as the key.
+ """
+ super(OptionRegistry, self).register(option.name, option,
+ help=option.help)
+
+ def register_lazy(self, key, module_name, member_name):
+ """Register a new option to be loaded on request.
+
+ :param key: the key to request the option later. Since the registration
+ is lazy, it should be provided and match the option name.
+
+ :param module_name: the python path to the module. Such as 'os.path'.
+
+ :param member_name: the member of the module to return. If empty or
+ None, get() will return the module itself.
+ """
+ super(OptionRegistry, self).register_lazy(key,
+ module_name, member_name)
+
+ def get_help(self, key=None):
+ """Get the help text associated with the given key"""
+ option = self.get(key)
+ the_help = option.help
+ if callable(the_help):
+ return the_help(self, key)
+ return the_help
+
+
+option_registry = OptionRegistry()
+
+
+# Registered options in lexicographical order
+
+option_registry.register(
+ Option('append_revisions_only',
+ default=None, from_unicode=bool_from_store, invalid='warning',
+ help='''\
+Whether to only append revisions to the mainline.
+
+If this is set to true, then it is not possible to change the
+existing mainline of the branch.
+'''))
+option_registry.register(
+ ListOption('acceptable_keys',
+ default=None,
+ help="""\
+List of GPG key patterns which are acceptable for verification.
+"""))
+option_registry.register(
+ Option('add.maximum_file_size',
+ default=u'20MB', from_unicode=int_SI_from_store,
+ help="""\
+Size above which files should be added manually.
+
+Files below this size are added automatically when using ``bzr add`` without
+arguments.
+
+A negative value means disable the size check.
+"""))
+option_registry.register(
+ Option('bound',
+ default=None, from_unicode=bool_from_store,
+ help="""\
+Is the branch bound to ``bound_location``?
+
+If set to "True", the branch should act as a checkout, and push each commit to
+the bound_location. This option is normally set by ``bind``/``unbind``.
+
+See also: bound_location.
+"""))
+option_registry.register(
+ Option('bound_location',
+ default=None,
+ help="""\
+The location that commits should go to when acting as a checkout.
+
+This option is normally set by ``bind``.
+
+See also: bound.
+"""))
+option_registry.register(
+ Option('branch.fetch_tags', default=False, from_unicode=bool_from_store,
+ help="""\
+Whether revisions associated with tags should be fetched.
+"""))
+option_registry.register_lazy(
+ 'bzr.transform.orphan_policy', 'bzrlib.transform', 'opt_transform_orphan')
+option_registry.register(
+ Option('bzr.workingtree.worth_saving_limit', default=10,
+ from_unicode=int_from_store, invalid='warning',
+ help='''\
+How many changes before saving the dirstate.
+
+-1 means that we will never rewrite the dirstate file for only
+stat-cache changes. Regardless of this setting, we will always rewrite
+the dirstate file if a file is added/removed/renamed/etc. This flag only
+affects the behavior of updating the dirstate file after we notice that
+a file has been touched.
+'''))
+option_registry.register(
+ Option('bugtracker', default=None,
+ help='''\
+Default bug tracker to use.
+
+This bug tracker will be used for example when marking bugs
+as fixed using ``bzr commit --fixes``, if no explicit
+bug tracker was specified.
+'''))
+option_registry.register(
+ Option('check_signatures', default=CHECK_IF_POSSIBLE,
+ from_unicode=signature_policy_from_unicode,
+ help='''\
+GPG checking policy.
+
+Possible values: require, ignore, check-available (default)
+
+This option will control whether bzr will require good gpg
+signatures, ignore them, or check them if they are
+present.
+'''))
+option_registry.register(
+ Option('child_submit_format',
+ help='''The preferred format of submissions to this branch.'''))
+option_registry.register(
+ Option('child_submit_to',
+ help='''Where submissions to this branch are mailed to.'''))
+option_registry.register(
+ Option('create_signatures', default=SIGN_WHEN_REQUIRED,
+ from_unicode=signing_policy_from_unicode,
+ help='''\
+GPG Signing policy.
+
+Possible values: always, never, when-required (default)
+
+This option controls whether bzr will always create
+gpg signatures or not on commits.
+'''))
+option_registry.register(
+ Option('dirstate.fdatasync', default=True,
+ from_unicode=bool_from_store,
+ help='''\
+Flush dirstate changes onto physical disk?
+
+If true (default), working tree metadata changes are flushed through the
+OS buffers to physical disk. This is somewhat slower, but means data
+should not be lost if the machine crashes. See also repository.fdatasync.
+'''))
+option_registry.register(
+ ListOption('debug_flags', default=[],
+ help='Debug flags to activate.'))
+option_registry.register(
+ Option('default_format', default='2a',
+ help='Format used when creating branches.'))
+option_registry.register(
+ Option('dpush_strict', default=None,
+ from_unicode=bool_from_store,
+ help='''\
+The default value for ``dpush --strict``.
+
+If present, defines the ``--strict`` option default value for checking
+uncommitted changes before pushing into a different VCS without any
+custom bzr metadata.
+'''))
+option_registry.register(
+ Option('editor',
+ help='The command called to launch an editor to enter a message.'))
+option_registry.register(
+ Option('email', override_from_env=['BZR_EMAIL'], default=default_email,
+ help="The user's identity"))
+option_registry.register(
+ Option('gpg_signing_command',
+ default='gpg',
+ help="""\
+Program to use for creating signatures.
+
+This should support at least the -u and --clearsign options.
+"""))
+option_registry.register(
+ Option('gpg_signing_key',
+ default=None,
+ help="""\
+GPG key to use for signing.
+
+This defaults to the first key associated with the user's email.
+"""))
+option_registry.register(
+ Option('ignore_missing_extensions', default=False,
+ from_unicode=bool_from_store,
+ help='''\
+Control the missing extensions warning display.
+
+The warning will not be emitted if set to True.
+'''))
+option_registry.register(
+ Option('language',
+ help='Language to translate messages into.'))
+option_registry.register(
+ Option('locks.steal_dead', default=False, from_unicode=bool_from_store,
+ help='''\
+Steal locks that appear to be dead.
+
+If set to True, bzr will check if a lock is supposed to be held by an
+active process from the same user on the same machine. If the user and
+machine match, but no process with the given PID is active, then bzr
+will automatically break the stale lock, and create a new lock for
+this process.
+Otherwise, bzr will prompt as normal to break the lock.
+'''))
+option_registry.register(
+ Option('log_format', default='long',
+ help= '''\
+Log format to use when displaying revisions.
+
+Standard log formats are ``long``, ``short`` and ``line``. Additional formats
+may be provided by plugins.
+'''))
+option_registry.register_lazy('mail_client', 'bzrlib.mail_client',
+ 'opt_mail_client')
+option_registry.register(
+ Option('output_encoding',
+ help= 'Unicode encoding for output'
+ ' (terminal encoding if not specified).'))
+option_registry.register(
+ Option('parent_location',
+ default=None,
+ help="""\
+The location of the default branch for pull or merge.
+
+This option is normally set when creating a branch, the first ``pull`` or by
+``pull --remember``.
+"""))
+option_registry.register(
+ Option('post_commit', default=None,
+ help='''\
+Post commit functions.
+
+An ordered list of python functions to call, separated by spaces.
+
+Each function takes branch, rev_id as parameters.
+'''))
+option_registry.register(
+ Option('public_branch',
+ default=None,
+ help="""\
+A publicly-accessible version of this branch.
+
+This implies that the branch setting this option is not publicly-accessible.
+Used and set by ``bzr send``.
+"""))
+option_registry.register(
+ Option('push_location',
+ default=None,
+ help="""\
+The location of the default branch for push.
+
+This option is normally set by the first ``push`` or ``push --remember``.
+"""))
+option_registry.register(
+ Option('push_strict', default=None,
+ from_unicode=bool_from_store,
+ help='''\
+The default value for ``push --strict``.
+
+If present, defines the ``--strict`` option default value for checking
+uncommitted changes before sending a merge directive.
+'''))
+option_registry.register(
+ Option('repository.fdatasync', default=True,
+ from_unicode=bool_from_store,
+ help='''\
+Flush repository changes onto physical disk?
+
+If true (default), repository changes are flushed through the OS buffers
+to physical disk. This is somewhat slower, but means data should not be
+lost if the machine crashes. See also dirstate.fdatasync.
+'''))
+option_registry.register_lazy('smtp_server',
+ 'bzrlib.smtp_connection', 'smtp_server')
+option_registry.register_lazy('smtp_password',
+ 'bzrlib.smtp_connection', 'smtp_password')
+option_registry.register_lazy('smtp_username',
+ 'bzrlib.smtp_connection', 'smtp_username')
+option_registry.register(
+ Option('selftest.timeout',
+ default='600',
+ from_unicode=int_from_store,
+ help='Abort selftest if one test takes longer than this many seconds',
+ ))
+
+option_registry.register(
+ Option('send_strict', default=None,
+ from_unicode=bool_from_store,
+ help='''\
+The default value for ``send --strict``.
+
+If present, defines the ``--strict`` option default value for checking
+uncommitted changes before sending a bundle.
+'''))
+
+option_registry.register(
+ Option('serve.client_timeout',
+ default=300.0, from_unicode=float_from_store,
+ help="If we wait for a new request from a client for more than"
+ " X seconds, consider the client idle, and hangup."))
+option_registry.register(
+ Option('stacked_on_location',
+ default=None,
+ help="""The location where this branch is stacked on."""))
+option_registry.register(
+ Option('submit_branch',
+ default=None,
+ help="""\
+The branch you intend to submit your current work to.
+
+This is automatically set by ``bzr send`` and ``bzr merge``, and is also used
+by the ``submit:`` revision spec.
+"""))
+option_registry.register(
+ Option('submit_to',
+ help='''Where submissions from this branch are mailed to.'''))
+option_registry.register(
+ ListOption('suppress_warnings',
+ default=[],
+ help="List of warning classes to suppress."))
+option_registry.register(
+ Option('validate_signatures_in_log', default=False,
+ from_unicode=bool_from_store, invalid='warning',
+ help='''Whether to validate signatures in bzr log.'''))
+option_registry.register_lazy('ssl.ca_certs',
+ 'bzrlib.transport.http._urllib2_wrappers', 'opt_ssl_ca_certs')
+
+option_registry.register_lazy('ssl.cert_reqs',
+ 'bzrlib.transport.http._urllib2_wrappers', 'opt_ssl_cert_reqs')
+
+
+class Section(object):
+ """A section defines a dict of option name => value.
+
+ This is merely a read-only dict which can add some knowledge about the
+ options. It is *not* a python dict object though and doesn't try to mimic
+ its API.
+ """
+
+ def __init__(self, section_id, options):
+ self.id = section_id
+ # We re-use the dict-like object received
+ self.options = options
+
+ def get(self, name, default=None, expand=True):
+ return self.options.get(name, default)
+
+ def iter_option_names(self):
+ for k in self.options.iterkeys():
+ yield k
+
+ def __repr__(self):
+ # Mostly for debugging use
+ return "<config.%s id=%s>" % (self.__class__.__name__, self.id)
+
+
+_NewlyCreatedOption = object()
+"""Was the option created during the MutableSection lifetime"""
+_DeletedOption = object()
+"""Was the option deleted during the MutableSection lifetime"""
+
+
+class MutableSection(Section):
+ """A section allowing changes and keeping track of the original values."""
+
+ def __init__(self, section_id, options):
+ super(MutableSection, self).__init__(section_id, options)
+ self.reset_changes()
+
+ def set(self, name, value):
+ if name not in self.options:
+ # This is a new option
+ self.orig[name] = _NewlyCreatedOption
+ elif name not in self.orig:
+ self.orig[name] = self.get(name, None)
+ self.options[name] = value
+
+ def remove(self, name):
+ if name not in self.orig:
+ self.orig[name] = self.get(name, None)
+ del self.options[name]
+
+ def reset_changes(self):
+ self.orig = {}
+
+ def apply_changes(self, dirty, store):
+ """Apply option value changes.
+
+ ``self`` has been reloaded from the persistent storage. ``dirty``
+ contains the changes made since the previous loading.
+
+ :param dirty: the mutable section containing the changes.
+
+ :param store: the store containing the section
+ """
+ for k, expected in dirty.orig.iteritems():
+ actual = dirty.get(k, _DeletedOption)
+ reloaded = self.get(k, _NewlyCreatedOption)
+ if actual is _DeletedOption:
+ if k in self.options:
+ self.remove(k)
+ else:
+ self.set(k, actual)
+ # Report concurrent updates in an ad-hoc way. This should only
+ # occur when different processes try to update the same option
+ # which is not supported (as in: the config framework is not meant
+ # to be used as a sharing mechanism).
+ if expected != reloaded:
+ if actual is _DeletedOption:
+ actual = '<DELETED>'
+ if reloaded is _NewlyCreatedOption:
+ reloaded = '<CREATED>'
+ if expected is _NewlyCreatedOption:
+ expected = '<CREATED>'
+ # Someone changed the value since we got it from the persistent
+ # storage.
+ trace.warning(gettext(
+ "Option {0} in section {1} of {2} was changed"
+ " from {3} to {4}. The {5} value will be saved.".format(
+ k, self.id, store.external_url(), expected,
+ reloaded, actual)))
+ # No need to keep track of these changes
+ self.reset_changes()
+
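+# Illustrative example of the change tracking above (assumed values):
+#
+#   s = MutableSection('DEFAULT', {'a': u'1'})
+#   s.set('a', u'2')   # records the original value: s.orig['a'] == u'1'
+#   s.set('b', u'3')   # records creation: s.orig['b'] is _NewlyCreatedOption
+#   s.remove('a')      # original value already recorded, so orig is unchanged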
+
+class Store(object):
+ """Abstract interface to persistent storage for configuration options."""
+
+ readonly_section_class = Section
+ mutable_section_class = MutableSection
+
+ def __init__(self):
+ # Which sections need to be saved (by section id). We use a dict here
+ # so the dirty sections can be shared by multiple callers.
+ self.dirty_sections = {}
+
+ def is_loaded(self):
+ """Returns True if the Store has been loaded.
+
+ This is used to implement lazy loading and ensure the persistent
+ storage is queried only when needed.
+ """
+ raise NotImplementedError(self.is_loaded)
+
+ def load(self):
+ """Loads the Store from persistent storage."""
+ raise NotImplementedError(self.load)
+
+ def _load_from_string(self, bytes):
+ """Create a store from a string in configobj syntax.
+
+ :param bytes: A string representing the file content.
+ """
+ raise NotImplementedError(self._load_from_string)
+
+ def unload(self):
+ """Unloads the Store.
+
+ This should make is_loaded() return False. This is used when the caller
+ knows that the persistent storage has changed or may have changed since
+ the last load.
+ """
+ raise NotImplementedError(self.unload)
+
+ def quote(self, value):
+ """Quote a configuration option value for storing purposes.
+
+ This allows Stacks to present values as they will be stored.
+ """
+ return value
+
+ def unquote(self, value):
+ """Unquote a configuration option value into unicode.
+
+ The received value is quoted as stored.
+ """
+ return value
+
+ def save(self):
+ """Saves the Store to persistent storage."""
+ raise NotImplementedError(self.save)
+
+ def _need_saving(self):
+ for s in self.dirty_sections.values():
+ if s.orig:
+ # At least one dirty section contains a modification
+ return True
+ return False
+
+ def apply_changes(self, dirty_sections):
+ """Apply changes from dirty sections while checking for coherency.
+
+ The Store content is discarded and reloaded from persistent storage to
+ acquire up-to-date values.
+
+ Dirty sections are MutableSections which keep track of the values they
+ are expected to update.
+ """
+ # We need an up-to-date version from the persistent storage, unload the
+ # store. The reload will occur when needed (triggered by the first
+ # get_mutable_section() call below).
+ self.unload()
+ # Apply the changes from the preserved dirty sections
+ for section_id, dirty in dirty_sections.iteritems():
+ clean = self.get_mutable_section(section_id)
+ clean.apply_changes(dirty, self)
+ # Everything is clean now
+ self.dirty_sections = {}
+
+ def save_changes(self):
+ """Saves the Store to persistent storage if changes occurred.
+
+ Apply the changes recorded in the mutable sections to a store content
+ refreshed from persistent storage.
+ """
+ raise NotImplementedError(self.save_changes)
+
+ def external_url(self):
+ raise NotImplementedError(self.external_url)
+
+ def get_sections(self):
+ """Returns an ordered iterable of existing sections.
+
+ :returns: An iterable of (store, section).
+ """
+ raise NotImplementedError(self.get_sections)
+
+ def get_mutable_section(self, section_id=None):
+ """Returns the specified mutable section.
+
+ :param section_id: The section identifier
+ """
+ raise NotImplementedError(self.get_mutable_section)
+
+ def __repr__(self):
+ # Mostly for debugging use
+ return "<config.%s(%s)>" % (self.__class__.__name__,
+ self.external_url())
+
+
+class CommandLineStore(Store):
+ "A store to carry command line overrides for the config options."""
+
+ def __init__(self, opts=None):
+ super(CommandLineStore, self).__init__()
+ if opts is None:
+ opts = {}
+ self.options = {}
+ self.id = 'cmdline'
+
+ def _reset(self):
+ # The dict should be cleared but not replaced so it can be shared.
+ self.options.clear()
+
+ def _from_cmdline(self, overrides):
+ # Reset before accepting new definitions
+ self._reset()
+ for over in overrides:
+ try:
+ name, value = over.split('=', 1)
+ except ValueError:
+ raise errors.BzrCommandError(
+ gettext("Invalid '%s', should be of the form 'name=value'")
+ % (over,))
+ self.options[name] = value
+
+ def external_url(self):
+ # Not an url but it makes debugging easier and is never needed
+ # otherwise
+ return 'cmdline'
+
+ def get_sections(self):
+ yield self, self.readonly_section_class(None, self.options)
+
+
+class IniFileStore(Store):
+ """A config Store using ConfigObj for storage.
+
+ :ivar _config_obj: Private member to hold the ConfigObj instance used to
+ serialize/deserialize the config file.
+ """
+
+ def __init__(self):
+ """A config Store using ConfigObj for storage.
+ """
+ super(IniFileStore, self).__init__()
+ self._config_obj = None
+
+ def is_loaded(self):
+ return self._config_obj is not None
+
+ def unload(self):
+ self._config_obj = None
+ self.dirty_sections = {}
+
+ def _load_content(self):
+ """Load the config file bytes.
+
+ This should be provided by subclasses
+
+ :return: Byte string
+ """
+ raise NotImplementedError(self._load_content)
+
+ def _save_content(self, content):
+ """Save the config file bytes.
+
+ This should be provided by subclasses
+
+ :param content: Config file bytes to write
+ """
+ raise NotImplementedError(self._save_content)
+
+ def load(self):
+ """Load the store from the associated file."""
+ if self.is_loaded():
+ return
+ content = self._load_content()
+ self._load_from_string(content)
+ for hook in ConfigHooks['load']:
+ hook(self)
+
+ def _load_from_string(self, bytes):
+ """Create a config store from a string.
+
+ :param bytes: A string representing the file content.
+ """
+ if self.is_loaded():
+ raise AssertionError('Already loaded: %r' % (self._config_obj,))
+ co_input = StringIO(bytes)
+ try:
+ # The config files are always stored utf8-encoded
+ self._config_obj = ConfigObj(co_input, encoding='utf-8',
+ list_values=False)
+ except configobj.ConfigObjError, e:
+ self._config_obj = None
+ raise errors.ParseConfigError(e.errors, self.external_url())
+ except UnicodeDecodeError:
+ raise errors.ConfigContentError(self.external_url())
+
+ def save_changes(self):
+ if not self.is_loaded():
+ # Nothing to save
+ return
+ if not self._need_saving():
+ return
+ # Preserve the current version
+ dirty_sections = dict(self.dirty_sections.items())
+ self.apply_changes(dirty_sections)
+ # Save to the persistent storage
+ self.save()
+
+ def save(self):
+ if not self.is_loaded():
+ # Nothing to save
+ return
+ out = StringIO()
+ self._config_obj.write(out)
+ self._save_content(out.getvalue())
+ for hook in ConfigHooks['save']:
+ hook(self)
+
+ def get_sections(self):
+ """Get the configobj section in the file order.
+
+ :returns: An iterable of (store, section).
+ """
+ # We need a loaded store
+ try:
+ self.load()
+ except (errors.NoSuchFile, errors.PermissionDenied):
+ # If the file can't be read, there are no sections
+ return
+ cobj = self._config_obj
+ if cobj.scalars:
+ yield self, self.readonly_section_class(None, cobj)
+ for section_name in cobj.sections:
+ yield (self,
+ self.readonly_section_class(section_name,
+ cobj[section_name]))
+
+ def get_mutable_section(self, section_id=None):
+ # We need a loaded store
+ try:
+ self.load()
+ except errors.NoSuchFile:
+ # The file doesn't exist, let's pretend it was empty
+ self._load_from_string('')
+ if section_id in self.dirty_sections:
+ # We already created a mutable section for this id
+ return self.dirty_sections[section_id]
+ if section_id is None:
+ section = self._config_obj
+ else:
+ section = self._config_obj.setdefault(section_id, {})
+ mutable_section = self.mutable_section_class(section_id, section)
+ # All mutable sections can become dirty
+ self.dirty_sections[section_id] = mutable_section
+ return mutable_section
+
+ def quote(self, value):
+ try:
+ # configobj conflates automagical list values and quoting
+ self._config_obj.list_values = True
+ return self._config_obj._quote(value)
+ finally:
+ self._config_obj.list_values = False
+
+ def unquote(self, value):
+ if value and isinstance(value, basestring):
+ # _unquote doesn't handle None nor empty strings nor anything that
+ # is not a string, really.
+ value = self._config_obj._unquote(value)
+ return value
+
+ def external_url(self):
+ # Since an IniFileStore can be used without a file (at least in tests),
+ # it's better to provide something than raising a NotImplementedError.
+ # All daughter classes are supposed to provide an implementation
+ # anyway.
+ return 'In-Process Store, no URL'
+
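+
+# Illustrative sketch (added for exposition, not in the original module):
+# loading an IniFileStore from a literal string and walking its sections.
+# Option and section names are invented; only APIs defined above are used.
+def _example_ini_store_sections():
+    store = IniFileStore()
+    store._load_from_string(
+        'default_format = 2a\n[ALIASES]\nll = log --line\n')
+    # get_sections() yields (store, section) pairs in file order: the no-name
+    # section carrying the top-level options comes first, then named sections.
+    return [section.id for _, section in store.get_sections()]
+    # -> [None, 'ALIASES'] for the content above
+
+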
+class TransportIniFileStore(IniFileStore):
+ """IniFileStore that loads files from a transport.
+
+ :ivar transport: The transport object where the config file is located.
+
+ :ivar file_name: The config file basename in the transport directory.
+ """
+
+ def __init__(self, transport, file_name):
+ """A Store using a ini file on a Transport
+
+ :param transport: The transport object where the config file is located.
+ :param file_name: The config file basename in the transport directory.
+ """
+ super(TransportIniFileStore, self).__init__()
+ self.transport = transport
+ self.file_name = file_name
+
+ def _load_content(self):
+ try:
+ return self.transport.get_bytes(self.file_name)
+ except errors.PermissionDenied:
+ trace.warning("Permission denied while trying to load "
+ "configuration store %s.", self.external_url())
+ raise
+
+ def _save_content(self, content):
+ self.transport.put_bytes(self.file_name, content)
+
+ def external_url(self):
+        # FIXME: external_url should really accept an optional relpath
+        # parameter (bug #750169) :-/ -- vila 2011-04-04
+        # The following will do in the interim but maybe we don't want to
+        # expose a path here but rather a config ID and its associated
+        # object </hand wave>.
+ return urlutils.join(self.transport.external_url(), self.file_name)
+
+
+# Note that LockableIniFileStore inherits from TransportIniFileStore because
+# we need unlockable stores for use with objects that can already ensure the
+# locking (think branches). If different stores (not based on ConfigObj) are
+# created, they may face the same issue.
+
+
+class LockableIniFileStore(TransportIniFileStore):
+    """An IniFileStore using locks on save to ensure store integrity."""
+
+ def __init__(self, transport, file_name, lock_dir_name=None):
+ """A config Store using ConfigObj for storage.
+
+ :param transport: The transport object where the config file is located.
+
+ :param file_name: The config file basename in the transport directory.
+ """
+ if lock_dir_name is None:
+ lock_dir_name = 'lock'
+ self.lock_dir_name = lock_dir_name
+ super(LockableIniFileStore, self).__init__(transport, file_name)
+ self._lock = lockdir.LockDir(self.transport, self.lock_dir_name)
+
+ def lock_write(self, token=None):
+ """Takes a write lock in the directory containing the config file.
+
+ If the directory doesn't exist it is created.
+ """
+ # FIXME: This doesn't check the ownership of the created directories as
+ # ensure_config_dir_exists does. It should if the transport is local
+ # -- vila 2011-04-06
+ self.transport.create_prefix()
+ return self._lock.lock_write(token)
+
+ def unlock(self):
+ self._lock.unlock()
+
+ def break_lock(self):
+ self._lock.break_lock()
+
+ @needs_write_lock
+ def save(self):
+ # We need to be able to override the undecorated implementation
+ self.save_without_locking()
+
+ def save_without_locking(self):
+ super(LockableIniFileStore, self).save()
+
+
+# FIXME: global, bazaar, shouldn't that be 'user' instead or even
+# 'user_defaults' as opposed to 'user_overrides', 'system_defaults'
+# (/etc/bzr/bazaar.conf) and 'system_overrides' ? -- vila 2011-04-05
+
+# FIXME: Moreover, we shouldn't need classes for these stores either, factory
+# functions or a registry will make it easier and clearer for tests, focusing
+# on the relevant parts of the API that needs testing -- vila 20110503 (based
+# on a poolie's remark)
+class GlobalStore(LockableIniFileStore):
+
+ def __init__(self, possible_transports=None):
+ t = transport.get_transport_from_path(
+ config_dir(), possible_transports=possible_transports)
+ super(GlobalStore, self).__init__(t, 'bazaar.conf')
+ self.id = 'bazaar'
+
+
+class LocationStore(LockableIniFileStore):
+
+ def __init__(self, possible_transports=None):
+ t = transport.get_transport_from_path(
+ config_dir(), possible_transports=possible_transports)
+ super(LocationStore, self).__init__(t, 'locations.conf')
+ self.id = 'locations'
+
+
+class BranchStore(TransportIniFileStore):
+
+ def __init__(self, branch):
+ super(BranchStore, self).__init__(branch.control_transport,
+ 'branch.conf')
+ self.branch = branch
+ self.id = 'branch'
+
+
+class ControlStore(LockableIniFileStore):
+
+ def __init__(self, bzrdir):
+ super(ControlStore, self).__init__(bzrdir.transport,
+ 'control.conf',
+ lock_dir_name='branch_lock')
+ self.id = 'control'
+
+
+class SectionMatcher(object):
+ """Select sections into a given Store.
+
+ This is intended to be used to postpone getting an iterable of sections
+ from a store.
+ """
+
+ def __init__(self, store):
+ self.store = store
+
+ def get_sections(self):
+ # This is where we require loading the store so we can see all defined
+ # sections.
+ sections = self.store.get_sections()
+        # Walk the sections in the order provided
+ for store, s in sections:
+ if self.match(s):
+ yield store, s
+
+ def match(self, section):
+ """Does the proposed section match.
+
+ :param section: A Section object.
+
+ :returns: True if the section matches, False otherwise.
+ """
+ raise NotImplementedError(self.match)
+
+
+class NameMatcher(SectionMatcher):
+
+ def __init__(self, store, section_id):
+ super(NameMatcher, self).__init__(store)
+ self.section_id = section_id
+
+ def match(self, section):
+ return section.id == self.section_id
+
+
+class LocationSection(Section):
+
+ def __init__(self, section, extra_path, branch_name=None):
+ super(LocationSection, self).__init__(section.id, section.options)
+ self.extra_path = extra_path
+ if branch_name is None:
+ branch_name = ''
+ self.locals = {'relpath': extra_path,
+ 'basename': urlutils.basename(extra_path),
+ 'branchname': branch_name}
+
+ def get(self, name, default=None, expand=True):
+ value = super(LocationSection, self).get(name, default)
+ if value is not None and expand:
+ policy_name = self.get(name + ':policy', None)
+ policy = _policy_value.get(policy_name, POLICY_NONE)
+ if policy == POLICY_APPENDPATH:
+ value = urlutils.join(value, self.extra_path)
+ # expand section local options right now (since POLICY_APPENDPATH
+            # will never add option references, it's ok to expand after it).
+ chunks = []
+ for is_ref, chunk in iter_option_refs(value):
+ if not is_ref:
+ chunks.append(chunk)
+ else:
+ ref = chunk[1:-1]
+ if ref in self.locals:
+ chunks.append(self.locals[ref])
+ else:
+ chunks.append(chunk)
+ value = ''.join(chunks)
+ return value
+
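+
+# Illustrative sketch (added for exposition, not in the original module):
+# how LocationSection expands section-local references such as {basename}.
+# The section id, option name and values are invented for the example.
+def _example_location_section():
+    plain = Section('/srv/bzr',
+                    {'public_branch': 'http://example.com/{basename}'})
+    located = LocationSection(plain, 'project/trunk', branch_name='trunk')
+    return located.get('public_branch')  # -> 'http://example.com/trunk'
+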
+
+class StartingPathMatcher(SectionMatcher):
+ """Select sections for a given location respecting the Store order."""
+
+ # FIXME: Both local paths and urls can be used for section names as well as
+ # ``location`` to stay consistent with ``LocationMatcher`` which itself
+ # inherited the fuzziness from the previous ``LocationConfig``
+ # implementation. We probably need to revisit which encoding is allowed for
+ # both ``location`` and section names and how we normalize
+    # them. http://pad.lv/85479, http://pad.lv/437009 and http://pad.lv/359320
+    # are related too. -- vila 2012-01-04
+
+ def __init__(self, store, location):
+ super(StartingPathMatcher, self).__init__(store)
+ if location.startswith('file://'):
+ location = urlutils.local_path_from_url(location)
+ self.location = location
+
+ def get_sections(self):
+ """Get all sections matching ``location`` in the store.
+
+ The most generic sections are described first in the store, then more
+ specific ones can be provided for reduced scopes.
+
+        The sections are therefore returned in reverse order so that the most
+        specific ones are found first.
+ """
+ location_parts = self.location.rstrip('/').split('/')
+ store = self.store
+ sections = []
+ # Later sections are more specific, they should be returned first
+ for _, section in reversed(list(store.get_sections())):
+ if section.id is None:
+ # The no-name section is always included if present
+ yield store, LocationSection(section, self.location)
+ continue
+ section_path = section.id
+ if section_path.startswith('file://'):
+ # the location is already a local path or URL, convert the
+ # section id to the same format
+ section_path = urlutils.local_path_from_url(section_path)
+ if (self.location.startswith(section_path)
+ or fnmatch.fnmatch(self.location, section_path)):
+ section_parts = section_path.rstrip('/').split('/')
+ extra_path = '/'.join(location_parts[len(section_parts):])
+ yield store, LocationSection(section, extra_path)
+
+
+class LocationMatcher(SectionMatcher):
+
+ def __init__(self, store, location):
+ super(LocationMatcher, self).__init__(store)
+ url, params = urlutils.split_segment_parameters(location)
+ if location.startswith('file://'):
+ location = urlutils.local_path_from_url(location)
+ self.location = location
+ branch_name = params.get('branch')
+ if branch_name is None:
+ self.branch_name = urlutils.basename(self.location)
+ else:
+ self.branch_name = urlutils.unescape(branch_name)
+
+ def _get_matching_sections(self):
+ """Get all sections matching ``location``."""
+        # We slightly diverge from LocationConfig here by allowing the no-name
+        # section as the most generic one with the lowest priority.
+ no_name_section = None
+ all_sections = []
+ # Filter out the no_name_section so _iter_for_location_by_parts can be
+ # used (it assumes all sections have a name).
+ for _, section in self.store.get_sections():
+ if section.id is None:
+ no_name_section = section
+ else:
+ all_sections.append(section)
+ # Unfortunately _iter_for_location_by_parts deals with section names so
+ # we have to resync.
+ filtered_sections = _iter_for_location_by_parts(
+ [s.id for s in all_sections], self.location)
+ iter_all_sections = iter(all_sections)
+ matching_sections = []
+ if no_name_section is not None:
+ matching_sections.append(
+ (0, LocationSection(no_name_section, self.location)))
+ for section_id, extra_path, length in filtered_sections:
+ # a section id is unique for a given store so it's safe to take the
+ # first matching section while iterating. Also, all filtered
+ # sections are part of 'all_sections' and will always be found
+ # there.
+ while True:
+ section = iter_all_sections.next()
+ if section_id == section.id:
+ section = LocationSection(section, extra_path,
+ self.branch_name)
+ matching_sections.append((length, section))
+ break
+ return matching_sections
+
+ def get_sections(self):
+ # Override the default implementation as we want to change the order
+ matching_sections = self._get_matching_sections()
+ # We want the longest (aka more specific) locations first
+ sections = sorted(matching_sections,
+ key=lambda (length, section): (length, section.id),
+ reverse=True)
+ # Sections mentioning 'ignore_parents' restrict the selection
+ for _, section in sections:
+ # FIXME: We really want to use as_bool below -- vila 2011-04-07
+ ignore = section.get('ignore_parents', None)
+ if ignore is not None:
+ ignore = ui.bool_from_string(ignore)
+ if ignore:
+ break
+ # Finally, we have a valid section
+ yield self.store, section
+
+
+_option_ref_re = lazy_regex.lazy_compile('({[^{}\n]+})')
+"""Describes an expandable option reference.
+
+We want to match the most embedded reference first.
+
+I.e. for '{{foo}}' we will get '{foo}',
+for '{bar{baz}}' we will get '{baz}'
+"""
+
+def iter_option_refs(string):
+    # Split to isolate refs so every other chunk is a ref
+ is_ref = False
+ for chunk in _option_ref_re.split(string):
+ yield is_ref, chunk
+ is_ref = not is_ref
+
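+
+# Illustrative sketch (added for exposition, not in the original module):
+# iter_option_refs() splits a string into alternating literal and reference
+# chunks; the actual expansion is done by Stack below. The input is invented.
+def _example_iter_option_refs():
+    chunks = list(iter_option_refs('push to {remote}/{basename}'))
+    # Every other chunk is a reference:
+    # [(False, 'push to '), (True, '{remote}'), (False, '/'),
+    #  (True, '{basename}'), (False, '')]
+    return chunks
+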
+
+class Stack(object):
+ """A stack of configurations where an option can be defined"""
+
+ def __init__(self, sections_def, store=None, mutable_section_id=None):
+ """Creates a stack of sections with an optional store for changes.
+
+        :param sections_def: A list of Section or callables that return an
+ iterable of Section. This defines the Sections for the Stack and
+ can be called repeatedly if needed.
+
+ :param store: The optional Store where modifications will be
+ recorded. If none is specified, no modifications can be done.
+
+ :param mutable_section_id: The id of the MutableSection where changes
+ are recorded. This requires the ``store`` parameter to be
+ specified.
+ """
+ self.sections_def = sections_def
+ self.store = store
+ self.mutable_section_id = mutable_section_id
+
+ def iter_sections(self):
+ """Iterate all the defined sections."""
+ # Ensuring lazy loading is achieved by delaying section matching (which
+ # implies querying the persistent storage) until it can't be avoided
+ # anymore by using callables to describe (possibly empty) section
+ # lists.
+ for sections in self.sections_def:
+ for store, section in sections():
+ yield store, section
+
+ def get(self, name, expand=True, convert=True):
+ """Return the *first* option value found in the sections.
+
+ This is where we guarantee that sections coming from Store are loaded
+ lazily: the loading is delayed until we need to either check that an
+ option exists or get its value, which in turn may require to discover
+ in which sections it can be defined. Both of these (section and option
+ existence) require loading the store (even partially).
+
+ :param name: The queried option.
+
+ :param expand: Whether options references should be expanded.
+
+ :param convert: Whether the option value should be converted from
+ unicode (do nothing for non-registered options).
+
+ :returns: The value of the option.
+ """
+ # FIXME: No caching of options nor sections yet -- vila 20110503
+ value = None
+ found_store = None # Where the option value has been found
+ # If the option is registered, it may provide additional info about
+ # value handling
+ try:
+ opt = option_registry.get(name)
+ except KeyError:
+ # Not registered
+ opt = None
+
+ def expand_and_convert(val):
+ # This may need to be called in different contexts if the value is
+ # None or ends up being None during expansion or conversion.
+ if val is not None:
+ if expand:
+ if isinstance(val, basestring):
+ val = self._expand_options_in_string(val)
+ else:
+ trace.warning('Cannot expand "%s":'
+ ' %s does not support option expansion'
+ % (name, type(val)))
+ if opt is None:
+ val = found_store.unquote(val)
+ elif convert:
+ val = opt.convert_from_unicode(found_store, val)
+ return val
+
+ # First of all, check if the environment can override the configuration
+ # value
+ if opt is not None and opt.override_from_env:
+ value = opt.get_override()
+ value = expand_and_convert(value)
+ if value is None:
+ for store, section in self.iter_sections():
+ value = section.get(name)
+ if value is not None:
+ found_store = store
+ break
+ value = expand_and_convert(value)
+ if opt is not None and value is None:
+ # If the option is registered, it may provide a default value
+ value = opt.get_default()
+ value = expand_and_convert(value)
+ for hook in ConfigHooks['get']:
+ hook(self, name, value)
+ return value
+
+ def expand_options(self, string, env=None):
+ """Expand option references in the string in the configuration context.
+
+ :param string: The string containing option(s) to expand.
+
+ :param env: An option dict defining additional configuration options or
+ overriding existing ones.
+
+ :returns: The expanded string.
+ """
+ return self._expand_options_in_string(string, env)
+
+ def _expand_options_in_string(self, string, env=None, _refs=None):
+ """Expand options in the string in the configuration context.
+
+ :param string: The string to be expanded.
+
+ :param env: An option dict defining additional configuration options or
+ overriding existing ones.
+
+ :param _refs: Private list (FIFO) containing the options being expanded
+ to detect loops.
+
+ :returns: The expanded string.
+ """
+ if string is None:
+ # Not much to expand there
+ return None
+ if _refs is None:
+ # What references are currently resolved (to detect loops)
+ _refs = []
+ result = string
+ # We need to iterate until no more refs appear ({{foo}} will need two
+ # iterations for example).
+ expanded = True
+ while expanded:
+ expanded = False
+ chunks = []
+ for is_ref, chunk in iter_option_refs(result):
+ if not is_ref:
+ chunks.append(chunk)
+ else:
+ expanded = True
+ name = chunk[1:-1]
+ if name in _refs:
+ raise errors.OptionExpansionLoop(string, _refs)
+ _refs.append(name)
+ value = self._expand_option(name, env, _refs)
+ if value is None:
+ raise errors.ExpandingUnknownOption(name, string)
+ chunks.append(value)
+ _refs.pop()
+ result = ''.join(chunks)
+ return result
+
+ def _expand_option(self, name, env, _refs):
+ if env is not None and name in env:
+            # Special case, values provided in env take precedence over
+ # anything else
+ value = env[name]
+ else:
+ value = self.get(name, expand=False, convert=False)
+ value = self._expand_options_in_string(value, env, _refs)
+ return value
+
+ def _get_mutable_section(self):
+ """Get the MutableSection for the Stack.
+
+ This is where we guarantee that the mutable section is lazily loaded:
+ this means we won't load the corresponding store before setting a value
+ or deleting an option. In practice the store will often be loaded but
+        this helps catch some programming errors.
+ """
+ store = self.store
+ section = store.get_mutable_section(self.mutable_section_id)
+ return store, section
+
+ def set(self, name, value):
+ """Set a new value for the option."""
+ store, section = self._get_mutable_section()
+ section.set(name, store.quote(value))
+ for hook in ConfigHooks['set']:
+ hook(self, name, value)
+
+ def remove(self, name):
+ """Remove an existing option."""
+ _, section = self._get_mutable_section()
+ section.remove(name)
+ for hook in ConfigHooks['remove']:
+ hook(self, name)
+
+ def __repr__(self):
+ # Mostly for debugging use
+ return "<config.%s(%s)>" % (self.__class__.__name__, id(self))
+
+ def _get_overrides(self):
+        # Hack around library_state.initialize never being called
+ if bzrlib.global_state is not None:
+ return bzrlib.global_state.cmdline_overrides.get_sections()
+ return []
+
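+
+# Illustrative sketch (added for exposition, not in the original module):
+# expand_options() with an env dict; the env values win over stored options.
+# 'stack' is assumed to be any Stack; the option names are invented.
+def _example_expand_with_env(stack):
+    return stack.expand_options('{user}@{host}',
+                                env={'user': 'vila', 'host': 'example.com'})
+    # -> 'vila@example.com' whatever the stack contains
+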
+
+class MemoryStack(Stack):
+ """A configuration stack defined from a string.
+
+ This is mainly intended for tests and requires no disk resources.
+ """
+
+ def __init__(self, content=None):
+ """Create an in-memory stack from a given content.
+
+        It uses a single store based on configobj and supports reading and
+ writing options.
+
+ :param content: The initial content of the store. If None, the store is
+ not loaded and ``_load_from_string`` can and should be used if
+ needed.
+ """
+ store = IniFileStore()
+ if content is not None:
+ store._load_from_string(content)
+ super(MemoryStack, self).__init__(
+ [store.get_sections], store)
+
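+
+# Illustrative sketch (added for exposition, not in the original module):
+# MemoryStack exercises the whole Stack API without touching disk, including
+# option expansion. The option names and values are invented for the example.
+def _example_memory_stack():
+    stack = MemoryStack('host = example.com\npush_url = sftp://{host}/srv\n')
+    expanded = stack.get('push_url')        # -> 'sftp://example.com/srv'
+    stack.set('host', 'other.example.com')  # recorded in the backing store
+    # The second lookup is expanded against the freshly set value.
+    return expanded, stack.get('push_url')
+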
+
+class _CompatibleStack(Stack):
+ """Place holder for compatibility with previous design.
+
+ This is intended to ease the transition from the Config-based design to the
+ Stack-based design and should not be used nor relied upon by plugins.
+
+    One assumption made here is that the daughter classes will all use Stores
+    derived from LockableIniFileStore.
+
+    It implements set() and remove() by re-loading the store before applying
+    the modification and saving it.
+
+    The long term plan being to implement a single write per store to save
+    all modifications, this class should not be relied upon in the interim.
+ """
+
+ def set(self, name, value):
+ # Force a reload
+ self.store.unload()
+ super(_CompatibleStack, self).set(name, value)
+ # Force a write to persistent storage
+ self.store.save()
+
+ def remove(self, name):
+ # Force a reload
+ self.store.unload()
+ super(_CompatibleStack, self).remove(name)
+ # Force a write to persistent storage
+ self.store.save()
+
+
+class GlobalStack(_CompatibleStack):
+ """Global options only stack.
+
+ The following sections are queried:
+
+ * command-line overrides,
+
+ * the 'DEFAULT' section in bazaar.conf
+
+ This stack will use the ``DEFAULT`` section in bazaar.conf as its
+ MutableSection.
+ """
+
+ def __init__(self):
+ gstore = GlobalStore()
+ super(GlobalStack, self).__init__(
+ [self._get_overrides,
+ NameMatcher(gstore, 'DEFAULT').get_sections],
+ gstore, mutable_section_id='DEFAULT')
+
+
+class LocationStack(_CompatibleStack):
+ """Per-location options falling back to global options stack.
+
+ The following sections are queried:
+
+ * command-line overrides,
+
+ * the sections matching ``location`` in ``locations.conf``, the order being
+ defined by the number of path components in the section glob, higher
+ numbers first (from most specific section to most generic).
+
+ * the 'DEFAULT' section in bazaar.conf
+
+ This stack will use the ``location`` section in locations.conf as its
+ MutableSection.
+ """
+
+ def __init__(self, location):
+ """Make a new stack for a location and global configuration.
+
+        :param location: A URL prefix used to select the matching sections."""
+ lstore = LocationStore()
+ if location.startswith('file://'):
+ location = urlutils.local_path_from_url(location)
+ gstore = GlobalStore()
+ super(LocationStack, self).__init__(
+ [self._get_overrides,
+ LocationMatcher(lstore, location).get_sections,
+ NameMatcher(gstore, 'DEFAULT').get_sections],
+ lstore, mutable_section_id=location)
+
+
+class BranchStack(Stack):
+ """Per-location options falling back to branch then global options stack.
+
+ The following sections are queried:
+
+ * command-line overrides,
+
+ * the sections matching ``location`` in ``locations.conf``, the order being
+ defined by the number of path components in the section glob, higher
+ numbers first (from most specific section to most generic),
+
+ * the no-name section in branch.conf,
+
+ * the ``DEFAULT`` section in ``bazaar.conf``.
+
+ This stack will use the no-name section in ``branch.conf`` as its
+ MutableSection.
+ """
+
+ def __init__(self, branch):
+ lstore = LocationStore()
+ bstore = branch._get_config_store()
+ gstore = GlobalStore()
+ super(BranchStack, self).__init__(
+ [self._get_overrides,
+ LocationMatcher(lstore, branch.base).get_sections,
+ NameMatcher(bstore, None).get_sections,
+ NameMatcher(gstore, 'DEFAULT').get_sections],
+ bstore)
+ self.branch = branch
+
+ def lock_write(self, token=None):
+ return self.branch.lock_write(token)
+
+ def unlock(self):
+ return self.branch.unlock()
+
+ @needs_write_lock
+ def set(self, name, value):
+ super(BranchStack, self).set(name, value)
+ # Unlocking the branch will trigger a store.save_changes() so the last
+ # unlock saves all the changes.
+
+ @needs_write_lock
+ def remove(self, name):
+ super(BranchStack, self).remove(name)
+ # Unlocking the branch will trigger a store.save_changes() so the last
+ # unlock saves all the changes.
+
+
+class RemoteControlStack(_CompatibleStack):
+ """Remote control-only options stack."""
+
+ # FIXME 2011-11-22 JRV This should probably be renamed to avoid confusion
+ # with the stack used for remote bzr dirs. RemoteControlStack only uses
+ # control.conf and is used only for stack options.
+
+ def __init__(self, bzrdir):
+ cstore = bzrdir._get_config_store()
+ super(RemoteControlStack, self).__init__(
+ [NameMatcher(cstore, None).get_sections],
+ cstore)
+ self.bzrdir = bzrdir
+
+
+class BranchOnlyStack(Stack):
+ """Branch-only options stack."""
+
+    # FIXME: BranchOnlyStack only uses branch.conf and is used only for the
+    # stacked_on_location option, waiting for http://pad.lv/832042 to be fixed.
+ # -- vila 2011-12-16
+
+ def __init__(self, branch):
+ bstore = branch._get_config_store()
+ super(BranchOnlyStack, self).__init__(
+ [NameMatcher(bstore, None).get_sections],
+ bstore)
+ self.branch = branch
+
+ def lock_write(self, token=None):
+ return self.branch.lock_write(token)
+
+ def unlock(self):
+ return self.branch.unlock()
+
+ @needs_write_lock
+ def set(self, name, value):
+ super(BranchOnlyStack, self).set(name, value)
+ # Force a write to persistent storage
+ self.store.save_changes()
+
+ @needs_write_lock
+ def remove(self, name):
+ super(BranchOnlyStack, self).remove(name)
+ # Force a write to persistent storage
+ self.store.save_changes()
+
+
+class cmd_config(commands.Command):
+ __doc__ = """Display, set or remove a configuration option.
+
+ Display the active value for a given option.
+
+ If --all is specified, NAME is interpreted as a regular expression and all
+ matching options are displayed mentioning their scope. The active value
+ that bzr will take into account is the first one displayed for each option.
+
+ If no NAME is given, --all .* is implied.
+
+ Setting a value is achieved by using name=value without spaces. The value
+ is set in the most relevant scope and can be checked by displaying the
+ option again.
+ """
+
+ takes_args = ['name?']
+
+ takes_options = [
+ 'directory',
+ # FIXME: This should be a registry option so that plugins can register
+ # their own config files (or not) and will also address
+ # http://pad.lv/788991 -- vila 20101115
+ commands.Option('scope', help='Reduce the scope to the specified'
+ ' configuration file.',
+ type=unicode),
+ commands.Option('all',
+ help='Display all the defined values for the matching options.',
+ ),
+ commands.Option('remove', help='Remove the option from'
+ ' the configuration file.'),
+ ]
+
+ _see_also = ['configuration']
+
+ @commands.display_command
+ def run(self, name=None, all=False, directory=None, scope=None,
+ remove=False):
+ if directory is None:
+ directory = '.'
+ directory = urlutils.normalize_url(directory)
+ if remove and all:
+ raise errors.BzrError(
+ '--all and --remove are mutually exclusive.')
+ elif remove:
+ # Delete the option in the given scope
+ self._remove_config_option(name, directory, scope)
+ elif name is None:
+ # Defaults to all options
+ self._show_matching_options('.*', directory, scope)
+ else:
+ try:
+ name, value = name.split('=', 1)
+ except ValueError:
+ # Display the option(s) value(s)
+ if all:
+ self._show_matching_options(name, directory, scope)
+ else:
+ self._show_value(name, directory, scope)
+ else:
+ if all:
+ raise errors.BzrError(
+ 'Only one option can be set.')
+ # Set the option value
+ self._set_config_option(name, value, directory, scope)
+
+ def _get_stack(self, directory, scope=None, write_access=False):
+ """Get the configuration stack specified by ``directory`` and ``scope``.
+
+ :param directory: Where the configurations are derived from.
+
+ :param scope: A specific config to start from.
+
+ :param write_access: Whether a write access to the stack will be
+ attempted.
+ """
+ # FIXME: scope should allow access to plugin-specific stacks (even
+ # reduced to the plugin-specific store), related to
+ # http://pad.lv/788991 -- vila 2011-11-15
+ if scope is not None:
+ if scope == 'bazaar':
+ return GlobalStack()
+ elif scope == 'locations':
+ return LocationStack(directory)
+ elif scope == 'branch':
+ (_, br, _) = (
+ controldir.ControlDir.open_containing_tree_or_branch(
+ directory))
+ if write_access:
+ self.add_cleanup(br.lock_write().unlock)
+ return br.get_config_stack()
+ raise errors.NoSuchConfig(scope)
+ else:
+ try:
+ (_, br, _) = (
+ controldir.ControlDir.open_containing_tree_or_branch(
+ directory))
+ if write_access:
+ self.add_cleanup(br.lock_write().unlock)
+ return br.get_config_stack()
+ except errors.NotBranchError:
+ return LocationStack(directory)
+
+ def _quote_multiline(self, value):
+ if '\n' in value:
+ value = '"""' + value + '"""'
+ return value
+
+ def _show_value(self, name, directory, scope):
+ conf = self._get_stack(directory, scope)
+ value = conf.get(name, expand=True, convert=False)
+ if value is not None:
+ # Quote the value appropriately
+ value = self._quote_multiline(value)
+ self.outf.write('%s\n' % (value,))
+ else:
+ raise errors.NoSuchConfigOption(name)
+
+ def _show_matching_options(self, name, directory, scope):
+ name = lazy_regex.lazy_compile(name)
+ # We want any error in the regexp to be raised *now* so we need to
+ # avoid the delay introduced by the lazy regexp. But, we still do
+ # want the nicer errors raised by lazy_regex.
+ name._compile_and_collapse()
+ cur_store_id = None
+ cur_section = None
+ conf = self._get_stack(directory, scope)
+ for store, section in conf.iter_sections():
+ for oname in section.iter_option_names():
+ if name.search(oname):
+ if cur_store_id != store.id:
+ # Explain where the options are defined
+ self.outf.write('%s:\n' % (store.id,))
+ cur_store_id = store.id
+ cur_section = None
+ if (section.id is not None and cur_section != section.id):
+ # Display the section id as it appears in the store
+ # (None doesn't appear by definition)
+ self.outf.write(' [%s]\n' % (section.id,))
+ cur_section = section.id
+ value = section.get(oname, expand=False)
+ # Quote the value appropriately
+ value = self._quote_multiline(value)
+ self.outf.write(' %s = %s\n' % (oname, value))
+
+ def _set_config_option(self, name, value, directory, scope):
+ conf = self._get_stack(directory, scope, write_access=True)
+ conf.set(name, value)
+
+ def _remove_config_option(self, name, directory, scope):
+ if name is None:
+ raise errors.BzrCommandError(
+ '--remove expects an option to remove.')
+ conf = self._get_stack(directory, scope, write_access=True)
+ try:
+ conf.remove(name)
+ except KeyError:
+ raise errors.NoSuchConfigOption(name)
+
+
+# Test registries
+#
+# We need adapters that can build a Store or a Stack in a test context. Test
+# classes, based on TestCaseWithTransport, can use the registry to parametrize
+# themselves. The builder will receive a test instance and should return a
+# ready-to-use store or stack. Plugins that define new store/stacks can also
+# register themselves here to be tested against the tests defined in
+# bzrlib.tests.test_config. Note that the builder can be called multiple times
+# for the same test.
+
+# The registered object should be a callable receiving a test instance
+# parameter (inheriting from tests.TestCaseWithTransport) and returning a Store
+# object.
+test_store_builder_registry = registry.Registry()
+
+# The registered object should be a callable receiving a test instance
+# parameter (inheriting from tests.TestCaseWithTransport) and returning a Stack
+# object.
+test_stack_builder_registry = registry.Registry()
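+
+
+# Illustrative sketch (added for exposition, not in the original module):
+# the shape of a builder a plugin could register so its stacks get exercised
+# by the generic tests in bzrlib.tests.test_config. The key and the builder
+# are invented; real builders receive a TestCaseWithTransport instance.
+def _example_register_stack_builder():
+    def build_memory_stack(test):
+        # Ignore the test instance and return a ready-to-use in-memory stack.
+        return MemoryStack('default_format = 2a\n')
+    test_stack_builder_registry.register('example', build_memory_stack)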
diff --git a/bzrlib/conflicts.py b/bzrlib/conflicts.py
new file mode 100644
index 0000000..348919e
--- /dev/null
+++ b/bzrlib/conflicts.py
@@ -0,0 +1,893 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# TODO: 'bzr resolve' should accept a directory name and work from that
+# point down
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+
+from bzrlib import (
+ cleanup,
+ errors,
+ osutils,
+ rio,
+ trace,
+ transform,
+ workingtree,
+ )
+from bzrlib.i18n import gettext, ngettext
+""")
+from bzrlib import (
+ commands,
+ option,
+ registry,
+ )
+
+
+CONFLICT_SUFFIXES = ('.THIS', '.BASE', '.OTHER')
+
+
+class cmd_conflicts(commands.Command):
+ __doc__ = """List files with conflicts.
+
+ Merge will do its best to combine the changes in two branches, but there
+ are some kinds of problems only a human can fix. When it encounters those,
+ it will mark a conflict. A conflict means that you need to fix something,
+ before you can commit.
+
+ Conflicts normally are listed as short, human-readable messages. If --text
+ is supplied, the pathnames of files with text conflicts are listed,
+ instead. (This is useful for editing all files with text conflicts.)
+
+ Use bzr resolve when you have fixed a problem.
+ """
+ takes_options = [
+ 'directory',
+ option.Option('text',
+ help='List paths of files with text conflicts.'),
+ ]
+ _see_also = ['resolve', 'conflict-types']
+
+ def run(self, text=False, directory=u'.'):
+ wt = workingtree.WorkingTree.open_containing(directory)[0]
+ for conflict in wt.conflicts():
+ if text:
+ if conflict.typestring != 'text conflict':
+ continue
+ self.outf.write(conflict.path + '\n')
+ else:
+ self.outf.write(unicode(conflict) + '\n')
+
+
+resolve_action_registry = registry.Registry()
+
+
+resolve_action_registry.register(
+ 'done', 'done', 'Marks the conflict as resolved.')
+resolve_action_registry.register(
+ 'take-this', 'take_this',
+ 'Resolve the conflict preserving the version in the working tree.')
+resolve_action_registry.register(
+ 'take-other', 'take_other',
+ 'Resolve the conflict taking the merged version into account.')
+resolve_action_registry.default_key = 'done'
+
+class ResolveActionOption(option.RegistryOption):
+
+ def __init__(self):
+ super(ResolveActionOption, self).__init__(
+ 'action', 'How to resolve the conflict.',
+ value_switches=True,
+ registry=resolve_action_registry)
+
+
+class cmd_resolve(commands.Command):
+ __doc__ = """Mark a conflict as resolved.
+
+ Merge will do its best to combine the changes in two branches, but there
+ are some kinds of problems only a human can fix. When it encounters those,
+ it will mark a conflict. A conflict means that you need to fix something,
+ before you can commit.
+
+ Once you have fixed a problem, use "bzr resolve" to automatically mark
+ text conflicts as fixed, "bzr resolve FILE" to mark a specific conflict as
+ resolved, or "bzr resolve --all" to mark all conflicts as resolved.
+ """
+ aliases = ['resolved']
+ takes_args = ['file*']
+ takes_options = [
+ 'directory',
+ option.Option('all', help='Resolve all conflicts in this tree.'),
+ ResolveActionOption(),
+ ]
+ _see_also = ['conflicts']
+ def run(self, file_list=None, all=False, action=None, directory=None):
+ if all:
+ if file_list:
+ raise errors.BzrCommandError(gettext("If --all is specified,"
+ " no FILE may be provided"))
+ if directory is None:
+ directory = u'.'
+ tree = workingtree.WorkingTree.open_containing(directory)[0]
+ if action is None:
+ action = 'done'
+ else:
+ tree, file_list = workingtree.WorkingTree.open_containing_paths(
+ file_list, directory)
+ if file_list is None:
+ if action is None:
+ # FIXME: There is a special case here related to the option
+ # handling that could be clearer and easier to discover by
+ # providing an --auto action (bug #344013 and #383396) and
+ # make it mandatory instead of implicit and active only
+ # when no file_list is provided -- vila 091229
+ action = 'auto'
+ else:
+ if action is None:
+ action = 'done'
+ if action == 'auto':
+ if file_list is None:
+ un_resolved, resolved = tree.auto_resolve()
+ if len(un_resolved) > 0:
+ trace.note(ngettext('%d conflict auto-resolved.',
+ '%d conflicts auto-resolved.', len(resolved)),
+ len(resolved))
+ trace.note(gettext('Remaining conflicts:'))
+ for conflict in un_resolved:
+ trace.note(unicode(conflict))
+ return 1
+ else:
+ trace.note(gettext('All conflicts resolved.'))
+ return 0
+ else:
+ # FIXME: This can never occur but the block above needs some
+ # refactoring to transfer tree.auto_resolve() to
+ # conflict.auto(tree) --vila 091242
+ pass
+ else:
+ before, after = resolve(tree, file_list, action=action)
+ trace.note(ngettext('{0} conflict resolved, {1} remaining',
+ '{0} conflicts resolved, {1} remaining',
+ before-after).format(before - after, after))
+
+
+def resolve(tree, paths=None, ignore_misses=False, recursive=False,
+ action='done'):
+ """Resolve some or all of the conflicts in a working tree.
+
+ :param paths: If None, resolve all conflicts. Otherwise, select only
+ specified conflicts.
+ :param recursive: If True, then elements of paths which are directories
+ have all their children resolved, etc. When invoked as part of
+ recursive commands like revert, this should be True. For commands
+ or applications wishing finer-grained control, like the resolve
+ command, this should be False.
+ :param ignore_misses: If False, warnings will be printed if the supplied
+ paths do not have conflicts.
+    :param action: How the conflict should be resolved.
+ """
+ tree.lock_tree_write()
+ nb_conflicts_after = None
+ try:
+ tree_conflicts = tree.conflicts()
+ nb_conflicts_before = len(tree_conflicts)
+ if paths is None:
+ new_conflicts = ConflictList()
+ to_process = tree_conflicts
+ else:
+ new_conflicts, to_process = tree_conflicts.select_conflicts(
+ tree, paths, ignore_misses, recursive)
+ for conflict in to_process:
+ try:
+ conflict._do(action, tree)
+ conflict.cleanup(tree)
+ except NotImplementedError:
+ new_conflicts.append(conflict)
+ try:
+ nb_conflicts_after = len(new_conflicts)
+ tree.set_conflicts(new_conflicts)
+ except errors.UnsupportedOperation:
+ pass
+ finally:
+ tree.unlock()
+ if nb_conflicts_after is None:
+ nb_conflicts_after = nb_conflicts_before
+ return nb_conflicts_before, nb_conflicts_after
+
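+
+# Illustrative sketch (added for exposition, not in the original module):
+# how a caller such as the 'bzr resolve' command above drives resolve(). The
+# path is invented and 'tree' is assumed to be a WorkingTree with conflicts.
+def _example_resolve_one_path(tree):
+    before, after = resolve(tree, ['hello.txt'], action='take_this')
+    return before - after  # number of conflicts resolved by this call
+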
+
+def restore(filename):
+ """Restore a conflicted file to the state it was in before merging.
+
+ Only text restoration is supported at present.
+ """
+ conflicted = False
+ try:
+ osutils.rename(filename + ".THIS", filename)
+ conflicted = True
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ os.unlink(filename + ".BASE")
+ conflicted = True
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ os.unlink(filename + ".OTHER")
+ conflicted = True
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ if not conflicted:
+ raise errors.NotConflicted(filename)
+
+
+class ConflictList(object):
+ """List of conflicts.
+
+ Typically obtained from WorkingTree.conflicts()
+
+ Can be instantiated from stanzas or from Conflict subclasses.
+ """
+
+ def __init__(self, conflicts=None):
+ object.__init__(self)
+ if conflicts is None:
+ self.__list = []
+ else:
+ self.__list = conflicts
+
+ def is_empty(self):
+ return len(self.__list) == 0
+
+ def __len__(self):
+ return len(self.__list)
+
+ def __iter__(self):
+ return iter(self.__list)
+
+ def __getitem__(self, key):
+ return self.__list[key]
+
+ def append(self, conflict):
+ return self.__list.append(conflict)
+
+ def __eq__(self, other_list):
+ return list(self) == list(other_list)
+
+ def __ne__(self, other_list):
+ return not (self == other_list)
+
+ def __repr__(self):
+ return "ConflictList(%r)" % self.__list
+
+ @staticmethod
+ def from_stanzas(stanzas):
+ """Produce a new ConflictList from an iterable of stanzas"""
+ conflicts = ConflictList()
+ for stanza in stanzas:
+ conflicts.append(Conflict.factory(**stanza.as_dict()))
+ return conflicts
+
+ def to_stanzas(self):
+ """Generator of stanzas"""
+ for conflict in self:
+ yield conflict.as_stanza()
+
+ def to_strings(self):
+ """Generate strings for the provided conflicts"""
+ for conflict in self:
+ yield unicode(conflict)
+
+ def remove_files(self, tree):
+ """Remove the THIS, BASE and OTHER files for listed conflicts"""
+ for conflict in self:
+ if not conflict.has_files:
+ continue
+ conflict.cleanup(tree)
+
+ def select_conflicts(self, tree, paths, ignore_misses=False,
+ recurse=False):
+ """Select the conflicts associated with paths in a tree.
+
+        File-ids are also used for this.
+
+        :return: a pair of ConflictLists: (not_selected, selected)
+ """
+ path_set = set(paths)
+ ids = {}
+ selected_paths = set()
+ new_conflicts = ConflictList()
+ selected_conflicts = ConflictList()
+ for path in paths:
+ file_id = tree.path2id(path)
+ if file_id is not None:
+ ids[file_id] = path
+
+ for conflict in self:
+ selected = False
+ for key in ('path', 'conflict_path'):
+ cpath = getattr(conflict, key, None)
+ if cpath is None:
+ continue
+ if cpath in path_set:
+ selected = True
+ selected_paths.add(cpath)
+ if recurse:
+ if osutils.is_inside_any(path_set, cpath):
+ selected = True
+ selected_paths.add(cpath)
+
+ for key in ('file_id', 'conflict_file_id'):
+ cfile_id = getattr(conflict, key, None)
+ if cfile_id is None:
+ continue
+ try:
+ cpath = ids[cfile_id]
+ except KeyError:
+ continue
+ selected = True
+ selected_paths.add(cpath)
+ if selected:
+ selected_conflicts.append(conflict)
+ else:
+ new_conflicts.append(conflict)
+ if ignore_misses is not True:
+ for path in [p for p in paths if p not in selected_paths]:
+ if not os.path.exists(tree.abspath(path)):
+ print "%s does not exist" % path
+ else:
+ print "%s is not conflicted" % path
+ return new_conflicts, selected_conflicts
+
+
+class Conflict(object):
+ """Base class for all types of conflict"""
+
+ # FIXME: cleanup should take care of that ? -- vila 091229
+ has_files = False
+
+ def __init__(self, path, file_id=None):
+ self.path = path
+ # warn turned off, because the factory blindly transfers the Stanza
+ # values to __init__ and Stanza is purely a Unicode api.
+ self.file_id = osutils.safe_file_id(file_id, warn=False)
+
+ def as_stanza(self):
+ s = rio.Stanza(type=self.typestring, path=self.path)
+ if self.file_id is not None:
+ # Stanza requires Unicode apis
+ s.add('file_id', self.file_id.decode('utf8'))
+ return s
+
+ def _cmp_list(self):
+ return [type(self), self.path, self.file_id]
+
+ def __cmp__(self, other):
+ if getattr(other, "_cmp_list", None) is None:
+ return -1
+ return cmp(self._cmp_list(), other._cmp_list())
+
+ def __hash__(self):
+ return hash((type(self), self.path, self.file_id))
+
+ def __eq__(self, other):
+ return self.__cmp__(other) == 0
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __unicode__(self):
+ return self.format % self.__dict__
+
+ def __repr__(self):
+ rdict = dict(self.__dict__)
+ rdict['class'] = self.__class__.__name__
+ return self.rformat % rdict
+
+ @staticmethod
+ def factory(type, **kwargs):
+ global ctype
+ return ctype[type](**kwargs)
+
+ @staticmethod
+ def sort_key(conflict):
+ if conflict.path is not None:
+ return conflict.path, conflict.typestring
+ elif getattr(conflict, "conflict_path", None) is not None:
+ return conflict.conflict_path, conflict.typestring
+ else:
+ return None, conflict.typestring
+
+ def _do(self, action, tree):
+ """Apply the specified action to the conflict.
+
+ :param action: The method name to call.
+
+ :param tree: The tree passed as a parameter to the method.
+ """
+ meth = getattr(self, 'action_%s' % action, None)
+ if meth is None:
+ raise NotImplementedError(self.__class__.__name__ + '.' + action)
+ meth(tree)
+
+ def associated_filenames(self):
+ """The names of the files generated to help resolve the conflict."""
+ raise NotImplementedError(self.associated_filenames)
+
+ def cleanup(self, tree):
+ for fname in self.associated_filenames():
+ try:
+ osutils.delete_any(tree.abspath(fname))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def action_done(self, tree):
+ """Mark the conflict as solved once it has been handled."""
+ # This method does nothing but simplifies the design of upper levels.
+ pass
+
+ def action_take_this(self, tree):
+ raise NotImplementedError(self.action_take_this)
+
+ def action_take_other(self, tree):
+ raise NotImplementedError(self.action_take_other)
+
+ def _resolve_with_cleanups(self, tree, *args, **kwargs):
+ tt = transform.TreeTransform(tree)
+ op = cleanup.OperationWithCleanups(self._resolve)
+ op.add_cleanup(tt.finalize)
+ op.run_simple(tt, *args, **kwargs)
+
+
+class PathConflict(Conflict):
+ """A conflict was encountered merging file paths"""
+
+ typestring = 'path conflict'
+
+ format = 'Path conflict: %(path)s / %(conflict_path)s'
+
+ rformat = '%(class)s(%(path)r, %(conflict_path)r, %(file_id)r)'
+
+ def __init__(self, path, conflict_path=None, file_id=None):
+ Conflict.__init__(self, path, file_id)
+ self.conflict_path = conflict_path
+
+ def as_stanza(self):
+ s = Conflict.as_stanza(self)
+ if self.conflict_path is not None:
+ s.add('conflict_path', self.conflict_path)
+ return s
+
+ def associated_filenames(self):
+ # No additional files have been generated here
+ return []
+
+ def _resolve(self, tt, file_id, path, winner):
+ """Resolve the conflict.
+
+ :param tt: The TreeTransform where the conflict is resolved.
+ :param file_id: The retained file id.
+ :param path: The retained path.
+ :param winner: 'this' or 'other' indicates which side is the winner.
+ """
+ path_to_create = None
+ if winner == 'this':
+ if self.path == '<deleted>':
+ return # Nothing to do
+ if self.conflict_path == '<deleted>':
+ path_to_create = self.path
+ revid = tt._tree.get_parent_ids()[0]
+ elif winner == 'other':
+ if self.conflict_path == '<deleted>':
+ return # Nothing to do
+ if self.path == '<deleted>':
+ path_to_create = self.conflict_path
+ # FIXME: If there are more than two parents we may need to
+ # iterate. Taking the last parent is the safer bet in the mean
+ # time. -- vila 20100309
+ revid = tt._tree.get_parent_ids()[-1]
+ else:
+ # Programmer error
+ raise AssertionError('bad winner: %r' % (winner,))
+ if path_to_create is not None:
+ tid = tt.trans_id_tree_path(path_to_create)
+ transform.create_from_tree(
+ tt, tid, self._revision_tree(tt._tree, revid), file_id)
+ tt.version_file(file_id, tid)
+ else:
+ tid = tt.trans_id_file_id(file_id)
+ # Adjust the path for the retained file id
+ parent_tid = tt.get_tree_parent(tid)
+ tt.adjust_path(osutils.basename(path), parent_tid, tid)
+ tt.apply()
+
+ def _revision_tree(self, tree, revid):
+ return tree.branch.repository.revision_tree(revid)
+
+ def _infer_file_id(self, tree):
+        # Prior to bug #531967, file_id wasn't always set; there may still be
+        # conflict files in the wild, so we need to cope with them.
+        # Establish which path we should use to find the file-id again
+ possible_paths = []
+ for p in (self.path, self.conflict_path):
+ if p == '<deleted>':
+ # special hard-coded path
+ continue
+ if p is not None:
+ possible_paths.append(p)
+ # Search the file-id in the parents with any path available
+ file_id = None
+ for revid in tree.get_parent_ids():
+ revtree = self._revision_tree(tree, revid)
+ for p in possible_paths:
+ file_id = revtree.path2id(p)
+ if file_id is not None:
+ return revtree, file_id
+ return None, None
+
+ def action_take_this(self, tree):
+ if self.file_id is not None:
+ self._resolve_with_cleanups(tree, self.file_id, self.path,
+ winner='this')
+ else:
+            # Prior to bug #531967 we need to find the file_id again and
+            # restore the content from there
+ revtree, file_id = self._infer_file_id(tree)
+ tree.revert([revtree.id2path(file_id)],
+ old_tree=revtree, backups=False)
+
+ def action_take_other(self, tree):
+ if self.file_id is not None:
+ self._resolve_with_cleanups(tree, self.file_id,
+ self.conflict_path,
+ winner='other')
+ else:
+            # Prior to bug #531967 we need to find the file_id again and
+            # restore the content from there
+ revtree, file_id = self._infer_file_id(tree)
+ tree.revert([revtree.id2path(file_id)],
+ old_tree=revtree, backups=False)
+
+
+class ContentsConflict(PathConflict):
+ """The files are of different types (or both binary), or not present"""
+
+ has_files = True
+
+ typestring = 'contents conflict'
+
+ format = 'Contents conflict in %(path)s'
+
+ def associated_filenames(self):
+ return [self.path + suffix for suffix in ('.BASE', '.OTHER')]
+
+ def _resolve(self, tt, suffix_to_remove):
+ """Resolve the conflict.
+
+ :param tt: The TreeTransform where the conflict is resolved.
+ :param suffix_to_remove: Either 'THIS' or 'OTHER'
+
+ The resolution is symmetric: when taking THIS, OTHER is deleted and
+ item.THIS is renamed into item and vice-versa.
+ """
+ try:
+ # Delete 'item.THIS' or 'item.OTHER' depending on
+ # suffix_to_remove
+ tt.delete_contents(
+ tt.trans_id_tree_path(self.path + '.' + suffix_to_remove))
+ except errors.NoSuchFile:
+ # There are valid cases where 'item.suffix_to_remove' either
+ # never existed or was already deleted (including the case
+ # where the user deleted it)
+ pass
+ try:
+ this_path = tt._tree.id2path(self.file_id)
+ except errors.NoSuchId:
+ # The file is not present anymore. This may happen if the user
+ # deleted the file either manually or when resolving a conflict on
+ # the parent. We may raise some exception to indicate that the
+ # conflict doesn't exist anymore and as such doesn't need to be
+ # resolved ? -- vila 20110615
+ this_tid = None
+ else:
+ this_tid = tt.trans_id_tree_path(this_path)
+ if this_tid is not None:
+ # Rename 'item.suffix_to_remove' (note that if
+ # 'item.suffix_to_remove' has been deleted, this is a no-op)
+ parent_tid = tt.get_tree_parent(this_tid)
+ tt.adjust_path(osutils.basename(self.path), parent_tid, this_tid)
+ tt.apply()
+
+ def action_take_this(self, tree):
+ self._resolve_with_cleanups(tree, 'OTHER')
+
+ def action_take_other(self, tree):
+ self._resolve_with_cleanups(tree, 'THIS')
+
+
+# TODO: There should be a base revid attribute to better inform the user about
+# how the conflicts were generated.
+class TextConflict(Conflict):
+ """The merge algorithm could not resolve all differences encountered."""
+
+ has_files = True
+
+ typestring = 'text conflict'
+
+ format = 'Text conflict in %(path)s'
+
+ rformat = '%(class)s(%(path)r, %(file_id)r)'
+
+ def associated_filenames(self):
+ return [self.path + suffix for suffix in CONFLICT_SUFFIXES]
+
+ def _resolve(self, tt, winner_suffix):
+ """Resolve the conflict by copying one of .THIS or .OTHER into file.
+
+ :param tt: The TreeTransform where the conflict is resolved.
+ :param winner_suffix: Either 'THIS' or 'OTHER'
+
+ The resolution is symmetric, when taking THIS, item.THIS is renamed
+ into item and vice-versa. This takes one of the files as a whole
+ ignoring every difference that could have been merged cleanly.
+ """
+        # To avoid useless copies, we switch item and item.winner_suffix; only
+        # item will exist after the conflict has been resolved anyway.
+ item_tid = tt.trans_id_file_id(self.file_id)
+ item_parent_tid = tt.get_tree_parent(item_tid)
+ winner_path = self.path + '.' + winner_suffix
+ winner_tid = tt.trans_id_tree_path(winner_path)
+ winner_parent_tid = tt.get_tree_parent(winner_tid)
+ # Switch the paths to preserve the content
+ tt.adjust_path(osutils.basename(self.path),
+ winner_parent_tid, winner_tid)
+ tt.adjust_path(osutils.basename(winner_path), item_parent_tid, item_tid)
+ # Associate the file_id to the right content
+ tt.unversion_file(item_tid)
+ tt.version_file(self.file_id, winner_tid)
+ tt.apply()
+
+ def action_take_this(self, tree):
+ self._resolve_with_cleanups(tree, 'THIS')
+
+ def action_take_other(self, tree):
+ self._resolve_with_cleanups(tree, 'OTHER')
+
+
+class HandledConflict(Conflict):
+ """A path problem that has been provisionally resolved.
+ This is intended to be a base class.
+ """
+
+ rformat = "%(class)s(%(action)r, %(path)r, %(file_id)r)"
+
+ def __init__(self, action, path, file_id=None):
+ Conflict.__init__(self, path, file_id)
+ self.action = action
+
+ def _cmp_list(self):
+ return Conflict._cmp_list(self) + [self.action]
+
+ def as_stanza(self):
+ s = Conflict.as_stanza(self)
+ s.add('action', self.action)
+ return s
+
+ def associated_filenames(self):
+ # Nothing has been generated here
+ return []
+
+
+class HandledPathConflict(HandledConflict):
+ """A provisionally-resolved path problem involving two paths.
+ This is intended to be a base class.
+ """
+
+ rformat = "%(class)s(%(action)r, %(path)r, %(conflict_path)r,"\
+ " %(file_id)r, %(conflict_file_id)r)"
+
+ def __init__(self, action, path, conflict_path, file_id=None,
+ conflict_file_id=None):
+ HandledConflict.__init__(self, action, path, file_id)
+ self.conflict_path = conflict_path
+ # warn turned off, because the factory blindly transfers the Stanza
+ # values to __init__.
+ self.conflict_file_id = osutils.safe_file_id(conflict_file_id,
+ warn=False)
+
+ def _cmp_list(self):
+ return HandledConflict._cmp_list(self) + [self.conflict_path,
+ self.conflict_file_id]
+
+ def as_stanza(self):
+ s = HandledConflict.as_stanza(self)
+ s.add('conflict_path', self.conflict_path)
+ if self.conflict_file_id is not None:
+ s.add('conflict_file_id', self.conflict_file_id.decode('utf8'))
+
+ return s
+
+
+class DuplicateID(HandledPathConflict):
+ """Two files want the same file_id."""
+
+ typestring = 'duplicate id'
+
+ format = 'Conflict adding id to %(conflict_path)s. %(action)s %(path)s.'
+
+
+class DuplicateEntry(HandledPathConflict):
+ """Two directory entries want to have the same name."""
+
+ typestring = 'duplicate'
+
+ format = 'Conflict adding file %(conflict_path)s. %(action)s %(path)s.'
+
+ def action_take_this(self, tree):
+ tree.remove([self.conflict_path], force=True, keep_files=False)
+ tree.rename_one(self.path, self.conflict_path)
+
+ def action_take_other(self, tree):
+ tree.remove([self.path], force=True, keep_files=False)
+
+
+class ParentLoop(HandledPathConflict):
+ """An attempt to create an infinitely-looping directory structure.
+ This is rare, but can be produced like so:
+
+ tree A:
+ mv foo bar
+ tree B:
+ mv bar foo
+ merge A and B
+ """
+
+ typestring = 'parent loop'
+
+ format = 'Conflict moving %(path)s into %(conflict_path)s. %(action)s.'
+
+ def action_take_this(self, tree):
+        # just accept the bzr proposal
+ pass
+
+ def action_take_other(self, tree):
+ tt = transform.TreeTransform(tree)
+ try:
+ p_tid = tt.trans_id_file_id(self.file_id)
+ parent_tid = tt.get_tree_parent(p_tid)
+ cp_tid = tt.trans_id_file_id(self.conflict_file_id)
+ cparent_tid = tt.get_tree_parent(cp_tid)
+ tt.adjust_path(osutils.basename(self.path), cparent_tid, cp_tid)
+ tt.adjust_path(osutils.basename(self.conflict_path),
+ parent_tid, p_tid)
+ tt.apply()
+ finally:
+ tt.finalize()
+
+
+class UnversionedParent(HandledConflict):
+ """An attempt to version a file whose parent directory is not versioned.
+ Typically, the result of a merge where one tree unversioned the directory
+ and the other added a versioned file to it.
+ """
+
+ typestring = 'unversioned parent'
+
+ format = 'Conflict because %(path)s is not versioned, but has versioned'\
+ ' children. %(action)s.'
+
+ # FIXME: We silently do nothing to make tests pass, but most probably the
+ # conflict shouldn't exist (the long story is that the conflict is
+ # generated with another one that can be resolved properly) -- vila 091224
+ def action_take_this(self, tree):
+ pass
+
+ def action_take_other(self, tree):
+ pass
+
+
+class MissingParent(HandledConflict):
+ """An attempt to add files to a directory that is not present.
+ Typically, the result of a merge where THIS deleted the directory and
+ the OTHER added a file to it.
+ See also: DeletingParent (same situation, THIS and OTHER reversed)
+ """
+
+ typestring = 'missing parent'
+
+ format = 'Conflict adding files to %(path)s. %(action)s.'
+
+ def action_take_this(self, tree):
+ tree.remove([self.path], force=True, keep_files=False)
+
+ def action_take_other(self, tree):
+        # just accept the bzr proposal
+ pass
+
+
+class DeletingParent(HandledConflict):
+ """An attempt to add files to a directory that is not present.
+    Typically, the result of a merge where OTHER deleted the directory and
+    THIS added a file to it.
+ """
+
+ typestring = 'deleting parent'
+
+ format = "Conflict: can't delete %(path)s because it is not empty. "\
+ "%(action)s."
+
+ # FIXME: It's a bit strange that the default action is not coherent with
+ # MissingParent from the *user* pov.
+
+ def action_take_this(self, tree):
+        # just accept the bzr proposal
+ pass
+
+ def action_take_other(self, tree):
+ tree.remove([self.path], force=True, keep_files=False)
+
+
+class NonDirectoryParent(HandledConflict):
+ """An attempt to add files to a directory that is not a directory or
+ an attempt to change the kind of a directory with files.
+ """
+
+ typestring = 'non-directory parent'
+
+ format = "Conflict: %(path)s is not a directory, but has files in it."\
+ " %(action)s."
+
+ # FIXME: .OTHER should be used instead of .new when the conflict is created
+
+ def action_take_this(self, tree):
+ # FIXME: we should preserve that path when the conflict is generated !
+ if self.path.endswith('.new'):
+ conflict_path = self.path[:-(len('.new'))]
+ tree.remove([self.path], force=True, keep_files=False)
+ tree.add(conflict_path)
+ else:
+ raise NotImplementedError(self.action_take_this)
+
+ def action_take_other(self, tree):
+ # FIXME: we should preserve that path when the conflict is generated !
+ if self.path.endswith('.new'):
+ conflict_path = self.path[:-(len('.new'))]
+ tree.remove([conflict_path], force=True, keep_files=False)
+ tree.rename_one(self.path, conflict_path)
+ else:
+ raise NotImplementedError(self.action_take_other)
+
+
+ctype = {}
+
+
+def register_types(*conflict_types):
+ """Register a Conflict subclass for serialization purposes"""
+ global ctype
+ for conflict_type in conflict_types:
+ ctype[conflict_type.typestring] = conflict_type
+
+register_types(ContentsConflict, TextConflict, PathConflict, DuplicateID,
+ DuplicateEntry, ParentLoop, UnversionedParent, MissingParent,
+ DeletingParent, NonDirectoryParent)
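+
+# An illustrative sketch (not part of the registrations above): a plugin-
+# defined conflict type could be registered the same way, e.g.::
+#
+#   class ExampleConflict(HandledConflict):   # hypothetical class
+#       typestring = 'example conflict'
+#       format = 'Example conflict on %(path)s. %(action)s.'
+#
+#       def action_take_this(self, tree):
+#           pass
+#
+#       def action_take_other(self, tree):
+#           pass
+#
+#   register_types(ExampleConflict)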
diff --git a/bzrlib/controldir.py b/bzrlib/controldir.py
new file mode 100644
index 0000000..73d2f1b
--- /dev/null
+++ b/bzrlib/controldir.py
@@ -0,0 +1,1460 @@
+# Copyright (C) 2010, 2011, 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""ControlDir is the basic control directory class.
+
+The ControlDir class is the base for the control directory used
+by all bzr and foreign formats. For the ".bzr" implementation,
+see bzrlib.bzrdir.BzrDir.
+
+"""
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import textwrap
+
+from bzrlib import (
+ errors,
+ hooks,
+ revision as _mod_revision,
+ transport as _mod_transport,
+ trace,
+ ui,
+ urlutils,
+ )
+from bzrlib.transport import local
+from bzrlib.push import (
+ PushResult,
+ )
+
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib import registry
+
+
+class ControlComponent(object):
+ """Abstract base class for control directory components.
+
+ This provides interfaces that are common across controldirs,
+ repositories, branches, and workingtree control directories.
+
+ They all expose two urls and transports: the *user* URL is the
+ one that stops above the control directory (eg .bzr) and that
+ should normally be used in messages, and the *control* URL is
+ under that in eg .bzr/checkout and is used to read the control
+ files.
+
+ This can be used as a mixin and is intended to fit with
+ foreign formats.
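+
+    As an illustration (the paths are hypothetical): for a working tree at
+    /home/alice/project, user_url ends in 'project/' while control_url points
+    below the control directory, e.g. '.../project/.bzr/checkout/'.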
+ """
+
+ @property
+ def control_transport(self):
+ raise NotImplementedError
+
+ @property
+ def control_url(self):
+ return self.control_transport.base
+
+ @property
+ def user_transport(self):
+ raise NotImplementedError
+
+ @property
+ def user_url(self):
+ return self.user_transport.base
+
+
+class ControlDir(ControlComponent):
+ """A control directory.
+
+ While this represents a generic control directory, there are a few
+ features that are present in this interface that are currently only
+ supported by one of its implementations, BzrDir.
+
+ These features (bound branches, stacked branches) are currently only
+ supported by Bazaar, but could be supported by other version control
+ systems as well. Implementations are required to raise the appropriate
+ exceptions when an operation is requested that is not supported.
+
+ This also makes life easier for API users who can rely on the
+ implementation always allowing a particular feature to be requested but
+ raising an exception when it is not supported, rather than requiring the
+ API users to check for magic attributes to see what features are supported.
+ """
+
+ def can_convert_format(self):
+ """Return true if this controldir is one whose format we can convert
+ from."""
+ return True
+
+ def list_branches(self):
+ """Return a sequence of all branches local to this control directory.
+
+ """
+ return self.get_branches().values()
+
+ def get_branches(self):
+ """Get all branches in this control directory, as a dictionary.
+
+ :return: Dictionary mapping branch names to instances.
+ """
+ try:
+ return { "": self.open_branch() }
+ except (errors.NotBranchError, errors.NoRepositoryPresent):
+ return {}
+
+ def is_control_filename(self, filename):
+ """True if filename is the name of a path which is reserved for
+ controldirs.
+
+ :param filename: A filename within the root transport of this
+ controldir.
+
+ This is true IF and ONLY IF the filename is part of the namespace reserved
+ for bzr control dirs. Currently this is the '.bzr' directory in the root
+        of the root_transport. It is expected that plugins will need to extend
+ this in the future - for instance to make bzr talk with svn working
+ trees.
+ """
+ raise NotImplementedError(self.is_control_filename)
+
+ def needs_format_conversion(self, format=None):
+ """Return true if this controldir needs convert_format run on it.
+
+ For instance, if the repository format is out of date but the
+ branch and working tree are not, this should return True.
+
+ :param format: Optional parameter indicating a specific desired
+ format we plan to arrive at.
+ """
+ raise NotImplementedError(self.needs_format_conversion)
+
+ def create_repository(self, shared=False):
+ """Create a new repository in this control directory.
+
+ :param shared: If a shared repository should be created
+ :return: The newly created repository
+ """
+ raise NotImplementedError(self.create_repository)
+
+ def destroy_repository(self):
+ """Destroy the repository in this ControlDir."""
+ raise NotImplementedError(self.destroy_repository)
+
+ def create_branch(self, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch in this ControlDir.
+
+ :param name: Name of the colocated branch to create, None for
+ the user selected branch or "" for the active branch.
+ :param append_revisions_only: Whether this branch should only allow
+ appending new revisions to its history.
+
+        The controldir's format will control what branch format is created.
+ For more control see BranchFormatXX.create(a_controldir).
+ """
+ raise NotImplementedError(self.create_branch)
+
+ def destroy_branch(self, name=None):
+ """Destroy a branch in this ControlDir.
+
+ :param name: Name of the branch to destroy, None for the
+ user selected branch or "" for the active branch.
+ :raise NotBranchError: When the branch does not exist
+ """
+ raise NotImplementedError(self.destroy_branch)
+
+ def create_workingtree(self, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """Create a working tree at this ControlDir.
+
+ :param revision_id: create it as of this revision id.
+ :param from_branch: override controldir branch
+ (for lightweight checkouts)
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ """
+ raise NotImplementedError(self.create_workingtree)
+
+ def destroy_workingtree(self):
+ """Destroy the working tree at this ControlDir.
+
+ Formats that do not support this may raise UnsupportedOperation.
+ """
+ raise NotImplementedError(self.destroy_workingtree)
+
+ def destroy_workingtree_metadata(self):
+ """Destroy the control files for the working tree at this ControlDir.
+
+ The contents of working tree files are not affected.
+ Formats that do not support this may raise UnsupportedOperation.
+ """
+ raise NotImplementedError(self.destroy_workingtree_metadata)
+
+ def find_branch_format(self, name=None):
+ """Find the branch 'format' for this controldir.
+
+ This might be a synthetic object for e.g. RemoteBranch and SVN.
+ """
+ raise NotImplementedError(self.find_branch_format)
+
+ def get_branch_reference(self, name=None):
+ """Return the referenced URL for the branch in this controldir.
+
+ :param name: Optional colocated branch name
+ :raises NotBranchError: If there is no Branch.
+ :raises NoColocatedBranchSupport: If a branch name was specified
+ but colocated branches are not supported.
+ :return: The URL the branch in this controldir references if it is a
+ reference branch, or None for regular branches.
+ """
+ if name is not None:
+ raise errors.NoColocatedBranchSupport(self)
+ return None
+
+ def set_branch_reference(self, target_branch, name=None):
+ """Set the referenced URL for the branch in this controldir.
+
+ :param name: Optional colocated branch name
+ :param target_branch: Branch to reference
+ :raises NoColocatedBranchSupport: If a branch name was specified
+ but colocated branches are not supported.
+ :return: The referencing branch
+ """
+ raise NotImplementedError(self.set_branch_reference)
+
+ def open_branch(self, name=None, unsupported=False,
+ ignore_fallbacks=False, possible_transports=None):
+ """Open the branch object at this ControlDir if one is present.
+
+ :param unsupported: if True, then no longer supported branch formats can
+ still be opened.
+ :param ignore_fallbacks: Whether to open fallback repositories
+ :param possible_transports: Transports to use for opening e.g.
+ fallback repositories.
+ """
+ raise NotImplementedError(self.open_branch)
+
+ def open_repository(self, _unsupported=False):
+ """Open the repository object at this ControlDir if one is present.
+
+ This will not follow the Branch object pointer - it's strictly a direct
+ open facility. Most client code should use open_branch().repository to
+ get at a repository.
+
+ :param _unsupported: a private parameter, not part of the api.
+ """
+ raise NotImplementedError(self.open_repository)
+
+ def find_repository(self):
+ """Find the repository that should be used.
+
+ This does not require a branch as we use it to find the repo for
+ new branches as well as to hook existing branches up to their
+ repository.
+ """
+ raise NotImplementedError(self.find_repository)
+
+ def open_workingtree(self, unsupported=False,
+ recommend_upgrade=True, from_branch=None):
+ """Open the workingtree object at this ControlDir if one is present.
+
+ :param recommend_upgrade: Optional keyword parameter, when True (the
+ default), emit through the ui module a recommendation that the user
+ upgrade the working tree when the workingtree being opened is old
+ (but still fully supported).
+ :param from_branch: override controldir branch (for lightweight
+ checkouts)
+ """
+ raise NotImplementedError(self.open_workingtree)
+
+ def has_branch(self, name=None):
+ """Tell if this controldir contains a branch.
+
+ Note: if you're going to open the branch, you should just go ahead
+ and try, and not ask permission first. (This method just opens the
+ branch and discards it, and that's somewhat expensive.)
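+
+        A caller-side sketch of that pattern (the names are illustrative)::
+
+            try:
+                branch = a_controldir.open_branch()
+            except errors.NotBranchError:
+                branch = None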
+ """
+ try:
+ self.open_branch(name, ignore_fallbacks=True)
+ return True
+ except errors.NotBranchError:
+ return False
+
+ def _get_selected_branch(self):
+ """Return the name of the branch selected by the user.
+
+ :return: Name of the branch selected by the user, or "".
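+
+        For example (the URL form here is an assumption): opening
+        'file:///srv/repo,branch=feature-x' would make this return
+        'feature-x'; with no branch segment parameter it returns ''.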
+ """
+ branch = self.root_transport.get_segment_parameters().get("branch")
+ if branch is None:
+ branch = ""
+ return urlutils.unescape(branch)
+
+ def has_workingtree(self):
+ """Tell if this controldir contains a working tree.
+
+ This will still raise an exception if the controldir has a workingtree
+ that is remote & inaccessible.
+
+ Note: if you're going to open the working tree, you should just go ahead
+ and try, and not ask permission first. (This method just opens the
+ workingtree and discards it, and that's somewhat expensive.)
+ """
+ try:
+ self.open_workingtree(recommend_upgrade=False)
+ return True
+ except errors.NoWorkingTree:
+ return False
+
+ def cloning_metadir(self, require_stacking=False):
+ """Produce a metadir suitable for cloning or sprouting with.
+
+ These operations may produce workingtrees (yes, even though they're
+ "cloning" something that doesn't have a tree), so a viable workingtree
+ format must be selected.
+
+        :param require_stacking: If True, non-stackable formats will be upgraded
+ to similar stackable formats.
+ :returns: a ControlDirFormat with all component formats either set
+ appropriately or set to None if that component should not be
+ created.
+ """
+ raise NotImplementedError(self.cloning_metadir)
+
+ def checkout_metadir(self):
+ """Produce a metadir suitable for checkouts of this controldir.
+
+ :returns: A ControlDirFormat with all component formats
+ either set appropriately or set to None if that component
+ should not be created.
+ """
+ return self.cloning_metadir()
+
+ def sprout(self, url, revision_id=None, force_new_repo=False,
+ recurse='down', possible_transports=None,
+ accelerator_tree=None, hardlink=False, stacked=False,
+ source_branch=None, create_tree_if_local=True):
+ """Create a copy of this controldir prepared for use as a new line of
+ development.
+
+ If url's last component does not exist, it will be created.
+
+        Attributes related to the identity of the source branch, such as the
+        branch nickname, will be cleaned; a working tree is created whether
+        one existed before or not; and a local branch is always created.
+
+ :param revision_id: if revision_id is not None, then the clone
+ operation may tune itself to download less data.
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ :param hardlink: If true, hard-link files from accelerator_tree,
+ where possible.
+ :param stacked: If true, create a stacked branch referring to the
+ location of this control directory.
+ :param create_tree_if_local: If true, a working-tree will be created
+ when working locally.
+ """
+ raise NotImplementedError(self.sprout)
+
+ def push_branch(self, source, revision_id=None, overwrite=False,
+ remember=False, create_prefix=False):
+ """Push the source branch into this ControlDir."""
+ br_to = None
+ # If we can open a branch, use its direct repository, otherwise see
+ # if there is a repository without a branch.
+ try:
+ br_to = self.open_branch()
+ except errors.NotBranchError:
+ # Didn't find a branch, can we find a repository?
+ repository_to = self.find_repository()
+ else:
+ # Found a branch, so we must have found a repository
+ repository_to = br_to.repository
+
+ push_result = PushResult()
+ push_result.source_branch = source
+ if br_to is None:
+ # We have a repository but no branch, copy the revisions, and then
+ # create a branch.
+ if revision_id is None:
+ # No revision supplied by the user, default to the branch
+ # revision
+ revision_id = source.last_revision()
+ repository_to.fetch(source.repository, revision_id=revision_id)
+ br_to = source.clone(self, revision_id=revision_id)
+ if source.get_push_location() is None or remember:
+ # FIXME: Should be done only if we succeed ? -- vila 2012-01-18
+ source.set_push_location(br_to.base)
+ push_result.stacked_on = None
+ push_result.branch_push_result = None
+ push_result.old_revno = None
+ push_result.old_revid = _mod_revision.NULL_REVISION
+ push_result.target_branch = br_to
+ push_result.master_branch = None
+ push_result.workingtree_updated = False
+ else:
+ # We have successfully opened the branch, remember if necessary:
+ if source.get_push_location() is None or remember:
+ # FIXME: Should be done only if we succeed ? -- vila 2012-01-18
+ source.set_push_location(br_to.base)
+ try:
+ tree_to = self.open_workingtree()
+ except errors.NotLocalUrl:
+ push_result.branch_push_result = source.push(br_to,
+ overwrite, stop_revision=revision_id)
+ push_result.workingtree_updated = False
+ except errors.NoWorkingTree:
+ push_result.branch_push_result = source.push(br_to,
+ overwrite, stop_revision=revision_id)
+ push_result.workingtree_updated = None # Not applicable
+ else:
+ tree_to.lock_write()
+ try:
+ push_result.branch_push_result = source.push(
+ tree_to.branch, overwrite, stop_revision=revision_id)
+ tree_to.update()
+ finally:
+ tree_to.unlock()
+ push_result.workingtree_updated = True
+ push_result.old_revno = push_result.branch_push_result.old_revno
+ push_result.old_revid = push_result.branch_push_result.old_revid
+ push_result.target_branch = \
+ push_result.branch_push_result.target_branch
+ return push_result
+
+ def _get_tree_branch(self, name=None):
+ """Return the branch and tree, if any, for this controldir.
+
+ :param name: Name of colocated branch to open.
+
+ Return None for tree if not present or inaccessible.
+ Raise NotBranchError if no branch is present.
+ :return: (tree, branch)
+ """
+ try:
+ tree = self.open_workingtree()
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ tree = None
+ branch = self.open_branch(name=name)
+ else:
+ if name is not None:
+ branch = self.open_branch(name=name)
+ else:
+ branch = tree.branch
+ return tree, branch
+
+ def get_config(self):
+ """Get configuration for this ControlDir."""
+ raise NotImplementedError(self.get_config)
+
+ def check_conversion_target(self, target_format):
+ """Check that a controldir as a whole can be converted to a new format."""
+ raise NotImplementedError(self.check_conversion_target)
+
+ def clone(self, url, revision_id=None, force_new_repo=False,
+ preserve_stacking=False):
+ """Clone this controldir and its contents to url verbatim.
+
+ :param url: The url create the clone at. If url's last component does
+ not exist, it will be created.
+ :param revision_id: The tip revision-id to use for any branch or
+ working tree. If not None, then the clone operation may tune
+ itself to download less data.
+ :param force_new_repo: Do not use a shared repository for the target
+ even if one is available.
+ :param preserve_stacking: When cloning a stacked branch, stack the
+ new branch on top of the other branch's stacked-on branch.
+ """
+ return self.clone_on_transport(_mod_transport.get_transport(url),
+ revision_id=revision_id,
+ force_new_repo=force_new_repo,
+ preserve_stacking=preserve_stacking)
+
+ def clone_on_transport(self, transport, revision_id=None,
+ force_new_repo=False, preserve_stacking=False, stacked_on=None,
+ create_prefix=False, use_existing_dir=True, no_tree=False):
+ """Clone this controldir and its contents to transport verbatim.
+
+ :param transport: The transport for the location to produce the clone
+ at. If the target directory does not exist, it will be created.
+ :param revision_id: The tip revision-id to use for any branch or
+ working tree. If not None, then the clone operation may tune
+ itself to download less data.
+ :param force_new_repo: Do not use a shared repository for the target,
+ even if one is available.
+ :param preserve_stacking: When cloning a stacked branch, stack the
+ new branch on top of the other branch's stacked-on branch.
+ :param create_prefix: Create any missing directories leading up to
+ to_transport.
+ :param use_existing_dir: Use an existing directory if one exists.
+ :param no_tree: If set to true prevents creation of a working tree.
+ """
+ raise NotImplementedError(self.clone_on_transport)
+
+ @classmethod
+ def find_bzrdirs(klass, transport, evaluate=None, list_current=None):
+ """Find control dirs recursively from current location.
+
+ This is intended primarily as a building block for more sophisticated
+ functionality, like finding trees under a directory, or finding
+ branches that use a given repository.
+
+ :param evaluate: An optional callable that yields recurse, value,
+ where recurse controls whether this controldir is recursed into
+ and value is the value to yield. By default, all bzrdirs
+ are recursed into, and the return value is the controldir.
+ :param list_current: if supplied, use this function to list the current
+ directory, instead of Transport.list_dir
+ :return: a generator of found bzrdirs, or whatever evaluate returns.
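+
+        A caller-side sketch (hypothetical): yield user URLs and stop
+        recursing as soon as a control dir is found::
+
+            def evaluate(controldir):
+                return False, controldir.user_url
+
+            urls = list(ControlDir.find_bzrdirs(transport, evaluate=evaluate))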
+ """
+ if list_current is None:
+ def list_current(transport):
+ return transport.list_dir('')
+ if evaluate is None:
+ def evaluate(controldir):
+ return True, controldir
+
+ pending = [transport]
+ while len(pending) > 0:
+ current_transport = pending.pop()
+ recurse = True
+ try:
+ controldir = klass.open_from_transport(current_transport)
+ except (errors.NotBranchError, errors.PermissionDenied):
+ pass
+ else:
+ recurse, value = evaluate(controldir)
+ yield value
+ try:
+ subdirs = list_current(current_transport)
+ except (errors.NoSuchFile, errors.PermissionDenied):
+ continue
+ if recurse:
+ for subdir in sorted(subdirs, reverse=True):
+ pending.append(current_transport.clone(subdir))
+
+ @classmethod
+ def find_branches(klass, transport):
+ """Find all branches under a transport.
+
+ This will find all branches below the transport, including branches
+ inside other branches. Where possible, it will use
+ Repository.find_branches.
+
+ To list all the branches that use a particular Repository, see
+ Repository.find_branches
+ """
+ def evaluate(controldir):
+ try:
+ repository = controldir.open_repository()
+ except errors.NoRepositoryPresent:
+ pass
+ else:
+ return False, ([], repository)
+ return True, (controldir.list_branches(), None)
+ ret = []
+ for branches, repo in klass.find_bzrdirs(
+ transport, evaluate=evaluate):
+ if repo is not None:
+ ret.extend(repo.find_branches())
+ if branches is not None:
+ ret.extend(branches)
+ return ret
+
+ @classmethod
+ def create_branch_and_repo(klass, base, force_new_repo=False, format=None):
+ """Create a new ControlDir, Branch and Repository at the url 'base'.
+
+ This will use the current default ControlDirFormat unless one is
+        specified, and whatever repository format that controldir format uses,
+        via controldir.create_branch and create_repository. If a shared
+        repository is available, it is used preferentially.
+
+ The created Branch object is returned.
+
+ :param base: The URL to create the branch at.
+ :param force_new_repo: If True a new repository is always created.
+ :param format: If supplied, the format of branch to create. If not
+ supplied, the default is used.
+ """
+ controldir = klass.create(base, format)
+ controldir._find_or_create_repository(force_new_repo)
+ return controldir.create_branch()
+
+ @classmethod
+ def create_branch_convenience(klass, base, force_new_repo=False,
+ force_new_tree=None, format=None,
+ possible_transports=None):
+ """Create a new ControlDir, Branch and Repository at the url 'base'.
+
+ This is a convenience function - it will use an existing repository
+        if possible, and can be told explicitly whether or not to create a
+        working tree.
+
+ This will use the current default ControlDirFormat unless one is
+        specified, and whatever repository format that controldir format uses,
+        via ControlDir.create_branch and create_repository. If a shared
+        repository is available, it is used preferentially. Whatever repository
+        is used, its tree creation policy is followed.
+
+ The created Branch object is returned.
+ If a working tree cannot be made due to base not being a file:// url,
+ no error is raised unless force_new_tree is True, in which case no
+ data is created on disk and NotLocalUrl is raised.
+
+ :param base: The URL to create the branch at.
+ :param force_new_repo: If True a new repository is always created.
+ :param force_new_tree: If True or False force creation of a tree or
+ prevent such creation respectively.
+ :param format: Override for the controldir format to create.
+ :param possible_transports: An optional reusable transports list.
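+
+        A usage sketch (the path is hypothetical)::
+
+            branch = ControlDir.create_branch_convenience('path/to/new-branch')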
+ """
+ if force_new_tree:
+ # check for non local urls
+ t = _mod_transport.get_transport(base, possible_transports)
+ if not isinstance(t, local.LocalTransport):
+ raise errors.NotLocalUrl(base)
+ controldir = klass.create(base, format, possible_transports)
+ repo = controldir._find_or_create_repository(force_new_repo)
+ result = controldir.create_branch()
+ if force_new_tree or (repo.make_working_trees() and
+ force_new_tree is None):
+ try:
+ controldir.create_workingtree()
+ except errors.NotLocalUrl:
+ pass
+ return result
+
+ @classmethod
+ def create_standalone_workingtree(klass, base, format=None):
+ """Create a new ControlDir, WorkingTree, Branch and Repository at 'base'.
+
+ 'base' must be a local path or a file:// url.
+
+ This will use the current default ControlDirFormat unless one is
+        specified, and whatever repository format that controldir format uses
+        for bzrdirformat.create_workingtree, create_branch and
+        create_repository.
+
+ :param format: Override for the controldir format to create.
+ :return: The WorkingTree object.
+ """
+ t = _mod_transport.get_transport(base)
+ if not isinstance(t, local.LocalTransport):
+ raise errors.NotLocalUrl(base)
+ controldir = klass.create_branch_and_repo(base,
+ force_new_repo=True,
+ format=format).bzrdir
+ return controldir.create_workingtree()
+
+ @classmethod
+ def open_unsupported(klass, base):
+ """Open a branch which is not supported."""
+ return klass.open(base, _unsupported=True)
+
+ @classmethod
+ def open(klass, base, possible_transports=None, probers=None,
+ _unsupported=False):
+ """Open an existing controldir, rooted at 'base' (url).
+
+ :param _unsupported: a private parameter to the ControlDir class.
+ """
+ t = _mod_transport.get_transport(base, possible_transports)
+ return klass.open_from_transport(t, probers=probers,
+ _unsupported=_unsupported)
+
+ @classmethod
+ def open_from_transport(klass, transport, _unsupported=False,
+ probers=None):
+ """Open a controldir within a particular directory.
+
+ :param transport: Transport containing the controldir.
+ :param _unsupported: private.
+ """
+ for hook in klass.hooks['pre_open']:
+ hook(transport)
+ # Keep initial base since 'transport' may be modified while following
+ # the redirections.
+ base = transport.base
+ def find_format(transport):
+ return transport, ControlDirFormat.find_format(transport,
+ probers=probers)
+
+ def redirected(transport, e, redirection_notice):
+ redirected_transport = transport._redirected_to(e.source, e.target)
+ if redirected_transport is None:
+ raise errors.NotBranchError(base)
+ trace.note(gettext('{0} is{1} redirected to {2}').format(
+ transport.base, e.permanently, redirected_transport.base))
+ return redirected_transport
+
+ try:
+ transport, format = _mod_transport.do_catching_redirections(
+ find_format, transport, redirected)
+ except errors.TooManyRedirections:
+ raise errors.NotBranchError(base)
+
+ format.check_support_status(_unsupported)
+ return format.open(transport, _found=True)
+
+ @classmethod
+ def open_containing(klass, url, possible_transports=None):
+ """Open an existing branch which contains url.
+
+ :param url: url to search from.
+
+ See open_containing_from_transport for more detail.
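+
+        A usage sketch (the path is hypothetical)::
+
+            controldir, relpath = ControlDir.open_containing('/srv/project/lib')
+            # relpath would be 'lib' if the branch root is /srv/project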
+ """
+ transport = _mod_transport.get_transport(url, possible_transports)
+ return klass.open_containing_from_transport(transport)
+
+ @classmethod
+ def open_containing_from_transport(klass, a_transport):
+ """Open an existing branch which contains a_transport.base.
+
+ This probes for a branch at a_transport, and searches upwards from there.
+
+ Basically we keep looking up until we find the control directory or
+ run into the root. If there isn't one, raises NotBranchError.
+ If there is one and it is either an unrecognised format or an unsupported
+ format, UnknownFormatError or UnsupportedFormatError are raised.
+ If there is one, it is returned, along with the unused portion of url.
+
+ :return: The ControlDir that contains the path, and a Unicode path
+ for the rest of the URL.
+ """
+ # this gets the normalised url back. I.e. '.' -> the full path.
+ url = a_transport.base
+ while True:
+ try:
+ result = klass.open_from_transport(a_transport)
+ return result, urlutils.unescape(a_transport.relpath(url))
+ except errors.NotBranchError, e:
+ pass
+ except errors.PermissionDenied:
+ pass
+ try:
+ new_t = a_transport.clone('..')
+ except errors.InvalidURLJoin:
+ # reached the root, whatever that may be
+ raise errors.NotBranchError(path=url)
+ if new_t.base == a_transport.base:
+ # reached the root, whatever that may be
+ raise errors.NotBranchError(path=url)
+ a_transport = new_t
+
+ @classmethod
+ def open_tree_or_branch(klass, location):
+ """Return the branch and working tree at a location.
+
+ If there is no tree at the location, tree will be None.
+ If there is no branch at the location, an exception will be
+        raised.
+ :return: (tree, branch)
+ """
+ controldir = klass.open(location)
+ return controldir._get_tree_branch()
+
+ @classmethod
+ def open_containing_tree_or_branch(klass, location,
+ possible_transports=None):
+ """Return the branch and working tree contained by a location.
+
+ Returns (tree, branch, relpath).
+        If there is no tree containing the location, tree will be None.
+        If there is no branch containing the location, an exception will be
+        raised.
+ relpath is the portion of the path that is contained by the branch.
+ """
+ controldir, relpath = klass.open_containing(location,
+ possible_transports=possible_transports)
+ tree, branch = controldir._get_tree_branch()
+ return tree, branch, relpath
+
+ @classmethod
+ def open_containing_tree_branch_or_repository(klass, location):
+ """Return the working tree, branch and repo contained by a location.
+
+ Returns (tree, branch, repository, relpath).
+ If there is no tree containing the location, tree will be None.
+ If there is no branch containing the location, branch will be None.
+ If there is no repository containing the location, repository will be
+ None.
+ relpath is the portion of the path that is contained by the innermost
+ ControlDir.
+
+ If no tree, branch or repository is found, a NotBranchError is raised.
+ """
+ controldir, relpath = klass.open_containing(location)
+ try:
+ tree, branch = controldir._get_tree_branch()
+ except errors.NotBranchError:
+ try:
+ repo = controldir.find_repository()
+ return None, None, repo, relpath
+ except (errors.NoRepositoryPresent):
+ raise errors.NotBranchError(location)
+ return tree, branch, branch.repository, relpath
+
+ @classmethod
+ def create(klass, base, format=None, possible_transports=None):
+ """Create a new ControlDir at the url 'base'.
+
+ :param format: If supplied, the format of branch to create. If not
+ supplied, the default is used.
+ :param possible_transports: If supplied, a list of transports that
+ can be reused to share a remote connection.
+ """
+ if klass is not ControlDir:
+            raise AssertionError("ControlDir.create always creates the "
+                "default format, not one of %r" % klass)
+ t = _mod_transport.get_transport(base, possible_transports)
+ t.ensure_base()
+ if format is None:
+ format = ControlDirFormat.get_default_format()
+ return format.initialize_on_transport(t)
+
+
+class ControlDirHooks(hooks.Hooks):
+ """Hooks for ControlDir operations."""
+
+ def __init__(self):
+ """Create the default hooks."""
+ hooks.Hooks.__init__(self, "bzrlib.controldir", "ControlDir.hooks")
+ self.add_hook('pre_open',
+ "Invoked before attempting to open a ControlDir with the transport "
+ "that the open will use.", (1, 14))
+ self.add_hook('post_repo_init',
+ "Invoked after a repository has been initialized. "
+ "post_repo_init is called with a "
+ "bzrlib.controldir.RepoInitHookParams.",
+ (2, 2))
+
+# install the default hooks
+ControlDir.hooks = ControlDirHooks()
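+
+# A hook-usage sketch, assuming the standard bzrlib Hooks.install_named_hook
+# API (the callback name below is hypothetical):
+#
+#   def announce_repo_init(params):
+#       # params is a RepoInitHookParams, defined later in this module
+#       trace.note("created repository %s", params.repository)
+#
+#   ControlDir.hooks.install_named_hook('post_repo_init', announce_repo_init,
+#       'announce repository creation')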
+
+
+class ControlComponentFormat(object):
+ """A component that can live inside of a control directory."""
+
+ upgrade_recommended = False
+
+ def get_format_description(self):
+ """Return the short description for this format."""
+ raise NotImplementedError(self.get_format_description)
+
+ def is_supported(self):
+ """Is this format supported?
+
+ Supported formats must be initializable and openable.
+ Unsupported formats may not support initialization or committing or
+ some other features depending on the reason for not being supported.
+ """
+ return True
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ """Give an error or warning on old formats.
+
+ :param allow_unsupported: If true, allow opening
+ formats that are strongly deprecated, and which may
+ have limited functionality.
+
+ :param recommend_upgrade: If true (default), warn
+ the user through the ui object that they may wish
+ to upgrade the object.
+ """
+ if not allow_unsupported and not self.is_supported():
+ # see open_downlevel to open legacy branches.
+ raise errors.UnsupportedFormatError(format=self)
+ if recommend_upgrade and self.upgrade_recommended:
+ ui.ui_factory.recommend_upgrade(
+ self.get_format_description(), basedir)
+
+ @classmethod
+ def get_format_string(cls):
+ raise NotImplementedError(cls.get_format_string)
+
+
+class ControlComponentFormatRegistry(registry.FormatRegistry):
+ """A registry for control components (branch, workingtree, repository)."""
+
+ def __init__(self, other_registry=None):
+ super(ControlComponentFormatRegistry, self).__init__(other_registry)
+ self._extra_formats = []
+
+ def register(self, format):
+ """Register a new format."""
+ super(ControlComponentFormatRegistry, self).register(
+ format.get_format_string(), format)
+
+ def remove(self, format):
+ """Remove a registered format."""
+ super(ControlComponentFormatRegistry, self).remove(
+ format.get_format_string())
+
+ def register_extra(self, format):
+ """Register a format that can not be used in a metadir.
+
+ This is mainly useful to allow custom repository formats, such as older
+ Bazaar formats and foreign formats, to be tested.
+ """
+ self._extra_formats.append(registry._ObjectGetter(format))
+
+ def remove_extra(self, format):
+ """Remove an extra format.
+ """
+ self._extra_formats.remove(registry._ObjectGetter(format))
+
+ def register_extra_lazy(self, module_name, member_name):
+ """Register a format lazily.
+ """
+ self._extra_formats.append(
+ registry._LazyObjectGetter(module_name, member_name))
+
+ def _get_extra(self):
+ """Return all "extra" formats, not usable in meta directories."""
+ result = []
+ for getter in self._extra_formats:
+ f = getter.get_obj()
+ if callable(f):
+ f = f()
+ result.append(f)
+ return result
+
+ def _get_all(self):
+ """Return all formats, even those not usable in metadirs.
+ """
+ result = []
+ for name in self.keys():
+ fmt = self.get(name)
+ if callable(fmt):
+ fmt = fmt()
+ result.append(fmt)
+ return result + self._get_extra()
+
+ def _get_all_modules(self):
+ """Return a set of the modules providing objects."""
+ modules = set()
+ for name in self.keys():
+ modules.add(self._get_module(name))
+ for getter in self._extra_formats:
+ modules.add(getter.get_module())
+ return modules
+
+
+class Converter(object):
+ """Converts a disk format object from one format to another."""
+
+ def convert(self, to_convert, pb):
+ """Perform the conversion of to_convert, giving feedback via pb.
+
+ :param to_convert: The disk object to convert.
+ :param pb: a progress bar to use for progress information.
+ """
+
+ def step(self, message):
+ """Update the pb by a step."""
+        self.count += 1
+ self.pb.update(message, self.count, self.total)
+
+
+class ControlDirFormat(object):
+ """An encapsulation of the initialization and open routines for a format.
+
+ Formats provide three things:
+ * An initialization routine,
+ * a format string,
+ * an open routine.
+
+ Formats are placed in a dict by their format string for reference
+ during controldir opening. These should be subclasses of ControlDirFormat
+ for consistency.
+
+ Once a format is deprecated, just deprecate the initialize and open
+ methods on the format class. Do not deprecate the object, as the
+    object will be created on every system load.
+
+    :cvar colocated_branches: Whether this format supports colocated branches.
+    :cvar supports_workingtrees: Whether this control directory can co-exist
+        with a working tree.
+ """
+
+ _default_format = None
+ """The default format used for new control directories."""
+
+ _server_probers = []
+ """The registered server format probers, e.g. RemoteBzrProber.
+
+ This is a list of Prober-derived classes.
+ """
+
+ _probers = []
+ """The registered format probers, e.g. BzrProber.
+
+ This is a list of Prober-derived classes.
+ """
+
+ colocated_branches = False
+ """Whether co-located branches are supported for this control dir format.
+ """
+
+ supports_workingtrees = True
+ """Whether working trees can exist in control directories of this format.
+ """
+
+    fixed_components = False
+    """Whether components cannot change format independently of the control dir.
+ """
+
+ upgrade_recommended = False
+ """Whether an upgrade from this format is recommended."""
+
+ def get_format_description(self):
+ """Return the short description for this format."""
+ raise NotImplementedError(self.get_format_description)
+
+ def get_converter(self, format=None):
+ """Return the converter to use to convert controldirs needing converts.
+
+ This returns a bzrlib.controldir.Converter object.
+
+ This should return the best upgrader to step this format towards the
+ current default format. In the case of plugins we can/should provide
+ some means for them to extend the range of returnable converters.
+
+ :param format: Optional format to override the default format of the
+ library.
+ """
+ raise NotImplementedError(self.get_converter)
+
+ def is_supported(self):
+ """Is this format supported?
+
+ Supported formats must be openable.
+ Unsupported formats may not support initialization or committing or
+ some other features depending on the reason for not being supported.
+ """
+ return True
+
+ def is_initializable(self):
+ """Whether new control directories of this format can be initialized.
+ """
+ return self.is_supported()
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ """Give an error or warning on old formats.
+
+ :param allow_unsupported: If true, allow opening
+ formats that are strongly deprecated, and which may
+ have limited functionality.
+
+ :param recommend_upgrade: If true (default), warn
+ the user through the ui object that they may wish
+ to upgrade the object.
+ """
+ if not allow_unsupported and not self.is_supported():
+ # see open_downlevel to open legacy branches.
+ raise errors.UnsupportedFormatError(format=self)
+ if recommend_upgrade and self.upgrade_recommended:
+ ui.ui_factory.recommend_upgrade(
+ self.get_format_description(), basedir)
+
+ def same_model(self, target_format):
+ return (self.repository_format.rich_root_data ==
+ target_format.rich_root_data)
+
+ @classmethod
+ def register_format(klass, format):
+ """Register a format that does not use '.bzr' for its control dir.
+
+ """
+ raise errors.BzrError("ControlDirFormat.register_format() has been "
+ "removed in Bazaar 2.4. Please upgrade your plugins.")
+
+ @classmethod
+ def register_prober(klass, prober):
+ """Register a prober that can look for a control dir.
+
+ """
+ klass._probers.append(prober)
+
+ @classmethod
+ def unregister_prober(klass, prober):
+ """Unregister a prober.
+
+ """
+ klass._probers.remove(prober)
+
+ @classmethod
+ def register_server_prober(klass, prober):
+ """Register a control format prober for client-server environments.
+
+ These probers will be used before ones registered with
+        register_prober. This gives such implementations the chance to claim
+        the control dir before anything looks at the contents of the format
+        file.
+ """
+ klass._server_probers.append(prober)
+
+ def __str__(self):
+ # Trim the newline
+ return self.get_format_description().rstrip()
+
+ @classmethod
+ def all_probers(klass):
+ return klass._server_probers + klass._probers
+
+ @classmethod
+ def known_formats(klass):
+ """Return all the known formats.
+ """
+ result = set()
+ for prober_kls in klass.all_probers():
+ result.update(prober_kls.known_formats())
+ return result
+
+ @classmethod
+ def find_format(klass, transport, probers=None):
+ """Return the format present at transport."""
+ if probers is None:
+ probers = klass.all_probers()
+ for prober_kls in probers:
+ prober = prober_kls()
+ try:
+ return prober.probe_transport(transport)
+ except errors.NotBranchError:
+ # this format does not find a control dir here.
+ pass
+ raise errors.NotBranchError(path=transport.base)
+
+ def initialize(self, url, possible_transports=None):
+ """Create a control dir at this url and return an opened copy.
+
+ While not deprecated, this method is very specific and its use will
+        lead to many round trips to set up a working environment. See
+ initialize_on_transport_ex for a [nearly] all-in-one method.
+
+ Subclasses should typically override initialize_on_transport
+ instead of this method.
+ """
+ return self.initialize_on_transport(
+ _mod_transport.get_transport(url, possible_transports))
+
+ def initialize_on_transport(self, transport):
+ """Initialize a new controldir in the base directory of a Transport."""
+ raise NotImplementedError(self.initialize_on_transport)
+
+ def initialize_on_transport_ex(self, transport, use_existing_dir=False,
+ create_prefix=False, force_new_repo=False, stacked_on=None,
+ stack_on_pwd=None, repo_format_name=None, make_working_trees=None,
+ shared_repo=False, vfs_only=False):
+ """Create this format on transport.
+
+ The directory to initialize will be created.
+
+ :param force_new_repo: Do not use a shared repository for the target,
+ even if one is available.
+ :param create_prefix: Create any missing directories leading up to
+ to_transport.
+ :param use_existing_dir: Use an existing directory if one exists.
+ :param stacked_on: A url to stack any created branch on, None to follow
+ any target stacking policy.
+ :param stack_on_pwd: If stack_on is relative, the location it is
+ relative to.
+ :param repo_format_name: If non-None, a repository will be
+ made-or-found. Should none be found, or if force_new_repo is True
+ the repo_format_name is used to select the format of repository to
+ create.
+ :param make_working_trees: Control the setting of make_working_trees
+ for a new shared repository when one is made. None to use whatever
+ default the format has.
+ :param shared_repo: Control whether made repositories are shared or
+ not.
+ :param vfs_only: If True do not attempt to use a smart server
+ :return: repo, controldir, require_stacking, repository_policy. repo is
+ None if none was created or found, controldir is always valid.
+ require_stacking is the result of examining the stacked_on
+ parameter and any stacking policy found for the target.
+ """
+ raise NotImplementedError(self.initialize_on_transport_ex)
+
+ def network_name(self):
+ """A simple byte string uniquely identifying this format for RPC calls.
+
+ Bzr control formats use this disk format string to identify the format
+        over the wire. It's possible that other control formats have more
+ complex detection requirements, so we permit them to use any unique and
+ immutable string they desire.
+ """
+ raise NotImplementedError(self.network_name)
+
+ def open(self, transport, _found=False):
+ """Return an instance of this format for the dir transport points at.
+ """
+ raise NotImplementedError(self.open)
+
+ @classmethod
+ def _set_default_format(klass, format):
+ """Set default format (for testing behavior of defaults only)"""
+ klass._default_format = format
+
+ @classmethod
+ def get_default_format(klass):
+ """Return the current default format."""
+ return klass._default_format
+
+ def supports_transport(self, transport):
+ """Check if this format can be opened over a particular transport.
+ """
+ raise NotImplementedError(self.supports_transport)
+
+
+class Prober(object):
+ """Abstract class that can be used to detect a particular kind of
+ control directory.
+
+ At the moment this just contains a single method to probe a particular
+ transport, but it may be extended in the future to e.g. avoid
+ multiple levels of probing for Subversion repositories.
+
+ See BzrProber and RemoteBzrProber in bzrlib.bzrdir for the
+ probers that detect .bzr/ directories and Bazaar smart servers,
+ respectively.
+
+ Probers should be registered using the register_server_prober or
+ register_prober methods on ControlDirFormat.
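+
+    A registration sketch (MyProber is a hypothetical prober)::
+
+        class MyProber(Prober):
+
+            def probe_transport(self, transport):
+                raise errors.NotBranchError(path=transport.base)
+
+            @classmethod
+            def known_formats(cls):
+                return set()
+
+        ControlDirFormat.register_prober(MyProber)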
+ """
+
+ def probe_transport(self, transport):
+ """Return the controldir style format present in a directory.
+
+ :raise UnknownFormatError: If a control dir was found but is
+ in an unknown format.
+ :raise NotBranchError: If no control directory was found.
+ :return: A ControlDirFormat instance.
+ """
+ raise NotImplementedError(self.probe_transport)
+
+ @classmethod
+ def known_formats(klass):
+ """Return the control dir formats known by this prober.
+
+ Multiple probers can return the same formats, so this should
+ return a set.
+
+ :return: A set of known formats.
+ """
+ raise NotImplementedError(klass.known_formats)
+
+
+class ControlDirFormatInfo(object):
+
+ def __init__(self, native, deprecated, hidden, experimental):
+ self.deprecated = deprecated
+ self.native = native
+ self.hidden = hidden
+ self.experimental = experimental
+
+
+class ControlDirFormatRegistry(registry.Registry):
+ """Registry of user-selectable ControlDir subformats.
+
+ Differs from ControlDirFormat._formats in that it provides sub-formats,
+ e.g. BzrDirMeta1 with weave repository. Also, it's more user-oriented.
+ """
+
+ def __init__(self):
+ """Create a ControlDirFormatRegistry."""
+ self._aliases = set()
+ self._registration_order = list()
+ super(ControlDirFormatRegistry, self).__init__()
+
+ def aliases(self):
+ """Return a set of the format names which are aliases."""
+ return frozenset(self._aliases)
+
+ def register(self, key, factory, help, native=True, deprecated=False,
+ hidden=False, experimental=False, alias=False):
+ """Register a ControlDirFormat factory.
+
+ The factory must be a callable that takes one parameter: the key.
+ It must produce an instance of the ControlDirFormat when called.
+
+ This function mainly exists to prevent the info object from being
+ supplied directly.
+ """
+ registry.Registry.register(self, key, factory, help,
+ ControlDirFormatInfo(native, deprecated, hidden, experimental))
+ if alias:
+ self._aliases.add(key)
+ self._registration_order.append(key)
+
+ def register_lazy(self, key, module_name, member_name, help, native=True,
+ deprecated=False, hidden=False, experimental=False, alias=False):
+ registry.Registry.register_lazy(self, key, module_name, member_name,
+ help, ControlDirFormatInfo(native, deprecated, hidden, experimental))
+ if alias:
+ self._aliases.add(key)
+ self._registration_order.append(key)
+
+ def set_default(self, key):
+ """Set the 'default' key to be a clone of the supplied key.
+
+ This method must be called once and only once.
+ """
+ registry.Registry.register(self, 'default', self.get(key),
+ self.get_help(key), info=self.get_info(key))
+ self._aliases.add('default')
+
+ def set_default_repository(self, key):
+ """Set the FormatRegistry default and Repository default.
+
+ This is a transitional method while Repository.set_default_format
+ is deprecated.
+ """
+ if 'default' in self:
+ self.remove('default')
+ self.set_default(key)
+ format = self.get('default')()
+
+ def make_bzrdir(self, key):
+ return self.get(key)()
+
+ def help_topic(self, topic):
+ output = ""
+ default_realkey = None
+ default_help = self.get_help('default')
+ help_pairs = []
+ for key in self._registration_order:
+ if key == 'default':
+ continue
+ help = self.get_help(key)
+ if help == default_help:
+ default_realkey = key
+ else:
+ help_pairs.append((key, help))
+
+ def wrapped(key, help, info):
+ if info.native:
+ help = '(native) ' + help
+ return ':%s:\n%s\n\n' % (key,
+ textwrap.fill(help, initial_indent=' ',
+ subsequent_indent=' ',
+ break_long_words=False))
+ if default_realkey is not None:
+ output += wrapped(default_realkey, '(default) %s' % default_help,
+ self.get_info('default'))
+ deprecated_pairs = []
+ experimental_pairs = []
+ for key, help in help_pairs:
+ info = self.get_info(key)
+ if info.hidden:
+ continue
+ elif info.deprecated:
+ deprecated_pairs.append((key, help))
+ elif info.experimental:
+ experimental_pairs.append((key, help))
+ else:
+ output += wrapped(key, help, info)
+ output += "\nSee :doc:`formats-help` for more about storage formats."
+ other_output = ""
+ if len(experimental_pairs) > 0:
+ other_output += "Experimental formats are shown below.\n\n"
+ for key, help in experimental_pairs:
+ info = self.get_info(key)
+ other_output += wrapped(key, help, info)
+ else:
+ other_output += \
+ "No experimental formats are available.\n\n"
+ if len(deprecated_pairs) > 0:
+ other_output += "\nDeprecated formats are shown below.\n\n"
+ for key, help in deprecated_pairs:
+ info = self.get_info(key)
+ other_output += wrapped(key, help, info)
+ else:
+ other_output += \
+ "\nNo deprecated formats are available.\n\n"
+ other_output += \
+ "\nSee :doc:`formats-help` for more about storage formats."
+
+ if topic == 'other-formats':
+ return other_output
+ else:
+ return output
+
+
+class RepoInitHookParams(object):
+ """Object holding parameters passed to `*_repo_init` hooks.
+
+ There are 4 fields that hooks may wish to access:
+
+ :ivar repository: Repository created
+ :ivar format: Repository format
+ :ivar bzrdir: The controldir for the repository
+ :ivar shared: The repository is shared
+ """
+
+ def __init__(self, repository, format, controldir, shared):
+ """Create a group of RepoInitHook parameters.
+
+ :param repository: Repository created
+ :param format: Repository format
+ :param controldir: The controldir for the repository
+ :param shared: The repository is shared
+ """
+ self.repository = repository
+ self.format = format
+ self.bzrdir = controldir
+ self.shared = shared
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ if self.repository:
+ return "<%s for %s>" % (self.__class__.__name__,
+ self.repository)
+ else:
+ return "<%s for %s>" % (self.__class__.__name__,
+ self.bzrdir)
+
+
+# Please register new formats after old formats so that formats
+# appear in chronological order and format descriptions can build
+# on previous ones.
+format_registry = ControlDirFormatRegistry()
+
+network_format_registry = registry.FormatRegistry()
+"""Registry of formats indexed by their network name.
+
+The network name for a ControlDirFormat is an identifier that can be used when
+referring to formats with smart server operations. See
+ControlDirFormat.network_name() for more detail.
+"""
diff --git a/bzrlib/counted_lock.py b/bzrlib/counted_lock.py
new file mode 100644
index 0000000..5d5e501
--- /dev/null
+++ b/bzrlib/counted_lock.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2007, 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Counted lock class"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ )
+
+
+class CountedLock(object):
+ """Decorator around a lock that makes it reentrant.
+
+ This can be used with any object that provides a basic Lock interface,
+ including LockDirs and OS file locks.
+
+ :ivar _token: While a write lock is held, this is the token
+ for it.
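+
+    A usage sketch (the wrapped lock object is hypothetical)::
+
+        lock = CountedLock(real_lock)
+        lock.lock_read()
+        lock.lock_read()   # reentrant: only the count is incremented
+        lock.unlock()
+        lock.unlock()      # the underlying lock is released here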
+ """
+
+ def __init__(self, real_lock):
+ self._real_lock = real_lock
+ self._lock_mode = None
+ self._lock_count = 0
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__,
+ self._real_lock)
+
+ def break_lock(self):
+ self._real_lock.break_lock()
+ self._lock_mode = None
+ self._lock_count = 0
+
+ def get_physical_lock_status(self):
+ """Return physical lock status.
+
+ Returns true if a lock is held on the transport. If no lock is held, or
+ the underlying locking mechanism does not support querying lock
+ status, false is returned.
+ """
+ try:
+ return self._real_lock.peek() is not None
+ except NotImplementedError:
+ return False
+
+ def is_locked(self):
+ return self._lock_mode is not None
+
+ def lock_read(self):
+ """Acquire the lock in read mode.
+
+ If the lock is already held in either read or write mode this
+ increments the count and succeeds. If the lock is not already held,
+ it is taken in read mode.
+ """
+ if self._lock_mode:
+ self._lock_count += 1
+ else:
+ self._real_lock.lock_read()
+ self._lock_count = 1
+ self._lock_mode = 'r'
+
+ def lock_write(self, token=None):
+ """Acquire the lock in write mode.
+
+ If the lock was originally acquired in read mode this will fail.
+
+ :param token: If given and the lock is already held,
+ then validate that we already hold the real
+ lock with this token.
+
+ :returns: The token from the underlying lock.
+ """
+ if self._lock_count == 0:
+ self._token = self._real_lock.lock_write(token=token)
+ self._lock_mode = 'w'
+ self._lock_count += 1
+ return self._token
+ elif self._lock_mode != 'w':
+ raise errors.ReadOnlyError(self)
+ else:
+ self._real_lock.validate_token(token)
+ self._lock_count += 1
+ return self._token
+
+ def unlock(self):
+ if self._lock_count == 0:
+ raise errors.LockNotHeld(self)
+ elif self._lock_count == 1:
+            # these are reset first; if we fail to unlock, the most
+            # reasonable assumption is that we no longer hold the lock
+ self._lock_mode = None
+ self._lock_count -= 1
+ self._real_lock.unlock()
+ else:
+ self._lock_count -= 1
diff --git a/bzrlib/crash.py b/bzrlib/crash.py
new file mode 100644
index 0000000..3eac8fa
--- /dev/null
+++ b/bzrlib/crash.py
@@ -0,0 +1,270 @@
+# Copyright (C) 2009-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Handling and reporting crashes.
+
+A crash is an exception propagated up almost to the top level of Bazaar.
+
+If we have apport <https://launchpad.net/apport/>, we store a report of the
+crash using apport into its /var/crash spool directory, from where the user
+can manually send it to Launchpad. In some cases (at least Ubuntu
+development releases), Apport may pop up a window asking if they want
+to send it.
+
+Without apport, we just write a crash report to stderr and the user can report
+this manually if they wish.
+
+We never send crash data across the network without user opt-in.
+
+In principle apport can run on any platform though as of Feb 2010 there seem
+to be some portability bugs.
+
+To force this off in bzr, set APPORT_DISABLE in the environment or use
+-Dno_apport.
+"""
+
+from __future__ import absolute_import
+
+# for interactive testing, try the 'bzr assert-fail' command
+# or see http://code.launchpad.net/~mbp/bzr/bzr-fail
+#
+# to test with apport it's useful to set
+# export APPORT_IGNORE_OBSOLETE_PACKAGES=1
+
+import os
+import platform
+import pprint
+import sys
+import time
+from StringIO import StringIO
+
+import bzrlib
+from bzrlib import (
+ config,
+ debug,
+ osutils,
+ plugin,
+ trace,
+ )
+
+
+def report_bug(exc_info, stderr):
+ if ('no_apport' in debug.debug_flags) or \
+ os.environ.get('APPORT_DISABLE', None):
+ return report_bug_legacy(exc_info, stderr)
+ try:
+ if report_bug_to_apport(exc_info, stderr):
+ # wrote a file; if None then report the old way
+ return
+ except ImportError, e:
+ trace.mutter("couldn't find apport bug-reporting library: %s" % e)
+ except Exception, e:
+ # this should only happen if apport is installed but it didn't
+ # work, eg because of an io error writing the crash file
+ trace.mutter("bzr: failed to report crash using apport: %r" % e)
+ trace.log_exception_quietly()
+ return report_bug_legacy(exc_info, stderr)
+
+
+def report_bug_legacy(exc_info, err_file):
+ """Report a bug by just printing a message to the user."""
+ trace.print_exception(exc_info, err_file)
+ err_file.write('\n')
+ import textwrap
+ def print_wrapped(l):
+ err_file.write(textwrap.fill(l,
+ width=78, subsequent_indent=' ') + '\n')
+ print_wrapped('bzr %s on python %s (%s)\n' % \
+ (bzrlib.__version__,
+ bzrlib._format_version_tuple(sys.version_info),
+ platform.platform(aliased=1)))
+ print_wrapped('arguments: %r\n' % sys.argv)
+ print_wrapped(textwrap.fill(
+ 'plugins: ' + plugin.format_concise_plugin_list(),
+ width=78,
+ subsequent_indent=' ',
+ ) + '\n')
+ print_wrapped(
+ 'encoding: %r, fsenc: %r, lang: %r\n' % (
+ osutils.get_user_encoding(), sys.getfilesystemencoding(),
+ os.environ.get('LANG')))
+ # We used to show all the plugins here, but it's too verbose.
+ err_file.write(
+ "\n"
+ "*** Bazaar has encountered an internal error. This probably indicates a\n"
+ " bug in Bazaar. You can help us fix it by filing a bug report at\n"
+ " https://bugs.launchpad.net/bzr/+filebug\n"
+ " including this traceback and a description of the problem.\n"
+ )
+
+
+def report_bug_to_apport(exc_info, stderr):
+ """Report a bug to apport for optional automatic filing.
+
+ :returns: The name of the crash file, or None if we didn't write one.
+ """
+ # this function is based on apport_package_hook.py, but omitting some of the
+ # Ubuntu-specific policy about what to report and when
+
+ # This import is apparently not used, but we're doing it so that if the
+ # import fails, the exception will be caught at a higher level and we'll
+ # report the error by other means.
+ import apport
+
+ crash_filename = _write_apport_report_to_file(exc_info)
+
+ if crash_filename is None:
+ stderr.write("\n"
+ "apport is set to ignore crashes in this version of bzr.\n"
+ )
+ else:
+ trace.print_exception(exc_info, stderr)
+ stderr.write("\n"
+ "You can report this problem to Bazaar's developers by running\n"
+ " apport-bug %s\n"
+ "if a bug-reporting window does not automatically appear.\n"
+ % (crash_filename))
+ # XXX: on Windows, Mac, and other platforms where we might have the
+ # apport libraries but not have an apport always running, we could
+ # synchronously file now
+
+ return crash_filename
+
+
+def _write_apport_report_to_file(exc_info):
+ import traceback
+ from apport.report import Report
+
+ exc_type, exc_object, exc_tb = exc_info
+
+ pr = Report()
+ # add_proc_info sets the ExecutablePath, InterpreterPath, etc.
+ pr.add_proc_info()
+ # It also adds ProcMaps which for us is rarely useful and mostly noise, so
+ # let's remove it.
+ del pr['ProcMaps']
+ pr.add_user_info()
+
+    # Package and SourcePackage are needed so that apport will report even on
+    # non-packaged versions of bzr; this also reports on their packaged
+    # dependencies, which is useful.
+ pr['SourcePackage'] = 'bzr'
+ pr['Package'] = 'bzr'
+
+ pr['CommandLine'] = pprint.pformat(sys.argv)
+ pr['BzrVersion'] = bzrlib.__version__
+ pr['PythonVersion'] = bzrlib._format_version_tuple(sys.version_info)
+ pr['Platform'] = platform.platform(aliased=1)
+ pr['UserEncoding'] = osutils.get_user_encoding()
+ pr['FileSystemEncoding'] = sys.getfilesystemencoding()
+ pr['Locale'] = os.environ.get('LANG')
+ pr['BzrPlugins'] = _format_plugin_list()
+ pr['PythonLoadedModules'] = _format_module_list()
+ pr['BzrDebugFlags'] = pprint.pformat(debug.debug_flags)
+
+ # actually we'd rather file directly against the upstream product, but
+ # apport does seem to count on there being one in there; we might need to
+ # redirect it elsewhere anyhow
+ pr['SourcePackage'] = 'bzr'
+ pr['Package'] = 'bzr'
+
+ # tell apport to file directly against the bzr package using
+ # <https://bugs.launchpad.net/bzr/+bug/391015>
+ #
+ # XXX: unfortunately apport may crash later if the crashdb definition
+ # file isn't present
+ pr['CrashDb'] = 'bzr'
+
+ tb_file = StringIO()
+ traceback.print_exception(exc_type, exc_object, exc_tb, file=tb_file)
+ pr['Traceback'] = tb_file.getvalue()
+
+ _attach_log_tail(pr)
+
+ # We want to use the 'bzr' crashdb so that it gets sent directly upstream,
+ # which is a reasonable default for most internal errors. However, if we
+ # set it here then apport will crash later if it doesn't know about that
+ # crashdb. Instead, we rely on the bzr package installing both a
+ # source hook telling crashes to go to this crashdb, and a crashdb
+ # configuration describing it.
+
+ # these may contain some sensitive info (smtp_passwords)
+ # TODO: strip that out and attach the rest
+ #
+ #attach_file_if_exists(report,
+ # os.path.join(dot_bzr, 'bazaar.conf', 'BzrConfig')
+ #attach_file_if_exists(report,
+ # os.path.join(dot_bzr, 'locations.conf', 'BzrLocations')
+
+ # strip username, hostname, etc
+ pr.anonymize()
+
+ if pr.check_ignored():
+ # eg configured off in ~/.apport-ignore.xml
+ return None
+ else:
+ crash_file_name, crash_file = _open_crash_file()
+ pr.write(crash_file)
+ crash_file.close()
+ return crash_file_name
+
+
+def _attach_log_tail(pr):
+ try:
+ bzr_log = open(trace._get_bzr_log_filename(), 'rt')
+ except (IOError, OSError), e:
+ pr['BzrLogTail'] = repr(e)
+ return
+ try:
+ lines = bzr_log.readlines()
+ pr['BzrLogTail'] = ''.join(lines[-40:])
+ finally:
+ bzr_log.close()
+
+
+def _open_crash_file():
+ crash_dir = config.crash_dir()
+ if not osutils.isdir(crash_dir):
+ # on unix this should be /var/crash and should already exist; on
+ # Windows or if it's manually configured it might need to be created,
+ # and then it should be private
+ os.makedirs(crash_dir, mode=0600)
+ date_string = time.strftime('%Y-%m-%dT%H:%M', time.gmtime())
+ # XXX: getuid doesn't work on win32, but the crash directory is per-user
+ if sys.platform == 'win32':
+ user_part = ''
+ else:
+ user_part = '.%d' % os.getuid()
+ filename = osutils.pathjoin(
+ crash_dir,
+ 'bzr%s.%s.crash' % (
+ user_part,
+ date_string))
+ # be careful here that people can't play tmp-type symlink mischief in the
+ # world-writable directory
+ return filename, os.fdopen(
+ os.open(filename,
+ os.O_WRONLY|os.O_CREAT|os.O_EXCL,
+ 0600),
+ 'w')
+
+
+def _format_plugin_list():
+ return ''.join(plugin.describe_plugins(show_paths=True))
+
+
+def _format_module_list():
+ return pprint.pformat(sys.modules)
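
A minimal sketch, not part of this commit, of handing a caught exception to report_bug(), which prefers apport and falls back to the legacy stderr report; the wrapper function and its exit code are hypothetical.

    import sys

    from bzrlib import crash


    def run_with_crash_reporting(main_func):
        try:
            return main_func()
        except KeyboardInterrupt:
            raise
        except Exception:
            # report_bug() takes the (type, value, traceback) triple plus a
            # writable stream used for the non-apport report.
            crash.report_bug(sys.exc_info(), sys.stderr)
            return 4  # exit code chosen for illustration only
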
diff --git a/bzrlib/debug.py b/bzrlib/debug.py
new file mode 100644
index 0000000..cca8956
--- /dev/null
+++ b/bzrlib/debug.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2005, 2006, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Set of flags that enable different debug behaviour.
+
+These are set with eg ``-Dlock`` on the bzr command line or in
+~/.bazaar/bazaar.conf debug_flags.
+
+See `bzr help debug-flags` or `bzrlib/help_topics/en/debug-flags.txt`
+for a list of the available options.
+"""
+
+from __future__ import absolute_import
+
+debug_flags = set()
+
+
+def set_debug_flags_from_config():
+ """Turn on debug flags based on the global configuration"""
+
+ from bzrlib import config
+
+ c = config.GlobalStack()
+ for f in c.get('debug_flags'):
+ debug_flags.add(f)
+
+
+def set_trace():
+ """Pdb using original stdin and stdout.
+
+ When debugging blackbox tests, sys.stdin and sys.stdout are captured for
+    test purposes and cannot be used for interactive debugging. This function
+    uses the original stdin/stdout to allow such use.
+
+ Instead of doing:
+
+ import pdb; pdb.set_trace()
+
+ you can do:
+
+ from bzrlib import debug; debug.set_trace()
+ """
+ import pdb
+ import sys
+ pdb.Pdb(stdin=sys.__stdin__, stdout=sys.__stdout__
+ ).set_trace(sys._getframe().f_back)
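
A minimal sketch of how code typically consults these flags; the helper function below is invented for illustration, while the 'lock' flag name comes from the -Dlock example in the docstring above.

    from bzrlib import debug, trace


    def maybe_note_lock(url):
        # only emit the extra trace when the user asked for -Dlock
        if 'lock' in debug.debug_flags:
            trace.mutter('acquiring lock on %s', url)
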
diff --git a/bzrlib/decorators.py b/bzrlib/decorators.py
new file mode 100644
index 0000000..bc073e9
--- /dev/null
+++ b/bzrlib/decorators.py
@@ -0,0 +1,359 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__all__ = ['needs_read_lock',
+ 'needs_write_lock',
+ 'use_fast_decorators',
+ 'use_pretty_decorators',
+ ]
+
+
+import sys
+
+from bzrlib import trace
+
+
+def _get_parameters(func):
+ """Recreate the parameters for a function using introspection.
+
+ :return: (function_params, calling_params, default_values)
+ function_params: is a string representing the parameters of the
+ function. (such as "a, b, c=None, d=1")
+ This is used in the function declaration.
+ calling_params: is another string representing how you would call the
+ function with the correct parameters. (such as "a, b, c=c, d=d")
+ Assuming you used function_params in the function declaration, this
+ is the parameters to put in the function call.
+ default_values_block: a dict with the default values to be passed as
+ the scope for the 'exec' statement.
+
+ For example:
+
+ def wrapper(%(function_params)s):
+ return original(%(calling_params)s)
+ """
+ # "import inspect" should stay in local scope. 'inspect' takes a long time
+ # to import the first time. And since we don't always need it, don't import
+ # it globally.
+ import inspect
+ args, varargs, varkw, defaults = inspect.getargspec(func)
+ defaults_dict = {}
+ def formatvalue(value):
+ default_name = '__default_%d' % len(defaults_dict)
+ defaults_dict[default_name] = value
+ return '=' + default_name
+ formatted = inspect.formatargspec(args, varargs=varargs,
+ varkw=varkw,
+ defaults=defaults,
+ formatvalue=formatvalue)
+ if defaults is None:
+ args_passed = args
+ else:
+ first_default = len(args) - len(defaults)
+ args_passed = args[:first_default]
+ for arg in args[first_default:]:
+ args_passed.append("%s=%s" % (arg, arg))
+ if varargs is not None:
+ args_passed.append('*' + varargs)
+ if varkw is not None:
+ args_passed.append('**' + varkw)
+ args_passed = ', '.join(args_passed)
+
+ return formatted[1:-1], args_passed, defaults_dict
+
+
+def _pretty_needs_read_lock(unbound):
+ """Decorate unbound to take out and release a read lock.
+
+ This decorator can be applied to methods of any class with lock_read() and
+ unlock() methods.
+
+ Typical usage:
+
+ class Branch(...):
+ @needs_read_lock
+ def branch_method(self, ...):
+ stuff
+ """
+ # This compiles a function with a similar name, but wrapped with
+ # lock_read/unlock calls. We use dynamic creation, because we need the
+ # internal name of the function to be modified so that --lsprof will see
+ # the correct name.
+ # TODO: jam 20070111 Modify this template so that the generated function
+ # has the same argument signature as the original function, which
+ # will help commands like epydoc.
+ # This seems possible by introspecting foo.func_defaults, and
+ # foo.func_code.co_argcount and foo.func_code.co_varnames
+ template = """\
+def %(name)s_read_locked(%(params)s):
+ self.lock_read()
+ try:
+ result = unbound(%(passed_params)s)
+ except:
+ import sys
+ exc_info = sys.exc_info()
+ try:
+ self.unlock()
+ finally:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ del exc_info
+ else:
+ self.unlock()
+ return result
+read_locked = %(name)s_read_locked
+"""
+ params, passed_params, defaults_dict = _get_parameters(unbound)
+ variables = {'name':unbound.__name__,
+ 'params':params,
+ 'passed_params':passed_params,
+ }
+ func_def = template % variables
+
+ scope = dict(defaults_dict)
+ scope['unbound'] = unbound
+ exec func_def in scope
+ read_locked = scope['read_locked']
+
+ read_locked.__doc__ = unbound.__doc__
+ read_locked.__name__ = unbound.__name__
+ return read_locked
+
+
+def _fast_needs_read_lock(unbound):
+ """Decorate unbound to take out and release a read lock.
+
+ This decorator can be applied to methods of any class with lock_read() and
+ unlock() methods.
+
+ Typical usage:
+
+ class Branch(...):
+ @needs_read_lock
+ def branch_method(self, ...):
+ stuff
+ """
+ def read_locked(self, *args, **kwargs):
+ self.lock_read()
+ try:
+ result = unbound(self, *args, **kwargs)
+ except:
+ import sys
+ exc_info = sys.exc_info()
+ try:
+ self.unlock()
+ finally:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ del exc_info
+ else:
+ self.unlock()
+ return result
+ read_locked.__doc__ = unbound.__doc__
+ read_locked.__name__ = unbound.__name__
+ return read_locked
+
+
+def _pretty_needs_write_lock(unbound):
+ """Decorate unbound to take out and release a write lock."""
+ template = """\
+def %(name)s_write_locked(%(params)s):
+ self.lock_write()
+ try:
+ result = unbound(%(passed_params)s)
+ except:
+ import sys
+ exc_info = sys.exc_info()
+ try:
+ self.unlock()
+ finally:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ del exc_info
+ else:
+ self.unlock()
+ return result
+write_locked = %(name)s_write_locked
+"""
+ params, passed_params, defaults_dict = _get_parameters(unbound)
+ variables = {'name':unbound.__name__,
+ 'params':params,
+ 'passed_params':passed_params,
+ }
+ func_def = template % variables
+
+ scope = dict(defaults_dict)
+ scope['unbound'] = unbound
+ exec func_def in scope
+ write_locked = scope['write_locked']
+
+ write_locked.__doc__ = unbound.__doc__
+ write_locked.__name__ = unbound.__name__
+ return write_locked
+
+
+def _fast_needs_write_lock(unbound):
+ """Decorate unbound to take out and release a write lock."""
+ def write_locked(self, *args, **kwargs):
+ self.lock_write()
+ try:
+ result = unbound(self, *args, **kwargs)
+ except:
+ exc_info = sys.exc_info()
+ try:
+ self.unlock()
+ finally:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ del exc_info
+ else:
+ self.unlock()
+ return result
+ write_locked.__doc__ = unbound.__doc__
+ write_locked.__name__ = unbound.__name__
+ return write_locked
+
+
+def only_raises(*errors):
+ """Make a decorator that will only allow the given error classes to be
+ raised. All other errors will be logged and then discarded.
+
+ Typical use is something like::
+
+ @only_raises(LockNotHeld, LockBroken)
+ def unlock(self):
+ # etc
+ """
+ def decorator(unbound):
+ def wrapped(*args, **kwargs):
+ try:
+ return unbound(*args, **kwargs)
+ except errors:
+ raise
+ except:
+ trace.mutter('Error suppressed by only_raises:')
+ trace.log_exception_quietly()
+ wrapped.__doc__ = unbound.__doc__
+ wrapped.__name__ = unbound.__name__
+ return wrapped
+ return decorator
+
+
+# Default is more functionality, 'bzr' the commandline will request fast
+# versions.
+needs_read_lock = _pretty_needs_read_lock
+needs_write_lock = _pretty_needs_write_lock
+
+
+def use_fast_decorators():
+ """Change the default decorators to be fast loading ones.
+
+ The alternative is to have decorators that do more work to produce
+ nice-looking decorated functions, but this slows startup time.
+ """
+ global needs_read_lock, needs_write_lock
+ needs_read_lock = _fast_needs_read_lock
+ needs_write_lock = _fast_needs_write_lock
+
+
+def use_pretty_decorators():
+ """Change the default decorators to be pretty ones."""
+ global needs_read_lock, needs_write_lock
+ needs_read_lock = _pretty_needs_read_lock
+ needs_write_lock = _pretty_needs_write_lock
+
+
+# This implementation of cachedproperty is copied from Launchpad's
+# canonical.launchpad.cachedproperty module (with permission from flacoste)
+# -- spiv & vila 100120
+def cachedproperty(attrname_or_fn):
+ """A decorator for methods that makes them properties with their return
+ value cached.
+
+ The value is cached on the instance, using the attribute name provided.
+
+ If you don't provide a name, the mangled name of the property is used.
+
+ >>> class CachedPropertyTest(object):
+ ...
+ ... @cachedproperty('_foo_cache')
+ ... def foo(self):
+ ... print 'foo computed'
+ ... return 23
+ ...
+ ... @cachedproperty
+ ... def bar(self):
+ ... print 'bar computed'
+ ... return 69
+
+ >>> cpt = CachedPropertyTest()
+ >>> getattr(cpt, '_foo_cache', None) is None
+ True
+ >>> cpt.foo
+ foo computed
+ 23
+ >>> cpt.foo
+ 23
+ >>> cpt._foo_cache
+ 23
+ >>> cpt.bar
+ bar computed
+ 69
+ >>> cpt._bar_cached_value
+ 69
+
+ """
+ if isinstance(attrname_or_fn, basestring):
+ attrname = attrname_or_fn
+ return _CachedPropertyForAttr(attrname)
+ else:
+ fn = attrname_or_fn
+ attrname = '_%s_cached_value' % fn.__name__
+ return _CachedProperty(attrname, fn)
+
+
+class _CachedPropertyForAttr(object):
+
+ def __init__(self, attrname):
+ self.attrname = attrname
+
+ def __call__(self, fn):
+ return _CachedProperty(self.attrname, fn)
+
+
+class _CachedProperty(object):
+
+ def __init__(self, attrname, fn):
+ self.fn = fn
+ self.attrname = attrname
+ self.marker = object()
+
+ def __get__(self, inst, cls=None):
+ if inst is None:
+ return self
+ cachedresult = getattr(inst, self.attrname, self.marker)
+ if cachedresult is self.marker:
+ result = self.fn(inst)
+ setattr(inst, self.attrname, result)
+ return result
+ else:
+ return cachedresult
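
A minimal usage sketch, not part of this commit: any object exposing lock_read()/lock_write()/unlock() can use the decorators defined above, and cachedproperty works on any instance attribute. The Counter class is invented for illustration.

    from bzrlib.decorators import (
        cachedproperty,
        needs_read_lock,
        needs_write_lock,
        )


    class Counter(object):

        def __init__(self):
            self._lock_count = 0
            self.value = 0

        def lock_read(self):
            self._lock_count += 1

        def lock_write(self):
            self._lock_count += 1

        def unlock(self):
            self._lock_count -= 1

        @needs_read_lock
        def get(self):
            # runs between lock_read() and unlock()
            return self.value

        @needs_write_lock
        def increment(self):
            # runs between lock_write() and unlock()
            self.value += 1

        @cachedproperty
        def expensive(self):
            # computed once, then cached as self._expensive_cached_value
            return self.value * 1000
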
diff --git a/bzrlib/delta.h b/bzrlib/delta.h
new file mode 100644
index 0000000..37668f4
--- /dev/null
+++ b/bzrlib/delta.h
@@ -0,0 +1,154 @@
+/*
+ * delta.h: headers for delta functionality
+ *
+ * Adapted from GIT for Bazaar by
+ * John Arbash Meinel <john@arbash-meinel.com> (C) 2009
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DELTA_H
+#define DELTA_H
+
+/* opaque object for delta index */
+struct delta_index;
+
+struct source_info {
+ const void *buf; /* Pointer to the beginning of source data */
+ unsigned long size; /* Total length of source data */
+ unsigned long agg_offset; /* Start of source data as part of the
+ aggregate source */
+};
+
+/* result type for functions that have multiple failure modes */
+typedef enum {
+ DELTA_OK, /* Success */
+ DELTA_OUT_OF_MEMORY, /* Could not allocate required memory */
+ DELTA_INDEX_NEEDED, /* A delta_index must be passed */
+ DELTA_SOURCE_EMPTY, /* A source_info had no content */
+ DELTA_SOURCE_BAD, /* A source_info had invalid or corrupt content */
+    DELTA_BUFFER_EMPTY, /* A buffer pointer or size was empty or NULL */
+ DELTA_SIZE_TOO_BIG, /* Delta data is larger than the max requested */
+} delta_result;
+
+
+/*
+ * create_delta_index: compute index data from given buffer
+ *
+ * Returns a delta_result status, when DELTA_OK then *fresh is set to a struct
+ * delta_index that should be passed to subsequent create_delta() calls, or to
+ * free_delta_index(). Other values are a failure, and *fresh is unset.
+ * The given buffer must not be freed nor altered before free_delta_index() is
+ * called. The resultant struct must be freed using free_delta_index().
+ *
+ * :param max_bytes_to_index: Limit the number of regions to sample to this
+ * amount of text. We will store at most max_bytes_to_index / RABIN_WINDOW
+ * pointers into the source text. Useful if src can be unbounded in size,
+ * and you are willing to trade match accuracy for peak memory.
+ */
+extern delta_result
+create_delta_index(const struct source_info *src,
+ struct delta_index *old,
+ struct delta_index **fresh,
+ int max_bytes_to_index);
+
+
+/*
+ * create_delta_index_from_delta: compute index data from given buffer
+ *
+ * Returns a delta_result status, when DELTA_OK then *fresh is set to a struct
+ * delta_index that should be passed to subsequent create_delta() calls, or to
+ * free_delta_index(). Other values are a failure, and *fresh is unset.
+ * The bytes must be in the form of a delta structure, as generated by
+ * create_delta(). The generated index will only index the insert bytes, and
+ * not any of the control structures.
+ */
+extern delta_result
+create_delta_index_from_delta(const struct source_info *delta,
+ struct delta_index *old,
+ struct delta_index **fresh);
+/*
+ * free_delta_index: free the index created by create_delta_index()
+ *
+ * Given pointer must be what create_delta_index() returned, or NULL.
+ */
+extern void free_delta_index(struct delta_index *index);
+
+/*
+ * sizeof_delta_index: returns memory usage of delta index
+ *
+ * Given pointer must be what create_delta_index() returned, or NULL.
+ */
+extern unsigned long sizeof_delta_index(struct delta_index *index);
+
+/*
+ * create_delta: create a delta from given index for the given buffer
+ *
+ * This function may be called multiple times with different buffers using
+ * the same delta_index pointer. If max_delta_size is non-zero and the
+ * resulting delta is to be larger than max_delta_size then DELTA_SIZE_TOO_BIG
+ * is returned. Otherwise on success, DELTA_OK is returned and *delta_data is
+ * set to a new buffer with the delta data and *delta_size is updated with its
+ * size. That buffer must be freed by the caller.
+ */
+extern delta_result
+create_delta(const struct delta_index *index,
+ const void *buf, unsigned long bufsize,
+ unsigned long *delta_size, unsigned long max_delta_size,
+ void **delta_data);
+
+/* the smallest possible delta size is 3 bytes
+ * Target size, Copy command, Copy length
+ */
+#define DELTA_SIZE_MIN 3
+
+/*
+ * This must be called twice on the delta data buffer, first to get the
+ * expected source buffer size, and again to get the target buffer size.
+ */
+static unsigned long
+get_delta_hdr_size(unsigned char **datap, const unsigned char *top)
+{
+ unsigned char *data = *datap;
+ unsigned char cmd;
+ unsigned long size = 0;
+ int i = 0;
+ do {
+ cmd = *data++;
+ size |= (cmd & ~0x80) << i;
+ i += 7;
+ } while (cmd & 0x80 && data < top);
+ *datap = data;
+ return size;
+}
+
+/*
+ * Return the basic information about a given delta index.
+ * :param index: The delta_index object
+ * :param pos: The offset in the entry list. Start at 0, and walk until you get
+ * 0 as a return code.
+ * :param text_offset: return value, distance to the beginning of all sources
+ * :param hash_val: return value, the RABIN hash associated with this pointer
+ * :return: 1 if pos != -1 (there was data produced)
+ */
+extern int
+get_entry_summary(const struct delta_index *index, int pos,
+ unsigned int *text_offset, unsigned int *hash_val);
+
+/*
+ * Determine what entry index->hash[X] points to.
+ */
+extern int
+get_hash_offset(const struct delta_index *index, int pos,
+ unsigned int *entry_offset);
+
+/*
+ * Compute the rabin_hash of the given data, it is assumed the data is at least
+ * RABIN_WINDOW wide (16 bytes).
+ */
+extern unsigned int
+rabin_hash(const unsigned char *data);
+
+#endif
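
The get_delta_hdr_size() helper above decodes the usual base-128 length header: seven payload bits per byte, with the high bit as a continuation flag. A rough Python equivalent, offered only as an illustration of the format (Python 2 style, matching the rest of the tree):

    def decode_delta_hdr_size(data, offset=0):
        """Decode a little-endian base-128 size starting at data[offset].

        Returns (size, new_offset). Mirrors get_delta_hdr_size() in delta.h,
        including stopping at the end of the buffer.
        """
        size = 0
        shift = 0
        while True:
            cmd = ord(data[offset])
            offset += 1
            size |= (cmd & 0x7f) << shift
            shift += 7
            if not (cmd & 0x80) or offset >= len(data):
                break
        return size, offset
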
diff --git a/bzrlib/delta.py b/bzrlib/delta.py
new file mode 100644
index 0000000..ba54fcc
--- /dev/null
+++ b/bzrlib/delta.py
@@ -0,0 +1,455 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ osutils,
+ )
+from bzrlib.trace import is_quiet
+
+
+class TreeDelta(object):
+ """Describes changes from one tree to another.
+
+    Contains eight lists:
+
+    added
+        (path, id, kind)
+    removed
+        (path, id, kind)
+    renamed
+        (oldpath, newpath, id, kind, text_modified, meta_modified)
+    kind_changed
+        (path, id, old_kind, new_kind)
+    modified
+        (path, id, kind, text_modified, meta_modified)
+    unchanged
+        (path, id, kind)
+    unversioned
+        (path, None, kind)
+    missing
+        (path, id, kind)
+
+ Each id is listed only once.
+
+ Files that are both modified and renamed are listed only in
+ renamed, with the text_modified flag true. The text_modified
+ applies either to the content of the file or the target of the
+ symbolic link, depending of the kind of file.
+
+ Files are only considered renamed if their name has changed or
+ their parent directory has changed. Renaming a directory
+ does not count as renaming all its contents.
+
+ The lists are normally sorted when the delta is created.
+ """
+ def __init__(self):
+ self.added = []
+ self.removed = []
+ self.renamed = []
+ self.kind_changed = []
+ self.modified = []
+ self.unchanged = []
+ self.unversioned = []
+ self.missing = []
+
+ def __eq__(self, other):
+ if not isinstance(other, TreeDelta):
+ return False
+ return self.added == other.added \
+ and self.removed == other.removed \
+ and self.renamed == other.renamed \
+ and self.modified == other.modified \
+ and self.unchanged == other.unchanged \
+ and self.kind_changed == other.kind_changed \
+ and self.unversioned == other.unversioned
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "TreeDelta(added=%r, removed=%r, renamed=%r," \
+ " kind_changed=%r, modified=%r, unchanged=%r," \
+ " unversioned=%r)" % (self.added,
+ self.removed, self.renamed, self.kind_changed, self.modified,
+ self.unchanged, self.unversioned)
+
+ def has_changed(self):
+ return bool(self.modified
+ or self.added
+ or self.removed
+ or self.renamed
+ or self.kind_changed)
+
+ def touches_file_id(self, file_id):
+ """Return True if file_id is modified by this delta."""
+ for l in self.added, self.removed, self.modified:
+ for v in l:
+ if v[1] == file_id:
+ return True
+ for v in self.renamed:
+ if v[2] == file_id:
+ return True
+ for v in self.kind_changed:
+ if v[1] == file_id:
+ return True
+ return False
+
+ def get_changes_as_text(self, show_ids=False, show_unchanged=False,
+ short_status=False):
+ import StringIO
+ output = StringIO.StringIO()
+ report_delta(output, self, short_status, show_ids, show_unchanged)
+ return output.getvalue()
+
+
+def _compare_trees(old_tree, new_tree, want_unchanged, specific_files,
+ include_root, extra_trees=None,
+ require_versioned=False, want_unversioned=False):
+ """Worker function that implements Tree.changes_from."""
+ delta = TreeDelta()
+ # mutter('start compare_trees')
+
+ for (file_id, path, content_change, versioned, parent_id, name, kind,
+ executable) in new_tree.iter_changes(old_tree, want_unchanged,
+ specific_files, extra_trees=extra_trees,
+ require_versioned=require_versioned,
+ want_unversioned=want_unversioned):
+ if versioned == (False, False):
+ delta.unversioned.append((path[1], None, kind[1]))
+ continue
+ if not include_root and (None, None) == parent_id:
+ continue
+ fully_present = tuple((versioned[x] and kind[x] is not None) for
+ x in range(2))
+ if fully_present[0] != fully_present[1]:
+ if fully_present[1] is True:
+ delta.added.append((path[1], file_id, kind[1]))
+ else:
+ delta.removed.append((path[0], file_id, kind[0]))
+ elif fully_present[0] is False:
+ delta.missing.append((path[1], file_id, kind[1]))
+ elif name[0] != name[1] or parent_id[0] != parent_id[1]:
+ # If the name changes, or the parent_id changes, we have a rename
+ # (if we move a parent, that doesn't count as a rename for the
+ # file)
+ delta.renamed.append((path[0],
+ path[1],
+ file_id,
+ kind[1],
+ content_change,
+ (executable[0] != executable[1])))
+ elif kind[0] != kind[1]:
+ delta.kind_changed.append((path[1], file_id, kind[0], kind[1]))
+ elif content_change or executable[0] != executable[1]:
+ delta.modified.append((path[1], file_id, kind[1],
+ content_change,
+ (executable[0] != executable[1])))
+ else:
+ delta.unchanged.append((path[1], file_id, kind[1]))
+
+ delta.removed.sort()
+ delta.added.sort()
+ delta.renamed.sort()
+ delta.missing.sort()
+ # TODO: jam 20060529 These lists shouldn't need to be sorted
+ # since we added them in alphabetical order.
+ delta.modified.sort()
+ delta.unchanged.sort()
+
+ return delta
+
+
+class _ChangeReporter(object):
+ """Report changes between two trees"""
+
+ def __init__(self, output=None, suppress_root_add=True,
+ output_file=None, unversioned_filter=None, view_info=None,
+ classify=True):
+ """Constructor
+
+ :param output: a function with the signature of trace.note, i.e.
+ accepts a format and parameters.
+        :param suppress_root_add: If true, adding the root will be ignored
+ (i.e. when a tree has just been initted)
+ :param output_file: If supplied, a file-like object to write to.
+ Only one of output and output_file may be supplied.
+ :param unversioned_filter: A filter function to be called on
+ unversioned files. This should return True to ignore a path.
+ By default, no filtering takes place.
+ :param view_info: A tuple of view_name,view_files if only
+ items inside a view are to be reported on, or None for
+ no view filtering.
+ :param classify: Add special symbols to indicate file kind.
+ """
+ if output_file is not None:
+ if output is not None:
+                from bzrlib.errors import BzrError
+                raise BzrError('Cannot specify both output and output_file')
+ def output(fmt, *args):
+ output_file.write((fmt % args) + '\n')
+ self.output = output
+ if self.output is None:
+ from bzrlib import trace
+ self.output = trace.note
+ self.suppress_root_add = suppress_root_add
+ self.modified_map = {'kind changed': 'K',
+ 'unchanged': ' ',
+ 'created': 'N',
+ 'modified': 'M',
+ 'deleted': 'D',
+ 'missing': '!',
+ }
+ self.versioned_map = {'added': '+', # versioned target
+ 'unchanged': ' ', # versioned in both
+ 'removed': '-', # versioned in source
+ 'unversioned': '?', # versioned in neither
+ }
+ self.unversioned_filter = unversioned_filter
+ if classify:
+ self.kind_marker = osutils.kind_marker
+ else:
+ self.kind_marker = lambda kind: ''
+ if view_info is None:
+ self.view_name = None
+ self.view_files = []
+ else:
+ self.view_name = view_info[0]
+ self.view_files = view_info[1]
+ self.output("Operating on whole tree but only reporting on "
+ "'%s' view." % (self.view_name,))
+
+ def report(self, file_id, paths, versioned, renamed, modified, exe_change,
+ kind):
+ """Report one change to a file
+
+ :param file_id: The file_id of the file
+        :param paths: The old and new paths as generated by Tree.iter_changes.
+        :param versioned: may be 'added', 'removed', 'unchanged', or
+            'unversioned'.
+ :param renamed: may be True or False
+ :param modified: may be 'created', 'deleted', 'kind changed',
+ 'modified' or 'unchanged'.
+ :param exe_change: True if the execute bit has changed
+ :param kind: A pair of file kinds, as generated by Tree.iter_changes.
+ None indicates no file present.
+ """
+ if is_quiet():
+ return
+ if paths[1] == '' and versioned == 'added' and self.suppress_root_add:
+ return
+ if self.view_files and not osutils.is_inside_any(self.view_files,
+ paths[1]):
+ return
+ if versioned == 'unversioned':
+ # skip ignored unversioned files if needed.
+ if self.unversioned_filter is not None:
+ if self.unversioned_filter(paths[1]):
+ return
+            # don't show a content change in the output.
+ modified = 'unchanged'
+ # we show both paths in the following situations:
+ # the file versioning is unchanged AND
+ # ( the path is different OR
+ # the kind is different)
+ if (versioned == 'unchanged' and
+ (renamed or modified == 'kind changed')):
+ if renamed:
+ # on a rename, we show old and new
+ old_path, path = paths
+ else:
+ # if it's not renamed, we're showing both for kind changes
+ # so only show the new path
+ old_path, path = paths[1], paths[1]
+ # if the file is not missing in the source, we show its kind
+ # when we show two paths.
+ if kind[0] is not None:
+ old_path += self.kind_marker(kind[0])
+ old_path += " => "
+ elif versioned == 'removed':
+ # not present in target
+ old_path = ""
+ path = paths[0]
+ else:
+ old_path = ""
+ path = paths[1]
+ if renamed:
+ rename = "R"
+ else:
+ rename = self.versioned_map[versioned]
+ # we show the old kind on the new path when the content is deleted.
+ if modified == 'deleted':
+ path += self.kind_marker(kind[0])
+ # otherwise we always show the current kind when there is one
+ elif kind[1] is not None:
+ path += self.kind_marker(kind[1])
+ if exe_change:
+ exe = '*'
+ else:
+ exe = ' '
+ self.output("%s%s%s %s%s", rename, self.modified_map[modified], exe,
+ old_path, path)
+
+def report_changes(change_iterator, reporter):
+ """Report the changes from a change iterator.
+
+ This is essentially a translation from low-level to medium-level changes.
+ Further processing may be required to produce a human-readable output.
+    Unfortunately, some tree-changing operations are very complex.
+
+    :param change_iterator: an iterator or sequence of changes in the format
+        generated by Tree.iter_changes
+ :param reporter: The _ChangeReporter that will report the changes.
+ """
+ versioned_change_map = {
+ (True, True) : 'unchanged',
+ (True, False) : 'removed',
+ (False, True) : 'added',
+ (False, False): 'unversioned',
+ }
+ for (file_id, path, content_change, versioned, parent_id, name, kind,
+ executable) in change_iterator:
+ exe_change = False
+ # files are "renamed" if they are moved or if name changes, as long
+ # as it had a value
+ if None not in name and None not in parent_id and\
+ (name[0] != name[1] or parent_id[0] != parent_id[1]):
+ renamed = True
+ else:
+ renamed = False
+ if kind[0] != kind[1]:
+ if kind[0] is None:
+ modified = "created"
+ elif kind[1] is None:
+ modified = "deleted"
+ else:
+ modified = "kind changed"
+ else:
+ if content_change:
+ modified = "modified"
+ elif kind[0] is None:
+ modified = "missing"
+ else:
+ modified = "unchanged"
+ if kind[1] == "file":
+ exe_change = (executable[0] != executable[1])
+ versioned_change = versioned_change_map[versioned]
+ reporter.report(file_id, path, versioned_change, renamed, modified,
+ exe_change, kind)
+
+def report_delta(to_file, delta, short_status=False, show_ids=False,
+ show_unchanged=False, indent='', filter=None, classify=True):
+ """Output this delta in status-like form to to_file.
+
+ :param to_file: A file-like object where the output is displayed.
+
+ :param delta: A TreeDelta containing the changes to be displayed
+
+ :param short_status: Single-line status if True.
+
+ :param show_ids: Output the file ids if True.
+
+ :param show_unchanged: Output the unchanged files if True.
+
+ :param indent: Added at the beginning of all output lines (for merged
+ revisions).
+
+ :param filter: A callable receiving a path and a file id and
+ returning True if the path should be displayed.
+
+ :param classify: Add special symbols to indicate file kind.
+ """
+
+ def decorate_path(path, kind, meta_modified=None):
+ if not classify:
+ return path
+ if kind == 'directory':
+ path += '/'
+ elif kind == 'symlink':
+ path += '@'
+ if meta_modified:
+ path += '*'
+ return path
+
+ def show_more_renamed(item):
+ (oldpath, file_id, kind,
+ text_modified, meta_modified, newpath) = item
+ dec_new_path = decorate_path(newpath, kind, meta_modified)
+ to_file.write(' => %s' % dec_new_path)
+ if text_modified or meta_modified:
+ extra_modified.append((newpath, file_id, kind,
+ text_modified, meta_modified))
+
+ def show_more_kind_changed(item):
+ (path, file_id, old_kind, new_kind) = item
+ to_file.write(' (%s => %s)' % (old_kind, new_kind))
+
+ def show_path(path, file_id, kind, meta_modified,
+ default_format, with_file_id_format):
+ dec_path = decorate_path(path, kind, meta_modified)
+ if show_ids:
+ to_file.write(with_file_id_format % dec_path)
+ else:
+ to_file.write(default_format % dec_path)
+
+ def show_list(files, long_status_name, short_status_letter,
+ default_format='%s', with_file_id_format='%-30s',
+ show_more=None):
+ if files:
+ header_shown = False
+ if short_status:
+ prefix = short_status_letter
+ else:
+ prefix = ''
+ prefix = indent + prefix + ' '
+
+ for item in files:
+ path, file_id, kind = item[:3]
+ if (filter is not None and not filter(path, file_id)):
+ continue
+ if not header_shown and not short_status:
+ to_file.write(indent + long_status_name + ':\n')
+ header_shown = True
+ meta_modified = None
+ if len(item) == 5:
+ meta_modified = item[4]
+
+ to_file.write(prefix)
+ show_path(path, file_id, kind, meta_modified,
+ default_format, with_file_id_format)
+ if show_more is not None:
+ show_more(item)
+ if show_ids:
+ to_file.write(' %s' % file_id)
+ to_file.write('\n')
+
+ show_list(delta.removed, 'removed', 'D')
+ show_list(delta.added, 'added', 'A')
+ show_list(delta.missing, 'missing', '!')
+ extra_modified = []
+ # Reorder delta.renamed tuples so that all lists share the same
+ # order for their 3 first fields and that they also begin like
+ # the delta.modified tuples
+ renamed = [(p, i, k, tm, mm, np)
+ for p, np, i, k, tm, mm in delta.renamed]
+ show_list(renamed, 'renamed', 'R', with_file_id_format='%s',
+ show_more=show_more_renamed)
+ show_list(delta.kind_changed, 'kind changed', 'K',
+ with_file_id_format='%s',
+ show_more=show_more_kind_changed)
+ show_list(delta.modified + extra_modified, 'modified', 'M')
+ if show_unchanged:
+ show_list(delta.unchanged, 'unchanged', 'S')
+
+ show_list(delta.unversioned, 'unknown', ' ')
+
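
A minimal sketch of driving report_delta() by hand, not part of this commit: normally a TreeDelta comes from comparing two trees (for example via _compare_trees above), but here one is filled in directly so the example stays self-contained; the paths and file ids are invented.

    import sys

    from bzrlib.delta import TreeDelta, report_delta

    delta = TreeDelta()
    delta.added.append(('doc/readme.txt', 'readme-id', 'file'))
    delta.removed.append(('old.py', 'old-id', 'file'))
    # (oldpath, newpath, id, kind, text_modified, meta_modified)
    delta.renamed.append(('a.txt', 'b.txt', 'a-id', 'file', False, False))

    # long, headed form
    report_delta(sys.stdout, delta)
    # compact one-line-per-change form (roughly what a short status shows)
    report_delta(sys.stdout, delta, short_status=True)
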
diff --git a/bzrlib/diff-delta.c b/bzrlib/diff-delta.c
new file mode 100644
index 0000000..0801c96
--- /dev/null
+++ b/bzrlib/diff-delta.c
@@ -0,0 +1,1188 @@
+/*
+ * diff-delta.c: generate a delta between two buffers
+ *
+ * This code was greatly inspired by parts of LibXDiff from Davide Libenzi
+ * http://www.xmailserver.org/xdiff-lib.html
+ *
+ * Rewritten for GIT by Nicolas Pitre <nico@fluxnic.net>, (C) 2005-2007
+ * Adapted for Bazaar by John Arbash Meinel <john@arbash-meinel.com> (C) 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * NB: The version in GIT is 'version 2 of the Licence only', however Nicolas
+ * has granted permission for use under 'version 2 or later' in private email
+ * to Robert Collins and Karl Fogel on the 6th April 2009.
+ */
+
+#include <stdio.h>
+
+#include "delta.h"
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+/* maximum hash entry list for the same hash bucket */
+#define HASH_LIMIT 64
+
+#define RABIN_SHIFT 23
+#define RABIN_WINDOW 16
+
+/* The hash map is sized to put 4 entries per bucket; this gives us roughly
+ * even room for more data. Tweaking this number above 4 doesn't seem to help
+ * much, anyway.
+ */
+#define EXTRA_NULLS 4
+
+static const unsigned int T[256] = {
+ 0x00000000, 0xab59b4d1, 0x56b369a2, 0xfdeadd73, 0x063f6795, 0xad66d344,
+ 0x508c0e37, 0xfbd5bae6, 0x0c7ecf2a, 0xa7277bfb, 0x5acda688, 0xf1941259,
+ 0x0a41a8bf, 0xa1181c6e, 0x5cf2c11d, 0xf7ab75cc, 0x18fd9e54, 0xb3a42a85,
+ 0x4e4ef7f6, 0xe5174327, 0x1ec2f9c1, 0xb59b4d10, 0x48719063, 0xe32824b2,
+ 0x1483517e, 0xbfdae5af, 0x423038dc, 0xe9698c0d, 0x12bc36eb, 0xb9e5823a,
+ 0x440f5f49, 0xef56eb98, 0x31fb3ca8, 0x9aa28879, 0x6748550a, 0xcc11e1db,
+ 0x37c45b3d, 0x9c9defec, 0x6177329f, 0xca2e864e, 0x3d85f382, 0x96dc4753,
+ 0x6b369a20, 0xc06f2ef1, 0x3bba9417, 0x90e320c6, 0x6d09fdb5, 0xc6504964,
+ 0x2906a2fc, 0x825f162d, 0x7fb5cb5e, 0xd4ec7f8f, 0x2f39c569, 0x846071b8,
+ 0x798aaccb, 0xd2d3181a, 0x25786dd6, 0x8e21d907, 0x73cb0474, 0xd892b0a5,
+ 0x23470a43, 0x881ebe92, 0x75f463e1, 0xdeadd730, 0x63f67950, 0xc8afcd81,
+ 0x354510f2, 0x9e1ca423, 0x65c91ec5, 0xce90aa14, 0x337a7767, 0x9823c3b6,
+ 0x6f88b67a, 0xc4d102ab, 0x393bdfd8, 0x92626b09, 0x69b7d1ef, 0xc2ee653e,
+ 0x3f04b84d, 0x945d0c9c, 0x7b0be704, 0xd05253d5, 0x2db88ea6, 0x86e13a77,
+ 0x7d348091, 0xd66d3440, 0x2b87e933, 0x80de5de2, 0x7775282e, 0xdc2c9cff,
+ 0x21c6418c, 0x8a9ff55d, 0x714a4fbb, 0xda13fb6a, 0x27f92619, 0x8ca092c8,
+ 0x520d45f8, 0xf954f129, 0x04be2c5a, 0xafe7988b, 0x5432226d, 0xff6b96bc,
+ 0x02814bcf, 0xa9d8ff1e, 0x5e738ad2, 0xf52a3e03, 0x08c0e370, 0xa39957a1,
+ 0x584ced47, 0xf3155996, 0x0eff84e5, 0xa5a63034, 0x4af0dbac, 0xe1a96f7d,
+ 0x1c43b20e, 0xb71a06df, 0x4ccfbc39, 0xe79608e8, 0x1a7cd59b, 0xb125614a,
+ 0x468e1486, 0xedd7a057, 0x103d7d24, 0xbb64c9f5, 0x40b17313, 0xebe8c7c2,
+ 0x16021ab1, 0xbd5bae60, 0x6cb54671, 0xc7ecf2a0, 0x3a062fd3, 0x915f9b02,
+ 0x6a8a21e4, 0xc1d39535, 0x3c394846, 0x9760fc97, 0x60cb895b, 0xcb923d8a,
+ 0x3678e0f9, 0x9d215428, 0x66f4eece, 0xcdad5a1f, 0x3047876c, 0x9b1e33bd,
+ 0x7448d825, 0xdf116cf4, 0x22fbb187, 0x89a20556, 0x7277bfb0, 0xd92e0b61,
+ 0x24c4d612, 0x8f9d62c3, 0x7836170f, 0xd36fa3de, 0x2e857ead, 0x85dcca7c,
+ 0x7e09709a, 0xd550c44b, 0x28ba1938, 0x83e3ade9, 0x5d4e7ad9, 0xf617ce08,
+ 0x0bfd137b, 0xa0a4a7aa, 0x5b711d4c, 0xf028a99d, 0x0dc274ee, 0xa69bc03f,
+ 0x5130b5f3, 0xfa690122, 0x0783dc51, 0xacda6880, 0x570fd266, 0xfc5666b7,
+ 0x01bcbbc4, 0xaae50f15, 0x45b3e48d, 0xeeea505c, 0x13008d2f, 0xb85939fe,
+ 0x438c8318, 0xe8d537c9, 0x153feaba, 0xbe665e6b, 0x49cd2ba7, 0xe2949f76,
+ 0x1f7e4205, 0xb427f6d4, 0x4ff24c32, 0xe4abf8e3, 0x19412590, 0xb2189141,
+ 0x0f433f21, 0xa41a8bf0, 0x59f05683, 0xf2a9e252, 0x097c58b4, 0xa225ec65,
+ 0x5fcf3116, 0xf49685c7, 0x033df00b, 0xa86444da, 0x558e99a9, 0xfed72d78,
+ 0x0502979e, 0xae5b234f, 0x53b1fe3c, 0xf8e84aed, 0x17bea175, 0xbce715a4,
+ 0x410dc8d7, 0xea547c06, 0x1181c6e0, 0xbad87231, 0x4732af42, 0xec6b1b93,
+ 0x1bc06e5f, 0xb099da8e, 0x4d7307fd, 0xe62ab32c, 0x1dff09ca, 0xb6a6bd1b,
+ 0x4b4c6068, 0xe015d4b9, 0x3eb80389, 0x95e1b758, 0x680b6a2b, 0xc352defa,
+ 0x3887641c, 0x93ded0cd, 0x6e340dbe, 0xc56db96f, 0x32c6cca3, 0x999f7872,
+ 0x6475a501, 0xcf2c11d0, 0x34f9ab36, 0x9fa01fe7, 0x624ac294, 0xc9137645,
+ 0x26459ddd, 0x8d1c290c, 0x70f6f47f, 0xdbaf40ae, 0x207afa48, 0x8b234e99,
+ 0x76c993ea, 0xdd90273b, 0x2a3b52f7, 0x8162e626, 0x7c883b55, 0xd7d18f84,
+ 0x2c043562, 0x875d81b3, 0x7ab75cc0, 0xd1eee811
+};
+
+static const unsigned int U[256] = {
+ 0x00000000, 0x7eb5200d, 0x5633f4cb, 0x2886d4c6, 0x073e5d47, 0x798b7d4a,
+ 0x510da98c, 0x2fb88981, 0x0e7cba8e, 0x70c99a83, 0x584f4e45, 0x26fa6e48,
+ 0x0942e7c9, 0x77f7c7c4, 0x5f711302, 0x21c4330f, 0x1cf9751c, 0x624c5511,
+ 0x4aca81d7, 0x347fa1da, 0x1bc7285b, 0x65720856, 0x4df4dc90, 0x3341fc9d,
+ 0x1285cf92, 0x6c30ef9f, 0x44b63b59, 0x3a031b54, 0x15bb92d5, 0x6b0eb2d8,
+ 0x4388661e, 0x3d3d4613, 0x39f2ea38, 0x4747ca35, 0x6fc11ef3, 0x11743efe,
+ 0x3eccb77f, 0x40799772, 0x68ff43b4, 0x164a63b9, 0x378e50b6, 0x493b70bb,
+ 0x61bda47d, 0x1f088470, 0x30b00df1, 0x4e052dfc, 0x6683f93a, 0x1836d937,
+ 0x250b9f24, 0x5bbebf29, 0x73386bef, 0x0d8d4be2, 0x2235c263, 0x5c80e26e,
+ 0x740636a8, 0x0ab316a5, 0x2b7725aa, 0x55c205a7, 0x7d44d161, 0x03f1f16c,
+ 0x2c4978ed, 0x52fc58e0, 0x7a7a8c26, 0x04cfac2b, 0x73e5d470, 0x0d50f47d,
+ 0x25d620bb, 0x5b6300b6, 0x74db8937, 0x0a6ea93a, 0x22e87dfc, 0x5c5d5df1,
+ 0x7d996efe, 0x032c4ef3, 0x2baa9a35, 0x551fba38, 0x7aa733b9, 0x041213b4,
+ 0x2c94c772, 0x5221e77f, 0x6f1ca16c, 0x11a98161, 0x392f55a7, 0x479a75aa,
+ 0x6822fc2b, 0x1697dc26, 0x3e1108e0, 0x40a428ed, 0x61601be2, 0x1fd53bef,
+ 0x3753ef29, 0x49e6cf24, 0x665e46a5, 0x18eb66a8, 0x306db26e, 0x4ed89263,
+ 0x4a173e48, 0x34a21e45, 0x1c24ca83, 0x6291ea8e, 0x4d29630f, 0x339c4302,
+ 0x1b1a97c4, 0x65afb7c9, 0x446b84c6, 0x3adea4cb, 0x1258700d, 0x6ced5000,
+ 0x4355d981, 0x3de0f98c, 0x15662d4a, 0x6bd30d47, 0x56ee4b54, 0x285b6b59,
+ 0x00ddbf9f, 0x7e689f92, 0x51d01613, 0x2f65361e, 0x07e3e2d8, 0x7956c2d5,
+ 0x5892f1da, 0x2627d1d7, 0x0ea10511, 0x7014251c, 0x5facac9d, 0x21198c90,
+ 0x099f5856, 0x772a785b, 0x4c921c31, 0x32273c3c, 0x1aa1e8fa, 0x6414c8f7,
+ 0x4bac4176, 0x3519617b, 0x1d9fb5bd, 0x632a95b0, 0x42eea6bf, 0x3c5b86b2,
+ 0x14dd5274, 0x6a687279, 0x45d0fbf8, 0x3b65dbf5, 0x13e30f33, 0x6d562f3e,
+ 0x506b692d, 0x2ede4920, 0x06589de6, 0x78edbdeb, 0x5755346a, 0x29e01467,
+ 0x0166c0a1, 0x7fd3e0ac, 0x5e17d3a3, 0x20a2f3ae, 0x08242768, 0x76910765,
+ 0x59298ee4, 0x279caee9, 0x0f1a7a2f, 0x71af5a22, 0x7560f609, 0x0bd5d604,
+ 0x235302c2, 0x5de622cf, 0x725eab4e, 0x0ceb8b43, 0x246d5f85, 0x5ad87f88,
+ 0x7b1c4c87, 0x05a96c8a, 0x2d2fb84c, 0x539a9841, 0x7c2211c0, 0x029731cd,
+ 0x2a11e50b, 0x54a4c506, 0x69998315, 0x172ca318, 0x3faa77de, 0x411f57d3,
+ 0x6ea7de52, 0x1012fe5f, 0x38942a99, 0x46210a94, 0x67e5399b, 0x19501996,
+ 0x31d6cd50, 0x4f63ed5d, 0x60db64dc, 0x1e6e44d1, 0x36e89017, 0x485db01a,
+ 0x3f77c841, 0x41c2e84c, 0x69443c8a, 0x17f11c87, 0x38499506, 0x46fcb50b,
+ 0x6e7a61cd, 0x10cf41c0, 0x310b72cf, 0x4fbe52c2, 0x67388604, 0x198da609,
+ 0x36352f88, 0x48800f85, 0x6006db43, 0x1eb3fb4e, 0x238ebd5d, 0x5d3b9d50,
+ 0x75bd4996, 0x0b08699b, 0x24b0e01a, 0x5a05c017, 0x728314d1, 0x0c3634dc,
+ 0x2df207d3, 0x534727de, 0x7bc1f318, 0x0574d315, 0x2acc5a94, 0x54797a99,
+ 0x7cffae5f, 0x024a8e52, 0x06852279, 0x78300274, 0x50b6d6b2, 0x2e03f6bf,
+ 0x01bb7f3e, 0x7f0e5f33, 0x57888bf5, 0x293dabf8, 0x08f998f7, 0x764cb8fa,
+ 0x5eca6c3c, 0x207f4c31, 0x0fc7c5b0, 0x7172e5bd, 0x59f4317b, 0x27411176,
+ 0x1a7c5765, 0x64c97768, 0x4c4fa3ae, 0x32fa83a3, 0x1d420a22, 0x63f72a2f,
+ 0x4b71fee9, 0x35c4dee4, 0x1400edeb, 0x6ab5cde6, 0x42331920, 0x3c86392d,
+ 0x133eb0ac, 0x6d8b90a1, 0x450d4467, 0x3bb8646a
+};
+
+struct index_entry {
+ const unsigned char *ptr;
+ const struct source_info *src;
+ unsigned int val;
+};
+
+struct index_entry_linked_list {
+ struct index_entry *p_entry;
+ struct index_entry_linked_list *next;
+};
+
+struct unpacked_index_entry {
+ struct index_entry entry;
+ struct unpacked_index_entry *next;
+};
+
+struct delta_index {
+ unsigned long memsize; /* Total bytes pointed to by this index */
+ const struct source_info *last_src; /* Information about the referenced source */
+ unsigned int hash_mask; /* val & hash_mask gives the hash index for a given
+ entry */
+ unsigned int num_entries; /* The total number of entries in this index */
+ struct index_entry *last_entry; /* Pointer to the last valid entry */
+ struct index_entry *hash[];
+};
+
+static unsigned int
+limit_hash_buckets(struct unpacked_index_entry **hash,
+ unsigned int *hash_count, unsigned int hsize,
+ unsigned int entries)
+{
+ struct unpacked_index_entry *entry;
+ unsigned int i;
+ /*
+ * Determine a limit on the number of entries in the same hash
+ * bucket. This guards us against pathological data sets causing
+ * really bad hash distribution with most entries in the same hash
+ * bucket that would bring us to O(m*n) computing costs (m and n
+ * corresponding to reference and target buffer sizes).
+ *
+ * Make sure none of the hash buckets has more entries than
+ * we're willing to test. Otherwise we cull the entry list
+ * uniformly to still preserve a good repartition across
+ * the reference buffer.
+ */
+ for (i = 0; i < hsize; i++) {
+ int acc;
+
+ if (hash_count[i] <= HASH_LIMIT)
+ continue;
+
+ /* We leave exactly HASH_LIMIT entries in the bucket */
+ entries -= hash_count[i] - HASH_LIMIT;
+
+ entry = hash[i];
+ acc = 0;
+
+ /*
+ * Assume that this loop is gone through exactly
+ * HASH_LIMIT times and is entered and left with
+ * acc==0. So the first statement in the loop
+ * contributes (hash_count[i]-HASH_LIMIT)*HASH_LIMIT
+ * to the accumulator, and the inner loop consequently
+ * is run (hash_count[i]-HASH_LIMIT) times, removing
+ * one element from the list each time. Since acc
+ * balances out to 0 at the final run, the inner loop
+ * body can't be left with entry==NULL. So we indeed
+ * encounter entry==NULL in the outer loop only.
+ */
+ do {
+ acc += hash_count[i] - HASH_LIMIT;
+ if (acc > 0) {
+ struct unpacked_index_entry *keep = entry;
+ do {
+ entry = entry->next;
+ acc -= HASH_LIMIT;
+ } while (acc > 0);
+ keep->next = entry->next;
+ }
+ entry = entry->next;
+ } while (entry);
+ }
+ return entries;
+}
+
+static struct delta_index *
+pack_delta_index(struct unpacked_index_entry **hash, unsigned int hsize,
+ unsigned int num_entries, struct delta_index *old_index)
+{
+ unsigned int i, j, hmask, memsize, fit_in_old, copied_count;
+ struct unpacked_index_entry *entry;
+ struct delta_index *index;
+ struct index_entry *packed_entry, **packed_hash, *old_entry, *copy_from;
+ struct index_entry null_entry = {0};
+ void *mem;
+
+ hmask = hsize - 1;
+
+ // if (old_index) {
+ // fprintf(stderr, "Packing %d entries into %d for total of %d entries"
+ // " %x => %x\n",
+ // num_entries - old_index->num_entries,
+ // old_index->num_entries, num_entries,
+ // old_index->hash_mask, hmask);
+ // } else {
+ // fprintf(stderr, "Packing %d entries into a new index\n",
+ // num_entries);
+ // }
+ /* First, see if we can squeeze the new items into the existing structure.
+ */
+ fit_in_old = 0;
+ copied_count = 0;
+ if (old_index && old_index->hash_mask == hmask) {
+ fit_in_old = 1;
+ for (i = 0; i < hsize; ++i) {
+ packed_entry = NULL;
+ for (entry = hash[i]; entry; entry = entry->next) {
+ if (packed_entry == NULL) {
+ /* Find the last open spot */
+ packed_entry = old_index->hash[i + 1];
+ --packed_entry;
+ while (packed_entry >= old_index->hash[i]
+ && packed_entry->ptr == NULL) {
+ --packed_entry;
+ }
+ ++packed_entry;
+ }
+ if (packed_entry >= old_index->hash[i+1]
+ || packed_entry->ptr != NULL) {
+ /* There are no free spots here :( */
+ fit_in_old = 0;
+ break;
+ }
+ /* We found an empty spot to put this entry
+ * Copy it over, and remove it from the linked list, just in
+ * case we end up running out of room later.
+ */
+ *packed_entry++ = entry->entry;
+ assert(entry == hash[i]);
+ hash[i] = entry->next;
+ copied_count += 1;
+ old_index->num_entries++;
+ }
+ if (!fit_in_old) {
+ break;
+ }
+ }
+ }
+ if (old_index) {
+ if (fit_in_old) {
+ // fprintf(stderr, "Fit all %d entries into old index\n",
+ // copied_count);
+ /*
+ * No need to allocate a new buffer, but return old_index ptr so
+ * callers can distinguish this from an OOM failure.
+ */
+ return old_index;
+ } else {
+ // fprintf(stderr, "Fit only %d entries into old index,"
+ // " reallocating\n", copied_count);
+ }
+ }
+ /*
+ * Now create the packed index in array form
+ * rather than linked lists.
+ * Leave a 2-entry gap for inserting more entries between the groups
+ */
+ memsize = sizeof(*index)
+ + sizeof(*packed_hash) * (hsize+1)
+ + sizeof(*packed_entry) * (num_entries + hsize * EXTRA_NULLS);
+ mem = malloc(memsize);
+ if (!mem) {
+ return NULL;
+ }
+
+ index = mem;
+ index->memsize = memsize;
+ index->hash_mask = hmask;
+ index->num_entries = num_entries;
+ if (old_index) {
+ if (hmask < old_index->hash_mask) {
+ fprintf(stderr, "hash mask was shrunk %x => %x\n",
+ old_index->hash_mask, hmask);
+ }
+ assert(hmask >= old_index->hash_mask);
+ }
+
+ mem = index->hash;
+ packed_hash = mem;
+ mem = packed_hash + (hsize+1);
+ packed_entry = mem;
+
+ for (i = 0; i < hsize; i++) {
+ /*
+ * Coalesce all entries belonging to one linked list
+ * into consecutive array entries.
+ */
+ packed_hash[i] = packed_entry;
+ /* Old comes earlier as a source, so it always comes first in a given
+ * hash bucket.
+ */
+ if (old_index) {
+ /* Could we optimize this to use memcpy when hmask ==
+ * old_index->hash_mask? Would it make any real difference?
+ */
+ j = i & old_index->hash_mask;
+ copy_from = old_index->hash[j];
+ for (old_entry = old_index->hash[j];
+ old_entry < old_index->hash[j + 1] && old_entry->ptr != NULL;
+ old_entry++) {
+ if ((old_entry->val & hmask) == i) {
+ *packed_entry++ = *old_entry;
+ }
+ }
+ }
+ for (entry = hash[i]; entry; entry = entry->next) {
+ *packed_entry++ = entry->entry;
+ }
+ /* TODO: At this point packed_entry - packed_hash[i] is the number of
+ * records that we have inserted into this hash bucket.
+ * We should *really* consider doing some limiting along the
+ * lines of limit_hash_buckets() to avoid pathological behavior.
+ */
+ /* Now add extra 'NULL' entries that we can use for future expansion. */
+ for (j = 0; j < EXTRA_NULLS; ++j ) {
+ *packed_entry++ = null_entry;
+ }
+ }
+
+ /* Sentinel value to indicate the length of the last hash bucket */
+ packed_hash[hsize] = packed_entry;
+
+ if (packed_entry - (struct index_entry *)mem
+ != num_entries + hsize*EXTRA_NULLS) {
+ fprintf(stderr, "We expected %d entries, but created %d\n",
+ num_entries + hsize*EXTRA_NULLS,
+ (int)(packed_entry - (struct index_entry*)mem));
+ }
+ assert(packed_entry - (struct index_entry *)mem
+ == num_entries + hsize*EXTRA_NULLS);
+ index->last_entry = (packed_entry - 1);
+ return index;
+}
+
+
+delta_result
+create_delta_index(const struct source_info *src,
+ struct delta_index *old,
+ struct delta_index **fresh,
+ int max_bytes_to_index)
+{
+ unsigned int i, hsize, hmask, num_entries, prev_val, *hash_count;
+ unsigned int total_num_entries, stride, max_entries;
+ const unsigned char *data, *buffer;
+ struct delta_index *index;
+ struct unpacked_index_entry *entry, **hash;
+ void *mem;
+ unsigned long memsize;
+
+ if (!src->buf || !src->size)
+ return DELTA_SOURCE_EMPTY;
+ buffer = src->buf;
+
+ /* Determine index hash size. Note that indexing skips the
+ first byte so we subtract 1 to get the edge cases right.
+ */
+ stride = RABIN_WINDOW;
+ num_entries = (src->size - 1) / RABIN_WINDOW;
+ if (max_bytes_to_index > 0) {
+ max_entries = (unsigned int) (max_bytes_to_index / RABIN_WINDOW);
+ if (num_entries > max_entries) {
+ /* Limit the max number of matching entries. This reduces the 'best'
+ * possible match, but means we don't consume all of ram.
+ */
+ num_entries = max_entries;
+ stride = (src->size - 1) / num_entries;
+ }
+ }
+ if (old != NULL)
+ total_num_entries = num_entries + old->num_entries;
+ else
+ total_num_entries = num_entries;
+ hsize = total_num_entries / 4;
+ for (i = 4; (1u << i) < hsize && i < 31; i++);
+ hsize = 1 << i;
+ hmask = hsize - 1;
+ if (old && old->hash_mask > hmask) {
+ hmask = old->hash_mask;
+ hsize = hmask + 1;
+ }
+
+ /* allocate lookup index */
+ memsize = sizeof(*hash) * hsize +
+ sizeof(*entry) * total_num_entries;
+ mem = malloc(memsize);
+ if (!mem)
+ return DELTA_OUT_OF_MEMORY;
+ hash = mem;
+ mem = hash + hsize;
+ entry = mem;
+
+ memset(hash, 0, hsize * sizeof(*hash));
+
+ /* allocate an array to count hash num_entries */
+ hash_count = calloc(hsize, sizeof(*hash_count));
+ if (!hash_count) {
+ free(hash);
+ return DELTA_OUT_OF_MEMORY;
+ }
+
+ /* then populate the index for the new data */
+ prev_val = ~0;
+ for (data = buffer + num_entries * stride - RABIN_WINDOW;
+ data >= buffer;
+ data -= stride) {
+ unsigned int val = 0;
+ for (i = 1; i <= RABIN_WINDOW; i++)
+ val = ((val << 8) | data[i]) ^ T[val >> RABIN_SHIFT];
+ if (val == prev_val) {
+ /* keep the lowest of consecutive identical blocks */
+ entry[-1].entry.ptr = data + RABIN_WINDOW;
+ --num_entries;
+ --total_num_entries;
+ } else {
+ prev_val = val;
+ i = val & hmask;
+ entry->entry.ptr = data + RABIN_WINDOW;
+ entry->entry.val = val;
+ entry->entry.src = src;
+ entry->next = hash[i];
+ hash[i] = entry++;
+ hash_count[i]++;
+ }
+ }
+ /* TODO: It would be nice to limit_hash_buckets at a better time. */
+ total_num_entries = limit_hash_buckets(hash, hash_count, hsize,
+ total_num_entries);
+ free(hash_count);
+ index = pack_delta_index(hash, hsize, total_num_entries, old);
+ free(hash);
+ /* pack_delta_index only returns NULL on malloc failure */
+ if (!index) {
+ return DELTA_OUT_OF_MEMORY;
+ }
+ index->last_src = src;
+ *fresh = index;
+ return DELTA_OK;
+}
+
+/* Take some entries, and put them into a custom hash.
+ * @param entries A list of entries, sorted by position in file
+ * @param num_entries Length of entries
+ * @param hsize Size of the hash table (number of buckets); the code masks
+ * with hsize - 1, so it must be a power of two
+ */
+struct index_entry_linked_list **
+_put_entries_into_hash(struct index_entry *entries, unsigned int num_entries,
+ unsigned int hsize)
+{
+ unsigned int hash_offset, hmask, memsize;
+ struct index_entry *entry;
+ struct index_entry_linked_list *out_entry, **hash;
+ void *mem;
+
+ hmask = hsize - 1;
+
+ memsize = sizeof(*hash) * hsize +
+ sizeof(*out_entry) * num_entries;
+ mem = malloc(memsize);
+ if (!mem)
+ return NULL;
+ hash = mem;
+ mem = hash + hsize;
+ out_entry = mem;
+
+ memset(hash, 0, sizeof(*hash)*(hsize+1));
+
+ /* We know that entries are in the order we want in the output, but they
+ * aren't "grouped" by hash bucket yet.
+ */
+ for (entry = entries + num_entries - 1; entry >= entries; --entry) {
+ hash_offset = entry->val & hmask;
+ out_entry->p_entry = entry;
+ out_entry->next = hash[hash_offset];
+ /* TODO: Remove entries that have identical vals, or at least filter
+ * the map a little bit.
+ * if (hash[i] != NULL) {
+ * }
+ */
+ hash[hash_offset] = out_entry;
+ ++out_entry;
+ }
+ return hash;
+}
+
+
+struct delta_index *
+create_index_from_old_and_new_entries(const struct delta_index *old_index,
+ struct index_entry *entries,
+ unsigned int num_entries)
+{
+ unsigned int i, j, hsize, hmask, total_num_entries;
+ struct delta_index *index;
+ struct index_entry *entry, *packed_entry, **packed_hash;
+ struct index_entry null_entry = {0};
+ void *mem;
+ unsigned long memsize;
+ struct index_entry_linked_list *unpacked_entry, **mini_hash;
+
+ /* Determine index hash size. Note that indexing skips the
+ first byte to allow for optimizing the Rabin's polynomial
+ initialization in create_delta(). */
+ total_num_entries = num_entries + old_index->num_entries;
+ hsize = total_num_entries / 4;
+ for (i = 4; (1u << i) < hsize && i < 31; i++);
+ hsize = 1 << i;
+ if (hsize < old_index->hash_mask) {
+ /* For some reason, there was a code path that would actually *shrink*
+ * the hash size. This screws with some later code, and in general, I
+ * think it better to make the hash bigger, rather than smaller. So
+ * we'll just force the size here.
+ * Possibly done by create_delta_index running into a
+ * limit_hash_buckets call, that ended up transitioning across a
+ * power-of-2. The cause isn't 100% clear, though.
+ */
+ hsize = old_index->hash_mask + 1;
+ }
+ hmask = hsize - 1;
+ // fprintf(stderr, "resizing index to insert %d entries into array"
+ // " with %d entries: %x => %x\n",
+ // num_entries, old_index->num_entries, old_index->hash_mask, hmask);
+
+ memsize = sizeof(*index)
+ + sizeof(*packed_hash) * (hsize+1)
+ + sizeof(*packed_entry) * (total_num_entries + hsize*EXTRA_NULLS);
+ mem = malloc(memsize);
+ if (!mem) {
+ return NULL;
+ }
+ index = mem;
+ index->memsize = memsize;
+ index->hash_mask = hmask;
+ index->num_entries = total_num_entries;
+ index->last_src = old_index->last_src;
+
+ mem = index->hash;
+ packed_hash = mem;
+ mem = packed_hash + (hsize+1);
+ packed_entry = mem;
+
+ mini_hash = _put_entries_into_hash(entries, num_entries, hsize);
+ if (mini_hash == NULL) {
+ free(index);
+ return NULL;
+ }
+ for (i = 0; i < hsize; i++) {
+ /*
+ * Coalesce all entries belonging in one hash bucket
+ * into consecutive array entries.
+ * The entries in old_index all come before 'entries'.
+ */
+ packed_hash[i] = packed_entry;
+ /* Copy any of the old entries across */
+ /* Would we rather use memcpy? */
+ if (hmask == old_index->hash_mask) {
+ for (entry = old_index->hash[i];
+ entry < old_index->hash[i+1] && entry->ptr != NULL;
+ ++entry) {
+ assert((entry->val & hmask) == i);
+ *packed_entry++ = *entry;
+ }
+ } else {
+ /* If we resized the index from this action, all of the old values
+ * will be found in the previous location, but they will end up
+ * spread across the new locations.
+ */
+ j = i & old_index->hash_mask;
+ for (entry = old_index->hash[j];
+ entry < old_index->hash[j+1] && entry->ptr != NULL;
+ ++entry) {
+ assert((entry->val & old_index->hash_mask) == j);
+ if ((entry->val & hmask) == i) {
+ /* Any entries not picked up here will be picked up on the
+ * next pass.
+ */
+ *packed_entry++ = *entry;
+ }
+ }
+ }
+        /* Now see if we need to insert any of the new entries.
+         * Note that this loop ends up O(hsize*num_entries), so we expect that
+         * num_entries is always small.
+         * We also help a little bit by collapsing the entry range when the
+         * endpoints are inserted. However, an alternative would be to build a
+         * quick hash lookup for just the new entries.
+         * Testing shows that this list can easily get up to about 100
+         * entries; the tradeoff is a malloc, 1 pass over the entries, copying
+         * them into a sorted buffer, and a free() when done.
+         */
+ for (unpacked_entry = mini_hash[i];
+ unpacked_entry;
+ unpacked_entry = unpacked_entry->next) {
+ assert((unpacked_entry->p_entry->val & hmask) == i);
+ *packed_entry++ = *(unpacked_entry->p_entry);
+ }
+ /* Now insert some extra nulls */
+ for (j = 0; j < EXTRA_NULLS; ++j) {
+ *packed_entry++ = null_entry;
+ }
+ }
+ free(mini_hash);
+
+ /* Sentinel value to indicate the length of the last hash bucket */
+ packed_hash[hsize] = packed_entry;
+
+ if ((packed_entry - (struct index_entry *)mem)
+ != (total_num_entries + hsize*EXTRA_NULLS)) {
+ fprintf(stderr, "We expected %d entries, but created %d\n",
+ total_num_entries + hsize*EXTRA_NULLS,
+ (int)(packed_entry - (struct index_entry*)mem));
+ fflush(stderr);
+ }
+ assert((packed_entry - (struct index_entry *)mem)
+ == (total_num_entries + hsize * EXTRA_NULLS));
+ index->last_entry = (packed_entry - 1);
+ return index;
+}
+
+
+void
+get_text(char buff[128], const unsigned char *ptr)
+{
+ unsigned int i;
+ const unsigned char *start;
+ unsigned char cmd;
+ start = (ptr-RABIN_WINDOW-1);
+ cmd = *(start);
+ if (cmd < 0x80) {// This is likely to be an insert instruction
+ if (cmd < RABIN_WINDOW) {
+ cmd = RABIN_WINDOW;
+ }
+ } else {
+ /* This was either a copy [should never be] or it
+ * was a longer insert so the insert start happened at 16 more
+ * bytes back.
+ */
+ cmd = RABIN_WINDOW + 1;
+ }
+ if (cmd > 60) {
+ cmd = 60; /* Be friendly to 80char terms */
+ }
+ /* Copy the 1 byte command, and 4 bytes after the insert */
+ cmd += 5;
+ memcpy(buff, start, cmd);
+ buff[cmd] = 0;
+ for (i = 0; i < cmd; ++i) {
+ if (buff[i] == '\n') {
+ buff[i] = 'N';
+ } else if (buff[i] == '\t') {
+ buff[i] = 'T';
+ }
+ }
+}
+
+delta_result
+create_delta_index_from_delta(const struct source_info *src,
+ struct delta_index *old_index,
+ struct delta_index **fresh)
+{
+ unsigned int i, num_entries, max_num_entries, prev_val, num_inserted;
+ unsigned int hash_offset;
+ const unsigned char *data, *buffer, *top;
+ unsigned char cmd;
+ struct delta_index *new_index;
+ struct index_entry *entry, *entries;
+
+ if (!old_index)
+ return DELTA_INDEX_NEEDED;
+ if (!src->buf || !src->size)
+ return DELTA_SOURCE_EMPTY;
+ buffer = src->buf;
+ top = buffer + src->size;
+
+ /* Determine index hash size. Note that indexing skips the
+ first byte to allow for optimizing the Rabin's polynomial
+ initialization in create_delta().
+ This computes the maximum number of entries that could be held. The
+ actual number will be recomputed during processing.
+ */
+
+ max_num_entries = (src->size - 1) / RABIN_WINDOW;
+
+ if (!max_num_entries) {
+ *fresh = old_index;
+ return DELTA_OK;
+ }
+
+ /* allocate an array to hold whatever entries we find */
+ entries = malloc(sizeof(*entry) * max_num_entries);
+ if (!entries) /* malloc failure */
+ return DELTA_OUT_OF_MEMORY;
+
+ /* then populate the index for the new data */
+ prev_val = ~0;
+ data = buffer;
+ /* target size */
+ /* get_delta_hdr_size doesn't mutate the content, just moves the
+ * start-of-data pointer, so it is safe to do the cast.
+ */
+ get_delta_hdr_size((unsigned char**)&data, top);
+ entry = entries; /* start at the first slot */
+ num_entries = 0; /* calculate the real number of entries */
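+    /* Walk the delta opcodes: a command byte with the high bit set is a copy
+     * instruction whose low bits (0x01..0x40) flag which offset/size bytes
+     * follow; a command byte of 1..0x7f is an insert of that many literal
+     * bytes; 0 is reserved, so we bail out if we see it.
+     */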
+ while (data < top) {
+ cmd = *data++;
+ if (cmd & 0x80) {
+ /* Copy instruction, skip it */
+ if (cmd & 0x01) data++;
+ if (cmd & 0x02) data++;
+ if (cmd & 0x04) data++;
+ if (cmd & 0x08) data++;
+ if (cmd & 0x10) data++;
+ if (cmd & 0x20) data++;
+ if (cmd & 0x40) data++;
+ } else if (cmd) {
+ /* Insert instruction, we want to index these bytes */
+ if (data + cmd > top) {
+ /* Invalid insert, not enough bytes in the delta */
+ break;
+ }
+            /* The create_delta code requires a match of at least 4 characters
+ * (including only the last char of the RABIN_WINDOW) before it
+ * will consider it something worth copying rather than inserting.
+ * So we don't want to index anything that we know won't ever be a
+ * match.
+ */
+ for (; cmd > RABIN_WINDOW + 3; cmd -= RABIN_WINDOW,
+ data += RABIN_WINDOW) {
+ unsigned int val = 0;
+ for (i = 1; i <= RABIN_WINDOW; i++)
+ val = ((val << 8) | data[i]) ^ T[val >> RABIN_SHIFT];
+ if (val != prev_val) {
+ /* Only keep the first of consecutive data */
+ prev_val = val;
+ num_entries++;
+ entry->ptr = data + RABIN_WINDOW;
+ entry->val = val;
+ entry->src = src;
+ entry++;
+ if (num_entries > max_num_entries) {
+ /* We ran out of entry room, something is really wrong
+ */
+ break;
+ }
+ }
+ }
+ /* Move the data pointer by whatever remainder is left */
+ data += cmd;
+ } else {
+ /*
+ * cmd == 0 is reserved for future encoding
+ * extensions. In the mean time we must fail when
+ * encountering them (might be data corruption).
+ */
+ break;
+ }
+ }
+ if (data != top) {
+ /* The source_info data passed was corrupted or otherwise invalid */
+ free(entries);
+ return DELTA_SOURCE_BAD;
+ }
+ if (num_entries == 0) {
+ /** Nothing to index **/
+ free(entries);
+ *fresh = old_index;
+ return DELTA_OK;
+ }
+ old_index->last_src = src;
+ /* See if we can fill in these values into the holes in the array */
+ entry = entries;
+ num_inserted = 0;
+ for (; num_entries > 0; --num_entries, ++entry) {
+ struct index_entry *next_bucket_entry, *cur_entry, *bucket_first_entry;
+ hash_offset = (entry->val & old_index->hash_mask);
+ /* The basic structure is a hash => packed_entries that fit in that
+ * hash bucket. Things are structured such that the hash-pointers are
+ * strictly ordered. So we start by pointing to the next pointer, and
+ * walk back until we stop getting NULL targets, and then go back
+ * forward. If there are no NULL targets, then we know because
+ * entry->ptr will not be NULL.
+ */
+ // The start of the next bucket, this may point past the end of the
+ // entry table if hash_offset is the last bucket.
+ next_bucket_entry = old_index->hash[hash_offset + 1];
+ // First entry in this bucket
+ bucket_first_entry = old_index->hash[hash_offset];
+ cur_entry = next_bucket_entry - 1;
+ while (cur_entry->ptr == NULL && cur_entry >= bucket_first_entry) {
+ cur_entry--;
+ }
+ // cur_entry now either points at the first NULL, or it points to
+ // next_bucket_entry if there were no blank spots.
+ cur_entry++;
+ if (cur_entry >= next_bucket_entry || cur_entry->ptr != NULL) {
+ /* There is no room for this entry, we have to resize */
+ // char buff[128];
+ // get_text(buff, entry->ptr);
+ // fprintf(stderr, "Failed to find an opening @%x for %8x:\n '%s'\n",
+ // hash_offset, entry->val, buff);
+ // for (old_entry = old_index->hash[hash_offset];
+ // old_entry < old_index->hash[hash_offset+1];
+ // ++old_entry) {
+ // get_text(buff, old_entry->ptr);
+ // fprintf(stderr, " [%2d] %8x %8x: '%s'\n",
+ // (int)(old_entry - old_index->hash[hash_offset]),
+ // old_entry->val, old_entry->ptr, buff);
+ // }
+ break;
+ }
+ num_inserted++;
+ *cur_entry = *entry;
+ /* For entries which we *do* manage to insert into old_index, we don't
+ * want them double copied into the final output.
+ */
+ old_index->num_entries++;
+ }
+ if (num_entries > 0) {
+ /* We couldn't fit the new entries into the old index, so allocate a
+ * new one, and fill it with stuff.
+ */
+ // fprintf(stderr, "inserted %d before resize\n", num_inserted);
+ new_index = create_index_from_old_and_new_entries(old_index,
+ entry, num_entries);
+ } else {
+ new_index = old_index;
+ // fprintf(stderr, "inserted %d without resizing\n", num_inserted);
+ }
+ free(entries);
+ /* create_index_from_old_and_new_entries returns NULL on malloc failure */
+ if (!new_index)
+ return DELTA_OUT_OF_MEMORY;
+ *fresh = new_index;
+ return DELTA_OK;
+}
+
+void free_delta_index(struct delta_index *index)
+{
+ free(index);
+}
+
+unsigned long
+sizeof_delta_index(struct delta_index *index)
+{
+ if (index)
+ return index->memsize;
+ else
+ return 0;
+}
+
+/*
+ * The maximum size for any opcode sequence, including the initial header
+ * plus Rabin window plus biggest copy.
+ */
+#define MAX_OP_SIZE (5 + 5 + 1 + RABIN_WINDOW + 7)
+
+delta_result
+create_delta(const struct delta_index *index,
+ const void *trg_buf, unsigned long trg_size,
+ unsigned long *delta_size, unsigned long max_size,
+ void **delta_data)
+{
+ unsigned int i, outpos, outsize, moff, val;
+ int msize;
+ const struct source_info *msource;
+ int inscnt;
+ const unsigned char *ref_data, *ref_top, *data, *top;
+ unsigned char *out;
+ unsigned long source_size;
+
+ if (!trg_buf || !trg_size)
+ return DELTA_BUFFER_EMPTY;
+ if (index == NULL)
+ return DELTA_INDEX_NEEDED;
+
+ outpos = 0;
+ outsize = 8192;
+ if (max_size && outsize >= max_size)
+ outsize = max_size + MAX_OP_SIZE + 1;
+ out = malloc(outsize);
+ if (!out)
+ return DELTA_OUT_OF_MEMORY;
+
+ source_size = index->last_src->size + index->last_src->agg_offset;
+
+ /* store target buffer size */
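+    /* The size header is a little-endian base-128 varint: 7 bits per byte,
+     * with the high bit set on all but the last byte. E.g. a 300-byte target
+     * is written as 0xAC 0x02.
+     */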
+ i = trg_size;
+ while (i >= 0x80) {
+ out[outpos++] = i | 0x80;
+ i >>= 7;
+ }
+ out[outpos++] = i;
+
+ data = trg_buf;
+ top = (const unsigned char *) trg_buf + trg_size;
+
+ /* Start the matching by filling out with a simple 'insert' instruction, of
+ * the first RABIN_WINDOW bytes of the input.
+ */
+ outpos++; /* leave a byte for the insert command */
+ val = 0;
+ for (i = 0; i < RABIN_WINDOW && data < top; i++, data++) {
+ out[outpos++] = *data;
+ val = ((val << 8) | *data) ^ T[val >> RABIN_SHIFT];
+ }
+    /* We are now set up with an insert of 'i' bytes, val contains the RABIN
+     * hash for those bytes, and data points to the (RABIN_WINDOW+1)th byte of
+     * input.
+     */
+ inscnt = i;
+
+ moff = 0;
+ msize = 0;
+ msource = NULL;
+ while (data < top) {
+ if (msize < 4096) {
+ /* we don't have a 'worthy enough' match yet, so let's look for
+ * one.
+ */
+ struct index_entry *entry;
+ /* Shift the window by one byte. */
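+            /* The U[] lookup undoes the contribution of the byte leaving the
+             * window and the T[] lookup folds in the new byte, so val stays
+             * the Rabin fingerprint of the last RABIN_WINDOW bytes.
+             */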
+ val ^= U[data[-RABIN_WINDOW]];
+ val = ((val << 8) | *data) ^ T[val >> RABIN_SHIFT];
+ i = val & index->hash_mask;
+ /* TODO: When using multiple indexes like this, the hash tables
+ * mapping val => index_entry become less efficient.
+             * You end up getting a lot more collisions in the hash,
+             * which don't actually lead to an entry->val match.
+ */
+ for (entry = index->hash[i];
+ entry < index->hash[i+1] && entry->src != NULL;
+ entry++) {
+ const unsigned char *ref;
+ const unsigned char *src;
+ int ref_size;
+ if (entry->val != val)
+ continue;
+ ref = entry->ptr;
+ src = data;
+ ref_data = entry->src->buf;
+ ref_top = ref_data + entry->src->size;
+ ref_size = ref_top - ref;
+ /* ref_size is the longest possible match that we could make
+ * here. If ref_size <= msize, then we know that we cannot
+                 * match more bytes with this location than we have already
+ * matched.
+ */
+ if (ref_size > (top - src))
+ ref_size = top - src;
+ if (ref_size <= msize)
+ break;
+ /* See how many bytes actually match at this location. */
+ while (ref_size-- && *src++ == *ref)
+ ref++;
+ if (msize < (ref - entry->ptr)) {
+ /* this is our best match so far */
+ msize = ref - entry->ptr;
+ msource = entry->src;
+ moff = entry->ptr - ref_data;
+ if (msize >= 4096) /* good enough */
+ break;
+ }
+ }
+ }
+
+ if (msize < 4) {
+ /* The best match right now is less than 4 bytes long. So just add
+ * the current byte to the insert instruction. Increment the insert
+ * counter, and copy the byte of data into the output buffer.
+ */
+ if (!inscnt)
+ outpos++;
+ out[outpos++] = *data++;
+ inscnt++;
+ if (inscnt == 0x7f) {
+ /* We have a max length insert instruction, finalize it in the
+ * output.
+ */
+ out[outpos - inscnt - 1] = inscnt;
+ inscnt = 0;
+ }
+ msize = 0;
+ } else {
+ unsigned int left;
+ unsigned char *op;
+
+ if (inscnt) {
+ ref_data = msource->buf;
+ while (moff && ref_data[moff-1] == data[-1]) {
+ /* we can match one byte back */
+ msize++;
+ moff--;
+ data--;
+ outpos--;
+ if (--inscnt)
+ continue;
+ outpos--; /* remove count slot */
+ inscnt--; /* make it -1 */
+ break;
+ }
+ out[outpos - inscnt - 1] = inscnt;
+ inscnt = 0;
+ }
+
+ /* A copy op is currently limited to 64KB (pack v2) */
+ left = (msize < 0x10000) ? 0 : (msize - 0x10000);
+ msize -= left;
+
+ op = out + outpos++;
+ i = 0x80;
+
+            /* moff is the offset in the local structure; for encoding, we need
+             * to push it into the global offset.
+             */
+ assert(moff < msource->size);
+ moff += msource->agg_offset;
+ assert(moff + msize <= source_size);
+ if (moff & 0x000000ff)
+ out[outpos++] = moff >> 0, i |= 0x01;
+ if (moff & 0x0000ff00)
+ out[outpos++] = moff >> 8, i |= 0x02;
+ if (moff & 0x00ff0000)
+ out[outpos++] = moff >> 16, i |= 0x04;
+ if (moff & 0xff000000)
+ out[outpos++] = moff >> 24, i |= 0x08;
+ /* Put it back into local coordinates, in case we have multiple
+ * copies in a row.
+ */
+ moff -= msource->agg_offset;
+
+ if (msize & 0x00ff)
+ out[outpos++] = msize >> 0, i |= 0x10;
+ if (msize & 0xff00)
+ out[outpos++] = msize >> 8, i |= 0x20;
+
+ *op = i;
+
+ data += msize;
+ moff += msize;
+ msize = left;
+
+ if (msize < 4096) {
+ int j;
+ val = 0;
+ for (j = -RABIN_WINDOW; j < 0; j++)
+ val = ((val << 8) | data[j])
+ ^ T[val >> RABIN_SHIFT];
+ }
+ }
+
+ if (outpos >= outsize - MAX_OP_SIZE) {
+ void *tmp = out;
+ outsize = outsize * 3 / 2;
+ if (max_size && outsize >= max_size)
+ outsize = max_size + MAX_OP_SIZE + 1;
+ if (max_size && outpos > max_size)
+ break;
+ out = realloc(out, outsize);
+ if (!out) {
+ free(tmp);
+ return DELTA_OUT_OF_MEMORY;
+ }
+ }
+ }
+
+ if (inscnt)
+ out[outpos - inscnt - 1] = inscnt;
+
+ if (max_size && outpos > max_size) {
+ free(out);
+ return DELTA_SIZE_TOO_BIG;
+ }
+
+ *delta_size = outpos;
+ *delta_data = out;
+ return DELTA_OK;
+}
+
+
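+/* Report the text offset and hash value of the packed entry at position
+ * 'pos'; returns 0 if pos is out of range or an argument is NULL, else 1.
+ */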
+int
+get_entry_summary(const struct delta_index *index, int pos,
+ unsigned int *text_offset, unsigned int *hash_val)
+{
+ int hsize;
+ const struct index_entry *entry;
+ const struct index_entry *start_of_entries;
+ unsigned int offset;
+ if (pos < 0 || text_offset == NULL || hash_val == NULL
+ || index == NULL)
+ {
+ return 0;
+ }
+ hsize = index->hash_mask + 1;
+ start_of_entries = (struct index_entry *)(((struct index_entry **)index->hash) + (hsize + 1));
+ entry = start_of_entries + pos;
+ if (entry > index->last_entry) {
+ return 0;
+ }
+ if (entry->ptr == NULL) {
+ *text_offset = 0;
+ *hash_val = 0;
+ } else {
+ offset = entry->src->agg_offset;
+ offset += (entry->ptr - ((unsigned char *)entry->src->buf));
+ *text_offset = offset;
+ *hash_val = entry->val;
+ }
+ return 1;
+}
+
+
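+/* Report, for hash bucket 'pos', the offset of its first entry within the
+ * packed entry table; returns 0 if pos is out of range, else 1.
+ */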
+int
+get_hash_offset(const struct delta_index *index, int pos,
+ unsigned int *entry_offset)
+{
+ int hsize;
+ const struct index_entry *entry;
+ const struct index_entry *start_of_entries;
+ if (pos < 0 || index == NULL || entry_offset == NULL)
+ {
+ return 0;
+ }
+ hsize = index->hash_mask + 1;
+ start_of_entries = (struct index_entry *)(((struct index_entry **)index->hash) + (hsize + 1));
+ if (pos >= hsize) {
+ return 0;
+ }
+ entry = index->hash[pos];
+ if (entry == NULL) {
+ *entry_offset = -1;
+ } else {
+ *entry_offset = (entry - start_of_entries);
+ }
+ return 1;
+}
+
+
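+/* Compute the Rabin fingerprint of the first RABIN_WINDOW bytes at 'data';
+ * this is the non-rolling form of the hash maintained in create_delta().
+ */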
+unsigned int
+rabin_hash(const unsigned char *data)
+{
+ int i;
+ unsigned int val = 0;
+ for (i = 0; i < RABIN_WINDOW; i++)
+ val = ((val << 8) | data[i]) ^ T[val >> RABIN_SHIFT];
+ return val;
+}
+
+/* vim: et ts=4 sw=4 sts=4
+ */
diff --git a/bzrlib/diff.py b/bzrlib/diff.py
new file mode 100644
index 0000000..1359358
--- /dev/null
+++ b/bzrlib/diff.py
@@ -0,0 +1,1043 @@
+# Copyright (C) 2005-2011 Canonical Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import difflib
+import os
+import re
+import string
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+import subprocess
+import tempfile
+
+from bzrlib import (
+ cleanup,
+ cmdline,
+ controldir,
+ errors,
+ osutils,
+ patiencediff,
+ textfile,
+ timestamp,
+ views,
+ )
+
+from bzrlib.workingtree import WorkingTree
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.registry import (
+ Registry,
+ )
+from bzrlib.trace import mutter, note, warning
+
+
+class AtTemplate(string.Template):
+ """Templating class that uses @ instead of $."""
+
+ delimiter = '@'
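+    # e.g. AtTemplate('diff @old_path @new_path').substitute(
+    #     {'old_path': 'a', 'new_path': 'b'}) == 'diff a b'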
+
+
+# TODO: Rather than building a changeset object, we should probably
+# invoke callbacks on an object. That object can either accumulate a
+# list, write them out directly, etc etc.
+
+
+class _PrematchedMatcher(difflib.SequenceMatcher):
+ """Allow SequenceMatcher operations to use predetermined blocks"""
+
+ def __init__(self, matching_blocks):
+ difflib.SequenceMatcher(self, None, None)
+ self.matching_blocks = matching_blocks
+ self.opcodes = None
+
+
+def internal_diff(old_filename, oldlines, new_filename, newlines, to_file,
+ allow_binary=False, sequence_matcher=None,
+ path_encoding='utf8'):
+ # FIXME: difflib is wrong if there is no trailing newline.
+ # The syntax used by patch seems to be "\ No newline at
+ # end of file" following the last diff line from that
+ # file. This is not trivial to insert into the
+ # unified_diff output and it might be better to just fix
+ # or replace that function.
+
+ # In the meantime we at least make sure the patch isn't
+ # mangled.
+
+
+ # Special workaround for Python2.3, where difflib fails if
+ # both sequences are empty.
+ if not oldlines and not newlines:
+ return
+
+ if allow_binary is False:
+ textfile.check_text_lines(oldlines)
+ textfile.check_text_lines(newlines)
+
+ if sequence_matcher is None:
+ sequence_matcher = patiencediff.PatienceSequenceMatcher
+ ud = patiencediff.unified_diff(oldlines, newlines,
+ fromfile=old_filename.encode(path_encoding, 'replace'),
+ tofile=new_filename.encode(path_encoding, 'replace'),
+ sequencematcher=sequence_matcher)
+
+ ud = list(ud)
+ if len(ud) == 0: # Identical contents, nothing to do
+ return
+ # work-around for difflib being too smart for its own good
+ # if /dev/null is "1,0", patch won't recognize it as /dev/null
+ if not oldlines:
+ ud[2] = ud[2].replace('-1,0', '-0,0')
+ elif not newlines:
+ ud[2] = ud[2].replace('+1,0', '+0,0')
+
+ for line in ud:
+ to_file.write(line)
+ if not line.endswith('\n'):
+ to_file.write("\n\\ No newline at end of file\n")
+ to_file.write('\n')
+
+
+def _spawn_external_diff(diffcmd, capture_errors=True):
+ """Spawn the externall diff process, and return the child handle.
+
+ :param diffcmd: The command list to spawn
+ :param capture_errors: Capture stderr as well as setting LANG=C
+ and LC_ALL=C. This lets us read and understand the output of diff,
+ and respond to any errors.
+ :return: A Popen object.
+ """
+ if capture_errors:
+ # construct minimal environment
+ env = {}
+ path = os.environ.get('PATH')
+ if path is not None:
+ env['PATH'] = path
+ env['LANGUAGE'] = 'C' # on win32 only LANGUAGE has effect
+ env['LANG'] = 'C'
+ env['LC_ALL'] = 'C'
+ stderr = subprocess.PIPE
+ else:
+ env = None
+ stderr = None
+
+ try:
+ pipe = subprocess.Popen(diffcmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=stderr,
+ env=env)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise errors.NoDiff(str(e))
+ raise
+
+ return pipe
+
+
+def external_diff(old_filename, oldlines, new_filename, newlines, to_file,
+ diff_opts):
+ """Display a diff by calling out to the external diff program."""
+ # make sure our own output is properly ordered before the diff
+ to_file.flush()
+
+ oldtmp_fd, old_abspath = tempfile.mkstemp(prefix='bzr-diff-old-')
+ newtmp_fd, new_abspath = tempfile.mkstemp(prefix='bzr-diff-new-')
+ oldtmpf = os.fdopen(oldtmp_fd, 'wb')
+ newtmpf = os.fdopen(newtmp_fd, 'wb')
+
+ try:
+ # TODO: perhaps a special case for comparing to or from the empty
+ # sequence; can just use /dev/null on Unix
+
+ # TODO: if either of the files being compared already exists as a
+ # regular named file (e.g. in the working directory) then we can
+ # compare directly to that, rather than copying it.
+
+ oldtmpf.writelines(oldlines)
+ newtmpf.writelines(newlines)
+
+ oldtmpf.close()
+ newtmpf.close()
+
+ if not diff_opts:
+ diff_opts = []
+ if sys.platform == 'win32':
+ # Popen doesn't do the proper encoding for external commands
+ # Since we are dealing with an ANSI api, use mbcs encoding
+ old_filename = old_filename.encode('mbcs')
+ new_filename = new_filename.encode('mbcs')
+ diffcmd = ['diff',
+ '--label', old_filename,
+ old_abspath,
+ '--label', new_filename,
+ new_abspath,
+ '--binary',
+ ]
+
+ # diff only allows one style to be specified; they don't override.
+ # note that some of these take optargs, and the optargs can be
+ # directly appended to the options.
+ # this is only an approximate parser; it doesn't properly understand
+ # the grammar.
+ for s in ['-c', '-u', '-C', '-U',
+ '-e', '--ed',
+ '-q', '--brief',
+ '--normal',
+ '-n', '--rcs',
+ '-y', '--side-by-side',
+ '-D', '--ifdef']:
+ for j in diff_opts:
+ if j.startswith(s):
+ break
+ else:
+ continue
+ break
+ else:
+ diffcmd.append('-u')
+
+ if diff_opts:
+ diffcmd.extend(diff_opts)
+
+ pipe = _spawn_external_diff(diffcmd, capture_errors=True)
+ out,err = pipe.communicate()
+ rc = pipe.returncode
+
+ # internal_diff() adds a trailing newline, add one here for consistency
+ out += '\n'
+ if rc == 2:
+ # 'diff' gives retcode == 2 for all sorts of errors
+ # one of those is 'Binary files differ'.
+ # Bad options could also be the problem.
+ # 'Binary files' is not a real error, so we suppress that error.
+ lang_c_out = out
+
+ # Since we got here, we want to make sure to give an i18n error
+ pipe = _spawn_external_diff(diffcmd, capture_errors=False)
+ out, err = pipe.communicate()
+
+ # Write out the new i18n diff response
+ to_file.write(out+'\n')
+ if pipe.returncode != 2:
+ raise errors.BzrError(
+ 'external diff failed with exit code 2'
+ ' when run with LANG=C and LC_ALL=C,'
+ ' but not when run natively: %r' % (diffcmd,))
+
+ first_line = lang_c_out.split('\n', 1)[0]
+ # Starting with diffutils 2.8.4 the word "binary" was dropped.
+ m = re.match('^(binary )?files.*differ$', first_line, re.I)
+ if m is None:
+ raise errors.BzrError('external diff failed with exit code 2;'
+ ' command: %r' % (diffcmd,))
+ else:
+ # Binary files differ, just return
+ return
+
+ # If we got to here, we haven't written out the output of diff
+ # do so now
+ to_file.write(out)
+ if rc not in (0, 1):
+ # returns 1 if files differ; that's OK
+ if rc < 0:
+ msg = 'signal %d' % (-rc)
+ else:
+ msg = 'exit code %d' % rc
+
+ raise errors.BzrError('external diff failed with %s; command: %r'
+                                  % (msg, diffcmd))
+
+
+ finally:
+ oldtmpf.close() # and delete
+ newtmpf.close()
+ # Clean up. Warn in case the files couldn't be deleted
+ # (in case windows still holds the file open, but not
+ # if the files have already been deleted)
+ try:
+ os.remove(old_abspath)
+ except OSError, e:
+ if e.errno not in (errno.ENOENT,):
+ warning('Failed to delete temporary file: %s %s',
+ old_abspath, e)
+ try:
+ os.remove(new_abspath)
+        except OSError, e:
+ if e.errno not in (errno.ENOENT,):
+ warning('Failed to delete temporary file: %s %s',
+ new_abspath, e)
+
+
+def get_trees_and_branches_to_diff_locked(
+ path_list, revision_specs, old_url, new_url, add_cleanup, apply_view=True):
+ """Get the trees and specific files to diff given a list of paths.
+
+ This method works out the trees to be diff'ed and the files of
+ interest within those trees.
+
+ :param path_list:
+ the list of arguments passed to the diff command
+ :param revision_specs:
+ Zero, one or two RevisionSpecs from the diff command line,
+ saying what revisions to compare.
+ :param old_url:
+ The url of the old branch or tree. If None, the tree to use is
+ taken from the first path, if any, or the current working tree.
+ :param new_url:
+ The url of the new branch or tree. If None, the tree to use is
+ taken from the first path, if any, or the current working tree.
+ :param add_cleanup:
+ a callable like Command.add_cleanup. get_trees_and_branches_to_diff
+ will register cleanups that must be run to unlock the trees, etc.
+ :param apply_view:
+ if True and a view is set, apply the view or check that the paths
+ are within it
+ :returns:
+ a tuple of (old_tree, new_tree, old_branch, new_branch,
+ specific_files, extra_trees) where extra_trees is a sequence of
+ additional trees to search in for file-ids. The trees and branches
+ will be read-locked until the cleanups registered via the add_cleanup
+ param are run.
+ """
+ # Get the old and new revision specs
+ old_revision_spec = None
+ new_revision_spec = None
+ if revision_specs is not None:
+ if len(revision_specs) > 0:
+ old_revision_spec = revision_specs[0]
+ if old_url is None:
+ old_url = old_revision_spec.get_branch()
+ if len(revision_specs) > 1:
+ new_revision_spec = revision_specs[1]
+ if new_url is None:
+ new_url = new_revision_spec.get_branch()
+
+ other_paths = []
+ make_paths_wt_relative = True
+ consider_relpath = True
+ if path_list is None or len(path_list) == 0:
+ # If no path is given, the current working tree is used
+ default_location = u'.'
+ consider_relpath = False
+ elif old_url is not None and new_url is not None:
+ other_paths = path_list
+ make_paths_wt_relative = False
+ else:
+ default_location = path_list[0]
+ other_paths = path_list[1:]
+
+ def lock_tree_or_branch(wt, br):
+ if wt is not None:
+ wt.lock_read()
+ add_cleanup(wt.unlock)
+ elif br is not None:
+ br.lock_read()
+ add_cleanup(br.unlock)
+
+ # Get the old location
+ specific_files = []
+ if old_url is None:
+ old_url = default_location
+ working_tree, branch, relpath = \
+ controldir.ControlDir.open_containing_tree_or_branch(old_url)
+ lock_tree_or_branch(working_tree, branch)
+ if consider_relpath and relpath != '':
+ if working_tree is not None and apply_view:
+ views.check_path_in_view(working_tree, relpath)
+ specific_files.append(relpath)
+ old_tree = _get_tree_to_diff(old_revision_spec, working_tree, branch)
+ old_branch = branch
+
+ # Get the new location
+ if new_url is None:
+ new_url = default_location
+ if new_url != old_url:
+ working_tree, branch, relpath = \
+ controldir.ControlDir.open_containing_tree_or_branch(new_url)
+ lock_tree_or_branch(working_tree, branch)
+ if consider_relpath and relpath != '':
+ if working_tree is not None and apply_view:
+ views.check_path_in_view(working_tree, relpath)
+ specific_files.append(relpath)
+ new_tree = _get_tree_to_diff(new_revision_spec, working_tree, branch,
+ basis_is_default=working_tree is None)
+ new_branch = branch
+
+ # Get the specific files (all files is None, no files is [])
+ if make_paths_wt_relative and working_tree is not None:
+ other_paths = working_tree.safe_relpath_files(
+ other_paths,
+ apply_view=apply_view)
+ specific_files.extend(other_paths)
+ if len(specific_files) == 0:
+ specific_files = None
+ if (working_tree is not None and working_tree.supports_views()
+ and apply_view):
+ view_files = working_tree.views.lookup_view()
+ if view_files:
+ specific_files = view_files
+ view_str = views.view_display_str(view_files)
+ note(gettext("*** Ignoring files outside view. View is %s") % view_str)
+
+ # Get extra trees that ought to be searched for file-ids
+ extra_trees = None
+ if working_tree is not None and working_tree not in (old_tree, new_tree):
+ extra_trees = (working_tree,)
+ return (old_tree, new_tree, old_branch, new_branch,
+ specific_files, extra_trees)
+
+
+def _get_tree_to_diff(spec, tree=None, branch=None, basis_is_default=True):
+ if branch is None and tree is not None:
+ branch = tree.branch
+ if spec is None or spec.spec is None:
+ if basis_is_default:
+ if tree is not None:
+ return tree.basis_tree()
+ else:
+ return branch.basis_tree()
+ else:
+ return tree
+ return spec.as_tree(branch)
+
+
+def show_diff_trees(old_tree, new_tree, to_file, specific_files=None,
+ external_diff_options=None,
+ old_label='a/', new_label='b/',
+ extra_trees=None,
+ path_encoding='utf8',
+ using=None,
+ format_cls=None):
+ """Show in text form the changes from one tree to another.
+
+ :param to_file: The output stream.
+ :param specific_files: Include only changes to these files - None for all
+ changes.
+ :param external_diff_options: If set, use an external GNU diff and pass
+ these options.
+ :param extra_trees: If set, more Trees to use for looking up file ids
+ :param path_encoding: If set, the path will be encoded as specified,
+        otherwise it is assumed to be utf8
+ :param format_cls: Formatter class (DiffTree subclass)
+ """
+ if format_cls is None:
+ format_cls = DiffTree
+ old_tree.lock_read()
+ try:
+ if extra_trees is not None:
+ for tree in extra_trees:
+ tree.lock_read()
+ new_tree.lock_read()
+ try:
+ differ = format_cls.from_trees_options(old_tree, new_tree, to_file,
+ path_encoding,
+ external_diff_options,
+ old_label, new_label, using)
+ return differ.show_diff(specific_files, extra_trees)
+ finally:
+ new_tree.unlock()
+ if extra_trees is not None:
+ for tree in extra_trees:
+ tree.unlock()
+ finally:
+ old_tree.unlock()
+
+
+def _patch_header_date(tree, file_id, path):
+ """Returns a timestamp suitable for use in a patch header."""
+ try:
+ mtime = tree.get_file_mtime(file_id, path)
+ except errors.FileTimestampUnavailable:
+ mtime = 0
+ return timestamp.format_patch_date(mtime)
+
+
+def get_executable_change(old_is_x, new_is_x):
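+    # e.g. get_executable_change(False, True) -> ['-x to +x'];
+    # an unchanged executable bit gives [].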
+ descr = { True:"+x", False:"-x", None:"??" }
+ if old_is_x != new_is_x:
+ return ["%s to %s" % (descr[old_is_x], descr[new_is_x],)]
+ else:
+ return []
+
+
+class DiffPath(object):
+ """Base type for command object that compare files"""
+
+ # The type or contents of the file were unsuitable for diffing
+ CANNOT_DIFF = 'CANNOT_DIFF'
+ # The file has changed in a semantic way
+ CHANGED = 'CHANGED'
+ # The file content may have changed, but there is no semantic change
+ UNCHANGED = 'UNCHANGED'
+
+ def __init__(self, old_tree, new_tree, to_file, path_encoding='utf-8'):
+ """Constructor.
+
+ :param old_tree: The tree to show as the old tree in the comparison
+ :param new_tree: The tree to show as new in the comparison
+ :param to_file: The file to write comparison data to
+ :param path_encoding: The character encoding to write paths in
+ """
+ self.old_tree = old_tree
+ self.new_tree = new_tree
+ self.to_file = to_file
+ self.path_encoding = path_encoding
+
+ def finish(self):
+ pass
+
+ @classmethod
+ def from_diff_tree(klass, diff_tree):
+ return klass(diff_tree.old_tree, diff_tree.new_tree,
+ diff_tree.to_file, diff_tree.path_encoding)
+
+ @staticmethod
+ def _diff_many(differs, file_id, old_path, new_path, old_kind, new_kind):
+ for file_differ in differs:
+ result = file_differ.diff(file_id, old_path, new_path, old_kind,
+ new_kind)
+ if result is not DiffPath.CANNOT_DIFF:
+ return result
+ else:
+ return DiffPath.CANNOT_DIFF
+
+
+class DiffKindChange(object):
+ """Special differ for file kind changes.
+
+ Represents kind change as deletion + creation. Uses the other differs
+ to do this.
+ """
+ def __init__(self, differs):
+ self.differs = differs
+
+ def finish(self):
+ pass
+
+ @classmethod
+ def from_diff_tree(klass, diff_tree):
+ return klass(diff_tree.differs)
+
+ def diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ """Perform comparison
+
+ :param file_id: The file_id of the file to compare
+ :param old_path: Path of the file in the old tree
+ :param new_path: Path of the file in the new tree
+ :param old_kind: Old file-kind of the file
+ :param new_kind: New file-kind of the file
+ """
+ if None in (old_kind, new_kind):
+ return DiffPath.CANNOT_DIFF
+ result = DiffPath._diff_many(self.differs, file_id, old_path,
+ new_path, old_kind, None)
+ if result is DiffPath.CANNOT_DIFF:
+ return result
+ return DiffPath._diff_many(self.differs, file_id, old_path, new_path,
+ None, new_kind)
+
+
+class DiffDirectory(DiffPath):
+
+ def diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ """Perform comparison between two directories. (dummy)
+
+ """
+ if 'directory' not in (old_kind, new_kind):
+ return self.CANNOT_DIFF
+ if old_kind not in ('directory', None):
+ return self.CANNOT_DIFF
+ if new_kind not in ('directory', None):
+ return self.CANNOT_DIFF
+ return self.CHANGED
+
+
+class DiffSymlink(DiffPath):
+
+ def diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ """Perform comparison between two symlinks
+
+ :param file_id: The file_id of the file to compare
+ :param old_path: Path of the file in the old tree
+ :param new_path: Path of the file in the new tree
+ :param old_kind: Old file-kind of the file
+ :param new_kind: New file-kind of the file
+ """
+ if 'symlink' not in (old_kind, new_kind):
+ return self.CANNOT_DIFF
+ if old_kind == 'symlink':
+ old_target = self.old_tree.get_symlink_target(file_id)
+ elif old_kind is None:
+ old_target = None
+ else:
+ return self.CANNOT_DIFF
+ if new_kind == 'symlink':
+ new_target = self.new_tree.get_symlink_target(file_id)
+ elif new_kind is None:
+ new_target = None
+ else:
+ return self.CANNOT_DIFF
+ return self.diff_symlink(old_target, new_target)
+
+ def diff_symlink(self, old_target, new_target):
+ if old_target is None:
+ self.to_file.write('=== target is %r\n' % new_target)
+ elif new_target is None:
+ self.to_file.write('=== target was %r\n' % old_target)
+ else:
+ self.to_file.write('=== target changed %r => %r\n' %
+ (old_target, new_target))
+ return self.CHANGED
+
+
+class DiffText(DiffPath):
+
+ # GNU Patch uses the epoch date to detect files that are being added
+ # or removed in a diff.
+ EPOCH_DATE = '1970-01-01 00:00:00 +0000'
+
+ def __init__(self, old_tree, new_tree, to_file, path_encoding='utf-8',
+ old_label='', new_label='', text_differ=internal_diff):
+ DiffPath.__init__(self, old_tree, new_tree, to_file, path_encoding)
+ self.text_differ = text_differ
+ self.old_label = old_label
+ self.new_label = new_label
+ self.path_encoding = path_encoding
+
+ def diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ """Compare two files in unified diff format
+
+ :param file_id: The file_id of the file to compare
+ :param old_path: Path of the file in the old tree
+ :param new_path: Path of the file in the new tree
+ :param old_kind: Old file-kind of the file
+ :param new_kind: New file-kind of the file
+ """
+ if 'file' not in (old_kind, new_kind):
+ return self.CANNOT_DIFF
+ from_file_id = to_file_id = file_id
+ if old_kind == 'file':
+ old_date = _patch_header_date(self.old_tree, file_id, old_path)
+ elif old_kind is None:
+ old_date = self.EPOCH_DATE
+ from_file_id = None
+ else:
+ return self.CANNOT_DIFF
+ if new_kind == 'file':
+ new_date = _patch_header_date(self.new_tree, file_id, new_path)
+ elif new_kind is None:
+ new_date = self.EPOCH_DATE
+ to_file_id = None
+ else:
+ return self.CANNOT_DIFF
+ from_label = '%s%s\t%s' % (self.old_label, old_path, old_date)
+ to_label = '%s%s\t%s' % (self.new_label, new_path, new_date)
+ return self.diff_text(from_file_id, to_file_id, from_label, to_label,
+ old_path, new_path)
+
+ def diff_text(self, from_file_id, to_file_id, from_label, to_label,
+ from_path=None, to_path=None):
+ """Diff the content of given files in two trees
+
+ :param from_file_id: The id of the file in the from tree. If None,
+ the file is not present in the from tree.
+ :param to_file_id: The id of the file in the to tree. This may refer
+ to a different file from from_file_id. If None,
+ the file is not present in the to tree.
+ :param from_path: The path in the from tree or None if unknown.
+ :param to_path: The path in the to tree or None if unknown.
+ """
+ def _get_text(tree, file_id, path):
+ if file_id is not None:
+ return tree.get_file_lines(file_id, path)
+ else:
+ return []
+ try:
+ from_text = _get_text(self.old_tree, from_file_id, from_path)
+ to_text = _get_text(self.new_tree, to_file_id, to_path)
+ self.text_differ(from_label, from_text, to_label, to_text,
+ self.to_file, path_encoding=self.path_encoding)
+ except errors.BinaryFile:
+ self.to_file.write(
+ ("Binary files %s and %s differ\n" %
+ (from_label, to_label)).encode(self.path_encoding,'replace'))
+ return self.CHANGED
+
+
+class DiffFromTool(DiffPath):
+
+ def __init__(self, command_template, old_tree, new_tree, to_file,
+ path_encoding='utf-8'):
+ DiffPath.__init__(self, old_tree, new_tree, to_file, path_encoding)
+ self.command_template = command_template
+ self._root = osutils.mkdtemp(prefix='bzr-diff-')
+
+ @classmethod
+ def from_string(klass, command_string, old_tree, new_tree, to_file,
+ path_encoding='utf-8'):
+ command_template = cmdline.split(command_string)
+ if '@' not in command_string:
+ command_template.extend(['@old_path', '@new_path'])
+ return klass(command_template, old_tree, new_tree, to_file,
+ path_encoding)
+
+ @classmethod
+ def make_from_diff_tree(klass, command_string, external_diff_options=None):
+ def from_diff_tree(diff_tree):
+ full_command_string = [command_string]
+ if external_diff_options is not None:
+ full_command_string += ' ' + external_diff_options
+ return klass.from_string(full_command_string, diff_tree.old_tree,
+ diff_tree.new_tree, diff_tree.to_file)
+ return from_diff_tree
+
+ def _get_command(self, old_path, new_path):
+ my_map = {'old_path': old_path, 'new_path': new_path}
+ command = [AtTemplate(t).substitute(my_map) for t in
+ self.command_template]
+ if sys.platform == 'win32': # Popen doesn't accept unicode on win32
+ command_encoded = []
+ for c in command:
+ if isinstance(c, unicode):
+ command_encoded.append(c.encode('mbcs'))
+ else:
+ command_encoded.append(c)
+ return command_encoded
+ else:
+ return command
+
+ def _execute(self, old_path, new_path):
+ command = self._get_command(old_path, new_path)
+ try:
+ proc = subprocess.Popen(command, stdout=subprocess.PIPE,
+ cwd=self._root)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise errors.ExecutableMissing(command[0])
+ else:
+ raise
+ self.to_file.write(proc.stdout.read())
+ return proc.wait()
+
+ def _try_symlink_root(self, tree, prefix):
+ if (getattr(tree, 'abspath', None) is None
+ or not osutils.host_os_dereferences_symlinks()):
+ return False
+ try:
+ os.symlink(tree.abspath(''), osutils.pathjoin(self._root, prefix))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ return True
+
+ @staticmethod
+ def _fenc():
+ """Returns safe encoding for passing file path to diff tool"""
+ if sys.platform == 'win32':
+ return 'mbcs'
+ else:
+ # Don't fallback to 'utf-8' because subprocess may not be able to
+ # handle utf-8 correctly when locale is not utf-8.
+ return sys.getfilesystemencoding() or 'ascii'
+
+ def _is_safepath(self, path):
+ """Return true if `path` may be able to pass to subprocess."""
+ fenc = self._fenc()
+ try:
+ return path == path.encode(fenc).decode(fenc)
+ except UnicodeError:
+ return False
+
+ def _safe_filename(self, prefix, relpath):
+ """Replace unsafe character in `relpath` then join `self._root`,
+ `prefix` and `relpath`."""
+ fenc = self._fenc()
+ # encoded_str.replace('?', '_') may break multibyte char.
+ # So we should encode, decode, then replace(u'?', u'_')
+ relpath_tmp = relpath.encode(fenc, 'replace').decode(fenc, 'replace')
+ relpath_tmp = relpath_tmp.replace(u'?', u'_')
+ return osutils.pathjoin(self._root, prefix, relpath_tmp)
+
+ def _write_file(self, file_id, tree, prefix, relpath, force_temp=False,
+ allow_write=False):
+ if not force_temp and isinstance(tree, WorkingTree):
+ full_path = tree.abspath(tree.id2path(file_id))
+ if self._is_safepath(full_path):
+ return full_path
+
+ full_path = self._safe_filename(prefix, relpath)
+ if not force_temp and self._try_symlink_root(tree, prefix):
+ return full_path
+ parent_dir = osutils.dirname(full_path)
+ try:
+ os.makedirs(parent_dir)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ source = tree.get_file(file_id, relpath)
+ try:
+ target = open(full_path, 'wb')
+ try:
+ osutils.pumpfile(source, target)
+ finally:
+ target.close()
+ finally:
+ source.close()
+ try:
+ mtime = tree.get_file_mtime(file_id)
+ except errors.FileTimestampUnavailable:
+ pass
+ else:
+ os.utime(full_path, (mtime, mtime))
+ if not allow_write:
+ osutils.make_readonly(full_path)
+ return full_path
+
+ def _prepare_files(self, file_id, old_path, new_path, force_temp=False,
+ allow_write_new=False):
+ old_disk_path = self._write_file(file_id, self.old_tree, 'old',
+ old_path, force_temp)
+ new_disk_path = self._write_file(file_id, self.new_tree, 'new',
+ new_path, force_temp,
+ allow_write=allow_write_new)
+ return old_disk_path, new_disk_path
+
+ def finish(self):
+ try:
+ osutils.rmtree(self._root)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ mutter("The temporary directory \"%s\" was not "
+ "cleanly removed: %s." % (self._root, e))
+
+ def diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ if (old_kind, new_kind) != ('file', 'file'):
+ return DiffPath.CANNOT_DIFF
+ (old_disk_path, new_disk_path) = self._prepare_files(
+ file_id, old_path, new_path)
+ self._execute(old_disk_path, new_disk_path)
+
+ def edit_file(self, file_id):
+ """Use this tool to edit a file.
+
+ A temporary copy will be edited, and the new contents will be
+ returned.
+
+ :param file_id: The id of the file to edit.
+ :return: The new contents of the file.
+ """
+ old_path = self.old_tree.id2path(file_id)
+ new_path = self.new_tree.id2path(file_id)
+ old_abs_path, new_abs_path = self._prepare_files(
+ file_id, old_path, new_path,
+ allow_write_new=True,
+ force_temp=True)
+ command = self._get_command(old_abs_path, new_abs_path)
+ subprocess.call(command, cwd=self._root)
+ new_file = open(new_abs_path, 'rb')
+ try:
+ return new_file.read()
+ finally:
+ new_file.close()
+
+
+class DiffTree(object):
+ """Provides textual representations of the difference between two trees.
+
+ A DiffTree examines two trees and where a file-id has altered
+ between them, generates a textual representation of the difference.
+ DiffTree uses a sequence of DiffPath objects which are each
+ given the opportunity to handle a given altered fileid. The list
+ of DiffPath objects can be extended globally by appending to
+ DiffTree.diff_factories, or for a specific diff operation by
+ supplying the extra_factories option to the appropriate method.
+ """
+
+ # list of factories that can provide instances of DiffPath objects
+ # may be extended by plugins.
+ diff_factories = [DiffSymlink.from_diff_tree,
+ DiffDirectory.from_diff_tree]
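+    # Typical use: DiffTree(old_tree, new_tree, to_file).show_diff(None)
+    # writes the full diff between the two trees to to_file.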
+
+ def __init__(self, old_tree, new_tree, to_file, path_encoding='utf-8',
+ diff_text=None, extra_factories=None):
+ """Constructor
+
+ :param old_tree: Tree to show as old in the comparison
+ :param new_tree: Tree to show as new in the comparison
+        :param to_file: File to write comparison to
+ :param path_encoding: Character encoding to write paths in
+ :param diff_text: DiffPath-type object to use as a last resort for
+ diffing text files.
+ :param extra_factories: Factories of DiffPaths to try before any other
+ DiffPaths"""
+ if diff_text is None:
+ diff_text = DiffText(old_tree, new_tree, to_file, path_encoding,
+ '', '', internal_diff)
+ self.old_tree = old_tree
+ self.new_tree = new_tree
+ self.to_file = to_file
+ self.path_encoding = path_encoding
+ self.differs = []
+ if extra_factories is not None:
+ self.differs.extend(f(self) for f in extra_factories)
+ self.differs.extend(f(self) for f in self.diff_factories)
+ self.differs.extend([diff_text, DiffKindChange.from_diff_tree(self)])
+
+ @classmethod
+ def from_trees_options(klass, old_tree, new_tree, to_file,
+ path_encoding, external_diff_options, old_label,
+ new_label, using):
+ """Factory for producing a DiffTree.
+
+ Designed to accept options used by show_diff_trees.
+
+ :param old_tree: The tree to show as old in the comparison
+ :param new_tree: The tree to show as new in the comparison
+ :param to_file: File to write comparisons to
+ :param path_encoding: Character encoding to use for writing paths
+ :param external_diff_options: If supplied, use the installed diff
+ binary to perform file comparison, using supplied options.
+ :param old_label: Prefix to use for old file labels
+ :param new_label: Prefix to use for new file labels
+ :param using: Commandline to use to invoke an external diff tool
+ """
+ if using is not None:
+ extra_factories = [DiffFromTool.make_from_diff_tree(using, external_diff_options)]
+ else:
+ extra_factories = []
+ if external_diff_options:
+ opts = external_diff_options.split()
+ def diff_file(olab, olines, nlab, nlines, to_file, path_encoding=None):
+ """:param path_encoding: not used but required
+ to match the signature of internal_diff.
+ """
+ external_diff(olab, olines, nlab, nlines, to_file, opts)
+ else:
+ diff_file = internal_diff
+ diff_text = DiffText(old_tree, new_tree, to_file, path_encoding,
+ old_label, new_label, diff_file)
+ return klass(old_tree, new_tree, to_file, path_encoding, diff_text,
+ extra_factories)
+
+ def show_diff(self, specific_files, extra_trees=None):
+ """Write tree diff to self.to_file
+
+ :param specific_files: the specific files to compare (recursive)
+ :param extra_trees: extra trees to use for mapping paths to file_ids
+ """
+ try:
+ return self._show_diff(specific_files, extra_trees)
+ finally:
+ for differ in self.differs:
+ differ.finish()
+
+ def _show_diff(self, specific_files, extra_trees):
+ # TODO: Generation of pseudo-diffs for added/deleted files could
+ # be usefully made into a much faster special case.
+ iterator = self.new_tree.iter_changes(self.old_tree,
+ specific_files=specific_files,
+ extra_trees=extra_trees,
+ require_versioned=True)
+ has_changes = 0
+ def changes_key(change):
+ old_path, new_path = change[1]
+ path = new_path
+ if path is None:
+ path = old_path
+ return path
+ def get_encoded_path(path):
+ if path is not None:
+ return path.encode(self.path_encoding, "replace")
+ for (file_id, paths, changed_content, versioned, parent, name, kind,
+ executable) in sorted(iterator, key=changes_key):
+ # The root does not get diffed, and items with no known kind (that
+ # is, missing) in both trees are skipped as well.
+ if parent == (None, None) or kind == (None, None):
+ continue
+ oldpath, newpath = paths
+ oldpath_encoded = get_encoded_path(paths[0])
+ newpath_encoded = get_encoded_path(paths[1])
+ old_present = (kind[0] is not None and versioned[0])
+ new_present = (kind[1] is not None and versioned[1])
+ renamed = (parent[0], name[0]) != (parent[1], name[1])
+
+ properties_changed = []
+ properties_changed.extend(get_executable_change(executable[0], executable[1]))
+
+ if properties_changed:
+ prop_str = " (properties changed: %s)" % (", ".join(properties_changed),)
+ else:
+ prop_str = ""
+
+ if (old_present, new_present) == (True, False):
+ self.to_file.write("=== removed %s '%s'\n" %
+ (kind[0], oldpath_encoded))
+ newpath = oldpath
+ elif (old_present, new_present) == (False, True):
+ self.to_file.write("=== added %s '%s'\n" %
+ (kind[1], newpath_encoded))
+ oldpath = newpath
+ elif renamed:
+ self.to_file.write("=== renamed %s '%s' => '%s'%s\n" %
+ (kind[0], oldpath_encoded, newpath_encoded, prop_str))
+ else:
+ # if it was produced by iter_changes, it must be
+ # modified *somehow*, either content or execute bit.
+ self.to_file.write("=== modified %s '%s'%s\n" % (kind[0],
+ newpath_encoded, prop_str))
+ if changed_content:
+ self._diff(file_id, oldpath, newpath, kind[0], kind[1])
+ has_changes = 1
+ if renamed:
+ has_changes = 1
+ return has_changes
+
+ def diff(self, file_id, old_path, new_path):
+ """Perform a diff of a single file
+
+ :param file_id: file-id of the file
+ :param old_path: The path of the file in the old tree
+ :param new_path: The path of the file in the new tree
+ """
+ try:
+ old_kind = self.old_tree.kind(file_id)
+ except (errors.NoSuchId, errors.NoSuchFile):
+ old_kind = None
+ try:
+ new_kind = self.new_tree.kind(file_id)
+ except (errors.NoSuchId, errors.NoSuchFile):
+ new_kind = None
+ self._diff(file_id, old_path, new_path, old_kind, new_kind)
+
+
+ def _diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ result = DiffPath._diff_many(self.differs, file_id, old_path,
+ new_path, old_kind, new_kind)
+ if result is DiffPath.CANNOT_DIFF:
+ error_path = new_path
+ if error_path is None:
+ error_path = old_path
+ raise errors.NoDiffFound(error_path)
+
+
+format_registry = Registry()
+format_registry.register('default', DiffTree)
diff --git a/bzrlib/directory_service.py b/bzrlib/directory_service.py
new file mode 100644
index 0000000..eac7d8d
--- /dev/null
+++ b/bzrlib/directory_service.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Directory service registration and usage.
+
+Directory services are utilities that provide a mapping from URL-like strings
+to true URLs. Examples include lp:urls and per-user location aliases.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ registry,
+ )
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ branch as _mod_branch,
+ controldir as _mod_controldir,
+ urlutils,
+ )
+""")
+
+
+class DirectoryServiceRegistry(registry.Registry):
+ """This object maintains and uses a list of directory services.
+
+ Directory services may be registered via the standard Registry methods.
+ They will be invoked if their key is a prefix of the supplied URL.
+
+ Each item registered should be a factory of objects that provide a look_up
+ method, as invoked by dereference. Specifically, look_up should accept a
+ name and URL, and return a URL.
+ """
+
+ def dereference(self, url):
+ """Dereference a supplied URL if possible.
+
+ URLs that match a registered directory service prefix are looked up in
+ it. Non-matching urls are returned verbatim.
+
+ This is applied only once; the resulting URL must not be one that
+ requires further dereferencing.
+
+ :param url: The URL to dereference
+ :return: The dereferenced URL if applicable, the input URL otherwise.
+ """
+ match = self.get_prefix(url)
+ if match is None:
+ return url
+ service, name = match
+ return service().look_up(name, url)
+
+directories = DirectoryServiceRegistry()
+
+class AliasDirectory(object):
+ """Directory lookup for locations associated with a branch.
+
+ :parent, :submit, :public, :push, :this, and :bound are currently
+ supported. On error, a subclass of DirectoryLookupFailure will be raised.
+ """
+
+ branch_aliases = registry.Registry()
+ branch_aliases.register('parent', lambda b: b.get_parent(),
+ help="The parent of this branch.")
+ branch_aliases.register('submit', lambda b: b.get_submit_branch(),
+ help="The submit branch for this branch.")
+ branch_aliases.register('public', lambda b: b.get_public_branch(),
+ help="The public location of this branch.")
+ branch_aliases.register('bound', lambda b: b.get_bound_location(),
+ help="The branch this branch is bound to, for bound branches.")
+ branch_aliases.register('push', lambda b: b.get_push_location(),
+ help="The saved location used for `bzr push` with no arguments.")
+ branch_aliases.register('this', lambda b: b.base,
+ help="This branch.")
+
+ def look_up(self, name, url):
+ branch = _mod_branch.Branch.open_containing('.')[0]
+ parts = url.split('/', 1)
+ if len(parts) == 2:
+ name, extra = parts
+ else:
+ (name,) = parts
+ extra = None
+ try:
+ method = self.branch_aliases.get(name[1:])
+ except KeyError:
+ raise errors.InvalidLocationAlias(url)
+ else:
+ result = method(branch)
+ if result is None:
+ raise errors.UnsetLocationAlias(url)
+ if extra is not None:
+ result = urlutils.join(result, extra)
+ return result
+
+ @classmethod
+ def help_text(cls, topic):
+ alias_lines = []
+ for key in cls.branch_aliases.keys():
+ help = cls.branch_aliases.get_help(key)
+ alias_lines.append(" :%-10s%s\n" % (key, help))
+ return """\
+Location aliases
+================
+
+Bazaar defines several aliases for locations associated with a branch. These
+can be used with most commands that expect a location, such as `bzr push`.
+
+The aliases are::
+
+%s
+For example, to push to the parent location::
+
+ bzr push :parent
+""" % "".join(alias_lines)
+
+
+directories.register(':', AliasDirectory,
+ 'Easy access to remembered branch locations')
+
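+# Illustrative usage (editorial sketch, not part of the original module): with
+# the ':' prefix registered above, an alias is resolved against the branch
+# containing the current directory, e.g.
+#
+#   from bzrlib.directory_service import directories
+#   directories.dereference(':parent')  # -> the branch's saved parent location
+#   directories.dereference(':push')    # -> the saved `bzr push` location
+#
+# An alias whose location is not set raises UnsetLocationAlias; an unknown
+# alias raises InvalidLocationAlias.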
+
+class ColocatedDirectory(object):
+ """Directory lookup for colocated branches.
+
+ co:somename will resolve to the colocated branch with "somename" in
+ the current directory.
+ """
+
+ def look_up(self, name, url):
+ dir = _mod_controldir.ControlDir.open_containing('.')[0]
+ return urlutils.join_segment_parameters(dir.user_url,
+ {"branch": urlutils.escape(name)})
+
+
+directories.register('co:', ColocatedDirectory,
+ 'Easy access to colocated branches')
+
diff --git a/bzrlib/dirstate.py b/bzrlib/dirstate.py
new file mode 100644
index 0000000..8f5725b
--- /dev/null
+++ b/bzrlib/dirstate.py
@@ -0,0 +1,4275 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""DirState objects record the state of a directory and its bzr metadata.
+
+Pseudo EBNF grammar for the state file. Fields are separated by NULLs, and
+lines by NL. The field delimiters are omitted in the grammar, but line delimiters
+are not - this is done for clarity of reading. All string data is in utf8.
+
+::
+
+ MINIKIND = "f" | "d" | "l" | "a" | "r" | "t";
+ NL = "\\n";
+ NULL = "\\0";
+ WHOLE_NUMBER = {digit}, digit;
+ BOOLEAN = "y" | "n";
+ REVISION_ID = a non-empty utf8 string;
+
+ dirstate format = header line, full checksum, row count, parent details,
+ ghost_details, entries;
+ header line = "#bazaar dirstate flat format 3", NL;
+ full checksum = "crc32: ", ["-"], WHOLE_NUMBER, NL;
+ row count = "num_entries: ", WHOLE_NUMBER, NL;
+    parent_details = WHOLE_NUMBER, {REVISION_ID}*, NL;
+    ghost_details = WHOLE_NUMBER, {REVISION_ID}*, NL;
+ entries = {entry};
+ entry = entry_key, current_entry_details, {parent_entry_details};
+ entry_key = dirname, basename, fileid;
+ current_entry_details = common_entry_details, working_entry_details;
+ parent_entry_details = common_entry_details, history_entry_details;
+    common_entry_details = MINIKIND, fingerprint, size, executable;
+    working_entry_details = packed_stat;
+ history_entry_details = REVISION_ID;
+ executable = BOOLEAN;
+ size = WHOLE_NUMBER;
+ fingerprint = a nonempty utf8 sequence with meaning defined by minikind.
+
+Given this definition, the following is useful to know::
+
+ entry (aka row) - all the data for a given key.
+ entry[0]: The key (dirname, basename, fileid)
+ entry[0][0]: dirname
+ entry[0][1]: basename
+ entry[0][2]: fileid
+ entry[1]: The tree(s) data for this path and id combination.
+ entry[1][0]: The current tree
+ entry[1][1]: The second tree
+
+For an entry for a tree, we have (using tree 0 - current tree) to demonstrate::
+
+ entry[1][0][0]: minikind
+ entry[1][0][1]: fingerprint
+ entry[1][0][2]: size
+ entry[1][0][3]: executable
+ entry[1][0][4]: packed_stat
+
+OR (for non tree-0)::
+
+ entry[1][1][4]: revision_id
+
+There may be multiple rows at the root, one per id present in the root, so the
+in memory root row is now::
+
+ self._dirblocks[0] -> ('', [entry ...]),
+
+and the entries in there are::
+
+ entries[0][0]: ''
+ entries[0][1]: ''
+ entries[0][2]: file_id
+ entries[1][0]: The tree data for the current tree for this fileid at /
+ etc.
+
+Kinds::
+
+ 'r' is a relocated entry: This path is not present in this tree with this
+ id, but the id can be found at another location. The fingerprint is
+ used to point to the target location.
+ 'a' is an absent entry: In that tree the id is not present at this path.
+ 'd' is a directory entry: This path in this tree is a directory with the
+ current file id. There is no fingerprint for directories.
+ 'f' is a file entry: As for directory, but it's a file. The fingerprint is
+ the sha1 value of the file's canonical form, i.e. after any read
+ filters have been applied to the convenience form stored in the working
+ tree.
+ 'l' is a symlink entry: As for directory, but a symlink. The fingerprint is
+ the link target.
+ 't' is a reference to a nested subtree; the fingerprint is the referenced
+ revision.
+
+Ordering:
+
+The entries on disk and in memory are ordered according to the following keys::
+
+ directory, as a list of components
+ filename
+ file-id
+
+--- Format 1 had the following different definition: ---
+
+::
+
+ rows = dirname, NULL, basename, NULL, MINIKIND, NULL, fileid_utf8, NULL,
+ WHOLE NUMBER (* size *), NULL, packed stat, NULL, sha1|symlink target,
+ {PARENT ROW}
+ PARENT ROW = NULL, revision_utf8, NULL, MINIKIND, NULL, dirname, NULL,
+ basename, NULL, WHOLE NUMBER (* size *), NULL, "y" | "n", NULL,
+ SHA1
+
+PARENT ROWs are emitted for every parent that is not in the ghosts details
+line. That is, if the parents are foo, bar, baz, and the ghosts are bar, then
+each row will have a PARENT ROW for foo and baz, but not for bar.
+
+
+In any tree, a kind of 'moved' indicates that the fingerprint field
+(which we treat as opaque data specific to the 'kind' anyway) has the
+details for the id of this row in that tree.
+
+I'm strongly tempted to add an id->path index as well, but I think that
+where we need an id->path mapping, we also usually read the whole file, so
+I'm going to skip that for the moment, as we have the ability to locate
+via bisect any path in any tree, and if we look up things by path, we can
+accumulate an id->path mapping as we go, which will tend to match what we
+looked for.
+
+I plan to implement this asap, so please speak up now to alter/tweak the
+design - and once we stabilise on this, I'll update the wiki page for
+it.
+
+The rationale for all this is that we want fast operations for the
+common case (diff/status/commit/merge on all files) and extremely fast
+operations for the less common but still frequent case (status/diff/commit
+on specific files). Operations on specific files involve a scan for all
+the children of a path, *in every involved tree*, which the current
+format did not accommodate.
+----
+
+Design priorities:
+ 1. Fast end-to-end use for bzr's top 5 use cases (commit/diff/status/merge/???).
+ 2. Fall back to the current object model as needed.
+ 3. Scale usably to the largest trees known today - say 50K entries (mozilla
+    is an example of this).
+
+
+Locking:
+
+ Eventually reuse dirstate objects across locks IFF the dirstate file has not
+ been modified, but this will require that we flush/ignore cached stat-hit data
+ because we won't want to restat all files on disk just because a lock was
+ acquired, yet we cannot trust the data after the previous lock was released.
+
+Memory representation::
+
+ vector of all directories, and vector of the children ?
+ i.e.
+   root_entry = (direntry for root, [parent_direntries_for_root]),
+ dirblocks = [
+ ('', ['data for achild', 'data for bchild', 'data for cchild'])
+ ('dir', ['achild', 'cchild', 'echild'])
+ ]
+ - single bisect to find N subtrees from a path spec
+ - in-order for serialisation - this is 'dirblock' grouping.
+ - insertion of a file '/a' affects only the '/' child-vector, that is, to
+      insert 10K elements from scratch does not generate O(N^2) memmoves of a
+      single vector, rather of each individual child-vector, which tends to be
+      of a manageable size. Will scale badly on trees with 10K entries in a
+      single directory. Compare with Inventory.InventoryDirectory, which has
+ a dictionary for the children. No bisect capability, can only probe for
+ exact matches, or grab all elements and sort.
+ - What's the risk of error here? Once we have the base format being processed
+ we should have a net win regardless of optimality. So we are going to
+ go with what seems reasonable.
+
+open questions:
+
+Maybe we should do a test profile of the core structure - 10K simulated
+searches/lookups/etc?
+
+Objects for each row?
+The lifetime of Dirstate objects is currently per lock, but see above for
+possible extensions. The lifetime of a row from a dirstate is expected to be
+very short in the optimistic case, which is what we are optimising for. For instance,
+subtree status will determine from analysis of the disk data what rows need to
+be examined at all, and will be able to determine from a single row whether
+that file has altered or not, so we are aiming to process tens of thousands of
+entries each second within the dirstate context, before exposing anything to
+the larger codebase. This suggests we want the time for a single file
+comparison to be < 0.1 milliseconds. That would give us 10000 paths per second
+processed, and to scale to 100 thousand we'll need another order of magnitude to do
+that. Now, as the lifetime for all unchanged entries is the time to parse, stat
+the file on disk, and then immediately discard, the overhead of object creation
+becomes a significant cost.
+
+Figures: Creating a tuple from 3 elements was profiled at 0.0625
+microseconds, whereas creating an object which is subclassed from tuple was
+0.500 microseconds, and creating an object with 3 elements and slots was 3
+microseconds. 0.1 milliseconds is 100 microseconds, and ideally we'll get
+down to 10 microseconds for the total processing - having 33% of that be object
+creation is a huge overhead. There is a potential cost in using tuples within
+each row, which is that the conditional code to do comparisons may be slower
+than method invocation, but method invocation is known to be slow due to stack
+frame creation, so avoiding methods in these tight inner loops is unfortunately
+desirable. We can consider a pyrex version of this with objects in future if
+desired.
+
+"""
+
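+# Illustrative sketch of the in-memory entry structure described above
+# (editorial note, not part of the original module). All values below are
+# placeholders:
+#
+#   entry = (('dir', 'file1', 'file1-id'),           # key: dirname, basename, id
+#            [('f', 'sha1...', 42, False, 'xxxx'),   # tree 0: the current tree
+#             ('f', 'sha1...', 42, False, 'rev-1'),  # tree 1: a parent tree
+#            ])
+#   minikind = entry[1][0][0]        # 'f': a file in the current tree
+#   basis_revision = entry[1][1][4]  # revision_id for the non-current tree
+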
+from __future__ import absolute_import
+
+import bisect
+import errno
+import operator
+import os
+from stat import S_IEXEC
+import stat
+import sys
+import time
+import zlib
+
+from bzrlib import (
+ cache_utf8,
+ config,
+ debug,
+ errors,
+ inventory,
+ lock,
+ osutils,
+ static_tuple,
+ trace,
+ urlutils,
+ )
+
+
+# This is the Windows equivalent of ENOTDIR
+# It is defined in pywin32.winerror, but we don't want a strong dependency for
+# just an error code.
+ERROR_PATH_NOT_FOUND = 3
+ERROR_DIRECTORY = 267
+
+
+class SHA1Provider(object):
+ """An interface for getting sha1s of a file."""
+
+ def sha1(self, abspath):
+ """Return the sha1 of a file given its absolute path.
+
+ :param abspath: May be a filesystem encoded absolute path
+ or a unicode path.
+ """
+ raise NotImplementedError(self.sha1)
+
+ def stat_and_sha1(self, abspath):
+ """Return the stat and sha1 of a file given its absolute path.
+
+ :param abspath: May be a filesystem encoded absolute path
+ or a unicode path.
+
+ Note: the stat should be the stat of the physical file
+ while the sha may be the sha of its canonical content.
+ """
+ raise NotImplementedError(self.stat_and_sha1)
+
+
+class DefaultSHA1Provider(SHA1Provider):
+ """A SHA1Provider that reads directly from the filesystem."""
+
+ def sha1(self, abspath):
+ """Return the sha1 of a file given its absolute path."""
+ return osutils.sha_file_by_name(abspath)
+
+ def stat_and_sha1(self, abspath):
+ """Return the stat and sha1 of a file given its absolute path."""
+ file_obj = file(abspath, 'rb')
+ try:
+ statvalue = os.fstat(file_obj.fileno())
+ sha1 = osutils.sha_file(file_obj)
+ finally:
+ file_obj.close()
+ return statvalue, sha1
+
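+# Illustrative sketch (editorial note, not part of the original module): any
+# object with these two methods satisfies the SHA1Provider interface. The
+# CountingSHA1Provider name below is hypothetical.
+#
+#   class CountingSHA1Provider(DefaultSHA1Provider):
+#       def __init__(self):
+#           self.hashed_paths = []
+#       def sha1(self, abspath):
+#           self.hashed_paths.append(abspath)
+#           return DefaultSHA1Provider.sha1(self, abspath)
+#
+#   # passed wherever a sha1_provider is expected, e.g.
+#   # DirState(path, CountingSHA1Provider())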
+
+class DirState(object):
+ """Record directory and metadata state for fast access.
+
+ A dirstate is a specialised data structure for managing local working
+    tree state information. It's not yet well defined whether it is platform
+    specific, and if it is, how we detect/parameterize that.
+
+ Dirstates use the usual lock_write, lock_read and unlock mechanisms.
+ Unlike most bzr disk formats, DirStates must be locked for reading, using
+ lock_read. (This is an os file lock internally.) This is necessary
+ because the file can be rewritten in place.
+
+ DirStates must be explicitly written with save() to commit changes; just
+ unlocking them does not write the changes to disk.
+ """
+
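+    # Illustrative locking pattern implied by the class docstring above
+    # (editorial sketch, not part of the original class); 'state' is assumed
+    # to be an already-constructed DirState:
+    #
+    #   state.lock_read()      # or lock_write() when mutating
+    #   try:
+    #       ...                # read or update entries
+    #       state.save()       # needed to persist changes; unlock() alone
+    #   finally:               # does not write them to disk
+    #       state.unlock()
+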
+ _kind_to_minikind = {
+ 'absent': 'a',
+ 'file': 'f',
+ 'directory': 'd',
+ 'relocated': 'r',
+ 'symlink': 'l',
+ 'tree-reference': 't',
+ }
+ _minikind_to_kind = {
+ 'a': 'absent',
+ 'f': 'file',
+ 'd': 'directory',
+ 'l':'symlink',
+ 'r': 'relocated',
+ 't': 'tree-reference',
+ }
+ _stat_to_minikind = {
+ stat.S_IFDIR:'d',
+ stat.S_IFREG:'f',
+ stat.S_IFLNK:'l',
+ }
+ _to_yesno = {True:'y', False: 'n'} # TODO profile the performance gain
+ # of using int conversion rather than a dict here. AND BLAME ANDREW IF
+ # it is faster.
+
+ # TODO: jam 20070221 Figure out what to do if we have a record that exceeds
+ # the BISECT_PAGE_SIZE. For now, we just have to make it large enough
+ # that we are sure a single record will always fit.
+ BISECT_PAGE_SIZE = 4096
+
+ NOT_IN_MEMORY = 0
+ IN_MEMORY_UNMODIFIED = 1
+ IN_MEMORY_MODIFIED = 2
+ IN_MEMORY_HASH_MODIFIED = 3 # Only hash-cache updates
+
+ # A pack_stat (the x's) that is just noise and will never match the output
+ # of base64 encode.
+ NULLSTAT = 'x' * 32
+ NULL_PARENT_DETAILS = static_tuple.StaticTuple('a', '', 0, False, '')
+
+ HEADER_FORMAT_2 = '#bazaar dirstate flat format 2\n'
+ HEADER_FORMAT_3 = '#bazaar dirstate flat format 3\n'
+
+ def __init__(self, path, sha1_provider, worth_saving_limit=0):
+ """Create a DirState object.
+
+ :param path: The path at which the dirstate file on disk should live.
+ :param sha1_provider: an object meeting the SHA1Provider interface.
+ :param worth_saving_limit: when the exact number of hash changed
+ entries is known, only bother saving the dirstate if more than
+ this count of entries have changed.
+ -1 means never save hash changes, 0 means always save hash changes.
+ """
+ # _header_state and _dirblock_state represent the current state
+        # of the dirstate metadata and the per-row data respectively.
+ # NOT_IN_MEMORY indicates that no data is in memory
+ # IN_MEMORY_UNMODIFIED indicates that what we have in memory
+ # is the same as is on disk
+ # IN_MEMORY_MODIFIED indicates that we have a modified version
+ # of what is on disk.
+ # In future we will add more granularity, for instance _dirblock_state
+ # will probably support partially-in-memory as a separate variable,
+ # allowing for partially-in-memory unmodified and partially-in-memory
+ # modified states.
+ self._header_state = DirState.NOT_IN_MEMORY
+ self._dirblock_state = DirState.NOT_IN_MEMORY
+ # If true, an error has been detected while updating the dirstate, and
+ # for safety we're not going to commit to disk.
+ self._changes_aborted = False
+ self._dirblocks = []
+ self._ghosts = []
+ self._parents = []
+ self._state_file = None
+ self._filename = path
+ self._lock_token = None
+ self._lock_state = None
+ self._id_index = None
+ # a map from packed_stat to sha's.
+ self._packed_stat_index = None
+ self._end_of_header = None
+ self._cutoff_time = None
+ self._split_path_cache = {}
+ self._bisect_page_size = DirState.BISECT_PAGE_SIZE
+ self._sha1_provider = sha1_provider
+ if 'hashcache' in debug.debug_flags:
+ self._sha1_file = self._sha1_file_and_mutter
+ else:
+ self._sha1_file = self._sha1_provider.sha1
+ # These two attributes provide a simple cache for lookups into the
+ # dirstate in-memory vectors. By probing respectively for the last
+ # block, and for the next entry, we save nearly 2 bisections per path
+ # during commit.
+ self._last_block_index = None
+ self._last_entry_index = None
+ # The set of known hash changes
+ self._known_hash_changes = set()
+ # How many hash changed entries can we have without saving
+ self._worth_saving_limit = worth_saving_limit
+ self._config_stack = config.LocationStack(urlutils.local_path_to_url(
+ path))
+
+ def __repr__(self):
+ return "%s(%r)" % \
+ (self.__class__.__name__, self._filename)
+
+ def _mark_modified(self, hash_changed_entries=None, header_modified=False):
+ """Mark this dirstate as modified.
+
+ :param hash_changed_entries: if non-None, mark just these entries as
+ having their hash modified.
+ :param header_modified: mark the header modified as well, not just the
+ dirblocks.
+ """
+ #trace.mutter_callsite(3, "modified hash entries: %s", hash_changed_entries)
+ if hash_changed_entries:
+ self._known_hash_changes.update([e[0] for e in hash_changed_entries])
+ if self._dirblock_state in (DirState.NOT_IN_MEMORY,
+ DirState.IN_MEMORY_UNMODIFIED):
+            # If the dirstate is already marked as IN_MEMORY_MODIFIED, then
+ # that takes precedence.
+ self._dirblock_state = DirState.IN_MEMORY_HASH_MODIFIED
+ else:
+ # TODO: Since we now have a IN_MEMORY_HASH_MODIFIED state, we
+ # should fail noisily if someone tries to set
+ # IN_MEMORY_MODIFIED but we don't have a write-lock!
+ # We don't know exactly what changed so disable smart saving
+ self._dirblock_state = DirState.IN_MEMORY_MODIFIED
+ if header_modified:
+ self._header_state = DirState.IN_MEMORY_MODIFIED
+
+ def _mark_unmodified(self):
+ """Mark this dirstate as unmodified."""
+ self._header_state = DirState.IN_MEMORY_UNMODIFIED
+ self._dirblock_state = DirState.IN_MEMORY_UNMODIFIED
+ self._known_hash_changes = set()
+
+ def add(self, path, file_id, kind, stat, fingerprint):
+ """Add a path to be tracked.
+
+ :param path: The path within the dirstate - '' is the root, 'foo' is the
+ path foo within the root, 'foo/bar' is the path bar within foo
+ within the root.
+ :param file_id: The file id of the path being added.
+ :param kind: The kind of the path, as a string like 'file',
+ 'directory', etc.
+ :param stat: The output of os.lstat for the path.
+ :param fingerprint: The sha value of the file's canonical form (i.e.
+ after any read filters have been applied),
+ or the target of a symlink,
+ or the referenced revision id for tree-references,
+ or '' for directories.
+ """
+ # adding a file:
+        # find the block it's in.
+ # find the location in the block.
+        # check it's not there.
+ # add it.
+ #------- copied from inventory.ensure_normalized_name - keep synced.
+ # --- normalized_filename wants a unicode basename only, so get one.
+ dirname, basename = osutils.split(path)
+        # we don't import normalized_filename directly because we want to be
+ # able to change the implementation at runtime for tests.
+ norm_name, can_access = osutils.normalized_filename(basename)
+ if norm_name != basename:
+ if can_access:
+ basename = norm_name
+ else:
+ raise errors.InvalidNormalization(path)
+ # you should never have files called . or ..; just add the directory
+ # in the parent, or according to the special treatment for the root
+ if basename == '.' or basename == '..':
+ raise errors.InvalidEntryName(path)
+ # now that we've normalised, we need the correct utf8 path and
+ # dirname and basename elements. This single encode and split should be
+ # faster than three separate encodes.
+ utf8path = (dirname + '/' + basename).strip('/').encode('utf8')
+ dirname, basename = osutils.split(utf8path)
+ # uses __class__ for speed; the check is needed for safety
+ if file_id.__class__ is not str:
+ raise AssertionError(
+ "must be a utf8 file_id not %s" % (type(file_id), ))
+ # Make sure the file_id does not exist in this tree
+ rename_from = None
+ file_id_entry = self._get_entry(0, fileid_utf8=file_id, include_deleted=True)
+ if file_id_entry != (None, None):
+ if file_id_entry[1][0][0] == 'a':
+ if file_id_entry[0] != (dirname, basename, file_id):
+ # set the old name's current operation to rename
+ self.update_minimal(file_id_entry[0],
+ 'r',
+ path_utf8='',
+ packed_stat='',
+ fingerprint=utf8path
+ )
+ rename_from = file_id_entry[0][0:2]
+ else:
+ path = osutils.pathjoin(file_id_entry[0][0], file_id_entry[0][1])
+ kind = DirState._minikind_to_kind[file_id_entry[1][0][0]]
+ info = '%s:%s' % (kind, path)
+ raise errors.DuplicateFileId(file_id, info)
+ first_key = (dirname, basename, '')
+ block_index, present = self._find_block_index_from_key(first_key)
+ if present:
+ # check the path is not in the tree
+ block = self._dirblocks[block_index][1]
+ entry_index, _ = self._find_entry_index(first_key, block)
+ while (entry_index < len(block) and
+ block[entry_index][0][0:2] == first_key[0:2]):
+ if block[entry_index][1][0][0] not in 'ar':
+ # this path is in the dirstate in the current tree.
+ raise Exception, "adding already added path!"
+ entry_index += 1
+ else:
+ # The block where we want to put the file is not present. But it
+ # might be because the directory was empty, or not loaded yet. Look
+ # for a parent entry, if not found, raise NotVersionedError
+ parent_dir, parent_base = osutils.split(dirname)
+ parent_block_idx, parent_entry_idx, _, parent_present = \
+ self._get_block_entry_index(parent_dir, parent_base, 0)
+ if not parent_present:
+ raise errors.NotVersionedError(path, str(self))
+ self._ensure_block(parent_block_idx, parent_entry_idx, dirname)
+ block = self._dirblocks[block_index][1]
+ entry_key = (dirname, basename, file_id)
+ if stat is None:
+ size = 0
+ packed_stat = DirState.NULLSTAT
+ else:
+ size = stat.st_size
+ packed_stat = pack_stat(stat)
+ parent_info = self._empty_parent_info()
+ minikind = DirState._kind_to_minikind[kind]
+ if rename_from is not None:
+ if rename_from[0]:
+ old_path_utf8 = '%s/%s' % rename_from
+ else:
+ old_path_utf8 = rename_from[1]
+ parent_info[0] = ('r', old_path_utf8, 0, False, '')
+ if kind == 'file':
+ entry_data = entry_key, [
+ (minikind, fingerprint, size, False, packed_stat),
+ ] + parent_info
+ elif kind == 'directory':
+ entry_data = entry_key, [
+ (minikind, '', 0, False, packed_stat),
+ ] + parent_info
+ elif kind == 'symlink':
+ entry_data = entry_key, [
+ (minikind, fingerprint, size, False, packed_stat),
+ ] + parent_info
+ elif kind == 'tree-reference':
+ entry_data = entry_key, [
+ (minikind, fingerprint, 0, False, packed_stat),
+ ] + parent_info
+ else:
+ raise errors.BzrError('unknown kind %r' % kind)
+ entry_index, present = self._find_entry_index(entry_key, block)
+ if not present:
+ block.insert(entry_index, entry_data)
+ else:
+ if block[entry_index][1][0][0] != 'a':
+ raise AssertionError(" %r(%r) already added" % (basename, file_id))
+ block[entry_index][1][0] = entry_data[1][0]
+
+ if kind == 'directory':
+ # insert a new dirblock
+ self._ensure_block(block_index, entry_index, utf8path)
+ self._mark_modified()
+ if self._id_index:
+ self._add_to_id_index(self._id_index, entry_key)
+
+ def _bisect(self, paths):
+ """Bisect through the disk structure for specific rows.
+
+ :param paths: A list of paths to find
+ :return: A dict mapping path => entries for found entries. Missing
+ entries will not be in the map.
+ The list is not sorted, and entries will be populated
+ based on when they were read.
+ """
+ self._requires_lock()
+ # We need the file pointer to be right after the initial header block
+ self._read_header_if_needed()
+ # If _dirblock_state was in memory, we should just return info from
+ # there, this function is only meant to handle when we want to read
+ # part of the disk.
+ if self._dirblock_state != DirState.NOT_IN_MEMORY:
+ raise AssertionError("bad dirblock state %r" % self._dirblock_state)
+
+ # The disk representation is generally info + '\0\n\0' at the end. But
+ # for bisecting, it is easier to treat this as '\0' + info + '\0\n'
+ # Because it means we can sync on the '\n'
+ state_file = self._state_file
+ file_size = os.fstat(state_file.fileno()).st_size
+ # We end up with 2 extra fields, we should have a trailing '\n' to
+        # ensure that we read the whole record, and we should have a precursor
+ # '' which ensures that we start after the previous '\n'
+ entry_field_count = self._fields_per_entry() + 1
+
+ low = self._end_of_header
+ high = file_size - 1 # Ignore the final '\0'
+ # Map from (dir, name) => entry
+ found = {}
+
+ # Avoid infinite seeking
+ max_count = 30*len(paths)
+ count = 0
+ # pending is a list of places to look.
+ # each entry is a tuple of low, high, dir_names
+ # low -> the first byte offset to read (inclusive)
+ # high -> the last byte offset (inclusive)
+ # dir_names -> The list of (dir, name) pairs that should be found in
+ # the [low, high] range
+ pending = [(low, high, paths)]
+
+ page_size = self._bisect_page_size
+
+ fields_to_entry = self._get_fields_to_entry()
+
+ while pending:
+ low, high, cur_files = pending.pop()
+
+ if not cur_files or low >= high:
+ # Nothing to find
+ continue
+
+ count += 1
+ if count > max_count:
+ raise errors.BzrError('Too many seeks, most likely a bug.')
+
+ mid = max(low, (low+high-page_size)/2)
+
+ state_file.seek(mid)
+ # limit the read size, so we don't end up reading data that we have
+ # already read.
+ read_size = min(page_size, (high-mid)+1)
+ block = state_file.read(read_size)
+
+ start = mid
+ entries = block.split('\n')
+
+ if len(entries) < 2:
+ # We didn't find a '\n', so we cannot have found any records.
+ # So put this range back and try again. But we know we have to
+ # increase the page size, because a single read did not contain
+ # a record break (so records must be larger than page_size)
+ page_size *= 2
+ pending.append((low, high, cur_files))
+ continue
+
+ # Check the first and last entries, in case they are partial, or if
+ # we don't care about the rest of this page
+ first_entry_num = 0
+ first_fields = entries[0].split('\0')
+ if len(first_fields) < entry_field_count:
+ # We didn't get the complete first entry
+ # so move start, and grab the next, which
+ # should be a full entry
+ start += len(entries[0])+1
+ first_fields = entries[1].split('\0')
+ first_entry_num = 1
+
+ if len(first_fields) <= 2:
+ # We didn't even get a filename here... what do we do?
+ # Try a large page size and repeat this query
+ page_size *= 2
+ pending.append((low, high, cur_files))
+ continue
+ else:
+ # Find what entries we are looking for, which occur before and
+ # after this first record.
+ after = start
+ if first_fields[1]:
+ first_path = first_fields[1] + '/' + first_fields[2]
+ else:
+ first_path = first_fields[2]
+ first_loc = _bisect_path_left(cur_files, first_path)
+
+ # These exist before the current location
+ pre = cur_files[:first_loc]
+ # These occur after the current location, which may be in the
+ # data we read, or might be after the last entry
+ post = cur_files[first_loc:]
+
+ if post and len(first_fields) >= entry_field_count:
+ # We have files after the first entry
+
+ # Parse the last entry
+ last_entry_num = len(entries)-1
+ last_fields = entries[last_entry_num].split('\0')
+ if len(last_fields) < entry_field_count:
+ # The very last hunk was not complete,
+ # read the previous hunk
+ after = mid + len(block) - len(entries[-1])
+ last_entry_num -= 1
+ last_fields = entries[last_entry_num].split('\0')
+ else:
+ after = mid + len(block)
+
+ if last_fields[1]:
+ last_path = last_fields[1] + '/' + last_fields[2]
+ else:
+ last_path = last_fields[2]
+ last_loc = _bisect_path_right(post, last_path)
+
+ middle_files = post[:last_loc]
+ post = post[last_loc:]
+
+ if middle_files:
+ # We have files that should occur in this block
+ # (>= first, <= last)
+ # Either we will find them here, or we can mark them as
+ # missing.
+
+ if middle_files[0] == first_path:
+ # We might need to go before this location
+ pre.append(first_path)
+ if middle_files[-1] == last_path:
+ post.insert(0, last_path)
+
+ # Find out what paths we have
+ paths = {first_path:[first_fields]}
+ # last_path might == first_path so we need to be
+ # careful if we should append rather than overwrite
+ if last_entry_num != first_entry_num:
+ paths.setdefault(last_path, []).append(last_fields)
+ for num in xrange(first_entry_num+1, last_entry_num):
+ # TODO: jam 20070223 We are already splitting here, so
+ # shouldn't we just split the whole thing rather
+ # than doing the split again in add_one_record?
+ fields = entries[num].split('\0')
+ if fields[1]:
+ path = fields[1] + '/' + fields[2]
+ else:
+ path = fields[2]
+ paths.setdefault(path, []).append(fields)
+
+ for path in middle_files:
+ for fields in paths.get(path, []):
+ # offset by 1 because of the opening '\0'
+ # consider changing fields_to_entry to avoid the
+ # extra list slice
+ entry = fields_to_entry(fields[1:])
+ found.setdefault(path, []).append(entry)
+
+ # Now we have split up everything into pre, middle, and post, and
+ # we have handled everything that fell in 'middle'.
+ # We add 'post' first, so that we prefer to seek towards the
+ # beginning, so that we will tend to go as early as we need, and
+ # then only seek forward after that.
+ if post:
+ pending.append((after, high, post))
+ if pre:
+ pending.append((low, start-1, pre))
+
+ # Consider that we may want to return the directory entries in sorted
+ # order. For now, we just return them in whatever order we found them,
+ # and leave it up to the caller if they care if it is ordered or not.
+ return found
+
+ def _bisect_dirblocks(self, dir_list):
+ """Bisect through the disk structure to find entries in given dirs.
+
+ _bisect_dirblocks is meant to find the contents of directories, which
+ differs from _bisect, which only finds individual entries.
+
+ :param dir_list: A sorted list of directory names ['', 'dir', 'foo'].
+ :return: A map from dir => entries_for_dir
+ """
+ # TODO: jam 20070223 A lot of the bisecting logic could be shared
+ # between this and _bisect. It would require parameterizing the
+ # inner loop with a function, though. We should evaluate the
+ # performance difference.
+ self._requires_lock()
+ # We need the file pointer to be right after the initial header block
+ self._read_header_if_needed()
+ # If _dirblock_state was in memory, we should just return info from
+ # there, this function is only meant to handle when we want to read
+ # part of the disk.
+ if self._dirblock_state != DirState.NOT_IN_MEMORY:
+ raise AssertionError("bad dirblock state %r" % self._dirblock_state)
+ # The disk representation is generally info + '\0\n\0' at the end. But
+ # for bisecting, it is easier to treat this as '\0' + info + '\0\n'
+ # Because it means we can sync on the '\n'
+ state_file = self._state_file
+ file_size = os.fstat(state_file.fileno()).st_size
+ # We end up with 2 extra fields, we should have a trailing '\n' to
+        # ensure that we read the whole record, and we should have a precursor
+ # '' which ensures that we start after the previous '\n'
+ entry_field_count = self._fields_per_entry() + 1
+
+ low = self._end_of_header
+ high = file_size - 1 # Ignore the final '\0'
+ # Map from dir => entry
+ found = {}
+
+ # Avoid infinite seeking
+ max_count = 30*len(dir_list)
+ count = 0
+ # pending is a list of places to look.
+ # each entry is a tuple of low, high, dir_names
+ # low -> the first byte offset to read (inclusive)
+ # high -> the last byte offset (inclusive)
+ # dirs -> The list of directories that should be found in
+ # the [low, high] range
+ pending = [(low, high, dir_list)]
+
+ page_size = self._bisect_page_size
+
+ fields_to_entry = self._get_fields_to_entry()
+
+ while pending:
+ low, high, cur_dirs = pending.pop()
+
+ if not cur_dirs or low >= high:
+ # Nothing to find
+ continue
+
+ count += 1
+ if count > max_count:
+ raise errors.BzrError('Too many seeks, most likely a bug.')
+
+ mid = max(low, (low+high-page_size)/2)
+
+ state_file.seek(mid)
+ # limit the read size, so we don't end up reading data that we have
+ # already read.
+ read_size = min(page_size, (high-mid)+1)
+ block = state_file.read(read_size)
+
+ start = mid
+ entries = block.split('\n')
+
+ if len(entries) < 2:
+ # We didn't find a '\n', so we cannot have found any records.
+ # So put this range back and try again. But we know we have to
+ # increase the page size, because a single read did not contain
+ # a record break (so records must be larger than page_size)
+ page_size *= 2
+ pending.append((low, high, cur_dirs))
+ continue
+
+ # Check the first and last entries, in case they are partial, or if
+ # we don't care about the rest of this page
+ first_entry_num = 0
+ first_fields = entries[0].split('\0')
+ if len(first_fields) < entry_field_count:
+ # We didn't get the complete first entry
+ # so move start, and grab the next, which
+ # should be a full entry
+ start += len(entries[0])+1
+ first_fields = entries[1].split('\0')
+ first_entry_num = 1
+
+ if len(first_fields) <= 1:
+ # We didn't even get a dirname here... what do we do?
+ # Try a large page size and repeat this query
+ page_size *= 2
+ pending.append((low, high, cur_dirs))
+ continue
+ else:
+ # Find what entries we are looking for, which occur before and
+ # after this first record.
+ after = start
+ first_dir = first_fields[1]
+ first_loc = bisect.bisect_left(cur_dirs, first_dir)
+
+ # These exist before the current location
+ pre = cur_dirs[:first_loc]
+ # These occur after the current location, which may be in the
+ # data we read, or might be after the last entry
+ post = cur_dirs[first_loc:]
+
+ if post and len(first_fields) >= entry_field_count:
+ # We have records to look at after the first entry
+
+ # Parse the last entry
+ last_entry_num = len(entries)-1
+ last_fields = entries[last_entry_num].split('\0')
+ if len(last_fields) < entry_field_count:
+ # The very last hunk was not complete,
+ # read the previous hunk
+ after = mid + len(block) - len(entries[-1])
+ last_entry_num -= 1
+ last_fields = entries[last_entry_num].split('\0')
+ else:
+ after = mid + len(block)
+
+ last_dir = last_fields[1]
+ last_loc = bisect.bisect_right(post, last_dir)
+
+ middle_files = post[:last_loc]
+ post = post[last_loc:]
+
+ if middle_files:
+ # We have files that should occur in this block
+ # (>= first, <= last)
+ # Either we will find them here, or we can mark them as
+ # missing.
+
+ if middle_files[0] == first_dir:
+ # We might need to go before this location
+ pre.append(first_dir)
+ if middle_files[-1] == last_dir:
+ post.insert(0, last_dir)
+
+ # Find out what paths we have
+ paths = {first_dir:[first_fields]}
+ # last_dir might == first_dir so we need to be
+ # careful if we should append rather than overwrite
+ if last_entry_num != first_entry_num:
+ paths.setdefault(last_dir, []).append(last_fields)
+ for num in xrange(first_entry_num+1, last_entry_num):
+ # TODO: jam 20070223 We are already splitting here, so
+ # shouldn't we just split the whole thing rather
+ # than doing the split again in add_one_record?
+ fields = entries[num].split('\0')
+ paths.setdefault(fields[1], []).append(fields)
+
+ for cur_dir in middle_files:
+ for fields in paths.get(cur_dir, []):
+ # offset by 1 because of the opening '\0'
+ # consider changing fields_to_entry to avoid the
+ # extra list slice
+ entry = fields_to_entry(fields[1:])
+ found.setdefault(cur_dir, []).append(entry)
+
+ # Now we have split up everything into pre, middle, and post, and
+ # we have handled everything that fell in 'middle'.
+ # We add 'post' first, so that we prefer to seek towards the
+ # beginning, so that we will tend to go as early as we need, and
+ # then only seek forward after that.
+ if post:
+ pending.append((after, high, post))
+ if pre:
+ pending.append((low, start-1, pre))
+
+ return found
+
+ def _bisect_recursive(self, paths):
+ """Bisect for entries for all paths and their children.
+
+ This will use bisect to find all records for the supplied paths. It
+ will then continue to bisect for any records which are marked as
+ directories. (and renames?)
+
+ :param paths: A sorted list of (dir, name) pairs
+ eg: [('', 'a'), ('', 'f'), ('a/b', 'c')]
+ :return: A dictionary mapping (dir, name, file_id) => [tree_info]
+ """
+ # Map from (dir, name, file_id) => [tree_info]
+ found = {}
+
+ found_dir_names = set()
+
+ # Directories that have been read
+ processed_dirs = set()
+ # Get the ball rolling with the first bisect for all entries.
+ newly_found = self._bisect(paths)
+
+ while newly_found:
+ # Directories that need to be read
+ pending_dirs = set()
+ paths_to_search = set()
+ for entry_list in newly_found.itervalues():
+ for dir_name_id, trees_info in entry_list:
+ found[dir_name_id] = trees_info
+ found_dir_names.add(dir_name_id[:2])
+ is_dir = False
+ for tree_info in trees_info:
+ minikind = tree_info[0]
+ if minikind == 'd':
+ if is_dir:
+ # We already processed this one as a directory,
+ # we don't need to do the extra work again.
+ continue
+ subdir, name, file_id = dir_name_id
+ path = osutils.pathjoin(subdir, name)
+ is_dir = True
+ if path not in processed_dirs:
+ pending_dirs.add(path)
+ elif minikind == 'r':
+ # Rename, we need to directly search the target
+ # which is contained in the fingerprint column
+ dir_name = osutils.split(tree_info[1])
+ if dir_name[0] in pending_dirs:
+ # This entry will be found in the dir search
+ continue
+ if dir_name not in found_dir_names:
+ paths_to_search.add(tree_info[1])
+ # Now we have a list of paths to look for directly, and
+ # directory blocks that need to be read.
+ # newly_found is mixing the keys between (dir, name) and path
+ # entries, but that is okay, because we only really care about the
+ # targets.
+ newly_found = self._bisect(sorted(paths_to_search))
+ newly_found.update(self._bisect_dirblocks(sorted(pending_dirs)))
+ processed_dirs.update(pending_dirs)
+ return found
+
+ def _discard_merge_parents(self):
+ """Discard any parents trees beyond the first.
+
+ Note that if this fails the dirstate is corrupted.
+
+ After this function returns the dirstate contains 2 trees, neither of
+ which are ghosted.
+ """
+ self._read_header_if_needed()
+ parents = self.get_parent_ids()
+ if len(parents) < 1:
+ return
+ # only require all dirblocks if we are doing a full-pass removal.
+ self._read_dirblocks_if_needed()
+ dead_patterns = set([('a', 'r'), ('a', 'a'), ('r', 'r'), ('r', 'a')])
+ def iter_entries_removable():
+ for block in self._dirblocks:
+ deleted_positions = []
+ for pos, entry in enumerate(block[1]):
+ yield entry
+ if (entry[1][0][0], entry[1][1][0]) in dead_patterns:
+ deleted_positions.append(pos)
+ if deleted_positions:
+ if len(deleted_positions) == len(block[1]):
+ del block[1][:]
+ else:
+ for pos in reversed(deleted_positions):
+ del block[1][pos]
+ # if the first parent is a ghost:
+ if parents[0] in self.get_ghosts():
+ empty_parent = [DirState.NULL_PARENT_DETAILS]
+ for entry in iter_entries_removable():
+ entry[1][1:] = empty_parent
+ else:
+ for entry in iter_entries_removable():
+ del entry[1][2:]
+
+ self._ghosts = []
+ self._parents = [parents[0]]
+ self._mark_modified(header_modified=True)
+
+ def _empty_parent_info(self):
+ return [DirState.NULL_PARENT_DETAILS] * (len(self._parents) -
+ len(self._ghosts))
+
+ def _ensure_block(self, parent_block_index, parent_row_index, dirname):
+ """Ensure a block for dirname exists.
+
+ This function exists to let callers which know that there is a
+ directory dirname ensure that the block for it exists. This block can
+ fail to exist because of demand loading, or because a directory had no
+ children. In either case it is not an error. It is however an error to
+ call this if there is no parent entry for the directory, and thus the
+ function requires the coordinates of such an entry to be provided.
+
+ The root row is special cased and can be indicated with a parent block
+ and row index of -1
+
+ :param parent_block_index: The index of the block in which dirname's row
+ exists.
+ :param parent_row_index: The index in the parent block where the row
+ exists.
+ :param dirname: The utf8 dirname to ensure there is a block for.
+ :return: The index for the block.
+ """
+ if dirname == '' and parent_row_index == 0 and parent_block_index == 0:
+ # This is the signature of the root row, and the
+ # contents-of-root row is always index 1
+ return 1
+ # the basename of the directory must be the end of its full name.
+ if not (parent_block_index == -1 and
+            parent_row_index == -1 and dirname == ''):
+ if not dirname.endswith(
+ self._dirblocks[parent_block_index][1][parent_row_index][0][1]):
+ raise AssertionError("bad dirname %r" % dirname)
+ block_index, present = self._find_block_index_from_key((dirname, '', ''))
+ if not present:
+ ## In future, when doing partial parsing, this should load and
+ # populate the entire block.
+ self._dirblocks.insert(block_index, (dirname, []))
+ return block_index
+
+ def _entries_to_current_state(self, new_entries):
+ """Load new_entries into self.dirblocks.
+
+ Process new_entries into the current state object, making them the active
+ state. The entries are grouped together by directory to form dirblocks.
+
+ :param new_entries: A sorted list of entries. This function does not sort
+ to prevent unneeded overhead when callers have a sorted list already.
+ :return: Nothing.
+ """
+ if new_entries[0][0][0:2] != ('', ''):
+ raise AssertionError(
+ "Missing root row %r" % (new_entries[0][0],))
+ # The two blocks here are deliberate: the root block and the
+ # contents-of-root block.
+ self._dirblocks = [('', []), ('', [])]
+ current_block = self._dirblocks[0][1]
+ current_dirname = ''
+ root_key = ('', '')
+ append_entry = current_block.append
+ for entry in new_entries:
+ if entry[0][0] != current_dirname:
+ # new block - different dirname
+ current_block = []
+ current_dirname = entry[0][0]
+ self._dirblocks.append((current_dirname, current_block))
+ append_entry = current_block.append
+ # append the entry to the current block
+ append_entry(entry)
+ self._split_root_dirblock_into_contents()
+
+ def _split_root_dirblock_into_contents(self):
+ """Split the root dirblocks into root and contents-of-root.
+
+ After parsing by path, we end up with root entries and contents-of-root
+ entries in the same block. This loop splits them out again.
+ """
+ # The above loop leaves the "root block" entries mixed with the
+ # "contents-of-root block". But we don't want an if check on
+ # all entries, so instead we just fix it up here.
+ if self._dirblocks[1] != ('', []):
+ raise ValueError("bad dirblock start %r" % (self._dirblocks[1],))
+ root_block = []
+ contents_of_root_block = []
+ for entry in self._dirblocks[0][1]:
+ if not entry[0][1]: # This is a root entry
+ root_block.append(entry)
+ else:
+ contents_of_root_block.append(entry)
+ self._dirblocks[0] = ('', root_block)
+ self._dirblocks[1] = ('', contents_of_root_block)
+
+ def _entries_for_path(self, path):
+ """Return a list with all the entries that match path for all ids."""
+ dirname, basename = os.path.split(path)
+ key = (dirname, basename, '')
+ block_index, present = self._find_block_index_from_key(key)
+ if not present:
+ # the block which should contain path is absent.
+ return []
+ result = []
+ block = self._dirblocks[block_index][1]
+ entry_index, _ = self._find_entry_index(key, block)
+ # we may need to look at multiple entries at this path: walk while the specific_files match.
+ while (entry_index < len(block) and
+ block[entry_index][0][0:2] == key[0:2]):
+ result.append(block[entry_index])
+ entry_index += 1
+ return result
+
+ def _entry_to_line(self, entry):
+ """Serialize entry to a NULL delimited line ready for _get_output_lines.
+
+ :param entry: An entry_tuple as defined in the module docstring.
+ """
+ entire_entry = list(entry[0])
+ for tree_number, tree_data in enumerate(entry[1]):
+ # (minikind, fingerprint, size, executable, tree_specific_string)
+ entire_entry.extend(tree_data)
+ # 3 for the key, 5 for the fields per tree.
+ tree_offset = 3 + tree_number * 5
+ # minikind
+ entire_entry[tree_offset + 0] = tree_data[0]
+ # size
+ entire_entry[tree_offset + 2] = str(tree_data[2])
+ # executable
+ entire_entry[tree_offset + 3] = DirState._to_yesno[tree_data[3]]
+ return '\0'.join(entire_entry)
+
+ def _fields_per_entry(self):
+ """How many null separated fields should be in each entry row.
+
+        Each line now has an extra '\\n' field which is not used,
+        so we just skip over it.
+
+ entry size::
+ 3 fields for the key
+ + number of fields per tree_data (5) * tree count
+ + newline
+ """
+ tree_count = 1 + self._num_present_parents()
+ return 3 + 5 * tree_count + 1
+
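+    # Worked example for _fields_per_entry (editorial note): with one present
+    # parent tree, tree_count = 2, so each row has 3 key fields
+    # + 5 * 2 tree fields + 1 trailing '\n' field = 14 null-separated fields.
+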
+ def _find_block(self, key, add_if_missing=False):
+ """Return the block that key should be present in.
+
+ :param key: A dirstate entry key.
+ :return: The block tuple.
+ """
+ block_index, present = self._find_block_index_from_key(key)
+ if not present:
+ if not add_if_missing:
+ # check to see if key is versioned itself - we might want to
+                # add it anyway, because dirs with no entries don't get a
+ # dirblock at parse time.
+ # This is an uncommon branch to take: most dirs have children,
+ # and most code works with versioned paths.
+ parent_base, parent_name = osutils.split(key[0])
+ if not self._get_block_entry_index(parent_base, parent_name, 0)[3]:
+                    # some parent path has not been added - it's an error to add
+ # this child
+ raise errors.NotVersionedError(key[0:2], str(self))
+ self._dirblocks.insert(block_index, (key[0], []))
+ return self._dirblocks[block_index]
+
+ def _find_block_index_from_key(self, key):
+ """Find the dirblock index for a key.
+
+ :return: The block index, True if the block for the key is present.
+ """
+ if key[0:2] == ('', ''):
+ return 0, True
+ try:
+ if (self._last_block_index is not None and
+ self._dirblocks[self._last_block_index][0] == key[0]):
+ return self._last_block_index, True
+ except IndexError:
+ pass
+ block_index = bisect_dirblock(self._dirblocks, key[0], 1,
+ cache=self._split_path_cache)
+        # _right returns one past where the key is, so we have to subtract
+        # one to use it. We use _right here because there are two
+ # '' blocks - the root, and the contents of root
+ # we always have a minimum of 2 in self._dirblocks: root and
+ # root-contents, and for '', we get 2 back, so this is
+ # simple and correct:
+ present = (block_index < len(self._dirblocks) and
+ self._dirblocks[block_index][0] == key[0])
+ self._last_block_index = block_index
+ # Reset the entry index cache to the beginning of the block.
+ self._last_entry_index = -1
+ return block_index, present
+
+ def _find_entry_index(self, key, block):
+ """Find the entry index for a key in a block.
+
+ :return: The entry index, True if the entry for the key is present.
+ """
+ len_block = len(block)
+ try:
+ if self._last_entry_index is not None:
+ # mini-bisect here.
+ entry_index = self._last_entry_index + 1
+ # A hit is when the key is after the last slot, and before or
+ # equal to the next slot.
+ if ((entry_index > 0 and block[entry_index - 1][0] < key) and
+ key <= block[entry_index][0]):
+ self._last_entry_index = entry_index
+ present = (block[entry_index][0] == key)
+ return entry_index, present
+ except IndexError:
+ pass
+ entry_index = bisect.bisect_left(block, (key, []))
+ present = (entry_index < len_block and
+ block[entry_index][0] == key)
+ self._last_entry_index = entry_index
+ return entry_index, present
+
+ @staticmethod
+ def from_tree(tree, dir_state_filename, sha1_provider=None):
+ """Create a dirstate from a bzr Tree.
+
+ :param tree: The tree which should provide parent information and
+ inventory ids.
+ :param sha1_provider: an object meeting the SHA1Provider interface.
+ If None, a DefaultSHA1Provider is used.
+ :return: a DirState object which is currently locked for writing.
+ (it was locked by DirState.initialize)
+ """
+ result = DirState.initialize(dir_state_filename,
+ sha1_provider=sha1_provider)
+ try:
+ tree.lock_read()
+ try:
+ parent_ids = tree.get_parent_ids()
+ num_parents = len(parent_ids)
+ parent_trees = []
+ for parent_id in parent_ids:
+ parent_tree = tree.branch.repository.revision_tree(parent_id)
+ parent_trees.append((parent_id, parent_tree))
+ parent_tree.lock_read()
+ result.set_parent_trees(parent_trees, [])
+ result.set_state_from_inventory(tree.root_inventory)
+ finally:
+ for revid, parent_tree in parent_trees:
+ parent_tree.unlock()
+ tree.unlock()
+ except:
+ # The caller won't have a chance to unlock this, so make sure we
+ # cleanup ourselves
+ result.unlock()
+ raise
+ return result
+
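+    # Illustrative usage of from_tree (editorial sketch, not part of the
+    # original class); 'wt' is assumed to be a WorkingTree providing parent
+    # information and an inventory:
+    #
+    #   state = DirState.from_tree(wt, 'path/to/dirstate')
+    #   try:
+    #       ...              # the returned DirState is locked for writing
+    #       state.save()
+    #   finally:
+    #       state.unlock()
+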
+ def _check_delta_is_valid(self, delta):
+ return list(inventory._check_delta_unique_ids(
+ inventory._check_delta_unique_old_paths(
+ inventory._check_delta_unique_new_paths(
+ inventory._check_delta_ids_match_entry(
+ inventory._check_delta_ids_are_valid(
+ inventory._check_delta_new_path_entry_both_or_None(delta)))))))
+
+ def update_by_delta(self, delta):
+ """Apply an inventory delta to the dirstate for tree 0
+
+ This is the workhorse for apply_inventory_delta in dirstate based
+ trees.
+
+ :param delta: An inventory delta. See Inventory.apply_delta for
+ details.
+ """
+ self._read_dirblocks_if_needed()
+ encode = cache_utf8.encode
+ insertions = {}
+ removals = {}
+ # Accumulate parent references (path_utf8, id), to check for parentless
+ # items or items placed under files/links/tree-references. We get
+ # references from every item in the delta that is not a deletion and
+ # is not itself the root.
+ parents = set()
+ # Added ids must not be in the dirstate already. This set holds those
+ # ids.
+ new_ids = set()
+ # This loop transforms the delta to single atomic operations that can
+ # be executed and validated.
+ delta = sorted(self._check_delta_is_valid(delta), reverse=True)
+ for old_path, new_path, file_id, inv_entry in delta:
+ if (file_id in insertions) or (file_id in removals):
+ self._raise_invalid(old_path or new_path, file_id,
+ "repeated file_id")
+ if old_path is not None:
+ old_path = old_path.encode('utf-8')
+ removals[file_id] = old_path
+ else:
+ new_ids.add(file_id)
+ if new_path is not None:
+ if inv_entry is None:
+ self._raise_invalid(new_path, file_id,
+ "new_path with no entry")
+ new_path = new_path.encode('utf-8')
+ dirname_utf8, basename = osutils.split(new_path)
+ if basename:
+ parents.add((dirname_utf8, inv_entry.parent_id))
+ key = (dirname_utf8, basename, file_id)
+ minikind = DirState._kind_to_minikind[inv_entry.kind]
+ if minikind == 't':
+ fingerprint = inv_entry.reference_revision or ''
+ else:
+ fingerprint = ''
+ insertions[file_id] = (key, minikind, inv_entry.executable,
+ fingerprint, new_path)
+ # Transform moves into delete+add pairs
+ if None not in (old_path, new_path):
+ for child in self._iter_child_entries(0, old_path):
+ if child[0][2] in insertions or child[0][2] in removals:
+ continue
+ child_dirname = child[0][0]
+ child_basename = child[0][1]
+ minikind = child[1][0][0]
+ fingerprint = child[1][0][4]
+ executable = child[1][0][3]
+ old_child_path = osutils.pathjoin(child_dirname,
+ child_basename)
+ removals[child[0][2]] = old_child_path
+ child_suffix = child_dirname[len(old_path):]
+ new_child_dirname = (new_path + child_suffix)
+ key = (new_child_dirname, child_basename, child[0][2])
+ new_child_path = osutils.pathjoin(new_child_dirname,
+ child_basename)
+ insertions[child[0][2]] = (key, minikind, executable,
+ fingerprint, new_child_path)
+ self._check_delta_ids_absent(new_ids, delta, 0)
+ try:
+ self._apply_removals(removals.iteritems())
+ self._apply_insertions(insertions.values())
+ # Validate parents
+ self._after_delta_check_parents(parents, 0)
+ except errors.BzrError, e:
+ self._changes_aborted = True
+ if 'integrity error' not in str(e):
+ raise
+ # _get_entry raises BzrError when a request is inconsistent; we
+ # want such errors to be shown as InconsistentDelta - and that
+ # fits the behaviour we trigger.
+ raise errors.InconsistentDeltaDelta(delta,
+ "error from _get_entry. %s" % (e,))
+
+ def _apply_removals(self, removals):
+ for file_id, path in sorted(removals, reverse=True,
+ key=operator.itemgetter(1)):
+ dirname, basename = osutils.split(path)
+ block_i, entry_i, d_present, f_present = \
+ self._get_block_entry_index(dirname, basename, 0)
+ try:
+ entry = self._dirblocks[block_i][1][entry_i]
+ except IndexError:
+ self._raise_invalid(path, file_id,
+ "Wrong path for old path.")
+ if not f_present or entry[1][0][0] in 'ar':
+ self._raise_invalid(path, file_id,
+ "Wrong path for old path.")
+ if file_id != entry[0][2]:
+ self._raise_invalid(path, file_id,
+ "Attempt to remove path has wrong id - found %r."
+ % entry[0][2])
+ self._make_absent(entry)
+ # See if we have a malformed delta: deleting a directory must not
+ # leave crud behind. This increases the number of bisects needed
+ # substantially, but deletion or renames of large numbers of paths
+ # is rare enough it shouldn't be an issue (famous last words?) RBC
+ # 20080730.
+ block_i, entry_i, d_present, f_present = \
+ self._get_block_entry_index(path, '', 0)
+ if d_present:
+ # The dir block is still present in the dirstate; this could
+ # be due to it being in a parent tree, or a corrupt delta.
+ for child_entry in self._dirblocks[block_i][1]:
+ if child_entry[1][0][0] not in ('r', 'a'):
+ self._raise_invalid(path, entry[0][2],
+ "The file id was deleted but its children were "
+ "not deleted.")
+
+ def _apply_insertions(self, adds):
+ try:
+ for key, minikind, executable, fingerprint, path_utf8 in sorted(adds):
+ self.update_minimal(key, minikind, executable, fingerprint,
+ path_utf8=path_utf8)
+ except errors.NotVersionedError:
+ self._raise_invalid(path_utf8.decode('utf8'), key[2],
+ "Missing parent")
+
+ def update_basis_by_delta(self, delta, new_revid):
+ """Update the parents of this tree after a commit.
+
+ This gives the tree one parent, with revision id new_revid. The
+ inventory delta is applied to the current basis tree to generate the
+ inventory for the parent new_revid, and all other parent trees are
+ discarded.
+
+ Note that an exception during the operation of this method will leave
+ the dirstate in a corrupt state where it should not be saved.
+
+ :param new_revid: The new revision id for the trees parent.
+ :param delta: An inventory delta (see apply_inventory_delta) describing
+ the changes from the current left most parent revision to new_revid.
+ """
+ self._read_dirblocks_if_needed()
+ self._discard_merge_parents()
+ if self._ghosts != []:
+ raise NotImplementedError(self.update_basis_by_delta)
+ if len(self._parents) == 0:
+ # setup a blank tree, the most simple way.
+ empty_parent = DirState.NULL_PARENT_DETAILS
+ for entry in self._iter_entries():
+ entry[1].append(empty_parent)
+ self._parents.append(new_revid)
+
+ self._parents[0] = new_revid
+
+ delta = sorted(self._check_delta_is_valid(delta), reverse=True)
+ adds = []
+ changes = []
+ deletes = []
+ # The paths this function accepts are unicode and must be encoded as we
+ # go.
+ encode = cache_utf8.encode
+ inv_to_entry = self._inv_entry_to_details
+        # delta is now (deletes, changes), (adds) in reverse lexicographical
+        # order.
+        # deletes in reverse lexicographic order are safe to process in situ.
+        # renames are not, as a rename from any path could go to a path
+        # lexicographically lower, so we transform renames into delete, add pairs,
+ # expanding them recursively as needed.
+ # At the same time, to reduce interface friction we convert the input
+ # inventory entries to dirstate.
+ root_only = ('', '')
+ # Accumulate parent references (path_utf8, id), to check for parentless
+ # items or items placed under files/links/tree-references. We get
+ # references from every item in the delta that is not a deletion and
+ # is not itself the root.
+ parents = set()
+ # Added ids must not be in the dirstate already. This set holds those
+ # ids.
+ new_ids = set()
+ for old_path, new_path, file_id, inv_entry in delta:
+ if inv_entry is not None and file_id != inv_entry.file_id:
+ self._raise_invalid(new_path, file_id,
+ "mismatched entry file_id %r" % inv_entry)
+ if new_path is None:
+ new_path_utf8 = None
+ else:
+ if inv_entry is None:
+ self._raise_invalid(new_path, file_id,
+ "new_path with no entry")
+ new_path_utf8 = encode(new_path)
+ # note the parent for validation
+ dirname_utf8, basename_utf8 = osutils.split(new_path_utf8)
+ if basename_utf8:
+ parents.add((dirname_utf8, inv_entry.parent_id))
+ if old_path is None:
+ old_path_utf8 = None
+ else:
+ old_path_utf8 = encode(old_path)
+ if old_path is None:
+ adds.append((None, new_path_utf8, file_id,
+ inv_to_entry(inv_entry), True))
+ new_ids.add(file_id)
+ elif new_path is None:
+ deletes.append((old_path_utf8, None, file_id, None, True))
+ elif (old_path, new_path) == root_only:
+ # change things in-place
+ # Note: the case of a parent directory changing its file_id
+ # tends to break optimizations here, because officially
+ # the file has actually been moved, it just happens to
+ # end up at the same path. If we can figure out how to
+ # handle that case, we can avoid a lot of add+delete
+ # pairs for objects that stay put.
+ # elif old_path == new_path:
+ changes.append((old_path_utf8, new_path_utf8, file_id,
+ inv_to_entry(inv_entry)))
+ else:
+ # Renames:
+ # Because renames must preserve their children we must have
+                # processed all relocations and removes beforehand. The sort
+ # order ensures we've examined the child paths, but we also
+ # have to execute the removals, or the split to an add/delete
+ # pair will result in the deleted item being reinserted, or
+ # renamed items being reinserted twice - and possibly at the
+ # wrong place. Splitting into a delete/add pair also simplifies
+ # the handling of entries with ('f', ...), ('r' ...) because
+ # the target of the 'r' is old_path here, and we add that to
+ # deletes, meaning that the add handler does not need to check
+ # for 'r' items on every pass.
+ self._update_basis_apply_deletes(deletes)
+ deletes = []
+ # Split into an add/delete pair recursively.
+ adds.append((old_path_utf8, new_path_utf8, file_id,
+ inv_to_entry(inv_entry), False))
+ # Expunge deletes that we've seen so that deleted/renamed
+                # children of a renamed directory are handled correctly.
+ new_deletes = reversed(list(
+ self._iter_child_entries(1, old_path_utf8)))
+ # Remove the current contents of the tree at orig_path, and
+ # reinsert at the correct new path.
+ for entry in new_deletes:
+ child_dirname, child_basename, child_file_id = entry[0]
+ if child_dirname:
+ source_path = child_dirname + '/' + child_basename
+ else:
+ source_path = child_basename
+ if new_path_utf8:
+ target_path = \
+ new_path_utf8 + source_path[len(old_path_utf8):]
+ else:
+ if old_path_utf8 == '':
+ raise AssertionError("cannot rename directory to"
+ " itself")
+ target_path = source_path[len(old_path_utf8) + 1:]
+ adds.append((None, target_path, entry[0][2], entry[1][1], False))
+ deletes.append(
+ (source_path, target_path, entry[0][2], None, False))
+ deletes.append(
+ (old_path_utf8, new_path_utf8, file_id, None, False))
+
+ self._check_delta_ids_absent(new_ids, delta, 1)
+ try:
+ # Finish expunging deletes/first half of renames.
+ self._update_basis_apply_deletes(deletes)
+ # Reinstate second half of renames and new paths.
+ self._update_basis_apply_adds(adds)
+ # Apply in-situ changes.
+ self._update_basis_apply_changes(changes)
+ # Validate parents
+ self._after_delta_check_parents(parents, 1)
+ except errors.BzrError, e:
+ self._changes_aborted = True
+ if 'integrity error' not in str(e):
+ raise
+ # _get_entry raises BzrError when a request is inconsistent; we
+ # want such errors to be shown as InconsistentDelta - and that
+ # fits the behaviour we trigger.
+ raise errors.InconsistentDeltaDelta(delta,
+ "error from _get_entry. %s" % (e,))
+
+ self._mark_modified(header_modified=True)
+ self._id_index = None
+ return
+
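+    # Illustrative sketch only (hypothetical paths and ids): a rename in the
+    # delta handed to update_basis_by_delta is split into an add plus a
+    # delete rather than being applied in place, e.g. for a file renamed
+    # from 'old-name' to 'new-name':
+    #
+    #   delta item: ('old-name', 'new-name', 'an-id', <inventory entry>)
+    #   adds:       ('old-name', 'new-name', 'an-id', <details>, False)
+    #   deletes:    ('old-name', 'new-name', 'an-id', None, False)
+    #
+    # The trailing False marks both halves as parts of a rename rather than
+    # a genuine add or removal (see _update_basis_apply_adds/_deletes below).
+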
+ def _check_delta_ids_absent(self, new_ids, delta, tree_index):
+ """Check that none of the file_ids in new_ids are present in a tree."""
+ if not new_ids:
+ return
+ id_index = self._get_id_index()
+ for file_id in new_ids:
+ for key in id_index.get(file_id, ()):
+ block_i, entry_i, d_present, f_present = \
+ self._get_block_entry_index(key[0], key[1], tree_index)
+ if not f_present:
+ # In a different tree
+ continue
+ entry = self._dirblocks[block_i][1][entry_i]
+ if entry[0][2] != file_id:
+ # Different file_id, so not what we want.
+ continue
+ self._raise_invalid(("%s/%s" % key[0:2]).decode('utf8'), file_id,
+ "This file_id is new in the delta but already present in "
+ "the target")
+
+ def _raise_invalid(self, path, file_id, reason):
+ self._changes_aborted = True
+ raise errors.InconsistentDelta(path, file_id, reason)
+
+ def _update_basis_apply_adds(self, adds):
+ """Apply a sequence of adds to tree 1 during update_basis_by_delta.
+
+ They may be adds, or renames that have been split into add/delete
+ pairs.
+
+ :param adds: A sequence of adds. Each add is a tuple:
+ (None, new_path_utf8, file_id, (entry_details), real_add). real_add
+ is False when the add is the second half of a remove-and-reinsert
+ pair created to handle renames and deletes.
+ """
+ # Adds are accumulated partly from renames, so can be in any input
+ # order - sort it.
+ # TODO: we may want to sort in dirblocks order. That way each entry
+ # will end up in the same directory, allowing the _get_entry
+        #       fast-path for looking up 2 items in the same dir to work.
+ adds.sort(key=lambda x: x[1])
+        # adds is now in lexicographic order, which places all parents before
+ # their children, so we can process it linearly.
+ absent = 'ar'
+ st = static_tuple.StaticTuple
+ for old_path, new_path, file_id, new_details, real_add in adds:
+ dirname, basename = osutils.split(new_path)
+ entry_key = st(dirname, basename, file_id)
+ block_index, present = self._find_block_index_from_key(entry_key)
+ if not present:
+ self._raise_invalid(new_path, file_id,
+ "Unable to find block for this record."
+ " Was the parent added?")
+ block = self._dirblocks[block_index][1]
+ entry_index, present = self._find_entry_index(entry_key, block)
+ if real_add:
+ if old_path is not None:
+ self._raise_invalid(new_path, file_id,
+ 'considered a real add but still had old_path at %s'
+ % (old_path,))
+ if present:
+ entry = block[entry_index]
+ basis_kind = entry[1][1][0]
+ if basis_kind == 'a':
+ entry[1][1] = new_details
+ elif basis_kind == 'r':
+ raise NotImplementedError()
+ else:
+ self._raise_invalid(new_path, file_id,
+ "An entry was marked as a new add"
+ " but the basis target already existed")
+ else:
+ # The exact key was not found in the block. However, we need to
+ # check if there is a key next to us that would have matched.
+ # We only need to check 2 locations, because there are only 2
+ # trees present.
+ for maybe_index in range(entry_index-1, entry_index+1):
+ if maybe_index < 0 or maybe_index >= len(block):
+ continue
+ maybe_entry = block[maybe_index]
+ if maybe_entry[0][:2] != (dirname, basename):
+ # Just a random neighbor
+ continue
+ if maybe_entry[0][2] == file_id:
+ raise AssertionError(
+ '_find_entry_index didnt find a key match'
+ ' but walking the data did, for %s'
+ % (entry_key,))
+ basis_kind = maybe_entry[1][1][0]
+ if basis_kind not in 'ar':
+ self._raise_invalid(new_path, file_id,
+ "we have an add record for path, but the path"
+ " is already present with another file_id %s"
+ % (maybe_entry[0][2],))
+
+ entry = (entry_key, [DirState.NULL_PARENT_DETAILS,
+ new_details])
+ block.insert(entry_index, entry)
+
+ active_kind = entry[1][0][0]
+ if active_kind == 'a':
+                # The active record shows up as absent; this could be genuine,
+ # or it could be present at some other location. We need to
+ # verify.
+ id_index = self._get_id_index()
+ # The id_index may not be perfectly accurate for tree1, because
+ # we haven't been keeping it updated. However, it should be
+ # fine for tree0, and that gives us enough info for what we
+ # need
+ keys = id_index.get(file_id, ())
+ for key in keys:
+ block_i, entry_i, d_present, f_present = \
+ self._get_block_entry_index(key[0], key[1], 0)
+ if not f_present:
+ continue
+ active_entry = self._dirblocks[block_i][1][entry_i]
+ if (active_entry[0][2] != file_id):
+ # Some other file is at this path, we don't need to
+ # link it.
+ continue
+ real_active_kind = active_entry[1][0][0]
+ if real_active_kind in 'ar':
+ # We found a record, which was not *this* record,
+ # which matches the file_id, but is not actually
+ # present. Something seems *really* wrong.
+ self._raise_invalid(new_path, file_id,
+ "We found a tree0 entry that doesnt make sense")
+ # Now, we've found a tree0 entry which matches the file_id
+ # but is at a different location. So update them to be
+ # rename records.
+ active_dir, active_name = active_entry[0][:2]
+ if active_dir:
+ active_path = active_dir + '/' + active_name
+ else:
+ active_path = active_name
+ active_entry[1][1] = st('r', new_path, 0, False, '')
+ entry[1][0] = st('r', active_path, 0, False, '')
+ elif active_kind == 'r':
+ raise NotImplementedError()
+
+ new_kind = new_details[0]
+ if new_kind == 'd':
+ self._ensure_block(block_index, entry_index, new_path)
+
+ def _update_basis_apply_changes(self, changes):
+ """Apply a sequence of changes to tree 1 during update_basis_by_delta.
+
+        :param changes: A sequence of changes. Each change is a tuple:
+ (path_utf8, path_utf8, file_id, (entry_details))
+ """
+ absent = 'ar'
+ for old_path, new_path, file_id, new_details in changes:
+            # the entry for this file_id must be in tree 1.
+ entry = self._get_entry(1, file_id, new_path)
+ if entry[0] is None or entry[1][1][0] in 'ar':
+ self._raise_invalid(new_path, file_id,
+ 'changed entry considered not present')
+ entry[1][1] = new_details
+
+ def _update_basis_apply_deletes(self, deletes):
+ """Apply a sequence of deletes to tree 1 during update_basis_by_delta.
+
+ They may be deletes, or renames that have been split into add/delete
+ pairs.
+
+ :param deletes: A sequence of deletes. Each delete is a tuple:
+ (old_path_utf8, new_path_utf8, file_id, None, real_delete).
+ real_delete is True when the desired outcome is an actual deletion
+ rather than the rename handling logic temporarily deleting a path
+ during the replacement of a parent.
+ """
+ null = DirState.NULL_PARENT_DETAILS
+ for old_path, new_path, file_id, _, real_delete in deletes:
+ if real_delete != (new_path is None):
+ self._raise_invalid(old_path, file_id, "bad delete delta")
+ # the entry for this file_id must be in tree 1.
+ dirname, basename = osutils.split(old_path)
+ block_index, entry_index, dir_present, file_present = \
+ self._get_block_entry_index(dirname, basename, 1)
+ if not file_present:
+ self._raise_invalid(old_path, file_id,
+ 'basis tree does not contain removed entry')
+ entry = self._dirblocks[block_index][1][entry_index]
+ # The state of the entry in the 'active' WT
+ active_kind = entry[1][0][0]
+ if entry[0][2] != file_id:
+ self._raise_invalid(old_path, file_id,
+ 'mismatched file_id in tree 1')
+ dir_block = ()
+ old_kind = entry[1][1][0]
+ if active_kind in 'ar':
+ # The active tree doesn't have this file_id.
+ # The basis tree is changing this record. If this is a
+ # rename, then we don't want the record here at all
+ # anymore. If it is just an in-place change, we want the
+ # record here, but we'll add it if we need to. So we just
+ # delete it
+ if active_kind == 'r':
+ active_path = entry[1][0][1]
+ active_entry = self._get_entry(0, file_id, active_path)
+ if active_entry[1][1][0] != 'r':
+ self._raise_invalid(old_path, file_id,
+ "Dirstate did not have matching rename entries")
+ elif active_entry[1][0][0] in 'ar':
+ self._raise_invalid(old_path, file_id,
+ "Dirstate had a rename pointing at an inactive"
+ " tree0")
+ active_entry[1][1] = null
+ del self._dirblocks[block_index][1][entry_index]
+ if old_kind == 'd':
+ # This was a directory, and the active tree says it
+ # doesn't exist, and now the basis tree says it doesn't
+ # exist. Remove its dirblock if present
+ (dir_block_index,
+ present) = self._find_block_index_from_key(
+ (old_path, '', ''))
+ if present:
+ dir_block = self._dirblocks[dir_block_index][1]
+ if not dir_block:
+ # This entry is empty, go ahead and just remove it
+ del self._dirblocks[dir_block_index]
+ else:
+ # There is still an active record, so just mark this
+ # removed.
+ entry[1][1] = null
+ block_i, entry_i, d_present, f_present = \
+ self._get_block_entry_index(old_path, '', 1)
+ if d_present:
+ dir_block = self._dirblocks[block_i][1]
+ for child_entry in dir_block:
+ child_basis_kind = child_entry[1][1][0]
+ if child_basis_kind not in 'ar':
+ self._raise_invalid(old_path, file_id,
+ "The file id was deleted but its children were "
+ "not deleted.")
+
+ def _after_delta_check_parents(self, parents, index):
+ """Check that parents required by the delta are all intact.
+
+ :param parents: An iterable of (path_utf8, file_id) tuples which are
+ required to be present in tree 'index' at path_utf8 with id file_id
+ and be a directory.
+ :param index: The column in the dirstate to check for parents in.
+ """
+ for dirname_utf8, file_id in parents:
+            # Get the entry - this ensures that file_id, dirname_utf8 exists and
+ # has the right file id.
+ entry = self._get_entry(index, file_id, dirname_utf8)
+ if entry[1] is None:
+ self._raise_invalid(dirname_utf8.decode('utf8'),
+ file_id, "This parent is not present.")
+ # Parents of things must be directories
+ if entry[1][index][0] != 'd':
+ self._raise_invalid(dirname_utf8.decode('utf8'),
+ file_id, "This parent is not a directory.")
+
+ def _observed_sha1(self, entry, sha1, stat_value,
+ _stat_to_minikind=_stat_to_minikind):
+ """Note the sha1 of a file.
+
+ :param entry: The entry the sha1 is for.
+ :param sha1: The observed sha1.
+ :param stat_value: The os.lstat for the file.
+ """
+ try:
+ minikind = _stat_to_minikind[stat_value.st_mode & 0170000]
+ except KeyError:
+ # Unhandled kind
+ return None
+ if minikind == 'f':
+ if self._cutoff_time is None:
+ self._sha_cutoff_time()
+ if (stat_value.st_mtime < self._cutoff_time
+ and stat_value.st_ctime < self._cutoff_time):
+ entry[1][0] = ('f', sha1, stat_value.st_size, entry[1][0][3],
+ pack_stat(stat_value))
+ self._mark_modified([entry])
+
+ def _sha_cutoff_time(self):
+ """Return cutoff time.
+
+ Files modified more recently than this time are at risk of being
+ undetectably modified and so can't be cached.
+ """
+ # Cache the cutoff time as long as we hold a lock.
+ # time.time() isn't super expensive (approx 3.38us), but
+ # when you call it 50,000 times it adds up.
+ # For comparison, os.lstat() costs 7.2us if it is hot.
+ self._cutoff_time = int(time.time()) - 3
+ return self._cutoff_time
+
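+    # Worked example (timestamps hypothetical): with a cutoff of
+    # time.time() - 3, a file whose st_mtime or st_ctime falls within the
+    # last three seconds is treated as "too fresh" and _observed_sha1 will
+    # not cache its sha1, since a same-second edit could evade the
+    # stat-based change detection.
+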
+ def _lstat(self, abspath, entry):
+ """Return the os.lstat value for this path."""
+ return os.lstat(abspath)
+
+ def _sha1_file_and_mutter(self, abspath):
+ # when -Dhashcache is turned on, this is monkey-patched in to log
+ # file reads
+ trace.mutter("dirstate sha1 " + abspath)
+ return self._sha1_provider.sha1(abspath)
+
+ def _is_executable(self, mode, old_executable):
+ """Is this file executable?"""
+ return bool(S_IEXEC & mode)
+
+ def _is_executable_win32(self, mode, old_executable):
+ """On win32 the executable bit is stored in the dirstate."""
+ return old_executable
+
+ if sys.platform == 'win32':
+ _is_executable = _is_executable_win32
+
+ def _read_link(self, abspath, old_link):
+ """Read the target of a symlink"""
+ # TODO: jam 200700301 On Win32, this could just return the value
+ # already in memory. However, this really needs to be done at a
+ # higher level, because there either won't be anything on disk,
+ # or the thing on disk will be a file.
+ fs_encoding = osutils._fs_enc
+ if isinstance(abspath, unicode):
+ # abspath is defined as the path to pass to lstat. readlink is
+ # buggy in python < 2.6 (it doesn't encode unicode path into FS
+ # encoding), so we need to encode ourselves knowing that unicode
+ # paths are produced by UnicodeDirReader on purpose.
+ abspath = abspath.encode(fs_encoding)
+ target = os.readlink(abspath)
+ if fs_encoding not in ('utf-8', 'ascii'):
+ # Change encoding if needed
+ target = target.decode(fs_encoding).encode('UTF-8')
+ return target
+
+ def get_ghosts(self):
+ """Return a list of the parent tree revision ids that are ghosts."""
+ self._read_header_if_needed()
+ return self._ghosts
+
+ def get_lines(self):
+ """Serialise the entire dirstate to a sequence of lines."""
+ if (self._header_state == DirState.IN_MEMORY_UNMODIFIED and
+ self._dirblock_state == DirState.IN_MEMORY_UNMODIFIED):
+ # read what's on disk.
+ self._state_file.seek(0)
+ return self._state_file.readlines()
+ lines = []
+ lines.append(self._get_parents_line(self.get_parent_ids()))
+ lines.append(self._get_ghosts_line(self._ghosts))
+ lines.extend(self._get_entry_lines())
+ return self._get_output_lines(lines)
+
+ def _get_ghosts_line(self, ghost_ids):
+ """Create a line for the state file for ghost information."""
+ return '\0'.join([str(len(ghost_ids))] + ghost_ids)
+
+ def _get_parents_line(self, parent_ids):
+ """Create a line for the state file for parents information."""
+ return '\0'.join([str(len(parent_ids))] + parent_ids)
+
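+    # Illustrative line contents (revision ids hypothetical): both helpers
+    # prefix the id list with its length and join the fields with NULs:
+    #
+    #   _get_parents_line(['rev-1', 'rev-2'])  -> '2\x00rev-1\x00rev-2'
+    #   _get_ghosts_line(['ghost-rev'])        -> '1\x00ghost-rev'
+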
+ def _get_entry_lines(self):
+ """Create lines for entries."""
+ return map(self._entry_to_line, self._iter_entries())
+
+ def _get_fields_to_entry(self):
+ """Get a function which converts entry fields into a entry record.
+
+ This handles size and executable, as well as parent records.
+
+ :return: A function which takes a list of fields, and returns an
+ appropriate record for storing in memory.
+ """
+ # This is intentionally unrolled for performance
+ num_present_parents = self._num_present_parents()
+ if num_present_parents == 0:
+ def fields_to_entry_0_parents(fields, _int=int):
+ path_name_file_id_key = (fields[0], fields[1], fields[2])
+ return (path_name_file_id_key, [
+ ( # Current tree
+ fields[3], # minikind
+ fields[4], # fingerprint
+ _int(fields[5]), # size
+ fields[6] == 'y', # executable
+ fields[7], # packed_stat or revision_id
+ )])
+ return fields_to_entry_0_parents
+ elif num_present_parents == 1:
+ def fields_to_entry_1_parent(fields, _int=int):
+ path_name_file_id_key = (fields[0], fields[1], fields[2])
+ return (path_name_file_id_key, [
+ ( # Current tree
+ fields[3], # minikind
+ fields[4], # fingerprint
+ _int(fields[5]), # size
+ fields[6] == 'y', # executable
+ fields[7], # packed_stat or revision_id
+ ),
+ ( # Parent 1
+ fields[8], # minikind
+ fields[9], # fingerprint
+ _int(fields[10]), # size
+ fields[11] == 'y', # executable
+ fields[12], # packed_stat or revision_id
+ ),
+ ])
+ return fields_to_entry_1_parent
+ elif num_present_parents == 2:
+ def fields_to_entry_2_parents(fields, _int=int):
+ path_name_file_id_key = (fields[0], fields[1], fields[2])
+ return (path_name_file_id_key, [
+ ( # Current tree
+ fields[3], # minikind
+ fields[4], # fingerprint
+ _int(fields[5]), # size
+ fields[6] == 'y', # executable
+ fields[7], # packed_stat or revision_id
+ ),
+ ( # Parent 1
+ fields[8], # minikind
+ fields[9], # fingerprint
+ _int(fields[10]), # size
+ fields[11] == 'y', # executable
+ fields[12], # packed_stat or revision_id
+ ),
+ ( # Parent 2
+ fields[13], # minikind
+ fields[14], # fingerprint
+ _int(fields[15]), # size
+ fields[16] == 'y', # executable
+ fields[17], # packed_stat or revision_id
+ ),
+ ])
+ return fields_to_entry_2_parents
+ else:
+ def fields_to_entry_n_parents(fields, _int=int):
+ path_name_file_id_key = (fields[0], fields[1], fields[2])
+ trees = [(fields[cur], # minikind
+ fields[cur+1], # fingerprint
+ _int(fields[cur+2]), # size
+ fields[cur+3] == 'y', # executable
+ fields[cur+4], # stat or revision_id
+ ) for cur in xrange(3, len(fields)-1, 5)]
+ return path_name_file_id_key, trees
+ return fields_to_entry_n_parents
+
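+    # Illustrative conversion (field values hypothetical) for the zero-parent
+    # case returned above: eight fields become a (key, [tree details]) entry,
+    # with the size cast to int and the executable flag decoded from 'y'/'n':
+    #
+    #   fields = ['dir', 'name', 'file-id', 'f', 'sha1...', '12', 'n', 'packed']
+    #   fields_to_entry_0_parents(fields) ==
+    #       (('dir', 'name', 'file-id'),
+    #        [('f', 'sha1...', 12, False, 'packed')])
+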
+ def get_parent_ids(self):
+ """Return a list of the parent tree ids for the directory state."""
+ self._read_header_if_needed()
+ return list(self._parents)
+
+ def _get_block_entry_index(self, dirname, basename, tree_index):
+ """Get the coordinates for a path in the state structure.
+
+ :param dirname: The utf8 dirname to lookup.
+ :param basename: The utf8 basename to lookup.
+ :param tree_index: The index of the tree for which this lookup should
+ be attempted.
+ :return: A tuple describing where the path is located, or should be
+            inserted. The tuple contains four fields: the block index, the row
+            index, whether the directory is present (boolean), and whether the
+            entire path is present (boolean). There is no guarantee that either
+ coordinate is currently reachable unless the found field for it is
+ True. For instance, a directory not present in the searched tree
+ may be returned with a value one greater than the current highest
+ block offset. The directory present field will always be True when
+ the path present field is True. The directory present field does
+ NOT indicate that the directory is present in the searched tree,
+ rather it indicates that there are at least some files in some
+ tree present there.
+ """
+ self._read_dirblocks_if_needed()
+ key = dirname, basename, ''
+ block_index, present = self._find_block_index_from_key(key)
+ if not present:
+ # no such directory - return the dir index and 0 for the row.
+ return block_index, 0, False, False
+ block = self._dirblocks[block_index][1] # access the entries only
+ entry_index, present = self._find_entry_index(key, block)
+ # linear search through entries at this path to find the one
+ # requested.
+ while entry_index < len(block) and block[entry_index][0][1] == basename:
+ if block[entry_index][1][tree_index][0] not in 'ar':
+                # neither absent nor relocated
+ return block_index, entry_index, True, True
+ entry_index += 1
+ return block_index, entry_index, True, False
+
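+    # Illustrative return values (layout hypothetical): for a state whose
+    # only entry in tree 0 is 'dir/file',
+    #
+    #   _get_block_entry_index('dir', 'file', 0)    -> (block, row, True, True)
+    #   _get_block_entry_index('dir', 'absent', 0)  -> (block, row, True, False)
+    #   _get_block_entry_index('nodir', 'x', 0)     -> (block, 0, False, False)
+    #
+    # Only coordinates whose corresponding "present" flag is True are
+    # guaranteed to be reachable.
+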
+ def _get_entry(self, tree_index, fileid_utf8=None, path_utf8=None,
+ include_deleted=False):
+ """Get the dirstate entry for path in tree tree_index.
+
+ If either file_id or path is supplied, it is used as the key to lookup.
+ If both are supplied, the fastest lookup is used, and an error is
+ raised if they do not both point at the same row.
+
+ :param tree_index: The index of the tree we wish to locate this path
+ in. If the path is present in that tree, the entry containing its
+            details is returned, otherwise (None, None) is returned.
+            0 is the working tree, higher indexes are successive parent
+            trees.
+        :param fileid_utf8: A utf8 file_id to look up.
+        :param path_utf8: A utf8 path to be looked up.
+ :param include_deleted: If True, and performing a lookup via
+ fileid_utf8 rather than path_utf8, return an entry for deleted
+ (absent) paths.
+ :return: The dirstate entry tuple for path, or (None, None)
+ """
+ self._read_dirblocks_if_needed()
+ if path_utf8 is not None:
+ if type(path_utf8) is not str:
+ raise errors.BzrError('path_utf8 is not a str: %s %r'
+ % (type(path_utf8), path_utf8))
+ # path lookups are faster
+ dirname, basename = osutils.split(path_utf8)
+ block_index, entry_index, dir_present, file_present = \
+ self._get_block_entry_index(dirname, basename, tree_index)
+ if not file_present:
+ return None, None
+ entry = self._dirblocks[block_index][1][entry_index]
+ if not (entry[0][2] and entry[1][tree_index][0] not in ('a', 'r')):
+ raise AssertionError('unversioned entry?')
+ if fileid_utf8:
+ if entry[0][2] != fileid_utf8:
+ self._changes_aborted = True
+ raise errors.BzrError('integrity error ? : mismatching'
+ ' tree_index, file_id and path')
+ return entry
+ else:
+ possible_keys = self._get_id_index().get(fileid_utf8, ())
+ if not possible_keys:
+ return None, None
+ for key in possible_keys:
+ block_index, present = \
+ self._find_block_index_from_key(key)
+ # strange, probably indicates an out of date
+ # id index - for now, allow this.
+ if not present:
+ continue
+                # WARNING: DO NOT change this code to use _get_block_entry_index
+ # as that function is not suitable: it does not use the key
+ # to lookup, and thus the wrong coordinates are returned.
+ block = self._dirblocks[block_index][1]
+ entry_index, present = self._find_entry_index(key, block)
+ if present:
+ entry = self._dirblocks[block_index][1][entry_index]
+ # TODO: We might want to assert that entry[0][2] ==
+ # fileid_utf8.
+ if entry[1][tree_index][0] in 'fdlt':
+ # this is the result we are looking for: the
+ # real home of this file_id in this tree.
+ return entry
+ if entry[1][tree_index][0] == 'a':
+ # there is no home for this entry in this tree
+ if include_deleted:
+ return entry
+ return None, None
+ if entry[1][tree_index][0] != 'r':
+ raise AssertionError(
+ "entry %r has invalid minikind %r for tree %r" \
+ % (entry,
+ entry[1][tree_index][0],
+ tree_index))
+ real_path = entry[1][tree_index][1]
+ return self._get_entry(tree_index, fileid_utf8=fileid_utf8,
+ path_utf8=real_path)
+ return None, None
+
+ @classmethod
+ def initialize(cls, path, sha1_provider=None):
+ """Create a new dirstate on path.
+
+ The new dirstate will be an empty tree - that is it has no parents,
+ and only a root node - which has id ROOT_ID.
+
+ :param path: The name of the file for the dirstate.
+ :param sha1_provider: an object meeting the SHA1Provider interface.
+ If None, a DefaultSHA1Provider is used.
+ :return: A write-locked DirState object.
+ """
+        # This constructs a new DirState object on a path and sets the _state_file
+ # to a new empty file for that path. It then calls _set_data() with our
+ # stock empty dirstate information - a root with ROOT_ID, no children,
+ # and no parents. Finally it calls save() to ensure that this data will
+ # persist.
+ if sha1_provider is None:
+ sha1_provider = DefaultSHA1Provider()
+ result = cls(path, sha1_provider)
+ # root dir and root dir contents with no children.
+ empty_tree_dirblocks = [('', []), ('', [])]
+ # a new root directory, with a NULLSTAT.
+ empty_tree_dirblocks[0][1].append(
+ (('', '', inventory.ROOT_ID), [
+ ('d', '', 0, False, DirState.NULLSTAT),
+ ]))
+ result.lock_write()
+ try:
+ result._set_data([], empty_tree_dirblocks)
+ result.save()
+ except:
+ result.unlock()
+ raise
+ return result
+
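+    # A minimal usage sketch (path hypothetical; the returned object is
+    # already write-locked per the docstring above):
+    #
+    #   state = DirState.initialize('dirstate')
+    #   try:
+    #       pass  # ... populate or inspect the new, empty dirstate ...
+    #   finally:
+    #       state.unlock()
+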
+ @staticmethod
+ def _inv_entry_to_details(inv_entry):
+ """Convert an inventory entry (from a revision tree) to state details.
+
+ :param inv_entry: An inventory entry whose sha1 and link targets can be
+ relied upon, and which has a revision set.
+ :return: A details tuple - the details for a single tree at a path +
+ id.
+ """
+ kind = inv_entry.kind
+ minikind = DirState._kind_to_minikind[kind]
+ tree_data = inv_entry.revision
+ if kind == 'directory':
+ fingerprint = ''
+ size = 0
+ executable = False
+ elif kind == 'symlink':
+ if inv_entry.symlink_target is None:
+ fingerprint = ''
+ else:
+ fingerprint = inv_entry.symlink_target.encode('utf8')
+ size = 0
+ executable = False
+ elif kind == 'file':
+ fingerprint = inv_entry.text_sha1 or ''
+ size = inv_entry.text_size or 0
+ executable = inv_entry.executable
+ elif kind == 'tree-reference':
+ fingerprint = inv_entry.reference_revision or ''
+ size = 0
+ executable = False
+ else:
+ raise Exception("can't pack %s" % inv_entry)
+ return static_tuple.StaticTuple(minikind, fingerprint, size,
+ executable, tree_data)
+
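+    # Illustrative details tuples (fingerprints hypothetical) as produced by
+    # _inv_entry_to_details; the final element is always the entry's revision:
+    #
+    #   file:           ('f', <text_sha1>, <text_size>, <executable>, <revision>)
+    #   directory:      ('d', '', 0, False, <revision>)
+    #   symlink:        ('l', <utf8 target>, 0, False, <revision>)
+    #   tree-reference: ('t', <reference_revision>, 0, False, <revision>)
+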
+ def _iter_child_entries(self, tree_index, path_utf8):
+ """Iterate over all the entries that are children of path_utf.
+
+ This only returns entries that are present (not in 'a', 'r') in
+ tree_index. tree_index data is not refreshed, so if tree 0 is used,
+ results may differ from that obtained if paths were statted to
+ determine what ones were directories.
+
+ Asking for the children of a non-directory will return an empty
+ iterator.
+ """
+ pending_dirs = []
+ next_pending_dirs = [path_utf8]
+ absent = 'ar'
+ while next_pending_dirs:
+ pending_dirs = next_pending_dirs
+ next_pending_dirs = []
+ for path in pending_dirs:
+ block_index, present = self._find_block_index_from_key(
+ (path, '', ''))
+ if block_index == 0:
+ block_index = 1
+ if len(self._dirblocks) == 1:
+ # asked for the children of the root with no other
+ # contents.
+ return
+ if not present:
+ # children of a non-directory asked for.
+ continue
+ block = self._dirblocks[block_index]
+ for entry in block[1]:
+ kind = entry[1][tree_index][0]
+ if kind not in absent:
+ yield entry
+ if kind == 'd':
+ if entry[0][0]:
+ path = entry[0][0] + '/' + entry[0][1]
+ else:
+ path = entry[0][1]
+ next_pending_dirs.append(path)
+
+ def _iter_entries(self):
+ """Iterate over all the entries in the dirstate.
+
+        Each yielded item is an entry in the standard format described in the
+ docstring of bzrlib.dirstate.
+ """
+ self._read_dirblocks_if_needed()
+ for directory in self._dirblocks:
+ for entry in directory[1]:
+ yield entry
+
+ def _get_id_index(self):
+ """Get an id index of self._dirblocks.
+
+ This maps from file_id => [(directory, name, file_id)] entries where
+ that file_id appears in one of the trees.
+ """
+ if self._id_index is None:
+ id_index = {}
+ for key, tree_details in self._iter_entries():
+ self._add_to_id_index(id_index, key)
+ self._id_index = id_index
+ return self._id_index
+
+ def _add_to_id_index(self, id_index, entry_key):
+ """Add this entry to the _id_index mapping."""
+        # This code used to use a set for every entry in the id_index. However,
+        # it is *rare* to have more than one entry. So a set is serious
+        # overkill. And even when we do, we won't ever have more than the
+        # number of parent trees. Which is still a small number (rarely >2). As
+        # such, we use a simple tuple, and do our own uniqueness checks. While
+        # the 'in' check is O(N), N is nicely bounded, so it shouldn't ever
+        # cause quadratic failure.
+ file_id = entry_key[2]
+ entry_key = static_tuple.StaticTuple.from_sequence(entry_key)
+ if file_id not in id_index:
+ id_index[file_id] = static_tuple.StaticTuple(entry_key,)
+ else:
+ entry_keys = id_index[file_id]
+ if entry_key not in entry_keys:
+ id_index[file_id] = entry_keys + (entry_key,)
+
+ def _remove_from_id_index(self, id_index, entry_key):
+ """Remove this entry from the _id_index mapping.
+
+        It is a programming error to call this when the entry_key is not
+ already present.
+ """
+ file_id = entry_key[2]
+ entry_keys = list(id_index[file_id])
+ entry_keys.remove(entry_key)
+ id_index[file_id] = static_tuple.StaticTuple.from_sequence(entry_keys)
+
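+    # Illustrative id_index contents (keys hypothetical): each file_id maps
+    # to a StaticTuple of entry keys, growing only when the same id appears
+    # at another path:
+    #
+    #   id_index['an-id'] == (('dir', 'name', 'an-id'),)
+    #   # ... after _add_to_id_index() with a second key ...
+    #   id_index['an-id'] == (('dir', 'name', 'an-id'),
+    #                         ('other-dir', 'name', 'an-id'))
+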
+ def _get_output_lines(self, lines):
+ """Format lines for final output.
+
+ :param lines: A sequence of lines containing the parents list and the
+ path lines.
+ """
+ output_lines = [DirState.HEADER_FORMAT_3]
+ lines.append('') # a final newline
+ inventory_text = '\0\n\0'.join(lines)
+ output_lines.append('crc32: %s\n' % (zlib.crc32(inventory_text),))
+ # -3, 1 for num parents, 1 for ghosts, 1 for final newline
+ num_entries = len(lines)-3
+ output_lines.append('num_entries: %s\n' % (num_entries,))
+ output_lines.append(inventory_text)
+ return output_lines
+
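+    # Illustrative serialisation layout (counts and checksum hypothetical):
+    # the output is the format header, a crc32 of the joined body, the entry
+    # count, and then the parents/ghosts/entry lines joined with '\0\n\0':
+    #
+    #   <DirState.HEADER_FORMAT_3>
+    #   crc32: <zlib.crc32 of the joined body>\n
+    #   num_entries: <number of entry lines>\n
+    #   <parents line>\0\n\0<ghosts line>\0\n\0<entry lines...>\0\n\0
+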
+ def _make_deleted_row(self, fileid_utf8, parents):
+ """Return a deleted row for fileid_utf8."""
+ return ('/', 'RECYCLED.BIN', 'file', fileid_utf8, 0, DirState.NULLSTAT,
+ ''), parents
+
+ def _num_present_parents(self):
+ """The number of parent entries in each record row."""
+ return len(self._parents) - len(self._ghosts)
+
+ @classmethod
+ def on_file(cls, path, sha1_provider=None, worth_saving_limit=0):
+ """Construct a DirState on the file at path "path".
+
+ :param path: The path at which the dirstate file on disk should live.
+ :param sha1_provider: an object meeting the SHA1Provider interface.
+ If None, a DefaultSHA1Provider is used.
+ :param worth_saving_limit: when the exact number of hash changed
+ entries is known, only bother saving the dirstate if more than
+ this count of entries have changed. -1 means never save.
+ :return: An unlocked DirState object, associated with the given path.
+ """
+ if sha1_provider is None:
+ sha1_provider = DefaultSHA1Provider()
+ result = cls(path, sha1_provider,
+ worth_saving_limit=worth_saving_limit)
+ return result
+
+ def _read_dirblocks_if_needed(self):
+ """Read in all the dirblocks from the file if they are not in memory.
+
+ This populates self._dirblocks, and sets self._dirblock_state to
+ IN_MEMORY_UNMODIFIED. It is not currently ready for incremental block
+ loading.
+ """
+ self._read_header_if_needed()
+ if self._dirblock_state == DirState.NOT_IN_MEMORY:
+ _read_dirblocks(self)
+
+ def _read_header(self):
+ """This reads in the metadata header, and the parent ids.
+
+ After reading in, the file should be positioned at the null
+ just before the start of the first record in the file.
+
+ :return: (expected crc checksum, number of entries, parent list)
+ """
+ self._read_prelude()
+ parent_line = self._state_file.readline()
+ info = parent_line.split('\0')
+ num_parents = int(info[0])
+ self._parents = info[1:-1]
+ ghost_line = self._state_file.readline()
+ info = ghost_line.split('\0')
+ num_ghosts = int(info[1])
+ self._ghosts = info[2:-1]
+ self._header_state = DirState.IN_MEMORY_UNMODIFIED
+ self._end_of_header = self._state_file.tell()
+
+ def _read_header_if_needed(self):
+ """Read the header of the dirstate file if needed."""
+ # inline this as it will be called a lot
+ if not self._lock_token:
+ raise errors.ObjectNotLocked(self)
+ if self._header_state == DirState.NOT_IN_MEMORY:
+ self._read_header()
+
+ def _read_prelude(self):
+ """Read in the prelude header of the dirstate file.
+
+ This only reads in the stuff that is not connected to the crc
+ checksum. The position will be correct to read in the rest of
+ the file and check the checksum after this point.
+        The next entry in the file should be the number of parents,
+        and their ids, followed by a newline.
+ """
+ header = self._state_file.readline()
+ if header != DirState.HEADER_FORMAT_3:
+ raise errors.BzrError(
+ 'invalid header line: %r' % (header,))
+ crc_line = self._state_file.readline()
+ if not crc_line.startswith('crc32: '):
+ raise errors.BzrError('missing crc32 checksum: %r' % crc_line)
+ self.crc_expected = int(crc_line[len('crc32: '):-1])
+ num_entries_line = self._state_file.readline()
+ if not num_entries_line.startswith('num_entries: '):
+ raise errors.BzrError('missing num_entries line')
+ self._num_entries = int(num_entries_line[len('num_entries: '):-1])
+
+ def sha1_from_stat(self, path, stat_result):
+ """Find a sha1 given a stat lookup."""
+ return self._get_packed_stat_index().get(pack_stat(stat_result), None)
+
+ def _get_packed_stat_index(self):
+ """Get a packed_stat index of self._dirblocks."""
+ if self._packed_stat_index is None:
+ index = {}
+ for key, tree_details in self._iter_entries():
+ if tree_details[0][0] == 'f':
+ index[tree_details[0][4]] = tree_details[0][1]
+ self._packed_stat_index = index
+ return self._packed_stat_index
+
+ def save(self):
+ """Save any pending changes created during this session.
+
+ We reuse the existing file, because that prevents race conditions with
+ file creation, and use oslocks on it to prevent concurrent modification
+ and reads - because dirstate's incremental data aggregation is not
+ compatible with reading a modified file, and replacing a file in use by
+ another process is impossible on Windows.
+
+        A dirstate in read-only mode should nonetheless be smart enough to
+        validate that the file has not changed, and otherwise discard its
+        cache and start over, to allow for fine-grained read lock duration,
+        so 'status' won't block 'commit' - for example.
+ """
+ if self._changes_aborted:
+ # Should this be a warning? For now, I'm expecting that places that
+ # mark it inconsistent will warn, making a warning here redundant.
+ trace.mutter('Not saving DirState because '
+ '_changes_aborted is set.')
+ return
+ # TODO: Since we now distinguish IN_MEMORY_MODIFIED from
+ # IN_MEMORY_HASH_MODIFIED, we should only fail quietly if we fail
+ # to save an IN_MEMORY_HASH_MODIFIED, and fail *noisily* if we
+ # fail to save IN_MEMORY_MODIFIED
+ if not self._worth_saving():
+ return
+
+ grabbed_write_lock = False
+ if self._lock_state != 'w':
+ grabbed_write_lock, new_lock = self._lock_token.temporary_write_lock()
+ # Switch over to the new lock, as the old one may be closed.
+ # TODO: jam 20070315 We should validate the disk file has
+ # not changed contents, since temporary_write_lock may
+ # not be an atomic operation.
+ self._lock_token = new_lock
+ self._state_file = new_lock.f
+ if not grabbed_write_lock:
+ # We couldn't grab a write lock, so we switch back to a read one
+ return
+ try:
+ lines = self.get_lines()
+ self._state_file.seek(0)
+ self._state_file.writelines(lines)
+ self._state_file.truncate()
+ self._state_file.flush()
+ self._maybe_fdatasync()
+ self._mark_unmodified()
+ finally:
+ if grabbed_write_lock:
+ self._lock_token = self._lock_token.restore_read_lock()
+ self._state_file = self._lock_token.f
+ # TODO: jam 20070315 We should validate the disk file has
+            #       not changed contents, since restore_read_lock may
+ # not be an atomic operation.
+
+ def _maybe_fdatasync(self):
+ """Flush to disk if possible and if not configured off."""
+ if self._config_stack.get('dirstate.fdatasync'):
+ osutils.fdatasync(self._state_file.fileno())
+
+ def _worth_saving(self):
+ """Is it worth saving the dirstate or not?"""
+ if (self._header_state == DirState.IN_MEMORY_MODIFIED
+ or self._dirblock_state == DirState.IN_MEMORY_MODIFIED):
+ return True
+ if self._dirblock_state == DirState.IN_MEMORY_HASH_MODIFIED:
+ if self._worth_saving_limit == -1:
+ # We never save hash changes when the limit is -1
+ return False
+ # If we're using smart saving and only a small number of
+ # entries have changed their hash, don't bother saving. John has
+ # suggested using a heuristic here based on the size of the
+ # changed files and/or tree. For now, we go with a configurable
+ # number of changes, keeping the calculation time
+ # as low overhead as possible. (This also keeps all existing
+ # tests passing as the default is 0, i.e. always save.)
+ if len(self._known_hash_changes) >= self._worth_saving_limit:
+ return True
+ return False
+
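+    # Illustrative thresholds (limits hypothetical): with the default
+    # worth_saving_limit of 0 any hash-only change makes the dirstate worth
+    # writing; a limit of, say, 10 skips the write until at least ten entries
+    # have recorded new hashes; -1 never writes for hash-only changes.
+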
+ def _set_data(self, parent_ids, dirblocks):
+ """Set the full dirstate data in memory.
+
+ This is an internal function used to completely replace the objects
+ in memory state. It puts the dirstate into state 'full-dirty'.
+
+ :param parent_ids: A list of parent tree revision ids.
+ :param dirblocks: A list containing one tuple for each directory in the
+ tree. Each tuple contains the directory path and a list of entries
+ found in that directory.
+ """
+ # our memory copy is now authoritative.
+ self._dirblocks = dirblocks
+ self._mark_modified(header_modified=True)
+ self._parents = list(parent_ids)
+ self._id_index = None
+ self._packed_stat_index = None
+
+ def set_path_id(self, path, new_id):
+ """Change the id of path to new_id in the current working tree.
+
+ :param path: The path inside the tree to set - '' is the root, 'foo'
+ is the path foo in the root.
+ :param new_id: The new id to assign to the path. This must be a utf8
+ file id (not unicode, and not None).
+ """
+ self._read_dirblocks_if_needed()
+ if len(path):
+ # TODO: logic not written
+ raise NotImplementedError(self.set_path_id)
+ # TODO: check new id is unique
+ entry = self._get_entry(0, path_utf8=path)
+ if entry[0][2] == new_id:
+ # Nothing to change.
+ return
+ # mark the old path absent, and insert a new root path
+ self._make_absent(entry)
+ self.update_minimal(('', '', new_id), 'd',
+ path_utf8='', packed_stat=entry[1][0][4])
+ self._mark_modified()
+        # XXX: This was added by Ian; we need to make sure there
+        # are tests for it, because it isn't in bzr.dev TRUNK.
+        # It looks like the only place it is called is in setting the root
+        # id of the tree. So probably we never have an _id_index when we
+        # don't even have a root yet.
+ if self._id_index is not None:
+ self._add_to_id_index(self._id_index, entry[0])
+
+ def set_parent_trees(self, trees, ghosts):
+ """Set the parent trees for the dirstate.
+
+ :param trees: A list of revision_id, tree tuples. tree must be provided
+ even if the revision_id refers to a ghost: supply an empty tree in
+ this case.
+ :param ghosts: A list of the revision_ids that are ghosts at the time
+ of setting.
+ """
+ # TODO: generate a list of parent indexes to preserve to save
+ # processing specific parent trees. In the common case one tree will
+ # be preserved - the left most parent.
+ # TODO: if the parent tree is a dirstate, we might want to walk them
+ # all by path in parallel for 'optimal' common-case performance.
+ # generate new root row.
+ self._read_dirblocks_if_needed()
+ # TODO future sketch: Examine the existing parents to generate a change
+ # map and then walk the new parent trees only, mapping them into the
+ # dirstate. Walk the dirstate at the same time to remove unreferenced
+ # entries.
+ # for now:
+ # sketch: loop over all entries in the dirstate, cherry picking
+ # entries from the parent trees, if they are not ghost trees.
+ # after we finish walking the dirstate, all entries not in the dirstate
+ # are deletes, so we want to append them to the end as per the design
+ # discussions. So do a set difference on ids with the parents to
+ # get deletes, and add them to the end.
+ # During the update process we need to answer the following questions:
+ # - find other keys containing a fileid in order to create cross-path
+        # links. We don't trivially use the inventory from other trees
+ # because this leads to either double touching, or to accessing
+ # missing keys,
+ # - find other keys containing a path
+ # We accumulate each entry via this dictionary, including the root
+ by_path = {}
+ id_index = {}
+ # we could do parallel iterators, but because file id data may be
+        # scattered throughout, we don't save on index overhead: we have to look
+ # at everything anyway. We can probably save cycles by reusing parent
+ # data and doing an incremental update when adding an additional
+ # parent, but for now the common cases are adding a new parent (merge),
+ # and replacing completely (commit), and commit is more common: so
+ # optimise merge later.
+
+ # ---- start generation of full tree mapping data
+ # what trees should we use?
+ parent_trees = [tree for rev_id, tree in trees if rev_id not in ghosts]
+ # how many trees do we end up with
+ parent_count = len(parent_trees)
+ st = static_tuple.StaticTuple
+
+ # one: the current tree
+ for entry in self._iter_entries():
+ # skip entries not in the current tree
+ if entry[1][0][0] in 'ar': # absent, relocated
+ continue
+ by_path[entry[0]] = [entry[1][0]] + \
+ [DirState.NULL_PARENT_DETAILS] * parent_count
+ # TODO: Possibly inline this, since we know it isn't present yet
+ # id_index[entry[0][2]] = (entry[0],)
+ self._add_to_id_index(id_index, entry[0])
+
+ # now the parent trees:
+ for tree_index, tree in enumerate(parent_trees):
+ # the index is off by one, adjust it.
+ tree_index = tree_index + 1
+ # when we add new locations for a fileid we need these ranges for
+ # any fileid in this tree as we set the by_path[id] to:
+ # already_processed_tree_details + new_details + new_location_suffix
+ # the suffix is from tree_index+1:parent_count+1.
+ new_location_suffix = [DirState.NULL_PARENT_DETAILS] * (parent_count - tree_index)
+ # now stitch in all the entries from this tree
+ last_dirname = None
+ for path, entry in tree.iter_entries_by_dir():
+                # here we process each tree's details for each item in the tree.
+                # We first update any existing entries for the id at other paths,
+ # then we either create or update the entry for the id at the
+ # right path, and finally we add (if needed) a mapping from
+ # file_id to this path. We do it in this order to allow us to
+ # avoid checking all known paths for the id when generating a
+ # new entry at this path: by adding the id->path mapping last,
+ # all the mappings are valid and have correct relocation
+ # records where needed.
+ file_id = entry.file_id
+ path_utf8 = path.encode('utf8')
+ dirname, basename = osutils.split(path_utf8)
+ if dirname == last_dirname:
+ # Try to re-use objects as much as possible
+ dirname = last_dirname
+ else:
+ last_dirname = dirname
+ new_entry_key = st(dirname, basename, file_id)
+ # tree index consistency: All other paths for this id in this tree
+ # index must point to the correct path.
+ entry_keys = id_index.get(file_id, ())
+ for entry_key in entry_keys:
+ # TODO:PROFILING: It might be faster to just update
+ # rather than checking if we need to, and then overwrite
+ # the one we are located at.
+ if entry_key != new_entry_key:
+ # this file id is at a different path in one of the
+ # other trees, so put absent pointers there
+ # This is the vertical axis in the matrix, all pointing
+ # to the real path.
+ by_path[entry_key][tree_index] = st('r', path_utf8, 0,
+ False, '')
+ # by path consistency: Insert into an existing path record
+ # (trivial), or add a new one with relocation pointers for the
+ # other tree indexes.
+ if new_entry_key in entry_keys:
+ # there is already an entry where this data belongs, just
+ # insert it.
+ by_path[new_entry_key][tree_index] = \
+ self._inv_entry_to_details(entry)
+ else:
+ # add relocated entries to the horizontal axis - this row
+ # mapping from path,id. We need to look up the correct path
+ # for the indexes from 0 to tree_index -1
+ new_details = []
+ for lookup_index in xrange(tree_index):
+                        # boundary case: this is the first occurrence of file_id
+ # so there are no id_indexes, possibly take this out of
+ # the loop?
+ if not len(entry_keys):
+ new_details.append(DirState.NULL_PARENT_DETAILS)
+ else:
+ # grab any one entry, use it to find the right path.
+ a_key = iter(entry_keys).next()
+ if by_path[a_key][lookup_index][0] in ('r', 'a'):
+                            # it's a pointer or missing statement, use it
+                            # as-is.
+ new_details.append(by_path[a_key][lookup_index])
+ else:
+ # we have the right key, make a pointer to it.
+ real_path = ('/'.join(a_key[0:2])).strip('/')
+ new_details.append(st('r', real_path, 0, False,
+ ''))
+ new_details.append(self._inv_entry_to_details(entry))
+ new_details.extend(new_location_suffix)
+ by_path[new_entry_key] = new_details
+ self._add_to_id_index(id_index, new_entry_key)
+ # --- end generation of full tree mappings
+
+ # sort and output all the entries
+ new_entries = self._sort_entries(by_path.items())
+ self._entries_to_current_state(new_entries)
+ self._parents = [rev_id for rev_id, tree in trees]
+ self._ghosts = list(ghosts)
+ self._mark_modified(header_modified=True)
+ self._id_index = id_index
+
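+    # Illustrative cross-reference (path hypothetical): when a file_id lives
+    # at a different path in some other tree, the column for that tree holds
+    # a relocation record pointing at the real path:
+    #
+    #   ('r', 'real/path', 0, False, '')
+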
+ def _sort_entries(self, entry_list):
+ """Given a list of entries, sort them into the right order.
+
+ This is done when constructing a new dirstate from trees - normally we
+ try to keep everything in sorted blocks all the time, but sometimes
+ it's easier to sort after the fact.
+ """
+ # When sorting, we usually have 10x more entries than directories. (69k
+ # total entries, 4k directories). So cache the results of splitting.
+ # Saving time and objects. Also, use StaticTuple to avoid putting all
+        # of these objects into python's garbage collector.
+ split_dirs = {}
+ def _key(entry, _split_dirs=split_dirs, _st=static_tuple.StaticTuple):
+ # sort by: directory parts, file name, file id
+ dirpath, fname, file_id = entry[0]
+ try:
+ split = _split_dirs[dirpath]
+ except KeyError:
+ split = _st.from_sequence(dirpath.split('/'))
+ _split_dirs[dirpath] = split
+ return _st(split, fname, file_id)
+ return sorted(entry_list, key=_key)
+
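+    # Illustrative sort key (hypothetical entry): _key splits and caches the
+    # directory part, so an entry keyed ('a/b', 'c', 'an-id') is ordered by
+    # the StaticTuple (('a', 'b'), 'c', 'an-id'), matching dirblock order.
+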
+ def set_state_from_inventory(self, new_inv):
+ """Set new_inv as the current state.
+
+ This API is called by tree transform, and will usually occur with
+ existing parent trees.
+
+ :param new_inv: The inventory object to set current state from.
+ """
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(1,
+ "set_state_from_inventory called; please mutate the tree instead")
+ tracing = 'dirstate' in debug.debug_flags
+ if tracing:
+ trace.mutter("set_state_from_inventory trace:")
+ self._read_dirblocks_if_needed()
+ # sketch:
+ # Two iterators: current data and new data, both in dirblock order.
+        # We zip them together, which tells us about entries that are new in the
+ # inventory, or removed in the inventory, or present in both and
+ # possibly changed.
+ #
+ # You might think we could just synthesize a new dirstate directly
+ # since we're processing it in the right order. However, we need to
+ # also consider there may be any number of parent trees and relocation
+ # pointers, and we don't want to duplicate that here.
+ new_iterator = new_inv.iter_entries_by_dir()
+ # we will be modifying the dirstate, so we need a stable iterator. In
+ # future we might write one, for now we just clone the state into a
+ # list using a copy so that we see every original item and don't have
+ # to adjust the position when items are inserted or deleted in the
+ # underlying dirstate.
+ old_iterator = iter(list(self._iter_entries()))
+ # both must have roots so this is safe:
+ current_new = new_iterator.next()
+ current_old = old_iterator.next()
+ def advance(iterator):
+ try:
+ return iterator.next()
+ except StopIteration:
+ return None
+ while current_new or current_old:
+ # skip entries in old that are not really there
+ if current_old and current_old[1][0][0] in 'ar':
+ # relocated or absent
+ current_old = advance(old_iterator)
+ continue
+ if current_new:
+ # convert new into dirblock style
+ new_path_utf8 = current_new[0].encode('utf8')
+ new_dirname, new_basename = osutils.split(new_path_utf8)
+ new_id = current_new[1].file_id
+ new_entry_key = (new_dirname, new_basename, new_id)
+ current_new_minikind = \
+ DirState._kind_to_minikind[current_new[1].kind]
+ if current_new_minikind == 't':
+ fingerprint = current_new[1].reference_revision or ''
+ else:
+ # We normally only insert or remove records, or update
+                    # them when they have significantly changed. Then we want to
+                    # erase their fingerprints. Unaffected records should
+ # normally not be updated at all.
+ fingerprint = ''
+ else:
+ # for safety disable variables
+ new_path_utf8 = new_dirname = new_basename = new_id = \
+ new_entry_key = None
+            # 5 cases, we don't have a value that is strictly greater than everything, so
+ # we make both end conditions explicit
+ if not current_old:
+ # old is finished: insert current_new into the state.
+ if tracing:
+ trace.mutter("Appending from new '%s'.",
+ new_path_utf8.decode('utf8'))
+ self.update_minimal(new_entry_key, current_new_minikind,
+ executable=current_new[1].executable,
+ path_utf8=new_path_utf8, fingerprint=fingerprint,
+ fullscan=True)
+ current_new = advance(new_iterator)
+ elif not current_new:
+ # new is finished
+ if tracing:
+ trace.mutter("Truncating from old '%s/%s'.",
+ current_old[0][0].decode('utf8'),
+ current_old[0][1].decode('utf8'))
+ self._make_absent(current_old)
+ current_old = advance(old_iterator)
+ elif new_entry_key == current_old[0]:
+ # same - common case
+ # We're looking at the same path and id in both the dirstate
+ # and inventory, so just need to update the fields in the
+ # dirstate from the one in the inventory.
+ # TODO: update the record if anything significant has changed.
+ # the minimal required trigger is if the execute bit or cached
+ # kind has changed.
+ if (current_old[1][0][3] != current_new[1].executable or
+ current_old[1][0][0] != current_new_minikind):
+ if tracing:
+ trace.mutter("Updating in-place change '%s'.",
+ new_path_utf8.decode('utf8'))
+ self.update_minimal(current_old[0], current_new_minikind,
+ executable=current_new[1].executable,
+ path_utf8=new_path_utf8, fingerprint=fingerprint,
+ fullscan=True)
+ # both sides are dealt with, move on
+ current_old = advance(old_iterator)
+ current_new = advance(new_iterator)
+ elif (cmp_by_dirs(new_dirname, current_old[0][0]) < 0
+ or (new_dirname == current_old[0][0]
+ and new_entry_key[1:] < current_old[0][1:])):
+ # new comes before:
+                # add an entry for this and advance new
+ if tracing:
+ trace.mutter("Inserting from new '%s'.",
+ new_path_utf8.decode('utf8'))
+ self.update_minimal(new_entry_key, current_new_minikind,
+ executable=current_new[1].executable,
+ path_utf8=new_path_utf8, fingerprint=fingerprint,
+ fullscan=True)
+ current_new = advance(new_iterator)
+ else:
+ # we've advanced past the place where the old key would be,
+                # without seeing it in the new list. So it must be gone.
+ if tracing:
+ trace.mutter("Deleting from old '%s/%s'.",
+ current_old[0][0].decode('utf8'),
+ current_old[0][1].decode('utf8'))
+ self._make_absent(current_old)
+ current_old = advance(old_iterator)
+ self._mark_modified()
+ self._id_index = None
+ self._packed_stat_index = None
+ if tracing:
+ trace.mutter("set_state_from_inventory complete.")
+
+ def set_state_from_scratch(self, working_inv, parent_trees, parent_ghosts):
+ """Wipe the currently stored state and set it to something new.
+
+ This is a hard-reset for the data we are working with.
+ """
+ # Technically, we really want a write lock, but until we write, we
+ # don't really need it.
+ self._requires_lock()
+ # root dir and root dir contents with no children. We have to have a
+ # root for set_state_from_inventory to work correctly.
+ empty_root = (('', '', inventory.ROOT_ID),
+ [('d', '', 0, False, DirState.NULLSTAT)])
+ empty_tree_dirblocks = [('', [empty_root]), ('', [])]
+ self._set_data([], empty_tree_dirblocks)
+ self.set_state_from_inventory(working_inv)
+ self.set_parent_trees(parent_trees, parent_ghosts)
+
+ def _make_absent(self, current_old):
+ """Mark current_old - an entry - as absent for tree 0.
+
+ :return: True if this was the last details entry for the entry key:
+ that is, if the underlying block has had the entry removed, thus
+ shrinking in length.
+ """
+ # build up paths that this id will be left at after the change is made,
+ # so we can update their cross references in tree 0
+ all_remaining_keys = set()
+        # Don't check the working tree, because it's going.
+ for details in current_old[1][1:]:
+ if details[0] not in 'ar': # absent, relocated
+ all_remaining_keys.add(current_old[0])
+ elif details[0] == 'r': # relocated
+ # record the key for the real path.
+ all_remaining_keys.add(tuple(osutils.split(details[1])) + (current_old[0][2],))
+ # absent rows are not present at any path.
+ last_reference = current_old[0] not in all_remaining_keys
+ if last_reference:
+            # the current row consists entirely of the current item (being marked
+            # absent), and relocated or absent entries for the other trees:
+            # Remove it; it's meaningless.
+ block = self._find_block(current_old[0])
+ entry_index, present = self._find_entry_index(current_old[0], block[1])
+ if not present:
+ raise AssertionError('could not find entry for %s' % (current_old,))
+ block[1].pop(entry_index)
+ # if we have an id_index in use, remove this key from it for this id.
+ if self._id_index is not None:
+ self._remove_from_id_index(self._id_index, current_old[0])
+ # update all remaining keys for this id to record it as absent. The
+ # existing details may either be the record we are marking as deleted
+ # (if there were other trees with the id present at this path), or may
+ # be relocations.
+ for update_key in all_remaining_keys:
+ update_block_index, present = \
+ self._find_block_index_from_key(update_key)
+ if not present:
+ raise AssertionError('could not find block for %s' % (update_key,))
+ update_entry_index, present = \
+ self._find_entry_index(update_key, self._dirblocks[update_block_index][1])
+ if not present:
+ raise AssertionError('could not find entry for %s' % (update_key,))
+ update_tree_details = self._dirblocks[update_block_index][1][update_entry_index][1]
+ # it must not be absent at the moment
+ if update_tree_details[0][0] == 'a': # absent
+ raise AssertionError('bad row %r' % (update_tree_details,))
+ update_tree_details[0] = DirState.NULL_PARENT_DETAILS
+ self._mark_modified()
+ return last_reference
+
+ def update_minimal(self, key, minikind, executable=False, fingerprint='',
+ packed_stat=None, size=0, path_utf8=None, fullscan=False):
+ """Update an entry to the state in tree 0.
+
+ This will either create a new entry at 'key' or update an existing one.
+ It also makes sure that any other records which might mention this are
+ updated as well.
+
+ :param key: (dir, name, file_id) for the new entry
+ :param minikind: The type for the entry ('f' == 'file', 'd' ==
+ 'directory'), etc.
+ :param executable: Should the executable bit be set?
+ :param fingerprint: Simple fingerprint for new entry: canonical-form
+ sha1 for files, referenced revision id for subtrees, etc.
+ :param packed_stat: Packed stat value for new entry.
+ :param size: Size information for new entry
+ :param path_utf8: key[0] + '/' + key[1], just passed in to avoid doing
+ extra computation.
+ :param fullscan: If True then a complete scan of the dirstate is being
+ done and checking for duplicate rows should not be done. This
+ should only be set by set_state_from_inventory and similar methods.
+
+ If packed_stat and fingerprint are not given, they're invalidated in
+ the entry.
+ """
+ block = self._find_block(key)[1]
+ if packed_stat is None:
+ packed_stat = DirState.NULLSTAT
+ # XXX: Some callers pass '' as the packed_stat, and it seems to be
+ # sometimes present in the dirstate - this seems oddly inconsistent.
+ # mbp 20071008
+ entry_index, present = self._find_entry_index(key, block)
+ new_details = (minikind, fingerprint, size, executable, packed_stat)
+ id_index = self._get_id_index()
+ if not present:
+            # New record. Check there isn't an entry at this path already.
+ if not fullscan:
+ low_index, _ = self._find_entry_index(key[0:2] + ('',), block)
+ while low_index < len(block):
+ entry = block[low_index]
+ if entry[0][0:2] == key[0:2]:
+ if entry[1][0][0] not in 'ar':
+                            # This entry has the same path (but a different id) as
+                            # the new entry we're adding, and is present in this
+                            # tree.
+ self._raise_invalid(
+ ("%s/%s" % key[0:2]).decode('utf8'), key[2],
+ "Attempt to add item at path already occupied by "
+ "id %r" % entry[0][2])
+ low_index += 1
+ else:
+ break
+            # new entry, synthesise cross references here,
+ existing_keys = id_index.get(key[2], ())
+ if not existing_keys:
+ # not currently in the state, simplest case
+ new_entry = key, [new_details] + self._empty_parent_info()
+ else:
+ # present at one or more existing other paths.
+ # grab one of them and use it to generate parent
+ # relocation/absent entries.
+ new_entry = key, [new_details]
+ # existing_keys can be changed as we iterate.
+ for other_key in tuple(existing_keys):
+ # change the record at other to be a pointer to this new
+ # record. The loop looks similar to the change to
+                    # relocations when updating an existing record but it's not:
+ # the test for existing kinds is different: this can be
+ # factored out to a helper though.
+ other_block_index, present = self._find_block_index_from_key(
+ other_key)
+ if not present:
+ raise AssertionError('could not find block for %s' % (
+ other_key,))
+ other_block = self._dirblocks[other_block_index][1]
+ other_entry_index, present = self._find_entry_index(
+ other_key, other_block)
+ if not present:
+ raise AssertionError(
+ 'update_minimal: could not find other entry for %s'
+ % (other_key,))
+ if path_utf8 is None:
+ raise AssertionError('no path')
+ # Turn this other location into a reference to the new
+ # location. This also updates the aliased iterator
+ # (current_old in set_state_from_inventory) so that the old
+ # entry, if not already examined, is skipped over by that
+ # loop.
+ other_entry = other_block[other_entry_index]
+ other_entry[1][0] = ('r', path_utf8, 0, False, '')
+ if self._maybe_remove_row(other_block, other_entry_index,
+ id_index):
+ # If the row holding this was removed, we need to
+ # recompute where this entry goes
+ entry_index, _ = self._find_entry_index(key, block)
+
+ # This loop:
+ # adds a tuple to the new details for each column
+ # - either by copying an existing relocation pointer inside that column
+ # - or by creating a new pointer to the right row inside that column
+ num_present_parents = self._num_present_parents()
+ if num_present_parents:
+ # TODO: This re-evaluates the existing_keys set, do we need
+ # to do that ourselves?
+ other_key = list(existing_keys)[0]
+ for lookup_index in xrange(1, num_present_parents + 1):
+ # grab any one entry, use it to find the right path.
+ # TODO: optimise this to reduce memory use in highly
+ # fragmented situations by reusing the relocation
+ # records.
+ update_block_index, present = \
+ self._find_block_index_from_key(other_key)
+ if not present:
+ raise AssertionError('could not find block for %s' % (other_key,))
+ update_entry_index, present = \
+ self._find_entry_index(other_key, self._dirblocks[update_block_index][1])
+ if not present:
+ raise AssertionError('update_minimal: could not find entry for %s' % (other_key,))
+ update_details = self._dirblocks[update_block_index][1][update_entry_index][1][lookup_index]
+ if update_details[0] in 'ar': # relocated, absent
+                        # it's a pointer or absent in lookup_index's tree, use
+                        # it as is.
+ new_entry[1].append(update_details)
+ else:
+ # we have the right key, make a pointer to it.
+ pointer_path = osutils.pathjoin(*other_key[0:2])
+ new_entry[1].append(('r', pointer_path, 0, False, ''))
+ block.insert(entry_index, new_entry)
+ self._add_to_id_index(id_index, key)
+ else:
+ # Does the new state matter?
+ block[entry_index][1][0] = new_details
+ # parents cannot be affected by what we do.
+            # other occurrences of this id can be found
+ # from the id index.
+ # ---
+ # tree index consistency: All other paths for this id in this tree
+ # index must point to the correct path. We have to loop here because
+ # we may have passed entries in the state with this file id already
+ # that were absent - where parent entries are - and they need to be
+ # converted to relocated.
+ if path_utf8 is None:
+ raise AssertionError('no path')
+ existing_keys = id_index.get(key[2], ())
+ if key not in existing_keys:
+ raise AssertionError('We found the entry in the blocks, but'
+ ' the key is not in the id_index.'
+ ' key: %s, existing_keys: %s' % (key, existing_keys))
+ for entry_key in existing_keys:
+ # TODO:PROFILING: It might be faster to just update
+ # rather than checking if we need to, and then overwrite
+ # the one we are located at.
+ if entry_key != key:
+ # this file id is at a different path in one of the
+ # other trees, so put absent pointers there
+ # This is the vertical axis in the matrix, all pointing
+ # to the real path.
+ block_index, present = self._find_block_index_from_key(entry_key)
+ if not present:
+ raise AssertionError('not present: %r', entry_key)
+ entry_index, present = self._find_entry_index(entry_key, self._dirblocks[block_index][1])
+ if not present:
+ raise AssertionError('not present: %r', entry_key)
+ self._dirblocks[block_index][1][entry_index][1][0] = \
+ ('r', path_utf8, 0, False, '')
+ # add a containing dirblock if needed.
+ if new_details[0] == 'd':
+ subdir_key = (osutils.pathjoin(*key[0:2]), '', '')
+ block_index, present = self._find_block_index_from_key(subdir_key)
+ if not present:
+ self._dirblocks.insert(block_index, (subdir_key[0], []))
+
+ self._mark_modified()
+
+ def _maybe_remove_row(self, block, index, id_index):
+        """Remove the entry at index if it is absent or relocated in every tree.
+
+ id_index is updated accordingly.
+ :return: True if we removed the row, False otherwise
+ """
+ present_in_row = False
+ entry = block[index]
+ for column in entry[1]:
+ if column[0] not in 'ar':
+ present_in_row = True
+ break
+ if not present_in_row:
+ block.pop(index)
+ self._remove_from_id_index(id_index, entry[0])
+ return True
+ return False
+
+ def _validate(self):
+ """Check that invariants on the dirblock are correct.
+
+ This can be useful in debugging; it shouldn't be necessary in
+ normal code.
+
+ This must be called with a lock held.
+ """
+ # NOTE: This must always raise AssertionError not just assert,
+ # otherwise it may not behave properly under python -O
+ #
+ # TODO: All entries must have some content that's not 'a' or 'r',
+ # otherwise it could just be removed.
+ #
+ # TODO: All relocations must point directly to a real entry.
+ #
+ # TODO: No repeated keys.
+ #
+ # -- mbp 20070325
+ from pprint import pformat
+ self._read_dirblocks_if_needed()
+ if len(self._dirblocks) > 0:
+ if not self._dirblocks[0][0] == '':
+ raise AssertionError(
+ "dirblocks don't start with root block:\n" + \
+ pformat(self._dirblocks))
+ if len(self._dirblocks) > 1:
+ if not self._dirblocks[1][0] == '':
+ raise AssertionError(
+ "dirblocks missing root directory:\n" + \
+ pformat(self._dirblocks))
+ # the dirblocks are sorted by their path components, name, and dir id
+ dir_names = [d[0].split('/')
+ for d in self._dirblocks[1:]]
+ if dir_names != sorted(dir_names):
+ raise AssertionError(
+ "dir names are not in sorted order:\n" + \
+ pformat(self._dirblocks) + \
+ "\nkeys:\n" +
+ pformat(dir_names))
+ for dirblock in self._dirblocks:
+ # within each dirblock, the entries are sorted by filename and
+ # then by id.
+ for entry in dirblock[1]:
+ if dirblock[0] != entry[0][0]:
+ raise AssertionError(
+                        "entry key for %r "
+ "doesn't match directory name in\n%r" %
+ (entry, pformat(dirblock)))
+ if dirblock[1] != sorted(dirblock[1]):
+ raise AssertionError(
+ "dirblock for %r is not sorted:\n%s" % \
+ (dirblock[0], pformat(dirblock)))
+
+ def check_valid_parent():
+ """Check that the current entry has a valid parent.
+
+ This makes sure that the parent has a record,
+ and that the parent isn't marked as "absent" in the
+ current tree. (It is invalid to have a non-absent file in an absent
+ directory.)
+ """
+ if entry[0][0:2] == ('', ''):
+ # There should be no parent for the root row
+ return
+ parent_entry = self._get_entry(tree_index, path_utf8=entry[0][0])
+ if parent_entry == (None, None):
+ raise AssertionError(
+ "no parent entry for: %s in tree %s"
+ % (this_path, tree_index))
+ if parent_entry[1][tree_index][0] != 'd':
+ raise AssertionError(
+ "Parent entry for %s is not marked as a valid"
+ " directory. %s" % (this_path, parent_entry,))
+
+ # For each file id, for each tree: either
+ # the file id is not present at all; all rows with that id in the
+ # key have it marked as 'absent'
+ # OR the file id is present under exactly one name; any other entries
+ # that mention that id point to the correct name.
+ #
+ # We check this with a dict per tree pointing either to the present
+ # name, or None if absent.
+ tree_count = self._num_present_parents() + 1
+ id_path_maps = [dict() for i in range(tree_count)]
+ # Make sure that all renamed entries point to the correct location.
+ for entry in self._iter_entries():
+ file_id = entry[0][2]
+ this_path = osutils.pathjoin(entry[0][0], entry[0][1])
+ if len(entry[1]) != tree_count:
+ raise AssertionError(
+ "wrong number of entry details for row\n%s" \
+ ",\nexpected %d" % \
+ (pformat(entry), tree_count))
+ absent_positions = 0
+ for tree_index, tree_state in enumerate(entry[1]):
+ this_tree_map = id_path_maps[tree_index]
+ minikind = tree_state[0]
+ if minikind in 'ar':
+ absent_positions += 1
+ # have we seen this id before in this column?
+ if file_id in this_tree_map:
+ previous_path, previous_loc = this_tree_map[file_id]
+ # any later mention of this file must be consistent with
+ # what was said before
+ if minikind == 'a':
+ if previous_path is not None:
+ raise AssertionError(
+ "file %s is absent in row %r but also present " \
+ "at %r"% \
+ (file_id, entry, previous_path))
+ elif minikind == 'r':
+ target_location = tree_state[1]
+ if previous_path != target_location:
+ raise AssertionError(
+ "file %s relocation in row %r but also at %r" \
+ % (file_id, entry, previous_path))
+ else:
+ # a file, directory, etc - may have been previously
+ # pointed to by a relocation, which must point here
+ if previous_path != this_path:
+ raise AssertionError(
+ "entry %r inconsistent with previous path %r "
+ "seen at %r" %
+ (entry, previous_path, previous_loc))
+ check_valid_parent()
+ else:
+ if minikind == 'a':
+ # absent; should not occur anywhere else
+ this_tree_map[file_id] = None, this_path
+ elif minikind == 'r':
+ # relocation, must occur at expected location
+ this_tree_map[file_id] = tree_state[1], this_path
+ else:
+ this_tree_map[file_id] = this_path, this_path
+ check_valid_parent()
+ if absent_positions == tree_count:
+ raise AssertionError(
+ "entry %r has no data for any tree." % (entry,))
+ if self._id_index is not None:
+ for file_id, entry_keys in self._id_index.iteritems():
+ for entry_key in entry_keys:
+ if entry_key[2] != file_id:
+ raise AssertionError(
+ 'file_id %r did not match entry key %s'
+ % (file_id, entry_key))
+ if len(entry_keys) != len(set(entry_keys)):
+ raise AssertionError(
+ 'id_index contained non-unique data for %s'
+ % (entry_keys,))
+
+ def _wipe_state(self):
+ """Forget all state information about the dirstate."""
+ self._header_state = DirState.NOT_IN_MEMORY
+ self._dirblock_state = DirState.NOT_IN_MEMORY
+ self._changes_aborted = False
+ self._parents = []
+ self._ghosts = []
+ self._dirblocks = []
+ self._id_index = None
+ self._packed_stat_index = None
+ self._end_of_header = None
+ self._cutoff_time = None
+ self._split_path_cache = {}
+
+ def lock_read(self):
+ """Acquire a read lock on the dirstate."""
+ if self._lock_token is not None:
+ raise errors.LockContention(self._lock_token)
+ # TODO: jam 20070301 Rather than wiping completely, if the blocks are
+ # already in memory, we could read just the header and check for
+ # any modification. If not modified, we can just leave things
+ # alone
+ self._lock_token = lock.ReadLock(self._filename)
+ self._lock_state = 'r'
+ self._state_file = self._lock_token.f
+ self._wipe_state()
+
+ def lock_write(self):
+ """Acquire a write lock on the dirstate."""
+ if self._lock_token is not None:
+ raise errors.LockContention(self._lock_token)
+ # TODO: jam 20070301 Rather than wiping completely, if the blocks are
+ # already in memory, we could read just the header and check for
+ # any modification. If not modified, we can just leave things
+ # alone
+ self._lock_token = lock.WriteLock(self._filename)
+ self._lock_state = 'w'
+ self._state_file = self._lock_token.f
+ self._wipe_state()
+
+ def unlock(self):
+ """Drop any locks held on the dirstate."""
+ if self._lock_token is None:
+ raise errors.LockNotHeld(self)
+ # TODO: jam 20070301 Rather than wiping completely, if the blocks are
+ # already in memory, we could read just the header and check for
+ # any modification. If not modified, we can just leave things
+ # alone
+ self._state_file = None
+ self._lock_state = None
+ self._lock_token.unlock()
+ self._lock_token = None
+ self._split_path_cache = {}
+
+ def _requires_lock(self):
+ """Check that a lock is currently held by someone on the dirstate."""
+ if not self._lock_token:
+ raise errors.ObjectNotLocked(self)
+
+
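
For readers following the methods above and the module-level helpers below, it may
help to spell out the entry layout they all manipulate. Each dirstate entry is a
(key, tree_details) pair: the key is (dirname_utf8, basename_utf8, file_id), and
tree_details is a list with one tuple per tree of (minikind, fingerprint, size,
executable, packed_stat), where minikind is 'f' file, 'd' directory, 'l' symlink,
't' tree reference, 'a' absent or 'r' relocated. A minimal sketch with illustrative
values only, not taken from this diff:

    key = ('docs', 'index.txt', 'index-file-id')        # (dirname, basename, file_id)
    tree0 = ('f', 'sha1-of-current-content', 1234, False, 'packed-stat-blob')
    parent = ('r', 'old-docs/index.txt', 0, False, '')  # relocated in the basis tree
    entry = (key, [tree0, parent])

The fingerprint holds the sha1 for files, the link target for symlinks, the
referenced revision id for tree references, and the real path for 'r' rows,
matching what update_minimal() and py_update_entry() read and write.
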
+def py_update_entry(state, entry, abspath, stat_value,
+ _stat_to_minikind=DirState._stat_to_minikind):
+ """Update the entry based on what is actually on disk.
+
+ This function only calculates the sha if it needs to - if the entry is
+ uncachable, or clearly different to the first parent's entry, no sha
+ is calculated, and None is returned.
+
+ :param state: The dirstate this entry is in.
+ :param entry: This is the dirblock entry for the file in question.
+ :param abspath: The path on disk for this file.
+ :param stat_value: The stat value done on the path.
+    :return: None, or the sha1 hexdigest of the file (40 bytes) or link
+ target of a symlink.
+ """
+ try:
+ minikind = _stat_to_minikind[stat_value.st_mode & 0170000]
+ except KeyError:
+ # Unhandled kind
+ return None
+ packed_stat = pack_stat(stat_value)
+ (saved_minikind, saved_link_or_sha1, saved_file_size,
+ saved_executable, saved_packed_stat) = entry[1][0]
+
+ if minikind == 'd' and saved_minikind == 't':
+ minikind = 't'
+ if (minikind == saved_minikind
+ and packed_stat == saved_packed_stat):
+ # The stat hasn't changed since we saved, so we can re-use the
+ # saved sha hash.
+ if minikind == 'd':
+ return None
+
+ # size should also be in packed_stat
+ if saved_file_size == stat_value.st_size:
+ return saved_link_or_sha1
+
+ # If we have gotten this far, that means that we need to actually
+ # process this entry.
+ link_or_sha1 = None
+ worth_saving = True
+ if minikind == 'f':
+ executable = state._is_executable(stat_value.st_mode,
+ saved_executable)
+ if state._cutoff_time is None:
+ state._sha_cutoff_time()
+ if (stat_value.st_mtime < state._cutoff_time
+ and stat_value.st_ctime < state._cutoff_time
+ and len(entry[1]) > 1
+ and entry[1][1][0] != 'a'):
+ # Could check for size changes for further optimised
+ # avoidance of sha1's. However the most prominent case of
+ # over-shaing is during initial add, which this catches.
+ # Besides, if content filtering happens, size and sha
+ # are calculated at the same time, so checking just the size
+ # gains nothing w.r.t. performance.
+ link_or_sha1 = state._sha1_file(abspath)
+ entry[1][0] = ('f', link_or_sha1, stat_value.st_size,
+ executable, packed_stat)
+ else:
+ entry[1][0] = ('f', '', stat_value.st_size,
+ executable, DirState.NULLSTAT)
+ worth_saving = False
+ elif minikind == 'd':
+ link_or_sha1 = None
+ entry[1][0] = ('d', '', 0, False, packed_stat)
+ if saved_minikind != 'd':
+ # This changed from something into a directory. Make sure we
+ # have a directory block for it. This doesn't happen very
+ # often, so this doesn't have to be super fast.
+ block_index, entry_index, dir_present, file_present = \
+ state._get_block_entry_index(entry[0][0], entry[0][1], 0)
+ state._ensure_block(block_index, entry_index,
+ osutils.pathjoin(entry[0][0], entry[0][1]))
+ else:
+ worth_saving = False
+ elif minikind == 'l':
+ if saved_minikind == 'l':
+ worth_saving = False
+ link_or_sha1 = state._read_link(abspath, saved_link_or_sha1)
+ if state._cutoff_time is None:
+ state._sha_cutoff_time()
+ if (stat_value.st_mtime < state._cutoff_time
+ and stat_value.st_ctime < state._cutoff_time):
+ entry[1][0] = ('l', link_or_sha1, stat_value.st_size,
+ False, packed_stat)
+ else:
+ entry[1][0] = ('l', '', stat_value.st_size,
+ False, DirState.NULLSTAT)
+ if worth_saving:
+ state._mark_modified([entry])
+ return link_or_sha1
+
+
+class ProcessEntryPython(object):
+
+ __slots__ = ["old_dirname_to_file_id", "new_dirname_to_file_id",
+ "last_source_parent", "last_target_parent", "include_unchanged",
+ "partial", "use_filesystem_for_exec", "utf8_decode",
+ "searched_specific_files", "search_specific_files",
+ "searched_exact_paths", "search_specific_file_parents", "seen_ids",
+ "state", "source_index", "target_index", "want_unversioned", "tree"]
+
+ def __init__(self, include_unchanged, use_filesystem_for_exec,
+ search_specific_files, state, source_index, target_index,
+ want_unversioned, tree):
+ self.old_dirname_to_file_id = {}
+ self.new_dirname_to_file_id = {}
+ # Are we doing a partial iter_changes?
+ self.partial = search_specific_files != set([''])
+        # Using a list so that we can access the values and change them in
+        # nested scope. Each one is [path, file_id].
+ self.last_source_parent = [None, None]
+ self.last_target_parent = [None, None]
+ self.include_unchanged = include_unchanged
+ self.use_filesystem_for_exec = use_filesystem_for_exec
+ self.utf8_decode = cache_utf8._utf8_decode
+        # for all search indexes in each path at or under each element of
+        # search_specific_files, if the detail is relocated: add the id, and
+        # add the relocated path as one to search if it's not searched already.
+        # If the detail is not relocated, add the id.
+ self.searched_specific_files = set()
+ # When we search exact paths without expanding downwards, we record
+ # that here.
+ self.searched_exact_paths = set()
+ self.search_specific_files = search_specific_files
+ # The parents up to the root of the paths we are searching.
+ # After all normal paths are returned, these specific items are returned.
+ self.search_specific_file_parents = set()
+ # The ids we've sent out in the delta.
+ self.seen_ids = set()
+ self.state = state
+ self.source_index = source_index
+ self.target_index = target_index
+ if target_index != 0:
+ # A lot of code in here depends on target_index == 0
+ raise errors.BzrError('unsupported target index')
+ self.want_unversioned = want_unversioned
+ self.tree = tree
+
+ def _process_entry(self, entry, path_info, pathjoin=osutils.pathjoin):
+ """Compare an entry and real disk to generate delta information.
+
+ :param path_info: top_relpath, basename, kind, lstat, abspath for
+ the path of entry. If None, then the path is considered absent in
+ the target (Perhaps we should pass in a concrete entry for this ?)
+ Basename is returned as a utf8 string because we expect this
+ tuple will be ignored, and don't want to take the time to
+ decode.
+ :return: (iter_changes_result, changed). If the entry has not been
+ handled then changed is None. Otherwise it is False if no content
+ or metadata changes have occurred, and True if any content or
+ metadata change has occurred. If self.include_unchanged is True then
+ if changed is not None, iter_changes_result will always be a result
+ tuple. Otherwise, iter_changes_result is None unless changed is
+ True.
+ """
+ if self.source_index is None:
+ source_details = DirState.NULL_PARENT_DETAILS
+ else:
+ source_details = entry[1][self.source_index]
+ target_details = entry[1][self.target_index]
+ target_minikind = target_details[0]
+ if path_info is not None and target_minikind in 'fdlt':
+ if not (self.target_index == 0):
+ raise AssertionError()
+ link_or_sha1 = update_entry(self.state, entry,
+ abspath=path_info[4], stat_value=path_info[3])
+ # The entry may have been modified by update_entry
+ target_details = entry[1][self.target_index]
+ target_minikind = target_details[0]
+ else:
+ link_or_sha1 = None
+ file_id = entry[0][2]
+ source_minikind = source_details[0]
+ if source_minikind in 'fdltr' and target_minikind in 'fdlt':
+ # claimed content in both: diff
+ # r | fdlt | | add source to search, add id path move and perform
+ # | | | diff check on source-target
+ # r | fdlt | a | dangling file that was present in the basis.
+ # | | | ???
+ if source_minikind in 'r':
+ # add the source to the search path to find any children it
+ # has. TODO ? : only add if it is a container ?
+ if not osutils.is_inside_any(self.searched_specific_files,
+ source_details[1]):
+ self.search_specific_files.add(source_details[1])
+ # generate the old path; this is needed for stating later
+ # as well.
+ old_path = source_details[1]
+ old_dirname, old_basename = os.path.split(old_path)
+ path = pathjoin(entry[0][0], entry[0][1])
+ old_entry = self.state._get_entry(self.source_index,
+ path_utf8=old_path)
+ # update the source details variable to be the real
+ # location.
+ if old_entry == (None, None):
+ raise errors.CorruptDirstate(self.state._filename,
+ "entry '%s/%s' is considered renamed from %r"
+ " but source does not exist\n"
+ "entry: %s" % (entry[0][0], entry[0][1], old_path, entry))
+ source_details = old_entry[1][self.source_index]
+ source_minikind = source_details[0]
+ else:
+ old_dirname = entry[0][0]
+ old_basename = entry[0][1]
+ old_path = path = None
+ if path_info is None:
+ # the file is missing on disk, show as removed.
+ content_change = True
+ target_kind = None
+ target_exec = False
+ else:
+ # source and target are both versioned and disk file is present.
+ target_kind = path_info[2]
+ if target_kind == 'directory':
+ if path is None:
+ old_path = path = pathjoin(old_dirname, old_basename)
+ self.new_dirname_to_file_id[path] = file_id
+ if source_minikind != 'd':
+ content_change = True
+ else:
+ # directories have no fingerprint
+ content_change = False
+ target_exec = False
+ elif target_kind == 'file':
+ if source_minikind != 'f':
+ content_change = True
+ else:
+ # Check the sha. We can't just rely on the size as
+                        # content filtering may mean differing sizes actually
+ # map to the same content
+ if link_or_sha1 is None:
+ # Stat cache miss:
+ statvalue, link_or_sha1 = \
+ self.state._sha1_provider.stat_and_sha1(
+ path_info[4])
+ self.state._observed_sha1(entry, link_or_sha1,
+ statvalue)
+ content_change = (link_or_sha1 != source_details[1])
+                        # Target details are updated at update_entry time
+ if self.use_filesystem_for_exec:
+ # We don't need S_ISREG here, because we are sure
+ # we are dealing with a file.
+ target_exec = bool(stat.S_IEXEC & path_info[3].st_mode)
+ else:
+ target_exec = target_details[3]
+ elif target_kind == 'symlink':
+ if source_minikind != 'l':
+ content_change = True
+ else:
+ content_change = (link_or_sha1 != source_details[1])
+ target_exec = False
+ elif target_kind == 'tree-reference':
+ if source_minikind != 't':
+ content_change = True
+ else:
+ content_change = False
+ target_exec = False
+ else:
+ if path is None:
+ path = pathjoin(old_dirname, old_basename)
+ raise errors.BadFileKindError(path, path_info[2])
+ if source_minikind == 'd':
+ if path is None:
+ old_path = path = pathjoin(old_dirname, old_basename)
+ self.old_dirname_to_file_id[old_path] = file_id
+ # parent id is the entry for the path in the target tree
+ if old_basename and old_dirname == self.last_source_parent[0]:
+ source_parent_id = self.last_source_parent[1]
+ else:
+ try:
+ source_parent_id = self.old_dirname_to_file_id[old_dirname]
+ except KeyError:
+ source_parent_entry = self.state._get_entry(self.source_index,
+ path_utf8=old_dirname)
+ source_parent_id = source_parent_entry[0][2]
+ if source_parent_id == entry[0][2]:
+ # This is the root, so the parent is None
+ source_parent_id = None
+ else:
+ self.last_source_parent[0] = old_dirname
+ self.last_source_parent[1] = source_parent_id
+ new_dirname = entry[0][0]
+ if entry[0][1] and new_dirname == self.last_target_parent[0]:
+ target_parent_id = self.last_target_parent[1]
+ else:
+ try:
+ target_parent_id = self.new_dirname_to_file_id[new_dirname]
+ except KeyError:
+ # TODO: We don't always need to do the lookup, because the
+ # parent entry will be the same as the source entry.
+ target_parent_entry = self.state._get_entry(self.target_index,
+ path_utf8=new_dirname)
+ if target_parent_entry == (None, None):
+ raise AssertionError(
+ "Could not find target parent in wt: %s\nparent of: %s"
+ % (new_dirname, entry))
+ target_parent_id = target_parent_entry[0][2]
+ if target_parent_id == entry[0][2]:
+ # This is the root, so the parent is None
+ target_parent_id = None
+ else:
+ self.last_target_parent[0] = new_dirname
+ self.last_target_parent[1] = target_parent_id
+
+ source_exec = source_details[3]
+ changed = (content_change
+ or source_parent_id != target_parent_id
+ or old_basename != entry[0][1]
+ or source_exec != target_exec
+ )
+ if not changed and not self.include_unchanged:
+ return None, False
+ else:
+ if old_path is None:
+ old_path = path = pathjoin(old_dirname, old_basename)
+ old_path_u = self.utf8_decode(old_path)[0]
+ path_u = old_path_u
+ else:
+ old_path_u = self.utf8_decode(old_path)[0]
+ if old_path == path:
+ path_u = old_path_u
+ else:
+ path_u = self.utf8_decode(path)[0]
+ source_kind = DirState._minikind_to_kind[source_minikind]
+ return (entry[0][2],
+ (old_path_u, path_u),
+ content_change,
+ (True, True),
+ (source_parent_id, target_parent_id),
+ (self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
+ (source_kind, target_kind),
+ (source_exec, target_exec)), changed
+ elif source_minikind in 'a' and target_minikind in 'fdlt':
+ # looks like a new file
+ path = pathjoin(entry[0][0], entry[0][1])
+ # parent id is the entry for the path in the target tree
+ # TODO: these are the same for an entire directory: cache em.
+ parent_id = self.state._get_entry(self.target_index,
+ path_utf8=entry[0][0])[0][2]
+ if parent_id == entry[0][2]:
+ parent_id = None
+ if path_info is not None:
+ # Present on disk:
+ if self.use_filesystem_for_exec:
+ # We need S_ISREG here, because we aren't sure if this
+ # is a file or not.
+ target_exec = bool(
+ stat.S_ISREG(path_info[3].st_mode)
+ and stat.S_IEXEC & path_info[3].st_mode)
+ else:
+ target_exec = target_details[3]
+ return (entry[0][2],
+ (None, self.utf8_decode(path)[0]),
+ True,
+ (False, True),
+ (None, parent_id),
+ (None, self.utf8_decode(entry[0][1])[0]),
+ (None, path_info[2]),
+ (None, target_exec)), True
+ else:
+                # It's a missing file, report it as such.
+ return (entry[0][2],
+ (None, self.utf8_decode(path)[0]),
+ False,
+ (False, True),
+ (None, parent_id),
+ (None, self.utf8_decode(entry[0][1])[0]),
+ (None, None),
+ (None, False)), True
+ elif source_minikind in 'fdlt' and target_minikind in 'a':
+            # unversioned, possibly, or possibly not deleted: we don't care.
+            # if it's still on disk, *and* there's no other entry at this
+            # path [we don't know this in this routine at the moment -
+            # perhaps we should change this] - then it would be an unknown.
+ old_path = pathjoin(entry[0][0], entry[0][1])
+ # parent id is the entry for the path in the target tree
+ parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2]
+ if parent_id == entry[0][2]:
+ parent_id = None
+ return (entry[0][2],
+ (self.utf8_decode(old_path)[0], None),
+ True,
+ (True, False),
+ (parent_id, None),
+ (self.utf8_decode(entry[0][1])[0], None),
+ (DirState._minikind_to_kind[source_minikind], None),
+ (source_details[3], None)), True
+ elif source_minikind in 'fdlt' and target_minikind in 'r':
+            # a renamed parent. TODO: handle this efficiently. It's not
+            # a common case to rename dirs though, so a correct but slow
+            # implementation will do.
+ # implementation will do.
+ if not osutils.is_inside_any(self.searched_specific_files, target_details[1]):
+ self.search_specific_files.add(target_details[1])
+ elif source_minikind in 'ra' and target_minikind in 'ra':
+            # neither of the selected trees contains this file,
+ # so skip over it. This is not currently directly tested, but
+ # is indirectly via test_too_much.TestCommands.test_conflicts.
+ pass
+ else:
+ raise AssertionError("don't know how to compare "
+ "source_minikind=%r, target_minikind=%r"
+ % (source_minikind, target_minikind))
+ return None, None
+
+ def __iter__(self):
+ return self
+
+ def _gather_result_for_consistency(self, result):
+ """Check a result we will yield to make sure we are consistent later.
+
+ This gathers result's parents into a set to output later.
+
+ :param result: A result tuple.
+ """
+ if not self.partial or not result[0]:
+ return
+ self.seen_ids.add(result[0])
+ new_path = result[1][1]
+ if new_path:
+ # Not the root and not a delete: queue up the parents of the path.
+ self.search_specific_file_parents.update(
+ osutils.parent_directories(new_path.encode('utf8')))
+ # Add the root directory which parent_directories does not
+ # provide.
+ self.search_specific_file_parents.add('')
+
+ def iter_changes(self):
+ """Iterate over the changes."""
+ utf8_decode = cache_utf8._utf8_decode
+ _cmp_by_dirs = cmp_by_dirs
+ _process_entry = self._process_entry
+ search_specific_files = self.search_specific_files
+ searched_specific_files = self.searched_specific_files
+ splitpath = osutils.splitpath
+ # sketch:
+        # compare source_index and target_index at or under each element of search_specific_files.
+        # use the following comparison table. Note that we only want to do diff operations when
+        # the target is fdl because that's when the walkdirs logic will have exposed the pathinfo
+        # for the target.
+ # cases:
+ #
+ # Source | Target | disk | action
+ # r | fdlt | | add source to search, add id path move and perform
+ # | | | diff check on source-target
+ # r | fdlt | a | dangling file that was present in the basis.
+ # | | | ???
+ # r | a | | add source to search
+ # r | a | a |
+ # r | r | | this path is present in a non-examined tree, skip.
+ # r | r | a | this path is present in a non-examined tree, skip.
+ # a | fdlt | | add new id
+ # a | fdlt | a | dangling locally added file, skip
+ # a | a | | not present in either tree, skip
+ # a | a | a | not present in any tree, skip
+ # a | r | | not present in either tree at this path, skip as it
+ # | | | may not be selected by the users list of paths.
+ # a | r | a | not present in either tree at this path, skip as it
+ # | | | may not be selected by the users list of paths.
+ # fdlt | fdlt | | content in both: diff them
+ # fdlt | fdlt | a | deleted locally, but not unversioned - show as deleted ?
+ # fdlt | a | | unversioned: output deleted id for now
+ # fdlt | a | a | unversioned and deleted: output deleted id
+ # fdlt | r | | relocated in this tree, so add target to search.
+ # | | | Dont diff, we will see an r,fd; pair when we reach
+ # | | | this id at the other path.
+ # fdlt | r | a | relocated in this tree, so add target to search.
+ # | | | Dont diff, we will see an r,fd; pair when we reach
+ # | | | this id at the other path.
+
+ # TODO: jam 20070516 - Avoid the _get_entry lookup overhead by
+ # keeping a cache of directories that we have seen.
+
+ while search_specific_files:
+ # TODO: the pending list should be lexically sorted? the
+ # interface doesn't require it.
+ current_root = search_specific_files.pop()
+ current_root_unicode = current_root.decode('utf8')
+ searched_specific_files.add(current_root)
+ # process the entries for this containing directory: the rest will be
+ # found by their parents recursively.
+ root_entries = self.state._entries_for_path(current_root)
+ root_abspath = self.tree.abspath(current_root_unicode)
+ try:
+ root_stat = os.lstat(root_abspath)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # the path does not exist: let _process_entry know that.
+ root_dir_info = None
+ else:
+ # some other random error: hand it up.
+ raise
+ else:
+ root_dir_info = ('', current_root,
+ osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
+ root_abspath)
+ if root_dir_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ current_root.decode('utf8')):
+ root_dir_info = root_dir_info[:2] + \
+ ('tree-reference',) + root_dir_info[3:]
+
+ if not root_entries and not root_dir_info:
+ # this specified path is not present at all, skip it.
+ continue
+ path_handled = False
+ for entry in root_entries:
+ result, changed = _process_entry(entry, root_dir_info)
+ if changed is not None:
+ path_handled = True
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ yield result
+ if self.want_unversioned and not path_handled and root_dir_info:
+ new_executable = bool(
+ stat.S_ISREG(root_dir_info[3].st_mode)
+ and stat.S_IEXEC & root_dir_info[3].st_mode)
+ yield (None,
+ (None, current_root_unicode),
+ True,
+ (False, False),
+ (None, None),
+ (None, splitpath(current_root_unicode)[-1]),
+ (None, root_dir_info[2]),
+ (None, new_executable)
+ )
+ initial_key = (current_root, '', '')
+ block_index, _ = self.state._find_block_index_from_key(initial_key)
+ if block_index == 0:
+ # we have processed the total root already, but because the
+ # initial key matched it we should skip it here.
+ block_index +=1
+ if root_dir_info and root_dir_info[2] == 'tree-reference':
+ current_dir_info = None
+ else:
+ dir_iterator = osutils._walkdirs_utf8(root_abspath, prefix=current_root)
+ try:
+ current_dir_info = dir_iterator.next()
+ except OSError, e:
+ # on win32, python2.4 has e.errno == ERROR_DIRECTORY, but
+ # python 2.5 has e.errno == EINVAL,
+ # and e.winerror == ERROR_DIRECTORY
+ e_winerror = getattr(e, 'winerror', None)
+ win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
+ # there may be directories in the inventory even though
+ # this path is not a file on disk: so mark it as end of
+ # iterator
+ if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
+ current_dir_info = None
+ elif (sys.platform == 'win32'
+ and (e.errno in win_errors
+ or e_winerror in win_errors)):
+ current_dir_info = None
+ else:
+ raise
+ else:
+ if current_dir_info[0][0] == '':
+ # remove .bzr from iteration
+ bzr_index = bisect.bisect_left(current_dir_info[1], ('.bzr',))
+ if current_dir_info[1][bzr_index][0] != '.bzr':
+ raise AssertionError()
+ del current_dir_info[1][bzr_index]
+ # walk until both the directory listing and the versioned metadata
+ # are exhausted.
+ if (block_index < len(self.state._dirblocks) and
+ osutils.is_inside(current_root, self.state._dirblocks[block_index][0])):
+ current_block = self.state._dirblocks[block_index]
+ else:
+ current_block = None
+ while (current_dir_info is not None or
+ current_block is not None):
+ if (current_dir_info and current_block
+ and current_dir_info[0][0] != current_block[0]):
+ if _cmp_by_dirs(current_dir_info[0][0], current_block[0]) < 0:
+ # filesystem data refers to paths not covered by the dirblock.
+ # this has two possibilities:
+ # A) it is versioned but empty, so there is no block for it
+ # B) it is not versioned.
+
+ # if (A) then we need to recurse into it to check for
+ # new unknown files or directories.
+ # if (B) then we should ignore it, because we don't
+ # recurse into unknown directories.
+ path_index = 0
+ while path_index < len(current_dir_info[1]):
+ current_path_info = current_dir_info[1][path_index]
+ if self.want_unversioned:
+ if current_path_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ current_path_info[0].decode('utf8')):
+ current_path_info = current_path_info[:2] + \
+ ('tree-reference',) + current_path_info[3:]
+ new_executable = bool(
+ stat.S_ISREG(current_path_info[3].st_mode)
+ and stat.S_IEXEC & current_path_info[3].st_mode)
+ yield (None,
+ (None, utf8_decode(current_path_info[0])[0]),
+ True,
+ (False, False),
+ (None, None),
+ (None, utf8_decode(current_path_info[1])[0]),
+ (None, current_path_info[2]),
+ (None, new_executable))
+                            # don't descend into this unversioned path if it is
+                            # a dir
+ if current_path_info[2] in ('directory',
+ 'tree-reference'):
+ del current_dir_info[1][path_index]
+ path_index -= 1
+ path_index += 1
+
+ # This dir info has been handled, go to the next
+ try:
+ current_dir_info = dir_iterator.next()
+ except StopIteration:
+ current_dir_info = None
+ else:
+ # We have a dirblock entry for this location, but there
+ # is no filesystem path for this. This is most likely
+ # because a directory was removed from the disk.
+ # We don't have to report the missing directory,
+ # because that should have already been handled, but we
+ # need to handle all of the files that are contained
+ # within.
+ for current_entry in current_block[1]:
+ # entry referring to file not present on disk.
+ # advance the entry only, after processing.
+ result, changed = _process_entry(current_entry, None)
+ if changed is not None:
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ yield result
+ block_index +=1
+ if (block_index < len(self.state._dirblocks) and
+ osutils.is_inside(current_root,
+ self.state._dirblocks[block_index][0])):
+ current_block = self.state._dirblocks[block_index]
+ else:
+ current_block = None
+ continue
+ entry_index = 0
+ if current_block and entry_index < len(current_block[1]):
+ current_entry = current_block[1][entry_index]
+ else:
+ current_entry = None
+ advance_entry = True
+ path_index = 0
+ if current_dir_info and path_index < len(current_dir_info[1]):
+ current_path_info = current_dir_info[1][path_index]
+ if current_path_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ current_path_info[0].decode('utf8')):
+ current_path_info = current_path_info[:2] + \
+ ('tree-reference',) + current_path_info[3:]
+ else:
+ current_path_info = None
+ advance_path = True
+ path_handled = False
+ while (current_entry is not None or
+ current_path_info is not None):
+ if current_entry is None:
+ # the check for path_handled when the path is advanced
+ # will yield this path if needed.
+ pass
+ elif current_path_info is None:
+ # no path is fine: the per entry code will handle it.
+ result, changed = _process_entry(current_entry, current_path_info)
+ if changed is not None:
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ yield result
+ elif (current_entry[0][1] != current_path_info[1]
+ or current_entry[1][self.target_index][0] in 'ar'):
+ # The current path on disk doesn't match the dirblock
+ # record. Either the dirblock is marked as absent, or
+ # the file on disk is not present at all in the
+ # dirblock. Either way, report about the dirblock
+ # entry, and let other code handle the filesystem one.
+
+ # Compare the basename for these files to determine
+ # which comes first
+ if current_path_info[1] < current_entry[0][1]:
+ # extra file on disk: pass for now, but only
+ # increment the path, not the entry
+ advance_entry = False
+ else:
+ # entry referring to file not present on disk.
+ # advance the entry only, after processing.
+ result, changed = _process_entry(current_entry, None)
+ if changed is not None:
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ yield result
+ advance_path = False
+ else:
+ result, changed = _process_entry(current_entry, current_path_info)
+ if changed is not None:
+ path_handled = True
+ if changed:
+ self._gather_result_for_consistency(result)
+ if changed or self.include_unchanged:
+ yield result
+ if advance_entry and current_entry is not None:
+ entry_index += 1
+ if entry_index < len(current_block[1]):
+ current_entry = current_block[1][entry_index]
+ else:
+ current_entry = None
+ else:
+                        advance_entry = True # reset the advance flag
+ if advance_path and current_path_info is not None:
+ if not path_handled:
+ # unversioned in all regards
+ if self.want_unversioned:
+ new_executable = bool(
+ stat.S_ISREG(current_path_info[3].st_mode)
+ and stat.S_IEXEC & current_path_info[3].st_mode)
+ try:
+ relpath_unicode = utf8_decode(current_path_info[0])[0]
+ except UnicodeDecodeError:
+ raise errors.BadFilenameEncoding(
+ current_path_info[0], osutils._fs_enc)
+ yield (None,
+ (None, relpath_unicode),
+ True,
+ (False, False),
+ (None, None),
+ (None, utf8_decode(current_path_info[1])[0]),
+ (None, current_path_info[2]),
+ (None, new_executable))
+                            # don't descend into this unversioned path if it is
+                            # a dir
+                            if current_path_info[2] in ('directory',):
+ del current_dir_info[1][path_index]
+ path_index -= 1
+ # dont descend the disk iterator into any tree
+ # paths.
+ if current_path_info[2] == 'tree-reference':
+ del current_dir_info[1][path_index]
+ path_index -= 1
+ path_index += 1
+ if path_index < len(current_dir_info[1]):
+ current_path_info = current_dir_info[1][path_index]
+ if current_path_info[2] == 'directory':
+ if self.tree._directory_is_tree_reference(
+ current_path_info[0].decode('utf8')):
+ current_path_info = current_path_info[:2] + \
+ ('tree-reference',) + current_path_info[3:]
+ else:
+ current_path_info = None
+ path_handled = False
+ else:
+                        advance_path = True # reset the advance flag.
+ if current_block is not None:
+ block_index += 1
+ if (block_index < len(self.state._dirblocks) and
+ osutils.is_inside(current_root, self.state._dirblocks[block_index][0])):
+ current_block = self.state._dirblocks[block_index]
+ else:
+ current_block = None
+ if current_dir_info is not None:
+ try:
+ current_dir_info = dir_iterator.next()
+ except StopIteration:
+ current_dir_info = None
+ for result in self._iter_specific_file_parents():
+ yield result
+
+ def _iter_specific_file_parents(self):
+ """Iter over the specific file parents."""
+ while self.search_specific_file_parents:
+ # Process the parent directories for the paths we were iterating.
+ # Even in extremely large trees this should be modest, so currently
+ # no attempt is made to optimise.
+ path_utf8 = self.search_specific_file_parents.pop()
+ if osutils.is_inside_any(self.searched_specific_files, path_utf8):
+ # We've examined this path.
+ continue
+ if path_utf8 in self.searched_exact_paths:
+ # We've examined this path.
+ continue
+ path_entries = self.state._entries_for_path(path_utf8)
+ # We need either one or two entries. If the path in
+ # self.target_index has moved (so the entry in source_index is in
+ # 'ar') then we need to also look for the entry for this path in
+ # self.source_index, to output the appropriate delete-or-rename.
+ selected_entries = []
+ found_item = False
+ for candidate_entry in path_entries:
+ # Find entries present in target at this path:
+ if candidate_entry[1][self.target_index][0] not in 'ar':
+ found_item = True
+ selected_entries.append(candidate_entry)
+ # Find entries present in source at this path:
+ elif (self.source_index is not None and
+ candidate_entry[1][self.source_index][0] not in 'ar'):
+ found_item = True
+ if candidate_entry[1][self.target_index][0] == 'a':
+ # Deleted, emit it here.
+ selected_entries.append(candidate_entry)
+ else:
+ # renamed, emit it when we process the directory it
+ # ended up at.
+ self.search_specific_file_parents.add(
+ candidate_entry[1][self.target_index][1])
+ if not found_item:
+ raise AssertionError(
+ "Missing entry for specific path parent %r, %r" % (
+ path_utf8, path_entries))
+ path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
+ for entry in selected_entries:
+ if entry[0][2] in self.seen_ids:
+ continue
+ result, changed = self._process_entry(entry, path_info)
+ if changed is None:
+ raise AssertionError(
+ "Got entry<->path mismatch for specific path "
+ "%r entry %r path_info %r " % (
+ path_utf8, entry, path_info))
+                # Only include changes - we're outside the user's requested
+                # expansion.
+ if changed:
+ self._gather_result_for_consistency(result)
+ if (result[6][0] == 'directory' and
+ result[6][1] != 'directory'):
+ # This stopped being a directory, the old children have
+ # to be included.
+ if entry[1][self.source_index][0] == 'r':
+ # renamed, take the source path
+ entry_path_utf8 = entry[1][self.source_index][1]
+ else:
+ entry_path_utf8 = path_utf8
+ initial_key = (entry_path_utf8, '', '')
+ block_index, _ = self.state._find_block_index_from_key(
+ initial_key)
+ if block_index == 0:
+ # The children of the root are in block index 1.
+ block_index +=1
+ current_block = None
+ if block_index < len(self.state._dirblocks):
+ current_block = self.state._dirblocks[block_index]
+ if not osutils.is_inside(
+ entry_path_utf8, current_block[0]):
+ # No entries for this directory at all.
+ current_block = None
+ if current_block is not None:
+ for entry in current_block[1]:
+ if entry[1][self.source_index][0] in 'ar':
+ # Not in the source tree, so doesn't have to be
+ # included.
+ continue
+ # Path of the entry itself.
+
+ self.search_specific_file_parents.add(
+ osutils.pathjoin(*entry[0][:2]))
+ if changed or self.include_unchanged:
+ yield result
+ self.searched_exact_paths.add(path_utf8)
+
+ def _path_info(self, utf8_path, unicode_path):
+ """Generate path_info for unicode_path.
+
+ :return: None if unicode_path does not exist, or a path_info tuple.
+ """
+ abspath = self.tree.abspath(unicode_path)
+ try:
+ stat = os.lstat(abspath)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # the path does not exist.
+ return None
+ else:
+ raise
+ utf8_basename = utf8_path.rsplit('/', 1)[-1]
+ dir_info = (utf8_path, utf8_basename,
+ osutils.file_kind_from_stat_mode(stat.st_mode), stat,
+ abspath)
+        if dir_info[2] == 'directory':
+            if self.tree._directory_is_tree_reference(
+                unicode_path):
+                # Note: operate on the local dir_info tuple; this class uses
+                # __slots__, so assigning self.root_dir_info would fail.
+                dir_info = dir_info[:2] + \
+                    ('tree-reference',) + dir_info[3:]
+ return dir_info
+
+
+# Try to load the compiled form if possible
+try:
+ from bzrlib._dirstate_helpers_pyx import (
+ _read_dirblocks,
+ bisect_dirblock,
+ _bisect_path_left,
+ _bisect_path_right,
+ cmp_by_dirs,
+ pack_stat,
+ ProcessEntryC as _process_entry,
+ update_entry as update_entry,
+ )
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib._dirstate_helpers_py import (
+ _read_dirblocks,
+ bisect_dirblock,
+ _bisect_path_left,
+ _bisect_path_right,
+ cmp_by_dirs,
+ pack_stat,
+ )
+ # FIXME: It would be nice to be able to track moved lines so that the
+ # corresponding python code can be moved to the _dirstate_helpers_py
+ # module. I don't want to break the history for this important piece of
+ # code so I left the code here -- vila 20090622
+ update_entry = py_update_entry
+ _process_entry = ProcessEntryPython
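
The try/except ImportError block that closes dirstate.py above is the usual bzrlib
idiom for preferring a compiled extension while keeping a pure-Python fallback. A
minimal standalone sketch of the same idiom, using hypothetical module names rather
than bzrlib's real ones:

    try:
        from mypackage._helpers_c import do_work    # hypothetical compiled module
    except ImportError, e:
        # Report the failure once, then bind the slower pure-Python fallback
        # (bzrlib does this via osutils.failed_to_load_extension(e)).
        from mypackage._helpers_py import do_work   # hypothetical fallback module

Callers simply import do_work from the package and never see which implementation
was chosen.
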
diff --git a/bzrlib/doc/__init__.py b/bzrlib/doc/__init__.py
new file mode 100644
index 0000000..7cd43c7
--- /dev/null
+++ b/bzrlib/doc/__init__.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Documentation for bzrlib.
+
+See bzrlib.doc.api for api documentation and in the future bzrlib.doc.man
+for man page generation.
+"""
+
+from __future__ import absolute_import
+
+
+def load_tests(basic_tests, module, loader):
+ suite = loader.suiteClass()
+ # add the tests for this module (obviously none so far)
+ suite.addTests(basic_tests)
+
+ testmod_names = [
+ 'bzrlib.doc.api',
+ ]
+
+ # add the tests for the sub modules
+ suite.addTests(loader.loadTestsFromModuleNames(testmod_names))
+
+ return suite
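
The load_tests hook above follows bzrlib's loader convention: it receives the tests
already collected for the module, the module itself, and the loader, and returns
the suite to run. A rough sketch of the calling side, assuming a loader with the
standard unittest loadTestsFrom* methods:

    import unittest

    def suite_for(module, loader=None):
        # Collect the module's own tests, then let its load_tests hook
        # reshape or extend the suite (submodules, doctests, ...).
        loader = loader or unittest.TestLoader()
        basic = loader.loadTestsFromModule(module)
        hook = getattr(module, 'load_tests', None)
        if hook is not None:
            return hook(basic, module, loader)
        return basic

bzrlib's own TestLoader additionally provides loadTestsFromModuleNames(), which the
hook above relies on, so a plain unittest.TestLoader only suffices for hooks that
stick to the standard methods.
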
diff --git a/bzrlib/doc/api/__init__.py b/bzrlib/doc/api/__init__.py
new file mode 100644
index 0000000..6a7a9ec
--- /dev/null
+++ b/bzrlib/doc/api/__init__.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""API Documentation for bzrlib.
+
+This documentation is made up of doctest testable examples.
+
+Look for `bzrlib/doc/api/*.txt` to read it.
+
+This documentation describes the current best practice in using the library.
+For details on specific apis, see pydoc on the api, or read the source.
+"""
+
+from __future__ import absolute_import
+
+import doctest
+import os
+
+from bzrlib import tests
+
+def load_tests(basic_tests, module, loader):
+ """This module creates its own test suite with DocFileSuite."""
+
+ dir_ = os.path.dirname(__file__)
+ if os.path.isdir(dir_):
+ candidates = os.listdir(dir_)
+ else:
+ candidates = []
+ scripts = [candidate for candidate in candidates
+ if candidate.endswith('.txt')]
+ # since this module doesn't define tests, we ignore basic_tests
+ suite = doctest.DocFileSuite(*scripts)
+ # DocFileCase reduces the test id to the base name of the tested file, we
+    # want the module to appear there.
+ for t in tests.iter_suite_tests(suite):
+ def make_new_test_id():
+ new_id = '%s.DocFileTest(%s)' % ( __name__, t)
+ return lambda: new_id
+ t.id = make_new_test_id()
+ return suite
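
The loop above rebinds each doctest's id() so that the module name shows up in test
reports; the small factory function exists so every replacement id() closes over its
own new_id string rather than the loop variable. The same pattern in isolation,
applied to an ordinary TestCase for brevity:

    import unittest

    class _Probe(unittest.TestCase):
        def test_something(self):
            pass

    tests = [_Probe('test_something')]
    for t in tests:
        def make_new_test_id(test=t):
            # Compute the new id now, while 'test' is bound to this iteration.
            new_id = '%s.DocFileTest(%s)' % (__name__, test)
            return lambda: new_id
        t.id = make_new_test_id()

After the loop, t.id() returns the decorated string instead of the default
'module.Class.method' form.
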
diff --git a/bzrlib/doc/api/branch.txt b/bzrlib/doc/api/branch.txt
new file mode 100644
index 0000000..0f6b7d2
--- /dev/null
+++ b/bzrlib/doc/api/branch.txt
@@ -0,0 +1,37 @@
+The Branch API in bzrlib provides creation and management of Branches.
+
+A Branch represents a series of commits and merges carried out by a user.
+
+Make a temporary directory for these tests:
+
+ >>> from bzrlib import osutils
+ >>> test_dir = osutils.mkdtemp(prefix='bzrlib_doc_api_branch_txt_')
+
+Branches are created by BzrDirs:
+
+ >>> from bzrlib.branch import Branch
+ >>> from bzrlib.bzrdir import BzrDir
+
+ >>> new_branch = BzrDir.create_branch_convenience(test_dir)
+
+Existing Branches can be opened directly:
+
+ >>> transport = new_branch.bzrdir.transport
+ >>> another_instance = Branch.open(transport.clone('..').base)
+
+or via the BzrDir:
+
+ >>> still_the_same_branch = new_branch.bzrdir.open_branch()
+
+A branch has a history of revisions on it:
+
+ >>> new_branch.last_revision()
+ 'null:'
+
+We need to write some more documentation, showing
+push and pull operations. Cloning might also be nice.
+
+And finally, clean up:
+
+ >>> import shutil
+ >>> shutil.rmtree(test_dir)
diff --git a/bzrlib/doc/api/transport.txt b/bzrlib/doc/api/transport.txt
new file mode 100644
index 0000000..bbca6a1
--- /dev/null
+++ b/bzrlib/doc/api/transport.txt
@@ -0,0 +1,24 @@
+The Transport API in bzrlib provides URL based access to network resources.
+
+ >>> import os
+ >>> import sys
+ >>> from bzrlib.osutils import getcwd, dirname
+ >>> from bzrlib.urlutils import local_path_from_url
+ >>> import bzrlib.transport as transport
+ >>> if sys.platform == 'win32':
+ ... root = transport.get_transport_from_url('file:///C:/')
+ ... else:
+ ... root = transport.get_transport_from_url('file:///')
+ >>>
+
+Each Transport instance represents a single logical directory.
+
+ >>> dir = transport.get_transport_from_path(".")
+ >>> local_path_from_url(dir.base) == getcwd() + '/'
+ True
+
+You can change directories via the clone method:
+
+ >>> parent = dir.clone('..')
+ >>> local_path_from_url(parent.base) == (dirname(getcwd()).rstrip('/') + '/')
+ True
diff --git a/bzrlib/doc_generate/__init__.py b/bzrlib/doc_generate/__init__.py
new file mode 100644
index 0000000..831c97c
--- /dev/null
+++ b/bzrlib/doc_generate/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+
+def get_module(target):
+ mod_name = "bzrlib.doc_generate.autodoc_%s" % (target)
+ mod = __import__(mod_name)
+ components = mod_name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
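
get_module() maps a target name such as 'man' or 'bash_completion' to the matching
bzrlib.doc_generate.autodoc_* module, each of which exposes get_filename(options)
and infogen(options, outfile) as the files below do. A rough sketch of the calling
side; the Options class here is a stand-in for the real option object passed by
bzr's documentation generator, which at minimum carries bzr_name:

    from bzrlib import doc_generate

    class Options(object):
        bzr_name = 'bzr'   # assumed attribute; the real object has more fields

    options = Options()
    autodoc = doc_generate.get_module('man')      # bzrlib.doc_generate.autodoc_man
    outfile = open(autodoc.get_filename(options), 'w')
    try:
        autodoc.infogen(options, outfile)
    finally:
        outfile.close()
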
diff --git a/bzrlib/doc_generate/autodoc_bash_completion.py b/bzrlib/doc_generate/autodoc_bash_completion.py
new file mode 100644
index 0000000..0c47f6e
--- /dev/null
+++ b/bzrlib/doc_generate/autodoc_bash_completion.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2005 Canonical Ltd
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"bash_completion.py - create bash completion script from built-in bzr help"
+
+from __future__ import absolute_import
+
+import time
+
+import bzrlib
+import bzrlib.help
+import bzrlib.commands
+
+
+def get_filename(options):
+ return "%s.bash_completion" % (options.bzr_name)
+
+
+def infogen(options, outfile):
+ t = time.time()
+ tt = time.gmtime(t)
+ params = \
+ { "bzrcmd": options.bzr_name,
+ "datestamp": time.strftime("%Y-%m-%d",tt),
+ "timestamp": time.strftime("%Y-%m-%d %H:%M:%S +0000",tt),
+ "version": bzrlib.__version__,
+ }
+
+ outfile.write(preamble % params)
+
+
+preamble = """\
+# bash completion functions for Bazaar (%(bzrcmd)s)
+#
+# Large parts of this file are autogenerated from the internal
+# Bazaar documentation and data structures.
+#
+# Generation time: %(timestamp)s
+"""
+
diff --git a/bzrlib/doc_generate/autodoc_man.py b/bzrlib/doc_generate/autodoc_man.py
new file mode 100644
index 0000000..33e1f8d
--- /dev/null
+++ b/bzrlib/doc_generate/autodoc_man.py
@@ -0,0 +1,256 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""man.py - create man page from built-in bzr help and static text
+
+TODO:
+ * use usage information instead of simple "bzr foo" in COMMAND OVERVIEW
+ * add command aliases
+"""
+
+from __future__ import absolute_import
+
+PLUGINS_TO_DOCUMENT = ["launchpad"]
+
+import textwrap
+import time
+
+import bzrlib
+import bzrlib.help
+import bzrlib.help_topics
+import bzrlib.commands
+
+from bzrlib.plugin import load_plugins
+load_plugins()
+
+
+def get_filename(options):
+ """Provides name of manpage"""
+ return "%s.1" % (options.bzr_name)
+
+
+def infogen(options, outfile):
+ """Assembles a man page"""
+ t = time.time()
+ tt = time.gmtime(t)
+ params = \
+ { "bzrcmd": options.bzr_name,
+ "datestamp": time.strftime("%Y-%m-%d",tt),
+ "timestamp": time.strftime("%Y-%m-%d %H:%M:%S +0000",tt),
+ "version": bzrlib.__version__,
+ }
+ outfile.write(man_preamble % params)
+ outfile.write(man_escape(man_head % params))
+ outfile.write(man_escape(getcommand_list(params)))
+ outfile.write(man_escape(getcommand_help(params)))
+ outfile.write("".join(environment_variables()))
+ outfile.write(man_escape(man_foot % params))
+
+
+def man_escape(string):
+ """Escapes strings for man page compatibility"""
+ result = string.replace("\\","\\\\")
+ result = result.replace("`","\\'")
+ result = result.replace("'","\\*(Aq")
+ result = result.replace("-","\\-")
+ return result
+
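(Editorial note, not part of the patch.) The substitutions above are applied in
sequence, so the output of the earlier ones is visible to the later ones:

    # "\"  ->  "\\"      backslashes are doubled first
    # "`"  ->  "\'"      which the apostrophe rule below then rewrites again
    # "'"  ->  "\*(Aq"   rendered via the Aq string set up in man_preamble
    # "-"  ->  "\-"      hyphens become explicit groff minus/hyphen escapes
    #
    # e.g. man_escape("bzr add -h") == "bzr add \\-h"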
+
+def command_name_list():
+ """Builds a list of command names from bzrlib"""
+ command_names = bzrlib.commands.builtin_command_names()
+ for cmdname in bzrlib.commands.plugin_command_names():
+ cmd_object = bzrlib.commands.get_cmd_object(cmdname)
+ if (PLUGINS_TO_DOCUMENT is None or
+ cmd_object.plugin_name() in PLUGINS_TO_DOCUMENT):
+ command_names.append(cmdname)
+ command_names.sort()
+ return command_names
+
+
+def getcommand_list (params):
+ """Builds summary help for command names in manpage format"""
+ bzrcmd = params["bzrcmd"]
+ output = '.SH "COMMAND OVERVIEW"\n'
+ for cmd_name in command_name_list():
+ cmd_object = bzrlib.commands.get_cmd_object(cmd_name)
+ if cmd_object.hidden:
+ continue
+ cmd_help = cmd_object.help()
+ if cmd_help:
+ firstline = cmd_help.split('\n', 1)[0]
+ usage = cmd_object._usage()
+ tmp = '.TP\n.B "%s"\n%s\n' % (usage, firstline)
+ output = output + tmp
+ else:
+ raise RuntimeError, "Command '%s' has no help text" % (cmd_name)
+ return output
+
+
+def getcommand_help(params):
+ """Shows individual options for a bzr command"""
+ output='.SH "COMMAND REFERENCE"\n'
+ formatted = {}
+ for cmd_name in command_name_list():
+ cmd_object = bzrlib.commands.get_cmd_object(cmd_name)
+ if cmd_object.hidden:
+ continue
+ formatted[cmd_name] = format_command(params, cmd_object)
+ for alias in cmd_object.aliases:
+ formatted[alias] = format_alias(params, alias, cmd_name)
+ for cmd_name in sorted(formatted):
+ output += formatted[cmd_name]
+ return output
+
+
+def format_command(params, cmd):
+ """Provides long help for each public command"""
+ subsection_header = '.SS "%s"\n' % (cmd._usage())
+ doc = "%s\n" % (cmd.__doc__)
+ doc = bzrlib.help_topics.help_as_plain_text(cmd.help())
+
+ # A dot at the beginning of a line is interpreted as a macro.
+ # Simply join lines that begin with a dot with the previous
+ # line to work around this.
+ doc = doc.replace("\n.", ".")
+
+ option_str = ""
+ options = cmd.options()
+ if options:
+ option_str = "\nOptions:\n"
+ for option_name, option in sorted(options.items()):
+ for name, short_name, argname, help in option.iter_switches():
+ if option.is_hidden(name):
+ continue
+ l = ' --' + name
+ if argname is not None:
+ l += ' ' + argname
+ if short_name:
+ l += ', -' + short_name
+ l += (30 - len(l)) * ' ' + (help or '')
+ wrapped = textwrap.fill(l, initial_indent='',
+ subsequent_indent=30*' ',
+ break_long_words=False,
+ )
+ option_str += wrapped + '\n'
+
+ aliases_str = ""
+ if cmd.aliases:
+ if len(cmd.aliases) > 1:
+ aliases_str += '\nAliases: '
+ else:
+ aliases_str += '\nAlias: '
+ aliases_str += ', '.join(cmd.aliases)
+ aliases_str += '\n'
+
+ see_also_str = ""
+ see_also = cmd.get_see_also()
+ if see_also:
+ see_also_str += '\nSee also: '
+ see_also_str += ', '.join(see_also)
+ see_also_str += '\n'
+
+ return subsection_header + option_str + aliases_str + see_also_str + "\n" + doc + "\n"
+
+
+def format_alias(params, alias, cmd_name):
+ help = '.SS "bzr %s"\n' % alias
+ help += 'Alias for "%s", see "bzr %s".\n' % (cmd_name, cmd_name)
+ return help
+
+
+def environment_variables():
+ yield ".SH \"ENVIRONMENT\"\n"
+
+ from bzrlib.help_topics import known_env_variables
+ for k, desc in known_env_variables:
+ yield ".TP\n"
+ yield ".I \"%s\"\n" % k
+ yield man_escape(desc) + "\n"
+
+
+man_preamble = """\
+.\\\"Man page for Bazaar (%(bzrcmd)s)
+.\\\"
+.\\\" Large parts of this file are autogenerated from the output of
+.\\\" \"%(bzrcmd)s help commands\"
+.\\\" \"%(bzrcmd)s help <cmd>\"
+.\\\"
+.\\\" Generation time: %(timestamp)s
+.\\\"
+
+.ie \\n(.g .ds Aq \\(aq
+.el .ds Aq '
+"""
+
+
+man_head = """\
+.TH bzr 1 "%(datestamp)s" "%(version)s" "Bazaar"
+.SH "NAME"
+%(bzrcmd)s - Bazaar next-generation distributed version control
+.SH "SYNOPSIS"
+.B "%(bzrcmd)s"
+.I "command"
+[
+.I "command_options"
+]
+.br
+.B "%(bzrcmd)s"
+.B "help"
+.br
+.B "%(bzrcmd)s"
+.B "help"
+.I "command"
+.SH "DESCRIPTION"
+
+Bazaar (or %(bzrcmd)s) is a distributed version control system that is powerful,
+friendly, and scalable. Bazaar is a project of Canonical Ltd and part of
+the GNU Project to develop a free operating system.
+
+Bazaar keeps track of changes to software source code (or similar information);
+lets you explore who changed it, when, and why; merges concurrent changes; and
+helps people work together in a team.
+"""
+
+man_foot = """\
+.SH "FILES"
+.TP
+.I "~/.bazaar/bazaar.conf"
+Contains the user's default configuration. The section
+.B [DEFAULT]
+is used to define general configuration that will be applied everywhere.
+The section
+.B [ALIASES]
+can be used to create command aliases for
+commonly used options.
+
+A typical config file might look something like:
+
+.br
+[DEFAULT]
+.br
+email=John Doe <jdoe@isp.com>
+.br
+[ALIASES]
+.br
+commit = commit --strict
+.br
+log10 = log --short -r -10..-1
+.SH "SEE ALSO"
+.UR http://bazaar.canonical.com/
+.BR http://bazaar.canonical.com/
+"""
+
diff --git a/bzrlib/doc_generate/autodoc_rstx.py b/bzrlib/doc_generate/autodoc_rstx.py
new file mode 100644
index 0000000..07fb528
--- /dev/null
+++ b/bzrlib/doc_generate/autodoc_rstx.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Generate reStructuredText source for the User Reference Manual.
+Loosely based on the manpage generator autodoc_man.py.
+
+Written by the Bazaar community.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+import bzrlib
+import bzrlib.help
+import bzrlib.help_topics
+import bzrlib.commands
+import bzrlib.osutils
+
+
+def get_filename(options):
+ """Provides name of manual"""
+ return "%s_man.txt" % (options.bzr_name)
+
+
+def infogen(options, outfile):
+ """Create manual in RSTX format"""
+ t = time.time()
+ tt = time.gmtime(t)
+ params = \
+ { "bzrcmd": options.bzr_name,
+ "datestamp": time.strftime("%Y-%m-%d",tt),
+ "timestamp": time.strftime("%Y-%m-%d %H:%M:%S +0000",tt),
+ "version": bzrlib.__version__,
+ }
+ nominated_filename = getattr(options, 'filename', None)
+ if nominated_filename is None:
+ topic_dir = None
+ else:
+ topic_dir = bzrlib.osutils.dirname(nominated_filename)
+ outfile.write(rstx_preamble % params)
+ outfile.write(rstx_head % params)
+ outfile.write(_get_body(params, topic_dir))
+ outfile.write(rstx_foot % params)
+
+
+def _get_body(params, topic_dir):
+ """Build the manual content."""
+ from bzrlib.help_topics import SECT_CONCEPT, SECT_LIST, SECT_PLUGIN
+ registry = bzrlib.help_topics.topic_registry
+ result = []
+ result.append(_get_section(registry, SECT_CONCEPT, "Concepts",
+ output_dir=topic_dir))
+ result.append(_get_section(registry, SECT_LIST, "Lists",
+ output_dir=topic_dir))
+ result.append(_get_commands_section(registry, output_dir=topic_dir))
+ return "\n".join(result)
+
+
+def _get_section(registry, section, title, hdg_level1="#", hdg_level2="=",
+ output_dir=None):
+ """Build the manual part from topics matching that section.
+
+ If output_dir is not None, topics are dumped into text files there
+ during processing, as well as being included in the return result.
+ """
+ file_per_topic = output_dir is not None
+ lines = [title, hdg_level1 * len(title), ""]
+ if file_per_topic:
+ lines.extend([".. toctree::", " :maxdepth: 1", ""])
+
+ topics = sorted(registry.get_topics_for_section(section))
+ for topic in topics:
+ help = registry.get_detail(topic)
+ heading, text = help.split("\n", 1)
+ if not text.startswith(hdg_level2):
+ underline = hdg_level2 * len(heading)
+ help = "%s\n%s\n\n%s\n\n" % (heading, underline, text)
+ else:
+ help = "%s\n%s\n\n" % (heading, text)
+ if file_per_topic:
+ topic_id = _dump_text(output_dir, topic, help)
+ lines.append(" %s" % topic_id)
+ else:
+ lines.append(help)
+
+ return "\n" + "\n".join(lines) + "\n"
+
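(Editorial sketch, not part of the patch.) With output_dir=None the text
returned by _get_section() is plain reStructuredText, roughly:

    Concepts
    ########

    <topic heading>
    ===============

    <help text for that topic>

With an output_dir, each topic body is instead written to "<topic>-help.txt"
via _dump_text() and only a "<topic>-help" entry is added to the toctree.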
+
+def _get_commands_section(registry, title="Commands", hdg_level1="#",
+ hdg_level2="=", output_dir=None):
+ """Build the commands reference section of the manual."""
+ file_per_topic = output_dir is not None
+ lines = [title, hdg_level1 * len(title), ""]
+ if file_per_topic:
+ lines.extend([".. toctree::", " :maxdepth: 1", ""])
+
+ cmds = sorted(bzrlib.commands.builtin_command_names())
+ for cmd_name in cmds:
+ cmd_object = bzrlib.commands.get_cmd_object(cmd_name)
+ if cmd_object.hidden:
+ continue
+ heading = cmd_name
+ underline = hdg_level2 * len(heading)
+ text = cmd_object.get_help_text(plain=False, see_also_as_links=True)
+ help = "%s\n%s\n\n%s\n\n" % (heading, underline, text)
+ if file_per_topic:
+ topic_id = _dump_text(output_dir, cmd_name, help)
+ lines.append(" %s" % topic_id)
+ else:
+ lines.append(help)
+
+ return "\n" + "\n".join(lines) + "\n"
+
+
+def _dump_text(output_dir, topic, text):
+ """Dump text for a topic to a file."""
+ topic_id = "%s-%s" % (topic, "help")
+ filename = bzrlib.osutils.pathjoin(output_dir, topic_id + ".txt")
+ f = open(filename, "w")
+ f.writelines(text)
+ f.close()
+ return topic_id
+
+
+##
+# TEMPLATES
+
+rstx_preamble = """.. This file is autogenerated from the output of
+.. %(bzrcmd)s help topics
+.. %(bzrcmd)s help commands
+.. %(bzrcmd)s help <cmd>
+..
+.. Generation time: %(timestamp)s
+
+"""
+
+
+rstx_head = """\
+#####################
+Bazaar User Reference
+#####################
+
+About This Manual
+#################
+
+This manual is generated from Bazaar's online help. To use
+the online help system, try the following commands.
+
+ Introduction including a list of commonly used commands::
+
+ bzr help
+
+ List of topics and a summary of each::
+
+ bzr help topics
+
+ List of commands and a summary of each::
+
+ bzr help commands
+
+ More information about a particular topic or command::
+
+ bzr help topic-or-command-name
+
+The following web sites provide further information on Bazaar:
+
+:Home page: http://bazaar.canonical.com/
+:Official docs: http://doc.bazaar.canonical.com/
+:Launchpad: https://launchpad.net/bzr/
+"""
+
+
+rstx_foot = """
+"""
diff --git a/bzrlib/doc_generate/conf.py b/bzrlib/doc_generate/conf.py
new file mode 100644
index 0000000..f7a72d6
--- /dev/null
+++ b/bzrlib/doc_generate/conf.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+#
+# Bazaar documentation build configuration file, created by
+# sphinx-quickstart on Tue Jul 21 17:04:52 2009.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+from __future__ import absolute_import
+
+# FIXME: better move the content of doc/en/conf.py here and cleanup the result
+# -- vila 20100428
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.ifconfig',]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.txt'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Bazaar'
+copyright = u'2009-2011 Canonical Ltd'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+import bzrlib
+version = '.'.join(str(p) for p in bzrlib.version_info[:2])
+# The full version, including alpha/beta/rc tags.
+release = bzrlib.version_string
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ # Unfortunately, the right sidebar breaks under IE6 and maybe IE7.
+ # So we stick with the default left placement to cater for users stuck
+ # on those browsers.
+ #'rightsidebar': True,
+
+ # Non-document areas: header (relbar), footer, sidebar, etc.
+ # Some useful colours here:
+ # * blue: darkblue, mediumblue, darkslateblue, cornflowerblue, royalblue,
+ # midnightblue
+ # * gray: dimgray, slategray, lightslategray
+ 'sidebarbgcolor': "cornflowerblue",
+ 'sidebarlinkcolor': "midnightblue",
+ 'relbarbgcolor': "darkblue",
+ 'footerbgcolor': "lightslategray",
+
+ # Text, heading and code colouring
+ 'codebgcolor': "lightyellow",
+ 'codetextcolor': "firebrick",
+ 'linkcolor': "mediumblue",
+ }
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = "bzr.ico"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+html_use_modindex = False
+
+# If false, no index is generated.
+html_use_index = False
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'bzr-docs'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = []
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+latex_logo = '../Bazaar-Logo-For-Manuals.png'
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# -- Bazaar-specific configuration ---------------------------------------------
+
+# Authors of the documents
+bzr_team = u'Bazaar Developers'
diff --git a/bzrlib/email_message.py b/bzrlib/email_message.py
new file mode 100644
index 0000000..fd9a29d
--- /dev/null
+++ b/bzrlib/email_message.py
@@ -0,0 +1,209 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A convenience class around email.Message and email.MIMEMultipart."""
+
+from __future__ import absolute_import
+
+from email import (
+ Header,
+ Message,
+ MIMEMultipart,
+ MIMEText,
+ Utils,
+ )
+
+from bzrlib import __version__ as _bzrlib_version
+from bzrlib.osutils import safe_unicode
+from bzrlib.smtp_connection import SMTPConnection
+
+
+class EmailMessage(object):
+ """An email message.
+
+ The constructor needs an origin address, a destination address or addresses
+ and a subject, and accepts a body as well. Add additional parts to the
+ message with add_inline_attachment(). Retrieve the entire formatted message
+ with as_string().
+
+ Headers can be accessed with get() and msg[], and modified with msg[] =.
+ """
+
+ def __init__(self, from_address, to_address, subject, body=None):
+ """Create an email message.
+
+ :param from_address: The origin address, to be put on the From header.
+ :param to_address: The destination address of the message, to be put in
+ the To header. Can also be a list of addresses.
+ :param subject: The subject of the message.
+ :param body: If given, the body of the message.
+
+ All four parameters can be unicode strings or byte strings, but for the
+ addresses and subject byte strings must be encoded in UTF-8. For the
+ body any byte string will be accepted; if it's not ASCII or UTF-8,
+ it'll be sent with charset=8-bit.
+ """
+ self._headers = {}
+ self._body = body
+ self._parts = []
+
+ if isinstance(to_address, basestring):
+ to_address = [ to_address ]
+
+ to_addresses = []
+
+ for addr in to_address:
+ to_addresses.append(self.address_to_encoded_header(addr))
+
+ self._headers['To'] = ', '.join(to_addresses)
+ self._headers['From'] = self.address_to_encoded_header(from_address)
+ self._headers['Subject'] = Header.Header(safe_unicode(subject))
+ self._headers['User-Agent'] = 'Bazaar (%s)' % _bzrlib_version
+
+ def add_inline_attachment(self, body, filename=None, mime_subtype='plain'):
+ """Add an inline attachment to the message.
+
+        :param body: A text to attach. Can be a unicode string or a byte
+ string, and it'll be sent as ascii, utf-8, or 8-bit, in that
+ preferred order.
+ :param filename: The name for the attachment. This will give a default
+ name for email programs to save the attachment.
+ :param mime_subtype: MIME subtype of the attachment (eg. 'plain' for
+ text/plain [default]).
+
+ The attachment body will be displayed inline, so do not use this
+ function to attach binary attachments.
+ """
+ # add_inline_attachment() has been called, so the message will be a
+ # MIMEMultipart; add the provided body, if any, as the first attachment
+ if self._body is not None:
+ self._parts.append((self._body, None, 'plain'))
+ self._body = None
+
+ self._parts.append((body, filename, mime_subtype))
+
+ def as_string(self, boundary=None):
+ """Return the entire formatted message as a string.
+
+ :param boundary: The boundary to use between MIME parts, if applicable.
+ Used for tests.
+ """
+ if not self._parts:
+ msgobj = Message.Message()
+ if self._body is not None:
+ body, encoding = self.string_with_encoding(self._body)
+ msgobj.set_payload(body, encoding)
+ else:
+ msgobj = MIMEMultipart.MIMEMultipart()
+
+ if boundary is not None:
+ msgobj.set_boundary(boundary)
+
+ for body, filename, mime_subtype in self._parts:
+ body, encoding = self.string_with_encoding(body)
+ payload = MIMEText.MIMEText(body, mime_subtype, encoding)
+
+ if filename is not None:
+ content_type = payload['Content-Type']
+ content_type += '; name="%s"' % filename
+ payload.replace_header('Content-Type', content_type)
+
+ payload['Content-Disposition'] = 'inline'
+ msgobj.attach(payload)
+
+ # sort headers here to ease testing
+ for header, value in sorted(self._headers.items()):
+ msgobj[header] = value
+
+ return msgobj.as_string()
+
+ __str__ = as_string
+
+ def get(self, header, failobj=None):
+ """Get a header from the message, returning failobj if not present."""
+ return self._headers.get(header, failobj)
+
+ def __getitem__(self, header):
+ """Get a header from the message, returning None if not present.
+
+ This method intentionally does not raise KeyError to mimic the behavior
+ of __getitem__ in email.Message.
+ """
+ return self._headers.get(header, None)
+
+ def __setitem__(self, header, value):
+ return self._headers.__setitem__(header, value)
+
+ @staticmethod
+ def send(config, from_address, to_address, subject, body, attachment=None,
+ attachment_filename=None, attachment_mime_subtype='plain'):
+ """Create an email message and send it with SMTPConnection.
+
+ :param config: config object to pass to SMTPConnection constructor.
+
+ See EmailMessage.__init__() and EmailMessage.add_inline_attachment()
+ for an explanation of the rest of parameters.
+ """
+ msg = EmailMessage(from_address, to_address, subject, body)
+ if attachment is not None:
+ msg.add_inline_attachment(attachment, attachment_filename,
+ attachment_mime_subtype)
+ SMTPConnection(config).send_email(msg)
+
+ @staticmethod
+ def address_to_encoded_header(address):
+ """RFC2047-encode an address if necessary.
+
+        :param address: A unicode string, or UTF-8 byte string.
+ :return: A possibly RFC2047-encoded string.
+ """
+        # Can't call Header on the whole address, because that would encode both the
+ # name and the email address, which is not permitted by RFCs.
+ user, email = Utils.parseaddr(address)
+ if not user:
+ return email
+ else:
+ return Utils.formataddr((str(Header.Header(safe_unicode(user))),
+ email))
+
+ @staticmethod
+ def string_with_encoding(string_):
+ """Return a str object together with an encoding.
+
+ :param string\\_: A str or unicode object.
+ :return: A tuple (str, encoding), where encoding is one of 'ascii',
+ 'utf-8', or '8-bit', in that preferred order.
+ """
+ # Python's email module base64-encodes the body whenever the charset is
+ # not explicitly set to ascii. Because of this, and because we want to
+ # avoid base64 when it's not necessary in order to be most compatible
+ # with the capabilities of the receiving side, we check with encode()
+ # and decode() whether the body is actually ascii-only.
+ if isinstance(string_, unicode):
+ try:
+ return (string_.encode('ascii'), 'ascii')
+ except UnicodeEncodeError:
+ return (string_.encode('utf-8'), 'utf-8')
+ else:
+ try:
+ string_.decode('ascii')
+ return (string_, 'ascii')
+ except UnicodeDecodeError:
+ try:
+ string_.decode('utf-8')
+ return (string_, 'utf-8')
+ except UnicodeDecodeError:
+ return (string_, '8-bit')
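(Editorial sketch, not part of the patch; the addresses are invented.) Putting
the pieces above together, a caller would typically use the class like this:

    from bzrlib.email_message import EmailMessage

    msg = EmailMessage('Maria <maria@example.com>',
                       ['jrandom@example.com', 'jane@example.com'],
                       'Review request', body='Please take a look.')
    # Adding an inline attachment turns the message into a MIMEMultipart.
    msg.add_inline_attachment('the patch text\n', filename='fix.diff')
    print msg.as_string()   # full formatted message
    print msg['Subject']    # headers via __getitem__ / get()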
diff --git a/bzrlib/errors.py b/bzrlib/errors.py
new file mode 100644
index 0000000..f986af2
--- /dev/null
+++ b/bzrlib/errors.py
@@ -0,0 +1,3342 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Exceptions for bzr, and reporting of them.
+"""
+
+from __future__ import absolute_import
+
+# TODO: is there any value in providing the .args field used by standard
+# python exceptions? A list of values with no names seems less useful
+# to me.
+
+# TODO: Perhaps convert the exception to a string at the moment it's
+# constructed to make sure it will succeed. But that says nothing about
+# exceptions that are never raised.
+
+# TODO: selftest assertRaises should probably also check that every error
+# raised can be formatted as a string successfully, and without giving
+# 'unprintable'.
+
+
+# return codes from the bzr program
+EXIT_OK = 0
+EXIT_ERROR = 3
+EXIT_INTERNAL_ERROR = 4
+
+
+class BzrError(StandardError):
+ """
+ Base class for errors raised by bzrlib.
+
+ :cvar internal_error: if True this was probably caused by a bzr bug and
+ should be displayed with a traceback; if False (or absent) this was
+ probably a user or environment error and they don't need the gory
+ details. (That can be overridden by -Derror on the command line.)
+
+ :cvar _fmt: Format string to display the error; this is expanded
+ by the instance's dict.
+ """
+
+ internal_error = False
+
+ def __init__(self, msg=None, **kwds):
+ """Construct a new BzrError.
+
+ There are two alternative forms for constructing these objects.
+ Either a preformatted string may be passed, or a set of named
+ arguments can be given. The first is for generic "user" errors which
+ are not intended to be caught and so do not need a specific subclass.
+ The second case is for use with subclasses that provide a _fmt format
+ string to print the arguments.
+
+ Keyword arguments are taken as parameters to the error, which can
+ be inserted into the format string template. It's recommended
+ that subclasses override the __init__ method to require specific
+ parameters.
+
+ :param msg: If given, this is the literal complete text for the error,
+            not subject to expansion. 'msg' is used instead of 'message', because
+            Python 2.6 deprecates the use of a 'message' attribute on exceptions.
+ """
+ StandardError.__init__(self)
+ if msg is not None:
+ # I was going to deprecate this, but it actually turns out to be
+ # quite handy - mbp 20061103.
+ self._preformatted_string = msg
+ else:
+ self._preformatted_string = None
+ for key, value in kwds.items():
+ setattr(self, key, value)
+
+ def _format(self):
+ s = getattr(self, '_preformatted_string', None)
+ if s is not None:
+ # contains a preformatted message
+ return s
+ try:
+ fmt = self._get_format_string()
+ if fmt:
+ d = dict(self.__dict__)
+ s = fmt % d
+ # __str__() should always return a 'str' object
+ # never a 'unicode' object.
+ return s
+ except Exception, e:
+ pass # just bind to 'e' for formatting below
+ else:
+ e = None
+ return 'Unprintable exception %s: dict=%r, fmt=%r, error=%r' \
+ % (self.__class__.__name__,
+ self.__dict__,
+ getattr(self, '_fmt', None),
+ e)
+
+ def __unicode__(self):
+ u = self._format()
+ if isinstance(u, str):
+ # Try decoding the str using the default encoding.
+ u = unicode(u)
+ elif not isinstance(u, unicode):
+ # Try to make a unicode object from it, because __unicode__ must
+ # return a unicode object.
+ u = unicode(u)
+ return u
+
+ def __str__(self):
+ s = self._format()
+ if isinstance(s, unicode):
+ s = s.encode('utf8')
+ else:
+ # __str__ must return a str.
+ s = str(s)
+ return s
+
+ def __repr__(self):
+ return '%s(%s)' % (self.__class__.__name__, str(self))
+
+ def _get_format_string(self):
+ """Return format string for this exception or None"""
+ fmt = getattr(self, '_fmt', None)
+ if fmt is not None:
+ from bzrlib.i18n import gettext
+ return gettext(unicode(fmt)) # _fmt strings should be ascii
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+
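(Editorial sketch, not part of the patch; DemoMissingFile is an invented
example.) The two construction styles described in BzrError.__init__ look like
this in practice:

    # Style 1: a subclass supplies a _fmt template, expanded from keyword args.
    class DemoMissingFile(BzrError):

        _fmt = 'The file "%(filename)s" is missing from %(where)s.'

        def __init__(self, filename, where):
            BzrError.__init__(self, filename=filename, where=where)

    str(DemoMissingFile('a.txt', 'the tree'))
    # -> 'The file "a.txt" is missing from the tree.'

    # Style 2: a generic, preformatted message with no subclass.
    str(BzrError('something user-visible went wrong'))
    # -> 'something user-visible went wrong'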
+class InternalBzrError(BzrError):
+ """Base class for errors that are internal in nature.
+
+ This is a convenience class for errors that are internal. The
+ internal_error attribute can still be altered in subclasses, if needed.
+ Using this class is simply an easy way to get internal errors.
+ """
+
+ internal_error = True
+
+
+class AlreadyBuilding(BzrError):
+
+ _fmt = "The tree builder is already building a tree."
+
+
+class BranchError(BzrError):
+ """Base class for concrete 'errors about a branch'."""
+
+ def __init__(self, branch):
+ BzrError.__init__(self, branch=branch)
+
+
+class BzrCheckError(InternalBzrError):
+
+ _fmt = "Internal check failed: %(msg)s"
+
+ def __init__(self, msg):
+ BzrError.__init__(self)
+ self.msg = msg
+
+
+class DirstateCorrupt(BzrError):
+
+ _fmt = "The dirstate file (%(state)s) appears to be corrupt: %(msg)s"
+
+ def __init__(self, state, msg):
+ BzrError.__init__(self)
+ self.state = state
+ self.msg = msg
+
+
+class DisabledMethod(InternalBzrError):
+
+ _fmt = "The smart server method '%(class_name)s' is disabled."
+
+ def __init__(self, class_name):
+ BzrError.__init__(self)
+ self.class_name = class_name
+
+
+class IncompatibleAPI(BzrError):
+
+ _fmt = 'The API for "%(api)s" is not compatible with "%(wanted)s". '\
+ 'It supports versions "%(minimum)s" to "%(current)s".'
+
+ def __init__(self, api, wanted, minimum, current):
+ self.api = api
+ self.wanted = wanted
+ self.minimum = minimum
+ self.current = current
+
+
+class InProcessTransport(BzrError):
+
+ _fmt = "The transport '%(transport)s' is only accessible within this " \
+ "process."
+
+ def __init__(self, transport):
+ self.transport = transport
+
+
+class InvalidEntryName(InternalBzrError):
+
+ _fmt = "Invalid entry name: %(name)s"
+
+ def __init__(self, name):
+ BzrError.__init__(self)
+ self.name = name
+
+
+class InvalidRevisionNumber(BzrError):
+
+ _fmt = "Invalid revision number %(revno)s"
+
+ def __init__(self, revno):
+ BzrError.__init__(self)
+ self.revno = revno
+
+
+class InvalidRevisionId(BzrError):
+
+ _fmt = "Invalid revision-id {%(revision_id)s} in %(branch)s"
+
+ def __init__(self, revision_id, branch):
+ # branch can be any string or object with __str__ defined
+ BzrError.__init__(self)
+ self.revision_id = revision_id
+ self.branch = branch
+
+
+class ReservedId(BzrError):
+
+ _fmt = "Reserved revision-id {%(revision_id)s}"
+
+ def __init__(self, revision_id):
+ self.revision_id = revision_id
+
+
+class RootMissing(InternalBzrError):
+
+ _fmt = ("The root entry of a tree must be the first entry supplied to "
+ "the commit builder.")
+
+
+class NoPublicBranch(BzrError):
+
+ _fmt = 'There is no public branch set for "%(branch_url)s".'
+
+ def __init__(self, branch):
+ import bzrlib.urlutils as urlutils
+ public_location = urlutils.unescape_for_display(branch.base, 'ascii')
+ BzrError.__init__(self, branch_url=public_location)
+
+
+class NoHelpTopic(BzrError):
+
+ _fmt = ("No help could be found for '%(topic)s'. "
+ "Please use 'bzr help topics' to obtain a list of topics.")
+
+ def __init__(self, topic):
+ self.topic = topic
+
+
+class NoSuchId(BzrError):
+
+ _fmt = 'The file id "%(file_id)s" is not present in the tree %(tree)s.'
+
+ def __init__(self, tree, file_id):
+ BzrError.__init__(self)
+ self.file_id = file_id
+ self.tree = tree
+
+
+class NoSuchIdInRepository(NoSuchId):
+
+ _fmt = ('The file id "%(file_id)s" is not present in the repository'
+ ' %(repository)r')
+
+ def __init__(self, repository, file_id):
+ BzrError.__init__(self, repository=repository, file_id=file_id)
+
+
+class NotStacked(BranchError):
+
+ _fmt = "The branch '%(branch)s' is not stacked."
+
+
+class InventoryModified(InternalBzrError):
+
+ _fmt = ("The current inventory for the tree %(tree)r has been modified,"
+ " so a clean inventory cannot be read without data loss.")
+
+ def __init__(self, tree):
+ self.tree = tree
+
+
+class NoWorkingTree(BzrError):
+
+ _fmt = 'No WorkingTree exists for "%(base)s".'
+
+ def __init__(self, base):
+ BzrError.__init__(self)
+ self.base = base
+
+
+class NotBuilding(BzrError):
+
+ _fmt = "Not currently building a tree."
+
+
+class NotLocalUrl(BzrError):
+
+ _fmt = "%(url)s is not a local path."
+
+ def __init__(self, url):
+ self.url = url
+
+
+class WorkingTreeAlreadyPopulated(InternalBzrError):
+
+ _fmt = 'Working tree already populated in "%(base)s"'
+
+ def __init__(self, base):
+ self.base = base
+
+
+class BzrCommandError(BzrError):
+ """Error from user command"""
+
+ # Error from malformed user command; please avoid raising this as a
+ # generic exception not caused by user input.
+ #
+ # I think it's a waste of effort to differentiate between errors that
+ # are not intended to be caught anyway. UI code need not subclass
+ # BzrCommandError, and non-UI code should not throw a subclass of
+ # BzrCommandError. ADHB 20051211
+
+
+class NotWriteLocked(BzrError):
+
+ _fmt = """%(not_locked)r is not write locked but needs to be."""
+
+ def __init__(self, not_locked):
+ self.not_locked = not_locked
+
+
+class BzrOptionError(BzrCommandError):
+
+ _fmt = "Error in command line options"
+
+
+class BadIndexFormatSignature(BzrError):
+
+ _fmt = "%(value)s is not an index of type %(_type)s."
+
+ def __init__(self, value, _type):
+ BzrError.__init__(self)
+ self.value = value
+ self._type = _type
+
+
+class BadIndexData(BzrError):
+
+ _fmt = "Error in data for index %(value)s."
+
+ def __init__(self, value):
+ BzrError.__init__(self)
+ self.value = value
+
+
+class BadIndexDuplicateKey(BzrError):
+
+ _fmt = "The key '%(key)s' is already in index '%(index)s'."
+
+ def __init__(self, key, index):
+ BzrError.__init__(self)
+ self.key = key
+ self.index = index
+
+
+class BadIndexKey(BzrError):
+
+ _fmt = "The key '%(key)s' is not a valid key."
+
+ def __init__(self, key):
+ BzrError.__init__(self)
+ self.key = key
+
+
+class BadIndexOptions(BzrError):
+
+ _fmt = "Could not parse options for index %(value)s."
+
+ def __init__(self, value):
+ BzrError.__init__(self)
+ self.value = value
+
+
+class BadIndexValue(BzrError):
+
+ _fmt = "The value '%(value)s' is not a valid value."
+
+ def __init__(self, value):
+ BzrError.__init__(self)
+ self.value = value
+
+
+class BadOptionValue(BzrError):
+
+ _fmt = """Bad value "%(value)s" for option "%(name)s"."""
+
+ def __init__(self, name, value):
+ BzrError.__init__(self, name=name, value=value)
+
+
+class StrictCommitFailed(BzrError):
+
+ _fmt = "Commit refused because there are unknown files in the tree"
+
+
+# XXX: Should be unified with TransportError; they seem to represent the
+# same thing
+# RBC 20060929: I think that unifying with TransportError would be a mistake
+# - this is finer than a TransportError - and more useful as such. It
+# differentiates between 'transport has failed' and 'operation on a transport
+# has failed.'
+class PathError(BzrError):
+
+ _fmt = "Generic path error: %(path)r%(extra)s)"
+
+ def __init__(self, path, extra=None):
+ BzrError.__init__(self)
+ self.path = path
+ if extra:
+ self.extra = ': ' + str(extra)
+ else:
+ self.extra = ''
+
+
+class NoSuchFile(PathError):
+
+ _fmt = "No such file: %(path)r%(extra)s"
+
+
+class FileExists(PathError):
+
+ _fmt = "File exists: %(path)r%(extra)s"
+
+
+class RenameFailedFilesExist(BzrError):
+ """Used when renaming and both source and dest exist."""
+
+ _fmt = ("Could not rename %(source)s => %(dest)s because both files exist."
+ " (Use --after to tell bzr about a rename that has already"
+ " happened)%(extra)s")
+
+ def __init__(self, source, dest, extra=None):
+ BzrError.__init__(self)
+ self.source = str(source)
+ self.dest = str(dest)
+ if extra:
+ self.extra = ' ' + str(extra)
+ else:
+ self.extra = ''
+
+
+class NotADirectory(PathError):
+
+ _fmt = '"%(path)s" is not a directory %(extra)s'
+
+
+class NotInWorkingDirectory(PathError):
+
+ _fmt = '"%(path)s" is not in the working directory %(extra)s'
+
+
+class DirectoryNotEmpty(PathError):
+
+ _fmt = 'Directory not empty: "%(path)s"%(extra)s'
+
+
+class HardLinkNotSupported(PathError):
+
+ _fmt = 'Hard-linking "%(path)s" is not supported'
+
+
+class ReadingCompleted(InternalBzrError):
+
+ _fmt = ("The MediumRequest '%(request)s' has already had finish_reading "
+ "called upon it - the request has been completed and no more "
+ "data may be read.")
+
+ def __init__(self, request):
+ self.request = request
+
+
+class ResourceBusy(PathError):
+
+ _fmt = 'Device or resource busy: "%(path)s"%(extra)s'
+
+
+class PermissionDenied(PathError):
+
+ _fmt = 'Permission denied: "%(path)s"%(extra)s'
+
+
+class InvalidURL(PathError):
+
+ _fmt = 'Invalid url supplied to transport: "%(path)s"%(extra)s'
+
+
+class InvalidURLJoin(PathError):
+
+ _fmt = "Invalid URL join request: %(reason)s: %(base)r + %(join_args)r"
+
+ def __init__(self, reason, base, join_args):
+ self.reason = reason
+ self.base = base
+ self.join_args = join_args
+ PathError.__init__(self, base, reason)
+
+
+class InvalidRebaseURLs(PathError):
+
+ _fmt = "URLs differ by more than path: %(from_)r and %(to)r"
+
+ def __init__(self, from_, to):
+ self.from_ = from_
+ self.to = to
+ PathError.__init__(self, from_, 'URLs differ by more than path.')
+
+
+class UnavailableRepresentation(InternalBzrError):
+
+ _fmt = ("The encoding '%(wanted)s' is not available for key %(key)s which "
+ "is encoded as '%(native)s'.")
+
+ def __init__(self, key, wanted, native):
+ InternalBzrError.__init__(self)
+ self.wanted = wanted
+ self.native = native
+ self.key = key
+
+
+class UnknownHook(BzrError):
+
+ _fmt = "The %(type)s hook '%(hook)s' is unknown in this version of bzrlib."
+
+ def __init__(self, hook_type, hook_name):
+ BzrError.__init__(self)
+ self.type = hook_type
+ self.hook = hook_name
+
+
+class UnsupportedProtocol(PathError):
+
+ _fmt = 'Unsupported protocol for url "%(path)s"%(extra)s'
+
+ def __init__(self, url, extra=""):
+ PathError.__init__(self, url, extra=extra)
+
+
+class UnstackableBranchFormat(BzrError):
+
+ _fmt = ("The branch '%(url)s'(%(format)s) is not a stackable format. "
+ "You will need to upgrade the branch to permit branch stacking.")
+
+ def __init__(self, format, url):
+ BzrError.__init__(self)
+ self.format = format
+ self.url = url
+
+
+class UnstackableLocationError(BzrError):
+
+ _fmt = "The branch '%(branch_url)s' cannot be stacked on '%(target_url)s'."
+
+ def __init__(self, branch_url, target_url):
+ BzrError.__init__(self)
+ self.branch_url = branch_url
+ self.target_url = target_url
+
+
+class UnstackableRepositoryFormat(BzrError):
+
+ _fmt = ("The repository '%(url)s'(%(format)s) is not a stackable format. "
+ "You will need to upgrade the repository to permit branch stacking.")
+
+ def __init__(self, format, url):
+ BzrError.__init__(self)
+ self.format = format
+ self.url = url
+
+
+class ReadError(PathError):
+
+ _fmt = """Error reading from %(path)r."""
+
+
+class ShortReadvError(PathError):
+
+ _fmt = ('readv() read %(actual)s bytes rather than %(length)s bytes'
+ ' at %(offset)s for "%(path)s"%(extra)s')
+
+ internal_error = True
+
+ def __init__(self, path, offset, length, actual, extra=None):
+ PathError.__init__(self, path, extra=extra)
+ self.offset = offset
+ self.length = length
+ self.actual = actual
+
+
+class PathNotChild(PathError):
+
+ _fmt = 'Path "%(path)s" is not a child of path "%(base)s"%(extra)s'
+
+ internal_error = False
+
+ def __init__(self, path, base, extra=None):
+ BzrError.__init__(self)
+ self.path = path
+ self.base = base
+ if extra:
+ self.extra = ': ' + str(extra)
+ else:
+ self.extra = ''
+
+
+class InvalidNormalization(PathError):
+
+ _fmt = 'Path "%(path)s" is not unicode normalized'
+
+
+# TODO: This is given a URL; we try to unescape it but doing that from inside
+# the exception object is a bit undesirable.
+# TODO: Probably this behavior should be in a common superclass
+class NotBranchError(PathError):
+
+ _fmt = 'Not a branch: "%(path)s"%(detail)s.'
+
+ def __init__(self, path, detail=None, bzrdir=None):
+ import bzrlib.urlutils as urlutils
+ path = urlutils.unescape_for_display(path, 'ascii')
+ if detail is not None:
+ detail = ': ' + detail
+ self.detail = detail
+ self.bzrdir = bzrdir
+ PathError.__init__(self, path=path)
+
+ def __repr__(self):
+ return '<%s %r>' % (self.__class__.__name__, self.__dict__)
+
+ def _format(self):
+ # XXX: Ideally self.detail would be a property, but Exceptions in
+ # Python 2.4 have to be old-style classes so properties don't work.
+ # Instead we override _format.
+ if self.detail is None:
+ if self.bzrdir is not None:
+ try:
+ self.bzrdir.open_repository()
+ except NoRepositoryPresent:
+ self.detail = ''
+ except Exception:
+ # Just ignore unexpected errors. Raising arbitrary errors
+ # during str(err) can provoke strange bugs. Concretely
+ # Launchpad's codehosting managed to raise NotBranchError
+ # here, and then get stuck in an infinite loop/recursion
+ # trying to str() that error. All this error really cares
+                    # about is that there's no working repository there, and if
+ # open_repository() fails, there probably isn't.
+ self.detail = ''
+ else:
+ self.detail = ': location is a repository'
+ else:
+ self.detail = ''
+ return PathError._format(self)
+
+
+class NoSubmitBranch(PathError):
+
+ _fmt = 'No submit branch available for branch "%(path)s"'
+
+ def __init__(self, branch):
+ import bzrlib.urlutils as urlutils
+ self.path = urlutils.unescape_for_display(branch.base, 'ascii')
+
+
+class AlreadyControlDirError(PathError):
+
+ _fmt = 'A control directory already exists: "%(path)s".'
+
+
+class AlreadyBranchError(PathError):
+
+ _fmt = 'Already a branch: "%(path)s".'
+
+
+class InvalidBranchName(PathError):
+
+ _fmt = "Invalid branch name: %(name)s"
+
+ def __init__(self, name):
+ BzrError.__init__(self)
+ self.name = name
+
+
+class ParentBranchExists(AlreadyBranchError):
+
+ _fmt = 'Parent branch already exists: "%(path)s".'
+
+
+class BranchExistsWithoutWorkingTree(PathError):
+
+ _fmt = 'Directory contains a branch, but no working tree \
+(use bzr checkout if you wish to build a working tree): "%(path)s"'
+
+
+class AtomicFileAlreadyClosed(PathError):
+
+ _fmt = ('"%(function)s" called on an AtomicFile after it was closed:'
+ ' "%(path)s"')
+
+ def __init__(self, path, function):
+ PathError.__init__(self, path=path, extra=None)
+ self.function = function
+
+
+class InaccessibleParent(PathError):
+
+ _fmt = ('Parent not accessible given base "%(base)s" and'
+ ' relative path "%(path)s"')
+
+ def __init__(self, path, base):
+ PathError.__init__(self, path)
+ self.base = base
+
+
+class NoRepositoryPresent(BzrError):
+
+ _fmt = 'No repository present: "%(path)s"'
+ def __init__(self, bzrdir):
+ BzrError.__init__(self)
+ self.path = bzrdir.transport.clone('..').base
+
+
+class UnsupportedFormatError(BzrError):
+
+ _fmt = "Unsupported branch format: %(format)s\nPlease run 'bzr upgrade'"
+
+
+class UnknownFormatError(BzrError):
+
+ _fmt = "Unknown %(kind)s format: %(format)r"
+
+ def __init__(self, format, kind='branch'):
+ self.kind = kind
+ self.format = format
+
+
+class IncompatibleFormat(BzrError):
+
+ _fmt = "Format %(format)s is not compatible with .bzr version %(bzrdir)s."
+
+ def __init__(self, format, bzrdir_format):
+ BzrError.__init__(self)
+ self.format = format
+ self.bzrdir = bzrdir_format
+
+
+class ParseFormatError(BzrError):
+
+ _fmt = "Parse error on line %(lineno)d of %(format)s format: %(line)s"
+
+ def __init__(self, format, lineno, line, text):
+ BzrError.__init__(self)
+ self.format = format
+ self.lineno = lineno
+ self.line = line
+ self.text = text
+
+
+class IncompatibleRepositories(BzrError):
+ """Report an error that two repositories are not compatible.
+
+ Note that the source and target repositories are permitted to be strings:
+ this exception is thrown from the smart server and may refer to a
+ repository the client hasn't opened.
+ """
+
+ _fmt = "%(target)s\n" \
+ "is not compatible with\n" \
+ "%(source)s\n" \
+ "%(details)s"
+
+ def __init__(self, source, target, details=None):
+ if details is None:
+ details = "(no details)"
+ BzrError.__init__(self, target=target, source=source, details=details)
+
+
+class IncompatibleRevision(BzrError):
+
+ _fmt = "Revision is not compatible with %(repo_format)s"
+
+ def __init__(self, repo_format):
+ BzrError.__init__(self)
+ self.repo_format = repo_format
+
+
+class AlreadyVersionedError(BzrError):
+ """Used when a path is expected not to be versioned, but it is."""
+
+ _fmt = "%(context_info)s%(path)s is already versioned."
+
+ def __init__(self, path, context_info=None):
+ """Construct a new AlreadyVersionedError.
+
+ :param path: This is the path which is versioned,
+ which should be in a user friendly form.
+ :param context_info: If given, this is information about the context,
+ which could explain why this is expected to not be versioned.
+ """
+ BzrError.__init__(self)
+ self.path = path
+ if context_info is None:
+ self.context_info = ''
+ else:
+ self.context_info = context_info + ". "
+
+
+class NotVersionedError(BzrError):
+ """Used when a path is expected to be versioned, but it is not."""
+
+ _fmt = "%(context_info)s%(path)s is not versioned."
+
+ def __init__(self, path, context_info=None):
+ """Construct a new NotVersionedError.
+
+ :param path: This is the path which is not versioned,
+ which should be in a user friendly form.
+ :param context_info: If given, this is information about the context,
+ which could explain why this is expected to be versioned.
+ """
+ BzrError.__init__(self)
+ self.path = path
+ if context_info is None:
+ self.context_info = ''
+ else:
+ self.context_info = context_info + ". "
+
+
+class PathsNotVersionedError(BzrError):
+ """Used when reporting several paths which are not versioned"""
+
+ _fmt = "Path(s) are not versioned: %(paths_as_string)s"
+
+ def __init__(self, paths):
+ from bzrlib.osutils import quotefn
+ BzrError.__init__(self)
+ self.paths = paths
+ self.paths_as_string = ' '.join([quotefn(p) for p in paths])
+
+
+class PathsDoNotExist(BzrError):
+
+ _fmt = "Path(s) do not exist: %(paths_as_string)s%(extra)s"
+
+ # used when reporting that paths are neither versioned nor in the working
+ # tree
+
+ def __init__(self, paths, extra=None):
+ # circular import
+ from bzrlib.osutils import quotefn
+ BzrError.__init__(self)
+ self.paths = paths
+ self.paths_as_string = ' '.join([quotefn(p) for p in paths])
+ if extra:
+ self.extra = ': ' + str(extra)
+ else:
+ self.extra = ''
+
+
+class BadFileKindError(BzrError):
+
+ _fmt = 'Cannot operate on "%(filename)s" of unsupported kind "%(kind)s"'
+
+ def __init__(self, filename, kind):
+ BzrError.__init__(self, filename=filename, kind=kind)
+
+
+class BadFilenameEncoding(BzrError):
+
+ _fmt = ('Filename %(filename)r is not valid in your current filesystem'
+ ' encoding %(fs_encoding)s')
+
+ def __init__(self, filename, fs_encoding):
+ BzrError.__init__(self)
+ self.filename = filename
+ self.fs_encoding = fs_encoding
+
+
+class ForbiddenControlFileError(BzrError):
+
+ _fmt = 'Cannot operate on "%(filename)s" because it is a control file'
+
+
+class LockError(InternalBzrError):
+
+ _fmt = "Lock error: %(msg)s"
+
+ # All exceptions from the lock/unlock functions should be from
+ # this exception class. They will be translated as necessary. The
+ # original exception is available as e.original_error
+ #
+ # New code should prefer to raise specific subclasses
+ def __init__(self, msg):
+ self.msg = msg
+
+
+class LockActive(LockError):
+
+ _fmt = "The lock for '%(lock_description)s' is in use and cannot be broken."
+
+ internal_error = False
+
+ def __init__(self, lock_description):
+ self.lock_description = lock_description
+
+
+class CommitNotPossible(LockError):
+
+ _fmt = "A commit was attempted but we do not have a write lock open."
+
+ def __init__(self):
+ pass
+
+
+class AlreadyCommitted(LockError):
+
+ _fmt = "A rollback was requested, but is not able to be accomplished."
+
+ def __init__(self):
+ pass
+
+
+class ReadOnlyError(LockError):
+
+ _fmt = "A write attempt was made in a read only transaction on %(obj)s"
+
+ # TODO: There should also be an error indicating that you need a write
+ # lock and don't have any lock at all... mbp 20070226
+
+ def __init__(self, obj):
+ self.obj = obj
+
+
+class LockFailed(LockError):
+
+ internal_error = False
+
+ _fmt = "Cannot lock %(lock)s: %(why)s"
+
+ def __init__(self, lock, why):
+ LockError.__init__(self, '')
+ self.lock = lock
+ self.why = why
+
+
+class OutSideTransaction(BzrError):
+
+ _fmt = ("A transaction related operation was attempted after"
+ " the transaction finished.")
+
+
+class ObjectNotLocked(LockError):
+
+ _fmt = "%(obj)r is not locked"
+
+ # this can indicate that any particular object is not locked; see also
+ # LockNotHeld which means that a particular *lock* object is not held by
+ # the caller -- perhaps they should be unified.
+ def __init__(self, obj):
+ self.obj = obj
+
+
+class ReadOnlyObjectDirtiedError(ReadOnlyError):
+
+ _fmt = "Cannot change object %(obj)r in read only transaction"
+
+ def __init__(self, obj):
+ self.obj = obj
+
+
+class UnlockableTransport(LockError):
+
+ internal_error = False
+
+ _fmt = "Cannot lock: transport is read only: %(transport)s"
+
+ def __init__(self, transport):
+ self.transport = transport
+
+
+class LockContention(LockError):
+
+ _fmt = 'Could not acquire lock "%(lock)s": %(msg)s'
+
+ internal_error = False
+
+ def __init__(self, lock, msg=''):
+ self.lock = lock
+ self.msg = msg
+
+
+class LockBroken(LockError):
+
+ _fmt = ("Lock was broken while still open: %(lock)s"
+ " - check storage consistency!")
+
+ internal_error = False
+
+ def __init__(self, lock):
+ self.lock = lock
+
+
+class LockBreakMismatch(LockError):
+
+ _fmt = ("Lock was released and re-acquired before being broken:"
+ " %(lock)s: held by %(holder)r, wanted to break %(target)r")
+
+ internal_error = False
+
+ def __init__(self, lock, holder, target):
+ self.lock = lock
+ self.holder = holder
+ self.target = target
+
+
+class LockCorrupt(LockError):
+
+ _fmt = ("Lock is apparently held, but corrupted: %(corruption_info)s\n"
+ "Use 'bzr break-lock' to clear it")
+
+ internal_error = False
+
+ def __init__(self, corruption_info, file_data=None):
+ self.corruption_info = corruption_info
+ self.file_data = file_data
+
+
+class LockNotHeld(LockError):
+
+ _fmt = "Lock not held: %(lock)s"
+
+ internal_error = False
+
+ def __init__(self, lock):
+ self.lock = lock
+
+
+class TokenLockingNotSupported(LockError):
+
+ _fmt = "The object %(obj)s does not support token specifying a token when locking."
+
+ def __init__(self, obj):
+ self.obj = obj
+
+
+class TokenMismatch(LockBroken):
+
+ _fmt = "The lock token %(given_token)r does not match lock token %(lock_token)r."
+
+ internal_error = True
+
+ def __init__(self, given_token, lock_token):
+ self.given_token = given_token
+ self.lock_token = lock_token
+
+
+class PointlessCommit(BzrError):
+
+ _fmt = "No changes to commit"
+
+
+class CannotCommitSelectedFileMerge(BzrError):
+
+ _fmt = 'Selected-file commit of merges is not supported yet:'\
+ ' files %(files_str)s'
+
+ def __init__(self, files):
+ files_str = ', '.join(files)
+ BzrError.__init__(self, files=files, files_str=files_str)
+
+
+class ExcludesUnsupported(BzrError):
+
+ _fmt = ('Excluding paths during commit is not supported by '
+ 'repository at %(repository)r.')
+
+ def __init__(self, repository):
+ BzrError.__init__(self, repository=repository)
+
+
+class BadCommitMessageEncoding(BzrError):
+
+ _fmt = 'The specified commit message contains characters unsupported by '\
+ 'the current encoding.'
+
+
+class UpgradeReadonly(BzrError):
+
+ _fmt = "Upgrade URL cannot work with readonly URLs."
+
+
+class UpToDateFormat(BzrError):
+
+ _fmt = "The branch format %(format)s is already at the most recent format."
+
+ def __init__(self, format):
+ BzrError.__init__(self)
+ self.format = format
+
+
+class StrictCommitFailed(Exception):
+
+ _fmt = "Commit refused because there are unknowns in the tree."
+
+
+class NoSuchRevision(InternalBzrError):
+
+ _fmt = "%(branch)s has no revision %(revision)s"
+
+ def __init__(self, branch, revision):
+ # 'branch' may sometimes be an internal object like a KnitRevisionStore
+ BzrError.__init__(self, branch=branch, revision=revision)
+
+
+class RangeInChangeOption(BzrError):
+
+ _fmt = "Option --change does not accept revision ranges"
+
+
+class NoSuchRevisionSpec(BzrError):
+
+ _fmt = "No namespace registered for string: %(spec)r"
+
+ def __init__(self, spec):
+ BzrError.__init__(self, spec=spec)
+
+
+class NoSuchRevisionInTree(NoSuchRevision):
+ """When using Tree.revision_tree, and the revision is not accessible."""
+
+ _fmt = "The revision id {%(revision_id)s} is not present in the tree %(tree)s."
+
+ def __init__(self, tree, revision_id):
+ BzrError.__init__(self)
+ self.tree = tree
+ self.revision_id = revision_id
+
+
+class InvalidRevisionSpec(BzrError):
+
+ _fmt = ("Requested revision: '%(spec)s' does not exist in branch:"
+ " %(branch_url)s%(extra)s")
+
+ def __init__(self, spec, branch, extra=None):
+ BzrError.__init__(self, branch=branch, spec=spec)
+ self.branch_url = getattr(branch, 'user_url', str(branch))
+ if extra:
+ self.extra = '\n' + str(extra)
+ else:
+ self.extra = ''
+
+
+class AppendRevisionsOnlyViolation(BzrError):
+
+ _fmt = ('Operation denied because it would change the main history,'
+ ' which is not permitted by the append_revisions_only setting on'
+ ' branch "%(location)s".')
+
+ def __init__(self, location):
+ import bzrlib.urlutils as urlutils
+ location = urlutils.unescape_for_display(location, 'ascii')
+ BzrError.__init__(self, location=location)
+
+
+class DivergedBranches(BzrError):
+
+ _fmt = ("These branches have diverged."
+ " Use the missing command to see how.\n"
+ "Use the merge command to reconcile them.")
+
+ def __init__(self, branch1, branch2):
+ self.branch1 = branch1
+ self.branch2 = branch2
+
+
+class NotLefthandHistory(InternalBzrError):
+
+ _fmt = "Supplied history does not follow left-hand parents"
+
+ def __init__(self, history):
+ BzrError.__init__(self, history=history)
+
+
+class UnrelatedBranches(BzrError):
+
+ _fmt = ("Branches have no common ancestor, and"
+ " no merge base revision was specified.")
+
+
+class CannotReverseCherrypick(BzrError):
+
+ _fmt = ('Selected merge cannot perform reverse cherrypicks. Try merge3'
+ ' or diff3.')
+
+
+class NoCommonAncestor(BzrError):
+
+ _fmt = "Revisions have no common ancestor: %(revision_a)s %(revision_b)s"
+
+ def __init__(self, revision_a, revision_b):
+ self.revision_a = revision_a
+ self.revision_b = revision_b
+
+
+class NoCommonRoot(BzrError):
+
+ _fmt = ("Revisions are not derived from the same root: "
+ "%(revision_a)s %(revision_b)s.")
+
+ def __init__(self, revision_a, revision_b):
+ BzrError.__init__(self, revision_a=revision_a, revision_b=revision_b)
+
+
+class NotAncestor(BzrError):
+
+ _fmt = "Revision %(rev_id)s is not an ancestor of %(not_ancestor_id)s"
+
+ def __init__(self, rev_id, not_ancestor_id):
+ BzrError.__init__(self, rev_id=rev_id,
+ not_ancestor_id=not_ancestor_id)
+
+
+class NoCommits(BranchError):
+
+ _fmt = "Branch %(branch)s has no commits."
+
+
+class UnlistableStore(BzrError):
+
+ def __init__(self, store):
+ BzrError.__init__(self, "Store %s is not listable" % store)
+
+
+class UnlistableBranch(BzrError):
+
+ def __init__(self, br):
+ BzrError.__init__(self, "Stores for branch %s are not listable" % br)
+
+
+class BoundBranchOutOfDate(BzrError):
+
+ _fmt = ("Bound branch %(branch)s is out of date with master branch"
+ " %(master)s.%(extra_help)s")
+
+ def __init__(self, branch, master):
+ BzrError.__init__(self)
+ self.branch = branch
+ self.master = master
+ self.extra_help = ''
+
+
+class CommitToDoubleBoundBranch(BzrError):
+
+ _fmt = ("Cannot commit to branch %(branch)s."
+ " It is bound to %(master)s, which is bound to %(remote)s.")
+
+ def __init__(self, branch, master, remote):
+ BzrError.__init__(self)
+ self.branch = branch
+ self.master = master
+ self.remote = remote
+
+
+class OverwriteBoundBranch(BzrError):
+
+ _fmt = "Cannot pull --overwrite to a branch which is bound %(branch)s"
+
+ def __init__(self, branch):
+ BzrError.__init__(self)
+ self.branch = branch
+
+
+class BoundBranchConnectionFailure(BzrError):
+
+ _fmt = ("Unable to connect to target of bound branch %(branch)s"
+ " => %(target)s: %(error)s")
+
+ def __init__(self, branch, target, error):
+ BzrError.__init__(self)
+ self.branch = branch
+ self.target = target
+ self.error = error
+
+
+class WeaveError(BzrError):
+
+ _fmt = "Error in processing weave: %(msg)s"
+
+ def __init__(self, msg=None):
+ BzrError.__init__(self)
+ self.msg = msg
+
+
+class WeaveRevisionAlreadyPresent(WeaveError):
+
+ _fmt = "Revision {%(revision_id)s} already present in %(weave)s"
+
+ def __init__(self, revision_id, weave):
+ WeaveError.__init__(self)
+ self.revision_id = revision_id
+ self.weave = weave
+
+
+class WeaveRevisionNotPresent(WeaveError):
+
+ _fmt = "Revision {%(revision_id)s} not present in %(weave)s"
+
+ def __init__(self, revision_id, weave):
+ WeaveError.__init__(self)
+ self.revision_id = revision_id
+ self.weave = weave
+
+
+class WeaveFormatError(WeaveError):
+
+ _fmt = "Weave invariant violated: %(what)s"
+
+ def __init__(self, what):
+ WeaveError.__init__(self)
+ self.what = what
+
+
+class WeaveParentMismatch(WeaveError):
+
+ _fmt = "Parents are mismatched between two revisions. %(msg)s"
+
+
+class WeaveInvalidChecksum(WeaveError):
+
+ _fmt = "Text did not match its checksum: %(msg)s"
+
+
+class WeaveTextDiffers(WeaveError):
+
+ _fmt = ("Weaves differ on text content. Revision:"
+ " {%(revision_id)s}, %(weave_a)s, %(weave_b)s")
+
+ def __init__(self, revision_id, weave_a, weave_b):
+ WeaveError.__init__(self)
+ self.revision_id = revision_id
+ self.weave_a = weave_a
+ self.weave_b = weave_b
+
+
+class VersionedFileError(BzrError):
+
+ _fmt = "Versioned file error"
+
+
+class RevisionNotPresent(VersionedFileError):
+
+ _fmt = 'Revision {%(revision_id)s} not present in "%(file_id)s".'
+
+ def __init__(self, revision_id, file_id):
+ VersionedFileError.__init__(self)
+ self.revision_id = revision_id
+ self.file_id = file_id
+
+
+class RevisionAlreadyPresent(VersionedFileError):
+
+ _fmt = 'Revision {%(revision_id)s} already present in "%(file_id)s".'
+
+ def __init__(self, revision_id, file_id):
+ VersionedFileError.__init__(self)
+ self.revision_id = revision_id
+ self.file_id = file_id
+
+
+class VersionedFileInvalidChecksum(VersionedFileError):
+
+ _fmt = "Text did not match its checksum: %(msg)s"
+
+
+class KnitError(InternalBzrError):
+
+ _fmt = "Knit error"
+
+
+class KnitCorrupt(KnitError):
+
+ _fmt = "Knit %(filename)s corrupt: %(how)s"
+
+ def __init__(self, filename, how):
+ KnitError.__init__(self)
+ self.filename = filename
+ self.how = how
+
+
+class SHA1KnitCorrupt(KnitCorrupt):
+
+ _fmt = ("Knit %(filename)s corrupt: sha-1 of reconstructed text does not "
+ "match expected sha-1. key %(key)s expected sha %(expected)s actual "
+ "sha %(actual)s")
+
+ def __init__(self, filename, actual, expected, key, content):
+ KnitError.__init__(self)
+ self.filename = filename
+ self.actual = actual
+ self.expected = expected
+ self.key = key
+ self.content = content
+
+
+class KnitDataStreamIncompatible(KnitError):
+ # Not raised anymore, as we can convert data streams. In future we may
+ # need it again for more exotic cases, so we're keeping it around for now.
+
+ _fmt = "Cannot insert knit data stream of format \"%(stream_format)s\" into knit of format \"%(target_format)s\"."
+
+ def __init__(self, stream_format, target_format):
+ self.stream_format = stream_format
+ self.target_format = target_format
+
+
+class KnitDataStreamUnknown(KnitError):
+ # Indicates a data stream we don't know how to handle.
+
+ _fmt = "Cannot parse knit data stream of format \"%(stream_format)s\"."
+
+ def __init__(self, stream_format):
+ self.stream_format = stream_format
+
+
+class KnitHeaderError(KnitError):
+
+ _fmt = 'Knit header error: %(badline)r unexpected for file "%(filename)s".'
+
+ def __init__(self, badline, filename):
+ KnitError.__init__(self)
+ self.badline = badline
+ self.filename = filename
+
+
+class KnitIndexUnknownMethod(KnitError):
+ """Raised when we don't understand the storage method.
+
+ Currently only 'fulltext' and 'line-delta' are supported.
+ """
+
+ _fmt = ("Knit index %(filename)s does not have a known method"
+ " in options: %(options)r")
+
+ def __init__(self, filename, options):
+ KnitError.__init__(self)
+ self.filename = filename
+ self.options = options
+
+
+class RetryWithNewPacks(BzrError):
+ """Raised when we realize that the packs on disk have changed.
+
+    This is meant as more of a signaling exception, raised between the point
+    where a local error occurred and the code that can actually handle the
+    error and retry appropriately.
+ """
+
+ internal_error = True
+
+ _fmt = ("Pack files have changed, reload and retry. context: %(context)s"
+ " %(orig_error)s")
+
+ def __init__(self, context, reload_occurred, exc_info):
+ """create a new RetryWithNewPacks error.
+
+ :param reload_occurred: Set to True if we know that the packs have
+ already been reloaded, and we are failing because of an in-memory
+ cache miss. If set to True then we will ignore if a reload says
+ nothing has changed, because we assume it has already reloaded. If
+ False, then a reload with nothing changed will force an error.
+ :param exc_info: The original exception traceback, so if there is a
+ problem we can raise the original error (value from sys.exc_info())
+ """
+ BzrError.__init__(self)
+ self.context = context
+ self.reload_occurred = reload_occurred
+ self.exc_info = exc_info
+ self.orig_error = exc_info[1]
+ # TODO: The global error handler should probably treat this by
+ # raising/printing the original exception with a bit about
+ # RetryWithNewPacks also not being caught
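+
+
+# Illustrative caller pattern for RetryWithNewPacks (a sketch only; `do_read`
+# and `reload_func` below are placeholder names, not part of bzrlib's API):
+#
+#     try:
+#         result = do_read()
+#     except RetryWithNewPacks, e:
+#         reload_func(e)        # refresh the list of pack files from disk
+#         result = do_read()    # retry now that the new pack list is known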
+
+
+class RetryAutopack(RetryWithNewPacks):
+ """Raised when we are autopacking and we find a missing file.
+
+ Meant as a signaling exception, to tell the autopack code it should try
+ again.
+ """
+
+ internal_error = True
+
+ _fmt = ("Pack files have changed, reload and try autopack again."
+ " context: %(context)s %(orig_error)s")
+
+
+class NoSuchExportFormat(BzrError):
+
+ _fmt = "Export format %(format)r not supported"
+
+ def __init__(self, format):
+ BzrError.__init__(self)
+ self.format = format
+
+
+class TransportError(BzrError):
+
+ _fmt = "Transport error: %(msg)s %(orig_error)s"
+
+ def __init__(self, msg=None, orig_error=None):
+ if msg is None and orig_error is not None:
+ msg = str(orig_error)
+ if orig_error is None:
+ orig_error = ''
+ if msg is None:
+ msg = ''
+ self.msg = msg
+ self.orig_error = orig_error
+ BzrError.__init__(self)
+
+
+class TooManyConcurrentRequests(InternalBzrError):
+
+ _fmt = ("The medium '%(medium)s' has reached its concurrent request limit."
+ " Be sure to finish_writing and finish_reading on the"
+ " currently open request.")
+
+ def __init__(self, medium):
+ self.medium = medium
+
+
+class SmartProtocolError(TransportError):
+
+ _fmt = "Generic bzr smart protocol error: %(details)s"
+
+ def __init__(self, details):
+ self.details = details
+
+
+class UnexpectedProtocolVersionMarker(TransportError):
+
+ _fmt = "Received bad protocol version marker: %(marker)r"
+
+ def __init__(self, marker):
+ self.marker = marker
+
+
+class UnknownSmartMethod(InternalBzrError):
+
+ _fmt = "The server does not recognise the '%(verb)s' request."
+
+ def __init__(self, verb):
+ self.verb = verb
+
+
+class SmartMessageHandlerError(InternalBzrError):
+
+ _fmt = ("The message handler raised an exception:\n"
+ "%(traceback_text)s")
+
+ def __init__(self, exc_info):
+ import traceback
+ # GZ 2010-08-10: Cycle with exc_tb/exc_info affects at least one test
+ self.exc_type, self.exc_value, self.exc_tb = exc_info
+ self.exc_info = exc_info
+ traceback_strings = traceback.format_exception(
+ self.exc_type, self.exc_value, self.exc_tb)
+ self.traceback_text = ''.join(traceback_strings)
+
+
+# A set of semi-meaningful errors which can be thrown
+class TransportNotPossible(TransportError):
+
+ _fmt = "Transport operation not possible: %(msg)s %(orig_error)s"
+
+
+class ConnectionError(TransportError):
+
+ _fmt = "Connection error: %(msg)s %(orig_error)s"
+
+
+class SocketConnectionError(ConnectionError):
+
+ _fmt = "%(msg)s %(host)s%(port)s%(orig_error)s"
+
+ def __init__(self, host, port=None, msg=None, orig_error=None):
+ if msg is None:
+ msg = 'Failed to connect to'
+ if orig_error is None:
+ orig_error = ''
+ else:
+ orig_error = '; ' + str(orig_error)
+ ConnectionError.__init__(self, msg=msg, orig_error=orig_error)
+ self.host = host
+ if port is None:
+ self.port = ''
+ else:
+ self.port = ':%s' % port
+
+
+# XXX: This is also used for unexpected end of file, which is different at the
+# TCP level from "connection reset".
+class ConnectionReset(TransportError):
+
+ _fmt = "Connection closed: %(msg)s %(orig_error)s"
+
+
+class ConnectionTimeout(ConnectionError):
+
+ _fmt = "Connection Timeout: %(msg)s%(orig_error)s"
+
+
+class InvalidRange(TransportError):
+
+ _fmt = "Invalid range access in %(path)s at %(offset)s: %(msg)s"
+
+ def __init__(self, path, offset, msg=None):
+ TransportError.__init__(self, msg)
+ self.path = path
+ self.offset = offset
+
+
+class InvalidHttpResponse(TransportError):
+
+ _fmt = "Invalid http response for %(path)s: %(msg)s%(orig_error)s"
+
+ def __init__(self, path, msg, orig_error=None):
+ self.path = path
+ if orig_error is None:
+ orig_error = ''
+ else:
+ # This is reached for obscure and unusual errors so we want to
+ # preserve as much info as possible to ease debug.
+ orig_error = ': %r' % (orig_error,)
+ TransportError.__init__(self, msg, orig_error=orig_error)
+
+
+class CertificateError(TransportError):
+
+ _fmt = "Certificate error: %(error)s"
+
+ def __init__(self, error):
+ self.error = error
+
+
+class InvalidHttpRange(InvalidHttpResponse):
+
+ _fmt = "Invalid http range %(range)r for %(path)s: %(msg)s"
+
+ def __init__(self, path, range, msg):
+ self.range = range
+ InvalidHttpResponse.__init__(self, path, msg)
+
+
+class HttpBoundaryMissing(InvalidHttpResponse):
+ """A multipart response ends with no boundary marker.
+
+ This is a special case caused by buggy proxies, described in
+ <https://bugs.launchpad.net/bzr/+bug/198646>.
+ """
+
+ _fmt = "HTTP MIME Boundary missing for %(path)s: %(msg)s"
+
+ def __init__(self, path, msg):
+ InvalidHttpResponse.__init__(self, path, msg)
+
+
+class InvalidHttpContentType(InvalidHttpResponse):
+
+ _fmt = 'Invalid http Content-type "%(ctype)s" for %(path)s: %(msg)s'
+
+ def __init__(self, path, ctype, msg):
+ self.ctype = ctype
+ InvalidHttpResponse.__init__(self, path, msg)
+
+
+class RedirectRequested(TransportError):
+
+ _fmt = '%(source)s is%(permanently)s redirected to %(target)s'
+
+ def __init__(self, source, target, is_permanent=False):
+ self.source = source
+ self.target = target
+ if is_permanent:
+ self.permanently = ' permanently'
+ else:
+ self.permanently = ''
+ TransportError.__init__(self)
+
+
+class TooManyRedirections(TransportError):
+
+ _fmt = "Too many redirections"
+
+
+class ConflictsInTree(BzrError):
+
+ _fmt = "Working tree has conflicts."
+
+
+class ConfigContentError(BzrError):
+
+ _fmt = "Config file %(filename)s is not UTF-8 encoded\n"
+
+ def __init__(self, filename):
+ BzrError.__init__(self)
+ self.filename = filename
+
+
+class ParseConfigError(BzrError):
+
+ _fmt = "Error(s) parsing config file %(filename)s:\n%(errors)s"
+
+ def __init__(self, errors, filename):
+ BzrError.__init__(self)
+ self.filename = filename
+ self.errors = '\n'.join(e.msg for e in errors)
+
+
+class ConfigOptionValueError(BzrError):
+
+ _fmt = ('Bad value "%(value)s" for option "%(name)s".\n'
+ 'See ``bzr help %(name)s``')
+
+ def __init__(self, name, value):
+ BzrError.__init__(self, name=name, value=value)
+
+
+class NoEmailInUsername(BzrError):
+
+ _fmt = "%(username)r does not seem to contain a reasonable email address"
+
+ def __init__(self, username):
+ BzrError.__init__(self)
+ self.username = username
+
+
+class SigningFailed(BzrError):
+
+ _fmt = 'Failed to GPG sign data with command "%(command_line)s"'
+
+ def __init__(self, command_line):
+ BzrError.__init__(self, command_line=command_line)
+
+
+class SignatureVerificationFailed(BzrError):
+
+ _fmt = 'Failed to verify GPG signature data with error "%(error)s"'
+
+ def __init__(self, error):
+ BzrError.__init__(self, error=error)
+
+
+class DependencyNotPresent(BzrError):
+
+ _fmt = 'Unable to import library "%(library)s": %(error)s'
+
+ def __init__(self, library, error):
+ BzrError.__init__(self, library=library, error=error)
+
+
+class GpgmeNotInstalled(DependencyNotPresent):
+
+ _fmt = 'python-gpgme is not installed, it is needed to verify signatures'
+
+ def __init__(self, error):
+ DependencyNotPresent.__init__(self, 'gpgme', error)
+
+
+class WorkingTreeNotRevision(BzrError):
+
+ _fmt = ("The working tree for %(basedir)s has changed since"
+ " the last commit, but weave merge requires that it be"
+ " unchanged")
+
+ def __init__(self, tree):
+ BzrError.__init__(self, basedir=tree.basedir)
+
+
+class CantReprocessAndShowBase(BzrError):
+
+ _fmt = ("Can't reprocess and show base, because reprocessing obscures "
+ "the relationship of conflicting lines to the base")
+
+
+class GraphCycleError(BzrError):
+
+ _fmt = "Cycle in graph %(graph)r"
+
+ def __init__(self, graph):
+ BzrError.__init__(self)
+ self.graph = graph
+
+
+class WritingCompleted(InternalBzrError):
+
+ _fmt = ("The MediumRequest '%(request)s' has already had finish_writing "
+ "called upon it - accept bytes may not be called anymore.")
+
+ def __init__(self, request):
+ self.request = request
+
+
+class WritingNotComplete(InternalBzrError):
+
+ _fmt = ("The MediumRequest '%(request)s' has not has finish_writing "
+ "called upon it - until the write phase is complete no "
+ "data may be read.")
+
+ def __init__(self, request):
+ self.request = request
+
+
+class NotConflicted(BzrError):
+
+ _fmt = "File %(filename)s is not conflicted."
+
+ def __init__(self, filename):
+ BzrError.__init__(self)
+ self.filename = filename
+
+
+class MediumNotConnected(InternalBzrError):
+
+ _fmt = """The medium '%(medium)s' is not connected."""
+
+ def __init__(self, medium):
+ self.medium = medium
+
+
+class MustUseDecorated(Exception):
+
+ _fmt = "A decorating function has requested its original command be used."
+
+
+class NoBundleFound(BzrError):
+
+ _fmt = 'No bundle was found in "%(filename)s".'
+
+ def __init__(self, filename):
+ BzrError.__init__(self)
+ self.filename = filename
+
+
+class BundleNotSupported(BzrError):
+
+ _fmt = "Unable to handle bundle version %(version)s: %(msg)s"
+
+ def __init__(self, version, msg):
+ BzrError.__init__(self)
+ self.version = version
+ self.msg = msg
+
+
+class MissingText(BzrError):
+
+ _fmt = ("Branch %(base)s is missing revision"
+ " %(text_revision)s of %(file_id)s")
+
+ def __init__(self, branch, text_revision, file_id):
+ BzrError.__init__(self)
+ self.branch = branch
+ self.base = branch.base
+ self.text_revision = text_revision
+ self.file_id = file_id
+
+
+class DuplicateFileId(BzrError):
+
+ _fmt = "File id {%(file_id)s} already exists in inventory as %(entry)s"
+
+ def __init__(self, file_id, entry):
+ BzrError.__init__(self)
+ self.file_id = file_id
+ self.entry = entry
+
+
+class DuplicateKey(BzrError):
+
+ _fmt = "Key %(key)s is already present in map"
+
+
+class DuplicateHelpPrefix(BzrError):
+
+ _fmt = "The prefix %(prefix)s is in the help search path twice."
+
+ def __init__(self, prefix):
+ self.prefix = prefix
+
+
+class MalformedTransform(InternalBzrError):
+
+ _fmt = "Tree transform is malformed %(conflicts)r"
+
+
+class NoFinalPath(BzrError):
+
+ _fmt = ("No final name for trans_id %(trans_id)r\n"
+ "file-id: %(file_id)r\n"
+ "root trans-id: %(root_trans_id)r\n")
+
+ def __init__(self, trans_id, transform):
+ self.trans_id = trans_id
+ self.file_id = transform.final_file_id(trans_id)
+ self.root_trans_id = transform.root
+
+
+class BzrBadParameter(InternalBzrError):
+
+ _fmt = "Bad parameter: %(param)r"
+
+ # This exception should never be thrown, but it is a base class for all
+ # parameter-to-function errors.
+
+ def __init__(self, param):
+ BzrError.__init__(self)
+ self.param = param
+
+
+class BzrBadParameterNotUnicode(BzrBadParameter):
+
+ _fmt = "Parameter %(param)s is neither unicode nor utf8."
+
+
+class ReusingTransform(BzrError):
+
+ _fmt = "Attempt to reuse a transform that has already been applied."
+
+
+class CantMoveRoot(BzrError):
+
+ _fmt = "Moving the root directory is not supported at this time"
+
+
+class TransformRenameFailed(BzrError):
+
+ _fmt = "Failed to rename %(from_path)s to %(to_path)s: %(why)s"
+
+ def __init__(self, from_path, to_path, why, errno):
+ self.from_path = from_path
+ self.to_path = to_path
+ self.why = why
+ self.errno = errno
+
+
+class BzrMoveFailedError(BzrError):
+
+ _fmt = ("Could not move %(from_path)s%(operator)s %(to_path)s"
+ "%(_has_extra)s%(extra)s")
+
+ def __init__(self, from_path='', to_path='', extra=None):
+ from bzrlib.osutils import splitpath
+ BzrError.__init__(self)
+ if extra:
+ self.extra, self._has_extra = extra, ': '
+ else:
+ self.extra = self._has_extra = ''
+
+ has_from = len(from_path) > 0
+ has_to = len(to_path) > 0
+ if has_from:
+ self.from_path = splitpath(from_path)[-1]
+ else:
+ self.from_path = ''
+
+ if has_to:
+ self.to_path = splitpath(to_path)[-1]
+ else:
+ self.to_path = ''
+
+ self.operator = ""
+ if has_from and has_to:
+ self.operator = " =>"
+ elif has_from:
+ self.from_path = "from " + from_path
+ elif has_to:
+ self.operator = "to"
+ else:
+ self.operator = "file"
+
+
+class BzrRenameFailedError(BzrMoveFailedError):
+
+ _fmt = ("Could not rename %(from_path)s%(operator)s %(to_path)s"
+ "%(_has_extra)s%(extra)s")
+
+ def __init__(self, from_path, to_path, extra=None):
+ BzrMoveFailedError.__init__(self, from_path, to_path, extra)
+
+
+class BzrBadParameterNotString(BzrBadParameter):
+
+ _fmt = "Parameter %(param)s is not a string or unicode string."
+
+
+class BzrBadParameterMissing(BzrBadParameter):
+
+ _fmt = "Parameter %(param)s is required but not present."
+
+
+class BzrBadParameterUnicode(BzrBadParameter):
+
+ _fmt = ("Parameter %(param)s is unicode but"
+ " only byte-strings are permitted.")
+
+
+class BzrBadParameterContainsNewline(BzrBadParameter):
+
+ _fmt = "Parameter %(param)s contains a newline."
+
+
+class ParamikoNotPresent(DependencyNotPresent):
+
+ _fmt = "Unable to import paramiko (required for sftp support): %(error)s"
+
+ def __init__(self, error):
+ DependencyNotPresent.__init__(self, 'paramiko', error)
+
+
+class PointlessMerge(BzrError):
+
+ _fmt = "Nothing to merge."
+
+
+class UninitializableFormat(BzrError):
+
+ _fmt = "Format %(format)s cannot be initialised by this version of bzr."
+
+ def __init__(self, format):
+ BzrError.__init__(self)
+ self.format = format
+
+
+class BadConversionTarget(BzrError):
+
+ _fmt = "Cannot convert from format %(from_format)s to format %(format)s." \
+ " %(problem)s"
+
+ def __init__(self, problem, format, from_format=None):
+ BzrError.__init__(self)
+ self.problem = problem
+ self.format = format
+ self.from_format = from_format or '(unspecified)'
+
+
+class NoDiffFound(BzrError):
+
+ _fmt = 'Could not find an appropriate Differ for file "%(path)s"'
+
+ def __init__(self, path):
+        BzrError.__init__(self, path=path)
+
+
+class ExecutableMissing(BzrError):
+
+ _fmt = "%(exe_name)s could not be found on this machine"
+
+ def __init__(self, exe_name):
+ BzrError.__init__(self, exe_name=exe_name)
+
+
+class NoDiff(BzrError):
+
+ _fmt = "Diff is not installed on this machine: %(msg)s"
+
+ def __init__(self, msg):
+ BzrError.__init__(self, msg=msg)
+
+
+class NoDiff3(BzrError):
+
+ _fmt = "Diff3 is not installed on this machine."
+
+
+class ExistingContent(BzrError):
+ # Added in bzrlib 0.92, used by VersionedFile.add_lines.
+
+ _fmt = "The content being inserted is already present."
+
+
+class ExistingLimbo(BzrError):
+
+ _fmt = """This tree contains left-over files from a failed operation.
+ Please examine %(limbo_dir)s to see if it contains any files you wish to
+ keep, and delete it when you are done."""
+
+ def __init__(self, limbo_dir):
+ BzrError.__init__(self)
+ self.limbo_dir = limbo_dir
+
+
+class ExistingPendingDeletion(BzrError):
+
+ _fmt = """This tree contains left-over files from a failed operation.
+ Please examine %(pending_deletion)s to see if it contains any files you
+ wish to keep, and delete it when you are done."""
+
+ def __init__(self, pending_deletion):
+ BzrError.__init__(self, pending_deletion=pending_deletion)
+
+
+class ImmortalLimbo(BzrError):
+
+ _fmt = """Unable to delete transform temporary directory %(limbo_dir)s.
+ Please examine %(limbo_dir)s to see if it contains any files you wish to
+ keep, and delete it when you are done."""
+
+ def __init__(self, limbo_dir):
+ BzrError.__init__(self)
+ self.limbo_dir = limbo_dir
+
+
+class ImmortalPendingDeletion(BzrError):
+
+ _fmt = ("Unable to delete transform temporary directory "
+ "%(pending_deletion)s. Please examine %(pending_deletion)s to see if it "
+ "contains any files you wish to keep, and delete it when you are done.")
+
+ def __init__(self, pending_deletion):
+ BzrError.__init__(self, pending_deletion=pending_deletion)
+
+
+class OutOfDateTree(BzrError):
+
+ _fmt = "Working tree is out of date, please run 'bzr update'.%(more)s"
+
+ def __init__(self, tree, more=None):
+ if more is None:
+ more = ''
+ else:
+ more = ' ' + more
+ BzrError.__init__(self)
+ self.tree = tree
+ self.more = more
+
+
+class PublicBranchOutOfDate(BzrError):
+
+ _fmt = 'Public branch "%(public_location)s" lacks revision '\
+ '"%(revstring)s".'
+
+ def __init__(self, public_location, revstring):
+ import bzrlib.urlutils as urlutils
+ public_location = urlutils.unescape_for_display(public_location,
+ 'ascii')
+ BzrError.__init__(self, public_location=public_location,
+ revstring=revstring)
+
+
+class MergeModifiedFormatError(BzrError):
+
+ _fmt = "Error in merge modified format"
+
+
+class ConflictFormatError(BzrError):
+
+ _fmt = "Format error in conflict listings"
+
+
+class CorruptDirstate(BzrError):
+
+ _fmt = ("Inconsistency in dirstate file %(dirstate_path)s.\n"
+ "Error: %(description)s")
+
+ def __init__(self, dirstate_path, description):
+ BzrError.__init__(self)
+ self.dirstate_path = dirstate_path
+ self.description = description
+
+
+class CorruptRepository(BzrError):
+
+ _fmt = ("An error has been detected in the repository %(repo_path)s.\n"
+ "Please run bzr reconcile on this repository.")
+
+ def __init__(self, repo):
+ BzrError.__init__(self)
+ self.repo_path = repo.user_url
+
+
+class InconsistentDelta(BzrError):
+ """Used when we get a delta that is not valid."""
+
+ _fmt = ("An inconsistent delta was supplied involving %(path)r,"
+ " %(file_id)r\nreason: %(reason)s")
+
+ def __init__(self, path, file_id, reason):
+ BzrError.__init__(self)
+ self.path = path
+ self.file_id = file_id
+ self.reason = reason
+
+
+class InconsistentDeltaDelta(InconsistentDelta):
+ """Used when we get a delta that is not valid."""
+
+ _fmt = ("An inconsistent delta was supplied: %(delta)r"
+ "\nreason: %(reason)s")
+
+ def __init__(self, delta, reason):
+ BzrError.__init__(self)
+ self.delta = delta
+ self.reason = reason
+
+
+class UpgradeRequired(BzrError):
+
+ _fmt = "To use this feature you must upgrade your branch at %(path)s."
+
+ def __init__(self, path):
+ BzrError.__init__(self)
+ self.path = path
+
+
+class RepositoryUpgradeRequired(UpgradeRequired):
+
+ _fmt = "To use this feature you must upgrade your repository at %(path)s."
+
+
+class RichRootUpgradeRequired(UpgradeRequired):
+
+ _fmt = ("To use this feature you must upgrade your branch at %(path)s to"
+ " a format which supports rich roots.")
+
+
+class LocalRequiresBoundBranch(BzrError):
+
+ _fmt = "Cannot perform local-only commits on unbound branches."
+
+
+class UnsupportedOperation(BzrError):
+
+ _fmt = ("The method %(mname)s is not supported on"
+ " objects of type %(tname)s.")
+
+ def __init__(self, method, method_self):
+ self.method = method
+ self.mname = method.__name__
+ self.tname = type(method_self).__name__
+
+
+class CannotSetRevisionId(UnsupportedOperation):
+ """Raised when a commit is attempting to set a revision id but cant."""
+
+
+class NonAsciiRevisionId(UnsupportedOperation):
+ """Raised when a commit is attempting to set a non-ascii revision id
+    but can't.
+ """
+
+
+class GhostTagsNotSupported(BzrError):
+
+ _fmt = "Ghost tags not supported by format %(format)r."
+
+ def __init__(self, format):
+ self.format = format
+
+
+class BinaryFile(BzrError):
+
+ _fmt = "File is binary but should be text."
+
+
+class IllegalPath(BzrError):
+
+ _fmt = "The path %(path)s is not permitted on this platform"
+
+ def __init__(self, path):
+ BzrError.__init__(self)
+ self.path = path
+
+
+class TestamentMismatch(BzrError):
+
+ _fmt = """Testament did not match expected value.
+ For revision_id {%(revision_id)s}, expected {%(expected)s}, measured
+ {%(measured)s}"""
+
+ def __init__(self, revision_id, expected, measured):
+ self.revision_id = revision_id
+ self.expected = expected
+ self.measured = measured
+
+
+class NotABundle(BzrError):
+
+ _fmt = "Not a bzr revision-bundle: %(text)r"
+
+ def __init__(self, text):
+ BzrError.__init__(self)
+ self.text = text
+
+
+class BadBundle(BzrError):
+
+ _fmt = "Bad bzr revision-bundle: %(text)r"
+
+ def __init__(self, text):
+ BzrError.__init__(self)
+ self.text = text
+
+
+class MalformedHeader(BadBundle):
+
+ _fmt = "Malformed bzr revision-bundle header: %(text)r"
+
+
+class MalformedPatches(BadBundle):
+
+ _fmt = "Malformed patches in bzr revision-bundle: %(text)r"
+
+
+class MalformedFooter(BadBundle):
+
+ _fmt = "Malformed footer in bzr revision-bundle: %(text)r"
+
+
+class UnsupportedEOLMarker(BadBundle):
+
+ _fmt = "End of line marker was not \\n in bzr revision-bundle"
+
+ def __init__(self):
+ # XXX: BadBundle's constructor assumes there's explanatory text,
+ # but for this there is not
+ BzrError.__init__(self)
+
+
+class IncompatibleBundleFormat(BzrError):
+
+ _fmt = "Bundle format %(bundle_format)s is incompatible with %(other)s"
+
+ def __init__(self, bundle_format, other):
+ BzrError.__init__(self)
+ self.bundle_format = bundle_format
+ self.other = other
+
+
+class BadInventoryFormat(BzrError):
+
+ _fmt = "Root class for inventory serialization errors"
+
+
+class UnexpectedInventoryFormat(BadInventoryFormat):
+
+ _fmt = "The inventory was not in the expected format:\n %(msg)s"
+
+ def __init__(self, msg):
+ BadInventoryFormat.__init__(self, msg=msg)
+
+
+class RootNotRich(BzrError):
+
+ _fmt = """This operation requires rich root data storage"""
+
+
+class NoSmartMedium(InternalBzrError):
+
+ _fmt = "The transport '%(transport)s' cannot tunnel the smart protocol."
+
+ def __init__(self, transport):
+ self.transport = transport
+
+
+class UnknownSSH(BzrError):
+
+ _fmt = "Unrecognised value for BZR_SSH environment variable: %(vendor)s"
+
+ def __init__(self, vendor):
+ BzrError.__init__(self)
+ self.vendor = vendor
+
+
+class SSHVendorNotFound(BzrError):
+
+ _fmt = ("Don't know how to handle SSH connections."
+ " Please set BZR_SSH environment variable.")
+
+
+class GhostRevisionsHaveNoRevno(BzrError):
+ """When searching for revnos, if we encounter a ghost, we are stuck"""
+
+ _fmt = ("Could not determine revno for {%(revision_id)s} because"
+ " its ancestry shows a ghost at {%(ghost_revision_id)s}")
+
+ def __init__(self, revision_id, ghost_revision_id):
+ self.revision_id = revision_id
+ self.ghost_revision_id = ghost_revision_id
+
+
+class GhostRevisionUnusableHere(BzrError):
+
+ _fmt = "Ghost revision {%(revision_id)s} cannot be used here."
+
+ def __init__(self, revision_id):
+ BzrError.__init__(self)
+ self.revision_id = revision_id
+
+
+class IllegalUseOfScopeReplacer(InternalBzrError):
+
+ _fmt = ("ScopeReplacer object %(name)r was used incorrectly:"
+ " %(msg)s%(extra)s")
+
+ def __init__(self, name, msg, extra=None):
+ BzrError.__init__(self)
+ self.name = name
+ self.msg = msg
+ if extra:
+ self.extra = ': ' + str(extra)
+ else:
+ self.extra = ''
+
+
+class InvalidImportLine(InternalBzrError):
+
+ _fmt = "Not a valid import statement: %(msg)\n%(text)s"
+
+ def __init__(self, text, msg):
+ BzrError.__init__(self)
+ self.text = text
+ self.msg = msg
+
+
+class ImportNameCollision(InternalBzrError):
+
+ _fmt = ("Tried to import an object to the same name as"
+ " an existing object. %(name)s")
+
+ def __init__(self, name):
+ BzrError.__init__(self)
+ self.name = name
+
+
+class NotAMergeDirective(BzrError):
+ """File starting with %(firstline)r is not a merge directive"""
+ def __init__(self, firstline):
+ BzrError.__init__(self, firstline=firstline)
+
+
+class NoMergeSource(BzrError):
+ """Raise if no merge source was specified for a merge directive"""
+
+ _fmt = "A merge directive must provide either a bundle or a public"\
+ " branch location."
+
+
+class IllegalMergeDirectivePayload(BzrError):
+ """A merge directive contained something other than a patch or bundle"""
+
+ _fmt = "Bad merge directive payload %(start)r"
+
+ def __init__(self, start):
+        BzrError.__init__(self)
+ self.start = start
+
+
+class PatchVerificationFailed(BzrError):
+ """A patch from a merge directive could not be verified"""
+
+ _fmt = "Preview patch does not match requested changes."
+
+
+class PatchMissing(BzrError):
+ """Raise a patch type was specified but no patch supplied"""
+
+ _fmt = "Patch_type was %(patch_type)s, but no patch was supplied."
+
+ def __init__(self, patch_type):
+ BzrError.__init__(self)
+ self.patch_type = patch_type
+
+
+class TargetNotBranch(BzrError):
+ """A merge directive's target branch is required, but isn't a branch"""
+
+ _fmt = ("Your branch does not have all of the revisions required in "
+ "order to merge this merge directive and the target "
+ "location specified in the merge directive is not a branch: "
+ "%(location)s.")
+
+ def __init__(self, location):
+ BzrError.__init__(self)
+ self.location = location
+
+
+class UnsupportedInventoryKind(BzrError):
+
+ _fmt = """Unsupported entry kind %(kind)s"""
+
+ def __init__(self, kind):
+ self.kind = kind
+
+
+class BadSubsumeSource(BzrError):
+
+ _fmt = "Can't subsume %(other_tree)s into %(tree)s. %(reason)s"
+
+ def __init__(self, tree, other_tree, reason):
+ self.tree = tree
+ self.other_tree = other_tree
+ self.reason = reason
+
+
+class SubsumeTargetNeedsUpgrade(BzrError):
+
+ _fmt = """Subsume target %(other_tree)s needs to be upgraded."""
+
+ def __init__(self, other_tree):
+ self.other_tree = other_tree
+
+
+class BadReferenceTarget(InternalBzrError):
+
+ _fmt = "Can't add reference to %(other_tree)s into %(tree)s." \
+ "%(reason)s"
+
+ def __init__(self, tree, other_tree, reason):
+ self.tree = tree
+ self.other_tree = other_tree
+ self.reason = reason
+
+
+class NoSuchTag(BzrError):
+
+ _fmt = "No such tag: %(tag_name)s"
+
+ def __init__(self, tag_name):
+ self.tag_name = tag_name
+
+
+class TagsNotSupported(BzrError):
+
+ _fmt = ("Tags not supported by %(branch)s;"
+ " you may be able to use bzr upgrade.")
+
+ def __init__(self, branch):
+ self.branch = branch
+
+
+class TagAlreadyExists(BzrError):
+
+ _fmt = "Tag %(tag_name)s already exists."
+
+ def __init__(self, tag_name):
+ self.tag_name = tag_name
+
+
+class MalformedBugIdentifier(BzrError):
+
+ _fmt = ('Did not understand bug identifier %(bug_id)s: %(reason)s. '
+ 'See "bzr help bugs" for more information on this feature.')
+
+ def __init__(self, bug_id, reason):
+ self.bug_id = bug_id
+ self.reason = reason
+
+
+class InvalidBugTrackerURL(BzrError):
+
+ _fmt = ("The URL for bug tracker \"%(abbreviation)s\" doesn't "
+ "contain {id}: %(url)s")
+
+ def __init__(self, abbreviation, url):
+ self.abbreviation = abbreviation
+ self.url = url
+
+
+class UnknownBugTrackerAbbreviation(BzrError):
+
+ _fmt = ("Cannot find registered bug tracker called %(abbreviation)s "
+ "on %(branch)s")
+
+ def __init__(self, abbreviation, branch):
+ self.abbreviation = abbreviation
+ self.branch = branch
+
+
+class InvalidLineInBugsProperty(BzrError):
+
+ _fmt = ("Invalid line in bugs property: '%(line)s'")
+
+ def __init__(self, line):
+ self.line = line
+
+
+class InvalidBugStatus(BzrError):
+
+ _fmt = ("Invalid bug status: '%(status)s'")
+
+ def __init__(self, status):
+ self.status = status
+
+
+class UnexpectedSmartServerResponse(BzrError):
+
+ _fmt = "Could not understand response from smart server: %(response_tuple)r"
+
+ def __init__(self, response_tuple):
+ self.response_tuple = response_tuple
+
+
+class ErrorFromSmartServer(BzrError):
+ """An error was received from a smart server.
+
+ :seealso: UnknownErrorFromSmartServer
+ """
+
+ _fmt = "Error received from smart server: %(error_tuple)r"
+
+ internal_error = True
+
+ def __init__(self, error_tuple):
+ self.error_tuple = error_tuple
+ try:
+ self.error_verb = error_tuple[0]
+ except IndexError:
+ self.error_verb = None
+ self.error_args = error_tuple[1:]
+
+
+class UnknownErrorFromSmartServer(BzrError):
+ """An ErrorFromSmartServer could not be translated into a typical bzrlib
+ error.
+
+ This is distinct from ErrorFromSmartServer so that it is possible to
+ distinguish between the following two cases:
+
+    - ErrorFromSmartServer was uncaught. This is a logic error in the client
+ and so should provoke a traceback to the user.
+ - ErrorFromSmartServer was caught but its error_tuple could not be
+ translated. This is probably because the server sent us garbage, and
+ should not provoke a traceback.
+ """
+
+ _fmt = "Server sent an unexpected error: %(error_tuple)r"
+
+ internal_error = False
+
+ def __init__(self, error_from_smart_server):
+ """Constructor.
+
+ :param error_from_smart_server: An ErrorFromSmartServer instance.
+ """
+ self.error_from_smart_server = error_from_smart_server
+ self.error_tuple = error_from_smart_server.error_tuple
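+
+
+# Illustrative handling pattern (a sketch; `client_call` and `translators`
+# are hypothetical names, not part of bzrlib's API):
+#
+#     try:
+#         response = client_call('SomeVerb', args)
+#     except ErrorFromSmartServer, err:
+#         if err.error_verb in translators:
+#             raise translators[err.error_verb](err)
+#         raise UnknownErrorFromSmartServer(err)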
+
+
+class ContainerError(BzrError):
+ """Base class of container errors."""
+
+
+class UnknownContainerFormatError(ContainerError):
+
+ _fmt = "Unrecognised container format: %(container_format)r"
+
+ def __init__(self, container_format):
+ self.container_format = container_format
+
+
+class UnexpectedEndOfContainerError(ContainerError):
+
+ _fmt = "Unexpected end of container stream"
+
+
+class UnknownRecordTypeError(ContainerError):
+
+ _fmt = "Unknown record type: %(record_type)r"
+
+ def __init__(self, record_type):
+ self.record_type = record_type
+
+
+class InvalidRecordError(ContainerError):
+
+ _fmt = "Invalid record: %(reason)s"
+
+ def __init__(self, reason):
+ self.reason = reason
+
+
+class ContainerHasExcessDataError(ContainerError):
+
+ _fmt = "Container has data after end marker: %(excess)r"
+
+ def __init__(self, excess):
+ self.excess = excess
+
+
+class DuplicateRecordNameError(ContainerError):
+
+ _fmt = "Container has multiple records with the same name: %(name)s"
+
+ def __init__(self, name):
+ self.name = name.decode("utf-8")
+
+
+class NoDestinationAddress(InternalBzrError):
+
+ _fmt = "Message does not have a destination address."
+
+
+class RepositoryDataStreamError(BzrError):
+
+ _fmt = "Corrupt or incompatible data stream: %(reason)s"
+
+ def __init__(self, reason):
+ self.reason = reason
+
+
+class SMTPError(BzrError):
+
+ _fmt = "SMTP error: %(error)s"
+
+ def __init__(self, error):
+ self.error = error
+
+
+class NoMessageSupplied(BzrError):
+
+ _fmt = "No message supplied."
+
+
+class NoMailAddressSpecified(BzrError):
+
+ _fmt = "No mail-to address (--mail-to) or output (-o) specified."
+
+
+class MailClientNotFound(BzrError):
+
+ _fmt = "Unable to find mail client with the following names:"\
+ " %(mail_command_list_string)s"
+
+ def __init__(self, mail_command_list):
+ mail_command_list_string = ', '.join(mail_command_list)
+ BzrError.__init__(self, mail_command_list=mail_command_list,
+ mail_command_list_string=mail_command_list_string)
+
+
+class SMTPConnectionRefused(SMTPError):
+
+ _fmt = "SMTP connection to %(host)s refused"
+
+ def __init__(self, error, host):
+ self.error = error
+ self.host = host
+
+
+class DefaultSMTPConnectionRefused(SMTPConnectionRefused):
+
+ _fmt = "Please specify smtp_server. No server at default %(host)s."
+
+
+class BzrDirError(BzrError):
+
+ def __init__(self, bzrdir):
+ import bzrlib.urlutils as urlutils
+ display_url = urlutils.unescape_for_display(bzrdir.user_url,
+ 'ascii')
+ BzrError.__init__(self, bzrdir=bzrdir, display_url=display_url)
+
+
+class UnsyncedBranches(BzrDirError):
+
+ _fmt = ("'%(display_url)s' is not in sync with %(target_url)s. See"
+ " bzr help sync-for-reconfigure.")
+
+ def __init__(self, bzrdir, target_branch):
+ BzrDirError.__init__(self, bzrdir)
+ import bzrlib.urlutils as urlutils
+ self.target_url = urlutils.unescape_for_display(target_branch.base,
+ 'ascii')
+
+
+class AlreadyBranch(BzrDirError):
+
+ _fmt = "'%(display_url)s' is already a branch."
+
+
+class AlreadyTree(BzrDirError):
+
+ _fmt = "'%(display_url)s' is already a tree."
+
+
+class AlreadyCheckout(BzrDirError):
+
+ _fmt = "'%(display_url)s' is already a checkout."
+
+
+class AlreadyLightweightCheckout(BzrDirError):
+
+ _fmt = "'%(display_url)s' is already a lightweight checkout."
+
+
+class AlreadyUsingShared(BzrDirError):
+
+ _fmt = "'%(display_url)s' is already using a shared repository."
+
+
+class AlreadyStandalone(BzrDirError):
+
+ _fmt = "'%(display_url)s' is already standalone."
+
+
+class AlreadyWithTrees(BzrDirError):
+
+ _fmt = ("Shared repository '%(display_url)s' already creates "
+ "working trees.")
+
+
+class AlreadyWithNoTrees(BzrDirError):
+
+ _fmt = ("Shared repository '%(display_url)s' already doesn't create "
+ "working trees.")
+
+
+class ReconfigurationNotSupported(BzrDirError):
+
+ _fmt = "Requested reconfiguration of '%(display_url)s' is not supported."
+
+
+class NoBindLocation(BzrDirError):
+
+ _fmt = "No location could be found to bind to at %(display_url)s."
+
+
+class UncommittedChanges(BzrError):
+
+ _fmt = ('Working tree "%(display_url)s" has uncommitted changes'
+ ' (See bzr status).%(more)s')
+
+ def __init__(self, tree, more=None):
+ if more is None:
+ more = ''
+ else:
+ more = ' ' + more
+ import bzrlib.urlutils as urlutils
+ user_url = getattr(tree, "user_url", None)
+ if user_url is None:
+ display_url = str(tree)
+ else:
+ display_url = urlutils.unescape_for_display(user_url, 'ascii')
+ BzrError.__init__(self, tree=tree, display_url=display_url, more=more)
+
+
+class ShelvedChanges(UncommittedChanges):
+
+ _fmt = ('Working tree "%(display_url)s" has shelved changes'
+ ' (See bzr shelve --list).%(more)s')
+
+
+class MissingTemplateVariable(BzrError):
+
+ _fmt = 'Variable {%(name)s} is not available.'
+
+ def __init__(self, name):
+ self.name = name
+
+
+class NoTemplate(BzrError):
+
+ _fmt = 'No template specified.'
+
+
+class UnableCreateSymlink(BzrError):
+
+ _fmt = 'Unable to create symlink %(path_str)son this platform'
+
+ def __init__(self, path=None):
+ path_str = ''
+ if path:
+ try:
+ path_str = repr(str(path))
+ except UnicodeEncodeError:
+ path_str = repr(path)
+ path_str += ' '
+ self.path_str = path_str
+
+
+class UnsupportedTimezoneFormat(BzrError):
+
+ _fmt = ('Unsupported timezone format "%(timezone)s", '
+ 'options are "utc", "original", "local".')
+
+ def __init__(self, timezone):
+ self.timezone = timezone
+
+
+class CommandAvailableInPlugin(StandardError):
+
+ internal_error = False
+
+ def __init__(self, cmd_name, plugin_metadata, provider):
+ self.plugin_metadata = plugin_metadata
+ self.cmd_name = cmd_name
+ self.provider = provider
+
+ def __str__(self):
+ _fmt = ('"%s" is not a standard bzr command. \n'
+ 'However, the following official plugin provides this command: %s\n'
+ 'You can install it by going to: %s'
+ % (self.cmd_name, self.plugin_metadata['name'],
+ self.plugin_metadata['url']))
+
+ return _fmt
+
+
+class NoPluginAvailable(BzrError):
+ pass
+
+
+class UnableEncodePath(BzrError):
+
+ _fmt = ('Unable to encode %(kind)s path %(path)r in '
+ 'user encoding %(user_encoding)s')
+
+ def __init__(self, path, kind):
+ from bzrlib.osutils import get_user_encoding
+ self.path = path
+ self.kind = kind
+ self.user_encoding = get_user_encoding()
+
+
+class NoSuchConfig(BzrError):
+
+ _fmt = ('The "%(config_id)s" configuration does not exist.')
+
+ def __init__(self, config_id):
+ BzrError.__init__(self, config_id=config_id)
+
+
+class NoSuchConfigOption(BzrError):
+
+ _fmt = ('The "%(option_name)s" configuration option does not exist.')
+
+ def __init__(self, option_name):
+ BzrError.__init__(self, option_name=option_name)
+
+
+class NoSuchAlias(BzrError):
+
+ _fmt = ('The alias "%(alias_name)s" does not exist.')
+
+ def __init__(self, alias_name):
+ BzrError.__init__(self, alias_name=alias_name)
+
+
+class DirectoryLookupFailure(BzrError):
+ """Base type for lookup errors."""
+
+ pass
+
+
+class InvalidLocationAlias(DirectoryLookupFailure):
+
+ _fmt = '"%(alias_name)s" is not a valid location alias.'
+
+ def __init__(self, alias_name):
+ DirectoryLookupFailure.__init__(self, alias_name=alias_name)
+
+
+class UnsetLocationAlias(DirectoryLookupFailure):
+
+ _fmt = 'No %(alias_name)s location assigned.'
+
+ def __init__(self, alias_name):
+ DirectoryLookupFailure.__init__(self, alias_name=alias_name[1:])
+
+
+class CannotBindAddress(BzrError):
+
+ _fmt = 'Cannot bind address "%(host)s:%(port)i": %(orig_error)s.'
+
+ def __init__(self, host, port, orig_error):
+ # nb: in python2.4 socket.error doesn't have a useful repr
+ BzrError.__init__(self, host=host, port=port,
+ orig_error=repr(orig_error.args))
+
+
+class UnknownRules(BzrError):
+
+ _fmt = ('Unknown rules detected: %(unknowns_str)s.')
+
+ def __init__(self, unknowns):
+ BzrError.__init__(self, unknowns_str=", ".join(unknowns))
+
+
+class TipChangeRejected(BzrError):
+ """A pre_change_branch_tip hook function may raise this to cleanly and
+ explicitly abort a change to a branch tip.
+ """
+
+ _fmt = u"Tip change rejected: %(msg)s"
+
+ def __init__(self, msg):
+ self.msg = msg
+
+
+class ShelfCorrupt(BzrError):
+
+ _fmt = "Shelf corrupt."
+
+
+class DecompressCorruption(BzrError):
+
+ _fmt = "Corruption while decompressing repository file%(orig_error)s"
+
+ def __init__(self, orig_error=None):
+ if orig_error is not None:
+ self.orig_error = ", %s" % (orig_error,)
+ else:
+ self.orig_error = ""
+ BzrError.__init__(self)
+
+
+class NoSuchShelfId(BzrError):
+
+ _fmt = 'No changes are shelved with id "%(shelf_id)d".'
+
+ def __init__(self, shelf_id):
+ BzrError.__init__(self, shelf_id=shelf_id)
+
+
+class InvalidShelfId(BzrError):
+
+ _fmt = '"%(invalid_id)s" is not a valid shelf id, try a number instead.'
+
+ def __init__(self, invalid_id):
+ BzrError.__init__(self, invalid_id=invalid_id)
+
+
+class JailBreak(BzrError):
+
+ _fmt = "An attempt to access a url outside the server jail was made: '%(url)s'."
+
+ def __init__(self, url):
+ BzrError.__init__(self, url=url)
+
+
+class UserAbort(BzrError):
+
+ _fmt = 'The user aborted the operation.'
+
+
+class MustHaveWorkingTree(BzrError):
+
+ _fmt = ("Branching '%(url)s'(%(format)s) must create a working tree.")
+
+ def __init__(self, format, url):
+ BzrError.__init__(self, format=format, url=url)
+
+
+class NoSuchView(BzrError):
+ """A view does not exist.
+ """
+
+ _fmt = u"No such view: %(view_name)s."
+
+ def __init__(self, view_name):
+ self.view_name = view_name
+
+
+class ViewsNotSupported(BzrError):
+ """Views are not supported by a tree format.
+ """
+
+ _fmt = ("Views are not supported by %(tree)s;"
+ " use 'bzr upgrade' to change your tree to a later format.")
+
+ def __init__(self, tree):
+ self.tree = tree
+
+
+class FileOutsideView(BzrError):
+
+ _fmt = ('Specified file "%(file_name)s" is outside the current view: '
+ '%(view_str)s')
+
+ def __init__(self, file_name, view_files):
+ self.file_name = file_name
+ self.view_str = ", ".join(view_files)
+
+
+class UnresumableWriteGroup(BzrError):
+
+ _fmt = ("Repository %(repository)s cannot resume write group "
+ "%(write_groups)r: %(reason)s")
+
+ internal_error = True
+
+ def __init__(self, repository, write_groups, reason):
+ self.repository = repository
+ self.write_groups = write_groups
+ self.reason = reason
+
+
+class UnsuspendableWriteGroup(BzrError):
+
+ _fmt = ("Repository %(repository)s cannot suspend a write group.")
+
+ internal_error = True
+
+ def __init__(self, repository):
+ self.repository = repository
+
+
+class LossyPushToSameVCS(BzrError):
+
+ _fmt = ("Lossy push not possible between %(source_branch)r and "
+ "%(target_branch)r that are in the same VCS.")
+
+ internal_error = True
+
+ def __init__(self, source_branch, target_branch):
+ self.source_branch = source_branch
+ self.target_branch = target_branch
+
+
+class NoRoundtrippingSupport(BzrError):
+
+ _fmt = ("Roundtripping is not supported between %(source_branch)r and "
+ "%(target_branch)r.")
+
+ internal_error = True
+
+ def __init__(self, source_branch, target_branch):
+ self.source_branch = source_branch
+ self.target_branch = target_branch
+
+
+class FileTimestampUnavailable(BzrError):
+
+ _fmt = "The filestamp for %(path)s is not available."
+
+ internal_error = True
+
+ def __init__(self, path):
+ self.path = path
+
+
+class NoColocatedBranchSupport(BzrError):
+
+ _fmt = ("%(bzrdir)r does not support co-located branches.")
+
+ def __init__(self, bzrdir):
+ self.bzrdir = bzrdir
+
+
+class NoWhoami(BzrError):
+
+ _fmt = ('Unable to determine your name.\n'
+ "Please, set your name with the 'whoami' command.\n"
+ 'E.g. bzr whoami "Your Name <name@example.com>"')
+
+
+class InvalidPattern(BzrError):
+
+ _fmt = ('Invalid pattern(s) found. %(msg)s')
+
+ def __init__(self, msg):
+ self.msg = msg
+
+
+class RecursiveBind(BzrError):
+
+ _fmt = ('Branch "%(branch_url)s" appears to be bound to itself. '
+ 'Please use `bzr unbind` to fix.')
+
+ def __init__(self, branch_url):
+ self.branch_url = branch_url
+
+
+# FIXME: I would prefer to define the config related exception classes in
+# config.py but the lazy import mechanism proscribes this -- vila 20101222
+class OptionExpansionLoop(BzrError):
+
+ _fmt = 'Loop involving %(refs)r while expanding "%(string)s".'
+
+ def __init__(self, string, refs):
+ self.string = string
+ self.refs = '->'.join(refs)
+
+
+class ExpandingUnknownOption(BzrError):
+
+ _fmt = 'Option %(name)s is not defined while expanding "%(string)s".'
+
+ def __init__(self, name, string):
+ self.name = name
+ self.string = string
+
+
+class NoCompatibleInter(BzrError):
+
+ _fmt = ('No compatible object available for operations from %(source)r '
+ 'to %(target)r.')
+
+ def __init__(self, source, target):
+ self.source = source
+ self.target = target
+
+
+class HpssVfsRequestNotAllowed(BzrError):
+
+ _fmt = ("VFS requests over the smart server are not allowed. Encountered: "
+ "%(method)s, %(arguments)s.")
+
+ def __init__(self, method, arguments):
+ self.method = method
+ self.arguments = arguments
+
+
+class UnsupportedKindChange(BzrError):
+
+ _fmt = ("Kind change from %(from_kind)s to %(to_kind)s for "
+ "%(path)s not supported by format %(format)r")
+
+ def __init__(self, path, from_kind, to_kind, format):
+ self.path = path
+ self.from_kind = from_kind
+ self.to_kind = to_kind
+ self.format = format
+
+
+class MissingFeature(BzrError):
+
+ _fmt = ("Missing feature %(feature)s not provided by this "
+ "version of Bazaar or any plugin.")
+
+ def __init__(self, feature):
+ self.feature = feature
+
+
+class PatchSyntax(BzrError):
+ """Base class for patch syntax errors."""
+
+
+class BinaryFiles(BzrError):
+
+ _fmt = 'Binary files section encountered.'
+
+ def __init__(self, orig_name, mod_name):
+ self.orig_name = orig_name
+ self.mod_name = mod_name
+
+
+class MalformedPatchHeader(PatchSyntax):
+
+ _fmt = "Malformed patch header. %(desc)s\n%(line)r"
+
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+
+
+class MalformedHunkHeader(PatchSyntax):
+
+ _fmt = "Malformed hunk header. %(desc)s\n%(line)r"
+
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+
+
+class MalformedLine(PatchSyntax):
+
+ _fmt = "Malformed line. %(desc)s\n%(line)r"
+
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+
+
+class PatchConflict(BzrError):
+
+ _fmt = ('Text contents mismatch at line %(line_no)d. Original has '
+ '"%(orig_line)s", but patch says it should be "%(patch_line)s"')
+
+ def __init__(self, line_no, orig_line, patch_line):
+ self.line_no = line_no
+ self.orig_line = orig_line.rstrip('\n')
+ self.patch_line = patch_line.rstrip('\n')
+
+
+class FeatureAlreadyRegistered(BzrError):
+
+ _fmt = 'The feature %(feature)s has already been registered.'
+
+ def __init__(self, feature):
+ self.feature = feature
diff --git a/bzrlib/estimate_compressed_size.py b/bzrlib/estimate_compressed_size.py
new file mode 100644
index 0000000..39262af
--- /dev/null
+++ b/bzrlib/estimate_compressed_size.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Code to estimate the entropy of content"""
+
+from __future__ import absolute_import
+
+import zlib
+
+
+class ZLibEstimator(object):
+ """Uses zlib.compressobj to estimate compressed size."""
+
+ def __init__(self, target_size, min_compression=2.0):
+ """Create a new estimator.
+
+ :param target_size: The desired size of the compressed content.
+ :param min_compression: Estimated minimum compression. By default we
+ assume that the content is 'text', which means a min compression of
+ about 2:1.
+ """
+ self._target_size = target_size
+ self._compressor = zlib.compressobj()
+ self._uncompressed_size_added = 0
+ self._compressed_size_added = 0
+ self._unflushed_size_added = 0
+        self._estimated_compression = min_compression
+
+ def add_content(self, content):
+ self._uncompressed_size_added += len(content)
+ self._unflushed_size_added += len(content)
+ z_size = len(self._compressor.compress(content))
+ if z_size > 0:
+ self._record_z_len(z_size)
+
+ def _record_z_len(self, count):
+ # We got some compressed bytes, update the counters
+ self._compressed_size_added += count
+ self._unflushed_size_added = 0
+ # So far we've read X uncompressed bytes, and written Y compressed
+ # bytes. We should have a decent estimate of the final compression.
+ self._estimated_compression = (float(self._uncompressed_size_added)
+ / self._compressed_size_added)
+
+ def full(self):
+ """Have we reached the target size?"""
+ if self._unflushed_size_added:
+ remaining_size = self._target_size - self._compressed_size_added
+ # Estimate how much compressed content the unflushed data will
+ # consume
+ est_z_size = (self._unflushed_size_added /
+ self._estimated_compression)
+ if est_z_size >= remaining_size:
+                # The unflushed data probably fills the remaining space, so
+                # flush it and record the real compressed size.
+ z_size = len(self._compressor.flush(zlib.Z_SYNC_FLUSH))
+ self._record_z_len(z_size)
+ return self._compressed_size_added >= self._target_size
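+
+
+# Illustrative usage (a sketch; `chunks` is a placeholder iterable of byte
+# strings, not defined in this module):
+#
+#     estimator = ZLibEstimator(4 * 1024 * 1024)  # stop near 4MB compressed
+#     for chunk in chunks:
+#         estimator.add_content(chunk)
+#         if estimator.full():
+#             break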
diff --git a/bzrlib/export/__init__.py b/bzrlib/export/__init__.py
new file mode 100644
index 0000000..6cd0f86
--- /dev/null
+++ b/bzrlib/export/__init__.py
@@ -0,0 +1,247 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Export trees to tarballs, non-controlled directories, zipfiles, etc.
+"""
+
+from __future__ import absolute_import
+
+import os
+import time
+import warnings
+
+from bzrlib import (
+ errors,
+ pyutils,
+ trace,
+ )
+
+# Maps format name => export function
+_exporters = {}
+# Maps filename extensions => export format name
+_exporter_extensions = {}
+
+
+def register_exporter(format, extensions, func, override=False):
+ """Register an exporter.
+
+ :param format: This is the name of the format, such as 'tgz' or 'zip'
+ :param extensions: Extensions which should be used in the case that a
+ format was not explicitly specified.
+ :type extensions: List
+ :param func: The function. It will be called with (tree, dest, root)
+ :param override: Whether to override an object which already exists.
+ Frequently plugins will want to provide functionality
+ until it shows up in mainline, so the default is False.
+ """
+ global _exporters, _exporter_extensions
+
+ if (format not in _exporters) or override:
+ _exporters[format] = func
+
+ for ext in extensions:
+ if (ext not in _exporter_extensions) or override:
+ _exporter_extensions[ext] = format
+
+
+def register_lazy_exporter(scheme, extensions, module, funcname):
+ """Register lazy-loaded exporter function.
+
+ When requesting a specific type of export, load the function from the given module.
+ """
+ def _loader(tree, dest, root, subdir, force_mtime, fileobj):
+ func = pyutils.get_named_object(module, funcname)
+ return func(tree, dest, root, subdir, force_mtime=force_mtime,
+ fileobj=fileobj)
+
+ register_exporter(scheme, extensions, _loader)
+
+
+def get_export_generator(tree, dest=None, format=None, root=None, subdir=None,
+ filtered=False, per_file_timestamps=False,
+ fileobj=None):
+ """Returns a generator that exports the given tree.
+
+ The generator is expected to yield None as it exports the tree, while the
+ actual export is written to ``fileobj``.
+
+ :param tree: A Tree (such as RevisionTree) to export
+
+ :param dest: The destination where the files, etc should be put
+
+ :param format: The format (dir, zip, etc), if None, it will check the
+ extension on dest, looking for a match
+
+ :param root: The root location inside the format. It is common practise to
+ have zipfiles and tarballs extract into a subdirectory, rather than
+ into the current working directory. If root is None, the default root
+ will be selected as the destination without its extension.
+
+ :param subdir: A starting directory within the tree. None means to export
+ the entire tree, and anything else should specify the relative path to
+ a directory to start exporting from.
+
+ :param filtered: If True, content filtering is applied to the exported
+ files. Deprecated in favour of passing a ContentFilterTree
+ as the source.
+
+ :param per_file_timestamps: Whether to use the timestamp stored in the tree
+ rather than now(). This will do a revision lookup for every file so
+ will be significantly slower.
+
+ :param fileobj: Optional file object to use
+ """
+ global _exporters, _exporter_extensions
+
+ if format is None and dest is not None:
+ for ext in _exporter_extensions:
+ if dest.endswith(ext):
+ format = _exporter_extensions[ext]
+ break
+
+ # Most of the exporters will just have to call
+ # this function anyway, so why not do it for them
+ if root is None:
+ root = get_root_name(dest)
+
+ if format not in _exporters:
+ raise errors.NoSuchExportFormat(format)
+
+ if not per_file_timestamps:
+ force_mtime = time.time()
+ else:
+ force_mtime = None
+
+ trace.mutter('export version %r', tree)
+
+ if filtered:
+ from bzrlib.filter_tree import ContentFilterTree
+ warnings.warn(
+ "passing filtered=True to export is deprecated in bzr 2.4",
+ stacklevel=2)
+ tree = ContentFilterTree(tree, tree._content_filter_stack)
+ # We don't want things re-filtered by the specific exporter.
+ filtered = False
+
+ tree.lock_read()
+ try:
+ for _ in _exporters[format](
+ tree, dest, root, subdir,
+ force_mtime=force_mtime, fileobj=fileobj):
+ yield
+ finally:
+ tree.unlock()
+
+
+def export(tree, dest, format=None, root=None, subdir=None, filtered=False,
+ per_file_timestamps=False, fileobj=None):
+ """Export the given Tree to the specific destination.
+
+ :param tree: A Tree (such as RevisionTree) to export
+ :param dest: The destination where the files, etc should be put
+ :param format: The format (dir, zip, etc), if None, it will check the
+ extension on dest, looking for a match
+ :param root: The root location inside the format.
+ It is common practise to have zipfiles and tarballs
+ extract into a subdirectory, rather than into the
+ current working directory.
+ If root is None, the default root will be
+ selected as the destination without its
+ extension.
+ :param subdir: A starting directory within the tree. None means to export
+ the entire tree, and anything else should specify the relative path to
+ a directory to start exporting from.
+ :param filtered: If True, content filtering is applied to the
+ files exported. Deprecated in favour of passing a ContentFilterTree.
+ :param per_file_timestamps: Whether to use the timestamp stored in the
+ tree rather than now(). This will do a revision lookup
+ for every file so will be significantly slower.
+ :param fileobj: Optional file object to use
+ """
+ for _ in get_export_generator(tree, dest, format, root, subdir, filtered,
+ per_file_timestamps, fileobj):
+ pass
+
+
+def get_root_name(dest):
+ """Get just the root name for an export.
+
+ """
+ global _exporter_extensions
+ if dest == '-':
+ # Exporting to -/foo doesn't make sense so use relative paths.
+ return ''
+ dest = os.path.basename(dest)
+ for ext in _exporter_extensions:
+ if dest.endswith(ext):
+ return dest[:-len(ext)]
+ return dest
+
+
+def _export_iter_entries(tree, subdir, skip_special=True):
+ """Iter the entries for tree suitable for exporting.
+
+ :param tree: A tree object.
+ :param subdir: None or the path of an entry to start exporting from.
+ :param skip_special: Whether to skip .bzr files.
+ :return: iterator over tuples with final path, tree path and inventory
+ entry for each entry to export
+ """
+ if subdir == '':
+ subdir = None
+ if subdir is not None:
+ subdir = subdir.rstrip('/')
+ entries = tree.iter_entries_by_dir()
+ entries.next() # skip root
+ for path, entry in entries:
+ # The .bzr* namespace is reserved for "magic" files like
+ # .bzrignore and .bzrrules - do not export these
+ if skip_special and path.startswith(".bzr"):
+ continue
+ if path == subdir:
+ if entry.kind == 'directory':
+ continue
+ final_path = entry.name
+ elif subdir is not None:
+ if path.startswith(subdir + '/'):
+ final_path = path[len(subdir) + 1:]
+ else:
+ continue
+ else:
+ final_path = path
+ if not tree.has_filename(path):
+ continue
+
+ yield final_path, path, entry
+
+
+register_lazy_exporter(None, [], 'bzrlib.export.dir_exporter',
+ 'dir_exporter_generator')
+register_lazy_exporter('dir', [], 'bzrlib.export.dir_exporter',
+ 'dir_exporter_generator')
+register_lazy_exporter('tar', ['.tar'], 'bzrlib.export.tar_exporter',
+ 'plain_tar_exporter_generator')
+register_lazy_exporter('tgz', ['.tar.gz', '.tgz'],
+ 'bzrlib.export.tar_exporter',
+ 'tgz_exporter_generator')
+register_lazy_exporter('tbz2', ['.tar.bz2', '.tbz2'],
+ 'bzrlib.export.tar_exporter', 'tbz_exporter_generator')
+register_lazy_exporter('tlzma', ['.tar.lzma'], 'bzrlib.export.tar_exporter',
+ 'tar_lzma_exporter_generator')
+register_lazy_exporter('txz', ['.tar.xz'], 'bzrlib.export.tar_exporter',
+ 'tar_xz_exporter_generator')
+register_lazy_exporter('zip', ['.zip'], 'bzrlib.export.zip_exporter',
+ 'zip_exporter_generator')
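The registry populated above is what the public export() helper consults. A short usage sketch (assuming bzrlib is importable and a working tree exists at '.'; the destination names are made up):

    from bzrlib.export import export, get_root_name
    from bzrlib.workingtree import WorkingTree

    wt = WorkingTree.open('.')              # any Tree object can be exported
    export(wt, 'project-1.0.tar.gz')        # format 'tgz' inferred from the extension
    export(wt, 'docs.zip', subdir='doc')    # export only the doc/ subtree

    # The archive root defaults to the destination minus its registered extension:
    assert get_root_name('project-1.0.tar.gz') == 'project-1.0'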
diff --git a/bzrlib/export/dir_exporter.py b/bzrlib/export/dir_exporter.py
new file mode 100644
index 0000000..5a0581c
--- /dev/null
+++ b/bzrlib/export/dir_exporter.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Export a bzrlib.tree.Tree to a new or empty directory."""
+
+from __future__ import absolute_import
+
+import errno
+import os
+
+from bzrlib import errors, osutils
+from bzrlib.export import _export_iter_entries
+
+
+def dir_exporter_generator(tree, dest, root, subdir=None,
+ force_mtime=None, fileobj=None):
+ """Return a generator that exports this tree to a new directory.
+
+ `dest` should either not exist or should be empty. If it does not exist it
+ will be created holding the contents of this tree.
+
+ :param fileobj: Is not used in this exporter
+
+ :note: If the export fails, the destination directory will be
+ left in an incompletely exported state: export is not transactional.
+ """
+ try:
+ os.mkdir(dest)
+ except OSError, e:
+ if e.errno == errno.EEXIST:
+ # check if directory empty
+ if os.listdir(dest) != []:
+ raise errors.BzrError(
+ "Can't export tree to non-empty directory.")
+ else:
+ raise
+ # Iterate everything, building up the files we will want to export, and
+ # creating the directories and symlinks that we need.
+ # This tracks (file_id, (destination_path, executable))
+ # This matches the api that tree.iter_files_bytes() wants
+ # Note in the case of revision trees, this does trigger a double inventory
+ # lookup, hopefully it isn't too expensive.
+ to_fetch = []
+ for dp, tp, ie in _export_iter_entries(tree, subdir):
+ fullpath = osutils.pathjoin(dest, dp)
+ if ie.kind == "file":
+ to_fetch.append((ie.file_id, (dp, tp, ie.file_id)))
+ elif ie.kind == "directory":
+ os.mkdir(fullpath)
+ elif ie.kind == "symlink":
+ try:
+ symlink_target = tree.get_symlink_target(ie.file_id, tp)
+ os.symlink(symlink_target, fullpath)
+ except OSError, e:
+ raise errors.BzrError(
+ "Failed to create symlink %r -> %r, error: %s"
+ % (fullpath, symlink_target, e))
+ else:
+ raise errors.BzrError("don't know how to export {%s} of kind %r" %
+ (ie.file_id, ie.kind))
+
+ yield
+ # The data returned here can be in any order, but we've already created all
+ # the directories
+ flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | getattr(os, 'O_BINARY', 0)
+ for (relpath, treepath, file_id), chunks in tree.iter_files_bytes(to_fetch):
+ fullpath = osutils.pathjoin(dest, relpath)
+ # We set the mode and let the umask sort out the file info
+ mode = 0666
+ if tree.is_executable(file_id, treepath):
+ mode = 0777
+ out = os.fdopen(os.open(fullpath, flags, mode), 'wb')
+ try:
+ out.writelines(chunks)
+ finally:
+ out.close()
+ if force_mtime is not None:
+ mtime = force_mtime
+ else:
+ mtime = tree.get_file_mtime(file_id, treepath)
+ os.utime(fullpath, (mtime, mtime))
+
+ yield
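The file-writing loop above asks for a deliberately broad mode (0666 or 0777) and lets the process umask trim it, rather than copying exact permissions out of the tree. The same idiom in isolation (the function name and arguments are hypothetical):

    import os

    def write_exported_file(path, chunks, executable=False, mtime=None):
        # Request broad permissions; the umask reduces them to e.g. 644 or 755.
        flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | getattr(os, 'O_BINARY', 0)
        mode = 0o777 if executable else 0o666
        out = os.fdopen(os.open(path, flags, mode), 'wb')
        try:
            out.writelines(chunks)
        finally:
            out.close()
        if mtime is not None:
            os.utime(path, (mtime, mtime))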
diff --git a/bzrlib/export/tar_exporter.py b/bzrlib/export/tar_exporter.py
new file mode 100644
index 0000000..b385666
--- /dev/null
+++ b/bzrlib/export/tar_exporter.py
@@ -0,0 +1,228 @@
+# Copyright (C) 2005, 2006, 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Export a tree to a tarball."""
+
+from __future__ import absolute_import
+
+import os
+import StringIO
+import sys
+import tarfile
+
+from bzrlib import (
+ errors,
+ osutils,
+ )
+from bzrlib.export import _export_iter_entries
+
+
+def prepare_tarball_item(tree, root, final_path, tree_path, entry, force_mtime=None):
+ """Prepare a tarball item for exporting
+
+ :param tree: Tree to export
+ :param final_path: Final path to place item
+ :param tree_path: Path for the entry in the tree
+ :param entry: Entry to export
+ :param force_mtime: Optional mtime to force, instead of using tree
+ timestamps.
+
+ Returns a (tarinfo, fileobj) tuple
+ """
+ filename = osutils.pathjoin(root, final_path).encode('utf8')
+ item = tarfile.TarInfo(filename)
+ if force_mtime is not None:
+ item.mtime = force_mtime
+ else:
+ item.mtime = tree.get_file_mtime(entry.file_id, tree_path)
+ if entry.kind == "file":
+ item.type = tarfile.REGTYPE
+ if tree.is_executable(entry.file_id, tree_path):
+ item.mode = 0755
+ else:
+ item.mode = 0644
+ # This brings the whole file into memory, but that's more or less required by
+ # the tarfile contract, which wants the size of the file up front. We
+ # want to make sure it doesn't change, and we need to read it in one
+ # go for content filtering.
+ content = tree.get_file_text(entry.file_id, tree_path)
+ item.size = len(content)
+ fileobj = StringIO.StringIO(content)
+ elif entry.kind == "directory":
+ item.type = tarfile.DIRTYPE
+ item.name += '/'
+ item.size = 0
+ item.mode = 0755
+ fileobj = None
+ elif entry.kind == "symlink":
+ item.type = tarfile.SYMTYPE
+ item.size = 0
+ item.mode = 0755
+ item.linkname = tree.get_symlink_target(entry.file_id, tree_path)
+ fileobj = None
+ else:
+ raise errors.BzrError("don't know how to export {%s} of kind %r"
+ % (entry.file_id, entry.kind))
+ return (item, fileobj)
+
+
+def export_tarball_generator(tree, ball, root, subdir=None, force_mtime=None):
+ """Export tree contents to a tarball.
+
+ :returns: A generator that will repeatedly produce None as each file is
+ emitted. The entire generator must be consumed to complete writing
+ the file.
+
+ :param tree: Tree to export
+
+ :param ball: Tarball to export to; it will be closed when writing is
+ complete.
+
+ :param subdir: Sub directory to export
+
+ :param force_mtime: Optional mtime to force, instead of using tree
+ timestamps.
+ """
+ try:
+ for final_path, tree_path, entry in _export_iter_entries(tree, subdir):
+ (item, fileobj) = prepare_tarball_item(
+ tree, root, final_path, tree_path, entry, force_mtime)
+ ball.addfile(item, fileobj)
+ yield
+ finally:
+ ball.close()
+
+
+def tgz_exporter_generator(tree, dest, root, subdir, force_mtime=None,
+ fileobj=None):
+ """Export this tree to a new tar file.
+
+ `dest` will be created holding the contents of this tree; if it
+ already exists, it will be clobbered, like with "tar -c".
+ """
+ import gzip
+ if force_mtime is not None:
+ root_mtime = force_mtime
+ elif (getattr(tree, "repository", None) and
+ getattr(tree, "get_revision_id", None)):
+ # If this is a revision tree, use the revisions' timestamp
+ rev = tree.repository.get_revision(tree.get_revision_id())
+ root_mtime = rev.timestamp
+ elif tree.get_root_id() is not None:
+ root_mtime = tree.get_file_mtime(tree.get_root_id())
+ else:
+ root_mtime = None
+
+ is_stdout = False
+ basename = None
+ if fileobj is not None:
+ stream = fileobj
+ elif dest == '-':
+ stream = sys.stdout
+ is_stdout = True
+ else:
+ stream = open(dest, 'wb')
+ # gzip file is used with an explicit fileobj so that
+ # the basename can be stored in the gzip file rather than
+ # dest. (bug 102234)
+ basename = os.path.basename(dest)
+ try:
+ zipstream = gzip.GzipFile(basename, 'w', fileobj=stream,
+ mtime=root_mtime)
+ except TypeError:
+ # Python < 2.7 doesn't support the mtime argument
+ zipstream = gzip.GzipFile(basename, 'w', fileobj=stream)
+ ball = tarfile.open(None, 'w|', fileobj=zipstream)
+ for _ in export_tarball_generator(
+ tree, ball, root, subdir, force_mtime):
+ yield
+ # Closing zipstream may trigger writes to stream
+ zipstream.close()
+ if not is_stdout:
+ # Now we can safely close the stream
+ stream.close()
+
+
+def tbz_exporter_generator(tree, dest, root, subdir,
+ force_mtime=None, fileobj=None):
+ """Export this tree to a new tar file.
+
+ `dest` will be created holding the contents of this tree; if it
+ already exists, it will be clobbered, like with "tar -c".
+ """
+ if fileobj is not None:
+ ball = tarfile.open(None, 'w|bz2', fileobj)
+ elif dest == '-':
+ ball = tarfile.open(None, 'w|bz2', sys.stdout)
+ else:
+ # tarfile.open goes on to do 'os.getcwd() + dest' for opening the
+ # tar file. With dest being unicode, this throws UnicodeDecodeError
+ # unless we encode dest before passing it on. This works around
+ # upstream python bug http://bugs.python.org/issue8396 (fixed in
+ # Python 2.6.5 and 2.7b1)
+ ball = tarfile.open(dest.encode(osutils._fs_enc), 'w:bz2')
+ return export_tarball_generator(
+ tree, ball, root, subdir, force_mtime)
+
+
+def plain_tar_exporter_generator(tree, dest, root, subdir, compression=None,
+ force_mtime=None, fileobj=None):
+ """Export this tree to a new tar file.
+
+ `dest` will be created holding the contents of this tree; if it
+ already exists, it will be clobbered, like with "tar -c".
+ """
+ if fileobj is not None:
+ stream = fileobj
+ elif dest == '-':
+ stream = sys.stdout
+ else:
+ stream = open(dest, 'wb')
+ ball = tarfile.open(None, 'w|', stream)
+ return export_tarball_generator(
+ tree, ball, root, subdir, force_mtime)
+
+
+def tar_xz_exporter_generator(tree, dest, root, subdir,
+ force_mtime=None, fileobj=None):
+ return tar_lzma_exporter_generator(tree, dest, root, subdir,
+ force_mtime, fileobj, "xz")
+
+
+def tar_lzma_exporter_generator(tree, dest, root, subdir,
+ force_mtime=None, fileobj=None,
+ compression_format="alone"):
+ """Export this tree to a new .tar.lzma file.
+
+ `dest` will be created holding the contents of this tree; if it
+ already exists, it will be clobbered, like with "tar -c".
+ """
+ if dest == '-':
+ raise errors.BzrError("Writing to stdout not supported for .tar.lzma")
+
+ if fileobj is not None:
+ raise errors.BzrError(
+ "Writing to fileobject not supported for .tar.lzma")
+ try:
+ import lzma
+ except ImportError, e:
+ raise errors.DependencyNotPresent('lzma', e)
+
+ stream = lzma.LZMAFile(dest.encode(osutils._fs_enc), 'w',
+ options={"format": compression_format})
+ ball = tarfile.open(None, 'w:', fileobj=stream)
+ return export_tarball_generator(
+ tree, ball, root, subdir, force_mtime=force_mtime)
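One detail worth noting in the tgz path above: GzipFile is opened with an explicit fileobj so the gzip header records only the basename (and, where the Python version allows it, a fixed mtime) rather than the full destination path, which is what bug 102234 was about. Roughly the same construction outside bzrlib (the helper name is hypothetical):

    import gzip
    import os
    import tarfile

    def open_tgz_for_write(dest, mtime=None):
        # Open the output stream ourselves so gzip only ever sees the basename.
        stream = open(dest, 'wb')
        basename = os.path.basename(dest)
        try:
            gz = gzip.GzipFile(basename, 'w', fileobj=stream, mtime=mtime)
        except TypeError:
            # Python < 2.7 has no mtime argument.
            gz = gzip.GzipFile(basename, 'w', fileobj=stream)
        # Caller must close the tarfile, then gz, then stream, in that order.
        return tarfile.open(None, 'w|', fileobj=gz), gz, stream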
diff --git a/bzrlib/export/zip_exporter.py b/bzrlib/export/zip_exporter.py
new file mode 100644
index 0000000..10c6a67
--- /dev/null
+++ b/bzrlib/export/zip_exporter.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2005, 2006, 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Export a Tree to a zip file.
+"""
+
+from __future__ import absolute_import
+
+import os
+import stat
+import sys
+import time
+import zipfile
+
+from bzrlib import (
+ osutils,
+ )
+from bzrlib.export import _export_iter_entries
+from bzrlib.trace import mutter
+
+
+# Windows expects this bit to be set in the 'external_attr' section,
+# or it won't consider the entry a directory.
+ZIP_DIRECTORY_BIT = (1 << 4)
+FILE_PERMISSIONS = (0644 << 16)
+DIR_PERMISSIONS = (0755 << 16)
+
+_FILE_ATTR = stat.S_IFREG | FILE_PERMISSIONS
+_DIR_ATTR = stat.S_IFDIR | ZIP_DIRECTORY_BIT | DIR_PERMISSIONS
+
+
+def zip_exporter_generator(tree, dest, root, subdir=None,
+ force_mtime=None, fileobj=None):
+ """ Export this tree to a new zip file.
+
+ `dest` will be created holding the contents of this tree; if it
+ already exists, it will be overwritten".
+ """
+
+ compression = zipfile.ZIP_DEFLATED
+ if fileobj is not None:
+ dest = fileobj
+ elif dest == "-":
+ dest = sys.stdout
+ zipf = zipfile.ZipFile(dest, "w", compression)
+ try:
+ for dp, tp, ie in _export_iter_entries(tree, subdir):
+ file_id = ie.file_id
+ mutter(" export {%s} kind %s to %s", file_id, ie.kind, dest)
+
+ # zipfile.ZipFile switches all paths to forward
+ # slashes anyway, so just stick with that.
+ if force_mtime is not None:
+ mtime = force_mtime
+ else:
+ mtime = tree.get_file_mtime(ie.file_id, tp)
+ date_time = time.localtime(mtime)[:6]
+ filename = osutils.pathjoin(root, dp).encode('utf8')
+ if ie.kind == "file":
+ zinfo = zipfile.ZipInfo(
+ filename=filename,
+ date_time=date_time)
+ zinfo.compress_type = compression
+ zinfo.external_attr = _FILE_ATTR
+ content = tree.get_file_text(file_id, tp)
+ zipf.writestr(zinfo, content)
+ elif ie.kind == "directory":
+ # Directories must contain a trailing slash, to indicate
+ # to the zip routine that they are really directories and
+ # not just empty files.
+ zinfo = zipfile.ZipInfo(
+ filename=filename + '/',
+ date_time=date_time)
+ zinfo.compress_type = compression
+ zinfo.external_attr = _DIR_ATTR
+ zipf.writestr(zinfo, '')
+ elif ie.kind == "symlink":
+ zinfo = zipfile.ZipInfo(
+ filename=(filename + '.lnk'),
+ date_time=date_time)
+ zinfo.compress_type = compression
+ zinfo.external_attr = _FILE_ATTR
+ zipf.writestr(zinfo, tree.get_symlink_target(file_id, tp))
+ yield
+
+ zipf.close()
+
+ except UnicodeEncodeError:
+ zipf.close()
+ os.remove(dest)
+ from bzrlib.errors import BzrError
+ raise BzrError("Can't export non-ascii filenames to zip")
diff --git a/bzrlib/export_pot.py b/bzrlib/export_pot.py
new file mode 100644
index 0000000..c9d9585
--- /dev/null
+++ b/bzrlib/export_pot.py
@@ -0,0 +1,322 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# The normalize function is taken from pygettext which is distributed
+# with Python under the Python License, which is GPL compatible.
+
+"""Extract docstrings from Bazaar commands.
+
+This module only handles bzrlib objects that use strings not directly wrapped
+by a gettext() call. To generate a complete translation template file, this
+output needs to be combined with that of xgettext or a similar command for
+extracting those strings, as is done in the bzr Makefile. Sorting the output
+is also left to that stage of the process.
+"""
+
+from __future__ import absolute_import
+
+import inspect
+import os
+
+from bzrlib import (
+ commands as _mod_commands,
+ errors,
+ help_topics,
+ option,
+ plugin,
+ help,
+ )
+from bzrlib.trace import (
+ mutter,
+ note,
+ )
+from bzrlib.i18n import gettext
+
+
+def _escape(s):
+ s = (s.replace('\\', '\\\\')
+ .replace('\n', '\\n')
+ .replace('\r', '\\r')
+ .replace('\t', '\\t')
+ .replace('"', '\\"')
+ )
+ return s
+
+def _normalize(s):
+ # This converts the various Python string types into a format that
+ # is appropriate for .po files, namely much closer to C style.
+ lines = s.split('\n')
+ if len(lines) == 1:
+ s = '"' + _escape(s) + '"'
+ else:
+ if not lines[-1]:
+ del lines[-1]
+ lines[-1] = lines[-1] + '\n'
+ lines = map(_escape, lines)
+ lineterm = '\\n"\n"'
+ s = '""\n"' + lineterm.join(lines) + '"'
+ return s
+
+
+def _parse_source(source_text):
+ """Get object to lineno mappings from given source_text"""
+ import ast
+ cls_to_lineno = {}
+ str_to_lineno = {}
+ for node in ast.walk(ast.parse(source_text)):
+ # TODO: worry about duplicates?
+ if isinstance(node, ast.ClassDef):
+ # TODO: worry about nesting?
+ cls_to_lineno[node.name] = node.lineno
+ elif isinstance(node, ast.Str):
+ # Python AST gives location of string literal as the line the
+ # string terminates on. It's more useful to have the line the
+ # string begins on. Unfortunately, counting back newlines is
+ # only an approximation as the AST is ignorant of escaping.
+ str_to_lineno[node.s] = node.lineno - node.s.count('\n')
+ return cls_to_lineno, str_to_lineno
+
+
+class _ModuleContext(object):
+ """Record of the location within a source tree"""
+
+ def __init__(self, path, lineno=1, _source_info=None):
+ self.path = path
+ self.lineno = lineno
+ if _source_info is not None:
+ self._cls_to_lineno, self._str_to_lineno = _source_info
+
+ @classmethod
+ def from_module(cls, module):
+ """Get new context from module object and parse source for linenos"""
+ sourcepath = inspect.getsourcefile(module)
+ # TODO: fix this to do the right thing rather than rely on cwd
+ relpath = os.path.relpath(sourcepath)
+ return cls(relpath,
+ _source_info=_parse_source("".join(inspect.findsource(module)[0])))
+
+ def from_class(self, cls):
+ """Get new context with same details but lineno of class in source"""
+ try:
+ lineno = self._cls_to_lineno[cls.__name__]
+ except (AttributeError, KeyError):
+ mutter("Definition of %r not found in %r", cls, self.path)
+ return self
+ return self.__class__(self.path, lineno,
+ (self._cls_to_lineno, self._str_to_lineno))
+
+ def from_string(self, string):
+ """Get new context with same details but lineno of string in source"""
+ try:
+ lineno = self._str_to_lineno[string]
+ except (AttributeError, KeyError):
+ mutter("String %r not found in %r", string[:20], self.path)
+ return self
+ return self.__class__(self.path, lineno,
+ (self._cls_to_lineno, self._str_to_lineno))
+
+
+class _PotExporter(object):
+ """Write message details to output stream in .pot file format"""
+
+ def __init__(self, outf, include_duplicates=False):
+ self.outf = outf
+ if include_duplicates:
+ self._msgids = None
+ else:
+ self._msgids = set()
+ self._module_contexts = {}
+
+ def poentry(self, path, lineno, s, comment=None):
+ if self._msgids is not None:
+ if s in self._msgids:
+ return
+ self._msgids.add(s)
+ if comment is None:
+ comment = ''
+ else:
+ comment = "# %s\n" % comment
+ mutter("Exporting msg %r at line %d in %r", s[:20], lineno, path)
+ self.outf.write(
+ "#: {path}:{lineno}\n"
+ "{comment}"
+ "msgid {msg}\n"
+ "msgstr \"\"\n"
+ "\n".format(
+ path=path, lineno=lineno, comment=comment, msg=_normalize(s)))
+
+ def poentry_in_context(self, context, string, comment=None):
+ context = context.from_string(string)
+ self.poentry(context.path, context.lineno, string, comment)
+
+ def poentry_per_paragraph(self, path, lineno, msgid, include=None):
+ # TODO: How to split long help?
+ paragraphs = msgid.split('\n\n')
+ if include is not None:
+ paragraphs = filter(include, paragraphs)
+ for p in paragraphs:
+ self.poentry(path, lineno, p)
+ lineno += p.count('\n') + 2
+
+ def get_context(self, obj):
+ module = inspect.getmodule(obj)
+ try:
+ context = self._module_contexts[module.__name__]
+ except KeyError:
+ context = _ModuleContext.from_module(module)
+ self._module_contexts[module.__name__] = context
+ if inspect.isclass(obj):
+ context = context.from_class(obj)
+ return context
+
+
+def _write_option(exporter, context, opt, note):
+ if getattr(opt, 'hidden', False):
+ return
+ optname = opt.name
+ if getattr(opt, 'title', None):
+ exporter.poentry_in_context(context, opt.title,
+ "title of {name!r} {what}".format(name=optname, what=note))
+ for name, _, _, helptxt in opt.iter_switches():
+ if name != optname:
+ if opt.is_hidden(name):
+ continue
+ name = "=".join([optname, name])
+ if helptxt:
+ exporter.poentry_in_context(context, helptxt,
+ "help of {name!r} {what}".format(name=name, what=note))
+
+
+def _standard_options(exporter):
+ OPTIONS = option.Option.OPTIONS
+ context = exporter.get_context(option)
+ for name in sorted(OPTIONS.keys()):
+ opt = OPTIONS[name]
+ _write_option(exporter, context.from_string(name), opt, "option")
+
+
+def _command_options(exporter, context, cmd):
+ note = "option of {0!r} command".format(cmd.name())
+ for opt in cmd.takes_options:
+ # String values in Command option lists are for global options
+ if not isinstance(opt, str):
+ _write_option(exporter, context, opt, note)
+
+
+def _write_command_help(exporter, cmd):
+ context = exporter.get_context(cmd.__class__)
+ rawdoc = cmd.__doc__
+ dcontext = context.from_string(rawdoc)
+ doc = inspect.cleandoc(rawdoc)
+
+ def exclude_usage(p):
+ # ':Usage:' has special meaning in help topics.
+ # This is usage example of command and should not be translated.
+ if p.splitlines()[0] != ':Usage:':
+ return True
+
+ exporter.poentry_per_paragraph(dcontext.path, dcontext.lineno, doc,
+ exclude_usage)
+ _command_options(exporter, context, cmd)
+
+
+def _command_helps(exporter, plugin_name=None):
+ """Extract docstrings from path.
+
+ This respects the Bazaar cmdtable/table convention and will
+ only extract docstrings from functions mentioned in these tables.
+ """
+ from glob import glob
+
+ # builtin commands
+ for cmd_name in _mod_commands.builtin_command_names():
+ command = _mod_commands.get_cmd_object(cmd_name, False)
+ if command.hidden:
+ continue
+ if plugin_name is not None:
+ # only export builtins if we are not exporting plugin commands
+ continue
+ note(gettext("Exporting messages from builtin command: %s"), cmd_name)
+ _write_command_help(exporter, command)
+
+ plugin_path = plugin.get_core_plugin_path()
+ core_plugins = glob(plugin_path + '/*/__init__.py')
+ core_plugins = [os.path.basename(os.path.dirname(p))
+ for p in core_plugins]
+ # plugins
+ for cmd_name in _mod_commands.plugin_command_names():
+ command = _mod_commands.get_cmd_object(cmd_name, False)
+ if command.hidden:
+ continue
+ if plugin_name is not None and command.plugin_name() != plugin_name:
+ # if we are exporting plugin commands, skip plugins we have not specified.
+ continue
+ if plugin_name is None and command.plugin_name() not in core_plugins:
+ # skip non-core plugins
+ # TODO: Support extracting from third party plugins.
+ continue
+ note(gettext("Exporting messages from plugin command: {0} in {1}").format(
+ cmd_name, command.plugin_name() ))
+ _write_command_help(exporter, command)
+
+
+def _error_messages(exporter):
+ """Extract fmt string from bzrlib.errors."""
+ context = exporter.get_context(errors)
+ base_klass = errors.BzrError
+ for name in dir(errors):
+ klass = getattr(errors, name)
+ if not inspect.isclass(klass):
+ continue
+ if not issubclass(klass, base_klass):
+ continue
+ if klass is base_klass:
+ continue
+ if klass.internal_error:
+ continue
+ fmt = getattr(klass, "_fmt", None)
+ if fmt:
+ note(gettext("Exporting message from error: %s"), name)
+ exporter.poentry_in_context(context, fmt)
+
+
+def _help_topics(exporter):
+ topic_registry = help_topics.topic_registry
+ for key in topic_registry.keys():
+ doc = topic_registry.get(key)
+ if isinstance(doc, str):
+ exporter.poentry_per_paragraph(
+ 'dummy/help_topics/'+key+'/detail.txt',
+ 1, doc)
+ elif callable(doc): # help topics from files
+ exporter.poentry_per_paragraph(
+ 'en/help_topics/'+key+'.txt',
+ 1, doc(key))
+ summary = topic_registry.get_summary(key)
+ if summary is not None:
+ exporter.poentry('dummy/help_topics/'+key+'/summary.txt',
+ 1, summary)
+
+
+def export_pot(outf, plugin=None, include_duplicates=False):
+ exporter = _PotExporter(outf, include_duplicates)
+ if plugin is None:
+ _standard_options(exporter)
+ _command_helps(exporter)
+ _error_messages(exporter)
+ _help_topics(exporter)
+ else:
+ _command_helps(exporter, plugin)
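To see the output shape, the exporter defined above can be pointed at any writable stream; the path, line number and message below are made-up values, not real bzrlib strings:

    import sys
    from bzrlib.export_pot import _PotExporter

    exporter = _PotExporter(sys.stdout)
    exporter.poentry('bzrlib/builtins.py', 123,
        'Display status summary.\nThis reports on versioned and unknown files.')
    # Writes roughly:
    #   #: bzrlib/builtins.py:123
    #   msgid ""
    #   "Display status summary.\n"
    #   "This reports on versioned and unknown files."
    #   msgstr ""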
diff --git a/bzrlib/externalcommand.py b/bzrlib/externalcommand.py
new file mode 100644
index 0000000..90bc63b
--- /dev/null
+++ b/bzrlib/externalcommand.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2004, 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# TODO: Perhaps rather than mapping options and arguments back and
+# forth, we should just pass in the whole argv, and allow
+# ExternalCommands to handle it differently to internal commands?
+
+
+import os
+
+from bzrlib.commands import Command
+
+
+class ExternalCommand(Command):
+ """Class to wrap external commands."""
+
+ @classmethod
+ def find_command(cls, cmd):
+ import os.path
+ bzrpath = os.environ.get('BZRPATH', '')
+
+ for dir in bzrpath.split(os.pathsep):
+ ## Empty directories are not real paths
+ if not dir:
+ continue
+ # This needs to be os.path.join() or windows cannot
+ # find the batch file that you are wanting to execute
+ path = os.path.join(dir, cmd)
+ if os.path.isfile(path):
+ return ExternalCommand(path)
+
+ return None
+
+
+ def __init__(self, path):
+ self.path = path
+
+ def name(self):
+ return os.path.basename(self.path)
+
+ def run(self, *args, **kwargs):
+ raise NotImplementedError('should not be called on %r' % self)
+
+ def run_argv_aliases(self, argv, alias_argv=None):
+ return os.spawnv(os.P_WAIT, self.path, [self.path] + argv)
+
+ def help(self):
+ m = 'external command from %s\n\n' % self.path
+ pipe = os.popen('%s --help' % self.path)
+ return m + pipe.read()
+
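find_command above is a plain PATH-style lookup over the BZRPATH environment variable. The equivalent search in isolation (the function name is hypothetical):

    import os

    def find_external(cmd, search_path=None):
        # Search an os.pathsep-separated list of directories for a file named cmd.
        if search_path is None:
            search_path = os.environ.get('BZRPATH', '')
        for directory in search_path.split(os.pathsep):
            if not directory:       # empty entries come from stray separators
                continue
            candidate = os.path.join(directory, cmd)
            if os.path.isfile(candidate):
                return candidate
        return None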
diff --git a/bzrlib/fetch.py b/bzrlib/fetch.py
new file mode 100644
index 0000000..8459d01
--- /dev/null
+++ b/bzrlib/fetch.py
@@ -0,0 +1,430 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Copying of history from one branch to another.
+
+The basic plan is that every branch knows the history of everything
+that has merged into it. As the first step of a merge, pull, or
+branch operation we copy history from the source into the destination
+branch.
+"""
+
+from __future__ import absolute_import
+
+import operator
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ tsort,
+ versionedfile,
+ vf_search,
+ )
+""")
+from bzrlib import (
+ errors,
+ ui,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.revision import NULL_REVISION
+from bzrlib.trace import mutter
+
+
+class RepoFetcher(object):
+ """Pull revisions and texts from one repository to another.
+
+ This should not be used directly; it's essentially an object to encapsulate
+ the logic in InterRepository.fetch().
+ """
+
+ def __init__(self, to_repository, from_repository, last_revision=None,
+ find_ghosts=True, fetch_spec=None):
+ """Create a repo fetcher.
+
+ :param last_revision: If set, try to limit to the data this revision
+ references.
+ :param fetch_spec: A SearchResult specifying which revisions to fetch.
+ If set, this overrides last_revision.
+ :param find_ghosts: If True search the entire history for ghosts.
+ """
+ # repository.fetch has the responsibility for short-circuiting
+ # attempts to copy between a repository and itself.
+ self.to_repository = to_repository
+ self.from_repository = from_repository
+ self.sink = to_repository._get_sink()
+ # must not mutate self._last_revision as it's potentially a shared instance
+ self._last_revision = last_revision
+ self._fetch_spec = fetch_spec
+ self.find_ghosts = find_ghosts
+ self.from_repository.lock_read()
+ mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
+ self.from_repository, self.from_repository._format,
+ self.to_repository, self.to_repository._format)
+ try:
+ self.__fetch()
+ finally:
+ self.from_repository.unlock()
+
+ def __fetch(self):
+ """Primary worker function.
+
+ This initialises all the needed variables, and then fetches the
+ requested revisions, finally clearing the progress bar.
+ """
+ # Roughly this is what we're aiming for fetch to become:
+ #
+ # missing = self.sink.insert_stream(self.source.get_stream(search))
+ # if missing:
+ # missing = self.sink.insert_stream(self.source.get_items(missing))
+ # assert not missing
+ self.count_total = 0
+ self.file_ids_names = {}
+ pb = ui.ui_factory.nested_progress_bar()
+ pb.show_pct = pb.show_count = False
+ try:
+ pb.update(gettext("Finding revisions"), 0, 2)
+ search_result = self._revids_to_fetch()
+ mutter('fetching: %s', search_result)
+ if search_result.is_empty():
+ return
+ pb.update(gettext("Fetching revisions"), 1, 2)
+ self._fetch_everything_for_search(search_result)
+ finally:
+ pb.finished()
+
+ def _fetch_everything_for_search(self, search):
+ """Fetch all data for the given set of revisions."""
+ # The first phase is "file". We pass the progress bar for it directly
+ # into item_keys_introduced_by, which has more information about how
+ # that phase is progressing than we do. Progress updates for the other
+ # phases are taken care of in this function.
+ # XXX: there should be a clear owner of the progress reporting. Perhaps
+ # item_keys_introduced_by should have a richer API than it does at the
+ # moment, so that it can feed the progress information back to this
+ # function?
+ if (self.from_repository._format.rich_root_data and
+ not self.to_repository._format.rich_root_data):
+ raise errors.IncompatibleRepositories(
+ self.from_repository, self.to_repository,
+ "different rich-root support")
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ pb.update("Get stream source")
+ source = self.from_repository._get_source(
+ self.to_repository._format)
+ stream = source.get_stream(search)
+ from_format = self.from_repository._format
+ pb.update("Inserting stream")
+ resume_tokens, missing_keys = self.sink.insert_stream(
+ stream, from_format, [])
+ if missing_keys:
+ pb.update("Missing keys")
+ stream = source.get_stream_for_missing_keys(missing_keys)
+ pb.update("Inserting missing keys")
+ resume_tokens, missing_keys = self.sink.insert_stream(
+ stream, from_format, resume_tokens)
+ if missing_keys:
+ raise AssertionError(
+ "second push failed to complete a fetch %r." % (
+ missing_keys,))
+ if resume_tokens:
+ raise AssertionError(
+ "second push failed to commit the fetch %r." % (
+ resume_tokens,))
+ pb.update("Finishing stream")
+ self.sink.finished()
+ finally:
+ pb.finished()
+
+ def _revids_to_fetch(self):
+ """Determines the exact revisions needed from self.from_repository to
+ install self._last_revision in self.to_repository.
+
+ :returns: A SearchResult of some sort. (Possibly a
+ PendingAncestryResult, EmptySearchResult, etc.)
+ """
+ if self._fetch_spec is not None:
+ # The fetch spec is already a concrete search result.
+ return self._fetch_spec
+ elif self._last_revision == NULL_REVISION:
+ # fetch_spec is None + last_revision is null => empty fetch.
+ # explicit limit of no revisions needed
+ return vf_search.EmptySearchResult()
+ elif self._last_revision is not None:
+ return vf_search.NotInOtherForRevs(self.to_repository,
+ self.from_repository, [self._last_revision],
+ find_ghosts=self.find_ghosts).execute()
+ else: # self._last_revision is None:
+ return vf_search.EverythingNotInOther(self.to_repository,
+ self.from_repository,
+ find_ghosts=self.find_ghosts).execute()
+
+
+class Inter1and2Helper(object):
+ """Helper for operations that convert data from model 1 and 2
+
+ This is for use by fetchers and converters.
+ """
+
+ # This is a class variable so that the test suite can override it.
+ known_graph_threshold = 100
+
+ def __init__(self, source):
+ """Constructor.
+
+ :param source: The repository data comes from
+ """
+ self.source = source
+
+ def iter_rev_trees(self, revs):
+ """Iterate through RevisionTrees efficiently.
+
+ Additionally, the inventory's revision_id is set if unset.
+
+ Trees are retrieved in batches of 100, and then yielded in the order
+ they were requested.
+
+ :param revs: A list of revision ids
+ """
+ # In case that revs is not a list.
+ revs = list(revs)
+ while revs:
+ for tree in self.source.revision_trees(revs[:100]):
+ if tree.root_inventory.revision_id is None:
+ tree.root_inventory.revision_id = tree.get_revision_id()
+ yield tree
+ revs = revs[100:]
+
+ def _find_root_ids(self, revs, parent_map, graph):
+ revision_root = {}
+ for tree in self.iter_rev_trees(revs):
+ root_id = tree.get_root_id()
+ revision_id = tree.get_file_revision(root_id, u"")
+ revision_root[revision_id] = root_id
+ # Find out which parents we don't already know root ids for
+ parents = set()
+ for revision_parents in parent_map.itervalues():
+ parents.update(revision_parents)
+ parents.difference_update(revision_root.keys() + [NULL_REVISION])
+ # Limit to revisions present in the versionedfile
+ parents = graph.get_parent_map(parents).keys()
+ for tree in self.iter_rev_trees(parents):
+ root_id = tree.get_root_id()
+ revision_root[tree.get_revision_id()] = root_id
+ return revision_root
+
+ def generate_root_texts(self, revs):
+ """Generate VersionedFiles for all root ids.
+
+ :param revs: the revisions to include
+ """
+ graph = self.source.get_graph()
+ parent_map = graph.get_parent_map(revs)
+ rev_order = tsort.topo_sort(parent_map)
+ rev_id_to_root_id = self._find_root_ids(revs, parent_map, graph)
+ root_id_order = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in
+ rev_order]
+ # Guaranteed stable, this groups all the file id operations together
+ # retaining topological order within the revisions of a file id.
+ # File id splits and joins would invalidate this, but they don't exist
+ # yet, and are unlikely to in non-rich-root environments anyway.
+ root_id_order.sort(key=operator.itemgetter(0))
+ # Create a record stream containing the roots to create.
+ if len(revs) > self.known_graph_threshold:
+ graph = self.source.get_known_graph_ancestry(revs)
+ new_roots_stream = _new_root_data_stream(
+ root_id_order, rev_id_to_root_id, parent_map, self.source, graph)
+ return [('texts', new_roots_stream)]
+
+
+def _new_root_data_stream(
+ root_keys_to_create, rev_id_to_root_id_map, parent_map, repo, graph=None):
+ """Generate a texts substream of synthesised root entries.
+
+ Used in fetches that do rich-root upgrades.
+
+ :param root_keys_to_create: iterable of (root_id, rev_id) pairs describing
+ the root entries to create.
+ :param rev_id_to_root_id_map: dict of known rev_id -> root_id mappings for
+ calculating the parents. If a parent rev_id is not found here then it
+ will be recalculated.
+ :param parent_map: a parent map for all the revisions in
+ root_keys_to_create.
+ :param graph: a graph to use instead of repo.get_graph().
+ """
+ for root_key in root_keys_to_create:
+ root_id, rev_id = root_key
+ parent_keys = _parent_keys_for_root_version(
+ root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph)
+ yield versionedfile.FulltextContentFactory(
+ root_key, parent_keys, None, '')
+
+
+def _parent_keys_for_root_version(
+ root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph=None):
+ """Get the parent keys for a given root id.
+
+ A helper function for _new_root_data_stream.
+ """
+ # Include direct parents of the revision, but only if they used the same
+ # root_id and are heads.
+ rev_parents = parent_map[rev_id]
+ parent_ids = []
+ for parent_id in rev_parents:
+ if parent_id == NULL_REVISION:
+ continue
+ if parent_id not in rev_id_to_root_id_map:
+ # We probably didn't read this revision, go spend the extra effort
+ # to actually check
+ try:
+ tree = repo.revision_tree(parent_id)
+ except errors.NoSuchRevision:
+ # Ghost, fill out rev_id_to_root_id in case we encounter this
+ # again.
+ # But set parent_root_id to None since we don't really know
+ parent_root_id = None
+ else:
+ parent_root_id = tree.get_root_id()
+ rev_id_to_root_id_map[parent_id] = None
+ # XXX: why not:
+ # rev_id_to_root_id_map[parent_id] = parent_root_id
+ # memory consumption maybe?
+ else:
+ parent_root_id = rev_id_to_root_id_map[parent_id]
+ if root_id == parent_root_id:
+ # With stacking we _might_ want to refer to a non-local revision,
+ # but this code path only applies when we have the full content
+ # available, so ghosts really are ghosts, not just the edge of
+ # local data.
+ parent_ids.append(parent_id)
+ else:
+ # root_id may be in the parent anyway.
+ try:
+ tree = repo.revision_tree(parent_id)
+ except errors.NoSuchRevision:
+ # ghost, can't refer to it.
+ pass
+ else:
+ try:
+ parent_ids.append(tree.get_file_revision(root_id))
+ except errors.NoSuchId:
+ # not in the tree
+ pass
+ # Drop non-head parents
+ if graph is None:
+ graph = repo.get_graph()
+ heads = graph.heads(parent_ids)
+ selected_ids = []
+ for parent_id in parent_ids:
+ if parent_id in heads and parent_id not in selected_ids:
+ selected_ids.append(parent_id)
+ parent_keys = [(root_id, parent_id) for parent_id in selected_ids]
+ return parent_keys
+
+
+class TargetRepoKinds(object):
+ """An enum-like set of constants.
+
+ They are the possible values of FetchSpecFactory.target_repo_kinds.
+ """
+
+ PREEXISTING = 'preexisting'
+ STACKED = 'stacked'
+ EMPTY = 'empty'
+
+
+class FetchSpecFactory(object):
+ """A helper for building the best fetch spec for a sprout call.
+
+ Factors that go into determining the sort of fetch to perform:
+ * did the caller specify any revision IDs?
+ * did the caller specify a source branch (need to fetch its
+ heads_to_fetch(), usually the tip + tags)
+ * is there an existing target repo (don't need to refetch revs it
+ already has)
+ * target is stacked? (similar to pre-existing target repo: even if
+ the target itself is new don't want to refetch existing revs)
+
+ :ivar source_branch: the source branch if one specified, else None.
+ :ivar source_branch_stop_revision_id: fetch up to this revision of
+ source_branch, rather than its tip.
+ :ivar source_repo: the source repository if one found, else None.
+ :ivar target_repo: the target repository acquired by sprout.
+ :ivar target_repo_kind: one of the TargetRepoKinds constants.
+ """
+
+ def __init__(self):
+ self._explicit_rev_ids = set()
+ self.source_branch = None
+ self.source_branch_stop_revision_id = None
+ self.source_repo = None
+ self.target_repo = None
+ self.target_repo_kind = None
+ self.limit = None
+
+ def add_revision_ids(self, revision_ids):
+ """Add revision_ids to the set of revision_ids to be fetched."""
+ self._explicit_rev_ids.update(revision_ids)
+
+ def make_fetch_spec(self):
+ """Build a SearchResult or PendingAncestryResult or etc."""
+ if self.target_repo_kind is None or self.source_repo is None:
+ raise AssertionError(
+ 'Incomplete FetchSpecFactory: %r' % (self.__dict__,))
+ if len(self._explicit_rev_ids) == 0 and self.source_branch is None:
+ if self.limit is not None:
+ raise NotImplementedError(
+ "limit is only supported with a source branch set")
+ # Caller hasn't specified any revisions or source branch
+ if self.target_repo_kind == TargetRepoKinds.EMPTY:
+ return vf_search.EverythingResult(self.source_repo)
+ else:
+ # We want everything not already in the target (or target's
+ # fallbacks).
+ return vf_search.EverythingNotInOther(
+ self.target_repo, self.source_repo).execute()
+ heads_to_fetch = set(self._explicit_rev_ids)
+ if self.source_branch is not None:
+ must_fetch, if_present_fetch = self.source_branch.heads_to_fetch()
+ if self.source_branch_stop_revision_id is not None:
+ # Replace the tip rev from must_fetch with the stop revision
+ # XXX: this might be wrong if the tip rev is also in the
+ # must_fetch set for other reasons (e.g. it's the tip of
+ # multiple loom threads?), but then it's pretty unclear what it
+ # should mean to specify a stop_revision in that case anyway.
+ must_fetch.discard(self.source_branch.last_revision())
+ must_fetch.add(self.source_branch_stop_revision_id)
+ heads_to_fetch.update(must_fetch)
+ else:
+ if_present_fetch = set()
+ if self.target_repo_kind == TargetRepoKinds.EMPTY:
+ # PendingAncestryResult does not raise errors if a requested head
+ # is absent. Ideally it would support the
+ # required_ids/if_present_ids distinction, but in practice
+ # heads_to_fetch will almost certainly be present so this doesn't
+ # matter much.
+ all_heads = heads_to_fetch.union(if_present_fetch)
+ ret = vf_search.PendingAncestryResult(all_heads, self.source_repo)
+ if self.limit is not None:
+ graph = self.source_repo.get_graph()
+ topo_order = list(graph.iter_topo_order(ret.get_keys()))
+ result_set = topo_order[:self.limit]
+ ret = self.source_repo.revision_ids_to_search_result(result_set)
+ return ret
+ else:
+ return vf_search.NotInOtherForRevs(self.target_repo, self.source_repo,
+ required_ids=heads_to_fetch, if_present_ids=if_present_fetch,
+ limit=self.limit).execute()
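A sketch of how a sprout-style caller would drive the factory defined above; source_branch and target_repo are placeholders for real Branch and Repository objects, so this is illustrative rather than runnable on its own:

    from bzrlib.fetch import FetchSpecFactory, TargetRepoKinds

    factory = FetchSpecFactory()
    factory.source_branch = source_branch
    factory.source_repo = source_branch.repository
    factory.target_repo = target_repo
    factory.target_repo_kind = TargetRepoKinds.EMPTY
    fetch_spec = factory.make_fetch_spec()
    # The result is a search result object that fetch code such as RepoFetcher
    # can consume via its fetch_spec parameter.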
diff --git a/bzrlib/fifo_cache.py b/bzrlib/fifo_cache.py
new file mode 100644
index 0000000..641dd21
--- /dev/null
+++ b/bzrlib/fifo_cache.py
@@ -0,0 +1,270 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A simple first-in-first-out (FIFO) cache."""
+
+from __future__ import absolute_import
+
+from collections import deque
+
+
+class FIFOCache(dict):
+ """A class which manages a cache of entries, removing old ones."""
+
+ def __init__(self, max_cache=100, after_cleanup_count=None):
+ dict.__init__(self)
+ self._max_cache = max_cache
+ if after_cleanup_count is None:
+ self._after_cleanup_count = self._max_cache * 8 / 10
+ else:
+ self._after_cleanup_count = min(after_cleanup_count,
+ self._max_cache)
+ self._cleanup = {} # map to cleanup functions when items are removed
+ self._queue = deque() # Track when things are accessed
+
+ def __setitem__(self, key, value):
+ """Add a value to the cache, there will be no cleanup function."""
+ self.add(key, value, cleanup=None)
+
+ def __delitem__(self, key):
+ # Remove the key from an arbitrary location in the queue
+ remove = getattr(self._queue, 'remove', None)
+ # Python 2.5's deque has remove(), but Python 2.4's does not
+ if remove is not None:
+ remove(key)
+ else:
+ # TODO: It would probably be faster to pop()/popleft() until we get to the
+ # key, and then insert those back into the queue. We know
+ # the key should only be present in one position, and we
+ # wouldn't need to rebuild the whole queue.
+ self._queue = deque([k for k in self._queue if k != key])
+ self._remove(key)
+
+ def add(self, key, value, cleanup=None):
+ """Add a new value to the cache.
+
+ Also, if the entry is ever removed from the queue, call cleanup,
+ passing it the key and value being removed.
+
+ :param key: The key to store it under
+ :param value: The object to store
+ :param cleanup: None or a function taking (key, value) to indicate
+ 'value' should be cleaned up
+ """
+ if key in self:
+ # Remove the earlier reference to this key, adding it again bumps
+ # it to the end of the queue
+ del self[key]
+ self._queue.append(key)
+ dict.__setitem__(self, key, value)
+ if cleanup is not None:
+ self._cleanup[key] = cleanup
+ if len(self) > self._max_cache:
+ self.cleanup()
+
+ def cache_size(self):
+ """Get the number of entries we will cache."""
+ return self._max_cache
+
+ def cleanup(self):
+ """Clear the cache until it shrinks to the requested size.
+
+ This does not completely wipe the cache, just makes sure it is under
+ the after_cleanup_count.
+ """
+ # Make sure the cache is shrunk to the correct size
+ while len(self) > self._after_cleanup_count:
+ self._remove_oldest()
+ if len(self._queue) != len(self):
+ raise AssertionError('The length of the queue should always equal'
+ ' the length of the dict. %s != %s'
+ % (len(self._queue), len(self)))
+
+ def clear(self):
+ """Clear out all of the cache."""
+ # Clean up in FIFO order
+ while self:
+ self._remove_oldest()
+
+ def _remove(self, key):
+ """Remove an entry, making sure to call any cleanup function."""
+ cleanup = self._cleanup.pop(key, None)
+ # We override self.pop() because it doesn't play well with cleanup
+ # functions.
+ val = dict.pop(self, key)
+ if cleanup is not None:
+ cleanup(key, val)
+ return val
+
+ def _remove_oldest(self):
+ """Remove the oldest entry."""
+ key = self._queue.popleft()
+ self._remove(key)
+
+ def resize(self, max_cache, after_cleanup_count=None):
+ """Increase/decrease the number of cached entries.
+
+ :param max_cache: The maximum number of entries to cache.
+ :param after_cleanup_count: After cleanup, we should have at most this
+ many entries. This defaults to 80% of max_cache.
+ """
+ self._max_cache = max_cache
+ if after_cleanup_count is None:
+ self._after_cleanup_count = max_cache * 8 / 10
+ else:
+ self._after_cleanup_count = min(max_cache, after_cleanup_count)
+ if len(self) > self._max_cache:
+ self.cleanup()
+
+ # raise NotImplementedError on dict functions that would mutate the cache
+ # which have not been properly implemented yet.
+ def copy(self):
+ raise NotImplementedError(self.copy)
+
+ def pop(self, key, default=None):
+ # If there is a cleanup() function, then it is unclear what pop()
+ # should do. Specifically, we would have to call the cleanup on the
+ # value before we return it, which should cause whatever resources were
+ # allocated to be removed, which makes the return value fairly useless.
+ # So instead, we just don't implement it.
+ raise NotImplementedError(self.pop)
+
+ def popitem(self):
+ # See pop()
+ raise NotImplementedError(self.popitem)
+
+ def setdefault(self, key, defaultval=None):
+ """similar to dict.setdefault"""
+ if key in self:
+ return self[key]
+ self[key] = defaultval
+ return defaultval
+
+ def update(self, *args, **kwargs):
+ """Similar to dict.update()"""
+ if len(args) == 1:
+ arg = args[0]
+ if isinstance(arg, dict):
+ for key, val in arg.iteritems():
+ self.add(key, val)
+ else:
+ for key, val in args[0]:
+ self.add(key, val)
+ elif len(args) > 1:
+ raise TypeError('update expected at most 1 argument, got %d'
+ % len(args))
+ if kwargs:
+ for key, val in kwargs.iteritems():
+ self.add(key, val)
+
+
+class FIFOSizeCache(FIFOCache):
+ """An FIFOCache that removes things based on the size of the values.
+
+ This differs in that it doesn't care how many actual items there are,
+ it restricts the cache to be cleaned based on the size of the data.
+ """
+
+ def __init__(self, max_size=1024*1024, after_cleanup_size=None,
+ compute_size=None):
+ """Create a new FIFOSizeCache.
+
+ :param max_size: The max number of bytes to store before we start
+ clearing out entries.
+ :param after_cleanup_size: After cleaning up, shrink everything to this
+ size (defaults to 80% of max_size).
+ :param compute_size: A function to compute the size of a value. If
+ not supplied we default to 'len'.
+ """
+ # Arbitrary, we won't really be using the value anyway.
+ FIFOCache.__init__(self, max_cache=max_size)
+ self._max_size = max_size
+ if after_cleanup_size is None:
+ self._after_cleanup_size = self._max_size * 8 / 10
+ else:
+ self._after_cleanup_size = min(after_cleanup_size, self._max_size)
+
+ self._value_size = 0
+ self._compute_size = compute_size
+ if compute_size is None:
+ self._compute_size = len
+
+ def add(self, key, value, cleanup=None):
+ """Add a new value to the cache.
+
+ Also, if the entry is ever removed from the queue, call cleanup,
+ passing it the key and value being removed.
+
+ :param key: The key to store it under
+ :param value: The object to store. If this value by itself is >=
+ after_cleanup_size, then we will not store it at all.
+ :param cleanup: None or a function taking (key, value) to indicate
+ 'value' should be cleaned up.
+ """
+ # Even if the new value won't be stored, we need to remove the old
+ # value
+ if key in self:
+ # Remove the earlier reference to this key, adding it again bumps
+ # it to the end of the queue
+ del self[key]
+ value_len = self._compute_size(value)
+ if value_len >= self._after_cleanup_size:
+ return
+ self._queue.append(key)
+ dict.__setitem__(self, key, value)
+ if cleanup is not None:
+ self._cleanup[key] = cleanup
+ self._value_size += value_len
+ if self._value_size > self._max_size:
+ # Time to cleanup
+ self.cleanup()
+
+ def cache_size(self):
+ """Get the number of bytes we will cache."""
+ return self._max_size
+
+ def cleanup(self):
+ """Clear the cache until it shrinks to the requested size.
+
+ This does not completely wipe the cache, just makes sure it is under
+ the after_cleanup_size.
+ """
+ # Make sure the cache is shrunk to the correct size
+ while self._value_size > self._after_cleanup_size:
+ self._remove_oldest()
+
+ def _remove(self, key):
+ """Remove an entry, making sure to maintain the invariants."""
+ val = FIFOCache._remove(self, key)
+ self._value_size -= self._compute_size(val)
+ return val
+
+ def resize(self, max_size, after_cleanup_size=None):
+ """Increase/decrease the amount of cached data.
+
+ :param max_size: The maximum number of bytes to cache.
+ :param after_cleanup_size: After cleanup, we should have at most this
+ many bytes cached. This defaults to 80% of max_size.
+ """
+ FIFOCache.resize(self, max_size)
+ self._max_size = max_size
+ if after_cleanup_size is None:
+ self._after_cleanup_size = max_size * 8 / 10
+ else:
+ self._after_cleanup_size = min(max_size, after_cleanup_size)
+ if self._value_size > self._max_size:
+ self.cleanup()
+
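+# Illustrative usage sketch (the values below are chosen only for the
+# example, they are not part of this module): a FIFOSizeCache is bounded by
+# total value size rather than by entry count.
+#
+#   c = FIFOSizeCache(max_size=10)
+#   c.add('a', 'hello')    # 5 bytes cached
+#   c.add('b', 'world!')   # 11 bytes total triggers cleanup(), dropping 'a'
+#   'a' in c               # -> False
+#   'b' in c               # -> True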
diff --git a/bzrlib/filter_tree.py b/bzrlib/filter_tree.py
new file mode 100644
index 0000000..8fcb867
--- /dev/null
+++ b/bzrlib/filter_tree.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Content-filtered view of any tree.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ tree,
+ )
+from bzrlib.filters import (
+ ContentFilterContext,
+ filtered_output_bytes,
+ )
+
+
+class ContentFilterTree(tree.Tree):
+ """A virtual tree that applies content filters to an underlying tree.
+
+ Not every operation is supported yet.
+ """
+
+ def __init__(self, backing_tree, filter_stack_callback):
+ """Construct a new filtered tree view.
+
+ :param filter_stack_callback: A callable taking a path that returns
+ the filter stack that should be used for that path.
+ :param backing_tree: An underlying tree to wrap.
+ """
+ self.backing_tree = backing_tree
+ self.filter_stack_callback = filter_stack_callback
+
+ def get_file_text(self, file_id, path=None):
+ chunks = self.backing_tree.get_file_lines(file_id, path)
+ filters = self.filter_stack_callback(path)
+ if path is None:
+ path = self.backing_tree.id2path(file_id)
+ context = ContentFilterContext(path, self, None)
+ contents = filtered_output_bytes(chunks, filters, context)
+ content = ''.join(contents)
+ return content
+
+ def has_filename(self, filename):
+        return self.backing_tree.has_filename(filename)
+
+ def is_executable(self, file_id, path=None):
+ return self.backing_tree.is_executable(file_id, path)
+
+ def iter_entries_by_dir(self, specific_file_ids=None, yield_parents=None):
+ # NB: This simply returns the parent tree's entries; the length may be
+ # wrong but it can't easily be calculated without filtering the whole
+ # text. Currently all callers cope with this; perhaps they should be
+ # updated to a narrower interface that only provides things guaranteed
+ # cheaply available across all trees. -- mbp 20110705
+ return self.backing_tree.iter_entries_by_dir(
+ specific_file_ids=specific_file_ids,
+ yield_parents=yield_parents)
+
+ def lock_read(self):
+ return self.backing_tree.lock_read()
+
+ def unlock(self):
+ return self.backing_tree.unlock()
diff --git a/bzrlib/filters/__init__.py b/bzrlib/filters/__init__.py
new file mode 100644
index 0000000..c902e8c
--- /dev/null
+++ b/bzrlib/filters/__init__.py
@@ -0,0 +1,287 @@
+# Copyright (C) 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Working tree content filtering support.
+
+A filter consists of a read converter, write converter pair.
+The content in the working tree is called the convenience format
+while the content actually stored is called the canonical format.
+The read converter produces canonical content from convenience
+content while the writer goes the other way.
+
+Converters have the following signatures::
+
+ read_converter(chunks) -> chunks
+ write_converter(chunks, context) -> chunks
+
+where:
+
+ * chunks is an iterator over a sequence of byte strings
+
+ * context is an optional ContentFilterContext object (possibly None)
+ providing converters access to interesting information, e.g. the
+ relative path of the file.
+
+Note that context is currently only supported for write converters.
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ config,
+ errors,
+ osutils,
+ registry,
+ )
+""")
+from bzrlib.symbol_versioning import (
+ deprecated_function,
+ deprecated_in,
+ )
+
+
+class ContentFilter(object):
+
+ def __init__(self, reader, writer):
+ """Create a filter that converts content while reading and writing.
+
+ :param reader: function for converting convenience to canonical content
+ :param writer: function for converting canonical to convenience content
+ """
+ self.reader = reader
+ self.writer = writer
+
+ def __repr__(self):
+ return "reader: %s, writer: %s" % (self.reader,self.writer)
+
+
+class ContentFilterContext(object):
+ """Object providing information that filters can use."""
+
+ def __init__(self, relpath=None, tree=None, entry=None):
+ """Create a context.
+
+ :param relpath: the relative path or None if this context doesn't
+ support that information.
+ :param tree: the Tree providing this file or None if this context
+ doesn't support that information.
+ :param entry: the InventoryEntry object if it is already known or
+ None if it should be derived if possible
+ """
+ self._relpath = relpath
+ self._tree = tree
+ self._entry = entry
+ # Cached values
+ self._revision_id = None
+ self._revision = None
+ self._config = None
+
+ def relpath(self):
+ """Relative path of file to tree-root."""
+ return self._relpath
+
+ def source_tree(self):
+ """Source Tree object."""
+ return self._tree
+
+ def file_id(self):
+ """File-id of file."""
+ if self._entry is not None:
+ return self._entry.file_id
+ elif self._tree is None:
+ return None
+ else:
+ return self._tree.path2id(self._relpath)
+
+ def revision_id(self):
+ """Id of revision that last changed this file."""
+ if self._revision_id is None:
+ if self._entry is not None:
+ self._revision_id = self._entry.revision
+ elif self._tree is not None:
+ file_id = self._tree.path2id(self._relpath)
+ self._entry = self._tree.inventory[file_id]
+ self._revision_id = self._entry.revision
+ return self._revision_id
+
+ def revision(self):
+ """Revision this variation of the file was introduced in."""
+ if self._revision is None:
+ rev_id = self.revision_id()
+ if rev_id is not None:
+ repo = getattr(self._tree, '_repository', None)
+ if repo is None:
+ repo = self._tree.branch.repository
+ self._revision = repo.get_revision(rev_id)
+ return self._revision
+
+ def config(self):
+ """The Config object to search for configuration settings."""
+ if self._config is None:
+ branch = getattr(self._tree, 'branch', None)
+ if branch is not None:
+ self._config = branch.get_config()
+ else:
+ self._config = config.GlobalConfig()
+ return self._config
+
+
+def filtered_input_file(f, filters):
+ """Get an input file that converts external to internal content.
+
+ :param f: the original input file
+ :param filters: the stack of filters to apply
+ :return: a file-like object
+ """
+ if filters:
+ chunks = [f.read()]
+ for filter in filters:
+ if filter.reader is not None:
+ chunks = filter.reader(chunks)
+ return StringIO(''.join(chunks))
+ else:
+ return f
+
+
+def filtered_output_bytes(chunks, filters, context=None):
+ """Convert byte chunks from internal to external format.
+
+ :param chunks: an iterator containing the original content
+ :param filters: the stack of filters to apply
+ :param context: a ContentFilterContext object passed to
+ each filter
+ :return: an iterator containing the content to output
+ """
+ if filters:
+ for filter in reversed(filters):
+ if filter.writer is not None:
+ chunks = filter.writer(chunks, context)
+ return chunks
+
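+# Illustrative sketch (the _reader/_writer converters below are made up for
+# the example and are not part of bzrlib): a stack whose canonical form is
+# lower case and whose convenience form is upper case.
+#
+#   _reader = lambda chunks: [''.join(chunks).lower()]
+#   _writer = lambda chunks, context=None: [''.join(chunks).upper()]
+#   stack = [ContentFilter(_reader, _writer)]
+#   ''.join(filtered_output_bytes(['hello'], stack))       # -> 'HELLO'
+#   filtered_input_file(StringIO('HELLO'), stack).read()   # -> 'hello'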
+
+def internal_size_sha_file_byname(name, filters):
+ """Get size and sha of internal content given external content.
+
+ :param name: path to file
+ :param filters: the stack of filters to apply
+ """
+ f = open(name, 'rb', 65000)
+ try:
+ if filters:
+ f = filtered_input_file(f, filters)
+ return osutils.size_sha_file(f)
+ finally:
+ f.close()
+
+
+# The registry of filter stacks indexed by name.
+filter_stacks_registry = registry.Registry()
+
+
+# Cache of preferences -> stack
+# TODO: make this per branch (say) rather than global
+_stack_cache = {}
+
+
+# XXX: This function doesn't have any tests. JRV 2012-03-29
+@deprecated_function(deprecated_in((2, 6, 0)))
+def register_filter_stack_map(name, stack_map_lookup):
+ """Register the filter stacks to use for various preference values.
+
+ :param name: the preference/filter-stack name
+ :param stack_map_lookup: a callable where
+ the parameter is the preference value to match and
+ the result is the matching stack of filters to use,
+ or None if none.
+ """
+ filter_stacks_registry.register(name, stack_map_lookup)
+
+
+# XXX: This function doesn't have any tests. JRV 2012-03-29
+@deprecated_function(deprecated_in((2, 6, 0)))
+def lazy_register_filter_stack_map(name, module_name, member_name):
+ """Lazily register the filter stacks to use for various preference values.
+
+ :param name: the preference/filter-stack name
+ :param module_name: The python path to the module of the filter stack map.
+ :param member_name: The name of the stack_map_lookup callable
+ in the module.
+ """
+ filter_stacks_registry.register_lazy(name, module_name, member_name)
+
+
+def _get_registered_names():
+ """Get the list of names with filters registered."""
+ # Note: We may want to intelligently order these later.
+ # If so, the register_ fn will need to support an optional priority.
+ return filter_stacks_registry.keys()
+
+
+def _get_filter_stack_for(preferences):
+ """Get the filter stack given a sequence of preferences.
+
+ :param preferences: a sequence of (name,value) tuples where
+ name is the preference name and
+ value is the key into the filter stack map registered
+ for that preference.
+ """
+ if preferences is None:
+ return []
+ stack = _stack_cache.get(preferences)
+ if stack is not None:
+ return stack
+ stack = []
+ for k, v in preferences:
+ if v is None:
+ continue
+ try:
+ stack_map_lookup = filter_stacks_registry.get(k)
+ except KeyError:
+ # Some preferences may not have associated filters
+ continue
+ items = stack_map_lookup(v)
+ if items:
+ stack.extend(items)
+ _stack_cache[preferences] = stack
+ return stack
+
+
+def _reset_registry(value=None):
+ """Reset the filter stack registry.
+
+ This function is provided to aid testing. The expected usage is::
+
+ old = _reset_registry()
+ # run tests
+ _reset_registry(old)
+
+ :param value: the value to set the registry to or None for an empty one.
+ :return: the existing value before it reset.
+    :return: the existing value before it was reset.
+ global filter_stacks_registry
+ original = filter_stacks_registry
+ if value is None:
+ filter_stacks_registry = registry.Registry()
+ else:
+ filter_stacks_registry = value
+ _stack_cache.clear()
+ return original
+
+
+filter_stacks_registry.register_lazy('eol', 'bzrlib.filters.eol', 'eol_lookup')
diff --git a/bzrlib/filters/eol.py b/bzrlib/filters/eol.py
new file mode 100644
index 0000000..20ff8d7
--- /dev/null
+++ b/bzrlib/filters/eol.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""End of Line Conversion filters.
+
+See bzr help eol for details.
+"""
+
+from __future__ import absolute_import
+
+
+import re, sys
+
+from bzrlib.errors import BzrError
+from bzrlib.filters import ContentFilter
+
+
+# Real Unix newline - \n without \r before it
+_UNIX_NL_RE = re.compile(r'(?<!\r)\n')
+
+
+def _to_lf_converter(chunks, context=None):
+ """A content file that converts crlf to lf."""
+ content = ''.join(chunks)
+ if '\x00' in content:
+ return [content]
+ else:
+ return [content.replace('\r\n', '\n')]
+
+
+def _to_crlf_converter(chunks, context=None):
+ """A content file that converts lf to crlf."""
+ content = ''.join(chunks)
+ if '\x00' in content:
+ return [content]
+ else:
+ return [_UNIX_NL_RE.sub('\r\n', content)]
+
+
+if sys.platform == 'win32':
+ _native_output = _to_crlf_converter
+else:
+ _native_output = _to_lf_converter
+_eol_filter_stack_map = {
+ 'exact': [],
+ 'native': [ContentFilter(_to_lf_converter, _native_output)],
+ 'lf': [ContentFilter(_to_lf_converter, _to_lf_converter)],
+ 'crlf': [ContentFilter(_to_lf_converter, _to_crlf_converter)],
+ 'native-with-crlf-in-repo':
+ [ContentFilter(_to_crlf_converter, _native_output)],
+ 'lf-with-crlf-in-repo':
+ [ContentFilter(_to_crlf_converter, _to_lf_converter)],
+ 'crlf-with-crlf-in-repo':
+ [ContentFilter(_to_crlf_converter, _to_crlf_converter)],
+ }
+def eol_lookup(key):
+ filter = _eol_filter_stack_map.get(key)
+ if filter is None:
+ raise BzrError("Unknown eol value '%s'" % key)
+ return filter
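+# Illustrative sketch: the 'crlf' stack normalizes to LF in the repository
+# and emits CRLF on output, while content containing NUL bytes is passed
+# through untouched by both converters.
+#
+#   stack = eol_lookup('crlf')
+#   stack[0].writer(['a\nb\n'])      # -> ['a\r\nb\r\n']
+#   stack[0].reader(['a\r\nb\r\n'])  # -> ['a\nb\n']
+#   eol_lookup('bogus')              # raises BzrError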
diff --git a/bzrlib/foreign.py b/bzrlib/foreign.py
new file mode 100644
index 0000000..b38eca8
--- /dev/null
+++ b/bzrlib/foreign.py
@@ -0,0 +1,342 @@
+# Copyright (C) 2008-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Foreign branch utilities."""
+
+from __future__ import absolute_import
+
+
+from bzrlib.branch import (
+ Branch,
+ )
+from bzrlib.commands import Command, Option
+from bzrlib.repository import Repository
+from bzrlib.revision import Revision
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ errors,
+ registry,
+ transform,
+ )
+from bzrlib.i18n import gettext
+""")
+
+class VcsMapping(object):
+ """Describes the mapping between the semantics of Bazaar and a foreign VCS.
+
+ """
+ # Whether this is an experimental mapping that is still open to changes.
+ experimental = False
+
+ # Whether this mapping supports exporting and importing all bzr semantics.
+ roundtripping = False
+
+ # Prefix used when importing revisions native to the foreign VCS (as
+ # opposed to roundtripping bzr-native revisions) using this mapping.
+ revid_prefix = None
+
+ def __init__(self, vcs):
+ """Create a new VcsMapping.
+
+ :param vcs: VCS that this mapping maps to Bazaar
+ """
+ self.vcs = vcs
+
+ def revision_id_bzr_to_foreign(self, bzr_revid):
+ """Parse a bzr revision id and convert it to a foreign revid.
+
+ :param bzr_revid: The bzr revision id (a string).
+ :return: A foreign revision id, can be any sort of object.
+ """
+ raise NotImplementedError(self.revision_id_bzr_to_foreign)
+
+ def revision_id_foreign_to_bzr(self, foreign_revid):
+ """Parse a foreign revision id and convert it to a bzr revid.
+
+ :param foreign_revid: Foreign revision id, can be any sort of object.
+ :return: A bzr revision id.
+ """
+ raise NotImplementedError(self.revision_id_foreign_to_bzr)
+
+
+class VcsMappingRegistry(registry.Registry):
+ """Registry for Bazaar<->foreign VCS mappings.
+
+ There should be one instance of this registry for every foreign VCS.
+ """
+
+ def register(self, key, factory, help):
+ """Register a mapping between Bazaar and foreign VCS semantics.
+
+ The factory must be a callable that takes one parameter: the key.
+ It must produce an instance of VcsMapping when called.
+ """
+ if ":" in key:
+ raise ValueError("mapping name can not contain colon (:)")
+ registry.Registry.register(self, key, factory, help)
+
+ def set_default(self, key):
+ """Set the 'default' key to be a clone of the supplied key.
+
+ This method must be called once and only once.
+ """
+ self._set_default_key(key)
+
+ def get_default(self):
+ """Convenience function for obtaining the default mapping to use."""
+ return self.get(self._get_default_key())
+
+ def revision_id_bzr_to_foreign(self, revid):
+ """Convert a bzr revision id to a foreign revid."""
+ raise NotImplementedError(self.revision_id_bzr_to_foreign)
+
+
+class ForeignRevision(Revision):
+ """A Revision from a Foreign repository. Remembers
+ information about foreign revision id and mapping.
+
+ """
+
+ def __init__(self, foreign_revid, mapping, *args, **kwargs):
+ if not "inventory_sha1" in kwargs:
+ kwargs["inventory_sha1"] = ""
+ super(ForeignRevision, self).__init__(*args, **kwargs)
+ self.foreign_revid = foreign_revid
+ self.mapping = mapping
+
+
+class ForeignVcs(object):
+ """A foreign version control system."""
+
+ branch_format = None
+
+ repository_format = None
+
+ def __init__(self, mapping_registry, abbreviation=None):
+ """Create a new foreign vcs instance.
+
+ :param mapping_registry: Registry with mappings for this VCS.
+ :param abbreviation: Optional abbreviation ('bzr', 'svn', 'git', etc)
+ """
+ self.abbreviation = abbreviation
+ self.mapping_registry = mapping_registry
+
+ def show_foreign_revid(self, foreign_revid):
+ """Prepare a foreign revision id for formatting using bzr log.
+
+ :param foreign_revid: Foreign revision id.
+ :return: Dictionary mapping string keys to string values.
+ """
+ return { }
+
+ def serialize_foreign_revid(self, foreign_revid):
+ """Serialize a foreign revision id for this VCS.
+
+ :param foreign_revid: Foreign revision id
+ :return: Bytestring with serialized revid, will not contain any
+ newlines.
+ """
+ raise NotImplementedError(self.serialize_foreign_revid)
+
+
+class ForeignVcsRegistry(registry.Registry):
+ """Registry for Foreign VCSes.
+
+ There should be one entry per foreign VCS. Example entries would be
+ "git", "svn", "hg", "darcs", etc.
+
+ """
+
+ def register(self, key, foreign_vcs, help):
+ """Register a foreign VCS.
+
+ :param key: Prefix of the foreign VCS in revision ids
+ :param foreign_vcs: ForeignVCS instance
+ :param help: Description of the foreign VCS
+ """
+ if ":" in key or "-" in key:
+ raise ValueError("vcs name can not contain : or -")
+ registry.Registry.register(self, key, foreign_vcs, help)
+
+ def parse_revision_id(self, revid):
+        """Parse a bzr revision id and return the matching mapping and foreign
+ revid.
+
+ :param revid: The bzr revision id
+ :return: tuple with foreign revid and vcs mapping
+ """
+ if not ":" in revid or not "-" in revid:
+ raise errors.InvalidRevisionId(revid, None)
+ try:
+ foreign_vcs = self.get(revid.split("-")[0])
+ except KeyError:
+ raise errors.InvalidRevisionId(revid, None)
+ return foreign_vcs.mapping_registry.revision_id_bzr_to_foreign(revid)
+
+
+foreign_vcs_registry = ForeignVcsRegistry()
+
+
+class ForeignRepository(Repository):
+ """A Repository that exists in a foreign version control system.
+
+    The data in this repository cannot be represented natively using
+    Bazaar's internal data structures, but has to be converted using a VcsMapping.
+ """
+
+ # This repository's native version control system
+ vcs = None
+
+ def has_foreign_revision(self, foreign_revid):
+ """Check whether the specified foreign revision is present.
+
+ :param foreign_revid: A foreign revision id, in the format used
+ by this Repository's VCS.
+ """
+ raise NotImplementedError(self.has_foreign_revision)
+
+ def lookup_bzr_revision_id(self, revid):
+ """Lookup a mapped or roundtripped revision by revision id.
+
+ :param revid: Bazaar revision id
+ :return: Tuple with foreign revision id and mapping.
+ """
+        raise NotImplementedError(self.lookup_bzr_revision_id)
+
+ def all_revision_ids(self, mapping=None):
+ """See Repository.all_revision_ids()."""
+ raise NotImplementedError(self.all_revision_ids)
+
+ def get_default_mapping(self):
+ """Get the default mapping for this repository."""
+ raise NotImplementedError(self.get_default_mapping)
+
+
+class ForeignBranch(Branch):
+ """Branch that exists in a foreign version control system."""
+
+ def __init__(self, mapping):
+ self.mapping = mapping
+ super(ForeignBranch, self).__init__()
+
+
+def update_workingtree_fileids(wt, target_tree):
+ """Update the file ids in a working tree based on another tree.
+
+ :param wt: Working tree in which to update file ids
+ :param target_tree: Tree to retrieve new file ids from, based on path
+ """
+ tt = transform.TreeTransform(wt)
+ try:
+ for f, p, c, v, d, n, k, e in target_tree.iter_changes(wt):
+ if v == (True, False):
+ trans_id = tt.trans_id_tree_path(p[0])
+ tt.unversion_file(trans_id)
+ elif v == (False, True):
+ trans_id = tt.trans_id_tree_path(p[1])
+ tt.version_file(f, trans_id)
+ tt.apply()
+ finally:
+ tt.finalize()
+ if len(wt.get_parent_ids()) == 1:
+ wt.set_parent_trees([(target_tree.get_revision_id(), target_tree)])
+ else:
+ wt.set_last_revision(target_tree.get_revision_id())
+
+
+class cmd_dpush(Command):
+ __doc__ = """Push into a different VCS without any custom bzr metadata.
+
+ This will afterwards rebase the local branch on the remote
+ branch unless the --no-rebase option is used, in which case
+ the two branches will be out of sync after the push.
+ """
+ takes_args = ['location?']
+ takes_options = [
+ 'remember',
+ Option('directory',
+ help='Branch to push from, '
+ 'rather than the one containing the working directory.',
+ short_name='d',
+ type=unicode,
+ ),
+ Option('no-rebase', help="Do not rebase after push."),
+ Option('strict',
+ help='Refuse to push if there are uncommitted changes in'
+ ' the working tree, --no-strict disables the check.'),
+ ]
+
+ def run(self, location=None, remember=False, directory=None,
+ no_rebase=False, strict=None):
+ from bzrlib import urlutils
+ from bzrlib.controldir import ControlDir
+ from bzrlib.errors import BzrCommandError, NoWorkingTree
+ from bzrlib.workingtree import WorkingTree
+
+ if directory is None:
+ directory = "."
+ try:
+ source_wt = WorkingTree.open_containing(directory)[0]
+ source_branch = source_wt.branch
+ except NoWorkingTree:
+ source_branch = Branch.open(directory)
+ source_wt = None
+ if source_wt is not None:
+ source_wt.check_changed_or_out_of_date(
+ strict, 'dpush_strict',
+ more_error='Use --no-strict to force the push.',
+ more_warning='Uncommitted changes will not be pushed.')
+ stored_loc = source_branch.get_push_location()
+ if location is None:
+ if stored_loc is None:
+ raise BzrCommandError(gettext("No push location known or specified."))
+ else:
+ display_url = urlutils.unescape_for_display(stored_loc,
+ self.outf.encoding)
+ self.outf.write(
+ gettext("Using saved location: %s\n") % display_url)
+ location = stored_loc
+
+ controldir = ControlDir.open(location)
+ target_branch = controldir.open_branch()
+ target_branch.lock_write()
+ try:
+ try:
+ push_result = source_branch.push(target_branch, lossy=True)
+ except errors.LossyPushToSameVCS:
+ raise BzrCommandError(gettext("{0!r} and {1!r} are in the same VCS, lossy "
+ "push not necessary. Please use regular push.").format(
+ source_branch, target_branch))
+ # We successfully created the target, remember it
+ if source_branch.get_push_location() is None or remember:
+ # FIXME: Should be done only if we succeed ? -- vila 2012-01-18
+ source_branch.set_push_location(target_branch.base)
+ if not no_rebase:
+ old_last_revid = source_branch.last_revision()
+ source_branch.pull(target_branch, overwrite=True)
+ new_last_revid = source_branch.last_revision()
+ if source_wt is not None and old_last_revid != new_last_revid:
+ source_wt.lock_write()
+ try:
+ target = source_wt.branch.repository.revision_tree(
+ new_last_revid)
+ update_workingtree_fileids(source_wt, target)
+ finally:
+ source_wt.unlock()
+ push_result.report(self.outf)
+ finally:
+ target_branch.unlock()
diff --git a/bzrlib/generate_ids.py b/bzrlib/generate_ids.py
new file mode 100644
index 0000000..57f9f08
--- /dev/null
+++ b/bzrlib/generate_ids.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Common code for generating file or revision ids."""
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import time
+
+from bzrlib import (
+ config,
+ errors,
+ osutils,
+ )
+""")
+
+from bzrlib import (
+ lazy_regex,
+ )
+
+# the regex removes any weird characters; we don't escape them
+# but rather just pull them out
+_file_id_chars_re = lazy_regex.lazy_compile(r'[^\w.]')
+_rev_id_chars_re = lazy_regex.lazy_compile(r'[^-\w.+@]')
+_gen_file_id_suffix = None
+_gen_file_id_serial = 0
+
+
+def _next_id_suffix():
+ """Create a new file id suffix that is reasonably unique.
+
+    On the first call we combine the current time with 64 bits of randomness to
+    give a number that is highly likely to be globally unique. Then each call in the same
+ process adds 1 to a serial number we append to that unique value.
+ """
+ # XXX TODO: change bzrlib.add.smart_add_tree to call workingtree.add() rather
+ # than having to move the id randomness out of the inner loop like this.
+ # XXX TODO: for the global randomness this uses we should add the thread-id
+ # before the serial #.
+ # XXX TODO: jam 20061102 I think it would be good to reset every 100 or
+ # 1000 calls, or perhaps if time.time() increases by a certain
+ # amount. time.time() shouldn't be terribly expensive to call,
+ # and it means that long-lived processes wouldn't use the same
+ # suffix forever.
+ global _gen_file_id_suffix, _gen_file_id_serial
+ if _gen_file_id_suffix is None:
+ _gen_file_id_suffix = "-%s-%s-" % (osutils.compact_date(time.time()),
+ osutils.rand_chars(16))
+ _gen_file_id_serial += 1
+ return _gen_file_id_suffix + str(_gen_file_id_serial)
+
+
+def gen_file_id(name):
+ """Return new file id for the basename 'name'.
+
+ The uniqueness is supplied from _next_id_suffix.
+ """
+ # The real randomness is in the _next_id_suffix, the
+ # rest of the identifier is just to be nice.
+ # So we:
+ # 1) Remove non-ascii word characters to keep the ids portable
+ # 2) squash to lowercase, so the file id doesn't have to
+ # be escaped (case insensitive filesystems would bork for ids
+ # that only differ in case without escaping).
+ # 3) truncate the filename to 20 chars. Long filenames also bork on some
+ # filesystems
+    #  4) Remove leading '.' characters to prevent the file ids from
+ # being considered hidden.
+ ascii_word_only = str(_file_id_chars_re.sub('', name.lower()))
+ short_no_dots = ascii_word_only.lstrip('.')[:20]
+ return short_no_dots + _next_id_suffix()
+
+
+def gen_root_id():
+ """Return a new tree-root file id."""
+ return gen_file_id('tree_root')
+
+
+def gen_revision_id(username, timestamp=None):
+ """Return new revision-id.
+
+ :param username: The username of the committer, in the format returned by
+ config.username(). This is typically a real name, followed by an
+ email address. If found, we will use just the email address portion.
+ Otherwise we flatten the real name, and use that.
+ :return: A new revision id.
+ """
+ try:
+ user_or_email = config.extract_email_address(username)
+ except errors.NoEmailInUsername:
+ user_or_email = username
+
+ user_or_email = user_or_email.lower()
+ user_or_email = user_or_email.replace(' ', '_')
+ user_or_email = _rev_id_chars_re.sub('', user_or_email)
+
+ # This gives 36^16 ~= 2^82.7 ~= 83 bits of entropy
+ unique_chunk = osutils.rand_chars(16)
+
+ if timestamp is None:
+ timestamp = time.time()
+
+ rev_id = u'-'.join((user_or_email,
+ osutils.compact_date(timestamp),
+ unique_chunk))
+ return rev_id.encode('utf8')
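+# Illustrative sketch of the id shapes produced above (the date and random
+# parts vary from run to run; the concrete values shown are made up):
+#
+#   gen_file_id('Foo Bar.TXT')
+#   # -> 'foobar.txt-20120822154716-0123456789abcdef-1'
+#   gen_revision_id('Jane Doe <jane@example.com>')
+#   # -> 'jane@example.com-20120822154716-0123456789abcdef'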
diff --git a/bzrlib/globbing.py b/bzrlib/globbing.py
new file mode 100644
index 0000000..547d971
--- /dev/null
+++ b/bzrlib/globbing.py
@@ -0,0 +1,356 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tools for converting globs to regular expressions.
+
+This module provides functions for converting shell-like globs to regular
+expressions.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+from bzrlib import (
+ errors,
+ lazy_regex,
+ )
+from bzrlib.trace import (
+ mutter,
+ warning,
+ )
+
+
+class Replacer(object):
+ """Do a multiple-pattern substitution.
+
+ The patterns and substitutions are combined into one, so the result of
+ one replacement is never substituted again. Add the patterns and
+ replacements via the add method and then call the object. The patterns
+ must not contain capturing groups.
+ """
+
+ _expand = lazy_regex.lazy_compile(ur'\\&')
+
+ def __init__(self, source=None):
+ self._pat = None
+ if source:
+ self._pats = list(source._pats)
+ self._funs = list(source._funs)
+ else:
+ self._pats = []
+ self._funs = []
+
+ def add(self, pat, fun):
+ r"""Add a pattern and replacement.
+
+ The pattern must not contain capturing groups.
+ The replacement might be either a string template in which \& will be
+ replaced with the match, or a function that will get the matching text
+        as an argument. It does not get a match object, because capturing is
+ forbidden anyway.
+ """
+ self._pat = None
+ self._pats.append(pat)
+ self._funs.append(fun)
+
+ def add_replacer(self, replacer):
+ r"""Add all patterns from another replacer.
+
+ All patterns and replacements from replacer are appended to the ones
+ already defined.
+ """
+ self._pat = None
+ self._pats.extend(replacer._pats)
+ self._funs.extend(replacer._funs)
+
+ def __call__(self, text):
+ if not self._pat:
+ self._pat = lazy_regex.lazy_compile(
+ u'|'.join([u'(%s)' % p for p in self._pats]),
+ re.UNICODE)
+ return self._pat.sub(self._do_sub, text)
+
+ def _do_sub(self, m):
+ fun = self._funs[m.lastindex - 1]
+ if hasattr(fun, '__call__'):
+ return fun(m.group(0))
+ else:
+ return self._expand.sub(m.group(0), fun)
+
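+# Illustrative sketch (patterns and replacements invented for the example):
+# two rules are combined into a single pass, so the output of one rule is
+# never re-scanned by the other.
+#
+#   r = Replacer()
+#   r.add(u'a+', u'X')                        # template replacement
+#   r.add(u'b', lambda text: text.upper())    # callable replacement
+#   r(u'aababa')                              # -> u'XBXBX'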
+
+_sub_named = Replacer()
+_sub_named.add(ur'\[:digit:\]', ur'\d')
+_sub_named.add(ur'\[:space:\]', ur'\s')
+_sub_named.add(ur'\[:alnum:\]', ur'\w')
+_sub_named.add(ur'\[:ascii:\]', ur'\0-\x7f')
+_sub_named.add(ur'\[:blank:\]', ur' \t')
+_sub_named.add(ur'\[:cntrl:\]', ur'\0-\x1f\x7f-\x9f')
+
+
+def _sub_group(m):
+ if m[1] in (u'!', u'^'):
+ return u'[^' + _sub_named(m[2:-1]) + u']'
+ return u'[' + _sub_named(m[1:-1]) + u']'
+
+
+def _invalid_regex(repl):
+ def _(m):
+ warning(u"'%s' not allowed within a regular expression. "
+ "Replacing with '%s'" % (m, repl))
+ return repl
+ return _
+
+
+def _trailing_backslashes_regex(m):
+ """Check trailing backslashes.
+
+ Does a head count on trailing backslashes to ensure there isn't an odd
+ one on the end that would escape the brackets we wrap the RE in.
+ """
+ if (len(m) % 2) != 0:
+ warning(u"Regular expressions cannot end with an odd number of '\\'. "
+ "Dropping the final '\\'.")
+ return m[:-1]
+ return m
+
+
+_sub_re = Replacer()
+_sub_re.add(u'^RE:', u'')
+_sub_re.add(u'\((?!\?)', u'(?:')
+_sub_re.add(u'\(\?P<.*>', _invalid_regex(u'(?:'))
+_sub_re.add(u'\(\?P=[^)]*\)', _invalid_regex(u''))
+_sub_re.add(ur'\\+$', _trailing_backslashes_regex)
+
+
+_sub_fullpath = Replacer()
+_sub_fullpath.add(ur'^RE:.*', _sub_re) # RE:<anything> is a regex
+_sub_fullpath.add(ur'\[\^?\]?(?:[^][]|\[:[^]]+:\])+\]', _sub_group) # char group
+_sub_fullpath.add(ur'(?:(?<=/)|^)(?:\.?/)+', u'') # canonicalize path
+_sub_fullpath.add(ur'\\.', ur'\&') # keep anything backslashed
+_sub_fullpath.add(ur'[(){}|^$+.]', ur'\\&') # escape specials
+_sub_fullpath.add(ur'(?:(?<=/)|^)\*\*+/', ur'(?:.*/)?') # **/ after ^ or /
+_sub_fullpath.add(ur'\*+', ur'[^/]*') # * elsewhere
+_sub_fullpath.add(ur'\?', ur'[^/]') # ? everywhere
+
+
+_sub_basename = Replacer()
+_sub_basename.add(ur'\[\^?\]?(?:[^][]|\[:[^]]+:\])+\]', _sub_group) # char group
+_sub_basename.add(ur'\\.', ur'\&') # keep anything backslashed
+_sub_basename.add(ur'[(){}|^$+.]', ur'\\&') # escape specials
+_sub_basename.add(ur'\*+', ur'.*') # * everywhere
+_sub_basename.add(ur'\?', ur'.') # ? everywhere
+
+
+def _sub_extension(pattern):
+ return _sub_basename(pattern[2:])
+
+
+class Globster(object):
+ """A simple wrapper for a set of glob patterns.
+
+ Provides the capability to search the patterns to find a match for
+ a given filename (including the full path).
+
+    Patterns are translated to regular expressions to expedite matching.
+
+ The regular expressions for multiple patterns are aggregated into
+ a super-regex containing groups of up to 99 patterns.
+ The 99 limitation is due to the grouping limit of the Python re module.
+ The resulting super-regex and associated patterns are stored as a list of
+ (regex,[patterns]) in _regex_patterns.
+
+ For performance reasons the patterns are categorised as extension patterns
+ (those that match against a file extension), basename patterns
+ (those that match against the basename of the filename),
+ and fullpath patterns (those that match against the full path).
+    The translations used for extensions and basenames are simpler
+    and therefore faster to perform than those for fullpath patterns.
+
+ Also, the extension patterns are more likely to find a match and
+ so are matched first, then the basename patterns, then the fullpath
+ patterns.
+ """
+    # We want to _add_patterns in a specific order (as per pattern_types below)
+ # starting with the shortest and going to the longest.
+    # As some Python versions don't support ordered dicts the list below is
+ # used to select inputs for _add_pattern in a specific order.
+ pattern_types = [ "extension", "basename", "fullpath" ]
+
+ pattern_info = {
+ "extension" : {
+ "translator" : _sub_extension,
+ "prefix" : r'(?:.*/)?(?!.*/)(?:.*\.)'
+ },
+ "basename" : {
+ "translator" : _sub_basename,
+ "prefix" : r'(?:.*/)?(?!.*/)'
+ },
+ "fullpath" : {
+ "translator" : _sub_fullpath,
+ "prefix" : r''
+ },
+ }
+
+ def __init__(self, patterns):
+ self._regex_patterns = []
+ pattern_lists = {
+ "extension" : [],
+ "basename" : [],
+ "fullpath" : [],
+ }
+ for pat in patterns:
+ pat = normalize_pattern(pat)
+ pattern_lists[Globster.identify(pat)].append(pat)
+ pi = Globster.pattern_info
+ for t in Globster.pattern_types:
+ self._add_patterns(pattern_lists[t], pi[t]["translator"],
+ pi[t]["prefix"])
+
+ def _add_patterns(self, patterns, translator, prefix=''):
+ while patterns:
+ grouped_rules = [
+ '(%s)' % translator(pat) for pat in patterns[:99]]
+ joined_rule = '%s(?:%s)$' % (prefix, '|'.join(grouped_rules))
+ # Explicitly use lazy_compile here, because we count on its
+ # nicer error reporting.
+ self._regex_patterns.append((
+ lazy_regex.lazy_compile(joined_rule, re.UNICODE),
+ patterns[:99]))
+ patterns = patterns[99:]
+
+ def match(self, filename):
+ """Searches for a pattern that matches the given filename.
+
+        :return: A matching pattern or None if there is no matching pattern.
+ """
+ try:
+ for regex, patterns in self._regex_patterns:
+ match = regex.match(filename)
+ if match:
+                    return patterns[match.lastindex - 1]
+ except errors.InvalidPattern, e:
+            # We can't show the default e.msg to the user as that's for
+ # the combined pattern we sent to regex. Instead we indicate to
+ # the user that an ignore file needs fixing.
+ mutter('Invalid pattern found in regex: %s.', e.msg)
+ e.msg = "File ~/.bazaar/ignore or .bzrignore contains error(s)."
+ bad_patterns = ''
+ for _, patterns in self._regex_patterns:
+ for p in patterns:
+ if not Globster.is_pattern_valid(p):
+ bad_patterns += ('\n %s' % p)
+ e.msg += bad_patterns
+ raise e
+ return None
+
+ @staticmethod
+ def identify(pattern):
+        """Return the pattern category.
+
+        Identifies whether a pattern is fullpath, basename or extension
+        and returns the appropriate type.
+
+        :param pattern: normalized pattern.
+        """
+ if pattern.startswith(u'RE:') or u'/' in pattern:
+ return "fullpath"
+ elif pattern.startswith(u'*.'):
+ return "extension"
+ else:
+ return "basename"
+
+ @staticmethod
+ def is_pattern_valid(pattern):
+ """Returns True if pattern is valid.
+
+ :param pattern: Normalized pattern.
+ is_pattern_valid() assumes pattern to be normalized.
+ see: globbing.normalize_pattern
+ """
+ result = True
+ translator = Globster.pattern_info[Globster.identify(pattern)]["translator"]
+ tpattern = '(%s)' % translator(pattern)
+ try:
+ re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE)
+ re_obj.search("") # force compile
+ except errors.InvalidPattern, e:
+ result = False
+ return result
+
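+# Illustrative sketch of the three pattern categories (the patterns and
+# filenames are invented for the example; match() returns the original
+# pattern text, or None when nothing matches):
+#
+#   g = Globster([u'*.pyc', u'tags', u'doc/*.html'])
+#   g.match(u'foo/bar.pyc')      # -> u'*.pyc'       (extension)
+#   g.match(u'tags')             # -> u'tags'        (basename)
+#   g.match(u'doc/index.html')   # -> u'doc/*.html'  (fullpath)
+#   g.match(u'README')           # -> None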
+
+class ExceptionGlobster(object):
+ """A Globster that supports exception patterns.
+
+ Exceptions are ignore patterns prefixed with '!'. Exception
+ patterns take precedence over regular patterns and cause a
+ matching filename to return None from the match() function.
+ Patterns using a '!!' prefix are highest precedence, and act
+ as regular ignores. '!!' patterns are useful to establish ignores
+ that apply under paths specified by '!' exception patterns.
+ """
+
+ def __init__(self,patterns):
+ ignores = [[], [], []]
+ for p in patterns:
+ if p.startswith(u'!!'):
+ ignores[2].append(p[2:])
+ elif p.startswith(u'!'):
+ ignores[1].append(p[1:])
+ else:
+ ignores[0].append(p)
+ self._ignores = [Globster(i) for i in ignores]
+
+ def match(self, filename):
+ """Searches for a pattern that matches the given filename.
+
+        :return: A matching pattern or None if there is no matching pattern.
+ """
+ double_neg = self._ignores[2].match(filename)
+ if double_neg:
+ return "!!%s" % double_neg
+ elif self._ignores[1].match(filename):
+ return None
+ else:
+ return self._ignores[0].match(filename)
+
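+# Illustrative sketch of exception precedence ('!' punches a hole in the
+# ignores, '!!' re-ignores inside that hole; patterns invented for the
+# example):
+#
+#   g = ExceptionGlobster([u'*.log', u'!important.log', u'!!debug/*.log'])
+#   g.match(u'build.log')        # -> u'*.log'
+#   g.match(u'important.log')    # -> None
+#   g.match(u'debug/trace.log')  # -> u'!!debug/*.log'
+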
+class _OrderedGlobster(Globster):
+ """A Globster that keeps pattern order."""
+
+ def __init__(self, patterns):
+ """Constructor.
+
+ :param patterns: sequence of glob patterns
+ """
+ # Note: This could be smarter by running like sequences together
+ self._regex_patterns = []
+ for pat in patterns:
+ pat = normalize_pattern(pat)
+ t = Globster.identify(pat)
+ self._add_patterns([pat], Globster.pattern_info[t]["translator"],
+ Globster.pattern_info[t]["prefix"])
+
+
+_slashes = lazy_regex.lazy_compile(r'[\\/]+')
+def normalize_pattern(pattern):
+ """Converts backslashes in path patterns to forward slashes.
+
+ Doesn't normalize regular expressions - they may contain escapes.
+ """
+ if not (pattern.startswith('RE:') or pattern.startswith('!RE:')):
+ pattern = _slashes.sub('/', pattern)
+ if len(pattern) > 1:
+ pattern = pattern.rstrip('/')
+ return pattern
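+# Illustrative sketch of normalize_pattern (inputs invented for the example):
+#
+#   normalize_pattern(u'doc\\api\\')   # -> u'doc/api'
+#   normalize_pattern(u'RE:foo\\d+')   # -> u'RE:foo\\d+'  (regexes untouched)
+#   normalize_pattern(u'/')            # -> u'/'  (a lone separator is kept)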
diff --git a/bzrlib/gpg.py b/bzrlib/gpg.py
new file mode 100644
index 0000000..fc3a940
--- /dev/null
+++ b/bzrlib/gpg.py
@@ -0,0 +1,557 @@
+# Copyright (C) 2005, 2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""GPG signing and checking logic."""
+
+from __future__ import absolute_import
+
+import os
+import sys
+from StringIO import StringIO
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+import subprocess
+
+from bzrlib import (
+ config,
+ errors,
+ trace,
+ ui,
+ )
+from bzrlib.i18n import (
+ gettext,
+ ngettext,
+ )
+""")
+
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+
+#verification results
+SIGNATURE_VALID = 0
+SIGNATURE_KEY_MISSING = 1
+SIGNATURE_NOT_VALID = 2
+SIGNATURE_NOT_SIGNED = 3
+SIGNATURE_EXPIRED = 4
+
+
+def bulk_verify_signatures(repository, revids, strategy,
+ process_events_callback=None):
+ """Do verifications on a set of revisions
+
+ :param repository: repository object
+ :param revids: list of revision ids to verify
+ :param strategy: GPG strategy to use
+ :param process_events_callback: method to call for GUI frontends that
+ want to keep their UI refreshed
+
+ :return: count dictionary of results of each type,
+ result list for each revision,
+ boolean True if all results are verified successfully
+ """
+ count = {SIGNATURE_VALID: 0,
+ SIGNATURE_KEY_MISSING: 0,
+ SIGNATURE_NOT_VALID: 0,
+ SIGNATURE_NOT_SIGNED: 0,
+ SIGNATURE_EXPIRED: 0}
+ result = []
+ all_verifiable = True
+ total = len(revids)
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for i, (rev_id, verification_result, uid) in enumerate(
+ repository.verify_revision_signatures(
+ revids, strategy)):
+ pb.update("verifying signatures", i, total)
+ result.append([rev_id, verification_result, uid])
+ count[verification_result] += 1
+ if verification_result != SIGNATURE_VALID:
+ all_verifiable = False
+ if process_events_callback is not None:
+ process_events_callback()
+ finally:
+ pb.finished()
+ return (count, result, all_verifiable)
+
+
+class DisabledGPGStrategy(object):
+ """A GPG Strategy that makes everything fail."""
+
+ @staticmethod
+ def verify_signatures_available():
+ return True
+
+ def __init__(self, ignored):
+ """Real strategies take a configuration."""
+
+ def sign(self, content):
+ raise errors.SigningFailed('Signing is disabled.')
+
+ def verify(self, content, testament):
+ raise errors.SignatureVerificationFailed('Signature verification is \
+disabled.')
+
+ def set_acceptable_keys(self, command_line_input):
+ pass
+
+
+class LoopbackGPGStrategy(object):
+ """A GPG Strategy that acts like 'cat' - data is just passed through.
+ Used in tests.
+ """
+
+ @staticmethod
+ def verify_signatures_available():
+ return True
+
+ def __init__(self, ignored):
+ """Real strategies take a configuration."""
+
+ def sign(self, content):
+ return ("-----BEGIN PSEUDO-SIGNED CONTENT-----\n" + content +
+ "-----END PSEUDO-SIGNED CONTENT-----\n")
+
+ def verify(self, content, testament):
+ return SIGNATURE_VALID, None
+
+ def set_acceptable_keys(self, command_line_input):
+ if command_line_input is not None:
+ patterns = command_line_input.split(",")
+ self.acceptable_keys = []
+ for pattern in patterns:
+ if pattern == "unknown":
+ pass
+ else:
+ self.acceptable_keys.append(pattern)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def do_verifications(self, revisions, repository):
+ return bulk_verify_signatures(repository, revisions, self)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def valid_commits_message(self, count):
+ return valid_commits_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def unknown_key_message(self, count):
+ return unknown_key_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def commit_not_valid_message(self, count):
+ return commit_not_valid_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def commit_not_signed_message(self, count):
+ return commit_not_signed_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def expired_commit_message(self, count):
+ return expired_commit_message(count)
+
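+# Illustrative sketch: the loopback strategy just brackets the content, so
+# tests can treat the "signature" as readable text and verify() always
+# reports success.
+#
+#   s = LoopbackGPGStrategy(None)
+#   s.sign('data\n')
+#   # -> '-----BEGIN PSEUDO-SIGNED CONTENT-----\ndata\n'
+#   #    '-----END PSEUDO-SIGNED CONTENT-----\n'
+#   s.verify('ignored', 'ignored')   # -> (SIGNATURE_VALID, None)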
+
+def _set_gpg_tty():
+ tty = os.environ.get('TTY')
+ if tty is not None:
+ os.environ['GPG_TTY'] = tty
+ trace.mutter('setting GPG_TTY=%s', tty)
+ else:
+ # This is not quite worthy of a warning, because some people
+ # don't need GPG_TTY to be set. But it is worthy of a big mark
+ # in ~/.bzr.log, so that people can debug it if it happens to them
+ trace.mutter('** Env var TTY empty, cannot set GPG_TTY.'
+ ' Is TTY exported?')
+
+
+class GPGStrategy(object):
+ """GPG Signing and checking facilities."""
+
+ acceptable_keys = None
+
+ def __init__(self, config_stack):
+ self._config_stack = config_stack
+ try:
+ import gpgme
+ self.context = gpgme.Context()
+ except ImportError, error:
+ pass # can't use verify()
+
+ @staticmethod
+ def verify_signatures_available():
+ """
+ check if this strategy can verify signatures
+
+ :return: boolean if this strategy can verify signatures
+ """
+ try:
+ import gpgme
+ return True
+ except ImportError, error:
+ return False
+
+ def _command_line(self):
+ key = self._config_stack.get('gpg_signing_key')
+ if key is None or key == 'default':
+ # 'default' or not setting gpg_signing_key at all means we should
+ # use the user email address
+ key = config.extract_email_address(self._config_stack.get('email'))
+ return [self._config_stack.get('gpg_signing_command'), '--clearsign',
+ '-u', key]
+
+ def sign(self, content):
+ if isinstance(content, unicode):
+ raise errors.BzrBadParameterUnicode('content')
+ ui.ui_factory.clear_term()
+
+ preexec_fn = _set_gpg_tty
+ if sys.platform == 'win32':
+ # Win32 doesn't support preexec_fn, but wouldn't support TTY anyway.
+ preexec_fn = None
+ try:
+ process = subprocess.Popen(self._command_line(),
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ preexec_fn=preexec_fn)
+ try:
+ result = process.communicate(content)[0]
+ if process.returncode is None:
+ process.wait()
+ if process.returncode != 0:
+ raise errors.SigningFailed(self._command_line())
+ return result
+ except OSError, e:
+ if e.errno == errno.EPIPE:
+ raise errors.SigningFailed(self._command_line())
+ else:
+ raise
+ except ValueError:
+ # bad subprocess parameters, should never happen.
+ raise
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # gpg is not installed
+ raise errors.SigningFailed(self._command_line())
+ else:
+ raise
+
+ def verify(self, content, testament):
+ """Check content has a valid signature.
+
+ :param content: the commit signature
+ :param testament: the valid testament string for the commit
+
+ :return: SIGNATURE_VALID or a failed SIGNATURE_ value, key uid if valid
+ """
+ try:
+ import gpgme
+ except ImportError, error:
+ raise errors.GpgmeNotInstalled(error)
+
+ signature = StringIO(content)
+ plain_output = StringIO()
+ try:
+ result = self.context.verify(signature, None, plain_output)
+ except gpgme.GpgmeError,error:
+ raise errors.SignatureVerificationFailed(error[2])
+
+ # No result if input is invalid.
+ # test_verify_invalid()
+ if len(result) == 0:
+ return SIGNATURE_NOT_VALID, None
+ # User has specified a list of acceptable keys, check our result is in
+ # it. test_verify_unacceptable_key()
+ fingerprint = result[0].fpr
+ if self.acceptable_keys is not None:
+ if not fingerprint in self.acceptable_keys:
+ return SIGNATURE_KEY_MISSING, fingerprint[-8:]
+ # Check the signature actually matches the testament.
+ # test_verify_bad_testament()
+ if testament != plain_output.getvalue():
+ return SIGNATURE_NOT_VALID, None
+ # Yay gpgme set the valid bit.
+ # Can't write a test for this one as you can't set a key to be
+ # trusted using gpgme.
+ if result[0].summary & gpgme.SIGSUM_VALID:
+ key = self.context.get_key(fingerprint)
+ name = key.uids[0].name
+ email = key.uids[0].email
+ return SIGNATURE_VALID, name + " <" + email + ">"
+        # SIGSUM_RED indicates a problem; unfortunately I have not been able
+ # to write any tests which actually set this.
+ if result[0].summary & gpgme.SIGSUM_RED:
+ return SIGNATURE_NOT_VALID, None
+ # GPG does not know this key.
+ # test_verify_unknown_key()
+ if result[0].summary & gpgme.SIGSUM_KEY_MISSING:
+ return SIGNATURE_KEY_MISSING, fingerprint[-8:]
+        # Summary isn't set if the signature is valid but the key is untrusted;
+        # if the user has explicitly set the key as acceptable we can validate it.
+ if result[0].summary == 0 and self.acceptable_keys is not None:
+ if fingerprint in self.acceptable_keys:
+ # test_verify_untrusted_but_accepted()
+ return SIGNATURE_VALID, None
+ # test_verify_valid_but_untrusted()
+ if result[0].summary == 0 and self.acceptable_keys is None:
+ return SIGNATURE_NOT_VALID, None
+ if result[0].summary & gpgme.SIGSUM_KEY_EXPIRED:
+ expires = self.context.get_key(result[0].fpr).subkeys[0].expires
+ if expires > result[0].timestamp:
+ # The expired key was not expired at time of signing.
+ # test_verify_expired_but_valid()
+ return SIGNATURE_EXPIRED, fingerprint[-8:]
+ else:
+ # I can't work out how to create a test where the signature
+ # was expired at the time of signing.
+ return SIGNATURE_NOT_VALID, None
+ # A signature from a revoked key gets this.
+ # test_verify_revoked_signature()
+ if result[0].summary & gpgme.SIGSUM_SYS_ERROR:
+ return SIGNATURE_NOT_VALID, None
+ # Other error types such as revoked keys should (I think) be caught by
+ # SIGSUM_RED so anything else means something is buggy.
+ raise errors.SignatureVerificationFailed("Unknown GnuPG key "\
+ "verification result")
+
+ def set_acceptable_keys(self, command_line_input):
+ """Set the acceptable keys for verifying with this GPGStrategy.
+
+ :param command_line_input: comma separated list of patterns from
+ command line
+ :return: nothing
+ """
+ key_patterns = None
+ acceptable_keys_config = self._config_stack.get('acceptable_keys')
+ try:
+ if isinstance(acceptable_keys_config, unicode):
+ acceptable_keys_config = str(acceptable_keys_config)
+ except UnicodeEncodeError:
+ # gpg Context.keylist(pattern) does not like unicode
+ raise errors.BzrCommandError(
+ gettext('Only ASCII permitted in option names'))
+
+ if acceptable_keys_config is not None:
+ key_patterns = acceptable_keys_config
+ if command_line_input is not None: # command line overrides config
+ key_patterns = command_line_input
+ if key_patterns is not None:
+ patterns = key_patterns.split(",")
+
+ self.acceptable_keys = []
+ for pattern in patterns:
+ result = self.context.keylist(pattern)
+ found_key = False
+ for key in result:
+ found_key = True
+ self.acceptable_keys.append(key.subkeys[0].fpr)
+ trace.mutter("Added acceptable key: " + key.subkeys[0].fpr)
+ if not found_key:
+ trace.note(gettext(
+ "No GnuPG key results for pattern: {0}"
+ ).format(pattern))
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def do_verifications(self, revisions, repository,
+ process_events_callback=None):
+ """do verifications on a set of revisions
+
+ :param revisions: list of revision ids to verify
+ :param repository: repository object
+ :param process_events_callback: method to call for GUI frontends that
+ want to keep their UI refreshed
+
+ :return: count dictionary of results of each type,
+ result list for each revision,
+ boolean True if all results are verified successfully
+ """
+ return bulk_verify_signatures(repository, revisions, self,
+ process_events_callback)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def verbose_valid_message(self, result):
+ """takes a verify result and returns list of signed commits strings"""
+ return verbose_valid_message(result)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def verbose_not_valid_message(self, result, repo):
+ """takes a verify result and returns list of not valid commit info"""
+ return verbose_not_valid_message(result, repo)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def verbose_not_signed_message(self, result, repo):
+ """takes a verify result and returns list of not signed commit info"""
+ return verbose_not_valid_message(result, repo)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def verbose_missing_key_message(self, result):
+ """takes a verify result and returns list of missing key info"""
+ return verbose_missing_key_message(result)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def verbose_expired_key_message(self, result, repo):
+ """takes a verify result and returns list of expired key info"""
+ return verbose_expired_key_message(result, repo)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def valid_commits_message(self, count):
+ """returns message for number of commits"""
+ return valid_commits_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def unknown_key_message(self, count):
+ """returns message for number of commits"""
+ return unknown_key_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def commit_not_valid_message(self, count):
+ """returns message for number of commits"""
+ return commit_not_valid_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def commit_not_signed_message(self, count):
+ """returns message for number of commits"""
+ return commit_not_signed_message(count)
+
+ @deprecated_method(deprecated_in((2, 6, 0)))
+ def expired_commit_message(self, count):
+ """returns message for number of commits"""
+ return expired_commit_message(count)
+
+
+def valid_commits_message(count):
+ """returns message for number of commits"""
+ return gettext(u"{0} commits with valid signatures").format(
+ count[SIGNATURE_VALID])
+
+
+def unknown_key_message(count):
+ """returns message for number of commits"""
+ return ngettext(u"{0} commit with unknown key",
+ u"{0} commits with unknown keys",
+ count[SIGNATURE_KEY_MISSING]).format(
+ count[SIGNATURE_KEY_MISSING])
+
+
+def commit_not_valid_message(count):
+ """returns message for number of commits"""
+ return ngettext(u"{0} commit not valid",
+ u"{0} commits not valid",
+ count[SIGNATURE_NOT_VALID]).format(
+ count[SIGNATURE_NOT_VALID])
+
+
+def commit_not_signed_message(count):
+ """returns message for number of commits"""
+ return ngettext(u"{0} commit not signed",
+ u"{0} commits not signed",
+ count[SIGNATURE_NOT_SIGNED]).format(
+ count[SIGNATURE_NOT_SIGNED])
+
+
+def expired_commit_message(count):
+ """returns message for number of commits"""
+ return ngettext(u"{0} commit with key now expired",
+ u"{0} commits with key now expired",
+ count[SIGNATURE_EXPIRED]).format(
+ count[SIGNATURE_EXPIRED])
+
+
+def verbose_expired_key_message(result, repo):
+ """takes a verify result and returns list of expired key info"""
+ signers = {}
+ fingerprint_to_authors = {}
+ for rev_id, validity, fingerprint in result:
+ if validity == SIGNATURE_EXPIRED:
+ revision = repo.get_revision(rev_id)
+ authors = ', '.join(revision.get_apparent_authors())
+ signers.setdefault(fingerprint, 0)
+ signers[fingerprint] += 1
+ fingerprint_to_authors[fingerprint] = authors
+ result = []
+ for fingerprint, number in signers.items():
+ result.append(
+ ngettext(u"{0} commit by author {1} with key {2} now expired",
+ u"{0} commits by author {1} with key {2} now expired",
+ number).format(
+ number, fingerprint_to_authors[fingerprint], fingerprint))
+ return result
+
+
+def verbose_valid_message(result):
+    """takes a verify result and returns list of signed commit strings"""
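+    # A rough sketch of the expected shape (revision ids and uid are made up;
+    # SIGNATURE_VALID is the constant used throughout this module):
+    #
+    #   result = [('rev-1', SIGNATURE_VALID, u'Jane <jane@example.com>'),
+    #             ('rev-2', SIGNATURE_VALID, u'Jane <jane@example.com>')]
+    #   verbose_valid_message(result)
+    #   => [u'Jane <jane@example.com> signed 2 commits']   (no translation active)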
+ signers = {}
+ for rev_id, validity, uid in result:
+ if validity == SIGNATURE_VALID:
+ signers.setdefault(uid, 0)
+ signers[uid] += 1
+ result = []
+ for uid, number in signers.items():
+ result.append(ngettext(u"{0} signed {1} commit",
+ u"{0} signed {1} commits",
+ number).format(uid, number))
+ return result
+
+
+def verbose_not_valid_message(result, repo):
+ """takes a verify result and returns list of not valid commit info"""
+ signers = {}
+ for rev_id, validity, empty in result:
+ if validity == SIGNATURE_NOT_VALID:
+ revision = repo.get_revision(rev_id)
+ authors = ', '.join(revision.get_apparent_authors())
+ signers.setdefault(authors, 0)
+ signers[authors] += 1
+ result = []
+ for authors, number in signers.items():
+ result.append(ngettext(u"{0} commit by author {1}",
+ u"{0} commits by author {1}",
+ number).format(number, authors))
+ return result
+
+
+def verbose_not_signed_message(result, repo):
+ """takes a verify result and returns list of not signed commit info"""
+ signers = {}
+ for rev_id, validity, empty in result:
+ if validity == SIGNATURE_NOT_SIGNED:
+ revision = repo.get_revision(rev_id)
+ authors = ', '.join(revision.get_apparent_authors())
+ signers.setdefault(authors, 0)
+ signers[authors] += 1
+ result = []
+ for authors, number in signers.items():
+ result.append(ngettext(u"{0} commit by author {1}",
+ u"{0} commits by author {1}",
+ number).format(number, authors))
+ return result
+
+
+def verbose_missing_key_message(result):
+ """takes a verify result and returns list of missing key info"""
+ signers = {}
+ for rev_id, validity, fingerprint in result:
+ if validity == SIGNATURE_KEY_MISSING:
+ signers.setdefault(fingerprint, 0)
+ signers[fingerprint] += 1
+ result = []
+ for fingerprint, number in signers.items():
+ result.append(ngettext(u"Unknown key {0} signed {1} commit",
+ u"Unknown key {0} signed {1} commits",
+ number).format(fingerprint, number))
+ return result
diff --git a/bzrlib/graph.py b/bzrlib/graph.py
new file mode 100644
index 0000000..e1c2522
--- /dev/null
+++ b/bzrlib/graph.py
@@ -0,0 +1,1715 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import time
+
+from bzrlib import (
+ debug,
+ errors,
+ osutils,
+ revision,
+ trace,
+ )
+
+STEP_UNIQUE_SEARCHER_EVERY = 5
+
+# DIAGRAM of terminology
+# A
+# /\
+# B C
+# | |\
+# D E F
+# |\/| |
+# |/\|/
+# G H
+#
+# In this diagram, relative to G and H:
+# A, B, C, D, E are common ancestors.
+# C, D and E are border ancestors, because each has a non-common descendant.
+# D and E are least common ancestors because none of their descendants are
+# common ancestors.
+# C is not a least common ancestor because its descendant, E, is a common
+# ancestor.
+#
+# The find_unique_lca algorithm will pick A in two steps:
+# 1. find_lca('G', 'H') => ['D', 'E']
+# 2. Since len(['D', 'E']) > 1, find_lca('D', 'E') => ['A']
+
+
+class DictParentsProvider(object):
+ """A parents provider for Graph objects."""
+
+ def __init__(self, ancestry):
+ self.ancestry = ancestry
+
+ def __repr__(self):
+ return 'DictParentsProvider(%r)' % self.ancestry
+
+ # Note: DictParentsProvider does not implement get_cached_parent_map
+ # Arguably, the data is clearly cached in memory. However, this class
+ # is mostly used for testing, and it keeps the tests clean to not
+ # change it.
+
+ def get_parent_map(self, keys):
+ """See StackedParentsProvider.get_parent_map"""
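+        # A minimal sketch with made-up revision ids: keys absent from the
+        # ancestry dict are silently omitted from the result.
+        #
+        #   dp = DictParentsProvider({'rev1': (), 'rev2': ('rev1',)})
+        #   dp.get_parent_map(['rev2', 'ghost'])
+        #   => {'rev2': ('rev1',)}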
+ ancestry = self.ancestry
+ return dict([(k, ancestry[k]) for k in keys if k in ancestry])
+
+
+class StackedParentsProvider(object):
+ """A parents provider which stacks (or unions) multiple providers.
+
+    The providers are queried in the order of the provided parent_providers.
+ """
+
+ def __init__(self, parent_providers):
+ self._parent_providers = parent_providers
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self._parent_providers)
+
+ def get_parent_map(self, keys):
+ """Get a mapping of keys => parents
+
+ A dictionary is returned with an entry for each key present in this
+ source. If this source doesn't have information about a key, it should
+ not include an entry.
+
+ [NULL_REVISION] is used as the parent of the first user-committed
+ revision. Its parent list is empty.
+
+ :param keys: An iterable returning keys to check (eg revision_ids)
+ :return: A dictionary mapping each key to its parents
+ """
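+        # An illustrative sketch (made-up ids): stacking two simple providers
+        # unions their answers, with earlier providers asked first.
+        #
+        #   stacked = StackedParentsProvider([
+        #       DictParentsProvider({'b': ('a',)}),
+        #       DictParentsProvider({'a': ()})])
+        #   stacked.get_parent_map(['a', 'b', 'ghost'])
+        #   => {'a': (), 'b': ('a',)}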
+ found = {}
+ remaining = set(keys)
+ # This adds getattr() overhead to each get_parent_map call. However,
+ # this is StackedParentsProvider, which means we're dealing with I/O
+ # (either local indexes, or remote RPCs), so CPU overhead should be
+ # minimal.
+ for parents_provider in self._parent_providers:
+ get_cached = getattr(parents_provider, 'get_cached_parent_map',
+ None)
+ if get_cached is None:
+ continue
+ new_found = get_cached(remaining)
+ found.update(new_found)
+ remaining.difference_update(new_found)
+ if not remaining:
+ break
+ if not remaining:
+ return found
+ for parents_provider in self._parent_providers:
+ new_found = parents_provider.get_parent_map(remaining)
+ found.update(new_found)
+ remaining.difference_update(new_found)
+ if not remaining:
+ break
+ return found
+
+
+class CachingParentsProvider(object):
+ """A parents provider which will cache the revision => parents as a dict.
+
+ This is useful for providers which have an expensive look up.
+
+ Either a ParentsProvider or a get_parent_map-like callback may be
+ supplied. If it provides extra un-asked-for parents, they will be cached,
+ but filtered out of get_parent_map.
+
+ The cache is enabled by default, but may be disabled and re-enabled.
+ """
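+    # A small usage sketch (made-up ids): wrap any provider, and repeated
+    # queries for the same key should come from the cache instead of the
+    # wrapped provider.
+    #
+    #   caching = CachingParentsProvider(DictParentsProvider({'a': ()}))
+    #   caching.get_parent_map(['a'])         # asks the real provider
+    #   caching.get_cached_parent_map(['a'])  # => {'a': ()}, no further lookup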
+ def __init__(self, parent_provider=None, get_parent_map=None):
+ """Constructor.
+
+ :param parent_provider: The ParentProvider to use. It or
+ get_parent_map must be supplied.
+ :param get_parent_map: The get_parent_map callback to use. It or
+ parent_provider must be supplied.
+ """
+ self._real_provider = parent_provider
+ if get_parent_map is None:
+ self._get_parent_map = self._real_provider.get_parent_map
+ else:
+ self._get_parent_map = get_parent_map
+ self._cache = None
+ self.enable_cache(True)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self._real_provider)
+
+ def enable_cache(self, cache_misses=True):
+ """Enable cache."""
+ if self._cache is not None:
+ raise AssertionError('Cache enabled when already enabled.')
+ self._cache = {}
+ self._cache_misses = cache_misses
+ self.missing_keys = set()
+
+ def disable_cache(self):
+ """Disable and clear the cache."""
+ self._cache = None
+ self._cache_misses = None
+ self.missing_keys = set()
+
+ def get_cached_map(self):
+ """Return any cached get_parent_map values."""
+ if self._cache is None:
+ return None
+ return dict(self._cache)
+
+ def get_cached_parent_map(self, keys):
+ """Return items from the cache.
+
+ This returns the same info as get_parent_map, but explicitly does not
+ invoke the supplied ParentsProvider to search for uncached values.
+ """
+ cache = self._cache
+ if cache is None:
+ return {}
+ return dict([(key, cache[key]) for key in keys if key in cache])
+
+ def get_parent_map(self, keys):
+ """See StackedParentsProvider.get_parent_map."""
+ cache = self._cache
+ if cache is None:
+ cache = self._get_parent_map(keys)
+ else:
+ needed_revisions = set(key for key in keys if key not in cache)
+ # Do not ask for negatively cached keys
+ needed_revisions.difference_update(self.missing_keys)
+ if needed_revisions:
+ parent_map = self._get_parent_map(needed_revisions)
+ cache.update(parent_map)
+ if self._cache_misses:
+ for key in needed_revisions:
+ if key not in parent_map:
+ self.note_missing_key(key)
+ result = {}
+ for key in keys:
+ value = cache.get(key)
+ if value is not None:
+ result[key] = value
+ return result
+
+ def note_missing_key(self, key):
+ """Note that key is a missing key."""
+ if self._cache_misses:
+ self.missing_keys.add(key)
+
+
+class CallableToParentsProviderAdapter(object):
+ """A parents provider that adapts any callable to the parents provider API.
+
+ i.e. it accepts calls to self.get_parent_map and relays them to the
+ callable it was constructed with.
+ """
+
+ def __init__(self, a_callable):
+ self.callable = a_callable
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self.callable)
+
+ def get_parent_map(self, keys):
+ return self.callable(keys)
+
+
+class Graph(object):
+ """Provide incremental access to revision graphs.
+
+ This is the generic implementation; it is intended to be subclassed to
+ specialize it for other repository types.
+ """
+
+ def __init__(self, parents_provider):
+        """Construct a Graph that uses a parents provider as its input.
+
+ This should not normally be invoked directly, because there may be
+ specialized implementations for particular repository types. See
+ Repository.get_graph().
+
+ :param parents_provider: An object providing a get_parent_map call
+ conforming to the behavior of
+ StackedParentsProvider.get_parent_map.
+ """
+ if getattr(parents_provider, 'get_parents', None) is not None:
+ self.get_parents = parents_provider.get_parents
+ if getattr(parents_provider, 'get_parent_map', None) is not None:
+ self.get_parent_map = parents_provider.get_parent_map
+ self._parents_provider = parents_provider
+
+ def __repr__(self):
+ return 'Graph(%r)' % self._parents_provider
+
+ def find_lca(self, *revisions):
+ """Determine the lowest common ancestors of the provided revisions
+
+ A lowest common ancestor is a common ancestor none of whose
+ descendants are common ancestors. In graphs, unlike trees, there may
+ be multiple lowest common ancestors.
+
+ This algorithm has two phases. Phase 1 identifies border ancestors,
+ and phase 2 filters border ancestors to determine lowest common
+ ancestors.
+
+ In phase 1, border ancestors are identified, using a breadth-first
+ search starting at the bottom of the graph. Searches are stopped
+        whenever a node or one of its descendants is determined to be common.
+
+ In phase 2, the border ancestors are filtered to find the least
+ common ancestors. This is done by searching the ancestries of each
+ border ancestor.
+
+        Phase 2 is performed on the principle that a border ancestor that is
+ not an ancestor of any other border ancestor is a least common
+ ancestor.
+
+ Searches are stopped when they find a node that is determined to be a
+ common ancestor of all border ancestors, because this shows that it
+ cannot be a descendant of any border ancestor.
+
+ The scaling of this operation should be proportional to:
+
+ 1. The number of uncommon ancestors
+ 2. The number of border ancestors
+ 3. The length of the shortest path between a border ancestor and an
+ ancestor of all border ancestors.
+ """
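+        # For instance, on the terminology diagram near the top of this
+        # module, expressed as a plain parent dict:
+        #
+        #   ancestry = {'A': (), 'B': ('A',), 'C': ('A',), 'D': ('B',),
+        #               'E': ('C',), 'F': ('C',), 'G': ('D', 'E'),
+        #               'H': ('D', 'E', 'F')}
+        #   graph = Graph(DictParentsProvider(ancestry))
+        #   graph.find_lca('G', 'H')
+        #   => set(['D', 'E'])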
+ border_common, common, sides = self._find_border_ancestors(revisions)
+ # We may have common ancestors that can be reached from each other.
+ # - ask for the heads of them to filter it down to only ones that
+ # cannot be reached from each other - phase 2.
+ return self.heads(border_common)
+
+ def find_difference(self, left_revision, right_revision):
+ """Determine the graph difference between two revisions"""
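+        # On the diagram graph sketched in find_lca above, the two sides
+        # should differ only by the tips themselves and by 'F', which is
+        # reachable from 'H' but not from 'G':
+        #
+        #   graph.find_difference('G', 'H')
+        #   => (set(['G']), set(['F', 'H']))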
+ border, common, searchers = self._find_border_ancestors(
+ [left_revision, right_revision])
+ self._search_for_extra_common(common, searchers)
+ left = searchers[0].seen
+ right = searchers[1].seen
+ return (left.difference(right), right.difference(left))
+
+ def find_descendants(self, old_key, new_key):
+ """Find descendants of old_key that are ancestors of new_key."""
+ child_map = self.get_child_map(self._find_descendant_ancestors(
+ old_key, new_key))
+ graph = Graph(DictParentsProvider(child_map))
+ searcher = graph._make_breadth_first_searcher([old_key])
+ list(searcher)
+ return searcher.seen
+
+ def _find_descendant_ancestors(self, old_key, new_key):
+ """Find ancestors of new_key that may be descendants of old_key."""
+ stop = self._make_breadth_first_searcher([old_key])
+ descendants = self._make_breadth_first_searcher([new_key])
+ for revisions in descendants:
+ old_stop = stop.seen.intersection(revisions)
+ descendants.stop_searching_any(old_stop)
+ seen_stop = descendants.find_seen_ancestors(stop.step())
+ descendants.stop_searching_any(seen_stop)
+ return descendants.seen.difference(stop.seen)
+
+ def get_child_map(self, keys):
+ """Get a mapping from parents to children of the specified keys.
+
+ This is simply the inversion of get_parent_map. Only supplied keys
+ will be discovered as children.
+ :return: a dict of key:child_list for keys.
+ """
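+        # Continuing the sketch from find_lca above (same made-up ancestry):
+        #
+        #   graph.get_child_map(['B', 'D', 'G'])
+        #   => {'A': ['B'], 'B': ['D'], 'D': ['G'], 'E': ['G']}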
+ parent_map = self._parents_provider.get_parent_map(keys)
+ parent_child = {}
+ for child, parents in sorted(parent_map.items()):
+ for parent in parents:
+ parent_child.setdefault(parent, []).append(child)
+ return parent_child
+
+ def find_distance_to_null(self, target_revision_id, known_revision_ids):
+ """Find the left-hand distance to the NULL_REVISION.
+
+ (This can also be considered the revno of a branch at
+ target_revision_id.)
+
+ :param target_revision_id: A revision_id which we would like to know
+ the revno for.
+ :param known_revision_ids: [(revision_id, revno)] A list of known
+ revno, revision_id tuples. We'll use this to seed the search.
+ """
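+        # A rough sketch (made-up ids): for a purely linear history
+        # NULL_REVISION <- rev1 <- rev2 <- rev3,
+        #
+        #   graph.find_distance_to_null('rev3', [])              # => 3
+        #   graph.find_distance_to_null('rev3', [('rev2', 2)])   # => 3
+        #
+        # where the second call can stop walking as soon as it reaches the
+        # seeded revision 'rev2'.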
+ # Map from revision_ids to a known value for their revno
+ known_revnos = dict(known_revision_ids)
+ cur_tip = target_revision_id
+ num_steps = 0
+ NULL_REVISION = revision.NULL_REVISION
+ known_revnos[NULL_REVISION] = 0
+
+ searching_known_tips = list(known_revnos.keys())
+
+ unknown_searched = {}
+
+ while cur_tip not in known_revnos:
+ unknown_searched[cur_tip] = num_steps
+ num_steps += 1
+ to_search = set([cur_tip])
+ to_search.update(searching_known_tips)
+ parent_map = self.get_parent_map(to_search)
+ parents = parent_map.get(cur_tip, None)
+ if not parents: # An empty list or None is a ghost
+ raise errors.GhostRevisionsHaveNoRevno(target_revision_id,
+ cur_tip)
+ cur_tip = parents[0]
+ next_known_tips = []
+ for revision_id in searching_known_tips:
+ parents = parent_map.get(revision_id, None)
+ if not parents:
+ continue
+ next = parents[0]
+ next_revno = known_revnos[revision_id] - 1
+ if next in unknown_searched:
+ # We have enough information to return a value right now
+ return next_revno + unknown_searched[next]
+ if next in known_revnos:
+ continue
+ known_revnos[next] = next_revno
+ next_known_tips.append(next)
+ searching_known_tips = next_known_tips
+
+ # We reached a known revision, so just add in how many steps it took to
+ # get there.
+ return known_revnos[cur_tip] + num_steps
+
+ def find_lefthand_distances(self, keys):
+ """Find the distance to null for all the keys in keys.
+
+ :param keys: keys to lookup.
+ :return: A dict key->distance for all of keys.
+ """
+ # Optimisable by concurrent searching, but a random spread should get
+ # some sort of hit rate.
+ result = {}
+ known_revnos = []
+ ghosts = []
+ for key in keys:
+ try:
+ known_revnos.append(
+ (key, self.find_distance_to_null(key, known_revnos)))
+ except errors.GhostRevisionsHaveNoRevno:
+ ghosts.append(key)
+ for key in ghosts:
+ known_revnos.append((key, -1))
+ return dict(known_revnos)
+
+ def find_unique_ancestors(self, unique_revision, common_revisions):
+ """Find the unique ancestors for a revision versus others.
+
+ This returns the ancestry of unique_revision, excluding all revisions
+ in the ancestry of common_revisions. If unique_revision is in the
+ ancestry, then the empty set will be returned.
+
+ :param unique_revision: The revision_id whose ancestry we are
+ interested in.
+            (XXX: Would this API be better if we allowed multiple
+            revisions to be searched here?)
+ :param common_revisions: Revision_ids of ancestries to exclude.
+ :return: A set of revisions in the ancestry of unique_revision
+ """
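+        # On the diagram graph sketched in find_lca above, the ancestry of
+        # 'H' that is not shared with 'G' is just 'H' itself plus 'F':
+        #
+        #   graph.find_unique_ancestors('H', ['G'])
+        #   => set(['H', 'F'])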
+ if unique_revision in common_revisions:
+ return set()
+
+ # Algorithm description
+ # 1) Walk backwards from the unique node and all common nodes.
+ # 2) When a node is seen by both sides, stop searching it in the unique
+ # walker, include it in the common walker.
+ # 3) Stop searching when there are no nodes left for the unique walker.
+ # At this point, you have a maximal set of unique nodes. Some of
+ # them may actually be common, and you haven't reached them yet.
+ # 4) Start new searchers for the unique nodes, seeded with the
+ # information you have so far.
+ # 5) Continue searching, stopping the common searches when the search
+ # tip is an ancestor of all unique nodes.
+ # 6) Aggregate together unique searchers when they are searching the
+ # same tips. When all unique searchers are searching the same node,
+        #    stop them and move to a single 'all_unique_searcher'.
+ # 7) The 'all_unique_searcher' represents the very 'tip' of searching.
+ # Most of the time this produces very little important information.
+ # So don't step it as quickly as the other searchers.
+ # 8) Search is done when all common searchers have completed.
+
+ unique_searcher, common_searcher = self._find_initial_unique_nodes(
+ [unique_revision], common_revisions)
+
+ unique_nodes = unique_searcher.seen.difference(common_searcher.seen)
+ if not unique_nodes:
+ return unique_nodes
+
+ (all_unique_searcher,
+ unique_tip_searchers) = self._make_unique_searchers(unique_nodes,
+ unique_searcher, common_searcher)
+
+ self._refine_unique_nodes(unique_searcher, all_unique_searcher,
+ unique_tip_searchers, common_searcher)
+ true_unique_nodes = unique_nodes.difference(common_searcher.seen)
+ if 'graph' in debug.debug_flags:
+ trace.mutter('Found %d truly unique nodes out of %d',
+ len(true_unique_nodes), len(unique_nodes))
+ return true_unique_nodes
+
+ def _find_initial_unique_nodes(self, unique_revisions, common_revisions):
+ """Steps 1-3 of find_unique_ancestors.
+
+ Find the maximal set of unique nodes. Some of these might actually
+ still be common, but we are sure that there are no other unique nodes.
+
+ :return: (unique_searcher, common_searcher)
+ """
+
+ unique_searcher = self._make_breadth_first_searcher(unique_revisions)
+ # we know that unique_revisions aren't in common_revisions, so skip
+ # past them.
+ unique_searcher.next()
+ common_searcher = self._make_breadth_first_searcher(common_revisions)
+
+ # As long as we are still finding unique nodes, keep searching
+ while unique_searcher._next_query:
+ next_unique_nodes = set(unique_searcher.step())
+ next_common_nodes = set(common_searcher.step())
+
+ # Check if either searcher encounters new nodes seen by the other
+ # side.
+ unique_are_common_nodes = next_unique_nodes.intersection(
+ common_searcher.seen)
+ unique_are_common_nodes.update(
+ next_common_nodes.intersection(unique_searcher.seen))
+ if unique_are_common_nodes:
+ ancestors = unique_searcher.find_seen_ancestors(
+ unique_are_common_nodes)
+ # TODO: This is a bit overboard, we only really care about
+ # the ancestors of the tips because the rest we
+ # already know. This is *correct* but causes us to
+ # search too much ancestry.
+ ancestors.update(common_searcher.find_seen_ancestors(ancestors))
+ unique_searcher.stop_searching_any(ancestors)
+ common_searcher.start_searching(ancestors)
+
+ return unique_searcher, common_searcher
+
+ def _make_unique_searchers(self, unique_nodes, unique_searcher,
+ common_searcher):
+ """Create a searcher for all the unique search tips (step 4).
+
+ As a side effect, the common_searcher will stop searching any nodes
+ that are ancestors of the unique searcher tips.
+
+ :return: (all_unique_searcher, unique_tip_searchers)
+ """
+ unique_tips = self._remove_simple_descendants(unique_nodes,
+ self.get_parent_map(unique_nodes))
+
+ if len(unique_tips) == 1:
+ unique_tip_searchers = []
+ ancestor_all_unique = unique_searcher.find_seen_ancestors(unique_tips)
+ else:
+ unique_tip_searchers = []
+ for tip in unique_tips:
+ revs_to_search = unique_searcher.find_seen_ancestors([tip])
+ revs_to_search.update(
+ common_searcher.find_seen_ancestors(revs_to_search))
+ searcher = self._make_breadth_first_searcher(revs_to_search)
+ # We don't care about the starting nodes.
+ searcher._label = tip
+ searcher.step()
+ unique_tip_searchers.append(searcher)
+
+ ancestor_all_unique = None
+ for searcher in unique_tip_searchers:
+ if ancestor_all_unique is None:
+ ancestor_all_unique = set(searcher.seen)
+ else:
+ ancestor_all_unique = ancestor_all_unique.intersection(
+ searcher.seen)
+ # Collapse all the common nodes into a single searcher
+ all_unique_searcher = self._make_breadth_first_searcher(
+ ancestor_all_unique)
+ if ancestor_all_unique:
+ # We've seen these nodes in all the searchers, so we'll just go to
+ # the next
+ all_unique_searcher.step()
+
+ # Stop any search tips that are already known as ancestors of the
+ # unique nodes
+ stopped_common = common_searcher.stop_searching_any(
+ common_searcher.find_seen_ancestors(ancestor_all_unique))
+
+ total_stopped = 0
+ for searcher in unique_tip_searchers:
+ total_stopped += len(searcher.stop_searching_any(
+ searcher.find_seen_ancestors(ancestor_all_unique)))
+ if 'graph' in debug.debug_flags:
+ trace.mutter('For %d unique nodes, created %d + 1 unique searchers'
+ ' (%d stopped search tips, %d common ancestors'
+ ' (%d stopped common)',
+ len(unique_nodes), len(unique_tip_searchers),
+ total_stopped, len(ancestor_all_unique),
+ len(stopped_common))
+ return all_unique_searcher, unique_tip_searchers
+
+ def _step_unique_and_common_searchers(self, common_searcher,
+ unique_tip_searchers,
+ unique_searcher):
+ """Step all the searchers"""
+ newly_seen_common = set(common_searcher.step())
+ newly_seen_unique = set()
+ for searcher in unique_tip_searchers:
+ next = set(searcher.step())
+ next.update(unique_searcher.find_seen_ancestors(next))
+ next.update(common_searcher.find_seen_ancestors(next))
+ for alt_searcher in unique_tip_searchers:
+ if alt_searcher is searcher:
+ continue
+ next.update(alt_searcher.find_seen_ancestors(next))
+ searcher.start_searching(next)
+ newly_seen_unique.update(next)
+ return newly_seen_common, newly_seen_unique
+
+ def _find_nodes_common_to_all_unique(self, unique_tip_searchers,
+ all_unique_searcher,
+ newly_seen_unique, step_all_unique):
+ """Find nodes that are common to all unique_tip_searchers.
+
+ If it is time, step the all_unique_searcher, and add its nodes to the
+ result.
+ """
+ common_to_all_unique_nodes = newly_seen_unique.copy()
+ for searcher in unique_tip_searchers:
+ common_to_all_unique_nodes.intersection_update(searcher.seen)
+ common_to_all_unique_nodes.intersection_update(
+ all_unique_searcher.seen)
+ # Step all-unique less frequently than the other searchers.
+ # In the common case, we don't need to spider out far here, so
+ # avoid doing extra work.
+ if step_all_unique:
+ tstart = time.clock()
+ nodes = all_unique_searcher.step()
+ common_to_all_unique_nodes.update(nodes)
+ if 'graph' in debug.debug_flags:
+ tdelta = time.clock() - tstart
+ trace.mutter('all_unique_searcher step() took %.3fs'
+                             ' for %d nodes (%d total), iteration: %s',
+ tdelta, len(nodes), len(all_unique_searcher.seen),
+ all_unique_searcher._iterations)
+ return common_to_all_unique_nodes
+
+ def _collapse_unique_searchers(self, unique_tip_searchers,
+ common_to_all_unique_nodes):
+ """Combine searchers that are searching the same tips.
+
+ When two searchers are searching the same tips, we can stop one of the
+ searchers. We also know that the maximal set of common ancestors is the
+ intersection of the two original searchers.
+
+ :return: A list of searchers that are searching unique nodes.
+ """
+ # Filter out searchers that don't actually search different
+ # nodes. We already have the ancestry intersection for them
+ unique_search_tips = {}
+ for searcher in unique_tip_searchers:
+ stopped = searcher.stop_searching_any(common_to_all_unique_nodes)
+ will_search_set = frozenset(searcher._next_query)
+ if not will_search_set:
+ if 'graph' in debug.debug_flags:
+ trace.mutter('Unique searcher %s was stopped.'
+ ' (%s iterations) %d nodes stopped',
+ searcher._label,
+ searcher._iterations,
+ len(stopped))
+ elif will_search_set not in unique_search_tips:
+ # This searcher is searching a unique set of nodes, let it
+ unique_search_tips[will_search_set] = [searcher]
+ else:
+ unique_search_tips[will_search_set].append(searcher)
+ # TODO: it might be possible to collapse searchers faster when they
+ # only have *some* search tips in common.
+ next_unique_searchers = []
+ for searchers in unique_search_tips.itervalues():
+ if len(searchers) == 1:
+ # Searching unique tips, go for it
+ next_unique_searchers.append(searchers[0])
+ else:
+ # These searchers have started searching the same tips, we
+ # don't need them to cover the same ground. The
+ # intersection of their ancestry won't change, so create a
+ # new searcher, combining their histories.
+ next_searcher = searchers[0]
+ for searcher in searchers[1:]:
+ next_searcher.seen.intersection_update(searcher.seen)
+ if 'graph' in debug.debug_flags:
+ trace.mutter('Combining %d searchers into a single'
+ ' searcher searching %d nodes with'
+ ' %d ancestry',
+ len(searchers),
+ len(next_searcher._next_query),
+ len(next_searcher.seen))
+ next_unique_searchers.append(next_searcher)
+ return next_unique_searchers
+
+ def _refine_unique_nodes(self, unique_searcher, all_unique_searcher,
+ unique_tip_searchers, common_searcher):
+ """Steps 5-8 of find_unique_ancestors.
+
+ This function returns when common_searcher has stopped searching for
+ more nodes.
+ """
+ # We step the ancestor_all_unique searcher only every
+ # STEP_UNIQUE_SEARCHER_EVERY steps.
+ step_all_unique_counter = 0
+ # While we still have common nodes to search
+ while common_searcher._next_query:
+ (newly_seen_common,
+ newly_seen_unique) = self._step_unique_and_common_searchers(
+ common_searcher, unique_tip_searchers, unique_searcher)
+ # These nodes are common ancestors of all unique nodes
+ common_to_all_unique_nodes = self._find_nodes_common_to_all_unique(
+ unique_tip_searchers, all_unique_searcher, newly_seen_unique,
+                step_all_unique_counter == 0)
+ step_all_unique_counter = ((step_all_unique_counter + 1)
+ % STEP_UNIQUE_SEARCHER_EVERY)
+
+ if newly_seen_common:
+ # If a 'common' node is an ancestor of all unique searchers, we
+ # can stop searching it.
+ common_searcher.stop_searching_any(
+ all_unique_searcher.seen.intersection(newly_seen_common))
+ if common_to_all_unique_nodes:
+ common_to_all_unique_nodes.update(
+ common_searcher.find_seen_ancestors(
+ common_to_all_unique_nodes))
+ # The all_unique searcher can start searching the common nodes
+ # but everyone else can stop.
+ # This is the sort of thing where we would like to not have it
+ # start_searching all of the nodes, but only mark all of them
+ # as seen, and have it search only the actual tips. Otherwise
+ # it is another get_parent_map() traversal for it to figure out
+ # what we already should know.
+ all_unique_searcher.start_searching(common_to_all_unique_nodes)
+ common_searcher.stop_searching_any(common_to_all_unique_nodes)
+
+ next_unique_searchers = self._collapse_unique_searchers(
+ unique_tip_searchers, common_to_all_unique_nodes)
+ if len(unique_tip_searchers) != len(next_unique_searchers):
+ if 'graph' in debug.debug_flags:
+ trace.mutter('Collapsed %d unique searchers => %d'
+ ' at %s iterations',
+ len(unique_tip_searchers),
+ len(next_unique_searchers),
+ all_unique_searcher._iterations)
+ unique_tip_searchers = next_unique_searchers
+
+ def get_parent_map(self, revisions):
+ """Get a map of key:parent_list for revisions.
+
+ This implementation delegates to get_parents, for old parent_providers
+ that do not supply get_parent_map.
+ """
+ result = {}
+ for rev, parents in self.get_parents(revisions):
+ if parents is not None:
+ result[rev] = parents
+ return result
+
+ def _make_breadth_first_searcher(self, revisions):
+ return _BreadthFirstSearcher(revisions, self)
+
+ def _find_border_ancestors(self, revisions):
+ """Find common ancestors with at least one uncommon descendant.
+
+ Border ancestors are identified using a breadth-first
+ search starting at the bottom of the graph. Searches are stopped
+ whenever a node or one of its descendants is determined to be common.
+
+ This will scale with the number of uncommon ancestors.
+
+ As well as the border ancestors, a set of seen common ancestors and a
+ list of sets of seen ancestors for each input revision is returned.
+ This allows calculation of graph difference from the results of this
+ operation.
+ """
+ if None in revisions:
+ raise errors.InvalidRevisionId(None, self)
+ common_ancestors = set()
+ searchers = [self._make_breadth_first_searcher([r])
+ for r in revisions]
+ active_searchers = searchers[:]
+ border_ancestors = set()
+
+ while True:
+ newly_seen = set()
+ for searcher in searchers:
+ new_ancestors = searcher.step()
+ if new_ancestors:
+ newly_seen.update(new_ancestors)
+ new_common = set()
+ for revision in newly_seen:
+ if revision in common_ancestors:
+ # Not a border ancestor because it was seen as common
+ # already
+ new_common.add(revision)
+ continue
+ for searcher in searchers:
+ if revision not in searcher.seen:
+ break
+ else:
+ # This is a border because it is a first common that we see
+ # after walking for a while.
+ border_ancestors.add(revision)
+ new_common.add(revision)
+ if new_common:
+ for searcher in searchers:
+ new_common.update(searcher.find_seen_ancestors(new_common))
+ for searcher in searchers:
+ searcher.start_searching(new_common)
+ common_ancestors.update(new_common)
+
+ # Figure out what the searchers will be searching next, and if
+ # there is only 1 set being searched, then we are done searching,
+ # since all searchers would have to be searching the same data,
+ # thus it *must* be in common.
+ unique_search_sets = set()
+ for searcher in searchers:
+ will_search_set = frozenset(searcher._next_query)
+ if will_search_set not in unique_search_sets:
+ # This searcher is searching a unique set of nodes, let it
+ unique_search_sets.add(will_search_set)
+
+ if len(unique_search_sets) == 1:
+ nodes = unique_search_sets.pop()
+ uncommon_nodes = nodes.difference(common_ancestors)
+ if uncommon_nodes:
+ raise AssertionError("Somehow we ended up converging"
+ " without actually marking them as"
+ " in common."
+ "\nStart_nodes: %s"
+ "\nuncommon_nodes: %s"
+ % (revisions, uncommon_nodes))
+ break
+ return border_ancestors, common_ancestors, searchers
+
+ def heads(self, keys):
+ """Return the heads from amongst keys.
+
+ This is done by searching the ancestries of each key. Any key that is
+ reachable from another key is not returned; all the others are.
+
+ This operation scales with the relative depth between any two keys. If
+ any two keys are completely disconnected all ancestry of both sides
+ will be retrieved.
+
+ :param keys: An iterable of keys.
+ :return: A set of the heads. Note that as a set there is no ordering
+ information. Callers will need to filter their input to create
+ order if they need it.
+ """
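+        # On the diagram graph sketched in find_lca above, 'B' is reachable
+        # from 'D', so only 'D' survives; two independent keys both survive:
+        #
+        #   graph.heads(['B', 'D'])   # => set(['D'])
+        #   graph.heads(['G', 'H'])   # => set(['G', 'H'])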
+ candidate_heads = set(keys)
+ if revision.NULL_REVISION in candidate_heads:
+ # NULL_REVISION is only a head if it is the only entry
+ candidate_heads.remove(revision.NULL_REVISION)
+ if not candidate_heads:
+ return set([revision.NULL_REVISION])
+ if len(candidate_heads) < 2:
+ return candidate_heads
+ searchers = dict((c, self._make_breadth_first_searcher([c]))
+ for c in candidate_heads)
+ active_searchers = dict(searchers)
+ # skip over the actual candidate for each searcher
+ for searcher in active_searchers.itervalues():
+ searcher.next()
+ # The common walker finds nodes that are common to two or more of the
+ # input keys, so that we don't access all history when a currently
+ # uncommon search point actually meets up with something behind a
+ # common search point. Common search points do not keep searches
+ # active; they just allow us to make searches inactive without
+ # accessing all history.
+ common_walker = self._make_breadth_first_searcher([])
+ while len(active_searchers) > 0:
+ ancestors = set()
+ # advance searches
+ try:
+ common_walker.next()
+ except StopIteration:
+ # No common points being searched at this time.
+ pass
+ for candidate in active_searchers.keys():
+ try:
+ searcher = active_searchers[candidate]
+ except KeyError:
+ # rare case: we deleted candidate in a previous iteration
+ # through this for loop, because it was determined to be
+ # a descendant of another candidate.
+ continue
+ try:
+ ancestors.update(searcher.next())
+ except StopIteration:
+ del active_searchers[candidate]
+ continue
+ # process found nodes
+ new_common = set()
+ for ancestor in ancestors:
+ if ancestor in candidate_heads:
+ candidate_heads.remove(ancestor)
+ del searchers[ancestor]
+ if ancestor in active_searchers:
+ del active_searchers[ancestor]
+ # it may meet up with a known common node
+ if ancestor in common_walker.seen:
+ # some searcher has encountered our known common nodes:
+ # just stop it
+ ancestor_set = set([ancestor])
+ for searcher in searchers.itervalues():
+ searcher.stop_searching_any(ancestor_set)
+ else:
+ # or it may have been just reached by all the searchers:
+ for searcher in searchers.itervalues():
+ if ancestor not in searcher.seen:
+ break
+ else:
+ # The final active searcher has just reached this node,
+ # making it be known as a descendant of all candidates,
+ # so we can stop searching it, and any seen ancestors
+ new_common.add(ancestor)
+ for searcher in searchers.itervalues():
+ seen_ancestors =\
+ searcher.find_seen_ancestors([ancestor])
+ searcher.stop_searching_any(seen_ancestors)
+ common_walker.start_searching(new_common)
+ return candidate_heads
+
+ def find_merge_order(self, tip_revision_id, lca_revision_ids):
+ """Find the order that each revision was merged into tip.
+
+ This basically just walks backwards with a stack, and walks left-first
+        until it finds a node to stop at.
+ """
+ if len(lca_revision_ids) == 1:
+ return list(lca_revision_ids)
+ looking_for = set(lca_revision_ids)
+ # TODO: Is there a way we could do this "faster" by batching up the
+ # get_parent_map requests?
+ # TODO: Should we also be culling the ancestry search right away? We
+ # could add looking_for to the "stop" list, and walk their
+ # ancestry in batched mode. The flip side is it might mean we walk a
+ # lot of "stop" nodes, rather than only the minimum.
+ # Then again, without it we may trace back into ancestry we could have
+ # stopped early.
+ stack = [tip_revision_id]
+ found = []
+ stop = set()
+ while stack and looking_for:
+ next = stack.pop()
+ stop.add(next)
+ if next in looking_for:
+ found.append(next)
+ looking_for.remove(next)
+ if len(looking_for) == 1:
+ found.append(looking_for.pop())
+ break
+ continue
+ parent_ids = self.get_parent_map([next]).get(next, None)
+ if not parent_ids: # Ghost, nothing to search here
+ continue
+ for parent_id in reversed(parent_ids):
+ # TODO: (performance) We see the parent at this point, but we
+ # wait to mark it until later to make sure we get left
+ # parents before right parents. However, instead of
+ # waiting until we have traversed enough parents, we
+ # could instead note that we've found it, and once all
+ # parents are in the stack, just reverse iterate the
+ # stack for them.
+ if parent_id not in stop:
+ # this will need to be searched
+ stack.append(parent_id)
+ stop.add(parent_id)
+ return found
+
+ def find_lefthand_merger(self, merged_key, tip_key):
+ """Find the first lefthand ancestor of tip_key that merged merged_key.
+
+ We do this by first finding the descendants of merged_key, then
+ walking through the lefthand ancestry of tip_key until we find a key
+ that doesn't descend from merged_key. Its child is the key that
+ merged merged_key.
+
+ :return: The first lefthand ancestor of tip_key to merge merged_key.
+ merged_key if it is a lefthand ancestor of tip_key.
+ None if no ancestor of tip_key merged merged_key.
+ """
+ descendants = self.find_descendants(merged_key, tip_key)
+ candidate_iterator = self.iter_lefthand_ancestry(tip_key)
+ last_candidate = None
+ for candidate in candidate_iterator:
+ if candidate not in descendants:
+ return last_candidate
+ last_candidate = candidate
+
+ def find_unique_lca(self, left_revision, right_revision,
+ count_steps=False):
+ """Find a unique LCA.
+
+ Find lowest common ancestors. If there is no unique common
+ ancestor, find the lowest common ancestors of those ancestors.
+
+ Iteration stops when a unique lowest common ancestor is found.
+ The graph origin is necessarily a unique lowest common ancestor.
+
+        Note that None is not an acceptable substitute for NULL_REVISION
+        in the input for this method.
+
+ :param count_steps: If True, the return value will be a tuple of
+ (unique_lca, steps) where steps is the number of times that
+ find_lca was run. If False, only unique_lca is returned.
+ """
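+        # On the diagram graph this is the two-step walk described at the top
+        # of the module: find_lca('G', 'H') gives set(['D', 'E']), which is
+        # not unique, so a second find_lca('D', 'E') yields the answer:
+        #
+        #   graph.find_unique_lca('G', 'H')
+        #   => 'A'
+        #   graph.find_unique_lca('G', 'H', count_steps=True)
+        #   => ('A', 2)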
+ revisions = [left_revision, right_revision]
+ steps = 0
+ while True:
+ steps += 1
+ lca = self.find_lca(*revisions)
+ if len(lca) == 1:
+ result = lca.pop()
+ if count_steps:
+ return result, steps
+ else:
+ return result
+ if len(lca) == 0:
+ raise errors.NoCommonAncestor(left_revision, right_revision)
+ revisions = lca
+
+ def iter_ancestry(self, revision_ids):
+ """Iterate the ancestry of this revision.
+
+ :param revision_ids: Nodes to start the search
+ :return: Yield tuples mapping a revision_id to its parents for the
+ ancestry of revision_id.
+ Ghosts will be returned with None as their parents, and nodes
+ with no parents will have NULL_REVISION as their only parent. (As
+ defined by get_parent_map.)
+ There will also be a node for (NULL_REVISION, ())
+ """
+ pending = set(revision_ids)
+ processed = set()
+ while pending:
+ processed.update(pending)
+ next_map = self.get_parent_map(pending)
+ next_pending = set()
+ for item in next_map.iteritems():
+ yield item
+ next_pending.update(p for p in item[1] if p not in processed)
+ ghosts = pending.difference(next_map)
+ for ghost in ghosts:
+ yield (ghost, None)
+ pending = next_pending
+
+ def iter_lefthand_ancestry(self, start_key, stop_keys=None):
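+        """Iterate backwards through revision ids in the lefthand history.
+
+        :param start_key: The revision id to start with. All of its lefthand
+            (first-parent) ancestors will be traversed.
+        :param stop_keys: Revision ids at which to stop iterating; these are
+            not yielded. Iteration also stops at a revision with no parents.
+        """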
+ if stop_keys is None:
+ stop_keys = ()
+ next_key = start_key
+ def get_parents(key):
+ try:
+ return self._parents_provider.get_parent_map([key])[key]
+ except KeyError:
+ raise errors.RevisionNotPresent(next_key, self)
+ while True:
+ if next_key in stop_keys:
+ return
+ parents = get_parents(next_key)
+ yield next_key
+ if len(parents) == 0:
+ return
+ else:
+ next_key = parents[0]
+
+ def iter_topo_order(self, revisions):
+ """Iterate through the input revisions in topological order.
+
+ This sorting only ensures that parents come before their children.
+ An ancestor may sort after a descendant if the relationship is not
+ visible in the supplied list of revisions.
+ """
+ from bzrlib import tsort
+ sorter = tsort.TopoSorter(self.get_parent_map(revisions))
+ return sorter.iter_topo_order()
+
+ def is_ancestor(self, candidate_ancestor, candidate_descendant):
+ """Determine whether a revision is an ancestor of another.
+
+ We answer this using heads() as heads() has the logic to perform the
+ smallest number of parent lookups to determine the ancestral
+ relationship between N revisions.
+ """
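+        # On the diagram graph sketched in find_lca above, is_ancestor('B',
+        # 'G') is True, while is_ancestor('F', 'G') is False ('F' only
+        # appears in the ancestry of 'H').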
+ return set([candidate_descendant]) == self.heads(
+ [candidate_ancestor, candidate_descendant])
+
+ def is_between(self, revid, lower_bound_revid, upper_bound_revid):
+ """Determine whether a revision is between two others.
+
+        returns true if and only if:
+            lower_bound_revid <= revid <= upper_bound_revid
+
+        A bound of None is treated as unbounded on that side.
+ """
+ return ((upper_bound_revid is None or
+ self.is_ancestor(revid, upper_bound_revid)) and
+ (lower_bound_revid is None or
+ self.is_ancestor(lower_bound_revid, revid)))
+
+ def _search_for_extra_common(self, common, searchers):
+ """Make sure that unique nodes are genuinely unique.
+
+ After _find_border_ancestors, all nodes marked "common" are indeed
+ common. Some of the nodes considered unique are not, due to history
+ shortcuts stopping the searches early.
+
+ We know that we have searched enough when all common search tips are
+ descended from all unique (uncommon) nodes because we know that a node
+ cannot be an ancestor of its own ancestor.
+
+ :param common: A set of common nodes
+ :param searchers: The searchers returned from _find_border_ancestors
+ :return: None
+ """
+ # Basic algorithm...
+ # A) The passed in searchers should all be on the same tips, thus
+ # they should be considered the "common" searchers.
+ # B) We find the difference between the searchers, these are the
+ # "unique" nodes for each side.
+ # C) We do a quick culling so that we only start searching from the
+ # more interesting unique nodes. (A unique ancestor is more
+ # interesting than any of its children.)
+ # D) We start searching for ancestors common to all unique nodes.
+ # E) We have the common searchers stop searching any ancestors of
+ # nodes found by (D)
+ # F) When there are no more common search tips, we stop
+
+ # TODO: We need a way to remove unique_searchers when they overlap with
+ # other unique searchers.
+ if len(searchers) != 2:
+ raise NotImplementedError(
+ "Algorithm not yet implemented for > 2 searchers")
+ common_searchers = searchers
+ left_searcher = searchers[0]
+ right_searcher = searchers[1]
+ unique = left_searcher.seen.symmetric_difference(right_searcher.seen)
+ if not unique: # No unique nodes, nothing to do
+ return
+ total_unique = len(unique)
+ unique = self._remove_simple_descendants(unique,
+ self.get_parent_map(unique))
+ simple_unique = len(unique)
+
+ unique_searchers = []
+ for revision_id in unique:
+ if revision_id in left_searcher.seen:
+ parent_searcher = left_searcher
+ else:
+ parent_searcher = right_searcher
+ revs_to_search = parent_searcher.find_seen_ancestors([revision_id])
+ if not revs_to_search: # XXX: This shouldn't be possible
+ revs_to_search = [revision_id]
+ searcher = self._make_breadth_first_searcher(revs_to_search)
+ # We don't care about the starting nodes.
+ searcher.step()
+ unique_searchers.append(searcher)
+
+ # possible todo: aggregate the common searchers into a single common
+ # searcher, just make sure that we include the nodes into the .seen
+ # properties of the original searchers
+
+ ancestor_all_unique = None
+ for searcher in unique_searchers:
+ if ancestor_all_unique is None:
+ ancestor_all_unique = set(searcher.seen)
+ else:
+ ancestor_all_unique = ancestor_all_unique.intersection(
+ searcher.seen)
+
+ trace.mutter('Started %s unique searchers for %s unique revisions',
+ simple_unique, total_unique)
+
+ while True: # If we have no more nodes we have nothing to do
+ newly_seen_common = set()
+ for searcher in common_searchers:
+ newly_seen_common.update(searcher.step())
+ newly_seen_unique = set()
+ for searcher in unique_searchers:
+ newly_seen_unique.update(searcher.step())
+ new_common_unique = set()
+ for revision in newly_seen_unique:
+ for searcher in unique_searchers:
+ if revision not in searcher.seen:
+ break
+ else:
+                    # This node has been seen by every unique searcher, so it
+                    # is an ancestor common to all of the unique nodes.
+ new_common_unique.add(revision)
+ if newly_seen_common:
+ # These are nodes descended from one of the 'common' searchers.
+ # Make sure all searchers are on the same page
+ for searcher in common_searchers:
+ newly_seen_common.update(
+ searcher.find_seen_ancestors(newly_seen_common))
+ # We start searching the whole ancestry. It is a bit wasteful,
+ # though. We really just want to mark all of these nodes as
+ # 'seen' and then start just the tips. However, it requires a
+ # get_parent_map() call to figure out the tips anyway, and all
+ # redundant requests should be fairly fast.
+ for searcher in common_searchers:
+ searcher.start_searching(newly_seen_common)
+
+ # If a 'common' node is an ancestor of all unique searchers, we
+ # can stop searching it.
+ stop_searching_common = ancestor_all_unique.intersection(
+ newly_seen_common)
+ if stop_searching_common:
+ for searcher in common_searchers:
+ searcher.stop_searching_any(stop_searching_common)
+ if new_common_unique:
+ # We found some ancestors that are common
+ for searcher in unique_searchers:
+ new_common_unique.update(
+ searcher.find_seen_ancestors(new_common_unique))
+ # Since these are common, we can grab another set of ancestors
+ # that we have seen
+ for searcher in common_searchers:
+ new_common_unique.update(
+ searcher.find_seen_ancestors(new_common_unique))
+
+ # We can tell all of the unique searchers to start at these
+ # nodes, and tell all of the common searchers to *stop*
+ # searching these nodes
+ for searcher in unique_searchers:
+ searcher.start_searching(new_common_unique)
+ for searcher in common_searchers:
+ searcher.stop_searching_any(new_common_unique)
+ ancestor_all_unique.update(new_common_unique)
+
+ # Filter out searchers that don't actually search different
+ # nodes. We already have the ancestry intersection for them
+ next_unique_searchers = []
+ unique_search_sets = set()
+ for searcher in unique_searchers:
+ will_search_set = frozenset(searcher._next_query)
+ if will_search_set not in unique_search_sets:
+ # This searcher is searching a unique set of nodes, let it
+ unique_search_sets.add(will_search_set)
+ next_unique_searchers.append(searcher)
+ unique_searchers = next_unique_searchers
+ for searcher in common_searchers:
+ if searcher._next_query:
+ break
+ else:
+ # All common searcher have stopped searching
+ return
+
+ def _remove_simple_descendants(self, revisions, parent_map):
+ """remove revisions which are children of other ones in the set
+
+ This doesn't do any graph searching, it just checks the immediate
+ parent_map to find if there are any children which can be removed.
+
+ :param revisions: A set of revision_ids
+ :return: A set of revision_ids with the children removed
+ """
+ simple_ancestors = revisions.copy()
+ # TODO: jam 20071214 we *could* restrict it to searching only the
+ # parent_map of revisions already present in 'revisions', but
+ # considering the general use case, I think this is actually
+ # better.
+
+ # This is the same as the following loop. I don't know that it is any
+ # faster.
+ ## simple_ancestors.difference_update(r for r, p_ids in parent_map.iteritems()
+ ## if p_ids is not None and revisions.intersection(p_ids))
+ ## return simple_ancestors
+
+ # Yet Another Way, invert the parent map (which can be cached)
+ ## descendants = {}
+ ## for revision_id, parent_ids in parent_map.iteritems():
+ ## for p_id in parent_ids:
+ ## descendants.setdefault(p_id, []).append(revision_id)
+ ## for revision in revisions.intersection(descendants):
+ ## simple_ancestors.difference_update(descendants[revision])
+ ## return simple_ancestors
+ for revision, parent_ids in parent_map.iteritems():
+ if parent_ids is None:
+ continue
+ for parent_id in parent_ids:
+ if parent_id in revisions:
+ # This node has a parent present in the set, so we can
+ # remove it
+ simple_ancestors.discard(revision)
+ break
+ return simple_ancestors
+
+
+class HeadsCache(object):
+ """A cache of results for graph heads calls."""
+
+ def __init__(self, graph):
+ self.graph = graph
+ self._heads = {}
+
+ def heads(self, keys):
+ """Return the heads of keys.
+
+        This matches the API of Graph.heads(); specifically, the return value is
+ a set which can be mutated, and ordering of the input is not preserved
+ in the output.
+
+ :see also: Graph.heads.
+ :param keys: The keys to calculate heads for.
+ :return: A set containing the heads, which may be mutated without
+ affecting future lookups.
+ """
+ keys = frozenset(keys)
+ try:
+ return set(self._heads[keys])
+ except KeyError:
+ heads = self.graph.heads(keys)
+ self._heads[keys] = heads
+ return set(heads)
+
+
+class FrozenHeadsCache(object):
+ """Cache heads() calls, assuming the caller won't modify them."""
+
+ def __init__(self, graph):
+ self.graph = graph
+ self._heads = {}
+
+ def heads(self, keys):
+ """Return the heads of keys.
+
+ Similar to Graph.heads(). The main difference is that the return value
+ is a frozen set which cannot be mutated.
+
+ :see also: Graph.heads.
+ :param keys: The keys to calculate heads for.
+ :return: A frozenset containing the heads.
+ """
+ keys = frozenset(keys)
+ try:
+ return self._heads[keys]
+ except KeyError:
+ heads = frozenset(self.graph.heads(keys))
+ self._heads[keys] = heads
+ return heads
+
+ def cache(self, keys, heads):
+ """Store a known value."""
+ self._heads[frozenset(keys)] = frozenset(heads)
+
+
+class _BreadthFirstSearcher(object):
+    """Breadth-first search through the ancestry of one or more revisions.
+
+ This class implements the iterator protocol, but additionally
+ 1. provides a set of seen ancestors, and
+ 2. allows some ancestries to be unsearched, via stop_searching_any
+ """
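+    # As a rough sketch, using the made-up ancestry dict from the find_lca
+    # example, a searcher started at ['G'] should yield one "generation" of
+    # ancestors per step():
+    #
+    #   searcher = _BreadthFirstSearcher(['G'], DictParentsProvider(ancestry))
+    #   searcher.step()   # => set(['G'])
+    #   searcher.step()   # => set(['D', 'E'])
+    #   searcher.step()   # => set(['B', 'C'])
+    #   searcher.step()   # => set(['A'])
+    #   searcher.step()   # => ()  (search exhausted)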
+
+ def __init__(self, revisions, parents_provider):
+ self._iterations = 0
+ self._next_query = set(revisions)
+ self.seen = set()
+ self._started_keys = set(self._next_query)
+ self._stopped_keys = set()
+ self._parents_provider = parents_provider
+ self._returning = 'next_with_ghosts'
+ self._current_present = set()
+ self._current_ghosts = set()
+ self._current_parents = {}
+
+ def __repr__(self):
+ if self._iterations:
+ prefix = "searching"
+ else:
+ prefix = "starting"
+ search = '%s=%r' % (prefix, list(self._next_query))
+ return ('_BreadthFirstSearcher(iterations=%d, %s,'
+ ' seen=%r)' % (self._iterations, search, list(self.seen)))
+
+ def get_state(self):
+ """Get the current state of this searcher.
+
+ :return: Tuple with started keys, excludes and included keys
+ """
+ if self._returning == 'next':
+            # We have to know the current nodes' children to be able to list the
+ # exclude keys for them. However, while we could have a second
+ # look-ahead result buffer and shuffle things around, this method
+ # is typically only called once per search - when memoising the
+ # results of the search.
+ found, ghosts, next, parents = self._do_query(self._next_query)
+ # pretend we didn't query: perhaps we should tweak _do_query to be
+ # entirely stateless?
+ self.seen.difference_update(next)
+ next_query = next.union(ghosts)
+ else:
+ next_query = self._next_query
+ excludes = self._stopped_keys.union(next_query)
+ included_keys = self.seen.difference(excludes)
+ return self._started_keys, excludes, included_keys
+
+ def _get_result(self):
+ """Get a SearchResult for the current state of this searcher.
+
+ :return: A SearchResult for this search so far. The SearchResult is
+ static - the search can be advanced and the search result will not
+ be invalidated or altered.
+ """
+ from bzrlib.vf_search import SearchResult
+ (started_keys, excludes, included_keys) = self.get_state()
+ return SearchResult(started_keys, excludes, len(included_keys),
+ included_keys)
+
+ def step(self):
+ try:
+ return self.next()
+ except StopIteration:
+ return ()
+
+ def next(self):
+ """Return the next ancestors of this revision.
+
+ Ancestors are returned in the order they are seen in a breadth-first
+ traversal. No ancestor will be returned more than once. Ancestors are
+ returned before their parentage is queried, so ghosts and missing
+ revisions (including the start revisions) are included in the result.
+ This can save a round trip in LCA style calculation by allowing
+ convergence to be detected without reading the data for the revision
+ the convergence occurs on.
+
+ :return: A set of revision_ids.
+ """
+ if self._returning != 'next':
+ # switch to returning the query, not the results.
+ self._returning = 'next'
+ self._iterations += 1
+ else:
+ self._advance()
+ if len(self._next_query) == 0:
+ raise StopIteration()
+ # We have seen what we're querying at this point as we are returning
+ # the query, not the results.
+ self.seen.update(self._next_query)
+ return self._next_query
+
+ def next_with_ghosts(self):
+ """Return the next found ancestors, with ghosts split out.
+
+ Ancestors are returned in the order they are seen in a breadth-first
+ traversal. No ancestor will be returned more than once. Ancestors are
+ returned only after asking for their parents, which allows us to detect
+ which revisions are ghosts and which are not.
+
+ :return: A tuple with (present ancestors, ghost ancestors) sets.
+ """
+ if self._returning != 'next_with_ghosts':
+ # switch to returning the results, not the current query.
+ self._returning = 'next_with_ghosts'
+ self._advance()
+ if len(self._next_query) == 0:
+ raise StopIteration()
+ self._advance()
+ return self._current_present, self._current_ghosts
+
+ def _advance(self):
+ """Advance the search.
+
+ Updates self.seen, self._next_query, self._current_present,
+ self._current_ghosts, self._current_parents and self._iterations.
+ """
+ self._iterations += 1
+ found, ghosts, next, parents = self._do_query(self._next_query)
+ self._current_present = found
+ self._current_ghosts = ghosts
+ self._next_query = next
+ self._current_parents = parents
+ # ghosts are implicit stop points, otherwise the search cannot be
+ # repeated when ghosts are filled.
+ self._stopped_keys.update(ghosts)
+
+ def _do_query(self, revisions):
+ """Query for revisions.
+
+ Adds revisions to the seen set.
+
+ :param revisions: Revisions to query.
+ :return: A tuple: (set(found_revisions), set(ghost_revisions),
+ set(parents_of_found_revisions), dict(found_revisions:parents)).
+ """
+ found_revisions = set()
+ parents_of_found = set()
+ # revisions may contain nodes that point to other nodes in revisions:
+ # we want to filter them out.
+ seen = self.seen
+ seen.update(revisions)
+ parent_map = self._parents_provider.get_parent_map(revisions)
+ found_revisions.update(parent_map)
+ for rev_id, parents in parent_map.iteritems():
+ if parents is None:
+ continue
+ new_found_parents = [p for p in parents if p not in seen]
+ if new_found_parents:
+ # Calling set.update() with an empty generator is actually
+ # rather expensive.
+ parents_of_found.update(new_found_parents)
+ ghost_revisions = revisions - found_revisions
+ return found_revisions, ghost_revisions, parents_of_found, parent_map
+
+ def __iter__(self):
+ return self
+
+ def find_seen_ancestors(self, revisions):
+ """Find ancestors of these revisions that have already been seen.
+
+ This function generally makes the assumption that querying for the
+ parents of a node that has already been queried is reasonably cheap.
+ (eg, not a round trip to a remote host).
+ """
+ # TODO: Often we might ask one searcher for its seen ancestors, and
+ # then ask another searcher the same question. This can result in
+ # searching the same revisions repeatedly if the two searchers
+ # have a lot of overlap.
+ all_seen = self.seen
+ pending = set(revisions).intersection(all_seen)
+ seen_ancestors = set(pending)
+
+ if self._returning == 'next':
+ # self.seen contains what nodes have been returned, not what nodes
+ # have been queried. We don't want to probe for nodes that haven't
+ # been searched yet.
+ not_searched_yet = self._next_query
+ else:
+ not_searched_yet = ()
+ pending.difference_update(not_searched_yet)
+ get_parent_map = self._parents_provider.get_parent_map
+ while pending:
+ parent_map = get_parent_map(pending)
+ all_parents = []
+ # We don't care if it is a ghost, since it can't be seen if it is
+ # a ghost
+ for parent_ids in parent_map.itervalues():
+ all_parents.extend(parent_ids)
+ next_pending = all_seen.intersection(all_parents).difference(seen_ancestors)
+ seen_ancestors.update(next_pending)
+ next_pending.difference_update(not_searched_yet)
+ pending = next_pending
+
+ return seen_ancestors
+
+ def stop_searching_any(self, revisions):
+ """
+ Remove any of the specified revisions from the search list.
+
+ None of the specified revisions are required to be present in the
+ search list.
+
+ It is okay to call stop_searching_any() for revisions which were seen
+        in previous iterations. It is the caller's responsibility to call
+ find_seen_ancestors() to make sure that current search tips that are
+ ancestors of those revisions are also stopped. All explicitly stopped
+ revisions will be excluded from the search result's get_keys(), though.
+ """
+ # TODO: does this help performance?
+ # if not revisions:
+ # return set()
+ revisions = frozenset(revisions)
+ if self._returning == 'next':
+ stopped = self._next_query.intersection(revisions)
+ self._next_query = self._next_query.difference(revisions)
+ else:
+ stopped_present = self._current_present.intersection(revisions)
+ stopped = stopped_present.union(
+ self._current_ghosts.intersection(revisions))
+ self._current_present.difference_update(stopped)
+ self._current_ghosts.difference_update(stopped)
+ # stopping 'x' should stop returning parents of 'x', but
+            # not if 'y' also references those same parents
+ stop_rev_references = {}
+ for rev in stopped_present:
+ for parent_id in self._current_parents[rev]:
+ if parent_id not in stop_rev_references:
+ stop_rev_references[parent_id] = 0
+ stop_rev_references[parent_id] += 1
+ # if only the stopped revisions reference it, the ref count will be
+ # 0 after this loop
+ for parents in self._current_parents.itervalues():
+ for parent_id in parents:
+ try:
+ stop_rev_references[parent_id] -= 1
+ except KeyError:
+ pass
+ stop_parents = set()
+ for rev_id, refs in stop_rev_references.iteritems():
+ if refs == 0:
+ stop_parents.add(rev_id)
+ self._next_query.difference_update(stop_parents)
+ self._stopped_keys.update(stopped)
+ self._stopped_keys.update(revisions)
+ return stopped
+
+ def start_searching(self, revisions):
+ """Add revisions to the search.
+
+ The parents of revisions will be returned from the next call to next()
+ or next_with_ghosts(). If next_with_ghosts was the most recently used
+ next* call then the return value is the result of looking up the
+ ghost/not ghost status of revisions. (A tuple (present, ghosted)).
+ """
+ revisions = frozenset(revisions)
+ self._started_keys.update(revisions)
+ new_revisions = revisions.difference(self.seen)
+ if self._returning == 'next':
+ self._next_query.update(new_revisions)
+ self.seen.update(new_revisions)
+ else:
+ # perform a query on revisions
+ revs, ghosts, query, parents = self._do_query(revisions)
+ self._stopped_keys.update(ghosts)
+ self._current_present.update(revs)
+ self._current_ghosts.update(ghosts)
+ self._next_query.update(query)
+ self._current_parents.update(parents)
+ return revs, ghosts
+
+
+def invert_parent_map(parent_map):
+ """Given a map from child => parents, create a map of parent=>children"""
+ child_map = {}
+ for child, parents in parent_map.iteritems():
+ for p in parents:
+ # Any given parent is likely to have only a small handful
+            # of children; many will have only one. So we avoid the memory
+            # overhead of a list, in exchange for extra copying of tuples.
+ if p not in child_map:
+ child_map[p] = (child,)
+ else:
+ child_map[p] = child_map[p] + (child,)
+ return child_map
+
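+# An illustrative sketch with hypothetical keys: given a parent_map such as
+#   {'B': ('A',), 'C': ('A',), 'D': ('B', 'C')}
+# invert_parent_map() returns
+#   {'A': ('B', 'C'), 'B': ('D',), 'C': ('D',)}
+# (the order of children within each tuple depends on dict iteration order).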
+
+def collapse_linear_regions(parent_map):
+ """Collapse regions of the graph that are 'linear'.
+
+ For example::
+
+ A:[B], B:[C]
+
+ can be collapsed by removing B and getting::
+
+ A:[C]
+
+ :param parent_map: A dictionary mapping children to their parents
+ :return: Another dictionary with 'linear' chains collapsed
+ """
+ # Note: this isn't a strictly minimal collapse. For example:
+ # A
+ # / \
+ # B C
+ # \ /
+ # D
+ # |
+ # E
+ # Will not have 'D' removed, even though 'E' could fit. Also:
+ # A
+ # | A
+ # B => |
+ # | C
+ # C
+ # A and C are both kept because they are edges of the graph. We *could* get
+ # rid of A if we wanted.
+ # A
+ # / \
+ # B C
+ # | |
+ # D E
+ # \ /
+ # F
+ # Will not have any nodes removed, even though you do have an
+ # 'uninteresting' linear D->B and E->C
+ children = {}
+ for child, parents in parent_map.iteritems():
+ children.setdefault(child, [])
+ for p in parents:
+ children.setdefault(p, []).append(child)
+
+ orig_children = dict(children)
+ removed = set()
+ result = dict(parent_map)
+ for node in parent_map:
+ parents = result[node]
+ if len(parents) == 1:
+ parent_children = children[parents[0]]
+ if len(parent_children) != 1:
+ # This is not the only child
+ continue
+ node_children = children[node]
+ if len(node_children) != 1:
+ continue
+ child_parents = result.get(node_children[0], None)
+ if len(child_parents) != 1:
+ # This is not its only parent
+ continue
+ # The child of this node only points at it, and the parent only has
+            # this as a child. Remove this node and join the others together.
+ result[node_children[0]] = parents
+ children[parents[0]] = node_children
+ del result[node]
+ del children[node]
+ removed.add(node)
+
+ return result
+
+
+class GraphThunkIdsToKeys(object):
+ """Forwards calls about 'ids' to be about keys internally."""
+
+ def __init__(self, graph):
+ self._graph = graph
+
+ def topo_sort(self):
+ return [r for (r,) in self._graph.topo_sort()]
+
+ def heads(self, ids):
+ """See Graph.heads()"""
+ as_keys = [(i,) for i in ids]
+ head_keys = self._graph.heads(as_keys)
+ return set([h[0] for h in head_keys])
+
+ def merge_sort(self, tip_revision):
+ nodes = self._graph.merge_sort((tip_revision,))
+ for node in nodes:
+ node.key = node.key[0]
+ return nodes
+
+ def add_node(self, revision, parents):
+ self._graph.add_node((revision,), [(p,) for p in parents])
+
+
+_counters = [0,0,0,0,0,0,0]
+try:
+ from bzrlib._known_graph_pyx import KnownGraph
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib._known_graph_py import KnownGraph
diff --git a/bzrlib/groupcompress.py b/bzrlib/groupcompress.py
new file mode 100644
index 0000000..41daac1
--- /dev/null
+++ b/bzrlib/groupcompress.py
@@ -0,0 +1,2211 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Core compression logic for compressing streams of related files."""
+
+from __future__ import absolute_import
+
+import time
+import zlib
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ annotate,
+ config,
+ debug,
+ errors,
+ graph as _mod_graph,
+ osutils,
+ pack,
+ static_tuple,
+ trace,
+ tsort,
+ )
+
+from bzrlib.repofmt import pack_repo
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.btree_index import BTreeBuilder
+from bzrlib.lru_cache import LRUSizeCache
+from bzrlib.versionedfile import (
+ _KeyRefs,
+ adapter_registry,
+ AbsentContentFactory,
+ ChunkedContentFactory,
+ FulltextContentFactory,
+ VersionedFilesWithFallbacks,
+ )
+
+# Minimum number of uncompressed bytes to try to fetch at once when retrieving
+# groupcompress blocks.
+BATCH_SIZE = 2**16
+
+# osutils.sha_string('')
+_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+
+def sort_gc_optimal(parent_map):
+ """Sort and group the keys in parent_map into groupcompress order.
+
+ groupcompress is defined (currently) as reverse-topological order, grouped
+ by the key prefix.
+
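+    For example (an illustrative sketch; the two-element keys are made up)::
+
+        parent_map = {('f1', 'rev1'): (),
+                      ('f1', 'rev2'): (('f1', 'rev1'),),
+                      ('f2', 'rev1'): ()}
+        sort_gc_optimal(parent_map)
+        # => [('f1', 'rev2'), ('f1', 'rev1'), ('f2', 'rev1')]
+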
+ :return: A sorted-list of keys
+ """
+ # groupcompress ordering is approximately reverse topological,
+ # properly grouped by file-id.
+ per_prefix_map = {}
+ for key, value in parent_map.iteritems():
+ if isinstance(key, str) or len(key) == 1:
+ prefix = ''
+ else:
+ prefix = key[0]
+ try:
+ per_prefix_map[prefix][key] = value
+ except KeyError:
+ per_prefix_map[prefix] = {key: value}
+
+ present_keys = []
+ for prefix in sorted(per_prefix_map):
+ present_keys.extend(reversed(tsort.topo_sort(per_prefix_map[prefix])))
+ return present_keys
+
+
+# The max zlib window size is 32kB, so if we cap the decompressor's output
+# ('max_size') at the requested bytes + 32kB, we should be guaranteed to get
+# num_bytes out.
+_ZLIB_DECOMP_WINDOW = 32*1024
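+# A minimal sketch of the pattern used in _ensure_content() below (the
+# variable names here are illustrative only):
+#   decomp = zlib.decompressobj()
+#   data = decomp.decompress(z_bytes, num_bytes + _ZLIB_DECOMP_WINDOW)
+#   # anything not yet decompressed stays in decomp.unconsumed_tail
+# Asking for num_bytes plus a full 32kB window means zlib should never stop
+# short of the bytes we actually need.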
+
+class GroupCompressBlock(object):
+ """An object which maintains the internal structure of the compressed data.
+
+ This tracks the meta info (start of text, length, type, etc.)
+ """
+
+ # Group Compress Block v1 Zlib
+ GCB_HEADER = 'gcb1z\n'
+ # Group Compress Block v1 Lzma
+ GCB_LZ_HEADER = 'gcb1l\n'
+ GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)
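+    # An illustrative sketch of the serialised layout (the payload below is
+    # made up): a zlib block whose compressed content is 10 bytes and whose
+    # uncompressed content is 22 bytes is stored as
+    #   'gcb1z\n' + '10\n' + '22\n' + <10 bytes of zlib data>
+    # from_bytes()/_parse_bytes() below read the two decimal lengths and keep
+    # the remainder as the compressed chunk.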
+
+ def __init__(self):
+ # map by key? or just order in file?
+ self._compressor_name = None
+ self._z_content_chunks = None
+ self._z_content_decompressor = None
+ self._z_content_length = None
+ self._content_length = None
+ self._content = None
+ self._content_chunks = None
+
+ def __len__(self):
+ # This is the maximum number of bytes this object will reference if
+ # everything is decompressed. However, if we decompress less than
+ # everything... (this would cause some problems for LRUSizeCache)
+ return self._content_length + self._z_content_length
+
+ def _ensure_content(self, num_bytes=None):
+ """Make sure that content has been expanded enough.
+
+ :param num_bytes: Ensure that we have extracted at least num_bytes of
+ content. If None, consume everything
+ """
+ if self._content_length is None:
+ raise AssertionError('self._content_length should never be None')
+ if num_bytes is None:
+ num_bytes = self._content_length
+ elif (self._content_length is not None
+ and num_bytes > self._content_length):
+ raise AssertionError(
+ 'requested num_bytes (%d) > content length (%d)'
+ % (num_bytes, self._content_length))
+ # Expand the content if required
+ if self._content is None:
+ if self._content_chunks is not None:
+ self._content = ''.join(self._content_chunks)
+ self._content_chunks = None
+ if self._content is None:
+ # We join self._z_content_chunks here, because if we are
+ # decompressing, then it is *very* likely that we have a single
+ # chunk
+ if self._z_content_chunks is None:
+ raise AssertionError('No content to decompress')
+ z_content = ''.join(self._z_content_chunks)
+ if z_content == '':
+ self._content = ''
+ elif self._compressor_name == 'lzma':
+ # We don't do partial lzma decomp yet
+ import pylzma
+ self._content = pylzma.decompress(z_content)
+ elif self._compressor_name == 'zlib':
+ # Start a zlib decompressor
+ if num_bytes * 4 > self._content_length * 3:
+                    # If we are requesting more than 3/4ths of the content,
+ # just extract the whole thing in a single pass
+ num_bytes = self._content_length
+ self._content = zlib.decompress(z_content)
+ else:
+ self._z_content_decompressor = zlib.decompressobj()
+ # Seed the decompressor with the uncompressed bytes, so
+ # that the rest of the code is simplified
+ self._content = self._z_content_decompressor.decompress(
+ z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
+ if not self._z_content_decompressor.unconsumed_tail:
+ self._z_content_decompressor = None
+ else:
+ raise AssertionError('Unknown compressor: %r'
+ % self._compressor_name)
+        # Any bytes remaining to be decompressed will be in the decompressor's
+ # 'unconsumed_tail'
+
+ # Do we have enough bytes already?
+ if len(self._content) >= num_bytes:
+ return
+ # If we got this far, and don't have a decompressor, something is wrong
+ if self._z_content_decompressor is None:
+ raise AssertionError(
+ 'No decompressor to decompress %d bytes' % num_bytes)
+ remaining_decomp = self._z_content_decompressor.unconsumed_tail
+ if not remaining_decomp:
+ raise AssertionError('Nothing left to decompress')
+ needed_bytes = num_bytes - len(self._content)
+ # We always set max_size to 32kB over the minimum needed, so that
+ # zlib will give us as much as we really want.
+ # TODO: If this isn't good enough, we could make a loop here,
+ # that keeps expanding the request until we get enough
+ self._content += self._z_content_decompressor.decompress(
+ remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
+ if len(self._content) < num_bytes:
+ raise AssertionError('%d bytes wanted, only %d available'
+ % (num_bytes, len(self._content)))
+ if not self._z_content_decompressor.unconsumed_tail:
+ # The stream is finished
+ self._z_content_decompressor = None
+
+ def _parse_bytes(self, bytes, pos):
+ """Read the various lengths from the header.
+
+ This also populates the various 'compressed' buffers.
+
+ :return: The position in bytes just after the last newline
+ """
+ # At present, we have 2 integers for the compressed and uncompressed
+ # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
+ # checking too far, cap the search to 14 bytes.
+ pos2 = bytes.index('\n', pos, pos + 14)
+ self._z_content_length = int(bytes[pos:pos2])
+ pos = pos2 + 1
+ pos2 = bytes.index('\n', pos, pos + 14)
+ self._content_length = int(bytes[pos:pos2])
+ pos = pos2 + 1
+ if len(bytes) != (pos + self._z_content_length):
+ # XXX: Define some GCCorrupt error ?
+ raise AssertionError('Invalid bytes: (%d) != %d + %d' %
+ (len(bytes), pos, self._z_content_length))
+ self._z_content_chunks = (bytes[pos:],)
+
+ @property
+ def _z_content(self):
+ """Return z_content_chunks as a simple string.
+
+ Meant only to be used by the test suite.
+ """
+ if self._z_content_chunks is not None:
+ return ''.join(self._z_content_chunks)
+ return None
+
+ @classmethod
+ def from_bytes(cls, bytes):
+ out = cls()
+ if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
+ raise ValueError('bytes did not start with any of %r'
+ % (cls.GCB_KNOWN_HEADERS,))
+ # XXX: why not testing the whole header ?
+ if bytes[4] == 'z':
+ out._compressor_name = 'zlib'
+ elif bytes[4] == 'l':
+ out._compressor_name = 'lzma'
+ else:
+ raise ValueError('unknown compressor: %r' % (bytes,))
+ out._parse_bytes(bytes, 6)
+ return out
+
+ def extract(self, key, start, end, sha1=None):
+ """Extract the text for a specific key.
+
+ :param key: The label used for this content
+ :param sha1: TODO (should we validate only when sha1 is supplied?)
+ :return: The bytes for the content
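+
+        For example (an illustrative sketch of the record layout, using a
+        made-up 5-byte text)::
+
+            'f' + '\\x05' + 'hello'   # type byte, base128 length, content
+
+        so start/end for such a record would span those 7 bytes of the
+        uncompressed content.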
+ """
+ if start == end == 0:
+ return ''
+ self._ensure_content(end)
+ # The bytes are 'f' or 'd' for the type, then a variable-length
+ # base128 integer for the content size, then the actual content
+ # We know that the variable-length integer won't be longer than 5
+ # bytes (it takes 5 bytes to encode 2^32)
+ c = self._content[start]
+ if c == 'f':
+ type = 'fulltext'
+ else:
+ if c != 'd':
+ raise ValueError('Unknown content control code: %s'
+ % (c,))
+ type = 'delta'
+ content_len, len_len = decode_base128_int(
+ self._content[start + 1:start + 6])
+ content_start = start + 1 + len_len
+ if end != content_start + content_len:
+ raise ValueError('end != len according to field header'
+ ' %s != %s' % (end, content_start + content_len))
+ if c == 'f':
+ bytes = self._content[content_start:end]
+ elif c == 'd':
+ bytes = apply_delta_to_source(self._content, content_start, end)
+ return bytes
+
+ def set_chunked_content(self, content_chunks, length):
+ """Set the content of this block to the given chunks."""
+        # If we have lots of short lines, it may be more efficient to join
+ # the content ahead of time. If the content is <10MiB, we don't really
+ # care about the extra memory consumption, so we can just pack it and
+ # be done. However, timing showed 18s => 17.9s for repacking 1k revs of
+ # mysql, which is below the noise margin
+ self._content_length = length
+ self._content_chunks = content_chunks
+ self._content = None
+ self._z_content_chunks = None
+
+ def set_content(self, content):
+ """Set the content of this block."""
+ self._content_length = len(content)
+ self._content = content
+ self._z_content_chunks = None
+
+ def _create_z_content_from_chunks(self, chunks):
+ compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
+        # Peak memory here is 1 fulltext, 1 compressed text, + zlib overhead
+ # (measured peak is maybe 30MB over the above...)
+ compressed_chunks = map(compressor.compress, chunks)
+ compressed_chunks.append(compressor.flush())
+ # Ignore empty chunks
+ self._z_content_chunks = [c for c in compressed_chunks if c]
+ self._z_content_length = sum(map(len, self._z_content_chunks))
+
+ def _create_z_content(self):
+ if self._z_content_chunks is not None:
+ return
+ if self._content_chunks is not None:
+ chunks = self._content_chunks
+ else:
+ chunks = (self._content,)
+ self._create_z_content_from_chunks(chunks)
+
+ def to_chunks(self):
+ """Create the byte stream as a series of 'chunks'"""
+ self._create_z_content()
+ header = self.GCB_HEADER
+ chunks = ['%s%d\n%d\n'
+ % (header, self._z_content_length, self._content_length),
+ ]
+ chunks.extend(self._z_content_chunks)
+ total_len = sum(map(len, chunks))
+ return total_len, chunks
+
+ def to_bytes(self):
+ """Encode the information into a byte stream."""
+ total_len, chunks = self.to_chunks()
+ return ''.join(chunks)
+
+ def _dump(self, include_text=False):
+ """Take this block, and spit out a human-readable structure.
+
+        :param include_text: Inserts also include the text they add; choose
+            whether you want it displayed in the dump or not.
+ :return: A dump of the given block. The layout is something like:
+ [('f', length), ('d', delta_length, text_length, [delta_info])]
+ delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
+ ...]
+ """
+ self._ensure_content()
+ result = []
+ pos = 0
+ while pos < self._content_length:
+ kind = self._content[pos]
+ pos += 1
+ if kind not in ('f', 'd'):
+ raise ValueError('invalid kind character: %r' % (kind,))
+ content_len, len_len = decode_base128_int(
+ self._content[pos:pos + 5])
+ pos += len_len
+ if content_len + pos > self._content_length:
+ raise ValueError('invalid content_len %d for record @ pos %d'
+ % (content_len, pos - len_len - 1))
+ if kind == 'f': # Fulltext
+ if include_text:
+ text = self._content[pos:pos+content_len]
+ result.append(('f', content_len, text))
+ else:
+ result.append(('f', content_len))
+ elif kind == 'd': # Delta
+ delta_content = self._content[pos:pos+content_len]
+ delta_info = []
+ # The first entry in a delta is the decompressed length
+ decomp_len, delta_pos = decode_base128_int(delta_content)
+ result.append(('d', content_len, decomp_len, delta_info))
+ measured_len = 0
+ while delta_pos < content_len:
+ c = ord(delta_content[delta_pos])
+ delta_pos += 1
+ if c & 0x80: # Copy
+ (offset, length,
+ delta_pos) = decode_copy_instruction(delta_content, c,
+ delta_pos)
+ if include_text:
+ text = self._content[offset:offset+length]
+ delta_info.append(('c', offset, length, text))
+ else:
+ delta_info.append(('c', offset, length))
+ measured_len += length
+ else: # Insert
+ if include_text:
+ txt = delta_content[delta_pos:delta_pos+c]
+ else:
+ txt = ''
+ delta_info.append(('i', c, txt))
+ measured_len += c
+ delta_pos += c
+ if delta_pos != content_len:
+ raise ValueError('Delta consumed a bad number of bytes:'
+ ' %d != %d' % (delta_pos, content_len))
+ if measured_len != decomp_len:
+ raise ValueError('Delta claimed fulltext was %d bytes, but'
+ ' extraction resulted in %d bytes'
+ % (decomp_len, measured_len))
+ pos += content_len
+ return result
+
+
+class _LazyGroupCompressFactory(object):
+ """Yield content from a GroupCompressBlock on demand."""
+
+ def __init__(self, key, parents, manager, start, end, first):
+ """Create a _LazyGroupCompressFactory
+
+ :param key: The key of just this record
+ :param parents: The parents of this key (possibly None)
+        :param manager: The _LazyGroupContentManager that owns this
+            record's block
+ :param start: Offset of the first byte for this record in the
+            uncompressed content
+ :param end: Offset of the byte just after the end of this record
+ (ie, bytes = content[start:end])
+ :param first: Is this the first Factory for the given block?
+ """
+ self.key = key
+ self.parents = parents
+ self.sha1 = None
+ # Note: This attribute coupled with Manager._factories creates a
+ # reference cycle. Perhaps we would rather use a weakref(), or
+ # find an appropriate time to release the ref. After the first
+ # get_bytes_as call? After Manager.get_record_stream() returns
+ # the object?
+ self._manager = manager
+ self._bytes = None
+ self.storage_kind = 'groupcompress-block'
+ if not first:
+ self.storage_kind = 'groupcompress-block-ref'
+ self._first = first
+ self._start = start
+ self._end = end
+
+ def __repr__(self):
+ return '%s(%s, first=%s)' % (self.__class__.__name__,
+ self.key, self._first)
+
+ def get_bytes_as(self, storage_kind):
+ if storage_kind == self.storage_kind:
+ if self._first:
+ # wire bytes, something...
+ return self._manager._wire_bytes()
+ else:
+ return ''
+ if storage_kind in ('fulltext', 'chunked'):
+ if self._bytes is None:
+ # Grab and cache the raw bytes for this entry
+ # and break the ref-cycle with _manager since we don't need it
+ # anymore
+ try:
+ self._manager._prepare_for_extract()
+ except zlib.error as value:
+ raise errors.DecompressCorruption("zlib: " + str(value))
+ block = self._manager._block
+ self._bytes = block.extract(self.key, self._start, self._end)
+ # There are code paths that first extract as fulltext, and then
+ # extract as storage_kind (smart fetch). So we don't break the
+ # refcycle here, but instead in manager.get_record_stream()
+ if storage_kind == 'fulltext':
+ return self._bytes
+ else:
+ return [self._bytes]
+ raise errors.UnavailableRepresentation(self.key, storage_kind,
+ self.storage_kind)
+
+
+class _LazyGroupContentManager(object):
+ """This manages a group of _LazyGroupCompressFactory objects."""
+
+ _max_cut_fraction = 0.75 # We allow a block to be trimmed to 75% of
+ # current size, and still be considered
+        # reusable
+ _full_block_size = 4*1024*1024
+ _full_mixed_block_size = 2*1024*1024
+ _full_enough_block_size = 3*1024*1024 # size at which we won't repack
+ _full_enough_mixed_block_size = 2*768*1024 # 1.5MB
+
+ def __init__(self, block, get_compressor_settings=None):
+ self._block = block
+ # We need to preserve the ordering
+ self._factories = []
+ self._last_byte = 0
+ self._get_settings = get_compressor_settings
+ self._compressor_settings = None
+
+ def _get_compressor_settings(self):
+ if self._compressor_settings is not None:
+ return self._compressor_settings
+ settings = None
+ if self._get_settings is not None:
+ settings = self._get_settings()
+ if settings is None:
+ vf = GroupCompressVersionedFiles
+ settings = vf._DEFAULT_COMPRESSOR_SETTINGS
+ self._compressor_settings = settings
+ return self._compressor_settings
+
+ def add_factory(self, key, parents, start, end):
+ if not self._factories:
+ first = True
+ else:
+ first = False
+ # Note that this creates a reference cycle....
+ factory = _LazyGroupCompressFactory(key, parents, self,
+ start, end, first=first)
+        # max() works here, but as a function call it is noticeably slower;
+        # timeit says 250ms for max() and 100ms for the plain comparison
+ if end > self._last_byte:
+ self._last_byte = end
+ self._factories.append(factory)
+
+ def get_record_stream(self):
+ """Get a record for all keys added so far."""
+ for factory in self._factories:
+ yield factory
+ # Break the ref-cycle
+ factory._bytes = None
+ factory._manager = None
+ # TODO: Consider setting self._factories = None after the above loop,
+ # as it will break the reference cycle
+
+ def _trim_block(self, last_byte):
+ """Create a new GroupCompressBlock, with just some of the content."""
+ # None of the factories need to be adjusted, because the content is
+ # located in an identical place. Just that some of the unreferenced
+ # trailing bytes are stripped
+ trace.mutter('stripping trailing bytes from groupcompress block'
+ ' %d => %d', self._block._content_length, last_byte)
+ new_block = GroupCompressBlock()
+ self._block._ensure_content(last_byte)
+ new_block.set_content(self._block._content[:last_byte])
+ self._block = new_block
+
+ def _make_group_compressor(self):
+ return GroupCompressor(self._get_compressor_settings())
+
+ def _rebuild_block(self):
+ """Create a new GroupCompressBlock with only the referenced texts."""
+ compressor = self._make_group_compressor()
+ tstart = time.time()
+ old_length = self._block._content_length
+ end_point = 0
+ for factory in self._factories:
+ bytes = factory.get_bytes_as('fulltext')
+ (found_sha1, start_point, end_point,
+ type) = compressor.compress(factory.key, bytes, factory.sha1)
+ # Now update this factory with the new offsets, etc
+ factory.sha1 = found_sha1
+ factory._start = start_point
+ factory._end = end_point
+ self._last_byte = end_point
+ new_block = compressor.flush()
+ # TODO: Should we check that new_block really *is* smaller than the old
+ # block? It seems hard to come up with a method that it would
+ # expand, since we do full compression again. Perhaps based on a
+ # request that ends up poorly ordered?
+ # TODO: If the content would have expanded, then we would want to
+ # handle a case where we need to split the block.
+ # Now that we have a user-tweakable option
+ # (max_bytes_to_index), it is possible that one person set it
+ # to a very low value, causing poor compression.
+ delta = time.time() - tstart
+ self._block = new_block
+ trace.mutter('creating new compressed block on-the-fly in %.3fs'
+ ' %d bytes => %d bytes', delta, old_length,
+ self._block._content_length)
+
+ def _prepare_for_extract(self):
+ """A _LazyGroupCompressFactory is about to extract to fulltext."""
+ # We expect that if one child is going to fulltext, all will be. This
+        # helps prevent all of them from extracting a small amount at a time,
+        # which in itself isn't terribly expensive, but resizing a 2MB string
+        # 32kB at a time (self._block._content) is a little expensive.
+ self._block._ensure_content(self._last_byte)
+
+ def _check_rebuild_action(self):
+ """Check to see if our block should be repacked."""
+ total_bytes_used = 0
+ last_byte_used = 0
+ for factory in self._factories:
+ total_bytes_used += factory._end - factory._start
+ if last_byte_used < factory._end:
+ last_byte_used = factory._end
+ # If we are using more than half of the bytes from the block, we have
+ # nothing else to check
+ if total_bytes_used * 2 >= self._block._content_length:
+ return None, last_byte_used, total_bytes_used
+ # We are using less than 50% of the content. Is the content we are
+ # using at the beginning of the block? If so, we can just trim the
+ # tail, rather than rebuilding from scratch.
+ if total_bytes_used * 2 > last_byte_used:
+ return 'trim', last_byte_used, total_bytes_used
+
+ # We are using a small amount of the data, and it isn't just packed
+ # nicely at the front, so rebuild the content.
+ # Note: This would be *nicer* as a strip-data-from-group, rather than
+ # building it up again from scratch
+ # It might be reasonable to consider the fulltext sizes for
+ # different bits when deciding this, too. As you may have a small
+ # fulltext, and a trivial delta, and you are just trading around
+ # for another fulltext. If we do a simple 'prune' you may end up
+ # expanding many deltas into fulltexts, as well.
+ # If we build a cheap enough 'strip', then we could try a strip,
+ # if that expands the content, we then rebuild.
+ return 'rebuild', last_byte_used, total_bytes_used
+
+ def check_is_well_utilized(self):
+ """Is the current block considered 'well utilized'?
+
+ This heuristic asks if the current block considers itself to be a fully
+ developed group, rather than just a loose collection of data.
+ """
+ if len(self._factories) == 1:
+ # A block of length 1 could be improved by combining with other
+            # groups - don't look deeper. Even groups larger than the max size
+            # could compress well with adjacent versions of the same thing.
+ return False
+ action, last_byte_used, total_bytes_used = self._check_rebuild_action()
+ block_size = self._block._content_length
+ if total_bytes_used < block_size * self._max_cut_fraction:
+ # This block wants to trim itself small enough that we want to
+ # consider it under-utilized.
+ return False
+ # TODO: This code is meant to be the twin of _insert_record_stream's
+ # 'start_new_block' logic. It would probably be better to factor
+ # out that logic into a shared location, so that it stays
+ # together better
+ # We currently assume a block is properly utilized whenever it is >75%
+ # of the size of a 'full' block. In normal operation, a block is
+ # considered full when it hits 4MB of same-file content. So any block
+ # >3MB is 'full enough'.
+ # The only time this isn't true is when a given block has large-object
+ # content. (a single file >4MB, etc.)
+ # Under these circumstances, we allow a block to grow to
+ # 2 x largest_content. Which means that if a given block had a large
+ # object, it may actually be under-utilized. However, given that this
+ # is 'pack-on-the-fly' it is probably reasonable to not repack large
+ # content blobs on-the-fly. Note that because we return False for all
+ # 1-item blobs, we will repack them; we may wish to reevaluate our
+ # treatment of large object blobs in the future.
+ if block_size >= self._full_enough_block_size:
+ return True
+ # If a block is <3MB, it still may be considered 'full' if it contains
+ # mixed content. The current rule is 2MB of mixed content is considered
+ # full. So check to see if this block contains mixed content, and
+ # set the threshold appropriately.
+ common_prefix = None
+ for factory in self._factories:
+ prefix = factory.key[:-1]
+ if common_prefix is None:
+ common_prefix = prefix
+ elif prefix != common_prefix:
+ # Mixed content, check the size appropriately
+ if block_size >= self._full_enough_mixed_block_size:
+ return True
+ break
+ # The content failed both the mixed check and the single-content check
+ # so obviously it is not fully utilized
+ # TODO: there is one other constraint that isn't being checked
+ # namely, that the entries in the block are in the appropriate
+ # order. For example, you could insert the entries in exactly
+ # reverse groupcompress order, and we would think that is ok.
+ # (all the right objects are in one group, and it is fully
+ # utilized, etc.) For now, we assume that case is rare,
+ # especially since we should always fetch in 'groupcompress'
+ # order.
+ return False
+
+ def _check_rebuild_block(self):
+ action, last_byte_used, total_bytes_used = self._check_rebuild_action()
+ if action is None:
+ return
+ if action == 'trim':
+ self._trim_block(last_byte_used)
+ elif action == 'rebuild':
+ self._rebuild_block()
+ else:
+ raise ValueError('unknown rebuild action: %r' % (action,))
+
+ def _wire_bytes(self):
+ """Return a byte stream suitable for transmitting over the wire."""
+ self._check_rebuild_block()
+ # The outer block starts with:
+ # 'groupcompress-block\n'
+ # <length of compressed key info>\n
+ # <length of uncompressed info>\n
+ # <length of gc block>\n
+ # <header bytes>
+ # <gc-block>
+ lines = ['groupcompress-block\n']
+ # The minimal info we need is the key, the start offset, and the
+ # parents. The length and type are encoded in the record itself.
+        # However, passing in the other bits makes it easier. For each record
+        # we send:
+ # 1 line key
+ # 1 line with parents, '' for ()
+ # 1 line for start offset
+ # 1 line for end byte
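+        # For example (illustrative, made-up values): a record for key
+        # ('file-id', 'rev-id') with parents None, spanning bytes 0..100,
+        # is encoded as 'file-id\x00rev-id\nNone:\n0\n100\n'.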
+ header_lines = []
+ for factory in self._factories:
+ key_bytes = '\x00'.join(factory.key)
+ parents = factory.parents
+ if parents is None:
+ parent_bytes = 'None:'
+ else:
+ parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
+ record_header = '%s\n%s\n%d\n%d\n' % (
+ key_bytes, parent_bytes, factory._start, factory._end)
+ header_lines.append(record_header)
+ # TODO: Can we break the refcycle at this point and set
+ # factory._manager = None?
+ header_bytes = ''.join(header_lines)
+ del header_lines
+ header_bytes_len = len(header_bytes)
+ z_header_bytes = zlib.compress(header_bytes)
+ del header_bytes
+ z_header_bytes_len = len(z_header_bytes)
+ block_bytes_len, block_chunks = self._block.to_chunks()
+ lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
+ block_bytes_len))
+ lines.append(z_header_bytes)
+ lines.extend(block_chunks)
+ del z_header_bytes, block_chunks
+ # TODO: This is a point where we will double the memory consumption. To
+ # avoid this, we probably have to switch to a 'chunked' api
+ return ''.join(lines)
+
+ @classmethod
+ def from_bytes(cls, bytes):
+ # TODO: This does extra string copying, probably better to do it a
+ # different way. At a minimum this creates 2 copies of the
+ # compressed content
+ (storage_kind, z_header_len, header_len,
+ block_len, rest) = bytes.split('\n', 4)
+ del bytes
+ if storage_kind != 'groupcompress-block':
+ raise ValueError('Unknown storage kind: %s' % (storage_kind,))
+ z_header_len = int(z_header_len)
+ if len(rest) < z_header_len:
+ raise ValueError('Compressed header len shorter than all bytes')
+ z_header = rest[:z_header_len]
+ header_len = int(header_len)
+ header = zlib.decompress(z_header)
+ if len(header) != header_len:
+ raise ValueError('invalid length for decompressed bytes')
+ del z_header
+ block_len = int(block_len)
+ if len(rest) != z_header_len + block_len:
+ raise ValueError('Invalid length for block')
+ block_bytes = rest[z_header_len:]
+ del rest
+ # So now we have a valid GCB, we just need to parse the factories that
+ # were sent to us
+ header_lines = header.split('\n')
+ del header
+ last = header_lines.pop()
+ if last != '':
+ raise ValueError('header lines did not end with a trailing'
+ ' newline')
+ if len(header_lines) % 4 != 0:
+ raise ValueError('The header was not an even multiple of 4 lines')
+ block = GroupCompressBlock.from_bytes(block_bytes)
+ del block_bytes
+ result = cls(block)
+ for start in xrange(0, len(header_lines), 4):
+ # intern()?
+ key = tuple(header_lines[start].split('\x00'))
+ parents_line = header_lines[start+1]
+ if parents_line == 'None:':
+ parents = None
+ else:
+ parents = tuple([tuple(segment.split('\x00'))
+ for segment in parents_line.split('\t')
+ if segment])
+ start_offset = int(header_lines[start+2])
+ end_offset = int(header_lines[start+3])
+ result.add_factory(key, parents, start_offset, end_offset)
+ return result
+
+
+def network_block_to_records(storage_kind, bytes, line_end):
+ if storage_kind != 'groupcompress-block':
+ raise ValueError('Unknown storage kind: %s' % (storage_kind,))
+ manager = _LazyGroupContentManager.from_bytes(bytes)
+ return manager.get_record_stream()
+
+
+class _CommonGroupCompressor(object):
+
+ def __init__(self, settings=None):
+ """Create a GroupCompressor."""
+ self.chunks = []
+ self._last = None
+ self.endpoint = 0
+ self.input_bytes = 0
+ self.labels_deltas = {}
+ self._delta_index = None # Set by the children
+ self._block = GroupCompressBlock()
+ if settings is None:
+ self._settings = {}
+ else:
+ self._settings = settings
+
+ def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
+ """Compress lines with label key.
+
+ :param key: A key tuple. It is stored in the output
+ for identification of the text during decompression. If the last
+ element is 'None' it is replaced with the sha1 of the text -
+ e.g. sha1:xxxxxxx.
+ :param bytes: The bytes to be compressed
+ :param expected_sha: If non-None, the sha the lines are believed to
+ have. During compression the sha is calculated; a mismatch will
+ cause an error.
+ :param nostore_sha: If the computed sha1 sum matches, we will raise
+ ExistingContent rather than adding the text.
+ :param soft: Do a 'soft' compression. This means that we require larger
+ ranges to match to be considered for a copy command.
+
+ :return: The sha1 of lines, the start and end offsets in the delta, and
+ the type ('fulltext' or 'delta').
+
+ :seealso VersionedFiles.add_lines:
+ """
+ if not bytes: # empty, like a dir entry, etc
+ if nostore_sha == _null_sha1:
+ raise errors.ExistingContent()
+ return _null_sha1, 0, 0, 'fulltext'
+ # we assume someone knew what they were doing when they passed it in
+ if expected_sha is not None:
+ sha1 = expected_sha
+ else:
+ sha1 = osutils.sha_string(bytes)
+ if nostore_sha is not None:
+ if sha1 == nostore_sha:
+ raise errors.ExistingContent()
+ if key[-1] is None:
+ key = key[:-1] + ('sha1:' + sha1,)
+
+ start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
+ return sha1, start, end, type
+
+ def _compress(self, key, bytes, max_delta_size, soft=False):
+ """Compress lines with label key.
+
+ :param key: A key tuple. It is stored in the output for identification
+ of the text during decompression.
+
+ :param bytes: The bytes to be compressed
+
+ :param max_delta_size: The size above which we issue a fulltext instead
+ of a delta.
+
+ :param soft: Do a 'soft' compression. This means that we require larger
+ ranges to match to be considered for a copy command.
+
+ :return: The sha1 of lines, the start and end offsets in the delta, and
+ the type ('fulltext' or 'delta').
+ """
+ raise NotImplementedError(self._compress)
+
+ def extract(self, key):
+ """Extract a key previously added to the compressor.
+
+ :param key: The key to extract.
+ :return: An iterable over bytes and the sha1.
+ """
+ (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
+ delta_chunks = self.chunks[start_chunk:end_chunk]
+ stored_bytes = ''.join(delta_chunks)
+ if stored_bytes[0] == 'f':
+ fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
+ data_len = fulltext_len + 1 + offset
+ if data_len != len(stored_bytes):
+ raise ValueError('Index claimed fulltext len, but stored bytes'
+ ' claim %s != %s'
+ % (len(stored_bytes), data_len))
+ bytes = stored_bytes[offset + 1:]
+ else:
+ # XXX: This is inefficient at best
+ source = ''.join(self.chunks[:start_chunk])
+ if stored_bytes[0] != 'd':
+ raise ValueError('Unknown content kind, bytes claim %s'
+ % (stored_bytes[0],))
+ delta_len, offset = decode_base128_int(stored_bytes[1:10])
+ data_len = delta_len + 1 + offset
+ if data_len != len(stored_bytes):
+ raise ValueError('Index claimed delta len, but stored bytes'
+ ' claim %s != %s'
+ % (len(stored_bytes), data_len))
+ bytes = apply_delta(source, stored_bytes[offset + 1:])
+ bytes_sha1 = osutils.sha_string(bytes)
+ return bytes, bytes_sha1
+
+ def flush(self):
+ """Finish this group, creating a formatted stream.
+
+ After calling this, the compressor should no longer be used
+ """
+ self._block.set_chunked_content(self.chunks, self.endpoint)
+ self.chunks = None
+ self._delta_index = None
+ return self._block
+
+ def pop_last(self):
+ """Call this if you want to 'revoke' the last compression.
+
+ After this, the data structures will be rolled back, but you cannot do
+ more compression.
+ """
+ self._delta_index = None
+ del self.chunks[self._last[0]:]
+ self.endpoint = self._last[1]
+ self._last = None
+
+ def ratio(self):
+ """Return the overall compression ratio."""
+ return float(self.input_bytes) / float(self.endpoint)
+
+
+class PythonGroupCompressor(_CommonGroupCompressor):
+
+ def __init__(self, settings=None):
+ """Create a GroupCompressor.
+
+ Used only if the pyrex version is not available.
+ """
+ super(PythonGroupCompressor, self).__init__(settings)
+ self._delta_index = LinesDeltaIndex([])
+ # The actual content is managed by LinesDeltaIndex
+ self.chunks = self._delta_index.lines
+
+ def _compress(self, key, bytes, max_delta_size, soft=False):
+ """see _CommonGroupCompressor._compress"""
+ input_len = len(bytes)
+ new_lines = osutils.split_lines(bytes)
+ out_lines, index_lines = self._delta_index.make_delta(
+ new_lines, bytes_length=input_len, soft=soft)
+ delta_length = sum(map(len, out_lines))
+ if delta_length > max_delta_size:
+ # The delta is longer than the fulltext, insert a fulltext
+ type = 'fulltext'
+ out_lines = ['f', encode_base128_int(input_len)]
+ out_lines.extend(new_lines)
+ index_lines = [False, False]
+ index_lines.extend([True] * len(new_lines))
+ else:
+ # this is a worthy delta, output it
+ type = 'delta'
+ out_lines[0] = 'd'
+ # Update the delta_length to include those two encoded integers
+ out_lines[1] = encode_base128_int(delta_length)
+ # Before insertion
+ start = self.endpoint
+ chunk_start = len(self.chunks)
+ self._last = (chunk_start, self.endpoint)
+ self._delta_index.extend_lines(out_lines, index_lines)
+ self.endpoint = self._delta_index.endpoint
+ self.input_bytes += input_len
+ chunk_end = len(self.chunks)
+ self.labels_deltas[key] = (start, chunk_start,
+ self.endpoint, chunk_end)
+ return start, self.endpoint, type
+
+
+class PyrexGroupCompressor(_CommonGroupCompressor):
+ """Produce a serialised group of compressed texts.
+
+ It contains code very similar to SequenceMatcher because of having a similar
+ task. However some key differences apply:
+
+ * there is no junk, we want a minimal edit not a human readable diff.
+ * we don't filter very common lines (because we don't know where a good
+      range will start, and after the first text we want to be emitting minimal
+      edits only).
+ * we chain the left side, not the right side
+ * we incrementally update the adjacency matrix as new lines are provided.
+ * we look for matches in all of the left side, so the routine which does
+      the analogous task of find_longest_match does not need to filter on the
+ left side.
+ """
+
+ def __init__(self, settings=None):
+ super(PyrexGroupCompressor, self).__init__(settings)
+ max_bytes_to_index = self._settings.get('max_bytes_to_index', 0)
+ self._delta_index = DeltaIndex(max_bytes_to_index=max_bytes_to_index)
+
+ def _compress(self, key, bytes, max_delta_size, soft=False):
+ """see _CommonGroupCompressor._compress"""
+ input_len = len(bytes)
+ # By having action/label/sha1/len, we can parse the group if the index
+ # was ever destroyed, we have the key in 'label', we know the final
+ # bytes are valid from sha1, and we know where to find the end of this
+ # record because of 'len'. (the delta record itself will store the
+ # total length for the expanded record)
+ # 'len: %d\n' costs approximately 1% increase in total data
+ # Having the labels at all costs us 9-10% increase, 38% increase for
+ # inventory pages, and 5.8% increase for text pages
+ # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
+ if self._delta_index._source_offset != self.endpoint:
+ raise AssertionError('_source_offset != endpoint'
+ ' somehow the DeltaIndex got out of sync with'
+ ' the output lines')
+ delta = self._delta_index.make_delta(bytes, max_delta_size)
+ if (delta is None):
+ type = 'fulltext'
+ enc_length = encode_base128_int(len(bytes))
+ len_mini_header = 1 + len(enc_length)
+ self._delta_index.add_source(bytes, len_mini_header)
+ new_chunks = ['f', enc_length, bytes]
+ else:
+ type = 'delta'
+ enc_length = encode_base128_int(len(delta))
+ len_mini_header = 1 + len(enc_length)
+ new_chunks = ['d', enc_length, delta]
+ self._delta_index.add_delta_source(delta, len_mini_header)
+ # Before insertion
+ start = self.endpoint
+ chunk_start = len(self.chunks)
+ # Now output these bytes
+ self._output_chunks(new_chunks)
+ self.input_bytes += input_len
+ chunk_end = len(self.chunks)
+ self.labels_deltas[key] = (start, chunk_start,
+ self.endpoint, chunk_end)
+ if not self._delta_index._source_offset == self.endpoint:
+ raise AssertionError('the delta index is out of sync'
+                ' with the output lines %s != %s'
+ % (self._delta_index._source_offset, self.endpoint))
+ return start, self.endpoint, type
+
+ def _output_chunks(self, new_chunks):
+ """Output some chunks.
+
+ :param new_chunks: The chunks to output.
+ """
+ self._last = (len(self.chunks), self.endpoint)
+ endpoint = self.endpoint
+ self.chunks.extend(new_chunks)
+ endpoint += sum(map(len, new_chunks))
+ self.endpoint = endpoint
+
+
+def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
+ """Create a factory for creating a pack based groupcompress.
+
+ This is only functional enough to run interface tests, it doesn't try to
+ provide a full pack environment.
+
+ :param graph: Store a graph.
+ :param delta: Delta compress contents.
+    :param keylength: How long keys should be.
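+
+    A minimal usage sketch (illustrative; ``transport`` is assumed to be any
+    writable bzrlib transport)::
+
+        factory = make_pack_factory(graph=True, delta=False, keylength=1)
+        vf = factory(transport)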
+ """
+ def factory(transport):
+ parents = graph
+ ref_length = 0
+ if graph:
+ ref_length = 1
+ graph_index = BTreeBuilder(reference_lists=ref_length,
+ key_elements=keylength)
+ stream = transport.open_write_stream('newpack')
+ writer = pack.ContainerWriter(stream.write)
+ writer.begin()
+ index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
+ add_callback=graph_index.add_nodes,
+ inconsistency_fatal=inconsistency_fatal)
+ access = pack_repo._DirectPackAccess({})
+ access.set_writer(writer, graph_index, (transport, 'newpack'))
+ result = GroupCompressVersionedFiles(index, access, delta)
+ result.stream = stream
+ result.writer = writer
+ return result
+ return factory
+
+
+def cleanup_pack_group(versioned_files):
+ versioned_files.writer.end()
+ versioned_files.stream.close()
+
+
+class _BatchingBlockFetcher(object):
+ """Fetch group compress blocks in batches.
+
+    :ivar total_bytes: int, the expected number of bytes needed to fetch the
+ currently pending batch.
+ """
+
+ def __init__(self, gcvf, locations, get_compressor_settings=None):
+ self.gcvf = gcvf
+ self.locations = locations
+ self.keys = []
+ self.batch_memos = {}
+ self.memos_to_get = []
+ self.total_bytes = 0
+ self.last_read_memo = None
+ self.manager = None
+ self._get_compressor_settings = get_compressor_settings
+
+ def add_key(self, key):
+ """Add another to key to fetch.
+
+ :return: The estimated number of bytes needed to fetch the batch so
+ far.
+ """
+ self.keys.append(key)
+ index_memo, _, _, _ = self.locations[key]
+ read_memo = index_memo[0:3]
+ # Three possibilities for this read_memo:
+ # - it's already part of this batch; or
+ # - it's not yet part of this batch, but is already cached; or
+ # - it's not yet part of this batch and will need to be fetched.
+ if read_memo in self.batch_memos:
+ # This read memo is already in this batch.
+ return self.total_bytes
+ try:
+ cached_block = self.gcvf._group_cache[read_memo]
+ except KeyError:
+ # This read memo is new to this batch, and the data isn't cached
+ # either.
+ self.batch_memos[read_memo] = None
+ self.memos_to_get.append(read_memo)
+ byte_length = read_memo[2]
+ self.total_bytes += byte_length
+ else:
+ # This read memo is new to this batch, but cached.
+ # Keep a reference to the cached block in batch_memos because it's
+ # certain that we'll use it when this batch is processed, but
+ # there's a risk that it would fall out of _group_cache between now
+ # and then.
+ self.batch_memos[read_memo] = cached_block
+ return self.total_bytes
+
+ def _flush_manager(self):
+ if self.manager is not None:
+ for factory in self.manager.get_record_stream():
+ yield factory
+ self.manager = None
+ self.last_read_memo = None
+
+ def yield_factories(self, full_flush=False):
+ """Yield factories for keys added since the last yield. They will be
+ returned in the order they were added via add_key.
+
+ :param full_flush: by default, some results may not be returned in case
+ they can be part of the next batch. If full_flush is True, then
+ all results are returned.
+ """
+ if self.manager is None and not self.keys:
+ return
+ # Fetch all memos in this batch.
+ blocks = self.gcvf._get_blocks(self.memos_to_get)
+ # Turn blocks into factories and yield them.
+ memos_to_get_stack = list(self.memos_to_get)
+ memos_to_get_stack.reverse()
+ for key in self.keys:
+ index_memo, _, parents, _ = self.locations[key]
+ read_memo = index_memo[:3]
+ if self.last_read_memo != read_memo:
+ # We are starting a new block. If we have a
+ # manager, we have found everything that fits for
+ # now, so yield records
+ for factory in self._flush_manager():
+ yield factory
+ # Now start a new manager.
+ if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
+ # The next block from _get_blocks will be the block we
+ # need.
+ block_read_memo, block = blocks.next()
+ if block_read_memo != read_memo:
+ raise AssertionError(
+ "block_read_memo out of sync with read_memo"
+ "(%r != %r)" % (block_read_memo, read_memo))
+ self.batch_memos[read_memo] = block
+ memos_to_get_stack.pop()
+ else:
+ block = self.batch_memos[read_memo]
+ self.manager = _LazyGroupContentManager(block,
+ get_compressor_settings=self._get_compressor_settings)
+ self.last_read_memo = read_memo
+ start, end = index_memo[3:5]
+ self.manager.add_factory(key, parents, start, end)
+ if full_flush:
+ for factory in self._flush_manager():
+ yield factory
+ del self.keys[:]
+ self.batch_memos.clear()
+ del self.memos_to_get[:]
+ self.total_bytes = 0
+
+
+class GroupCompressVersionedFiles(VersionedFilesWithFallbacks):
+ """A group-compress based VersionedFiles implementation."""
+
+ # This controls how the GroupCompress DeltaIndex works. Basically, we
+ # compute hash pointers into the source blocks (so hash(text) => text).
+ # However each of these references costs some memory in trade against a
+ # more accurate match result. For very large files, they either are
+ # pre-compressed and change in bulk whenever they change, or change in just
+ # local blocks. Either way, 'improved resolution' is not very helpful,
+ # versus running out of memory trying to track everything. The default max
+ # gives 100% sampling of a 1MB file.
+ _DEFAULT_MAX_BYTES_TO_INDEX = 1024 * 1024
+ _DEFAULT_COMPRESSOR_SETTINGS = {'max_bytes_to_index':
+ _DEFAULT_MAX_BYTES_TO_INDEX}
+
+ def __init__(self, index, access, delta=True, _unadded_refs=None,
+ _group_cache=None):
+ """Create a GroupCompressVersionedFiles object.
+
+ :param index: The index object storing access and graph data.
+ :param access: The access object storing raw data.
+ :param delta: Whether to delta compress or just entropy compress.
+ :param _unadded_refs: private parameter, don't use.
+ :param _group_cache: private parameter, don't use.
+ """
+ self._index = index
+ self._access = access
+ self._delta = delta
+ if _unadded_refs is None:
+ _unadded_refs = {}
+ self._unadded_refs = _unadded_refs
+ if _group_cache is None:
+ _group_cache = LRUSizeCache(max_size=50*1024*1024)
+ self._group_cache = _group_cache
+ self._immediate_fallback_vfs = []
+ self._max_bytes_to_index = None
+
+ def without_fallbacks(self):
+ """Return a clone of this object without any fallbacks configured."""
+ return GroupCompressVersionedFiles(self._index, self._access,
+ self._delta, _unadded_refs=dict(self._unadded_refs),
+ _group_cache=self._group_cache)
+
+ def add_lines(self, key, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ """Add a text to the store.
+
+ :param key: The key tuple of the text to add.
+ :param parents: The parents key tuples of the text to add.
+ :param lines: A list of lines. Each line must be a bytestring. And all
+ of them except the last must be terminated with \\n and contain no
+ other \\n's. The last line may either contain no \\n's or a single
+            terminating \\n. If the lines list does not meet this constraint, the
+ add routine may error or may succeed - but you will be unable to
+ read the data back accurately. (Checking the lines have been split
+ correctly is expensive and extremely unlikely to catch bugs so it
+ is not done at runtime unless check_content is True.)
+ :param parent_texts: An optional dictionary containing the opaque
+ representations of some or all of the parents of version_id to
+ allow delta optimisations. VERY IMPORTANT: the texts must be those
+ returned by add_lines or data corruption can be caused.
+ :param left_matching_blocks: a hint about which areas are common
+ between the text and its left-hand-parent. The format is
+ the SequenceMatcher.get_matching_blocks format.
+ :param nostore_sha: Raise ExistingContent and do not add the lines to
+ the versioned file if the digest of the lines matches this.
+ :param random_id: If True a random id has been selected rather than
+ an id determined by some deterministic process such as a converter
+ from a foreign VCS. When True the backend may choose not to check
+ for uniqueness of the resulting key within the versioned file, so
+ this should only be done when the result is expected to be unique
+ anyway.
+ :param check_content: If True, the lines supplied are verified to be
+ bytestrings that are correctly formed lines.
+ :return: The text sha1, the number of bytes in the text, and an opaque
+ representation of the inserted version which can be provided
+ back to future add_lines calls in the parent_texts dictionary.
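+
+        A minimal illustrative call (the ``vf`` object, key and parent key
+        are hypothetical)::
+
+            sha1, num_bytes, _ = vf.add_lines(('file-id', 'rev-2'),
+                [('file-id', 'rev-1')], ['one\\n', 'two\\n'])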
+ """
+ self._index._check_write_ok()
+ self._check_add(key, lines, random_id, check_content)
+ if parents is None:
+ # The caller might pass None if there is no graph data, but kndx
+ # indexes can't directly store that, so we give them
+ # an empty tuple instead.
+ parents = ()
+ # double handling for now. Make it work until then.
+ length = sum(map(len, lines))
+ record = ChunkedContentFactory(key, parents, None, lines)
+ sha1 = list(self._insert_record_stream([record], random_id=random_id,
+ nostore_sha=nostore_sha))[0]
+ return sha1, length, None
+
+ def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
+ """See VersionedFiles._add_text()."""
+ self._index._check_write_ok()
+ self._check_add(key, None, random_id, check_content=False)
+ if text.__class__ is not str:
+ raise errors.BzrBadParameterUnicode("text")
+ if parents is None:
+ # The caller might pass None if there is no graph data, but kndx
+ # indexes can't directly store that, so we give them
+ # an empty tuple instead.
+ parents = ()
+ # double handling for now. Make it work until then.
+ length = len(text)
+ record = FulltextContentFactory(key, parents, None, text)
+ sha1 = list(self._insert_record_stream([record], random_id=random_id,
+ nostore_sha=nostore_sha))[0]
+ return sha1, length, None
+
+ def add_fallback_versioned_files(self, a_versioned_files):
+ """Add a source of texts for texts not present in this knit.
+
+ :param a_versioned_files: A VersionedFiles object.
+ """
+ self._immediate_fallback_vfs.append(a_versioned_files)
+
+ def annotate(self, key):
+ """See VersionedFiles.annotate."""
+ ann = annotate.Annotator(self)
+ return ann.annotate_flat(key)
+
+ def get_annotator(self):
+ return annotate.Annotator(self)
+
+ def check(self, progress_bar=None, keys=None):
+ """See VersionedFiles.check()."""
+ if keys is None:
+ keys = self.keys()
+            for record in self.get_record_stream(keys, 'unordered', True):
+                record.get_bytes_as('fulltext')
+ else:
+ return self.get_record_stream(keys, 'unordered', True)
+
+ def clear_cache(self):
+ """See VersionedFiles.clear_cache()"""
+ self._group_cache.clear()
+ self._index._graph_index.clear_cache()
+ self._index._int_cache.clear()
+
+ def _check_add(self, key, lines, random_id, check_content):
+ """check that version_id and lines are safe to add."""
+ version_id = key[-1]
+ if version_id is not None:
+ if osutils.contains_whitespace(version_id):
+ raise errors.InvalidRevisionId(version_id, self)
+ self.check_not_reserved_id(version_id)
+ # TODO: If random_id==False and the key is already present, we should
+ # probably check that the existing content is identical to what is
+ # being inserted, and otherwise raise an exception. This would make
+ # the bundle code simpler.
+ if check_content:
+ self._check_lines_not_unicode(lines)
+ self._check_lines_are_lines(lines)
+
+ def get_parent_map(self, keys):
+ """Get a map of the graph parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ return self._get_parent_map_with_sources(keys)[0]
+
+ def _get_parent_map_with_sources(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A tuple. The first element is a mapping from keys to parents.
+ Absent keys are absent from the mapping. The second element is a
+ list with the locations each key was found in. The first element
+ is the in-this-knit parents, the second the first fallback source,
+ and so on.
+ """
+ result = {}
+ sources = [self._index] + self._immediate_fallback_vfs
+ source_results = []
+ missing = set(keys)
+ for source in sources:
+ if not missing:
+ break
+ new_result = source.get_parent_map(missing)
+ source_results.append(new_result)
+ result.update(new_result)
+ missing.difference_update(set(new_result))
+ return result, source_results
+
+ def _get_blocks(self, read_memos):
+ """Get GroupCompressBlocks for the given read_memos.
+
+ :returns: a series of (read_memo, block) pairs, in the order they were
+ originally passed.
+ """
+ cached = {}
+ for read_memo in read_memos:
+ try:
+ block = self._group_cache[read_memo]
+ except KeyError:
+ pass
+ else:
+ cached[read_memo] = block
+ not_cached = []
+ not_cached_seen = set()
+ for read_memo in read_memos:
+ if read_memo in cached:
+ # Don't fetch what we already have
+ continue
+ if read_memo in not_cached_seen:
+ # Don't try to fetch the same data twice
+ continue
+ not_cached.append(read_memo)
+ not_cached_seen.add(read_memo)
+ raw_records = self._access.get_raw_records(not_cached)
+ for read_memo in read_memos:
+ try:
+ yield read_memo, cached[read_memo]
+ except KeyError:
+ # Read the block, and cache it.
+ zdata = raw_records.next()
+ block = GroupCompressBlock.from_bytes(zdata)
+ self._group_cache[read_memo] = block
+ cached[read_memo] = block
+ yield read_memo, block
+
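The shape of _get_blocks above is a reusable pattern: first collect the read_memos that miss the cache (deduplicated, in request order), fetch those in one batch, then yield (read_memo, block) pairs back in the caller's original order. A minimal standalone sketch of the same idea, using a plain dict as the cache and a hypothetical fetch_many callable standing in for self._access.get_raw_records::

    def fetch_in_order(memos, cache, fetch_many):
        # First pass: collect the cache misses, deduplicated, in request order.
        to_fetch, seen = [], set()
        for memo in memos:
            if memo not in cache and memo not in seen:
                to_fetch.append(memo)
                seen.add(memo)
        fetched = iter(fetch_many(to_fetch))
        # Second pass: yield results in the original order, caching as we go.
        for memo in memos:
            if memo not in cache:
                cache[memo] = next(fetched)
            yield memo, cache[memo]

    # Illustrative usage against an in-memory "backend".
    backend = {'a': 'block-A', 'b': 'block-B'}
    cache = {}
    pairs = list(fetch_in_order(['a', 'b', 'a'], cache,
                                lambda wanted: [backend[m] for m in wanted]))
    # pairs == [('a', 'block-A'), ('b', 'block-B'), ('a', 'block-A')]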
+ def get_missing_compression_parent_keys(self):
+ """Return the keys of missing compression parents.
+
+ Missing compression parents occur when a record stream was missing
+ basis texts, or an index was scanned that had missing basis texts.
+ """
+ # GroupCompress cannot currently reference texts that are not in the
+ # group, so this is valid for now
+ return frozenset()
+
+ def get_record_stream(self, keys, ordering, include_delta_closure):
+ """Get a stream of records for keys.
+
+ :param keys: The keys to include.
+ :param ordering: Either 'unordered' or 'topological'. A topologically
+ sorted stream has compression parents strictly before their
+ children.
+ :param include_delta_closure: If True then the closure across any
+ compression parents will be included (in the opaque data).
+ :return: An iterator of ContentFactory objects, each of which is only
+ valid until the iterator is advanced.
+ """
+ # keys might be a generator
+ orig_keys = list(keys)
+ keys = set(keys)
+ if not keys:
+ return
+ if (not self._index.has_graph
+ and ordering in ('topological', 'groupcompress')):
+ # Cannot topological order when no graph has been stored.
+ # but we allow 'as-requested' or 'unordered'
+ ordering = 'unordered'
+
+ remaining_keys = keys
+ while True:
+ try:
+ keys = set(remaining_keys)
+ for content_factory in self._get_remaining_record_stream(keys,
+ orig_keys, ordering, include_delta_closure):
+ remaining_keys.discard(content_factory.key)
+ yield content_factory
+ return
+ except errors.RetryWithNewPacks, e:
+ self._access.reload_or_raise(e)
+
+ def _find_from_fallback(self, missing):
+ """Find whatever keys you can from the fallbacks.
+
+ :param missing: A set of missing keys. This set will be mutated as keys
+ are found from a fallback_vfs
+ :return: (parent_map, key_to_source_map, source_results)
+ parent_map the overall key => parent_keys
+ key_to_source_map a dict from {key: source}
+ source_results a list of (source, keys) tuples
+ """
+ parent_map = {}
+ key_to_source_map = {}
+ source_results = []
+ for source in self._immediate_fallback_vfs:
+ if not missing:
+ break
+ source_parents = source.get_parent_map(missing)
+ parent_map.update(source_parents)
+ source_parents = list(source_parents)
+ source_results.append((source, source_parents))
+ key_to_source_map.update((key, source) for key in source_parents)
+ missing.difference_update(source_parents)
+ return parent_map, key_to_source_map, source_results
+
+ def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
+ """Get the (source, [keys]) list.
+
+ The returned objects should be in the order defined by 'ordering',
+ which can weave between different sources.
+
+ :param ordering: Must be one of 'topological' or 'groupcompress'
+ :return: List of [(source, [keys])] tuples, such that all keys are in
+ the defined order, regardless of source.
+ """
+ if ordering == 'topological':
+ present_keys = tsort.topo_sort(parent_map)
+ else:
+ # ordering == 'groupcompress'
+ # XXX: This only optimizes for the target ordering. We may need
+ # to balance that with the time it takes to extract
+ # ordering, by somehow grouping based on
+ # locations[key][0:3]
+ present_keys = sort_gc_optimal(parent_map)
+ # Now group by source:
+ source_keys = []
+ current_source = None
+ for key in present_keys:
+ source = key_to_source_map.get(key, self)
+ if source is not current_source:
+ source_keys.append((source, []))
+ current_source = source
+ source_keys[-1][1].append(key)
+ return source_keys
+
+ def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
+ key_to_source_map):
+ source_keys = []
+ current_source = None
+ for key in orig_keys:
+ if key in locations or key in unadded_keys:
+ source = self
+ elif key in key_to_source_map:
+ source = key_to_source_map[key]
+ else: # absent
+ continue
+ if source is not current_source:
+ source_keys.append((source, []))
+ current_source = source
+ source_keys[-1][1].append(key)
+ return source_keys
+
+ def _get_io_ordered_source_keys(self, locations, unadded_keys,
+ source_result):
+ def get_group(key):
+ # This is the group the bytes are stored in, followed by the
+ # location in the group
+ return locations[key][0]
+ present_keys = sorted(locations.iterkeys(), key=get_group)
+ # We don't have an ordering for keys in the in-memory object, but
+ # let's process the in-memory ones first.
+ present_keys = list(unadded_keys) + present_keys
+ # Now grab all of the ones from other sources
+ source_keys = [(self, present_keys)]
+ source_keys.extend(source_result)
+ return source_keys
+
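The point of _get_io_ordered_source_keys is that locations[key][0] identifies the group (block) holding a key's bytes, so sorting by it makes keys from the same block adjacent and each block is read only once. A rough sketch of that grouping step in isolation, with an invented locations mapping::

    def keys_in_io_order(locations):
        # Sort keys by the block that stores them (and the offset within it),
        # so that each block is read once and swept front to back.
        return sorted(locations, key=lambda key: locations[key])

    # Invented data: key -> (block id, offset within the block).
    locations = {('rev-1',): ('block-7', 0),
                 ('rev-2',): ('block-3', 0),
                 ('rev-3',): ('block-7', 4096)}
    assert keys_in_io_order(locations) == [('rev-2',), ('rev-1',), ('rev-3',)]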
+ def _get_remaining_record_stream(self, keys, orig_keys, ordering,
+ include_delta_closure):
+ """Get a stream of records for keys.
+
+ :param keys: The keys to include.
+ :param ordering: one of 'unordered', 'topological', 'groupcompress' or
+ 'as-requested'
+ :param include_delta_closure: If True then the closure across any
+ compression parents will be included (in the opaque data).
+ :return: An iterator of ContentFactory objects, each of which is only
+ valid until the iterator is advanced.
+ """
+ # Cheap: iterate
+ locations = self._index.get_build_details(keys)
+ unadded_keys = set(self._unadded_refs).intersection(keys)
+ missing = keys.difference(locations)
+ missing.difference_update(unadded_keys)
+ (fallback_parent_map, key_to_source_map,
+ source_result) = self._find_from_fallback(missing)
+ if ordering in ('topological', 'groupcompress'):
+ # would be better to not globally sort initially but instead
+ # start with one key, recurse to its oldest parent, then grab
+ # everything in the same group, etc.
+ parent_map = dict((key, details[2]) for key, details in
+ locations.iteritems())
+ for key in unadded_keys:
+ parent_map[key] = self._unadded_refs[key]
+ parent_map.update(fallback_parent_map)
+ source_keys = self._get_ordered_source_keys(ordering, parent_map,
+ key_to_source_map)
+ elif ordering == 'as-requested':
+ source_keys = self._get_as_requested_source_keys(orig_keys,
+ locations, unadded_keys, key_to_source_map)
+ else:
+ # We want to yield the keys in a semi-optimal (read-wise) ordering.
+ # Otherwise we thrash the _group_cache and destroy performance
+ source_keys = self._get_io_ordered_source_keys(locations,
+ unadded_keys, source_result)
+ for key in missing:
+ yield AbsentContentFactory(key)
+ # Batch up as many keys as we can until either:
+ # - we encounter an unadded ref, or
+ # - we run out of keys, or
+ # - the total bytes to retrieve for this batch > BATCH_SIZE
+ batcher = _BatchingBlockFetcher(self, locations,
+ get_compressor_settings=self._get_compressor_settings)
+ for source, keys in source_keys:
+ if source is self:
+ for key in keys:
+ if key in self._unadded_refs:
+ # Flush batch, then yield unadded ref from
+ # self._compressor.
+ for factory in batcher.yield_factories(full_flush=True):
+ yield factory
+ bytes, sha1 = self._compressor.extract(key)
+ parents = self._unadded_refs[key]
+ yield FulltextContentFactory(key, parents, sha1, bytes)
+ continue
+ if batcher.add_key(key) > BATCH_SIZE:
+ # Ok, this batch is big enough. Yield some results.
+ for factory in batcher.yield_factories():
+ yield factory
+ else:
+ for factory in batcher.yield_factories(full_flush=True):
+ yield factory
+ for record in source.get_record_stream(keys, ordering,
+ include_delta_closure):
+ yield record
+ for factory in batcher.yield_factories(full_flush=True):
+ yield factory
+
+ def get_sha1s(self, keys):
+ """See VersionedFiles.get_sha1s()."""
+ result = {}
+ for record in self.get_record_stream(keys, 'unordered', True):
+ if record.sha1 is not None:
+ result[record.key] = record.sha1
+ else:
+ if record.storage_kind != 'absent':
+ result[record.key] = osutils.sha_string(
+ record.get_bytes_as('fulltext'))
+ return result
+
+ def insert_record_stream(self, stream):
+ """Insert a record stream into this container.
+
+ :param stream: A stream of records to insert.
+ :return: None
+ :seealso VersionedFiles.get_record_stream:
+ """
+ # XXX: Setting random_id=True makes
+ # test_insert_record_stream_existing_keys fail for groupcompress and
+ # groupcompress-nograph, this needs to be revisited while addressing
+ # 'bzr branch' performance issues.
+ for _ in self._insert_record_stream(stream, random_id=False):
+ pass
+
+ def _get_compressor_settings(self):
+ if self._max_bytes_to_index is None:
+ # TODO: VersionedFiles don't know about their containing
+ # repository, so they don't have much of an idea about their
+ # location. So for now, this is only a global option.
+ c = config.GlobalConfig()
+ val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
+ if val is not None:
+ try:
+ val = int(val)
+ except ValueError, e:
+ trace.warning('Value for '
+ '"bzr.groupcompress.max_bytes_to_index"'
+ ' %r is not an integer'
+ % (val,))
+ val = None
+ if val is None:
+ val = self._DEFAULT_MAX_BYTES_TO_INDEX
+ self._max_bytes_to_index = val
+ return {'max_bytes_to_index': self._max_bytes_to_index}
+
+ def _make_group_compressor(self):
+ return GroupCompressor(self._get_compressor_settings())
+
+ def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
+ reuse_blocks=True):
+ """Internal core to insert a record stream into this container.
+
+ This helper function has a different interface than insert_record_stream
+ to allow add_lines to be minimal, but still return the needed data.
+
+ :param stream: A stream of records to insert.
+ :param nostore_sha: If the sha1 of a given text matches nostore_sha,
+ raise ExistingContent, rather than committing the new text.
+ :param reuse_blocks: If the source is streaming from
+ groupcompress-blocks, just insert the blocks as-is, rather than
+ expanding the texts and inserting again.
+ :return: An iterator over the sha1 of the inserted records.
+ :seealso insert_record_stream:
+ :seealso add_lines:
+ """
+ adapters = {}
+ def get_adapter(adapter_key):
+ try:
+ return adapters[adapter_key]
+ except KeyError:
+ adapter_factory = adapter_registry.get(adapter_key)
+ adapter = adapter_factory(self)
+ adapters[adapter_key] = adapter
+ return adapter
+ # This will go up to fulltexts for gc to gc fetching, which isn't
+ # ideal.
+ self._compressor = self._make_group_compressor()
+ self._unadded_refs = {}
+ keys_to_add = []
+ def flush():
+ bytes_len, chunks = self._compressor.flush().to_chunks()
+ self._compressor = self._make_group_compressor()
+ # Note: At this point we still have 1 copy of the fulltext (in
+ # record and the var 'bytes'), and this generates 2 copies of
+ # the compressed text (one for bytes, one in chunks)
+ # TODO: Push 'chunks' down into the _access api, so that we don't
+ # have to double compressed memory here
+ # TODO: Figure out how to indicate that we would be happy to free
+ # the fulltext content at this point. Note that sometimes we
+ # will want it later (streaming CHK pages), but most of the
+ # time we won't (everything else)
+ bytes = ''.join(chunks)
+ del chunks
+ index, start, length = self._access.add_raw_records(
+ [(None, len(bytes))], bytes)[0]
+ nodes = []
+ for key, reads, refs in keys_to_add:
+ nodes.append((key, "%d %d %s" % (start, length, reads), refs))
+ self._index.add_records(nodes, random_id=random_id)
+ self._unadded_refs = {}
+ del keys_to_add[:]
+
+ last_prefix = None
+ max_fulltext_len = 0
+ max_fulltext_prefix = None
+ insert_manager = None
+ block_start = None
+ block_length = None
+ # XXX: TODO: remove this, it is just for safety checking for now
+ inserted_keys = set()
+ reuse_this_block = reuse_blocks
+ for record in stream:
+ # Raise an error when a record is missing.
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(record.key, self)
+ if random_id:
+ if record.key in inserted_keys:
+ trace.note(gettext('Insert claimed random_id=True,'
+ ' but then inserted %r two times'), record.key)
+ continue
+ inserted_keys.add(record.key)
+ if reuse_blocks:
+ # If the reuse_blocks flag is set, check to see if we can just
+ # copy a groupcompress block as-is.
+ # We only check on the first record (groupcompress-block) not
+ # on all of the (groupcompress-block-ref) entries.
+ # The reuse_this_block flag is then kept for as long as we keep
+ # seeing (groupcompress-block-ref) records from that same block.
+ if record.storage_kind == 'groupcompress-block':
+ # Check to see if we really want to re-use this block
+ insert_manager = record._manager
+ reuse_this_block = insert_manager.check_is_well_utilized()
+ else:
+ reuse_this_block = False
+ if reuse_this_block:
+ # We still want to reuse this block
+ if record.storage_kind == 'groupcompress-block':
+ # Insert the raw block into the target repo
+ insert_manager = record._manager
+ bytes = record._manager._block.to_bytes()
+ _, start, length = self._access.add_raw_records(
+ [(None, len(bytes))], bytes)[0]
+ del bytes
+ block_start = start
+ block_length = length
+ if record.storage_kind in ('groupcompress-block',
+ 'groupcompress-block-ref'):
+ if insert_manager is None:
+ raise AssertionError('No insert_manager set')
+ if insert_manager is not record._manager:
+ raise AssertionError('insert_manager does not match'
+ ' the current record, we cannot be positive'
+ ' that the appropriate content was inserted.'
+ )
+ value = "%d %d %d %d" % (block_start, block_length,
+ record._start, record._end)
+ nodes = [(record.key, value, (record.parents,))]
+ # TODO: Consider buffering up many nodes to be added, not
+ # sure how much overhead this has, but we're seeing
+ # ~23s / 120s in add_records calls
+ self._index.add_records(nodes, random_id=random_id)
+ continue
+ try:
+ bytes = record.get_bytes_as('fulltext')
+ except errors.UnavailableRepresentation:
+ adapter_key = record.storage_kind, 'fulltext'
+ adapter = get_adapter(adapter_key)
+ bytes = adapter.get_bytes(record)
+ if len(record.key) > 1:
+ prefix = record.key[0]
+ soft = (prefix == last_prefix)
+ else:
+ prefix = None
+ soft = False
+ if max_fulltext_len < len(bytes):
+ max_fulltext_len = len(bytes)
+ max_fulltext_prefix = prefix
+ (found_sha1, start_point, end_point,
+ type) = self._compressor.compress(record.key,
+ bytes, record.sha1, soft=soft,
+ nostore_sha=nostore_sha)
+ # delta_ratio = float(len(bytes)) / (end_point - start_point)
+ # Check if we want to continue to include that text
+ if (prefix == max_fulltext_prefix
+ and end_point < 2 * max_fulltext_len):
+ # As long as we are on the same file_id, we will fill at least
+ # 2 * max_fulltext_len
+ start_new_block = False
+ elif end_point > 4*1024*1024:
+ start_new_block = True
+ elif (prefix is not None and prefix != last_prefix
+ and end_point > 2*1024*1024):
+ start_new_block = True
+ else:
+ start_new_block = False
+ last_prefix = prefix
+ if start_new_block:
+ self._compressor.pop_last()
+ flush()
+ max_fulltext_len = len(bytes)
+ (found_sha1, start_point, end_point,
+ type) = self._compressor.compress(record.key, bytes,
+ record.sha1)
+ if record.key[-1] is None:
+ key = record.key[:-1] + ('sha1:' + found_sha1,)
+ else:
+ key = record.key
+ self._unadded_refs[key] = record.parents
+ yield found_sha1
+ as_st = static_tuple.StaticTuple.from_sequence
+ if record.parents is not None:
+ parents = as_st([as_st(p) for p in record.parents])
+ else:
+ parents = None
+ refs = static_tuple.StaticTuple(parents)
+ keys_to_add.append((key, '%d %d' % (start_point, end_point), refs))
+ if len(keys_to_add):
+ flush()
+ self._compressor = None
+
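The block-splitting decision inside _insert_record_stream reads naturally as a small pure function: keep filling the current block while the same file prefix keeps it under twice the largest fulltext seen, always split past roughly 4MB, and split early at roughly 2MB when the prefix changes. A sketch with the thresholds pulled out as named constants (the constant and function names are illustrative, not bzrlib API)::

    HARD_BLOCK_LIMIT = 4 * 1024 * 1024  # always start a new block past this
    SOFT_BLOCK_LIMIT = 2 * 1024 * 1024  # start a new block here on a prefix change

    def should_start_new_block(prefix, last_prefix, end_point,
                               max_fulltext_prefix, max_fulltext_len):
        if prefix == max_fulltext_prefix and end_point < 2 * max_fulltext_len:
            # Still on the file that produced the largest fulltext and the
            # block is under twice that size: keep filling it.
            return False
        if end_point > HARD_BLOCK_LIMIT:
            return True
        if (prefix is not None and prefix != last_prefix
                and end_point > SOFT_BLOCK_LIMIT):
            return True
        return False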
+ def iter_lines_added_or_present_in_keys(self, keys, pb=None):
+ """Iterate over the lines in the versioned files from keys.
+
+ This may return lines from other keys. Each item the returned
+ iterator yields is a tuple of a line and a text version in which that
+ line is present (not introduced).
+
+ Ordering of results is in whatever order is most suitable for the
+ underlying storage format.
+
+ If a progress bar is supplied, it may be used to indicate progress.
+ The caller is responsible for cleaning up progress bars (because this
+ is an iterator).
+
+ NOTES:
+ * Lines are normalised by the underlying store: they will all have \n
+ terminators.
+ * Lines are returned in arbitrary order.
+
+ :return: An iterator over (line, key).
+ """
+ keys = set(keys)
+ total = len(keys)
+ # we don't care about inclusions, the caller cares.
+ # but we need to set up a list of records to visit.
+ # we need key, position, length
+ for key_idx, record in enumerate(self.get_record_stream(keys,
+ 'unordered', True)):
+ # XXX: todo - optimise to use less than full texts.
+ key = record.key
+ if pb is not None:
+ pb.update('Walking content', key_idx, total)
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(key, self)
+ lines = osutils.split_lines(record.get_bytes_as('fulltext'))
+ for line in lines:
+ yield line, key
+ if pb is not None:
+ pb.update('Walking content', total, total)
+
+ def keys(self):
+ """See VersionedFiles.keys."""
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(2, "keys scales with size of history")
+ sources = [self._index] + self._immediate_fallback_vfs
+ result = set()
+ for source in sources:
+ result.update(source.keys())
+ return result
+
+
+class _GCBuildDetails(object):
+ """A blob of data about the build details.
+
+ This stores the minimal data, which then allows compatibility with the old
+ api, without taking as much memory.
+ """
+
+ __slots__ = ('_index', '_group_start', '_group_end', '_basis_end',
+ '_delta_end', '_parents')
+
+ method = 'group'
+ compression_parent = None
+
+ def __init__(self, parents, position_info):
+ self._parents = parents
+ (self._index, self._group_start, self._group_end, self._basis_end,
+ self._delta_end) = position_info
+
+ def __repr__(self):
+ return '%s(%s, %s)' % (self.__class__.__name__,
+ self.index_memo, self._parents)
+
+ @property
+ def index_memo(self):
+ return (self._index, self._group_start, self._group_end,
+ self._basis_end, self._delta_end)
+
+ @property
+ def record_details(self):
+ return static_tuple.StaticTuple(self.method, None)
+
+ def __getitem__(self, offset):
+ """Compatibility thunk to act like a tuple."""
+ if offset == 0:
+ return self.index_memo
+ elif offset == 1:
+ return self.compression_parent # Always None
+ elif offset == 2:
+ return self._parents
+ elif offset == 3:
+ return self.record_details
+ else:
+ raise IndexError('offset out of range')
+
+ def __len__(self):
+ return 4
+
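_GCBuildDetails works because of the sequence protocol: defining __getitem__ and __len__ lets callers written against the old 4-tuple API keep indexing and unpacking it, while only the position ints and parents are actually stored. A toy version of the same thunk pattern, unrelated to bzrlib's fields::

    class TupleThunk(object):
        """Acts like the 2-tuple (index_memo, parents) without building one."""

        __slots__ = ('index_memo', 'parents')

        def __init__(self, index_memo, parents):
            self.index_memo = index_memo
            self.parents = parents

        def __getitem__(self, offset):
            # Raises IndexError past the end, which is what makes unpacking
            # and plain iteration terminate correctly.
            return (self.index_memo, self.parents)[offset]

        def __len__(self):
            return 2

    details = TupleThunk(('idx', 0, 100), (('parent-key',),))
    index_memo, parents = details      # unpacks via __getitem__
    assert index_memo == ('idx', 0, 100) and len(details) == 2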
+
+class _GCGraphIndex(object):
+ """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""
+
+ def __init__(self, graph_index, is_locked, parents=True,
+ add_callback=None, track_external_parent_refs=False,
+ inconsistency_fatal=True, track_new_keys=False):
+ """Construct a _GCGraphIndex on a graph_index.
+
+ :param graph_index: An implementation of bzrlib.index.GraphIndex.
+ :param is_locked: A callback, returns True if the index is locked and
+ thus usable.
+ :param parents: If True, record knits parents, if not do not record
+ parents.
+ :param add_callback: If not None, allow additions to the index and call
+ this callback with a list of added GraphIndex nodes:
+ [(node, value, node_refs), ...]
+ :param track_external_parent_refs: As keys are added, keep track of the
+ keys they reference, so that we can query get_missing_parents(),
+ etc.
+ :param inconsistency_fatal: When asked to add records that are already
+ present, and the details are inconsistent with the existing
+ record, raise an exception instead of warning (and skipping the
+ record).
+ """
+ self._add_callback = add_callback
+ self._graph_index = graph_index
+ self._parents = parents
+ self.has_graph = parents
+ self._is_locked = is_locked
+ self._inconsistency_fatal = inconsistency_fatal
+ # GroupCompress records tend to have the same 'group' start + offset
+ # repeated over and over, this creates a surplus of ints
+ self._int_cache = {}
+ if track_external_parent_refs:
+ self._key_dependencies = _KeyRefs(
+ track_new_keys=track_new_keys)
+ else:
+ self._key_dependencies = None
+
+ def add_records(self, records, random_id=False):
+ """Add multiple records to the index.
+
+ This function does not insert data into the Immutable GraphIndex
+ backing this index; instead it prepares data for insertion by
+ the caller, checks that it is safe to insert, and then calls
+ self._add_callback with the prepared GraphIndex nodes.
+
+ :param records: a list of tuples:
+ (key, options, access_memo, parents).
+ :param random_id: If True the ids being added were randomly generated
+ and no check for existence will be performed.
+ """
+ if not self._add_callback:
+ raise errors.ReadOnlyError(self)
+ # we hope there are no repositories with inconsistent parentage
+ # anymore.
+
+ changed = False
+ keys = {}
+ for (key, value, refs) in records:
+ if not self._parents:
+ if refs:
+ for ref in refs:
+ if ref:
+ raise errors.KnitCorrupt(self,
+ "attempt to add node with parents "
+ "in parentless index.")
+ refs = ()
+ changed = True
+ keys[key] = (value, refs)
+ # check for dups
+ if not random_id:
+ present_nodes = self._get_entries(keys)
+ for (index, key, value, node_refs) in present_nodes:
+ # Sometimes these are passed as a list rather than a tuple
+ node_refs = static_tuple.as_tuples(node_refs)
+ passed = static_tuple.as_tuples(keys[key])
+ if node_refs != passed[1]:
+ details = '%s %s %s' % (key, (value, node_refs), passed)
+ if self._inconsistency_fatal:
+ raise errors.KnitCorrupt(self, "inconsistent details"
+ " in add_records: %s" %
+ details)
+ else:
+ trace.warning("inconsistent details in skipped"
+ " record: %s", details)
+ del keys[key]
+ changed = True
+ if changed:
+ result = []
+ if self._parents:
+ for key, (value, node_refs) in keys.iteritems():
+ result.append((key, value, node_refs))
+ else:
+ for key, (value, node_refs) in keys.iteritems():
+ result.append((key, value))
+ records = result
+ key_dependencies = self._key_dependencies
+ if key_dependencies is not None:
+ if self._parents:
+ for key, value, refs in records:
+ parents = refs[0]
+ key_dependencies.add_references(key, parents)
+ else:
+ for key, value, refs in records:
+ key_dependencies.add_key(key)
+ self._add_callback(records)
+
+ def _check_read(self):
+ """Raise an exception if reads are not permitted."""
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+
+ def _check_write_ok(self):
+ """Raise an exception if writes are not permitted."""
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+
+ def _get_entries(self, keys, check_present=False):
+ """Get the entries for keys.
+
+ Note: Callers are responsible for checking that the index is locked
+ before calling this method.
+
+ :param keys: An iterable of index key tuples.
+ """
+ keys = set(keys)
+ found_keys = set()
+ if self._parents:
+ for node in self._graph_index.iter_entries(keys):
+ yield node
+ found_keys.add(node[1])
+ else:
+ # adapt parentless index to the rest of the code.
+ for node in self._graph_index.iter_entries(keys):
+ yield node[0], node[1], node[2], ()
+ found_keys.add(node[1])
+ if check_present:
+ missing_keys = keys.difference(found_keys)
+ if missing_keys:
+ raise errors.RevisionNotPresent(missing_keys.pop(), self)
+
+ def find_ancestry(self, keys):
+ """See CombinedGraphIndex.find_ancestry"""
+ return self._graph_index.find_ancestry(keys, 0)
+
+ def get_parent_map(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ self._check_read()
+ nodes = self._get_entries(keys)
+ result = {}
+ if self._parents:
+ for node in nodes:
+ result[node[1]] = node[3][0]
+ else:
+ for node in nodes:
+ result[node[1]] = None
+ return result
+
+ def get_missing_parents(self):
+ """Return the keys of missing parents."""
+ # Copied from _KnitGraphIndex.get_missing_parents
+ # We may have false positives, so filter those out.
+ self._key_dependencies.satisfy_refs_for_keys(
+ self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
+ return frozenset(self._key_dependencies.get_unsatisfied_refs())
+
+ def get_build_details(self, keys):
+ """Get the various build details for keys.
+
+ Ghosts are omitted from the result.
+
+ :param keys: An iterable of keys.
+ :return: A dict of key:
+ (index_memo, compression_parent, parents, record_details).
+
+ * index_memo: opaque structure to pass to read_records to extract
+ the raw data
+ * compression_parent: Content that this record is built upon, may
+ be None
+ * parents: Logical parents of this node
+ * record_details: extra information about the content which needs
+ to be passed to Factory.parse_record
+ """
+ self._check_read()
+ result = {}
+ entries = self._get_entries(keys)
+ for entry in entries:
+ key = entry[1]
+ if not self._parents:
+ parents = None
+ else:
+ parents = entry[3][0]
+ details = _GCBuildDetails(parents, self._node_to_position(entry))
+ result[key] = details
+ return result
+
+ def keys(self):
+ """Get all the keys in the collection.
+
+ The keys are not ordered.
+ """
+ self._check_read()
+ return [node[1] for node in self._graph_index.iter_all_entries()]
+
+ def _node_to_position(self, node):
+ """Convert an index value to position details."""
+ bits = node[2].split(' ')
+ # It would be nice not to read the entire gzip.
+ # start and stop are put into _int_cache because they are very common.
+ # They define the 'group' that an entry is in, and many groups can have
+ # thousands of objects.
+ # Branching Launchpad, for example, saves ~600k integers, at 12 bytes
+ # each, or about 7MB. Note that it might be even more when you consider
+ # how PyInt is allocated in separate slabs. And you can't return a slab
+ # to the OS if even 1 int on it is in use. Note though that Python uses
+ # a LIFO when re-using PyInt slots, which might cause more
+ # fragmentation.
+ start = int(bits[0])
+ start = self._int_cache.setdefault(start, start)
+ stop = int(bits[1])
+ stop = self._int_cache.setdefault(stop, stop)
+ basis_end = int(bits[2])
+ delta_end = int(bits[3])
+ # We can't use StaticTuple here, because node[0] is a BTreeGraphIndex
+ # instance...
+ return (node[0], start, stop, basis_end, delta_end)
+
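The _int_cache trick in _node_to_position relies only on dict.setdefault returning the object already stored when the key is present, so the thousands of entries that share a group's start and stop offsets end up referencing one int object each. A minimal sketch, independent of bzrlib::

    def intern_value(cache, value):
        # Return a shared object equal to 'value'; repeated group offsets then
        # point at one cached int instead of a fresh allocation per entry.
        return cache.setdefault(value, value)

    cache = {}
    a = intern_value(cache, int('1048576'))
    b = intern_value(cache, int('1048576'))
    assert a == b and a is b    # same object, not merely equal values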
+ def scan_unvalidated_index(self, graph_index):
+ """Inform this _GCGraphIndex that there is an unvalidated index.
+
+ This allows this _GCGraphIndex to keep track of any missing
+ compression parents we may want to have filled in to make those
+ indices valid. It also allows _GCGraphIndex to track any new keys.
+
+ :param graph_index: A GraphIndex
+ """
+ key_dependencies = self._key_dependencies
+ if key_dependencies is None:
+ return
+ for node in graph_index.iter_all_entries():
+ # Add parent refs from graph_index (and discard parent refs
+ # that the graph_index has).
+ key_dependencies.add_references(node[1], node[3][0])
+
+
+from bzrlib._groupcompress_py import (
+ apply_delta,
+ apply_delta_to_source,
+ encode_base128_int,
+ decode_base128_int,
+ decode_copy_instruction,
+ LinesDeltaIndex,
+ )
+try:
+ from bzrlib._groupcompress_pyx import (
+ apply_delta,
+ apply_delta_to_source,
+ DeltaIndex,
+ encode_base128_int,
+ decode_base128_int,
+ )
+ GroupCompressor = PyrexGroupCompressor
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ GroupCompressor = PythonGroupCompressor
+
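The import block above is bzrlib's usual arrangement for optional compiled extensions: the pure-Python implementations are imported first and define the names, then the Pyrex/C versions replace them when the extension can be imported. The same shape reduced to one function, with an invented extension module name::

    def encode_base128_int(val):
        # Pure-Python reference: little-endian 7-bit groups, high bit set on
        # every byte except the last.
        data = bytearray()
        while val >= 0x80:
            data.append((val & 0x7F) | 0x80)
            val >>= 7
        data.append(val)
        return bytes(data)

    try:
        # Hypothetical compiled module; if it imports, its faster function
        # replaces the pure-Python one under the same name.
        from _fast_groupcompress import encode_base128_int
    except ImportError:
        pass    # keep the pure-Python definition above

    assert encode_base128_int(300) == b'\xac\x02'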
diff --git a/bzrlib/hashcache.py b/bzrlib/hashcache.py
new file mode 100644
index 0000000..2d40ffe
--- /dev/null
+++ b/bzrlib/hashcache.py
@@ -0,0 +1,320 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# TODO: Up-front, stat all files in order and remove those which are deleted or
+# out-of-date. Don't actually re-read them until they're needed. That ought
+# to bring all the inodes into core so that future stats to them are fast, and
+# it preserves the nice property that any caller will always get up-to-date
+# data except in unavoidable cases.
+
+# TODO: Perhaps return more details on the file to avoid statting it
+# again: nonexistent, file type, size, etc
+
+# TODO: Perhaps use a Python pickle instead of a text file; might be faster.
+
+
+
+CACHE_HEADER = "### bzr hashcache v5\n"
+
+import os
+import stat
+import time
+
+from bzrlib import (
+ atomicfile,
+ errors,
+ filters as _mod_filters,
+ osutils,
+ trace,
+ )
+
+
+FP_MTIME_COLUMN = 1
+FP_CTIME_COLUMN = 2
+FP_MODE_COLUMN = 5
+
+
+
+class HashCache(object):
+ """Cache for looking up file SHA-1.
+
+ Files are considered to match the cached value if the fingerprint
+ of the file has not changed. This includes its mtime, ctime,
+ device number, inode number, and size. This should catch
+ modifications or replacement of the file by a new one.
+
+ This may not catch modifications that do not change the file's
+ size and that occur within the resolution window of the
+ timestamps. To handle this we specifically do not cache files
+ which have changed since the start of the present second, since
+ they could undetectably change again.
+
+ This scheme may fail if the machine's clock steps backwards.
+ Don't do that.
+
+ This does not canonicalize the paths passed in; that should be
+ done by the caller.
+
+ _cache
+ Indexed by path, points to a two-tuple of the SHA-1 of the file
+ and its fingerprint.
+
+ stat_count
+ number of times files have been statted
+
+ hit_count
+ number of times files have been retrieved from the cache, avoiding a
+ re-read
+
+ miss_count
+ number of misses (times files have been completely re-read)
+ """
+ needs_write = False
+
+ def __init__(self, root, cache_file_name, mode=None,
+ content_filter_stack_provider=None):
+ """Create a hash cache in base dir, and set the file mode to mode.
+
+ :param content_filter_stack_provider: a function that takes a
+ path (relative to the top of the tree) and a file-id as
+ parameters and returns a stack of ContentFilters.
+ If None, no content filtering is performed.
+ """
+ self.root = osutils.safe_unicode(root)
+ self.root_utf8 = self.root.encode('utf8') # where is the filesystem encoding ?
+ self.hit_count = 0
+ self.miss_count = 0
+ self.stat_count = 0
+ self.danger_count = 0
+ self.removed_count = 0
+ self.update_count = 0
+ self._cache = {}
+ self._mode = mode
+ self._cache_file_name = osutils.safe_unicode(cache_file_name)
+ self._filter_provider = content_filter_stack_provider
+
+ def cache_file_name(self):
+ return self._cache_file_name
+
+ def clear(self):
+ """Discard all cached information.
+
+ This does not reset the counters."""
+ if self._cache:
+ self.needs_write = True
+ self._cache = {}
+
+ def scan(self):
+ """Scan all files and remove entries where the cache entry is obsolete.
+
+ Obsolete entries are those where the file has been modified or deleted
+ since the entry was inserted.
+ """
+ # FIXME optimisation opportunity, on linux [and check other oses]:
+ # rather than iteritems order, stat in inode order.
+ prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
+ prep.sort()
+
+ for inum, path, cache_entry in prep:
+ abspath = osutils.pathjoin(self.root, path)
+ fp = self._fingerprint(abspath)
+ self.stat_count += 1
+
+ cache_fp = cache_entry[1]
+
+ if (not fp) or (cache_fp != fp):
+ # not here or not a regular file anymore
+ self.removed_count += 1
+ self.needs_write = True
+ del self._cache[path]
+
+ def get_sha1(self, path, stat_value=None):
+ """Return the sha1 of a file.
+ """
+ if path.__class__ is str:
+ abspath = osutils.pathjoin(self.root_utf8, path)
+ else:
+ abspath = osutils.pathjoin(self.root, path)
+ self.stat_count += 1
+ file_fp = self._fingerprint(abspath, stat_value)
+
+ if not file_fp:
+ # not a regular file or not existing
+ if path in self._cache:
+ self.removed_count += 1
+ self.needs_write = True
+ del self._cache[path]
+ return None
+
+ if path in self._cache:
+ cache_sha1, cache_fp = self._cache[path]
+ else:
+ cache_sha1, cache_fp = None, None
+
+ if cache_fp == file_fp:
+ ## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
+ ## mutter("now = %s", time.time())
+ self.hit_count += 1
+ return cache_sha1
+
+ self.miss_count += 1
+
+ mode = file_fp[FP_MODE_COLUMN]
+ if stat.S_ISREG(mode):
+ if self._filter_provider is None:
+ filters = []
+ else:
+ filters = self._filter_provider(path=path, file_id=None)
+ digest = self._really_sha1_file(abspath, filters)
+ elif stat.S_ISLNK(mode):
+ target = osutils.readlink(osutils.safe_unicode(abspath))
+ digest = osutils.sha_string(target.encode('UTF-8'))
+ else:
+ raise errors.BzrError("file %r: unknown file stat mode: %o"
+ % (abspath, mode))
+
+ # window of 3 seconds to allow for 2s resolution on windows,
+ # unsynchronized file servers, etc.
+ cutoff = self._cutoff_time()
+ if file_fp[FP_MTIME_COLUMN] >= cutoff \
+ or file_fp[FP_CTIME_COLUMN] >= cutoff:
+ # changed too recently; can't be cached. we can
+ # return the result and it could possibly be cached
+ # next time.
+ #
+ # the point is that we only want to cache when we are sure that any
+ # subsequent modifications of the file can be detected. If a
+ # modification neither changes the inode, the device, the size, nor
+ # the mode, then we can only distinguish it by time; therefore we
+ # need to let sufficient time elapse before we may cache this entry
+ # again. If we didn't do this, then, for example, a very quick 1
+ # byte replacement in the file might go undetected.
+ ## mutter('%r modified too recently; not caching', path)
+ self.danger_count += 1
+ if cache_fp:
+ self.removed_count += 1
+ self.needs_write = True
+ del self._cache[path]
+ else:
+ ## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
+ ## path, time.time(), file_fp[FP_MTIME_COLUMN],
+ ## file_fp[FP_CTIME_COLUMN])
+ self.update_count += 1
+ self.needs_write = True
+ self._cache[path] = (digest, file_fp)
+ return digest
+
+ def _really_sha1_file(self, abspath, filters):
+ """Calculate the SHA1 of a file by reading the full text"""
+ return _mod_filters.internal_size_sha_file_byname(abspath, filters)[1]
+
+ def write(self):
+ """Write contents of cache to file."""
+ outf = atomicfile.AtomicFile(self.cache_file_name(), 'wb',
+ new_mode=self._mode)
+ try:
+ outf.write(CACHE_HEADER)
+
+ for path, c in self._cache.iteritems():
+ line_info = [path.encode('utf-8'), '// ', c[0], ' ']
+ line_info.append(' '.join([str(fld) for fld in c[1]]))
+ line_info.append('\n')
+ outf.write(''.join(line_info))
+ outf.commit()
+ self.needs_write = False
+ ## mutter("write hash cache: %s hits=%d misses=%d stat=%d recent=%d updates=%d",
+ ## self.cache_file_name(), self.hit_count, self.miss_count,
+ ## self.stat_count,
+ ## self.danger_count, self.update_count)
+ finally:
+ outf.close()
+
+ def read(self):
+ """Reinstate cache from file.
+
+ Overwrites existing cache.
+
+ If the cache file has the wrong version marker, this just clears
+ the cache."""
+ self._cache = {}
+
+ fn = self.cache_file_name()
+ try:
+ inf = file(fn, 'rb', buffering=65000)
+ except IOError, e:
+ trace.mutter("failed to open %s: %s", fn, e)
+ # better write it now so it is valid
+ self.needs_write = True
+ return
+
+ hdr = inf.readline()
+ if hdr != CACHE_HEADER:
+ trace.mutter('cache header marker not found at top of %s;'
+ ' discarding cache', fn)
+ self.needs_write = True
+ return
+
+ for l in inf:
+ pos = l.index('// ')
+ path = l[:pos].decode('utf-8')
+ if path in self._cache:
+ trace.warning('duplicated path %r in cache' % path)
+ continue
+
+ pos += 3
+ fields = l[pos:].split(' ')
+ if len(fields) != 7:
+ trace.warning("bad line in hashcache: %r" % l)
+ continue
+
+ sha1 = fields[0]
+ if len(sha1) != 40:
+ trace.warning("bad sha1 in hashcache: %r" % sha1)
+ continue
+
+ fp = tuple(map(long, fields[1:]))
+
+ self._cache[path] = (sha1, fp)
+
+ # GZ 2009-09-20: Should really use a try/finally block to ensure close
+ inf.close()
+
+ self.needs_write = False
+
+ def _cutoff_time(self):
+ """Return cutoff time.
+
+ Files modified more recently than this time are at risk of being
+ undetectably modified and so can't be cached.
+ """
+ return int(time.time()) - 3
+
+ def _fingerprint(self, abspath, stat_value=None):
+ if stat_value is None:
+ try:
+ stat_value = os.lstat(abspath)
+ except OSError:
+ # might be missing, etc
+ return None
+ if stat.S_ISDIR(stat_value.st_mode):
+ return None
+ # we discard any high precision because it's not reliable; perhaps we
+ # could do better on some systems?
+ return (stat_value.st_size, long(stat_value.st_mtime),
+ long(stat_value.st_ctime), stat_value.st_ino,
+ stat_value.st_dev, stat_value.st_mode)
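The heart of HashCache is the comparison of a stat-derived fingerprint with the cached one: reuse the cached SHA-1 when the fingerprint is unchanged, recompute when it differs, and refuse to cache anything touched within the last few seconds because such a file could still change without altering its fingerprint. A cut-down sketch of that logic; the field order and the 3-second window follow the code above, and the cache is just a dict::

    import hashlib
    import os
    import stat
    import time

    def fingerprint(abspath):
        st = os.lstat(abspath)
        if stat.S_ISDIR(st.st_mode):
            return None
        return (st.st_size, int(st.st_mtime), int(st.st_ctime),
                st.st_ino, st.st_dev, st.st_mode)

    def cached_sha1(cache, abspath):
        fp = fingerprint(abspath)
        if fp is None:
            cache.pop(abspath, None)
            return None
        entry = cache.get(abspath)
        if entry is not None and entry[1] == fp:
            return entry[0]                      # fingerprint unchanged: hit
        with open(abspath, 'rb') as f:
            digest = hashlib.sha1(f.read()).hexdigest()
        cutoff = int(time.time()) - 3
        if fp[1] < cutoff and fp[2] < cutoff:    # mtime and ctime old enough
            cache[abspath] = (digest, fp)
        return digest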
diff --git a/bzrlib/help.py b/bzrlib/help.py
new file mode 100644
index 0000000..1babcb6
--- /dev/null
+++ b/bzrlib/help.py
@@ -0,0 +1,167 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# TODO: Some way to get a list of external commands (defined by shell
+# scripts) so that they can be included in the help listing as well.
+# It should be enough to just list the plugin directory and look for
+# executable files with reasonable names.
+
+# TODO: `help commands --all` should show hidden commands
+
+import sys
+
+from bzrlib import (
+ commands as _mod_commands,
+ errors,
+ help_topics,
+ osutils,
+ plugin,
+ ui,
+ utextwrap,
+ )
+
+
+def help(topic=None, outfile=None):
+ """Write the help for the specific topic to outfile"""
+ if outfile is None:
+ outfile = ui.ui_factory.make_output_stream()
+
+ indices = HelpIndices()
+
+ alias = _mod_commands.get_alias(topic)
+ try:
+ topics = indices.search(topic)
+ shadowed_terms = []
+ for index, topic_obj in topics[1:]:
+ shadowed_terms.append('%s%s' % (index.prefix,
+ topic_obj.get_help_topic()))
+ source = topics[0][1]
+ outfile.write(source.get_help_text(shadowed_terms))
+ except errors.NoHelpTopic:
+ if alias is None:
+ raise
+
+ if alias is not None:
+ outfile.write("'bzr %s' is an alias for 'bzr %s'.\n" % (topic,
+ " ".join(alias)))
+
+
+def help_commands(outfile=None):
+ """List all commands"""
+ if outfile is None:
+ outfile = ui.ui_factory.make_output_stream()
+ outfile.write(_help_commands_to_text('commands'))
+
+
+def _help_commands_to_text(topic):
+ """Generate the help text for the list of commands"""
+ out = []
+ if topic == 'hidden-commands':
+ hidden = True
+ else:
+ hidden = False
+ names = list(_mod_commands.all_command_names())
+ commands = ((n, _mod_commands.get_cmd_object(n)) for n in names)
+ shown_commands = [(n, o) for n, o in commands if o.hidden == hidden]
+ max_name = max(len(n) for n, o in shown_commands)
+ indent = ' ' * (max_name + 1)
+ width = osutils.terminal_width()
+ if width is None:
+ width = osutils.default_terminal_width
+ # we need one extra space for terminals that wrap on last char
+ width = width - 1
+
+ for cmd_name, cmd_object in sorted(shown_commands):
+ plugin_name = cmd_object.plugin_name()
+ if plugin_name is None:
+ plugin_name = ''
+ else:
+ plugin_name = ' [%s]' % plugin_name
+
+ cmd_help = cmd_object.help()
+ if cmd_help:
+ firstline = cmd_help.split('\n', 1)[0]
+ else:
+ firstline = ''
+ helpstring = '%-*s %s%s' % (max_name, cmd_name, firstline, plugin_name)
+ lines = utextwrap.wrap(
+ helpstring, subsequent_indent=indent,
+ width=width,
+ break_long_words=False)
+ for line in lines:
+ out.append(line + '\n')
+ return ''.join(out)
+
+
+help_topics.topic_registry.register("commands",
+ _help_commands_to_text,
+ "Basic help for all commands",
+ help_topics.SECT_HIDDEN)
+help_topics.topic_registry.register("hidden-commands",
+ _help_commands_to_text,
+ "All hidden commands",
+ help_topics.SECT_HIDDEN)
+
+
+class HelpIndices(object):
+ """Maintainer of help topics across multiple indices.
+
+ It is currently separate from the HelpTopicRegistry because of its ordered
+ nature, but possibly we should instead structure it as a search within the
+ registry and add ordering and searching facilities to the registry. The
+ registry would probably need to be restructured to support that cleanly,
+ which is why this has been implemented in parallel; as a result, it also
+ permits searching for help in indices which are not discoverable via
+ 'help topics'.
+
+ Each index has a unique prefix string, such as "commands", and contains
+ help topics which can be listed or searched.
+ """
+
+ def __init__(self):
+ self.search_path = [
+ help_topics.HelpTopicIndex(),
+ _mod_commands.HelpCommandIndex(),
+ plugin.PluginsHelpIndex(),
+ help_topics.ConfigOptionHelpIndex(),
+ ]
+
+ def _check_prefix_uniqueness(self):
+ """Ensure that the index collection is able to differentiate safely."""
+ prefixes = {}
+ for index in self.search_path:
+ prefixes.setdefault(index.prefix, []).append(index)
+ for prefix, indices in prefixes.items():
+ if len(indices) > 1:
+ raise errors.DuplicateHelpPrefix(prefix)
+
+ def search(self, topic):
+ """Search for topic across the help search path.
+
+ :param topic: A string naming the help topic to search for.
+ :raises: NoHelpTopic if none of the indices in search_path have the topic.
+ :return: A list of HelpTopics which matched 'topic'.
+ """
+ self._check_prefix_uniqueness()
+ result = []
+ for index in self.search_path:
+ result.extend([(index, _topic) for _topic in index.get_topics(topic)])
+ if not result:
+ raise errors.NoHelpTopic(topic)
+ else:
+ return result
diff --git a/bzrlib/help_topics/__init__.py b/bzrlib/help_topics/__init__.py
new file mode 100644
index 0000000..ef1d8e9
--- /dev/null
+++ b/bzrlib/help_topics/__init__.py
@@ -0,0 +1,943 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A collection of extra help information for using bzr.
+
+Help topics are meant to be help for items that aren't commands, but will
+help bzr become fully learnable without referring to a tutorial.
+
+Limited formatting of help text is permitted to make the text useful
+both within the reference manual (reStructuredText) and on the screen.
+The help text should be reStructuredText with formatting kept to a
+minimum and, in particular, no headings. The onscreen renderer applies
+the following simple rules before rendering the text:
+
+ 1. A '::' appearing on the end of a line is replaced with ':'.
+ 2. Lines starting with a ':' have it stripped.
+
+These rules mean that literal blocks and field lists respectively can
+be used in the help text, producing sensible input to a manual while
+rendering on the screen naturally.
+"""
+
+from __future__ import absolute_import
+
+import bzrlib
+from bzrlib import (
+ config,
+ osutils,
+ registry,
+ i18n,
+ )
+
+
+# Section identifiers (map topics to the right place in the manual)
+SECT_COMMAND = "command"
+SECT_CONCEPT = "concept"
+SECT_HIDDEN = "hidden"
+SECT_LIST = "list"
+SECT_PLUGIN = "plugin"
+
+
+class HelpTopicRegistry(registry.Registry):
+ """A Registry customized for handling help topics."""
+
+ def register(self, topic, detail, summary, section=SECT_LIST):
+ """Register a new help topic.
+
+ :param topic: Name of documentation entry
+ :param detail: Function or string object providing detailed
+ documentation for topic. Function interface is detail(topic).
+ This should return a text string of the detailed information.
+ See the module documentation for details on help text formatting.
+ :param summary: String providing single-line documentation for topic.
+ :param section: Section in reference manual - see SECT_* identifiers.
+ """
+ # The detail is stored as the 'object' and the metadata as the info
+ info = (summary, section)
+ super(HelpTopicRegistry, self).register(topic, detail, info=info)
+
+ def register_lazy(self, topic, module_name, member_name, summary,
+ section=SECT_LIST):
+ """Register a new help topic, and import the details on demand.
+
+ :param topic: Name of documentation entry
+ :param module_name: The module to find the detailed help.
+ :param member_name: The member of the module to use for detailed help.
+ :param summary: String providing single-line documentation for topic.
+ :param section: Section in reference manual - see SECT_* identifiers.
+ """
+ # The detail is stored as the 'object' and the metadata as the info
+ info = (summary, section)
+ super(HelpTopicRegistry, self).register_lazy(topic, module_name,
+ member_name, info=info)
+
+ def get_detail(self, topic):
+ """Get the detailed help on a given topic."""
+ obj = self.get(topic)
+ if callable(obj):
+ return obj(topic)
+ else:
+ return obj
+
+ def get_summary(self, topic):
+ """Get the single line summary for the topic."""
+ info = self.get_info(topic)
+ if info is None:
+ return None
+ else:
+ return info[0]
+
+ def get_section(self, topic):
+ """Get the section for the topic."""
+ info = self.get_info(topic)
+ if info is None:
+ return None
+ else:
+ return info[1]
+
+ def get_topics_for_section(self, section):
+ """Get the set of topics in a section."""
+ result = set()
+ for topic in self.keys():
+ if section == self.get_section(topic):
+ result.add(topic)
+ return result
+
+
+topic_registry = HelpTopicRegistry()
+
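Because get_detail accepts either a plain string or a callable taking the topic name, the registry serves both static and generated help. A small usage sketch against the registry class defined above; the topic names and text are invented::

    reg = HelpTopicRegistry()
    reg.register("example-topic", "Static help text for the example topic.",
                 "A one-line summary", section=SECT_CONCEPT)
    reg.register("generated-topic", lambda name: "Help generated for %s." % name,
                 "Summary of the generated topic")

    assert reg.get_detail("example-topic") == "Static help text for the example topic."
    assert reg.get_detail("generated-topic") == "Help generated for generated-topic."
    assert reg.get_summary("example-topic") == "A one-line summary"
    assert reg.get_topics_for_section(SECT_CONCEPT) == set(["example-topic"])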
+
+#----------------------------------------------------
+
+def _help_on_topics(dummy):
+ """Write out the help for topics to outfile"""
+
+ topics = topic_registry.keys()
+ lmax = max(len(topic) for topic in topics)
+
+ out = []
+ for topic in topics:
+ summary = topic_registry.get_summary(topic)
+ out.append("%-*s %s\n" % (lmax, topic, summary))
+ return ''.join(out)
+
+
+def _load_from_file(topic_name):
+ """Load help from a file.
+
+ Topics are expected to be txt files in bzrlib.help_topics.
+ """
+ resource_name = osutils.pathjoin("en", "%s.txt" % (topic_name,))
+ return osutils.resource_string('bzrlib.help_topics', resource_name)
+
+
+def _help_on_revisionspec(name):
+ """Generate the help for revision specs."""
+ import re
+ import bzrlib.revisionspec
+
+ out = []
+ out.append(
+"""Revision Identifiers
+
+A revision identifier refers to a specific state of a branch's history. It
+can be expressed in several ways. It can begin with a keyword to
+unambiguously specify a given lookup type; some examples are 'last:1',
+'before:yesterday' and 'submit:'.
+
+Alternately, it can be given without a keyword, in which case it will be
+checked as a revision number, a tag, a revision id, a date specification, or a
+branch specification, in that order. For example, 'date:today' could be
+written as simply 'today', though if you have a tag called 'today' that will
+be found first.
+
+If 'REV1' and 'REV2' are revision identifiers, then 'REV1..REV2' denotes a
+revision range. Examples: '3647..3649', 'date:yesterday..-1' and
+'branch:/path/to/branch1/..branch:/branch2' (note that there are no quotes or
+spaces around the '..').
+
+Ranges are interpreted differently by different commands. To the "log" command,
+a range is a sequence of log messages, but to the "diff" command, the range
+denotes a change between revisions (and not a sequence of changes). In
+addition, "log" considers a closed range whereas "diff" and "merge" consider it
+to be open-ended, that is, they include one end but not the other. For example:
+"bzr log -r 3647..3649" shows the messages of revisions 3647, 3648 and 3649,
+while "bzr diff -r 3647..3649" includes the changes done in revisions 3648 and
+3649, but not 3647.
+
+The keywords used as revision selection methods are the following:
+""")
+ details = []
+ details.append("\nIn addition, plugins can provide other keywords.")
+ details.append("\nA detailed description of each keyword is given below.\n")
+
+ # The help text is indented 4 spaces - this re cleans that up below
+ indent_re = re.compile(r'^ ', re.MULTILINE)
+ for prefix, i in bzrlib.revisionspec.revspec_registry.iteritems():
+ doc = i.help_txt
+ if doc == bzrlib.revisionspec.RevisionSpec.help_txt:
+ summary = "N/A"
+ doc = summary + "\n"
+ else:
+ # Extract out the top line summary from the body and
+ # clean-up the unwanted whitespace
+ summary,doc = doc.split("\n", 1)
+ #doc = indent_re.sub('', doc)
+ while (doc[-2:] == '\n\n' or doc[-1:] == ' '):
+ doc = doc[:-1]
+
+ # Note: The leading : here are HACKs to get reStructuredText
+ # 'field' formatting - we know that the prefix ends in a ':'.
+ out.append(":%s\n\t%s" % (i.prefix, summary))
+ details.append(":%s\n%s" % (i.prefix, doc))
+
+ return '\n'.join(out + details)
+
+
+def _help_on_transport(name):
+ from bzrlib.transport import (
+ transport_list_registry,
+ )
+ import textwrap
+
+ def add_string(proto, help, maxl, prefix_width=20):
+ help_lines = textwrap.wrap(help, maxl - prefix_width,
+ break_long_words=False)
+ line_with_indent = '\n' + ' ' * prefix_width
+ help_text = line_with_indent.join(help_lines)
+ return "%-20s%s\n" % (proto, help_text)
+
+ def sort_func(a,b):
+ a1 = a[:a.rfind("://")]
+ b1 = b[:b.rfind("://")]
+ if a1>b1:
+ return +1
+ elif a1<b1:
+ return -1
+ else:
+ return 0
+
+ protl = []
+ decl = []
+ protos = transport_list_registry.keys()
+ protos.sort(sort_func)
+ for proto in protos:
+ shorthelp = transport_list_registry.get_help(proto)
+ if not shorthelp:
+ continue
+ if proto.endswith("://"):
+ protl.append(add_string(proto, shorthelp, 79))
+ else:
+ decl.append(add_string(proto, shorthelp, 79))
+
+
+ out = "URL Identifiers\n\n" + \
+ "Supported URL prefixes::\n\n " + \
+ ' '.join(protl)
+
+ if len(decl):
+ out += "\nSupported modifiers::\n\n " + \
+ ' '.join(decl)
+
+ out += """\
+\nBazaar supports all of the standard parts within the URL::
+
+ <protocol>://[user[:password]@]host[:port]/[path]
+
+allowing URLs such as::
+
+ http://bzruser:BadPass@bzr.example.com:8080/bzr/trunk
+
+For bzr+ssh:// and sftp:// URLs, Bazaar also supports paths that begin
+with '~' as meaning that the rest of the path should be interpreted
+relative to the remote user's home directory. For example if the user
+``remote`` has a home directory of ``/home/remote`` on the server
+shell.example.com, then::
+
+ bzr+ssh://remote@shell.example.com/~/myproject/trunk
+
+would refer to ``/home/remote/myproject/trunk``.
+
+Many commands that accept URLs also accept location aliases too.
+See :doc:`location-alias-help` and :doc:`url-special-chars-help`.
+"""
+
+ return out
+
+
+_basic_help = \
+"""Bazaar %s -- a free distributed version-control tool
+http://bazaar.canonical.com/
+
+Basic commands:
+ bzr init makes this directory a versioned branch
+ bzr branch make a copy of another branch
+
+ bzr add make files or directories versioned
+ bzr ignore ignore a file or pattern
+ bzr mv move or rename a versioned file
+
+ bzr status summarize changes in working copy
+ bzr diff show detailed diffs
+
+ bzr merge pull in changes from another branch
+ bzr commit save some or all changes
+ bzr send send changes via email
+
+ bzr log show history of changes
+ bzr check validate storage
+
+ bzr help init more help on e.g. init command
+ bzr help commands list all commands
+ bzr help topics list all help topics
+""" % bzrlib.__version__
+
+
+_global_options = \
+"""Global Options
+
+These options may be used with any command, and may appear in front of any
+command. (e.g. ``bzr --profile help``).
+
+--version Print the version number. Must be supplied before the command.
+--no-aliases Do not process command aliases when running this command.
+--builtin Use the built-in version of a command, not the plugin version.
+ This does not suppress other plugin effects.
+--no-plugins Do not process any plugins.
+--no-l10n Do not translate messages.
+--concurrency Number of processes that can be run concurrently (selftest).
+
+--profile Profile execution using the hotshot profiler.
+--lsprof Profile execution using the lsprof profiler.
+--lsprof-file Profile execution using the lsprof profiler, and write the
+ results to a specified file. If the filename ends with ".txt",
+ text format will be used. If the filename either starts with
+ "callgrind.out" or end with ".callgrind", the output will be
+ formatted for use with KCacheGrind. Otherwise, the output
+ will be a pickle.
+--coverage Generate line coverage report in the specified directory.
+
+-Oname=value Override the ``name`` config option setting it to ``value`` for
+ the duration of the command. This can be used multiple times if
+ several options need to be overridden.
+
+See http://doc.bazaar.canonical.com/developers/profiling.html for more
+information on profiling.
+
+A number of debug flags are also available to assist troubleshooting and
+development. See :doc:`debug-flags-help`.
+"""
+
+_standard_options = \
+"""Standard Options
+
+Standard options are legal for all commands.
+
+--help, -h Show help message.
+--verbose, -v Display more information.
+--quiet, -q Only display errors and warnings.
+
+Unlike global options, standard options can be used in aliases.
+"""
+
+
+_checkouts = \
+"""Checkouts
+
+Checkouts are source trees that are connected to a branch, so that when
+you commit in the source tree, the commit goes into that branch. They
+allow you to use a simpler, more centralized workflow, ignoring some of
+Bazaar's decentralized features until you want them. Using checkouts
+with shared repositories is very similar to working with SVN or CVS, but
+doesn't have the same restrictions. And using checkouts still allows
+others working on the project to use whatever workflow they like.
+
+A checkout is created with the bzr checkout command (see "help checkout").
+You pass it a reference to another branch, and it will create a local copy
+for you that still contains a reference to the branch you created the
+checkout from (the master branch). Then if you make any commits they will be
+made on the master branch first. This creates an instant mirror of your work, or
+facilitates lockstep development, where developers work together,
+continuously integrating each other's changes.
+
+However, the checkout is still a first class branch in Bazaar terms, so that
+you have the full history locally. As you have a first class branch you can
+also commit locally if you want, for instance due to the temporary loss of a
+network connection. Use the --local option of commit to do this. All the local
+commits will then be made on the master branch the next time you do a non-local
+commit.
+
+If you are using a checkout from a shared branch you will periodically want to
+pull in all the changes made by others. This is done using the "update"
+command. The changes need to be applied before any non-local commit, but
+Bazaar will tell you if there are any changes and suggest that you use this
+command when needed.
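+
+For example, a minimal heavyweight checkout session might look like this
+(the server URL is illustrative)::
+
+ bzr checkout bzr+ssh://shell.example.com/srv/project/trunk trunk
+ cd trunk
+ # edit some files ...
+ bzr commit -m "Fix a bug"       # sent to the master branch
+ bzr commit --local -m "WIP"     # recorded locally only
+ bzr update                      # catch up with the master branch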
+
+It is also possible to create a "lightweight" checkout by passing the
+--lightweight flag to checkout. A lightweight checkout is even closer to an
+SVN checkout in that it is not a first class branch; it mainly consists of the
+working tree. This means that any history operations must query the master
+branch, which could be slow if a network connection is involved. Also, as you
+don't have a local branch, you cannot commit locally.
+
+Lightweight checkouts work best when you have fast reliable access to the
+master branch. This means that if the master branch is on the same disk or LAN
+a lightweight checkout will be faster than a heavyweight one for any commands
+that modify the revision history (as only one copy of the branch needs to
+be updated). Heavyweight checkouts will generally be faster for any command
+that uses the history but does not change it, but if the master branch is on
+the same disk then there won't be a noticeable difference.
+
+Another possible use for a checkout is to use it with a treeless repository
+containing your branches, where you maintain only one working tree by
+switching the master branch that the checkout points to when you want to
+work on a different branch.
+
+Obviously to commit on a checkout you need to be able to write to the master
+branch. This means that the master branch must be accessible over a writeable
+protocol, such as sftp://, and that you have write permissions at the other
+end. Checkouts also work on the local file system, so that all that matters is
+file permissions.
+
+You can change the master of a checkout by using the "switch" command (see
+"help switch"). This will change the location that the commits are sent to.
+The "bind" command can also be used to turn a normal branch into a heavy
+checkout. If you would like to convert your heavy checkout into a normal
+branch so that every commit is local, you can use the "unbind" command. To see
+whether a branch is bound, you can use the "info" command. If the
+branch is bound, it will tell you the location of the bound branch.
+
+Related commands::
+
+ checkout Create a checkout. Pass --lightweight to get a lightweight
+ checkout
+ update Pull any changes in the master branch in to your checkout
+ commit Make a commit that is sent to the master branch. If you have
+ a heavy checkout then the --local option will commit to the
+ checkout without sending the commit to the master
+ switch Change the master branch that the commits in the checkout will
+ be sent to
+ bind Turn a standalone branch into a heavy checkout so that any
+ commits will be sent to the master branch
+ unbind Turn a heavy checkout into a standalone branch so that any
+ commits are only made locally
+ info Displays whether a branch is bound or unbound. If the branch is
+ bound, then it will also display the location of the bound branch
+"""
+
+_repositories = \
+"""Repositories
+
+Repositories in Bazaar are where committed information is stored. There is
+a repository associated with every branch.
+
+Repositories are a form of database. Bzr will usually maintain this for
+good performance automatically, but in some situations (e.g. when doing
+very many commits in a short time period) you may want to ask bzr to
+optimise the database indices. This can be done by the 'bzr pack' command.
+
+By default just running 'bzr init' will create a repository within the new
+branch but it is possible to create a shared repository which allows multiple
+branches to share their information in the same location. When a new branch is
+created it will first look to see if there is a containing shared repository it
+can use.
+
+When two branches of the same project share a repository, there is
+generally a large space saving. For some operations (e.g. branching
+within the repository) this translates into a large time saving.
+
+To create a shared repository use the init-repository command (or the alias
+init-repo). This command takes the location of the repository to create. This
+means that 'bzr init-repository repo' will create a directory named 'repo',
+which contains a shared repository. Any new branches that are created in this
+directory will then use it for storage.
+
+It is a good idea to create a repository whenever you might create more
+than one branch of a project. This is true for both working areas where you
+are doing the development, and any server areas that you use for hosting
+projects. In the latter case, it is common to want branches without working
+trees. Since the files in the branch will not be edited directly there is no
+need to use up disk space for a working tree. To create a repository in which
+the branches will not have working trees pass the '--no-trees' option to
+'init-repository'.
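+
+For example, setting up a treeless shared repository with a first branch
+might look like this (the paths are illustrative)::
+
+ bzr init-repo --no-trees /srv/bzr/projectx
+ bzr init /srv/bzr/projectx/trunk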
+
+Related commands::
+
+ init-repository Create a shared repository. Use --no-trees to create one
+ in which new branches won't get a working tree.
+"""
+
+
+_working_trees = \
+"""Working Trees
+
+A working tree is the contents of a branch placed on disk so that you can
+see the files and edit them. The working tree is where you make changes to a
+branch, and when you commit the current state of the working tree is the
+snapshot that is recorded in the commit.
+
+When you push a branch to a remote system, a working tree will not be
+created. If one is already present, the files will not be updated. The
+branch information will be updated and the working tree will be marked
+as out-of-date. Updating a working tree remotely is difficult, as there
+may be uncommitted changes or the update may cause content conflicts that are
+difficult to deal with remotely.
+
+If you have a branch with no working tree you can use the 'checkout' command
+to create a working tree. If you run 'bzr checkout .' from the branch, it will
+create the working tree. If the branch is updated remotely, you can update the
+working tree by running 'bzr update' in that directory.
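+
+For example, to add a working tree to a treeless branch and refresh it later
+(the path is illustrative)::
+
+ cd /srv/bzr/projectx/trunk
+ bzr checkout .   # create the missing working tree
+ bzr update       # bring it up to date after remote changes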
+
+If you have a branch with a working tree that you do not want, the 'remove-tree'
+command will remove the tree if it is safe to do so. This can be done to avoid the
+warning about the remote working tree not being updated when pushing to the
+branch. It can also be useful when working with a '--no-trees' repository
+(see 'bzr help repositories').
+
+If you want to have a working tree on a remote machine that you push to you
+can either run 'bzr update' in the remote branch after each push, or use some
+other method to update the tree during the push. There is an 'rspush' plugin
+that will update the working tree using rsync as well as doing a push. There
+is also a 'push-and-update' plugin that automates running 'bzr update' via SSH
+after each push.
+
+Useful commands::
+
+ checkout Create a working tree when a branch does not have one.
+ remove-tree Removes the working tree from a branch when it is safe to do so.
+ update When a working tree is out of sync with its associated branch
+ this will update the tree to match the branch.
+"""
+
+
+_branches = \
+"""Branches
+
+A branch consists of the state of a project, including all of its
+history. All branches have a repository associated (which is where the
+branch history is stored), but multiple branches may share the same
+repository (a shared repository). Branches can be copied and merged.
+
+In addition, one branch may be bound to another one. Binding to another
+branch indicates that commits which happen in this branch must also
+happen in the other branch. Bazaar ensures consistency by not allowing
+commits when the two branches are out of date. In order for a commit
+to succeed, it may be necessary to update the current branch using
+``bzr update``.
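+
+For example, binding a standalone branch to a master and updating it before
+committing might look like this (the URL is illustrative)::
+
+ bzr bind bzr+ssh://shell.example.com/srv/project/trunk
+ bzr update
+ bzr commit -m "A change that is also sent to the master branch"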
+
+Related commands::
+
+ init Change a directory into a versioned branch.
+ branch Create a new branch that is a copy of an existing branch.
+ merge Perform a three-way merge.
+ bind Bind a branch to another one.
+"""
+
+
+_standalone_trees = \
+"""Standalone Trees
+
+A standalone tree is a working tree with an associated repository. It
+is an independently usable branch, with no dependencies on any other.
+Creating a standalone tree (via bzr init) is the quickest way to put
+an existing project under version control.
+
+Related Commands::
+
+ init Make a directory into a versioned branch.
+"""
+
+
+_status_flags = \
+"""Status Flags
+
+Status flags are used to summarise changes to the working tree in a concise
+manner. They are in the form::
+
+ xxx <filename>
+
+where the columns' meanings are as follows.
+
+Column 1 - versioning/renames::
+
+ + File versioned
+ - File unversioned
+ R File renamed
+ ? File unknown
+ X File nonexistent (and unknown to bzr)
+ C File has conflicts
+ P Entry for a pending merge (not a file)
+
+Column 2 - contents::
+
+ N File created
+ D File deleted
+ K File kind changed
+ M File modified
+
+Column 3 - execute::
+
+ * The execute bit was changed
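+
+For example, a hypothetical short-format status listing using these flags
+might look like::
+
+  M  README
+ +N  docs/new-topic.txt
+ ?   scratch.txt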
+"""
+
+
+known_env_variables = [
+ ("BZRPATH", "Path where bzr is to look for shell plugin external commands."),
+ ("BZR_EMAIL", "E-Mail address of the user. Overrides EMAIL."),
+ ("EMAIL", "E-Mail address of the user."),
+ ("BZR_EDITOR", "Editor for editing commit messages. Overrides EDITOR."),
+ ("EDITOR", "Editor for editing commit messages."),
+ ("BZR_PLUGIN_PATH", "Paths where bzr should look for plugins."),
+ ("BZR_DISABLE_PLUGINS", "Plugins that bzr should not load."),
+ ("BZR_PLUGINS_AT", "Plugins to load from a directory not in BZR_PLUGIN_PATH."),
+ ("BZR_HOME", "Directory holding .bazaar config dir. Overrides HOME."),
+ ("BZR_HOME (Win32)", "Directory holding bazaar config dir. Overrides APPDATA and HOME."),
+ ("BZR_REMOTE_PATH", "Full name of remote 'bzr' command (for bzr+ssh:// URLs)."),
+ ("BZR_SSH", "Path to SSH client, or one of paramiko, openssh, sshcorp, plink or lsh."),
+ ("BZR_LOG", "Location of .bzr.log (use '/dev/null' to suppress log)."),
+ ("BZR_LOG (Win32)", "Location of .bzr.log (use 'NUL' to suppress log)."),
+ ("BZR_COLUMNS", "Override implicit terminal width."),
+ ("BZR_CONCURRENCY", "Number of processes that can be run concurrently (selftest)"),
+ ("BZR_PROGRESS_BAR", "Override the progress display. Values are 'none' or 'text'."),
+ ("BZR_PDB", "Control whether to launch a debugger on error."),
+ ("BZR_SIGQUIT_PDB", "Control whether SIGQUIT behaves normally or invokes a breakin debugger."),
+ ("BZR_TEXTUI_INPUT", "Force console input mode for prompts to line-based (instead of char-based)."),
+ ]
+
+def _env_variables(topic):
+ import textwrap
+ ret = ["Environment Variables\n\n"]
+ max_key_len = max([len(k[0]) for k in known_env_variables])
+ desc_len = (80 - max_key_len - 2)
+ ret.append("=" * max_key_len + " " + "=" * desc_len + "\n")
+ for k, desc in known_env_variables:
+ ret.append(k + (max_key_len + 1 - len(k)) * " ")
+ ret.append("\n".join(textwrap.wrap(
+ desc, width=desc_len, subsequent_indent=" " * (max_key_len + 1))))
+ ret.append("\n")
+ ret += "=" * max_key_len + " " + "=" * desc_len + "\n"
+ return "".join(ret)
+
+_files = \
+r"""Files
+
+:On Unix: ~/.bazaar/bazaar.conf
+:On Windows: C:\\Documents and Settings\\username\\Application Data\\bazaar\\2.0\\bazaar.conf
+
+Contains the user's default configuration. The section ``[DEFAULT]`` is
+used to define general configuration that will be applied everywhere.
+The section ``[ALIASES]`` can be used to create command aliases for
+commonly used options.
+
+A typical config file might look something like::
+
+ [DEFAULT]
+ email=John Doe <jdoe@isp.com>
+
+ [ALIASES]
+ commit = commit --strict
+ log10 = log --short -r -10..-1
+"""
+
+_criss_cross = \
+"""Criss-Cross
+
+A criss-cross in the branch history can cause the default merge technique
+to emit more conflicts than would normally be expected.
+
+In complex merge cases, ``bzr merge --lca`` or ``bzr merge --weave`` may give
+better results. You may wish to ``bzr revert`` the working tree and merge
+again. Alternatively, use ``bzr remerge`` on particular conflicted files.
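+
+For example, one way to retry such a merge (assuming the working tree has no
+uncommitted changes you want to keep; the branch location is illustrative)
+is::
+
+ bzr revert
+ bzr merge ../other-branch --weave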
+
+Criss-crosses occur in a branch's history if two branches merge the same thing
+and then merge one another, or if two branches merge one another at the same
+time. They can be avoided by having each branch only merge from or into a
+designated central branch (a "star topology").
+
+Criss-crosses cause problems because of the way merge works. Bazaar's default
+merge is a three-way merger; in order to merge OTHER into THIS, it must
+find a basis for comparison, BASE. Using BASE, it can determine whether
+differences between THIS and OTHER are due to one side adding lines, or
+the other side removing lines.
+
+Criss-crosses mean there is no good choice for a base. Selecting the recent
+merge points could cause one side's changes to be silently discarded.
+Selecting older merge points (which Bazaar does) means that extra conflicts
+are emitted.
+
+The ``weave`` merge type is not affected by this problem because it uses
+line-origin detection instead of a basis revision to determine the cause of
+differences.
+"""
+
+_branches_out_of_sync = """Branches Out of Sync
+
+When reconfiguring a checkout, tree or branch into a lightweight checkout,
+a local branch must be destroyed. (For checkouts, this is the local branch
+that serves primarily as a cache.) If the branch-to-be-destroyed does not
+have the same last revision as the new reference branch for the lightweight
+checkout, data could be lost, so Bazaar refuses.
+
+How you deal with this depends on *why* the branches are out of sync.
+
+If you have a checkout and have done local commits, you can get back in sync
+by running "bzr update" (and possibly "bzr commit").
+
+If you have a branch and the remote branch is out-of-date, you can push
+the local changes using "bzr push". If the local branch is out of date, you
+can do "bzr pull". If both branches have had changes, you can merge, commit
+and then push your changes. If you decide that some of the changes aren't
+useful, you can "push --overwrite" or "pull --overwrite" instead.
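+
+For example, to bring a checkout with local commits back in sync before
+reconfiguring it, something like the following is usually enough::
+
+ bzr update
+ bzr commit -m "Merge local commits into the master branch"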
+"""
+
+
+_storage_formats = \
+"""Storage Formats
+
+To ensure that older clients do not access data incorrectly,
+Bazaar's policy is to introduce a new storage format whenever
+new features requiring new metadata are added. New storage
+formats may also be introduced to improve performance and
+scalability.
+
+The newest format, 2a, is highly recommended. If your
+project is not using 2a, then you should suggest to the
+project owner to upgrade.
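+
+For example, upgrading an existing branch in place is typically done with a
+command like (the path is illustrative)::
+
+ bzr upgrade --format=2a /path/to/branch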
+
+
+.. note::
+
+ Some of the older formats have two variants:
+ a plain one and a rich-root one. The latter include an additional
+ field about the root of the tree. There is no performance cost
+ for using a rich-root format but you cannot easily merge changes
+ from a rich-root format into a plain format. As a consequence,
+ moving a project to a rich-root format takes some co-ordination
+ in that all contributors need to upgrade their repositories
+ around the same time. 2a and all future formats will be
+ implicitly rich-root.
+
+See :doc:`current-formats-help` for the complete list of
+currently supported formats. See :doc:`other-formats-help` for
+descriptions of any available experimental and deprecated formats.
+"""
+
+
+# Register help topics
+topic_registry.register("revisionspec", _help_on_revisionspec,
+ "Explain how to use --revision")
+topic_registry.register('basic', _basic_help, "Basic commands", SECT_HIDDEN)
+topic_registry.register('topics', _help_on_topics, "Topics list", SECT_HIDDEN)
+def get_current_formats_topic(topic):
+ from bzrlib import controldir
+ return "Current Storage Formats\n\n" + \
+ controldir.format_registry.help_topic(topic)
+def get_other_formats_topic(topic):
+ from bzrlib import controldir
+ return "Other Storage Formats\n\n" + \
+ controldir.format_registry.help_topic(topic)
+topic_registry.register('current-formats', get_current_formats_topic,
+ 'Current storage formats')
+topic_registry.register('other-formats', get_other_formats_topic,
+ 'Experimental and deprecated storage formats')
+topic_registry.register('standard-options', _standard_options,
+ 'Options that can be used with any command')
+topic_registry.register('global-options', _global_options,
+ 'Options that control how Bazaar runs')
+topic_registry.register('urlspec', _help_on_transport,
+ "Supported transport protocols")
+topic_registry.register('status-flags', _status_flags,
+ "Help on status flags")
+def get_bugs_topic(topic):
+ from bzrlib import bugtracker
+ return ("Bug Tracker Settings\n\n" +
+ bugtracker.tracker_registry.help_topic(topic))
+topic_registry.register('bugs', get_bugs_topic, 'Bug tracker settings')
+topic_registry.register('env-variables', _env_variables,
+ 'Environment variable names and values')
+topic_registry.register('files', _files,
+ 'Information on configuration and log files')
+topic_registry.register_lazy('hooks', 'bzrlib.hooks', 'hooks_help_text',
+ 'Points at which custom processing can be added')
+topic_registry.register_lazy('location-alias', 'bzrlib.directory_service',
+ 'AliasDirectory.help_text',
+ 'Aliases for remembered locations')
+
+# Load some of the help topics from files. Note that topics which reproduce API
+# details will tend to skew (quickly usually!) so please seek other solutions
+# for such things.
+topic_registry.register('authentication', _load_from_file,
+ 'Information on configuring authentication')
+topic_registry.register('configuration', _load_from_file,
+ 'Details on the configuration settings available')
+topic_registry.register('conflict-types', _load_from_file,
+ 'Types of conflicts and what to do about them')
+topic_registry.register('debug-flags', _load_from_file,
+ 'Options to show or record debug information')
+topic_registry.register('log-formats', _load_from_file,
+ 'Details on the logging formats available')
+topic_registry.register('url-special-chars', _load_from_file,
+ 'Special character handling in URLs')
+
+
+# Register concept topics.
+# Note that we might choose to remove these from the online help in the
+# future or implement them via loading content from files. In the meantime,
+# please keep them concise.
+topic_registry.register('branches', _branches,
+ 'Information on what a branch is', SECT_CONCEPT)
+topic_registry.register('checkouts', _checkouts,
+ 'Information on what a checkout is', SECT_CONCEPT)
+topic_registry.register('content-filters', _load_from_file,
+ 'Conversion of content into/from working trees',
+ SECT_CONCEPT)
+topic_registry.register('diverged-branches', _load_from_file,
+ 'How to fix diverged branches',
+ SECT_CONCEPT)
+topic_registry.register('eol', _load_from_file,
+ 'Information on end-of-line handling',
+ SECT_CONCEPT)
+topic_registry.register('formats', _storage_formats,
+ 'Information on choosing a storage format',
+ SECT_CONCEPT)
+topic_registry.register('patterns', _load_from_file,
+ 'Information on the pattern syntax',
+ SECT_CONCEPT)
+topic_registry.register('repositories', _repositories,
+ 'Basic information on shared repositories.',
+ SECT_CONCEPT)
+topic_registry.register('rules', _load_from_file,
+ 'Information on defining rule-based preferences',
+ SECT_CONCEPT)
+topic_registry.register('standalone-trees', _standalone_trees,
+ 'Information on what a standalone tree is',
+ SECT_CONCEPT)
+topic_registry.register('working-trees', _working_trees,
+ 'Information on working trees', SECT_CONCEPT)
+topic_registry.register('criss-cross', _criss_cross,
+ 'Information on criss-cross merging', SECT_CONCEPT)
+topic_registry.register('sync-for-reconfigure', _branches_out_of_sync,
+ 'Steps to resolve "out-of-sync" when reconfiguring',
+ SECT_CONCEPT)
+
+
+class HelpTopicIndex(object):
+ """A index for bzr help that returns topics."""
+
+ def __init__(self):
+ self.prefix = ''
+
+ def get_topics(self, topic):
+ """Search for topic in the HelpTopicRegistry.
+
+ :param topic: A topic to search for. None is treated as 'basic'.
+ :return: A list which is either empty or contains a single
+ RegisteredTopic entry.
+ """
+ if topic is None:
+ topic = 'basic'
+ if topic in topic_registry:
+ return [RegisteredTopic(topic)]
+ else:
+ return []
+
+
+def _format_see_also(see_also):
+ result = ''
+ if see_also:
+ result += '\n:See also: '
+ result += ', '.join(sorted(set(see_also)))
+ result += '\n'
+ return result
+
+
+class RegisteredTopic(object):
+ """A help topic which has been registered in the HelpTopicRegistry.
+
+ These topics consist of nothing more than the name of the topic - all
+ data is retrieved on demand from the registry.
+ """
+
+ def __init__(self, topic):
+ """Constructor.
+
+ :param topic: The name of the topic that this represents.
+ """
+ self.topic = topic
+
+ def get_help_text(self, additional_see_also=None, plain=True):
+ """Return a string with the help for this topic.
+
+ :param additional_see_also: Additional help topics to be
+ cross-referenced.
+ :param plain: if False, raw help (reStructuredText) is
+ returned instead of plain text.
+ """
+ result = topic_registry.get_detail(self.topic)
+ result += _format_see_also(additional_see_also)
+ if plain:
+ result = help_as_plain_text(result)
+ i18n.install()
+ result = i18n.gettext_per_paragraph(result)
+ return result
+
+ def get_help_topic(self):
+ """Return the help topic this can be found under."""
+ return self.topic
+
+
+def help_as_plain_text(text):
+ """Minimal converter of reStructuredText to plain text."""
+ import re
+ # Remove the standalone code block marker
+ text = re.sub(r"(?m)^\s*::\n\s*$", "", text)
+ lines = text.splitlines()
+ result = []
+ for line in lines:
+ if line.startswith(':'):
+ line = line[1:]
+ elif line.endswith('::'):
+ line = line[:-1]
+ # Map :doc:`xxx-help` to ``bzr help xxx``
+ line = re.sub(":doc:`(.+?)-help`", r'``bzr help \1``', line)
+ result.append(line)
+ return "\n".join(result) + "\n"
+
+
+class ConfigOptionHelpIndex(object):
+ """A help index that returns help topics for config options."""
+
+ def __init__(self):
+ self.prefix = 'configuration/'
+
+ def get_topics(self, topic):
+ """Search for topic in the registered config options.
+
+ :param topic: A topic to search for.
+ :return: A list which is either empty or contains a single
+ config.Option entry.
+ """
+ if topic is None:
+ return []
+ elif topic.startswith(self.prefix):
+ topic = topic[len(self.prefix):]
+ if topic in config.option_registry:
+ return [config.option_registry.get(topic)]
+ else:
+ return []
+
+
diff --git a/bzrlib/help_topics/en/authentication.txt b/bzrlib/help_topics/en/authentication.txt
new file mode 100644
index 0000000..805093c
--- /dev/null
+++ b/bzrlib/help_topics/en/authentication.txt
@@ -0,0 +1,226 @@
+Authentication Settings
+=======================
+
+
+Intent
+------
+
+Many different authentication policies can be described in the
+``authentication.conf`` file but a particular user should need only a few
+definitions to cover his needs without having to specify a user and a password
+for every branch he uses.
+
+The definitions found in this file are used to find the credentials to use for
+a given URL. The same credentials can generally be shared by many branches by
+grouping their declarations around the remote servers that need them. It's even
+possible to declare credentials that will be used by several different
+servers.
+
+The intent is to make this file as small as possible to minimize maintenance.
+
+Once the relevant credentials are declared in this file you may use branch urls
+without embedding passwords (security hazard) or even users (enabling sharing
+of your urls with others).
+
+Instead of using::
+
+ bzr branch ftp://joe:secret@host.com/path/to/my/branch
+
+you simply use::
+
+ bzr branch ftp://host.com/path/to/my/branch
+
+provided you have created the following ``authentication.conf`` file::
+
+ [myprojects]
+ scheme=ftp
+ host=host.com
+ user=joe
+ password=secret
+
+
+Authentication definitions
+--------------------------
+
+There are two kinds of authentication used by the various schemes supported by
+bzr:
+
+1. user and password
+
+``FTP`` needs a (``user``, ``password``) to authenticate against a ``host``.
+``SFTP`` can use either a password or a host key to authenticate. However,
+ssh agents are a better, more secure solution. So we have chosen to not provide
+our own less secure method.
+
+2. user, realm and password
+
+``HTTP`` and ``HTTPS`` need a (``user, realm, password``) to authenticate
+against a host. But, by using ``.htaccess`` files, for example, it is possible
+to define several (``user, realm, password``) for a given ``host``. So what is
+really needed is (``user``, ``password``, ``host``, ``path``). The ``realm`` is
+not taken into account in the definitions, but will be displayed if bzr prompts
+you for a password.
+
+``HTTP proxy`` can be handled as ``HTTP`` (or ``HTTPS``) by explicitly
+specifying the appropriate port.
+
+To take all schemes into account, the password will be deduced from a set of
+authentication definitions (``scheme``, ``host``, ``port``, ``path``, ``user``,
+``password``).
+
+ * ``scheme``: can be empty (meaning the rest of the definition can be used
+ for any scheme), ``SFTP`` and ``bzr+ssh`` should not be used here, ``ssh``
+ should be used instead since this is the real scheme regarding
+ authentication,
+
+ * ``host``: can be empty (to act as a default for any host),
+
+ * ``port``: can be empty (useful when a host provides several servers for the
+ same scheme), only numerical values are allowed, this should be used only
+ when the server uses a port different than the scheme standard port,
+
+ * ``path``: can be empty (FTP or SFTP will never use it),
+
+ * ``user``: can be empty (``bzr`` will default to python's
+ ``getpass.getuser()``),
+
+ * ``password``: can be empty if you prefer to always be prompted for your
+ password.
+
+Multiple definitions can be provided and, for a given URL, bzr will select a
+(``user`` [, ``password``]) based on the following rules (see the example
+after the list):
+
+ 1. the first match wins,
+
+ 2. empty fields match everything,
+
+ 3. ``scheme`` matches even if decorators are used in the requested URL,
+
+ 4. ``host`` matches exactly or acts as a domain if it starts with '.'
+ (``project.bzr.sf.net`` will match ``.bzr.sf.net`` but ``projectbzr.sf.net``
+ will not match ``bzr.sf.net``).
+
+ 5. ``port`` matches if included in the requested URL (exact matches only)
+
+ 6. ``path`` matches if included in the requested URL (and by rule #2 above,
+ empty paths will match any provided path).
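+
+For example, because the first match wins, a specific definition must appear
+before a more general one (the host names and users below are illustrative)::
+
+ [projectx]
+ host=bzr.projectx.org
+ user=alice
+
+ [catchall]
+ # matches any other host
+ user=anonymous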
+
+
+
+File format
+-----------
+
+The general rules for :doc:`configuration files <configuration-help>`
+apply except for the variable policies.
+
+Each section describes an authentication definition.
+
+The section name is an arbitrary string; only the ``DEFAULT`` value is reserved
+and should appear as the *last* section.
+
+Each section should define:
+
+* ``user``: the login to be used,
+
+Each section could define:
+
+* ``host``: the remote server,
+
+* ``port``: the port the server is listening on,
+
+* ``path``: the branch location,
+
+* ``password``: the password.
+
+
+Examples
+--------
+
+
+Personal projects hosted outside
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All connections are done with the same ``user`` (the remote one for which the
+default bzr one is not appropriate) and the password is always prompted for,
+with some exceptions::
+
+ # Pet projects on hobby.net
+ [hobby]
+ host=r.hobby.net
+ user=jim
+ password=obvious1234
+
+ # Home server
+ [home]
+ scheme=https
+ host=home.net
+ user=joe
+ password=1essobV10us
+
+ [DEFAULT]
+ # Our local user is barbaz, on all remote sites we're known as foobar
+ user=foobar
+
+
+Source hosting provider
+~~~~~~~~~~~~~~~~~~~~~~~
+
+In the shp.net (fictitious) domain, each project has its own site::
+
+ [shpnet domain]
+ # we use sftp, but ssh is the scheme used for authentication
+ scheme=ssh
+ # The leading '.' ensures that 'shp.net' alone doesn't match
+ host=.shp.net
+ user=joe
+ # bzr doesn't support supplying a password for sftp,
+ # consider using an ssh agent if you don't want to supply
+ # a password interactively. (pageant, ssh-agent, etc)
+
+HTTPS, SFTP servers and their proxy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At company.com, the server hosting release and integration branches is behind a
+proxy, and the two branches use different authentication policies::
+
+ [reference code]
+ scheme=https
+ host=dev.company.com
+ path=/dev
+ user=user1
+ password=pass1
+
+ # development branches on dev server
+ [dev]
+ scheme=ssh # bzr+ssh and sftp are available here
+ host=dev.company.com
+ path=/dev/integration
+ user=user2
+
+ # proxy
+ [proxy]
+ scheme=http
+ host=proxy.company.com
+ port=3128
+ user=proxyuser1
+ password=proxypass1
+
+
+Planned enhancements
+--------------------
+
+The following are not yet implemented but planned as parts of a work in
+progress:
+
+* add a ``password_encoding`` field allowing:
+
+ - storing the passwords in various obfuscating encodings (base64 for one),
+
+ - delegate password storage to plugins (.netrc for example).
+
+* update the credentials when the user is prompted for user or password,
+
+* add a ``verify_certificates`` field for ``HTTPS``.
+
+The ``password_encoding`` and ``verify_certificates`` fields are recognized but
+ignored in the actual implementation.
diff --git a/bzrlib/help_topics/en/configuration.txt b/bzrlib/help_topics/en/configuration.txt
new file mode 100644
index 0000000..ab473bd
--- /dev/null
+++ b/bzrlib/help_topics/en/configuration.txt
@@ -0,0 +1,724 @@
+Configuration Settings
+=======================
+
+Environment settings
+---------------------
+
+While most configuration is handled by configuration files, some options
+which may be semi-permanent can also be controlled through the environment.
+
+BZR_EMAIL
+~~~~~~~~~
+
+Override the email id used by Bazaar. Typical format::
+
+ "John Doe <jdoe@example.com>"
+
+See also the ``email`` configuration option.
+
+BZR_PROGRESS_BAR
+~~~~~~~~~~~~~~~~
+
+Override the progress display. Possible values are "none" or "text". If
+the value is "none" then no progress bar is displayed. The value "text" draws
+the ordinary command line progress bar.
+
+BZR_SIGQUIT_PDB
+~~~~~~~~~~~~~~~
+
+Control whether SIGQUIT behaves normally or invokes a breakin debugger.
+
+* 0 = Standard SIGQUIT behavior (normally, exit with a core dump)
+* 1 = Invoke breakin debugger (default)
+
+BZR_HOME
+~~~~~~~~
+
+Override the home directory used by Bazaar.
+
+BZR_SSH
+~~~~~~~
+
+Select a different SSH implementation.
+
+BZR_PDB
+~~~~~~~
+
+Control whether to launch a debugger on error.
+
+* 0 = Standard behavior
+* 1 = Launch debugger
+
+BZR_REMOTE_PATH
+~~~~~~~~~~~~~~~
+
+Path to the Bazaar executable to use when using the bzr+ssh protocol.
+
+See also the ``bzr_remote_path`` configuration option.
+
+BZR_EDITOR
+~~~~~~~~~~
+
+Path to the editor Bazaar should use for commit messages, etc.
+
+BZR_LOG
+~~~~~~~
+
+Location of the Bazaar log file. You can check the current location by
+running ``bzr version``.
+
+The log file contains debug information that is useful for diagnosing or
+reporting problems with Bazaar.
+
+Setting this to ``NUL`` on Windows or ``/dev/null`` on other platforms
+will disable logging.
+
+
+BZR_PLUGIN_PATH
+~~~~~~~~~~~~~~~
+
+The path to the plugins directory that Bazaar should use.
+If not set, Bazaar will search for plugins in:
+
+* the user specific plugin directory (containing the ``user`` plugins),
+
+* the bzrlib directory (containing the ``core`` plugins),
+
+* the site specific plugin directory if applicable (containing
+ the ``site`` plugins).
+
+If ``BZR_PLUGIN_PATH`` is set in any fashion, it will change the
+way plugins are searched.
+
+As for the ``PATH`` variables, if multiple directories are
+specified in ``BZR_PLUGIN_PATH`` they should be separated by the
+platform-specific path separator (':' on Unix,
+';' on Windows).
+
+By default if ``BZR_PLUGIN_PATH`` is set, it replaces searching
+in ``user``. However it will continue to search in ``core`` and
+``site`` unless they are explicitly removed.
+
+If you need to change the order or remove one of these
+directories, you should use special values:
+
+* ``-user``, ``-core``, ``-site`` will remove the corresponding
+ path from the default values,
+
+* ``+user``, ``+core``, ``+site`` will add the corresponding path
+ before the remaining default values (and also remove it from
+ the default values).
+
+Note that the special values 'user', 'core' and 'site' should be
+used literally; they will be substituted by the corresponding,
+platform-specific values.
+
+The examples below use ':' as the separator; Windows users
+should use ';'.
+
+Overriding the default user plugin directory::
+
+ BZR_PLUGIN_PATH='/path/to/my/other/plugins'
+
+Disabling the site directory while retaining the user directory::
+
+ BZR_PLUGIN_PATH='-site:+user'
+
+Disabling all plugins (better achieved with --no-plugins)::
+
+ BZR_PLUGIN_PATH='-user:-core:-site'
+
+Overriding the default site plugin directory::
+
+ BZR_PLUGIN_PATH='/path/to/my/site/plugins:-site':+user
+
+BZR_DISABLE_PLUGINS
+~~~~~~~~~~~~~~~~~~~
+
+Under special circumstances (mostly when trying to diagnose a
+bug), it's better to disable a plugin (or several) rather than
+uninstalling them completely. Such plugins can be specified in
+the ``BZR_DISABLE_PLUGINS`` environment variable.
+
+In that case, ``bzr`` will stop loading the specified plugins and
+will raise an import error if they are explicitly imported (by
+another plugin that depends on them for example).
+
+Disabling ``myplugin`` and ``yourplugin`` is achieved by::
+
+ BZR_DISABLE_PLUGINS='myplugin:yourplugin'
+
+BZR_PLUGINS_AT
+~~~~~~~~~~~~~~
+
+When adding a new feature or working on a bug in a plugin,
+developers often need to use a specific version of a given
+plugin. Since python requires that the directory containing the
+code is named like the plugin itself, this makes it impossible to
+use arbitrary directory names (using a two-level directory scheme
+is inconvenient). ``BZR_PLUGINS_AT`` allows such directories even
+if they don't appear in ``BZR_PLUGIN_PATH`` .
+
+Plugins specified in this environment variable take precedence
+over the ones in ``BZR_PLUGIN_PATH``.
+
+The variable specifies a list of ``plugin_name@plugin_path`` entries,
+``plugin_name`` being the name of the plugin as it appears in
+python module paths, ``plugin_path`` being the path to the
+directory containing the plugin code itself
+(i.e. ``plugins/myplugin`` not ``plugins``). Use ':' as the list
+separator (';' on Windows).
+
+Example:
+~~~~~~~~
+
+Using a specific version of ``myplugin``:
+``BZR_PLUGINS_AT='myplugin@/home/me/bugfixes/123456-myplugin'``
+
+BZRPATH
+~~~~~~~
+
+The path where Bazaar should look for shell plugin external commands.
+
+
+http_proxy, https_proxy
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Specifies the network proxy for outgoing connections, for example::
+
+ http_proxy=http://proxy.example.com:3128/
+ https_proxy=http://proxy.example.com:3128/
+
+
+Configuration files
+-------------------
+
+Location
+~~~~~~~~
+
+Configuration files are located in ``$HOME/.bazaar`` on Unix and
+``C:\Documents and Settings\<username>\Application Data\Bazaar\2.0`` on
+Windows. (You can check the location for your system by using
+``bzr version``.)
+
+There are three primary configuration files in this location:
+
+* ``bazaar.conf`` describes default configuration options,
+
+* ``locations.conf`` describes configuration information for
+ specific branch locations,
+
+* ``authentication.conf`` describes credential information for
+ remote servers.
+
+Each branch can also contain a configuration file that sets values specific
+to that branch. This file is found at ``.bzr/branch/branch.conf`` within the
+branch. This file is visible to all users of a branch. If you wish to override
+one of the values for a branch with a setting that is specific to you, then you
+can do so in ``locations.conf``.
+
+General format
+~~~~~~~~~~~~~~
+
+An ini file has three types of constructs: section headers, section
+options and comments.
+
+Comments
+^^^^^^^^
+
+A comment is any line that starts with a "#" (sometimes called a "hash
+mark", "pound sign" or "number sign"). Comment lines are ignored by
+Bazaar when parsing ini files.
+
+Section headers
+^^^^^^^^^^^^^^^
+
+A section header is a word enclosed in brackets that starts at the beginning
+of a line. A typical section header looks like this::
+
+ [DEFAULT]
+
+The only valid section headers for bazaar.conf currently are [DEFAULT] and
+[ALIASES]. Section headers are case sensitive. The default section provides for
+setting options which can be overridden with the branch config file.
+
+For ``locations.conf``, the options from the section with the
+longest matching section header are used to the exclusion of other
+potentially valid section headers. A section header uses the path for
+the branch as the section header. Some examples include::
+
+ [http://mybranches.isp.com/~jdoe/branchdir]
+ [/home/jdoe/branches/]
+
+
+Section options
+^^^^^^^^^^^^^^^
+
+A section option resides within a section. A section option contains an
+option name, an equals sign and a value. For example::
+
+ email = John Doe <jdoe@isp.com>
+ gpg_signing_key = Amy Pond <amy@example.com>
+
+An option can reference other options by enclosing them in curly brackets::
+
+ my_branch_name = feature_x
+ my_server = bzr+ssh://example.com
+ push_location = {my_server}/project/{my_branch_name}
+
+Option policies
+^^^^^^^^^^^^^^^
+
+Options defined in a section affect the named directory or URL plus
+any locations they contain. Policies can be used to change how an
+option value is interpreted for contained locations. Currently
+there are three policies available:
+
+ none:
+ the value is interpreted the same for contained locations. This is
+ the default behaviour.
+ norecurse:
+ the value is only used for the exact location specified by the
+ section name.
+ appendpath:
+ for contained locations, any additional path components are
+ appended to the value.
+
+Policies are specified by keys with names of the form "<option_name>:policy".
+For example, to define the push location for a tree of branches, the
+following could be used::
+
+ [/top/location]
+ push_location = sftp://example.com/location
+ push_location:policy = appendpath
+
+With this configuration, the push location for ``/top/location/branch1``
+would be ``sftp://example.com/location/branch1``.
+
+Section local options
+^^^^^^^^^^^^^^^^^^^^^
+
+Some options are defined automatically inside a given section and can be
+referred to in this section only.
+
+For example, the ``appendpath`` policy can be used like this::
+
+ [/home/vila/src/bzr/bugs]
+ mypush = lp:~vila/bzr
+ mypush:policy=appendpath
+
+Using ``relpath`` to achieve the same result is done like this::
+
+ [/home/vila/src/bzr/bugs]
+ mypush = lp:~vila/bzr/{relpath}
+
+In both cases, when used in a directory like
+``/home/vila/src/bzr/bugs/832013-expand-in-stack`` we'll get::
+
+ $ bzr config mypush
+ lp:~vila/bzr/832013-expand-in-stack
+
+Another such option is ``basename`` which can be used like this::
+
+ [/home/vila/src/bzr]
+ mypush = lp:~vila/bzr/{basename}
+
+When used in a directory like
+``/home/vila/src/bzr/bugs/832013-expand-in-stack`` we'll get::
+
+ $ bzr config mypush
+ lp:~vila/bzr/832013-expand-in-stack
+
+Note that ``basename`` here refers to the base name of ``relpath`` which
+itself is defined as the relative path between the section name and the
+location it matches.
+
+Another such option is ``branchname``, which refers to the name of a colocated
+branch. For non-colocated branches, it behaves like basename. It can be used
+like this::
+
+ [/home/vila/src/bzr/bugs]
+ mypush = lp:~vila/bzr/{branchname}
+
+When used with a colocated branch named ``832013-expand-in-stack``, we'll get::
+
+ bzr config mypush
+ lp:~vila/bzr/832013-expand-in-stack
+
+When an option is local to a Section, it cannot be referred to from option
+values in any other section from the same ``Store`` nor from any other
+``Store``.
+
+
+The main configuration file, bazaar.conf
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``bazaar.conf`` allows two sections: ``[DEFAULT]`` and ``[ALIASES]``.
+The default section contains the default
+configuration options for all branches. The default section can be
+overridden by providing a branch-specific section in ``locations.conf``.
+
+A typical ``bazaar.conf`` section often looks like the following::
+
+ [DEFAULT]
+ email = John Doe <jdoe@isp.com>
+ editor = /usr/bin/vim
+ create_signatures = when-required
+
+
+The branch location configuration file, locations.conf
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``locations.conf`` allows one to specify overriding settings for
+a specific branch. The format is almost identical to the default section in
+bazaar.conf with one significant change: The section header, instead of saying
+default, will be the path to a branch that you wish to override a value
+for. The '?' and '*' wildcards are supported::
+
+ [/home/jdoe/branches/nethack]
+ email = Nethack Admin <nethack@nethack.com>
+
+ [http://hypothetical.site.com/branches/devel-branch]
+ create_signatures = always
+
+The authentication configuration file, authentication.conf
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``authentication.conf`` allows one to specify credentials for
+remote servers. This can be used for all the supported transports and any part
+of bzr that requires authentication (smtp for example).
+
+The syntax of the file obeys the same rules as the others except for the
+option policies which don't apply.
+
+For more information on the possible uses of the authentication configuration
+file see :doc:`authentication-help`.
+
+
+Common options
+--------------
+
+debug_flags
+~~~~~~~~~~~
+
+A comma-separated list of debugging options to turn on. The same values
+can be used as with the -D command-line option (see `help global-options`).
+For example::
+
+ debug_flags = hpss
+
+or::
+
+ debug_flags = hpss,evil
+
+email
+~~~~~
+
+The email address to use when committing a branch. Typically takes the form
+of::
+
+ email = Full Name <account@hostname.tld>
+
+editor
+~~~~~~
+
+The path of the editor that you wish to use if *bzr commit* is run without
+a commit message. This setting is trumped by the environment variable
+``BZR_EDITOR``, and overrides the ``VISUAL`` and ``EDITOR`` environment
+variables.
+
+log_format
+~~~~~~~~~~
+
+The default log format to use. Standard log formats are ``long``, ``short``
+and ``line``. Additional formats may be provided by plugins. The default
+value is ``long``.
+
+check_signatures
+~~~~~~~~~~~~~~~~
+
+Reserved for future use. These options will allow a policy for branches to
+require signatures.
+
+require
+ The gnupg signature for revisions must be present and must be valid.
+
+ignore
+ Do not check gnupg signatures of revisions.
+
+check-available
+ (default) If gnupg signatures for revisions are present, check them.
+ Bazaar will fail if it finds a bad signature, but will not fail if
+ no signature is present.
+
+create_signatures
+~~~~~~~~~~~~~~~~~
+
+Defines the behaviour of signing revisions on commits. By default bzr will not
+sign new commits.
+
+always
+ Sign every new revision that is committed. If the signing fails then the
+ commit will not be made.
+
+when-required
+ Reserved for future use.
+
+never
+ Reserved for future use.
+
+In future it is planned that ``when-required`` will sign newly
+committed revisions only when the branch requires them. ``never`` will refuse
+to sign newly committed revisions, even if the branch requires signatures.
+
+dirstate.fdatasync
+~~~~~~~~~~~~~~~~~~
+
+If true (default), working tree metadata changes are flushed through the
+OS buffers to physical disk. This is somewhat slower, but means data
+should not be lost if the machine crashes. See also repository.fdatasync.
+
+gpg_signing_key
+~~~~~~~~~~~~~~~
+
+The GnuPG user identity to use when signing commits. Can be an e-mail
+address, key fingerprint or full key ID. When unset or when set to
+"default" Bazaar will use the user e-mail set with ``whoami``.
+
+recurse
+~~~~~~~
+
+Only useful in ``locations.conf``. Defines whether or not the
+configuration for this section applies to subdirectories:
+
+true
+ (default) This section applies to subdirectories as well.
+
+false
+ This section only applies to the branch at this directory and not
+ branches below it.
+
+gpg_signing_command
+~~~~~~~~~~~~~~~~~~~
+
+(Default: "gpg"). Which program should be used to sign and check revisions.
+For example::
+
+ gpg_signing_command = /usr/bin/gpg
+
+The specified command must accept the options "--clearsign" and "-u <email>".
+
+bzr_remote_path
+~~~~~~~~~~~~~~~
+
+(Default: "bzr"). The path to the command that should be used to run the smart
+server for bzr. This value may only be specified in locations.conf, because:
+
+- it's needed before branch.conf is accessible
+- allowing remote branch.conf files to specify commands would be a security
+ risk
+
+It is overridden by the BZR_REMOTE_PATH environment variable.
+
+smtp_server
+~~~~~~~~~~~
+
+(Default: "localhost"). SMTP server to use when Bazaar needs to send
+email, e.g. with ``merge-directive --mail-to``, or the bzr-email plugin.
+
+smtp_username, smtp_password
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+User and password to authenticate to the SMTP server. If smtp_username
+is set, and smtp_password is not, Bazaar will prompt for a password.
+These settings are only needed if the SMTP server requires authentication
+to send mail.
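+
+For example (the server name and user are illustrative)::
+
+ smtp_server = mail.example.com
+ smtp_username = jdoe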
+
+locks.steal_dead
+~~~~~~~~~~~~~~~~
+
+If set to true, bzr will automatically break locks held by processes from
+the same machine and user that are no longer alive. Otherwise, it will
+print a message and you can break the lock manually, if you are satisfied
+the object is no longer in use.
+
+mail_client
+~~~~~~~~~~~
+
+A mail client to use for sending merge requests.
+By default, bzr will try to use ``mapi`` on Windows. On other platforms, it
+will try ``xdg-email``. If either of these fails, it will fall back to
+``editor``.
+
+Supported values for specific clients:
+
+:claws: Use Claws. This skips a dialog for attaching files.
+:evolution: Use Evolution.
+:kmail: Use KMail.
+:mutt: Use Mutt.
+:thunderbird: Use Mozilla Thunderbird or Icedove. For Thunderbird/Icedove 1.5,
+ this works around some bugs that xdg-email doesn't handle.
+
+Supported generic values are:
+
+:default: See above.
+:editor: Use your editor to compose the merge request. This also uses
+ your commit id, (see ``bzr whoami``), smtp_server and (optionally)
+ smtp_username and smtp_password.
+:mapi: Use your preferred e-mail client on Windows.
+:xdg-email: Use xdg-email to run your preferred mail program
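+
+For example, to always use a particular client, any of the values above can
+be set in your configuration::
+
+ mail_client = thunderbird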
+
+repository.fdatasync
+~~~~~~~~~~~~~~~~~~~~
+
+If true (default), repository changes are flushed through the OS buffers
+to physical disk. This is somewhat slower, but means data should not be
+lost if the machine crashes. See also dirstate.fdatasync.
+
+submit_branch
+~~~~~~~~~~~~~
+
+The branch you intend to submit your current work to. This is automatically
+set by ``bzr send``, and is also used by the ``submit:`` revision spec. This
+should usually be set on a per-branch or per-location basis.
+
+public_branch
+~~~~~~~~~~~~~
+
+A publicly-accessible version of this branch (implying that this version is
+not publicly-accessible). Used (and set) by ``bzr send``.
+
+suppress_warnings
+~~~~~~~~~~~~~~~~~
+
+A list of strings; each string represents a warning that can be emitted by
+bzr. Mentioning a warning in this list tells bzr to not emit it.
+
+Valid values:
+
+* ``format_deprecation``:
+ whether the format deprecation warning is shown on repositories that are
+ using deprecated formats.
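+
+For example, to silence that warning::
+
+ suppress_warnings = format_deprecation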
+
+default_format
+~~~~~~~~~~~~~~
+
+A format name for the default format used when creating branches. See ``bzr
+help formats`` for possible values.
+
+
+Unicode options
+---------------
+
+output_encoding
+~~~~~~~~~~~~~~~
+
+A Python unicode encoding name for text output from bzr, such as log
+information. Values include: utf8, cp850, ascii, iso-8859-1. The default
+is the terminal encoding preferred by the operating system.
+
+
+Branch type specific options
+----------------------------
+
+These options apply only to branches that use the ``dirstate-tags`` or
+later format. They
+are usually set in ``.bzr/branch/branch.conf`` automatically, but may be
+manually set in ``locations.conf`` or ``bazaar.conf``.
+
+append_revisions_only
+~~~~~~~~~~~~~~~~~~~~~
+
+If set to "True" then revisions can only be appended to the log, not
+removed. A branch with this setting enabled can only pull from another
+branch if the other branch's log is a longer version of its own. This is
+normally set by ``bzr init --append-revisions-only``. If you set it
+manually, use either 'True' or 'False' (case-sensitive) to maintain
+compatibility with previous bzr versions (older than 2.2).
+
+parent_location
+~~~~~~~~~~~~~~~
+
+If present, the location of the default branch for pull or merge. This option
+is normally set when creating a branch, by the first ``pull`` or by ``pull
+--remember``.
+
+push_location
+~~~~~~~~~~~~~
+
+If present, the location of the default branch for push. This option
+is normally set by the first ``push`` or ``push --remember``.
+
+push_strict
+~~~~~~~~~~~
+
+If present, defines the ``--strict`` option default value for checking
+uncommitted changes before pushing.
+
+dpush_strict
+~~~~~~~~~~~~
+
+If present, defines the ``--strict`` option default value for checking
+uncommitted changes before pushing into a different VCS without any
+custom bzr metadata.
+
+bound_location
+~~~~~~~~~~~~~~
+
+The location that commits should go to when acting as a checkout.
+This option is normally set by ``bind``.
+
+bound
+~~~~~
+
+If set to "True", the branch should act as a checkout, and push each commit to
+the bound_location. This option is normally set by ``bind``/``unbind``.
+
+send_strict
+~~~~~~~~~~~
+
+If present, defines the ``--strict`` option default value for checking
+uncommitted changes before sending a merge directive.
+
+add.maximum_file_size
+~~~~~~~~~~~~~~~~~~~~~
+
+Defines the maximum file size the command line "add" operation will allow
+in recursive mode, with files larger than this value being skipped. You may
+specify this value as an integer (in which case it is interpreted as bytes),
+or you may specify the value using SI units, e.g. 10KB, 20MB, 1G. A value of 0
+will disable skipping.
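+
+For example, to skip files larger than 20MB during a recursive add (the
+limit shown is illustrative)::
+
+ add.maximum_file_size = 20MB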
+
+External Merge Tools
+--------------------
+
+bzr.mergetool.<name>
+~~~~~~~~~~~~~~~~~~~~
+
+Defines an external merge tool called <name> with the given command-line.
+Arguments containing spaces should be quoted using single or double quotes. The
+executable may omit its path if it can be found on the PATH.
+
+The following markers can be used in the command-line to substitute filenames
+involved in the merge conflict::
+
+ {base} file.BASE
+ {this} file.THIS
+ {other} file.OTHER
+ {result} output file
+ {this_temp} temp copy of file.THIS, used to overwrite output file if merge
+ succeeds.
+
+For example::
+
+ bzr.mergetool.kdiff3 = kdiff3 {base} {this} {other} -o {result}
+
+bzr.default_mergetool
+~~~~~~~~~~~~~~~~~~~~~
+
+Specifies which external merge tool (as defined above) should be selected by
+default in tools such as ``bzr qconflicts``.
+
+For example::
+
+ bzr.default_mergetool = kdiff3
diff --git a/bzrlib/help_topics/en/conflict-types.txt b/bzrlib/help_topics/en/conflict-types.txt
new file mode 100644
index 0000000..7fe037c
--- /dev/null
+++ b/bzrlib/help_topics/en/conflict-types.txt
@@ -0,0 +1,377 @@
+Conflict Types
+==============
+
+Some operations, like merge, revert and pull, modify the contents of your
+working tree. These modifications are programmatically generated, and so they
+may conflict with the current state of your working tree.
+
+When conflicts are present in your working tree (as shown by ``bzr
+conflicts``), you should resolve them and then inform bzr that the conflicts
+have been resolved.
+
+Resolving conflicts is sometimes not obvious, either because the user who
+should resolve them is not the one responsible for their occurrence (as is the
+case when merging other people's work), or because some conflicts are presented
+in a way that is not easy to understand.
+
+Bazaar tries to avoid conflicts; its aim is to ask you to resolve the
+conflict if and only if there's an actual conceptual conflict in the source
+tree. Because Bazaar doesn't understand the real meaning of the files being
+versioned, it can, when faced with ambiguities, fall short in either direction
+trying to resolve the conflict itself. Many kinds of changes can be combined
+programmatically, but sometimes only a human can determine the right thing to
+do.
+
+When Bazaar generates a conflict, it adds information into the working tree to
+present the conflicting versions, and it's up to you to find the correct
+resolution.
+
+Whatever the conflict is, resolving it is roughly done in two steps:
+
+1. Modify the working tree content so that the conflicted item is now in the
+ state you want to keep, then
+
+2. Inform Bazaar that the conflict is now solved and ask it to clean up any
+ remaining generated information (``bzr resolve <item>``).
+
+For most conflict types, there are some obvious ways to modify the working
+tree and put it into the desired state. For some types of conflicts, Bazaar
+itself has already made a choice, when possible.
+
+Yet, whether Bazaar makes a choice or not, there are some other simple but
+different ways to resolve the conflict.
+
+Each type of conflict is explained below, and the action which must be done to
+resolve the conflict is outlined.
+
+Various actions are available depending on the kind of conflict; for some of
+these actions, Bazaar can provide some help. In the end you should at least
+inform Bazaar that you're done with the conflict with::
+
+ ``bzr resolve FILE --action=done``
+
+Note that this is the default action when a single file is involved so you can
+simply use::
+
+ ``bzr resolve FILE``
+
+See ``bzr help resolve`` for more details.
+
+Text conflicts
+--------------
+
+Typical message::
+
+ Text conflict in FILE
+
+These are produced when a text merge cannot completely reconcile two sets of
+text changes. Bazaar will emit files for each version with the extensions
+THIS, OTHER, and BASE. THIS is the version of the file from the target tree,
+i.e. the tree that you are merging changes into. OTHER is the version that you
+are merging into the target. BASE is an older version that is used as a basis
+for comparison.
+
+In the main copy of the file, Bazaar will include all the changes that it
+could reconcile, and any un-reconciled conflicts are surrounded by
+"herringbone" markers like ``<<<<<<<``.
+
+Say the initial text is "The project leader released it.", and THIS modifies it
+to "Martin Pool released it.", while OTHER modifies it to "The project leader
+released Bazaar." A conflict would look like this::
+
+ <<<<<<< TREE
+ Martin Pool released it.
+ =======
+ The project leader released Bazaar.
+ >>>>>>> MERGE-SOURCE
+
+The correct resolution would be "Martin Pool released Bazaar."
+
+You can handle text conflicts either by editing the main copy of the file,
+or by invoking external tools on the THIS, OTHER and BASE versions. It's
+worth mentioning that resolving text conflicts rarely involves picking one
+set of changes over the other (but see below when you encounter these
+cases). More often, the two sets of changes must be intelligently combined.
+
+If you edit the main copy, be sure to remove the herringbone markers. When
+you are done editing, the file should look like it never had a conflict, and be
+ready to commit.
+
+When you have resolved text conflicts, just run ``bzr resolve --auto``, and
+Bazaar will auto-detect which conflicts you have resolved.
+
+When the conflict is resolved, Bazaar deletes the previously generated
+``.BASE``, ``.THIS`` and ``.OTHER`` files if they are still present in the
+working tree.
+
+
+When you want to pick one set of changes over the other, you can use ``bzr
+resolve`` with one of the following actions:
+
+* ``--action=take-this`` will issue ``mv FILE.THIS FILE``,
+* ``--action=take-other`` will issue ``mv FILE.OTHER FILE``.
+
+Note that if you have modified ``FILE.THIS`` or ``FILE.OTHER``, these
+modifications will be taken into account.
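+
+For example, to discard the working tree's version entirely and keep the
+merge source's version of the file (FILE is a placeholder)::
+
+  bzr resolve FILE --action=take-other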
+
+Content conflicts
+-----------------
+
+Typical message::
+
+ Contents conflict in FILE
+
+This conflict happens when there are conflicting changes in the working tree
+and the merge source, but the conflicted items are not text files. They may
+be binary files, or symlinks, or directories. It can even happen with files
+that are deleted on one side, and modified on the other.
+
+As with text conflicts, Bazaar will emit THIS, OTHER and BASE files. (They may be
+regular files, symlinks or directories). But it will not include a "main copy"
+of the file with herringbone conflict markers. It will appear that the "main
+copy" has been renamed to THIS or OTHER.
+
+To resolve that kind of conflict, you should rebuild FILE from either version
+or a combination of both.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will issue ``bzr mv FILE.THIS FILE``,
+* ``--action=take-other`` will issue ``bzr mv FILE.OTHER FILE``,
+* ``--action=done`` will just mark the conflict as resolved.
+
+Any action will also delete the previously generated ``.BASE``, ``.THIS`` and
+``.OTHER`` files if they are still present in the working tree.
+
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+Tag conflicts
+-------------
+
+Typical message::
+
+ Conflicting tags:
+ version-0.1
+
+When pulling from or pushing to another branch, Bazaar informs you about tags
+that conflict between the two branches; that is the same tag points to two
+different revisions. You need not resolve these conflicts, but subsequent
+uses of pull or push will result in the same message.
+
+To resolve the conflict, you must apply the correct tags to either the target
+branch or the source branch as appropriate. Use "bzr tags --show-ids -d
+SOURCE_URL" to see the tags in the source branch. If you want to make the
+target branch's tags match the source branch, then in the target branch do
+``bzr tag --force -r revid:REVISION_ID CONFLICTING_TAG`` for each of the
+CONFLICTING_TAGs, where REVISION_ID comes from the list of tags in the source
+branch. You need not call "bzr resolve" after doing this. To resolve in
+favor of the target branch, you need to similarly use ``tag --force`` in the
+source branch. (Note that pulling or pushing using --overwrite will overwrite
+all tags as well.)
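+
+For example, to make the target branch's tags match the source branch
+(SOURCE_URL, REVISION_ID and CONFLICTING_TAG are placeholders taken from the
+commands described above)::
+
+  bzr tags --show-ids -d SOURCE_URL
+  bzr tag --force -r revid:REVISION_ID CONFLICTING_TAG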
+
+Duplicate paths
+---------------
+
+Typical message::
+
+ Conflict adding file FILE. Moved existing file to FILE.moved.
+
+Sometimes Bazaar will attempt to create a file using a pathname that has
+already been used. The existing file will be renamed to "FILE.moved".
+
+To resolve that kind of conflict, you should rebuild FILE from either version
+or a combination of both.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will issue ``bzr rm FILE ; bzr mv FILE.moved FILE``,
+* ``--action=take-other`` will issue ``bzr rm FILE.moved``,
+* ``--action=done`` will just mark the conflict as resolved.
+
+Note that you must get rid of FILE.moved before using ``--action=done``.
+
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+Unversioned parent
+------------------
+
+Typical message::
+
+ Conflict because FILE is not versioned, but has versioned children.
+
+Sometimes Bazaar will attempt to create a file whose parent directory is not
+versioned. This happens when the directory has been deleted in the target,
+but has a new child in the source, or vice versa. In this situation, Bazaar
+will version the parent directory as well. Resolving this issue depends
+very much on the particular scenario. You may wish to rename or delete either
+the file or the directory. When you are satisfied, you can run "bzr resolve
+FILE" to mark the conflict as resolved.
+
+Missing parent
+--------------
+
+Typical message::
+
+ Conflict adding files to FILE. Created directory.
+
+This happens when a directory has been deleted in the target, but has new
+children in the source. This is similar to the "unversioned parent" conflict,
+except that the parent directory does not *exist*, instead of just being
+unversioned. In this situation, Bazaar will create the missing parent.
+Resolving this issue depends very much on the particular scenario.
+
+To resolve that kind of conflict, you should remove or rename the children,
+the directory, or some combination of both.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will issue ``bzr rm directory`` including the
+ children,
+* ``--action=take-other`` will acknowledge Bazaar's choice to keep the children
+  and restore the directory,
+* ``--action=done`` will just mark the conflict as resolved.
+
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+Deleting parent
+---------------
+
+Typical message::
+
+ Conflict: can't delete DIR because it is not empty. Not deleting.
+
+This is the opposite of "missing parent". A directory is deleted in the
+source, but has new children in the target (either because a directory
+deletion is merged or because the merge introduces new children). Bazaar
+will retain the directory. Resolving this issue depends very much on the
+particular scenario.
+
+To resolve that kind of conflict, you should remove or rename the children,
+the directory, or some combination of both.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will acknowledge Bazaar's choice to keep the directory,
+
+* ``--action=take-other`` will issue ``bzr rm directory`` including the
+ children,
+
+* ``--action=done`` will just mark the conflict as resolved.
+
+Note that when merging a directory deletion, if unversioned files are
+present, they become potential orphans as they don't have a directory
+parent anymore.
+
+Handling such orphans, *before* the conflict is created, is controlled by
+setting the ``bzr.transform.orphan_policy`` configuration option.
+
+There are two possible values for this option:
+
+* ``conflict`` (the default): will leave the orphans in place and
+  generate a conflict,
+
+* ``move``: will move the orphans to a ``bzr-orphans`` directory at the root
+ of the working tree with names like ``<file>.~#~``.
+
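+For example, to have orphans moved automatically rather than reported as
+conflicts, you might set the option in ``bazaar.conf`` (shown here in the
+``[DEFAULT]`` section as a sketch; the option can also be set on a per-branch
+basis)::
+
+  [DEFAULT]
+  bzr.transform.orphan_policy = move
+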
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+Path conflict
+-------------
+
+Typical message::
+
+ Path conflict: PATH1 / PATH2
+
+This happens when the source and target have each modified the name or parent
+directory of a file. Bazaar will use the path elements from the source.
+
+To resolve that kind of conflict, you just have to decide what name should be
+retained for the file involved.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will revert Bazaar's choice and keep ``PATH1`` by
+  issuing ``bzr mv PATH2 PATH1``,
+* ``--action=take-other`` will acknowledge Bazaar's choice of keeping ``PATH2``,
+* ``--action=done`` will just mark the conflict as resolved.
+
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+Parent loop
+-----------
+
+Typical message::
+
+ Conflict moving FILE into DIRECTORY. Cancelled move.
+
+This happens when the source and the target have each moved directories, so
+that, if the change could be applied, a directory would be contained by itself.
+For example::
+
+ $ bzr init
+ $ bzr mkdir white
+ $ bzr mkdir black
+ $ bzr commit -m "BASE"
+ $ bzr branch . ../other
+ $ bzr mv white black
+ $ bzr commit -m "THIS"
+ $ bzr mv ../other/black ../other/white
+ $ bzr commit ../other -m "OTHER"
+ $ bzr merge ../other
+
+In this situation, Bazaar will cancel the move, and leave ``white`` in
+``black``. To resolve that kind of conflict, you just have to decide what
+name should be retained for the directories involved.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will acknowledge Bazaar's choice of leaving ``white``
+  in ``black``,
+* ``--action=take-other`` will revert Bazaar's choice and move ``black`` into
+  ``white`` by issuing ``bzr mv black/white white ; bzr mv black white``,
+* ``--action=done`` will just mark the conflict as resolved.
+
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+Non-directory parent
+--------------------
+
+Typical message::
+
+ Conflict: foo.new is not a directory, but has files in it.
+ Created directory.
+
+This happens when one side has added files to a directory, and the other side
+has changed the directory into a file or symlink. For example::
+
+ $ bzr init
+ $ bzr mkdir foo
+ $ bzr commit -m "BASE"
+ $ bzr branch . ../other
+ $ rmdir foo
+ $ touch foo
+ $ bzr commit -m "THIS"
+ $ bzr mkdir ../other/foo/bar
+ $ bzr commit ../other -m "OTHER"
+ $ bzr merge ../other
+
+To resolve that kind of conflict, you have to decide what name should be
+retained for the file, directory or symlink involved.
+
+``bzr resolve`` recognizes the following actions:
+
+* ``--action=take-this`` will issue ``bzr rm --force foo.new`` and
+ ``bzr add foo``,
+* ``--action=take-other`` will issue ``bzr rm --force foo`` and
+ ``bzr mv foo.new foo``,
+* ``--action=done`` will just mark the conflict as resolved.
+
+Bazaar cannot auto-detect when conflicts of this kind have been resolved.
+
+MalformedTransform
+------------------
+
+It is possible (though very rare) for Bazaar to raise a MalformedTransform
+exception. This means that Bazaar encountered a filesystem conflict that it was
+unable to resolve. This usually indicates a bug. Please let us know if you
+encounter this. Our bug tracker is at https://launchpad.net/bzr/+bugs
diff --git a/bzrlib/help_topics/en/content-filters.txt b/bzrlib/help_topics/en/content-filters.txt
new file mode 100644
index 0000000..8abe5a6
--- /dev/null
+++ b/bzrlib/help_topics/en/content-filters.txt
@@ -0,0 +1,93 @@
+Content Filters
+===============
+
+Content formats
+---------------
+
+Bazaar's content filtering allows you to store files in a different
+format from the copy in your working tree. This lets you, or your
+co-developers, use Windows development tools that expect CRLF files
+on projects that use other line-ending conventions. Among other things,
+content filters also let Unix developers more easily work on projects
+using Windows line-ending conventions, enable keyword
+expansion/compression, and allow trailing spaces on lines in text files
+to be implicitly stripped when committed.
+
+To generalize, there are two content formats supported by Bazaar:
+
+* a canonical format - how files are stored internally
+* a convenience format - how files are created in a working tree.
+
+
+Format conversion
+-----------------
+
+The conversion between these formats is done by content filters.
+A content filter has two parts:
+
+* a read converter - converts from convenience to canonical format
+* a write converter - converts from canonical to convenience format.
+
+Many of these converters will provide *round-trip* conversion,
+i.e. applying the read converter followed by the write converter
+gives back the original content. However, others may provide an
+asymmetric conversion. For example, a read converter might strip
+trailing whitespace off lines in source code while the matching
+write converter might pass content through unchanged.
+
+
+Enabling content filters
+------------------------
+
+Content filters are typically provided by plugins, so the first step
+in using them is to install the relevant plugins and read their
+documentation. Some plugins may be very specific about which files
+they filter, e.g. only files ending in ``.java`` or ``.php``.
+In other cases, the plugin may leave it in the user's hands to
+define which files are to be filtered. This is typically done
+using rule-based preferences. See ``bzr help rules`` for general
+information about defining these.
+
+
+Impact on commands
+------------------
+
+Read converters are only applied to commands that read content from
+a working tree, e.g. status, diff and commit. For example, ``bzr diff``
+will apply read converters to files in the working tree, then compare
+the results to the content last committed.
+
+Write converters are only applied by commands that **create files in a
+working tree**, e.g. branch, checkout, update. If you wish to see the
+canonical format of a file or tree, use ``bzr cat`` or ``bzr export``
+respectively.
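+
+For example, to compare the canonical form of a file with the convenience
+copy in your working tree, you could do something like this (the file name
+is a placeholder)::
+
+  bzr cat foo.txt > /tmp/foo.canonical
+  diff /tmp/foo.canonical foo.txt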
+
+Note: ``bzr commit`` does not implicitly apply write converters after
+committing files. If this makes sense for a given plugin providing
+a content filter, the plugin can usually achieve this effect by using a
+``start_commit`` or ``post_commit`` hook, say. See :doc:`hooks-help`
+for more information on hooks.
+
+
+Refreshing your working tree
+----------------------------
+
+For performance reasons, Bazaar caches the timestamps of files in
+a working tree, and assumes files are unchanged if their timestamps
+match the cached values. As a consequence, there are times when
+you may need to explicitly ask for content filtering to be reapplied
+in one or both directions, e.g. after installing or reconfiguring
+plugins providing it.
+
+Here are some general guidelines for doing this:
+
+ * To reapply read converters, ``touch`` files, i.e. update their
+ timestamp. Operations like ``bzr status`` should then reapply the
+ relevant read converters and compare the end result with the
+ canonical format.
+
+ * To reapply write converters, ensure there are no local changes,
+ delete the relevant files and run ``bzr revert`` on those files.
+
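+For example, to force the filters to be reapplied to a single file
+(``foo.txt`` is a placeholder)::
+
+  # reapply read converters
+  touch foo.txt
+  bzr status
+
+  # reapply write converters (only if foo.txt has no local changes)
+  rm foo.txt
+  bzr revert foo.txt
+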
+Note: In the future, it is likely that additional options will be added
+to commands to make this refreshing process faster and safer.
diff --git a/bzrlib/help_topics/en/debug-flags.txt b/bzrlib/help_topics/en/debug-flags.txt
new file mode 100644
index 0000000..5446516
--- /dev/null
+++ b/bzrlib/help_topics/en/debug-flags.txt
@@ -0,0 +1,45 @@
+Debug Flags
+===========
+
+These flags can be passed on the bzr command line or (without the ``-D``
+prefix) put in the ``debug_flags`` variable in ``bazaar.conf``.
+
+-Dauth Trace authentication sections used.
+-Dbytes Print out how many bytes were transferred
+-Ddirstate Trace dirstate activity (verbose!)
+-Derror Instead of normal error handling, always print a traceback
+ on error.
+-Devil Capture call sites that do expensive or badly-scaling
+ operations.
+-Dfetch Trace history copying between repositories.
+-Dfilters Emit information for debugging content filtering.
+-Dforceinvdeltas Force use of inventory deltas during generic streaming fetch.
+-Dgraph Trace graph traversal.
+-Dhashcache Log every time a working file is read to determine its hash.
+-Dhooks Trace hook execution.
+-Dhpss Trace smart protocol requests and responses.
+-Dhpssdetail More hpss details.
+-Dhpssvfs Traceback on vfs access to Remote objects.
+-Dhttp Trace http connections, requests and responses.
+-Dindex Trace major index operations.
+-Dknit Trace knit operations.
+-Dlock Trace when lockdir locks are taken or released.
+-Dnoretry If a connection is reset, fail immediately rather than
+ retrying the request.
+-Dprogress Trace progress bar operations.
+-Dmem_dump Dump memory to a file upon an out of memory error.
+-Dmerge Emit information for debugging merges.
+-Dno_apport Don't use apport to report crashes.
+-Dno_activity Don't show transport activity indicator in progress bar.
+-Dpack Emit information about pack operations.
+-Drelock Emit a message every time a branch or repository object is
+ unlocked then relocked the same way.
+-Dsftp Trace SFTP internals.
+-Dstatic_tuple Error when a tuple is used where a StaticTuple is expected
+-Dstream Trace fetch streams.
+-Dstrict_locks Trace when OS locks are potentially used in a non-portable
+ manner.
+-Dunlock Some errors during unlock are treated as warnings.
+-DIDS_never Never use InterDifferingSerializer when fetching.
+-DIDS_always Always use InterDifferingSerializer to fetch if appropriate
+ for the format, even for non-local fetches.
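+
+For example, to trace smart protocol requests for a single command::
+
+  bzr -Dhpss log
+
+or persistently, via ``bazaar.conf`` (a sketch of the option syntax)::
+
+  [DEFAULT]
+  debug_flags = hpss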
diff --git a/bzrlib/help_topics/en/diverged-branches.txt b/bzrlib/help_topics/en/diverged-branches.txt
new file mode 100644
index 0000000..c58b749
--- /dev/null
+++ b/bzrlib/help_topics/en/diverged-branches.txt
@@ -0,0 +1,39 @@
+Diverged Branches
+=================
+
+When Bazaar tries to push one branch onto another, it requires that the
+destination branch must be ready to receive the source branch. If this isn't
+the case, then we say that the branches have ``diverged``. Branches are
+considered diverged if the destination branch's most recent commit is one that
+has not been merged (directly or indirectly) by the source branch. To recover
+from diverged branches, one must merge the missing revisions into the source
+branch.
+
+This situation commonly arises when using a centralized workflow with local
+commits. If someone else has committed new work to the mainline since your
+last pull and you have local commits that have not yet been pushed to the
+mainline, then your local branch and the mainline have diverged.
+
+Discovering What Has Diverged
+-----------------------------
+
+The ``bzr missing`` command is used to find out what revisions are in another
+branch that are not present in the current branch, and vice-versa. It shows a
+summary of which extra revisions exist in each branch. If you want to see the
+precise effects of those revisions, you can use ``bzr diff --old=other_branch``
+to show the differences between other_branch and your current branch.
+
+A Solution
+----------
+
+The solution is to merge the revisions from the mainline into your local
+branch. To do so, use ``bzr merge`` to get the new revisions from the
+mainline. This merge may result in conflicts if the other developer's changes
+overlap with your changes. These conflicts should be resolved before
+continuing. After any conflicts have been resolved, or even if there were no
+conflicts, Bazaar requires that you explicitly commit these new revisions
+to your local branch. This requirement gives you an opportunity to test the
+resulting working tree for correctness, since the merged revisions could have
+made arbitrary changes. After testing, you should commit the merge using
+``bzr commit``. This clears up the diverged branches situation. Your local
+branch can now be pushed to the mainline.
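+
+A typical sequence, assuming the mainline is both the default merge and push
+location, might look like this::
+
+  bzr merge
+  # resolve any conflicts and review the merge, then:
+  bzr commit -m "Merge mainline"
+  bzr push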
diff --git a/bzrlib/help_topics/en/eol.txt b/bzrlib/help_topics/en/eol.txt
new file mode 100644
index 0000000..d329d2b
--- /dev/null
+++ b/bzrlib/help_topics/en/eol.txt
@@ -0,0 +1,122 @@
+End of Line Conversion
+======================
+
+EOL conversion is provided as a content filter where Bazaar internally
+stores a canonical format but outputs a convenience format. See
+``bzr help content-filters`` for general information about using these.
+
+Note: Content filtering is only supported in recently added formats,
+e.g. 1.14. Be sure that both the repository *and* the branch are
+in a recent format. (Just setting the format on the repository
+is not enough.) If content filtering does not appear to be working, use
+'bzr info -v' to confirm that the branch is using "Working tree format 5"
+or later.
+
+EOL conversion needs to be enabled for selected file patterns using
+rules. See ``bzr help rules`` for general information on defining rules.
+Currently, rules are only supported in $BZR_HOME/.bazaar/rules (or
+%BZR_HOME%/bazaar/2.0/rules on Windows). Branch specific rules will be
+supported in a future version of Bazaar.
+
+To configure which files to filter, set ``eol`` to one of the values below.
+(If a value is not set, ``exact`` is the default.)
+
+ ========== ===================================== ======================
+ Value Checkout end-of-lines as Commit end-of-lines as
+ ========== ===================================== ======================
+ ``native`` ``crlf`` on Windows, ``lf`` otherwise ``lf``
+ ---------- ------------------------------------- ----------------------
+ ``lf`` ``lf`` ``lf``
+ ---------- ------------------------------------- ----------------------
+ ``crlf`` ``crlf`` ``lf``
+ ---------- ------------------------------------- ----------------------
+ ``exact`` No conversion Exactly as in file
+ ========== ===================================== ======================
+
+Note: For safety reasons, no conversion is applied to any file where a null
+byte is detected in the file.
+
+For users working on a cross-platform project, here is a suggested rule
+to use as a starting point::
+
+ [name *]
+ eol = native
+
+If you have binary files that do not contain a null byte though, be
+sure to add ``eol = exact`` rules for those as well. You can do this
+by giving more explicit patterns earlier in the rules file. For example::
+
+ [name *.png]
+ eol = exact
+
+ [name *]
+ eol = native
+
+If your working tree is on a network drive shared by users on different
+operating systems, you typically want to force certain conventions for
+certain files. In that way, if a file is created with the wrong line
+endings or line endings get mixed during editing, it gets committed
+correctly and gets checked out correctly. For example::
+
+ [name *.bat]
+ eol = crlf
+
+ [name *.sh]
+ eol = lf
+
+ [name *]
+ eol = native
+
+If you take the care to create files with their required endings, you can
+achieve *almost* the same thing by using ``eol = exact``. It is slightly
+safer to use ``lf`` and ``crlf`` though because edits accidentally
+introducing mixed line endings will be corrected during commit for files
+with those settings.
+
+If you have sample test data that deliberately has text files with mixed
+newline conventions, you can ask for those to be left alone like this::
+
+ [name test_data/]
+ eol = exact
+
+ [name *]
+ eol = native
+
+Note that ``exact`` does not imply the file is binary but it does mean
+that no conversion of end-of-lines will be done. (Bazaar currently relies
+on content analysis to detect binary files for commands like ``diff``.
+In the future, a ``binary = true`` rule may be added but it is not
+supported yet.)
+
+If you have an existing repository with text files already stored using
+Windows newline conventions (``crlf``), then you may want to keep using that
+convention in the repository. Forcing certain files to this convention
+may also help users who do not have rules configured. To do this, set
+``eol`` to one of the values below.
+
+ ============================ ======================== ======================
+ Value Checkout end-of-lines as Commit end-of-lines as
+ ============================ ======================== ======================
+ ``native-with-crlf-in-repo`` ``crlf`` on Windows, ``crlf``
+ ``lf`` otherwise
+ ---------------------------- ------------------------ ----------------------
+ ``lf-with-crlf-in-repo`` ``lf`` ``crlf``
+ ---------------------------- ------------------------ ----------------------
+ ``crlf-with-crlf-in-repo`` ``crlf`` ``crlf``
+ ============================ ======================== ======================
+
+For users working on an existing project that uses Windows newline
+conventions in their Bazaar repository, this rule is suggested as a
+starting point::
+
+ [name *]
+ eol = native-with-crlf-in-repo
+
+For new projects, it is recommended that end-of-lines be stored as ``lf``
+and that users stick to the basic settings, i.e. ``native``, ``lf``,
+``crlf`` and ``exact``.
+
+Note: Bazaar's EOL conversion will convert the content of files but
+never reject files because a given line ending or mixed line endings
+are found. A precommit hook should be used if you wish to validate
+(and not just convert) content before committing.
diff --git a/bzrlib/help_topics/en/log-formats.txt b/bzrlib/help_topics/en/log-formats.txt
new file mode 100644
index 0000000..5cb7359
--- /dev/null
+++ b/bzrlib/help_topics/en/log-formats.txt
@@ -0,0 +1,34 @@
+Log Formats
+===========
+
+A log format controls how information about each revision is displayed.
+The standard log formats are compared below::
+
+ Feature long short line
+ ---------------------- ------------- ------------ -------------------
+ design goal detailed view concise view 1 revision per line
+ committer name+email name only name only
+ author name+email - -
+ date-time format full date only date only
+ commit message full full top line
+ tags yes yes yes
+ merges indicator - yes -
+ status/delta optional optional -
+ diff/patch optional optional -
+ revision-id optional optional -
+ branch nick yes - -
+ foreign vcs properties yes yes -
+ preferred levels all 1 1
+ digital signature optional - -
+
+The default format is ``long``. To change this, define the ``log_format``
+setting in the ``[DEFAULT]`` section of ``bazaar.conf`` like this (say)::
+
+ [DEFAULT]
+ log_format = short
+
+Alternatively, to change the log format used for a given query, use the
+--long, --short or --line options.
+
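+For example, a quick one-line-per-revision view of recent history (the
+revision range is illustrative and assumes the branch has at least ten
+revisions)::
+
+  bzr log --line -r -10..-1
+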
+If one of the standard log formats does not meet your needs, additional
+formats can be provided by plugins.
diff --git a/bzrlib/help_topics/en/patterns.txt b/bzrlib/help_topics/en/patterns.txt
new file mode 100644
index 0000000..6250d20
--- /dev/null
+++ b/bzrlib/help_topics/en/patterns.txt
@@ -0,0 +1,38 @@
+Patterns
+========
+
+Bazaar uses patterns to match files at various times. For example,
+the ``add`` command skips over files that match ignore patterns
+and preferences can be associated with files using rule patterns.
+The pattern syntax is described below.
+
+Trailing slashes on patterns are ignored. If the pattern contains a
+slash or is a regular expression, it is compared to the whole path
+from the branch root. Otherwise, it is compared to only the last
+component of the path. To match a file only in the root directory,
+prepend ``./``. Patterns specifying absolute paths are not allowed.
+
+Patterns may include globbing wildcards such as::
+
+ ? - Matches any single character except '/'
+ * - Matches 0 or more characters except '/'
+ /**/ - Matches 0 or more directories in a path
+ [a-z] - Matches a single character from within a group of characters
+
+Patterns may also be `Python regular expressions`_. Regular expression
+patterns are identified by a ``RE:`` prefix followed by the regular
+expression. Regular expression patterns may not include named or
+numbered groups.
+
+Case insensitive ignore patterns can be specified with regular expressions
+by using the ``i`` (for ignore case) flag in the pattern.
+
+For example, a case insensitive match for ``foo`` may be specified as::
+
+ RE:(?i)foo
+
+Ignore patterns may be prefixed with ``!``, which means that a filename
+matched by that pattern will not be ignored.
+
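+For example, a ``.bzrignore`` file using these kinds of patterns might
+contain (the file names are illustrative)::
+
+  *.tmp
+  RE:(?i)temp
+  !important.tmp
+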
+.. _Python regular expressions: http://docs.python.org/library/re.html
+
diff --git a/bzrlib/help_topics/en/rules.txt b/bzrlib/help_topics/en/rules.txt
new file mode 100644
index 0000000..1bb01d0
--- /dev/null
+++ b/bzrlib/help_topics/en/rules.txt
@@ -0,0 +1,40 @@
+Rules
+=====
+
+Introduction
+------------
+
+Rules are defined in ini file format where the sections are file glob
+patterns and the contents of each section are the preferences for files
+matching those patterns. For example::
+
+ [name *.bat]
+ eol = native
+
+ [name *.html *.xml]
+ keywords = xml_escape
+
+Preferences like these are useful for commands and plugins wishing to
+provide custom behaviour for selected files. For more information on
+end of line conversion see :doc:`eol-help`.
+Keyword support is provided by the `keywords plugin
+<http://doc.bazaar.canonical.com/plugins/en/keywords-plugin.html>`_.
+
+Files
+-----
+
+Default rules for all branches are defined in the optional file
+``BZR_HOME/rules``.
+
+Rule Patterns
+-------------
+
+Patterns are ordered and searching stops as soon as one matches.
+As a consequence, more explicit patterns should be placed towards
+the top of the file. Rule patterns use exactly the same conventions
+as ignore patterns. See :doc:`patterns-help` for details.
+
+.. note::
+
+ Patterns containing square brackets or spaces should be
+ surrounded in quotes to ensure they are correctly parsed.
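+
+For example, a pattern containing spaces could be written like this (the
+file name is illustrative)::
+
+  [name "file with spaces.txt"]
+  eol = native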
diff --git a/bzrlib/help_topics/en/url-special-chars.txt b/bzrlib/help_topics/en/url-special-chars.txt
new file mode 100644
index 0000000..2e528b8
--- /dev/null
+++ b/bzrlib/help_topics/en/url-special-chars.txt
@@ -0,0 +1,48 @@
+Special character handling in URLs
+==================================
+
+Bazaar allows locations to be specified in multiple ways, either:
+
+ * Fully qualified URLs
+
+ * File system paths, relative or absolute
+
+Internally bzr treats all locations as URLs. For any file system paths
+that are specified it will automatically determine the appropriate URL
+representation, and escape special characters where necessary.
+
+There are a few characters which have special meaning in URLs and need careful
+handling to avoid ambiguities. Characters can be escaped with a % and a hex
+value in URLs. Any non-ASCII characters in a file path will automatically be
+urlencoded when the path is converted to a URL.
+
+URLs represent non-ASCII characters in an encoding defined by the server, but
+usually UTF-8. The % escapes encode the corresponding UTF-8 bytes. Bazaar tries
+to be generous in what it accepts as a URL and to print URLs in a way that
+will be readable.
+
+For example, if you have a directory named '/tmp/%2False' these are all valid
+ways of accessing the content (0x2F, or 47, is the ASCII code for forward slash)::
+
+ cd /tmp
+ bzr log /tmp/%2False
+ bzr log %2False
+ bzr log file:///tmp/%252False
+ bzr log file://localhost/tmp/%252False
+ bzr log file:%252False
+
+These are valid but do not refer to the same file::
+
+ bzr log file:///tmp/%2False (refers to a file called /tmp/\/alse)
+ bzr log %252False (refers to a file called /tmp/%252False)
+
+Comma also has special meaning in URLs, because it denotes `segment parameters`_
+
+_`segment parameters`: http://www.ietf.org/rfc/rfc3986.txt (section 3.3)
+
+Comma is also special in any file system paths that are specified. To use a literal
+comma in a file system path, specify a URL and URL encode the comma::
+
+ bzr log foo,branch=bla # path "foo" with the segment parameter "branch" set to "bla"
+ bzr log file:foo%2Cbranch=bla # path "foo,branch=bla"
+ bzr log file:foo,branch=bla # path "foo" with segment parameter "branch" set to "bla"
diff --git a/bzrlib/help_topics/es/conflict-types.txt b/bzrlib/help_topics/es/conflict-types.txt
new file mode 100644
index 0000000..9bed954
--- /dev/null
+++ b/bzrlib/help_topics/es/conflict-types.txt
@@ -0,0 +1,185 @@
+=====================
+Manejo de Conflictos
+=====================
+
+Algunas operaciones, como merge, revert y pull, modifican el contenido de su
+working tree. Estas modificaciones son generadas automaticamente, con lo cual
+pueden tener conflictos con el estado actual de su working tree. Muchos tipos
+de cambios pueden ser combinados automaticamentem pero a veces solo algunos
+humanos pueden determinar la decision correcta a tomar. Cuando esto sucede,
+Bazaar lo informara que hay un conflict y pedira que lo resuelva. El comando
+para resolverlo es ``resolve``, pero debe realizar alguna accion antes de
+ejecutarlo.
+
+Cada tipo de conflicto esta explicado a continuacion, y la accion que se debe
+realizar para resolver el conflicto esta detallada.
+
+
+Conflictos de Texto (Text conflicts)
+=====================================
+Mensaje comun::
+
+ Text conflict in ARCHIVO
+
+Estos se producen cuando el texto a unificar no puede ser completamente
+reconciliado entre dos cambios. Bazaar emitira archivos para cada version con
+extensiones THIS, OTHER, y BASE. THIS es la version del archivo del tree
+de destino, por ejemplo, el tree al que estan unificando los cambios. OTHER
+es la version que estan uniendo al tree de destino. BASE es la version
+anterior que es usado como base para la comparacion.
+
+En la copia principal del archivo, Bazaar incluira los cambios que puede
+reconciliar, y cualquier cambio sin reconciliar son rodeados con marcadores
+"herringbone" como ``<<<<<<<``.
+
+Digamos que el texto inicial es "El lider del proyecto lo lanzo.", y THIS lo
+modifica a "Martin Pool lo lanzo.", mientras que OTHER lo modifica a "El lider
+del proyecto lanzo Bazaar." Un conflicto se veria asi::
+
+ <<<<<<< TREE
+ Martin Pool lo lanzo.
+ =======
+ El lider del proyecto lanzo Bazaar.
+ >>>>>>> MERGE-SOURCE
+
+La resolucion correcta seria "Martin Pool lanzo Bazaar."
+
+Puede manejar conflictos de texto o editando la copia principal del archivo,
+o invocando herramientas externas con las versiones THIS, OTHER y BASE. Vale
+la pena mencionar que resolver conflictos de texto rara vez involucra elegir
+un conjunto de cambios u otro. Lo mas comun es que tenga que combinarlos
+inteligentemente.
+
+Si modifica la copia principal, asegurese de sacar los marcadores "herringbone".
+Cuando termino de modificar, el archivo debe estar como si nunca hubiese estado
+con conflictos, y estar listo para hacer commit.
+
+Cuando resolvio los conflictos de texto, solo ejecute "bzr resolve", y Bazaar
+detectara automaticamente que resolvio.
+
+
+Conflictos de contenido (Content conflicts)
+============================================
+Mensaje comun::
+
+ Contents conflict in ARCHIVO
+
+Este conflicto sucede cuando hay cambios que crean conflictos en el tree de
+destino y el fuente a unificar, pero los items con conflictos no son archivos
+de texto. Pueden ser archivos binarios, symlinks o directorios. Pueden suceder
+con archivos que fueron eliminados en un lado, y modificados en el otro.
+
+Como los conflictos de texto, Bazaar creara archivos THIS, OTHER y BASE.
+(pueden ser archivos normales, symlinks o directorios). Esto no incluira
+la "copia principal" del archivo con marcadores "herringbone". Parecera que la
+"copia principal" fue renombrado a THIS o OTHER.
+
+Para resolver esto, utilice "bzr mv" para renombrar el archivo a su nombre
+original, y combine los cambios manualmente. Cuando este satisfecho, ejecute
+"bzr resolve ARCHIVO". Bazaar no puede detectar cuando cambios de este tipo
+fueron resueltos.
+
+
+Rutas Duplicadas (Duplicate Paths)
+==================================
+Mensaje comun::
+
+ Conflict adding file ARCHIVO. Moved existing file to ARCHIVO.moved.
+
+Bazaar a veces intentara crear un archivo usando la ruta que ya fue usada.
+El archivo existente sera renombrado a "ARCHIVO.moved". Si lo desea, puede
+renombrar cualquiera de estos archivos, o combinar su contenido. Cuando
+este satisfecho, puede ejecutar "bzr resolve ARCHIVO" para marcar que el
+conflicto fue resuelto.
+
+
+Padre sin versionar (Unversioned Parent)
+========================================
+Mensajes comunes::
+
+ Conflict because ARCHIVO no esta versionado, pero tiene hijos versionados.
+
+A veces Bazaar intentara crear un archivo cuyo directorio padre no esta
+versionado. Esto sucede cuando el directorio fue eliminado en el destino,
+pero tiene un hijo nuevo en origen, o vice versa. En esta situacion, Bazaar
+versionara al directorio padre tambien. Resolver este tema depende mucho
+en el escenario particular. Puede que quiera renombrar o eliminar
+cualquier archivo o directorio. Cuando este satisfecho, puede ejecutar "bzr
+resolve ARCHIVO" para marcar el conflicto como resuelto.
+
+
+Padre faltante (Missing Parent)
+===============================
+Mensaje comun::
+
+ Conflict adding files to ARCHIVO. Created directory.
+
+Esto sucede cuando un archivo fue eliminado en el destino, pero tiene hijos
+en el origen. Esto es similar al conflicto "Padre sin versionar", excepto
+que el directorio padre *no existe*, en vez de no estar versionado. En esta
+situacion, Bazaar creara al padre faltante. Resolver estos temas depende
+mucho de cada caso particular. Usted puede querer renombrar o eliminar
+cualquiera de los archivos o directorios. Cuando este satisfecho, puede
+ejecutar "bzr resolve ARCHIVO" para marcar al archivo resuelto.
+
+
+Borrando al Padre (Deleting Parent)
+===================================
+Mensaje comun::
+
+ Conflict: can't delete ARCHIVO because it is not empty. Not deleting.
+
+Esto es el opuesto a "Padre faltante". Un directorio es eliminado en el
+origen, pero tiene hijos en el destino. Bazaar mantiene el directorio.
+Resolver este tema depende mucho de cada escenario particular. Quizas quiera
+renombrar o eliminar cualquiera de los archivos o directorios. Cuando esta
+satisfecho, puede ejecutar "bzr resolve ARCHIVO" para marcar al conflicto
+como resuelto.
+
+
+Conflicto de Ruta (Path Conflict)
+=================================
+Mensaje comun::
+
+ Path conflict: RUTA1 / RUTA2
+
+Esto sucede cuando en el origen y el destino han sido modificados el nombre
+o directorio padre de un archivo. Bazaar usara la ruta de los elementos del
+origen. Puede renombrar el archivo, y una vez que lo ha hecho, ejecutar
+"bzr resolve ARCHIVO" para marcarl el conflicto como resuelto.
+
+
+Bucle Padre (Parent Loop)
+=========================
+Mensaje comun::
+
+ Conflict moving ARCHIVO into DIRECTORIO. Cancelled move.
+
+Esto sucede cuando en el origen y el destino se han movido directorios, de
+tal forma que, si se aplicaran los cambios, el directorios se contendria a
+si mismo.
+Por ejemplo::
+
+ $ bzr init
+ $ bzr mkdir a
+ $ bzr mkdir b
+ $ bzr commit -m "BASE"
+ $ bzr branch . ../other
+ $ bzr mv a b
+ $ bzr commit -m "THIS"
+ $ bzr mv ../other/b ../other/a
+ $ bzr commit ../other -m "OTHER"
+ $ bzr merge ../other
+
+En esta situacion, Bazaar cancelara el movimiento, y dejara "a" en "b".
+Puede renombrar los directorios como desee, y una vez que lo ha hecho,
+ejecute "bzr resolve ARCHIVO" para marcar el conflicto como resuelto.
+
+
+MalformedTransform
+==================
+Es posible (aunque muy raro) que Bazaar de una excepcion MalformedTransform.
+Esto quiere decir que Bazaar encontro un conflicto en el sistema de archivos
+que no le fue posible resolver. Esto usualmente indica un bug. Por favor
+haganoslo saber si se encuentra en esta situacion. Nuestro sistema de bugs
+se encuentra en https://launchpad.net/bzr/+bugs
diff --git a/bzrlib/hooks.py b/bzrlib/hooks.py
new file mode 100644
index 0000000..3e9d7ea
--- /dev/null
+++ b/bzrlib/hooks.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Support for plugin hooking logic."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ registry,
+ symbol_versioning,
+ )
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import textwrap
+
+from bzrlib import (
+ _format_version_tuple,
+ errors,
+ pyutils,
+ )
+from bzrlib.i18n import gettext
+""")
+
+
+class KnownHooksRegistry(registry.Registry):
+ # known_hooks registry contains
+ # tuple of (module, member name) which is the hook point
+ # module where the specific hooks are defined
+ # callable to get the empty specific Hooks for that attribute
+
+ def register_lazy_hook(self, hook_module_name, hook_member_name,
+ hook_factory_member_name):
+ self.register_lazy((hook_module_name, hook_member_name),
+ hook_module_name, hook_factory_member_name)
+
+ def iter_parent_objects(self):
+ """Yield (hook_key, (parent_object, attr)) tuples for every registered
+ hook, where 'parent_object' is the object that holds the hook
+ instance.
+
+ This is useful for resetting/restoring all the hooks to a known state,
+ as is done in bzrlib.tests.TestCase._clear_hooks.
+ """
+ for key in self.keys():
+ yield key, self.key_to_parent_and_attribute(key)
+
+ def key_to_parent_and_attribute(self, (module_name, member_name)):
+ """Convert a known_hooks key to a (parent_obj, attr) pair.
+
+ :param key: A tuple (module_name, member_name) as found in the keys of
+ the known_hooks registry.
+ :return: The parent_object of the hook and the name of the attribute on
+ that parent object where the hook is kept.
+ """
+ parent_mod, parent_member, attr = pyutils.calc_parent_name(module_name,
+ member_name)
+ return pyutils.get_named_object(parent_mod, parent_member), attr
+
+
+_builtin_known_hooks = (
+ ('bzrlib.branch', 'Branch.hooks', 'BranchHooks'),
+ ('bzrlib.controldir', 'ControlDir.hooks', 'ControlDirHooks'),
+ ('bzrlib.commands', 'Command.hooks', 'CommandHooks'),
+ ('bzrlib.config', 'ConfigHooks', '_ConfigHooks'),
+ ('bzrlib.info', 'hooks', 'InfoHooks'),
+ ('bzrlib.lock', 'Lock.hooks', 'LockHooks'),
+ ('bzrlib.merge', 'Merger.hooks', 'MergeHooks'),
+ ('bzrlib.msgeditor', 'hooks', 'MessageEditorHooks'),
+ ('bzrlib.mutabletree', 'MutableTree.hooks', 'MutableTreeHooks'),
+ ('bzrlib.smart.client', '_SmartClient.hooks', 'SmartClientHooks'),
+ ('bzrlib.smart.server', 'SmartTCPServer.hooks', 'SmartServerHooks'),
+ ('bzrlib.status', 'hooks', 'StatusHooks'),
+ ('bzrlib.transport', 'Transport.hooks', 'TransportHooks'),
+ ('bzrlib.version_info_formats.format_rio', 'RioVersionInfoBuilder.hooks',
+ 'RioVersionInfoBuilderHooks'),
+ ('bzrlib.merge_directive', 'BaseMergeDirective.hooks',
+ 'MergeDirectiveHooks'),
+ )
+
+known_hooks = KnownHooksRegistry()
+for (_hook_module, _hook_attribute, _hook_class) in _builtin_known_hooks:
+ known_hooks.register_lazy_hook(_hook_module, _hook_attribute, _hook_class)
+del _builtin_known_hooks, _hook_module, _hook_attribute, _hook_class
+
+
+def known_hooks_key_to_object((module_name, member_name)):
+    """Convert a known_hooks key to an object.
+
+ :param key: A tuple (module_name, member_name) as found in the keys of
+ the known_hooks registry.
+ :return: The object this specifies.
+ """
+ return pyutils.get_named_object(module_name, member_name)
+
+
+class Hooks(dict):
+ """A dictionary mapping hook name to a list of callables.
+
+ e.g. ['FOO'] Is the list of items to be called when the
+    e.g. ['FOO'] is the list of items to be called when the
+ """
+
+ def __init__(self, module=None, member_name=None):
+ """Create a new hooks dictionary.
+
+ :param module: The module from which this hooks dictionary should be loaded
+ (used for lazy hooks)
+ :param member_name: Name under which this hooks dictionary should be loaded.
+ (used for lazy hooks)
+ """
+ dict.__init__(self)
+ self._callable_names = {}
+ self._lazy_callable_names = {}
+ self._module = module
+ self._member_name = member_name
+
+ def add_hook(self, name, doc, introduced, deprecated=None):
+ """Add a hook point to this dictionary.
+
+ :param name: The name of the hook, for clients to use when registering.
+ :param doc: The docs for the hook.
+ :param introduced: When the hook was introduced (e.g. (0, 15)).
+ :param deprecated: When the hook was deprecated, None for
+ not-deprecated.
+ """
+ if name in self:
+ raise errors.DuplicateKey(name)
+ if self._module:
+ callbacks = _lazy_hooks.setdefault(
+ (self._module, self._member_name, name), [])
+ else:
+ callbacks = None
+ hookpoint = HookPoint(name=name, doc=doc, introduced=introduced,
+ deprecated=deprecated, callbacks=callbacks)
+ self[name] = hookpoint
+
+ def docs(self):
+ """Generate the documentation for this Hooks instance.
+
+ This introspects all the individual hooks and returns their docs as well.
+ """
+ hook_names = sorted(self.keys())
+ hook_docs = []
+ name = self.__class__.__name__
+ hook_docs.append(name)
+ hook_docs.append("-"*len(name))
+ hook_docs.append("")
+ for hook_name in hook_names:
+ hook = self[hook_name]
+ try:
+ hook_docs.append(hook.docs())
+ except AttributeError:
+ # legacy hook
+ strings = []
+ strings.append(hook_name)
+ strings.append("~" * len(hook_name))
+ strings.append("")
+ strings.append("An old-style hook. For documentation see the __init__ "
+ "method of '%s'\n" % (name,))
+ hook_docs.extend(strings)
+ return "\n".join(hook_docs)
+
+ def get_hook_name(self, a_callable):
+ """Get the name for a_callable for UI display.
+
+ If no name has been registered, the string 'No hook name' is returned.
+ We use a fixed string rather than repr or the callables module because
+ the code names are rarely meaningful for end users and this is not
+ intended for debugging.
+ """
+ name = self._callable_names.get(a_callable, None)
+ if name is None and a_callable is not None:
+ name = self._lazy_callable_names.get((a_callable.__module__,
+ a_callable.__name__),
+ None)
+ if name is None:
+ return 'No hook name'
+ return name
+
+
+ def install_named_hook_lazy(self, hook_name, callable_module,
+ callable_member, name):
+ """Install a_callable in to the hook hook_name lazily, and label it.
+
+ :param hook_name: A hook name. See the __init__ method for the complete
+ list of hooks.
+ :param callable_module: Name of the module in which the callable is
+ present.
+ :param callable_member: Member name of the callable.
+ :param name: A name to associate the callable with, to show users what
+ is running.
+ """
+ try:
+ hook = self[hook_name]
+ except KeyError:
+ raise errors.UnknownHook(self.__class__.__name__, hook_name)
+ try:
+ hook_lazy = getattr(hook, "hook_lazy")
+ except AttributeError:
+ raise errors.UnsupportedOperation(self.install_named_hook_lazy,
+ self)
+ else:
+ hook_lazy(callable_module, callable_member, name)
+ if name is not None:
+ self.name_hook_lazy(callable_module, callable_member, name)
+
+ def install_named_hook(self, hook_name, a_callable, name):
+ """Install a_callable in to the hook hook_name, and label it name.
+
+ :param hook_name: A hook name. See the __init__ method for the complete
+ list of hooks.
+ :param a_callable: The callable to be invoked when the hook triggers.
+ The exact signature will depend on the hook - see the __init__
+ method for details on each hook.
+ :param name: A name to associate a_callable with, to show users what is
+ running.
+ """
+ try:
+ hook = self[hook_name]
+ except KeyError:
+ raise errors.UnknownHook(self.__class__.__name__, hook_name)
+ try:
+ # list hooks, old-style, not yet deprecated but less useful.
+ hook.append(a_callable)
+ except AttributeError:
+ hook.hook(a_callable, name)
+ if name is not None:
+ self.name_hook(a_callable, name)
+
+ def uninstall_named_hook(self, hook_name, label):
+ """Uninstall named hooks.
+
+ :param hook_name: Hook point name
+ :param label: Label of the callable to uninstall
+ """
+ try:
+ hook = self[hook_name]
+ except KeyError:
+ raise errors.UnknownHook(self.__class__.__name__, hook_name)
+ try:
+ uninstall = getattr(hook, "uninstall")
+ except AttributeError:
+ raise errors.UnsupportedOperation(self.uninstall_named_hook, self)
+ else:
+ uninstall(label)
+
+ def name_hook(self, a_callable, name):
+ """Associate name with a_callable to show users what is running."""
+ self._callable_names[a_callable] = name
+
+ def name_hook_lazy(self, callable_module, callable_member, callable_name):
+ self._lazy_callable_names[(callable_module, callable_member)]= \
+ callable_name
+
+
+class HookPoint(object):
+ """A single hook that clients can register to be called back when it fires.
+
+ :ivar name: The name of the hook.
+ :ivar doc: The docs for using the hook.
+ :ivar introduced: A version tuple specifying what version the hook was
+ introduced in. None indicates an unknown version.
+ :ivar deprecated: A version tuple specifying what version the hook was
+ deprecated or superseded in. None indicates that the hook is not
+ superseded or deprecated. If the hook is superseded then the doc
+ should describe the recommended replacement hook to register for.
+ """
+
+ def __init__(self, name, doc, introduced, deprecated=None, callbacks=None):
+ """Create a HookPoint.
+
+ :param name: The name of the hook, for clients to use when registering.
+ :param doc: The docs for the hook.
+ :param introduced: When the hook was introduced (e.g. (0, 15)).
+ :param deprecated: When the hook was deprecated, None for
+ not-deprecated.
+ """
+ self.name = name
+ self.__doc__ = doc
+ self.introduced = introduced
+ self.deprecated = deprecated
+ if callbacks is None:
+ self._callbacks = []
+ else:
+ self._callbacks = callbacks
+
+ def docs(self):
+ """Generate the documentation for this HookPoint.
+
+ :return: A string terminated in \n.
+ """
+ strings = []
+ strings.append(self.name)
+ strings.append('~'*len(self.name))
+ strings.append('')
+ if self.introduced:
+ introduced_string = _format_version_tuple(self.introduced)
+ else:
+ introduced_string = 'unknown'
+ strings.append(gettext('Introduced in: %s') % introduced_string)
+ if self.deprecated:
+ deprecated_string = _format_version_tuple(self.deprecated)
+ strings.append(gettext('Deprecated in: %s') % deprecated_string)
+ strings.append('')
+ strings.extend(textwrap.wrap(self.__doc__,
+ break_long_words=False))
+ strings.append('')
+ return '\n'.join(strings)
+
+ def __eq__(self, other):
+ return (type(other) == type(self) and other.__dict__ == self.__dict__)
+
+ def hook_lazy(self, callback_module, callback_member, callback_label):
+ """Lazily register a callback to be called when this HookPoint fires.
+
+ :param callback_module: Module of the callable to use when this
+ HookPoint fires.
+ :param callback_member: Member name of the callback.
+ :param callback_label: A label to show in the UI while this callback is
+ processing.
+ """
+ obj_getter = registry._LazyObjectGetter(callback_module,
+ callback_member)
+ self._callbacks.append((obj_getter, callback_label))
+
+ def hook(self, callback, callback_label):
+ """Register a callback to be called when this HookPoint fires.
+
+ :param callback: The callable to use when this HookPoint fires.
+ :param callback_label: A label to show in the UI while this callback is
+ processing.
+ """
+ obj_getter = registry._ObjectGetter(callback)
+ self._callbacks.append((obj_getter, callback_label))
+
+ def uninstall(self, label):
+ """Uninstall the callback with the specified label.
+
+ :param label: Label of the entry to uninstall
+ """
+ entries_to_remove = []
+ for entry in self._callbacks:
+ (entry_callback, entry_label) = entry
+ if entry_label == label:
+ entries_to_remove.append(entry)
+ if entries_to_remove == []:
+ raise KeyError("No entry with label %r" % label)
+ for entry in entries_to_remove:
+ self._callbacks.remove(entry)
+
+ def __iter__(self):
+ return (callback.get_obj() for callback, name in self._callbacks)
+
+ def __len__(self):
+ return len(self._callbacks)
+
+ def __repr__(self):
+ strings = []
+ strings.append("<%s(" % type(self).__name__)
+ strings.append(self.name)
+ strings.append("), callbacks=[")
+ callbacks = self._callbacks
+ for (callback, callback_name) in callbacks:
+ strings.append(repr(callback.get_obj()))
+ strings.append("(")
+ strings.append(callback_name)
+ strings.append("),")
+ if len(callbacks) == 1:
+ strings[-1] = ")"
+ strings.append("]>")
+ return ''.join(strings)
+
+
+_help_prefix = \
+"""
+Hooks
+=====
+
+Introduction
+------------
+
+A hook of type *xxx* of class *yyy* needs to be registered using::
+
+ yyy.hooks.install_named_hook("xxx", ...)
+
+See :doc:`Using hooks<../user-guide/hooks>` in the User Guide for examples.
+
+The class that contains each hook is given before the hooks it supplies. For
+instance, BranchHooks is the hooks class for
+`bzrlib.branch.Branch.hooks`.
+
+Each description also indicates whether the hook runs on the client (the
+machine where bzr was invoked) or the server (the machine addressed by
+the branch URL). These may be, but are not necessarily, the same machine.
+
+Plugins (including hooks) are run on the server if all of these are true:
+
+ * The connection is via a smart server (accessed with a URL starting with
+ "bzr://", "bzr+ssh://" or "bzr+http://", or accessed via a "http://"
+ URL when a smart server is available via HTTP).
+
+ * The hook is either server specific or part of general infrastructure rather
+ than client specific code (such as commit).
+
+"""
+
+def hooks_help_text(topic):
+ segments = [_help_prefix]
+ for hook_key in sorted(known_hooks.keys()):
+ hooks = known_hooks_key_to_object(hook_key)
+ segments.append(hooks.docs())
+ return '\n'.join(segments)
+
+
+# Lazily registered hooks. Maps (module, name, hook_name) tuples
+# to lists of tuples with objectgetters and names
+_lazy_hooks = {}
+
+
+def install_lazy_named_hook(hookpoints_module, hookpoints_name, hook_name,
+ a_callable, name):
+ """Install a callable in to a hook lazily, and label it name.
+
+ :param hookpoints_module: Module name of the hook points.
+ :param hookpoints_name: Name of the hook points.
+ :param hook_name: A hook name.
+    :param a_callable: A callable to call for the hook.
+ :param name: A name to associate a_callable with, to show users what is
+ running.
+ """
+ key = (hookpoints_module, hookpoints_name, hook_name)
+ obj_getter = registry._ObjectGetter(a_callable)
+ _lazy_hooks.setdefault(key, []).append((obj_getter, name))
diff --git a/bzrlib/i18n.py b/bzrlib/i18n.py
new file mode 100644
index 0000000..5f922e8
--- /dev/null
+++ b/bzrlib/i18n.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2007 Lukáš Lalinský <lalinsky@gmail.com>
+# Copyright (C) 2007,2009 Alexander Belchenko <bialix@ukr.net>
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This module is copied from Bazaar Explorer and modified for bzr.
+
+"""i18n and l10n support for Bazaar."""
+
+from __future__ import absolute_import
+
+import gettext as _gettext
+import os
+import sys
+
+
+_translations = None
+
+
+def gettext(message):
+ """Translate message.
+
+ :returns: translated message as unicode.
+ """
+ install()
+ return _translations.ugettext(message)
+
+
+def ngettext(singular, plural, number):
+ """Translate message with plural forms based on `number`.
+
+ :param singular: English language message in singular form
+ :param plural: English language message in plural form
+ :param number: the number this message should be translated for
+
+ :returns: translated message as unicode.
+ """
+ install()
+ return _translations.ungettext(singular, plural, number)
+
+
+def N_(msg):
+ """Mark message for translation but don't translate it right away."""
+ return msg
+
+
+def gettext_per_paragraph(message):
+ """Translate message per paragraph.
+
+ :returns: concatenated translated message as unicode.
+ """
+ install()
+ paragraphs = message.split(u'\n\n')
+ ugettext = _translations.ugettext
+ # Be careful not to translate the empty string -- it holds the
+ # meta data of the .po file.
+ return u'\n\n'.join(ugettext(p) if p else u'' for p in paragraphs)
+
+
+def disable_i18n():
+ """Do not allow i18n to be enabled. Useful for third party users
+ of bzrlib."""
+ global _translations
+ _translations = _gettext.NullTranslations()
+
+
+def installed():
+ """Returns whether translations are in use or not."""
+ return _translations is not None
+
+
+def install(lang=None):
+ """Enables gettext translations in bzr."""
+ global _translations
+ if installed():
+ return
+ _translations = install_translations(lang)
+
+
+def install_translations(lang=None, domain='bzr', locale_base=None):
+ """Create a gettext translation object.
+
+ :param lang: language to install.
+ :param domain: translation domain to install.
+ :param locale_base: plugins can specify their own directory.
+
+ :returns: a gettext translations object to use
+ """
+ if lang is None:
+ lang = _get_current_locale()
+ if lang is not None:
+ languages = lang.split(':')
+ else:
+ languages = None
+ translation = _gettext.translation(
+ domain,
+ localedir=_get_locale_dir(locale_base),
+ languages=languages,
+ fallback=True)
+ return translation
+
+
+def add_fallback(fallback):
+ """
+ Add a fallback translations object. Typically used by plugins.
+
+ :param fallback: gettext.GNUTranslations object
+ """
+ install()
+ _translations.add_fallback(fallback)
+
+
+def uninstall():
+ """Disables gettext translations."""
+ global _translations
+ _translations = None
+
+
+def _get_locale_dir(base):
+ """Returns directory to find .mo translations file in, either local or system
+
+ :param base: plugins can specify their own local directory
+ """
+ fs_enc = sys.getfilesystemencoding()
+ if getattr(sys, 'frozen', False):
+ if base is None:
+ base = os.path.dirname(unicode(sys.executable, fs_enc))
+ return os.path.join(base, u'locale')
+ else:
+ if base is None:
+ base = os.path.dirname(unicode(__file__, fs_enc))
+ dirpath = os.path.realpath(os.path.join(base, u'locale'))
+ if os.path.exists(dirpath):
+ return dirpath
+ return os.path.join(unicode(sys.prefix, fs_enc), u"share", u"locale")
+
+
+def _check_win32_locale():
+ for i in ('LANGUAGE','LC_ALL','LC_MESSAGES','LANG'):
+ if os.environ.get(i):
+ break
+ else:
+ lang = None
+ import locale
+ try:
+ import ctypes
+ except ImportError:
+ # use only user's default locale
+ lang = locale.getdefaultlocale()[0]
+ else:
+ # using ctypes to determine all locales
+ lcid_user = ctypes.windll.kernel32.GetUserDefaultLCID()
+ lcid_system = ctypes.windll.kernel32.GetSystemDefaultLCID()
+ if lcid_user != lcid_system:
+ lcid = [lcid_user, lcid_system]
+ else:
+ lcid = [lcid_user]
+ lang = [locale.windows_locale.get(i) for i in lcid]
+ lang = ':'.join([i for i in lang if i])
+ # set lang code for gettext
+ if lang:
+ os.environ['LANGUAGE'] = lang
+
+
+def _get_current_locale():
+ if not os.environ.get('LANGUAGE'):
+ from bzrlib import config
+ lang = config.GlobalStack().get('language')
+ if lang:
+ os.environ['LANGUAGE'] = lang
+ return lang
+ if sys.platform == 'win32':
+ _check_win32_locale()
+ for i in ('LANGUAGE','LC_ALL','LC_MESSAGES','LANG'):
+ lang = os.environ.get(i)
+ if lang:
+ return lang
+ return None
+
+
+def load_plugin_translations(domain):
+ """Load the translations for a specific plugin.
+
+ :param domain: Gettext domain name (usually 'bzr-PLUGINNAME')
+ """
+ locale_base = os.path.dirname(
+ unicode(__file__, sys.getfilesystemencoding()))
+ translation = install_translations(domain=domain,
+ locale_base=locale_base)
+ add_fallback(translation)
+ return translation
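For reference, a hedged usage sketch of the helpers above; with no .mo catalogue available, install_translations() falls back to NullTranslations, so the calls below simply return the English text. The sample strings are invented.

    from bzrlib import i18n

    i18n.install()                            # idempotent; loads the 'bzr' domain
    print i18n.gettext(u"example message")    # translated only if a catalogue is found
    print i18n.ngettext(u"%d file", u"%d files", 2) % 2
    later = i18n.N_(u"marked for translation, returned untouched")

    # Embedders that do not want bzrlib to translate anything can opt out:
    i18n.uninstall()
    i18n.disable_i18n()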
diff --git a/bzrlib/identitymap.py b/bzrlib/identitymap.py
new file mode 100644
index 0000000..a2c98d7
--- /dev/null
+++ b/bzrlib/identitymap.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2005 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""This module provides an IdentityMap."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ )
+
+
+class IdentityMap(object):
+ """An in memory map from object id to instance.
+
+ An IdentityMap maps from keys to single instances of objects in memory.
+ We have explicit calls on the map for the root of each inheritance tree
+ that is stored in the map. Look for find_CLASS and add_CLASS methods.
+ """
+
+ def add_weave(self, id, weave):
+ """Add weave to the map with a given id."""
+ if self._weave_key(id) in self._map:
+ raise errors.BzrError('weave %s already in the identity map' % id)
+ self._map[self._weave_key(id)] = weave
+ self._reverse_map[weave] = self._weave_key(id)
+
+ def find_weave(self, id):
+ """Return the weave for 'id', or None if it is not present."""
+ return self._map.get(self._weave_key(id), None)
+
+ def __init__(self):
+ super(IdentityMap, self).__init__()
+ self._map = {}
+ self._reverse_map = {}
+
+ def remove_object(self, an_object):
+ """Remove object from map."""
+ if isinstance(an_object, list):
+ raise KeyError('%r not in identity map' % an_object)
+ else:
+ self._map.pop(self._reverse_map[an_object])
+ self._reverse_map.pop(an_object)
+
+ def _weave_key(self, id):
+ """Return the key for a weaves id."""
+ return "weave-" + id
+
+
+class NullIdentityMap(object):
+ """A pretend in memory map from object id to instance.
+
+ A NullIdentityMap is an Identity map that does not store anything in it.
+ """
+
+ def add_weave(self, id, weave):
+ """See IdentityMap.add_weave."""
+
+ def find_weave(self, id):
+ """See IdentityMap.find_weave."""
+ return None
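A minimal usage sketch of the two map flavours defined above; the 'weave' here is just a stand-in object rather than a real bzrlib weave.

    from bzrlib.identitymap import IdentityMap, NullIdentityMap

    imap = IdentityMap()
    weave = object()
    imap.add_weave('file-id-1', weave)
    assert imap.find_weave('file-id-1') is weave
    imap.remove_object(weave)
    assert imap.find_weave('file-id-1') is None

    null_map = NullIdentityMap()
    null_map.add_weave('file-id-1', weave)     # stores nothing
    assert null_map.find_weave('file-id-1') is None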
diff --git a/bzrlib/ignores.py b/bzrlib/ignores.py
new file mode 100644
index 0000000..ef64b06
--- /dev/null
+++ b/bzrlib/ignores.py
@@ -0,0 +1,236 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Lists of ignore files, etc."""
+
+from __future__ import absolute_import
+
+import errno
+import os
+from cStringIO import StringIO
+
+import bzrlib
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ atomicfile,
+ config,
+ globbing,
+ trace,
+ )
+""")
+
+ # ~/.bazaar/ignore will be filled out using this ignore list if it does
+ # not exist. Please keep these sorted (in C locale order) to aid merging.
+USER_DEFAULTS = [
+ '*.a',
+ '*.o',
+ '*.py[co]',
+ '*.so',
+ '*.sw[nop]',
+ '*~',
+ '.#*',
+ '[#]*#',
+ '__pycache__',
+ 'bzr-orphans',
+]
+
+
+
+def parse_ignore_file(f):
+ """Read in all of the lines in the file and turn it into an ignore list
+
+ Continue in the case of utf8 decoding errors, and emit a warning when
+ such an error is found. Optimise for the common case -- no decoding
+ errors.
+ """
+ ignored = set()
+ ignore_file = f.read()
+ try:
+ # Try and parse whole ignore file at once.
+ unicode_lines = ignore_file.decode('utf8').split('\n')
+ except UnicodeDecodeError:
+ # Otherwise go through line by line and pick out the 'good'
+ # decodable lines
+ lines = ignore_file.split('\n')
+ unicode_lines = []
+ for line_number, line in enumerate(lines):
+ try:
+ unicode_lines.append(line.decode('utf-8'))
+ except UnicodeDecodeError:
+ # report error about line (idx+1)
+ trace.warning(
+ '.bzrignore: On Line #%d, malformed utf8 character. '
+ 'Ignoring line.' % (line_number+1))
+
+ # Append each line to ignore list if it's not a comment line
+ for line in unicode_lines:
+ line = line.rstrip('\r\n')
+ if not line or line.startswith('#'):
+ continue
+ ignored.add(globbing.normalize_pattern(line))
+ return ignored
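As a quick illustration of the parser above (the patterns are invented):

    from cStringIO import StringIO
    from bzrlib.ignores import parse_ignore_file

    sample = StringIO("# comment lines are skipped\n*.pyc\n\ntmp*\n")
    print parse_ignore_file(sample)
    # -> set([u'*.pyc', u'tmp*']); blank lines and comments are dropped and
    #    each pattern is passed through globbing.normalize_pattern()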
+
+
+def get_user_ignores():
+ """Get the list of user ignored files, possibly creating it."""
+ path = config.user_ignore_config_filename()
+ patterns = set(USER_DEFAULTS)
+ try:
+ f = open(path, 'rb')
+ except (IOError, OSError), e:
+ # open() shouldn't return an IOError without errno, but just in case
+ err = getattr(e, 'errno', None)
+ if err not in (errno.ENOENT,):
+ raise
+ # Create the ignore file, and just return the default
+ # We want to ignore if we can't write to the file
+ # since get_* should be a safe operation
+ try:
+ _set_user_ignores(USER_DEFAULTS)
+ except (IOError, OSError), e:
+ if e.errno not in (errno.EPERM,):
+ raise
+ return patterns
+
+ try:
+ return parse_ignore_file(f)
+ finally:
+ f.close()
+
+
+def _set_user_ignores(patterns):
+ """Fill out the user ignore file with the given patterns
+
+ This may raise an error if it doesn't have permission to
+ write to the user ignore file.
+ This is mostly used for testing, since it would be
+ bad form to rewrite a user's ignore list.
+ bzrlib only writes this file if it does not exist.
+ """
+ ignore_path = config.user_ignore_config_filename()
+ config.ensure_config_dir_exists()
+
+ # Create an empty file
+ f = open(ignore_path, 'wb')
+ try:
+ for pattern in patterns:
+ f.write(pattern.encode('utf8') + '\n')
+ finally:
+ f.close()
+
+
+def add_unique_user_ignores(new_ignores):
+ """Add entries to the user's ignore list if not present.
+
+ :param new_ignores: A list of ignore patterns
+ :return: The list of ignores that were added
+ """
+ ignored = get_user_ignores()
+ to_add = []
+ for ignore in new_ignores:
+ ignore = globbing.normalize_pattern(ignore)
+ if ignore not in ignored:
+ ignored.add(ignore)
+ to_add.append(ignore)
+
+ if not to_add:
+ return []
+
+ f = open(config.user_ignore_config_filename(), 'ab')
+ try:
+ for pattern in to_add:
+ f.write(pattern.encode('utf8') + '\n')
+ finally:
+ f.close()
+
+ return to_add
+
+
+_runtime_ignores = set()
+
+
+def add_runtime_ignores(ignores):
+ """Add some ignore patterns that only exists in memory.
+
+ This is used by some plugins that want bzr to ignore files,
+ but don't want to change a user's ignore list.
+ (Such as a conversion script that needs to ignore temporary files,
+ but does not want to modify the project's ignore list.)
+
+ :param ignores: A list or generator of ignore patterns.
+ :return: None
+ """
+ global _runtime_ignores
+ _runtime_ignores.update(set(ignores))
+
+
+def get_runtime_ignores():
+ """Get the current set of runtime ignores."""
+ return _runtime_ignores
+
+
+def tree_ignores_add_patterns(tree, name_pattern_list):
+ """Add more ignore patterns to the ignore file in a tree.
+ If the ignore file does not exist then it will be created.
+ The ignore file will be automatically added under version control.
+
+ :param tree: Working tree to update the ignore list.
+ :param name_pattern_list: List of ignore patterns.
+ :return: None
+ """
+ # read in the existing ignores set
+ ifn = tree.abspath(bzrlib.IGNORE_FILENAME)
+ if tree.has_filename(ifn):
+ f = open(ifn, 'rU')
+ try:
+ file_contents = f.read()
+ # figure out what kind of line endings are used
+ newline = getattr(f, 'newlines', None)
+ if type(newline) is tuple:
+ newline = newline[0]
+ elif newline is None:
+ newline = os.linesep
+ finally:
+ f.close()
+ else:
+ file_contents = ""
+ newline = os.linesep
+
+ sio = StringIO(file_contents)
+ try:
+ ignores = parse_ignore_file(sio)
+ finally:
+ sio.close()
+
+ # write out the updated ignores set
+ f = atomicfile.AtomicFile(ifn, 'wb')
+ try:
+ # write the original contents, preserving original line endings
+ f.write(newline.join(file_contents.split('\n')))
+ if len(file_contents) > 0 and not file_contents.endswith('\n'):
+ f.write(newline)
+ for pattern in name_pattern_list:
+ if not pattern in ignores:
+ f.write(pattern.encode('utf-8'))
+ f.write(newline)
+ f.commit()
+ finally:
+ f.close()
+
+ if not tree.path2id(bzrlib.IGNORE_FILENAME):
+ tree.add([bzrlib.IGNORE_FILENAME])
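A hedged sketch of the module-level helpers above. Note that add_unique_user_ignores() appends to the real user ignore file under the bzr configuration directory, so this is illustrative rather than something to run casually.

    from bzrlib import ignores

    added = ignores.add_unique_user_ignores(['*.tmp', '*.pyc'])
    # 'added' holds only the patterns that were not already present in the file

    ignores.add_runtime_ignores(['conversion-scratch-*'])
    print 'conversion-scratch-*' in ignores.get_runtime_ignores()   # True, in memory only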
diff --git a/bzrlib/index.py b/bzrlib/index.py
new file mode 100644
index 0000000..5b64655
--- /dev/null
+++ b/bzrlib/index.py
@@ -0,0 +1,1868 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Indexing facilities."""
+
+from __future__ import absolute_import
+
+__all__ = [
+ 'CombinedGraphIndex',
+ 'GraphIndex',
+ 'GraphIndexBuilder',
+ 'GraphIndexPrefixAdapter',
+ 'InMemoryGraphIndex',
+ ]
+
+from bisect import bisect_right
+from cStringIO import StringIO
+import re
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ bisect_multi,
+ revision as _mod_revision,
+ trace,
+ )
+""")
+from bzrlib import (
+ debug,
+ errors,
+ )
+from bzrlib.static_tuple import StaticTuple
+
+_HEADER_READV = (0, 200)
+_OPTION_KEY_ELEMENTS = "key_elements="
+_OPTION_LEN = "len="
+_OPTION_NODE_REFS = "node_ref_lists="
+_SIGNATURE = "Bazaar Graph Index 1\n"
+
+
+_whitespace_re = re.compile('[\t\n\x0b\x0c\r\x00 ]')
+_newline_null_re = re.compile('[\n\0]')
+
+
+def _has_key_from_parent_map(self, key):
+ """Check if this index has one key.
+
+ If it's possible to check for multiple keys at once through
+ calling get_parent_map that should be faster.
+ """
+ return (key in self.get_parent_map([key]))
+
+
+def _missing_keys_from_parent_map(self, keys):
+ return set(keys) - set(self.get_parent_map(keys))
+
+
+class GraphIndexBuilder(object):
+ """A builder that can build a GraphIndex.
+
+ The resulting graph has the structure::
+
+ _SIGNATURE OPTIONS NODES NEWLINE
+ _SIGNATURE := 'Bazaar Graph Index 1' NEWLINE
+ OPTIONS := 'node_ref_lists=' DIGITS NEWLINE
+ NODES := NODE*
+ NODE := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
+ KEY := Not-whitespace-utf8
+ ABSENT := 'a'
+ REFERENCES := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
+ REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
+ REFERENCE := DIGITS ; digits is the byte offset in the index of the
+ ; referenced key.
+ VALUE := no-newline-no-null-bytes
+ """
+
+ def __init__(self, reference_lists=0, key_elements=1):
+ """Create a GraphIndex builder.
+
+ :param reference_lists: The number of node references lists for each
+ entry.
+ :param key_elements: The number of bytestrings in each key.
+ """
+ self.reference_lists = reference_lists
+ # A dict of {key: (absent, ref_lists, value)}
+ self._nodes = {}
+ # Keys that are referenced but not actually present in this index
+ self._absent_keys = set()
+ self._nodes_by_key = None
+ self._key_length = key_elements
+ self._optimize_for_size = False
+ self._combine_backing_indices = True
+
+ def _check_key(self, key):
+ """Raise BadIndexKey if key is not a valid key for this index."""
+ if type(key) not in (tuple, StaticTuple):
+ raise errors.BadIndexKey(key)
+ if self._key_length != len(key):
+ raise errors.BadIndexKey(key)
+ for element in key:
+ if not element or _whitespace_re.search(element) is not None:
+ raise errors.BadIndexKey(element)
+
+ def _external_references(self):
+ """Return references that are not present in this index.
+ """
+ keys = set()
+ refs = set()
+ # TODO: JAM 2008-11-21 This makes an assumption about how the reference
+ # lists are used. It is currently correct for pack-0.92 through
+ # 1.9, which use the node references (3rd column) second
+ # reference list as the compression parent. Perhaps this should
+ # be moved into something higher up the stack, since it
+ # makes assumptions about how the index is used.
+ if self.reference_lists > 1:
+ for node in self.iter_all_entries():
+ keys.add(node[1])
+ refs.update(node[3][1])
+ return refs - keys
+ else:
+ # If reference_lists == 0 there can be no external references, and
+ # if reference_lists == 1, then there isn't a place to store the
+ # compression parent
+ return set()
+
+ def _get_nodes_by_key(self):
+ if self._nodes_by_key is None:
+ nodes_by_key = {}
+ if self.reference_lists:
+ for key, (absent, references, value) in self._nodes.iteritems():
+ if absent:
+ continue
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key, value, references
+ else:
+ for key, (absent, references, value) in self._nodes.iteritems():
+ if absent:
+ continue
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key, value
+ self._nodes_by_key = nodes_by_key
+ return self._nodes_by_key
+
+ def _update_nodes_by_key(self, key, value, node_refs):
+ """Update the _nodes_by_key dict with a new key.
+
+ For a key of (foo, bar, baz) create
+ _nodes_by_key[foo][bar][baz] = key_value
+ """
+ if self._nodes_by_key is None:
+ return
+ key_dict = self._nodes_by_key
+ if self.reference_lists:
+ key_value = StaticTuple(key, value, node_refs)
+ else:
+ key_value = StaticTuple(key, value)
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key_value
+
+ def _check_key_ref_value(self, key, references, value):
+ """Check that 'key' and 'references' are all valid.
+
+ :param key: A key tuple. Must conform to the key interface (be a tuple,
+ be of the right length, not have any whitespace or nulls in any key
+ element.)
+ :param references: An iterable of reference lists. Something like
+ [[(ref, key)], [(ref, key), (other, key)]]
+ :param value: The value associated with this key. Must not contain
+ newlines or null characters.
+ :return: (node_refs, absent_references)
+
+ * node_refs: basically a packed form of 'references' where all
+ iterables are tuples
+ * absent_references: reference keys that are not in self._nodes.
+ This may contain duplicates if the same key is referenced in
+ multiple lists.
+ """
+ as_st = StaticTuple.from_sequence
+ self._check_key(key)
+ if _newline_null_re.search(value) is not None:
+ raise errors.BadIndexValue(value)
+ if len(references) != self.reference_lists:
+ raise errors.BadIndexValue(references)
+ node_refs = []
+ absent_references = []
+ for reference_list in references:
+ for reference in reference_list:
+ # If reference *is* in self._nodes, then we know it has already
+ # been checked.
+ if reference not in self._nodes:
+ self._check_key(reference)
+ absent_references.append(reference)
+ reference_list = as_st([as_st(ref).intern()
+ for ref in reference_list])
+ node_refs.append(reference_list)
+ return as_st(node_refs), absent_references
+
+ def add_node(self, key, value, references=()):
+ """Add a node to the index.
+
+ :param key: The key. keys are non-empty tuples containing
+ as many whitespace-free utf8 bytestrings as the key length
+ defined for this index.
+ :param references: An iterable of iterables of keys. Each is a
+ reference to another key.
+ :param value: The value to associate with the key. It may be any
+ bytes as long as it does not contain \\0 or \\n.
+ """
+ (node_refs,
+ absent_references) = self._check_key_ref_value(key, references, value)
+ if key in self._nodes and self._nodes[key][0] != 'a':
+ raise errors.BadIndexDuplicateKey(key, self)
+ for reference in absent_references:
+ # There may be duplicates, but I don't think it is worth worrying
+ # about
+ self._nodes[reference] = ('a', (), '')
+ self._absent_keys.update(absent_references)
+ self._absent_keys.discard(key)
+ self._nodes[key] = ('', node_refs, value)
+ if self._nodes_by_key is not None and self._key_length > 1:
+ self._update_nodes_by_key(key, value, node_refs)
+
+ def clear_cache(self):
+ """See GraphIndex.clear_cache()
+
+ This is a no-op, but we need the api to conform to a generic 'Index'
+ abstraction.
+ """
+
+ def finish(self):
+ """Finish the index.
+
+ :returns: cStringIO holding the full content of the index as it
+ should be written to disk.
+ """
+ lines = [_SIGNATURE]
+ lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
+ lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
+ key_count = len(self._nodes) - len(self._absent_keys)
+ lines.append(_OPTION_LEN + str(key_count) + '\n')
+ prefix_length = sum(len(x) for x in lines)
+ # references are byte offsets. To avoid having to do nasty
+ # polynomial work to resolve offsets (references to later in the
+ # file cannot be determined until all the in-between references have
+ # been calculated too) we pad the offsets with 0's to make them be
+ # of consistent length. Using binary offsets would break the trivial
+ # file parsing.
+ # To calculate the width of zeros needed we do three passes:
+ # one to gather all the non-reference data and the number of references,
+ # one to pad all the data with reference-length and determine entry
+ # addresses,
+ # and one to serialise.
+
+ # forward sorted by key. In future we may consider topological sorting,
+ # at the cost of table scans for direct lookup, or a second index for
+ # direct lookup
+ nodes = sorted(self._nodes.items())
+ # if we do not prepass, we don't know how long it will be up front.
+ expected_bytes = None
+ # we only need to pre-pass if we have reference lists at all.
+ if self.reference_lists:
+ key_offset_info = []
+ non_ref_bytes = prefix_length
+ total_references = 0
+ # TODO use simple multiplication for the constants in this loop.
+ for key, (absent, references, value) in nodes:
+ # record the offset known *so far* for this key:
+ # the non reference bytes to date, and the total references to
+ # date - saves reaccumulating on the second pass
+ key_offset_info.append((key, non_ref_bytes, total_references))
+ # key is literal, value is literal, there are 3 null's, 1 NL
+ # key is variable length tuple, \x00 between elements
+ non_ref_bytes += sum(len(element) for element in key)
+ if self._key_length > 1:
+ non_ref_bytes += self._key_length - 1
+ # value is literal bytes, there are 3 null's, 1 NL.
+ non_ref_bytes += len(value) + 3 + 1
+ # one byte for absent if set.
+ if absent:
+ non_ref_bytes += 1
+ elif self.reference_lists:
+ # (ref_lists -1) tabs
+ non_ref_bytes += self.reference_lists - 1
+ # (ref-1 cr's per ref_list)
+ for ref_list in references:
+ # how many references across the whole file?
+ total_references += len(ref_list)
+ # accrue reference separators
+ if ref_list:
+ non_ref_bytes += len(ref_list) - 1
+ # how many digits are needed to represent the total byte count?
+ digits = 1
+ possible_total_bytes = non_ref_bytes + total_references*digits
+ while 10 ** digits < possible_total_bytes:
+ digits += 1
+ possible_total_bytes = non_ref_bytes + total_references*digits
+ expected_bytes = possible_total_bytes + 1 # terminating newline
+ # resolve key addresses.
+ key_addresses = {}
+ for key, non_ref_bytes, total_references in key_offset_info:
+ key_addresses[key] = non_ref_bytes + total_references*digits
+ # serialise
+ format_string = '%%0%sd' % digits
+ for key, (absent, references, value) in nodes:
+ flattened_references = []
+ for ref_list in references:
+ ref_addresses = []
+ for reference in ref_list:
+ ref_addresses.append(format_string % key_addresses[reference])
+ flattened_references.append('\r'.join(ref_addresses))
+ string_key = '\x00'.join(key)
+ lines.append("%s\x00%s\x00%s\x00%s\n" % (string_key, absent,
+ '\t'.join(flattened_references), value))
+ lines.append('\n')
+ result = StringIO(''.join(lines))
+ if expected_bytes and len(result.getvalue()) != expected_bytes:
+ raise errors.BzrError('Failed index creation. Internal error:'
+ ' mismatched output length and expected length: %d %d' %
+ (len(result.getvalue()), expected_bytes))
+ return result
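A hedged sketch of driving the builder end-to-end; keys and values are invented. The bytes returned by finish() follow the grammar in the class docstring: the signature, three option lines, one NUL-separated line per node, and a trailing newline.

    from bzrlib.index import GraphIndexBuilder

    builder = GraphIndexBuilder(reference_lists=1, key_elements=1)
    builder.add_node(('parent',), 'value-a', ([],))              # no references
    builder.add_node(('child',), 'value-b', ([('parent',)],))    # one reference
    content = builder.finish().getvalue()
    # content begins:
    #   Bazaar Graph Index 1
    #   node_ref_lists=1
    #   key_elements=1
    #   len=2
    # then one "key\x00absent\x00references\x00value" line per node, where the
    # reference field holds zero-padded byte offsets of the referenced keys.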
+
+ def set_optimize(self, for_size=None, combine_backing_indices=None):
+ """Change how the builder tries to optimize the result.
+
+ :param for_size: Tell the builder to try and make the index as small as
+ possible.
+ :param combine_backing_indices: If the builder spills to disk to save
+ memory, should the on-disk indices be combined. Set to True if you
+ are going to be probing the index, but to False if you are not. (If
+ you are not querying, then the time spent combining is wasted.)
+ :return: None
+ """
+ # GraphIndexBuilder itself doesn't pay attention to the flag yet, but
+ # other builders do.
+ if for_size is not None:
+ self._optimize_for_size = for_size
+ if combine_backing_indices is not None:
+ self._combine_backing_indices = combine_backing_indices
+
+ def find_ancestry(self, keys, ref_list_num):
+ """See CombinedGraphIndex.find_ancestry()"""
+ pending = set(keys)
+ parent_map = {}
+ missing_keys = set()
+ while pending:
+ next_pending = set()
+ for _, key, value, ref_lists in self.iter_entries(pending):
+ parent_keys = ref_lists[ref_list_num]
+ parent_map[key] = parent_keys
+ next_pending.update([p for p in parent_keys if p not in
+ parent_map])
+ missing_keys.update(pending.difference(parent_map))
+ pending = next_pending
+ return parent_map, missing_keys
+
+
+class GraphIndex(object):
+ """An index for data with embedded graphs.
+
+ The index maps keys to a list of key reference lists, and a value.
+ Each node has the same number of key reference lists. Each key reference
+ list can be empty or an arbitrary length. The value is an opaque NULL
+ terminated string without any newlines. The storage of the index is
+ hidden in the interface: keys and key references are always tuples of
+ bytestrings, never the internal representation (e.g. dictionary offsets).
+
+ It is presumed that the index will not be mutated - it is static data.
+
+ Successive iter_all_entries calls will read the entire index each time.
+ Additionally, iter_entries calls will read the index linearly until the
+ desired keys are found. XXX: This must be fixed before the index is
+ suitable for production use. :XXX
+ """
+
+ def __init__(self, transport, name, size, unlimited_cache=False, offset=0):
+ """Open an index called name on transport.
+
+ :param transport: A bzrlib.transport.Transport.
+ :param name: A path to provide to transport API calls.
+ :param size: The size of the index in bytes. This is used for bisection
+ logic to perform partial index reads. While the size could be
+ obtained by statting the file, this introduces an additional round
+ trip as well as requiring stat'able transports, both of which are
+ avoided by having it supplied. If size is None, then bisection
+ support will be disabled and accessing the index will just stream
+ all the data.
+ :param offset: Instead of starting the index data at offset 0, start it
+ at an arbitrary offset.
+ """
+ self._transport = transport
+ self._name = name
+ # Becomes a dict of key:(value, reference-list-byte-locations) used by
+ # the bisection interface to store parsed but not resolved keys.
+ self._bisect_nodes = None
+ # Becomes a dict of key:(value, reference-list-keys) which are ready to
+ # be returned directly to callers.
+ self._nodes = None
+ # a sorted list of slice-addresses for the parsed bytes of the file.
+ # e.g. (0,1) would mean that byte 0 is parsed.
+ self._parsed_byte_map = []
+ # a sorted list of keys matching each slice address for parsed bytes
+ # e.g. (None, 'foo@bar') would mean that the first byte contained no
+ # key, and the end byte of the slice is the end of the data for 'foo@bar'
+ self._parsed_key_map = []
+ self._key_count = None
+ self._keys_by_offset = None
+ self._nodes_by_key = None
+ self._size = size
+ # The number of bytes we've read so far in trying to process this file
+ self._bytes_read = 0
+ self._base_offset = offset
+
+ def __eq__(self, other):
+ """Equal when self and other were created with the same parameters."""
+ return (
+ type(self) == type(other) and
+ self._transport == other._transport and
+ self._name == other._name and
+ self._size == other._size)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__,
+ self._transport.abspath(self._name))
+
+ def _buffer_all(self, stream=None):
+ """Buffer all the index data.
+
+ Mutates self._nodes and self._keys_by_offset.
+ """
+ if self._nodes is not None:
+ # We already did this
+ return
+ if 'index' in debug.debug_flags:
+ trace.mutter('Reading entire index %s',
+ self._transport.abspath(self._name))
+ if stream is None:
+ stream = self._transport.get(self._name)
+ if self._base_offset != 0:
+ # This is wasteful, but it is better than dealing with
+ # adjusting all the offsets, etc.
+ stream = StringIO(stream.read()[self._base_offset:])
+ self._read_prefix(stream)
+ self._expected_elements = 3 + self._key_length
+ line_count = 0
+ # raw data keyed by offset
+ self._keys_by_offset = {}
+ # ready-to-return key:value or key:value, node_ref_lists
+ self._nodes = {}
+ self._nodes_by_key = None
+ trailers = 0
+ pos = stream.tell()
+ lines = stream.read().split('\n')
+ # GZ 2009-09-20: Should really use a try/finally block to ensure close
+ stream.close()
+ del lines[-1]
+ _, _, _, trailers = self._parse_lines(lines, pos)
+ for key, absent, references, value in self._keys_by_offset.itervalues():
+ if absent:
+ continue
+ # resolve references:
+ if self.node_ref_lists:
+ node_value = (value, self._resolve_references(references))
+ else:
+ node_value = value
+ self._nodes[key] = node_value
+ # cache the keys for quick set intersections
+ if trailers != 1:
+ # there must be one line - the empty trailer line.
+ raise errors.BadIndexData(self)
+
+ def clear_cache(self):
+ """Clear out any cached/memoized values.
+
+ This can be called at any time, but generally it is used when we have
+ extracted some information, but don't expect to be requesting any more
+ from this index.
+ """
+
+ def external_references(self, ref_list_num):
+ """Return references that are not present in this index.
+ """
+ self._buffer_all()
+ if ref_list_num + 1 > self.node_ref_lists:
+ raise ValueError('No ref list %d, index has %d ref lists'
+ % (ref_list_num, self.node_ref_lists))
+ refs = set()
+ nodes = self._nodes
+ for key, (value, ref_lists) in nodes.iteritems():
+ ref_list = ref_lists[ref_list_num]
+ refs.update([ref for ref in ref_list if ref not in nodes])
+ return refs
+
+ def _get_nodes_by_key(self):
+ if self._nodes_by_key is None:
+ nodes_by_key = {}
+ if self.node_ref_lists:
+ for key, (value, references) in self._nodes.iteritems():
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key, value, references
+ else:
+ for key, value in self._nodes.iteritems():
+ key_dict = nodes_by_key
+ for subkey in key[:-1]:
+ key_dict = key_dict.setdefault(subkey, {})
+ key_dict[key[-1]] = key, value
+ self._nodes_by_key = nodes_by_key
+ return self._nodes_by_key
+
+ def iter_all_entries(self):
+ """Iterate over all keys within the index.
+
+ :return: An iterable of (index, key, value) or (index, key, value, reference_lists).
+ The former tuple is used when there are no reference lists in the
+ index, making the API compatible with simple key:value index types.
+ There is no defined order for the result iteration - it will be in
+ the most efficient order for the index.
+ """
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(3,
+ "iter_all_entries scales with size of history.")
+ if self._nodes is None:
+ self._buffer_all()
+ if self.node_ref_lists:
+ for key, (value, node_ref_lists) in self._nodes.iteritems():
+ yield self, key, value, node_ref_lists
+ else:
+ for key, value in self._nodes.iteritems():
+ yield self, key, value
+
+ def _read_prefix(self, stream):
+ signature = stream.read(len(self._signature()))
+ if not signature == self._signature():
+ raise errors.BadIndexFormatSignature(self._name, GraphIndex)
+ options_line = stream.readline()
+ if not options_line.startswith(_OPTION_NODE_REFS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):-1])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = stream.readline()
+ if not options_line.startswith(_OPTION_KEY_ELEMENTS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):-1])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = stream.readline()
+ if not options_line.startswith(_OPTION_LEN):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._key_count = int(options_line[len(_OPTION_LEN):-1])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+
+ def _resolve_references(self, references):
+ """Return the resolved key references for references.
+
+ References are resolved by looking up the location of the key in the
+ _keys_by_offset map and substituting the key name, preserving ordering.
+
+ :param references: An iterable of iterables of key locations. e.g.
+ [[123, 456], [123]]
+ :return: A tuple of tuples of keys.
+ """
+ node_refs = []
+ for ref_list in references:
+ node_refs.append(tuple([self._keys_by_offset[ref][0] for ref in ref_list]))
+ return tuple(node_refs)
+
+ def _find_index(self, range_map, key):
+ """Helper for the _parsed_*_index calls.
+
+ Given a range map - [(start, end), ...], finds the index of the range
+ in the map for key if it is in the map, and if it is not there, the
+ immediately preceding range in the map.
+ """
+ result = bisect_right(range_map, key) - 1
+ if result + 1 < len(range_map):
+ # check the border condition, it may be in result + 1
+ if range_map[result + 1][0] == key[0]:
+ return result + 1
+ return result
+
+ def _parsed_byte_index(self, offset):
+ """Return the index of the entry immediately before offset.
+
+ e.g. if the parsed map has regions 0,10 and 11,12 parsed, meaning that
+ there is one unparsed byte (the 11th, addressed as [10]), then:
+ asking for 0 will return 0
+ asking for 10 will return 0
+ asking for 11 will return 1
+ asking for 12 will return 1
+ """
+ key = (offset, 0)
+ return self._find_index(self._parsed_byte_map, key)
+
+ def _parsed_key_index(self, key):
+ """Return the index of the entry immediately before key.
+
+ e.g. if the parsed map has regions (None, 'a') and ('b','c') parsed,
+ meaning that keys from None to 'a' inclusive, and 'b' to 'c' inclusive
+ have been parsed, then:
+ asking for '' will return 0
+ asking for 'a' will return 0
+ asking for 'b' will return 1
+ asking for 'e' will return 1
+ """
+ search_key = (key, None)
+ return self._find_index(self._parsed_key_map, search_key)
+
+ def _is_parsed(self, offset):
+ """Returns True if offset has been parsed."""
+ index = self._parsed_byte_index(offset)
+ if index == len(self._parsed_byte_map):
+ return offset < self._parsed_byte_map[index - 1][1]
+ start, end = self._parsed_byte_map[index]
+ return offset >= start and offset < end
+
+ def _iter_entries_from_total_buffer(self, keys):
+ """Iterate over keys when the entire index is parsed."""
+ # Note: See the note in BTreeBuilder.iter_entries for why we don't use
+ # .intersection() here
+ nodes = self._nodes
+ keys = [key for key in keys if key in nodes]
+ if self.node_ref_lists:
+ for key in keys:
+ value, node_refs = nodes[key]
+ yield self, key, value, node_refs
+ else:
+ for key in keys:
+ yield self, key, nodes[key]
+
+ def iter_entries(self, keys):
+ """Iterate over keys within the index.
+
+ :param keys: An iterable providing the keys to be retrieved.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys supplied. No additional keys will be returned, and every
+ key supplied that is in the index will be returned.
+ """
+ keys = set(keys)
+ if not keys:
+ return []
+ if self._size is None and self._nodes is None:
+ self._buffer_all()
+
+ # We fit about 20 keys per minimum-read (4K), so if we are looking for
+ # more than 1/20th of the index it's likely (assuming homogeneous key
+ # spread) that we'll read the entire index. If we're going to do that,
+ # buffer the whole thing. A better analysis might take key spread into
+ # account - but B+Tree indices are better anyway.
+ # We could look at all data read, and use a threshold there, which will
+ # trigger on ancestry walks, but that is not yet fully mapped out.
+ if self._nodes is None and len(keys) * 20 > self.key_count():
+ self._buffer_all()
+ if self._nodes is not None:
+ return self._iter_entries_from_total_buffer(keys)
+ else:
+ return (result[1] for result in bisect_multi.bisect_multi_bytes(
+ self._lookup_keys_via_location, self._size, keys))
+
+ def iter_entries_prefix(self, keys):
+ """Iterate over keys within the index using prefix matching.
+
+ Prefix matching is applied within the tuple of a key, not to within
+ the bytestring of each key element. e.g. if you have the keys ('foo',
+ 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
+ only the former key is returned.
+
+ WARNING: Note that this method currently causes a full index parse
+ unconditionally (which is reasonably appropriate as it is a means for
+ thunking many small indices into one larger one and still supplies
+ iter_all_entries at the thunk layer).
+
+ :param keys: An iterable providing the key prefixes to be retrieved.
+ Each key prefix takes the form of a tuple the length of a key, but
+ with the last N elements 'None' rather than a regular bytestring.
+ The first element cannot be 'None'.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys with a matching prefix to those supplied. No additional keys
+ will be returned, and every match that is in the index will be
+ returned.
+ """
+ keys = set(keys)
+ if not keys:
+ return
+ # load data - also finds key lengths
+ if self._nodes is None:
+ self._buffer_all()
+ if self._key_length == 1:
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ if self.node_ref_lists:
+ value, node_refs = self._nodes[key]
+ yield self, key, value, node_refs
+ else:
+ yield self, key, self._nodes[key]
+ return
+ nodes_by_key = self._get_nodes_by_key()
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ # find what it refers to:
+ key_dict = nodes_by_key
+ elements = list(key)
+ # find the subdict whose contents should be returned.
+ try:
+ while len(elements) and elements[0] is not None:
+ key_dict = key_dict[elements[0]]
+ elements.pop(0)
+ except KeyError:
+ # a non-existent lookup.
+ continue
+ if len(elements):
+ dicts = [key_dict]
+ while dicts:
+ key_dict = dicts.pop(-1)
+ # can't be empty or would not exist
+ item, value = key_dict.iteritems().next()
+ if type(value) == dict:
+ # push keys
+ dicts.extend(key_dict.itervalues())
+ else:
+ # yield keys
+ for value in key_dict.itervalues():
+ # each value is the key:value:node refs tuple
+ # ready to yield.
+ yield (self, ) + value
+ else:
+ # the last thing looked up was a terminal element
+ yield (self, ) + key_dict
+
+ def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
+ """See BTreeIndex._find_ancestors."""
+ # The api can be implemented as a trivial overlay on top of
+ # iter_entries, it is not an efficient implementation, but it at least
+ # gets the job done.
+ found_keys = set()
+ search_keys = set()
+ for index, key, value, refs in self.iter_entries(keys):
+ parent_keys = refs[ref_list_num]
+ found_keys.add(key)
+ parent_map[key] = parent_keys
+ search_keys.update(parent_keys)
+ # Figure out what, if anything, was missing
+ missing_keys.update(set(keys).difference(found_keys))
+ search_keys = search_keys.difference(parent_map)
+ return search_keys
+
+ def key_count(self):
+ """Return an estimate of the number of keys in this index.
+
+ For GraphIndex the estimate is exact.
+ """
+ if self._key_count is None:
+ self._read_and_parse([_HEADER_READV])
+ return self._key_count
+
+ def _lookup_keys_via_location(self, location_keys):
+ """Public interface for implementing bisection.
+
+ If _buffer_all has been called, then all the data for the index is in
+ memory, and this method should not be called, as it uses a separate
+ cache because it cannot pre-resolve all indices, which buffer_all does
+ for performance.
+
+ :param location_keys: A list of location(byte offset), key tuples.
+ :return: A list of (location_key, result) tuples as expected by
+ bzrlib.bisect_multi.bisect_multi_bytes.
+ """
+ # Possible improvements:
+ # - only bisect lookup each key once
+ # - sort the keys first, and use that to reduce the bisection window
+ # -----
+ # this progresses in three parts:
+ # read data
+ # parse it
+ # attempt to answer the question from the now in memory data.
+ # build the readv request
+ # for each location, ask for 800 bytes - much more than any row we've
+ # seen anywhere.
+ readv_ranges = []
+ for location, key in location_keys:
+ # can we answer from cache?
+ if self._bisect_nodes and key in self._bisect_nodes:
+ # We have the key parsed.
+ continue
+ index = self._parsed_key_index(key)
+ if (len(self._parsed_key_map) and
+ self._parsed_key_map[index][0] <= key and
+ (self._parsed_key_map[index][1] >= key or
+ # end of the file has been parsed
+ self._parsed_byte_map[index][1] == self._size)):
+ # the key has been parsed, so no lookup is needed even if it's
+ # not present.
+ continue
+ # - if we have examined this part of the file already - yes
+ index = self._parsed_byte_index(location)
+ if (len(self._parsed_byte_map) and
+ self._parsed_byte_map[index][0] <= location and
+ self._parsed_byte_map[index][1] > location):
+ # the byte region has been parsed, so no read is needed.
+ continue
+ length = 800
+ if location + length > self._size:
+ length = self._size - location
+ # todo, trim out parsed locations.
+ if length > 0:
+ readv_ranges.append((location, length))
+ # read the header if needed
+ if self._bisect_nodes is None:
+ readv_ranges.append(_HEADER_READV)
+ self._read_and_parse(readv_ranges)
+ result = []
+ if self._nodes is not None:
+ # _read_and_parse triggered a _buffer_all because we requested the
+ # whole data range
+ for location, key in location_keys:
+ if key not in self._nodes: # not present
+ result.append(((location, key), False))
+ elif self.node_ref_lists:
+ value, refs = self._nodes[key]
+ result.append(((location, key),
+ (self, key, value, refs)))
+ else:
+ result.append(((location, key),
+ (self, key, self._nodes[key])))
+ return result
+ # generate results:
+ # - figure out <, >, missing, present
+ # - resolve references for present keys so we can return them.
+ # keys that we cannot answer until we resolve references
+ pending_references = []
+ pending_locations = set()
+ for location, key in location_keys:
+ # can we answer from cache?
+ if key in self._bisect_nodes:
+ # the key has been parsed, so no lookup is needed
+ if self.node_ref_lists:
+ # the references may not have been all parsed.
+ value, refs = self._bisect_nodes[key]
+ wanted_locations = []
+ for ref_list in refs:
+ for ref in ref_list:
+ if ref not in self._keys_by_offset:
+ wanted_locations.append(ref)
+ if wanted_locations:
+ pending_locations.update(wanted_locations)
+ pending_references.append((location, key))
+ continue
+ result.append(((location, key), (self, key,
+ value, self._resolve_references(refs))))
+ else:
+ result.append(((location, key),
+ (self, key, self._bisect_nodes[key])))
+ continue
+ else:
+ # has the region the key should be in, been parsed?
+ index = self._parsed_key_index(key)
+ if (self._parsed_key_map[index][0] <= key and
+ (self._parsed_key_map[index][1] >= key or
+ # end of the file has been parsed
+ self._parsed_byte_map[index][1] == self._size)):
+ result.append(((location, key), False))
+ continue
+ # no, is the key above or below the probed location:
+ # get the range of the probed & parsed location
+ index = self._parsed_byte_index(location)
+ # if the key is below the start of the range, it's below
+ if key < self._parsed_key_map[index][0]:
+ direction = -1
+ else:
+ direction = +1
+ result.append(((location, key), direction))
+ readv_ranges = []
+ # lookup data to resolve references
+ for location in pending_locations:
+ length = 800
+ if location + length > self._size:
+ length = self._size - location
+ # TODO: trim out parsed locations (e.g. if the 800 is into the
+ # parsed region trim it, and don't use the adjust_for_latency
+ # facility)
+ if length > 0:
+ readv_ranges.append((location, length))
+ self._read_and_parse(readv_ranges)
+ if self._nodes is not None:
+ # The _read_and_parse triggered a _buffer_all, grab the data and
+ # return it
+ for location, key in pending_references:
+ value, refs = self._nodes[key]
+ result.append(((location, key), (self, key, value, refs)))
+ return result
+ for location, key in pending_references:
+ # answer key references we had to look-up-late.
+ value, refs = self._bisect_nodes[key]
+ result.append(((location, key), (self, key,
+ value, self._resolve_references(refs))))
+ return result
+
+ def _parse_header_from_bytes(self, bytes):
+ """Parse the header from a region of bytes.
+
+ :param bytes: The data to parse.
+ :return: An offset, data tuple such as readv yields, for the unparsed
+ data (which may have length 0).
+ """
+ signature = bytes[0:len(self._signature())]
+ if not signature == self._signature():
+ raise errors.BadIndexFormatSignature(self._name, GraphIndex)
+ lines = bytes[len(self._signature()):].splitlines()
+ options_line = lines[0]
+ if not options_line.startswith(_OPTION_NODE_REFS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = lines[1]
+ if not options_line.startswith(_OPTION_KEY_ELEMENTS):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ options_line = lines[2]
+ if not options_line.startswith(_OPTION_LEN):
+ raise errors.BadIndexOptions(self)
+ try:
+ self._key_count = int(options_line[len(_OPTION_LEN):])
+ except ValueError:
+ raise errors.BadIndexOptions(self)
+ # calculate the bytes we have processed
+ header_end = (len(signature) + len(lines[0]) + len(lines[1]) +
+ len(lines[2]) + 3)
+ self._parsed_bytes(0, None, header_end, None)
+ # setup parsing state
+ self._expected_elements = 3 + self._key_length
+ # raw data keyed by offset
+ self._keys_by_offset = {}
+ # keys with the value and node references
+ self._bisect_nodes = {}
+ return header_end, bytes[header_end:]
+
+ def _parse_region(self, offset, data):
+ """Parse node data returned from a readv operation.
+
+ :param offset: The byte offset the data starts at.
+ :param data: The data to parse.
+ """
+ # trim the data.
+ # end first:
+ end = offset + len(data)
+ high_parsed = offset
+ while True:
+ # Trivial test - if the current index's end is within the
+ # low-matching parsed range, we're done.
+ index = self._parsed_byte_index(high_parsed)
+ if end < self._parsed_byte_map[index][1]:
+ return
+ # print "[%d:%d]" % (offset, end), \
+ # self._parsed_byte_map[index:index + 2]
+ high_parsed, last_segment = self._parse_segment(
+ offset, data, end, index)
+ if last_segment:
+ return
+
+ def _parse_segment(self, offset, data, end, index):
+ """Parse one segment of data.
+
+ :param offset: Where 'data' begins in the file.
+ :param data: Some data to parse a segment of.
+ :param end: Where data ends
+ :param index: The current index into the parsed bytes map.
+ :return: True if the parsed segment is the last possible one in the
+ range of data.
+ :return: high_parsed_byte, last_segment.
+ high_parsed_byte is the location of the highest parsed byte in this
+ segment, last_segment is True if the parsed segment is the last
+ possible one in the data block.
+ """
+ # default is to use all data
+ trim_end = None
+ # accommodate overlap with data before this.
+ if offset < self._parsed_byte_map[index][1]:
+ # overlaps the lower parsed region
+ # skip the parsed data
+ trim_start = self._parsed_byte_map[index][1] - offset
+ # don't trim the start for \n
+ start_adjacent = True
+ elif offset == self._parsed_byte_map[index][1]:
+ # abuts the lower parsed region
+ # use all data
+ trim_start = None
+ # do not trim anything
+ start_adjacent = True
+ else:
+ # does not overlap the lower parsed region
+ # use all data
+ trim_start = None
+ # but trim the leading \n
+ start_adjacent = False
+ if end == self._size:
+ # lines up to the end of all data:
+ # use it all
+ trim_end = None
+ # do not strip to the last \n
+ end_adjacent = True
+ last_segment = True
+ elif index + 1 == len(self._parsed_byte_map):
+ # at the end of the parsed data
+ # use it all
+ trim_end = None
+ # but strip to the last \n
+ end_adjacent = False
+ last_segment = True
+ elif end == self._parsed_byte_map[index + 1][0]:
+ # abuts the next parsed region
+ # use it all
+ trim_end = None
+ # do not strip to the last \n
+ end_adjacent = True
+ last_segment = True
+ elif end > self._parsed_byte_map[index + 1][0]:
+ # overlaps into the next parsed region
+ # only consider the unparsed data
+ trim_end = self._parsed_byte_map[index + 1][0] - offset
+ # do not strip to the last \n as we know it's an entire record
+ end_adjacent = True
+ last_segment = end < self._parsed_byte_map[index + 1][1]
+ else:
+ # does not overlap into the next region
+ # use it all
+ trim_end = None
+ # but strip to the last \n
+ end_adjacent = False
+ last_segment = True
+ # now find bytes to discard if needed
+ if not start_adjacent:
+ # work around python bug in rfind
+ if trim_start is None:
+ trim_start = data.find('\n') + 1
+ else:
+ trim_start = data.find('\n', trim_start) + 1
+ if not (trim_start != 0):
+ raise AssertionError('no \n was present')
+ # print 'removing start', offset, trim_start, repr(data[:trim_start])
+ if not end_adjacent:
+ # work around python bug in rfind
+ if trim_end is None:
+ trim_end = data.rfind('\n') + 1
+ else:
+ trim_end = data.rfind('\n', None, trim_end) + 1
+ if not (trim_end != 0):
+ raise AssertionError('no \n was present')
+ # print 'removing end', offset, trim_end, repr(data[trim_end:])
+ # adjust offset and data to the parseable data.
+ trimmed_data = data[trim_start:trim_end]
+ if not (trimmed_data):
+ raise AssertionError('read unneeded data [%d:%d] from [%d:%d]'
+ % (trim_start, trim_end, offset, offset + len(data)))
+ if trim_start:
+ offset += trim_start
+ # print "parsing", repr(trimmed_data)
+ # splitlines mangles the \r delimiters.. don't use it.
+ lines = trimmed_data.split('\n')
+ del lines[-1]
+ pos = offset
+ first_key, last_key, nodes, _ = self._parse_lines(lines, pos)
+ for key, value in nodes:
+ self._bisect_nodes[key] = value
+ self._parsed_bytes(offset, first_key,
+ offset + len(trimmed_data), last_key)
+ return offset + len(trimmed_data), last_segment
+
+ def _parse_lines(self, lines, pos):
+ key = None
+ first_key = None
+ trailers = 0
+ nodes = []
+ for line in lines:
+ if line == '':
+ # must be at the end
+ if self._size:
+ if not (self._size == pos + 1):
+ raise AssertionError("%s %s" % (self._size, pos))
+ trailers += 1
+ continue
+ elements = line.split('\0')
+ if len(elements) != self._expected_elements:
+ raise errors.BadIndexData(self)
+ # keys are tuples. Each element is a string that may occur many
+ # times, so we intern them to save space. AB, RC, 200807
+ key = tuple([intern(element) for element in elements[:self._key_length]])
+ if first_key is None:
+ first_key = key
+ absent, references, value = elements[-3:]
+ ref_lists = []
+ for ref_string in references.split('\t'):
+ ref_lists.append(tuple([
+ int(ref) for ref in ref_string.split('\r') if ref
+ ]))
+ ref_lists = tuple(ref_lists)
+ self._keys_by_offset[pos] = (key, absent, ref_lists, value)
+ pos += len(line) + 1 # +1 for the \n
+ if absent:
+ continue
+ if self.node_ref_lists:
+ node_value = (value, ref_lists)
+ else:
+ node_value = value
+ nodes.append((key, node_value))
+ # print "parsed ", key
+ return first_key, key, nodes, trailers
+
+ def _parsed_bytes(self, start, start_key, end, end_key):
+ """Mark the bytes from start to end as parsed.
+
+ Calling self._parsed_bytes(1,2) will mark one byte (the one at offset
+ 1) as parsed.
+
+ :param start: The start of the parsed region.
+ :param end: The end of the parsed region.
+ """
+ index = self._parsed_byte_index(start)
+ new_value = (start, end)
+ new_key = (start_key, end_key)
+ if index == -1:
+ # first range parsed is always the beginning.
+ self._parsed_byte_map.insert(index, new_value)
+ self._parsed_key_map.insert(index, new_key)
+ return
+ # four cases:
+ # new region
+ # extend lower region
+ # extend higher region
+ # combine two regions
+ if (index + 1 < len(self._parsed_byte_map) and
+ self._parsed_byte_map[index][1] == start and
+ self._parsed_byte_map[index + 1][0] == end):
+ # combine two regions
+ self._parsed_byte_map[index] = (self._parsed_byte_map[index][0],
+ self._parsed_byte_map[index + 1][1])
+ self._parsed_key_map[index] = (self._parsed_key_map[index][0],
+ self._parsed_key_map[index + 1][1])
+ del self._parsed_byte_map[index + 1]
+ del self._parsed_key_map[index + 1]
+ elif self._parsed_byte_map[index][1] == start:
+ # extend the lower entry
+ self._parsed_byte_map[index] = (
+ self._parsed_byte_map[index][0], end)
+ self._parsed_key_map[index] = (
+ self._parsed_key_map[index][0], end_key)
+ elif (index + 1 < len(self._parsed_byte_map) and
+ self._parsed_byte_map[index + 1][0] == end):
+ # extend the higher entry
+ self._parsed_byte_map[index + 1] = (
+ start, self._parsed_byte_map[index + 1][1])
+ self._parsed_key_map[index + 1] = (
+ start_key, self._parsed_key_map[index + 1][1])
+ else:
+ # new entry
+ self._parsed_byte_map.insert(index + 1, new_value)
+ self._parsed_key_map.insert(index + 1, new_key)
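The four cases handled above can be seen in isolation with a small standalone sketch (plain lists, not the bzrlib API) that keeps a sorted list of (start, end) ranges and coalesces neighbours the same way:

    from bisect import bisect_right

    def record_parsed(ranges, start, end):
        """Insert (start, end) into the sorted 'ranges' list, merging neighbours."""
        index = bisect_right(ranges, (start, 0)) - 1
        if index == -1:
            ranges.insert(0, (start, end))                    # first range parsed
        elif (index + 1 < len(ranges) and ranges[index][1] == start
                and ranges[index + 1][0] == end):
            ranges[index] = (ranges[index][0], ranges[index + 1][1])
            del ranges[index + 1]                             # combine two regions
        elif ranges[index][1] == start:
            ranges[index] = (ranges[index][0], end)           # extend lower region
        elif index + 1 < len(ranges) and ranges[index + 1][0] == end:
            ranges[index + 1] = (start, ranges[index + 1][1]) # extend higher region
        else:
            ranges.insert(index + 1, (start, end))            # new region

    ranges = []
    for s, e in [(0, 200), (800, 1600), (200, 800)]:
        record_parsed(ranges, s, e)
    print ranges    # [(0, 1600)] once the gap between the first two reads is filled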
+
+ def _read_and_parse(self, readv_ranges):
+ """Read the ranges and parse the resulting data.
+
+ :param readv_ranges: A prepared readv range list.
+ """
+ if not readv_ranges:
+ return
+ if self._nodes is None and self._bytes_read * 2 >= self._size:
+ # We've already read more than 50% of the file and we are about to
+ # request more data, just _buffer_all() and be done
+ self._buffer_all()
+ return
+
+ base_offset = self._base_offset
+ if base_offset != 0:
+ # Rewrite the ranges for the offset
+ readv_ranges = [(start+base_offset, size)
+ for start, size in readv_ranges]
+ readv_data = self._transport.readv(self._name, readv_ranges, True,
+ self._size + self._base_offset)
+ # parse
+ for offset, data in readv_data:
+ offset -= base_offset
+ self._bytes_read += len(data)
+ if offset < 0:
+ # transport.readv() expanded to extra data which isn't part of
+ # this index
+ data = data[-offset:]
+ offset = 0
+ if offset == 0 and len(data) == self._size:
+ # We read the whole range, most likely because the
+ # Transport upcast our readv ranges into one long request
+ # for enough total data to grab the whole index.
+ self._buffer_all(StringIO(data))
+ return
+ if self._bisect_nodes is None:
+ # this must be the start
+ if not (offset == 0):
+ raise AssertionError()
+ offset, data = self._parse_header_from_bytes(data)
+ # print readv_ranges, "[%d:%d]" % (offset, offset + len(data))
+ self._parse_region(offset, data)
+
+ def _signature(self):
+ """The file signature for this index type."""
+ return _SIGNATURE
+
+ def validate(self):
+ """Validate that everything in the index can be accessed."""
+ # iter_all validates completely at the moment, so just do that.
+ for node in self.iter_all_entries():
+ pass
+
+
+class CombinedGraphIndex(object):
+ """A GraphIndex made up from smaller GraphIndices.
+
+ The backing indices must implement GraphIndex, and are presumed to be
+ static data.
+
+ Queries against the combined index will be made against the first index,
+ and then the second and so on. The order of indices can thus influence
+ performance significantly. For example, if one index is on local disk and a
+ second on a remote server, the local disk index should be before the other
+ in the index list.
+
+ Also, queries tend to need results from the same indices as previous
+ queries. So the indices will be reordered after every query to put the
+ indices that had the result(s) of that query first (while otherwise
+ preserving the relative ordering).
+ """
+
+ def __init__(self, indices, reload_func=None):
+ """Create a CombinedGraphIndex backed by indices.
+
+ :param indices: An ordered list of indices to query for data.
+ :param reload_func: A function to call if we find we are missing an
+ index. Should have the form reload_func() => True/False to indicate
+ if reloading actually changed anything.
+ """
+ self._indices = indices
+ self._reload_func = reload_func
+ # Sibling indices are other CombinedGraphIndex that we should call
+ # _move_to_front_by_name on when we auto-reorder ourself.
+ self._sibling_indices = []
+ # A list of names that corresponds to the instances in self._indices,
+ # so _index_names[0] is always the name for _indices[0], etc. Sibling
+ # indices must all use the same set of names as each other.
+ self._index_names = [None] * len(self._indices)
+
+ def __repr__(self):
+ return "%s(%s)" % (
+ self.__class__.__name__,
+ ', '.join(map(repr, self._indices)))
+
+ def clear_cache(self):
+ """See GraphIndex.clear_cache()"""
+ for index in self._indices:
+ index.clear_cache()
+
+ def get_parent_map(self, keys):
+ """See graph.StackedParentsProvider.get_parent_map"""
+ search_keys = set(keys)
+ if _mod_revision.NULL_REVISION in search_keys:
+ search_keys.discard(_mod_revision.NULL_REVISION)
+ found_parents = {_mod_revision.NULL_REVISION:[]}
+ else:
+ found_parents = {}
+ for index, key, value, refs in self.iter_entries(search_keys):
+ parents = refs[0]
+ if not parents:
+ parents = (_mod_revision.NULL_REVISION,)
+ found_parents[key] = parents
+ return found_parents
+
+ has_key = _has_key_from_parent_map
+
+ def insert_index(self, pos, index, name=None):
+ """Insert a new index in the list of indices to query.
+
+ :param pos: The position to insert the index.
+ :param index: The index to insert.
+ :param name: a name for this index, e.g. a pack name. These names can
+ be used to reflect index reorderings to related CombinedGraphIndex
+ instances that use the same names. (see set_sibling_indices)
+ """
+ self._indices.insert(pos, index)
+ self._index_names.insert(pos, name)
+
+ def iter_all_entries(self):
+ """Iterate over all keys within the index
+
+ Duplicate keys across child indices are presumed to have the same
+ value and are only reported once.
+
+        :return: An iterable of (index, key, value, reference_lists).
+ There is no defined order for the result iteration - it will be in
+ the most efficient order for the index.
+ """
+ seen_keys = set()
+ while True:
+ try:
+ for index in self._indices:
+ for node in index.iter_all_entries():
+ if node[1] not in seen_keys:
+ yield node
+ seen_keys.add(node[1])
+ return
+ except errors.NoSuchFile:
+ self._reload_or_raise()
+
+ def iter_entries(self, keys):
+ """Iterate over keys within the index.
+
+ Duplicate keys across child indices are presumed to have the same
+ value and are only reported once.
+
+ :param keys: An iterable providing the keys to be retrieved.
+        :return: An iterable of (index, key, value, reference_lists). There is
+ no defined order for the result iteration - it will be in the most
+ efficient order for the index.
+ """
+ keys = set(keys)
+ hit_indices = []
+ while True:
+ try:
+ for index in self._indices:
+ if not keys:
+ break
+ index_hit = False
+ for node in index.iter_entries(keys):
+ keys.remove(node[1])
+ yield node
+ index_hit = True
+ if index_hit:
+ hit_indices.append(index)
+ break
+ except errors.NoSuchFile:
+ self._reload_or_raise()
+ self._move_to_front(hit_indices)
+
+ def iter_entries_prefix(self, keys):
+ """Iterate over keys within the index using prefix matching.
+
+ Duplicate keys across child indices are presumed to have the same
+ value and are only reported once.
+
+ Prefix matching is applied within the tuple of a key, not to within
+ the bytestring of each key element. e.g. if you have the keys ('foo',
+ 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
+ only the former key is returned.
+
+ :param keys: An iterable providing the key prefixes to be retrieved.
+ Each key prefix takes the form of a tuple the length of a key, but
+ with the last N elements 'None' rather than a regular bytestring.
+ The first element cannot be 'None'.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys with a matching prefix to those supplied. No additional keys
+ will be returned, and every match that is in the index will be
+ returned.
+ """
+ keys = set(keys)
+ if not keys:
+ return
+ seen_keys = set()
+ hit_indices = []
+ while True:
+ try:
+ for index in self._indices:
+ index_hit = False
+ for node in index.iter_entries_prefix(keys):
+ if node[1] in seen_keys:
+ continue
+ seen_keys.add(node[1])
+ yield node
+ index_hit = True
+ if index_hit:
+ hit_indices.append(index)
+ break
+ except errors.NoSuchFile:
+ self._reload_or_raise()
+ self._move_to_front(hit_indices)
+
+ def _move_to_front(self, hit_indices):
+ """Rearrange self._indices so that hit_indices are first.
+
+ Order is maintained as much as possible, e.g. the first unhit index
+ will be the first index in _indices after the hit_indices, and the
+ hit_indices will be present in exactly the order they are passed to
+ _move_to_front.
+
+ _move_to_front propagates to all objects in self._sibling_indices by
+ calling _move_to_front_by_name.
+ """
+ if self._indices[:len(hit_indices)] == hit_indices:
+ # The 'hit_indices' are already at the front (and in the same
+ # order), no need to re-order
+ return
+ hit_names = self._move_to_front_by_index(hit_indices)
+ for sibling_idx in self._sibling_indices:
+ sibling_idx._move_to_front_by_name(hit_names)
+
+ def _move_to_front_by_index(self, hit_indices):
+ """Core logic for _move_to_front.
+
+ Returns a list of names corresponding to the hit_indices param.
+ """
+ indices_info = zip(self._index_names, self._indices)
+ if 'index' in debug.debug_flags:
+ trace.mutter('CombinedGraphIndex reordering: currently %r, '
+ 'promoting %r', indices_info, hit_indices)
+ hit_names = []
+ unhit_names = []
+ new_hit_indices = []
+ unhit_indices = []
+
+ for offset, (name, idx) in enumerate(indices_info):
+ if idx in hit_indices:
+ hit_names.append(name)
+ new_hit_indices.append(idx)
+ if len(new_hit_indices) == len(hit_indices):
+ # We've found all of the hit entries, everything else is
+ # unhit
+ unhit_names.extend(self._index_names[offset+1:])
+ unhit_indices.extend(self._indices[offset+1:])
+ break
+ else:
+ unhit_names.append(name)
+ unhit_indices.append(idx)
+
+ self._indices = new_hit_indices + unhit_indices
+ self._index_names = hit_names + unhit_names
+ if 'index' in debug.debug_flags:
+ trace.mutter('CombinedGraphIndex reordered: %r', self._indices)
+ return hit_names
+
+ def _move_to_front_by_name(self, hit_names):
+ """Moves indices named by 'hit_names' to front of the search order, as
+ described in _move_to_front.
+ """
+ # Translate names to index instances, and then call
+ # _move_to_front_by_index.
+ indices_info = zip(self._index_names, self._indices)
+ hit_indices = []
+ for name, idx in indices_info:
+ if name in hit_names:
+ hit_indices.append(idx)
+ self._move_to_front_by_index(hit_indices)
+
+ def find_ancestry(self, keys, ref_list_num):
+ """Find the complete ancestry for the given set of keys.
+
+ Note that this is a whole-ancestry request, so it should be used
+ sparingly.
+
+ :param keys: An iterable of keys to look for
+ :param ref_list_num: The reference list which references the parents
+ we care about.
+ :return: (parent_map, missing_keys)
+ """
+ # XXX: make this call _move_to_front?
+ missing_keys = set()
+ parent_map = {}
+ keys_to_lookup = set(keys)
+ generation = 0
+ while keys_to_lookup:
+ # keys that *all* indexes claim are missing, stop searching them
+ generation += 1
+ all_index_missing = None
+ # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
+ # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
+ # len(parent_map),
+ # len(missing_keys))
+ for index_idx, index in enumerate(self._indices):
+ # TODO: we should probably be doing something with
+ # 'missing_keys' since we've already determined that
+ # those revisions have not been found anywhere
+ index_missing_keys = set()
+ # Find all of the ancestry we can from this index
+ # keep looking until the search_keys set is empty, which means
+ # things we didn't find should be in index_missing_keys
+ search_keys = keys_to_lookup
+ sub_generation = 0
+ # print ' \t%2d\t\t%4d\t%5d\t%5d' % (
+ # index_idx, len(search_keys),
+ # len(parent_map), len(index_missing_keys))
+ while search_keys:
+ sub_generation += 1
+ # TODO: ref_list_num should really be a parameter, since
+ # CombinedGraphIndex does not know what the ref lists
+ # mean.
+ search_keys = index._find_ancestors(search_keys,
+ ref_list_num, parent_map, index_missing_keys)
+ # print ' \t \t%2d\t%4d\t%5d\t%5d' % (
+ # sub_generation, len(search_keys),
+ # len(parent_map), len(index_missing_keys))
+ # Now set whatever was missing to be searched in the next index
+ keys_to_lookup = index_missing_keys
+ if all_index_missing is None:
+ all_index_missing = set(index_missing_keys)
+ else:
+ all_index_missing.intersection_update(index_missing_keys)
+ if not keys_to_lookup:
+ break
+ if all_index_missing is None:
+ # There were no indexes, so all search keys are 'missing'
+ missing_keys.update(keys_to_lookup)
+ keys_to_lookup = None
+ else:
+ missing_keys.update(all_index_missing)
+ keys_to_lookup.difference_update(all_index_missing)
+ return parent_map, missing_keys
+
+ def key_count(self):
+ """Return an estimate of the number of keys in this index.
+
+        For CombinedGraphIndex this is approximated by the sum of the key
+        counts of the child indices. As child indices may have duplicate keys,
+        this can have a maximum error of the number of child indices * the
+        largest number of keys in any one index.
+ """
+ while True:
+ try:
+ return sum((index.key_count() for index in self._indices), 0)
+ except errors.NoSuchFile:
+ self._reload_or_raise()
+
+ missing_keys = _missing_keys_from_parent_map
+
+ def _reload_or_raise(self):
+ """We just got a NoSuchFile exception.
+
+ Try to reload the indices, if it fails, just raise the current
+ exception.
+ """
+ if self._reload_func is None:
+ raise
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ trace.mutter('Trying to reload after getting exception: %s',
+ exc_value)
+ if not self._reload_func():
+ # We tried to reload, but nothing changed, so we fail anyway
+ trace.mutter('_reload_func indicated nothing has changed.'
+ ' Raising original exception.')
+ raise exc_type, exc_value, exc_traceback
+
+ def set_sibling_indices(self, sibling_combined_graph_indices):
+ """Set the CombinedGraphIndex objects to reorder after reordering self.
+ """
+ self._sibling_indices = sibling_combined_graph_indices
+
+ def validate(self):
+ """Validate that everything in the index can be accessed."""
+ while True:
+ try:
+ for index in self._indices:
+ index.validate()
+ return
+ except errors.NoSuchFile:
+ self._reload_or_raise()
+
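+# A commented usage sketch (an illustration only, not part of the module's
+# API): the ordering and auto-reordering behaviour described in the
+# CombinedGraphIndex docstring above. The transport objects, index names and
+# sizes are hypothetical, and the 4-tuple unpacking assumes the child indices
+# were built with reference lists.
+#
+#   local_idx = GraphIndex(local_transport, 'pack-1.rix', size_1)
+#   remote_idx = GraphIndex(remote_transport, 'pack-2.rix', size_2)
+#   # Cheap (local) index first: queries try indices in list order.
+#   combined = CombinedGraphIndex([local_idx, remote_idx])
+#   for index, key, value, refs in combined.iter_entries([('a-key',)]):
+#       pass  # indices that produced hits are moved to the front afterwards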
+
+class InMemoryGraphIndex(GraphIndexBuilder):
+ """A GraphIndex which operates entirely out of memory and is mutable.
+
+ This is designed to allow the accumulation of GraphIndex entries during a
+ single write operation, where the accumulated entries need to be immediately
+ available - for example via a CombinedGraphIndex.
+ """
+
+ def add_nodes(self, nodes):
+ """Add nodes to the index.
+
+        :param nodes: An iterable of (key, value, node_refs) entries to add.
+ """
+ if self.reference_lists:
+ for (key, value, node_refs) in nodes:
+ self.add_node(key, value, node_refs)
+ else:
+ for (key, value) in nodes:
+ self.add_node(key, value)
+
+ def iter_all_entries(self):
+ """Iterate over all keys within the index
+
+        :return: An iterable of (index, key, value, reference_lists). There is no
+ defined order for the result iteration - it will be in the most
+ efficient order for the index (in this case dictionary hash order).
+ """
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(3,
+ "iter_all_entries scales with size of history.")
+ if self.reference_lists:
+ for key, (absent, references, value) in self._nodes.iteritems():
+ if not absent:
+ yield self, key, value, references
+ else:
+ for key, (absent, references, value) in self._nodes.iteritems():
+ if not absent:
+ yield self, key, value
+
+ def iter_entries(self, keys):
+ """Iterate over keys within the index.
+
+ :param keys: An iterable providing the keys to be retrieved.
+ :return: An iterable of (index, key, value, reference_lists). There is no
+ defined order for the result iteration - it will be in the most
+ efficient order for the index (keys iteration order in this case).
+ """
+ # Note: See BTreeBuilder.iter_entries for an explanation of why we
+ # aren't using set().intersection() here
+ nodes = self._nodes
+ keys = [key for key in keys if key in nodes]
+ if self.reference_lists:
+ for key in keys:
+ node = nodes[key]
+ if not node[0]:
+ yield self, key, node[2], node[1]
+ else:
+ for key in keys:
+ node = nodes[key]
+ if not node[0]:
+ yield self, key, node[2]
+
+ def iter_entries_prefix(self, keys):
+ """Iterate over keys within the index using prefix matching.
+
+ Prefix matching is applied within the tuple of a key, not to within
+ the bytestring of each key element. e.g. if you have the keys ('foo',
+ 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
+ only the former key is returned.
+
+ :param keys: An iterable providing the key prefixes to be retrieved.
+ Each key prefix takes the form of a tuple the length of a key, but
+ with the last N elements 'None' rather than a regular bytestring.
+ The first element cannot be 'None'.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys with a matching prefix to those supplied. No additional keys
+ will be returned, and every match that is in the index will be
+ returned.
+ """
+        # XXX: Too much duplication with the GraphIndex class; consider finding
+        # a good place to pull out the actual common logic.
+ keys = set(keys)
+ if not keys:
+ return
+ if self._key_length == 1:
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ node = self._nodes[key]
+ if node[0]:
+ continue
+ if self.reference_lists:
+ yield self, key, node[2], node[1]
+ else:
+ yield self, key, node[2]
+ return
+ nodes_by_key = self._get_nodes_by_key()
+ for key in keys:
+ # sanity check
+ if key[0] is None:
+ raise errors.BadIndexKey(key)
+ if len(key) != self._key_length:
+ raise errors.BadIndexKey(key)
+ # find what it refers to:
+ key_dict = nodes_by_key
+ elements = list(key)
+ # find the subdict to return
+ try:
+ while len(elements) and elements[0] is not None:
+ key_dict = key_dict[elements[0]]
+ elements.pop(0)
+ except KeyError:
+                # a non-existent lookup.
+ continue
+ if len(elements):
+ dicts = [key_dict]
+ while dicts:
+ key_dict = dicts.pop(-1)
+ # can't be empty or would not exist
+ item, value = key_dict.iteritems().next()
+ if type(value) == dict:
+ # push keys
+ dicts.extend(key_dict.itervalues())
+ else:
+ # yield keys
+ for value in key_dict.itervalues():
+ yield (self, ) + value
+ else:
+ yield (self, ) + key_dict
+
+ def key_count(self):
+ """Return an estimate of the number of keys in this index.
+
+ For InMemoryGraphIndex the estimate is exact.
+ """
+ return len(self._nodes) - len(self._absent_keys)
+
+ def validate(self):
+ """In memory index's have no known corruption at the moment."""
+
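+# A commented sketch (illustrative only) of the accumulation pattern the
+# InMemoryGraphIndex docstring above describes: entries added during a write
+# become queryable immediately by stacking the builder with existing on-disk
+# indices in a CombinedGraphIndex. The keys and value bytes are hypothetical.
+#
+#   builder = InMemoryGraphIndex(reference_lists=1)
+#   builder.add_node(('rev-2',), 'value-bytes', ([('rev-1',)],))
+#   combined = CombinedGraphIndex([builder, existing_on_disk_index])
+#   # ('rev-2',) is now returned by combined.iter_entries(), even though
+#   # nothing has been written to disk yet.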
+
+class GraphIndexPrefixAdapter(object):
+ """An adapter between GraphIndex with different key lengths.
+
+    Queries against this will emit queries against the adapted Graph with the
+    prefix added; queries for all items use iter_entries_prefix. The returned
+    nodes will have their keys and node references adjusted to remove the
+    prefix. Finally, an add_nodes_callback can be supplied - when called, the
+    nodes and references being added will have the prefix prepended.
+ """
+
+ def __init__(self, adapted, prefix, missing_key_length,
+ add_nodes_callback=None):
+ """Construct an adapter against adapted with prefix."""
+ self.adapted = adapted
+ self.prefix_key = prefix + (None,)*missing_key_length
+ self.prefix = prefix
+ self.prefix_len = len(prefix)
+ self.add_nodes_callback = add_nodes_callback
+
+ def add_nodes(self, nodes):
+ """Add nodes to the index.
+
+        :param nodes: An iterable of (key, value, node_refs) entries to add.
+ """
+        # save nodes in case it's an iterator
+ nodes = tuple(nodes)
+ translated_nodes = []
+ try:
+ # Add prefix_key to each reference node_refs is a tuple of tuples,
+ # so split it apart, and add prefix_key to the internal reference
+ for (key, value, node_refs) in nodes:
+ adjusted_references = (
+ tuple(tuple(self.prefix + ref_node for ref_node in ref_list)
+ for ref_list in node_refs))
+ translated_nodes.append((self.prefix + key, value,
+ adjusted_references))
+ except ValueError:
+ # XXX: TODO add an explicit interface for getting the reference list
+ # status, to handle this bit of user-friendliness in the API more
+ # explicitly.
+ for (key, value) in nodes:
+ translated_nodes.append((self.prefix + key, value))
+ self.add_nodes_callback(translated_nodes)
+
+ def add_node(self, key, value, references=()):
+ """Add a node to the index.
+
+ :param key: The key. keys are non-empty tuples containing
+ as many whitespace-free utf8 bytestrings as the key length
+ defined for this index.
+ :param references: An iterable of iterables of keys. Each is a
+ reference to another key.
+ :param value: The value to associate with the key. It may be any
+ bytes as long as it does not contain \0 or \n.
+ """
+ self.add_nodes(((key, value, references), ))
+
+ def _strip_prefix(self, an_iter):
+ """Strip prefix data from nodes and return it."""
+ for node in an_iter:
+ # cross checks
+ if node[1][:self.prefix_len] != self.prefix:
+ raise errors.BadIndexData(self)
+ for ref_list in node[3]:
+ for ref_node in ref_list:
+ if ref_node[:self.prefix_len] != self.prefix:
+ raise errors.BadIndexData(self)
+ yield node[0], node[1][self.prefix_len:], node[2], (
+ tuple(tuple(ref_node[self.prefix_len:] for ref_node in ref_list)
+ for ref_list in node[3]))
+
+ def iter_all_entries(self):
+ """Iterate over all keys within the index
+
+ iter_all_entries is implemented against the adapted index using
+ iter_entries_prefix.
+
+        :return: An iterable of (index, key, value, reference_lists). There is no
+ defined order for the result iteration - it will be in the most
+ efficient order for the index (in this case dictionary hash order).
+ """
+ return self._strip_prefix(self.adapted.iter_entries_prefix([self.prefix_key]))
+
+ def iter_entries(self, keys):
+ """Iterate over keys within the index.
+
+ :param keys: An iterable providing the keys to be retrieved.
+ :return: An iterable of (index, key, value, reference_lists). There is no
+ defined order for the result iteration - it will be in the most
+ efficient order for the index (keys iteration order in this case).
+ """
+ return self._strip_prefix(self.adapted.iter_entries(
+ self.prefix + key for key in keys))
+
+ def iter_entries_prefix(self, keys):
+ """Iterate over keys within the index using prefix matching.
+
+ Prefix matching is applied within the tuple of a key, not to within
+ the bytestring of each key element. e.g. if you have the keys ('foo',
+ 'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
+ only the former key is returned.
+
+ :param keys: An iterable providing the key prefixes to be retrieved.
+ Each key prefix takes the form of a tuple the length of a key, but
+ with the last N elements 'None' rather than a regular bytestring.
+ The first element cannot be 'None'.
+ :return: An iterable as per iter_all_entries, but restricted to the
+ keys with a matching prefix to those supplied. No additional keys
+ will be returned, and every match that is in the index will be
+ returned.
+ """
+ return self._strip_prefix(self.adapted.iter_entries_prefix(
+ self.prefix + key for key in keys))
+
+ def key_count(self):
+ """Return an estimate of the number of keys in this index.
+
+ For GraphIndexPrefixAdapter this is relatively expensive - key
+ iteration with the prefix is done.
+ """
+ return len(list(self.iter_all_entries()))
+
+ def validate(self):
+ """Call the adapted's validate."""
+ self.adapted.validate()
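+
+# A commented sketch (illustrative only) of the adaptation described in the
+# GraphIndexPrefixAdapter docstring above: a 2-element-key index is presented
+# to callers as a 1-element-key index. The names are hypothetical, and the
+# 4-tuple unpacking assumes the adapted index carries reference lists.
+#
+#   adapter = GraphIndexPrefixAdapter(two_key_index, ('file-id',), 1)
+#   for index, key, value, refs in adapter.iter_entries([('rev-1',)]):
+#       pass  # key comes back as ('rev-1',), with ('file-id',) stripped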
diff --git a/bzrlib/info.py b/bzrlib/info.py
new file mode 100644
index 0000000..2bb3545
--- /dev/null
+++ b/bzrlib/info.py
@@ -0,0 +1,537 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__all__ = ['show_bzrdir_info']
+
+from cStringIO import StringIO
+import time
+import sys
+
+from bzrlib import (
+ bzrdir,
+ controldir,
+ errors,
+ hooks as _mod_hooks,
+ osutils,
+ urlutils,
+ )
+from bzrlib.errors import (NoWorkingTree, NotBranchError,
+ NoRepositoryPresent, NotLocalUrl)
+from bzrlib.missing import find_unmerged
+
+
+def plural(n, base='', pl=None):
+ if n == 1:
+ return base
+ elif pl is not None:
+ return pl
+ else:
+ return 's'
+
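+# For example: plural(1) == '' and plural(2) == 's', so a format string such
+# as "%d revision%s" % (n, plural(n)) reads correctly for any count, while
+# plural(2, 'y', 'ies') == 'ies' handles irregular plurals.
+#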
+
+class LocationList(object):
+
+ def __init__(self, base_path):
+ self.locs = []
+ self.base_path = base_path
+
+ def add_url(self, label, url):
+ """Add a URL to the list, converting it to a path if possible"""
+ if url is None:
+ return
+ try:
+ path = urlutils.local_path_from_url(url)
+ except errors.InvalidURL:
+ self.locs.append((label, url))
+ else:
+ self.add_path(label, path)
+
+ def add_path(self, label, path):
+ """Add a path, converting it to a relative path if possible"""
+ try:
+ path = osutils.relpath(self.base_path, path)
+ except errors.PathNotChild:
+ pass
+ else:
+ if path == '':
+ path = '.'
+ if path != '/':
+ path = path.rstrip('/')
+ self.locs.append((label, path))
+
+ def get_lines(self):
+ max_len = max(len(l) for l, u in self.locs)
+ return [" %*s: %s\n" % (max_len, l, u) for l, u in self.locs ]
+
+
+def gather_location_info(repository=None, branch=None, working=None,
+ control=None):
+ locs = {}
+ if branch is not None:
+ branch_path = branch.user_url
+ master_path = branch.get_bound_location()
+ if master_path is None:
+ master_path = branch_path
+ else:
+ branch_path = None
+ master_path = None
+ try:
+ if control is not None and control.get_branch_reference():
+ locs['checkout of branch'] = control.get_branch_reference()
+ except NotBranchError:
+ pass
+ if working:
+ working_path = working.user_url
+ if working_path != branch_path:
+ locs['light checkout root'] = working_path
+ if master_path != branch_path:
+ if repository.is_shared():
+ locs['repository checkout root'] = branch_path
+ else:
+ locs['checkout root'] = branch_path
+ if working_path != master_path:
+ locs['checkout of branch'] = master_path
+ elif repository.is_shared():
+ locs['repository branch'] = branch_path
+ elif branch_path is not None:
+ # standalone
+ locs['branch root'] = branch_path
+ else:
+ working_path = None
+ if repository is not None and repository.is_shared():
+ # lightweight checkout of branch in shared repository
+ if branch_path is not None:
+ locs['repository branch'] = branch_path
+ elif branch_path is not None:
+ # standalone
+ locs['branch root'] = branch_path
+ elif repository is not None:
+ locs['repository'] = repository.user_url
+ elif control is not None:
+ locs['control directory'] = control.user_url
+ else:
+ # Really, at least a control directory should be
+ # passed in for this method to be useful.
+ pass
+ if master_path != branch_path:
+ locs['bound to branch'] = master_path
+ if repository is not None and repository.is_shared():
+ # lightweight checkout of branch in shared repository
+ locs['shared repository'] = repository.user_url
+ order = ['control directory', 'light checkout root',
+ 'repository checkout root', 'checkout root',
+ 'checkout of branch', 'shared repository',
+ 'repository', 'repository branch', 'branch root',
+ 'bound to branch']
+ return [(n, locs[n]) for n in order if n in locs]
+
+
+def _show_location_info(locs, outfile):
+ """Show known locations for working, branch and repository."""
+ outfile.write('Location:\n')
+ path_list = LocationList(osutils.getcwd())
+ for name, loc in locs:
+ path_list.add_url(name, loc)
+ outfile.writelines(path_list.get_lines())
+
+
+def _gather_related_branches(branch):
+ locs = LocationList(osutils.getcwd())
+ locs.add_url('public branch', branch.get_public_branch())
+ locs.add_url('push branch', branch.get_push_location())
+ locs.add_url('parent branch', branch.get_parent())
+ locs.add_url('submit branch', branch.get_submit_branch())
+ try:
+ locs.add_url('stacked on', branch.get_stacked_on_url())
+ except (errors.UnstackableBranchFormat, errors.UnstackableRepositoryFormat,
+ errors.NotStacked):
+ pass
+ return locs
+
+
+def _show_related_info(branch, outfile):
+ """Show parent and push location of branch."""
+ locs = _gather_related_branches(branch)
+ if len(locs.locs) > 0:
+ outfile.write('\n')
+ outfile.write('Related branches:\n')
+ outfile.writelines(locs.get_lines())
+
+
+def _show_control_dir_info(control, outfile):
+ """Show control dir information."""
+ if control._format.colocated_branches:
+ outfile.write('\n')
+ outfile.write('Control directory:\n')
+ outfile.write(' %d branches\n' % len(control.list_branches()))
+
+
+def _show_format_info(control=None, repository=None, branch=None,
+ working=None, outfile=None):
+ """Show known formats for control, working, branch and repository."""
+ outfile.write('\n')
+ outfile.write('Format:\n')
+ if control:
+ outfile.write(' control: %s\n' %
+ control._format.get_format_description())
+ if working:
+ outfile.write(' working tree: %s\n' %
+ working._format.get_format_description())
+ if branch:
+ outfile.write(' branch: %s\n' %
+ branch._format.get_format_description())
+ if repository:
+ outfile.write(' repository: %s\n' %
+ repository._format.get_format_description())
+
+
+def _show_locking_info(repository=None, branch=None, working=None,
+ outfile=None):
+ """Show locking status of working, branch and repository."""
+ if (repository and repository.get_physical_lock_status() or
+ (branch and branch.get_physical_lock_status()) or
+ (working and working.get_physical_lock_status())):
+ outfile.write('\n')
+ outfile.write('Lock status:\n')
+ if working:
+ if working.get_physical_lock_status():
+ status = 'locked'
+ else:
+ status = 'unlocked'
+ outfile.write(' working tree: %s\n' % status)
+ if branch:
+ if branch.get_physical_lock_status():
+ status = 'locked'
+ else:
+ status = 'unlocked'
+ outfile.write(' branch: %s\n' % status)
+ if repository:
+ if repository.get_physical_lock_status():
+ status = 'locked'
+ else:
+ status = 'unlocked'
+ outfile.write(' repository: %s\n' % status)
+
+
+def _show_missing_revisions_branch(branch, outfile):
+ """Show missing master revisions in branch."""
+    # Try with inaccessible branch?
+ master = branch.get_master_branch()
+ if master:
+ local_extra, remote_extra = find_unmerged(branch, master)
+ if remote_extra:
+ outfile.write('\n')
+ outfile.write(('Branch is out of date: missing %d '
+ 'revision%s.\n') % (len(remote_extra),
+ plural(len(remote_extra))))
+
+
+def _show_missing_revisions_working(working, outfile):
+ """Show missing revisions in working tree."""
+ branch = working.branch
+ basis = working.basis_tree()
+ try:
+ branch_revno, branch_last_revision = branch.last_revision_info()
+ except errors.UnsupportedOperation:
+ return
+ try:
+ tree_last_id = working.get_parent_ids()[0]
+ except IndexError:
+ tree_last_id = None
+
+ if branch_revno and tree_last_id != branch_last_revision:
+ tree_last_revno = branch.revision_id_to_revno(tree_last_id)
+ missing_count = branch_revno - tree_last_revno
+ outfile.write('\n')
+ outfile.write(('Working tree is out of date: missing %d '
+ 'revision%s.\n') % (missing_count, plural(missing_count)))
+
+
+def _show_working_stats(working, outfile):
+ """Show statistics about a working tree."""
+ basis = working.basis_tree()
+ delta = working.changes_from(basis, want_unchanged=True)
+
+ outfile.write('\n')
+ outfile.write('In the working tree:\n')
+ outfile.write(' %8s unchanged\n' % len(delta.unchanged))
+ outfile.write(' %8d modified\n' % len(delta.modified))
+ outfile.write(' %8d added\n' % len(delta.added))
+ outfile.write(' %8d removed\n' % len(delta.removed))
+ outfile.write(' %8d renamed\n' % len(delta.renamed))
+
+ ignore_cnt = unknown_cnt = 0
+ for path in working.extras():
+ if working.is_ignored(path):
+ ignore_cnt += 1
+ else:
+ unknown_cnt += 1
+ outfile.write(' %8d unknown\n' % unknown_cnt)
+ outfile.write(' %8d ignored\n' % ignore_cnt)
+
+ dir_cnt = 0
+ root_id = working.get_root_id()
+ for path, entry in working.iter_entries_by_dir():
+ if entry.kind == 'directory' and entry.file_id != root_id:
+ dir_cnt += 1
+ outfile.write(' %8d versioned %s\n' % (dir_cnt,
+ plural(dir_cnt, 'subdirectory', 'subdirectories')))
+
+
+def _show_branch_stats(branch, verbose, outfile):
+ """Show statistics about a branch."""
+ try:
+ revno, head = branch.last_revision_info()
+ except errors.UnsupportedOperation:
+ return {}
+ outfile.write('\n')
+ outfile.write('Branch history:\n')
+ outfile.write(' %8d revision%s\n' % (revno, plural(revno)))
+ stats = branch.repository.gather_stats(head, committers=verbose)
+ if verbose:
+ committers = stats['committers']
+ outfile.write(' %8d committer%s\n' % (committers,
+ plural(committers)))
+ if revno:
+ timestamp, timezone = stats['firstrev']
+ age = int((time.time() - timestamp) / 3600 / 24)
+ outfile.write(' %8d day%s old\n' % (age, plural(age)))
+ outfile.write(' first revision: %s\n' %
+ osutils.format_date(timestamp, timezone))
+ timestamp, timezone = stats['latestrev']
+ outfile.write(' latest revision: %s\n' %
+ osutils.format_date(timestamp, timezone))
+ return stats
+
+
+def _show_repository_info(repository, outfile):
+ """Show settings of a repository."""
+ if repository.make_working_trees():
+ outfile.write('\n')
+ outfile.write('Create working tree for new branches inside '
+ 'the repository.\n')
+
+
+def _show_repository_stats(repository, stats, outfile):
+ """Show statistics about a repository."""
+ f = StringIO()
+ if 'revisions' in stats:
+ revisions = stats['revisions']
+ f.write(' %8d revision%s\n' % (revisions, plural(revisions)))
+ if 'size' in stats:
+ f.write(' %8d KiB\n' % (stats['size']/1024))
+ for hook in hooks['repository']:
+ hook(repository, stats, f)
+ if f.getvalue() != "":
+ outfile.write('\n')
+ outfile.write('Repository:\n')
+ outfile.write(f.getvalue())
+
+
+def show_bzrdir_info(a_bzrdir, verbose=False, outfile=None):
+ """Output to stdout the 'info' for a_bzrdir."""
+ if outfile is None:
+ outfile = sys.stdout
+ try:
+ tree = a_bzrdir.open_workingtree(
+ recommend_upgrade=False)
+ except (NoWorkingTree, NotLocalUrl, NotBranchError):
+ tree = None
+ try:
+ branch = a_bzrdir.open_branch(name="")
+ except NotBranchError:
+ branch = None
+ try:
+ repository = a_bzrdir.open_repository()
+ except NoRepositoryPresent:
+ lockable = None
+ repository = None
+ else:
+ lockable = repository
+ else:
+ repository = branch.repository
+ lockable = branch
+ else:
+ branch = tree.branch
+ repository = branch.repository
+ lockable = tree
+
+ if lockable is not None:
+ lockable.lock_read()
+ try:
+ show_component_info(a_bzrdir, repository, branch, tree, verbose,
+ outfile)
+ finally:
+ if lockable is not None:
+ lockable.unlock()
+
+
+def show_component_info(control, repository, branch=None, working=None,
+ verbose=1, outfile=None):
+ """Write info about all bzrdir components to stdout"""
+ if outfile is None:
+ outfile = sys.stdout
+ if verbose is False:
+ verbose = 1
+ if verbose is True:
+ verbose = 2
+ layout = describe_layout(repository, branch, working, control)
+ format = describe_format(control, repository, branch, working)
+ outfile.write("%s (format: %s)\n" % (layout, format))
+ _show_location_info(
+ gather_location_info(control=control, repository=repository,
+ branch=branch, working=working),
+ outfile)
+ if branch is not None:
+ _show_related_info(branch, outfile)
+ if verbose == 0:
+ return
+ _show_format_info(control, repository, branch, working, outfile)
+ _show_locking_info(repository, branch, working, outfile)
+ _show_control_dir_info(control, outfile)
+ if branch is not None:
+ _show_missing_revisions_branch(branch, outfile)
+ if working is not None:
+ _show_missing_revisions_working(working, outfile)
+ _show_working_stats(working, outfile)
+ elif branch is not None:
+ _show_missing_revisions_branch(branch, outfile)
+ if branch is not None:
+ show_committers = verbose >= 2
+ stats = _show_branch_stats(branch, show_committers, outfile)
+ elif repository is not None:
+ stats = repository.gather_stats()
+ if branch is None and working is None and repository is not None:
+ _show_repository_info(repository, outfile)
+ if repository is not None:
+ _show_repository_stats(repository, stats, outfile)
+
+
+def describe_layout(repository=None, branch=None, tree=None, control=None):
+ """Convert a control directory layout into a user-understandable term
+
+ Common outputs include "Standalone tree", "Repository branch" and
+ "Checkout". Uncommon outputs include "Unshared repository with trees"
+ and "Empty control directory"
+ """
+ if branch is None and control is not None:
+ try:
+ branch_reference = control.get_branch_reference()
+ except NotBranchError:
+ pass
+ else:
+ if branch_reference is not None:
+ return "Dangling branch reference"
+ if repository is None:
+ return 'Empty control directory'
+ if branch is None and tree is None:
+ if repository.is_shared():
+ phrase = 'Shared repository'
+ else:
+ phrase = 'Unshared repository'
+ extra = []
+ if repository.make_working_trees():
+ extra.append('trees')
+ if len(control.get_branches()) > 0:
+ extra.append('colocated branches')
+ if extra:
+ phrase += ' with ' + " and ".join(extra)
+ return phrase
+ else:
+ if repository.is_shared():
+ independence = "Repository "
+ else:
+ independence = "Standalone "
+ if tree is not None:
+ phrase = "tree"
+ else:
+ phrase = "branch"
+ if branch is None and tree is not None:
+ phrase = "branchless tree"
+ else:
+ if (tree is not None and tree.user_url !=
+ branch.user_url):
+ independence = ''
+ phrase = "Lightweight checkout"
+ elif branch.get_bound_location() is not None:
+ if independence == 'Standalone ':
+ independence = ''
+ if tree is None:
+ phrase = "Bound branch"
+ else:
+ phrase = "Checkout"
+ if independence != "":
+ phrase = phrase.lower()
+ return "%s%s" % (independence, phrase)
+
+
+def describe_format(control, repository, branch, tree):
+ """Determine the format of an existing control directory
+
+ Several candidates may be found. If so, the names are returned as a
+ single string, separated by ' or '.
+
+ If no matching candidate is found, "unnamed" is returned.
+ """
+ candidates = []
+ if (branch is not None and tree is not None and
+ branch.user_url != tree.user_url):
+ branch = None
+ repository = None
+ non_aliases = set(controldir.format_registry.keys())
+ non_aliases.difference_update(controldir.format_registry.aliases())
+ for key in non_aliases:
+ format = controldir.format_registry.make_bzrdir(key)
+ if isinstance(format, bzrdir.BzrDirMetaFormat1):
+ if (tree and format.workingtree_format !=
+ tree._format):
+ continue
+ if (branch and format.get_branch_format() !=
+ branch._format):
+ continue
+ if (repository and format.repository_format !=
+ repository._format):
+ continue
+ if format.__class__ is not control._format.__class__:
+ continue
+ candidates.append(key)
+ if len(candidates) == 0:
+ return 'unnamed'
+ candidates.sort()
+ new_candidates = [c for c in candidates if not
+ controldir.format_registry.get_info(c).hidden]
+ if len(new_candidates) > 0:
+ # If there are any non-hidden formats that match, only return those to
+ # avoid listing hidden formats except when only a hidden format will
+ # do.
+ candidates = new_candidates
+ return ' or '.join(candidates)
+
+
+class InfoHooks(_mod_hooks.Hooks):
+ """Hooks for the info command."""
+
+ def __init__(self):
+ super(InfoHooks, self).__init__("bzrlib.info", "hooks")
+ self.add_hook('repository',
+ "Invoked when displaying the statistics for a repository. "
+ "repository is called with a statistics dictionary as returned "
+ "by the repository and a file-like object to write to.", (1, 15))
+
+
+hooks = InfoHooks()
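+
+# A commented sketch (illustrative only): a plugin could extend the
+# Repository section of 'bzr info' through the hook point declared above.
+# The callable receives the repository, its statistics dict and a file-like
+# object; the hook label 'extra stats' is hypothetical.
+#
+#   def _extra_repository_stats(repository, stats, outfile):
+#       if 'revisions' in stats:
+#           outfile.write('   (plugin saw %d revisions)\n' % stats['revisions'])
+#
+#   hooks.install_named_hook('repository', _extra_repository_stats,
+#                            'extra stats')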
diff --git a/bzrlib/inspect_for_copy.py b/bzrlib/inspect_for_copy.py
new file mode 100644
index 0000000..29ca60c
--- /dev/null
+++ b/bzrlib/inspect_for_copy.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A version of inspect that includes what 'copy' needs.
+
+Importing the python standard module 'copy' is far more expensive than it
+needs to be, because copy imports 'inspect' which imports 'tokenize'.
+And 'copy' only needs 2 small functions out of 'inspect', but has to
+load all of 'tokenize', which makes it horribly slow.
+
+This module is designed to use tricky hacks in import rules, to avoid this
+overhead.
+"""
+
+from __future__ import absolute_import
+
+
+####
+# These are the only 2 functions that 'copy' needs from 'inspect'
+# As you can see, they are quite trivial, and don't justify the
+# 40ms spent to import 'inspect' because it is importing 'tokenize'
+# These are copied verbatim from the python standard library.
+
+# ----------------------------------------------------------- class helpers
+def _searchbases(cls, accum):
+ # Simulate the "classic class" search order.
+ if cls in accum:
+ return
+ accum.append(cls)
+ for base in cls.__bases__:
+ _searchbases(base, accum)
+
+
+def getmro(cls):
+ "Return tuple of base classes (including cls) in method resolution order."
+ if hasattr(cls, "__mro__"):
+ return cls.__mro__
+ else:
+ result = []
+ _searchbases(cls, result)
+ return tuple(result)
+
+
+def import_copy_with_hacked_inspect():
+ """Import the 'copy' module with a hacked 'inspect' module"""
+ # We don't actually care about 'getmro' but we need to pass
+ # something in the list so that we get the direct module,
+ # rather than getting the base module
+ import sys
+
+ # Don't hack around if 'inspect' already exists
+ if 'inspect' in sys.modules:
+ import copy
+ return
+
+ mod = __import__('bzrlib.inspect_for_copy',
+ globals(), locals(), ['getmro'])
+
+ sys.modules['inspect'] = mod
+ try:
+ import copy
+ finally:
+ del sys.modules['inspect']
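+
+# Typical usage sketch (commented out): call this once, early in startup and
+# before anything else has imported 'copy', so that 'copy' binds to the
+# lightweight getmro above instead of dragging in 'inspect' and 'tokenize'.
+#
+#   from bzrlib.inspect_for_copy import import_copy_with_hacked_inspect
+#   import_copy_with_hacked_inspect()
+#   import copy  # already in sys.modules, so this is now just a cache lookup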
diff --git a/bzrlib/inter.py b/bzrlib/inter.py
new file mode 100644
index 0000000..41f6d83
--- /dev/null
+++ b/bzrlib/inter.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Inter-object utility class."""
+
+from __future__ import absolute_import
+
+from bzrlib.errors import NoCompatibleInter
+
+
+class InterObject(object):
+ """This class represents operations taking place between two objects.
+
+ Its instances have methods like join or copy_content or fetch, and contain
+ references to the source and target objects these operations can be
+ carried out between.
+
+ Often we will provide convenience methods on the objects which carry out
+ operations with another of similar type - they will always forward to
+ a subclass of InterObject - i.e.
+ InterVersionedFile.get(other).method_name(parameters).
+
+ If the source and target objects implement the locking protocol -
+ lock_read, lock_write, unlock, then the InterObject's lock_read,
+ lock_write and unlock methods may be used (optionally in conjunction with
+ the needs_read_lock and needs_write_lock decorators.)
+
+ When looking for an inter, the most recently registered types are tested
+ first. So typically the most generic and slowest InterObjects should be
+ registered first.
+ """
+
+ # _optimisers = list()
+ # Each concrete InterObject type should have its own optimisers list.
+
+ def __init__(self, source, target):
+ """Construct a default InterObject instance. Please use 'get'.
+
+ Only subclasses of InterObject should call
+ InterObject.__init__ - clients should call InterFOO.get where FOO
+ is the base type of the objects they are interacting between. I.e.
+ InterVersionedFile or InterRepository.
+ get() is a convenience class method which will create an optimised
+ InterFOO if possible.
+ """
+ self.source = source
+ self.target = target
+
+ def _double_lock(self, lock_source, lock_target):
+ """Take out two locks, rolling back the first if the second throws."""
+ lock_source()
+ try:
+ lock_target()
+ except Exception:
+ # we want to ensure that we don't leave source locked by mistake.
+ # and any error on target should not confuse source.
+ self.source.unlock()
+ raise
+
+ @classmethod
+ def get(klass, source, target):
+ """Retrieve a Inter worker object for these objects.
+
+ :param source: the object to be the 'source' member of
+ the InterObject instance.
+ :param target: the object to be the 'target' member of
+ the InterObject instance.
+
+        If an optimised worker exists it will be used; otherwise
+        NoCompatibleInter is raised.
+ """
+ for provider in reversed(klass._optimisers):
+ if provider.is_compatible(source, target):
+ return provider(source, target)
+ raise NoCompatibleInter(source, target)
+
+ def lock_read(self):
+ """Take out a logical read lock.
+
+ This will lock the source branch and the target branch. The source gets
+ a read lock and the target a read lock.
+ """
+ self._double_lock(self.source.lock_read, self.target.lock_read)
+
+ def lock_write(self):
+ """Take out a logical write lock.
+
+ This will lock the source branch and the target branch. The source gets
+ a read lock and the target a write lock.
+ """
+ self._double_lock(self.source.lock_read, self.target.lock_write)
+
+ @classmethod
+ def register_optimiser(klass, optimiser):
+ """Register an InterObject optimiser."""
+ klass._optimisers.append(optimiser)
+
+ def unlock(self):
+ """Release the locks on source and target."""
+ try:
+ self.target.unlock()
+ finally:
+ self.source.unlock()
+
+ @classmethod
+ def unregister_optimiser(klass, optimiser):
+ """Unregister an InterObject optimiser."""
+ klass._optimisers.remove(optimiser)
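+
+# A commented sketch (illustrative only) of a hypothetical concrete
+# InterObject, following the pattern described in the class docstring above:
+# each concrete type keeps its own _optimisers list, registers optimisers,
+# and callers obtain a worker via get().
+#
+#   class InterThing(InterObject):
+#
+#       _optimisers = []
+#
+#       @classmethod
+#       def is_compatible(klass, source, target):
+#           return isinstance(source, Thing) and isinstance(target, Thing)
+#
+#       def copy_content(self):
+#           pass  # operation between self.source and self.target
+#
+#   InterThing.register_optimiser(InterThing)
+#   InterThing.get(a_thing, another_thing).copy_content()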
diff --git a/bzrlib/intset.py b/bzrlib/intset.py
new file mode 100644
index 0000000..d7ac6a0
--- /dev/null
+++ b/bzrlib/intset.py
@@ -0,0 +1,227 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# Author: Martin Pool <mbp@canonical.com>
+
+
+# Somewhat surprisingly, it turns out that this is much slower than
+# simply storing the ints in a set() type. Python's performance model
+# is very different to that of C.
+
+
+class IntSet(object):
+ """Faster set-like class storing only whole numbers.
+
+ Despite the name this stores long integers happily, but negative
+ values are not allowed.
+
+ >>> a = IntSet([0, 2, 5])
+ >>> bool(a)
+ True
+ >>> 2 in a
+ True
+ >>> 4 in a
+ False
+ >>> a.add(4)
+ >>> 4 in a
+ True
+
+ >>> b = IntSet()
+ >>> not b
+ True
+ >>> b.add(10)
+ >>> 10 in a
+ False
+ >>> a.update(b)
+ >>> 10 in a
+ True
+ >>> a.update(range(5))
+ >>> 3 in a
+ True
+
+ Being a set, duplicates are ignored:
+ >>> a = IntSet()
+ >>> a.add(10)
+ >>> a.add(10)
+ >>> 10 in a
+ True
+ >>> list(a)
+ [10]
+
+ """
+ __slots__ = ['_val']
+
+ def __init__(self, values=None, bitmask=0L):
+ """Create a new intset.
+
+ values
+ If specified, an initial collection of values.
+ """
+ self._val = bitmask
+ if values is not None:
+ self.update(values)
+
+
+ def __nonzero__(self):
+ """IntSets are false if empty, otherwise True.
+
+ >>> bool(IntSet())
+ False
+
+ >>> bool(IntSet([0]))
+ True
+ """
+ return bool(self._val)
+
+
+ def __len__(self):
+ """Number of elements in set.
+
+ >>> len(IntSet(xrange(20000)))
+ 20000
+ """
+ v = self._val
+ c = 0
+ while v:
+ if v & 1:
+ c += 1
+ v = v >> 1
+ return c
+
+
+ def __and__(self, other):
+ """Set intersection.
+
+ >>> a = IntSet(range(10))
+ >>> len(a)
+ 10
+ >>> b = a & a
+ >>> b == a
+ True
+ >>> a = a & IntSet([5, 7, 11, 13])
+ >>> list(a)
+ [5, 7]
+ """
+ if not isinstance(other, IntSet):
+ raise NotImplementedError(type(other))
+ return IntSet(bitmask=(self._val & other._val))
+
+
+ def __or__(self, other):
+ """Set union.
+
+ >>> a = IntSet(range(10)) | IntSet([5, 15, 25])
+ >>> len(a)
+ 12
+ """
+ if not isinstance(other, IntSet):
+ raise NotImplementedError(type(other))
+ return IntSet(bitmask=(self._val | other._val))
+
+
+ def __eq__(self, other):
+ """Comparison.
+
+ >>> IntSet(range(3)) == IntSet([2, 0, 1])
+ True
+ """
+ if isinstance(other, IntSet):
+ return self._val == other._val
+ else:
+ return False
+
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+ def __contains__(self, i):
+ return self._val & (1L << i)
+
+
+ def __iter__(self):
+ """Return contents of set.
+
+ >>> list(IntSet())
+ []
+ >>> list(IntSet([0, 1, 5, 7]))
+ [0, 1, 5, 7]
+ """
+ v = self._val
+ o = 0
+ # XXX: This is a bit slow
+ while v:
+ if v & 1:
+ yield o
+ v = v >> 1
+ o = o + 1
+
+
+ def update(self, to_add):
+ """Add all the values from the sequence or intset to_add"""
+ if isinstance(to_add, IntSet):
+ self._val |= to_add._val
+ else:
+ for i in to_add:
+ self._val |= (1L << i)
+
+
+ def add(self, to_add):
+ self._val |= (1L << to_add)
+
+
+ def remove(self, to_remove):
+ """Remove one value from the set.
+
+ Raises KeyError if the value is not present.
+
+ >>> a = IntSet([10])
+ >>> a.remove(9)
+ Traceback (most recent call last):
+ File "/usr/lib/python2.4/doctest.py", line 1243, in __run
+ compileflags, 1) in test.globs
+ File "<doctest __main__.IntSet.remove[1]>", line 1, in ?
+ a.remove(9)
+ KeyError: 9
+ >>> a.remove(10)
+ >>> not a
+ True
+ """
+ m = 1L << to_remove
+ if not self._val & m:
+ raise KeyError(to_remove)
+ self._val ^= m
+
+ def set_remove(self, to_remove):
+ """Remove all values that exist in to_remove.
+
+ >>> a = IntSet(range(10))
+ >>> b = IntSet([2,3,4,7,12])
+ >>> a.set_remove(b)
+ >>> list(a)
+ [0, 1, 5, 6, 8, 9]
+ >>> a.set_remove([1,2,5])
+ >>> list(a)
+ [0, 6, 8, 9]
+ """
+ if not isinstance(to_remove, IntSet):
+ self.set_remove(IntSet(to_remove))
+ return
+ intersect = self._val & to_remove._val
+ self._val ^= intersect
+
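+# Worked example of the representation used above: each member i sets bit i
+# of _val, so IntSet([0, 2, 5])._val == 0b100101 == 37; membership is a
+# single bit test and union/intersection are bitwise or/and of two integers.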
diff --git a/bzrlib/inventory.py b/bzrlib/inventory.py
new file mode 100644
index 0000000..9730390
--- /dev/null
+++ b/bzrlib/inventory.py
@@ -0,0 +1,2368 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# FIXME: This refactoring of the workingtree code doesn't seem to keep
+# the WorkingTree's copy of the inventory in sync with the branch. The
+# branch modifies its working inventory when it does a commit to make
+# missing files permanently removed.
+
+# TODO: Maybe also keep the full path of the entry, and the children?
+# But those depend on its position within a particular inventory, and
+# it would be nice not to need to hold the backpointer here.
+
+from __future__ import absolute_import
+
+# This should really be an id randomly assigned when the tree is
+# created, but it's not for now.
+ROOT_ID = "TREE_ROOT"
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import collections
+import copy
+import re
+import tarfile
+
+from bzrlib import (
+ chk_map,
+ errors,
+ generate_ids,
+ osutils,
+ )
+""")
+
+from bzrlib import (
+ lazy_regex,
+ trace,
+ )
+
+from bzrlib.static_tuple import StaticTuple
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+
+
+class InventoryEntry(object):
+ """Description of a versioned file.
+
+ An InventoryEntry has the following fields, which are also
+ present in the XML inventory-entry element:
+
+ file_id
+
+ name
+ (within the parent directory)
+
+ parent_id
+ file_id of the parent directory, or ROOT_ID
+
+ revision
+ the revision_id in which this variation of this file was
+ introduced.
+
+ executable
+ Indicates that this file should be executable on systems
+ that support it.
+
+ text_sha1
+ sha-1 of the text of the file
+
+ text_size
+ size in bytes of the text of the file
+
+ (reading a version 4 tree created a text_id field.)
+
+ >>> i = Inventory()
+ >>> i.path2id('')
+ 'TREE_ROOT'
+ >>> i.add(InventoryDirectory('123', 'src', ROOT_ID))
+ InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None)
+ >>> i.add(InventoryFile('2323', 'hello.c', parent_id='123'))
+ InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None, revision=None)
+ >>> shouldbe = {0: '', 1: 'src', 2: 'src/hello.c'}
+ >>> for ix, j in enumerate(i.iter_entries()):
+ ... print (j[0] == shouldbe[ix], j[1])
+ ...
+ (True, InventoryDirectory('TREE_ROOT', u'', parent_id=None, revision=None))
+ (True, InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None))
+ (True, InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None, revision=None))
+ >>> i.add(InventoryFile('2324', 'bye.c', '123'))
+ InventoryFile('2324', 'bye.c', parent_id='123', sha1=None, len=None, revision=None)
+ >>> i.add(InventoryDirectory('2325', 'wibble', '123'))
+ InventoryDirectory('2325', 'wibble', parent_id='123', revision=None)
+ >>> i.path2id('src/wibble')
+ '2325'
+ >>> i.add(InventoryFile('2326', 'wibble.c', '2325'))
+ InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None, revision=None)
+ >>> i['2326']
+ InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None, revision=None)
+ >>> for path, entry in i.iter_entries():
+ ... print path
+ ...
+ <BLANKLINE>
+ src
+ src/bye.c
+ src/hello.c
+ src/wibble
+ src/wibble/wibble.c
+ >>> i.id2path('2326')
+ 'src/wibble/wibble.c'
+ """
+
+ # Constants returned by describe_change()
+ #
+ # TODO: These should probably move to some kind of FileChangeDescription
+ # class; that's like what's inside a TreeDelta but we want to be able to
+ # generate them just for one file at a time.
+ RENAMED = 'renamed'
+ MODIFIED_AND_RENAMED = 'modified and renamed'
+
+ __slots__ = ['file_id', 'revision', 'parent_id', 'name']
+
+ # Attributes that all InventoryEntry instances are expected to have, but
+ # that don't vary for all kinds of entry. (e.g. symlink_target is only
+ # relevant to InventoryLink, so there's no reason to make every
+ # InventoryFile instance allocate space to hold a value for it.)
+ # Attributes that only vary for files: executable, text_sha1, text_size,
+ # text_id
+ executable = False
+ text_sha1 = None
+ text_size = None
+ text_id = None
+ # Attributes that only vary for symlinks: symlink_target
+ symlink_target = None
+ # Attributes that only vary for tree-references: reference_revision
+ reference_revision = None
+
+
+ def detect_changes(self, old_entry):
+ """Return a (text_modified, meta_modified) from this to old_entry.
+
+ _read_tree_state must have been called on self and old_entry prior to
+ calling detect_changes.
+ """
+ return False, False
+
+ def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
+ output_to, reverse=False):
+ """Perform a diff between two entries of the same kind."""
+
+ def parent_candidates(self, previous_inventories):
+ """Find possible per-file graph parents.
+
+ This is currently defined by:
+ - Select the last changed revision in the parent inventory.
+         - To deal with a short-lived bug in bzr 0.8's development, two
+           entries that have the same last-changed revision but different 'x'
+           bit settings are changed in-place.
+ """
+ # revision:ie mapping for each ie found in previous_inventories.
+ candidates = {}
+ # identify candidate head revision ids.
+ for inv in previous_inventories:
+ if inv.has_id(self.file_id):
+ ie = inv[self.file_id]
+ if ie.revision in candidates:
+ # same revision value in two different inventories:
+ # correct possible inconsistencies:
+ # * there was a bug in revision updates with 'x' bit
+ # support.
+ try:
+ if candidates[ie.revision].executable != ie.executable:
+ candidates[ie.revision].executable = False
+ ie.executable = False
+ except AttributeError:
+ pass
+ else:
+ # add this revision as a candidate.
+ candidates[ie.revision] = ie
+ return candidates
+
+ def has_text(self):
+ """Return true if the object this entry represents has textual data.
+
+ Note that textual data includes binary content.
+
+ Also note that all entries get weave files created for them.
+ This attribute is primarily used when upgrading from old trees that
+ did not have the weave index for all inventory entries.
+ """
+ return False
+
+ def __init__(self, file_id, name, parent_id):
+ """Create an InventoryEntry
+
+ The filename must be a single component, relative to the
+ parent directory; it cannot be a whole path or relative name.
+
+ >>> e = InventoryFile('123', 'hello.c', ROOT_ID)
+ >>> e.name
+ 'hello.c'
+ >>> e.file_id
+ '123'
+ >>> e = InventoryFile('123', 'src/hello.c', ROOT_ID)
+ Traceback (most recent call last):
+ InvalidEntryName: Invalid entry name: src/hello.c
+ """
+ if '/' in name or '\\' in name:
+ raise errors.InvalidEntryName(name=name)
+ self.file_id = file_id
+ self.revision = None
+ self.name = name
+ self.parent_id = parent_id
+
+ def kind_character(self):
+ """Return a short kind indicator useful for appending to names."""
+ raise errors.BzrError('unknown kind %r' % self.kind)
+
+ known_kinds = ('file', 'directory', 'symlink')
+
+ def sorted_children(self):
+ return sorted(self.children.items())
+
+ @staticmethod
+ def versionable_kind(kind):
+ return (kind in ('file', 'directory', 'symlink', 'tree-reference'))
+
+ def check(self, checker, rev_id, inv):
+ """Check this inventory entry is intact.
+
+ This is a template method, override _check for kind specific
+ tests.
+
+ :param checker: Check object providing context for the checks;
+ can be used to find out what parts of the repository have already
+ been checked.
+ :param rev_id: Revision id from which this InventoryEntry was loaded.
+ Not necessarily the last-changed revision for this file.
+ :param inv: Inventory from which the entry was loaded.
+ """
+ if self.parent_id is not None:
+ if not inv.has_id(self.parent_id):
+ raise errors.BzrCheckError(
+ 'missing parent {%s} in inventory for revision {%s}' % (
+ self.parent_id, rev_id))
+ checker._add_entry_to_text_key_references(inv, self)
+ self._check(checker, rev_id)
+
+ def _check(self, checker, rev_id):
+ """Check this inventory entry for kind specific errors."""
+ checker._report_items.append(
+ 'unknown entry kind %r in revision {%s}' % (self.kind, rev_id))
+
+ def copy(self):
+ """Clone this inventory entry."""
+ raise NotImplementedError
+
+ @staticmethod
+ def describe_change(old_entry, new_entry):
+ """Describe the change between old_entry and this.
+
+ This smells of being an InterInventoryEntry situation, but as its
+ the first one, we're making it a static method for now.
+
+ An entry with a different parent, or different name is considered
+ to be renamed. Reparenting is an internal detail.
+ Note that renaming the parent does not trigger a rename for the
+ child entry itself.
+ """
+ # TODO: Perhaps return an object rather than just a string
+ if old_entry is new_entry:
+ # also the case of both being None
+ return 'unchanged'
+ elif old_entry is None:
+ return 'added'
+ elif new_entry is None:
+ return 'removed'
+ if old_entry.kind != new_entry.kind:
+ return 'modified'
+ text_modified, meta_modified = new_entry.detect_changes(old_entry)
+ if text_modified or meta_modified:
+ modified = True
+ else:
+ modified = False
+ # TODO 20060511 (mbp, rbc) factor out 'detect_rename' here.
+ if old_entry.parent_id != new_entry.parent_id:
+ renamed = True
+ elif old_entry.name != new_entry.name:
+ renamed = True
+ else:
+ renamed = False
+ if renamed and not modified:
+ return InventoryEntry.RENAMED
+ if modified and not renamed:
+ return 'modified'
+ if modified and renamed:
+ return InventoryEntry.MODIFIED_AND_RENAMED
+ return 'unchanged'
+
+ def __repr__(self):
+ return ("%s(%r, %r, parent_id=%r, revision=%r)"
+ % (self.__class__.__name__,
+ self.file_id,
+ self.name,
+ self.parent_id,
+ self.revision))
+
+ def __eq__(self, other):
+ if other is self:
+ # For the case when objects are cached
+ return True
+ if not isinstance(other, InventoryEntry):
+ return NotImplemented
+
+ return ((self.file_id == other.file_id)
+ and (self.name == other.name)
+ and (other.symlink_target == self.symlink_target)
+ and (self.text_sha1 == other.text_sha1)
+ and (self.text_size == other.text_size)
+ and (self.text_id == other.text_id)
+ and (self.parent_id == other.parent_id)
+ and (self.kind == other.kind)
+ and (self.revision == other.revision)
+ and (self.executable == other.executable)
+ and (self.reference_revision == other.reference_revision)
+ )
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __hash__(self):
+ raise ValueError('not hashable')
+
+ def _unchanged(self, previous_ie):
+ """Has this entry changed relative to previous_ie.
+
+ This method should be overridden in child classes.
+ """
+ compatible = True
+ # different inv parent
+ if previous_ie.parent_id != self.parent_id:
+ compatible = False
+ # renamed
+ elif previous_ie.name != self.name:
+ compatible = False
+ elif previous_ie.kind != self.kind:
+ compatible = False
+ return compatible
+
+ def _read_tree_state(self, path, work_tree):
+ """Populate fields in the inventory entry from the given tree.
+
+ Note that this should be modified to be a noop on virtual trees
+ as all entries created there are prepopulated.
+ """
+ # TODO: Rather than running this manually, we should check the
+ # working sha1 and other expensive properties when they're
+ # first requested, or preload them if they're already known
+ pass # nothing to do by default
+
+ def _forget_tree_state(self):
+ pass
+
+
+class InventoryDirectory(InventoryEntry):
+ """A directory in an inventory."""
+
+ __slots__ = ['children']
+
+ kind = 'directory'
+
+ def _check(self, checker, rev_id):
+ """See InventoryEntry._check"""
+ # In non rich root repositories we do not expect a file graph for the
+ # root.
+ if self.name == '' and not checker.rich_roots:
+ return
+ # Directories are stored as an empty file, but the file should exist
+ # to provide a per-fileid log. The hash of every directory content is
+ # "da..." below (the sha1sum of '').
+ checker.add_pending_item(rev_id,
+ ('texts', self.file_id, self.revision), 'text',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
+
+ def copy(self):
+ other = InventoryDirectory(self.file_id, self.name, self.parent_id)
+ other.revision = self.revision
+ # note that children are *not* copied; they're pulled across when
+ # others are added
+ return other
+
+ def __init__(self, file_id, name, parent_id):
+ super(InventoryDirectory, self).__init__(file_id, name, parent_id)
+ self.children = {}
+
+ def kind_character(self):
+ """See InventoryEntry.kind_character."""
+ return '/'
+
+
+class InventoryFile(InventoryEntry):
+ """A file in an inventory."""
+
+ __slots__ = ['text_sha1', 'text_size', 'text_id', 'executable']
+
+ kind = 'file'
+
+ def __init__(self, file_id, name, parent_id):
+ super(InventoryFile, self).__init__(file_id, name, parent_id)
+ self.text_sha1 = None
+ self.text_size = None
+ self.text_id = None
+ self.executable = False
+
+ def _check(self, checker, tree_revision_id):
+ """See InventoryEntry._check"""
+ # TODO: check size too.
+ checker.add_pending_item(tree_revision_id,
+ ('texts', self.file_id, self.revision), 'text',
+ self.text_sha1)
+ if self.text_size is None:
+ checker._report_items.append(
+ 'fileid {%s} in {%s} has None for text_size' % (self.file_id,
+ tree_revision_id))
+
+ def copy(self):
+ other = InventoryFile(self.file_id, self.name, self.parent_id)
+ other.executable = self.executable
+ other.text_id = self.text_id
+ other.text_sha1 = self.text_sha1
+ other.text_size = self.text_size
+ other.revision = self.revision
+ return other
+
+ def detect_changes(self, old_entry):
+ """See InventoryEntry.detect_changes."""
+ text_modified = (self.text_sha1 != old_entry.text_sha1)
+ meta_modified = (self.executable != old_entry.executable)
+ return text_modified, meta_modified
+
+ def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
+ output_to, reverse=False):
+ """See InventoryEntry._diff."""
+ from bzrlib.diff import DiffText
+ from_file_id = self.file_id
+ if to_entry:
+ to_file_id = to_entry.file_id
+ else:
+ to_file_id = None
+ if reverse:
+ to_file_id, from_file_id = from_file_id, to_file_id
+ tree, to_tree = to_tree, tree
+ from_label, to_label = to_label, from_label
+ differ = DiffText(tree, to_tree, output_to, 'utf-8', '', '',
+ text_diff)
+ return differ.diff_text(from_file_id, to_file_id, from_label, to_label)
+
+ def has_text(self):
+ """See InventoryEntry.has_text."""
+ return True
+
+ def kind_character(self):
+ """See InventoryEntry.kind_character."""
+ return ''
+
+ def _read_tree_state(self, path, work_tree):
+ """See InventoryEntry._read_tree_state."""
+ self.text_sha1 = work_tree.get_file_sha1(self.file_id, path=path)
+ # FIXME: 20050930 probe for the text size when getting sha1
+ # in _read_tree_state
+ self.executable = work_tree.is_executable(self.file_id, path=path)
+
+ def __repr__(self):
+ return ("%s(%r, %r, parent_id=%r, sha1=%r, len=%s, revision=%s)"
+ % (self.__class__.__name__,
+ self.file_id,
+ self.name,
+ self.parent_id,
+ self.text_sha1,
+ self.text_size,
+ self.revision))
+
+ def _forget_tree_state(self):
+ self.text_sha1 = None
+
+ def _unchanged(self, previous_ie):
+ """See InventoryEntry._unchanged."""
+ compatible = super(InventoryFile, self)._unchanged(previous_ie)
+ if self.text_sha1 != previous_ie.text_sha1:
+ compatible = False
+ else:
+ # FIXME: 20050930 probe for the text size when getting sha1
+ # in _read_tree_state
+ self.text_size = previous_ie.text_size
+ if self.executable != previous_ie.executable:
+ compatible = False
+ return compatible
+
+
+class InventoryLink(InventoryEntry):
+ """A file in an inventory."""
+
+ __slots__ = ['symlink_target']
+
+ kind = 'symlink'
+
+ def __init__(self, file_id, name, parent_id):
+ super(InventoryLink, self).__init__(file_id, name, parent_id)
+ self.symlink_target = None
+
+ def _check(self, checker, tree_revision_id):
+ """See InventoryEntry._check"""
+ if self.symlink_target is None:
+ checker._report_items.append(
+ 'symlink {%s} has no target in revision {%s}'
+ % (self.file_id, tree_revision_id))
+ # Symlinks are stored as ''
+ checker.add_pending_item(tree_revision_id,
+ ('texts', self.file_id, self.revision), 'text',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
+
+ def copy(self):
+ other = InventoryLink(self.file_id, self.name, self.parent_id)
+ other.symlink_target = self.symlink_target
+ other.revision = self.revision
+ return other
+
+ def detect_changes(self, old_entry):
+ """See InventoryEntry.detect_changes."""
+ # FIXME: which _modified field should we use ? RBC 20051003
+ text_modified = (self.symlink_target != old_entry.symlink_target)
+ if text_modified:
+ trace.mutter(" symlink target changed")
+ meta_modified = False
+ return text_modified, meta_modified
+
+ def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
+ output_to, reverse=False):
+ """See InventoryEntry._diff."""
+ from bzrlib.diff import DiffSymlink
+ old_target = self.symlink_target
+ if to_entry is not None:
+ new_target = to_entry.symlink_target
+ else:
+ new_target = None
+ if not reverse:
+ old_tree = tree
+ new_tree = to_tree
+ else:
+ old_tree = to_tree
+ new_tree = tree
+ new_target, old_target = old_target, new_target
+ differ = DiffSymlink(old_tree, new_tree, output_to)
+ return differ.diff_symlink(old_target, new_target)
+
+ def kind_character(self):
+ """See InventoryEntry.kind_character."""
+ return ''
+
+ def _read_tree_state(self, path, work_tree):
+ """See InventoryEntry._read_tree_state."""
+ self.symlink_target = work_tree.get_symlink_target(self.file_id)
+
+ def _forget_tree_state(self):
+ self.symlink_target = None
+
+ def _unchanged(self, previous_ie):
+ """See InventoryEntry._unchanged."""
+ compatible = super(InventoryLink, self)._unchanged(previous_ie)
+ if self.symlink_target != previous_ie.symlink_target:
+ compatible = False
+ return compatible
+
+
+class TreeReference(InventoryEntry):
+
+ __slots__ = ['reference_revision']
+
+ kind = 'tree-reference'
+
+ def __init__(self, file_id, name, parent_id, revision=None,
+ reference_revision=None):
+ InventoryEntry.__init__(self, file_id, name, parent_id)
+ self.revision = revision
+ self.reference_revision = reference_revision
+
+ def copy(self):
+ return TreeReference(self.file_id, self.name, self.parent_id,
+ self.revision, self.reference_revision)
+
+ def _read_tree_state(self, path, work_tree):
+ """Populate fields in the inventory entry from the given tree.
+ """
+ self.reference_revision = work_tree.get_reference_revision(
+ self.file_id, path)
+
+ def _forget_tree_state(self):
+ self.reference_revision = None
+
+ def _unchanged(self, previous_ie):
+ """See InventoryEntry._unchanged."""
+ compatible = super(TreeReference, self)._unchanged(previous_ie)
+ if self.reference_revision != previous_ie.reference_revision:
+ compatible = False
+ return compatible
+
+
+class CommonInventory(object):
+ """Basic inventory logic, defined in terms of primitives like has_id.
+
+ An inventory is the metadata about the contents of a tree.
+
+ This is broadly a map from file_id to entries such as directories, files,
+ symlinks and tree references. Each entry maintains its own metadata like
+ SHA1 and length for files, or children for a directory.
+
+ Entries can be looked up either by path or by file_id.
+
+ InventoryEntry objects must not be modified after they are
+ inserted, other than through the Inventory API.
+ """
+
+ def has_filename(self, filename):
+ return bool(self.path2id(filename))
+
+ def id2path(self, file_id):
+ """Return as a string the path to file_id.
+
+ >>> i = Inventory()
+ >>> e = i.add(InventoryDirectory('src-id', 'src', ROOT_ID))
+ >>> e = i.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
+ >>> print i.id2path('foo-id')
+ src/foo.c
+
+ :raises NoSuchId: If file_id is not present in the inventory.
+ """
+ # get all names, skipping root
+ return '/'.join(reversed(
+ [parent.name for parent in
+ self._iter_file_id_parents(file_id)][:-1]))
+
+ def iter_entries(self, from_dir=None, recursive=True):
+ """Return (path, entry) pairs, in order by name.
+
+ :param from_dir: if None, start from the root,
+ otherwise start from this directory (either file-id or entry)
+ :param recursive: recurse into directories or not
+ """
+ if from_dir is None:
+ if self.root is None:
+ return
+ from_dir = self.root
+ yield '', self.root
+ elif isinstance(from_dir, basestring):
+ from_dir = self[from_dir]
+
+ # unrolling the recursive calls changed the time from
+ # 440ms/663ms (inline/total) to 116ms/116ms
+ children = from_dir.children.items()
+ children.sort()
+ if not recursive:
+ for name, ie in children:
+ yield name, ie
+ return
+ children = collections.deque(children)
+ stack = [(u'', children)]
+ while stack:
+ from_dir_relpath, children = stack[-1]
+
+ while children:
+ name, ie = children.popleft()
+
+ # we know that from_dir_relpath never ends in a slash
+ # and 'name' doesn't begin with one, so we can do a string op rather
+ # than the checks of pathjoin(), though this means that all paths
+ # start with a slash
+ path = from_dir_relpath + '/' + name
+
+ yield path[1:], ie
+
+ if ie.kind != 'directory':
+ continue
+
+ # But do this child first
+ new_children = ie.children.items()
+ new_children.sort()
+ new_children = collections.deque(new_children)
+ stack.append((path, new_children))
+ # Break out of inner loop, so that we start outer loop with child
+ break
+ else:
+ # if we finished all children, pop it off the stack
+ stack.pop()
+
+ def _preload_cache(self):
+ """Populate any caches, we are about to access all items.
+
+ The default implementation does nothing, because CommonInventory doesn't
+ have a cache.
+ """
+ pass
+
+ def iter_entries_by_dir(self, from_dir=None, specific_file_ids=None,
+ yield_parents=False):
+ """Iterate over the entries in a directory first order.
+
+ This returns all entries for a directory before returning
+ the entries for children of a directory. This is not
+ lexicographically sorted order, and is a hybrid between
+ depth-first and breadth-first.
+
+ :param yield_parents: If True, yield the parents from the root leading
+ down to specific_file_ids that have been requested. This has no
+ impact if specific_file_ids is None.
+ :return: This yields (path, entry) pairs
+ """
+ if specific_file_ids and not isinstance(specific_file_ids, set):
+ specific_file_ids = set(specific_file_ids)
+ # TODO? Perhaps this should return the from_dir so that the root is
+ # yielded? or maybe an option?
+ if from_dir is None and specific_file_ids is None:
+ # They are iterating from the root, and have not specified any
+ # specific entries to look at. All current callers fully consume the
+ # iterator, so we can safely assume we are accessing all entries
+ self._preload_cache()
+ if from_dir is None:
+ if self.root is None:
+ return
+ # Optimize a common case
+ if (not yield_parents and specific_file_ids is not None and
+ len(specific_file_ids) == 1):
+ file_id = list(specific_file_ids)[0]
+ if self.has_id(file_id):
+ yield self.id2path(file_id), self[file_id]
+ return
+ from_dir = self.root
+ if (specific_file_ids is None or yield_parents or
+ self.root.file_id in specific_file_ids):
+ yield u'', self.root
+ elif isinstance(from_dir, basestring):
+ from_dir = self[from_dir]
+
+ if specific_file_ids is not None:
+ # TODO: jam 20070302 This could really be done as a loop rather
+ # than a bunch of recursive calls.
+ parents = set()
+ byid = self
+ def add_ancestors(file_id):
+ if not byid.has_id(file_id):
+ return
+ parent_id = byid[file_id].parent_id
+ if parent_id is None:
+ return
+ if parent_id not in parents:
+ parents.add(parent_id)
+ add_ancestors(parent_id)
+ for file_id in specific_file_ids:
+ add_ancestors(file_id)
+ else:
+ parents = None
+
+ stack = [(u'', from_dir)]
+ while stack:
+ cur_relpath, cur_dir = stack.pop()
+
+ child_dirs = []
+ for child_name, child_ie in sorted(cur_dir.children.iteritems()):
+
+ child_relpath = cur_relpath + child_name
+
+ if (specific_file_ids is None or
+ child_ie.file_id in specific_file_ids or
+ (yield_parents and child_ie.file_id in parents)):
+ yield child_relpath, child_ie
+
+ if child_ie.kind == 'directory':
+ if parents is None or child_ie.file_id in parents:
+ child_dirs.append((child_relpath+'/', child_ie))
+ stack.extend(reversed(child_dirs))
+
+ def _make_delta(self, old):
+ """Make an inventory delta from two inventories."""
+ old_ids = set(old)
+ new_ids = set(self)
+ adds = new_ids - old_ids
+ deletes = old_ids - new_ids
+ common = old_ids.intersection(new_ids)
+ delta = []
+ for file_id in deletes:
+ delta.append((old.id2path(file_id), None, file_id, None))
+ for file_id in adds:
+ delta.append((None, self.id2path(file_id), file_id, self[file_id]))
+ for file_id in common:
+ if old[file_id] != self[file_id]:
+ delta.append((old.id2path(file_id), self.id2path(file_id),
+ file_id, self[file_id]))
+ return delta
+
+ def make_entry(self, kind, name, parent_id, file_id=None):
+ """Simple thunk to bzrlib.inventory.make_entry."""
+ return make_entry(kind, name, parent_id, file_id)
+
+ def entries(self):
+ """Return list of (path, ie) for all entries except the root.
+
+ This may be faster than iter_entries.
+ """
+ accum = []
+ def descend(dir_ie, dir_path):
+ kids = dir_ie.children.items()
+ kids.sort()
+ for name, ie in kids:
+ child_path = osutils.pathjoin(dir_path, name)
+ accum.append((child_path, ie))
+ if ie.kind == 'directory':
+ descend(ie, child_path)
+
+ if self.root is not None:
+ descend(self.root, u'')
+ return accum
+
+ def path2id(self, relpath):
+ """Walk down through directories to return entry of last component.
+
+ :param relpath: may be either a list of path components, or a single
+ string, in which case it is automatically split.
+
+ This returns the file_id of the last component in the path,
+ which may refer to either a file or a directory.
+
+ Returns None IFF the path is not found.
+ """
+ if isinstance(relpath, basestring):
+ names = osutils.splitpath(relpath)
+ else:
+ names = relpath
+
+ try:
+ parent = self.root
+ except errors.NoSuchId:
+ # root doesn't exist yet so nothing else can
+ return None
+ if parent is None:
+ return None
+ for f in names:
+ try:
+ children = getattr(parent, 'children', None)
+ if children is None:
+ return None
+ cie = children[f]
+ parent = cie
+ except KeyError:
+ # or raise an error?
+ return None
+
+ return parent.file_id
+
+ def filter(self, specific_fileids):
+ """Get an inventory view filtered against a set of file-ids.
+
+ Children of directories and parents are included.
+
+ The result may or may not reference the underlying inventory
+ so it should be treated as immutable.
+ """
+ interesting_parents = set()
+ for fileid in specific_fileids:
+ try:
+ interesting_parents.update(self.get_idpath(fileid))
+ except errors.NoSuchId:
+ # This fileid is not in the inventory - that's ok
+ pass
+ entries = self.iter_entries()
+ if self.root is None:
+ return Inventory(root_id=None)
+ other = Inventory(entries.next()[1].file_id)
+ other.root.revision = self.root.revision
+ other.revision_id = self.revision_id
+ directories_to_expand = set()
+ for path, entry in entries:
+ file_id = entry.file_id
+ if (file_id in specific_fileids
+ or entry.parent_id in directories_to_expand):
+ if entry.kind == 'directory':
+ directories_to_expand.add(file_id)
+ elif file_id not in interesting_parents:
+ continue
+ other.add(entry.copy())
+ return other
+
+ def get_idpath(self, file_id):
+ """Return a list of file_ids for the path to an entry.
+
+ The list contains one element for each directory followed by
+ the id of the file itself. So the length of the returned list
+ is equal to the depth of the file in the tree, counting the
+ root directory as depth 1.
+ """
+ p = []
+ for parent in self._iter_file_id_parents(file_id):
+ p.insert(0, parent.file_id)
+ return p
+
+
+class Inventory(CommonInventory):
+ """Mutable dict based in-memory inventory.
+
+ We never store the full path to a file, because renaming a directory
+ implicitly moves all of its contents. This class internally maintains a
+ lookup tree that allows the children under a directory to be
+ returned quickly.
+
+ >>> inv = Inventory()
+ >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
+ InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
+ >>> inv['123-123'].name
+ 'hello.c'
+
+ Id's may be looked up from paths:
+
+ >>> inv.path2id('hello.c')
+ '123-123'
+ >>> inv.has_id('123-123')
+ True
+
+ There are iterators over the contents:
+
+ >>> [entry[0] for entry in inv.iter_entries()]
+ ['', u'hello.c']
+ """
+
+ def __init__(self, root_id=ROOT_ID, revision_id=None):
+ """Create or read an inventory.
+
+ If a working directory is specified, the inventory is read
+ from there. If the file is specified, read from that. If not,
+ the inventory is created empty.
+
+ The inventory is created with a default root directory, with
+ an id of None.
+ """
+ if root_id is not None:
+ self._set_root(InventoryDirectory(root_id, u'', None))
+ else:
+ self.root = None
+ self._byid = {}
+ self.revision_id = revision_id
+
+ def __repr__(self):
+ # More than one page of output is not useful when debugging
+ max_len = 2048
+ closing = '...}'
+ contents = repr(self._byid)
+ if len(contents) > max_len:
+ contents = contents[:(max_len-len(closing))] + closing
+ return "<Inventory object at %x, contents=%r>" % (id(self), contents)
+
+ def apply_delta(self, delta):
+ """Apply a delta to this inventory.
+
+ See the inventory developers documentation for the theory behind
+ inventory deltas.
+
+ If delta application fails the inventory is left in an indeterminate
+ state and must not be used.
+
+ :param delta: A list of changes to apply. After all the changes are
+ applied the final inventory must be internally consistent, but it
+ is ok to supply changes which, if only half-applied, would have an
+ invalid result - such as supplying two changes which rename two
+ files, 'A' and 'B', with each other: [('A', 'B', 'A-id', a_entry),
+ ('B', 'A', 'B-id', b_entry)].
+
+ Each change is a tuple, of the form (old_path, new_path, file_id,
+ new_entry).
+
+ When new_path is None, the change indicates the removal of an entry
+ from the inventory and new_entry will be ignored (using None is
+ appropriate). If new_path is not None, then new_entry must be an
+ InventoryEntry instance, which will be incorporated into the
+ inventory (and replace any existing entry with the same file id).
+
+ When old_path is None, the change indicates the addition of
+ a new entry to the inventory.
+
+ When neither new_path nor old_path is None, the change is a
+ modification to an entry, such as a rename, reparent, kind change
+ etc.
+
+ The children attribute of new_entry is ignored. This is because
+ this method preserves children automatically across alterations to
+ the parent of the children, and cases where the parent id of a
+ child is changing require the child to be passed in as a separate
+ change regardless. E.g. in the recursive deletion of a directory -
+ the directory's children must be included in the delta, or the
+ final inventory will be invalid.
+
+ Note that a file_id must only appear once within a given delta.
+ An AssertionError is raised otherwise.
+ """
+ # Check that the delta is legal. It would be nice if this could be
+ # done within the loops below but it's safer to validate the delta
+ # before starting to mutate the inventory, as there isn't a rollback
+ # facility.
+ list(_check_delta_unique_ids(_check_delta_unique_new_paths(
+ _check_delta_unique_old_paths(_check_delta_ids_match_entry(
+ _check_delta_ids_are_valid(
+ _check_delta_new_path_entry_both_or_None(
+ delta)))))))
+
+ children = {}
+ # Remove all affected items which were in the original inventory,
+ # starting with the longest paths, thus ensuring parents are examined
+ # after their children, which means that everything we examine has no
+ # modified children remaining by the time we examine it.
+ for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
+ if op is not None), reverse=True):
+ # Preserve unaltered children of file_id for later reinsertion.
+ file_id_children = getattr(self[file_id], 'children', {})
+ if len(file_id_children):
+ children[file_id] = file_id_children
+ if self.id2path(file_id) != old_path:
+ raise errors.InconsistentDelta(old_path, file_id,
+ "Entry was at wrong other path %r." % self.id2path(file_id))
+ # Remove file_id and the unaltered children. If file_id is not
+ # being deleted it will be reinserted back later.
+ self.remove_recursive_id(file_id)
+ # Insert all affected which should be in the new inventory, reattaching
+ # their children if they had any. This is done from shortest path to
+ # longest, ensuring that items which were modified and whose parents in
+ # the resulting inventory were also modified, are inserted after their
+ # parents.
+ for new_path, f, new_entry in sorted((np, f, e) for op, np, f, e in
+ delta if np is not None):
+ if new_entry.kind == 'directory':
+ # Pop the children now, to allow detection of children whose
+ # parents were deleted and which were not reattached to a new
+ # parent.
+ replacement = InventoryDirectory(new_entry.file_id,
+ new_entry.name, new_entry.parent_id)
+ replacement.revision = new_entry.revision
+ replacement.children = children.pop(replacement.file_id, {})
+ new_entry = replacement
+ try:
+ self.add(new_entry)
+ except errors.DuplicateFileId:
+ raise errors.InconsistentDelta(new_path, new_entry.file_id,
+ "New id is already present in target.")
+ except AttributeError:
+ raise errors.InconsistentDelta(new_path, new_entry.file_id,
+ "Parent is not a directory.")
+ if self.id2path(new_entry.file_id) != new_path:
+ raise errors.InconsistentDelta(new_path, new_entry.file_id,
+ "New path is not consistent with parent path.")
+ if len(children):
+ # Get the parent id that was deleted
+ parent_id, children = children.popitem()
+ raise errors.InconsistentDelta("<deleted>", parent_id,
+ "The file id was deleted but its children were not deleted.")
+
+ def create_by_apply_delta(self, inventory_delta, new_revision_id,
+ propagate_caches=False):
+ """See CHKInventory.create_by_apply_delta()"""
+ new_inv = self.copy()
+ new_inv.apply_delta(inventory_delta)
+ new_inv.revision_id = new_revision_id
+ return new_inv
+
+ def _set_root(self, ie):
+ self.root = ie
+ self._byid = {self.root.file_id: self.root}
+
+ def copy(self):
+ # TODO: jam 20051218 Should copy also copy the revision_id?
+ entries = self.iter_entries()
+ if self.root is None:
+ return Inventory(root_id=None)
+ other = Inventory(entries.next()[1].file_id)
+ other.root.revision = self.root.revision
+ # copy recursively so we know directories will be added before
+ # their children. There are more efficient ways than this...
+ for path, entry in entries:
+ other.add(entry.copy())
+ return other
+
+ def __iter__(self):
+ """Iterate over all file-ids."""
+ return iter(self._byid)
+
+ def iter_just_entries(self):
+ """Iterate over all entries.
+
+ Unlike iter_entries(), just the entries are returned (not (path, ie))
+ and the order of entries is undefined.
+
+ XXX: We may not want to merge this into bzr.dev.
+ """
+ if self.root is None:
+ return
+ for _, ie in self._byid.iteritems():
+ yield ie
+
+ def __len__(self):
+ """Returns number of entries."""
+ return len(self._byid)
+
+ def __getitem__(self, file_id):
+ """Return the entry for given file_id.
+
+ >>> inv = Inventory()
+ >>> inv.add(InventoryFile('123123', 'hello.c', ROOT_ID))
+ InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
+ >>> inv['123123'].name
+ 'hello.c'
+ """
+ try:
+ return self._byid[file_id]
+ except KeyError:
+ # really we're passing an inventory, not a tree...
+ raise errors.NoSuchId(self, file_id)
+
+ def get_file_kind(self, file_id):
+ return self._byid[file_id].kind
+
+ def get_child(self, parent_id, filename):
+ return self[parent_id].children.get(filename)
+
+ def _add_child(self, entry):
+ """Add an entry to the inventory, without adding it to its parent"""
+ if entry.file_id in self._byid:
+ raise errors.BzrError(
+ "inventory already contains entry with id {%s}" %
+ entry.file_id)
+ self._byid[entry.file_id] = entry
+ for child in getattr(entry, 'children', {}).itervalues():
+ self._add_child(child)
+ return entry
+
+ def add(self, entry):
+ """Add entry to inventory.
+
+ :return: entry
+ """
+ if entry.file_id in self._byid:
+ raise errors.DuplicateFileId(entry.file_id,
+ self._byid[entry.file_id])
+ if entry.parent_id is None:
+ self.root = entry
+ else:
+ try:
+ parent = self._byid[entry.parent_id]
+ except KeyError:
+ raise errors.InconsistentDelta("<unknown>", entry.parent_id,
+ "Parent not in inventory.")
+ if entry.name in parent.children:
+ raise errors.InconsistentDelta(
+ self.id2path(parent.children[entry.name].file_id),
+ entry.file_id,
+ "Path already versioned")
+ parent.children[entry.name] = entry
+ return self._add_child(entry)
+
+ def add_path(self, relpath, kind, file_id=None, parent_id=None):
+ """Add entry from a path.
+
+ The immediate parent must already be versioned.
+
+ Returns the new entry object."""
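+ # Illustrative example (not part of the original source): with 'src'
+ # already versioned, inv.add_path('src/hello.c', 'file') creates and adds
+ # a new InventoryFile under 'src'; add_path('', 'directory', 'my-root-id')
+ # (re)creates the root entry itself.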
+
+ parts = osutils.splitpath(relpath)
+
+ if len(parts) == 0:
+ if file_id is None:
+ file_id = generate_ids.gen_root_id()
+ self.root = InventoryDirectory(file_id, '', None)
+ self._byid = {self.root.file_id: self.root}
+ return self.root
+ else:
+ parent_path = parts[:-1]
+ parent_id = self.path2id(parent_path)
+ if parent_id is None:
+ raise errors.NotVersionedError(path=parent_path)
+ ie = make_entry(kind, parts[-1], parent_id, file_id)
+ return self.add(ie)
+
+ def __delitem__(self, file_id):
+ """Remove entry by id.
+
+ >>> inv = Inventory()
+ >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
+ InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
+ >>> inv.has_id('123')
+ True
+ >>> del inv['123']
+ >>> inv.has_id('123')
+ False
+ """
+ ie = self[file_id]
+ del self._byid[file_id]
+ if ie.parent_id is not None:
+ del self[ie.parent_id].children[ie.name]
+
+ def __eq__(self, other):
+ """Compare two sets by comparing their contents.
+
+ >>> i1 = Inventory()
+ >>> i2 = Inventory()
+ >>> i1 == i2
+ True
+ >>> i1.add(InventoryFile('123', 'foo', ROOT_ID))
+ InventoryFile('123', 'foo', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
+ >>> i1 == i2
+ False
+ >>> i2.add(InventoryFile('123', 'foo', ROOT_ID))
+ InventoryFile('123', 'foo', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
+ >>> i1 == i2
+ True
+ """
+ if not isinstance(other, Inventory):
+ return NotImplemented
+
+ return self._byid == other._byid
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ raise ValueError('not hashable')
+
+ def _iter_file_id_parents(self, file_id):
+ """Yield the parents of file_id up to the root."""
+ while file_id is not None:
+ try:
+ ie = self._byid[file_id]
+ except KeyError:
+ raise errors.NoSuchId(tree=None, file_id=file_id)
+ yield ie
+ file_id = ie.parent_id
+
+ def has_id(self, file_id):
+ return (file_id in self._byid)
+
+ def _make_delta(self, old):
+ """Make an inventory delta from two inventories."""
+ old_getter = getattr(old, '_byid', old)
+ new_getter = self._byid
+ old_ids = set(old_getter)
+ new_ids = set(new_getter)
+ adds = new_ids - old_ids
+ deletes = old_ids - new_ids
+ if not adds and not deletes:
+ common = new_ids
+ else:
+ common = old_ids.intersection(new_ids)
+ delta = []
+ for file_id in deletes:
+ delta.append((old.id2path(file_id), None, file_id, None))
+ for file_id in adds:
+ delta.append((None, self.id2path(file_id), file_id, self[file_id]))
+ for file_id in common:
+ new_ie = new_getter[file_id]
+ old_ie = old_getter[file_id]
+ # If xml_serializer returns the cached InventoryEntries (rather
+ # than always doing .copy()), inlining the 'is' check saves 2.7M
+ # calls to __eq__. Under lsprof this saves 20s => 6s.
+ # It is a minor improvement without lsprof.
+ if old_ie is new_ie or old_ie == new_ie:
+ continue
+ else:
+ delta.append((old.id2path(file_id), self.id2path(file_id),
+ file_id, new_ie))
+ return delta
+
+ def remove_recursive_id(self, file_id):
+ """Remove file_id, and children, from the inventory.
+
+ :param file_id: A file_id to remove.
+ """
+ to_find_delete = [self._byid[file_id]]
+ to_delete = []
+ while to_find_delete:
+ ie = to_find_delete.pop()
+ to_delete.append(ie.file_id)
+ if ie.kind == 'directory':
+ to_find_delete.extend(ie.children.values())
+ for file_id in reversed(to_delete):
+ ie = self[file_id]
+ del self._byid[file_id]
+ if ie.parent_id is not None:
+ del self[ie.parent_id].children[ie.name]
+ else:
+ self.root = None
+
+ def rename(self, file_id, new_parent_id, new_name):
+ """Move a file within the inventory.
+
+ This can change either the name, or the parent, or both.
+
+ This does not move the working file.
+ """
+ new_name = ensure_normalized_name(new_name)
+ if not is_valid_name(new_name):
+ raise errors.BzrError("not an acceptable filename: %r" % new_name)
+
+ new_parent = self._byid[new_parent_id]
+ if new_name in new_parent.children:
+ raise errors.BzrError("%r already exists in %r" %
+ (new_name, self.id2path(new_parent_id)))
+
+ new_parent_idpath = self.get_idpath(new_parent_id)
+ if file_id in new_parent_idpath:
+ raise errors.BzrError(
+ "cannot move directory %r into a subdirectory of itself, %r"
+ % (self.id2path(file_id), self.id2path(new_parent_id)))
+
+ file_ie = self._byid[file_id]
+ old_parent = self._byid[file_ie.parent_id]
+
+ # TODO: Don't leave things messed up if this fails
+
+ del old_parent.children[file_ie.name]
+ new_parent.children[new_name] = file_ie
+
+ file_ie.name = new_name
+ file_ie.parent_id = new_parent_id
+
+ def is_root(self, file_id):
+ return self.root is not None and file_id == self.root.file_id
+
+
+class CHKInventory(CommonInventory):
+ """An inventory persisted in a CHK store.
+
+ By design, a CHKInventory is immutable so many of the methods
+ supported by Inventory - add, rename, apply_delta, etc - are *not*
+ supported. To create a new CHKInventory, use create_by_apply_delta()
+ or from_inventory(), say.
+
+ Internally, a CHKInventory has one or two CHKMaps:
+
+ * id_to_entry - a map from (file_id,) => InventoryEntry as bytes
+ * parent_id_basename_to_file_id - a map from (parent_id, basename_utf8)
+ => file_id as bytes
+
+ The second map is optional and is not present in early CHK repositories.
+
+ Very little caching is performed: most method calls and item accesses
+ will issue requests to the storage layer. As such, keep references to
+ objects you want to reuse.
+ """
+
+ def __init__(self, search_key_name):
+ CommonInventory.__init__(self)
+ self._fileid_to_entry_cache = {}
+ self._fully_cached = False
+ self._path_to_fileid_cache = {}
+ self._search_key_name = search_key_name
+ self.root_id = None
+
+ def __eq__(self, other):
+ """Compare two sets by comparing their contents."""
+ if not isinstance(other, CHKInventory):
+ return NotImplemented
+
+ this_key = self.id_to_entry.key()
+ other_key = other.id_to_entry.key()
+ this_pid_key = self.parent_id_basename_to_file_id.key()
+ other_pid_key = other.parent_id_basename_to_file_id.key()
+ if None in (this_key, this_pid_key, other_key, other_pid_key):
+ return False
+ return this_key == other_key and this_pid_key == other_pid_key
+
+ def _entry_to_bytes(self, entry):
+ """Serialise entry as a single bytestring.
+
+ :param entry: An inventory entry.
+ :return: A bytestring for the entry.
+
+ The BNF:
+ ENTRY ::= FILE | DIR | SYMLINK | TREE
+ FILE ::= "file: " COMMON SEP SHA SEP SIZE SEP EXECUTABLE
+ DIR ::= "dir: " COMMON
+ SYMLINK ::= "symlink: " COMMON SEP TARGET_UTF8
+ TREE ::= "tree: " COMMON REFERENCE_REVISION
+ COMMON ::= FILE_ID SEP PARENT_ID SEP NAME_UTF8 SEP REVISION
+ SEP ::= "\n"
+ """
+ if entry.parent_id is not None:
+ parent_str = entry.parent_id
+ else:
+ parent_str = ''
+ name_str = entry.name.encode("utf8")
+ if entry.kind == 'file':
+ if entry.executable:
+ exec_str = "Y"
+ else:
+ exec_str = "N"
+ return "file: %s\n%s\n%s\n%s\n%s\n%d\n%s" % (
+ entry.file_id, parent_str, name_str, entry.revision,
+ entry.text_sha1, entry.text_size, exec_str)
+ elif entry.kind == 'directory':
+ return "dir: %s\n%s\n%s\n%s" % (
+ entry.file_id, parent_str, name_str, entry.revision)
+ elif entry.kind == 'symlink':
+ return "symlink: %s\n%s\n%s\n%s\n%s" % (
+ entry.file_id, parent_str, name_str, entry.revision,
+ entry.symlink_target.encode("utf8"))
+ elif entry.kind == 'tree-reference':
+ return "tree: %s\n%s\n%s\n%s\n%s" % (
+ entry.file_id, parent_str, name_str, entry.revision,
+ entry.reference_revision)
+ else:
+ raise ValueError("unknown kind %r" % entry.kind)
+
+ def _expand_fileids_to_parents_and_children(self, file_ids):
+ """Give a more wholistic view starting with the given file_ids.
+
+ For any file_id which maps to a directory, we will include all children
+ of that directory. We will also include all directories which are
+ parents of the given file_ids, but we will not include their children.
+
+ eg:
+ / # TREE_ROOT
+ foo/ # foo-id
+ baz # baz-id
+ frob/ # frob-id
+ fringle # fringle-id
+ bar/ # bar-id
+ bing # bing-id
+
+ if given [foo-id] we will include
+ TREE_ROOT as an interesting parent
+ and
+ foo-id, baz-id, frob-id, fringle-id
+ as interesting ids.
+ """
+ interesting = set()
+ # TODO: Pre-pass over the list of fileids to see if anything is already
+ # deserialized in self._fileid_to_entry_cache
+
+ directories_to_expand = set()
+ children_of_parent_id = {}
+ # It is okay if some of the fileids are missing
+ for entry in self._getitems(file_ids):
+ if entry.kind == 'directory':
+ directories_to_expand.add(entry.file_id)
+ interesting.add(entry.parent_id)
+ children_of_parent_id.setdefault(entry.parent_id, set()
+ ).add(entry.file_id)
+
+ # Now, interesting has all of the direct parents, but not the
+ # parents of those parents. It also may have some duplicates with
+ # specific_fileids
+ remaining_parents = interesting.difference(file_ids)
+ # When we hit the TREE_ROOT, we'll get an interesting parent of None,
+ # but we don't actually want to recurse into that
+ interesting.add(None) # this will auto-filter it in the loop
+ remaining_parents.discard(None)
+ while remaining_parents:
+ next_parents = set()
+ for entry in self._getitems(remaining_parents):
+ next_parents.add(entry.parent_id)
+ children_of_parent_id.setdefault(entry.parent_id, set()
+ ).add(entry.file_id)
+ # Remove any search tips we've already processed
+ remaining_parents = next_parents.difference(interesting)
+ interesting.update(remaining_parents)
+ # We should probably also .difference(directories_to_expand)
+ interesting.update(file_ids)
+ interesting.discard(None)
+ while directories_to_expand:
+ # Expand directories by looking in the
+ # parent_id_basename_to_file_id map
+ keys = [StaticTuple(f,).intern() for f in directories_to_expand]
+ directories_to_expand = set()
+ items = self.parent_id_basename_to_file_id.iteritems(keys)
+ next_file_ids = set([item[1] for item in items])
+ next_file_ids = next_file_ids.difference(interesting)
+ interesting.update(next_file_ids)
+ for entry in self._getitems(next_file_ids):
+ if entry.kind == 'directory':
+ directories_to_expand.add(entry.file_id)
+ children_of_parent_id.setdefault(entry.parent_id, set()
+ ).add(entry.file_id)
+ return interesting, children_of_parent_id
+
+ def filter(self, specific_fileids):
+ """Get an inventory view filtered against a set of file-ids.
+
+ Children of directories and parents are included.
+
+ The result may or may not reference the underlying inventory
+ so it should be treated as immutable.
+ """
+ (interesting,
+ parent_to_children) = self._expand_fileids_to_parents_and_children(
+ specific_fileids)
+ # There is some overlap here, but we assume that all interesting items
+ # are in the _fileid_to_entry_cache because we had to read them to
+ # determine if they were a dir we wanted to recurse, or just a file
+ # This should give us all the entries we'll want to add, so start
+ # adding
+ other = Inventory(self.root_id)
+ other.root.revision = self.root.revision
+ other.revision_id = self.revision_id
+ if not interesting or not parent_to_children:
+ # empty filter, or filtering entries that don't exist
+ # (if even 1 existed, then we would have populated
+ # parent_to_children with at least the tree root.)
+ return other
+ cache = self._fileid_to_entry_cache
+ remaining_children = collections.deque(parent_to_children[self.root_id])
+ while remaining_children:
+ file_id = remaining_children.popleft()
+ ie = cache[file_id]
+ if ie.kind == 'directory':
+ ie = ie.copy() # We create a copy to depopulate the .children attribute
+ # TODO: depending on the uses of 'other' we should probably always
+ # '.copy()' to prevent someone from mutating other and
+ # invalidating our internal cache
+ other.add(ie)
+ if file_id in parent_to_children:
+ remaining_children.extend(parent_to_children[file_id])
+ return other
+
+ @staticmethod
+ def _bytes_to_utf8name_key(bytes):
+ """Get the file_id, revision_id key out of bytes."""
+ # We don't normally care about name, except for times when we want
+ # to filter out empty names because of non rich-root...
+ sections = bytes.split('\n')
+ kind, file_id = sections[0].split(': ')
+ return (sections[2], intern(file_id), intern(sections[3]))
+
+ def _bytes_to_entry(self, bytes):
+ """Deserialise a serialised entry."""
+ sections = bytes.split('\n')
+ if sections[0].startswith("file: "):
+ result = InventoryFile(sections[0][6:],
+ sections[2].decode('utf8'),
+ sections[1])
+ result.text_sha1 = sections[4]
+ result.text_size = int(sections[5])
+ result.executable = sections[6] == "Y"
+ elif sections[0].startswith("dir: "):
+ result = CHKInventoryDirectory(sections[0][5:],
+ sections[2].decode('utf8'),
+ sections[1], self)
+ elif sections[0].startswith("symlink: "):
+ result = InventoryLink(sections[0][9:],
+ sections[2].decode('utf8'),
+ sections[1])
+ result.symlink_target = sections[4].decode('utf8')
+ elif sections[0].startswith("tree: "):
+ result = TreeReference(sections[0][6:],
+ sections[2].decode('utf8'),
+ sections[1])
+ result.reference_revision = sections[4]
+ else:
+ raise ValueError("Not a serialised entry %r" % bytes)
+ result.file_id = intern(result.file_id)
+ result.revision = intern(sections[3])
+ if result.parent_id == '':
+ result.parent_id = None
+ self._fileid_to_entry_cache[result.file_id] = result
+ return result
+
+ def create_by_apply_delta(self, inventory_delta, new_revision_id,
+ propagate_caches=False):
+ """Create a new CHKInventory by applying inventory_delta to this one.
+
+ See the inventory developers documentation for the theory behind
+ inventory deltas.
+
+ :param inventory_delta: The inventory delta to apply. See
+ Inventory.apply_delta for details.
+ :param new_revision_id: The revision id of the resulting CHKInventory.
+ :param propagate_caches: If True, the caches for this inventory are
+ copied to and updated for the result.
+ :return: The new CHKInventory.
+ """
+ split = osutils.split
+ result = CHKInventory(self._search_key_name)
+ if propagate_caches:
+ # Just propagate the path-to-fileid cache for now
+ result._path_to_fileid_cache = dict(self._path_to_fileid_cache.iteritems())
+ search_key_func = chk_map.search_key_registry.get(self._search_key_name)
+ self.id_to_entry._ensure_root()
+ maximum_size = self.id_to_entry._root_node.maximum_size
+ result.revision_id = new_revision_id
+ result.id_to_entry = chk_map.CHKMap(
+ self.id_to_entry._store,
+ self.id_to_entry.key(),
+ search_key_func=search_key_func)
+ result.id_to_entry._ensure_root()
+ result.id_to_entry._root_node.set_maximum_size(maximum_size)
+ # Change to apply to the parent_id_basename delta. The dict maps
+ # (parent_id, basename) -> (old_key, new_value). We use a dict because
+ # when a path has its id replaced (e.g. the root is changed, or someone
+ # does bzr mv a b, bzr mv c a), we should output a single change to this
+ # map rather than two.
+ parent_id_basename_delta = {}
+ if self.parent_id_basename_to_file_id is not None:
+ result.parent_id_basename_to_file_id = chk_map.CHKMap(
+ self.parent_id_basename_to_file_id._store,
+ self.parent_id_basename_to_file_id.key(),
+ search_key_func=search_key_func)
+ result.parent_id_basename_to_file_id._ensure_root()
+ self.parent_id_basename_to_file_id._ensure_root()
+ result_p_id_root = result.parent_id_basename_to_file_id._root_node
+ p_id_root = self.parent_id_basename_to_file_id._root_node
+ result_p_id_root.set_maximum_size(p_id_root.maximum_size)
+ result_p_id_root._key_width = p_id_root._key_width
+ else:
+ result.parent_id_basename_to_file_id = None
+ result.root_id = self.root_id
+ id_to_entry_delta = []
+ # inventory_delta is only traversed once, so we just update the
+ # variable.
+ # Check for repeated file ids
+ inventory_delta = _check_delta_unique_ids(inventory_delta)
+ # Repeated old paths
+ inventory_delta = _check_delta_unique_old_paths(inventory_delta)
+ # Check for repeated new paths
+ inventory_delta = _check_delta_unique_new_paths(inventory_delta)
+ # Check for entries that don't match the fileid
+ inventory_delta = _check_delta_ids_match_entry(inventory_delta)
+ # Check for nonsense fileids
+ inventory_delta = _check_delta_ids_are_valid(inventory_delta)
+ # Check for new_path <-> entry consistency
+ inventory_delta = _check_delta_new_path_entry_both_or_None(
+ inventory_delta)
+ # All changed entries need to have their parents be directories and be
+ # at the right path. This set contains (path, id) tuples.
+ parents = set()
+ # When we delete an item, all the children of it must be either deleted
+ # or altered in their own right. As we batch process the change via
+ # CHKMap.apply_delta, we build a set of things to use to validate the
+ # delta.
+ deletes = set()
+ altered = set()
+ for old_path, new_path, file_id, entry in inventory_delta:
+ # file id changes
+ if new_path == '':
+ result.root_id = file_id
+ if new_path is None:
+ # Make a delete:
+ new_key = None
+ new_value = None
+ # Update caches
+ if propagate_caches:
+ try:
+ del result._path_to_fileid_cache[old_path]
+ except KeyError:
+ pass
+ deletes.add(file_id)
+ else:
+ new_key = StaticTuple(file_id,)
+ new_value = result._entry_to_bytes(entry)
+ # Update caches. It's worth doing this whether
+ # we're propagating the old caches or not.
+ result._path_to_fileid_cache[new_path] = file_id
+ parents.add((split(new_path)[0], entry.parent_id))
+ if old_path is None:
+ old_key = None
+ else:
+ old_key = StaticTuple(file_id,)
+ if self.id2path(file_id) != old_path:
+ raise errors.InconsistentDelta(old_path, file_id,
+ "Entry was at wrong other path %r." %
+ self.id2path(file_id))
+ altered.add(file_id)
+ id_to_entry_delta.append(StaticTuple(old_key, new_key, new_value))
+ if result.parent_id_basename_to_file_id is not None:
+ # parent_id, basename changes
+ if old_path is None:
+ old_key = None
+ else:
+ old_entry = self[file_id]
+ old_key = self._parent_id_basename_key(old_entry)
+ if new_path is None:
+ new_key = None
+ new_value = None
+ else:
+ new_key = self._parent_id_basename_key(entry)
+ new_value = file_id
+ # If the two keys are the same, the value will be unchanged
+ # as it's always the file id for this entry.
+ if old_key != new_key:
+ # Transform a change into explicit delete/add preserving
+ # a possible match on the key from a different file id.
+ if old_key is not None:
+ parent_id_basename_delta.setdefault(
+ old_key, [None, None])[0] = old_key
+ if new_key is not None:
+ parent_id_basename_delta.setdefault(
+ new_key, [None, None])[1] = new_value
+ # validate that deletes are complete.
+ for file_id in deletes:
+ entry = self[file_id]
+ if entry.kind != 'directory':
+ continue
+ # This loop could potentially be better by using the id_basename
+ # map to just get the child file ids.
+ for child in entry.children.values():
+ if child.file_id not in altered:
+ raise errors.InconsistentDelta(self.id2path(child.file_id),
+ child.file_id, "Child not deleted or reparented when "
+ "parent deleted.")
+ result.id_to_entry.apply_delta(id_to_entry_delta)
+ if parent_id_basename_delta:
+ # Transform the parent_id_basename delta data into a linear delta
+ # with only one record for a given key. Optimally this would allow
+ # re-keying, but it's simpler to just output that as a delete+add
+ # to spend less time calculating the delta.
+ delta_list = []
+ for key, (old_key, value) in parent_id_basename_delta.iteritems():
+ if value is not None:
+ delta_list.append((old_key, key, value))
+ else:
+ delta_list.append((old_key, None, None))
+ result.parent_id_basename_to_file_id.apply_delta(delta_list)
+ parents.discard(('', None))
+ for parent_path, parent in parents:
+ try:
+ if result[parent].kind != 'directory':
+ raise errors.InconsistentDelta(result.id2path(parent), parent,
+ 'Not a directory, but given children')
+ except errors.NoSuchId:
+ raise errors.InconsistentDelta("<unknown>", parent,
+ "Parent is not present in resulting inventory.")
+ if result.path2id(parent_path) != parent:
+ raise errors.InconsistentDelta(parent_path, parent,
+ "Parent has wrong path %r." % result.path2id(parent_path))
+ return result
+
+ @classmethod
+ def deserialise(klass, chk_store, bytes, expected_revision_id):
+ """Deserialise a CHKInventory.
+
+ :param chk_store: A CHK capable VersionedFiles instance.
+ :param bytes: The serialised bytes.
+ :param expected_revision_id: The revision ID we think this inventory is
+ for.
+ :return: A CHKInventory
+ """
+ lines = bytes.split('\n')
+ if lines[-1] != '':
+ raise AssertionError('bytes to deserialize must end with an eol')
+ lines.pop()
+ if lines[0] != 'chkinventory:':
+ raise ValueError("not a serialised CHKInventory: %r" % bytes)
+ info = {}
+ allowed_keys = frozenset(['root_id', 'revision_id', 'search_key_name',
+ 'parent_id_basename_to_file_id',
+ 'id_to_entry'])
+ for line in lines[1:]:
+ key, value = line.split(': ', 1)
+ if key not in allowed_keys:
+ raise errors.BzrError('Unknown key in inventory: %r\n%r'
+ % (key, bytes))
+ if key in info:
+ raise errors.BzrError('Duplicate key in inventory: %r\n%r'
+ % (key, bytes))
+ info[key] = value
+ revision_id = intern(info['revision_id'])
+ root_id = intern(info['root_id'])
+ search_key_name = intern(info.get('search_key_name', 'plain'))
+ parent_id_basename_to_file_id = intern(info.get(
+ 'parent_id_basename_to_file_id', None))
+ if not parent_id_basename_to_file_id.startswith('sha1:'):
+ raise ValueError('parent_id_basename_to_file_id should be a sha1'
+ ' key not %r' % (parent_id_basename_to_file_id,))
+ id_to_entry = info['id_to_entry']
+ if not id_to_entry.startswith('sha1:'):
+ raise ValueError('id_to_entry should be a sha1'
+ ' key not %r' % (id_to_entry,))
+
+ result = CHKInventory(search_key_name)
+ result.revision_id = revision_id
+ result.root_id = root_id
+ search_key_func = chk_map.search_key_registry.get(
+ result._search_key_name)
+ if parent_id_basename_to_file_id is not None:
+ result.parent_id_basename_to_file_id = chk_map.CHKMap(
+ chk_store, StaticTuple(parent_id_basename_to_file_id,),
+ search_key_func=search_key_func)
+ else:
+ result.parent_id_basename_to_file_id = None
+
+ result.id_to_entry = chk_map.CHKMap(chk_store,
+ StaticTuple(id_to_entry,),
+ search_key_func=search_key_func)
+ if (result.revision_id,) != expected_revision_id:
+ raise ValueError("Mismatched revision id and expected: %r, %r" %
+ (result.revision_id, expected_revision_id))
+ return result
+
+ @classmethod
+ def from_inventory(klass, chk_store, inventory, maximum_size=0, search_key_name='plain'):
+ """Create a CHKInventory from an existing inventory.
+
+ The content of inventory is copied into the chk_store, and a
+ CHKInventory referencing that is returned.
+
+ :param chk_store: A CHK capable VersionedFiles instance.
+ :param inventory: The inventory to copy.
+ :param maximum_size: The CHKMap node size limit.
+ :param search_key_name: The identifier for the search key function
+ """
+ result = klass(search_key_name)
+ result.revision_id = inventory.revision_id
+ result.root_id = inventory.root.file_id
+
+ entry_to_bytes = result._entry_to_bytes
+ parent_id_basename_key = result._parent_id_basename_key
+ id_to_entry_dict = {}
+ parent_id_basename_dict = {}
+ for path, entry in inventory.iter_entries():
+ key = StaticTuple(entry.file_id,).intern()
+ id_to_entry_dict[key] = entry_to_bytes(entry)
+ p_id_key = parent_id_basename_key(entry)
+ parent_id_basename_dict[p_id_key] = entry.file_id
+
+ result._populate_from_dicts(chk_store, id_to_entry_dict,
+ parent_id_basename_dict, maximum_size=maximum_size)
+ return result
+
+ def _populate_from_dicts(self, chk_store, id_to_entry_dict,
+ parent_id_basename_dict, maximum_size):
+ search_key_func = chk_map.search_key_registry.get(self._search_key_name)
+ root_key = chk_map.CHKMap.from_dict(chk_store, id_to_entry_dict,
+ maximum_size=maximum_size, key_width=1,
+ search_key_func=search_key_func)
+ self.id_to_entry = chk_map.CHKMap(chk_store, root_key,
+ search_key_func)
+ root_key = chk_map.CHKMap.from_dict(chk_store,
+ parent_id_basename_dict,
+ maximum_size=maximum_size, key_width=2,
+ search_key_func=search_key_func)
+ self.parent_id_basename_to_file_id = chk_map.CHKMap(chk_store,
+ root_key, search_key_func)
+
+ def _parent_id_basename_key(self, entry):
+ """Create a key for a entry in a parent_id_basename_to_file_id index."""
+ if entry.parent_id is not None:
+ parent_id = entry.parent_id
+ else:
+ parent_id = ''
+ return StaticTuple(parent_id, entry.name.encode('utf8')).intern()
+
+ def __getitem__(self, file_id):
+ """map a single file_id -> InventoryEntry."""
+ if file_id is None:
+ raise errors.NoSuchId(self, file_id)
+ result = self._fileid_to_entry_cache.get(file_id, None)
+ if result is not None:
+ return result
+ try:
+ return self._bytes_to_entry(
+ self.id_to_entry.iteritems([StaticTuple(file_id,)]).next()[1])
+ except StopIteration:
+ # really we're passing an inventory, not a tree...
+ raise errors.NoSuchId(self, file_id)
+
+ def _getitems(self, file_ids):
+ """Similar to __getitem__, but lets you query for multiple.
+
+ The returned order is undefined; currently, if an item doesn't
+ exist, it isn't included in the output.
+ """
+ result = []
+ remaining = []
+ for file_id in file_ids:
+ entry = self._fileid_to_entry_cache.get(file_id, None)
+ if entry is None:
+ remaining.append(file_id)
+ else:
+ result.append(entry)
+ file_keys = [StaticTuple(f,).intern() for f in remaining]
+ for file_key, value in self.id_to_entry.iteritems(file_keys):
+ entry = self._bytes_to_entry(value)
+ result.append(entry)
+ self._fileid_to_entry_cache[entry.file_id] = entry
+ return result
+
+ def has_id(self, file_id):
+ # Perhaps have an explicit 'contains' method on CHKMap ?
+ if self._fileid_to_entry_cache.get(file_id, None) is not None:
+ return True
+ return len(list(
+ self.id_to_entry.iteritems([StaticTuple(file_id,)]))) == 1
+
+ def is_root(self, file_id):
+ return file_id == self.root_id
+
+ def _iter_file_id_parents(self, file_id):
+ """Yield the parents of file_id up to the root."""
+ while file_id is not None:
+ try:
+ ie = self[file_id]
+ except KeyError:
+ raise errors.NoSuchId(tree=self, file_id=file_id)
+ yield ie
+ file_id = ie.parent_id
+
+ def __iter__(self):
+ """Iterate over all file-ids."""
+ for key, _ in self.id_to_entry.iteritems():
+ yield key[-1]
+
+ def iter_just_entries(self):
+ """Iterate over all entries.
+
+ Unlike iter_entries(), just the entries are returned (not (path, ie))
+ and the order of entries is undefined.
+
+ XXX: We may not want to merge this into bzr.dev.
+ """
+ for key, entry in self.id_to_entry.iteritems():
+ file_id = key[0]
+ ie = self._fileid_to_entry_cache.get(file_id, None)
+ if ie is None:
+ ie = self._bytes_to_entry(entry)
+ self._fileid_to_entry_cache[file_id] = ie
+ yield ie
+
+ def _preload_cache(self):
+ """Make sure all file-ids are in _fileid_to_entry_cache"""
+ if self._fully_cached:
+ return # No need to do it again
+ # The optimal sort order is to use iteritems() directly
+ cache = self._fileid_to_entry_cache
+ for key, entry in self.id_to_entry.iteritems():
+ file_id = key[0]
+ if file_id not in cache:
+ ie = self._bytes_to_entry(entry)
+ cache[file_id] = ie
+ else:
+ ie = cache[file_id]
+ last_parent_id = last_parent_ie = None
+ pid_items = self.parent_id_basename_to_file_id.iteritems()
+ for key, child_file_id in pid_items:
+ if key == ('', ''): # This is the root
+ if child_file_id != self.root_id:
+ raise ValueError('Data inconsistency detected.'
+ ' We expected data with key ("","") to match'
+ ' the root id, but %s != %s'
+ % (child_file_id, self.root_id))
+ continue
+ parent_id, basename = key
+ ie = cache[child_file_id]
+ if parent_id == last_parent_id:
+ parent_ie = last_parent_ie
+ else:
+ parent_ie = cache[parent_id]
+ if parent_ie.kind != 'directory':
+ raise ValueError('Data inconsistency detected.'
+ ' An entry in the parent_id_basename_to_file_id map'
+ ' has parent_id {%s} but the kind of that object'
+ ' is %r not "directory"' % (parent_id, parent_ie.kind))
+ if parent_ie._children is None:
+ parent_ie._children = {}
+ basename = basename.decode('utf-8')
+ if basename in parent_ie._children:
+ existing_ie = parent_ie._children[basename]
+ if existing_ie != ie:
+ raise ValueError('Data inconsistency detected.'
+ ' Two entries with basename %r were found'
+ ' in the parent entry {%s}'
+ % (basename, parent_id))
+ if basename != ie.name:
+ raise ValueError('Data inconsistency detected.'
+ ' In the parent_id_basename_to_file_id map, file_id'
+ ' {%s} is listed as having basename %r, but in the'
+ ' id_to_entry map it is %r'
+ % (child_file_id, basename, ie.name))
+ parent_ie._children[basename] = ie
+ self._fully_cached = True
+
+ def iter_changes(self, basis):
+ """Generate a Tree.iter_changes change list between this and basis.
+
+ :param basis: Another CHKInventory.
+ :return: An iterator over the changes between self and basis, as per
+ tree.iter_changes().
+ """
+ # We want: (file_id, (path_in_source, path_in_target),
+ # changed_content, versioned, parent, name, kind,
+ # executable)
+ for key, basis_value, self_value in \
+ self.id_to_entry.iter_changes(basis.id_to_entry):
+ file_id = key[0]
+ if basis_value is not None:
+ basis_entry = basis._bytes_to_entry(basis_value)
+ path_in_source = basis.id2path(file_id)
+ basis_parent = basis_entry.parent_id
+ basis_name = basis_entry.name
+ basis_executable = basis_entry.executable
+ else:
+ path_in_source = None
+ basis_parent = None
+ basis_name = None
+ basis_executable = None
+ if self_value is not None:
+ self_entry = self._bytes_to_entry(self_value)
+ path_in_target = self.id2path(file_id)
+ self_parent = self_entry.parent_id
+ self_name = self_entry.name
+ self_executable = self_entry.executable
+ else:
+ path_in_target = None
+ self_parent = None
+ self_name = None
+ self_executable = None
+ if basis_value is None:
+ # add
+ kind = (None, self_entry.kind)
+ versioned = (False, True)
+ elif self_value is None:
+ # delete
+ kind = (basis_entry.kind, None)
+ versioned = (True, False)
+ else:
+ kind = (basis_entry.kind, self_entry.kind)
+ versioned = (True, True)
+ changed_content = False
+ if kind[0] != kind[1]:
+ changed_content = True
+ elif kind[0] == 'file':
+ if (self_entry.text_size != basis_entry.text_size or
+ self_entry.text_sha1 != basis_entry.text_sha1):
+ changed_content = True
+ elif kind[0] == 'symlink':
+ if self_entry.symlink_target != basis_entry.symlink_target:
+ changed_content = True
+ elif kind[0] == 'tree-reference':
+ if (self_entry.reference_revision !=
+ basis_entry.reference_revision):
+ changed_content = True
+ parent = (basis_parent, self_parent)
+ name = (basis_name, self_name)
+ executable = (basis_executable, self_executable)
+ if (not changed_content
+ and parent[0] == parent[1]
+ and name[0] == name[1]
+ and executable[0] == executable[1]):
+ # Could happen when only the revision changed for a directory
+ # for instance.
+ continue
+ yield (file_id, (path_in_source, path_in_target), changed_content,
+ versioned, parent, name, kind, executable)
+
+ def __len__(self):
+ """Return the number of entries in the inventory."""
+ return len(self.id_to_entry)
+
+ def _make_delta(self, old):
+ """Make an inventory delta from two inventories."""
+ if type(old) != CHKInventory:
+ return CommonInventory._make_delta(self, old)
+ delta = []
+ for key, old_value, self_value in \
+ self.id_to_entry.iter_changes(old.id_to_entry):
+ file_id = key[0]
+ if old_value is not None:
+ old_path = old.id2path(file_id)
+ else:
+ old_path = None
+ if self_value is not None:
+ entry = self._bytes_to_entry(self_value)
+ self._fileid_to_entry_cache[file_id] = entry
+ new_path = self.id2path(file_id)
+ else:
+ entry = None
+ new_path = None
+ delta.append((old_path, new_path, file_id, entry))
+ return delta
+
+ def path2id(self, relpath):
+ """See CommonInventory.path2id()."""
+ # TODO: perhaps support negative hits?
+ if isinstance(relpath, basestring):
+ names = osutils.splitpath(relpath)
+        else:
+            names = relpath
+            if relpath == []:
+                relpath = [""]
+            relpath = osutils.pathjoin(*relpath)
+ result = self._path_to_fileid_cache.get(relpath, None)
+ if result is not None:
+ return result
+ current_id = self.root_id
+ if current_id is None:
+ return None
+ parent_id_index = self.parent_id_basename_to_file_id
+ cur_path = None
+ for basename in names:
+ if cur_path is None:
+ cur_path = basename
+ else:
+ cur_path = cur_path + '/' + basename
+ basename_utf8 = basename.encode('utf8')
+ file_id = self._path_to_fileid_cache.get(cur_path, None)
+ if file_id is None:
+ key_filter = [StaticTuple(current_id, basename_utf8)]
+ items = parent_id_index.iteritems(key_filter)
+ for (parent_id, name_utf8), file_id in items:
+ if parent_id != current_id or name_utf8 != basename_utf8:
+ raise errors.BzrError("corrupt inventory lookup! "
+ "%r %r %r %r" % (parent_id, current_id, name_utf8,
+ basename_utf8))
+ if file_id is None:
+ return None
+ else:
+ self._path_to_fileid_cache[cur_path] = file_id
+ current_id = file_id
+ return current_id
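+
+    # Illustrative walk (hypothetical ids, not from the original source):
+    # path2id('dir/file.txt') starts at self.root_id, queries
+    # parent_id_basename_to_file_id with (root_id, 'dir') to find the
+    # directory's file id, then with (dir_id, 'file.txt') to find the file's
+    # id, caching each intermediate path in _path_to_fileid_cache.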
+
+ def to_lines(self):
+ """Serialise the inventory to lines."""
+ lines = ["chkinventory:\n"]
+ if self._search_key_name != 'plain':
+ # custom ordering grouping things that don't change together
+ lines.append('search_key_name: %s\n' % (self._search_key_name,))
+ lines.append("root_id: %s\n" % self.root_id)
+ lines.append('parent_id_basename_to_file_id: %s\n' %
+ (self.parent_id_basename_to_file_id.key()[0],))
+ lines.append("revision_id: %s\n" % self.revision_id)
+ lines.append("id_to_entry: %s\n" % (self.id_to_entry.key()[0],))
+ else:
+ lines.append("revision_id: %s\n" % self.revision_id)
+ lines.append("root_id: %s\n" % self.root_id)
+ if self.parent_id_basename_to_file_id is not None:
+ lines.append('parent_id_basename_to_file_id: %s\n' %
+ (self.parent_id_basename_to_file_id.key()[0],))
+ lines.append("id_to_entry: %s\n" % (self.id_to_entry.key()[0],))
+ return lines
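+
+    # Illustrative sketch (revision id and hash values are hypothetical, not
+    # from the original source): for an inventory using the default 'plain'
+    # search key, to_lines() produces something like:
+    #
+    #   chkinventory:
+    #   revision_id: rev-1
+    #   root_id: TREE_ROOT
+    #   parent_id_basename_to_file_id: sha1:...
+    #   id_to_entry: sha1:...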
+
+ @property
+ def root(self):
+ """Get the root entry."""
+ return self[self.root_id]
+
+
+class CHKInventoryDirectory(InventoryDirectory):
+ """A directory in an inventory."""
+
+ __slots__ = ['_children', '_chk_inventory']
+
+ def __init__(self, file_id, name, parent_id, chk_inventory):
+ # Don't call InventoryDirectory.__init__ - it isn't right for this
+ # class.
+ InventoryEntry.__init__(self, file_id, name, parent_id)
+ self._children = None
+ self._chk_inventory = chk_inventory
+
+ @property
+ def children(self):
+ """Access the list of children of this directory.
+
+        With a parent_id_basename_to_file_id index this loads all of the
+        children; without one it would have to load the entire index, which
+        is why such inventories are no longer supported. A more sophisticated
+        proxy object might be nice, to allow partial loading of children when
+        specific names are accessed (so path traversal could be written in
+        the obvious way without examining siblings).
+ """
+ if self._children is not None:
+ return self._children
+ # No longer supported
+ if self._chk_inventory.parent_id_basename_to_file_id is None:
+ raise AssertionError("Inventories without"
+ " parent_id_basename_to_file_id are no longer supported")
+ result = {}
+ # XXX: Todo - use proxy objects for the children rather than loading
+ # all when the attribute is referenced.
+ parent_id_index = self._chk_inventory.parent_id_basename_to_file_id
+ child_keys = set()
+ for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
+ key_filter=[StaticTuple(self.file_id,)]):
+ child_keys.add(StaticTuple(file_id,))
+ cached = set()
+ for file_id_key in child_keys:
+ entry = self._chk_inventory._fileid_to_entry_cache.get(
+ file_id_key[0], None)
+ if entry is not None:
+ result[entry.name] = entry
+ cached.add(file_id_key)
+ child_keys.difference_update(cached)
+ # populate; todo: do by name
+ id_to_entry = self._chk_inventory.id_to_entry
+ for file_id_key, bytes in id_to_entry.iteritems(child_keys):
+ entry = self._chk_inventory._bytes_to_entry(bytes)
+ result[entry.name] = entry
+ self._chk_inventory._fileid_to_entry_cache[file_id_key[0]] = entry
+ self._children = result
+ return result
+
+entry_factory = {
+ 'directory': InventoryDirectory,
+ 'file': InventoryFile,
+ 'symlink': InventoryLink,
+ 'tree-reference': TreeReference
+}
+
+def make_entry(kind, name, parent_id, file_id=None):
+ """Create an inventory entry.
+
+ :param kind: the type of inventory entry to create.
+ :param name: the basename of the entry.
+ :param parent_id: the parent_id of the entry.
+ :param file_id: the file_id to use. if None, one will be created.
+ """
+ if file_id is None:
+ file_id = generate_ids.gen_file_id(name)
+ name = ensure_normalized_name(name)
+ try:
+ factory = entry_factory[kind]
+ except KeyError:
+ raise errors.BadFileKindError(name, kind)
+ return factory(file_id, name, parent_id)
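+
+# A minimal usage sketch (hypothetical names, not from the original source):
+#
+#   ie = make_entry('file', 'hello.txt', parent_id='TREE_ROOT')
+#
+# This normalises the name, generates a file id via
+# generate_ids.gen_file_id() and returns an InventoryFile instance; an
+# unknown kind raises BadFileKindError.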
+
+
+def ensure_normalized_name(name):
+ """Normalize name.
+
+ :raises InvalidNormalization: When name is not normalized, and cannot be
+ accessed on this platform by the normalized path.
+ :return: The NFC normalised version of name.
+ """
+ #------- This has been copied to bzrlib.dirstate.DirState.add, please
+ # keep them synchronised.
+ # we dont import normalized_filename directly because we want to be
+ # able to change the implementation at runtime for tests.
+ norm_name, can_access = osutils.normalized_filename(name)
+ if norm_name != name:
+ if can_access:
+ return norm_name
+ else:
+ # TODO: jam 20060701 This would probably be more useful
+ # if the error was raised with the full path
+ raise errors.InvalidNormalization(name)
+ return name
+
+
+_NAME_RE = lazy_regex.lazy_compile(r'^[^/\\]+$')
+
+def is_valid_name(name):
+ return bool(_NAME_RE.match(name))
+
+
+def _check_delta_unique_ids(delta):
+ """Decorate a delta and check that the file ids in it are unique.
+
+ :return: A generator over delta.
+ """
+ ids = set()
+ for item in delta:
+ length = len(ids) + 1
+ ids.add(item[2])
+ if len(ids) != length:
+ raise errors.InconsistentDelta(item[0] or item[1], item[2],
+ "repeated file_id")
+ yield item
+
+
+def _check_delta_unique_new_paths(delta):
+ """Decorate a delta and check that the new paths in it are unique.
+
+ :return: A generator over delta.
+ """
+ paths = set()
+ for item in delta:
+ length = len(paths) + 1
+ path = item[1]
+ if path is not None:
+ paths.add(path)
+ if len(paths) != length:
+ raise errors.InconsistentDelta(path, item[2], "repeated path")
+ yield item
+
+
+def _check_delta_unique_old_paths(delta):
+ """Decorate a delta and check that the old paths in it are unique.
+
+ :return: A generator over delta.
+ """
+ paths = set()
+ for item in delta:
+ length = len(paths) + 1
+ path = item[0]
+ if path is not None:
+ paths.add(path)
+ if len(paths) != length:
+ raise errors.InconsistentDelta(path, item[2], "repeated path")
+ yield item
+
+
+def _check_delta_ids_are_valid(delta):
+ """Decorate a delta and check that the ids in it are valid.
+
+ :return: A generator over delta.
+ """
+ for item in delta:
+ entry = item[3]
+ if item[2] is None:
+ raise errors.InconsistentDelta(item[0] or item[1], item[2],
+ "entry with file_id None %r" % entry)
+ if type(item[2]) != str:
+ raise errors.InconsistentDelta(item[0] or item[1], item[2],
+ "entry with non bytes file_id %r" % entry)
+ yield item
+
+
+def _check_delta_ids_match_entry(delta):
+ """Decorate a delta and check that the ids in it match the entry.file_id.
+
+ :return: A generator over delta.
+ """
+ for item in delta:
+ entry = item[3]
+ if entry is not None:
+ if entry.file_id != item[2]:
+ raise errors.InconsistentDelta(item[0] or item[1], item[2],
+ "mismatched id with %r" % entry)
+ yield item
+
+
+def _check_delta_new_path_entry_both_or_None(delta):
+ """Decorate a delta and check that the new_path and entry are paired.
+
+ :return: A generator over delta.
+ """
+ for item in delta:
+ new_path = item[1]
+ entry = item[3]
+ if new_path is None and entry is not None:
+ raise errors.InconsistentDelta(item[0], item[1],
+ "Entry with no new_path")
+ if new_path is not None and entry is None:
+ raise errors.InconsistentDelta(new_path, item[1],
+ "new_path with no entry")
+ yield item
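+
+# Because each of the _check_delta_* helpers is itself a generator over the
+# delta, they can be stacked; a hedged sketch (not a call taken from the
+# original source) of validating a delta before applying it:
+#
+#   checked = _check_delta_ids_are_valid(
+#       _check_delta_unique_ids(
+#           _check_delta_unique_new_paths(delta)))
+#   for item in checked:
+#       pass  # consuming raises InconsistentDelta on the first problem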
+
+
+def mutable_inventory_from_tree(tree):
+ """Create a new inventory that has the same contents as a specified tree.
+
+ :param tree: Revision tree to create inventory from
+ """
+ entries = tree.iter_entries_by_dir()
+ inv = Inventory(None, tree.get_revision_id())
+ for path, inv_entry in entries:
+ inv.add(inv_entry.copy())
+ return inv
diff --git a/bzrlib/inventory_delta.py b/bzrlib/inventory_delta.py
new file mode 100644
index 0000000..d41732d
--- /dev/null
+++ b/bzrlib/inventory_delta.py
@@ -0,0 +1,377 @@
+# Copyright (C) 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Inventory delta serialisation.
+
+See doc/developers/inventory.txt for the description of the format.
+
+In this module the interesting classes are:
+ - InventoryDeltaSerializer - object to write inventory deltas.
+ - InventoryDeltaDeserializer - object to read inventory deltas.
+"""
+
+from __future__ import absolute_import
+
+__all__ = ['InventoryDeltaSerializer']
+
+from bzrlib import errors
+from bzrlib.osutils import basename
+from bzrlib import inventory
+from bzrlib.revision import NULL_REVISION
+
+FORMAT_1 = 'bzr inventory delta v1 (bzr 1.14)'
+
+
+class InventoryDeltaError(errors.BzrError):
+ """An error when serializing or deserializing an inventory delta."""
+
+ # Most errors when serializing and deserializing are due to bugs, although
+ # damaged input (i.e. a bug in a different process) could cause
+ # deserialization errors too.
+ internal_error = True
+
+
+class IncompatibleInventoryDelta(errors.BzrError):
+ """The delta could not be deserialised because its contents conflict with
+ the allow_versioned_root or allow_tree_references flags of the
+ deserializer.
+ """
+ internal_error = False
+
+
+def _directory_content(entry):
+ """Serialize the content component of entry which is a directory.
+
+ :param entry: An InventoryDirectory.
+ """
+ return "dir"
+
+
+def _file_content(entry):
+ """Serialize the content component of entry which is a file.
+
+ :param entry: An InventoryFile.
+ """
+ if entry.executable:
+ exec_bytes = 'Y'
+ else:
+ exec_bytes = ''
+ size_exec_sha = (entry.text_size, exec_bytes, entry.text_sha1)
+ if None in size_exec_sha:
+ raise InventoryDeltaError('Missing size or sha for %s' % entry.file_id)
+ return "file\x00%d\x00%s\x00%s" % size_exec_sha
+
+
+def _link_content(entry):
+ """Serialize the content component of entry which is a symlink.
+
+ :param entry: An InventoryLink.
+ """
+ target = entry.symlink_target
+ if target is None:
+ raise InventoryDeltaError('Missing target for %s' % entry.file_id)
+ return "link\x00%s" % target.encode('utf8')
+
+
+def _reference_content(entry):
+ """Serialize the content component of entry which is a tree-reference.
+
+ :param entry: A TreeReference.
+ """
+ tree_revision = entry.reference_revision
+ if tree_revision is None:
+ raise InventoryDeltaError(
+ 'Missing reference revision for %s' % entry.file_id)
+ return "tree\x00%s" % tree_revision
+
+
+def _dir_to_entry(content, name, parent_id, file_id, last_modified,
+ _type=inventory.InventoryDirectory):
+ """Convert a dir content record to an InventoryDirectory."""
+ result = _type(file_id, name, parent_id)
+ result.revision = last_modified
+ return result
+
+
+def _file_to_entry(content, name, parent_id, file_id, last_modified,
+ _type=inventory.InventoryFile):
+ """Convert a dir content record to an InventoryFile."""
+ result = _type(file_id, name, parent_id)
+ result.revision = last_modified
+ result.text_size = int(content[1])
+ result.text_sha1 = content[3]
+ if content[2]:
+ result.executable = True
+ else:
+ result.executable = False
+ return result
+
+
+def _link_to_entry(content, name, parent_id, file_id, last_modified,
+ _type=inventory.InventoryLink):
+ """Convert a link content record to an InventoryLink."""
+ result = _type(file_id, name, parent_id)
+ result.revision = last_modified
+ result.symlink_target = content[1].decode('utf8')
+ return result
+
+
+def _tree_to_entry(content, name, parent_id, file_id, last_modified,
+ _type=inventory.TreeReference):
+ """Convert a tree content record to a TreeReference."""
+ result = _type(file_id, name, parent_id)
+ result.revision = last_modified
+ result.reference_revision = content[1]
+ return result
+
+
+class InventoryDeltaSerializer(object):
+ """Serialize inventory deltas."""
+
+ def __init__(self, versioned_root, tree_references):
+ """Create an InventoryDeltaSerializer.
+
+ :param versioned_root: If True, any root entry that is seen is expected
+ to be versioned, and root entries can have any fileid.
+ :param tree_references: If True support tree-reference entries.
+ """
+ self._versioned_root = versioned_root
+ self._tree_references = tree_references
+ self._entry_to_content = {
+ 'directory': _directory_content,
+ 'file': _file_content,
+ 'symlink': _link_content,
+ }
+ if tree_references:
+ self._entry_to_content['tree-reference'] = _reference_content
+
+ def delta_to_lines(self, old_name, new_name, delta_to_new):
+ """Return a line sequence for delta_to_new.
+
+        The versioned_root and tree_references flags supplied to the
+        constructor control the header written for the delta.
+
+ :param old_name: A UTF8 revision id for the old inventory. May be
+ NULL_REVISION if there is no older inventory and delta_to_new
+ includes the entire inventory contents.
+ :param new_name: The version name of the inventory we create with this
+ delta.
+ :param delta_to_new: An inventory delta such as Inventory.apply_delta
+ takes.
+ :return: The serialized delta as lines.
+ """
+ if type(old_name) is not str:
+ raise TypeError('old_name should be str, got %r' % (old_name,))
+ if type(new_name) is not str:
+ raise TypeError('new_name should be str, got %r' % (new_name,))
+ lines = ['', '', '', '', '']
+ to_line = self._delta_item_to_line
+ for delta_item in delta_to_new:
+ line = to_line(delta_item, new_name)
+ if line.__class__ != str:
+ raise InventoryDeltaError(
+                    'to_line generated non-str output %r' % (line,))
+ lines.append(line)
+ lines.sort()
+ lines[0] = "format: %s\n" % FORMAT_1
+ lines[1] = "parent: %s\n" % old_name
+ lines[2] = "version: %s\n" % new_name
+ lines[3] = "versioned_root: %s\n" % self._serialize_bool(
+ self._versioned_root)
+ lines[4] = "tree_references: %s\n" % self._serialize_bool(
+ self._tree_references)
+ return lines
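+
+    # Illustrative sketch (revision ids are hypothetical): the first five
+    # lines returned by delta_to_lines() always form the header, e.g.
+    #
+    #   format: bzr inventory delta v1 (bzr 1.14)
+    #   parent: null:
+    #   version: rev-1
+    #   versioned_root: true
+    #   tree_references: false
+    #
+    # followed by the sorted per-entry lines produced by
+    # _delta_item_to_line().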
+
+ def _serialize_bool(self, value):
+ if value:
+ return "true"
+ else:
+ return "false"
+
+ def _delta_item_to_line(self, delta_item, new_version):
+ """Convert delta_item to a line."""
+ oldpath, newpath, file_id, entry = delta_item
+ if newpath is None:
+ # delete
+ oldpath_utf8 = '/' + oldpath.encode('utf8')
+ newpath_utf8 = 'None'
+ parent_id = ''
+ last_modified = NULL_REVISION
+ content = 'deleted\x00\x00'
+ else:
+ if oldpath is None:
+ oldpath_utf8 = 'None'
+ else:
+ oldpath_utf8 = '/' + oldpath.encode('utf8')
+ if newpath == '/':
+ raise AssertionError(
+ "Bad inventory delta: '/' is not a valid newpath "
+ "(should be '') in delta item %r" % (delta_item,))
+ # TODO: Test real-world utf8 cache hit rate. It may be a win.
+ newpath_utf8 = '/' + newpath.encode('utf8')
+ # Serialize None as ''
+ parent_id = entry.parent_id or ''
+ # Serialize unknown revisions as NULL_REVISION
+ last_modified = entry.revision
+ # special cases for /
+ if newpath_utf8 == '/' and not self._versioned_root:
+ # This is an entry for the root, this inventory does not
+ # support versioned roots. So this must be an unversioned
+ # root, i.e. last_modified == new revision. Otherwise, this
+ # delta is invalid.
+ # Note: the non-rich-root repositories *can* have roots with
+ # file-ids other than TREE_ROOT, e.g. repo formats that use the
+ # xml5 serializer.
+ if last_modified != new_version:
+ raise InventoryDeltaError(
+ 'Version present for / in %s (%s != %s)'
+ % (file_id, last_modified, new_version))
+ if last_modified is None:
+ raise InventoryDeltaError("no version for fileid %s" % file_id)
+ content = self._entry_to_content[entry.kind](entry)
+ return ("%s\x00%s\x00%s\x00%s\x00%s\x00%s\n" %
+ (oldpath_utf8, newpath_utf8, file_id, parent_id, last_modified,
+ content))
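+
+    # Illustrative sketch (file id, revision id and sha1 are hypothetical):
+    # an entry line for a newly added 12-byte executable file looks like
+    # (with \x00 as the field separator):
+    #
+    #   None\x00/hello.txt\x00hello-id\x00TREE_ROOT\x00rev-1\x00file\x0012\x00Y\x00<sha1>\n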
+
+
+class InventoryDeltaDeserializer(object):
+ """Deserialize inventory deltas."""
+
+ def __init__(self, allow_versioned_root=True, allow_tree_references=True):
+ """Create an InventoryDeltaDeserializer.
+
+        :param allow_versioned_root: If False, deltas whose header declares
+            versioned_root: true will be rejected as incompatible.
+        :param allow_tree_references: If False, deltas containing
+            tree-reference entries will be rejected as incompatible.
+ """
+ self._allow_versioned_root = allow_versioned_root
+ self._allow_tree_references = allow_tree_references
+
+ def _deserialize_bool(self, value):
+ if value == "true":
+ return True
+ elif value == "false":
+ return False
+ else:
+ raise InventoryDeltaError("value %r is not a bool" % (value,))
+
+ def parse_text_bytes(self, bytes):
+ """Parse the text bytes of a serialized inventory delta.
+
+        If the deserializer was created with allow_versioned_root or
+        allow_tree_references set to False, a delta that requires those
+        features is rejected with IncompatibleInventoryDelta.
+
+ :param bytes: The bytes to parse. This can be obtained by calling
+ delta_to_lines and then doing ''.join(delta_lines).
+ :return: (parent_id, new_id, versioned_root, tree_references,
+ inventory_delta)
+ """
+ if bytes[-1:] != '\n':
+ last_line = bytes.rsplit('\n', 1)[-1]
+ raise InventoryDeltaError('last line not empty: %r' % (last_line,))
+ lines = bytes.split('\n')[:-1] # discard the last empty line
+ if not lines or lines[0] != 'format: %s' % FORMAT_1:
+ raise InventoryDeltaError('unknown format %r' % lines[0:1])
+ if len(lines) < 2 or not lines[1].startswith('parent: '):
+ raise InventoryDeltaError('missing parent: marker')
+ delta_parent_id = lines[1][8:]
+ if len(lines) < 3 or not lines[2].startswith('version: '):
+ raise InventoryDeltaError('missing version: marker')
+ delta_version_id = lines[2][9:]
+ if len(lines) < 4 or not lines[3].startswith('versioned_root: '):
+ raise InventoryDeltaError('missing versioned_root: marker')
+ delta_versioned_root = self._deserialize_bool(lines[3][16:])
+ if len(lines) < 5 or not lines[4].startswith('tree_references: '):
+ raise InventoryDeltaError('missing tree_references: marker')
+ delta_tree_references = self._deserialize_bool(lines[4][17:])
+ if (not self._allow_versioned_root and delta_versioned_root):
+ raise IncompatibleInventoryDelta("versioned_root not allowed")
+ result = []
+ seen_ids = set()
+ line_iter = iter(lines)
+ for i in range(5):
+ line_iter.next()
+ for line in line_iter:
+ (oldpath_utf8, newpath_utf8, file_id, parent_id, last_modified,
+ content) = line.split('\x00', 5)
+ parent_id = parent_id or None
+ if file_id in seen_ids:
+ raise InventoryDeltaError(
+ "duplicate file id in inventory delta %r" % lines)
+ seen_ids.add(file_id)
+ if (newpath_utf8 == '/' and not delta_versioned_root and
+ last_modified != delta_version_id):
+                # Delta claims not to have a versioned root, yet here's
+ # a root entry with a non-default version.
+ raise InventoryDeltaError("Versioned root found: %r" % line)
+ elif newpath_utf8 != 'None' and last_modified[-1] == ':':
+ # Deletes have a last_modified of null:, but otherwise special
+ # revision ids should not occur.
+ raise InventoryDeltaError('special revisionid found: %r' % line)
+ if content.startswith('tree\x00'):
+ if delta_tree_references is False:
+ raise InventoryDeltaError(
+ "Tree reference found (but header said "
+ "tree_references: false): %r" % line)
+ elif not self._allow_tree_references:
+ raise IncompatibleInventoryDelta(
+ "Tree reference not allowed")
+ if oldpath_utf8 == 'None':
+ oldpath = None
+ elif oldpath_utf8[:1] != '/':
+ raise InventoryDeltaError(
+ "oldpath invalid (does not start with /): %r"
+ % (oldpath_utf8,))
+ else:
+ oldpath_utf8 = oldpath_utf8[1:]
+ oldpath = oldpath_utf8.decode('utf8')
+ if newpath_utf8 == 'None':
+ newpath = None
+ elif newpath_utf8[:1] != '/':
+ raise InventoryDeltaError(
+ "newpath invalid (does not start with /): %r"
+ % (newpath_utf8,))
+ else:
+ # Trim leading slash
+ newpath_utf8 = newpath_utf8[1:]
+ newpath = newpath_utf8.decode('utf8')
+ content_tuple = tuple(content.split('\x00'))
+ if content_tuple[0] == 'deleted':
+ entry = None
+ else:
+ entry = _parse_entry(
+ newpath, file_id, parent_id, last_modified, content_tuple)
+ delta_item = (oldpath, newpath, file_id, entry)
+ result.append(delta_item)
+ return (delta_parent_id, delta_version_id, delta_versioned_root,
+ delta_tree_references, result)
+
+
+def _parse_entry(path, file_id, parent_id, last_modified, content):
+ entry_factory = {
+ 'dir': _dir_to_entry,
+ 'file': _file_to_entry,
+ 'link': _link_to_entry,
+ 'tree': _tree_to_entry,
+ }
+ kind = content[0]
+ if path.startswith('/'):
+ raise AssertionError
+ name = basename(path)
+    return entry_factory[kind](
+ content, name, parent_id, file_id, last_modified)
+
+
diff --git a/bzrlib/iterablefile.py b/bzrlib/iterablefile.py
new file mode 100644
index 0000000..0b5c6ff
--- /dev/null
+++ b/bzrlib/iterablefile.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2005 Aaron Bentley, Canonical Ltd
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+class IterableFileBase(object):
+ """Create a file-like object from any iterable"""
+
+ def __init__(self, iterable):
+ object.__init__(self)
+ self._iter = iterable.__iter__()
+ self._buffer = ""
+ self.done = False
+
+ def read_n(self, length):
+ """
+ >>> IterableFileBase(['This ', 'is ', 'a ', 'test.']).read_n(8)
+ 'This is '
+ """
+ def test_length(result):
+ if len(result) >= length:
+ return length
+ else:
+ return None
+ return self._read(test_length)
+
+ def read_to(self, sequence, length=None):
+ """
+ >>> f = IterableFileBase(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.read_to('\\n')
+ 'Th\\n'
+ >>> f.read_to('\\n')
+ 'is is \\n'
+ """
+ def test_contents(result):
+ if length is not None:
+ if len(result) >= length:
+ return length
+ try:
+ return result.index(sequence)+len(sequence)
+ except ValueError:
+ return None
+ return self._read(test_contents)
+
+ def _read(self, result_length):
+ """
+ Read data until result satisfies the condition result_length.
+ result_length is a callable that returns None until the condition
+ is satisfied, and returns the length of the result to use when
+ the condition is satisfied. (i.e. it returns the length of the
+ subset of the first condition match.)
+ """
+ result = self._buffer
+ while result_length(result) is None:
+ try:
+ result += self._iter.next()
+ except StopIteration:
+ self.done = True
+ self._buffer = ""
+ return result
+ output_length = result_length(result)
+ self._buffer = result[output_length:]
+ return result[:output_length]
+
+ def read_all(self):
+ """
+ >>> IterableFileBase(['This ', 'is ', 'a ', 'test.']).read_all()
+ 'This is a test.'
+ """
+ def no_stop(result):
+ return None
+ return self._read(no_stop)
+
+
+ def push_back(self, contents):
+ """
+ >>> f = IterableFileBase(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.read_to('\\n')
+ 'Th\\n'
+ >>> f.push_back("Sh")
+ >>> f.read_all()
+ 'Shis is \\na te\\nst.'
+ """
+ self._buffer = contents + self._buffer
+
+
+class IterableFile(object):
+ """This class supplies all File methods that can be implemented cheaply."""
+ def __init__(self, iterable):
+ object.__init__(self)
+ self._file_base = IterableFileBase(iterable)
+ self._iter = self._make_iterator()
+ self._closed = False
+ self.softspace = 0
+
+ def _make_iterator(self):
+ while not self._file_base.done:
+ self._check_closed()
+ result = self._file_base.read_to('\n')
+ if result != '':
+ yield result
+
+ def _check_closed(self):
+ if self.closed:
+ raise ValueError("File is closed.")
+
+ def close(self):
+ """
+ >>> f = IterableFile(['This ', 'is ', 'a ', 'test.'])
+ >>> f.closed
+ False
+ >>> f.close()
+ >>> f.closed
+ True
+ """
+ self._file_base.done = True
+ self._closed = True
+
+ closed = property(lambda x: x._closed)
+
+ def flush(self):
+ """No-op for standard compliance.
+ >>> f = IterableFile([])
+ >>> f.close()
+ >>> f.flush()
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ """
+ self._check_closed()
+
+ def next(self):
+ """Implementation of the iterator protocol's next()
+
+ >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.'])
+ >>> f.next()
+ 'This \\n'
+ >>> f.close()
+ >>> f.next()
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ >>> f = IterableFile(['This \\n', 'is ', 'a ', 'test.\\n'])
+ >>> f.next()
+ 'This \\n'
+ >>> f.next()
+ 'is a test.\\n'
+ >>> f.next()
+ Traceback (most recent call last):
+ StopIteration
+ """
+ self._check_closed()
+ return self._iter.next()
+
+ def __iter__(self):
+ """
+ >>> list(IterableFile(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.']))
+ ['Th\\n', 'is is \\n', 'a te\\n', 'st.']
+ >>> f = IterableFile(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.close()
+ >>> list(f)
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ """
+ return self
+
+ def read(self, length=None):
+ """
+ >>> IterableFile(['This ', 'is ', 'a ', 'test.']).read()
+ 'This is a test.'
+ >>> f = IterableFile(['This ', 'is ', 'a ', 'test.'])
+ >>> f.read(10)
+ 'This is a '
+ >>> f = IterableFile(['This ', 'is ', 'a ', 'test.'])
+ >>> f.close()
+ >>> f.read(10)
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ """
+ self._check_closed()
+ if length is None:
+ return self._file_base.read_all()
+ else:
+ return self._file_base.read_n(length)
+
+ def read_to(self, sequence, size=None):
+ """
+ Read characters until a sequence is found, with optional max size.
+ The specified sequence, if found, will be included in the result
+
+ >>> f = IterableFile(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.read_to('i')
+ 'Th\\ni'
+ >>> f.read_to('i')
+ 's i'
+ >>> f.close()
+ >>> f.read_to('i')
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ """
+ self._check_closed()
+ return self._file_base.read_to(sequence, size)
+
+ def readline(self, size=None):
+ """
+ >>> f = IterableFile(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.readline()
+ 'Th\\n'
+ >>> f.readline(4)
+ 'is i'
+ >>> f.close()
+ >>> f.readline()
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ """
+ return self.read_to('\n', size)
+
+ def readlines(self, sizehint=None):
+ """
+ >>> f = IterableFile(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.readlines()
+ ['Th\\n', 'is is \\n', 'a te\\n', 'st.']
+ >>> f = IterableFile(['Th\\nis ', 'is \\n', 'a ', 'te\\nst.'])
+ >>> f.close()
+ >>> f.readlines()
+ Traceback (most recent call last):
+ ValueError: File is closed.
+ """
+ lines = []
+ while True:
+ line = self.readline()
+ if line == "":
+ return lines
+ if sizehint is None:
+ lines.append(line)
+ elif len(line) < sizehint:
+ lines.append(line)
+ sizehint -= len(line)
+ else:
+ self._file_base.push_back(line)
+ return lines
+
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/bzrlib/knit.py b/bzrlib/knit.py
new file mode 100644
index 0000000..aab403c
--- /dev/null
+++ b/bzrlib/knit.py
@@ -0,0 +1,3505 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Knit versionedfile implementation.
+
+A knit is a versioned file implementation that supports efficient append only
+updates.
+
+Knit file layout:
+lifeless: the data file is made up of "delta records". each delta record has a delta header
+that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
+the -expanded data- (ie, the delta applied to the parent). the delta also ends with an
+end-marker; simply "end VERSION"
+
+delta can be line or full contents.
+... the 8's there are the index number of the annotation.
+version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
+59,59,3
+8
+8 if ie.executable:
+8 e.set('executable', 'yes')
+130,130,2
+8 if elt.get('executable') == 'yes':
+8 ie.executable = True
+end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad
+
+
+whats in an index:
+09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
+09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
+09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
+09:33 < lifeless> right
+09:33 < jrydberg> lifeless: the position and size is the range in the data file
+
+
+so the index sequence is the dictionary compressed sequence number used
+in the deltas to provide line annotation
+
+"""
+
+from __future__ import absolute_import
+
+
+from cStringIO import StringIO
+from itertools import izip
+import operator
+import os
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import gzip
+
+from bzrlib import (
+ debug,
+ diff,
+ graph as _mod_graph,
+ index as _mod_index,
+ pack,
+ patiencediff,
+ static_tuple,
+ trace,
+ tsort,
+ tuned_gzip,
+ ui,
+ )
+
+from bzrlib.repofmt import pack_repo
+from bzrlib.i18n import gettext
+""")
+from bzrlib import (
+ annotate,
+ errors,
+ osutils,
+ )
+from bzrlib.errors import (
+ NoSuchFile,
+ InvalidRevisionId,
+ KnitCorrupt,
+ KnitHeaderError,
+ RevisionNotPresent,
+ SHA1KnitCorrupt,
+ )
+from bzrlib.osutils import (
+ contains_whitespace,
+ sha_string,
+ sha_strings,
+ split_lines,
+ )
+from bzrlib.versionedfile import (
+ _KeyRefs,
+ AbsentContentFactory,
+ adapter_registry,
+ ConstantMapper,
+ ContentFactory,
+ sort_groupcompress,
+ VersionedFilesWithFallbacks,
+ )
+
+
+# TODO: Split out code specific to this format into an associated object.
+
+# TODO: Can we put in some kind of value to check that the index and data
+# files belong together?
+
+# TODO: accommodate binaries, perhaps by storing a byte count
+
+# TODO: function to check whole file
+
+# TODO: atomically append data, then measure backwards from the cursor
+# position after writing to work out where it was located. we may need to
+# bypass python file buffering.
+
+DATA_SUFFIX = '.knit'
+INDEX_SUFFIX = '.kndx'
+_STREAM_MIN_BUFFER_SIZE = 5*1024*1024
+
+
+class KnitAdapter(object):
+ """Base class for knit record adaption."""
+
+ def __init__(self, basis_vf):
+ """Create an adapter which accesses full texts from basis_vf.
+
+ :param basis_vf: A versioned file to access basis texts of deltas from.
+ May be None for adapters that do not need to access basis texts.
+ """
+ self._data = KnitVersionedFiles(None, None)
+ self._annotate_factory = KnitAnnotateFactory()
+ self._plain_factory = KnitPlainFactory()
+ self._basis_vf = basis_vf
+
+
+class FTAnnotatedToUnannotated(KnitAdapter):
+ """An adapter from FT annotated knits to unannotated ones."""
+
+ def get_bytes(self, factory):
+ annotated_compressed_bytes = factory._raw_record
+ rec, contents = \
+ self._data._parse_record_unchecked(annotated_compressed_bytes)
+ content = self._annotate_factory.parse_fulltext(contents, rec[1])
+ size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
+ return bytes
+
+
+class DeltaAnnotatedToUnannotated(KnitAdapter):
+ """An adapter for deltas from annotated to unannotated."""
+
+ def get_bytes(self, factory):
+ annotated_compressed_bytes = factory._raw_record
+ rec, contents = \
+ self._data._parse_record_unchecked(annotated_compressed_bytes)
+ delta = self._annotate_factory.parse_line_delta(contents, rec[1],
+ plain=True)
+ contents = self._plain_factory.lower_line_delta(delta)
+ size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
+ return bytes
+
+
+class FTAnnotatedToFullText(KnitAdapter):
+ """An adapter from FT annotated knits to unannotated ones."""
+
+ def get_bytes(self, factory):
+ annotated_compressed_bytes = factory._raw_record
+ rec, contents = \
+ self._data._parse_record_unchecked(annotated_compressed_bytes)
+ content, delta = self._annotate_factory.parse_record(factory.key[-1],
+ contents, factory._build_details, None)
+ return ''.join(content.text())
+
+
+class DeltaAnnotatedToFullText(KnitAdapter):
+ """An adapter for deltas from annotated to unannotated."""
+
+ def get_bytes(self, factory):
+ annotated_compressed_bytes = factory._raw_record
+ rec, contents = \
+ self._data._parse_record_unchecked(annotated_compressed_bytes)
+ delta = self._annotate_factory.parse_line_delta(contents, rec[1],
+ plain=True)
+ compression_parent = factory.parents[0]
+ basis_entry = self._basis_vf.get_record_stream(
+ [compression_parent], 'unordered', True).next()
+ if basis_entry.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
+ basis_chunks = basis_entry.get_bytes_as('chunked')
+ basis_lines = osutils.chunks_to_lines(basis_chunks)
+ # Manually apply the delta because we have one annotated content and
+ # one plain.
+ basis_content = PlainKnitContent(basis_lines, compression_parent)
+ basis_content.apply_delta(delta, rec[1])
+ basis_content._should_strip_eol = factory._build_details[1]
+ return ''.join(basis_content.text())
+
+
+class FTPlainToFullText(KnitAdapter):
+ """An adapter from FT plain knits to unannotated ones."""
+
+ def get_bytes(self, factory):
+ compressed_bytes = factory._raw_record
+ rec, contents = \
+ self._data._parse_record_unchecked(compressed_bytes)
+ content, delta = self._plain_factory.parse_record(factory.key[-1],
+ contents, factory._build_details, None)
+ return ''.join(content.text())
+
+
+class DeltaPlainToFullText(KnitAdapter):
+ """An adapter for deltas from annotated to unannotated."""
+
+ def get_bytes(self, factory):
+ compressed_bytes = factory._raw_record
+ rec, contents = \
+ self._data._parse_record_unchecked(compressed_bytes)
+ delta = self._plain_factory.parse_line_delta(contents, rec[1])
+ compression_parent = factory.parents[0]
+ # XXX: string splitting overhead.
+ basis_entry = self._basis_vf.get_record_stream(
+ [compression_parent], 'unordered', True).next()
+ if basis_entry.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
+ basis_chunks = basis_entry.get_bytes_as('chunked')
+ basis_lines = osutils.chunks_to_lines(basis_chunks)
+ basis_content = PlainKnitContent(basis_lines, compression_parent)
+ # Manually apply the delta because we have one annotated content and
+ # one plain.
+ content, _ = self._plain_factory.parse_record(rec[1], contents,
+ factory._build_details, basis_content)
+ return ''.join(content.text())
+
+
+class KnitContentFactory(ContentFactory):
+ """Content factory for streaming from knits.
+
+ :seealso ContentFactory:
+ """
+
+ def __init__(self, key, parents, build_details, sha1, raw_record,
+ annotated, knit=None, network_bytes=None):
+ """Create a KnitContentFactory for key.
+
+ :param key: The key.
+ :param parents: The parents.
+ :param build_details: The build details as returned from
+ get_build_details.
+ :param sha1: The sha1 expected from the full text of this object.
+ :param raw_record: The bytes of the knit data from disk.
+ :param annotated: True if the raw data is annotated.
+ :param network_bytes: None to calculate the network bytes on demand,
+ not-none if they are already known.
+ """
+ ContentFactory.__init__(self)
+ self.sha1 = sha1
+ self.key = key
+ self.parents = parents
+ if build_details[0] == 'line-delta':
+ kind = 'delta'
+ else:
+ kind = 'ft'
+ if annotated:
+ annotated_kind = 'annotated-'
+ else:
+ annotated_kind = ''
+ self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
+ self._raw_record = raw_record
+ self._network_bytes = network_bytes
+ self._build_details = build_details
+ self._knit = knit
+
+ def _create_network_bytes(self):
+ """Create a fully serialised network version for transmission."""
+ # storage_kind, key, parents, Noeol, raw_record
+ key_bytes = '\x00'.join(self.key)
+ if self.parents is None:
+ parent_bytes = 'None:'
+ else:
+ parent_bytes = '\t'.join('\x00'.join(key) for key in self.parents)
+ if self._build_details[1]:
+ noeol = 'N'
+ else:
+ noeol = ' '
+ network_bytes = "%s\n%s\n%s\n%s%s" % (self.storage_kind, key_bytes,
+ parent_bytes, noeol, self._raw_record)
+ self._network_bytes = network_bytes
+
+ def get_bytes_as(self, storage_kind):
+ if storage_kind == self.storage_kind:
+ if self._network_bytes is None:
+ self._create_network_bytes()
+ return self._network_bytes
+ if ('-ft-' in self.storage_kind and
+ storage_kind in ('chunked', 'fulltext')):
+ adapter_key = (self.storage_kind, 'fulltext')
+ adapter_factory = adapter_registry.get(adapter_key)
+ adapter = adapter_factory(None)
+ bytes = adapter.get_bytes(self)
+ if storage_kind == 'chunked':
+ return [bytes]
+ else:
+ return bytes
+ if self._knit is not None:
+ # Not redundant with direct conversion above - that only handles
+ # fulltext cases.
+ if storage_kind == 'chunked':
+ return self._knit.get_lines(self.key[0])
+ elif storage_kind == 'fulltext':
+ return self._knit.get_text(self.key[0])
+ raise errors.UnavailableRepresentation(self.key, storage_kind,
+ self.storage_kind)
+
+
+class LazyKnitContentFactory(ContentFactory):
+ """A ContentFactory which can either generate full text or a wire form.
+
+ :seealso ContentFactory:
+ """
+
+ def __init__(self, key, parents, generator, first):
+ """Create a LazyKnitContentFactory.
+
+ :param key: The key of the record.
+ :param parents: The parents of the record.
+ :param generator: A _ContentMapGenerator containing the record for this
+ key.
+ :param first: Is this the first content object returned from generator?
+ if it is, its storage kind is knit-delta-closure, otherwise it is
+ knit-delta-closure-ref
+ """
+ self.key = key
+ self.parents = parents
+ self.sha1 = None
+ self._generator = generator
+ self.storage_kind = "knit-delta-closure"
+ if not first:
+ self.storage_kind = self.storage_kind + "-ref"
+ self._first = first
+
+ def get_bytes_as(self, storage_kind):
+ if storage_kind == self.storage_kind:
+ if self._first:
+ return self._generator._wire_bytes()
+ else:
+ # all the keys etc are contained in the bytes returned in the
+ # first record.
+ return ''
+ if storage_kind in ('chunked', 'fulltext'):
+ chunks = self._generator._get_one_work(self.key).text()
+ if storage_kind == 'chunked':
+ return chunks
+ else:
+ return ''.join(chunks)
+ raise errors.UnavailableRepresentation(self.key, storage_kind,
+ self.storage_kind)
+
+
+def knit_delta_closure_to_records(storage_kind, bytes, line_end):
+ """Convert a network record to a iterator over stream records.
+
+ :param storage_kind: The storage kind of the record.
+ Must be 'knit-delta-closure'.
+ :param bytes: The bytes of the record on the network.
+ """
+ generator = _NetworkContentMapGenerator(bytes, line_end)
+ return generator.get_record_stream()
+
+
+def knit_network_to_record(storage_kind, bytes, line_end):
+ """Convert a network record to a record object.
+
+ :param storage_kind: The storage kind of the record.
+ :param bytes: The bytes of the record on the network.
+ """
+ start = line_end
+ line_end = bytes.find('\n', start)
+ key = tuple(bytes[start:line_end].split('\x00'))
+ start = line_end + 1
+ line_end = bytes.find('\n', start)
+ parent_line = bytes[start:line_end]
+ if parent_line == 'None:':
+ parents = None
+ else:
+ parents = tuple(
+ [tuple(segment.split('\x00')) for segment in parent_line.split('\t')
+ if segment])
+ start = line_end + 1
+ noeol = bytes[start] == 'N'
+ if 'ft' in storage_kind:
+ method = 'fulltext'
+ else:
+ method = 'line-delta'
+ build_details = (method, noeol)
+ start = start + 1
+ raw_record = bytes[start:]
+ annotated = 'annotated' in storage_kind
+ return [KnitContentFactory(key, parents, build_details, None, raw_record,
+ annotated, network_bytes=bytes)]
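+
+# A hedged sketch of the wire layout parsed above (values illustrative, not
+# from the original source): after the storage_kind line the record is
+#
+#   <key parts joined by \x00>\n
+#   <parent keys joined by \t, or the literal "None:">\n
+#   <'N' if there is no trailing eol, else ' '><raw knit record bytes>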
+
+
+class KnitContent(object):
+ """Content of a knit version to which deltas can be applied.
+
+ This is always stored in memory as a list of lines with \\n at the end,
+ plus a flag saying if the final ending is really there or not, because that
+ corresponds to the on-disk knit representation.
+ """
+
+ def __init__(self):
+ self._should_strip_eol = False
+
+ def apply_delta(self, delta, new_version_id):
+ """Apply delta to this object to become new_version_id."""
+ raise NotImplementedError(self.apply_delta)
+
+ def line_delta_iter(self, new_lines):
+ """Generate line-based delta from this content to new_lines."""
+ new_texts = new_lines.text()
+ old_texts = self.text()
+ s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
+ for tag, i1, i2, j1, j2 in s.get_opcodes():
+ if tag == 'equal':
+ continue
+ # ofrom, oto, length, data
+ yield i1, i2, j2 - j1, new_lines._lines[j1:j2]
+
+ def line_delta(self, new_lines):
+ return list(self.line_delta_iter(new_lines))
+
+ @staticmethod
+ def get_line_delta_blocks(knit_delta, source, target):
+ """Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
+ target_len = len(target)
+ s_pos = 0
+ t_pos = 0
+ for s_begin, s_end, t_len, new_text in knit_delta:
+ true_n = s_begin - s_pos
+ n = true_n
+ if n > 0:
+ # knit deltas do not provide reliable info about whether the
+ # last line of a file matches, due to eol handling.
+ if source[s_pos + n -1] != target[t_pos + n -1]:
+ n-=1
+ if n > 0:
+ yield s_pos, t_pos, n
+ t_pos += t_len + true_n
+ s_pos = s_end
+ n = target_len - t_pos
+ if n > 0:
+ if source[s_pos + n -1] != target[t_pos + n -1]:
+ n-=1
+ if n > 0:
+ yield s_pos, t_pos, n
+ yield s_pos + (target_len - t_pos), target_len, 0
+
+
+class AnnotatedKnitContent(KnitContent):
+ """Annotated content."""
+
+ def __init__(self, lines):
+ KnitContent.__init__(self)
+ self._lines = lines
+
+ def annotate(self):
+ """Return a list of (origin, text) for each content line."""
+ lines = self._lines[:]
+ if self._should_strip_eol:
+ origin, last_line = lines[-1]
+ lines[-1] = (origin, last_line.rstrip('\n'))
+ return lines
+
+ def apply_delta(self, delta, new_version_id):
+ """Apply delta to this object to become new_version_id."""
+ offset = 0
+ lines = self._lines
+ for start, end, count, delta_lines in delta:
+ lines[offset+start:offset+end] = delta_lines
+ offset = offset + (start - end) + count
+
+ def text(self):
+ try:
+ lines = [text for origin, text in self._lines]
+ except ValueError, e:
+ # most commonly (only?) caused by the internal form of the knit
+ # missing annotation information because of a bug - see thread
+ # around 20071015
+ raise KnitCorrupt(self,
+ "line in annotated knit missing annotation information: %s"
+ % (e,))
+ if self._should_strip_eol:
+ lines[-1] = lines[-1].rstrip('\n')
+ return lines
+
+ def copy(self):
+ return AnnotatedKnitContent(self._lines[:])
+
+
+class PlainKnitContent(KnitContent):
+ """Unannotated content.
+
+ When annotate[_iter] is called on this content, the same version is reported
+ for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
+ objects.
+ """
+
+ def __init__(self, lines, version_id):
+ KnitContent.__init__(self)
+ self._lines = lines
+ self._version_id = version_id
+
+ def annotate(self):
+ """Return a list of (origin, text) for each content line."""
+ return [(self._version_id, line) for line in self._lines]
+
+ def apply_delta(self, delta, new_version_id):
+ """Apply delta to this object to become new_version_id."""
+ offset = 0
+ lines = self._lines
+ for start, end, count, delta_lines in delta:
+ lines[offset+start:offset+end] = delta_lines
+ offset = offset + (start - end) + count
+ self._version_id = new_version_id
+
+ def copy(self):
+ return PlainKnitContent(self._lines[:], self._version_id)
+
+ def text(self):
+ lines = self._lines
+ if self._should_strip_eol:
+ lines = lines[:]
+ lines[-1] = lines[-1].rstrip('\n')
+ return lines
+
+
+class _KnitFactory(object):
+ """Base class for common Factory functions."""
+
+ def parse_record(self, version_id, record, record_details,
+ base_content, copy_base_content=True):
+ """Parse a record into a full content object.
+
+ :param version_id: The official version id for this content
+ :param record: The data returned by read_records_iter()
+ :param record_details: Details about the record returned by
+ get_build_details
+ :param base_content: If get_build_details returns a compression_parent,
+ you must return a base_content here, else use None
+        :param copy_base_content: When building from the base_content, decide
+            whether to copy it and return a new object, or to modify it in
+            place.
+ :return: (content, delta) A Content object and possibly a line-delta,
+ delta may be None
+ """
+ method, noeol = record_details
+ if method == 'line-delta':
+ if copy_base_content:
+ content = base_content.copy()
+ else:
+ content = base_content
+ delta = self.parse_line_delta(record, version_id)
+ content.apply_delta(delta, version_id)
+ else:
+ content = self.parse_fulltext(record, version_id)
+ delta = None
+ content._should_strip_eol = noeol
+ return (content, delta)
+
+
+class KnitAnnotateFactory(_KnitFactory):
+ """Factory for creating annotated Content objects."""
+
+ annotated = True
+
+ def make(self, lines, version_id):
+ num_lines = len(lines)
+ return AnnotatedKnitContent(zip([version_id] * num_lines, lines))
+
+ def parse_fulltext(self, content, version_id):
+ """Convert fulltext to internal representation
+
+ fulltext content is of the format
+ revid(utf8) plaintext\n
+ internal representation is of the format:
+ (revid, plaintext)
+ """
+ # TODO: jam 20070209 The tests expect this to be returned as tuples,
+ # but the code itself doesn't really depend on that.
+ # Figure out a way to not require the overhead of turning the
+ # list back into tuples.
+ lines = [tuple(line.split(' ', 1)) for line in content]
+ return AnnotatedKnitContent(lines)
+
+ def parse_line_delta_iter(self, lines):
+ return iter(self.parse_line_delta(lines))
+
+ def parse_line_delta(self, lines, version_id, plain=False):
+ """Convert a line based delta into internal representation.
+
+ line delta is in the form of:
+ intstart intend intcount
+ 1..count lines:
+ revid(utf8) newline\n
+ internal representation is
+ (start, end, count, [1..count tuples (revid, newline)])
+
+ :param plain: If True, the lines are returned as a plain
+ list without annotations, not as a list of (origin, content) tuples, i.e.
+ (start, end, count, [1..count newline])
+ """
+ result = []
+ lines = iter(lines)
+ next = lines.next
+
+ cache = {}
+ def cache_and_return(line):
+ origin, text = line.split(' ', 1)
+ return cache.setdefault(origin, origin), text
+
+ # walk through the lines parsing.
+ # Note that the plain test is explicitly pulled out of the
+ # loop to minimise any performance impact
+ if plain:
+ for header in lines:
+ start, end, count = [int(n) for n in header.split(',')]
+ contents = [next().split(' ', 1)[1] for i in xrange(count)]
+ result.append((start, end, count, contents))
+ else:
+ for header in lines:
+ start, end, count = [int(n) for n in header.split(',')]
+ contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
+ result.append((start, end, count, contents))
+ return result
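+
+    # Illustrative sketch (hypothetical revision id): the serialised delta
+    # lines ['1,2,1\n', 'rev-1 new text\n'] parse to
+    # [(1, 2, 1, [('rev-1', 'new text\n')])], or, with plain=True, to
+    # [(1, 2, 1, ['new text\n'])].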
+
+ def get_fulltext_content(self, lines):
+ """Extract just the content lines from a fulltext."""
+ return (line.split(' ', 1)[1] for line in lines)
+
+ def get_linedelta_content(self, lines):
+ """Extract just the content from a line delta.
+
+ This doesn't return all of the extra information stored in a delta.
+ Only the actual content lines.
+ """
+ lines = iter(lines)
+ next = lines.next
+ for header in lines:
+ header = header.split(',')
+ count = int(header[2])
+ for i in xrange(count):
+ origin, text = next().split(' ', 1)
+ yield text
+
+ def lower_fulltext(self, content):
+ """convert a fulltext content record into a serializable form.
+
+ see parse_fulltext which this inverts.
+ """
+ return ['%s %s' % (o, t) for o, t in content._lines]
+
+ def lower_line_delta(self, delta):
+ """convert a delta into a serializable form.
+
+ See parse_line_delta which this inverts.
+ """
+ # TODO: jam 20070209 We only do the caching thing to make sure that
+ # the origin is a valid utf-8 line, eventually we could remove it
+ out = []
+ for start, end, c, lines in delta:
+ out.append('%d,%d,%d\n' % (start, end, c))
+ out.extend(origin + ' ' + text
+ for origin, text in lines)
+ return out
+
+ def annotate(self, knit, key):
+ content = knit._get_content(key)
+ # adjust for the fact that serialised annotations are only key suffixes
+ # for this factory.
+ if type(key) is tuple:
+ prefix = key[:-1]
+ origins = content.annotate()
+ result = []
+ for origin, line in origins:
+ result.append((prefix + (origin,), line))
+ return result
+ else:
+ # XXX: This smells a bit. Why would key ever be a non-tuple here?
+ # Aren't keys defined to be tuples? -- spiv 20080618
+ return content.annotate()
+
+
+class KnitPlainFactory(_KnitFactory):
+ """Factory for creating plain Content objects."""
+
+ annotated = False
+
+ def make(self, lines, version_id):
+ return PlainKnitContent(lines, version_id)
+
+ def parse_fulltext(self, content, version_id):
+ """This parses an unannotated fulltext.
+
+ Note that this is not a noop - the internal representation
+        has (versionid, line) - it's just a constant versionid.
+ """
+ return self.make(content, version_id)
+
+ def parse_line_delta_iter(self, lines, version_id):
+ cur = 0
+ num_lines = len(lines)
+ while cur < num_lines:
+ header = lines[cur]
+ cur += 1
+ start, end, c = [int(n) for n in header.split(',')]
+ yield start, end, c, lines[cur:cur+c]
+ cur += c
+
+ def parse_line_delta(self, lines, version_id):
+ return list(self.parse_line_delta_iter(lines, version_id))
+
+ def get_fulltext_content(self, lines):
+ """Extract just the content lines from a fulltext."""
+ return iter(lines)
+
+ def get_linedelta_content(self, lines):
+ """Extract just the content from a line delta.
+
+ This doesn't return all of the extra information stored in a delta.
+ Only the actual content lines.
+ """
+ lines = iter(lines)
+ next = lines.next
+ for header in lines:
+ header = header.split(',')
+ count = int(header[2])
+ for i in xrange(count):
+ yield next()
+
+ def lower_fulltext(self, content):
+ return content.text()
+
+ def lower_line_delta(self, delta):
+ out = []
+ for start, end, c, lines in delta:
+ out.append('%d,%d,%d\n' % (start, end, c))
+ out.extend(lines)
+ return out
+
+ def annotate(self, knit, key):
+ annotator = _KnitAnnotator(knit)
+ return annotator.annotate_flat(key)
+
+
+
+def make_file_factory(annotated, mapper):
+ """Create a factory for creating a file based KnitVersionedFiles.
+
+ This is only functional enough to run interface tests; it doesn't try to
+ provide a full pack environment.
+
+ :param annotated: knit annotations are wanted.
+ :param mapper: The mapper from keys to paths.
+ """
+ def factory(transport):
+ index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
+ access = _KnitKeyAccess(transport, mapper)
+ return KnitVersionedFiles(index, access, annotated=annotated)
+ return factory
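+
+ # Illustrative usage sketch (assumption, not part of the original source):
+ # the returned factory is called with a transport to obtain the versioned
+ # files object, along the lines of:
+ #   factory = make_file_factory(annotated=True, mapper=some_mapper)
+ #   texts = factory(some_transport)
+ # 'some_mapper' and 'some_transport' are hypothetical placeholders here.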
+
+
+def make_pack_factory(graph, delta, keylength):
+ """Create a factory for creating a pack based VersionedFiles.
+
+ This is only functional enough to run interface tests; it doesn't try to
+ provide a full pack environment.
+
+ :param graph: Store a graph.
+ :param delta: Delta compress contents.
+ :param keylength: How long should keys be.
+ """
+ def factory(transport):
+ parents = graph or delta
+ ref_length = 0
+ if graph:
+ ref_length += 1
+ if delta:
+ ref_length += 1
+ max_delta_chain = 200
+ else:
+ max_delta_chain = 0
+ graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
+ key_elements=keylength)
+ stream = transport.open_write_stream('newpack')
+ writer = pack.ContainerWriter(stream.write)
+ writer.begin()
+ index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
+ deltas=delta, add_callback=graph_index.add_nodes)
+ access = pack_repo._DirectPackAccess({})
+ access.set_writer(writer, graph_index, (transport, 'newpack'))
+ result = KnitVersionedFiles(index, access,
+ max_delta_chain=max_delta_chain)
+ result.stream = stream
+ result.writer = writer
+ return result
+ return factory
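+
+ # Illustrative usage sketch (assumption, not part of the original source):
+ #   vf = make_pack_factory(graph=True, delta=True, keylength=1)(some_transport)
+ #   ... add or fetch texts via vf ...
+ #   cleanup_pack_knit(vf)  # closes vf.stream and finalises vf.writer (below)
+ # 'some_transport' is a hypothetical placeholder.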
+
+
+def cleanup_pack_knit(versioned_files):
+ versioned_files.stream.close()
+ versioned_files.writer.end()
+
+
+def _get_total_build_size(self, keys, positions):
+ """Determine the total bytes to build these keys.
+
+ (helper function because _KnitGraphIndex and _KndxIndex work the same, but
+ don't inherit from a common base.)
+
+ :param keys: Keys that we want to build
+ :param positions: dict of {key: (info, index_memo, comp_parent)} (such
+ as returned by _get_components_positions)
+ :return: Number of bytes to build those keys
+ """
+ all_build_index_memos = {}
+ build_keys = keys
+ while build_keys:
+ next_keys = set()
+ for key in build_keys:
+ # This is mostly for the 'stacked' case
+ # Where we will be getting the data from a fallback
+ if key not in positions:
+ continue
+ _, index_memo, compression_parent = positions[key]
+ all_build_index_memos[key] = index_memo
+ if compression_parent not in all_build_index_memos:
+ next_keys.add(compression_parent)
+ build_keys = next_keys
+ return sum([index_memo[2] for index_memo
+ in all_build_index_memos.itervalues()])
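+
+ # Worked example (hypothetical numbers): if key A is stored as a delta whose
+ # compression parent B is a fulltext, and the index memos in 'positions' carry
+ # stored lengths of 100 bytes for A and 400 bytes for B (index_memo[2]), the
+ # walk above visits A then B and returns 100 + 400 = 500 bytes.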
+
+
+class KnitVersionedFiles(VersionedFilesWithFallbacks):
+ """Storage for many versioned files using knit compression.
+
+ Backend storage is managed by indices and data objects.
+
+ :ivar _index: A _KnitGraphIndex or similar that can describe the
+ parents, graph, compression and data location of entries in this
+ KnitVersionedFiles. Note that this is only the index for
+ *this* vfs; if there are fallbacks they must be queried separately.
+ """
+
+ def __init__(self, index, data_access, max_delta_chain=200,
+ annotated=False, reload_func=None):
+ """Create a KnitVersionedFiles with index and data_access.
+
+ :param index: The index for the knit data.
+ :param data_access: The access object to store and retrieve knit
+ records.
+ :param max_delta_chain: The maximum number of deltas to permit during
+ insertion. Set to 0 to prohibit the use of deltas.
+ :param annotated: Set to True to cause annotations to be calculated and
+ stored during insertion.
+ :param reload_func: A function that can be called if we think we need
+ to reload the pack listing and try again. See
+ 'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
+ """
+ self._index = index
+ self._access = data_access
+ self._max_delta_chain = max_delta_chain
+ if annotated:
+ self._factory = KnitAnnotateFactory()
+ else:
+ self._factory = KnitPlainFactory()
+ self._immediate_fallback_vfs = []
+ self._reload_func = reload_func
+
+ def __repr__(self):
+ return "%s(%r, %r)" % (
+ self.__class__.__name__,
+ self._index,
+ self._access)
+
+ def without_fallbacks(self):
+ """Return a clone of this object without any fallbacks configured."""
+ return KnitVersionedFiles(self._index, self._access,
+ self._max_delta_chain, self._factory.annotated,
+ self._reload_func)
+
+ def add_fallback_versioned_files(self, a_versioned_files):
+ """Add a source of texts for texts not present in this knit.
+
+ :param a_versioned_files: A VersionedFiles object.
+ """
+ self._immediate_fallback_vfs.append(a_versioned_files)
+
+ def add_lines(self, key, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ """See VersionedFiles.add_lines()."""
+ self._index._check_write_ok()
+ self._check_add(key, lines, random_id, check_content)
+ if parents is None:
+ # The caller might pass None if there is no graph data, but kndx
+ # indexes can't directly store that, so we give them
+ # an empty tuple instead.
+ parents = ()
+ line_bytes = ''.join(lines)
+ return self._add(key, lines, parents,
+ parent_texts, left_matching_blocks, nostore_sha, random_id,
+ line_bytes=line_bytes)
+
+ def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
+ """See VersionedFiles._add_text()."""
+ self._index._check_write_ok()
+ self._check_add(key, None, random_id, check_content=False)
+ if text.__class__ is not str:
+ raise errors.BzrBadParameterUnicode("text")
+ if parents is None:
+ # The caller might pass None if there is no graph data, but kndx
+ # indexes can't directly store that, so we give them
+ # an empty tuple instead.
+ parents = ()
+ return self._add(key, None, parents,
+ None, None, nostore_sha, random_id,
+ line_bytes=text)
+
+ def _add(self, key, lines, parents, parent_texts,
+ left_matching_blocks, nostore_sha, random_id,
+ line_bytes):
+ """Add a set of lines on top of version specified by parents.
+
+ Any versions not present will be converted into ghosts.
+
+ :param lines: A list of strings where each one is a single line (has a
+ single newline at the end of the string). This is now optional
+ (callers can pass None); it is kept in this position for backwards
+ compatibility. If supplied, ''.join(lines) must equal line_bytes.
+ :param line_bytes: A single string containing the content
+
+ We pass both lines and line_bytes because different routes bring the
+ values to this function. And for memory efficiency, we don't want to
+ have to split/join on-demand.
+ """
+ # first thing, if the content is something we don't need to store, find
+ # that out.
+ digest = sha_string(line_bytes)
+ if nostore_sha == digest:
+ raise errors.ExistingContent
+
+ present_parents = []
+ if parent_texts is None:
+ parent_texts = {}
+ # Do a single query to ascertain parent presence; we only compress
+ # against parents in the same kvf.
+ present_parent_map = self._index.get_parent_map(parents)
+ for parent in parents:
+ if parent in present_parent_map:
+ present_parents.append(parent)
+
+ # Currently we can only compress against the left most present parent.
+ if (len(present_parents) == 0 or
+ present_parents[0] != parents[0]):
+ delta = False
+ else:
+ # To speed the extract of texts the delta chain is limited
+ # to a fixed number of deltas. This should minimize both
+ # I/O and the time spent applying deltas.
+ delta = self._check_should_delta(present_parents[0])
+
+ text_length = len(line_bytes)
+ options = []
+ no_eol = False
+ # Note: line_bytes is not modified to add a newline, that is tracked
+ # via the no_eol flag. 'lines' *is* modified, because that is the
+ # general form needed by the Content code.
+ if line_bytes and line_bytes[-1] != '\n':
+ options.append('no-eol')
+ no_eol = True
+ # Copy the existing list, or create a new one
+ if lines is None:
+ lines = osutils.split_lines(line_bytes)
+ else:
+ lines = lines[:]
+ # Replace the last line with one that ends in a final newline
+ lines[-1] = lines[-1] + '\n'
+ if lines is None:
+ lines = osutils.split_lines(line_bytes)
+
+ for element in key[:-1]:
+ if type(element) is not str:
+ raise TypeError("key contains non-strings: %r" % (key,))
+ if key[-1] is None:
+ key = key[:-1] + ('sha1:' + digest,)
+ elif type(key[-1]) is not str:
+ raise TypeError("key contains non-strings: %r" % (key,))
+ # Knit hunks are still last-element only
+ version_id = key[-1]
+ content = self._factory.make(lines, version_id)
+ if no_eol:
+ # Hint to the content object that its text() call should strip the
+ # EOL.
+ content._should_strip_eol = True
+ if delta or (self._factory.annotated and len(present_parents) > 0):
+ # Merge annotations from parent texts if needed.
+ delta_hunks = self._merge_annotations(content, present_parents,
+ parent_texts, delta, self._factory.annotated,
+ left_matching_blocks)
+
+ if delta:
+ options.append('line-delta')
+ store_lines = self._factory.lower_line_delta(delta_hunks)
+ size, bytes = self._record_to_data(key, digest,
+ store_lines)
+ else:
+ options.append('fulltext')
+ # isinstance is slower and we have no hierarchy.
+ if self._factory.__class__ is KnitPlainFactory:
+ # Use the already joined bytes saving iteration time in
+ # _record_to_data.
+ dense_lines = [line_bytes]
+ if no_eol:
+ dense_lines.append('\n')
+ size, bytes = self._record_to_data(key, digest,
+ lines, dense_lines)
+ else:
+ # get mixed annotation + content and feed it into the
+ # serialiser.
+ store_lines = self._factory.lower_fulltext(content)
+ size, bytes = self._record_to_data(key, digest,
+ store_lines)
+
+ access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
+ self._index.add_records(
+ ((key, options, access_memo, parents),),
+ random_id=random_id)
+ return digest, text_length, content
+
+ def annotate(self, key):
+ """See VersionedFiles.annotate."""
+ return self._factory.annotate(self, key)
+
+ def get_annotator(self):
+ return _KnitAnnotator(self)
+
+ def check(self, progress_bar=None, keys=None):
+ """See VersionedFiles.check()."""
+ if keys is None:
+ return self._logical_check()
+ else:
+ # At the moment, check does no extra work over get_record_stream
+ return self.get_record_stream(keys, 'unordered', True)
+
+ def _logical_check(self):
+ # This doesn't actually test extraction of everything, but that will
+ # impact 'bzr check' substantially, and needs to be integrated with
+ # care. However, it does check for the obvious problem of a delta with
+ # no basis.
+ keys = self._index.keys()
+ parent_map = self.get_parent_map(keys)
+ for key in keys:
+ if self._index.get_method(key) != 'fulltext':
+ compression_parent = parent_map[key][0]
+ if compression_parent not in parent_map:
+ raise errors.KnitCorrupt(self,
+ "Missing basis parent %s for %s" % (
+ compression_parent, key))
+ for fallback_vfs in self._immediate_fallback_vfs:
+ fallback_vfs.check()
+
+ def _check_add(self, key, lines, random_id, check_content):
+ """check that version_id and lines are safe to add."""
+ version_id = key[-1]
+ if version_id is not None:
+ if contains_whitespace(version_id):
+ raise InvalidRevisionId(version_id, self)
+ self.check_not_reserved_id(version_id)
+ # TODO: If random_id==False and the key is already present, we should
+ # probably check that the existing content is identical to what is
+ # being inserted, and otherwise raise an exception. This would make
+ # the bundle code simpler.
+ if check_content:
+ self._check_lines_not_unicode(lines)
+ self._check_lines_are_lines(lines)
+
+ def _check_header(self, key, line):
+ rec = self._split_header(line)
+ self._check_header_version(rec, key[-1])
+ return rec
+
+ def _check_header_version(self, rec, version_id):
+ """Checks the header version on original format knit records.
+
+ These have the last component of the key embedded in the record.
+ """
+ if rec[1] != version_id:
+ raise KnitCorrupt(self,
+ 'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
+
+ def _check_should_delta(self, parent):
+ """Iterate back through the parent listing, looking for a fulltext.
+
+ This is used when we want to decide whether to add a delta or a new
+ fulltext. It searches for _max_delta_chain parents. When it finds a
+ fulltext parent, it sees if the total size of the deltas leading up to
+ it is large enough to indicate that we want a new full text anyway.
+
+ Return True if we should create a new delta, False if we should use a
+ full text.
+ """
+ delta_size = 0
+ fulltext_size = None
+ for count in xrange(self._max_delta_chain):
+ try:
+ # Note that this only looks in the index of this particular
+ # KnitVersionedFiles, not in the fallbacks. This ensures that
+ # we won't store a delta spanning physical repository
+ # boundaries.
+ build_details = self._index.get_build_details([parent])
+ parent_details = build_details[parent]
+ except (RevisionNotPresent, KeyError), e:
+ # Some basis is not locally present: always fulltext
+ return False
+ index_memo, compression_parent, _, _ = parent_details
+ _, _, size = index_memo
+ if compression_parent is None:
+ fulltext_size = size
+ break
+ delta_size += size
+ # We don't explicitly check for presence because this is in an
+ # inner loop, and if it's missing it'll fail anyhow.
+ parent = compression_parent
+ else:
+ # We couldn't find a fulltext, so we must create a new one
+ return False
+ # Simple heuristic - if the total I/O would be greater as a delta than
+ # the originally installed fulltext, we create a new fulltext.
+ return fulltext_size > delta_size
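+
+ # Worked example of the heuristic above (hypothetical sizes): walking back
+ # from the candidate parent over deltas of 30 and 50 bytes to a 500 byte
+ # fulltext gives delta_size == 80 and fulltext_size == 500, so a new delta is
+ # stored; once the accumulated delta sizes reach the fulltext size, a fresh
+ # fulltext is written instead.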
+
+ def _build_details_to_components(self, build_details):
+ """Convert a build_details tuple to a position tuple."""
+ # record_details, access_memo, compression_parent
+ return build_details[3], build_details[0], build_details[1]
+
+ def _get_components_positions(self, keys, allow_missing=False):
+ """Produce a map of position data for the components of keys.
+
+ This data is intended to be used for retrieving the knit records.
+
+ A dict of key to (record_details, index_memo, next, parents) is
+ returned.
+
+ * method is the way referenced data should be applied.
+ * index_memo is the handle to pass to the data access to actually get
+ the data
+ * next is the build-parent of the version, or None for fulltexts.
+ * parents is the version_ids of the parents of this version
+
+ :param allow_missing: If True do not raise an error on a missing
+ component, just ignore it.
+ """
+ component_data = {}
+ pending_components = keys
+ while pending_components:
+ build_details = self._index.get_build_details(pending_components)
+ current_components = set(pending_components)
+ pending_components = set()
+ for key, details in build_details.iteritems():
+ (index_memo, compression_parent, parents,
+ record_details) = details
+ method = record_details[0]
+ if compression_parent is not None:
+ pending_components.add(compression_parent)
+ component_data[key] = self._build_details_to_components(details)
+ missing = current_components.difference(build_details)
+ if missing and not allow_missing:
+ raise errors.RevisionNotPresent(missing.pop(), self)
+ return component_data
+
+ def _get_content(self, key, parent_texts={}):
+ """Returns a content object that makes up the specified
+ version."""
+ cached_version = parent_texts.get(key, None)
+ if cached_version is not None:
+ # Ensure the cache dict is valid.
+ if not self.get_parent_map([key]):
+ raise RevisionNotPresent(key, self)
+ return cached_version
+ generator = _VFContentMapGenerator(self, [key])
+ return generator._get_content(key)
+
+ def get_parent_map(self, keys):
+ """Get a map of the graph parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ return self._get_parent_map_with_sources(keys)[0]
+
+ def _get_parent_map_with_sources(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A tuple. The first element is a mapping from keys to parents.
+ Absent keys are absent from the mapping. The second element is a
+ list with the locations each key was found in. The first element
+ is the in-this-knit parents, the second the first fallback source,
+ and so on.
+ """
+ result = {}
+ sources = [self._index] + self._immediate_fallback_vfs
+ source_results = []
+ missing = set(keys)
+ for source in sources:
+ if not missing:
+ break
+ new_result = source.get_parent_map(missing)
+ source_results.append(new_result)
+ result.update(new_result)
+ missing.difference_update(set(new_result))
+ return result, source_results
+
+ def _get_record_map(self, keys, allow_missing=False):
+ """Produce a dictionary of knit records.
+
+ :return: {key:(record, record_details, digest, next)}
+
+ * record: data returned from read_records (a KnitContent object)
+ * record_details: opaque information to pass to parse_record
+ * digest: SHA1 digest of the full text after all steps are done
+ * next: build-parent of the version, i.e. the leftmost ancestor.
+ Will be None if the record is not a delta.
+
+ :param keys: The keys to build a map for
+ :param allow_missing: If some records are missing, rather than
+ error, just return the data that could be generated.
+ """
+ raw_map = self._get_record_map_unparsed(keys,
+ allow_missing=allow_missing)
+ return self._raw_map_to_record_map(raw_map)
+
+ def _raw_map_to_record_map(self, raw_map):
+ """Parse the contents of _get_record_map_unparsed.
+
+ :return: see _get_record_map.
+ """
+ result = {}
+ for key in raw_map:
+ data, record_details, next = raw_map[key]
+ content, digest = self._parse_record(key[-1], data)
+ result[key] = content, record_details, digest, next
+ return result
+
+ def _get_record_map_unparsed(self, keys, allow_missing=False):
+ """Get the raw data for reconstructing keys without parsing it.
+
+ :return: A dict suitable for parsing via _raw_map_to_record_map.
+ key-> raw_bytes, (method, noeol), compression_parent
+ """
+ # This retries the whole request if anything fails. Potentially we
+ # could be a bit more selective. We could track the keys whose records
+ # we have successfully found, and then only request the new records
+ # from there. However, _get_components_positions grabs the whole build
+ # chain, which means we'll likely try to grab the same records again
+ # anyway. Also, can the build chains change as part of a pack
+ # operation? We wouldn't want to end up with a broken chain.
+ while True:
+ try:
+ position_map = self._get_components_positions(keys,
+ allow_missing=allow_missing)
+ # key = component_id, r = record_details, i_m = index_memo,
+ # n = next
+ records = [(key, i_m) for key, (r, i_m, n)
+ in position_map.iteritems()]
+ # Sort by the index memo, so that we request records from the
+ # same pack file together, and in forward-sorted order
+ records.sort(key=operator.itemgetter(1))
+ raw_record_map = {}
+ for key, data in self._read_records_iter_unchecked(records):
+ (record_details, index_memo, next) = position_map[key]
+ raw_record_map[key] = data, record_details, next
+ return raw_record_map
+ except errors.RetryWithNewPacks, e:
+ self._access.reload_or_raise(e)
+
+ @classmethod
+ def _split_by_prefix(cls, keys):
+ """For the given keys, split them up based on their prefix.
+
+ To keep memory pressure somewhat under control, split the
+ requests back into per-file-id requests, otherwise "bzr co"
+ extracts the full tree into memory before writing it to disk.
+ This should be revisited if _get_content_maps() can ever cross
+ file-id boundaries.
+
+ The keys for a given file_id are kept in the same relative order.
+ Ordering between file_ids is not, though prefix_order will return the
+ order that the key was first seen.
+
+ :param keys: An iterable of key tuples
+ :return: (split_map, prefix_order)
+ split_map A dictionary mapping prefix => keys
+ prefix_order The order that we saw the various prefixes
+ """
+ split_by_prefix = {}
+ prefix_order = []
+ for key in keys:
+ if len(key) == 1:
+ prefix = ''
+ else:
+ prefix = key[0]
+
+ if prefix in split_by_prefix:
+ split_by_prefix[prefix].append(key)
+ else:
+ split_by_prefix[prefix] = [key]
+ prefix_order.append(prefix)
+ return split_by_prefix, prefix_order
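+
+ # Illustrative example (hypothetical keys): ('f1', 'r1'), ('f2', 'r1') and
+ # ('f1', 'r2') split into {'f1': [('f1', 'r1'), ('f1', 'r2')],
+ # 'f2': [('f2', 'r1')]} with prefix_order ['f1', 'f2']; length-one keys all
+ # fall under the '' prefix.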
+
+ def _group_keys_for_io(self, keys, non_local_keys, positions,
+ _min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
+ """For the given keys, group them into 'best-sized' requests.
+
+ The idea is to avoid making 1 request per file, but to never try to
+ unpack an entire 1.5GB source tree in a single pass. Also when
+ possible, we should try to group requests to the same pack file
+ together.
+
+ :return: list of (keys, non_local) tuples that indicate what keys
+ should be fetched next.
+ """
+ # TODO: Ideally we would group on 2 factors. We want to extract texts
+ # from the same pack file together, and we want to extract all
+ # the texts for a given build-chain together. Ultimately it
+ # probably needs a better global view.
+ total_keys = len(keys)
+ prefix_split_keys, prefix_order = self._split_by_prefix(keys)
+ prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
+ cur_keys = []
+ cur_non_local = set()
+ cur_size = 0
+ result = []
+ sizes = []
+ for prefix in prefix_order:
+ keys = prefix_split_keys[prefix]
+ non_local = prefix_split_non_local_keys.get(prefix, [])
+
+ this_size = self._index._get_total_build_size(keys, positions)
+ cur_size += this_size
+ cur_keys.extend(keys)
+ cur_non_local.update(non_local)
+ if cur_size > _min_buffer_size:
+ result.append((cur_keys, cur_non_local))
+ sizes.append(cur_size)
+ cur_keys = []
+ cur_non_local = set()
+ cur_size = 0
+ if cur_keys:
+ result.append((cur_keys, cur_non_local))
+ sizes.append(cur_size)
+ return result
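+
+ # Illustrative behaviour (hypothetical sizes, assuming a 500kB minimum buffer
+ # size): prefixes whose estimated build sizes are 400kB and 300kB are emitted
+ # as one combined (keys, non_local) request, because the running total only
+ # crosses the threshold after the second prefix is added; a trailing batch
+ # that never crosses the threshold is flushed by the final 'if cur_keys:'.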
+
+ def get_record_stream(self, keys, ordering, include_delta_closure):
+ """Get a stream of records for keys.
+
+ :param keys: The keys to include.
+ :param ordering: Either 'unordered' or 'topological'. A topologically
+ sorted stream has compression parents strictly before their
+ children.
+ :param include_delta_closure: If True then the closure across any
+ compression parents will be included (in the opaque data).
+ :return: An iterator of ContentFactory objects, each of which is only
+ valid until the iterator is advanced.
+ """
+ # keys might be a generator
+ keys = set(keys)
+ if not keys:
+ return
+ if not self._index.has_graph:
+ # Cannot sort when no graph has been stored.
+ ordering = 'unordered'
+
+ remaining_keys = keys
+ while True:
+ try:
+ keys = set(remaining_keys)
+ for content_factory in self._get_remaining_record_stream(keys,
+ ordering, include_delta_closure):
+ remaining_keys.discard(content_factory.key)
+ yield content_factory
+ return
+ except errors.RetryWithNewPacks, e:
+ self._access.reload_or_raise(e)
+
+ def _get_remaining_record_stream(self, keys, ordering,
+ include_delta_closure):
+ """This function is the 'retry' portion for get_record_stream."""
+ if include_delta_closure:
+ positions = self._get_components_positions(keys, allow_missing=True)
+ else:
+ build_details = self._index.get_build_details(keys)
+ # map from key to
+ # (record_details, access_memo, compression_parent_key)
+ positions = dict((key, self._build_details_to_components(details))
+ for key, details in build_details.iteritems())
+ absent_keys = keys.difference(set(positions))
+ # There may be more absent keys : if we're missing the basis component
+ # and are trying to include the delta closure.
+ # XXX: We should not ever need to examine remote sources because we do
+ # not permit deltas across versioned files boundaries.
+ if include_delta_closure:
+ needed_from_fallback = set()
+ # Build up reconstructable_keys dict. key:True in this dict means
+ # the key can be reconstructed.
+ reconstructable_keys = {}
+ for key in keys:
+ # the delta chain
+ try:
+ chain = [key, positions[key][2]]
+ except KeyError:
+ needed_from_fallback.add(key)
+ continue
+ result = True
+ while chain[-1] is not None:
+ if chain[-1] in reconstructable_keys:
+ result = reconstructable_keys[chain[-1]]
+ break
+ else:
+ try:
+ chain.append(positions[chain[-1]][2])
+ except KeyError:
+ # missing basis component
+ needed_from_fallback.add(chain[-1])
+ result = True
+ break
+ for chain_key in chain[:-1]:
+ reconstructable_keys[chain_key] = result
+ if not result:
+ needed_from_fallback.add(key)
+ # Double index lookups here : need a unified api ?
+ global_map, parent_maps = self._get_parent_map_with_sources(keys)
+ if ordering in ('topological', 'groupcompress'):
+ if ordering == 'topological':
+ # Global topological sort
+ present_keys = tsort.topo_sort(global_map)
+ else:
+ present_keys = sort_groupcompress(global_map)
+ # Now group by source:
+ source_keys = []
+ current_source = None
+ for key in present_keys:
+ for parent_map in parent_maps:
+ if key in parent_map:
+ key_source = parent_map
+ break
+ if current_source is not key_source:
+ source_keys.append((key_source, []))
+ current_source = key_source
+ source_keys[-1][1].append(key)
+ else:
+ if ordering != 'unordered':
+ raise AssertionError('valid values for ordering are:'
+ ' "unordered", "groupcompress" or "topological" not: %r'
+ % (ordering,))
+ # Just group by source; remote sources first.
+ present_keys = []
+ source_keys = []
+ for parent_map in reversed(parent_maps):
+ source_keys.append((parent_map, []))
+ for key in parent_map:
+ present_keys.append(key)
+ source_keys[-1][1].append(key)
+ # We have been requested to return these records in an order that
+ # suits us. So we ask the index to give us an optimally sorted
+ # order.
+ for source, sub_keys in source_keys:
+ if source is parent_maps[0]:
+ # Only sort the keys for this VF
+ self._index._sort_keys_by_io(sub_keys, positions)
+ absent_keys = keys - set(global_map)
+ for key in absent_keys:
+ yield AbsentContentFactory(key)
+ # restrict our view to the keys we can answer.
+ # XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
+ # XXX: At that point we need to consider the impact of double reads by
+ # utilising components multiple times.
+ if include_delta_closure:
+ # XXX: get_content_maps performs its own index queries; allow state
+ # to be passed in.
+ non_local_keys = needed_from_fallback - absent_keys
+ for keys, non_local_keys in self._group_keys_for_io(present_keys,
+ non_local_keys,
+ positions):
+ generator = _VFContentMapGenerator(self, keys, non_local_keys,
+ global_map,
+ ordering=ordering)
+ for record in generator.get_record_stream():
+ yield record
+ else:
+ for source, keys in source_keys:
+ if source is parent_maps[0]:
+ # this KnitVersionedFiles
+ records = [(key, positions[key][1]) for key in keys]
+ for key, raw_data in self._read_records_iter_unchecked(records):
+ (record_details, index_memo, _) = positions[key]
+ yield KnitContentFactory(key, global_map[key],
+ record_details, None, raw_data, self._factory.annotated, None)
+ else:
+ vf = self._immediate_fallback_vfs[parent_maps.index(source) - 1]
+ for record in vf.get_record_stream(keys, ordering,
+ include_delta_closure):
+ yield record
+
+ def get_sha1s(self, keys):
+ """See VersionedFiles.get_sha1s()."""
+ missing = set(keys)
+ record_map = self._get_record_map(missing, allow_missing=True)
+ result = {}
+ for key, details in record_map.iteritems():
+ if key not in missing:
+ continue
+ # record entry 2 is the 'digest'.
+ result[key] = details[2]
+ missing.difference_update(set(result))
+ for source in self._immediate_fallback_vfs:
+ if not missing:
+ break
+ new_result = source.get_sha1s(missing)
+ result.update(new_result)
+ missing.difference_update(set(new_result))
+ return result
+
+ def insert_record_stream(self, stream):
+ """Insert a record stream into this container.
+
+ :param stream: A stream of records to insert.
+ :return: None
+ :seealso VersionedFiles.get_record_stream:
+ """
+ def get_adapter(adapter_key):
+ try:
+ return adapters[adapter_key]
+ except KeyError:
+ adapter_factory = adapter_registry.get(adapter_key)
+ adapter = adapter_factory(self)
+ adapters[adapter_key] = adapter
+ return adapter
+ delta_types = set()
+ if self._factory.annotated:
+ # self is annotated, we need annotated knits to use directly.
+ annotated = "annotated-"
+ convertibles = []
+ else:
+ # self is not annotated, but we can strip annotations cheaply.
+ annotated = ""
+ convertibles = set(["knit-annotated-ft-gz"])
+ if self._max_delta_chain:
+ delta_types.add("knit-annotated-delta-gz")
+ convertibles.add("knit-annotated-delta-gz")
+ # The set of types we can cheaply adapt without needing basis texts.
+ native_types = set()
+ if self._max_delta_chain:
+ native_types.add("knit-%sdelta-gz" % annotated)
+ delta_types.add("knit-%sdelta-gz" % annotated)
+ native_types.add("knit-%sft-gz" % annotated)
+ knit_types = native_types.union(convertibles)
+ adapters = {}
+ # Buffer all index entries that we can't add immediately because their
+ # basis parent is missing. We don't buffer all because generating
+ # annotations may require access to some of the new records. However we
+ # can't generate annotations from new deltas until their basis parent
+ # is present anyway, so we get away with not needing an index that
+ # includes the new keys.
+ #
+ # See <http://launchpad.net/bugs/300177> about ordering of compression
+ # parents in the records - to be conservative, we insist that all
+ # parents must be present to avoid expanding to a fulltext.
+ #
+ # key = basis_parent, value = index entry to add
+ buffered_index_entries = {}
+ for record in stream:
+ kind = record.storage_kind
+ if kind.startswith('knit-') and kind.endswith('-gz'):
+ # Check that the ID in the header of the raw knit bytes matches
+ # the record metadata.
+ raw_data = record._raw_record
+ df, rec = self._parse_record_header(record.key, raw_data)
+ df.close()
+ buffered = False
+ parents = record.parents
+ if record.storage_kind in delta_types:
+ # TODO: eventually the record itself should track
+ # compression_parent
+ compression_parent = parents[0]
+ else:
+ compression_parent = None
+ # Raise an error when a record is missing.
+ if record.storage_kind == 'absent':
+ raise RevisionNotPresent([record.key], self)
+ elif ((record.storage_kind in knit_types)
+ and (compression_parent is None
+ or not self._immediate_fallback_vfs
+ or self._index.has_key(compression_parent)
+ or not self.has_key(compression_parent))):
+ # we can insert the knit record literally if either it has no
+ # compression parent OR we already have its basis in this kvf
+ # OR the basis is not present even in the fallbacks. In the
+ # last case it will either turn up later in the stream and all
+ # will be well, or it won't turn up at all and we'll raise an
+ # error at the end.
+ #
+ # TODO: self.has_key is somewhat redundant with
+ # self._index.has_key; we really want something that directly
+ # asks if it's only present in the fallbacks. -- mbp 20081119
+ if record.storage_kind not in native_types:
+ try:
+ adapter_key = (record.storage_kind, "knit-delta-gz")
+ adapter = get_adapter(adapter_key)
+ except KeyError:
+ adapter_key = (record.storage_kind, "knit-ft-gz")
+ adapter = get_adapter(adapter_key)
+ bytes = adapter.get_bytes(record)
+ else:
+ # It's a knit record, it has a _raw_record field (even if
+ # it was reconstituted from a network stream).
+ bytes = record._raw_record
+ options = [record._build_details[0]]
+ if record._build_details[1]:
+ options.append('no-eol')
+ # Just blat it across.
+ # Note: This does end up adding data on duplicate keys. As
+ # modern repositories use atomic insertions this should not
+ # lead to excessive growth in the event of interrupted fetches.
+ # 'knit' repositories may suffer excessive growth, but as a
+ # deprecated format this is tolerable. It can be fixed if
+ # needed by making the kndx index raise on a duplicate
+ # add with identical parents and options.
+ access_memo = self._access.add_raw_records(
+ [(record.key, len(bytes))], bytes)[0]
+ index_entry = (record.key, options, access_memo, parents)
+ if 'fulltext' not in options:
+ # Not a fulltext, so we need to make sure the compression
+ # parent will also be present.
+ # Note that pack backed knits don't need to buffer here
+ # because they buffer all writes to the transaction level,
+ # but we don't expose that difference at the index level. If
+ # the query here has sufficient cost to show up in
+ # profiling we should do that.
+ #
+ # They're required to be physically in this
+ # KnitVersionedFiles, not in a fallback.
+ if not self._index.has_key(compression_parent):
+ pending = buffered_index_entries.setdefault(
+ compression_parent, [])
+ pending.append(index_entry)
+ buffered = True
+ if not buffered:
+ self._index.add_records([index_entry])
+ elif record.storage_kind == 'chunked':
+ self.add_lines(record.key, parents,
+ osutils.chunks_to_lines(record.get_bytes_as('chunked')))
+ else:
+ # Not suitable for direct insertion as a
+ # delta, either because it's not the right format, or this
+ # KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
+ # 0) or because it depends on a base only present in the
+ # fallback kvfs.
+ self._access.flush()
+ try:
+ # Try getting a fulltext directly from the record.
+ bytes = record.get_bytes_as('fulltext')
+ except errors.UnavailableRepresentation:
+ adapter_key = record.storage_kind, 'fulltext'
+ adapter = get_adapter(adapter_key)
+ bytes = adapter.get_bytes(record)
+ lines = split_lines(bytes)
+ try:
+ self.add_lines(record.key, parents, lines)
+ except errors.RevisionAlreadyPresent:
+ pass
+ # Add any records whose basis parent is now available.
+ if not buffered:
+ added_keys = [record.key]
+ while added_keys:
+ key = added_keys.pop(0)
+ if key in buffered_index_entries:
+ index_entries = buffered_index_entries[key]
+ self._index.add_records(index_entries)
+ added_keys.extend(
+ [index_entry[0] for index_entry in index_entries])
+ del buffered_index_entries[key]
+ if buffered_index_entries:
+ # There were index entries buffered at the end of the stream,
+ # So these need to be added (if the index supports holding such
+ # entries for later insertion)
+ all_entries = []
+ for key in buffered_index_entries:
+ index_entries = buffered_index_entries[key]
+ all_entries.extend(index_entries)
+ self._index.add_records(
+ all_entries, missing_compression_parents=True)
+
+ def get_missing_compression_parent_keys(self):
+ """Return an iterable of keys of missing compression parents.
+
+ Check this after calling insert_record_stream to find out if there are
+ any missing compression parents. If there are, the records that
+ depend on them are not able to be inserted safely. For atomic
+ KnitVersionedFiles built on packs, the transaction should be aborted or
+ suspended - commit will fail at this point. Nonatomic knits will error
+ earlier because they have no staging area to put pending entries into.
+ """
+ return self._index.get_missing_compression_parents()
+
+ def iter_lines_added_or_present_in_keys(self, keys, pb=None):
+ """Iterate over the lines in the versioned files from keys.
+
+ This may return lines from other keys. Each item the returned
+ iterator yields is a tuple of a line and a text version that that line
+ is present in (not introduced in).
+
+ Ordering of results is in whatever order is most suitable for the
+ underlying storage format.
+
+ If a progress bar is supplied, it may be used to indicate progress.
+ The caller is responsible for cleaning up progress bars (because this
+ is an iterator).
+
+ NOTES:
+ * Lines are normalised by the underlying store: they will all have \\n
+ terminators.
+ * Lines are returned in arbitrary order.
+ * If a requested key did not change any lines (or didn't have any
+ lines), it may not be mentioned at all in the result.
+
+ :param pb: Progress bar supplied by caller.
+ :return: An iterator over (line, key).
+ """
+ if pb is None:
+ pb = ui.ui_factory.nested_progress_bar()
+ keys = set(keys)
+ total = len(keys)
+ done = False
+ while not done:
+ try:
+ # we don't care about inclusions, the caller cares.
+ # but we need to setup a list of records to visit.
+ # we need key, position, length
+ key_records = []
+ build_details = self._index.get_build_details(keys)
+ for key, details in build_details.iteritems():
+ if key in keys:
+ key_records.append((key, details[0]))
+ records_iter = enumerate(self._read_records_iter(key_records))
+ for (key_idx, (key, data, sha_value)) in records_iter:
+ pb.update(gettext('Walking content'), key_idx, total)
+ compression_parent = build_details[key][1]
+ if compression_parent is None:
+ # fulltext
+ line_iterator = self._factory.get_fulltext_content(data)
+ else:
+ # Delta
+ line_iterator = self._factory.get_linedelta_content(data)
+ # Now that we are yielding the data for this key, remove it
+ # from the list
+ keys.remove(key)
+ # XXX: It might be more efficient to yield (key,
+ # line_iterator) in the future. However for now, this is a
+ # simpler change to integrate into the rest of the
+ # codebase. RBC 20071110
+ for line in line_iterator:
+ yield line, key
+ done = True
+ except errors.RetryWithNewPacks, e:
+ self._access.reload_or_raise(e)
+ # If there are still keys we've not yet found, we look in the fallback
+ # vfs, and hope to find them there. Note that if the keys are found
+ # but had no changes or no content, the fallback may not return
+ # anything.
+ if keys and not self._immediate_fallback_vfs:
+ # XXX: strictly the second parameter is meant to be the file id
+ # but it's not easily accessible here.
+ raise RevisionNotPresent(keys, repr(self))
+ for source in self._immediate_fallback_vfs:
+ if not keys:
+ break
+ source_keys = set()
+ for line, key in source.iter_lines_added_or_present_in_keys(keys):
+ source_keys.add(key)
+ yield line, key
+ keys.difference_update(source_keys)
+ pb.update(gettext('Walking content'), total, total)
+
+ def _make_line_delta(self, delta_seq, new_content):
+ """Generate a line delta from delta_seq and new_content."""
+ diff_hunks = []
+ for op in delta_seq.get_opcodes():
+ if op[0] == 'equal':
+ continue
+ diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
+ return diff_hunks
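+
+ # Illustrative example (hypothetical opcode): a ('replace', 2, 3, 2, 4) opcode
+ # from delta_seq becomes the hunk (2, 3, 2, new_content._lines[2:4]), i.e.
+ # replace old lines [2:3) with the two new lines; 'equal' opcodes are skipped.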
+
+ def _merge_annotations(self, content, parents, parent_texts={},
+ delta=None, annotated=None,
+ left_matching_blocks=None):
+ """Merge annotations for content and generate deltas.
+
+ This is done by comparing the annotations based on changes to the text
+ and generating a delta on the resulting full texts. If annotations are
+ not being created then a simple delta is created.
+ """
+ if left_matching_blocks is not None:
+ delta_seq = diff._PrematchedMatcher(left_matching_blocks)
+ else:
+ delta_seq = None
+ if annotated:
+ for parent_key in parents:
+ merge_content = self._get_content(parent_key, parent_texts)
+ if (parent_key == parents[0] and delta_seq is not None):
+ seq = delta_seq
+ else:
+ seq = patiencediff.PatienceSequenceMatcher(
+ None, merge_content.text(), content.text())
+ for i, j, n in seq.get_matching_blocks():
+ if n == 0:
+ continue
+ # this copies (origin, text) pairs across to the new
+ # content for any line that matches the last-checked
+ # parent.
+ content._lines[j:j+n] = merge_content._lines[i:i+n]
+ # XXX: Robert says the following block is a workaround for a
+ # now-fixed bug and it can probably be deleted. -- mbp 20080618
+ if content._lines and content._lines[-1][1][-1] != '\n':
+ # The copied annotation was from a line without a trailing EOL,
+ # reinstate one for the content object, to ensure correct
+ # serialization.
+ line = content._lines[-1][1] + '\n'
+ content._lines[-1] = (content._lines[-1][0], line)
+ if delta:
+ if delta_seq is None:
+ reference_content = self._get_content(parents[0], parent_texts)
+ new_texts = content.text()
+ old_texts = reference_content.text()
+ delta_seq = patiencediff.PatienceSequenceMatcher(
+ None, old_texts, new_texts)
+ return self._make_line_delta(delta_seq, content)
+
+ def _parse_record(self, version_id, data):
+ """Parse an original format knit record.
+
+ These have the last element of the key only present in the stored data.
+ """
+ rec, record_contents = self._parse_record_unchecked(data)
+ self._check_header_version(rec, version_id)
+ return record_contents, rec[3]
+
+ def _parse_record_header(self, key, raw_data):
+ """Parse a record header for consistency.
+
+ :return: the header and the decompressor stream.
+ as (stream, header_record)
+ """
+ df = gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
+ try:
+ # Current serialise
+ rec = self._check_header(key, df.readline())
+ except Exception, e:
+ raise KnitCorrupt(self,
+ "While reading {%s} got %s(%s)"
+ % (key, e.__class__.__name__, str(e)))
+ return df, rec
+
+ def _parse_record_unchecked(self, data):
+ # profiling notes:
+ # 4168 calls in 2880 217 internal
+ # 4168 calls to _parse_record_header in 2121
+ # 4168 calls to readlines in 330
+ df = gzip.GzipFile(mode='rb', fileobj=StringIO(data))
+ try:
+ record_contents = df.readlines()
+ except Exception, e:
+ raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
+ (data, e.__class__.__name__, str(e)))
+ header = record_contents.pop(0)
+ rec = self._split_header(header)
+ last_line = record_contents.pop()
+ if len(record_contents) != int(rec[2]):
+ raise KnitCorrupt(self,
+ 'incorrect number of lines %s != %s'
+ ' for version {%s} %s'
+ % (len(record_contents), int(rec[2]),
+ rec[1], record_contents))
+ if last_line != 'end %s\n' % rec[1]:
+ raise KnitCorrupt(self,
+ 'unexpected version end line %r, wanted %r'
+ % (last_line, rec[1]))
+ df.close()
+ return rec, record_contents
+
+ def _read_records_iter(self, records):
+ """Read text records from data file and yield result.
+
+ The result will be returned in whatever order is fastest to read,
+ not the order requested. Also, multiple requests for the same
+ record will only yield 1 response.
+
+ :param records: A list of (key, access_memo) entries
+ :return: Yields (key, contents, digest) in the order
+ read, not the order requested
+ """
+ if not records:
+ return
+
+ # XXX: This smells wrong, IO may not be getting ordered right.
+ needed_records = sorted(set(records), key=operator.itemgetter(1))
+ if not needed_records:
+ return
+
+ # The transport optimizes the fetching as well
+ # (ie, reads continuous ranges.)
+ raw_data = self._access.get_raw_records(
+ [index_memo for key, index_memo in needed_records])
+
+ for (key, index_memo), data in \
+ izip(iter(needed_records), raw_data):
+ content, digest = self._parse_record(key[-1], data)
+ yield key, content, digest
+
+ def _read_records_iter_raw(self, records):
+ """Read text records from data file and yield raw data.
+
+ This unpacks enough of the text record to validate the id is
+ as expected, but that's all.
+
+ Each item the iterator yields is (key, bytes,
+ expected_sha1_of_full_text).
+ """
+ for key, data in self._read_records_iter_unchecked(records):
+ # validate the header (note that we can only use the suffix in
+ # current knit records).
+ df, rec = self._parse_record_header(key, data)
+ df.close()
+ yield key, data, rec[3]
+
+ def _read_records_iter_unchecked(self, records):
+ """Read text records from data file and yield raw data.
+
+ No validation is done.
+
+ Yields tuples of (key, data).
+ """
+ # setup an iterator of the external records:
+ # uses readv so nice and fast we hope.
+ if len(records):
+ # grab the disk data needed.
+ needed_offsets = [index_memo for key, index_memo
+ in records]
+ raw_records = self._access.get_raw_records(needed_offsets)
+
+ for key, index_memo in records:
+ data = raw_records.next()
+ yield key, data
+
+ def _record_to_data(self, key, digest, lines, dense_lines=None):
+ """Convert key, digest, lines into a raw data block.
+
+ :param key: The key of the record. Currently keys are always serialised
+ using just the trailing component.
+ :param dense_lines: The bytes of lines but in a denser form. For
+ instance, if lines is a list of 1000 bytestrings each ending in
+ \\n, dense_lines may be a list with one line in it, containing all
+ the 1000 lines and their \\n's. Using dense_lines if it is
+ already known is a win because the string join to create bytes in
+ this function spends less time resizing the final string.
+ :return: (len, a StringIO instance with the raw data ready to read.)
+ """
+ chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
+ chunks.extend(dense_lines or lines)
+ chunks.append("end %s\n" % key[-1])
+ for chunk in chunks:
+ if type(chunk) is not str:
+ raise AssertionError(
+ 'data must be plain bytes was %s' % type(chunk))
+ if lines and lines[-1][-1] != '\n':
+ raise ValueError('corrupt lines value %r' % lines)
+ compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
+ return len(compressed_bytes), compressed_bytes
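+
+ # Illustrative layout of the chunks gzipped above (hypothetical values): for
+ # key ('file-id', 'rev-1') with three lines and digest 'abc123', the block is
+ #   version rev-1 3 abc123\n
+ #   <the three content lines (or dense_lines)>
+ #   end rev-1\n
+ # before compression by tuned_gzip.chunks_to_gzip.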
+
+ def _split_header(self, line):
+ rec = line.split()
+ if len(rec) != 4:
+ raise KnitCorrupt(self,
+ 'unexpected number of elements in record header')
+ return rec
+
+ def keys(self):
+ """See VersionedFiles.keys."""
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(2, "keys scales with size of history")
+ sources = [self._index] + self._immediate_fallback_vfs
+ result = set()
+ for source in sources:
+ result.update(source.keys())
+ return result
+
+
+class _ContentMapGenerator(object):
+ """Generate texts or expose raw deltas for a set of texts."""
+
+ def __init__(self, ordering='unordered'):
+ self._ordering = ordering
+
+ def _get_content(self, key):
+ """Get the content object for key."""
+ # Note that _get_content is only called when the _ContentMapGenerator
+ # has been constructed with just one key requested for reconstruction.
+ if key in self.nonlocal_keys:
+ record = self.get_record_stream().next()
+ # Create a content object on the fly
+ lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
+ return PlainKnitContent(lines, record.key)
+ else:
+ # local keys we can ask for directly
+ return self._get_one_work(key)
+
+ def get_record_stream(self):
+ """Get a record stream for the keys requested during __init__."""
+ for record in self._work():
+ yield record
+
+ def _work(self):
+ """Produce maps of text and KnitContents as dicts.
+
+ :return: (text_map, content_map) where text_map contains the texts for
+ the requested versions and content_map contains the KnitContents.
+ """
+ # NB: By definition we never need to read remote sources unless texts
+ # are requested from them: we don't delta across stores - and we
+ # explicitly do not want to, in order to prevent data loss situations.
+ if self.global_map is None:
+ self.global_map = self.vf.get_parent_map(self.keys)
+ nonlocal_keys = self.nonlocal_keys
+
+ missing_keys = set(nonlocal_keys)
+ # Read from remote versioned file instances and provide to our caller.
+ for source in self.vf._immediate_fallback_vfs:
+ if not missing_keys:
+ break
+ # Loop over fallback repositories asking them for texts - ignore
+ # any missing from a particular fallback.
+ for record in source.get_record_stream(missing_keys,
+ self._ordering, True):
+ if record.storage_kind == 'absent':
+ # Not in this particular stream, may be in one of the
+ # other fallback vfs objects.
+ continue
+ missing_keys.remove(record.key)
+ yield record
+
+ if self._raw_record_map is None:
+ raise AssertionError('_raw_record_map should have been filled')
+ first = True
+ for key in self.keys:
+ if key in self.nonlocal_keys:
+ continue
+ yield LazyKnitContentFactory(key, self.global_map[key], self, first)
+ first = False
+
+ def _get_one_work(self, requested_key):
+ # Now, if we have calculated everything already, just return the
+ # desired text.
+ if requested_key in self._contents_map:
+ return self._contents_map[requested_key]
+ # To simplify things, parse everything at once - code that wants one text
+ # probably wants them all.
+ # FUTURE: This function could be improved for the 'extract many' case
+ # by tracking each component and only doing the copy when the number of
+ # children that need to apply deltas to it is > 1 or it is part of the
+ # final output.
+ multiple_versions = len(self.keys) != 1
+ if self._record_map is None:
+ self._record_map = self.vf._raw_map_to_record_map(
+ self._raw_record_map)
+ record_map = self._record_map
+ # raw_record_map is key:
+ # Have read and parsed records at this point.
+ for key in self.keys:
+ if key in self.nonlocal_keys:
+ # already handled
+ continue
+ components = []
+ cursor = key
+ while cursor is not None:
+ try:
+ record, record_details, digest, next = record_map[cursor]
+ except KeyError:
+ raise RevisionNotPresent(cursor, self)
+ components.append((cursor, record, record_details, digest))
+ cursor = next
+ if cursor in self._contents_map:
+ # no need to plan further back
+ components.append((cursor, None, None, None))
+ break
+
+ content = None
+ for (component_id, record, record_details,
+ digest) in reversed(components):
+ if component_id in self._contents_map:
+ content = self._contents_map[component_id]
+ else:
+ content, delta = self._factory.parse_record(key[-1],
+ record, record_details, content,
+ copy_base_content=multiple_versions)
+ if multiple_versions:
+ self._contents_map[component_id] = content
+
+ # digest here is the digest from the last applied component.
+ text = content.text()
+ actual_sha = sha_strings(text)
+ if actual_sha != digest:
+ raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
+ if multiple_versions:
+ return self._contents_map[requested_key]
+ else:
+ return content
+
+ def _wire_bytes(self):
+ """Get the bytes to put on the wire for 'key'.
+
+ The first collection of bytes asked for returns the serialised
+ raw_record_map and the additional details (key, parent) for key.
+ Subsequent calls return just the additional details (key, parent).
+ The wire storage_kind given for the first key is 'knit-delta-closure',
+ For subsequent keys it is 'knit-delta-closure-ref'.
+
+ :param key: A key from the content generator.
+ :return: Bytes to put on the wire.
+ """
+ lines = []
+ # kind marker for dispatch on the far side,
+ lines.append('knit-delta-closure')
+ # Annotated or not
+ if self.vf._factory.annotated:
+ lines.append('annotated')
+ else:
+ lines.append('')
+ # then the list of keys
+ lines.append('\t'.join(['\x00'.join(key) for key in self.keys
+ if key not in self.nonlocal_keys]))
+ # then the _raw_record_map in serialised form:
+ map_byte_list = []
+ # for each item in the map:
+ # 1 line with key
+ # 1 line with parents if the key is to be yielded (None: for None, '' for ())
+ # one line with method
+ # one line with noeol
+ # one line with next ('' for None)
+ # one line with byte count of the record bytes
+ # the record bytes
+ for key, (record_bytes, (method, noeol), next) in \
+ self._raw_record_map.iteritems():
+ key_bytes = '\x00'.join(key)
+ parents = self.global_map.get(key, None)
+ if parents is None:
+ parent_bytes = 'None:'
+ else:
+ parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
+ method_bytes = method
+ if noeol:
+ noeol_bytes = "T"
+ else:
+ noeol_bytes = "F"
+ if next:
+ next_bytes = '\x00'.join(next)
+ else:
+ next_bytes = ''
+ map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
+ key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
+ len(record_bytes), record_bytes))
+ map_bytes = ''.join(map_byte_list)
+ lines.append(map_bytes)
+ bytes = '\n'.join(lines)
+ return bytes
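+
+ # Illustrative serialisation (hypothetical single-key case): for a plain knit
+ # exposing only the fulltext key ('f', 'r1') with 12 record bytes and no
+ # compression parent, the produced bytes look roughly like
+ #   'knit-delta-closure\n' + '\n' + 'f\x00r1\n' +
+ #   'f\x00r1\n<parent line>\nfulltext\nF\n\n12\n<12 raw record bytes>'
+ # where the empty second line marks an unannotated factory.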
+
+
+class _VFContentMapGenerator(_ContentMapGenerator):
+ """Content map generator reading from a VersionedFiles object."""
+
+ def __init__(self, versioned_files, keys, nonlocal_keys=None,
+ global_map=None, raw_record_map=None, ordering='unordered'):
+ """Create a _ContentMapGenerator.
+
+ :param versioned_files: The versioned files that the texts are being
+ extracted from.
+ :param keys: The keys to produce content maps for.
+ :param nonlocal_keys: An iterable of keys (possibly intersecting keys)
+ which are known to not be in this knit, but rather in one of the
+ fallback knits.
+ :param global_map: The result of get_parent_map(keys) (or a supermap).
+ This is required if get_record_stream() is to be used.
+ :param raw_record_map: An unparsed raw record map to use for answering
+ contents.
+ """
+ _ContentMapGenerator.__init__(self, ordering=ordering)
+ # The vf to source data from
+ self.vf = versioned_files
+ # The keys desired
+ self.keys = list(keys)
+ # Keys known to be in fallback vfs objects
+ if nonlocal_keys is None:
+ self.nonlocal_keys = set()
+ else:
+ self.nonlocal_keys = frozenset(nonlocal_keys)
+ # Parents data for keys to be returned in get_record_stream
+ self.global_map = global_map
+ # The chunked lists for self.keys in text form
+ self._text_map = {}
+ # A cache of KnitContent objects used in extracting texts.
+ self._contents_map = {}
+ # All the knit records needed to assemble the requested keys as full
+ # texts.
+ self._record_map = None
+ if raw_record_map is None:
+ self._raw_record_map = self.vf._get_record_map_unparsed(keys,
+ allow_missing=True)
+ else:
+ self._raw_record_map = raw_record_map
+ # the factory for parsing records
+ self._factory = self.vf._factory
+
+
+class _NetworkContentMapGenerator(_ContentMapGenerator):
+ """Content map generator sourced from a network stream."""
+
+ def __init__(self, bytes, line_end):
+ """Construct a _NetworkContentMapGenerator from a bytes block."""
+ self._bytes = bytes
+ self.global_map = {}
+ self._raw_record_map = {}
+ self._contents_map = {}
+ self._record_map = None
+ self.nonlocal_keys = []
+ # Get access to record parsing facilities
+ self.vf = KnitVersionedFiles(None, None)
+ start = line_end
+ # Annotated or not
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ start = line_end + 1
+ if line == 'annotated':
+ self._factory = KnitAnnotateFactory()
+ else:
+ self._factory = KnitPlainFactory()
+ # list of keys to emit in get_record_stream
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ start = line_end + 1
+ self.keys = [
+ tuple(segment.split('\x00')) for segment in line.split('\t')
+ if segment]
+ # now a loop until the end. XXX: It would be nice if this was just a
+ # bunch of the same records as get_record_stream(..., False) gives, but
+ # there is a decent sized gap stopping that at the moment.
+ end = len(bytes)
+ while start < end:
+ # 1 line with key
+ line_end = bytes.find('\n', start)
+ key = tuple(bytes[start:line_end].split('\x00'))
+ start = line_end + 1
+ # 1 line with parents (None: for None, '' for ())
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ if line == 'None:':
+ parents = None
+ else:
+ parents = tuple(
+ [tuple(segment.split('\x00')) for segment in line.split('\t')
+ if segment])
+ self.global_map[key] = parents
+ start = line_end + 1
+ # one line with method
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ method = line
+ start = line_end + 1
+ # one line with noeol
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ noeol = line == "T"
+ start = line_end + 1
+ # one line with next ('' for None)
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ if not line:
+ next = None
+ else:
+ next = tuple(bytes[start:line_end].split('\x00'))
+ start = line_end + 1
+ # one line with byte count of the record bytes
+ line_end = bytes.find('\n', start)
+ line = bytes[start:line_end]
+ count = int(line)
+ start = line_end + 1
+ # the record bytes
+ record_bytes = bytes[start:start+count]
+ start = start + count
+ # put it in the map
+ self._raw_record_map[key] = (record_bytes, (method, noeol), next)
+
+ def get_record_stream(self):
+ """Get a record stream for for keys requested by the bytestream."""
+ first = True
+ for key in self.keys:
+ yield LazyKnitContentFactory(key, self.global_map[key], self, first)
+ first = False
+
+ def _wire_bytes(self):
+ return self._bytes
+
+
+class _KndxIndex(object):
+ """Manages knit index files
+
+ The index is kept in memory and read on startup, to enable
+ fast lookups of revision information. The cursor of the index
+ file is always pointing to the end, making it easy to append
+ entries.
+
+ _cache is a cache for fast mapping from version id to an Index
+ object.
+
+ _history is a cache for fast mapping from indexes to version ids.
+
+ The index data format is dictionary compressed when it comes to
+ parent references; an index entry may only have parents with a
+ lower index number. As a result, the index is topologically sorted.
+
+ Duplicate entries may be written to the index for a single version id
+ if this is done then the latter one completely replaces the former:
+ this allows updates to correct version and parent information.
+ Note that the two entries may share the delta, and that successive
+ annotations and references MUST point to the first entry.
+
+ The index file on disc contains a header, followed by one line per knit
+ record. The same revision can be present in an index file more than once.
+ The first occurrence gets assigned a sequence number starting from 0.
+
+ The format of a single line is
+ REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
+ REVISION_ID is a utf8-encoded revision id
+ FLAGS is a comma separated list of flags about the record. Values include
+ no-eol, line-delta, fulltext.
+ BYTE_OFFSET is the ascii representation of the byte offset in the data file
+ that the compressed data starts at.
+    LENGTH is the ascii representation of the length, in bytes, of this
+        record's data in the data file.
+ PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
+ REVISION_ID.
+ PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
+ revision id already in the knit that is a parent of REVISION_ID.
+ The ' :' marker is the end of record marker.
+
+ partial writes:
+    when a write to the index file is interrupted, it will result in a line
+ that does not end in ' :'. If the ' :' is not present at the end of a line,
+ or at the end of the file, then the record that is missing it will be
+ ignored by the parser.
+
+ When writing new records to the index file, the data is preceded by '\n'
+ to ensure that records always start on new lines even if the last write was
+    interrupted. As a result it's normal for the last line in the index to be
+ missing a trailing newline. One can be added with no harmful effects.
+
+ :ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
+ where prefix is e.g. the (fileid,) for .texts instances or () for
+ constant-mapped things like .revisions, and the old state is
+ tuple(history_vector, cache_dict). This is used to prevent having an
+ ABI change with the C extension that reads .kndx files.
+ """
+
+ HEADER = "# bzr knit index 8\n"
+
+ def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
+ """Create a _KndxIndex on transport using mapper."""
+ self._transport = transport
+ self._mapper = mapper
+ self._get_scope = get_scope
+ self._allow_writes = allow_writes
+ self._is_locked = is_locked
+ self._reset_cache()
+ self.has_graph = True
+
+ def add_records(self, records, random_id=False, missing_compression_parents=False):
+ """Add multiple records to the index.
+
+ :param records: a list of tuples:
+ (key, options, access_memo, parents).
+ :param random_id: If True the ids being added were randomly generated
+ and no check for existence will be performed.
+        :param missing_compression_parents: If True the records being added may
+            be compressed against texts that are not yet available (neither in
+            the index nor among the records being added). If False every
+            compression parent is already in the index or among the records.
+ """
+ if missing_compression_parents:
+ # It might be nice to get the edge of the records. But keys isn't
+ # _wrong_.
+ keys = sorted(record[0] for record in records)
+ raise errors.RevisionNotPresent(keys, self)
+ paths = {}
+ for record in records:
+ key = record[0]
+ prefix = key[:-1]
+ path = self._mapper.map(key) + '.kndx'
+ path_keys = paths.setdefault(path, (prefix, []))
+ path_keys[1].append(record)
+ for path in sorted(paths):
+ prefix, path_keys = paths[path]
+ self._load_prefixes([prefix])
+ lines = []
+ orig_history = self._kndx_cache[prefix][1][:]
+ orig_cache = self._kndx_cache[prefix][0].copy()
+
+ try:
+ for key, options, (_, pos, size), parents in path_keys:
+ if parents is None:
+ # kndx indices cannot be parentless.
+ parents = ()
+ line = "\n%s %s %s %s %s :" % (
+ key[-1], ','.join(options), pos, size,
+ self._dictionary_compress(parents))
+ if type(line) is not str:
+ raise AssertionError(
+ 'data must be utf8 was %s' % type(line))
+ lines.append(line)
+ self._cache_key(key, options, pos, size, parents)
+ if len(orig_history):
+ self._transport.append_bytes(path, ''.join(lines))
+ else:
+ self._init_index(path, lines)
+ except:
+ # If any problems happen, restore the original values and re-raise
+ self._kndx_cache[prefix] = (orig_cache, orig_history)
+ raise
+
+ def scan_unvalidated_index(self, graph_index):
+ """See _KnitGraphIndex.scan_unvalidated_index."""
+ # Because kndx files do not support atomic insertion via separate index
+ # files, they do not support this method.
+ raise NotImplementedError(self.scan_unvalidated_index)
+
+ def get_missing_compression_parents(self):
+ """See _KnitGraphIndex.get_missing_compression_parents."""
+ # Because kndx files do not support atomic insertion via separate index
+ # files, they do not support this method.
+ raise NotImplementedError(self.get_missing_compression_parents)
+
+ def _cache_key(self, key, options, pos, size, parent_keys):
+ """Cache a version record in the history array and index cache.
+
+ This is inlined into _load_data for performance. KEEP IN SYNC.
+ (It saves 60ms, 25% of the __init__ overhead on local 4000 record
+ indexes).
+ """
+ prefix = key[:-1]
+ version_id = key[-1]
+        # last-element only for compatibility with the C load_data.
+ parents = tuple(parent[-1] for parent in parent_keys)
+ for parent in parent_keys:
+ if parent[:-1] != prefix:
+ raise ValueError("mismatched prefixes for %r, %r" % (
+ key, parent_keys))
+ cache, history = self._kndx_cache[prefix]
+ # only want the _history index to reference the 1st index entry
+ # for version_id
+ if version_id not in cache:
+ index = len(history)
+ history.append(version_id)
+ else:
+ index = cache[version_id][5]
+ cache[version_id] = (version_id,
+ options,
+ pos,
+ size,
+ parents,
+ index)
+
+ def check_header(self, fp):
+ line = fp.readline()
+ if line == '':
+ # An empty file can actually be treated as though the file doesn't
+ # exist yet.
+ raise errors.NoSuchFile(self)
+ if line != self.HEADER:
+ raise KnitHeaderError(badline=line, filename=self)
+
+ def _check_read(self):
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+ if self._get_scope() != self._scope:
+ self._reset_cache()
+
+ def _check_write_ok(self):
+ """Assert if not writes are permitted."""
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+ if self._get_scope() != self._scope:
+ self._reset_cache()
+ if self._mode != 'w':
+ raise errors.ReadOnlyObjectDirtiedError(self)
+
+ def get_build_details(self, keys):
+ """Get the method, index_memo and compression parent for keys.
+
+ Ghosts are omitted from the result.
+
+ :param keys: An iterable of keys.
+ :return: A dict of key:(index_memo, compression_parent, parents,
+ record_details).
+ index_memo
+ opaque structure to pass to read_records to extract the raw
+ data
+ compression_parent
+ Content that this record is built upon, may be None
+ parents
+ Logical parents of this node
+ record_details
+ extra information about the content which needs to be passed to
+ Factory.parse_record
+ """
+ parent_map = self.get_parent_map(keys)
+ result = {}
+ for key in keys:
+ if key not in parent_map:
+ continue # Ghost
+ method = self.get_method(key)
+ parents = parent_map[key]
+ if method == 'fulltext':
+ compression_parent = None
+ else:
+ compression_parent = parents[0]
+ noeol = 'no-eol' in self.get_options(key)
+ index_memo = self.get_position(key)
+ result[key] = (index_memo, compression_parent,
+ parents, (method, noeol))
+ return result
+
+ def get_method(self, key):
+ """Return compression method of specified key."""
+ options = self.get_options(key)
+ if 'fulltext' in options:
+ return 'fulltext'
+ elif 'line-delta' in options:
+ return 'line-delta'
+ else:
+ raise errors.KnitIndexUnknownMethod(self, options)
+
+ def get_options(self, key):
+ """Return a list representing options.
+
+ e.g. ['foo', 'bar']
+ """
+ prefix, suffix = self._split_key(key)
+ self._load_prefixes([prefix])
+ try:
+ return self._kndx_cache[prefix][0][suffix][1]
+ except KeyError:
+ raise RevisionNotPresent(key, self)
+
+ def find_ancestry(self, keys):
+ """See CombinedGraphIndex.find_ancestry()"""
+ prefixes = set(key[:-1] for key in keys)
+ self._load_prefixes(prefixes)
+ result = {}
+ parent_map = {}
+ missing_keys = set()
+ pending_keys = list(keys)
+ # This assumes that keys will not reference parents in a different
+ # prefix, which is accurate so far.
+ while pending_keys:
+ key = pending_keys.pop()
+ if key in parent_map:
+ continue
+ prefix = key[:-1]
+ try:
+ suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
+ except KeyError:
+ missing_keys.add(key)
+ else:
+ parent_keys = tuple([prefix + (suffix,)
+ for suffix in suffix_parents])
+ parent_map[key] = parent_keys
+ pending_keys.extend([p for p in parent_keys
+ if p not in parent_map])
+ return parent_map, missing_keys
+
+ def get_parent_map(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ # Parse what we need to up front, this potentially trades off I/O
+ # locality (.kndx and .knit in the same block group for the same file
+ # id) for less checking in inner loops.
+ prefixes = set(key[:-1] for key in keys)
+ self._load_prefixes(prefixes)
+ result = {}
+ for key in keys:
+ prefix = key[:-1]
+ try:
+ suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
+ except KeyError:
+ pass
+ else:
+ result[key] = tuple(prefix + (suffix,) for
+ suffix in suffix_parents)
+ return result
+
+ def get_position(self, key):
+ """Return details needed to access the version.
+
+ :return: a tuple (key, data position, size) to hand to the access
+ logic to get the record.
+ """
+ prefix, suffix = self._split_key(key)
+ self._load_prefixes([prefix])
+ entry = self._kndx_cache[prefix][0][suffix]
+ return key, entry[2], entry[3]
+
+ has_key = _mod_index._has_key_from_parent_map
+
+ def _init_index(self, path, extra_lines=[]):
+ """Initialize an index."""
+ sio = StringIO()
+ sio.write(self.HEADER)
+ sio.writelines(extra_lines)
+ sio.seek(0)
+ self._transport.put_file_non_atomic(path, sio,
+ create_parent_dir=True)
+ # self._create_parent_dir)
+ # mode=self._file_mode,
+ # dir_mode=self._dir_mode)
+
+ def keys(self):
+ """Get all the keys in the collection.
+
+ The keys are not ordered.
+ """
+ result = set()
+ # Identify all key prefixes.
+ # XXX: A bit hacky, needs polish.
+ if type(self._mapper) is ConstantMapper:
+ prefixes = [()]
+ else:
+ relpaths = set()
+ for quoted_relpath in self._transport.iter_files_recursive():
+ path, ext = os.path.splitext(quoted_relpath)
+ relpaths.add(path)
+ prefixes = [self._mapper.unmap(path) for path in relpaths]
+ self._load_prefixes(prefixes)
+ for prefix in prefixes:
+ for suffix in self._kndx_cache[prefix][1]:
+ result.add(prefix + (suffix,))
+ return result
+
+ def _load_prefixes(self, prefixes):
+ """Load the indices for prefixes."""
+ self._check_read()
+ for prefix in prefixes:
+ if prefix not in self._kndx_cache:
+ # the load_data interface writes to these variables.
+ self._cache = {}
+ self._history = []
+ self._filename = prefix
+ try:
+ path = self._mapper.map(prefix) + '.kndx'
+ fp = self._transport.get(path)
+ try:
+ # _load_data may raise NoSuchFile if the target knit is
+ # completely empty.
+ _load_data(self, fp)
+ finally:
+ fp.close()
+ self._kndx_cache[prefix] = (self._cache, self._history)
+ del self._cache
+ del self._filename
+ del self._history
+ except NoSuchFile:
+ self._kndx_cache[prefix] = ({}, [])
+ if type(self._mapper) is ConstantMapper:
+ # preserve behaviour for revisions.kndx etc.
+ self._init_index(path)
+ del self._cache
+ del self._filename
+ del self._history
+
+ missing_keys = _mod_index._missing_keys_from_parent_map
+
+ def _partition_keys(self, keys):
+ """Turn keys into a dict of prefix:suffix_list."""
+ result = {}
+ for key in keys:
+ prefix_keys = result.setdefault(key[:-1], [])
+ prefix_keys.append(key[-1])
+ return result
+
+ def _dictionary_compress(self, keys):
+ """Dictionary compress keys.
+
+ :param keys: The keys to generate references to.
+ :return: A string representation of keys. keys which are present are
+ dictionary compressed, and others are emitted as fulltext with a
+ '.' prefix.
+ """
+ if not keys:
+ return ''
+ result_list = []
+ prefix = keys[0][:-1]
+ cache = self._kndx_cache[prefix][0]
+ for key in keys:
+ if key[:-1] != prefix:
+ # kndx indices cannot refer across partitioned storage.
+ raise ValueError("mismatched prefixes for %r" % keys)
+ if key[-1] in cache:
+ # -- inlined lookup() --
+ result_list.append(str(cache[key[-1]][5]))
+ # -- end lookup () --
+ else:
+ result_list.append('.' + key[-1])
+ return ' '.join(result_list)
+
+ def _reset_cache(self):
+        # Possibly this should be an LRU cache. A dictionary from key_prefix to
+ # (cache_dict, history_vector) for parsed kndx files.
+ self._kndx_cache = {}
+ self._scope = self._get_scope()
+ allow_writes = self._allow_writes()
+ if allow_writes:
+ self._mode = 'w'
+ else:
+ self._mode = 'r'
+
+ def _sort_keys_by_io(self, keys, positions):
+ """Figure out an optimal order to read the records for the given keys.
+
+ Sort keys, grouped by index and sorted by position.
+
+ :param keys: A list of keys whose records we want to read. This will be
+ sorted 'in-place'.
+ :param positions: A dict, such as the one returned by
+ _get_components_positions()
+ :return: None
+ """
+ def get_sort_key(key):
+ index_memo = positions[key][1]
+ # Group by prefix and position. index_memo[0] is the key, so it is
+ # (file_id, revision_id) and we don't want to sort on revision_id,
+ # index_memo[1] is the position, and index_memo[2] is the size,
+ # which doesn't matter for the sort
+ return index_memo[0][:-1], index_memo[1]
+ return keys.sort(key=get_sort_key)
+
+ _get_total_build_size = _get_total_build_size
+
+ def _split_key(self, key):
+ """Split key into a prefix and suffix."""
+ return key[:-1], key[-1]
+
+
+class _KnitGraphIndex(object):
+ """A KnitVersionedFiles index layered on GraphIndex."""
+
+ def __init__(self, graph_index, is_locked, deltas=False, parents=True,
+ add_callback=None, track_external_parent_refs=False):
+ """Construct a KnitGraphIndex on a graph_index.
+
+ :param graph_index: An implementation of bzrlib.index.GraphIndex.
+        :param is_locked: A callback that returns True if the index is locked
+            and thus usable; used to check whether the object should answer
+            queries.
+ :param deltas: Allow delta-compressed records.
+        :param parents: If True, record knit parents; if False, do not record
+            parents.
+ :param add_callback: If not None, allow additions to the index and call
+ this callback with a list of added GraphIndex nodes:
+ [(node, value, node_refs), ...]
+ :param track_external_parent_refs: If True, record all external parent
+            references from added records. These can be retrieved
+ later by calling get_missing_parents().
+ """
+ self._add_callback = add_callback
+ self._graph_index = graph_index
+ self._deltas = deltas
+ self._parents = parents
+ if deltas and not parents:
+ # XXX: TODO: Delta tree and parent graph should be conceptually
+ # separate.
+ raise KnitCorrupt(self, "Cannot do delta compression without "
+ "parent tracking.")
+ self.has_graph = parents
+ self._is_locked = is_locked
+ self._missing_compression_parents = set()
+ if track_external_parent_refs:
+ self._key_dependencies = _KeyRefs()
+ else:
+ self._key_dependencies = None
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self._graph_index)
+
+ def add_records(self, records, random_id=False,
+ missing_compression_parents=False):
+ """Add multiple records to the index.
+
+ This function does not insert data into the Immutable GraphIndex
+ backing the KnitGraphIndex, instead it prepares data for insertion by
+ the caller and checks that it is safe to insert then calls
+ self._add_callback with the prepared GraphIndex nodes.
+
+ :param records: a list of tuples:
+ (key, options, access_memo, parents).
+ :param random_id: If True the ids being added were randomly generated
+ and no check for existence will be performed.
+        :param missing_compression_parents: If True the records being added may
+            be compressed against texts that are not yet available (neither in
+            the index nor among the records being added). If False every
+            compression parent is already in the index or among the records.
+ """
+ if not self._add_callback:
+ raise errors.ReadOnlyError(self)
+ # we hope there are no repositories with inconsistent parentage
+ # anymore.
+
+ keys = {}
+ compression_parents = set()
+ key_dependencies = self._key_dependencies
+ for (key, options, access_memo, parents) in records:
+ if self._parents:
+ parents = tuple(parents)
+ if key_dependencies is not None:
+ key_dependencies.add_references(key, parents)
+ index, pos, size = access_memo
+ if 'no-eol' in options:
+ value = 'N'
+ else:
+ value = ' '
+ value += "%d %d" % (pos, size)
+ if not self._deltas:
+ if 'line-delta' in options:
+ raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
+ if self._parents:
+ if self._deltas:
+ if 'line-delta' in options:
+ node_refs = (parents, (parents[0],))
+ if missing_compression_parents:
+ compression_parents.add(parents[0])
+ else:
+ node_refs = (parents, ())
+ else:
+ node_refs = (parents, )
+ else:
+ if parents:
+ raise KnitCorrupt(self, "attempt to add node with parents "
+ "in parentless index.")
+ node_refs = ()
+ keys[key] = (value, node_refs)
+ # check for dups
+ if not random_id:
+ present_nodes = self._get_entries(keys)
+ for (index, key, value, node_refs) in present_nodes:
+ parents = node_refs[:1]
+ # Sometimes these are passed as a list rather than a tuple
+ passed = static_tuple.as_tuples(keys[key])
+ passed_parents = passed[1][:1]
+ if (value[0] != keys[key][0][0] or
+ parents != passed_parents):
+ node_refs = static_tuple.as_tuples(node_refs)
+ raise KnitCorrupt(self, "inconsistent details in add_records"
+ ": %s %s" % ((value, node_refs), passed))
+ del keys[key]
+ result = []
+ if self._parents:
+ for key, (value, node_refs) in keys.iteritems():
+ result.append((key, value, node_refs))
+ else:
+ for key, (value, node_refs) in keys.iteritems():
+ result.append((key, value))
+ self._add_callback(result)
+ if missing_compression_parents:
+ # This may appear to be incorrect (it does not check for
+ # compression parents that are in the existing graph index),
+ # but such records won't have been buffered, so this is
+ # actually correct: every entry when
+ # missing_compression_parents==True either has a missing parent, or
+ # a parent that is one of the keys in records.
+ compression_parents.difference_update(keys)
+ self._missing_compression_parents.update(compression_parents)
+ # Adding records may have satisfied missing compression parents.
+ self._missing_compression_parents.difference_update(keys)
+
+ def scan_unvalidated_index(self, graph_index):
+ """Inform this _KnitGraphIndex that there is an unvalidated index.
+
+ This allows this _KnitGraphIndex to keep track of any missing
+ compression parents we may want to have filled in to make those
+ indices valid.
+
+ :param graph_index: A GraphIndex
+ """
+ if self._deltas:
+ new_missing = graph_index.external_references(ref_list_num=1)
+ new_missing.difference_update(self.get_parent_map(new_missing))
+ self._missing_compression_parents.update(new_missing)
+ if self._key_dependencies is not None:
+ # Add parent refs from graph_index (and discard parent refs that
+ # the graph_index has).
+ for node in graph_index.iter_all_entries():
+ self._key_dependencies.add_references(node[1], node[3][0])
+
+ def get_missing_compression_parents(self):
+ """Return the keys of missing compression parents.
+
+ Missing compression parents occur when a record stream was missing
+        basis texts, or an index was scanned that had missing basis texts.
+ """
+ return frozenset(self._missing_compression_parents)
+
+ def get_missing_parents(self):
+ """Return the keys of missing parents."""
+ # If updating this, you should also update
+ # groupcompress._GCGraphIndex.get_missing_parents
+ # We may have false positives, so filter those out.
+ self._key_dependencies.satisfy_refs_for_keys(
+ self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
+ return frozenset(self._key_dependencies.get_unsatisfied_refs())
+
+ def _check_read(self):
+ """raise if reads are not permitted."""
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+
+ def _check_write_ok(self):
+ """Assert if writes are not permitted."""
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+
+ def _compression_parent(self, an_entry):
+ # return the key that an_entry is compressed against, or None
+ # Grab the second parent list (as deltas implies parents currently)
+ compression_parents = an_entry[3][1]
+ if not compression_parents:
+ return None
+ if len(compression_parents) != 1:
+ raise AssertionError(
+ "Too many compression parents: %r" % compression_parents)
+ return compression_parents[0]
+
+ def get_build_details(self, keys):
+ """Get the method, index_memo and compression parent for version_ids.
+
+ Ghosts are omitted from the result.
+
+ :param keys: An iterable of keys.
+ :return: A dict of key:
+ (index_memo, compression_parent, parents, record_details).
+ index_memo
+ opaque structure to pass to read_records to extract the raw
+ data
+ compression_parent
+ Content that this record is built upon, may be None
+ parents
+ Logical parents of this node
+ record_details
+ extra information about the content which needs to be passed to
+ Factory.parse_record
+ """
+ self._check_read()
+ result = {}
+ entries = self._get_entries(keys, False)
+ for entry in entries:
+ key = entry[1]
+ if not self._parents:
+ parents = ()
+ else:
+ parents = entry[3][0]
+ if not self._deltas:
+ compression_parent_key = None
+ else:
+ compression_parent_key = self._compression_parent(entry)
+ noeol = (entry[2][0] == 'N')
+ if compression_parent_key:
+ method = 'line-delta'
+ else:
+ method = 'fulltext'
+ result[key] = (self._node_to_position(entry),
+ compression_parent_key, parents,
+ (method, noeol))
+ return result
+
+ def _get_entries(self, keys, check_present=False):
+ """Get the entries for keys.
+
+ :param keys: An iterable of index key tuples.
+ """
+ keys = set(keys)
+ found_keys = set()
+ if self._parents:
+ for node in self._graph_index.iter_entries(keys):
+ yield node
+ found_keys.add(node[1])
+ else:
+ # adapt parentless index to the rest of the code.
+ for node in self._graph_index.iter_entries(keys):
+ yield node[0], node[1], node[2], ()
+ found_keys.add(node[1])
+ if check_present:
+ missing_keys = keys.difference(found_keys)
+ if missing_keys:
+ raise RevisionNotPresent(missing_keys.pop(), self)
+
+ def get_method(self, key):
+ """Return compression method of specified key."""
+ return self._get_method(self._get_node(key))
+
+ def _get_method(self, node):
+ if not self._deltas:
+ return 'fulltext'
+ if self._compression_parent(node):
+ return 'line-delta'
+ else:
+ return 'fulltext'
+
+ def _get_node(self, key):
+ try:
+ return list(self._get_entries([key]))[0]
+ except IndexError:
+ raise RevisionNotPresent(key, self)
+
+ def get_options(self, key):
+ """Return a list representing options.
+
+ e.g. ['foo', 'bar']
+ """
+ node = self._get_node(key)
+ options = [self._get_method(node)]
+ if node[2][0] == 'N':
+ options.append('no-eol')
+ return options
+
+ def find_ancestry(self, keys):
+ """See CombinedGraphIndex.find_ancestry()"""
+ return self._graph_index.find_ancestry(keys, 0)
+
+ def get_parent_map(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ self._check_read()
+ nodes = self._get_entries(keys)
+ result = {}
+ if self._parents:
+ for node in nodes:
+ result[node[1]] = node[3][0]
+ else:
+ for node in nodes:
+ result[node[1]] = None
+ return result
+
+ def get_position(self, key):
+ """Return details needed to access the version.
+
+ :return: a tuple (index, data position, size) to hand to the access
+ logic to get the record.
+ """
+ node = self._get_node(key)
+ return self._node_to_position(node)
+
+ has_key = _mod_index._has_key_from_parent_map
+
+ def keys(self):
+ """Get all the keys in the collection.
+
+ The keys are not ordered.
+ """
+ self._check_read()
+ return [node[1] for node in self._graph_index.iter_all_entries()]
+
+ missing_keys = _mod_index._missing_keys_from_parent_map
+
+ def _node_to_position(self, node):
+ """Convert an index value to position details."""
+ bits = node[2][1:].split(' ')
+ return node[0], int(bits[0]), int(bits[1])
+
+ def _sort_keys_by_io(self, keys, positions):
+ """Figure out an optimal order to read the records for the given keys.
+
+ Sort keys, grouped by index and sorted by position.
+
+ :param keys: A list of keys whose records we want to read. This will be
+ sorted 'in-place'.
+ :param positions: A dict, such as the one returned by
+ _get_components_positions()
+ :return: None
+ """
+ def get_index_memo(key):
+ # index_memo is at offset [1]. It is made up of (GraphIndex,
+ # position, size). GI is an object, which will be unique for each
+ # pack file. This causes us to group by pack file, then sort by
+ # position. Size doesn't matter, but it isn't worth breaking up the
+ # tuple.
+ return positions[key][1]
+ return keys.sort(key=get_index_memo)
+
+ _get_total_build_size = _get_total_build_size
+
+
+class _KnitKeyAccess(object):
+ """Access to records in .knit files."""
+
+ def __init__(self, transport, mapper):
+ """Create a _KnitKeyAccess with transport and mapper.
+
+ :param transport: The transport the access object is rooted at.
+ :param mapper: The mapper used to map keys to .knit files.
+ """
+ self._transport = transport
+ self._mapper = mapper
+
+ def add_raw_records(self, key_sizes, raw_data):
+ """Add raw knit bytes to a storage area.
+
+        The data is appended to the per-key .knit files, one record per
+        raw data item.
+
+        :param key_sizes: An iterable of tuples containing the key and size of each
+ raw data segment.
+ :param raw_data: A bytestring containing the data.
+ :return: A list of memos to retrieve the record later. Each memo is an
+ opaque index memo. For _KnitKeyAccess the memo is (key, pos,
+ length), where the key is the record key.
+ """
+ if type(raw_data) is not str:
+ raise AssertionError(
+ 'data must be plain bytes was %s' % type(raw_data))
+ result = []
+ offset = 0
+ # TODO: This can be tuned for writing to sftp and other servers where
+ # append() is relatively expensive by grouping the writes to each key
+ # prefix.
+ for key, size in key_sizes:
+ path = self._mapper.map(key)
+ try:
+ base = self._transport.append_bytes(path + '.knit',
+ raw_data[offset:offset+size])
+ except errors.NoSuchFile:
+ self._transport.mkdir(osutils.dirname(path))
+ base = self._transport.append_bytes(path + '.knit',
+ raw_data[offset:offset+size])
+ # if base == 0:
+ # chmod.
+ offset += size
+ result.append((key, base, size))
+ return result
+
+ def flush(self):
+ """Flush pending writes on this access object.
+
+ For .knit files this is a no-op.
+ """
+ pass
+
+ def get_raw_records(self, memos_for_retrieval):
+ """Get the raw bytes for a records.
+
+ :param memos_for_retrieval: An iterable containing the access memo for
+ retrieving the bytes.
+ :return: An iterator over the bytes of the records.
+ """
+ # first pass, group into same-index request to minimise readv's issued.
+ request_lists = []
+ current_prefix = None
+ for (key, offset, length) in memos_for_retrieval:
+ if current_prefix == key[:-1]:
+ current_list.append((offset, length))
+ else:
+ if current_prefix is not None:
+ request_lists.append((current_prefix, current_list))
+ current_prefix = key[:-1]
+ current_list = [(offset, length)]
+ # handle the last entry
+ if current_prefix is not None:
+ request_lists.append((current_prefix, current_list))
+ for prefix, read_vector in request_lists:
+ path = self._mapper.map(prefix) + '.knit'
+ for pos, data in self._transport.readv(path, read_vector):
+ yield data
+
+
+def annotate_knit(knit, revision_id):
+ """Annotate a knit with no cached annotations.
+
+    This implementation is intended for knits with no cached annotations.
+    It will also work for knits with cached annotations, but this is not
+    recommended.
+ """
+ annotator = _KnitAnnotator(knit)
+ return iter(annotator.annotate_flat(revision_id))
+
+
+class _KnitAnnotator(annotate.Annotator):
+ """Build up the annotations for a text."""
+
+ def __init__(self, vf):
+ annotate.Annotator.__init__(self, vf)
+
+ # TODO: handle Nodes which cannot be extracted
+ # self._ghosts = set()
+
+ # Map from (key, parent_key) => matching_blocks, should be 'use once'
+ self._matching_blocks = {}
+
+ # KnitContent objects
+ self._content_objects = {}
+ # The number of children that depend on this fulltext content object
+ self._num_compression_children = {}
+ # Delta records that need their compression parent before they can be
+ # expanded
+ self._pending_deltas = {}
+        # Fulltext records that are waiting for their parents' fulltexts before
+ # they can be yielded for annotation
+ self._pending_annotation = {}
+
+ self._all_build_details = {}
+
+ def _get_build_graph(self, key):
+ """Get the graphs for building texts and annotations.
+
+        The data you need for creating a full text may be different from the
+ data you need to annotate that text. (At a minimum, you need both
+ parents to create an annotation, but only need 1 parent to generate the
+ fulltext.)
+
+ :return: A list of (key, index_memo) records, suitable for
+ passing to read_records_iter to start reading in the raw data from
+ the pack file.
+ """
+ pending = set([key])
+ records = []
+ ann_keys = set()
+ self._num_needed_children[key] = 1
+ while pending:
+ # get all pending nodes
+ this_iteration = pending
+ build_details = self._vf._index.get_build_details(this_iteration)
+ self._all_build_details.update(build_details)
+ # new_nodes = self._vf._index._get_entries(this_iteration)
+ pending = set()
+ for key, details in build_details.iteritems():
+ (index_memo, compression_parent, parent_keys,
+ record_details) = details
+ self._parent_map[key] = parent_keys
+ self._heads_provider = None
+ records.append((key, index_memo))
+ # Do we actually need to check _annotated_lines?
+ pending.update([p for p in parent_keys
+ if p not in self._all_build_details])
+ if parent_keys:
+ for parent_key in parent_keys:
+ if parent_key in self._num_needed_children:
+ self._num_needed_children[parent_key] += 1
+ else:
+ self._num_needed_children[parent_key] = 1
+ if compression_parent:
+ if compression_parent in self._num_compression_children:
+ self._num_compression_children[compression_parent] += 1
+ else:
+ self._num_compression_children[compression_parent] = 1
+
+ missing_versions = this_iteration.difference(build_details.keys())
+ if missing_versions:
+ for key in missing_versions:
+ if key in self._parent_map and key in self._text_cache:
+ # We already have this text ready, we just need to
+ # yield it later so we get it annotated
+ ann_keys.add(key)
+ parent_keys = self._parent_map[key]
+ for parent_key in parent_keys:
+ if parent_key in self._num_needed_children:
+ self._num_needed_children[parent_key] += 1
+ else:
+ self._num_needed_children[parent_key] = 1
+ pending.update([p for p in parent_keys
+ if p not in self._all_build_details])
+ else:
+ raise errors.RevisionNotPresent(key, self._vf)
+ # Generally we will want to read the records in reverse order, because
+ # we find the parent nodes after the children
+ records.reverse()
+ return records, ann_keys
+
+ def _get_needed_texts(self, key, pb=None):
+ # if True or len(self._vf._immediate_fallback_vfs) > 0:
+ if len(self._vf._immediate_fallback_vfs) > 0:
+ # If we have fallbacks, go to the generic path
+ for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
+ yield v
+ return
+ while True:
+ try:
+ records, ann_keys = self._get_build_graph(key)
+ for idx, (sub_key, text, num_lines) in enumerate(
+ self._extract_texts(records)):
+ if pb is not None:
+ pb.update(gettext('annotating'), idx, len(records))
+ yield sub_key, text, num_lines
+ for sub_key in ann_keys:
+ text = self._text_cache[sub_key]
+ num_lines = len(text) # bad assumption
+ yield sub_key, text, num_lines
+ return
+ except errors.RetryWithNewPacks, e:
+ self._vf._access.reload_or_raise(e)
+ # The cached build_details are no longer valid
+ self._all_build_details.clear()
+
+ def _cache_delta_blocks(self, key, compression_parent, delta, lines):
+ parent_lines = self._text_cache[compression_parent]
+ blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
+ self._matching_blocks[(key, compression_parent)] = blocks
+
+ def _expand_record(self, key, parent_keys, compression_parent, record,
+ record_details):
+ delta = None
+ if compression_parent:
+ if compression_parent not in self._content_objects:
+ # Waiting for the parent
+ self._pending_deltas.setdefault(compression_parent, []).append(
+ (key, parent_keys, record, record_details))
+ return None
+ # We have the basis parent, so expand the delta
+ num = self._num_compression_children[compression_parent]
+ num -= 1
+ if num == 0:
+ base_content = self._content_objects.pop(compression_parent)
+ self._num_compression_children.pop(compression_parent)
+ else:
+ self._num_compression_children[compression_parent] = num
+ base_content = self._content_objects[compression_parent]
+ # It is tempting to want to copy_base_content=False for the last
+ # child object. However, whenever noeol=False,
+ # self._text_cache[parent_key] is content._lines. So mutating it
+ # gives very bad results.
+ # The alternative is to copy the lines into text cache, but then we
+ # are copying anyway, so just do it here.
+ content, delta = self._vf._factory.parse_record(
+ key, record, record_details, base_content,
+ copy_base_content=True)
+ else:
+ # Fulltext record
+ content, _ = self._vf._factory.parse_record(
+ key, record, record_details, None)
+ if self._num_compression_children.get(key, 0) > 0:
+ self._content_objects[key] = content
+ lines = content.text()
+ self._text_cache[key] = lines
+ if delta is not None:
+ self._cache_delta_blocks(key, compression_parent, delta, lines)
+ return lines
+
+ def _get_parent_annotations_and_matches(self, key, text, parent_key):
+ """Get the list of annotations for the parent, and the matching lines.
+
+ :param text: The opaque value given by _get_needed_texts
+ :param parent_key: The key for the parent text
+ :return: (parent_annotations, matching_blocks)
+ parent_annotations is a list as long as the number of lines in
+ parent
+ matching_blocks is a list of (parent_idx, text_idx, len) tuples
+ indicating which lines match between the two texts
+ """
+ block_key = (key, parent_key)
+ if block_key in self._matching_blocks:
+ blocks = self._matching_blocks.pop(block_key)
+ parent_annotations = self._annotations_cache[parent_key]
+ return parent_annotations, blocks
+ return annotate.Annotator._get_parent_annotations_and_matches(self,
+ key, text, parent_key)
+
+ def _process_pending(self, key):
+ """The content for 'key' was just processed.
+
+ Determine if there is any more pending work to be processed.
+ """
+ to_return = []
+ if key in self._pending_deltas:
+ compression_parent = key
+ children = self._pending_deltas.pop(key)
+ for child_key, parent_keys, record, record_details in children:
+ lines = self._expand_record(child_key, parent_keys,
+ compression_parent,
+ record, record_details)
+ if self._check_ready_for_annotations(child_key, parent_keys):
+ to_return.append(child_key)
+ # Also check any children that are waiting for this parent to be
+ # annotation ready
+ if key in self._pending_annotation:
+ children = self._pending_annotation.pop(key)
+ to_return.extend([c for c, p_keys in children
+ if self._check_ready_for_annotations(c, p_keys)])
+ return to_return
+
+ def _check_ready_for_annotations(self, key, parent_keys):
+ """return true if this text is ready to be yielded.
+
+ Otherwise, this will return False, and queue the text into
+ self._pending_annotation
+ """
+ for parent_key in parent_keys:
+ if parent_key not in self._annotations_cache:
+ # still waiting on at least one parent text, so queue it up
+ # Note that if there are multiple parents, we need to wait
+ # for all of them.
+ self._pending_annotation.setdefault(parent_key,
+ []).append((key, parent_keys))
+ return False
+ return True
+
+ def _extract_texts(self, records):
+ """Extract the various texts needed based on records"""
+ # We iterate in the order read, rather than a strict order requested
+ # However, process what we can, and put off to the side things that
+ # still need parents, cleaning them up when those parents are
+ # processed.
+ # Basic data flow:
+ # 1) As 'records' are read, see if we can expand these records into
+ # Content objects (and thus lines)
+ # 2) If a given line-delta is waiting on its compression parent, it
+ # gets queued up into self._pending_deltas, otherwise we expand
+ # it, and put it into self._text_cache and self._content_objects
+ # 3) If we expanded the text, we will then check to see if all
+ # parents have also been processed. If so, this text gets yielded,
+ # else this record gets set aside into pending_annotation
+ # 4) Further, if we expanded the text in (2), we will then check to
+ # see if there are any children in self._pending_deltas waiting to
+ # also be processed. If so, we go back to (2) for those
+ # 5) Further again, if we yielded the text, we can then check if that
+ # 'unlocks' any of the texts in pending_annotations, which should
+ # then get yielded as well
+ # Note that both steps 4 and 5 are 'recursive' in that unlocking one
+ # compression child could unlock yet another, and yielding a fulltext
+ # will also 'unlock' the children that are waiting on that annotation.
+ # (Though also, unlocking 1 parent's fulltext, does not unlock a child
+ # if other parents are also waiting.)
+ # We want to yield content before expanding child content objects, so
+ # that we know when we can re-use the content lines, and the annotation
+ # code can know when it can stop caching fulltexts, as well.
+
+ # Children that are missing their compression parent
+ pending_deltas = {}
+ for (key, record, digest) in self._vf._read_records_iter(records):
+ # ghosts?
+ details = self._all_build_details[key]
+ (_, compression_parent, parent_keys, record_details) = details
+ lines = self._expand_record(key, parent_keys, compression_parent,
+ record, record_details)
+ if lines is None:
+ # Pending delta should be queued up
+ continue
+ # At this point, we may be able to yield this content, if all
+ # parents are also finished
+ yield_this_text = self._check_ready_for_annotations(key,
+ parent_keys)
+ if yield_this_text:
+ # All parents present
+ yield key, lines, len(lines)
+ to_process = self._process_pending(key)
+ while to_process:
+ this_process = to_process
+ to_process = []
+ for key in this_process:
+ lines = self._text_cache[key]
+ yield key, lines, len(lines)
+ to_process.extend(self._process_pending(key))
+
+try:
+ from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib._knit_load_data_py import _load_data_py as _load_data
diff --git a/bzrlib/lazy_import.py b/bzrlib/lazy_import.py
new file mode 100644
index 0000000..55c9f86
--- /dev/null
+++ b/bzrlib/lazy_import.py
@@ -0,0 +1,410 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Functionality to create lazy evaluation objects.
+
+This includes waiting to import a module until it is actually used.
+
+Most commonly, the 'lazy_import' function is used to import other modules
+in an on-demand fashion. Typically use looks like::
+
+ from bzrlib.lazy_import import lazy_import
+ lazy_import(globals(), '''
+ from bzrlib import (
+ errors,
+ osutils,
+ branch,
+ )
+ import bzrlib.branch
+ ''')
+
+Then 'errors, osutils, branch' and 'bzrlib' will exist as lazy-loaded
+objects which will be replaced with a real object on first use.
+
+In general, it is best to only load modules in this way. This is because
+it isn't safe to pass these variables to other functions before they
+have been replaced. This is especially true for constants, and sometimes
+true for classes or functions (when used as a factory, or when you want
+to inherit from them).
+"""
+
+from __future__ import absolute_import
+
+
+class ScopeReplacer(object):
+ """A lazy object that will replace itself in the appropriate scope.
+
+ This object sits, ready to create the real object the first time it is
+ needed.
+ """
+
+ __slots__ = ('_scope', '_factory', '_name', '_real_obj')
+
+    # If you do x = y, setting this to False will disallow access to
+ # members from the second variable (i.e. x). This should normally
+ # be enabled for reasons of thread safety and documentation, but
+ # will be disabled during the selftest command to check for abuse.
+ _should_proxy = True
+
+ def __init__(self, scope, factory, name):
+ """Create a temporary object in the specified scope.
+ Once used, a real object will be placed in the scope.
+
+ :param scope: The scope the object should appear in
+ :param factory: A callable that will create the real object.
+ It will be passed (self, scope, name)
+ :param name: The variable name in the given scope.
+ """
+ object.__setattr__(self, '_scope', scope)
+ object.__setattr__(self, '_factory', factory)
+ object.__setattr__(self, '_name', name)
+ object.__setattr__(self, '_real_obj', None)
+ scope[name] = self
+
+ def _resolve(self):
+ """Return the real object for which this is a placeholder"""
+ name = object.__getattribute__(self, '_name')
+ real_obj = object.__getattribute__(self, '_real_obj')
+ if real_obj is None:
+ # No obj generated previously, so generate from factory and scope.
+ factory = object.__getattribute__(self, '_factory')
+ scope = object.__getattribute__(self, '_scope')
+ obj = factory(self, scope, name)
+ if obj is self:
+ raise errors.IllegalUseOfScopeReplacer(name, msg="Object tried"
+ " to replace itself, check it's not using its own scope.")
+
+ # Check if another thread has jumped in while obj was generated.
+ real_obj = object.__getattribute__(self, '_real_obj')
+ if real_obj is None:
+            # Still no preexisting obj, so go ahead and assign to scope and
+ # return. There is still a small window here where races will
+ # not be detected, but safest to avoid additional locking.
+ object.__setattr__(self, '_real_obj', obj)
+ scope[name] = obj
+ return obj
+
+ # Raise if proxying is disabled as obj has already been generated.
+ if not ScopeReplacer._should_proxy:
+ raise errors.IllegalUseOfScopeReplacer(
+ name, msg="Object already replaced, did you assign it"
+ " to another variable?")
+ return real_obj
+
+ def __getattribute__(self, attr):
+ obj = object.__getattribute__(self, '_resolve')()
+ return getattr(obj, attr)
+
+ def __setattr__(self, attr, value):
+ obj = object.__getattribute__(self, '_resolve')()
+ return setattr(obj, attr, value)
+
+ def __call__(self, *args, **kwargs):
+ obj = object.__getattribute__(self, '_resolve')()
+ return obj(*args, **kwargs)
+
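+# A minimal usage sketch (not part of bzrlib; names are illustrative): the
+# placeholder sits in a scope until first use, at which point the factory's
+# result replaces it.
+#
+#   d = {}
+#   ScopeReplacer(d, factory=lambda self, scope, name: list(), name='obj')
+#   d['obj'].append(1)   # triggers _resolve(); d['obj'] is now a real list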
+
+def disallow_proxying():
+ """Disallow lazily imported modules to be used as proxies.
+
+ Calling this function might cause problems with concurrent imports
+ in multithreaded environments, but will help detecting wasteful
+ indirection, so it should be called when executing unit tests.
+
+ Only lazy imports that happen after this call are affected.
+ """
+ ScopeReplacer._should_proxy = False
+
+
+class ImportReplacer(ScopeReplacer):
+ """This is designed to replace only a portion of an import list.
+
+ It will replace itself with a module, and then make children
+ entries also ImportReplacer objects.
+
+ At present, this only supports 'import foo.bar.baz' syntax.
+ """
+
+ # '_import_replacer_children' is intentionally a long semi-unique name
+ # that won't likely exist elsewhere. This allows us to detect an
+ # ImportReplacer object by using
+ # object.__getattribute__(obj, '_import_replacer_children')
+ # We can't just use 'isinstance(obj, ImportReplacer)', because that
+ # accesses .__class__, which goes through __getattribute__, and triggers
+ # the replacement.
+ __slots__ = ('_import_replacer_children', '_member', '_module_path')
+
+ def __init__(self, scope, name, module_path, member=None, children={}):
+ """Upon request import 'module_path' as the name 'module_name'.
+ When imported, prepare children to also be imported.
+
+ :param scope: The scope that objects should be imported into.
+ Typically this is globals()
+ :param name: The variable name. Often this is the same as the
+ module_path. 'bzrlib'
+ :param module_path: A list for the fully specified module path
+ ['bzrlib', 'foo', 'bar']
+ :param member: The member inside the module to import, often this is
+ None, indicating the module is being imported.
+ :param children: Children entries to be imported later.
+ This should be a map of children specifications.
+ ::
+
+ {'foo':(['bzrlib', 'foo'], None,
+                {'bar':(['bzrlib', 'foo', 'bar'], None, {})})
+ }
+
+ Examples::
+
+ import foo => name='foo' module_path='foo',
+ member=None, children={}
+ import foo.bar => name='foo' module_path='foo', member=None,
+                children={'bar':(['foo', 'bar'], None, {})}
+ from foo import bar => name='bar' module_path='foo', member='bar'
+ children={}
+ from foo import bar, baz would get translated into 2 import
+            requests. One for 'name=bar' and one for 'name=baz'
+ """
+ if (member is not None) and children:
+ raise ValueError('Cannot supply both a member and children')
+
+ object.__setattr__(self, '_import_replacer_children', children)
+ object.__setattr__(self, '_member', member)
+ object.__setattr__(self, '_module_path', module_path)
+
+ # Indirecting through __class__ so that children can
+ # override _import (especially our instrumented version)
+ cls = object.__getattribute__(self, '__class__')
+ ScopeReplacer.__init__(self, scope=scope, name=name,
+ factory=cls._import)
+
+ def _import(self, scope, name):
+ children = object.__getattribute__(self, '_import_replacer_children')
+ member = object.__getattribute__(self, '_member')
+ module_path = object.__getattribute__(self, '_module_path')
+ module_python_path = '.'.join(module_path)
+ if member is not None:
+ module = __import__(module_python_path, scope, scope, [member], level=0)
+ return getattr(module, member)
+ else:
+ module = __import__(module_python_path, scope, scope, [], level=0)
+ for path in module_path[1:]:
+ module = getattr(module, path)
+
+ # Prepare the children to be imported
+ for child_name, (child_path, child_member, grandchildren) in \
+ children.iteritems():
+ # Using self.__class__, so that children get children classes
+ # instantiated. (This helps with instrumented tests)
+ cls = object.__getattribute__(self, '__class__')
+ cls(module.__dict__, name=child_name,
+ module_path=child_path, member=child_member,
+ children=grandchildren)
+ return module
+
+
+class ImportProcessor(object):
+ """Convert text that users input into lazy import requests"""
+
+ # TODO: jam 20060912 This class is probably not strict enough about
+ # what type of text it allows. For example, you can do:
+ # import (foo, bar), which is not allowed by python.
+ # For now, it should be supporting a superset of python import
+ # syntax which is all we really care about.
+
+ __slots__ = ['imports', '_lazy_import_class']
+
+ def __init__(self, lazy_import_class=None):
+ self.imports = {}
+ if lazy_import_class is None:
+ self._lazy_import_class = ImportReplacer
+ else:
+ self._lazy_import_class = lazy_import_class
+
+ def lazy_import(self, scope, text):
+ """Convert the given text into a bunch of lazy import objects.
+
+ This takes a text string, which should be similar to normal python
+ import markup.
+ """
+ self._build_map(text)
+ self._convert_imports(scope)
+
+ def _convert_imports(self, scope):
+ # Now convert the map into a set of imports
+ for name, info in self.imports.iteritems():
+ self._lazy_import_class(scope, name=name, module_path=info[0],
+ member=info[1], children=info[2])
+
+ def _build_map(self, text):
+ """Take a string describing imports, and build up the internal map"""
+ for line in self._canonicalize_import_text(text):
+ if line.startswith('import '):
+ self._convert_import_str(line)
+ elif line.startswith('from '):
+ self._convert_from_str(line)
+ else:
+ raise errors.InvalidImportLine(line,
+ "doesn't start with 'import ' or 'from '")
+
+ def _convert_import_str(self, import_str):
+ """This converts a import string into an import map.
+
+ This only understands 'import foo, foo.bar, foo.bar.baz as bing'
+
+ :param import_str: The import string to process
+ """
+ if not import_str.startswith('import '):
+ raise ValueError('bad import string %r' % (import_str,))
+ import_str = import_str[len('import '):]
+
+ for path in import_str.split(','):
+ path = path.strip()
+ if not path:
+ continue
+ as_hunks = path.split(' as ')
+ if len(as_hunks) == 2:
+ # We have 'as' so this is a different style of import
+ # 'import foo.bar.baz as bing' creates a local variable
+ # named 'bing' which points to 'foo.bar.baz'
+ name = as_hunks[1].strip()
+ module_path = as_hunks[0].strip().split('.')
+ if name in self.imports:
+ raise errors.ImportNameCollision(name)
+ # No children available in 'import foo as bar'
+ self.imports[name] = (module_path, None, {})
+ else:
+                # Now handle a plain 'import foo.bar.baz', building nested
+                # children entries for each dotted component.
+ module_path = path.split('.')
+ name = module_path[0]
+ if name not in self.imports:
+ # This is a new import that we haven't seen before
+ module_def = ([name], None, {})
+ self.imports[name] = module_def
+ else:
+ module_def = self.imports[name]
+
+ cur_path = [name]
+ cur = module_def[2]
+ for child in module_path[1:]:
+ cur_path.append(child)
+ if child in cur:
+ cur = cur[child][2]
+ else:
+ next = (cur_path[:], None, {})
+ cur[child] = next
+ cur = next[2]
+
+ def _convert_from_str(self, from_str):
+ """This converts a 'from foo import bar' string into an import map.
+
+ :param from_str: The import string to process
+ """
+ if not from_str.startswith('from '):
+ raise ValueError('bad from/import %r' % from_str)
+ from_str = from_str[len('from '):]
+
+ from_module, import_list = from_str.split(' import ')
+
+ from_module_path = from_module.split('.')
+
+ for path in import_list.split(','):
+ path = path.strip()
+ if not path:
+ continue
+ as_hunks = path.split(' as ')
+ if len(as_hunks) == 2:
+ # We have 'as' so this is a different style of import
+ # 'import foo.bar.baz as bing' creates a local variable
+ # named 'bing' which points to 'foo.bar.baz'
+ name = as_hunks[1].strip()
+ module = as_hunks[0].strip()
+ else:
+ name = module = path
+ if name in self.imports:
+ raise errors.ImportNameCollision(name)
+ self.imports[name] = (from_module_path, module, {})
+
+ def _canonicalize_import_text(self, text):
+ """Take a list of imports, and split it into regularized form.
+
+ This is meant to take regular import text, and convert it to
+ the forms that the rest of the converters prefer.
+ """
+ out = []
+ cur = None
+ continuing = False
+
+ for line in text.split('\n'):
+ line = line.strip()
+ loc = line.find('#')
+ if loc != -1:
+ line = line[:loc].strip()
+
+ if not line:
+ continue
+ if cur is not None:
+ if line.endswith(')'):
+ out.append(cur + ' ' + line[:-1])
+ cur = None
+ else:
+ cur += ' ' + line
+ else:
+ if '(' in line and ')' not in line:
+ cur = line.replace('(', '')
+ else:
+ out.append(line.replace('(', '').replace(')', ''))
+ if cur is not None:
+ raise errors.InvalidImportLine(cur, 'Unmatched parenthesis')
+ return out
+
+
+def lazy_import(scope, text, lazy_import_class=None):
+ """Create lazy imports for all of the imports in text.
+
+ This is typically used as something like::
+
+ from bzrlib.lazy_import import lazy_import
+ lazy_import(globals(), '''
+ from bzrlib import (
+ foo,
+ bar,
+ baz,
+ )
+ import bzrlib.branch
+ import bzrlib.transport
+ ''')
+
+ Then 'foo, bar, baz' and 'bzrlib' will exist as lazy-loaded
+ objects which will be replaced with a real object on first use.
+
+ In general, it is best to only load modules in this way. This is
+ because other objects (functions/classes/variables) are frequently
+ used without accessing a member, which means we cannot tell they
+ have been used.
+ """
+ # This is just a helper around ImportProcessor.lazy_import
+ proc = ImportProcessor(lazy_import_class=lazy_import_class)
+ return proc.lazy_import(scope, text)
+
+
+# The only module that this module depends on is 'bzrlib.errors'. But it
+# can actually be imported lazily, since we only need it if there is a
+# problem.
+
+lazy_import(globals(), """
+from bzrlib import errors
+""")
diff --git a/bzrlib/lazy_regex.py b/bzrlib/lazy_regex.py
new file mode 100644
index 0000000..afd450a
--- /dev/null
+++ b/bzrlib/lazy_regex.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Lazily compiled regex objects.
+
+This module defines a class which creates proxy objects for regex
+compilation. This allows overriding re.compile() to return lazily compiled
+objects.
+
+We do this rather than just providing a new interface so that it will also
+be used by existing Python modules that create regexes.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+from bzrlib import errors
+
+
+class LazyRegex(object):
+ """A proxy around a real regex, which won't be compiled until accessed."""
+
+
+ # These are the parameters on a real _sre.SRE_Pattern object, which we
+ # will map to local members so that we don't have the proxy overhead.
+ _regex_attributes_to_copy = [
+ '__copy__', '__deepcopy__', 'findall', 'finditer', 'match',
+ 'scanner', 'search', 'split', 'sub', 'subn'
+ ]
+
+ # We use slots to keep the overhead low. But we need a slot entry for
+ # all of the attributes we will copy
+ __slots__ = ['_real_regex', '_regex_args', '_regex_kwargs',
+ ] + _regex_attributes_to_copy
+
+ def __init__(self, args=(), kwargs={}):
+ """Create a new proxy object, passing in the args to pass to re.compile
+
+ :param args: The `*args` to pass to re.compile
+ :param kwargs: The `**kwargs` to pass to re.compile
+ """
+ self._real_regex = None
+ self._regex_args = args
+ self._regex_kwargs = kwargs
+
+ def _compile_and_collapse(self):
+ """Actually compile the requested regex"""
+ self._real_regex = self._real_re_compile(*self._regex_args,
+ **self._regex_kwargs)
+ for attr in self._regex_attributes_to_copy:
+ setattr(self, attr, getattr(self._real_regex, attr))
+
+ def _real_re_compile(self, *args, **kwargs):
+ """Thunk over to the original re.compile"""
+ try:
+ return _real_re_compile(*args, **kwargs)
+ except re.error, e:
+ # raise InvalidPattern instead of re.error as this gives a
+ # cleaner message to the user.
+ raise errors.InvalidPattern('"' + args[0] + '" ' +str(e))
+
+ def __getstate__(self):
+ """Return the state to use when pickling."""
+ return {
+ "args": self._regex_args,
+ "kwargs": self._regex_kwargs,
+ }
+
+ def __setstate__(self, dict):
+ """Restore from a pickled state."""
+ self._real_regex = None
+ setattr(self, "_regex_args", dict["args"])
+ setattr(self, "_regex_kwargs", dict["kwargs"])
+
+ def __getattr__(self, attr):
+ """Return a member from the proxied regex object.
+
+ If the regex hasn't been compiled yet, compile it
+ """
+ if self._real_regex is None:
+ self._compile_and_collapse()
+ # Once we have compiled, the only time we should come here
+ # is actually if the attribute is missing.
+ return getattr(self._real_regex, attr)
+
+
+def lazy_compile(*args, **kwargs):
+ """Create a proxy object which will compile the regex on demand.
+
+ :return: a LazyRegex proxy object.
+ """
+ return LazyRegex(args, kwargs)
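+
+# A minimal usage sketch (added for illustration; '_number' is a hypothetical
+# name, not part of the original module). The proxy behaves like a compiled
+# pattern, but re.compile() only runs the first time an attribute is used:
+#
+#   _number = lazy_compile(r'\d+')    # nothing compiled yet
+#   _number.findall('a1b22c')         # compiles here, returns ['1', '22']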
+
+
+def install_lazy_compile():
+ """Make lazy_compile the default compile mode for regex compilation.
+
+ This overrides re.compile with lazy_compile. To restore the original
+ functionality, call reset_compile().
+ """
+ re.compile = lazy_compile
+
+
+def reset_compile():
+ """Restore the original function to re.compile().
+
+    It is safe to call reset_compile() multiple times; it always restores
+    re.compile() to the value that existed when this module was imported.
+    Nesting is not tracked, so a single call is enough to undo any number
+    of install_lazy_compile() calls.
+ """
+ re.compile = _real_re_compile
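+
+# Illustrative install/reset cycle (assumed typical usage, added comment):
+#
+#   install_lazy_compile()       # re.compile now returns LazyRegex proxies
+#   pat = re.compile('foo')      # cheap: no real compilation happens yet
+#   reset_compile()              # re.compile is the original function again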
+
+
+_real_re_compile = re.compile
+if _real_re_compile is lazy_compile:
+ raise AssertionError(
+ "re.compile has already been overridden as lazy_compile, but this would" \
+ " cause infinite recursion")
diff --git a/bzrlib/library_state.py b/bzrlib/library_state.py
new file mode 100644
index 0000000..0dab5d3
--- /dev/null
+++ b/bzrlib/library_state.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""The core state needed to make use of bzr is managed here."""
+
+from __future__ import absolute_import
+
+__all__ = [
+ 'BzrLibraryState',
+ ]
+
+
+import bzrlib
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ cleanup,
+ config,
+ osutils,
+ symbol_versioning,
+ trace,
+ ui,
+ )
+""")
+
+
+class BzrLibraryState(object):
+ """The state about how bzrlib has been configured.
+
+    This is the core state needed to make use of bzr. The current instance
+    is always exposed as bzrlib.global_state, but we would like to move to
+    a point where no global state is needed at all.
+
+ :ivar saved_state: The bzrlib.global_state at the time __enter__ was
+ called.
+ :ivar cleanups: An ObjectWithCleanups which can be used for cleanups that
+ should occur when the use of bzrlib is completed. This is initialised
+ in __enter__ and executed in __exit__.
+ """
+
+ def __init__(self, ui, trace):
+        """Create library state for normal use of bzrlib.
+
+ Most applications that embed bzrlib, including bzr itself, should just
+ call bzrlib.initialize(), but it is possible to use the state class
+ directly. The initialize() function provides sensible defaults for a
+ CLI program, such as a text UI factory.
+
+ More options may be added in future so callers should use named
+ arguments.
+
+ BzrLibraryState implements the Python 2.5 Context Manager protocol
+ PEP343, and can be used with the with statement. Upon __enter__ the
+ global variables in use by bzr are set, and they are cleared on
+ __exit__.
+
+ :param ui: A bzrlib.ui.ui_factory to use.
+ :param trace: A bzrlib.trace.Config context manager to use, perhaps
+ bzrlib.trace.DefaultConfig.
+ """
+ self._ui = ui
+ self._trace = trace
+        # There are no overrides by default; they are set later when the
+        # command-line arguments are parsed.
+ self.cmdline_overrides = config.CommandLineStore()
+ self.started = False
+
+ def __enter__(self):
+ if not self.started:
+ self._start()
+ return self # This is bound to the 'as' clause in a with statement.
+
+ def _start(self):
+ """Do all initialization."""
+ # NB: This function tweaks so much global state it's hard to test it in
+ # isolation within the same interpreter. It's not reached on normal
+ # in-process run_bzr calls. If it's broken, we expect that
+ # TestRunBzrSubprocess may fail.
+ self.cleanups = cleanup.ObjectWithCleanups()
+
+ if bzrlib.version_info[3] == 'final':
+ self.cleanups.add_cleanup(
+ symbol_versioning.suppress_deprecation_warnings(override=True))
+
+ self._trace.__enter__()
+
+ self._orig_ui = bzrlib.ui.ui_factory
+ bzrlib.ui.ui_factory = self._ui
+ self._ui.__enter__()
+
+ self.saved_state = bzrlib.global_state
+ bzrlib.global_state = self
+ self.started = True
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.cleanups.cleanup_now()
+ trace._flush_stdout_stderr()
+ trace._flush_trace()
+ osutils.report_extension_load_failures()
+ self._ui.__exit__(None, None, None)
+ self._trace.__exit__(None, None, None)
+ ui.ui_factory = self._orig_ui
+ bzrlib.global_state = self.saved_state
+        return False  # propagate exceptions.
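+
+
+# Illustrative sketch of direct use (added comment). Most embedders should
+# simply call bzrlib.initialize() as described above; this just shows the
+# equivalent explicit form:
+#
+#   import sys
+#   import bzrlib, bzrlib.trace, bzrlib.ui
+#   state = BzrLibraryState(
+#       ui=bzrlib.ui.make_ui_for_terminal(sys.stdin, sys.stdout, sys.stderr),
+#       trace=bzrlib.trace.DefaultConfig())
+#   with state:
+#       pass  # bzrlib.global_state and bzrlib.ui.ui_factory point here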
diff --git a/bzrlib/lock.py b/bzrlib/lock.py
new file mode 100644
index 0000000..daaac3f
--- /dev/null
+++ b/bzrlib/lock.py
@@ -0,0 +1,550 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Locking using OS file locks or file existence.
+
+Note: This method of locking is generally deprecated in favour of LockDir, but
+is used to lock local WorkingTrees, and by some old formats. It's accessed
+through Transport.lock_read(), etc.
+
+This module causes two methods, lock() and unlock(), to be defined in
+any way that works on the current platform.
+
+It is not specified whether these locks are reentrant (i.e. can be
+taken repeatedly by a single process) or whether they exclude
+different threads in a single process. That reentrancy is provided by
+LockableFiles.
+
+This defines two classes: ReadLock and WriteLock, which can be
+implemented in different ways on different platforms. Both have an
+unlock() method.
+"""
+
+from __future__ import absolute_import
+
+import errno
+import os
+import sys
+import warnings
+
+from bzrlib import (
+ debug,
+ errors,
+ osutils,
+ trace,
+ )
+from bzrlib.hooks import Hooks
+from bzrlib.i18n import gettext
+
+class LockHooks(Hooks):
+
+ def __init__(self):
+ Hooks.__init__(self, "bzrlib.lock", "Lock.hooks")
+ self.add_hook('lock_acquired',
+ "Called with a bzrlib.lock.LockResult when a physical lock is "
+ "acquired.", (1, 8))
+ self.add_hook('lock_released',
+ "Called with a bzrlib.lock.LockResult when a physical lock is "
+ "released.", (1, 8))
+ self.add_hook('lock_broken',
+ "Called with a bzrlib.lock.LockResult when a physical lock is "
+ "broken.", (1, 15))
+
+
+class Lock(object):
+ """Base class for locks.
+
+ :cvar hooks: Hook dictionary for operations on locks.
+ """
+
+ hooks = LockHooks()
+
+
+class LockResult(object):
+ """Result of an operation on a lock; passed to a hook"""
+
+ def __init__(self, lock_url, details=None):
+        """Create a lock result for the given lock, with optional details."""
+ self.lock_url = lock_url
+ self.details = details
+
+ def __eq__(self, other):
+ return self.lock_url == other.lock_url and self.details == other.details
+
+ def __repr__(self):
+ return '%s(%s, %s)' % (self.__class__.__name__,
+ self.lock_url, self.details)
+
+
+class LogicalLockResult(object):
+ """The result of a lock_read/lock_write/lock_tree_write call on lockables.
+
+ :ivar unlock: A callable which will unlock the lock.
+ """
+
+ def __init__(self, unlock):
+ self.unlock = unlock
+
+ def __repr__(self):
+ return "LogicalLockResult(%s)" % (self.unlock)
+
+
+
+def cant_unlock_not_held(locked_object):
+ """An attempt to unlock failed because the object was not locked.
+
+ This provides a policy point from which we can generate either a warning
+ or an exception.
+ """
+ # This is typically masking some other error and called from a finally
+ # block, so it's useful to have the option not to generate a new error
+ # here. You can use -Werror to make it fatal. It should possibly also
+ # raise LockNotHeld.
+ if 'unlock' in debug.debug_flags:
+ warnings.warn("%r is already unlocked" % (locked_object,),
+ stacklevel=3)
+ else:
+ raise errors.LockNotHeld(locked_object)
+
+
+try:
+ import fcntl
+ have_fcntl = True
+except ImportError:
+ have_fcntl = False
+
+have_pywin32 = False
+have_ctypes_win32 = False
+if sys.platform == 'win32':
+ import msvcrt
+ try:
+ import win32file, pywintypes, winerror
+ have_pywin32 = True
+ except ImportError:
+ pass
+
+ try:
+ import ctypes
+ have_ctypes_win32 = True
+ except ImportError:
+ pass
+
+
+class _OSLock(object):
+
+ def __init__(self):
+ self.f = None
+ self.filename = None
+
+ def _open(self, filename, filemode):
+ self.filename = osutils.realpath(filename)
+ try:
+ self.f = open(self.filename, filemode)
+ return self.f
+ except IOError, e:
+ if e.errno in (errno.EACCES, errno.EPERM):
+ raise errors.LockFailed(self.filename, str(e))
+ if e.errno != errno.ENOENT:
+ raise
+
+ # maybe this is an old branch (before may 2005)
+ trace.mutter("trying to create missing lock %r", self.filename)
+
+ self.f = open(self.filename, 'wb+')
+ return self.f
+
+ def _clear_f(self):
+ """Clear the self.f attribute cleanly."""
+ if self.f:
+ self.f.close()
+ self.f = None
+
+ def unlock(self):
+ raise NotImplementedError()
+
+
+_lock_classes = []
+
+
+if have_fcntl:
+
+ class _fcntl_FileLock(_OSLock):
+
+ def _unlock(self):
+ fcntl.lockf(self.f, fcntl.LOCK_UN)
+ self._clear_f()
+
+
+ class _fcntl_WriteLock(_fcntl_FileLock):
+
+ _open_locks = set()
+
+ def __init__(self, filename):
+ super(_fcntl_WriteLock, self).__init__()
+ # Check we can grab a lock before we actually open the file.
+ self.filename = osutils.realpath(filename)
+ if self.filename in _fcntl_WriteLock._open_locks:
+ self._clear_f()
+ raise errors.LockContention(self.filename)
+ if self.filename in _fcntl_ReadLock._open_locks:
+ if 'strict_locks' in debug.debug_flags:
+ self._clear_f()
+ raise errors.LockContention(self.filename)
+ else:
+ trace.mutter('Write lock taken w/ an open read lock on: %s'
+ % (self.filename,))
+
+ self._open(self.filename, 'rb+')
+ # reserve a slot for this lock - even if the lockf call fails,
+ # at this point unlock() will be called, because self.f is set.
+ # TODO: make this fully threadsafe, if we decide we care.
+ _fcntl_WriteLock._open_locks.add(self.filename)
+ try:
+ # LOCK_NB will cause IOError to be raised if we can't grab a
+ # lock right away.
+ fcntl.lockf(self.f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError, e:
+ if e.errno in (errno.EAGAIN, errno.EACCES):
+ # We couldn't grab the lock
+ self.unlock()
+                # we should be more precise about what's a locking
+                # error and what's a random-other error
+ raise errors.LockContention(self.filename, e)
+
+ def unlock(self):
+ _fcntl_WriteLock._open_locks.remove(self.filename)
+ self._unlock()
+
+
+ class _fcntl_ReadLock(_fcntl_FileLock):
+
+ _open_locks = {}
+
+ def __init__(self, filename):
+ super(_fcntl_ReadLock, self).__init__()
+ self.filename = osutils.realpath(filename)
+ if self.filename in _fcntl_WriteLock._open_locks:
+ if 'strict_locks' in debug.debug_flags:
+ # We raise before calling _open so we don't need to
+ # _clear_f
+ raise errors.LockContention(self.filename)
+ else:
+ trace.mutter('Read lock taken w/ an open write lock on: %s'
+ % (self.filename,))
+ _fcntl_ReadLock._open_locks.setdefault(self.filename, 0)
+ _fcntl_ReadLock._open_locks[self.filename] += 1
+ self._open(filename, 'rb')
+ try:
+ # LOCK_NB will cause IOError to be raised if we can't grab a
+ # lock right away.
+ fcntl.lockf(self.f, fcntl.LOCK_SH | fcntl.LOCK_NB)
+ except IOError, e:
+                # we should be more precise about what's a locking
+                # error and what's a random-other error
+ raise errors.LockContention(self.filename, e)
+
+ def unlock(self):
+ count = _fcntl_ReadLock._open_locks[self.filename]
+ if count == 1:
+ del _fcntl_ReadLock._open_locks[self.filename]
+ else:
+ _fcntl_ReadLock._open_locks[self.filename] = count - 1
+ self._unlock()
+
+ def temporary_write_lock(self):
+ """Try to grab a write lock on the file.
+
+ On platforms that support it, this will upgrade to a write lock
+ without unlocking the file.
+ Otherwise, this will release the read lock, and try to acquire a
+ write lock.
+
+ :return: A token which can be used to switch back to a read lock.
+ """
+ if self.filename in _fcntl_WriteLock._open_locks:
+ raise AssertionError('file already locked: %r'
+ % (self.filename,))
+ try:
+ wlock = _fcntl_TemporaryWriteLock(self)
+ except errors.LockError:
+ # We didn't unlock, so we can just return 'self'
+ return False, self
+ return True, wlock
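+        # Illustrative usage of the (success, lock) pair returned above
+        # (added comment; 'rlock' is a hypothetical existing read lock):
+        #
+        #   success, lock = rlock.temporary_write_lock()
+        #   try:
+        #       ...             # lock is a write lock only if success is True
+        #   finally:
+        #       if success:
+        #           rlock = lock.restore_read_lock()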
+
+
+ class _fcntl_TemporaryWriteLock(_OSLock):
+ """A token used when grabbing a temporary_write_lock.
+
+ Call restore_read_lock() when you are done with the write lock.
+ """
+
+ def __init__(self, read_lock):
+ super(_fcntl_TemporaryWriteLock, self).__init__()
+ self._read_lock = read_lock
+ self.filename = read_lock.filename
+
+ count = _fcntl_ReadLock._open_locks[self.filename]
+ if count > 1:
+ # Something else also has a read-lock, so we cannot grab a
+ # write lock.
+ raise errors.LockContention(self.filename)
+
+ if self.filename in _fcntl_WriteLock._open_locks:
+ raise AssertionError('file already locked: %r'
+ % (self.filename,))
+
+ # See if we can open the file for writing. Another process might
+ # have a read lock. We don't use self._open() because we don't want
+ # to create the file if it exists. That would have already been
+ # done by _fcntl_ReadLock
+ try:
+ new_f = open(self.filename, 'rb+')
+ except IOError, e:
+ if e.errno in (errno.EACCES, errno.EPERM):
+ raise errors.LockFailed(self.filename, str(e))
+ raise
+ try:
+ # LOCK_NB will cause IOError to be raised if we can't grab a
+ # lock right away.
+ fcntl.lockf(new_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError, e:
+ # TODO: Raise a more specific error based on the type of error
+ raise errors.LockContention(self.filename, e)
+ _fcntl_WriteLock._open_locks.add(self.filename)
+
+ self.f = new_f
+
+ def restore_read_lock(self):
+ """Restore the original ReadLock."""
+ # For fcntl, since we never released the read lock, just release the
+ # write lock, and return the original lock.
+ fcntl.lockf(self.f, fcntl.LOCK_UN)
+ self._clear_f()
+ _fcntl_WriteLock._open_locks.remove(self.filename)
+ # Avoid reference cycles
+ read_lock = self._read_lock
+ self._read_lock = None
+ return read_lock
+
+
+ _lock_classes.append(('fcntl', _fcntl_WriteLock, _fcntl_ReadLock))
+
+
+if have_pywin32 and sys.platform == 'win32':
+ if os.path.supports_unicode_filenames:
+ # for Windows NT/2K/XP/etc
+ win32file_CreateFile = win32file.CreateFileW
+ else:
+ # for Windows 98
+ win32file_CreateFile = win32file.CreateFile
+
+ class _w32c_FileLock(_OSLock):
+
+ def _open(self, filename, access, share, cflags, pymode):
+ self.filename = osutils.realpath(filename)
+ try:
+ self._handle = win32file_CreateFile(filename, access, share,
+ None, win32file.OPEN_ALWAYS,
+ win32file.FILE_ATTRIBUTE_NORMAL, None)
+ except pywintypes.error, e:
+ if e.args[0] == winerror.ERROR_ACCESS_DENIED:
+ raise errors.LockFailed(filename, e)
+ if e.args[0] == winerror.ERROR_SHARING_VIOLATION:
+ raise errors.LockContention(filename, e)
+ raise
+ fd = win32file._open_osfhandle(self._handle, cflags)
+ self.f = os.fdopen(fd, pymode)
+ return self.f
+
+ def unlock(self):
+ self._clear_f()
+ self._handle = None
+
+
+ class _w32c_ReadLock(_w32c_FileLock):
+ def __init__(self, filename):
+ super(_w32c_ReadLock, self).__init__()
+ self._open(filename, win32file.GENERIC_READ,
+ win32file.FILE_SHARE_READ, os.O_RDONLY, "rb")
+
+ def temporary_write_lock(self):
+ """Try to grab a write lock on the file.
+
+ On platforms that support it, this will upgrade to a write lock
+ without unlocking the file.
+ Otherwise, this will release the read lock, and try to acquire a
+ write lock.
+
+ :return: A token which can be used to switch back to a read lock.
+ """
+ # I can't find a way to upgrade a read lock to a write lock without
+ # unlocking first. So here, we do just that.
+ self.unlock()
+ try:
+ wlock = _w32c_WriteLock(self.filename)
+ except errors.LockError:
+ return False, _w32c_ReadLock(self.filename)
+ return True, wlock
+
+
+ class _w32c_WriteLock(_w32c_FileLock):
+ def __init__(self, filename):
+ super(_w32c_WriteLock, self).__init__()
+ self._open(filename,
+ win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0,
+ os.O_RDWR, "rb+")
+
+ def restore_read_lock(self):
+ """Restore the original ReadLock."""
+ # For win32 we had to completely let go of the original lock, so we
+ # just unlock and create a new read lock.
+ self.unlock()
+ return _w32c_ReadLock(self.filename)
+
+
+ _lock_classes.append(('pywin32', _w32c_WriteLock, _w32c_ReadLock))
+
+
+if have_ctypes_win32:
+ from ctypes.wintypes import DWORD, LPCSTR, LPCWSTR
+ LPSECURITY_ATTRIBUTES = ctypes.c_void_p # used as NULL no need to declare
+ HANDLE = ctypes.c_int # rather than unsigned as in ctypes.wintypes
+ if os.path.supports_unicode_filenames:
+ _function_name = "CreateFileW"
+ LPTSTR = LPCWSTR
+ else:
+ _function_name = "CreateFileA"
+ class LPTSTR(LPCSTR):
+ def __new__(cls, obj):
+ return LPCSTR.__new__(cls, obj.encode("mbcs"))
+
+ # CreateFile <http://msdn.microsoft.com/en-us/library/aa363858.aspx>
+ _CreateFile = ctypes.WINFUNCTYPE(
+ HANDLE, # return value
+ LPTSTR, # lpFileName
+ DWORD, # dwDesiredAccess
+ DWORD, # dwShareMode
+ LPSECURITY_ATTRIBUTES, # lpSecurityAttributes
+ DWORD, # dwCreationDisposition
+ DWORD, # dwFlagsAndAttributes
+ HANDLE # hTemplateFile
+ )((_function_name, ctypes.windll.kernel32))
+
+ INVALID_HANDLE_VALUE = -1
+
+ GENERIC_READ = 0x80000000
+ GENERIC_WRITE = 0x40000000
+ FILE_SHARE_READ = 1
+ OPEN_ALWAYS = 4
+ FILE_ATTRIBUTE_NORMAL = 128
+
+ ERROR_ACCESS_DENIED = 5
+ ERROR_SHARING_VIOLATION = 32
+
+ class _ctypes_FileLock(_OSLock):
+
+ def _open(self, filename, access, share, cflags, pymode):
+ self.filename = osutils.realpath(filename)
+ handle = _CreateFile(filename, access, share, None, OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, 0)
+ if handle in (INVALID_HANDLE_VALUE, 0):
+ e = ctypes.WinError()
+ if e.args[0] == ERROR_ACCESS_DENIED:
+ raise errors.LockFailed(filename, e)
+ if e.args[0] == ERROR_SHARING_VIOLATION:
+ raise errors.LockContention(filename, e)
+ raise e
+ fd = msvcrt.open_osfhandle(handle, cflags)
+ self.f = os.fdopen(fd, pymode)
+ return self.f
+
+ def unlock(self):
+ self._clear_f()
+
+
+ class _ctypes_ReadLock(_ctypes_FileLock):
+ def __init__(self, filename):
+ super(_ctypes_ReadLock, self).__init__()
+ self._open(filename, GENERIC_READ, FILE_SHARE_READ, os.O_RDONLY,
+ "rb")
+
+ def temporary_write_lock(self):
+ """Try to grab a write lock on the file.
+
+ On platforms that support it, this will upgrade to a write lock
+ without unlocking the file.
+ Otherwise, this will release the read lock, and try to acquire a
+ write lock.
+
+ :return: A token which can be used to switch back to a read lock.
+ """
+ # I can't find a way to upgrade a read lock to a write lock without
+ # unlocking first. So here, we do just that.
+ self.unlock()
+ try:
+ wlock = _ctypes_WriteLock(self.filename)
+ except errors.LockError:
+ return False, _ctypes_ReadLock(self.filename)
+ return True, wlock
+
+ class _ctypes_WriteLock(_ctypes_FileLock):
+ def __init__(self, filename):
+ super(_ctypes_WriteLock, self).__init__()
+ self._open(filename, GENERIC_READ | GENERIC_WRITE, 0, os.O_RDWR,
+ "rb+")
+
+ def restore_read_lock(self):
+ """Restore the original ReadLock."""
+ # For win32 we had to completely let go of the original lock, so we
+ # just unlock and create a new read lock.
+ self.unlock()
+ return _ctypes_ReadLock(self.filename)
+
+
+ _lock_classes.append(('ctypes', _ctypes_WriteLock, _ctypes_ReadLock))
+
+
+if len(_lock_classes) == 0:
+ raise NotImplementedError(
+ "We must have one of fcntl, pywin32, or ctypes available"
+ " to support OS locking."
+ )
+
+
+# We default to using the first available lock class.
+_lock_type, WriteLock, ReadLock = _lock_classes[0]
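+
+# Illustrative sketch (added comment): on a POSIX platform with fcntl the
+# names above resolve to the fcntl-based classes, so taking and releasing an
+# exclusive lock looks like ('some-file' is a placeholder path):
+#
+#   w = WriteLock('some-file')    # raises LockContention if already locked
+#   try:
+#       ...                       # file is held with an exclusive OS lock
+#   finally:
+#       w.unlock()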
+
+
+class _RelockDebugMixin(object):
+ """Mixin support for -Drelock flag.
+
+ Add this as a base class then call self._note_lock with 'r' or 'w' when
+ acquiring a read- or write-lock. If this object was previously locked (and
+ locked the same way), and -Drelock is set, then this will trace.note a
+ message about it.
+ """
+
+ _prev_lock = None
+
+ def _note_lock(self, lock_type):
+ if 'relock' in debug.debug_flags and self._prev_lock == lock_type:
+ if lock_type == 'r':
+ type_name = 'read'
+ else:
+ type_name = 'write'
+ trace.note(gettext('{0!r} was {1} locked again'), self, type_name)
+ self._prev_lock = lock_type
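+
+    # Illustrative sketch (added comment): a lock implementation mixes this
+    # in and calls _note_lock while acquiring, e.g.
+    #
+    #   class SomeLockable(_RelockDebugMixin, object):   # hypothetical class
+    #       def lock_read(self):
+    #           self._note_lock('r')
+    #           ...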
+
diff --git a/bzrlib/lockable_files.py b/bzrlib/lockable_files.py
new file mode 100644
index 0000000..f56f415
--- /dev/null
+++ b/bzrlib/lockable_files.py
@@ -0,0 +1,301 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import warnings
+
+from bzrlib import (
+ counted_lock,
+ errors,
+ lock,
+ osutils,
+ transactions,
+ urlutils,
+ )
+""")
+
+from bzrlib.decorators import (
+ only_raises,
+ )
+
+
+class LockableFiles(object):
+ """Object representing a set of related files locked within the same scope.
+
+ This coordinates access to the lock along with providing a transaction.
+
+    LockableFiles manages a lock count and can be locked repeatedly by
+ a single caller. (The underlying lock implementation generally does not
+ support this.)
+
+ Instances of this class are often called control_files.
+
+ This class is now deprecated; code should move to using the Transport
+ directly for file operations and using the lock or CountedLock for
+ locking.
+
+ :ivar _lock: The real underlying lock (e.g. a LockDir)
+ :ivar _lock_count: If _lock_mode is true, a positive count of the number
+ of times the lock has been taken (and not yet released) *by this
+ process*, through this particular object instance.
+ :ivar _lock_mode: None, or 'r' or 'w'
+ """
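+
+    # Illustrative sketch of the counted locking described above (added
+    # comment; the lock_class argument is whatever strategy the caller picks,
+    # e.g. lockdir.LockDir):
+    #
+    #   files = LockableFiles(transport, 'lock', lockdir.LockDir)
+    #   files.lock_write()    # count 1: physical lock taken
+    #   files.lock_write()    # count 2: no new physical lock
+    #   files.unlock()        # count 1: still locked
+    #   files.unlock()        # count 0: physical lock released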
+
+ def __init__(self, transport, lock_name, lock_class):
+ """Create a LockableFiles group
+
+ :param transport: Transport pointing to the directory holding the
+ control files and lock.
+ :param lock_name: Name of the lock guarding these files.
+ :param lock_class: Class of lock strategy to use: typically
+ either LockDir or TransportLock.
+ """
+ self._transport = transport
+ self.lock_name = lock_name
+ self._transaction = None
+ self._lock_mode = None
+ self._lock_count = 0
+ self._find_modes()
+ esc_name = self._escape(lock_name)
+ self._lock = lock_class(transport, esc_name,
+ file_modebits=self._file_mode,
+ dir_modebits=self._dir_mode)
+ self._counted_lock = counted_lock.CountedLock(self._lock)
+
+ def create_lock(self):
+ """Create the lock.
+
+ This should normally be called only when the LockableFiles directory
+ is first created on disk.
+ """
+ self._lock.create(mode=self._dir_mode)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__,
+ self._transport)
+
+ def __str__(self):
+ return 'LockableFiles(%s, %s)' % (self.lock_name, self._transport.base)
+
+ def break_lock(self):
+ """Break the lock of this lockable files group if it is held.
+
+        The current ui factory will be used to prompt for user confirmation.
+ """
+ self._lock.break_lock()
+
+ def _escape(self, file_or_path):
+ """DEPRECATED: Do not use outside this class"""
+ if not isinstance(file_or_path, basestring):
+ file_or_path = '/'.join(file_or_path)
+ if file_or_path == '':
+ return u''
+ return urlutils.escape(osutils.safe_unicode(file_or_path))
+
+ def _find_modes(self):
+ """Determine the appropriate modes for files and directories.
+
+ :deprecated: Replaced by BzrDir._find_creation_modes.
+ """
+ # XXX: The properties created by this can be removed or deprecated
+ # once all the _get_text_store methods etc no longer use them.
+ # -- mbp 20080512
+ try:
+ st = self._transport.stat('.')
+ except errors.TransportNotPossible:
+ self._dir_mode = 0755
+ self._file_mode = 0644
+ else:
+ # Check the directory mode, but also make sure the created
+ # directories and files are read-write for this user. This is
+ # mostly a workaround for filesystems which lie about being able to
+ # write to a directory (cygwin & win32)
+ self._dir_mode = (st.st_mode & 07777) | 00700
+ # Remove the sticky and execute bits for files
+ self._file_mode = self._dir_mode & ~07111
+
+ def leave_in_place(self):
+ """Set this LockableFiles to not clear the physical lock on unlock."""
+ self._lock.leave_in_place()
+
+ def dont_leave_in_place(self):
+ """Set this LockableFiles to clear the physical lock on unlock."""
+ self._lock.dont_leave_in_place()
+
+ def lock_write(self, token=None):
+ """Lock this group of files for writing.
+
+ :param token: if this is already locked, then lock_write will fail
+ unless the token matches the existing lock.
+ :returns: a token if this instance supports tokens, otherwise None.
+ :raises TokenLockingNotSupported: when a token is given but this
+ instance doesn't support using token locks.
+ :raises MismatchedToken: if the specified token doesn't match the token
+ of the existing lock.
+
+ A token should be passed in if you know that you have locked the object
+ some other way, and need to synchronise this object's state with that
+ fact.
+ """
+ if self._lock_mode:
+ if (self._lock_mode != 'w'
+ or not self.get_transaction().writeable()):
+ raise errors.ReadOnlyError(self)
+ self._lock.validate_token(token)
+ self._lock_count += 1
+ return self._token_from_lock
+ else:
+ token_from_lock = self._lock.lock_write(token=token)
+ #traceback.print_stack()
+ self._lock_mode = 'w'
+ self._lock_count = 1
+ self._set_write_transaction()
+ self._token_from_lock = token_from_lock
+ return token_from_lock
+
+ def lock_read(self):
+ if self._lock_mode:
+ if self._lock_mode not in ('r', 'w'):
+ raise ValueError("invalid lock mode %r" % (self._lock_mode,))
+ self._lock_count += 1
+ else:
+ self._lock.lock_read()
+ #traceback.print_stack()
+ self._lock_mode = 'r'
+ self._lock_count = 1
+ self._set_read_transaction()
+
+ def _set_read_transaction(self):
+ """Setup a read transaction."""
+ self._set_transaction(transactions.ReadOnlyTransaction())
+        # 5K may be excessive, but hey, it's a knob.
+ self.get_transaction().set_cache_size(5000)
+
+ def _set_write_transaction(self):
+ """Setup a write transaction."""
+ self._set_transaction(transactions.WriteTransaction())
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ if not self._lock_mode:
+ return lock.cant_unlock_not_held(self)
+ if self._lock_count > 1:
+ self._lock_count -= 1
+ else:
+ #traceback.print_stack()
+ self._finish_transaction()
+ try:
+ self._lock.unlock()
+ finally:
+ self._lock_mode = self._lock_count = None
+
+ def is_locked(self):
+ """Return true if this LockableFiles group is locked"""
+ return self._lock_count >= 1
+
+ def get_physical_lock_status(self):
+ """Return physical lock status.
+
+ Returns true if a lock is held on the transport. If no lock is held, or
+ the underlying locking mechanism does not support querying lock
+ status, false is returned.
+ """
+ try:
+ return self._lock.peek() is not None
+ except NotImplementedError:
+ return False
+
+ def get_transaction(self):
+ """Return the current active transaction.
+
+ If no transaction is active, this returns a passthrough object
+ for which all data is immediately flushed and no caching happens.
+ """
+ if self._transaction is None:
+ return transactions.PassThroughTransaction()
+ else:
+ return self._transaction
+
+ def _set_transaction(self, new_transaction):
+ """Set a new active transaction."""
+ if self._transaction is not None:
+ raise errors.LockError('Branch %s is in a transaction already.' %
+ self)
+ self._transaction = new_transaction
+
+ def _finish_transaction(self):
+ """Exit the current transaction."""
+ if self._transaction is None:
+ raise errors.LockError('Branch %s is not in a transaction' %
+ self)
+ transaction = self._transaction
+ self._transaction = None
+ transaction.finish()
+
+
+class TransportLock(object):
+ """Locking method which uses transport-dependent locks.
+
+ On the local filesystem these transform into OS-managed locks.
+
+ These do not guard against concurrent access via different
+ transports.
+
+ This is suitable for use only in WorkingTrees (which are at present
+ always local).
+ """
+ def __init__(self, transport, escaped_name, file_modebits, dir_modebits):
+ self._transport = transport
+ self._escaped_name = escaped_name
+ self._file_modebits = file_modebits
+ self._dir_modebits = dir_modebits
+
+ def break_lock(self):
+ raise NotImplementedError(self.break_lock)
+
+ def leave_in_place(self):
+ raise NotImplementedError(self.leave_in_place)
+
+ def dont_leave_in_place(self):
+ raise NotImplementedError(self.dont_leave_in_place)
+
+ def lock_write(self, token=None):
+ if token is not None:
+ raise errors.TokenLockingNotSupported(self)
+ self._lock = self._transport.lock_write(self._escaped_name)
+
+ def lock_read(self):
+ self._lock = self._transport.lock_read(self._escaped_name)
+
+ def unlock(self):
+ self._lock.unlock()
+ self._lock = None
+
+ def peek(self):
+ raise NotImplementedError()
+
+ def create(self, mode=None):
+ """Create lock mechanism"""
+ # for old-style locks, create the file now
+ self._transport.put_bytes(self._escaped_name, '',
+ mode=self._file_modebits)
+
+ def validate_token(self, token):
+ if token is not None:
+ raise errors.TokenLockingNotSupported(self)
diff --git a/bzrlib/lockdir.py b/bzrlib/lockdir.py
new file mode 100644
index 0000000..5821e4e
--- /dev/null
+++ b/bzrlib/lockdir.py
@@ -0,0 +1,863 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""On-disk mutex protecting a resource
+
+bzr on-disk objects are locked by the existence of a directory with a
+particular name within the control directory. We use this rather than OS
+internal locks (such as flock etc) because they can be seen across all
+transports, including http.
+
+Objects can be read if there is only physical read access; therefore
+readers can never be required to create a lock, though they will
+check whether a writer is using the lock. Writers can't detect
+whether anyone else is reading from the resource as they write.
+This works because of ordering constraints that make sure readers
+see a consistent view of existing data.
+
+Waiting for a lock must be done by polling; this can be aborted after
+a timeout.
+
+Locks must always be explicitly released, typically from a try/finally
+block -- they are not released from a finalizer or when Python
+exits.
+
+Locks may fail to be released if the process is abruptly terminated
+(machine stop, SIGKILL) or if a remote transport becomes permanently
+disconnected. There is therefore a method to break an existing lock.
+This should rarely be used, and generally only with user approval.
+Locks contain some information on when the lock was taken and by whom,
+which may help in deciding whether it can safely be broken. (This is
+similar to the messages displayed by emacs and vim.) Note that if the
+lock holder is still alive they will get no notification that the lock
+has been broken and will continue their work -- so it is important to be
+sure they are actually dead.
+
+A lock is represented on disk by a directory of a particular name,
+containing an information file. Taking a lock is done by renaming a
+temporary directory into place. We use temporary directories because
+for all known transports and filesystems we believe that exactly one
+attempt to claim the lock will succeed and the others will fail. (Files
+won't do because some filesystems or transports only have
+rename-and-overwrite, making it hard to tell who won.)
+
+The desired characteristics are:
+
+* Locks are not reentrant. (That is, a client that tries to take a
+ lock it already holds may deadlock or fail.)
+* Stale locks can be guessed at by a heuristic
+* Lost locks can be broken by any client
+* Failed lock operations leave little or no mess
+* Deadlocks are avoided by having a timeout always in use, clients
+ desiring indefinite waits can retry or set a silly big timeout.
+
+Storage formats use the locks, and also need to consider concurrency
+issues underneath the lock. A format may choose not to use a lock
+at all for some operations.
+
+LockDirs always operate over a Transport. The transport may be readonly, in
+which case the lock can be queried but not acquired.
+
+Locks are identified by a path name, relative to a base transport.
+
+Calling code will typically want to make sure there is exactly one LockDir
+object per actual lock on disk. This module does nothing to prevent aliasing
+and deadlocks will likely occur if the locks are aliased.
+
+In the future we may add a "freshen" method which can be called
+by a lock holder to check that their lock has not been broken, and to
+update the timestamp within it.
+
+Example usage:
+
+>>> from bzrlib.transport.memory import MemoryTransport
+>>> # typically will be obtained from a BzrDir, Branch, etc
+>>> t = MemoryTransport()
+>>> l = LockDir(t, 'sample-lock')
+>>> l.create()
+>>> token = l.wait_lock()
+>>> # do something here
+>>> l.unlock()
+
+Some classes of stale locks can be predicted by checking: the host name is the
+same as the local host name; the user name is the same as the local user; the
+process id no longer exists. The check on user name is not strictly necessary
+but helps protect against colliding host names.
+"""
+
+from __future__ import absolute_import
+
+
+# TODO: We sometimes have the problem that our attempt to rename '1234' to
+# 'held' fails because the transport server moves it into an existing directory,
+# rather than failing the rename. If we made the info file name the same as
+# the locked directory name we would avoid this problem because moving into
+# the held directory would implicitly clash. However this would not mesh with
+# the existing locking code and needs a new format of the containing object.
+# -- robertc, mbp 20070628
+
+import os
+import time
+
+from bzrlib import (
+ config,
+ debug,
+ errors,
+ lock,
+ osutils,
+ ui,
+ urlutils,
+ )
+from bzrlib.decorators import only_raises
+from bzrlib.errors import (
+ DirectoryNotEmpty,
+ FileExists,
+ LockBreakMismatch,
+ LockBroken,
+ LockContention,
+ LockCorrupt,
+ LockFailed,
+ LockNotHeld,
+ NoSuchFile,
+ PathError,
+ ResourceBusy,
+ TransportError,
+ )
+from bzrlib.trace import mutter, note
+from bzrlib.osutils import format_delta, rand_chars, get_host_name
+from bzrlib.i18n import gettext
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import rio
+""")
+
+# XXX: At the moment there is no consideration of thread safety on LockDir
+# objects. This should perhaps be updated - e.g. if two threads try to take a
+# lock at the same time they should *both* get it. But then that's unlikely
+# to be a good idea.
+
+# TODO: Perhaps store some kind of note like the bzr command line in the lock
+# info?
+
+# TODO: Some kind of callback run while polling a lock to show progress
+# indicators.
+
+# TODO: Make sure to pass the right file and directory mode bits to all
+# files/dirs created.
+
+
+_DEFAULT_TIMEOUT_SECONDS = 30
+_DEFAULT_POLL_SECONDS = 1.0
+
+
+class LockDir(lock.Lock):
+ """Write-lock guarding access to data.
+ """
+
+ __INFO_NAME = '/info'
+
+ def __init__(self, transport, path, file_modebits=0644, dir_modebits=0755,
+ extra_holder_info=None):
+ """Create a new LockDir object.
+
+ The LockDir is initially unlocked - this just creates the object.
+
+ :param transport: Transport which will contain the lock
+
+ :param path: Path to the lock within the base directory of the
+ transport.
+
+ :param extra_holder_info: If passed, {str:str} dict of extra or
+ updated information to insert into the info file when the lock is
+ taken.
+ """
+ self.transport = transport
+ self.path = path
+ self._lock_held = False
+ self._locked_via_token = False
+ self._fake_read_lock = False
+ self._held_dir = path + '/held'
+ self._held_info_path = self._held_dir + self.__INFO_NAME
+ self._file_modebits = file_modebits
+ self._dir_modebits = dir_modebits
+ self._report_function = note
+ self.extra_holder_info = extra_holder_info
+ self._warned_about_lock_holder = None
+
+ def __repr__(self):
+ return '%s(%s%s)' % (self.__class__.__name__,
+ self.transport.base,
+ self.path)
+
+ is_held = property(lambda self: self._lock_held)
+
+ def create(self, mode=None):
+ """Create the on-disk lock.
+
+ This is typically only called when the object/directory containing the
+ directory is first created. The lock is not held when it's created.
+ """
+ self._trace("create lock directory")
+ try:
+ self.transport.mkdir(self.path, mode=mode)
+ except (TransportError, PathError), e:
+ raise LockFailed(self, e)
+
+ def _attempt_lock(self):
+ """Make the pending directory and attempt to rename into place.
+
+ If the rename succeeds, we read back the info file to check that we
+ really got the lock.
+
+ If we fail to acquire the lock, this method is responsible for
+ cleaning up the pending directory if possible. (But it doesn't do
+ that yet.)
+
+ :returns: The nonce of the lock, if it was successfully acquired.
+
+ :raises LockContention: If the lock is held by someone else. The
+ exception contains the info of the current holder of the lock.
+ """
+ self._trace("lock_write...")
+ start_time = time.time()
+ try:
+ tmpname = self._create_pending_dir()
+ except (errors.TransportError, PathError), e:
+ self._trace("... failed to create pending dir, %s", e)
+ raise LockFailed(self, e)
+ while True:
+ try:
+ self.transport.rename(tmpname, self._held_dir)
+ break
+ except (errors.TransportError, PathError, DirectoryNotEmpty,
+ FileExists, ResourceBusy), e:
+ self._trace("... contention, %s", e)
+ other_holder = self.peek()
+ self._trace("other holder is %r" % other_holder)
+ try:
+ self._handle_lock_contention(other_holder)
+ except:
+ self._remove_pending_dir(tmpname)
+ raise
+ except Exception, e:
+ self._trace("... lock failed, %s", e)
+ self._remove_pending_dir(tmpname)
+ raise
+ # We must check we really got the lock, because Launchpad's sftp
+        # server at one time had a bug where the rename would successfully
+ # move the new directory into the existing directory, which was
+ # incorrect. It's possible some other servers or filesystems will
+ # have a similar bug allowing someone to think they got the lock
+ # when it's already held.
+ #
+ # See <https://bugs.launchpad.net/bzr/+bug/498378> for one case.
+ #
+ # Strictly the check is unnecessary and a waste of time for most
+ # people, but probably worth trapping if something is wrong.
+ info = self.peek()
+ self._trace("after locking, info=%r", info)
+ if info is None:
+ raise LockFailed(self, "lock was renamed into place, but "
+ "now is missing!")
+ if info.get('nonce') != self.nonce:
+ self._trace("rename succeeded, "
+ "but lock is still held by someone else")
+ raise LockContention(self)
+ self._lock_held = True
+ self._trace("... lock succeeded after %dms",
+ (time.time() - start_time) * 1000)
+ return self.nonce
+
+ def _handle_lock_contention(self, other_holder):
+ """A lock we want to take is held by someone else.
+
+ This function can: tell the user about it; possibly detect that it's
+ safe or appropriate to steal the lock, or just raise an exception.
+
+ If this function returns (without raising an exception) the lock will
+ be attempted again.
+
+ :param other_holder: A LockHeldInfo for the current holder; note that
+ it might be None if the lock can be seen to be held but the info
+ can't be read.
+ """
+ if (other_holder is not None):
+ if other_holder.is_lock_holder_known_dead():
+ if self.get_config().get('locks.steal_dead'):
+ ui.ui_factory.show_user_warning(
+ 'locks_steal_dead',
+ lock_url=urlutils.join(self.transport.base, self.path),
+ other_holder_info=unicode(other_holder))
+ self.force_break(other_holder)
+ self._trace("stole lock from dead holder")
+ return
+ raise LockContention(self)
+
+ def _remove_pending_dir(self, tmpname):
+ """Remove the pending directory
+
+ This is called if we failed to rename into place, so that the pending
+ dirs don't clutter up the lockdir.
+ """
+ self._trace("remove %s", tmpname)
+ try:
+ self.transport.delete(tmpname + self.__INFO_NAME)
+ self.transport.rmdir(tmpname)
+ except PathError, e:
+ note(gettext("error removing pending lock: %s"), e)
+
+ def _create_pending_dir(self):
+ tmpname = '%s/%s.tmp' % (self.path, rand_chars(10))
+ try:
+ self.transport.mkdir(tmpname)
+ except NoSuchFile:
+ # This may raise a FileExists exception
+ # which is okay, it will be caught later and determined
+ # to be a LockContention.
+ self._trace("lock directory does not exist, creating it")
+ self.create(mode=self._dir_modebits)
+ # After creating the lock directory, try again
+ self.transport.mkdir(tmpname)
+ info = LockHeldInfo.for_this_process(self.extra_holder_info)
+ self.nonce = info.get('nonce')
+ # We use put_file_non_atomic because we just created a new unique
+ # directory so we don't have to worry about files existing there.
+ # We'll rename the whole directory into place to get atomic
+ # properties
+ self.transport.put_bytes_non_atomic(tmpname + self.__INFO_NAME,
+ info.to_bytes())
+ return tmpname
+
+ @only_raises(LockNotHeld, LockBroken)
+ def unlock(self):
+ """Release a held lock
+ """
+ if self._fake_read_lock:
+ self._fake_read_lock = False
+ return
+ if not self._lock_held:
+ return lock.cant_unlock_not_held(self)
+ if self._locked_via_token:
+ self._locked_via_token = False
+ self._lock_held = False
+ else:
+ old_nonce = self.nonce
+ # rename before deleting, because we can't atomically remove the
+ # whole tree
+ start_time = time.time()
+ self._trace("unlocking")
+ tmpname = '%s/releasing.%s.tmp' % (self.path, rand_chars(20))
+ # gotta own it to unlock
+ self.confirm()
+ self.transport.rename(self._held_dir, tmpname)
+ self._lock_held = False
+ self.transport.delete(tmpname + self.__INFO_NAME)
+ try:
+ self.transport.rmdir(tmpname)
+ except DirectoryNotEmpty, e:
+ # There might have been junk left over by a rename that moved
+                # another locker within the 'held' directory. Do a slower
+ # deletion where we list the directory and remove everything
+ # within it.
+ #
+ # Maybe this should be broader to allow for ftp servers with
+ # non-specific error messages?
+ self._trace("doing recursive deletion of non-empty directory "
+ "%s", tmpname)
+ self.transport.delete_tree(tmpname)
+ self._trace("... unlock succeeded after %dms",
+ (time.time() - start_time) * 1000)
+ result = lock.LockResult(self.transport.abspath(self.path),
+ old_nonce)
+ for hook in self.hooks['lock_released']:
+ hook(result)
+
+ def break_lock(self):
+ """Break a lock not held by this instance of LockDir.
+
+ This is a UI centric function: it uses the ui.ui_factory to
+ prompt for input if a lock is detected and there is any doubt about
+ it possibly being still active. force_break is the non-interactive
+ version.
+
+ :returns: LockResult for the broken lock.
+ """
+ self._check_not_locked()
+ try:
+ holder_info = self.peek()
+ except LockCorrupt, e:
+ # The lock info is corrupt.
+ if ui.ui_factory.get_boolean(u"Break (corrupt %r)" % (self,)):
+ self.force_break_corrupt(e.file_data)
+ return
+ if holder_info is not None:
+ if ui.ui_factory.confirm_action(
+ u"Break %(lock_info)s",
+ 'bzrlib.lockdir.break',
+ dict(lock_info=unicode(holder_info))):
+ result = self.force_break(holder_info)
+ ui.ui_factory.show_message(
+ "Broke lock %s" % result.lock_url)
+
+ def force_break(self, dead_holder_info):
+ """Release a lock held by another process.
+
+ WARNING: This should only be used when the other process is dead; if
+ it still thinks it has the lock there will be two concurrent writers.
+ In general the user's approval should be sought for lock breaks.
+
+ After the lock is broken it will not be held by any process.
+ It is possible that another process may sneak in and take the
+ lock before the breaking process acquires it.
+
+ :param dead_holder_info:
+ Must be the result of a previous LockDir.peek() call; this is used
+ to check that it's still held by the same process that the user
+ decided was dead. If this is not the current holder,
+ LockBreakMismatch is raised.
+
+ :returns: LockResult for the broken lock.
+ """
+ if not isinstance(dead_holder_info, LockHeldInfo):
+ raise ValueError("dead_holder_info: %r" % dead_holder_info)
+ self._check_not_locked()
+ current_info = self.peek()
+ if current_info is None:
+ # must have been recently released
+ return
+ if current_info != dead_holder_info:
+ raise LockBreakMismatch(self, current_info, dead_holder_info)
+ tmpname = '%s/broken.%s.tmp' % (self.path, rand_chars(20))
+ self.transport.rename(self._held_dir, tmpname)
+ # check that we actually broke the right lock, not someone else;
+ # there's a small race window between checking it and doing the
+ # rename.
+ broken_info_path = tmpname + self.__INFO_NAME
+ broken_info = self._read_info_file(broken_info_path)
+ if broken_info != dead_holder_info:
+ raise LockBreakMismatch(self, broken_info, dead_holder_info)
+ self.transport.delete(broken_info_path)
+ self.transport.rmdir(tmpname)
+ result = lock.LockResult(self.transport.abspath(self.path),
+ current_info.get('nonce'))
+ for hook in self.hooks['lock_broken']:
+ hook(result)
+ return result
+
+ def force_break_corrupt(self, corrupt_info_lines):
+ """Release a lock that has been corrupted.
+
+        This is very similar to force_break, except it doesn't assume that
+ self.peek() can work.
+
+ :param corrupt_info_lines: the lines of the corrupted info file, used
+ to check that the lock hasn't changed between reading the (corrupt)
+ info file and calling force_break_corrupt.
+ """
+ # XXX: this copes with unparseable info files, but what about missing
+ # info files? Or missing lock dirs?
+ self._check_not_locked()
+ tmpname = '%s/broken.%s.tmp' % (self.path, rand_chars(20))
+ self.transport.rename(self._held_dir, tmpname)
+ # check that we actually broke the right lock, not someone else;
+ # there's a small race window between checking it and doing the
+ # rename.
+ broken_info_path = tmpname + self.__INFO_NAME
+ broken_content = self.transport.get_bytes(broken_info_path)
+ broken_lines = osutils.split_lines(broken_content)
+ if broken_lines != corrupt_info_lines:
+ raise LockBreakMismatch(self, broken_lines, corrupt_info_lines)
+ self.transport.delete(broken_info_path)
+ self.transport.rmdir(tmpname)
+ result = lock.LockResult(self.transport.abspath(self.path))
+ for hook in self.hooks['lock_broken']:
+ hook(result)
+
+ def _check_not_locked(self):
+ """If the lock is held by this instance, raise an error."""
+ if self._lock_held:
+ raise AssertionError("can't break own lock: %r" % self)
+
+ def confirm(self):
+ """Make sure that the lock is still held by this locker.
+
+ This should only fail if the lock was broken by user intervention,
+ or if the lock has been affected by a bug.
+
+ If the lock is not thought to be held, raises LockNotHeld. If
+ the lock is thought to be held but has been broken, raises
+ LockBroken.
+ """
+ if not self._lock_held:
+ raise LockNotHeld(self)
+ info = self.peek()
+ if info is None:
+ # no lock there anymore!
+ raise LockBroken(self)
+ if info.get('nonce') != self.nonce:
+ # there is a lock, but not ours
+ raise LockBroken(self)
+
+ def _read_info_file(self, path):
+ """Read one given info file.
+
+ peek() reads the info file of the lock holder, if any.
+ """
+ return LockHeldInfo.from_info_file_bytes(
+ self.transport.get_bytes(path))
+
+ def peek(self):
+ """Check if the lock is held by anyone.
+
+ If it is held, this returns the lock info structure as a dict
+ which contains some information about the current lock holder.
+ Otherwise returns None.
+ """
+ try:
+ info = self._read_info_file(self._held_info_path)
+ self._trace("peek -> held")
+ return info
+ except NoSuchFile, e:
+ self._trace("peek -> not held")
+
+ def _prepare_info(self):
+ """Write information about a pending lock to a temporary file.
+ """
+
+ def attempt_lock(self):
+ """Take the lock; fail if it's already held.
+
+ If you wish to block until the lock can be obtained, call wait_lock()
+ instead.
+
+ :return: The lock token.
+ :raises LockContention: if the lock is held by someone else.
+ """
+ if self._fake_read_lock:
+ raise LockContention(self)
+ result = self._attempt_lock()
+ hook_result = lock.LockResult(self.transport.abspath(self.path),
+ self.nonce)
+ for hook in self.hooks['lock_acquired']:
+ hook(hook_result)
+ return result
+
+ def lock_url_for_display(self):
+ """Give a nicely-printable representation of the URL of this lock."""
+ # As local lock urls are correct we display them.
+ # We avoid displaying remote lock urls.
+ lock_url = self.transport.abspath(self.path)
+ if lock_url.startswith('file://'):
+ lock_url = lock_url.split('.bzr/')[0]
+ else:
+ lock_url = ''
+ return lock_url
+
+ def wait_lock(self, timeout=None, poll=None, max_attempts=None):
+ """Wait a certain period for a lock.
+
+ If the lock can be acquired within the bounded time, it
+ is taken and this returns. Otherwise, LockContention
+ is raised. Either way, this function should return within
+ approximately `timeout` seconds. (It may be a bit more if
+ a transport operation takes a long time to complete.)
+
+ :param timeout: Approximate maximum amount of time to wait for the
+ lock, in seconds.
+
+ :param poll: Delay in seconds between retrying the lock.
+
+ :param max_attempts: Maximum number of times to try to lock.
+
+ :return: The lock token.
+ """
+ if timeout is None:
+ timeout = _DEFAULT_TIMEOUT_SECONDS
+ if poll is None:
+ poll = _DEFAULT_POLL_SECONDS
+ # XXX: the transport interface doesn't let us guard against operations
+ # there taking a long time, so the total elapsed time or poll interval
+ # may be more than was requested.
+ deadline = time.time() + timeout
+ deadline_str = None
+ last_info = None
+ attempt_count = 0
+ lock_url = self.lock_url_for_display()
+ while True:
+ attempt_count += 1
+ try:
+ return self.attempt_lock()
+ except LockContention:
+ # possibly report the blockage, then try again
+ pass
+ # TODO: In a few cases, we find out that there's contention by
+ # reading the held info and observing that it's not ours. In
+ # those cases it's a bit redundant to read it again. However,
+ # the normal case (??) is that the rename fails and so we
+ # don't know who holds the lock. For simplicity we peek
+ # always.
+ new_info = self.peek()
+ if new_info is not None and new_info != last_info:
+ if last_info is None:
+ start = gettext('Unable to obtain')
+ else:
+ start = gettext('Lock owner changed for')
+ last_info = new_info
+ msg = gettext('{0} lock {1} {2}.').format(start, lock_url,
+ new_info)
+ if deadline_str is None:
+ deadline_str = time.strftime('%H:%M:%S',
+ time.localtime(deadline))
+ if timeout > 0:
+ msg += '\n' + gettext(
+ 'Will continue to try until %s, unless '
+ 'you press Ctrl-C.') % deadline_str
+ msg += '\n' + gettext('See "bzr help break-lock" for more.')
+ self._report_function(msg)
+ if (max_attempts is not None) and (attempt_count >= max_attempts):
+                self._trace("exceeded %d attempts", attempt_count)
+ raise LockContention(self)
+ if time.time() + poll < deadline:
+ self._trace("waiting %ss", poll)
+ time.sleep(poll)
+ else:
+ # As timeout is always 0 for remote locks
+ # this block is applicable only for local
+ # lock contention
+ self._trace("timeout after waiting %ss", timeout)
+ raise LockContention('(local)', lock_url)
+
+ def leave_in_place(self):
+ self._locked_via_token = True
+
+ def dont_leave_in_place(self):
+ self._locked_via_token = False
+
+ def lock_write(self, token=None):
+ """Wait for and acquire the lock.
+
+ :param token: if this is already locked, then lock_write will fail
+ unless the token matches the existing lock.
+ :returns: a token if this instance supports tokens, otherwise None.
+ :raises TokenLockingNotSupported: when a token is given but this
+ instance doesn't support using token locks.
+ :raises MismatchedToken: if the specified token doesn't match the token
+ of the existing lock.
+
+ A token should be passed in if you know that you have locked the object
+ some other way, and need to synchronise this object's state with that
+ fact.
+
+ XXX: docstring duplicated from LockableFiles.lock_write.
+ """
+ if token is not None:
+ self.validate_token(token)
+ self.nonce = token
+ self._lock_held = True
+ self._locked_via_token = True
+ return token
+ else:
+ return self.wait_lock()
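+        # Illustrative note (added comment): when the physical lock is
+        # already held through some other object, passing its token
+        # revalidates and records it here instead of taking a new lock, e.g.
+        #
+        #   token = other_lockdir.lock_write()     # hypothetical other holder
+        #   this_lockdir.lock_write(token=token)   # revalidates, no new lock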
+
+ def lock_read(self):
+ """Compatibility-mode shared lock.
+
+ LockDir doesn't support shared read-only locks, so this
+ just pretends that the lock is taken but really does nothing.
+ """
+ # At the moment Branches are commonly locked for read, but
+ # we can't rely on that remotely. Once this is cleaned up,
+ # reenable this warning to prevent it coming back in
+ # -- mbp 20060303
+ ## warn("LockDir.lock_read falls back to write lock")
+ if self._lock_held or self._fake_read_lock:
+ raise LockContention(self)
+ self._fake_read_lock = True
+
+ def validate_token(self, token):
+ if token is not None:
+ info = self.peek()
+ if info is None:
+ # Lock isn't held
+ lock_token = None
+ else:
+ lock_token = info.get('nonce')
+ if token != lock_token:
+ raise errors.TokenMismatch(token, lock_token)
+ else:
+ self._trace("revalidated by token %r", token)
+
+ def _trace(self, format, *args):
+ if 'lock' not in debug.debug_flags:
+ return
+ mutter(str(self) + ": " + (format % args))
+
+ def get_config(self):
+ """Get the configuration that governs this lockdir."""
+ # XXX: This really should also use the locationconfig at least, but
+ # that seems a bit hard to hook up at the moment. -- mbp 20110329
+ # FIXME: The above is still true ;) -- vila 20110811
+ return config.GlobalStack()
+
+
+class LockHeldInfo(object):
+ """The information recorded about a held lock.
+
+ This information is recorded into the lock when it's taken, and it can be
+ read back by any process with access to the lockdir. It can be used, for
+ example, to tell the user who holds the lock, or to try to detect whether
+ the lock holder is still alive.
+
+ Prior to bzr 2.4 a simple dict was used instead of an object.
+ """
+
+ def __init__(self, info_dict):
+ self.info_dict = info_dict
+
+ def __repr__(self):
+ """Return a debugging representation of this object."""
+ return "%s(%r)" % (self.__class__.__name__, self.info_dict)
+
+ def __unicode__(self):
+ """Return a user-oriented description of this object."""
+ d = self.to_readable_dict()
+        return (gettext(
+ u'held by %(user)s on %(hostname)s (process #%(pid)s), '
+ u'acquired %(time_ago)s') % d)
+
+ def to_readable_dict(self):
+ """Turn the holder info into a dict of human-readable attributes.
+
+ For example, the start time is presented relative to the current time,
+ rather than as seconds since the epoch.
+
+        Returns a dict with the keys user, hostname, pid and time_ago, all as
+        readable strings.
+ """
+ start_time = self.info_dict.get('start_time')
+ if start_time is None:
+ time_ago = '(unknown)'
+ else:
+ time_ago = format_delta(
+ time.time() - int(self.info_dict['start_time']))
+ user = self.info_dict.get('user', '<unknown>')
+ hostname = self.info_dict.get('hostname', '<unknown>')
+ pid = self.info_dict.get('pid', '<unknown>')
+ return dict(
+ user=user,
+ hostname=hostname,
+ pid=pid,
+ time_ago=time_ago)
+
+ def get(self, field_name):
+ """Return the contents of a field from the lock info, or None."""
+ return self.info_dict.get(field_name)
+
+ @classmethod
+ def for_this_process(cls, extra_holder_info):
+ """Return a new LockHeldInfo for a lock taken by this process.
+ """
+ info = dict(
+ hostname=get_host_name(),
+ pid=str(os.getpid()),
+ nonce=rand_chars(20),
+ start_time=str(int(time.time())),
+ user=get_username_for_lock_info(),
+ )
+ if extra_holder_info is not None:
+ info.update(extra_holder_info)
+ return cls(info)
+
+ def to_bytes(self):
+ s = rio.Stanza(**self.info_dict)
+ return s.to_string()
+
+ @classmethod
+ def from_info_file_bytes(cls, info_file_bytes):
+ """Construct from the contents of the held file."""
+ lines = osutils.split_lines(info_file_bytes)
+ try:
+ stanza = rio.read_stanza(lines)
+ except ValueError, e:
+ mutter('Corrupt lock info file: %r', lines)
+ raise LockCorrupt("could not parse lock info file: " + str(e),
+ lines)
+ if stanza is None:
+ # see bug 185013; we fairly often end up with the info file being
+ # empty after an interruption; we could log a message here but
+ # there may not be much we can say
+ return cls({})
+ else:
+ return cls(stanza.as_dict())
+
+ def __cmp__(self, other):
+ """Value comparison of lock holders."""
+ return (
+ cmp(type(self), type(other))
+ or cmp(self.info_dict, other.info_dict))
+
+ def is_locked_by_this_process(self):
+ """True if this process seems to be the current lock holder."""
+ return (
+ self.get('hostname') == get_host_name()
+ and self.get('pid') == str(os.getpid())
+ and self.get('user') == get_username_for_lock_info())
+
+ def is_lock_holder_known_dead(self):
+ """True if the lock holder process is known to be dead.
+
+ False if it's either known to be still alive, or if we just can't tell.
+
+ We can be fairly sure the lock holder is dead if it declared the same
+ hostname and there is no process with the given pid alive. If people
+ have multiple machines with the same hostname this may cause trouble.
+
+        This doesn't check whether the lock holder is in fact the same process
+        calling this method. (In that case it will return False, since the
+        holding process is still alive.)
+ """
+ if self.get('hostname') != get_host_name():
+ return False
+ if self.get('hostname') == 'localhost':
+ # Too ambiguous.
+ return False
+ if self.get('user') != get_username_for_lock_info():
+ # Could well be another local process by a different user, but
+ # just to be safe we won't conclude about this either.
+ return False
+ pid_str = self.info_dict.get('pid', None)
+ if not pid_str:
+ mutter("no pid recorded in %r" % (self, ))
+ return False
+ try:
+ pid = int(pid_str)
+ except ValueError:
+ mutter("can't parse pid %r from %r"
+ % (pid_str, self))
+ return False
+ return osutils.is_local_pid_dead(pid)
+
+
+def get_username_for_lock_info():
+ """Get a username suitable for putting into a lock.
+
+ It's ok if what's written here is not a proper email address as long
+ as it gives some clue who the user is.
+ """
+ try:
+ return config.GlobalConfig().username()
+ except errors.NoWhoami:
+ return osutils.getuser_unicode()
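+
+
+# An illustrative round trip of the holder info format (example only; the
+# values are whatever the current process produces):
+#
+#   info = LockHeldInfo.for_this_process(None)
+#   raw = info.to_bytes()
+#   assert LockHeldInfo.from_info_file_bytes(raw).get('pid') == str(os.getpid())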
diff --git a/bzrlib/log.py b/bzrlib/log.py
new file mode 100644
index 0000000..652e0a9
--- /dev/null
+++ b/bzrlib/log.py
@@ -0,0 +1,2137 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Code to show logs of changes.
+
+Various flavors of log can be produced:
+
+* for one file, or the whole tree, and (not done yet) for
+ files in a given directory
+
+* in "verbose" mode with a description of what changed from one
+ version to the next
+
+* with file-ids and revision-ids shown
+
+Logs are actually written out through an abstract LogFormatter
+interface, which allows for different preferred formats. Plugins can
+register formats too.
+
+Logs can be produced in either forward (oldest->newest) or reverse
+(newest->oldest) order.
+
+Logs can be filtered to show only revisions matching a particular
+search string, or within a particular range of revisions. The range
+can be given as date/times, which are reduced to revisions before
+calling in here.
+
+In verbose mode we show a summary of what changed in each particular
+revision. Note that this is the delta for changes in that revision
+relative to its left-most parent, not the delta relative to the last
+logged revision. So for example if you ask for a verbose log of
+changes touching hello.c you will get a list of those revisions also
+listing other things that were changed in the same revision, but not
+all the changes since the previous revision that touched hello.c.
+"""
+
+from __future__ import absolute_import
+
+import codecs
+from cStringIO import StringIO
+from itertools import (
+ chain,
+ izip,
+ )
+import re
+import sys
+from warnings import (
+ warn,
+ )
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+
+from bzrlib import (
+ config,
+ controldir,
+ diff,
+ errors,
+ foreign,
+ repository as _mod_repository,
+ revision as _mod_revision,
+ revisionspec,
+ tsort,
+ )
+from bzrlib.i18n import gettext, ngettext
+""")
+
+from bzrlib import (
+ lazy_regex,
+ registry,
+ )
+from bzrlib.osutils import (
+ format_date,
+ format_date_with_offset_in_original_timezone,
+ get_diff_header_encoding,
+ get_terminal_encoding,
+ terminal_width,
+ )
+
+
+def find_touching_revisions(branch, file_id):
+ """Yield a description of revisions which affect the file_id.
+
+ Each returned element is (revno, revision_id, description)
+
+ This is the list of revisions where the file is either added,
+ modified, renamed or deleted.
+
+ TODO: Perhaps some way to limit this to only particular revisions,
+ or to traverse a non-mainline set of revisions?
+ """
+ last_ie = None
+ last_path = None
+ revno = 1
+ graph = branch.repository.get_graph()
+ history = list(graph.iter_lefthand_ancestry(branch.last_revision(),
+ [_mod_revision.NULL_REVISION]))
+ for revision_id in reversed(history):
+ this_inv = branch.repository.get_inventory(revision_id)
+ if this_inv.has_id(file_id):
+ this_ie = this_inv[file_id]
+ this_path = this_inv.id2path(file_id)
+ else:
+ this_ie = this_path = None
+
+ # now we know how it was last time, and how it is in this revision.
+ # are those two states effectively the same or not?
+
+ if not this_ie and not last_ie:
+ # not present in either
+ pass
+ elif this_ie and not last_ie:
+ yield revno, revision_id, "added " + this_path
+ elif not this_ie and last_ie:
+ # deleted here
+ yield revno, revision_id, "deleted " + last_path
+ elif this_path != last_path:
+ yield revno, revision_id, ("renamed %s => %s" % (last_path, this_path))
+ elif (this_ie.text_size != last_ie.text_size
+ or this_ie.text_sha1 != last_ie.text_sha1):
+ yield revno, revision_id, "modified " + this_path
+
+ last_ie = this_ie
+ last_path = this_path
+ revno += 1
+
+
+def show_log(branch,
+ lf,
+ specific_fileid=None,
+ verbose=False,
+ direction='reverse',
+ start_revision=None,
+ end_revision=None,
+ search=None,
+ limit=None,
+ show_diff=False,
+ match=None):
+ """Write out human-readable log of commits to this branch.
+
+ This function is being retained for backwards compatibility but
+ should not be extended with new parameters. Use the new Logger class
+    instead, e.g. Logger(branch, rqst).show(lf), adding parameters to the
+ make_log_request_dict function.
+
+ :param lf: The LogFormatter object showing the output.
+
+ :param specific_fileid: If not None, list only the commits affecting the
+ specified file, rather than all commits.
+
+ :param verbose: If True show added/changed/deleted/renamed files.
+
+ :param direction: 'reverse' (default) is latest to earliest; 'forward' is
+ earliest to latest.
+
+ :param start_revision: If not None, only show revisions >= start_revision
+
+ :param end_revision: If not None, only show revisions <= end_revision
+
+ :param search: If not None, only show revisions with matching commit
+ messages
+
+ :param limit: If set, shows only 'limit' revisions, all revisions are shown
+ if None or 0.
+
+ :param show_diff: If True, output a diff after each revision.
+
+ :param match: Dictionary of search lists to use when matching revision
+ properties.
+ """
+ # Convert old-style parameters to new-style parameters
+ if specific_fileid is not None:
+ file_ids = [specific_fileid]
+ else:
+ file_ids = None
+ if verbose:
+ if file_ids:
+ delta_type = 'partial'
+ else:
+ delta_type = 'full'
+ else:
+ delta_type = None
+ if show_diff:
+ if file_ids:
+ diff_type = 'partial'
+ else:
+ diff_type = 'full'
+ else:
+ diff_type = None
+
+ # Build the request and execute it
+ rqst = make_log_request_dict(direction=direction, specific_fileids=file_ids,
+ start_revision=start_revision, end_revision=end_revision,
+ limit=limit, message_search=search,
+ delta_type=delta_type, diff_type=diff_type)
+ Logger(branch, rqst).show(lf)
+
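+# An illustrative sketch of the preferred new-style call described above,
+# assuming `branch` is an already-open Branch and `lf` a LogFormatter
+# instance:
+#
+#   rqst = make_log_request_dict(limit=10, levels=1)
+#   Logger(branch, rqst).show(lf)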
+
+# Note: This needs to be kept in sync with the defaults in
+# make_log_request_dict() below
+_DEFAULT_REQUEST_PARAMS = {
+ 'direction': 'reverse',
+ 'levels': None,
+ 'generate_tags': True,
+ 'exclude_common_ancestry': False,
+ '_match_using_deltas': True,
+ }
+
+
+def make_log_request_dict(direction='reverse', specific_fileids=None,
+ start_revision=None, end_revision=None, limit=None,
+ message_search=None, levels=None, generate_tags=True,
+ delta_type=None,
+ diff_type=None, _match_using_deltas=True,
+ exclude_common_ancestry=False, match=None,
+ signature=False, omit_merges=False,
+ ):
+ """Convenience function for making a logging request dictionary.
+
+ Using this function may make code slightly safer by ensuring
+ parameters have the correct names. It also provides a reference
+ point for documenting the supported parameters.
+
+ :param direction: 'reverse' (default) is latest to earliest;
+ 'forward' is earliest to latest.
+
+ :param specific_fileids: If not None, only include revisions
+ affecting the specified files, rather than all revisions.
+
+ :param start_revision: If not None, only generate
+ revisions >= start_revision
+
+ :param end_revision: If not None, only generate
+ revisions <= end_revision
+
+ :param limit: If set, generate only 'limit' revisions, all revisions
+ are shown if None or 0.
+
+ :param message_search: If not None, only include revisions with
+ matching commit messages
+
+ :param levels: the number of levels of revisions to
+ generate; 1 for just the mainline; 0 for all levels, or None for
+ a sensible default.
+
+ :param generate_tags: If True, include tags for matched revisions.
+`
+ :param delta_type: Either 'full', 'partial' or None.
+ 'full' means generate the complete delta - adds/deletes/modifies/etc;
+ 'partial' means filter the delta using specific_fileids;
+ None means do not generate any delta.
+
+ :param diff_type: Either 'full', 'partial' or None.
+ 'full' means generate the complete diff - adds/deletes/modifies/etc;
+ 'partial' means filter the diff using specific_fileids;
+ None means do not generate any diff.
+
+ :param _match_using_deltas: a private parameter controlling the
+ algorithm used for matching specific_fileids. This parameter
+ may be removed in the future so bzrlib client code should NOT
+ use it.
+
+ :param exclude_common_ancestry: Whether -rX..Y should be interpreted as a
+ range operator or as a graph difference.
+
+ :param signature: show digital signature information
+
+ :param match: Dictionary of list of search strings to use when filtering
+ revisions. Keys can be 'message', 'author', 'committer', 'bugs' or
+ the empty string to match any of the preceding properties.
+
+ :param omit_merges: If True, commits with more than one parent are
+ omitted.
+
+ """
+ # Take care of old style message_search parameter
+ if message_search:
+ if match:
+ if 'message' in match:
+ match['message'].append(message_search)
+ else:
+ match['message'] = [message_search]
+ else:
+            match = {'message': [message_search]}
+ return {
+ 'direction': direction,
+ 'specific_fileids': specific_fileids,
+ 'start_revision': start_revision,
+ 'end_revision': end_revision,
+ 'limit': limit,
+ 'levels': levels,
+ 'generate_tags': generate_tags,
+ 'delta_type': delta_type,
+ 'diff_type': diff_type,
+ 'exclude_common_ancestry': exclude_common_ancestry,
+ 'signature': signature,
+ 'match': match,
+ 'omit_merges': omit_merges,
+ # Add 'private' attributes for features that may be deprecated
+ '_match_using_deltas': _match_using_deltas,
+ }
+
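+# For illustration, the message_search conversion above means that
+#
+#   make_log_request_dict(message_search='fix')['match']
+#
+# yields {'message': ['fix']}, i.e. the same as passing
+# match={'message': ['fix']} directly (example value only).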
+
+def _apply_log_request_defaults(rqst):
+ """Apply default values to a request dictionary."""
+ result = _DEFAULT_REQUEST_PARAMS.copy()
+ if rqst:
+ result.update(rqst)
+ return result
+
+
+def format_signature_validity(rev_id, repo):
+ """get the signature validity
+
+ :param rev_id: revision id to validate
+ :param repo: repository of revision
+ :return: human readable string to print to log
+ """
+ from bzrlib import gpg
+
+ gpg_strategy = gpg.GPGStrategy(None)
+ result = repo.verify_revision_signature(rev_id, gpg_strategy)
+ if result[0] == gpg.SIGNATURE_VALID:
+ return "valid signature from {0}".format(result[1])
+ if result[0] == gpg.SIGNATURE_KEY_MISSING:
+ return "unknown key {0}".format(result[1])
+ if result[0] == gpg.SIGNATURE_NOT_VALID:
+ return "invalid signature!"
+ if result[0] == gpg.SIGNATURE_NOT_SIGNED:
+ return "no signature"
+
+
+class LogGenerator(object):
+ """A generator of log revisions."""
+
+ def iter_log_revisions(self):
+ """Iterate over LogRevision objects.
+
+ :return: An iterator yielding LogRevision objects.
+ """
+ raise NotImplementedError(self.iter_log_revisions)
+
+
+class Logger(object):
+ """An object that generates, formats and displays a log."""
+
+ def __init__(self, branch, rqst):
+ """Create a Logger.
+
+ :param branch: the branch to log
+ :param rqst: A dictionary specifying the query parameters.
+ See make_log_request_dict() for supported values.
+ """
+ self.branch = branch
+ self.rqst = _apply_log_request_defaults(rqst)
+
+ def show(self, lf):
+ """Display the log.
+
+ :param lf: The LogFormatter object to send the output to.
+ """
+ if not isinstance(lf, LogFormatter):
+ warn("not a LogFormatter instance: %r" % lf)
+
+ self.branch.lock_read()
+ try:
+ if getattr(lf, 'begin_log', None):
+ lf.begin_log()
+ self._show_body(lf)
+ if getattr(lf, 'end_log', None):
+ lf.end_log()
+ finally:
+ self.branch.unlock()
+
+ def _show_body(self, lf):
+ """Show the main log output.
+
+ Subclasses may wish to override this.
+ """
+ # Tweak the LogRequest based on what the LogFormatter can handle.
+ # (There's no point generating stuff if the formatter can't display it.)
+ rqst = self.rqst
+ if rqst['levels'] is None or lf.get_levels() > rqst['levels']:
+ # user didn't specify levels, use whatever the LF can handle:
+ rqst['levels'] = lf.get_levels()
+
+ if not getattr(lf, 'supports_tags', False):
+ rqst['generate_tags'] = False
+ if not getattr(lf, 'supports_delta', False):
+ rqst['delta_type'] = None
+ if not getattr(lf, 'supports_diff', False):
+ rqst['diff_type'] = None
+ if not getattr(lf, 'supports_signatures', False):
+ rqst['signature'] = False
+
+ # Find and print the interesting revisions
+ generator = self._generator_factory(self.branch, rqst)
+ for lr in generator.iter_log_revisions():
+ lf.log_revision(lr)
+ lf.show_advice()
+
+ def _generator_factory(self, branch, rqst):
+ """Make the LogGenerator object to use.
+
+ Subclasses may wish to override this.
+ """
+ return _DefaultLogGenerator(branch, rqst)
+
+
+class _StartNotLinearAncestor(Exception):
+ """Raised when a start revision is not found walking left-hand history."""
+
+
+class _DefaultLogGenerator(LogGenerator):
+ """The default generator of log revisions."""
+
+ def __init__(self, branch, rqst):
+ self.branch = branch
+ self.rqst = rqst
+ if rqst.get('generate_tags') and branch.supports_tags():
+ self.rev_tag_dict = branch.tags.get_reverse_tag_dict()
+ else:
+ self.rev_tag_dict = {}
+
+ def iter_log_revisions(self):
+ """Iterate over LogRevision objects.
+
+ :return: An iterator yielding LogRevision objects.
+ """
+ rqst = self.rqst
+ levels = rqst.get('levels')
+ limit = rqst.get('limit')
+ diff_type = rqst.get('diff_type')
+ show_signature = rqst.get('signature')
+ omit_merges = rqst.get('omit_merges')
+ log_count = 0
+ revision_iterator = self._create_log_revision_iterator()
+ for revs in revision_iterator:
+ for (rev_id, revno, merge_depth), rev, delta in revs:
+ # 0 levels means show everything; merge_depth counts from 0
+ if levels != 0 and merge_depth >= levels:
+ continue
+ if omit_merges and len(rev.parent_ids) > 1:
+ continue
+ if diff_type is None:
+ diff = None
+ else:
+ diff = self._format_diff(rev, rev_id, diff_type)
+ if show_signature:
+ signature = format_signature_validity(rev_id,
+ self.branch.repository)
+ else:
+ signature = None
+ yield LogRevision(rev, revno, merge_depth, delta,
+ self.rev_tag_dict.get(rev_id), diff, signature)
+ if limit:
+ log_count += 1
+ if log_count >= limit:
+ return
+
+ def _format_diff(self, rev, rev_id, diff_type):
+ repo = self.branch.repository
+ if len(rev.parent_ids) == 0:
+ ancestor_id = _mod_revision.NULL_REVISION
+ else:
+ ancestor_id = rev.parent_ids[0]
+ tree_1 = repo.revision_tree(ancestor_id)
+ tree_2 = repo.revision_tree(rev_id)
+ file_ids = self.rqst.get('specific_fileids')
+ if diff_type == 'partial' and file_ids is not None:
+ specific_files = [tree_2.id2path(id) for id in file_ids]
+ else:
+ specific_files = None
+ s = StringIO()
+ path_encoding = get_diff_header_encoding()
+ diff.show_diff_trees(tree_1, tree_2, s, specific_files, old_label='',
+ new_label='', path_encoding=path_encoding)
+ return s.getvalue()
+
+ def _create_log_revision_iterator(self):
+ """Create a revision iterator for log.
+
+ :return: An iterator over lists of ((rev_id, revno, merge_depth), rev,
+ delta).
+ """
+ self.start_rev_id, self.end_rev_id = _get_revision_limits(
+ self.branch, self.rqst.get('start_revision'),
+ self.rqst.get('end_revision'))
+ if self.rqst.get('_match_using_deltas'):
+ return self._log_revision_iterator_using_delta_matching()
+ else:
+ # We're using the per-file-graph algorithm. This scales really
+ # well but only makes sense if there is a single file and it's
+ # not a directory
+ file_count = len(self.rqst.get('specific_fileids'))
+ if file_count != 1:
+                raise errors.BzrError("illegal LogRequest: must match-using-deltas "
+ "when logging %d files" % file_count)
+ return self._log_revision_iterator_using_per_file_graph()
+
+ def _log_revision_iterator_using_delta_matching(self):
+ # Get the base revisions, filtering by the revision range
+ rqst = self.rqst
+ generate_merge_revisions = rqst.get('levels') != 1
+ delayed_graph_generation = not rqst.get('specific_fileids') and (
+ rqst.get('limit') or self.start_rev_id or self.end_rev_id)
+ view_revisions = _calc_view_revisions(
+ self.branch, self.start_rev_id, self.end_rev_id,
+ rqst.get('direction'),
+ generate_merge_revisions=generate_merge_revisions,
+ delayed_graph_generation=delayed_graph_generation,
+ exclude_common_ancestry=rqst.get('exclude_common_ancestry'))
+
+ # Apply the other filters
+ return make_log_rev_iterator(self.branch, view_revisions,
+ rqst.get('delta_type'), rqst.get('match'),
+ file_ids=rqst.get('specific_fileids'),
+ direction=rqst.get('direction'))
+
+ def _log_revision_iterator_using_per_file_graph(self):
+ # Get the base revisions, filtering by the revision range.
+ # Note that we always generate the merge revisions because
+ # filter_revisions_touching_file_id() requires them ...
+ rqst = self.rqst
+ view_revisions = _calc_view_revisions(
+ self.branch, self.start_rev_id, self.end_rev_id,
+ rqst.get('direction'), generate_merge_revisions=True,
+ exclude_common_ancestry=rqst.get('exclude_common_ancestry'))
+ if not isinstance(view_revisions, list):
+ view_revisions = list(view_revisions)
+ view_revisions = _filter_revisions_touching_file_id(self.branch,
+ rqst.get('specific_fileids')[0], view_revisions,
+ include_merges=rqst.get('levels') != 1)
+ return make_log_rev_iterator(self.branch, view_revisions,
+ rqst.get('delta_type'), rqst.get('match'))
+
+
+def _calc_view_revisions(branch, start_rev_id, end_rev_id, direction,
+ generate_merge_revisions,
+ delayed_graph_generation=False,
+ exclude_common_ancestry=False,
+ ):
+ """Calculate the revisions to view.
+
+ :return: An iterator of (revision_id, dotted_revno, merge_depth) tuples OR
+ a list of the same tuples.
+ """
+ if (exclude_common_ancestry and start_rev_id == end_rev_id):
+ raise errors.BzrCommandError(gettext(
+ '--exclude-common-ancestry requires two different revisions'))
+ if direction not in ('reverse', 'forward'):
+ raise ValueError(gettext('invalid direction %r') % direction)
+ br_revno, br_rev_id = branch.last_revision_info()
+ if br_revno == 0:
+ return []
+
+ if (end_rev_id and start_rev_id == end_rev_id
+ and (not generate_merge_revisions
+ or not _has_merges(branch, end_rev_id))):
+ # If a single revision is requested, check we can handle it
+ return _generate_one_revision(branch, end_rev_id, br_rev_id,
+ br_revno)
+ if not generate_merge_revisions:
+ try:
+ # If we only want to see linear revisions, we can iterate ...
+ iter_revs = _linear_view_revisions(
+ branch, start_rev_id, end_rev_id,
+ exclude_common_ancestry=exclude_common_ancestry)
+ # If a start limit was given and it's not obviously an
+ # ancestor of the end limit, check it before outputting anything
+ if (direction == 'forward'
+ or (start_rev_id and not _is_obvious_ancestor(
+ branch, start_rev_id, end_rev_id))):
+ iter_revs = list(iter_revs)
+ if direction == 'forward':
+ iter_revs = reversed(iter_revs)
+ return iter_revs
+ except _StartNotLinearAncestor:
+ # Switch to the slower implementation that may be able to find a
+ # non-obvious ancestor out of the left-hand history.
+ pass
+ iter_revs = _generate_all_revisions(branch, start_rev_id, end_rev_id,
+ direction, delayed_graph_generation,
+ exclude_common_ancestry)
+ if direction == 'forward':
+ iter_revs = _rebase_merge_depth(reverse_by_depth(list(iter_revs)))
+ return iter_revs
+
+
+def _generate_one_revision(branch, rev_id, br_rev_id, br_revno):
+ if rev_id == br_rev_id:
+ # It's the tip
+ return [(br_rev_id, br_revno, 0)]
+ else:
+ revno_str = _compute_revno_str(branch, rev_id)
+ return [(rev_id, revno_str, 0)]
+
+
+def _generate_all_revisions(branch, start_rev_id, end_rev_id, direction,
+ delayed_graph_generation,
+ exclude_common_ancestry=False):
+ # On large trees, generating the merge graph can take 30-60 seconds
+ # so we delay doing it until a merge is detected, incrementally
+ # returning initial (non-merge) revisions while we can.
+
+ # The above is only true for old formats (<= 0.92), for newer formats, a
+ # couple of seconds only should be needed to load the whole graph and the
+ # other graph operations needed are even faster than that -- vila 100201
+ initial_revisions = []
+ if delayed_graph_generation:
+ try:
+ for rev_id, revno, depth in _linear_view_revisions(
+ branch, start_rev_id, end_rev_id, exclude_common_ancestry):
+ if _has_merges(branch, rev_id):
+ # The end_rev_id can be nested down somewhere. We need an
+ # explicit ancestry check. There is an ambiguity here as we
+ # may not raise _StartNotLinearAncestor for a revision that
+ # is an ancestor but not a *linear* one. But since we have
+ # loaded the graph to do the check (or calculate a dotted
+ # revno), we may as well accept to show the log... We need
+ # the check only if start_rev_id is not None as all
+ # revisions have _mod_revision.NULL_REVISION as an ancestor
+ # -- vila 20100319
+ graph = branch.repository.get_graph()
+ if (start_rev_id is not None
+ and not graph.is_ancestor(start_rev_id, end_rev_id)):
+ raise _StartNotLinearAncestor()
+ # Since we collected the revisions so far, we need to
+ # adjust end_rev_id.
+ end_rev_id = rev_id
+ break
+ else:
+ initial_revisions.append((rev_id, revno, depth))
+ else:
+ # No merged revisions found
+ return initial_revisions
+ except _StartNotLinearAncestor:
+ # A merge was never detected so the lower revision limit can't
+ # be nested down somewhere
+ raise errors.BzrCommandError(gettext('Start revision not found in'
+ ' history of end revision.'))
+
+ # We exit the loop above because we encounter a revision with merges, from
+ # this revision, we need to switch to _graph_view_revisions.
+
+ # A log including nested merges is required. If the direction is reverse,
+ # we rebase the initial merge depths so that the development line is
+ # shown naturally, i.e. just like it is for linear logging. We can easily
+ # make forward the exact opposite display, but showing the merge revisions
+ # indented at the end seems slightly nicer in that case.
+ view_revisions = chain(iter(initial_revisions),
+ _graph_view_revisions(branch, start_rev_id, end_rev_id,
+ rebase_initial_depths=(direction == 'reverse'),
+ exclude_common_ancestry=exclude_common_ancestry))
+ return view_revisions
+
+
+def _has_merges(branch, rev_id):
+ """Does a revision have multiple parents or not?"""
+ parents = branch.repository.get_parent_map([rev_id]).get(rev_id, [])
+ return len(parents) > 1
+
+
+def _compute_revno_str(branch, rev_id):
+ """Compute the revno string from a rev_id.
+
+ :return: The revno string, or None if the revision is not in the supplied
+ branch.
+ """
+ try:
+ revno = branch.revision_id_to_dotted_revno(rev_id)
+ except errors.NoSuchRevision:
+ # The revision must be outside of this branch
+ return None
+ else:
+ return '.'.join(str(n) for n in revno)
+
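+# For example (illustrative values): a mainline revision gives a plain string
+# such as '42', while a merged revision gives a dotted form such as '42.1.3'.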
+
+def _is_obvious_ancestor(branch, start_rev_id, end_rev_id):
+ """Is start_rev_id an obvious ancestor of end_rev_id?"""
+ if start_rev_id and end_rev_id:
+ try:
+ start_dotted = branch.revision_id_to_dotted_revno(start_rev_id)
+ end_dotted = branch.revision_id_to_dotted_revno(end_rev_id)
+ except errors.NoSuchRevision:
+ # one or both is not in the branch; not obvious
+ return False
+ if len(start_dotted) == 1 and len(end_dotted) == 1:
+ # both on mainline
+ return start_dotted[0] <= end_dotted[0]
+ elif (len(start_dotted) == 3 and len(end_dotted) == 3 and
+ start_dotted[0:1] == end_dotted[0:1]):
+ # both on same development line
+ return start_dotted[2] <= end_dotted[2]
+ else:
+ # not obvious
+ return False
+ # if either start or end is not specified then we use either the first or
+ # the last revision and *they* are obvious ancestors.
+ return True
+
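+# For example (illustrative dotted revnos): '3' is an obvious ancestor of
+# mainline '7', and '3.1.2' of '3.1.5' on the same development line, while
+# '3.1.2' against mainline '7' is not considered obvious and returns False.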
+
+def _linear_view_revisions(branch, start_rev_id, end_rev_id,
+ exclude_common_ancestry=False):
+ """Calculate a sequence of revisions to view, newest to oldest.
+
+ :param start_rev_id: the lower revision-id
+ :param end_rev_id: the upper revision-id
+ :param exclude_common_ancestry: Whether the start_rev_id should be part of
+ the iterated revisions.
+ :return: An iterator of (revision_id, dotted_revno, merge_depth) tuples.
+ :raises _StartNotLinearAncestor: if a start_rev_id is specified but
+ is not found walking the left-hand history
+ """
+ br_revno, br_rev_id = branch.last_revision_info()
+ repo = branch.repository
+ graph = repo.get_graph()
+ if start_rev_id is None and end_rev_id is None:
+ cur_revno = br_revno
+ for revision_id in graph.iter_lefthand_ancestry(br_rev_id,
+ (_mod_revision.NULL_REVISION,)):
+ yield revision_id, str(cur_revno), 0
+ cur_revno -= 1
+ else:
+ if end_rev_id is None:
+ end_rev_id = br_rev_id
+ found_start = start_rev_id is None
+ for revision_id in graph.iter_lefthand_ancestry(end_rev_id,
+ (_mod_revision.NULL_REVISION,)):
+ revno_str = _compute_revno_str(branch, revision_id)
+ if not found_start and revision_id == start_rev_id:
+ if not exclude_common_ancestry:
+ yield revision_id, revno_str, 0
+ found_start = True
+ break
+ else:
+ yield revision_id, revno_str, 0
+ else:
+ if not found_start:
+ raise _StartNotLinearAncestor()
+
+
+def _graph_view_revisions(branch, start_rev_id, end_rev_id,
+ rebase_initial_depths=True,
+ exclude_common_ancestry=False):
+ """Calculate revisions to view including merges, newest to oldest.
+
+ :param branch: the branch
+ :param start_rev_id: the lower revision-id
+ :param end_rev_id: the upper revision-id
+ :param rebase_initial_depth: should depths be rebased until a mainline
+ revision is found?
+ :return: An iterator of (revision_id, dotted_revno, merge_depth) tuples.
+ """
+ if exclude_common_ancestry:
+ stop_rule = 'with-merges-without-common-ancestry'
+ else:
+ stop_rule = 'with-merges'
+ view_revisions = branch.iter_merge_sorted_revisions(
+ start_revision_id=end_rev_id, stop_revision_id=start_rev_id,
+ stop_rule=stop_rule)
+ if not rebase_initial_depths:
+ for (rev_id, merge_depth, revno, end_of_merge
+ ) in view_revisions:
+ yield rev_id, '.'.join(map(str, revno)), merge_depth
+ else:
+ # We're following a development line starting at a merged revision.
+ # We need to adjust depths down by the initial depth until we find
+ # a depth less than it. Then we use that depth as the adjustment.
+ # If and when we reach the mainline, depth adjustment ends.
+ depth_adjustment = None
+ for (rev_id, merge_depth, revno, end_of_merge
+ ) in view_revisions:
+ if depth_adjustment is None:
+ depth_adjustment = merge_depth
+ if depth_adjustment:
+ if merge_depth < depth_adjustment:
+                    # From now on we reduce the depth adjustment; this can be
+ # surprising for users. The alternative requires two passes
+ # which breaks the fast display of the first revision
+ # though.
+ depth_adjustment = merge_depth
+ merge_depth -= depth_adjustment
+ yield rev_id, '.'.join(map(str, revno)), merge_depth
+
+
+def _rebase_merge_depth(view_revisions):
+ """Adjust depths upwards so the top level is 0."""
+    # If either the first or last revision has a merge_depth of 0, we're done
+ if view_revisions and view_revisions[0][2] and view_revisions[-1][2]:
+ min_depth = min([d for r,n,d in view_revisions])
+ if min_depth != 0:
+ view_revisions = [(r,n,d-min_depth) for r,n,d in view_revisions]
+ return view_revisions
+
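+# For example (illustrative values), a view consisting only of nested
+# revisions such as [(r1, '1.1.1', 2), (r2, '1.1.2', 2)] is shifted so that
+# its shallowest entries sit at depth 0.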
+
+def make_log_rev_iterator(branch, view_revisions, generate_delta, search,
+ file_ids=None, direction='reverse'):
+ """Create a revision iterator for log.
+
+ :param branch: The branch being logged.
+ :param view_revisions: The revisions being viewed.
+ :param generate_delta: Whether to generate a delta for each revision.
+ Permitted values are None, 'full' and 'partial'.
+ :param search: A user text search string.
+ :param file_ids: If non empty, only revisions matching one or more of
+ the file-ids are to be kept.
+ :param direction: the direction in which view_revisions is sorted
+ :return: An iterator over lists of ((rev_id, revno, merge_depth), rev,
+ delta).
+ """
+ # Convert view_revisions into (view, None, None) groups to fit with
+ # the standard interface here.
+ if type(view_revisions) == list:
+ # A single batch conversion is faster than many incremental ones.
+ # As we have all the data, do a batch conversion.
+ nones = [None] * len(view_revisions)
+ log_rev_iterator = iter([zip(view_revisions, nones, nones)])
+ else:
+ def _convert():
+ for view in view_revisions:
+ yield (view, None, None)
+ log_rev_iterator = iter([_convert()])
+ for adapter in log_adapters:
+ # It would be nicer if log adapters were first class objects
+ # with custom parameters. This will do for now. IGC 20090127
+ if adapter == _make_delta_filter:
+ log_rev_iterator = adapter(branch, generate_delta,
+ search, log_rev_iterator, file_ids, direction)
+ else:
+ log_rev_iterator = adapter(branch, generate_delta,
+ search, log_rev_iterator)
+ return log_rev_iterator
+
+
+def _make_search_filter(branch, generate_delta, match, log_rev_iterator):
+ """Create a filtered iterator of log_rev_iterator matching on a regex.
+
+ :param branch: The branch being logged.
+ :param generate_delta: Whether to generate a delta for each revision.
+ :param match: A dictionary with properties as keys and lists of strings
+ as values. To match, a revision may match any of the supplied strings
+ within a single property but must match at least one string for each
+ property.
+ :param log_rev_iterator: An input iterator containing all revisions that
+ could be displayed, in lists.
+ :return: An iterator over lists of ((rev_id, revno, merge_depth), rev,
+ delta).
+ """
+ if match is None:
+ return log_rev_iterator
+ searchRE = [(k, [re.compile(x, re.IGNORECASE) for x in v])
+ for (k,v) in match.iteritems()]
+ return _filter_re(searchRE, log_rev_iterator)
+
+
+def _filter_re(searchRE, log_rev_iterator):
+ for revs in log_rev_iterator:
+ new_revs = [rev for rev in revs if _match_filter(searchRE, rev[1])]
+ if new_revs:
+ yield new_revs
+
+def _match_filter(searchRE, rev):
+ strings = {
+ 'message': (rev.message,),
+ 'committer': (rev.committer,),
+ 'author': (rev.get_apparent_authors()),
+ 'bugs': list(rev.iter_bugs())
+ }
+ strings[''] = [item for inner_list in strings.itervalues()
+ for item in inner_list]
+ for (k,v) in searchRE:
+ if k in strings and not _match_any_filter(strings[k], v):
+ return False
+ return True
+
+def _match_any_filter(strings, res):
+    return any([filter(None, map(r.search, strings)) for r in res])
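+
+# For example (illustrative values), match={'message': ['fix', 'bug'],
+# 'author': ['alice']} keeps a revision whose message matches either 'fix' or
+# 'bug' (case-insensitively) and whose author matches 'alice'.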
+
+def _make_delta_filter(branch, generate_delta, search, log_rev_iterator,
+ fileids=None, direction='reverse'):
+ """Add revision deltas to a log iterator if needed.
+
+ :param branch: The branch being logged.
+ :param generate_delta: Whether to generate a delta for each revision.
+ Permitted values are None, 'full' and 'partial'.
+ :param search: A user text search string.
+ :param log_rev_iterator: An input iterator containing all revisions that
+ could be displayed, in lists.
+ :param fileids: If non empty, only revisions matching one or more of
+ the file-ids are to be kept.
+ :param direction: the direction in which view_revisions is sorted
+ :return: An iterator over lists of ((rev_id, revno, merge_depth), rev,
+ delta).
+ """
+ if not generate_delta and not fileids:
+ return log_rev_iterator
+ return _generate_deltas(branch.repository, log_rev_iterator,
+ generate_delta, fileids, direction)
+
+
+def _generate_deltas(repository, log_rev_iterator, delta_type, fileids,
+ direction):
+ """Create deltas for each batch of revisions in log_rev_iterator.
+
+ If we're only generating deltas for the sake of filtering against
+ file-ids, we stop generating deltas once all file-ids reach the
+ appropriate life-cycle point. If we're receiving data newest to
+ oldest, then that life-cycle point is 'add', otherwise it's 'remove'.
+ """
+ check_fileids = fileids is not None and len(fileids) > 0
+ if check_fileids:
+ fileid_set = set(fileids)
+ if direction == 'reverse':
+ stop_on = 'add'
+ else:
+ stop_on = 'remove'
+ else:
+ fileid_set = None
+ for revs in log_rev_iterator:
+ # If we were matching against fileids and we've run out,
+ # there's nothing left to do
+ if check_fileids and not fileid_set:
+ return
+ revisions = [rev[1] for rev in revs]
+ new_revs = []
+ if delta_type == 'full' and not check_fileids:
+ deltas = repository.get_deltas_for_revisions(revisions)
+ for rev, delta in izip(revs, deltas):
+ new_revs.append((rev[0], rev[1], delta))
+ else:
+ deltas = repository.get_deltas_for_revisions(revisions, fileid_set)
+ for rev, delta in izip(revs, deltas):
+ if check_fileids:
+ if delta is None or not delta.has_changed():
+ continue
+ else:
+ _update_fileids(delta, fileid_set, stop_on)
+ if delta_type is None:
+ delta = None
+ elif delta_type == 'full':
+ # If the file matches all the time, rebuilding
+ # a full delta like this in addition to a partial
+ # one could be slow. However, it's likely that
+ # most revisions won't get this far, making it
+ # faster to filter on the partial deltas and
+ # build the occasional full delta than always
+ # building full deltas and filtering those.
+ rev_id = rev[0][0]
+ delta = repository.get_revision_delta(rev_id)
+ new_revs.append((rev[0], rev[1], delta))
+ yield new_revs
+
+
+def _update_fileids(delta, fileids, stop_on):
+ """Update the set of file-ids to search based on file lifecycle events.
+
+ :param fileids: a set of fileids to update
+ :param stop_on: either 'add' or 'remove' - take file-ids out of the
+ fileids set once their add or remove entry is detected respectively
+ """
+ if stop_on == 'add':
+ for item in delta.added:
+ if item[1] in fileids:
+ fileids.remove(item[1])
+    elif stop_on == 'remove':
+ for item in delta.removed:
+ if item[1] in fileids:
+ fileids.remove(item[1])
+
+
+def _make_revision_objects(branch, generate_delta, search, log_rev_iterator):
+ """Extract revision objects from the repository
+
+ :param branch: The branch being logged.
+ :param generate_delta: Whether to generate a delta for each revision.
+ :param search: A user text search string.
+ :param log_rev_iterator: An input iterator containing all revisions that
+ could be displayed, in lists.
+ :return: An iterator over lists of ((rev_id, revno, merge_depth), rev,
+ delta).
+ """
+ repository = branch.repository
+ for revs in log_rev_iterator:
+ # r = revision_id, n = revno, d = merge depth
+ revision_ids = [view[0] for view, _, _ in revs]
+ revisions = repository.get_revisions(revision_ids)
+ revs = [(rev[0], revision, rev[2]) for rev, revision in
+ izip(revs, revisions)]
+ yield revs
+
+
+def _make_batch_filter(branch, generate_delta, search, log_rev_iterator):
+ """Group up a single large batch into smaller ones.
+
+ :param branch: The branch being logged.
+ :param generate_delta: Whether to generate a delta for each revision.
+ :param search: A user text search string.
+ :param log_rev_iterator: An input iterator containing all revisions that
+ could be displayed, in lists.
+ :return: An iterator over lists of ((rev_id, revno, merge_depth), rev,
+ delta).
+ """
+ num = 9
+ for batch in log_rev_iterator:
+ batch = iter(batch)
+ while True:
+ step = [detail for _, detail in zip(range(num), batch)]
+ if len(step) == 0:
+ break
+ yield step
+ num = min(int(num * 1.5), 200)
+
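+# The batch size therefore grows as 9, 13, 19, 28, ... up to a cap of 200, so
+# the first screenful of log output appears quickly while later batches are
+# fetched in larger chunks.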
+
+def _get_revision_limits(branch, start_revision, end_revision):
+ """Get and check revision limits.
+
+ :param branch: The branch containing the revisions.
+
+ :param start_revision: The first revision to be logged.
+ For backwards compatibility this may be a mainline integer revno,
+ but for merge revision support a RevisionInfo is expected.
+
+ :param end_revision: The last revision to be logged.
+ For backwards compatibility this may be a mainline integer revno,
+ but for merge revision support a RevisionInfo is expected.
+
+ :return: (start_rev_id, end_rev_id) tuple.
+ """
+ branch_revno, branch_rev_id = branch.last_revision_info()
+ start_rev_id = None
+ if start_revision is None:
+ start_revno = 1
+ else:
+ if isinstance(start_revision, revisionspec.RevisionInfo):
+ start_rev_id = start_revision.rev_id
+ start_revno = start_revision.revno or 1
+ else:
+ branch.check_real_revno(start_revision)
+ start_revno = start_revision
+ start_rev_id = branch.get_rev_id(start_revno)
+
+ end_rev_id = None
+ if end_revision is None:
+ end_revno = branch_revno
+ else:
+ if isinstance(end_revision, revisionspec.RevisionInfo):
+ end_rev_id = end_revision.rev_id
+ end_revno = end_revision.revno or branch_revno
+ else:
+ branch.check_real_revno(end_revision)
+ end_revno = end_revision
+ end_rev_id = branch.get_rev_id(end_revno)
+
+ if branch_revno != 0:
+ if (start_rev_id == _mod_revision.NULL_REVISION
+ or end_rev_id == _mod_revision.NULL_REVISION):
+ raise errors.BzrCommandError(gettext('Logging revision 0 is invalid.'))
+ if start_revno > end_revno:
+ raise errors.BzrCommandError(gettext("Start revision must be "
+ "older than the end revision."))
+ return (start_rev_id, end_rev_id)
+
+
+def _get_mainline_revs(branch, start_revision, end_revision):
+ """Get the mainline revisions from the branch.
+
+ Generates the list of mainline revisions for the branch.
+
+ :param branch: The branch containing the revisions.
+
+ :param start_revision: The first revision to be logged.
+ For backwards compatibility this may be a mainline integer revno,
+ but for merge revision support a RevisionInfo is expected.
+
+ :param end_revision: The last revision to be logged.
+ For backwards compatibility this may be a mainline integer revno,
+ but for merge revision support a RevisionInfo is expected.
+
+ :return: A (mainline_revs, rev_nos, start_rev_id, end_rev_id) tuple.
+ """
+ branch_revno, branch_last_revision = branch.last_revision_info()
+ if branch_revno == 0:
+ return None, None, None, None
+
+ # For mainline generation, map start_revision and end_revision to
+ # mainline revnos. If the revision is not on the mainline choose the
+ # appropriate extreme of the mainline instead - the extra will be
+ # filtered later.
+ # Also map the revisions to rev_ids, to be used in the later filtering
+ # stage.
+ start_rev_id = None
+ if start_revision is None:
+ start_revno = 1
+ else:
+ if isinstance(start_revision, revisionspec.RevisionInfo):
+ start_rev_id = start_revision.rev_id
+ start_revno = start_revision.revno or 1
+ else:
+ branch.check_real_revno(start_revision)
+ start_revno = start_revision
+
+ end_rev_id = None
+ if end_revision is None:
+ end_revno = branch_revno
+ else:
+ if isinstance(end_revision, revisionspec.RevisionInfo):
+ end_rev_id = end_revision.rev_id
+ end_revno = end_revision.revno or branch_revno
+ else:
+ branch.check_real_revno(end_revision)
+ end_revno = end_revision
+
+ if ((start_rev_id == _mod_revision.NULL_REVISION)
+ or (end_rev_id == _mod_revision.NULL_REVISION)):
+ raise errors.BzrCommandError(gettext('Logging revision 0 is invalid.'))
+ if start_revno > end_revno:
+ raise errors.BzrCommandError(gettext("Start revision must be older "
+ "than the end revision."))
+
+ if end_revno < start_revno:
+ return None, None, None, None
+ cur_revno = branch_revno
+ rev_nos = {}
+ mainline_revs = []
+ graph = branch.repository.get_graph()
+ for revision_id in graph.iter_lefthand_ancestry(
+ branch_last_revision, (_mod_revision.NULL_REVISION,)):
+ if cur_revno < start_revno:
+ # We have gone far enough, but we always add 1 more revision
+ rev_nos[revision_id] = cur_revno
+ mainline_revs.append(revision_id)
+ break
+ if cur_revno <= end_revno:
+ rev_nos[revision_id] = cur_revno
+ mainline_revs.append(revision_id)
+ cur_revno -= 1
+ else:
+ # We walked off the edge of all revisions, so we add a 'None' marker
+ mainline_revs.append(None)
+
+ mainline_revs.reverse()
+
+ # override the mainline to look like the revision history.
+ return mainline_revs, rev_nos, start_rev_id, end_rev_id
+
+
+def _filter_revisions_touching_file_id(branch, file_id, view_revisions,
+ include_merges=True):
+ r"""Return the list of revision ids which touch a given file id.
+
+ The function filters view_revisions and returns a subset.
+ This includes the revisions which directly change the file id,
+ and the revisions which merge these changes. So if the
+ revision graph is::
+
+ A-.
+ |\ \
+ B C E
+ |/ /
+ D |
+ |\|
+ | F
+ |/
+ G
+
+ And 'C' changes a file, then both C and D will be returned. F will not be
+ returned even though it brings the changes to C into the branch starting
+ with E. (Note that if we were using F as the tip instead of G, then we
+ would see C, D, F.)
+
+ This will also be restricted based on a subset of the mainline.
+
+ :param branch: The branch where we can get text revision information.
+
+ :param file_id: Filter out revisions that do not touch file_id.
+
+ :param view_revisions: A list of (revision_id, dotted_revno, merge_depth)
+ tuples. This is the list of revisions which will be filtered. It is
+ assumed that view_revisions is in merge_sort order (i.e. newest
+        revision first).
+
+ :param include_merges: include merge revisions in the result or not
+
+ :return: A list of (revision_id, dotted_revno, merge_depth) tuples.
+ """
+ # Lookup all possible text keys to determine which ones actually modified
+ # the file.
+ graph = branch.repository.get_file_graph()
+ get_parent_map = graph.get_parent_map
+ text_keys = [(file_id, rev_id) for rev_id, revno, depth in view_revisions]
+ next_keys = None
+ # Looking up keys in batches of 1000 can cut the time in half, as well as
+ # memory consumption. GraphIndex *does* like to look for a few keys in
+ # parallel, it just doesn't like looking for *lots* of keys in parallel.
+ # TODO: This code needs to be re-evaluated periodically as we tune the
+ # indexing layer. We might consider passing in hints as to the known
+ # access pattern (sparse/clustered, high success rate/low success
+ # rate). This particular access is clustered with a low success rate.
+ modified_text_revisions = set()
+ chunk_size = 1000
+ for start in xrange(0, len(text_keys), chunk_size):
+ next_keys = text_keys[start:start + chunk_size]
+ # Only keep the revision_id portion of the key
+ modified_text_revisions.update(
+ [k[1] for k in get_parent_map(next_keys)])
+ del text_keys, next_keys
+
+ result = []
+ # Track what revisions will merge the current revision, replace entries
+ # with 'None' when they have been added to result
+ current_merge_stack = [None]
+ for info in view_revisions:
+ rev_id, revno, depth = info
+ if depth == len(current_merge_stack):
+ current_merge_stack.append(info)
+ else:
+ del current_merge_stack[depth + 1:]
+ current_merge_stack[-1] = info
+
+ if rev_id in modified_text_revisions:
+ # This needs to be logged, along with the extra revisions
+ for idx in xrange(len(current_merge_stack)):
+ node = current_merge_stack[idx]
+ if node is not None:
+ if include_merges or node[2] == 0:
+ result.append(node)
+ current_merge_stack[idx] = None
+ return result
+
+
+def reverse_by_depth(merge_sorted_revisions, _depth=0):
+ """Reverse revisions by depth.
+
+ Revisions with a different depth are sorted as a group with the previous
+ revision of that depth. There may be no topological justification for this,
+ but it looks much nicer.
+ """
+ # Add a fake revision at start so that we can always attach sub revisions
+ merge_sorted_revisions = [(None, None, _depth)] + merge_sorted_revisions
+ zd_revisions = []
+ for val in merge_sorted_revisions:
+ if val[2] == _depth:
+ # Each revision at the current depth becomes a chunk grouping all
+ # higher depth revisions.
+ zd_revisions.append([val])
+ else:
+ zd_revisions[-1].append(val)
+ for revisions in zd_revisions:
+ if len(revisions) > 1:
+            # We have higher depth revisions, so reverse them locally
+ revisions[1:] = reverse_by_depth(revisions[1:], _depth + 1)
+ zd_revisions.reverse()
+ result = []
+ for chunk in zd_revisions:
+ result.extend(chunk)
+ if _depth == 0:
+ # Top level call, get rid of the fake revisions that have been added
+ result = [r for r in result if r[0] is not None and r[1] is not None]
+ return result
+
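+# A small worked example (illustrative revision ids):
+#
+#   reverse_by_depth([('A', '2', 0), ('B', '1.1.1', 1), ('C', '1', 0)])
+#   => [('C', '1', 0), ('A', '2', 0), ('B', '1.1.1', 1)]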
+
+class LogRevision(object):
+ """A revision to be logged (by LogFormatter.log_revision).
+
+ A simple wrapper for the attributes of a revision to be logged.
+ The attributes may or may not be populated, as determined by the
+ logging options and the log formatter capabilities.
+ """
+
+ def __init__(self, rev=None, revno=None, merge_depth=0, delta=None,
+ tags=None, diff=None, signature=None):
+ self.rev = rev
+ if revno is None:
+ self.revno = None
+ else:
+ self.revno = str(revno)
+ self.merge_depth = merge_depth
+ self.delta = delta
+ self.tags = tags
+ self.diff = diff
+ self.signature = signature
+
+
+class LogFormatter(object):
+ """Abstract class to display log messages.
+
+ At a minimum, a derived class must implement the log_revision method.
+
+ If the LogFormatter needs to be informed of the beginning or end of
+ a log it should implement the begin_log and/or end_log hook methods.
+
+ A LogFormatter should define the following supports_XXX flags
+ to indicate which LogRevision attributes it supports:
+
+ - supports_delta must be True if this log formatter supports delta.
+ Otherwise the delta attribute may not be populated. The 'delta_format'
+ attribute describes whether the 'short_status' format (1) or the long
+ one (2) should be used.
+
+ - supports_merge_revisions must be True if this log formatter supports
+ merge revisions. If not, then only mainline revisions will be passed
+ to the formatter.
+
+ - preferred_levels is the number of levels this formatter defaults to.
+ The default value is zero meaning display all levels.
+ This value is only relevant if supports_merge_revisions is True.
+
+ - supports_tags must be True if this log formatter supports tags.
+ Otherwise the tags attribute may not be populated.
+
+ - supports_diff must be True if this log formatter supports diffs.
+ Otherwise the diff attribute may not be populated.
+
+ - supports_signatures must be True if this log formatter supports GPG
+ signatures.
+
+ Plugins can register functions to show custom revision properties using
+ the properties_handler_registry. The registered function
+ must respect the following interface description::
+
+ def my_show_properties(properties_dict):
+ # code that returns a dict {'name':'value'} of the properties
+ # to be shown
+ """
+ preferred_levels = 0
+
+ def __init__(self, to_file, show_ids=False, show_timezone='original',
+ delta_format=None, levels=None, show_advice=False,
+ to_exact_file=None, author_list_handler=None):
+ """Create a LogFormatter.
+
+ :param to_file: the file to output to
+ :param to_exact_file: if set, gives an output stream to which
+ non-Unicode diffs are written.
+ :param show_ids: if True, revision-ids are to be displayed
+ :param show_timezone: the timezone to use
+ :param delta_format: the level of delta information to display
+ or None to leave it to the formatter to decide
+ :param levels: the number of levels to display; None or -1 to
+ let the log formatter decide.
+ :param show_advice: whether to show advice at the end of the
+ log or not
+ :param author_list_handler: callable generating a list of
+ authors to display for a given revision
+ """
+ self.to_file = to_file
+ # 'exact' stream used to show diff, it should print content 'as is'
+ # and should not try to decode/encode it to unicode to avoid bug #328007
+ if to_exact_file is not None:
+ self.to_exact_file = to_exact_file
+ else:
+ # XXX: somewhat hacky; this assumes it's a codec writer; it's better
+ # for code that expects to get diffs to pass in the exact file
+ # stream
+ self.to_exact_file = getattr(to_file, 'stream', to_file)
+ self.show_ids = show_ids
+ self.show_timezone = show_timezone
+ if delta_format is None:
+ # Ensures backward compatibility
+ delta_format = 2 # long format
+ self.delta_format = delta_format
+ self.levels = levels
+ self._show_advice = show_advice
+ self._merge_count = 0
+ self._author_list_handler = author_list_handler
+
+ def get_levels(self):
+ """Get the number of levels to display or 0 for all."""
+ if getattr(self, 'supports_merge_revisions', False):
+ if self.levels is None or self.levels == -1:
+ self.levels = self.preferred_levels
+ else:
+ self.levels = 1
+ return self.levels
+
+ def log_revision(self, revision):
+ """Log a revision.
+
+ :param revision: The LogRevision to be logged.
+ """
+ raise NotImplementedError('not implemented in abstract base')
+
+ def show_advice(self):
+ """Output user advice, if any, when the log is completed."""
+ if self._show_advice and self.levels == 1 and self._merge_count > 0:
+ advice_sep = self.get_advice_separator()
+ if advice_sep:
+ self.to_file.write(advice_sep)
+ self.to_file.write(
+ "Use --include-merged or -n0 to see merged revisions.\n")
+
+ def get_advice_separator(self):
+ """Get the text separating the log from the closing advice."""
+ return ''
+
+ def short_committer(self, rev):
+ name, address = config.parse_username(rev.committer)
+ if name:
+ return name
+ return address
+
+ def short_author(self, rev):
+ return self.authors(rev, 'first', short=True, sep=', ')
+
+ def authors(self, rev, who, short=False, sep=None):
+ """Generate list of authors, taking --authors option into account.
+
+        The caller has to specify the name of an author list handler,
+ as provided by the author list registry, using the ``who``
+ argument. That name only sets a default, though: when the
+ user selected a different author list generation using the
+ ``--authors`` command line switch, as represented by the
+ ``author_list_handler`` constructor argument, that value takes
+ precedence.
+
+ :param rev: The revision for which to generate the list of authors.
+ :param who: Name of the default handler.
+ :param short: Whether to shorten names to either name or address.
+ :param sep: What separator to use for automatic concatenation.
+ """
+ if self._author_list_handler is not None:
+ # The user did specify --authors, which overrides the default
+ author_list_handler = self._author_list_handler
+ else:
+ # The user didn't specify --authors, so we use the caller's default
+ author_list_handler = author_list_registry.get(who)
+ names = author_list_handler(rev)
+ if short:
+ for i in range(len(names)):
+ name, address = config.parse_username(names[i])
+ if name:
+ names[i] = name
+ else:
+ names[i] = address
+ if sep is not None:
+ names = sep.join(names)
+ return names
+
+ def merge_marker(self, revision):
+ """Get the merge marker to include in the output or '' if none."""
+ if len(revision.rev.parent_ids) > 1:
+ self._merge_count += 1
+ return ' [merge]'
+ else:
+ return ''
+
+ def show_properties(self, revision, indent):
+ """Displays the custom properties returned by each registered handler.
+
+ If a registered handler raises an error it is propagated.
+ """
+ for line in self.custom_properties(revision):
+ self.to_file.write("%s%s\n" % (indent, line))
+
+ def custom_properties(self, revision):
+ """Format the custom properties returned by each registered handler.
+
+ If a registered handler raises an error it is propagated.
+
+ :return: a list of formatted lines (excluding trailing newlines)
+ """
+ lines = self._foreign_info_properties(revision)
+ for key, handler in properties_handler_registry.iteritems():
+ lines.extend(self._format_properties(handler(revision)))
+ return lines
+
+ def _foreign_info_properties(self, rev):
+ """Custom log displayer for foreign revision identifiers.
+
+ :param rev: Revision object.
+ """
+ # Revision comes directly from a foreign repository
+ if isinstance(rev, foreign.ForeignRevision):
+ return self._format_properties(
+ rev.mapping.vcs.show_foreign_revid(rev.foreign_revid))
+
+        # Imported foreign revision ids always contain :
+        if ":" not in rev.revision_id:
+ return []
+
+ # Revision was once imported from a foreign repository
+ try:
+ foreign_revid, mapping = \
+ foreign.foreign_vcs_registry.parse_revision_id(rev.revision_id)
+ except errors.InvalidRevisionId:
+ return []
+
+ return self._format_properties(
+ mapping.vcs.show_foreign_revid(foreign_revid))
+
+ def _format_properties(self, properties):
+ lines = []
+ for key, value in properties.items():
+ lines.append(key + ': ' + value)
+ return lines
+
+ def show_diff(self, to_file, diff, indent):
+ for l in diff.rstrip().split('\n'):
+ to_file.write(indent + '%s\n' % (l,))
+
+
+# Separator between revisions in long format
+_LONG_SEP = '-' * 60
+
+
+class LongLogFormatter(LogFormatter):
+
+ supports_merge_revisions = True
+ preferred_levels = 1
+ supports_delta = True
+ supports_tags = True
+ supports_diff = True
+ supports_signatures = True
+
+ def __init__(self, *args, **kwargs):
+ super(LongLogFormatter, self).__init__(*args, **kwargs)
+ if self.show_timezone == 'original':
+ self.date_string = self._date_string_original_timezone
+ else:
+ self.date_string = self._date_string_with_timezone
+
+ def _date_string_with_timezone(self, rev):
+ return format_date(rev.timestamp, rev.timezone or 0,
+ self.show_timezone)
+
+ def _date_string_original_timezone(self, rev):
+ return format_date_with_offset_in_original_timezone(rev.timestamp,
+ rev.timezone or 0)
+
+ def log_revision(self, revision):
+ """Log a revision, either merged or not."""
+ indent = ' ' * revision.merge_depth
+ lines = [_LONG_SEP]
+ if revision.revno is not None:
+ lines.append('revno: %s%s' % (revision.revno,
+ self.merge_marker(revision)))
+ if revision.tags:
+ lines.append('tags: %s' % (', '.join(revision.tags)))
+ if self.show_ids or revision.revno is None:
+ lines.append('revision-id: %s' % (revision.rev.revision_id,))
+ if self.show_ids:
+ for parent_id in revision.rev.parent_ids:
+ lines.append('parent: %s' % (parent_id,))
+ lines.extend(self.custom_properties(revision.rev))
+
+ committer = revision.rev.committer
+ authors = self.authors(revision.rev, 'all')
+ if authors != [committer]:
+ lines.append('author: %s' % (", ".join(authors),))
+ lines.append('committer: %s' % (committer,))
+
+ branch_nick = revision.rev.properties.get('branch-nick', None)
+ if branch_nick is not None:
+ lines.append('branch nick: %s' % (branch_nick,))
+
+ lines.append('timestamp: %s' % (self.date_string(revision.rev),))
+
+ if revision.signature is not None:
+ lines.append('signature: ' + revision.signature)
+
+ lines.append('message:')
+ if not revision.rev.message:
+ lines.append(' (no message)')
+ else:
+ message = revision.rev.message.rstrip('\r\n')
+ for l in message.split('\n'):
+ lines.append(' %s' % (l,))
+
+ # Dump the output, appending the delta and diff if requested
+ to_file = self.to_file
+ to_file.write("%s%s\n" % (indent, ('\n' + indent).join(lines)))
+ if revision.delta is not None:
+ # Use the standard status output to display changes
+ from bzrlib.delta import report_delta
+ report_delta(to_file, revision.delta, short_status=False,
+ show_ids=self.show_ids, indent=indent)
+ if revision.diff is not None:
+ to_file.write(indent + 'diff:\n')
+ to_file.flush()
+ # Note: we explicitly don't indent the diff (relative to the
+ # revision information) so that the output can be fed to patch -p0
+ self.show_diff(self.to_exact_file, revision.diff, indent)
+ self.to_exact_file.flush()
+
+ def get_advice_separator(self):
+ """Get the text separating the log from the closing advice."""
+ return '-' * 60 + '\n'
+
+
+class ShortLogFormatter(LogFormatter):
+
+ supports_merge_revisions = True
+ preferred_levels = 1
+ supports_delta = True
+ supports_tags = True
+ supports_diff = True
+
+ def __init__(self, *args, **kwargs):
+ super(ShortLogFormatter, self).__init__(*args, **kwargs)
+ self.revno_width_by_depth = {}
+
+ def log_revision(self, revision):
+ # We need two indents: one per depth and one for the information
+ # relative to that indent. Most mainline revnos are 5 chars or
+ # less while dotted revnos are typically 11 chars or less. Once
+ # calculated, we need to remember the offset for a given depth
+ # as we might be starting from a dotted revno in the first column
+ # and we want subsequent mainline revisions to line up.
+ depth = revision.merge_depth
+ indent = ' ' * depth
+ revno_width = self.revno_width_by_depth.get(depth)
+ if revno_width is None:
+ if revision.revno is None or revision.revno.find('.') == -1:
+ # mainline revno, e.g. 12345
+ revno_width = 5
+ else:
+ # dotted revno, e.g. 12345.10.55
+ revno_width = 11
+ self.revno_width_by_depth[depth] = revno_width
+ offset = ' ' * (revno_width + 1)
+
+ to_file = self.to_file
+ tags = ''
+ if revision.tags:
+ tags = ' {%s}' % (', '.join(revision.tags))
+ to_file.write(indent + "%*s %s\t%s%s%s\n" % (revno_width,
+ revision.revno or "", self.short_author(revision.rev),
+ format_date(revision.rev.timestamp,
+ revision.rev.timezone or 0,
+ self.show_timezone, date_fmt="%Y-%m-%d",
+ show_offset=False),
+ tags, self.merge_marker(revision)))
+ self.show_properties(revision.rev, indent+offset)
+ if self.show_ids or revision.revno is None:
+ to_file.write(indent + offset + 'revision-id:%s\n'
+ % (revision.rev.revision_id,))
+ if not revision.rev.message:
+ to_file.write(indent + offset + '(no message)\n')
+ else:
+ message = revision.rev.message.rstrip('\r\n')
+ for l in message.split('\n'):
+ to_file.write(indent + offset + '%s\n' % (l,))
+
+ if revision.delta is not None:
+ # Use the standard status output to display changes
+ from bzrlib.delta import report_delta
+ report_delta(to_file, revision.delta,
+ short_status=self.delta_format==1,
+ show_ids=self.show_ids, indent=indent + offset)
+ if revision.diff is not None:
+ self.show_diff(self.to_exact_file, revision.diff, ' ')
+ to_file.write('\n')
+
+
+class LineLogFormatter(LogFormatter):
+
+ supports_merge_revisions = True
+ preferred_levels = 1
+ supports_tags = True
+
+ def __init__(self, *args, **kwargs):
+ super(LineLogFormatter, self).__init__(*args, **kwargs)
+ width = terminal_width()
+ if width is not None:
+ # we need one extra space for terminals that wrap on last char
+ width = width - 1
+ self._max_chars = width
+
+ def truncate(self, str, max_len):
+ if max_len is None or len(str) <= max_len:
+ return str
+ return str[:max_len-3] + '...'
+
+ def date_string(self, rev):
+ return format_date(rev.timestamp, rev.timezone or 0,
+ self.show_timezone, date_fmt="%Y-%m-%d",
+ show_offset=False)
+
+ def message(self, rev):
+ if not rev.message:
+ return '(no message)'
+ else:
+ return rev.message
+
+ def log_revision(self, revision):
+ indent = ' ' * revision.merge_depth
+ self.to_file.write(self.log_string(revision.revno, revision.rev,
+ self._max_chars, revision.tags, indent))
+ self.to_file.write('\n')
+
+ def log_string(self, revno, rev, max_chars, tags=None, prefix=''):
+ """Format log info into one string. Truncate tail of string
+
+ :param revno: revision number or None.
+ Revision numbers count from 1.
+ :param rev: revision object
+ :param max_chars: maximum length of resulting string
+ :param tags: list of tags or None
+ :param prefix: string to prefix each line
+ :return: formatted truncated string
+ """
+ out = []
+ if revno:
+ # show revno only when it is not None
+ out.append("%s:" % revno)
+ if max_chars is not None:
+ out.append(self.truncate(self.short_author(rev), (max_chars+3)/4))
+ else:
+ out.append(self.short_author(rev))
+ out.append(self.date_string(rev))
+ if len(rev.parent_ids) > 1:
+ out.append('[merge]')
+ if tags:
+ tag_str = '{%s}' % (', '.join(tags))
+ out.append(tag_str)
+ out.append(rev.get_summary())
+ return self.truncate(prefix + " ".join(out).rstrip('\n'), max_chars)
+
+
+class GnuChangelogLogFormatter(LogFormatter):
+
+ supports_merge_revisions = True
+ supports_delta = True
+
+ def log_revision(self, revision):
+ """Log a revision, either merged or not."""
+ to_file = self.to_file
+
+ date_str = format_date(revision.rev.timestamp,
+ revision.rev.timezone or 0,
+ self.show_timezone,
+ date_fmt='%Y-%m-%d',
+ show_offset=False)
+ committer_str = self.authors(revision.rev, 'first', sep=', ')
+ committer_str = committer_str.replace(' <', '  <')
+ to_file.write('%s  %s\n\n' % (date_str, committer_str))
+
+ if revision.delta is not None and revision.delta.has_changed():
+ for c in revision.delta.added + revision.delta.removed + revision.delta.modified:
+ path, = c[:1]
+ to_file.write('\t* %s:\n' % (path,))
+ for c in revision.delta.renamed:
+ oldpath,newpath = c[:2]
+ # For renamed files, show both the old and the new path
+ to_file.write('\t* %s:\n\t* %s:\n' % (oldpath,newpath))
+ to_file.write('\n')
+
+ if not revision.rev.message:
+ to_file.write('\tNo commit message\n')
+ else:
+ message = revision.rev.message.rstrip('\r\n')
+ for l in message.split('\n'):
+ to_file.write('\t%s\n' % (l.lstrip(),))
+ to_file.write('\n')
+
+
+def line_log(rev, max_chars):
+ lf = LineLogFormatter(None)
+ return lf.log_string(None, rev, max_chars)
+
+
+class LogFormatterRegistry(registry.Registry):
+ """Registry for log formatters"""
+
+ def make_formatter(self, name, *args, **kwargs):
+ """Construct a formatter from arguments.
+
+ :param name: Name of the formatter to construct. 'short', 'long' and
+ 'line' are built-in.
+ """
+ return self.get(name)(*args, **kwargs)
+
+ def get_default(self, branch):
+ c = branch.get_config_stack()
+ return self.get(c.get('log_format'))
+
+
+log_formatter_registry = LogFormatterRegistry()
+
+
+log_formatter_registry.register('short', ShortLogFormatter,
+ 'Moderately short log format.')
+log_formatter_registry.register('long', LongLogFormatter,
+ 'Detailed log format.')
+log_formatter_registry.register('line', LineLogFormatter,
+ 'Log format with one line per revision.')
+log_formatter_registry.register('gnu-changelog', GnuChangelogLogFormatter,
+ 'Format used by GNU ChangeLog files.')
+
+
+def register_formatter(name, formatter):
+ log_formatter_registry.register(name, formatter)
+
+
+def log_formatter(name, *args, **kwargs):
+ """Construct a formatter from arguments.
+
+ name -- Name of the formatter to construct; currently 'long', 'short' and
+ 'line' are supported.
+ """
+ try:
+ return log_formatter_registry.make_formatter(name, *args, **kwargs)
+ except KeyError:
+ raise errors.BzrCommandError(gettext("unknown log formatter: %r") % name)
+
+
+def author_list_all(rev):
+ return rev.get_apparent_authors()[:]
+
+
+def author_list_first(rev):
+ lst = rev.get_apparent_authors()
+ try:
+ return [lst[0]]
+ except IndexError:
+ return []
+
+
+def author_list_committer(rev):
+ return [rev.committer]
+
+
+author_list_registry = registry.Registry()
+
+author_list_registry.register('all', author_list_all,
+ 'All authors')
+
+author_list_registry.register('first', author_list_first,
+ 'The first author')
+
+author_list_registry.register('committer', author_list_committer,
+ 'The committer')
+
+
+def show_changed_revisions(branch, old_rh, new_rh, to_file=None,
+ log_format='long'):
+ """Show the change in revision history comparing the old revision history to the new one.
+
+ :param branch: The branch where the revisions exist
+ :param old_rh: The old revision history
+ :param new_rh: The new revision history
+ :param to_file: A file to write the results to. If None, stdout will be used
+ """
+ if to_file is None:
+ to_file = codecs.getwriter(get_terminal_encoding())(sys.stdout,
+ errors='replace')
+ lf = log_formatter(log_format,
+ show_ids=False,
+ to_file=to_file,
+ show_timezone='original')
+
+ # This is the first index which is different between
+ # old and new
+ base_idx = None
+ for i in xrange(max(len(new_rh),
+ len(old_rh))):
+ if (len(new_rh) <= i
+ or len(old_rh) <= i
+ or new_rh[i] != old_rh[i]):
+ base_idx = i
+ break
+
+ if base_idx is None:
+ to_file.write('Nothing seems to have changed\n')
+ return
+ ## TODO: It might be nice to do something like show_log
+ ## and show the merged entries. But since this is the
+ ## removed revisions, it shouldn't be as important
+ if base_idx < len(old_rh):
+ to_file.write('*'*60)
+ to_file.write('\nRemoved Revisions:\n')
+ for i in range(base_idx, len(old_rh)):
+ rev = branch.repository.get_revision(old_rh[i])
+ lr = LogRevision(rev, i+1, 0, None)
+ lf.log_revision(lr)
+ to_file.write('*'*60)
+ to_file.write('\n\n')
+ if base_idx < len(new_rh):
+ to_file.write('Added Revisions:\n')
+ show_log(branch,
+ lf,
+ None,
+ verbose=False,
+ direction='forward',
+ start_revision=base_idx+1,
+ end_revision=len(new_rh),
+ search=None)
+
+
+def get_history_change(old_revision_id, new_revision_id, repository):
+ """Calculate the uncommon lefthand history between two revisions.
+
+ :param old_revision_id: The original revision id.
+ :param new_revision_id: The new revision id.
+ :param repository: The repository to use for the calculation.
+
+ :return: old_history, new_history
+ """
+ old_history = []
+ old_revisions = set()
+ new_history = []
+ new_revisions = set()
+ graph = repository.get_graph()
+ new_iter = graph.iter_lefthand_ancestry(new_revision_id)
+ old_iter = graph.iter_lefthand_ancestry(old_revision_id)
+ stop_revision = None
+ do_old = True
+ do_new = True
+ while do_new or do_old:
+ if do_new:
+ try:
+ new_revision = new_iter.next()
+ except StopIteration:
+ do_new = False
+ else:
+ new_history.append(new_revision)
+ new_revisions.add(new_revision)
+ if new_revision in old_revisions:
+ stop_revision = new_revision
+ break
+ if do_old:
+ try:
+ old_revision = old_iter.next()
+ except StopIteration:
+ do_old = False
+ else:
+ old_history.append(old_revision)
+ old_revisions.add(old_revision)
+ if old_revision in new_revisions:
+ stop_revision = old_revision
+ break
+ new_history.reverse()
+ old_history.reverse()
+ if stop_revision is not None:
+ new_history = new_history[new_history.index(stop_revision) + 1:]
+ old_history = old_history[old_history.index(stop_revision) + 1:]
+ return old_history, new_history
+
+
+def show_branch_change(branch, output, old_revno, old_revision_id):
+ """Show the changes made to a branch.
+
+ :param branch: The branch to show changes about.
+ :param output: A file-like object to write changes to.
+ :param old_revno: The revno of the old tip.
+ :param old_revision_id: The revision_id of the old tip.
+ """
+ new_revno, new_revision_id = branch.last_revision_info()
+ old_history, new_history = get_history_change(old_revision_id,
+ new_revision_id,
+ branch.repository)
+ if old_history == [] and new_history == []:
+ output.write('Nothing seems to have changed\n')
+ return
+
+ log_format = log_formatter_registry.get_default(branch)
+ lf = log_format(show_ids=False, to_file=output, show_timezone='original')
+ if old_history != []:
+ output.write('*'*60)
+ output.write('\nRemoved Revisions:\n')
+ show_flat_log(branch.repository, old_history, old_revno, lf)
+ output.write('*'*60)
+ output.write('\n\n')
+ if new_history != []:
+ output.write('Added Revisions:\n')
+ start_revno = new_revno - len(new_history) + 1
+ show_log(branch, lf, None, verbose=False, direction='forward',
+ start_revision=start_revno,)
+
+
+def show_flat_log(repository, history, last_revno, lf):
+ """Show a simple log of the specified history.
+
+ :param repository: The repository to retrieve revisions from.
+ :param history: A list of revision_ids indicating the lefthand history.
+ :param last_revno: The revno of the last revision_id in the history.
+ :param lf: The log formatter to use.
+ """
+ start_revno = last_revno - len(history) + 1
+ revisions = repository.get_revisions(history)
+ for i, rev in enumerate(revisions):
+ lr = LogRevision(rev, i + last_revno, 0, None)
+ lf.log_revision(lr)
+
+
+def _get_info_for_log_files(revisionspec_list, file_list, add_cleanup):
+ """Find file-ids and kinds given a list of files and a revision range.
+
+ We search for files at the end of the range. If not found there,
+ we try the start of the range.
+
+ :param revisionspec_list: revision range as parsed on the command line
+ :param file_list: the list of paths given on the command line;
+ the first of these can be a branch location or a file path,
+ the remainder must be file paths
+ :param add_cleanup: When the branch returned is read locked,
+ an unlock call will be queued to the cleanup.
+ :return: (branch, info_list, start_rev_info, end_rev_info) where
+ info_list is a list of (relative_path, file_id, kind) tuples where
+ kind is one of the values 'directory', 'file', 'symlink', 'tree-reference'.
+ branch will be read-locked.
+ """
+ from bzrlib.builtins import _get_revision_range
+ tree, b, path = controldir.ControlDir.open_containing_tree_or_branch(
+ file_list[0])
+ add_cleanup(b.lock_read().unlock)
+ # XXX: It's damn messy converting a list of paths to relative paths when
+ # those paths might be deleted ones, they might be on a case-insensitive
+ # filesystem and/or they might be in silly locations (like another branch).
+ # For example, what should "log bzr://branch/dir/file1 file2" do? (Is
+ # file2 implicitly in the same dir as file1 or should its directory be
+ # taken from the current tree somehow?) For now, this solves the common
+ # case of running log in a nested directory, assuming paths beyond the
+ # first one haven't been deleted ...
+ if tree:
+ relpaths = [path] + tree.safe_relpath_files(file_list[1:])
+ else:
+ relpaths = [path] + file_list[1:]
+ info_list = []
+ start_rev_info, end_rev_info = _get_revision_range(revisionspec_list, b,
+ "log")
+ if relpaths in ([], [u'']):
+ return b, [], start_rev_info, end_rev_info
+ if start_rev_info is None and end_rev_info is None:
+ if tree is None:
+ tree = b.basis_tree()
+ tree1 = None
+ for fp in relpaths:
+ file_id = tree.path2id(fp)
+ kind = _get_kind_for_file_id(tree, file_id)
+ if file_id is None:
+ # go back to when time began
+ if tree1 is None:
+ try:
+ rev1 = b.get_rev_id(1)
+ except errors.NoSuchRevision:
+ # No history at all
+ file_id = None
+ kind = None
+ else:
+ tree1 = b.repository.revision_tree(rev1)
+ if tree1:
+ file_id = tree1.path2id(fp)
+ kind = _get_kind_for_file_id(tree1, file_id)
+ info_list.append((fp, file_id, kind))
+
+ elif start_rev_info == end_rev_info:
+ # One revision given - file must exist in it
+ tree = b.repository.revision_tree(end_rev_info.rev_id)
+ for fp in relpaths:
+ file_id = tree.path2id(fp)
+ kind = _get_kind_for_file_id(tree, file_id)
+ info_list.append((fp, file_id, kind))
+
+ else:
+ # Revision range given. Get the file-id from the end tree.
+ # If that fails, try the start tree.
+ rev_id = end_rev_info.rev_id
+ if rev_id is None:
+ tree = b.basis_tree()
+ else:
+ tree = b.repository.revision_tree(rev_id)
+ tree1 = None
+ for fp in relpaths:
+ file_id = tree.path2id(fp)
+ kind = _get_kind_for_file_id(tree, file_id)
+ if file_id is None:
+ if tree1 is None:
+ rev_id = start_rev_info.rev_id
+ if rev_id is None:
+ rev1 = b.get_rev_id(1)
+ tree1 = b.repository.revision_tree(rev1)
+ else:
+ tree1 = b.repository.revision_tree(rev_id)
+ file_id = tree1.path2id(fp)
+ kind = _get_kind_for_file_id(tree1, file_id)
+ info_list.append((fp, file_id, kind))
+ return b, info_list, start_rev_info, end_rev_info
+
+
+def _get_kind_for_file_id(tree, file_id):
+ """Return the kind of a file-id or None if it doesn't exist."""
+ if file_id is not None:
+ return tree.kind(file_id)
+ else:
+ return None
+
+
+properties_handler_registry = registry.Registry()
+
+# Use the properties handlers to print out bug information if available
+def _bugs_properties_handler(revision):
+ if 'bugs' in revision.properties:
+ bug_lines = revision.properties['bugs'].split('\n')
+ bug_rows = [line.split(' ', 1) for line in bug_lines]
+ fixed_bug_urls = [row[0] for row in bug_rows if
+ len(row) > 1 and row[1] == 'fixed']
+
+ if fixed_bug_urls:
+ return {ngettext('fixes bug', 'fixes bugs', len(fixed_bug_urls)):\
+ ' '.join(fixed_bug_urls)}
+ return {}
+
+properties_handler_registry.register('bugs_properties_handler',
+ _bugs_properties_handler)
+
+
+ # adapters through which the revision ids to log are filtered. When log is called, the
+# log_rev_iterator is adapted through each of these factory methods.
+# Plugins are welcome to mutate this list in any way they like - as long
+# as the overall behaviour is preserved. At this point there is no extensible
+# mechanism for getting parameters to each factory method, and until there is
+# this won't be considered a stable api.
+log_adapters = [
+ # core log logic
+ _make_batch_filter,
+ # read revision objects
+ _make_revision_objects,
+ # filter on log messages
+ _make_search_filter,
+ # generate deltas for things we will show
+ _make_delta_filter
+ ]
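The registries defined above (``log_formatter_registry``, ``author_list_registry``,
``properties_handler_registry``) and the ``log_adapters`` list are the extension
points of this module. A rough usage sketch follows; the formatter class and the
registration names are illustrative only, assuming the module is importable as
``bzrlib.log``:

    from bzrlib import log

    class TruncatedLineLogFormatter(log.LineLogFormatter):
        """Illustrative formatter: one line per revision, capped at 60 chars."""

        def log_revision(self, revision):
            self.to_file.write(self.log_string(revision.revno, revision.rev,
                                                60, revision.tags))
            self.to_file.write('\n')

    # Make the formatter available wherever a log format name is accepted,
    # e.g. through the 'log_format' configuration option.
    log.register_formatter('truncated-line', TruncatedLineLogFormatter)

    # An extra author-list handler, selectable with --authors=last.
    log.author_list_registry.register(
        'last', lambda rev: rev.get_apparent_authors()[-1:], 'The last author')

    # A properties handler surfacing the 'branch-nick' revision property.
    def _nick_properties_handler(rev):
        if 'branch-nick' in rev.properties:
            return {'nick': rev.properties['branch-nick']}
        return {}

    log.properties_handler_registry.register('nick_properties_handler',
                                              _nick_properties_handler)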
diff --git a/bzrlib/lru_cache.py b/bzrlib/lru_cache.py
new file mode 100644
index 0000000..68a24a6
--- /dev/null
+++ b/bzrlib/lru_cache.py
@@ -0,0 +1,316 @@
+# Copyright (C) 2006, 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A simple least-recently-used (LRU) cache."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ symbol_versioning,
+ trace,
+ )
+
+_null_key = object()
+
+class _LRUNode(object):
+ """This maintains the linked-list which is the lru internals."""
+
+ __slots__ = ('prev', 'next_key', 'key', 'value')
+
+ def __init__(self, key, value):
+ self.prev = None
+ self.next_key = _null_key
+ self.key = key
+ self.value = value
+
+ def __repr__(self):
+ if self.prev is None:
+ prev_key = None
+ else:
+ prev_key = self.prev.key
+ return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
+ self.next_key, prev_key)
+
+
+class LRUCache(object):
+ """A class which manages a cache of entries, removing unused ones."""
+
+ def __init__(self, max_cache=100, after_cleanup_count=None):
+ self._cache = {}
+ # The "HEAD" of the lru linked list
+ self._most_recently_used = None
+ # The "TAIL" of the lru linked list
+ self._least_recently_used = None
+ self._update_max_cache(max_cache, after_cleanup_count)
+
+ def __contains__(self, key):
+ return key in self._cache
+
+ def __getitem__(self, key):
+ cache = self._cache
+ node = cache[key]
+ # Inlined from _record_access to decrease the overhead of __getitem__
+ # We also have more knowledge about the structure: if __getitem__ is
+ # succeeding, then we know that self._most_recently_used must not be
+ # None, etc.
+ mru = self._most_recently_used
+ if node is mru:
+ # Nothing to do, this node is already at the head of the queue
+ return node.value
+ # Remove this node from the old location
+ node_prev = node.prev
+ next_key = node.next_key
+ # benchmarking shows that the lookup of _null_key in globals is faster
+ # than the attribute lookup for (node is self._least_recently_used)
+ if next_key is _null_key:
+ # 'node' is the _least_recently_used, because it doesn't have a
+ # 'next' item. So move the current lru to the previous node.
+ self._least_recently_used = node_prev
+ else:
+ node_next = cache[next_key]
+ node_next.prev = node_prev
+ node_prev.next_key = next_key
+ # Insert this node at the front of the list
+ node.next_key = mru.key
+ mru.prev = node
+ self._most_recently_used = node
+ node.prev = None
+ return node.value
+
+ def __len__(self):
+ return len(self._cache)
+
+ @symbol_versioning.deprecated_method(
+ symbol_versioning.deprecated_in((2, 5, 0)))
+ def add(self, key, value, cleanup=None):
+ if cleanup is not None:
+ raise ValueError("Per-node cleanup functions no longer supported")
+ return self.__setitem__(key, value)
+
+ def __setitem__(self, key, value):
+ """Add a new value to the cache"""
+ if key is _null_key:
+ raise ValueError('cannot use _null_key as a key')
+ if key in self._cache:
+ node = self._cache[key]
+ node.value = value
+ self._record_access(node)
+ else:
+ node = _LRUNode(key, value)
+ self._cache[key] = node
+ self._record_access(node)
+
+ if len(self._cache) > self._max_cache:
+ # Trigger the cleanup
+ self.cleanup()
+
+ def cache_size(self):
+ """Get the number of entries we will cache."""
+ return self._max_cache
+
+ def get(self, key, default=None):
+ node = self._cache.get(key, None)
+ if node is None:
+ return default
+ self._record_access(node)
+ return node.value
+
+ def keys(self):
+ """Get the list of keys currently cached.
+
+ Note that values returned here may not be available by the time you
+ request them later. This is simply meant as a peek into the current
+ state.
+
+ :return: An unordered list of keys that are currently cached.
+ """
+ return self._cache.keys()
+
+ def as_dict(self):
+ """Get a new dict with the same key:value pairs as the cache"""
+ return dict((k, n.value) for k, n in self._cache.iteritems())
+
+ items = symbol_versioning.deprecated_method(
+ symbol_versioning.deprecated_in((2, 5, 0)))(as_dict)
+
+ def cleanup(self):
+ """Clear the cache until it shrinks to the requested size.
+
+ This does not completely wipe the cache, just makes sure it is under
+ the after_cleanup_count.
+ """
+ # Make sure the cache is shrunk to the correct size
+ while len(self._cache) > self._after_cleanup_count:
+ self._remove_lru()
+
+ def _record_access(self, node):
+ """Record that key was accessed."""
+ # Move 'node' to the front of the queue
+ if self._most_recently_used is None:
+ self._most_recently_used = node
+ self._least_recently_used = node
+ return
+ elif node is self._most_recently_used:
+ # Nothing to do, this node is already at the head of the queue
+ return
+ # We've taken care of the tail pointer, remove the node, and insert it
+ # at the front
+ # REMOVE
+ if node is self._least_recently_used:
+ self._least_recently_used = node.prev
+ if node.prev is not None:
+ node.prev.next_key = node.next_key
+ if node.next_key is not _null_key:
+ node_next = self._cache[node.next_key]
+ node_next.prev = node.prev
+ # INSERT
+ node.next_key = self._most_recently_used.key
+ self._most_recently_used.prev = node
+ self._most_recently_used = node
+ node.prev = None
+
+ def _remove_node(self, node):
+ if node is self._least_recently_used:
+ self._least_recently_used = node.prev
+ self._cache.pop(node.key)
+ # If we have removed all entries, remove the head pointer as well
+ if self._least_recently_used is None:
+ self._most_recently_used = None
+ if node.prev is not None:
+ node.prev.next_key = node.next_key
+ if node.next_key is not _null_key:
+ node_next = self._cache[node.next_key]
+ node_next.prev = node.prev
+ # And remove this node's pointers
+ node.prev = None
+ node.next_key = _null_key
+
+ def _remove_lru(self):
+ """Remove one entry from the lru, and handle consequences.
+
+ If there are no more references to the lru, then this entry should be
+ removed from the cache.
+ """
+ self._remove_node(self._least_recently_used)
+
+ def clear(self):
+ """Clear out all of the cache."""
+ # Clean up in LRU order
+ while self._cache:
+ self._remove_lru()
+
+ def resize(self, max_cache, after_cleanup_count=None):
+ """Change the number of entries that will be cached."""
+ self._update_max_cache(max_cache,
+ after_cleanup_count=after_cleanup_count)
+
+ def _update_max_cache(self, max_cache, after_cleanup_count=None):
+ self._max_cache = max_cache
+ if after_cleanup_count is None:
+ self._after_cleanup_count = self._max_cache * 8 / 10
+ else:
+ self._after_cleanup_count = min(after_cleanup_count,
+ self._max_cache)
+ self.cleanup()
+
+
+class LRUSizeCache(LRUCache):
+ """An LRUCache that removes things based on the size of the values.
+
+ This differs in that it doesn't care how many actual items there are;
+ it simply cleans up the cache once more than a given amount of data is stored.
+
+ The size of items added will be computed using compute_size(value), which
+ defaults to len() if not supplied.
+ """
+
+ def __init__(self, max_size=1024*1024, after_cleanup_size=None,
+ compute_size=None):
+ """Create a new LRUSizeCache.
+
+ :param max_size: The max number of bytes to store before we start
+ clearing out entries.
+ :param after_cleanup_size: After cleaning up, shrink everything to this
+ size.
+ :param compute_size: A function to compute the size of the values. We
+ use a function here, so that you can pass 'len' if you are just
+ using simple strings, or a more complex function if you are using
+ something like a list of strings, or even a custom object.
+ The function should take the form "compute_size(value) => integer".
+ If not supplied, it defaults to 'len()'
+ """
+ self._value_size = 0
+ self._compute_size = compute_size
+ if compute_size is None:
+ self._compute_size = len
+ self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
+ LRUCache.__init__(self, max_cache=max(int(max_size/512), 1))
+
+ def __setitem__(self, key, value):
+ """Add a new value to the cache"""
+ if key is _null_key:
+ raise ValueError('cannot use _null_key as a key')
+ node = self._cache.get(key, None)
+ value_len = self._compute_size(value)
+ if value_len >= self._after_cleanup_size:
+ # The new value is 'too big to fit', as it would fill up/overflow
+ # the cache all by itself
+ trace.mutter('Adding the key %r to an LRUSizeCache failed.'
+ ' value %d is too big to fit in the cache'
+ ' with size %d %d', key, value_len,
+ self._after_cleanup_size, self._max_size)
+ if node is not None:
+ # We won't be replacing the old node, so just remove it
+ self._remove_node(node)
+ return
+ if node is None:
+ node = _LRUNode(key, value)
+ self._cache[key] = node
+ else:
+ self._value_size -= self._compute_size(node.value)
+ self._value_size += value_len
+ self._record_access(node)
+
+ if self._value_size > self._max_size:
+ # Time to cleanup
+ self.cleanup()
+
+ def cleanup(self):
+ """Clear the cache until it shrinks to the requested size.
+
+ This does not completely wipe the cache, just makes sure it is under
+ the after_cleanup_size.
+ """
+ # Make sure the cache is shrunk to the correct size
+ while self._value_size > self._after_cleanup_size:
+ self._remove_lru()
+
+ def _remove_node(self, node):
+ self._value_size -= self._compute_size(node.value)
+ LRUCache._remove_node(self, node)
+
+ def resize(self, max_size, after_cleanup_size=None):
+ """Change the number of bytes that will be cached."""
+ self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
+ max_cache = max(int(max_size/512), 1)
+ self._update_max_cache(max_cache)
+
+ def _update_max_size(self, max_size, after_cleanup_size=None):
+ self._max_size = max_size
+ if after_cleanup_size is None:
+ self._after_cleanup_size = self._max_size * 8 / 10
+ else:
+ self._after_cleanup_size = min(after_cleanup_size, self._max_size)
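A quick orientation on the API above: ``LRUCache`` behaves like a dict bounded by
entry count, while ``LRUSizeCache`` bounds the total size of the stored values.
A minimal sketch, assuming the module is importable as ``bzrlib.lru_cache``:

    from bzrlib import lru_cache

    # Count-bounded: keep at most 3 entries; shrink to 2 when the limit is hit.
    cache = lru_cache.LRUCache(max_cache=3, after_cleanup_count=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3
    cache['a']          # touching 'a' marks it most recently used
    cache['d'] = 4      # exceeds max_cache, so the cache trims itself to 2 entries
    print sorted(cache.keys())    # expected: ['a', 'd'] (the two most recent)

    # Size-bounded: evict once the summed compute_size() of the values
    # exceeds max_size.
    sized = lru_cache.LRUSizeCache(max_size=1024, compute_size=len)
    sized['blob'] = 'x' * 200
    print sized.get('blob', '<evicted>')[:10]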
diff --git a/bzrlib/lsprof.py b/bzrlib/lsprof.py
new file mode 100644
index 0000000..2fc04a8
--- /dev/null
+++ b/bzrlib/lsprof.py
@@ -0,0 +1,326 @@
+# this is copied from the lsprof distro because somehow
+# it is not installed by distutils
+# I made one modification to profile so that it returns a pair
+# instead of just the Stats object
+
+from __future__ import absolute_import
+
+import cPickle
+import os
+import sys
+import thread
+import threading
+from _lsprof import Profiler, profiler_entry
+
+from bzrlib import errors
+
+__all__ = ['profile', 'Stats']
+
+def profile(f, *args, **kwds):
+ """Run a function profile.
+
+ Exceptions are not caught: If you need stats even when exceptions are to be
+ raised, pass in a closure that will catch the exceptions and transform them
+ appropriately for your driver function.
+
+ Important caveat: only one profile can execute at a time. See BzrProfiler
+ for details.
+
+ :return: The function's return value and a stats object.
+ """
+ profiler = BzrProfiler()
+ profiler.start()
+ try:
+ ret = f(*args, **kwds)
+ finally:
+ stats = profiler.stop()
+ return ret, stats
+
+
+class BzrProfiler(object):
+ """Bzr utility wrapper around Profiler.
+
+ For most uses the module level 'profile()' function will be suitable.
+ However, when a simple wrapped function isn't available, profiling may
+ be easier to accomplish using this class.
+
+ To use it, create a BzrProfiler and call start() on it. Some arbitrary
+ time later call stop() to stop profiling and retrieve the statistics
+ from the code executed in the interim.
+
+ Note that profiling involves a threading.Lock around the actual profiling.
+ This is needed because profiling involves global manipulation of the python
+ interpreter state. As such you cannot perform multiple profiles at once.
+ Trying to do so will lock out the second profiler unless the global
+ bzrlib.lsprof.BzrProfiler.profiler_block is set to 0. Setting it to 0 will
+ cause profiling to fail rather than blocking.
+ """
+
+ profiler_block = 1
+ """Serialise rather than failing to profile concurrent profile requests."""
+
+ profiler_lock = threading.Lock()
+ """Global lock used to serialise profiles."""
+
+ def start(self):
+ """Start profiling.
+
+ This hooks into threading and will record all calls made until
+ stop() is called.
+ """
+ self._g_threadmap = {}
+ self.p = Profiler()
+ permitted = self.__class__.profiler_lock.acquire(
+ self.__class__.profiler_block)
+ if not permitted:
+ raise errors.InternalBzrError(msg="Already profiling something")
+ try:
+ self.p.enable(subcalls=True)
+ threading.setprofile(self._thread_profile)
+ except:
+ self.__class__.profiler_lock.release()
+ raise
+
+ def stop(self):
+ """Stop profiling.
+
+ This unhooks from threading and cleans up the profiler, returning
+ the gathered Stats object.
+
+ :return: A bzrlib.lsprof.Stats object.
+ """
+ try:
+ self.p.disable()
+ for pp in self._g_threadmap.values():
+ pp.disable()
+ threading.setprofile(None)
+ p = self.p
+ self.p = None
+ threads = {}
+ for tid, pp in self._g_threadmap.items():
+ threads[tid] = Stats(pp.getstats(), {})
+ self._g_threadmap = None
+ return Stats(p.getstats(), threads)
+ finally:
+ self.__class__.profiler_lock.release()
+
+ def _thread_profile(self, f, *args, **kwds):
+ # we lose the first profile point for a new thread in order to
+ # trampoline a new Profile object into place
+ thr = thread.get_ident()
+ self._g_threadmap[thr] = p = Profiler()
+ # this overrides our sys.setprofile hook:
+ p.enable(subcalls=True, builtins=True)
+
+
+class Stats(object):
+ """Wrapper around the collected data.
+
+ A Stats instance is created when the profiler finishes. Normal
+ usage is to use save() to write out the data to a file, or pprint()
+ to write human-readable information to the command line.
+ """
+
+ def __init__(self, data, threads):
+ self.data = data
+ self.threads = threads
+
+ def sort(self, crit="inlinetime"):
+ """Sort the data by the supplied critera.
+
+ :param crit: the data attribute used as the sort key."""
+ if crit not in profiler_entry.__dict__:
+ raise ValueError, "Can't sort by %s" % crit
+ self.data.sort(lambda b, a: cmp(getattr(a, crit),
+ getattr(b, crit)))
+ for e in self.data:
+ if e.calls:
+ e.calls.sort(lambda b, a: cmp(getattr(a, crit),
+ getattr(b, crit)))
+
+ def pprint(self, top=None, file=None):
+ """Pretty-print the data as plain text for human consumption.
+
+ :param top: only output the top n entries.
+ The default value of None means output all data.
+ :param file: the output file; if None, output will
+ default to stdout."""
+ if file is None:
+ file = sys.stdout
+ d = self.data
+ if top is not None:
+ d = d[:top]
+ cols = "% 12s %12s %11.4f %11.4f %s\n"
+ hcols = "% 12s %12s %12s %12s %s\n"
+ cols2 = "+%12s %12s %11.4f %11.4f + %s\n"
+ file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
+ "Inline(ms)", "module:lineno(function)"))
+ for e in d:
+ file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
+ e.inlinetime, label(e.code)))
+ if e.calls:
+ for se in e.calls:
+ file.write(cols % ("+%s" % se.callcount, se.reccallcount,
+ se.totaltime, se.inlinetime,
+ "+%s" % label(se.code)))
+
+ def freeze(self):
+ """Replace all references to code objects with string
+ descriptions; this makes it possible to pickle the instance."""
+
+ # this code is probably rather ickier than it needs to be!
+ for i in range(len(self.data)):
+ e = self.data[i]
+ if not isinstance(e.code, str):
+ self.data[i] = type(e)((label(e.code),) + e[1:])
+ if e.calls:
+ for j in range(len(e.calls)):
+ se = e.calls[j]
+ if not isinstance(se.code, str):
+ e.calls[j] = type(se)((label(se.code),) + se[1:])
+ for s in self.threads.values():
+ s.freeze()
+
+ def calltree(self, file):
+ """Output profiling data in calltree format (for KCacheGrind)."""
+ _CallTreeFilter(self.data).output(file)
+
+ def save(self, filename, format=None):
+ """Save profiling data to a file.
+
+ :param filename: the name of the output file
+ :param format: 'txt' for a text representation;
+ 'callgrind' for calltree format;
+ otherwise a pickled Python object. A format of None indicates
+ that the format to use is to be found from the filename. If
+ the name starts with callgrind.out, callgrind format is used
+ otherwise the format is given by the filename extension.
+ """
+ if format is None:
+ basename = os.path.basename(filename)
+ if basename.startswith('callgrind.out'):
+ format = "callgrind"
+ else:
+ ext = os.path.splitext(filename)[1]
+ if len(ext) > 1:
+ format = ext[1:]
+ outfile = open(filename, 'wb')
+ try:
+ if format == "callgrind":
+ self.calltree(outfile)
+ elif format == "txt":
+ self.pprint(file=outfile)
+ else:
+ self.freeze()
+ cPickle.dump(self, outfile, 2)
+ finally:
+ outfile.close()
+
+
+class _CallTreeFilter(object):
+ """Converter of a Stats object to input suitable for KCacheGrind.
+
+ This code is taken from http://ddaa.net/blog/python/lsprof-calltree
+ with the changes made by J.P. Calderone and Itamar applied. Note that
+ isinstance(code, str) needs to be used at times to determine if the code
+ object is actually an external code object (with a filename, etc.) or
+ a Python built-in.
+ """
+
+ def __init__(self, data):
+ self.data = data
+ self.out_file = None
+
+ def output(self, out_file):
+ self.out_file = out_file
+ out_file.write('events: Ticks\n')
+ self._print_summary()
+ for entry in self.data:
+ self._entry(entry)
+
+ def _print_summary(self):
+ max_cost = 0
+ for entry in self.data:
+ totaltime = int(entry.totaltime * 1000)
+ max_cost = max(max_cost, totaltime)
+ self.out_file.write('summary: %d\n' % (max_cost,))
+
+ def _entry(self, entry):
+ out_file = self.out_file
+ code = entry.code
+ inlinetime = int(entry.inlinetime * 1000)
+ #out_file.write('ob=%s\n' % (code.co_filename,))
+ if isinstance(code, str):
+ out_file.write('fi=~\n')
+ else:
+ out_file.write('fi=%s\n' % (code.co_filename,))
+ out_file.write('fn=%s\n' % (label(code, True),))
+ if isinstance(code, str):
+ out_file.write('0 %s\n' % (inlinetime,))
+ else:
+ out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
+ # recursive calls are counted in entry.calls
+ if entry.calls:
+ calls = entry.calls
+ else:
+ calls = []
+ if isinstance(code, str):
+ lineno = 0
+ else:
+ lineno = code.co_firstlineno
+ for subentry in calls:
+ self._subentry(lineno, subentry)
+ out_file.write('\n')
+
+ def _subentry(self, lineno, subentry):
+ out_file = self.out_file
+ code = subentry.code
+ totaltime = int(subentry.totaltime * 1000)
+ #out_file.write('cob=%s\n' % (code.co_filename,))
+ if isinstance(code, str):
+ out_file.write('cfi=~\n')
+ out_file.write('cfn=%s\n' % (label(code, True),))
+ out_file.write('calls=%d 0\n' % (subentry.callcount,))
+ else:
+ out_file.write('cfi=%s\n' % (code.co_filename,))
+ out_file.write('cfn=%s\n' % (label(code, True),))
+ out_file.write('calls=%d %d\n' % (
+ subentry.callcount, code.co_firstlineno))
+ out_file.write('%d %d\n' % (lineno, totaltime))
+
+_fn2mod = {}
+
+def label(code, calltree=False):
+ if isinstance(code, str):
+ return code
+ try:
+ mname = _fn2mod[code.co_filename]
+ except KeyError:
+ for k, v in sys.modules.items():
+ if v is None:
+ continue
+ if getattr(v, '__file__', None) is None:
+ continue
+ if not isinstance(v.__file__, str):
+ continue
+ if v.__file__.startswith(code.co_filename):
+ mname = _fn2mod[code.co_filename] = k
+ break
+ else:
+ mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename
+ if calltree:
+ return '%s %s:%d' % (code.co_name, mname, code.co_firstlineno)
+ else:
+ return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
+
+
+if __name__ == '__main__':
+ import os
+ sys.argv = sys.argv[1:]
+ if not sys.argv:
+ sys.stderr.write("usage: lsprof.py <script> <arguments...>\n")
+ sys.exit(2)
+ sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
+ ret, stats = profile(execfile, sys.argv[0], globals(), locals())
+ stats.sort()
+ stats.pprint()
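A minimal sketch of the API above, assuming the module is importable as
``bzrlib.lsprof``: profile() runs a callable under BzrProfiler and returns the
callable's result together with a Stats object, which can then be sorted,
printed, or saved (the ``callgrind.out`` prefix below selects callgrind output,
per the naming rule in Stats.save):

    from bzrlib import lsprof

    def work():
        return sum(i * i for i in range(10000))

    result, stats = lsprof.profile(work)   # returns (return value, Stats)
    stats.sort('inlinetime')               # sort by any profiler_entry attribute
    stats.pprint(top=10)                   # human-readable text on stdout
    stats.save('callgrind.out.work')       # callgrind format, chosen by filename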
diff --git a/bzrlib/mail_client.py b/bzrlib/mail_client.py
new file mode 100644
index 0000000..686f524
--- /dev/null
+++ b/bzrlib/mail_client.py
@@ -0,0 +1,641 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import errno
+import os
+import subprocess
+import sys
+import tempfile
+
+import bzrlib
+from bzrlib import (
+ config as _mod_config,
+ email_message,
+ errors,
+ msgeditor,
+ osutils,
+ urlutils,
+ registry
+ )
+
+mail_client_registry = registry.Registry()
+
+
+class MailClient(object):
+ """A mail client that can send messages with attachements."""
+
+ def __init__(self, config):
+ self.config = config
+
+ def compose(self, prompt, to, subject, attachment, mime_subtype,
+ extension, basename=None, body=None):
+ """Compose (and possibly send) an email message
+
+ Must be implemented by subclasses.
+
+ :param prompt: A message to tell the user what to do. Supported by
+ the Editor client, but ignored by others
+ :param to: The address to send the message to
+ :param subject: The contents of the subject line
+ :param attachment: An email attachment, as a bytestring
+ :param mime_subtype: The attachment is assumed to be a subtype of
+ Text. This allows the precise subtype to be specified, e.g.
+ "plain", "x-patch", etc.
+ :param extension: The file extension associated with the attachment
+ type, e.g. ".patch"
+ :param basename: The name to use for the attachment, e.g.
+ "send-nick-3252"
+ """
+ raise NotImplementedError
+
+ def compose_merge_request(self, to, subject, directive, basename=None,
+ body=None):
+ """Compose (and possibly send) a merge request
+
+ :param to: The address to send the request to
+ :param subject: The subject line to use for the request
+ :param directive: A merge directive representing the merge request, as
+ a bytestring.
+ :param basename: The name to use for the attachment, e.g.
+ "send-nick-3252"
+ """
+ prompt = self._get_merge_prompt("Please describe these changes:", to,
+ subject, directive)
+ self.compose(prompt, to, subject, directive,
+ 'x-patch', '.patch', basename, body)
+
+ def _get_merge_prompt(self, prompt, to, subject, attachment):
+ """Generate a prompt string. Overridden by Editor.
+
+ :param prompt: A string suggesting what user should do
+ :param to: The address the mail will be sent to
+ :param subject: The subject line of the mail
+ :param attachment: The attachment that will be used
+ """
+ return ''
+
+
+class Editor(MailClient):
+ __doc__ = """DIY mail client that uses commit message editor"""
+
+ supports_body = True
+
+ def _get_merge_prompt(self, prompt, to, subject, attachment):
+ """See MailClient._get_merge_prompt"""
+ return (u"%s\n\n"
+ u"To: %s\n"
+ u"Subject: %s\n\n"
+ u"%s" % (prompt, to, subject,
+ attachment.decode('utf-8', 'replace')))
+
+ def compose(self, prompt, to, subject, attachment, mime_subtype,
+ extension, basename=None, body=None):
+ """See MailClient.compose"""
+ if not to:
+ raise errors.NoMailAddressSpecified()
+ body = msgeditor.edit_commit_message(prompt, start_message=body)
+ if body == '':
+ raise errors.NoMessageSupplied()
+ email_message.EmailMessage.send(self.config,
+ self.config.get('email'),
+ to,
+ subject,
+ body,
+ attachment,
+ attachment_mime_subtype=mime_subtype)
+mail_client_registry.register('editor', Editor,
+ help=Editor.__doc__)
+
+
+class BodyExternalMailClient(MailClient):
+
+ supports_body = True
+
+ def _get_client_commands(self):
+ """Provide a list of commands that may invoke the mail client"""
+ if sys.platform == 'win32':
+ import win32utils
+ return [win32utils.get_app_path(i) for i in self._client_commands]
+ else:
+ return self._client_commands
+
+ def compose(self, prompt, to, subject, attachment, mime_subtype,
+ extension, basename=None, body=None):
+ """See MailClient.compose.
+
+ Writes the attachment to a temporary file, invokes _compose.
+ """
+ if basename is None:
+ basename = 'attachment'
+ pathname = osutils.mkdtemp(prefix='bzr-mail-')
+ attach_path = osutils.pathjoin(pathname, basename + extension)
+ outfile = open(attach_path, 'wb')
+ try:
+ outfile.write(attachment)
+ finally:
+ outfile.close()
+ if body is not None:
+ kwargs = {'body': body}
+ else:
+ kwargs = {}
+ self._compose(prompt, to, subject, attach_path, mime_subtype,
+ extension, **kwargs)
+
+ def _compose(self, prompt, to, subject, attach_path, mime_subtype,
+ extension, body=None, from_=None):
+ """Invoke a mail client as a commandline process.
+
+ Overridden by MAPIClient.
+ :param to: The address to send the mail to
+ :param subject: The subject line for the mail
+ :param attach_path: The path to the attachment
+ :param mime_subtype: The attachment is assumed to have a major type of
+ "text", but the precise subtype can be specified here
+ :param extension: A file extension (including period) associated with
+ the attachment type.
+ :param body: Optional body text.
+ :param from_: Optional From: header.
+ """
+ for name in self._get_client_commands():
+ cmdline = [self._encode_path(name, 'executable')]
+ if body is not None:
+ kwargs = {'body': body}
+ else:
+ kwargs = {}
+ if from_ is not None:
+ kwargs['from_'] = from_
+ cmdline.extend(self._get_compose_commandline(to, subject,
+ attach_path,
+ **kwargs))
+ try:
+ subprocess.call(cmdline)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ break
+ else:
+ raise errors.MailClientNotFound(self._client_commands)
+
+ def _get_compose_commandline(self, to, subject, attach_path, body):
+ """Determine the commandline to use for composing a message
+
+ Implemented by various subclasses
+ :param to: The address to send the mail to
+ :param subject: The subject line for the mail
+ :param attach_path: The path to the attachment
+ """
+ raise NotImplementedError
+
+ def _encode_safe(self, u):
+ """Encode possible unicode string argument to 8-bit string
+ in user_encoding. Unencodable characters will be replaced
+ with '?'.
+
+ :param u: possible unicode string.
+ :return: encoded string if u is unicode, u itself otherwise.
+ """
+ if isinstance(u, unicode):
+ return u.encode(osutils.get_user_encoding(), 'replace')
+ return u
+
+ def _encode_path(self, path, kind):
+ """Encode unicode path in user encoding.
+
+ :param path: possible unicode path.
+ :param kind: path kind ('executable' or 'attachment').
+ :return: encoded path if path is unicode,
+ path itself otherwise.
+ :raise: UnableEncodePath.
+ """
+ if isinstance(path, unicode):
+ try:
+ return path.encode(osutils.get_user_encoding())
+ except UnicodeEncodeError:
+ raise errors.UnableEncodePath(path, kind)
+ return path
+
+
+class ExternalMailClient(BodyExternalMailClient):
+ __doc__ = """An external mail client."""
+
+ supports_body = False
+
+
+class Evolution(BodyExternalMailClient):
+ __doc__ = """Evolution mail client."""
+
+ _client_commands = ['evolution']
+
+ def _get_compose_commandline(self, to, subject, attach_path, body=None):
+ """See ExternalMailClient._get_compose_commandline"""
+ message_options = {}
+ if subject is not None:
+ message_options['subject'] = subject
+ if attach_path is not None:
+ message_options['attach'] = attach_path
+ if body is not None:
+ message_options['body'] = body
+ options_list = ['%s=%s' % (k, urlutils.escape(v)) for (k, v) in
+ sorted(message_options.iteritems())]
+ return ['mailto:%s?%s' % (self._encode_safe(to or ''),
+ '&'.join(options_list))]
+mail_client_registry.register('evolution', Evolution,
+ help=Evolution.__doc__)
+
+
+class Mutt(BodyExternalMailClient):
+ __doc__ = """Mutt mail client."""
+
+ _client_commands = ['mutt']
+
+ def _get_compose_commandline(self, to, subject, attach_path, body=None):
+ """See ExternalMailClient._get_compose_commandline"""
+ message_options = []
+ if subject is not None:
+ message_options.extend(['-s', self._encode_safe(subject)])
+ if attach_path is not None:
+ message_options.extend(['-a',
+ self._encode_path(attach_path, 'attachment')])
+ if body is not None:
+ # Store the temp file object in self, so that it does not get
+ # garbage collected and delete the file before mutt can read it.
+ self._temp_file = tempfile.NamedTemporaryFile(
+ prefix="mutt-body-", suffix=".txt")
+ self._temp_file.write(body)
+ self._temp_file.flush()
+ message_options.extend(['-i', self._temp_file.name])
+ if to is not None:
+ message_options.extend(['--', self._encode_safe(to)])
+ return message_options
+mail_client_registry.register('mutt', Mutt,
+ help=Mutt.__doc__)
+
+
+class Thunderbird(BodyExternalMailClient):
+ __doc__ = """Mozilla Thunderbird (or Icedove)
+
+ Note that Thunderbird 1.5 is buggy and does not support setting
+ "to" simultaneously with including a attachment.
+
+ There is a workaround if no attachment is present, but we always need to
+ send attachments.
+ """
+
+ _client_commands = ['thunderbird', 'mozilla-thunderbird', 'icedove',
+ '/Applications/Mozilla/Thunderbird.app/Contents/MacOS/thunderbird-bin',
+ '/Applications/Thunderbird.app/Contents/MacOS/thunderbird-bin']
+
+ def _get_compose_commandline(self, to, subject, attach_path, body=None):
+ """See ExternalMailClient._get_compose_commandline"""
+ message_options = {}
+ if to is not None:
+ message_options['to'] = self._encode_safe(to)
+ if subject is not None:
+ message_options['subject'] = self._encode_safe(subject)
+ if attach_path is not None:
+ message_options['attachment'] = urlutils.local_path_to_url(
+ attach_path)
+ if body is not None:
+ options_list = ['body=%s' % urlutils.quote(self._encode_safe(body))]
+ else:
+ options_list = []
+ options_list.extend(["%s='%s'" % (k, v) for k, v in
+ sorted(message_options.iteritems())])
+ return ['-compose', ','.join(options_list)]
+mail_client_registry.register('thunderbird', Thunderbird,
+ help=Thunderbird.__doc__)
+
+
+class KMail(ExternalMailClient):
+ __doc__ = """KDE mail client."""
+
+ _client_commands = ['kmail']
+
+ def _get_compose_commandline(self, to, subject, attach_path):
+ """See ExternalMailClient._get_compose_commandline"""
+ message_options = []
+ if subject is not None:
+ message_options.extend(['-s', self._encode_safe(subject)])
+ if attach_path is not None:
+ message_options.extend(['--attach',
+ self._encode_path(attach_path, 'attachment')])
+ if to is not None:
+ message_options.extend([self._encode_safe(to)])
+ return message_options
+mail_client_registry.register('kmail', KMail,
+ help=KMail.__doc__)
+
+
+class Claws(ExternalMailClient):
+ __doc__ = """Claws mail client."""
+
+ supports_body = True
+
+ _client_commands = ['claws-mail']
+
+ def _get_compose_commandline(self, to, subject, attach_path, body=None,
+ from_=None):
+ """See ExternalMailClient._get_compose_commandline"""
+ compose_url = []
+ if from_ is not None:
+ compose_url.append('from=' + urlutils.quote(from_))
+ if subject is not None:
+ # Don't use urlutils.quote_plus because Claws doesn't seem
+ # to recognise spaces encoded as "+".
+ compose_url.append(
+ 'subject=' + urlutils.quote(self._encode_safe(subject)))
+ if body is not None:
+ compose_url.append(
+ 'body=' + urlutils.quote(self._encode_safe(body)))
+ # to must be supplied for the claws-mail --compose syntax to work.
+ if to is None:
+ raise errors.NoMailAddressSpecified()
+ compose_url = 'mailto:%s?%s' % (
+ self._encode_safe(to), '&'.join(compose_url))
+ # Collect command-line options.
+ message_options = ['--compose', compose_url]
+ if attach_path is not None:
+ message_options.extend(
+ ['--attach', self._encode_path(attach_path, 'attachment')])
+ return message_options
+
+ def _compose(self, prompt, to, subject, attach_path, mime_subtype,
+ extension, body=None, from_=None):
+ """See ExternalMailClient._compose"""
+ if from_ is None:
+ from_ = self.config.get('email')
+ super(Claws, self)._compose(prompt, to, subject, attach_path,
+ mime_subtype, extension, body, from_)
+
+
+mail_client_registry.register('claws', Claws,
+ help=Claws.__doc__)
+
+
+class XDGEmail(BodyExternalMailClient):
+ __doc__ = """xdg-email attempts to invoke the user's preferred mail client"""
+
+ _client_commands = ['xdg-email']
+
+ def _get_compose_commandline(self, to, subject, attach_path, body=None):
+ """See ExternalMailClient._get_compose_commandline"""
+ if not to:
+ raise errors.NoMailAddressSpecified()
+ commandline = [self._encode_safe(to)]
+ if subject is not None:
+ commandline.extend(['--subject', self._encode_safe(subject)])
+ if attach_path is not None:
+ commandline.extend(['--attach',
+ self._encode_path(attach_path, 'attachment')])
+ if body is not None:
+ commandline.extend(['--body', self._encode_safe(body)])
+ return commandline
+mail_client_registry.register('xdg-email', XDGEmail,
+ help=XDGEmail.__doc__)
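A rough sketch of how a caller ties these clients together (the recipient
address, subject and helper function below are illustrative only): look a
client up in ``mail_client_registry`` by name, instantiate it with a config
stack such as ``branch.get_config_stack()``, and call compose() or
compose_merge_request():

    from bzrlib import mail_client

    def send_patch_for_review(branch, patch_bytes):
        conf = branch.get_config_stack()
        client_class = mail_client.mail_client_registry.get('xdg-email')
        client = client_class(conf)
        client.compose('Describe these changes:',   # prompt (used by Editor only)
                       'dev@example.com',           # to
                       '[PATCH] example change',    # subject
                       patch_bytes,                 # attachment, as a bytestring
                       'x-patch', '.patch',         # mime subtype and extension
                       basename='example-change',
                       body='Please review the attached patch.')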
+
+
+class EmacsMail(ExternalMailClient):
+ __doc__ = """Call emacsclient to have a mail buffer.
+
+ This only works for emacs >= 22.1 due to recent -e/--eval support.
+
+ The good news is that this implementation will work with all mail
+ agents registered against ``mail-user-agent``. So there is no need
+ to instantiate ExternalMailClient for each and every GNU Emacs
+ MUA.
+
+ Users just have to ensure that ``mail-user-agent`` is set according
+ to their tastes.
+ """
+
+ _client_commands = ['emacsclient']
+
+ def __init__(self, config):
+ super(EmacsMail, self).__init__(config)
+ self.elisp_tmp_file = None
+
+ def _prepare_send_function(self):
+ """Write our wrapper function into a temporary file.
+
+ This temporary file will be loaded at runtime in
+ _get_compose_commandline function.
+
+ This function does not remove the file. That is intentional,
+ since _get_compose_commandline won't run the send-mail
+ function directly but only returns the command line to run.
+ Removing our temporary file here would prevent our sendmail
+ function from working. (The file is deleted by some elisp code
+ after being read by Emacs.)
+ """
+
+ _defun = r"""(defun bzr-add-mime-att (file)
+ "Attach FILE to a mail buffer as a MIME attachment."
+ (let ((agent mail-user-agent))
+ (if (and file (file-exists-p file))
+ (cond
+ ((eq agent 'sendmail-user-agent)
+ (progn
+ (mail-text)
+ (newline)
+ (if (functionp 'etach-attach)
+ (etach-attach file)
+ (mail-attach-file file))))
+ ((or (eq agent 'message-user-agent)
+ (eq agent 'gnus-user-agent)
+ (eq agent 'mh-e-user-agent))
+ (progn
+ (mml-attach-file file "text/x-patch" "BZR merge" "inline")))
+ ((eq agent 'mew-user-agent)
+ (progn
+ (mew-draft-prepare-attachments)
+ (mew-attach-link file (file-name-nondirectory file))
+ (let* ((nums (mew-syntax-nums))
+ (syntax (mew-syntax-get-entry mew-encode-syntax nums)))
+ (mew-syntax-set-cd syntax "BZR merge")
+ (mew-encode-syntax-print mew-encode-syntax))
+ (mew-header-goto-body)))
+ (t
+ (message "Unhandled MUA, report it on bazaar@lists.canonical.com")))
+ (error "File %s does not exist." file))))
+"""
+
+ fd, temp_file = tempfile.mkstemp(prefix="emacs-bzr-send-",
+ suffix=".el")
+ try:
+ os.write(fd, _defun)
+ finally:
+ os.close(fd) # Just close the handle but do not remove the file.
+ return temp_file
+
+ def _get_compose_commandline(self, to, subject, attach_path):
+ commandline = ["--eval"]
+
+ _to = "nil"
+ _subject = "nil"
+
+ if to is not None:
+ _to = ("\"%s\"" % self._encode_safe(to).replace('"', '\\"'))
+ if subject is not None:
+ _subject = ("\"%s\"" %
+ self._encode_safe(subject).replace('"', '\\"'))
+
+ # Funcall the default mail composition function
+ # This will work with any mail mode including default mail-mode
+ # User must tweak mail-user-agent variable to tell what function
+ # will be called inside compose-mail.
+ mail_cmd = "(compose-mail %s %s)" % (_to, _subject)
+ commandline.append(mail_cmd)
+
+ # Try to attach a MIME attachment using our wrapper function
+ if attach_path is not None:
+ # Do not create a file if there is no attachment
+ elisp = self._prepare_send_function()
+ self.elisp_tmp_file = elisp
+ lmmform = '(load "%s")' % elisp
+ mmform = '(bzr-add-mime-att "%s")' % \
+ self._encode_path(attach_path, 'attachment')
+ rmform = '(delete-file "%s")' % elisp
+ commandline.append(lmmform)
+ commandline.append(mmform)
+ commandline.append(rmform)
+
+ return commandline
+mail_client_registry.register('emacsclient', EmacsMail,
+ help=EmacsMail.__doc__)
+
+
+class MAPIClient(BodyExternalMailClient):
+ __doc__ = """Default Windows mail client launched using MAPI."""
+
+ def _compose(self, prompt, to, subject, attach_path, mime_subtype,
+ extension, body=None):
+ """See ExternalMailClient._compose.
+
+ This implementation uses MAPI via the simplemapi ctypes wrapper.
+ """
+ from bzrlib.util import simplemapi
+ try:
+ simplemapi.SendMail(to or '', subject or '', body or '',
+ attach_path)
+ except simplemapi.MAPIError, e:
+ if e.code != simplemapi.MAPI_USER_ABORT:
+ raise errors.MailClientNotFound(['MAPI supported mail client'
+ ' (error %d)' % (e.code,)])
+mail_client_registry.register('mapi', MAPIClient,
+ help=MAPIClient.__doc__)
+
+
+class MailApp(BodyExternalMailClient):
+ __doc__ = """Use MacOS X's Mail.app for sending email messages.
+
+ Although it would be nice to use appscript, it's not installed
+ with the shipped Python installations. We instead build an
+ AppleScript and invoke the script using osascript(1). We don't
+ use the _encode_safe() routines as it's not clear what encoding
+ osascript expects the script to be in.
+ """
+
+ _client_commands = ['osascript']
+
+ def _get_compose_commandline(self, to, subject, attach_path, body=None,
+ from_=None):
+ """See ExternalMailClient._get_compose_commandline"""
+
+ fd, self.temp_file = tempfile.mkstemp(prefix="bzr-send-",
+ suffix=".scpt")
+ try:
+ os.write(fd, 'tell application "Mail"\n')
+ os.write(fd, 'set newMessage to make new outgoing message\n')
+ os.write(fd, 'tell newMessage\n')
+ if to is not None:
+ os.write(fd, 'make new to recipient with properties'
+ ' {address:"%s"}\n' % to)
+ if from_ is not None:
+ # though from_ doesn't actually seem to be used
+ os.write(fd, 'set sender to "%s"\n'
+ % from_.replace('"', '\\"'))
+ if subject is not None:
+ os.write(fd, 'set subject to "%s"\n'
+ % subject.replace('"', '\\"'))
+ if body is not None:
+ # FIXME: would be nice to prepend the body to the
+ # existing content (e.g., preserve signature), but
+ # can't seem to figure out the right applescript
+ # incantation.
+ os.write(fd, 'set content to "%s\\n\n"\n' %
+ body.replace('"', '\\"').replace('\n', '\\n'))
+
+ if attach_path is not None:
+ # FIXME: would be nice to first append a newline to
+ # ensure the attachment is on a new paragraph, but
+ # can't seem to figure out the right applescript
+ # incantation.
+ os.write(fd, 'tell content to make new attachment'
+ ' with properties {file name:"%s"}'
+ ' at after the last paragraph\n'
+ % self._encode_path(attach_path, 'attachment'))
+ os.write(fd, 'set visible to true\n')
+ os.write(fd, 'end tell\n')
+ os.write(fd, 'end tell\n')
+ finally:
+ os.close(fd) # Just close the handle but do not remove the file.
+ return [self.temp_file]
+mail_client_registry.register('mail.app', MailApp,
+ help=MailApp.__doc__)
+
+
+class DefaultMail(MailClient):
+ __doc__ = """Default mail handling. Tries XDGEmail (or MAPIClient on Windows),
+ falls back to Editor"""
+
+ supports_body = True
+
+ def _mail_client(self):
+ """Determine the preferred mail client for this platform"""
+ if osutils.supports_mapi():
+ return MAPIClient(self.config)
+ else:
+ return XDGEmail(self.config)
+
+ def compose(self, prompt, to, subject, attachment, mime_subtype,
+ extension, basename=None, body=None):
+ """See MailClient.compose"""
+ try:
+ return self._mail_client().compose(prompt, to, subject,
+ attachment, mime_subtype,
+ extension, basename, body)
+ except errors.MailClientNotFound:
+ return Editor(self.config).compose(prompt, to, subject,
+ attachment, mime_subtype, extension, body)
+
+ def compose_merge_request(self, to, subject, directive, basename=None,
+ body=None):
+ """See MailClient.compose_merge_request"""
+ try:
+ return self._mail_client().compose_merge_request(to, subject,
+ directive, basename=basename, body=body)
+ except errors.MailClientNotFound:
+ return Editor(self.config).compose_merge_request(to, subject,
+ directive, basename=basename, body=body)
+mail_client_registry.register('default', DefaultMail,
+ help=DefaultMail.__doc__)
+mail_client_registry.default_key = 'default'
+
+opt_mail_client = _mod_config.RegistryOption('mail_client',
+ mail_client_registry, help='E-mail client to use.', invalid='error')
diff --git a/bzrlib/memorytree.py b/bzrlib/memorytree.py
new file mode 100644
index 0000000..a459da8
--- /dev/null
+++ b/bzrlib/memorytree.py
@@ -0,0 +1,324 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""MemoryTree object.
+
+See MemoryTree for more details.
+"""
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib import (
+ errors,
+ mutabletree,
+ revision as _mod_revision,
+ )
+from bzrlib.decorators import needs_read_lock
+from bzrlib.inventory import Inventory
+from bzrlib.osutils import sha_file
+from bzrlib.mutabletree import needs_tree_write_lock
+from bzrlib.transport.memory import MemoryTransport
+
+
+class MemoryTree(mutabletree.MutableInventoryTree):
+ """A MemoryTree is a specialisation of MutableTree.
+
+ It maintains nearly no state outside of read_lock and write_lock
+ transactions. (It keeps only a reference to the branch and its
+ last-revision.)
+ """
+
+ def __init__(self, branch, revision_id):
+ """Construct a MemoryTree for branch using revision_id."""
+ self.branch = branch
+ self.bzrdir = branch.bzrdir
+ self._branch_revision_id = revision_id
+ self._locks = 0
+ self._lock_mode = None
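+ # _locks counts nested lock calls; _lock_mode is "r" or "w" while the
+ # tree is locked and None otherwise (see lock_read/lock_write/unlock).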
+
+ def get_config_stack(self):
+ return self.branch.get_config_stack()
+
+ def is_control_filename(self, filename):
+ # Memory tree doesn't have any control filenames
+ return False
+
+ @needs_tree_write_lock
+ def _add(self, files, ids, kinds):
+ """See MutableTree._add."""
+ for f, file_id, kind in zip(files, ids, kinds):
+ if kind is None:
+ kind = 'file'
+ if file_id is None:
+ self._inventory.add_path(f, kind=kind)
+ else:
+ self._inventory.add_path(f, kind=kind, file_id=file_id)
+
+ def basis_tree(self):
+ """See Tree.basis_tree()."""
+ return self._basis_tree
+
+ @staticmethod
+ def create_on_branch(branch):
+ """Create a MemoryTree for branch, using the last-revision of branch."""
+ revision_id = _mod_revision.ensure_null(branch.last_revision())
+ return MemoryTree(branch, revision_id)
+
+ def _gather_kinds(self, files, kinds):
+ """See MutableTree._gather_kinds.
+
+ This implementation does not care about the file kind of
+ missing files, so is a no-op.
+ """
+
+ def get_file(self, file_id, path=None):
+ """See Tree.get_file."""
+ if path is None:
+ path = self.id2path(file_id)
+ return self._file_transport.get(path)
+
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ """See Tree.get_file_sha1()."""
+ if path is None:
+ path = self.id2path(file_id)
+ stream = self._file_transport.get(path)
+ return sha_file(stream)
+
+ def get_root_id(self):
+ return self.path2id('')
+
+ def _comparison_data(self, entry, path):
+ """See Tree._comparison_data."""
+ if entry is None:
+ return None, False, None
+ return entry.kind, entry.executable, None
+
+ @needs_tree_write_lock
+ def rename_one(self, from_rel, to_rel):
+ file_id = self.path2id(from_rel)
+ to_dir, to_tail = os.path.split(to_rel)
+ to_parent_id = self.path2id(to_dir)
+ self._file_transport.move(from_rel, to_rel)
+ self._inventory.rename(file_id, to_parent_id, to_tail)
+
+ def path_content_summary(self, path):
+ """See Tree.path_content_summary."""
+ id = self.path2id(path)
+ if id is None:
+ return 'missing', None, None, None
+ kind = self.kind(id)
+ if kind == 'file':
+ bytes = self._file_transport.get_bytes(path)
+ size = len(bytes)
+ executable = self._inventory[id].executable
+ sha1 = None # no stat cache
+ return (kind, size, executable, sha1)
+ elif kind == 'directory':
+ # memory tree does not support nested trees yet.
+ return kind, None, None, None
+ elif kind == 'symlink':
+ raise NotImplementedError('symlink support')
+ else:
+ raise NotImplementedError('unknown kind')
+
+ def _file_size(self, entry, stat_value):
+ """See Tree._file_size."""
+ if entry is None:
+ return 0
+ return entry.text_size
+
+ @needs_read_lock
+ def get_parent_ids(self):
+ """See Tree.get_parent_ids.
+
+ This implementation returns the current cached value from
+ self._parent_ids.
+ """
+ return list(self._parent_ids)
+
+ def has_filename(self, filename):
+ """See Tree.has_filename()."""
+ return self._file_transport.has(filename)
+
+ def is_executable(self, file_id, path=None):
+ return self._inventory[file_id].executable
+
+ def kind(self, file_id):
+ return self._inventory[file_id].kind
+
+ def mkdir(self, path, file_id=None):
+ """See MutableTree.mkdir()."""
+ self.add(path, file_id, 'directory')
+ if file_id is None:
+ file_id = self.path2id(path)
+ self._file_transport.mkdir(path)
+ return file_id
+
+ @needs_read_lock
+ def last_revision(self):
+ """See MutableTree.last_revision."""
+ return self._branch_revision_id
+
+ def lock_read(self):
+ """Lock the memory tree for reading.
+
+ This triggers population of data from the branch for its revision.
+ """
+ self._locks += 1
+ try:
+ if self._locks == 1:
+ self.branch.lock_read()
+ self._lock_mode = "r"
+ self._populate_from_branch()
+ except:
+ self._locks -= 1
+ raise
+
+ def lock_tree_write(self):
+ """See MutableTree.lock_tree_write()."""
+ self._locks += 1
+ try:
+ if self._locks == 1:
+ self.branch.lock_read()
+ self._lock_mode = "w"
+ self._populate_from_branch()
+ elif self._lock_mode == "r":
+ raise errors.ReadOnlyError(self)
+ except:
+ self._locks -= 1
+ raise
+
+ def lock_write(self):
+ """See MutableTree.lock_write()."""
+ self._locks += 1
+ try:
+ if self._locks == 1:
+ self.branch.lock_write()
+ self._lock_mode = "w"
+ self._populate_from_branch()
+ elif self._lock_mode == "r":
+ raise errors.ReadOnlyError(self)
+ except:
+ self._locks -= 1
+ raise
+
+ def _populate_from_branch(self):
+ """Populate the in-tree state from the branch."""
+ self._set_basis()
+ if self._branch_revision_id == _mod_revision.NULL_REVISION:
+ self._parent_ids = []
+ else:
+ self._parent_ids = [self._branch_revision_id]
+ self._inventory = Inventory(None, self._basis_tree.get_revision_id())
+ self._file_transport = MemoryTransport()
+ # TODO copy the revision trees content, or do it lazy, or something.
+ inventory_entries = self._basis_tree.iter_entries_by_dir()
+ for path, entry in inventory_entries:
+ self._inventory.add(entry.copy())
+ if path == '':
+ continue
+ if entry.kind == 'directory':
+ self._file_transport.mkdir(path)
+ elif entry.kind == 'file':
+ self._file_transport.put_file(path,
+ self._basis_tree.get_file(entry.file_id))
+ else:
+ raise NotImplementedError(self._populate_from_branch)
+
+ def put_file_bytes_non_atomic(self, file_id, bytes):
+ """See MutableTree.put_file_bytes_non_atomic."""
+ self._file_transport.put_bytes(self.id2path(file_id), bytes)
+
+ def unlock(self):
+ """Release a lock.
+
+ This frees all cached state when the last lock context for the tree is
+ left.
+ """
+ if self._locks == 1:
+ self._basis_tree = None
+ self._parent_ids = []
+ self._inventory = None
+ try:
+ self.branch.unlock()
+ finally:
+ self._locks = 0
+ self._lock_mode = None
+ else:
+ self._locks -= 1
+
+ @needs_tree_write_lock
+ def unversion(self, file_ids):
+ """Remove the file ids in file_ids from the current versioned set.
+
+ When a file_id is unversioned, all of its children are automatically
+ unversioned.
+
+ :param file_ids: The file ids to stop versioning.
+ :raises: NoSuchId if any fileid is not currently versioned.
+ """
+ # XXX: This should be in mutabletree, but the inventory-save action
+ # is not relevant to memory tree. Until that is done in unlock by
+ # working tree, we cannot share the implementation.
+ for file_id in file_ids:
+ if self._inventory.has_id(file_id):
+ self._inventory.remove_recursive_id(file_id)
+ else:
+ raise errors.NoSuchId(self, file_id)
+
+ def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
+ """See MutableTree.set_parent_trees()."""
+ for revision_id in revision_ids:
+ _mod_revision.check_not_reserved_id(revision_id)
+ if len(revision_ids) == 0:
+ self._parent_ids = []
+ self._branch_revision_id = _mod_revision.NULL_REVISION
+ else:
+ self._parent_ids = revision_ids
+ self._branch_revision_id = revision_ids[0]
+ self._allow_leftmost_as_ghost = allow_leftmost_as_ghost
+ self._set_basis()
+
+ def _set_basis(self):
+ try:
+ self._basis_tree = self.branch.repository.revision_tree(
+ self._branch_revision_id)
+ except errors.NoSuchRevision:
+ if self._allow_leftmost_as_ghost:
+ self._basis_tree = self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ else:
+ raise
+
+ def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
+ """See MutableTree.set_parent_trees()."""
+ if len(parents_list) == 0:
+ self._parent_ids = []
+ self._basis_tree = self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ else:
+ if parents_list[0][1] is None and not allow_leftmost_as_ghost:
+ # a ghost in the left most parent
+ raise errors.GhostRevisionUnusableHere(parents_list[0][0])
+ self._parent_ids = [parent_id for parent_id, tree in parents_list]
+ if parents_list[0][1] is None or parents_list[0][1] == 'null:':
+ self._basis_tree = self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ else:
+ self._basis_tree = parents_list[0][1]
+ self._branch_revision_id = parents_list[0][0]
diff --git a/bzrlib/merge.py b/bzrlib/merge.py
new file mode 100644
index 0000000..c7c2dd9
--- /dev/null
+++ b/bzrlib/merge.py
@@ -0,0 +1,2467 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import warnings
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ branch as _mod_branch,
+ cleanup,
+ conflicts as _mod_conflicts,
+ debug,
+ generate_ids,
+ graph as _mod_graph,
+ merge3,
+ osutils,
+ patiencediff,
+ revision as _mod_revision,
+ textfile,
+ trace,
+ transform,
+ tree as _mod_tree,
+ tsort,
+ ui,
+ versionedfile,
+ workingtree,
+ )
+from bzrlib.i18n import gettext
+""")
+from bzrlib import (
+ decorators,
+ errors,
+ hooks,
+ registry,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+# TODO: Report back as changes are merged in
+
+
+def transform_tree(from_tree, to_tree, interesting_ids=None):
+ from_tree.lock_tree_write()
+ operation = cleanup.OperationWithCleanups(merge_inner)
+ operation.add_cleanup(from_tree.unlock)
+ operation.run_simple(from_tree.branch, to_tree, from_tree,
+ ignore_zero=True, interesting_ids=interesting_ids, this_tree=from_tree)
+
+
+class MergeHooks(hooks.Hooks):
+
+ def __init__(self):
+ hooks.Hooks.__init__(self, "bzrlib.merge", "Merger.hooks")
+ self.add_hook('merge_file_content',
+ "Called with a bzrlib.merge.Merger object to create a per file "
+ "merge object when starting a merge. "
+ "Should return either None or a subclass of "
+ "``bzrlib.merge.AbstractPerFileMerger``. "
+ "Such objects will then be called per file "
+ "that needs to be merged (including when one "
+ "side has deleted the file and the other has changed it). "
+ "See the AbstractPerFileMerger API docs for details on how it is "
+ "used by merge.",
+ (2, 1))
+ self.add_hook('pre_merge',
+ 'Called before a merge. '
+ 'Receives a Merger object as the single argument.',
+ (2, 5))
+ self.add_hook('post_merge',
+ 'Called after a merge. '
+ 'Receives a Merger object as the single argument. '
+ 'The return value is ignored.',
+ (2, 5))
+
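+ # Illustration (not part of this module): plugins typically attach to these
+ # hooks with something like
+ #   Merger.hooks.install_named_hook('merge_file_content', factory, 'name')
+ # where factory takes a Merger and returns an AbstractPerFileMerger or None.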
+
+class AbstractPerFileMerger(object):
+ """PerFileMerger objects are used by plugins extending merge for bzrlib.
+
+ See ``bzrlib.plugins.news_merge.news_merge`` for an example concrete class.
+
+ :ivar merger: The Merge3Merger performing the merge.
+ """
+
+ def __init__(self, merger):
+ """Create a PerFileMerger for use with merger."""
+ self.merger = merger
+
+ def merge_contents(self, merge_params):
+ """Attempt to merge the contents of a single file.
+
+ :param merge_params: A bzrlib.merge.MergeFileHookParams
+ :return: A tuple of (status, chunks), where status is one of
+ 'not_applicable', 'success', 'conflicted', or 'delete'. If status
+ is 'success' or 'conflicted', then chunks should be an iterable of
+ strings for the new file contents.
+ """
+ return ('not_applicable', None)
+
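+ # A minimal sketch of a concrete per-file merger (hypothetical, for
+ # illustration only; real examples subclass PerFileMerger or
+ # ConfigurableFileMerger below):
+ #   class TakeOtherMerger(AbstractPerFileMerger):
+ #       def merge_contents(self, params):
+ #           if not params.is_file_merge():
+ #               return 'not_applicable', None
+ #           return 'success', params.other_lines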
+
+class PerFileMerger(AbstractPerFileMerger):
+ """Merge individual files when self.file_matches returns True.
+
+ This class is intended to be subclassed. The file_matches and
+ merge_matching methods should be overridden with concrete implementations.
+ """
+
+ def file_matches(self, params):
+ """Return True if merge_matching should be called on this file.
+
+ Only called with merges of plain files with no clear winner.
+
+ Subclasses must override this.
+ """
+ raise NotImplementedError(self.file_matches)
+
+ def get_filename(self, params, tree):
+ """Lookup the filename (i.e. basename, not path), given a Tree (e.g.
+ self.merger.this_tree) and a MergeFileHookParams.
+ """
+ return osutils.basename(tree.id2path(params.file_id))
+
+ def get_filepath(self, params, tree):
+ """Calculate the path to the file in a tree.
+
+ :param params: A MergeFileHookParams describing the file to merge
+ :param tree: a Tree, e.g. self.merger.this_tree.
+ """
+ return tree.id2path(params.file_id)
+
+ def merge_contents(self, params):
+ """Merge the contents of a single file."""
+ # Check whether this custom merge logic should be used.
+ if (
+ # OTHER is a straight winner, rely on default merge.
+ params.winner == 'other' or
+ # THIS and OTHER aren't both files.
+ not params.is_file_merge() or
+ # The filename doesn't match
+ not self.file_matches(params)):
+ return 'not_applicable', None
+ return self.merge_matching(params)
+
+ def merge_matching(self, params):
+ """Merge the contents of a single file that has matched the criteria
+ in PerFileMerger.merge_contents (is a conflict, is a file,
+ self.file_matches is True).
+
+ Subclasses must override this.
+ """
+ raise NotImplementedError(self.merge_matching)
+
+
+class ConfigurableFileMerger(PerFileMerger):
+ """Merge individual files when configured via a .conf file.
+
+ This is a base class for concrete custom file merging logic. Concrete
+ classes should implement ``merge_text``.
+
+ See ``bzrlib.plugins.news_merge.news_merge`` for an example concrete class.
+
+ :ivar affected_files: The configured file paths to merge.
+
+ :cvar name_prefix: The prefix to use when looking up configuration
+ details. For example, <name_prefix>_merge_files names the files
+ targeted by the hook.
+
+ :cvar default_files: The default file paths to merge when no configuration
+ is present.
+ """
+
+ name_prefix = None
+ default_files = None
+
+ def __init__(self, merger):
+ super(ConfigurableFileMerger, self).__init__(merger)
+ self.affected_files = None
+ self.default_files = self.__class__.default_files or []
+ self.name_prefix = self.__class__.name_prefix
+ if self.name_prefix is None:
+ raise ValueError("name_prefix must be set.")
+
+ def file_matches(self, params):
+ """Check whether the file should call the merge hook.
+
+ <name_prefix>_merge_files configuration variable is a list of files
+ that should use the hook.
+ """
+ affected_files = self.affected_files
+ if affected_files is None:
+ config = self.merger.this_branch.get_config()
+ # Until bzr provides a better policy for caching the config, we
+ # just add the part we're interested in to the params to avoid
+ # reading the config files repeatedly (bazaar.conf, location.conf,
+ # branch.conf).
+ config_key = self.name_prefix + '_merge_files'
+ affected_files = config.get_user_option_as_list(config_key)
+ if affected_files is None:
+ # If nothing was specified in the config, use the default.
+ affected_files = self.default_files
+ self.affected_files = affected_files
+ if affected_files:
+ filepath = self.get_filepath(params, self.merger.this_tree)
+ if filepath in affected_files:
+ return True
+ return False
+
+ def merge_matching(self, params):
+ return self.merge_text(params)
+
+ def merge_text(self, params):
+ """Merge the byte contents of a single file.
+
+ This is called after checking that the merge should be performed in
+ merge_contents, and it should behave as per
+ ``bzrlib.merge.AbstractPerFileMerger.merge_contents``.
+ """
+ raise NotImplementedError(self.merge_text)
+
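+ # Example configuration (assumed, following the name_prefix convention
+ # documented above): a subclass with name_prefix = 'news' reads the option
+ # 'news_merge_files' from the branch configuration, e.g.
+ #   news_merge_files = NEWS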
+
+class MergeFileHookParams(object):
+ """Object holding parameters passed to merge_file_content hooks.
+
+ There are some fields hooks can access:
+
+ :ivar file_id: the file ID of the file being merged
+ :ivar trans_id: the transform ID for the merge of this file
+ :ivar this_kind: kind of file_id in 'this' tree
+ :ivar other_kind: kind of file_id in 'other' tree
+ :ivar winner: one of 'this', 'other', 'conflict'
+ """
+
+ def __init__(self, merger, file_id, trans_id, this_kind, other_kind,
+ winner):
+ self._merger = merger
+ self.file_id = file_id
+ self.trans_id = trans_id
+ self.this_kind = this_kind
+ self.other_kind = other_kind
+ self.winner = winner
+
+ def is_file_merge(self):
+ """True if this_kind and other_kind are both 'file'."""
+ return self.this_kind == 'file' and self.other_kind == 'file'
+
+ @decorators.cachedproperty
+ def base_lines(self):
+ """The lines of the 'base' version of the file."""
+ return self._merger.get_lines(self._merger.base_tree, self.file_id)
+
+ @decorators.cachedproperty
+ def this_lines(self):
+ """The lines of the 'this' version of the file."""
+ return self._merger.get_lines(self._merger.this_tree, self.file_id)
+
+ @decorators.cachedproperty
+ def other_lines(self):
+ """The lines of the 'other' version of the file."""
+ return self._merger.get_lines(self._merger.other_tree, self.file_id)
+
+
+class Merger(object):
+
+ hooks = MergeHooks()
+
+ def __init__(self, this_branch, other_tree=None, base_tree=None,
+ this_tree=None, pb=None, change_reporter=None,
+ recurse='down', revision_graph=None):
+ object.__init__(self)
+ self.this_branch = this_branch
+ self.this_basis = _mod_revision.ensure_null(
+ this_branch.last_revision())
+ self.this_rev_id = None
+ self.this_tree = this_tree
+ self.this_revision_tree = None
+ self.this_basis_tree = None
+ self.other_tree = other_tree
+ self.other_branch = None
+ self.base_tree = base_tree
+ self.ignore_zero = False
+ self.backup_files = False
+ self.interesting_ids = None
+ self.interesting_files = None
+ self.show_base = False
+ self.reprocess = False
+ if pb is not None:
+ warnings.warn("pb parameter to Merger() is deprecated and ignored")
+ self.pp = None
+ self.recurse = recurse
+ self.change_reporter = change_reporter
+ self._cached_trees = {}
+ self._revision_graph = revision_graph
+ self._base_is_ancestor = None
+ self._base_is_other_ancestor = None
+ self._is_criss_cross = None
+ self._lca_trees = None
+
+ def cache_trees_with_revision_ids(self, trees):
+ """Cache any tree in trees if it has a revision_id."""
+ for maybe_tree in trees:
+ if maybe_tree is None:
+ continue
+ try:
+ rev_id = maybe_tree.get_revision_id()
+ except AttributeError:
+ continue
+ self._cached_trees[rev_id] = maybe_tree
+
+ @property
+ def revision_graph(self):
+ if self._revision_graph is None:
+ self._revision_graph = self.this_branch.repository.get_graph()
+ return self._revision_graph
+
+ def _set_base_is_ancestor(self, value):
+ self._base_is_ancestor = value
+
+ def _get_base_is_ancestor(self):
+ if self._base_is_ancestor is None:
+ self._base_is_ancestor = self.revision_graph.is_ancestor(
+ self.base_rev_id, self.this_basis)
+ return self._base_is_ancestor
+
+ base_is_ancestor = property(_get_base_is_ancestor, _set_base_is_ancestor)
+
+ def _set_base_is_other_ancestor(self, value):
+ self._base_is_other_ancestor = value
+
+ def _get_base_is_other_ancestor(self):
+ if self._base_is_other_ancestor is None:
+ if self.other_basis is None:
+ return True
+ self._base_is_other_ancestor = self.revision_graph.is_ancestor(
+ self.base_rev_id, self.other_basis)
+ return self._base_is_other_ancestor
+
+ base_is_other_ancestor = property(_get_base_is_other_ancestor,
+ _set_base_is_other_ancestor)
+
+ @staticmethod
+ def from_uncommitted(tree, other_tree, pb=None, base_tree=None):
+ """Return a Merger for uncommitted changes in other_tree.
+
+ :param tree: The tree to merge into
+ :param other_tree: The tree to get uncommitted changes from
+ :param pb: A progress indicator
+ :param base_tree: The basis to use for the merge. If unspecified,
+ other_tree.basis_tree() will be used.
+ """
+ if base_tree is None:
+ base_tree = other_tree.basis_tree()
+ merger = Merger(tree.branch, other_tree, base_tree, tree, pb)
+ merger.base_rev_id = merger.base_tree.get_revision_id()
+ merger.other_rev_id = None
+ merger.other_basis = merger.base_rev_id
+ return merger
+
+ @classmethod
+ def from_mergeable(klass, tree, mergeable, pb):
+ """Return a Merger for a bundle or merge directive.
+
+ :param tree: The tree to merge changes into
+ :param mergeable: A merge directive or bundle
+ :param pb: A progress indicator
+ """
+ mergeable.install_revisions(tree.branch.repository)
+ base_revision_id, other_revision_id, verified =\
+ mergeable.get_merge_request(tree.branch.repository)
+ revision_graph = tree.branch.repository.get_graph()
+ if base_revision_id is not None:
+ if (base_revision_id != _mod_revision.NULL_REVISION and
+ revision_graph.is_ancestor(
+ base_revision_id, tree.branch.last_revision())):
+ base_revision_id = None
+ else:
+ trace.warning('Performing cherrypick')
+ merger = klass.from_revision_ids(pb, tree, other_revision_id,
+ base_revision_id, revision_graph=
+ revision_graph)
+ return merger, verified
+
+ @staticmethod
+ def from_revision_ids(pb, tree, other, base=None, other_branch=None,
+ base_branch=None, revision_graph=None,
+ tree_branch=None):
+ """Return a Merger for revision-ids.
+
+ :param pb: A progress indicator
+ :param tree: The tree to merge changes into
+ :param other: The revision-id to use as OTHER
+ :param base: The revision-id to use as BASE. If not specified, will
+ be auto-selected.
+ :param other_branch: A branch containing the other revision-id. If
+ not supplied, tree.branch is used.
+ :param base_branch: A branch containing the base revision-id. If
+ not supplied, other_branch or tree.branch will be used.
+ :param revision_graph: If you have a revision_graph precomputed, pass
+ it in, otherwise it will be created for you.
+ :param tree_branch: The branch associated with tree. If not supplied,
+ tree.branch will be used.
+ """
+ if tree_branch is None:
+ tree_branch = tree.branch
+ merger = Merger(tree_branch, this_tree=tree, pb=pb,
+ revision_graph=revision_graph)
+ if other_branch is None:
+ other_branch = tree.branch
+ merger.set_other_revision(other, other_branch)
+ if base is None:
+ merger.find_base()
+ else:
+ if base_branch is None:
+ base_branch = other_branch
+ merger.set_base_revision(base, base_branch)
+ return merger
+
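+ # Hedged usage sketch (not from the original source): callers typically do
+ #   merger = Merger.from_revision_ids(None, tree, other_rev_id)
+ #   merger.merge_type = Merge3Merger
+ #   conflict_count = merger.do_merge()
+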
+ def revision_tree(self, revision_id, branch=None):
+ if revision_id not in self._cached_trees:
+ if branch is None:
+ branch = self.this_branch
+ try:
+ tree = self.this_tree.revision_tree(revision_id)
+ except errors.NoSuchRevisionInTree:
+ tree = branch.repository.revision_tree(revision_id)
+ self._cached_trees[revision_id] = tree
+ return self._cached_trees[revision_id]
+
+ def _get_tree(self, treespec, possible_transports=None):
+ location, revno = treespec
+ if revno is None:
+ tree = workingtree.WorkingTree.open_containing(location)[0]
+ return tree.branch, tree
+ branch = _mod_branch.Branch.open_containing(
+ location, possible_transports)[0]
+ if revno == -1:
+ revision_id = branch.last_revision()
+ else:
+ revision_id = branch.get_rev_id(revno)
+ revision_id = _mod_revision.ensure_null(revision_id)
+ return branch, self.revision_tree(revision_id, branch)
+
+ def set_interesting_files(self, file_list):
+ self.interesting_files = file_list
+
+ def set_pending(self):
+ if (not self.base_is_ancestor or not self.base_is_other_ancestor
+ or self.other_rev_id is None):
+ return
+ self._add_parent()
+
+ def _add_parent(self):
+ new_parents = self.this_tree.get_parent_ids() + [self.other_rev_id]
+ new_parent_trees = []
+ operation = cleanup.OperationWithCleanups(
+ self.this_tree.set_parent_trees)
+ for revision_id in new_parents:
+ try:
+ tree = self.revision_tree(revision_id)
+ except errors.NoSuchRevision:
+ tree = None
+ else:
+ tree.lock_read()
+ operation.add_cleanup(tree.unlock)
+ new_parent_trees.append((revision_id, tree))
+ operation.run_simple(new_parent_trees, allow_leftmost_as_ghost=True)
+
+ def set_other(self, other_revision, possible_transports=None):
+ """Set the revision and tree to merge from.
+
+ This sets the other_tree, other_rev_id, other_basis attributes.
+
+ :param other_revision: The [path, revision] list to merge from.
+ """
+ self.other_branch, self.other_tree = self._get_tree(other_revision,
+ possible_transports)
+ if other_revision[1] == -1:
+ self.other_rev_id = _mod_revision.ensure_null(
+ self.other_branch.last_revision())
+ if _mod_revision.is_null(self.other_rev_id):
+ raise errors.NoCommits(self.other_branch)
+ self.other_basis = self.other_rev_id
+ elif other_revision[1] is not None:
+ self.other_rev_id = self.other_branch.get_rev_id(other_revision[1])
+ self.other_basis = self.other_rev_id
+ else:
+ self.other_rev_id = None
+ self.other_basis = self.other_branch.last_revision()
+ if self.other_basis is None:
+ raise errors.NoCommits(self.other_branch)
+ if self.other_rev_id is not None:
+ self._cached_trees[self.other_rev_id] = self.other_tree
+ self._maybe_fetch(self.other_branch, self.this_branch, self.other_basis)
+
+ def set_other_revision(self, revision_id, other_branch):
+ """Set 'other' based on a branch and revision id
+
+ :param revision_id: The revision to use for a tree
+ :param other_branch: The branch containing this tree
+ """
+ self.other_rev_id = revision_id
+ self.other_branch = other_branch
+ self._maybe_fetch(other_branch, self.this_branch, self.other_rev_id)
+ self.other_tree = self.revision_tree(revision_id)
+ self.other_basis = revision_id
+
+ def set_base_revision(self, revision_id, branch):
+ """Set 'base' based on a branch and revision id
+
+ :param revision_id: The revision to use for a tree
+ :param branch: The branch containing this tree
+ """
+ self.base_rev_id = revision_id
+ self.base_branch = branch
+ self._maybe_fetch(branch, self.this_branch, revision_id)
+ self.base_tree = self.revision_tree(revision_id)
+
+ def _maybe_fetch(self, source, target, revision_id):
+ if not source.repository.has_same_location(target.repository):
+ target.fetch(source, revision_id)
+
+ def find_base(self):
+ revisions = [_mod_revision.ensure_null(self.this_basis),
+ _mod_revision.ensure_null(self.other_basis)]
+ if _mod_revision.NULL_REVISION in revisions:
+ self.base_rev_id = _mod_revision.NULL_REVISION
+ self.base_tree = self.revision_tree(self.base_rev_id)
+ self._is_criss_cross = False
+ else:
+ lcas = self.revision_graph.find_lca(revisions[0], revisions[1])
+ self._is_criss_cross = False
+ if len(lcas) == 0:
+ self.base_rev_id = _mod_revision.NULL_REVISION
+ elif len(lcas) == 1:
+ self.base_rev_id = list(lcas)[0]
+ else: # len(lcas) > 1
+ self._is_criss_cross = True
+ if len(lcas) > 2:
+ # find_unique_lca can only handle 2 nodes, so we have to
+ # start back at the beginning. It is a shame to traverse
+ # the graph again, but better than re-implementing
+ # find_unique_lca.
+ self.base_rev_id = self.revision_graph.find_unique_lca(
+ revisions[0], revisions[1])
+ else:
+ self.base_rev_id = self.revision_graph.find_unique_lca(
+ *lcas)
+ sorted_lca_keys = self.revision_graph.find_merge_order(
+ revisions[0], lcas)
+ if self.base_rev_id == _mod_revision.NULL_REVISION:
+ self.base_rev_id = sorted_lca_keys[0]
+
+ if self.base_rev_id == _mod_revision.NULL_REVISION:
+ raise errors.UnrelatedBranches()
+ if self._is_criss_cross:
+ trace.warning('Warning: criss-cross merge encountered. See bzr'
+ ' help criss-cross.')
+ trace.mutter('Criss-cross lcas: %r' % lcas)
+ if self.base_rev_id in lcas:
+ trace.mutter('Unable to find unique lca. '
+ 'Fallback %r as best option.'
+ % self.base_rev_id)
+ interesting_revision_ids = set(lcas)
+ interesting_revision_ids.add(self.base_rev_id)
+ interesting_trees = dict((t.get_revision_id(), t)
+ for t in self.this_branch.repository.revision_trees(
+ interesting_revision_ids))
+ self._cached_trees.update(interesting_trees)
+ if self.base_rev_id in lcas:
+ self.base_tree = interesting_trees[self.base_rev_id]
+ else:
+ self.base_tree = interesting_trees.pop(self.base_rev_id)
+ self._lca_trees = [interesting_trees[key]
+ for key in sorted_lca_keys]
+ else:
+ self.base_tree = self.revision_tree(self.base_rev_id)
+ self.base_is_ancestor = True
+ self.base_is_other_ancestor = True
+ trace.mutter('Base revid: %r' % self.base_rev_id)
+
+ def set_base(self, base_revision):
+ """Set the base revision to use for the merge.
+
+ :param base_revision: A 2-list containing a path and revision number.
+ """
+ trace.mutter("doing merge() with no base_revision specified")
+ if base_revision == [None, None]:
+ self.find_base()
+ else:
+ base_branch, self.base_tree = self._get_tree(base_revision)
+ if base_revision[1] == -1:
+ self.base_rev_id = base_branch.last_revision()
+ elif base_revision[1] is None:
+ self.base_rev_id = _mod_revision.NULL_REVISION
+ else:
+ self.base_rev_id = _mod_revision.ensure_null(
+ base_branch.get_rev_id(base_revision[1]))
+ self._maybe_fetch(base_branch, self.this_branch, self.base_rev_id)
+
+ def make_merger(self):
+ kwargs = {'working_tree': self.this_tree, 'this_tree': self.this_tree,
+ 'other_tree': self.other_tree,
+ 'interesting_ids': self.interesting_ids,
+ 'interesting_files': self.interesting_files,
+ 'this_branch': self.this_branch,
+ 'other_branch': self.other_branch,
+ 'do_merge': False}
+ if self.merge_type.requires_base:
+ kwargs['base_tree'] = self.base_tree
+ if self.merge_type.supports_reprocess:
+ kwargs['reprocess'] = self.reprocess
+ elif self.reprocess:
+ raise errors.BzrError(
+ "Conflict reduction is not supported for merge"
+ " type %s." % self.merge_type)
+ if self.merge_type.supports_show_base:
+ kwargs['show_base'] = self.show_base
+ elif self.show_base:
+ raise errors.BzrError("Showing base is not supported for this"
+ " merge type. %s" % self.merge_type)
+ if (not getattr(self.merge_type, 'supports_reverse_cherrypick', True)
+ and not self.base_is_other_ancestor):
+ raise errors.CannotReverseCherrypick()
+ if self.merge_type.supports_cherrypick:
+ kwargs['cherrypick'] = (not self.base_is_ancestor or
+ not self.base_is_other_ancestor)
+ if self._is_criss_cross and getattr(self.merge_type,
+ 'supports_lca_trees', False):
+ kwargs['lca_trees'] = self._lca_trees
+ return self.merge_type(pb=None,
+ change_reporter=self.change_reporter,
+ **kwargs)
+
+ def _do_merge_to(self):
+ merge = self.make_merger()
+ if self.other_branch is not None:
+ self.other_branch.update_references(self.this_branch)
+ for hook in Merger.hooks['pre_merge']:
+ hook(merge)
+ merge.do_merge()
+ for hook in Merger.hooks['post_merge']:
+ hook(merge)
+ if self.recurse == 'down':
+ for relpath, file_id in self.this_tree.iter_references():
+ sub_tree = self.this_tree.get_nested_tree(file_id, relpath)
+ other_revision = self.other_tree.get_reference_revision(
+ file_id, relpath)
+ if other_revision == sub_tree.last_revision():
+ continue
+ sub_merge = Merger(sub_tree.branch, this_tree=sub_tree)
+ sub_merge.merge_type = self.merge_type
+ other_branch = self.other_branch.reference_parent(file_id,
+ relpath)
+ sub_merge.set_other_revision(other_revision, other_branch)
+ base_revision = self.base_tree.get_reference_revision(file_id)
+ sub_merge.base_tree = \
+ sub_tree.branch.repository.revision_tree(base_revision)
+ sub_merge.base_rev_id = base_revision
+ sub_merge.do_merge()
+ return merge
+
+ def do_merge(self):
+ operation = cleanup.OperationWithCleanups(self._do_merge_to)
+ self.this_tree.lock_tree_write()
+ operation.add_cleanup(self.this_tree.unlock)
+ if self.base_tree is not None:
+ self.base_tree.lock_read()
+ operation.add_cleanup(self.base_tree.unlock)
+ if self.other_tree is not None:
+ self.other_tree.lock_read()
+ operation.add_cleanup(self.other_tree.unlock)
+ merge = operation.run_simple()
+ if len(merge.cooked_conflicts) == 0:
+ if not self.ignore_zero and not trace.is_quiet():
+ trace.note(gettext("All changes applied successfully."))
+ else:
+ trace.note(gettext("%d conflicts encountered.")
+ % len(merge.cooked_conflicts))
+
+ return len(merge.cooked_conflicts)
+
+
+class _InventoryNoneEntry(object):
+ """This represents an inventory entry which *isn't there*.
+
+ It simplifies the merging logic if we always have an InventoryEntry, even
+ if it isn't actually present
+ """
+ executable = None
+ kind = None
+ name = None
+ parent_id = None
+ revision = None
+ symlink_target = None
+ text_sha1 = None
+
+_none_entry = _InventoryNoneEntry()
+
+
+class Merge3Merger(object):
+ """Three-way merger that uses the merge3 text merger"""
+ requires_base = True
+ supports_reprocess = True
+ supports_show_base = True
+ history_based = False
+ supports_cherrypick = True
+ supports_reverse_cherrypick = True
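+ # winner_idx maps a _three_way()/_lca_multi_way() result to an index into
+ # the (base, other, this) tuples used in _merge_names ('conflict' falls
+ # back to the 'other' value).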
+ winner_idx = {"this": 2, "other": 1, "conflict": 1}
+ supports_lca_trees = True
+
+ def __init__(self, working_tree, this_tree, base_tree, other_tree,
+ interesting_ids=None, reprocess=False, show_base=False,
+ pb=None, pp=None, change_reporter=None,
+ interesting_files=None, do_merge=True,
+ cherrypick=False, lca_trees=None, this_branch=None,
+ other_branch=None):
+ """Initialize the merger object and perform the merge.
+
+ :param working_tree: The working tree to apply the merge to
+ :param this_tree: The local tree in the merge operation
+ :param base_tree: The common tree in the merge operation
+ :param other_tree: The other tree to merge changes from
+ :param this_branch: The branch associated with this_tree. Defaults to
+ this_tree.branch if not supplied.
+ :param other_branch: The branch associated with other_tree, if any.
+ :param interesting_ids: The file_ids of files that should
+ participate in the merge. May not be combined with
+ interesting_files.
+ :param reprocess: If True, perform conflict-reduction processing.
+ :param show_base: If True, show the base revision in text conflicts.
+ (incompatible with reprocess)
+ :param pb: ignored
+ :param pp: A ProgressPhase object
+ :param change_reporter: An object that should report changes made
+ :param interesting_files: The tree-relative paths of files that should
+ participate in the merge. If these paths refer to directories,
+ the contents of those directories will also be included. May not
+ be combined with interesting_ids. If neither interesting_files nor
+ interesting_ids is specified, all files may participate in the
+ merge.
+ :param lca_trees: Can be set to a dictionary of {revision_id:rev_tree}
+ if the ancestry was found to include a criss-cross merge.
+ Otherwise should be None.
+ """
+ object.__init__(self)
+ if interesting_files is not None and interesting_ids is not None:
+ raise ValueError(
+ 'specify either interesting_ids or interesting_files')
+ if this_branch is None:
+ this_branch = this_tree.branch
+ self.interesting_ids = interesting_ids
+ self.interesting_files = interesting_files
+ self.working_tree = working_tree
+ self.this_tree = this_tree
+ self.base_tree = base_tree
+ self.other_tree = other_tree
+ self.this_branch = this_branch
+ self.other_branch = other_branch
+ self._raw_conflicts = []
+ self.cooked_conflicts = []
+ self.reprocess = reprocess
+ self.show_base = show_base
+ self._lca_trees = lca_trees
+ # Uncommenting this will change the default algorithm to always use
+ # _entries_lca. This can be useful for running the test suite and
+ # making sure we haven't missed any corner cases.
+ # if lca_trees is None:
+ # self._lca_trees = [self.base_tree]
+ self.change_reporter = change_reporter
+ self.cherrypick = cherrypick
+ if do_merge:
+ self.do_merge()
+ if pp is not None:
+ warnings.warn("pp argument to Merge3Merger is deprecated")
+ if pb is not None:
+ warnings.warn("pb argument to Merge3Merger is deprecated")
+
+ def do_merge(self):
+ operation = cleanup.OperationWithCleanups(self._do_merge)
+ self.working_tree.lock_tree_write()
+ operation.add_cleanup(self.working_tree.unlock)
+ self.this_tree.lock_read()
+ operation.add_cleanup(self.this_tree.unlock)
+ self.base_tree.lock_read()
+ operation.add_cleanup(self.base_tree.unlock)
+ self.other_tree.lock_read()
+ operation.add_cleanup(self.other_tree.unlock)
+ operation.run()
+
+ def _do_merge(self, operation):
+ self.tt = transform.TreeTransform(self.working_tree, None)
+ operation.add_cleanup(self.tt.finalize)
+ self._compute_transform()
+ results = self.tt.apply(no_conflicts=True)
+ self.write_modified(results)
+ try:
+ self.working_tree.add_conflicts(self.cooked_conflicts)
+ except errors.UnsupportedOperation:
+ pass
+
+ def make_preview_transform(self):
+ operation = cleanup.OperationWithCleanups(self._make_preview_transform)
+ self.base_tree.lock_read()
+ operation.add_cleanup(self.base_tree.unlock)
+ self.other_tree.lock_read()
+ operation.add_cleanup(self.other_tree.unlock)
+ return operation.run_simple()
+
+ def _make_preview_transform(self):
+ self.tt = transform.TransformPreview(self.working_tree)
+ self._compute_transform()
+ return self.tt
+
+ def _compute_transform(self):
+ if self._lca_trees is None:
+ entries = self._entries3()
+ resolver = self._three_way
+ else:
+ entries = self._entries_lca()
+ resolver = self._lca_multi_way
+ # Prepare merge hooks
+ factories = Merger.hooks['merge_file_content']
+ # One hook for each registered one plus our default merger
+ hooks = [factory(self) for factory in factories] + [self]
+ self.active_hooks = [hook for hook in hooks if hook is not None]
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for num, (file_id, changed, parents3, names3,
+ executable3) in enumerate(entries):
+ # Try merging each entry
+ child_pb.update(gettext('Preparing file merge'),
+ num, len(entries))
+ self._merge_names(file_id, parents3, names3, resolver=resolver)
+ if changed:
+ file_status = self._do_merge_contents(file_id)
+ else:
+ file_status = 'unmodified'
+ self._merge_executable(file_id,
+ executable3, file_status, resolver=resolver)
+ finally:
+ child_pb.finished()
+ self.tt.fixup_new_roots()
+ self._finish_computing_transform()
+
+ def _finish_computing_transform(self):
+ """Finalize the transform and report the changes.
+
+ This is the second half of _compute_transform.
+ """
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ fs_conflicts = transform.resolve_conflicts(self.tt, child_pb,
+ lambda t, c: transform.conflict_pass(t, c, self.other_tree))
+ finally:
+ child_pb.finished()
+ if self.change_reporter is not None:
+ from bzrlib import delta
+ delta.report_changes(
+ self.tt.iter_changes(), self.change_reporter)
+ self.cook_conflicts(fs_conflicts)
+ for conflict in self.cooked_conflicts:
+ trace.warning(unicode(conflict))
+
+ def _entries3(self):
+ """Gather data about files modified between three trees.
+
+ Return a list of tuples of file_id, changed, parents3, names3,
+ executable3. changed is a boolean indicating whether the file contents
+ or kind were changed. parents3 is a tuple of parent ids for base,
+ other and this. names3 is a tuple of names for base, other and this.
+ executable3 is a tuple of execute-bit values for base, other and this.
+ """
+ result = []
+ iterator = self.other_tree.iter_changes(self.base_tree,
+ specific_files=self.interesting_files,
+ extra_trees=[self.this_tree])
+ this_entries = dict((e.file_id, e) for p, e in
+ self.this_tree.iter_entries_by_dir(
+ self.interesting_ids))
+ for (file_id, paths, changed, versioned, parents, names, kind,
+ executable) in iterator:
+ if (self.interesting_ids is not None and
+ file_id not in self.interesting_ids):
+ continue
+ entry = this_entries.get(file_id)
+ if entry is not None:
+ this_name = entry.name
+ this_parent = entry.parent_id
+ this_executable = entry.executable
+ else:
+ this_name = None
+ this_parent = None
+ this_executable = None
+ parents3 = parents + (this_parent,)
+ names3 = names + (this_name,)
+ executable3 = executable + (this_executable,)
+ result.append((file_id, changed, parents3, names3, executable3))
+ return result
+
+ def _entries_lca(self):
+ """Gather data about files modified between multiple trees.
+
+ This compares OTHER versus all LCA trees, and for interesting entries,
+ it then compares with THIS and BASE.
+
+ For the multi-valued entries, the format will be (BASE, [lca1, lca2])
+
+ :return: [(file_id, changed, parents, names, executable)], where:
+
+ * file_id: Simple file_id of the entry
+ * changed: Boolean, True if the kind or contents changed else False
+ * parents: ((base, [parent_id, in, lcas]), parent_id_other,
+ parent_id_this)
+ * names: ((base, [name, in, lcas]), name_in_other, name_in_this)
+ * executable: ((base, [exec, in, lcas]), exec_in_other,
+ exec_in_this)
+ """
+ if self.interesting_files is not None:
+ lookup_trees = [self.this_tree, self.base_tree]
+ lookup_trees.extend(self._lca_trees)
+ # I think we should include the lca trees as well
+ interesting_ids = self.other_tree.paths2ids(self.interesting_files,
+ lookup_trees)
+ else:
+ interesting_ids = self.interesting_ids
+ result = []
+ walker = _mod_tree.MultiWalker(self.other_tree, self._lca_trees)
+
+ base_inventory = self.base_tree.root_inventory
+ this_inventory = self.this_tree.root_inventory
+ for path, file_id, other_ie, lca_values in walker.iter_all():
+ # Is this modified at all from any of the other trees?
+ if other_ie is None:
+ other_ie = _none_entry
+ if interesting_ids is not None and file_id not in interesting_ids:
+ continue
+
+ # If other_revision is found in any of the lcas, that means this
+ # node is uninteresting. This is because when merging, if there are
+ # multiple heads(), we have to create a new node. So if we didn't,
+ # we know that the ancestry is linear, and that OTHER did not
+ # modify anything
+ # See doc/developers/lca_merge_resolution.txt for details
+ other_revision = other_ie.revision
+ if other_revision is not None:
+ # We can't use this shortcut when other_revision is None,
+ # because it may be None because things are WorkingTrees, and
+ # not because it is *actually* None.
+ is_unmodified = False
+ for lca_path, ie in lca_values:
+ if ie is not None and ie.revision == other_revision:
+ is_unmodified = True
+ break
+ if is_unmodified:
+ continue
+
+ lca_entries = []
+ for lca_path, lca_ie in lca_values:
+ if lca_ie is None:
+ lca_entries.append(_none_entry)
+ else:
+ lca_entries.append(lca_ie)
+
+ if base_inventory.has_id(file_id):
+ base_ie = base_inventory[file_id]
+ else:
+ base_ie = _none_entry
+
+ if this_inventory.has_id(file_id):
+ this_ie = this_inventory[file_id]
+ else:
+ this_ie = _none_entry
+
+ lca_kinds = []
+ lca_parent_ids = []
+ lca_names = []
+ lca_executable = []
+ for lca_ie in lca_entries:
+ lca_kinds.append(lca_ie.kind)
+ lca_parent_ids.append(lca_ie.parent_id)
+ lca_names.append(lca_ie.name)
+ lca_executable.append(lca_ie.executable)
+
+ kind_winner = self._lca_multi_way(
+ (base_ie.kind, lca_kinds),
+ other_ie.kind, this_ie.kind)
+ parent_id_winner = self._lca_multi_way(
+ (base_ie.parent_id, lca_parent_ids),
+ other_ie.parent_id, this_ie.parent_id)
+ name_winner = self._lca_multi_way(
+ (base_ie.name, lca_names),
+ other_ie.name, this_ie.name)
+
+ content_changed = True
+ if kind_winner == 'this':
+ # No kind change in OTHER, see if there are *any* changes
+ if other_ie.kind == 'directory':
+ if parent_id_winner == 'this' and name_winner == 'this':
+ # No change for this directory in OTHER, skip
+ continue
+ content_changed = False
+ elif other_ie.kind is None or other_ie.kind == 'file':
+ def get_sha1(ie, tree):
+ if ie.kind != 'file':
+ return None
+ return tree.get_file_sha1(file_id)
+ base_sha1 = get_sha1(base_ie, self.base_tree)
+ lca_sha1s = [get_sha1(ie, tree) for ie, tree
+ in zip(lca_entries, self._lca_trees)]
+ this_sha1 = get_sha1(this_ie, self.this_tree)
+ other_sha1 = get_sha1(other_ie, self.other_tree)
+ sha1_winner = self._lca_multi_way(
+ (base_sha1, lca_sha1s), other_sha1, this_sha1,
+ allow_overriding_lca=False)
+ exec_winner = self._lca_multi_way(
+ (base_ie.executable, lca_executable),
+ other_ie.executable, this_ie.executable)
+ if (parent_id_winner == 'this' and name_winner == 'this'
+ and sha1_winner == 'this' and exec_winner == 'this'):
+ # No kind, parent, name, exec, or content change for
+ # OTHER, so this node is not considered interesting
+ continue
+ if sha1_winner == 'this':
+ content_changed = False
+ elif other_ie.kind == 'symlink':
+ def get_target(ie, tree):
+ if ie.kind != 'symlink':
+ return None
+ return tree.get_symlink_target(file_id)
+ base_target = get_target(base_ie, self.base_tree)
+ lca_targets = [get_target(ie, tree) for ie, tree
+ in zip(lca_entries, self._lca_trees)]
+ this_target = get_target(this_ie, self.this_tree)
+ other_target = get_target(other_ie, self.other_tree)
+ target_winner = self._lca_multi_way(
+ (base_target, lca_targets),
+ other_target, this_target)
+ if (parent_id_winner == 'this' and name_winner == 'this'
+ and target_winner == 'this'):
+ # No kind, parent, name, or symlink target change
+ # not interesting
+ continue
+ if target_winner == 'this':
+ content_changed = False
+ elif other_ie.kind == 'tree-reference':
+ # The 'changed' information seems to be handled at a higher
+ # level. At least, _entries3 returns False for content
+ # changed, even when at a new revision_id.
+ content_changed = False
+ if (parent_id_winner == 'this' and name_winner == 'this'):
+ # Nothing interesting
+ continue
+ else:
+ raise AssertionError('unhandled kind: %s' % other_ie.kind)
+
+ # If we have gotten this far, that means something has changed
+ result.append((file_id, content_changed,
+ ((base_ie.parent_id, lca_parent_ids),
+ other_ie.parent_id, this_ie.parent_id),
+ ((base_ie.name, lca_names),
+ other_ie.name, this_ie.name),
+ ((base_ie.executable, lca_executable),
+ other_ie.executable, this_ie.executable)
+ ))
+ return result
+
+ def write_modified(self, results):
+ modified_hashes = {}
+ for path in results.modified_paths:
+ file_id = self.working_tree.path2id(self.working_tree.relpath(path))
+ if file_id is None:
+ continue
+ hash = self.working_tree.get_file_sha1(file_id)
+ if hash is None:
+ continue
+ modified_hashes[file_id] = hash
+ self.working_tree.set_merge_modified(modified_hashes)
+
+ @staticmethod
+ def parent(entry, file_id):
+ """Determine the parent for a file_id (used as a key method)"""
+ if entry is None:
+ return None
+ return entry.parent_id
+
+ @staticmethod
+ def name(entry, file_id):
+ """Determine the name for a file_id (used as a key method)"""
+ if entry is None:
+ return None
+ return entry.name
+
+ @staticmethod
+ def contents_sha1(tree, file_id):
+ """Determine the sha1 of the file contents (used as a key method)."""
+ if not tree.has_id(file_id):
+ return None
+ return tree.get_file_sha1(file_id)
+
+ @staticmethod
+ def executable(tree, file_id):
+ """Determine the executability of a file-id (used as a key method)."""
+ if not tree.has_id(file_id):
+ return None
+ if tree.kind(file_id) != "file":
+ return False
+ return tree.is_executable(file_id)
+
+ @staticmethod
+ def kind(tree, file_id):
+ """Determine the kind of a file-id (used as a key method)."""
+ if not tree.has_id(file_id):
+ return None
+ return tree.kind(file_id)
+
+ @staticmethod
+ def _three_way(base, other, this):
+ if base == other:
+ # if 'base == other', either they all agree, or only 'this' has
+ # changed.
+ return 'this'
+ elif this not in (base, other):
+ # 'this' is neither 'base' nor 'other', so both sides changed
+ return 'conflict'
+ elif this == other:
+ # "Ambiguous clean merge" -- both sides have made the same change.
+ return "this"
+ else:
+ # this == base: only other has changed.
+ return "other"
+
+ @staticmethod
+ def _lca_multi_way(bases, other, this, allow_overriding_lca=True):
+ """Consider LCAs when determining whether a change has occurred.
+
+ If the LCAs are all identical, this is the same as a _three_way comparison.
+
+ :param bases: value in (BASE, [LCAS])
+ :param other: value in OTHER
+ :param this: value in THIS
+ :param allow_overriding_lca: If there is more than one unique lca
+ value, allow OTHER to override THIS if it has a new value, and
+ THIS only has an lca value, or vice versa. This is appropriate for
+ truly scalar values, not as much for non-scalars.
+ :return: 'this', 'other', or 'conflict' depending on whether an entry
+ changed or not.
+ """
+ # See doc/developers/lca_tree_merging.txt for details about this
+ # algorithm.
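+ # Worked examples (editorial; values are illustrative):
+ #   bases=('a', ['b', 'b']), other='b', this='c' -> 'this'
+ #     (the unique LCA value equals OTHER, so only THIS changed)
+ #   bases=('a', ['b', 'c']), other='d', this='b' -> 'other'
+ #     (OTHER supersedes both LCA values, THIS still matches an LCA)
+ #   bases=('a', ['b', 'c']), other='b', this='c' -> 'conflict'
+ #     (each side picked a different LCA)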
+ if other == this:
+ # Either Ambiguously clean, or nothing was actually changed. We
+ # don't really care
+ return 'this'
+ base_val, lca_vals = bases
+ # Remove 'base_val' from the lca_vals, because it is not interesting
+ filtered_lca_vals = [lca_val for lca_val in lca_vals
+ if lca_val != base_val]
+ if len(filtered_lca_vals) == 0:
+ return Merge3Merger._three_way(base_val, other, this)
+
+ unique_lca_vals = set(filtered_lca_vals)
+ if len(unique_lca_vals) == 1:
+ return Merge3Merger._three_way(unique_lca_vals.pop(), other, this)
+
+ if allow_overriding_lca:
+ if other in unique_lca_vals:
+ if this in unique_lca_vals:
+ # Each side picked a different lca, conflict
+ return 'conflict'
+ else:
+ # This has a value which supersedes both lca values, and
+ # other only has an lca value
+ return 'this'
+ elif this in unique_lca_vals:
+ # OTHER has a value which supersedes both lca values, and this
+ # only has an lca value
+ return 'other'
+
+ # At this point, the LCAs disagree, and the tips disagree
+ return 'conflict'
+
+ def merge_names(self, file_id):
+ def get_entry(tree):
+ try:
+ return tree.root_inventory[file_id]
+ except errors.NoSuchId:
+ return None
+ this_entry = get_entry(self.this_tree)
+ other_entry = get_entry(self.other_tree)
+ base_entry = get_entry(self.base_tree)
+ entries = (base_entry, other_entry, this_entry)
+ names = []
+ parents = []
+ for entry in entries:
+ if entry is None:
+ names.append(None)
+ parents.append(None)
+ else:
+ names.append(entry.name)
+ parents.append(entry.parent_id)
+ return self._merge_names(file_id, parents, names,
+ resolver=self._three_way)
+
+ def _merge_names(self, file_id, parents, names, resolver):
+ """Perform a merge on file_id names and parents"""
+ base_name, other_name, this_name = names
+ base_parent, other_parent, this_parent = parents
+
+ name_winner = resolver(*names)
+
+ parent_id_winner = resolver(*parents)
+ if this_name is None:
+ if name_winner == "this":
+ name_winner = "other"
+ if parent_id_winner == "this":
+ parent_id_winner = "other"
+ if name_winner == "this" and parent_id_winner == "this":
+ return
+ if name_winner == 'conflict' or parent_id_winner == 'conflict':
+ # Creating helpers (.OTHER or .THIS) here causes problems down the
+ # road if a ContentConflict needs to be created, so we should not do
+ # that.
+ trans_id = self.tt.trans_id_file_id(file_id)
+ self._raw_conflicts.append(('path conflict', trans_id, file_id,
+ this_parent, this_name,
+ other_parent, other_name))
+ if not self.other_tree.has_id(file_id):
+ # it doesn't matter whether the result was 'other' or
+ # 'conflict'-- if it has no file id, we leave it alone.
+ return
+ parent_id = parents[self.winner_idx[parent_id_winner]]
+ name = names[self.winner_idx[name_winner]]
+ if parent_id is not None or name is not None:
+ # if we get here, name_winner and parent_winner are set to safe
+ # values.
+ if parent_id is None and name is not None:
+ # if parent_id is None and name is non-None, current file is
+ # the tree root.
+ if names[self.winner_idx[parent_id_winner]] != '':
+ raise AssertionError(
+ 'File looks like a root, but named %s' %
+ names[self.winner_idx[parent_id_winner]])
+ parent_trans_id = transform.ROOT_PARENT
+ else:
+ parent_trans_id = self.tt.trans_id_file_id(parent_id)
+ self.tt.adjust_path(name, parent_trans_id,
+ self.tt.trans_id_file_id(file_id))
+
+ def _do_merge_contents(self, file_id):
+ """Performs a merge on file_id contents."""
+ def contents_pair(tree):
+ if not tree.has_id(file_id):
+ return (None, None)
+ kind = tree.kind(file_id)
+ if kind == "file":
+ contents = tree.get_file_sha1(file_id)
+ elif kind == "symlink":
+ contents = tree.get_symlink_target(file_id)
+ else:
+ contents = None
+ return kind, contents
+
+ # See SPOT run. run, SPOT, run.
+ # So we're not QUITE repeating ourselves; we do tricky things with
+ # file kind...
+ base_pair = contents_pair(self.base_tree)
+ other_pair = contents_pair(self.other_tree)
+ if self._lca_trees:
+ this_pair = contents_pair(self.this_tree)
+ lca_pairs = [contents_pair(tree) for tree in self._lca_trees]
+ winner = self._lca_multi_way((base_pair, lca_pairs), other_pair,
+ this_pair, allow_overriding_lca=False)
+ else:
+ if base_pair == other_pair:
+ winner = 'this'
+ else:
+ # We delayed evaluating this_pair as long as we can to avoid
+ # unnecessary sha1 calculation
+ this_pair = contents_pair(self.this_tree)
+ winner = self._three_way(base_pair, other_pair, this_pair)
+ if winner == 'this':
+ # No interesting changes introduced by OTHER
+ return "unmodified"
+ # We have a hypothetical conflict, but if we have files, then we
+ # can try to merge the content
+ trans_id = self.tt.trans_id_file_id(file_id)
+ params = MergeFileHookParams(self, file_id, trans_id, this_pair[0],
+ other_pair[0], winner)
+ hooks = self.active_hooks
+ hook_status = 'not_applicable'
+ for hook in hooks:
+ hook_status, lines = hook.merge_contents(params)
+ if hook_status != 'not_applicable':
+ # Don't try any more hooks, this one applies.
+ break
+ # If the merge ends up replacing the content of the file, we get rid of
+ # it at the end of this method (this variable is used to track the
+ # exceptions to this rule).
+ keep_this = False
+ result = "modified"
+ if hook_status == 'not_applicable':
+            # No merge hook was able to resolve the situation. Two cases exist:
+            # either a 'contents' conflict or a 'duplicate' conflict.
+ result = None
+ name = self.tt.final_name(trans_id)
+ parent_id = self.tt.final_parent(trans_id)
+ duplicate = False
+ inhibit_content_conflict = False
+ if params.this_kind is None: # file_id is not in THIS
+ # Is the name used for a different file_id ?
+ dupe_path = self.other_tree.id2path(file_id)
+ this_id = self.this_tree.path2id(dupe_path)
+ if this_id is not None:
+ # Two entries for the same path
+ keep_this = True
+ # versioning the merged file will trigger a duplicate
+ # conflict
+ self.tt.version_file(file_id, trans_id)
+ transform.create_from_tree(
+ self.tt, trans_id, self.other_tree, file_id,
+ filter_tree_path=self._get_filter_tree_path(file_id))
+ inhibit_content_conflict = True
+ elif params.other_kind is None: # file_id is not in OTHER
+ # Is the name used for a different file_id ?
+ dupe_path = self.this_tree.id2path(file_id)
+ other_id = self.other_tree.path2id(dupe_path)
+ if other_id is not None:
+ # Two entries for the same path again, but here, the other
+ # entry will also be merged. We simply inhibit the
+ # 'content' conflict creation because we know OTHER will
+ # create (or has already created depending on ordering) an
+ # entry at the same path. This will trigger a 'duplicate'
+ # conflict later.
+ keep_this = True
+ inhibit_content_conflict = True
+ if not inhibit_content_conflict:
+ if params.this_kind is not None:
+ self.tt.unversion_file(trans_id)
+ # This is a contents conflict, because none of the available
+ # functions could merge it.
+ file_group = self._dump_conflicts(name, parent_id, file_id,
+ set_version=True)
+ self._raw_conflicts.append(('contents conflict', file_group))
+ elif hook_status == 'success':
+ self.tt.create_file(lines, trans_id)
+ elif hook_status == 'conflicted':
+ # XXX: perhaps the hook should be able to provide
+ # the BASE/THIS/OTHER files?
+ self.tt.create_file(lines, trans_id)
+ self._raw_conflicts.append(('text conflict', trans_id))
+ name = self.tt.final_name(trans_id)
+ parent_id = self.tt.final_parent(trans_id)
+ self._dump_conflicts(name, parent_id, file_id)
+ elif hook_status == 'delete':
+ self.tt.unversion_file(trans_id)
+ result = "deleted"
+ elif hook_status == 'done':
+ # The hook function did whatever it needs to do directly, no
+ # further action needed here.
+ pass
+ else:
+ raise AssertionError('unknown hook_status: %r' % (hook_status,))
+ if not self.this_tree.has_id(file_id) and result == "modified":
+ self.tt.version_file(file_id, trans_id)
+ if not keep_this:
+ # The merge has been performed and produced a new content, so the
+ # old contents should not be retained.
+ self.tt.delete_contents(trans_id)
+ return result
+
+ def _default_other_winner_merge(self, merge_hook_params):
+ """Replace this contents with other."""
+ file_id = merge_hook_params.file_id
+ trans_id = merge_hook_params.trans_id
+ if self.other_tree.has_id(file_id):
+ # OTHER changed the file
+ transform.create_from_tree(
+ self.tt, trans_id, self.other_tree, file_id,
+ filter_tree_path=self._get_filter_tree_path(file_id))
+ return 'done', None
+ elif self.this_tree.has_id(file_id):
+ # OTHER deleted the file
+ return 'delete', None
+ else:
+ raise AssertionError(
+ 'winner is OTHER, but file_id %r not in THIS or OTHER tree'
+ % (file_id,))
+
+ def merge_contents(self, merge_hook_params):
+ """Fallback merge logic after user installed hooks."""
+ # This function is used in merge hooks as the fallback instance.
+        # Perhaps making this function and the functions it calls a separate
+        # class would be better.
+ if merge_hook_params.winner == 'other':
+ # OTHER is a straight winner, so replace this contents with other
+ return self._default_other_winner_merge(merge_hook_params)
+ elif merge_hook_params.is_file_merge():
+ # THIS and OTHER are both files, so text merge. Either
+ # BASE is a file, or both converted to files, so at least we
+ # have agreement that output should be a file.
+ try:
+ self.text_merge(merge_hook_params.file_id,
+ merge_hook_params.trans_id)
+ except errors.BinaryFile:
+ return 'not_applicable', None
+ return 'done', None
+ else:
+ return 'not_applicable', None
+
+ def get_lines(self, tree, file_id):
+ """Return the lines in a file, or an empty list."""
+ if tree.has_id(file_id):
+ return tree.get_file_lines(file_id)
+ else:
+ return []
+
+ def text_merge(self, file_id, trans_id):
+ """Perform a three-way text merge on a file_id"""
+ # it's possible that we got here with base as a different type.
+ # if so, we just want two-way text conflicts.
+ if self.base_tree.has_id(file_id) and \
+ self.base_tree.kind(file_id) == "file":
+ base_lines = self.get_lines(self.base_tree, file_id)
+ else:
+ base_lines = []
+ other_lines = self.get_lines(self.other_tree, file_id)
+ this_lines = self.get_lines(self.this_tree, file_id)
+ m3 = merge3.Merge3(base_lines, this_lines, other_lines,
+ is_cherrypick=self.cherrypick)
+ start_marker = "!START OF MERGE CONFLICT!" + "I HOPE THIS IS UNIQUE"
+ if self.show_base is True:
+ base_marker = '|' * 7
+ else:
+ base_marker = None
+
+ def iter_merge3(retval):
+ retval["text_conflicts"] = False
+            for line in m3.merge_lines(name_a="TREE",
+                                       name_b="MERGE-SOURCE",
+                                       name_base="BASE-REVISION",
+ start_marker=start_marker,
+ base_marker=base_marker,
+ reprocess=self.reprocess):
+ if line.startswith(start_marker):
+ retval["text_conflicts"] = True
+ yield line.replace(start_marker, '<' * 7)
+ else:
+ yield line
+ retval = {}
+ merge3_iterator = iter_merge3(retval)
+ self.tt.create_file(merge3_iterator, trans_id)
+ if retval["text_conflicts"] is True:
+ self._raw_conflicts.append(('text conflict', trans_id))
+ name = self.tt.final_name(trans_id)
+ parent_id = self.tt.final_parent(trans_id)
+ file_group = self._dump_conflicts(name, parent_id, file_id,
+ this_lines, base_lines,
+ other_lines)
+ file_group.append(trans_id)
+
+
+ def _get_filter_tree_path(self, file_id):
+ if self.this_tree.supports_content_filtering():
+ # We get the path from the working tree if it exists.
+ # That fails though when OTHER is adding a file, so
+ # we fall back to the other tree to find the path if
+ # it doesn't exist locally.
+ try:
+ return self.this_tree.id2path(file_id)
+ except errors.NoSuchId:
+ return self.other_tree.id2path(file_id)
+ # Skip the id2path lookup for older formats
+ return None
+
+ def _dump_conflicts(self, name, parent_id, file_id, this_lines=None,
+ base_lines=None, other_lines=None, set_version=False,
+ no_base=False):
+ """Emit conflict files.
+ If this_lines, base_lines, or other_lines are omitted, they will be
+ determined automatically. If set_version is true, the .OTHER, .THIS
+ or .BASE (in that order) will be created as versioned files.
+ """
+ data = [('OTHER', self.other_tree, other_lines),
+ ('THIS', self.this_tree, this_lines)]
+ if not no_base:
+ data.append(('BASE', self.base_tree, base_lines))
+
+ # We need to use the actual path in the working tree of the file here,
+ # ignoring the conflict suffixes
+ wt = self.this_tree
+ if wt.supports_content_filtering():
+ try:
+ filter_tree_path = wt.id2path(file_id)
+ except errors.NoSuchId:
+ # file has been deleted
+ filter_tree_path = None
+ else:
+ # Skip the id2path lookup for older formats
+ filter_tree_path = None
+
+ versioned = False
+ file_group = []
+ for suffix, tree, lines in data:
+ if tree.has_id(file_id):
+ trans_id = self._conflict_file(name, parent_id, tree, file_id,
+ suffix, lines, filter_tree_path)
+ file_group.append(trans_id)
+ if set_version and not versioned:
+ self.tt.version_file(file_id, trans_id)
+ versioned = True
+ return file_group
+
+ def _conflict_file(self, name, parent_id, tree, file_id, suffix,
+ lines=None, filter_tree_path=None):
+ """Emit a single conflict file."""
+ name = name + '.' + suffix
+ trans_id = self.tt.create_path(name, parent_id)
+ transform.create_from_tree(self.tt, trans_id, tree, file_id, lines,
+ filter_tree_path)
+ return trans_id
+
+ def merge_executable(self, file_id, file_status):
+ """Perform a merge on the execute bit."""
+ executable = [self.executable(t, file_id) for t in (self.base_tree,
+ self.other_tree, self.this_tree)]
+ self._merge_executable(file_id, executable, file_status,
+ resolver=self._three_way)
+
+ def _merge_executable(self, file_id, executable, file_status,
+ resolver):
+ """Perform a merge on the execute bit."""
+ base_executable, other_executable, this_executable = executable
+ if file_status == "deleted":
+ return
+ winner = resolver(*executable)
+ if winner == "conflict":
+ # There must be a None in here, if we have a conflict, but we
+ # need executability since file status was not deleted.
+ if self.executable(self.other_tree, file_id) is None:
+ winner = "this"
+ else:
+ winner = "other"
+ if winner == 'this' and file_status != "modified":
+ return
+ trans_id = self.tt.trans_id_file_id(file_id)
+ if self.tt.final_kind(trans_id) != "file":
+ return
+ if winner == "this":
+ executability = this_executable
+ else:
+ if self.other_tree.has_id(file_id):
+ executability = other_executable
+ elif self.this_tree.has_id(file_id):
+ executability = this_executable
+            elif self.base_tree.has_id(file_id):
+ executability = base_executable
+ if executability is not None:
+ trans_id = self.tt.trans_id_file_id(file_id)
+ self.tt.set_executability(executability, trans_id)
+
+ def cook_conflicts(self, fs_conflicts):
+ """Convert all conflicts into a form that doesn't depend on trans_id"""
+ content_conflict_file_ids = set()
+ cooked_conflicts = transform.cook_conflicts(fs_conflicts, self.tt)
+ fp = transform.FinalPaths(self.tt)
+ for conflict in self._raw_conflicts:
+ conflict_type = conflict[0]
+ if conflict_type == 'path conflict':
+ (trans_id, file_id,
+ this_parent, this_name,
+ other_parent, other_name) = conflict[1:]
+ if this_parent is None or this_name is None:
+ this_path = '<deleted>'
+ else:
+ parent_path = fp.get_path(
+ self.tt.trans_id_file_id(this_parent))
+ this_path = osutils.pathjoin(parent_path, this_name)
+ if other_parent is None or other_name is None:
+ other_path = '<deleted>'
+ else:
+ if other_parent == self.other_tree.get_root_id():
+ # The tree transform doesn't know about the other root,
+ # so we special case here to avoid a NoFinalPath
+ # exception
+ parent_path = ''
+ else:
+ parent_path = fp.get_path(
+ self.tt.trans_id_file_id(other_parent))
+ other_path = osutils.pathjoin(parent_path, other_name)
+ c = _mod_conflicts.Conflict.factory(
+ 'path conflict', path=this_path,
+ conflict_path=other_path,
+ file_id=file_id)
+ elif conflict_type == 'contents conflict':
+ for trans_id in conflict[1]:
+ file_id = self.tt.final_file_id(trans_id)
+ if file_id is not None:
+ # Ok we found the relevant file-id
+ break
+ path = fp.get_path(trans_id)
+ for suffix in ('.BASE', '.THIS', '.OTHER'):
+ if path.endswith(suffix):
+ # Here is the raw path
+ path = path[:-len(suffix)]
+ break
+ c = _mod_conflicts.Conflict.factory(conflict_type,
+ path=path, file_id=file_id)
+ content_conflict_file_ids.add(file_id)
+ elif conflict_type == 'text conflict':
+ trans_id = conflict[1]
+ path = fp.get_path(trans_id)
+ file_id = self.tt.final_file_id(trans_id)
+ c = _mod_conflicts.Conflict.factory(conflict_type,
+ path=path, file_id=file_id)
+ else:
+ raise AssertionError('bad conflict type: %r' % (conflict,))
+ cooked_conflicts.append(c)
+
+ self.cooked_conflicts = []
+ # We want to get rid of path conflicts when a corresponding contents
+ # conflict exists. This can occur when one branch deletes a file while
+ # the other renames *and* modifies it. In this case, the content
+ # conflict is enough.
+ for c in cooked_conflicts:
+ if (c.typestring == 'path conflict'
+ and c.file_id in content_conflict_file_ids):
+ continue
+ self.cooked_conflicts.append(c)
+ self.cooked_conflicts.sort(key=_mod_conflicts.Conflict.sort_key)
+
+
+class WeaveMerger(Merge3Merger):
+ """Three-way tree merger, text weave merger."""
+ supports_reprocess = True
+ supports_show_base = False
+ supports_reverse_cherrypick = False
+ history_based = True
+
+ def _generate_merge_plan(self, file_id, base):
+ return self.this_tree.plan_file_merge(file_id, self.other_tree,
+ base=base)
+
+ def _merged_lines(self, file_id):
+ """Generate the merged lines.
+        There is no distinction between lines that are meant to contain <<<<<<<
+        and conflict markers added by the merge.
+ """
+ if self.cherrypick:
+ base = self.base_tree
+ else:
+ base = None
+ plan = self._generate_merge_plan(file_id, base)
+ if 'merge' in debug.debug_flags:
+ plan = list(plan)
+ trans_id = self.tt.trans_id_file_id(file_id)
+ name = self.tt.final_name(trans_id) + '.plan'
+ contents = ('%11s|%s' % l for l in plan)
+ self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
+ textmerge = versionedfile.PlanWeaveMerge(plan, '<<<<<<< TREE\n',
+ '>>>>>>> MERGE-SOURCE\n')
+ lines, conflicts = textmerge.merge_lines(self.reprocess)
+ if conflicts:
+ base_lines = textmerge.base_from_plan()
+ else:
+ base_lines = None
+ return lines, base_lines
+
+ def text_merge(self, file_id, trans_id):
+ """Perform a (weave) text merge for a given file and file-id.
+ If conflicts are encountered, .THIS and .OTHER files will be emitted,
+ and a conflict will be noted.
+ """
+ lines, base_lines = self._merged_lines(file_id)
+ lines = list(lines)
+ # Note we're checking whether the OUTPUT is binary in this case,
+ # because we don't want to get into weave merge guts.
+ textfile.check_text_lines(lines)
+ self.tt.create_file(lines, trans_id)
+ if base_lines is not None:
+ # Conflict
+ self._raw_conflicts.append(('text conflict', trans_id))
+ name = self.tt.final_name(trans_id)
+ parent_id = self.tt.final_parent(trans_id)
+ file_group = self._dump_conflicts(name, parent_id, file_id,
+ no_base=False,
+ base_lines=base_lines)
+ file_group.append(trans_id)
+
+
+class LCAMerger(WeaveMerger):
+
+ def _generate_merge_plan(self, file_id, base):
+ return self.this_tree.plan_file_lca_merge(file_id, self.other_tree,
+ base=base)
+
+
+class Diff3Merger(Merge3Merger):
+ """Three-way merger using external diff3 for text merging"""
+
+ def dump_file(self, temp_dir, name, tree, file_id):
+ out_path = osutils.pathjoin(temp_dir, name)
+ out_file = open(out_path, "wb")
+ try:
+ in_file = tree.get_file(file_id)
+ for line in in_file:
+ out_file.write(line)
+ finally:
+ out_file.close()
+ return out_path
+
+ def text_merge(self, file_id, trans_id):
+ """Perform a diff3 merge using a specified file-id and trans-id.
+        If conflicts are encountered, .BASE, .THIS and .OTHER conflict files
+        will be dumped, and a conflict will be noted.
+ """
+ import bzrlib.patch
+ temp_dir = osutils.mkdtemp(prefix="bzr-")
+ try:
+ new_file = osutils.pathjoin(temp_dir, "new")
+ this = self.dump_file(temp_dir, "this", self.this_tree, file_id)
+ base = self.dump_file(temp_dir, "base", self.base_tree, file_id)
+ other = self.dump_file(temp_dir, "other", self.other_tree, file_id)
+ status = bzrlib.patch.diff3(new_file, this, base, other)
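+            # Editor's note: diff3 exits with 0 for a clean merge and 1 when
+            # conflicts were found; any other status is treated as an error
+            # below.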
+ if status not in (0, 1):
+ raise errors.BzrError("Unhandled diff3 exit code")
+ f = open(new_file, 'rb')
+ try:
+ self.tt.create_file(f, trans_id)
+ finally:
+ f.close()
+ if status == 1:
+ name = self.tt.final_name(trans_id)
+ parent_id = self.tt.final_parent(trans_id)
+ self._dump_conflicts(name, parent_id, file_id)
+ self._raw_conflicts.append(('text conflict', trans_id))
+ finally:
+ osutils.rmtree(temp_dir)
+
+
+class PathNotInTree(errors.BzrError):
+
+ _fmt = """Merge-into failed because %(tree)s does not contain %(path)s."""
+
+ def __init__(self, path, tree):
+ errors.BzrError.__init__(self, path=path, tree=tree)
+
+
+class MergeIntoMerger(Merger):
+ """Merger that understands other_tree will be merged into a subdir.
+
+    This also changes the Merger API so that it uses real Branch, revision_id,
+    and RevisionTree objects, rather than using revision specs.
+ """
+
+ def __init__(self, this_tree, other_branch, other_tree, target_subdir,
+ source_subpath, other_rev_id=None):
+ """Create a new MergeIntoMerger object.
+
+ source_subpath in other_tree will be effectively copied to
+ target_subdir in this_tree.
+
+ :param this_tree: The tree that we will be merging into.
+ :param other_branch: The Branch we will be merging from.
+ :param other_tree: The RevisionTree object we want to merge.
+ :param target_subdir: The relative path where we want to merge
+ other_tree into this_tree
+ :param source_subpath: The relative path specifying the subtree of
+ other_tree to merge into this_tree.
+ """
+ # It is assumed that we are merging a tree that is not in our current
+ # ancestry, which means we are using the "EmptyTree" as our basis.
+ null_ancestor_tree = this_tree.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ super(MergeIntoMerger, self).__init__(
+ this_branch=this_tree.branch,
+ this_tree=this_tree,
+ other_tree=other_tree,
+ base_tree=null_ancestor_tree,
+ )
+ self._target_subdir = target_subdir
+ self._source_subpath = source_subpath
+ self.other_branch = other_branch
+ if other_rev_id is None:
+ other_rev_id = other_tree.get_revision_id()
+ self.other_rev_id = self.other_basis = other_rev_id
+ self.base_is_ancestor = True
+ self.backup_files = True
+ self.merge_type = Merge3Merger
+ self.show_base = False
+ self.reprocess = False
+ self.interesting_ids = None
+ self.merge_type = _MergeTypeParameterizer(MergeIntoMergeType,
+ target_subdir=self._target_subdir,
+ source_subpath=self._source_subpath)
+ if self._source_subpath != '':
+ # If this isn't a partial merge make sure the revisions will be
+ # present.
+ self._maybe_fetch(self.other_branch, self.this_branch,
+ self.other_basis)
+
+ def set_pending(self):
+ if self._source_subpath != '':
+ return
+ Merger.set_pending(self)
+
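+# Editor's sketch of how MergeIntoMerger is typically driven (the variable
+# names wt, src_branch and src_tree are illustrative, not from the original
+# source):
+#
+#   merger = MergeIntoMerger(this_tree=wt, other_branch=src_branch,
+#                            other_tree=src_tree, target_subdir='lib/foo',
+#                            source_subpath='subdir')
+#   merger.do_merge()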
+
+class _MergeTypeParameterizer(object):
+ """Wrap a merge-type class to provide extra parameters.
+
+    This is a hack used by MergeIntoMerger to pass some extra parameters to
+    its merge_type. Merger.do_merge() sets up its own set of parameters to
+    pass to the 'merge_type' member. It is difficult to override do_merge
+    without rewriting the whole thing, so instead we create a wrapper that
+    passes the extra parameters through.
+ """
+
+ def __init__(self, merge_type, **kwargs):
+ self._extra_kwargs = kwargs
+ self._merge_type = merge_type
+
+ def __call__(self, *args, **kwargs):
+ kwargs.update(self._extra_kwargs)
+ return self._merge_type(*args, **kwargs)
+
+ def __getattr__(self, name):
+ return getattr(self._merge_type, name)
+
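+# Editor's illustration (not in the original source): the wrapper simply
+# injects the stored keyword arguments into every instantiation, so the
+# assignment made in MergeIntoMerger.__init__ above behaves as if
+# Merger.do_merge() had called
+#
+#   MergeIntoMergeType(<usual merger arguments>,
+#                      target_subdir=..., source_subpath=...)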
+
+class MergeIntoMergeType(Merge3Merger):
+ """Merger that incorporates a tree (or part of a tree) into another."""
+
+ def __init__(self, *args, **kwargs):
+ """Initialize the merger object.
+
+ :param args: See Merge3Merger.__init__'s args.
+ :param kwargs: See Merge3Merger.__init__'s keyword args, except for
+ source_subpath and target_subdir.
+ :keyword source_subpath: The relative path specifying the subtree of
+ other_tree to merge into this_tree.
+ :keyword target_subdir: The relative path where we want to merge
+ other_tree into this_tree
+ """
+ # All of the interesting work happens during Merge3Merger.__init__(),
+        # so we have to hack in to get our extra parameters set.
+ self._source_subpath = kwargs.pop('source_subpath')
+ self._target_subdir = kwargs.pop('target_subdir')
+ super(MergeIntoMergeType, self).__init__(*args, **kwargs)
+
+ def _compute_transform(self):
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ entries = self._entries_to_incorporate()
+ entries = list(entries)
+ for num, (entry, parent_id) in enumerate(entries):
+ child_pb.update(gettext('Preparing file merge'), num, len(entries))
+ parent_trans_id = self.tt.trans_id_file_id(parent_id)
+ trans_id = transform.new_by_entry(self.tt, entry,
+ parent_trans_id, self.other_tree)
+ finally:
+ child_pb.finished()
+ self._finish_computing_transform()
+
+ def _entries_to_incorporate(self):
+ """Yields pairs of (inventory_entry, new_parent)."""
+ other_inv = self.other_tree.root_inventory
+ subdir_id = other_inv.path2id(self._source_subpath)
+ if subdir_id is None:
+ # XXX: The error would be clearer if it gave the URL of the source
+ # branch, but we don't have a reference to that here.
+ raise PathNotInTree(self._source_subpath, "Source tree")
+ subdir = other_inv[subdir_id]
+ parent_in_target = osutils.dirname(self._target_subdir)
+ target_id = self.this_tree.path2id(parent_in_target)
+ if target_id is None:
+ raise PathNotInTree(self._target_subdir, "Target tree")
+ name_in_target = osutils.basename(self._target_subdir)
+ merge_into_root = subdir.copy()
+ merge_into_root.name = name_in_target
+ if self.this_tree.has_id(merge_into_root.file_id):
+ # Give the root a new file-id.
+ # This can happen fairly easily if the directory we are
+ # incorporating is the root, and both trees have 'TREE_ROOT' as
+ # their root_id. Users will expect this to Just Work, so we
+ # change the file-id here.
+ # Non-root file-ids could potentially conflict too. That's really
+ # an edge case, so we don't do anything special for those. We let
+ # them cause conflicts.
+ merge_into_root.file_id = generate_ids.gen_file_id(name_in_target)
+ yield (merge_into_root, target_id)
+ if subdir.kind != 'directory':
+ # No children, so we are done.
+ return
+ for ignored_path, entry in other_inv.iter_entries_by_dir(subdir_id):
+ parent_id = entry.parent_id
+ if parent_id == subdir.file_id:
+ # The root's parent ID has changed, so make sure children of
+ # the root refer to the new ID.
+ parent_id = merge_into_root.file_id
+ yield (entry, parent_id)
+
+
+def merge_inner(this_branch, other_tree, base_tree, ignore_zero=False,
+ backup_files=False,
+ merge_type=Merge3Merger,
+ interesting_ids=None,
+ show_base=False,
+ reprocess=False,
+ other_rev_id=None,
+ interesting_files=None,
+ this_tree=None,
+ pb=None,
+ change_reporter=None):
+ """Primary interface for merging.
+
+ Typical use is probably::
+
+ merge_inner(branch, branch.get_revision_tree(other_revision),
+ branch.get_revision_tree(base_revision))
+ """
+ if this_tree is None:
+ raise errors.BzrError("bzrlib.merge.merge_inner requires a this_tree "
+ "parameter")
+ merger = Merger(this_branch, other_tree, base_tree, this_tree=this_tree,
+ pb=pb, change_reporter=change_reporter)
+ merger.backup_files = backup_files
+ merger.merge_type = merge_type
+ merger.interesting_ids = interesting_ids
+ merger.ignore_zero = ignore_zero
+ if interesting_files:
+ if interesting_ids:
+ raise ValueError('Only supply interesting_ids'
+ ' or interesting_files')
+ merger.interesting_files = interesting_files
+ merger.show_base = show_base
+ merger.reprocess = reprocess
+ merger.other_rev_id = other_rev_id
+ merger.other_basis = other_rev_id
+ get_revision_id = getattr(base_tree, 'get_revision_id', None)
+ if get_revision_id is None:
+ get_revision_id = base_tree.last_revision
+ merger.cache_trees_with_revision_ids([other_tree, base_tree, this_tree])
+ merger.set_base_revision(get_revision_id(), this_branch)
+ return merger.do_merge()
+
+
+merge_type_registry = registry.Registry()
+merge_type_registry.register('diff3', Diff3Merger,
+ "Merge using external diff3.")
+merge_type_registry.register('lca', LCAMerger,
+ "LCA-newness merge.")
+merge_type_registry.register('merge3', Merge3Merger,
+ "Native diff3-style merge.")
+merge_type_registry.register('weave', WeaveMerger,
+ "Weave-based merge.")
+
+
+def get_merge_type_registry():
+ """Merge type registry was previously in bzrlib.option
+
+ This method provides a backwards compatible way to retrieve it.
+ """
+ return merge_type_registry
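+
+# Example use of the registry above (editor's sketch, not in the original
+# source):
+#
+#   klass = get_merge_type_registry().get('weave')    # -> WeaveMerger
+#   assert klass.supports_reprocess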
+
+
+def _plan_annotate_merge(annotated_a, annotated_b, ancestors_a, ancestors_b):
+ def status_a(revision, text):
+ if revision in ancestors_b:
+ return 'killed-b', text
+ else:
+ return 'new-a', text
+
+ def status_b(revision, text):
+ if revision in ancestors_a:
+ return 'killed-a', text
+ else:
+ return 'new-b', text
+
+ plain_a = [t for (a, t) in annotated_a]
+ plain_b = [t for (a, t) in annotated_b]
+ matcher = patiencediff.PatienceSequenceMatcher(None, plain_a, plain_b)
+ blocks = matcher.get_matching_blocks()
+ a_cur = 0
+ b_cur = 0
+ for ai, bi, l in blocks:
+ # process all mismatched sections
+ # (last mismatched section is handled because blocks always
+ # includes a 0-length last block)
+ for revision, text in annotated_a[a_cur:ai]:
+ yield status_a(revision, text)
+ for revision, text in annotated_b[b_cur:bi]:
+ yield status_b(revision, text)
+ # and now the matched section
+ a_cur = ai + l
+ b_cur = bi + l
+ for text_a in plain_a[ai:a_cur]:
+ yield "unchanged", text_a
+
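+# Editor's illustration of _plan_annotate_merge (not in the original source;
+# the revision ids and texts are made up).  Given
+#
+#   annotated_a = [('rev1', 'x\n'), ('rev2', 'y\n')]
+#   annotated_b = [('rev1', 'x\n'), ('rev3', 'z\n')]
+#   ancestors_a = {'rev1', 'rev2'}      ancestors_b = {'rev1', 'rev2'}
+#
+# the generator yields ('unchanged', 'x\n'), then ('killed-b', 'y\n') because
+# b descends from rev2 yet dropped the line, and ('new-b', 'z\n') because
+# rev3 is not in a's ancestry.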
+
+class _PlanMergeBase(object):
+
+ def __init__(self, a_rev, b_rev, vf, key_prefix):
+        """Constructor.
+
+ :param a_rev: Revision-id of one revision to merge
+ :param b_rev: Revision-id of the other revision to merge
+ :param vf: A VersionedFiles containing both revisions
+ :param key_prefix: A prefix for accessing keys in vf, typically
+ (file_id,).
+ """
+ self.a_rev = a_rev
+ self.b_rev = b_rev
+ self.vf = vf
+ self._last_lines = None
+ self._last_lines_revision_id = None
+ self._cached_matching_blocks = {}
+ self._key_prefix = key_prefix
+ self._precache_tip_lines()
+
+ def _precache_tip_lines(self):
+ lines = self.get_lines([self.a_rev, self.b_rev])
+ self.lines_a = lines[self.a_rev]
+ self.lines_b = lines[self.b_rev]
+
+ def get_lines(self, revisions):
+ """Get lines for revisions from the backing VersionedFiles.
+
+ :raises RevisionNotPresent: on absent texts.
+ """
+ keys = [(self._key_prefix + (rev,)) for rev in revisions]
+ result = {}
+ for record in self.vf.get_record_stream(keys, 'unordered', True):
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(record.key, self.vf)
+ result[record.key[-1]] = osutils.chunks_to_lines(
+ record.get_bytes_as('chunked'))
+ return result
+
+ def plan_merge(self):
+ """Generate a 'plan' for merging the two revisions.
+
+ This involves comparing their texts and determining the cause of
+ differences. If text A has a line and text B does not, then either the
+ line was added to text A, or it was deleted from B. Once the causes
+ are combined, they are written out in the format described in
+ VersionedFile.plan_merge
+ """
+ blocks = self._get_matching_blocks(self.a_rev, self.b_rev)
+ unique_a, unique_b = self._unique_lines(blocks)
+ new_a, killed_b = self._determine_status(self.a_rev, unique_a)
+ new_b, killed_a = self._determine_status(self.b_rev, unique_b)
+ return self._iter_plan(blocks, new_a, killed_b, new_b, killed_a)
+
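+    # For orientation (editor's note, not in the original source): the plan
+    # produced by plan_merge/_iter_plan is simply a sequence of (state, line)
+    # pairs, e.g.
+    #
+    #   ('unchanged', 'def foo():\n')
+    #   ('killed-b',  '    return 1\n')
+    #   ('new-a',     '    return 2\n')
+    #
+    # with states drawn from new-a/new-b, killed-a/killed-b,
+    # conflicted-a/conflicted-b and unchanged.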
+ def _iter_plan(self, blocks, new_a, killed_b, new_b, killed_a):
+ last_i = 0
+ last_j = 0
+ for i, j, n in blocks:
+ for a_index in range(last_i, i):
+ if a_index in new_a:
+ if a_index in killed_b:
+ yield 'conflicted-a', self.lines_a[a_index]
+ else:
+ yield 'new-a', self.lines_a[a_index]
+ else:
+ yield 'killed-b', self.lines_a[a_index]
+ for b_index in range(last_j, j):
+ if b_index in new_b:
+ if b_index in killed_a:
+ yield 'conflicted-b', self.lines_b[b_index]
+ else:
+ yield 'new-b', self.lines_b[b_index]
+ else:
+ yield 'killed-a', self.lines_b[b_index]
+ # handle common lines
+ for a_index in range(i, i+n):
+ yield 'unchanged', self.lines_a[a_index]
+ last_i = i+n
+ last_j = j+n
+
+ def _get_matching_blocks(self, left_revision, right_revision):
+ """Return a description of which sections of two revisions match.
+
+ See SequenceMatcher.get_matching_blocks
+ """
+ cached = self._cached_matching_blocks.get((left_revision,
+ right_revision))
+ if cached is not None:
+ return cached
+ if self._last_lines_revision_id == left_revision:
+ left_lines = self._last_lines
+ right_lines = self.get_lines([right_revision])[right_revision]
+ else:
+ lines = self.get_lines([left_revision, right_revision])
+ left_lines = lines[left_revision]
+ right_lines = lines[right_revision]
+ self._last_lines = right_lines
+ self._last_lines_revision_id = right_revision
+ matcher = patiencediff.PatienceSequenceMatcher(None, left_lines,
+ right_lines)
+ return matcher.get_matching_blocks()
+
+ def _unique_lines(self, matching_blocks):
+ """Analyse matching_blocks to determine which lines are unique
+
+ :return: a tuple of (unique_left, unique_right), where the values are
+ sets of line numbers of unique lines.
+ """
+ last_i = 0
+ last_j = 0
+ unique_left = []
+ unique_right = []
+ for i, j, n in matching_blocks:
+ unique_left.extend(range(last_i, i))
+ unique_right.extend(range(last_j, j))
+ last_i = i + n
+ last_j = j + n
+ return unique_left, unique_right
+
+ @staticmethod
+ def _subtract_plans(old_plan, new_plan):
+ """Remove changes from new_plan that came from old_plan.
+
+ It is assumed that the difference between the old_plan and new_plan
+ is their choice of 'b' text.
+
+ All lines from new_plan that differ from old_plan are emitted
+ verbatim. All lines from new_plan that match old_plan but are
+ not about the 'b' revision are emitted verbatim.
+
+ Lines that match and are about the 'b' revision are the lines we
+ don't want, so we convert 'killed-b' -> 'unchanged', and 'new-b'
+ is skipped entirely.
+ """
+ matcher = patiencediff.PatienceSequenceMatcher(None, old_plan,
+ new_plan)
+ last_j = 0
+ for i, j, n in matcher.get_matching_blocks():
+ for jj in range(last_j, j):
+ yield new_plan[jj]
+ for jj in range(j, j+n):
+ plan_line = new_plan[jj]
+ if plan_line[0] == 'new-b':
+ pass
+ elif plan_line[0] == 'killed-b':
+ yield 'unchanged', plan_line[1]
+ else:
+ yield plan_line
+ last_j = j + n
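+    # Editor's illustration of _subtract_plans (not in the original source):
+    # with old_plan == new_plan == [('new-b', 'x\n'), ('killed-b', 'y\n'),
+    # ('new-a', 'z\n')] the whole plan matches, so the 'new-b' entry is
+    # dropped, 'killed-b' becomes 'unchanged', and the result is
+    # [('unchanged', 'y\n'), ('new-a', 'z\n')].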
+
+
+class _PlanMerge(_PlanMergeBase):
+ """Plan an annotate merge using on-the-fly annotation"""
+
+ def __init__(self, a_rev, b_rev, vf, key_prefix):
+ super(_PlanMerge, self).__init__(a_rev, b_rev, vf, key_prefix)
+ self.a_key = self._key_prefix + (self.a_rev,)
+ self.b_key = self._key_prefix + (self.b_rev,)
+ self.graph = _mod_graph.Graph(self.vf)
+ heads = self.graph.heads((self.a_key, self.b_key))
+ if len(heads) == 1:
+ # one side dominates, so we can just return its values, yay for
+ # per-file graphs
+ # Ideally we would know that before we get this far
+ self._head_key = heads.pop()
+ if self._head_key == self.a_key:
+ other = b_rev
+ else:
+ other = a_rev
+ trace.mutter('found dominating revision for %s\n%s > %s', self.vf,
+ self._head_key[-1], other)
+ self._weave = None
+ else:
+ self._head_key = None
+ self._build_weave()
+
+ def _precache_tip_lines(self):
+ # Turn this into a no-op, because we will do this later
+ pass
+
+ def _find_recursive_lcas(self):
+ """Find all the ancestors back to a unique lca"""
+ cur_ancestors = (self.a_key, self.b_key)
+ # graph.find_lca(uncommon, keys) now returns plain NULL_REVISION,
+ # rather than a key tuple. We will just map that directly to no common
+ # ancestors.
+ parent_map = {}
+ while True:
+ next_lcas = self.graph.find_lca(*cur_ancestors)
+ # Map a plain NULL_REVISION to a simple no-ancestors
+ if next_lcas == set([_mod_revision.NULL_REVISION]):
+ next_lcas = ()
+            # Order the LCAs based on when they were merged into the tip.
+            # While the actual merge portion of weave merge uses a set() of
+            # active revisions, the order of insertion *does* affect the
+            # implicit ordering of the texts.
+ for rev_key in cur_ancestors:
+ ordered_parents = tuple(self.graph.find_merge_order(rev_key,
+ next_lcas))
+ parent_map[rev_key] = ordered_parents
+ if len(next_lcas) == 0:
+ break
+ elif len(next_lcas) == 1:
+ parent_map[list(next_lcas)[0]] = ()
+ break
+ elif len(next_lcas) > 2:
+                # More than 2 LCAs; fall back to grabbing all nodes between
+                # this and the unique LCA.
+ trace.mutter('More than 2 LCAs, falling back to all nodes for:'
+ ' %s, %s\n=> %s',
+ self.a_key, self.b_key, cur_ancestors)
+ cur_lcas = next_lcas
+ while len(cur_lcas) > 1:
+ cur_lcas = self.graph.find_lca(*cur_lcas)
+ if len(cur_lcas) == 0:
+ # No common base to find, use the full ancestry
+ unique_lca = None
+ else:
+ unique_lca = list(cur_lcas)[0]
+ if unique_lca == _mod_revision.NULL_REVISION:
+ # find_lca will return a plain 'NULL_REVISION' rather
+ # than a key tuple when there is no common ancestor, we
+ # prefer to just use None, because it doesn't confuse
+ # _get_interesting_texts()
+ unique_lca = None
+ parent_map.update(self._find_unique_parents(next_lcas,
+ unique_lca))
+ break
+ cur_ancestors = next_lcas
+ return parent_map
+
+ def _find_unique_parents(self, tip_keys, base_key):
+ """Find ancestors of tip that aren't ancestors of base.
+
+ :param tip_keys: Nodes that are interesting
+ :param base_key: Cull all ancestors of this node
+ :return: The parent map for all revisions between tip_keys and
+ base_key. base_key will be included. References to nodes outside of
+ the ancestor set will also be removed.
+ """
+ # TODO: this would be simpler if find_unique_ancestors took a list
+ # instead of a single tip, internally it supports it, but it
+ # isn't a "backwards compatible" api change.
+ if base_key is None:
+ parent_map = dict(self.graph.iter_ancestry(tip_keys))
+ # We remove NULL_REVISION because it isn't a proper tuple key, and
+ # thus confuses things like _get_interesting_texts, and our logic
+ # to add the texts into the memory weave.
+ if _mod_revision.NULL_REVISION in parent_map:
+ parent_map.pop(_mod_revision.NULL_REVISION)
+ else:
+ interesting = set()
+ for tip in tip_keys:
+ interesting.update(
+ self.graph.find_unique_ancestors(tip, [base_key]))
+ parent_map = self.graph.get_parent_map(interesting)
+ parent_map[base_key] = ()
+ culled_parent_map, child_map, tails = self._remove_external_references(
+ parent_map)
+ # Remove all the tails but base_key
+ if base_key is not None:
+ tails.remove(base_key)
+ self._prune_tails(culled_parent_map, child_map, tails)
+ # Now remove all the uninteresting 'linear' regions
+ simple_map = _mod_graph.collapse_linear_regions(culled_parent_map)
+ return simple_map
+
+ @staticmethod
+ def _remove_external_references(parent_map):
+ """Remove references that go outside of the parent map.
+
+ :param parent_map: Something returned from Graph.get_parent_map(keys)
+ :return: (filtered_parent_map, child_map, tails)
+ filtered_parent_map is parent_map without external references
+ child_map is the {parent_key: [child_keys]} mapping
+ tails is a list of nodes that do not have any parents in the map
+ """
+ # TODO: The basic effect of this function seems more generic than
+ # _PlanMerge. But the specific details of building a child_map,
+ # and computing tails seems very specific to _PlanMerge.
+ # Still, should this be in Graph land?
+ filtered_parent_map = {}
+ child_map = {}
+ tails = []
+ for key, parent_keys in parent_map.iteritems():
+ culled_parent_keys = [p for p in parent_keys if p in parent_map]
+ if not culled_parent_keys:
+ tails.append(key)
+ for parent_key in culled_parent_keys:
+ child_map.setdefault(parent_key, []).append(key)
+ # TODO: Do we want to do this, it adds overhead for every node,
+ # just to say that the node has no children
+ child_map.setdefault(key, [])
+ filtered_parent_map[key] = culled_parent_keys
+ return filtered_parent_map, child_map, tails
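+    # Editor's example for _remove_external_references (not in the original
+    # source; plain strings stand in for key tuples): given
+    #   parent_map = {'A': (), 'B': ('A', 'X'), 'C': ('B',)}
+    # where 'X' is not a key of the map, the result is
+    #   filtered_parent_map = {'A': [], 'B': ['A'], 'C': ['B']}
+    #   child_map = {'A': ['B'], 'B': ['C'], 'C': []}
+    #   tails = ['A']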
+
+ @staticmethod
+ def _prune_tails(parent_map, child_map, tails_to_remove):
+ """Remove tails from the parent map.
+
+ This will remove the supplied revisions until no more children have 0
+ parents.
+
+ :param parent_map: A dict of {child: [parents]}, this dictionary will
+ be modified in place.
+        :param tails_to_remove: A list of tails that should be removed;
+            this list will be consumed
+ :param child_map: The reverse dict of parent_map ({parent: [children]})
+ this dict will be modified
+ :return: None, parent_map will be modified in place.
+ """
+ while tails_to_remove:
+ next = tails_to_remove.pop()
+ parent_map.pop(next)
+ children = child_map.pop(next)
+ for child in children:
+ child_parents = parent_map[child]
+ child_parents.remove(next)
+ if len(child_parents) == 0:
+ tails_to_remove.append(child)
+
+ def _get_interesting_texts(self, parent_map):
+ """Return a dict of texts we are interested in.
+
+ Note that the input is in key tuples, but the output is in plain
+ revision ids.
+
+ :param parent_map: The output from _find_recursive_lcas
+ :return: A dict of {'revision_id':lines} as returned by
+ _PlanMergeBase.get_lines()
+ """
+ all_revision_keys = set(parent_map)
+ all_revision_keys.add(self.a_key)
+ all_revision_keys.add(self.b_key)
+
+ # Everything else is in 'keys' but get_lines is in 'revision_ids'
+ all_texts = self.get_lines([k[-1] for k in all_revision_keys])
+ return all_texts
+
+ def _build_weave(self):
+ from bzrlib import weave
+ self._weave = weave.Weave(weave_name='in_memory_weave',
+ allow_reserved=True)
+ parent_map = self._find_recursive_lcas()
+
+ all_texts = self._get_interesting_texts(parent_map)
+
+        # Note: Unfortunately, the order given by topo_sort will affect the
+ # ordering resolution in the output. Specifically, if you add A then B,
+ # then in the output text A lines will show up before B lines. And, of
+ # course, topo_sort doesn't guarantee any real ordering.
+ # So we use merge_sort, and add a fake node on the tip.
+ # This ensures that left-hand parents will always be inserted into the
+ # weave before right-hand parents.
+ tip_key = self._key_prefix + (_mod_revision.CURRENT_REVISION,)
+ parent_map[tip_key] = (self.a_key, self.b_key)
+
+ for seq_num, key, depth, eom in reversed(tsort.merge_sort(parent_map,
+ tip_key)):
+ if key == tip_key:
+ continue
+ # for key in tsort.topo_sort(parent_map):
+ parent_keys = parent_map[key]
+ revision_id = key[-1]
+ parent_ids = [k[-1] for k in parent_keys]
+ self._weave.add_lines(revision_id, parent_ids,
+ all_texts[revision_id])
+
+ def plan_merge(self):
+ """Generate a 'plan' for merging the two revisions.
+
+ This involves comparing their texts and determining the cause of
+ differences. If text A has a line and text B does not, then either the
+ line was added to text A, or it was deleted from B. Once the causes
+ are combined, they are written out in the format described in
+ VersionedFile.plan_merge
+ """
+ if self._head_key is not None: # There was a single head
+ if self._head_key == self.a_key:
+ plan = 'new-a'
+ else:
+ if self._head_key != self.b_key:
+ raise AssertionError('There was an invalid head: %s != %s'
+ % (self.b_key, self._head_key))
+ plan = 'new-b'
+ head_rev = self._head_key[-1]
+ lines = self.get_lines([head_rev])[head_rev]
+ return ((plan, line) for line in lines)
+ return self._weave.plan_merge(self.a_rev, self.b_rev)
+
+
+class _PlanLCAMerge(_PlanMergeBase):
+ """
+ This merge algorithm differs from _PlanMerge in that:
+
+ 1. comparisons are done against LCAs only
+ 2. cases where a contested line is new versus one LCA but old versus
+ another are marked as conflicts, by emitting the line as conflicted-a
+ or conflicted-b.
+
+ This is faster, and hopefully produces more useful output.
+ """
+
+ def __init__(self, a_rev, b_rev, vf, key_prefix, graph):
+ _PlanMergeBase.__init__(self, a_rev, b_rev, vf, key_prefix)
+ lcas = graph.find_lca(key_prefix + (a_rev,), key_prefix + (b_rev,))
+ self.lcas = set()
+ for lca in lcas:
+ if lca == _mod_revision.NULL_REVISION:
+ self.lcas.add(lca)
+ else:
+ self.lcas.add(lca[-1])
+ for lca in self.lcas:
+ if _mod_revision.is_null(lca):
+ lca_lines = []
+ else:
+ lca_lines = self.get_lines([lca])[lca]
+ matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_a,
+ lca_lines)
+ blocks = list(matcher.get_matching_blocks())
+ self._cached_matching_blocks[(a_rev, lca)] = blocks
+ matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_b,
+ lca_lines)
+ blocks = list(matcher.get_matching_blocks())
+ self._cached_matching_blocks[(b_rev, lca)] = blocks
+
+ def _determine_status(self, revision_id, unique_line_numbers):
+        """Determines the status of unique lines versus all LCAs.
+
+ Basically, determines why the line is unique to this revision.
+
+ A line may be determined new, killed, or both.
+
+ If a line is determined new, that means it was not present in at least
+ one LCA, and is not present in the other merge revision.
+
+ If a line is determined killed, that means the line was present in
+ at least one LCA.
+
+ If a line is killed and new, this indicates that the two merge
+ revisions contain differing conflict resolutions.
+
+ :param revision_id: The id of the revision in which the lines are
+ unique
+ :param unique_line_numbers: The line numbers of unique lines.
+ :return: a tuple of (new_this, killed_other)
+ """
+ new = set()
+ killed = set()
+ unique_line_numbers = set(unique_line_numbers)
+ for lca in self.lcas:
+ blocks = self._get_matching_blocks(revision_id, lca)
+ unique_vs_lca, _ignored = self._unique_lines(blocks)
+ new.update(unique_line_numbers.intersection(unique_vs_lca))
+ killed.update(unique_line_numbers.difference(unique_vs_lca))
+ return new, killed
diff --git a/bzrlib/merge3.py b/bzrlib/merge3.py
new file mode 100644
index 0000000..ac6fb0a
--- /dev/null
+++ b/bzrlib/merge3.py
@@ -0,0 +1,482 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# mbp: "you know that thing where cvs gives you conflict markers?"
+# s: "i hate that."
+
+from bzrlib import (
+ errors,
+ patiencediff,
+ textfile,
+ )
+
+
+def intersect(ra, rb):
+ """Given two ranges return the range where they intersect or None.
+
+ >>> intersect((0, 10), (0, 6))
+ (0, 6)
+ >>> intersect((0, 10), (5, 15))
+ (5, 10)
+ >>> intersect((0, 10), (10, 15))
+ >>> intersect((0, 9), (10, 15))
+ >>> intersect((0, 9), (7, 15))
+ (7, 9)
+ """
+ # preconditions: (ra[0] <= ra[1]) and (rb[0] <= rb[1])
+
+ sa = max(ra[0], rb[0])
+ sb = min(ra[1], rb[1])
+ if sa < sb:
+ return sa, sb
+ else:
+ return None
+
+
+def compare_range(a, astart, aend, b, bstart, bend):
+ """Compare a[astart:aend] == b[bstart:bend], without slicing.
+ """
+ if (aend-astart) != (bend-bstart):
+ return False
+ for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
+ if a[ia] != b[ib]:
+ return False
+ else:
+ return True
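+    # Example behaviour of compare_range (editor's addition):
+    #   compare_range([0, 1, 2], 1, 3, [9, 1, 2], 1, 3) -> True
+    #   compare_range([0, 1, 2], 0, 2, [9, 1, 2], 1, 3) -> False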
+
+
+class Merge3(object):
+ """3-way merge of texts.
+
+ Given BASE, OTHER, THIS, tries to produce a combined text
+ incorporating the changes from both BASE->OTHER and BASE->THIS.
+ All three will typically be sequences of lines."""
+
+ def __init__(self, base, a, b, is_cherrypick=False, allow_objects=False):
+ """Constructor.
+
+ :param base: lines in BASE
+ :param a: lines in A
+ :param b: lines in B
+ :param is_cherrypick: flag indicating if this merge is a cherrypick.
+ When cherrypicking b => a, matches with b and base do not conflict.
+ :param allow_objects: if True, do not require that base, a and b are
+ plain Python strs. Also prevents BinaryFile from being raised.
+ Lines can be any sequence of comparable and hashable Python
+ objects.
+ """
+ if not allow_objects:
+ textfile.check_text_lines(base)
+ textfile.check_text_lines(a)
+ textfile.check_text_lines(b)
+ self.base = base
+ self.a = a
+ self.b = b
+ self.is_cherrypick = is_cherrypick
+
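+    # Typical usage (editor's sketch, not in the original source; the *_lines
+    # variables are made-up lists of '\n'-terminated strings):
+    #
+    #   m3 = Merge3(base_lines, this_lines, other_lines)
+    #   merged = list(m3.merge_lines(name_a='TREE', name_b='MERGE-SOURCE'))
+    #
+    # merge_lines() below yields the combined text, inserting cvs-style
+    # conflict markers wherever the BASE->a and BASE->b changes overlap.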
+ def merge_lines(self,
+ name_a=None,
+ name_b=None,
+ name_base=None,
+ start_marker='<<<<<<<',
+ mid_marker='=======',
+ end_marker='>>>>>>>',
+ base_marker=None,
+ reprocess=False):
+ """Return merge in cvs-like form.
+ """
+ newline = '\n'
+ if len(self.a) > 0:
+ if self.a[0].endswith('\r\n'):
+ newline = '\r\n'
+ elif self.a[0].endswith('\r'):
+ newline = '\r'
+ if base_marker and reprocess:
+ raise errors.CantReprocessAndShowBase()
+ if name_a:
+ start_marker = start_marker + ' ' + name_a
+ if name_b:
+ end_marker = end_marker + ' ' + name_b
+ if name_base and base_marker:
+ base_marker = base_marker + ' ' + name_base
+ merge_regions = self.merge_regions()
+ if reprocess is True:
+ merge_regions = self.reprocess_merge_regions(merge_regions)
+ for t in merge_regions:
+ what = t[0]
+ if what == 'unchanged':
+ for i in range(t[1], t[2]):
+ yield self.base[i]
+ elif what == 'a' or what == 'same':
+ for i in range(t[1], t[2]):
+ yield self.a[i]
+ elif what == 'b':
+ for i in range(t[1], t[2]):
+ yield self.b[i]
+ elif what == 'conflict':
+ yield start_marker + newline
+ for i in range(t[3], t[4]):
+ yield self.a[i]
+ if base_marker is not None:
+ yield base_marker + newline
+ for i in range(t[1], t[2]):
+ yield self.base[i]
+ yield mid_marker + newline
+ for i in range(t[5], t[6]):
+ yield self.b[i]
+ yield end_marker + newline
+ else:
+ raise ValueError(what)
+
+ def merge_annotated(self):
+ """Return merge with conflicts, showing origin of lines.
+
+ Most useful for debugging merge.
+ """
+ for t in self.merge_regions():
+ what = t[0]
+ if what == 'unchanged':
+ for i in range(t[1], t[2]):
+ yield 'u | ' + self.base[i]
+ elif what == 'a' or what == 'same':
+ for i in range(t[1], t[2]):
+ yield what[0] + ' | ' + self.a[i]
+ elif what == 'b':
+ for i in range(t[1], t[2]):
+ yield 'b | ' + self.b[i]
+ elif what == 'conflict':
+ yield '<<<<\n'
+ for i in range(t[3], t[4]):
+ yield 'A | ' + self.a[i]
+ yield '----\n'
+ for i in range(t[5], t[6]):
+ yield 'B | ' + self.b[i]
+ yield '>>>>\n'
+ else:
+ raise ValueError(what)
+
+ def merge_groups(self):
+ """Yield sequence of line groups. Each one is a tuple:
+
+ 'unchanged', lines
+ Lines unchanged from base
+
+ 'a', lines
+ Lines taken from a
+
+ 'same', lines
+ Lines taken from a (and equal to b)
+
+ 'b', lines
+ Lines taken from b
+
+ 'conflict', base_lines, a_lines, b_lines
+ Lines from base were changed to either a or b and conflict.
+ """
+ for t in self.merge_regions():
+ what = t[0]
+ if what == 'unchanged':
+ yield what, self.base[t[1]:t[2]]
+ elif what == 'a' or what == 'same':
+ yield what, self.a[t[1]:t[2]]
+ elif what == 'b':
+ yield what, self.b[t[1]:t[2]]
+ elif what == 'conflict':
+ yield (what,
+ self.base[t[1]:t[2]],
+ self.a[t[3]:t[4]],
+ self.b[t[5]:t[6]])
+ else:
+ raise ValueError(what)
+
+ def merge_regions(self):
+ """Return sequences of matching and conflicting regions.
+
+ This returns tuples, where the first value says what kind we
+ have:
+
+ 'unchanged', start, end
+ Take a region of base[start:end]
+
+ 'same', astart, aend
+ b and a are different from base but give the same result
+
+ 'a', start, end
+            Non-clashing insertion from a[start:end]
+
+        'b', start, end
+            Non-clashing insertion from b[start:end]
+
+        'conflict', zstart, zend, astart, aend, bstart, bend
+            Lines from base[zstart:zend] were changed differently in
+            a[astart:aend] and b[bstart:bend]
+
+ Method is as follows:
+
+ The two sequences align only on regions which match the base
+        and both descendants. These are found by doing a two-way diff
+ of each one against the base, and then finding the
+ intersections between those regions. These "sync regions"
+ are by definition unchanged in both and easily dealt with.
+
+        The regions in between can be changed on only one side ('a' or 'b'),
+        changed identically on both sides ('same'), or conflicting ('conflict').
+ """
+
+ # section a[0:ia] has been disposed of, etc
+ iz = ia = ib = 0
+
+ for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
+ matchlen = zend - zmatch
+ # invariants:
+ # matchlen >= 0
+ # matchlen == (aend - amatch)
+ # matchlen == (bend - bmatch)
+ len_a = amatch - ia
+ len_b = bmatch - ib
+ len_base = zmatch - iz
+ # invariants:
+ # assert len_a >= 0
+ # assert len_b >= 0
+ # assert len_base >= 0
+
+ #print 'unmatched a=%d, b=%d' % (len_a, len_b)
+
+ if len_a or len_b:
+ # try to avoid actually slicing the lists
+ same = compare_range(self.a, ia, amatch,
+ self.b, ib, bmatch)
+
+ if same:
+ yield 'same', ia, amatch
+ else:
+ equal_a = compare_range(self.a, ia, amatch,
+ self.base, iz, zmatch)
+ equal_b = compare_range(self.b, ib, bmatch,
+ self.base, iz, zmatch)
+ if equal_a and not equal_b:
+ yield 'b', ib, bmatch
+ elif equal_b and not equal_a:
+ yield 'a', ia, amatch
+ elif not equal_a and not equal_b:
+ if self.is_cherrypick:
+ for node in self._refine_cherrypick_conflict(
+ iz, zmatch, ia, amatch,
+ ib, bmatch):
+ yield node
+ else:
+ yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
+ else:
+ raise AssertionError("can't handle a=b=base but unmatched")
+
+ ia = amatch
+ ib = bmatch
+ iz = zmatch
+
+ # if the same part of the base was deleted on both sides
+ # that's OK, we can just skip it.
+
+ if matchlen > 0:
+ # invariants:
+ # assert ia == amatch
+ # assert ib == bmatch
+ # assert iz == zmatch
+
+ yield 'unchanged', zmatch, zend
+ iz = zend
+ ia = aend
+ ib = bend
+
+ def _refine_cherrypick_conflict(self, zstart, zend, astart, aend, bstart, bend):
+ """When cherrypicking b => a, ignore matches with b and base."""
+ # Do not emit regions which match, only regions which do not match
+ matches = patiencediff.PatienceSequenceMatcher(None,
+ self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
+ last_base_idx = 0
+        last_b_idx = 0
+ yielded_a = False
+ for base_idx, b_idx, match_len in matches:
+ conflict_z_len = base_idx - last_base_idx
+ conflict_b_len = b_idx - last_b_idx
+ if conflict_b_len == 0: # There are no lines in b which conflict,
+ # so skip it
+ pass
+ else:
+ if yielded_a:
+ yield ('conflict',
+ zstart + last_base_idx, zstart + base_idx,
+ aend, aend, bstart + last_b_idx, bstart + b_idx)
+ else:
+ # The first conflict gets the a-range
+ yielded_a = True
+ yield ('conflict', zstart + last_base_idx, zstart +
+ base_idx,
+ astart, aend, bstart + last_b_idx, bstart + b_idx)
+ last_base_idx = base_idx + match_len
+ last_b_idx = b_idx + match_len
+ if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
+ if yielded_a:
+ yield ('conflict', zstart + last_base_idx, zstart + base_idx,
+ aend, aend, bstart + last_b_idx, bstart + b_idx)
+ else:
+ # The first conflict gets the a-range
+ yielded_a = True
+ yield ('conflict', zstart + last_base_idx, zstart + base_idx,
+ astart, aend, bstart + last_b_idx, bstart + b_idx)
+ if not yielded_a:
+ yield ('conflict', zstart, zend, astart, aend, bstart, bend)
+
+ def reprocess_merge_regions(self, merge_regions):
+ """Where there are conflict regions, remove the agreed lines.
+
+ Lines where both A and B have made the same changes are
+ eliminated.
+ """
+ for region in merge_regions:
+ if region[0] != "conflict":
+ yield region
+ continue
+ type, iz, zmatch, ia, amatch, ib, bmatch = region
+ a_region = self.a[ia:amatch]
+ b_region = self.b[ib:bmatch]
+ matches = patiencediff.PatienceSequenceMatcher(
+ None, a_region, b_region).get_matching_blocks()
+ next_a = ia
+ next_b = ib
+ for region_ia, region_ib, region_len in matches[:-1]:
+ region_ia += ia
+ region_ib += ib
+ reg = self.mismatch_region(next_a, region_ia, next_b,
+ region_ib)
+ if reg is not None:
+ yield reg
+ yield 'same', region_ia, region_len+region_ia
+ next_a = region_ia + region_len
+ next_b = region_ib + region_len
+ reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
+ if reg is not None:
+ yield reg
+
+ @staticmethod
+ def mismatch_region(next_a, region_ia, next_b, region_ib):
+ if next_a < region_ia or next_b < region_ib:
+ return 'conflict', None, None, next_a, region_ia, next_b, region_ib
+
+ def find_sync_regions(self):
+        """Return a list of sync regions, where both descendants match the base.
+
+ Generates a list of (base1, base2, a1, a2, b1, b2). There is
+ always a zero-length sync region at the end of all the files.
+ """
+
+ ia = ib = 0
+ amatches = patiencediff.PatienceSequenceMatcher(
+ None, self.base, self.a).get_matching_blocks()
+ bmatches = patiencediff.PatienceSequenceMatcher(
+ None, self.base, self.b).get_matching_blocks()
+ len_a = len(amatches)
+ len_b = len(bmatches)
+
+ sl = []
+
+ while ia < len_a and ib < len_b:
+ abase, amatch, alen = amatches[ia]
+ bbase, bmatch, blen = bmatches[ib]
+
+ # there is an unconflicted block at i; how long does it
+ # extend? until whichever one ends earlier.
+ i = intersect((abase, abase+alen), (bbase, bbase+blen))
+ if i:
+ intbase = i[0]
+ intend = i[1]
+ intlen = intend - intbase
+
+ # found a match of base[i[0], i[1]]; this may be less than
+ # the region that matches in either one
+ # assert intlen <= alen
+ # assert intlen <= blen
+ # assert abase <= intbase
+ # assert bbase <= intbase
+
+ asub = amatch + (intbase - abase)
+ bsub = bmatch + (intbase - bbase)
+ aend = asub + intlen
+ bend = bsub + intlen
+
+ # assert self.base[intbase:intend] == self.a[asub:aend], \
+ # (self.base[intbase:intend], self.a[asub:aend])
+ # assert self.base[intbase:intend] == self.b[bsub:bend]
+
+ sl.append((intbase, intend,
+ asub, aend,
+ bsub, bend))
+ # advance whichever one ends first in the base text
+ if (abase + alen) < (bbase + blen):
+ ia += 1
+ else:
+ ib += 1
+
+ intbase = len(self.base)
+ abase = len(self.a)
+ bbase = len(self.b)
+ sl.append((intbase, intbase, abase, abase, bbase, bbase))
+
+ return sl
+
+ def find_unconflicted(self):
+ """Return a list of ranges in base that are not conflicted."""
+ am = patiencediff.PatienceSequenceMatcher(
+ None, self.base, self.a).get_matching_blocks()
+ bm = patiencediff.PatienceSequenceMatcher(
+ None, self.base, self.b).get_matching_blocks()
+
+ unc = []
+
+ while am and bm:
+ # there is an unconflicted block at i; how long does it
+ # extend? until whichever one ends earlier.
+ a1 = am[0][0]
+ a2 = a1 + am[0][2]
+ b1 = bm[0][0]
+ b2 = b1 + bm[0][2]
+ i = intersect((a1, a2), (b1, b2))
+ if i:
+ unc.append(i)
+
+ if a2 < b2:
+ del am[0]
+ else:
+ del bm[0]
+
+ return unc
+
+
+def main(argv):
+ # as for diff3 and meld the syntax is "MINE BASE OTHER"
+ a = file(argv[1], 'rt').readlines()
+ base = file(argv[2], 'rt').readlines()
+ b = file(argv[3], 'rt').readlines()
+
+ m3 = Merge3(base, a, b)
+
+ #for sr in m3.find_sync_regions():
+ # print sr
+
+ # sys.stdout.writelines(m3.merge_lines(name_a=argv[1], name_b=argv[3]))
+ sys.stdout.writelines(m3.merge_annotated())
+
+
+if __name__ == '__main__':
+ import sys
+ sys.exit(main(sys.argv))
diff --git a/bzrlib/merge_directive.py b/bzrlib/merge_directive.py
new file mode 100644
index 0000000..7e715d9
--- /dev/null
+++ b/bzrlib/merge_directive.py
@@ -0,0 +1,684 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from StringIO import StringIO
+import re
+
+from bzrlib import lazy_import
+lazy_import.lazy_import(globals(), """
+from bzrlib import (
+ branch as _mod_branch,
+ diff,
+ email_message,
+ errors,
+ gpg,
+ hooks,
+ registry,
+ revision as _mod_revision,
+ rio,
+ testament,
+ timestamp,
+ trace,
+ )
+from bzrlib.bundle import (
+ serializer as bundle_serializer,
+ )
+""")
+
+
+class MergeRequestBodyParams(object):
+ """Parameter object for the merge_request_body hook."""
+
+ def __init__(self, body, orig_body, directive, to, basename, subject,
+ branch, tree=None):
+ self.body = body
+ self.orig_body = orig_body
+ self.directive = directive
+ self.branch = branch
+ self.tree = tree
+ self.to = to
+ self.basename = basename
+ self.subject = subject
+
+
+class MergeDirectiveHooks(hooks.Hooks):
+ """Hooks for MergeDirective classes."""
+
+ def __init__(self):
+ hooks.Hooks.__init__(self, "bzrlib.merge_directive", "BaseMergeDirective.hooks")
+ self.add_hook('merge_request_body',
+ "Called with a MergeRequestBodyParams when a body is needed for"
+ " a merge request. Callbacks must return a body. If more"
+ " than one callback is registered, the output of one callback is"
+ " provided to the next.", (1, 15, 0))
+
+
+class BaseMergeDirective(object):
+ """A request to perform a merge into a branch.
+
+ This is the base class that all merge directive implementations
+ should derive from.
+
+ :cvar multiple_output_files: Whether or not this merge directive
+ stores a set of revisions in more than one file
+ """
+
+ hooks = MergeDirectiveHooks()
+
+ multiple_output_files = False
+
+ def __init__(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, source_branch=None,
+ message=None, bundle=None):
+ """Constructor.
+
+ :param revision_id: The revision to merge
+ :param testament_sha1: The sha1 of the testament of the revision to
+ merge.
+ :param time: The current POSIX timestamp time
+ :param timezone: The timezone offset
+ :param target_branch: Location of branch to apply the merge to
+ :param patch: The text of a diff or bundle
+ :param source_branch: A public location to merge the revision from
+ :param message: The message to use when committing this merge
+ """
+ self.revision_id = revision_id
+ self.testament_sha1 = testament_sha1
+ self.time = time
+ self.timezone = timezone
+ self.target_branch = target_branch
+ self.patch = patch
+ self.source_branch = source_branch
+ self.message = message
+
+ def to_lines(self):
+ """Serialize as a list of lines
+
+ :return: a list of lines
+ """
+ raise NotImplementedError(self.to_lines)
+
+ def to_files(self):
+ """Serialize as a set of files.
+
+ :return: List of tuples with filename and contents as lines
+ """
+ raise NotImplementedError(self.to_files)
+
+ def get_raw_bundle(self):
+ """Return the bundle for this merge directive.
+
+ :return: bundle text or None if there is no bundle
+ """
+ return None
+
+ def _to_lines(self, base_revision=False):
+ """Serialize as a list of lines
+
+ :return: a list of lines
+ """
+ time_str = timestamp.format_patch_date(self.time, self.timezone)
+ stanza = rio.Stanza(revision_id=self.revision_id, timestamp=time_str,
+ target_branch=self.target_branch,
+ testament_sha1=self.testament_sha1)
+ for key in ('source_branch', 'message'):
+ if self.__dict__[key] is not None:
+ stanza.add(key, self.__dict__[key])
+ if base_revision:
+ stanza.add('base_revision_id', self.base_revision_id)
+ lines = ['# ' + self._format_string + '\n']
+ lines.extend(rio.to_patch_lines(stanza))
+ lines.append('# \n')
+ return lines
+
+ def write_to_directory(self, path):
+ """Write this merge directive to a series of files in a directory.
+
+ :param path: Filesystem path to write to
+ """
+ raise NotImplementedError(self.write_to_directory)
+
+ @classmethod
+ def from_objects(klass, repository, revision_id, time, timezone,
+ target_branch, patch_type='bundle',
+ local_target_branch=None, public_branch=None, message=None):
+ """Generate a merge directive from various objects
+
+ :param repository: The repository containing the revision
+ :param revision_id: The revision to merge
+ :param time: The POSIX timestamp of the date the request was issued.
+ :param timezone: The timezone of the request
+ :param target_branch: The url of the branch to merge into
+ :param patch_type: 'bundle', 'diff' or None, depending on the type of
+ patch desired.
+ :param local_target_branch: the submit branch, either itself or a local copy
+ :param public_branch: location of a public branch containing
+ the target revision.
+ :param message: Message to use when committing the merge
+ :return: The merge directive
+
+ The public branch is always used if supplied. If the patch_type is
+ not 'bundle', the public branch must be supplied, and will be verified.
+
+ If the message is not supplied, the message from revision_id will be
+ used for the commit.
+ """
+ t_revision_id = revision_id
+ if revision_id == _mod_revision.NULL_REVISION:
+ t_revision_id = None
+ t = testament.StrictTestament3.from_revision(repository, t_revision_id)
+ if local_target_branch is None:
+ submit_branch = _mod_branch.Branch.open(target_branch)
+ else:
+ submit_branch = local_target_branch
+ if submit_branch.get_public_branch() is not None:
+ target_branch = submit_branch.get_public_branch()
+ if patch_type is None:
+ patch = None
+ else:
+ submit_revision_id = submit_branch.last_revision()
+ submit_revision_id = _mod_revision.ensure_null(submit_revision_id)
+ repository.fetch(submit_branch.repository, submit_revision_id)
+ graph = repository.get_graph()
+ ancestor_id = graph.find_unique_lca(revision_id,
+ submit_revision_id)
+ type_handler = {'bundle': klass._generate_bundle,
+ 'diff': klass._generate_diff,
+ None: lambda x, y, z: None }
+ patch = type_handler[patch_type](repository, revision_id,
+ ancestor_id)
+
+ if public_branch is not None and patch_type != 'bundle':
+ public_branch_obj = _mod_branch.Branch.open(public_branch)
+ if not public_branch_obj.repository.has_revision(revision_id):
+ raise errors.PublicBranchOutOfDate(public_branch,
+ revision_id)
+
+ return klass(revision_id, t.as_sha1(), time, timezone, target_branch,
+ patch, patch_type, public_branch, message)
+
+ def get_disk_name(self, branch):
+ """Generate a suitable basename for storing this directive on disk
+
+ :param branch: The Branch this merge directive was generated from
+ :return: A string
+ """
+ revno, revision_id = branch.last_revision_info()
+ if self.revision_id == revision_id:
+ revno = [revno]
+ else:
+ revno = branch.get_revision_id_to_revno_map().get(self.revision_id,
+ ['merge'])
+ nick = re.sub('(\W+)', '-', branch.nick).strip('-')
+ return '%s-%s' % (nick, '.'.join(str(n) for n in revno))
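+
+ # Illustrative note (not part of the original source): for a branch whose
+ # nick is "my branch!" and whose tip is this directive's revision at
+ # revno 42, get_disk_name() returns 'my-branch-42'; runs of non-word
+ # characters in the nick are collapsed to single dashes.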
+
+ @staticmethod
+ def _generate_diff(repository, revision_id, ancestor_id):
+ tree_1 = repository.revision_tree(ancestor_id)
+ tree_2 = repository.revision_tree(revision_id)
+ s = StringIO()
+ diff.show_diff_trees(tree_1, tree_2, s, old_label='', new_label='')
+ return s.getvalue()
+
+ @staticmethod
+ def _generate_bundle(repository, revision_id, ancestor_id):
+ s = StringIO()
+ bundle_serializer.write_bundle(repository, revision_id,
+ ancestor_id, s)
+ return s.getvalue()
+
+ def to_signed(self, branch):
+ """Serialize as a signed string.
+
+ :param branch: The source branch, to get the signing strategy
+ :return: a string
+ """
+ my_gpg = gpg.GPGStrategy(branch.get_config_stack())
+ return my_gpg.sign(''.join(self.to_lines()))
+
+ def to_email(self, mail_to, branch, sign=False):
+ """Serialize as an email message.
+
+ :param mail_to: The address to mail the message to
+ :param branch: The source branch, to get the signing strategy and
+ source email address
+ :param sign: If True, gpg-sign the email
+ :return: an email message
+ """
+ mail_from = branch.get_config_stack().get('email')
+ if self.message is not None:
+ subject = self.message
+ else:
+ revision = branch.repository.get_revision(self.revision_id)
+ subject = revision.message
+ if sign:
+ body = self.to_signed(branch)
+ else:
+ body = ''.join(self.to_lines())
+ message = email_message.EmailMessage(mail_from, mail_to, subject,
+ body)
+ return message
+
+ def install_revisions(self, target_repo):
+ """Install revisions and return the target revision"""
+ if not target_repo.has_revision(self.revision_id):
+ if self.patch_type == 'bundle':
+ info = bundle_serializer.read_bundle(
+ StringIO(self.get_raw_bundle()))
+ # We don't use the bundle's target revision, because
+ # MergeDirective.revision_id is authoritative.
+ try:
+ info.install_revisions(target_repo, stream_input=False)
+ except errors.RevisionNotPresent:
+ # At least one dependency isn't present. Try installing
+ # missing revisions from the submit branch
+ try:
+ submit_branch = \
+ _mod_branch.Branch.open(self.target_branch)
+ except errors.NotBranchError:
+ raise errors.TargetNotBranch(self.target_branch)
+ missing_revisions = []
+ bundle_revisions = set(r.revision_id for r in
+ info.real_revisions)
+ for revision in info.real_revisions:
+ for parent_id in revision.parent_ids:
+ if (parent_id not in bundle_revisions and
+ not target_repo.has_revision(parent_id)):
+ missing_revisions.append(parent_id)
+ # reverse missing revisions to try to get heads first
+ unique_missing = []
+ unique_missing_set = set()
+ for revision in reversed(missing_revisions):
+ if revision in unique_missing_set:
+ continue
+ unique_missing.append(revision)
+ unique_missing_set.add(revision)
+ for missing_revision in unique_missing:
+ target_repo.fetch(submit_branch.repository,
+ missing_revision)
+ info.install_revisions(target_repo, stream_input=False)
+ else:
+ source_branch = _mod_branch.Branch.open(self.source_branch)
+ target_repo.fetch(source_branch.repository, self.revision_id)
+ return self.revision_id
+
+ def compose_merge_request(self, mail_client, to, body, branch, tree=None):
+ """Compose a request to merge this directive.
+
+ :param mail_client: The mail client to use for composing this request.
+ :param to: The address to compose the request to.
+ :param branch: The Branch that was used to produce this directive.
+ :param tree: The Tree (if any) for the Branch used to produce this
+ directive.
+ """
+ basename = self.get_disk_name(branch)
+ subject = '[MERGE] '
+ if self.message is not None:
+ subject += self.message
+ else:
+ revision = branch.repository.get_revision(self.revision_id)
+ subject += revision.get_summary()
+ if getattr(mail_client, 'supports_body', False):
+ orig_body = body
+ for hook in self.hooks['merge_request_body']:
+ params = MergeRequestBodyParams(body, orig_body, self,
+ to, basename, subject, branch,
+ tree)
+ body = hook(params)
+ elif len(self.hooks['merge_request_body']) > 0:
+ trace.warning('Cannot run merge_request_body hooks because mail'
+ ' client %s does not support message bodies.',
+ mail_client.__class__.__name__)
+ mail_client.compose_merge_request(to, subject,
+ ''.join(self.to_lines()),
+ basename, body)
+
+
+class MergeDirective(BaseMergeDirective):
+
+ """A request to perform a merge into a branch.
+
+ Designed to be serialized and mailed. It provides all the information
+ needed to perform a merge automatically, by providing at minimum a revision
+ bundle or the location of a branch.
+
+ The serialization format is robust against certain common forms of
+ deterioration caused by mailing.
+
+ The format is also designed to be patch-compatible. If the directive
+ includes a diff or revision bundle, it should be possible to apply it
+ directly using the standard patch program.
+ """
+
+ _format_string = 'Bazaar merge directive format 1'
+
+ def __init__(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, patch_type=None,
+ source_branch=None, message=None, bundle=None):
+ """Constructor.
+
+ :param revision_id: The revision to merge
+ :param testament_sha1: The sha1 of the testament of the revision to
+ merge.
+ :param time: The current POSIX timestamp time
+ :param timezone: The timezone offset
+ :param target_branch: Location of the branch to apply the merge to
+ :param patch: The text of a diff or bundle
+ :param patch_type: None, "diff" or "bundle", depending on the contents
+ of patch
+ :param source_branch: A public location to merge the revision from
+ :param message: The message to use when committing this merge
+ """
+ BaseMergeDirective.__init__(self, revision_id, testament_sha1, time,
+ timezone, target_branch, patch, source_branch, message)
+ if patch_type not in (None, 'diff', 'bundle'):
+ raise ValueError(patch_type)
+ if patch_type != 'bundle' and source_branch is None:
+ raise errors.NoMergeSource()
+ if patch_type is not None and patch is None:
+ raise errors.PatchMissing(patch_type)
+ self.patch_type = patch_type
+
+ def clear_payload(self):
+ self.patch = None
+ self.patch_type = None
+
+ def get_raw_bundle(self):
+ return self.bundle
+
+ def _bundle(self):
+ if self.patch_type == 'bundle':
+ return self.patch
+ else:
+ return None
+
+ bundle = property(_bundle)
+
+ @classmethod
+ def from_lines(klass, lines):
+ """Deserialize a MergeRequest from an iterable of lines
+
+ :param lines: An iterable of lines
+ :return: a MergeRequest
+ """
+ line_iter = iter(lines)
+ firstline = ""
+ for line in line_iter:
+ if line.startswith('# Bazaar merge directive format '):
+ return _format_registry.get(line[2:].rstrip())._from_lines(
+ line_iter)
+ firstline = firstline or line.strip()
+ raise errors.NotAMergeDirective(firstline)
+
+ @classmethod
+ def _from_lines(klass, line_iter):
+ stanza = rio.read_patch_stanza(line_iter)
+ patch_lines = list(line_iter)
+ if len(patch_lines) == 0:
+ patch = None
+ patch_type = None
+ else:
+ patch = ''.join(patch_lines)
+ try:
+ bundle_serializer.read_bundle(StringIO(patch))
+ except (errors.NotABundle, errors.BundleNotSupported,
+ errors.BadBundle):
+ patch_type = 'diff'
+ else:
+ patch_type = 'bundle'
+ time, timezone = timestamp.parse_patch_date(stanza.get('timestamp'))
+ kwargs = {}
+ for key in ('revision_id', 'testament_sha1', 'target_branch',
+ 'source_branch', 'message'):
+ try:
+ kwargs[key] = stanza.get(key)
+ except KeyError:
+ pass
+ kwargs['revision_id'] = kwargs['revision_id'].encode('utf-8')
+ return MergeDirective(time=time, timezone=timezone,
+ patch_type=patch_type, patch=patch, **kwargs)
+
+ def to_lines(self):
+ lines = self._to_lines()
+ if self.patch is not None:
+ lines.extend(self.patch.splitlines(True))
+ return lines
+
+ @staticmethod
+ def _generate_bundle(repository, revision_id, ancestor_id):
+ s = StringIO()
+ bundle_serializer.write_bundle(repository, revision_id,
+ ancestor_id, s, '0.9')
+ return s.getvalue()
+
+ def get_merge_request(self, repository):
+ """Provide data for performing a merge
+
+ Returns suggested base, suggested target, and patch verification status
+ """
+ return None, self.revision_id, 'inapplicable'
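+
+ # Illustrative sketch (not part of the original source; the names are
+ # hypothetical): serializing and re-reading a directive round-trips its
+ # metadata, e.g.
+ # lines = directive.to_lines()
+ # directive2 = MergeDirective.from_lines(lines)
+ # from_lines() dispatches on the '# Bazaar merge directive format ...'
+ # header line to the class registered for that format string.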
+
+
+class MergeDirective2(BaseMergeDirective):
+
+ _format_string = 'Bazaar merge directive format 2 (Bazaar 0.90)'
+
+ def __init__(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, source_branch=None, message=None,
+ bundle=None, base_revision_id=None):
+ if source_branch is None and bundle is None:
+ raise errors.NoMergeSource()
+ BaseMergeDirective.__init__(self, revision_id, testament_sha1, time,
+ timezone, target_branch, patch, source_branch, message)
+ self.bundle = bundle
+ self.base_revision_id = base_revision_id
+
+ def _patch_type(self):
+ if self.bundle is not None:
+ return 'bundle'
+ elif self.patch is not None:
+ return 'diff'
+ else:
+ return None
+
+ patch_type = property(_patch_type)
+
+ def clear_payload(self):
+ self.patch = None
+ self.bundle = None
+
+ def get_raw_bundle(self):
+ if self.bundle is None:
+ return None
+ else:
+ return self.bundle.decode('base-64')
+
+ @classmethod
+ def _from_lines(klass, line_iter):
+ stanza = rio.read_patch_stanza(line_iter)
+ patch = None
+ bundle = None
+ try:
+ start = line_iter.next()
+ except StopIteration:
+ pass
+ else:
+ if start.startswith('# Begin patch'):
+ patch_lines = []
+ for line in line_iter:
+ if line.startswith('# Begin bundle'):
+ start = line
+ break
+ patch_lines.append(line)
+ else:
+ start = None
+ patch = ''.join(patch_lines)
+ if start is not None:
+ if start.startswith('# Begin bundle'):
+ bundle = ''.join(line_iter)
+ else:
+ raise errors.IllegalMergeDirectivePayload(start)
+ time, timezone = timestamp.parse_patch_date(stanza.get('timestamp'))
+ kwargs = {}
+ for key in ('revision_id', 'testament_sha1', 'target_branch',
+ 'source_branch', 'message', 'base_revision_id'):
+ try:
+ kwargs[key] = stanza.get(key)
+ except KeyError:
+ pass
+ kwargs['revision_id'] = kwargs['revision_id'].encode('utf-8')
+ kwargs['base_revision_id'] =\
+ kwargs['base_revision_id'].encode('utf-8')
+ return klass(time=time, timezone=timezone, patch=patch, bundle=bundle,
+ **kwargs)
+
+ def to_lines(self):
+ lines = self._to_lines(base_revision=True)
+ if self.patch is not None:
+ lines.append('# Begin patch\n')
+ lines.extend(self.patch.splitlines(True))
+ if self.bundle is not None:
+ lines.append('# Begin bundle\n')
+ lines.extend(self.bundle.splitlines(True))
+ return lines
+
+ @classmethod
+ def from_objects(klass, repository, revision_id, time, timezone,
+ target_branch, include_patch=True, include_bundle=True,
+ local_target_branch=None, public_branch=None, message=None,
+ base_revision_id=None):
+ """Generate a merge directive from various objects
+
+ :param repository: The repository containing the revision
+ :param revision_id: The revision to merge
+ :param time: The POSIX timestamp of the date the request was issued.
+ :param timezone: The timezone of the request
+ :param target_branch: The url of the branch to merge into
+ :param include_patch: If true, include a preview patch
+ :param include_bundle: If true, include a bundle
+ :param local_target_branch: the target branch, either itself or a local copy
+ :param public_branch: location of a public branch containing
+ the target revision.
+ :param message: Message to use when committing the merge
+ :return: The merge directive
+
+ The public branch is always used if supplied. If no bundle is
+ included, the public branch must be supplied, and will be verified.
+
+ If the message is not supplied, the message from revision_id will be
+ used for the commit.
+ """
+ locked = []
+ try:
+ repository.lock_write()
+ locked.append(repository)
+ t_revision_id = revision_id
+ if revision_id == 'null:':
+ t_revision_id = None
+ t = testament.StrictTestament3.from_revision(repository,
+ t_revision_id)
+ if local_target_branch is None:
+ submit_branch = _mod_branch.Branch.open(target_branch)
+ else:
+ submit_branch = local_target_branch
+ submit_branch.lock_read()
+ locked.append(submit_branch)
+ if submit_branch.get_public_branch() is not None:
+ target_branch = submit_branch.get_public_branch()
+ submit_revision_id = submit_branch.last_revision()
+ submit_revision_id = _mod_revision.ensure_null(submit_revision_id)
+ graph = repository.get_graph(submit_branch.repository)
+ ancestor_id = graph.find_unique_lca(revision_id,
+ submit_revision_id)
+ if base_revision_id is None:
+ base_revision_id = ancestor_id
+ if (include_patch, include_bundle) != (False, False):
+ repository.fetch(submit_branch.repository, submit_revision_id)
+ if include_patch:
+ patch = klass._generate_diff(repository, revision_id,
+ base_revision_id)
+ else:
+ patch = None
+
+ if include_bundle:
+ bundle = klass._generate_bundle(repository, revision_id,
+ ancestor_id).encode('base-64')
+ else:
+ bundle = None
+
+ if public_branch is not None and not include_bundle:
+ public_branch_obj = _mod_branch.Branch.open(public_branch)
+ public_branch_obj.lock_read()
+ locked.append(public_branch_obj)
+ if not public_branch_obj.repository.has_revision(
+ revision_id):
+ raise errors.PublicBranchOutOfDate(public_branch,
+ revision_id)
+ testament_sha1 = t.as_sha1()
+ finally:
+ for entry in reversed(locked):
+ entry.unlock()
+ return klass(revision_id, testament_sha1, time, timezone,
+ target_branch, patch, public_branch, message, bundle,
+ base_revision_id)
+
+ def _verify_patch(self, repository):
+ calculated_patch = self._generate_diff(repository, self.revision_id,
+ self.base_revision_id)
+ # Convert line-endings to UNIX
+ stored_patch = re.sub('\r\n?', '\n', self.patch)
+ calculated_patch = re.sub('\r\n?', '\n', calculated_patch)
+ # Strip trailing whitespace
+ calculated_patch = re.sub(' *\n', '\n', calculated_patch)
+ stored_patch = re.sub(' *\n', '\n', stored_patch)
+ return (calculated_patch == stored_patch)
+
+ def get_merge_request(self, repository):
+ """Provide data for performing a merge
+
+ Returns suggested base, suggested target, and patch verification status
+ """
+ verified = self._maybe_verify(repository)
+ return self.base_revision_id, self.revision_id, verified
+
+ def _maybe_verify(self, repository):
+ if self.patch is not None:
+ if self._verify_patch(repository):
+ return 'verified'
+ else:
+ return 'failed'
+ else:
+ return 'inapplicable'
+
+
+class MergeDirectiveFormatRegistry(registry.Registry):
+
+ def register(self, directive, format_string=None):
+ if format_string is None:
+ format_string = directive._format_string
+ registry.Registry.register(self, format_string, directive)
+
+
+_format_registry = MergeDirectiveFormatRegistry()
+_format_registry.register(MergeDirective)
+_format_registry.register(MergeDirective2)
+# 0.19 never existed. It got renamed to 0.90. But by that point, there were
+# already merge directives in the wild that used 0.19. Registering with the old
+# format string to retain compatibility with those merge directives.
+_format_registry.register(MergeDirective2,
+ 'Bazaar merge directive format 2 (Bazaar 0.19)')
diff --git a/bzrlib/mergetools.py b/bzrlib/mergetools.py
new file mode 100644
index 0000000..db8255b
--- /dev/null
+++ b/bzrlib/mergetools.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2010 Canonical Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Utility functions for managing external merge tools such as kdiff3."""
+
+from __future__ import absolute_import
+
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ cmdline,
+ osutils,
+ trace,
+)
+""")
+
+
+known_merge_tools = {
+ 'bcompare': 'bcompare {this} {other} {base} {result}',
+ 'kdiff3': 'kdiff3 {base} {this} {other} -o {result}',
+ 'xdiff': 'xxdiff -m -O -M {result} {this} {base} {other}',
+ 'meld': 'meld {base} {this_temp} {other}',
+ 'opendiff': 'opendiff {this} {other} -ancestor {base} -merge {result}',
+ 'winmergeu': 'winmergeu {result}',
+}
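+
+# Illustrative note (not part of the original source): the {base}, {this},
+# {other} and {result} markers are expanded against the conflicted file's
+# name by _subst_filename() below; e.g. for the file 'foo.txt' the kdiff3
+# entry expands to
+# ['kdiff3', 'foo.txt.BASE', 'foo.txt.THIS', 'foo.txt.OTHER', '-o', 'foo.txt'].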
+
+
+def check_availability(command_line):
+ cmd_list = cmdline.split(command_line)
+ exe = cmd_list[0]
+ if sys.platform == 'win32':
+ exe = _get_executable_path(exe)
+ if exe is None:
+ return False
+ base, ext = os.path.splitext(exe)
+ path_ext = [unicode(s.lower())
+ for s in os.getenv('PATHEXT', '').split(os.pathsep)]
+ return os.path.exists(exe) and ext in path_ext
+ else:
+ return (os.access(exe, os.X_OK)
+ or osutils.find_executable_on_path(exe) is not None)
+
+
+def invoke(command_line, filename, invoker=None):
+ """Invokes the given merge tool command line, substituting the given
+ filename according to the embedded substitution markers. Optionally, it
+ will use the given invoker function instead of the default
+ subprocess_invoker.
+ """
+ if invoker is None:
+ invoker = subprocess_invoker
+ cmd_list = cmdline.split(command_line)
+ exe = _get_executable_path(cmd_list[0])
+ if exe is not None:
+ cmd_list[0] = exe
+ args, tmp_file = _subst_filename(cmd_list, filename)
+ def cleanup(retcode):
+ if tmp_file is not None:
+ if retcode == 0: # on success, replace file with temp file
+ shutil.move(tmp_file, filename)
+ else: # otherwise, delete temp file
+ os.remove(tmp_file)
+ return invoker(args[0], args[1:], cleanup)
+
+
+def _get_executable_path(exe):
+ if os.path.isabs(exe):
+ return exe
+ return osutils.find_executable_on_path(exe)
+
+
+def _subst_filename(args, filename):
+ subst_names = {
+ 'base': filename + u'.BASE',
+ 'this': filename + u'.THIS',
+ 'other': filename + u'.OTHER',
+ 'result': filename,
+ }
+ tmp_file = None
+ subst_args = []
+ for arg in args:
+ if '{this_temp}' in arg and not 'this_temp' in subst_names:
+ fh, tmp_file = tempfile.mkstemp(u"_bzr_mergetools_%s.THIS" %
+ os.path.basename(filename))
+ trace.mutter('fh=%r, tmp_file=%r', fh, tmp_file)
+ os.close(fh)
+ shutil.copy(filename + u".THIS", tmp_file)
+ subst_names['this_temp'] = tmp_file
+ arg = _format_arg(arg, subst_names)
+ subst_args.append(arg)
+ return subst_args, tmp_file
+
+
+# This would be better implemented using format() from python 2.6
+def _format_arg(arg, subst_names):
+ arg = arg.replace('{base}', subst_names['base'])
+ arg = arg.replace('{this}', subst_names['this'])
+ arg = arg.replace('{other}', subst_names['other'])
+ arg = arg.replace('{result}', subst_names['result'])
+ if subst_names.has_key('this_temp'):
+ arg = arg.replace('{this_temp}', subst_names['this_temp'])
+ return arg
+
+
+def subprocess_invoker(executable, args, cleanup):
+ retcode = subprocess.call([executable] + args)
+ cleanup(retcode)
+ return retcode
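+
+# Illustrative sketch (not part of the original source; 'foo.txt' and the
+# chosen command line are hypothetical): resolving a conflict with an
+# external tool boils down to
+# retcode = invoke('kdiff3 {base} {this} {other} -o {result}', 'foo.txt')
+# which runs the tool via subprocess_invoker(); when a command uses
+# {this_temp} (as the meld entry does), cleanup() moves the scratch copy
+# back over the result file only if the tool exits successfully.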
diff --git a/bzrlib/missing.py b/bzrlib/missing.py
new file mode 100644
index 0000000..7a6dd83
--- /dev/null
+++ b/bzrlib/missing.py
@@ -0,0 +1,228 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Display what revisions are missing in 'other' from 'this' and vice versa."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ log,
+ symbol_versioning,
+ )
+import bzrlib.revision as _mod_revision
+
+
+def iter_log_revisions(revisions, revision_source, verbose, rev_tag_dict=None):
+ last_tree = revision_source.revision_tree(_mod_revision.NULL_REVISION)
+ last_rev_id = None
+
+ if rev_tag_dict is None:
+ rev_tag_dict = {}
+ for rev in revisions:
+ # We need the following for backward compatibility (hopefully
+ # this will be deprecated soon :-/) -- vila 080911
+ if len(rev) == 2:
+ revno, rev_id = rev
+ merge_depth = 0
+ else:
+ revno, rev_id, merge_depth = rev
+ rev = revision_source.get_revision(rev_id)
+ if verbose:
+ delta = revision_source.get_revision_delta(rev_id)
+ else:
+ delta = None
+ yield log.LogRevision(rev, revno, merge_depth, delta=delta,
+ tags=rev_tag_dict.get(rev_id))
+
+
+def find_unmerged(local_branch, remote_branch, restrict='all',
+ include_merged=None, backward=False,
+ local_revid_range=None, remote_revid_range=None,
+ include_merges=symbol_versioning.DEPRECATED_PARAMETER):
+ """Find revisions from each side that have not been merged.
+
+ :param local_branch: Compare the history of local_branch
+ :param remote_branch: versus the history of remote_branch, and determine
+ mainline revisions which have not been merged.
+ :param restrict: ('all', 'local', 'remote') If 'all', we will return the
+ unique revisions from both sides. If 'local', we will return None
+ for the remote revisions; similarly, if 'remote', we will return None for
+ the local revisions.
+ :param include_merged: Show mainline revisions only if False,
+ all revisions otherwise.
+ :param backward: Show oldest versions first when True, newest versions
+ first when False.
+ :param local_revid_range: Revision-id range for filtering local_branch
+ revisions (lower bound, upper bound)
+ :param remote_revid_range: Revision-id range for filtering remote_branch
+ revisions (lower bound, upper bound)
+ :param include_merges: Deprecated historical alias for include_merged
+
+ :return: A list of [(revno, revision_id)] for the mainline revisions on
+ each side.
+ """
+ if symbol_versioning.deprecated_passed(include_merges):
+ symbol_versioning.warn(
+ 'include_merges was deprecated in 2.5.'
+ ' Use include_merged instead.',
+ DeprecationWarning, stacklevel=2)
+ if include_merged is None:
+ include_merged = include_merges
+ if include_merged is None:
+ include_merged = False
+ local_branch.lock_read()
+ try:
+ remote_branch.lock_read()
+ try:
+ return _find_unmerged(
+ local_branch, remote_branch, restrict=restrict,
+ include_merged=include_merged, backward=backward,
+ local_revid_range=local_revid_range,
+ remote_revid_range=remote_revid_range)
+ finally:
+ remote_branch.unlock()
+ finally:
+ local_branch.unlock()
+
+
+def _enumerate_mainline(ancestry, graph, tip_revno, tip, backward=True):
+ """Enumerate the mainline revisions for these revisions.
+
+ :param ancestry: A set of revisions that we care about
+ :param graph: A Graph which lets us find the parents for a revision
+ :param tip_revno: The revision number for the tip revision
+ :param tip: The tip of mainline
+ :param backward: Show oldest versions first when True, newest versions
+ first when False.
+ :return: [(revno, revision_id)] for all revisions in ancestry that
+ are left-hand parents from tip, or None if ancestry is None.
+ """
+ if ancestry is None:
+ return None
+ if not ancestry: # Empty ancestry, no need to do any work
+ return []
+
+ # Optionally, we could make 1 call to graph.get_parent_map with all
+ # ancestors. However that will often check many more parents than we
+ # actually need, and the Graph is likely to already have the parents cached
+ # anyway.
+ mainline = []
+ cur = tip
+ cur_revno = tip_revno
+ while cur in ancestry:
+ parent_map = graph.get_parent_map([cur])
+ parents = parent_map.get(cur)
+ if not parents:
+ break # Ghost, we are done
+ mainline.append((str(cur_revno), cur))
+ cur = parents[0]
+ cur_revno -= 1
+ if not backward:
+ mainline.reverse()
+ return mainline
+
+
+def _enumerate_with_merges(branch, ancestry, graph, tip_revno, tip,
+ backward=True):
+ """Enumerate the revisions for the ancestry.
+
+ :param branch: The branch we care about
+ :param ancestry: A set of revisions that we care about
+ :param graph: A Graph which lets us find the parents for a revision
+ :param tip_revno: The revision number for the tip revision
+ :param tip: The tip of the ancestry
+ :param backward: Show oldest versions first when True, newest versions
+ first when False.
+ :return: [(revno, revision_id)] for all revisions in ancestry that
+ are parents from tip, or None if ancestry is None.
+ """
+ if ancestry is None:
+ return None
+ if not ancestry: # Empty ancestry, no need to do any work
+ return []
+
+ merge_sorted_revisions = branch.iter_merge_sorted_revisions()
+ # Now that we got the correct revnos, keep only the relevant
+ # revisions.
+ merge_sorted_revisions = [
+ # log.reverse_by_depth expects seq_num to be present, but it is
+ # stripped by iter_merge_sorted_revisions()
+ (0, revid, n, d, e) for revid, n, d, e in merge_sorted_revisions
+ if revid in ancestry]
+ if not backward:
+ merge_sorted_revisions = log.reverse_by_depth(merge_sorted_revisions)
+ revline = []
+ for seq, rev_id, merge_depth, revno, end_of_merge in merge_sorted_revisions:
+ revline.append(('.'.join(map(str, revno)), rev_id, merge_depth))
+ return revline
+
+
+def _filter_revs(graph, revs, revid_range):
+ if revid_range is None or revs is None:
+ return revs
+ return [rev for rev in revs
+ if graph.is_between(rev[1], revid_range[0], revid_range[1])]
+
+
+def _find_unmerged(local_branch, remote_branch, restrict,
+ include_merged, backward,
+ local_revid_range=None, remote_revid_range=None):
+ """See find_unmerged.
+
+ The branches should already be locked before entering.
+ """
+ local_revno, local_revision_id = local_branch.last_revision_info()
+ remote_revno, remote_revision_id = remote_branch.last_revision_info()
+ if local_revno == remote_revno and local_revision_id == remote_revision_id:
+ # A simple shortcut when the tips are at the same point
+ return [], []
+ graph = local_branch.repository.get_graph(remote_branch.repository)
+ if restrict == 'remote':
+ local_extra = None
+ remote_extra = graph.find_unique_ancestors(remote_revision_id,
+ [local_revision_id])
+ elif restrict == 'local':
+ remote_extra = None
+ local_extra = graph.find_unique_ancestors(local_revision_id,
+ [remote_revision_id])
+ else:
+ if restrict != 'all':
+ raise ValueError('param restrict not one of "all", "local",'
+ ' "remote": %r' % (restrict,))
+ local_extra, remote_extra = graph.find_difference(local_revision_id,
+ remote_revision_id)
+ if include_merged:
+ locals = _enumerate_with_merges(local_branch, local_extra,
+ graph, local_revno,
+ local_revision_id, backward)
+ remotes = _enumerate_with_merges(remote_branch, remote_extra,
+ graph, remote_revno,
+ remote_revision_id, backward)
+ else:
+ # Now that we have unique ancestors, compute just the mainline, and
+ # generate revnos for them.
+ locals = _enumerate_mainline(local_extra, graph, local_revno,
+ local_revision_id, backward)
+ remotes = _enumerate_mainline(remote_extra, graph, remote_revno,
+ remote_revision_id, backward)
+ return _filter_revs(graph, locals, local_revid_range), _filter_revs(graph,
+ remotes, remote_revid_range)
+
+
+def sorted_revisions(revisions, history_map):
+ revisions = [(history_map[r], r) for r in revisions]
+ revisions.sort()
+ return revisions
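+
+# Illustrative sketch (not part of the original source; the two branches
+# are assumed to be already-opened Branch objects):
+# local_extra, remote_extra = find_unmerged(local_branch, remote_branch)
+# returns two lists of (revno, revision_id) tuples: mainline revisions only
+# present locally and only present remotely. Passing restrict='local' or
+# restrict='remote' returns None for the side you do not care about.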
diff --git a/bzrlib/msgeditor.py b/bzrlib/msgeditor.py
new file mode 100644
index 0000000..700057b
--- /dev/null
+++ b/bzrlib/msgeditor.py
@@ -0,0 +1,349 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Commit message editor support."""
+
+from __future__ import absolute_import
+
+import codecs
+import os
+from subprocess import call
+import sys
+
+from bzrlib import (
+ cmdline,
+ config,
+ osutils,
+ trace,
+ transport,
+ ui,
+ )
+from bzrlib.errors import BzrError, BadCommitMessageEncoding
+from bzrlib.hooks import Hooks
+
+
+def _get_editor():
+ """Return a sequence of possible editor binaries for the current platform"""
+ try:
+ yield os.environ["BZR_EDITOR"], '$BZR_EDITOR'
+ except KeyError:
+ pass
+
+ e = config.GlobalStack().get('editor')
+ if e is not None:
+ yield e, config.config_filename()
+
+ for varname in 'VISUAL', 'EDITOR':
+ if varname in os.environ:
+ yield os.environ[varname], '$' + varname
+
+ if sys.platform == 'win32':
+ for editor in 'wordpad.exe', 'notepad.exe':
+ yield editor, None
+ else:
+ for editor in ['/usr/bin/editor', 'vi', 'pico', 'nano', 'joe']:
+ yield editor, None
+
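+# Illustrative note (not part of the original source): the search order is
+# $BZR_EDITOR, the 'editor' option from the bazaar configuration, $VISUAL,
+# $EDITOR, and finally the platform fallbacks above; _run_editor() tries
+# each entry in turn, skipping a configured candidate that fails to start
+# and any candidate that exits with status 127.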
+
+def _run_editor(filename):
+ """Try to execute an editor to edit the commit message."""
+ for candidate, candidate_source in _get_editor():
+ edargs = cmdline.split(candidate)
+ try:
+ ## mutter("trying editor: %r", (edargs +[filename]))
+ x = call(edargs + [filename])
+ except OSError, e:
+ if candidate_source is not None:
+ # We tried this editor because some user configuration (an
+ # environment variable or config file) said to try it. Let
+ # the user know their configuration is broken.
+ trace.warning(
+ 'Could not start editor "%s" (specified by %s): %s\n'
+ % (candidate, candidate_source, str(e)))
+ continue
+ raise
+ if x == 0:
+ return True
+ elif x == 127:
+ continue
+ else:
+ break
+ raise BzrError("Could not start any editor.\nPlease specify one with:\n"
+ " - $BZR_EDITOR\n - editor=/some/path in %s\n"
+ " - $VISUAL\n - $EDITOR" % \
+ config.config_filename())
+
+
+DEFAULT_IGNORE_LINE = "%(bar)s %(msg)s %(bar)s" % \
+ { 'bar' : '-' * 14, 'msg' : 'This line and the following will be ignored' }
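+
+# Illustrative note (not part of the original source): with the defaults
+# above, the separator written into the template file is the single line
+# "-------------- This line and the following will be ignored --------------".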
+
+
+def edit_commit_message(infotext, ignoreline=DEFAULT_IGNORE_LINE,
+ start_message=None):
+ """Let the user edit a commit message in a temp file.
+
+ This is run if they don't give a message or
+ message-containing file on the command line.
+
+ :param infotext: Text to be displayed at bottom of message
+ for the user's reference;
+ currently similar to 'bzr status'.
+
+ :param ignoreline: The separator to use above the infotext.
+
+ :param start_message: The text to place above the separator, if any.
+ This will not be removed from the message
+ after the user has edited it.
+
+ :return: commit message or None.
+ """
+
+ if not start_message is None:
+ start_message = start_message.encode(osutils.get_user_encoding())
+ infotext = infotext.encode(osutils.get_user_encoding(), 'replace')
+ return edit_commit_message_encoded(infotext, ignoreline, start_message)
+
+
+def edit_commit_message_encoded(infotext, ignoreline=DEFAULT_IGNORE_LINE,
+ start_message=None):
+ """Let the user edit a commit message in a temp file.
+
+ This is run if they don't give a message or
+ message-containing file on the command line.
+
+ :param infotext: Text to be displayed at bottom of message
+ for the user's reference;
+ currently similar to 'bzr status'.
+ The string is already encoded
+
+ :param ignoreline: The separator to use above the infotext.
+
+ :param start_message: The text to place above the separator, if any.
+ This will not be removed from the message
+ after the user has edited it.
+ The string is already encoded
+
+ :return: commit message or None.
+ """
+ msgfilename = None
+ try:
+ msgfilename, hasinfo = _create_temp_file_with_commit_template(
+ infotext, ignoreline, start_message)
+ if not msgfilename:
+ return None
+ basename = osutils.basename(msgfilename)
+ msg_transport = transport.get_transport_from_path(osutils.dirname(msgfilename))
+ reference_content = msg_transport.get_bytes(basename)
+ if not _run_editor(msgfilename):
+ return None
+ edited_content = msg_transport.get_bytes(basename)
+ if edited_content == reference_content:
+ if not ui.ui_factory.confirm_action(
+ u"Commit message was not edited, use anyway",
+ "bzrlib.msgeditor.unchanged",
+ {}):
+ # Returning "" makes cmd_commit raise 'empty commit message
+ # specified' which is a reasonable error, given the user has
+ # rejected using the unedited template.
+ return ""
+ started = False
+ msg = []
+ lastline, nlines = 0, 0
+ # codecs.open() ALWAYS opens file in binary mode but we need text mode
+ # 'rU' mode useful when bzr.exe used on Cygwin (bialix 20070430)
+ f = file(msgfilename, 'rU')
+ try:
+ try:
+ for line in codecs.getreader(osutils.get_user_encoding())(f):
+ stripped_line = line.strip()
+ # strip empty line before the log message starts
+ if not started:
+ if stripped_line != "":
+ started = True
+ else:
+ continue
+ # check for the ignore line only if there
+ # is additional information at the end
+ if hasinfo and stripped_line == ignoreline:
+ break
+ nlines += 1
+ # keep track of the last line that had some content
+ if stripped_line != "":
+ lastline = nlines
+ msg.append(line)
+ except UnicodeDecodeError:
+ raise BadCommitMessageEncoding()
+ finally:
+ f.close()
+
+ if len(msg) == 0:
+ return ""
+ # delete empty lines at the end
+ del msg[lastline:]
+ # add a newline at the end, if needed
+ if not msg[-1].endswith("\n"):
+ return "%s%s" % ("".join(msg), "\n")
+ else:
+ return "".join(msg)
+ finally:
+ # delete the msg file in any case
+ if msgfilename is not None:
+ try:
+ os.unlink(msgfilename)
+ except IOError, e:
+ trace.warning(
+ "failed to unlink %s: %s; ignored", msgfilename, e)
+
+
+def _create_temp_file_with_commit_template(infotext,
+ ignoreline=DEFAULT_IGNORE_LINE,
+ start_message=None,
+ tmpdir=None):
+ """Create temp file and write commit template in it.
+
+ :param infotext: Text to be displayed at bottom of message for the
+ user's reference; currently similar to 'bzr status'. The text is
+ already encoded.
+
+ :param ignoreline: The separator to use above the infotext.
+
+ :param start_message: The text to place above the separator, if any.
+ This will not be removed from the message after the user has edited
+ it. The string is already encoded
+
+ :return: 2-tuple (temp file name, hasinfo)
+ """
+ import tempfile
+ tmp_fileno, msgfilename = tempfile.mkstemp(prefix='bzr_log.',
+ dir=tmpdir, text=True)
+ msgfile = os.fdopen(tmp_fileno, 'w')
+ try:
+ if start_message is not None:
+ msgfile.write("%s\n" % start_message)
+
+ if infotext is not None and infotext != "":
+ hasinfo = True
+ msgfile.write("\n\n%s\n\n%s" %(ignoreline, infotext))
+ else:
+ hasinfo = False
+ finally:
+ msgfile.close()
+
+ return (msgfilename, hasinfo)
+
+
+def make_commit_message_template(working_tree, specific_files):
+ """Prepare a template file for a commit into a branch.
+
+ Returns a unicode string containing the template.
+ """
+ # TODO: make provision for this to be overridden or modified by a hook
+ #
+ # TODO: Rather than running the status command, should prepare a draft of
+ # the revision to be committed, then pause and ask the user to
+ # confirm/write a message.
+ from StringIO import StringIO # must be unicode-safe
+ from bzrlib.status import show_tree_status
+ status_tmp = StringIO()
+ show_tree_status(working_tree, specific_files=specific_files,
+ to_file=status_tmp, verbose=True)
+ return status_tmp.getvalue()
+
+
+def make_commit_message_template_encoded(working_tree, specific_files,
+ diff=None, output_encoding='utf-8'):
+ """Prepare a template file for a commit into a branch.
+
+ Returns an encoded string.
+ """
+ # TODO: make provision for this to be overridden or modified by a hook
+ #
+ # TODO: Rather than running the status command, should prepare a draft of
+ # the revision to be committed, then pause and ask the user to
+ # confirm/write a message.
+ from StringIO import StringIO # must be unicode-safe
+ from bzrlib.diff import show_diff_trees
+
+ template = make_commit_message_template(working_tree, specific_files)
+ template = template.encode(output_encoding, "replace")
+
+ if diff:
+ stream = StringIO()
+ show_diff_trees(working_tree.basis_tree(),
+ working_tree, stream, specific_files,
+ path_encoding=output_encoding)
+ template = template + '\n' + stream.getvalue()
+
+ return template
+
+
+class MessageEditorHooks(Hooks):
+ """A dictionary mapping hook name to a list of callables for message editor
+ hooks.
+
+ e.g. ['commit_message_template'] is the list of items to be called to
+ generate a commit message template
+ """
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially.
+ """
+ Hooks.__init__(self, "bzrlib.msgeditor", "hooks")
+ self.add_hook('set_commit_message',
+ "Set a fixed commit message. "
+ "set_commit_message is called with the "
+ "bzrlib.commit.Commit object (so you can also change e.g. revision "
+ "properties by editing commit.builder._revprops) and the message "
+ "so far. set_commit_message must return the message to use or None"
+ " if it should use the message editor as normal.", (2, 4))
+ self.add_hook('commit_message_template',
+ "Called when a commit message is being generated. "
+ "commit_message_template is called with the bzrlib.commit.Commit "
+ "object and the message that is known so far. "
+ "commit_message_template must return a new message to use (which "
+ "could be the same as it was given). When there are multiple "
+ "hooks registered for commit_message_template, they are chained "
+ "with the result from the first passed into the second, and so "
+ "on.", (1, 10))
+
+
+hooks = MessageEditorHooks()
+
+
+def set_commit_message(commit, start_message=None):
+ """Sets the commit message.
+ :param commit: Commit object for the active commit.
+ :return: The commit message or None to continue using the message editor
+ """
+ start_message = None
+ for hook in hooks['set_commit_message']:
+ start_message = hook(commit, start_message)
+ return start_message
+
+
+def generate_commit_message_template(commit, start_message=None):
+ """Generate a commit message template.
+
+ :param commit: Commit object for the active commit.
+ :param start_message: Message to start with.
+ :return: A start commit message or None for an empty start commit message.
+ """
+ start_message = None
+ for hook in hooks['commit_message_template']:
+ start_message = hook(commit, start_message)
+ return start_message
diff --git a/bzrlib/multiparent.py b/bzrlib/multiparent.py
new file mode 100644
index 0000000..c352cbc
--- /dev/null
+++ b/bzrlib/multiparent.py
@@ -0,0 +1,681 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+
+lazy_import(globals(), """
+import errno
+import gzip
+import itertools
+import os
+from StringIO import StringIO
+
+from bzrlib import (
+ bencode,
+ errors,
+ patiencediff,
+ ui,
+ )
+from bzrlib.i18n import gettext
+""")
+
+
+def topo_iter_keys(vf, keys=None):
+ if keys is None:
+ keys = vf.keys()
+ parents = vf.get_parent_map(keys)
+ return _topo_iter(parents, keys)
+
+def topo_iter(vf, versions=None):
+ if versions is None:
+ versions = vf.versions()
+ parents = vf.get_parent_map(versions)
+ return _topo_iter(parents, versions)
+
+def _topo_iter(parents, versions):
+ seen = set()
+ descendants = {}
+ def pending_parents(version):
+ if parents[version] is None:
+ return []
+ return [v for v in parents[version] if v in versions and
+ v not in seen]
+ for version_id in versions:
+ if parents[version_id] is None:
+ # parentless
+ continue
+ for parent_id in parents[version_id]:
+ descendants.setdefault(parent_id, []).append(version_id)
+ cur = [v for v in versions if len(pending_parents(v)) == 0]
+ while len(cur) > 0:
+ next = []
+ for version_id in cur:
+ if version_id in seen:
+ continue
+ if len(pending_parents(version_id)) != 0:
+ continue
+ next.extend(descendants.get(version_id, []))
+ yield version_id
+ seen.add(version_id)
+ cur = next
+
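+# Illustrative note (not part of the original source): _topo_iter() yields
+# every version after all of its listed parents; e.g. for
+# parents = {'A': (), 'B': ('A',), 'C': ('A', 'B')} and
+# versions = ['A', 'B', 'C'], the generator produces 'A', 'B', 'C'.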
+
+class MultiParent(object):
+ """A multi-parent diff"""
+
+ __slots__ = ['hunks']
+
+ def __init__(self, hunks=None):
+ if hunks is not None:
+ self.hunks = hunks
+ else:
+ self.hunks = []
+
+ def __repr__(self):
+ return "MultiParent(%r)" % self.hunks
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return (self.hunks == other.hunks)
+
+ @staticmethod
+ def from_lines(text, parents=(), left_blocks=None):
+ """Produce a MultiParent from a list of lines and parents"""
+ def compare(parent):
+ matcher = patiencediff.PatienceSequenceMatcher(None, parent,
+ text)
+ return matcher.get_matching_blocks()
+ if len(parents) > 0:
+ if left_blocks is None:
+ left_blocks = compare(parents[0])
+ parent_comparisons = [left_blocks] + [compare(p) for p in
+ parents[1:]]
+ else:
+ parent_comparisons = []
+ cur_line = 0
+ new_text = NewText([])
+ parent_text = []
+ block_iter = [iter(i) for i in parent_comparisons]
+ diff = MultiParent([])
+ def next_block(p):
+ try:
+ return block_iter[p].next()
+ except StopIteration:
+ return None
+ cur_block = [next_block(p) for p, i in enumerate(block_iter)]
+ while cur_line < len(text):
+ best_match = None
+ for p, block in enumerate(cur_block):
+ if block is None:
+ continue
+ i, j, n = block
+ while j + n <= cur_line:
+ block = cur_block[p] = next_block(p)
+ if block is None:
+ break
+ i, j, n = block
+ if block is None:
+ continue
+ if j > cur_line:
+ continue
+ offset = cur_line - j
+ i += offset
+ j = cur_line
+ n -= offset
+ if n == 0:
+ continue
+ if best_match is None or n > best_match.num_lines:
+ best_match = ParentText(p, i, j, n)
+ if best_match is None:
+ new_text.lines.append(text[cur_line])
+ cur_line += 1
+ else:
+ if len(new_text.lines) > 0:
+ diff.hunks.append(new_text)
+ new_text = NewText([])
+ diff.hunks.append(best_match)
+ cur_line += best_match.num_lines
+ if len(new_text.lines) > 0:
+ diff.hunks.append(new_text)
+ return diff
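+
+ # Illustrative note (not part of the original source): with
+ # text = ['a\n', 'b\n', 'c\n'] and parents = [['a\n', 'c\n']],
+ # from_lines() produces the hunks
+ # [ParentText(0, 0, 0, 1), NewText(['b\n']), ParentText(0, 1, 2, 1)]:
+ # copy one line from parent 0, insert 'b\n', then copy one more parent line.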
+
+ def get_matching_blocks(self, parent, parent_len):
+ for hunk in self.hunks:
+ if not isinstance(hunk, ParentText) or hunk.parent != parent:
+ continue
+ yield (hunk.parent_pos, hunk.child_pos, hunk.num_lines)
+ yield parent_len, self.num_lines(), 0
+
+ def to_lines(self, parents=()):
+ """Contruct a fulltext from this diff and its parents"""
+ mpvf = MultiMemoryVersionedFile()
+ for num, parent in enumerate(parents):
+ mpvf.add_version(StringIO(parent).readlines(), num, [])
+ mpvf.add_diff(self, 'a', range(len(parents)))
+ return mpvf.get_line_list(['a'])[0]
+
+ @classmethod
+ def from_texts(cls, text, parents=()):
+ """Produce a MultiParent from a text and list of parent text"""
+ return cls.from_lines(StringIO(text).readlines(),
+ [StringIO(p).readlines() for p in parents])
+
+ def to_patch(self):
+ """Yield text lines for a patch"""
+ for hunk in self.hunks:
+ for line in hunk.to_patch():
+ yield line
+
+ def patch_len(self):
+ return len(''.join(self.to_patch()))
+
+ def zipped_patch_len(self):
+ return len(gzip_string(self.to_patch()))
+
+ @classmethod
+ def from_patch(cls, text):
+ """Create a MultiParent from its string form"""
+ return cls._from_patch(StringIO(text))
+
+ @staticmethod
+ def _from_patch(lines):
+ """This is private because it is essential to split lines on \n only"""
+ line_iter = iter(lines)
+ hunks = []
+ cur_line = None
+ while(True):
+ try:
+ cur_line = line_iter.next()
+ except StopIteration:
+ break
+ if cur_line[0] == 'i':
+ num_lines = int(cur_line.split(' ')[1])
+ hunk_lines = [line_iter.next() for x in xrange(num_lines)]
+ hunk_lines[-1] = hunk_lines[-1][:-1]
+ hunks.append(NewText(hunk_lines))
+ elif cur_line[0] == '\n':
+ hunks[-1].lines[-1] += '\n'
+ else:
+ if not (cur_line[0] == 'c'):
+ raise AssertionError(cur_line[0])
+ parent, parent_pos, child_pos, num_lines =\
+ [int(v) for v in cur_line.split(' ')[1:]]
+ hunks.append(ParentText(parent, parent_pos, child_pos,
+ num_lines))
+ return MultiParent(hunks)
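+
+ # Illustrative note (not part of the original source): the patch text
+ # parsed here is the inverse of to_patch(): an 'i N' line is followed by
+ # N inserted lines (the last one is stored without its final character,
+ # which is re-attached when a lone '\n' line follows), and a
+ # 'c P PP CP N' line records copying N lines from parent P at parent
+ # position PP to child position CP.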
+
+ def range_iterator(self):
+ """Iterate through the hunks, with range indicated
+
+ kind is "new" or "parent".
+ for "new", data is a list of lines.
+ for "parent", data is (parent, parent_start, parent_end)
+ :return: a generator of (start, end, kind, data)
+ """
+ start = 0
+ for hunk in self.hunks:
+ if isinstance(hunk, NewText):
+ kind = 'new'
+ end = start + len(hunk.lines)
+ data = hunk.lines
+ else:
+ kind = 'parent'
+ start = hunk.child_pos
+ end = start + hunk.num_lines
+ data = (hunk.parent, hunk.parent_pos, hunk.parent_pos +
+ hunk.num_lines)
+ yield start, end, kind, data
+ start = end
+
+ def num_lines(self):
+ """The number of lines in the output text"""
+ extra_n = 0
+ for hunk in reversed(self.hunks):
+ if isinstance(hunk, ParentText):
+ return hunk.child_pos + hunk.num_lines + extra_n
+ extra_n += len(hunk.lines)
+ return extra_n
+
+ def is_snapshot(self):
+ """Return true of this hunk is effectively a fulltext"""
+ if len(self.hunks) != 1:
+ return False
+ return (isinstance(self.hunks[0], NewText))
+
+
+class NewText(object):
+ """The contents of text that is introduced by this text"""
+
+ __slots__ = ['lines']
+
+ def __init__(self, lines):
+ self.lines = lines
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return (other.lines == self.lines)
+
+ def __repr__(self):
+ return 'NewText(%r)' % self.lines
+
+ def to_patch(self):
+ yield 'i %d\n' % len(self.lines)
+ for line in self.lines:
+ yield line
+ yield '\n'
+
+
+class ParentText(object):
+ """A reference to text present in a parent text"""
+
+ __slots__ = ['parent', 'parent_pos', 'child_pos', 'num_lines']
+
+ def __init__(self, parent, parent_pos, child_pos, num_lines):
+ self.parent = parent
+ self.parent_pos = parent_pos
+ self.child_pos = child_pos
+ self.num_lines = num_lines
+
+ def _as_dict(self):
+ return dict(parent=self.parent, parent_pos=self.parent_pos,
+ child_pos=self.child_pos, num_lines=self.num_lines)
+
+ def __repr__(self):
+ return ('ParentText(%(parent)r, %(parent_pos)r, %(child_pos)r,'
+ ' %(num_lines)r)' % self._as_dict())
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return self._as_dict() == other._as_dict()
+
+ def to_patch(self):
+ yield ('c %(parent)d %(parent_pos)d %(child_pos)d %(num_lines)d\n'
+ % self._as_dict())
+
+
+class BaseVersionedFile(object):
+ """Pseudo-VersionedFile skeleton for MultiParent"""
+
+ def __init__(self, snapshot_interval=25, max_snapshots=None):
+ self._lines = {}
+ self._parents = {}
+ self._snapshots = set()
+ self.snapshot_interval = snapshot_interval
+ self.max_snapshots = max_snapshots
+
+ def versions(self):
+ return iter(self._parents)
+
+ def has_version(self, version):
+ return version in self._parents
+
+ def do_snapshot(self, version_id, parent_ids):
+ """Determine whether to perform a snapshot for this version"""
+ if self.snapshot_interval is None:
+ return False
+ if self.max_snapshots is not None and\
+ len(self._snapshots) == self.max_snapshots:
+ return False
+ if len(parent_ids) == 0:
+ return True
+ for ignored in xrange(self.snapshot_interval):
+ if len(parent_ids) == 0:
+ return False
+ version_ids = parent_ids
+ parent_ids = []
+ for version_id in version_ids:
+ if version_id not in self._snapshots:
+ parent_ids.extend(self._parents[version_id])
+ else:
+ return True
+
+ def add_version(self, lines, version_id, parent_ids,
+ force_snapshot=None, single_parent=False):
+ """Add a version to the versionedfile
+
+ :param lines: The list of lines to add. Must be split on '\n'.
+ :param version_id: The version_id of the version to add
+ :param force_snapshot: If true, force this version to be added as a
+ snapshot version. If false, force this version to be added as a
+ diff. If none, determine this automatically.
+ :param single_parent: If true, use a single parent, rather than
+ multiple parents.
+ """
+ if force_snapshot is None:
+ do_snapshot = self.do_snapshot(version_id, parent_ids)
+ else:
+ do_snapshot = force_snapshot
+ if do_snapshot:
+ self._snapshots.add(version_id)
+ diff = MultiParent([NewText(lines)])
+ else:
+ if single_parent:
+ parent_lines = self.get_line_list(parent_ids[:1])
+ else:
+ parent_lines = self.get_line_list(parent_ids)
+ diff = MultiParent.from_lines(lines, parent_lines)
+ if diff.is_snapshot():
+ self._snapshots.add(version_id)
+ self.add_diff(diff, version_id, parent_ids)
+ self._lines[version_id] = lines
+
+ def get_parents(self, version_id):
+ return self._parents[version_id]
+
+ def make_snapshot(self, version_id):
+ snapdiff = MultiParent([NewText(self.cache_version(version_id))])
+ self.add_diff(snapdiff, version_id, self._parents[version_id])
+ self._snapshots.add(version_id)
+
+ def import_versionedfile(self, vf, snapshots, no_cache=True,
+ single_parent=False, verify=False):
+ """Import all revisions of a versionedfile
+
+ :param vf: The versionedfile to import
+ :param snapshots: If provided, the revisions to make snapshots of.
+ Otherwise, this will be auto-determined
+        :param no_cache: If True, clear the cache after every add.
+        :param single_parent: If True, omit all but one parent text (but
+            retain parent metadata).
+        :param verify: If True, check each reconstructed text against the
+            original lines after it is added (only supported with no_cache).
+        """
+ if not (no_cache or not verify):
+ raise ValueError()
+ revisions = set(vf.versions())
+ total = len(revisions)
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ while len(revisions) > 0:
+ added = set()
+ for revision in revisions:
+ parents = vf.get_parents(revision)
+ if [p for p in parents if p not in self._parents] != []:
+ continue
+ lines = [a + ' ' + l for a, l in
+ vf.annotate(revision)]
+ if snapshots is None:
+ force_snapshot = None
+ else:
+ force_snapshot = (revision in snapshots)
+ self.add_version(lines, revision, parents, force_snapshot,
+ single_parent)
+ added.add(revision)
+ if no_cache:
+ self.clear_cache()
+ vf.clear_cache()
+ if verify:
+ if not (lines == self.get_line_list([revision])[0]):
+ raise AssertionError()
+ self.clear_cache()
+ pb.update(gettext('Importing revisions'),
+ (total - len(revisions)) + len(added), total)
+ revisions = [r for r in revisions if r not in added]
+ finally:
+ pb.finished()
+
+ def select_snapshots(self, vf):
+ """Determine which versions to add as snapshots"""
+ build_ancestors = {}
+ descendants = {}
+ snapshots = set()
+ for version_id in topo_iter(vf):
+ potential_build_ancestors = set(vf.get_parents(version_id))
+ parents = vf.get_parents(version_id)
+ if len(parents) == 0:
+ snapshots.add(version_id)
+ build_ancestors[version_id] = set()
+ else:
+ for parent in vf.get_parents(version_id):
+ potential_build_ancestors.update(build_ancestors[parent])
+ if len(potential_build_ancestors) > self.snapshot_interval:
+ snapshots.add(version_id)
+ build_ancestors[version_id] = set()
+ else:
+ build_ancestors[version_id] = potential_build_ancestors
+ return snapshots
+
+ def select_by_size(self, num):
+ """Select snapshots for minimum output size"""
+ num -= len(self._snapshots)
+ new_snapshots = self.get_size_ranking()[-num:]
+ return [v for n, v in new_snapshots]
+
+ def get_size_ranking(self):
+ """Get versions ranked by size"""
+ versions = []
+ new_snapshots = set()
+ for version_id in self.versions():
+ if version_id in self._snapshots:
+ continue
+ diff_len = self.get_diff(version_id).patch_len()
+ snapshot_len = MultiParent([NewText(
+ self.cache_version(version_id))]).patch_len()
+ versions.append((snapshot_len - diff_len, version_id))
+ versions.sort()
+ return versions
+
+ def import_diffs(self, vf):
+ """Import the diffs from another pseudo-versionedfile"""
+ for version_id in vf.versions():
+ self.add_diff(vf.get_diff(version_id), version_id,
+ vf._parents[version_id])
+
+ def get_build_ranking(self):
+ """Return revisions sorted by how much they reduce build complexity"""
+ could_avoid = {}
+ referenced_by = {}
+ for version_id in topo_iter(self):
+ could_avoid[version_id] = set()
+ if version_id not in self._snapshots:
+ for parent_id in self._parents[version_id]:
+ could_avoid[version_id].update(could_avoid[parent_id])
+ could_avoid[version_id].update(self._parents)
+ could_avoid[version_id].discard(version_id)
+ for avoid_id in could_avoid[version_id]:
+ referenced_by.setdefault(avoid_id, set()).add(version_id)
+ available_versions = list(self.versions())
+ ranking = []
+ while len(available_versions) > 0:
+ available_versions.sort(key=lambda x:
+ len(could_avoid[x]) *
+ len(referenced_by.get(x, [])))
+ selected = available_versions.pop()
+ ranking.append(selected)
+ for version_id in referenced_by[selected]:
+ could_avoid[version_id].difference_update(
+ could_avoid[selected])
+ for version_id in could_avoid[selected]:
+ referenced_by[version_id].difference_update(
+ referenced_by[selected]
+ )
+ return ranking
+
+ def clear_cache(self):
+ self._lines.clear()
+
+ def get_line_list(self, version_ids):
+ return [self.cache_version(v) for v in version_ids]
+
+ def cache_version(self, version_id):
+ try:
+ return self._lines[version_id]
+ except KeyError:
+ pass
+ diff = self.get_diff(version_id)
+ lines = []
+ reconstructor = _Reconstructor(self, self._lines, self._parents)
+ reconstructor.reconstruct_version(lines, version_id)
+ self._lines[version_id] = lines
+ return lines
+
+
+class MultiMemoryVersionedFile(BaseVersionedFile):
+ """Memory-backed pseudo-versionedfile"""
+
+ def __init__(self, snapshot_interval=25, max_snapshots=None):
+ BaseVersionedFile.__init__(self, snapshot_interval, max_snapshots)
+ self._diffs = {}
+
+ def add_diff(self, diff, version_id, parent_ids):
+ self._diffs[version_id] = diff
+ self._parents[version_id] = parent_ids
+
+ def get_diff(self, version_id):
+ try:
+ return self._diffs[version_id]
+ except KeyError:
+ raise errors.RevisionNotPresent(version_id, self)
+
+ def destroy(self):
+ self._diffs = {}
+
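+# A minimal usage sketch for the in-memory store above (version ids and line
+# contents are illustrative):
+#
+#   vf = MultiMemoryVersionedFile(snapshot_interval=5)
+#   vf.add_version(['a\n', 'b\n'], 'rev-1', [])
+#   vf.add_version(['a\n', 'c\n'], 'rev-2', ['rev-1'])
+#   vf.get_line_list(['rev-2'])   # -> [['a\n', 'c\n']]
+#   vf.get_diff('rev-2')          # -> the stored MultiParent for rev-2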
+
+class MultiVersionedFile(BaseVersionedFile):
+ """Disk-backed pseudo-versionedfile"""
+
+ def __init__(self, filename, snapshot_interval=25, max_snapshots=None):
+ BaseVersionedFile.__init__(self, snapshot_interval, max_snapshots)
+ self._filename = filename
+ self._diff_offset = {}
+
+ def get_diff(self, version_id):
+ start, count = self._diff_offset[version_id]
+ infile = open(self._filename + '.mpknit', 'rb')
+ try:
+ infile.seek(start)
+ sio = StringIO(infile.read(count))
+ finally:
+ infile.close()
+ zip_file = gzip.GzipFile(None, mode='rb', fileobj=sio)
+ try:
+ file_version_id = zip_file.readline()
+ content = zip_file.read()
+ return MultiParent.from_patch(content)
+ finally:
+ zip_file.close()
+
+ def add_diff(self, diff, version_id, parent_ids):
+ outfile = open(self._filename + '.mpknit', 'ab')
+ try:
+ outfile.seek(0, 2) # workaround for windows bug:
+ # .tell() for files opened in 'ab' mode
+ # before any write returns 0
+ start = outfile.tell()
+ try:
+ zipfile = gzip.GzipFile(None, mode='ab', fileobj=outfile)
+ zipfile.writelines(itertools.chain(
+ ['version %s\n' % version_id], diff.to_patch()))
+ finally:
+ zipfile.close()
+ end = outfile.tell()
+ finally:
+ outfile.close()
+ self._diff_offset[version_id] = (start, end-start)
+ self._parents[version_id] = parent_ids
+
+ def destroy(self):
+ try:
+ os.unlink(self._filename + '.mpknit')
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ os.unlink(self._filename + '.mpidx')
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def save(self):
+ open(self._filename + '.mpidx', 'wb').write(bencode.bencode(
+ (self._parents, list(self._snapshots), self._diff_offset)))
+
+ def load(self):
+ self._parents, snapshots, self._diff_offset = bencode.bdecode(
+ open(self._filename + '.mpidx', 'rb').read())
+ self._snapshots = set(snapshots)
+
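+# The disk-backed variant stores gzipped hunks in '<filename>.mpknit' and a
+# bencoded index (parents, snapshots, offsets) in '<filename>.mpidx'.  A rough
+# round-trip sketch (file name illustrative):
+#
+#   vf = MultiVersionedFile('example')
+#   vf.add_version(['a\n'], 'rev-1', [])
+#   vf.save()
+#   ...
+#   vf2 = MultiVersionedFile('example')
+#   vf2.load()
+#   vf2.get_line_list(['rev-1'])   # -> [['a\n']]
+#   vf2.destroy()                  # removes both files again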
+
+class _Reconstructor(object):
+ """Build a text from the diffs, ancestry graph and cached lines"""
+
+ def __init__(self, diffs, lines, parents):
+ self.diffs = diffs
+ self.lines = lines
+ self.parents = parents
+ self.cursor = {}
+
+ def reconstruct(self, lines, parent_text, version_id):
+ """Append the lines referred to by a ParentText to lines"""
+ parent_id = self.parents[version_id][parent_text.parent]
+ end = parent_text.parent_pos + parent_text.num_lines
+ return self._reconstruct(lines, parent_id, parent_text.parent_pos,
+ end)
+
+ def _reconstruct(self, lines, req_version_id, req_start, req_end):
+ """Append lines for the requested version_id range"""
+ # stack of pending range requests
+ if req_start == req_end:
+ return
+ pending_reqs = [(req_version_id, req_start, req_end)]
+ while len(pending_reqs) > 0:
+ req_version_id, req_start, req_end = pending_reqs.pop()
+ # lazily allocate cursors for versions
+ if req_version_id in self.lines:
+ lines.extend(self.lines[req_version_id][req_start:req_end])
+ continue
+ try:
+ start, end, kind, data, iterator = self.cursor[req_version_id]
+ except KeyError:
+ iterator = self.diffs.get_diff(req_version_id).range_iterator()
+ start, end, kind, data = iterator.next()
+ if start > req_start:
+ iterator = self.diffs.get_diff(req_version_id).range_iterator()
+ start, end, kind, data = iterator.next()
+
+ # find the first hunk relevant to the request
+ while end <= req_start:
+ start, end, kind, data = iterator.next()
+ self.cursor[req_version_id] = start, end, kind, data, iterator
+ # if the hunk can't satisfy the whole request, split it in two,
+ # and leave the second half for later.
+ if req_end > end:
+ pending_reqs.append((req_version_id, end, req_end))
+ req_end = end
+ if kind == 'new':
+ lines.extend(data[req_start - start: (req_end - start)])
+ else:
+ # If the hunk is a ParentText, rewrite it as a range request
+ # for the parent, and make it the next pending request.
+ parent, parent_start, parent_end = data
+ new_version_id = self.parents[req_version_id][parent]
+ new_start = parent_start + req_start - start
+ new_end = parent_end + req_end - end
+ pending_reqs.append((new_version_id, new_start, new_end))
+
+ def reconstruct_version(self, lines, version_id):
+ length = self.diffs.get_diff(version_id).num_lines()
+ return self._reconstruct(lines, version_id, 0, length)
+
+
+def gzip_string(lines):
+ sio = StringIO()
+ data_file = gzip.GzipFile(None, mode='wb', fileobj=sio)
+ data_file.writelines(lines)
+ data_file.close()
+ return sio.getvalue()
diff --git a/bzrlib/mutabletree.py b/bzrlib/mutabletree.py
new file mode 100644
index 0000000..3378d1f
--- /dev/null
+++ b/bzrlib/mutabletree.py
@@ -0,0 +1,799 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""MutableTree object.
+
+See MutableTree for more details.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import operator
+import os
+import re
+
+from bzrlib import (
+ add,
+ controldir,
+ errors,
+ hooks,
+ inventory as _mod_inventory,
+ osutils,
+ revisiontree,
+ trace,
+ transport as _mod_transport,
+ tree,
+ )
+""")
+
+from bzrlib.decorators import needs_read_lock, needs_write_lock
+
+
+def needs_tree_write_lock(unbound):
+ """Decorate unbound to take out and release a tree_write lock."""
+ def tree_write_locked(self, *args, **kwargs):
+ self.lock_tree_write()
+ try:
+ return unbound(self, *args, **kwargs)
+ finally:
+ self.unlock()
+ tree_write_locked.__doc__ = unbound.__doc__
+ tree_write_locked.__name__ = unbound.__name__
+ return tree_write_locked
+
+
+class MutableTree(tree.Tree):
+ """A MutableTree is a specialisation of Tree which is able to be mutated.
+
+    Generally speaking these mutations are only possible within a lock_write
+    context, and will revert if the lock is broken abnormally - but this cannot
+    be guaranteed; it depends on the exact implementation of the mutable state.
+
+ The most common form of Mutable Tree is WorkingTree, see bzrlib.workingtree.
+ For tests we also have MemoryTree which is a MutableTree whose contents are
+ entirely in memory.
+
+ For now, we are not treating MutableTree as an interface to provide
+ conformance tests for - rather we are testing MemoryTree specifically, and
+ interface testing implementations of WorkingTree.
+
+ A mutable tree always has an associated Branch and ControlDir object - the
+ branch and bzrdir attributes.
+ """
+ def __init__(self, *args, **kw):
+ super(MutableTree, self).__init__(*args, **kw)
+ # Is this tree on a case-insensitive or case-preserving file-system?
+ # Sub-classes may initialize to False if they detect they are being
+ # used on media which doesn't differentiate the case of names.
+ self.case_sensitive = True
+
+ def is_control_filename(self, filename):
+ """True if filename is the name of a control file in this tree.
+
+ :param filename: A filename within the tree. This is a relative path
+ from the root of this tree.
+
+ This is true IF and ONLY IF the filename is part of the meta data
+ that bzr controls in this tree. I.E. a random .bzr directory placed
+ on disk will not be a control file for this tree.
+ """
+ raise NotImplementedError(self.is_control_filename)
+
+ @needs_tree_write_lock
+ def add(self, files, ids=None, kinds=None):
+ """Add paths to the set of versioned paths.
+
+ Note that the command line normally calls smart_add instead,
+ which can automatically recurse.
+
+ This adds the files to the inventory, so that they will be
+ recorded by the next commit.
+
+ :param files: List of paths to add, relative to the base of the tree.
+ :param ids: If set, use these instead of automatically generated ids.
+ Must be the same length as the list of files, but may
+ contain None for ids that are to be autogenerated.
+ :param kinds: Optional parameter to specify the kinds to be used for
+ each file.
+
+ TODO: Perhaps callback with the ids and paths as they're added.
+ """
+ if isinstance(files, basestring):
+ # XXX: Passing a single string is inconsistent and should be
+ # deprecated.
+ if not (ids is None or isinstance(ids, basestring)):
+ raise AssertionError()
+ if not (kinds is None or isinstance(kinds, basestring)):
+ raise AssertionError()
+ files = [files]
+ if ids is not None:
+ ids = [ids]
+ if kinds is not None:
+ kinds = [kinds]
+
+ files = [path.strip('/') for path in files]
+
+ if ids is None:
+ ids = [None] * len(files)
+ else:
+ if not (len(ids) == len(files)):
+ raise AssertionError()
+ if kinds is None:
+ kinds = [None] * len(files)
+ elif not len(kinds) == len(files):
+ raise AssertionError()
+ for f in files:
+ # generic constraint checks:
+ if self.is_control_filename(f):
+ raise errors.ForbiddenControlFileError(filename=f)
+ fp = osutils.splitpath(f)
+ # fill out file kinds for all files [not needed when we stop
+        # caring about the instantaneous file kind within an uncommitted tree
+ #
+ self._gather_kinds(files, kinds)
+ self._add(files, ids, kinds)
+
+ def add_reference(self, sub_tree):
+ """Add a TreeReference to the tree, pointing at sub_tree"""
+ raise errors.UnsupportedOperation(self.add_reference, self)
+
+ def _add_reference(self, sub_tree):
+ """Standard add_reference implementation, for use by subclasses"""
+ try:
+ sub_tree_path = self.relpath(sub_tree.basedir)
+ except errors.PathNotChild:
+ raise errors.BadReferenceTarget(self, sub_tree,
+ 'Target not inside tree.')
+ sub_tree_id = sub_tree.get_root_id()
+ if sub_tree_id == self.get_root_id():
+ raise errors.BadReferenceTarget(self, sub_tree,
+ 'Trees have the same root id.')
+ if self.has_id(sub_tree_id):
+ raise errors.BadReferenceTarget(self, sub_tree,
+ 'Root id already present in tree')
+ self._add([sub_tree_path], [sub_tree_id], ['tree-reference'])
+
+ def _add(self, files, ids, kinds):
+ """Helper function for add - updates the inventory.
+
+ :param files: sequence of pathnames, relative to the tree root
+ :param ids: sequence of suggested ids for the files (may be None)
+ :param kinds: sequence of inventory kinds of the files (i.e. may
+ contain "tree-reference")
+ """
+ raise NotImplementedError(self._add)
+
+ def apply_inventory_delta(self, changes):
+ """Apply changes to the inventory as an atomic operation.
+
+ :param changes: An inventory delta to apply to the working tree's
+ inventory.
+ :return None:
+ :seealso Inventory.apply_delta: For details on the changes parameter.
+ """
+ raise NotImplementedError(self.apply_inventory_delta)
+
+ @needs_write_lock
+ def commit(self, message=None, revprops=None, *args,
+ **kwargs):
+ # avoid circular imports
+ from bzrlib import commit
+ possible_master_transports=[]
+ revprops = commit.Commit.update_revprops(
+ revprops,
+ self.branch,
+ kwargs.pop('authors', None),
+ kwargs.pop('author', None),
+ kwargs.get('local', False),
+ possible_master_transports)
+ # args for wt.commit start at message from the Commit.commit method,
+ args = (message, ) + args
+ for hook in MutableTree.hooks['start_commit']:
+ hook(self)
+ committed_id = commit.Commit().commit(working_tree=self,
+ revprops=revprops,
+ possible_master_transports=possible_master_transports,
+ *args, **kwargs)
+ post_hook_params = PostCommitHookParams(self)
+ for hook in MutableTree.hooks['post_commit']:
+ hook(post_hook_params)
+ return committed_id
+
+ def _gather_kinds(self, files, kinds):
+ """Helper function for add - sets the entries of kinds."""
+ raise NotImplementedError(self._gather_kinds)
+
+ @needs_read_lock
+ def has_changes(self, _from_tree=None):
+        """Quickly check that the tree contains at least one committable change.
+
+        :param _from_tree: tree to compare against to find changes (defaults
+            to the basis tree; intended to be used by tests).
+
+        :return: True if a change is found. False otherwise.
+ """
+ # Check pending merges
+ if len(self.get_parent_ids()) > 1:
+ return True
+ if _from_tree is None:
+ _from_tree = self.basis_tree()
+ changes = self.iter_changes(_from_tree)
+ try:
+ change = changes.next()
+ # Exclude root (talk about black magic... --vila 20090629)
+ if change[4] == (None, None):
+ change = changes.next()
+ return True
+ except StopIteration:
+ # No changes
+ return False
+
+ @needs_read_lock
+ def check_changed_or_out_of_date(self, strict, opt_name,
+ more_error, more_warning):
+ """Check the tree for uncommitted changes and branch synchronization.
+
+ If strict is None and not set in the config files, a warning is issued.
+ If strict is True, an error is raised.
+ If strict is False, no checks are done and no warning is issued.
+
+ :param strict: True, False or None, searched in branch config if None.
+
+ :param opt_name: strict option name to search in config file.
+
+ :param more_error: Details about how to avoid the check.
+
+ :param more_warning: Details about what is happening.
+ """
+ if strict is None:
+ strict = self.branch.get_config_stack().get(opt_name)
+ if strict is not False:
+ err_class = None
+ if (self.has_changes()):
+ err_class = errors.UncommittedChanges
+ elif self.last_revision() != self.branch.last_revision():
+ # The tree has lost sync with its branch, there is little
+ # chance that the user is aware of it but he can still force
+ # the action with --no-strict
+ err_class = errors.OutOfDateTree
+ if err_class is not None:
+ if strict is None:
+ err = err_class(self, more=more_warning)
+ # We don't want to interrupt the user if he expressed no
+ # preference about strict.
+ trace.warning('%s', err._format())
+ else:
+ err = err_class(self, more=more_error)
+ raise err
+
+ @needs_read_lock
+ def last_revision(self):
+ """Return the revision id of the last commit performed in this tree.
+
+ In early tree formats the result of last_revision is the same as the
+ branch last_revision, but that is no longer the case for modern tree
+ formats.
+
+        last_revision returns the leftmost parent id, or None if there are no
+        parents.
+
+ last_revision was deprecated as of 0.11. Please use get_parent_ids
+ instead.
+ """
+ raise NotImplementedError(self.last_revision)
+
+ def lock_tree_write(self):
+ """Lock the working tree for write, and the branch for read.
+
+ This is useful for operations which only need to mutate the working
+ tree. Taking out branch write locks is a relatively expensive process
+ and may fail if the branch is on read only media. So branch write locks
+ should only be taken out when we are modifying branch data - such as in
+ operations like commit, pull, uncommit and update.
+ """
+ raise NotImplementedError(self.lock_tree_write)
+
+ def lock_write(self):
+ """Lock the tree and its branch. This allows mutating calls to be made.
+
+ Some mutating methods will take out implicit write locks, but in
+ general you should always obtain a write lock before calling mutating
+ methods on a tree.
+ """
+ raise NotImplementedError(self.lock_write)
+
+ @needs_write_lock
+ def mkdir(self, path, file_id=None):
+        """Create a directory in the tree. If file_id is None, one is assigned.
+
+ :param path: A unicode file path.
+ :param file_id: An optional file-id.
+ :return: the file id of the new directory.
+ """
+ raise NotImplementedError(self.mkdir)
+
+ def _observed_sha1(self, file_id, path, (sha1, stat_value)):
+        """Tell the tree we have observed a path's sha1.
+
+ The intent of this function is to allow trees that have a hashcache to
+ update the hashcache during commit. If the observed file is too new
+ (based on the stat_value) to be safely hash-cached the tree will ignore
+ it.
+
+ The default implementation does nothing.
+
+ :param file_id: The file id
+ :param path: The file path
+ :param sha1: The sha 1 that was observed.
+ :param stat_value: A stat result for the file the sha1 was read from.
+ :return: None
+ """
+
+ @needs_write_lock
+ def put_file_bytes_non_atomic(self, file_id, bytes):
+ """Update the content of a file in the tree.
+
+ Note that the file is written in-place rather than being
+ written to a temporary location and renamed. As a consequence,
+ readers can potentially see the file half-written.
+
+ :param file_id: file-id of the file
+ :param bytes: the new file contents
+ """
+ raise NotImplementedError(self.put_file_bytes_non_atomic)
+
+ def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
+        """Set the parent ids of the working tree.
+
+ :param revision_ids: A list of revision_ids.
+ """
+ raise NotImplementedError(self.set_parent_ids)
+
+ def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
+ """Set the parents of the working tree.
+
+ :param parents_list: A list of (revision_id, tree) tuples.
+ If tree is None, then that element is treated as an unreachable
+ parent tree - i.e. a ghost.
+ """
+ raise NotImplementedError(self.set_parent_trees)
+
+ def smart_add(self, file_list, recurse=True, action=None, save=True):
+ """Version file_list, optionally recursing into directories.
+
+ This is designed more towards DWIM for humans than API clarity.
+ For the specific behaviour see the help for cmd_add().
+
+ :param file_list: List of zero or more paths. *NB: these are
+ interpreted relative to the process cwd, not relative to the
+ tree.* (Add and most other tree methods use tree-relative
+ paths.)
+ :param action: A reporter to be called with the inventory, parent_ie,
+ path and kind of the path being added. It may return a file_id if
+ a specific one should be used.
+ :param save: Save the inventory after completing the adds. If False
+ this provides dry-run functionality by doing the add and not saving
+ the inventory.
+ :return: A tuple - files_added, ignored_files. files_added is the count
+ of added files, and ignored_files is a dict mapping files that were
+ ignored to the rule that caused them to be ignored.
+ """
+ raise NotImplementedError(self.smart_add)
+
+
+class MutableInventoryTree(MutableTree, tree.InventoryTree):
+
+ @needs_tree_write_lock
+ def apply_inventory_delta(self, changes):
+ """Apply changes to the inventory as an atomic operation.
+
+ :param changes: An inventory delta to apply to the working tree's
+ inventory.
+ :return None:
+ :seealso Inventory.apply_delta: For details on the changes parameter.
+ """
+ self.flush()
+ inv = self.root_inventory
+ inv.apply_delta(changes)
+ self._write_inventory(inv)
+
+ def _fix_case_of_inventory_path(self, path):
+ """If our tree isn't case sensitive, return the canonical path"""
+ if not self.case_sensitive:
+ path = self.get_canonical_inventory_path(path)
+ return path
+
+ @needs_tree_write_lock
+ def smart_add(self, file_list, recurse=True, action=None, save=True):
+ """Version file_list, optionally recursing into directories.
+
+ This is designed more towards DWIM for humans than API clarity.
+ For the specific behaviour see the help for cmd_add().
+
+ :param file_list: List of zero or more paths. *NB: these are
+ interpreted relative to the process cwd, not relative to the
+ tree.* (Add and most other tree methods use tree-relative
+ paths.)
+ :param action: A reporter to be called with the inventory, parent_ie,
+ path and kind of the path being added. It may return a file_id if
+ a specific one should be used.
+ :param save: Save the inventory after completing the adds. If False
+ this provides dry-run functionality by doing the add and not saving
+ the inventory.
+ :return: A tuple - files_added, ignored_files. files_added is the count
+ of added files, and ignored_files is a dict mapping files that were
+ ignored to the rule that caused them to be ignored.
+ """
+ # Not all mutable trees can have conflicts
+ if getattr(self, 'conflicts', None) is not None:
+ # Collect all related files without checking whether they exist or
+ # are versioned. It's cheaper to do that once for all conflicts
+ # than trying to find the relevant conflict for each added file.
+ conflicts_related = set()
+ for c in self.conflicts():
+ conflicts_related.update(c.associated_filenames())
+ else:
+ conflicts_related = None
+ adder = _SmartAddHelper(self, action, conflicts_related)
+ adder.add(file_list, recurse=recurse)
+ if save:
+ invdelta = adder.get_inventory_delta()
+ self.apply_inventory_delta(invdelta)
+ return adder.added, adder.ignored
+
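+    # A rough usage sketch (paths illustrative), assuming a bzrlib working
+    # tree - the usual concrete MutableInventoryTree - in the current
+    # directory:
+    #
+    #   from bzrlib import workingtree
+    #   tree = workingtree.WorkingTree.open('.')
+    #   added, ignored = tree.smart_add(['hello.txt', 'src/'])
+    #   # 'added' lists the inventory paths that were versioned; 'ignored'
+    #   # maps ignore patterns to the paths they caused to be skipped.
+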
+ def update_basis_by_delta(self, new_revid, delta):
+ """Update the parents of this tree after a commit.
+
+ This gives the tree one parent, with revision id new_revid. The
+ inventory delta is applied to the current basis tree to generate the
+ inventory for the parent new_revid, and all other parent trees are
+ discarded.
+
+ All the changes in the delta should be changes synchronising the basis
+ tree with some or all of the working tree, with a change to a directory
+ requiring that its contents have been recursively included. That is,
+ this is not a general purpose tree modification routine, but a helper
+ for commit which is not required to handle situations that do not arise
+ outside of commit.
+
+ See the inventory developers documentation for the theory behind
+ inventory deltas.
+
+        :param new_revid: The new revision id for the tree's parent.
+        :param delta: An inventory delta (see apply_inventory_delta) describing
+            the changes from the current leftmost parent revision to new_revid.
+ """
+ # if the tree is updated by a pull to the branch, as happens in
+ # WorkingTree2, when there was no separation between branch and tree,
+ # then just clear merges, efficiency is not a concern for now as this
+ # is legacy environments only, and they are slow regardless.
+ if self.last_revision() == new_revid:
+ self.set_parent_ids([new_revid])
+ return
+ # generic implementation based on Inventory manipulation. See
+ # WorkingTree classes for optimised versions for specific format trees.
+ basis = self.basis_tree()
+ basis.lock_read()
+ # TODO: Consider re-evaluating the need for this with CHKInventory
+ # we don't strictly need to mutate an inventory for this
+ # it only makes sense when apply_delta is cheaper than get_inventory()
+ inventory = _mod_inventory.mutable_inventory_from_tree(basis)
+ basis.unlock()
+ inventory.apply_delta(delta)
+ rev_tree = revisiontree.InventoryRevisionTree(self.branch.repository,
+ inventory, new_revid)
+ self.set_parent_trees([(new_revid, rev_tree)])
+
+
+class MutableTreeHooks(hooks.Hooks):
+ """A dictionary mapping a hook name to a list of callables for mutabletree
+ hooks.
+ """
+
+ def __init__(self):
+ """Create the default hooks.
+
+ """
+ hooks.Hooks.__init__(self, "bzrlib.mutabletree", "MutableTree.hooks")
+ self.add_hook('start_commit',
+ "Called before a commit is performed on a tree. The start commit "
+ "hook is able to change the tree before the commit takes place. "
+ "start_commit is called with the bzrlib.mutabletree.MutableTree "
+ "that the commit is being performed on.", (1, 4))
+ self.add_hook('post_commit',
+ "Called after a commit is performed on a tree. The hook is "
+ "called with a bzrlib.mutabletree.PostCommitHookParams object. "
+ "The mutable tree the commit was performed on is available via "
+ "the mutable_tree attribute of that object.", (2, 0))
+ self.add_hook('pre_transform',
+ "Called before a tree transform on this tree. The hook is called "
+ "with the tree that is being transformed and the transform.",
+ (2, 5))
+ self.add_hook('post_build_tree',
+ "Called after a completely new tree is built. The hook is "
+ "called with the tree as its only argument.", (2, 5))
+ self.add_hook('post_transform',
+ "Called after a tree transform has been performed on a tree. "
+ "The hook is called with the tree that is being transformed and "
+ "the transform.",
+ (2, 5))
+
+# install the default hooks into the MutableTree class.
+MutableTree.hooks = MutableTreeHooks()
+
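+# A sketch of registering one of the hooks defined above (the callback name is
+# illustrative):
+#
+#   def report_commit(params):
+#       trace.note('post_commit fired for %r', params.mutable_tree)
+#
+#   MutableTree.hooks.install_named_hook('post_commit', report_commit,
+#                                        'report_commit')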
+
+class PostCommitHookParams(object):
+ """Parameters for the post_commit hook.
+
+ To access the parameters, use the following attributes:
+
+ * mutable_tree - the MutableTree object
+ """
+
+ def __init__(self, mutable_tree):
+ """Create the parameters for the post_commit hook."""
+ self.mutable_tree = mutable_tree
+
+
+class _SmartAddHelper(object):
+ """Helper for MutableTree.smart_add."""
+
+ def get_inventory_delta(self):
+ return self._invdelta.values()
+
+ def _get_ie(self, inv_path):
+ """Retrieve the most up to date inventory entry for a path.
+
+ :param inv_path: Normalized inventory path
+ :return: Inventory entry (with possibly invalid .children for
+ directories)
+ """
+ entry = self._invdelta.get(inv_path)
+ if entry is not None:
+ return entry[3]
+ # Find a 'best fit' match if the filesystem is case-insensitive
+ inv_path = self.tree._fix_case_of_inventory_path(inv_path)
+ file_id = self.tree.path2id(inv_path)
+ if file_id is not None:
+ return self.tree.iter_entries_by_dir([file_id]).next()[1]
+ return None
+
+ def _convert_to_directory(self, this_ie, inv_path):
+ """Convert an entry to a directory.
+
+ :param this_ie: Inventory entry
+ :param inv_path: Normalized path for the inventory entry
+ :return: The new inventory entry
+ """
+ # Same as in _add_one below, if the inventory doesn't
+ # think this is a directory, update the inventory
+ this_ie = _mod_inventory.InventoryDirectory(
+ this_ie.file_id, this_ie.name, this_ie.parent_id)
+ self._invdelta[inv_path] = (inv_path, inv_path, this_ie.file_id,
+ this_ie)
+ return this_ie
+
+ def _add_one_and_parent(self, parent_ie, path, kind, inv_path):
+ """Add a new entry to the inventory and automatically add unversioned parents.
+
+ :param parent_ie: Parent inventory entry if known, or None. If
+ None, the parent is looked up by name and used if present, otherwise it
+ is recursively added.
+        :param path: Path of the entry to add, relative to the tree root
+        :param kind: Kind of new entry (file, directory, etc)
+        :param inv_path: Normalized inventory path for the new entry
+        :return: The inventory entry for path. Any entries created along the
+            way (including unversioned parents) are recorded in self.added.
+ """
+ # Nothing to do if path is already versioned.
+ # This is safe from infinite recursion because the tree root is
+ # always versioned.
+ inv_dirname = osutils.dirname(inv_path)
+ dirname, basename = osutils.split(path)
+ if parent_ie is None:
+ # slower but does not need parent_ie
+ this_ie = self._get_ie(inv_path)
+ if this_ie is not None:
+ return this_ie
+            # it's really not there: add the parent
+ # note that the dirname use leads to some extra str copying etc but as
+ # there are a limited number of dirs we can be nested under, it should
+ # generally find it very fast and not recurse after that.
+ parent_ie = self._add_one_and_parent(None,
+ dirname, 'directory',
+ inv_dirname)
+ # if the parent exists, but isn't a directory, we have to do the
+ # kind change now -- really the inventory shouldn't pretend to know
+ # the kind of wt files, but it does.
+ if parent_ie.kind != 'directory':
+ # nb: this relies on someone else checking that the path we're using
+ # doesn't contain symlinks.
+ parent_ie = self._convert_to_directory(parent_ie, inv_dirname)
+ file_id = self.action(self.tree, parent_ie, path, kind)
+ entry = _mod_inventory.make_entry(kind, basename, parent_ie.file_id,
+ file_id=file_id)
+ self._invdelta[inv_path] = (None, inv_path, entry.file_id, entry)
+ self.added.append(inv_path)
+ return entry
+
+ def _gather_dirs_to_add(self, user_dirs):
+ # only walk the minimal parents needed: we have user_dirs to override
+ # ignores.
+ prev_dir = None
+
+ is_inside = osutils.is_inside_or_parent_of_any
+ for path, (inv_path, this_ie) in sorted(
+ user_dirs.iteritems(), key=operator.itemgetter(0)):
+ if (prev_dir is None or not is_inside([prev_dir], path)):
+ yield (path, inv_path, this_ie, None)
+ prev_dir = path
+
+ def __init__(self, tree, action, conflicts_related=None):
+ self.tree = tree
+ if action is None:
+ self.action = add.AddAction()
+ else:
+ self.action = action
+ self._invdelta = {}
+ self.added = []
+ self.ignored = {}
+ if conflicts_related is None:
+ self.conflicts_related = frozenset()
+ else:
+ self.conflicts_related = conflicts_related
+
+ def add(self, file_list, recurse=True):
+ from bzrlib.inventory import InventoryEntry
+ if not file_list:
+ # no paths supplied: add the entire tree.
+ # FIXME: this assumes we are running in a working tree subdir :-/
+ # -- vila 20100208
+ file_list = [u'.']
+
+ # expand any symlinks in the directory part, while leaving the
+ # filename alone
+ # only expanding if symlinks are supported avoids windows path bugs
+ if osutils.has_symlinks():
+ file_list = map(osutils.normalizepath, file_list)
+
+ user_dirs = {}
+ # validate user file paths and convert all paths to tree
+ # relative : it's cheaper to make a tree relative path an abspath
+ # than to convert an abspath to tree relative, and it's cheaper to
+ # perform the canonicalization in bulk.
+ for filepath in osutils.canonical_relpaths(self.tree.basedir, file_list):
+ # validate user parameters. Our recursive code avoids adding new
+ # files that need such validation
+ if self.tree.is_control_filename(filepath):
+ raise errors.ForbiddenControlFileError(filename=filepath)
+
+ abspath = self.tree.abspath(filepath)
+ kind = osutils.file_kind(abspath)
+ # ensure the named path is added, so that ignore rules in the later
+            # directory walk don't skip it.
+            # we don't have a parent ie known yet: use the relatively slower
+ # inventory probing method
+ inv_path, _ = osutils.normalized_filename(filepath)
+ this_ie = self._get_ie(inv_path)
+ if this_ie is None:
+ this_ie = self._add_one_and_parent(None, filepath, kind, inv_path)
+ if kind == 'directory':
+ # schedule the dir for scanning
+ user_dirs[filepath] = (inv_path, this_ie)
+
+ if not recurse:
+ # no need to walk any directories at all.
+ return
+
+ things_to_add = list(self._gather_dirs_to_add(user_dirs))
+
+ illegalpath_re = re.compile(r'[\r\n]')
+ for directory, inv_path, this_ie, parent_ie in things_to_add:
+ # directory is tree-relative
+ abspath = self.tree.abspath(directory)
+
+ # get the contents of this directory.
+
+ # find the kind of the path being added, and save stat_value
+ # for reuse
+ stat_value = None
+ if this_ie is None:
+ stat_value = osutils.file_stat(abspath)
+ kind = osutils.file_kind_from_stat_mode(stat_value.st_mode)
+ else:
+ kind = this_ie.kind
+
+ # allow AddAction to skip this file
+ if self.action.skip_file(self.tree, abspath, kind, stat_value):
+ continue
+ if not InventoryEntry.versionable_kind(kind):
+ trace.warning("skipping %s (can't add file of kind '%s')",
+ abspath, kind)
+ continue
+ if illegalpath_re.search(directory):
+ trace.warning("skipping %r (contains \\n or \\r)" % abspath)
+ continue
+ if directory in self.conflicts_related:
+ # If the file looks like one generated for a conflict, don't
+ # add it.
+ trace.warning(
+ 'skipping %s (generated to help resolve conflicts)',
+ abspath)
+ continue
+
+ if kind == 'directory' and directory != '':
+ try:
+ transport = _mod_transport.get_transport_from_path(abspath)
+ controldir.ControlDirFormat.find_format(transport)
+ sub_tree = True
+ except errors.NotBranchError:
+ sub_tree = False
+ except errors.UnsupportedFormatError:
+ sub_tree = True
+ else:
+ sub_tree = False
+
+ if this_ie is not None:
+ pass
+ elif sub_tree:
+ # XXX: This is wrong; people *might* reasonably be trying to
+ # add subtrees as subtrees. This should probably only be done
+ # in formats which can represent subtrees, and even then
+ # perhaps only when the user asked to add subtrees. At the
+ # moment you can add them specially through 'join --reference',
+ # which is perhaps reasonable: adding a new reference is a
+ # special operation and can have a special behaviour. mbp
+ # 20070306
+ trace.warning("skipping nested tree %r", abspath)
+ else:
+ this_ie = self._add_one_and_parent(parent_ie, directory, kind,
+ inv_path)
+
+ if kind == 'directory' and not sub_tree:
+ if this_ie.kind != 'directory':
+ this_ie = self._convert_to_directory(this_ie, inv_path)
+
+ for subf in sorted(os.listdir(abspath)):
+ inv_f, _ = osutils.normalized_filename(subf)
+ # here we could use TreeDirectory rather than
+ # string concatenation.
+ subp = osutils.pathjoin(directory, subf)
+ # TODO: is_control_filename is very slow. Make it faster.
+ # TreeDirectory.is_control_filename could also make this
+                    # faster - it's impossible for a non-root dir to have a
+ # control file.
+ if self.tree.is_control_filename(subp):
+ trace.mutter("skip control directory %r", subp)
+ continue
+ sub_invp = osutils.pathjoin(inv_path, inv_f)
+ entry = self._invdelta.get(sub_invp)
+ if entry is not None:
+ sub_ie = entry[3]
+ else:
+ sub_ie = this_ie.children.get(inv_f)
+ if sub_ie is not None:
+ # recurse into this already versioned subdir.
+ things_to_add.append((subp, sub_invp, sub_ie, this_ie))
+ else:
+ # user selection overrides ignores
+ # ignore while selecting files - if we globbed in the
+ # outer loop we would ignore user files.
+ ignore_glob = self.tree.is_ignored(subp)
+ if ignore_glob is not None:
+ self.ignored.setdefault(ignore_glob, []).append(subp)
+ else:
+ things_to_add.append((subp, sub_invp, None, this_ie))
diff --git a/bzrlib/option.py b/bzrlib/option.py
new file mode 100644
index 0000000..99f7ffd
--- /dev/null
+++ b/bzrlib/option.py
@@ -0,0 +1,575 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# TODO: For things like --diff-prefix, we want a way to customize the display
+# of the option argument.
+
+from __future__ import absolute_import
+
+import optparse
+import re
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ errors,
+ revisionspec,
+ i18n,
+ )
+""")
+
+from bzrlib import (
+ registry as _mod_registry,
+ )
+
+
+def _parse_revision_str(revstr):
+ """This handles a revision string -> revno.
+
+ This always returns a list. The list will have one element for
+ each revision specifier supplied.
+
+ >>> _parse_revision_str('234')
+ [<RevisionSpec_dwim 234>]
+ >>> _parse_revision_str('234..567')
+ [<RevisionSpec_dwim 234>, <RevisionSpec_dwim 567>]
+ >>> _parse_revision_str('..')
+ [<RevisionSpec None>, <RevisionSpec None>]
+ >>> _parse_revision_str('..234')
+ [<RevisionSpec None>, <RevisionSpec_dwim 234>]
+ >>> _parse_revision_str('234..')
+ [<RevisionSpec_dwim 234>, <RevisionSpec None>]
+ >>> _parse_revision_str('234..456..789') # Maybe this should be an error
+ [<RevisionSpec_dwim 234>, <RevisionSpec_dwim 456>, <RevisionSpec_dwim 789>]
+ >>> _parse_revision_str('234....789') #Error ?
+ [<RevisionSpec_dwim 234>, <RevisionSpec None>, <RevisionSpec_dwim 789>]
+ >>> _parse_revision_str('revid:test@other.com-234234')
+ [<RevisionSpec_revid revid:test@other.com-234234>]
+ >>> _parse_revision_str('revid:test@other.com-234234..revid:test@other.com-234235')
+ [<RevisionSpec_revid revid:test@other.com-234234>, <RevisionSpec_revid revid:test@other.com-234235>]
+ >>> _parse_revision_str('revid:test@other.com-234234..23')
+ [<RevisionSpec_revid revid:test@other.com-234234>, <RevisionSpec_dwim 23>]
+ >>> _parse_revision_str('date:2005-04-12')
+ [<RevisionSpec_date date:2005-04-12>]
+ >>> _parse_revision_str('date:2005-04-12 12:24:33')
+ [<RevisionSpec_date date:2005-04-12 12:24:33>]
+ >>> _parse_revision_str('date:2005-04-12T12:24:33')
+ [<RevisionSpec_date date:2005-04-12T12:24:33>]
+ >>> _parse_revision_str('date:2005-04-12,12:24:33')
+ [<RevisionSpec_date date:2005-04-12,12:24:33>]
+ >>> _parse_revision_str('-5..23')
+ [<RevisionSpec_dwim -5>, <RevisionSpec_dwim 23>]
+ >>> _parse_revision_str('-5')
+ [<RevisionSpec_dwim -5>]
+ >>> _parse_revision_str('123a')
+ [<RevisionSpec_dwim 123a>]
+ >>> _parse_revision_str('abc')
+ [<RevisionSpec_dwim abc>]
+ >>> _parse_revision_str('branch:../branch2')
+ [<RevisionSpec_branch branch:../branch2>]
+ >>> _parse_revision_str('branch:../../branch2')
+ [<RevisionSpec_branch branch:../../branch2>]
+ >>> _parse_revision_str('branch:../../branch2..23')
+ [<RevisionSpec_branch branch:../../branch2>, <RevisionSpec_dwim 23>]
+ >>> _parse_revision_str('branch:..\\\\branch2')
+ [<RevisionSpec_branch branch:..\\branch2>]
+ >>> _parse_revision_str('branch:..\\\\..\\\\branch2..23')
+ [<RevisionSpec_branch branch:..\\..\\branch2>, <RevisionSpec_dwim 23>]
+ """
+ # TODO: Maybe move this into revisionspec.py
+ revs = []
+ # split on .. that is not followed by a / or \
+ sep = re.compile(r'\.\.(?![\\/])')
+ for x in sep.split(revstr):
+ revs.append(revisionspec.RevisionSpec.from_string(x or None))
+ return revs
+
+
+def _parse_change_str(revstr):
+    """Parse the revision string and return a tuple with the left-most
+    parent of the revision.
+
+ >>> _parse_change_str('123')
+ (<RevisionSpec_before before:123>, <RevisionSpec_dwim 123>)
+ >>> _parse_change_str('123..124')
+ Traceback (most recent call last):
+ ...
+ RangeInChangeOption: Option --change does not accept revision ranges
+ """
+ revs = _parse_revision_str(revstr)
+ if len(revs) > 1:
+ raise errors.RangeInChangeOption()
+ return (revisionspec.RevisionSpec.from_string('before:' + revstr),
+ revs[0])
+
+
+def _parse_merge_type(typestring):
+ return get_merge_type(typestring)
+
+def get_merge_type(typestring):
+ """Attempt to find the merge class/factory associated with a string."""
+ from merge import merge_types
+ try:
+ return merge_types[typestring][0]
+ except KeyError:
+ templ = '%s%%7s: %%s' % (' '*12)
+ lines = [templ % (f[0], f[1][1]) for f in merge_types.iteritems()]
+ type_list = '\n'.join(lines)
+ msg = "No known merge type %s. Supported types are:\n%s" %\
+ (typestring, type_list)
+ raise errors.BzrCommandError(msg)
+
+
+class Option(object):
+ """Description of a command line option
+
+ :ivar _short_name: If this option has a single-letter name, this is it.
+ Otherwise None.
+ """
+
+ # The dictionary of standard options. These are always legal.
+ STD_OPTIONS = {}
+
+    # The dictionary of commonly used options. These are only legal
+ # if a command explicitly references them by name in the list
+ # of supported options.
+ OPTIONS = {}
+
+ def __init__(self, name, help='', type=None, argname=None,
+ short_name=None, param_name=None, custom_callback=None,
+ hidden=False):
+ """Make a new command option.
+
+        :param name: regular name of the option, used in the double-dash
+            form and also as the parameter to the command's run()
+            method (unless param_name is specified).
+
+ :param help: help message displayed in command help
+
+ :param type: function called to parse the option argument, or
+ None (default) if this option doesn't take an argument.
+
+ :param argname: name of option argument, if any
+
+ :param short_name: short option code for use with a single -, e.g.
+ short_name="v" to enable parsing of -v.
+
+ :param param_name: name of the parameter which will be passed to
+ the command's run() method.
+
+ :param custom_callback: a callback routine to be called after normal
+ processing. The signature of the callback routine is
+ (option, name, new_value, parser).
+ :param hidden: If True, the option should be hidden in help and
+ documentation.
+ """
+ self.name = name
+ self.help = help
+ self.type = type
+ self._short_name = short_name
+ if type is None:
+ if argname:
+ raise ValueError('argname not valid for booleans')
+ elif argname is None:
+ argname = 'ARG'
+ self.argname = argname
+ if param_name is None:
+ self._param_name = self.name.replace('-', '_')
+ else:
+ self._param_name = param_name
+ self.custom_callback = custom_callback
+ self.hidden = hidden
+
+ def short_name(self):
+ if self._short_name:
+ return self._short_name
+
+ def set_short_name(self, short_name):
+ self._short_name = short_name
+
+ def get_negation_name(self):
+ if self.name.startswith('no-'):
+ return self.name[3:]
+ else:
+ return 'no-' + self.name
+
+ def add_option(self, parser, short_name):
+ """Add this option to an Optparse parser"""
+ option_strings = ['--%s' % self.name]
+ if short_name is not None:
+ option_strings.append('-%s' % short_name)
+ if self.hidden:
+ help = optparse.SUPPRESS_HELP
+ else:
+ help = self.help
+ optargfn = self.type
+ if optargfn is None:
+ parser.add_option(action='callback',
+ callback=self._optparse_bool_callback,
+ callback_args=(True,),
+ help=help,
+ *option_strings)
+ negation_strings = ['--%s' % self.get_negation_name()]
+ parser.add_option(action='callback',
+ callback=self._optparse_bool_callback,
+ callback_args=(False,),
+ help=optparse.SUPPRESS_HELP, *negation_strings)
+ else:
+ parser.add_option(action='callback',
+ callback=self._optparse_callback,
+ type='string', metavar=self.argname.upper(),
+ help=help,
+ default=OptionParser.DEFAULT_VALUE,
+ *option_strings)
+
+ def _optparse_bool_callback(self, option, opt_str, value, parser, bool_v):
+ setattr(parser.values, self._param_name, bool_v)
+ if self.custom_callback is not None:
+ self.custom_callback(option, self._param_name, bool_v, parser)
+
+ def _optparse_callback(self, option, opt, value, parser):
+ v = self.type(value)
+ setattr(parser.values, self._param_name, v)
+ if self.custom_callback is not None:
+ self.custom_callback(option, self.name, v, parser)
+
+ def iter_switches(self):
+ """Iterate through the list of switches provided by the option
+
+ :return: an iterator of (name, short_name, argname, help)
+ """
+ argname = self.argname
+ if argname is not None:
+ argname = argname.upper()
+ yield self.name, self.short_name(), argname, self.help
+
+ def is_hidden(self, name):
+ return self.hidden
+
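+# A typical concrete declaration (name and help text illustrative):
+#
+#   Option('dry-run', help='Show what would be done, but do nothing.')
+#
+# Because no type= is given this behaves as a boolean, exposed to optparse as
+# --dry-run plus a hidden --no-dry-run negation, and passed to the command's
+# run() method as the keyword argument 'dry_run'.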
+
+class ListOption(Option):
+ """Option used to provide a list of values.
+
+ On the command line, arguments are specified by a repeated use of the
+ option. '-' is a special argument that resets the list. For example,
+ --foo=a --foo=b
+ sets the value of the 'foo' option to ['a', 'b'], and
+ --foo=a --foo=b --foo=- --foo=c
+ sets the value of the 'foo' option to ['c'].
+ """
+
+ def add_option(self, parser, short_name):
+ """Add this option to an Optparse parser."""
+ option_strings = ['--%s' % self.name]
+ if short_name is not None:
+ option_strings.append('-%s' % short_name)
+ parser.add_option(action='callback',
+ callback=self._optparse_callback,
+ type='string', metavar=self.argname.upper(),
+ help=self.help, dest=self._param_name, default=[],
+ *option_strings)
+
+ def _optparse_callback(self, option, opt, value, parser):
+ values = getattr(parser.values, self._param_name)
+ if value == '-':
+ del values[:]
+ else:
+ values.append(self.type(value))
+ if self.custom_callback is not None:
+ self.custom_callback(option, self._param_name, values, parser)
+
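+# For example, a command might declare (option name illustrative):
+#
+#   ListOption('exclude', type=str, short_name='x',
+#              help='Exclude files matching this pattern.')
+#
+# so that "-x foo -x bar" yields ['foo', 'bar'], and a later "-x -" empties
+# the accumulated list again, as described in the class docstring above.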
+
+class RegistryOption(Option):
+ """Option based on a registry
+
+ The values for the options correspond to entries in the registry. Input
+ must be a registry key. After validation, it is converted into an object
+ using Registry.get or a caller-provided converter.
+ """
+
+ def validate_value(self, value):
+ """Validate a value name"""
+ if value not in self.registry:
+ raise errors.BadOptionValue(self.name, value)
+
+ def convert(self, value):
+ """Convert a value name into an output type"""
+ self.validate_value(value)
+ if self.converter is None:
+ return self.registry.get(value)
+ else:
+ return self.converter(value)
+
+ def __init__(self, name, help, registry=None, converter=None,
+ value_switches=False, title=None, enum_switch=True,
+ lazy_registry=None, short_name=None, short_value_switches=None):
+ """
+ Constructor.
+
+ :param name: The option name.
+ :param help: Help for the option.
+ :param registry: A Registry containing the values
+ :param converter: Callable to invoke with the value name to produce
+ the value. If not supplied, self.registry.get is used.
+ :param value_switches: If true, each possible value is assigned its
+ own switch. For example, instead of '--format knit',
+ '--knit' can be used interchangeably.
+ :param enum_switch: If true, a switch is provided with the option name,
+ which takes a value.
+ :param lazy_registry: A tuple of (module name, attribute name) for a
+ registry to be lazily loaded.
+ :param short_name: The short name for the enum switch, if any
+ :param short_value_switches: A dict mapping values to short names
+ """
+ Option.__init__(self, name, help, type=self.convert,
+ short_name=short_name)
+ self._registry = registry
+ if registry is None:
+ if lazy_registry is None:
+ raise AssertionError(
+ 'One of registry or lazy_registry must be given.')
+ self._lazy_registry = _mod_registry._LazyObjectGetter(
+ *lazy_registry)
+ if registry is not None and lazy_registry is not None:
+ raise AssertionError(
+ 'registry and lazy_registry are mutually exclusive')
+ self.name = name
+ self.converter = converter
+ self.value_switches = value_switches
+ self.enum_switch = enum_switch
+ self.short_value_switches = short_value_switches
+ self.title = title
+ if self.title is None:
+ self.title = name
+
+ @property
+ def registry(self):
+ if self._registry is None:
+ self._registry = self._lazy_registry.get_obj()
+ return self._registry
+
+ @staticmethod
+ def from_kwargs(name_, help=None, title=None, value_switches=False,
+ enum_switch=True, **kwargs):
+ """Convenience method to generate string-map registry options
+
+ name, help, value_switches and enum_switch are passed to the
+ RegistryOption constructor. Any other keyword arguments are treated
+ as values for the option, and their value is treated as the help.
+ """
+ reg = _mod_registry.Registry()
+ for name, switch_help in sorted(kwargs.items()):
+ name = name.replace('_', '-')
+ reg.register(name, name, help=switch_help)
+ if not value_switches:
+ help = help + ' "' + name + '": ' + switch_help
+ if not help.endswith("."):
+ help = help + "."
+ return RegistryOption(name_, help, reg, title=title,
+ value_switches=value_switches, enum_switch=enum_switch)
+
+ def add_option(self, parser, short_name):
+ """Add this option to an Optparse parser"""
+ if self.value_switches:
+ parser = parser.add_option_group(self.title)
+ if self.enum_switch:
+ Option.add_option(self, parser, short_name)
+ if self.value_switches:
+ for key in self.registry.keys():
+ option_strings = ['--%s' % key]
+ if self.is_hidden(key):
+ help = optparse.SUPPRESS_HELP
+ else:
+ help = self.registry.get_help(key)
+ if (self.short_value_switches and
+ key in self.short_value_switches):
+ option_strings.append('-%s' %
+ self.short_value_switches[key])
+ parser.add_option(action='callback',
+ callback=self._optparse_value_callback(key),
+ help=help,
+ *option_strings)
+
+ def _optparse_value_callback(self, cb_value):
+ def cb(option, opt, value, parser):
+ v = self.type(cb_value)
+ setattr(parser.values, self._param_name, v)
+ if self.custom_callback is not None:
+ self.custom_callback(option, self._param_name, v, parser)
+ return cb
+
+ def iter_switches(self):
+ """Iterate through the list of switches provided by the option
+
+ :return: an iterator of (name, short_name, argname, help)
+ """
+ for value in Option.iter_switches(self):
+ yield value
+ if self.value_switches:
+ for key in sorted(self.registry.keys()):
+ yield key, None, None, self.registry.get_help(key)
+
+ def is_hidden(self, name):
+ if name == self.name:
+ return Option.is_hidden(self, name)
+ return getattr(self.registry.get_info(name), 'hidden', False)
+
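+# Two common ways of constructing a RegistryOption (names illustrative; the
+# first mirrors the 'log-format' option registered further down):
+#
+#   RegistryOption('log-format', 'Use specified log format.',
+#                  lazy_registry=('bzrlib.log', 'log_formatter_registry'),
+#                  value_switches=True, title='Log format')
+#
+#   RegistryOption.from_kwargs('sorting', 'Sorting method.',
+#                              alphabetical='Sort alphabetically.',
+#                              date='Sort by date.')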
+
+class OptionParser(optparse.OptionParser):
+ """OptionParser that raises exceptions instead of exiting"""
+
+ DEFAULT_VALUE = object()
+
+ def __init__(self):
+ optparse.OptionParser.__init__(self)
+ self.formatter = GettextIndentedHelpFormatter()
+
+ def error(self, message):
+ raise errors.BzrCommandError(message)
+
+
+class GettextIndentedHelpFormatter(optparse.IndentedHelpFormatter):
+ """Adds gettext() call to format_option()"""
+ def __init__(self):
+ optparse.IndentedHelpFormatter.__init__(self)
+
+ def format_option(self, option):
+ """code taken from Python's optparse.py"""
+ if option.help:
+ option.help = i18n.gettext(option.help)
+ return optparse.IndentedHelpFormatter.format_option(self, option)
+
+
+def get_optparser(options):
+ """Generate an optparse parser for bzrlib-style options"""
+
+ parser = OptionParser()
+ parser.remove_option('--help')
+ for option in options.itervalues():
+ option.add_option(parser, option.short_name())
+ return parser
+
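+# A small sketch of how these pieces fit together (the option selection and
+# command line are illustrative):
+#
+#   opts = {'verbose': Option.STD_OPTIONS['verbose'],
+#           'message': Option.OPTIONS['message']}
+#   parser = get_optparser(opts)
+#   values, args = parser.parse_args(['-v', '-m', 'tweak', 'foo.txt'])
+#   # values.verbose == True, values.message == u'tweak', args == ['foo.txt']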
+
+def custom_help(name, help):
+ """Clone a common option overriding the help."""
+ import copy
+ o = copy.copy(Option.OPTIONS[name])
+ o.help = help
+ return o
+
+
+def _standard_option(name, **kwargs):
+ """Register a standard option."""
+ # All standard options are implicitly 'global' ones
+ Option.STD_OPTIONS[name] = Option(name, **kwargs)
+ Option.OPTIONS[name] = Option.STD_OPTIONS[name]
+
+def _standard_list_option(name, **kwargs):
+ """Register a standard option."""
+ # All standard options are implicitly 'global' ones
+ Option.STD_OPTIONS[name] = ListOption(name, **kwargs)
+ Option.OPTIONS[name] = Option.STD_OPTIONS[name]
+
+
+def _global_option(name, **kwargs):
+ """Register a global option."""
+ Option.OPTIONS[name] = Option(name, **kwargs)
+
+
+def _global_registry_option(name, help, registry=None, **kwargs):
+ Option.OPTIONS[name] = RegistryOption(name, help, registry, **kwargs)
+
+
+# This is the verbosity level detected during command line parsing.
+# Note that the final value is dependent on the order in which the
+# various flags (verbose, quiet, no-verbose, no-quiet) are given.
+# The final value will be one of the following:
+#
+# * -ve for quiet
+# * 0 for normal
+# * +ve for verbose
+_verbosity_level = 0
+
+
+def _verbosity_level_callback(option, opt_str, value, parser):
+ global _verbosity_level
+ if not value:
+ # Either --no-verbose or --no-quiet was specified
+ _verbosity_level = 0
+ elif opt_str == "verbose":
+ if _verbosity_level > 0:
+ _verbosity_level += 1
+ else:
+ _verbosity_level = 1
+ else:
+ if _verbosity_level < 0:
+ _verbosity_level -= 1
+ else:
+ _verbosity_level = -1
+
+
+# Declare the standard options
+_standard_option('help', short_name='h',
+ help='Show help message.')
+_standard_option('quiet', short_name='q',
+ help="Only display errors and warnings.",
+ custom_callback=_verbosity_level_callback)
+_standard_option('usage',
+ help='Show usage message and options.')
+_standard_option('verbose', short_name='v',
+ help='Display more information.',
+ custom_callback=_verbosity_level_callback)
+
+# Declare commonly used options
+_global_option('change',
+ type=_parse_change_str,
+ short_name='c',
+ param_name='revision',
+ help='Select changes introduced by the specified revision. See also "help revisionspec".')
+_global_option('directory', short_name='d', type=unicode,
+ help='Branch to operate on, instead of working directory.')
+_global_option('file', type=unicode, short_name='F')
+_global_registry_option('log-format', "Use specified log format.",
+ lazy_registry=('bzrlib.log', 'log_formatter_registry'),
+ value_switches=True, title='Log format',
+ short_value_switches={'short': 'S'})
+_global_registry_option('merge-type', 'Select a particular merge algorithm.',
+ lazy_registry=('bzrlib.merge', 'merge_type_registry'),
+ value_switches=True, title='Merge algorithm')
+_global_option('message', type=unicode,
+ short_name='m',
+ help='Message string.')
+_global_option('null', short_name='0',
+ help='Use an ASCII NUL (\\0) separator rather than '
+ 'a newline.')
+_global_option('overwrite', help='Ignore differences between branches and '
+ 'overwrite unconditionally.')
+_global_option('remember', help='Remember the specified location as a'
+ ' default.')
+_global_option('reprocess', help='Reprocess to reduce spurious conflicts.')
+_global_option('revision',
+ type=_parse_revision_str,
+ short_name='r',
+ help='See "help revisionspec" for details.')
+_global_option('show-ids',
+ help='Show internal object ids.')
+_global_option('timezone',
+ type=str,
+ help='Display timezone as local, original, or utc.')
+
+diff_writer_registry = _mod_registry.Registry()
+diff_writer_registry.register('plain', lambda x: x, 'Plaintext diff output.')
+diff_writer_registry.default_key = 'plain'
diff --git a/bzrlib/osutils.py b/bzrlib/osutils.py
new file mode 100644
index 0000000..7288115
--- /dev/null
+++ b/bzrlib/osutils.py
@@ -0,0 +1,2587 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import errno
+import os
+import re
+import stat
+import sys
+import time
+import codecs
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from datetime import datetime
+import getpass
+import locale
+import ntpath
+import posixpath
+import select
+# We need to import both shutil and rmtree as we export the latter on posix
+# and need the former on windows
+import shutil
+from shutil import rmtree
+import socket
+import subprocess
+# We need to import both tempfile and mkdtemp as we export the latter on posix
+# and need the former on windows
+import tempfile
+from tempfile import mkdtemp
+import unicodedata
+
+from bzrlib import (
+ cache_utf8,
+ config,
+ errors,
+ trace,
+ win32utils,
+ )
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.symbol_versioning import (
+ DEPRECATED_PARAMETER,
+ deprecated_function,
+ deprecated_in,
+ deprecated_passed,
+ warn as warn_deprecated,
+ )
+
+from hashlib import (
+ md5,
+ sha1 as sha,
+ )
+
+
+import bzrlib
+from bzrlib import symbol_versioning, _fs_enc
+
+
+# Cross platform wall-clock time functionality with decent resolution.
+# On Linux ``time.clock`` returns only CPU time. On Windows, ``time.time()``
+# only has a resolution of ~15ms. Note that ``time.clock()`` is not
+# synchronized with ``time.time()``, this is only meant to be used to find
+# delta times by subtracting from another call to this function.
+timer_func = time.time
+if sys.platform == 'win32':
+ timer_func = time.clock
+
+# On win32, O_BINARY is used to indicate the file should
+# be opened in binary mode, rather than text mode.
+# On other platforms, O_BINARY doesn't exist, because
+# they always open in binary mode, so it is okay to
+# OR with 0 on those platforms.
+# O_NOINHERIT and O_TEXT exists only on win32 too.
+O_BINARY = getattr(os, 'O_BINARY', 0)
+O_TEXT = getattr(os, 'O_TEXT', 0)
+O_NOINHERIT = getattr(os, 'O_NOINHERIT', 0)
+
+
+def get_unicode_argv():
+ try:
+ user_encoding = get_user_encoding()
+ return [a.decode(user_encoding) for a in sys.argv[1:]]
+ except UnicodeDecodeError:
+ raise errors.BzrError(gettext("Parameter {0!r} encoding is unsupported by {1} "
+ "application locale.").format(a, user_encoding))
+
+
+def make_readonly(filename):
+ """Make a filename read-only."""
+ mod = os.lstat(filename).st_mode
+ if not stat.S_ISLNK(mod):
+ mod = mod & 0777555
+ chmod_if_possible(filename, mod)
+
+
+def make_writable(filename):
+ mod = os.lstat(filename).st_mode
+ if not stat.S_ISLNK(mod):
+ mod = mod | 0200
+ chmod_if_possible(filename, mod)
+
+
+def chmod_if_possible(filename, mode):
+ # Set file mode if that can be safely done.
+ # Sometimes even on unix the filesystem won't allow it - see
+ # https://bugs.launchpad.net/bzr/+bug/606537
+ try:
+ # It is probably faster to just do the chmod, rather than
+ # doing a stat, and then trying to compare
+ os.chmod(filename, mode)
+ except (IOError, OSError),e:
+ # Permission/access denied seems to commonly happen on smbfs; there's
+ # probably no point warning about it.
+ # <https://bugs.launchpad.net/bzr/+bug/606537>
+ if getattr(e, 'errno') in (errno.EPERM, errno.EACCES):
+ trace.mutter("ignore error on chmod of %r: %r" % (
+ filename, e))
+ return
+ raise
+
+
+def minimum_path_selection(paths):
+ """Return the smallset subset of paths which are outside paths.
+
+ :param paths: A container (and hence not None) of paths.
+ :return: A set of paths sufficient to include everything in paths via
+ is_inside, drawn from the paths parameter.
+ """
+ if len(paths) < 2:
+ return set(paths)
+
+ def sort_key(path):
+ return path.split('/')
+ sorted_paths = sorted(list(paths), key=sort_key)
+
+ search_paths = [sorted_paths[0]]
+ for path in sorted_paths[1:]:
+ if not is_inside(search_paths[-1], path):
+ # This path is unique, add it
+ search_paths.append(path)
+
+ return set(search_paths)
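
For example, assuming bzrlib is importable, nested paths collapse onto their outermost ancestors (a sketch of the intended behaviour):

    from bzrlib import osutils
    osutils.minimum_path_selection(['a', 'a/b', 'a/b/c', 'd/e'])
    # -> set(['a', 'd/e'])
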
+
+
+_QUOTE_RE = None
+
+
+def quotefn(f):
+ """Return a quoted filename filename
+
+ This previously used backslash quoting, but that works poorly on
+ Windows."""
+ # TODO: I'm not really sure this is the best format either.
+ global _QUOTE_RE
+ if _QUOTE_RE is None:
+ _QUOTE_RE = re.compile(r'([^a-zA-Z0-9.,:/\\_~-])')
+
+ if _QUOTE_RE.search(f):
+ return '"' + f + '"'
+ else:
+ return f
+
+
+_directory_kind = 'directory'
+
+def get_umask():
+ """Return the current umask"""
+ # Assume that people aren't messing with the umask while running
+ # XXX: This is not thread safe, but there is no way to get the
+ # umask without setting it
+ umask = os.umask(0)
+ os.umask(umask)
+ return umask
+
+
+_kind_marker_map = {
+ "file": "",
+ _directory_kind: "/",
+ "symlink": "@",
+ 'tree-reference': '+',
+}
+
+
+def kind_marker(kind):
+ try:
+ return _kind_marker_map[kind]
+ except KeyError:
+ # Slightly faster than using .get(kind, '') when the common case is that
+ # kind will be found
+ return ''
+
+
+lexists = getattr(os.path, 'lexists', None)
+if lexists is None:
+ def lexists(f):
+ try:
+ stat = getattr(os, 'lstat', os.stat)
+ stat(f)
+ return True
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ raise errors.BzrError(gettext("lstat/stat of ({0!r}): {1!r}").format(f, e))
+
+
+def fancy_rename(old, new, rename_func, unlink_func):
+ """A fancy rename, when you don't have atomic rename.
+
+ :param old: The old path, to rename from
+ :param new: The new path, to rename to
+ :param rename_func: The potentially non-atomic rename function
+ :param unlink_func: A way to delete the target file if the full rename
+ succeeds
+ """
+ # sftp rename doesn't allow overwriting, so play tricks:
+ base = os.path.basename(new)
+ dirname = os.path.dirname(new)
+ # callers use different encodings for the paths so the following MUST
+ # respect that. We rely on python upcasting to unicode if new is unicode
+ # and keeping a str if not.
+ tmp_name = 'tmp.%s.%.9f.%d.%s' % (base, time.time(),
+ os.getpid(), rand_chars(10))
+ tmp_name = pathjoin(dirname, tmp_name)
+
+ # Rename the file out of the way, but keep track if it didn't exist
+ # We don't want to grab just any exception
+ # something like EACCES should prevent us from continuing
+ # The downside is that the rename_func has to throw an exception
+ # with an errno = ENOENT, or NoSuchFile
+ file_existed = False
+ try:
+ rename_func(new, tmp_name)
+ except (errors.NoSuchFile,), e:
+ pass
+ except IOError, e:
+ # RBC 20060103 abstraction leakage: the paramiko SFTP clients rename
+ # function raises an IOError with errno is None when a rename fails.
+ # This then gets caught here.
+ if e.errno not in (None, errno.ENOENT, errno.ENOTDIR):
+ raise
+ except Exception, e:
+ if (getattr(e, 'errno', None) is None
+ or e.errno not in (errno.ENOENT, errno.ENOTDIR)):
+ raise
+ else:
+ file_existed = True
+
+ failure_exc = None
+ success = False
+ try:
+ try:
+ # This may throw an exception, in which case success will
+ # not be set.
+ rename_func(old, new)
+ success = True
+ except (IOError, OSError), e:
+ # source and target may be aliases of each other (e.g. on a
+ # case-insensitive filesystem), so we may have accidentally renamed
+ # source when we tried to rename target
+ failure_exc = sys.exc_info()
+ if (file_existed and e.errno in (None, errno.ENOENT)
+ and old.lower() == new.lower()):
+ # source and target are the same file on a case-insensitive
+ # filesystem, so we don't generate an exception
+ failure_exc = None
+ finally:
+ if file_existed:
+ # If the file used to exist, rename it back into place
+ # otherwise just delete it from the tmp location
+ if success:
+ unlink_func(tmp_name)
+ else:
+ rename_func(tmp_name, new)
+ if failure_exc is not None:
+ try:
+ raise failure_exc[0], failure_exc[1], failure_exc[2]
+ finally:
+ del failure_exc
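
A minimal sketch of calling fancy_rename with the plain os primitives, which is exactly how _win32_rename below uses it; callers such as the SFTP transport presumably pass their own rename/unlink callables instead (the file names here are hypothetical):

    import os
    fancy_rename('report.tmp', 'report.txt',
                 rename_func=os.rename, unlink_func=os.unlink)
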
+
+
+# In Python 2.4.2 and older, os.path.abspath and os.path.realpath
+# choke on a Unicode string containing a relative path if
+# os.getcwd() returns a non-sys.getdefaultencoding()-encoded
+# string.
+def _posix_abspath(path):
+ # jam 20060426 rather than encoding to fsencoding
+ # copy posixpath.abspath, but use os.getcwdu instead
+ if not posixpath.isabs(path):
+ path = posixpath.join(getcwd(), path)
+ return _posix_normpath(path)
+
+
+def _posix_realpath(path):
+ return posixpath.realpath(path.encode(_fs_enc)).decode(_fs_enc)
+
+
+def _posix_normpath(path):
+ path = posixpath.normpath(path)
+ # Bug 861008: posixpath.normpath() returns a path normalized according to
+ # the POSIX standard, which stipulates (for compatibility reasons) that two
+ # leading slashes must not be simplified to one, and only if there are 3 or
+ # more should they be simplified to one. So we treat the leading 2 slashes
+ # as a special case here by simply removing the first slash, as we consider
+ # that breaking POSIX compatibility for this obscure feature is acceptable.
+ # This is not a paranoid precaution, as we notably get paths like this when
+ # the repo is hosted at the root of the filesystem, i.e. in "/".
+ if path.startswith('//'):
+ path = path[1:]
+ return path
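
A few illustrative inputs for the double-slash special case described above (expected results shown as comments):

    _posix_normpath('//srv/repo')     # -> '/srv/repo'  (two leading slashes collapsed)
    _posix_normpath('///srv/repo')    # -> '/srv/repo'  (3+ already collapsed by posixpath)
    _posix_normpath('/a/./b/../c')    # -> '/a/c'
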
+
+
+def _posix_path_from_environ(key):
+ """Get unicode path from `key` in environment or None if not present
+
+ Note that posix systems use arbitrary byte strings for filesystem objects,
+ so a path that raises BadFilenameEncoding here may still be accessible.
+ """
+ val = os.environ.get(key, None)
+ if val is None:
+ return val
+ try:
+ return val.decode(_fs_enc)
+ except UnicodeDecodeError:
+ # GZ 2011-12-12:Ideally want to include `key` in the exception message
+ raise errors.BadFilenameEncoding(val, _fs_enc)
+
+
+def _posix_get_home_dir():
+ """Get the home directory of the current user as a unicode path"""
+ path = posixpath.expanduser("~")
+ try:
+ return path.decode(_fs_enc)
+ except UnicodeDecodeError:
+ raise errors.BadFilenameEncoding(path, _fs_enc)
+
+
+def _posix_getuser_unicode():
+ """Get username from environment or password database as unicode"""
+ name = getpass.getuser()
+ user_encoding = get_user_encoding()
+ try:
+ return name.decode(user_encoding)
+ except UnicodeDecodeError:
+ raise errors.BzrError("Encoding of username %r is unsupported by %s "
+ "application locale." % (name, user_encoding))
+
+
+def _win32_fixdrive(path):
+ """Force drive letters to be consistent.
+
+ win32 is inconsistent whether it returns lower or upper case
+ and even if it was consistent the user might type the other
+ so we force it to uppercase
+ running python.exe under cmd.exe returns capital C:\\
+ running win32 python inside a cygwin shell returns lowercase c:\\
+ """
+ drive, path = ntpath.splitdrive(path)
+ return drive.upper() + path
+
+
+def _win32_abspath(path):
+ # Real ntpath.abspath doesn't have a problem with a unicode cwd
+ return _win32_fixdrive(ntpath.abspath(unicode(path)).replace('\\', '/'))
+
+
+def _win98_abspath(path):
+ """Return the absolute version of a path.
+ Windows 98 safe implementation (python reimplementation
+ of Win32 API function GetFullPathNameW)
+ """
+ # Corner cases:
+ # C:\path => C:/path
+ # C:/path => C:/path
+ # \\HOST\path => //HOST/path
+ # //HOST/path => //HOST/path
+ # path => C:/cwd/path
+ # /path => C:/path
+ path = unicode(path)
+ # check for absolute path
+ drive = ntpath.splitdrive(path)[0]
+ if drive == '' and path[:2] not in('//','\\\\'):
+ cwd = os.getcwdu()
+ # we cannot simply os.path.join cwd and path
+ # because os.path.join('C:','/path') produce '/path'
+ # and this is incorrect
+ if path[:1] in ('/','\\'):
+ cwd = ntpath.splitdrive(cwd)[0]
+ path = path[1:]
+ path = cwd + '\\' + path
+ return _win32_fixdrive(ntpath.normpath(path).replace('\\', '/'))
+
+
+def _win32_realpath(path):
+ # Real ntpath.realpath doesn't have a problem with a unicode cwd
+ return _win32_fixdrive(ntpath.realpath(unicode(path)).replace('\\', '/'))
+
+
+def _win32_pathjoin(*args):
+ return ntpath.join(*args).replace('\\', '/')
+
+
+def _win32_normpath(path):
+ return _win32_fixdrive(ntpath.normpath(unicode(path)).replace('\\', '/'))
+
+
+def _win32_getcwd():
+ return _win32_fixdrive(os.getcwdu().replace('\\', '/'))
+
+
+def _win32_mkdtemp(*args, **kwargs):
+ return _win32_fixdrive(tempfile.mkdtemp(*args, **kwargs).replace('\\', '/'))
+
+
+def _win32_rename(old, new):
+ """We expect to be able to atomically replace 'new' with old.
+
+ On win32, if new exists, it must be moved out of the way first,
+ and then deleted.
+ """
+ try:
+ fancy_rename(old, new, rename_func=os.rename, unlink_func=os.unlink)
+ except OSError, e:
+ if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY, errno.EINVAL):
+ # If we try to rename a non-existent file onto cwd, we get
+ # EPERM or EACCES instead of ENOENT; the os.lstat(old) below will
+ # raise ENOENT if the old path doesn't exist. Sometimes we get
+ # EACCES; on Linux we seem to get EBUSY, and on Mac EINVAL.
+ os.lstat(old)
+ raise
+
+
+def _mac_getcwd():
+ return unicodedata.normalize('NFC', os.getcwdu())
+
+
+def _rename_wrap_exception(rename_func):
+ """Adds extra information to any exceptions that come from rename().
+
+ The exception has an updated message and 'old_filename' and 'new_filename'
+ attributes.
+ """
+
+ def _rename_wrapper(old, new):
+ try:
+ rename_func(old, new)
+ except OSError, e:
+ detailed_error = OSError(e.errno, e.strerror +
+ " [occurred when renaming '%s' to '%s']" %
+ (old, new))
+ detailed_error.old_filename = old
+ detailed_error.new_filename = new
+ raise detailed_error
+
+ return _rename_wrapper
+
+# Default rename wraps os.rename()
+rename = _rename_wrap_exception(os.rename)
+
+# Default is to just use the python builtins, but these can be rebound on
+# particular platforms.
+abspath = _posix_abspath
+realpath = _posix_realpath
+pathjoin = os.path.join
+normpath = _posix_normpath
+path_from_environ = _posix_path_from_environ
+_get_home_dir = _posix_get_home_dir
+getuser_unicode = _posix_getuser_unicode
+getcwd = os.getcwdu
+dirname = os.path.dirname
+basename = os.path.basename
+split = os.path.split
+splitext = os.path.splitext
+# These were already lazily imported into local scope
+# mkdtemp = tempfile.mkdtemp
+# rmtree = shutil.rmtree
+lstat = os.lstat
+fstat = os.fstat
+
+def wrap_stat(st):
+ return st
+
+
+MIN_ABS_PATHLENGTH = 1
+
+
+if sys.platform == 'win32':
+ if win32utils.winver == 'Windows 98':
+ abspath = _win98_abspath
+ else:
+ abspath = _win32_abspath
+ realpath = _win32_realpath
+ pathjoin = _win32_pathjoin
+ normpath = _win32_normpath
+ getcwd = _win32_getcwd
+ mkdtemp = _win32_mkdtemp
+ rename = _rename_wrap_exception(_win32_rename)
+ try:
+ from bzrlib import _walkdirs_win32
+ except ImportError:
+ pass
+ else:
+ lstat = _walkdirs_win32.lstat
+ fstat = _walkdirs_win32.fstat
+ wrap_stat = _walkdirs_win32.wrap_stat
+
+ MIN_ABS_PATHLENGTH = 3
+
+ def _win32_delete_readonly(function, path, excinfo):
+ """Error handler for shutil.rmtree function [for win32]
+ Helps to remove files and dirs marked as read-only.
+ """
+ exception = excinfo[1]
+ if function in (os.remove, os.rmdir) \
+ and isinstance(exception, OSError) \
+ and exception.errno == errno.EACCES:
+ make_writable(path)
+ function(path)
+ else:
+ raise
+
+ def rmtree(path, ignore_errors=False, onerror=_win32_delete_readonly):
+ """Replacer for shutil.rmtree: could remove readonly dirs/files"""
+ return shutil.rmtree(path, ignore_errors, onerror)
+
+ f = win32utils.get_unicode_argv # special function or None
+ if f is not None:
+ get_unicode_argv = f
+ path_from_environ = win32utils.get_environ_unicode
+ _get_home_dir = win32utils.get_home_location
+ getuser_unicode = win32utils.get_user_name
+
+elif sys.platform == 'darwin':
+ getcwd = _mac_getcwd
+
+
+def get_terminal_encoding(trace=False):
+ """Find the best encoding for printing to the screen.
+
+ This attempts to check both sys.stdout and sys.stdin to see
+ what encoding they are in, and if that fails it falls back to
+ osutils.get_user_encoding().
+ The problem is that on Windows, locale.getpreferredencoding()
+ is not the same encoding as that used by the console:
+ http://mail.python.org/pipermail/python-list/2003-May/162357.html
+
+ On my standard US Windows XP, the preferred encoding is
+ cp1252, but the console is cp437
+
+ :param trace: If True trace the selected encoding via mutter().
+ """
+ from bzrlib.trace import mutter
+ output_encoding = getattr(sys.stdout, 'encoding', None)
+ if not output_encoding:
+ input_encoding = getattr(sys.stdin, 'encoding', None)
+ if not input_encoding:
+ output_encoding = get_user_encoding()
+ if trace:
+ mutter('encoding stdout as osutils.get_user_encoding() %r',
+ output_encoding)
+ else:
+ output_encoding = input_encoding
+ if trace:
+ mutter('encoding stdout as sys.stdin encoding %r',
+ output_encoding)
+ else:
+ if trace:
+ mutter('encoding stdout as sys.stdout encoding %r', output_encoding)
+ if output_encoding == 'cp0':
+ # invalid encoding (cp0 means 'no codepage' on Windows)
+ output_encoding = get_user_encoding()
+ if trace:
+ mutter('cp0 is invalid encoding.'
+ ' encoding stdout as osutils.get_user_encoding() %r',
+ output_encoding)
+ # check encoding
+ try:
+ codecs.lookup(output_encoding)
+ except LookupError:
+ sys.stderr.write('bzr: warning:'
+ ' unknown terminal encoding %s.\n'
+ ' Using encoding %s instead.\n'
+ % (output_encoding, get_user_encoding())
+ )
+ output_encoding = get_user_encoding()
+
+ return output_encoding
+
+
+def normalizepath(f):
+ if getattr(os.path, 'realpath', None) is not None:
+ F = realpath
+ else:
+ F = abspath
+ [p,e] = os.path.split(f)
+ if e == "" or e == "." or e == "..":
+ return F(f)
+ else:
+ return pathjoin(F(p), e)
+
+
+def isdir(f):
+ """True if f is an accessible directory."""
+ try:
+ return stat.S_ISDIR(os.lstat(f)[stat.ST_MODE])
+ except OSError:
+ return False
+
+
+def isfile(f):
+ """True if f is a regular file."""
+ try:
+ return stat.S_ISREG(os.lstat(f)[stat.ST_MODE])
+ except OSError:
+ return False
+
+def islink(f):
+ """True if f is a symlink."""
+ try:
+ return stat.S_ISLNK(os.lstat(f)[stat.ST_MODE])
+ except OSError:
+ return False
+
+def is_inside(dir, fname):
+ """True if fname is inside dir.
+
+ The parameters should typically be passed to osutils.normpath first, so
+ that . and .. and repeated slashes are eliminated, and the separators
+ are canonical for the platform.
+
+ The empty string as a dir name is taken as top-of-tree and matches
+ everything.
+ """
+ # XXX: Most callers of this can actually do something smarter by
+ # looking at the inventory
+ if dir == fname:
+ return True
+
+ if dir == '':
+ return True
+
+ if dir[-1] != '/':
+ dir += '/'
+
+ return fname.startswith(dir)
+
+
+def is_inside_any(dir_list, fname):
+ """True if fname is inside any of given dirs."""
+ for dirname in dir_list:
+ if is_inside(dirname, fname):
+ return True
+ return False
+
+
+def is_inside_or_parent_of_any(dir_list, fname):
+ """True if fname is a child or a parent of any of the given files."""
+ for dirname in dir_list:
+ if is_inside(dirname, fname) or is_inside(fname, dirname):
+ return True
+ return False
+
+
+def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
+ report_activity=None, direction='read'):
+ """Copy contents of one file to another.
+
+ The read_length can either be -1 to read to end-of-file (EOF) or
+ it can specify the maximum number of bytes to read.
+
+ The buff_size represents the maximum size for each read operation
+ performed on from_file.
+
+ :param report_activity: Call this as bytes are read, see
+ Transport._report_activity
+ :param direction: Will be passed to report_activity
+
+ :return: The number of bytes copied.
+ """
+ length = 0
+ if read_length >= 0:
+ # read specified number of bytes
+
+ while read_length > 0:
+ num_bytes_to_read = min(read_length, buff_size)
+
+ block = from_file.read(num_bytes_to_read)
+ if not block:
+ # EOF reached
+ break
+ if report_activity is not None:
+ report_activity(len(block), direction)
+ to_file.write(block)
+
+ actual_bytes_read = len(block)
+ read_length -= actual_bytes_read
+ length += actual_bytes_read
+ else:
+ # read to EOF
+ while True:
+ block = from_file.read(buff_size)
+ if not block:
+ # EOF reached
+ break
+ if report_activity is not None:
+ report_activity(len(block), direction)
+ to_file.write(block)
+ length += len(block)
+ return length
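
A small usage sketch with in-memory files, assuming Python 2's StringIO; only the first read_length bytes are copied:

    from StringIO import StringIO
    src = StringIO('x' * 100000)
    dst = StringIO()
    pumpfile(src, dst, read_length=1000)   # returns 1000
    len(dst.getvalue())                    # -> 1000
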
+
+
+def pump_string_file(bytes, file_handle, segment_size=None):
+ """Write bytes to file_handle in many smaller writes.
+
+ :param bytes: The string to write.
+ :param file_handle: The file to write to.
+ """
+ # Write data in chunks rather than all at once, because very large
+ # writes fail on some platforms (e.g. Windows with SMB mounted
+ # drives).
+ if not segment_size:
+ segment_size = 5242880 # 5MB
+ segments = range(len(bytes) / segment_size + 1)
+ write = file_handle.write
+ for segment_index in segments:
+ segment = buffer(bytes, segment_index * segment_size, segment_size)
+ write(segment)
+
+
+def file_iterator(input_file, readsize=32768):
+ while True:
+ b = input_file.read(readsize)
+ if len(b) == 0:
+ break
+ yield b
+
+
+def sha_file(f):
+ """Calculate the hexdigest of an open file.
+
+ The file cursor should already be at the start.
+ """
+ s = sha()
+ BUFSIZE = 128<<10
+ while True:
+ b = f.read(BUFSIZE)
+ if not b:
+ break
+ s.update(b)
+ return s.hexdigest()
+
+
+def size_sha_file(f):
+ """Calculate the size and hexdigest of an open file.
+
+ The file cursor should already be at the start, and
+ the caller is responsible for closing the file afterwards.
+ """
+ size = 0
+ s = sha()
+ BUFSIZE = 128<<10
+ while True:
+ b = f.read(BUFSIZE)
+ if not b:
+ break
+ size += len(b)
+ s.update(b)
+ return size, s.hexdigest()
+
+
+def sha_file_by_name(fname):
+ """Calculate the SHA1 of a file by reading the full text"""
+ s = sha()
+ f = os.open(fname, os.O_RDONLY | O_BINARY | O_NOINHERIT)
+ try:
+ while True:
+ b = os.read(f, 1<<16)
+ if not b:
+ return s.hexdigest()
+ s.update(b)
+ finally:
+ os.close(f)
+
+
+def sha_strings(strings, _factory=sha):
+ """Return the sha-1 of concatenation of strings"""
+ s = _factory()
+ map(s.update, strings)
+ return s.hexdigest()
+
+
+def sha_string(f, _factory=sha):
+ return _factory(f).hexdigest()
+
+
+def fingerprint_file(f):
+ b = f.read()
+ return {'size': len(b),
+ 'sha1': sha(b).hexdigest()}
+
+
+def compare_files(a, b):
+ """Returns true if equal in contents"""
+ BUFSIZE = 4096
+ while True:
+ ai = a.read(BUFSIZE)
+ bi = b.read(BUFSIZE)
+ if ai != bi:
+ return False
+ if ai == '':
+ return True
+
+
+def local_time_offset(t=None):
+ """Return offset of local zone from GMT, either at present or at time t."""
+ if t is None:
+ t = time.time()
+ offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
+ return offset.days * 86400 + offset.seconds
+
+weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+_default_format_by_weekday_num = [wd + " %Y-%m-%d %H:%M:%S" for wd in weekdays]
+
+
+def format_date(t, offset=0, timezone='original', date_fmt=None,
+ show_offset=True):
+ """Return a formatted date string.
+
+ :param t: Seconds since the epoch.
+ :param offset: Timezone offset in seconds east of utc.
+ :param timezone: How to display the time: 'utc', 'original' for the
+ timezone specified by offset, or 'local' for the process's current
+ timezone.
+ :param date_fmt: strftime format.
+ :param show_offset: Whether to append the timezone.
+ """
+ (date_fmt, tt, offset_str) = \
+ _format_date(t, offset, timezone, date_fmt, show_offset)
+ date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
+ date_str = time.strftime(date_fmt, tt)
+ return date_str + offset_str
+
+
+# Cache of formatted offset strings
+_offset_cache = {}
+
+
+def format_date_with_offset_in_original_timezone(t, offset=0,
+ _cache=_offset_cache):
+ """Return a formatted date string in the original timezone.
+
+ This routine may be faster than format_date.
+
+ :param t: Seconds since the epoch.
+ :param offset: Timezone offset in seconds east of utc.
+ """
+ if offset is None:
+ offset = 0
+ tt = time.gmtime(t + offset)
+ date_fmt = _default_format_by_weekday_num[tt[6]]
+ date_str = time.strftime(date_fmt, tt)
+ offset_str = _cache.get(offset, None)
+ if offset_str is None:
+ offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
+ _cache[offset] = offset_str
+ return date_str + offset_str
+
+
+def format_local_date(t, offset=0, timezone='original', date_fmt=None,
+ show_offset=True):
+ """Return an unicode date string formatted according to the current locale.
+
+ :param t: Seconds since the epoch.
+ :param offset: Timezone offset in seconds east of utc.
+ :param timezone: How to display the time: 'utc', 'original' for the
+ timezone specified by offset, or 'local' for the process's current
+ timezone.
+ :param date_fmt: strftime format.
+ :param show_offset: Whether to append the timezone.
+ """
+ (date_fmt, tt, offset_str) = \
+ _format_date(t, offset, timezone, date_fmt, show_offset)
+ date_str = time.strftime(date_fmt, tt)
+ if not isinstance(date_str, unicode):
+ date_str = date_str.decode(get_user_encoding(), 'replace')
+ return date_str + offset_str
+
+
+def _format_date(t, offset, timezone, date_fmt, show_offset):
+ if timezone == 'utc':
+ tt = time.gmtime(t)
+ offset = 0
+ elif timezone == 'original':
+ if offset is None:
+ offset = 0
+ tt = time.gmtime(t + offset)
+ elif timezone == 'local':
+ tt = time.localtime(t)
+ offset = local_time_offset(t)
+ else:
+ raise errors.UnsupportedTimezoneFormat(timezone)
+ if date_fmt is None:
+ date_fmt = "%a %Y-%m-%d %H:%M:%S"
+ if show_offset:
+ offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
+ else:
+ offset_str = ''
+ return (date_fmt, tt, offset_str)
+
+
+def compact_date(when):
+ return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))
+
+
+def format_delta(delta):
+ """Get a nice looking string for a time delta.
+
+ :param delta: The time difference in seconds, can be positive or negative.
+ positive indicates time in the past, negative indicates time in the
+ future. (usually time.time() - stored_time)
+ :return: String formatted to show approximate resolution
+ """
+ delta = int(delta)
+ if delta >= 0:
+ direction = 'ago'
+ else:
+ direction = 'in the future'
+ delta = -delta
+
+ seconds = delta
+ if seconds < 90: # print seconds up to 90 seconds
+ if seconds == 1:
+ return '%d second %s' % (seconds, direction,)
+ else:
+ return '%d seconds %s' % (seconds, direction)
+
+ minutes = int(seconds / 60)
+ seconds -= 60 * minutes
+ if seconds == 1:
+ plural_seconds = ''
+ else:
+ plural_seconds = 's'
+ if minutes < 90: # print minutes, seconds up to 90 minutes
+ if minutes == 1:
+ return '%d minute, %d second%s %s' % (
+ minutes, seconds, plural_seconds, direction)
+ else:
+ return '%d minutes, %d second%s %s' % (
+ minutes, seconds, plural_seconds, direction)
+
+ hours = int(minutes / 60)
+ minutes -= 60 * hours
+ if minutes == 1:
+ plural_minutes = ''
+ else:
+ plural_minutes = 's'
+
+ if hours == 1:
+ return '%d hour, %d minute%s %s' % (hours, minutes,
+ plural_minutes, direction)
+ return '%d hours, %d minute%s %s' % (hours, minutes,
+ plural_minutes, direction)
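
Some expected outputs, following the thresholds used above (90 seconds, 90 minutes):

    format_delta(1)       # -> '1 second ago'
    format_delta(95)      # -> '1 minute, 35 seconds ago'
    format_delta(-7000)   # -> '1 hour, 56 minutes in the future'
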
+
+def filesize(f):
+ """Return size of given open file."""
+ return os.fstat(f.fileno())[stat.ST_SIZE]
+
+
+# Alias os.urandom to support platforms (which?) without /dev/urandom and
+# override if it doesn't work. Avoid checking on windows where there is
+# significant initialisation cost that can be avoided for some bzr calls.
+
+rand_bytes = os.urandom
+
+if rand_bytes.__module__ != "nt":
+ try:
+ rand_bytes(1)
+ except NotImplementedError:
+ # not well seeded, but better than nothing
+ def rand_bytes(n):
+ import random
+ s = ''
+ while n:
+ s += chr(random.randint(0, 255))
+ n -= 1
+ return s
+
+
+ALNUM = '0123456789abcdefghijklmnopqrstuvwxyz'
+def rand_chars(num):
+ """Return a random string of num alphanumeric characters
+
+ The result only contains lowercase chars because it may be used on
+ case-insensitive filesystems.
+ """
+ s = ''
+ for raw_byte in rand_bytes(num):
+ s += ALNUM[ord(raw_byte) % 36]
+ return s
+
+
+## TODO: We could later have path objects that remember their list
+## decomposition (might be too tricksy though.)
+
+def splitpath(p):
+ """Turn string into list of parts."""
+ # split on either delimiter because people might use either on
+ # Windows
+ ps = re.split(r'[\\/]', p)
+
+ rps = []
+ for f in ps:
+ if f == '..':
+ raise errors.BzrError(gettext("sorry, %r not allowed in path") % f)
+ elif (f == '.') or (f == ''):
+ pass
+ else:
+ rps.append(f)
+ return rps
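
For instance, either separator is accepted and '.' or empty components are dropped, while '..' is rejected:

    splitpath('a\\b/c')    # -> ['a', 'b', 'c']
    splitpath('a/./b//c')  # -> ['a', 'b', 'c']
    splitpath('a/../b')    # raises BzrError
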
+
+
+def joinpath(p):
+ for f in p:
+ if (f == '..') or (f is None) or (f == ''):
+ raise errors.BzrError(gettext("sorry, %r not allowed in path") % f)
+ return pathjoin(*p)
+
+
+def parent_directories(filename):
+ """Return the list of parent directories, deepest first.
+
+ For example, parent_directories("a/b/c") -> ["a/b", "a"].
+ """
+ parents = []
+ parts = splitpath(dirname(filename))
+ while parts:
+ parents.append(joinpath(parts))
+ parts.pop()
+ return parents
+
+
+_extension_load_failures = []
+
+
+def failed_to_load_extension(exception):
+ """Handle failing to load a binary extension.
+
+ This should be called from the ImportError block guarding the attempt to
+ import the native extension. If this function returns, the pure-Python
+ implementation should be loaded instead::
+
+ >>> try:
+ >>> import bzrlib._fictional_extension_pyx
+ >>> except ImportError, e:
+ >>> bzrlib.osutils.failed_to_load_extension(e)
+ >>> import bzrlib._fictional_extension_py
+ """
+ # NB: This docstring is just an example, not a doctest, because doctest
+ # currently can't cope with the use of lazy imports in this namespace --
+ # mbp 20090729
+
+ # This currently doesn't report the failure at the time it occurs, because
+ # they tend to happen very early in startup when we can't check config
+ # files etc, and also we want to report all failures but not spam the user
+ # with 10 warnings.
+ exception_str = str(exception)
+ if exception_str not in _extension_load_failures:
+ trace.mutter("failed to load compiled extension: %s" % exception_str)
+ _extension_load_failures.append(exception_str)
+
+
+def report_extension_load_failures():
+ if not _extension_load_failures:
+ return
+ if config.GlobalStack().get('ignore_missing_extensions'):
+ return
+ # the warnings framework should by default show this only once
+ from bzrlib.trace import warning
+ warning(
+ "bzr: warning: some compiled extensions could not be loaded; "
+ "see <https://answers.launchpad.net/bzr/+faq/703>")
+ # we no longer show the specific missing extensions here, because it makes
+ # the message too long and scary - see
+ # https://bugs.launchpad.net/bzr/+bug/430529
+
+
+try:
+ from bzrlib._chunks_to_lines_pyx import chunks_to_lines
+except ImportError, e:
+ failed_to_load_extension(e)
+ from bzrlib._chunks_to_lines_py import chunks_to_lines
+
+
+def split_lines(s):
+ """Split s into lines, but without removing the newline characters."""
+ # Trivially convert a fulltext into a 'chunked' representation, and let
+ # chunks_to_lines do the heavy lifting.
+ if isinstance(s, str):
+ # chunks_to_lines only supports 8-bit strings
+ return chunks_to_lines([s])
+ else:
+ return _split_lines(s)
+
+
+def _split_lines(s):
+ """Split s into lines, but without removing the newline characters.
+
+ This supports Unicode or plain string objects.
+ """
+ lines = s.split('\n')
+ result = [line + '\n' for line in lines[:-1]]
+ if lines[-1]:
+ result.append(lines[-1])
+ return result
+
+
+def hardlinks_good():
+ return sys.platform not in ('win32', 'cygwin', 'darwin')
+
+
+def link_or_copy(src, dest):
+ """Hardlink a file, or copy it if it can't be hardlinked."""
+ if not hardlinks_good():
+ shutil.copyfile(src, dest)
+ return
+ try:
+ os.link(src, dest)
+ except (OSError, IOError), e:
+ if e.errno != errno.EXDEV:
+ raise
+ shutil.copyfile(src, dest)
+
+
+def delete_any(path):
+ """Delete a file, symlink or directory.
+
+ Will delete even if readonly.
+ """
+ try:
+ _delete_file_or_dir(path)
+ except (OSError, IOError), e:
+ if e.errno in (errno.EPERM, errno.EACCES):
+ # make writable and try again
+ try:
+ make_writable(path)
+ except (OSError, IOError):
+ pass
+ _delete_file_or_dir(path)
+ else:
+ raise
+
+
+def _delete_file_or_dir(path):
+ # Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
+ # Forgiveness than Permission (EAFP) because:
+ # - root can damage a solaris file system by using unlink,
+ # - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
+ # EACCES, OSX: EPERM) when invoked on a directory.
+ if isdir(path): # Takes care of symlinks
+ os.rmdir(path)
+ else:
+ os.unlink(path)
+
+
+def has_symlinks():
+ if getattr(os, 'symlink', None) is not None:
+ return True
+ else:
+ return False
+
+
+def has_hardlinks():
+ if getattr(os, 'link', None) is not None:
+ return True
+ else:
+ return False
+
+
+def host_os_dereferences_symlinks():
+ return (has_symlinks()
+ and sys.platform not in ('cygwin', 'win32'))
+
+
+def readlink(abspath):
+ """Return a string representing the path to which the symbolic link points.
+
+ :param abspath: The link absolute unicode path.
+
+ This is guaranteed to return the symbolic link target as unicode in all
+ Python versions.
+ """
+ link = abspath.encode(_fs_enc)
+ target = os.readlink(link)
+ target = target.decode(_fs_enc)
+ return target
+
+
+def contains_whitespace(s):
+ """True if there are any whitespace characters in s."""
+ # string.whitespace can include '\xa0' in certain locales, because it is
+ # considered "non-breaking-space" as part of ISO-8859-1. But it
+ # 1) Isn't a breaking whitespace
+ # 2) Isn't one of ' \t\r\n' which are characters we sometimes use as
+ # separators
+ # 3) '\xa0' isn't unicode safe since it is >128.
+
+ # This should *not* be a unicode set of characters in case the source
+ # string is not a Unicode string. We can auto-up-cast the characters since
+ # they are ascii, but we don't want to auto-up-cast the string in case it
+ # is utf-8
+ for ch in ' \t\n\r\v\f':
+ if ch in s:
+ return True
+ else:
+ return False
+
+
+def contains_linebreaks(s):
+ """True if there is any vertical whitespace in s."""
+ for ch in '\f\n\r':
+ if ch in s:
+ return True
+ else:
+ return False
+
+
+def relpath(base, path):
+ """Return path relative to base, or raise PathNotChild exception.
+
+ The path may be either an absolute path or a path relative to the
+ current working directory.
+
+ os.path.commonprefix (python2.4) has a bad bug that it works just
+ on string prefixes, assuming that '/u' is a prefix of '/u2'. This
+ avoids that problem.
+
+ NOTE: `base` should not have a trailing slash otherwise you'll get
+ PathNotChild exceptions regardless of `path`.
+ """
+
+ if len(base) < MIN_ABS_PATHLENGTH:
+ # must have space for e.g. a drive letter
+ raise ValueError(gettext('%r is too short to calculate a relative path')
+ % (base,))
+
+ rp = abspath(path)
+
+ s = []
+ head = rp
+ while True:
+ if len(head) <= len(base) and head != base:
+ raise errors.PathNotChild(rp, base)
+ if head == base:
+ break
+ head, tail = split(head)
+ if tail:
+ s.append(tail)
+
+ if s:
+ return pathjoin(*reversed(s))
+ else:
+ return ''
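
For example (POSIX paths; note the warning above about trailing slashes on base):

    relpath('/srv/project', '/srv/project/src/main.py')   # -> 'src/main.py'
    relpath('/srv/project', '/srv/project')               # -> ''
    relpath('/srv/project', '/etc/passwd')                # raises PathNotChild
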
+
+
+def _cicp_canonical_relpath(base, path):
+ """Return the canonical path relative to base.
+
+ Like relpath, but on case-insensitive-case-preserving file-systems, this
+ will return the relpath as stored on the file-system rather than in the
+ case specified in the input string, for all existing portions of the path.
+
+ This will cause O(N) behaviour if called for every path in a tree; if you
+ have a number of paths to convert, you should use canonical_relpaths().
+ """
+ # TODO: it should be possible to optimize this for Windows by using the
+ # win32 API FindFiles function to look for the specified name - but using
+ # os.listdir() still gives us the correct, platform agnostic semantics in
+ # the short term.
+
+ rel = relpath(base, path)
+ # '.' will have been turned into ''
+ if not rel:
+ return rel
+
+ abs_base = abspath(base)
+ current = abs_base
+ _listdir = os.listdir
+
+ # use an explicit iterator so we can easily consume the rest on early exit.
+ bit_iter = iter(rel.split('/'))
+ for bit in bit_iter:
+ lbit = bit.lower()
+ try:
+ next_entries = _listdir(current)
+ except OSError: # enoent, eperm, etc
+ # We can't find this in the filesystem, so just append the
+ # remaining bits.
+ current = pathjoin(current, bit, *list(bit_iter))
+ break
+ for look in next_entries:
+ if lbit == look.lower():
+ current = pathjoin(current, look)
+ break
+ else:
+ # got to the end, nothing matched, so we just return the
+ # non-existing bits as they were specified (the filename may be
+ # the target of a move, for example).
+ current = pathjoin(current, bit, *list(bit_iter))
+ break
+ return current[len(abs_base):].lstrip('/')
+
+# XXX - TODO - we need better detection/integration of case-insensitive
+# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
+# filesystems), for example, so could probably benefit from the same basic
+# support there. For now though, only Windows and OSX get that support, and
+# they get it for *all* file-systems!
+if sys.platform in ('win32', 'darwin'):
+ canonical_relpath = _cicp_canonical_relpath
+else:
+ canonical_relpath = relpath
+
+def canonical_relpaths(base, paths):
+ """Create an iterable to canonicalize a sequence of relative paths.
+
+ The intent is for this implementation to use a cache, vastly speeding
+ up multiple transformations in the same directory.
+ """
+ # but for now, we haven't optimized...
+ return [canonical_relpath(base, p) for p in paths]
+
+
+def decode_filename(filename):
+ """Decode the filename using the filesystem encoding
+
+ If it is unicode, it is returned.
+ Otherwise it is decoded from the filesystem's encoding. If decoding
+ fails, an errors.BadFilenameEncoding exception is raised.
+ """
+ if type(filename) is unicode:
+ return filename
+ try:
+ return filename.decode(_fs_enc)
+ except UnicodeDecodeError:
+ raise errors.BadFilenameEncoding(filename, _fs_enc)
+
+
+def safe_unicode(unicode_or_utf8_string):
+ """Coerce unicode_or_utf8_string into unicode.
+
+ If it is unicode, it is returned.
+ Otherwise it is decoded from utf-8. If decoding fails, the exception is
+ wrapped in a BzrBadParameterNotUnicode exception.
+ """
+ if isinstance(unicode_or_utf8_string, unicode):
+ return unicode_or_utf8_string
+ try:
+ return unicode_or_utf8_string.decode('utf8')
+ except UnicodeDecodeError:
+ raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
+
+
+def safe_utf8(unicode_or_utf8_string):
+ """Coerce unicode_or_utf8_string to a utf8 string.
+
+ If it is a str, it is returned.
+ If it is Unicode, it is encoded into a utf-8 string.
+ """
+ if isinstance(unicode_or_utf8_string, str):
+ # TODO: jam 20070209 This is overkill, and probably has an impact on
+ # performance if we are dealing with lots of apis that want a
+ # utf-8 revision id
+ try:
+ # Make sure it is a valid utf-8 string
+ unicode_or_utf8_string.decode('utf-8')
+ except UnicodeDecodeError:
+ raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
+ return unicode_or_utf8_string
+ return unicode_or_utf8_string.encode('utf-8')
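
Round-trip behaviour of the two helpers under Python 2 (the byte string here is 'café' encoded as UTF-8):

    safe_unicode('caf\xc3\xa9')   # -> u'caf\xe9'
    safe_utf8(u'caf\xe9')         # -> 'caf\xc3\xa9'
    safe_utf8('caf\xc3\xa9')      # returned unchanged once validated as UTF-8
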
+
+
+_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
+ ' Revision id generators should be creating utf8'
+ ' revision ids.')
+
+
+def safe_revision_id(unicode_or_utf8_string, warn=True):
+ """Revision ids should now be utf8, but at one point they were unicode.
+
+ :param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
+ utf8 or None).
+ :param warn: Functions that are sanitizing user data can set warn=False
+ :return: None or a utf8 revision id.
+ """
+ if (unicode_or_utf8_string is None
+ or unicode_or_utf8_string.__class__ == str):
+ return unicode_or_utf8_string
+ if warn:
+ symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
+ stacklevel=2)
+ return cache_utf8.encode(unicode_or_utf8_string)
+
+
+_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
+ ' generators should be creating utf8 file ids.')
+
+
+def safe_file_id(unicode_or_utf8_string, warn=True):
+ """File ids should now be utf8, but at one point they were unicode.
+
+ This is the same as safe_utf8, except it uses the cached encode functions
+ to save a little bit of performance.
+
+ :param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
+ utf8 or None).
+ :param warn: Functions that are sanitizing user data can set warn=False
+ :return: None or a utf8 file id.
+ """
+ if (unicode_or_utf8_string is None
+ or unicode_or_utf8_string.__class__ == str):
+ return unicode_or_utf8_string
+ if warn:
+ symbol_versioning.warn(_file_id_warning, DeprecationWarning,
+ stacklevel=2)
+ return cache_utf8.encode(unicode_or_utf8_string)
+
+
+_platform_normalizes_filenames = False
+if sys.platform == 'darwin':
+ _platform_normalizes_filenames = True
+
+
+def normalizes_filenames():
+ """Return True if this platform normalizes unicode filenames.
+
+ Only Mac OSX.
+ """
+ return _platform_normalizes_filenames
+
+
+def _accessible_normalized_filename(path):
+ """Get the unicode normalized path, and if you can access the file.
+
+ On platforms where the system normalizes filenames (Mac OSX),
+ you can access a file by any path which will normalize correctly.
+ On platforms where the system does not normalize filenames
+ (everything else), you have to access a file by its exact path.
+
+ Internally, bzr only supports NFC normalization, since that is
+ the standard for XML documents.
+
+ So return the normalized path, and a flag indicating if the file
+ can be accessed by that path.
+ """
+
+ return unicodedata.normalize('NFC', unicode(path)), True
+
+
+def _inaccessible_normalized_filename(path):
+ __doc__ = _accessible_normalized_filename.__doc__
+
+ normalized = unicodedata.normalize('NFC', unicode(path))
+ return normalized, normalized == path
+
+
+if _platform_normalizes_filenames:
+ normalized_filename = _accessible_normalized_filename
+else:
+ normalized_filename = _inaccessible_normalized_filename
+
+
+def set_signal_handler(signum, handler, restart_syscall=True):
+ """A wrapper for signal.signal that also calls siginterrupt(signum, False)
+ on platforms that support that.
+
+ :param restart_syscall: if set, allow syscalls interrupted by a signal to
+ automatically restart (by calling `signal.siginterrupt(signum,
+ False)`). May be ignored if the feature is not available on this
+ platform or Python version.
+ """
+ try:
+ import signal
+ siginterrupt = signal.siginterrupt
+ except ImportError:
+ # This python implementation doesn't provide signal support, hence no
+ # handler exists
+ return None
+ except AttributeError:
+ # siginterrupt doesn't exist on this platform, or for this version
+ # of Python.
+ siginterrupt = lambda signum, flag: None
+ if restart_syscall:
+ def sig_handler(*args):
+ # Python resets the siginterrupt flag when a signal is
+ # received. <http://bugs.python.org/issue8354>
+ # As a workaround for some cases, set it back the way we want it.
+ siginterrupt(signum, False)
+ # Now run the handler function passed to set_signal_handler.
+ handler(*args)
+ else:
+ sig_handler = handler
+ old_handler = signal.signal(signum, sig_handler)
+ if restart_syscall:
+ siginterrupt(signum, False)
+ return old_handler
+
+
+default_terminal_width = 80
+"""The default terminal width for ttys.
+
+This is defined so that higher levels can share a common fallback value when
+terminal_width() returns None.
+"""
+
+# Keep some state so that terminal_width can detect if _terminal_size has
+# returned a different size since the process started. See docstring and
+# comments of terminal_width for details.
+# _terminal_size_state has 3 possible values: no_data, unchanged, and changed.
+_terminal_size_state = 'no_data'
+_first_terminal_size = None
+
+def terminal_width():
+ """Return terminal width.
+
+ None is returned if the width can't be established precisely.
+
+ The rules are:
+ - if BZR_COLUMNS is set, returns its value
+ - if there is no controlling terminal, returns None
+ - query the OS, if the queried size has changed since the last query,
+ return its value,
+ - if COLUMNS is set, returns its value,
+ - if the OS has a value (even though it's never changed), return its value.
+
+ From there, we need to query the OS to get the size of the controlling
+ terminal.
+
+ On Unices we query the OS by:
+ - get termios.TIOCGWINSZ
+ - if an error occurs or a negative value is obtained, returns None
+
+ On Windows we query the OS by:
+ - win32utils.get_console_size() decides,
+ - returns None on error (provided default value)
+ """
+ # Note to implementors: if changing the rules for determining the width,
+ # make sure you've considered the behaviour in these cases:
+ # - M-x shell in emacs, where $COLUMNS is set and TIOCGWINSZ returns 0,0.
+ # - bzr log | less, in bash, where $COLUMNS not set and TIOCGWINSZ returns
+ # 0,0.
+ # - (add more interesting cases here, if you find any)
+ # Some programs implement "Use $COLUMNS (if set) until SIGWINCH occurs",
+ # but we don't want to register a signal handler because it is impossible
+ # to do so without risking EINTR errors in Python <= 2.6.5 (see
+ # <http://bugs.python.org/issue8354>). Instead we check TIOCGWINSZ every
+ # time so we can notice if the reported size has changed, which should have
+ # a similar effect.
+
+ # If BZR_COLUMNS is set, take it, user is always right
+ # Except if they specified 0 in which case, impose no limit here
+ try:
+ width = int(os.environ['BZR_COLUMNS'])
+ except (KeyError, ValueError):
+ width = None
+ if width is not None:
+ if width > 0:
+ return width
+ else:
+ return None
+
+ isatty = getattr(sys.stdout, 'isatty', None)
+ if isatty is None or not isatty():
+ # Don't guess, setting BZR_COLUMNS is the recommended way to override.
+ return None
+
+ # Query the OS
+ width, height = os_size = _terminal_size(None, None)
+ global _first_terminal_size, _terminal_size_state
+ if _terminal_size_state == 'no_data':
+ _first_terminal_size = os_size
+ _terminal_size_state = 'unchanged'
+ elif (_terminal_size_state == 'unchanged' and
+ _first_terminal_size != os_size):
+ _terminal_size_state = 'changed'
+
+ # If the OS claims to know how wide the terminal is, and this value has
+ # ever changed, use that.
+ if _terminal_size_state == 'changed':
+ if width is not None and width > 0:
+ return width
+
+ # If COLUMNS is set, use it.
+ try:
+ return int(os.environ['COLUMNS'])
+ except (KeyError, ValueError):
+ pass
+
+ # Finally, use an unchanged size from the OS, if we have one.
+ if _terminal_size_state == 'unchanged':
+ if width is not None and width > 0:
+ return width
+
+ # The width could not be determined.
+ return None
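
A sketch of the BZR_COLUMNS override described in the rules above:

    import os
    os.environ['BZR_COLUMNS'] = '132'
    terminal_width()    # -> 132, regardless of the real terminal size
    os.environ['BZR_COLUMNS'] = '0'
    terminal_width()    # -> None, i.e. impose no width limit
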
+
+
+def _win32_terminal_size(width, height):
+ width, height = win32utils.get_console_size(defaultx=width, defaulty=height)
+ return width, height
+
+
+def _ioctl_terminal_size(width, height):
+ try:
+ import struct, fcntl, termios
+ s = struct.pack('HHHH', 0, 0, 0, 0)
+ x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
+ height, width = struct.unpack('HHHH', x)[0:2]
+ except (IOError, AttributeError):
+ pass
+ return width, height
+
+_terminal_size = None
+"""Returns the terminal size as (width, height).
+
+:param width: Default value for width.
+:param height: Default value for height.
+
+ This is defined specifically for each OS and queries the size of the controlling
+terminal. If any error occurs, the provided default values should be returned.
+"""
+if sys.platform == 'win32':
+ _terminal_size = _win32_terminal_size
+else:
+ _terminal_size = _ioctl_terminal_size
+
+
+def supports_executable():
+ return sys.platform != "win32"
+
+
+def supports_posix_readonly():
+ """Return True if 'readonly' has POSIX semantics, False otherwise.
+
+ Notably, a win32 readonly file cannot be deleted, unlike POSIX where the
+ directory controls creation/deletion, etc.
+
+ And under win32, readonly means that the directory itself cannot be
+ deleted. The contents of a readonly directory can be changed, unlike POSIX
+ where files in readonly directories cannot be added, deleted or renamed.
+ """
+ return sys.platform != "win32"
+
+
+def set_or_unset_env(env_variable, value):
+ """Modify the environment, setting or removing the env_variable.
+
+ :param env_variable: The environment variable in question
+ :param value: The value to set the environment to. If None, then
+ the variable will be removed.
+ :return: The original value of the environment variable.
+ """
+ orig_val = os.environ.get(env_variable)
+ if value is None:
+ if orig_val is not None:
+ del os.environ[env_variable]
+ else:
+ if isinstance(value, unicode):
+ value = value.encode(get_user_encoding())
+ os.environ[env_variable] = value
+ return orig_val
+
+
+_validWin32PathRE = re.compile(r'^([A-Za-z]:[/\\])?[^:<>*"?\|]*$')
+
+
+def check_legal_path(path):
+ """Check whether the supplied path is legal.
+ This is only required on Windows, so we don't test on other platforms
+ right now.
+ """
+ if sys.platform != "win32":
+ return
+ if _validWin32PathRE.match(path) is None:
+ raise errors.IllegalPath(path)
+
+
+_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR
+
+def _is_error_enotdir(e):
+ """Check if this exception represents ENOTDIR.
+
+ Unfortunately, python is very inconsistent about the exception
+ here. The cases are:
+ 1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
+ 2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
+ which is the windows error code.
+ 3) Windows, Python2.5 uses errno == EINVAL and
+ winerror == ERROR_DIRECTORY
+
+ :param e: An Exception object (expected to be OSError with an errno
+ attribute, but we should be able to cope with anything)
+ :return: True if this represents an ENOTDIR error. False otherwise.
+ """
+ en = getattr(e, 'errno', None)
+ if (en == errno.ENOTDIR
+ or (sys.platform == 'win32'
+ and (en == _WIN32_ERROR_DIRECTORY
+ or (en == errno.EINVAL
+ and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
+ ))):
+ return True
+ return False
+
+
+def walkdirs(top, prefix=""):
+ """Yield data about all the directories in a tree.
+
+ This yields all the data about the contents of a directory at a time.
+ After each directory has been yielded, if the caller has mutated the list
+ to exclude some directories, they are then not descended into.
+
+ The data yielded is of the form:
+ ((directory-relpath, directory-path-from-top),
+ [(relpath, basename, kind, lstat, path-from-top), ...]),
+ - directory-relpath is the relative path of the directory being returned
+ with respect to top. prefix is prepended to this.
+ - directory-path-from-top is the path including top for this directory.
+ It is suitable for use with os functions.
+ - relpath is the relative path within the subtree being walked.
+ - basename is the basename of the path
+ - kind is the kind of the file now. If unknown then the file is not
+ present within the tree - but it may be recorded as versioned. See
+ versioned_kind.
+ - lstat is the stat data *if* the file was statted.
+ - planned, not implemented:
+ path_from_tree_root is the path from the root of the tree.
+
+ :param prefix: Prefix the relpaths that are yielded with 'prefix'. This
+ allows one to walk a subtree but get paths that are relative to a tree
+ rooted higher up.
+ :return: an iterator over the dirs.
+ """
+ #TODO there is a bit of a smell where the results of the directory-
+ # summary in this, and the path from the root, may not agree
+ # depending on top and prefix - i.e. ./foo and foo as a pair leads to
+ # potentially confusing output. We should make this more robust - but
+ # not at a speed cost. RBC 20060731
+ _lstat = os.lstat
+ _directory = _directory_kind
+ _listdir = os.listdir
+ _kind_from_mode = file_kind_from_stat_mode
+ pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
+ while pending:
+ # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
+ relroot, _, _, _, top = pending.pop()
+ if relroot:
+ relprefix = relroot + u'/'
+ else:
+ relprefix = ''
+ top_slash = top + u'/'
+
+ dirblock = []
+ append = dirblock.append
+ try:
+ names = sorted(map(decode_filename, _listdir(top)))
+ except OSError, e:
+ if not _is_error_enotdir(e):
+ raise
+ else:
+ for name in names:
+ abspath = top_slash + name
+ statvalue = _lstat(abspath)
+ kind = _kind_from_mode(statvalue.st_mode)
+ append((relprefix + name, name, kind, statvalue, abspath))
+ yield (relroot, top), dirblock
+
+ # push the user specified dirs from dirblock
+ pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
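
A sketch of the pruning behaviour mentioned in the docstring: mutating the yielded list in place stops the excluded directories from being descended into ('/some/tree' and the 'build' name are hypothetical):

    for (dir_relpath, dir_abspath), entries in walkdirs('/some/tree'):
        # Drop any subdirectory named 'build' before the walker recurses.
        entries[:] = [e for e in entries
                      if not (e[2] == 'directory' and e[1] == 'build')]
        for relpath, name, kind, statvalue, abspath in entries:
            pass  # examine each remaining entry here
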
+
+
+class DirReader(object):
+ """An interface for reading directories."""
+
+ def top_prefix_to_starting_dir(self, top, prefix=""):
+ """Converts top and prefix to a starting dir entry
+
+ :param top: A utf8 path
+ :param prefix: An optional utf8 path to prefix output relative paths
+ with.
+ :return: A tuple starting with prefix, and ending with the native
+ encoding of top.
+ """
+ raise NotImplementedError(self.top_prefix_to_starting_dir)
+
+ def read_dir(self, prefix, top):
+ """Read a specific dir.
+
+ :param prefix: A utf8 prefix to be prepended to the path basenames.
+ :param top: A natively encoded path to read.
+ :return: A list of the directories contents. Each item contains:
+ (utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
+ """
+ raise NotImplementedError(self.read_dir)
+
+
+_selected_dir_reader = None
+
+
+def _walkdirs_utf8(top, prefix=""):
+ """Yield data about all the directories in a tree.
+
+ This yields the same information as walkdirs() only each entry is yielded
+ in utf-8. On platforms which have a filesystem encoding of utf8 the paths
+ are returned as exact byte-strings.
+
+ :return: yields a tuple of (dir_info, [file_info])
+ dir_info is (utf8_relpath, path-from-top)
+ file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
+ if top is an absolute path, path-from-top is also an absolute path.
+ path-from-top might be unicode or utf8, but it is the correct path to
+ pass to os functions (such as os.lstat) to affect the file in question.
+ """
+ global _selected_dir_reader
+ if _selected_dir_reader is None:
+ if sys.platform == "win32" and win32utils.winver == 'Windows NT':
+ # Win98 doesn't have unicode apis like FindFirstFileW
+ # TODO: We possibly could support Win98 by falling back to the
+ # original FindFirstFile, and using TCHAR instead of WCHAR,
+ # but that gets a bit tricky, and requires custom compiling
+ # for win98 anyway.
+ try:
+ from bzrlib._walkdirs_win32 import Win32ReadDir
+ _selected_dir_reader = Win32ReadDir()
+ except ImportError:
+ pass
+ elif _fs_enc in ('utf-8', 'ascii'):
+ try:
+ from bzrlib._readdir_pyx import UTF8DirReader
+ _selected_dir_reader = UTF8DirReader()
+ except ImportError, e:
+ failed_to_load_extension(e)
+ pass
+
+ if _selected_dir_reader is None:
+ # Fallback to the python version
+ _selected_dir_reader = UnicodeDirReader()
+
+ # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
+ # But we don't actually use 1-3 in pending, so set them to None
+ pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
+ read_dir = _selected_dir_reader.read_dir
+ _directory = _directory_kind
+ while pending:
+ relroot, _, _, _, top = pending[-1].pop()
+ if not pending[-1]:
+ pending.pop()
+ dirblock = sorted(read_dir(relroot, top))
+ yield (relroot, top), dirblock
+ # push the user specified dirs from dirblock
+ next = [d for d in reversed(dirblock) if d[2] == _directory]
+ if next:
+ pending.append(next)
+
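+ # Note (illustrative, not part of the original source): the platform-specific
+ # reader is chosen once and cached in the module-level _selected_dir_reader,
+ # so a test that wants a particular implementation can assign it directly
+ # before the first call, e.g.:
+ #
+ # _selected_dir_reader = UnicodeDirReader()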
+
+class UnicodeDirReader(DirReader):
+ """A dir reader for non-utf8 file systems, which transcodes."""
+
+ __slots__ = ['_utf8_encode']
+
+ def __init__(self):
+ self._utf8_encode = codecs.getencoder('utf8')
+
+ def top_prefix_to_starting_dir(self, top, prefix=""):
+ """See DirReader.top_prefix_to_starting_dir."""
+ return (safe_utf8(prefix), None, None, None, safe_unicode(top))
+
+ def read_dir(self, prefix, top):
+ """Read a single directory from a non-utf8 file system.
+
+ top, and the abspath element in the output, are unicode; all other
+ paths are utf8. Local disk IO is done via unicode calls to listdir etc.
+
+ This is currently the fallback code path when the filesystem encoding is
+ not UTF-8. It may be better to implement an alternative so that we can
+ safely handle paths that are not properly decodable in the current
+ encoding.
+
+ See DirReader.read_dir for details.
+ """
+ _utf8_encode = self._utf8_encode
+ _lstat = os.lstat
+ _listdir = os.listdir
+ _kind_from_mode = file_kind_from_stat_mode
+
+ if prefix:
+ relprefix = prefix + '/'
+ else:
+ relprefix = ''
+ top_slash = top + u'/'
+
+ dirblock = []
+ append = dirblock.append
+ for name in sorted(_listdir(top)):
+ try:
+ name_utf8 = _utf8_encode(name)[0]
+ except UnicodeDecodeError:
+ raise errors.BadFilenameEncoding(
+ _utf8_encode(relprefix)[0] + name, _fs_enc)
+ abspath = top_slash + name
+ statvalue = _lstat(abspath)
+ kind = _kind_from_mode(statvalue.st_mode)
+ append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
+ return dirblock
+
+
+def copy_tree(from_path, to_path, handlers={}):
+ """Copy all of the entries in from_path into to_path.
+
+ :param from_path: The base directory to copy.
+ :param to_path: The target directory. If it does not exist, it will
+ be created.
+ :param handlers: A dictionary of functions, each taking a source and a
+ destination path, keyed on the file kind ('file', 'directory',
+ 'symlink', etc.).
+ Handlers for 'directory', 'symlink', and 'file' should always exist;
+ if any are missing they default to 'os.mkdir()',
+ 'os.readlink() + os.symlink()', and 'shutil.copy2()', respectively.
+ """
+ # Now, just copy the existing cached tree to the new location
+ # We use a cheap trick here.
+ # Absolute paths are prefixed with the first parameter
+ # relative paths are prefixed with the second.
+ # So we can get both the source and target returned
+ # without any extra work.
+
+ def copy_dir(source, dest):
+ os.mkdir(dest)
+
+ def copy_link(source, dest):
+ """Copy the contents of a symlink"""
+ link_to = os.readlink(source)
+ os.symlink(link_to, dest)
+
+ real_handlers = {'file':shutil.copy2,
+ 'symlink':copy_link,
+ 'directory':copy_dir,
+ }
+ real_handlers.update(handlers)
+
+ if not os.path.exists(to_path):
+ real_handlers['directory'](from_path, to_path)
+
+ for dir_info, entries in walkdirs(from_path, prefix=to_path):
+ for relpath, name, kind, st, abspath in entries:
+ real_handlers[kind](abspath, relpath)
+
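+ # Usage sketch (not part of the original module): copy a tree while
+ # overriding how plain files are handled. 'my_copy' is a hypothetical
+ # callable taking (source, dest), matching the handler contract above.
+ #
+ # copy_tree('src-dir', 'dest-dir', handlers={'file': my_copy})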
+
+def copy_ownership_from_path(dst, src=None):
+ """Copy usr/grp ownership from src file/dir to dst file/dir.
+
+ If src is None, the containing directory is used as source. If chown
+ fails, the error is ignored and a warning is printed.
+ """
+ chown = getattr(os, 'chown', None)
+ if chown is None:
+ return
+
+ if src is None:
+ src = os.path.dirname(dst)
+ if src == '':
+ src = '.'
+
+ try:
+ s = os.stat(src)
+ chown(dst, s.st_uid, s.st_gid)
+ except OSError, e:
+ trace.warning(
+ 'Unable to copy ownership from "%s" to "%s". '
+ 'You may want to set it manually.', src, dst)
+ trace.log_exception_quietly()
+
+
+def path_prefix_key(path):
+ """Generate a prefix-order path key for path.
+
+ This can be used to sort paths in the same way that walkdirs does.
+ """
+ return (dirname(path), path)
+
+
+def compare_paths_prefix_order(path_a, path_b):
+ """Compare path_a and path_b to generate the same order walkdirs uses."""
+ key_a = path_prefix_key(path_a)
+ key_b = path_prefix_key(path_b)
+ return cmp(key_a, key_b)
+
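+ # Worked example (illustrative, not in the original source): the prefix key
+ # groups entries under their parent directory, which plain lexicographic
+ # ordering does not, e.g.:
+ #
+ # sorted(['a-c/b', 'a/z'], key=path_prefix_key) # -> ['a/z', 'a-c/b']
+ # sorted(['a-c/b', 'a/z']) # -> ['a-c/b', 'a/z']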
+
+_cached_user_encoding = None
+
+
+def get_user_encoding(use_cache=DEPRECATED_PARAMETER):
+ """Find out what the preferred user encoding is.
+
+ This is generally the encoding that is used for command line parameters
+ and file contents. This may be different from the terminal encoding
+ or the filesystem encoding.
+
+ :return: A string defining the preferred user encoding
+ """
+ global _cached_user_encoding
+ if deprecated_passed(use_cache):
+ warn_deprecated("use_cache should only have been used for tests",
+ DeprecationWarning, stacklevel=2)
+ if _cached_user_encoding is not None:
+ return _cached_user_encoding
+
+ if os.name == 'posix' and getattr(locale, 'CODESET', None) is not None:
+ # Use the existing locale settings and call nl_langinfo directly
+ # rather than going through getpreferredencoding. This avoids
+ # <http://bugs.python.org/issue6202> on OSX Python 2.6 and the
+ # possibility of the setlocale call throwing an error.
+ user_encoding = locale.nl_langinfo(locale.CODESET)
+ else:
+ # GZ 2011-12-19: On windows could call GetACP directly instead.
+ user_encoding = locale.getpreferredencoding(False)
+
+ try:
+ user_encoding = codecs.lookup(user_encoding).name
+ except LookupError:
+ if user_encoding not in ("", "cp0"):
+ sys.stderr.write('bzr: warning:'
+ ' unknown encoding %s.'
+ ' Continuing with ascii encoding.\n'
+ % user_encoding
+ )
+ user_encoding = 'ascii'
+ else:
+ # Get 'ascii' when setlocale has not been called or LANG=C or unset.
+ if user_encoding == 'ascii':
+ if sys.platform == 'darwin':
+ # OSX is special-cased in Python to have a UTF-8 filesystem
+ # encoding and previously had LANG set here if not present.
+ user_encoding = 'utf-8'
+ # GZ 2011-12-19: Maybe UTF-8 should be the default in this case
+ # for some other posix platforms as well.
+
+ _cached_user_encoding = user_encoding
+ return user_encoding
+
+
+def get_diff_header_encoding():
+ return get_terminal_encoding()
+
+
+def get_host_name():
+ """Return the current unicode host name.
+
+ This is meant to be used in place of socket.gethostname() because that
+ behaves inconsistently on different platforms.
+ """
+ if sys.platform == "win32":
+ return win32utils.get_host_name()
+ else:
+ import socket
+ return socket.gethostname().decode(get_user_encoding())
+
+
+# We must not read/write any more than 64k at a time from/to a socket so we
+# don't risk "no buffer space available" errors on some platforms. Windows in
+# particular is likely to throw WSAECONNABORTED or WSAENOBUFS if given too much
+# data at once.
+MAX_SOCKET_CHUNK = 64 * 1024
+
+_end_of_stream_errors = [errno.ECONNRESET]
+for _eno in ['WSAECONNRESET', 'WSAECONNABORTED']:
+ _eno = getattr(errno, _eno, None)
+ if _eno is not None:
+ _end_of_stream_errors.append(_eno)
+del _eno
+
+
+def read_bytes_from_socket(sock, report_activity=None,
+ max_read_size=MAX_SOCKET_CHUNK):
+ """Read up to max_read_size of bytes from sock and notify of progress.
+
+ Translates "Connection reset by peer" into file-like EOF (return an
+ empty string rather than raise an error), and repeats the recv if
+ interrupted by a signal.
+ """
+ while 1:
+ try:
+ bytes = sock.recv(max_read_size)
+ except socket.error, e:
+ eno = e.args[0]
+ if eno in _end_of_stream_errors:
+ # The connection was closed by the other side. Callers expect
+ # an empty string to signal end-of-stream.
+ return ""
+ elif eno == errno.EINTR:
+ # Retry the interrupted recv.
+ continue
+ raise
+ else:
+ if report_activity is not None:
+ report_activity(len(bytes), 'read')
+ return bytes
+
+
+def recv_all(socket, count):
+ """Receive an exact number of bytes.
+
+ Regular Socket.recv() may return less than the requested number of bytes,
+ depending on what's in the OS buffer. MSG_WAITALL is not available
+ on all platforms, but this should work everywhere. This will return
+ less than the requested amount if the remote end closes.
+
+ This isn't optimized and is intended mostly for use in testing.
+ """
+ b = ''
+ while len(b) < count:
+ new = read_bytes_from_socket(socket, None, count - len(b))
+ if new == '':
+ break # eof
+ b += new
+ return b
+
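+ # Usage sketch (not part of the original module): read a fixed-width ASCII
+ # length header followed by the payload it describes; 'conn' is a
+ # hypothetical connected socket.
+ #
+ # body_len = int(recv_all(conn, 8)) # peer assumed to send e.g. '00000042'
+ # body = recv_all(conn, body_len)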
+
+def send_all(sock, bytes, report_activity=None):
+ """Send all bytes on a socket.
+
+ Breaks large blocks in smaller chunks to avoid buffering limitations on
+ some platforms, and catches EINTR which may be thrown if the send is
+ interrupted by a signal.
+
+ This is preferred to socket.sendall(), because it avoids portability bugs
+ and provides activity reporting.
+
+ :param report_activity: Call this as bytes are sent, see
+ Transport._report_activity
+ """
+ sent_total = 0
+ byte_count = len(bytes)
+ while sent_total < byte_count:
+ try:
+ sent = sock.send(buffer(bytes, sent_total, MAX_SOCKET_CHUNK))
+ except socket.error, e:
+ if e.args[0] != errno.EINTR:
+ raise
+ else:
+ sent_total += sent
+ if report_activity is not None:
+ report_activity(sent, 'write')
+
+
+def connect_socket(address):
+ # Slight variation of the socket.create_connection() function (provided by
+ # python-2.6) that can fail if getaddrinfo returns an empty list. We also
+ # provide it for previous python versions. Also, we don't use the timeout
+ # parameter (provided by the python implementation), so we don't implement
+ # it either.
+ err = socket.error('getaddrinfo returns an empty list')
+ host, port = address
+ for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+ sock.connect(sa)
+ return sock
+
+ except socket.error, err:
+ # 'err' is now the most recent error
+ if sock is not None:
+ sock.close()
+ raise err
+
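+ # Illustrative sketch (not in the original source): connect_socket together
+ # with send_all and read_bytes_from_socket gives a minimal request/response
+ # exchange.
+ #
+ # sock = connect_socket(('example.com', 80))
+ # send_all(sock, 'HEAD / HTTP/1.0\r\n\r\n')
+ # reply = read_bytes_from_socket(sock)
+ # sock.close()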
+
+def dereference_path(path):
+ """Determine the real path to a file.
+
+ All parent elements are dereferenced. But the file itself is not
+ dereferenced.
+ :param path: The original path. May be absolute or relative.
+ :return: the real path *to* the file
+ """
+ parent, base = os.path.split(path)
+ # The pathjoin for '.' is a workaround for Python bug #1213894.
+ # (initial path components aren't dereferenced)
+ return pathjoin(realpath(pathjoin('.', parent)), base)
+
+
+def supports_mapi():
+ """Return True if we can use MAPI to launch a mail client."""
+ return sys.platform == "win32"
+
+
+def resource_string(package, resource_name):
+ """Load a resource from a package and return it as a string.
+
+ Note: Only packages that start with bzrlib are currently supported.
+
+ This is designed to be a lightweight implementation of resource
+ loading in a way which is API compatible with the same API from
+ pkg_resources. See
+ http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
+ If and when pkg_resources becomes a standard library, this routine
+ can delegate to it.
+ """
+ # Check package name is within bzrlib
+ if package == "bzrlib":
+ resource_relpath = resource_name
+ elif package.startswith("bzrlib."):
+ package = package[len("bzrlib."):].replace('.', os.sep)
+ resource_relpath = pathjoin(package, resource_name)
+ else:
+ raise errors.BzrError('resource package %s not in bzrlib' % package)
+
+ # Map the resource to a file and read its contents
+ base = dirname(bzrlib.__file__)
+ if getattr(sys, 'frozen', None): # bzr.exe
+ base = abspath(pathjoin(base, '..', '..'))
+ f = file(pathjoin(base, resource_relpath), "rU")
+ try:
+ return f.read()
+ finally:
+ f.close()
+
+def file_kind_from_stat_mode_thunk(mode):
+ global file_kind_from_stat_mode
+ if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
+ try:
+ from bzrlib._readdir_pyx import UTF8DirReader
+ file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
+ except ImportError, e:
+ # This is one time where we won't warn that an extension failed to
+ # load. The extension is never available on Windows anyway.
+ from bzrlib._readdir_py import (
+ _kind_from_mode as file_kind_from_stat_mode
+ )
+ return file_kind_from_stat_mode(mode)
+file_kind_from_stat_mode = file_kind_from_stat_mode_thunk
+
+def file_stat(f, _lstat=os.lstat):
+ try:
+ # XXX cache?
+ return _lstat(f)
+ except OSError, e:
+ if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
+ raise errors.NoSuchFile(f)
+ raise
+
+def file_kind(f, _lstat=os.lstat):
+ stat_value = file_stat(f, _lstat)
+ return file_kind_from_stat_mode(stat_value.st_mode)
+
+def until_no_eintr(f, *a, **kw):
+ """Run f(*a, **kw), retrying if an EINTR error occurs.
+
+ WARNING: you must be certain that it is safe to retry the call repeatedly
+ if EINTR does occur. This is typically only true for low-level operations
+ like os.read. If in any doubt, don't use this.
+
+ Keep in mind that this is not a complete solution to EINTR. There is
+ probably code in the Python standard library and other dependencies that
+ may encounter EINTR if a signal arrives (and there is signal handler for
+ that signal). So this function can reduce the impact for IO that bzrlib
+ directly controls, but it is not a complete solution.
+ """
+ # Borrowed from Twisted's twisted.python.util.untilConcludes function.
+ while True:
+ try:
+ return f(*a, **kw)
+ except (IOError, OSError), e:
+ if e.errno == errno.EINTR:
+ continue
+ raise
+
+
+@deprecated_function(deprecated_in((2, 2, 0)))
+def re_compile_checked(re_string, flags=0, where=""):
+ """Return a compiled re, or raise a sensible error.
+
+ This should only be used when compiling user-supplied REs.
+
+ :param re_string: Text form of regular expression.
+ :param flags: eg re.IGNORECASE
+ :param where: Message explaining to the user the context where
+ it occurred, eg 'log search filter'.
+ """
+ # from https://bugs.launchpad.net/bzr/+bug/251352
+ try:
+ re_obj = re.compile(re_string, flags)
+ re_obj.search("")
+ return re_obj
+ except errors.InvalidPattern, e:
+ if where:
+ where = ' in ' + where
+ # despite the name 'error' is a type
+ raise errors.BzrCommandError('Invalid regular expression%s: %s'
+ % (where, e.msg))
+
+
+if sys.platform == "win32":
+ def getchar():
+ import msvcrt
+ return msvcrt.getch()
+else:
+ def getchar():
+ import tty
+ import termios
+ fd = sys.stdin.fileno()
+ settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(fd)
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, settings)
+ return ch
+
+if sys.platform.startswith('linux'):
+ def _local_concurrency():
+ try:
+ return os.sysconf('SC_NPROCESSORS_ONLN')
+ except (ValueError, OSError, AttributeError):
+ return None
+elif sys.platform == 'darwin':
+ def _local_concurrency():
+ return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
+ stdout=subprocess.PIPE).communicate()[0]
+elif "bsd" in sys.platform:
+ def _local_concurrency():
+ return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
+ stdout=subprocess.PIPE).communicate()[0]
+elif sys.platform == 'sunos5':
+ def _local_concurrency():
+ return subprocess.Popen(['psrinfo', '-p',],
+ stdout=subprocess.PIPE).communicate()[0]
+elif sys.platform == "win32":
+ def _local_concurrency():
+ # This appears to return the number of cores.
+ return os.environ.get('NUMBER_OF_PROCESSORS')
+else:
+ def _local_concurrency():
+ # Who knows ?
+ return None
+
+
+_cached_local_concurrency = None
+
+def local_concurrency(use_cache=True):
+ """Return how many processes can be run concurrently.
+
+ Rely on platform specific implementations and default to 1 (one) if
+ anything goes wrong.
+ """
+ global _cached_local_concurrency
+
+ if _cached_local_concurrency is not None and use_cache:
+ return _cached_local_concurrency
+
+ concurrency = os.environ.get('BZR_CONCURRENCY', None)
+ if concurrency is None:
+ try:
+ import multiprocessing
+ concurrency = multiprocessing.cpu_count()
+ except (ImportError, NotImplementedError):
+ # multiprocessing is only available on Python >= 2.6
+ # and multiprocessing.cpu_count() isn't implemented on all
+ # platforms
+ try:
+ concurrency = _local_concurrency()
+ except (OSError, IOError):
+ pass
+ try:
+ concurrency = int(concurrency)
+ except (TypeError, ValueError):
+ concurrency = 1
+ if use_cache:
+ _cached_local_concurrency = concurrency
+ return concurrency
+
+
+class UnicodeOrBytesToBytesWriter(codecs.StreamWriter):
+ """A stream writer that doesn't decode str arguments."""
+
+ def __init__(self, encode, stream, errors='strict'):
+ codecs.StreamWriter.__init__(self, stream, errors)
+ self.encode = encode
+
+ def write(self, object):
+ if type(object) is str:
+ self.stream.write(object)
+ else:
+ data, _ = self.encode(object, self.errors)
+ self.stream.write(data)
+
+if sys.platform == 'win32':
+ def open_file(filename, mode='r', bufsize=-1):
+ """This function is used to override the ``open`` builtin.
+
+ But it uses O_NOINHERIT flag so the file handle is not inherited by
+ child processes. Deleting or renaming a closed file opened with this
+ function is not blocking child processes.
+ """
+ writing = 'w' in mode
+ appending = 'a' in mode
+ updating = '+' in mode
+ binary = 'b' in mode
+
+ flags = O_NOINHERIT
+ # see http://msdn.microsoft.com/en-us/library/yeby3zcb%28VS.71%29.aspx
+ # for flags for each modes.
+ if binary:
+ flags |= O_BINARY
+ else:
+ flags |= O_TEXT
+
+ if writing:
+ if updating:
+ flags |= os.O_RDWR
+ else:
+ flags |= os.O_WRONLY
+ flags |= os.O_CREAT | os.O_TRUNC
+ elif appending:
+ if updating:
+ flags |= os.O_RDWR
+ else:
+ flags |= os.O_WRONLY
+ flags |= os.O_CREAT | os.O_APPEND
+ else: #reading
+ if updating:
+ flags |= os.O_RDWR
+ else:
+ flags |= os.O_RDONLY
+
+ return os.fdopen(os.open(filename, flags), mode, bufsize)
+else:
+ open_file = open
+
+
+def available_backup_name(base, exists):
+ """Find a non-existing backup file name.
+
+ This will *not* create anything, this only returns a 'free' entry. This
+ should be used for checking names in a directory below a locked
+ tree/branch/repo to avoid race conditions. This is LBYL (Look Before You
+ Leap) and generally discouraged.
+
+ :param base: The base name.
+
+ :param exists: A callable returning True if the path parameter exists.
+ """
+ counter = 1
+ name = "%s.~%d~" % (base, counter)
+ while exists(name):
+ counter += 1
+ name = "%s.~%d~" % (base, counter)
+ return name
+
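+ # Usage sketch (not part of the original module): pick the first free name in
+ # the base.~N~ series without creating anything on disk.
+ #
+ # backup = available_backup_name('foo.conf', os.path.exists)
+ # # 'foo.conf.~1~' if that is free, otherwise 'foo.conf.~2~', and so on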
+
+def set_fd_cloexec(fd):
+ """Set a Unix file descriptor's FD_CLOEXEC flag. Do nothing if platform
+ support for this is not available.
+ """
+ try:
+ import fcntl
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
+ except (ImportError, AttributeError):
+ # Either the fcntl module or specific constants are not present
+ pass
+
+
+def find_executable_on_path(name):
+ """Finds an executable on the PATH.
+
+ On Windows, this will try to append each extension in the PATHEXT
+ environment variable to the name, if it cannot be found with the name
+ as given.
+
+ :param name: The base name of the executable.
+ :return: The path to the executable found or None.
+ """
+ if sys.platform == 'win32':
+ exts = os.environ.get('PATHEXT', '').split(os.pathsep)
+ exts = [ext.lower() for ext in exts]
+ base, ext = os.path.splitext(name)
+ if ext != '':
+ if ext.lower() not in exts:
+ return None
+ name = base
+ exts = [ext]
+ else:
+ exts = ['']
+ path = os.environ.get('PATH')
+ if path is not None:
+ path = path.split(os.pathsep)
+ for ext in exts:
+ for d in path:
+ f = os.path.join(d, name) + ext
+ if os.access(f, os.X_OK):
+ return f
+ if sys.platform == 'win32':
+ app_path = win32utils.get_app_path(name)
+ if app_path != name:
+ return app_path
+ return None
+
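+ # Example (a sketch, not in the original source): on POSIX each PATH entry is
+ # searched as-is; on Windows the PATHEXT extensions such as '.exe' are also
+ # tried.
+ #
+ # gpg_path = find_executable_on_path('gpg') # e.g. '/usr/bin/gpg', or None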
+
+def _posix_is_local_pid_dead(pid):
+ """True if pid doesn't correspond to live process on this machine"""
+ try:
+ # Special meaning of unix kill: just check if it's there.
+ os.kill(pid, 0)
+ except OSError, e:
+ if e.errno == errno.ESRCH:
+ # On this machine, and really not found: as sure as we can be
+ # that it's dead.
+ return True
+ elif e.errno == errno.EPERM:
+ # exists, though not ours
+ return False
+ else:
+ mutter("os.kill(%d, 0) failed: %s" % (pid, e))
+ # Don't really know.
+ return False
+ else:
+ # Exists and our process: not dead.
+ return False
+
+if sys.platform == "win32":
+ is_local_pid_dead = win32utils.is_local_pid_dead
+else:
+ is_local_pid_dead = _posix_is_local_pid_dead
+
+
+def fdatasync(fileno):
+ """Flush file contents to disk if possible.
+
+ :param fileno: Integer OS file handle.
+ :raises TransportNotPossible: If flushing to disk is not possible.
+ """
+ fn = getattr(os, 'fdatasync', getattr(os, 'fsync', None))
+ if fn is not None:
+ fn(fileno)
+
+
+def ensure_empty_directory_exists(path, exception_class):
+ """Make sure a local directory exists and is empty.
+
+ If it does not exist, it is created. If it exists and is not empty, an
+ instance of exception_class is raised.
+ """
+ try:
+ os.mkdir(path)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ if os.listdir(path) != []:
+ raise exception_class(path)
+
+
+def is_environment_error(evalue):
+ """True if exception instance is due to a process environment issue
+
+ This includes OSError and IOError, but also other errors that come from
+ the operating system or core libraries but are not subclasses of those.
+ """
+ if isinstance(evalue, (EnvironmentError, select.error)):
+ return True
+ if sys.platform == "win32" and win32utils._is_pywintypes_error(evalue):
+ return True
+ return False
diff --git a/bzrlib/pack.py b/bzrlib/pack.py
new file mode 100644
index 0000000..adadb8f
--- /dev/null
+++ b/bzrlib/pack.py
@@ -0,0 +1,537 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Container format for Bazaar data.
+
+"Containers" and "records" are described in
+doc/developers/container-format.txt.
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import re
+
+from bzrlib import errors
+
+
+FORMAT_ONE = "Bazaar pack format 1 (introduced in 0.18)"
+
+
+_whitespace_re = re.compile('[\t\n\x0b\x0c\r ]')
+
+
+def _check_name(name):
+ """Do some basic checking of 'name'.
+
+ At the moment, this just checks that there are no whitespace characters in a
+ name.
+
+ :raises InvalidRecordError: if name is not valid.
+ :seealso: _check_name_encoding
+ """
+ if _whitespace_re.search(name) is not None:
+ raise errors.InvalidRecordError("%r is not a valid name." % (name,))
+
+
+def _check_name_encoding(name):
+ """Check that 'name' is valid UTF-8.
+
+ This is separate from _check_name because UTF-8 decoding is relatively
+ expensive, and we usually want to avoid it.
+
+ :raises InvalidRecordError: if name is not valid UTF-8.
+ """
+ try:
+ name.decode('utf-8')
+ except UnicodeDecodeError, e:
+ raise errors.InvalidRecordError(str(e))
+
+
+class ContainerSerialiser(object):
+ """A helper class for serialising containers.
+
+ It simply returns bytes from method calls to 'begin', 'end' and
+ 'bytes_record'. You may find ContainerWriter to be a more convenient
+ interface.
+ """
+
+ def begin(self):
+ """Return the bytes to begin a container."""
+ return FORMAT_ONE + "\n"
+
+ def end(self):
+ """Return the bytes to finish a container."""
+ return "E"
+
+ def bytes_header(self, length, names):
+ """Return the header for a Bytes record."""
+ # Kind marker
+ byte_sections = ["B"]
+ # Length
+ byte_sections.append(str(length) + "\n")
+ # Names
+ for name_tuple in names:
+ # Make sure we're writing valid names. Note that we will leave a
+ # half-written record if a name is bad!
+ for name in name_tuple:
+ _check_name(name)
+ byte_sections.append('\x00'.join(name_tuple) + "\n")
+ # End of headers
+ byte_sections.append("\n")
+ return ''.join(byte_sections)
+
+ def bytes_record(self, bytes, names):
+ """Return the bytes for a Bytes record with the given name and
+ contents.
+
+ If the content may be large, construct the header separately and then
+ stream out the contents.
+ """
+ return self.bytes_header(len(bytes), names) + bytes
+
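+ # Worked example (illustrative, not part of the original module): a container
+ # holding a single 5-byte record named ('name1',) serialises to the format
+ # line, then 'B5\nname1\n\nhello', then the end marker 'E':
+ #
+ # s = ContainerSerialiser()
+ # data = s.begin() + s.bytes_record('hello', [('name1',)]) + s.end()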
+
+class ContainerWriter(object):
+ """A class for writing containers to a file.
+
+ :attribute records_written: The number of user records added to the
+ container. This does not count the prelude or suffix of the container
+ introduced by the begin() and end() methods.
+ """
+
+ # Join up headers with the body if writing fewer than this many bytes:
+ # trades off memory usage and copying to do less IO ops.
+ _JOIN_WRITES_THRESHOLD = 100000
+
+ def __init__(self, write_func):
+ """Constructor.
+
+ :param write_func: a callable that will be called when this
+ ContainerWriter needs to write some bytes.
+ """
+ self._write_func = write_func
+ self.current_offset = 0
+ self.records_written = 0
+ self._serialiser = ContainerSerialiser()
+
+ def begin(self):
+ """Begin writing a container."""
+ self.write_func(self._serialiser.begin())
+
+ def write_func(self, bytes):
+ self._write_func(bytes)
+ self.current_offset += len(bytes)
+
+ def end(self):
+ """Finish writing a container."""
+ self.write_func(self._serialiser.end())
+
+ def add_bytes_record(self, bytes, names):
+ """Add a Bytes record with the given names.
+
+ :param bytes: The bytes to insert.
+ :param names: The names to give the inserted bytes. Each name is
+ a tuple of bytestrings. The bytestrings may not contain
+ whitespace.
+ :return: An offset, length tuple. The offset is the offset
+ of the record within the container, and the length is the
+ length of data that will need to be read to reconstitute the
+ record. These offset and length can only be used with the pack
+ interface - they might be offset by headers or other such details
+ and thus are only suitable for use by a ContainerReader.
+ """
+ current_offset = self.current_offset
+ length = len(bytes)
+ if length < self._JOIN_WRITES_THRESHOLD:
+ self.write_func(self._serialiser.bytes_header(length, names)
+ + bytes)
+ else:
+ self.write_func(self._serialiser.bytes_header(length, names))
+ self.write_func(bytes)
+ self.records_written += 1
+ # return a memo of where we wrote data to allow random access.
+ return current_offset, self.current_offset - current_offset
+
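+ # Usage sketch (not in the original source): write two records and keep the
+ # (offset, length) memos for later random access via make_readv_reader below.
+ #
+ # output = StringIO()
+ # writer = ContainerWriter(output.write)
+ # writer.begin()
+ # memo1 = writer.add_bytes_record('first', [('rev-1',)])
+ # memo2 = writer.add_bytes_record('second', [('rev-2',)])
+ # writer.end()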
+
+class ReadVFile(object):
+ """Adapt a readv result iterator to a file like protocol.
+
+ The readv result must support the iterator protocol returning (offset,
+ data_bytes) pairs.
+ """
+
+ # XXX: This could be a generic transport class, as other code may want to
+ # gradually consume the readv result.
+
+ def __init__(self, readv_result):
+ """Construct a new ReadVFile wrapper.
+
+ :seealso: make_readv_reader
+
+ :param readv_result: the most recent readv result - list or generator
+ """
+ # readv can return a sequence or an iterator, but we require an
+ # iterator to know how much has been consumed.
+ readv_result = iter(readv_result)
+ self.readv_result = readv_result
+ self._string = None
+
+ def _next(self):
+ if (self._string is None or
+ self._string.tell() == self._string_length):
+ offset, data = self.readv_result.next()
+ self._string_length = len(data)
+ self._string = StringIO(data)
+
+ def read(self, length):
+ self._next()
+ result = self._string.read(length)
+ if len(result) < length:
+ raise errors.BzrError('wanted %d bytes but next '
+ 'hunk only contains %d: %r...' %
+ (length, len(result), result[:20]))
+ return result
+
+ def readline(self):
+ """Note that readline will not cross readv segments."""
+ self._next()
+ result = self._string.readline()
+ if self._string.tell() == self._string_length and result[-1] != '\n':
+ raise errors.BzrError('short readline in the readvfile hunk: %r'
+ % (result, ))
+ return result
+
+
+def make_readv_reader(transport, filename, requested_records):
+ """Create a ContainerReader that will read selected records only.
+
+ :param transport: The transport the pack file is located on.
+ :param filename: The filename of the pack file.
+ :param requested_records: The record offset, length tuples as returned
+ by add_bytes_record for the desired records.
+ """
+ readv_blocks = [(0, len(FORMAT_ONE)+1)]
+ readv_blocks.extend(requested_records)
+ result = ContainerReader(ReadVFile(
+ transport.readv(filename, readv_blocks)))
+ return result
+
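+ # Continuing the sketch above (not part of the original module; 'transport'
+ # and the memos are hypothetical): only the requested records are read back.
+ #
+ # reader = make_readv_reader(transport, 'foo.pack', [memo1, memo2])
+ # for names, read_bytes in reader.iter_records():
+ # data = read_bytes(None)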
+
+class BaseReader(object):
+
+ def __init__(self, source_file):
+ """Constructor.
+
+ :param source_file: a file-like object with `read` and `readline`
+ methods.
+ """
+ self._source = source_file
+
+ def reader_func(self, length=None):
+ return self._source.read(length)
+
+ def _read_line(self):
+ line = self._source.readline()
+ if not line.endswith('\n'):
+ raise errors.UnexpectedEndOfContainerError()
+ return line.rstrip('\n')
+
+
+class ContainerReader(BaseReader):
+ """A class for reading Bazaar's container format."""
+
+ def iter_records(self):
+ """Iterate over the container, yielding each record as it is read.
+
+ Each yielded record will be a 2-tuple of (names, callable), where names
+ is a ``list`` and the callable is a function that takes one argument,
+ ``max_length``.
+
+ You **must not** call the callable after advancing the iterator to the
+ next record. That is, this code is invalid::
+
+ record_iter = container.iter_records()
+ names1, callable1 = record_iter.next()
+ names2, callable2 = record_iter.next()
+ bytes1 = callable1(None)
+
+ As it will give incorrect results and invalidate the state of the
+ ContainerReader.
+
+ :raises ContainerError: if any sort of container corruption is
+ detected, e.g. UnknownContainerFormatError if the format of the
+ container is unrecognised.
+ :seealso: ContainerReader.read
+ """
+ self._read_format()
+ return self._iter_records()
+
+ def iter_record_objects(self):
+ """Iterate over the container, yielding each record as it is read.
+
+ Each yielded record will be an object with ``read`` and ``validate``
+ methods. Like with iter_records, it is not safe to use a record object
+ after advancing the iterator to yield next record.
+
+ :raises ContainerError: if any sort of container corruption is
+ detected, e.g. UnknownContainerFormatError if the format of the
+ container is unrecognised.
+ :seealso: iter_records
+ """
+ self._read_format()
+ return self._iter_record_objects()
+
+ def _iter_records(self):
+ for record in self._iter_record_objects():
+ yield record.read()
+
+ def _iter_record_objects(self):
+ while True:
+ record_kind = self.reader_func(1)
+ if record_kind == 'B':
+ # Bytes record.
+ reader = BytesRecordReader(self._source)
+ yield reader
+ elif record_kind == 'E':
+ # End marker. There are no more records.
+ return
+ elif record_kind == '':
+ # End of stream encountered, but no End Marker record seen, so
+ # this container is incomplete.
+ raise errors.UnexpectedEndOfContainerError()
+ else:
+ # Unknown record type.
+ raise errors.UnknownRecordTypeError(record_kind)
+
+ def _read_format(self):
+ format = self._read_line()
+ if format != FORMAT_ONE:
+ raise errors.UnknownContainerFormatError(format)
+
+ def validate(self):
+ """Validate this container and its records.
+
+ Validating consumes the data stream just like iter_records and
+ iter_record_objects, so you cannot call it after
+ iter_records/iter_record_objects.
+
+ :raises ContainerError: if something is invalid.
+ """
+ all_names = set()
+ for record_names, read_bytes in self.iter_records():
+ read_bytes(None)
+ for name_tuple in record_names:
+ for name in name_tuple:
+ _check_name_encoding(name)
+ # Check that the name is unique. Note that Python will refuse
+ # to decode non-shortest forms of UTF-8 encoding, so there is no
+ # risk that the same unicode string has been encoded two
+ # different ways.
+ if name_tuple in all_names:
+ raise errors.DuplicateRecordNameError(name_tuple[0])
+ all_names.add(name_tuple)
+ excess_bytes = self.reader_func(1)
+ if excess_bytes != '':
+ raise errors.ContainerHasExcessDataError(excess_bytes)
+
+
+class BytesRecordReader(BaseReader):
+
+ def read(self):
+ """Read this record.
+
+ You can either validate or read a record, you can't do both.
+
+ :returns: A tuple of (names, callable). The callable can be called
+ repeatedly to obtain the bytes for the record, with a max_length
+ argument. If max_length is None, returns all the bytes. Because
+ records can be arbitrarily large, using None is not recommended
+ unless you have reason to believe the content will fit in memory.
+ """
+ # Read the content length.
+ length_line = self._read_line()
+ try:
+ length = int(length_line)
+ except ValueError:
+ raise errors.InvalidRecordError(
+ "%r is not a valid length." % (length_line,))
+
+ # Read the list of names.
+ names = []
+ while True:
+ name_line = self._read_line()
+ if name_line == '':
+ break
+ name_tuple = tuple(name_line.split('\x00'))
+ for name in name_tuple:
+ _check_name(name)
+ names.append(name_tuple)
+
+ self._remaining_length = length
+ return names, self._content_reader
+
+ def _content_reader(self, max_length):
+ if max_length is None:
+ length_to_read = self._remaining_length
+ else:
+ length_to_read = min(max_length, self._remaining_length)
+ self._remaining_length -= length_to_read
+ bytes = self.reader_func(length_to_read)
+ if len(bytes) != length_to_read:
+ raise errors.UnexpectedEndOfContainerError()
+ return bytes
+
+ def validate(self):
+ """Validate this record.
+
+ You can either validate or read, you can't do both.
+
+ :raises ContainerError: if this record is invalid.
+ """
+ names, read_bytes = self.read()
+ for name_tuple in names:
+ for name in name_tuple:
+ _check_name_encoding(name)
+ read_bytes(None)
+
+
+class ContainerPushParser(object):
+ """A "push" parser for container format 1.
+
+ It accepts bytes via the ``accept_bytes`` method, and parses them into
+ records which can be retrieved via the ``read_pending_records`` method.
+ """
+
+ def __init__(self):
+ self._buffer = ''
+ self._state_handler = self._state_expecting_format_line
+ self._parsed_records = []
+ self._reset_current_record()
+ self.finished = False
+
+ def _reset_current_record(self):
+ self._current_record_length = None
+ self._current_record_names = []
+
+ def accept_bytes(self, bytes):
+ self._buffer += bytes
+ # Keep iterating the state machine until it stops consuming bytes from
+ # the buffer.
+ last_buffer_length = None
+ cur_buffer_length = len(self._buffer)
+ last_state_handler = None
+ while (cur_buffer_length != last_buffer_length
+ or last_state_handler != self._state_handler):
+ last_buffer_length = cur_buffer_length
+ last_state_handler = self._state_handler
+ self._state_handler()
+ cur_buffer_length = len(self._buffer)
+
+ def read_pending_records(self, max=None):
+ if max:
+ records = self._parsed_records[:max]
+ del self._parsed_records[:max]
+ return records
+ else:
+ records = self._parsed_records
+ self._parsed_records = []
+ return records
+
+ def _consume_line(self):
+ """Take a line out of the buffer, and return the line.
+
+ If a newline byte is not found in the buffer, the buffer is
+ unchanged and this returns None instead.
+ """
+ newline_pos = self._buffer.find('\n')
+ if newline_pos != -1:
+ line = self._buffer[:newline_pos]
+ self._buffer = self._buffer[newline_pos+1:]
+ return line
+ else:
+ return None
+
+ def _state_expecting_format_line(self):
+ line = self._consume_line()
+ if line is not None:
+ if line != FORMAT_ONE:
+ raise errors.UnknownContainerFormatError(line)
+ self._state_handler = self._state_expecting_record_type
+
+ def _state_expecting_record_type(self):
+ if len(self._buffer) >= 1:
+ record_type = self._buffer[0]
+ self._buffer = self._buffer[1:]
+ if record_type == 'B':
+ self._state_handler = self._state_expecting_length
+ elif record_type == 'E':
+ self.finished = True
+ self._state_handler = self._state_expecting_nothing
+ else:
+ raise errors.UnknownRecordTypeError(record_type)
+
+ def _state_expecting_length(self):
+ line = self._consume_line()
+ if line is not None:
+ try:
+ self._current_record_length = int(line)
+ except ValueError:
+ raise errors.InvalidRecordError(
+ "%r is not a valid length." % (line,))
+ self._state_handler = self._state_expecting_name
+
+ def _state_expecting_name(self):
+ encoded_name_parts = self._consume_line()
+ if encoded_name_parts == '':
+ self._state_handler = self._state_expecting_body
+ elif encoded_name_parts:
+ name_parts = tuple(encoded_name_parts.split('\x00'))
+ for name_part in name_parts:
+ _check_name(name_part)
+ self._current_record_names.append(name_parts)
+
+ def _state_expecting_body(self):
+ if len(self._buffer) >= self._current_record_length:
+ body_bytes = self._buffer[:self._current_record_length]
+ self._buffer = self._buffer[self._current_record_length:]
+ record = (self._current_record_names, body_bytes)
+ self._parsed_records.append(record)
+ self._reset_current_record()
+ self._state_handler = self._state_expecting_record_type
+
+ def _state_expecting_nothing(self):
+ pass
+
+ def read_size_hint(self):
+ hint = 16384
+ if self._state_handler == self._state_expecting_body:
+ remaining = self._current_record_length - len(self._buffer)
+ if remaining < 0:
+ remaining = 0
+ return max(hint, remaining)
+ return hint
+
+
+def iter_records_from_file(source_file):
+ parser = ContainerPushParser()
+ while True:
+ bytes = source_file.read(parser.read_size_hint())
+ parser.accept_bytes(bytes)
+ for record in parser.read_pending_records():
+ yield record
+ if parser.finished:
+ break
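+
+ # Usage sketch (not part of the original module): stream records out of a
+ # pack file without loading the whole file into memory.
+ #
+ # f = open('foo.pack', 'rb')
+ # for names, body in iter_records_from_file(f):
+ # pass # each record is (list of name tuples, body bytes)
+ # f.close()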
+
diff --git a/bzrlib/patch.py b/bzrlib/patch.py
new file mode 100644
index 0000000..94d4a64
--- /dev/null
+++ b/bzrlib/patch.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import errno
+import os
+from subprocess import Popen, PIPE
+
+from bzrlib.errors import NoDiff3
+from bzrlib.textfile import check_text_path
+
+"""Diff and patch functionality"""
+
+__docformat__ = "restructuredtext"
+
+
+_do_close_fds = True
+if os.name == 'nt':
+ _do_close_fds = False
+
+
+def write_to_cmd(args, input=""):
+ """Spawn a process, and wait for the result
+
+ If the process is killed, an exception is raised
+
+ :param args: The command line, the first entry should be the program name
+ :param input: [optional] The text to send the process on stdin
+ :return: (stdout, stderr, status)
+ """
+ process = Popen(args, bufsize=len(input), stdin=PIPE, stdout=PIPE,
+ stderr=PIPE, close_fds=_do_close_fds)
+ stdout, stderr = process.communicate(input)
+ status = process.wait()
+ if status < 0:
+ raise Exception("%s killed by signal %i" (args[0], -status))
+ return stdout, stderr, status
+
+
+def patch(patch_contents, filename, output_filename=None, reverse=False):
+ """Apply a patch to a file, to produce another output file. This is should
+ be suitable for our limited purposes.
+
+ :param patch_contents: The contents of the patch to apply
+ :type patch_contents: str
+ :param filename: the name of the file to apply the patch to
+ :type filename: str
+ :param output_filename: The filename to produce. If None, file is \
+ modified in-place
+ :type output_filename: str or NoneType
+ :param reverse: If true, apply the patch in reverse
+ :type reverse: bool
+ :return: 0 on success, 1 if some hunks failed
+ """
+ args = ["patch", "-f", "-s", "--posix", "--binary"]
+ if reverse:
+ args.append("--reverse")
+ if output_filename is not None:
+ args.extend(("-o", output_filename))
+ args.append(filename)
+ stdout, stderr, status = write_to_cmd(args, patch_contents)
+ return status
+
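+ # Usage sketch (not in the original source; requires the external 'patch'
+ # program): apply a unified diff held in the hypothetical string 'diff_text'
+ # to foo.c, writing the result to foo.c.new.
+ #
+ # status = patch(diff_text, 'foo.c', output_filename='foo.c.new')
+ # # 0 on success, 1 if some hunks failed to apply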
+
+def diff3(out_file, mine_path, older_path, yours_path):
+ def add_label(args, label):
+ args.extend(("-L", label))
+ check_text_path(mine_path)
+ check_text_path(older_path)
+ check_text_path(yours_path)
+ args = ['diff3', "-E", "--merge"]
+ add_label(args, "TREE")
+ add_label(args, "ANCESTOR")
+ add_label(args, "MERGE-SOURCE")
+ args.extend((mine_path, older_path, yours_path))
+ try:
+ output, stderr, status = write_to_cmd(args)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise NoDiff3
+ else:
+ raise
+ if status not in (0, 1):
+ raise Exception(stderr)
+ f = open(out_file, 'wb')
+ try:
+ f.write(output)
+ finally:
+ f.close()
+ return status
diff --git a/bzrlib/patches.py b/bzrlib/patches.py
new file mode 100644
index 0000000..80410bd
--- /dev/null
+++ b/bzrlib/patches.py
@@ -0,0 +1,474 @@
+# Copyright (C) 2005-2010 Aaron Bentley, Canonical Ltd
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.errors import (
+ BinaryFiles,
+ MalformedHunkHeader,
+ MalformedLine,
+ MalformedPatchHeader,
+ PatchConflict,
+ PatchSyntax,
+ )
+
+import re
+
+
+binary_files_re = 'Binary files (.*) and (.*) differ\n'
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ match = re.match(binary_files_re, line)
+ if match is not None:
+ raise BinaryFiles(match.group(1), match.group(2))
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
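+ # Examples (illustrative, not in the original source):
+ #
+ # parse_range("5,3") # -> (5, 3)
+ # parse_range("7") # -> (7, 1), a bare position implies a range of 1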
+
+def hunk_from_header(line):
+ matches = re.match(r'\@\@ ([^@]*) \@\@( (.*))?\n', line)
+ if matches is None:
+ raise MalformedHunkHeader("Does not match format.", line)
+ try:
+ (orig, mod) = matches.group(1).split(" ")
+ except (ValueError, IndexError), e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except (ValueError, IndexError), e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ tail = matches.group(3)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range, tail)
+
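+ # Example (a sketch, not in the original source):
+ #
+ # h = hunk_from_header('@@ -34,11 +50,6 @@\n')
+ # (h.orig_pos, h.orig_range, h.mod_pos, h.mod_range) # -> (34, 11, 50, 6)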
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range, tail=None):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.tail = tail
+ self.lines = []
+
+ def get_header(self):
+ if self.tail is None:
+ tail_str = ''
+ else:
+ tail_str = ' ' + self.tail
+ return "@@ -%s +%s @@%s\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range),
+ tail_str)
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4 except when range == pos == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+
+def iter_hunks(iter_lines, allow_dirty=False):
+ '''
+ :arg iter_lines: iterable of lines to parse for hunks
+ :kwarg allow_dirty: If True, when we encounter something that is not
+ a hunk header when we're looking for one, assume the rest of the lines
+ are not part of the patch (comments or other junk). Default False
+ '''
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ try:
+ hunk = hunk_from_header(line)
+ except MalformedHunkHeader:
+ if allow_dirty:
+ # If the line isn't a hunk header, then we've reached the end
+ # of this patch and there's "junk" at the end. Ignore the
+ # rest of this patch.
+ return
+ raise
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+
+class BinaryPatch(object):
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+
+ def __str__(self):
+ return 'Binary files %s and %s differ\n' % (self.oldname, self.newname)
+
+
+class Patch(BinaryPatch):
+
+ def __init__(self, oldname, newname):
+ BinaryPatch.__init__(self, oldname, newname)
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_values(self):
+ """Calculate the number of inserts and removes."""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts += 1
+ elif isinstance(line, RemoveLine):
+ removes += 1
+ return (inserts, removes, len(self.hunks))
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ return "%i inserts, %i removes in %i hunks" % \
+ self.stats_values()
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+
+def parse_patch(iter_lines, allow_dirty=False):
+ '''
+ :arg iter_lines: iterable of lines to parse
+ :kwarg allow_dirty: If True, allow the patch to have trailing junk.
+ Default False
+ '''
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ try:
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ except BinaryFiles, e:
+ return BinaryPatch(e.orig_name, e.mod_name)
+ else:
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines, allow_dirty):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines, allow_dirty=False):
+ '''
+ :arg iter_lines: iterable of lines to parse for patches
+ :kwarg allow_dirty: If True, allow comments and other non-patch text
+ before the first patch. Note that the algorithm here can only find
+ such text before any patches have been found. Comments after the
+ first patch are stripped away in iter_hunks() if it is also passed
+ allow_dirty=True. Default False.
+ '''
+ ### FIXME: Docstring is not quite true. We allow certain comments no
+ # matter what, if they start with '===', '***', or '#'. Someone should
+ # reexamine this logic and decide if we should include those in
+ # allow_dirty or restrict those to only being before the patch is found
+ # (as allow_dirty does).
+ regex = re.compile(binary_files_re)
+ saved_lines = []
+ orig_range = 0
+ beginning = True
+ for line in iter_lines:
+ if line.startswith('=== ') or line.startswith('*** '):
+ continue
+ if line.startswith('#'):
+ continue
+ elif orig_range > 0:
+ if line.startswith('-') or line.startswith(' '):
+ orig_range -= 1
+ elif line.startswith('--- ') or regex.match(line):
+ if allow_dirty and beginning:
+ # Patches can have "junk" at the beginning
+ # Stripping junk from the end of patches is handled when we
+ # parse the patch
+ beginning = False
+ elif len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ elif line.startswith('@@'):
+ hunk = hunk_from_header(line)
+ orig_range = hunk.orig_range
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ if not last_line.endswith('\n'):
+ raise AssertionError()
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines, allow_dirty=False):
+ '''
+ :arg iter_lines: iterable of lines to parse for patches
+ :kwarg allow_dirty: If True, allow text that's not part of the patch at
+ selected places. This includes comments before and after a patch
+ for instance. Default False.
+ '''
+ return [parse_patch(f.__iter__(), allow_dirty) for f in
+ iter_file_patch(iter_lines, allow_dirty)]
+
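+ # Usage sketch (not part of the original module): parse the text of a
+ # multi-file diff (hypothetical string 'diff_text') into Patch objects.
+ #
+ # patches = parse_patches(diff_text.splitlines(True))
+ # for p in patches:
+ # print p.oldname, p.newname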
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs between two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i
+ return None
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ patch_lines = iter_lines_handle_nl(iter(patch_lines))
+ get_patch_names(patch_lines)
+ return iter_patched_from_hunks(orig_lines, iter_hunks(patch_lines))
+
+
+def iter_patched_from_hunks(orig_lines, hunks):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+
+ :param orig_lines: The unpatched lines.
+ :param hunks: An iterable of Hunk instances.
+ """
+ seen_patch = []
+ line_no = 1
+ if orig_lines is not None:
+ orig_lines = iter(orig_lines)
+ for hunk in hunks:
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ if not isinstance(hunk_line, RemoveLine):
+ raise AssertionError(hunk_line)
+ line_no += 1
+ if orig_lines is not None:
+ for line in orig_lines:
+ yield line
diff --git a/bzrlib/patiencediff.py b/bzrlib/patiencediff.py
new file mode 100755
index 0000000..9dd4e54
--- /dev/null
+++ b/bzrlib/patiencediff.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import os
+import sys
+import time
+import difflib
+""")
+
+
+__all__ = ['PatienceSequenceMatcher', 'unified_diff', 'unified_diff_files']
+
+
+# This is a version of unified_diff which only adds a factory parameter
+# so that you can override the default SequenceMatcher
+# this has been submitted as a patch to python
+def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
+ tofiledate='', n=3, lineterm='\n',
+ sequencematcher=None):
+ r"""
+ Compare two sequences of lines; generate the delta as a unified diff.
+
+ Unified diffs are a compact way of showing line changes and a few
+ lines of context. The number of context lines is set by 'n' which
+ defaults to three.
+
+ By default, the diff control lines (those with ---, +++, or @@) are
+ created with a trailing newline. This is helpful so that inputs
+ created from file.readlines() result in diffs that are suitable for
+ file.writelines() since both the inputs and outputs have trailing
+ newlines.
+
+ For inputs that do not have trailing newlines, set the lineterm
+ argument to "" so that the output will be uniformly newline free.
+
+ The unidiff format normally has a header for filenames and modification
+ times. Any or all of these may be specified using strings for
+ 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
+ times are normally expressed in the format returned by time.ctime().
+
+ Example:
+
+ >>> for line in unified_diff('one two three four'.split(),
+ ... 'zero one tree four'.split(), 'Original', 'Current',
+ ... 'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
+ ... lineterm=''):
+ ... print line
+ --- Original Sat Jan 26 23:30:50 1991
+ +++ Current Fri Jun 06 10:20:52 2003
+ @@ -1,4 +1,4 @@
+ +zero
+ one
+ -two
+ -three
+ +tree
+ four
+ """
+ if sequencematcher is None:
+ sequencematcher = difflib.SequenceMatcher
+
+ if fromfiledate:
+ fromfiledate = '\t' + str(fromfiledate)
+ if tofiledate:
+ tofiledate = '\t' + str(tofiledate)
+
+ started = False
+    for group in sequencematcher(None, a, b).get_grouped_opcodes(n):
+ if not started:
+ yield '--- %s%s%s' % (fromfile, fromfiledate, lineterm)
+ yield '+++ %s%s%s' % (tofile, tofiledate, lineterm)
+ started = True
+ i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
+ yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm)
+ for tag, i1, i2, j1, j2 in group:
+ if tag == 'equal':
+ for line in a[i1:i2]:
+ yield ' ' + line
+ continue
+ if tag == 'replace' or tag == 'delete':
+ for line in a[i1:i2]:
+ yield '-' + line
+ if tag == 'replace' or tag == 'insert':
+ for line in b[j1:j2]:
+ yield '+' + line
+
+
+def unified_diff_files(a, b, sequencematcher=None):
+ """Generate the diff for two files.
+ """
+ # Should this actually be an error?
+ if a == b:
+ return []
+ if a == '-':
+ file_a = sys.stdin
+ time_a = time.time()
+ else:
+ file_a = open(a, 'rb')
+ time_a = os.stat(a).st_mtime
+
+ if b == '-':
+ file_b = sys.stdin
+ time_b = time.time()
+ else:
+ file_b = open(b, 'rb')
+ time_b = os.stat(b).st_mtime
+
+ # TODO: Include fromfiledate and tofiledate
+ return unified_diff(file_a.readlines(), file_b.readlines(),
+ fromfile=a, tofile=b,
+ sequencematcher=sequencematcher)
+
+
+try:
+ from bzrlib._patiencediff_c import (
+ unique_lcs_c as unique_lcs,
+ recurse_matches_c as recurse_matches,
+ PatienceSequenceMatcher_c as PatienceSequenceMatcher
+ )
+except ImportError:
+ from bzrlib._patiencediff_py import (
+ unique_lcs_py as unique_lcs,
+ recurse_matches_py as recurse_matches,
+ PatienceSequenceMatcher_py as PatienceSequenceMatcher
+ )
+
+
+def main(args):
+ import optparse
+ p = optparse.OptionParser(usage='%prog [options] file_a file_b'
+ '\nFiles can be "-" to read from stdin')
+ p.add_option('--patience', dest='matcher', action='store_const', const='patience',
+ default='patience', help='Use the patience difference algorithm')
+ p.add_option('--difflib', dest='matcher', action='store_const', const='difflib',
+ default='patience', help='Use python\'s difflib algorithm')
+
+ algorithms = {'patience':PatienceSequenceMatcher, 'difflib':difflib.SequenceMatcher}
+
+ (opts, args) = p.parse_args(args)
+ matcher = algorithms[opts.matcher]
+
+ if len(args) != 2:
+ print 'You must supply 2 filenames to diff'
+ return -1
+
+ for line in unified_diff_files(args[0], args[1], sequencematcher=matcher):
+ sys.stdout.write(line)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
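
A short usage sketch for patiencediff: the patience matcher is passed to
unified_diff through the sequencematcher parameter (the input lines and file
labels below are invented for illustration):

    import sys
    from bzrlib.patiencediff import PatienceSequenceMatcher, unified_diff

    a = ['a\n', 'b\n', 'c\n']
    b = ['a\n', 'c\n', 'd\n']
    for line in unified_diff(a, b, fromfile='old', tofile='new',
                             sequencematcher=PatienceSequenceMatcher):
        sys.stdout.write(line)
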
diff --git a/bzrlib/plugin.py b/bzrlib/plugin.py
new file mode 100644
index 0000000..1b8d8e9
--- /dev/null
+++ b/bzrlib/plugin.py
@@ -0,0 +1,677 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""bzr python plugin support.
+
+When load_plugins() is invoked, any python module in any directory in
+$BZR_PLUGIN_PATH will be imported. The module will be imported as
+'bzrlib.plugins.$BASENAME(PLUGIN)'. In the plugin's main body, it should
+update any bzrlib registries it wants to extend.
+
+See the plugin-api developer documentation for information about writing
+plugins.
+
+BZR_PLUGIN_PATH is also honoured for any plugins imported via
+'import bzrlib.plugins.PLUGINNAME', as long as set_plugins_path has been
+called.
+"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+from bzrlib import osutils
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import imp
+import re
+import types
+
+from bzrlib import (
+ _format_version_tuple,
+ config,
+ debug,
+ errors,
+ trace,
+ )
+from bzrlib.i18n import gettext
+from bzrlib import plugins as _mod_plugins
+""")
+
+
+DEFAULT_PLUGIN_PATH = None
+_loaded = False
+_plugins_disabled = False
+
+
+plugin_warnings = {}
+# Map from plugin name to a list of string warnings about e.g. plugin
+# dependencies.
+
+
+def are_plugins_disabled():
+ return _plugins_disabled
+
+
+def disable_plugins():
+ """Disable loading plugins.
+
+ Future calls to load_plugins() will be ignored.
+ """
+ global _plugins_disabled
+ _plugins_disabled = True
+ load_plugins([])
+
+
+def describe_plugins(show_paths=False):
+ """Generate text description of plugins.
+
+ Includes both those that have loaded, and those that failed to
+ load.
+
+    :param show_paths: If true, include the path each plugin was loaded from.
+ :returns: Iterator of text lines (including newlines.)
+ """
+ from inspect import getdoc
+ loaded_plugins = plugins()
+ all_names = sorted(list(set(
+ loaded_plugins.keys() + plugin_warnings.keys())))
+ for name in all_names:
+ if name in loaded_plugins:
+ plugin = loaded_plugins[name]
+ version = plugin.__version__
+ if version == 'unknown':
+ version = ''
+ yield '%s %s\n' % (name, version)
+ d = getdoc(plugin.module)
+ if d:
+ doc = d.split('\n')[0]
+ else:
+ doc = '(no description)'
+ yield (" %s\n" % doc)
+ if show_paths:
+ yield (" %s\n" % plugin.path())
+ del plugin
+ else:
+ yield "%s (failed to load)\n" % name
+ if name in plugin_warnings:
+ for line in plugin_warnings[name]:
+ yield " ** " + line + '\n'
+ yield '\n'
+
+
+def _strip_trailing_sep(path):
+ return path.rstrip("\\/")
+
+
+def _get_specific_plugin_paths(paths):
+ """Returns the plugin paths from a string describing the associations.
+
+ :param paths: A string describing the paths associated with the plugins.
+
+ :returns: A list of (plugin name, path) tuples.
+
+ For example, if paths is my_plugin@/test/my-test:her_plugin@/production/her,
+ [('my_plugin', '/test/my-test'), ('her_plugin', '/production/her')]
+ will be returned.
+
+    Note that the ':' separator in the example above is os.pathsep and thus
+    depends on the OS.
+ """
+ if not paths:
+ return []
+ specs = []
+ for spec in paths.split(os.pathsep):
+ try:
+ name, path = spec.split('@')
+ except ValueError:
+ raise errors.BzrCommandError(gettext(
+ '"%s" is not a valid <plugin_name>@<plugin_path> description ')
+ % spec)
+ specs.append((name, path))
+ return specs
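
A quick doctest-style illustration of the parsing above, on a platform where
os.pathsep is ':':

    >>> _get_specific_plugin_paths(
    ...     'my_plugin@/test/my-test:her_plugin@/production/her')
    [('my_plugin', '/test/my-test'), ('her_plugin', '/production/her')]
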
+
+
+def set_plugins_path(path=None):
+ """Set the path for plugins to be loaded from.
+
+ :param path: The list of paths to search for plugins. By default,
+ path will be determined using get_standard_plugins_path.
+ if path is [], no plugins can be loaded.
+ """
+ if path is None:
+ path = get_standard_plugins_path()
+ _mod_plugins.__path__ = path
+ PluginImporter.reset()
+ # Set up a blacklist for disabled plugins
+ disabled_plugins = os.environ.get('BZR_DISABLE_PLUGINS', None)
+ if disabled_plugins is not None:
+ for name in disabled_plugins.split(os.pathsep):
+ PluginImporter.blacklist.add('bzrlib.plugins.' + name)
+    # Set up the specific paths for plugins
+ for plugin_name, plugin_path in _get_specific_plugin_paths(os.environ.get(
+ 'BZR_PLUGINS_AT', None)):
+ PluginImporter.specific_paths[
+ 'bzrlib.plugins.%s' % plugin_name] = plugin_path
+ return path
+
+
+def _append_new_path(paths, new_path):
+    """Append a new path if it is set and not already known."""
+ if new_path is not None and new_path not in paths:
+ paths.append(new_path)
+ return paths
+
+
+def get_core_plugin_path():
+ core_path = None
+ bzr_exe = bool(getattr(sys, 'frozen', None))
+ if bzr_exe: # expand path for bzr.exe
+ # We need to use relative path to system-wide plugin
+ # directory because bzrlib from standalone bzr.exe
+ # could be imported by another standalone program
+ # (e.g. bzr-config; or TortoiseBzr/Olive if/when they
+ # will become standalone exe). [bialix 20071123]
+ # __file__ typically is
+ # C:\Program Files\Bazaar\lib\library.zip\bzrlib\plugin.pyc
+ # then plugins directory is
+ # C:\Program Files\Bazaar\plugins
+ # so relative path is ../../../plugins
+ core_path = osutils.abspath(osutils.pathjoin(
+ osutils.dirname(__file__), '../../../plugins'))
+ else: # don't look inside library.zip
+ # search the plugin path before the bzrlib installed dir
+ core_path = os.path.dirname(_mod_plugins.__file__)
+ return core_path
+
+
+def get_site_plugin_path():
+ """Returns the path for the site installed plugins."""
+ if sys.platform == 'win32':
+ # We don't have (yet) a good answer for windows since that is certainly
+ # related to the way we build the installers. -- vila20090821
+ return None
+ site_path = None
+ try:
+ from distutils.sysconfig import get_python_lib
+ except ImportError:
+        # If distutils is not available, we just don't know where they are
+ pass
+ else:
+ site_path = osutils.pathjoin(get_python_lib(), 'bzrlib', 'plugins')
+ return site_path
+
+
+def get_user_plugin_path():
+ return osutils.pathjoin(config.config_dir(), 'plugins')
+
+
+def get_standard_plugins_path():
+ """Determine a plugin path suitable for general use."""
+    # Ad-hoc default: core is not overridden by site, but the user can override both.
+ # The rationale is that:
+ # - 'site' comes last, because these plugins should always be available and
+ # are supposed to be in sync with the bzr installed on site.
+ # - 'core' comes before 'site' so that running bzr from sources or a user
+ # installed version overrides the site version.
+ # - 'user' comes first, because... user is always right.
+    # - the above rules clearly define which plugin version will be loaded if
+    #   several exist. Yet, it is sometimes desirable to disable some directory
+    #   so that a whole set of plugins is disabled at once. This can be done via
+    #   -site, -core, -user.
+
+ env_paths = os.environ.get('BZR_PLUGIN_PATH', '+user').split(os.pathsep)
+ defaults = ['+core', '+site']
+
+ # The predefined references
+ refs = dict(core=get_core_plugin_path(),
+ site=get_site_plugin_path(),
+ user=get_user_plugin_path())
+
+ # Unset paths that should be removed
+    for k, v in refs.iteritems():
+ removed = '-%s' % k
+ # defaults can never mention removing paths as that will make it
+ # impossible for the user to revoke these removals.
+ if removed in env_paths:
+ env_paths.remove(removed)
+ refs[k] = None
+
+ # Expand references
+ paths = []
+ for p in env_paths + defaults:
+ if p.startswith('+'):
+ # Resolve references if they are known
+ try:
+ p = refs[p[1:]]
+ except KeyError:
+ # Leave them untouched so user can still use paths starting
+ # with '+'
+ pass
+ _append_new_path(paths, p)
+
+ # Get rid of trailing slashes, since Python can't handle them when
+ # it tries to import modules.
+ paths = map(_strip_trailing_sep, paths)
+ return paths
+
+
+def load_plugins(path=None):
+ """Load bzrlib plugins.
+
+ The environment variable BZR_PLUGIN_PATH is considered a delimited
+ set of paths to look through. Each entry is searched for `*.py`
+    files (and whatever other extensions are used on the platform,
+ such as `*.pyd`).
+
+ load_from_path() provides the underlying mechanism and is called with
+ the default directory list to provide the normal behaviour.
+
+ :param path: The list of paths to search for plugins. By default,
+ path will be determined using get_standard_plugins_path.
+ if path is [], no plugins can be loaded.
+ """
+ global _loaded
+ if _loaded:
+        # People can make sure plugins are loaded; they just won't be loaded twice
+ return
+ _loaded = True
+
+ # scan for all plugins in the path.
+ load_from_path(set_plugins_path(path))
+
+
+def load_from_path(dirs):
+ """Load bzrlib plugins found in each dir in dirs.
+
+ Loading a plugin means importing it into the python interpreter.
+ The plugin is expected to make calls to register commands when
+ it's loaded (or perhaps access other hooks in future.)
+
+ Plugins are loaded into bzrlib.plugins.NAME, and can be found there
+ for future reference.
+
+ The python module path for bzrlib.plugins will be modified to be 'dirs'.
+ """
+ # Explicitly load the plugins with a specific path
+ for fullname, path in PluginImporter.specific_paths.iteritems():
+ name = fullname[len('bzrlib.plugins.'):]
+ _load_plugin_module(name, path)
+
+ # We need to strip the trailing separators here as well as in the
+ # set_plugins_path function because calling code can pass anything in to
+ # this function, and since it sets plugins.__path__, it should set it to
+ # something that will be valid for Python to use (in case people try to
+ # run "import bzrlib.plugins.PLUGINNAME" after calling this function).
+ _mod_plugins.__path__ = map(_strip_trailing_sep, dirs)
+ for d in dirs:
+ if not d:
+ continue
+ trace.mutter('looking for plugins in %s', d)
+ if os.path.isdir(d):
+ load_from_dir(d)
+
+
+# backwards compatibility: load_from_dirs was the old name
+# This was changed in 0.15
+load_from_dirs = load_from_path
+
+
+def _find_plugin_module(dir, name):
+ """Check if there is a valid python module that can be loaded as a plugin.
+
+ :param dir: The directory where the search is performed.
+    :param name: The name of an existing file or directory within dir,
+        either a python file or a package directory.
+
+ :return: (name, path, description) name is the module name, path is the
+ file to load and description is the tuple returned by
+ imp.get_suffixes().
+ """
+ path = osutils.pathjoin(dir, name)
+ if os.path.isdir(path):
+        # Check for a valid __init__.py file, valid suffixes depend on -O and
+ # can be .py, .pyc and .pyo
+ for suffix, mode, kind in imp.get_suffixes():
+ if kind not in (imp.PY_SOURCE, imp.PY_COMPILED):
+ # We don't recognize compiled modules (.so, .dll, etc)
+ continue
+ init_path = osutils.pathjoin(path, '__init__' + suffix)
+ if os.path.isfile(init_path):
+ return name, init_path, (suffix, mode, kind)
+ else:
+ for suffix, mode, kind in imp.get_suffixes():
+ if name.endswith(suffix):
+ # Clean up the module name
+ name = name[:-len(suffix)]
+ if kind == imp.C_EXTENSION and name.endswith('module'):
+ name = name[:-len('module')]
+ return name, path, (suffix, mode, kind)
+ # There is no python module here
+ return None, None, (None, None, None)
+
+
+def record_plugin_warning(plugin_name, warning_message):
+ trace.mutter(warning_message)
+ plugin_warnings.setdefault(plugin_name, []).append(warning_message)
+
+
+def _load_plugin_module(name, dir):
+ """Load plugin name from dir.
+
+ :param name: The plugin name in the bzrlib.plugins namespace.
+ :param dir: The directory the plugin is loaded from for error messages.
+ """
+ if ('bzrlib.plugins.%s' % name) in PluginImporter.blacklist:
+ return
+ try:
+ exec "import bzrlib.plugins.%s" % name in {}
+ except KeyboardInterrupt:
+ raise
+ except errors.IncompatibleAPI, e:
+ warning_message = (
+ "Unable to load plugin %r. It requested API version "
+ "%s of module %s but the minimum exported version is %s, and "
+ "the maximum is %s" %
+ (name, e.wanted, e.api, e.minimum, e.current))
+ record_plugin_warning(name, warning_message)
+ except Exception, e:
+ trace.warning("%s" % e)
+ if re.search('\.|-| ', name):
+ sanitised_name = re.sub('[-. ]', '_', name)
+ if sanitised_name.startswith('bzr_'):
+ sanitised_name = sanitised_name[len('bzr_'):]
+ trace.warning("Unable to load %r in %r as a plugin because the "
+ "file path isn't a valid module name; try renaming "
+ "it to %r." % (name, dir, sanitised_name))
+ else:
+ record_plugin_warning(
+ name,
+ 'Unable to load plugin %r from %r' % (name, dir))
+ trace.log_exception_quietly()
+ if 'error' in debug.debug_flags:
+ trace.print_exception(sys.exc_info(), sys.stderr)
+
+
+def load_from_dir(d):
+ """Load the plugins in directory d.
+
+ d must be in the plugins module path already.
+ This function is called once for each directory in the module path.
+ """
+ plugin_names = set()
+ for p in os.listdir(d):
+ name, path, desc = _find_plugin_module(d, p)
+ if name is not None:
+ if name == '__init__':
+ # We do nothing with the __init__.py file in directories from
+ # the bzrlib.plugins module path, we may want to, one day
+ # -- vila 20100316.
+ continue # We don't load __init__.py in the plugins dirs
+ elif getattr(_mod_plugins, name, None) is not None:
+ # The module has already been loaded from another directory
+ # during a previous call.
+ # FIXME: There should be a better way to report masked plugins
+ # -- vila 20100316
+ trace.mutter('Plugin name %s already loaded', name)
+ else:
+ plugin_names.add(name)
+
+ for name in plugin_names:
+ _load_plugin_module(name, d)
+
+
+def plugins():
+ """Return a dictionary of the plugins.
+
+ Each item in the dictionary is a PlugIn object.
+ """
+ result = {}
+ for name, plugin in _mod_plugins.__dict__.items():
+ if isinstance(plugin, types.ModuleType):
+ result[name] = PlugIn(name, plugin)
+ return result
+
+
+def format_concise_plugin_list():
+ """Return a string holding a concise list of plugins and their version.
+ """
+ items = []
+ for name, a_plugin in sorted(plugins().items()):
+ items.append("%s[%s]" %
+ (name, a_plugin.__version__))
+ return ', '.join(items)
+
+
+
+class PluginsHelpIndex(object):
+ """A help index that returns help topics for plugins."""
+
+ def __init__(self):
+ self.prefix = 'plugins/'
+
+ def get_topics(self, topic):
+ """Search for topic in the loaded plugins.
+
+ This will not trigger loading of new plugins.
+
+ :param topic: A topic to search for.
+ :return: A list which is either empty or contains a single
+ RegisteredTopic entry.
+ """
+ if not topic:
+ return []
+ if topic.startswith(self.prefix):
+ topic = topic[len(self.prefix):]
+ plugin_module_name = 'bzrlib.plugins.%s' % topic
+ try:
+ module = sys.modules[plugin_module_name]
+ except KeyError:
+ return []
+ else:
+ return [ModuleHelpTopic(module)]
+
+
+class ModuleHelpTopic(object):
+ """A help topic which returns the docstring for a module."""
+
+ def __init__(self, module):
+ """Constructor.
+
+ :param module: The module for which help should be generated.
+ """
+ self.module = module
+
+ def get_help_text(self, additional_see_also=None, verbose=True):
+ """Return a string with the help for this topic.
+
+ :param additional_see_also: Additional help topics to be
+ cross-referenced.
+ """
+ if not self.module.__doc__:
+ result = "Plugin '%s' has no docstring.\n" % self.module.__name__
+ else:
+ result = self.module.__doc__
+ if result[-1] != '\n':
+ result += '\n'
+ from bzrlib import help_topics
+ result += help_topics._format_see_also(additional_see_also)
+ return result
+
+ def get_help_topic(self):
+ """Return the module help topic: its basename."""
+ return self.module.__name__[len('bzrlib.plugins.'):]
+
+
+class PlugIn(object):
+ """The bzrlib representation of a plugin.
+
+ The PlugIn object provides a way to manipulate a given plugin module.
+ """
+
+ def __init__(self, name, module):
+ """Construct a plugin for module."""
+ self.name = name
+ self.module = module
+
+ def path(self):
+ """Get the path that this plugin was loaded from."""
+ if getattr(self.module, '__path__', None) is not None:
+ return os.path.abspath(self.module.__path__[0])
+ elif getattr(self.module, '__file__', None) is not None:
+ path = os.path.abspath(self.module.__file__)
+ if path[-4:] in ('.pyc', '.pyo'):
+ pypath = path[:-4] + '.py'
+ if os.path.isfile(pypath):
+ path = pypath
+ return path
+ else:
+ return repr(self.module)
+
+ def __str__(self):
+ return "<%s.%s object at %s, name=%s, module=%s>" % (
+ self.__class__.__module__, self.__class__.__name__, id(self),
+ self.name, self.module)
+
+ __repr__ = __str__
+
+ def test_suite(self):
+ """Return the plugin's test suite."""
+ if getattr(self.module, 'test_suite', None) is not None:
+ return self.module.test_suite()
+ else:
+ return None
+
+ def load_plugin_tests(self, loader):
+ """Return the adapted plugin's test suite.
+
+ :param loader: The custom loader that should be used to load additional
+ tests.
+
+ """
+ if getattr(self.module, 'load_tests', None) is not None:
+ return loader.loadTestsFromModule(self.module)
+ else:
+ return None
+
+ def version_info(self):
+ """Return the plugin's version_tuple or None if unknown."""
+ version_info = getattr(self.module, 'version_info', None)
+ if version_info is not None:
+ try:
+ if isinstance(version_info, types.StringType):
+ version_info = version_info.split('.')
+ elif len(version_info) == 3:
+ version_info = tuple(version_info) + ('final', 0)
+ except TypeError, e:
+                # The given version_info isn't even iterable
+ trace.log_exception_quietly()
+ version_info = (version_info,)
+ return version_info
+
+ def _get__version__(self):
+ version_info = self.version_info()
+ if version_info is None or len(version_info) == 0:
+ return "unknown"
+ try:
+ version_string = _format_version_tuple(version_info)
+ except (ValueError, TypeError, IndexError), e:
+ trace.log_exception_quietly()
+            # Try to return something useful for bad plugins, instead of
+            # a stack trace.
+ version_string = '.'.join(map(str, version_info))
+ return version_string
+
+ __version__ = property(_get__version__)
+
+
+class _PluginImporter(object):
+ """An importer tailored to bzr specific needs.
+
+ This is a singleton that takes care of:
+ - disabled plugins specified in 'blacklist',
+    - plugins that need to be loaded from specific directories.
+ """
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.blacklist = set()
+ self.specific_paths = {}
+
+ def find_module(self, fullname, parent_path=None):
+ """Search a plugin module.
+
+        Disabled plugins raise an import error; plugins with specific paths
+        return a specific loader.
+
+ :return: None if the plugin doesn't need special handling, self
+ otherwise.
+ """
+ if not fullname.startswith('bzrlib.plugins.'):
+ return None
+ if fullname in self.blacklist:
+ raise ImportError('%s is disabled' % fullname)
+ if fullname in self.specific_paths:
+ return self
+ return None
+
+ def load_module(self, fullname):
+ """Load a plugin from a specific directory (or file)."""
+ # We are called only for specific paths
+ plugin_path = self.specific_paths[fullname]
+ loading_path = None
+ if os.path.isdir(plugin_path):
+ for suffix, mode, kind in imp.get_suffixes():
+ if kind not in (imp.PY_SOURCE, imp.PY_COMPILED):
+ # We don't recognize compiled modules (.so, .dll, etc)
+ continue
+ init_path = osutils.pathjoin(plugin_path, '__init__' + suffix)
+ if os.path.isfile(init_path):
+ # We've got a module here and load_module needs specific
+ # parameters.
+ loading_path = plugin_path
+ suffix = ''
+ mode = ''
+ kind = imp.PKG_DIRECTORY
+ break
+ else:
+ for suffix, mode, kind in imp.get_suffixes():
+ if plugin_path.endswith(suffix):
+ loading_path = plugin_path
+ break
+ if loading_path is None:
+ raise ImportError('%s cannot be loaded from %s'
+ % (fullname, plugin_path))
+ if kind is imp.PKG_DIRECTORY:
+ f = None
+ else:
+ f = open(loading_path, mode)
+ try:
+ mod = imp.load_module(fullname, f, loading_path,
+ (suffix, mode, kind))
+ mod.__package__ = fullname
+ return mod
+ finally:
+ if f is not None:
+ f.close()
+
+
+# Install a dedicated importer for plugins requiring special handling
+PluginImporter = _PluginImporter()
+sys.meta_path.append(PluginImporter)
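
A hedged end-to-end sketch of the search-path behaviour implemented above
(Python 2, like the rest of bzrlib); the environment variable names come from
this module, while the plugin names and paths are invented for illustration:

    import os
    from bzrlib import plugin

    # '+user', '+core' and '+site' are the symbolic references resolved by
    # get_standard_plugins_path(); '-site' drops the site directory.
    os.environ['BZR_PLUGIN_PATH'] = os.pathsep.join(['+user', '-site'])
    # Load one plugin from an explicit location, bypassing the search path.
    os.environ['BZR_PLUGINS_AT'] = 'myplugin@/src/myplugin'
    # Never import this plugin, even if it is found on the search path.
    os.environ['BZR_DISABLE_PLUGINS'] = 'badplugin'

    plugin.load_plugins()
    print plugin.format_concise_plugin_list()
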
diff --git a/bzrlib/plugins/__init__.py b/bzrlib/plugins/__init__.py
new file mode 100644
index 0000000..efcb160
--- /dev/null
+++ b/bzrlib/plugins/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Null placeholder plugin"""
+from __future__ import absolute_import
+
diff --git a/bzrlib/plugins/bash_completion/README.txt b/bzrlib/plugins/bash_completion/README.txt
new file mode 100644
index 0000000..3bd1355
--- /dev/null
+++ b/bzrlib/plugins/bash_completion/README.txt
@@ -0,0 +1,143 @@
+.. comment
+
+ Copyright (C) 2010 Canonical Ltd
+
+ This file is part of bzr-bash-completion
+
+ bzr-bash-completion free software: you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation, either version 2 of the
+ License, or (at your option) any later version.
+
+ bzr-bash-completion is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+==========================
+bzr bash-completion plugin
+==========================
+
+This plugin generates a shell function which can be used by bash to
+automatically complete the currently typed command when the user
+presses the completion key (usually tab).
+
+It is intended as a bzr plugin, but can be used to some extent as a
+standalone python script as well.
+
+| Copyright (C) 2009, 2010 Canonical Ltd
+
+.. contents::
+
+-------------------------------
+Bundled and standalone versions
+-------------------------------
+
+This plugin has been merged_ into the main source tree of Bazaar.
+Starting with the bzr 2.3 series, a common bzr installation will
+include this plugin.
+
+There is still a standalone version available. It makes the plugin
+available for users of older bzr versions. When using both versions,
+local configuration might determine which version actually gets used,
+and some installations might even overwrite one another, so don't use
+the standalone version if you have the bundled one, unless you know
+what you are doing. Some effort will be made to keep the two versions
+reasonably in sync for some time yet.
+
+This text here documents the bundled version.
+
+.. _merged: http://bazaar.launchpad.net/~bzr-pqm/bzr/bzr.dev/revision/5240
+
+-----
+Using
+-----
+
+Using as a plugin
+-----------------
+
+This is the preferred method of generating the completion function, as
+it will ensure proper bzr initialization.
+
+::
+
+ eval "`bzr bash-completion`"
+
+Lazy initialization
+-------------------
+
+Running the above command automatically from your ``~/.bashrc`` file
+or similar can cause annoying delays in the startup of your shell.
+To avoid this problem, you can delay the generation of the completion
+function until you actually need it.
+
+To do so, source the file ``contrib/bash/bzr`` shipped with the bzr
+source distribution from your ``~/.bashrc`` file
+or add it to your ``~/.bash_completion`` if
+your setup uses such a file. On a system-wide installation, the
+directory ``/usr/share/bash-completion/`` might contain such bash
+completion scripts.
+
+Note that the full completion function is generated only once per
+shell session. If you update your bzr installation or change the set
+of installed plugins, then you might wish to regenerate the completion
+function manually as described above in order for completion to take
+these changes into account.
+
+--------------
+Design concept
+--------------
+
+The plugin is designed to generate a completion function
+containing all the required information about the possible
+completions. This is usually only done once when bash
+initializes. After that, no more invocations of bzr are required. This
+makes the function much faster than a possible implementation talking
+to bzr for each and every completion. On the other hand, this has the
+effect that updates to bzr or its plugins won't show up in the
+completions immediately, but only after the completion function has
+been regenerated.
+
+-------
+License
+-------
+
+As this is built upon a bash completion script originally included in
+the bzr source tree, and as the bzr sources are covered by the GPL 2,
+this plugin here is licensed under these same terms.
+
+If you require a more liberal license, you'll have to contact all
+those who contributed code to this plugin, be it for bash or for
+python.
+
+-------
+History
+-------
+
+The plugin was created by Martin von Gagern in 2009, building on a
+static completion function of very limited scope distributed together
+with bzr.
+
+A version of it was merged into the bzr source tree in May 2010.
+
+----------
+References
+----------
+
+Bazaar homepage
+ | http://bazaar.canonical.com/
+Standalone plugin homepages
+ | https://launchpad.net/bzr-bash-completion
+ | http://pypi.python.org/pypi/bzr-bash-completion
+
+
+
+.. vim: ft=rst
+
+.. emacs
+ Local Variables:
+ mode: rst
+ End:
diff --git a/bzrlib/plugins/bash_completion/__init__.py b/bzrlib/plugins/bash_completion/__init__.py
new file mode 100644
index 0000000..64d506f
--- /dev/null
+++ b/bzrlib/plugins/bash_completion/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__doc__ = """Generate a shell function for bash command line completion.
+
+This plugin provides a command called bash-completion that generates a
+bash completion function for bzr. See its documentation for details.
+"""
+
+from bzrlib import commands, version_info
+
+
+bzr_plugin_name = 'bash_completion'
+bzr_commands = [ 'bash-completion' ]
+
+commands.plugin_cmds.register_lazy('cmd_bash_completion', [],
+ 'bzrlib.plugins.bash_completion.bashcomp')
+
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'tests',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/plugins/bash_completion/bashcomp.py b/bzrlib/plugins/bash_completion/bashcomp.py
new file mode 100644
index 0000000..52d740e
--- /dev/null
+++ b/bzrlib/plugins/bash_completion/bashcomp.py
@@ -0,0 +1,482 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ cmdline,
+ commands,
+ config,
+ help_topics,
+ option,
+ plugin,
+)
+import bzrlib
+import re
+import sys
+
+
+class BashCodeGen(object):
+ """Generate a bash script for given completion data."""
+
+ def __init__(self, data, function_name='_bzr', debug=False):
+ self.data = data
+ self.function_name = function_name
+ self.debug = debug
+
+ def script(self):
+ return ("""\
+# Programmable completion for the Bazaar-NG bzr command under bash.
+# Known to work with bash 2.05a as well as bash 4.1.2, and probably
+# all versions in between as well.
+
+# Based originally on the svn bash completion script.
+# Customized by Sven Wilhelm/Icecrash.com
+# Adjusted for automatic generation by Martin von Gagern
+
+# Generated using the bash_completion plugin.
+# See https://launchpad.net/bzr-bash-completion for details.
+
+# Commands and options of bzr %(bzr_version)s
+
+shopt -s progcomp
+%(function)s
+complete -F %(function_name)s -o default bzr
+""" % {
+ "function_name": self.function_name,
+ "function": self.function(),
+ "bzr_version": self.bzr_version(),
+ })
+
+ def function(self):
+ return ("""\
+%(function_name)s ()
+{
+ local cur cmds cmdIdx cmd cmdOpts fixedWords i globalOpts
+ local curOpt optEnums
+ local IFS=$' \\n'
+
+ COMPREPLY=()
+ cur=${COMP_WORDS[COMP_CWORD]}
+
+ cmds='%(cmds)s'
+ globalOpts=( %(global_options)s )
+
+ # do ordinary expansion if we are anywhere after a -- argument
+ for ((i = 1; i < COMP_CWORD; ++i)); do
+ [[ ${COMP_WORDS[i]} == "--" ]] && return 0
+ done
+
+ # find the command; it's the first word not starting in -
+ cmd=
+ for ((cmdIdx = 1; cmdIdx < ${#COMP_WORDS[@]}; ++cmdIdx)); do
+ if [[ ${COMP_WORDS[cmdIdx]} != -* ]]; then
+ cmd=${COMP_WORDS[cmdIdx]}
+ break
+ fi
+ done
+
+ # complete command name if we are not already past the command
+ if [[ $COMP_CWORD -le cmdIdx ]]; then
+ COMPREPLY=( $( compgen -W "$cmds ${globalOpts[*]}" -- $cur ) )
+ return 0
+ fi
+
+ # find the option for which we want to complete a value
+ curOpt=
+ if [[ $cur != -* ]] && [[ $COMP_CWORD -gt 1 ]]; then
+ curOpt=${COMP_WORDS[COMP_CWORD - 1]}
+ if [[ $curOpt == = ]]; then
+ curOpt=${COMP_WORDS[COMP_CWORD - 2]}
+ elif [[ $cur == : ]]; then
+ cur=
+ curOpt="$curOpt:"
+ elif [[ $curOpt == : ]]; then
+ curOpt=${COMP_WORDS[COMP_CWORD - 2]}:
+ fi
+ fi
+%(debug)s
+ cmdOpts=( )
+ optEnums=( )
+ fixedWords=( )
+ case $cmd in
+%(cases)s\
+ *)
+ cmdOpts=(--help -h)
+ ;;
+ esac
+
+ IFS=$'\\n'
+ if [[ ${#fixedWords[@]} -eq 0 ]] && [[ ${#optEnums[@]} -eq 0 ]] && [[ $cur != -* ]]; then
+ case $curOpt in
+ tag:|*..tag:)
+ fixedWords=( $(bzr tags 2>/dev/null | sed 's/ *[^ ]*$//; s/ /\\\\\\\\ /g;') )
+ ;;
+ esac
+ case $cur in
+ [\\"\\']tag:*)
+ fixedWords=( $(bzr tags 2>/dev/null | sed 's/ *[^ ]*$//; s/^/tag:/') )
+ ;;
+ [\\"\\']*..tag:*)
+ fixedWords=( $(bzr tags 2>/dev/null | sed 's/ *[^ ]*$//') )
+ fixedWords=( $(for i in "${fixedWords[@]}"; do echo "${cur%%..tag:*}..tag:${i}"; done) )
+ ;;
+ esac
+ elif [[ $cur == = ]] && [[ ${#optEnums[@]} -gt 0 ]]; then
+ # complete directly after "--option=", list all enum values
+ COMPREPLY=( "${optEnums[@]}" )
+ return 0
+ else
+ fixedWords=( "${cmdOpts[@]}"
+ "${globalOpts[@]}"
+ "${optEnums[@]}"
+ "${fixedWords[@]}" )
+ fi
+
+ if [[ ${#fixedWords[@]} -gt 0 ]]; then
+ COMPREPLY=( $( compgen -W "${fixedWords[*]}" -- $cur ) )
+ fi
+
+ return 0
+}
+""" % {
+ "cmds": self.command_names(),
+ "function_name": self.function_name,
+ "cases": self.command_cases(),
+ "global_options": self.global_options(),
+ "debug": self.debug_output(),
+ })
+ # Help Emacs terminate strings: "
+
+ def command_names(self):
+ return " ".join(self.data.all_command_aliases())
+
+ def debug_output(self):
+ if not self.debug:
+ return ''
+ else:
+ return (r"""
+ # Debugging code enabled using the --debug command line switch.
+ # Will dump some variables to the top portion of the terminal.
+ echo -ne '\e[s\e[H'
+ for (( i=0; i < ${#COMP_WORDS[@]}; ++i)); do
+ echo "\$COMP_WORDS[$i]='${COMP_WORDS[i]}'"$'\e[K'
+ done
+ for i in COMP_CWORD COMP_LINE COMP_POINT COMP_TYPE COMP_KEY cur curOpt; do
+ echo "\$${i}=\"${!i}\""$'\e[K'
+ done
+ echo -ne '---\e[K\e[u'
+""")
+
+ def bzr_version(self):
+ bzr_version = bzrlib.version_string
+ if not self.data.plugins:
+ bzr_version += "."
+ else:
+ bzr_version += " and the following plugins:"
+ for name, plugin in sorted(self.data.plugins.iteritems()):
+ bzr_version += "\n# %s" % plugin
+ return bzr_version
+
+ def global_options(self):
+ return " ".join(sorted(self.data.global_options))
+
+ def command_cases(self):
+ cases = ""
+ for command in self.data.commands:
+ cases += self.command_case(command)
+ return cases
+
+ def command_case(self, command):
+ case = "\t%s)\n" % "|".join(command.aliases)
+ if command.plugin:
+ case += "\t\t# plugin \"%s\"\n" % command.plugin
+ options = []
+ enums = []
+ for option in command.options:
+ for message in option.error_messages:
+ case += "\t\t# %s\n" % message
+ if option.registry_keys:
+ for key in option.registry_keys:
+ options.append("%s=%s" % (option, key))
+ enums.append("%s) optEnums=( %s ) ;;" %
+ (option, ' '.join(option.registry_keys)))
+ else:
+ options.append(str(option))
+ case += "\t\tcmdOpts=( %s )\n" % " ".join(options)
+ if command.fixed_words:
+ fixed_words = command.fixed_words
+ if isinstance(fixed_words, list):
+                fixed_words = "( %s )" % ' '.join(fixed_words)
+ case += "\t\tfixedWords=%s\n" % fixed_words
+ if enums:
+ case += "\t\tcase $curOpt in\n\t\t\t"
+ case += "\n\t\t\t".join(enums)
+ case += "\n\t\tesac\n"
+ case += "\t\t;;\n"
+ return case
+
+
+class CompletionData(object):
+
+ def __init__(self):
+ self.plugins = {}
+ self.global_options = set()
+ self.commands = []
+
+ def all_command_aliases(self):
+ for c in self.commands:
+ for a in c.aliases:
+ yield a
+
+
+class CommandData(object):
+
+ def __init__(self, name):
+ self.name = name
+ self.aliases = [name]
+ self.plugin = None
+ self.options = []
+ self.fixed_words = None
+
+
+class PluginData(object):
+
+ def __init__(self, name, version=None):
+ if version is None:
+ try:
+ version = bzrlib.plugin.plugins()[name].__version__
+ except:
+ version = 'unknown'
+ self.name = name
+ self.version = version
+
+ def __str__(self):
+ if self.version == 'unknown':
+ return self.name
+ return '%s %s' % (self.name, self.version)
+
+
+class OptionData(object):
+
+ def __init__(self, name):
+ self.name = name
+ self.registry_keys = None
+ self.error_messages = []
+
+ def __str__(self):
+ return self.name
+
+ def __cmp__(self, other):
+ return cmp(self.name, other.name)
+
+
+class DataCollector(object):
+
+ def __init__(self, no_plugins=False, selected_plugins=None):
+ self.data = CompletionData()
+ self.user_aliases = {}
+ if no_plugins:
+ self.selected_plugins = set()
+ elif selected_plugins is None:
+ self.selected_plugins = None
+ else:
+ self.selected_plugins = set([x.replace('-', '_')
+ for x in selected_plugins])
+
+ def collect(self):
+ self.global_options()
+ self.aliases()
+ self.commands()
+ return self.data
+
+ def global_options(self):
+ re_switch = re.compile(r'\n(--[A-Za-z0-9-_]+)(?:, (-\S))?\s')
+ help_text = help_topics.topic_registry.get_detail('global-options')
+ for long, short in re_switch.findall(help_text):
+ self.data.global_options.add(long)
+ if short:
+ self.data.global_options.add(short)
+
+ def aliases(self):
+ for alias, expansion in config.GlobalConfig().get_aliases().iteritems():
+ for token in cmdline.split(expansion):
+ if not token.startswith("-"):
+ self.user_aliases.setdefault(token, set()).add(alias)
+ break
+
+ def commands(self):
+ for name in sorted(commands.all_command_names()):
+ self.command(name)
+
+ def command(self, name):
+ cmd = commands.get_cmd_object(name)
+ cmd_data = CommandData(name)
+
+ plugin_name = cmd.plugin_name()
+ if plugin_name is not None:
+ if (self.selected_plugins is not None and
+                plugin_name not in self.selected_plugins):
+ return None
+ plugin_data = self.data.plugins.get(plugin_name)
+ if plugin_data is None:
+ plugin_data = PluginData(plugin_name)
+ self.data.plugins[plugin_name] = plugin_data
+ cmd_data.plugin = plugin_data
+ self.data.commands.append(cmd_data)
+
+ # Find all aliases to the command; both cmd-defined and user-defined.
+ # We assume a user won't override one command with a different one,
+ # but will choose completely new names or add options to existing
+ # ones while maintaining the actual command name unchanged.
+ cmd_data.aliases.extend(cmd.aliases)
+ cmd_data.aliases.extend(sorted([useralias
+ for cmdalias in cmd_data.aliases
+ if cmdalias in self.user_aliases
+ for useralias in self.user_aliases[cmdalias]
+ if useralias not in cmd_data.aliases]))
+
+ opts = cmd.options()
+ for optname, opt in sorted(opts.iteritems()):
+ cmd_data.options.extend(self.option(opt))
+
+ if 'help' == name or 'help' in cmd.aliases:
+ cmd_data.fixed_words = ('($cmds %s)' %
+ " ".join(sorted(help_topics.topic_registry.keys())))
+
+ return cmd_data
+
+ def option(self, opt):
+ optswitches = {}
+ parser = option.get_optparser({opt.name: opt})
+ parser = self.wrap_parser(optswitches, parser)
+ optswitches.clear()
+ opt.add_option(parser, opt.short_name())
+ if isinstance(opt, option.RegistryOption) and opt.enum_switch:
+ enum_switch = '--%s' % opt.name
+ enum_data = optswitches.get(enum_switch)
+ if enum_data:
+ try:
+ enum_data.registry_keys = opt.registry.keys()
+ except ImportError, e:
+ enum_data.error_messages.append(
+ "ERROR getting registry keys for '--%s': %s"
+ % (opt.name, str(e).split('\n')[0]))
+ return sorted(optswitches.values())
+
+ def wrap_container(self, optswitches, parser):
+ def tweaked_add_option(*opts, **attrs):
+ for name in opts:
+ optswitches[name] = OptionData(name)
+ parser.add_option = tweaked_add_option
+ return parser
+
+ def wrap_parser(self, optswitches, parser):
+ orig_add_option_group = parser.add_option_group
+ def tweaked_add_option_group(*opts, **attrs):
+ return self.wrap_container(optswitches,
+ orig_add_option_group(*opts, **attrs))
+ parser.add_option_group = tweaked_add_option_group
+ return self.wrap_container(optswitches, parser)
+
+
+def bash_completion_function(out, function_name="_bzr", function_only=False,
+ debug=False,
+ no_plugins=False, selected_plugins=None):
+ dc = DataCollector(no_plugins=no_plugins, selected_plugins=selected_plugins)
+ data = dc.collect()
+ cg = BashCodeGen(data, function_name=function_name, debug=debug)
+ if function_only:
+ res = cg.function()
+ else:
+ res = cg.script()
+ out.write(res)
+
+
+class cmd_bash_completion(commands.Command):
+ __doc__ = """Generate a shell function for bash command line completion.
+
+ This command generates a shell function which can be used by bash to
+ automatically complete the currently typed command when the user presses
+ the completion key (usually tab).
+
+ Commonly used like this:
+ eval "`bzr bash-completion`"
+ """
+
+ takes_options = [
+ option.Option("function-name", short_name="f", type=str, argname="name",
+ help="Name of the generated function (default: _bzr)"),
+ option.Option("function-only", short_name="o", type=None,
+ help="Generate only the shell function, don't enable it"),
+ option.Option("debug", type=None, hidden=True,
+ help="Enable shell code useful for debugging"),
+ option.ListOption("plugin", type=str, argname="name",
+ # param_name="selected_plugins", # doesn't work, bug #387117
+ help="Enable completions for the selected plugin"
+ + " (default: all plugins)"),
+ ]
+
+ def run(self, **kwargs):
+ if 'plugin' in kwargs:
+ # work around bug #387117 which prevents us from using param_name
+ if len(kwargs['plugin']) > 0:
+ kwargs['selected_plugins'] = kwargs['plugin']
+ del kwargs['plugin']
+ bash_completion_function(sys.stdout, **kwargs)
+
+
+if __name__ == '__main__':
+
+ import locale
+ import optparse
+
+ def plugin_callback(option, opt, value, parser):
+ values = parser.values.selected_plugins
+ if value == '-':
+ del values[:]
+ else:
+ values.append(value)
+
+ parser = optparse.OptionParser(usage="%prog [-f NAME] [-o]")
+ parser.add_option("--function-name", "-f", metavar="NAME",
+ help="Name of the generated function (default: _bzr)")
+ parser.add_option("--function-only", "-o", action="store_true",
+ help="Generate only the shell function, don't enable it")
+ parser.add_option("--debug", action="store_true",
+ help=optparse.SUPPRESS_HELP)
+ parser.add_option("--no-plugins", action="store_true",
+ help="Don't load any bzr plugins")
+ parser.add_option("--plugin", metavar="NAME", type="string",
+ dest="selected_plugins", default=[],
+ action="callback", callback=plugin_callback,
+ help="Enable completions for the selected plugin"
+ + " (default: all plugins)")
+ (opts, args) = parser.parse_args()
+ if args:
+ parser.error("script does not take positional arguments")
+ kwargs = dict()
+ for name, value in opts.__dict__.iteritems():
+ if value is not None:
+ kwargs[name] = value
+
+ locale.setlocale(locale.LC_ALL, '')
+ if not kwargs.get('no_plugins', False):
+ plugin.load_plugins()
+ commands.install_bzr_command_hooks()
+ bash_completion_function(sys.stdout, **kwargs)
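
Outside the command-line entry points above, the generator can also be driven
programmatically; a small sketch, assuming bzr and its plugins are importable
and with a purely illustrative function name:

    import sys
    from bzrlib import commands, plugin
    from bzrlib.plugins.bash_completion.bashcomp import bash_completion_function

    plugin.load_plugins()
    commands.install_bzr_command_hooks()
    # Emit only the shell function body, under a custom name.
    bash_completion_function(sys.stdout, function_name='_my_bzr',
                             function_only=True)
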
diff --git a/bzrlib/plugins/bash_completion/tests/__init__.py b/bzrlib/plugins/bash_completion/tests/__init__.py
new file mode 100644
index 0000000..fbe9415
--- /dev/null
+++ b/bzrlib/plugins/bash_completion/tests/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2010 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_bashcomp',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/plugins/bash_completion/tests/test_bashcomp.py b/bzrlib/plugins/bash_completion/tests/test_bashcomp.py
new file mode 100644
index 0000000..e22e6be
--- /dev/null
+++ b/bzrlib/plugins/bash_completion/tests/test_bashcomp.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2010 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import sys
+
+import bzrlib
+from bzrlib import commands, tests
+from bzrlib.tests import features
+from bzrlib.plugins.bash_completion.bashcomp import *
+
+import subprocess
+
+
+class BashCompletionMixin(object):
+ """Component for testing execution of a bash completion script."""
+
+ _test_needs_features = [features.bash_feature]
+ script = None
+
+ def complete(self, words, cword=-1):
+ """Perform a bash completion.
+
+ :param words: a list of words representing the current command.
+ :param cword: the current word to complete, defaults to the last one.
+ """
+ if self.script is None:
+ self.script = self.get_script()
+ proc = subprocess.Popen([features.bash_feature.path,
+ '--noprofile'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ if cword < 0:
+ cword = len(words) + cword
+ input = '%s\n' % self.script
+ input += ('COMP_WORDS=( %s )\n' %
+ ' '.join(["'"+w.replace("'", "'\\''")+"'" for w in words]))
+ input += 'COMP_CWORD=%d\n' % cword
+ input += '%s\n' % getattr(self, 'script_name', '_bzr')
+ input += 'echo ${#COMPREPLY[*]}\n'
+ input += "IFS=$'\\n'\n"
+ input += 'echo "${COMPREPLY[*]}"\n'
+ (out, err) = proc.communicate(input)
+ if '' != err:
+ raise AssertionError('Unexpected error message:\n%s' % err)
+ self.assertEqual('', err, 'No messages to standard error')
+ #import sys
+ #print >>sys.stdout, '---\n%s\n---\n%s\n---\n' % (input, out)
+ lines = out.split('\n')
+ nlines = int(lines[0])
+ del lines[0]
+ self.assertEqual('', lines[-1], 'Newline at end')
+ del lines[-1]
+ if nlines == 0 and len(lines) == 1 and lines[0] == '':
+ del lines[0]
+ self.assertEqual(nlines, len(lines), 'No newlines in generated words')
+ self.completion_result = set(lines)
+ return self.completion_result
+
+ def assertCompletionEquals(self, *words):
+ self.assertEqual(set(words), self.completion_result)
+
+ def assertCompletionContains(self, *words):
+ missing = set(words) - self.completion_result
+ if missing:
+ raise AssertionError('Completion should contain %r but it has %r'
+ % (missing, self.completion_result))
+
+ def assertCompletionOmits(self, *words):
+ surplus = set(words) & self.completion_result
+ if surplus:
+ raise AssertionError('Completion should omit %r but it has %r'
+                                 % (surplus, self.completion_result))
+
+ def get_script(self):
+ commands.install_bzr_command_hooks()
+ dc = DataCollector()
+ data = dc.collect()
+ cg = BashCodeGen(data)
+ res = cg.function()
+ return res
+
+
+class TestBashCompletion(tests.TestCase, BashCompletionMixin):
+ """Test bash completions that don't execute bzr."""
+
+    def test_simple_script(self):
+ """Ensure that the test harness works as expected"""
+ self.script = """
+_bzr() {
+ COMPREPLY=()
+ # add all words in reverse order, with some markup around them
+ for ((i = ${#COMP_WORDS[@]}; i > 0; --i)); do
+ COMPREPLY+=( "-${COMP_WORDS[i-1]}+" )
+ done
+ # and append the current word
+ COMPREPLY+=( "+${COMP_WORDS[COMP_CWORD]}-" )
+}
+"""
+ self.complete(['foo', '"bar', "'baz"], cword=1)
+ self.assertCompletionEquals("-'baz+", '-"bar+', '-foo+', '+"bar-')
+
+ def test_cmd_ini(self):
+ self.complete(['bzr', 'ini'])
+ self.assertCompletionContains('init', 'init-repo', 'init-repository')
+ self.assertCompletionOmits('commit')
+
+ def test_init_opts(self):
+ self.complete(['bzr', 'init', '-'])
+ self.assertCompletionContains('-h', '--2a', '--format=2a')
+
+ def test_global_opts(self):
+ self.complete(['bzr', '-', 'init'], cword=1)
+ self.assertCompletionContains('--no-plugins', '--builtin')
+
+ def test_commit_dashm(self):
+ self.complete(['bzr', 'commit', '-m'])
+ self.assertCompletionEquals('-m')
+
+ def test_status_negated(self):
+ self.complete(['bzr', 'status', '--n'])
+ self.assertCompletionContains('--no-versioned', '--no-verbose')
+
+ def test_init_format_any(self):
+ self.complete(['bzr', 'init', '--format', '=', 'directory'], cword=3)
+ self.assertCompletionContains('1.9', '2a')
+
+ def test_init_format_2(self):
+ self.complete(['bzr', 'init', '--format', '=', '2', 'directory'],
+ cword=4)
+ self.assertCompletionContains('2a')
+ self.assertCompletionOmits('1.9')
+
+
+class TestBashCompletionInvoking(tests.TestCaseWithTransport,
+ BashCompletionMixin):
+ """Test bash completions that might execute bzr.
+
+ Only the syntax ``$(bzr ...`` is supported so far. The bzr command
+ will be replaced by the bzr instance running this selftest.
+ """
+
+ def setUp(self):
+ super(TestBashCompletionInvoking, self).setUp()
+ if sys.platform == 'win32':
+ raise tests.KnownFailure(
+ 'see bug #709104, completion is broken on windows')
+
+ def get_script(self):
+ s = super(TestBashCompletionInvoking, self).get_script()
+ return s.replace("$(bzr ", "$('%s' " % self.get_bzr_path())
+
+ def test_revspec_tag_all(self):
+ self.requireFeature(features.sed_feature)
+ wt = self.make_branch_and_tree('.', format='dirstate-tags')
+ wt.branch.tags.set_tag('tag1', 'null:')
+ wt.branch.tags.set_tag('tag2', 'null:')
+ wt.branch.tags.set_tag('3tag', 'null:')
+ self.complete(['bzr', 'log', '-r', 'tag', ':'])
+ self.assertCompletionEquals('tag1', 'tag2', '3tag')
+
+ def test_revspec_tag_prefix(self):
+ self.requireFeature(features.sed_feature)
+ wt = self.make_branch_and_tree('.', format='dirstate-tags')
+ wt.branch.tags.set_tag('tag1', 'null:')
+ wt.branch.tags.set_tag('tag2', 'null:')
+ wt.branch.tags.set_tag('3tag', 'null:')
+ self.complete(['bzr', 'log', '-r', 'tag', ':', 't'])
+ self.assertCompletionEquals('tag1', 'tag2')
+
+ def test_revspec_tag_spaces(self):
+ self.requireFeature(features.sed_feature)
+ wt = self.make_branch_and_tree('.', format='dirstate-tags')
+ wt.branch.tags.set_tag('tag with spaces', 'null:')
+ self.complete(['bzr', 'log', '-r', 'tag', ':', 't'])
+ self.assertCompletionEquals(r'tag\ with\ spaces')
+ self.complete(['bzr', 'log', '-r', '"tag:t'])
+ self.assertCompletionEquals('tag:tag with spaces')
+ self.complete(['bzr', 'log', '-r', "'tag:t"])
+ self.assertCompletionEquals('tag:tag with spaces')
+
+ def test_revspec_tag_endrange(self):
+ self.requireFeature(features.sed_feature)
+ wt = self.make_branch_and_tree('.', format='dirstate-tags')
+ wt.branch.tags.set_tag('tag1', 'null:')
+ wt.branch.tags.set_tag('tag2', 'null:')
+ self.complete(['bzr', 'log', '-r', '3..tag', ':', 't'])
+ self.assertCompletionEquals('tag1', 'tag2')
+ self.complete(['bzr', 'log', '-r', '"3..tag:t'])
+ self.assertCompletionEquals('3..tag:tag1', '3..tag:tag2')
+ self.complete(['bzr', 'log', '-r', "'3..tag:t"])
+ self.assertCompletionEquals('3..tag:tag1', '3..tag:tag2')
+
+
+class TestBashCodeGen(tests.TestCase):
+
+ def test_command_names(self):
+ data = CompletionData()
+ bar = CommandData('bar')
+ bar.aliases.append('baz')
+ data.commands.append(bar)
+ data.commands.append(CommandData('foo'))
+ cg = BashCodeGen(data)
+ self.assertEqual('bar baz foo', cg.command_names())
+
+ def test_debug_output(self):
+ data = CompletionData()
+ self.assertEqual('', BashCodeGen(data, debug=False).debug_output())
+ self.assertTrue(BashCodeGen(data, debug=True).debug_output())
+
+ def test_bzr_version(self):
+ data = CompletionData()
+ cg = BashCodeGen(data)
+ self.assertEqual('%s.' % bzrlib.version_string, cg.bzr_version())
+ data.plugins['foo'] = PluginData('foo', '1.0')
+ data.plugins['bar'] = PluginData('bar', '2.0')
+ cg = BashCodeGen(data)
+ self.assertEqual('''\
+%s and the following plugins:
+# bar 2.0
+# foo 1.0''' % bzrlib.version_string, cg.bzr_version())
+
+ def test_global_options(self):
+ data = CompletionData()
+ data.global_options.add('--foo')
+ data.global_options.add('--bar')
+ cg = BashCodeGen(data)
+ self.assertEqual('--bar --foo', cg.global_options())
+
+ def test_command_cases(self):
+ data = CompletionData()
+ bar = CommandData('bar')
+ bar.aliases.append('baz')
+ bar.options.append(OptionData('--opt'))
+ data.commands.append(bar)
+ data.commands.append(CommandData('foo'))
+ cg = BashCodeGen(data)
+ self.assertEqualDiff('''\
+\tbar|baz)
+\t\tcmdOpts=( --opt )
+\t\t;;
+\tfoo)
+\t\tcmdOpts=( )
+\t\t;;
+''', cg.command_cases())
+
+ def test_command_case(self):
+ cmd = CommandData('cmd')
+ cmd.plugin = PluginData('plugger', '1.0')
+ bar = OptionData('--bar')
+ bar.registry_keys = ['that', 'this']
+ bar.error_messages.append('Some error message')
+ cmd.options.append(bar)
+ cmd.options.append(OptionData('--foo'))
+ data = CompletionData()
+ data.commands.append(cmd)
+ cg = BashCodeGen(data)
+ self.assertEqualDiff('''\
+\tcmd)
+\t\t# plugin "plugger 1.0"
+\t\t# Some error message
+\t\tcmdOpts=( --bar=that --bar=this --foo )
+\t\tcase $curOpt in
+\t\t\t--bar) optEnums=( that this ) ;;
+\t\tesac
+\t\t;;
+''', cg.command_case(cmd))
+
+
+class TestDataCollector(tests.TestCase):
+
+ def setUp(self):
+ super(TestDataCollector, self).setUp()
+ commands.install_bzr_command_hooks()
+
+ def test_global_options(self):
+ dc = DataCollector()
+ dc.global_options()
+ self.assertSubset(['--no-plugins', '--builtin'],
+ dc.data.global_options)
+
+ def test_commands(self):
+ dc = DataCollector()
+ dc.commands()
+ self.assertSubset(['init', 'init-repo', 'init-repository'],
+ dc.data.all_command_aliases())
+
+ def test_commands_from_plugins(self):
+ dc = DataCollector()
+ dc.commands()
+ self.assertSubset(['bash-completion'],
+ dc.data.all_command_aliases())
+
+ def test_commit_dashm(self):
+ dc = DataCollector()
+ cmd = dc.command('commit')
+ self.assertSubset(['-m'],
+ [str(o) for o in cmd.options])
+
+ def test_status_negated(self):
+ dc = DataCollector()
+ cmd = dc.command('status')
+ self.assertSubset(['--no-versioned', '--no-verbose'],
+ [str(o) for o in cmd.options])
+
+ def test_init_format(self):
+ dc = DataCollector()
+ cmd = dc.command('init')
+ for opt in cmd.options:
+ if opt.name == '--format':
+ self.assertSubset(['2a'], opt.registry_keys)
+ return
+ raise AssertionError('Option --format not found')
+
+
+class BlackboxTests(tests.TestCase):
+
+ def test_bash_completion(self):
+ self.run_bzr("bash-completion")
diff --git a/bzrlib/plugins/changelog_merge/__init__.py b/bzrlib/plugins/changelog_merge/__init__.py
new file mode 100644
index 0000000..3a48bd4
--- /dev/null
+++ b/bzrlib/plugins/changelog_merge/__init__.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__doc__ = """Merge hook for GNU-format ChangeLog files
+
+To enable this plugin, add a section to your locations.conf
+like::
+
+ [/home/user/proj]
+ changelog_merge_files = ChangeLog
+
+Or add an entry to your branch.conf like::
+
+ changelog_merge_files = ChangeLog
+
+The changelog_merge_files config option takes a list of file names (not paths),
+separated by commas. (This is unlike the news_merge plugin, which matches
+paths.) For example, the above config examples would match both
+``src/foolib/ChangeLog`` and ``docs/ChangeLog``.
+
+The algorithm used to merge the changes can be summarised as:
+
+ * new entries added to the top of OTHER are emitted first
+ * all other additions, deletions and edits from THIS and OTHER are preserved
+ * edits (e.g. to fix typos) at the top of OTHER are hard to distinguish from
+ adding and deleting independent entries; the algorithm tries to guess which
+ based on how similar the old and new entries are.
+
+Caveats
+-------
+
+Most changes can be merged, but conflicts are possible if the plugin finds
+edits at the top of OTHER to entries that have been deleted (or also edited) by
+THIS. In that case the plugin gives up and bzr's default merge logic will be
+used.
+
+No effort is made to deduplicate entries added by both sides.
+
+The results depend on the choice of the 'base' version, so it might give
+strange results if there is a criss-cross merge.
+"""
+
+# Since we are a built-in plugin we share the bzrlib version
+from bzrlib import version_info
+from bzrlib.hooks import install_lazy_named_hook
+
+# Put most of the code in a separate module that we lazy-import to keep the
+# overhead of this plugin as minimal as possible.
+def changelog_merge_hook(merger):
+ """Merger.merge_file_content hook for GNU-format ChangeLog files."""
+ from bzrlib.plugins.changelog_merge.changelog_merge import ChangeLogMerger
+ return ChangeLogMerger(merger)
+
+install_lazy_named_hook("bzrlib.merge", "Merger.hooks", "merge_file_content",
+ changelog_merge_hook, 'GNU ChangeLog file merge')
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'tests',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
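
The module docstring above gives the configuration in terms of locations.conf
and branch.conf. As a minimal sketch (assuming a branch already exists in the
current directory, and using 'ChangeLog' purely as an example file name), the
same option can also be set programmatically, mirroring what the plugin's own
test suite does:

    from bzrlib import branch as _mod_branch

    b = _mod_branch.Branch.open('.')
    # Route every file named 'ChangeLog' in this branch through the
    # changelog_merge hook on future merges.
    b.get_config().set_user_option('changelog_merge_files', 'ChangeLog')
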
diff --git a/bzrlib/plugins/changelog_merge/changelog_merge.py b/bzrlib/plugins/changelog_merge/changelog_merge.py
new file mode 100644
index 0000000..2b3ce8a
--- /dev/null
+++ b/bzrlib/plugins/changelog_merge/changelog_merge.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Merge logic for changelog_merge plugin."""
+
+from __future__ import absolute_import
+
+import difflib
+
+from bzrlib import (
+ debug,
+ merge,
+ urlutils,
+ )
+from bzrlib.merge3 import Merge3
+from bzrlib.trace import mutter
+
+
+def changelog_entries(lines):
+ """Return a list of changelog entries.
+
+ :param lines: lines of a changelog file.
+ :returns: list of entries. Each entry is a tuple of lines.
+ """
+ entries = []
+ for line in lines:
+ if line[0] not in (' ', '\t', '\n'):
+ # new entry
+ entries.append([line])
+ else:
+ try:
+ entry = entries[-1]
+ except IndexError:
+ # Cope with leading blank lines.
+ entries.append([])
+ entry = entries[-1]
+ entry.append(line)
+ return map(tuple, entries)
+
+
+def entries_to_lines(entries):
+ """Turn a list of entries into a flat iterable of lines."""
+ for entry in entries:
+ for line in entry:
+ yield line
+
+
+class ChangeLogMerger(merge.ConfigurableFileMerger):
+ """Merge GNU-format ChangeLog files."""
+
+ name_prefix = "changelog"
+
+ def get_filepath(self, params, tree):
+ """Calculate the path to the file in a tree.
+
+ This is overridden to return just the basename, rather than full path,
+ so that e.g. if the config says ``changelog_merge_files = ChangeLog``,
+ then all ChangeLog files in the tree will match (not just one in the
+ root of the tree).
+
+ :param params: A MergeHookParams describing the file to merge
+ :param tree: a Tree, e.g. self.merger.this_tree.
+ """
+ return urlutils.basename(tree.id2path(params.file_id))
+
+ def merge_text(self, params):
+ """Merge changelog changes.
+
+ * new entries from other will float to the top
+ * edits to older entries are preserved
+ """
+ # Transform files into lists of changelog entries
+ this_entries = changelog_entries(params.this_lines)
+ other_entries = changelog_entries(params.other_lines)
+ base_entries = changelog_entries(params.base_lines)
+ try:
+ result_entries = merge_entries(
+ base_entries, this_entries, other_entries)
+ except EntryConflict:
+ # XXX: generating a nice conflict file would be better
+ return 'not_applicable', None
+ # Transform the merged elements back into real blocks of lines.
+ return 'success', entries_to_lines(result_entries)
+
+
+class EntryConflict(Exception):
+ pass
+
+
+def default_guess_edits(new_entries, deleted_entries, entry_as_str=''.join):
+ """Default implementation of guess_edits param of merge_entries.
+
+ This algorithm does O(N^2 * logN) SequenceMatcher.ratio() calls, which is
+ pretty bad, but it shouldn't be used very often.
+ """
+ deleted_entries_as_strs = map(entry_as_str, deleted_entries)
+ new_entries_as_strs = map(entry_as_str, new_entries)
+ result_new = list(new_entries)
+ result_deleted = list(deleted_entries)
+ result_edits = []
+ sm = difflib.SequenceMatcher()
+ CUTOFF = 0.8
+ while True:
+ best = None
+ best_score = CUTOFF
+ # Compare each new entry with each old entry to find the best match
+ for new_entry_as_str in new_entries_as_strs:
+ sm.set_seq1(new_entry_as_str)
+ for old_entry_as_str in deleted_entries_as_strs:
+ sm.set_seq2(old_entry_as_str)
+ score = sm.ratio()
+ if score > best_score:
+ best = new_entry_as_str, old_entry_as_str
+ best_score = score
+ if best is not None:
+            # Add the best match to the list of edits, and remove it from
+            # the list of new/old entries. Also remove it from the new/old
+ # lists for the next round.
+ del_index = deleted_entries_as_strs.index(best[1])
+ new_index = new_entries_as_strs.index(best[0])
+ result_edits.append(
+ (result_deleted[del_index], result_new[new_index]))
+ del deleted_entries_as_strs[del_index], result_deleted[del_index]
+ del new_entries_as_strs[new_index], result_new[new_index]
+ else:
+ # No match better than CUTOFF exists in the remaining new and old
+ # entries.
+ break
+ return result_new, result_deleted, result_edits
+
+
+def merge_entries(base_entries, this_entries, other_entries,
+ guess_edits=default_guess_edits):
+ """Merge changelog given base, this, and other versions."""
+ m3 = Merge3(base_entries, this_entries, other_entries, allow_objects=True)
+ result_entries = []
+ at_top = True
+ for group in m3.merge_groups():
+ if 'changelog_merge' in debug.debug_flags:
+ mutter('merge group:\n%r', group)
+ group_kind = group[0]
+ if group_kind == 'conflict':
+ _, base, this, other = group
+ # Find additions
+ new_in_other = [
+ entry for entry in other if entry not in base]
+ # Find deletions
+ deleted_in_other = [
+ entry for entry in base if entry not in other]
+ if at_top and deleted_in_other:
+                # Magic! Compare deletions and additions to try to spot edits
+ new_in_other, deleted_in_other, edits_in_other = guess_edits(
+ new_in_other, deleted_in_other)
+ else:
+ # Changes not made at the top are always preserved as is, no
+                # need to try to distinguish edits from adds and deletes.
+ edits_in_other = []
+ if 'changelog_merge' in debug.debug_flags:
+ mutter('at_top: %r', at_top)
+ mutter('new_in_other: %r', new_in_other)
+ mutter('deleted_in_other: %r', deleted_in_other)
+ mutter('edits_in_other: %r', edits_in_other)
+ # Apply deletes and edits
+ updated_this = [
+ entry for entry in this if entry not in deleted_in_other]
+ for old_entry, new_entry in edits_in_other:
+ try:
+ index = updated_this.index(old_entry)
+ except ValueError:
+ # edited entry no longer present in this! Just give up and
+ # declare a conflict.
+ raise EntryConflict()
+ updated_this[index] = new_entry
+ if 'changelog_merge' in debug.debug_flags:
+ mutter('updated_this: %r', updated_this)
+ if at_top:
+ # Float new entries from other to the top
+ result_entries = new_in_other + result_entries
+ else:
+ result_entries.extend(new_in_other)
+ result_entries.extend(updated_this)
+ else: # unchanged, same, a, or b.
+ lines = group[1]
+ result_entries.extend(lines)
+ at_top = False
+ return result_entries
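
As a rough, self-contained sketch of the merge behaviour implemented above
(the entry strings are only illustrative and mirror
test_new_in_other_floats_to_top in the test module that follows), new entries
from OTHER float above the new entries from THIS:

    from bzrlib.plugins.changelog_merge.changelog_merge import merge_entries

    base = ['OLD-1']
    this = ['NEW-1', 'OLD-1']
    other = ['NEW-2', 'OLD-1']
    # Additions made at the top of OTHER are emitted first.
    print merge_entries(base, this, other)
    # ['NEW-2', 'NEW-1', 'OLD-1']
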
diff --git a/bzrlib/plugins/changelog_merge/tests/__init__.py b/bzrlib/plugins/changelog_merge/tests/__init__.py
new file mode 100644
index 0000000..62c2658
--- /dev/null
+++ b/bzrlib/plugins/changelog_merge/tests/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2011 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_changelog_merge',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
+
diff --git a/bzrlib/plugins/changelog_merge/tests/test_changelog_merge.py b/bzrlib/plugins/changelog_merge/tests/test_changelog_merge.py
new file mode 100644
index 0000000..a7b0d3f
--- /dev/null
+++ b/bzrlib/plugins/changelog_merge/tests/test_changelog_merge.py
@@ -0,0 +1,222 @@
+# Copyright (C) 2011 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ merge,
+ tests,
+ )
+from bzrlib.tests import test_merge_core
+from bzrlib.plugins.changelog_merge import changelog_merge
+
+
+sample_base_entries = [
+ 'Base entry B1',
+ 'Base entry B2',
+ 'Base entry B3',
+ ]
+
+sample_this_entries = [
+ 'This entry T1',
+ 'This entry T2',
+ #'Base entry B1 updated',
+ 'Base entry B1',
+ 'Base entry B2',
+ 'Base entry B3',
+ ]
+
+sample_other_entries = [
+ 'Other entry O1',
+ #'Base entry B1',
+ 'Base entry B1',
+ 'Base entry B2 updated',
+ 'Base entry B3',
+ ]
+
+
+sample2_base_entries = [
+ 'Base entry B1',
+ 'Base entry B2',
+ 'Base entry B3',
+ ]
+
+sample2_this_entries = [
+ 'This entry T1',
+ 'This entry T2',
+ #'Base entry B1 updated',
+ 'Base entry B1',
+ 'Base entry B2',
+ ]
+
+sample2_other_entries = [
+ 'Other entry O1',
+ #'Base entry B1',
+ 'Base entry B1 edit', # > 80% similar according to difflib
+ 'Base entry B2',
+ ]
+
+
+class TestMergeCoreLogic(tests.TestCase):
+
+ def test_new_in_other_floats_to_top(self):
+ """Changes at the top of 'other' float to the top.
+
+ Given a changelog in THIS containing::
+
+ NEW-1
+ OLD-1
+
+ and a changelog in OTHER containing::
+
+ NEW-2
+ OLD-1
+
+ it will merge as::
+
+ NEW-2
+ NEW-1
+ OLD-1
+ """
+ base_entries = ['OLD-1']
+ this_entries = ['NEW-1', 'OLD-1']
+ other_entries = ['NEW-2', 'OLD-1']
+ result_entries = changelog_merge.merge_entries(
+ base_entries, this_entries, other_entries)
+ self.assertEqual(
+ ['NEW-2', 'NEW-1', 'OLD-1'], result_entries)
+
+ def test_acceptance_bug_723968(self):
+ """Merging a branch that:
+
+ 1. adds a new entry, and
+ 2. edits an old entry (e.g. to fix a typo or twiddle formatting)
+
+ will:
+
+ 1. add the new entry to the top
+ 2. keep the edit, without duplicating the edited entry or moving it.
+ """
+ result_entries = changelog_merge.merge_entries(
+ sample_base_entries, sample_this_entries, sample_other_entries)
+ self.assertEqual([
+ 'Other entry O1',
+ 'This entry T1',
+ 'This entry T2',
+ 'Base entry B1',
+ 'Base entry B2 updated',
+ 'Base entry B3',
+ ],
+ list(result_entries))
+
+ def test_more_complex_conflict(self):
+ """Like test_acceptance_bug_723968, but with a more difficult conflict:
+ the new entry and the edited entry are adjacent.
+ """
+ def guess_edits(new, deleted):
+ #import pdb; pdb.set_trace()
+ return changelog_merge.default_guess_edits(new, deleted,
+ entry_as_str=lambda x: x)
+ result_entries = changelog_merge.merge_entries(
+ sample2_base_entries, sample2_this_entries, sample2_other_entries,
+ guess_edits=guess_edits)
+ self.assertEqual([
+ 'Other entry O1',
+ 'This entry T1',
+ 'This entry T2',
+ 'Base entry B1 edit',
+ 'Base entry B2',
+ ],
+ list(result_entries))
+
+ def test_too_hard(self):
+ """A conflict this plugin cannot resolve raises EntryConflict.
+ """
+ # An entry edited in other but deleted in this is a conflict we can't
+ # resolve. (Ideally perhaps we'd generate a nice conflict file, but
+ # for now we just give up.)
+ self.assertRaises(changelog_merge.EntryConflict,
+ changelog_merge.merge_entries,
+ sample2_base_entries, [], sample2_other_entries)
+
+ def test_default_guess_edits(self):
+ """default_guess_edits matches a new entry only once.
+
+ (Even when that entry is the best match for multiple old entries.)
+ """
+ new_in_other = [('AAAAA',), ('BBBBB',)]
+ deleted_in_other = [('DDDDD',), ('BBBBBx',), ('BBBBBxx',)]
+ # BBBBB is the best match for both BBBBBx and BBBBBxx
+ result = changelog_merge.default_guess_edits(
+ new_in_other, deleted_in_other)
+ self.assertEqual(
+ ([('AAAAA',)], # new
+ [('DDDDD',), ('BBBBBxx',)], # deleted
+ [(('BBBBBx',), ('BBBBB',))]), # edits
+ result)
+
+
+class TestChangeLogMerger(tests.TestCaseWithTransport):
+ """Tests for ChangeLogMerger class.
+
+ Most tests should be unit tests for merge_entries (and its helpers).
+ This class is just to cover the handful of lines of code in ChangeLogMerger
+ itself.
+ """
+
+ def make_builder(self):
+ builder = test_merge_core.MergeBuilder(self.test_base_dir)
+ self.addCleanup(builder.cleanup)
+ return builder
+
+ def make_changelog_merger(self, base_text, this_text, other_text):
+ builder = self.make_builder()
+ builder.add_file('clog-id', builder.tree_root, 'ChangeLog',
+ base_text, True)
+ builder.change_contents('clog-id', other=other_text, this=this_text)
+ merger = builder.make_merger(merge.Merge3Merger, ['clog-id'])
+ # The following can't use config stacks until the plugin itself does
+ # ('this_branch' is already write locked at this point and as such
+ # won't write the new value to disk where get_user_option can get it).
+ merger.this_branch.get_config().set_user_option(
+ 'changelog_merge_files', 'ChangeLog')
+ merge_hook_params = merge.MergeFileHookParams(merger, 'clog-id', None,
+ 'file', 'file', 'conflict')
+ changelog_merger = changelog_merge.ChangeLogMerger(merger)
+ return changelog_merger, merge_hook_params
+
+ def test_merge_text_returns_not_applicable(self):
+ """A conflict this plugin cannot resolve returns (not_applicable, None).
+ """
+ # Build same example as TestMergeCoreLogic.test_too_hard: edit an entry
+ # in other but delete it in this.
+ def entries_as_str(entries):
+ return ''.join(entry + '\n' for entry in entries)
+ changelog_merger, merge_hook_params = self.make_changelog_merger(
+ entries_as_str(sample2_base_entries),
+ '',
+ entries_as_str(sample2_other_entries))
+ self.assertEqual(
+ ('not_applicable', None),
+ changelog_merger.merge_contents(merge_hook_params))
+
+ def test_merge_text_returns_success(self):
+ """A successful merge returns ('success', lines)."""
+ changelog_merger, merge_hook_params = self.make_changelog_merger(
+ '', 'this text\n', 'other text\n')
+ status, lines = changelog_merger.merge_contents(merge_hook_params)
+ self.assertEqual(
+ ('success', ['other text\n', 'this text\n']),
+ (status, list(lines)))
+
diff --git a/bzrlib/plugins/launchpad/__init__.py b/bzrlib/plugins/launchpad/__init__.py
new file mode 100644
index 0000000..5673d95
--- /dev/null
+++ b/bzrlib/plugins/launchpad/__init__.py
@@ -0,0 +1,201 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Launchpad.net integration plugin for Bazaar.
+
+This plugin provides facilities for working with Bazaar branches that are
+hosted on Launchpad (http://launchpad.net). It provides a directory service
+for referring to Launchpad branches using the "lp:" prefix. For example,
+lp:bzr refers to Bazaar's main development branch and
+lp:~username/project/branch-name can be used to refer to a specific branch.
+
+This plugin provides a bug tracker so that "bzr commit --fixes lp:1234" will
+record that revision as fixing Launchpad's bug 1234.
+
+The plugin also provides the following commands:
+
+ launchpad-login: Show or set the Launchpad user ID
+ launchpad-open: Open a Launchpad branch page in your web browser
+ lp-propose-merge: Propose merging a branch on Launchpad
+ register-branch: Register a branch with launchpad.net
+ launchpad-mirror: Ask Launchpad to mirror a branch now
+
+"""
+
+from __future__ import absolute_import
+
+# The XMLRPC server address can be overridden by setting the environment
+# variable $BZR_LP_XMLRPC_URL
+
+# see http://wiki.bazaar.canonical.com/Specs/BranchRegistrationTool
+
+from bzrlib import (
+ branch as _mod_branch,
+ config as _mod_config,
+ lazy_regex,
+ # Since we are a built-in plugin we share the bzrlib version
+ trace,
+ version_info,
+ )
+from bzrlib.commands import (
+ plugin_cmds,
+ )
+from bzrlib.directory_service import directories
+from bzrlib.help_topics import topic_registry
+
+for klsname, aliases in [
+ ("cmd_register_branch", []),
+ ("cmd_launchpad_open", ["lp-open"]),
+ ("cmd_launchpad_login", ["lp-login"]),
+ ("cmd_launchpad_mirror", ["lp-mirror"]),
+ ("cmd_lp_propose_merge", ["lp-submit", "lp-propose"]),
+ ("cmd_lp_find_proposal", [])]:
+ plugin_cmds.register_lazy(klsname, aliases,
+ "bzrlib.plugins.launchpad.cmds")
+
+
+def _register_directory():
+ directories.register_lazy('lp:', 'bzrlib.plugins.launchpad.lp_directory',
+ 'LaunchpadDirectory',
+ 'Launchpad-based directory service',)
+ directories.register_lazy(
+ 'debianlp:', 'bzrlib.plugins.launchpad.lp_directory',
+ 'LaunchpadDirectory',
+ 'debianlp: shortcut')
+ directories.register_lazy(
+ 'ubuntu:', 'bzrlib.plugins.launchpad.lp_directory',
+ 'LaunchpadDirectory',
+ 'ubuntu: shortcut')
+
+_register_directory()
+
+# This is kept in __init__ so that we don't load lp_api_lite unless the branch
+# actually matches. That way we can avoid importing extra dependencies like
+# json.
+_package_branch = lazy_regex.lazy_compile(
+ r'bazaar.launchpad.net.*?/'
+ r'(?P<user>~[^/]+/)?(?P<archive>ubuntu|debian)/(?P<series>[^/]+/)?'
+ r'(?P<project>[^/]+)(?P<branch>/[^/]+)?'
+ )
+
+def _get_package_branch_info(url):
+ """Determine the packaging information for this URL.
+
+ :return: If this isn't a packaging branch, return None. If it is, return
+ (archive, series, project)
+ """
+ if url is None:
+ return None
+ m = _package_branch.search(url)
+ if m is None:
+ return None
+ archive, series, project, user = m.group('archive', 'series',
+ 'project', 'user')
+ if series is not None:
+        # series is optional, so the regex includes the extra '/'; we don't
+ # want to send that on (it causes Internal Server Errors.)
+ series = series.strip('/')
+ if user is not None:
+ user = user.strip('~/')
+ if user != 'ubuntu-branches':
+ return None
+ return archive, series, project
+
+
+def _check_is_up_to_date(the_branch):
+ info = _get_package_branch_info(the_branch.base)
+ if info is None:
+ return
+ c = the_branch.get_config_stack()
+ verbosity = c.get('launchpad.packaging_verbosity')
+ if not verbosity:
+ trace.mutter('not checking %s because verbosity is turned off'
+ % (the_branch.base,))
+ return
+ archive, series, project = info
+ from bzrlib.plugins.launchpad import lp_api_lite
+ latest_pub = lp_api_lite.LatestPublication(archive, series, project)
+ lp_api_lite.report_freshness(the_branch, verbosity, latest_pub)
+
+
+def _register_hooks():
+ _mod_branch.Branch.hooks.install_named_hook('open',
+ _check_is_up_to_date, 'package-branch-up-to-date')
+
+
+_register_hooks()
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_account',
+ 'test_register',
+ 'test_lp_api',
+ 'test_lp_api_lite',
+ 'test_lp_directory',
+ 'test_lp_login',
+ 'test_lp_open',
+ 'test_lp_service',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
+
+
+_launchpad_help = """Integration with Launchpad.net
+
+Launchpad.net provides free Bazaar branch hosting with integrated bug and
+specification tracking.
+
+The bzr client (through the plugin called 'launchpad') has special
+features to communicate with Launchpad:
+
+ * The launchpad-login command tells Bazaar your Launchpad user name. This
+ is then used by the 'lp:' transport to download your branches using
+ bzr+ssh://.
+
+ * The 'lp:' transport uses Launchpad as a directory service: for example
+ 'lp:bzr' and 'lp:python' refer to the main branches of the relevant
+ projects and may be branched, logged, etc. You can also use the 'lp:'
+ transport to refer to specific branches, e.g. lp:~bzr/bzr/trunk.
+
+ * The 'lp:' bug tracker alias can expand launchpad bug numbers to their
+ URLs for use with 'bzr commit --fixes', e.g. 'bzr commit --fixes lp:12345'
+ will record a revision property that marks that revision as fixing
+ Launchpad bug 12345. When you push that branch to Launchpad it will
+ automatically be linked to the bug report.
+
+ * The register-branch command tells Launchpad about the url of a
+ public branch. Launchpad will then mirror the branch, display
+ its contents and allow it to be attached to bugs and other
+ objects.
+
+For more information see http://help.launchpad.net/
+"""
+topic_registry.register('launchpad',
+ _launchpad_help,
+ 'Using Bazaar with Launchpad.net')
+
+_mod_config.option_registry.register(
+ _mod_config.Option('launchpad.packaging_verbosity', default=True,
+ from_unicode=_mod_config.bool_from_store,
+ help="""\
+Whether to warn when accessing a UDD package import branch that is out of date.
+
+Setting this option to 'off' will disable verbosity.
+"""))
+_mod_config.option_registry.register(
+ _mod_config.Option('launchpad_username', default=None,
+ help="The username to login with when conneting to Launchpad."))
diff --git a/bzrlib/plugins/launchpad/account.py b/bzrlib/plugins/launchpad/account.py
new file mode 100644
index 0000000..657b261
--- /dev/null
+++ b/bzrlib/plugins/launchpad/account.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Functions to manage the user's Launchpad user ID.
+
+This allows the user to configure their Launchpad user ID once, rather
+than once for each place that needs to take it into account.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ trace,
+ transport,
+ )
+from bzrlib.config import AuthenticationConfig, GlobalStack
+from bzrlib.i18n import gettext
+
+LAUNCHPAD_BASE = 'https://launchpad.net/'
+
+
+class UnknownLaunchpadUsername(errors.BzrError):
+ _fmt = "The user name %(user)s is not registered on Launchpad."
+
+
+class NoRegisteredSSHKeys(errors.BzrError):
+ _fmt = "The user %(user)s has not registered any SSH keys with Launchpad.\n" \
+ "See <https://launchpad.net/people/+me>"
+
+
+class MismatchedUsernames(errors.BzrError):
+
+ _fmt = ('bazaar.conf and authentication.conf disagree about launchpad'
+ ' account name. Please re-run launchpad-login.')
+
+
+def get_lp_login(_config=None):
+ """Return the user's Launchpad username.
+
+    :raises: MismatchedUsernames if authentication.conf and bazaar.conf
+ disagree about username.
+ """
+ if _config is None:
+ _config = GlobalStack()
+
+ username = _config.get('launchpad_username')
+ if username is not None:
+ auth = AuthenticationConfig()
+ auth_username = _get_auth_user(auth)
+ # Auto-upgrading
+ if auth_username is None:
+ trace.note(gettext('Setting ssh/sftp usernames for launchpad.net.'))
+ _set_auth_user(username, auth)
+ elif auth_username != username:
+ raise MismatchedUsernames()
+ return username
+
+
+def _set_global_option(username, _config=None):
+ if _config is None:
+ _config = GlobalStack()
+ _config.set('launchpad_username', username)
+
+
+def set_lp_login(username, _config=None):
+ """Set the user's Launchpad username"""
+ _set_global_option(username, _config)
+ _set_auth_user(username)
+
+
+def _get_auth_user(auth=None):
+ if auth is None:
+ auth = AuthenticationConfig()
+ username = auth.get_user('ssh', '.launchpad.net')
+ return username
+
+def _set_auth_user(username, auth=None):
+ if auth is None:
+ auth = AuthenticationConfig()
+ auth.set_credentials(
+ 'Launchpad', '.launchpad.net', username, 'ssh')
+
+
+def check_lp_login(username, _transport=None):
+ """Check whether the given Launchpad username is okay.
+
+ This will check for both existence and whether the user has
+ uploaded SSH keys.
+ """
+ if _transport is None:
+ _transport = transport.get_transport_from_url(LAUNCHPAD_BASE)
+
+ try:
+ data = _transport.get_bytes('~%s/+sshkeys' % username)
+ except errors.NoSuchFile:
+ raise UnknownLaunchpadUsername(user=username)
+
+ if not data:
+ raise NoRegisteredSSHKeys(user=username)
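
A minimal sketch of the two entry points above. 'jrandom' is only a
placeholder ID, and note that set_lp_login really does write to bazaar.conf
and authentication.conf for whoever runs it:

    from bzrlib.plugins.launchpad import account

    # Record the Launchpad user ID once (stored in bazaar.conf, with matching
    # ssh credentials in authentication.conf).
    account.set_lp_login('jrandom')
    # Later callers, such as the lp: directory service, read it back:
    print account.get_lp_login()
    # jrandom
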
diff --git a/bzrlib/plugins/launchpad/cmds.py b/bzrlib/plugins/launchpad/cmds.py
new file mode 100644
index 0000000..1b2844e
--- /dev/null
+++ b/bzrlib/plugins/launchpad/cmds.py
@@ -0,0 +1,410 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Launchpad plugin commands."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ trace,
+ )
+from bzrlib.commands import (
+ Command,
+ )
+from bzrlib.errors import (
+ BzrCommandError,
+ InvalidRevisionSpec,
+ InvalidURL,
+ NoPublicBranch,
+ NotBranchError,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.option import (
+ Option,
+ ListOption,
+ )
+
+
+class cmd_register_branch(Command):
+ __doc__ = """Register a branch with launchpad.net.
+
+ This command lists a bzr branch in the directory of branches on
+ launchpad.net. Registration allows the branch to be associated with
+ bugs or specifications.
+
+ Before using this command you must register the project to which the
+ branch belongs, and create an account for yourself on launchpad.net.
+
+ arguments:
+ public_url: The publicly visible url for the branch to register.
+ This must be an http or https url (which Launchpad can read
+ from to access the branch). Local file urls, SFTP urls, and
+ bzr+ssh urls will not work.
+ If no public_url is provided, bzr will use the configured
+ public_url if there is one for the current branch, and
+ otherwise error.
+
+ example:
+ bzr register-branch http://foo.com/bzr/fooproject.mine \\
+ --project fooproject
+ """
+ takes_args = ['public_url?']
+ takes_options = [
+ Option('project',
+ 'Launchpad project short name to associate with the branch.',
+ unicode),
+ Option('product',
+ 'Launchpad product short name to associate with the branch.',
+ unicode,
+ hidden=True),
+ Option('branch-name',
+ 'Short name for the branch; '
+ 'by default taken from the last component of the url.',
+ unicode),
+ Option('branch-title',
+ 'One-sentence description of the branch.',
+ unicode),
+ Option('branch-description',
+ 'Longer description of the purpose or contents of the branch.',
+ unicode),
+ Option('author',
+ "Branch author's email address, if not yourself.",
+ unicode),
+ Option('link-bug',
+ 'The bug this branch fixes.',
+ int),
+ Option('dry-run',
+ 'Prepare the request but don\'t actually send it.')
+ ]
+
+
+ def run(self,
+ public_url=None,
+ project='',
+ product=None,
+ branch_name='',
+ branch_title='',
+ branch_description='',
+ author='',
+ link_bug=None,
+ dry_run=False):
+ from bzrlib.plugins.launchpad.lp_registration import (
+ BranchRegistrationRequest, BranchBugLinkRequest,
+ DryRunLaunchpadService, LaunchpadService)
+ if public_url is None:
+ try:
+ b = _mod_branch.Branch.open_containing('.')[0]
+ except NotBranchError:
+ raise BzrCommandError(gettext(
+ 'register-branch requires a public '
+ 'branch url - see bzr help register-branch.'))
+ public_url = b.get_public_branch()
+ if public_url is None:
+ raise NoPublicBranch(b)
+ if product is not None:
+ project = product
+ trace.note(gettext(
+ '--product is deprecated; please use --project.'))
+
+
+ rego = BranchRegistrationRequest(branch_url=public_url,
+ branch_name=branch_name,
+ branch_title=branch_title,
+ branch_description=branch_description,
+ product_name=project,
+ author_email=author,
+ )
+ linko = BranchBugLinkRequest(branch_url=public_url,
+ bug_id=link_bug)
+ if not dry_run:
+ service = LaunchpadService()
+ # This gives back the xmlrpc url that can be used for future
+ # operations on the branch. It's not so useful to print to the
+ # user since they can't do anything with it from a web browser; it
+ # might be nice for the server to tell us about an html url as
+ # well.
+ else:
+ # Run on service entirely in memory
+ service = DryRunLaunchpadService()
+ service.gather_user_credentials()
+ rego.submit(service)
+ if link_bug:
+ linko.submit(service)
+ self.outf.write('Branch registered.\n')
+
+
+class cmd_launchpad_open(Command):
+ __doc__ = """Open a Launchpad branch page in your web browser."""
+
+ aliases = ['lp-open']
+ takes_options = [
+ Option('dry-run',
+ 'Do not actually open the browser. Just say the URL we would '
+ 'use.'),
+ ]
+ takes_args = ['location?']
+
+ def _possible_locations(self, location):
+ """Yield possible external locations for the branch at 'location'."""
+ yield location
+ try:
+ branch = _mod_branch.Branch.open_containing(location)[0]
+ except NotBranchError:
+ return
+ branch_url = branch.get_public_branch()
+ if branch_url is not None:
+ yield branch_url
+ branch_url = branch.get_push_location()
+ if branch_url is not None:
+ yield branch_url
+
+ def _get_web_url(self, service, location):
+ from bzrlib.plugins.launchpad.lp_registration import (
+ NotLaunchpadBranch)
+ for branch_url in self._possible_locations(location):
+ try:
+ return service.get_web_url_from_branch_url(branch_url)
+ except (NotLaunchpadBranch, InvalidURL):
+ pass
+ raise NotLaunchpadBranch(branch_url)
+
+ def run(self, location=None, dry_run=False):
+ from bzrlib.plugins.launchpad.lp_registration import (
+ LaunchpadService)
+ if location is None:
+ location = u'.'
+ web_url = self._get_web_url(LaunchpadService(), location)
+ trace.note(gettext('Opening %s in web browser') % web_url)
+ if not dry_run:
+ import webbrowser # this import should not be lazy
+ # otherwise bzr.exe lacks this module
+ webbrowser.open(web_url)
+
+
+class cmd_launchpad_login(Command):
+ __doc__ = """Show or set the Launchpad user ID.
+
+ When communicating with Launchpad, some commands need to know your
+ Launchpad user ID. This command can be used to set or show the
+ user ID that Bazaar will use for such communication.
+
+ :Examples:
+ Show the Launchpad ID of the current user::
+
+ bzr launchpad-login
+
+ Set the Launchpad ID of the current user to 'bob'::
+
+ bzr launchpad-login bob
+ """
+ aliases = ['lp-login']
+ takes_args = ['name?']
+ takes_options = [
+ 'verbose',
+ Option('no-check',
+ "Don't check that the user name is valid."),
+ ]
+
+ def run(self, name=None, no_check=False, verbose=False):
+ # This is totally separate from any launchpadlib login system.
+ from bzrlib.plugins.launchpad import account
+ check_account = not no_check
+
+ if name is None:
+ username = account.get_lp_login()
+ if username:
+ if check_account:
+ account.check_lp_login(username)
+ if verbose:
+ self.outf.write(gettext(
+ "Launchpad user ID exists and has SSH keys.\n"))
+ self.outf.write(username + '\n')
+ else:
+ self.outf.write(gettext('No Launchpad user ID configured.\n'))
+ return 1
+ else:
+ name = name.lower()
+ if check_account:
+ account.check_lp_login(name)
+ if verbose:
+ self.outf.write(gettext(
+ "Launchpad user ID exists and has SSH keys.\n"))
+ account.set_lp_login(name)
+ if verbose:
+ self.outf.write(gettext("Launchpad user ID set to '%s'.\n") %
+ (name,))
+
+
+# XXX: cmd_launchpad_mirror is untested
+class cmd_launchpad_mirror(Command):
+ __doc__ = """Ask Launchpad to mirror a branch now."""
+
+ aliases = ['lp-mirror']
+ takes_args = ['location?']
+
+ def run(self, location='.'):
+ from bzrlib.plugins.launchpad import lp_api
+ from bzrlib.plugins.launchpad.lp_registration import LaunchpadService
+ branch, _ = _mod_branch.Branch.open_containing(location)
+ service = LaunchpadService()
+ launchpad = lp_api.login(service)
+ lp_branch = lp_api.LaunchpadBranch.from_bzr(launchpad, branch,
+ create_missing=False)
+ lp_branch.lp.requestMirror()
+
+
+class cmd_lp_propose_merge(Command):
+ __doc__ = """Propose merging a branch on Launchpad.
+
+ This will open your usual editor to provide the initial comment. When it
+ has created the proposal, it will open it in your default web browser.
+
+ The branch will be proposed to merge into SUBMIT_BRANCH. If SUBMIT_BRANCH
+ is not supplied, the remembered submit branch will be used. If no submit
+ branch is remembered, the development focus will be used.
+
+ By default, the SUBMIT_BRANCH's review team will be requested to review
+    the merge proposal. This can be overridden by specifying --review (-R).
+    The parameter is the Launchpad account name of the desired reviewer. This
+ may optionally be followed by '=' and the review type. For example:
+
+ bzr lp-propose-merge --review jrandom --review review-team=qa
+
+ This will propose a merge, request "jrandom" to perform a review of
+ unspecified type, and request "review-team" to perform a "qa" review.
+ """
+
+ takes_options = [Option('staging',
+ help='Propose the merge on staging.'),
+ Option('message', short_name='m', type=unicode,
+ help='Commit message.'),
+ Option('approve',
+ help='Mark the proposal as approved immediately.'),
+ Option('fixes', 'The bug this proposal fixes.', str),
+ ListOption('review', short_name='R', type=unicode,
+ help='Requested reviewer and optional type.')]
+
+ takes_args = ['submit_branch?']
+
+ aliases = ['lp-submit', 'lp-propose']
+
+ def run(self, submit_branch=None, review=None, staging=False,
+ message=None, approve=False, fixes=None):
+ from bzrlib.plugins.launchpad import lp_propose
+ tree, branch, relpath = controldir.ControlDir.open_containing_tree_or_branch(
+ '.')
+ if review is None:
+ reviews = None
+ else:
+ reviews = []
+ for review in review:
+ if '=' in review:
+ reviews.append(review.split('=', 2))
+ else:
+ reviews.append((review, ''))
+ if submit_branch is None:
+ submit_branch = branch.get_submit_branch()
+ if submit_branch is None:
+ target = None
+ else:
+ target = _mod_branch.Branch.open(submit_branch)
+ proposer = lp_propose.Proposer(tree, branch, target, message,
+ reviews, staging, approve=approve,
+ fixes=fixes)
+ proposer.check_proposal()
+ proposer.create_proposal()
+
+
+class cmd_lp_find_proposal(Command):
+
+ __doc__ = """Find the proposal to merge this revision.
+
+ Finds the merge proposal(s) that discussed landing the specified revision.
+ This works only if the selected branch was the merge proposal target, and
+ if the merged_revno is recorded for the merge proposal. The proposal(s)
+ are opened in a web browser.
+
+    Any revision involved in the merge may be specified: the revision in
+ which the merge was performed, or one of the revisions that was merged.
+
+ So, to find the merge proposal that reviewed line 1 of README::
+
+ bzr lp-find-proposal -r annotate:README:1
+ """
+
+ takes_options = ['revision']
+
+ def run(self, revision=None):
+ from bzrlib import ui
+ from bzrlib.plugins.launchpad import lp_api
+ import webbrowser
+ b = _mod_branch.Branch.open_containing('.')[0]
+ pb = ui.ui_factory.nested_progress_bar()
+ b.lock_read()
+ try:
+ revno = self._find_merged_revno(revision, b, pb)
+ merged = self._find_proposals(revno, b, pb)
+ if len(merged) == 0:
+ raise BzrCommandError(gettext('No review found.'))
+            trace.note(gettext('%d proposal(s) found.') % len(merged))
+ for mp in merged:
+ webbrowser.open(lp_api.canonical_url(mp))
+ finally:
+ b.unlock()
+ pb.finished()
+
+ def _find_merged_revno(self, revision, b, pb):
+ if revision is None:
+ return b.revno()
+ pb.update(gettext('Finding revision-id'))
+ revision_id = revision[0].as_revision_id(b)
+ # a revno spec is necessarily on the mainline.
+ if self._is_revno_spec(revision[0]):
+ merging_revision = revision_id
+ else:
+ graph = b.repository.get_graph()
+ pb.update(gettext('Finding merge'))
+ merging_revision = graph.find_lefthand_merger(
+ revision_id, b.last_revision())
+ if merging_revision is None:
+ raise InvalidRevisionSpec(revision[0].user_spec, b)
+ pb.update(gettext('Finding revno'))
+ return b.revision_id_to_revno(merging_revision)
+
+ def _find_proposals(self, revno, b, pb):
+ from bzrlib.plugins.launchpad import (lp_api, lp_registration)
+ launchpad = lp_api.login(lp_registration.LaunchpadService())
+ pb.update(gettext('Finding Launchpad branch'))
+ lpb = lp_api.LaunchpadBranch.from_bzr(launchpad, b,
+ create_missing=False)
+ pb.update(gettext('Finding proposals'))
+ return list(lpb.lp.getMergeProposals(status=['Merged'],
+ merged_revnos=[revno]))
+
+
+ @staticmethod
+ def _is_revno_spec(spec):
+ try:
+ int(spec.user_spec)
+ except ValueError:
+ return False
+ else:
+ return True
+
+
+
diff --git a/bzrlib/plugins/launchpad/lp_api.py b/bzrlib/plugins/launchpad/lp_api.py
new file mode 100644
index 0000000..6d5a7a0
--- /dev/null
+++ b/bzrlib/plugins/launchpad/lp_api.py
@@ -0,0 +1,313 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tools for dealing with the Launchpad API."""
+
+from __future__ import absolute_import
+
+# Importing this module will be expensive, since it imports launchpadlib and
+# its dependencies. However, our plan is to only load this module when it is
+# needed by a command that uses it.
+
+
+import os
+import re
+import urlparse
+
+from bzrlib import (
+ branch,
+ config,
+ errors,
+ osutils,
+ trace,
+ transport,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.plugins.launchpad.lp_registration import (
+ InvalidLaunchpadInstance,
+ )
+
+try:
+ import launchpadlib
+except ImportError, e:
+ raise errors.DependencyNotPresent('launchpadlib', e)
+
+from launchpadlib.launchpad import (
+ STAGING_SERVICE_ROOT,
+ Launchpad,
+ )
+
+
+# Declare the minimum version of launchpadlib that we need in order to work.
+# 1.5.1 is the version of launchpadlib packaged in Ubuntu 9.10, the most
+# recent Ubuntu release at the time of writing.
+MINIMUM_LAUNCHPADLIB_VERSION = (1, 5, 1)
+
+
+def get_cache_directory():
+ """Return the directory to cache launchpadlib objects in."""
+ return osutils.pathjoin(config.config_dir(), 'launchpad')
+
+
+def parse_launchpadlib_version(version_number):
+ """Parse a version number of the style used by launchpadlib."""
+ return tuple(map(int, version_number.split('.')))
+
+
+def check_launchpadlib_compatibility():
+ """Raise an error if launchpadlib has the wrong version number."""
+ installed_version = parse_launchpadlib_version(launchpadlib.__version__)
+ if installed_version < MINIMUM_LAUNCHPADLIB_VERSION:
+ raise errors.IncompatibleAPI(
+ 'launchpadlib', MINIMUM_LAUNCHPADLIB_VERSION,
+ installed_version, installed_version)
+
+
+# The older versions of launchpadlib only provided service root constants for
+# edge and staging, whilst newer versions drop edge. Therefore service root
+# URIs for which we do not always have constants are derived from the staging
+# one, which does always exist.
+#
+# It is necessary to derive, rather than use hardcoded URIs because
+# launchpadlib <= 1.5.4 requires service root URIs that end in a path of
+# /beta/, whilst launchpadlib >= 1.5.5 requires service root URIs with no path
+# info.
+#
+# Once we have a hard dependency on launchpadlib >= 1.5.4 we can replace all of
+# bzr's local knowledge of individual Launchpad instances with use of the
+# launchpadlib.uris module.
+LAUNCHPAD_API_URLS = {
+ 'production': STAGING_SERVICE_ROOT.replace('api.staging.launchpad.net',
+ 'api.launchpad.net'),
+ 'qastaging': STAGING_SERVICE_ROOT.replace('api.staging.launchpad.net',
+ 'api.qastaging.launchpad.net'),
+ 'staging': STAGING_SERVICE_ROOT,
+ 'dev': STAGING_SERVICE_ROOT.replace('api.staging.launchpad.net',
+ 'api.launchpad.dev'),
+ }
+
+
+def _get_api_url(service):
+ """Return the root URL of the Launchpad API.
+
+ e.g. For the 'staging' Launchpad service, this function returns
+ launchpadlib.launchpad.STAGING_SERVICE_ROOT.
+
+ :param service: A `LaunchpadService` object.
+ :return: A URL as a string.
+ """
+ if service._lp_instance is None:
+ lp_instance = service.DEFAULT_INSTANCE
+ else:
+ lp_instance = service._lp_instance
+ try:
+ return LAUNCHPAD_API_URLS[lp_instance]
+ except KeyError:
+ raise InvalidLaunchpadInstance(lp_instance)
+
+
+class NoLaunchpadBranch(errors.BzrError):
+ _fmt = 'No launchpad branch could be found for branch "%(url)s".'
+
+ def __init__(self, branch):
+ errors.BzrError.__init__(self, branch=branch, url=branch.base)
+
+
+def login(service, timeout=None, proxy_info=None):
+ """Log in to the Launchpad API.
+
+ :return: The root `Launchpad` object from launchpadlib.
+ """
+ cache_directory = get_cache_directory()
+ launchpad = Launchpad.login_with(
+ 'bzr', _get_api_url(service), cache_directory, timeout=timeout,
+ proxy_info=proxy_info)
+ # XXX: Work-around a minor security bug in launchpadlib 1.5.1, which would
+ # create this directory with default umask.
+ osutils.chmod_if_possible(cache_directory, 0700)
+ return launchpad
+
+
+class LaunchpadBranch(object):
+ """Provide bzr and lp API access to a Launchpad branch."""
+
+ def __init__(self, lp_branch, bzr_url, bzr_branch=None, check_update=True):
+ """Constructor.
+
+ :param lp_branch: The Launchpad branch.
+ :param bzr_url: The URL of the Bazaar branch.
+ :param bzr_branch: An instance of the Bazaar branch.
+ """
+ self.bzr_url = bzr_url
+ self._bzr = bzr_branch
+ self._push_bzr = None
+ self._check_update = check_update
+ self.lp = lp_branch
+
+ @property
+ def bzr(self):
+ """Return the bzr branch for this branch."""
+ if self._bzr is None:
+ self._bzr = branch.Branch.open(self.bzr_url)
+ return self._bzr
+
+ @property
+ def push_bzr(self):
+ """Return the push branch for this branch."""
+ if self._push_bzr is None:
+ self._push_bzr = branch.Branch.open(self.lp.bzr_identity)
+ return self._push_bzr
+
+ @staticmethod
+ def plausible_launchpad_url(url):
+ """Is 'url' something that could conceivably be pushed to LP?
+
+ :param url: A URL that may refer to a Launchpad branch.
+ :return: A boolean.
+ """
+ if url is None:
+ return False
+ if url.startswith('lp:'):
+ return True
+ regex = re.compile('([a-z]*\+)*(bzr\+ssh|http)'
+ '://bazaar.*.launchpad.net')
+ return bool(regex.match(url))
+
+ @staticmethod
+ def candidate_urls(bzr_branch):
+ """Iterate through related URLs that might be Launchpad URLs.
+
+ :param bzr_branch: A Bazaar branch to find URLs from.
+ :return: a generator of URL strings.
+ """
+ url = bzr_branch.get_public_branch()
+ if url is not None:
+ yield url
+ url = bzr_branch.get_push_location()
+ if url is not None:
+ yield url
+ url = bzr_branch.get_parent()
+ if url is not None:
+ yield url
+ yield bzr_branch.base
+
+ @staticmethod
+ def tweak_url(url, launchpad):
+ """Adjust a URL to work with staging, if needed."""
+ if str(launchpad._root_uri) == STAGING_SERVICE_ROOT:
+ return url.replace('bazaar.launchpad.net',
+ 'bazaar.staging.launchpad.net')
+ elif str(launchpad._root_uri) == LAUNCHPAD_API_URLS['qastaging']:
+ return url.replace('bazaar.launchpad.net',
+ 'bazaar.qastaging.launchpad.net')
+ return url
+
+ @classmethod
+ def from_bzr(cls, launchpad, bzr_branch, create_missing=True):
+ """Find a Launchpad branch from a bzr branch."""
+ check_update = True
+ for url in cls.candidate_urls(bzr_branch):
+ url = cls.tweak_url(url, launchpad)
+ if not cls.plausible_launchpad_url(url):
+ continue
+ lp_branch = launchpad.branches.getByUrl(url=url)
+ if lp_branch is not None:
+ break
+ else:
+ if not create_missing:
+ raise NoLaunchpadBranch(bzr_branch)
+ lp_branch = cls.create_now(launchpad, bzr_branch)
+ check_update = False
+ return cls(lp_branch, bzr_branch.base, bzr_branch, check_update)
+
+ @classmethod
+ def create_now(cls, launchpad, bzr_branch):
+ """Create a Bazaar branch on Launchpad for the supplied branch."""
+ url = cls.tweak_url(bzr_branch.get_push_location(), launchpad)
+ if not cls.plausible_launchpad_url(url):
+ raise errors.BzrError(gettext('%s is not registered on Launchpad') %
+ bzr_branch.base)
+ bzr_branch.create_clone_on_transport(transport.get_transport(url))
+ lp_branch = launchpad.branches.getByUrl(url=url)
+ if lp_branch is None:
+ raise errors.BzrError(gettext('%s is not registered on Launchpad') %
+ url)
+ return lp_branch
+
+ def get_target(self):
+ """Return the 'LaunchpadBranch' for the target of this one."""
+ lp_branch = self.lp
+ if lp_branch.project is not None:
+ dev_focus = lp_branch.project.development_focus
+ if dev_focus is None:
+ raise errors.BzrError(gettext('%s has no development focus.') %
+ lp_branch.bzr_identity)
+ target = dev_focus.branch
+ if target is None:
+ raise errors.BzrError(gettext(
+ 'development focus %s has no branch.') % dev_focus)
+ elif lp_branch.sourcepackage is not None:
+ target = lp_branch.sourcepackage.getBranch(pocket="Release")
+ if target is None:
+ raise errors.BzrError(gettext(
+ 'source package %s has no branch.') %
+ lp_branch.sourcepackage)
+ else:
+ raise errors.BzrError(gettext(
+ '%s has no associated product or source package.') %
+ lp_branch.bzr_identity)
+ return LaunchpadBranch(target, target.bzr_identity)
+
+ def update_lp(self):
+ """Update the Launchpad copy of this branch."""
+ if not self._check_update:
+ return
+ self.bzr.lock_read()
+ try:
+ if self.lp.last_scanned_id is not None:
+ if self.bzr.last_revision() == self.lp.last_scanned_id:
+ trace.note(gettext('%s is already up-to-date.') %
+ self.lp.bzr_identity)
+ return
+ graph = self.bzr.repository.get_graph()
+ if not graph.is_ancestor(self.lp.last_scanned_id,
+ self.bzr.last_revision()):
+ raise errors.DivergedBranches(self.bzr, self.push_bzr)
+ trace.note(gettext('Pushing to %s') % self.lp.bzr_identity)
+ self.bzr.push(self.push_bzr)
+ finally:
+ self.bzr.unlock()
+
+ def find_lca_tree(self, other):
+ """Find the revision tree for the LCA of this branch and other.
+
+ :param other: Another LaunchpadBranch
+ :return: The RevisionTree of the LCA of this branch and other.
+ """
+ graph = self.bzr.repository.get_graph(other.bzr.repository)
+ lca = graph.find_unique_lca(self.bzr.last_revision(),
+ other.bzr.last_revision())
+ return self.bzr.repository.revision_tree(lca)
+
+
+def canonical_url(object):
+ """Return the canonical URL for a branch."""
+ scheme, netloc, path, params, query, fragment = urlparse.urlparse(
+ str(object.self_link))
+ path = '/'.join(path.split('/')[2:])
+ netloc = netloc.replace('api.', 'code.')
+ return urlparse.urlunparse((scheme, netloc, path, params, query,
+ fragment))
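
Some of the helpers above are pure functions that need no network access. A
rough sketch, assuming launchpadlib is installed (importing lp_api raises
DependencyNotPresent otherwise); the URLs are made up:

    from bzrlib.plugins.launchpad import lp_api

    print lp_api.parse_launchpadlib_version('1.5.1')
    # (1, 5, 1)
    print lp_api.LaunchpadBranch.plausible_launchpad_url('lp:bzr')
    # True
    print lp_api.LaunchpadBranch.plausible_launchpad_url(
        'bzr+ssh://bazaar.launchpad.net/~jrandom/project/trunk')
    # True
    print lp_api.LaunchpadBranch.plausible_launchpad_url('/home/user/repo')
    # False
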
diff --git a/bzrlib/plugins/launchpad/lp_api_lite.py b/bzrlib/plugins/launchpad/lp_api_lite.py
new file mode 100644
index 0000000..f28a2eb
--- /dev/null
+++ b/bzrlib/plugins/launchpad/lp_api_lite.py
@@ -0,0 +1,288 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tools for dealing with the Launchpad API without using launchpadlib.
+
+The API itself is a RESTful interface, so we can make HTTP queries directly.
+Loading launchpadlib itself has a fairly high overhead (just calling
+Launchpad.login_anonymously() takes about 500ms once the WADL is cached, and
+5+s to get the WADL).
+"""
+
+from __future__ import absolute_import
+
+try:
+ # Use simplejson if available, much faster, and can be easily installed in
+ # older versions of python
+ import simplejson as json
+except ImportError:
+ # Is present since python 2.6
+ try:
+ import json
+ except ImportError:
+ json = None
+
+import time
+import urllib
+import urllib2
+
+from bzrlib import (
+ revision,
+ trace,
+ )
+
+
+class LatestPublication(object):
+ """Encapsulate how to find the latest publication for a given project."""
+
+ LP_API_ROOT = 'https://api.launchpad.net/1.0'
+
+ def __init__(self, archive, series, project):
+ self._archive = archive
+ self._project = project
+ self._setup_series_and_pocket(series)
+
+ def _setup_series_and_pocket(self, series):
+ """Parse the 'series' info into a series and a pocket.
+
+ eg::
+ _setup_series_and_pocket('natty-proposed')
+ => _series == 'natty'
+ _pocket == 'Proposed'
+ """
+ self._series = series
+ self._pocket = None
+ if self._series is not None and '-' in self._series:
+ self._series, self._pocket = self._series.split('-', 1)
+ self._pocket = self._pocket.title()
+ else:
+ self._pocket = 'Release'
+
+ def _archive_URL(self):
+ """Return the Launchpad 'Archive' URL that we will query.
+ This is everything in the URL except the query parameters.
+ """
+ return '%s/%s/+archive/primary' % (self.LP_API_ROOT, self._archive)
+
+ def _publication_status(self):
+ """Handle the 'status' field.
+ It seems that Launchpad tracks all 'debian' packages as 'Pending', while
+ for 'ubuntu' we care about the 'Published' packages.
+ """
+ if self._archive == 'debian':
+ # Launchpad only tracks debian packages as "Pending", it doesn't mark
+ # them Published
+ return 'Pending'
+ return 'Published'
+
+ def _query_params(self):
+ """Get the parameters defining our query.
+ This defines the actions we are making against the archive.
+ :return: A dict of query parameters.
+ """
+ params = {'ws.op': 'getPublishedSources',
+ 'exact_match': 'true',
+ # If we need to use "" shouldn't we quote the project somehow?
+ 'source_name': '"%s"' % (self._project,),
+ 'status': self._publication_status(),
+                  # We only need the latest one; the results seem to be
+                  # sorted properly by most-recent Debian version.
+ 'ws.size': '1',
+ }
+ if self._series is not None:
+ params['distro_series'] = '/%s/%s' % (self._archive, self._series)
+ if self._pocket is not None:
+ params['pocket'] = self._pocket
+ return params
+
+ def _query_URL(self):
+ """Create the full URL that we need to query, including parameters."""
+ params = self._query_params()
+ # We sort to give deterministic results for testing
+ encoded = urllib.urlencode(sorted(params.items()))
+ return '%s?%s' % (self._archive_URL(), encoded)
+
+ def _get_lp_info(self):
+ """Place an actual HTTP query against the Launchpad service."""
+ if json is None:
+ return None
+ query_URL = self._query_URL()
+ try:
+ req = urllib2.Request(query_URL)
+ response = urllib2.urlopen(req)
+ json_info = response.read()
+ # TODO: We haven't tested the HTTPError
+ except (urllib2.URLError, urllib2.HTTPError), e:
+ trace.mutter('failed to place query to %r' % (query_URL,))
+ trace.log_exception_quietly()
+ return None
+ return json_info
+
+ def _parse_json_info(self, json_info):
+ """Parse the json response from Launchpad into objects."""
+ if json is None:
+ return None
+ try:
+ return json.loads(json_info)
+ except Exception:
+ trace.mutter('Failed to parse json info: %r' % (json_info,))
+ trace.log_exception_quietly()
+ return None
+
+ def get_latest_version(self):
+ """Get the latest published version for the given package."""
+ json_info = self._get_lp_info()
+ if json_info is None:
+ return None
+ info = self._parse_json_info(json_info)
+ if info is None:
+ return None
+ try:
+ entries = info['entries']
+ if len(entries) == 0:
+ return None
+ return entries[0]['source_package_version']
+ except KeyError:
+ trace.log_exception_quietly()
+ return None
+
+ def place(self):
+ """Text-form for what location this represents.
+
+ Example::
+ ubuntu, natty => Ubuntu Natty
+ ubuntu, natty-proposed => Ubuntu Natty Proposed
+ :return: A string representing the location we are checking.
+ """
+ place = self._archive
+ if self._series is not None:
+ place = '%s %s' % (place, self._series)
+ if self._pocket is not None and self._pocket != 'Release':
+ place = '%s %s' % (place, self._pocket)
+ return place.title()
+
+
+def get_latest_publication(archive, series, project):
+ """Get the most recent publication for a given project.
+
+ :param archive: Either 'ubuntu' or 'debian'
+    :param series: Something like 'natty', 'sid', etc. May be None. It may
+        also include a pocket, such as 'natty-proposed'.
+ :param project: Something like 'bzr'
+ :return: A version string indicating the most-recent version published in
+ Launchpad. Might return None if there is an error.
+ """
+ lp = LatestPublication(archive, series, project)
+ return lp.get_latest_version()
+
+
+def get_most_recent_tag(tag_dict, the_branch):
+ """Get the most recent revision that has been tagged."""
+ # Note: this assumes that a given rev won't get tagged multiple times. But
+ # it should be valid for the package importer branches that we care
+ # about
+ reverse_dict = dict((rev, tag) for tag, rev in tag_dict.iteritems())
+ the_branch.lock_read()
+ try:
+ last_rev = the_branch.last_revision()
+ graph = the_branch.repository.get_graph()
+ stop_revisions = (None, revision.NULL_REVISION)
+ for rev_id in graph.iter_lefthand_ancestry(last_rev, stop_revisions):
+ if rev_id in reverse_dict:
+ return reverse_dict[rev_id]
+ finally:
+ the_branch.unlock()
+
+
+def _get_newest_versions(the_branch, latest_pub):
+ """Get information about how 'fresh' this packaging branch is.
+
+ :param the_branch: The Branch to check
+ :param latest_pub: The LatestPublication used to check most recent
+ published version.
+ :return: (latest_ver, branch_latest_ver)
+ """
+ t = time.time()
+ latest_ver = latest_pub.get_latest_version()
+ t_latest_ver = time.time() - t
+ trace.mutter('LatestPublication.get_latest_version took: %.3fs'
+ % (t_latest_ver,))
+ if latest_ver is None:
+ return None, None
+ t = time.time()
+ tags = the_branch.tags.get_tag_dict()
+ t_tag_dict = time.time() - t
+ trace.mutter('LatestPublication.get_tag_dict took: %.3fs' % (t_tag_dict,))
+ if latest_ver in tags:
+ # branch might have a newer tag, but we don't really care
+ return latest_ver, latest_ver
+ else:
+ best_tag = get_most_recent_tag(tags, the_branch)
+ return latest_ver, best_tag
+
+
+def _report_freshness(latest_ver, branch_latest_ver, place, verbosity,
+ report_func):
+ """Report if the branch is up-to-date."""
+ if latest_ver is None:
+ if verbosity == 'all':
+ report_func('Most recent %s version: MISSING' % (place,))
+ elif verbosity == 'short':
+ report_func('%s is MISSING a version' % (place,))
+ return
+ elif latest_ver == branch_latest_ver:
+ if verbosity == 'minimal':
+ return
+ elif verbosity == 'short':
+ report_func('%s is CURRENT in %s' % (latest_ver, place))
+ else:
+ report_func('Most recent %s version: %s\n'
+ 'Packaging branch status: CURRENT'
+ % (place, latest_ver))
+ else:
+ if verbosity in ('minimal', 'short'):
+ if branch_latest_ver is None:
+ branch_latest_ver = 'Branch'
+ report_func('%s is OUT-OF-DATE, %s has %s'
+ % (branch_latest_ver, place, latest_ver))
+ else:
+ report_func('Most recent %s version: %s\n'
+ 'Packaging branch version: %s\n'
+ 'Packaging branch status: OUT-OF-DATE'
+ % (place, latest_ver, branch_latest_ver))
+
+
+def report_freshness(the_branch, verbosity, latest_pub):
+ """Report to the user how up-to-date the packaging branch is.
+
+ :param the_branch: A Branch object
+ :param verbosity: Can be one of:
+ off: Do not print anything, and skip all checks.
+ all: Print all information that we have in a verbose manner, this
+ includes misses, etc.
+ short: Print information, but only one-line summaries
+ minimal: Only print a one-line summary when the package branch is
+ out-of-date
+ :param latest_pub: A LatestPublication instance
+ """
+ if verbosity == 'off':
+ return
+ if verbosity is None:
+ verbosity = 'all'
+ latest_ver, branch_ver = _get_newest_versions(the_branch, latest_pub)
+ place = latest_pub.place()
+ _report_freshness(latest_ver, branch_ver, place, verbosity,
+ trace.note)
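+
+
+# A minimal usage sketch for the helpers above (illustrative only: the
+# 'ubuntu'/'natty'/'bzr' values are examples, and the first call needs
+# network access plus a json parser).
+if __name__ == '__main__':
+    import sys
+    latest = get_latest_publication('ubuntu', 'natty', 'bzr')
+    sys.stdout.write('Latest bzr published in Ubuntu Natty: %s\n' % (latest,))
+    # The report formatting can be exercised without any network access.
+    _report_freshness('2.3.4-0ubuntu1', '2.3.3-0ubuntu1', 'Ubuntu Natty',
+                      'all', lambda msg: sys.stdout.write(msg + '\n'))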
diff --git a/bzrlib/plugins/launchpad/lp_directory.py b/bzrlib/plugins/launchpad/lp_directory.py
new file mode 100644
index 0000000..3e88f69
--- /dev/null
+++ b/bzrlib/plugins/launchpad/lp_directory.py
@@ -0,0 +1,209 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Directory lookup that uses Launchpad."""
+
+from __future__ import absolute_import
+
+from urlparse import urlsplit
+import xmlrpclib
+
+from bzrlib import (
+ debug,
+ errors,
+ trace,
+ transport,
+ )
+from bzrlib.i18n import gettext
+
+from bzrlib.plugins.launchpad.lp_registration import (
+ LaunchpadService, ResolveLaunchpadPathRequest)
+from bzrlib.plugins.launchpad.account import get_lp_login
+
+
+# As bzrlib.transport.remote may not be loaded yet, make sure bzr+ssh
+# is counted as a netloc protocol.
+transport.register_urlparse_netloc_protocol('bzr+ssh')
+transport.register_urlparse_netloc_protocol('lp')
+
+_ubuntu_series_shortcuts = {
+ 'n': 'natty',
+ 'm': 'maverick',
+ 'l': 'lucid',
+ 'k': 'karmic',
+ 'j': 'jaunty',
+ 'h': 'hardy',
+ 'd': 'dapper',
+ }
+
+
+class LaunchpadDirectory(object):
+
+ def _requires_launchpad_login(self, scheme, netloc, path, query,
+ fragment):
+ """Does the URL require a Launchpad login in order to be reached?
+
+ The URL is specified by its parsed components, as returned from
+ urlsplit.
+ """
+ return (scheme in ('bzr+ssh', 'sftp')
+ and (netloc.endswith('launchpad.net')
+ or netloc.endswith('launchpad.dev')))
+
+ def look_up(self, name, url):
+ """See DirectoryService.look_up"""
+ return self._resolve(url)
+
+ def _resolve_locally(self, path, url, _request_factory):
+        # This is the best I could work out about how the XMLRPC resolver
+        # behaves. If an lp: url includes ~user, then it is specially
+        # validated. Otherwise, it is just sent to +branch/$path.
+ _, netloc, _, _, _ = urlsplit(url)
+ if netloc == '':
+ netloc = LaunchpadService.DEFAULT_INSTANCE
+ base_url = LaunchpadService.LAUNCHPAD_DOMAINS[netloc]
+ base = 'bzr+ssh://bazaar.%s/' % (base_url,)
+ maybe_invalid = False
+ if path.startswith('~'):
+ # A ~user style path, validate it a bit.
+ # If a path looks fishy, fall back to asking XMLRPC to
+ # resolve it for us. That way we still get their nicer error
+ # messages.
+ parts = path.split('/')
+ if (len(parts) < 3
+ or (parts[1] in ('ubuntu', 'debian') and len(parts) < 5)):
+                # This special case requires 5 parts to be valid.
+ maybe_invalid = True
+ else:
+ base += '+branch/'
+ if maybe_invalid:
+ return self._resolve_via_xmlrpc(path, url, _request_factory)
+ return {'urls': [base + path]}
+
+ def _resolve_via_xmlrpc(self, path, url, _request_factory):
+ service = LaunchpadService.for_url(url)
+ resolve = _request_factory(path)
+ try:
+ result = resolve.submit(service)
+ except xmlrpclib.Fault, fault:
+ raise errors.InvalidURL(
+ path=url, extra=fault.faultString)
+ return result
+
+ def _update_url_scheme(self, url):
+ # Do ubuntu: and debianlp: expansions.
+ scheme, netloc, path, query, fragment = urlsplit(url)
+ if scheme in ('ubuntu', 'debianlp'):
+ if scheme == 'ubuntu':
+ distro = 'ubuntu'
+ distro_series = _ubuntu_series_shortcuts
+ elif scheme == 'debianlp':
+ distro = 'debian'
+ # No shortcuts for Debian distroseries.
+ distro_series = {}
+ else:
+ raise AssertionError('scheme should be ubuntu: or debianlp:')
+ # Split the path. It's either going to be 'project' or
+ # 'series/project', but recognize that it may be a series we don't
+ # know about.
+ path_parts = path.split('/')
+ if len(path_parts) == 1:
+ # It's just a project name.
+ lp_url_template = 'lp:%(distro)s/%(project)s'
+ project = path_parts[0]
+ series = None
+ elif len(path_parts) == 2:
+ # It's a series and project.
+ lp_url_template = 'lp:%(distro)s/%(series)s/%(project)s'
+ series, project = path_parts
+ else:
+ # There are either 0 or > 2 path parts, neither of which is
+ # supported for these schemes.
+ raise errors.InvalidURL('Bad path: %s' % url)
+ # Expand any series shortcuts, but keep unknown series.
+ series = distro_series.get(series, series)
+ # Hack the url and let the following do the final resolution.
+ url = lp_url_template % dict(
+ distro=distro,
+ series=series,
+ project=project)
+ scheme, netloc, path, query, fragment = urlsplit(url)
+ return url, path
+
+ def _expand_user(self, path, url, lp_login):
+ if path.startswith('~/'):
+ if lp_login is None:
+ raise errors.InvalidURL(path=url,
+ extra='Cannot resolve "~" to your username.'
+ ' See "bzr help launchpad-login"')
+ path = '~' + lp_login + path[1:]
+ return path
+
+ def _resolve(self, url,
+ _request_factory=ResolveLaunchpadPathRequest,
+ _lp_login=None):
+ """Resolve the base URL for this transport."""
+ url, path = self._update_url_scheme(url)
+ if _lp_login is None:
+ _lp_login = get_lp_login()
+ path = path.strip('/')
+ path = self._expand_user(path, url, _lp_login)
+ if _lp_login is not None:
+ result = self._resolve_locally(path, url, _request_factory)
+ if 'launchpad' in debug.debug_flags:
+ local_res = result
+ result = self._resolve_via_xmlrpc(path, url, _request_factory)
+ trace.note(gettext(
+ 'resolution for {0}\n local: {1}\n remote: {2}').format(
+ url, local_res['urls'], result['urls']))
+ else:
+ result = self._resolve_via_xmlrpc(path, url, _request_factory)
+
+ if 'launchpad' in debug.debug_flags:
+ trace.mutter("resolve_lp_path(%r) == %r", url, result)
+
+ _warned_login = False
+ for url in result['urls']:
+ scheme, netloc, path, query, fragment = urlsplit(url)
+ if self._requires_launchpad_login(scheme, netloc, path, query,
+ fragment):
+ # Only accept launchpad.net bzr+ssh URLs if we know
+ # the user's Launchpad login:
+ if _lp_login is not None:
+ break
+ if _lp_login is None:
+ if not _warned_login:
+ trace.warning(
+'You have not informed bzr of your Launchpad ID, and you must do this to\n'
+'write to Launchpad or access private data. See "bzr help launchpad-login".')
+ _warned_login = True
+ else:
+ # Use the URL if we can create a transport for it.
+ try:
+ transport.get_transport(url)
+ except (errors.PathError, errors.TransportError):
+ pass
+ else:
+ break
+ else:
+ raise errors.InvalidURL(path=url, extra='no supported schemes')
+ return url
+
+
+def get_test_permutations():
+ # Since this transport doesn't do anything once opened, it's not subjected
+ # to the usual transport tests.
+ return []
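+
+
+# A minimal sketch of the purely local parts of the resolution above
+# (illustrative only; these are private helpers, and the URL and login are
+# example values). Neither call contacts the XML-RPC service.
+if __name__ == '__main__':
+    directory = LaunchpadDirectory()
+    # 'ubuntu:natty/bzr' is rewritten to an lp: URL before resolution.
+    print directory._update_url_scheme('ubuntu:natty/bzr')
+    # A leading '~/' is expanded to the configured Launchpad login.
+    print directory._expand_user('~/bzr/feature', 'lp:~/bzr/feature', 'user')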
diff --git a/bzrlib/plugins/launchpad/lp_propose.py b/bzrlib/plugins/launchpad/lp_propose.py
new file mode 100644
index 0000000..7142515
--- /dev/null
+++ b/bzrlib/plugins/launchpad/lp_propose.py
@@ -0,0 +1,221 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ hooks,
+ )
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import webbrowser
+
+from bzrlib import (
+ msgeditor,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.plugins.launchpad import (
+ lp_api,
+ lp_registration,
+ )
+""")
+
+
+class ProposeMergeHooks(hooks.Hooks):
+ """Hooks for proposing a merge on Launchpad."""
+
+ def __init__(self):
+ hooks.Hooks.__init__(self, "bzrlib.plugins.launchpad.lp_propose",
+ "Proposer.hooks")
+ self.add_hook('get_prerequisite',
+ "Return the prerequisite branch for proposing as merge.", (2, 1))
+ self.add_hook('merge_proposal_body',
+ "Return an initial body for the merge proposal message.", (2, 1))
+
+
+class Proposer(object):
+
+ hooks = ProposeMergeHooks()
+
+ def __init__(self, tree, source_branch, target_branch, message, reviews,
+ staging=False, approve=False, fixes=None):
+ """Constructor.
+
+ :param tree: The working tree for the source branch.
+ :param source_branch: The branch to propose for merging.
+ :param target_branch: The branch to merge into.
+ :param message: The commit message to use. (May be None.)
+ :param reviews: A list of tuples of reviewer, review type.
+ :param staging: If True, propose the merge against staging instead of
+ production.
+ :param approve: If True, mark the new proposal as approved immediately.
+ This is useful when a project permits some things to be approved
+ by the submitter (e.g. merges between release and deployment
+ branches).
+ """
+ self.tree = tree
+ if staging:
+ lp_instance = 'staging'
+ else:
+ lp_instance = 'production'
+ service = lp_registration.LaunchpadService(lp_instance=lp_instance)
+ self.launchpad = lp_api.login(service)
+ self.source_branch = lp_api.LaunchpadBranch.from_bzr(
+ self.launchpad, source_branch)
+ if target_branch is None:
+ self.target_branch = self.source_branch.get_target()
+ else:
+ self.target_branch = lp_api.LaunchpadBranch.from_bzr(
+ self.launchpad, target_branch)
+ self.commit_message = message
+ # XXX: this is where bug lp:583638 could be tackled.
+ if reviews == []:
+ self.reviews = []
+ else:
+ self.reviews = [(self.launchpad.people[reviewer], review_type)
+ for reviewer, review_type in
+ reviews]
+ self.approve = approve
+ self.fixes = fixes
+
+ def get_comment(self, prerequisite_branch):
+ """Determine the initial comment for the merge proposal."""
+ info = ["Source: %s\n" % self.source_branch.lp.bzr_identity]
+ info.append("Target: %s\n" % self.target_branch.lp.bzr_identity)
+ if prerequisite_branch is not None:
+ info.append("Prereq: %s\n" % prerequisite_branch.lp.bzr_identity)
+ for rdata in self.reviews:
+ uniquename = "%s (%s)" % (rdata[0].display_name, rdata[0].name)
+ info.append('Reviewer: %s, type "%s"\n' % (uniquename, rdata[1]))
+ self.source_branch.bzr.lock_read()
+ try:
+ self.target_branch.bzr.lock_read()
+ try:
+ body = self.get_initial_body()
+ finally:
+ self.target_branch.bzr.unlock()
+ finally:
+ self.source_branch.bzr.unlock()
+ initial_comment = msgeditor.edit_commit_message(''.join(info),
+ start_message=body)
+ return initial_comment.strip().encode('utf-8')
+
+ def get_initial_body(self):
+ """Get a body for the proposal for the user to modify.
+
+ :return: a str or None.
+ """
+ def list_modified_files():
+ lca_tree = self.source_branch.find_lca_tree(
+ self.target_branch)
+ source_tree = self.source_branch.bzr.basis_tree()
+ files = modified_files(lca_tree, source_tree)
+ return list(files)
+ target_loc = ('bzr+ssh://bazaar.launchpad.net/%s' %
+ self.target_branch.lp.unique_name)
+ body = None
+ for hook in self.hooks['merge_proposal_body']:
+ body = hook({
+ 'tree': self.tree,
+ 'target_branch': target_loc,
+ 'modified_files_callback': list_modified_files,
+ 'old_body': body,
+ })
+ return body
+
+ def check_proposal(self):
+ """Check that the submission is sensible."""
+ if self.source_branch.lp.self_link == self.target_branch.lp.self_link:
+ raise errors.BzrCommandError(
+ 'Source and target branches must be different.')
+ for mp in self.source_branch.lp.landing_targets:
+ if mp.queue_status in ('Merged', 'Rejected'):
+ continue
+ if mp.target_branch.self_link == self.target_branch.lp.self_link:
+ raise errors.BzrCommandError(gettext(
+ 'There is already a branch merge proposal: %s') %
+ lp_api.canonical_url(mp))
+
+ def _get_prerequisite_branch(self):
+ hooks = self.hooks['get_prerequisite']
+ prerequisite_branch = None
+ for hook in hooks:
+ prerequisite_branch = hook(
+ {'launchpad': self.launchpad,
+ 'source_branch': self.source_branch,
+ 'target_branch': self.target_branch,
+ 'prerequisite_branch': prerequisite_branch})
+ return prerequisite_branch
+
+ def call_webservice(self, call, *args, **kwargs):
+ """Make a call to the webservice, wrapping failures.
+
+ :param call: The call to make.
+ :param *args: *args for the call.
+ :param **kwargs: **kwargs for the call.
+        :return: The result of calling call(*args, **kwargs).
+ """
+ from lazr.restfulclient import errors as restful_errors
+ try:
+ return call(*args, **kwargs)
+ except restful_errors.HTTPError, e:
+ error_lines = []
+ for line in e.content.splitlines():
+ if line.startswith('Traceback (most recent call last):'):
+ break
+ error_lines.append(line)
+ raise Exception(''.join(error_lines))
+
+ def create_proposal(self):
+ """Perform the submission."""
+ prerequisite_branch = self._get_prerequisite_branch()
+ if prerequisite_branch is None:
+ prereq = None
+ else:
+ prereq = prerequisite_branch.lp
+ prerequisite_branch.update_lp()
+ self.source_branch.update_lp()
+ reviewers = []
+ review_types = []
+ for reviewer, review_type in self.reviews:
+ review_types.append(review_type)
+ reviewers.append(reviewer.self_link)
+ initial_comment = self.get_comment(prerequisite_branch)
+ mp = self.call_webservice(
+ self.source_branch.lp.createMergeProposal,
+ target_branch=self.target_branch.lp,
+ prerequisite_branch=prereq,
+ initial_comment=initial_comment,
+ commit_message=self.commit_message, reviewers=reviewers,
+ review_types=review_types)
+ if self.approve:
+ self.call_webservice(mp.setStatus, status='Approved')
+ if self.fixes:
+ if self.fixes.startswith('lp:'):
+ self.fixes = self.fixes[3:]
+ self.call_webservice(
+ self.source_branch.lp.linkBug,
+ bug=self.launchpad.bugs[int(self.fixes)])
+ webbrowser.open(lp_api.canonical_url(mp))
+
+
+def modified_files(old_tree, new_tree):
+ """Return a list of paths in the new tree with modified contents."""
+ for f, (op, path), c, v, p, n, (ok, k), e in new_tree.iter_changes(
+ old_tree):
+ if c and k == 'file':
+ yield str(path)
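+
+
+# A minimal sketch of supplying an initial proposal body through the
+# 'merge_proposal_body' hook defined above (illustrative only; the dict keys
+# mirror those built in Proposer.get_initial_body, everything else is an
+# example).
+def _example_merge_proposal_body(params):
+    changed = params['modified_files_callback']()
+    return 'This branch changes %d file(s) for %s.\n' % (
+        len(changed), params['target_branch'])
+
+# A plugin would register it with something like the following (left
+# commented out so that importing this module does not install the hook):
+#   Proposer.hooks.install_named_hook(
+#       'merge_proposal_body', _example_merge_proposal_body, 'example body')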
diff --git a/bzrlib/plugins/launchpad/lp_registration.py b/bzrlib/plugins/launchpad/lp_registration.py
new file mode 100644
index 0000000..dae825f
--- /dev/null
+++ b/bzrlib/plugins/launchpad/lp_registration.py
@@ -0,0 +1,358 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+
+import os
+import socket
+from urlparse import urlsplit, urlunsplit
+import urllib
+import xmlrpclib
+
+from bzrlib import (
+ config,
+ errors,
+ urlutils,
+ __version__ as _bzrlib_version,
+ )
+from bzrlib.transport.http import _urllib2_wrappers
+
+
+# For testing against the staging server, do:
+'''
+export BZR_LP_XMLRPC_URL=http://xmlrpc.staging.launchpad.net/bazaar/
+'''
+
+class InvalidLaunchpadInstance(errors.BzrError):
+
+ _fmt = "%(lp_instance)s is not a valid Launchpad instance."
+
+ def __init__(self, lp_instance):
+ errors.BzrError.__init__(self, lp_instance=lp_instance)
+
+
+class NotLaunchpadBranch(errors.BzrError):
+
+ _fmt = "%(url)s is not registered on Launchpad."
+
+ def __init__(self, url):
+ errors.BzrError.__init__(self, url=url)
+
+
+class XMLRPCTransport(xmlrpclib.Transport):
+
+ def __init__(self, scheme):
+ xmlrpclib.Transport.__init__(self)
+ self._scheme = scheme
+ self._opener = _urllib2_wrappers.Opener()
+ self.verbose = 0
+
+ def request(self, host, handler, request_body, verbose=0):
+ self.verbose = verbose
+ url = self._scheme + "://" + host + handler
+ request = _urllib2_wrappers.Request("POST", url, request_body)
+ # FIXME: _urllib2_wrappers will override user-agent with its own
+ # request.add_header("User-Agent", self.user_agent)
+ request.add_header("Content-Type", "text/xml")
+
+ response = self._opener.open(request)
+ if response.code != 200:
+ raise xmlrpclib.ProtocolError(host + handler, response.code,
+ response.msg, response.info())
+ return self.parse_response(response)
+
+
+class LaunchpadService(object):
+ """A service to talk to Launchpad via XMLRPC.
+
+ See http://wiki.bazaar.canonical.com/Specs/LaunchpadRpc for the methods we can call.
+ """
+
+ LAUNCHPAD_DOMAINS = {
+ 'production': 'launchpad.net',
+ 'staging': 'staging.launchpad.net',
+ 'qastaging': 'qastaging.launchpad.net',
+ 'demo': 'demo.launchpad.net',
+ 'dev': 'launchpad.dev',
+ }
+
+ # NB: these should always end in a slash to avoid xmlrpclib appending
+ # '/RPC2'
+ LAUNCHPAD_INSTANCE = {}
+ for instance, domain in LAUNCHPAD_DOMAINS.iteritems():
+ LAUNCHPAD_INSTANCE[instance] = 'https://xmlrpc.%s/bazaar/' % domain
+
+ # We use production as the default because edge has been deprecated circa
+ # 2010-11 (see bug https://bugs.launchpad.net/bzr/+bug/583667)
+ DEFAULT_INSTANCE = 'production'
+ DEFAULT_SERVICE_URL = LAUNCHPAD_INSTANCE[DEFAULT_INSTANCE]
+
+ transport = None
+ registrant_email = None
+ registrant_password = None
+
+ def __init__(self, transport=None, lp_instance=None):
+ """Construct a new service talking to the launchpad rpc server"""
+ self._lp_instance = lp_instance
+ if transport is None:
+ uri_type = urllib.splittype(self.service_url)[0]
+ transport = XMLRPCTransport(uri_type)
+ transport.user_agent = 'bzr/%s (xmlrpclib/%s)' \
+ % (_bzrlib_version, xmlrpclib.__version__)
+ self.transport = transport
+
+ @property
+ def service_url(self):
+ """Return the http or https url for the xmlrpc server.
+
+ This does not include the username/password credentials.
+ """
+ key = 'BZR_LP_XMLRPC_URL'
+ if key in os.environ:
+ return os.environ[key]
+ elif self._lp_instance is not None:
+ try:
+ return self.LAUNCHPAD_INSTANCE[self._lp_instance]
+ except KeyError:
+ raise InvalidLaunchpadInstance(self._lp_instance)
+ else:
+ return self.DEFAULT_SERVICE_URL
+
+ @classmethod
+ def for_url(cls, url, **kwargs):
+ """Return the Launchpad service corresponding to the given URL."""
+ result = urlsplit(url)
+ lp_instance = result[1]
+ if lp_instance == '':
+ lp_instance = None
+ elif lp_instance not in cls.LAUNCHPAD_INSTANCE:
+ raise errors.InvalidURL(path=url)
+ return cls(lp_instance=lp_instance, **kwargs)
+
+ def get_proxy(self, authenticated):
+ """Return the proxy for XMLRPC requests."""
+ if authenticated:
+ # auth info must be in url
+ # TODO: if there's no registrant email perhaps we should
+ # just connect anonymously?
+ scheme, hostinfo, path = urlsplit(self.service_url)[:3]
+ if '@' in hostinfo:
+ raise AssertionError(hostinfo)
+ if self.registrant_email is None:
+ raise AssertionError()
+ if self.registrant_password is None:
+ raise AssertionError()
+ # TODO: perhaps fully quote the password to make it very slightly
+ # obscured
+ # TODO: can we perhaps add extra Authorization headers
+ # directly to the request, rather than putting this into
+ # the url? perhaps a bit more secure against accidentally
+ # revealing it. std66 s3.2.1 discourages putting the
+ # password in the url.
+ hostinfo = '%s:%s@%s' % (urlutils.quote(self.registrant_email),
+ urlutils.quote(self.registrant_password),
+ hostinfo)
+ url = urlunsplit((scheme, hostinfo, path, '', ''))
+ else:
+ url = self.service_url
+ return xmlrpclib.ServerProxy(url, transport=self.transport)
+
+ def gather_user_credentials(self):
+ """Get the password from the user."""
+ the_config = config.GlobalConfig()
+ self.registrant_email = the_config.user_email()
+ if self.registrant_password is None:
+ auth = config.AuthenticationConfig()
+ scheme, hostinfo = urlsplit(self.service_url)[:2]
+ prompt = 'launchpad.net password for %s: ' % \
+ self.registrant_email
+            # We will reuse http[s] credentials if we can; otherwise prompt
+            # the user.
+ self.registrant_password = auth.get_password(scheme, hostinfo,
+ self.registrant_email,
+ prompt=prompt)
+
+ def send_request(self, method_name, method_params, authenticated):
+ proxy = self.get_proxy(authenticated)
+ method = getattr(proxy, method_name)
+ try:
+ result = method(*method_params)
+ except xmlrpclib.ProtocolError, e:
+ if e.errcode == 301:
+ # TODO: This can give a ProtocolError representing a 301 error, whose
+ # e.headers['location'] tells where to go and e.errcode==301; should
+ # probably log something and retry on the new url.
+ raise NotImplementedError("should resend request to %s, but this isn't implemented"
+ % e.headers.get('Location', 'NO-LOCATION-PRESENT'))
+ else:
+ # we don't want to print the original message because its
+ # str representation includes the plaintext password.
+ # TODO: print more headers to help in tracking down failures
+ raise errors.BzrError("xmlrpc protocol error connecting to %s: %s %s"
+ % (self.service_url, e.errcode, e.errmsg))
+ except socket.gaierror, e:
+ raise errors.ConnectionError(
+ "Could not resolve '%s'" % self.domain,
+ orig_error=e)
+ return result
+
+ @property
+ def domain(self):
+ if self._lp_instance is None:
+ instance = self.DEFAULT_INSTANCE
+ else:
+ instance = self._lp_instance
+ return self.LAUNCHPAD_DOMAINS[instance]
+
+ def _guess_branch_path(self, branch_url, _request_factory=None):
+ scheme, hostinfo, path = urlsplit(branch_url)[:3]
+ if _request_factory is None:
+ _request_factory = ResolveLaunchpadPathRequest
+ if scheme == 'lp':
+ resolve = _request_factory(path)
+ try:
+ result = resolve.submit(self)
+ except xmlrpclib.Fault, fault:
+ raise errors.InvalidURL(branch_url, str(fault))
+ branch_url = result['urls'][0]
+ path = urlsplit(branch_url)[2]
+ else:
+ domains = (
+ 'bazaar.%s' % domain
+ for domain in self.LAUNCHPAD_DOMAINS.itervalues())
+ if hostinfo not in domains:
+ raise NotLaunchpadBranch(branch_url)
+ return path.lstrip('/')
+
+ def get_web_url_from_branch_url(self, branch_url, _request_factory=None):
+ """Get the Launchpad web URL for the given branch URL.
+
+ :raise errors.InvalidURL: if 'branch_url' cannot be identified as a
+ Launchpad branch URL.
+ :return: The URL of the branch on Launchpad.
+ """
+ path = self._guess_branch_path(branch_url, _request_factory)
+ return urlutils.join('https://code.%s' % self.domain, path)
+
+
+class BaseRequest(object):
+ """Base request for talking to a XMLRPC server."""
+
+ # Set this to the XMLRPC method name.
+ _methodname = None
+ _authenticated = True
+
+ def _request_params(self):
+ """Return the arguments to pass to the method"""
+ raise NotImplementedError(self._request_params)
+
+ def submit(self, service):
+ """Submit request to Launchpad XMLRPC server.
+
+ :param service: LaunchpadService indicating where to send
+ the request and the authentication credentials.
+ """
+ return service.send_request(self._methodname, self._request_params(),
+ self._authenticated)
+
+
+class DryRunLaunchpadService(LaunchpadService):
+ """Service that just absorbs requests without sending to server.
+
+ The dummy service does not need authentication.
+ """
+
+ def send_request(self, method_name, method_params, authenticated):
+ pass
+
+ def gather_user_credentials(self):
+ pass
+
+
+class BranchRegistrationRequest(BaseRequest):
+ """Request to tell Launchpad about a bzr branch."""
+
+ _methodname = 'register_branch'
+
+ def __init__(self, branch_url,
+ branch_name='',
+ branch_title='',
+ branch_description='',
+ author_email='',
+ product_name='',
+ ):
+ if not branch_url:
+ raise errors.InvalidURL(branch_url, "You need to specify a non-empty branch URL.")
+ self.branch_url = branch_url
+ if branch_name:
+ self.branch_name = branch_name
+ else:
+ self.branch_name = self._find_default_branch_name(self.branch_url)
+ self.branch_title = branch_title
+ self.branch_description = branch_description
+ self.author_email = author_email
+ self.product_name = product_name
+
+ def _request_params(self):
+ """Return xmlrpc request parameters"""
+ # This must match the parameter tuple expected by Launchpad for this
+ # method
+ return (self.branch_url,
+ self.branch_name,
+ self.branch_title,
+ self.branch_description,
+ self.author_email,
+ self.product_name,
+ )
+
+ def _find_default_branch_name(self, branch_url):
+ i = branch_url.rfind('/')
+ return branch_url[i+1:]
+
+
+class BranchBugLinkRequest(BaseRequest):
+ """Request to link a bzr branch in Launchpad to a bug."""
+
+ _methodname = 'link_branch_to_bug'
+
+ def __init__(self, branch_url, bug_id):
+ self.bug_id = bug_id
+ self.branch_url = branch_url
+
+ def _request_params(self):
+ """Return xmlrpc request parameters"""
+ # This must match the parameter tuple expected by Launchpad for this
+ # method
+ return (self.branch_url, self.bug_id, '')
+
+
+class ResolveLaunchpadPathRequest(BaseRequest):
+ """Request to resolve the path component of an lp: URL."""
+
+ _methodname = 'resolve_lp_path'
+ _authenticated = False
+
+ def __init__(self, path):
+ if not path:
+ raise errors.InvalidURL(path=path,
+ extra="You must specify a project.")
+ self.path = path
+
+ def _request_params(self):
+ """Return xmlrpc request parameters"""
+ return (self.path,)
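+
+
+# A minimal sketch of the request/service pattern above (illustrative only;
+# the branch URL is example data). A BaseRequest subclass packages the XMLRPC
+# method name and parameters, and submit() hands them to a LaunchpadService.
+# DryRunLaunchpadService absorbs the call, so this runs without any network
+# access.
+if __name__ == '__main__':
+    service = DryRunLaunchpadService()
+    ResolveLaunchpadPathRequest('bzr').submit(service)
+    register = BranchRegistrationRequest('http://example.com/branches/bzr')
+    print register._request_params()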
diff --git a/bzrlib/plugins/launchpad/test_account.py b/bzrlib/plugins/launchpad/test_account.py
new file mode 100644
index 0000000..ca058a6
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_account.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Launchpad user ID management functions."""
+
+from bzrlib import config
+from bzrlib.tests import TestCaseInTempDir, TestCaseWithMemoryTransport
+from bzrlib.plugins.launchpad import account
+
+
+class LaunchpadAccountTests(TestCaseInTempDir):
+
+ def test_get_lp_login_unconfigured(self):
+ # Test that get_lp_login() returns None if no username has
+ # been configured.
+ my_config = config.MemoryStack('')
+ self.assertEqual(None, account.get_lp_login(my_config))
+
+ def test_get_lp_login(self):
+ # Test that get_lp_login() returns the configured username
+ my_config = config.MemoryStack(
+ '[DEFAULT]\nlaunchpad_username=test-user\n')
+ self.assertEqual('test-user', account.get_lp_login(my_config))
+
+ def test_set_lp_login(self):
+ # Test that set_lp_login() updates the config file.
+ my_config = config.MemoryStack('')
+ self.assertEqual(None, my_config.get('launchpad_username'))
+ account.set_lp_login('test-user', my_config)
+ self.assertEqual(
+ 'test-user', my_config.get('launchpad_username'))
+
+ def test_unknown_launchpad_username(self):
+ # Test formatting of UnknownLaunchpadUsername exception
+ error = account.UnknownLaunchpadUsername(user='test-user')
+ self.assertEqualDiff('The user name test-user is not registered '
+ 'on Launchpad.', str(error))
+
+ def test_no_registered_ssh_keys(self):
+ # Test formatting of NoRegisteredSSHKeys exception
+ error = account.NoRegisteredSSHKeys(user='test-user')
+ self.assertEqualDiff('The user test-user has not registered any '
+ 'SSH keys with Launchpad.\n'
+ 'See <https://launchpad.net/people/+me>',
+ str(error))
+
+ def test_set_lp_login_updates_authentication_conf(self):
+ self.assertIs(None, account._get_auth_user())
+ account.set_lp_login('foo')
+ self.assertEqual('foo', account._get_auth_user())
+
+ def test_get_lp_login_does_not_update_for_none_user(self):
+ account.get_lp_login()
+ self.assertIs(None, account._get_auth_user())
+
+ def test_get_lp_login_updates_authentication_conf(self):
+ account._set_global_option('foo')
+ self.assertIs(None, account._get_auth_user())
+ account.get_lp_login()
+ auth = config.AuthenticationConfig()
+ self.assertEqual('foo', account._get_auth_user(auth))
+ self.assertEqual('foo', auth.get_user('ssh', 'bazaar.launchpad.net'))
+ self.assertEqual('foo', auth.get_user('ssh',
+ 'bazaar.staging.launchpad.net'))
+
+ def test_get_lp_login_leaves_existing_credentials(self):
+ auth = config.AuthenticationConfig()
+ auth.set_credentials('Foo', 'bazaar.launchpad.net', 'foo', 'ssh')
+ auth.set_credentials('Bar', 'bazaar.staging.launchpad.net', 'foo',
+ 'ssh')
+ account._set_global_option('foo')
+ account.get_lp_login()
+ auth = config.AuthenticationConfig()
+ credentials = auth.get_credentials('ssh', 'bazaar.launchpad.net')
+ self.assertEqual('Foo', credentials['name'])
+
+ def test_get_lp_login_errors_on_mismatch(self):
+ account._set_auth_user('foo')
+ account._set_global_option('bar')
+ e = self.assertRaises(account.MismatchedUsernames,
+ account.get_lp_login)
+ self.assertEqual('bazaar.conf and authentication.conf disagree about'
+ ' launchpad account name. Please re-run launchpad-login.', str(e))
+
+
+class CheckAccountTests(TestCaseWithMemoryTransport):
+
+ def test_check_lp_login_valid_user(self):
+ transport = self.get_transport()
+ transport.mkdir('~test-user')
+ transport.put_bytes('~test-user/+sshkeys', 'some keys here')
+ account.check_lp_login('test-user', transport)
+
+ def test_check_lp_login_no_user(self):
+ transport = self.get_transport()
+ self.assertRaises(account.UnknownLaunchpadUsername,
+ account.check_lp_login, 'test-user', transport)
+
+ def test_check_lp_login_no_ssh_keys(self):
+ transport = self.get_transport()
+ transport.mkdir('~test-user')
+ transport.put_bytes('~test-user/+sshkeys', '')
+ self.assertRaises(account.NoRegisteredSSHKeys,
+ account.check_lp_login, 'test-user', transport)
diff --git a/bzrlib/plugins/launchpad/test_lp_api.py b/bzrlib/plugins/launchpad/test_lp_api.py
new file mode 100644
index 0000000..ba8e12f
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_lp_api.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import config, errors, osutils
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.features import (
+ ModuleAvailableFeature,
+ )
+
+
+launchpadlib_feature = ModuleAvailableFeature('launchpadlib')
+
+
+class TestDependencyManagement(TestCase):
+ """Tests for managing the dependency on launchpadlib."""
+
+ _test_needs_features = [launchpadlib_feature]
+
+ def setUp(self):
+ TestCase.setUp(self)
+ from bzrlib.plugins.launchpad import lp_api
+ self.lp_api = lp_api
+
+ def patch(self, obj, name, value):
+ """Temporarily set the 'name' attribute of 'obj' to 'value'."""
+ self.overrideAttr(obj, name, value)
+
+ def test_get_launchpadlib_version(self):
+        # parse_launchpadlib_version returns a version number as a tuple,
+        # parsed from the string form used by launchpadlib.
+ version_info = self.lp_api.parse_launchpadlib_version('1.5.1')
+ self.assertEqual((1, 5, 1), version_info)
+
+ def test_supported_launchpadlib_version(self):
+ # If the installed version of launchpadlib is greater than the minimum
+ # required version of launchpadlib, check_launchpadlib_compatibility
+ # doesn't raise an error.
+ launchpadlib = launchpadlib_feature.module
+ self.patch(launchpadlib, '__version__', '1.5.1')
+ self.lp_api.MINIMUM_LAUNCHPADLIB_VERSION = (1, 5, 1)
+ # Doesn't raise an exception.
+ self.lp_api.check_launchpadlib_compatibility()
+
+ def test_unsupported_launchpadlib_version(self):
+ # If the installed version of launchpadlib is less than the minimum
+ # required version of launchpadlib, check_launchpadlib_compatibility
+ # raises an IncompatibleAPI error.
+ launchpadlib = launchpadlib_feature.module
+ self.patch(launchpadlib, '__version__', '1.5.0')
+ self.lp_api.MINIMUM_LAUNCHPADLIB_VERSION = (1, 5, 1)
+ self.assertRaises(
+ errors.IncompatibleAPI,
+ self.lp_api.check_launchpadlib_compatibility)
+
+
+class TestCacheDirectory(TestCase):
+ """Tests for get_cache_directory."""
+
+ _test_needs_features = [launchpadlib_feature]
+
+ def test_get_cache_directory(self):
+ # get_cache_directory returns the path to a directory inside the
+ # Bazaar configuration directory.
+ from bzrlib.plugins.launchpad import lp_api
+ expected_path = osutils.pathjoin(config.config_dir(), 'launchpad')
+ self.assertEqual(expected_path, lp_api.get_cache_directory())
+
+
+class TestLaunchpadMirror(TestCaseWithTransport):
+ """Tests for the 'bzr lp-mirror' command."""
+
+ # Testing the lp-mirror command is quite hard, since it must talk to a
+ # Launchpad server. Here, we just test that the command exists.
+
+ _test_needs_features = [launchpadlib_feature]
+
+ def test_command_exists(self):
+ out, err = self.run_bzr(['launchpad-mirror', '--help'], retcode=0)
+ self.assertEqual('', err)
+
+ def test_alias_exists(self):
+ out, err = self.run_bzr(['lp-mirror', '--help'], retcode=0)
+ self.assertEqual('', err)
diff --git a/bzrlib/plugins/launchpad/test_lp_api_lite.py b/bzrlib/plugins/launchpad/test_lp_api_lite.py
new file mode 100644
index 0000000..f4b5ea6
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_lp_api_lite.py
@@ -0,0 +1,549 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tools for dealing with the Launchpad API without using launchpadlib.
+"""
+
+import doctest
+import socket
+
+from bzrlib import tests
+from bzrlib.tests import features
+from bzrlib.plugins import launchpad
+from bzrlib.plugins.launchpad import lp_api_lite
+from testtools.matchers import DocTestMatches
+
+
+class _JSONParserFeature(features.Feature):
+
+ def _probe(self):
+ return lp_api_lite.json is not None
+
+ def feature_name(self):
+ return 'simplejson or json'
+
+
+JSONParserFeature = _JSONParserFeature()
+
+
+_example_response = r"""
+{
+ "total_size": 2,
+ "start": 0,
+ "next_collection_link": "https://api.launchpad.net/1.0/ubuntu/+archive/primary?distro_series=%2Fubuntu%2Flucid&exact_match=true&source_name=%22bzr%22&status=Published&ws.op=getPublishedSources&ws.start=1&ws.size=1",
+ "entries": [
+ {
+ "package_creator_link": "https://api.launchpad.net/1.0/~maxb",
+ "package_signer_link": "https://api.launchpad.net/1.0/~jelmer",
+ "source_package_name": "bzr",
+ "removal_comment": null,
+ "display_name": "bzr 2.1.4-0ubuntu1 in lucid",
+ "date_made_pending": null,
+ "source_package_version": "2.1.4-0ubuntu1",
+ "date_superseded": null,
+ "http_etag": "\"9ba966152dec474dc0fe1629d0bbce2452efaf3b-5f4c3fbb3eaf26d502db4089777a9b6a0537ffab\"",
+ "self_link": "https://api.launchpad.net/1.0/ubuntu/+archive/primary/+sourcepub/1750327",
+ "distro_series_link": "https://api.launchpad.net/1.0/ubuntu/lucid",
+ "component_name": "main",
+ "status": "Published",
+ "date_removed": null,
+ "pocket": "Updates",
+ "date_published": "2011-05-30T06:09:58.653984+00:00",
+ "removed_by_link": null,
+ "section_name": "devel",
+ "resource_type_link": "https://api.launchpad.net/1.0/#source_package_publishing_history",
+ "archive_link": "https://api.launchpad.net/1.0/ubuntu/+archive/primary",
+ "package_maintainer_link": "https://api.launchpad.net/1.0/~ubuntu-devel-discuss-lists",
+ "date_created": "2011-05-30T05:19:12.233621+00:00",
+ "scheduled_deletion_date": null
+ }
+ ]
+}"""
+
+_no_versions_response = '{"total_size": 0, "start": 0, "entries": []}'
+
+
+class TestLatestPublication(tests.TestCase):
+
+ def make_latest_publication(self, archive='ubuntu', series='natty',
+ project='bzr'):
+ return lp_api_lite.LatestPublication(archive, series, project)
+
+ def assertPlace(self, place, archive, series, project):
+ lp = lp_api_lite.LatestPublication(archive, series, project)
+ self.assertEqual(place, lp.place())
+
+ def test_init(self):
+ latest_pub = self.make_latest_publication()
+ self.assertEqual('ubuntu', latest_pub._archive)
+ self.assertEqual('natty', latest_pub._series)
+ self.assertEqual('bzr', latest_pub._project)
+ self.assertEqual('Release', latest_pub._pocket)
+
+ def test__archive_URL(self):
+ latest_pub = self.make_latest_publication()
+ self.assertEqual(
+ 'https://api.launchpad.net/1.0/ubuntu/+archive/primary',
+ latest_pub._archive_URL())
+
+ def test__publication_status_for_ubuntu(self):
+ latest_pub = self.make_latest_publication()
+ self.assertEqual('Published', latest_pub._publication_status())
+
+ def test__publication_status_for_debian(self):
+ latest_pub = self.make_latest_publication(archive='debian')
+ self.assertEqual('Pending', latest_pub._publication_status())
+
+ def test_pocket(self):
+ latest_pub = self.make_latest_publication(series='natty-proposed')
+ self.assertEqual('natty', latest_pub._series)
+ self.assertEqual('Proposed', latest_pub._pocket)
+
+ def test_series_None(self):
+ latest_pub = self.make_latest_publication(series=None)
+ self.assertEqual('ubuntu', latest_pub._archive)
+ self.assertEqual(None, latest_pub._series)
+ self.assertEqual('bzr', latest_pub._project)
+ self.assertEqual('Release', latest_pub._pocket)
+
+ def test__query_params(self):
+ latest_pub = self.make_latest_publication()
+ self.assertEqual({'ws.op': 'getPublishedSources',
+ 'exact_match': 'true',
+ 'source_name': '"bzr"',
+ 'status': 'Published',
+ 'ws.size': '1',
+ 'distro_series': '/ubuntu/natty',
+ 'pocket': 'Release',
+ }, latest_pub._query_params())
+
+ def test__query_params_no_series(self):
+ latest_pub = self.make_latest_publication(series=None)
+ self.assertEqual({'ws.op': 'getPublishedSources',
+ 'exact_match': 'true',
+ 'source_name': '"bzr"',
+ 'status': 'Published',
+ 'ws.size': '1',
+ 'pocket': 'Release',
+ }, latest_pub._query_params())
+
+ def test__query_params_pocket(self):
+ latest_pub = self.make_latest_publication(series='natty-proposed')
+ self.assertEqual({'ws.op': 'getPublishedSources',
+ 'exact_match': 'true',
+ 'source_name': '"bzr"',
+ 'status': 'Published',
+ 'ws.size': '1',
+ 'distro_series': '/ubuntu/natty',
+ 'pocket': 'Proposed',
+ }, latest_pub._query_params())
+
+ def test__query_URL(self):
+ latest_pub = self.make_latest_publication()
+ # we explicitly sort params, so we can be sure this URL matches exactly
+ self.assertEqual(
+ 'https://api.launchpad.net/1.0/ubuntu/+archive/primary'
+ '?distro_series=%2Fubuntu%2Fnatty&exact_match=true'
+ '&pocket=Release&source_name=%22bzr%22&status=Published'
+ '&ws.op=getPublishedSources&ws.size=1',
+ latest_pub._query_URL())
+
+ def DONT_test__gracefully_handle_failed_rpc_connection(self):
+ # TODO: This test kind of sucks. We intentionally create an arbitrary
+ # port and don't listen to it, because we want the request to fail.
+ # However, it seems to take 1s for it to timeout. Is there a way
+ # to make it fail faster?
+ latest_pub = self.make_latest_publication()
+ s = socket.socket()
+ s.bind(('127.0.0.1', 0))
+ addr, port = s.getsockname()
+ latest_pub.LP_API_ROOT = 'http://%s:%s/' % (addr, port)
+ s.close()
+ self.assertIs(None, latest_pub._get_lp_info())
+
+ def DONT_test__query_launchpad(self):
+ # TODO: This is a test that we are making a valid request against
+ # launchpad. This seems important, but it is slow, requires net
+ # access, and requires launchpad to be up and running. So for
+ # now, it is commented out for production tests.
+ latest_pub = self.make_latest_publication()
+ json_txt = latest_pub._get_lp_info()
+ self.assertIsNot(None, json_txt)
+ if lp_api_lite.json is None:
+ # We don't have a way to parse the text
+ return
+ # The content should be a valid json result
+ content = lp_api_lite.json.loads(json_txt)
+ entries = content['entries'] # It should have an 'entries' field.
+ # ws.size should mean we get 0 or 1, and there should be something
+ self.assertEqual(1, len(entries))
+ entry = entries[0]
+ self.assertEqual('bzr', entry['source_package_name'])
+ version = entry['source_package_version']
+ self.assertIsNot(None, version)
+
+ def test__get_lp_info_no_json(self):
+ # If we can't parse the json, we don't make the query.
+ self.overrideAttr(lp_api_lite, 'json', None)
+ latest_pub = self.make_latest_publication()
+ self.assertIs(None, latest_pub._get_lp_info())
+
+ def test__parse_json_info_no_module(self):
+ # If a json parsing module isn't available, we just return None here.
+ self.overrideAttr(lp_api_lite, 'json', None)
+ latest_pub = self.make_latest_publication()
+ self.assertIs(None, latest_pub._parse_json_info(_example_response))
+
+ def test__parse_json_example_response(self):
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ content = latest_pub._parse_json_info(_example_response)
+ self.assertIsNot(None, content)
+ self.assertEqual(2, content['total_size'])
+ entries = content['entries']
+ self.assertEqual(1, len(entries))
+ entry = entries[0]
+ self.assertEqual('bzr', entry['source_package_name'])
+ self.assertEqual("2.1.4-0ubuntu1", entry["source_package_version"])
+
+ def test__parse_json_not_json(self):
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ self.assertIs(None, latest_pub._parse_json_info('Not_valid_json'))
+
+ def test_get_latest_version_no_response(self):
+ latest_pub = self.make_latest_publication()
+ latest_pub._get_lp_info = lambda: None
+ self.assertEqual(None, latest_pub.get_latest_version())
+
+ def test_get_latest_version_no_json(self):
+ self.overrideAttr(lp_api_lite, 'json', None)
+ latest_pub = self.make_latest_publication()
+ self.assertEqual(None, latest_pub.get_latest_version())
+
+ def test_get_latest_version_invalid_json(self):
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ latest_pub._get_lp_info = lambda: "not json"
+ self.assertEqual(None, latest_pub.get_latest_version())
+
+ def test_get_latest_version_no_versions(self):
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ latest_pub._get_lp_info = lambda: _no_versions_response
+ self.assertEqual(None, latest_pub.get_latest_version())
+
+ def test_get_latest_version_missing_entries(self):
+ # Launchpad's no-entries response does have an empty entries value.
+        # However, let's test that we handle other failures without
+        # tracebacks.
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ latest_pub._get_lp_info = lambda: '{}'
+ self.assertEqual(None, latest_pub.get_latest_version())
+
+ def test_get_latest_version_invalid_entries(self):
+ # Make sure we sanely handle a json response we don't understand
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ latest_pub._get_lp_info = lambda: '{"entries": {"a": 1}}'
+ self.assertEqual(None, latest_pub.get_latest_version())
+
+ def test_get_latest_version_example(self):
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ latest_pub._get_lp_info = lambda: _example_response
+ self.assertEqual("2.1.4-0ubuntu1", latest_pub.get_latest_version())
+
+ def DONT_test_get_latest_version_from_launchpad(self):
+ self.requireFeature(JSONParserFeature)
+ latest_pub = self.make_latest_publication()
+ self.assertIsNot(None, latest_pub.get_latest_version())
+
+ def test_place(self):
+ self.assertPlace('Ubuntu', 'ubuntu', None, 'bzr')
+ self.assertPlace('Ubuntu Natty', 'ubuntu', 'natty', 'bzr')
+ self.assertPlace('Ubuntu Natty Proposed', 'ubuntu', 'natty-proposed',
+ 'bzr')
+ self.assertPlace('Debian', 'debian', None, 'bzr')
+ self.assertPlace('Debian Sid', 'debian', 'sid', 'bzr')
+
+
+class TestIsUpToDate(tests.TestCase):
+
+ def assertPackageBranchRe(self, url, user, archive, series, project):
+ m = launchpad._package_branch.search(url)
+ if m is None:
+ self.fail('package_branch regex did not match url: %s' % (url,))
+ self.assertEqual(
+ (user, archive, series, project),
+ m.group('user', 'archive', 'series', 'project'))
+
+ def assertNotPackageBranch(self, url):
+ self.assertIs(None, launchpad._get_package_branch_info(url))
+
+ def assertBranchInfo(self, url, archive, series, project):
+ self.assertEqual((archive, series, project),
+ launchpad._get_package_branch_info(url))
+
+ def test_package_branch_regex(self):
+ self.assertPackageBranchRe(
+ 'http://bazaar.launchpad.net/+branch/ubuntu/foo',
+ None, 'ubuntu', None, 'foo')
+ self.assertPackageBranchRe(
+ 'bzr+ssh://bazaar.launchpad.net/+branch/ubuntu/natty/foo',
+ None, 'ubuntu', 'natty/', 'foo')
+ self.assertPackageBranchRe(
+ 'sftp://bazaar.launchpad.net/+branch/debian/foo',
+ None, 'debian', None, 'foo')
+ self.assertPackageBranchRe(
+ 'http://bazaar.launchpad.net/+branch/debian/sid/foo',
+ None, 'debian', 'sid/', 'foo')
+ self.assertPackageBranchRe(
+ 'http://bazaar.launchpad.net/+branch'
+ '/~ubuntu-branches/ubuntu/natty/foo/natty',
+ '~ubuntu-branches/', 'ubuntu', 'natty/', 'foo')
+ self.assertPackageBranchRe(
+ 'http://bazaar.launchpad.net/+branch'
+ '/~user/ubuntu/natty/foo/test',
+ '~user/', 'ubuntu', 'natty/', 'foo')
+
+ def test_package_branch_doesnt_match(self):
+ self.assertNotPackageBranch('http://example.com/ubuntu/foo')
+ self.assertNotPackageBranch(
+ 'http://bazaar.launchpad.net/+branch/bzr')
+ self.assertNotPackageBranch(
+ 'http://bazaar.launchpad.net/+branch/~bzr-pqm/bzr/bzr.dev')
+ # Not a packaging branch because ~user isn't ~ubuntu-branches
+ self.assertNotPackageBranch(
+ 'http://bazaar.launchpad.net/+branch'
+ '/~user/ubuntu/natty/foo/natty')
+ # Older versions of bzr-svn/hg/git did not set Branch.base until after
+ # they called Branch.__init__().
+ self.assertNotPackageBranch(None)
+
+ def test__get_package_branch_info(self):
+ self.assertBranchInfo(
+ 'bzr+ssh://bazaar.launchpad.net/+branch/ubuntu/natty/foo',
+ 'ubuntu', 'natty', 'foo')
+ self.assertBranchInfo(
+ 'bzr+ssh://bazaar.launchpad.net/+branch'
+ '/~ubuntu-branches/ubuntu/natty/foo/natty',
+ 'ubuntu', 'natty', 'foo')
+ self.assertBranchInfo(
+ 'http://bazaar.launchpad.net/+branch'
+ '/~ubuntu-branches/debian/sid/foo/sid',
+ 'debian', 'sid', 'foo')
+
+
+class TestGetMostRecentTag(tests.TestCaseWithMemoryTransport):
+
+ def make_simple_builder(self):
+ builder = self.make_branch_builder('tip')
+ builder.build_snapshot('A', [], [
+ ('add', ('', 'root-id', 'directory', None))])
+ b = builder.get_branch()
+ b.tags.set_tag('tip-1.0', 'A')
+ return builder, b, b.tags.get_tag_dict()
+
+ def test_get_most_recent_tag_tip(self):
+ builder, b, tag_dict = self.make_simple_builder()
+ self.assertEqual('tip-1.0',
+ lp_api_lite.get_most_recent_tag(tag_dict, b))
+
+ def test_get_most_recent_tag_older(self):
+ builder, b, tag_dict = self.make_simple_builder()
+ builder.build_snapshot('B', ['A'], [])
+ self.assertEqual('B', b.last_revision())
+ self.assertEqual('tip-1.0',
+ lp_api_lite.get_most_recent_tag(tag_dict, b))
+
+
+class StubLatestPublication(object):
+
+ def __init__(self, latest):
+ self.called = False
+ self.latest = latest
+
+ def get_latest_version(self):
+ self.called = True
+ return self.latest
+
+ def place(self):
+ return 'Ubuntu Natty'
+
+
+class TestReportFreshness(tests.TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ super(TestReportFreshness, self).setUp()
+ builder = self.make_branch_builder('tip')
+ builder.build_snapshot('A', [], [
+ ('add', ('', 'root-id', 'directory', None))])
+ self.branch = builder.get_branch()
+
+ def assertFreshnessReports(self, verbosity, latest_version, content):
+ """Assert that lp_api_lite.report_freshness reports the given content.
+
+ :param verbosity: The reporting level
+ :param latest_version: The version reported by StubLatestPublication
+ :param content: The expected content. This should be in DocTest form.
+ """
+ orig_log_len = len(self.get_log())
+ lp_api_lite.report_freshness(self.branch, verbosity,
+ StubLatestPublication(latest_version))
+ new_content = self.get_log()[orig_log_len:]
+ # Strip out lines that have LatestPublication.get_* because those are
+        # timing-related lines. While interesting to log for now, they aren't
+        # something we want to be testing.
+ new_content = new_content.split('\n')
+ for i in range(2):
+ if (len(new_content) > 0
+ and 'LatestPublication.get_' in new_content[0]):
+ new_content = new_content[1:]
+ new_content = '\n'.join(new_content)
+ self.assertThat(new_content,
+ DocTestMatches(content,
+ doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+
+ def test_verbosity_off_skips_check(self):
+ # We force _get_package_branch_info so that we know it would otherwise
+ # try to connect to Launchpad.
+ self.overrideAttr(launchpad, '_get_package_branch_info',
+ lambda x: ('ubuntu', 'natty', 'bzr'))
+ self.overrideAttr(lp_api_lite, 'LatestPublication',
+ lambda *args: self.fail('Tried to query launchpad'))
+ c = self.branch.get_config_stack()
+ c.set('launchpad.packaging_verbosity', 'off')
+ orig_log_len = len(self.get_log())
+ launchpad._check_is_up_to_date(self.branch)
+ new_content = self.get_log()[orig_log_len:]
+ self.assertContainsRe(new_content,
+ 'not checking memory.*/tip/ because verbosity is turned off')
+
+ def test_verbosity_off(self):
+ latest_pub = StubLatestPublication('1.0-1ubuntu2')
+ lp_api_lite.report_freshness(self.branch, 'off', latest_pub)
+ self.assertFalse(latest_pub.called)
+
+ def test_verbosity_all_out_of_date_smoke(self):
+ self.branch.tags.set_tag('1.0-1ubuntu1', 'A')
+ self.assertFreshnessReports('all', '1.0-1ubuntu2',
+ ' INFO Most recent Ubuntu Natty version: 1.0-1ubuntu2\n'
+ 'Packaging branch version: 1.0-1ubuntu1\n'
+ 'Packaging branch status: OUT-OF-DATE\n')
+
+
+class Test_GetNewestVersions(tests.TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ super(Test_GetNewestVersions, self).setUp()
+ builder = self.make_branch_builder('tip')
+ builder.build_snapshot('A', [], [
+ ('add', ('', 'root-id', 'directory', None))])
+ self.branch = builder.get_branch()
+
+ def assertLatestVersions(self, latest_branch_version, pub_version):
+ if latest_branch_version is not None:
+ self.branch.tags.set_tag(latest_branch_version, 'A')
+ latest_pub = StubLatestPublication(pub_version)
+ self.assertEqual((pub_version, latest_branch_version),
+ lp_api_lite._get_newest_versions(self.branch, latest_pub))
+
+ def test_no_tags(self):
+ self.assertLatestVersions(None, '1.0-1ubuntu2')
+
+ def test_out_of_date(self):
+ self.assertLatestVersions('1.0-1ubuntu1', '1.0-1ubuntu2')
+
+ def test_up_to_date(self):
+ self.assertLatestVersions('1.0-1ubuntu2', '1.0-1ubuntu2')
+
+ def test_missing(self):
+ self.assertLatestVersions(None, None)
+
+
+class Test_ReportFreshness(tests.TestCase):
+
+ def assertReportedFreshness(self, verbosity, latest_ver, branch_latest_ver,
+ content, place='Ubuntu Natty'):
+ """Assert that lp_api_lite.report_freshness reports the given content.
+ """
+ reported = []
+ def report_func(value):
+ reported.append(value)
+
+ lp_api_lite._report_freshness(latest_ver, branch_latest_ver, place,
+ verbosity, report_func)
+ new_content = '\n'.join(reported)
+ self.assertThat(new_content,
+ DocTestMatches(content,
+ doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+
+ def test_verbosity_minimal_no_tags(self):
+ self.assertReportedFreshness('minimal', '1.0-1ubuntu2', None,
+ 'Branch is OUT-OF-DATE, Ubuntu Natty has 1.0-1ubuntu2\n')
+
+ def test_verbosity_minimal_out_of_date(self):
+ self.assertReportedFreshness('minimal', '1.0-1ubuntu2', '1.0-1ubuntu1',
+ '1.0-1ubuntu1 is OUT-OF-DATE,'
+ ' Ubuntu Natty has 1.0-1ubuntu2\n')
+
+ def test_verbosity_minimal_up_to_date(self):
+ self.assertReportedFreshness('minimal', '1.0-1ubuntu2', '1.0-1ubuntu2',
+ '')
+
+ def test_verbosity_minimal_missing(self):
+ self.assertReportedFreshness('minimal', None, None,
+ '')
+
+ def test_verbosity_short_out_of_date(self):
+ self.assertReportedFreshness('short', '1.0-1ubuntu2', '1.0-1ubuntu1',
+ '1.0-1ubuntu1 is OUT-OF-DATE,'
+ ' Ubuntu Natty has 1.0-1ubuntu2\n')
+
+ def test_verbosity_short_up_to_date(self):
+ self.assertReportedFreshness('short', '1.0-1ubuntu2', '1.0-1ubuntu2',
+ '1.0-1ubuntu2 is CURRENT in Ubuntu Natty')
+
+ def test_verbosity_short_missing(self):
+ self.assertReportedFreshness('short', None, None,
+ 'Ubuntu Natty is MISSING a version')
+
+ def test_verbosity_all_no_tags(self):
+ self.assertReportedFreshness('all', '1.0-1ubuntu2', None,
+ 'Most recent Ubuntu Natty version: 1.0-1ubuntu2\n'
+ 'Packaging branch version: None\n'
+ 'Packaging branch status: OUT-OF-DATE\n')
+
+ def test_verbosity_all_out_of_date(self):
+ self.assertReportedFreshness('all', '1.0-1ubuntu2', '1.0-1ubuntu1',
+ 'Most recent Ubuntu Natty version: 1.0-1ubuntu2\n'
+ 'Packaging branch version: 1.0-1ubuntu1\n'
+ 'Packaging branch status: OUT-OF-DATE\n')
+
+ def test_verbosity_all_up_to_date(self):
+ self.assertReportedFreshness('all', '1.0-1ubuntu2', '1.0-1ubuntu2',
+ 'Most recent Ubuntu Natty version: 1.0-1ubuntu2\n'
+ 'Packaging branch status: CURRENT\n')
+
+ def test_verbosity_all_missing(self):
+ self.assertReportedFreshness('all', None, None,
+ 'Most recent Ubuntu Natty version: MISSING\n')
+
+ def test_verbosity_None_is_all(self):
+ self.assertReportedFreshness(None, '1.0-1ubuntu2', '1.0-1ubuntu2',
+ 'Most recent Ubuntu Natty version: 1.0-1ubuntu2\n'
+ 'Packaging branch status: CURRENT\n')
diff --git a/bzrlib/plugins/launchpad/test_lp_directory.py b/bzrlib/plugins/launchpad/test_lp_directory.py
new file mode 100644
index 0000000..f678ff3
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_lp_directory.py
@@ -0,0 +1,639 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for directory lookup through Launchpad.net"""
+
+import os
+import xmlrpclib
+
+import bzrlib
+from bzrlib import (
+ debug,
+ errors,
+ tests,
+ transport,
+ )
+from bzrlib.branch import Branch
+from bzrlib.directory_service import directories
+from bzrlib.tests import (
+ features,
+ ssl_certs,
+ TestCaseInTempDir,
+ TestCaseWithMemoryTransport
+)
+from bzrlib.plugins.launchpad import (
+ _register_directory,
+ lp_registration,
+ )
+from bzrlib.plugins.launchpad.lp_directory import (
+ LaunchpadDirectory)
+from bzrlib.plugins.launchpad.account import get_lp_login, set_lp_login
+from bzrlib.tests import http_server
+
+
+def load_tests(standard_tests, module, loader):
+ result = loader.suiteClass()
+ t_tests, remaining_tests = tests.split_suite_by_condition(
+ standard_tests, tests.condition_isinstance((
+ TestXMLRPCTransport,
+ )))
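+ # Run the XML-RPC transport tests once per server scenario: plain HTTP,
+ # plus HTTPS when the test feature is available.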
+ transport_scenarios = [
+ ('http', dict(server_class=PreCannedHTTPServer,)),
+ ]
+ if features.HTTPSServerFeature.available():
+ transport_scenarios.append(
+ ('https', dict(server_class=PreCannedHTTPSServer,)),
+ )
+ tests.multiply_tests(t_tests, transport_scenarios, result)
+
+ # No parametrization for the remaining tests
+ result.addTests(remaining_tests)
+
+ return result
+
+
+class FakeResolveFactory(object):
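+ """Fake lp path resolution: check the requested path and return a canned result."""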
+
+ def __init__(self, test, expected_path, result):
+ self._test = test
+ self._expected_path = expected_path
+ self._result = result
+ self._submitted = False
+
+ def __call__(self, path):
+ self._test.assertEqual(self._expected_path, path)
+ return self
+
+ def submit(self, service):
+ self._service_url = service.service_url
+ self._submitted = True
+ return self._result
+
+
+class LocalDirectoryURLTests(TestCaseInTempDir):
+ """Tests for branch urls that we try to pass through local resolution."""
+
+ def assertResolve(self, expected, url, submitted=False):
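+ # Strip the scheme prefix and any leading slashes so the fake factory
+ # is handed the bare Launchpad path.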
+ path = url[url.index(':')+1:].lstrip('/')
+ factory = FakeResolveFactory(self, path,
+ dict(urls=['bzr+ssh://fake-resolved']))
+ directory = LaunchpadDirectory()
+ self.assertEqual(expected,
+ directory._resolve(url, factory, _lp_login='user'))
+ # We are testing local resolution, and the fallback when necessary.
+ self.assertEqual(submitted, factory._submitted)
+
+ def test_short_form(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/apt',
+ 'lp:apt')
+
+ def test_two_part_form(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/apt/2.2',
+ 'lp:apt/2.2')
+
+ def test_two_part_plus_subdir(self):
+ # We allow you to pass more than just what resolves. That way you can
+ # do things like "bzr log lp:apt/2.2/BUGS"
+ # Though the virtual FS implementation currently aborts when given a
+ # URL like this, rather than letting you recurse upwards to find the
+ # real branch at lp:apt/2.2
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/apt/2.2/BUGS',
+ 'lp:apt/2.2/BUGS')
+
+ def test_user_expansion(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/~user/apt/foo',
+ 'lp:~/apt/foo')
+
+ def test_ubuntu(self):
+ # Confirmed against xmlrpc. If you don't have a ~user, xmlrpc doesn't
+ # care that you are asking for 'ubuntu'
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/ubuntu',
+ 'lp:ubuntu')
+
+ def test_ubuntu_invalid(self):
+ """Invalid ubuntu urls don't crash.
+
+ :seealso: http://pad.lv/843900
+ """
+ # This ought to be natty-updates.
+ self.assertRaises(errors.InvalidURL,
+ self.assertResolve,
+ '',
+ 'ubuntu:natty/updates/smartpm')
+
+ def test_ubuntu_apt(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/ubuntu/apt',
+ 'lp:ubuntu/apt')
+
+ def test_ubuntu_natty_apt(self):
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/+branch/ubuntu/natty/apt',
+ 'lp:ubuntu/natty/apt')
+
+ def test_ubuntu_natty_apt_filename(self):
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/+branch/ubuntu/natty/apt/filename',
+ 'lp:ubuntu/natty/apt/filename')
+
+ def test_user_two_part(self):
+ # We fall back to the ResolveFactory. The real Launchpad one will raise
+ # InvalidURL for this case.
+ self.assertResolve('bzr+ssh://fake-resolved', 'lp:~jameinel/apt',
+ submitted=True)
+
+ def test_user_three_part(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/~jameinel/apt/foo',
+ 'lp:~jameinel/apt/foo')
+
+ def test_user_three_part_plus_filename(self):
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/~jameinel/apt/foo/fname',
+ 'lp:~jameinel/apt/foo/fname')
+
+ def test_user_ubuntu_two_part(self):
+ self.assertResolve('bzr+ssh://fake-resolved', 'lp:~jameinel/ubuntu',
+ submitted=True)
+ self.assertResolve('bzr+ssh://fake-resolved', 'lp:~jameinel/debian',
+ submitted=True)
+
+ def test_user_ubuntu_three_part(self):
+ self.assertResolve('bzr+ssh://fake-resolved',
+ 'lp:~jameinel/ubuntu/natty', submitted=True)
+ self.assertResolve('bzr+ssh://fake-resolved',
+ 'lp:~jameinel/debian/sid', submitted=True)
+
+ def test_user_ubuntu_four_part(self):
+ self.assertResolve('bzr+ssh://fake-resolved',
+ 'lp:~jameinel/ubuntu/natty/project', submitted=True)
+ self.assertResolve('bzr+ssh://fake-resolved',
+ 'lp:~jameinel/debian/sid/project', submitted=True)
+
+ def test_user_ubuntu_five_part(self):
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/~jameinel/ubuntu/natty/apt/branch',
+ 'lp:~jameinel/ubuntu/natty/apt/branch')
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/~jameinel/debian/sid/apt/branch',
+ 'lp:~jameinel/debian/sid/apt/branch')
+
+ def test_user_ubuntu_five_part_plus_subdir(self):
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/~jameinel/ubuntu/natty/apt/branch/f',
+ 'lp:~jameinel/ubuntu/natty/apt/branch/f')
+ self.assertResolve(
+ 'bzr+ssh://bazaar.launchpad.net/~jameinel/debian/sid/apt/branch/f',
+ 'lp:~jameinel/debian/sid/apt/branch/f')
+
+ def test_handles_special_lp(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/apt', 'lp:apt')
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/apt',
+ 'lp:///apt')
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/apt',
+ 'lp://production/apt')
+ self.assertResolve('bzr+ssh://bazaar.launchpad.dev/+branch/apt',
+ 'lp://dev/apt')
+ self.assertResolve('bzr+ssh://bazaar.staging.launchpad.net/+branch/apt',
+ 'lp://staging/apt')
+ self.assertResolve('bzr+ssh://bazaar.qastaging.launchpad.net/+branch/apt',
+ 'lp://qastaging/apt')
+ self.assertResolve('bzr+ssh://bazaar.demo.launchpad.net/+branch/apt',
+ 'lp://demo/apt')
+
+ def test_debug_launchpad_uses_resolver(self):
+ self.assertResolve('bzr+ssh://bazaar.launchpad.net/+branch/bzr',
+ 'lp:bzr', submitted=False)
+ debug.debug_flags.add('launchpad')
+ self.addCleanup(debug.debug_flags.discard, 'launchpad')
+ self.assertResolve('bzr+ssh://fake-resolved', 'lp:bzr', submitted=True)
+
+
+class DirectoryUrlTests(TestCaseInTempDir):
+ """Tests for branch urls through Launchpad.net directory"""
+
+ def test_short_form(self):
+ """A launchpad url should map to a http url"""
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'http://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.launchpad.net/~apt/apt/devel',
+ directory._resolve('lp:apt', factory))
+ # Make sure that resolve went to the production server.
+ self.assertEquals('https://xmlrpc.launchpad.net/bazaar/',
+ factory._service_url)
+
+ def test_qastaging(self):
+ """A launchpad url should map to a http url"""
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'http://bazaar.qastaging.launchpad.net/~apt/apt/devel']))
+ url = 'lp://qastaging/apt'
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.qastaging.launchpad.net/~apt/apt/devel',
+ directory._resolve(url, factory))
+ # Make sure that resolve went to the qastaging server.
+ self.assertEquals('https://xmlrpc.qastaging.launchpad.net/bazaar/',
+ factory._service_url)
+
+ def test_staging(self):
+ """A launchpad url should map to a http url"""
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'http://bazaar.staging.launchpad.net/~apt/apt/devel']))
+ url = 'lp://staging/apt'
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.staging.launchpad.net/~apt/apt/devel',
+ directory._resolve(url, factory))
+ # Make sure that resolve went to the staging server.
+ self.assertEquals('https://xmlrpc.staging.launchpad.net/bazaar/',
+ factory._service_url)
+
+ def test_url_from_directory(self):
+ """A launchpad url should map to a http url"""
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'http://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.launchpad.net/~apt/apt/devel',
+ directory._resolve('lp:///apt', factory))
+
+ def test_directory_skip_bad_schemes(self):
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'bad-scheme://bazaar.launchpad.net/~apt/apt/devel',
+ 'http://bazaar.launchpad.net/~apt/apt/devel',
+ 'http://another/location']))
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.launchpad.net/~apt/apt/devel',
+ directory._resolve('lp:///apt', factory))
+
+ def test_directory_no_matching_schemes(self):
+ # If the XMLRPC call does not return any protocols we support,
+ # InvalidURL is raised.
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'bad-scheme://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertRaises(errors.InvalidURL,
+ directory._resolve, 'lp:///apt', factory)
+
+ def test_directory_fault(self):
+ # Test that XMLRPC faults get converted to InvalidURL errors.
+ factory = FakeResolveFactory(self, 'apt', None)
+ def submit(service):
+ raise xmlrpclib.Fault(42, 'something went wrong')
+ factory.submit = submit
+ directory = LaunchpadDirectory()
+ self.assertRaises(errors.InvalidURL,
+ directory._resolve, 'lp:///apt', factory)
+
+ def test_skip_bzr_ssh_launchpad_net_when_anonymous(self):
+ # Test that bzr+ssh://bazaar.launchpad.net gets skipped if
+ # Bazaar does not know the user's Launchpad ID:
+ self.assertEqual(None, get_lp_login())
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'bzr+ssh://bazaar.launchpad.net/~apt/apt/devel',
+ 'http://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.launchpad.net/~apt/apt/devel',
+ directory._resolve('lp:///apt', factory))
+
+ def test_skip_sftp_launchpad_net_when_anonymous(self):
+ # Test that sftp://bazaar.launchpad.net gets skipped if
+ # Bazaar does not know the user's Launchpad ID:
+ self.assertEqual(None, get_lp_login())
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'sftp://bazaar.launchpad.net/~apt/apt/devel',
+ 'http://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertEquals('http://bazaar.launchpad.net/~apt/apt/devel',
+ directory._resolve('lp:///apt', factory))
+
+ def test_with_login_avoid_resolve_factory(self):
+ # Test that bzr+ssh URLs get rewritten to include the user's
+ # Launchpad ID (assuming we know the Launchpad ID).
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'bzr+ssh://my-super-custom/special/devel',
+ 'http://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertEquals(
+ 'bzr+ssh://bazaar.launchpad.net/+branch/apt',
+ directory._resolve('lp:///apt', factory, _lp_login='username'))
+
+ def test_no_rewrite_of_other_bzr_ssh(self):
+ # Test that we don't rewrite bzr+ssh URLs for other hosts.
+ self.assertEqual(None, get_lp_login())
+ factory = FakeResolveFactory(
+ self, 'apt', dict(urls=[
+ 'bzr+ssh://example.com/~apt/apt/devel',
+ 'http://bazaar.launchpad.net/~apt/apt/devel']))
+ directory = LaunchpadDirectory()
+ self.assertEquals('bzr+ssh://example.com/~apt/apt/devel',
+ directory._resolve('lp:///apt', factory))
+
+ # TODO: check we get an error if the url is unreasonable
+ def test_error_for_bad_url(self):
+ directory = LaunchpadDirectory()
+ self.assertRaises(errors.InvalidURL,
+ directory._resolve, 'lp://ratotehunoahu')
+
+ def test_resolve_tilde_to_user(self):
+ factory = FakeResolveFactory(
+ self, '~username/apt/test', dict(urls=[
+ 'bzr+ssh://bazaar.launchpad.net/~username/apt/test']))
+ directory = LaunchpadDirectory()
+ self.assertEquals(
+ 'bzr+ssh://bazaar.launchpad.net/~username/apt/test',
+ directory._resolve('lp:~/apt/test', factory, _lp_login='username'))
+ # Should also happen when the login is just set by config
+ set_lp_login('username')
+ self.assertEquals(
+ 'bzr+ssh://bazaar.launchpad.net/~username/apt/test',
+ directory._resolve('lp:~/apt/test', factory))
+
+ def test_tilde_fails_no_login(self):
+ factory = FakeResolveFactory(
+ self, '~username/apt/test', dict(urls=[
+ 'bzr+ssh://bazaar.launchpad.net/~username/apt/test']))
+ self.assertIs(None, get_lp_login())
+ directory = LaunchpadDirectory()
+ self.assertRaises(errors.InvalidURL,
+ directory._resolve, 'lp:~/apt/test', factory)
+
+
+class DirectoryOpenBranchTests(TestCaseWithMemoryTransport):
+
+ def test_directory_open_branch(self):
+ # Test that opening an lp: branch redirects to the real location.
+ target_branch = self.make_branch('target')
+ class FooService(object):
+ """A directory service that maps the name to a FILE url"""
+
+ def look_up(self, name, url):
+ if 'lp:///apt' == url:
+ return target_branch.base.rstrip('/')
+ return '!unexpected look_up value!'
+
+ directories.remove('lp:')
+ directories.remove('ubuntu:')
+ directories.remove('debianlp:')
+ directories.register('lp:', FooService, 'Map lp URLs to local urls')
+ self.addCleanup(_register_directory)
+ self.addCleanup(directories.remove, 'lp:')
+ t = transport.get_transport('lp:///apt')
+ branch = Branch.open_from_transport(t)
+ self.assertEqual(target_branch.base, branch.base)
+
+
+class PredefinedRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Request handler for a unique and pre-defined request.
+
+ The only thing we care about here is that we receive a connection. But
+ since we want to talk to a real http client, we have to send it correct
+ responses.
+
+ We expect to receive a *single* request, nothing more (and we won't even
+ check what request it is); the tests will recognize us from our response.
+ """
+
+ def handle_one_request(self):
+ tcs = self.server.test_case_server
+ requestline = self.rfile.readline()
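+ # Parse (and discard) the request headers so any body that follows can
+ # be read from the stream.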
+ self.MessageClass(self.rfile, 0)
+ if requestline.startswith('POST'):
+ # The body should be a single line (or we don't know where it ends
+ # and we don't want to issue a blocking read)
+ self.rfile.readline()
+
+ self.wfile.write(tcs.canned_response)
+
+
+class PreCannedServerMixin(object):
+
+ def __init__(self):
+ super(PreCannedServerMixin, self).__init__(
+ request_handler=PredefinedRequestHandler)
+ # Bytes read and written by the server
+ self.bytes_read = 0
+ self.bytes_written = 0
+ self.canned_response = None
+
+
+class PreCannedHTTPServer(PreCannedServerMixin, http_server.HttpServer):
+ pass
+
+
+if features.HTTPSServerFeature.available():
+ from bzrlib.tests import https_server
+ class PreCannedHTTPSServer(PreCannedServerMixin, https_server.HTTPSServer):
+ pass
+
+
+class TestXMLRPCTransport(tests.TestCase):
+
+ # set by load_tests
+ server_class = None
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.server = self.server_class()
+ self.server.start_server()
+ self.addCleanup(self.server.stop_server)
+ # Ensure we don't clobber env
+ self.overrideEnv('BZR_LP_XMLRPC_URL', None)
+ # Ensure we use the right certificates for https.
+ # FIXME: There should be a better way but the only alternative I can
+ # think of involves carrying the ca_certs through the lp_registration
+ # infrastructure to _urllib2_wrappers... -- vila 2012-01-20
+ bzrlib.global_state.cmdline_overrides._from_cmdline(
+ ['ssl.ca_certs=%s' % ssl_certs.build_path('ca.crt')])
+
+ def set_canned_response(self, server, path):
+ response_format = '''HTTP/1.1 200 OK\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Sun, 23 Apr 2006 19:35:20 GMT\r
+ETag: "56691-23-38e9ae00"\r
+Accept-Ranges: bytes\r
+Content-Length: %(length)d\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+<?xml version='1.0'?>
+<methodResponse>
+<params>
+<param>
+<value><struct>
+<member>
+<name>urls</name>
+<value><array><data>
+<value><string>bzr+ssh://bazaar.launchpad.net/%(path)s</string></value>
+<value><string>http://bazaar.launchpad.net/%(path)s</string></value>
+</data></array></value>
+</member>
+</struct></value>
+</param>
+</params>
+</methodResponse>
+'''
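+ # The canned body embeds the path twice (a bzr+ssh and an http URL), so
+ # the Content-Length is the fixed body size plus twice the path length.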
+ length = 334 + 2 * len(path)
+ server.canned_response = response_format % dict(length=length,
+ path=path)
+
+ def do_request(self, server_url):
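+ # Point the XML-RPC client at the pre-canned test server via the
+ # BZR_LP_XMLRPC_URL environment variable.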
+ os.environ['BZR_LP_XMLRPC_URL'] = self.server.get_url()
+ service = lp_registration.LaunchpadService()
+ resolve = lp_registration.ResolveLaunchpadPathRequest('bzr')
+ result = resolve.submit(service)
+ return result
+
+ def test_direct_request(self):
+ self.set_canned_response(self.server, '~bzr-pqm/bzr/bzr.dev')
+ result = self.do_request(self.server.get_url())
+ urls = result.get('urls', None)
+ self.assertIsNot(None, urls)
+ self.assertEquals(
+ ['bzr+ssh://bazaar.launchpad.net/~bzr-pqm/bzr/bzr.dev',
+ 'http://bazaar.launchpad.net/~bzr-pqm/bzr/bzr.dev'],
+ urls)
+ # FIXME: we need to test with a real proxy; I can't find a way to simulate
+ # CONNECT without leaving one server hanging the test :-/ Since that may be
+ # related to the leaking tests problems, I'll punt for now -- vila 20091030
+
+
+class TestDebuntuExpansions(TestCaseInTempDir):
+ """Test expansions for ubuntu: and debianlp: schemes."""
+
+ def setUp(self):
+ super(TestDebuntuExpansions, self).setUp()
+ self.directory = LaunchpadDirectory()
+
+ def _make_factory(self, package='foo', distro='ubuntu', series=None):
+ if series is None:
+ path = '%s/%s' % (distro, package)
+ url_suffix = '~branch/%s/%s' % (distro, package)
+ else:
+ path = '%s/%s/%s' % (distro, series, package)
+ url_suffix = '~branch/%s/%s/%s' % (distro, series, package)
+ return FakeResolveFactory(
+ self, path, dict(urls=[
+ 'http://bazaar.launchpad.net/' + url_suffix]))
+
+ def assertURL(self, expected_url, shortcut, package='foo', distro='ubuntu',
+ series=None):
+ factory = self._make_factory(package=package, distro=distro,
+ series=series)
+ self.assertEqual('http://bazaar.launchpad.net/~branch/' + expected_url,
+ self.directory._resolve(shortcut, factory))
+
+ # Bogus distro.
+
+ def test_bogus_distro(self):
+ self.assertRaises(errors.InvalidURL,
+ self.directory._resolve, 'gentoo:foo')
+
+ def test_trick_bogus_distro_u(self):
+ self.assertRaises(errors.InvalidURL,
+ self.directory._resolve, 'utube:foo')
+
+ def test_trick_bogus_distro_d(self):
+ self.assertRaises(errors.InvalidURL,
+ self.directory._resolve, 'debuntu:foo')
+
+ def test_missing_ubuntu_distroseries_without_project(self):
+ # Launchpad does not hold source packages for Intrepid. Missing or
+ # bogus distroseries with no project name is treated like a project.
+ self.assertURL('ubuntu/intrepid', 'ubuntu:intrepid', package='intrepid')
+
+ def test_missing_ubuntu_distroseries_with_project(self):
+ # Launchpad does not hold source packages for Intrepid. Missing or
+ # bogus distroseries with a project name is treated like an unknown
+ # series (i.e. we keep it verbatim).
+ self.assertURL('ubuntu/intrepid/foo',
+ 'ubuntu:intrepid/foo', series='intrepid')
+
+ def test_missing_debian_distroseries(self):
+ # Launchpad does not hold source packages for unstable. Missing or
+ # bogus distroseries is treated like a project.
+ self.assertURL('debian/sid',
+ 'debianlp:sid', package='sid', distro='debian')
+
+ # Ubuntu Default distro series.
+
+ def test_ubuntu_default_distroseries_expansion(self):
+ self.assertURL('ubuntu/foo', 'ubuntu:foo')
+
+ def test_ubuntu_natty_distroseries_expansion(self):
+ self.assertURL('ubuntu/natty/foo', 'ubuntu:natty/foo', series='natty')
+
+ def test_ubuntu_n_distroseries_expansion(self):
+ self.assertURL('ubuntu/natty/foo', 'ubuntu:n/foo', series='natty')
+
+ def test_ubuntu_maverick_distroseries_expansion(self):
+ self.assertURL('ubuntu/maverick/foo', 'ubuntu:maverick/foo',
+ series='maverick')
+
+ def test_ubuntu_m_distroseries_expansion(self):
+ self.assertURL('ubuntu/maverick/foo', 'ubuntu:m/foo', series='maverick')
+
+ def test_ubuntu_lucid_distroseries_expansion(self):
+ self.assertURL('ubuntu/lucid/foo', 'ubuntu:lucid/foo', series='lucid')
+
+ def test_ubuntu_l_distroseries_expansion(self):
+ self.assertURL('ubuntu/lucid/foo', 'ubuntu:l/foo', series='lucid')
+
+ def test_ubuntu_karmic_distroseries_expansion(self):
+ self.assertURL('ubuntu/karmic/foo', 'ubuntu:karmic/foo',
+ series='karmic')
+
+ def test_ubuntu_k_distroseries_expansion(self):
+ self.assertURL('ubuntu/karmic/foo', 'ubuntu:k/foo', series='karmic')
+
+ def test_ubuntu_jaunty_distroseries_expansion(self):
+ self.assertURL('ubuntu/jaunty/foo', 'ubuntu:jaunty/foo',
+ series='jaunty')
+
+ def test_ubuntu_j_distroseries_expansion(self):
+ self.assertURL('ubuntu/jaunty/foo', 'ubuntu:j/foo', series='jaunty')
+
+ def test_ubuntu_hardy_distroseries_expansion(self):
+ self.assertURL('ubuntu/hardy/foo', 'ubuntu:hardy/foo', series='hardy')
+
+ def test_ubuntu_h_distroseries_expansion(self):
+ self.assertURL('ubuntu/hardy/foo', 'ubuntu:h/foo', series='hardy')
+
+ def test_ubuntu_dapper_distroseries_expansion(self):
+ self.assertURL('ubuntu/dapper/foo', 'ubuntu:dapper/foo',
+ series='dapper')
+
+ def test_ubuntu_d_distroseries_expansion(self):
+ self.assertURL('ubuntu/dapper/foo', 'ubuntu:d/foo', series='dapper')
+
+ # Debian default distro series.
+
+ def test_debian_default_distroseries_expansion(self):
+ self.assertURL('debian/foo', 'debianlp:foo', distro='debian')
+
+ def test_debian_squeeze_distroseries_expansion(self):
+ self.assertURL('debian/squeeze/foo', 'debianlp:squeeze/foo',
+ distro='debian', series='squeeze')
+
+ def test_debian_lenny_distroseries_expansion(self):
+ self.assertURL('debian/lenny/foo', 'debianlp:lenny/foo',
+ distro='debian', series='lenny')
diff --git a/bzrlib/plugins/launchpad/test_lp_login.py b/bzrlib/plugins/launchpad/test_lp_login.py
new file mode 100644
index 0000000..8439fef
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_lp_login.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the launchpad-login command."""
+
+from bzrlib.plugins.launchpad import account
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestLaunchpadLogin(TestCaseWithTransport):
+ """Tests for launchpad-login."""
+
+ def test_login_without_name_when_not_logged_in(self):
+ # lp-login without a 'name' parameter returns the user ID of the
+ # logged in user. If no one is logged in, we tell the user as much.
+ out, err = self.run_bzr(['launchpad-login', '--no-check'], retcode=1)
+ self.assertEqual('No Launchpad user ID configured.\n', out)
+ self.assertEqual('', err)
+
+ def test_login_with_name_sets_login(self):
+ # lp-login with a 'name' parameter sets the Launchpad login.
+ self.run_bzr(['launchpad-login', '--no-check', 'foo'])
+ self.assertEqual('foo', account.get_lp_login())
+
+ def test_login_without_name_when_logged_in(self):
+ # lp-login without a 'name' parameter returns the user ID of the
+ # logged in user.
+ account.set_lp_login('foo')
+ out, err = self.run_bzr(['launchpad-login', '--no-check'])
+ self.assertEqual('foo\n', out)
+ self.assertEqual('', err)
+
+ def test_login_with_name_no_output_by_default(self):
+ # lp-login with a 'name' parameter produces no output by default.
+ out, err = self.run_bzr(['launchpad-login', '--no-check', 'foo'])
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_login_with_name_verbose(self):
+ # lp-login with a 'name' parameter and a verbose flag produces some
+ # information about what Bazaar just did.
+ out, err = self.run_bzr(
+ ['launchpad-login', '-v', '--no-check', 'foo'])
+ self.assertEqual("Launchpad user ID set to 'foo'.\n", out)
+ self.assertEqual('', err)
diff --git a/bzrlib/plugins/launchpad/test_lp_open.py b/bzrlib/plugins/launchpad/test_lp_open.py
new file mode 100644
index 0000000..5eb8345
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_lp_open.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2009, 2010, 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the launchpad-open command."""
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestLaunchpadOpen(TestCaseWithTransport):
+
+ def run_open(self, location, retcode=0, working_dir='.'):
+ out, err = self.run_bzr(['launchpad-open', '--dry-run', location],
+ retcode=retcode,
+ working_dir=working_dir)
+ return err.splitlines()
+
+ def test_non_branch(self):
+ # If given a branch with no public or push locations, lp-open will try
+ # to guess the Launchpad page for the given URL / path. If it cannot
+ # find one, it will raise an error.
+ self.assertEqual(
+ ['bzr: ERROR: . is not registered on Launchpad.'],
+ self.run_open('.', retcode=3))
+
+ def test_no_public_location_no_push_location(self):
+ self.make_branch('not-public')
+ self.assertEqual(
+ ['bzr: ERROR: not-public is not registered on Launchpad.'],
+ self.run_open('not-public', retcode=3))
+
+ def test_non_launchpad_branch(self):
+ branch = self.make_branch('non-lp')
+ url = 'http://example.com/non-lp'
+ branch.set_public_branch(url)
+ self.assertEqual(
+ ['bzr: ERROR: %s is not registered on Launchpad.' % url],
+ self.run_open('non-lp', retcode=3))
+
+ def test_launchpad_branch_with_public_location(self):
+ branch = self.make_branch('lp')
+ branch.set_public_branch('bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ ['Opening https://code.launchpad.net/~foo/bar/baz in web '
+ 'browser'],
+ self.run_open('lp'))
+
+ def test_launchpad_branch_with_public_and_push_location(self):
+ branch = self.make_branch('lp')
+ branch.lock_write()
+ try:
+ branch.set_public_branch(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/public')
+ branch.set_push_location(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/push')
+ finally:
+ branch.unlock()
+ self.assertEqual(
+ ['Opening https://code.launchpad.net/~foo/bar/public in web '
+ 'browser'],
+ self.run_open('lp'))
+
+ def test_launchpad_branch_with_no_public_but_with_push(self):
+ # lp-open falls back to the push location if it cannot find a public
+ # location.
+ branch = self.make_branch('lp')
+ branch.set_push_location('bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ ['Opening https://code.launchpad.net/~foo/bar/baz in web '
+ 'browser'],
+ self.run_open('lp'))
+
+ def test_launchpad_branch_with_no_public_no_push(self):
+ # If lp-open is given a branch URL and that branch has no public
+ # location and no push location, then just try to look up the
+ # Launchpad page for that URL.
+ self.assertEqual(
+ ['Opening https://code.launchpad.net/~foo/bar/baz in web '
+ 'browser'],
+ self.run_open('bzr+ssh://bazaar.launchpad.net/~foo/bar/baz'))
+
+ def test_launchpad_branch_subdirectory(self):
+ # lp-open in a subdirectory of a registered branch should work
+ wt = self.make_branch_and_tree('lp')
+ wt.branch.set_push_location(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.build_tree(['lp/a/'])
+ self.assertEqual(
+ ['Opening https://code.launchpad.net/~foo/bar/baz in web '
+ 'browser'],
+ self.run_open('.', working_dir='lp/a'))
diff --git a/bzrlib/plugins/launchpad/test_lp_service.py b/bzrlib/plugins/launchpad/test_lp_service.py
new file mode 100644
index 0000000..6e28c90
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_lp_service.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for selection of the right Launchpad service by environment"""
+
+import os
+import xmlrpclib
+
+from bzrlib import errors
+from bzrlib.plugins.launchpad.lp_registration import (
+ InvalidLaunchpadInstance, LaunchpadService, NotLaunchpadBranch)
+from bzrlib.plugins.launchpad.test_lp_directory import FakeResolveFactory
+from bzrlib.tests import TestCase
+
+
+class LaunchpadServiceTests(TestCase):
+ """Test that the correct Launchpad instance is chosen."""
+
+ def setUp(self):
+ super(LaunchpadServiceTests, self).setUp()
+ # make sure we have a reproducible standard environment
+ self.overrideEnv('BZR_LP_XMLRPC_URL', None)
+
+ def test_default_service(self):
+ service = LaunchpadService()
+ self.assertEqual('https://xmlrpc.launchpad.net/bazaar/',
+ service.service_url)
+
+ def test_alter_default_service_url(self):
+ LaunchpadService.DEFAULT_SERVICE_URL = 'http://example.com/'
+ try:
+ service = LaunchpadService()
+ self.assertEqual('http://example.com/',
+ service.service_url)
+ finally:
+ LaunchpadService.DEFAULT_SERVICE_URL = \
+ LaunchpadService.LAUNCHPAD_INSTANCE['production']
+
+ def test_staging_service(self):
+ service = LaunchpadService(lp_instance='staging')
+ self.assertEqual('https://xmlrpc.staging.launchpad.net/bazaar/',
+ service.service_url)
+
+ def test_dev_service(self):
+ service = LaunchpadService(lp_instance='dev')
+ self.assertEqual('https://xmlrpc.launchpad.dev/bazaar/',
+ service.service_url)
+
+ def test_demo_service(self):
+ service = LaunchpadService(lp_instance='demo')
+ self.assertEqual('https://xmlrpc.demo.launchpad.net/bazaar/',
+ service.service_url)
+
+ def test_unknown_service(self):
+ error = self.assertRaises(InvalidLaunchpadInstance,
+ LaunchpadService,
+ lp_instance='fubar')
+ self.assertEqual('fubar is not a valid Launchpad instance.',
+ str(error))
+
+ def test_environment_overrides_default(self):
+ os.environ['BZR_LP_XMLRPC_URL'] = 'http://example.com/'
+ service = LaunchpadService()
+ self.assertEqual('http://example.com/',
+ service.service_url)
+
+ def test_environment_overrides_specified_service(self):
+ os.environ['BZR_LP_XMLRPC_URL'] = 'http://example.com/'
+ service = LaunchpadService(lp_instance='staging')
+ self.assertEqual('http://example.com/',
+ service.service_url)
+
+
+class TestURLInference(TestCase):
+ """Test the way we infer Launchpad web pages from branch URLs."""
+
+ def test_default_bzr_ssh_url(self):
+ service = LaunchpadService()
+ web_url = service.get_web_url_from_branch_url(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_product_bzr_ssh_url(self):
+ service = LaunchpadService(lp_instance='production')
+ web_url = service.get_web_url_from_branch_url(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_sftp_branch_url(self):
+ service = LaunchpadService(lp_instance='production')
+ web_url = service.get_web_url_from_branch_url(
+ 'sftp://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_staging_branch_url(self):
+ service = LaunchpadService(lp_instance='production')
+ web_url = service.get_web_url_from_branch_url(
+ 'bzr+ssh://bazaar.staging.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_non_launchpad_url(self):
+ service = LaunchpadService()
+ error = self.assertRaises(
+ NotLaunchpadBranch, service.get_web_url_from_branch_url,
+ 'bzr+ssh://example.com/~foo/bar/baz')
+ self.assertEqual(
+ 'bzr+ssh://example.com/~foo/bar/baz is not registered on Launchpad.',
+ str(error))
+
+ def test_dodgy_launchpad_url(self):
+ service = LaunchpadService()
+ self.assertRaises(
+ NotLaunchpadBranch, service.get_web_url_from_branch_url,
+ 'bzr+ssh://launchpad.net/~foo/bar/baz')
+
+ def test_lp_branch_url(self):
+ service = LaunchpadService(lp_instance='production')
+ factory = FakeResolveFactory(
+ self, '~foo/bar/baz',
+ dict(urls=['http://bazaar.launchpad.net/~foo/bar/baz']))
+ web_url = service.get_web_url_from_branch_url(
+ 'lp:~foo/bar/baz', factory)
+ self.assertEqual(
+ 'https://code.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_lp_branch_shortcut(self):
+ service = LaunchpadService()
+ factory = FakeResolveFactory(
+ self, 'foo',
+ dict(urls=['http://bazaar.launchpad.net/~foo/bar/baz']))
+ web_url = service.get_web_url_from_branch_url('lp:foo', factory)
+ self.assertEqual(
+ 'https://code.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_lp_branch_fault(self):
+ service = LaunchpadService()
+ factory = FakeResolveFactory(self, 'foo', None)
+ def submit(service):
+ raise xmlrpclib.Fault(42, 'something went wrong')
+ factory.submit = submit
+ self.assertRaises(
+ errors.InvalidURL, service.get_web_url_from_branch_url, 'lp:foo',
+ factory)
+
+ def test_staging_url(self):
+ service = LaunchpadService(lp_instance='staging')
+ web_url = service.get_web_url_from_branch_url(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.staging.launchpad.net/~foo/bar/baz', web_url)
+
+ def test_dev_url(self):
+ service = LaunchpadService(lp_instance='dev')
+ web_url = service.get_web_url_from_branch_url(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.launchpad.dev/~foo/bar/baz', web_url)
+
+ def test_demo_url(self):
+ service = LaunchpadService(lp_instance='demo')
+ web_url = service.get_web_url_from_branch_url(
+ 'bzr+ssh://bazaar.launchpad.net/~foo/bar/baz')
+ self.assertEqual(
+ 'https://code.demo.launchpad.net/~foo/bar/baz', web_url)
diff --git a/bzrlib/plugins/launchpad/test_register.py b/bzrlib/plugins/launchpad/test_register.py
new file mode 100644
index 0000000..81aadc7
--- /dev/null
+++ b/bzrlib/plugins/launchpad/test_register.py
@@ -0,0 +1,366 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import base64
+from StringIO import StringIO
+import urlparse
+import xmlrpclib
+
+from bzrlib import (
+ config,
+ tests,
+ ui,
+ )
+from bzrlib.tests import TestCaseWithTransport
+
+# local import
+from bzrlib.plugins.launchpad.lp_registration import (
+ BaseRequest,
+ BranchBugLinkRequest,
+ BranchRegistrationRequest,
+ ResolveLaunchpadPathRequest,
+ LaunchpadService,
+ )
+
+
+# TODO: Test the command-line client, making sure that it'll pass the
+# request through to a dummy transport, and that the transport will validate
+# the results passed in. Not sure how to get the transport object back out to
+# validate that it's OK - may not be necessary.
+
+# TODO: Add test for (and implement) other command-line options to set
+# project, author_email, description.
+
+# TODO: project_id is not properly handled -- must be passed in rpc or path.
+
+class InstrumentedXMLRPCConnection(object):
+ """Stands in place of an http connection for the purposes of testing"""
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+
+ def getreply(self):
+ """Fake the http reply.
+
+ :returns: (errcode, errmsg, headers)
+ """
+ return (200, 'OK', [])
+
+ def getresponse(self, buffering=True):
+ """Fake the http reply.
+
+ This is used when running on Python 2.7, where xmlrpclib uses
+ httplib.HTTPConnection in a different way than before.
+ """
+ class FakeHttpResponse(object):
+
+ def __init__(self, status, reason, body):
+ self.status = status
+ self.reason = reason
+ self.body = body
+
+ def read(self, size=-1):
+ return self.body.read(size)
+
+ def getheader(self, name, default):
+ # We don't have headers
+ return default
+
+ return FakeHttpResponse(200, 'OK', self.getfile())
+
+ def getfile(self):
+ """Return a fake file containing the response content."""
+ return StringIO('''\
+<?xml version="1.0" ?>
+<methodResponse>
+ <params>
+ <param>
+ <value>
+ <string>victoria dock</string>
+ </value>
+ </param>
+ </params>
+</methodResponse>''')
+
+
+
+class InstrumentedXMLRPCTransport(xmlrpclib.Transport):
+
+ # Python 2.5's xmlrpclib looks for this.
+ _use_datetime = False
+
+ def __init__(self, testcase, expect_auth):
+ self.testcase = testcase
+ self.expect_auth = expect_auth
+ self._connection = (None, None)
+
+ def make_connection(self, host):
+ host, http_headers, x509 = self.get_host_info(host)
+ test = self.testcase
+ self.connected_host = host
+ if self.expect_auth:
+ auth_hdrs = [v for k,v in http_headers if k == 'Authorization']
+ if len(auth_hdrs) != 1:
+ raise AssertionError("multiple auth headers: %r"
+ % (auth_hdrs,))
+ authinfo = auth_hdrs[0]
+ expected_auth = 'testuser@launchpad.net:testpassword'
+ test.assertEquals(authinfo,
+ 'Basic ' + base64.encodestring(expected_auth).strip())
+ elif http_headers:
+ raise AssertionError()
+ return InstrumentedXMLRPCConnection(test)
+
+ def send_request(self, connection, handler_path, request_body):
+ test = self.testcase
+ self.got_request = True
+
+ def send_host(self, conn, host):
+ pass
+
+ def send_user_agent(self, conn):
+ # TODO: send special user agent string, including bzrlib version
+ # number
+ pass
+
+ def send_content(self, conn, request_body):
+ unpacked, method = xmlrpclib.loads(request_body)
+ if None in unpacked:
+ raise AssertionError(
+ "xmlrpc result %r shouldn't contain None" % (unpacked,))
+ self.sent_params = unpacked
+
+
+class MockLaunchpadService(LaunchpadService):
+
+ def send_request(self, method_name, method_params, authenticated):
+ """Stash away the method details rather than sending them to a real server"""
+ self.called_method_name = method_name
+ self.called_method_params = method_params
+ self.called_authenticated = authenticated
+
+
+class TestBranchRegistration(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestBranchRegistration, self).setUp()
+ # make sure we have a reproducible standard environment
+ self.overrideEnv('BZR_LP_XMLRPC_URL', None)
+
+ def test_register_help(self):
+ """register-branch accepts --help"""
+ out, err = self.run_bzr(['register-branch', '--help'])
+ self.assertContainsRe(out, r'Register a branch')
+
+ def test_register_no_url_no_branch(self):
+ """register-branch command requires parameters"""
+ self.make_repository('.')
+ self.run_bzr_error(
+ ['register-branch requires a public branch url - '
+ 'see bzr help register-branch'],
+ 'register-branch')
+
+ def test_register_no_url_in_published_branch_no_error(self):
+ b = self.make_branch('.')
+ b.set_public_branch('http://test-server.com/bzr/branch')
+ out, err = self.run_bzr(['register-branch', '--dry-run'])
+ self.assertEqual('Branch registered.\n', out)
+ self.assertEqual('', err)
+
+ def test_register_no_url_in_unpublished_branch_errors(self):
+ b = self.make_branch('.')
+ out, err = self.run_bzr_error(['no public branch'],
+ ['register-branch', '--dry-run'])
+ self.assertEqual('', out)
+
+ def test_register_dry_run(self):
+ out, err = self.run_bzr(['register-branch',
+ 'http://test-server.com/bzr/branch',
+ '--dry-run'])
+ self.assertEquals(out, 'Branch registered.\n')
+
+ def test_onto_transport(self):
+ """How the request is sent by transmitting across a mock Transport"""
+ # use a real transport, but intercept at the http/xml layer
+ transport = InstrumentedXMLRPCTransport(self, expect_auth=True)
+ service = LaunchpadService(transport)
+ service.registrant_email = 'testuser@launchpad.net'
+ service.registrant_password = 'testpassword'
+ rego = BranchRegistrationRequest('http://test-server.com/bzr/branch',
+ 'branch-id',
+ 'my test branch',
+ 'description',
+ 'author@launchpad.net',
+ 'product')
+ rego.submit(service)
+ self.assertEquals(transport.connected_host, 'xmlrpc.launchpad.net')
+ self.assertEquals(len(transport.sent_params), 6)
+ self.assertEquals(transport.sent_params,
+ ('http://test-server.com/bzr/branch', # branch_url
+ 'branch-id', # branch_name
+ 'my test branch', # branch_title
+ 'description',
+ 'author@launchpad.net',
+ 'product'))
+ self.assertTrue(transport.got_request)
+
+ def test_onto_transport_unauthenticated(self):
+ """An unauthenticated request is transmitted across a mock Transport"""
+ transport = InstrumentedXMLRPCTransport(self, expect_auth=False)
+ service = LaunchpadService(transport)
+ resolve = ResolveLaunchpadPathRequest('bzr')
+ resolve.submit(service)
+ self.assertEquals(transport.connected_host, 'xmlrpc.launchpad.net')
+ self.assertEquals(len(transport.sent_params), 1)
+ self.assertEquals(transport.sent_params, ('bzr', ))
+ self.assertTrue(transport.got_request)
+
+ def test_subclass_request(self):
+ """Define a new type of xmlrpc request"""
+ class DummyRequest(BaseRequest):
+ _methodname = 'dummy_request'
+ def _request_params(self):
+ return (42,)
+
+ service = MockLaunchpadService()
+ service.registrant_email = 'test@launchpad.net'
+ service.registrant_password = ''
+ request = DummyRequest()
+ request.submit(service)
+ self.assertEquals(service.called_method_name, 'dummy_request')
+ self.assertEquals(service.called_method_params, (42,))
+
+ def test_mock_server_registration(self):
+ """Send registration to mock server"""
+ test_case = self
+ class MockRegistrationService(MockLaunchpadService):
+ def send_request(self, method_name, method_params, authenticated):
+ test_case.assertEquals(method_name, "register_branch")
+ test_case.assertEquals(list(method_params),
+ ['url', 'name', 'title', 'description', 'email', 'name'])
+ test_case.assertEquals(authenticated, True)
+ return 'result'
+ service = MockRegistrationService()
+ rego = BranchRegistrationRequest('url', 'name', 'title',
+ 'description', 'email', 'name')
+ result = rego.submit(service)
+ self.assertEquals(result, 'result')
+
+ def test_mock_server_registration_with_defaults(self):
+ """Send registration to mock server"""
+ test_case = self
+ class MockRegistrationService(MockLaunchpadService):
+ def send_request(self, method_name, method_params, authenticated):
+ test_case.assertEquals(method_name, "register_branch")
+ test_case.assertEquals(list(method_params),
+ ['http://server/branch', 'branch', '', '', '', ''])
+ test_case.assertEquals(authenticated, True)
+ return 'result'
+ service = MockRegistrationService()
+ rego = BranchRegistrationRequest('http://server/branch')
+ result = rego.submit(service)
+ self.assertEquals(result, 'result')
+
+ def test_mock_bug_branch_link(self):
+ """Send bug-branch link to mock server"""
+ test_case = self
+ class MockService(MockLaunchpadService):
+ def send_request(self, method_name, method_params, authenticated):
+ test_case.assertEquals(method_name, "link_branch_to_bug")
+ test_case.assertEquals(list(method_params),
+ ['http://server/branch', 1234, ''])
+ test_case.assertEquals(authenticated, True)
+ return 'http://launchpad.net/bug/1234'
+ service = MockService()
+ rego = BranchBugLinkRequest('http://server/branch', 1234)
+ result = rego.submit(service)
+ self.assertEquals(result, 'http://launchpad.net/bug/1234')
+
+ def test_mock_resolve_lp_url(self):
+ test_case = self
+ class MockService(MockLaunchpadService):
+ def send_request(self, method_name, method_params, authenticated):
+ test_case.assertEquals(method_name, "resolve_lp_path")
+ test_case.assertEquals(list(method_params), ['bzr'])
+ test_case.assertEquals(authenticated, False)
+ return dict(urls=[
+ 'bzr+ssh://bazaar.launchpad.net~bzr/bzr/trunk',
+ 'sftp://bazaar.launchpad.net~bzr/bzr/trunk',
+ 'bzr+http://bazaar.launchpad.net~bzr/bzr/trunk',
+ 'http://bazaar.launchpad.net~bzr/bzr/trunk'])
+ service = MockService()
+ resolve = ResolveLaunchpadPathRequest('bzr')
+ result = resolve.submit(service)
+ self.assertTrue('urls' in result)
+ self.assertEquals(result['urls'], [
+ 'bzr+ssh://bazaar.launchpad.net~bzr/bzr/trunk',
+ 'sftp://bazaar.launchpad.net~bzr/bzr/trunk',
+ 'bzr+http://bazaar.launchpad.net~bzr/bzr/trunk',
+ 'http://bazaar.launchpad.net~bzr/bzr/trunk'])
+
+
+class TestGatherUserCredentials(tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super(TestGatherUserCredentials, self).setUp()
+ # make sure we have a reproducible standard environment
+ self.overrideEnv('BZR_LP_XMLRPC_URL', None)
+
+ def test_gather_user_credentials_has_password(self):
+ service = LaunchpadService()
+ service.registrant_password = 'mypassword'
+ # This should be a basic no-op, since we already have the password
+ service.gather_user_credentials()
+ self.assertEqual('mypassword', service.registrant_password)
+
+ def test_gather_user_credentials_from_auth_conf(self):
+ auth_path = config.authentication_config_filename()
+ service = LaunchpadService()
+ g_conf = config.GlobalStack()
+ g_conf.set('email', 'Test User <test@user.com>')
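+ # gather_user_credentials() should pick up the password from the
+ # authentication.conf file written below.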
+ f = open(auth_path, 'wb')
+ try:
+ scheme, hostinfo = urlparse.urlsplit(service.service_url)[:2]
+ f.write('[section]\n'
+ 'scheme=%s\n'
+ 'host=%s\n'
+ 'user=test@user.com\n'
+ 'password=testpass\n'
+ % (scheme, hostinfo))
+ finally:
+ f.close()
+ self.assertIs(None, service.registrant_password)
+ service.gather_user_credentials()
+ self.assertEqual('test@user.com', service.registrant_email)
+ self.assertEqual('testpass', service.registrant_password)
+
+ def test_gather_user_credentials_prompts(self):
+ service = LaunchpadService()
+ self.assertIs(None, service.registrant_password)
+ g_conf = config.GlobalStack()
+ g_conf.set('email', 'Test User <test@user.com>')
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin='userpass\n',
+ stdout=stdout, stderr=stderr)
+ self.assertIs(None, service.registrant_password)
+ service.gather_user_credentials()
+ self.assertEqual('test@user.com', service.registrant_email)
+ self.assertEqual('userpass', service.registrant_password)
+ self.assertEquals('', stdout.getvalue())
+ self.assertContainsRe(stderr.getvalue(),
+ 'launchpad.net password for test@user\\.com')
+
diff --git a/bzrlib/plugins/netrc_credential_store/__init__.py b/bzrlib/plugins/netrc_credential_store/__init__.py
new file mode 100644
index 0000000..ad8aba0
--- /dev/null
+++ b/bzrlib/plugins/netrc_credential_store/__init__.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__doc__ = """Use ~/.netrc as a credential store for authentication.conf."""
+
+# Since we are a built-in plugin we share the bzrlib version
+from bzrlib import version_info
+
+from bzrlib import (
+ config,
+ lazy_import,
+ )
+
+lazy_import.lazy_import(globals(), """
+import errno
+import netrc
+
+from bzrlib import (
+ errors,
+ )
+""")
+
+
+class NetrcCredentialStore(config.CredentialStore):
+
+ def __init__(self):
+ super(NetrcCredentialStore, self).__init__()
+ try:
+ self._netrc = netrc.netrc()
+ except IOError, e:
+ if e.args[0] == errno.ENOENT:
+ raise errors.NoSuchFile(e.filename)
+ else:
+ raise
+
+ def decode_password(self, credentials):
+ auth = self._netrc.authenticators(credentials['host'])
+ password = None
+ if auth is not None:
+ user, account, password = auth
+ cred_user = credentials.get('user', None)
+ if cred_user is None or user != cred_user:
+ # We don't use the netrc ability to provide a user since there
+ # is no way to give it back to AuthConfig. So if the user
+ # doesn't match, we don't return a password.
+ password = None
+ return password
+
+
+config.credential_store_registry.register_lazy(
+ 'netrc', __name__, 'NetrcCredentialStore', help=__doc__)
+
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'tests',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/plugins/netrc_credential_store/tests/__init__.py b/bzrlib/plugins/netrc_credential_store/tests/__init__.py
new file mode 100644
index 0000000..ab4bd39
--- /dev/null
+++ b/bzrlib/plugins/netrc_credential_store/tests/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2008 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_netrc',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/plugins/netrc_credential_store/tests/test_netrc.py b/bzrlib/plugins/netrc_credential_store/tests/test_netrc.py
new file mode 100644
index 0000000..872ee57
--- /dev/null
+++ b/bzrlib/plugins/netrc_credential_store/tests/test_netrc.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ config,
+ errors,
+ osutils,
+ tests,
+ )
+
+from bzrlib.plugins import netrc_credential_store
+
+
+class TestNetrcCSNoNetrc(tests.TestCaseInTempDir):
+
+ def test_home_netrc_does_not_exist(self):
+ self.assertRaises(errors.NoSuchFile,
+ config.credential_store_registry.get_credential_store,
+ 'netrc')
+
+
+class TestNetrcCS(tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super(TestNetrcCS, self).setUp()
+ # Create a .netrc file
+ netrc_content = """
+machine host login joe password secret
+default login anonymous password joe@home
+"""
+ f = open(osutils.pathjoin(self.test_home_dir, '.netrc'), 'wb')
+ try:
+ f.write(netrc_content)
+ finally:
+ f.close()
+
+ def _get_netrc_cs(self):
+ return config.credential_store_registry.get_credential_store('netrc')
+
+ def test_not_matching_user(self):
+ cs = self._get_netrc_cs()
+ password = cs.decode_password(dict(host='host', user='jim'))
+ self.assertIs(None, password)
+
+ def test_matching_user(self):
+ cs = self._get_netrc_cs()
+ password = cs.decode_password(dict(host='host', user='joe'))
+ self.assertEquals('secret', password)
+
+ def test_default_password(self):
+ cs = self._get_netrc_cs()
+ password = cs.decode_password(dict(host='other', user='anonymous'))
+ self.assertEquals('joe@home', password)
+
+ def test_default_password_without_user(self):
+ cs = self._get_netrc_cs()
+ password = cs.decode_password(dict(host='other'))
+ self.assertIs(None, password)
+
+ def test_get_netrc_credentials_via_auth_config(self):
+ # Create a test AuthenticationConfig object
+ ac_content = """
+[host1]
+host = host
+user = joe
+password_encoding = netrc
+"""
+ conf = config.AuthenticationConfig(_file=StringIO(ac_content))
+ credentials = conf.get_credentials('scheme', 'host', user='joe')
+ self.assertIsNot(None, credentials)
+ self.assertEquals('secret', credentials.get('password', None))
diff --git a/bzrlib/plugins/news_merge/README b/bzrlib/plugins/news_merge/README
new file mode 100644
index 0000000..020f047
--- /dev/null
+++ b/bzrlib/plugins/news_merge/README
@@ -0,0 +1,7 @@
+A plugin for merging bzr's NEWS file.
+
+This plugin is activated via configuration variables; see 'bzr help news_merge'.
+
+This hook can resolve conflicts where both sides add entries at the same place.
+If it encounters a more difficult conflict, it gives up and bzr will fall back to
+the default merge algorithm.
diff --git a/bzrlib/plugins/news_merge/__init__.py b/bzrlib/plugins/news_merge/__init__.py
new file mode 100644
index 0000000..2e0c33a
--- /dev/null
+++ b/bzrlib/plugins/news_merge/__init__.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__doc__ = """Merge hook for bzr's NEWS file.
+
+To enable this plugin, add a section to your branch.conf or location.conf
+like::
+
+ [/home/user/code/bzr]
+ news_merge_files = NEWS
+
+The news_merge_files config option takes a list of file paths, separated by
+commas.
+
+Limitations:
+
+* if there's a conflict in more than just bullet points, this doesn't yet know
+  how to resolve that, so bzr will fall back to the default line-based merge.
+"""
+
+# Since we are a built-in plugin we share the bzrlib version
+from bzrlib import version_info
+from bzrlib.hooks import install_lazy_named_hook
+
+
+def news_merge_hook(merger):
+ """Merger.merge_file_content hook for bzr-format NEWS files."""
+ from bzrlib.plugins.news_merge.news_merge import NewsMerger
+ return NewsMerger(merger)
+
+
+install_lazy_named_hook("bzrlib.merge", "Merger.hooks", "merge_file_content",
+ news_merge_hook, "NEWS file merge")
+
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'tests',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
+
diff --git a/bzrlib/plugins/news_merge/news_merge.py b/bzrlib/plugins/news_merge/news_merge.py
new file mode 100644
index 0000000..a8b0a22
--- /dev/null
+++ b/bzrlib/plugins/news_merge/news_merge.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Merge logic for news_merge plugin."""
+
+from __future__ import absolute_import
+
+
+from bzrlib.plugins.news_merge.parser import simple_parse_lines
+from bzrlib import merge, merge3
+
+
+class NewsMerger(merge.ConfigurableFileMerger):
+ """Merge bzr NEWS files."""
+
+ name_prefix = "news"
+
+ def merge_text(self, params):
+ """Perform a simple 3-way merge of a bzr NEWS file.
+
+ Each section of a bzr NEWS file is essentially an ordered set of bullet
+ points, so we can simply take a set of bullet points, determine which
+ bullets to add and which to remove, sort, and reserialize.
+ """
+ # Transform the different versions of the NEWS file into a bunch of
+ # text lines where each line matches one part of the overall
+ # structure, e.g. a heading or bullet.
+ this_lines = list(simple_parse_lines(params.this_lines))
+ other_lines = list(simple_parse_lines(params.other_lines))
+ base_lines = list(simple_parse_lines(params.base_lines))
+ m3 = merge3.Merge3(base_lines, this_lines, other_lines,
+ allow_objects=True)
+ result_chunks = []
+ for group in m3.merge_groups():
+ if group[0] == 'conflict':
+ _, base, a, b = group
+ # Are all the conflicting lines bullets? If so, we can merge
+ # this.
+ for line_set in [base, a, b]:
+ for line in line_set:
+ if line[0] != 'bullet':
+ # Something else :(
+ # Maybe the default merge can cope.
+ return 'not_applicable', None
+ # Calculate additions and deletions.
+ new_in_a = set(a).difference(base)
+ new_in_b = set(b).difference(base)
+ all_new = new_in_a.union(new_in_b)
+ deleted_in_a = set(base).difference(a)
+ deleted_in_b = set(base).difference(b)
+ # Combine into the final set of bullet points.
+ final = all_new.difference(deleted_in_a).difference(
+ deleted_in_b)
+ # Sort, and emit.
+ final = sorted(final, key=sort_key)
+ result_chunks.extend(final)
+ else:
+ result_chunks.extend(group[1])
+ # Transform the merged elements back into real blocks of lines.
+ result_lines = '\n\n'.join(chunk[1] for chunk in result_chunks)
+ return 'success', result_lines
+
+
+def sort_key(chunk):
+ return chunk[1].replace('`', '').lower()
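A rough sketch of the set arithmetic merge_text() applies to a conflicting group, with the parsed (kind, text) tuples simplified to bare strings (illustrative values only). Bullets common to all three sides never show up in a conflict group, so only additions survive here; unchanged regions are copied through by the else branch::

    base = set(['* fix foo'])
    this = set(['* fix foo', '* add bar'])    # this side adds a bullet
    other = set(['* speed up baz'])           # other side drops one, adds one

    all_new = (this - base) | (other - base)
    final = all_new - (base - this) - (base - other)
    print sorted(final)
    # ['* add bar', '* speed up baz']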
diff --git a/bzrlib/plugins/news_merge/parser.py b/bzrlib/plugins/news_merge/parser.py
new file mode 100644
index 0000000..4b34543
--- /dev/null
+++ b/bzrlib/plugins/news_merge/parser.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Simple parser for bzr's NEWS file.
+
+Simple as this is, it's a bit over-powered for news_merge's needs, which only
+cares about 'bullet' and 'everything else'.
+
+This module can be run as a standalone Python program; pass it a filename and
+it will print the parsed form of a file (a series of 2-tuples, see
+simple_parse's docstring).
+"""
+
+from __future__ import absolute_import
+
+
+def simple_parse_lines(lines):
+ """Same as simple_parse, but takes an iterable of strs rather than a single
+ str.
+ """
+ return simple_parse(''.join(lines))
+
+
+def simple_parse(content):
+ """Returns blocks, where each block is a 2-tuple (kind, text).
+
+    :kind: one of 'heading', 'release', 'section', 'bullet', 'empty' or 'text'.
+ :text: a str, including newlines.
+ """
+ blocks = content.split('\n\n')
+ for block in blocks:
+ if block.startswith('###'):
+ # First line is ###...: Top heading
+ yield 'heading', block
+ continue
+ last_line = block.rsplit('\n', 1)[-1]
+ if last_line.startswith('###'):
+ # last line is ###...: 2nd-level heading
+ yield 'release', block
+ elif last_line.startswith('***'):
+ # last line is ***...: 3rd-level heading
+ yield 'section', block
+ elif block.startswith('* '):
+ # bullet
+ yield 'bullet', block
+ elif block.strip() == '':
+ # empty
+ yield 'empty', block
+ else:
+ # plain text
+ yield 'text', block
+
+
+if __name__ == '__main__':
+ import sys
+ content = open(sys.argv[1], 'rb').read()
+ for result in simple_parse(content):
+ print result
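For example, feeding the parser a small, made-up NEWS fragment yields one 2-tuple per block; only the kinds are printed here::

    from bzrlib.plugins.news_merge.parser import simple_parse

    sample = ("####################\n"
              "Bazaar Release Notes\n"
              "####################\n"
              "\n"
              "bzr 2.5b1\n"
              "#########\n"
              "\n"
              "* Fixed a bug in foo.\n")
    for kind, text in simple_parse(sample):
        print kind
    # heading
    # release
    # bullet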
diff --git a/bzrlib/plugins/news_merge/tests/__init__.py b/bzrlib/plugins/news_merge/tests/__init__.py
new file mode 100644
index 0000000..e4171d1
--- /dev/null
+++ b/bzrlib/plugins/news_merge/tests/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2010 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_news_merge',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/plugins/news_merge/tests/test_news_merge.py b/bzrlib/plugins/news_merge/tests/test_news_merge.py
new file mode 100644
index 0000000..927ca1a
--- /dev/null
+++ b/bzrlib/plugins/news_merge/tests/test_news_merge.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2010 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# FIXME: This is totally incomplete but I'm only the patch pilot :-)
+# -- vila 100120
+# Note that the single test from this file is now in
+# test_merge.TestConfigurableFileMerger -- rbc 20100129.
+
+from bzrlib import (
+ option,
+ tests,
+ )
+from bzrlib.merge import Merger
+from bzrlib.plugins import news_merge
diff --git a/bzrlib/plugins/po_merge/README b/bzrlib/plugins/po_merge/README
new file mode 100644
index 0000000..f6e661c
--- /dev/null
+++ b/bzrlib/plugins/po_merge/README
@@ -0,0 +1,7 @@
+A plugin for merging .po files.
+
+This plugin is controlled via configuration variables; see 'bzr help po_merge'.
+
+This hook can avoid conflicts in ``.po`` files by invoking msgmerge with the
+appropriate options. If it can't apply, it falls back to the default bzr
+merge algorithm.
diff --git a/bzrlib/plugins/po_merge/__init__.py b/bzrlib/plugins/po_merge/__init__.py
new file mode 100644
index 0000000..97c00db
--- /dev/null
+++ b/bzrlib/plugins/po_merge/__init__.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+__doc__ = """Merge hook for ``.po`` files.
+
+To enable this plugin, add a section to your branch.conf or location.conf
+like::
+
+ [/home/user/code/bzr]
+    po_merge.po_dirs = po,doc/po4a/po
+
+The ``po_merge.po_dirs`` config option takes a list of directories that can
+contain ``.po`` files, separated by commas (if several directories are
+needed). Each directory should contain a single ``.pot`` file.
+
+The ``po_merge.command`` is the command whose output is used as the result of
+the merge. It defaults to::
+
+ msgmerge -N "{other}" "{pot_file}" -C "{this}" -o "{result}"
+
+where:
+
+* ``this`` is the ``.po`` file content before the merge in the current branch,
+* ``other`` is the ``.po`` file content in the branch merged from,
+* ``pot_file`` is the path to the ``.pot`` file corresponding to the ``.po``
+ file being merged.
+
+If conflicts occur in a ``.pot`` file during a given merge, the ``.po`` files
+will use the ``.pot`` file present in the tree before the merge. If this doesn't
+suit your needs, you can disable the plugin during the merge with::
+
+ bzr merge <usual merge args> -Opo_merge.po_dirs=
+
+This will allow you to resolve the conflicts in the ``.pot`` file and then
+merge the ``.po`` files again with::
+
+ bzr remerge po/*.po doc/po4a/po/*.po
+
+"""
+
+from bzrlib import (
+ config,
+ # Since we are a built-in plugin we share the bzrlib version
+ version_info,
+ )
+from bzrlib.hooks import install_lazy_named_hook
+
+
+def register_lazy_option(key, member):
+ config.option_registry.register_lazy(
+ key, 'bzrlib.plugins.po_merge.po_merge', member)
+
+
+register_lazy_option('po_merge.command', 'command_option')
+register_lazy_option('po_merge.po_dirs', 'po_dirs_option')
+register_lazy_option('po_merge.po_glob', 'po_glob_option')
+register_lazy_option('po_merge.pot_glob', 'pot_glob_option')
+
+
+def po_merge_hook(merger):
+ """Merger.merge_file_content hook for po files."""
+ from bzrlib.plugins.po_merge.po_merge import PoMerger
+ return PoMerger(merger)
+
+
+install_lazy_named_hook("bzrlib.merge", "Merger.hooks", "merge_file_content",
+ po_merge_hook, ".po file merge")
+
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'tests',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
+
diff --git a/bzrlib/plugins/po_merge/po_merge.py b/bzrlib/plugins/po_merge/po_merge.py
new file mode 100644
index 0000000..49b9a95
--- /dev/null
+++ b/bzrlib/plugins/po_merge/po_merge.py
@@ -0,0 +1,174 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Merge logic for po_merge plugin."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ config,
+ merge,
+ )
+
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import fnmatch
+import subprocess
+import tempfile
+import sys
+
+from bzrlib import (
+ cmdline,
+ osutils,
+ trace,
+ )
+""")
+
+
+command_option = config.Option(
+ 'po_merge.command',
+ default='msgmerge -N "{other}" "{pot_file}" -C "{this}" -o "{result}"',
+ help='''\
+Command used to create a conflict-free .po file during merge.
+
+The following parameters are provided by the hook:
+``this`` is the ``.po`` file content before the merge in the current branch,
+``other`` is the ``.po`` file content in the branch merged from,
+``pot_file`` is the path to the ``.pot`` file corresponding to the ``.po``
+file being merged.
+``result`` is the path where ``msgmerge`` will output its result. The hook will
+use the content of this file to produce the resulting ``.po`` file.
+
+All paths are absolute.
+''')
+
+
+po_dirs_option = config.ListOption(
+ 'po_merge.po_dirs', default='po,debian/po',
+ help='List of dirs containing .po files that the hook applies to.')
+
+
+po_glob_option = config.Option(
+ 'po_merge.po_glob', default='*.po',
+ help='Glob matching all ``.po`` files in one of ``po_merge.po_dirs``.')
+
+pot_glob_option = config.Option(
+ 'po_merge.pot_glob', default='*.pot',
+ help='Glob matching the ``.pot`` file in one of ``po_merge.po_dirs``.')
+
+
+class PoMerger(merge.PerFileMerger):
+ """Merge .po files."""
+
+ def __init__(self, merger):
+ super(merge.PerFileMerger, self).__init__(merger)
+ # config options are cached locally until config files are (see
+ # http://pad.lv/832042)
+
+ # FIXME: We use the branch config as there is no tree config
+ # -- vila 2011-11-23
+ self.conf = merger.this_branch.get_config_stack()
+ # Which dirs are targeted by the hook
+ self.po_dirs = self.conf.get('po_merge.po_dirs')
+ # Which files are targeted by the hook
+ self.po_glob = self.conf.get('po_merge.po_glob')
+ # Which .pot file should be used
+ self.pot_glob = self.conf.get('po_merge.pot_glob')
+ self.command = self.conf.get('po_merge.command', expand=False)
+ # file_matches() will set the following for merge_text()
+ self.pot_file_abspath = None
+ trace.mutter('PoMerger created')
+
+ def file_matches(self, params):
+ """Return True if merge_matching should be called on this file."""
+ if not self.po_dirs or not self.command:
+            # Return early if no options are defined
+ return False
+ po_dir = None
+ po_path = self.get_filepath(params, self.merger.this_tree)
+ for po_dir in self.po_dirs:
+ glob = osutils.pathjoin(po_dir, self.po_glob)
+ if fnmatch.fnmatch(po_path, glob):
+ trace.mutter('po %s matches: %s' % (po_path, glob))
+ break
+ else:
+ trace.mutter('PoMerger did not match for %s and %s'
+ % (self.po_dirs, self.po_glob))
+ return False
+ # Do we have the corresponding .pot file
+ for inv_entry in self.merger.this_tree.list_files(from_dir=po_dir,
+ recursive=False):
+ trace.mutter('inv_entry: %r' % (inv_entry,))
+ pot_name, pot_file_id = inv_entry[0], inv_entry[3]
+ if fnmatch.fnmatch(pot_name, self.pot_glob):
+ relpath = osutils.pathjoin(po_dir, pot_name)
+ self.pot_file_abspath = self.merger.this_tree.abspath(relpath)
+ # FIXME: I can't find an easy way to know if the .pot file has
+ # conflicts *during* the merge itself. So either the actual
+ # content on disk is fine and msgmerge will work OR it's not
+ # and it will fail. Conversely, either the result is ok for the
+ # user and he's happy OR the user needs to resolve the
+ # conflicts in the .pot file and use remerge.
+ # -- vila 2011-11-24
+ trace.mutter('will msgmerge %s using %s'
+ % (po_path, self.pot_file_abspath))
+ return True
+ else:
+ return False
+
+ def _invoke(self, command):
+ trace.mutter('Will msgmerge: %s' % (command,))
+ # We use only absolute paths so we don't care about the cwd
+ proc = subprocess.Popen(cmdline.split(command),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ out, err = proc.communicate()
+ return proc.returncode, out, err
+
+ def merge_matching(self, params):
+ return self.merge_text(params)
+
+ def merge_text(self, params):
+ """Calls msgmerge when .po files conflict.
+
+ This requires a valid .pot file to reconcile both sides.
+ """
+ # Create tmp files with the 'this' and 'other' content
+ tmpdir = tempfile.mkdtemp(prefix='po_merge')
+ env = {}
+ env['this'] = osutils.pathjoin(tmpdir, 'this')
+ env['other'] = osutils.pathjoin(tmpdir, 'other')
+ env['result'] = osutils.pathjoin(tmpdir, 'result')
+ env['pot_file'] = self.pot_file_abspath
+ try:
+ with osutils.open_file(env['this'], 'wb') as f:
+ f.writelines(params.this_lines)
+ with osutils.open_file(env['other'], 'wb') as f:
+ f.writelines(params.other_lines)
+ command = self.conf.expand_options(self.command, env)
+ retcode, out, err = self._invoke(command)
+ with osutils.open_file(env['result']) as f:
+ # FIXME: To avoid the list() construct below which means the
+ # whole 'result' file is kept in memory, there may be a way to
+ # use an iterator that will close the file when it's done, but
+ # there is still the issue of removing the tmp dir...
+ # -- vila 2011-11-24
+ return 'success', list(f.readlines())
+ finally:
+ osutils.rmtree(tmpdir)
+ return 'not applicable', []
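To make the command template concrete: with the default ``po_merge.command`` and hypothetical temporary paths, the command handed to cmdline.split() and subprocess looks roughly as follows. str.format() is only used here to mimic what conf.expand_options() produces::

    template = 'msgmerge -N "{other}" "{pot_file}" -C "{this}" -o "{result}"'
    env = dict(this='/tmp/po_mergeXYZ/this',
               other='/tmp/po_mergeXYZ/other',
               result='/tmp/po_mergeXYZ/result',
               pot_file='/home/user/work/po/adduser.pot')
    print template.format(**env)
    # msgmerge -N "/tmp/po_mergeXYZ/other" "/home/user/work/po/adduser.pot"
    #   -C "/tmp/po_mergeXYZ/this" -o "/tmp/po_mergeXYZ/result"   (one line)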
diff --git a/bzrlib/plugins/po_merge/tests/__init__.py b/bzrlib/plugins/po_merge/tests/__init__.py
new file mode 100644
index 0000000..56a3985
--- /dev/null
+++ b/bzrlib/plugins/po_merge/tests/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2011 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_po_merge',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/plugins/po_merge/tests/test_po_merge.py b/bzrlib/plugins/po_merge/tests/test_po_merge.py
new file mode 100644
index 0000000..47fbc7c
--- /dev/null
+++ b/bzrlib/plugins/po_merge/tests/test_po_merge.py
@@ -0,0 +1,451 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011 by Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib import (
+ merge,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ script,
+ )
+
+from bzrlib.plugins import po_merge
+
+class BlackboxTestPoMerger(script.TestCaseWithTransportAndScript):
+
+ _test_needs_features = [features.msgmerge_feature]
+
+ def setUp(self):
+ super(BlackboxTestPoMerger, self).setUp()
+ self.builder = make_adduser_branch(self, 'adduser')
+ # We need to install our hook as the test framework cleared it as part
+ # of the initialization
+ merge.Merger.hooks.install_named_hook(
+ "merge_file_content", po_merge.po_merge_hook, ".po file merge")
+
+ def test_merge_with_hook_gives_unexpected_results(self):
+ # Since the conflicts in .pot are not seen *during* the merge, the .po
+ # merge triggers the hook and creates no conflicts for fr.po. But the
+ # .pot used is the one present in the tree *before* the merge.
+ self.run_script("""\
+$ bzr branch adduser -rrevid:this work
+2>Branched 2 revisions.
+$ cd work
+$ bzr merge ../adduser -rrevid:other
+2> M po/adduser.pot
+2> M po/fr.po
+2>Text conflict in po/adduser.pot
+2>1 conflicts encountered.
+""")
+
+ def test_called_on_remerge(self):
+ # Merge with no config for the hook to create the conflicts
+ self.run_script("""\
+$ bzr branch adduser -rrevid:this work
+2>Branched 2 revisions.
+$ cd work
+# set po_dirs to an empty list
+$ bzr merge ../adduser -rrevid:other -Opo_merge.po_dirs=
+2> M po/adduser.pot
+2> M po/fr.po
+2>Text conflict in po/adduser.pot
+2>Text conflict in po/fr.po
+2>2 conflicts encountered.
+""")
+ # Fix the conflicts in the .pot file
+ with open('po/adduser.pot', 'w') as f:
+ f.write(_Adduser['resolved_pot'])
+ # Tell bzr the conflict is resolved
+ self.run_script("""\
+$ bzr resolve po/adduser.pot
+2>1 conflict resolved, 1 remaining
+# Use remerge to trigger the hook, we use the default config options here
+$ bzr remerge po/*.po
+2>All changes applied successfully.
+# There should be no conflicts anymore
+$ bzr conflicts
+""")
+
+
+def make_adduser_branch(test, relpath):
+ """Helper for po_merge blackbox tests.
+
+ This creates a branch containing the needed base revisions so tests can
+ attempt merges and conflict resolutions.
+ """
+ builder = test.make_branch_builder(relpath)
+ builder.start_series()
+ builder.build_snapshot('base', None,
+ [('add', ('', 'root-id', 'directory', '')),
+ # Create empty files
+ ('add', ('po', 'dir-id', 'directory', None),),
+ ('add', ('po/adduser.pot', 'pot-id', 'file',
+ _Adduser['base_pot'])),
+ ('add', ('po/fr.po', 'po-id', 'file',
+ _Adduser['base_po'])),
+ ])
+ # The 'other' branch
+ builder.build_snapshot('other', ['base'],
+ [('modify', ('pot-id',
+ _Adduser['other_pot'])),
+ ('modify', ('po-id',
+ _Adduser['other_po'])),
+ ])
+ # The 'this' branch
+ builder.build_snapshot('this', ['base'],
+ [('modify', ('pot-id', _Adduser['this_pot'])),
+ ('modify', ('po-id', _Adduser['this_po'])),
+ ])
+ # builder.get_branch() tip is now 'this'
+ builder.finish_series()
+ return builder
+
+
+class TestAdduserBranch(script.TestCaseWithTransportAndScript):
+ """Sanity checks on the adduser branch content."""
+
+ def setUp(self):
+ super(TestAdduserBranch, self).setUp()
+ self.builder = make_adduser_branch(self, 'adduser')
+
+ def assertAdduserBranchContent(self, revid):
+ env = dict(revid=revid, branch_name=revid)
+ self.run_script("""\
+$ bzr branch adduser -rrevid:%(revid)s %(branch_name)s
+""" % env, null_output_matches_anything=True)
+ self.assertFileEqual(_Adduser['%(revid)s_pot' % env],
+ '%(branch_name)s/po/adduser.pot' % env)
+ self.assertFileEqual(_Adduser['%(revid)s_po' % env],
+ '%(branch_name)s/po/fr.po' % env )
+
+ def test_base(self):
+ self.assertAdduserBranchContent('base')
+
+ def test_this(self):
+ self.assertAdduserBranchContent('this')
+
+ def test_other(self):
+ self.assertAdduserBranchContent('other')
+
+
+# Real content from the adduser package so we don't have to guess about format
+# details. This is declared at the end of the file to avoid cluttering the
+# beginning of the file.
+
+_Adduser = dict(
+ base_pot = r"""# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2007-01-17 21:50+0100\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@example.com>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:135
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+
+#: ../adduser:188
+msgid "Warning: The home dir you specified already exists.\n"
+msgstr ""
+
+""",
+ this_pot = r"""# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2011-01-06 21:06+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:152
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+
+#: ../adduser:208
+#, perl-format
+msgid "Warning: The home dir %s you specified already exists.\n"
+msgstr ""
+
+#: ../adduser:210
+#, perl-format
+msgid "Warning: The home dir %s you specified can't be accessed: %s\n"
+msgstr ""
+
+""",
+ other_pot = r"""# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2010-11-21 17:13-0400\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:150
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+
+#: ../adduser:206
+#, perl-format
+msgid "Warning: The home dir %s you specified already exists.\n"
+msgstr ""
+
+#: ../adduser:208
+#, perl-format
+msgid "Warning: The home dir %s you specified can't be accessed: %s\n"
+msgstr ""
+
+""",
+ resolved_pot = r"""# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2011-10-19 12:50-0700\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:152
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+
+#: ../adduser:208
+#, perl-format
+msgid "Warning: The home dir %s you specified already exists.\n"
+msgstr ""
+
+#: ../adduser:210
+#, perl-format
+msgid "Warning: The home dir %s you specified can't be accessed: %s\n"
+msgstr ""
+
+""",
+ base_po = r"""# adduser's manpages translation to French
+# Copyright (C) 2004 Software in the Public Interest
+# This file is distributed under the same license as the adduser package
+#
+# Translators:
+# Jean-Baka Domelevo Entfellner <domelevo@example.com>, 2009.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: adduser 3.111\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2007-01-17 21:50+0100\n"
+"PO-Revision-Date: 2010-01-21 10:36+0100\n"
+"Last-Translator: Jean-Baka Domelevo Entfellner <domelevo@example.com>\n"
+"Language-Team: Debian French Team <debian-l10n-french@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Poedit-Language: French\n"
+"X-Poedit-Country: FRANCE\n"
+
+# type: Plain text
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:135
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+"Seul le superutilisateur est autorisé à ajouter un utilisateur ou un groupe "
+"au système.\n"
+
+#: ../adduser:188
+msgid "Warning: The home dir you specified already exists.\n"
+msgstr ""
+"Attention ! Le répertoire personnel que vous avez indiqué existe déjà.\n"
+
+""",
+ this_po = r"""# adduser's manpages translation to French
+# Copyright (C) 2004 Software in the Public Interest
+# This file is distributed under the same license as the adduser package
+#
+# Translators:
+# Jean-Baka Domelevo Entfellner <domelevo@example.com>, 2009.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: adduser 3.111\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2010-10-12 15:48+0200\n"
+"PO-Revision-Date: 2010-01-21 10:36+0100\n"
+"Last-Translator: Jean-Baka Domelevo Entfellner <domelevo@example.com>\n"
+"Language-Team: Debian French Team <debian-l10n-french@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Poedit-Language: French\n"
+"X-Poedit-Country: FRANCE\n"
+
+# type: Plain text
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:152
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+"Seul le superutilisateur est autorisé à ajouter un utilisateur ou un groupe "
+"au système.\n"
+
+#: ../adduser:208
+#, fuzzy, perl-format
+msgid "Warning: The home dir %s you specified already exists.\n"
+msgstr ""
+"Attention ! Le répertoire personnel que vous avez indiqué existe déjà.\n"
+
+#: ../adduser:210
+#, fuzzy, perl-format
+msgid "Warning: The home dir %s you specified can't be accessed: %s\n"
+msgstr ""
+"Attention ! Le répertoire personnel que vous avez indiqué existe déjà.\n"
+
+""",
+ other_po = r"""# adduser's manpages translation to French
+# Copyright (C) 2004 Software in the Public Interest
+# This file is distributed under the same license as the adduser package
+#
+# Translators:
+# Jean-Baka Domelevo Entfellner <domelevo@example.com>, 2009, 2010.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: adduser 3.112+nmu2\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2010-11-21 17:13-0400\n"
+"PO-Revision-Date: 2010-11-10 11:08+0100\n"
+"Last-Translator: Jean-Baka Domelevo-Entfellner <domelevo@example.com>\n"
+"Language-Team: Debian French Team <debian-l10n-french@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Poedit-Country: FRANCE\n"
+
+# type: Plain text
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:150
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+"Seul le superutilisateur est autorisé à ajouter un utilisateur ou un groupe "
+"au système.\n"
+
+#: ../adduser:206
+#, perl-format
+msgid "Warning: The home dir %s you specified already exists.\n"
+msgstr ""
+"Attention ! Le répertoire personnel que vous avez indiqué (%s) existe déjà.\n"
+
+#: ../adduser:208
+#, perl-format
+msgid "Warning: The home dir %s you specified can't be accessed: %s\n"
+msgstr ""
+"Attention ! Impossible d'accéder au répertoire personnel que vous avez "
+"indiqué (%s) : %s.\n"
+
+""",
+ resolved_po = r"""# adduser's manpages translation to French
+# Copyright (C) 2004 Software in the Public Interest
+# This file is distributed under the same license as the adduser package
+#
+# Translators:
+# Jean-Baka Domelevo Entfellner <domelevo@example.com>, 2009, 2010.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: adduser 3.112+nmu2\n"
+"Report-Msgid-Bugs-To: adduser-devel@example.com\n"
+"POT-Creation-Date: 2011-10-19 12:50-0700\n"
+"PO-Revision-Date: 2010-11-10 11:08+0100\n"
+"Last-Translator: Jean-Baka Domelevo-Entfellner <domelevo@example.com>\n"
+"Language-Team: Debian French Team <debian-l10n-french@example.com>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Poedit-Country: FRANCE\n"
+
+# type: Plain text
+#. everyone can issue "--help" and "--version", but only root can go on
+#: ../adduser:152
+msgid "Only root may add a user or group to the system.\n"
+msgstr ""
+"Seul le superutilisateur est autorisé à ajouter un utilisateur ou un groupe "
+"au système.\n"
+
+#: ../adduser:208
+#, perl-format
+msgid "Warning: The home dir %s you specified already exists.\n"
+msgstr ""
+"Attention ! Le répertoire personnel que vous avez indiqué (%s) existe déjà.\n"
+
+#: ../adduser:210
+#, perl-format
+msgid "Warning: The home dir %s you specified can't be accessed: %s\n"
+msgstr ""
+"Attention ! Impossible d'accéder au répertoire personnel que vous avez "
+"indiqué (%s) : %s.\n"
+
+""",
+)
diff --git a/bzrlib/plugins/weave_fmt/__init__.py b/bzrlib/plugins/weave_fmt/__init__.py
new file mode 100644
index 0000000..e7d33d2
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/__init__.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Weave formats.
+
+These were the formats present in pre-1.0 versions of Bazaar.
+"""
+
+from __future__ import absolute_import
+
+# Since we are a built-in plugin we share the bzrlib version
+from bzrlib import version_info
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ repository as _mod_repository,
+ serializer,
+ workingtree as _mod_workingtree,
+ )
+from bzrlib.bzrdir import (
+ BzrProber,
+ register_metadir,
+ )
+
+# Pre-0.8 formats that don't have a disk format string (because they are
+# versioned by the matching control directory). We use the control directory's
+# disk format string as a key for the network_name because they meet the
+# constraints (simple string, unique, immutable).
+_mod_repository.network_format_registry.register_lazy(
+ "Bazaar-NG branch, format 5\n",
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'RepositoryFormat5',
+)
+_mod_repository.network_format_registry.register_lazy(
+ "Bazaar-NG branch, format 6\n",
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'RepositoryFormat6',
+)
+
+# Weave formats which have no format string and are not discoverable or independently
+# creatable on disk, so are not registered in format_registry. They're
+# all in bzrlib.plugins.weave_fmt.repository now. When an instance of one of these is
+# needed, it's constructed directly by the BzrDir. Non-native formats where
+# the repository is not separately opened are similar.
+
+_mod_repository.format_registry.register_lazy(
+ 'Bazaar-NG Repository format 7',
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'RepositoryFormat7'
+ )
+
+_mod_repository.format_registry.register_extra_lazy(
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'RepositoryFormat4')
+_mod_repository.format_registry.register_extra_lazy(
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'RepositoryFormat5')
+_mod_repository.format_registry.register_extra_lazy(
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'RepositoryFormat6')
+
+
+# The pre-0.8 formats have their repository format network name registered in
+# repository.py. MetaDir formats have their repository format network name
+# inferred from their disk format string.
+controldir.format_registry.register_lazy('weave',
+ "bzrlib.plugins.weave_fmt.bzrdir", "BzrDirFormat6",
+ 'Pre-0.8 format. Slower than knit and does not'
+ ' support checkouts or shared repositories.',
+ hidden=True,
+ deprecated=True)
+register_metadir(controldir.format_registry, 'metaweave',
+ 'bzrlib.plugins.weave_fmt.repository.RepositoryFormat7',
+ 'Transitional format in 0.8. Slower than knit.',
+ branch_format='bzrlib.branchfmt.fullhistory.BzrBranchFormat5',
+ tree_format='bzrlib.workingtree_3.WorkingTreeFormat3',
+ hidden=True,
+ deprecated=True)
+
+
+BzrProber.formats.register_lazy(
+ "Bazaar-NG branch, format 0.0.4\n", "bzrlib.plugins.weave_fmt.bzrdir",
+ "BzrDirFormat4")
+BzrProber.formats.register_lazy(
+ "Bazaar-NG branch, format 5\n", "bzrlib.plugins.weave_fmt.bzrdir",
+ "BzrDirFormat5")
+BzrProber.formats.register_lazy(
+ "Bazaar-NG branch, format 6\n", "bzrlib.plugins.weave_fmt.bzrdir",
+ "BzrDirFormat6")
+
+
+_mod_branch.format_registry.register_extra_lazy(
+ 'bzrlib.plugins.weave_fmt.branch', 'BzrBranchFormat4')
+_mod_branch.network_format_registry.register_lazy(
+ "Bazaar-NG branch, format 6\n",
+ 'bzrlib.plugins.weave_fmt.branch', "BzrBranchFormat4")
+
+
+_mod_workingtree.format_registry.register_extra_lazy(
+ 'bzrlib.plugins.weave_fmt.workingtree',
+ 'WorkingTreeFormat2')
+
+serializer.format_registry.register_lazy('4', 'bzrlib.plugins.weave_fmt.xml4',
+ 'serializer_v4')
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_bzrdir',
+ 'test_repository',
+ 'test_workingtree',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
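A small sketch of what the lazy registration buys, assuming controldir.format_registry.make_bzrdir() is used as the lookup entry point: nothing from weave_fmt is imported until the format is actually requested::

    from bzrlib import controldir

    # The 'weave' entry registered above resolves to BzrDirFormat6 on first use.
    fmt = controldir.format_registry.make_bzrdir('weave')
    print fmt.get_format_description()
    # All-in-one format 6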
diff --git a/bzrlib/plugins/weave_fmt/branch.py b/bzrlib/plugins/weave_fmt/branch.py
new file mode 100644
index 0000000..f9852c2
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/branch.py
@@ -0,0 +1,219 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Weave-era branch implementations."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ lockable_files,
+ )
+
+from bzrlib.decorators import (
+ needs_read_lock,
+ needs_write_lock,
+ only_raises,
+ )
+from bzrlib.lock import LogicalLockResult
+from bzrlib.trace import mutter
+
+from bzrlib.branch import (
+ BranchFormat,
+ BranchWriteLockResult,
+ )
+from bzrlib.branchfmt.fullhistory import (
+ FullHistoryBzrBranch,
+ )
+
+
+class BzrBranch4(FullHistoryBzrBranch):
+ """Branch format 4."""
+
+ def lock_write(self, token=None):
+ """Lock the branch for write operations.
+
+ :param token: A token to permit reacquiring a previously held and
+ preserved lock.
+ :return: A BranchWriteLockResult.
+ """
+ if not self.is_locked():
+ self._note_lock('w')
+ # All-in-one needs to always unlock/lock.
+ self.repository._warn_if_deprecated(self)
+ self.repository.lock_write()
+ try:
+ return BranchWriteLockResult(self.unlock,
+ self.control_files.lock_write(token=token))
+ except:
+ self.repository.unlock()
+ raise
+
+ def lock_read(self):
+ """Lock the branch for read operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ if not self.is_locked():
+ self._note_lock('r')
+ # All-in-one needs to always unlock/lock.
+ self.repository._warn_if_deprecated(self)
+ self.repository.lock_read()
+ try:
+ self.control_files.lock_read()
+ return LogicalLockResult(self.unlock)
+ except:
+ self.repository.unlock()
+ raise
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ if self.control_files._lock_count == 2 and self.conf_store is not None:
+ self.conf_store.save_changes()
+ try:
+ self.control_files.unlock()
+ finally:
+ # All-in-one needs to always unlock/lock.
+ self.repository.unlock()
+ if not self.control_files.is_locked():
+ # we just released the lock
+ self._clear_cached_state()
+
+ def _get_checkout_format(self, lightweight=False):
+ """Return the most suitable metadir for a checkout of this branch.
+ """
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat7
+ from bzrlib.bzrdir import BzrDirMetaFormat1
+ format = BzrDirMetaFormat1()
+ if lightweight:
+ format.set_branch_format(self._format)
+ format.repository_format = self.bzrdir._format.repository_format
+ else:
+ format.repository_format = RepositoryFormat7()
+ return format
+
+ def unbind(self):
+ raise errors.UpgradeRequired(self.user_url)
+
+ def bind(self, other):
+ raise errors.UpgradeRequired(self.user_url)
+
+ def set_bound_location(self, location):
+ raise NotImplementedError(self.set_bound_location)
+
+ def get_bound_location(self):
+ return None
+
+ def update(self):
+ return None
+
+ def get_master_branch(self, possible_transports=None):
+ return None
+
+
+class BzrBranchFormat4(BranchFormat):
+ """Bzr branch format 4.
+
+ This format has:
+ - a revision-history file.
+ - a branch-lock lock file [ to be shared with the bzrdir ]
+
+ It does not support binding.
+ """
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ """Create a branch of this format in a_bzrdir.
+
+ :param a_bzrdir: The bzrdir to initialize the branch in
+ :param name: Name of colocated branch to create, if any
+ :param repository: Repository for this branch (unused)
+ """
+ if append_revisions_only:
+ raise errors.UpgradeRequired(a_bzrdir.user_url)
+ if repository is not None:
+ raise NotImplementedError(
+ "initialize(repository=<not None>) on %r" % (self,))
+        if not any(isinstance(a_bzrdir._format, format) for format in
+                   self._compatible_bzrdirs):
+ raise errors.IncompatibleFormat(self, a_bzrdir._format)
+ utf8_files = [('revision-history', ''),
+ ('branch-name', ''),
+ ]
+ mutter('creating branch %r in %s', self, a_bzrdir.user_url)
+ branch_transport = a_bzrdir.get_branch_transport(self, name=name)
+ control_files = lockable_files.LockableFiles(branch_transport,
+ 'branch-lock', lockable_files.TransportLock)
+ control_files.create_lock()
+ try:
+ control_files.lock_write()
+ except errors.LockContention:
+ lock_taken = False
+ else:
+ lock_taken = True
+ try:
+ for (filename, content) in utf8_files:
+ branch_transport.put_bytes(
+ filename, content,
+ mode=a_bzrdir._get_file_mode())
+ finally:
+ if lock_taken:
+ control_files.unlock()
+ branch = self.open(a_bzrdir, name, _found=True,
+ found_repository=None)
+ self._run_post_branch_init_hooks(a_bzrdir, name, branch)
+ return branch
+
+ def __init__(self):
+ super(BzrBranchFormat4, self).__init__()
+ from bzrlib.plugins.weave_fmt.bzrdir import (
+ BzrDirFormat4, BzrDirFormat5, BzrDirFormat6,
+ )
+ self._matchingbzrdir = BzrDirFormat6()
+ self._compatible_bzrdirs = [BzrDirFormat4, BzrDirFormat5,
+ BzrDirFormat6]
+
+ def network_name(self):
+        """The network name for this format is the control dir's disk label."""
+ return self._matchingbzrdir.get_format_string()
+
+ def get_format_description(self):
+ return "Branch format 4"
+
+ def open(self, a_bzrdir, name=None, _found=False, ignore_fallbacks=False,
+ found_repository=None, possible_transports=None):
+ """See BranchFormat.open()."""
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ if not _found:
+ # we are being called directly and must probe.
+ raise NotImplementedError
+ if found_repository is None:
+ found_repository = a_bzrdir.open_repository()
+ return BzrBranch4(_format=self,
+ _control_files=a_bzrdir._control_files,
+ a_bzrdir=a_bzrdir,
+ name=name,
+ _repository=found_repository,
+ possible_transports=possible_transports)
+
+ def __str__(self):
+ return "Bazaar-NG branch format 4"
+
+ def supports_leaving_lock(self):
+ return False
diff --git a/bzrlib/plugins/weave_fmt/bzrdir.py b/bzrlib/plugins/weave_fmt/bzrdir.py
new file mode 100644
index 0000000..5b593e8
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/bzrdir.py
@@ -0,0 +1,1006 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Weave-era BzrDir formats."""
+
+from __future__ import absolute_import
+
+from bzrlib.bzrdir import (
+ BzrDir,
+ BzrDirFormat,
+ BzrDirMetaFormat1,
+ )
+from bzrlib.controldir import (
+ ControlDir,
+ Converter,
+ format_registry,
+ )
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import os
+import warnings
+
+from bzrlib import (
+ errors,
+ graph,
+ lockable_files,
+ lockdir,
+ osutils,
+ revision as _mod_revision,
+ trace,
+ ui,
+ urlutils,
+ versionedfile,
+ weave,
+ xml5,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.store.versioned import VersionedFileStore
+from bzrlib.transactions import WriteTransaction
+from bzrlib.transport import (
+ get_transport,
+ local,
+ )
+from bzrlib.plugins.weave_fmt import xml4
+""")
+
+
+class BzrDirFormatAllInOne(BzrDirFormat):
+ """Common class for formats before meta-dirs."""
+
+ fixed_components = True
+
+ def initialize_on_transport_ex(self, transport, use_existing_dir=False,
+ create_prefix=False, force_new_repo=False, stacked_on=None,
+ stack_on_pwd=None, repo_format_name=None, make_working_trees=None,
+ shared_repo=False):
+ """See ControlDir.initialize_on_transport_ex."""
+ require_stacking = (stacked_on is not None)
+ # Format 5 cannot stack, but we've been asked to - actually init
+ # a Meta1Dir
+ if require_stacking:
+ format = BzrDirMetaFormat1()
+ return format.initialize_on_transport_ex(transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=stack_on_pwd, repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=shared_repo)
+ return BzrDirFormat.initialize_on_transport_ex(self, transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=stack_on_pwd, repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=shared_repo)
+
+ @classmethod
+ def from_string(cls, format_string):
+ if format_string != cls.get_format_string():
+ raise AssertionError("unexpected format string %r" % format_string)
+ return cls()
+
+
+class BzrDirFormat5(BzrDirFormatAllInOne):
+ """Bzr control format 5.
+
+ This format is a combined format for working tree, branch and repository.
+ It has:
+ - Format 2 working trees [always]
+ - Format 4 branches [always]
+ - Format 5 repositories [always]
+ Unhashed stores in the repository.
+ """
+
+ _lock_class = lockable_files.TransportLock
+
+ def __eq__(self, other):
+ return type(self) == type(other)
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrDirFormat.get_format_string()."""
+ return "Bazaar-NG branch, format 5\n"
+
+ def get_branch_format(self):
+ from bzrlib.plugins.weave_fmt.branch import BzrBranchFormat4
+ return BzrBranchFormat4()
+
+ def get_format_description(self):
+ """See ControlDirFormat.get_format_description()."""
+ return "All-in-one format 5"
+
+ def get_converter(self, format=None):
+ """See ControlDirFormat.get_converter()."""
+ # there is one and only one upgrade path here.
+ return ConvertBzrDir5To6()
+
+ def _initialize_for_clone(self, url):
+ return self.initialize_on_transport(get_transport(url), _cloning=True)
+
+ def initialize_on_transport(self, transport, _cloning=False):
+ """Format 5 dirs always have working tree, branch and repository.
+
+ Except when they are being cloned.
+ """
+ from bzrlib.plugins.weave_fmt.branch import BzrBranchFormat4
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat5
+ result = (super(BzrDirFormat5, self).initialize_on_transport(transport))
+ RepositoryFormat5().initialize(result, _internal=True)
+ if not _cloning:
+ branch = BzrBranchFormat4().initialize(result)
+ result._init_workingtree()
+ return result
+
+ def network_name(self):
+ return self.get_format_string()
+
+ def _open(self, transport):
+ """See BzrDirFormat._open."""
+ return BzrDir5(transport, self)
+
+ def __return_repository_format(self):
+ """Circular import protection."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat5
+ return RepositoryFormat5()
+ repository_format = property(__return_repository_format)
+
+
+class BzrDirFormat6(BzrDirFormatAllInOne):
+ """Bzr control format 6.
+
+ This format is a combined format for working tree, branch and repository.
+ It has:
+ - Format 2 working trees [always]
+ - Format 4 branches [always]
+ - Format 6 repositories [always]
+ """
+
+ _lock_class = lockable_files.TransportLock
+
+ def __eq__(self, other):
+ return type(self) == type(other)
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrDirFormat.get_format_string()."""
+ return "Bazaar-NG branch, format 6\n"
+
+ def get_format_description(self):
+ """See ControlDirFormat.get_format_description()."""
+ return "All-in-one format 6"
+
+ def get_branch_format(self):
+ from bzrlib.plugins.weave_fmt.branch import BzrBranchFormat4
+ return BzrBranchFormat4()
+
+ def get_converter(self, format=None):
+ """See ControlDirFormat.get_converter()."""
+ # there is one and only one upgrade path here.
+ return ConvertBzrDir6ToMeta()
+
+ def _initialize_for_clone(self, url):
+ return self.initialize_on_transport(get_transport(url), _cloning=True)
+
+ def initialize_on_transport(self, transport, _cloning=False):
+ """Format 6 dirs always have working tree, branch and repository.
+
+ Except when they are being cloned.
+ """
+ from bzrlib.plugins.weave_fmt.branch import BzrBranchFormat4
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat6
+ result = super(BzrDirFormat6, self).initialize_on_transport(transport)
+ RepositoryFormat6().initialize(result, _internal=True)
+ if not _cloning:
+ branch = BzrBranchFormat4().initialize(result)
+ result._init_workingtree()
+ return result
+
+ def network_name(self):
+ return self.get_format_string()
+
+ def _open(self, transport):
+ """See BzrDirFormat._open."""
+ return BzrDir6(transport, self)
+
+ def __return_repository_format(self):
+ """Circular import protection."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat6
+ return RepositoryFormat6()
+ repository_format = property(__return_repository_format)
+
+
+class ConvertBzrDir4To5(Converter):
+ """Converts format 4 bzr dirs to format 5."""
+
+ def __init__(self):
+ super(ConvertBzrDir4To5, self).__init__()
+ self.converted_revs = set()
+ self.absent_revisions = set()
+ self.text_count = 0
+ self.revisions = {}
+
+ def convert(self, to_convert, pb):
+ """See Converter.convert()."""
+ self.bzrdir = to_convert
+ if pb is not None:
+ warnings.warn(gettext("pb parameter to convert() is deprecated"))
+ self.pb = ui.ui_factory.nested_progress_bar()
+ try:
+ ui.ui_factory.note(gettext('starting upgrade from format 4 to 5'))
+ if isinstance(self.bzrdir.transport, local.LocalTransport):
+ self.bzrdir.get_workingtree_transport(None).delete('stat-cache')
+ self._convert_to_weaves()
+ return ControlDir.open(self.bzrdir.user_url)
+ finally:
+ self.pb.finished()
+
+ def _convert_to_weaves(self):
+ ui.ui_factory.note(gettext(
+ 'note: upgrade may be faster if all store files are ungzipped first'))
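+ # Make sure a 'weaves' directory exists to write into, replacing any
+ # stray non-directory entry of that name.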
+ try:
+ # TODO permissions
+ stat = self.bzrdir.transport.stat('weaves')
+ if not S_ISDIR(stat.st_mode):
+ self.bzrdir.transport.delete('weaves')
+ self.bzrdir.transport.mkdir('weaves')
+ except errors.NoSuchFile:
+ self.bzrdir.transport.mkdir('weaves')
+ # deliberately not a WeaveFile as we want to build it up slowly.
+ self.inv_weave = weave.Weave('inventory')
+ # holds in-memory weaves for all files
+ self.text_weaves = {}
+ self.bzrdir.transport.delete('branch-format')
+ self.branch = self.bzrdir.open_branch()
+ self._convert_working_inv()
+ rev_history = self.branch._revision_history()
+ # to_read is a stack holding the revisions we still need to process;
+ # appending to it adds new highest-priority revisions
+ self.known_revisions = set(rev_history)
+ self.to_read = rev_history[-1:]
+ while self.to_read:
+ rev_id = self.to_read.pop()
+ if (rev_id not in self.revisions
+ and rev_id not in self.absent_revisions):
+ self._load_one_rev(rev_id)
+ self.pb.clear()
+ to_import = self._make_order()
+ for i, rev_id in enumerate(to_import):
+ self.pb.update(gettext('converting revision'), i, len(to_import))
+ self._convert_one_rev(rev_id)
+ self.pb.clear()
+ self._write_all_weaves()
+ self._write_all_revs()
+ ui.ui_factory.note(gettext('upgraded to weaves:'))
+ ui.ui_factory.note(' ' + gettext('%6d revisions and inventories') %
+ len(self.revisions))
+ ui.ui_factory.note(' ' + gettext('%6d revisions not present') %
+ len(self.absent_revisions))
+ ui.ui_factory.note(' ' + gettext('%6d texts') % self.text_count)
+ self._cleanup_spare_files_after_format4()
+ self.branch._transport.put_bytes(
+ 'branch-format',
+ BzrDirFormat5().get_format_string(),
+ mode=self.bzrdir._get_file_mode())
+
+ def _cleanup_spare_files_after_format4(self):
+ # FIXME working tree upgrade foo.
+ for n in 'merged-patches', 'pending-merged-patches':
+ try:
+ ## assert os.path.getsize(p) == 0
+ self.bzrdir.transport.delete(n)
+ except errors.NoSuchFile:
+ pass
+ self.bzrdir.transport.delete_tree('inventory-store')
+ self.bzrdir.transport.delete_tree('text-store')
+
+ def _convert_working_inv(self):
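+ # Rewrite the checked-out inventory from the v4 serializer to v5.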
+ inv = xml4.serializer_v4.read_inventory(
+ self.branch._transport.get('inventory'))
+ new_inv_xml = xml5.serializer_v5.write_inventory_to_string(inv, working=True)
+ self.branch._transport.put_bytes('inventory', new_inv_xml,
+ mode=self.bzrdir._get_file_mode())
+
+ def _write_all_weaves(self):
+ controlweaves = VersionedFileStore(self.bzrdir.transport, prefixed=False,
+ versionedfile_class=weave.WeaveFile)
+ weave_transport = self.bzrdir.transport.clone('weaves')
+ weaves = VersionedFileStore(weave_transport, prefixed=False,
+ versionedfile_class=weave.WeaveFile)
+ transaction = WriteTransaction()
+
+ try:
+ i = 0
+ for file_id, file_weave in self.text_weaves.items():
+ self.pb.update(gettext('writing weave'), i,
+ len(self.text_weaves))
+ weaves._put_weave(file_id, file_weave, transaction)
+ i += 1
+ self.pb.update(gettext('inventory'), 0, 1)
+ controlweaves._put_weave('inventory', self.inv_weave, transaction)
+ self.pb.update(gettext('inventory'), 1, 1)
+ finally:
+ self.pb.clear()
+
+ def _write_all_revs(self):
+ """Write all revisions out in new form."""
+ self.bzrdir.transport.delete_tree('revision-store')
+ self.bzrdir.transport.mkdir('revision-store')
+ revision_transport = self.bzrdir.transport.clone('revision-store')
+ # TODO permissions
+ from bzrlib.xml5 import serializer_v5
+ from bzrlib.plugins.weave_fmt.repository import RevisionTextStore
+ revision_store = RevisionTextStore(revision_transport,
+ serializer_v5, False, versionedfile.PrefixMapper(),
+ lambda:True, lambda:True)
+ try:
+ for i, rev_id in enumerate(self.converted_revs):
+ self.pb.update(gettext('write revision'), i,
+ len(self.converted_revs))
+ text = serializer_v5.write_revision_to_string(
+ self.revisions[rev_id])
+ key = (rev_id,)
+ revision_store.add_lines(key, None, osutils.split_lines(text))
+ finally:
+ self.pb.clear()
+
+ def _load_one_rev(self, rev_id):
+ """Load a revision object into memory.
+
+ Any parents not either loaded or abandoned get queued to be
+ loaded."""
+ self.pb.update(gettext('loading revision'),
+ len(self.revisions),
+ len(self.known_revisions))
+ if not self.branch.repository.has_revision(rev_id):
+ self.pb.clear()
+ ui.ui_factory.note(gettext('revision {%s} not present in branch; '
+ 'will be converted as a ghost') %
+ rev_id)
+ self.absent_revisions.add(rev_id)
+ else:
+ rev = self.branch.repository.get_revision(rev_id)
+ for parent_id in rev.parent_ids:
+ self.known_revisions.add(parent_id)
+ self.to_read.append(parent_id)
+ self.revisions[rev_id] = rev
+
+ def _load_old_inventory(self, rev_id):
+ f = self.branch.repository.inventory_store.get(rev_id)
+ try:
+ old_inv_xml = f.read()
+ finally:
+ f.close()
+ inv = xml4.serializer_v4.read_inventory_from_string(old_inv_xml)
+ inv.revision_id = rev_id
+ rev = self.revisions[rev_id]
+ return inv
+
+ def _load_updated_inventory(self, rev_id):
+ inv_xml = self.inv_weave.get_text(rev_id)
+ inv = xml5.serializer_v5.read_inventory_from_string(inv_xml, rev_id)
+ return inv
+
+ def _convert_one_rev(self, rev_id):
+ """Convert revision and all referenced objects to new format."""
+ rev = self.revisions[rev_id]
+ inv = self._load_old_inventory(rev_id)
+ present_parents = [p for p in rev.parent_ids
+ if p not in self.absent_revisions]
+ self._convert_revision_contents(rev, inv, present_parents)
+ self._store_new_inv(rev, inv, present_parents)
+ self.converted_revs.add(rev_id)
+
+ def _store_new_inv(self, rev, inv, present_parents):
+ new_inv_xml = xml5.serializer_v5.write_inventory_to_string(inv)
+ new_inv_sha1 = osutils.sha_string(new_inv_xml)
+ self.inv_weave.add_lines(rev.revision_id,
+ present_parents,
+ new_inv_xml.splitlines(True))
+ rev.inventory_sha1 = new_inv_sha1
+
+ def _convert_revision_contents(self, rev, inv, present_parents):
+ """Convert all the files within a revision.
+
+ Also upgrade the inventory to refer to the text revision ids."""
+ rev_id = rev.revision_id
+ trace.mutter('converting texts of revision {%s}', rev_id)
+ parent_invs = map(self._load_updated_inventory, present_parents)
+ entries = inv.iter_entries()
+ entries.next()
+ for path, ie in entries:
+ self._convert_file_version(rev, ie, parent_invs)
+
+ def _convert_file_version(self, rev, ie, parent_invs):
+ """Convert one version of one file.
+
+ The file needs to be added into the weave if it is a merge
+ of >=2 parents or if it's changed from its parent.
+ """
+ file_id = ie.file_id
+ rev_id = rev.revision_id
+ w = self.text_weaves.get(file_id)
+ if w is None:
+ w = weave.Weave(file_id)
+ self.text_weaves[file_id] = w
+ text_changed = False
+ parent_candidate_entries = ie.parent_candidates(parent_invs)
+ heads = graph.Graph(self).heads(parent_candidate_entries.keys())
+ # XXX: Note that this is unordered - and this is tolerable because
+ # the previous code was also unordered.
+ previous_entries = dict((head, parent_candidate_entries[head]) for head
+ in heads)
+ self.snapshot_ie(previous_entries, ie, w, rev_id)
+
+ def get_parent_map(self, revision_ids):
+ """See graph.StackedParentsProvider.get_parent_map"""
+ return dict((revision_id, self.revisions[revision_id])
+ for revision_id in revision_ids
+ if revision_id in self.revisions)
+
+ def snapshot_ie(self, previous_revisions, ie, w, rev_id):
+ # TODO: convert this logic, which is ~= snapshot, into a call to
+ # snapshot. This needs the path figured out; rather than a work_tree,
+ # a v4 revision_tree can be given, or something that looks enough like
+ # one to give the file content to the entry if it needs it.
+ # And we need something that looks like a weave store for snapshot to
+ # save against.
+ #ie.snapshot(rev, PATH, previous_revisions, REVISION_TREE, InMemoryWeaveStore(self.text_weaves))
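+ # Until then: reuse the parent entry's revision when this entry is
+ # unchanged from its single parent; otherwise add the file text (or an
+ # empty text for entries without text) to the per-file weave.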
+ if len(previous_revisions) == 1:
+ previous_ie = previous_revisions.values()[0]
+ if ie._unchanged(previous_ie):
+ ie.revision = previous_ie.revision
+ return
+ if ie.has_text():
+ f = self.branch.repository._text_store.get(ie.text_id)
+ try:
+ file_lines = f.readlines()
+ finally:
+ f.close()
+ w.add_lines(rev_id, previous_revisions, file_lines)
+ self.text_count += 1
+ else:
+ w.add_lines(rev_id, previous_revisions, [])
+ ie.revision = rev_id
+
+ def _make_order(self):
+ """Return a suitable order for importing revisions.
+
+ The order must be such that a revision is imported after all
+ its (present) parents.
+ """
+ todo = set(self.revisions.keys())
+ done = self.absent_revisions.copy()
+ order = []
+ while todo:
+ # scan through looking for a revision whose parents
+ # are all done
+ for rev_id in sorted(list(todo)):
+ rev = self.revisions[rev_id]
+ parent_ids = set(rev.parent_ids)
+ if parent_ids.issubset(done):
+ # can take this one now
+ order.append(rev_id)
+ todo.remove(rev_id)
+ done.add(rev_id)
+ return order
+
+
+class ConvertBzrDir5To6(Converter):
+ """Converts format 5 bzr dirs to format 6."""
+
+ def convert(self, to_convert, pb):
+ """See Converter.convert()."""
+ self.bzrdir = to_convert
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ ui.ui_factory.note(gettext('starting upgrade from format 5 to 6'))
+ self._convert_to_prefixed()
+ return ControlDir.open(self.bzrdir.user_url)
+ finally:
+ pb.finished()
+
+ def _convert_to_prefixed(self):
+ from bzrlib.store import TransportStore
+ self.bzrdir.transport.delete('branch-format')
+ for store_name in ["weaves", "revision-store"]:
+ ui.ui_factory.note(gettext("adding prefixes to %s") % store_name)
+ store_transport = self.bzrdir.transport.clone(store_name)
+ store = TransportStore(store_transport, prefixed=True)
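+ # Move each flat store file to the path the prefixed store's mapper
+ # expects for its file id, creating prefix directories as needed.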
+ for urlfilename in store_transport.list_dir('.'):
+ filename = urlutils.unescape(urlfilename)
+ if (filename.endswith(".weave") or
+ filename.endswith(".gz") or
+ filename.endswith(".sig")):
+ file_id, suffix = os.path.splitext(filename)
+ else:
+ file_id = filename
+ suffix = ''
+ new_name = store._mapper.map((file_id,)) + suffix
+ # FIXME keep track of the dirs made RBC 20060121
+ try:
+ store_transport.move(filename, new_name)
+ except errors.NoSuchFile: # catches missing dirs strangely enough
+ store_transport.mkdir(osutils.dirname(new_name))
+ store_transport.move(filename, new_name)
+ self.bzrdir.transport.put_bytes(
+ 'branch-format',
+ BzrDirFormat6().get_format_string(),
+ mode=self.bzrdir._get_file_mode())
+
+
+class ConvertBzrDir6ToMeta(Converter):
+ """Converts format 6 bzr dirs to metadirs."""
+
+ def convert(self, to_convert, pb):
+ """See Converter.convert()."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat7
+ from bzrlib.branchfmt.fullhistory import BzrBranchFormat5
+ self.bzrdir = to_convert
+ self.pb = ui.ui_factory.nested_progress_bar()
+ self.count = 0
+ self.total = 20 # the steps we know about
+ self.garbage_inventories = []
+ self.dir_mode = self.bzrdir._get_dir_mode()
+ self.file_mode = self.bzrdir._get_file_mode()
+
+ ui.ui_factory.note(gettext('starting upgrade from format 6 to metadir'))
+ self.bzrdir.transport.put_bytes(
+ 'branch-format',
+ "Converting to format 6",
+ mode=self.file_mode)
+ # it's faster to move specific files around than to open and use the APIs...
+ # first off, nuke ancestry.weave, it was never used.
+ try:
+ self.step(gettext('Removing ancestry.weave'))
+ self.bzrdir.transport.delete('ancestry.weave')
+ except errors.NoSuchFile:
+ pass
+ # find out what's there
+ self.step(gettext('Finding branch files'))
+ last_revision = self.bzrdir.open_branch().last_revision()
+ bzrcontents = self.bzrdir.transport.list_dir('.')
+ for name in bzrcontents:
+ if name.startswith('basis-inventory.'):
+ self.garbage_inventories.append(name)
+ # create new directories for repository, working tree and branch
+ repository_names = [('inventory.weave', True),
+ ('revision-store', True),
+ ('weaves', True)]
+ self.step(gettext('Upgrading repository') + ' ')
+ self.bzrdir.transport.mkdir('repository', mode=self.dir_mode)
+ self.make_lock('repository')
+ # we hard code the formats here because we are converting into
+ # the meta format. The meta format upgrader can take this to a
+ # future format within each component.
+ self.put_format('repository', RepositoryFormat7())
+ for entry in repository_names:
+ self.move_entry('repository', entry)
+
+ self.step(gettext('Upgrading branch') + ' ')
+ self.bzrdir.transport.mkdir('branch', mode=self.dir_mode)
+ self.make_lock('branch')
+ self.put_format('branch', BzrBranchFormat5())
+ branch_files = [('revision-history', True),
+ ('branch-name', True),
+ ('parent', False)]
+ for entry in branch_files:
+ self.move_entry('branch', entry)
+
+ checkout_files = [('pending-merges', True),
+ ('inventory', True),
+ ('stat-cache', False)]
+ # If a mandatory checkout file is not present, the branch does not have
+ # a functional checkout. Do not create a checkout in the converted
+ # branch.
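+ # (The for/else below only marks has_checkout when the loop finishes
+ # without break, i.e. every mandatory file was found.)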
+ for name, mandatory in checkout_files:
+ if mandatory and name not in bzrcontents:
+ has_checkout = False
+ break
+ else:
+ has_checkout = True
+ if not has_checkout:
+ ui.ui_factory.note(gettext('No working tree.'))
+ # If some checkout files are there, we may as well get rid of them.
+ for name, mandatory in checkout_files:
+ if name in bzrcontents:
+ self.bzrdir.transport.delete(name)
+ else:
+ from bzrlib.workingtree_3 import WorkingTreeFormat3
+ self.step(gettext('Upgrading working tree'))
+ self.bzrdir.transport.mkdir('checkout', mode=self.dir_mode)
+ self.make_lock('checkout')
+ self.put_format(
+ 'checkout', WorkingTreeFormat3())
+ self.bzrdir.transport.delete_multi(
+ self.garbage_inventories, self.pb)
+ for entry in checkout_files:
+ self.move_entry('checkout', entry)
+ if last_revision is not None:
+ self.bzrdir.transport.put_bytes(
+ 'checkout/last-revision', last_revision)
+ self.bzrdir.transport.put_bytes(
+ 'branch-format',
+ BzrDirMetaFormat1().get_format_string(),
+ mode=self.file_mode)
+ self.pb.finished()
+ return ControlDir.open(self.bzrdir.user_url)
+
+ def make_lock(self, name):
+ """Make a lock for the new control dir name."""
+ self.step(gettext('Make %s lock') % name)
+ ld = lockdir.LockDir(self.bzrdir.transport,
+ '%s/lock' % name,
+ file_modebits=self.file_mode,
+ dir_modebits=self.dir_mode)
+ ld.create()
+
+ def move_entry(self, new_dir, entry):
+ """Move then entry name into new_dir."""
+ name = entry[0]
+ mandatory = entry[1]
+ self.step(gettext('Moving %s') % name)
+ try:
+ self.bzrdir.transport.move(name, '%s/%s' % (new_dir, name))
+ except errors.NoSuchFile:
+ if mandatory:
+ raise
+
+ def put_format(self, dirname, format):
+ self.bzrdir.transport.put_bytes('%s/format' % dirname,
+ format.get_format_string(),
+ self.file_mode)
+
+
+class BzrDirFormat4(BzrDirFormat):
+ """Bzr dir format 4.
+
+ This format is a combined format for working tree, branch and repository.
+ It has:
+ - Format 1 working trees [always]
+ - Format 4 branches [always]
+ - Format 4 repositories [always]
+
+ This format is deprecated: it indexes texts using a text id which is
+ removed in format 5; write support for this format has been removed.
+ """
+
+ _lock_class = lockable_files.TransportLock
+
+ def __eq__(self, other):
+ return type(self) == type(other)
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrDirFormat.get_format_string()."""
+ return "Bazaar-NG branch, format 0.0.4\n"
+
+ def get_format_description(self):
+ """See ControlDirFormat.get_format_description()."""
+ return "All-in-one format 4"
+
+ def get_converter(self, format=None):
+ """See ControlDirFormat.get_converter()."""
+ # there is one and only one upgrade path here.
+ return ConvertBzrDir4To5()
+
+ def initialize_on_transport(self, transport):
+ """Format 4 branches cannot be created."""
+ raise errors.UninitializableFormat(self)
+
+ def is_supported(self):
+ """Format 4 is not supported.
+
+ It is not supported because the model changed from 4 to 5 and the
+ conversion logic is expensive - so doing it on the fly was not
+ feasible.
+ """
+ return False
+
+ def network_name(self):
+ return self.get_format_string()
+
+ def _open(self, transport):
+ """See BzrDirFormat._open."""
+ return BzrDir4(transport, self)
+
+ def __return_repository_format(self):
+ """Circular import protection."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat4
+ return RepositoryFormat4()
+ repository_format = property(__return_repository_format)
+
+ @classmethod
+ def from_string(cls, format_string):
+ if format_string != cls.get_format_string():
+ raise AssertionError("unexpected format string %r" % format_string)
+ return cls()
+
+
+class BzrDirPreSplitOut(BzrDir):
+ """A common class for the all-in-one formats."""
+
+ def __init__(self, _transport, _format):
+ """See ControlDir.__init__."""
+ super(BzrDirPreSplitOut, self).__init__(_transport, _format)
+ self._control_files = lockable_files.LockableFiles(
+ self.get_branch_transport(None),
+ self._format._lock_file_name,
+ self._format._lock_class)
+
+ def break_lock(self):
+ """Pre-splitout bzrdirs do not suffer from stale locks."""
+ raise NotImplementedError(self.break_lock)
+
+ def cloning_metadir(self, require_stacking=False):
+ """Produce a metadir suitable for cloning with."""
+ if require_stacking:
+ return format_registry.make_bzrdir('1.6')
+ return self._format.__class__()
+
+ def clone(self, url, revision_id=None, force_new_repo=False,
+ preserve_stacking=False):
+ """See ControlDir.clone().
+
+ force_new_repo has no effect, since this family of formats always
+ requires a new repository.
+ preserve_stacking has no effect, since no source branch using this
+ family of formats can be stacked, so there is no stacking to preserve.
+ """
+ self._make_tail(url)
+ result = self._format._initialize_for_clone(url)
+ self.open_repository().clone(result, revision_id=revision_id)
+ from_branch = self.open_branch()
+ from_branch.clone(result, revision_id=revision_id)
+ try:
+ tree = self.open_workingtree()
+ except errors.NotLocalUrl:
+ # make a new one, this format always has to have one.
+ result._init_workingtree()
+ else:
+ tree.clone(result)
+ return result
+
+ def create_branch(self, name=None, repository=None,
+ append_revisions_only=None):
+ """See ControlDir.create_branch."""
+ if repository is not None:
+ raise NotImplementedError(
+ "create_branch(repository=<not None>) on %r" % (self,))
+ return self._format.get_branch_format().initialize(self, name=name,
+ append_revisions_only=append_revisions_only)
+
+ def destroy_branch(self, name=None):
+ """See ControlDir.destroy_branch."""
+ raise errors.UnsupportedOperation(self.destroy_branch, self)
+
+ def create_repository(self, shared=False):
+ """See ControlDir.create_repository."""
+ if shared:
+ raise errors.IncompatibleFormat('shared repository', self._format)
+ return self.open_repository()
+
+ def destroy_repository(self):
+ """See ControlDir.destroy_repository."""
+ raise errors.UnsupportedOperation(self.destroy_repository, self)
+
+ def create_workingtree(self, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """See ControlDir.create_workingtree."""
+ # The workingtree is sometimes created when the bzrdir is created,
+ # but not when cloning.
+
+ # This looks buggy but is not, really: this format creates the
+ # working tree when the bzrdir is created, and clone and sprout will
+ # have set the revision_id for us. It's only specific uses of
+ # create_workingtree in isolation that can do wonky stuff here, and
+ # that only happens when creating checkouts, which cannot be done on
+ # this format anyway. So - acceptable wart.
+ if hardlink:
+ warning("can't support hardlinked working trees in %r"
+ % (self,))
+ try:
+ result = self.open_workingtree(recommend_upgrade=False)
+ except errors.NoSuchFile:
+ result = self._init_workingtree()
+ if revision_id is not None:
+ if revision_id == _mod_revision.NULL_REVISION:
+ result.set_parent_ids([])
+ else:
+ result.set_parent_ids([revision_id])
+ return result
+
+ def _init_workingtree(self):
+ from bzrlib.plugins.weave_fmt.workingtree import WorkingTreeFormat2
+ try:
+ return WorkingTreeFormat2().initialize(self)
+ except errors.NotLocalUrl:
+ # Even though we can't access the working tree, we need to
+ # create its control files.
+ return WorkingTreeFormat2()._stub_initialize_on_transport(
+ self.transport, self._control_files._file_mode)
+
+ def destroy_workingtree(self):
+ """See ControlDir.destroy_workingtree."""
+ raise errors.UnsupportedOperation(self.destroy_workingtree, self)
+
+ def destroy_workingtree_metadata(self):
+ """See ControlDir.destroy_workingtree_metadata."""
+ raise errors.UnsupportedOperation(self.destroy_workingtree_metadata,
+ self)
+
+ def get_branch_transport(self, branch_format, name=None):
+ """See BzrDir.get_branch_transport()."""
+ if name is not None:
+ raise errors.NoColocatedBranchSupport(self)
+ if branch_format is None:
+ return self.transport
+ try:
+ branch_format.get_format_string()
+ except NotImplementedError:
+ return self.transport
+ raise errors.IncompatibleFormat(branch_format, self._format)
+
+ def get_repository_transport(self, repository_format):
+ """See BzrDir.get_repository_transport()."""
+ if repository_format is None:
+ return self.transport
+ try:
+ repository_format.get_format_string()
+ except NotImplementedError:
+ return self.transport
+ raise errors.IncompatibleFormat(repository_format, self._format)
+
+ def get_workingtree_transport(self, workingtree_format):
+ """See BzrDir.get_workingtree_transport()."""
+ if workingtree_format is None:
+ return self.transport
+ try:
+ workingtree_format.get_format_string()
+ except NotImplementedError:
+ return self.transport
+ raise errors.IncompatibleFormat(workingtree_format, self._format)
+
+ def needs_format_conversion(self, format=None):
+ """See ControlDir.needs_format_conversion()."""
+ # if the format is not the same as the system default,
+ # an upgrade is needed.
+ if format is None:
+ symbol_versioning.warn(symbol_versioning.deprecated_in((1, 13, 0))
+ % 'needs_format_conversion(format=None)')
+ format = BzrDirFormat.get_default_format()
+ return not isinstance(self._format, format.__class__)
+
+ def open_branch(self, name=None, unsupported=False,
+ ignore_fallbacks=False, possible_transports=None):
+ """See ControlDir.open_branch."""
+ from bzrlib.plugins.weave_fmt.branch import BzrBranchFormat4
+ format = BzrBranchFormat4()
+ format.check_support_status(unsupported)
+ return format.open(self, name, _found=True,
+ possible_transports=possible_transports)
+
+ def sprout(self, url, revision_id=None, force_new_repo=False,
+ possible_transports=None, accelerator_tree=None,
+ hardlink=False, stacked=False, create_tree_if_local=True,
+ source_branch=None):
+ """See ControlDir.sprout()."""
+ if source_branch is not None:
+ my_branch = self.open_branch()
+ if source_branch.base != my_branch.base:
+ raise AssertionError(
+ "source branch %r is not within %r with branch %r" %
+ (source_branch, self, my_branch))
+ if stacked:
+ raise errors.UnstackableBranchFormat(
+ self._format, self.root_transport.base)
+ if not create_tree_if_local:
+ raise errors.MustHaveWorkingTree(
+ self._format, self.root_transport.base)
+ from bzrlib.plugins.weave_fmt.workingtree import WorkingTreeFormat2
+ self._make_tail(url)
+ result = self._format._initialize_for_clone(url)
+ try:
+ self.open_repository().clone(result, revision_id=revision_id)
+ except errors.NoRepositoryPresent:
+ pass
+ try:
+ self.open_branch().sprout(result, revision_id=revision_id)
+ except errors.NotBranchError:
+ pass
+
+ # we always want a working tree
+ WorkingTreeFormat2().initialize(result,
+ accelerator_tree=accelerator_tree,
+ hardlink=hardlink)
+ return result
+
+ def set_branch_reference(self, target_branch, name=None):
+ from bzrlib.branch import BranchReferenceFormat
+ if name is not None:
+ raise errors.NoColocatedBranchSupport(self)
+ raise errors.IncompatibleFormat(BranchReferenceFormat, self._format)
+
+
+class BzrDir4(BzrDirPreSplitOut):
+ """A .bzr version 4 control object.
+
+ This is a deprecated format and may be removed after September 2006.
+ """
+
+ def create_repository(self, shared=False):
+ """See ControlDir.create_repository."""
+ return self._format.repository_format.initialize(self, shared)
+
+ def needs_format_conversion(self, format=None):
+ """Format 4 dirs are always in need of conversion."""
+ if format is None:
+ symbol_versioning.warn(symbol_versioning.deprecated_in((1, 13, 0))
+ % 'needs_format_conversion(format=None)')
+ return True
+
+ def open_repository(self):
+ """See ControlDir.open_repository."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat4
+ return RepositoryFormat4().open(self, _found=True)
+
+
+class BzrDir5(BzrDirPreSplitOut):
+ """A .bzr version 5 control object.
+
+ This is a deprecated format and may be removed after September 2006.
+ """
+
+ def has_workingtree(self):
+ """See ControlDir.has_workingtree."""
+ return True
+
+ def open_repository(self):
+ """See ControlDir.open_repository."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat5
+ return RepositoryFormat5().open(self, _found=True)
+
+ def open_workingtree(self, unsupported=False,
+ recommend_upgrade=True):
+ """See ControlDir.create_workingtree."""
+ from bzrlib.plugins.weave_fmt.workingtree import WorkingTreeFormat2
+ wt_format = WorkingTreeFormat2()
+ # we don't warn here about upgrades; that ought to be handled for the
+ # bzrdir as a whole
+ return wt_format.open(self, _found=True)
+
+
+class BzrDir6(BzrDirPreSplitOut):
+ """A .bzr version 6 control object.
+
+ This is a deprecated format and may be removed after September 2006.
+ """
+
+ def has_workingtree(self):
+ """See ControlDir.has_workingtree."""
+ return True
+
+ def open_repository(self):
+ """See ControlDir.open_repository."""
+ from bzrlib.plugins.weave_fmt.repository import RepositoryFormat6
+ return RepositoryFormat6().open(self, _found=True)
+
+ def open_workingtree(self, unsupported=False, recommend_upgrade=True):
+ """See ControlDir.create_workingtree."""
+ # we don't warn here about upgrades; that ought to be handled for the
+ # bzrdir as a whole
+ from bzrlib.plugins.weave_fmt.workingtree import WorkingTreeFormat2
+ return WorkingTreeFormat2().open(self, _found=True)
diff --git a/bzrlib/plugins/weave_fmt/repository.py b/bzrlib/plugins/weave_fmt/repository.py
new file mode 100644
index 0000000..7af7ad1
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/repository.py
@@ -0,0 +1,883 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Deprecated weave-based repository formats.
+
+Weave based formats scaled linearly with history size and could not represent
+ghosts.
+"""
+
+from __future__ import absolute_import
+
+import gzip
+import os
+from cStringIO import StringIO
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import itertools
+
+from bzrlib import (
+ xml5,
+ graph as _mod_graph,
+ ui,
+ )
+""")
+from bzrlib import (
+ debug,
+ errors,
+ lockable_files,
+ lockdir,
+ osutils,
+ symbol_versioning,
+ trace,
+ tuned_gzip,
+ urlutils,
+ versionedfile,
+ weave,
+ weavefile,
+ )
+from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.repository import (
+ InterRepository,
+ RepositoryFormatMetaDir,
+ )
+from bzrlib.store.text import TextStore
+from bzrlib.versionedfile import (
+ AbsentContentFactory,
+ FulltextContentFactory,
+ VersionedFiles,
+ )
+from bzrlib.vf_repository import (
+ InterSameDataRepository,
+ VersionedFileCommitBuilder,
+ VersionedFileRepository,
+ VersionedFileRepositoryFormat,
+ MetaDirVersionedFileRepository,
+ MetaDirVersionedFileRepositoryFormat,
+ )
+
+from bzrlib.plugins.weave_fmt import bzrdir as weave_bzrdir
+
+
+class AllInOneRepository(VersionedFileRepository):
+ """Legacy support - the repository behaviour for all-in-one branches."""
+
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ def _escape(self, file_or_path):
+ if not isinstance(file_or_path, basestring):
+ file_or_path = '/'.join(file_or_path)
+ if file_or_path == '':
+ return u''
+ return urlutils.escape(osutils.safe_unicode(file_or_path))
+
+ def __init__(self, _format, a_bzrdir):
+ # we reuse one control files instance.
+ dir_mode = a_bzrdir._get_dir_mode()
+ file_mode = a_bzrdir._get_file_mode()
+
+ def get_store(name, compressed=True, prefixed=False):
+ # FIXME: This approach of assuming stores are all entirely compressed
+ # or entirely uncompressed is tidy, but breaks upgrade from
+ # some existing branches where there's a mixture; we probably
+ # still want the option to look for both.
+ relpath = self._escape(name)
+ store = TextStore(a_bzrdir.transport.clone(relpath),
+ prefixed=prefixed, compressed=compressed,
+ dir_mode=dir_mode,
+ file_mode=file_mode)
+ return store
+
+ # not broken out yet because the controlweaves|inventory_store
+ # and texts bits are still different.
+ if isinstance(_format, RepositoryFormat4):
+ # cannot remove these - there is still no consistent api
+ # which allows access to this old info.
+ self.inventory_store = get_store('inventory-store')
+ self._text_store = get_store('text-store')
+ super(AllInOneRepository, self).__init__(_format, a_bzrdir, a_bzrdir._control_files)
+
+ @needs_read_lock
+ def _all_possible_ids(self):
+ """Return all the possible revisions that we could find."""
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(
+ 3, "_all_possible_ids scales with size of history.")
+ return [key[-1] for key in self.inventories.keys()]
+
+ @needs_read_lock
+ def _all_revision_ids(self):
+ """Returns a list of all the revision ids in the repository.
+
+ These are in as much topological order as the underlying store can
+ present: for weaves, ghosts may lead to a lack of correctness until
+ the reweave updates the parents list.
+ """
+ return [key[-1] for key in self.revisions.keys()]
+
+ def _activate_new_inventory(self):
+ """Put a replacement inventory.new into use as inventories."""
+ # Copy the content across
+ t = self.bzrdir._control_files._transport
+ t.copy('inventory.new.weave', 'inventory.weave')
+ # delete the temp inventory
+ t.delete('inventory.new.weave')
+ # Check we can parse the new weave properly as a sanity check
+ self.inventories.keys()
+
+ def _backup_inventory(self):
+ t = self.bzrdir._control_files._transport
+ t.copy('inventory.weave', 'inventory.backup.weave')
+
+ def _temp_inventories(self):
+ t = self.bzrdir._control_files._transport
+ return self._format._get_inventories(t, self, 'inventory.new')
+
+ def get_commit_builder(self, branch, parents, config, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ self._check_ascii_revisionid(revision_id, self.get_commit_builder)
+ result = VersionedFileCommitBuilder(self, parents, config, timestamp,
+ timezone, committer, revprops, revision_id, lossy=lossy)
+ self.start_write_group()
+ return result
+
+ @needs_read_lock
+ def get_revisions(self, revision_ids):
+ revs = self._get_revisions(revision_ids)
+ return revs
+
+ def _inventory_add_lines(self, revision_id, parents, lines,
+ check_content=True):
+ """Store lines in inv_vf and return the sha1 of the inventory."""
+ present_parents = self.get_graph().get_parent_map(parents)
+ final_parents = []
+ for parent in parents:
+ if parent in present_parents:
+ final_parents.append((parent,))
+ return self.inventories.add_lines((revision_id,), final_parents, lines,
+ check_content=check_content)[0]
+
+ def is_shared(self):
+ """AllInOne repositories cannot be shared."""
+ return False
+
+ @needs_write_lock
+ def set_make_working_trees(self, new_value):
+ """Set the policy flag for making working trees when creating branches.
+
+ This only applies to branches that use this repository.
+
+ The default is 'True'.
+ :param new_value: True to restore the default, False to disable making
+ working trees.
+ """
+ raise errors.RepositoryUpgradeRequired(self.user_url)
+
+ def make_working_trees(self):
+ """Returns the policy for making working trees on new branches."""
+ return True
+
+
+class WeaveMetaDirRepository(MetaDirVersionedFileRepository):
+ """A subclass of MetaDirRepository to set weave specific policy."""
+
+ def __init__(self, _format, a_bzrdir, control_files):
+ super(WeaveMetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
+ self._serializer = _format._serializer
+
+ @needs_read_lock
+ def _all_possible_ids(self):
+ """Return all the possible revisions that we could find."""
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(
+ 3, "_all_possible_ids scales with size of history.")
+ return [key[-1] for key in self.inventories.keys()]
+
+ @needs_read_lock
+ def _all_revision_ids(self):
+ """Returns a list of all the revision ids in the repository.
+
+ These are in as much topological order as the underlying store can
+ present: for weaves, ghosts may lead to a lack of correctness until
+ the reweave updates the parents list.
+ """
+ return [key[-1] for key in self.revisions.keys()]
+
+ def _activate_new_inventory(self):
+ """Put a replacement inventory.new into use as inventories."""
+ # Copy the content across
+ t = self._transport
+ t.copy('inventory.new.weave', 'inventory.weave')
+ # delete the temp inventory
+ t.delete('inventory.new.weave')
+ # Check we can parse the new weave properly as a sanity check
+ self.inventories.keys()
+
+ def _backup_inventory(self):
+ t = self._transport
+ t.copy('inventory.weave', 'inventory.backup.weave')
+
+ def _temp_inventories(self):
+ t = self._transport
+ return self._format._get_inventories(t, self, 'inventory.new')
+
+ def get_commit_builder(self, branch, parents, config, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ self._check_ascii_revisionid(revision_id, self.get_commit_builder)
+ result = VersionedFileCommitBuilder(self, parents, config, timestamp,
+ timezone, committer, revprops, revision_id, lossy=lossy)
+ self.start_write_group()
+ return result
+
+ @needs_read_lock
+ def get_revision(self, revision_id):
+ """Return the Revision object for a named revision"""
+ r = self.get_revision_reconcile(revision_id)
+ return r
+
+ def _inventory_add_lines(self, revision_id, parents, lines,
+ check_content=True):
+ """Store lines in inv_vf and return the sha1 of the inventory."""
+ present_parents = self.get_graph().get_parent_map(parents)
+ final_parents = []
+ for parent in parents:
+ if parent in present_parents:
+ final_parents.append((parent,))
+ return self.inventories.add_lines((revision_id,), final_parents, lines,
+ check_content=check_content)[0]
+
+
+class PreSplitOutRepositoryFormat(VersionedFileRepositoryFormat):
+ """Base class for the pre split out repository formats."""
+
+ rich_root_data = False
+ supports_tree_reference = False
+ supports_ghosts = False
+ supports_external_lookups = False
+ supports_chks = False
+ supports_nesting_repositories = True
+ _fetch_order = 'topological'
+ _fetch_reconcile = True
+ fast_deltas = False
+ supports_leaving_lock = False
+ # XXX: This is an old format that we don't support full checking on, so
+ # just claim that checking for this inconsistency is not required.
+ revision_graph_can_have_wrong_parents = False
+
+ def initialize(self, a_bzrdir, shared=False, _internal=False):
+ """Create a weave repository."""
+ if shared:
+ raise errors.IncompatibleFormat(self, a_bzrdir._format)
+
+ if not _internal:
+ # always initialized when the bzrdir is.
+ return self.open(a_bzrdir, _found=True)
+
+ # Create an empty weave
+ sio = StringIO()
+ weavefile.write_weave_v5(weave.Weave(), sio)
+ empty_weave = sio.getvalue()
+
+ trace.mutter('creating repository in %s.', a_bzrdir.transport.base)
+
+ # FIXME: RBC 20060125 don't peek under the covers
+ # NB: no need to escape relative paths that are url safe.
+ control_files = lockable_files.LockableFiles(a_bzrdir.transport,
+ 'branch-lock', lockable_files.TransportLock)
+ control_files.create_lock()
+ control_files.lock_write()
+ transport = a_bzrdir.transport
+ try:
+ transport.mkdir_multi(['revision-store', 'weaves'],
+ mode=a_bzrdir._get_dir_mode())
+ transport.put_bytes_non_atomic('inventory.weave', empty_weave,
+ mode=a_bzrdir._get_file_mode())
+ finally:
+ control_files.unlock()
+ repository = self.open(a_bzrdir, _found=True)
+ self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+ return repository
+
+ def open(self, a_bzrdir, _found=False):
+ """See RepositoryFormat.open()."""
+ if not _found:
+ # we are being called directly and must probe.
+ raise NotImplementedError
+
+ repo_transport = a_bzrdir.get_repository_transport(None)
+ result = AllInOneRepository(_format=self, a_bzrdir=a_bzrdir)
+ result.revisions = self._get_revisions(repo_transport, result)
+ result.signatures = self._get_signatures(repo_transport, result)
+ result.inventories = self._get_inventories(repo_transport, result)
+ result.texts = self._get_texts(repo_transport, result)
+ result.chk_bytes = None
+ return result
+
+ def is_deprecated(self):
+ return True
+
+
+class RepositoryFormat4(PreSplitOutRepositoryFormat):
+ """Bzr repository format 4.
+
+ This repository format has:
+ - flat stores
+ - TextStores for texts, inventories, revisions.
+
+ This format is deprecated: it indexes texts using a text id which is
+ removed in format 5; initialization and write support for this format
+ has been removed.
+ """
+
+ supports_funky_characters = False
+
+ _matchingbzrdir = weave_bzrdir.BzrDirFormat4()
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Repository format 4"
+
+ def initialize(self, url, shared=False, _internal=False):
+ """Format 4 branches cannot be created."""
+ raise errors.UninitializableFormat(self)
+
+ def is_supported(self):
+ """Format 4 is not supported.
+
+ It is not supported because the model changed from 4 to 5 and the
+ conversion logic is expensive - so doing it on the fly was not
+ feasible.
+ """
+ return False
+
+ def _get_inventories(self, repo_transport, repo, name='inventory'):
+ # No inventories store written so far.
+ return None
+
+ def _get_revisions(self, repo_transport, repo):
+ from bzrlib.plugins.weave_fmt.xml4 import serializer_v4
+ return RevisionTextStore(repo_transport.clone('revision-store'),
+ serializer_v4, True, versionedfile.PrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_signatures(self, repo_transport, repo):
+ return SignatureTextStore(repo_transport.clone('revision-store'),
+ False, versionedfile.PrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_texts(self, repo_transport, repo):
+ return None
+
+
+class RepositoryFormat5(PreSplitOutRepositoryFormat):
+ """Bzr control format 5.
+
+ This repository format has:
+ - weaves for file texts and inventory
+ - flat stores
+ - TextStores for revisions and signatures.
+ """
+
+ _versionedfile_class = weave.WeaveFile
+ _matchingbzrdir = weave_bzrdir.BzrDirFormat5()
+ supports_funky_characters = False
+
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Weave repository format 5"
+
+ def network_name(self):
+ """The network name for this format is the control dirs disk label."""
+ return self._matchingbzrdir.get_format_string()
+
+ def _get_inventories(self, repo_transport, repo, name='inventory'):
+ mapper = versionedfile.ConstantMapper(name)
+ return versionedfile.ThunkedVersionedFiles(repo_transport,
+ weave.WeaveFile, mapper, repo.is_locked)
+
+ def _get_revisions(self, repo_transport, repo):
+ return RevisionTextStore(repo_transport.clone('revision-store'),
+ xml5.serializer_v5, False, versionedfile.PrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_signatures(self, repo_transport, repo):
+ return SignatureTextStore(repo_transport.clone('revision-store'),
+ False, versionedfile.PrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_texts(self, repo_transport, repo):
+ mapper = versionedfile.PrefixMapper()
+ base_transport = repo_transport.clone('weaves')
+ return versionedfile.ThunkedVersionedFiles(base_transport,
+ weave.WeaveFile, mapper, repo.is_locked)
+
+
+class RepositoryFormat6(PreSplitOutRepositoryFormat):
+ """Bzr control format 6.
+
+ This repository format has:
+ - weaves for file texts and inventory
+ - hash subdirectory based stores.
+ - TextStores for revisions and signatures.
+ """
+
+ _versionedfile_class = weave.WeaveFile
+ _matchingbzrdir = weave_bzrdir.BzrDirFormat6()
+ supports_funky_characters = False
+
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Weave repository format 6"
+
+ def network_name(self):
+ """The network name for this format is the control dirs disk label."""
+ return self._matchingbzrdir.get_format_string()
+
+ def _get_inventories(self, repo_transport, repo, name='inventory'):
+ mapper = versionedfile.ConstantMapper(name)
+ return versionedfile.ThunkedVersionedFiles(repo_transport,
+ weave.WeaveFile, mapper, repo.is_locked)
+
+ def _get_revisions(self, repo_transport, repo):
+ return RevisionTextStore(repo_transport.clone('revision-store'),
+ xml5.serializer_v5, False, versionedfile.HashPrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_signatures(self, repo_transport, repo):
+ return SignatureTextStore(repo_transport.clone('revision-store'),
+ False, versionedfile.HashPrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_texts(self, repo_transport, repo):
+ mapper = versionedfile.HashPrefixMapper()
+ base_transport = repo_transport.clone('weaves')
+ return versionedfile.ThunkedVersionedFiles(base_transport,
+ weave.WeaveFile, mapper, repo.is_locked)
+
+
+class RepositoryFormat7(MetaDirVersionedFileRepositoryFormat):
+ """Bzr repository 7.
+
+ This repository format has:
+ - weaves for file texts and inventory
+ - hash subdirectory based stores.
+ - TextStores for revisions and signatures.
+ - a format marker of its own
+ - an optional 'shared-storage' flag
+ - an optional 'no-working-trees' flag
+ """
+
+ _versionedfile_class = weave.WeaveFile
+ supports_ghosts = False
+ supports_chks = False
+ supports_funky_characters = False
+ revision_graph_can_have_wrong_parents = False
+
+ _fetch_order = 'topological'
+ _fetch_reconcile = True
+ fast_deltas = False
+
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar-NG Repository format 7"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Weave repository format 7"
+
+ def _get_inventories(self, repo_transport, repo, name='inventory'):
+ mapper = versionedfile.ConstantMapper(name)
+ return versionedfile.ThunkedVersionedFiles(repo_transport,
+ weave.WeaveFile, mapper, repo.is_locked)
+
+ def _get_revisions(self, repo_transport, repo):
+ return RevisionTextStore(repo_transport.clone('revision-store'),
+ xml5.serializer_v5, True, versionedfile.HashPrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_signatures(self, repo_transport, repo):
+ return SignatureTextStore(repo_transport.clone('revision-store'),
+ True, versionedfile.HashPrefixMapper(),
+ repo.is_locked, repo.is_write_locked)
+
+ def _get_texts(self, repo_transport, repo):
+ mapper = versionedfile.HashPrefixMapper()
+ base_transport = repo_transport.clone('weaves')
+ return versionedfile.ThunkedVersionedFiles(base_transport,
+ weave.WeaveFile, mapper, repo.is_locked)
+
+ def initialize(self, a_bzrdir, shared=False):
+ """Create a weave repository.
+
+ :param shared: If true the repository will be initialized as a shared
+ repository.
+ """
+ # Create an empty weave
+ sio = StringIO()
+ weavefile.write_weave_v5(weave.Weave(), sio)
+ empty_weave = sio.getvalue()
+
+ trace.mutter('creating repository in %s.', a_bzrdir.transport.base)
+ dirs = ['revision-store', 'weaves']
+ files = [('inventory.weave', StringIO(empty_weave)),
+ ]
+ utf8_files = [('format', self.get_format_string())]
+
+ self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
+ return self.open(a_bzrdir=a_bzrdir, _found=True)
+
+ def open(self, a_bzrdir, _found=False, _override_transport=None):
+ """See RepositoryFormat.open().
+
+ :param _override_transport: INTERNAL USE ONLY. Allows opening the
+ repository at a slightly different url
+ than normal. I.e. during 'upgrade'.
+ """
+ if not _found:
+ format = RepositoryFormatMetaDir.find_format(a_bzrdir)
+ if _override_transport is not None:
+ repo_transport = _override_transport
+ else:
+ repo_transport = a_bzrdir.get_repository_transport(None)
+ control_files = lockable_files.LockableFiles(repo_transport,
+ 'lock', lockdir.LockDir)
+ result = WeaveMetaDirRepository(_format=self, a_bzrdir=a_bzrdir,
+ control_files=control_files)
+ result.revisions = self._get_revisions(repo_transport, result)
+ result.signatures = self._get_signatures(repo_transport, result)
+ result.inventories = self._get_inventories(repo_transport, result)
+ result.texts = self._get_texts(repo_transport, result)
+ result.chk_bytes = None
+ result._transport = repo_transport
+ return result
+
+ def is_deprecated(self):
+ return True
+
+
+class TextVersionedFiles(VersionedFiles):
+ """Just-a-bunch-of-files based VersionedFile stores."""
+
+ def __init__(self, transport, compressed, mapper, is_locked, can_write):
+ self._compressed = compressed
+ self._transport = transport
+ self._mapper = mapper
+ if self._compressed:
+ self._ext = '.gz'
+ else:
+ self._ext = ''
+ self._is_locked = is_locked
+ self._can_write = can_write
+
+ def add_lines(self, key, parents, lines):
+ """Add a revision to the store."""
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+ if not self._can_write():
+ raise errors.ReadOnlyError(self)
+ if '/' in key[-1]:
+ raise ValueError('bad idea to put / in %r' % (key,))
+ text = ''.join(lines)
+ if self._compressed:
+ text = tuned_gzip.bytes_to_gzip(text)
+ path = self._map(key)
+ self._transport.put_bytes_non_atomic(path, text, create_parent_dir=True)
+
+ def insert_record_stream(self, stream):
+ adapters = {}
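+ # Store each incoming record as a fulltext, adapting other storage
+ # kinds through the registered adapters; texts already present are
+ # silently skipped.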
+ for record in stream:
+ # Raise an error when a record is missing.
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent([record.key[0]], self)
+ # adapt to non-tuple interface
+ if record.storage_kind == 'fulltext':
+ self.add_lines(record.key, None,
+ osutils.split_lines(record.get_bytes_as('fulltext')))
+ else:
+ adapter_key = record.storage_kind, 'fulltext'
+ try:
+ adapter = adapters[adapter_key]
+ except KeyError:
+ adapter_factory = versionedfile.adapter_registry.get(adapter_key)
+ adapter = adapter_factory(self)
+ adapters[adapter_key] = adapter
+ lines = osutils.split_lines(adapter.get_bytes(
+ record, record.get_bytes_as(record.storage_kind)))
+ try:
+ self.add_lines(record.key, None, lines)
+ except errors.RevisionAlreadyPresent:
+ pass
+
+ def _load_text(self, key):
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
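+ # Look the key up under its mapped name; if a compressed store is
+ # missing the .gz file, fall back to an uncompressed file of the same
+ # name (old stores can contain a mixture, see the FIXME above).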
+ path = self._map(key)
+ try:
+ text = self._transport.get_bytes(path)
+ compressed = self._compressed
+ except errors.NoSuchFile:
+ if self._compressed:
+ # try without the .gz
+ path = path[:-3]
+ try:
+ text = self._transport.get_bytes(path)
+ compressed = False
+ except errors.NoSuchFile:
+ return None
+ else:
+ return None
+ if compressed:
+ text = gzip.GzipFile(mode='rb', fileobj=StringIO(text)).read()
+ return text
+
+ def _map(self, key):
+ return self._mapper.map(key) + self._ext
+
+
+class RevisionTextStore(TextVersionedFiles):
+ """Legacy thunk for format 4 repositories."""
+
+ def __init__(self, transport, serializer, compressed, mapper, is_locked,
+ can_write):
+ """Create a RevisionTextStore at transport with serializer."""
+ TextVersionedFiles.__init__(self, transport, compressed, mapper,
+ is_locked, can_write)
+ self._serializer = serializer
+
+ def _load_text_parents(self, key):
+ text = self._load_text(key)
+ if text is None:
+ return None, None
+ parents = self._serializer.read_revision_from_string(text).parent_ids
+ return text, tuple((parent,) for parent in parents)
+
+ def get_parent_map(self, keys):
+ result = {}
+ for key in keys:
+ parents = self._load_text_parents(key)[1]
+ if parents is None:
+ continue
+ result[key] = parents
+ return result
+
+ def get_known_graph_ancestry(self, keys):
+ """Get a KnownGraph instance with the ancestry of keys."""
+ keys = self.keys()
+ parent_map = self.get_parent_map(keys)
+ kg = _mod_graph.KnownGraph(parent_map)
+ return kg
+
+ def get_record_stream(self, keys, sort_order, include_delta_closure):
+ for key in keys:
+ text, parents = self._load_text_parents(key)
+ if text is None:
+ yield AbsentContentFactory(key)
+ else:
+ yield FulltextContentFactory(key, parents, None, text)
+
+ def keys(self):
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
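+ # Walk every file in the store, strip an optional .gz suffix, skip
+ # signature files, and unmap the remaining paths back to keys.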
+ relpaths = set()
+ for quoted_relpath in self._transport.iter_files_recursive():
+ relpath = urlutils.unquote(quoted_relpath)
+ path, ext = os.path.splitext(relpath)
+ if ext == '.gz':
+ relpath = path
+ if not relpath.endswith('.sig'):
+ relpaths.add(relpath)
+ paths = list(relpaths)
+ return set([self._mapper.unmap(path) for path in paths])
+
+
+class SignatureTextStore(TextVersionedFiles):
+ """Legacy thunk for format 4-7 repositories."""
+
+ def __init__(self, transport, compressed, mapper, is_locked, can_write):
+ TextVersionedFiles.__init__(self, transport, compressed, mapper,
+ is_locked, can_write)
+ self._ext = '.sig' + self._ext
+
+ def get_parent_map(self, keys):
+ result = {}
+ for key in keys:
+ text = self._load_text(key)
+ if text is None:
+ continue
+ result[key] = None
+ return result
+
+ def get_record_stream(self, keys, sort_order, include_delta_closure):
+ for key in keys:
+ text = self._load_text(key)
+ if text is None:
+ yield AbsentContentFactory(key)
+ else:
+ yield FulltextContentFactory(key, None, None, text)
+
+ def keys(self):
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+ relpaths = set()
+ for quoted_relpath in self._transport.iter_files_recursive():
+ relpath = urlutils.unquote(quoted_relpath)
+ path, ext = os.path.splitext(relpath)
+ if ext == '.gz':
+ relpath = path
+ if not relpath.endswith('.sig'):
+ continue
+ relpaths.add(relpath[:-4])
+ paths = list(relpaths)
+ return set([self._mapper.unmap(path) for path in paths])
+
+
+class InterWeaveRepo(InterSameDataRepository):
+ """Optimised code paths between Weave based repositories.
+ """
+
+ @classmethod
+ def _get_repo_format_to_test(self):
+ return RepositoryFormat7()
+
+ @staticmethod
+ def is_compatible(source, target):
+ """Be compatible with known Weave formats.
+
+ We don't test for the stores being of specific types because that
+ could lead to confusing results, and there is no need to be
+ overly general.
+ """
+ try:
+ return (isinstance(source._format, (RepositoryFormat5,
+ RepositoryFormat6,
+ RepositoryFormat7)) and
+ isinstance(target._format, (RepositoryFormat5,
+ RepositoryFormat6,
+ RepositoryFormat7)))
+ except AttributeError:
+ return False
+
+ @needs_write_lock
+ def copy_content(self, revision_id=None):
+ """See InterRepository.copy_content()."""
+ # weave specific optimised path:
+ try:
+ self.target.set_make_working_trees(self.source.make_working_trees())
+ except (errors.RepositoryUpgradeRequired, NotImplementedError):
+ pass
+ # FIXME do not peek!
+ if self.source._transport.listable():
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ self.target.texts.insert_record_stream(
+ self.source.texts.get_record_stream(
+ self.source.texts.keys(), 'topological', False))
+ pb.update('Copying inventory', 0, 1)
+ self.target.inventories.insert_record_stream(
+ self.source.inventories.get_record_stream(
+ self.source.inventories.keys(), 'topological', False))
+ self.target.signatures.insert_record_stream(
+ self.source.signatures.get_record_stream(
+ self.source.signatures.keys(),
+ 'unordered', True))
+ self.target.revisions.insert_record_stream(
+ self.source.revisions.get_record_stream(
+ self.source.revisions.keys(),
+ 'topological', True))
+ finally:
+ pb.finished()
+ else:
+ self.target.fetch(self.source, revision_id=revision_id)
+
+ @needs_read_lock
+ def search_missing_revision_ids(self,
+ revision_id=symbol_versioning.DEPRECATED_PARAMETER,
+ find_ghosts=True, revision_ids=None, if_present_ids=None,
+ limit=None):
+ """See InterRepository.search_missing_revision_ids()."""
+ # We want all revisions to satisfy revision_id in source,
+ # but we don't want to stat every file here and there.
+ # We want, then, all revisions the other end needs in order to
+ # satisfy revision_id checked, but not those that we have locally.
+ # so the first thing is to get a subset of the revisions to
+ # satisfy revision_id in source, and then eliminate those that
+ # we do already have.
+ # this is slow on high latency connection to self, but as this
+ # disk format scales terribly for push anyway due to rewriting
+ # inventory.weave, this is considered acceptable.
+ # - RBC 20060209
+ if symbol_versioning.deprecated_passed(revision_id):
+ symbol_versioning.warn(
+ 'search_missing_revision_ids(revision_id=...) was '
+ 'deprecated in 2.4. Use revision_ids=[...] instead.',
+ DeprecationWarning, stacklevel=2)
+ if revision_ids is not None:
+ raise AssertionError(
+ 'revision_ids is mutually exclusive with revision_id')
+ if revision_id is not None:
+ revision_ids = [revision_id]
+ del revision_id
+ source_ids_set = self._present_source_revisions_for(
+ revision_ids, if_present_ids)
+ # source_ids is the worst possible case we may need to pull.
+ # now we want to filter source_ids against what we actually
+ # have in target, but don't try to check for existence where we know
+ # we do not have a revision as that would be pointless.
+ target_ids = set(self.target._all_possible_ids())
+ possibly_present_revisions = target_ids.intersection(source_ids_set)
+ actually_present_revisions = set(
+ self.target._eliminate_revisions_not_present(possibly_present_revisions))
+ required_revisions = source_ids_set.difference(actually_present_revisions)
+ if revision_ids is not None:
+            # we used get_ancestry to determine source_ids, so we are assured
+            # that all revisions referenced are present, as they are installed
+            # in topological order, and the tip revision was validated by
+            # get_ancestry.
+ result_set = required_revisions
+ else:
+ # if we just grabbed the possibly available ids, then
+            # we only have an estimate of what's available and need to validate
+ # that against the revision records.
+ result_set = set(
+ self.source._eliminate_revisions_not_present(required_revisions))
+ if limit is not None:
+ topo_ordered = self.get_graph().iter_topo_order(result_set)
+ result_set = set(itertools.islice(topo_ordered, limit))
+ return self.source.revision_ids_to_search_result(result_set)
+
+
+InterRepository.register_optimiser(InterWeaveRepo)
+
+
+def get_extra_interrepo_test_combinations():
+ from bzrlib.repofmt import knitrepo
+ return [(InterRepository, RepositoryFormat5(),
+ knitrepo.RepositoryFormatKnit3())]
diff --git a/bzrlib/plugins/weave_fmt/test_bzrdir.py b/bzrlib/plugins/weave_fmt/test_bzrdir.py
new file mode 100644
index 0000000..2391aa4
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/test_bzrdir.py
@@ -0,0 +1,584 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the weave-era BzrDir formats.
+
+For interface contract tests, see tests/per_bzr_dir.
+"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ controldir,
+ errors,
+ repository,
+ upgrade,
+ urlutils,
+ workingtree,
+ )
+from bzrlib.osutils import (
+ getcwd,
+ )
+from bzrlib.tests.test_bundle import V4BundleTester
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+
+from bzrlib.plugins.weave_fmt.branch import (
+ BzrBranchFormat4,
+ )
+from bzrlib.plugins.weave_fmt.bzrdir import (
+ BzrDirFormat5,
+ BzrDirFormat6,
+ )
+
+
+class TestFormat5(TestCaseWithTransport):
+ """Tests specific to the version 5 bzrdir format."""
+
+ def test_same_lockfiles_between_tree_repo_branch(self):
+ # this checks that only a single lockfiles instance is created
+ # for format 5 objects
+ dir = BzrDirFormat5().initialize(self.get_url())
+ def check_dir_components_use_same_lock(dir):
+ ctrl_1 = dir.open_repository().control_files
+ ctrl_2 = dir.open_branch().control_files
+ ctrl_3 = dir.open_workingtree()._control_files
+ self.assertTrue(ctrl_1 is ctrl_2)
+ self.assertTrue(ctrl_2 is ctrl_3)
+ check_dir_components_use_same_lock(dir)
+ # and if we open it normally.
+ dir = controldir.ControlDir.open(self.get_url())
+ check_dir_components_use_same_lock(dir)
+
+ def test_can_convert(self):
+        # format 5 dirs are convertible
+ dir = BzrDirFormat5().initialize(self.get_url())
+ self.assertTrue(dir.can_convert_format())
+
+ def test_needs_conversion(self):
+ # format 5 dirs need a conversion if they are not the default,
+ # and they aren't
+ dir = BzrDirFormat5().initialize(self.get_url())
+ # don't need to convert it to itself
+ self.assertFalse(dir.needs_format_conversion(BzrDirFormat5()))
+ # do need to convert it to the current default
+ self.assertTrue(dir.needs_format_conversion(
+ bzrdir.BzrDirFormat.get_default_format()))
+
+
+class TestFormat6(TestCaseWithTransport):
+ """Tests specific to the version 6 bzrdir format."""
+
+ def test_same_lockfiles_between_tree_repo_branch(self):
+ # this checks that only a single lockfiles instance is created
+ # for format 6 objects
+ dir = BzrDirFormat6().initialize(self.get_url())
+ def check_dir_components_use_same_lock(dir):
+ ctrl_1 = dir.open_repository().control_files
+ ctrl_2 = dir.open_branch().control_files
+ ctrl_3 = dir.open_workingtree()._control_files
+ self.assertTrue(ctrl_1 is ctrl_2)
+ self.assertTrue(ctrl_2 is ctrl_3)
+ check_dir_components_use_same_lock(dir)
+ # and if we open it normally.
+ dir = controldir.ControlDir.open(self.get_url())
+ check_dir_components_use_same_lock(dir)
+
+ def test_can_convert(self):
+        # format 6 dirs are convertible
+ dir = BzrDirFormat6().initialize(self.get_url())
+ self.assertTrue(dir.can_convert_format())
+
+ def test_needs_conversion(self):
+        # format 6 dirs need a conversion if they are not the default.
+ dir = BzrDirFormat6().initialize(self.get_url())
+ self.assertTrue(dir.needs_format_conversion(
+ bzrdir.BzrDirFormat.get_default_format()))
+
+
+class TestBreakLockOldBranch(TestCaseWithTransport):
+
+ def test_break_lock_format_5_bzrdir(self):
+ # break lock on a format 5 bzrdir should just return
+ self.make_branch_and_tree('foo', format=BzrDirFormat5())
+ out, err = self.run_bzr('break-lock foo')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+
+_upgrade1_template = \
+ [
+ ('foo', 'new contents\n'),
+ ('.bzr/',),
+ ('.bzr/README',
+ 'This is a Bazaar control directory.\n'
+ 'Do not change any files in this directory.\n'
+ 'See http://bazaar.canonical.com/ for more information about Bazaar.\n'),
+ ('.bzr/branch-format', 'Bazaar-NG branch, format 0.0.4\n'),
+ ('.bzr/revision-history',
+ 'mbp@sourcefrog.net-20051004035611-176b16534b086b3c\n'
+ 'mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd\n'),
+ ('.bzr/merged-patches', ''),
+ ('.bzr/pending-merged-patches', ''),
+ ('.bzr/branch-name', ''),
+ ('.bzr/branch-lock', ''),
+ ('.bzr/pending-merges', ''),
+ ('.bzr/inventory',
+ '<inventory>\n'
+ '<entry file_id="foo-20051004035605-91e788d1875603ae" kind="file" name="foo" />\n'
+ '</inventory>\n'),
+ ('.bzr/stat-cache',
+ '### bzr hashcache v5\n'
+ 'foo// be9f309239729f69a6309e970ef24941d31e042c 13 1128398176 1128398176 303464 770\n'),
+ ('.bzr/text-store/',),
+ ('.bzr/text-store/foo-20051004035611-1591048e9dc7c2d4.gz',
+ '\x1f\x8b\x08\x00[\xfdAC\x02\xff\xcb\xcc\xcb,\xc9L\xccQH\xce\xcf+I\xcd+)\xe6\x02\x00\xdd\xcc\xf90\x11\x00\x00\x00'),
+ ('.bzr/text-store/foo-20051004035756-4081373d897c3453.gz',
+ '\x1f\x8b\x08\x00\xc4\xfdAC\x02\xff\xcbK-WH\xce\xcf+I\xcd+)\xe6\x02\x00g\xc3\xdf\xc9\r\x00\x00\x00'),
+ ('.bzr/inventory-store/',),
+ ('.bzr/inventory-store/mbp@sourcefrog.net-20051004035611-176b16534b086b3c.gz',
+ '\x1f\x8b\x08\x00[\xfdAC\x02\xffm\x8f\xcd\n\xc20\x10\x84\xef>E\xc8\xbdt7?M\x02\xad\xaf"\xa1\x99`P[\xa8E\xacOo\x14\x05\x0f\xdef\xe1\xfbv\x98\xbeL7L\xeb\xbcl\xfb]_\xc3\xb2\x89\\\xce8\x944\xc8<\xcf\x8d"\xb2LdH\xdb\x8el\x13\x18\xce\xfb\xc4\xde\xd5SGHq*\xd3\x0b\xad\x8e\x14S\xbc\xe0\xadI\xb1\xe2\xbe\xfe}\xc2\xdc\xb0\rL\xc6#\xa4\xd1\x8d*\x99\x0f}=F\x1e$8G\x9d\xa0\x02\xa1rP9\x01c`FV\xda1qg\x98"\x02}\xa5\xf2\xa8\x95\xec\xa4h\xeb\x80\xf6g\xcd\x13\xb3\x01\xcc\x98\xda\x00\x00\x00'),
+ ('.bzr/inventory-store/mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd.gz',
+ '\x1f\x8b\x08\x00\xc4\xfdAC\x02\xffm\x8f\xc1\n\xc20\x10D\xef~E\xc8\xbd\xb8\x9bM\x9a,\xb4\xfe\x8a\xc4f\x83Am\xa1\x16\xb1~\xbdQ\x14<x\x9b\x81y3LW\xc6\x9b\x8c\xcb4\xaf\xbbMW\xc5\xbc\xaa\\\xce\xb2/\xa9\xd7y\x9a\x1a\x03\xe0\x10\xc0\x02\xb9\x16\\\xc3(>\x84\x84\xc1WKQ\xb4:\x95\xf1\x15\xad\x8cVc\xbc\xc8\x1b\xd3j\x91\xfb\xf2\xaf\xa4r\x8d\x85\x80\xe4)\x05\xf6\x03YG\x9f\xf4\xf5\x18\xb1\xd7\x07\xe1L\xc0\x86\xd8\x1b\xce-\xc7\xb6:a\x0f\x92\x8de\x8b\x89P\xc0\x9a\xe1\x0b\x95G\x9d\xc4\xda\xb1\xad\x07\xb6?o\x9e\xb5\xff\xf0\xf9\xda\x00\x00\x00'),
+ ('.bzr/revision-store/',),
+ ('.bzr/revision-store/mbp@sourcefrog.net-20051004035611-176b16534b086b3c.gz',
+ '\x1f\x8b\x08\x00[\xfdAC\x02\xff\x9d\x8eKj\xc30\x14E\xe7^\x85\xd0 \xb3$\xefI\xd1\x8f\xd8\xa6\x1b(t\x07E?\xbb\x82H\n\xb2\x1ahW\xdfB1\x14:\xeb\xf4r\xee\xbdgl\xf1\x91\xb6T\x0b\xf15\xe7\xd4{l\x13}\xb6\xad\xa7B^j\xbd\x91\xc3\xad_\xb3\xbb?m\xf5\xbd\xf9\xb8\xb4\xba\x9eJ\xec\x87\xb5_)I\xe5\x11K\xaf\xed\xe35\x85\x89\xfe\xa5\x8e\x0c@ \xc0\x05\xb8\x90\x88GT\xd2\xa1\x14\xfc\xe2@K\xc7\xfd\xef\x85\xed\xcd\xe2D\x95\x8d\x1a\xa47<\x02c2\xb0 \xbc\xd0\x8ay\xa3\xbcp\x8a\x83\x12A3\xb7XJv\xef\x7f_\xf7\x94\xe3\xd6m\xbeO\x14\x91in4*<\x812\x88\xc60\xfc\x01>k\x89\x13\xe5\x12\x00\xe8<\x8c\xdf\x8d\xcd\xaeq\xb6!\x90\xa5\xd6\xf1\xbc\x07\xc3x\xde\x85\xe6\xe1\x0b\xc8\x8a\x98\x03T\x01\x00\x00'),
+ ('.bzr/revision-store/mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd.gz',
+ '\x1f\x8b\x08\x00\xc4\xfdAC\x02\xff\x9d\x90Kj\x031\x0c\x86\xf79\xc5\xe0Ev\xe9\xc8o\x9b\xcc\x84^\xa0\xd0\x1b\x14\xbf&5d\xec`\xbb\x81\xf6\xf45\x84\xa4\x81\xaeZ\xa1\x85\x84^\xdf\xaf\xa9\x84K\xac1\xa7\xc1\xe5u\x8d\xad\x852\xa3\x17SZL\xc3k\xce\xa7a{j\xfb\xd5\x9e\x9fk\xfe(.,%\x1f\x9fRh\xdbc\xdb\xa3!\xa6KH-\x97\xcf\xb7\xe8g\xf4\xbbkG\x008\x06`@\xb9\xe4bG(_\x88\x95\xde\xf9n\xca\xfb\xc7\r\xf5\xdd\xe0\x19\xa9\x85)\x81\xf5"\xbd\x04j\xb8\x02b\xa8W\\\x0b\xc9\x14\xf4\xbc\xbb\xd7\xd6H4\xdc\xb8\xff}\xba\xc55\xd4f\xd6\xf3\x8c0&\x8ajE\xa4x\xe2@\xa5\xa6\x9a\xf3k\xc3WNaFT\x00\x00:l\xa6>Q\xcd1\x1cjp9\xf9;\xc34\xde\n\x9b\xe9lJWT{t\',a\xf9\x0b\xae\xc0x\x87\xa5\xb0Xp\xca,(a\xa9{\xd0{}\xd4\x12\x04(\xc5\xbb$\xc5$V\xceaI\x19\x01\xa2\x1dh\xed\x82d\x8c.\xccr@\xc3\xd8Q\xc6\x1f\xaa\xf1\xb6\xe8\xb0\xf9\x06QR\r\xf9\xfc\x01\x00\x00')]
+
+
+_ghost_template = [
+ ( './foo',
+ 'hello\n'
+ ),
+ ( './.bzr/', ),
+ ( './.bzr/README',
+ 'This is a Bazaar control directory.\n'
+ 'Do not change any files in this directory.\n'
+ 'See http://bazaar.canonical.com/ for more information about Bazaar.\n'
+ ),
+ ( './.bzr/branch-format',
+ 'Bazaar-NG branch, format 0.0.4\n'
+ ),
+ ( './.bzr/branch-lock',
+ ''
+ ),
+ ( './.bzr/branch-name',
+ ''
+ ),
+ ( './.bzr/inventory',
+ '<inventory>\n'
+ '<entry file_id="foo-20051004104918-0379cb7c76354cde" kind="file" name="foo" />\n'
+ '</inventory>\n'
+ ),
+ ( './.bzr/merged-patches',
+ ''
+ ),
+ ( './.bzr/pending-merged-patches',
+ ''
+ ),
+ ( './.bzr/pending-merges',
+ ''
+ ),
+ ( './.bzr/revision-history',
+ 'mbp@sourcefrog.net-20051004104921-a98be2278dd30b7b\n'
+ 'mbp@sourcefrog.net-20051004104937-c9b7a7bfcc0bb22d\n'
+ ),
+ ( './.bzr/stat-cache',
+ '### bzr hashcache v5\n'
+ 'foo// f572d396fae9206628714fb2ce00f72e94f2258f 6 1128422956 1128422956 306900 770\n'
+ ),
+ ( './.bzr/text-store/', ),
+ ( './.bzr/text-store/foo-20051004104921-8de8118a71be45ba.gz',
+ '\x1f\x8b\x08\x081^BC\x00\x03foo-20051004104921-8de8118a71be45ba\x00\xcbH\xcd\xc9\xc9\xe7\x02\x00 0:6\x06\x00\x00\x00'
+ ),
+ ( './.bzr/inventory-store/', ),
+ ( './.bzr/inventory-store/mbp@sourcefrog.net-20051004104921-a98be2278dd30b7b.gz',
+ '\x1f\x8b\x08\x081^BC\x00\x03mbp@sourcefrog.net-20051004104921-a98be2278dd30b7b\x00m\x8f\xcb\n'
+ '\xc20\x10E\xf7~E\xc8\xbe83\xcd\x13\xaa\xbf"yL0\xa8-\xd4"\xd6\xaf7\x8a\x82\x0bw\xb38\xe7\xde;C\x1do<.\xd3\xbc\xee7C;\xe6U\x94z\xe6C\xcd;Y\xa6\xa9#\x00\x8d\x00\n'
+ 'Ayt\x1d\xf4\xd6\xa7h\x935\xbdV)\xb3\x14\xa7:\xbe\xd0\xe6H1\x86\x0b\xbf5)\x16\xbe/\x7fC\x08;\x97\xd9!\xba`1\xb2\xd21|\xe8\xeb1`\xe3\xb5\xa5\xdc{S\x02{\x02c\xc8YT%Rb\x80b\x89\xbd*D\xda\x95\xafT\x1f\xad\xd2H\xb1m\xfb\xb7?\xcf<\x01W}\xb5\x8b\xd9\x00\x00\x00'
+ ),
+ ( './.bzr/inventory-store/mbp@sourcefrog.net-20051004104937-c9b7a7bfcc0bb22d.gz',
+ '\x1f\x8b\x08\x08A^BC\x00\x03mbp@sourcefrog.net-20051004104937-c9b7a7bfcc0bb22d\x00m\x8f\xcb\n'
+ '\xc20\x10E\xf7~E\xc8\xbe83\xcd\x13\xaa\xbf"yL0\xa8-\xd4"\xd6\xaf7\x8a\x82\x0bw\xb38\xe7\xde;C\x1do<.\xd3\xbc\xee7C;\xe6U\x94z\xe6C\xcd;Y\xa6\xa9#\x00\x8d\x00\n'
+ 'Ayt\x1d\xf4\xd6\xa7h\x935\xbdV)\xb3\x14\xa7:\xbe\xd0\xe6H1\x86\x0b\xbf5)\x16\xbe/\x7fC\x08;\x97\xd9!\xba`1\xb2\xd21|\xe8\xeb1`\xe3\xb5\xa5\xdc{S\x02{\x02c\xc8YT%Rb\x80b\x89\xbd*D\xda\x95\xafT\x1f\xad\xd2H\xb1m\xfb\xb7?\xcf<\x01W}\xb5\x8b\xd9\x00\x00\x00'
+ ),
+ ( './.bzr/revision-store/', ),
+ ( './.bzr/revision-store/mbp@sourcefrog.net-20051004104921-a98be2278dd30b7b.gz',
+ '\x1f\x8b\x08\x081^BC\x00\x03mbp@sourcefrog.net-20051004104921-a98be2278dd30b7b\x00\x9d\x8eMj\xc30\x14\x84\xf7>\x85\xd0"\xbb$\xef\xc9\xb6,\x11\xdb\xf4\x02\x85\xde\xa0\xe8\xe7\xd9\x11\xc4R\x90\xd4@{\xfa\x06\x8a\xa1\xd0]\x97\x03\xdf\xcc|c\xa6G(!E\xe6\xd2\xb6\x85Z)O\xfc\xd5\xe4\x1a"{K\xe9\xc6\x0e\xb7z\xd9\xec\xfd\xa5\xa4\x8f\xech\xc9i=E\xaa\x87\xb5^8\x0b\xf1A\xb1\xa6\xfc\xf9\x1e\xfc\xc4\xffRG\x01\xd0#@\x87\xd0i\x81G\xa3\x95%!\x06\xe5}\x0bv\xb0\xbf\x17\xca\xd5\xe0\xc4-\xa0\xb1\x8b\xb6`\xc0I\xa4\xc5\xf4\x9el\xef\x95v [\x94\xcf\x8e\xd5\xcay\xe4l\xf7\xfe\xf7u\r'
+ '\x1b\x95j\xb6\xfb\xc4\x11\x85\xea\x84\xd0\x12O\x03t\x83D\xad\xc4\x0f\xf0\x95"M\xbc\x95\x00\xc0\xe7f|6\x8aYi^B.u<\xef\xb1\x19\xcf\xbb\xce\xdc|\x038=\xc7\xe6R\x01\x00\x00'
+ ),
+ ( './.bzr/revision-store/mbp@sourcefrog.net-20051004104937-c9b7a7bfcc0bb22d.gz',
+ '\x1f\x8b\x08\x08A^BC\x00\x03mbp@sourcefrog.net-20051004104937-c9b7a7bfcc0bb22d\x00\x9d\x90\xc1j\xc30\x0c\x86\xef}\n'
+ "\xe3Coie'\xb1c\x9a\x94\xbe\xc0`o0,[N\x03M\\\x1c\xafe{\xfae\x94n\x85\xc1`;Y\x88O\xd2\xff\xb9Mt\x19\xe6!N\xcc\xc5q\x1cr\xa6\xd4\xf1'\x9b\xf20\xb1\xe7\x18Ol}\xca\xbb\x11\xcf\x879\xbe&G!\xc5~3Q^\xf7y\xc7\xd90]h\xca1\xbd\xbd\x0c\xbe\xe3?\xa9B\x02\xd4\x02\xa0\x12P\x99R\x17\xce\xa0\xb6\x1a\x83s\x80(\xa5\x7f\xdc0\x1f\xad\xe88\x82\xb0\x18\x0c\x82\x05\xa7\x04\x05[{\xc2\xda7\xc6\x81*\x85B\x8dh\x1a\xe7\x05g\xf7\xdc\xff>\x9d\x87\x91\xe6l\xc7s\xc7\x85\x90M%\xa5\xd1z#\x85\xa8\x9b\x1a\xaa\xfa\x06\xbc\xc7\x89:^*\x00\xe0\xfbU\xbbL\xcc\xb6\xa7\xfdH\xa9'\x16\x03\xeb\x8fq\xce\xed\xf6\xde_\xb5g\x9b\x16\xa1y\xa9\xbe\x02&\n"
+ '\x7fJ+EaM\x83$\xa5n\xbc/a\x91~\xd0\xbd\xfd\x135\n'
+ '\xd0\x9a`\x0c*W\x1aR\xc1\x94du\x08(\t\xb0\x91\xdeZ\xa3\x9cU\x9cm\x7f\x8dr\x1d\x10Ot\xb8\xc6\xcf\xa7\x907|\xfb-\xb1\xbd\xd3\xfb\xd5\x07\xeeD\xee\x08*\x02\x00\x00'
+ ),
+]
+
+_upgrade_dir_template = [
+ ( './.bzr/', ),
+ ( './.bzr/README',
+ 'This is a Bazaar control directory.\n'
+ 'Do not change any files in this directory.\n'
+ 'See http://bazaar.canonical.com/ for more information about Bazaar.\n'
+ ),
+ ( './.bzr/branch-format',
+ 'Bazaar-NG branch, format 0.0.4\n'
+ ),
+ ( './.bzr/branch-lock',
+ ''
+ ),
+ ( './.bzr/branch-name',
+ ''
+ ),
+ ( './.bzr/inventory',
+ '<inventory>\n'
+ '<entry file_id="dir-20051005095101-da1441ea3fa6917a" kind="directory" name="dir" />\n'
+ '</inventory>\n'
+ ),
+ ( './.bzr/merged-patches',
+ ''
+ ),
+ ( './.bzr/pending-merged-patches',
+ ''
+ ),
+ ( './.bzr/pending-merges',
+ ''
+ ),
+ ( './.bzr/revision-history',
+ 'robertc@robertcollins.net-20051005095108-6065fbd8e7d8617e\n'
+ ),
+ ( './.bzr/stat-cache',
+ '### bzr hashcache v5\n'
+ ),
+ ( './.bzr/text-store/', ),
+ ( './.bzr/inventory-store/', ),
+ ( './.bzr/inventory-store/robertc@robertcollins.net-20051005095108-6065fbd8e7d8617e.gz',
+ '\x1f\x8b\x08\x00\x0c\xa2CC\x02\xff\xb3\xc9\xcc+K\xcd+\xc9/\xaa\xb4\xe3\xb2\x012\x8a*\x15\xd22sR\xe33Sl\x95R2\x8bt\x8d\x0c\x0cL\r'
+ "\x81\xd8\xc0\x12H\x19\xea\xa6$\x1a\x9a\x98\x18\xa6&\x1a\xa7%\x9aY\x1a\x9a'*)dg\xe6A\x94\xa6&\x83LQR\xc8K\xccM\x05\x0b()\xe8\x03\xcd\xd4G\xb2\x00\x00\xc2<\x94\xb1m\x00\x00\x00"
+ ),
+ ( './.bzr/revision-store/', ),
+ ( './.bzr/revision-store/robertc@robertcollins.net-20051005095108-6065fbd8e7d8617e.gz',
+ '\x1f\x8b\x08\x00\x0c\xa2CC\x02\xff\xa5OKj\xc30\x14\xdc\xfb\x14B\x8b\xec\x92<I\xd6\xc7\xc42\x85\xde\xa0\x17(\xb6\xf4\x9c\n'
+ 'l\xa9H"\x90\x9c\xbe\xa6\xa9\xa1\x9b\xae\xbax\x0c\xcc\xe71\xd3g\xbc\x85\x12R$.\xadk\xa8\x15\xb3\xa5oi\xc2\\\xc9kZ\x96\x10\x0b9,\xf5\x92\xbf)\xf7\xf2\x83O\xe5\x14\xb1\x1e\xae\xf5BI\x887\x8c5\xe5\xfb{\xf0\x96\xfei>r\x00\xc9\xb6\x83n\x03sT\xa0\xe4<y\x83\xda\x1b\xc54\xfe~T>Ff\xe9\xcc:\xdd\x8e\xa6E\xc7@\xa2\x82I\xaaNL\xbas\\313)\x00\xb9\xe6\xe0(\xd9\x87\xfc\xb7A\r'
+ "+\x96:\xae\x9f\x962\xc6\x8d\x04i\x949\x01\x97R\xb7\x1d\x17O\xc3#E\xb4T(\x00\xa0C\xd3o\x892^q\x18\xbd'>\xe4\xfe\xbc\x13M\x7f\xde{\r"
+ '\xcd\x17\x85\xea\xba\x03l\x01\x00\x00'
+ ),
+ ( './dir/', ),
+]
+
+
+class TestUpgrade(TestCaseWithTransport):
+
+ def test_upgrade_v6_to_meta_no_workingtree(self):
+        # Some format 6 branches do not have checkout files. Upgrading
+        # such a branch to metadir must not set up a working tree.
+ self.build_tree_contents(_upgrade1_template)
+ upgrade.upgrade('.', BzrDirFormat6())
+ t = self.get_transport('.')
+ t.delete_multi(['.bzr/pending-merges', '.bzr/inventory'])
+ self.assertFalse(t.has('.bzr/stat-cache'))
+ t.delete_tree('backup.bzr.~1~')
+ # At this point, we have a format6 branch without checkout files.
+ upgrade.upgrade('.', bzrdir.BzrDirMetaFormat1())
+ # The upgrade should not have set up a working tree.
+ control = controldir.ControlDir.open('.')
+ self.assertFalse(control.has_workingtree())
+ # We have covered the scope of this test, we may as well check that
+ # upgrade has not eaten our data, even if it's a bit redundant with
+ # other tests.
+ self.assertIsInstance(control._format, bzrdir.BzrDirMetaFormat1)
+ b = control.open_branch()
+ self.addCleanup(b.lock_read().unlock)
+ self.assertEquals(b._revision_history(),
+ ['mbp@sourcefrog.net-20051004035611-176b16534b086b3c',
+ 'mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd'])
+
+ def test_upgrade_simple(self):
+ """Upgrade simple v0.0.4 format to latest format"""
+ eq = self.assertEquals
+ self.build_tree_contents(_upgrade1_template)
+ upgrade.upgrade(u'.')
+ control = controldir.ControlDir.open('.')
+ b = control.open_branch()
+ # tsk, peeking under the covers.
+ self.assertIsInstance(
+ control._format,
+ bzrdir.BzrDirFormat.get_default_format().__class__)
+ self.addCleanup(b.lock_read().unlock)
+ rh = b._revision_history()
+ eq(rh,
+ ['mbp@sourcefrog.net-20051004035611-176b16534b086b3c',
+ 'mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd'])
+ rt = b.repository.revision_tree(rh[0])
+ foo_id = 'foo-20051004035605-91e788d1875603ae'
+ rt.lock_read()
+ try:
+ eq(rt.get_file_text(foo_id), 'initial contents\n')
+ finally:
+ rt.unlock()
+ rt = b.repository.revision_tree(rh[1])
+ rt.lock_read()
+ try:
+ eq(rt.get_file_text(foo_id), 'new contents\n')
+ finally:
+ rt.unlock()
+ # check a backup was made:
+ backup_dir = 'backup.bzr.~1~'
+ t = self.get_transport('.')
+ t.stat(backup_dir)
+ t.stat(backup_dir + '/README')
+ t.stat(backup_dir + '/branch-format')
+ t.stat(backup_dir + '/revision-history')
+ t.stat(backup_dir + '/merged-patches')
+ t.stat(backup_dir + '/pending-merged-patches')
+ t.stat(backup_dir + '/pending-merges')
+ t.stat(backup_dir + '/branch-name')
+ t.stat(backup_dir + '/branch-lock')
+ t.stat(backup_dir + '/inventory')
+ t.stat(backup_dir + '/stat-cache')
+ t.stat(backup_dir + '/text-store')
+ t.stat(backup_dir + '/text-store/foo-20051004035611-1591048e9dc7c2d4.gz')
+ t.stat(backup_dir + '/text-store/foo-20051004035756-4081373d897c3453.gz')
+ t.stat(backup_dir + '/inventory-store/')
+ t.stat(backup_dir + '/inventory-store/mbp@sourcefrog.net-20051004035611-176b16534b086b3c.gz')
+ t.stat(backup_dir + '/inventory-store/mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd.gz')
+ t.stat(backup_dir + '/revision-store/')
+ t.stat(backup_dir + '/revision-store/mbp@sourcefrog.net-20051004035611-176b16534b086b3c.gz')
+ t.stat(backup_dir + '/revision-store/mbp@sourcefrog.net-20051004035756-235f2b7dcdddd8dd.gz')
+
+ def test_upgrade_with_ghosts(self):
+ """Upgrade v0.0.4 tree containing ghost references.
+
+ That is, some of the parents of revisions mentioned in the branch
+ aren't present in the branch's storage.
+
+ This shouldn't normally happen in branches created entirely in
+ bzr, but can happen in branches imported from baz and arch, or from
+ other systems, where the importer knows about a revision but not
+ its contents."""
+ eq = self.assertEquals
+ self.build_tree_contents(_ghost_template)
+ upgrade.upgrade(u'.')
+ b = branch.Branch.open(u'.')
+ self.addCleanup(b.lock_read().unlock)
+ revision_id = b._revision_history()[1]
+ rev = b.repository.get_revision(revision_id)
+ eq(len(rev.parent_ids), 2)
+ eq(rev.parent_ids[1], 'wibble@wobble-2')
+
+ def test_upgrade_makes_dir_weaves(self):
+ self.build_tree_contents(_upgrade_dir_template)
+ old_repodir = controldir.ControlDir.open_unsupported('.')
+ old_repo_format = old_repodir.open_repository()._format
+ upgrade.upgrade('.')
+ # this is the path to the literal file. As format changes
+ # occur it needs to be updated. FIXME: ask the store for the
+ # path.
+ repo = repository.Repository.open('.')
+ # it should have changed the format
+ self.assertNotEqual(old_repo_format.__class__, repo._format.__class__)
+ # and we should be able to read the names for the file id
+ # 'dir-20051005095101-da1441ea3fa6917a'
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ text_keys = repo.texts.keys()
+ dir_keys = [key for key in text_keys if key[0] ==
+ 'dir-20051005095101-da1441ea3fa6917a']
+ self.assertNotEqual([], dir_keys)
+
+ def test_upgrade_to_meta_sets_workingtree_last_revision(self):
+ self.build_tree_contents(_upgrade_dir_template)
+ upgrade.upgrade('.', bzrdir.BzrDirMetaFormat1())
+ tree = workingtree.WorkingTree.open('.')
+ self.addCleanup(tree.lock_read().unlock)
+ self.assertEqual([tree.branch._revision_history()[-1]],
+ tree.get_parent_ids())
+
+
+class SFTPBranchTest(TestCaseWithSFTPServer):
+ """Test some stuff when accessing a bzr Branch over sftp"""
+
+ def test_lock_file(self):
+ # old format branches use a special lock file on sftp.
+ b = self.make_branch('', format=BzrDirFormat6())
+ b = branch.Branch.open(self.get_url())
+ self.assertPathExists('.bzr/')
+ self.assertPathExists('.bzr/branch-format')
+ self.assertPathExists('.bzr/branch-lock')
+
+ self.assertPathDoesNotExist('.bzr/branch-lock.write-lock')
+ b.lock_write()
+ self.assertPathExists('.bzr/branch-lock.write-lock')
+ b.unlock()
+ self.assertPathDoesNotExist('.bzr/branch-lock.write-lock')
+
+
+class TestInfo(TestCaseWithTransport):
+
+ def test_info_locking_oslocks(self):
+ if sys.platform == "win32":
+ self.skip("don't use oslocks on win32 in unix manner")
+        # This test exercises the old (all-in-one, OS lock using) behaviour,
+        # which simply cannot work on windows (and is indeed why we changed
+        # our design). As such, don't try to remove the
+        # thisFailsStrictLockCheck call here.
+ self.thisFailsStrictLockCheck()
+
+ tree = self.make_branch_and_tree('branch',
+ format=BzrDirFormat6())
+
+ # Test all permutations of locking the working tree, branch and repository
+ # XXX: Well not yet, as we can't query oslocks yet. Currently, it's
+ # implemented by raising NotImplementedError and get_physical_lock_status()
+ # always returns false. This makes bzr info hide the lock status. (Olaf)
+ # W B R
+
+ # U U U
+ out, err = self.run_bzr('info -v branch')
+ self.assertEqualDiff(
+"""Standalone tree (format: weave)
+Location:
+ branch root: %s
+
+Format:
+ control: All-in-one format 6
+ working tree: Working tree format 2
+ branch: Branch format 4
+ repository: %s
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % ('branch', tree.branch.repository._format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+ # L L L
+ tree.lock_write()
+ out, err = self.run_bzr('info -v branch')
+ self.assertEqualDiff(
+"""Standalone tree (format: weave)
+Location:
+ branch root: %s
+
+Format:
+ control: All-in-one format 6
+ working tree: Working tree format 2
+ branch: Branch format 4
+ repository: %s
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % ('branch', tree.branch.repository._format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+ tree.unlock()
+
+
+class TestBranchFormat4(TestCaseWithTransport):
+ """Tests specific to branch format 4"""
+
+ def test_no_metadir_support(self):
+ url = self.get_url()
+ bdir = bzrdir.BzrDirMetaFormat1().initialize(url)
+ bdir.create_repository()
+ self.assertRaises(errors.IncompatibleFormat,
+ BzrBranchFormat4().initialize, bdir)
+
+ def test_supports_bzrdir_6(self):
+ url = self.get_url()
+ bdir = BzrDirFormat6().initialize(url)
+ bdir.create_repository()
+ BzrBranchFormat4().initialize(bdir)
+
+
+class TestBoundBranch(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestBoundBranch, self).setUp()
+ self.build_tree(['master/', 'child/'])
+ self.make_branch_and_tree('master')
+ self.make_branch_and_tree('child',
+ format=controldir.format_registry.make_bzrdir('weave'))
+ os.chdir('child')
+
+ def test_bind_format_6_bzrdir(self):
+ # bind on a format 6 bzrdir should error
+ out,err = self.run_bzr('bind ../master', retcode=3)
+ self.assertEqual('', out)
+ # TODO: jam 20060427 Probably something like this really should
+ # print out the actual path, rather than the URL
+ cwd = urlutils.local_path_to_url(getcwd())
+ self.assertEqual('bzr: ERROR: To use this feature you must '
+ 'upgrade your branch at %s/.\n' % cwd, err)
+
+ def test_unbind_format_6_bzrdir(self):
+        # unbind on a format 6 bzrdir should error
+ out,err = self.run_bzr('unbind', retcode=3)
+ self.assertEqual('', out)
+ cwd = urlutils.local_path_to_url(getcwd())
+ self.assertEqual('bzr: ERROR: To use this feature you must '
+ 'upgrade your branch at %s/.\n' % cwd, err)
+
+
+class TestInit(TestCaseWithTransport):
+
+ def test_init_weave(self):
+ # --format=weave should be accepted to allow interoperation with
+ # old releases when desired.
+ out, err = self.run_bzr('init --format=weave')
+ self.assertEqual("""Created a standalone tree (format: weave)\n""",
+ out)
+ self.assertEqual('', err)
+
+
+class V4WeaveBundleTester(V4BundleTester):
+
+ def bzrdir_format(self):
+ return 'metaweave'
diff --git a/bzrlib/plugins/weave_fmt/test_repository.py b/bzrlib/plugins/weave_fmt/test_repository.py
new file mode 100644
index 0000000..7453296
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/test_repository.py
@@ -0,0 +1,331 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for weave repositories.
+
+For interface tests see tests/per_repository/*.py.
+
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+from stat import S_ISDIR
+import sys
+
+from bzrlib.bzrdir import (
+ BzrDirMetaFormat1,
+ )
+from bzrlib.errors import (
+ IllegalPath,
+ NoSuchFile,
+ )
+from bzrlib.repository import (
+ InterRepository,
+ Repository,
+ )
+from bzrlib.serializer import (
+ format_registry as serializer_format_registry,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+
+from bzrlib.plugins.weave_fmt import xml4
+from bzrlib.plugins.weave_fmt.bzrdir import (
+ BzrDirFormat6,
+ )
+from bzrlib.plugins.weave_fmt.repository import (
+ InterWeaveRepo,
+ RepositoryFormat4,
+ RepositoryFormat5,
+ RepositoryFormat6,
+ RepositoryFormat7,
+ )
+
+
+class TestFormat6(TestCaseWithTransport):
+
+ def test_attribute__fetch_order(self):
+ """Weaves need topological data insertion."""
+ control = BzrDirFormat6().initialize(self.get_url())
+ repo = RepositoryFormat6().initialize(control)
+ self.assertEqual('topological', repo._format._fetch_order)
+
+ def test_attribute__fetch_uses_deltas(self):
+ """Weaves do not reuse deltas."""
+ control = BzrDirFormat6().initialize(self.get_url())
+ repo = RepositoryFormat6().initialize(control)
+ self.assertEqual(False, repo._format._fetch_uses_deltas)
+
+ def test_attribute__fetch_reconcile(self):
+ """Weave repositories need a reconcile after fetch."""
+ control = BzrDirFormat6().initialize(self.get_url())
+ repo = RepositoryFormat6().initialize(control)
+ self.assertEqual(True, repo._format._fetch_reconcile)
+
+ def test_no_ancestry_weave(self):
+ control = BzrDirFormat6().initialize(self.get_url())
+ repo = RepositoryFormat6().initialize(control)
+ # We no longer need to create the ancestry.weave file
+ # since it is *never* used.
+ self.assertRaises(NoSuchFile,
+ control.transport.get,
+ 'ancestry.weave')
+
+ def test_supports_external_lookups(self):
+ control = BzrDirFormat6().initialize(self.get_url())
+ repo = RepositoryFormat6().initialize(control)
+ self.assertFalse(repo._format.supports_external_lookups)
+
+
+
+class TestFormat7(TestCaseWithTransport):
+
+ def test_attribute__fetch_order(self):
+ """Weaves need topological data insertion."""
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control)
+ self.assertEqual('topological', repo._format._fetch_order)
+
+ def test_attribute__fetch_uses_deltas(self):
+ """Weaves do not reuse deltas."""
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control)
+ self.assertEqual(False, repo._format._fetch_uses_deltas)
+
+ def test_attribute__fetch_reconcile(self):
+ """Weave repositories need a reconcile after fetch."""
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control)
+ self.assertEqual(True, repo._format._fetch_reconcile)
+
+ def test_disk_layout(self):
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control)
+ # in case of side effects of locking.
+ repo.lock_write()
+ repo.unlock()
+ # we want:
+ # format 'Bazaar-NG Repository format 7'
+ # lock ''
+ # inventory.weave == empty_weave
+ # empty revision-store directory
+ # empty weaves directory
+ t = control.get_repository_transport(None)
+ self.assertEqualDiff('Bazaar-NG Repository format 7',
+ t.get('format').read())
+ self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
+ self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
+ self.assertEqualDiff('# bzr weave file v5\n'
+ 'w\n'
+ 'W\n',
+ t.get('inventory.weave').read())
+ # Creating a file with id Foo:Bar results in a non-escaped file name on
+ # disk.
+ control.create_branch()
+ tree = control.create_workingtree()
+ tree.add(['foo'], ['Foo:Bar'], ['file'])
+ tree.put_file_bytes_non_atomic('Foo:Bar', 'content\n')
+ try:
+ tree.commit('first post', rev_id='first')
+ except IllegalPath:
+ if sys.platform != 'win32':
+ raise
+ self.knownFailure('Foo:Bar cannot be used as a file-id on windows'
+ ' in repo format 7')
+ return
+ self.assertEqualDiff(
+ '# bzr weave file v5\n'
+ 'i\n'
+ '1 7fe70820e08a1aac0ef224d9c66ab66831cc4ab1\n'
+ 'n first\n'
+ '\n'
+ 'w\n'
+ '{ 0\n'
+ '. content\n'
+ '}\n'
+ 'W\n',
+ t.get('weaves/74/Foo%3ABar.weave').read())
+
+ def test_shared_disk_layout(self):
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control, shared=True)
+ # we want:
+ # format 'Bazaar-NG Repository format 7'
+ # inventory.weave == empty_weave
+ # empty revision-store directory
+ # empty weaves directory
+ # a 'shared-storage' marker file.
+ # lock is not present when unlocked
+ t = control.get_repository_transport(None)
+ self.assertEqualDiff('Bazaar-NG Repository format 7',
+ t.get('format').read())
+ self.assertEqualDiff('', t.get('shared-storage').read())
+ self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
+ self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
+ self.assertEqualDiff('# bzr weave file v5\n'
+ 'w\n'
+ 'W\n',
+ t.get('inventory.weave').read())
+ self.assertFalse(t.has('branch-lock'))
+
+ def test_creates_lockdir(self):
+ """Make sure it appears to be controlled by a LockDir existence"""
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control, shared=True)
+ t = control.get_repository_transport(None)
+ # TODO: Should check there is a 'lock' toplevel directory,
+ # regardless of contents
+ self.assertFalse(t.has('lock/held/info'))
+ repo.lock_write()
+ try:
+ self.assertTrue(t.has('lock/held/info'))
+ finally:
+ # unlock so we don't get a warning about failing to do so
+ repo.unlock()
+
+ def test_uses_lockdir(self):
+ """repo format 7 actually locks on lockdir"""
+ base_url = self.get_url()
+ control = BzrDirMetaFormat1().initialize(base_url)
+ repo = RepositoryFormat7().initialize(control, shared=True)
+ t = control.get_repository_transport(None)
+ repo.lock_write()
+ repo.unlock()
+ del repo
+ # make sure the same lock is created by opening it
+ repo = Repository.open(base_url)
+ repo.lock_write()
+ self.assertTrue(t.has('lock/held/info'))
+ repo.unlock()
+ self.assertFalse(t.has('lock/held/info'))
+
+ def test_shared_no_tree_disk_layout(self):
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control, shared=True)
+ repo.set_make_working_trees(False)
+ # we want:
+ # format 'Bazaar-NG Repository format 7'
+ # lock ''
+ # inventory.weave == empty_weave
+ # empty revision-store directory
+ # empty weaves directory
+ # a 'shared-storage' marker file.
+ t = control.get_repository_transport(None)
+ self.assertEqualDiff('Bazaar-NG Repository format 7',
+ t.get('format').read())
+ ## self.assertEqualDiff('', t.get('lock').read())
+ self.assertEqualDiff('', t.get('shared-storage').read())
+ self.assertEqualDiff('', t.get('no-working-trees').read())
+ repo.set_make_working_trees(True)
+ self.assertFalse(t.has('no-working-trees'))
+ self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
+ self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
+ self.assertEqualDiff('# bzr weave file v5\n'
+ 'w\n'
+ 'W\n',
+ t.get('inventory.weave').read())
+
+ def test_supports_external_lookups(self):
+ control = BzrDirMetaFormat1().initialize(self.get_url())
+ repo = RepositoryFormat7().initialize(control)
+ self.assertFalse(repo._format.supports_external_lookups)
+
+
+class TestInterWeaveRepo(TestCaseWithTransport):
+
+ def test_is_compatible_and_registered(self):
+ # InterWeaveRepo is compatible when either side
+ # is a format 5/6/7 branch
+ from bzrlib.repofmt import knitrepo
+ formats = [RepositoryFormat5(),
+ RepositoryFormat6(),
+ RepositoryFormat7()]
+ incompatible_formats = [RepositoryFormat4(),
+ knitrepo.RepositoryFormatKnit1(),
+ ]
+ repo_a = self.make_repository('a')
+ repo_b = self.make_repository('b')
+ is_compatible = InterWeaveRepo.is_compatible
+ for source in incompatible_formats:
+ # force incompatible left then right
+ repo_a._format = source
+ repo_b._format = formats[0]
+ self.assertFalse(is_compatible(repo_a, repo_b))
+ self.assertFalse(is_compatible(repo_b, repo_a))
+ for source in formats:
+ repo_a._format = source
+ for target in formats:
+ repo_b._format = target
+ self.assertTrue(is_compatible(repo_a, repo_b))
+ self.assertEqual(InterWeaveRepo,
+ InterRepository.get(repo_a, repo_b).__class__)
+
+
+_working_inventory_v4 = """<inventory file_id="TREE_ROOT">
+<entry file_id="bar-20050901064931-73b4b1138abc9cd2" kind="file" name="bar" parent_id="TREE_ROOT" />
+<entry file_id="foo-20050801201819-4139aa4a272f4250" kind="directory" name="foo" parent_id="TREE_ROOT" />
+<entry file_id="bar-20050824000535-6bc48cfad47ed134" kind="file" name="bar" parent_id="foo-20050801201819-4139aa4a272f4250" />
+</inventory>"""
+
+
+_revision_v4 = """<revision committer="Martin Pool &lt;mbp@sourcefrog.net&gt;"
+ inventory_id="mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9"
+ inventory_sha1="e79c31c1deb64c163cf660fdedd476dd579ffd41"
+ revision_id="mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9"
+ timestamp="1125907235.212"
+ timezone="36000">
+<message>- start splitting code for xml (de)serialization away from objects
+ preparatory to supporting multiple formats by a single library
+</message>
+<parents>
+<revision_ref revision_id="mbp@sourcefrog.net-20050905063503-43948f59fa127d92" revision_sha1="7bdf4cc8c5bdac739f8cf9b10b78cf4b68f915ff" />
+</parents>
+</revision>
+"""
+
+
+class TestSerializer(TestCase):
+ """Test serializer"""
+
+ def test_registry(self):
+ self.assertIs(xml4.serializer_v4,
+ serializer_format_registry.get('4'))
+
+ def test_canned_inventory(self):
+ """Test unpacked a canned inventory v4 file."""
+ inp = StringIO(_working_inventory_v4)
+ inv = xml4.serializer_v4.read_inventory(inp)
+ self.assertEqual(len(inv), 4)
+ self.assert_(inv.has_id('bar-20050901064931-73b4b1138abc9cd2'))
+
+ def test_unpack_revision(self):
+ """Test unpacking a canned revision v4"""
+ inp = StringIO(_revision_v4)
+ rev = xml4.serializer_v4.read_revision(inp)
+ eq = self.assertEqual
+ eq(rev.committer,
+ "Martin Pool <mbp@sourcefrog.net>")
+ eq(rev.inventory_id,
+ "mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9")
+ eq(len(rev.parent_ids), 1)
+ eq(rev.parent_ids[0],
+ "mbp@sourcefrog.net-20050905063503-43948f59fa127d92")
+
+
diff --git a/bzrlib/plugins/weave_fmt/test_workingtree.py b/bzrlib/plugins/weave_fmt/test_workingtree.py
new file mode 100644
index 0000000..cea8c6b
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/test_workingtree.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for weave-era working tree formats."""
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib import (
+ conflicts,
+ errors,
+ )
+
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+
+from bzrlib.plugins.weave_fmt.bzrdir import BzrDirFormat6
+
+
+class TestFormat2WorkingTree(TestCaseWithTransport):
+ """Tests that are specific to format 2 trees."""
+
+ def create_format2_tree(self, url):
+ return self.make_branch_and_tree(
+ url, format=BzrDirFormat6())
+
+ def test_conflicts(self):
+        # test backwards compatibility
+ tree = self.create_format2_tree('.')
+ self.assertRaises(errors.UnsupportedOperation, tree.set_conflicts,
+ None)
+ file('lala.BASE', 'wb').write('labase')
+ expected = conflicts.ContentsConflict('lala')
+ self.assertEqual(list(tree.conflicts()), [expected])
+ file('lala', 'wb').write('la')
+ tree.add('lala', 'lala-id')
+ expected = conflicts.ContentsConflict('lala', file_id='lala-id')
+ self.assertEqual(list(tree.conflicts()), [expected])
+ file('lala.THIS', 'wb').write('lathis')
+ file('lala.OTHER', 'wb').write('laother')
+ # When "text conflict"s happen, stem, THIS and OTHER are text
+ expected = conflicts.TextConflict('lala', file_id='lala-id')
+ self.assertEqual(list(tree.conflicts()), [expected])
+ os.unlink('lala.OTHER')
+ os.mkdir('lala.OTHER')
+ expected = conflicts.ContentsConflict('lala', file_id='lala-id')
+ self.assertEqual(list(tree.conflicts()), [expected])
+
+ def test_detect_conflicts(self):
+ """Conflicts are detected properly"""
+ tree = self.create_format2_tree('.')
+ self.build_tree_contents([('hello', 'hello world4'),
+ ('hello.THIS', 'hello world2'),
+ ('hello.BASE', 'hello world1'),
+ ('hello.OTHER', 'hello world3'),
+ ('hello.sploo.BASE', 'yellowworld'),
+ ('hello.sploo.OTHER', 'yellowworld2'),
+ ])
+ tree.lock_read()
+ self.assertLength(6, list(tree.list_files()))
+ tree.unlock()
+ tree_conflicts = tree.conflicts()
+ self.assertLength(2, tree_conflicts)
+ self.assertTrue('hello' in tree_conflicts[0].path)
+ self.assertTrue('hello.sploo' in tree_conflicts[1].path)
+ conflicts.restore('hello')
+ conflicts.restore('hello.sploo')
+ self.assertLength(0, tree.conflicts())
+ self.assertFileEqual('hello world2', 'hello')
+ self.assertFalse(os.path.lexists('hello.sploo'))
+ self.assertRaises(errors.NotConflicted, conflicts.restore, 'hello')
+ self.assertRaises(errors.NotConflicted,
+ conflicts.restore, 'hello.sploo')
diff --git a/bzrlib/plugins/weave_fmt/workingtree.py b/bzrlib/plugins/weave_fmt/workingtree.py
new file mode 100644
index 0000000..2ec443c
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/workingtree.py
@@ -0,0 +1,243 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Weave-era working tree objects."""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ conflicts as _mod_conflicts,
+ errors,
+ inventory,
+ osutils,
+ revision as _mod_revision,
+ transform,
+ xml5,
+ )
+from bzrlib.decorators import needs_read_lock
+from bzrlib.mutabletree import MutableTree
+from bzrlib.transport.local import LocalTransport
+from bzrlib.workingtree import (
+ WorkingTreeFormat,
+ )
+from bzrlib.workingtree_3 import (
+ PreDirStateWorkingTree,
+ )
+
+
+def get_conflicted_stem(path):
+ for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
+ if path.endswith(suffix):
+ return path[:-len(suffix)]
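+
+# Illustrative behaviour (assuming '.THIS' is among CONFLICT_SUFFIXES, as it
+# is in bzrlib.conflicts): get_conflicted_stem('README.THIS') returns
+# 'README', while a path without any conflict suffix falls through and the
+# function returns None.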
+
+
+class WorkingTreeFormat2(WorkingTreeFormat):
+ """The second working tree format.
+
+ This format modified the hash cache from the format 1 hash cache.
+ """
+
+ upgrade_recommended = True
+
+ requires_normalized_unicode_filenames = True
+
+ case_sensitive_filename = "Branch-FoRMaT"
+
+ missing_parent_conflicts = False
+
+ supports_versioned_directories = True
+
+ def get_format_description(self):
+ """See WorkingTreeFormat.get_format_description()."""
+ return "Working tree format 2"
+
+ def _stub_initialize_on_transport(self, transport, file_mode):
+ """Workaround: create control files for a remote working tree.
+
+ This ensures that it can later be updated and dealt with locally,
+ since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
+ no working tree. (See bug #43064).
+ """
+ sio = StringIO()
+ inv = inventory.Inventory()
+ xml5.serializer_v5.write_inventory(inv, sio, working=True)
+ sio.seek(0)
+ transport.put_file('inventory', sio, file_mode)
+ transport.put_bytes('pending-merges', '', file_mode)
+
+ def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """See WorkingTreeFormat.initialize()."""
+ if not isinstance(a_bzrdir.transport, LocalTransport):
+ raise errors.NotLocalUrl(a_bzrdir.transport.base)
+ if from_branch is not None:
+ branch = from_branch
+ else:
+ branch = a_bzrdir.open_branch()
+ if revision_id is None:
+ revision_id = _mod_revision.ensure_null(branch.last_revision())
+ branch.lock_write()
+ try:
+ branch.generate_revision_history(revision_id)
+ finally:
+ branch.unlock()
+ inv = inventory.Inventory()
+ wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
+ branch,
+ inv,
+ _internal=True,
+ _format=self,
+ _bzrdir=a_bzrdir,
+ _control_files=branch.control_files)
+ basis_tree = branch.repository.revision_tree(revision_id)
+ if basis_tree.get_root_id() is not None:
+ wt.set_root_id(basis_tree.get_root_id())
+ # set the parent list and cache the basis tree.
+ if _mod_revision.is_null(revision_id):
+ parent_trees = []
+ else:
+ parent_trees = [(revision_id, basis_tree)]
+ wt.set_parent_trees(parent_trees)
+ transform.build_tree(basis_tree, wt)
+ for hook in MutableTree.hooks['post_build_tree']:
+ hook(wt)
+ return wt
+
+ def __init__(self):
+ super(WorkingTreeFormat2, self).__init__()
+ from bzrlib.plugins.weave_fmt.bzrdir import BzrDirFormat6
+ self._matchingbzrdir = BzrDirFormat6()
+
+ def open(self, a_bzrdir, _found=False):
+ """Return the WorkingTree object for a_bzrdir
+
+ _found is a private parameter, do not use it. It is used to indicate
+ if format probing has already been done.
+ """
+ if not _found:
+ # we are being called directly and must probe.
+ raise NotImplementedError
+ if not isinstance(a_bzrdir.transport, LocalTransport):
+ raise errors.NotLocalUrl(a_bzrdir.transport.base)
+ wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
+ _internal=True,
+ _format=self,
+ _bzrdir=a_bzrdir,
+ _control_files=a_bzrdir.open_branch().control_files)
+ return wt
+
+
+class WorkingTree2(PreDirStateWorkingTree):
+ """This is the Format 2 working tree.
+
+ This was the first weave based working tree.
+ - uses os locks for locking.
+ - uses the branch last-revision.
+ """
+
+ def __init__(self, basedir, *args, **kwargs):
+ super(WorkingTree2, self).__init__(basedir, *args, **kwargs)
+        # WorkingTree2 has the stronger constraint that self._inventory must
+        # always exist. Because this is an older format, we don't mind the
+        # overhead caused by the extra computation here.
+
+ # Newer WorkingTree's should only have self._inventory set when they
+ # have a read lock.
+ if self._inventory is None:
+ self.read_working_inventory()
+
+ def _get_check_refs(self):
+ """Return the references needed to perform a check of this tree."""
+ return [('trees', self.last_revision())]
+
+
+ def lock_tree_write(self):
+ """See WorkingTree.lock_tree_write().
+
+ In Format2 WorkingTrees we have a single lock for the branch and tree
+ so lock_tree_write() degrades to lock_write().
+
+ :return: An object with an unlock method which will release the lock
+ obtained.
+ """
+ self.branch.lock_write()
+ try:
+ self._control_files.lock_write()
+ return self
+ except:
+ self.branch.unlock()
+ raise
+
+ def unlock(self):
+ # we share control files:
+ if self._control_files._lock_count == 3:
+ # do non-implementation specific cleanup
+ self._cleanup()
+ # _inventory_is_modified is always False during a read lock.
+ if self._inventory_is_modified:
+ self.flush()
+ self._write_hashcache_if_dirty()
+
+ # reverse order of locking.
+ try:
+ return self._control_files.unlock()
+ finally:
+ self.branch.unlock()
+
+ def _iter_conflicts(self):
+ conflicted = set()
+ for info in self.list_files():
+ path = info[0]
+ stem = get_conflicted_stem(path)
+ if stem is None:
+ continue
+ if stem not in conflicted:
+ conflicted.add(stem)
+ yield stem
+
+ @needs_read_lock
+ def conflicts(self):
+ conflicts = _mod_conflicts.ConflictList()
+ for conflicted in self._iter_conflicts():
+ text = True
+ try:
+ if osutils.file_kind(self.abspath(conflicted)) != "file":
+ text = False
+ except errors.NoSuchFile:
+ text = False
+ if text is True:
+ for suffix in ('.THIS', '.OTHER'):
+ try:
+ kind = osutils.file_kind(self.abspath(conflicted+suffix))
+ if kind != "file":
+ text = False
+ except errors.NoSuchFile:
+ text = False
+                    if text is False:
+ break
+ ctype = {True: 'text conflict', False: 'contents conflict'}[text]
+ conflicts.append(_mod_conflicts.Conflict.factory(ctype,
+ path=conflicted,
+ file_id=self.path2id(conflicted)))
+ return conflicts
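+
+    # Illustrative outcome of conflicts(): if 'foo', 'foo.THIS' and
+    # 'foo.OTHER' are all regular files the conflict is reported as a
+    # 'text conflict'; if any of them is missing or is not a regular file
+    # it degrades to a 'contents conflict'.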
+
+ def set_conflicts(self, arg):
+ raise errors.UnsupportedOperation(self.set_conflicts, self)
+
+ def add_conflicts(self, arg):
+ raise errors.UnsupportedOperation(self.add_conflicts, self)
diff --git a/bzrlib/plugins/weave_fmt/xml4.py b/bzrlib/plugins/weave_fmt/xml4.py
new file mode 100644
index 0000000..f1cd664
--- /dev/null
+++ b/bzrlib/plugins/weave_fmt/xml4.py
@@ -0,0 +1,190 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.xml_serializer import (
+ Element,
+ SubElement,
+ XMLSerializer,
+ escape_invalid_chars,
+ )
+from bzrlib.inventory import ROOT_ID, Inventory
+import bzrlib.inventory as inventory
+from bzrlib.revision import Revision
+from bzrlib.errors import BzrError
+
+
+class _Serializer_v4(XMLSerializer):
+ """Version 0.0.4 serializer
+
+ You should use the serializer_v4 singleton.
+
+ v4 serialisation is no longer supported, only deserialisation.
+ """
+
+ __slots__ = []
+
+ def _pack_entry(self, ie):
+ """Convert InventoryEntry to XML element"""
+ e = Element('entry')
+ e.set('name', ie.name)
+ e.set('file_id', ie.file_id)
+ e.set('kind', ie.kind)
+
+ if ie.text_size is not None:
+ e.set('text_size', '%d' % ie.text_size)
+
+ for f in ['text_id', 'text_sha1', 'symlink_target']:
+ v = getattr(ie, f)
+ if v is not None:
+ e.set(f, v)
+
+ # to be conservative, we don't externalize the root pointers
+ # for now, leaving them as null in the xml form. in a future
+ # version it will be implied by nested elements.
+ if ie.parent_id != ROOT_ID:
+ e.set('parent_id', ie.parent_id)
+
+ e.tail = '\n'
+
+ return e
+
+
+ def _unpack_inventory(self, elt, revision_id=None, entry_cache=None,
+ return_from_cache=False):
+ """Construct from XML Element
+
+ :param revision_id: Ignored parameter used by xml5.
+ """
+ root_id = elt.get('file_id') or ROOT_ID
+ inv = Inventory(root_id)
+ for e in elt:
+ ie = self._unpack_entry(e, entry_cache=entry_cache,
+ return_from_cache=return_from_cache)
+ if ie.parent_id == ROOT_ID:
+ ie.parent_id = root_id
+ inv.add(ie)
+ return inv
+
+
+ def _unpack_entry(self, elt, entry_cache=None, return_from_cache=False):
+ ## original format inventories don't have a parent_id for
+ ## nodes in the root directory, but it's cleaner to use one
+ ## internally.
+ parent_id = elt.get('parent_id')
+ if parent_id is None:
+ parent_id = ROOT_ID
+
+ kind = elt.get('kind')
+ if kind == 'directory':
+ ie = inventory.InventoryDirectory(elt.get('file_id'),
+ elt.get('name'),
+ parent_id)
+ elif kind == 'file':
+ ie = inventory.InventoryFile(elt.get('file_id'),
+ elt.get('name'),
+ parent_id)
+ ie.text_id = elt.get('text_id')
+ ie.text_sha1 = elt.get('text_sha1')
+ v = elt.get('text_size')
+ ie.text_size = v and int(v)
+ elif kind == 'symlink':
+ ie = inventory.InventoryLink(elt.get('file_id'),
+ elt.get('name'),
+ parent_id)
+ ie.symlink_target = elt.get('symlink_target')
+ else:
+ raise BzrError("unknown kind %r" % kind)
+
+ ## mutter("read inventoryentry: %r", elt.attrib)
+
+ return ie
+
+
+ def _pack_revision(self, rev):
+ """Revision object -> xml tree"""
+ root = Element('revision',
+ committer = rev.committer,
+ timestamp = '%.9f' % rev.timestamp,
+ revision_id = rev.revision_id,
+ inventory_id = rev.inventory_id,
+ inventory_sha1 = rev.inventory_sha1,
+ )
+ if rev.timezone:
+ root.set('timezone', str(rev.timezone))
+ root.text = '\n'
+
+ msg = SubElement(root, 'message')
+ msg.text = escape_invalid_chars(rev.message)[0]
+ msg.tail = '\n'
+
+ if rev.parents:
+ pelts = SubElement(root, 'parents')
+ pelts.tail = pelts.text = '\n'
+ for i, parent_id in enumerate(rev.parents):
+ p = SubElement(pelts, 'revision_ref')
+ p.tail = '\n'
+ p.set('revision_id', parent_id)
+ if i < len(rev.parent_sha1s):
+ p.set('revision_sha1', rev.parent_sha1s[i])
+ return root
+
+
+ def _unpack_revision(self, elt):
+ """XML Element -> Revision object"""
+
+ # <changeset> is deprecated...
+ if elt.tag not in ('revision', 'changeset'):
+ raise BzrError("unexpected tag in revision file: %r" % elt)
+
+ rev = Revision(committer = elt.get('committer'),
+ timestamp = float(elt.get('timestamp')),
+ revision_id = elt.get('revision_id'),
+ inventory_id = elt.get('inventory_id'),
+ inventory_sha1 = elt.get('inventory_sha1')
+ )
+
+ precursor = elt.get('precursor')
+ precursor_sha1 = elt.get('precursor_sha1')
+
+ pelts = elt.find('parents')
+
+ if pelts:
+ for p in pelts:
+ rev.parent_ids.append(p.get('revision_id'))
+ rev.parent_sha1s.append(p.get('revision_sha1'))
+ if precursor:
+ # must be consistent
+ prec_parent = rev.parent_ids[0]
+ elif precursor:
+ # revisions written prior to 0.0.5 have a single precursor
+            # given as an attribute
+ rev.parent_ids.append(precursor)
+ rev.parent_sha1s.append(precursor_sha1)
+
+ v = elt.get('timezone')
+ rev.timezone = v and int(v)
+
+ rev.message = elt.findtext('message') # text of <message>
+ return rev
+
+
+
+
+"""singleton instance"""
+serializer_v4 = _Serializer_v4()
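+
+# Illustrative use (mirroring TestSerializer in test_repository.py): feed a
+# canned v4 document to the singleton for deserialisation, e.g.
+#     from cStringIO import StringIO
+#     inv = serializer_v4.read_inventory(StringIO(xml_text))
+# where xml_text is a hypothetical v4 inventory string; writing v4 documents
+# is intentionally unsupported.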
+
diff --git a/bzrlib/progress.py b/bzrlib/progress.py
new file mode 100644
index 0000000..f1f197d
--- /dev/null
+++ b/bzrlib/progress.py
@@ -0,0 +1,248 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Progress indicators.
+
+The usual way to use this is via bzrlib.ui.ui_factory.nested_progress_bar which
+will manage a conceptual stack of nested activities.
+"""
+
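+# Typical (illustrative) use, assuming a ui factory is at hand:
+#
+#     task = ui_factory.nested_progress_bar()
+#     try:
+#         for i, name in enumerate(names):
+#             task.update(u"processing", i, len(names))
+#     finally:
+#         task.finished()
+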
+from __future__ import absolute_import
+
+import time
+import os
+
+
+def _supports_progress(f):
+ """Detect if we can use pretty progress bars on file F.
+
+ If this returns true we expect that a human may be looking at that
+ output, and that we can repaint a line to update it.
+
+ This doesn't check the policy for whether we *should* use them.
+ """
+ isatty = getattr(f, 'isatty', None)
+ if isatty is None:
+ return False
+ if not isatty():
+ return False
+ # The following case also handles Win32 - on that platform $TERM is
+ # typically never set, so the case None is treated as a smart terminal,
+ # not dumb. <https://bugs.launchpad.net/bugs/334808> win32 files do have
+ # isatty methods that return true.
+ if os.environ.get('TERM') == 'dumb':
+ # e.g. emacs compile window
+ return False
+ return True
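+
+
+# A minimal sketch (not part of the original module): pick a stream for
+# progress output, falling back to None when pretty progress bars are not
+# supported on stderr.
+def _example_progress_stream():
+    import sys
+    if _supports_progress(sys.stderr):
+        return sys.stderr
+    return None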
+
+
+class ProgressTask(object):
+ """Model component of a progress indicator.
+
+ Most code that needs to indicate progress should update one of these,
+ and it will in turn update the display, if one is present.
+
+ Code updating the task may also set fields as hints about how to display
+ it: show_pct, show_spinner, show_eta, show_count, show_bar. UIs
+ will not necessarily respect all these fields.
+
+ The message given when updating a task must be unicode, not bytes.
+
+ :ivar update_latency: The interval (in seconds) at which the PB should be
+ updated. Setting this to zero suggests every update should be shown
+ synchronously.
+
+ :ivar show_transport_activity: If true (default), transport activity
+ will be shown when this task is drawn. Disable it if you're sure
+ that only irrelevant or uninteresting transport activity can occur
+ during this task.
+ """
+
+ def __init__(self, parent_task=None, ui_factory=None, progress_view=None):
+ """Construct a new progress task.
+
+ :param parent_task: Enclosing ProgressTask or None.
+
+ :param progress_view: ProgressView to display this ProgressTask.
+
+ :param ui_factory: The UI factory that will display updates;
+ deprecated in favor of passing progress_view directly.
+
+ Normally you should not call this directly but rather through
+ `ui_factory.nested_progress_bar`.
+ """
+ self._parent_task = parent_task
+ self._last_update = 0
+ self.total_cnt = None
+ self.current_cnt = None
+ self.msg = ''
+ # TODO: deprecate passing ui_factory
+ self.ui_factory = ui_factory
+ self.progress_view = progress_view
+ self.show_pct = False
+ self.show_spinner = True
+ self.show_eta = False
+ self.show_count = True
+ self.show_bar = True
+ self.update_latency = 0.1
+ self.show_transport_activity = True
+
+ def __repr__(self):
+ return '%s(%r/%r, msg=%r)' % (
+ self.__class__.__name__,
+ self.current_cnt,
+ self.total_cnt,
+ self.msg)
+
+ def update(self, msg, current_cnt=None, total_cnt=None):
+ """Report updated task message and if relevent progress counters
+
+ The message given must be unicode, not a byte string.
+ """
+ self.msg = msg
+ self.current_cnt = current_cnt
+ if total_cnt:
+ self.total_cnt = total_cnt
+ if self.progress_view:
+ self.progress_view.show_progress(self)
+ else:
+ self.ui_factory._progress_updated(self)
+
+ def tick(self):
+ self.update(self.msg)
+
+ def finished(self):
+ if self.progress_view:
+ self.progress_view.task_finished(self)
+ else:
+ self.ui_factory._progress_finished(self)
+
+ def make_sub_task(self):
+ return ProgressTask(self, ui_factory=self.ui_factory,
+ progress_view=self.progress_view)
+
+ def _overall_completion_fraction(self, child_fraction=0.0):
+ """Return fractional completion of this task and its parents
+
+ Returns None if no completion can be computed."""
+ if self.current_cnt is not None and self.total_cnt:
+ own_fraction = (float(self.current_cnt) + child_fraction) / self.total_cnt
+ else:
+ # if this task has no estimation, it just passes on directly
+ # whatever the child has measured...
+ own_fraction = child_fraction
+ if self._parent_task is None:
+ return own_fraction
+ else:
+ if own_fraction is None:
+ own_fraction = 0.0
+ return self._parent_task._overall_completion_fraction(own_fraction)
+
+ def clear(self):
+ # TODO: deprecate this method; the model object shouldn't be concerned
+ # with whether it's shown or not. Most callers use this because they
+ # want to write some different non-progress output to the screen, but
+ # they should probably instead use a stream that's synchronized with
+ # the progress output. It may be there is a model-level use for
+ # saying "this task's not active at the moment" but I don't see it. --
+ # mbp 20090623
+ if self.progress_view:
+ self.progress_view.clear()
+ else:
+ self.ui_factory.clear_term()
+
+
+class DummyProgress(object):
+ """Progress-bar standin that does nothing.
+
+ This was previously often constructed by application code if no progress
+ bar was explicitly passed in. That's no longer recommended: instead, just
+ create a progress task from the ui_factory. This class can be used in
+ test code that needs to fake a progress task for some reason.
+ """
+
+ def tick(self):
+ pass
+
+ def update(self, msg=None, current=None, total=None):
+ pass
+
+ def child_update(self, message, current, total):
+ pass
+
+ def clear(self):
+ pass
+
+ def child_progress(self, **kwargs):
+ return DummyProgress(**kwargs)
+
+
+def str_tdelta(delt):
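+ # Render a duration in seconds as H:MM:SS, e.g. 3661 -> '1:01:01'; None means unknown.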
+ if delt is None:
+ return "-:--:--"
+ delt = int(round(delt))
+ return '%d:%02d:%02d' % (delt/3600,
+ (delt/60) % 60,
+ delt % 60)
+
+
+def get_eta(start_time, current, total, enough_samples=3, last_updates=None, n_recent=10):
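+ # Extrapolate the total duration linearly from elapsed time and progress so far;
+ # when recent per-update timings are available, average that estimate with one
+ # based on their mean duration.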
+ if start_time is None:
+ return None
+
+ if not total:
+ return None
+
+ if current < enough_samples:
+ return None
+
+ if current > total:
+ return None # more work reported than the total; can't compute a sane ETA
+
+ elapsed = time.time() - start_time
+
+ if elapsed < 2.0: # not enough time to estimate
+ return None
+
+ total_duration = float(elapsed) * float(total) / float(current)
+
+ if last_updates and len(last_updates) >= n_recent:
+ avg = sum(last_updates) / float(len(last_updates))
+ time_left = avg * (total - current)
+
+ old_time_left = total_duration - elapsed
+
+ # We could return the average, or some other value here
+ return (time_left + old_time_left) / 2
+
+ return total_duration - elapsed
+
+
+class ProgressPhase(object):
+ """Update progress object with the current phase"""
+ def __init__(self, message, total, pb):
+ object.__init__(self)
+ self.pb = pb
+ self.message = message
+ self.total = total
+ self.cur_phase = None
+
+ def next_phase(self):
+ if self.cur_phase is None:
+ self.cur_phase = 0
+ else:
+ self.cur_phase += 1
+ self.pb.update(self.message, self.cur_phase, self.total)
diff --git a/bzrlib/push.py b/bzrlib/push.py
new file mode 100644
index 0000000..ccab830
--- /dev/null
+++ b/bzrlib/push.py
@@ -0,0 +1,177 @@
+# Copyright (C) 2008-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""UI helper for the push command."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ controldir,
+ errors,
+ revision as _mod_revision,
+ transport,
+ )
+from bzrlib.trace import (
+ note,
+ warning,
+ )
+from bzrlib.i18n import gettext
+
+
+class PushResult(object):
+ """Result of a push operation.
+
+ :ivar branch_push_result: Result of a push between branches
+ :ivar target_branch: The target branch
+ :ivar stacked_on: URL of the branch on which the result is stacked
+ :ivar workingtree_updated: Whether or not the target workingtree was updated.
+ """
+
+ def __init__(self):
+ self.branch_push_result = None
+ self.stacked_on = None
+ self.workingtree_updated = None
+ self.target_branch = None
+
+ def report(self, to_file):
+ """Write a human-readable description of the result."""
+ if self.branch_push_result is None:
+ if self.stacked_on is not None:
+ note(gettext('Created new stacked branch referring to %s.') %
+ self.stacked_on)
+ else:
+ note(gettext('Created new branch.'))
+ else:
+ self.branch_push_result.report(to_file)
+
+
+def _show_push_branch(br_from, revision_id, location, to_file, verbose=False,
+ overwrite=False, remember=False, stacked_on=None, create_prefix=False,
+ use_existing_dir=False, no_tree=False):
+ """Push a branch to a location.
+
+ :param br_from: the source branch
+ :param revision_id: the revision-id to push up to
+ :param location: the url of the destination
+ :param to_file: the output stream
+ :param verbose: if True, display more output than normal
+ :param overwrite: list of things to overwrite ("history", "tags")
+ or a boolean indicating whether to overwrite everything
+ :param remember: if True, store the location as the push location for
+ the source branch
+ :param stacked_on: the url of the branch, if any, to stack on;
+ if set, only the revisions not in that branch are pushed
+ :param create_prefix: if True, create the necessary parent directories
+ at the destination if they don't already exist
+ :param use_existing_dir: if True, proceed even if the destination
+ directory exists without a current .bzr directory in it
+ """
+ to_transport = transport.get_transport(location)
+ try:
+ dir_to = controldir.ControlDir.open_from_transport(to_transport)
+ except errors.NotBranchError:
+ # Didn't find anything
+ dir_to = None
+
+ if dir_to is None:
+ try:
+ br_to = br_from.create_clone_on_transport(to_transport,
+ revision_id=revision_id, stacked_on=stacked_on,
+ create_prefix=create_prefix, use_existing_dir=use_existing_dir,
+ no_tree=no_tree)
+ except errors.AlreadyControlDirError, err:
+ raise errors.BzrCommandError(gettext(
+ "Target directory %s already contains a .bzr directory, "
+ "but it is not valid.") % (location,))
+ except errors.FileExists, err:
+ if not use_existing_dir:
+ raise errors.BzrCommandError(gettext("Target directory %s"
+ " already exists, but does not have a .bzr"
+ " directory. Supply --use-existing-dir to push"
+ " there anyway.") % location)
+ # This shouldn't occur, but if it does the FileExists error will be
+ # more informative than an UnboundLocalError for br_to.
+ raise
+ except errors.NoSuchFile:
+ if not create_prefix:
+ raise errors.BzrCommandError(gettext("Parent directory of %s"
+ " does not exist."
+ "\nYou may supply --create-prefix to create all"
+ " leading parent directories.")
+ % location)
+ # This shouldn't occur (because create_prefix is true, so
+ # create_clone_on_transport should be catching NoSuchFile and
+ # creating the missing directories) but if it does the original
+ # NoSuchFile error will be more informative than an
+ # UnboundLocalError for br_to.
+ raise
+ except errors.TooManyRedirections:
+ raise errors.BzrCommandError(gettext("Too many redirections trying "
+ "to make %s.") % location)
+ push_result = PushResult()
+ # TODO: Some more useful message about what was copied
+ try:
+ push_result.stacked_on = br_to.get_stacked_on_url()
+ except (errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat,
+ errors.NotStacked):
+ push_result.stacked_on = None
+ push_result.target_branch = br_to
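+ # A freshly created branch has no previous tip here, so report its history
+ # as starting from scratch.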
+ push_result.old_revid = _mod_revision.NULL_REVISION
+ push_result.old_revno = 0
+ # Remember the location if asked explicitly, or if remembering was not
+ # specified and no push location is set yet
+ if (remember
+ or (remember is None and br_from.get_push_location() is None)):
+ # FIXME: Should be done only if we succeed ? -- vila 2012-01-18
+ br_from.set_push_location(br_to.base)
+ else:
+ if stacked_on is not None:
+ warning("Ignoring request for a stacked branch as repository "
+ "already exists at the destination location.")
+ try:
+ push_result = dir_to.push_branch(br_from, revision_id, overwrite,
+ remember, create_prefix)
+ except errors.DivergedBranches:
+ raise errors.BzrCommandError(gettext('These branches have diverged.'
+ ' See "bzr help diverged-branches"'
+ ' for more information.'))
+ except errors.NoRoundtrippingSupport, e:
+ raise errors.BzrCommandError(gettext("It is not possible to losslessly "
+ "push to %s. You may want to use dpush instead.") %
+ e.target_branch.mapping.vcs.abbreviation)
+ except errors.NoRepositoryPresent:
+ # we have a controldir but no branch or repository
+ # XXX: Figure out what to do other than complain.
+ raise errors.BzrCommandError(gettext("At %s you have a valid .bzr"
+ " control directory, but not a branch or repository. This"
+ " is an unsupported configuration. Please move the target"
+ " directory out of the way and try again.") % location)
+ if push_result.workingtree_updated == False:
+ warning("This transport does not update the working "
+ "tree of: %s. See 'bzr help working-trees' for "
+ "more information." % push_result.target_branch.base)
+ push_result.report(to_file)
+ if verbose:
+ br_to = push_result.target_branch
+ br_to.lock_read()
+ try:
+ from bzrlib.log import show_branch_change
+ show_branch_change(br_to, to_file, push_result.old_revno,
+ push_result.old_revid)
+ finally:
+ br_to.unlock()
+
+
diff --git a/bzrlib/python-compat.h b/bzrlib/python-compat.h
new file mode 100644
index 0000000..642a0c7
--- /dev/null
+++ b/bzrlib/python-compat.h
@@ -0,0 +1,90 @@
+/*
+ * Bazaar -- distributed version control
+ *
+ * Copyright (C) 2008 by Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Provide the typedefs that pyrex does automatically in newer versions, to
+ * allow older versions to build our extensions.
+ */
+
+#ifndef _BZR_PYTHON_COMPAT_H
+#define _BZR_PYTHON_COMPAT_H
+
+/* http://www.python.org/dev/peps/pep-0353/ */
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+ typedef int Py_ssize_t;
+ typedef Py_ssize_t (*lenfunc)(PyObject *);
+ typedef PyObject * (*ssizeargfunc)(PyObject *, Py_ssize_t);
+ typedef PyObject * (*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
+#endif
+
+#if defined(_WIN32) || defined(WIN32)
+ /* Defining WIN32_LEAN_AND_MEAN makes including windows quite a bit
+ * lighter weight.
+ */
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+
+ /* Needed for htonl */
+ #include "Winsock2.h"
+
+ /* sys/stat.h doesn't have any of these macro definitions for MSVC, so
+ * we'll define whatever is missing that we actually use.
+ */
+ #if !defined(S_ISDIR)
+ #define S_ISDIR(m) (((m) & 0170000) == 0040000)
+ #endif
+ #if !defined(S_ISREG)
+ #define S_ISREG(m) (((m) & 0170000) == 0100000)
+ #endif
+ #if !defined(S_IXUSR)
+ #define S_IXUSR 0000100/* execute/search permission, owner */
+ #endif
+ /* sys/stat.h doesn't have S_ISLNK on win32, so we fake it by just always
+ * returning False
+ */
+ #if !defined(S_ISLNK)
+ #define S_ISLNK(mode) (0)
+ #endif
+#else /* Not win32 */
+ /* For htonl */
+ #include "arpa/inet.h"
+#endif
+
+#include <stdio.h>
+
+#ifdef _MSC_VER
+#define snprintf _snprintf
+/* gcc (mingw32) has strtoll, while the MSVC compiler uses _strtoi64 */
+#define strtoll _strtoi64
+#define strtoull _strtoui64
+#endif
+
+/* Introduced in Python 2.6 */
+#ifndef Py_TYPE
+# define Py_TYPE(o) ((o)->ob_type)
+#endif
+#ifndef Py_REFCNT
+# define Py_REFCNT(o) ((o)->ob_refcnt)
+#endif
+
+#endif /* _BZR_PYTHON_COMPAT_H */
diff --git a/bzrlib/pyutils.py b/bzrlib/pyutils.py
new file mode 100644
index 0000000..0f3b2a1
--- /dev/null
+++ b/bzrlib/pyutils.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""General Python convenience functions."""
+
+from __future__ import absolute_import
+
+import sys
+
+
+def get_named_object(module_name, member_name=None):
+ """Get the Python object named by a given module and member name.
+
+ This is usually much more convenient than dealing with ``__import__``
+ directly::
+
+ >>> doc = get_named_object('bzrlib.pyutils', 'get_named_object.__doc__')
+ >>> doc.splitlines()[0]
+ 'Get the Python object named by a given module and member name.'
+
+ :param module_name: a module name, as would be found in sys.modules if
+ the module is already imported. It may contain dots. e.g. 'sys' or
+ 'os.path'.
+ :param member_name: (optional) a name of an attribute in that module to
+ return. It may contain dots. e.g. 'MyClass.some_method'. If not
+ given, the named module will be returned instead.
+ :raises: ImportError or AttributeError.
+ """
+ # We may have just a module name, or a module name and a member name,
+ # and either may contain dots. __import__'s return value is a bit
+ # unintuitive, so we need to take care to always return the object
+ # specified by the full combination of module name + member name.
+ if member_name:
+ # Give __import__ a from_list. It will return the last module in
+ # the dotted module name.
+ attr_chain = member_name.split('.')
+ from_list = attr_chain[:1]
+ obj = __import__(module_name, {}, {}, from_list)
+ for attr in attr_chain:
+ obj = getattr(obj, attr)
+ else:
+ # We're just importing a module, no attributes, so we have no
+ # from_list. __import__ will return the first module in the dotted
+ # module name, so we look up the module from sys.modules.
+ __import__(module_name, globals(), locals(), [])
+ obj = sys.modules[module_name]
+ return obj
+
+
+def calc_parent_name(module_name, member_name=None):
+ """Determine the 'parent' of a given dotted module name and (optional)
+ member name.
+
+ The idea is that ``getattr(parent_obj, final_attr)`` will equal
+ get_named_object(module_name, member_name).
+
+ :return: (module_name, member_name, final_attr) tuple.
+ """
+# +SKIP is not recognized by python2.4
+# Typical use is::
+#
+# >>> parent_mod, parent_member, final_attr = calc_parent_name(
+# ... module_name, member_name) # doctest: +SKIP
+# >>> parent_obj = get_named_object(parent_mod, parent_member)
+# ... # doctest: +SKIP
+ if member_name is not None:
+ split_name = member_name.rsplit('.', 1)
+ if len(split_name) == 1:
+ return (module_name, None, member_name)
+ else:
+ return (module_name, split_name[0], split_name[1])
+ else:
+ split_name = module_name.rsplit('.', 1)
+ if len(split_name) == 1:
+ raise AssertionError(
+ 'No parent object for top-level module %r' % (module_name,))
+ else:
+ return (split_name[0], None, split_name[1])
diff --git a/bzrlib/readdir.h b/bzrlib/readdir.h
new file mode 100644
index 0000000..7c064c7
--- /dev/null
+++ b/bzrlib/readdir.h
@@ -0,0 +1,22 @@
+/*
+ * Bazaar -- distributed version control
+ *
+ * Copyright (C) 2006 by Canonical Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Adjust C api to workaround pyrex output bug/limitation */
+typedef struct dirent dirent;
diff --git a/bzrlib/reconcile.py b/bzrlib/reconcile.py
new file mode 100644
index 0000000..5264cd1
--- /dev/null
+++ b/bzrlib/reconcile.py
@@ -0,0 +1,558 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Reconcilers are able to fix some potential data errors in a branch."""
+
+from __future__ import absolute_import
+
+__all__ = [
+ 'KnitReconciler',
+ 'PackReconciler',
+ 'reconcile',
+ 'Reconciler',
+ 'RepoReconciler',
+ ]
+
+
+from bzrlib import (
+ cleanup,
+ errors,
+ revision as _mod_revision,
+ ui,
+ )
+from bzrlib.trace import mutter
+from bzrlib.tsort import topo_sort
+from bzrlib.versionedfile import AdapterFactory, FulltextContentFactory
+from bzrlib.i18n import gettext
+
+
+def reconcile(dir, canonicalize_chks=False):
+ """Reconcile the data in dir.
+
+ Currently this is limited to an inventory 'reweave'.
+
+ This is a convenience method for using a Reconciler object.
+
+ Directly using Reconciler is recommended for library users that
+ desire fine-grained control or analysis of the found issues.
+
+ :param canonicalize_chks: Make sure CHKs are in canonical form.
+ """
+ reconciler = Reconciler(dir, canonicalize_chks=canonicalize_chks)
+ reconciler.reconcile()
+
+
+class Reconciler(object):
+ """Reconcilers are used to reconcile existing data."""
+
+ def __init__(self, dir, other=None, canonicalize_chks=False):
+ """Create a Reconciler."""
+ self.bzrdir = dir
+ self.canonicalize_chks = canonicalize_chks
+
+ def reconcile(self):
+ """Perform reconciliation.
+
+ After reconciliation the following attributes document found issues:
+
+ * `inconsistent_parents`: The number of revisions in the repository
+ whose ancestry was being reported incorrectly.
+ * `garbage_inventories`: The number of inventory objects without
+ revisions that were garbage collected.
+ * `fixed_branch_history`: None if there was no branch, False if the
+ branch history was correct, True if the branch history needed to be
+ re-normalized.
+ """
+ self.pb = ui.ui_factory.nested_progress_bar()
+ try:
+ self._reconcile()
+ finally:
+ self.pb.finished()
+
+ def _reconcile(self):
+ """Helper function for performing reconciliation."""
+ self._reconcile_branch()
+ self._reconcile_repository()
+
+ def _reconcile_branch(self):
+ try:
+ self.branch = self.bzrdir.open_branch()
+ except errors.NotBranchError:
+ # Nothing to check here
+ self.fixed_branch_history = None
+ return
+ ui.ui_factory.note(gettext('Reconciling branch %s') % self.branch.base)
+ branch_reconciler = self.branch.reconcile(thorough=True)
+ self.fixed_branch_history = branch_reconciler.fixed_history
+
+ def _reconcile_repository(self):
+ self.repo = self.bzrdir.find_repository()
+ ui.ui_factory.note(gettext('Reconciling repository %s') %
+ self.repo.user_url)
+ self.pb.update(gettext("Reconciling repository"), 0, 1)
+ if self.canonicalize_chks:
+ try:
+ self.repo.reconcile_canonicalize_chks
+ except AttributeError:
+ raise errors.BzrError(
+ gettext("%s cannot canonicalize CHKs.") % (self.repo,))
+ repo_reconciler = self.repo.reconcile_canonicalize_chks()
+ else:
+ repo_reconciler = self.repo.reconcile(thorough=True)
+ self.inconsistent_parents = repo_reconciler.inconsistent_parents
+ self.garbage_inventories = repo_reconciler.garbage_inventories
+ if repo_reconciler.aborted:
+ ui.ui_factory.note(gettext(
+ 'Reconcile aborted: revision index has inconsistent parents.'))
+ ui.ui_factory.note(gettext(
+ 'Run "bzr check" for more details.'))
+ else:
+ ui.ui_factory.note(gettext('Reconciliation complete.'))
+
+
+class BranchReconciler(object):
+ """Reconciler that works on a branch."""
+
+ def __init__(self, a_branch, thorough=False):
+ self.fixed_history = None
+ self.thorough = thorough
+ self.branch = a_branch
+
+ def reconcile(self):
+ operation = cleanup.OperationWithCleanups(self._reconcile)
+ self.add_cleanup = operation.add_cleanup
+ operation.run_simple()
+
+ def _reconcile(self):
+ self.branch.lock_write()
+ self.add_cleanup(self.branch.unlock)
+ self.pb = ui.ui_factory.nested_progress_bar()
+ self.add_cleanup(self.pb.finished)
+ self._reconcile_steps()
+
+ def _reconcile_steps(self):
+ self._reconcile_revision_history()
+
+ def _reconcile_revision_history(self):
+ last_revno, last_revision_id = self.branch.last_revision_info()
+ real_history = []
+ graph = self.branch.repository.get_graph()
+ try:
+ for revid in graph.iter_lefthand_ancestry(
+ last_revision_id, (_mod_revision.NULL_REVISION,)):
+ real_history.append(revid)
+ except errors.RevisionNotPresent:
+ pass # Hit a ghost left hand parent
+ real_history.reverse()
+ if last_revno != len(real_history):
+ self.fixed_history = True
+ # Technically for Branch5 formats, it is more efficient to use
+ # set_revision_history, as this will regenerate it again.
+ # Not really worth a whole BranchReconciler class just for this,
+ # though.
+ ui.ui_factory.note(gettext('Fixing last revision info {0} '\
+ ' => {1}').format(
+ last_revno, len(real_history)))
+ self.branch.set_last_revision_info(len(real_history),
+ last_revision_id)
+ else:
+ self.fixed_history = False
+ ui.ui_factory.note(gettext('revision_history ok.'))
+
+
+class RepoReconciler(object):
+ """Reconciler that reconciles a repository.
+
+ The goal of repository reconciliation is to make any derived data
+ consistent with the core data committed by a user. This can involve
+ reindexing, or removing unreferenced data if that can interfere with
+ queries in a given repository.
+
+ Currently this consists of an inventory reweave with revision cross-checks.
+ """
+
+ def __init__(self, repo, other=None, thorough=False):
+ """Construct a RepoReconciler.
+
+ :param thorough: perform a thorough check which may take longer but
+ will correct non-data loss issues such as incorrect
+ cached data.
+ """
+ self.garbage_inventories = 0
+ self.inconsistent_parents = 0
+ self.aborted = False
+ self.repo = repo
+ self.thorough = thorough
+
+ def reconcile(self):
+ """Perform reconciliation.
+
+ After reconciliation the following attributes document found issues:
+
+ * `inconsistent_parents`: The number of revisions in the repository
+ whose ancestry was being reported incorrectly.
+ * `garbage_inventories`: The number of inventory objects without
+ revisions that were garbage collected.
+ """
+ operation = cleanup.OperationWithCleanups(self._reconcile)
+ self.add_cleanup = operation.add_cleanup
+ operation.run_simple()
+
+ def _reconcile(self):
+ self.repo.lock_write()
+ self.add_cleanup(self.repo.unlock)
+ self.pb = ui.ui_factory.nested_progress_bar()
+ self.add_cleanup(self.pb.finished)
+ self._reconcile_steps()
+
+ def _reconcile_steps(self):
+ """Perform the steps to reconcile this repository."""
+ self._reweave_inventory()
+
+ def _reweave_inventory(self):
+ """Regenerate the inventory weave for the repository from scratch.
+
+ This is a smart function: it will only do the reweave if doing it
+ will correct data issues. The self.thorough flag controls whether
+ only data-loss causing issues (!self.thorough) or all issues
+ (self.thorough) are treated as requiring the reweave.
+ """
+ transaction = self.repo.get_transaction()
+ self.pb.update(gettext('Reading inventory data'))
+ self.inventory = self.repo.inventories
+ self.revisions = self.repo.revisions
+ # the total set of revisions to process
+ self.pending = set([key[-1] for key in self.revisions.keys()])
+
+ # mapping from revision_id to parents
+ self._rev_graph = {}
+ # errors that we detect
+ self.inconsistent_parents = 0
+ # we need the revision id of each revision and its available parents list
+ self._setup_steps(len(self.pending))
+ for rev_id in self.pending:
+ # put a revision into the graph.
+ self._graph_revision(rev_id)
+ self._check_garbage_inventories()
+ # if there are no inconsistent_parents and
+ # (no garbage inventories or we are not doing a thorough check)
+ if (not self.inconsistent_parents and
+ (not self.garbage_inventories or not self.thorough)):
+ ui.ui_factory.note(gettext('Inventory ok.'))
+ return
+ self.pb.update(gettext('Backing up inventory'), 0, 0)
+ self.repo._backup_inventory()
+ ui.ui_factory.note(gettext('Backup inventory created.'))
+ new_inventories = self.repo._temp_inventories()
+
+ # we have topological order of revisions and non ghost parents ready.
+ self._setup_steps(len(self._rev_graph))
+ revision_keys = [(rev_id,) for rev_id in topo_sort(self._rev_graph)]
+ stream = self._change_inv_parents(
+ self.inventory.get_record_stream(revision_keys, 'unordered', True),
+ self._new_inv_parents,
+ set(revision_keys))
+ new_inventories.insert_record_stream(stream)
+ # if this worked, the set of new_inventories.keys should equal
+ # self.pending
+ if not (set(new_inventories.keys()) ==
+ set([(revid,) for revid in self.pending])):
+ raise AssertionError()
+ self.pb.update(gettext('Writing weave'))
+ self.repo._activate_new_inventory()
+ self.inventory = None
+ ui.ui_factory.note(gettext('Inventory regenerated.'))
+
+ def _new_inv_parents(self, revision_key):
+ """Lookup ghost-filtered parents for revision_key."""
+ # Use the filtered ghostless parents list:
+ return tuple([(revid,) for revid in self._rev_graph[revision_key[-1]]])
+
+ def _change_inv_parents(self, stream, get_parents, all_revision_keys):
+ """Adapt a record stream to reconcile the parents."""
+ for record in stream:
+ wanted_parents = get_parents(record.key)
+ if wanted_parents and wanted_parents[0] not in all_revision_keys:
+ # The check for the left most parent only handles knit
+ # compressors, but this code only applies to knit and weave
+ # repositories anyway.
+ bytes = record.get_bytes_as('fulltext')
+ yield FulltextContentFactory(record.key, wanted_parents, record.sha1, bytes)
+ else:
+ adapted_record = AdapterFactory(record.key, wanted_parents, record)
+ yield adapted_record
+ self._reweave_step('adding inventories')
+
+ def _setup_steps(self, new_total):
+ """Setup the markers we need to control the progress bar."""
+ self.total = new_total
+ self.count = 0
+
+ def _graph_revision(self, rev_id):
+ """Load a revision into the revision graph."""
+ # analyse revision rev_id and record its ghost-filtered parents
+ # in self._rev_graph.
+ self._reweave_step('loading revisions')
+ rev = self.repo.get_revision_reconcile(rev_id)
+ parents = []
+ for parent in rev.parent_ids:
+ if self._parent_is_available(parent):
+ parents.append(parent)
+ else:
+ mutter('found ghost %s', parent)
+ self._rev_graph[rev_id] = parents
+
+ def _check_garbage_inventories(self):
+ """Check for garbage inventories which we cannot trust
+
+ We can't trust them because their prerequisite file data may not
+ be present - all we know is that their revision was not installed.
+ """
+ if not self.thorough:
+ return
+ inventories = set(self.inventory.keys())
+ revisions = set(self.revisions.keys())
+ garbage = inventories.difference(revisions)
+ self.garbage_inventories = len(garbage)
+ for revision_key in garbage:
+ mutter('Garbage inventory {%s} found.', revision_key[-1])
+
+ def _parent_is_available(self, parent):
+ """True if parent is a fully available revision
+
+ A fully available revision has an inventory and a revision object in the
+ repository.
+ """
+ if parent in self._rev_graph:
+ return True
+ inv_present = (1 == len(self.inventory.get_parent_map([(parent,)])))
+ return (inv_present and self.repo.has_revision(parent))
+
+ def _reweave_step(self, message):
+ """Mark a single step of regeneration complete."""
+ self.pb.update(message, self.count, self.total)
+ self.count += 1
+
+
+class KnitReconciler(RepoReconciler):
+ """Reconciler that reconciles a knit format repository.
+
+ This will detect garbage inventories and remove them in thorough mode.
+ """
+
+ def _reconcile_steps(self):
+ """Perform the steps to reconcile this repository."""
+ if self.thorough:
+ try:
+ self._load_indexes()
+ except errors.BzrCheckError:
+ self.aborted = True
+ return
+ # knits never suffer this
+ self._gc_inventory()
+ self._fix_text_parents()
+
+ def _load_indexes(self):
+ """Load indexes for the reconciliation."""
+ self.transaction = self.repo.get_transaction()
+ self.pb.update(gettext('Reading indexes'), 0, 2)
+ self.inventory = self.repo.inventories
+ self.pb.update(gettext('Reading indexes'), 1, 2)
+ self.repo._check_for_inconsistent_revision_parents()
+ self.revisions = self.repo.revisions
+ self.pb.update(gettext('Reading indexes'), 2, 2)
+
+ def _gc_inventory(self):
+ """Remove inventories that are not referenced from the revision store."""
+ self.pb.update(gettext('Checking unused inventories'), 0, 1)
+ self._check_garbage_inventories()
+ self.pb.update(gettext('Checking unused inventories'), 1, 3)
+ if not self.garbage_inventories:
+ ui.ui_factory.note(gettext('Inventory ok.'))
+ return
+ self.pb.update(gettext('Backing up inventory'), 0, 0)
+ self.repo._backup_inventory()
+ ui.ui_factory.note(gettext('Backup inventory created.'))
+ # asking for '' should never return a non-empty weave
+ new_inventories = self.repo._temp_inventories()
+ # we have topological order of revisions and non ghost parents ready.
+ graph = self.revisions.get_parent_map(self.revisions.keys())
+ revision_keys = topo_sort(graph)
+ revision_ids = [key[-1] for key in revision_keys]
+ self._setup_steps(len(revision_keys))
+ stream = self._change_inv_parents(
+ self.inventory.get_record_stream(revision_keys, 'unordered', True),
+ graph.__getitem__,
+ set(revision_keys))
+ new_inventories.insert_record_stream(stream)
+ # if this worked, the set of new_inventories.keys() should equal
+ # the revision_keys list
+ if not(set(new_inventories.keys()) == set(revision_keys)):
+ raise AssertionError()
+ self.pb.update(gettext('Writing weave'))
+ self.repo._activate_new_inventory()
+ self.inventory = None
+ ui.ui_factory.note(gettext('Inventory regenerated.'))
+
+ def _fix_text_parents(self):
+ """Fix bad versionedfile parent entries.
+
+ It is possible for the parents entry in a versionedfile entry to be
+ inconsistent with the values in the revision and inventory.
+
+ This method finds entries with such inconsistencies, corrects their
+ parent lists, and replaces the versionedfile with a corrected version.
+ """
+ transaction = self.repo.get_transaction()
+ versions = [key[-1] for key in self.revisions.keys()]
+ mutter('Prepopulating revision text cache with %d revisions',
+ len(versions))
+ vf_checker = self.repo._get_versioned_file_checker()
+ bad_parents, unused_versions = vf_checker.check_file_version_parents(
+ self.repo.texts, self.pb)
+ text_index = vf_checker.text_index
+ per_id_bad_parents = {}
+ for key in unused_versions:
+ # Ensure that every file with unused versions gets rewritten.
+ # NB: This is really not needed, reconcile != pack.
+ per_id_bad_parents[key[0]] = {}
+ # Generate per-knit/weave data.
+ for key, details in bad_parents.iteritems():
+ file_id = key[0]
+ rev_id = key[1]
+ knit_parents = tuple([parent[-1] for parent in details[0]])
+ correct_parents = tuple([parent[-1] for parent in details[1]])
+ file_details = per_id_bad_parents.setdefault(file_id, {})
+ file_details[rev_id] = (knit_parents, correct_parents)
+ file_id_versions = {}
+ for text_key in text_index:
+ versions_list = file_id_versions.setdefault(text_key[0], [])
+ versions_list.append(text_key[1])
+ # Do the reconcile of individual weaves.
+ for num, file_id in enumerate(per_id_bad_parents):
+ self.pb.update(gettext('Fixing text parents'), num,
+ len(per_id_bad_parents))
+ versions_with_bad_parents = per_id_bad_parents[file_id]
+ id_unused_versions = set(key[-1] for key in unused_versions
+ if key[0] == file_id)
+ if file_id in file_id_versions:
+ file_versions = file_id_versions[file_id]
+ else:
+ # This id was present in the disk store but is not referenced
+ # by any revision at all.
+ file_versions = []
+ self._fix_text_parent(file_id, versions_with_bad_parents,
+ id_unused_versions, file_versions)
+
+ def _fix_text_parent(self, file_id, versions_with_bad_parents,
+ unused_versions, all_versions):
+ """Fix bad versionedfile entries in a single versioned file."""
+ mutter('fixing text parent: %r (%d versions)', file_id,
+ len(versions_with_bad_parents))
+ mutter('(%d are unused)', len(unused_versions))
+ new_file_id = 'temp:%s' % file_id
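+ # Build corrected parent lists under a temporary file id, stream the kept
+ # versions across, then swap the rewritten texts back into place below.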
+ new_parents = {}
+ needed_keys = set()
+ for version in all_versions:
+ if version in unused_versions:
+ continue
+ elif version in versions_with_bad_parents:
+ parents = versions_with_bad_parents[version][1]
+ else:
+ pmap = self.repo.texts.get_parent_map([(file_id, version)])
+ parents = [key[-1] for key in pmap[(file_id, version)]]
+ new_parents[(new_file_id, version)] = [
+ (new_file_id, parent) for parent in parents]
+ needed_keys.add((file_id, version))
+ def fix_parents(stream):
+ for record in stream:
+ bytes = record.get_bytes_as('fulltext')
+ new_key = (new_file_id, record.key[-1])
+ parents = new_parents[new_key]
+ yield FulltextContentFactory(new_key, parents, record.sha1, bytes)
+ stream = self.repo.texts.get_record_stream(needed_keys, 'topological', True)
+ self.repo._remove_file_id(new_file_id)
+ self.repo.texts.insert_record_stream(fix_parents(stream))
+ self.repo._remove_file_id(file_id)
+ if len(new_parents):
+ self.repo._move_file_id(new_file_id, file_id)
+
+
+class PackReconciler(RepoReconciler):
+ """Reconciler that reconciles a pack based repository.
+
+ Garbage inventories do not affect ancestry queries, and removal is
+ considerably more expensive as there is no separate versioned file for
+ them, so they are not cleaned. In short it is currently a no-op.
+
+ In future this may be a good place to hook in annotation cache checking,
+ index recreation etc.
+ """
+
+ # XXX: The index correction that _fix_text_parents performs is needed for
+ # packs, but not yet implemented. The basic approach is to:
+ # - lock the names list
+ # - perform a customised pack() that regenerates data as needed
+ # - unlock the names list
+ # https://bugs.launchpad.net/bzr/+bug/154173
+
+ def __init__(self, repo, other=None, thorough=False,
+ canonicalize_chks=False):
+ super(PackReconciler, self).__init__(repo, other=other,
+ thorough=thorough)
+ self.canonicalize_chks = canonicalize_chks
+
+ def _reconcile_steps(self):
+ """Perform the steps to reconcile this repository."""
+ if not self.thorough:
+ return
+ collection = self.repo._pack_collection
+ collection.ensure_loaded()
+ collection.lock_names()
+ self.add_cleanup(collection._unlock_names)
+ packs = collection.all_packs()
+ all_revisions = self.repo.all_revision_ids()
+ total_inventories = len(list(
+ collection.inventory_index.combined_index.iter_all_entries()))
+ if len(all_revisions):
+ if self.canonicalize_chks:
+ reconcile_meth = self.repo._canonicalize_chks_pack
+ else:
+ reconcile_meth = self.repo._reconcile_pack
+ new_pack = reconcile_meth(collection, packs, ".reconcile",
+ all_revisions, self.pb)
+ if new_pack is not None:
+ self._discard_and_save(packs)
+ else:
+ # only make a new pack when there is data to copy.
+ self._discard_and_save(packs)
+ self.garbage_inventories = total_inventories - len(list(
+ collection.inventory_index.combined_index.iter_all_entries()))
+
+ def _discard_and_save(self, packs):
+ """Discard some packs from the repository.
+
+ This removes them from the memory index, saves the in-memory index
+ which makes the newly reconciled pack visible and hides the packs to be
+ discarded, and finally renames the packs being discarded into the
+ obsolete packs directory.
+
+ :param packs: The packs to discard.
+ """
+ for pack in packs:
+ self.repo._pack_collection._remove_pack_from_memory(pack)
+ self.repo._pack_collection._save_pack_names()
+ self.repo._pack_collection._obsolete_packs(packs)
diff --git a/bzrlib/reconfigure.py b/bzrlib/reconfigure.py
new file mode 100644
index 0000000..6e5897e
--- /dev/null
+++ b/bzrlib/reconfigure.py
@@ -0,0 +1,385 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Reconfigure a controldir into a new tree/branch/repository layout.
+
+Various types of reconfiguration operation are available either by
+constructing a class or using a factory method on Reconfigure.
+"""
+
+from __future__ import absolute_import
+
+
+from bzrlib import (
+ branch,
+ controldir,
+ errors,
+ trace,
+ ui,
+ urlutils,
+ )
+from bzrlib.i18n import gettext
+
+# TODO: common base class for all reconfigure operations, making no
+# assumptions about what kind of change will be done.
+
+
+class ReconfigureStackedOn(object):
+ """Reconfigures a branch to be stacked on another branch."""
+
+ def apply(self, bzrdir, stacked_on_url):
+ branch = bzrdir.open_branch()
+ # it may be a path relative to the cwd or a url; the branch wants
+ # a path relative to itself...
+ on_url = urlutils.relative_url(branch.base,
+ urlutils.normalize_url(stacked_on_url))
+ branch.lock_write()
+ try:
+ branch.set_stacked_on_url(on_url)
+ if not trace.is_quiet():
+ ui.ui_factory.note(gettext(
+ "{0} is now stacked on {1}\n").format(
+ branch.base, branch.get_stacked_on_url()))
+ finally:
+ branch.unlock()
+
+
+class ReconfigureUnstacked(object):
+
+ def apply(self, bzrdir):
+ branch = bzrdir.open_branch()
+ branch.lock_write()
+ try:
+ branch.set_stacked_on_url(None)
+ if not trace.is_quiet():
+ ui.ui_factory.note(gettext(
+ "%s is now not stacked\n")
+ % (branch.base,))
+ finally:
+ branch.unlock()
+
+
+class Reconfigure(object):
+
+ def __init__(self, bzrdir, new_bound_location=None):
+ self.bzrdir = bzrdir
+ self.new_bound_location = new_bound_location
+ self.local_repository = None
+ try:
+ self.repository = self.bzrdir.find_repository()
+ except errors.NoRepositoryPresent:
+ self.repository = None
+ self.local_repository = None
+ else:
+ if (self.repository.user_url == self.bzrdir.user_url):
+ self.local_repository = self.repository
+ else:
+ self.local_repository = None
+ try:
+ branch = self.bzrdir.open_branch()
+ if branch.user_url == bzrdir.user_url:
+ self.local_branch = branch
+ self.referenced_branch = None
+ else:
+ self.local_branch = None
+ self.referenced_branch = branch
+ except errors.NotBranchError:
+ self.local_branch = None
+ self.referenced_branch = None
+ try:
+ self.tree = bzrdir.open_workingtree()
+ except errors.NoWorkingTree:
+ self.tree = None
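+ # Flags describing the planned changes; set by _plan_changes() and
+ # _set_use_shared(), and acted on by apply().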
+ self._unbind = False
+ self._bind = False
+ self._destroy_reference = False
+ self._create_reference = False
+ self._destroy_branch = False
+ self._create_branch = False
+ self._destroy_tree = False
+ self._create_tree = False
+ self._create_repository = False
+ self._destroy_repository = False
+ self._repository_trees = None
+
+ @staticmethod
+ def to_branch(bzrdir):
+ """Return a Reconfiguration to convert this bzrdir into a branch
+
+ :param bzrdir: The bzrdir to reconfigure
+ :raise errors.AlreadyBranch: if bzrdir is already a branch
+ """
+ reconfiguration = Reconfigure(bzrdir)
+ reconfiguration._plan_changes(want_tree=False, want_branch=True,
+ want_bound=False, want_reference=False)
+ if not reconfiguration.changes_planned():
+ raise errors.AlreadyBranch(bzrdir)
+ return reconfiguration
+
+ @staticmethod
+ def to_tree(bzrdir):
+ """Return a Reconfiguration to convert this bzrdir into a tree
+
+ :param bzrdir: The bzrdir to reconfigure
+ :raise errors.AlreadyTree: if bzrdir is already a tree
+ """
+ reconfiguration = Reconfigure(bzrdir)
+ reconfiguration._plan_changes(want_tree=True, want_branch=True,
+ want_bound=False, want_reference=False)
+ if not reconfiguration.changes_planned():
+ raise errors.AlreadyTree(bzrdir)
+ return reconfiguration
+
+ @staticmethod
+ def to_checkout(bzrdir, bound_location=None):
+ """Return a Reconfiguration to convert this bzrdir into a checkout
+
+ :param bzrdir: The bzrdir to reconfigure
+ :param bound_location: The location the checkout should be bound to.
+ :raise errors.AlreadyCheckout: if bzrdir is already a checkout
+ """
+ reconfiguration = Reconfigure(bzrdir, bound_location)
+ reconfiguration._plan_changes(want_tree=True, want_branch=True,
+ want_bound=True, want_reference=False)
+ if not reconfiguration.changes_planned():
+ raise errors.AlreadyCheckout(bzrdir)
+ return reconfiguration
+
+ @classmethod
+ def to_lightweight_checkout(klass, bzrdir, reference_location=None):
+ """Make a Reconfiguration to convert bzrdir into a lightweight checkout
+
+ :param bzrdir: The bzrdir to reconfigure
+ :param reference_location: The location the checkout's branch reference
+ should point to.
+ :raise errors.AlreadyLightweightCheckout: if bzrdir is already a
+ lightweight checkout
+ """
+ reconfiguration = klass(bzrdir, reference_location)
+ reconfiguration._plan_changes(want_tree=True, want_branch=False,
+ want_bound=False, want_reference=True)
+ if not reconfiguration.changes_planned():
+ raise errors.AlreadyLightweightCheckout(bzrdir)
+ return reconfiguration
+
+ @classmethod
+ def to_use_shared(klass, bzrdir):
+ """Convert a standalone branch into a repository branch"""
+ reconfiguration = klass(bzrdir)
+ reconfiguration._set_use_shared(use_shared=True)
+ if not reconfiguration.changes_planned():
+ raise errors.AlreadyUsingShared(bzrdir)
+ return reconfiguration
+
+ @classmethod
+ def to_standalone(klass, bzrdir):
+ """Convert a repository branch into a standalone branch"""
+ reconfiguration = klass(bzrdir)
+ reconfiguration._set_use_shared(use_shared=False)
+ if not reconfiguration.changes_planned():
+ raise errors.AlreadyStandalone(bzrdir)
+ return reconfiguration
+
+ @classmethod
+ def set_repository_trees(klass, bzrdir, with_trees):
+ """Adjust a repository's working tree presence default"""
+ reconfiguration = klass(bzrdir)
+ if not reconfiguration.repository.is_shared():
+ raise errors.ReconfigurationNotSupported(reconfiguration.bzrdir)
+ if with_trees and reconfiguration.repository.make_working_trees():
+ raise errors.AlreadyWithTrees(bzrdir)
+ elif (not with_trees
+ and not reconfiguration.repository.make_working_trees()):
+ raise errors.AlreadyWithNoTrees(bzrdir)
+ else:
+ reconfiguration._repository_trees = with_trees
+ return reconfiguration
+
+ def _plan_changes(self, want_tree, want_branch, want_bound,
+ want_reference):
+ """Determine which changes are needed to assume the configuration"""
+ if not want_branch and not want_reference:
+ raise errors.ReconfigurationNotSupported(self.bzrdir)
+ if want_branch and want_reference:
+ raise errors.ReconfigurationNotSupported(self.bzrdir)
+ if self.repository is None:
+ if not want_reference:
+ self._create_repository = True
+ else:
+ if want_reference and (
+ self.repository.user_url == self.bzrdir.user_url):
+ if not self.repository.is_shared():
+ self._destroy_repository = True
+ if self.referenced_branch is None:
+ if want_reference:
+ self._create_reference = True
+ if self.local_branch is not None:
+ self._destroy_branch = True
+ else:
+ if not want_reference:
+ self._destroy_reference = True
+ if self.local_branch is None:
+ if want_branch is True:
+ self._create_branch = True
+ if want_bound:
+ self._bind = True
+ else:
+ if want_bound:
+ if self.local_branch.get_bound_location() is None:
+ self._bind = True
+ else:
+ if self.local_branch.get_bound_location() is not None:
+ self._unbind = True
+ if not want_tree and self.tree is not None:
+ self._destroy_tree = True
+ if want_tree and self.tree is None:
+ self._create_tree = True
+
+ def _set_use_shared(self, use_shared=None):
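+ # Switching to a shared repository drops the local one; going standalone
+ # creates a local repository instead.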
+ if use_shared is None:
+ return
+ if use_shared:
+ if self.local_repository is not None:
+ self._destroy_repository = True
+ else:
+ if self.local_repository is None:
+ self._create_repository = True
+
+ def changes_planned(self):
+ """Return True if changes are planned, False otherwise"""
+ return (self._unbind or self._bind or self._destroy_tree
+ or self._create_tree or self._destroy_reference
+ or self._create_branch or self._create_repository
+ or self._create_reference or self._destroy_repository)
+
+ def _check(self):
+ """Raise if reconfiguration would destroy local changes"""
+ if self._destroy_tree and self.tree.has_changes():
+ raise errors.UncommittedChanges(self.tree)
+ if self._create_reference and self.local_branch is not None:
+ reference_branch = branch.Branch.open(self._select_bind_location())
+ if (reference_branch.last_revision() !=
+ self.local_branch.last_revision()):
+ raise errors.UnsyncedBranches(self.bzrdir, reference_branch)
+
+ def _select_bind_location(self):
+ """Select a location to bind or create a reference to.
+
+ Preference is:
+ 1. user specified location
+ 2. branch reference location (it's a kind of bind location)
+ 3. current bind location
+ 4. previous bind location (it was a good choice once)
+ 5. push location (it's writeable, so committable)
+ 6. parent location (it's pullable, so update-from-able)
+ """
+ if self.new_bound_location is not None:
+ return self.new_bound_location
+ if self.local_branch is not None:
+ bound = self.local_branch.get_bound_location()
+ if bound is not None:
+ return bound
+ old_bound = self.local_branch.get_old_bound_location()
+ if old_bound is not None:
+ return old_bound
+ push_location = self.local_branch.get_push_location()
+ if push_location is not None:
+ return push_location
+ parent = self.local_branch.get_parent()
+ if parent is not None:
+ return parent
+ elif self.referenced_branch is not None:
+ return self.referenced_branch.base
+ raise errors.NoBindLocation(self.bzrdir)
+
+ def apply(self, force=False):
+ """Apply the reconfiguration
+
+ :param force: If true, the reconfiguration is applied even if it will
+ destroy local changes.
+ :raise errors.UncommittedChanges: if the local tree is to be destroyed
+ but contains uncommitted changes.
+ :raise errors.NoBindLocation: if no bind location was specified and
+ none could be autodetected.
+ """
+ if not force:
+ self._check()
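+ # Create any new repository first; where an existing branch's repository
+ # is available, its format and revisions are reused.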
+ if self._create_repository:
+ if self.local_branch and not self._destroy_branch:
+ old_repo = self.local_branch.repository
+ elif self._create_branch and self.referenced_branch is not None:
+ old_repo = self.referenced_branch.repository
+ else:
+ old_repo = None
+ if old_repo is not None:
+ repository_format = old_repo._format
+ else:
+ repository_format = None
+ if repository_format is not None:
+ repo = repository_format.initialize(self.bzrdir)
+ else:
+ repo = self.bzrdir.create_repository()
+ if self.local_branch and not self._destroy_branch:
+ repo.fetch(self.local_branch.repository,
+ self.local_branch.last_revision())
+ else:
+ repo = self.repository
+ if self._create_branch and self.referenced_branch is not None:
+ repo.fetch(self.referenced_branch.repository,
+ self.referenced_branch.last_revision())
+ if self._create_reference:
+ reference_branch = branch.Branch.open(self._select_bind_location())
+ if self._destroy_repository:
+ if self._create_reference:
+ reference_branch.repository.fetch(self.repository)
+ elif self.local_branch is not None and not self._destroy_branch:
+ up = self.local_branch.user_transport.clone('..')
+ up_bzrdir = controldir.ControlDir.open_containing_from_transport(
+ up)[0]
+ new_repo = up_bzrdir.find_repository()
+ new_repo.fetch(self.repository)
+ last_revision_info = None
+ if self._destroy_reference:
+ last_revision_info = self.referenced_branch.last_revision_info()
+ self.bzrdir.destroy_branch()
+ if self._destroy_branch:
+ last_revision_info = self.local_branch.last_revision_info()
+ if self._create_reference:
+ self.local_branch.tags.merge_to(reference_branch.tags)
+ self.bzrdir.destroy_branch()
+ if self._create_branch:
+ local_branch = self.bzrdir.create_branch()
+ if last_revision_info is not None:
+ local_branch.set_last_revision_info(*last_revision_info)
+ if self._destroy_reference:
+ self.referenced_branch.tags.merge_to(local_branch.tags)
+ self.referenced_branch.update_references(local_branch)
+ else:
+ local_branch = self.local_branch
+ if self._create_reference:
+ self.bzrdir.set_branch_reference(reference_branch)
+ if self._destroy_tree:
+ self.bzrdir.destroy_workingtree()
+ if self._create_tree:
+ self.bzrdir.create_workingtree()
+ if self._unbind:
+ self.local_branch.unbind()
+ if self._bind:
+ bind_location = self._select_bind_location()
+ local_branch.bind(branch.Branch.open(bind_location))
+ if self._destroy_repository:
+ self.bzrdir.destroy_repository()
+ if self._repository_trees is not None:
+ repo.set_make_working_trees(self._repository_trees)
diff --git a/bzrlib/recordcounter.py b/bzrlib/recordcounter.py
new file mode 100644
index 0000000..9423031
--- /dev/null
+++ b/bzrlib/recordcounter.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Record counting support for showing progress of revision fetch."""
+
+from __future__ import absolute_import
+
+
+class RecordCounter(object):
+ """Container for maintains estimates of work requires for fetch.
+
+ Instance of this class is used along with a progress bar to provide
+ the user an estimate of the amount of work pending for a fetch (push,
+ pull, branch, checkout) operation.
+ """
+ def __init__(self):
+ self.initialized = False
+ self.current = 0
+ self.key_count = 0
+ self.max = 0
+
+ # Users of a RecordCounter instance update the progress bar every
+ # STEP records. We choose a reasonably high number to keep
+ # display updates from being too frequent. This is an odd number
+ # to ensure that the last digit of the 'records fetched vs estimate'
+ # ratio changes periodically.
+ self.STEP = 7
+
+ def is_initialized(self):
+ return self.initialized
+
+ def _estimate_max(self, key_count):
+ """Estimate the maximum amount of 'inserting stream' work.
+
+ This is just an estimate.
+ """
+ # Note: The magic number below is based on empirical data
+ # from 3 separate projects. Estimation can probably
+ # be improved, but this should work well for most cases.
+ # The projects used for the estimate (with approx. numbers) were:
+ # lp:bzr with records_fetched = 7 * revs_required
+ # lp:emacs with records_fetched = 8 * revs_required
+ # bzr-svn checkout of lp:parrot = 10.63 * revs_required
+ # Hence, 10.3 was chosen for a realistic progress bar, as:
+ # 1. If records fetched is lower than 10.3x then we simply complete
+ # with 10.3x. Under promise, over deliver.
+ # 2. In case of remote fetch, when we start the count fetch vs estimate
+ # display with revs_required/estimate, having a multiplier with a
+ # decimal point produces a realistic looking _estimate_ number rather
+ # than using something like 3125/31250 (for 10x)
+ # 3. Based on the above data, the possibility of overshooting this
+ # factor is minimal, and in case of an overshoot the estimate value
+ # should not need to be corrected too many times.
+ return int(key_count * 10.3)
+
+ def setup(self, key_count, current=0):
+ """Setup RecordCounter with basic estimate of work pending.
+
+ Setup self.max and self.current to reflect the amount of work
+ pending for a fetch.
+ """
+ self.current = current
+ self.key_count = key_count
+ self.max = self._estimate_max(key_count)
+ self.initialized = True
+
+ def increment(self, count):
+ """Increment self.current by count.
+
+ Apart from incrementing self.current by count, also ensure
+ that self.max > self.current.
+ """
+ self.current += count
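+ # If the estimate has been exceeded, widen it so the bar never looks finished early.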
+ if self.current > self.max:
+ self.max += self.key_count
+
diff --git a/bzrlib/registry.py b/bzrlib/registry.py
new file mode 100644
index 0000000..82cafb7
--- /dev/null
+++ b/bzrlib/registry.py
@@ -0,0 +1,291 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Classes to provide name-to-object registry-like support."""
+
+from __future__ import absolute_import
+
+from bzrlib.pyutils import get_named_object
+
+
+class _ObjectGetter(object):
+ """Maintain a reference to an object, and return the object on request.
+
+ This is used by Registry to make plain objects function similarly
+ to lazily imported objects.
+
+ Objects can be any sort of python object (class, function, module,
+ instance, etc)
+ """
+
+ __slots__ = ['_obj']
+
+ def __init__(self, obj):
+ self._obj = obj
+
+ def get_module(self):
+ """Get the module the object was loaded from."""
+ return self._obj.__module__
+
+ def get_obj(self):
+ """Get the object that was saved at creation time"""
+ return self._obj
+
+
+class _LazyObjectGetter(_ObjectGetter):
+ """Keep a record of a possible object.
+
+ When requested, load and return it.
+ """
+
+ __slots__ = ['_module_name', '_member_name', '_imported']
+
+ def __init__(self, module_name, member_name):
+ self._module_name = module_name
+ self._member_name = member_name
+ self._imported = False
+ super(_LazyObjectGetter, self).__init__(None)
+
+ def get_module(self):
+ """Get the module the referenced object will be loaded from.
+ """
+ return self._module_name
+
+ def get_obj(self):
+ """Get the referenced object.
+
+ Upon first request, the object will be imported. Future requests will
+ return the imported object.
+ """
+ if not self._imported:
+ self._obj = get_named_object(self._module_name, self._member_name)
+ self._imported = True
+ return super(_LazyObjectGetter, self).get_obj()
+
+ def __repr__(self):
+ return "<%s.%s object at %x, module=%r attribute=%r imported=%r>" % (
+ self.__class__.__module__, self.__class__.__name__, id(self),
+ self._module_name, self._member_name, self._imported)
+
+
+class Registry(object):
+ """A class that registers objects to a name.
+
+ There are many places that want to collect related objects and access them
+ by a key. This class is designed to allow registering the mapping from key
+ to object. It goes one step further, and allows registering a name to a
+ hypothetical object which has not been imported yet. It also supports
+ adding additional information at registration time so that decisions can be
+ made without having to import the object (which may be expensive).
+
+ The functions 'get', 'get_info', and 'get_help' also support a
+    'default_key' (settable through my_registry.default_key = XXX; XXX must
+    already be registered). Calling my_registry.get() or my_registry.get(None)
+    will return the entry for the default key.
+ """
+
+ def __init__(self):
+ """Create a new Registry."""
+ self._default_key = None
+ # Map from key => (is_lazy, info)
+ self._dict = {}
+ self._help_dict = {}
+ self._info_dict = {}
+
+ def register(self, key, obj, help=None, info=None,
+ override_existing=False):
+ """Register a new object to a name.
+
+ :param key: This is the key to use to request the object later.
+ :param obj: The object to register.
+ :param help: Help text for this entry. This may be a string or
+ a callable. If it is a callable, it should take two
+ parameters (registry, key): this registry and the key that
+ the help was registered under.
+ :param info: More information for this entry. Registry.get_info()
+ can be used to get this information. Registry treats this as an
+ opaque storage location (it is defined by the caller).
+        :param override_existing: Raise KeyError if False and something has
+ already been registered for that key. If True, ignore if there
+ is an existing key (always register the new value).
+ """
+ if not override_existing:
+ if key in self._dict:
+ raise KeyError('Key %r already registered' % key)
+ self._dict[key] = _ObjectGetter(obj)
+ self._add_help_and_info(key, help=help, info=info)
+
+ def register_lazy(self, key, module_name, member_name,
+ help=None, info=None,
+ override_existing=False):
+ """Register a new object to be loaded on request.
+
+ :param key: This is the key to use to request the object later.
+ :param module_name: The python path to the module. Such as 'os.path'.
+ :param member_name: The member of the module to return. If empty or
+ None, get() will return the module itself.
+ :param help: Help text for this entry. This may be a string or
+ a callable.
+ :param info: More information for this entry. Registry.get_info()
+ can be used to get this information. Registry treats this as an
+ opaque storage location (it is defined by the caller).
+ :param override_existing: If True, replace the existing object
+ with the new one. If False, if there is already something
+ registered with the same key, raise a KeyError
+ """
+ if not override_existing:
+ if key in self._dict:
+ raise KeyError('Key %r already registered' % key)
+ self._dict[key] = _LazyObjectGetter(module_name, member_name)
+ self._add_help_and_info(key, help=help, info=info)
+
+ def _add_help_and_info(self, key, help=None, info=None):
+ """Add the help and information about this key"""
+ self._help_dict[key] = help
+ self._info_dict[key] = info
+
+ def get(self, key=None):
+ """Return the object register()'ed to the given key.
+
+ May raise ImportError if the object was registered lazily and
+ there are any problems, or AttributeError if the module does not
+ have the supplied member.
+
+ :param key: The key to obtain the object for. If no object has been
+ registered to that key, the object registered for self.default_key
+ will be returned instead, if it exists. Otherwise KeyError will be
+ raised.
+ :return: The previously registered object.
+ :raises ImportError: If the object was registered lazily, and there are
+ problems during import.
+ :raises AttributeError: If registered lazily, and the module does not
+ contain the registered member.
+ """
+ return self._dict[self._get_key_or_default(key)].get_obj()
+
+ def _get_module(self, key):
+ """Return the module the object will be or was loaded from.
+
+ :param key: The key to obtain the module for.
+ :return: The name of the module
+ """
+ return self._dict[key].get_module()
+
+ def get_prefix(self, fullname):
+ """Return an object whose key is a prefix of the supplied value.
+
+        :param fullname: The name to find a prefix for
+ :return: a tuple of (object, remainder), where the remainder is the
+ portion of the name that did not match the key.
+ """
+ for key in self.keys():
+ if fullname.startswith(key):
+ return self.get(key), fullname[len(key):]
+
+ def _get_key_or_default(self, key=None):
+ """Return either 'key' or the default key if key is None"""
+ if key is not None:
+ return key
+ if self.default_key is None:
+ raise KeyError('Key is None, and no default key is set')
+ else:
+ return self.default_key
+
+ def get_help(self, key=None):
+ """Get the help text associated with the given key"""
+ the_help = self._help_dict[self._get_key_or_default(key)]
+ if callable(the_help):
+ return the_help(self, key)
+ return the_help
+
+ def get_info(self, key=None):
+ """Get the extra information associated with the given key"""
+ return self._info_dict[self._get_key_or_default(key)]
+
+ def remove(self, key):
+ """Remove a registered entry.
+
+ This is mostly for the test suite, but it can be used by others
+ """
+ del self._dict[key]
+
+ def __contains__(self, key):
+ return key in self._dict
+
+ def keys(self):
+ """Get a list of registered entries"""
+ return sorted(self._dict.keys())
+
+ def iteritems(self):
+ for key, getter in self._dict.iteritems():
+ yield key, getter.get_obj()
+
+ def items(self):
+ # We should not use the iteritems() implementation below (see bug
+ # #430510)
+ return sorted([(key, getter.get_obj())
+ for key, getter in self._dict.items()])
+
+ def _set_default_key(self, key):
+        if key not in self._dict:
+ raise KeyError('No object registered under key %s.' % key)
+ else:
+ self._default_key = key
+
+ def _get_default_key(self):
+ return self._default_key
+
+ default_key = property(_get_default_key, _set_default_key,
+ doc="Current value of the default key."
+ " Can be set to any existing key.")
+
+
+class FormatRegistry(Registry):
+ """Registry specialised for handling formats."""
+
+ def __init__(self, other_registry=None):
+ Registry.__init__(self)
+ self._other_registry = other_registry
+
+ def register(self, key, obj, help=None, info=None,
+ override_existing=False):
+ Registry.register(self, key, obj, help=help, info=info,
+ override_existing=override_existing)
+ if self._other_registry is not None:
+ self._other_registry.register(key, obj, help=help,
+ info=info, override_existing=override_existing)
+
+ def register_lazy(self, key, module_name, member_name,
+ help=None, info=None,
+ override_existing=False):
+        # Overridden to allow capturing registrations to two separate
+ # registries in a single call.
+ Registry.register_lazy(self, key, module_name, member_name,
+ help=help, info=info, override_existing=override_existing)
+ if self._other_registry is not None:
+ self._other_registry.register_lazy(key, module_name, member_name,
+ help=help, info=info, override_existing=override_existing)
+
+ def remove(self, key):
+ Registry.remove(self, key)
+ if self._other_registry is not None:
+ self._other_registry.remove(key)
+
+ def get(self, format_string):
+ r = Registry.get(self, format_string)
+ if callable(r):
+ r = r()
+ return r
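
For orientation, a small sketch of typical Registry usage; the keys and
objects registered below are made up for the example, and the lazy entry
simply points at a stdlib member:

    from bzrlib.registry import Registry

    reg = Registry()
    reg.register('plain', dict, help='An eagerly registered object.')
    # register_lazy only records the module/member names; the import happens
    # on the first get('joiner') call.
    reg.register_lazy('joiner', 'os.path', 'join', help='Loaded on demand.')
    reg.default_key = 'plain'

    reg.get('joiner')    # imports os.path and returns os.path.join
    reg.get()            # no key given: falls back to the default key ('plain')
    reg.keys()           # ['joiner', 'plain']
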
diff --git a/bzrlib/remote.py b/bzrlib/remote.py
new file mode 100644
index 0000000..fa4f1c9
--- /dev/null
+++ b/bzrlib/remote.py
@@ -0,0 +1,4291 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import bz2
+import zlib
+
+from bzrlib import (
+ bencode,
+ branch,
+ bzrdir as _mod_bzrdir,
+ config as _mod_config,
+ controldir,
+ debug,
+ errors,
+ gpg,
+ graph,
+ inventory_delta,
+ lock,
+ lockdir,
+ osutils,
+ registry,
+ repository as _mod_repository,
+ revision as _mod_revision,
+ static_tuple,
+ symbol_versioning,
+ testament as _mod_testament,
+ urlutils,
+ vf_repository,
+ vf_search,
+ )
+from bzrlib.branch import BranchReferenceFormat, BranchWriteLockResult
+from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
+from bzrlib.errors import (
+ NoSuchRevision,
+ SmartProtocolError,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.inventory import Inventory
+from bzrlib.lockable_files import LockableFiles
+from bzrlib.smart import client, vfs, repository as smart_repo
+from bzrlib.smart.client import _SmartClient
+from bzrlib.revision import NULL_REVISION
+from bzrlib.revisiontree import InventoryRevisionTree
+from bzrlib.repository import RepositoryWriteLockResult, _LazyListJoin
+from bzrlib.serializer import format_registry as serializer_format_registry
+from bzrlib.trace import mutter, note, warning, log_exception_quietly
+from bzrlib.versionedfile import FulltextContentFactory
+
+
+_DEFAULT_SEARCH_DEPTH = 100
+
+
+class _RpcHelper(object):
+ """Mixin class that helps with issuing RPCs."""
+
+ def _call(self, method, *args, **err_context):
+ try:
+ return self._client.call(method, *args)
+ except errors.ErrorFromSmartServer, err:
+ self._translate_error(err, **err_context)
+
+ def _call_expecting_body(self, method, *args, **err_context):
+ try:
+ return self._client.call_expecting_body(method, *args)
+ except errors.ErrorFromSmartServer, err:
+ self._translate_error(err, **err_context)
+
+ def _call_with_body_bytes(self, method, args, body_bytes, **err_context):
+ try:
+ return self._client.call_with_body_bytes(method, args, body_bytes)
+ except errors.ErrorFromSmartServer, err:
+ self._translate_error(err, **err_context)
+
+ def _call_with_body_bytes_expecting_body(self, method, args, body_bytes,
+ **err_context):
+ try:
+ return self._client.call_with_body_bytes_expecting_body(
+ method, args, body_bytes)
+ except errors.ErrorFromSmartServer, err:
+ self._translate_error(err, **err_context)
+
+
+def response_tuple_to_repo_format(response):
+ """Convert a response tuple describing a repository format to a format."""
+ format = RemoteRepositoryFormat()
+ format._rich_root_data = (response[0] == 'yes')
+ format._supports_tree_reference = (response[1] == 'yes')
+ format._supports_external_lookups = (response[2] == 'yes')
+ format._network_name = response[3]
+ return format
+
+
+# Note that RemoteBzrDirProber lives in bzrlib.bzrdir so bzrlib.remote
+# does not have to be imported unless a remote format is involved.
+
+class RemoteBzrDirFormat(_mod_bzrdir.BzrDirMetaFormat1):
+ """Format representing bzrdirs accessed via a smart server"""
+
+ supports_workingtrees = False
+
+ colocated_branches = False
+
+ def __init__(self):
+ _mod_bzrdir.BzrDirMetaFormat1.__init__(self)
+ # XXX: It's a bit ugly that the network name is here, because we'd
+ # like to believe that format objects are stateless or at least
+        # immutable. However, we do at least avoid mutating the name after
+ # it's returned. See <https://bugs.launchpad.net/bzr/+bug/504102>
+ self._network_name = None
+
+ def __repr__(self):
+ return "%s(_network_name=%r)" % (self.__class__.__name__,
+ self._network_name)
+
+ def get_format_description(self):
+ if self._network_name:
+ try:
+ real_format = controldir.network_format_registry.get(
+ self._network_name)
+ except KeyError:
+ pass
+ else:
+ return 'Remote: ' + real_format.get_format_description()
+ return 'bzr remote bzrdir'
+
+ def get_format_string(self):
+ raise NotImplementedError(self.get_format_string)
+
+ def network_name(self):
+ if self._network_name:
+ return self._network_name
+ else:
+ raise AssertionError("No network name set.")
+
+ def initialize_on_transport(self, transport):
+ try:
+ # hand off the request to the smart server
+ client_medium = transport.get_smart_medium()
+ except errors.NoSmartMedium:
+ # TODO: lookup the local format from a server hint.
+ local_dir_format = _mod_bzrdir.BzrDirMetaFormat1()
+ return local_dir_format.initialize_on_transport(transport)
+ client = _SmartClient(client_medium)
+ path = client.remote_path_from_transport(transport)
+ try:
+ response = client.call('BzrDirFormat.initialize', path)
+ except errors.ErrorFromSmartServer, err:
+ _translate_error(err, path=path)
+ if response[0] != 'ok':
+ raise errors.SmartProtocolError('unexpected response code %s' % (response,))
+ format = RemoteBzrDirFormat()
+ self._supply_sub_formats_to(format)
+ return RemoteBzrDir(transport, format)
+
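+    # The NoneTrueFalse/NoneString helpers below implement the wire encoding
+    # used by the initialize_ex call: the empty string stands for None, and
+    # 'True'/'False' encode booleans.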
+ def parse_NoneTrueFalse(self, arg):
+ if not arg:
+ return None
+ if arg == 'False':
+ return False
+ if arg == 'True':
+ return True
+ raise AssertionError("invalid arg %r" % arg)
+
+ def _serialize_NoneTrueFalse(self, arg):
+ if arg is False:
+ return 'False'
+ if arg:
+ return 'True'
+ return ''
+
+ def _serialize_NoneString(self, arg):
+ return arg or ''
+
+ def initialize_on_transport_ex(self, transport, use_existing_dir=False,
+ create_prefix=False, force_new_repo=False, stacked_on=None,
+ stack_on_pwd=None, repo_format_name=None, make_working_trees=None,
+ shared_repo=False):
+ try:
+ # hand off the request to the smart server
+ client_medium = transport.get_smart_medium()
+ except errors.NoSmartMedium:
+ do_vfs = True
+ else:
+ # Decline to open it if the server doesn't support our required
+ # version (3) so that the VFS-based transport will do it.
+ if client_medium.should_probe():
+ try:
+ server_version = client_medium.protocol_version()
+ if server_version != '2':
+ do_vfs = True
+ else:
+ do_vfs = False
+ except errors.SmartProtocolError:
+ # Apparently there's no usable smart server there, even though
+ # the medium supports the smart protocol.
+ do_vfs = True
+ else:
+ do_vfs = False
+ if not do_vfs:
+ client = _SmartClient(client_medium)
+ path = client.remote_path_from_transport(transport)
+ if client_medium._is_remote_before((1, 16)):
+ do_vfs = True
+ if do_vfs:
+ # TODO: lookup the local format from a server hint.
+ local_dir_format = _mod_bzrdir.BzrDirMetaFormat1()
+ self._supply_sub_formats_to(local_dir_format)
+ return local_dir_format.initialize_on_transport_ex(transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=stack_on_pwd, repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=shared_repo,
+ vfs_only=True)
+ return self._initialize_on_transport_ex_rpc(client, path, transport,
+ use_existing_dir, create_prefix, force_new_repo, stacked_on,
+ stack_on_pwd, repo_format_name, make_working_trees, shared_repo)
+
+ def _initialize_on_transport_ex_rpc(self, client, path, transport,
+ use_existing_dir, create_prefix, force_new_repo, stacked_on,
+ stack_on_pwd, repo_format_name, make_working_trees, shared_repo):
+ args = []
+ args.append(self._serialize_NoneTrueFalse(use_existing_dir))
+ args.append(self._serialize_NoneTrueFalse(create_prefix))
+ args.append(self._serialize_NoneTrueFalse(force_new_repo))
+ args.append(self._serialize_NoneString(stacked_on))
+ # stack_on_pwd is often/usually our transport
+ if stack_on_pwd:
+ try:
+ stack_on_pwd = transport.relpath(stack_on_pwd)
+ if not stack_on_pwd:
+ stack_on_pwd = '.'
+ except errors.PathNotChild:
+ pass
+ args.append(self._serialize_NoneString(stack_on_pwd))
+ args.append(self._serialize_NoneString(repo_format_name))
+ args.append(self._serialize_NoneTrueFalse(make_working_trees))
+ args.append(self._serialize_NoneTrueFalse(shared_repo))
+ request_network_name = self._network_name or \
+ _mod_bzrdir.BzrDirFormat.get_default_format().network_name()
+ try:
+ response = client.call('BzrDirFormat.initialize_ex_1.16',
+ request_network_name, path, *args)
+ except errors.UnknownSmartMethod:
+ client._medium._remember_remote_is_before((1,16))
+ local_dir_format = _mod_bzrdir.BzrDirMetaFormat1()
+ self._supply_sub_formats_to(local_dir_format)
+ return local_dir_format.initialize_on_transport_ex(transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=stack_on_pwd, repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=shared_repo,
+ vfs_only=True)
+ except errors.ErrorFromSmartServer, err:
+ _translate_error(err, path=path)
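+        # The response tuple is unpacked field by field below:
+        # (repo_path, rich_root, tree_ref, external_lookups,
+        #  repo_network_name, repo_bzrdir_network_name, bzrdir_network_name,
+        #  require_stacking, final_stack, final_stack_pwd[, repo_lock_token]).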
+ repo_path = response[0]
+ bzrdir_name = response[6]
+ require_stacking = response[7]
+ require_stacking = self.parse_NoneTrueFalse(require_stacking)
+ format = RemoteBzrDirFormat()
+ format._network_name = bzrdir_name
+ self._supply_sub_formats_to(format)
+ bzrdir = RemoteBzrDir(transport, format, _client=client)
+ if repo_path:
+ repo_format = response_tuple_to_repo_format(response[1:])
+ if repo_path == '.':
+ repo_path = ''
+ if repo_path:
+ repo_bzrdir_format = RemoteBzrDirFormat()
+ repo_bzrdir_format._network_name = response[5]
+ repo_bzr = RemoteBzrDir(transport.clone(repo_path),
+ repo_bzrdir_format)
+ else:
+ repo_bzr = bzrdir
+ final_stack = response[8] or None
+ final_stack_pwd = response[9] or None
+ if final_stack_pwd:
+ final_stack_pwd = urlutils.join(
+ transport.base, final_stack_pwd)
+ remote_repo = RemoteRepository(repo_bzr, repo_format)
+ if len(response) > 10:
+ # Updated server verb that locks remotely.
+ repo_lock_token = response[10] or None
+ remote_repo.lock_write(repo_lock_token, _skip_rpc=True)
+ if repo_lock_token:
+ remote_repo.dont_leave_lock_in_place()
+ else:
+ remote_repo.lock_write()
+ policy = _mod_bzrdir.UseExistingRepository(remote_repo, final_stack,
+ final_stack_pwd, require_stacking)
+ policy.acquire_repository()
+ else:
+ remote_repo = None
+ policy = None
+ bzrdir._format.set_branch_format(self.get_branch_format())
+ if require_stacking:
+ # The repo has already been created, but we need to make sure that
+ # we'll make a stackable branch.
+ bzrdir._format.require_stacking(_skip_repo=True)
+ return remote_repo, bzrdir, require_stacking, policy
+
+ def _open(self, transport):
+ return RemoteBzrDir(transport, self)
+
+ def __eq__(self, other):
+ if not isinstance(other, RemoteBzrDirFormat):
+ return False
+ return self.get_format_description() == other.get_format_description()
+
+ def __return_repository_format(self):
+ # Always return a RemoteRepositoryFormat object, but if a specific bzr
+ # repository format has been asked for, tell the RemoteRepositoryFormat
+ # that it should use that for init() etc.
+ result = RemoteRepositoryFormat()
+ custom_format = getattr(self, '_repository_format', None)
+ if custom_format:
+ if isinstance(custom_format, RemoteRepositoryFormat):
+ return custom_format
+ else:
+ # We will use the custom format to create repositories over the
+ # wire; expose its details like rich_root_data for code to
+ # query
+ result._custom_format = custom_format
+ return result
+
+ def get_branch_format(self):
+ result = _mod_bzrdir.BzrDirMetaFormat1.get_branch_format(self)
+ if not isinstance(result, RemoteBranchFormat):
+ new_result = RemoteBranchFormat()
+ new_result._custom_format = result
+ # cache the result
+ self.set_branch_format(new_result)
+ result = new_result
+ return result
+
+ repository_format = property(__return_repository_format,
+ _mod_bzrdir.BzrDirMetaFormat1._set_repository_format) #.im_func)
+
+
+class RemoteControlStore(_mod_config.IniFileStore):
+ """Control store which attempts to use HPSS calls to retrieve control store.
+
+ Note that this is specific to bzr-based formats.
+ """
+
+ def __init__(self, bzrdir):
+ super(RemoteControlStore, self).__init__()
+ self.bzrdir = bzrdir
+ self._real_store = None
+
+ def lock_write(self, token=None):
+ self._ensure_real()
+ return self._real_store.lock_write(token)
+
+ def unlock(self):
+ self._ensure_real()
+ return self._real_store.unlock()
+
+ @needs_write_lock
+ def save(self):
+ # We need to be able to override the undecorated implementation
+ self.save_without_locking()
+
+ def save_without_locking(self):
+ super(RemoteControlStore, self).save()
+
+ def _ensure_real(self):
+ self.bzrdir._ensure_real()
+ if self._real_store is None:
+ self._real_store = _mod_config.ControlStore(self.bzrdir)
+
+ def external_url(self):
+ return self.bzrdir.user_url
+
+ def _load_content(self):
+ medium = self.bzrdir._client._medium
+ path = self.bzrdir._path_for_remote_call(self.bzrdir._client)
+ try:
+ response, handler = self.bzrdir._call_expecting_body(
+ 'BzrDir.get_config_file', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_store._load_content()
+ if len(response) and response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ return handler.read_body_bytes()
+
+ def _save_content(self, content):
+ # FIXME JRV 2011-11-22: Ideally this should use a
+ # HPSS call too, but at the moment it is not possible
+ # to write lock control directories.
+ self._ensure_real()
+ return self._real_store._save_content(content)
+
+
+class RemoteBzrDir(_mod_bzrdir.BzrDir, _RpcHelper):
+ """Control directory on a remote server, accessed via bzr:// or similar."""
+
+ def __init__(self, transport, format, _client=None, _force_probe=False):
+ """Construct a RemoteBzrDir.
+
+ :param _client: Private parameter for testing. Disables probing and the
+ use of a real bzrdir.
+ """
+ _mod_bzrdir.BzrDir.__init__(self, transport, format)
+ # this object holds a delegated bzrdir that uses file-level operations
+ # to talk to the other side
+ self._real_bzrdir = None
+ self._has_working_tree = None
+ # 1-shot cache for the call pattern 'create_branch; open_branch' - see
+ # create_branch for details.
+ self._next_open_branch_result = None
+
+ if _client is None:
+ medium = transport.get_smart_medium()
+ self._client = client._SmartClient(medium)
+ else:
+ self._client = _client
+ if not _force_probe:
+ return
+
+ self._probe_bzrdir()
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, self._client)
+
+ def _probe_bzrdir(self):
+ medium = self._client._medium
+ path = self._path_for_remote_call(self._client)
+ if medium._is_remote_before((2, 1)):
+ self._rpc_open(path)
+ return
+ try:
+ self._rpc_open_2_1(path)
+ return
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((2, 1))
+ self._rpc_open(path)
+
+ def _rpc_open_2_1(self, path):
+ response = self._call('BzrDir.open_2.1', path)
+ if response == ('no',):
+ raise errors.NotBranchError(path=self.root_transport.base)
+ elif response[0] == 'yes':
+ if response[1] == 'yes':
+ self._has_working_tree = True
+ elif response[1] == 'no':
+ self._has_working_tree = False
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _rpc_open(self, path):
+ response = self._call('BzrDir.open', path)
+ if response not in [('yes',), ('no',)]:
+ raise errors.UnexpectedSmartServerResponse(response)
+ if response == ('no',):
+ raise errors.NotBranchError(path=self.root_transport.base)
+
+ def _ensure_real(self):
+ """Ensure that there is a _real_bzrdir set.
+
+ Used before calls to self._real_bzrdir.
+ """
+ if not self._real_bzrdir:
+ if 'hpssvfs' in debug.debug_flags:
+ import traceback
+ warning('VFS BzrDir access triggered\n%s',
+ ''.join(traceback.format_stack()))
+ self._real_bzrdir = _mod_bzrdir.BzrDir.open_from_transport(
+ self.root_transport, probers=[_mod_bzrdir.BzrProber])
+ self._format._network_name = \
+ self._real_bzrdir._format.network_name()
+
+ def _translate_error(self, err, **context):
+ _translate_error(err, bzrdir=self, **context)
+
+ def break_lock(self):
+ # Prevent aliasing problems in the next_open_branch_result cache.
+ # See create_branch for rationale.
+ self._next_open_branch_result = None
+ return _mod_bzrdir.BzrDir.break_lock(self)
+
+ def _vfs_checkout_metadir(self):
+ self._ensure_real()
+ return self._real_bzrdir.checkout_metadir()
+
+ def checkout_metadir(self):
+ """Retrieve the controldir format to use for checkouts of this one.
+ """
+ medium = self._client._medium
+ if medium._is_remote_before((2, 5)):
+ return self._vfs_checkout_metadir()
+ path = self._path_for_remote_call(self._client)
+ try:
+ response = self._client.call('BzrDir.checkout_metadir',
+ path)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((2, 5))
+ return self._vfs_checkout_metadir()
+ if len(response) != 3:
+ raise errors.UnexpectedSmartServerResponse(response)
+ control_name, repo_name, branch_name = response
+ try:
+ format = controldir.network_format_registry.get(control_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='control',
+ format=control_name)
+ if repo_name:
+ try:
+ repo_format = _mod_repository.network_format_registry.get(
+ repo_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='repository',
+ format=repo_name)
+ format.repository_format = repo_format
+ if branch_name:
+ try:
+ format.set_branch_format(
+ branch.network_format_registry.get(branch_name))
+ except KeyError:
+ raise errors.UnknownFormatError(kind='branch',
+ format=branch_name)
+ return format
+
+ def _vfs_cloning_metadir(self, require_stacking=False):
+ self._ensure_real()
+ return self._real_bzrdir.cloning_metadir(
+ require_stacking=require_stacking)
+
+ def cloning_metadir(self, require_stacking=False):
+ medium = self._client._medium
+ if medium._is_remote_before((1, 13)):
+ return self._vfs_cloning_metadir(require_stacking=require_stacking)
+ verb = 'BzrDir.cloning_metadir'
+ if require_stacking:
+ stacking = 'True'
+ else:
+ stacking = 'False'
+ path = self._path_for_remote_call(self._client)
+ try:
+ response = self._call(verb, path, stacking)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 13))
+ return self._vfs_cloning_metadir(require_stacking=require_stacking)
+ except errors.UnknownErrorFromSmartServer, err:
+ if err.error_tuple != ('BranchReference',):
+ raise
+ # We need to resolve the branch reference to determine the
+ # cloning_metadir. This causes unnecessary RPCs to open the
+ # referenced branch (and bzrdir, etc) but only when the caller
+ # didn't already resolve the branch reference.
+ referenced_branch = self.open_branch()
+ return referenced_branch.bzrdir.cloning_metadir()
+ if len(response) != 3:
+ raise errors.UnexpectedSmartServerResponse(response)
+ control_name, repo_name, branch_info = response
+ if len(branch_info) != 2:
+ raise errors.UnexpectedSmartServerResponse(response)
+ branch_ref, branch_name = branch_info
+ try:
+ format = controldir.network_format_registry.get(control_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='control', format=control_name)
+
+ if repo_name:
+ try:
+ format.repository_format = _mod_repository.network_format_registry.get(
+ repo_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='repository',
+ format=repo_name)
+ if branch_ref == 'ref':
+ # XXX: we need possible_transports here to avoid reopening the
+ # connection to the referenced location
+ ref_bzrdir = _mod_bzrdir.BzrDir.open(branch_name)
+ branch_format = ref_bzrdir.cloning_metadir().get_branch_format()
+ format.set_branch_format(branch_format)
+ elif branch_ref == 'branch':
+ if branch_name:
+ try:
+ branch_format = branch.network_format_registry.get(
+ branch_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='branch',
+ format=branch_name)
+ format.set_branch_format(branch_format)
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+ return format
+
+ def create_repository(self, shared=False):
+ # as per meta1 formats - just delegate to the format object which may
+ # be parameterised.
+ result = self._format.repository_format.initialize(self, shared)
+ if not isinstance(result, RemoteRepository):
+ return self.open_repository()
+ else:
+ return result
+
+ def destroy_repository(self):
+ """See BzrDir.destroy_repository"""
+ path = self._path_for_remote_call(self._client)
+ try:
+ response = self._call('BzrDir.destroy_repository', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ self._real_bzrdir.destroy_repository()
+ return
+ if response[0] != 'ok':
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+
+ def create_branch(self, name=None, repository=None,
+ append_revisions_only=None):
+ if name is None:
+ name = self._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ # as per meta1 formats - just delegate to the format object which may
+ # be parameterised.
+ real_branch = self._format.get_branch_format().initialize(self,
+ name=name, repository=repository,
+ append_revisions_only=append_revisions_only)
+ if not isinstance(real_branch, RemoteBranch):
+ if not isinstance(repository, RemoteRepository):
+ raise AssertionError(
+ 'need a RemoteRepository to use with RemoteBranch, got %r'
+ % (repository,))
+ result = RemoteBranch(self, repository, real_branch, name=name)
+ else:
+ result = real_branch
+ # BzrDir.clone_on_transport() uses the result of create_branch but does
+ # not return it to its callers; we save approximately 8% of our round
+ # trips by handing the branch we created back to the first caller to
+        # open_branch rather than probing anew. Long term we need an API in
+ # bzrdir that doesn't discard result objects (like result_branch).
+ # RBC 20090225
+ self._next_open_branch_result = result
+ return result
+
+ def destroy_branch(self, name=None):
+ """See BzrDir.destroy_branch"""
+ if name is None:
+ name = self._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ path = self._path_for_remote_call(self._client)
+ try:
+ if name != "":
+ args = (name, )
+ else:
+ args = ()
+ response = self._call('BzrDir.destroy_branch', path, *args)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ self._real_bzrdir.destroy_branch(name=name)
+ self._next_open_branch_result = None
+ return
+ self._next_open_branch_result = None
+ if response[0] != 'ok':
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+
+ def create_workingtree(self, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ raise errors.NotLocalUrl(self.transport.base)
+
+ def find_branch_format(self, name=None):
+ """Find the branch 'format' for this bzrdir.
+
+ This might be a synthetic object for e.g. RemoteBranch and SVN.
+ """
+ b = self.open_branch(name=name)
+ return b._format
+
+ def get_branches(self, possible_transports=None, ignore_fallbacks=False):
+ path = self._path_for_remote_call(self._client)
+ try:
+ response, handler = self._call_expecting_body(
+ 'BzrDir.get_branches', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_bzrdir.get_branches()
+ if response[0] != "success":
+ raise errors.UnexpectedSmartServerResponse(response)
+ body = bencode.bdecode(handler.read_body_bytes())
+ ret = {}
+ for (name, value) in body.iteritems():
+ ret[name] = self._open_branch(name, value[0], value[1],
+ possible_transports=possible_transports,
+ ignore_fallbacks=ignore_fallbacks)
+ return ret
+
+ def set_branch_reference(self, target_branch, name=None):
+ """See BzrDir.set_branch_reference()."""
+ if name is None:
+ name = self._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ self._ensure_real()
+ return self._real_bzrdir.set_branch_reference(target_branch, name=name)
+
+ def get_branch_reference(self, name=None):
+ """See BzrDir.get_branch_reference()."""
+ if name is None:
+ name = self._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ response = self._get_branch_reference()
+ if response[0] == 'ref':
+ return response[1]
+ else:
+ return None
+
+ def _get_branch_reference(self):
+ path = self._path_for_remote_call(self._client)
+ medium = self._client._medium
+ candidate_calls = [
+ ('BzrDir.open_branchV3', (2, 1)),
+ ('BzrDir.open_branchV2', (1, 13)),
+ ('BzrDir.open_branch', None),
+ ]
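+        # Try the newest verb first; skip verbs the server is already known
+        # not to support, and remember a failed probe so later calls go
+        # straight to the older verb.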
+ for verb, required_version in candidate_calls:
+ if required_version and medium._is_remote_before(required_version):
+ continue
+ try:
+ response = self._call(verb, path)
+ except errors.UnknownSmartMethod:
+ if required_version is None:
+ raise
+ medium._remember_remote_is_before(required_version)
+ else:
+ break
+ if verb == 'BzrDir.open_branch':
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ if response[1] != '':
+ return ('ref', response[1])
+ else:
+ return ('branch', '')
+ if response[0] not in ('ref', 'branch'):
+ raise errors.UnexpectedSmartServerResponse(response)
+ return response
+
+ def _get_tree_branch(self, name=None):
+ """See BzrDir._get_tree_branch()."""
+ return None, self.open_branch(name=name)
+
+ def _open_branch(self, name, kind, location_or_format,
+ ignore_fallbacks=False, possible_transports=None):
+ if kind == 'ref':
+ # a branch reference, use the existing BranchReference logic.
+ format = BranchReferenceFormat()
+ return format.open(self, name=name, _found=True,
+ location=location_or_format, ignore_fallbacks=ignore_fallbacks,
+ possible_transports=possible_transports)
+ branch_format_name = location_or_format
+ if not branch_format_name:
+ branch_format_name = None
+ format = RemoteBranchFormat(network_name=branch_format_name)
+ return RemoteBranch(self, self.find_repository(), format=format,
+ setup_stacking=not ignore_fallbacks, name=name,
+ possible_transports=possible_transports)
+
+ def open_branch(self, name=None, unsupported=False,
+ ignore_fallbacks=False, possible_transports=None):
+ if name is None:
+ name = self._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ if unsupported:
+ raise NotImplementedError('unsupported flag support not implemented yet.')
+ if self._next_open_branch_result is not None:
+ # See create_branch for details.
+ result = self._next_open_branch_result
+ self._next_open_branch_result = None
+ return result
+ response = self._get_branch_reference()
+ return self._open_branch(name, response[0], response[1],
+ possible_transports=possible_transports,
+ ignore_fallbacks=ignore_fallbacks)
+
+ def _open_repo_v1(self, path):
+ verb = 'BzrDir.find_repository'
+ response = self._call(verb, path)
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ # servers that only support the v1 method don't support external
+ # references either.
+ self._ensure_real()
+ repo = self._real_bzrdir.open_repository()
+ response = response + ('no', repo._format.network_name())
+ return response, repo
+
+ def _open_repo_v2(self, path):
+ verb = 'BzrDir.find_repositoryV2'
+ response = self._call(verb, path)
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._ensure_real()
+ repo = self._real_bzrdir.open_repository()
+ response = response + (repo._format.network_name(),)
+ return response, repo
+
+ def _open_repo_v3(self, path):
+ verb = 'BzrDir.find_repositoryV3'
+ medium = self._client._medium
+ if medium._is_remote_before((1, 13)):
+ raise errors.UnknownSmartMethod(verb)
+ try:
+ response = self._call(verb, path)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 13))
+ raise
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ return response, None
+
+ def open_repository(self):
+ path = self._path_for_remote_call(self._client)
+ response = None
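+        # Probe the newest find_repository verb first and fall back to the
+        # older ones; each probe raises UnknownSmartMethod when unsupported.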
+ for probe in [self._open_repo_v3, self._open_repo_v2,
+ self._open_repo_v1]:
+ try:
+ response, real_repo = probe(path)
+ break
+ except errors.UnknownSmartMethod:
+ pass
+ if response is None:
+ raise errors.UnknownSmartMethod('BzrDir.find_repository{3,2,}')
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ if len(response) != 6:
+ raise SmartProtocolError('incorrect response length %s' % (response,))
+ if response[1] == '':
+ # repo is at this dir.
+ format = response_tuple_to_repo_format(response[2:])
+ # Used to support creating a real format instance when needed.
+ format._creating_bzrdir = self
+ remote_repo = RemoteRepository(self, format)
+ format._creating_repo = remote_repo
+ if real_repo is not None:
+ remote_repo._set_real_repository(real_repo)
+ return remote_repo
+ else:
+ raise errors.NoRepositoryPresent(self)
+
+ def has_workingtree(self):
+ if self._has_working_tree is None:
+ path = self._path_for_remote_call(self._client)
+ try:
+ response = self._call('BzrDir.has_workingtree', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ self._has_working_tree = self._real_bzrdir.has_workingtree()
+ else:
+ if response[0] not in ('yes', 'no'):
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+ self._has_working_tree = (response[0] == 'yes')
+ return self._has_working_tree
+
+ def open_workingtree(self, recommend_upgrade=True):
+ if self.has_workingtree():
+ raise errors.NotLocalUrl(self.root_transport)
+ else:
+ raise errors.NoWorkingTree(self.root_transport.base)
+
+ def _path_for_remote_call(self, client):
+ """Return the path to be used for this bzrdir in a remote call."""
+ return urlutils.split_segment_parameters_raw(
+ client.remote_path_from_transport(self.root_transport))[0]
+
+ def get_branch_transport(self, branch_format, name=None):
+ self._ensure_real()
+ return self._real_bzrdir.get_branch_transport(branch_format, name=name)
+
+ def get_repository_transport(self, repository_format):
+ self._ensure_real()
+ return self._real_bzrdir.get_repository_transport(repository_format)
+
+ def get_workingtree_transport(self, workingtree_format):
+ self._ensure_real()
+ return self._real_bzrdir.get_workingtree_transport(workingtree_format)
+
+ def can_convert_format(self):
+ """Upgrading of remote bzrdirs is not supported yet."""
+ return False
+
+ def needs_format_conversion(self, format):
+ """Upgrading of remote bzrdirs is not supported yet."""
+ return False
+
+ def _get_config(self):
+ return RemoteBzrDirConfig(self)
+
+ def _get_config_store(self):
+ return RemoteControlStore(self)
+
+
+class RemoteRepositoryFormat(vf_repository.VersionedFileRepositoryFormat):
+ """Format for repositories accessed over a _SmartClient.
+
+ Instances of this repository are represented by RemoteRepository
+ instances.
+
+ The RemoteRepositoryFormat is parameterized during construction
+ to reflect the capabilities of the real, remote format. Specifically
+ the attributes rich_root_data and supports_tree_reference are set
+ on a per instance basis, and are not set (and should not be) at
+ the class level.
+
+ :ivar _custom_format: If set, a specific concrete repository format that
+ will be used when initializing a repository with this
+ RemoteRepositoryFormat.
+ :ivar _creating_repo: If set, the repository object that this
+ RemoteRepositoryFormat was created for: it can be called into
+ to obtain data like the network name.
+ """
+
+ _matchingbzrdir = RemoteBzrDirFormat()
+ supports_full_versioned_files = True
+ supports_leaving_lock = True
+
+ def __init__(self):
+ _mod_repository.RepositoryFormat.__init__(self)
+ self._custom_format = None
+ self._network_name = None
+ self._creating_bzrdir = None
+ self._revision_graph_can_have_wrong_parents = None
+ self._supports_chks = None
+ self._supports_external_lookups = None
+ self._supports_tree_reference = None
+ self._supports_funky_characters = None
+ self._supports_nesting_repositories = None
+ self._rich_root_data = None
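+        # The capability attributes above are filled in lazily (via
+        # _ensure_real) the first time the corresponding property is read.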
+
+ def __repr__(self):
+ return "%s(_network_name=%r)" % (self.__class__.__name__,
+ self._network_name)
+
+ @property
+ def fast_deltas(self):
+ self._ensure_real()
+ return self._custom_format.fast_deltas
+
+ @property
+ def rich_root_data(self):
+ if self._rich_root_data is None:
+ self._ensure_real()
+ self._rich_root_data = self._custom_format.rich_root_data
+ return self._rich_root_data
+
+ @property
+ def supports_chks(self):
+ if self._supports_chks is None:
+ self._ensure_real()
+ self._supports_chks = self._custom_format.supports_chks
+ return self._supports_chks
+
+ @property
+ def supports_external_lookups(self):
+ if self._supports_external_lookups is None:
+ self._ensure_real()
+ self._supports_external_lookups = \
+ self._custom_format.supports_external_lookups
+ return self._supports_external_lookups
+
+ @property
+ def supports_funky_characters(self):
+ if self._supports_funky_characters is None:
+ self._ensure_real()
+ self._supports_funky_characters = \
+ self._custom_format.supports_funky_characters
+ return self._supports_funky_characters
+
+ @property
+ def supports_nesting_repositories(self):
+ if self._supports_nesting_repositories is None:
+ self._ensure_real()
+ self._supports_nesting_repositories = \
+ self._custom_format.supports_nesting_repositories
+ return self._supports_nesting_repositories
+
+ @property
+ def supports_tree_reference(self):
+ if self._supports_tree_reference is None:
+ self._ensure_real()
+ self._supports_tree_reference = \
+ self._custom_format.supports_tree_reference
+ return self._supports_tree_reference
+
+ @property
+ def revision_graph_can_have_wrong_parents(self):
+ if self._revision_graph_can_have_wrong_parents is None:
+ self._ensure_real()
+ self._revision_graph_can_have_wrong_parents = \
+ self._custom_format.revision_graph_can_have_wrong_parents
+ return self._revision_graph_can_have_wrong_parents
+
+ def _vfs_initialize(self, a_bzrdir, shared):
+ """Helper for common code in initialize."""
+ if self._custom_format:
+ # Custom format requested
+ result = self._custom_format.initialize(a_bzrdir, shared=shared)
+ elif self._creating_bzrdir is not None:
+ # Use the format that the repository we were created to back
+ # has.
+ prior_repo = self._creating_bzrdir.open_repository()
+ prior_repo._ensure_real()
+ result = prior_repo._real_repository._format.initialize(
+ a_bzrdir, shared=shared)
+ else:
+            # assume that a_bzrdir is a RemoteBzrDir but the smart server
+            # didn't support remote initialization.
+            # We delegate to a real object at this point: RemoteBzrDir
+            # delegates to the repository format, so just calling
+            # a_bzrdir.create_repository would lead to infinite recursion.
+ a_bzrdir._ensure_real()
+ result = a_bzrdir._real_bzrdir.create_repository(shared=shared)
+ if not isinstance(result, RemoteRepository):
+ return self.open(a_bzrdir)
+ else:
+ return result
+
+ def initialize(self, a_bzrdir, shared=False):
+ # Being asked to create on a non RemoteBzrDir:
+ if not isinstance(a_bzrdir, RemoteBzrDir):
+ return self._vfs_initialize(a_bzrdir, shared)
+ medium = a_bzrdir._client._medium
+ if medium._is_remote_before((1, 13)):
+ return self._vfs_initialize(a_bzrdir, shared)
+ # Creating on a remote bzr dir.
+ # 1) get the network name to use.
+ if self._custom_format:
+ network_name = self._custom_format.network_name()
+ elif self._network_name:
+ network_name = self._network_name
+ else:
+ # Select the current bzrlib default and ask for that.
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ reference_format = reference_bzrdir_format.repository_format
+ network_name = reference_format.network_name()
+ # 2) try direct creation via RPC
+ path = a_bzrdir._path_for_remote_call(a_bzrdir._client)
+ verb = 'BzrDir.create_repository'
+ if shared:
+ shared_str = 'True'
+ else:
+ shared_str = 'False'
+ try:
+ response = a_bzrdir._call(verb, path, network_name, shared_str)
+ except errors.UnknownSmartMethod:
+ # Fallback - use vfs methods
+ medium._remember_remote_is_before((1, 13))
+ return self._vfs_initialize(a_bzrdir, shared)
+ else:
+ # Turn the response into a RemoteRepository object.
+ format = response_tuple_to_repo_format(response[1:])
+ # Used to support creating a real format instance when needed.
+ format._creating_bzrdir = a_bzrdir
+ remote_repo = RemoteRepository(a_bzrdir, format)
+ format._creating_repo = remote_repo
+ return remote_repo
+
+ def open(self, a_bzrdir):
+ if not isinstance(a_bzrdir, RemoteBzrDir):
+ raise AssertionError('%r is not a RemoteBzrDir' % (a_bzrdir,))
+ return a_bzrdir.open_repository()
+
+ def _ensure_real(self):
+ if self._custom_format is None:
+ try:
+ self._custom_format = _mod_repository.network_format_registry.get(
+ self._network_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='repository',
+ format=self._network_name)
+
+ @property
+ def _fetch_order(self):
+ self._ensure_real()
+ return self._custom_format._fetch_order
+
+ @property
+ def _fetch_uses_deltas(self):
+ self._ensure_real()
+ return self._custom_format._fetch_uses_deltas
+
+ @property
+ def _fetch_reconcile(self):
+ self._ensure_real()
+ return self._custom_format._fetch_reconcile
+
+ def get_format_description(self):
+ self._ensure_real()
+ return 'Remote: ' + self._custom_format.get_format_description()
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__
+
+ def network_name(self):
+ if self._network_name:
+ return self._network_name
+ self._creating_repo._ensure_real()
+ return self._creating_repo._real_repository._format.network_name()
+
+ @property
+ def pack_compresses(self):
+ self._ensure_real()
+ return self._custom_format.pack_compresses
+
+ @property
+ def _serializer(self):
+ self._ensure_real()
+ return self._custom_format._serializer
+
+
+class RemoteRepository(_mod_repository.Repository, _RpcHelper,
+ lock._RelockDebugMixin):
+ """Repository accessed over rpc.
+
+ For the moment most operations are performed using local transport-backed
+ Repository objects.
+ """
+
+ def __init__(self, remote_bzrdir, format, real_repository=None, _client=None):
+ """Create a RemoteRepository instance.
+
+ :param remote_bzrdir: The bzrdir hosting this repository.
+ :param format: The RemoteFormat object to use.
+ :param real_repository: If not None, a local implementation of the
+ repository logic for the repository, usually accessing the data
+ via the VFS.
+ :param _client: Private testing parameter - override the smart client
+ to be used by the repository.
+ """
+ if real_repository:
+ self._real_repository = real_repository
+ else:
+ self._real_repository = None
+ self.bzrdir = remote_bzrdir
+ if _client is None:
+ self._client = remote_bzrdir._client
+ else:
+ self._client = _client
+ self._format = format
+ self._lock_mode = None
+ self._lock_token = None
+ self._write_group_tokens = None
+ self._lock_count = 0
+ self._leave_lock = False
+ # Cache of revision parents; misses are cached during read locks, and
+ # write locks when no _real_repository has been set.
+ self._unstacked_provider = graph.CachingParentsProvider(
+ get_parent_map=self._get_parent_map_rpc)
+ self._unstacked_provider.disable_cache()
+ # For tests:
+ # These depend on the actual remote format, so force them off for
+ # maximum compatibility. XXX: In future these should depend on the
+ # remote repository instance, but this is irrelevant until we perform
+ # reconcile via an RPC call.
+ self._reconcile_does_inventory_gc = False
+ self._reconcile_fixes_text_parents = False
+ self._reconcile_backsup_inventory = False
+ self.base = self.bzrdir.transport.base
+ # Additional places to query for data.
+ self._fallback_repositories = []
+
+ @property
+ def user_transport(self):
+ return self.bzrdir.user_transport
+
+ @property
+ def control_transport(self):
+ # XXX: Normally you shouldn't directly get at the remote repository
+ # transport, but I'm not sure it's worth making this method
+ # optional -- mbp 2010-04-21
+ return self.bzrdir.get_repository_transport(None)
+
+ def __str__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.base)
+
+ __repr__ = __str__
+
+ def abort_write_group(self, suppress_errors=False):
+ """Complete a write group on the decorated repository.
+
+ Smart methods perform operations in a single step so this API
+ is not really applicable except as a compatibility thunk
+ for older plugins that don't use e.g. the CommitBuilder
+ facility.
+
+ :param suppress_errors: see Repository.abort_write_group.
+ """
+ if self._real_repository:
+ self._ensure_real()
+ return self._real_repository.abort_write_group(
+ suppress_errors=suppress_errors)
+ if not self.is_in_write_group():
+ if suppress_errors:
+ mutter('(suppressed) not in write group')
+ return
+ raise errors.BzrError("not in write group")
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('Repository.abort_write_group', path,
+ self._lock_token, self._write_group_tokens)
+ except Exception, exc:
+ self._write_group = None
+ if not suppress_errors:
+ raise
+ mutter('abort_write_group failed')
+ log_exception_quietly()
+ note(gettext('bzr: ERROR (ignored): %s'), exc)
+ else:
+ if response != ('ok', ):
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._write_group_tokens = None
+
+ @property
+ def chk_bytes(self):
+ """Decorate the real repository for now.
+
+ In the long term a full blown network facility is needed to avoid
+ creating a real repository object locally.
+ """
+ self._ensure_real()
+ return self._real_repository.chk_bytes
+
+ def commit_write_group(self):
+ """Complete a write group on the decorated repository.
+
+ Smart methods perform operations in a single step so this API
+ is not really applicable except as a compatibility thunk
+ for older plugins that don't use e.g. the CommitBuilder
+ facility.
+ """
+ if self._real_repository:
+ self._ensure_real()
+ return self._real_repository.commit_write_group()
+ if not self.is_in_write_group():
+ raise errors.BzrError("not in write group")
+ path = self.bzrdir._path_for_remote_call(self._client)
+ response = self._call('Repository.commit_write_group', path,
+ self._lock_token, self._write_group_tokens)
+ if response != ('ok', ):
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._write_group_tokens = None
+ # Refresh data after writing to the repository.
+ self.refresh_data()
+
+ def resume_write_group(self, tokens):
+ if self._real_repository:
+ return self._real_repository.resume_write_group(tokens)
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('Repository.check_write_group', path,
+ self._lock_token, tokens)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.resume_write_group(tokens)
+ if response != ('ok', ):
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._write_group_tokens = tokens
+
+ def suspend_write_group(self):
+ if self._real_repository:
+ return self._real_repository.suspend_write_group()
+ ret = self._write_group_tokens or []
+ self._write_group_tokens = None
+ return ret
+
+ def get_missing_parent_inventories(self, check_for_missing_texts=True):
+ self._ensure_real()
+ return self._real_repository.get_missing_parent_inventories(
+ check_for_missing_texts=check_for_missing_texts)
+
+ def _get_rev_id_for_revno_vfs(self, revno, known_pair):
+ self._ensure_real()
+ return self._real_repository.get_rev_id_for_revno(
+ revno, known_pair)
+
+ def get_rev_id_for_revno(self, revno, known_pair):
+ """See Repository.get_rev_id_for_revno."""
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ if self._client._medium._is_remote_before((1, 17)):
+ return self._get_rev_id_for_revno_vfs(revno, known_pair)
+ response = self._call(
+ 'Repository.get_rev_id_for_revno', path, revno, known_pair)
+ except errors.UnknownSmartMethod:
+ self._client._medium._remember_remote_is_before((1, 17))
+ return self._get_rev_id_for_revno_vfs(revno, known_pair)
+ if response[0] == 'ok':
+ return True, response[1]
+ elif response[0] == 'history-incomplete':
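+            # The server could only walk history back to this (revno, revid)
+            # pair; continue the search from there in any fallback
+            # repositories.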
+ known_pair = response[1:3]
+ for fallback in self._fallback_repositories:
+ found, result = fallback.get_rev_id_for_revno(revno, known_pair)
+ if found:
+ return True, result
+ else:
+ known_pair = result
+ # Not found in any fallbacks
+ return False, known_pair
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _ensure_real(self):
+ """Ensure that there is a _real_repository set.
+
+ Used before calls to self._real_repository.
+
+ Note that _ensure_real causes many roundtrips to the server which are
+ not desirable, and prevents the use of smart one-roundtrip RPC's to
+ perform complex operations (such as accessing parent data, streaming
+ revisions etc). Adding calls to _ensure_real should only be done when
+ bringing up new functionality, adding fallbacks for smart methods that
+ require a fallback path, and never to replace an existing smart method
+ invocation. If in doubt chat to the bzr network team.
+ """
+ if self._real_repository is None:
+ if 'hpssvfs' in debug.debug_flags:
+ import traceback
+ warning('VFS Repository access triggered\n%s',
+ ''.join(traceback.format_stack()))
+ self._unstacked_provider.missing_keys.clear()
+ self.bzrdir._ensure_real()
+ self._set_real_repository(
+ self.bzrdir._real_bzrdir.open_repository())
+
+ def _translate_error(self, err, **context):
+ self.bzrdir._translate_error(err, repository=self, **context)
+
+ def find_text_key_references(self):
+ """Find the text key references within the repository.
+
+ :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
+ to whether they were referred to by the inventory of the
+ revision_id that they contain. The inventory texts from all present
+ revision ids are assessed to generate this report.
+ """
+ self._ensure_real()
+ return self._real_repository.find_text_key_references()
+
+ def _generate_text_key_index(self):
+ """Generate a new text key index for the repository.
+
+ This is an expensive function that will take considerable time to run.
+
+ :return: A dict mapping (file_id, revision_id) tuples to a list of
+ parents, also (file_id, revision_id) tuples.
+ """
+ self._ensure_real()
+ return self._real_repository._generate_text_key_index()
+
+ def _get_revision_graph(self, revision_id):
+ """Private method for using with old (< 1.2) servers to fallback."""
+ if revision_id is None:
+ revision_id = ''
+ elif _mod_revision.is_null(revision_id):
+ return {}
+
+ path = self.bzrdir._path_for_remote_call(self._client)
+ response = self._call_expecting_body(
+ 'Repository.get_revision_graph', path, revision_id)
+ response_tuple, response_handler = response
+ if response_tuple[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ coded = response_handler.read_body_bytes()
+ if coded == '':
+ # no revisions in this repository!
+ return {}
+ lines = coded.split('\n')
+ revision_graph = {}
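+ # Each line is whitespace-separated: a revision id followed by its
+ # parent ids.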
+ for line in lines:
+ d = tuple(line.split())
+ revision_graph[d[0]] = d[1:]
+
+ return revision_graph
+
+ def _get_sink(self):
+ """See Repository._get_sink()."""
+ return RemoteStreamSink(self)
+
+ def _get_source(self, to_format):
+ """Return a source for streaming from this repository."""
+ return RemoteStreamSource(self, to_format)
+
+ @needs_read_lock
+ def get_file_graph(self):
+ return graph.Graph(self.texts)
+
+ @needs_read_lock
+ def has_revision(self, revision_id):
+ """True if this repository has a copy of the revision."""
+ # Copy of bzrlib.repository.Repository.has_revision
+ return revision_id in self.has_revisions((revision_id,))
+
+ @needs_read_lock
+ def has_revisions(self, revision_ids):
+ """Probe to find out the presence of multiple revisions.
+
+ :param revision_ids: An iterable of revision_ids.
+ :return: A set of the revision_ids that were present.
+ """
+ # Copy of bzrlib.repository.Repository.has_revisions
+ parent_map = self.get_parent_map(revision_ids)
+ result = set(parent_map)
+ if _mod_revision.NULL_REVISION in revision_ids:
+ result.add(_mod_revision.NULL_REVISION)
+ return result
+
+ def _has_same_fallbacks(self, other_repo):
+ """Returns true if the repositories have the same fallbacks."""
+ # XXX: copied from Repository; it should be unified into a base class
+ # <https://bugs.launchpad.net/bzr/+bug/401622>
+ my_fb = self._fallback_repositories
+ other_fb = other_repo._fallback_repositories
+ if len(my_fb) != len(other_fb):
+ return False
+ for f, g in zip(my_fb, other_fb):
+ if not f.has_same_location(g):
+ return False
+ return True
+
+ def has_same_location(self, other):
+ # TODO: Move to RepositoryBase and unify with the regular Repository
+ # one; unfortunately the tests rely on slightly different behaviour at
+ # present -- mbp 20090710
+ return (self.__class__ is other.__class__ and
+ self.bzrdir.transport.base == other.bzrdir.transport.base)
+
+ def get_graph(self, other_repository=None):
+ """Return the graph for this repository format"""
+ parents_provider = self._make_parents_provider(other_repository)
+ return graph.Graph(parents_provider)
+
+ @needs_read_lock
+ def get_known_graph_ancestry(self, revision_ids):
+ """Return the known graph for a set of revision ids and their ancestors.
+ """
+ st = static_tuple.StaticTuple
+ revision_keys = [st(r_id).intern() for r_id in revision_ids]
+ known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
+ return graph.GraphThunkIdsToKeys(known_graph)
+
+ def gather_stats(self, revid=None, committers=None):
+ """See Repository.gather_stats()."""
+ path = self.bzrdir._path_for_remote_call(self._client)
+ # revid can be None to indicate no revisions, not just NULL_REVISION
+ if revid is None or _mod_revision.is_null(revid):
+ fmt_revid = ''
+ else:
+ fmt_revid = revid
+ if committers is None or not committers:
+ fmt_committers = 'no'
+ else:
+ fmt_committers = 'yes'
+ response_tuple, response_handler = self._call_expecting_body(
+ 'Repository.gather_stats', path, fmt_revid, fmt_committers)
+ if response_tuple[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+
+ body = response_handler.read_body_bytes()
+ result = {}
+ for line in body.split('\n'):
+ if not line:
+ continue
+ key, val_text = line.split(':')
+ if key in ('revisions', 'size', 'committers'):
+ result[key] = int(val_text)
+ elif key in ('firstrev', 'latestrev'):
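+ # The value appears to be ' <timestamp> <timezone>'; keep it as a
+ # (float, long) pair.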
+ values = val_text.split(' ')[1:]
+ result[key] = (float(values[0]), long(values[1]))
+
+ return result
+
+ def find_branches(self, using=False):
+ """See Repository.find_branches()."""
+ # should be an API call to the server.
+ self._ensure_real()
+ return self._real_repository.find_branches(using=using)
+
+ def get_physical_lock_status(self):
+ """See Repository.get_physical_lock_status()."""
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('Repository.get_physical_lock_status', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.get_physical_lock_status()
+ if response[0] not in ('yes', 'no'):
+ raise errors.UnexpectedSmartServerResponse(response)
+ return (response[0] == 'yes')
+
+ def is_in_write_group(self):
+ """Return True if there is an open write group.
+
+ Write groups are only applicable locally for the smart server.
+ """
+ if self._write_group_tokens is not None:
+ return True
+ if self._real_repository:
+ return self._real_repository.is_in_write_group()
+
+ def is_locked(self):
+ return self._lock_count >= 1
+
+ def is_shared(self):
+ """See Repository.is_shared()."""
+ path = self.bzrdir._path_for_remote_call(self._client)
+ response = self._call('Repository.is_shared', path)
+ if response[0] not in ('yes', 'no'):
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+ return response[0] == 'yes'
+
+ def is_write_locked(self):
+ return self._lock_mode == 'w'
+
+ def _warn_if_deprecated(self, branch=None):
+ # If we have a real repository, the check will be done there, if we
+ # don't the check will be done remotely.
+ pass
+
+ def lock_read(self):
+ """Lock the repository for read operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ # wrong eventually - want a local lock cache context
+ if not self._lock_mode:
+ self._note_lock('r')
+ self._lock_mode = 'r'
+ self._lock_count = 1
+ self._unstacked_provider.enable_cache(cache_misses=True)
+ if self._real_repository is not None:
+ self._real_repository.lock_read()
+ for repo in self._fallback_repositories:
+ repo.lock_read()
+ else:
+ self._lock_count += 1
+ return lock.LogicalLockResult(self.unlock)
+
+ def _remote_lock_write(self, token):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ if token is None:
+ token = ''
+ err_context = {'token': token}
+ response = self._call('Repository.lock_write', path, token,
+ **err_context)
+ if response[0] == 'ok':
+ ok, token = response
+ return token
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def lock_write(self, token=None, _skip_rpc=False):
+ if not self._lock_mode:
+ self._note_lock('w')
+ if _skip_rpc:
+ if self._lock_token is not None:
+ if token != self._lock_token:
+ raise errors.TokenMismatch(token, self._lock_token)
+ self._lock_token = token
+ else:
+ self._lock_token = self._remote_lock_write(token)
+ # if self._lock_token is None, then this is something like packs or
+ # svn where we don't get to lock the repo, or a weave style repository
+ # where we cannot lock it over the wire and attempts to do so will
+ # fail.
+ if self._real_repository is not None:
+ self._real_repository.lock_write(token=self._lock_token)
+ if token is not None:
+ self._leave_lock = True
+ else:
+ self._leave_lock = False
+ self._lock_mode = 'w'
+ self._lock_count = 1
+ cache_misses = self._real_repository is None
+ self._unstacked_provider.enable_cache(cache_misses=cache_misses)
+ for repo in self._fallback_repositories:
+ # Writes don't affect fallback repos
+ repo.lock_read()
+ elif self._lock_mode == 'r':
+ raise errors.ReadOnlyError(self)
+ else:
+ self._lock_count += 1
+ return RepositoryWriteLockResult(self.unlock, self._lock_token or None)
+
+ def leave_lock_in_place(self):
+ if not self._lock_token:
+ raise NotImplementedError(self.leave_lock_in_place)
+ self._leave_lock = True
+
+ def dont_leave_lock_in_place(self):
+ if not self._lock_token:
+ raise NotImplementedError(self.dont_leave_lock_in_place)
+ self._leave_lock = False
+
+ def _set_real_repository(self, repository):
+ """Set the _real_repository for this repository.
+
+ :param repository: The repository to fallback to for non-hpss
+ implemented operations.
+ """
+ if self._real_repository is not None:
+ # Replacing an already set real repository.
+ # We cannot do this [currently] if the repository is locked -
+ # synchronised state might be lost.
+ if self.is_locked():
+ raise AssertionError('_real_repository is already set')
+ if isinstance(repository, RemoteRepository):
+ raise AssertionError()
+ self._real_repository = repository
+ # three code paths happen here:
+ # 1) old servers, RemoteBranch.open() calls _ensure_real before setting
+ # up stacking. In this case self._fallback_repositories is [], and the
+ # real repo is already set up. Preserve the real repo and
+ # RemoteRepository.add_fallback_repository will avoid adding
+ # duplicates.
+ # 2) new servers, RemoteBranch.open() sets up stacking, and when
+ # ensure_real is triggered from a branch, the real repository to
+ # set already has a matching list with separate instances, but
+ # as they are also RemoteRepositories we don't worry about making
+ # the lists identical.
+ # 3) new servers, RemoteRepository._ensure_real is triggered before
+ # RemoteBranch._ensure_real; in this case we get a repo with no fallbacks
+ # and need to populate it.
+ if (self._fallback_repositories and
+ len(self._real_repository._fallback_repositories) !=
+ len(self._fallback_repositories)):
+ if len(self._real_repository._fallback_repositories):
+ raise AssertionError(
+ "cannot cleanly remove existing _fallback_repositories")
+ for fb in self._fallback_repositories:
+ self._real_repository.add_fallback_repository(fb)
+ if self._lock_mode == 'w':
+ # if we are already locked, the real repository must be able to
+ # acquire the lock with our token.
+ self._real_repository.lock_write(self._lock_token)
+ elif self._lock_mode == 'r':
+ self._real_repository.lock_read()
+ if self._write_group_tokens is not None:
+ # if we are already in a write group, resume it
+ self._real_repository.resume_write_group(self._write_group_tokens)
+ self._write_group_tokens = None
+
+ def start_write_group(self):
+ """Start a write group on the decorated repository.
+
+ Smart methods perform operations in a single step so this API
+ is not really applicable except as a compatibility thunk
+ for older plugins that don't use e.g. the CommitBuilder
+ facility.
+ """
+ if self._real_repository:
+ self._ensure_real()
+ return self._real_repository.start_write_group()
+ if not self.is_write_locked():
+ raise errors.NotWriteLocked(self)
+ if self._write_group_tokens is not None:
+ raise errors.BzrError('already in a write group')
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('Repository.start_write_group', path,
+ self._lock_token)
+ except (errors.UnknownSmartMethod, errors.UnsuspendableWriteGroup):
+ self._ensure_real()
+ return self._real_repository.start_write_group()
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._write_group_tokens = response[1]
+
+ def _unlock(self, token):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ if not token:
+ # with no token the remote repository is not persistently locked.
+ return
+ err_context = {'token': token}
+ response = self._call('Repository.unlock', path, token,
+ **err_context)
+ if response == ('ok',):
+ return
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ if not self._lock_count:
+ return lock.cant_unlock_not_held(self)
+ self._lock_count -= 1
+ if self._lock_count > 0:
+ return
+ self._unstacked_provider.disable_cache()
+ old_mode = self._lock_mode
+ self._lock_mode = None
+ try:
+ # The real repository is responsible at present for raising an
+ # exception if it's in an unfinished write group. However, it
+ # normally will *not* actually remove the lock from disk - that's
+ # done by the server on receiving the Repository.unlock call.
+ # This is just to let the _real_repository stay up to date.
+ if self._real_repository is not None:
+ self._real_repository.unlock()
+ elif self._write_group_tokens is not None:
+ self.abort_write_group()
+ finally:
+ # The rpc-level lock should be released even if there was a
+ # problem releasing the vfs-based lock.
+ if old_mode == 'w':
+ # Only write-locked repositories need to make a remote method
+ # call to perform the unlock.
+ old_token = self._lock_token
+ self._lock_token = None
+ if not self._leave_lock:
+ self._unlock(old_token)
+ # Fallbacks are always 'lock_read()' so we don't pay attention to
+ # self._leave_lock
+ for repo in self._fallback_repositories:
+ repo.unlock()
+
+ def break_lock(self):
+ # should hand off to the network
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call("Repository.break_lock", path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.break_lock()
+ if response != ('ok',):
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _get_tarball(self, compression):
+ """Return a TemporaryFile containing a repository tarball.
+
+ Returns None if the server does not support sending tarballs.
+ """
+ import tempfile
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response, protocol = self._call_expecting_body(
+ 'Repository.tarball', path, compression)
+ except errors.UnknownSmartMethod:
+ protocol.cancel_read_body()
+ return None
+ if response[0] == 'ok':
+ # Extract the tarball and return it
+ t = tempfile.NamedTemporaryFile()
+ # TODO: rpc layer should read directly into it...
+ t.write(protocol.read_body_bytes())
+ t.seek(0)
+ return t
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @needs_read_lock
+ def sprout(self, to_bzrdir, revision_id=None):
+ """Create a descendent repository for new development.
+
+ Unlike clone, this does not copy the settings of the repository.
+ """
+ dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
+ dest_repo.fetch(self, revision_id=revision_id)
+ return dest_repo
+
+ def _create_sprouting_repo(self, a_bzrdir, shared):
+ if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
+ # use target default format.
+ dest_repo = a_bzrdir.create_repository()
+ else:
+ # Most control formats need the repository to be specifically
+ # created, but on some old all-in-one formats it's not needed
+ try:
+ dest_repo = self._format.initialize(a_bzrdir, shared=shared)
+ except errors.UninitializableFormat:
+ dest_repo = a_bzrdir.open_repository()
+ return dest_repo
+
+ ### These methods are just thin shims to the VFS object for now.
+
+ @needs_read_lock
+ def revision_tree(self, revision_id):
+ revision_id = _mod_revision.ensure_null(revision_id)
+ if revision_id == _mod_revision.NULL_REVISION:
+ return InventoryRevisionTree(self,
+ Inventory(root_id=None), _mod_revision.NULL_REVISION)
+ else:
+ return list(self.revision_trees([revision_id]))[0]
+
+ def get_serializer_format(self):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('VersionedFileRepository.get_serializer_format',
+ path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.get_serializer_format()
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ return response[1]
+
+ def get_commit_builder(self, branch, parents, config, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ """Obtain a CommitBuilder for this repository.
+
+ :param branch: Branch to commit to.
+ :param parents: Revision ids of the parents of the new revision.
+ :param config: Configuration to use.
+ :param timestamp: Optional timestamp recorded for commit.
+ :param timezone: Optional timezone for timestamp.
+ :param committer: Optional committer to set for commit.
+ :param revprops: Optional dictionary of revision properties.
+ :param revision_id: Optional revision id.
+ :param lossy: Whether to discard data that can not be natively
+ represented, when pushing to a foreign VCS
+ """
+ if self._fallback_repositories and not self._format.supports_chks:
+ raise errors.BzrError("Cannot commit directly to a stacked branch"
+ " in pre-2a formats. See "
+ "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
+ if self._format.rich_root_data:
+ commit_builder_kls = vf_repository.VersionedFileRootCommitBuilder
+ else:
+ commit_builder_kls = vf_repository.VersionedFileCommitBuilder
+ result = commit_builder_kls(self, parents, config,
+ timestamp, timezone, committer, revprops, revision_id,
+ lossy)
+ self.start_write_group()
+ return result
+
+ def add_fallback_repository(self, repository):
+ """Add a repository to use for looking up data not held locally.
+
+ :param repository: A repository.
+ """
+ if not self._format.supports_external_lookups:
+ raise errors.UnstackableRepositoryFormat(
+ self._format.network_name(), self.base)
+ # We need to accumulate additional repositories here, to pass them in
+ # on various RPC's.
+ #
+ # Make the check before we lock: this raises an exception.
+ self._check_fallback_repository(repository)
+ if self.is_locked():
+ # We will call fallback.unlock() when we transition to the unlocked
+ # state, so always add a lock here. If a caller passes us a locked
+ # repository, they are responsible for unlocking it later.
+ repository.lock_read()
+ self._fallback_repositories.append(repository)
+ # If self._real_repository was parameterised already (e.g. because a
+ # _real_branch had its get_stacked_on_url method called), then the
+ # repository to be added may already be in the _real_repositories list.
+ if self._real_repository is not None:
+ fallback_locations = [repo.user_url for repo in
+ self._real_repository._fallback_repositories]
+ if repository.user_url not in fallback_locations:
+ self._real_repository.add_fallback_repository(repository)
+
+ def _check_fallback_repository(self, repository):
+ """Check that this repository can fallback to repository safely.
+
+ Raise an error if not.
+
+ :param repository: A repository to fallback to.
+ """
+ return _mod_repository.InterRepository._assert_same_model(
+ self, repository)
+
+ def add_inventory(self, revid, inv, parents):
+ self._ensure_real()
+ return self._real_repository.add_inventory(revid, inv, parents)
+
+ def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
+ parents, basis_inv=None, propagate_caches=False):
+ self._ensure_real()
+ return self._real_repository.add_inventory_by_delta(basis_revision_id,
+ delta, new_revision_id, parents, basis_inv=basis_inv,
+ propagate_caches=propagate_caches)
+
+ def add_revision(self, revision_id, rev, inv=None):
+ _mod_revision.check_not_reserved_id(revision_id)
+ key = (revision_id,)
+ # check inventory present
+ if not self.inventories.get_parent_map([key]):
+ if inv is None:
+ raise errors.WeaveRevisionNotPresent(revision_id,
+ self.inventories)
+ else:
+ # yes, this is not suitable for adding with ghosts.
+ rev.inventory_sha1 = self.add_inventory(revision_id, inv,
+ rev.parent_ids)
+ else:
+ rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
+ self._add_revision(rev)
+
+ def _add_revision(self, rev):
+ if self._real_repository is not None:
+ return self._real_repository._add_revision(rev)
+ text = self._serializer.write_revision_to_string(rev)
+ key = (rev.revision_id,)
+ parents = tuple((parent,) for parent in rev.parent_ids)
+ self._write_group_tokens, missing_keys = self._get_sink().insert_stream(
+ [('revisions', [FulltextContentFactory(key, parents, None, text)])],
+ self._format, self._write_group_tokens)
+
+ @needs_read_lock
+ def get_inventory(self, revision_id):
+ return list(self.iter_inventories([revision_id]))[0]
+
+ def _iter_inventories_rpc(self, revision_ids, ordering):
+ if ordering is None:
+ ordering = 'unordered'
+ path = self.bzrdir._path_for_remote_call(self._client)
+ body = "\n".join(revision_ids)
+ response_tuple, response_handler = (
+ self._call_with_body_bytes_expecting_body(
+ "VersionedFileRepository.get_inventories",
+ (path, ordering), body))
+ if response_tuple[0] != "ok":
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ byte_stream = response_handler.read_streamed_body()
+ decoded = smart_repo._byte_stream_to_stream(byte_stream)
+ if decoded is None:
+ # no results whatsoever
+ return
+ src_format, stream = decoded
+ if src_format.network_name() != self._format.network_name():
+ raise AssertionError(
+ "Mismatched RemoteRepository and stream src %r, %r" % (
+ src_format.network_name(), self._format.network_name()))
+ # ignore the src format, it's not really relevant
+ prev_inv = Inventory(root_id=None,
+ revision_id=_mod_revision.NULL_REVISION)
+ # there should be just one substream, with inventory deltas
+ substream_kind, substream = stream.next()
+ if substream_kind != "inventory-deltas":
+ raise AssertionError(
+ "Unexpected stream %r received" % substream_kind)
+ for record in substream:
+ (parent_id, new_id, versioned_root, tree_references, invdelta) = (
+ deserializer.parse_text_bytes(record.get_bytes_as("fulltext")))
+ if parent_id != prev_inv.revision_id:
+ raise AssertionError("invalid base %r != %r" % (parent_id,
+ prev_inv.revision_id))
+ inv = prev_inv.create_by_apply_delta(invdelta, new_id)
+ yield inv, inv.revision_id
+ prev_inv = inv
+
+ def _iter_inventories_vfs(self, revision_ids, ordering=None):
+ self._ensure_real()
+ return self._real_repository._iter_inventories(revision_ids, ordering)
+
+ def iter_inventories(self, revision_ids, ordering=None):
+ """Get many inventories by revision_ids.
+
+ This will buffer some or all of the texts used in constructing the
+ inventories in memory, but will only parse a single inventory at a
+ time.
+
+ :param revision_ids: The expected revision ids of the inventories.
+ :param ordering: optional ordering, e.g. 'topological'. If not
+ specified, the order of revision_ids will be preserved (by
+ buffering if necessary).
+ :return: An iterator of inventories.
+ """
+ if ((None in revision_ids)
+ or (_mod_revision.NULL_REVISION in revision_ids)):
+ raise ValueError('cannot get null revision inventory')
+ for inv, revid in self._iter_inventories(revision_ids, ordering):
+ if inv is None:
+ raise errors.NoSuchRevision(self, revid)
+ yield inv
+
+ def _iter_inventories(self, revision_ids, ordering=None):
+ if len(revision_ids) == 0:
+ return
+ missing = set(revision_ids)
+ if ordering is None:
+ order_as_requested = True
+ invs = {}
+ order = list(revision_ids)
+ order.reverse()
+ next_revid = order.pop()
+ else:
+ order_as_requested = False
+ if ordering != 'unordered' and self._fallback_repositories:
+ raise ValueError('unsupported ordering %r' % ordering)
+ iter_inv_fns = [self._iter_inventories_rpc] + [
+ fallback._iter_inventories for fallback in
+ self._fallback_repositories]
+ try:
+ for iter_inv in iter_inv_fns:
+ request = [revid for revid in revision_ids if revid in missing]
+ for inv, revid in iter_inv(request, ordering):
+ if inv is None:
+ continue
+ missing.remove(inv.revision_id)
+ if ordering != 'unordered':
+ invs[revid] = inv
+ else:
+ yield inv, revid
+ if order_as_requested:
+ # Yield as many results as we can while preserving order.
+ while next_revid in invs:
+ inv = invs.pop(next_revid)
+ yield inv, inv.revision_id
+ try:
+ next_revid = order.pop()
+ except IndexError:
+ # We still want to fully consume the stream, just
+ # in case it is not actually finished at this point
+ next_revid = None
+ break
+ except errors.UnknownSmartMethod:
+ for inv, revid in self._iter_inventories_vfs(revision_ids, ordering):
+ yield inv, revid
+ return
+ # Report missing
+ if order_as_requested:
+ if next_revid is not None:
+ yield None, next_revid
+ while order:
+ revid = order.pop()
+ yield invs.get(revid), revid
+ else:
+ while missing:
+ yield None, missing.pop()
+
+ @needs_read_lock
+ def get_revision(self, revision_id):
+ return self.get_revisions([revision_id])[0]
+
+ def get_transaction(self):
+ self._ensure_real()
+ return self._real_repository.get_transaction()
+
+ @needs_read_lock
+ def clone(self, a_bzrdir, revision_id=None):
+ dest_repo = self._create_sprouting_repo(
+ a_bzrdir, shared=self.is_shared())
+ self.copy_content_into(dest_repo, revision_id)
+ return dest_repo
+
+ def make_working_trees(self):
+ """See Repository.make_working_trees"""
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('Repository.make_working_trees', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.make_working_trees()
+ if response[0] not in ('yes', 'no'):
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+ return response[0] == 'yes'
+
+ def refresh_data(self):
+ """Re-read any data needed to synchronise with disk.
+
+ This method is intended to be called after another repository instance
+ (such as one used by a smart server) has inserted data into the
+ repository. On all repositories this will work outside of write groups.
+ Some repository formats (pack and newer for bzrlib native formats)
+ support refresh_data inside write groups. If called inside a write
+ group on a repository that does not support refreshing in a write group
+ IsInWriteGroupError will be raised.
+ """
+ if self._real_repository is not None:
+ self._real_repository.refresh_data()
+ # Refresh the parents cache for this object
+ self._unstacked_provider.disable_cache()
+ self._unstacked_provider.enable_cache()
+
+ def revision_ids_to_search_result(self, result_set):
+ """Convert a set of revision ids to a graph SearchResult."""
+ result_parents = set()
+ for parents in self.get_graph().get_parent_map(
+ result_set).itervalues():
+ result_parents.update(parents)
+ included_keys = result_set.intersection(result_parents)
+ start_keys = result_set.difference(included_keys)
+ exclude_keys = result_parents.difference(result_set)
+ result = vf_search.SearchResult(start_keys, exclude_keys,
+ len(result_set), result_set)
+ return result
+
+ @needs_read_lock
+ def search_missing_revision_ids(self, other,
+ revision_id=symbol_versioning.DEPRECATED_PARAMETER,
+ find_ghosts=True, revision_ids=None, if_present_ids=None,
+ limit=None):
+ """Return the revision ids that other has that this does not.
+
+ These are returned in topological order.
+
+ revision_id: only return revision ids included by revision_id.
+ """
+ if symbol_versioning.deprecated_passed(revision_id):
+ symbol_versioning.warn(
+ 'search_missing_revision_ids(revision_id=...) was '
+ 'deprecated in 2.4. Use revision_ids=[...] instead.',
+ DeprecationWarning, stacklevel=2)
+ if revision_ids is not None:
+ raise AssertionError(
+ 'revision_ids is mutually exclusive with revision_id')
+ if revision_id is not None:
+ revision_ids = [revision_id]
+ inter_repo = _mod_repository.InterRepository.get(other, self)
+ return inter_repo.search_missing_revision_ids(
+ find_ghosts=find_ghosts, revision_ids=revision_ids,
+ if_present_ids=if_present_ids, limit=limit)
+
+ def fetch(self, source, revision_id=None, find_ghosts=False,
+ fetch_spec=None):
+ # No base implementation to use as RemoteRepository is not a subclass
+ # of Repository; so this is a copy of Repository.fetch().
+ if fetch_spec is not None and revision_id is not None:
+ raise AssertionError(
+ "fetch_spec and revision_id are mutually exclusive.")
+ if self.is_in_write_group():
+ raise errors.InternalBzrError(
+ "May not fetch while in a write group.")
+ # fast path same-url fetch operations
+ if (self.has_same_location(source)
+ and fetch_spec is None
+ and self._has_same_fallbacks(source)):
+ # check that last_revision is in 'from' and then return a
+ # no-operation.
+ if (revision_id is not None and
+ not _mod_revision.is_null(revision_id)):
+ self.get_revision(revision_id)
+ return 0, []
+ # if there is no specific appropriate InterRepository, this will get
+ # the InterRepository base class, which raises an
+ # IncompatibleRepositories when asked to fetch.
+ inter = _mod_repository.InterRepository.get(source, self)
+ if (fetch_spec is not None and
+ not getattr(inter, "supports_fetch_spec", False)):
+ raise errors.UnsupportedOperation(
+ "fetch_spec not supported for %r" % inter)
+ return inter.fetch(revision_id=revision_id,
+ find_ghosts=find_ghosts, fetch_spec=fetch_spec)
+
+ def create_bundle(self, target, base, fileobj, format=None):
+ self._ensure_real()
+ self._real_repository.create_bundle(target, base, fileobj, format)
+
+ def fileids_altered_by_revision_ids(self, revision_ids):
+ self._ensure_real()
+ return self._real_repository.fileids_altered_by_revision_ids(revision_ids)
+
+ def _get_versioned_file_checker(self, revisions, revision_versions_cache):
+ self._ensure_real()
+ return self._real_repository._get_versioned_file_checker(
+ revisions, revision_versions_cache)
+
+ def _iter_files_bytes_rpc(self, desired_files, absent):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ lines = []
+ identifiers = []
+ for (file_id, revid, identifier) in desired_files:
+ lines.append("%s\0%s" % (
+ osutils.safe_file_id(file_id),
+ osutils.safe_revision_id(revid)))
+ identifiers.append(identifier)
+ (response_tuple, response_handler) = (
+ self._call_with_body_bytes_expecting_body(
+ "Repository.iter_files_bytes", (path, ), "\n".join(lines)))
+ if response_tuple != ('ok', ):
+ response_handler.cancel_read_body()
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ byte_stream = response_handler.read_streamed_body()
+ def decompress_stream(start, byte_stream, unused):
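+ # Inflate one zlib-compressed file text: 'start' holds the first
+ # bytes, the rest is pulled from byte_stream; any bytes past the end
+ # of this compressed blob are handed back via 'unused'.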
+ decompressor = zlib.decompressobj()
+ yield decompressor.decompress(start)
+ while decompressor.unused_data == "":
+ try:
+ data = byte_stream.next()
+ except StopIteration:
+ break
+ yield decompressor.decompress(data)
+ yield decompressor.flush()
+ unused.append(decompressor.unused_data)
+ unused = ""
+ while True:
+ while not "\n" in unused:
+ unused += byte_stream.next()
+ header, rest = unused.split("\n", 1)
+ args = header.split("\0")
+ if args[0] == "absent":
+ absent[identifiers[int(args[3])]] = (args[1], args[2])
+ unused = rest
+ continue
+ elif args[0] == "ok":
+ idx = int(args[1])
+ else:
+ raise errors.UnexpectedSmartServerResponse(args)
+ unused_chunks = []
+ yield (identifiers[idx],
+ decompress_stream(rest, byte_stream, unused_chunks))
+ unused = "".join(unused_chunks)
+
+ def iter_files_bytes(self, desired_files):
+ """See Repository.iter_file_bytes.
+ """
+ try:
+ absent = {}
+ for (identifier, bytes_iterator) in self._iter_files_bytes_rpc(
+ desired_files, absent):
+ yield identifier, bytes_iterator
+ for fallback in self._fallback_repositories:
+ if not absent:
+ break
+ desired_files = [(key[0], key[1], identifier) for
+ (identifier, key) in absent.iteritems()]
+ for (identifier, bytes_iterator) in fallback.iter_files_bytes(desired_files):
+ del absent[identifier]
+ yield identifier, bytes_iterator
+ if absent:
+ # There may be more missing items, but raise an exception
+ # for just one.
+ missing_identifier = absent.keys()[0]
+ missing_key = absent[missing_identifier]
+ raise errors.RevisionNotPresent(revision_id=missing_key[1],
+ file_id=missing_key[0])
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ for (identifier, bytes_iterator) in (
+ self._real_repository.iter_files_bytes(desired_files)):
+ yield identifier, bytes_iterator
+
+ def get_cached_parent_map(self, revision_ids):
+ """See bzrlib.CachingParentsProvider.get_cached_parent_map"""
+ return self._unstacked_provider.get_cached_parent_map(revision_ids)
+
+ def get_parent_map(self, revision_ids):
+ """See bzrlib.Graph.get_parent_map()."""
+ return self._make_parents_provider().get_parent_map(revision_ids)
+
+ def _get_parent_map_rpc(self, keys):
+ """Helper for get_parent_map that performs the RPC."""
+ medium = self._client._medium
+ if medium._is_remote_before((1, 2)):
+ # We already found out that the server can't understand
+ # Repository.get_parent_map requests, so just fetch the whole
+ # graph.
+ #
+ # Note that this reads the whole graph, when only some keys are
+ # wanted. On this old server there's no way (?) to get them all
+ # in one go, and the user probably will have seen a warning about
+ # the server being old anyhow.
+ rg = self._get_revision_graph(None)
+ # There is an API discrepancy between get_parent_map and
+ # get_revision_graph. Specifically, a "key:()" pair in
+ # get_revision_graph just means a node has no parents. For
+ # "get_parent_map" it means the node is a ghost. So fix up the
+ # graph to correct this.
+ # https://bugs.launchpad.net/bzr/+bug/214894
+ # There is one other "bug" which is that ghosts in
+ # get_revision_graph() are not returned at all. But we won't worry
+ # about that for now.
+ for node_id, parent_ids in rg.iteritems():
+ if parent_ids == ():
+ rg[node_id] = (NULL_REVISION,)
+ rg[NULL_REVISION] = ()
+ return rg
+
+ keys = set(keys)
+ if None in keys:
+ raise ValueError('get_parent_map(None) is not valid')
+ if NULL_REVISION in keys:
+ keys.discard(NULL_REVISION)
+ found_parents = {NULL_REVISION:()}
+ if not keys:
+ return found_parents
+ else:
+ found_parents = {}
+ # TODO(Needs analysis): We could assume that the keys being requested
+ # from get_parent_map are in a breadth first search, so typically they
+ # will all be depth N from some common parent, and we don't have to
+ # have the server iterate from the root parent, but rather from the
+ # keys we're searching; and just tell the server the keyspace we
+ # already have; but this may be more traffic again.
+
+ # Transform self._parents_map into a search request recipe.
+ # TODO: Manage this incrementally to avoid covering the same path
+ # repeatedly. (The server will have to on each request, but the less
+ # work done the better).
+ #
+ # Negative caching notes:
+ # new server sends missing when a request including the revid
+ # 'include-missing:' is present in the request.
+ # missing keys are serialised as missing:X, and we then call
+ # provider.note_missing(X) for-all X
+ parents_map = self._unstacked_provider.get_cached_map()
+ if parents_map is None:
+ # Repository is not locked, so there's no cache.
+ parents_map = {}
+ if _DEFAULT_SEARCH_DEPTH <= 0:
+ (start_set, stop_keys,
+ key_count) = vf_search.search_result_from_parent_map(
+ parents_map, self._unstacked_provider.missing_keys)
+ else:
+ (start_set, stop_keys,
+ key_count) = vf_search.limited_search_result_from_parent_map(
+ parents_map, self._unstacked_provider.missing_keys,
+ keys, depth=_DEFAULT_SEARCH_DEPTH)
+ recipe = ('manual', start_set, stop_keys, key_count)
+ body = self._serialise_search_recipe(recipe)
+ path = self.bzrdir._path_for_remote_call(self._client)
+ for key in keys:
+ if type(key) is not str:
+ raise ValueError(
+ "key %r not a plain string" % (key,))
+ verb = 'Repository.get_parent_map'
+ args = (path, 'include-missing:') + tuple(keys)
+ try:
+ response = self._call_with_body_bytes_expecting_body(
+ verb, args, body)
+ except errors.UnknownSmartMethod:
+ # Server does not support this method, so get the whole graph.
+ # Worse, we have to force a disconnection, because the server now
+ # doesn't realise it has a body on the wire to consume, so the
+ # only way to recover is to abandon the connection.
+ warning(
+ 'Server is too old for fast get_parent_map, reconnecting. '
+ '(Upgrade the server to Bazaar 1.2 to avoid this)')
+ medium.disconnect()
+ # To avoid having to disconnect repeatedly, we keep track of the
+ # fact the server doesn't understand remote methods added in 1.2.
+ medium._remember_remote_is_before((1, 2))
+ # Recurse just once and we should use the fallback code.
+ return self._get_parent_map_rpc(keys)
+ response_tuple, response_handler = response
+ if response_tuple[0] not in ['ok']:
+ response_handler.cancel_read_body()
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ if response_tuple[0] == 'ok':
+ coded = bz2.decompress(response_handler.read_body_bytes())
+ if coded == '':
+ # no revisions found
+ return {}
+ lines = coded.split('\n')
+ revision_graph = {}
+ for line in lines:
+ d = tuple(line.split())
+ if len(d) > 1:
+ revision_graph[d[0]] = d[1:]
+ else:
+ # No parents:
+ if d[0].startswith('missing:'):
+ revid = d[0][8:]
+ self._unstacked_provider.note_missing_key(revid)
+ else:
+ # no parents - so give the Graph result
+ # (NULL_REVISION,).
+ revision_graph[d[0]] = (NULL_REVISION,)
+ return revision_graph
+
+ @needs_read_lock
+ def get_signature_text(self, revision_id):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response_tuple, response_handler = self._call_expecting_body(
+ 'Repository.get_revision_signature_text', path, revision_id)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.get_signature_text(revision_id)
+ except errors.NoSuchRevision, err:
+ for fallback in self._fallback_repositories:
+ try:
+ return fallback.get_signature_text(revision_id)
+ except errors.NoSuchRevision:
+ pass
+ raise err
+ else:
+ if response_tuple[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ return response_handler.read_body_bytes()
+
+ @needs_read_lock
+ def _get_inventory_xml(self, revision_id):
+ # This call is used by older working tree formats,
+ # which stored a serialized basis inventory.
+ self._ensure_real()
+ return self._real_repository._get_inventory_xml(revision_id)
+
+ @needs_write_lock
+ def reconcile(self, other=None, thorough=False):
+ from bzrlib.reconcile import RepoReconciler
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response, handler = self._call_expecting_body(
+ 'Repository.reconcile', path, self._lock_token)
+ except (errors.UnknownSmartMethod, errors.TokenLockingNotSupported):
+ self._ensure_real()
+ return self._real_repository.reconcile(other=other, thorough=thorough)
+ if response != ('ok', ):
+ raise errors.UnexpectedSmartServerResponse(response)
+ body = handler.read_body_bytes()
+ result = RepoReconciler(self)
+ for line in body.split('\n'):
+ if not line:
+ continue
+ key, val_text = line.split(':')
+ if key == "garbage_inventories":
+ result.garbage_inventories = int(val_text)
+ elif key == "inconsistent_parents":
+ result.inconsistent_parents = int(val_text)
+ else:
+ mutter("unknown reconcile key %r" % key)
+ return result
+
+ def all_revision_ids(self):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response_tuple, response_handler = self._call_expecting_body(
+ "Repository.all_revision_ids", path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.all_revision_ids()
+ if response_tuple != ("ok", ):
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ revids = set(response_handler.read_body_bytes().splitlines())
+ for fallback in self._fallback_repositories:
+ revids.update(set(fallback.all_revision_ids()))
+ return list(revids)
+
+ def _filtered_revision_trees(self, revision_ids, file_ids):
+ """Return Tree for a revision on this branch with only some files.
+
+ :param revision_ids: a sequence of revision-ids;
+ a revision-id may not be None or 'null:'
+ :param file_ids: if not None, the result is filtered
+ so that only those file-ids, their parents and their
+ children are included.
+ """
+ inventories = self.iter_inventories(revision_ids)
+ for inv in inventories:
+ # Should we introduce a FilteredRevisionTree class rather
+ # than pre-filter the inventory here?
+ filtered_inv = inv.filter(file_ids)
+ yield InventoryRevisionTree(self, filtered_inv, filtered_inv.revision_id)
+
+ @needs_read_lock
+ def get_deltas_for_revisions(self, revisions, specific_fileids=None):
+ medium = self._client._medium
+ if medium._is_remote_before((1, 2)):
+ self._ensure_real()
+ for delta in self._real_repository.get_deltas_for_revisions(
+ revisions, specific_fileids):
+ yield delta
+ return
+ # Get the revision-ids of interest
+ required_trees = set()
+ for revision in revisions:
+ required_trees.add(revision.revision_id)
+ required_trees.update(revision.parent_ids[:1])
+
+ # Get the matching filtered trees. Note that it's more
+ # efficient to pass filtered trees to changes_from() rather
+ # than doing the filtering afterwards. changes_from() could
+ # arguably do the filtering itself but it's path-based, not
+ # file-id based, so filtering before or afterwards is
+ # currently easier.
+ if specific_fileids is None:
+ trees = dict((t.get_revision_id(), t) for
+ t in self.revision_trees(required_trees))
+ else:
+ trees = dict((t.get_revision_id(), t) for
+ t in self._filtered_revision_trees(required_trees,
+ specific_fileids))
+
+ # Calculate the deltas
+ for revision in revisions:
+ if not revision.parent_ids:
+ old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
+ else:
+ old_tree = trees[revision.parent_ids[0]]
+ yield trees[revision.revision_id].changes_from(old_tree)
+
+ @needs_read_lock
+ def get_revision_delta(self, revision_id, specific_fileids=None):
+ r = self.get_revision(revision_id)
+ return list(self.get_deltas_for_revisions([r],
+ specific_fileids=specific_fileids))[0]
+
+ @needs_read_lock
+ def revision_trees(self, revision_ids):
+ inventories = self.iter_inventories(revision_ids)
+ for inv in inventories:
+ yield InventoryRevisionTree(self, inv, inv.revision_id)
+
+ @needs_read_lock
+ def get_revision_reconcile(self, revision_id):
+ self._ensure_real()
+ return self._real_repository.get_revision_reconcile(revision_id)
+
+ @needs_read_lock
+ def check(self, revision_ids=None, callback_refs=None, check_repo=True):
+ self._ensure_real()
+ return self._real_repository.check(revision_ids=revision_ids,
+ callback_refs=callback_refs, check_repo=check_repo)
+
+ def copy_content_into(self, destination, revision_id=None):
+ """Make a complete copy of the content in self into destination.
+
+ This is a destructive operation! Do not use it on existing
+ repositories.
+ """
+ interrepo = _mod_repository.InterRepository.get(self, destination)
+ return interrepo.copy_content(revision_id)
+
+ def _copy_repository_tarball(self, to_bzrdir, revision_id=None):
+ # get a tarball of the remote repository, and copy from that into the
+ # destination
+ import tarfile
+ # TODO: Maybe a progress bar while streaming the tarball?
+ note(gettext("Copying repository content as tarball..."))
+ tar_file = self._get_tarball('bz2')
+ if tar_file is None:
+ return None
+ destination = to_bzrdir.create_repository()
+ try:
+ tar = tarfile.open('repository', fileobj=tar_file,
+ mode='r|bz2')
+ tmpdir = osutils.mkdtemp()
+ try:
+ _extract_tar(tar, tmpdir)
+ tmp_bzrdir = _mod_bzrdir.BzrDir.open(tmpdir)
+ tmp_repo = tmp_bzrdir.open_repository()
+ tmp_repo.copy_content_into(destination, revision_id)
+ finally:
+ osutils.rmtree(tmpdir)
+ finally:
+ tar_file.close()
+ return destination
+ # TODO: Suggestion from john: using external tar is much faster than
+ # python's tarfile library, but it may not work on windows.
+
+ @property
+ def inventories(self):
+ """Decorate the real repository for now.
+
+ In the long term a full blown network facility is needed to
+ avoid creating a real repository object locally.
+ """
+ self._ensure_real()
+ return self._real_repository.inventories
+
+ @needs_write_lock
+ def pack(self, hint=None, clean_obsolete_packs=False):
+ """Compress the data within the repository.
+ """
+ if hint is None:
+ body = ""
+ else:
+ body = "".join([l+"\n" for l in hint])
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response, handler = self._call_with_body_bytes_expecting_body(
+ 'Repository.pack', (path, self._lock_token,
+ str(clean_obsolete_packs)), body)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.pack(hint=hint,
+ clean_obsolete_packs=clean_obsolete_packs)
+ handler.cancel_read_body()
+ if response != ('ok', ):
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @property
+ def revisions(self):
+ """Decorate the real repository for now.
+
+ In the long term a full blown network facility is needed.
+ """
+ self._ensure_real()
+ return self._real_repository.revisions
+
+ def set_make_working_trees(self, new_value):
+ if new_value:
+ new_value_str = "True"
+ else:
+ new_value_str = "False"
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call(
+ 'Repository.set_make_working_trees', path, new_value_str)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ self._real_repository.set_make_working_trees(new_value)
+ else:
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @property
+ def signatures(self):
+ """Decorate the real repository for now.
+
+ In the long term a full blown network facility is needed to avoid
+ creating a real repository object locally.
+ """
+ self._ensure_real()
+ return self._real_repository.signatures
+
+ @needs_write_lock
+ def sign_revision(self, revision_id, gpg_strategy):
+ testament = _mod_testament.Testament.from_revision(self, revision_id)
+ plaintext = testament.as_short_text()
+ self.store_revision_signature(gpg_strategy, plaintext, revision_id)
+
+ @property
+ def texts(self):
+ """Decorate the real repository for now.
+
+ In the long term a full blown network facility is needed to avoid
+ creating a real repository object locally.
+ """
+ self._ensure_real()
+ return self._real_repository.texts
+
+ def _iter_revisions_rpc(self, revision_ids):
+ body = "\n".join(revision_ids)
+ path = self.bzrdir._path_for_remote_call(self._client)
+ response_tuple, response_handler = (
+ self._call_with_body_bytes_expecting_body(
+ "Repository.iter_revisions", (path, ), body))
+ if response_tuple[0] != "ok":
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ serializer_format = response_tuple[1]
+ serializer = serializer_format_registry.get(serializer_format)
+ byte_stream = response_handler.read_streamed_body()
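+ # The body is a series of zlib-compressed revision texts laid end to
+ # end; unused_data marks where one compressed text ends and the next
+ # begins.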
+ decompressor = zlib.decompressobj()
+ chunks = []
+ for bytes in byte_stream:
+ chunks.append(decompressor.decompress(bytes))
+ if decompressor.unused_data != "":
+ chunks.append(decompressor.flush())
+ yield serializer.read_revision_from_string("".join(chunks))
+ unused = decompressor.unused_data
+ decompressor = zlib.decompressobj()
+ chunks = [decompressor.decompress(unused)]
+ chunks.append(decompressor.flush())
+ text = "".join(chunks)
+ if text != "":
+ yield serializer.read_revision_from_string("".join(chunks))
+
+ @needs_read_lock
+ def get_revisions(self, revision_ids):
+ if revision_ids is None:
+ revision_ids = self.all_revision_ids()
+ else:
+ for rev_id in revision_ids:
+ if not rev_id or not isinstance(rev_id, basestring):
+ raise errors.InvalidRevisionId(
+ revision_id=rev_id, branch=self)
+ try:
+ missing = set(revision_ids)
+ revs = {}
+ for rev in self._iter_revisions_rpc(revision_ids):
+ missing.remove(rev.revision_id)
+ revs[rev.revision_id] = rev
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.get_revisions(revision_ids)
+ for fallback in self._fallback_repositories:
+ if not missing:
+ break
+ for revid in list(missing):
+ # XXX JRV 2011-11-20: It would be nice if there was a
+ # public method on Repository that could be used to query
+ # for revision objects *without* failing completely if one
+ # was missing. There is VersionedFileRepository._iter_revisions,
+ # but unfortunately that's private and not provided by
+ # all repository implementations.
+ try:
+ revs[revid] = fallback.get_revision(revid)
+ except errors.NoSuchRevision:
+ pass
+ else:
+ missing.remove(revid)
+ if missing:
+ raise errors.NoSuchRevision(self, list(missing)[0])
+ return [revs[revid] for revid in revision_ids]
+
+ def supports_rich_root(self):
+ return self._format.rich_root_data
+
+ @property
+ def _serializer(self):
+ return self._format._serializer
+
+ @needs_write_lock
+ def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
+ signature = gpg_strategy.sign(plaintext)
+ self.add_signature_text(revision_id, signature)
+
+ def add_signature_text(self, revision_id, signature):
+ if self._real_repository:
+ # If there is a real repository the write group will
+ # be in the real repository as well, so use that:
+ self._ensure_real()
+ return self._real_repository.add_signature_text(
+ revision_id, signature)
+ path = self.bzrdir._path_for_remote_call(self._client)
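+ # The current write group tokens go with the request; the server
+ # replies with the (possibly updated) tokens, recorded again below.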
+ response, handler = self._call_with_body_bytes_expecting_body(
+ 'Repository.add_signature_text', (path, self._lock_token,
+ revision_id) + tuple(self._write_group_tokens), signature)
+ handler.cancel_read_body()
+ self.refresh_data()
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._write_group_tokens = response[1:]
+
+ def has_signature_for_revision_id(self, revision_id):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('Repository.has_signature_for_revision_id',
+ path, revision_id)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_repository.has_signature_for_revision_id(
+ revision_id)
+ if response[0] not in ('yes', 'no'):
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+ if response[0] == 'yes':
+ return True
+ for fallback in self._fallback_repositories:
+ if fallback.has_signature_for_revision_id(revision_id):
+ return True
+ return False
+
+ @needs_read_lock
+ def verify_revision_signature(self, revision_id, gpg_strategy):
+ if not self.has_signature_for_revision_id(revision_id):
+ return gpg.SIGNATURE_NOT_SIGNED, None
+ signature = self.get_signature_text(revision_id)
+
+ testament = _mod_testament.Testament.from_revision(self, revision_id)
+ plaintext = testament.as_short_text()
+
+ return gpg_strategy.verify(signature, plaintext)
+
+ def item_keys_introduced_by(self, revision_ids, _files_pb=None):
+ self._ensure_real()
+ return self._real_repository.item_keys_introduced_by(revision_ids,
+ _files_pb=_files_pb)
+
+ def _find_inconsistent_revision_parents(self, revisions_iterator=None):
+ self._ensure_real()
+ return self._real_repository._find_inconsistent_revision_parents(
+ revisions_iterator)
+
+ def _check_for_inconsistent_revision_parents(self):
+ self._ensure_real()
+ return self._real_repository._check_for_inconsistent_revision_parents()
+
+ def _make_parents_provider(self, other=None):
+ providers = [self._unstacked_provider]
+ if other is not None:
+ providers.insert(0, other)
+ return graph.StackedParentsProvider(_LazyListJoin(
+ providers, self._fallback_repositories))
+
+ def _serialise_search_recipe(self, recipe):
+ """Serialise a graph search recipe.
+
+ :param recipe: A search recipe (start, stop, count).
+ :return: Serialised bytes.
+ """
+ start_keys = ' '.join(recipe[1])
+ stop_keys = ' '.join(recipe[2])
+ count = str(recipe[3])
+ return '\n'.join((start_keys, stop_keys, count))
+
+ def _serialise_search_result(self, search_result):
+ parts = search_result.get_network_struct()
+ return '\n'.join(parts)
+
+ def autopack(self):
+ path = self.bzrdir._path_for_remote_call(self._client)
+ try:
+ response = self._call('PackRepository.autopack', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ self._real_repository._pack_collection.autopack()
+ return
+ self.refresh_data()
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+
+
+class RemoteStreamSink(vf_repository.StreamSink):
+
+ def _insert_real(self, stream, src_format, resume_tokens):
+ self.target_repo._ensure_real()
+ sink = self.target_repo._real_repository._get_sink()
+ result = sink.insert_stream(stream, src_format, resume_tokens)
+ if not result:
+ self.target_repo.autopack()
+ return result
+
+ def insert_stream(self, stream, src_format, resume_tokens):
+ target = self.target_repo
+ target._unstacked_provider.missing_keys.clear()
+ candidate_calls = [('Repository.insert_stream_1.19', (1, 19))]
+ if target._lock_token:
+ candidate_calls.append(('Repository.insert_stream_locked', (1, 14)))
+ lock_args = (target._lock_token or '',)
+ else:
+ candidate_calls.append(('Repository.insert_stream', (1, 13)))
+ lock_args = ()
+ client = target._client
+ medium = client._medium
+ path = target.bzrdir._path_for_remote_call(client)
+ # Probe for the verb to use with an empty stream before sending the
+ # real stream to it. We do this both to avoid the risk of sending a
+ # large request that is then rejected, and because we don't want to
+ # implement a way to buffer, rewind, or restart the stream.
+ found_verb = False
+ for verb, required_version in candidate_calls:
+ if medium._is_remote_before(required_version):
+ continue
+ if resume_tokens:
+ # We've already done the probing (and set _is_remote_before) on
+ # a previous insert.
+ found_verb = True
+ break
+ byte_stream = smart_repo._stream_to_byte_stream([], src_format)
+ try:
+ response = client.call_with_body_stream(
+ (verb, path, '') + lock_args, byte_stream)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before(required_version)
+ else:
+ found_verb = True
+ break
+ if not found_verb:
+ # Have to use VFS.
+ return self._insert_real(stream, src_format, resume_tokens)
+ self._last_inv_record = None
+ self._last_substream = None
+ if required_version < (1, 19):
+ # Remote side doesn't support inventory deltas. Wrap the stream to
+ # make sure we don't send any. If the stream contains inventory
+ # deltas we'll interrupt the smart insert_stream request and
+ # fallback to VFS.
+ stream = self._stop_stream_if_inventory_delta(stream)
+ byte_stream = smart_repo._stream_to_byte_stream(
+ stream, src_format)
+ resume_tokens = ' '.join(resume_tokens)
+ response = client.call_with_body_stream(
+ (verb, path, resume_tokens) + lock_args, byte_stream)
+ if response[0][0] not in ('ok', 'missing-basis'):
+ raise errors.UnexpectedSmartServerResponse(response)
+ if self._last_substream is not None:
+ # The stream included an inventory-delta record, but the remote
+ # side isn't new enough to support them. So we need to send the
+ # rest of the stream via VFS.
+ self.target_repo.refresh_data()
+ return self._resume_stream_with_vfs(response, src_format)
+ if response[0][0] == 'missing-basis':
+ tokens, missing_keys = bencode.bdecode_as_tuple(response[0][1])
+ resume_tokens = tokens
+ return resume_tokens, set(missing_keys)
+ else:
+ self.target_repo.refresh_data()
+ return [], set()
+
+ def _resume_stream_with_vfs(self, response, src_format):
+ """Resume sending a stream via VFS, first resending the record and
+ substream that couldn't be sent via an insert_stream verb.
+ """
+ if response[0][0] == 'missing-basis':
+ tokens, missing_keys = bencode.bdecode_as_tuple(response[0][1])
+ # Ignore missing_keys, we haven't finished inserting yet
+ else:
+ tokens = []
+ def resume_substream():
+ # Yield the substream that was interrupted.
+ for record in self._last_substream:
+ yield record
+ self._last_substream = None
+ def resume_stream():
+ # Finish sending the interrupted substream
+ yield ('inventory-deltas', resume_substream())
+ # Then simply continue sending the rest of the stream.
+ for substream_kind, substream in self._last_stream:
+ yield substream_kind, substream
+ return self._insert_real(resume_stream(), src_format, tokens)
+
+ def _stop_stream_if_inventory_delta(self, stream):
+ """Normally this just lets the original stream pass-through unchanged.
+
+ However if any 'inventory-deltas' substream occurs it will stop
+ streaming, and store the interrupted substream and stream in
+ self._last_substream and self._last_stream so that the stream can be
+ resumed by _resume_stream_with_vfs.
+ """
+
+ stream_iter = iter(stream)
+ for substream_kind, substream in stream_iter:
+ if substream_kind == 'inventory-deltas':
+ self._last_substream = substream
+ self._last_stream = stream_iter
+ return
+ else:
+ yield substream_kind, substream
+
+
+class RemoteStreamSource(vf_repository.StreamSource):
+ """Stream data from a remote server."""
+
+ def get_stream(self, search):
+ if (self.from_repository._fallback_repositories and
+ self.to_format._fetch_order == 'topological'):
+ return self._real_stream(self.from_repository, search)
+ sources = []
+ seen = set()
+ repos = [self.from_repository]
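+ # Walk the stacking chain, collecting each repository (and its
+ # fallbacks) exactly once so missing_parents_chain can query them in
+ # order.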
+ while repos:
+ repo = repos.pop(0)
+ if repo in seen:
+ continue
+ seen.add(repo)
+ repos.extend(repo._fallback_repositories)
+ sources.append(repo)
+ return self.missing_parents_chain(search, sources)
+
+ def get_stream_for_missing_keys(self, missing_keys):
+ self.from_repository._ensure_real()
+ real_repo = self.from_repository._real_repository
+ real_source = real_repo._get_source(self.to_format)
+ return real_source.get_stream_for_missing_keys(missing_keys)
+
+ def _real_stream(self, repo, search):
+ """Get a stream for search from repo.
+
+ This never calls RemoteStreamSource.get_stream, and is a helper
+ for RemoteStreamSource._get_stream to allow getting a stream
+ reliably whether falling back because of old servers or trying
+ to stream from a non-RemoteRepository (which the stacked support
+ code will do).
+ """
+ source = repo._get_source(self.to_format)
+ if isinstance(source, RemoteStreamSource):
+ repo._ensure_real()
+ source = repo._real_repository._get_source(self.to_format)
+ return source.get_stream(search)
+
+ def _get_stream(self, repo, search):
+ """Core worker to get a stream from repo for search.
+
+ This is used by both get_stream and the stacking support logic. It
+ deliberately gets a stream for repo which does not need to be
+ self.from_repository. In the event that repo is not Remote, or
+ cannot do a smart stream, a fallback is made to the generic
+ repository._get_stream() interface, via self._real_stream.
+
+ In the event of stacking, streams from _get_stream will not
+ contain all the data for search - this is normal (see get_stream).
+
+ :param repo: A repository.
+ :param search: A search.
+ """
+ # Fallbacks may be non-smart
+ if not isinstance(repo, RemoteRepository):
+ return self._real_stream(repo, search)
+ client = repo._client
+ medium = client._medium
+ path = repo.bzrdir._path_for_remote_call(client)
+ search_bytes = repo._serialise_search_result(search)
+ args = (path, self.to_format.network_name())
+ candidate_verbs = [
+ ('Repository.get_stream_1.19', (1, 19)),
+ ('Repository.get_stream', (1, 13))]
+
+ found_verb = False
+ for verb, version in candidate_verbs:
+ if medium._is_remote_before(version):
+ continue
+ try:
+ response = repo._call_with_body_bytes_expecting_body(
+ verb, args, search_bytes)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before(version)
+ except errors.UnknownErrorFromSmartServer, e:
+ if isinstance(search, vf_search.EverythingResult):
+ error_verb = e.error_from_smart_server.error_verb
+ if error_verb == 'BadSearch':
+ # Pre-2.4 servers don't support this sort of search.
+ # XXX: perhaps falling back to VFS on BadSearch is a
+ # good idea in general? It might provide a little bit
+ # of protection against client-side bugs.
+ medium._remember_remote_is_before((2, 4))
+ break
+ raise
+ else:
+ response_tuple, response_handler = response
+ found_verb = True
+ break
+ if not found_verb:
+ return self._real_stream(repo, search)
+ if response_tuple[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ byte_stream = response_handler.read_streamed_body()
+ src_format, stream = smart_repo._byte_stream_to_stream(byte_stream,
+ self._record_counter)
+ if src_format.network_name() != repo._format.network_name():
+ raise AssertionError(
+ "Mismatched RemoteRepository and stream src %r, %r" % (
+ src_format.network_name(), repo._format.network_name()))
+ return stream
+
+ def missing_parents_chain(self, search, sources):
+ """Chain multiple streams together to handle stacking.
+
+ :param search: The overall search to satisfy with streams.
+ :param sources: A list of Repository objects to query.
+ """
+ self.from_serialiser = self.from_repository._format._serializer
+ self.seen_revs = set()
+ self.referenced_revs = set()
+ # If there are heads in the search, or the key count is > 0, we are not
+ # done.
+ while not search.is_empty() and len(sources) > 1:
+ source = sources.pop(0)
+ stream = self._get_stream(source, search)
+ for kind, substream in stream:
+ if kind != 'revisions':
+ yield kind, substream
+ else:
+ yield kind, self.missing_parents_rev_handler(substream)
+ search = search.refine(self.seen_revs, self.referenced_revs)
+ self.seen_revs = set()
+ self.referenced_revs = set()
+ if not search.is_empty():
+ for kind, stream in self._get_stream(sources[0], search):
+ yield kind, stream
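+
+ # For illustration: each pass through missing_parents_chain records the
+ # revisions a source actually delivered (seen_revs) and the parents
+ # those revisions referenced (referenced_revs); search.refine() then
+ # narrows the search so the next, more distant source only has to
+ # supply the ancestry that is still missing.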
+
+ def missing_parents_rev_handler(self, substream):
+ for content in substream:
+ revision_bytes = content.get_bytes_as('fulltext')
+ revision = self.from_serialiser.read_revision_from_string(
+ revision_bytes)
+ self.seen_revs.add(content.key[-1])
+ self.referenced_revs.update(revision.parent_ids)
+ yield content
+
+
+class RemoteBranchLockableFiles(LockableFiles):
+ """A 'LockableFiles' implementation that talks to a smart server.
+
+ This is not a public interface class.
+ """
+
+ def __init__(self, bzrdir, _client):
+ self.bzrdir = bzrdir
+ self._client = _client
+ self._need_find_modes = True
+ LockableFiles.__init__(
+ self, bzrdir.get_branch_transport(None),
+ 'lock', lockdir.LockDir)
+
+ def _find_modes(self):
+ # RemoteBranches don't let the client set the mode of control files.
+ self._dir_mode = None
+ self._file_mode = None
+
+
+class RemoteBranchFormat(branch.BranchFormat):
+
+ def __init__(self, network_name=None):
+ super(RemoteBranchFormat, self).__init__()
+ self._matchingbzrdir = RemoteBzrDirFormat()
+ self._matchingbzrdir.set_branch_format(self)
+ self._custom_format = None
+ self._network_name = network_name
+
+ def __eq__(self, other):
+ return (isinstance(other, RemoteBranchFormat) and
+ self.__dict__ == other.__dict__)
+
+ def _ensure_real(self):
+ if self._custom_format is None:
+ try:
+ self._custom_format = branch.network_format_registry.get(
+ self._network_name)
+ except KeyError:
+ raise errors.UnknownFormatError(kind='branch',
+ format=self._network_name)
+
+ def get_format_description(self):
+ self._ensure_real()
+ return 'Remote: ' + self._custom_format.get_format_description()
+
+ def network_name(self):
+ return self._network_name
+
+ def open(self, a_bzrdir, name=None, ignore_fallbacks=False):
+ return a_bzrdir.open_branch(name=name,
+ ignore_fallbacks=ignore_fallbacks)
+
+ def _vfs_initialize(self, a_bzrdir, name, append_revisions_only):
+ # Initialisation when using a local bzrdir object, or a non-vfs init
+ # method is not available on the server.
+ # self._custom_format is always set - the start of initialize ensures
+ # that.
+ if isinstance(a_bzrdir, RemoteBzrDir):
+ a_bzrdir._ensure_real()
+ result = self._custom_format.initialize(a_bzrdir._real_bzrdir,
+ name=name, append_revisions_only=append_revisions_only)
+ else:
+ # We assume the bzrdir is parameterised; it may not be.
+ result = self._custom_format.initialize(a_bzrdir, name=name,
+ append_revisions_only=append_revisions_only)
+ if (isinstance(a_bzrdir, RemoteBzrDir) and
+ not isinstance(result, RemoteBranch)):
+ result = RemoteBranch(a_bzrdir, a_bzrdir.find_repository(), result,
+ name=name)
+ return result
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ # 1) get the network name to use.
+ if self._custom_format:
+ network_name = self._custom_format.network_name()
+ else:
+ # Select the current bzrlib default and ask for that.
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ reference_format = reference_bzrdir_format.get_branch_format()
+ self._custom_format = reference_format
+ network_name = reference_format.network_name()
+ # Being asked to create on a non RemoteBzrDir:
+ if not isinstance(a_bzrdir, RemoteBzrDir):
+ return self._vfs_initialize(a_bzrdir, name=name,
+ append_revisions_only=append_revisions_only)
+ medium = a_bzrdir._client._medium
+ if medium._is_remote_before((1, 13)):
+ return self._vfs_initialize(a_bzrdir, name=name,
+ append_revisions_only=append_revisions_only)
+ # Creating on a remote bzr dir.
+ # 2) try direct creation via RPC
+ path = a_bzrdir._path_for_remote_call(a_bzrdir._client)
+ if name != "":
+ # XXX JRV20100304: Support creating colocated branches
+ raise errors.NoColocatedBranchSupport(self)
+ verb = 'BzrDir.create_branch'
+ try:
+ response = a_bzrdir._call(verb, path, network_name)
+ except errors.UnknownSmartMethod:
+ # Fallback - use vfs methods
+ medium._remember_remote_is_before((1, 13))
+ return self._vfs_initialize(a_bzrdir, name=name,
+ append_revisions_only=append_revisions_only)
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ # Turn the response into a RemoteRepository object.
+ format = RemoteBranchFormat(network_name=response[1])
+ repo_format = response_tuple_to_repo_format(response[3:])
+ repo_path = response[2]
+ if repository is not None:
+ remote_repo_url = urlutils.join(a_bzrdir.user_url, repo_path)
+ url_diff = urlutils.relative_url(repository.user_url,
+ remote_repo_url)
+ if url_diff != '.':
+ raise AssertionError(
+ 'repository.user_url %r does not match URL from server '
+ 'response (%r + %r)'
+ % (repository.user_url, a_bzrdir.user_url, repo_path))
+ remote_repo = repository
+ else:
+ if repo_path == '':
+ repo_bzrdir = a_bzrdir
+ else:
+ repo_bzrdir = RemoteBzrDir(
+ a_bzrdir.root_transport.clone(repo_path), a_bzrdir._format,
+ a_bzrdir._client)
+ remote_repo = RemoteRepository(repo_bzrdir, repo_format)
+ remote_branch = RemoteBranch(a_bzrdir, remote_repo,
+ format=format, setup_stacking=False, name=name)
+ if append_revisions_only:
+ remote_branch.set_append_revisions_only(append_revisions_only)
+ # XXX: We know this is a new branch, so it must have revno 0, revid
+ # NULL_REVISION. Creating the branch locked would make it impossible
+ # for this to be wrong; here it's simply very unlikely to be wrong. RBC 20090225
+ remote_branch._last_revision_info_cache = 0, NULL_REVISION
+ return remote_branch
+
+ def make_tags(self, branch):
+ self._ensure_real()
+ return self._custom_format.make_tags(branch)
+
+ def supports_tags(self):
+ # Remote branches might support tags, but we won't know until we
+ # access the real remote branch.
+ self._ensure_real()
+ return self._custom_format.supports_tags()
+
+ def supports_stacking(self):
+ self._ensure_real()
+ return self._custom_format.supports_stacking()
+
+ def supports_set_append_revisions_only(self):
+ self._ensure_real()
+ return self._custom_format.supports_set_append_revisions_only()
+
+ def _use_default_local_heads_to_fetch(self):
+ # If the branch format is a metadir format *and* its heads_to_fetch
+ # implementation is not overridden vs the base class, we can use the
+ # base class logic rather than use the heads_to_fetch RPC. This is
+ # usually cheaper in terms of net round trips, as the last-revision and
+ # tags info fetched is cached and would be fetched anyway.
+ self._ensure_real()
+ if isinstance(self._custom_format, branch.BranchFormatMetadir):
+ branch_class = self._custom_format._branch_class()
+ heads_to_fetch_impl = branch_class.heads_to_fetch.im_func
+ if heads_to_fetch_impl is branch.Branch.heads_to_fetch.im_func:
+ return True
+ return False
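+
+ # For illustration: comparing branch_class.heads_to_fetch.im_func with
+ # branch.Branch.heads_to_fetch.im_func checks whether the concrete
+ # branch class still uses the unmodified base implementation; only in
+ # that case is the locally computed tip-plus-tags result a safe
+ # substitute for the Branch.heads_to_fetch RPC.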
+
+
+class RemoteBranchStore(_mod_config.IniFileStore):
+ """Branch store which attempts to use HPSS calls to retrieve branch store.
+
+ Note that this is specific to bzr-based formats.
+ """
+
+ def __init__(self, branch):
+ super(RemoteBranchStore, self).__init__()
+ self.branch = branch
+ self.id = "branch"
+ self._real_store = None
+
+ def external_url(self):
+ return self.branch.user_url
+
+ def _load_content(self):
+ path = self.branch._remote_path()
+ try:
+ response, handler = self.branch._call_expecting_body(
+ 'Branch.get_config_file', path)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_store._load_content()
+ if len(response) and response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ return handler.read_body_bytes()
+
+ def _save_content(self, content):
+ path = self.branch._remote_path()
+ try:
+ response, handler = self.branch._call_with_body_bytes_expecting_body(
+ 'Branch.put_config_file', (path,
+ self.branch._lock_token, self.branch._repo_lock_token),
+ content)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_store._save_content(content)
+ handler.cancel_read_body()
+ if response != ('ok', ):
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _ensure_real(self):
+ self.branch._ensure_real()
+ if self._real_store is None:
+ self._real_store = _mod_config.BranchStore(self.branch)
+
+
+class RemoteBranch(branch.Branch, _RpcHelper, lock._RelockDebugMixin):
+ """Branch stored on a server accessed by HPSS RPC.
+
+ At the moment most operations are mapped down to simple file operations.
+ """
+
+ def __init__(self, remote_bzrdir, remote_repository, real_branch=None,
+ _client=None, format=None, setup_stacking=True, name=None,
+ possible_transports=None):
+ """Create a RemoteBranch instance.
+
+ :param real_branch: An optional local implementation of the branch
+ format, usually accessing the data via the VFS.
+ :param _client: Private parameter for testing.
+ :param format: A RemoteBranchFormat object, None to create one
+ automatically. If supplied it should have a network_name already
+ supplied.
+ :param setup_stacking: If True make an RPC call to determine the
+ stacked (or not) status of the branch. If False assume the branch
+ is not stacked.
+ :param name: Colocated branch name
+ """
+ # We intentionally don't call the parent class's __init__, because it
+ # will try to assign to self.tags, which is a property in this subclass.
+ # And the parent's __init__ doesn't do much anyway.
+ self.bzrdir = remote_bzrdir
+ self.name = name
+ if _client is not None:
+ self._client = _client
+ else:
+ self._client = remote_bzrdir._client
+ self.repository = remote_repository
+ if real_branch is not None:
+ self._real_branch = real_branch
+ # Give the remote repository the matching real repo.
+ real_repo = self._real_branch.repository
+ if isinstance(real_repo, RemoteRepository):
+ real_repo._ensure_real()
+ real_repo = real_repo._real_repository
+ self.repository._set_real_repository(real_repo)
+ # Give the branch the remote repository to let fast-pathing happen.
+ self._real_branch.repository = self.repository
+ else:
+ self._real_branch = None
+ # Fill out expected attributes of branch for bzrlib API users.
+ self._clear_cached_state()
+ # TODO: deprecate self.base in favor of user_url
+ self.base = self.bzrdir.user_url
+ self._name = name
+ self._control_files = None
+ self._lock_mode = None
+ self._lock_token = None
+ self._repo_lock_token = None
+ self._lock_count = 0
+ self._leave_lock = False
+ self.conf_store = None
+ # Setup a format: note that we cannot call _ensure_real until all the
+ # attributes above are set; this code cannot be moved higher up in this
+ # function.
+ if format is None:
+ self._format = RemoteBranchFormat()
+ if real_branch is not None:
+ self._format._network_name = \
+ self._real_branch._format.network_name()
+ else:
+ self._format = format
+ # when we do _ensure_real we may need to pass ignore_fallbacks to the
+ # branch.open_branch method.
+ self._real_ignore_fallbacks = not setup_stacking
+ if not self._format._network_name:
+ # Did not get from open_branchV2 - old server.
+ self._ensure_real()
+ self._format._network_name = \
+ self._real_branch._format.network_name()
+ self.tags = self._format.make_tags(self)
+ # The base class init is not called, so we duplicate this:
+ hooks = branch.Branch.hooks['open']
+ for hook in hooks:
+ hook(self)
+ self._is_stacked = False
+ if setup_stacking:
+ self._setup_stacking(possible_transports)
+
+ def _setup_stacking(self, possible_transports):
+ # configure stacking into the remote repository, by reading it from
+ # the vfs branch.
+ try:
+ fallback_url = self.get_stacked_on_url()
+ except (errors.NotStacked, errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat), e:
+ return
+ self._is_stacked = True
+ if possible_transports is None:
+ possible_transports = []
+ else:
+ possible_transports = list(possible_transports)
+ possible_transports.append(self.bzrdir.root_transport)
+ self._activate_fallback_location(fallback_url,
+ possible_transports=possible_transports)
+
+ def _get_config(self):
+ return RemoteBranchConfig(self)
+
+ def _get_config_store(self):
+ if self.conf_store is None:
+ self.conf_store = RemoteBranchStore(self)
+ return self.conf_store
+
+ def _get_real_transport(self):
+ # if we try vfs access, return the real branch's vfs transport
+ self._ensure_real()
+ return self._real_branch._transport
+
+ _transport = property(_get_real_transport)
+
+ def __str__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.base)
+
+ __repr__ = __str__
+
+ def _ensure_real(self):
+ """Ensure that there is a _real_branch set.
+
+ Used before calls to self._real_branch.
+ """
+ if self._real_branch is None:
+ if not vfs.vfs_enabled():
+ raise AssertionError('smart server vfs must be enabled '
+ 'to use vfs implementation')
+ self.bzrdir._ensure_real()
+ self._real_branch = self.bzrdir._real_bzrdir.open_branch(
+ ignore_fallbacks=self._real_ignore_fallbacks, name=self._name)
+ # The remote branch and the real branch share the same store. If
+ # they didn't, there would always be cases where one of the stores
+ # did not see an update made on the other.
+ self._real_branch.conf_store = self.conf_store
+ if self.repository._real_repository is None:
+ # Give the remote repository the matching real repo.
+ real_repo = self._real_branch.repository
+ if isinstance(real_repo, RemoteRepository):
+ real_repo._ensure_real()
+ real_repo = real_repo._real_repository
+ self.repository._set_real_repository(real_repo)
+ # Give the real branch the remote repository to let fast-pathing
+ # happen.
+ self._real_branch.repository = self.repository
+ if self._lock_mode == 'r':
+ self._real_branch.lock_read()
+ elif self._lock_mode == 'w':
+ self._real_branch.lock_write(token=self._lock_token)
+
+ def _translate_error(self, err, **context):
+ self.repository._translate_error(err, branch=self, **context)
+
+ def _clear_cached_state(self):
+ super(RemoteBranch, self)._clear_cached_state()
+ if self._real_branch is not None:
+ self._real_branch._clear_cached_state()
+
+ def _clear_cached_state_of_remote_branch_only(self):
+ """Like _clear_cached_state, but doesn't clear the cache of
+ self._real_branch.
+
+ This is useful when falling back to calling a method of
+ self._real_branch that changes state. In that case the underlying
+ branch changes, so we need to invalidate this RemoteBranch's cache of
+ it. However, there's no need to invalidate the _real_branch's cache
+ too, in fact doing so might harm performance.
+ """
+ super(RemoteBranch, self)._clear_cached_state()
+
+ @property
+ def control_files(self):
+ # Defer actually creating RemoteBranchLockableFiles until it's needed,
+ # because it triggers an _ensure_real that we otherwise might not need.
+ if self._control_files is None:
+ self._control_files = RemoteBranchLockableFiles(
+ self.bzrdir, self._client)
+ return self._control_files
+
+ def get_physical_lock_status(self):
+ """See Branch.get_physical_lock_status()."""
+ try:
+ response = self._client.call('Branch.get_physical_lock_status',
+ self._remote_path())
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_branch.get_physical_lock_status()
+ if response[0] not in ('yes', 'no'):
+ raise errors.UnexpectedSmartServerResponse(response)
+ return (response[0] == 'yes')
+
+ def get_stacked_on_url(self):
+ """Get the URL this branch is stacked against.
+
+ :raises NotStacked: If the branch is not stacked.
+ :raises UnstackableBranchFormat: If the branch does not support
+ stacking.
+ :raises UnstackableRepositoryFormat: If the repository does not support
+ stacking.
+ """
+ try:
+ # there may not be a repository yet, so we can't use
+ # self._translate_error, so we can't use self._call either.
+ response = self._client.call('Branch.get_stacked_on_url',
+ self._remote_path())
+ except errors.ErrorFromSmartServer, err:
+ # there may not be a repository yet, so we can't call through
+ # its _translate_error
+ _translate_error(err, branch=self)
+ except errors.UnknownSmartMethod, err:
+ self._ensure_real()
+ return self._real_branch.get_stacked_on_url()
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ return response[1]
+
+ def set_stacked_on_url(self, url):
+ branch.Branch.set_stacked_on_url(self, url)
+ # We need the stacked_on_url to be visible both locally (to not query
+ # it repeatedly) and remotely (so smart verbs can get it server side)
+ # Without the following line,
+ # bzrlib.tests.per_branch.test_create_clone.TestCreateClone
+ # .test_create_clone_on_transport_stacked_hooks_get_stacked_branch
+ # fails for remote branches -- vila 2012-01-04
+ self.conf_store.save_changes()
+ if not url:
+ self._is_stacked = False
+ else:
+ self._is_stacked = True
+
+ def _vfs_get_tags_bytes(self):
+ self._ensure_real()
+ return self._real_branch._get_tags_bytes()
+
+ @needs_read_lock
+ def _get_tags_bytes(self):
+ if self._tags_bytes is None:
+ self._tags_bytes = self._get_tags_bytes_via_hpss()
+ return self._tags_bytes
+
+ def _get_tags_bytes_via_hpss(self):
+ medium = self._client._medium
+ if medium._is_remote_before((1, 13)):
+ return self._vfs_get_tags_bytes()
+ try:
+ response = self._call('Branch.get_tags_bytes', self._remote_path())
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 13))
+ return self._vfs_get_tags_bytes()
+ return response[0]
+
+ def _vfs_set_tags_bytes(self, bytes):
+ self._ensure_real()
+ return self._real_branch._set_tags_bytes(bytes)
+
+ def _set_tags_bytes(self, bytes):
+ if self.is_locked():
+ self._tags_bytes = bytes
+ medium = self._client._medium
+ if medium._is_remote_before((1, 18)):
+ self._vfs_set_tags_bytes(bytes)
+ return
+ try:
+ args = (
+ self._remote_path(), self._lock_token, self._repo_lock_token)
+ response = self._call_with_body_bytes(
+ 'Branch.set_tags_bytes', args, bytes)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 18))
+ self._vfs_set_tags_bytes(bytes)
+
+ def lock_read(self):
+ """Lock the branch for read operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ self.repository.lock_read()
+ if not self._lock_mode:
+ self._note_lock('r')
+ self._lock_mode = 'r'
+ self._lock_count = 1
+ if self._real_branch is not None:
+ self._real_branch.lock_read()
+ else:
+ self._lock_count += 1
+ return lock.LogicalLockResult(self.unlock)
+
+ def _remote_lock_write(self, token):
+ if token is None:
+ branch_token = repo_token = ''
+ else:
+ branch_token = token
+ repo_token = self.repository.lock_write().repository_token
+ self.repository.unlock()
+ err_context = {'token': token}
+ try:
+ response = self._call(
+ 'Branch.lock_write', self._remote_path(), branch_token,
+ repo_token or '', **err_context)
+ except errors.LockContention, e:
+ # The LockContention from the server doesn't have any
+ # information about the lock_url. We re-raise LockContention
+ # with a valid lock_url.
+ raise errors.LockContention('(remote lock)',
+ self.repository.base.split('.bzr/')[0])
+ if response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ ok, branch_token, repo_token = response
+ return branch_token, repo_token
+
+ def lock_write(self, token=None):
+ if not self._lock_mode:
+ self._note_lock('w')
+ # Lock the branch and repo in one remote call.
+ remote_tokens = self._remote_lock_write(token)
+ self._lock_token, self._repo_lock_token = remote_tokens
+ if not self._lock_token:
+ raise SmartProtocolError('Remote server did not return a token!')
+ # Tell the self.repository object that it is locked.
+ self.repository.lock_write(
+ self._repo_lock_token, _skip_rpc=True)
+
+ if self._real_branch is not None:
+ self._real_branch.lock_write(token=self._lock_token)
+ if token is not None:
+ self._leave_lock = True
+ else:
+ self._leave_lock = False
+ self._lock_mode = 'w'
+ self._lock_count = 1
+ elif self._lock_mode == 'r':
+ raise errors.ReadOnlyError(self)
+ else:
+ if token is not None:
+ # A token was given to lock_write, and we're relocking, so
+ # check that the given token actually matches the one we
+ # already have.
+ if token != self._lock_token:
+ raise errors.TokenMismatch(token, self._lock_token)
+ self._lock_count += 1
+ # Re-lock the repository too.
+ self.repository.lock_write(self._repo_lock_token)
+ return BranchWriteLockResult(self.unlock, self._lock_token or None)
+
+ def _unlock(self, branch_token, repo_token):
+ err_context = {'token': str((branch_token, repo_token))}
+ response = self._call(
+ 'Branch.unlock', self._remote_path(), branch_token,
+ repo_token or '', **err_context)
+ if response == ('ok',):
+ return
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ try:
+ self._lock_count -= 1
+ if not self._lock_count:
+ if self.conf_store is not None:
+ self.conf_store.save_changes()
+ self._clear_cached_state()
+ mode = self._lock_mode
+ self._lock_mode = None
+ if self._real_branch is not None:
+ if (not self._leave_lock and mode == 'w' and
+ self._repo_lock_token):
+ # If this RemoteBranch will remove the physical lock
+ # for the repository, make sure the _real_branch
+ # doesn't do it first. (Because the _real_branch's
+ # repository is set to be the RemoteRepository.)
+ self._real_branch.repository.leave_lock_in_place()
+ self._real_branch.unlock()
+ if mode != 'w':
+ # Only write-locked branches need to make a remote method
+ # call to perform the unlock.
+ return
+ if not self._lock_token:
+ raise AssertionError('Locked, but no token!')
+ branch_token = self._lock_token
+ repo_token = self._repo_lock_token
+ self._lock_token = None
+ self._repo_lock_token = None
+ if not self._leave_lock:
+ self._unlock(branch_token, repo_token)
+ finally:
+ self.repository.unlock()
+
+ def break_lock(self):
+ try:
+ response = self._call(
+ 'Branch.break_lock', self._remote_path())
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_branch.break_lock()
+ if response != ('ok',):
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def leave_lock_in_place(self):
+ if not self._lock_token:
+ raise NotImplementedError(self.leave_lock_in_place)
+ self._leave_lock = True
+
+ def dont_leave_lock_in_place(self):
+ if not self._lock_token:
+ raise NotImplementedError(self.dont_leave_lock_in_place)
+ self._leave_lock = False
+
+ @needs_read_lock
+ def get_rev_id(self, revno, history=None):
+ if revno == 0:
+ return _mod_revision.NULL_REVISION
+ last_revision_info = self.last_revision_info()
+ ok, result = self.repository.get_rev_id_for_revno(
+ revno, last_revision_info)
+ if ok:
+ return result
+ missing_parent = result[1]
+ # Either the revision named by the server is missing, or its parent
+ # is. Call get_parent_map to determine which, so that we report a
+ # useful error.
+ parent_map = self.repository.get_parent_map([missing_parent])
+ if missing_parent in parent_map:
+ missing_parent = parent_map[missing_parent]
+ raise errors.RevisionNotPresent(missing_parent, self.repository)
+
+ def _read_last_revision_info(self):
+ response = self._call('Branch.last_revision_info', self._remote_path())
+ if response[0] != 'ok':
+ raise SmartProtocolError('unexpected response code %s' % (response,))
+ revno = int(response[1])
+ last_revision = response[2]
+ return (revno, last_revision)
+
+ def _gen_revision_history(self):
+ """See Branch._gen_revision_history()."""
+ if self._is_stacked:
+ self._ensure_real()
+ return self._real_branch._gen_revision_history()
+ response_tuple, response_handler = self._call_expecting_body(
+ 'Branch.revision_history', self._remote_path())
+ if response_tuple[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response_tuple)
+ result = response_handler.read_body_bytes().split('\x00')
+ if result == ['']:
+ return []
+ return result
+
+ def _remote_path(self):
+ return self.bzrdir._path_for_remote_call(self._client)
+
+ def _set_last_revision_descendant(self, revision_id, other_branch,
+ allow_diverged=False, allow_overwrite_descendant=False):
+ # This performs additional work to meet the hook contract; while it's
+ # undesirable, we have to synthesise the revno to call the hook, and
+ # not calling the hook is worse as it means changes can't be prevented.
+ # Having calculated this though, we can't just call into
+ # set_last_revision_info as a simple call, because there is a set_rh
+ # hook that some folk may still be using.
+ old_revno, old_revid = self.last_revision_info()
+ history = self._lefthand_history(revision_id)
+ self._run_pre_change_branch_tip_hooks(len(history), revision_id)
+ err_context = {'other_branch': other_branch}
+ response = self._call('Branch.set_last_revision_ex',
+ self._remote_path(), self._lock_token, self._repo_lock_token,
+ revision_id, int(allow_diverged), int(allow_overwrite_descendant),
+ **err_context)
+ self._clear_cached_state()
+ if len(response) != 3 and response[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ new_revno, new_revision_id = response[1:]
+ self._last_revision_info_cache = new_revno, new_revision_id
+ self._run_post_change_branch_tip_hooks(old_revno, old_revid)
+ if self._real_branch is not None:
+ cache = new_revno, new_revision_id
+ self._real_branch._last_revision_info_cache = cache
+
+ def _set_last_revision(self, revision_id):
+ old_revno, old_revid = self.last_revision_info()
+ # This performs additional work to meet the hook contract; while it's
+ # undesirable, we have to synthesise the revno to call the hook, and
+ # not calling the hook is worse as it means changes can't be prevented.
+ # Having calculated this though, we can't just call into
+ # set_last_revision_info as a simple call, because there is a set_rh
+ # hook that some folk may still be using.
+ history = self._lefthand_history(revision_id)
+ self._run_pre_change_branch_tip_hooks(len(history), revision_id)
+ self._clear_cached_state()
+ response = self._call('Branch.set_last_revision',
+ self._remote_path(), self._lock_token, self._repo_lock_token,
+ revision_id)
+ if response != ('ok',):
+ raise errors.UnexpectedSmartServerResponse(response)
+ self._run_post_change_branch_tip_hooks(old_revno, old_revid)
+
+ def _get_parent_location(self):
+ medium = self._client._medium
+ if medium._is_remote_before((1, 13)):
+ return self._vfs_get_parent_location()
+ try:
+ response = self._call('Branch.get_parent', self._remote_path())
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 13))
+ return self._vfs_get_parent_location()
+ if len(response) != 1:
+ raise errors.UnexpectedSmartServerResponse(response)
+ parent_location = response[0]
+ if parent_location == '':
+ return None
+ return parent_location
+
+ def _vfs_get_parent_location(self):
+ self._ensure_real()
+ return self._real_branch._get_parent_location()
+
+ def _set_parent_location(self, url):
+ medium = self._client._medium
+ if medium._is_remote_before((1, 15)):
+ return self._vfs_set_parent_location(url)
+ try:
+ call_url = url or ''
+ if type(call_url) is not str:
+ raise AssertionError('url must be a str or None (%s)' % url)
+ response = self._call('Branch.set_parent_location',
+ self._remote_path(), self._lock_token, self._repo_lock_token,
+ call_url)
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 15))
+ return self._vfs_set_parent_location(url)
+ if response != ():
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _vfs_set_parent_location(self, url):
+ self._ensure_real()
+ return self._real_branch._set_parent_location(url)
+
+ @needs_write_lock
+ def pull(self, source, overwrite=False, stop_revision=None,
+ **kwargs):
+ self._clear_cached_state_of_remote_branch_only()
+ self._ensure_real()
+ return self._real_branch.pull(
+ source, overwrite=overwrite, stop_revision=stop_revision,
+ _override_hook_target=self, **kwargs)
+
+ @needs_read_lock
+ def push(self, target, overwrite=False, stop_revision=None, lossy=False):
+ self._ensure_real()
+ return self._real_branch.push(
+ target, overwrite=overwrite, stop_revision=stop_revision, lossy=lossy,
+ _override_hook_source_branch=self)
+
+ def is_locked(self):
+ return self._lock_count >= 1
+
+ @needs_read_lock
+ def revision_id_to_dotted_revno(self, revision_id):
+ """Given a revision id, return its dotted revno.
+
+ :return: a tuple like (1,) or (400,1,3).
+ """
+ try:
+ response = self._call('Branch.revision_id_to_revno',
+ self._remote_path(), revision_id)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_branch.revision_id_to_dotted_revno(revision_id)
+ if response[0] == 'ok':
+ return tuple([int(x) for x in response[1:]])
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @needs_read_lock
+ def revision_id_to_revno(self, revision_id):
+ """Given a revision id on the branch mainline, return its revno.
+
+ :return: an integer
+ """
+ try:
+ response = self._call('Branch.revision_id_to_revno',
+ self._remote_path(), revision_id)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ return self._real_branch.revision_id_to_revno(revision_id)
+ if response[0] == 'ok':
+ if len(response) == 2:
+ return int(response[1])
+ raise NoSuchRevision(self, revision_id)
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @needs_write_lock
+ def set_last_revision_info(self, revno, revision_id):
+ # XXX: These should be returned by the set_last_revision_info verb
+ old_revno, old_revid = self.last_revision_info()
+ self._run_pre_change_branch_tip_hooks(revno, revision_id)
+ if not revision_id or not isinstance(revision_id, basestring):
+ raise errors.InvalidRevisionId(revision_id=revision_id, branch=self)
+ try:
+ response = self._call('Branch.set_last_revision_info',
+ self._remote_path(), self._lock_token, self._repo_lock_token,
+ str(revno), revision_id)
+ except errors.UnknownSmartMethod:
+ self._ensure_real()
+ self._clear_cached_state_of_remote_branch_only()
+ self._real_branch.set_last_revision_info(revno, revision_id)
+ self._last_revision_info_cache = revno, revision_id
+ return
+ if response == ('ok',):
+ self._clear_cached_state()
+ self._last_revision_info_cache = revno, revision_id
+ self._run_post_change_branch_tip_hooks(old_revno, old_revid)
+ # Update the _real_branch's cache too.
+ if self._real_branch is not None:
+ cache = self._last_revision_info_cache
+ self._real_branch._last_revision_info_cache = cache
+ else:
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ @needs_write_lock
+ def generate_revision_history(self, revision_id, last_rev=None,
+ other_branch=None):
+ medium = self._client._medium
+ if not medium._is_remote_before((1, 6)):
+ # Use a smart method for 1.6 and above servers
+ try:
+ self._set_last_revision_descendant(revision_id, other_branch,
+ allow_diverged=True, allow_overwrite_descendant=True)
+ return
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((1, 6))
+ self._clear_cached_state_of_remote_branch_only()
+ graph = self.repository.get_graph()
+ (last_revno, last_revid) = self.last_revision_info()
+ known_revision_ids = [
+ (last_revid, last_revno),
+ (_mod_revision.NULL_REVISION, 0),
+ ]
+ if last_rev is not None:
+ if not graph.is_ancestor(last_rev, revision_id):
+ # our previous tip is not merged into stop_revision
+ raise errors.DivergedBranches(self, other_branch)
+ revno = graph.find_distance_to_null(revision_id, known_revision_ids)
+ self.set_last_revision_info(revno, revision_id)
+
+ def set_push_location(self, location):
+ self._set_config_location('push_location', location)
+
+ def heads_to_fetch(self):
+ if self._format._use_default_local_heads_to_fetch():
+ # We recognise this format, and its heads-to-fetch implementation
+ # is the default one (tip + tags). In this case it's cheaper to
+ # just use the default implementation rather than a special RPC as
+ # the tip and tags data is cached.
+ return branch.Branch.heads_to_fetch(self)
+ medium = self._client._medium
+ if medium._is_remote_before((2, 4)):
+ return self._vfs_heads_to_fetch()
+ try:
+ return self._rpc_heads_to_fetch()
+ except errors.UnknownSmartMethod:
+ medium._remember_remote_is_before((2, 4))
+ return self._vfs_heads_to_fetch()
+
+ def _rpc_heads_to_fetch(self):
+ response = self._call('Branch.heads_to_fetch', self._remote_path())
+ if len(response) != 2:
+ raise errors.UnexpectedSmartServerResponse(response)
+ must_fetch, if_present_fetch = response
+ return set(must_fetch), set(if_present_fetch)
+
+ def _vfs_heads_to_fetch(self):
+ self._ensure_real()
+ return self._real_branch.heads_to_fetch()
+
+
+class RemoteConfig(object):
+ """A Config that reads and writes from smart verbs.
+
+ It is a low-level object that considers config data to be name/value pairs
+ that may be associated with a section. Assigning meaning to these
+ values is done at higher levels like bzrlib.config.TreeConfig.
+ """
+
+ def get_option(self, name, section=None, default=None):
+ """Return the value associated with a named option.
+
+ :param name: The name of the value
+ :param section: The section the option is in (if any)
+ :param default: The value to return if the value is not set
+ :return: The value or default value
+ """
+ try:
+ configobj = self._get_configobj()
+ section_obj = None
+ if section is None:
+ section_obj = configobj
+ else:
+ try:
+ section_obj = configobj[section]
+ except KeyError:
+ pass
+ if section_obj is None:
+ value = default
+ else:
+ value = section_obj.get(name, default)
+ except errors.UnknownSmartMethod:
+ value = self._vfs_get_option(name, section, default)
+ for hook in _mod_config.OldConfigHooks['get']:
+ hook(self, name, value)
+ return value
+
+ def _response_to_configobj(self, response):
+ if len(response[0]) and response[0][0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(response)
+ lines = response[1].read_body_bytes().splitlines()
+ conf = _mod_config.ConfigObj(lines, encoding='utf-8')
+ for hook in _mod_config.OldConfigHooks['load']:
+ hook(self)
+ return conf
+
+
+class RemoteBranchConfig(RemoteConfig):
+ """A RemoteConfig for Branches."""
+
+ def __init__(self, branch):
+ self._branch = branch
+
+ def _get_configobj(self):
+ path = self._branch._remote_path()
+ response = self._branch._client.call_expecting_body(
+ 'Branch.get_config_file', path)
+ return self._response_to_configobj(response)
+
+ def set_option(self, value, name, section=None):
+ """Set the value associated with a named option.
+
+ :param value: The value to set
+ :param name: The name of the value to set
+ :param section: The section the option is in (if any)
+ """
+ medium = self._branch._client._medium
+ if medium._is_remote_before((1, 14)):
+ return self._vfs_set_option(value, name, section)
+ if isinstance(value, dict):
+ if medium._is_remote_before((2, 2)):
+ return self._vfs_set_option(value, name, section)
+ return self._set_config_option_dict(value, name, section)
+ else:
+ return self._set_config_option(value, name, section)
+
+ def _set_config_option(self, value, name, section):
+ try:
+ path = self._branch._remote_path()
+ response = self._branch._client.call('Branch.set_config_option',
+ path, self._branch._lock_token, self._branch._repo_lock_token,
+ value.encode('utf8'), name, section or '')
+ except errors.UnknownSmartMethod:
+ medium = self._branch._client._medium
+ medium._remember_remote_is_before((1, 14))
+ return self._vfs_set_option(value, name, section)
+ if response != ():
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _serialize_option_dict(self, option_dict):
+ utf8_dict = {}
+ for key, value in option_dict.items():
+ if isinstance(key, unicode):
+ key = key.encode('utf8')
+ if isinstance(value, unicode):
+ value = value.encode('utf8')
+ utf8_dict[key] = value
+ return bencode.bencode(utf8_dict)
+
+ def _set_config_option_dict(self, value, name, section):
+ try:
+ path = self._branch._remote_path()
+ serialised_dict = self._serialize_option_dict(value)
+ response = self._branch._client.call(
+ 'Branch.set_config_option_dict',
+ path, self._branch._lock_token, self._branch._repo_lock_token,
+ serialised_dict, name, section or '')
+ except errors.UnknownSmartMethod:
+ medium = self._branch._client._medium
+ medium._remember_remote_is_before((2, 2))
+ return self._vfs_set_option(value, name, section)
+ if response != ():
+ raise errors.UnexpectedSmartServerResponse(response)
+
+ def _real_object(self):
+ self._branch._ensure_real()
+ return self._branch._real_branch
+
+ def _vfs_set_option(self, value, name, section=None):
+ return self._real_object()._get_config().set_option(
+ value, name, section)
+
+
+class RemoteBzrDirConfig(RemoteConfig):
+ """A RemoteConfig for BzrDirs."""
+
+ def __init__(self, bzrdir):
+ self._bzrdir = bzrdir
+
+ def _get_configobj(self):
+ medium = self._bzrdir._client._medium
+ verb = 'BzrDir.get_config_file'
+ if medium._is_remote_before((1, 15)):
+ raise errors.UnknownSmartMethod(verb)
+ path = self._bzrdir._path_for_remote_call(self._bzrdir._client)
+ response = self._bzrdir._call_expecting_body(
+ verb, path)
+ return self._response_to_configobj(response)
+
+ def _vfs_get_option(self, name, section, default):
+ return self._real_object()._get_config().get_option(
+ name, section, default)
+
+ def set_option(self, value, name, section=None):
+ """Set the value associated with a named option.
+
+ :param value: The value to set
+ :param name: The name of the value to set
+ :param section: The section the option is in (if any)
+ """
+ return self._real_object()._get_config().set_option(
+ value, name, section)
+
+ def _real_object(self):
+ self._bzrdir._ensure_real()
+ return self._bzrdir._real_bzrdir
+
+
+def _extract_tar(tar, to_dir):
+ """Extract all the contents of a tarfile object.
+
+ A replacement for extractall, which is not present in python2.4
+ """
+ for tarinfo in tar:
+ tar.extract(tarinfo, to_dir)
+
+
+error_translators = registry.Registry()
+no_context_error_translators = registry.Registry()
+
+
+def _translate_error(err, **context):
+ """Translate an ErrorFromSmartServer into a more useful error.
+
+ Possible context keys:
+ - branch
+ - repository
+ - bzrdir
+ - token
+ - other_branch
+ - path
+
+ If the error from the server doesn't match a known pattern, then
+ UnknownErrorFromSmartServer is raised.
+ """
+ def find(name):
+ try:
+ return context[name]
+ except KeyError, key_err:
+ mutter('Missing key %r in context %r', key_err.args[0], context)
+ raise err
+ def get_path():
+ """Get the path from the context if present, otherwise use first error
+ arg.
+ """
+ try:
+ return context['path']
+ except KeyError, key_err:
+ try:
+ return err.error_args[0]
+ except IndexError, idx_err:
+ mutter(
+ 'Missing key %r in context %r', key_err.args[0], context)
+ raise err
+
+ try:
+ translator = error_translators.get(err.error_verb)
+ except KeyError:
+ pass
+ else:
+ raise translator(err, find, get_path)
+ try:
+ translator = no_context_error_translators.get(err.error_verb)
+ except KeyError:
+ raise errors.UnknownErrorFromSmartServer(err)
+ else:
+ raise translator(err)
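+
+ # For illustration: entries in error_translators receive (err, find,
+ # get_path) and return the exception to raise, e.g. the 'NoSuchRevision'
+ # entry below builds a NoSuchRevision from find('branch') and the first
+ # server-supplied argument; no_context_error_translators entries are
+ # simpler and only receive err.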
+
+
+error_translators.register('NoSuchRevision',
+ lambda err, find, get_path: NoSuchRevision(
+ find('branch'), err.error_args[0]))
+error_translators.register('nosuchrevision',
+ lambda err, find, get_path: NoSuchRevision(
+ find('repository'), err.error_args[0]))
+
+def _translate_nobranch_error(err, find, get_path):
+ if len(err.error_args) >= 1:
+ extra = err.error_args[0]
+ else:
+ extra = None
+ return errors.NotBranchError(path=find('bzrdir').root_transport.base,
+ detail=extra)
+
+error_translators.register('nobranch', _translate_nobranch_error)
+error_translators.register('norepository',
+ lambda err, find, get_path: errors.NoRepositoryPresent(
+ find('bzrdir')))
+error_translators.register('UnlockableTransport',
+ lambda err, find, get_path: errors.UnlockableTransport(
+ find('bzrdir').root_transport))
+error_translators.register('TokenMismatch',
+ lambda err, find, get_path: errors.TokenMismatch(
+ find('token'), '(remote token)'))
+error_translators.register('Diverged',
+ lambda err, find, get_path: errors.DivergedBranches(
+ find('branch'), find('other_branch')))
+error_translators.register('NotStacked',
+ lambda err, find, get_path: errors.NotStacked(branch=find('branch')))
+
+def _translate_PermissionDenied(err, find, get_path):
+ path = get_path()
+ if len(err.error_args) >= 2:
+ extra = err.error_args[1]
+ else:
+ extra = None
+ return errors.PermissionDenied(path, extra=extra)
+
+error_translators.register('PermissionDenied', _translate_PermissionDenied)
+error_translators.register('ReadError',
+ lambda err, find, get_path: errors.ReadError(get_path()))
+error_translators.register('NoSuchFile',
+ lambda err, find, get_path: errors.NoSuchFile(get_path()))
+error_translators.register('TokenLockingNotSupported',
+ lambda err, find, get_path: errors.TokenLockingNotSupported(
+ find('repository')))
+error_translators.register('UnsuspendableWriteGroup',
+ lambda err, find, get_path: errors.UnsuspendableWriteGroup(
+ repository=find('repository')))
+error_translators.register('UnresumableWriteGroup',
+ lambda err, find, get_path: errors.UnresumableWriteGroup(
+ repository=find('repository'), write_groups=err.error_args[0],
+ reason=err.error_args[1]))
+no_context_error_translators.register('IncompatibleRepositories',
+ lambda err: errors.IncompatibleRepositories(
+ err.error_args[0], err.error_args[1], err.error_args[2]))
+no_context_error_translators.register('LockContention',
+ lambda err: errors.LockContention('(remote lock)'))
+no_context_error_translators.register('LockFailed',
+ lambda err: errors.LockFailed(err.error_args[0], err.error_args[1]))
+no_context_error_translators.register('TipChangeRejected',
+ lambda err: errors.TipChangeRejected(err.error_args[0].decode('utf8')))
+no_context_error_translators.register('UnstackableBranchFormat',
+ lambda err: errors.UnstackableBranchFormat(*err.error_args))
+no_context_error_translators.register('UnstackableRepositoryFormat',
+ lambda err: errors.UnstackableRepositoryFormat(*err.error_args))
+no_context_error_translators.register('FileExists',
+ lambda err: errors.FileExists(err.error_args[0]))
+no_context_error_translators.register('DirectoryNotEmpty',
+ lambda err: errors.DirectoryNotEmpty(err.error_args[0]))
+
+def _translate_short_readv_error(err):
+ args = err.error_args
+ return errors.ShortReadvError(args[0], int(args[1]), int(args[2]),
+ int(args[3]))
+
+no_context_error_translators.register('ShortReadvError',
+ _translate_short_readv_error)
+
+def _translate_unicode_error(err):
+ encoding = str(err.error_args[0]) # encoding must always be a string
+ val = err.error_args[1]
+ start = int(err.error_args[2])
+ end = int(err.error_args[3])
+ reason = str(err.error_args[4]) # reason must always be a string
+ if val.startswith('u:'):
+ val = val[2:].decode('utf-8')
+ elif val.startswith('s:'):
+ val = val[2:].decode('base64')
+ if err.error_verb == 'UnicodeDecodeError':
+ raise UnicodeDecodeError(encoding, val, start, end, reason)
+ elif err.error_verb == 'UnicodeEncodeError':
+ raise UnicodeEncodeError(encoding, val, start, end, reason)
+
+no_context_error_translators.register('UnicodeEncodeError',
+ _translate_unicode_error)
+no_context_error_translators.register('UnicodeDecodeError',
+ _translate_unicode_error)
+no_context_error_translators.register('ReadOnlyError',
+ lambda err: errors.TransportNotPossible('readonly transport'))
+no_context_error_translators.register('MemoryError',
+ lambda err: errors.BzrError("remote server out of memory\n"
+ "Retry non-remotely, or contact the server admin for details."))
+no_context_error_translators.register('RevisionNotPresent',
+ lambda err: errors.RevisionNotPresent(err.error_args[0], err.error_args[1]))
+
+no_context_error_translators.register('BzrCheckError',
+ lambda err: errors.BzrCheckError(msg=err.error_args[0]))
+
diff --git a/bzrlib/rename_map.py b/bzrlib/rename_map.py
new file mode 100644
index 0000000..88ec730
--- /dev/null
+++ b/bzrlib/rename_map.py
@@ -0,0 +1,264 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ osutils,
+ progress,
+ trace,
+)
+from bzrlib.ui import ui_factory
+from bzrlib.i18n import gettext
+
+class RenameMap(object):
+ """Determine a mapping of renames."""
+
+ def __init__(self, tree):
+ self.tree = tree
+ self.edge_hashes = {}
+
+ @staticmethod
+ def iter_edge_hashes(lines):
+ """Iterate through the hashes of line pairs (which make up an edge).
+
+ The hash is truncated using a modulus to avoid excessive memory
+ consumption by the hit-count dict. A modulus of 10Mi means that the
+ maximum number of keys is 10Mi. (Keys are normally 32 bits, e.g.
+ 4 Gi)
+ """
+ modulus = 1024 * 1024 * 10
+ for n in range(len(lines)):
+ yield hash(tuple(lines[n:n+2])) % modulus
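+
+ # For illustration: with lines = ['a\n', 'b\n', 'c\n'] the slices
+ # lines[n:n+2] produce the edges ('a\n', 'b\n'), ('b\n', 'c\n') and the
+ # trailing single-line edge ('c\n',); each tuple is hashed and reduced
+ # modulo 10Mi, so every yielded value falls within range(1024 * 1024 * 10).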
+
+ def add_edge_hashes(self, lines, tag):
+ """Update edge_hashes to include the given lines.
+
+ :param lines: The lines to update the hashes for.
+ :param tag: A tag uniquely associated with these lines (i.e. file-id)
+ """
+ for my_hash in self.iter_edge_hashes(lines):
+ self.edge_hashes.setdefault(my_hash, set()).add(tag)
+
+ def add_file_edge_hashes(self, tree, file_ids):
+ """Update to reflect the hashes for files in the tree.
+
+ :param tree: The tree containing the files.
+ :param file_ids: A list of file_ids to perform the updates for.
+ """
+ desired_files = [(f, f) for f in file_ids]
+ task = ui_factory.nested_progress_bar()
+ try:
+ for num, (file_id, contents) in enumerate(
+ tree.iter_files_bytes(desired_files)):
+ task.update(gettext('Calculating hashes'), num, len(file_ids))
+ s = StringIO()
+ s.writelines(contents)
+ s.seek(0)
+ self.add_edge_hashes(s.readlines(), file_id)
+ finally:
+ task.finished()
+
+ def hitcounts(self, lines):
+ """Count the number of hash hits for each tag, for the given lines.
+
+ Hits are weighted according to the number of tags the hash is
+ associated with; more tags means that the hash is less rare and should
+ tend to be ignored.
+
+ :param lines: The lines to calculate hashes of.
+ :return: a dict of {tag: hitcount}
+ """
+ hits = {}
+ for my_hash in self.iter_edge_hashes(lines):
+ tags = self.edge_hashes.get(my_hash)
+ if tags is None:
+ continue
+ taglen = len(tags)
+ for tag in tags:
+ if tag not in hits:
+ hits[tag] = 0
+ hits[tag] += 1.0 / taglen
+ return hits
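+
+ # For illustration: a hash shared by the tags {'id-a', 'id-b'} adds
+ # 1.0 / 2 == 0.5 to each of those tags, while a hash associated with a
+ # single tag adds a full 1.0, so edges common to many files dilute
+ # across them instead of favouring any single match.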
+
+ def get_all_hits(self, paths):
+ """Find all the hit counts for the listed paths in the tree.
+
+ :return: A list of tuples of count, path, file_id.
+ """
+ all_hits = []
+ task = ui_factory.nested_progress_bar()
+ try:
+ for num, path in enumerate(paths):
+ task.update(gettext('Determining hash hits'), num, len(paths))
+ hits = self.hitcounts(self.tree.get_file_lines(None,
+ path=path))
+ all_hits.extend((v, path, k) for k, v in hits.items())
+ finally:
+ task.finished()
+ return all_hits
+
+ def file_match(self, paths):
+ """Return a mapping from file_ids to the supplied paths."""
+ return self._match_hits(self.get_all_hits(paths))
+
+ @staticmethod
+ def _match_hits(hit_list):
+ """Using a hit list, determine a path-to-fileid map.
+
+ The hit list is a list of (count, path, file_id), where count is a
+ (possibly float) number, with higher numbers indicating stronger
+ matches.
+ """
+ seen_file_ids = set()
+ path_map = {}
+ for count, path, file_id in sorted(hit_list, reverse=True):
+ if path in path_map or file_id in seen_file_ids:
+ continue
+ path_map[path] = file_id
+ seen_file_ids.add(file_id)
+ return path_map
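+
+ # For illustration: given the hypothetical hit list
+ # [(2.0, 'new.py', 'id-1'), (1.5, 'other.py', 'id-1'), (1.0, 'other.py', 'id-2')],
+ # the descending greedy pass maps 'new.py' to 'id-1', skips the 1.5
+ # entry because 'id-1' is already claimed, and then maps 'other.py' to
+ # 'id-2'.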
+
+ def get_required_parents(self, matches):
+ """Return a dict of all file parents that must be versioned.
+
+ The keys are the required parents and the values are sets of their
+ children.
+ """
+ required_parents = {}
+ for path in matches:
+ while True:
+ child = path
+ path = osutils.dirname(path)
+ if self.tree.path2id(path) is not None:
+ break
+ required_parents.setdefault(path, []).append(child)
+ require_ids = {}
+ for parent, children in required_parents.iteritems():
+ child_file_ids = set()
+ for child in children:
+ file_id = matches.get(child)
+ if file_id is not None:
+ child_file_ids.add(file_id)
+ require_ids[parent] = child_file_ids
+ return require_ids
+
+ def match_parents(self, required_parents, missing_parents):
+ """Map parent directories to file-ids.
+
+ This is done by finding similarity between the file-ids of children of
+ required parent directories and the file-ids of children of missing
+ parent directories.
+ """
+ all_hits = []
+ for file_id, file_id_children in missing_parents.iteritems():
+ for path, path_children in required_parents.iteritems():
+ hits = len(path_children.intersection(file_id_children))
+ if hits > 0:
+ all_hits.append((hits, path, file_id))
+ return self._match_hits(all_hits)
+
+ def _find_missing_files(self, basis):
+ missing_files = set()
+ missing_parents = {}
+ candidate_files = set()
+ task = ui_factory.nested_progress_bar()
+ iterator = self.tree.iter_changes(basis, want_unversioned=True,
+ pb=task)
+ try:
+ for (file_id, paths, changed_content, versioned, parent, name,
+ kind, executable) in iterator:
+ if kind[1] is None and versioned[1]:
+ missing_parents.setdefault(parent[0], set()).add(file_id)
+ if kind[0] == 'file':
+ missing_files.add(file_id)
+ else:
+ # other kinds are not handled
+ pass
+ if versioned == (False, False):
+ if self.tree.is_ignored(paths[1]):
+ continue
+ if kind[1] == 'file':
+ candidate_files.add(paths[1])
+ if kind[1] == 'directory':
+ for _dir, children in self.tree.walkdirs(paths[1]):
+ for child in children:
+ if child[2] == 'file':
+ candidate_files.add(child[0])
+ finally:
+ task.finished()
+ return missing_files, missing_parents, candidate_files
+
+ @classmethod
+ def guess_renames(klass, tree, dry_run=False):
+ """Guess which files to rename, and perform the rename.
+
+ We assume that unversioned files and missing files indicate that
+ versioned files have been renamed outside of Bazaar.
+
+ :param tree: A write-locked working tree.
+ """
+ required_parents = {}
+ task = ui_factory.nested_progress_bar()
+ try:
+ pp = progress.ProgressPhase('Guessing renames', 4, task)
+ basis = tree.basis_tree()
+ basis.lock_read()
+ try:
+ rn = klass(tree)
+ pp.next_phase()
+ missing_files, missing_parents, candidate_files = (
+ rn._find_missing_files(basis))
+ pp.next_phase()
+ rn.add_file_edge_hashes(basis, missing_files)
+ finally:
+ basis.unlock()
+ pp.next_phase()
+ matches = rn.file_match(candidate_files)
+ parents_matches = matches
+ while len(parents_matches) > 0:
+ required_parents = rn.get_required_parents(
+ parents_matches)
+ parents_matches = rn.match_parents(required_parents,
+ missing_parents)
+ matches.update(parents_matches)
+ pp.next_phase()
+ delta = rn._make_inventory_delta(matches)
+ for old, new, file_id, entry in delta:
+ trace.note(gettext("{0} => {1}").format(old, new))
+ if not dry_run:
+ tree.add(required_parents)
+ tree.apply_inventory_delta(delta)
+ finally:
+ task.finished()
+
+ def _make_inventory_delta(self, matches):
+ delta = []
+ file_id_matches = dict((f, p) for p, f in matches.items())
+ for old_path, entry in self.tree.iter_entries_by_dir(matches.values()):
+ new_path = file_id_matches[entry.file_id]
+ parent_path, new_name = osutils.split(new_path)
+ parent_id = matches.get(parent_path)
+ if parent_id is None:
+ parent_id = self.tree.path2id(parent_path)
+ if entry.name == new_name and entry.parent_id == parent_id:
+ continue
+ new_entry = entry.copy()
+ new_entry.parent_id = parent_id
+ new_entry.name = new_name
+ delta.append((old_path, new_path, new_entry.file_id, new_entry))
+ return delta
diff --git a/bzrlib/repofmt/__init__.py b/bzrlib/repofmt/__init__.py
new file mode 100644
index 0000000..afe2457
--- /dev/null
+++ b/bzrlib/repofmt/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Repository formats"""
+
+from __future__ import absolute_import
+
diff --git a/bzrlib/repofmt/groupcompress_repo.py b/bzrlib/repofmt/groupcompress_repo.py
new file mode 100644
index 0000000..3a088f5
--- /dev/null
+++ b/bzrlib/repofmt/groupcompress_repo.py
@@ -0,0 +1,1426 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Repository formats using CHK inventories and groupcompress compression."""
+
+from __future__ import absolute_import
+
+import time
+
+from bzrlib import (
+ controldir,
+ chk_map,
+ chk_serializer,
+ debug,
+ errors,
+ index as _mod_index,
+ inventory,
+ osutils,
+ pack,
+ revision as _mod_revision,
+ trace,
+ ui,
+ versionedfile,
+ )
+from bzrlib.btree_index import (
+ BTreeGraphIndex,
+ BTreeBuilder,
+ )
+from bzrlib.decorators import needs_write_lock
+from bzrlib.groupcompress import (
+ _GCGraphIndex,
+ GroupCompressVersionedFiles,
+ )
+from bzrlib.repofmt.pack_repo import (
+ _DirectPackAccess,
+ Pack,
+ NewPack,
+ PackRepository,
+ PackRootCommitBuilder,
+ RepositoryPackCollection,
+ RepositoryFormatPack,
+ ResumedPack,
+ Packer,
+ )
+from bzrlib.vf_repository import (
+ StreamSource,
+ )
+from bzrlib.static_tuple import StaticTuple
+
+
+class GCPack(NewPack):
+
+ def __init__(self, pack_collection, upload_suffix='', file_mode=None):
+ """Create a NewPack instance.
+
+ :param pack_collection: A PackCollection into which this is being
+ inserted.
+ :param upload_suffix: An optional suffix to be given to any temporary
+ files created during the pack creation, e.g. '.autopack'.
+ :param file_mode: An optional file mode to create the new files with.
+ """
+ # Copied from NewPack and changed to:
+ # - change inventory reference list length to 1
+ # - change texts reference lists to 1
+ # TODO: patch this to be parameterised
+
+ # The relative locations of the packs are constrained, but all are
+ # passed in because the caller has them, so as to avoid object churn.
+ index_builder_class = pack_collection._index_builder_class
+ # from brisbane-core
+ if pack_collection.chk_index is not None:
+ chk_index = index_builder_class(reference_lists=0)
+ else:
+ chk_index = None
+ Pack.__init__(self,
+ # Revisions: parents list, no text compression.
+ index_builder_class(reference_lists=1),
+ # Inventory: We want to map compression only, but currently the
+ # knit code hasn't been updated enough to understand that, so we
+ # have a regular 2-list index giving parents and compression
+ # source.
+ index_builder_class(reference_lists=1),
+ # Texts: per file graph, for all fileids - so one reference list
+ # and two elements in the key tuple.
+ index_builder_class(reference_lists=1, key_elements=2),
+ # Signatures: Just blobs to store, no compression, no parents
+ # listing.
+ index_builder_class(reference_lists=0),
+ # CHK based storage - just blobs, no compression or parents.
+ chk_index=chk_index
+ )
+ self._pack_collection = pack_collection
+ # When we make readonly indices, we need this.
+ self.index_class = pack_collection._index_class
+ # where should the new pack be opened
+ self.upload_transport = pack_collection._upload_transport
+ # where are indices written out to
+ self.index_transport = pack_collection._index_transport
+ # where is the pack renamed to when it is finished?
+ self.pack_transport = pack_collection._pack_transport
+ # What file mode to upload the pack and indices with.
+ self._file_mode = file_mode
+ # tracks the content written to the .pack file.
+ self._hash = osutils.md5()
+ # a four-tuple with the length in bytes of the indices, once the pack
+ # is finalised. (rev, inv, text, sigs)
+ self.index_sizes = None
+ # How much data to cache when writing packs. Note that this is not
+ # synchronised with reads, because it's not in the transport layer, so
+ # is not safe unless the client knows it won't be reading from the pack
+ # under creation.
+ self._cache_limit = 0
+ # the temporary pack file name.
+ self.random_name = osutils.rand_chars(20) + upload_suffix
+ # when was this pack started?
+ self.start_time = time.time()
+ # open an output stream for the data added to the pack.
+ self.write_stream = self.upload_transport.open_write_stream(
+ self.random_name, mode=self._file_mode)
+ if 'pack' in debug.debug_flags:
+ trace.mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
+ time.ctime(), self.upload_transport.base, self.random_name,
+ time.time() - self.start_time)
+ # A list of byte sequences to be written to the new pack, and the
+ # aggregate size of them. Stored as a list rather than separate
+ # variables so that the _write_data closure below can update them.
+ self._buffer = [[], 0]
+ # create a callable for adding data
+ #
+ # robertc says- this is a closure rather than a method on the object
+ # so that the variables are locals, and faster than accessing object
+ # members.
+ def _write_data(bytes, flush=False, _buffer=self._buffer,
+ _write=self.write_stream.write, _update=self._hash.update):
+ _buffer[0].append(bytes)
+ _buffer[1] += len(bytes)
+ # buffer cap
+ if _buffer[1] > self._cache_limit or flush:
+ bytes = ''.join(_buffer[0])
+ _write(bytes)
+ _update(bytes)
+ _buffer[:] = [[], 0]
+ # expose this on self, for the occasion when clients want to add data.
+ self._write_data = _write_data
+ # a pack writer object to serialise pack records.
+ self._writer = pack.ContainerWriter(self._write_data)
+ self._writer.begin()
+ # what state is the pack in? (open, finished, aborted)
+ self._state = 'open'
+ # no name until we finish writing the content
+ self.name = None
+
+ def _check_references(self):
+ """Make sure our external references are present.
+
+ Packs are allowed to have deltas whose base is not in the pack, but it
+ must be present somewhere in this collection. It is not allowed to
+ have deltas based on a fallback repository.
+ (See <https://bugs.launchpad.net/bzr/+bug/288751>)
+ """
+ # Groupcompress packs don't have any external references, arguably CHK
+ # pages have external references, but we cannot 'cheaply' determine
+ # them without actually walking all of the chk pages.
+
+
+class ResumedGCPack(ResumedPack):
+
+ def _check_references(self):
+ """Make sure our external compression parents are present."""
+ # See GCPack._check_references for why this is empty
+
+ def _get_external_refs(self, index):
+ # GC repositories don't have compression parents external to a given
+ # pack file
+ return set()
+
+
+class GCCHKPacker(Packer):
+ """This class understand what it takes to collect a GCCHK repo."""
+
+ def __init__(self, pack_collection, packs, suffix, revision_ids=None,
+ reload_func=None):
+ super(GCCHKPacker, self).__init__(pack_collection, packs, suffix,
+ revision_ids=revision_ids,
+ reload_func=reload_func)
+ self._pack_collection = pack_collection
+ # ATM, we only support this for GCCHK repositories.
+ if pack_collection.chk_index is None:
+ raise AssertionError('pack_collection.chk_index should not be None')
+ self._gather_text_refs = False
+ self._chk_id_roots = []
+ self._chk_p_id_roots = []
+ self._text_refs = None
+ # set by .pack() if self.revision_ids is not None
+ self.revision_keys = None
+
+ def _get_progress_stream(self, source_vf, keys, message, pb):
+ def pb_stream():
+ substream = source_vf.get_record_stream(keys, 'groupcompress', True)
+ for idx, record in enumerate(substream):
+ if pb is not None:
+ pb.update(message, idx + 1, len(keys))
+ yield record
+ return pb_stream()
+
+ def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
+ """Filter the texts of inventories, to find the chk pages."""
+ total_keys = len(keys)
+ def _filtered_inv_stream():
+ id_roots_set = set()
+ p_id_roots_set = set()
+ stream = source_vf.get_record_stream(keys, 'groupcompress', True)
+ for idx, record in enumerate(stream):
+ # Inventories should always be with revisions; assume success.
+ bytes = record.get_bytes_as('fulltext')
+ chk_inv = inventory.CHKInventory.deserialise(None, bytes,
+ record.key)
+ if pb is not None:
+ pb.update('inv', idx, total_keys)
+ key = chk_inv.id_to_entry.key()
+ if key not in id_roots_set:
+ self._chk_id_roots.append(key)
+ id_roots_set.add(key)
+ p_id_map = chk_inv.parent_id_basename_to_file_id
+ if p_id_map is None:
+ raise AssertionError('Parent id -> file_id map not set')
+ key = p_id_map.key()
+ if key not in p_id_roots_set:
+ p_id_roots_set.add(key)
+ self._chk_p_id_roots.append(key)
+ yield record
+ # We have finished processing all of the inventory records, we
+ # don't need these sets anymore
+ id_roots_set.clear()
+ p_id_roots_set.clear()
+ return _filtered_inv_stream()
+
+ def _get_chk_streams(self, source_vf, keys, pb=None):
+ # We want to stream the keys from 'id_roots', and things they
+ # reference, and then stream things from p_id_roots and things they
+ # reference, and then any remaining keys that we didn't get to.
+
+ # We also group referenced texts together, so if one root references a
+ # text with prefix 'a', and another root references a node with prefix
+ # 'a', we want to yield those nodes before we yield the nodes for 'b'
+ # This keeps 'similar' nodes together.
+
+ # Note: We probably actually want multiple streams here, to help the
+ # client understand that the different levels won't compress well
+ # against each other.
+ # Test the difference between using one Group per level, and
+ # using 1 Group per prefix. (so '' (root) would get a group, then
+ # all the references to search-key 'a' would get a group, etc.)
+ total_keys = len(keys)
+ remaining_keys = set(keys)
+ counter = [0]
+ if self._gather_text_refs:
+ self._text_refs = set()
+ def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
+ cur_keys = root_keys
+ while cur_keys:
+ keys_by_search_prefix = {}
+ remaining_keys.difference_update(cur_keys)
+ next_keys = set()
+ def handle_internal_node(node):
+ for prefix, value in node._items.iteritems():
+ # We don't want to request the same key twice, and we
+ # want to order it by the first time it is seen.
+ # Even further, we don't want to request a key which is
+ # not in this group of pack files (it should be in the
+ # repo, but it doesn't have to be in the group being
+ # packed.)
+ # TODO: consider how to treat externally referenced chk
+ # pages as 'external_references' so that we
+ # always fill them in for stacked branches
+ if value not in next_keys and value in remaining_keys:
+ keys_by_search_prefix.setdefault(prefix,
+ []).append(value)
+ next_keys.add(value)
+ def handle_leaf_node(node):
+ # Store is None, because we know we have a LeafNode, and we
+ # just want its entries
+ for file_id, bytes in node.iteritems(None):
+ self._text_refs.add(chk_map._bytes_to_text_key(bytes))
+ def next_stream():
+ stream = source_vf.get_record_stream(cur_keys,
+ 'as-requested', True)
+ for record in stream:
+ if record.storage_kind == 'absent':
+ # An absent CHK record: we assume that the missing
+ # record is in a different pack - e.g. a page not
+ # altered by the commit we're packing.
+ continue
+ bytes = record.get_bytes_as('fulltext')
+ # We don't care about search_key_func for this code,
+ # because we only care about external references.
+ node = chk_map._deserialise(bytes, record.key,
+ search_key_func=None)
+ common_base = node._search_prefix
+ if isinstance(node, chk_map.InternalNode):
+ handle_internal_node(node)
+ elif parse_leaf_nodes:
+ handle_leaf_node(node)
+ counter[0] += 1
+ if pb is not None:
+ pb.update('chk node', counter[0], total_keys)
+ yield record
+ yield next_stream()
+ # Double check that we won't be emitting any keys twice
+ # If we get rid of the pre-calculation of all keys, we could
+ # turn this around and do
+ # next_keys.difference_update(seen_keys)
+ # However, we also may have references to chk pages in another
+ # pack file during autopack. We filter earlier, so we should no
+ # longer need to do this
+ # next_keys = next_keys.intersection(remaining_keys)
+ cur_keys = []
+ for prefix in sorted(keys_by_search_prefix):
+ cur_keys.extend(keys_by_search_prefix.pop(prefix))
+ for stream in _get_referenced_stream(self._chk_id_roots,
+ self._gather_text_refs):
+ yield stream
+ del self._chk_id_roots
+ # while it isn't really possible for chk_id_roots to not be in the
+ # local group of packs, it is possible that the tree shape has not
+ # changed recently, so we need to filter _chk_p_id_roots by the
+ # available keys
+ chk_p_id_roots = [key for key in self._chk_p_id_roots
+ if key in remaining_keys]
+ del self._chk_p_id_roots
+ for stream in _get_referenced_stream(chk_p_id_roots, False):
+ yield stream
+ if remaining_keys:
+ trace.mutter('There were %d keys in the chk index, %d of which'
+ ' were not referenced', total_keys,
+ len(remaining_keys))
+ if self.revision_ids is None:
+ stream = source_vf.get_record_stream(remaining_keys,
+ 'unordered', True)
+ yield stream
+
+ def _build_vf(self, index_name, parents, delta, for_write=False):
+ """Build a VersionedFiles instance on top of this group of packs."""
+ index_name = index_name + '_index'
+ index_to_pack = {}
+ access = _DirectPackAccess(index_to_pack,
+ reload_func=self._reload_func)
+ if for_write:
+ # Use new_pack
+ if self.new_pack is None:
+ raise AssertionError('No new pack has been set')
+ index = getattr(self.new_pack, index_name)
+ index_to_pack[index] = self.new_pack.access_tuple()
+ index.set_optimize(for_size=True)
+ access.set_writer(self.new_pack._writer, index,
+ self.new_pack.access_tuple())
+ add_callback = index.add_nodes
+ else:
+ indices = []
+ for pack in self.packs:
+ sub_index = getattr(pack, index_name)
+ index_to_pack[sub_index] = pack.access_tuple()
+ indices.append(sub_index)
+ index = _mod_index.CombinedGraphIndex(indices)
+ add_callback = None
+ vf = GroupCompressVersionedFiles(
+ _GCGraphIndex(index,
+ add_callback=add_callback,
+ parents=parents,
+ is_locked=self._pack_collection.repo.is_locked),
+ access=access,
+ delta=delta)
+ return vf
+
+ def _build_vfs(self, index_name, parents, delta):
+ """Build the source and target VersionedFiles."""
+ source_vf = self._build_vf(index_name, parents,
+ delta, for_write=False)
+ target_vf = self._build_vf(index_name, parents,
+ delta, for_write=True)
+ return source_vf, target_vf
+
+ def _copy_stream(self, source_vf, target_vf, keys, message, vf_to_stream,
+ pb_offset):
+ trace.mutter('repacking %d %s', len(keys), message)
+ self.pb.update('repacking %s' % (message,), pb_offset)
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ stream = vf_to_stream(source_vf, keys, message, child_pb)
+ for _ in target_vf._insert_record_stream(stream,
+ random_id=True,
+ reuse_blocks=False):
+ pass
+ finally:
+ child_pb.finished()
+
+ def _copy_revision_texts(self):
+ source_vf, target_vf = self._build_vfs('revision', True, False)
+ if not self.revision_keys:
+ # We are doing a full fetch, aka 'pack'
+ self.revision_keys = source_vf.keys()
+ self._copy_stream(source_vf, target_vf, self.revision_keys,
+ 'revisions', self._get_progress_stream, 1)
+
+ def _copy_inventory_texts(self):
+ source_vf, target_vf = self._build_vfs('inventory', True, True)
+ # It is not sufficient to just use self.revision_keys, as stacked
+ # repositories can have more inventories than they have revisions.
+ # One alternative would be to do something with
+ # get_parent_map(self.revision_keys), but that shouldn't be any faster
+ # than this.
+ inventory_keys = source_vf.keys()
+ missing_inventories = set(self.revision_keys).difference(inventory_keys)
+ if missing_inventories:
+ # Go back to the original repo, to see if these are really missing
+ # https://bugs.launchpad.net/bzr/+bug/437003
+ # If we are packing a subset of the repo, it is fine to just have
+ # the data in another Pack file, which is not included in this pack
+ # operation.
+ inv_index = self._pack_collection.repo.inventories._index
+ pmap = inv_index.get_parent_map(missing_inventories)
+ really_missing = missing_inventories.difference(pmap)
+ if really_missing:
+ missing_inventories = sorted(really_missing)
+ raise ValueError('We are missing inventories for revisions: %s'
+ % (missing_inventories,))
+ self._copy_stream(source_vf, target_vf, inventory_keys,
+ 'inventories', self._get_filtered_inv_stream, 2)
+
+ def _get_chk_vfs_for_copy(self):
+ return self._build_vfs('chk', False, False)
+
+ def _copy_chk_texts(self):
+ source_vf, target_vf = self._get_chk_vfs_for_copy()
+ # TODO: This is technically spurious... if it is a performance issue,
+ # remove it
+ total_keys = source_vf.keys()
+ trace.mutter('repacking chk: %d id_to_entry roots,'
+ ' %d p_id_map roots, %d total keys',
+ len(self._chk_id_roots), len(self._chk_p_id_roots),
+ len(total_keys))
+ self.pb.update('repacking chk', 3)
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for stream in self._get_chk_streams(source_vf, total_keys,
+ pb=child_pb):
+ for _ in target_vf._insert_record_stream(stream,
+ random_id=True,
+ reuse_blocks=False):
+ pass
+ finally:
+ child_pb.finished()
+
+ def _copy_text_texts(self):
+ source_vf, target_vf = self._build_vfs('text', True, True)
+ # XXX: We don't walk the chk map to determine referenced (file_id,
+ # revision_id) keys. We don't do it yet because you really need
+ # to filter out the ones that are present in the parents of the
+ # rev just before the ones you are copying, otherwise the filter
+ # is grabbing too many keys...
+ text_keys = source_vf.keys()
+ self._copy_stream(source_vf, target_vf, text_keys,
+ 'texts', self._get_progress_stream, 4)
+
+ def _copy_signature_texts(self):
+ source_vf, target_vf = self._build_vfs('signature', False, False)
+ signature_keys = source_vf.keys()
+ # Only copy signatures for the revisions being packed.
+ signature_keys = signature_keys.intersection(self.revision_keys)
+ self._copy_stream(source_vf, target_vf, signature_keys,
+ 'signatures', self._get_progress_stream, 5)
+
+ def _create_pack_from_packs(self):
+ self.pb.update('repacking', 0, 7)
+ self.new_pack = self.open_pack()
+ # Is this necessary for GC?
+ self.new_pack.set_write_cache_size(1024*1024)
+ self._copy_revision_texts()
+ self._copy_inventory_texts()
+ self._copy_chk_texts()
+ self._copy_text_texts()
+ self._copy_signature_texts()
+ self.new_pack._check_references()
+ if not self._use_pack(self.new_pack):
+ self.new_pack.abort()
+ return None
+ self.new_pack.finish_content()
+ if len(self.packs) == 1:
+ old_pack = self.packs[0]
+ if old_pack.name == self.new_pack._hash.hexdigest():
+ # The single old pack was already optimally packed.
+ trace.mutter('single pack %s was already optimally packed',
+ old_pack.name)
+ self.new_pack.abort()
+ return None
+ self.pb.update('finishing repack', 6, 7)
+ self.new_pack.finish()
+ self._pack_collection.allocate(self.new_pack)
+ return self.new_pack
+
+
+class GCCHKReconcilePacker(GCCHKPacker):
+ """A packer which regenerates indices etc as it copies.
+
+ This is used by ``bzr reconcile`` to cause parent text pointers to be
+ regenerated.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(GCCHKReconcilePacker, self).__init__(*args, **kwargs)
+ self._data_changed = False
+ self._gather_text_refs = True
+
+ def _copy_inventory_texts(self):
+ source_vf, target_vf = self._build_vfs('inventory', True, True)
+ self._copy_stream(source_vf, target_vf, self.revision_keys,
+ 'inventories', self._get_filtered_inv_stream, 2)
+ if source_vf.keys() != self.revision_keys:
+ self._data_changed = True
+
+ def _copy_text_texts(self):
+ """generate what texts we should have and then copy."""
+ source_vf, target_vf = self._build_vfs('text', True, True)
+ trace.mutter('repacking %d texts', len(self._text_refs))
+ self.pb.update("repacking texts", 4)
+ # we have three major tasks here:
+ # 1) generate the ideal index
+ repo = self._pack_collection.repo
+ # We want the one we just wrote, so base it on self.new_pack
+ revision_vf = self._build_vf('revision', True, False, for_write=True)
+ ancestor_keys = revision_vf.get_parent_map(revision_vf.keys())
+ # Strip keys back into revision_ids.
+ ancestors = dict((k[0], tuple([p[0] for p in parents]))
+ for k, parents in ancestor_keys.iteritems())
+ del ancestor_keys
+ # TODO: _generate_text_key_index should be much cheaper to generate from
+ # a chk repository, rather than the current implementation
+ ideal_index = repo._generate_text_key_index(None, ancestors)
+ file_id_parent_map = source_vf.get_parent_map(self._text_refs)
+ # 2) generate a keys list that contains all the entries that can
+ # be used as-is, with corrected parents.
+ ok_keys = []
+ new_parent_keys = {} # (key, parent_keys)
+ discarded_keys = []
+ NULL_REVISION = _mod_revision.NULL_REVISION
+ for key in self._text_refs:
+ # 0 - index
+ # 1 - key
+ # 2 - value
+ # 3 - refs
+ try:
+ ideal_parents = tuple(ideal_index[key])
+ except KeyError:
+ discarded_keys.append(key)
+ self._data_changed = True
+ else:
+ if ideal_parents == (NULL_REVISION,):
+ ideal_parents = ()
+ source_parents = file_id_parent_map[key]
+ if ideal_parents == source_parents:
+ # no change needed.
+ ok_keys.append(key)
+ else:
+ # We need to change the parent graph, but we don't need to
+ # re-insert the text (since we don't pun the compression
+ # parent with the parents list)
+ self._data_changed = True
+ new_parent_keys[key] = ideal_parents
+ # we're finished with some data.
+ del ideal_index
+ del file_id_parent_map
+ # 3) bulk copy the data, updating records that need it
+ def _update_parents_for_texts():
+ stream = source_vf.get_record_stream(self._text_refs,
+ 'groupcompress', False)
+ for record in stream:
+ if record.key in new_parent_keys:
+ record.parents = new_parent_keys[record.key]
+ yield record
+ target_vf.insert_record_stream(_update_parents_for_texts())
+
+ def _use_pack(self, new_pack):
+ """Override _use_pack to check for reconcile having changed content."""
+ return new_pack.data_inserted() and self._data_changed
+
+
+class GCCHKCanonicalizingPacker(GCCHKPacker):
+ """A packer that ensures inventories have canonical-form CHK maps.
+
+ Ideally this would be part of reconcile, but it's very slow and rarely
+ needed. (It repairs repositories affected by
+ https://bugs.launchpad.net/bzr/+bug/522637).
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(GCCHKCanonicalizingPacker, self).__init__(*args, **kwargs)
+ self._data_changed = False
+
+ def _exhaust_stream(self, source_vf, keys, message, vf_to_stream, pb_offset):
+ """Create and exhaust a stream, but don't insert it.
+
+ This is useful to get the side-effects of generating a stream.
+ """
+ self.pb.update('scanning %s' % (message,), pb_offset)
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ list(vf_to_stream(source_vf, keys, message, child_pb))
+ finally:
+ child_pb.finished()
+
+ def _copy_inventory_texts(self):
+ source_vf, target_vf = self._build_vfs('inventory', True, True)
+ source_chk_vf, target_chk_vf = self._get_chk_vfs_for_copy()
+ inventory_keys = source_vf.keys()
+ # First, copy the existing CHKs on the assumption that most of them
+ # will be correct. This will save us from having to reinsert (and
+ # recompress) these records later at the cost of perhaps preserving a
+ # few unused CHKs.
+ # (Iterate but don't insert _get_filtered_inv_stream to populate the
+ # variables needed by GCCHKPacker._copy_chk_texts.)
+ self._exhaust_stream(source_vf, inventory_keys, 'inventories',
+ self._get_filtered_inv_stream, 2)
+ GCCHKPacker._copy_chk_texts(self)
+ # Now copy and fix the inventories, and any regenerated CHKs.
+ def chk_canonicalizing_inv_stream(source_vf, keys, message, pb=None):
+ return self._get_filtered_canonicalizing_inv_stream(
+ source_vf, keys, message, pb, source_chk_vf, target_chk_vf)
+ self._copy_stream(source_vf, target_vf, inventory_keys,
+ 'inventories', chk_canonicalizing_inv_stream, 4)
+
+ def _copy_chk_texts(self):
+ # No-op; in this class this happens during _copy_inventory_texts.
+ pass
+
+ def _get_filtered_canonicalizing_inv_stream(self, source_vf, keys, message,
+ pb=None, source_chk_vf=None, target_chk_vf=None):
+ """Filter the texts of inventories, regenerating CHKs to make sure they
+ are canonical.
+ """
+ total_keys = len(keys)
+ target_chk_vf = versionedfile.NoDupeAddLinesDecorator(target_chk_vf)
+ def _filtered_inv_stream():
+ stream = source_vf.get_record_stream(keys, 'groupcompress', True)
+ search_key_name = None
+ for idx, record in enumerate(stream):
+ # Inventories should always be with revisions; assume success.
+ bytes = record.get_bytes_as('fulltext')
+ chk_inv = inventory.CHKInventory.deserialise(
+ source_chk_vf, bytes, record.key)
+ if pb is not None:
+ pb.update('inv', idx, total_keys)
+ chk_inv.id_to_entry._ensure_root()
+ if search_key_name is None:
+ # Find the name corresponding to the search_key_func
+ search_key_reg = chk_map.search_key_registry
+ for search_key_name, func in search_key_reg.iteritems():
+ if func == chk_inv.id_to_entry._search_key_func:
+ break
+ canonical_inv = inventory.CHKInventory.from_inventory(
+ target_chk_vf, chk_inv,
+ maximum_size=chk_inv.id_to_entry._root_node._maximum_size,
+ search_key_name=search_key_name)
+ if chk_inv.id_to_entry.key() != canonical_inv.id_to_entry.key():
+ trace.mutter(
+ 'Non-canonical CHK map for id_to_entry of inv: %s '
+ '(root is %s, should be %s)' % (chk_inv.revision_id,
+ chk_inv.id_to_entry.key()[0],
+ canonical_inv.id_to_entry.key()[0]))
+ self._data_changed = True
+ p_id_map = chk_inv.parent_id_basename_to_file_id
+ p_id_map._ensure_root()
+ canon_p_id_map = canonical_inv.parent_id_basename_to_file_id
+ if p_id_map.key() != canon_p_id_map.key():
+ trace.mutter(
+ 'Non-canonical CHK map for parent_id_to_basename of '
+ 'inv: %s (root is %s, should be %s)'
+ % (chk_inv.revision_id, p_id_map.key()[0],
+ canon_p_id_map.key()[0]))
+ self._data_changed = True
+ yield versionedfile.ChunkedContentFactory(record.key,
+ record.parents, record.sha1,
+ canonical_inv.to_lines())
+ # We have finished processing all of the inventory records, we
+ # don't need these sets anymore
+ return _filtered_inv_stream()
+
+ def _use_pack(self, new_pack):
+ """Override _use_pack to check for reconcile having changed content."""
+ return new_pack.data_inserted() and self._data_changed
+
+
+class GCRepositoryPackCollection(RepositoryPackCollection):
+
+ pack_factory = GCPack
+ resumed_pack_factory = ResumedGCPack
+ normal_packer_class = GCCHKPacker
+ optimising_packer_class = GCCHKPacker
+
+ def _check_new_inventories(self):
+ """Detect missing inventories or chk root entries for the new revisions
+ in this write group.
+
+ :returns: list of strs, summarising any problems found. If the list is
+ empty no problems were found.
+ """
+ # Ensure that all revisions added in this write group have:
+ # - corresponding inventories,
+ # - chk root entries for those inventories,
+ # - and any present parent inventories have their chk root
+ # entries too.
+ # And all this should be independent of any fallback repository.
+ problems = []
+ key_deps = self.repo.revisions._index._key_dependencies
+ new_revisions_keys = key_deps.get_new_keys()
+ no_fallback_inv_index = self.repo.inventories._index
+ no_fallback_chk_bytes_index = self.repo.chk_bytes._index
+ no_fallback_texts_index = self.repo.texts._index
+ inv_parent_map = no_fallback_inv_index.get_parent_map(
+ new_revisions_keys)
+ # Are any inventories corresponding to the new revisions missing?
+ corresponding_invs = set(inv_parent_map)
+ missing_corresponding = set(new_revisions_keys)
+ missing_corresponding.difference_update(corresponding_invs)
+ if missing_corresponding:
+ problems.append("inventories missing for revisions %s" %
+ (sorted(missing_corresponding),))
+ return problems
+ # Are any chk root entries missing for any inventories? This includes
+ # any present parent inventories, which may be used when calculating
+ # deltas for streaming.
+ all_inv_keys = set(corresponding_invs)
+ for parent_inv_keys in inv_parent_map.itervalues():
+ all_inv_keys.update(parent_inv_keys)
+ # Filter out ghost parents.
+ all_inv_keys.intersection_update(
+ no_fallback_inv_index.get_parent_map(all_inv_keys))
+ parent_invs_only_keys = all_inv_keys.symmetric_difference(
+ corresponding_invs)
+ all_missing = set()
+ inv_ids = [key[-1] for key in all_inv_keys]
+ parent_invs_only_ids = [key[-1] for key in parent_invs_only_keys]
+ root_key_info = _build_interesting_key_sets(
+ self.repo, inv_ids, parent_invs_only_ids)
+ expected_chk_roots = root_key_info.all_keys()
+ present_chk_roots = no_fallback_chk_bytes_index.get_parent_map(
+ expected_chk_roots)
+ missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
+ if missing_chk_roots:
+ problems.append("missing referenced chk root keys: %s"
+ % (sorted(missing_chk_roots),))
+ # Don't bother checking any further.
+ return problems
+ # Find all interesting chk_bytes records, and make sure they are
+ # present, as well as the text keys they reference.
+ chk_bytes_no_fallbacks = self.repo.chk_bytes.without_fallbacks()
+ chk_bytes_no_fallbacks._search_key_func = \
+ self.repo.chk_bytes._search_key_func
+ chk_diff = chk_map.iter_interesting_nodes(
+ chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
+ root_key_info.uninteresting_root_keys)
+ text_keys = set()
+ try:
+ for record in _filter_text_keys(chk_diff, text_keys,
+ chk_map._bytes_to_text_key):
+ pass
+ except errors.NoSuchRevision, e:
+ # XXX: It would be nice if we could give a more precise error here.
+ problems.append("missing chk node(s) for id_to_entry maps")
+ chk_diff = chk_map.iter_interesting_nodes(
+ chk_bytes_no_fallbacks, root_key_info.interesting_pid_root_keys,
+ root_key_info.uninteresting_pid_root_keys)
+ try:
+ for interesting_rec, interesting_map in chk_diff:
+ pass
+ except errors.NoSuchRevision, e:
+ problems.append(
+ "missing chk node(s) for parent_id_basename_to_file_id maps")
+ present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
+ missing_text_keys = text_keys.difference(present_text_keys)
+ if missing_text_keys:
+ problems.append("missing text keys: %r"
+ % (sorted(missing_text_keys),))
+ return problems
+
+
+class CHKInventoryRepository(PackRepository):
+ """subclass of PackRepository that uses CHK based inventories."""
+
+ def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
+ _serializer):
+ """Overridden to change pack collection class."""
+ super(CHKInventoryRepository, self).__init__(_format, a_bzrdir,
+ control_files, _commit_builder_class, _serializer)
+ index_transport = self._transport.clone('indices')
+ self._pack_collection = GCRepositoryPackCollection(self,
+ self._transport, index_transport,
+ self._transport.clone('upload'),
+ self._transport.clone('packs'),
+ _format.index_builder_class,
+ _format.index_class,
+ use_chk_index=self._format.supports_chks,
+ )
+ self.inventories = GroupCompressVersionedFiles(
+ _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
+ add_callback=self._pack_collection.inventory_index.add_callback,
+ parents=True, is_locked=self.is_locked,
+ inconsistency_fatal=False),
+ access=self._pack_collection.inventory_index.data_access)
+ self.revisions = GroupCompressVersionedFiles(
+ _GCGraphIndex(self._pack_collection.revision_index.combined_index,
+ add_callback=self._pack_collection.revision_index.add_callback,
+ parents=True, is_locked=self.is_locked,
+ track_external_parent_refs=True, track_new_keys=True),
+ access=self._pack_collection.revision_index.data_access,
+ delta=False)
+ self.signatures = GroupCompressVersionedFiles(
+ _GCGraphIndex(self._pack_collection.signature_index.combined_index,
+ add_callback=self._pack_collection.signature_index.add_callback,
+ parents=False, is_locked=self.is_locked,
+ inconsistency_fatal=False),
+ access=self._pack_collection.signature_index.data_access,
+ delta=False)
+ self.texts = GroupCompressVersionedFiles(
+ _GCGraphIndex(self._pack_collection.text_index.combined_index,
+ add_callback=self._pack_collection.text_index.add_callback,
+ parents=True, is_locked=self.is_locked,
+ inconsistency_fatal=False),
+ access=self._pack_collection.text_index.data_access)
+ # No parents, individual CHK pages don't have specific ancestry
+ self.chk_bytes = GroupCompressVersionedFiles(
+ _GCGraphIndex(self._pack_collection.chk_index.combined_index,
+ add_callback=self._pack_collection.chk_index.add_callback,
+ parents=False, is_locked=self.is_locked,
+ inconsistency_fatal=False),
+ access=self._pack_collection.chk_index.data_access)
+ search_key_name = self._format._serializer.search_key_name
+ search_key_func = chk_map.search_key_registry.get(search_key_name)
+ self.chk_bytes._search_key_func = search_key_func
+ # True when the repository object is 'write locked' (as opposed to the
+ # physical lock only taken out around changes to the pack-names list.)
+ # Another way to represent this would be a decorator around the control
+ # files object that presents logical locks as physical ones - if this
+ # gets ugly consider that alternative design. RBC 20071011
+ self._write_lock_count = 0
+ self._transaction = None
+ # for tests
+ self._reconcile_does_inventory_gc = True
+ self._reconcile_fixes_text_parents = True
+ self._reconcile_backsup_inventory = False
+
+ def _add_inventory_checked(self, revision_id, inv, parents):
+ """Add inv to the repository after checking the inputs.
+
+ This function can be overridden to allow different inventory styles.
+
+ :seealso: add_inventory, for the contract.
+ """
+ # make inventory
+ serializer = self._format._serializer
+ result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
+ maximum_size=serializer.maximum_size,
+ search_key_name=serializer.search_key_name)
+ inv_lines = result.to_lines()
+ return self._inventory_add_lines(revision_id, parents,
+ inv_lines, check_content=False)
+
+ def _create_inv_from_null(self, delta, revision_id):
+ """This will mutate new_inv directly.
+
+ This is a simplified form of create_by_apply_delta which knows that all
+ the old values must be None, so everything is a create.
+ """
+ serializer = self._format._serializer
+ new_inv = inventory.CHKInventory(serializer.search_key_name)
+ new_inv.revision_id = revision_id
+ entry_to_bytes = new_inv._entry_to_bytes
+ id_to_entry_dict = {}
+ parent_id_basename_dict = {}
+ for old_path, new_path, file_id, entry in delta:
+ if old_path is not None:
+ raise ValueError('Invalid delta, somebody tried to delete %r'
+ ' from the NULL_REVISION'
+ % ((old_path, file_id),))
+ if new_path is None:
+ raise ValueError('Invalid delta, delta from NULL_REVISION has'
+ ' no new_path %r' % (file_id,))
+ if new_path == '':
+ new_inv.root_id = file_id
+ parent_id_basename_key = StaticTuple('', '').intern()
+ else:
+ utf8_entry_name = entry.name.encode('utf-8')
+ parent_id_basename_key = StaticTuple(entry.parent_id,
+ utf8_entry_name).intern()
+ new_value = entry_to_bytes(entry)
+ # Populate Caches?
+ # new_inv._path_to_fileid_cache[new_path] = file_id
+ key = StaticTuple(file_id).intern()
+ id_to_entry_dict[key] = new_value
+ parent_id_basename_dict[parent_id_basename_key] = file_id
+
+ new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
+ parent_id_basename_dict, maximum_size=serializer.maximum_size)
+ return new_inv
+
+ def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
+ parents, basis_inv=None, propagate_caches=False):
+ """Add a new inventory expressed as a delta against another revision.
+
+ :param basis_revision_id: The inventory id the delta was created
+ against.
+ :param delta: The inventory delta (see Inventory.apply_delta for
+ details).
+ :param new_revision_id: The revision id that the inventory is being
+ added for.
+ :param parents: The revision ids of the parents that revision_id is
+ known to have and are in the repository already. These are supplied
+ for repositories that depend on the inventory graph for revision
+ graph access, as well as for those that pun ancestry with delta
+ compression.
+ :param basis_inv: The basis inventory if it is already known,
+ otherwise None.
+ :param propagate_caches: If True, the caches for this inventory are
+ copied to and updated for the result if possible.
+
+ :returns: (validator, new_inv)
+ The validator (which is a sha1 digest, though what is sha'd is
+ repository format specific) of the serialized inventory, and the
+ resulting inventory.
+ """
+ if not self.is_in_write_group():
+ raise AssertionError("%r not in write group" % (self,))
+ _mod_revision.check_not_reserved_id(new_revision_id)
+ basis_tree = None
+ if basis_inv is None:
+ if basis_revision_id == _mod_revision.NULL_REVISION:
+ new_inv = self._create_inv_from_null(delta, new_revision_id)
+ if new_inv.root_id is None:
+ raise errors.RootMissing()
+ inv_lines = new_inv.to_lines()
+ return self._inventory_add_lines(new_revision_id, parents,
+ inv_lines, check_content=False), new_inv
+ else:
+ basis_tree = self.revision_tree(basis_revision_id)
+ basis_tree.lock_read()
+ basis_inv = basis_tree.root_inventory
+ try:
+ result = basis_inv.create_by_apply_delta(delta, new_revision_id,
+ propagate_caches=propagate_caches)
+ inv_lines = result.to_lines()
+ return self._inventory_add_lines(new_revision_id, parents,
+ inv_lines, check_content=False), result
+ finally:
+ if basis_tree is not None:
+ basis_tree.unlock()
+
+ def _deserialise_inventory(self, revision_id, bytes):
+ return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
+ (revision_id,))
+
+ def _iter_inventories(self, revision_ids, ordering):
+ """Iterate over many inventory objects."""
+ if ordering is None:
+ ordering = 'unordered'
+ keys = [(revision_id,) for revision_id in revision_ids]
+ stream = self.inventories.get_record_stream(keys, ordering, True)
+ texts = {}
+ for record in stream:
+ if record.storage_kind != 'absent':
+ texts[record.key] = record.get_bytes_as('fulltext')
+ else:
+ texts[record.key] = None
+ for key in keys:
+ bytes = texts[key]
+ if bytes is None:
+ yield (None, key[-1])
+ else:
+ yield (inventory.CHKInventory.deserialise(
+ self.chk_bytes, bytes, key), key[-1])
+
+ def _get_inventory_xml(self, revision_id):
+ """Get serialized inventory as a string."""
+ # Without a native 'xml' inventory, this method doesn't make sense.
+ # However older working trees, and older bundles want it - so we supply
+ # it allowing _get_inventory_xml to work. Bundles currently use the
+ # serializer directly; this also isn't ideal, but there isn't an xml
+ # iteration interface offered at all for repositories.
+ return self._serializer.write_inventory_to_string(
+ self.get_inventory(revision_id))
+
+ def _find_present_inventory_keys(self, revision_keys):
+ parent_map = self.inventories.get_parent_map(revision_keys)
+ present_inventory_keys = set(k for k in parent_map)
+ return present_inventory_keys
+
+ def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
+ """Find the file ids and versions affected by revisions.
+
+ :param revision_ids: an iterable containing revision ids.
+ :param _inv_weave: The inventory weave from this repository or None.
+ If None, the inventory weave will be opened automatically.
+ :return: a dictionary mapping altered file-ids to an iterable of
+ revision_ids. Each altered file-id has the exact revision_ids that
+ altered it listed explicitly.
+ """
+ rich_root = self.supports_rich_root()
+ bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
+ file_id_revisions = {}
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ revision_keys = [(r,) for r in revision_ids]
+ parent_keys = self._find_parent_keys_of_revisions(revision_keys)
+ # TODO: instead of using _find_present_inventory_keys, change the
+ # code paths to allow missing inventories to be tolerated.
+ # However, we only want to tolerate missing parent
+ # inventories, not missing inventories for revision_ids
+ present_parent_inv_keys = self._find_present_inventory_keys(
+ parent_keys)
+ present_parent_inv_ids = set(
+ [k[-1] for k in present_parent_inv_keys])
+ inventories_to_read = set(revision_ids)
+ inventories_to_read.update(present_parent_inv_ids)
+ root_key_info = _build_interesting_key_sets(
+ self, inventories_to_read, present_parent_inv_ids)
+ interesting_root_keys = root_key_info.interesting_root_keys
+ uninteresting_root_keys = root_key_info.uninteresting_root_keys
+ chk_bytes = self.chk_bytes
+ for record, items in chk_map.iter_interesting_nodes(chk_bytes,
+ interesting_root_keys, uninteresting_root_keys,
+ pb=pb):
+ for name, bytes in items:
+ (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
+ # TODO: consider interning file_id, revision_id here, or
+ # pushing that intern() into bytes_to_info()
+ # TODO: rich_root should always be True here, for all
+ # repositories that support chk_bytes
+ if not rich_root and name_utf8 == '':
+ continue
+ try:
+ file_id_revisions[file_id].add(revision_id)
+ except KeyError:
+ file_id_revisions[file_id] = set([revision_id])
+ finally:
+ pb.finished()
+ return file_id_revisions
+
+ def find_text_key_references(self):
+ """Find the text key references within the repository.
+
+ :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
+ to whether they were referred to by the inventory of the
+ revision_id that they contain. The inventory texts from all present
+ revision ids are assessed to generate this report.
+ """
+ # XXX: Slow version but correct: rewrite as a series of delta
+ # examinations/direct tree traversal. Note that that will require care
+ # as a common node is reachable both from the inventory that added it,
+ # and others afterwards.
+ revision_keys = self.revisions.keys()
+ result = {}
+ rich_roots = self.supports_rich_root()
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ all_revs = self.all_revision_ids()
+ total = len(all_revs)
+ for pos, inv in enumerate(self.iter_inventories(all_revs)):
+ pb.update("Finding text references", pos, total)
+ for _, entry in inv.iter_entries():
+ if not rich_roots and entry.file_id == inv.root_id:
+ continue
+ key = (entry.file_id, entry.revision)
+ result.setdefault(key, False)
+ if entry.revision == inv.revision_id:
+ result[key] = True
+ return result
+ finally:
+ pb.finished()
+
+ @needs_write_lock
+ def reconcile_canonicalize_chks(self):
+ """Reconcile this repository to make sure all CHKs are in canonical
+ form.
+ """
+ from bzrlib.reconcile import PackReconciler
+ reconciler = PackReconciler(self, thorough=True, canonicalize_chks=True)
+ reconciler.reconcile()
+ return reconciler
+
+ def _reconcile_pack(self, collection, packs, extension, revs, pb):
+ packer = GCCHKReconcilePacker(collection, packs, extension)
+ return packer.pack(pb)
+
+ def _canonicalize_chks_pack(self, collection, packs, extension, revs, pb):
+ packer = GCCHKCanonicalizingPacker(collection, packs, extension, revs)
+ return packer.pack(pb)
+
+ def _get_source(self, to_format):
+ """Return a source for streaming from this repository."""
+ if self._format._serializer == to_format._serializer:
+ # We must be exactly the same format, otherwise stuff like the chk
+ # page layout might be different.
+ # Actually, this test is just slightly looser than exact so that
+ # CHK2 <-> 2a transfers will work.
+ return GroupCHKStreamSource(self, to_format)
+ return super(CHKInventoryRepository, self)._get_source(to_format)
+
+ def _find_inconsistent_revision_parents(self, revisions_iterator=None):
+ """Find revisions with different parent lists in the revision object
+ and in the index graph.
+
+ :param revisions_iterator: None, or an iterator of (revid,
+ Revision-or-None). This iterator controls the revisions checked.
+ :returns: an iterator yielding tuples of (revision-id, parents-in-index,
+ parents-in-revision).
+ """
+ if not self.is_locked():
+ raise AssertionError()
+ vf = self.revisions
+ if revisions_iterator is None:
+ revisions_iterator = self._iter_revisions(None)
+ for revid, revision in revisions_iterator:
+ if revision is None:
+ continue
+ parent_map = vf.get_parent_map([(revid,)])
+ parents_according_to_index = tuple(parent[-1] for parent in
+ parent_map[(revid,)])
+ parents_according_to_revision = tuple(revision.parent_ids)
+ if parents_according_to_index != parents_according_to_revision:
+ yield (revid, parents_according_to_index,
+ parents_according_to_revision)
+
+ def _check_for_inconsistent_revision_parents(self):
+ inconsistencies = list(self._find_inconsistent_revision_parents())
+ if inconsistencies:
+ raise errors.BzrCheckError(
+ "Revision index has inconsistent parents.")
+
+
+class GroupCHKStreamSource(StreamSource):
+ """Used when both the source and target repo are GroupCHK repos."""
+
+ def __init__(self, from_repository, to_format):
+ """Create a StreamSource streaming from from_repository."""
+ super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
+ self._revision_keys = None
+ self._text_keys = None
+ self._text_fetch_order = 'groupcompress'
+ self._chk_id_roots = None
+ self._chk_p_id_roots = None
+
+ def _get_inventory_stream(self, inventory_keys, allow_absent=False):
+ """Get a stream of inventory texts.
+
+ When this function returns, self._chk_id_roots and self._chk_p_id_roots
+ should be populated.
+ """
+ self._chk_id_roots = []
+ self._chk_p_id_roots = []
+ def _filtered_inv_stream():
+ id_roots_set = set()
+ p_id_roots_set = set()
+ source_vf = self.from_repository.inventories
+ stream = source_vf.get_record_stream(inventory_keys,
+ 'groupcompress', True)
+ for record in stream:
+ if record.storage_kind == 'absent':
+ if allow_absent:
+ continue
+ else:
+ raise errors.NoSuchRevision(self, record.key)
+ bytes = record.get_bytes_as('fulltext')
+ chk_inv = inventory.CHKInventory.deserialise(None, bytes,
+ record.key)
+ key = chk_inv.id_to_entry.key()
+ if key not in id_roots_set:
+ self._chk_id_roots.append(key)
+ id_roots_set.add(key)
+ p_id_map = chk_inv.parent_id_basename_to_file_id
+ if p_id_map is None:
+ raise AssertionError('Parent id -> file_id map not set')
+ key = p_id_map.key()
+ if key not in p_id_roots_set:
+ p_id_roots_set.add(key)
+ self._chk_p_id_roots.append(key)
+ yield record
+ # We have finished processing all of the inventory records, we
+ # don't need these sets anymore
+ id_roots_set.clear()
+ p_id_roots_set.clear()
+ return ('inventories', _filtered_inv_stream())
+
+ def _get_filtered_chk_streams(self, excluded_revision_keys):
+ self._text_keys = set()
+ excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
+ if not excluded_revision_keys:
+ uninteresting_root_keys = set()
+ uninteresting_pid_root_keys = set()
+ else:
+ # filter out any excluded revisions whose inventories are not
+ # actually present
+ # TODO: Update Repository.iter_inventories() to add
+ # ignore_missing=True
+ present_keys = self.from_repository._find_present_inventory_keys(
+ excluded_revision_keys)
+ present_ids = [k[-1] for k in present_keys]
+ uninteresting_root_keys = set()
+ uninteresting_pid_root_keys = set()
+ for inv in self.from_repository.iter_inventories(present_ids):
+ uninteresting_root_keys.add(inv.id_to_entry.key())
+ uninteresting_pid_root_keys.add(
+ inv.parent_id_basename_to_file_id.key())
+ chk_bytes = self.from_repository.chk_bytes
+ def _filter_id_to_entry():
+ interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
+ self._chk_id_roots, uninteresting_root_keys)
+ for record in _filter_text_keys(interesting_nodes, self._text_keys,
+ chk_map._bytes_to_text_key):
+ if record is not None:
+ yield record
+ # Consumed
+ self._chk_id_roots = None
+ yield 'chk_bytes', _filter_id_to_entry()
+ def _get_parent_id_basename_to_file_id_pages():
+ for record, items in chk_map.iter_interesting_nodes(chk_bytes,
+ self._chk_p_id_roots, uninteresting_pid_root_keys):
+ if record is not None:
+ yield record
+ # Consumed
+ self._chk_p_id_roots = None
+ yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()
+
+ def _get_text_stream(self):
+ # Note: We know we don't have to handle adding root keys, because both
+ # the source and target have the same network name.
+ text_stream = self.from_repository.texts.get_record_stream(
+ self._text_keys, self._text_fetch_order, False)
+ return ('texts', text_stream)
+
+ def get_stream(self, search):
+ def wrap_and_count(pb, rc, stream):
+ """Yield records from stream while showing progress."""
+ count = 0
+ for record in stream:
+ if count == rc.STEP:
+ rc.increment(count)
+ pb.update('Estimate', rc.current, rc.max)
+ count = 0
+ count += 1
+ yield record
+
+ revision_ids = search.get_keys()
+ pb = ui.ui_factory.nested_progress_bar()
+ rc = self._record_counter
+ self._record_counter.setup(len(revision_ids))
+ for stream_info in self._fetch_revision_texts(revision_ids):
+ yield (stream_info[0],
+ wrap_and_count(pb, rc, stream_info[1]))
+ self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+ # TODO: The keys to exclude might be part of the search recipe
+ # For now, exclude all parents that are at the edge of ancestry, for
+ # which we have inventories
+ from_repo = self.from_repository
+ parent_keys = from_repo._find_parent_keys_of_revisions(
+ self._revision_keys)
+ self.from_repository.revisions.clear_cache()
+ self.from_repository.signatures.clear_cache()
+ # Clear the repo's get_parent_map cache too.
+ self.from_repository._unstacked_provider.disable_cache()
+ self.from_repository._unstacked_provider.enable_cache()
+ s = self._get_inventory_stream(self._revision_keys)
+ yield (s[0], wrap_and_count(pb, rc, s[1]))
+ self.from_repository.inventories.clear_cache()
+ for stream_info in self._get_filtered_chk_streams(parent_keys):
+ yield (stream_info[0], wrap_and_count(pb, rc, stream_info[1]))
+ self.from_repository.chk_bytes.clear_cache()
+ s = self._get_text_stream()
+ yield (s[0], wrap_and_count(pb, rc, s[1]))
+ self.from_repository.texts.clear_cache()
+ pb.update('Done', rc.max, rc.max)
+ pb.finished()
+
+ def get_stream_for_missing_keys(self, missing_keys):
+ # missing keys can only occur when we are byte copying and not
+ # translating (because translation means we don't send
+ # unreconstructable deltas ever).
+ missing_inventory_keys = set()
+ for key in missing_keys:
+ if key[0] != 'inventories':
+ raise AssertionError('The only missing keys we should'
+ ' be filling in are inventory keys, not %s'
+ % (key[0],))
+ missing_inventory_keys.add(key[1:])
+ if self._chk_id_roots or self._chk_p_id_roots:
+ raise AssertionError('Cannot call get_stream_for_missing_keys'
+ ' until all of get_stream() has been consumed.')
+ # Yield the inventory stream, so we can find the chk stream
+ # Some of the missing_keys will be missing because they are ghosts.
+ # As such, we can ignore them. The Sink is required to verify there are
+ # no unavailable texts when the ghost inventories are not filled in.
+ yield self._get_inventory_stream(missing_inventory_keys,
+ allow_absent=True)
+ # We use the empty set for excluded_revision_keys, to make it clear
+ # that we want to transmit all referenced chk pages.
+ for stream_info in self._get_filtered_chk_streams(set()):
+ yield stream_info
+
+
+class _InterestingKeyInfo(object):
+ def __init__(self):
+ self.interesting_root_keys = set()
+ self.interesting_pid_root_keys = set()
+ self.uninteresting_root_keys = set()
+ self.uninteresting_pid_root_keys = set()
+
+ def all_interesting(self):
+ return self.interesting_root_keys.union(self.interesting_pid_root_keys)
+
+ def all_uninteresting(self):
+ return self.uninteresting_root_keys.union(
+ self.uninteresting_pid_root_keys)
+
+ def all_keys(self):
+ return self.all_interesting().union(self.all_uninteresting())
+
+
+def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
+ result = _InterestingKeyInfo()
+ for inv in repo.iter_inventories(inventory_ids, 'unordered'):
+ root_key = inv.id_to_entry.key()
+ pid_root_key = inv.parent_id_basename_to_file_id.key()
+ if inv.revision_id in parent_only_inv_ids:
+ result.uninteresting_root_keys.add(root_key)
+ result.uninteresting_pid_root_keys.add(pid_root_key)
+ else:
+ result.interesting_root_keys.add(root_key)
+ result.interesting_pid_root_keys.add(pid_root_key)
+ return result
+
+
+def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_text_key):
+ """Iterate the result of iter_interesting_nodes, yielding the records
+ and adding to text_keys.
+ """
+ text_keys_update = text_keys.update
+ for record, items in interesting_nodes_iterable:
+ text_keys_update([bytes_to_text_key(b) for n,b in items])
+ yield record
+
+
+class RepositoryFormat2a(RepositoryFormatPack):
+ """A CHK repository that uses the bencode revision serializer."""
+
+ repository_class = CHKInventoryRepository
+ supports_external_lookups = True
+ supports_chks = True
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ _serializer = chk_serializer.chk_bencode_serializer
+ _commit_inv_deltas = True
+ # What index classes to use
+ index_builder_class = BTreeBuilder
+ index_class = BTreeGraphIndex
+ # Note: We cannot unpack a delta that references a text we haven't
+ # seen yet. There are 2 options: work in fulltexts, or require
+ # topological sorting. Using fulltexts is better for local
+ # operations, because the source can be smart about extracting
+ # multiple in-a-row (and sharing strings). Topological is better
+ # for remote, because we access less data.
+ _fetch_order = 'unordered'
+ _fetch_uses_deltas = False # essentially ignored by the groupcompress code.
+ fast_deltas = True
+ pack_compresses = True
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('2a')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return ("Repository format 2a - rich roots, group compression"
+ " and chk inventories")
+
+
+class RepositoryFormat2aSubtree(RepositoryFormat2a):
+ """A 2a repository format that supports nested trees.
+
+ """
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('development-subtree')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ return ('Bazaar development format 8\n')
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return ("Development repository format 8 - nested trees, "
+ "group compression and chk inventories")
+
+ experimental = True
+ supports_tree_reference = True
diff --git a/bzrlib/repofmt/knitpack_repo.py b/bzrlib/repofmt/knitpack_repo.py
new file mode 100644
index 0000000..15a819d
--- /dev/null
+++ b/bzrlib/repofmt/knitpack_repo.py
@@ -0,0 +1,1156 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Knit-based pack repository formats."""
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from itertools import izip
+import time
+
+from bzrlib import (
+ controldir,
+ debug,
+ errors,
+ knit,
+ osutils,
+ pack,
+ revision as _mod_revision,
+ trace,
+ tsort,
+ ui,
+ xml5,
+ xml6,
+ xml7,
+ )
+from bzrlib.knit import (
+ _KnitGraphIndex,
+ KnitPlainFactory,
+ KnitVersionedFiles,
+ )
+""")
+
+from bzrlib import (
+ btree_index,
+ )
+from bzrlib.index import (
+ CombinedGraphIndex,
+ GraphIndex,
+ GraphIndexPrefixAdapter,
+ InMemoryGraphIndex,
+ )
+from bzrlib.repofmt.knitrepo import (
+ KnitRepository,
+ )
+from bzrlib.repofmt.pack_repo import (
+ _DirectPackAccess,
+ NewPack,
+ RepositoryFormatPack,
+ ResumedPack,
+ Packer,
+ PackCommitBuilder,
+ PackRepository,
+ PackRootCommitBuilder,
+ RepositoryPackCollection,
+ )
+from bzrlib.vf_repository import (
+ StreamSource,
+ )
+
+
+class KnitPackRepository(PackRepository, KnitRepository):
+
+ def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
+ _serializer):
+ PackRepository.__init__(self, _format, a_bzrdir, control_files,
+ _commit_builder_class, _serializer)
+ if self._format.supports_chks:
+ raise AssertionError("chk not supported")
+ index_transport = self._transport.clone('indices')
+ self._pack_collection = KnitRepositoryPackCollection(self,
+ self._transport,
+ index_transport,
+ self._transport.clone('upload'),
+ self._transport.clone('packs'),
+ _format.index_builder_class,
+ _format.index_class,
+ use_chk_index=False,
+ )
+ self.inventories = KnitVersionedFiles(
+ _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
+ add_callback=self._pack_collection.inventory_index.add_callback,
+ deltas=True, parents=True, is_locked=self.is_locked),
+ data_access=self._pack_collection.inventory_index.data_access,
+ max_delta_chain=200)
+ self.revisions = KnitVersionedFiles(
+ _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
+ add_callback=self._pack_collection.revision_index.add_callback,
+ deltas=False, parents=True, is_locked=self.is_locked,
+ track_external_parent_refs=True),
+ data_access=self._pack_collection.revision_index.data_access,
+ max_delta_chain=0)
+ self.signatures = KnitVersionedFiles(
+ _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
+ add_callback=self._pack_collection.signature_index.add_callback,
+ deltas=False, parents=False, is_locked=self.is_locked),
+ data_access=self._pack_collection.signature_index.data_access,
+ max_delta_chain=0)
+ self.texts = KnitVersionedFiles(
+ _KnitGraphIndex(self._pack_collection.text_index.combined_index,
+ add_callback=self._pack_collection.text_index.add_callback,
+ deltas=True, parents=True, is_locked=self.is_locked),
+ data_access=self._pack_collection.text_index.data_access,
+ max_delta_chain=200)
+ self.chk_bytes = None
+ # True when the repository object is 'write locked' (as opposed to the
+ # physical lock only taken out around changes to the pack-names list.)
+ # Another way to represent this would be a decorator around the control
+ # files object that presents logical locks as physical ones - if this
+ # gets ugly consider that alternative design. RBC 20071011
+ self._write_lock_count = 0
+ self._transaction = None
+ # for tests
+ self._reconcile_does_inventory_gc = True
+ self._reconcile_fixes_text_parents = True
+ self._reconcile_backsup_inventory = False
+
+ def _get_source(self, to_format):
+ if to_format.network_name() == self._format.network_name():
+ return KnitPackStreamSource(self, to_format)
+ return PackRepository._get_source(self, to_format)
+
+ def _reconcile_pack(self, collection, packs, extension, revs, pb):
+ packer = KnitReconcilePacker(collection, packs, extension, revs)
+ return packer.pack(pb)
+
+
+class RepositoryFormatKnitPack1(RepositoryFormatPack):
+ """A no-subtrees parameterized Pack repository.
+
+ This format was introduced in 0.92.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackCommitBuilder
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+ # What index classes to use
+ index_builder_class = InMemoryGraphIndex
+ index_class = GraphIndex
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('pack-0.92')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar pack repository format 1 (needs bzr 0.92)\n"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Packs containing knits without subtree support"
+
+
+class RepositoryFormatKnitPack3(RepositoryFormatPack):
+ """A subtrees parameterized Pack repository.
+
+ This repository format uses the xml7 serializer to get:
+ - support for recording full info about the tree root
+ - support for recording tree-references
+
+ This format was introduced in 0.92.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ experimental = True
+ supports_tree_reference = True
+ @property
+ def _serializer(self):
+ return xml7.serializer_v7
+ # What index classes to use
+ index_builder_class = InMemoryGraphIndex
+ index_class = GraphIndex
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir(
+ 'pack-0.92-subtree')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Packs containing knits with subtree support\n"
+
+
+class RepositoryFormatKnitPack4(RepositoryFormatPack):
+ """A rich-root, no subtrees parameterized Pack repository.
+
+ This repository format uses the xml6 serializer to get:
+ - support for recording full info about the tree root
+
+ This format was introduced in 1.0.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ supports_tree_reference = False
+ @property
+ def _serializer(self):
+ return xml6.serializer_v6
+ # What index classes to use
+ index_builder_class = InMemoryGraphIndex
+ index_class = GraphIndex
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir(
+ 'rich-root-pack')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return ("Bazaar pack repository format 1 with rich root"
+ " (needs bzr 1.0)\n")
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Packs containing knits with rich root support\n"
+
+
+class RepositoryFormatKnitPack5(RepositoryFormatPack):
+ """Repository that supports external references to allow stacking.
+
+ New in release 1.6.
+
+ Supports external lookups, which results in non-truncated ghosts after
+ reconcile compared to pack-0.92 formats.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackCommitBuilder
+ supports_external_lookups = True
+ # What index classes to use
+ index_builder_class = InMemoryGraphIndex
+ index_class = GraphIndex
+
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('1.6')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Packs 5 (adds stacking support, requires bzr 1.6)"
+
+
+class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
+ """A repository with rich roots and stacking.
+
+ New in release 1.6.1.
+
+ Supports stacking on other repositories, allowing data to be accessed
+ without being stored locally.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ supports_tree_reference = False # no subtrees
+ supports_external_lookups = True
+ # What index classes to use
+ index_builder_class = InMemoryGraphIndex
+ index_class = GraphIndex
+
+ @property
+ def _serializer(self):
+ return xml6.serializer_v6
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir(
+ '1.6.1-rich-root')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
+
+ def get_format_description(self):
+ return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"
+
+
+class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
+ """A repository with rich roots and external references.
+
+ New in release 1.6.
+
+ Supports external lookups, which results in non-truncated ghosts after
+ reconcile compared to pack-0.92 formats.
+
+ This format was deprecated because the serializer it uses accidentally
+ supported subtrees, when the format was not intended to. This meant that
+ someone could accidentally fetch from an incorrect repository.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ supports_tree_reference = False # no subtrees
+
+ supports_external_lookups = True
+ # What index classes to use
+ index_builder_class = InMemoryGraphIndex
+ index_class = GraphIndex
+
+ @property
+ def _serializer(self):
+ return xml7.serializer_v7
+
+ def _get_matching_bzrdir(self):
+ matching = controldir.format_registry.make_bzrdir(
+ '1.6.1-rich-root')
+ matching.repository_format = self
+ return matching
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
+
+ def get_format_description(self):
+ return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
+ " (deprecated)")
+
+ def is_deprecated(self):
+ return True
+
+
+class RepositoryFormatKnitPack6(RepositoryFormatPack):
+    """A repository with stacking and btree indexes, without rich roots
+    or subtrees.
+
+ This is equivalent to pack-1.6 with B+Tree indices.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackCommitBuilder
+ supports_external_lookups = True
+ # What index classes to use
+ index_builder_class = btree_index.BTreeBuilder
+ index_class = btree_index.BTreeGraphIndex
+
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('1.9')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Packs 6 (uses btree indexes, requires bzr 1.9)"
+
+
+class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
+ """A repository with rich roots, no subtrees, stacking and btree indexes.
+
+ 1.6-rich-root with B+Tree indices.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ supports_tree_reference = False # no subtrees
+ supports_external_lookups = True
+ # What index classes to use
+ index_builder_class = btree_index.BTreeBuilder
+ index_class = btree_index.BTreeGraphIndex
+
+ @property
+ def _serializer(self):
+ return xml6.serializer_v6
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir(
+ '1.9-rich-root')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
+
+ def get_format_description(self):
+ return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"
+
+
+class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
+ """A subtrees development repository.
+
+ This format should be retained in 2.3, to provide an upgrade path from this
+ to RepositoryFormat2aSubtree. It can be removed in later releases.
+
+    1.6.1-subtree [as it might have been] with B+Tree indices.
+ """
+
+ repository_class = KnitPackRepository
+ _commit_builder_class = PackRootCommitBuilder
+ rich_root_data = True
+ experimental = True
+ supports_tree_reference = True
+ supports_external_lookups = True
+ # What index classes to use
+ index_builder_class = btree_index.BTreeBuilder
+ index_class = btree_index.BTreeGraphIndex
+
+ @property
+ def _serializer(self):
+ return xml7.serializer_v7
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir(
+ 'development5-subtree')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return ("Bazaar development format 2 with subtree support "
+ "(needs bzr.dev from before 1.8)\n")
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return ("Development repository format, currently the same as "
+ "1.6.1-subtree with B+Tree indices.\n")
+
+
+class KnitPackStreamSource(StreamSource):
+ """A StreamSource used to transfer data between same-format KnitPack repos.
+
+ This source assumes:
+ 1) Same serialization format for all objects
+ 2) Same root information
+ 3) XML format inventories
+ 4) Atomic inserts (so we can stream inventory texts before text
+ content)
+ 5) No chk_bytes
+ """
+
+ def __init__(self, from_repository, to_format):
+ super(KnitPackStreamSource, self).__init__(from_repository, to_format)
+ self._text_keys = None
+ self._text_fetch_order = 'unordered'
+
+ def _get_filtered_inv_stream(self, revision_ids):
+ from_repo = self.from_repository
+ parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
+ parent_keys = [(p,) for p in parent_ids]
+ find_text_keys = from_repo._serializer._find_text_key_references
+ parent_text_keys = set(find_text_keys(
+ from_repo._inventory_xml_lines_for_keys(parent_keys)))
+ content_text_keys = set()
+ knit = KnitVersionedFiles(None, None)
+ factory = KnitPlainFactory()
+ def find_text_keys_from_content(record):
+ if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
+ raise ValueError("Unknown content storage kind for"
+ " inventory text: %s" % (record.storage_kind,))
+ # It's a knit record, it has a _raw_record field (even if it was
+ # reconstituted from a network stream).
+ raw_data = record._raw_record
+ # read the entire thing
+ revision_id = record.key[-1]
+ content, _ = knit._parse_record(revision_id, raw_data)
+ if record.storage_kind == 'knit-delta-gz':
+ line_iterator = factory.get_linedelta_content(content)
+ elif record.storage_kind == 'knit-ft-gz':
+ line_iterator = factory.get_fulltext_content(content)
+ content_text_keys.update(find_text_keys(
+ [(line, revision_id) for line in line_iterator]))
+ revision_keys = [(r,) for r in revision_ids]
+ def _filtered_inv_stream():
+ source_vf = from_repo.inventories
+ stream = source_vf.get_record_stream(revision_keys,
+ 'unordered', False)
+ for record in stream:
+ if record.storage_kind == 'absent':
+ raise errors.NoSuchRevision(from_repo, record.key)
+ find_text_keys_from_content(record)
+ yield record
+ self._text_keys = content_text_keys - parent_text_keys
+ return ('inventories', _filtered_inv_stream())
+
+ def _get_text_stream(self):
+        # Note: We know we don't have to handle adding root keys, because both
+        # the source and target have the same network name.
+ text_stream = self.from_repository.texts.get_record_stream(
+ self._text_keys, self._text_fetch_order, False)
+ return ('texts', text_stream)
+
+ def get_stream(self, search):
+ revision_ids = search.get_keys()
+ for stream_info in self._fetch_revision_texts(revision_ids):
+ yield stream_info
+ self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+ yield self._get_filtered_inv_stream(revision_ids)
+ yield self._get_text_stream()
+
+
+class KnitPacker(Packer):
+ """Packer that works with knit packs."""
+
+ def __init__(self, pack_collection, packs, suffix, revision_ids=None,
+ reload_func=None):
+ super(KnitPacker, self).__init__(pack_collection, packs, suffix,
+ revision_ids=revision_ids,
+ reload_func=reload_func)
+
+ def _pack_map_and_index_list(self, index_attribute):
+ """Convert a list of packs to an index pack map and index list.
+
+ :param index_attribute: The attribute that the desired index is found
+ on.
+ :return: A tuple (map, list) where map contains the dict from
+ index:pack_tuple, and list contains the indices in the preferred
+ access order.
+ """
+ indices = []
+ pack_map = {}
+ for pack_obj in self.packs:
+ index = getattr(pack_obj, index_attribute)
+ indices.append(index)
+ pack_map[index] = pack_obj
+ return pack_map, indices
+
+ def _index_contents(self, indices, key_filter=None):
+ """Get an iterable of the index contents from a pack_map.
+
+ :param indices: The list of indices to query
+ :param key_filter: An optional filter to limit the keys returned.
+ """
+ all_index = CombinedGraphIndex(indices)
+ if key_filter is None:
+ return all_index.iter_all_entries()
+ else:
+ return all_index.iter_entries(key_filter)
+
+ def _copy_nodes(self, nodes, index_map, writer, write_index,
+ output_lines=None):
+ """Copy knit nodes between packs with no graph references.
+
+ :param output_lines: Output full texts of copied items.
+ """
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ return self._do_copy_nodes(nodes, index_map, writer,
+ write_index, pb, output_lines=output_lines)
+ finally:
+ pb.finished()
+
+ def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
+ output_lines=None):
+ # for record verification
+ knit = KnitVersionedFiles(None, None)
+ # plan a readv on each source pack:
+ # group by pack
+ nodes = sorted(nodes)
+ # how to map this into knit.py - or knit.py into this?
+ # we don't want the typical knit logic, we want grouping by pack
+ # at this point - perhaps a helper library for the following code
+ # duplication points?
+ request_groups = {}
+ for index, key, value in nodes:
+ if index not in request_groups:
+ request_groups[index] = []
+ request_groups[index].append((key, value))
+ record_index = 0
+ pb.update("Copied record", record_index, len(nodes))
+ for index, items in request_groups.iteritems():
+ pack_readv_requests = []
+ for key, value in items:
+ # ---- KnitGraphIndex.get_position
+ bits = value[1:].split(' ')
+ offset, length = int(bits[0]), int(bits[1])
+ pack_readv_requests.append((offset, length, (key, value[0])))
+ # linear scan up the pack
+ pack_readv_requests.sort()
+ # copy the data
+ pack_obj = index_map[index]
+ transport, path = pack_obj.access_tuple()
+ try:
+ reader = pack.make_readv_reader(transport, path,
+ [offset[0:2] for offset in pack_readv_requests])
+ except errors.NoSuchFile:
+ if self._reload_func is not None:
+ self._reload_func()
+ raise
+ for (names, read_func), (_1, _2, (key, eol_flag)) in \
+ izip(reader.iter_records(), pack_readv_requests):
+ raw_data = read_func(None)
+ # check the header only
+ if output_lines is not None:
+ output_lines(knit._parse_record(key[-1], raw_data)[0])
+ else:
+ df, _ = knit._parse_record_header(key, raw_data)
+ df.close()
+ pos, size = writer.add_bytes_record(raw_data, names)
+ write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
+ pb.update("Copied record", record_index)
+ record_index += 1
+
+ def _copy_nodes_graph(self, index_map, writer, write_index,
+ readv_group_iter, total_items, output_lines=False):
+ """Copy knit nodes between packs.
+
+ :param output_lines: Return lines present in the copied data as
+ an iterator of line,version_id.
+ """
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for result in self._do_copy_nodes_graph(index_map, writer,
+ write_index, output_lines, pb, readv_group_iter, total_items):
+ yield result
+ except Exception:
+ # Python 2.4 does not permit try:finally: in a generator.
+ pb.finished()
+ raise
+ else:
+ pb.finished()
+
+ def _do_copy_nodes_graph(self, index_map, writer, write_index,
+ output_lines, pb, readv_group_iter, total_items):
+ # for record verification
+ knit = KnitVersionedFiles(None, None)
+ # for line extraction when requested (inventories only)
+ if output_lines:
+ factory = KnitPlainFactory()
+ record_index = 0
+ pb.update("Copied record", record_index, total_items)
+ for index, readv_vector, node_vector in readv_group_iter:
+ # copy the data
+ pack_obj = index_map[index]
+ transport, path = pack_obj.access_tuple()
+ try:
+ reader = pack.make_readv_reader(transport, path, readv_vector)
+ except errors.NoSuchFile:
+ if self._reload_func is not None:
+ self._reload_func()
+ raise
+ for (names, read_func), (key, eol_flag, references) in \
+ izip(reader.iter_records(), node_vector):
+ raw_data = read_func(None)
+ if output_lines:
+ # read the entire thing
+ content, _ = knit._parse_record(key[-1], raw_data)
+ if len(references[-1]) == 0:
+ line_iterator = factory.get_fulltext_content(content)
+ else:
+ line_iterator = factory.get_linedelta_content(content)
+ for line in line_iterator:
+ yield line, key
+ else:
+ # check the header only
+ df, _ = knit._parse_record_header(key, raw_data)
+ df.close()
+ pos, size = writer.add_bytes_record(raw_data, names)
+ write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
+ pb.update("Copied record", record_index)
+ record_index += 1
+
+ def _process_inventory_lines(self, inv_lines):
+ """Use up the inv_lines generator and setup a text key filter."""
+ repo = self._pack_collection.repo
+ fileid_revisions = repo._find_file_ids_from_xml_inventory_lines(
+ inv_lines, self.revision_keys)
+ text_filter = []
+ for fileid, file_revids in fileid_revisions.iteritems():
+ text_filter.extend([(fileid, file_revid) for file_revid in file_revids])
+ self._text_filter = text_filter
+
+ def _copy_inventory_texts(self):
+ # select inventory keys
+ inv_keys = self._revision_keys # currently the same keyspace, and note that
+ # querying for keys here could introduce a bug where an inventory item
+ # is missed, so do not change it to query separately without cross
+ # checking like the text key check below.
+ inventory_index_map, inventory_indices = self._pack_map_and_index_list(
+ 'inventory_index')
+ inv_nodes = self._index_contents(inventory_indices, inv_keys)
+ # copy inventory keys and adjust values
+ # XXX: Should be a helper function to allow different inv representation
+ # at this point.
+ self.pb.update("Copying inventory texts", 2)
+ total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
+ # Only grab the output lines if we will be processing them
+ output_lines = bool(self.revision_ids)
+ inv_lines = self._copy_nodes_graph(inventory_index_map,
+ self.new_pack._writer, self.new_pack.inventory_index,
+ readv_group_iter, total_items, output_lines=output_lines)
+ if self.revision_ids:
+ self._process_inventory_lines(inv_lines)
+ else:
+ # eat the iterator to cause it to execute.
+ list(inv_lines)
+ self._text_filter = None
+ if 'pack' in debug.debug_flags:
+ trace.mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
+ time.ctime(), self._pack_collection._upload_transport.base,
+ self.new_pack.random_name,
+ self.new_pack.inventory_index.key_count(),
+ time.time() - self.new_pack.start_time)
+
+ def _update_pack_order(self, entries, index_to_pack_map):
+ """Determine how we want our packs to be ordered.
+
+ This changes the sort order of the self.packs list so that packs unused
+ by 'entries' will be at the end of the list, so that future requests
+ can avoid probing them. Used packs will be at the front of the
+ self.packs list, in the order of their first use in 'entries'.
+
+ :param entries: A list of (index, ...) tuples
+ :param index_to_pack_map: A mapping from index objects to pack objects.
+ """
+ packs = []
+ seen_indexes = set()
+ for entry in entries:
+ index = entry[0]
+ if index not in seen_indexes:
+ packs.append(index_to_pack_map[index])
+ seen_indexes.add(index)
+ if len(packs) == len(self.packs):
+ if 'pack' in debug.debug_flags:
+ trace.mutter('Not changing pack list, all packs used.')
+ return
+ seen_packs = set(packs)
+ for pack in self.packs:
+ if pack not in seen_packs:
+ packs.append(pack)
+ seen_packs.add(pack)
+ if 'pack' in debug.debug_flags:
+ old_names = [p.access_tuple()[1] for p in self.packs]
+ new_names = [p.access_tuple()[1] for p in packs]
+ trace.mutter('Reordering packs\nfrom: %s\n to: %s',
+ old_names, new_names)
+ self.packs = packs
+
+ def _copy_revision_texts(self):
+ # select revisions
+ if self.revision_ids:
+ revision_keys = [(revision_id,) for revision_id in self.revision_ids]
+ else:
+ revision_keys = None
+ # select revision keys
+ revision_index_map, revision_indices = self._pack_map_and_index_list(
+ 'revision_index')
+ revision_nodes = self._index_contents(revision_indices, revision_keys)
+ revision_nodes = list(revision_nodes)
+ self._update_pack_order(revision_nodes, revision_index_map)
+ # copy revision keys and adjust values
+ self.pb.update("Copying revision texts", 1)
+ total_items, readv_group_iter = self._revision_node_readv(revision_nodes)
+ list(self._copy_nodes_graph(revision_index_map, self.new_pack._writer,
+ self.new_pack.revision_index, readv_group_iter, total_items))
+ if 'pack' in debug.debug_flags:
+ trace.mutter('%s: create_pack: revisions copied: %s%s %d items t+%6.3fs',
+ time.ctime(), self._pack_collection._upload_transport.base,
+ self.new_pack.random_name,
+ self.new_pack.revision_index.key_count(),
+ time.time() - self.new_pack.start_time)
+ self._revision_keys = revision_keys
+
+ def _get_text_nodes(self):
+ text_index_map, text_indices = self._pack_map_and_index_list(
+ 'text_index')
+ return text_index_map, self._index_contents(text_indices,
+ self._text_filter)
+
+ def _copy_text_texts(self):
+ # select text keys
+ text_index_map, text_nodes = self._get_text_nodes()
+ if self._text_filter is not None:
+ # We could return the keys copied as part of the return value from
+ # _copy_nodes_graph but this doesn't work all that well with the
+ # need to get line output too, so we check separately, and as we're
+ # going to buffer everything anyway, we check beforehand, which
+ # saves reading knit data over the wire when we know there are
+            # missing records.
+ text_nodes = set(text_nodes)
+ present_text_keys = set(_node[1] for _node in text_nodes)
+ missing_text_keys = set(self._text_filter) - present_text_keys
+ if missing_text_keys:
+ # TODO: raise a specific error that can handle many missing
+ # keys.
+ trace.mutter("missing keys during fetch: %r", missing_text_keys)
+ a_missing_key = missing_text_keys.pop()
+ raise errors.RevisionNotPresent(a_missing_key[1],
+ a_missing_key[0])
+ # copy text keys and adjust values
+ self.pb.update("Copying content texts", 3)
+ total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
+ list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
+ self.new_pack.text_index, readv_group_iter, total_items))
+ self._log_copied_texts()
+
+ def _create_pack_from_packs(self):
+ self.pb.update("Opening pack", 0, 5)
+ self.new_pack = self.open_pack()
+ new_pack = self.new_pack
+ # buffer data - we won't be reading-back during the pack creation and
+ # this makes a significant difference on sftp pushes.
+ new_pack.set_write_cache_size(1024*1024)
+ if 'pack' in debug.debug_flags:
+ plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
+ for a_pack in self.packs]
+ if self.revision_ids is not None:
+ rev_count = len(self.revision_ids)
+ else:
+ rev_count = 'all'
+ trace.mutter('%s: create_pack: creating pack from source packs: '
+ '%s%s %s revisions wanted %s t=0',
+ time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
+ plain_pack_list, rev_count)
+ self._copy_revision_texts()
+ self._copy_inventory_texts()
+ self._copy_text_texts()
+ # select signature keys
+ signature_filter = self._revision_keys # same keyspace
+ signature_index_map, signature_indices = self._pack_map_and_index_list(
+ 'signature_index')
+ signature_nodes = self._index_contents(signature_indices,
+ signature_filter)
+ # copy signature keys and adjust values
+ self.pb.update("Copying signature texts", 4)
+ self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
+ new_pack.signature_index)
+ if 'pack' in debug.debug_flags:
+ trace.mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
+ time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
+ new_pack.signature_index.key_count(),
+ time.time() - new_pack.start_time)
+ new_pack._check_references()
+ if not self._use_pack(new_pack):
+ new_pack.abort()
+ return None
+ self.pb.update("Finishing pack", 5)
+ new_pack.finish()
+ self._pack_collection.allocate(new_pack)
+ return new_pack
+
+ def _least_readv_node_readv(self, nodes):
+ """Generate request groups for nodes using the least readv's.
+
+ :param nodes: An iterable of graph index nodes.
+ :return: Total node count and an iterator of the data needed to perform
+ readvs to obtain the data for nodes. Each item yielded by the
+ iterator is a tuple with:
+ index, readv_vector, node_vector. readv_vector is a list ready to
+ hand to the transport readv method, and node_vector is a list of
+ (key, eol_flag, references) for the node retrieved by the
+ matching readv_vector.
+ """
+ # group by pack so we do one readv per pack
+ nodes = sorted(nodes)
+ total = len(nodes)
+ request_groups = {}
+ for index, key, value, references in nodes:
+ if index not in request_groups:
+ request_groups[index] = []
+ request_groups[index].append((key, value, references))
+ result = []
+ for index, items in request_groups.iteritems():
+ pack_readv_requests = []
+ for key, value, references in items:
+ # ---- KnitGraphIndex.get_position
+ bits = value[1:].split(' ')
+ offset, length = int(bits[0]), int(bits[1])
+ pack_readv_requests.append(
+ ((offset, length), (key, value[0], references)))
+ # linear scan up the pack to maximum range combining.
+ pack_readv_requests.sort()
+ # split out the readv and the node data.
+ pack_readv = [readv for readv, node in pack_readv_requests]
+ node_vector = [node for readv, node in pack_readv_requests]
+ result.append((index, pack_readv, node_vector))
+ return total, result
+
+ def _revision_node_readv(self, revision_nodes):
+ """Return the total revisions and the readv's to issue.
+
+ :param revision_nodes: The revision index contents for the packs being
+ incorporated into the new pack.
+ :return: As per _least_readv_node_readv.
+ """
+ return self._least_readv_node_readv(revision_nodes)
+
+
+class KnitReconcilePacker(KnitPacker):
+ """A packer which regenerates indices etc as it copies.
+
+ This is used by ``bzr reconcile`` to cause parent text pointers to be
+ regenerated.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(KnitReconcilePacker, self).__init__(*args, **kwargs)
+ self._data_changed = False
+
+ def _process_inventory_lines(self, inv_lines):
+        """Generate a text key reference map for reconciling, not a filter."""
+ repo = self._pack_collection.repo
+ refs = repo._serializer._find_text_key_references(inv_lines)
+ self._text_refs = refs
+ # during reconcile we:
+ # - convert unreferenced texts to full texts
+ # - correct texts which reference a text not copied to be full texts
+ # - copy all others as-is but with corrected parents.
+ # - so at this point we don't know enough to decide what becomes a full
+ # text.
+ self._text_filter = None
+
+ def _copy_text_texts(self):
+        """Generate the texts we should have and then copy them."""
+ self.pb.update("Copying content texts", 3)
+ # we have three major tasks here:
+ # 1) generate the ideal index
+ repo = self._pack_collection.repo
+ ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
+ _1, key, _2, refs in
+ self.new_pack.revision_index.iter_all_entries()])
+ ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
+ # 2) generate a text_nodes list that contains all the deltas that can
+ # be used as-is, with corrected parents.
+ ok_nodes = []
+ bad_texts = []
+ discarded_nodes = []
+ NULL_REVISION = _mod_revision.NULL_REVISION
+ text_index_map, text_nodes = self._get_text_nodes()
+ for node in text_nodes:
+ # 0 - index
+ # 1 - key
+ # 2 - value
+ # 3 - refs
+ try:
+ ideal_parents = tuple(ideal_index[node[1]])
+ except KeyError:
+ discarded_nodes.append(node)
+ self._data_changed = True
+ else:
+ if ideal_parents == (NULL_REVISION,):
+ ideal_parents = ()
+ if ideal_parents == node[3][0]:
+ # no change needed.
+ ok_nodes.append(node)
+ elif ideal_parents[0:1] == node[3][0][0:1]:
+ # the left most parent is the same, or there are no parents
+ # today. Either way, we can preserve the representation as
+ # long as we change the refs to be inserted.
+ self._data_changed = True
+ ok_nodes.append((node[0], node[1], node[2],
+ (ideal_parents, node[3][1])))
+ self._data_changed = True
+ else:
+ # Reinsert this text completely
+ bad_texts.append((node[1], ideal_parents))
+ self._data_changed = True
+ # we're finished with some data.
+ del ideal_index
+ del text_nodes
+ # 3) bulk copy the ok data
+ total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
+ list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
+ self.new_pack.text_index, readv_group_iter, total_items))
+ # 4) adhoc copy all the other texts.
+ # We have to topologically insert all texts otherwise we can fail to
+ # reconcile when parts of a single delta chain are preserved intact,
+ # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
+ # reinserted, and if d3 has incorrect parents it will also be
+ # reinserted. If we insert d3 first, d2 is present (as it was bulk
+ # copied), so we will try to delta, but d2 is not currently able to be
+ # extracted because its basis d1 is not present. Topologically sorting
+ # addresses this. The following generates a sort for all the texts that
+ # are being inserted without having to reference the entire text key
+ # space (we only topo sort the revisions, which is smaller).
+ topo_order = tsort.topo_sort(ancestors)
+ rev_order = dict(zip(topo_order, range(len(topo_order))))
+ bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
+ transaction = repo.get_transaction()
+ file_id_index = GraphIndexPrefixAdapter(
+ self.new_pack.text_index,
+ ('blank', ), 1,
+ add_nodes_callback=self.new_pack.text_index.add_nodes)
+ data_access = _DirectPackAccess(
+ {self.new_pack.text_index:self.new_pack.access_tuple()})
+ data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
+ self.new_pack.access_tuple())
+ output_texts = KnitVersionedFiles(
+ _KnitGraphIndex(self.new_pack.text_index,
+ add_callback=self.new_pack.text_index.add_nodes,
+ deltas=True, parents=True, is_locked=repo.is_locked),
+ data_access=data_access, max_delta_chain=200)
+ for key, parent_keys in bad_texts:
+ # We refer to the new pack to delta data being output.
+ # A possible improvement would be to catch errors on short reads
+ # and only flush then.
+ self.new_pack.flush()
+ parents = []
+ for parent_key in parent_keys:
+ if parent_key[0] != key[0]:
+ # Graph parents must match the fileid
+ raise errors.BzrError('Mismatched key parent %r:%r' %
+ (key, parent_keys))
+ parents.append(parent_key[1])
+ text_lines = osutils.split_lines(repo.texts.get_record_stream(
+ [key], 'unordered', True).next().get_bytes_as('fulltext'))
+ output_texts.add_lines(key, parent_keys, text_lines,
+ random_id=True, check_content=False)
+ # 5) check that nothing inserted has a reference outside the keyspace.
+ missing_text_keys = self.new_pack.text_index._external_references()
+ if missing_text_keys:
+ raise errors.BzrCheckError('Reference to missing compression parents %r'
+ % (missing_text_keys,))
+ self._log_copied_texts()
+
+ def _use_pack(self, new_pack):
+ """Override _use_pack to check for reconcile having changed content."""
+ # XXX: we might be better checking this at the copy time.
+ original_inventory_keys = set()
+ inv_index = self._pack_collection.inventory_index.combined_index
+ for entry in inv_index.iter_all_entries():
+ original_inventory_keys.add(entry[1])
+ new_inventory_keys = set()
+ for entry in new_pack.inventory_index.iter_all_entries():
+ new_inventory_keys.add(entry[1])
+ if new_inventory_keys != original_inventory_keys:
+ self._data_changed = True
+ return new_pack.data_inserted() and self._data_changed
+
+
+class OptimisingKnitPacker(KnitPacker):
+ """A packer which spends more time to create better disk layouts."""
+
+ def _revision_node_readv(self, revision_nodes):
+ """Return the total revisions and the readv's to issue.
+
+ This sort places revisions in topological order with the ancestors
+ after the children.
+
+ :param revision_nodes: The revision index contents for the packs being
+ incorporated into the new pack.
+ :return: As per _least_readv_node_readv.
+ """
+ # build an ancestors dict
+ ancestors = {}
+ by_key = {}
+ for index, key, value, references in revision_nodes:
+ ancestors[key] = references[0]
+ by_key[key] = (index, value, references)
+ order = tsort.topo_sort(ancestors)
+ total = len(order)
+ # Single IO is pathological, but it will work as a starting point.
+ requests = []
+ for key in reversed(order):
+ index, value, references = by_key[key]
+ # ---- KnitGraphIndex.get_position
+ bits = value[1:].split(' ')
+ offset, length = int(bits[0]), int(bits[1])
+ requests.append(
+ (index, [(offset, length)], [(key, value[0], references)]))
+ # TODO: combine requests in the same index that are in ascending order.
+ return total, requests
+
+ def open_pack(self):
+ """Open a pack for the pack we are creating."""
+ new_pack = super(OptimisingKnitPacker, self).open_pack()
+ # Turn on the optimization flags for all the index builders.
+ new_pack.revision_index.set_optimize(for_size=True)
+ new_pack.inventory_index.set_optimize(for_size=True)
+ new_pack.text_index.set_optimize(for_size=True)
+ new_pack.signature_index.set_optimize(for_size=True)
+ return new_pack
+
+
+class KnitRepositoryPackCollection(RepositoryPackCollection):
+ """A knit pack collection."""
+
+ pack_factory = NewPack
+ resumed_pack_factory = ResumedPack
+ normal_packer_class = KnitPacker
+ optimising_packer_class = OptimisingKnitPacker
+
+
+
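
As an aside, the heart of the KnitPacker copy methods above is the "group by
pack so we do one readv per pack" step in _least_readv_node_readv. The
following standalone sketch (hypothetical helper, not bzrlib code; index
objects are stand-in strings) shows the same grouping and per-pack sorting on
toy data:

# Simplified sketch of KnitPacker._least_readv_node_readv's grouping strategy.
# Each node is (index, key, value, references); value is the knit graph-index
# value string '<eol_flag><offset> <length>'.
def group_readv_requests(nodes):
    request_groups = {}
    for index, key, value, references in sorted(nodes):
        bits = value[1:].split(' ')
        offset, length = int(bits[0]), int(bits[1])
        request_groups.setdefault(index, []).append(
            ((offset, length), (key, value[0], references)))
    result = []
    for index, items in sorted(request_groups.items()):
        items.sort()    # linear scan up the pack, ready for range combining
        readv_vector = [readv for readv, node in items]
        node_vector = [node for readv, node in items]
        result.append((index, readv_vector, node_vector))
    return result

# Two imaginary packs 'pack-1' and 'pack-2'.
nodes = [
    ('pack-1', ('rev-2',), ' 100 40', ((),)),
    ('pack-2', ('rev-3',), ' 0 25', ((),)),
    ('pack-1', ('rev-1',), ' 0 60', ((),)),
]
for index, readv_vector, node_vector in group_readv_requests(nodes):
    print index, readv_vector, [node[0] for node in node_vector]
# pack-1 [(0, 60), (100, 40)] [('rev-1',), ('rev-2',)]
# pack-2 [(0, 25)] [('rev-3',)]
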
diff --git a/bzrlib/repofmt/knitrepo.py b/bzrlib/repofmt/knitrepo.py
new file mode 100644
index 0000000..624c4ea
--- /dev/null
+++ b/bzrlib/repofmt/knitrepo.py
@@ -0,0 +1,522 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import itertools
+
+from bzrlib import (
+ controldir,
+ errors,
+ knit as _mod_knit,
+ lockable_files,
+ lockdir,
+ osutils,
+ revision as _mod_revision,
+ trace,
+ transactions,
+ versionedfile,
+ xml5,
+ xml6,
+ xml7,
+ )
+""")
+from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.repository import (
+ InterRepository,
+ IsInWriteGroupError,
+ RepositoryFormatMetaDir,
+ )
+from bzrlib.vf_repository import (
+ InterSameDataRepository,
+ MetaDirVersionedFileRepository,
+ MetaDirVersionedFileRepositoryFormat,
+ VersionedFileCommitBuilder,
+ VersionedFileRootCommitBuilder,
+ )
+from bzrlib import symbol_versioning
+
+
+class _KnitParentsProvider(object):
+
+ def __init__(self, knit):
+ self._knit = knit
+
+ def __repr__(self):
+ return 'KnitParentsProvider(%r)' % self._knit
+
+ def get_parent_map(self, keys):
+ """See graph.StackedParentsProvider.get_parent_map"""
+ parent_map = {}
+ for revision_id in keys:
+ if revision_id is None:
+ raise ValueError('get_parent_map(None) is not valid')
+ if revision_id == _mod_revision.NULL_REVISION:
+ parent_map[revision_id] = ()
+ else:
+ try:
+ parents = tuple(
+ self._knit.get_parents_with_ghosts(revision_id))
+ except errors.RevisionNotPresent:
+ continue
+ else:
+ if len(parents) == 0:
+ parents = (_mod_revision.NULL_REVISION,)
+ parent_map[revision_id] = parents
+ return parent_map
+
+
+class _KnitsParentsProvider(object):
+
+ def __init__(self, knit, prefix=()):
+ """Create a parent provider for string keys mapped to tuple keys."""
+ self._knit = knit
+ self._prefix = prefix
+
+ def __repr__(self):
+ return 'KnitsParentsProvider(%r)' % self._knit
+
+ def get_parent_map(self, keys):
+ """See graph.StackedParentsProvider.get_parent_map"""
+ parent_map = self._knit.get_parent_map(
+ [self._prefix + (key,) for key in keys])
+ result = {}
+ for key, parents in parent_map.items():
+ revid = key[-1]
+ if len(parents) == 0:
+ parents = (_mod_revision.NULL_REVISION,)
+ else:
+ parents = tuple(parent[-1] for parent in parents)
+ result[revid] = parents
+ for revision_id in keys:
+ if revision_id == _mod_revision.NULL_REVISION:
+ result[revision_id] = ()
+ return result
+
+
+class KnitRepository(MetaDirVersionedFileRepository):
+ """Knit format repository."""
+
+ # These attributes are inherited from the Repository base class. Setting
+ # them to None ensures that if the constructor is changed to not initialize
+    # them, or a subclass fails to call the constructor, an error will
+ # occur rather than the system working but generating incorrect data.
+ _commit_builder_class = None
+ _serializer = None
+
+ def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
+ _serializer):
+ super(KnitRepository, self).__init__(_format, a_bzrdir, control_files)
+ self._commit_builder_class = _commit_builder_class
+ self._serializer = _serializer
+ self._reconcile_fixes_text_parents = True
+
+ @needs_read_lock
+ def _all_revision_ids(self):
+ """See Repository.all_revision_ids()."""
+ return [key[0] for key in self.revisions.keys()]
+
+ def _activate_new_inventory(self):
+ """Put a replacement inventory.new into use as inventories."""
+ # Copy the content across
+ t = self._transport
+ t.copy('inventory.new.kndx', 'inventory.kndx')
+ try:
+ t.copy('inventory.new.knit', 'inventory.knit')
+ except errors.NoSuchFile:
+ # empty inventories knit
+ t.delete('inventory.knit')
+ # delete the temp inventory
+ t.delete('inventory.new.kndx')
+ try:
+ t.delete('inventory.new.knit')
+ except errors.NoSuchFile:
+ # empty inventories knit
+ pass
+ # Force index reload (sanity check)
+ self.inventories._index._reset_cache()
+ self.inventories.keys()
+
+ def _backup_inventory(self):
+ t = self._transport
+ t.copy('inventory.kndx', 'inventory.backup.kndx')
+ t.copy('inventory.knit', 'inventory.backup.knit')
+
+ def _move_file_id(self, from_id, to_id):
+ t = self._transport.clone('knits')
+ from_rel_url = self.texts._index._mapper.map((from_id, None))
+ to_rel_url = self.texts._index._mapper.map((to_id, None))
+ # We expect both files to always exist in this case.
+ for suffix in ('.knit', '.kndx'):
+ t.rename(from_rel_url + suffix, to_rel_url + suffix)
+
+ def _remove_file_id(self, file_id):
+ t = self._transport.clone('knits')
+ rel_url = self.texts._index._mapper.map((file_id, None))
+ for suffix in ('.kndx', '.knit'):
+ try:
+ t.delete(rel_url + suffix)
+ except errors.NoSuchFile:
+ pass
+
+ def _temp_inventories(self):
+ result = self._format._get_inventories(self._transport, self,
+ 'inventory.new')
+ # Reconciling when the output has no revisions would result in no
+ # writes - but we want to ensure there is an inventory for
+ # compatibility with older clients that don't lazy-load.
+ result.get_parent_map([('A',)])
+ return result
+
+ @needs_read_lock
+ def get_revision(self, revision_id):
+ """Return the Revision object for a named revision"""
+ revision_id = osutils.safe_revision_id(revision_id)
+ return self.get_revision_reconcile(revision_id)
+
+ def _refresh_data(self):
+ if not self.is_locked():
+ return
+ if self.is_in_write_group():
+ raise IsInWriteGroupError(self)
+ # Create a new transaction to force all knits to see the scope change.
+ # This is safe because we're outside a write group.
+ self.control_files._finish_transaction()
+ if self.is_write_locked():
+ self.control_files._set_write_transaction()
+ else:
+ self.control_files._set_read_transaction()
+
+ @needs_write_lock
+ def reconcile(self, other=None, thorough=False):
+ """Reconcile this repository."""
+ from bzrlib.reconcile import KnitReconciler
+ reconciler = KnitReconciler(self, thorough=thorough)
+ reconciler.reconcile()
+ return reconciler
+
+ def _make_parents_provider(self):
+ return _KnitsParentsProvider(self.revisions)
+
+
+class RepositoryFormatKnit(MetaDirVersionedFileRepositoryFormat):
+ """Bzr repository knit format (generalized).
+
+ This repository format has:
+ - knits for file texts and inventory
+ - hash subdirectory based stores.
+ - knits for revisions and signatures
+ - TextStores for revisions and signatures.
+ - a format marker of its own
+ - an optional 'shared-storage' flag
+ - an optional 'no-working-trees' flag
+ - a LockDir lock
+ """
+
+ # Set this attribute in derived classes to control the repository class
+ # created by open and initialize.
+ repository_class = None
+ # Set this attribute in derived classes to control the
+ # _commit_builder_class that the repository objects will have passed to
+ # their constructor.
+ _commit_builder_class = None
+    # Set this attribute in derived classes to control the _serializer that the
+ # repository objects will have passed to their constructor.
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+ # Knit based repositories handle ghosts reasonably well.
+ supports_ghosts = True
+ # External lookups are not supported in this format.
+ supports_external_lookups = False
+ # No CHK support.
+ supports_chks = False
+ _fetch_order = 'topological'
+ _fetch_uses_deltas = True
+ fast_deltas = False
+ supports_funky_characters = True
+ # The revision.kndx could potentially claim a revision has a different
+ # parent to the revision text.
+ revision_graph_can_have_wrong_parents = True
+
+ def _get_inventories(self, repo_transport, repo, name='inventory'):
+ mapper = versionedfile.ConstantMapper(name)
+ index = _mod_knit._KndxIndex(repo_transport, mapper,
+ repo.get_transaction, repo.is_write_locked, repo.is_locked)
+ access = _mod_knit._KnitKeyAccess(repo_transport, mapper)
+ return _mod_knit.KnitVersionedFiles(index, access, annotated=False)
+
+ def _get_revisions(self, repo_transport, repo):
+ mapper = versionedfile.ConstantMapper('revisions')
+ index = _mod_knit._KndxIndex(repo_transport, mapper,
+ repo.get_transaction, repo.is_write_locked, repo.is_locked)
+ access = _mod_knit._KnitKeyAccess(repo_transport, mapper)
+ return _mod_knit.KnitVersionedFiles(index, access, max_delta_chain=0,
+ annotated=False)
+
+ def _get_signatures(self, repo_transport, repo):
+ mapper = versionedfile.ConstantMapper('signatures')
+ index = _mod_knit._KndxIndex(repo_transport, mapper,
+ repo.get_transaction, repo.is_write_locked, repo.is_locked)
+ access = _mod_knit._KnitKeyAccess(repo_transport, mapper)
+ return _mod_knit.KnitVersionedFiles(index, access, max_delta_chain=0,
+ annotated=False)
+
+ def _get_texts(self, repo_transport, repo):
+ mapper = versionedfile.HashEscapedPrefixMapper()
+ base_transport = repo_transport.clone('knits')
+ index = _mod_knit._KndxIndex(base_transport, mapper,
+ repo.get_transaction, repo.is_write_locked, repo.is_locked)
+ access = _mod_knit._KnitKeyAccess(base_transport, mapper)
+ return _mod_knit.KnitVersionedFiles(index, access, max_delta_chain=200,
+ annotated=True)
+
+ def initialize(self, a_bzrdir, shared=False):
+ """Create a knit format 1 repository.
+
+ :param a_bzrdir: bzrdir to contain the new repository; must already
+ be initialized.
+ :param shared: If true the repository will be initialized as a shared
+ repository.
+ """
+ trace.mutter('creating repository in %s.', a_bzrdir.transport.base)
+ dirs = ['knits']
+ files = []
+ utf8_files = [('format', self.get_format_string())]
+
+ self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
+ repo_transport = a_bzrdir.get_repository_transport(None)
+ control_files = lockable_files.LockableFiles(repo_transport,
+ 'lock', lockdir.LockDir)
+ transaction = transactions.WriteTransaction()
+ result = self.open(a_bzrdir=a_bzrdir, _found=True)
+ result.lock_write()
+        # The revision id here is irrelevant: it will not be stored, and cannot
+        # already exist; we do this to create files on disk for older clients.
+ result.inventories.get_parent_map([('A',)])
+ result.revisions.get_parent_map([('A',)])
+ result.signatures.get_parent_map([('A',)])
+ result.unlock()
+ self._run_post_repo_init_hooks(result, a_bzrdir, shared)
+ return result
+
+ def open(self, a_bzrdir, _found=False, _override_transport=None):
+ """See RepositoryFormat.open().
+
+ :param _override_transport: INTERNAL USE ONLY. Allows opening the
+ repository at a slightly different url
+ than normal. I.e. during 'upgrade'.
+ """
+ if not _found:
+ format = RepositoryFormatMetaDir.find_format(a_bzrdir)
+ if _override_transport is not None:
+ repo_transport = _override_transport
+ else:
+ repo_transport = a_bzrdir.get_repository_transport(None)
+ control_files = lockable_files.LockableFiles(repo_transport,
+ 'lock', lockdir.LockDir)
+ repo = self.repository_class(_format=self,
+ a_bzrdir=a_bzrdir,
+ control_files=control_files,
+ _commit_builder_class=self._commit_builder_class,
+ _serializer=self._serializer)
+ repo.revisions = self._get_revisions(repo_transport, repo)
+ repo.signatures = self._get_signatures(repo_transport, repo)
+ repo.inventories = self._get_inventories(repo_transport, repo)
+ repo.texts = self._get_texts(repo_transport, repo)
+ repo.chk_bytes = None
+ repo._transport = repo_transport
+ return repo
+
+
+class RepositoryFormatKnit1(RepositoryFormatKnit):
+ """Bzr repository knit format 1.
+
+ This repository format has:
+ - knits for file texts and inventory
+ - hash subdirectory based stores.
+ - knits for revisions and signatures
+ - TextStores for revisions and signatures.
+ - a format marker of its own
+ - an optional 'shared-storage' flag
+ - an optional 'no-working-trees' flag
+ - a LockDir lock
+
+ This format was introduced in bzr 0.8.
+ """
+
+ repository_class = KnitRepository
+ _commit_builder_class = VersionedFileCommitBuilder
+ @property
+ def _serializer(self):
+ return xml5.serializer_v5
+
+ def __ne__(self, other):
+ return self.__class__ is not other.__class__
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar-NG Knit Repository Format 1"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Knit repository format 1"
+
+
+class RepositoryFormatKnit3(RepositoryFormatKnit):
+ """Bzr repository knit format 3.
+
+ This repository format has:
+ - knits for file texts and inventory
+ - hash subdirectory based stores.
+ - knits for revisions and signatures
+ - TextStores for revisions and signatures.
+ - a format marker of its own
+ - an optional 'shared-storage' flag
+ - an optional 'no-working-trees' flag
+ - a LockDir lock
+ - support for recording full info about the tree root
+ - support for recording tree-references
+ """
+
+ repository_class = KnitRepository
+ _commit_builder_class = VersionedFileRootCommitBuilder
+ rich_root_data = True
+ experimental = True
+ supports_tree_reference = True
+ @property
+ def _serializer(self):
+ return xml7.serializer_v7
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('dirstate-with-subtree')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Bazaar Knit Repository Format 3 (bzr 0.15)\n"
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Knit repository format 3"
+
+
+class RepositoryFormatKnit4(RepositoryFormatKnit):
+ """Bzr repository knit format 4.
+
+ This repository format has everything in format 3, except for
+ tree-references:
+ - knits for file texts and inventory
+ - hash subdirectory based stores.
+ - knits for revisions and signatures
+ - TextStores for revisions and signatures.
+ - a format marker of its own
+ - an optional 'shared-storage' flag
+ - an optional 'no-working-trees' flag
+ - a LockDir lock
+ - support for recording full info about the tree root
+ """
+
+ repository_class = KnitRepository
+ _commit_builder_class = VersionedFileRootCommitBuilder
+ rich_root_data = True
+ supports_tree_reference = False
+ @property
+ def _serializer(self):
+ return xml6.serializer_v6
+
+ def _get_matching_bzrdir(self):
+ return controldir.format_registry.make_bzrdir('rich-root')
+
+ def _ignore_setting_bzrdir(self, format):
+ pass
+
+ _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return 'Bazaar Knit Repository Format 4 (bzr 1.0)\n'
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return "Knit repository format 4"
+
+
+class InterKnitRepo(InterSameDataRepository):
+ """Optimised code paths between Knit based repositories."""
+
+ @classmethod
+ def _get_repo_format_to_test(self):
+ return RepositoryFormatKnit1()
+
+ @staticmethod
+ def is_compatible(source, target):
+ """Be compatible with known Knit formats.
+
+ We don't test for the stores being of specific types because that
+ could lead to confusing results, and there is no need to be
+ overly general.
+ """
+ try:
+ are_knits = (isinstance(source._format, RepositoryFormatKnit) and
+ isinstance(target._format, RepositoryFormatKnit))
+ except AttributeError:
+ return False
+ return are_knits and InterRepository._same_model(source, target)
+
+ @needs_read_lock
+ def search_missing_revision_ids(self,
+ find_ghosts=True, revision_ids=None, if_present_ids=None,
+ limit=None):
+ """See InterRepository.search_missing_revision_ids()."""
+ source_ids_set = self._present_source_revisions_for(
+ revision_ids, if_present_ids)
+ # source_ids is the worst possible case we may need to pull.
+ # now we want to filter source_ids against what we actually
+ # have in target, but don't try to check for existence where we know
+ # we do not have a revision as that would be pointless.
+ target_ids = set(self.target.all_revision_ids())
+ possibly_present_revisions = target_ids.intersection(source_ids_set)
+ actually_present_revisions = set(
+ self.target._eliminate_revisions_not_present(possibly_present_revisions))
+ required_revisions = source_ids_set.difference(actually_present_revisions)
+ if revision_ids is not None:
+            # We used get_ancestry to determine source_ids, so we are assured
+            # all revisions referenced are present as they are installed in
+            # topological order, and the tip revision was validated by
+            # get_ancestry.
+ result_set = required_revisions
+ else:
+ # if we just grabbed the possibly available ids, then
+            # we only have an estimate of what's available and need to validate
+ # that against the revision records.
+ result_set = set(
+ self.source._eliminate_revisions_not_present(required_revisions))
+ if limit is not None:
+ topo_ordered = self.source.get_graph().iter_topo_order(result_set)
+ result_set = set(itertools.islice(topo_ordered, limit))
+ return self.source.revision_ids_to_search_result(result_set)
+
+
+InterRepository.register_optimiser(InterKnitRepo)
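
For reference, the parent-map shape produced by the parents providers above can
be illustrated with a small standalone sketch (hypothetical helper, not bzrlib
code, though NULL_REVISION really is 'null:' in bzrlib): ghost revisions are
simply omitted, parentless revisions are reported as descending from
NULL_REVISION, and NULL_REVISION itself maps to an empty tuple.

# Standalone illustration of the normalisation in
# _KnitsParentsProvider.get_parent_map above; the revision ids are made up.
NULL_REVISION = 'null:'

def normalise_parent_map(raw_map, requested_ids):
    result = {}
    for revid, parents in raw_map.items():
        # A revision with no recorded parents descends from NULL_REVISION.
        result[revid] = parents if parents else (NULL_REVISION,)
    for revid in requested_ids:
        if revid == NULL_REVISION:
            result[revid] = ()
    return result

# 'rev-ghost' is absent from raw_map, so it silently drops out of the result.
raw_map = {'rev-a': (), 'rev-b': ('rev-a',)}
print normalise_parent_map(
    raw_map, ['rev-a', 'rev-b', 'rev-ghost', NULL_REVISION])
# (key order may vary)
# {'rev-a': ('null:',), 'rev-b': ('rev-a',), 'null:': ()}
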
diff --git a/bzrlib/repofmt/pack_repo.py b/bzrlib/repofmt/pack_repo.py
new file mode 100644
index 0000000..d513d95
--- /dev/null
+++ b/bzrlib/repofmt/pack_repo.py
@@ -0,0 +1,2091 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import re
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from itertools import izip
+import time
+
+from bzrlib import (
+ chk_map,
+ cleanup,
+ config,
+ debug,
+ graph,
+ osutils,
+ pack,
+ transactions,
+ tsort,
+ ui,
+ )
+from bzrlib.index import (
+ CombinedGraphIndex,
+ GraphIndexPrefixAdapter,
+ )
+""")
+from bzrlib import (
+ btree_index,
+ errors,
+ lockable_files,
+ lockdir,
+ )
+
+from bzrlib.decorators import (
+ needs_read_lock,
+ needs_write_lock,
+ only_raises,
+ )
+from bzrlib.lock import LogicalLockResult
+from bzrlib.repository import (
+ _LazyListJoin,
+ MetaDirRepository,
+ RepositoryFormatMetaDir,
+ RepositoryWriteLockResult,
+ )
+from bzrlib.vf_repository import (
+ MetaDirVersionedFileRepository,
+ MetaDirVersionedFileRepositoryFormat,
+ VersionedFileCommitBuilder,
+ VersionedFileRootCommitBuilder,
+ )
+from bzrlib.trace import (
+ mutter,
+ note,
+ warning,
+ )
+
+
+class PackCommitBuilder(VersionedFileCommitBuilder):
+ """Subclass of VersionedFileCommitBuilder to add texts with pack semantics.
+
+ Specifically this uses one knit object rather than one knit object per
+ added text, reducing memory and object pressure.
+ """
+
+ def __init__(self, repository, parents, config, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ VersionedFileCommitBuilder.__init__(self, repository, parents, config,
+ timestamp=timestamp, timezone=timezone, committer=committer,
+ revprops=revprops, revision_id=revision_id, lossy=lossy)
+ self._file_graph = graph.Graph(
+ repository._pack_collection.text_index.combined_index)
+
+ def _heads(self, file_id, revision_ids):
+ keys = [(file_id, revision_id) for revision_id in revision_ids]
+ return set([key[1] for key in self._file_graph.heads(keys)])
+
+
+class PackRootCommitBuilder(VersionedFileRootCommitBuilder):
+ """A subclass of RootCommitBuilder to add texts with pack semantics.
+
+ Specifically this uses one knit object rather than one knit object per
+ added text, reducing memory and object pressure.
+ """
+
+ def __init__(self, repository, parents, config, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ super(PackRootCommitBuilder, self).__init__(repository, parents,
+ config, timestamp=timestamp, timezone=timezone,
+ committer=committer, revprops=revprops, revision_id=revision_id,
+ lossy=lossy)
+ self._file_graph = graph.Graph(
+ repository._pack_collection.text_index.combined_index)
+
+ def _heads(self, file_id, revision_ids):
+ keys = [(file_id, revision_id) for revision_id in revision_ids]
+ return set([key[1] for key in self._file_graph.heads(keys)])
+
+
+class Pack(object):
+ """An in memory proxy for a pack and its indices.
+
+ This is a base class that is not directly used, instead the classes
+ ExistingPack and NewPack are used.
+ """
+
+ # A map of index 'type' to the file extension and position in the
+ # index_sizes array.
+ index_definitions = {
+ 'chk': ('.cix', 4),
+ 'revision': ('.rix', 0),
+ 'inventory': ('.iix', 1),
+ 'text': ('.tix', 2),
+ 'signature': ('.six', 3),
+ }
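+ # For example, the revision index for pack '0123abcd' lives in
+ # '0123abcd.rix' and its size is stored at index_sizes[0].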
+
+ def __init__(self, revision_index, inventory_index, text_index,
+ signature_index, chk_index=None):
+ """Create a pack instance.
+
+ :param revision_index: A GraphIndex for determining what revisions are
+ present in the Pack and accessing the locations of their texts.
+ :param inventory_index: A GraphIndex for determining what inventories are
+ present in the Pack and accessing the locations of their
+ texts/deltas.
+ :param text_index: A GraphIndex for determining what file texts
+ are present in the pack and accessing the locations of their
+ texts/deltas (via (fileid, revisionid) tuples).
+ :param signature_index: A GraphIndex for determining what signatures are
+ present in the Pack and accessing the locations of their texts.
+ :param chk_index: A GraphIndex for accessing content by CHK, if the
+ pack has one.
+ """
+ self.revision_index = revision_index
+ self.inventory_index = inventory_index
+ self.text_index = text_index
+ self.signature_index = signature_index
+ self.chk_index = chk_index
+
+ def access_tuple(self):
+ """Return a tuple (transport, name) for the pack content."""
+ return self.pack_transport, self.file_name()
+
+ def _check_references(self):
+ """Make sure our external references are present.
+
+ Packs are allowed to have deltas whose base is not in the pack, but it
+ must be present somewhere in this collection. It is not allowed to
+ have deltas based on a fallback repository.
+ (See <https://bugs.launchpad.net/bzr/+bug/288751>)
+ """
+ missing_items = {}
+ for (index_name, external_refs, index) in [
+ ('texts',
+ self._get_external_refs(self.text_index),
+ self._pack_collection.text_index.combined_index),
+ ('inventories',
+ self._get_external_refs(self.inventory_index),
+ self._pack_collection.inventory_index.combined_index),
+ ]:
+ missing = external_refs.difference(
+ k for (idx, k, v, r) in
+ index.iter_entries(external_refs))
+ if missing:
+ missing_items[index_name] = sorted(list(missing))
+ if missing_items:
+ from pprint import pformat
+ raise errors.BzrCheckError(
+ "Newly created pack file %r has delta references to "
+ "items not in its repository:\n%s"
+ % (self, pformat(missing_items)))
+
+ def file_name(self):
+ """Get the file name for the pack on disk."""
+ return self.name + '.pack'
+
+ def get_revision_count(self):
+ return self.revision_index.key_count()
+
+ def index_name(self, index_type, name):
+ """Get the disk name of an index type for pack name 'name'."""
+ return name + Pack.index_definitions[index_type][0]
+
+ def index_offset(self, index_type):
+ """Get the position in a index_size array for a given index type."""
+ return Pack.index_definitions[index_type][1]
+
+ def inventory_index_name(self, name):
+ """The inv index is the name + .iix."""
+ return self.index_name('inventory', name)
+
+ def revision_index_name(self, name):
+ """The revision index is the name + .rix."""
+ return self.index_name('revision', name)
+
+ def signature_index_name(self, name):
+ """The signature index is the name + .six."""
+ return self.index_name('signature', name)
+
+ def text_index_name(self, name):
+ """The text index is the name + .tix."""
+ return self.index_name('text', name)
+
+ def _replace_index_with_readonly(self, index_type):
+ unlimited_cache = False
+ if index_type == 'chk':
+ unlimited_cache = True
+ index = self.index_class(self.index_transport,
+ self.index_name(index_type, self.name),
+ self.index_sizes[self.index_offset(index_type)],
+ unlimited_cache=unlimited_cache)
+ if index_type == 'chk':
+ index._leaf_factory = btree_index._gcchk_factory
+ setattr(self, index_type + '_index', index)
+
+
+class ExistingPack(Pack):
+ """An in memory proxy for an existing .pack and its disk indices."""
+
+ def __init__(self, pack_transport, name, revision_index, inventory_index,
+ text_index, signature_index, chk_index=None):
+ """Create an ExistingPack object.
+
+ :param pack_transport: The transport where the pack file resides.
+ :param name: The name of the pack on disk in the pack_transport.
+ """
+ Pack.__init__(self, revision_index, inventory_index, text_index,
+ signature_index, chk_index)
+ self.name = name
+ self.pack_transport = pack_transport
+ if None in (revision_index, inventory_index, text_index,
+ signature_index, name, pack_transport):
+ raise AssertionError()
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __repr__(self):
+ return "<%s.%s object at 0x%x, %s, %s" % (
+ self.__class__.__module__, self.__class__.__name__, id(self),
+ self.pack_transport, self.name)
+
+
+class ResumedPack(ExistingPack):
+
+ def __init__(self, name, revision_index, inventory_index, text_index,
+ signature_index, upload_transport, pack_transport, index_transport,
+ pack_collection, chk_index=None):
+ """Create a ResumedPack object."""
+ ExistingPack.__init__(self, pack_transport, name, revision_index,
+ inventory_index, text_index, signature_index,
+ chk_index=chk_index)
+ self.upload_transport = upload_transport
+ self.index_transport = index_transport
+ self.index_sizes = [None, None, None, None]
+ indices = [
+ ('revision', revision_index),
+ ('inventory', inventory_index),
+ ('text', text_index),
+ ('signature', signature_index),
+ ]
+ if chk_index is not None:
+ indices.append(('chk', chk_index))
+ self.index_sizes.append(None)
+ for index_type, index in indices:
+ offset = self.index_offset(index_type)
+ self.index_sizes[offset] = index._size
+ self.index_class = pack_collection._index_class
+ self._pack_collection = pack_collection
+ self._state = 'resumed'
+ # XXX: perhaps check that the .pack file exists?
+
+ def access_tuple(self):
+ if self._state == 'finished':
+ return Pack.access_tuple(self)
+ elif self._state == 'resumed':
+ return self.upload_transport, self.file_name()
+ else:
+ raise AssertionError(self._state)
+
+ def abort(self):
+ self.upload_transport.delete(self.file_name())
+ indices = [self.revision_index, self.inventory_index, self.text_index,
+ self.signature_index]
+ if self.chk_index is not None:
+ indices.append(self.chk_index)
+ for index in indices:
+ index._transport.delete(index._name)
+
+ def finish(self):
+ self._check_references()
+ index_types = ['revision', 'inventory', 'text', 'signature']
+ if self.chk_index is not None:
+ index_types.append('chk')
+ for index_type in index_types:
+ old_name = self.index_name(index_type, self.name)
+ new_name = '../indices/' + old_name
+ self.upload_transport.move(old_name, new_name)
+ self._replace_index_with_readonly(index_type)
+ new_name = '../packs/' + self.file_name()
+ self.upload_transport.move(self.file_name(), new_name)
+ self._state = 'finished'
+
+ def _get_external_refs(self, index):
+ """Return compression parents for this index that are not present.
+
+ This returns any compression parents that are referenced by this index,
+ which are not contained *in* this index. They may be present elsewhere.
+ """
+ return index.external_references(1)
+
+
+class NewPack(Pack):
+ """An in memory proxy for a pack which is being created."""
+
+ def __init__(self, pack_collection, upload_suffix='', file_mode=None):
+ """Create a NewPack instance.
+
+ :param pack_collection: A PackCollection into which this is being inserted.
+ :param upload_suffix: An optional suffix to be given to any temporary
+ files created during the pack creation. e.g. '.autopack'
+ :param file_mode: Unix permissions for newly created file.
+ """
+ # The relative locations of the packs are constrained, but all are
+ # passed in because the caller has them, so as to avoid object churn.
+ index_builder_class = pack_collection._index_builder_class
+ if pack_collection.chk_index is not None:
+ chk_index = index_builder_class(reference_lists=0)
+ else:
+ chk_index = None
+ Pack.__init__(self,
+ # Revisions: parents list, no text compression.
+ index_builder_class(reference_lists=1),
+ # Inventory: We want to map compression only, but currently the
+ # knit code hasn't been updated enough to understand that, so we
+ # have a regular 2-list index giving parents and compression
+ # source.
+ index_builder_class(reference_lists=2),
+ # Texts: compression and per file graph, for all fileids - so two
+ # reference lists and two elements in the key tuple.
+ index_builder_class(reference_lists=2, key_elements=2),
+ # Signatures: Just blobs to store, no compression, no parents
+ # listing.
+ index_builder_class(reference_lists=0),
+ # CHK based storage - just blobs, no compression or parents.
+ chk_index=chk_index
+ )
+ self._pack_collection = pack_collection
+ # When we make readonly indices, we need this.
+ self.index_class = pack_collection._index_class
+ # where should the new pack be opened
+ self.upload_transport = pack_collection._upload_transport
+ # where are indices written out to
+ self.index_transport = pack_collection._index_transport
+ # where is the pack renamed to when it is finished?
+ self.pack_transport = pack_collection._pack_transport
+ # What file mode to upload the pack and indices with.
+ self._file_mode = file_mode
+ # tracks the content written to the .pack file.
+ self._hash = osutils.md5()
+ # a tuple with the length in bytes of the indices, once the pack
+ # is finalised. (rev, inv, text, sigs, chk_if_in_use)
+ self.index_sizes = None
+ # How much data to cache when writing packs. Note that this is not
+ # synchronised with reads, because it's not in the transport layer, so
+ # is not safe unless the client knows it won't be reading from the pack
+ # under creation.
+ self._cache_limit = 0
+ # the temporary pack file name.
+ self.random_name = osutils.rand_chars(20) + upload_suffix
+ # when was this pack started ?
+ self.start_time = time.time()
+ # open an output stream for the data added to the pack.
+ self.write_stream = self.upload_transport.open_write_stream(
+ self.random_name, mode=self._file_mode)
+ if 'pack' in debug.debug_flags:
+ mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
+ time.ctime(), self.upload_transport.base, self.random_name,
+ time.time() - self.start_time)
+ # A list of byte sequences to be written to the new pack, and the
+ # aggregate size of them. Stored as a list rather than separate
+ # variables so that the _write_data closure below can update them.
+ self._buffer = [[], 0]
+ # create a callable for adding data
+ #
+ # robertc says- this is a closure rather than a method on the object
+ # so that the variables are locals, and faster than accessing object
+ # members.
+ def _write_data(bytes, flush=False, _buffer=self._buffer,
+ _write=self.write_stream.write, _update=self._hash.update):
+ _buffer[0].append(bytes)
+ _buffer[1] += len(bytes)
+ # buffer cap
+ if _buffer[1] > self._cache_limit or flush:
+ bytes = ''.join(_buffer[0])
+ _write(bytes)
+ _update(bytes)
+ _buffer[:] = [[], 0]
+ # expose this on self, for the occasion when clients want to add data.
+ self._write_data = _write_data
+ # a pack writer object to serialise pack records.
+ self._writer = pack.ContainerWriter(self._write_data)
+ self._writer.begin()
+ # what state is the pack in? (open, finished, aborted)
+ self._state = 'open'
+ # no name until we finish writing the content
+ self.name = None
+
+ def abort(self):
+ """Cancel creating this pack."""
+ self._state = 'aborted'
+ self.write_stream.close()
+ # Remove the temporary pack file.
+ self.upload_transport.delete(self.random_name)
+ # The indices have no state on disk.
+
+ def access_tuple(self):
+ """Return a tuple (transport, name) for the pack content."""
+ if self._state == 'finished':
+ return Pack.access_tuple(self)
+ elif self._state == 'open':
+ return self.upload_transport, self.random_name
+ else:
+ raise AssertionError(self._state)
+
+ def data_inserted(self):
+ """True if data has been added to this pack."""
+ return bool(self.get_revision_count() or
+ self.inventory_index.key_count() or
+ self.text_index.key_count() or
+ self.signature_index.key_count() or
+ (self.chk_index is not None and self.chk_index.key_count()))
+
+ def finish_content(self):
+ if self.name is not None:
+ return
+ self._writer.end()
+ if self._buffer[1]:
+ self._write_data('', flush=True)
+ self.name = self._hash.hexdigest()
+
+ def finish(self, suspend=False):
+ """Finish the new pack.
+
+ This:
+ - finalises the content
+ - assigns a name (the md5 of the content, currently)
+ - writes out the associated indices
+ - renames the pack into place.
+ - stores the index size tuple for the pack in the index_sizes
+ attribute.
+ """
+ self.finish_content()
+ if not suspend:
+ self._check_references()
+ # write indices
+ # XXX: It'd be better to write them all to temporary names, then
+ # rename them all into place, so that the window when only some are
+ # visible is smaller. On the other hand none will be seen until
+ # they're in the names list.
+ self.index_sizes = [None, None, None, None]
+ self._write_index('revision', self.revision_index, 'revision',
+ suspend)
+ self._write_index('inventory', self.inventory_index, 'inventory',
+ suspend)
+ self._write_index('text', self.text_index, 'file texts', suspend)
+ self._write_index('signature', self.signature_index,
+ 'revision signatures', suspend)
+ if self.chk_index is not None:
+ self.index_sizes.append(None)
+ self._write_index('chk', self.chk_index,
+ 'content hash bytes', suspend)
+ self.write_stream.close(
+ want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
+ # Note that this will clobber an existing pack with the same name,
+ # without checking for hash collisions. While this is undesirable this
+ # is something that can be rectified in a subsequent release. One way
+ # to rectify it may be to leave the pack at the original name, writing
+ # its pack-names entry as something like 'HASH: index-sizes
+ # temporary-name'. Allocate that and check for collisions, if it is
+ # collision free then rename it into place. If clients know this scheme
+ # they can handle missing-file errors by:
+ # - try for HASH.pack
+ # - try for temporary-name
+ # - refresh the pack-list to see if the pack is now absent
+ new_name = self.name + '.pack'
+ if not suspend:
+ new_name = '../packs/' + new_name
+ self.upload_transport.move(self.random_name, new_name)
+ self._state = 'finished'
+ if 'pack' in debug.debug_flags:
+ # XXX: size might be interesting?
+ mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
+ time.ctime(), self.upload_transport.base, self.random_name,
+ new_name, time.time() - self.start_time)
+
+ def flush(self):
+ """Flush any current data."""
+ if self._buffer[1]:
+ bytes = ''.join(self._buffer[0])
+ self.write_stream.write(bytes)
+ self._hash.update(bytes)
+ self._buffer[:] = [[], 0]
+
+ def _get_external_refs(self, index):
+ return index._external_references()
+
+ def set_write_cache_size(self, size):
+ self._cache_limit = size
+
+ def _write_index(self, index_type, index, label, suspend=False):
+ """Write out an index.
+
+ :param index_type: The type of index to write - e.g. 'revision'.
+ :param index: The index object to serialise.
+ :param label: What label to give the index e.g. 'revision'.
+ """
+ index_name = self.index_name(index_type, self.name)
+ if suspend:
+ transport = self.upload_transport
+ else:
+ transport = self.index_transport
+ index_tempfile = index.finish()
+ index_bytes = index_tempfile.read()
+ write_stream = transport.open_write_stream(index_name,
+ mode=self._file_mode)
+ write_stream.write(index_bytes)
+ write_stream.close(
+ want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
+ self.index_sizes[self.index_offset(index_type)] = len(index_bytes)
+ if 'pack' in debug.debug_flags:
+ # XXX: size might be interesting?
+ mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
+ time.ctime(), label, self.upload_transport.base,
+ self.random_name, time.time() - self.start_time)
+ # Replace the writable index on this object with a readonly,
+ # presently unloaded index. We should alter
+ # the index layer to make its finish() error if add_node is
+ # subsequently used. RBC
+ self._replace_index_with_readonly(index_type)
+
+
+class AggregateIndex(object):
+ """An aggregated index for the RepositoryPackCollection.
+
+ AggregateIndex is responsible for managing the PackAccess object,
+ Index-To-Pack mapping, and all indices list for a specific type of index
+ such as 'revision index'.
+
+ A CombinedIndex provides an index on a single key space built up
+ from several on-disk indices. The AggregateIndex builds on this
+ to provide a knit access layer, and allows having up to one writable
+ index within the collection.
+ """
+ # XXX: Probably 'can be written to' could/should be separated from 'acts
+ # like a knit index' -- mbp 20071024
+
+ def __init__(self, reload_func=None, flush_func=None):
+ """Create an AggregateIndex.
+
+ :param reload_func: A function to call if we find we are missing an
+ index. Should have the form reload_func() => True if the list of
+ active pack files has changed.
+ :param flush_func: A function to call to flush any data buffered for
+ the pack currently being written, so that reads through the data
+ access object can see it. May be None.
+ """
+ self._reload_func = reload_func
+ self.index_to_pack = {}
+ self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
+ self.data_access = _DirectPackAccess(self.index_to_pack,
+ reload_func=reload_func,
+ flush_func=flush_func)
+ self.add_callback = None
+
+ def add_index(self, index, pack):
+ """Add index to the aggregate, which is an index for Pack pack.
+
+ Future searches on the aggregate index will search this new index
+ before all previously inserted indices.
+
+ :param index: An Index for the pack.
+ :param pack: A Pack instance.
+ """
+ # expose it to the index map
+ self.index_to_pack[index] = pack.access_tuple()
+ # put it at the front of the linear index list
+ self.combined_index.insert_index(0, index, pack.name)
+
+ def add_writable_index(self, index, pack):
+ """Add an index which is able to have data added to it.
+
+ There can be at most one writable index at any time. Any
+ modifications made to the knit are put into this index.
+
+ :param index: An index from the pack parameter.
+ :param pack: A Pack instance.
+ """
+ if self.add_callback is not None:
+ raise AssertionError(
+ "%s already has a writable index through %s" % \
+ (self, self.add_callback))
+ # allow writing: queue writes to a new index
+ self.add_index(index, pack)
+ # Updates the index to packs mapping as a side effect,
+ self.data_access.set_writer(pack._writer, index, pack.access_tuple())
+ self.add_callback = index.add_nodes
+
+ def clear(self):
+ """Reset all the aggregate data to nothing."""
+ self.data_access.set_writer(None, None, (None, None))
+ self.index_to_pack.clear()
+ del self.combined_index._indices[:]
+ del self.combined_index._index_names[:]
+ self.add_callback = None
+
+ def remove_index(self, index):
+ """Remove index from the indices used to answer queries.
+
+ :param index: An index from the pack parameter.
+ """
+ del self.index_to_pack[index]
+ pos = self.combined_index._indices.index(index)
+ del self.combined_index._indices[pos]
+ del self.combined_index._index_names[pos]
+ if (self.add_callback is not None and
+ getattr(index, 'add_nodes', None) == self.add_callback):
+ self.add_callback = None
+ self.data_access.set_writer(None, None, (None, None))
+
+
+class Packer(object):
+ """Create a pack from packs."""
+
+ def __init__(self, pack_collection, packs, suffix, revision_ids=None,
+ reload_func=None):
+ """Create a Packer.
+
+ :param pack_collection: A RepositoryPackCollection object where the
+ new pack is being written to.
+ :param packs: The packs to combine.
+ :param suffix: The suffix to use on the temporary files for the pack.
+ :param revision_ids: Revision ids to limit the pack to.
+ :param reload_func: A function to call if a pack file/index goes
+ missing. The side effect of calling this function should be to
+ update self.packs. See also AggregateIndex
+ """
+ self.packs = packs
+ self.suffix = suffix
+ self.revision_ids = revision_ids
+ # The pack object we are creating.
+ self.new_pack = None
+ self._pack_collection = pack_collection
+ self._reload_func = reload_func
+ # The index layer keys for the revisions being copied. None for 'all
+ # objects'.
+ self._revision_keys = None
+ # What text keys to copy. None for 'all texts'. This is set by
+ # _copy_inventory_texts
+ self._text_filter = None
+
+ def pack(self, pb=None):
+ """Create a new pack by reading data from other packs.
+
+ This does little more than a bulk copy of data. One key difference
+ is that data with the same item key across multiple packs is elided
+ from the output. The new pack is written into the current pack store
+ along with its indices, and the name added to the pack names. The
+ source packs are not altered and are not required to be in the current
+ pack collection.
+
+ :param pb: An optional progress bar to use. A nested bar is created if
+ this is None.
+ :return: A Pack object, or None if nothing was copied.
+ """
+ # open a pack - using the same name as the last temporary file
+ # - which has already been flushed, so it's safe.
+ # XXX: - duplicate code warning with start_write_group; fix before
+ # considering 'done'.
+ if self._pack_collection._new_pack is not None:
+ raise errors.BzrError('call to %s.pack() while another pack is'
+ ' being written.'
+ % (self.__class__.__name__,))
+ if self.revision_ids is not None:
+ if len(self.revision_ids) == 0:
+ # silly fetch request.
+ return None
+ else:
+ self.revision_ids = frozenset(self.revision_ids)
+ self.revision_keys = frozenset((revid,) for revid in
+ self.revision_ids)
+ if pb is None:
+ self.pb = ui.ui_factory.nested_progress_bar()
+ else:
+ self.pb = pb
+ try:
+ return self._create_pack_from_packs()
+ finally:
+ if pb is None:
+ self.pb.finished()
+
+ def open_pack(self):
+ """Open a pack for the pack we are creating."""
+ new_pack = self._pack_collection.pack_factory(self._pack_collection,
+ upload_suffix=self.suffix,
+ file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
+ # We know that we will process all nodes in order, and don't need to
+ # query, so don't combine any indices spilled to disk until we are done
+ new_pack.revision_index.set_optimize(combine_backing_indices=False)
+ new_pack.inventory_index.set_optimize(combine_backing_indices=False)
+ new_pack.text_index.set_optimize(combine_backing_indices=False)
+ new_pack.signature_index.set_optimize(combine_backing_indices=False)
+ return new_pack
+
+ def _copy_revision_texts(self):
+ """Copy revision data to the new pack."""
+ raise NotImplementedError(self._copy_revision_texts)
+
+ def _copy_inventory_texts(self):
+ """Copy the inventory texts to the new pack.
+
+ self._revision_keys is used to determine what inventories to copy.
+
+ Sets self._text_filter appropriately.
+ """
+ raise NotImplementedError(self._copy_inventory_texts)
+
+ def _copy_text_texts(self):
+ raise NotImplementedError(self._copy_text_texts)
+
+ def _create_pack_from_packs(self):
+ raise NotImplementedError(self._create_pack_from_packs)
+
+ def _log_copied_texts(self):
+ if 'pack' in debug.debug_flags:
+ mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
+ time.ctime(), self._pack_collection._upload_transport.base,
+ self.new_pack.random_name,
+ self.new_pack.text_index.key_count(),
+ time.time() - self.new_pack.start_time)
+
+ def _use_pack(self, new_pack):
+ """Return True if new_pack should be used.
+
+ :param new_pack: The pack that has just been created.
+ :return: True if the pack should be used.
+ """
+ return new_pack.data_inserted()
+
+
+class RepositoryPackCollection(object):
+ """Management of packs within a repository.
+
+ :ivar _names: map of {pack_name: (index_size,)}
+ """
+
+ pack_factory = None
+ resumed_pack_factory = None
+ normal_packer_class = None
+ optimising_packer_class = None
+
+ def __init__(self, repo, transport, index_transport, upload_transport,
+ pack_transport, index_builder_class, index_class,
+ use_chk_index):
+ """Create a new RepositoryPackCollection.
+
+ :param transport: Addresses the repository base directory
+ (typically .bzr/repository/).
+ :param index_transport: Addresses the directory containing indices.
+ :param upload_transport: Addresses the directory into which packs are written
+ while they're being created.
+ :param pack_transport: Addresses the directory of existing complete packs.
+ :param index_builder_class: The index builder class to use.
+ :param index_class: The index class to use.
+ :param use_chk_index: Whether to set up and manage a CHK index.
+ """
+ # XXX: This should call self.reset()
+ self.repo = repo
+ self.transport = transport
+ self._index_transport = index_transport
+ self._upload_transport = upload_transport
+ self._pack_transport = pack_transport
+ self._index_builder_class = index_builder_class
+ self._index_class = index_class
+ self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
+ '.cix': 4}
+ self.packs = []
+ # name:Pack mapping
+ self._names = None
+ self._packs_by_name = {}
+ # the previous pack-names content
+ self._packs_at_load = None
+ # when a pack is being created by this object, the state of that pack.
+ self._new_pack = None
+ # aggregated revision index data
+ flush = self._flush_new_pack
+ self.revision_index = AggregateIndex(self.reload_pack_names, flush)
+ self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
+ self.text_index = AggregateIndex(self.reload_pack_names, flush)
+ self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+ all_indices = [self.revision_index, self.inventory_index,
+ self.text_index, self.signature_index]
+ if use_chk_index:
+ self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+ all_indices.append(self.chk_index)
+ else:
+ # used to determine if we're using a chk_index elsewhere.
+ self.chk_index = None
+ # Tell all the CombinedGraphIndex objects about each other, so they can
+ # share hints about which pack names to search first.
+ all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+ for combined_idx in all_combined:
+ combined_idx.set_sibling_indices(
+ set(all_combined).difference([combined_idx]))
+ # resumed packs
+ self._resumed_packs = []
+ self.config_stack = config.LocationStack(self.transport.base)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, self.repo)
+
+ def add_pack_to_memory(self, pack):
+ """Make a Pack object available to the repository to satisfy queries.
+
+ :param pack: A Pack object.
+ """
+ if pack.name in self._packs_by_name:
+ raise AssertionError(
+ 'pack %s already in _packs_by_name' % (pack.name,))
+ self.packs.append(pack)
+ self._packs_by_name[pack.name] = pack
+ self.revision_index.add_index(pack.revision_index, pack)
+ self.inventory_index.add_index(pack.inventory_index, pack)
+ self.text_index.add_index(pack.text_index, pack)
+ self.signature_index.add_index(pack.signature_index, pack)
+ if self.chk_index is not None:
+ self.chk_index.add_index(pack.chk_index, pack)
+
+ def all_packs(self):
+ """Return a list of all the Pack objects this repository has.
+
+ Note that an in-progress pack being created is not returned.
+
+ :return: A list of Pack objects for all the packs in the repository.
+ """
+ result = []
+ for name in self.names():
+ result.append(self.get_pack_by_name(name))
+ return result
+
+ def autopack(self):
+ """Pack the pack collection incrementally.
+
+ This will not attempt global reorganisation or recompression,
+ rather it will just ensure that the total number of packs does
+ not grow without bound. It uses the _max_pack_count method to
+ determine if autopacking is needed, and the pack_distribution
+ method to determine the number of revisions in each pack.
+
+ If autopacking takes place then the packs name collection will have
+ been flushed to disk - packing requires updating the name collection
+ in synchronisation with certain steps. Otherwise the names collection
+ is not flushed.
+
+ :return: Something evaluating true if packing took place.
+ """
+ while True:
+ try:
+ return self._do_autopack()
+ except errors.RetryAutopack:
+ # If we get a RetryAutopack exception, we should abort the
+ # current action, and retry.
+ pass
+
+ def _do_autopack(self):
+ # XXX: Should not be needed when the management of indices is sane.
+ total_revisions = self.revision_index.combined_index.key_count()
+ total_packs = len(self._names)
+ if self._max_pack_count(total_revisions) >= total_packs:
+ return None
+ # determine which packs need changing
+ pack_distribution = self.pack_distribution(total_revisions)
+ existing_packs = []
+ for pack in self.all_packs():
+ revision_count = pack.get_revision_count()
+ if revision_count == 0:
+ # revisionless packs are not generated by normal operation,
+ # only by operations like sign-my-commits, and thus will not
+ # tend to grow rapidly or without bound like commit-containing
+ # packs do - leave them alone as packing them really should
+ # group their data with the relevant commit, and that may
+ # involve rewriting ancient history - which autopack tries to
+ # avoid. Alternatively we could not group the data but treat
+ # each of these as having a single revision, and thus add
+ # one revision for each to the total revision count, to get
+ # a matching distribution.
+ continue
+ existing_packs.append((revision_count, pack))
+ pack_operations = self.plan_autopack_combinations(
+ existing_packs, pack_distribution)
+ num_new_packs = len(pack_operations)
+ num_old_packs = sum([len(po[1]) for po in pack_operations])
+ num_revs_affected = sum([po[0] for po in pack_operations])
+ mutter('Auto-packing repository %s, which has %d pack files, '
+ 'containing %d revisions. Packing %d files into %d affecting %d'
+ ' revisions', self, total_packs, total_revisions, num_old_packs,
+ num_new_packs, num_revs_affected)
+ result = self._execute_pack_operations(pack_operations, packer_class=self.normal_packer_class,
+ reload_func=self._restart_autopack)
+ mutter('Auto-packing repository %s completed', self)
+ return result
+
+ def _execute_pack_operations(self, pack_operations, packer_class,
+ reload_func=None):
+ """Execute a series of pack operations.
+
+ :param pack_operations: A list of [revision_count, packs_to_combine].
+ :param packer_class: The class of packer to use
+ :return: The new pack names.
+ """
+ for revision_count, packs in pack_operations:
+ # we may have no-ops from the setup logic
+ if len(packs) == 0:
+ continue
+ packer = packer_class(self, packs, '.autopack',
+ reload_func=reload_func)
+ try:
+ result = packer.pack()
+ except errors.RetryWithNewPacks:
+ # An exception is propagating out of this context, make sure
+ # this packer has cleaned up. Packer() doesn't set its new_pack
+ # state into the RepositoryPackCollection object, so we only
+ # have access to it directly here.
+ if packer.new_pack is not None:
+ packer.new_pack.abort()
+ raise
+ if result is None:
+ return
+ for pack in packs:
+ self._remove_pack_from_memory(pack)
+ # record the newly available packs and stop advertising the old
+ # packs
+ to_be_obsoleted = []
+ for _, packs in pack_operations:
+ to_be_obsoleted.extend(packs)
+ result = self._save_pack_names(clear_obsolete_packs=True,
+ obsolete_packs=to_be_obsoleted)
+ return result
+
+ def _flush_new_pack(self):
+ if self._new_pack is not None:
+ self._new_pack.flush()
+
+ def lock_names(self):
+ """Acquire the mutex around the pack-names index.
+
+ This cannot be used in the middle of a read-only transaction on the
+ repository.
+ """
+ self.repo.control_files.lock_write()
+
+ def _already_packed(self):
+ """Is the collection already packed?"""
+ return not (self.repo._format.pack_compresses or (len(self._names) > 1))
+
+ def pack(self, hint=None, clean_obsolete_packs=False):
+ """Pack the pack collection totally."""
+ self.ensure_loaded()
+ total_packs = len(self._names)
+ if self._already_packed():
+ return
+ total_revisions = self.revision_index.combined_index.key_count()
+ # XXX: the following may want to be a class, to pack with a given
+ # policy.
+ mutter('Packing repository %s, which has %d pack files, '
+ 'containing %d revisions with hint %r.', self, total_packs,
+ total_revisions, hint)
+ while True:
+ try:
+ self._try_pack_operations(hint)
+ except RetryPackOperations:
+ continue
+ break
+
+ if clean_obsolete_packs:
+ self._clear_obsolete_packs()
+
+ def _try_pack_operations(self, hint):
+ """Calculate the pack operations based on the hint (if any), and
+ execute them.
+ """
+ # determine which packs need changing
+ pack_operations = [[0, []]]
+ for pack in self.all_packs():
+ if hint is None or pack.name in hint:
+ # Either no hint was provided (so we are packing everything),
+ # or this pack was included in the hint.
+ pack_operations[-1][0] += pack.get_revision_count()
+ pack_operations[-1][1].append(pack)
+ self._execute_pack_operations(pack_operations,
+ packer_class=self.optimising_packer_class,
+ reload_func=self._restart_pack_operations)
+
+ def plan_autopack_combinations(self, existing_packs, pack_distribution):
+ """Plan a pack operation.
+
+ :param existing_packs: The packs to pack. (A list of (revcount, Pack)
+ tuples).
+ :param pack_distribution: A list with the number of revisions desired
+ in each pack.
+ """
+ if len(existing_packs) <= len(pack_distribution):
+ return []
+ existing_packs.sort(reverse=True)
+ pack_operations = [[0, []]]
+ # plan out what packs to keep, and what to reorganise
+ while len(existing_packs):
+ # take the largest pack, and if it's less than the head of the
+ # distribution chart we will include its contents in the new pack
+ # for that position. If it's larger, we remove its size from the
+ # distribution chart
+ next_pack_rev_count, next_pack = existing_packs.pop(0)
+ if next_pack_rev_count >= pack_distribution[0]:
+ # this is already packed 'better' than this, so we can
+ # not waste time packing it.
+ while next_pack_rev_count > 0:
+ next_pack_rev_count -= pack_distribution[0]
+ if next_pack_rev_count >= 0:
+ # more to go
+ del pack_distribution[0]
+ else:
+ # didn't use that entire bucket up
+ pack_distribution[0] = -next_pack_rev_count
+ else:
+ # add the revisions we're going to add to the next output pack
+ pack_operations[-1][0] += next_pack_rev_count
+ # allocate this pack to the next pack sub operation
+ pack_operations[-1][1].append(next_pack)
+ if pack_operations[-1][0] >= pack_distribution[0]:
+ # this pack is used up, shift left.
+ del pack_distribution[0]
+ pack_operations.append([0, []])
+ # Now that we know which pack files we want to move, shove them all
+ # into a single pack file.
+ final_rev_count = 0
+ final_pack_list = []
+ for num_revs, pack_files in pack_operations:
+ final_rev_count += num_revs
+ final_pack_list.extend(pack_files)
+ if len(final_pack_list) == 1:
+ raise AssertionError('We somehow generated an autopack with a'
+ ' single pack file being moved.')
+ return []
+ return [[final_rev_count, final_pack_list]]
+
+ def ensure_loaded(self):
+ """Ensure we have read names from disk.
+
+ :return: True if the disk names had not been previously read.
+ """
+ # NB: if you see an assertion error here, it's probably access against
+ # an unlocked repo. Naughty.
+ if not self.repo.is_locked():
+ raise errors.ObjectNotLocked(self.repo)
+ if self._names is None:
+ self._names = {}
+ self._packs_at_load = set()
+ for index, key, value in self._iter_disk_pack_index():
+ name = key[0]
+ self._names[name] = self._parse_index_sizes(value)
+ self._packs_at_load.add((key, value))
+ result = True
+ else:
+ result = False
+ # populate all the metadata.
+ self.all_packs()
+ return result
+
+ def _parse_index_sizes(self, value):
+ """Parse a string of index sizes."""
+ return tuple([int(digits) for digits in value.split(' ')])
+
+ def get_pack_by_name(self, name):
+ """Get a Pack object by name.
+
+ :param name: The name of the pack - e.g. '123456'
+ :return: A Pack object.
+ """
+ try:
+ return self._packs_by_name[name]
+ except KeyError:
+ rev_index = self._make_index(name, '.rix')
+ inv_index = self._make_index(name, '.iix')
+ txt_index = self._make_index(name, '.tix')
+ sig_index = self._make_index(name, '.six')
+ if self.chk_index is not None:
+ chk_index = self._make_index(name, '.cix', is_chk=True)
+ else:
+ chk_index = None
+ result = ExistingPack(self._pack_transport, name, rev_index,
+ inv_index, txt_index, sig_index, chk_index)
+ self.add_pack_to_memory(result)
+ return result
+
+ def _resume_pack(self, name):
+ """Get a suspended Pack object by name.
+
+ :param name: The name of the pack - e.g. '123456'
+ :return: A Pack object.
+ """
+ if not re.match('[a-f0-9]{32}', name):
+ # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
+ # digits.
+ raise errors.UnresumableWriteGroup(
+ self.repo, [name], 'Malformed write group token')
+ try:
+ rev_index = self._make_index(name, '.rix', resume=True)
+ inv_index = self._make_index(name, '.iix', resume=True)
+ txt_index = self._make_index(name, '.tix', resume=True)
+ sig_index = self._make_index(name, '.six', resume=True)
+ if self.chk_index is not None:
+ chk_index = self._make_index(name, '.cix', resume=True,
+ is_chk=True)
+ else:
+ chk_index = None
+ result = self.resumed_pack_factory(name, rev_index, inv_index,
+ txt_index, sig_index, self._upload_transport,
+ self._pack_transport, self._index_transport, self,
+ chk_index=chk_index)
+ except errors.NoSuchFile, e:
+ raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
+ self.add_pack_to_memory(result)
+ self._resumed_packs.append(result)
+ return result
+
+ def allocate(self, a_new_pack):
+ """Allocate name in the list of packs.
+
+ :param a_new_pack: A NewPack instance to be added to the collection of
+ packs for this repository.
+ """
+ self.ensure_loaded()
+ if a_new_pack.name in self._names:
+ raise errors.BzrError(
+ 'Pack %r already exists in %s' % (a_new_pack.name, self))
+ self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
+ self.add_pack_to_memory(a_new_pack)
+
+ def _iter_disk_pack_index(self):
+ """Iterate over the contents of the pack-names index.
+
+ This is used when loading the list from disk, and before writing to
+ detect updates from others during our write operation.
+ :return: An iterator of the index contents.
+ """
+ return self._index_class(self.transport, 'pack-names', None
+ ).iter_all_entries()
+
+ def _make_index(self, name, suffix, resume=False, is_chk=False):
+ size_offset = self._suffix_offsets[suffix]
+ index_name = name + suffix
+ if resume:
+ transport = self._upload_transport
+ index_size = transport.stat(index_name).st_size
+ else:
+ transport = self._index_transport
+ index_size = self._names[name][size_offset]
+ index = self._index_class(transport, index_name, index_size,
+ unlimited_cache=is_chk)
+ if is_chk and self._index_class is btree_index.BTreeGraphIndex:
+ index._leaf_factory = btree_index._gcchk_factory
+ return index
+
+ def _max_pack_count(self, total_revisions):
+ """Return the maximum number of packs to use for total revisions.
+
+ :param total_revisions: The total number of revisions in the
+ repository.
+ """
+ if not total_revisions:
+ return 1
+ digits = str(total_revisions)
+ result = 0
+ for digit in digits:
+ result += int(digit)
+ return result
+
+ def names(self):
+ """Provide an order to the underlying names."""
+ return sorted(self._names.keys())
+
+ def _obsolete_packs(self, packs):
+ """Move a number of packs which have been obsoleted out of the way.
+
+ Each pack and its associated indices are moved out of the way.
+
+ Note: for correctness this function should only be called after a new
+ pack names index has been written without these pack names, and with
+ the names of packs that contain the data previously available via these
+ packs.
+
+ :param packs: The packs to obsolete.
+ :return: None.
+ """
+ for pack in packs:
+ try:
+ try:
+ pack.pack_transport.move(pack.file_name(),
+ '../obsolete_packs/' + pack.file_name())
+ except errors.NoSuchFile:
+ # perhaps obsolete_packs was removed? Let's create it and
+ # try again
+ try:
+ pack.pack_transport.mkdir('../obsolete_packs/')
+ except errors.FileExists:
+ pass
+ pack.pack_transport.move(pack.file_name(),
+ '../obsolete_packs/' + pack.file_name())
+ except (errors.PathError, errors.TransportError), e:
+ # TODO: Should these be warnings or mutters?
+ mutter("couldn't rename obsolete pack, skipping it:\n%s"
+ % (e,))
+ # TODO: Probably needs to know all possible indices for this pack
+ # - or maybe list the directory and move all indices matching this
+ # name whether we recognize it or not?
+ suffixes = ['.iix', '.six', '.tix', '.rix']
+ if self.chk_index is not None:
+ suffixes.append('.cix')
+ for suffix in suffixes:
+ try:
+ self._index_transport.move(pack.name + suffix,
+ '../obsolete_packs/' + pack.name + suffix)
+ except (errors.PathError, errors.TransportError), e:
+ mutter("couldn't rename obsolete index, skipping it:\n%s"
+ % (e,))
+
+ def pack_distribution(self, total_revisions):
+ """Generate a list of the number of revisions to put in each pack.
+
+ :param total_revisions: The total number of revisions in the
+ repository.
+ """
+ if total_revisions == 0:
+ return [0]
+ digits = reversed(str(total_revisions))
+ result = []
+ for exponent, count in enumerate(digits):
+ size = 10 ** exponent
+ for pos in range(int(count)):
+ result.append(size)
+ return list(reversed(result))
+
+ def _pack_tuple(self, name):
+ """Return a tuple with the transport and file name for a pack name."""
+ return self._pack_transport, name + '.pack'
+
+ def _remove_pack_from_memory(self, pack):
+ """Remove pack from the packs accessed by this repository.
+
+ Only affects memory state, until self._save_pack_names() is invoked.
+ """
+ self._names.pop(pack.name)
+ self._packs_by_name.pop(pack.name)
+ self._remove_pack_indices(pack)
+ self.packs.remove(pack)
+
+ def _remove_pack_indices(self, pack, ignore_missing=False):
+ """Remove the indices for pack from the aggregated indices.
+
+ :param ignore_missing: Suppress KeyErrors from calling remove_index.
+ """
+ for index_type in Pack.index_definitions.keys():
+ attr_name = index_type + '_index'
+ aggregate_index = getattr(self, attr_name)
+ if aggregate_index is not None:
+ pack_index = getattr(pack, attr_name)
+ try:
+ aggregate_index.remove_index(pack_index)
+ except KeyError:
+ if ignore_missing:
+ continue
+ raise
+
+ def reset(self):
+ """Clear all cached data."""
+ # cached revision data
+ self.revision_index.clear()
+ # cached signature data
+ self.signature_index.clear()
+ # cached file text data
+ self.text_index.clear()
+ # cached inventory data
+ self.inventory_index.clear()
+ # cached chk data
+ if self.chk_index is not None:
+ self.chk_index.clear()
+ # remove the open pack
+ self._new_pack = None
+ # information about packs.
+ self._names = None
+ self.packs = []
+ self._packs_by_name = {}
+ self._packs_at_load = None
+
+ def _unlock_names(self):
+ """Release the mutex around the pack-names index."""
+ self.repo.control_files.unlock()
+
+ def _diff_pack_names(self):
+ """Read the pack names from disk, and compare it to the one in memory.
+
+ :return: (disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes)
+ disk_nodes The final set of nodes that should be referenced
+ deleted_nodes Nodes which have been removed since we started
+ new_nodes Nodes that are newly introduced
+ orig_disk_nodes The set of nodes as read from disk, before merging
+ """
+ # load the disk nodes across
+ disk_nodes = set()
+ for index, key, value in self._iter_disk_pack_index():
+ disk_nodes.add((key, value))
+ orig_disk_nodes = set(disk_nodes)
+
+ # do a two-way diff against our original content
+ current_nodes = set()
+ for name, sizes in self._names.iteritems():
+ current_nodes.add(
+ ((name, ), ' '.join(str(size) for size in sizes)))
+
+ # Packs no longer present in the repository, which were present when we
+ # locked the repository
+ deleted_nodes = self._packs_at_load - current_nodes
+ # Packs which this process is adding
+ new_nodes = current_nodes - self._packs_at_load
+
+ # Update the disk_nodes set to include the ones we are adding, and
+ # remove the ones which were removed by someone else
+ disk_nodes.difference_update(deleted_nodes)
+ disk_nodes.update(new_nodes)
+
+ return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
+
+ def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
+ """Given the correct set of pack files, update our saved info.
+
+ :return: (removed, added, modified)
+ removed pack names removed from self._names
+ added pack names added to self._names
+ modified pack names that had changed value
+ """
+ removed = []
+ added = []
+ modified = []
+ ## self._packs_at_load = disk_nodes
+ new_names = dict(disk_nodes)
+ # drop no longer present nodes
+ for pack in self.all_packs():
+ if (pack.name,) not in new_names:
+ removed.append(pack.name)
+ self._remove_pack_from_memory(pack)
+ # add new nodes/refresh existing ones
+ for key, value in disk_nodes:
+ name = key[0]
+ sizes = self._parse_index_sizes(value)
+ if name in self._names:
+ # existing
+ if sizes != self._names[name]:
+ # the pack for name has had its indices replaced - rare but
+ # important to handle. XXX: probably can never happen today
+ # because the three-way merge code above does not handle it
+ # - you may end up adding the same key twice to the new
+ # disk index because the set values are the same, unless
+ # the only index shows up as deleted by the set difference
+ # - which it may. Until there is a specific test for this,
+ # assume it's broken. RBC 20071017.
+ self._remove_pack_from_memory(self.get_pack_by_name(name))
+ self._names[name] = sizes
+ self.get_pack_by_name(name)
+ modified.append(name)
+ else:
+ # new
+ self._names[name] = sizes
+ self.get_pack_by_name(name)
+ added.append(name)
+ return removed, added, modified
+
+ def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
+ """Save the list of packs.
+
+ This will take out the mutex around the pack names list for the
+ duration of the method call. If concurrent updates have been made, a
+ three-way merge between the current list and the current in memory list
+ is performed.
+
+ :param clear_obsolete_packs: If True, clear out the contents of the
+ obsolete_packs directory.
+ :param obsolete_packs: Packs that are obsolete once the new pack-names
+ file has been written.
+ :return: A list of the names saved that were not previously on disk.
+ """
+ already_obsolete = []
+ self.lock_names()
+ try:
+ builder = self._index_builder_class()
+ (disk_nodes, deleted_nodes, new_nodes,
+ orig_disk_nodes) = self._diff_pack_names()
+ # TODO: handle same-name, index-size-changes here -
+ # e.g. use the value from disk, not ours, *unless* we're the one
+ # changing it.
+ for key, value in disk_nodes:
+ builder.add_node(key, value)
+ self.transport.put_file('pack-names', builder.finish(),
+ mode=self.repo.bzrdir._get_file_mode())
+ self._packs_at_load = disk_nodes
+ if clear_obsolete_packs:
+ to_preserve = None
+ if obsolete_packs:
+ to_preserve = set([o.name for o in obsolete_packs])
+ already_obsolete = self._clear_obsolete_packs(to_preserve)
+ finally:
+ self._unlock_names()
+ # synchronise the memory packs list with what we just wrote:
+ self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+ if obsolete_packs:
+ # TODO: We could add one more condition here. "if o.name not in
+ # orig_disk_nodes and o != the new_pack we haven't written to
+ # disk yet. However, the new pack object is not easily
+ # accessible here (it would have to be passed through the
+ # autopacking code, etc.)
+ obsolete_packs = [o for o in obsolete_packs
+ if o.name not in already_obsolete]
+ self._obsolete_packs(obsolete_packs)
+ return [new_node[0][0] for new_node in new_nodes]
+
+ def reload_pack_names(self):
+ """Sync our pack listing with what is present in the repository.
+
+ This should be called when we find out that something we thought was
+ present is now missing. This happens when another process re-packs the
+ repository, etc.
+
+ :return: True if the in-memory list of packs has been altered at all.
+ """
+ # The ensure_loaded call is to handle the case where the first call
+ # made involving the collection was to reload_pack_names, where we
+ # don't have a view of disk contents. It's a bit of a bandaid, and
+ # causes two reads of pack-names, but it's a rare corner case not
+ # struck with regular push/pull etc.
+ first_read = self.ensure_loaded()
+ if first_read:
+ return True
+ # Work out the new value of the pack names list.
+ (disk_nodes, deleted_nodes, new_nodes,
+ orig_disk_nodes) = self._diff_pack_names()
+ # _packs_at_load is meant to be the explicit list of names in
+ # 'pack-names' at the start. As such, it should not contain any
+ # pending names that haven't been written out yet.
+ self._packs_at_load = orig_disk_nodes
+ (removed, added,
+ modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+ if removed or added or modified:
+ return True
+ return False
+
+ def _restart_autopack(self):
+ """Reload the pack names list, and restart the autopack code."""
+ if not self.reload_pack_names():
+ # Re-raise the original exception, because something went missing
+ # and a restart didn't find it
+ raise
+ raise errors.RetryAutopack(self.repo, False, sys.exc_info())
+
+ def _restart_pack_operations(self):
+ """Reload the pack names list, and restart the autopack code."""
+ if not self.reload_pack_names():
+ # Re-raise the original exception, because something went missing
+ # and a restart didn't find it
+ raise
+ raise RetryPackOperations(self.repo, False, sys.exc_info())
+
+ def _clear_obsolete_packs(self, preserve=None):
+ """Delete everything from the obsolete-packs directory.
+
+ :return: A list of pack identifiers (the filename without '.pack') that
+ were found in obsolete_packs.
+ """
+ found = []
+ obsolete_pack_transport = self.transport.clone('obsolete_packs')
+ if preserve is None:
+ preserve = set()
+ try:
+ obsolete_pack_files = obsolete_pack_transport.list_dir('.')
+ except errors.NoSuchFile:
+ return found
+ for filename in obsolete_pack_files:
+ name, ext = osutils.splitext(filename)
+ if ext == '.pack':
+ found.append(name)
+ if name in preserve:
+ continue
+ try:
+ obsolete_pack_transport.delete(filename)
+ except (errors.PathError, errors.TransportError), e:
+ warning("couldn't delete obsolete pack, skipping it:\n%s"
+ % (e,))
+ return found
+
+ def _start_write_group(self):
+ # Do not permit preparation for writing if we're not in a 'write lock'.
+ if not self.repo.is_write_locked():
+ raise errors.NotWriteLocked(self)
+ self._new_pack = self.pack_factory(self, upload_suffix='.pack',
+ file_mode=self.repo.bzrdir._get_file_mode())
+ # allow writing: queue writes to a new index
+ self.revision_index.add_writable_index(self._new_pack.revision_index,
+ self._new_pack)
+ self.inventory_index.add_writable_index(self._new_pack.inventory_index,
+ self._new_pack)
+ self.text_index.add_writable_index(self._new_pack.text_index,
+ self._new_pack)
+ self._new_pack.text_index.set_optimize(combine_backing_indices=False)
+ self.signature_index.add_writable_index(self._new_pack.signature_index,
+ self._new_pack)
+ if self.chk_index is not None:
+ self.chk_index.add_writable_index(self._new_pack.chk_index,
+ self._new_pack)
+ self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
+ self._new_pack.chk_index.set_optimize(combine_backing_indices=False)
+
+ self.repo.inventories._index._add_callback = self.inventory_index.add_callback
+ self.repo.revisions._index._add_callback = self.revision_index.add_callback
+ self.repo.signatures._index._add_callback = self.signature_index.add_callback
+ self.repo.texts._index._add_callback = self.text_index.add_callback
+
+ def _abort_write_group(self):
+ # FIXME: just drop the transient index.
+ # forget what names there are
+ if self._new_pack is not None:
+ operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+ operation.add_cleanup(setattr, self, '_new_pack', None)
+ # If we aborted while in the middle of finishing the write
+ # group, _remove_pack_indices could fail because the indexes are
+ # already gone. But if they're not there we shouldn't fail in this
+ # case, so we pass ignore_missing=True.
+ operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+ ignore_missing=True)
+ operation.run_simple()
+ for resumed_pack in self._resumed_packs:
+ operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+ # See comment in previous finally block.
+ operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+ ignore_missing=True)
+ operation.run_simple()
+ del self._resumed_packs[:]
+
+ def _remove_resumed_pack_indices(self):
+ for resumed_pack in self._resumed_packs:
+ self._remove_pack_indices(resumed_pack)
+ del self._resumed_packs[:]
+
+ def _check_new_inventories(self):
+ """Detect missing inventories in this write group.
+
+ :returns: list of strs, summarising any problems found. If the list is
+ empty no problems were found.
+ """
+ # The base implementation does no checks. GCRepositoryPackCollection
+ # overrides this.
+ return []
+
+ def _commit_write_group(self):
+ all_missing = set()
+ for prefix, versioned_file in (
+ ('revisions', self.repo.revisions),
+ ('inventories', self.repo.inventories),
+ ('texts', self.repo.texts),
+ ('signatures', self.repo.signatures),
+ ):
+ missing = versioned_file.get_missing_compression_parent_keys()
+ all_missing.update([(prefix,) + key for key in missing])
+ if all_missing:
+ raise errors.BzrCheckError(
+ "Repository %s has missing compression parent(s) %r "
+ % (self.repo, sorted(all_missing)))
+ problems = self._check_new_inventories()
+ if problems:
+ problems_summary = '\n'.join(problems)
+ raise errors.BzrCheckError(
+ "Cannot add revision(s) to repository: " + problems_summary)
+ self._remove_pack_indices(self._new_pack)
+ any_new_content = False
+ if self._new_pack.data_inserted():
+            # get all the data to disk and ready to use
+ self._new_pack.finish()
+ self.allocate(self._new_pack)
+ self._new_pack = None
+ any_new_content = True
+ else:
+ self._new_pack.abort()
+ self._new_pack = None
+ for resumed_pack in self._resumed_packs:
+ # XXX: this is a pretty ugly way to turn the resumed pack into a
+ # properly committed pack.
+ self._names[resumed_pack.name] = None
+ self._remove_pack_from_memory(resumed_pack)
+ resumed_pack.finish()
+ self.allocate(resumed_pack)
+ any_new_content = True
+ del self._resumed_packs[:]
+ if any_new_content:
+ result = self.autopack()
+ if not result:
+ # when autopack takes no steps, the names list is still
+ # unsaved.
+ return self._save_pack_names()
+ return result
+ return []
+
+ def _suspend_write_group(self):
+ tokens = [pack.name for pack in self._resumed_packs]
+ self._remove_pack_indices(self._new_pack)
+ if self._new_pack.data_inserted():
+            # get all the data to disk and ready to use
+ self._new_pack.finish(suspend=True)
+ tokens.append(self._new_pack.name)
+ self._new_pack = None
+ else:
+ self._new_pack.abort()
+ self._new_pack = None
+ self._remove_resumed_pack_indices()
+ return tokens
+
+ def _resume_write_group(self, tokens):
+ for token in tokens:
+ self._resume_pack(token)
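+
+    # A minimal suspend/resume sketch (variable name 'packs' is illustrative):
+    # the tokens handed back by _suspend_write_group are simply the names of
+    # the suspended packs, and feeding them back in resumes those same packs.
+    #
+    #   tokens = packs._suspend_write_group()    # e.g. ['0a1b2c...']
+    #   ...
+    #   packs._resume_write_group(tokens)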
+
+
+class PackRepository(MetaDirVersionedFileRepository):
+ """Repository with knit objects stored inside pack containers.
+
+    The layering for a PackRepository is:
+
+ Graph | HPSS | Repository public layer |
+ ===================================================
+ Tuple based apis below, string based, and key based apis above
+ ---------------------------------------------------
+ VersionedFiles
+ Provides .texts, .revisions etc
+ This adapts the N-tuple keys to physical knit records which only have a
+ single string identifier (for historical reasons), which in older formats
+ was always the revision_id, and in the mapped code for packs is always
+ the last element of key tuples.
+ ---------------------------------------------------
+ GraphIndex
+ A separate GraphIndex is used for each of the
+ texts/inventories/revisions/signatures contained within each individual
+ pack file. The GraphIndex layer works in N-tuples and is unaware of any
+ semantic value.
+ ===================================================
+
+ """
+
+    # These attributes are inherited from the Repository base class. Setting
+    # them to None ensures that if the constructor is changed to not initialize
+    # them, or a subclass fails to call the constructor, an error will occur
+    # rather than the system working but generating incorrect data.
+ _commit_builder_class = None
+ _serializer = None
+
+ def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
+ _serializer):
+ MetaDirRepository.__init__(self, _format, a_bzrdir, control_files)
+ self._commit_builder_class = _commit_builder_class
+ self._serializer = _serializer
+ self._reconcile_fixes_text_parents = True
+ if self._format.supports_external_lookups:
+ self._unstacked_provider = graph.CachingParentsProvider(
+ self._make_parents_provider_unstacked())
+ else:
+ self._unstacked_provider = graph.CachingParentsProvider(self)
+ self._unstacked_provider.disable_cache()
+
+ @needs_read_lock
+ def _all_revision_ids(self):
+ """See Repository.all_revision_ids()."""
+ return [key[0] for key in self.revisions.keys()]
+
+ def _abort_write_group(self):
+ self.revisions._index._key_dependencies.clear()
+ self._pack_collection._abort_write_group()
+
+ def _make_parents_provider(self):
+ if not self._format.supports_external_lookups:
+ return self._unstacked_provider
+ return graph.StackedParentsProvider(_LazyListJoin(
+ [self._unstacked_provider], self._fallback_repositories))
+
+ def _refresh_data(self):
+ if not self.is_locked():
+ return
+ self._pack_collection.reload_pack_names()
+ self._unstacked_provider.disable_cache()
+ self._unstacked_provider.enable_cache()
+
+ def _start_write_group(self):
+ self._pack_collection._start_write_group()
+
+ def _commit_write_group(self):
+ hint = self._pack_collection._commit_write_group()
+ self.revisions._index._key_dependencies.clear()
+ # The commit may have added keys that were previously cached as
+ # missing, so reset the cache.
+ self._unstacked_provider.disable_cache()
+ self._unstacked_provider.enable_cache()
+ return hint
+
+ def suspend_write_group(self):
+ # XXX check self._write_group is self.get_transaction()?
+ tokens = self._pack_collection._suspend_write_group()
+ self.revisions._index._key_dependencies.clear()
+ self._write_group = None
+ return tokens
+
+ def _resume_write_group(self, tokens):
+ self._start_write_group()
+ try:
+ self._pack_collection._resume_write_group(tokens)
+ except errors.UnresumableWriteGroup:
+ self._abort_write_group()
+ raise
+ for pack in self._pack_collection._resumed_packs:
+ self.revisions._index.scan_unvalidated_index(pack.revision_index)
+
+ def get_transaction(self):
+ if self._write_lock_count:
+ return self._transaction
+ else:
+ return self.control_files.get_transaction()
+
+ def is_locked(self):
+ return self._write_lock_count or self.control_files.is_locked()
+
+ def is_write_locked(self):
+ return self._write_lock_count
+
+ def lock_write(self, token=None):
+ """Lock the repository for writes.
+
+ :return: A bzrlib.repository.RepositoryWriteLockResult.
+ """
+ locked = self.is_locked()
+ if not self._write_lock_count and locked:
+ raise errors.ReadOnlyError(self)
+ self._write_lock_count += 1
+ if self._write_lock_count == 1:
+ self._transaction = transactions.WriteTransaction()
+ if not locked:
+ if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+ note('%r was write locked again', self)
+ self._prev_lock = 'w'
+ self._unstacked_provider.enable_cache()
+ for repo in self._fallback_repositories:
+ # Writes don't affect fallback repos
+ repo.lock_read()
+ self._refresh_data()
+ return RepositoryWriteLockResult(self.unlock, None)
+
+ def lock_read(self):
+ """Lock the repository for reads.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ locked = self.is_locked()
+ if self._write_lock_count:
+ self._write_lock_count += 1
+ else:
+ self.control_files.lock_read()
+ if not locked:
+ if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+ note('%r was read locked again', self)
+ self._prev_lock = 'r'
+ self._unstacked_provider.enable_cache()
+ for repo in self._fallback_repositories:
+ repo.lock_read()
+ self._refresh_data()
+ return LogicalLockResult(self.unlock)
+
+ def leave_lock_in_place(self):
+ # not supported - raise an error
+ raise NotImplementedError(self.leave_lock_in_place)
+
+ def dont_leave_lock_in_place(self):
+ # not supported - raise an error
+ raise NotImplementedError(self.dont_leave_lock_in_place)
+
+ @needs_write_lock
+ def pack(self, hint=None, clean_obsolete_packs=False):
+ """Compress the data within the repository.
+
+ This will pack all the data to a single pack. In future it may
+ recompress deltas or do other such expensive operations.
+ """
+ self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
+
+ @needs_write_lock
+ def reconcile(self, other=None, thorough=False):
+ """Reconcile this repository."""
+ from bzrlib.reconcile import PackReconciler
+ reconciler = PackReconciler(self, thorough=thorough)
+ reconciler.reconcile()
+ return reconciler
+
+ def _reconcile_pack(self, collection, packs, extension, revs, pb):
+ raise NotImplementedError(self._reconcile_pack)
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ if self._write_lock_count == 1 and self._write_group is not None:
+ self.abort_write_group()
+ self._unstacked_provider.disable_cache()
+ self._transaction = None
+ self._write_lock_count = 0
+ raise errors.BzrError(
+ 'Must end write group before releasing write lock on %s'
+ % self)
+ if self._write_lock_count:
+ self._write_lock_count -= 1
+ if not self._write_lock_count:
+ transaction = self._transaction
+ self._transaction = None
+ transaction.finish()
+ else:
+ self.control_files.unlock()
+
+ if not self.is_locked():
+ self._unstacked_provider.disable_cache()
+ for repo in self._fallback_repositories:
+ repo.unlock()
+
+
+class RepositoryFormatPack(MetaDirVersionedFileRepositoryFormat):
+ """Format logic for pack structured repositories.
+
+ This repository format has:
+ - a list of packs in pack-names
+ - packs in packs/NAME.pack
+ - indices in indices/NAME.{iix,six,tix,rix}
+ - knit deltas in the packs, knit indices mapped to the indices.
+ - thunk objects to support the knits programming API.
+ - a format marker of its own
+ - an optional 'shared-storage' flag
+ - an optional 'no-working-trees' flag
+ - a LockDir lock
+ """
+
+ # Set this attribute in derived classes to control the repository class
+ # created by open and initialize.
+ repository_class = None
+ # Set this attribute in derived classes to control the
+ # _commit_builder_class that the repository objects will have passed to
+ # their constructor.
+ _commit_builder_class = None
+    # Set this attribute in derived classes to control the _serializer that the
+ # repository objects will have passed to their constructor.
+ _serializer = None
+ # Packs are not confused by ghosts.
+ supports_ghosts = True
+ # External references are not supported in pack repositories yet.
+ supports_external_lookups = False
+ # Most pack formats do not use chk lookups.
+ supports_chks = False
+ # What index classes to use
+ index_builder_class = None
+ index_class = None
+ _fetch_uses_deltas = True
+ fast_deltas = False
+ supports_funky_characters = True
+ revision_graph_can_have_wrong_parents = True
+
+ def initialize(self, a_bzrdir, shared=False):
+ """Create a pack based repository.
+
+ :param a_bzrdir: bzrdir to contain the new repository; must already
+ be initialized.
+ :param shared: If true the repository will be initialized as a shared
+ repository.
+ """
+ mutter('creating repository in %s.', a_bzrdir.transport.base)
+ dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
+ builder = self.index_builder_class()
+ files = [('pack-names', builder.finish())]
+ utf8_files = [('format', self.get_format_string())]
+
+ self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
+ repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+ self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+ return repository
+
+ def open(self, a_bzrdir, _found=False, _override_transport=None):
+ """See RepositoryFormat.open().
+
+ :param _override_transport: INTERNAL USE ONLY. Allows opening the
+ repository at a slightly different url
+ than normal. I.e. during 'upgrade'.
+ """
+ if not _found:
+ format = RepositoryFormatMetaDir.find_format(a_bzrdir)
+ if _override_transport is not None:
+ repo_transport = _override_transport
+ else:
+ repo_transport = a_bzrdir.get_repository_transport(None)
+ control_files = lockable_files.LockableFiles(repo_transport,
+ 'lock', lockdir.LockDir)
+ return self.repository_class(_format=self,
+ a_bzrdir=a_bzrdir,
+ control_files=control_files,
+ _commit_builder_class=self._commit_builder_class,
+ _serializer=self._serializer)
+
+
+class RetryPackOperations(errors.RetryWithNewPacks):
+ """Raised when we are packing and we find a missing file.
+
+ Meant as a signaling exception, to tell the RepositoryPackCollection.pack
+ code it should try again.
+ """
+
+ internal_error = True
+
+ _fmt = ("Pack files have changed, reload and try pack again."
+ " context: %(context)s %(orig_error)s")
+
+
+class _DirectPackAccess(object):
+ """Access to data in one or more packs with less translation."""
+
+ def __init__(self, index_to_packs, reload_func=None, flush_func=None):
+ """Create a _DirectPackAccess object.
+
+ :param index_to_packs: A dict mapping index objects to the transport
+ and file names for obtaining data.
+ :param reload_func: A function to call if we determine that the pack
+ files have moved and we need to reload our caches. See
+        bzrlib.repofmt.pack_repo.AggregateIndex for more details.
+ """
+ self._container_writer = None
+ self._write_index = None
+ self._indices = index_to_packs
+ self._reload_func = reload_func
+ self._flush_func = flush_func
+
+ def add_raw_records(self, key_sizes, raw_data):
+ """Add raw knit bytes to a storage area.
+
+ The data is spooled to the container writer in one bytes-record per
+ raw data item.
+
+        :param key_sizes: An iterable of tuples containing the key and size of each
+ raw data segment.
+ :param raw_data: A bytestring containing the data.
+ :return: A list of memos to retrieve the record later. Each memo is an
+ opaque index memo. For _DirectPackAccess the memo is (index, pos,
+ length), where the index field is the write_index object supplied
+ to the PackAccess object.
+ """
+ if type(raw_data) is not str:
+ raise AssertionError(
+ 'data must be plain bytes was %s' % type(raw_data))
+ result = []
+ offset = 0
+ for key, size in key_sizes:
+ p_offset, p_length = self._container_writer.add_bytes_record(
+ raw_data[offset:offset+size], [])
+ offset += size
+ result.append((self._write_index, p_offset, p_length))
+ return result
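+
+    # A memo returned above is, for example, (self._write_index, 0, 123): the
+    # write index object plus the byte offset and length of the record inside
+    # the pack currently being written (the numeric values are illustrative).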
+
+ def flush(self):
+ """Flush pending writes on this access object.
+
+ This will flush any buffered writes to a NewPack.
+ """
+ if self._flush_func is not None:
+ self._flush_func()
+
+ def get_raw_records(self, memos_for_retrieval):
+ """Get the raw bytes for a records.
+
+ :param memos_for_retrieval: An iterable containing the (index, pos,
+ length) memo for retrieving the bytes. The Pack access method
+ looks up the pack to use for a given record in its index_to_pack
+ map.
+ :return: An iterator over the bytes of the records.
+ """
+ # first pass, group into same-index requests
+ request_lists = []
+ current_index = None
+ for (index, offset, length) in memos_for_retrieval:
+ if current_index == index:
+ current_list.append((offset, length))
+ else:
+ if current_index is not None:
+ request_lists.append((current_index, current_list))
+ current_index = index
+ current_list = [(offset, length)]
+ # handle the last entry
+ if current_index is not None:
+ request_lists.append((current_index, current_list))
+ for index, offsets in request_lists:
+ try:
+ transport, path = self._indices[index]
+ except KeyError:
+ # A KeyError here indicates that someone has triggered an index
+ # reload, and this index has gone missing, we need to start
+ # over.
+ if self._reload_func is None:
+ # If we don't have a _reload_func there is nothing that can
+ # be done
+ raise
+ raise errors.RetryWithNewPacks(index,
+ reload_occurred=True,
+ exc_info=sys.exc_info())
+ try:
+ reader = pack.make_readv_reader(transport, path, offsets)
+ for names, read_func in reader.iter_records():
+ yield read_func(None)
+ except errors.NoSuchFile:
+ # A NoSuchFile error indicates that a pack file has gone
+ # missing on disk, we need to trigger a reload, and start over.
+ if self._reload_func is None:
+ raise
+ raise errors.RetryWithNewPacks(transport.abspath(path),
+ reload_occurred=False,
+ exc_info=sys.exc_info())
+
+ def set_writer(self, writer, index, transport_packname):
+ """Set a writer to use for adding data."""
+ if index is not None:
+ self._indices[index] = transport_packname
+ self._container_writer = writer
+ self._write_index = index
+
+ def reload_or_raise(self, retry_exc):
+ """Try calling the reload function, or re-raise the original exception.
+
+ This should be called after _DirectPackAccess raises a
+ RetryWithNewPacks exception. This function will handle the common logic
+ of determining when the error is fatal versus being temporary.
+ It will also make sure that the original exception is raised, rather
+ than the RetryWithNewPacks exception.
+
+ If this function returns, then the calling function should retry
+ whatever operation was being performed. Otherwise an exception will
+ be raised.
+
+ :param retry_exc: A RetryWithNewPacks exception.
+ """
+ is_error = False
+ if self._reload_func is None:
+ is_error = True
+ elif not self._reload_func():
+ # The reload claimed that nothing changed
+ if not retry_exc.reload_occurred:
+ # If there wasn't an earlier reload, then we really were
+ # expecting to find changes. We didn't find them, so this is a
+ # hard error
+ is_error = True
+ if is_error:
+ exc_class, exc_value, exc_traceback = retry_exc.exc_info
+ raise exc_class, exc_value, exc_traceback
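+
+    # Illustrative caller pattern (the helper name do_operation and the
+    # variable access are hypothetical): when a RetryWithNewPacks escapes,
+    # reload_or_raise either re-raises the underlying error or returns, in
+    # which case the caller simply loops and retries the operation.
+    #
+    #   while True:
+    #       try:
+    #           return do_operation()
+    #       except errors.RetryWithNewPacks, retry_exc:
+    #           access.reload_or_raise(retry_exc)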
+
+
+
diff --git a/bzrlib/repository.py b/bzrlib/repository.py
new file mode 100644
index 0000000..022b106
--- /dev/null
+++ b/bzrlib/repository.py
@@ -0,0 +1,1831 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import itertools
+import time
+
+from bzrlib import (
+ config,
+ controldir,
+ debug,
+ generate_ids,
+ graph,
+ lockable_files,
+ lockdir,
+ osutils,
+ revision as _mod_revision,
+ testament as _mod_testament,
+ tsort,
+ gpg,
+ )
+from bzrlib.bundle import serializer
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib import (
+ bzrdir,
+ errors,
+ registry,
+ symbol_versioning,
+ ui,
+ )
+from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
+from bzrlib.inter import InterObject
+from bzrlib.lock import _RelockDebugMixin, LogicalLockResult
+from bzrlib.trace import (
+ log_exception_quietly, note, mutter, mutter_callsite, warning)
+
+
+# Old formats display a warning, but only once
+_deprecation_warning_done = False
+
+
+class IsInWriteGroupError(errors.InternalBzrError):
+
+ _fmt = "May not refresh_data of repo %(repo)s while in a write group."
+
+ def __init__(self, repo):
+ errors.InternalBzrError.__init__(self, repo=repo)
+
+
+class CommitBuilder(object):
+ """Provides an interface to build up a commit.
+
+ This allows describing a tree to be committed without needing to
+ know the internals of the format of the repository.
+ """
+
+ # all clients should supply tree roots.
+ record_root_entry = True
+ # whether this commit builder supports the record_entry_contents interface
+ supports_record_entry_contents = False
+ # whether this commit builder will automatically update the branch that is
+ # being committed to
+ updates_branch = False
+
+ def __init__(self, repository, parents, config_stack, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ """Initiate a CommitBuilder.
+
+ :param repository: Repository to commit to.
+ :param parents: Revision ids of the parents of the new revision.
+ :param timestamp: Optional timestamp recorded for commit.
+ :param timezone: Optional timezone for timestamp.
+ :param committer: Optional committer to set for commit.
+ :param revprops: Optional dictionary of revision properties.
+ :param revision_id: Optional revision id.
+ :param lossy: Whether to discard data that can not be natively
+ represented, when pushing to a foreign VCS
+ """
+ self._config_stack = config_stack
+ self._lossy = lossy
+
+ if committer is None:
+ self._committer = self._config_stack.get('email')
+ elif not isinstance(committer, unicode):
+ self._committer = committer.decode() # throw if non-ascii
+ else:
+ self._committer = committer
+
+ self._new_revision_id = revision_id
+ self.parents = parents
+ self.repository = repository
+
+ self._revprops = {}
+ if revprops is not None:
+ self._validate_revprops(revprops)
+ self._revprops.update(revprops)
+
+ if timestamp is None:
+ timestamp = time.time()
+ # Restrict resolution to 1ms
+ self._timestamp = round(timestamp, 3)
+
+ if timezone is None:
+ self._timezone = osutils.local_time_offset()
+ else:
+ self._timezone = int(timezone)
+
+ self._generate_revision_if_needed()
+
+ def any_changes(self):
+ """Return True if any entries were changed.
+
+ This includes merge-only changes. It is the core for the --unchanged
+ detection in commit.
+
+        :return: True if any changes have occurred.
+ """
+ raise NotImplementedError(self.any_changes)
+
+ def _validate_unicode_text(self, text, context):
+ """Verify things like commit messages don't have bogus characters."""
+ if '\r' in text:
+ raise ValueError('Invalid value for %s: %r' % (context, text))
+
+ def _validate_revprops(self, revprops):
+ for key, value in revprops.iteritems():
+ # We know that the XML serializers do not round trip '\r'
+ # correctly, so refuse to accept them
+ if not isinstance(value, basestring):
+ raise ValueError('revision property (%s) is not a valid'
+ ' (unicode) string: %r' % (key, value))
+ self._validate_unicode_text(value,
+ 'revision property (%s)' % (key,))
+
+ def commit(self, message):
+ """Make the actual commit.
+
+ :return: The revision id of the recorded revision.
+ """
+ raise NotImplementedError(self.commit)
+
+ def abort(self):
+ """Abort the commit that is being built.
+ """
+ raise NotImplementedError(self.abort)
+
+ def revision_tree(self):
+ """Return the tree that was just committed.
+
+ After calling commit() this can be called to get a
+ RevisionTree representing the newly committed tree. This is
+ preferred to calling Repository.revision_tree() because that may
+ require deserializing the inventory, while we already have a copy in
+ memory.
+ """
+ raise NotImplementedError(self.revision_tree)
+
+ def finish_inventory(self):
+ """Tell the builder that the inventory is finished.
+
+ :return: The inventory id in the repository, which can be used with
+ repository.get_inventory.
+ """
+ raise NotImplementedError(self.finish_inventory)
+
+ def _gen_revision_id(self):
+ """Return new revision-id."""
+ return generate_ids.gen_revision_id(self._committer, self._timestamp)
+
+ def _generate_revision_if_needed(self):
+ """Create a revision id if None was supplied.
+
+ If the repository can not support user-specified revision ids
+ they should override this function and raise CannotSetRevisionId
+ if _new_revision_id is not None.
+
+ :raises: CannotSetRevisionId
+ """
+ if self._new_revision_id is None:
+ self._new_revision_id = self._gen_revision_id()
+ self.random_revid = True
+ else:
+ self.random_revid = False
+
+ def will_record_deletes(self):
+ """Tell the commit builder that deletes are being notified.
+
+ This enables the accumulation of an inventory delta; for the resulting
+ commit to be valid, deletes against the basis MUST be recorded via
+ builder.record_delete().
+ """
+ raise NotImplementedError(self.will_record_deletes)
+
+ def record_iter_changes(self, tree, basis_revision_id, iter_changes):
+ """Record a new tree via iter_changes.
+
+ :param tree: The tree to obtain text contents from for changed objects.
+ :param basis_revision_id: The revision id of the tree the iter_changes
+ has been generated against. Currently assumed to be the same
+ as self.parents[0] - if it is not, errors may occur.
+ :param iter_changes: An iter_changes iterator with the changes to apply
+ to basis_revision_id. The iterator must not include any items with
+ a current kind of None - missing items must be either filtered out
+            or errored-on before record_iter_changes sees the item.
+ :return: A generator of (file_id, relpath, fs_hash) tuples for use with
+ tree._observed_sha1.
+ """
+ raise NotImplementedError(self.record_iter_changes)
+
+
+class RepositoryWriteLockResult(LogicalLockResult):
+ """The result of write locking a repository.
+
+ :ivar repository_token: The token obtained from the underlying lock, or
+ None.
+ :ivar unlock: A callable which will unlock the lock.
+ """
+
+ def __init__(self, unlock, repository_token):
+ LogicalLockResult.__init__(self, unlock)
+ self.repository_token = repository_token
+
+ def __repr__(self):
+ return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
+ self.unlock)
+
+
+######################################################################
+# Repositories
+
+
+class Repository(_RelockDebugMixin, controldir.ControlComponent):
+ """Repository holding history for one or more branches.
+
+ The repository holds and retrieves historical information including
+ revisions and file history. It's normally accessed only by the Branch,
+ which views a particular line of development through that history.
+
+ See VersionedFileRepository in bzrlib.vf_repository for the
+ base class for most Bazaar repositories.
+ """
+
+ def abort_write_group(self, suppress_errors=False):
+ """Commit the contents accrued within the current write group.
+
+ :param suppress_errors: if true, abort_write_group will catch and log
+ unexpected errors that happen during the abort, rather than
+ allowing them to propagate. Defaults to False.
+
+ :seealso: start_write_group.
+ """
+ if self._write_group is not self.get_transaction():
+            # has an unlock or relock occurred?
+ if suppress_errors:
+ mutter(
+ '(suppressed) mismatched lock context and write group. %r, %r',
+ self._write_group, self.get_transaction())
+ return
+ raise errors.BzrError(
+ 'mismatched lock context and write group. %r, %r' %
+ (self._write_group, self.get_transaction()))
+ try:
+ self._abort_write_group()
+ except Exception, exc:
+ self._write_group = None
+ if not suppress_errors:
+ raise
+ mutter('abort_write_group failed')
+ log_exception_quietly()
+ note(gettext('bzr: ERROR (ignored): %s'), exc)
+ self._write_group = None
+
+ def _abort_write_group(self):
+ """Template method for per-repository write group cleanup.
+
+ This is called during abort before the write group is considered to be
+ finished and should cleanup any internal state accrued during the write
+ group. There is no requirement that data handed to the repository be
+ *not* made available - this is not a rollback - but neither should any
+        attempt be made to ensure that data added is fully committed. Abort is
+        invoked when an error has occurred, so further disk or network
+        operations may not be possible or may error, and if possible should not
+        be attempted.
+ """
+
+ def add_fallback_repository(self, repository):
+ """Add a repository to use for looking up data not held locally.
+
+ :param repository: A repository.
+ """
+ raise NotImplementedError(self.add_fallback_repository)
+
+ def _check_fallback_repository(self, repository):
+ """Check that this repository can fallback to repository safely.
+
+ Raise an error if not.
+
+ :param repository: A repository to fallback to.
+ """
+ return InterRepository._assert_same_model(self, repository)
+
+ def all_revision_ids(self):
+ """Returns a list of all the revision ids in the repository.
+
+ This is conceptually deprecated because code should generally work on
+ the graph reachable from a particular revision, and ignore any other
+ revisions that might be present. There is no direct replacement
+ method.
+ """
+ if 'evil' in debug.debug_flags:
+ mutter_callsite(2, "all_revision_ids is linear with history.")
+ return self._all_revision_ids()
+
+ def _all_revision_ids(self):
+ """Returns a list of all the revision ids in the repository.
+
+ These are in as much topological order as the underlying store can
+ present.
+ """
+ raise NotImplementedError(self._all_revision_ids)
+
+ def break_lock(self):
+ """Break a lock if one is present from another instance.
+
+ Uses the ui factory to ask for confirmation if the lock may be from
+ an active process.
+ """
+ self.control_files.break_lock()
+
+ @staticmethod
+ def create(controldir):
+ """Construct the current default format repository in controldir."""
+ return RepositoryFormat.get_default_format().initialize(controldir)
+
+ def __init__(self, _format, controldir, control_files):
+ """instantiate a Repository.
+
+ :param _format: The format of the repository on disk.
+ :param controldir: The ControlDir of the repository.
+ :param control_files: Control files to use for locking, etc.
+ """
+ # In the future we will have a single api for all stores for
+ # getting file texts, inventories and revisions, then
+ # this construct will accept instances of those things.
+ super(Repository, self).__init__()
+ self._format = _format
+ # the following are part of the public API for Repository:
+ self.bzrdir = controldir
+ self.control_files = control_files
+ # for tests
+ self._write_group = None
+ # Additional places to query for data.
+ self._fallback_repositories = []
+
+ @property
+ def user_transport(self):
+ return self.bzrdir.user_transport
+
+ @property
+ def control_transport(self):
+ return self._transport
+
+ def __repr__(self):
+ if self._fallback_repositories:
+ return '%s(%r, fallback_repositories=%r)' % (
+ self.__class__.__name__,
+ self.base,
+ self._fallback_repositories)
+ else:
+ return '%s(%r)' % (self.__class__.__name__,
+ self.base)
+
+ def _has_same_fallbacks(self, other_repo):
+ """Returns true if the repositories have the same fallbacks."""
+ my_fb = self._fallback_repositories
+ other_fb = other_repo._fallback_repositories
+ if len(my_fb) != len(other_fb):
+ return False
+ for f, g in zip(my_fb, other_fb):
+ if not f.has_same_location(g):
+ return False
+ return True
+
+ def has_same_location(self, other):
+ """Returns a boolean indicating if this repository is at the same
+ location as another repository.
+
+ This might return False even when two repository objects are accessing
+ the same physical repository via different URLs.
+ """
+ if self.__class__ is not other.__class__:
+ return False
+ return (self.control_url == other.control_url)
+
+ def is_in_write_group(self):
+ """Return True if there is an open write group.
+
+ :seealso: start_write_group.
+ """
+ return self._write_group is not None
+
+ def is_locked(self):
+ return self.control_files.is_locked()
+
+ def is_write_locked(self):
+ """Return True if this object is write locked."""
+ return self.is_locked() and self.control_files._lock_mode == 'w'
+
+ def lock_write(self, token=None):
+ """Lock this repository for writing.
+
+        This causes caching within the repository object to start accumulating
+ data during reads, and allows a 'write_group' to be obtained. Write
+ groups must be used for actual data insertion.
+
+ A token should be passed in if you know that you have locked the object
+ some other way, and need to synchronise this object's state with that
+ fact.
+
+ XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+
+ :param token: if this is already locked, then lock_write will fail
+ unless the token matches the existing lock.
+ :returns: a token if this instance supports tokens, otherwise None.
+ :raises TokenLockingNotSupported: when a token is given but this
+ instance doesn't support using token locks.
+ :raises MismatchedToken: if the specified token doesn't match the token
+ of the existing lock.
+ :seealso: start_write_group.
+ :return: A RepositoryWriteLockResult.
+ """
+ locked = self.is_locked()
+ token = self.control_files.lock_write(token=token)
+ if not locked:
+ self._warn_if_deprecated()
+ self._note_lock('w')
+ for repo in self._fallback_repositories:
+ # Writes don't affect fallback repos
+ repo.lock_read()
+ self._refresh_data()
+ return RepositoryWriteLockResult(self.unlock, token)
+
+ def lock_read(self):
+ """Lock the repository for read operations.
+
+ :return: An object with an unlock method which will release the lock
+ obtained.
+ """
+ locked = self.is_locked()
+ self.control_files.lock_read()
+ if not locked:
+ self._warn_if_deprecated()
+ self._note_lock('r')
+ for repo in self._fallback_repositories:
+ repo.lock_read()
+ self._refresh_data()
+ return LogicalLockResult(self.unlock)
+
+ def get_physical_lock_status(self):
+ return self.control_files.get_physical_lock_status()
+
+ def leave_lock_in_place(self):
+ """Tell this repository not to release the physical lock when this
+ object is unlocked.
+
+ If lock_write doesn't return a token, then this method is not supported.
+ """
+ self.control_files.leave_in_place()
+
+ def dont_leave_lock_in_place(self):
+ """Tell this repository to release the physical lock when this
+ object is unlocked, even if it didn't originally acquire it.
+
+ If lock_write doesn't return a token, then this method is not supported.
+ """
+ self.control_files.dont_leave_in_place()
+
+ @needs_read_lock
+ def gather_stats(self, revid=None, committers=None):
+ """Gather statistics from a revision id.
+
+ :param revid: The revision id to gather statistics from, if None, then
+ no revision specific statistics are gathered.
+ :param committers: Optional parameter controlling whether to grab
+ a count of committers from the revision specific statistics.
+ :return: A dictionary of statistics. Currently this contains:
+ committers: The number of committers if requested.
+ firstrev: A tuple with timestamp, timezone for the penultimate left
+ most ancestor of revid, if revid is not the NULL_REVISION.
+ latestrev: A tuple with timestamp, timezone for revid, if revid is
+ not the NULL_REVISION.
+ revisions: The total revision count in the repository.
+            size: An estimated disk size of the repository in bytes.
+ """
+ result = {}
+ if revid and committers:
+ result['committers'] = 0
+ if revid and revid != _mod_revision.NULL_REVISION:
+ graph = self.get_graph()
+ if committers:
+ all_committers = set()
+ revisions = [r for (r, p) in graph.iter_ancestry([revid])
+ if r != _mod_revision.NULL_REVISION]
+ last_revision = None
+ if not committers:
+ # ignore the revisions in the middle - just grab first and last
+ revisions = revisions[0], revisions[-1]
+ for revision in self.get_revisions(revisions):
+ if not last_revision:
+ last_revision = revision
+ if committers:
+ all_committers.add(revision.committer)
+ first_revision = revision
+ if committers:
+ result['committers'] = len(all_committers)
+ result['firstrev'] = (first_revision.timestamp,
+ first_revision.timezone)
+ result['latestrev'] = (last_revision.timestamp,
+ last_revision.timezone)
+ return result
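+
+    # Example of the result shape when both revid and committers are supplied
+    # (all values illustrative): {'committers': 3,
+    #                             'firstrev': (1189102934.0, 3600),
+    #                             'latestrev': (1201398765.0, 0)}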
+
+ def find_branches(self, using=False):
+ """Find branches underneath this repository.
+
+ This will include branches inside other branches.
+
+ :param using: If True, list only branches using this repository.
+ """
+ if using and not self.is_shared():
+ return self.bzrdir.list_branches()
+ class Evaluator(object):
+
+ def __init__(self):
+ self.first_call = True
+
+ def __call__(self, controldir):
+ # On the first call, the parameter is always the controldir
+ # containing the current repo.
+ if not self.first_call:
+ try:
+ repository = controldir.open_repository()
+ except errors.NoRepositoryPresent:
+ pass
+ else:
+ return False, ([], repository)
+ self.first_call = False
+ value = (controldir.list_branches(), None)
+ return True, value
+
+ ret = []
+ for branches, repository in controldir.ControlDir.find_bzrdirs(
+ self.user_transport, evaluate=Evaluator()):
+ if branches is not None:
+ ret.extend(branches)
+ if not using and repository is not None:
+ ret.extend(repository.find_branches())
+ return ret
+
+ @needs_read_lock
+ def search_missing_revision_ids(self, other,
+ revision_id=symbol_versioning.DEPRECATED_PARAMETER,
+ find_ghosts=True, revision_ids=None, if_present_ids=None,
+ limit=None):
+ """Return the revision ids that other has that this does not.
+
+ These are returned in topological order.
+
+ revision_id: only return revision ids included by revision_id.
+ """
+ if symbol_versioning.deprecated_passed(revision_id):
+ symbol_versioning.warn(
+ 'search_missing_revision_ids(revision_id=...) was '
+ 'deprecated in 2.4. Use revision_ids=[...] instead.',
+ DeprecationWarning, stacklevel=3)
+ if revision_ids is not None:
+ raise AssertionError(
+ 'revision_ids is mutually exclusive with revision_id')
+ if revision_id is not None:
+ revision_ids = [revision_id]
+ return InterRepository.get(other, self).search_missing_revision_ids(
+ find_ghosts=find_ghosts, revision_ids=revision_ids,
+ if_present_ids=if_present_ids, limit=limit)
+
+ @staticmethod
+ def open(base):
+ """Open the repository rooted at base.
+
+ For instance, if the repository is at URL/.bzr/repository,
+ Repository.open(URL) -> a Repository instance.
+ """
+ control = controldir.ControlDir.open(base)
+ return control.open_repository()
+
+ def copy_content_into(self, destination, revision_id=None):
+ """Make a complete copy of the content in self into destination.
+
+ This is a destructive operation! Do not use it on existing
+ repositories.
+ """
+ return InterRepository.get(self, destination).copy_content(revision_id)
+
+ def commit_write_group(self):
+ """Commit the contents accrued within the current write group.
+
+ :seealso: start_write_group.
+
+ :return: it may return an opaque hint that can be passed to 'pack'.
+ """
+ if self._write_group is not self.get_transaction():
+            # has an unlock or relock occurred?
+ raise errors.BzrError('mismatched lock context %r and '
+ 'write group %r.' %
+ (self.get_transaction(), self._write_group))
+ result = self._commit_write_group()
+ self._write_group = None
+ return result
+
+ def _commit_write_group(self):
+ """Template method for per-repository write group cleanup.
+
+ This is called before the write group is considered to be
+ finished and should ensure that all data handed to the repository
+ for writing during the write group is safely committed (to the
+ extent possible considering file system caching etc).
+ """
+
+ def suspend_write_group(self):
+ """Suspend a write group.
+
+ :raise UnsuspendableWriteGroup: If the write group can not be
+ suspended.
+ :return: List of tokens
+ """
+ raise errors.UnsuspendableWriteGroup(self)
+
+ def refresh_data(self):
+ """Re-read any data needed to synchronise with disk.
+
+ This method is intended to be called after another repository instance
+ (such as one used by a smart server) has inserted data into the
+ repository. On all repositories this will work outside of write groups.
+ Some repository formats (pack and newer for bzrlib native formats)
+ support refresh_data inside write groups. If called inside a write
+ group on a repository that does not support refreshing in a write group
+ IsInWriteGroupError will be raised.
+ """
+ self._refresh_data()
+
+ def resume_write_group(self, tokens):
+ if not self.is_write_locked():
+ raise errors.NotWriteLocked(self)
+ if self._write_group:
+ raise errors.BzrError('already in a write group')
+ self._resume_write_group(tokens)
+ # so we can detect unlock/relock - the write group is now entered.
+ self._write_group = self.get_transaction()
+
+ def _resume_write_group(self, tokens):
+ raise errors.UnsuspendableWriteGroup(self)
+
+ def fetch(self, source, revision_id=None, find_ghosts=False):
+ """Fetch the content required to construct revision_id from source.
+
+ If revision_id is None, then all content is copied.
+
+ fetch() may not be used when the repository is in a write group -
+ either finish the current write group before using fetch, or use
+ fetch before starting the write group.
+
+ :param find_ghosts: Find and copy revisions in the source that are
+ ghosts in the target (and not reachable directly by walking out to
+ the first-present revision in target from revision_id).
+ :param revision_id: If specified, all the content needed for this
+ revision ID will be copied to the target. Fetch will determine for
+ itself which content needs to be copied.
+ """
+ if self.is_in_write_group():
+ raise errors.InternalBzrError(
+ "May not fetch while in a write group.")
+ # fast path same-url fetch operations
+ # TODO: lift out to somewhere common with RemoteRepository
+ # <https://bugs.launchpad.net/bzr/+bug/401646>
+ if (self.has_same_location(source)
+ and self._has_same_fallbacks(source)):
+ # check that last_revision is in 'from' and then return a
+ # no-operation.
+ if (revision_id is not None and
+ not _mod_revision.is_null(revision_id)):
+ self.get_revision(revision_id)
+ return 0, []
+ inter = InterRepository.get(source, self)
+ return inter.fetch(revision_id=revision_id, find_ghosts=find_ghosts)
+
+ def create_bundle(self, target, base, fileobj, format=None):
+ return serializer.write_bundle(self, target, base, fileobj, format)
+
+ def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ """Obtain a CommitBuilder for this repository.
+
+ :param branch: Branch to commit to.
+ :param parents: Revision ids of the parents of the new revision.
+ :param config_stack: Configuration stack to use.
+ :param timestamp: Optional timestamp recorded for commit.
+ :param timezone: Optional timezone for timestamp.
+ :param committer: Optional committer to set for commit.
+ :param revprops: Optional dictionary of revision properties.
+ :param revision_id: Optional revision id.
+ :param lossy: Whether to discard data that can not be natively
+ represented, when pushing to a foreign VCS
+ """
+ raise NotImplementedError(self.get_commit_builder)
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ if (self.control_files._lock_count == 1 and
+ self.control_files._lock_mode == 'w'):
+ if self._write_group is not None:
+ self.abort_write_group()
+ self.control_files.unlock()
+ raise errors.BzrError(
+ 'Must end write groups before releasing write locks.')
+ self.control_files.unlock()
+ if self.control_files._lock_count == 0:
+ for repo in self._fallback_repositories:
+ repo.unlock()
+
+ @needs_read_lock
+ def clone(self, controldir, revision_id=None):
+ """Clone this repository into controldir using the current format.
+
+ Currently no check is made that the format of this repository and
+ the bzrdir format are compatible. FIXME RBC 20060201.
+
+ :return: The newly created destination repository.
+ """
+ # TODO: deprecate after 0.16; cloning this with all its settings is
+ # probably not very useful -- mbp 20070423
+ dest_repo = self._create_sprouting_repo(
+ controldir, shared=self.is_shared())
+ self.copy_content_into(dest_repo, revision_id)
+ return dest_repo
+
+ def start_write_group(self):
+ """Start a write group in the repository.
+
+ Write groups are used by repositories which do not have a 1:1 mapping
+ between file ids and backend store to manage the insertion of data from
+ both fetch and commit operations.
+
+ A write lock is required around the start_write_group/commit_write_group
+ for the support of lock-requiring repository formats.
+
+ One can only insert data into a repository inside a write group.
+
+ :return: None.
+ """
+ if not self.is_write_locked():
+ raise errors.NotWriteLocked(self)
+ if self._write_group:
+ raise errors.BzrError('already in a write group')
+ self._start_write_group()
+ # so we can detect unlock/relock - the write group is now entered.
+ self._write_group = self.get_transaction()
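+
+    # Typical write group lifecycle, as a minimal sketch (real callers in
+    # bzrlib do more error handling): data may only be inserted between
+    # start_write_group and commit_write_group, under a write lock.
+    #
+    #   repo.lock_write()
+    #   try:
+    #       repo.start_write_group()
+    #       try:
+    #           ...insert revisions, inventories, texts...
+    #       except:
+    #           repo.abort_write_group()
+    #           raise
+    #       else:
+    #           repo.commit_write_group()
+    #   finally:
+    #       repo.unlock()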
+
+ def _start_write_group(self):
+ """Template method for per-repository write group startup.
+
+ This is called before the write group is considered to be
+ entered.
+ """
+
+ @needs_read_lock
+ def sprout(self, to_bzrdir, revision_id=None):
+ """Create a descendent repository for new development.
+
+ Unlike clone, this does not copy the settings of the repository.
+ """
+ dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
+ dest_repo.fetch(self, revision_id=revision_id)
+ return dest_repo
+
+ def _create_sprouting_repo(self, a_bzrdir, shared):
+ if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
+ # use target default format.
+ dest_repo = a_bzrdir.create_repository()
+ else:
+ # Most control formats need the repository to be specifically
+ # created, but on some old all-in-one formats it's not needed
+ try:
+ dest_repo = self._format.initialize(a_bzrdir, shared=shared)
+ except errors.UninitializableFormat:
+ dest_repo = a_bzrdir.open_repository()
+ return dest_repo
+
+ @needs_read_lock
+ def has_revision(self, revision_id):
+ """True if this repository has a copy of the revision."""
+ return revision_id in self.has_revisions((revision_id,))
+
+ @needs_read_lock
+ def has_revisions(self, revision_ids):
+ """Probe to find out the presence of multiple revisions.
+
+ :param revision_ids: An iterable of revision_ids.
+ :return: A set of the revision_ids that were present.
+ """
+ raise NotImplementedError(self.has_revisions)
+
+ @needs_read_lock
+ def get_revision(self, revision_id):
+ """Return the Revision object for a named revision."""
+ return self.get_revisions([revision_id])[0]
+
+ def get_revision_reconcile(self, revision_id):
+ """'reconcile' helper routine that allows access to a revision always.
+
+ This variant of get_revision does not cross check the weave graph
+ against the revision one as get_revision does: but it should only
+ be used by reconcile, or reconcile-alike commands that are correcting
+ or testing the revision graph.
+ """
+ raise NotImplementedError(self.get_revision_reconcile)
+
+ def get_revisions(self, revision_ids):
+ """Get many revisions at once.
+
+ Repositories that need to check data on every revision read should
+ subclass this method.
+ """
+ raise NotImplementedError(self.get_revisions)
+
+ def get_deltas_for_revisions(self, revisions, specific_fileids=None):
+ """Produce a generator of revision deltas.
+
+ Note that the input is a sequence of REVISIONS, not revision_ids.
+ Trees will be held in memory until the generator exits.
+ Each delta is relative to the revision's lefthand predecessor.
+
+ :param specific_fileids: if not None, the result is filtered
+ so that only those file-ids, their parents and their
+ children are included.
+ """
+ # Get the revision-ids of interest
+ required_trees = set()
+ for revision in revisions:
+ required_trees.add(revision.revision_id)
+ required_trees.update(revision.parent_ids[:1])
+
+ # Get the matching filtered trees. Note that it's more
+ # efficient to pass filtered trees to changes_from() rather
+ # than doing the filtering afterwards. changes_from() could
+ # arguably do the filtering itself but it's path-based, not
+ # file-id based, so filtering before or afterwards is
+ # currently easier.
+ if specific_fileids is None:
+ trees = dict((t.get_revision_id(), t) for
+ t in self.revision_trees(required_trees))
+ else:
+ trees = dict((t.get_revision_id(), t) for
+ t in self._filtered_revision_trees(required_trees,
+ specific_fileids))
+
+ # Calculate the deltas
+ for revision in revisions:
+ if not revision.parent_ids:
+ old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
+ else:
+ old_tree = trees[revision.parent_ids[0]]
+ yield trees[revision.revision_id].changes_from(old_tree)
+
+ @needs_read_lock
+ def get_revision_delta(self, revision_id, specific_fileids=None):
+ """Return the delta for one revision.
+
+ The delta is relative to the left-hand predecessor of the
+ revision.
+
+ :param specific_fileids: if not None, the result is filtered
+ so that only those file-ids, their parents and their
+ children are included.
+ """
+ r = self.get_revision(revision_id)
+ return list(self.get_deltas_for_revisions([r],
+ specific_fileids=specific_fileids))[0]
+
+ @needs_write_lock
+ def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
+ signature = gpg_strategy.sign(plaintext)
+ self.add_signature_text(revision_id, signature)
+
+ def add_signature_text(self, revision_id, signature):
+ """Store a signature text for a revision.
+
+ :param revision_id: Revision id of the revision
+ :param signature: Signature text.
+ """
+ raise NotImplementedError(self.add_signature_text)
+
+ def _find_parent_ids_of_revisions(self, revision_ids):
+ """Find all parent ids that are mentioned in the revision graph.
+
+ :return: set of revisions that are parents of revision_ids which are
+ not part of revision_ids themselves
+ """
+ parent_map = self.get_parent_map(revision_ids)
+ parent_ids = set()
+ map(parent_ids.update, parent_map.itervalues())
+ parent_ids.difference_update(revision_ids)
+ parent_ids.discard(_mod_revision.NULL_REVISION)
+ return parent_ids
+
+ def iter_files_bytes(self, desired_files):
+ """Iterate through file versions.
+
+ Files will not necessarily be returned in the order they occur in
+ desired_files. No specific order is guaranteed.
+
+ Yields pairs of identifier, bytes_iterator. identifier is an opaque
+ value supplied by the caller as part of desired_files. It should
+ uniquely identify the file version in the caller's context. (Examples:
+ an index number or a TreeTransform trans_id.)
+
+ :param desired_files: a list of (file_id, revision_id, identifier)
+ triples
+ """
+ raise NotImplementedError(self.iter_files_bytes)
+
+ def get_rev_id_for_revno(self, revno, known_pair):
+ """Return the revision id of a revno, given a later (revno, revid)
+ pair in the same history.
+
+ :return: if found (True, revid). If the available history ran out
+ before reaching the revno, then this returns
+ (False, (closest_revno, closest_revid)).
+ """
+ known_revno, known_revid = known_pair
+ partial_history = [known_revid]
+ distance_from_known = known_revno - revno
+ if distance_from_known < 0:
+ raise ValueError(
+ 'requested revno (%d) is later than given known revno (%d)'
+ % (revno, known_revno))
+ try:
+ _iter_for_revno(
+ self, partial_history, stop_index=distance_from_known)
+ except errors.RevisionNotPresent, err:
+ if err.revision_id == known_revid:
+ # The start revision (known_revid) wasn't found.
+ raise
+            # This is a stacked repository with no fallbacks, or there's a
+ # left-hand ghost. Either way, even though the revision named in
+ # the error isn't in this repo, we know it's the next step in this
+ # left-hand history.
+ partial_history.append(err.revision_id)
+ if len(partial_history) <= distance_from_known:
+ # Didn't find enough history to get a revid for the revno.
+ earliest_revno = known_revno - len(partial_history) + 1
+ return (False, (earliest_revno, partial_history[-1]))
+ if len(partial_history) - 1 > distance_from_known:
+ raise AssertionError('_iter_for_revno returned too much history')
+ return (True, partial_history[-1])
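+
+    # Worked example (hypothetical ids): with known_pair=(10, 'rev-ten') and
+    # revno=7, distance_from_known is 3, so three steps of left-hand history
+    # are walked back from 'rev-ten'; if that much history is available the
+    # result is (True, <revid of revno 7>), otherwise it is
+    # (False, (closest_revno, closest_revid)).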
+
+ def is_shared(self):
+ """Return True if this repository is flagged as a shared repository."""
+ raise NotImplementedError(self.is_shared)
+
+ @needs_write_lock
+ def reconcile(self, other=None, thorough=False):
+ """Reconcile this repository."""
+ from bzrlib.reconcile import RepoReconciler
+ reconciler = RepoReconciler(self, thorough=thorough)
+ reconciler.reconcile()
+ return reconciler
+
+ def _refresh_data(self):
+ """Helper called from lock_* to ensure coherency with disk.
+
+ The default implementation does nothing; it is however possible
+ for repositories to maintain loaded indices across multiple locks
+ by checking inside their implementation of this method to see
+ whether their indices are still valid. This depends of course on
+ the disk format being validatable in this manner. This method is
+ also called by the refresh_data() public interface to cause a refresh
+ to occur while in a write lock so that data inserted by a smart server
+ push operation is visible on the client's instance of the physical
+ repository.
+ """
+
+ @needs_read_lock
+ def revision_tree(self, revision_id):
+ """Return Tree for a revision on this branch.
+
+ `revision_id` may be NULL_REVISION for the empty tree revision.
+ """
+ raise NotImplementedError(self.revision_tree)
+
+ def revision_trees(self, revision_ids):
+ """Return Trees for revisions in this repository.
+
+ :param revision_ids: a sequence of revision-ids;
+ a revision-id may not be None or 'null:'
+ """
+ raise NotImplementedError(self.revision_trees)
+
+ def pack(self, hint=None, clean_obsolete_packs=False):
+ """Compress the data within the repository.
+
+ This operation only makes sense for some repository types. For other
+ types it should be a no-op that just returns.
+
+        This stub method does not require a lock, but subclasses should use
+        @needs_write_lock; as this is a long-running call, it's reasonable to
+        lock implicitly on the user's behalf.
+
+ :param hint: If not supplied, the whole repository is packed.
+ If supplied, the repository may use the hint parameter as a
+ hint for the parts of the repository to pack. A hint can be
+ obtained from the result of commit_write_group(). Out of
+ date hints are simply ignored, because concurrent operations
+ can obsolete them rapidly.
+
+ :param clean_obsolete_packs: Clean obsolete packs immediately after
+ the pack operation.
+ """
+
+ def get_transaction(self):
+ return self.control_files.get_transaction()
+
+ def get_parent_map(self, revision_ids):
+ """See graph.StackedParentsProvider.get_parent_map"""
+ raise NotImplementedError(self.get_parent_map)
+
+ def _get_parent_map_no_fallbacks(self, revision_ids):
+ """Same as Repository.get_parent_map except doesn't query fallbacks."""
+ # revisions index works in keys; this just works in revisions
+ # therefore wrap and unwrap
+ query_keys = []
+ result = {}
+ for revision_id in revision_ids:
+ if revision_id == _mod_revision.NULL_REVISION:
+ result[revision_id] = ()
+ elif revision_id is None:
+ raise ValueError('get_parent_map(None) is not valid')
+ else:
+ query_keys.append((revision_id ,))
+ vf = self.revisions.without_fallbacks()
+ for ((revision_id,), parent_keys) in \
+ vf.get_parent_map(query_keys).iteritems():
+ if parent_keys:
+ result[revision_id] = tuple([parent_revid
+ for (parent_revid,) in parent_keys])
+ else:
+ result[revision_id] = (_mod_revision.NULL_REVISION,)
+ return result
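+
+    # For instance (hypothetical ids), a repository where 'rev-2' follows
+    # 'rev-1' would yield {'rev-2': ('rev-1',), 'rev-1': ('null:',)}: plain
+    # revision ids go in and come out, while the versionedfile index
+    # underneath works in 1-tuple keys.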
+
+ def _make_parents_provider(self):
+ if not self._format.supports_external_lookups:
+ return self
+ return graph.StackedParentsProvider(_LazyListJoin(
+ [self._make_parents_provider_unstacked()],
+ self._fallback_repositories))
+
+ def _make_parents_provider_unstacked(self):
+ return graph.CallableToParentsProviderAdapter(
+ self._get_parent_map_no_fallbacks)
+
+ @needs_read_lock
+ def get_known_graph_ancestry(self, revision_ids):
+ """Return the known graph for a set of revision ids and their ancestors.
+ """
+ raise NotImplementedError(self.get_known_graph_ancestry)
+
+ def get_file_graph(self):
+ """Return the graph walker for files."""
+ raise NotImplementedError(self.get_file_graph)
+
+ def get_graph(self, other_repository=None):
+ """Return the graph walker for this repository format"""
+ parents_provider = self._make_parents_provider()
+ if (other_repository is not None and
+ not self.has_same_location(other_repository)):
+ parents_provider = graph.StackedParentsProvider(
+ [parents_provider, other_repository._make_parents_provider()])
+ return graph.Graph(parents_provider)
+
+ @needs_write_lock
+ def set_make_working_trees(self, new_value):
+ """Set the policy flag for making working trees when creating branches.
+
+ This only applies to branches that use this repository.
+
+ The default is 'True'.
+ :param new_value: True to restore the default, False to disable making
+ working trees.
+ """
+ raise NotImplementedError(self.set_make_working_trees)
+
+ def make_working_trees(self):
+ """Returns the policy for making working trees on new branches."""
+ raise NotImplementedError(self.make_working_trees)
+
+ @needs_write_lock
+ def sign_revision(self, revision_id, gpg_strategy):
+ testament = _mod_testament.Testament.from_revision(self, revision_id)
+ plaintext = testament.as_short_text()
+ self.store_revision_signature(gpg_strategy, plaintext, revision_id)
+
+ @needs_read_lock
+ def verify_revision_signature(self, revision_id, gpg_strategy):
+ """Verify the signature on a revision.
+
+ :param revision_id: the revision to verify
+        :param gpg_strategy: the GPGStrategy object to use
+
+ :return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
+ """
+ if not self.has_signature_for_revision_id(revision_id):
+ return gpg.SIGNATURE_NOT_SIGNED, None
+ signature = self.get_signature_text(revision_id)
+
+ testament = _mod_testament.Testament.from_revision(self, revision_id)
+ plaintext = testament.as_short_text()
+
+ return gpg_strategy.verify(signature, plaintext)
+
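+ # Illustrative sketch, not part of bzrlib; the GPGStrategy construction below
+ # is an assumption about the caller's setup:
+ #
+ #   strategy = gpg.GPGStrategy(branch.get_config_stack())
+ #   result, key = repo.verify_revision_signature(revision_id, strategy)
+ #   if result == gpg.SIGNATURE_VALID:
+ #       pass  # signature verified; 'key' names the signing key
+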
+ @needs_read_lock
+ def verify_revision_signatures(self, revision_ids, gpg_strategy):
+ """Verify revision signatures for a number of revisions.
+
+ :param revision_ids: the revisions to verify
+ :param gpg_strategy: the GPGStrategy object to use
+ :return: Iterator over tuples with revision id, result and keys
+ """
+ for revid in revision_ids:
+ (result, key) = self.verify_revision_signature(revid, gpg_strategy)
+ yield revid, result, key
+
+ def has_signature_for_revision_id(self, revision_id):
+ """Query for a revision signature for revision_id in the repository."""
+ raise NotImplementedError(self.has_signature_for_revision_id)
+
+ def get_signature_text(self, revision_id):
+ """Return the text for a signature."""
+ raise NotImplementedError(self.get_signature_text)
+
+ def check(self, revision_ids=None, callback_refs=None, check_repo=True):
+ """Check consistency of all history of given revision_ids.
+
+ Different repository implementations should override _check().
+
+ :param revision_ids: A non-empty list of revision_ids whose ancestry
+ will be checked. Typically the last revision_id of a branch.
+ :param callback_refs: A dict of check-refs to resolve and callback
+ the check/_check method on the items listed as wanting the ref.
+ see bzrlib.check.
+ :param check_repo: If False do not check the repository contents, just
+ calculate the data callback_refs requires and call them back.
+ """
+ return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
+ check_repo=check_repo)
+
+ def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
+ raise NotImplementedError(self.check)
+
+ def _warn_if_deprecated(self, branch=None):
+ if not self._format.is_deprecated():
+ return
+ global _deprecation_warning_done
+ if _deprecation_warning_done:
+ return
+ try:
+ if branch is None:
+ conf = config.GlobalStack()
+ else:
+ conf = branch.get_config_stack()
+ if 'format_deprecation' in conf.get('suppress_warnings'):
+ return
+ warning("Format %s for %s is deprecated -"
+ " please use 'bzr upgrade' to get better performance"
+ % (self._format, self.bzrdir.transport.base))
+ finally:
+ _deprecation_warning_done = True
+
+ def supports_rich_root(self):
+ return self._format.rich_root_data
+
+ def _check_ascii_revisionid(self, revision_id, method):
+ """Private helper for ascii-only repositories."""
+ # weave repositories refuse to store revisionids that are non-ascii.
+ if revision_id is not None:
+ # weaves require ascii revision ids.
+ if isinstance(revision_id, unicode):
+ try:
+ revision_id.encode('ascii')
+ except UnicodeEncodeError:
+ raise errors.NonAsciiRevisionId(method, self)
+ else:
+ try:
+ revision_id.decode('ascii')
+ except UnicodeDecodeError:
+ raise errors.NonAsciiRevisionId(method, self)
+
+
+class MetaDirRepository(Repository):
+ """Repositories in the new meta-dir layout.
+
+ :ivar _transport: Transport for access to repository control files,
+ typically pointing to .bzr/repository.
+ """
+
+ def __init__(self, _format, a_bzrdir, control_files):
+ super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
+ self._transport = control_files._transport
+
+ def is_shared(self):
+ """Return True if this repository is flagged as a shared repository."""
+ return self._transport.has('shared-storage')
+
+ @needs_write_lock
+ def set_make_working_trees(self, new_value):
+ """Set the policy flag for making working trees when creating branches.
+
+ This only applies to branches that use this repository.
+
+ The default is 'True'.
+ :param new_value: True to restore the default, False to disable making
+ working trees.
+ """
+ if new_value:
+ try:
+ self._transport.delete('no-working-trees')
+ except errors.NoSuchFile:
+ pass
+ else:
+ self._transport.put_bytes('no-working-trees', '',
+ mode=self.bzrdir._get_file_mode())
+
+ def make_working_trees(self):
+ """Returns the policy for making working trees on new branches."""
+ return not self._transport.has('no-working-trees')
+
+ @needs_write_lock
+ def update_feature_flags(self, updated_flags):
+ """Update the feature flags for this branch.
+
+ :param updated_flags: Dictionary mapping feature names to necessities.
+ A necessity can be None to indicate the feature should be removed.
+ """
+ self._format._update_feature_flags(updated_flags)
+ self.control_transport.put_bytes('format', self._format.as_string())
+
+
+class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
+ """Repository format registry."""
+
+ def get_default(self):
+ """Return the current default format."""
+ return controldir.format_registry.make_bzrdir('default').repository_format
+
+
+network_format_registry = registry.FormatRegistry()
+"""Registry of formats indexed by their network name.
+
+The network name for a repository format is an identifier that can be used when
+referring to formats with smart server operations. See
+RepositoryFormat.network_name() for more detail.
+"""
+
+
+format_registry = RepositoryFormatRegistry(network_format_registry)
+"""Registry of formats, indexed by their BzrDirMetaFormat format string.
+
+This can contain either format instances themselves, or classes/factories that
+can be called to obtain one.
+"""
+
+
+#####################################################################
+# Repository Formats
+
+class RepositoryFormat(controldir.ControlComponentFormat):
+ """A repository format.
+
+ Formats provide four things:
+ * An initialization routine to construct repository data on disk.
+ * An optional format string which is used when the BzrDir supports
+ versioned children.
+ * An open routine which returns a Repository instance.
+ * A network name for referring to the format in smart server RPC
+ methods.
+
+ There is one and only one Format subclass for each on-disk format. But
+ there can be one Repository subclass that is used for several different
+ formats. The _format attribute on a Repository instance can be used to
+ determine the disk format.
+
+ Formats are placed in a registry by their format string for reference
+ during opening. These should be subclasses of RepositoryFormat for
+ consistency.
+
+ Once a format is deprecated, just deprecate the initialize and open
+ methods on the format class. Do not deprecate the object, as the
+ object may be created even when a repository instance hasn't been
+ created.
+
+ Common instance attributes:
+ _matchingbzrdir - the controldir format that the repository format was
+ originally written to work with. This can be used if manually
+ constructing a bzrdir and repository, or more commonly for test suite
+ parameterization.
+ """
+
+ # Set to True or False in derived classes. True indicates that the format
+ # supports ghosts gracefully.
+ supports_ghosts = None
+ # Can this repository be given external locations to lookup additional
+ # data. Set to True or False in derived classes.
+ supports_external_lookups = None
+ # Does this format support CHK bytestring lookups. Set to True or False in
+ # derived classes.
+ supports_chks = None
+ # Should fetch trigger a reconcile after the fetch? Only needed for
+ # some repository formats that can suffer internal inconsistencies.
+ _fetch_reconcile = False
+ # Does this format have < O(tree_size) delta generation. Used to hint what
+ # code path for commit, amongst other things.
+ fast_deltas = None
+ # Does doing a pack operation compress data? Useful for the pack UI command
+ # (so if there is one pack, the operation can still proceed because it may
+ # help), and for fetching when data won't have come from the same
+ # compressor.
+ pack_compresses = False
+ # Does the repository storage understand references to trees?
+ supports_tree_reference = None
+ # Is the format experimental?
+ experimental = False
+ # Does this repository format escape funky characters, or does it create
+ # files with names similar to the versioned files in its contents on disk?
+ supports_funky_characters = None
+ # Does this repository format support leaving locks?
+ supports_leaving_lock = None
+ # Does this format support the full VersionedFiles interface?
+ supports_full_versioned_files = None
+ # Does this format support signing revision signatures?
+ supports_revision_signatures = True
+ # Can the revision graph have incorrect parents?
+ revision_graph_can_have_wrong_parents = None
+ # Does this format support rich root data?
+ rich_root_data = None
+ # Does this format support explicitly versioned directories?
+ supports_versioned_directories = None
+ # Can other repositories be nested into one of this format?
+ supports_nesting_repositories = None
+ # Is it possible for revisions to be present without being referenced
+ # somewhere?
+ supports_unreferenced_revisions = None
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ def __eq__(self, other):
+ # format objects are generally stateless
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ return not self == other
+
+ def get_format_description(self):
+ """Return the short description for this format."""
+ raise NotImplementedError(self.get_format_description)
+
+ def initialize(self, controldir, shared=False):
+ """Initialize a repository of this format in controldir.
+
+ :param controldir: The controldir in which to put the new repository.
+ :param shared: The repository should be initialized as a sharable one.
+ :returns: The new repository object.
+
+ This may raise UninitializableFormat if shared repositories are not
+ compatible with the controldir.
+ """
+ raise NotImplementedError(self.initialize)
+
+ def is_supported(self):
+ """Is this format supported?
+
+ Supported formats must be initializable and openable.
+ Unsupported formats may not support initialization or committing or
+ some other features depending on the reason for not being supported.
+ """
+ return True
+
+ def is_deprecated(self):
+ """Is this format deprecated?
+
+ Deprecated formats may trigger a user-visible warning recommending
+ the user to upgrade. They are still fully supported.
+ """
+ return False
+
+ def network_name(self):
+ """A simple byte string uniquely identifying this format for RPC calls.
+
+ MetaDir repository formats use their disk format string to identify the
+ repository over the wire. All-in-one formats such as bzr < 0.8, and
+ foreign formats like svn/git and hg, should use some marker which is
+ unique and immutable.
+ """
+ raise NotImplementedError(self.network_name)
+
+ def check_conversion_target(self, target_format):
+ if self.rich_root_data and not target_format.rich_root_data:
+ raise errors.BadConversionTarget(
+ 'Does not support rich root data.', target_format,
+ from_format=self)
+ if (self.supports_tree_reference and
+ not getattr(target_format, 'supports_tree_reference', False)):
+ raise errors.BadConversionTarget(
+ 'Does not support nested trees', target_format,
+ from_format=self)
+
+ def open(self, controldir, _found=False):
+ """Return an instance of this format for a controldir.
+
+ _found is a private parameter, do not use it.
+ """
+ raise NotImplementedError(self.open)
+
+ def _run_post_repo_init_hooks(self, repository, controldir, shared):
+ from bzrlib.controldir import ControlDir, RepoInitHookParams
+ hooks = ControlDir.hooks['post_repo_init']
+ if not hooks:
+ return
+ params = RepoInitHookParams(repository, self, controldir, shared)
+ for hook in hooks:
+ hook(params)
+
+
+class RepositoryFormatMetaDir(bzrdir.BzrFormat, RepositoryFormat):
+ """Common base class for the new repositories using the metadir layout."""
+
+ rich_root_data = False
+ supports_tree_reference = False
+ supports_external_lookups = False
+ supports_leaving_lock = True
+ supports_nesting_repositories = True
+
+ @property
+ def _matchingbzrdir(self):
+ matching = bzrdir.BzrDirMetaFormat1()
+ matching.repository_format = self
+ return matching
+
+ def __init__(self):
+ RepositoryFormat.__init__(self)
+ bzrdir.BzrFormat.__init__(self)
+
+ def _create_control_files(self, a_bzrdir):
+ """Create the required files and the initial control_files object."""
+ # FIXME: RBC 20060125 don't peek under the covers
+ # NB: no need to escape relative paths that are url safe.
+ repository_transport = a_bzrdir.get_repository_transport(self)
+ control_files = lockable_files.LockableFiles(repository_transport,
+ 'lock', lockdir.LockDir)
+ control_files.create_lock()
+ return control_files
+
+ def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
+ """Upload the initial blank content."""
+ control_files = self._create_control_files(a_bzrdir)
+ control_files.lock_write()
+ transport = control_files._transport
+ if shared == True:
+ utf8_files += [('shared-storage', '')]
+ try:
+ transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
+ for (filename, content_stream) in files:
+ transport.put_file(filename, content_stream,
+ mode=a_bzrdir._get_file_mode())
+ for (filename, content_bytes) in utf8_files:
+ transport.put_bytes_non_atomic(filename, content_bytes,
+ mode=a_bzrdir._get_file_mode())
+ finally:
+ control_files.unlock()
+
+ @classmethod
+ def find_format(klass, a_bzrdir):
+ """Return the format for the repository object in a_bzrdir.
+
+ This is used by bzr native formats that have a "format" file in
+ the repository. Other methods may be used by different types of
+ control directory.
+ """
+ try:
+ transport = a_bzrdir.get_repository_transport(None)
+ format_string = transport.get_bytes("format")
+ except errors.NoSuchFile:
+ raise errors.NoRepositoryPresent(a_bzrdir)
+ return klass._find_format(format_registry, 'repository', format_string)
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ RepositoryFormat.check_support_status(self,
+ allow_unsupported=allow_unsupported, recommend_upgrade=recommend_upgrade,
+ basedir=basedir)
+ bzrdir.BzrFormat.check_support_status(self, allow_unsupported=allow_unsupported,
+ recommend_upgrade=recommend_upgrade, basedir=basedir)
+
+
+# formats which have no format string are not discoverable or independently
+# creatable on disk, so are not registered in format_registry. They're
+ # all in bzrlib.repofmt.knitrepo now. When an instance of one of these is
+# needed, it's constructed directly by the ControlDir. Non-native formats where
+# the repository is not separately opened are similar.
+
+format_registry.register_lazy(
+ 'Bazaar-NG Knit Repository Format 1',
+ 'bzrlib.repofmt.knitrepo',
+ 'RepositoryFormatKnit1',
+ )
+
+format_registry.register_lazy(
+ 'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
+ 'bzrlib.repofmt.knitrepo',
+ 'RepositoryFormatKnit3',
+ )
+
+format_registry.register_lazy(
+ 'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
+ 'bzrlib.repofmt.knitrepo',
+ 'RepositoryFormatKnit4',
+ )
+
+# Pack-based formats. There is one format for pre-subtrees, and one for
+# post-subtrees to allow ease of testing.
+# NOTE: These are experimental in 0.92. Stable in 1.0 and above
+format_registry.register_lazy(
+ 'Bazaar pack repository format 1 (needs bzr 0.92)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack1',
+ )
+format_registry.register_lazy(
+ 'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack3',
+ )
+format_registry.register_lazy(
+ 'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack4',
+ )
+format_registry.register_lazy(
+ 'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack5',
+ )
+format_registry.register_lazy(
+ 'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack5RichRoot',
+ )
+format_registry.register_lazy(
+ 'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack5RichRootBroken',
+ )
+format_registry.register_lazy(
+ 'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack6',
+ )
+format_registry.register_lazy(
+ 'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatKnitPack6RichRoot',
+ )
+format_registry.register_lazy(
+ 'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
+ 'bzrlib.repofmt.groupcompress_repo',
+ 'RepositoryFormat2a',
+ )
+
+# Development formats.
+# Check their docstrings to see if/when they are obsolete.
+format_registry.register_lazy(
+ ("Bazaar development format 2 with subtree support "
+ "(needs bzr.dev from before 1.8)\n"),
+ 'bzrlib.repofmt.knitpack_repo',
+ 'RepositoryFormatPackDevelopment2Subtree',
+ )
+format_registry.register_lazy(
+ 'Bazaar development format 8\n',
+ 'bzrlib.repofmt.groupcompress_repo',
+ 'RepositoryFormat2aSubtree',
+ )
+
+
+class InterRepository(InterObject):
+ """This class represents operations taking place between two repositories.
+
+ Its instances have methods like copy_content and fetch, and contain
+ references to the source and target repositories these operations can be
+ carried out on.
+
+ Often we will provide convenience methods on 'repository' which carry out
+ operations with another repository - they will always forward to
+ InterRepository.get(other).method_name(parameters).
+ """
+
+ _optimisers = []
+ """The available optimised InterRepository types."""
+
+ @needs_write_lock
+ def copy_content(self, revision_id=None):
+ """Make a complete copy of the content in self into destination.
+
+ This is a destructive operation! Do not use it on existing
+ repositories.
+
+ :param revision_id: Only copy the content needed to construct
+ revision_id and its parents.
+ """
+ try:
+ self.target.set_make_working_trees(self.source.make_working_trees())
+ except NotImplementedError:
+ pass
+ self.target.fetch(self.source, revision_id=revision_id)
+
+ @needs_write_lock
+ def fetch(self, revision_id=None, find_ghosts=False):
+ """Fetch the content required to construct revision_id.
+
+ The content is copied from self.source to self.target.
+
+ :param revision_id: if None all content is copied, if NULL_REVISION no
+ content is copied.
+ :return: None.
+ """
+ raise NotImplementedError(self.fetch)
+
+ @needs_read_lock
+ def search_missing_revision_ids(self,
+ revision_id=symbol_versioning.DEPRECATED_PARAMETER,
+ find_ghosts=True, revision_ids=None, if_present_ids=None,
+ limit=None):
+ """Return the revision ids that source has that target does not.
+
+ :param revision_id: only return revision ids included by this
+ revision_id.
+ :param revision_ids: return revision ids included by these
+ revision_ids. NoSuchRevision will be raised if any of these
+ revisions are not present.
+ :param if_present_ids: like revision_ids, but will not cause
+ NoSuchRevision if any of these are absent, instead they will simply
+ not be in the result. This is useful for e.g. finding revisions
+ to fetch for tags, which may reference absent revisions.
+ :param find_ghosts: If True find missing revisions in deep history
+ rather than just finding the surface difference.
+ :param limit: Maximum number of revisions to return, topologically
+ ordered
+ :return: A bzrlib.graph.SearchResult.
+ """
+ raise NotImplementedError(self.search_missing_revision_ids)
+
+ @staticmethod
+ def _same_model(source, target):
+ """True if source and target have the same data representation.
+
+ Note: this is always called on the base class; overriding it in a
+ subclass will have no effect.
+ """
+ try:
+ InterRepository._assert_same_model(source, target)
+ return True
+ except errors.IncompatibleRepositories, e:
+ return False
+
+ @staticmethod
+ def _assert_same_model(source, target):
+ """Raise an exception if two repositories do not use the same model.
+ """
+ if source.supports_rich_root() != target.supports_rich_root():
+ raise errors.IncompatibleRepositories(source, target,
+ "different rich-root support")
+ if source._serializer != target._serializer:
+ raise errors.IncompatibleRepositories(source, target,
+ "different serializers")
+
+
+class CopyConverter(object):
+ """A repository conversion tool which just performs a copy of the content.
+
+ This is slow but quite reliable.
+ """
+
+ def __init__(self, target_format):
+ """Create a CopyConverter.
+
+ :param target_format: The format the resulting repository should be.
+ """
+ self.target_format = target_format
+
+ def convert(self, repo, pb):
+ """Perform the conversion of to_convert, giving feedback via pb.
+
+ :param to_convert: The disk object to convert.
+ :param pb: a progress bar to use for progress information.
+ """
+ pb = ui.ui_factory.nested_progress_bar()
+ self.count = 0
+ self.total = 4
+ # this is only useful with metadir layouts - separated repo content.
+ # trigger an error if that is not the case
+ repo._format.get_format_string()
+ self.repo_dir = repo.bzrdir
+ pb.update(gettext('Moving repository to repository.backup'))
+ self.repo_dir.transport.move('repository', 'repository.backup')
+ backup_transport = self.repo_dir.transport.clone('repository.backup')
+ repo._format.check_conversion_target(self.target_format)
+ self.source_repo = repo._format.open(self.repo_dir,
+ _found=True,
+ _override_transport=backup_transport)
+ pb.update(gettext('Creating new repository'))
+ converted = self.target_format.initialize(self.repo_dir,
+ self.source_repo.is_shared())
+ converted.lock_write()
+ try:
+ pb.update(gettext('Copying content'))
+ self.source_repo.copy_content_into(converted)
+ finally:
+ converted.unlock()
+ pb.update(gettext('Deleting old repository content'))
+ self.repo_dir.transport.delete_tree('repository.backup')
+ ui.ui_factory.note(gettext('repository converted'))
+ pb.finished()
+
+
+def _strip_NULL_ghosts(revision_graph):
+ """Also don't use this. more compatibility code for unmigrated clients."""
+ # Filter ghosts, and null:
+ if _mod_revision.NULL_REVISION in revision_graph:
+ del revision_graph[_mod_revision.NULL_REVISION]
+ for key, parents in revision_graph.items():
+ revision_graph[key] = tuple(parent for parent in parents if parent
+ in revision_graph)
+ return revision_graph
+
+
+def _iter_for_revno(repo, partial_history_cache, stop_index=None,
+ stop_revision=None):
+ """Extend the partial history to include a given index
+
+ If a stop_index is supplied, stop when that index has been reached.
+ If a stop_revision is supplied, stop when that revision is
+ encountered. Otherwise, stop when the beginning of history is
+ reached.
+
+ :param stop_index: The index which should be present. When it is
+ present, history extension will stop.
+ :param stop_revision: The revision id which should be present. When
+ it is encountered, history extension will stop.
+ """
+ start_revision = partial_history_cache[-1]
+ graph = repo.get_graph()
+ iterator = graph.iter_lefthand_ancestry(start_revision,
+ (_mod_revision.NULL_REVISION,))
+ try:
+ # skip the last revision in the list
+ iterator.next()
+ while True:
+ if (stop_index is not None and
+ len(partial_history_cache) > stop_index):
+ break
+ if partial_history_cache[-1] == stop_revision:
+ break
+ revision_id = iterator.next()
+ partial_history_cache.append(revision_id)
+ except StopIteration:
+ # No more history
+ return
+
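+ # Illustrative note, not part of bzrlib: callers keep partial_history_cache
+ # between calls. For example, a cache of ['tip-rev-id'] extended with
+ # stop_index=2 grows to the three newest lefthand-ancestry revisions of the
+ # tip, because extension stops once index 2 is present.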
+
+class _LazyListJoin(object):
+ """An iterable yielding the contents of many lists as one list.
+
+ Each iterator made from this will reflect the current contents of the lists
+ at the time the iterator is made.
+
+ This is used by Repository's _make_parents_provider implementation so that
+ it is safe to do::
+
+ pp = repo._make_parents_provider() # uses a list of fallback repos
+ pp.add_fallback_repository(other_repo) # appends to that list
+ result = pp.get_parent_map(...)
+ # The result will include revs from other_repo
+ """
+
+ def __init__(self, *list_parts):
+ self.list_parts = list_parts
+
+ def __iter__(self):
+ full_list = []
+ for list_part in self.list_parts:
+ full_list.extend(list_part)
+ return iter(full_list)
+
+ def __repr__(self):
+ return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
+ self.list_parts)
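+
+
+ # Illustrative sketch, not part of bzrlib: unlike a plain list concatenation,
+ # _LazyListJoin picks up later mutations of the underlying lists, which is
+ # what makes the fallback-repository pattern shown in its docstring work.
+ #
+ #   fallback_repos = []
+ #   joined = _LazyListJoin(['base-provider'], fallback_repos)
+ #   fallback_repos.append('fallback-provider')
+ #   list(joined)   # -> ['base-provider', 'fallback-provider']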
diff --git a/bzrlib/revision.py b/bzrlib/revision.py
new file mode 100644
index 0000000..49e681c
--- /dev/null
+++ b/bzrlib/revision.py
@@ -0,0 +1,234 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# TODO: Some kind of command-line display of revision properties:
+# perhaps show them in log -v and allow them as options to the commit command.
+
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import bugtracker
+""")
+from bzrlib import (
+ errors,
+ symbol_versioning,
+ )
+from bzrlib.osutils import contains_whitespace
+
+NULL_REVISION="null:"
+CURRENT_REVISION="current:"
+
+
+class Revision(object):
+ """Single revision on a branch.
+
+ Revisions may know their revision_hash, but only once they've been
+ written out. This is not stored because you cannot write the hash
+ into the file it describes.
+
+ After bzr 0.0.5 revisions are allowed to have multiple parents.
+
+ parent_ids
+ List of parent revision_ids
+
+ properties
+ Dictionary of revision properties. These are attached to the
+ revision as extra metadata. The name must be a single
+ word; the value can be an arbitrary string.
+ """
+
+ def __init__(self, revision_id, properties=None, **args):
+ self.revision_id = revision_id
+ if properties is None:
+ self.properties = {}
+ else:
+ self.properties = properties
+ self._check_properties()
+ self.committer = None
+ self.parent_ids = []
+ self.parent_sha1s = []
+ """Not used anymore - legacy from for 4."""
+ self.__dict__.update(args)
+
+ def __repr__(self):
+ return "<Revision id %s>" % self.revision_id
+
+ def __eq__(self, other):
+ if not isinstance(other, Revision):
+ return False
+ return (
+ self.inventory_sha1 == other.inventory_sha1
+ and self.revision_id == other.revision_id
+ and self.timestamp == other.timestamp
+ and self.message == other.message
+ and self.timezone == other.timezone
+ and self.committer == other.committer
+ and self.properties == other.properties
+ and self.parent_ids == other.parent_ids)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _check_properties(self):
+ """Verify that all revision properties are OK."""
+ for name, value in self.properties.iteritems():
+ if not isinstance(name, basestring) or contains_whitespace(name):
+ raise ValueError("invalid property name %r" % name)
+ if not isinstance(value, basestring):
+ raise ValueError("invalid property value %r for %r" %
+ (value, name))
+
+ def get_history(self, repository):
+ """Return the canonical line-of-history for this revision.
+
+ If ghosts are present this may differ in result from a ghost-free
+ repository.
+ """
+ current_revision = self
+ reversed_result = []
+ while current_revision is not None:
+ reversed_result.append(current_revision.revision_id)
+ if not len(current_revision.parent_ids):
+ reversed_result.append(None)
+ current_revision = None
+ else:
+ next_revision_id = current_revision.parent_ids[0]
+ current_revision = repository.get_revision(next_revision_id)
+ reversed_result.reverse()
+ return reversed_result
+
+ def get_summary(self):
+ """Get the first line of the log message for this revision.
+
+ Return an empty string if message is None.
+ """
+ if self.message:
+ return self.message.lstrip().split('\n', 1)[0]
+ else:
+ return ''
+
+ def get_apparent_authors(self):
+ """Return the apparent authors of this revision.
+
+ If the revision properties contain the names of the authors,
+ return them. Otherwise return the committer name.
+
+ The return value will be a list containing at least one element.
+ """
+ authors = self.properties.get('authors', None)
+ if authors is None:
+ author = self.properties.get('author', self.committer)
+ if author is None:
+ return []
+ return [author]
+ else:
+ return authors.split("\n")
+
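+ # For example (illustrative): a revision whose properties include
+ # {'authors': 'Alice <alice@example.com>\nBob <bob@example.com>'} reports both
+ # names above, while a revision with neither 'authors' nor 'author' set falls
+ # back to the committer.
+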
+ def iter_bugs(self):
+ """Iterate over the bugs associated with this revision."""
+ bug_property = self.properties.get('bugs', None)
+ if bug_property is None:
+ return
+ for line in bug_property.splitlines():
+ try:
+ url, status = line.split(None, 2)
+ except ValueError:
+ raise errors.InvalidLineInBugsProperty(line)
+ if status not in bugtracker.ALLOWED_BUG_STATUSES:
+ raise errors.InvalidBugStatus(status)
+ yield url, status
+
+
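+ # Illustrative sketch, not part of bzrlib: the 'bugs' revision property is a
+ # newline-separated list of '<url> <status>' entries, so (assuming 'fixed' is
+ # an allowed status in bzrlib.bugtracker):
+ #
+ #   rev = Revision('rev-1',
+ #       properties={'bugs': 'https://bugs.example.com/1234 fixed'})
+ #   list(rev.iter_bugs())   # -> [('https://bugs.example.com/1234', 'fixed')]
+
+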
+def iter_ancestors(revision_id, revision_source, only_present=False):
+ ancestors = (revision_id,)
+ distance = 0
+ while len(ancestors) > 0:
+ new_ancestors = []
+ for ancestor in ancestors:
+ if not only_present:
+ yield ancestor, distance
+ try:
+ revision = revision_source.get_revision(ancestor)
+ except errors.NoSuchRevision, e:
+ if e.revision == revision_id:
+ raise
+ else:
+ continue
+ if only_present:
+ yield ancestor, distance
+ new_ancestors.extend(revision.parent_ids)
+ ancestors = new_ancestors
+ distance += 1
+
+
+def find_present_ancestors(revision_id, revision_source):
+ """Return the ancestors of a revision present in a branch.
+
+ It's possible that a branch won't have the complete ancestry of
+ one of its revisions.
+
+ """
+ found_ancestors = {}
+ anc_iter = enumerate(iter_ancestors(revision_id, revision_source,
+ only_present=True))
+ for anc_order, (anc_id, anc_distance) in anc_iter:
+ if anc_id not in found_ancestors:
+ found_ancestors[anc_id] = (anc_order, anc_distance)
+ return found_ancestors
+
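+ # Illustrative sketch, not part of bzrlib: find_present_ancestors() maps each
+ # ancestor that the source can supply to (iteration order, distance). Given a
+ # linear history a <- b <- c held by revision_source:
+ #
+ #   find_present_ancestors('c', revision_source)
+ #   # -> {'c': (0, 0), 'b': (1, 1), 'a': (2, 2)}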
+
+def __get_closest(intersection):
+ intersection.sort()
+ matches = []
+ for entry in intersection:
+ if entry[0] == intersection[0][0]:
+ matches.append(entry[2])
+ return matches
+
+
+def is_reserved_id(revision_id):
+ """Determine whether a revision id is reserved
+
+ :return: True if the revision is reserved, False otherwise
+ """
+ return isinstance(revision_id, basestring) and revision_id.endswith(':')
+
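+ # For example (illustrative): is_reserved_id('null:') and
+ # is_reserved_id('current:') are True, while ordinary generated revision ids
+ # such as 'joe@example.com-20060101000000-abcdef' are not reserved.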
+
+def check_not_reserved_id(revision_id):
+ """Raise ReservedId if the supplied revision_id is reserved"""
+ if is_reserved_id(revision_id):
+ raise errors.ReservedId(revision_id)
+
+
+def ensure_null(revision_id):
+ """Ensure only NULL_REVISION is used to represent the null revision"""
+ if revision_id is None:
+ symbol_versioning.warn('NULL_REVISION should be used for the null'
+ ' revision instead of None, as of bzr 0.91.',
+ DeprecationWarning, stacklevel=2)
+ return NULL_REVISION
+ else:
+ return revision_id
+
+
+def is_null(revision_id):
+ if revision_id is None:
+ symbol_versioning.warn('NULL_REVISION should be used for the null'
+ ' revision instead of None, as of bzr 0.90.',
+ DeprecationWarning, stacklevel=2)
+ return revision_id in (None, NULL_REVISION)
diff --git a/bzrlib/revisionspec.py b/bzrlib/revisionspec.py
new file mode 100644
index 0000000..b2c9ab5
--- /dev/null
+++ b/bzrlib/revisionspec.py
@@ -0,0 +1,1009 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import bisect
+import datetime
+
+from bzrlib import (
+ branch as _mod_branch,
+ osutils,
+ revision,
+ symbol_versioning,
+ workingtree,
+ )
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib import (
+ errors,
+ lazy_regex,
+ registry,
+ trace,
+ )
+
+
+class RevisionInfo(object):
+ """The results of applying a revision specification to a branch."""
+
+ help_txt = """The results of applying a revision specification to a branch.
+
+ An instance has two useful attributes: revno, and rev_id.
+
+ They can also be accessed as spec[0] and spec[1] respectively,
+ so that you can write code like:
+ revno, rev_id = RevisionSpec(branch, spec)
+ although this is probably going to be deprecated later.
+
+ This class exists mostly to be the return value of a RevisionSpec,
+ so that you can access the member you're interested in (number or id)
+ or treat the result as a tuple.
+ """
+
+ def __init__(self, branch, revno=None, rev_id=None):
+ self.branch = branch
+ self._has_revno = (revno is not None)
+ self._revno = revno
+ self.rev_id = rev_id
+ if self.rev_id is None and self._revno is not None:
+ # allow caller to be lazy
+ self.rev_id = branch.get_rev_id(self._revno)
+
+ @property
+ def revno(self):
+ if not self._has_revno and self.rev_id is not None:
+ try:
+ self._revno = self.branch.revision_id_to_revno(self.rev_id)
+ except errors.NoSuchRevision:
+ self._revno = None
+ self._has_revno = True
+ return self._revno
+
+ def __nonzero__(self):
+ if self.rev_id is None:
+ return False
+ # TODO: otherwise, it should depend on how I was built -
+ # if it's in_history(branch), then check revision_history(),
+ # if it's in_store(branch), do the check below
+ return self.branch.repository.has_revision(self.rev_id)
+
+ def __len__(self):
+ return 2
+
+ def __getitem__(self, index):
+ if index == 0: return self.revno
+ if index == 1: return self.rev_id
+ raise IndexError(index)
+
+ def get(self):
+ return self.branch.repository.get_revision(self.rev_id)
+
+ def __eq__(self, other):
+ if type(other) not in (tuple, list, type(self)):
+ return False
+ if type(other) is type(self) and self.branch is not other.branch:
+ return False
+ return tuple(self) == tuple(other)
+
+ def __repr__(self):
+ return '<bzrlib.revisionspec.RevisionInfo object %s, %s for %r>' % (
+ self.revno, self.rev_id, self.branch)
+
+ @staticmethod
+ def from_revision_id(branch, revision_id, revs=symbol_versioning.DEPRECATED_PARAMETER):
+ """Construct a RevisionInfo given just the id.
+
+ Use this if you don't know or care what the revno is.
+ """
+ if symbol_versioning.deprecated_passed(revs):
+ symbol_versioning.warn(
+ 'RevisionInfo.from_revision_id(revs) was deprecated in 2.5.',
+ DeprecationWarning,
+ stacklevel=2)
+ return RevisionInfo(branch, revno=None, rev_id=revision_id)
+
+
+class RevisionSpec(object):
+ """A parsed revision specification."""
+
+ help_txt = """A parsed revision specification.
+
+ A revision specification is a string, which may be unambiguous about
+ what it represents by giving a prefix like 'date:' or 'revid:' etc,
+ or it may have no prefix, in which case it's tried against several
+ specifier types in sequence to determine what the user meant.
+
+ Revision specs are a UI element, and they have been moved out
+ of the branch class to leave "back-end" classes unaware of such
+ details. Code that gets a revno or rev_id from other code should
+ not be using revision specs - revnos and revision ids are the
+ accepted ways to refer to revisions internally.
+
+ (Equivalent to the old Branch method get_revision_info())
+ """
+
+ prefix = None
+ # wants_revision_history has been deprecated in 2.5.
+ wants_revision_history = False
+ dwim_catchable_exceptions = (errors.InvalidRevisionSpec,)
+ """Exceptions that RevisionSpec_dwim._match_on will catch.
+
+ If the revspec is part of ``dwim_revspecs``, it may be tried with an
+ invalid revspec and raise some exception. The exceptions mentioned here
+ will not be reported to the user but simply ignored without stopping the
+ dwim processing.
+ """
+
+ @staticmethod
+ def from_string(spec):
+ """Parse a revision spec string into a RevisionSpec object.
+
+ :param spec: A string specified by the user
+ :return: A RevisionSpec object that understands how to parse the
+ supplied notation.
+ """
+ if not isinstance(spec, (type(None), basestring)):
+ raise TypeError('spec must be a string or None, not %r' % (spec,))
+
+ if spec is None:
+ return RevisionSpec(None, _internal=True)
+ match = revspec_registry.get_prefix(spec)
+ if match is not None:
+ spectype, specsuffix = match
+ trace.mutter('Returning RevisionSpec %s for %s',
+ spectype.__name__, spec)
+ return spectype(spec, _internal=True)
+ else:
+ # Otherwise treat it as a DWIM, build the RevisionSpec object and
+ # wait for _match_on to be called.
+ return RevisionSpec_dwim(spec, _internal=True)
+
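+ # For example (illustrative): RevisionSpec.from_string('revno:3') returns a
+ # RevisionSpec_revno, while an unprefixed spec such as '3' or 'mytag' becomes
+ # a RevisionSpec_dwim that resolves itself lazily when _match_on is called.
+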
+ def __init__(self, spec, _internal=False):
+ """Create a RevisionSpec referring to the Null revision.
+
+ :param spec: The original spec supplied by the user
+ :param _internal: Used to ensure that RevisionSpec is not being
+ called directly. Only from RevisionSpec.from_string()
+ """
+ if not _internal:
+ symbol_versioning.warn('Creating a RevisionSpec directly has'
+ ' been deprecated in version 0.11. Use'
+ ' RevisionSpec.from_string()'
+ ' instead.',
+ DeprecationWarning, stacklevel=2)
+ self.user_spec = spec
+ if self.prefix and spec.startswith(self.prefix):
+ spec = spec[len(self.prefix):]
+ self.spec = spec
+
+ def _match_on(self, branch, revs):
+ trace.mutter('Returning RevisionSpec._match_on: None')
+ return RevisionInfo(branch, None, None)
+
+ def _match_on_and_check(self, branch, revs):
+ info = self._match_on(branch, revs)
+ if info:
+ return info
+ elif info == (None, None):
+ # special case - nothing supplied
+ return info
+ elif self.prefix:
+ raise errors.InvalidRevisionSpec(self.user_spec, branch)
+ else:
+ raise errors.InvalidRevisionSpec(self.spec, branch)
+
+ def in_history(self, branch):
+ if branch:
+ if self.wants_revision_history:
+ symbol_versioning.warn(
+ "RevisionSpec.wants_revision_history was "
+ "deprecated in 2.5 (%s)." % self.__class__.__name__,
+ DeprecationWarning)
+ branch.lock_read()
+ try:
+ graph = branch.repository.get_graph()
+ revs = list(graph.iter_lefthand_ancestry(
+ branch.last_revision(), [revision.NULL_REVISION]))
+ finally:
+ branch.unlock()
+ revs.reverse()
+ else:
+ revs = None
+ else:
+ # this should never trigger.
+ # TODO: make it a deprecated code path. RBC 20060928
+ revs = None
+ return self._match_on_and_check(branch, revs)
+
+ # FIXME: in_history is somewhat broken,
+ # it will return non-history revisions in many
+ # circumstances. The expected facility is that
+ # in_history only returns revision-history revs,
+ # in_store returns any rev. RBC 20051010
+ # aliases for now, when we fix the core logic, then they
+ # will do what you expect.
+ in_store = in_history
+ in_branch = in_store
+
+ def as_revision_id(self, context_branch):
+ """Return just the revision_id for this revisions spec.
+
+ Some revision specs require a context_branch to be able to determine
+ their value. Not all specs will make use of it.
+ """
+ return self._as_revision_id(context_branch)
+
+ def _as_revision_id(self, context_branch):
+ """Implementation of as_revision_id()
+
+ Classes should override this function to provide appropriate
+ functionality. The default is to just call '.in_history().rev_id'
+ """
+ return self.in_history(context_branch).rev_id
+
+ def as_tree(self, context_branch):
+ """Return the tree object for this revisions spec.
+
+ Some revision specs require a context_branch to be able to determine
+ the revision id and access the repository. Not all specs will make
+ use of it.
+ """
+ return self._as_tree(context_branch)
+
+ def _as_tree(self, context_branch):
+ """Implementation of as_tree().
+
+ Classes should override this function to provide appropriate
+ functionality. The default is to just call '.as_revision_id()'
+ and get the revision tree from context_branch's repository.
+ """
+ revision_id = self.as_revision_id(context_branch)
+ return context_branch.repository.revision_tree(revision_id)
+
+ def __repr__(self):
+ # this is mostly for helping with testing
+ return '<%s %s>' % (self.__class__.__name__,
+ self.user_spec)
+
+ def needs_branch(self):
+ """Whether this revision spec needs a branch.
+
+ Set this to False if the branch argument of _match_on is not used.
+ """
+ return True
+
+ def get_branch(self):
+ """When the revision specifier contains a branch location, return it.
+
+ Otherwise, return None.
+ """
+ return None
+
+
+# private API
+
+class RevisionSpec_dwim(RevisionSpec):
+ """Provides a DWIMish revision specifier lookup.
+
+ Note that this does not go in the revspec_registry because by definition
+ there is no prefix to identify it. It's solely called from
+ RevisionSpec.from_string() because the DWIMification happens when _match_on
+ is called, so the string describing the revision is kept here until needed.
+ """
+
+ help_txt = None
+
+ _revno_regex = lazy_regex.lazy_compile(r'^(?:(\d+(\.\d+)*)|-\d+)(:.*)?$')
+
+ # The revspecs to try
+ _possible_revspecs = []
+
+ def _try_spectype(self, rstype, branch):
+ rs = rstype(self.spec, _internal=True)
+ # Hit in_history to find out if it exists, or we need to try the
+ # next type.
+ return rs.in_history(branch)
+
+ def _match_on(self, branch, revs):
+ """Run the lookup and see what we can get."""
+
+ # First, see if it's a revno
+ if self._revno_regex.match(self.spec) is not None:
+ try:
+ return self._try_spectype(RevisionSpec_revno, branch)
+ except RevisionSpec_revno.dwim_catchable_exceptions:
+ pass
+
+ # Next see what has been registered
+ for objgetter in self._possible_revspecs:
+ rs_class = objgetter.get_obj()
+ try:
+ return self._try_spectype(rs_class, branch)
+ except rs_class.dwim_catchable_exceptions:
+ pass
+
+ # Try the old (deprecated) dwim list:
+ for rs_class in dwim_revspecs:
+ try:
+ return self._try_spectype(rs_class, branch)
+ except rs_class.dwim_catchable_exceptions:
+ pass
+
+ # Well, I dunno what it is. Note that we don't try to keep track of the
+ # first or last exception raised during the DWIM tries as none seems
+ # really relevant.
+ raise errors.InvalidRevisionSpec(self.spec, branch)
+
+ @classmethod
+ def append_possible_revspec(cls, revspec):
+ """Append a possible DWIM revspec.
+
+ :param revspec: Revision spec to try.
+ """
+ cls._possible_revspecs.append(registry._ObjectGetter(revspec))
+
+ @classmethod
+ def append_possible_lazy_revspec(cls, module_name, member_name):
+ """Append a possible lazily loaded DWIM revspec.
+
+ :param module_name: Name of the module with the revspec
+ :param member_name: Name of the revspec within the module
+ """
+ cls._possible_revspecs.append(
+ registry._LazyObjectGetter(module_name, member_name))
+
+
+class RevisionSpec_revno(RevisionSpec):
+ """Selects a revision using a number."""
+
+ help_txt = """Selects a revision using a number.
+
+ Use an integer to specify a revision in the history of the branch.
+ Optionally a branch can be specified. A negative number will count
+ from the end of the branch (-1 is the last revision, -2 the previous
+ one). If the negative number is larger than the branch's history, the
+ first revision is returned.
+ Examples::
+
+ revno:1 -> return the first revision of this branch
+ revno:3:/path/to/branch -> return the 3rd revision of
+ the branch '/path/to/branch'
+ revno:-1 -> The last revision in a branch.
+ -2:http://other/branch -> The second to last revision in the
+ remote branch.
+ -1000000 -> Most likely the first revision, unless
+ your history is very long.
+ """
+ prefix = 'revno:'
+
+ def _match_on(self, branch, revs):
+ """Lookup a revision by revision number"""
+ branch, revno, revision_id = self._lookup(branch)
+ return RevisionInfo(branch, revno, revision_id)
+
+ def _lookup(self, branch):
+ loc = self.spec.find(':')
+ if loc == -1:
+ revno_spec = self.spec
+ branch_spec = None
+ else:
+ revno_spec = self.spec[:loc]
+ branch_spec = self.spec[loc+1:]
+
+ if revno_spec == '':
+ if not branch_spec:
+ raise errors.InvalidRevisionSpec(self.user_spec,
+ branch, 'cannot have an empty revno and no branch')
+ revno = None
+ else:
+ try:
+ revno = int(revno_spec)
+ dotted = False
+ except ValueError:
+ # dotted decimal. This arguably should not be here
+ # but the from_string method is a little primitive
+ # right now - RBC 20060928
+ try:
+ match_revno = tuple((int(number) for number in revno_spec.split('.')))
+ except ValueError, e:
+ raise errors.InvalidRevisionSpec(self.user_spec, branch, e)
+
+ dotted = True
+
+ if branch_spec:
+ # the user has overridden the branch to look in.
+ branch = _mod_branch.Branch.open(branch_spec)
+
+ if dotted:
+ try:
+ revision_id = branch.dotted_revno_to_revision_id(match_revno,
+ _cache_reverse=True)
+ except errors.NoSuchRevision:
+ raise errors.InvalidRevisionSpec(self.user_spec, branch)
+ else:
+ # there is no traditional 'revno' for dotted-decimal revnos.
+ # so for API compatibility we return None.
+ return branch, None, revision_id
+ else:
+ last_revno, last_revision_id = branch.last_revision_info()
+ if revno < 0:
+ # if get_rev_id supported negative revnos, there would not be a
+ # need for this special case.
+ if (-revno) >= last_revno:
+ revno = 1
+ else:
+ revno = last_revno + revno + 1
+ try:
+ revision_id = branch.get_rev_id(revno)
+ except errors.NoSuchRevision:
+ raise errors.InvalidRevisionSpec(self.user_spec, branch)
+ return branch, revno, revision_id
+
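+ # Worked example (illustrative): with last_revno == 10, 'revno:-1' maps to
+ # revno 10 (10 + -1 + 1), 'revno:-2' to revno 9, and 'revno:-1000000'
+ # saturates to revno 1 because -(-1000000) >= 10.
+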
+ def _as_revision_id(self, context_branch):
+ # We would have the revno here, but we don't really care
+ branch, revno, revision_id = self._lookup(context_branch)
+ return revision_id
+
+ def needs_branch(self):
+ return self.spec.find(':') == -1
+
+ def get_branch(self):
+ if self.spec.find(':') == -1:
+ return None
+ else:
+ return self.spec[self.spec.find(':')+1:]
+
+# Old compatibility
+RevisionSpec_int = RevisionSpec_revno
+
+
+class RevisionIDSpec(RevisionSpec):
+
+ def _match_on(self, branch, revs):
+ revision_id = self.as_revision_id(branch)
+ return RevisionInfo.from_revision_id(branch, revision_id)
+
+
+class RevisionSpec_revid(RevisionIDSpec):
+ """Selects a revision using the revision id."""
+
+ help_txt = """Selects a revision using the revision id.
+
+ Supply a specific revision id that can be used to specify any
+ revision id in the ancestry of the branch, including merges and
+ pending merges.
+ Examples::
+
+ revid:aaaa@bbbb-123456789 -> Select revision 'aaaa@bbbb-123456789'
+ """
+
+ prefix = 'revid:'
+
+ def _as_revision_id(self, context_branch):
+ # self.spec comes straight from parsing the command line arguments,
+ # so we expect it to be a Unicode string. Switch it to the internal
+ # representation.
+ return osutils.safe_revision_id(self.spec, warn=False)
+
+
+
+class RevisionSpec_last(RevisionSpec):
+ """Selects the nth revision from the end."""
+
+ help_txt = """Selects the nth revision from the end.
+
+ Supply a positive number to get the nth revision from the end.
+ This is the same as supplying negative numbers to the 'revno:' spec.
+ Examples::
+
+ last:1 -> return the last revision
+ last:3 -> return the revision 2 before the end.
+ """
+
+ prefix = 'last:'
+
+ def _match_on(self, branch, revs):
+ revno, revision_id = self._revno_and_revision_id(branch)
+ return RevisionInfo(branch, revno, revision_id)
+
+ def _revno_and_revision_id(self, context_branch):
+ last_revno, last_revision_id = context_branch.last_revision_info()
+
+ if self.spec == '':
+ if not last_revno:
+ raise errors.NoCommits(context_branch)
+ return last_revno, last_revision_id
+
+ try:
+ offset = int(self.spec)
+ except ValueError, e:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch, e)
+
+ if offset <= 0:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch,
+ 'you must supply a positive value')
+
+ revno = last_revno - offset + 1
+ try:
+ revision_id = context_branch.get_rev_id(revno)
+ except errors.NoSuchRevision:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch)
+ return revno, revision_id
+
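+ # Worked example (illustrative): with last_revno == 10, 'last:1' resolves to
+ # revno 10 (10 - 1 + 1) and 'last:3' to revno 8.
+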
+ def _as_revision_id(self, context_branch):
+ # We compute the revno as part of the process, but we don't really care
+ # about it.
+ revno, revision_id = self._revno_and_revision_id(context_branch)
+ return revision_id
+
+
+
+class RevisionSpec_before(RevisionSpec):
+ """Selects the parent of the revision specified."""
+
+ help_txt = """Selects the parent of the revision specified.
+
+ Supply any revision spec to return the parent of that revision. This is
+ mostly useful when inspecting revisions that are not in the revision history
+ of a branch.
+
+ It is an error to request the parent of the null revision (before:0).
+
+ Examples::
+
+ before:1913 -> Return the parent of revno 1913 (revno 1912)
+ before:revid:aaaa@bbbb-1234567890 -> return the parent of revision
+ aaaa@bbbb-1234567890
+ bzr diff -r before:1913..1913
+ -> Find the changes between revision 1913 and its parent (1912).
+ (What changes did revision 1913 introduce).
+ This is equivalent to: bzr diff -c 1913
+ """
+
+ prefix = 'before:'
+
+ def _match_on(self, branch, revs):
+ r = RevisionSpec.from_string(self.spec)._match_on(branch, revs)
+ if r.revno == 0:
+ raise errors.InvalidRevisionSpec(self.user_spec, branch,
+ 'cannot go before the null: revision')
+ if r.revno is None:
+ # We need to use the repository history here
+ rev = branch.repository.get_revision(r.rev_id)
+ if not rev.parent_ids:
+ revision_id = revision.NULL_REVISION
+ else:
+ revision_id = rev.parent_ids[0]
+ revno = None
+ else:
+ revno = r.revno - 1
+ try:
+ revision_id = branch.get_rev_id(revno, revs)
+ except errors.NoSuchRevision:
+ raise errors.InvalidRevisionSpec(self.user_spec,
+ branch)
+ return RevisionInfo(branch, revno, revision_id)
+
+ def _as_revision_id(self, context_branch):
+ base_revision_id = RevisionSpec.from_string(self.spec)._as_revision_id(context_branch)
+ if base_revision_id == revision.NULL_REVISION:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch,
+ 'cannot go before the null: revision')
+ context_repo = context_branch.repository
+ context_repo.lock_read()
+ try:
+ parent_map = context_repo.get_parent_map([base_revision_id])
+ finally:
+ context_repo.unlock()
+ if base_revision_id not in parent_map:
+ # Ghost, or unknown revision id
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch,
+ 'cannot find the matching revision')
+ parents = parent_map[base_revision_id]
+ if len(parents) < 1:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch,
+ 'No parents for revision.')
+ return parents[0]
+
+
+
+class RevisionSpec_tag(RevisionSpec):
+ """Select a revision identified by tag name"""
+
+ help_txt = """Selects a revision identified by a tag name.
+
+ Tags are stored in the branch and created by the 'tag' command.
+ """
+
+ prefix = 'tag:'
+ dwim_catchable_exceptions = (errors.NoSuchTag, errors.TagsNotSupported)
+
+ def _match_on(self, branch, revs):
+ # Can raise tags not supported, NoSuchTag, etc
+ return RevisionInfo.from_revision_id(branch,
+ branch.tags.lookup_tag(self.spec))
+
+ def _as_revision_id(self, context_branch):
+ return context_branch.tags.lookup_tag(self.spec)
+
+
+
+class _RevListToTimestamps(object):
+ """This takes a list of revisions, and allows you to bisect by date"""
+
+ __slots__ = ['branch']
+
+ def __init__(self, branch):
+ self.branch = branch
+
+ def __getitem__(self, index):
+ """Get the date of the index'd item"""
+ r = self.branch.repository.get_revision(self.branch.get_rev_id(index))
+ # TODO: Handle timezone.
+ return datetime.datetime.fromtimestamp(r.timestamp)
+
+ def __len__(self):
+ return self.branch.revno()
+
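+ # Illustrative note, not part of bzrlib: RevisionSpec_date below bisects over
+ # this adapter, so resolving a date costs O(log revno) get_revision() calls
+ # instead of reading the whole revision history.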
+
+class RevisionSpec_date(RevisionSpec):
+ """Selects a revision on the basis of a datestamp."""
+
+ help_txt = """Selects a revision on the basis of a datestamp.
+
+ Supply a datestamp to select the first revision that matches the date.
+ Date can be 'yesterday', 'today', 'tomorrow' or a YYYY-MM-DD string.
+ Matches the first entry after a given date (either at midnight or
+ at a specified time).
+
+ One way to display all the changes since yesterday would be::
+
+ bzr log -r date:yesterday..
+
+ Examples::
+
+ date:yesterday -> select the first revision since yesterday
+ date:2006-08-14,17:10:14 -> select the first revision after
+ August 14th, 2006 at 5:10pm.
+ """
+ prefix = 'date:'
+ _date_regex = lazy_regex.lazy_compile(
+ r'(?P<date>(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d))?'
+ r'(,|T)?\s*'
+ r'(?P<time>(?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d))?)?'
+ )
+
+ def _match_on(self, branch, revs):
+ """Spec for date revisions:
+ date:value
+ value can be 'yesterday', 'today', 'tomorrow' or a YYYY-MM-DD string.
+ matches the first entry after a given date (either at midnight or
+ at a specified time).
+ """
+ # XXX: This doesn't actually work
+ # So the proper way of saying 'give me all entries for today' is:
+ # -r date:yesterday..date:today
+ today = datetime.datetime.fromordinal(datetime.date.today().toordinal())
+ if self.spec.lower() == 'yesterday':
+ dt = today - datetime.timedelta(days=1)
+ elif self.spec.lower() == 'today':
+ dt = today
+ elif self.spec.lower() == 'tomorrow':
+ dt = today + datetime.timedelta(days=1)
+ else:
+ m = self._date_regex.match(self.spec)
+ if not m or (not m.group('date') and not m.group('time')):
+ raise errors.InvalidRevisionSpec(self.user_spec,
+ branch, 'invalid date')
+
+ try:
+ if m.group('date'):
+ year = int(m.group('year'))
+ month = int(m.group('month'))
+ day = int(m.group('day'))
+ else:
+ year = today.year
+ month = today.month
+ day = today.day
+
+ if m.group('time'):
+ hour = int(m.group('hour'))
+ minute = int(m.group('minute'))
+ if m.group('second'):
+ second = int(m.group('second'))
+ else:
+ second = 0
+ else:
+ hour, minute, second = 0, 0, 0
+ except ValueError:
+ raise errors.InvalidRevisionSpec(self.user_spec,
+ branch, 'invalid date')
+
+ dt = datetime.datetime(year=year, month=month, day=day,
+ hour=hour, minute=minute, second=second)
+ branch.lock_read()
+ try:
+ rev = bisect.bisect(_RevListToTimestamps(branch), dt, 1)
+ finally:
+ branch.unlock()
+ if rev == branch.revno():
+ raise errors.InvalidRevisionSpec(self.user_spec, branch)
+ return RevisionInfo(branch, rev)
+
+
+
+class RevisionSpec_ancestor(RevisionSpec):
+ """Selects a common ancestor with a second branch."""
+
+ help_txt = """Selects a common ancestor with a second branch.
+
+ Supply the path to a branch to select the common ancestor.
+
+ The common ancestor is the last revision that existed in both
+ branches. Usually this is the branch point, but it could also be
+ a revision that was merged.
+
+ This is frequently used with 'diff' to return all of the changes
+ that your branch introduces, while excluding the changes that you
+ have not merged from the remote branch.
+
+ Examples::
+
+ ancestor:/path/to/branch
+ $ bzr diff -r ancestor:../../mainline/branch
+ """
+ prefix = 'ancestor:'
+
+ def _match_on(self, branch, revs):
+ trace.mutter('matching ancestor: on: %s, %s', self.spec, branch)
+ return self._find_revision_info(branch, self.spec)
+
+ def _as_revision_id(self, context_branch):
+ return self._find_revision_id(context_branch, self.spec)
+
+ @staticmethod
+ def _find_revision_info(branch, other_location):
+ revision_id = RevisionSpec_ancestor._find_revision_id(branch,
+ other_location)
+ return RevisionInfo(branch, None, revision_id)
+
+ @staticmethod
+ def _find_revision_id(branch, other_location):
+ from bzrlib.branch import Branch
+
+ branch.lock_read()
+ try:
+ revision_a = revision.ensure_null(branch.last_revision())
+ if revision_a == revision.NULL_REVISION:
+ raise errors.NoCommits(branch)
+ if other_location == '':
+ other_location = branch.get_parent()
+ other_branch = Branch.open(other_location)
+ other_branch.lock_read()
+ try:
+ revision_b = revision.ensure_null(other_branch.last_revision())
+ if revision_b == revision.NULL_REVISION:
+ raise errors.NoCommits(other_branch)
+ graph = branch.repository.get_graph(other_branch.repository)
+ rev_id = graph.find_unique_lca(revision_a, revision_b)
+ finally:
+ other_branch.unlock()
+ if rev_id == revision.NULL_REVISION:
+ raise errors.NoCommonAncestor(revision_a, revision_b)
+ return rev_id
+ finally:
+ branch.unlock()
+
+
+
+
+class RevisionSpec_branch(RevisionSpec):
+ """Selects the last revision of a specified branch."""
+
+ help_txt = """Selects the last revision of a specified branch.
+
+ Supply the path to a branch to select its last revision.
+
+ Examples::
+
+ branch:/path/to/branch
+ """
+ prefix = 'branch:'
+ dwim_catchable_exceptions = (errors.NotBranchError,)
+
+ def _match_on(self, branch, revs):
+ from bzrlib.branch import Branch
+ other_branch = Branch.open(self.spec)
+ revision_b = other_branch.last_revision()
+ if revision_b in (None, revision.NULL_REVISION):
+ raise errors.NoCommits(other_branch)
+ if branch is None:
+ branch = other_branch
+ else:
+ try:
+ # pull in the remote revisions so we can diff
+ branch.fetch(other_branch, revision_b)
+ except errors.ReadOnlyError:
+ branch = other_branch
+ return RevisionInfo(branch, None, revision_b)
+
+ def _as_revision_id(self, context_branch):
+ from bzrlib.branch import Branch
+ other_branch = Branch.open(self.spec)
+ last_revision = other_branch.last_revision()
+ last_revision = revision.ensure_null(last_revision)
+ context_branch.fetch(other_branch, last_revision)
+ if last_revision == revision.NULL_REVISION:
+ raise errors.NoCommits(other_branch)
+ return last_revision
+
+ def _as_tree(self, context_branch):
+ from bzrlib.branch import Branch
+ other_branch = Branch.open(self.spec)
+ last_revision = other_branch.last_revision()
+ last_revision = revision.ensure_null(last_revision)
+ if last_revision == revision.NULL_REVISION:
+ raise errors.NoCommits(other_branch)
+ return other_branch.repository.revision_tree(last_revision)
+
+ def needs_branch(self):
+ return False
+
+ def get_branch(self):
+ return self.spec
+
+
+
+class RevisionSpec_submit(RevisionSpec_ancestor):
+ """Selects a common ancestor with a submit branch."""
+
+ help_txt = """Selects a common ancestor with the submit branch.
+
+ Diffing against this shows all the changes that were made in this branch,
+ and is a good predictor of what merge will do. The submit branch is
+ used by the bundle and merge directive commands. If no submit branch
+ is specified, the parent branch is used instead.
+
+ The common ancestor is the last revision that existed in both
+ branches. Usually this is the branch point, but it could also be
+ a revision that was merged.
+
+ Examples::
+
+ $ bzr diff -r submit:
+ """
+
+ prefix = 'submit:'
+
+ def _get_submit_location(self, branch):
+ submit_location = branch.get_submit_branch()
+ location_type = 'submit branch'
+ if submit_location is None:
+ submit_location = branch.get_parent()
+ location_type = 'parent branch'
+ if submit_location is None:
+ raise errors.NoSubmitBranch(branch)
+ trace.note(gettext('Using {0} {1}').format(location_type,
+ submit_location))
+ return submit_location
+
+ def _match_on(self, branch, revs):
+ trace.mutter('matching ancestor: on: %s, %s', self.spec, branch)
+ return self._find_revision_info(branch,
+ self._get_submit_location(branch))
+
+ def _as_revision_id(self, context_branch):
+ return self._find_revision_id(context_branch,
+ self._get_submit_location(context_branch))
+
+
+class RevisionSpec_annotate(RevisionIDSpec):
+
+ prefix = 'annotate:'
+
+ help_txt = """Select the revision that last modified the specified line.
+
+ Select the revision that last modified the specified line. Line is
+ specified as path:number. Path is a relative path to the file. Numbers
+    start at 1, and are relative to the current version, not the
+    last-committed version of the file.
+ """
+
+ def _raise_invalid(self, numstring, context_branch):
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch,
+ 'No such line: %s' % numstring)
+
+ def _as_revision_id(self, context_branch):
+ path, numstring = self.spec.rsplit(':', 1)
+ try:
+ index = int(numstring) - 1
+ except ValueError:
+ self._raise_invalid(numstring, context_branch)
+ tree, file_path = workingtree.WorkingTree.open_containing(path)
+ tree.lock_read()
+ try:
+ file_id = tree.path2id(file_path)
+ if file_id is None:
+ raise errors.InvalidRevisionSpec(self.user_spec,
+ context_branch, "File '%s' is not versioned." %
+ file_path)
+ revision_ids = [r for (r, l) in tree.annotate_iter(file_id)]
+ finally:
+ tree.unlock()
+ try:
+ revision_id = revision_ids[index]
+ except IndexError:
+ self._raise_invalid(numstring, context_branch)
+ if revision_id == revision.CURRENT_REVISION:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch,
+ 'Line %s has not been committed.' % numstring)
+ return revision_id
+
+
+class RevisionSpec_mainline(RevisionIDSpec):
+
+ help_txt = """Select mainline revision that merged the specified revision.
+
+ Select the revision that merged the specified revision into mainline.
+ """
+
+ prefix = 'mainline:'
+
+ def _as_revision_id(self, context_branch):
+ revspec = RevisionSpec.from_string(self.spec)
+ if revspec.get_branch() is None:
+ spec_branch = context_branch
+ else:
+ spec_branch = _mod_branch.Branch.open(revspec.get_branch())
+ revision_id = revspec.as_revision_id(spec_branch)
+ graph = context_branch.repository.get_graph()
+ result = graph.find_lefthand_merger(revision_id,
+ context_branch.last_revision())
+ if result is None:
+ raise errors.InvalidRevisionSpec(self.user_spec, context_branch)
+ return result
+
+
+# The order in which we want to DWIM a revision spec without any prefix.
+# revno is always tried first and isn't listed here, this is used by
+# RevisionSpec_dwim._match_on
+dwim_revspecs = symbol_versioning.deprecated_list(
+ symbol_versioning.deprecated_in((2, 4, 0)), "dwim_revspecs", [])
+
+RevisionSpec_dwim.append_possible_revspec(RevisionSpec_tag)
+RevisionSpec_dwim.append_possible_revspec(RevisionSpec_revid)
+RevisionSpec_dwim.append_possible_revspec(RevisionSpec_date)
+RevisionSpec_dwim.append_possible_revspec(RevisionSpec_branch)
+
+revspec_registry = registry.Registry()
+def _register_revspec(revspec):
+ revspec_registry.register(revspec.prefix, revspec)
+
+_register_revspec(RevisionSpec_revno)
+_register_revspec(RevisionSpec_revid)
+_register_revspec(RevisionSpec_last)
+_register_revspec(RevisionSpec_before)
+_register_revspec(RevisionSpec_tag)
+_register_revspec(RevisionSpec_date)
+_register_revspec(RevisionSpec_ancestor)
+_register_revspec(RevisionSpec_branch)
+_register_revspec(RevisionSpec_submit)
+_register_revspec(RevisionSpec_annotate)
+_register_revspec(RevisionSpec_mainline)
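
As rough orientation for the registry set up above, a minimal sketch of resolving a spec string; it assumes an already opened bzrlib Branch named `branch`, and the date value is only an illustration:

    from bzrlib.revisionspec import RevisionSpec

    # The prefix of the spec string ('date:', 'ancestor:', 'branch:', ...)
    # picks the matching RevisionSpec subclass out of revspec_registry.
    spec = RevisionSpec.from_string('date:2012-08-22')

    # Resolve the spec against a branch to obtain a concrete revision id;
    # 'branch' is assumed to have history covering that date.
    rev_id = spec.as_revision_id(branch)
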
diff --git a/bzrlib/revisiontree.py b/bzrlib/revisiontree.py
new file mode 100644
index 0000000..404656c
--- /dev/null
+++ b/bzrlib/revisiontree.py
@@ -0,0 +1,334 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""RevisionTree - a Tree implementation backed by repository data for a revision."""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ errors,
+ revision,
+ tree,
+ )
+
+
+class RevisionTree(tree.Tree):
+ """Tree viewing a previous revision.
+
+ File text can be retrieved from the text store.
+ """
+
+ def __init__(self, repository, revision_id):
+ self._repository = repository
+ self._revision_id = revision_id
+ self._rules_searcher = None
+
+ def has_versioned_directories(self):
+ """See `Tree.has_versioned_directories`."""
+ return self._repository._format.supports_versioned_directories
+
+ def supports_tree_reference(self):
+ return getattr(self._repository._format, "supports_tree_reference",
+ False)
+
+ def get_parent_ids(self):
+ """See Tree.get_parent_ids.
+
+ A RevisionTree's parents match the revision graph.
+ """
+ if self._revision_id in (None, revision.NULL_REVISION):
+ parent_ids = []
+ else:
+ parent_ids = self._repository.get_revision(
+ self._revision_id).parent_ids
+ return parent_ids
+
+ def get_revision_id(self):
+ """Return the revision id associated with this tree."""
+ return self._revision_id
+
+ def get_file_revision(self, file_id, path=None):
+ """Return the revision id in which a file was last changed."""
+ raise NotImplementedError(self.get_file_revision)
+
+ def get_file_text(self, file_id, path=None):
+ for (identifier, content) in self.iter_files_bytes([(file_id, None)]):
+ ret = "".join(content)
+ return ret
+
+ def get_file(self, file_id, path=None):
+ return StringIO(self.get_file_text(file_id))
+
+ def is_locked(self):
+ return self._repository.is_locked()
+
+ def lock_read(self):
+ self._repository.lock_read()
+ return self
+
+ def __repr__(self):
+ return '<%s instance at %x, rev_id=%r>' % (
+ self.__class__.__name__, id(self), self._revision_id)
+
+ def unlock(self):
+ self._repository.unlock()
+
+ def _get_rules_searcher(self, default_searcher):
+ """See Tree._get_rules_searcher."""
+ if self._rules_searcher is None:
+ self._rules_searcher = super(RevisionTree,
+ self)._get_rules_searcher(default_searcher)
+ return self._rules_searcher
+
+
+class InventoryRevisionTree(RevisionTree, tree.InventoryTree):
+
+ def __init__(self, repository, inv, revision_id):
+ RevisionTree.__init__(self, repository, revision_id)
+ self._inventory = inv
+
+ def get_file_mtime(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ ie = inv[inv_file_id]
+ try:
+ revision = self._repository.get_revision(ie.revision)
+ except errors.NoSuchRevision:
+ raise errors.FileTimestampUnavailable(self.id2path(file_id))
+ return revision.timestamp
+
+ def get_file_size(self, file_id):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].text_size
+
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ ie = inv[inv_file_id]
+ if ie.kind == "file":
+ return ie.text_sha1
+ return None
+
+ def get_file_revision(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ ie = inv[inv_file_id]
+ return ie.revision
+
+ def is_executable(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ ie = inv[inv_file_id]
+ if ie.kind != "file":
+ return False
+ return ie.executable
+
+ def has_filename(self, filename):
+ return bool(self.path2id(filename))
+
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ # The only files returned by this are those from the version
+ if from_dir is None:
+ from_dir_id = None
+ inv = self.root_inventory
+ else:
+ inv, from_dir_id = self._path2inv_file_id(from_dir)
+ if from_dir_id is None:
+ # Directory not versioned
+ return
+ entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive)
+ if inv.root is not None and not include_root and from_dir is None:
+            # skip the root for compatibility with the current APIs.
+ entries.next()
+ for path, entry in entries:
+ yield path, 'V', entry.kind, entry.file_id, entry
+
+ def get_symlink_target(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ ie = inv[inv_file_id]
+ # Inventories store symlink targets in unicode
+ return ie.symlink_target
+
+ def get_reference_revision(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].reference_revision
+
+ def get_root_id(self):
+ if self.root_inventory.root:
+ return self.root_inventory.root.file_id
+
+ def kind(self, file_id):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].kind
+
+ def path_content_summary(self, path):
+ """See Tree.path_content_summary."""
+ inv, file_id = self._path2inv_file_id(path)
+ if file_id is None:
+ return ('missing', None, None, None)
+ entry = inv[file_id]
+ kind = entry.kind
+ if kind == 'file':
+ return (kind, entry.text_size, entry.executable, entry.text_sha1)
+ elif kind == 'symlink':
+ return (kind, None, None, entry.symlink_target)
+ else:
+ return (kind, None, None, None)
+
+ def _comparison_data(self, entry, path):
+ if entry is None:
+ return None, False, None
+ return entry.kind, entry.executable, None
+
+ def _file_size(self, entry, stat_value):
+ return entry.text_size
+
+ def walkdirs(self, prefix=""):
+ _directory = 'directory'
+ inv, top_id = self._path2inv_file_id(prefix)
+ if top_id is None:
+ pending = []
+ else:
+ pending = [(prefix, '', _directory, None, top_id, None)]
+ while pending:
+ dirblock = []
+ currentdir = pending.pop()
+            # 0- relpath, 1- basename, 2- kind, 3- stat, 4- file_id, 5- v-kind
+ if currentdir[0]:
+ relroot = currentdir[0] + '/'
+ else:
+ relroot = ""
+ # FIXME: stash the node in pending
+ entry = inv[currentdir[4]]
+ for name, child in entry.sorted_children():
+ toppath = relroot + name
+ dirblock.append((toppath, name, child.kind, None,
+ child.file_id, child.kind
+ ))
+ yield (currentdir[0], entry.file_id), dirblock
+ # push the user specified dirs from dirblock
+ for dir in reversed(dirblock):
+ if dir[2] == _directory:
+ pending.append(dir)
+
+ def iter_files_bytes(self, desired_files):
+ """See Tree.iter_files_bytes.
+
+ This version is implemented on top of Repository.iter_files_bytes"""
+ repo_desired_files = [(f, self.get_file_revision(f), i)
+ for f, i in desired_files]
+ try:
+ for result in self._repository.iter_files_bytes(repo_desired_files):
+ yield result
+ except errors.RevisionNotPresent, e:
+ raise errors.NoSuchFile(e.file_id)
+
+ def annotate_iter(self, file_id,
+ default_revision=revision.CURRENT_REVISION):
+ """See Tree.annotate_iter"""
+ text_key = (file_id, self.get_file_revision(file_id))
+ annotator = self._repository.texts.get_annotator()
+ annotations = annotator.annotate_flat(text_key)
+ return [(key[-1], line) for key, line in annotations]
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if isinstance(other, InventoryRevisionTree):
+ return (self.root_inventory == other.root_inventory)
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __hash__(self):
+ raise ValueError('not hashable')
+
+
+class InterCHKRevisionTree(tree.InterTree):
+ """Fast path optimiser for RevisionTrees with CHK inventories."""
+
+ @staticmethod
+ def is_compatible(source, target):
+ if (isinstance(source, RevisionTree)
+ and isinstance(target, RevisionTree)):
+ try:
+ # Only CHK inventories have id_to_entry attribute
+ source.root_inventory.id_to_entry
+ target.root_inventory.id_to_entry
+ return True
+ except AttributeError:
+ pass
+ return False
+
+ def iter_changes(self, include_unchanged=False,
+ specific_files=None, pb=None, extra_trees=[],
+ require_versioned=True, want_unversioned=False):
+ lookup_trees = [self.source]
+ if extra_trees:
+ lookup_trees.extend(extra_trees)
+        # The ids of items we need to examine to ensure delta consistency.
+ precise_file_ids = set()
+ discarded_changes = {}
+ if specific_files == []:
+ specific_file_ids = []
+ else:
+ specific_file_ids = self.target.paths2ids(specific_files,
+ lookup_trees, require_versioned=require_versioned)
+ # FIXME: It should be possible to delegate include_unchanged handling
+ # to CHKInventory.iter_changes and do a better job there -- vila
+ # 20090304
+ changed_file_ids = set()
+ # FIXME: nested tree support
+ for result in self.target.root_inventory.iter_changes(
+ self.source.root_inventory):
+ if specific_file_ids is not None:
+ file_id = result[0]
+ if file_id not in specific_file_ids:
+ # A change from the whole tree that we don't want to show yet.
+ # We may find that we need to show it for delta consistency, so
+ # stash it.
+ discarded_changes[result[0]] = result
+ continue
+ new_parent_id = result[4][1]
+ precise_file_ids.add(new_parent_id)
+ yield result
+ changed_file_ids.add(result[0])
+ if specific_file_ids is not None:
+ for result in self._handle_precise_ids(precise_file_ids,
+ changed_file_ids, discarded_changes=discarded_changes):
+ yield result
+ if include_unchanged:
+            # CHKMap avoids being O(tree), so we only go to O(tree) when
+            # required to.
+ # Now walk the whole inventory, excluding the already yielded
+ # file ids
+ # FIXME: Support nested trees
+ changed_file_ids = set(changed_file_ids)
+ for relpath, entry in self.target.root_inventory.iter_entries():
+ if (specific_file_ids is not None
+ and not entry.file_id in specific_file_ids):
+ continue
+ if not entry.file_id in changed_file_ids:
+ yield (entry.file_id,
+ (relpath, relpath), # Not renamed
+ False, # Not modified
+ (True, True), # Still versioned
+ (entry.parent_id, entry.parent_id),
+ (entry.name, entry.name),
+ (entry.kind, entry.kind),
+ (entry.executable, entry.executable))
+
+
+tree.InterTree.register_optimiser(InterCHKRevisionTree)
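
A minimal sketch of how the RevisionTree API above is typically reached, assuming an open `branch` and a known `file_id`; the tree is obtained from the repository exactly as `_as_tree` does earlier in this diff:

    # Read a file's text as it was in the branch tip revision.
    rev_id = branch.last_revision()
    rev_tree = branch.repository.revision_tree(rev_id)
    rev_tree.lock_read()
    try:
        text = rev_tree.get_file_text(file_id)
    finally:
        rev_tree.unlock()
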
diff --git a/bzrlib/rio.py b/bzrlib/rio.py
new file mode 100644
index 0000000..adc05d2
--- /dev/null
+++ b/bzrlib/rio.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# \subsection{\emph{rio} - simple text metaformat}
+#
+# \emph{r} stands for `restricted', `reproducible', or `rfc822-like'.
+#
+# The stored data consists of a series of \emph{stanzas}, each of which contains
+# \emph{fields} identified by an ascii name, with Unicode or string contents.
+# The field tag is constrained to alphanumeric characters.
+# There may be more than one field in a stanza with the same name.
+#
+# The format itself does not deal with character encoding issues, though
+# the result will normally be written in Unicode.
+#
+# The format is intended to be simple enough that there is exactly one character
+# stream representation of an object and vice versa, and that this relation
+# will continue to hold for future versions of bzr.
+
+import re
+
+from bzrlib import osutils
+from bzrlib.iterablefile import IterableFile
+
+# XXX: there is some redundancy here: stanzas can be written in isolation as
+# well as through a writer object.
+
+class RioWriter(object):
+ def __init__(self, to_file):
+ self._soft_nl = False
+ self._to_file = to_file
+
+ def write_stanza(self, stanza):
+ if self._soft_nl:
+ self._to_file.write('\n')
+ stanza.write(self._to_file)
+ self._soft_nl = True
+
+
+class RioReader(object):
+ """Read stanzas from a file as a sequence
+
+ to_file can be anything that can be enumerated as a sequence of
+ lines (with newlines.)
+ """
+ def __init__(self, from_file):
+ self._from_file = from_file
+
+ def __iter__(self):
+ while True:
+ s = read_stanza(self._from_file)
+ if s is None:
+ break
+ else:
+ yield s
+
+
+def rio_file(stanzas, header=None):
+ """Produce a rio IterableFile from an iterable of stanzas"""
+ def str_iter():
+ if header is not None:
+ yield header + '\n'
+ first_stanza = True
+ for s in stanzas:
+ if first_stanza is not True:
+ yield '\n'
+ for line in s.to_lines():
+ yield line
+ first_stanza = False
+ return IterableFile(str_iter())
+
+
+def read_stanzas(from_file):
+ while True:
+ s = read_stanza(from_file)
+ if s is None:
+ break
+ else:
+ yield s
+
+class Stanza(object):
+ """One stanza for rio.
+
+ Each stanza contains a set of named fields.
+
+ Names must be non-empty ascii alphanumeric plus _. Names can be repeated
+ within a stanza. Names are case-sensitive. The ordering of fields is
+ preserved.
+
+ Each field value must be either an int or a string.
+ """
+
+ __slots__ = ['items']
+
+ def __init__(self, **kwargs):
+ """Construct a new Stanza.
+
+ The keyword arguments, if any, are added in sorted order to the stanza.
+ """
+ self.items = []
+ if kwargs:
+ for tag, value in sorted(kwargs.items()):
+ self.add(tag, value)
+
+ def add(self, tag, value):
+ """Append a name and value to the stanza."""
+ if not valid_tag(tag):
+ raise ValueError("invalid tag %r" % (tag,))
+ if isinstance(value, str):
+ value = unicode(value)
+ elif isinstance(value, unicode):
+ pass
+ ## elif isinstance(value, (int, long)):
+ ## value = str(value) # XXX: python2.4 without L-suffix
+ else:
+ raise TypeError("invalid type for rio value: %r of type %s"
+ % (value, type(value)))
+ self.items.append((tag, value))
+
+ @classmethod
+ def from_pairs(cls, pairs):
+ ret = cls()
+ ret.items = pairs
+ return ret
+
+ def __contains__(self, find_tag):
+ """True if there is any field in this stanza with the given tag."""
+ for tag, value in self.items:
+ if tag == find_tag:
+ return True
+ return False
+
+ def __len__(self):
+ """Return number of pairs in the stanza."""
+ return len(self.items)
+
+ def __eq__(self, other):
+ if not isinstance(other, Stanza):
+ return False
+ return self.items == other.items
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __repr__(self):
+ return "Stanza(%r)" % self.items
+
+ def iter_pairs(self):
+ """Return iterator of tag, value pairs."""
+ return iter(self.items)
+
+ def to_lines(self):
+ """Generate sequence of lines for external version of this file.
+
+ The lines are always utf-8 encoded strings.
+ """
+ if not self.items:
+ # max() complains if sequence is empty
+ return []
+ result = []
+ for tag, value in self.items:
+ if value == '':
+ result.append(tag + ': \n')
+ elif '\n' in value:
+ # don't want splitlines behaviour on empty lines
+ val_lines = value.split('\n')
+ result.append(tag + ': ' + val_lines[0].encode('utf-8') + '\n')
+ for line in val_lines[1:]:
+ result.append('\t' + line.encode('utf-8') + '\n')
+ else:
+ result.append(tag + ': ' + value.encode('utf-8') + '\n')
+ return result
+
+ def to_string(self):
+ """Return stanza as a single string"""
+ return ''.join(self.to_lines())
+
+ def to_unicode(self):
+ """Return stanza as a single Unicode string.
+
+ This is most useful when adding a Stanza to a parent Stanza
+ """
+ if not self.items:
+ return u''
+
+ result = []
+ for tag, value in self.items:
+ if value == u'':
+ result.append(tag + u': \n')
+ elif u'\n' in value:
+ # don't want splitlines behaviour on empty lines
+ val_lines = value.split(u'\n')
+ result.append(tag + u': ' + val_lines[0] + u'\n')
+ for line in val_lines[1:]:
+ result.append(u'\t' + line + u'\n')
+ else:
+ result.append(tag + u': ' + value + u'\n')
+ return u''.join(result)
+
+ def write(self, to_file):
+ """Write stanza to a file"""
+ to_file.writelines(self.to_lines())
+
+ def get(self, tag):
+        """Return the value for a field with the given tag.
+
+ If there is more than one value, only the first is returned. If the
+ tag is not present, KeyError is raised.
+ """
+ for t, v in self.items:
+ if t == tag:
+ return v
+ else:
+ raise KeyError(tag)
+
+ __getitem__ = get
+
+ def get_all(self, tag):
+ r = []
+ for t, v in self.items:
+ if t == tag:
+ r.append(v)
+ return r
+
+ def as_dict(self):
+ """Return a dict containing the unique values of the stanza.
+ """
+ d = {}
+ for tag, value in self.items:
+ d[tag] = value
+ return d
+
+
+def valid_tag(tag):
+ return _valid_tag(tag)
+
+
+def read_stanza(line_iter):
+ """Return new Stanza read from list of lines or a file
+
+ Returns one Stanza that was read, or returns None at end of file. If a
+ blank line follows the stanza, it is consumed. It's not an error for
+    there to be no blank line at end of file. If there is a blank line at the
+    start of the input this is really an empty stanza and that is returned.
+
+ Only the stanza lines and the trailing blank (if any) are consumed
+ from the line_iter.
+
+ The raw lines must be in utf-8 encoding.
+ """
+ return _read_stanza_utf8(line_iter)
+
+
+def read_stanza_unicode(unicode_iter):
+ """Read a Stanza from a list of lines or a file.
+
+ The lines should already be in unicode form. This returns a single
+ stanza that was read. If there is a blank line at the end of the Stanza,
+ it is consumed. It is not an error for there to be no blank line at
+ the end of the iterable. If there is a blank line at the beginning,
+ this is treated as an empty Stanza and None is returned.
+
+ Only the stanza lines and the trailing blank (if any) are consumed
+ from the unicode_iter
+
+    :param unicode_iter: An iterable yielding Unicode strings. See read_stanza
+ if you have a utf-8 encoded string.
+ :return: A Stanza object if there are any lines in the file.
+ None otherwise
+ """
+ return _read_stanza_unicode(unicode_iter)
+
+
+def to_patch_lines(stanza, max_width=72):
+ """Convert a stanza into RIO-Patch format lines.
+
+ RIO-Patch is a RIO variant designed to be e-mailed as part of a patch.
+ It resists common forms of damage such as newline conversion or the removal
+ of trailing whitespace, yet is also reasonably easy to read.
+
+ :param max_width: The maximum number of characters per physical line.
+ :return: a list of lines
+ """
+ if max_width <= 6:
+ raise ValueError(max_width)
+ max_rio_width = max_width - 4
+ lines = []
+ for pline in stanza.to_lines():
+ for line in pline.split('\n')[:-1]:
+ line = re.sub('\\\\', '\\\\\\\\', line)
+ while len(line) > 0:
+ partline = line[:max_rio_width]
+ line = line[max_rio_width:]
+                if len(line) > 0 and line[0] != ' ':
+ break_index = -1
+ break_index = partline.rfind(' ', -20)
+ if break_index < 3:
+ break_index = partline.rfind('-', -20)
+ break_index += 1
+ if break_index < 3:
+ break_index = partline.rfind('/', -20)
+ if break_index >= 3:
+ line = partline[break_index:] + line
+ partline = partline[:break_index]
+ if len(line) > 0:
+ line = ' ' + line
+ partline = re.sub('\r', '\\\\r', partline)
+ blank_line = False
+ if len(line) > 0:
+ partline += '\\'
+ elif re.search(' $', partline):
+ partline += '\\'
+ blank_line = True
+ lines.append('# ' + partline + '\n')
+ if blank_line:
+ lines.append('# \n')
+ return lines
+
+
+def _patch_stanza_iter(line_iter):
+ map = {'\\\\': '\\',
+ '\\r' : '\r',
+ '\\\n': ''}
+ def mapget(match):
+ return map[match.group(0)]
+
+ last_line = None
+ for line in line_iter:
+ if line.startswith('# '):
+ line = line[2:]
+ elif line.startswith('#'):
+ line = line[1:]
+ else:
+ raise ValueError("bad line %r" % (line,))
+ if last_line is not None and len(line) > 2:
+ line = line[2:]
+ line = re.sub('\r', '', line)
+ line = re.sub('\\\\(.|\n)', mapget, line)
+ if last_line is None:
+ last_line = line
+ else:
+ last_line += line
+ if last_line[-1] == '\n':
+ yield last_line
+ last_line = None
+ if last_line is not None:
+ yield last_line
+
+
+def read_patch_stanza(line_iter):
+ """Convert an iterable of RIO-Patch format lines into a Stanza.
+
+ RIO-Patch is a RIO variant designed to be e-mailed as part of a patch.
+ It resists common forms of damage such as newline conversion or the removal
+ of trailing whitespace, yet is also reasonably easy to read.
+
+ :return: a Stanza
+ """
+ return read_stanza(_patch_stanza_iter(line_iter))
+
+
+try:
+ from bzrlib._rio_pyx import (
+ _read_stanza_utf8,
+ _read_stanza_unicode,
+ _valid_tag,
+ )
+except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ from bzrlib._rio_py import (
+ _read_stanza_utf8,
+ _read_stanza_unicode,
+ _valid_tag,
+ )
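
A small round-trip sketch for the rio API above; the field names are arbitrary examples:

    from cStringIO import StringIO
    from bzrlib.rio import Stanza, RioWriter, read_stanza

    out = StringIO()
    writer = RioWriter(out)
    writer.write_stanza(Stanza(name=u'example', revision=u'rev-1'))

    # read_stanza consumes an iterator of utf-8 encoded lines.
    stanza = read_stanza(iter(out.getvalue().splitlines(True)))
    assert stanza.get('name') == u'example'
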
diff --git a/bzrlib/rules.py b/bzrlib/rules.py
new file mode 100644
index 0000000..717bb44
--- /dev/null
+++ b/bzrlib/rules.py
@@ -0,0 +1,165 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Rule-based definition of preferences for selected files in selected branches.
+
+See ``bzr help rules`` for details.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ config,
+ cmdline,
+ errors,
+ globbing,
+ osutils,
+ )
+from bzrlib.util.configobj import configobj
+
+
+# Name of the file holding rules in a tree
+RULES_TREE_FILENAME = ".bzrrules"
+
+# Namespace prefix for per file preferences
+FILE_PREFS_PREFIX = 'name '
+FILE_PREFS_PREFIX_LEN = len(FILE_PREFS_PREFIX)
+
+# The object providing default rules
+_per_user_searcher = None
+
+
+class _RulesSearcher(object):
+ """An object that provides rule-based preferences."""
+
+ def get_items(self, path):
+ """Return the preferences for a path as name,value tuples.
+
+ :param path: tree relative path
+ :return: () if no rule matched, otherwise a sequence of name,value
+ tuples.
+ """
+ raise NotImplementedError(self.get_items)
+
+ def get_selected_items(self, path, names):
+ """Return selected preferences for a path as name,value tuples.
+
+ :param path: tree relative path
+ :param names: the list of preferences to lookup
+ :return: () if no rule matched, otherwise a sequence of name,value
+ tuples. The sequence is the same length as names,
+ tuple order matches the order in names, and
+ undefined preferences are given the value None.
+ """
+ raise NotImplementedError(self.get_selected_items)
+
+ def get_single_value(self, path, preference_name):
+ """Get a single preference for a single file.
+
+ :returns: The string preference value, or None.
+ """
+ for key, value in self.get_selected_items(path, [preference_name]):
+ return value
+ return None
+
+
+class _IniBasedRulesSearcher(_RulesSearcher):
+
+ def __init__(self, inifile):
+ """Construct a _RulesSearcher based on an ini file.
+
+ The content will be decoded as utf-8.
+
+ :param inifile: the name of the file or a sequence of lines.
+ """
+ self._cfg = configobj.ConfigObj(inifile, encoding='utf-8')
+ sections = self._cfg.keys()
+ patterns = []
+ self.pattern_to_section = {}
+ for s in sections:
+ if s.startswith(FILE_PREFS_PREFIX):
+ file_patterns = cmdline.split(s[FILE_PREFS_PREFIX_LEN:])
+ patterns.extend(file_patterns)
+ for fp in file_patterns:
+ self.pattern_to_section[fp] = s
+ if len(patterns) < len(sections):
+ unknowns = [s for s in sections
+ if not s.startswith(FILE_PREFS_PREFIX)]
+ raise errors.UnknownRules(unknowns)
+ elif patterns:
+ self._globster = globbing._OrderedGlobster(patterns)
+ else:
+ self._globster = None
+
+ def get_items(self, path):
+ """See _RulesSearcher.get_items."""
+ if self._globster is None:
+ return ()
+ pat = self._globster.match(path)
+ if pat is None:
+ return ()
+ else:
+ all = self._cfg[self.pattern_to_section[pat]]
+ return tuple(all.items())
+
+ def get_selected_items(self, path, names):
+ """See _RulesSearcher.get_selected_items."""
+ if self._globster is None:
+ return ()
+ pat = self._globster.match(path)
+ if pat is None:
+ return ()
+ else:
+ all = self._cfg[self.pattern_to_section[pat]]
+ return tuple((k, all.get(k)) for k in names)
+
+
+class _StackedRulesSearcher(_RulesSearcher):
+
+ def __init__(self, searchers):
+ """Construct a _RulesSearcher based on a stack of other ones.
+
+ :param searchers: a sequence of searchers.
+ """
+ self.searchers = searchers
+
+ def get_items(self, path):
+ """See _RulesSearcher.get_items."""
+ for searcher in self.searchers:
+ result = searcher.get_items(path)
+ if result:
+ return result
+ return ()
+
+ def get_selected_items(self, path, names):
+ """See _RulesSearcher.get_selected_items."""
+ for searcher in self.searchers:
+ result = searcher.get_selected_items(path, names)
+ if result:
+ return result
+ return ()
+
+
+def rules_filename():
+ """Return the default rules filename."""
+ return osutils.pathjoin(config.config_dir(), 'rules')
+
+
+def reset_rules():
+ global _per_user_searcher
+ _per_user_searcher = _IniBasedRulesSearcher(rules_filename())
+
+reset_rules()
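
A short sketch of rule lookup with the searchers above, using an in-memory rules definition instead of the per-user rules file; the `eol` preference name is only an example:

    from bzrlib.rules import _IniBasedRulesSearcher

    # ConfigObj accepts a sequence of lines as well as a file name.
    searcher = _IniBasedRulesSearcher([
        '[name *.txt]',
        'eol = native',
        ])
    searcher.get_single_value('README.txt', 'eol')   # -> 'native'
    searcher.get_single_value('image.png', 'eol')    # -> None
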
diff --git a/bzrlib/send.py b/bzrlib/send.py
new file mode 100644
index 0000000..280c91e
--- /dev/null
+++ b/bzrlib/send.py
@@ -0,0 +1,205 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import os
+import time
+
+from bzrlib import (
+ controldir,
+ errors,
+ osutils,
+ registry,
+ trace,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.branch import (
+ Branch,
+ )
+from bzrlib.revision import (
+ NULL_REVISION,
+ )
+
+
+format_registry = registry.Registry()
+
+
+def send(target_branch, revision, public_branch, remember,
+ format, no_bundle, no_patch, output, from_, mail_to, message, body,
+ to_file, strict=None):
+ possible_transports = []
+ tree, branch = controldir.ControlDir.open_containing_tree_or_branch(
+ from_, possible_transports=possible_transports)[:2]
+ # we may need to write data into branch's repository to calculate
+ # the data to send.
+ branch.lock_write()
+ try:
+ if output is None:
+ config_stack = branch.get_config_stack()
+ if mail_to is None:
+ mail_to = config_stack.get('submit_to')
+ mail_client = config_stack.get('mail_client')(config_stack)
+ if (not getattr(mail_client, 'supports_body', False)
+ and body is not None):
+ raise errors.BzrCommandError(gettext(
+ 'Mail client "%s" does not support specifying body') %
+ mail_client.__class__.__name__)
+ if remember and target_branch is None:
+ raise errors.BzrCommandError(gettext(
+ '--remember requires a branch to be specified.'))
+ stored_target_branch = branch.get_submit_branch()
+ remembered_target_branch = None
+ if target_branch is None:
+ target_branch = stored_target_branch
+ remembered_target_branch = "submit"
+ else:
+ # Remembers if asked explicitly or no previous location is set
+ if remember or (
+ remember is None and stored_target_branch is None):
+ branch.set_submit_branch(target_branch)
+ if target_branch is None:
+ target_branch = branch.get_parent()
+ remembered_target_branch = "parent"
+ if target_branch is None:
+ raise errors.BzrCommandError(gettext('No submit branch known or'
+ ' specified'))
+ if remembered_target_branch is not None:
+ trace.note(gettext('Using saved {0} location "{1}" to determine '
+ 'what changes to submit.').format(
+ remembered_target_branch,
+ target_branch))
+
+ submit_branch = Branch.open(target_branch,
+ possible_transports=possible_transports)
+ possible_transports.append(submit_branch.bzrdir.root_transport)
+ if mail_to is None or format is None:
+ if mail_to is None:
+ mail_to = submit_branch.get_config_stack().get(
+ 'child_submit_to')
+ if format is None:
+ formatname = submit_branch.get_child_submit_format()
+ try:
+ format = format_registry.get(formatname)
+ except KeyError:
+ raise errors.BzrCommandError(
+ gettext("No such send format '%s'.") % formatname)
+
+ stored_public_branch = branch.get_public_branch()
+ if public_branch is None:
+ public_branch = stored_public_branch
+ # Remembers if asked explicitly or no previous location is set
+ elif (remember
+ or (remember is None and stored_public_branch is None)):
+ branch.set_public_branch(public_branch)
+ if no_bundle and public_branch is None:
+ raise errors.BzrCommandError(gettext('No public branch specified or'
+ ' known'))
+ base_revision_id = None
+ revision_id = None
+ if revision is not None:
+ if len(revision) > 2:
+ raise errors.BzrCommandError(gettext('bzr send takes '
+                'at most two revision identifiers'))
+ revision_id = revision[-1].as_revision_id(branch)
+ if len(revision) == 2:
+ base_revision_id = revision[0].as_revision_id(branch)
+ if revision_id is None:
+ if tree is not None:
+ tree.check_changed_or_out_of_date(
+ strict, 'send_strict',
+ more_error='Use --no-strict to force the send.',
+ more_warning='Uncommitted changes will not be sent.')
+ revision_id = branch.last_revision()
+ if revision_id == NULL_REVISION:
+ raise errors.BzrCommandError(gettext('No revisions to submit.'))
+ if format is None:
+ format = format_registry.get()
+ directive = format(branch, revision_id, target_branch,
+ public_branch, no_patch, no_bundle, message, base_revision_id,
+ submit_branch)
+ if output is None:
+ directive.compose_merge_request(mail_client, mail_to, body,
+ branch, tree)
+ else:
+ if directive.multiple_output_files:
+ if output == '-':
+ raise errors.BzrCommandError(gettext('- not supported for '
+ 'merge directives that use more than one output file.'))
+ if not os.path.exists(output):
+ os.mkdir(output, 0755)
+ for (filename, lines) in directive.to_files():
+ path = os.path.join(output, filename)
+ outfile = open(path, 'wb')
+ try:
+ outfile.writelines(lines)
+ finally:
+ outfile.close()
+ else:
+ if output == '-':
+ outfile = to_file
+ else:
+ outfile = open(output, 'wb')
+ try:
+ outfile.writelines(directive.to_lines())
+ finally:
+ if outfile is not to_file:
+ outfile.close()
+ finally:
+ branch.unlock()
+
+
+def _send_4(branch, revision_id, target_branch, public_branch,
+ no_patch, no_bundle, message,
+ base_revision_id, local_target_branch=None):
+ from bzrlib import merge_directive
+ return merge_directive.MergeDirective2.from_objects(
+ branch.repository, revision_id, time.time(),
+ osutils.local_time_offset(), target_branch,
+ public_branch=public_branch,
+ include_patch=not no_patch,
+ include_bundle=not no_bundle, message=message,
+ base_revision_id=base_revision_id,
+ local_target_branch=local_target_branch)
+
+
+def _send_0_9(branch, revision_id, submit_branch, public_branch,
+ no_patch, no_bundle, message,
+ base_revision_id, local_target_branch=None):
+ if not no_bundle:
+ if not no_patch:
+ patch_type = 'bundle'
+ else:
+ raise errors.BzrCommandError(gettext('Format 0.9 does not'
+ ' permit bundle with no patch'))
+ else:
+ if not no_patch:
+ patch_type = 'diff'
+ else:
+ patch_type = None
+ from bzrlib import merge_directive
+ return merge_directive.MergeDirective.from_objects(
+ branch.repository, revision_id, time.time(),
+ osutils.local_time_offset(), submit_branch,
+ public_branch=public_branch, patch_type=patch_type,
+ message=message, local_target_branch=local_target_branch)
+
+
+format_registry.register('4',
+ _send_4, 'Bundle format 4, Merge Directive 2 (default)')
+format_registry.register('0.9',
+ _send_0_9, 'Bundle format 0.9, Merge Directive 1')
+format_registry.default_key = '4'
diff --git a/bzrlib/serializer.py b/bzrlib/serializer.py
new file mode 100644
index 0000000..8784fe2
--- /dev/null
+++ b/bzrlib/serializer.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Inventory/revision serialization."""
+
+from __future__ import absolute_import
+
+from bzrlib import registry
+
+
+class Serializer(object):
+ """Inventory and revision serialization/deserialization."""
+
+ squashes_xml_invalid_characters = False
+
+ def write_inventory(self, inv, f):
+ """Write inventory to a file.
+
+ Note: this is a *whole inventory* operation, and should only be used
+ sparingly, as it does not scale well with large trees.
+ """
+ raise NotImplementedError(self.write_inventory)
+
+ def write_inventory_to_string(self, inv):
+ """Produce a simple string representation of an inventory.
+
+ Note: this is a *whole inventory* operation, and should only be used
+ sparingly, as it does not scale well with large trees.
+
+ The requirement for the contents of the string is that it can be passed
+ to read_inventory_from_string and the result is an identical inventory
+ in memory.
+
+ (All serializers as of 2009-07-29 produce XML, but this is not mandated
+ by the interface.)
+ """
+ raise NotImplementedError(self.write_inventory_to_string)
+
+ def read_inventory_from_string(self, string, revision_id=None,
+ entry_cache=None, return_from_cache=False):
+ """Read string into an inventory object.
+
+ :param string: The serialized inventory to read.
+ :param revision_id: If not-None, the expected revision id of the
+ inventory. Some serialisers use this to set the results' root
+ revision. This should be supplied for deserialising all
+ from-repository inventories so that xml5 inventories that were
+ serialised without a revision identifier can be given the right
+ revision id (but not for working tree inventories where users can
+ edit the data without triggering checksum errors or anything).
+ :param entry_cache: An optional cache of InventoryEntry objects. If
+ supplied we will look up entries via (file_id, revision_id) which
+ should map to a valid InventoryEntry (File/Directory/etc) object.
+ :param return_from_cache: Return entries directly from the cache,
+ rather than copying them first. This is only safe if the caller
+ promises not to mutate the returned inventory entries, but it can
+ make some operations significantly faster.
+ """
+ raise NotImplementedError(self.read_inventory_from_string)
+
+ def read_inventory(self, f, revision_id=None):
+ """See read_inventory_from_string."""
+ raise NotImplementedError(self.read_inventory)
+
+ def write_revision(self, rev, f):
+ raise NotImplementedError(self.write_revision)
+
+ def write_revision_to_string(self, rev):
+ raise NotImplementedError(self.write_revision_to_string)
+
+ def read_revision(self, f):
+ raise NotImplementedError(self.read_revision)
+
+ def read_revision_from_string(self, xml_string):
+ raise NotImplementedError(self.read_revision_from_string)
+
+
+class SerializerRegistry(registry.Registry):
+ """Registry for serializer objects"""
+
+
+format_registry = SerializerRegistry()
+format_registry.register_lazy('5', 'bzrlib.xml5', 'serializer_v5')
+format_registry.register_lazy('6', 'bzrlib.xml6', 'serializer_v6')
+format_registry.register_lazy('7', 'bzrlib.xml7', 'serializer_v7')
+format_registry.register_lazy('8', 'bzrlib.xml8', 'serializer_v8')
+format_registry.register_lazy('9', 'bzrlib.chk_serializer',
+ 'chk_serializer_255_bigpage')
+format_registry.register_lazy('10', 'bzrlib.chk_serializer',
+ 'chk_bencode_serializer')
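
For orientation, a sketch of how this registry is typically consumed; `inv` stands in for an existing Inventory object and is only an assumption here:

    from bzrlib import serializer

    # register_lazy means bzrlib.xml8 is imported only when format '8'
    # is first requested from the registry.
    s = serializer.format_registry.get('8')
    xml = s.write_inventory_to_string(inv)
    inv2 = s.read_inventory_from_string(xml)
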
diff --git a/bzrlib/shelf.py b/bzrlib/shelf.py
new file mode 100644
index 0000000..fbf9058
--- /dev/null
+++ b/bzrlib/shelf.py
@@ -0,0 +1,439 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import errno
+import re
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ bencode,
+ errors,
+ merge,
+ merge3,
+ pack,
+ transform,
+)
+""")
+
+
+class ShelfCreator(object):
+ """Create a transform to shelve objects and its inverse."""
+
+ def __init__(self, work_tree, target_tree, file_list=None):
+ """Constructor.
+
+ :param work_tree: The working tree to apply changes to. This is not
+ required to be locked - a tree_write lock will be taken out.
+ :param target_tree: The tree to make the working tree more similar to.
+ This is not required to be locked - a read_lock will be taken out.
+ :param file_list: The files to make more similar to the target.
+ """
+ self.work_tree = work_tree
+ self.work_transform = transform.TreeTransform(work_tree)
+ try:
+ self.target_tree = target_tree
+ self.shelf_transform = transform.TransformPreview(self.target_tree)
+ try:
+ self.renames = {}
+ self.creation = {}
+ self.deletion = {}
+ self.iter_changes = work_tree.iter_changes(
+ self.target_tree, specific_files=file_list)
+ except:
+ self.shelf_transform.finalize()
+ raise
+ except:
+ self.work_transform.finalize()
+ raise
+
+ def iter_shelvable(self):
+ """Iterable of tuples describing shelvable changes.
+
+ As well as generating the tuples, this updates several members.
+ Tuples may be::
+
+ ('add file', file_id, work_kind, work_path)
+ ('delete file', file_id, target_kind, target_path)
+ ('rename', file_id, target_path, work_path)
+ ('change kind', file_id, target_kind, work_kind, target_path)
+ ('modify text', file_id)
+           ('modify target', file_id, path, target_target, work_target)
+ """
+ for (file_id, paths, changed, versioned, parents, names, kind,
+ executable) in self.iter_changes:
+ # don't shelve add of tree root. Working tree should never
+ # lack roots, and bzr misbehaves when they do.
+ # FIXME ADHB (2009-08-09): should still shelve adds of tree roots
+ # when a tree root was deleted / renamed.
+ if kind[0] is None and names[1] == '':
+ continue
+ # Also don't shelve deletion of tree root.
+ if kind[1] is None and names[0] == '':
+ continue
+ if kind[0] is None or versioned[0] == False:
+ self.creation[file_id] = (kind[1], names[1], parents[1],
+ versioned)
+ yield ('add file', file_id, kind[1], paths[1])
+            elif kind[1] is None or versioned[1] == False:
+ self.deletion[file_id] = (kind[0], names[0], parents[0],
+ versioned)
+ yield ('delete file', file_id, kind[0], paths[0])
+ else:
+ if names[0] != names[1] or parents[0] != parents[1]:
+ self.renames[file_id] = (names, parents)
+ yield ('rename', file_id) + paths
+
+                if kind[0] != kind[1]:
+ yield ('change kind', file_id, kind[0], kind[1], paths[0])
+ elif kind[0] == 'symlink':
+ t_target = self.target_tree.get_symlink_target(file_id)
+ w_target = self.work_tree.get_symlink_target(file_id)
+ yield ('modify target', file_id, paths[0], t_target,
+ w_target)
+ elif changed:
+ yield ('modify text', file_id)
+
+ def shelve_change(self, change):
+ """Shelve a change in the iter_shelvable format."""
+ if change[0] == 'rename':
+ self.shelve_rename(change[1])
+ elif change[0] == 'delete file':
+ self.shelve_deletion(change[1])
+ elif change[0] == 'add file':
+ self.shelve_creation(change[1])
+ elif change[0] in ('change kind', 'modify text'):
+ self.shelve_content_change(change[1])
+ elif change[0] == 'modify target':
+ self.shelve_modify_target(change[1])
+ else:
+ raise ValueError('Unknown change kind: "%s"' % change[0])
+
+ def shelve_all(self):
+ """Shelve all changes."""
+ for change in self.iter_shelvable():
+ self.shelve_change(change)
+
+ def shelve_rename(self, file_id):
+ """Shelve a file rename.
+
+ :param file_id: The file id of the file to shelve the renaming of.
+ """
+ names, parents = self.renames[file_id]
+ w_trans_id = self.work_transform.trans_id_file_id(file_id)
+ work_parent = self.work_transform.trans_id_file_id(parents[0])
+ self.work_transform.adjust_path(names[0], work_parent, w_trans_id)
+
+ s_trans_id = self.shelf_transform.trans_id_file_id(file_id)
+ shelf_parent = self.shelf_transform.trans_id_file_id(parents[1])
+ self.shelf_transform.adjust_path(names[1], shelf_parent, s_trans_id)
+
+ def shelve_modify_target(self, file_id):
+ """Shelve a change of symlink target.
+
+ :param file_id: The file id of the symlink which changed target.
+ """
+ new_target = self.target_tree.get_symlink_target(file_id)
+ w_trans_id = self.work_transform.trans_id_file_id(file_id)
+ self.work_transform.delete_contents(w_trans_id)
+ self.work_transform.create_symlink(new_target, w_trans_id)
+
+ old_target = self.work_tree.get_symlink_target(file_id)
+ s_trans_id = self.shelf_transform.trans_id_file_id(file_id)
+ self.shelf_transform.delete_contents(s_trans_id)
+ self.shelf_transform.create_symlink(old_target, s_trans_id)
+
+ def shelve_lines(self, file_id, new_lines):
+ """Shelve text changes to a file, using provided lines.
+
+ :param file_id: The file id of the file to shelve the text of.
+ :param new_lines: The lines that the file should have due to shelving.
+ """
+ w_trans_id = self.work_transform.trans_id_file_id(file_id)
+ self.work_transform.delete_contents(w_trans_id)
+ self.work_transform.create_file(new_lines, w_trans_id)
+
+ s_trans_id = self.shelf_transform.trans_id_file_id(file_id)
+ self.shelf_transform.delete_contents(s_trans_id)
+ inverse_lines = self._inverse_lines(new_lines, file_id)
+ self.shelf_transform.create_file(inverse_lines, s_trans_id)
+
+ @staticmethod
+ def _content_from_tree(tt, tree, file_id):
+ trans_id = tt.trans_id_file_id(file_id)
+ tt.delete_contents(trans_id)
+ transform.create_from_tree(tt, trans_id, tree, file_id)
+
+ def shelve_content_change(self, file_id):
+ """Shelve a kind change or binary file content change.
+
+ :param file_id: The file id of the file to shelve the content change
+ of.
+ """
+ self._content_from_tree(self.work_transform, self.target_tree, file_id)
+ self._content_from_tree(self.shelf_transform, self.work_tree, file_id)
+
+ def shelve_creation(self, file_id):
+ """Shelve creation of a file.
+
+ This handles content and inventory id.
+ :param file_id: The file_id of the file to shelve creation of.
+ """
+ kind, name, parent, versioned = self.creation[file_id]
+ version = not versioned[0]
+ self._shelve_creation(self.work_tree, file_id, self.work_transform,
+ self.shelf_transform, kind, name, parent,
+ version)
+
+ def shelve_deletion(self, file_id):
+ """Shelve deletion of a file.
+
+ This handles content and inventory id.
+ :param file_id: The file_id of the file to shelve deletion of.
+ """
+ kind, name, parent, versioned = self.deletion[file_id]
+ existing_path = self.target_tree.id2path(file_id)
+ if not self.work_tree.has_filename(existing_path):
+ existing_path = None
+ version = not versioned[1]
+ self._shelve_creation(self.target_tree, file_id, self.shelf_transform,
+ self.work_transform, kind, name, parent,
+ version, existing_path=existing_path)
+
+ def _shelve_creation(self, tree, file_id, from_transform, to_transform,
+ kind, name, parent, version, existing_path=None):
+ w_trans_id = from_transform.trans_id_file_id(file_id)
+ if parent is not None and kind is not None:
+ from_transform.delete_contents(w_trans_id)
+ from_transform.unversion_file(w_trans_id)
+
+ if existing_path is not None:
+ s_trans_id = to_transform.trans_id_tree_path(existing_path)
+ else:
+ s_trans_id = to_transform.trans_id_file_id(file_id)
+ if parent is not None:
+ s_parent_id = to_transform.trans_id_file_id(parent)
+ to_transform.adjust_path(name, s_parent_id, s_trans_id)
+ if existing_path is None:
+ if kind is None:
+ to_transform.create_file('', s_trans_id)
+ else:
+ transform.create_from_tree(to_transform, s_trans_id,
+ tree, file_id)
+ if version:
+ to_transform.version_file(file_id, s_trans_id)
+
+ def _inverse_lines(self, new_lines, file_id):
+ """Produce a version with only those changes removed from new_lines."""
+ target_lines = self.target_tree.get_file_lines(file_id)
+ work_lines = self.work_tree.get_file_lines(file_id)
+ return merge3.Merge3(new_lines, target_lines, work_lines).merge_lines()
+
+ def finalize(self):
+ """Release all resources used by this ShelfCreator."""
+ self.work_transform.finalize()
+ self.shelf_transform.finalize()
+
+ def transform(self):
+ """Shelve changes from working tree."""
+ self.work_transform.apply()
+
+ def write_shelf(self, shelf_file, message=None):
+ """Serialize the shelved changes to a file.
+
+ :param shelf_file: A file-like object to write the shelf to.
+ :param message: An optional message describing the shelved changes.
+ :return: the filename of the written file.
+ """
+ transform.resolve_conflicts(self.shelf_transform)
+ serializer = pack.ContainerSerialiser()
+ shelf_file.write(serializer.begin())
+ metadata = {
+ 'revision_id': self.target_tree.get_revision_id(),
+ }
+ if message is not None:
+ metadata['message'] = message.encode('utf-8')
+ shelf_file.write(serializer.bytes_record(
+ bencode.bencode(metadata), (('metadata',),)))
+ for bytes in self.shelf_transform.serialize(serializer):
+ shelf_file.write(bytes)
+ shelf_file.write(serializer.end())
+
+
+class Unshelver(object):
+ """Unshelve shelved changes."""
+
+ def __init__(self, tree, base_tree, transform, message):
+ """Constructor.
+
+ :param tree: The tree to apply the changes to.
+        :param base_tree: The basis to apply the transform to.
+ :param message: A message from the shelved transform.
+ """
+ self.tree = tree
+ self.base_tree = base_tree
+ self.transform = transform
+ self.message = message
+
+ @staticmethod
+ def iter_records(shelf_file):
+ parser = pack.ContainerPushParser()
+ parser.accept_bytes(shelf_file.read())
+ return iter(parser.read_pending_records())
+
+ @staticmethod
+ def parse_metadata(records):
+ names, metadata_bytes = records.next()
+ if names[0] != ('metadata',):
+ raise errors.ShelfCorrupt
+ metadata = bencode.bdecode(metadata_bytes)
+ message = metadata.get('message')
+ if message is not None:
+ metadata['message'] = message.decode('utf-8')
+ return metadata
+
+ @classmethod
+ def from_tree_and_shelf(klass, tree, shelf_file):
+ """Create an Unshelver from a tree and a shelf file.
+
+ :param tree: The tree to apply shelved changes to.
+ :param shelf_file: A file-like object containing shelved changes.
+ :return: The Unshelver.
+ """
+ records = klass.iter_records(shelf_file)
+ metadata = klass.parse_metadata(records)
+ base_revision_id = metadata['revision_id']
+ try:
+ base_tree = tree.revision_tree(base_revision_id)
+ except errors.NoSuchRevisionInTree:
+ base_tree = tree.branch.repository.revision_tree(base_revision_id)
+ tt = transform.TransformPreview(base_tree)
+ tt.deserialize(records)
+ return klass(tree, base_tree, tt, metadata.get('message'))
+
+ def make_merger(self, task=None):
+ """Return a merger that can unshelve the changes."""
+ target_tree = self.transform.get_preview_tree()
+ merger = merge.Merger.from_uncommitted(self.tree, target_tree,
+ task, self.base_tree)
+ merger.merge_type = merge.Merge3Merger
+ return merger
+
+ def finalize(self):
+ """Release all resources held by this Unshelver."""
+ self.transform.finalize()
+
+
+class ShelfManager(object):
+ """Maintain a list of shelved changes."""
+
+ def __init__(self, tree, transport):
+ self.tree = tree
+ self.transport = transport.clone('shelf')
+ self.transport.ensure_base()
+
+ def get_shelf_filename(self, shelf_id):
+ return 'shelf-%d' % shelf_id
+
+ def get_shelf_ids(self, filenames):
+ matcher = re.compile('shelf-([1-9][0-9]*)')
+ shelf_ids = []
+ for filename in filenames:
+ match = matcher.match(filename)
+ if match is not None:
+ shelf_ids.append(int(match.group(1)))
+ return shelf_ids
+
+ def new_shelf(self):
+ """Return a file object and id for a new set of shelved changes."""
+ last_shelf = self.last_shelf()
+ if last_shelf is None:
+ next_shelf = 1
+ else:
+ next_shelf = last_shelf + 1
+ filename = self.get_shelf_filename(next_shelf)
+ shelf_file = open(self.transport.local_abspath(filename), 'wb')
+ return next_shelf, shelf_file
+
+ def shelve_changes(self, creator, message=None):
+ """Store the changes in a ShelfCreator on a shelf."""
+ next_shelf, shelf_file = self.new_shelf()
+ try:
+ creator.write_shelf(shelf_file, message)
+ finally:
+ shelf_file.close()
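+ # Only strip the changes from the working tree once the shelf file has
+ # been written out and closed.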
+ creator.transform()
+ return next_shelf
+
+ def read_shelf(self, shelf_id):
+ """Return the file associated with a shelf_id for reading.
+
+ :param shelf_id: The id of the shelf to retrieve the file for.
+ """
+ filename = self.get_shelf_filename(shelf_id)
+ try:
+ return open(self.transport.local_abspath(filename), 'rb')
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise errors.NoSuchShelfId(shelf_id)
+
+ def get_unshelver(self, shelf_id):
+ """Return an unshelver for a given shelf_id.
+
+ :param shelf_id: The shelf id to return the unshelver for.
+ """
+ shelf_file = self.read_shelf(shelf_id)
+ try:
+ return Unshelver.from_tree_and_shelf(self.tree, shelf_file)
+ finally:
+ shelf_file.close()
+
+ def get_metadata(self, shelf_id):
+ """Return the metadata associated with a given shelf_id."""
+ shelf_file = self.read_shelf(shelf_id)
+ try:
+ records = Unshelver.iter_records(shelf_file)
+ finally:
+ shelf_file.close()
+ return Unshelver.parse_metadata(records)
+
+ def delete_shelf(self, shelf_id):
+ """Delete the shelved changes for a given id.
+
+ :param shelf_id: id of the shelved changes to delete.
+ """
+ filename = self.get_shelf_filename(shelf_id)
+ self.transport.delete(filename)
+
+ def active_shelves(self):
+ """Return a list of shelved changes."""
+ active = self.get_shelf_ids(self.transport.list_dir('.'))
+ active.sort()
+ return active
+
+ def last_shelf(self):
+ """Return the id of the last-created shelved change."""
+ active = self.active_shelves()
+ if len(active) > 0:
+ return active[-1]
+ else:
+ return None
diff --git a/bzrlib/shelf_ui.py b/bzrlib/shelf_ui.py
new file mode 100644
index 0000000..b83da8b
--- /dev/null
+++ b/bzrlib/shelf_ui.py
@@ -0,0 +1,498 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import shutil
+import sys
+import tempfile
+
+from bzrlib import (
+ builtins,
+ delta,
+ diff,
+ errors,
+ osutils,
+ patches,
+ patiencediff,
+ shelf,
+ textfile,
+ trace,
+ ui,
+ workingtree,
+)
+from bzrlib.i18n import gettext
+
+class UseEditor(Exception):
+ """Use an editor instead of selecting hunks."""
+
+
+class ShelfReporter(object):
+
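+ # Prompt templates keyed by the kind of change yielded from
+ # ShelfCreator.iter_shelvable(); prompt_change() fills in the
+ # %(path)s / %(other)s / %(this)s placeholders for each kind.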
+ vocab = {'add file': gettext('Shelve adding file "%(path)s"?'),
+ 'binary': gettext('Shelve binary changes?'),
+ 'change kind': gettext('Shelve changing "%(path)s" from %(other)s'
+ ' to %(this)s?'),
+ 'delete file': gettext('Shelve removing file "%(path)s"?'),
+ 'final': gettext('Shelve %d change(s)?'),
+ 'hunk': gettext('Shelve?'),
+ 'modify target': gettext('Shelve changing target of'
+ ' "%(path)s" from "%(other)s" to "%(this)s"?'),
+ 'rename': gettext('Shelve renaming "%(other)s" =>'
+ ' "%(this)s"?')
+ }
+
+ invert_diff = False
+
+ def __init__(self):
+ self.delta_reporter = delta._ChangeReporter()
+
+ def no_changes(self):
+ """Report that no changes were selected to apply."""
+ trace.warning('No changes to shelve.')
+
+ def shelved_id(self, shelf_id):
+ """Report the id changes were shelved to."""
+ trace.note(gettext('Changes shelved with id "%d".') % shelf_id)
+
+ def changes_destroyed(self):
+ """Report that changes were made without shelving."""
+ trace.note(gettext('Selected changes destroyed.'))
+
+ def selected_changes(self, transform):
+ """Report the changes that were selected."""
+ trace.note(gettext("Selected changes:"))
+ changes = transform.iter_changes()
+ delta.report_changes(changes, self.delta_reporter)
+
+ def prompt_change(self, change):
+ """Determine the prompt for a change to apply."""
+ if change[0] == 'rename':
+ vals = {'this': change[3], 'other': change[2]}
+ elif change[0] == 'change kind':
+ vals = {'path': change[4], 'other': change[2], 'this': change[3]}
+ elif change[0] == 'modify target':
+ vals = {'path': change[2], 'other': change[3], 'this': change[4]}
+ else:
+ vals = {'path': change[3]}
+ prompt = self.vocab[change[0]] % vals
+ return prompt
+
+
+class ApplyReporter(ShelfReporter):
+
+ vocab = {'add file': gettext('Delete file "%(path)s"?'),
+ 'binary': gettext('Apply binary changes?'),
+ 'change kind': gettext('Change "%(path)s" from %(this)s'
+ ' to %(other)s?'),
+ 'delete file': gettext('Add file "%(path)s"?'),
+ 'final': gettext('Apply %d change(s)?'),
+ 'hunk': gettext('Apply change?'),
+ 'modify target': gettext('Change target of'
+ ' "%(path)s" from "%(this)s" to "%(other)s"?'),
+ 'rename': gettext('Rename "%(this)s" => "%(other)s"?'),
+ }
+
+ invert_diff = True
+
+ def changes_destroyed(self):
+ pass
+
+
+class Shelver(object):
+ """Interactively shelve the changes in a working tree."""
+
+ def __init__(self, work_tree, target_tree, diff_writer=None, auto=False,
+ auto_apply=False, file_list=None, message=None,
+ destroy=False, manager=None, reporter=None):
+ """Constructor.
+
+ :param work_tree: The working tree to shelve changes from.
+ :param target_tree: The "unchanged" / old tree to compare the
+ work_tree to.
+ :param auto: If True, shelve each possible change.
+ :param auto_apply: If True, shelve changes with no final prompt.
+ :param file_list: If supplied, only files in this list may be shelved.
+ :param message: The message to associate with the shelved changes.
+ :param destroy: Change the working tree without storing the shelved
+ changes.
+ :param manager: The shelf manager to use.
+ :param reporter: Object for reporting changes to user.
+ """
+ self.work_tree = work_tree
+ self.target_tree = target_tree
+ self.diff_writer = diff_writer
+ if self.diff_writer is None:
+ self.diff_writer = sys.stdout
+ if manager is None:
+ manager = work_tree.get_shelf_manager()
+ self.manager = manager
+ self.auto = auto
+ self.auto_apply = auto_apply
+ self.file_list = file_list
+ self.message = message
+ self.destroy = destroy
+ if reporter is None:
+ reporter = ShelfReporter()
+ self.reporter = reporter
+ config = self.work_tree.branch.get_config()
+ self.change_editor = config.get_change_editor(target_tree, work_tree)
+ self.work_tree.lock_tree_write()
+
+ @classmethod
+ def from_args(klass, diff_writer, revision=None, all=False, file_list=None,
+ message=None, directory=None, destroy=False):
+ """Create a shelver from commandline arguments.
+
+ The returned shelver will have a work_tree that is locked and should
+ be unlocked.
+
+ :param revision: RevisionSpec of the revision to compare to.
+ :param all: If True, shelve all changes without prompting.
+ :param file_list: If supplied, only files in this list may be shelved.
+ :param message: The message to associate with the shelved changes.
+ :param directory: The directory containing the working tree.
+ :param destroy: Change the working tree without storing the shelved
+ changes.
+ """
+ if directory is None:
+ directory = u'.'
+ elif file_list:
+ file_list = [osutils.pathjoin(directory, f) for f in file_list]
+ tree, path = workingtree.WorkingTree.open_containing(directory)
+ # Ensure that tree is locked for the lifetime of target_tree, as
+ # target tree may be reading from the same dirstate.
+ tree.lock_tree_write()
+ try:
+ target_tree = builtins._get_one_revision_tree('shelf2', revision,
+ tree.branch, tree)
+ files = tree.safe_relpath_files(file_list)
+ return klass(tree, target_tree, diff_writer, all, all, files,
+ message, destroy)
+ finally:
+ tree.unlock()
+
+ def run(self):
+ """Interactively shelve the changes."""
+ creator = shelf.ShelfCreator(self.work_tree, self.target_tree,
+ self.file_list)
+ self.tempdir = tempfile.mkdtemp()
+ changes_shelved = 0
+ try:
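+ # iter_shelvable() yields tuples whose first element names the kind of
+ # change ('modify text', 'add file', 'rename', ...). Text modifications
+ # get hunk-by-hunk selection; every other kind is shelved whole after a
+ # single prompt.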
+ for change in creator.iter_shelvable():
+ if change[0] == 'modify text':
+ try:
+ changes_shelved += self.handle_modify_text(creator,
+ change[1])
+ except errors.BinaryFile:
+ if self.prompt_bool(self.reporter.vocab['binary']):
+ changes_shelved += 1
+ creator.shelve_content_change(change[1])
+ else:
+ if self.prompt_bool(self.reporter.prompt_change(change)):
+ creator.shelve_change(change)
+ changes_shelved += 1
+ if changes_shelved > 0:
+ self.reporter.selected_changes(creator.work_transform)
+ if (self.auto_apply or self.prompt_bool(
+ self.reporter.vocab['final'] % changes_shelved)):
+ if self.destroy:
+ creator.transform()
+ self.reporter.changes_destroyed()
+ else:
+ shelf_id = self.manager.shelve_changes(creator,
+ self.message)
+ self.reporter.shelved_id(shelf_id)
+ else:
+ self.reporter.no_changes()
+ finally:
+ shutil.rmtree(self.tempdir)
+ creator.finalize()
+
+ def finalize(self):
+ if self.change_editor is not None:
+ self.change_editor.finish()
+ self.work_tree.unlock()
+
+ def get_parsed_patch(self, file_id, invert=False):
+ """Return a parsed version of a file's patch.
+
+ :param file_id: The id of the file to generate a patch for.
+ :param invert: If True, provide an inverted patch (insertions displayed
+ as removals, removals displayed as insertions).
+ :return: A patches.Patch.
+ """
+ diff_file = StringIO()
+ if invert:
+ old_tree = self.work_tree
+ new_tree = self.target_tree
+ else:
+ old_tree = self.target_tree
+ new_tree = self.work_tree
+ old_path = old_tree.id2path(file_id)
+ new_path = new_tree.id2path(file_id)
+ text_differ = diff.DiffText(old_tree, new_tree, diff_file,
+ path_encoding=osutils.get_terminal_encoding())
+ patch = text_differ.diff(file_id, old_path, new_path, 'file', 'file')
+ diff_file.seek(0)
+ return patches.parse_patch(diff_file)
+
+ def prompt(self, message, choices, default):
+ return ui.ui_factory.choose(message, choices, default=default)
+
+ def prompt_bool(self, question, allow_editor=False):
+ """Prompt the user with a yes/no question.
+
+ This may be overridden by self.auto. It may also *set* self.auto. It
+ may also raise UserAbort.
+ :param question: The question to ask the user.
+ :return: True or False
+ """
+ if self.auto:
+ return True
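+ # The alternatives are passed to ui_factory.choose() as one
+ # newline-separated string, with '&' marking each option's shortcut
+ # character.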
+ alternatives_chars = 'yn'
+ alternatives = '&yes\n&No'
+ if allow_editor:
+ alternatives_chars += 'e'
+ alternatives += '\n&edit manually'
+ alternatives_chars += 'fq'
+ alternatives += '\n&finish\n&quit'
+ choice = self.prompt(question, alternatives, 1)
+ if choice is None:
+ # EOF.
+ char = 'n'
+ else:
+ char = alternatives_chars[choice]
+ if char == 'y':
+ return True
+ elif char == 'e' and allow_editor:
+ raise UseEditor
+ elif char == 'f':
+ self.auto = True
+ return True
+ if char == 'q':
+ raise errors.UserAbort()
+ else:
+ return False
+
+ def handle_modify_text(self, creator, file_id):
+ """Handle modified text, by using hunk selection or file editing.
+
+ :param creator: A ShelfCreator.
+ :param file_id: The id of the file that was modified.
+ :return: The number of changes.
+ """
+ work_tree_lines = self.work_tree.get_file_lines(file_id)
+ try:
+ lines, change_count = self._select_hunks(creator, file_id,
+ work_tree_lines)
+ except UseEditor:
+ lines, change_count = self._edit_file(file_id, work_tree_lines)
+ if change_count != 0:
+ creator.shelve_lines(file_id, lines)
+ return change_count
+
+ def _select_hunks(self, creator, file_id, work_tree_lines):
+ """Provide diff hunk selection for modified text.
+
+ If self.reporter.invert_diff is True, the diff is inverted so that
+ insertions are displayed as removals and vice versa.
+
+ :param creator: a ShelfCreator
+ :param file_id: The id of the file to shelve.
+ :param work_tree_lines: Line contents of the file in the working tree.
+ :return: A tuple of (lines, change_count), where lines is the new
+ content of the file and change_count is the number of hunks
+ selected at the prompts.
+ """
+ if self.reporter.invert_diff:
+ target_lines = work_tree_lines
+ else:
+ target_lines = self.target_tree.get_file_lines(file_id)
+ textfile.check_text_lines(work_tree_lines)
+ textfile.check_text_lines(target_lines)
+ parsed = self.get_parsed_patch(file_id, self.reporter.invert_diff)
+ final_hunks = []
+ if not self.auto:
+ offset = 0
+ self.diff_writer.write(parsed.get_header())
+ for hunk in parsed.hunks:
+ self.diff_writer.write(str(hunk))
+ selected = self.prompt_bool(self.reporter.vocab['hunk'],
+ allow_editor=(self.change_editor
+ is not None))
+ if not self.reporter.invert_diff:
+ selected = (not selected)
+ if selected:
+ hunk.mod_pos += offset
+ final_hunks.append(hunk)
+ else:
+ offset -= (hunk.mod_range - hunk.orig_range)
+ sys.stdout.flush()
+ if self.reporter.invert_diff:
+ change_count = len(final_hunks)
+ else:
+ change_count = len(parsed.hunks) - len(final_hunks)
+ patched = patches.iter_patched_from_hunks(target_lines,
+ final_hunks)
+ lines = list(patched)
+ return lines, change_count
+
+ def _edit_file(self, file_id, work_tree_lines):
+ """
+ :param file_id: id of the file to edit.
+ :param work_tree_lines: Line contents of the file in the working tree.
+ :return: (lines, change_region_count), where lines is the new line
+ content of the file, and change_region_count is the number of
+ changed regions.
+ """
+ lines = osutils.split_lines(self.change_editor.edit_file(file_id))
+ return lines, self._count_changed_regions(work_tree_lines, lines)
+
+ @staticmethod
+ def _count_changed_regions(old_lines, new_lines):
+ matcher = patiencediff.PatienceSequenceMatcher(None, old_lines,
+ new_lines)
+ blocks = matcher.get_matching_blocks()
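+ # get_matching_blocks() always ends with a zero-length sentinel block;
+ # the changed regions are counted as the gaps between consecutive real
+ # matching blocks, i.e. len(blocks) - 2.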
+ return len(blocks) - 2
+
+
+class Unshelver(object):
+ """Unshelve changes into a working tree."""
+
+ @classmethod
+ def from_args(klass, shelf_id=None, action='apply', directory='.',
+ write_diff_to=None):
+ """Create an unshelver from commandline arguments.
+
+ The returned shelver will have a tree that is locked and should
+ be unlocked.
+
+ :param shelf_id: Integer id of the shelf, as a string.
+ :param action: action to perform. May be 'apply', 'dry-run',
+ 'preview', 'delete-only' or 'keep'.
+ :param directory: The directory to unshelve changes into.
+ :param write_diff_to: See Unshelver.__init__().
+ """
+ tree, path = workingtree.WorkingTree.open_containing(directory)
+ tree.lock_tree_write()
+ try:
+ manager = tree.get_shelf_manager()
+ if shelf_id is not None:
+ try:
+ shelf_id = int(shelf_id)
+ except ValueError:
+ raise errors.InvalidShelfId(shelf_id)
+ else:
+ shelf_id = manager.last_shelf()
+ if shelf_id is None:
+ raise errors.BzrCommandError(gettext('No changes are shelved.'))
+ apply_changes = True
+ delete_shelf = True
+ read_shelf = True
+ show_diff = False
+ if action == 'dry-run':
+ apply_changes = False
+ delete_shelf = False
+ elif action == 'preview':
+ apply_changes = False
+ delete_shelf = False
+ show_diff = True
+ elif action == 'delete-only':
+ apply_changes = False
+ read_shelf = False
+ elif action == 'keep':
+ apply_changes = True
+ delete_shelf = False
+ except:
+ tree.unlock()
+ raise
+ return klass(tree, manager, shelf_id, apply_changes, delete_shelf,
+ read_shelf, show_diff, write_diff_to)
+
+ def __init__(self, tree, manager, shelf_id, apply_changes=True,
+ delete_shelf=True, read_shelf=True, show_diff=False,
+ write_diff_to=None):
+ """Constructor.
+
+ :param tree: The working tree to unshelve into.
+ :param manager: The ShelfManager containing the shelved changes.
+ :param shelf_id: The id of the shelf to operate on.
+ :param apply_changes: If True, apply the shelved changes to the
+ working tree.
+ :param delete_shelf: If True, delete the changes from the shelf.
+ :param read_shelf: If True, read the changes from the shelf.
+ :param show_diff: If True, show the diff that would result from
+ unshelving the changes.
+ :param write_diff_to: A file-like object where the diff will be
+ written to. If None, ui.ui_factory.make_output_stream() will
+ be used.
+ """
+ self.tree = tree
+ manager = tree.get_shelf_manager()
+ self.manager = manager
+ self.shelf_id = shelf_id
+ self.apply_changes = apply_changes
+ self.delete_shelf = delete_shelf
+ self.read_shelf = read_shelf
+ self.show_diff = show_diff
+ self.write_diff_to = write_diff_to
+
+ def run(self):
+ """Perform the unshelving operation."""
+ self.tree.lock_tree_write()
+ cleanups = [self.tree.unlock]
+ try:
+ if self.read_shelf:
+ trace.note(gettext('Using changes with id "%d".') % self.shelf_id)
+ unshelver = self.manager.get_unshelver(self.shelf_id)
+ cleanups.append(unshelver.finalize)
+ if unshelver.message is not None:
+ trace.note(gettext('Message: %s') % unshelver.message)
+ change_reporter = delta._ChangeReporter()
+ merger = unshelver.make_merger(None)
+ merger.change_reporter = change_reporter
+ if self.apply_changes:
+ merger.do_merge()
+ elif self.show_diff:
+ self.write_diff(merger)
+ else:
+ self.show_changes(merger)
+ if self.delete_shelf:
+ self.manager.delete_shelf(self.shelf_id)
+ trace.note(gettext('Deleted changes with id "%d".') % self.shelf_id)
+ finally:
+ for cleanup in reversed(cleanups):
+ cleanup()
+
+ def write_diff(self, merger):
+ """Write this operation's diff to self.write_diff_to."""
+ tree_merger = merger.make_merger()
+ tt = tree_merger.make_preview_transform()
+ new_tree = tt.get_preview_tree()
+ if self.write_diff_to is None:
+ self.write_diff_to = ui.ui_factory.make_output_stream(encoding_type='exact')
+ path_encoding = osutils.get_diff_header_encoding()
+ diff.show_diff_trees(merger.this_tree, new_tree, self.write_diff_to,
+ path_encoding=path_encoding)
+ tt.finalize()
+
+ def show_changes(self, merger):
+ """Show the changes that this operation specifies."""
+ tree_merger = merger.make_merger()
+ # This implicitly shows the changes via the reporter, so we're done...
+ tt = tree_merger.make_preview_transform()
+ tt.finalize()
diff --git a/bzrlib/shellcomplete.py b/bzrlib/shellcomplete.py
new file mode 100644
index 0000000..0b2a1e5
--- /dev/null
+++ b/bzrlib/shellcomplete.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import sys
+
+
+def shellcomplete(context=None, outfile=None):
+ if outfile is None:
+ outfile = sys.stdout
+ if context is None:
+ shellcomplete_commands(outfile=outfile)
+ else:
+ shellcomplete_on_command(context, outfile=outfile)
+
+
+def shellcomplete_on_command(cmdname, outfile=None):
+ cmdname = str(cmdname)
+
+ if outfile is None:
+ outfile = sys.stdout
+
+ from inspect import getdoc
+ from bzrlib import commands
+ cmdobj = commands.get_cmd_object(cmdname)
+
+ doc = getdoc(cmdobj)
+ if doc is None:
+ raise NotImplementedError("sorry, no detailed shellcomplete yet for %r" % cmdname)
+
+ shellcomplete_on_options(cmdobj.options().values(), outfile=outfile)
+ for aname in cmdobj.takes_args:
+ outfile.write(aname + '\n')
+
+
+def shellcomplete_on_options(options, outfile=None):
+ for opt in options:
+ short_name = opt.short_name()
+ if short_name:
+ outfile.write('"(--%s -%s)"{--%s,-%s}\n'
+ % (opt.name, short_name, opt.name, short_name))
+ else:
+ outfile.write('--%s\n' % opt.name)
+
+
+def shellcomplete_commands(outfile=None):
+ """List all commands"""
+ from bzrlib import commands
+ from inspect import getdoc
+
+ commands.install_bzr_command_hooks()
+
+ if outfile is None:
+ outfile = sys.stdout
+
+ cmds = []
+ for cmdname in commands.all_command_names():
+ cmd = commands.get_cmd_object(cmdname)
+ cmds.append((cmdname, cmd))
+ for alias in cmd.aliases:
+ cmds.append((alias, cmd))
+ cmds.sort()
+ for cmdname, cmd in cmds:
+ if cmd.hidden:
+ continue
+ doc = getdoc(cmd)
+ if doc is None:
+ outfile.write(cmdname + '\n')
+ else:
+ doclines = doc.splitlines()
+ firstline = doclines[0].lower()
+ outfile.write(cmdname + ':' + firstline[0:-1] + '\n')
diff --git a/bzrlib/smart/__init__.py b/bzrlib/smart/__init__.py
new file mode 100644
index 0000000..1348949
--- /dev/null
+++ b/bzrlib/smart/__init__.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2006,2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Smart-server protocol, client and server.
+
+This code is fairly complex, so it has been split up into a package of modules,
+rather than being a single large module. Refer to the individual module
+docstrings for details.
+
+Server-side request handlers are registered in the `bzrlib.smart.request`
+module.
+
+The domain logic is in `bzrlib.remote`: `RemoteBzrDir`, `RemoteBranch`,
+and so on.
+
+There is also a plain file-level transport that calls remote methods to
+manipulate files on the server in `bzrlib.transport.remote`.
+
+The protocol is described in doc/developers/network-protocol.txt.
+
+"""
+
+from __future__ import absolute_import
+
+# TODO: A plain integer from query_version is too simple; should give some
+# capabilities too?
+
+# TODO: Make each request and response self-validatable, e.g. with checksums.
+#
+# TODO: is it useful to allow multiple chunks in the bulk data?
+#
+# TODO: If we get an exception during transmission of bulk data we can't just
+# emit the exception because it won't be seen.
+# John proposes: I think it would be worthwhile to have a header on each
+# chunk, that indicates it is another chunk. Then you can send an 'error'
+# chunk as long as you finish the previous chunk.
+#
+
+# Promote some attributes from submodules into this namespace
+from bzrlib.smart.request import SmartServerRequestHandler
+
+
diff --git a/bzrlib/smart/branch.py b/bzrlib/smart/branch.py
new file mode 100644
index 0000000..631ec1d
--- /dev/null
+++ b/bzrlib/smart/branch.py
@@ -0,0 +1,448 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Server-side branch related request implmentations."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ bencode,
+ errors,
+ revision as _mod_revision,
+ )
+from bzrlib.controldir import ControlDir
+from bzrlib.smart.request import (
+ FailedSmartServerResponse,
+ SmartServerRequest,
+ SuccessfulSmartServerResponse,
+ )
+
+
+class SmartServerBranchRequest(SmartServerRequest):
+ """Base class for handling common branch request logic.
+ """
+
+ def do(self, path, *args):
+ """Execute a request for a branch at path.
+
+ All Branch requests take a path to the branch as their first argument.
+
+ If the branch is a branch reference, NotBranchError is raised.
+
+ :param path: The path for the branch as received from the
+ client.
+ :return: A SmartServerResponse from self.do_with_branch().
+ """
+ transport = self.transport_from_client_path(path)
+ controldir = ControlDir.open_from_transport(transport)
+ if controldir.get_branch_reference() is not None:
+ raise errors.NotBranchError(transport.base)
+ branch = controldir.open_branch(ignore_fallbacks=True)
+ return self.do_with_branch(branch, *args)
+
+
+class SmartServerLockedBranchRequest(SmartServerBranchRequest):
+ """Base class for handling common branch request logic for requests that
+ need a write lock.
+ """
+
+ def do_with_branch(self, branch, branch_token, repo_token, *args):
+ """Execute a request for a branch.
+
+ A write lock will be acquired with the given tokens for the branch and
+ repository locks. The lock will be released once the request is
+ processed. The physical lock state won't be changed.
+ """
+ # XXX: write a test for LockContention
+ branch.repository.lock_write(token=repo_token)
+ try:
+ branch.lock_write(token=branch_token)
+ try:
+ return self.do_with_locked_branch(branch, *args)
+ finally:
+ branch.unlock()
+ finally:
+ branch.repository.unlock()
+
+
+class SmartServerBranchBreakLock(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Break a branch lock.
+ """
+ branch.break_lock()
+ return SuccessfulSmartServerResponse(('ok', ), )
+
+
+class SmartServerBranchGetConfigFile(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Return the content of branch.conf
+
+ The body is not utf8 decoded - it's the literal bytestream from disk.
+ """
+ try:
+ content = branch.control_transport.get_bytes('branch.conf')
+ except errors.NoSuchFile:
+ content = ''
+ return SuccessfulSmartServerResponse(('ok', ), content)
+
+
+class SmartServerBranchPutConfigFile(SmartServerBranchRequest):
+ """Set the configuration data for a branch.
+
+ New in 2.5.
+ """
+
+ def do_with_branch(self, branch, branch_token, repo_token):
+ """Set the content of branch.conf.
+
+ The body is not utf8 decoded - it's the literal bytestream for disk.
+ """
+ self._branch = branch
+ self._branch_token = branch_token
+ self._repo_token = repo_token
+ # Signal we want a body
+ return None
+
+ def do_body(self, body_bytes):
+ self._branch.repository.lock_write(token=self._repo_token)
+ try:
+ self._branch.lock_write(token=self._branch_token)
+ try:
+ self._branch.control_transport.put_bytes(
+ 'branch.conf', body_bytes)
+ finally:
+ self._branch.unlock()
+ finally:
+ self._branch.repository.unlock()
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+class SmartServerBranchGetParent(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Return the parent of branch."""
+ parent = branch._get_parent_location() or ''
+ return SuccessfulSmartServerResponse((parent,))
+
+
+class SmartServerBranchGetTagsBytes(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Return the _get_tags_bytes for a branch."""
+ bytes = branch._get_tags_bytes()
+ return SuccessfulSmartServerResponse((bytes,))
+
+
+class SmartServerBranchSetTagsBytes(SmartServerLockedBranchRequest):
+
+ def __init__(self, backing_transport, root_client_path='/', jail_root=None):
+ SmartServerLockedBranchRequest.__init__(
+ self, backing_transport, root_client_path, jail_root)
+ self.locked = False
+
+ def do_with_locked_branch(self, branch):
+ """Call _set_tags_bytes for a branch.
+
+ New in 1.18.
+ """
+ # We need to keep this branch locked until we get a body with the tags
+ # bytes.
+ self.branch = branch
+ self.branch.lock_write()
+ self.locked = True
+
+ def do_body(self, bytes):
+ self.branch._set_tags_bytes(bytes)
+ return SuccessfulSmartServerResponse(())
+
+ def do_end(self):
+ # TODO: this request shouldn't have to do this housekeeping manually.
+ # Some of this logic probably belongs in a base class.
+ if not self.locked:
+ # We never acquired the branch successfully in the first place, so
+ # there's nothing more to do.
+ return
+ try:
+ return SmartServerLockedBranchRequest.do_end(self)
+ finally:
+ # Only try unlocking if we locked successfully in the first place
+ self.branch.unlock()
+
+
+class SmartServerBranchHeadsToFetch(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Return the heads-to-fetch for a Branch as two bencoded lists.
+
+ See Branch.heads_to_fetch.
+
+ New in 2.4.
+ """
+ must_fetch, if_present_fetch = branch.heads_to_fetch()
+ return SuccessfulSmartServerResponse(
+ (list(must_fetch), list(if_present_fetch)))
+
+
+class SmartServerBranchRequestGetStackedOnURL(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ stacked_on_url = branch.get_stacked_on_url()
+ return SuccessfulSmartServerResponse(('ok', stacked_on_url))
+
+
+class SmartServerRequestRevisionHistory(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Get the revision history for the branch.
+
+ The revision list is returned as the body content,
+ with each revision utf8 encoded and \x00 joined.
+ """
+ branch.lock_read()
+ try:
+ graph = branch.repository.get_graph()
+ stop_revisions = (None, _mod_revision.NULL_REVISION)
+ history = list(graph.iter_lefthand_ancestry(
+ branch.last_revision(), stop_revisions))
+ finally:
+ branch.unlock()
+ return SuccessfulSmartServerResponse(
+ ('ok', ), ('\x00'.join(reversed(history))))
+
+
+class SmartServerBranchRequestLastRevisionInfo(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch):
+ """Return branch.last_revision_info().
+
+ The revno is encoded in decimal, the revision_id is encoded as utf8.
+ """
+ revno, last_revision = branch.last_revision_info()
+ return SuccessfulSmartServerResponse(('ok', str(revno), last_revision))
+
+
+class SmartServerBranchRequestRevisionIdToRevno(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch, revid):
+ """Return branch.revision_id_to_revno().
+
+ New in 2.5.
+
+ Each element of the resulting dotted revno is encoded in decimal.
+ """
+ try:
+ dotted_revno = branch.revision_id_to_dotted_revno(revid)
+ except errors.NoSuchRevision:
+ return FailedSmartServerResponse(('NoSuchRevision', revid))
+ return SuccessfulSmartServerResponse(
+ ('ok', ) + tuple(map(str, dotted_revno)))
+
+
+class SmartServerSetTipRequest(SmartServerLockedBranchRequest):
+ """Base class for handling common branch request logic for requests that
+ update the branch tip.
+ """
+
+ def do_with_locked_branch(self, branch, *args):
+ try:
+ return self.do_tip_change_with_locked_branch(branch, *args)
+ except errors.TipChangeRejected, e:
+ msg = e.msg
+ if isinstance(msg, unicode):
+ msg = msg.encode('utf-8')
+ return FailedSmartServerResponse(('TipChangeRejected', msg))
+
+
+class SmartServerBranchRequestSetConfigOption(SmartServerLockedBranchRequest):
+ """Set an option in the branch configuration."""
+
+ def do_with_locked_branch(self, branch, value, name, section):
+ if not section:
+ section = None
+ branch._get_config().set_option(value.decode('utf8'), name, section)
+ return SuccessfulSmartServerResponse(())
+
+
+class SmartServerBranchRequestSetConfigOptionDict(SmartServerLockedBranchRequest):
+ """Set an option in the branch configuration.
+
+ New in 2.2.
+ """
+
+ def do_with_locked_branch(self, branch, value_dict, name, section):
+ utf8_dict = bencode.bdecode(value_dict)
+ value_dict = {}
+ for key, value in utf8_dict.items():
+ value_dict[key.decode('utf8')] = value.decode('utf8')
+ if not section:
+ section = None
+ branch._get_config().set_option(value_dict, name, section)
+ return SuccessfulSmartServerResponse(())
+
+
+class SmartServerBranchRequestSetLastRevision(SmartServerSetTipRequest):
+
+ def do_tip_change_with_locked_branch(self, branch, new_last_revision_id):
+ if new_last_revision_id == 'null:':
+ branch.set_last_revision_info(0, new_last_revision_id)
+ else:
+ if not branch.repository.has_revision(new_last_revision_id):
+ return FailedSmartServerResponse(
+ ('NoSuchRevision', new_last_revision_id))
+ branch.generate_revision_history(new_last_revision_id, None, None)
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerBranchRequestSetLastRevisionEx(SmartServerSetTipRequest):
+
+ def do_tip_change_with_locked_branch(self, branch, new_last_revision_id,
+ allow_divergence, allow_overwrite_descendant):
+ """Set the last revision of the branch.
+
+ New in 1.6.
+
+ :param new_last_revision_id: the revision ID to set as the last
+ revision of the branch.
+ :param allow_divergence: A flag. If non-zero, change the revision ID
+ even if the new_last_revision_id's ancestry has diverged from the
+ current last revision. If zero, a 'Diverged' error will be
+ returned if new_last_revision_id is not a descendant of the current
+ last revision.
+ :param allow_overwrite_descendant: A flag. If zero and
+ new_last_revision_id is not a descendant of the current last
+ revision, then the last revision will not be changed. If non-zero
+ and there is no divergence, then the last revision is always
+ changed.
+
+ :returns: on success, a tuple of ('ok', revno, revision_id), where
+ revno and revision_id are the new values of the current last
+ revision info. The revision_id might be different to the
+ new_last_revision_id if allow_overwrite_descendant was not set.
+ """
+ do_not_overwrite_descendant = not allow_overwrite_descendant
+ try:
+ last_revno, last_rev = branch.last_revision_info()
+ graph = branch.repository.get_graph()
+ if not allow_divergence or do_not_overwrite_descendant:
+ relation = branch._revision_relations(
+ last_rev, new_last_revision_id, graph)
+ if relation == 'diverged' and not allow_divergence:
+ return FailedSmartServerResponse(('Diverged',))
+ if relation == 'a_descends_from_b' and do_not_overwrite_descendant:
+ return SuccessfulSmartServerResponse(
+ ('ok', last_revno, last_rev))
+ new_revno = graph.find_distance_to_null(
+ new_last_revision_id, [(last_rev, last_revno)])
+ branch.set_last_revision_info(new_revno, new_last_revision_id)
+ except errors.GhostRevisionsHaveNoRevno:
+ return FailedSmartServerResponse(
+ ('NoSuchRevision', new_last_revision_id))
+ return SuccessfulSmartServerResponse(
+ ('ok', new_revno, new_last_revision_id))
+
+
+class SmartServerBranchRequestSetLastRevisionInfo(SmartServerSetTipRequest):
+ """Branch.set_last_revision_info. Sets the revno and the revision ID of
+ the specified branch.
+
+ New in bzrlib 1.4.
+ """
+
+ def do_tip_change_with_locked_branch(self, branch, new_revno,
+ new_last_revision_id):
+ try:
+ branch.set_last_revision_info(int(new_revno), new_last_revision_id)
+ except errors.NoSuchRevision:
+ return FailedSmartServerResponse(
+ ('NoSuchRevision', new_last_revision_id))
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerBranchRequestSetParentLocation(SmartServerLockedBranchRequest):
+ """Set the parent location for a branch.
+
+ Takes a location to set, which must be utf8 encoded.
+ """
+
+ def do_with_locked_branch(self, branch, location):
+ branch._set_parent_location(location)
+ return SuccessfulSmartServerResponse(())
+
+
+class SmartServerBranchRequestLockWrite(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch, branch_token='', repo_token=''):
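+ # Empty tokens from the client mean it holds no lock yet; pass None so
+ # lock_write() takes a new lock and returns a fresh token.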
+ if branch_token == '':
+ branch_token = None
+ if repo_token == '':
+ repo_token = None
+ try:
+ repo_token = branch.repository.lock_write(
+ token=repo_token).repository_token
+ try:
+ branch_token = branch.lock_write(
+ token=branch_token).branch_token
+ finally:
+ # this leaves the repository with 1 lock
+ branch.repository.unlock()
+ except errors.LockContention:
+ return FailedSmartServerResponse(('LockContention',))
+ except errors.TokenMismatch:
+ return FailedSmartServerResponse(('TokenMismatch',))
+ except errors.UnlockableTransport:
+ return FailedSmartServerResponse(('UnlockableTransport',))
+ except errors.LockFailed, e:
+ return FailedSmartServerResponse(('LockFailed', str(e.lock), str(e.why)))
+ if repo_token is None:
+ repo_token = ''
+ else:
+ branch.repository.leave_lock_in_place()
+ branch.leave_lock_in_place()
+ branch.unlock()
+ return SuccessfulSmartServerResponse(('ok', branch_token, repo_token))
+
+
+class SmartServerBranchRequestUnlock(SmartServerBranchRequest):
+
+ def do_with_branch(self, branch, branch_token, repo_token):
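+ # Re-take the locks with the client's tokens, then clear the
+ # leave-in-place flags and unlock so the physical locks are released.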
+ try:
+ branch.repository.lock_write(token=repo_token)
+ try:
+ branch.lock_write(token=branch_token)
+ finally:
+ branch.repository.unlock()
+ except errors.TokenMismatch:
+ return FailedSmartServerResponse(('TokenMismatch',))
+ if repo_token:
+ branch.repository.dont_leave_lock_in_place()
+ branch.dont_leave_lock_in_place()
+ branch.unlock()
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerBranchRequestGetPhysicalLockStatus(SmartServerBranchRequest):
+ """Get the physical lock status for a branch.
+
+ New in 2.5.
+ """
+
+ def do_with_branch(self, branch):
+ if branch.get_physical_lock_status():
+ return SuccessfulSmartServerResponse(('yes',))
+ else:
+ return SuccessfulSmartServerResponse(('no',))
diff --git a/bzrlib/smart/bzrdir.py b/bzrlib/smart/bzrdir.py
new file mode 100644
index 0000000..e25617e
--- /dev/null
+++ b/bzrlib/smart/bzrdir.py
@@ -0,0 +1,626 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Server-side bzrdir related request implmentations."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ bencode,
+ branch,
+ errors,
+ repository,
+ urlutils,
+ )
+from bzrlib.bzrdir import (
+ BzrDir,
+ BzrDirFormat,
+ BzrProber,
+ )
+from bzrlib.controldir import (
+ network_format_registry,
+ )
+from bzrlib.smart.request import (
+ FailedSmartServerResponse,
+ SmartServerRequest,
+ SuccessfulSmartServerResponse,
+ )
+
+
+class SmartServerRequestOpenBzrDir(SmartServerRequest):
+
+ def do(self, path):
+ try:
+ t = self.transport_from_client_path(path)
+ except errors.PathNotChild:
+ # The client is trying to ask about a path that they have no access
+ # to.
+ # Ideally we'd return a FailedSmartServerResponse here rather than
+ # a "successful" negative, but we want to be compatibile with
+ # clients that don't anticipate errors from this method.
+ answer = 'no'
+ else:
+ bzr_prober = BzrProber()
+ try:
+ bzr_prober.probe_transport(t)
+ except (errors.NotBranchError, errors.UnknownFormatError):
+ answer = 'no'
+ else:
+ answer = 'yes'
+ return SuccessfulSmartServerResponse((answer,))
+
+
+class SmartServerRequestOpenBzrDir_2_1(SmartServerRequest):
+
+ def do(self, path):
+ """Is there a BzrDir present, and if so does it have a working tree?
+
+ New in 2.1.
+ """
+ try:
+ t = self.transport_from_client_path(path)
+ except errors.PathNotChild:
+ # The client is trying to ask about a path that they have no access
+ # to.
+ return SuccessfulSmartServerResponse(('no',))
+ try:
+ bd = BzrDir.open_from_transport(t)
+ except errors.NotBranchError:
+ answer = ('no',)
+ else:
+ answer = ('yes',)
+ if bd.has_workingtree():
+ answer += ('yes',)
+ else:
+ answer += ('no',)
+ return SuccessfulSmartServerResponse(answer)
+
+
+class SmartServerRequestBzrDir(SmartServerRequest):
+
+ def do(self, path, *args):
+ """Open a BzrDir at path, and return `self.do_bzrdir_request(*args)`."""
+ try:
+ self._bzrdir = BzrDir.open_from_transport(
+ self.transport_from_client_path(path))
+ except errors.NotBranchError, e:
+ return FailedSmartServerResponse(('nobranch',))
+ return self.do_bzrdir_request(*args)
+
+ def _boolean_to_yes_no(self, a_boolean):
+ if a_boolean:
+ return 'yes'
+ else:
+ return 'no'
+
+ def _format_to_capabilities(self, repo_format):
+ rich_root = self._boolean_to_yes_no(repo_format.rich_root_data)
+ tree_ref = self._boolean_to_yes_no(
+ repo_format.supports_tree_reference)
+ external_lookup = self._boolean_to_yes_no(
+ repo_format.supports_external_lookups)
+ return rich_root, tree_ref, external_lookup
+
+ def _repo_relpath(self, current_transport, repository):
+ """Get the relative path for repository from current_transport."""
+ # the relpath of the bzrdir in the found repository gives us the
+ # path segments to pop-out.
+ relpath = repository.user_transport.relpath(
+ current_transport.base)
+ if len(relpath):
+ segments = ['..'] * len(relpath.split('/'))
+ else:
+ segments = []
+ return '/'.join(segments)
+
+
+class SmartServerBzrDirRequestDestroyBranch(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self, name=None):
+ """Destroy the branch with the specified name.
+
+ New in 2.5.0.
+ :return: On success, 'ok'.
+ """
+ try:
+ self._bzrdir.destroy_branch(name)
+ except errors.NotBranchError, e:
+ return FailedSmartServerResponse(('nobranch',))
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerBzrDirRequestHasWorkingTree(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self, name=None):
+ """Check whether there is a working tree present.
+
+ New in 2.5.0.
+
+ :return: If there is a working tree present, 'yes'.
+ Otherwise 'no'.
+ """
+ if self._bzrdir.has_workingtree():
+ return SuccessfulSmartServerResponse(('yes', ))
+ else:
+ return SuccessfulSmartServerResponse(('no', ))
+
+
+class SmartServerBzrDirRequestDestroyRepository(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self, name=None):
+ """Destroy the repository.
+
+ New in 2.5.0.
+
+ :return: On success, 'ok'.
+ """
+ try:
+ self._bzrdir.destroy_repository()
+ except errors.NoRepositoryPresent, e:
+ return FailedSmartServerResponse(('norepository',))
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerBzrDirRequestCloningMetaDir(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self, require_stacking):
+ """Get the format that should be used when cloning from this dir.
+
+ New in 1.13.
+
+ :return: on success, a 3-tuple of network names for (control,
+ repository, branch) directories, where '' signifies "not present".
+ If this BzrDir contains a branch reference then this will fail with
+ BranchReference; clients should resolve branch references before
+ calling this RPC.
+ """
+ try:
+ branch_ref = self._bzrdir.get_branch_reference()
+ except errors.NotBranchError:
+ branch_ref = None
+ if branch_ref is not None:
+ # The server shouldn't try to resolve references, and it quite
+ # possibly can't reach them anyway. The client needs to resolve
+ # the branch reference to determine the cloning_metadir.
+ return FailedSmartServerResponse(('BranchReference',))
+ if require_stacking == "True":
+ require_stacking = True
+ else:
+ require_stacking = False
+ control_format = self._bzrdir.cloning_metadir(
+ require_stacking=require_stacking)
+ control_name = control_format.network_name()
+ if not control_format.fixed_components:
+ branch_name = ('branch',
+ control_format.get_branch_format().network_name())
+ repository_name = control_format.repository_format.network_name()
+ else:
+ # Only MetaDir has delegated formats today.
+ branch_name = ('branch', '')
+ repository_name = ''
+ return SuccessfulSmartServerResponse((control_name, repository_name,
+ branch_name))
+
+
+class SmartServerBzrDirRequestCheckoutMetaDir(SmartServerRequestBzrDir):
+ """Get the format to use for checkouts.
+
+ New in 2.5.
+
+ :return: on success, a 3-tuple of network names for (control,
+ repository, branch) directories, where '' signifies "not present".
+ If this BzrDir contains a branch reference then this will fail with
+ BranchReference; clients should resolve branch references before
+ calling this RPC (they should not try to create a checkout of a
+ checkout).
+ """
+
+ def do_bzrdir_request(self):
+ try:
+ branch_ref = self._bzrdir.get_branch_reference()
+ except errors.NotBranchError:
+ branch_ref = None
+ if branch_ref is not None:
+ # The server shouldn't try to resolve references, and it quite
+ # possibly can't reach them anyway. The client needs to resolve
+ # the branch reference to determine the cloning_metadir.
+ return FailedSmartServerResponse(('BranchReference',))
+ control_format = self._bzrdir.checkout_metadir()
+ control_name = control_format.network_name()
+ if not control_format.fixed_components:
+ branch_name = control_format.get_branch_format().network_name()
+ repo_name = control_format.repository_format.network_name()
+ else:
+ branch_name = ''
+ repo_name = ''
+ return SuccessfulSmartServerResponse(
+ (control_name, repo_name, branch_name))
+
+
+class SmartServerRequestCreateBranch(SmartServerRequestBzrDir):
+
+ def do(self, path, network_name):
+ """Create a branch in the bzr dir at path.
+
+ This operates precisely like 'bzrdir.create_branch'.
+
+ If a bzrdir is not present, an exception is propagated
+ rather than 'no branch' because these are different conditions (and
+ this method should only be called after establishing that a bzr dir
+ exists anyway).
+
+ This is the initial version of this method introduced to the smart
+ server for 1.13.
+
+ :param path: The path to the bzrdir.
+ :param network_name: The network name of the branch type to create.
+ :return: ('ok', branch_format, repo_path, rich_root, tree_ref,
+ external_lookup, repo_format)
+ """
+ bzrdir = BzrDir.open_from_transport(
+ self.transport_from_client_path(path))
+ format = branch.network_format_registry.get(network_name)
+ bzrdir.branch_format = format
+ result = format.initialize(bzrdir, name="")
+ rich_root, tree_ref, external_lookup = self._format_to_capabilities(
+ result.repository._format)
+ branch_format = result._format.network_name()
+ repo_format = result.repository._format.network_name()
+ repo_path = self._repo_relpath(bzrdir.root_transport,
+ result.repository)
+ # branch format, repo relpath, rich_root, tree_ref, external_lookup,
+ # repo_network_name
+ return SuccessfulSmartServerResponse(('ok', branch_format, repo_path,
+ rich_root, tree_ref, external_lookup, repo_format))
+
+
+class SmartServerRequestCreateRepository(SmartServerRequestBzrDir):
+
+ def do(self, path, network_name, shared):
+ """Create a repository in the bzr dir at path.
+
+ This operates precisely like 'bzrdir.create_repository'.
+
+ If a bzrdir is not present, an exception is propagated
+ rather than 'no branch' because these are different conditions (and
+ this method should only be called after establishing that a bzr dir
+ exists anyway).
+
+ This is the initial version of this method introduced to the smart
+ server for 1.13.
+
+ :param path: The path to the bzrdir.
+ :param network_name: The network name of the repository type to create.
+ :param shared: The value to pass create_repository for the shared
+ parameter.
+ :return: (ok, rich_root, tree_ref, external_lookup, network_name)
+ """
+ bzrdir = BzrDir.open_from_transport(
+ self.transport_from_client_path(path))
+ shared = shared == 'True'
+ format = repository.network_format_registry.get(network_name)
+ bzrdir.repository_format = format
+ result = format.initialize(bzrdir, shared=shared)
+ rich_root, tree_ref, external_lookup = self._format_to_capabilities(
+ result._format)
+ return SuccessfulSmartServerResponse(('ok', rich_root, tree_ref,
+ external_lookup, result._format.network_name()))
+
+
+class SmartServerRequestFindRepository(SmartServerRequestBzrDir):
+
+ def _find(self, path):
+ """try to find a repository from path upwards
+
+ This operates precisely like 'bzrdir.find_repository'.
+
+ :return: (relpath, rich_root, tree_ref, external_lookup, network_name).
+ All are strings, relpath is a / prefixed path, the next three are
+ either 'yes' or 'no', and the last is a repository format network
+ name.
+ :raises errors.NoRepositoryPresent: When there is no repository
+ present.
+ """
+ bzrdir = BzrDir.open_from_transport(
+ self.transport_from_client_path(path))
+ repository = bzrdir.find_repository()
+ path = self._repo_relpath(bzrdir.root_transport, repository)
+ rich_root, tree_ref, external_lookup = self._format_to_capabilities(
+ repository._format)
+ network_name = repository._format.network_name()
+ return path, rich_root, tree_ref, external_lookup, network_name
+
+
+class SmartServerRequestFindRepositoryV1(SmartServerRequestFindRepository):
+
+ def do(self, path):
+ """try to find a repository from path upwards
+
+ This operates precisely like 'bzrdir.find_repository'.
+
+ If a bzrdir is not present, an exception is propagated
+ rather than 'no branch' because these are different conditions.
+
+ This is the initial version of this method introduced with the smart
+ server. Modern clients will try the V2 method that adds support for the
+ supports_external_lookups attribute.
+
+ :return: norepository or ok, relpath.
+ """
+ try:
+ path, rich_root, tree_ref, external_lookup, name = self._find(path)
+ return SuccessfulSmartServerResponse(('ok', path, rich_root, tree_ref))
+ except errors.NoRepositoryPresent:
+ return FailedSmartServerResponse(('norepository', ))
+
+
+class SmartServerRequestFindRepositoryV2(SmartServerRequestFindRepository):
+
+ def do(self, path):
+ """try to find a repository from path upwards
+
+ This operates precisely like 'bzrdir.find_repository'.
+
+ If a bzrdir is not present, an exception is propagated
+ rather than 'no branch' because these are different conditions.
+
+ This is the second edition of this method introduced in bzr 1.3, which
+ returns information about the supports_external_lookups format
+ attribute too.
+
+ :return: norepository or ok, relpath, rich_root, tree_ref,
+ external_lookup.
+ """
+ try:
+ path, rich_root, tree_ref, external_lookup, name = self._find(path)
+ return SuccessfulSmartServerResponse(
+ ('ok', path, rich_root, tree_ref, external_lookup))
+ except errors.NoRepositoryPresent:
+ return FailedSmartServerResponse(('norepository', ))
+
+
+class SmartServerRequestFindRepositoryV3(SmartServerRequestFindRepository):
+
+ def do(self, path):
+ """try to find a repository from path upwards
+
+ This operates precisely like 'bzrdir.find_repository'.
+
+ If a bzrdir is not present, an exception is propagated
+ rather than 'no branch' because these are different conditions.
+
+ This is the third edition of this method introduced in bzr 1.13, which
+ returns information about the network name of the repository format.
+
+ :return: norepository or ok, relpath, rich_root, tree_ref,
+ external_lookup, network_name.
+ """
+ try:
+ path, rich_root, tree_ref, external_lookup, name = self._find(path)
+ return SuccessfulSmartServerResponse(
+ ('ok', path, rich_root, tree_ref, external_lookup, name))
+ except errors.NoRepositoryPresent:
+ return FailedSmartServerResponse(('norepository', ))
+
+
+class SmartServerBzrDirRequestConfigFile(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self):
+ """Get the configuration bytes for a config file in bzrdir.
+
+ The body is not utf8 decoded - it is the literal bytestream from disk.
+ """
+ config = self._bzrdir._get_config()
+ if config is None:
+ content = ''
+ else:
+ content = config._get_config_file().read()
+ return SuccessfulSmartServerResponse((), content)
+
+
+class SmartServerBzrDirRequestGetBranches(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self):
+ """Get the branches in a control directory.
+
+ The body is a bencoded dictionary, with values similar to the return
+ value of the open branch request.
+ """
+ branches = self._bzrdir.get_branches()
+ ret = {}
+ for name, b in branches.iteritems():
+ if name is None:
+ name = ""
+ ret[name] = ("branch", b._format.network_name())
+ return SuccessfulSmartServerResponse(
+ ("success", ), bencode.bencode(ret))
+
+
+class SmartServerRequestInitializeBzrDir(SmartServerRequest):
+
+ def do(self, path):
+ """Initialize a bzrdir at path.
+
+ The default format of the server is used.
+ :return: SmartServerResponse(('ok', ))
+ """
+ target_transport = self.transport_from_client_path(path)
+ BzrDirFormat.get_default_format().initialize_on_transport(target_transport)
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+class SmartServerRequestBzrDirInitializeEx(SmartServerRequestBzrDir):
+
+ def parse_NoneTrueFalse(self, arg):
+ if not arg:
+ return None
+ if arg == 'False':
+ return False
+ if arg == 'True':
+ return True
+ raise AssertionError("invalid arg %r" % arg)
+
+ def parse_NoneString(self, arg):
+ return arg or None
+
+ def _serialize_NoneTrueFalse(self, arg):
+ if arg is False:
+ return 'False'
+ if not arg:
+ return ''
+ return 'True'
+
+ def do(self, bzrdir_network_name, path, use_existing_dir, create_prefix,
+ force_new_repo, stacked_on, stack_on_pwd, repo_format_name,
+ make_working_trees, shared_repo):
+ """Initialize a bzrdir at path as per
+ BzrDirFormat.initialize_on_transport_ex.
+
+ New in 1.16. (Replaces BzrDirFormat.initialize_ex verb from 1.15).
+
+ :return: return SuccessfulSmartServerResponse((repo_path, rich_root,
+ tree_ref, external_lookup, repo_network_name,
+ repo_bzrdir_network_name, bzrdir_format_network_name,
+ NoneTrueFalse(stacking), final_stack, final_stack_pwd,
+ repo_lock_token))
+ """
+ target_transport = self.transport_from_client_path(path)
+ format = network_format_registry.get(bzrdir_network_name)
+ use_existing_dir = self.parse_NoneTrueFalse(use_existing_dir)
+ create_prefix = self.parse_NoneTrueFalse(create_prefix)
+ force_new_repo = self.parse_NoneTrueFalse(force_new_repo)
+ stacked_on = self.parse_NoneString(stacked_on)
+ stack_on_pwd = self.parse_NoneString(stack_on_pwd)
+ make_working_trees = self.parse_NoneTrueFalse(make_working_trees)
+ shared_repo = self.parse_NoneTrueFalse(shared_repo)
+ if stack_on_pwd == '.':
+ stack_on_pwd = target_transport.base
+ repo_format_name = self.parse_NoneString(repo_format_name)
+ repo, bzrdir, stacking, repository_policy = \
+ format.initialize_on_transport_ex(target_transport,
+ use_existing_dir=use_existing_dir, create_prefix=create_prefix,
+ force_new_repo=force_new_repo, stacked_on=stacked_on,
+ stack_on_pwd=stack_on_pwd, repo_format_name=repo_format_name,
+ make_working_trees=make_working_trees, shared_repo=shared_repo)
+ if repo is None:
+ repo_path = ''
+ repo_name = ''
+ rich_root = tree_ref = external_lookup = ''
+ repo_bzrdir_name = ''
+ final_stack = None
+ final_stack_pwd = None
+ repo_lock_token = ''
+ else:
+ repo_path = self._repo_relpath(bzrdir.root_transport, repo)
+ if repo_path == '':
+ repo_path = '.'
+ rich_root, tree_ref, external_lookup = self._format_to_capabilities(
+ repo._format)
+ repo_name = repo._format.network_name()
+ repo_bzrdir_name = repo.bzrdir._format.network_name()
+ final_stack = repository_policy._stack_on
+ final_stack_pwd = repository_policy._stack_on_pwd
+ # It is returned locked, but we need to do the lock to get the lock
+ # token.
+ repo.unlock()
+ repo_lock_token = repo.lock_write().repository_token or ''
+ if repo_lock_token:
+ repo.leave_lock_in_place()
+ repo.unlock()
+ final_stack = final_stack or ''
+ final_stack_pwd = final_stack_pwd or ''
+
+ # We want this to be relative to the bzrdir.
+ if final_stack_pwd:
+ final_stack_pwd = urlutils.relative_url(
+ target_transport.base, final_stack_pwd)
+
+ # Can't meaningfully return a root path.
+ if final_stack.startswith('/'):
+ client_path = self._root_client_path + final_stack[1:]
+ final_stack = urlutils.relative_url(
+ self._root_client_path, client_path)
+ final_stack_pwd = '.'
+
+ return SuccessfulSmartServerResponse((repo_path, rich_root, tree_ref,
+ external_lookup, repo_name, repo_bzrdir_name,
+ bzrdir._format.network_name(),
+ self._serialize_NoneTrueFalse(stacking), final_stack,
+ final_stack_pwd, repo_lock_token))
+
+
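+# A minimal sketch (hypothetical helper, for illustration only) of the round
+# trip the NoneTrueFalse helpers above provide for optional flags on the wire.
+def _example_none_true_false_roundtrip(request):
+    # The empty string stands for None, so omitted flags cost nothing.
+    for wire, value in [('', None), ('True', True), ('False', False)]:
+        assert request.parse_NoneTrueFalse(wire) is value
+        assert request._serialize_NoneTrueFalse(value) == wire
+
+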
+class SmartServerRequestOpenBranch(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self):
+ """open a branch at path and return the branch reference or branch."""
+ try:
+ reference_url = self._bzrdir.get_branch_reference()
+ if reference_url is None:
+ return SuccessfulSmartServerResponse(('ok', ''))
+ else:
+ return SuccessfulSmartServerResponse(('ok', reference_url))
+ except errors.NotBranchError, e:
+ return FailedSmartServerResponse(('nobranch',))
+
+
+class SmartServerRequestOpenBranchV2(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self):
+ """open a branch at path and return the reference or format."""
+ try:
+ reference_url = self._bzrdir.get_branch_reference()
+ if reference_url is None:
+ br = self._bzrdir.open_branch(ignore_fallbacks=True)
+ format = br._format.network_name()
+ return SuccessfulSmartServerResponse(('branch', format))
+ else:
+ return SuccessfulSmartServerResponse(('ref', reference_url))
+ except errors.NotBranchError, e:
+ return FailedSmartServerResponse(('nobranch',))
+
+
+class SmartServerRequestOpenBranchV3(SmartServerRequestBzrDir):
+
+ def do_bzrdir_request(self):
+ """Open a branch at path and return the reference or format.
+
+ This version was introduced in 2.1.
+
+ Differences to SmartServerRequestOpenBranchV2:
+ * can return 2-element ('nobranch', extra), where 'extra' is a string
+ with an explanation like 'location is a repository'. Previously
+ a 'nobranch' response would never have more than one element.
+ """
+ try:
+ reference_url = self._bzrdir.get_branch_reference()
+ if reference_url is None:
+ br = self._bzrdir.open_branch(ignore_fallbacks=True)
+ format = br._format.network_name()
+ return SuccessfulSmartServerResponse(('branch', format))
+ else:
+ return SuccessfulSmartServerResponse(('ref', reference_url))
+ except errors.NotBranchError, e:
+ # Stringify the exception so that its .detail attribute will be
+ # filled out.
+ str(e)
+ resp = ('nobranch',)
+ detail = e.detail
+ if detail:
+ if detail.startswith(': '):
+ detail = detail[2:]
+ resp += (detail,)
+ return FailedSmartServerResponse(resp)
+
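+
+# A minimal client-side sketch (hypothetical helper, not one of the verbs
+# above) of the response shapes OpenBranchV2 and V3 can produce.
+def _example_interpret_open_branch_response(response):
+    if response[0] == 'branch':
+        # Success: the branch format's network name.
+        return ('branch', response[1])
+    elif response[0] == 'ref':
+        # A branch reference: the URL it points at.
+        return ('reference', response[1])
+    else:
+        # ('nobranch',) from V2, or ('nobranch', detail) from V3.
+        return ('missing', response[1] if len(response) > 1 else '')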
diff --git a/bzrlib/smart/client.py b/bzrlib/smart/client.py
new file mode 100644
index 0000000..8c1e21b
--- /dev/null
+++ b/bzrlib/smart/client.py
@@ -0,0 +1,352 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import lazy_import
+lazy_import.lazy_import(globals(), """
+from bzrlib.smart import request as _mod_request
+""")
+
+import bzrlib
+from bzrlib.smart import message, protocol
+from bzrlib import (
+ debug,
+ errors,
+ hooks,
+ trace,
+ )
+
+
+class _SmartClient(object):
+
+ def __init__(self, medium, headers=None):
+ """Constructor.
+
+ :param medium: a SmartClientMedium
+ """
+ self._medium = medium
+ if headers is None:
+ self._headers = {'Software version': bzrlib.__version__}
+ else:
+ self._headers = dict(headers)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, self._medium)
+
+ def _call_and_read_response(self, method, args, body=None, readv_body=None,
+ body_stream=None, expect_response_body=True):
+ request = _SmartClientRequest(self, method, args, body=body,
+ readv_body=readv_body, body_stream=body_stream,
+ expect_response_body=expect_response_body)
+ return request.call_and_read_response()
+
+ def call(self, method, *args):
+ """Call a method on the remote server."""
+ result, protocol = self.call_expecting_body(method, *args)
+ protocol.cancel_read_body()
+ return result
+
+ def call_expecting_body(self, method, *args):
+ """Call a method and return the result and the protocol object.
+
+ The body can be read like so::
+
+ result, smart_protocol = smart_client.call_expecting_body(...)
+ body = smart_protocol.read_body_bytes()
+ """
+ return self._call_and_read_response(
+ method, args, expect_response_body=True)
+
+ def call_with_body_bytes(self, method, args, body):
+ """Call a method on the remote server with body bytes."""
+ if type(method) is not str:
+ raise TypeError('method must be a byte string, not %r' % (method,))
+ for arg in args:
+ if type(arg) is not str:
+ raise TypeError('args must be byte strings, not %r' % (args,))
+ if type(body) is not str:
+ raise TypeError('body must be byte string, not %r' % (body,))
+ response, response_handler = self._call_and_read_response(
+ method, args, body=body, expect_response_body=False)
+ return response
+
+ def call_with_body_bytes_expecting_body(self, method, args, body):
+ """Call a method on the remote server with body bytes."""
+ if type(method) is not str:
+ raise TypeError('method must be a byte string, not %r' % (method,))
+ for arg in args:
+ if type(arg) is not str:
+ raise TypeError('args must be byte strings, not %r' % (args,))
+ if type(body) is not str:
+ raise TypeError('body must be byte string, not %r' % (body,))
+ response, response_handler = self._call_and_read_response(
+ method, args, body=body, expect_response_body=True)
+ return (response, response_handler)
+
+ def call_with_body_readv_array(self, args, body):
+ response, response_handler = self._call_and_read_response(
+ args[0], args[1:], readv_body=body, expect_response_body=True)
+ return (response, response_handler)
+
+ def call_with_body_stream(self, args, stream):
+ response, response_handler = self._call_and_read_response(
+ args[0], args[1:], body_stream=stream,
+ expect_response_body=False)
+ return (response, response_handler)
+
+ def remote_path_from_transport(self, transport):
+ """Convert transport into a path suitable for using in a request.
+
+ Note that the resulting remote path doesn't encode the host name or
+ anything but path, so it is only safe to use it in requests sent over
+ the medium from the matching transport.
+ """
+ return self._medium.remote_path_from_transport(transport)
+
+
+class _SmartClientRequest(object):
+ """Encapsulate the logic for a single request.
+
+ This class handles things like reconnecting and sending the request a
+ second time when the connection is reset in the middle. It also handles the
+ multiple requests that get made if we don't know what protocol the server
+ supports yet.
+
+ Generally, you build up one of these objects, passing in the arguments that
+ you want to send to the server, and then use 'call_and_read_response' to
+ get the response from the server.
+ """
+
+ def __init__(self, client, method, args, body=None, readv_body=None,
+ body_stream=None, expect_response_body=True):
+ self.client = client
+ self.method = method
+ self.args = args
+ self.body = body
+ self.readv_body = readv_body
+ self.body_stream = body_stream
+ self.expect_response_body = expect_response_body
+
+ def call_and_read_response(self):
+ """Send the request to the server, and read the initial response.
+
+ This doesn't read all of the body content of the response, instead it
+ returns (response_tuple, response_handler). response_tuple is the 'ok',
+ or 'error' information, and 'response_handler' can be used to get the
+ content stream out.
+ """
+ self._run_call_hooks()
+ protocol_version = self.client._medium._protocol_version
+ if protocol_version is None:
+ return self._call_determining_protocol_version()
+ else:
+ return self._call(protocol_version)
+
+ def _is_safe_to_send_twice(self):
+ """Check if the current method is re-entrant safe."""
+ if self.body_stream is not None or 'noretry' in debug.debug_flags:
+ # We can't restart a body stream that has already been consumed.
+ return False
+ request_type = _mod_request.request_handlers.get_info(self.method)
+ if request_type in ('read', 'idem', 'semi'):
+ return True
+ # If we have gotten this far, 'stream' cannot be retried, because we
+ # already consumed the local stream.
+ if request_type in ('semivfs', 'mutate', 'stream'):
+ return False
+ trace.mutter('Unknown request type: %s for method %s'
+ % (request_type, self.method))
+ return False
+
+ def _run_call_hooks(self):
+ if not _SmartClient.hooks['call']:
+ return
+ params = CallHookParams(self.method, self.args, self.body,
+ self.readv_body, self.client._medium)
+ for hook in _SmartClient.hooks['call']:
+ hook(params)
+
+ def _call(self, protocol_version):
+ """We know the protocol version.
+
+ So this just sends the request and then reads the response. This is
+ also where a request is retried if the connection is reset while
+ reading the response.
+ """
+ response_handler = self._send(protocol_version)
+ try:
+ response_tuple = response_handler.read_response_tuple(
+ expect_body=self.expect_response_body)
+ except errors.ConnectionReset, e:
+ self.client._medium.reset()
+ if not self._is_safe_to_send_twice():
+ raise
+ trace.warning('ConnectionReset reading response for %r, retrying'
+ % (self.method,))
+ trace.log_exception_quietly()
+ encoder, response_handler = self._construct_protocol(
+ protocol_version)
+ self._send_no_retry(encoder)
+ response_tuple = response_handler.read_response_tuple(
+ expect_body=self.expect_response_body)
+ return (response_tuple, response_handler)
+
+ def _call_determining_protocol_version(self):
+ """Determine what protocol the remote server supports.
+
+ We do this by placing a request in the most recent protocol, and
+ handling the UnexpectedProtocolVersionMarker from the server.
+ """
+ for protocol_version in [3, 2]:
+ if protocol_version == 2:
+ # If v3 doesn't work, the remote side is older than 1.6.
+ self.client._medium._remember_remote_is_before((1, 6))
+ try:
+ response_tuple, response_handler = self._call(protocol_version)
+ except errors.UnexpectedProtocolVersionMarker, err:
+ # TODO: We could recover from this without disconnecting if
+ # we recognise the protocol version.
+ trace.warning(
+ 'Server does not understand Bazaar network protocol %d,'
+ ' reconnecting. (Upgrade the server to avoid this.)'
+ % (protocol_version,))
+ self.client._medium.disconnect()
+ continue
+ except errors.ErrorFromSmartServer:
+ # If we received an error reply from the server, then it
+ # must be ok with this protocol version.
+ self.client._medium._protocol_version = protocol_version
+ raise
+ else:
+ self.client._medium._protocol_version = protocol_version
+ return response_tuple, response_handler
+ raise errors.SmartProtocolError(
+ 'Server is not a Bazaar server: ' + str(err))
+
+ def _construct_protocol(self, version):
+ """Build the encoding stack for a given protocol version."""
+ request = self.client._medium.get_request()
+ if version == 3:
+ request_encoder = protocol.ProtocolThreeRequester(request)
+ response_handler = message.ConventionalResponseHandler()
+ response_proto = protocol.ProtocolThreeDecoder(
+ response_handler, expect_version_marker=True)
+ response_handler.setProtoAndMediumRequest(response_proto, request)
+ elif version == 2:
+ request_encoder = protocol.SmartClientRequestProtocolTwo(request)
+ response_handler = request_encoder
+ else:
+ request_encoder = protocol.SmartClientRequestProtocolOne(request)
+ response_handler = request_encoder
+ return request_encoder, response_handler
+
+ def _send(self, protocol_version):
+ """Encode the request, and send it to the server.
+
+ This will retry a request if we get a ConnectionReset while sending the
+ request to the server. (Unless we have a body_stream that we have
+ already started consuming, since we can't restart body_streams)
+
+ :return: response_handler as defined by _construct_protocol
+ """
+ encoder, response_handler = self._construct_protocol(protocol_version)
+ try:
+ self._send_no_retry(encoder)
+ except errors.ConnectionReset, e:
+ # If we fail during the _send_no_retry phase, then we can
+ # be confident that the server did not get our request, because we
+ # haven't started waiting for the reply yet. So try the request
+ # again. We only issue a single retry, because if the connection
+ # really is down, there is no reason to loop endlessly.
+
+ # Connection is dead, so close our end of it.
+ self.client._medium.reset()
+ if (('noretry' in debug.debug_flags)
+ or (self.body_stream is not None
+ and encoder.body_stream_started)):
+ # We can't restart a body_stream that has been partially
+ # consumed, so we don't retry.
+ # Note: We don't have to worry about
+ # SmartClientRequestProtocolOne or Two, because they don't
+ # support client-side body streams.
+ raise
+ trace.warning('ConnectionReset calling %r, retrying'
+ % (self.method,))
+ trace.log_exception_quietly()
+ encoder, response_handler = self._construct_protocol(
+ protocol_version)
+ self._send_no_retry(encoder)
+ return response_handler
+
+ def _send_no_retry(self, encoder):
+ """Just encode the request and try to send it."""
+ encoder.set_headers(self.client._headers)
+ if self.body is not None:
+ if self.readv_body is not None:
+ raise AssertionError(
+ "body and readv_body are mutually exclusive.")
+ if self.body_stream is not None:
+ raise AssertionError(
+ "body and body_stream are mutually exclusive.")
+ encoder.call_with_body_bytes((self.method, ) + self.args, self.body)
+ elif self.readv_body is not None:
+ if self.body_stream is not None:
+ raise AssertionError(
+ "readv_body and body_stream are mutually exclusive.")
+ encoder.call_with_body_readv_array((self.method, ) + self.args,
+ self.readv_body)
+ elif self.body_stream is not None:
+ encoder.call_with_body_stream((self.method, ) + self.args,
+ self.body_stream)
+ else:
+ encoder.call(self.method, *self.args)
+
+
+class SmartClientHooks(hooks.Hooks):
+
+ def __init__(self):
+ hooks.Hooks.__init__(self, "bzrlib.smart.client", "_SmartClient.hooks")
+ self.add_hook('call',
+ "Called when the smart client is submitting a request to the "
+ "smart server. Called with a bzrlib.smart.client.CallHookParams "
+ "object. Streaming request bodies, and responses, are not "
+ "accessible.", None)
+
+
+_SmartClient.hooks = SmartClientHooks()
+
+
+class CallHookParams(object):
+
+ def __init__(self, method, args, body, readv_body, medium):
+ self.method = method
+ self.args = args
+ self.body = body
+ self.readv_body = readv_body
+ self.medium = medium
+
+ def __repr__(self):
+ attrs = dict((k, v) for (k, v) in self.__dict__.iteritems()
+ if v is not None)
+ return '<%s %r>' % (self.__class__.__name__, attrs)
+
+ def __eq__(self, other):
+ if type(other) is not type(self):
+ return NotImplemented
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not self == other
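+
+
+# A minimal sketch (hypothetical hook function, for illustration only) of
+# observing outgoing requests through the 'call' hook defined above.
+def _example_install_call_logger():
+    def log_call(params):
+        trace.mutter('hpss call %s %r', params.method, params.args)
+    _SmartClient.hooks.install_named_hook('call', log_call, 'example logger')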
diff --git a/bzrlib/smart/medium.py b/bzrlib/smart/medium.py
new file mode 100644
index 0000000..b1759e9
--- /dev/null
+++ b/bzrlib/smart/medium.py
@@ -0,0 +1,1193 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""The 'medium' layer for the smart servers and clients.
+
+"Medium" here is the noun meaning "a means of transmission", not the adjective
+for "the quality between big and small."
+
+Media carry the bytes of the requests somehow (e.g. via TCP, wrapped in HTTP, or
+over SSH), and pass them to and from the protocol logic. See the overview in
+bzrlib/transport/smart/__init__.py.
+"""
+
+from __future__ import absolute_import
+
+import errno
+import os
+import sys
+import time
+
+import bzrlib
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import select
+import socket
+import thread
+import weakref
+
+from bzrlib import (
+ debug,
+ errors,
+ trace,
+ transport,
+ ui,
+ urlutils,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.smart import client, protocol, request, signals, vfs
+from bzrlib.transport import ssh
+""")
+from bzrlib import osutils
+
+# Throughout this module buffer size parameters are either limited to be at
+# most _MAX_READ_SIZE, or are ignored and _MAX_READ_SIZE is used instead.
+# For this module's purposes, MAX_SOCKET_CHUNK is a reasonable size for reads
+# from non-sockets as well.
+_MAX_READ_SIZE = osutils.MAX_SOCKET_CHUNK
+
+def _get_protocol_factory_for_bytes(bytes):
+ """Determine the right protocol factory for 'bytes'.
+
+ This will return an appropriate protocol factory depending on the version
+ of the protocol being used, as determined by inspecting the given bytes.
+ The bytes should have at least one newline byte (i.e. be a whole line),
+ otherwise it's possible that a request will be incorrectly identified as
+ version 1.
+
+ Typical use would be::
+
+ factory, unused_bytes = _get_protocol_factory_for_bytes(bytes)
+ server_protocol = factory(transport, write_func, root_client_path)
+ server_protocol.accept_bytes(unused_bytes)
+
+ :param bytes: a str of bytes of the start of the request.
+ :returns: 2-tuple of (protocol_factory, unused_bytes). protocol_factory is
+ a callable that takes three args: transport, write_func,
+ root_client_path. unused_bytes are any bytes that were not part of a
+ protocol version marker.
+ """
+ if bytes.startswith(protocol.MESSAGE_VERSION_THREE):
+ protocol_factory = protocol.build_server_protocol_three
+ bytes = bytes[len(protocol.MESSAGE_VERSION_THREE):]
+ elif bytes.startswith(protocol.REQUEST_VERSION_TWO):
+ protocol_factory = protocol.SmartServerRequestProtocolTwo
+ bytes = bytes[len(protocol.REQUEST_VERSION_TWO):]
+ else:
+ protocol_factory = protocol.SmartServerRequestProtocolOne
+ return protocol_factory, bytes
+
+
+def _get_line(read_bytes_func):
+ """Read bytes using read_bytes_func until a newline byte.
+
+ This isn't particularly efficient, so should only be used when the
+ expected size of the line is quite short.
+
+ :returns: a tuple of two strs: (line, excess)
+ """
+ newline_pos = -1
+ bytes = ''
+ while newline_pos == -1:
+ new_bytes = read_bytes_func(1)
+ bytes += new_bytes
+ if new_bytes == '':
+ # Ran out of bytes before receiving a complete line.
+ return bytes, ''
+ newline_pos = bytes.find('\n')
+ line = bytes[:newline_pos+1]
+ excess = bytes[newline_pos+1:]
+ return line, excess
+
+
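+# A minimal sketch (hypothetical chunked source, for illustration only) of how
+# _get_line hands back any bytes that arrive after the newline as excess.
+def _example_get_line_split():
+    chunks = ['BzrDir.open\nextra request bytes']
+    def read(size):
+        return chunks.pop() if chunks else ''
+    return _get_line(read)   # -> ('BzrDir.open\n', 'extra request bytes')
+
+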
+class SmartMedium(object):
+ """Base class for smart protocol media, both client- and server-side."""
+
+ def __init__(self):
+ self._push_back_buffer = None
+
+ def _push_back(self, bytes):
+ """Return unused bytes to the medium, because they belong to the next
+ request(s).
+
+ This sets the _push_back_buffer to the given bytes.
+ """
+ if self._push_back_buffer is not None:
+ raise AssertionError(
+ "_push_back called when self._push_back_buffer is %r"
+ % (self._push_back_buffer,))
+ if bytes == '':
+ return
+ self._push_back_buffer = bytes
+
+ def _get_push_back_buffer(self):
+ if self._push_back_buffer == '':
+ raise AssertionError(
+ '%s._push_back_buffer should never be the empty string, '
+ 'which can be confused with EOF' % (self,))
+ bytes = self._push_back_buffer
+ self._push_back_buffer = None
+ return bytes
+
+ def read_bytes(self, desired_count):
+ """Read some bytes from this medium.
+
+ :returns: some bytes, possibly more or less than the number requested
+ in 'desired_count' depending on the medium.
+ """
+ if self._push_back_buffer is not None:
+ return self._get_push_back_buffer()
+ bytes_to_read = min(desired_count, _MAX_READ_SIZE)
+ return self._read_bytes(bytes_to_read)
+
+ def _read_bytes(self, count):
+ raise NotImplementedError(self._read_bytes)
+
+ def _get_line(self):
+ """Read bytes from this request's response until a newline byte.
+
+ This isn't particularly efficient, so should only be used when the
+ expected size of the line is quite short.
+
+ :returns: a string of bytes ending in a newline (byte 0x0A).
+ """
+ line, excess = _get_line(self.read_bytes)
+ self._push_back(excess)
+ return line
+
+ def _report_activity(self, bytes, direction):
+ """Notify that this medium has activity.
+
+ Implementations should call this from all methods that actually do IO.
+ Be careful that it's not called twice, if one method is implemented on
+ top of another.
+
+ :param bytes: Number of bytes read or written.
+ :param direction: 'read' or 'write' or None.
+ """
+ ui.ui_factory.report_transport_activity(self, bytes, direction)
+
+
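+# A minimal sketch (hypothetical in-memory medium, for illustration only) of
+# the push-back contract: bytes handed to _push_back are returned again by
+# the very next read_bytes call, before any new _read_bytes happens.
+class _ExampleStringMedium(SmartMedium):
+
+    def __init__(self, data):
+        SmartMedium.__init__(self)
+        self._data = data
+
+    def _read_bytes(self, count):
+        result, self._data = self._data[:count], self._data[count:]
+        return result
+
+# For instance:
+#   medium = _ExampleStringMedium('abcdef')
+#   medium._push_back(medium.read_bytes(4)[2:])   # keep 'cd' for later
+#   medium.read_bytes(10)                         # -> 'cd' (the push-back)
+
+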
+_bad_file_descriptor = (errno.EBADF,)
+if sys.platform == 'win32':
+ # Given on Windows if you pass a closed socket to select.select. Probably
+ # also given if you pass a file handle to select.
+ WSAENOTSOCK = 10038
+ _bad_file_descriptor += (WSAENOTSOCK,)
+
+
+class SmartServerStreamMedium(SmartMedium):
+ """Handles smart commands coming over a stream.
+
+ The stream may be a pipe connected to sshd, or a tcp socket, or an
+ in-process fifo for testing.
+
+ One instance is created for each connected client; it can serve multiple
+ requests in the lifetime of the connection.
+
+ The server passes requests through to an underlying backing transport,
+ which will typically be a LocalTransport looking at the server's filesystem.
+
+ :ivar _push_back_buffer: a str of bytes that have been read from the stream
+ but not used yet, or None if there are no buffered bytes. Subclasses
+ should make sure to exhaust this buffer before reading more bytes from
+ the stream. See also the _push_back method.
+ """
+
+ _timer = time.time
+
+ def __init__(self, backing_transport, root_client_path='/', timeout=None):
+ """Construct new server.
+
+ :param backing_transport: Transport for the directory served.
+ """
+ # backing_transport could be passed to serve instead of __init__
+ self.backing_transport = backing_transport
+ self.root_client_path = root_client_path
+ self.finished = False
+ if timeout is None:
+ raise AssertionError('You must supply a timeout.')
+ self._client_timeout = timeout
+ self._client_poll_timeout = min(timeout / 10.0, 1.0)
+ SmartMedium.__init__(self)
+
+ def serve(self):
+ """Serve requests until the client disconnects."""
+ # Keep a reference to stderr because the sys module's globals get set to
+ # None during interpreter shutdown.
+ from sys import stderr
+ try:
+ while not self.finished:
+ server_protocol = self._build_protocol()
+ self._serve_one_request(server_protocol)
+ except errors.ConnectionTimeout, e:
+ trace.note('%s' % (e,))
+ trace.log_exception_quietly()
+ self._disconnect_client()
+ # We reported it, no reason to make a big fuss.
+ return
+ except Exception, e:
+ stderr.write("%s terminating on exception %s\n" % (self, e))
+ raise
+ self._disconnect_client()
+
+ def _stop_gracefully(self):
+ """When we finish this message, stop looking for more."""
+ trace.mutter('Stopping %s' % (self,))
+ self.finished = True
+
+ def _disconnect_client(self):
+ """Close the current connection. We stopped due to a timeout/etc."""
+ # The default implementation is a no-op, because that is all we used to
+ # do when disconnecting from a client. I suppose we never had the
+ # *server* initiate a disconnect before.
+
+ def _wait_for_bytes_with_timeout(self, timeout_seconds):
+ """Wait for more bytes to be read, but timeout if none available.
+
+ This allows us to detect idle connections, and stop trying to read from
+ them, without setting the socket itself to non-blocking. This also
+ allows us to specify when we watch for idle timeouts.
+
+ :return: Did we timeout? (True if we timed out, False if there is data
+ to be read)
+ """
+ raise NotImplementedError(self._wait_for_bytes_with_timeout)
+
+ def _build_protocol(self):
+ """Identifies the version of the incoming request, and returns an
+ a protocol object that can interpret it.
+
+ If more bytes than the version prefix of the request are read, they will
+ be fed into the protocol before it is returned.
+
+ :returns: a SmartServerRequestProtocol.
+ """
+ self._wait_for_bytes_with_timeout(self._client_timeout)
+ if self.finished:
+ # We're stopping, so don't try to do any more work
+ return None
+ bytes = self._get_line()
+ protocol_factory, unused_bytes = _get_protocol_factory_for_bytes(bytes)
+ protocol = protocol_factory(
+ self.backing_transport, self._write_out, self.root_client_path)
+ protocol.accept_bytes(unused_bytes)
+ return protocol
+
+ def _wait_on_descriptor(self, fd, timeout_seconds):
+ """select() on a file descriptor, waiting for nonblocking read()
+
+ This will raise a ConnectionTimeout exception if we do not get a
+ readable handle before timeout_seconds.
+ :return: None
+ """
+ t_end = self._timer() + timeout_seconds
+ poll_timeout = min(timeout_seconds, self._client_poll_timeout)
+ rs = xs = None
+ while not rs and not xs and self._timer() < t_end:
+ if self.finished:
+ return
+ try:
+ rs, _, xs = select.select([fd], [], [fd], poll_timeout)
+ except (select.error, socket.error) as e:
+ err = getattr(e, 'errno', None)
+ if err is None and getattr(e, 'args', None) is not None:
+ # select.error doesn't have 'errno', it just has args[0]
+ err = e.args[0]
+ if err in _bad_file_descriptor:
+ return # Not a socket indicates read() will fail
+ elif err == errno.EINTR:
+ # Interrupted, keep looping.
+ continue
+ raise
+ if rs or xs:
+ return
+ raise errors.ConnectionTimeout('disconnecting client after %.1f seconds'
+ % (timeout_seconds,))
+
+ def _serve_one_request(self, protocol):
+ """Read one request from input, process, send back a response.
+
+ :param protocol: a SmartServerRequestProtocol.
+ """
+ if protocol is None:
+ return
+ try:
+ self._serve_one_request_unguarded(protocol)
+ except KeyboardInterrupt:
+ raise
+ except Exception, e:
+ self.terminate_due_to_error()
+
+ def terminate_due_to_error(self):
+ """Called when an unhandled exception from the protocol occurs."""
+ raise NotImplementedError(self.terminate_due_to_error)
+
+ def _read_bytes(self, desired_count):
+ """Get some bytes from the medium.
+
+ :param desired_count: number of bytes we want to read.
+ """
+ raise NotImplementedError(self._read_bytes)
+
+
+class SmartServerSocketStreamMedium(SmartServerStreamMedium):
+
+ def __init__(self, sock, backing_transport, root_client_path='/',
+ timeout=None):
+ """Constructor.
+
+ :param sock: the socket the server will read from. It will be put
+ into blocking mode.
+ """
+ SmartServerStreamMedium.__init__(
+ self, backing_transport, root_client_path=root_client_path,
+ timeout=timeout)
+ sock.setblocking(True)
+ self.socket = sock
+ # Record the peer name now; the socket may already be closed by the
+ # time we need it.
+ try:
+ self._client_info = sock.getpeername()
+ except socket.error:
+ self._client_info = '<unknown>'
+
+ def __str__(self):
+ return '%s(client=%s)' % (self.__class__.__name__, self._client_info)
+
+ def __repr__(self):
+ return '%s.%s(client=%s)' % (self.__module__, self.__class__.__name__,
+ self._client_info)
+
+ def _serve_one_request_unguarded(self, protocol):
+ while protocol.next_read_size():
+ # We can safely try to read large chunks. If there is less data
+ # than MAX_SOCKET_CHUNK ready, the socket will just return a
+ # short read immediately rather than block.
+ bytes = self.read_bytes(osutils.MAX_SOCKET_CHUNK)
+ if bytes == '':
+ self.finished = True
+ return
+ protocol.accept_bytes(bytes)
+
+ self._push_back(protocol.unused_data)
+
+ def _disconnect_client(self):
+ """Close the current connection. We stopped due to a timeout/etc."""
+ self.socket.close()
+
+ def _wait_for_bytes_with_timeout(self, timeout_seconds):
+ """Wait for more bytes to be read, but timeout if none available.
+
+ This allows us to detect idle connections, and stop trying to read from
+ them, without setting the socket itself to non-blocking. This also
+ allows us to specify when we watch for idle timeouts.
+
+ :return: None, this will raise ConnectionTimeout if we time out before
+ data is available.
+ """
+ return self._wait_on_descriptor(self.socket, timeout_seconds)
+
+ def _read_bytes(self, desired_count):
+ return osutils.read_bytes_from_socket(
+ self.socket, self._report_activity)
+
+ def terminate_due_to_error(self):
+ # TODO: This should log to a server log file, but no such thing
+ # exists yet. Andrew Bennetts 2006-09-29.
+ self.socket.close()
+ self.finished = True
+
+ def _write_out(self, bytes):
+ tstart = osutils.timer_func()
+ osutils.send_all(self.socket, bytes, self._report_activity)
+ if 'hpss' in debug.debug_flags:
+ thread_id = thread.get_ident()
+ trace.mutter('%12s: [%s] %d bytes to the socket in %.3fs'
+ % ('wrote', thread_id, len(bytes),
+ osutils.timer_func() - tstart))
+
+
+class SmartServerPipeStreamMedium(SmartServerStreamMedium):
+
+ def __init__(self, in_file, out_file, backing_transport, timeout=None):
+ """Construct new server.
+
+ :param in_file: Python file from which requests can be read.
+ :param out_file: Python file to write responses.
+ :param backing_transport: Transport for the directory served.
+ """
+ SmartServerStreamMedium.__init__(self, backing_transport,
+ timeout=timeout)
+ if sys.platform == 'win32':
+ # force binary mode for files
+ import msvcrt
+ for f in (in_file, out_file):
+ fileno = getattr(f, 'fileno', None)
+ if fileno:
+ msvcrt.setmode(fileno(), os.O_BINARY)
+ self._in = in_file
+ self._out = out_file
+
+ def serve(self):
+ """See SmartServerStreamMedium.serve"""
+ # This is the regular serve, except it adds signal trapping for soft
+ # shutdown.
+ stop_gracefully = self._stop_gracefully
+ signals.register_on_hangup(id(self), stop_gracefully)
+ try:
+ return super(SmartServerPipeStreamMedium, self).serve()
+ finally:
+ signals.unregister_on_hangup(id(self))
+
+ def _serve_one_request_unguarded(self, protocol):
+ while True:
+ # We need to be careful not to read past the end of the current
+ # request, or else the read from the pipe will block, so we use
+ # protocol.next_read_size().
+ bytes_to_read = protocol.next_read_size()
+ if bytes_to_read == 0:
+ # Finished serving this request.
+ self._out.flush()
+ return
+ bytes = self.read_bytes(bytes_to_read)
+ if bytes == '':
+ # Connection has been closed.
+ self.finished = True
+ self._out.flush()
+ return
+ protocol.accept_bytes(bytes)
+
+ def _disconnect_client(self):
+ self._in.close()
+ self._out.flush()
+ self._out.close()
+
+ def _wait_for_bytes_with_timeout(self, timeout_seconds):
+ """Wait for more bytes to be read, but timeout if none available.
+
+ This allows us to detect idle connections, and stop trying to read from
+ them, without setting the socket itself to non-blocking. This also
+ allows us to specify when we watch for idle timeouts.
+
+ :return: None, this will raise ConnectionTimeout if we time out before
+ data is available.
+ """
+ if (getattr(self._in, 'fileno', None) is None
+ or sys.platform == 'win32'):
+ # You can't select() file descriptors on Windows.
+ return
+ return self._wait_on_descriptor(self._in, timeout_seconds)
+
+ def _read_bytes(self, desired_count):
+ return self._in.read(desired_count)
+
+ def terminate_due_to_error(self):
+ # TODO: This should log to a server log file, but no such thing
+ # exists yet. Andrew Bennetts 2006-09-29.
+ self._out.close()
+ self.finished = True
+
+ def _write_out(self, bytes):
+ self._out.write(bytes)
+
+
+class SmartClientMediumRequest(object):
+ """A request on a SmartClientMedium.
+
+ Each request allows bytes to be provided to it via accept_bytes, and then
+ the response bytes to be read via read_bytes.
+
+ For instance:
+ request.accept_bytes('123')
+ request.finished_writing()
+ result = request.read_bytes(3)
+ request.finished_reading()
+
+ It is up to the individual SmartClientMedium whether multiple concurrent
+ requests can exist. See SmartClientMedium.get_request to obtain instances
+ of SmartClientMediumRequest, and the concrete Medium you are using for
+ details on concurrency and pipelining.
+ """
+
+ def __init__(self, medium):
+ """Construct a SmartClientMediumRequest for the medium medium."""
+ self._medium = medium
+ # we track state by constants - we may want to use the same
+ # pattern as BodyReader if it gets more complex.
+ # valid states are: "writing", "reading", "done"
+ self._state = "writing"
+
+ def accept_bytes(self, bytes):
+ """Accept bytes for inclusion in this request.
+
+ This method may not be called after finished_writing() has been
+ called. It depends upon the Medium whether or not the bytes will be
+ immediately transmitted. Message based Mediums will tend to buffer the
+ bytes until finished_writing() is called.
+
+ :param bytes: A bytestring.
+ """
+ if self._state != "writing":
+ raise errors.WritingCompleted(self)
+ self._accept_bytes(bytes)
+
+ def _accept_bytes(self, bytes):
+ """Helper for accept_bytes.
+
+ Accept_bytes checks the state of the request to determine if bytes
+ should be accepted. After that it hands off to _accept_bytes to do the
+ actual acceptance.
+ """
+ raise NotImplementedError(self._accept_bytes)
+
+ def finished_reading(self):
+ """Inform the request that all desired data has been read.
+
+ This will remove the request from the pipeline for its medium (if the
+ medium supports pipelining) and any further calls to methods on the
+ request will raise ReadingCompleted.
+ """
+ if self._state == "writing":
+ raise errors.WritingNotComplete(self)
+ if self._state != "reading":
+ raise errors.ReadingCompleted(self)
+ self._state = "done"
+ self._finished_reading()
+
+ def _finished_reading(self):
+ """Helper for finished_reading.
+
+ finished_reading checks the state of the request to determine if
+ finished_reading is allowed, and if it is hands off to _finished_reading
+ to perform the action.
+ """
+ raise NotImplementedError(self._finished_reading)
+
+ def finished_writing(self):
+ """Finish the writing phase of this request.
+
+ This will flush all pending data for this request along the medium.
+ After calling finished_writing, you may not call accept_bytes anymore.
+ """
+ if self._state != "writing":
+ raise errors.WritingCompleted(self)
+ self._state = "reading"
+ self._finished_writing()
+
+ def _finished_writing(self):
+ """Helper for finished_writing.
+
+ finished_writing checks the state of the request to determine if
+ finished_writing is allowed, and if it is hands off to _finished_writing
+ to perform the action.
+ """
+ raise NotImplementedError(self._finished_writing)
+
+ def read_bytes(self, count):
+ """Read bytes from this requests response.
+
+ This method will block and wait for count bytes to be read. It may not
+ be invoked until finished_writing() has been called - this is to ensure
+ a message-based approach to requests, for compatibility with message
+ based mediums like HTTP.
+ """
+ if self._state == "writing":
+ raise errors.WritingNotComplete(self)
+ if self._state != "reading":
+ raise errors.ReadingCompleted(self)
+ return self._read_bytes(count)
+
+ def _read_bytes(self, count):
+ """Helper for SmartClientMediumRequest.read_bytes.
+
+ read_bytes checks the state of the request to determine if bytes
+ should be read. After that it hands off to _read_bytes to do the
+ actual read.
+
+ By default this forwards to self._medium.read_bytes because we are
+ operating on the medium's stream.
+ """
+ return self._medium.read_bytes(count)
+
+ def read_line(self):
+ line = self._read_line()
+ if not line.endswith('\n'):
+ # end of file encountered reading from server
+ raise errors.ConnectionReset(
+ "Unexpected end of message. Please check connectivity "
+ "and permissions, and report a bug if problems persist.")
+ return line
+
+ def _read_line(self):
+ """Helper for SmartClientMediumRequest.read_line.
+
+ By default this forwards to self._medium._get_line because we are
+ operating on the medium's stream.
+ """
+ return self._medium._get_line()
+
+
+class _VfsRefuser(object):
+ """An object that refuses all VFS requests.
+
+ """
+
+ def __init__(self):
+ client._SmartClient.hooks.install_named_hook(
+ 'call', self.check_vfs, 'vfs refuser')
+
+ def check_vfs(self, params):
+ try:
+ request_method = request.request_handlers.get(params.method)
+ except KeyError:
+ # A method we don't know about doesn't count as a VFS method.
+ return
+ if issubclass(request_method, vfs.VfsRequest):
+ raise errors.HpssVfsRequestNotAllowed(params.method, params.args)
+
+
+class _DebugCounter(object):
+ """An object that counts the HPSS calls made to each client medium.
+
+ When a medium is garbage-collected, or failing that when
+ bzrlib.global_state exits, the total number of calls made on that medium
+ are reported via trace.note.
+ """
+
+ def __init__(self):
+ self.counts = weakref.WeakKeyDictionary()
+ client._SmartClient.hooks.install_named_hook(
+ 'call', self.increment_call_count, 'hpss call counter')
+ bzrlib.global_state.cleanups.add_cleanup(self.flush_all)
+
+ def track(self, medium):
+ """Start tracking calls made to a medium.
+
+ This only keeps a weakref to the medium, so shouldn't affect the
+ medium's lifetime.
+ """
+ medium_repr = repr(medium)
+ # Add this medium to the WeakKeyDictionary
+ self.counts[medium] = dict(count=0, vfs_count=0,
+ medium_repr=medium_repr)
+ # Weakref callbacks are fired in reverse order of their association
+ # with the referenced object. So we add a weakref *after* adding to
+ # the WeakKeyDict so that we can report the value from it before the
+ # entry is removed by the WeakKeyDict's own callback.
+ ref = weakref.ref(medium, self.done)
+
+ def increment_call_count(self, params):
+ # Increment the count in the WeakKeyDictionary
+ value = self.counts[params.medium]
+ value['count'] += 1
+ try:
+ request_method = request.request_handlers.get(params.method)
+ except KeyError:
+ # A method we don't know about doesn't count as a VFS method.
+ return
+ if issubclass(request_method, vfs.VfsRequest):
+ value['vfs_count'] += 1
+
+ def done(self, ref):
+ value = self.counts[ref]
+ count, vfs_count, medium_repr = (
+ value['count'], value['vfs_count'], value['medium_repr'])
+ # In case this callback is invoked for the same ref twice (by the
+ # weakref callback and by the atexit function), set the call count back
+ # to 0 so this item won't be reported twice.
+ value['count'] = 0
+ value['vfs_count'] = 0
+ if count != 0:
+ trace.note(gettext('HPSS calls: {0} ({1} vfs) {2}').format(
+ count, vfs_count, medium_repr))
+
+ def flush_all(self):
+ for ref in list(self.counts.keys()):
+ self.done(ref)
+
+_debug_counter = None
+_vfs_refuser = None
+
+
+class SmartClientMedium(SmartMedium):
+ """Smart client is a medium for sending smart protocol requests over."""
+
+ def __init__(self, base):
+ super(SmartClientMedium, self).__init__()
+ self.base = base
+ self._protocol_version_error = None
+ self._protocol_version = None
+ self._done_hello = False
+ # Be optimistic: we assume the remote end can accept new remote
+ # requests until we get an error saying otherwise.
+ # _remote_version_is_before records a version that the remote side is
+ # known to be older than, based on what we've seen so far.
+ self._remote_version_is_before = None
+ # Install debug hook function if debug flag is set.
+ if 'hpss' in debug.debug_flags:
+ global _debug_counter
+ if _debug_counter is None:
+ _debug_counter = _DebugCounter()
+ _debug_counter.track(self)
+ if 'hpss_client_no_vfs' in debug.debug_flags:
+ global _vfs_refuser
+ if _vfs_refuser is None:
+ _vfs_refuser = _VfsRefuser()
+
+ def _is_remote_before(self, version_tuple):
+ """Is it possible the remote side supports RPCs for a given version?
+
+ Typical use::
+
+ needed_version = (1, 2)
+ if medium._is_remote_before(needed_version):
+ fallback_to_pre_1_2_rpc()
+ else:
+ try:
+ do_1_2_rpc()
+ except UnknownSmartMethod:
+ medium._remember_remote_is_before(needed_version)
+ fallback_to_pre_1_2_rpc()
+
+ :seealso: _remember_remote_is_before
+ """
+ if self._remote_version_is_before is None:
+ # So far, the remote side seems to support everything
+ return False
+ return version_tuple >= self._remote_version_is_before
+
+ def _remember_remote_is_before(self, version_tuple):
+ """Tell this medium that the remote side is older the given version.
+
+ :seealso: _is_remote_before
+ """
+ if (self._remote_version_is_before is not None and
+ version_tuple > self._remote_version_is_before):
+ # We have been told that the remote side is older than some version
+ # which is newer than a previously supplied older-than version.
+ # This indicates that some smart verb call is not guarded
+ # appropriately (it should simply not have been tried).
+ trace.mutter(
+ "_remember_remote_is_before(%r) called, but "
+ "_remember_remote_is_before(%r) was called previously."
+ , version_tuple, self._remote_version_is_before)
+ if 'hpss' in debug.debug_flags:
+ ui.ui_factory.show_warning(
+ "_remember_remote_is_before(%r) called, but "
+ "_remember_remote_is_before(%r) was called previously."
+ % (version_tuple, self._remote_version_is_before))
+ return
+ self._remote_version_is_before = version_tuple
+
+ def protocol_version(self):
+ """Find out if 'hello' smart request works."""
+ if self._protocol_version_error is not None:
+ raise self._protocol_version_error
+ if not self._done_hello:
+ try:
+ medium_request = self.get_request()
+ # Send a 'hello' request in protocol version one, for maximum
+ # backwards compatibility.
+ client_protocol = protocol.SmartClientRequestProtocolOne(medium_request)
+ client_protocol.query_version()
+ self._done_hello = True
+ except errors.SmartProtocolError, e:
+ # Cache the error, just like we would cache a successful
+ # result.
+ self._protocol_version_error = e
+ raise
+ return '2'
+
+ def should_probe(self):
+ """Should RemoteBzrDirFormat.probe_transport send a smart request on
+ this medium?
+
+ Some transports are unambiguously smart-only; there's no need to check
+ if the transport is able to carry smart requests, because that's all
+ it is for. In those cases, this method should return False.
+
+ But some HTTP transports can sometimes fail to carry smart requests,
+ yet still be usable for accessing remote bzrdirs via plain file
+ accesses. So for those transports, their media should return True here
+ so that RemoteBzrDirFormat can determine if it is appropriate for that
+ transport.
+ """
+ return False
+
+ def disconnect(self):
+ """If this medium maintains a persistent connection, close it.
+
+ The default implementation does nothing.
+ """
+
+ def remote_path_from_transport(self, transport):
+ """Convert transport into a path suitable for using in a request.
+
+ Note that the resulting remote path doesn't encode the host name or
+ anything but path, so it is only safe to use it in requests sent over
+ the medium from the matching transport.
+ """
+ medium_base = urlutils.join(self.base, '/')
+ rel_url = urlutils.relative_url(medium_base, transport.base)
+ return urlutils.unquote(rel_url)
+
+
+class SmartClientStreamMedium(SmartClientMedium):
+ """Stream based medium common class.
+
+ SmartClientStreamMediums operate on a stream. All subclasses use a common
+ SmartClientStreamMediumRequest for their requests, and should implement
+ _accept_bytes and _read_bytes to allow the request objects to send and
+ receive bytes.
+ """
+
+ def __init__(self, base):
+ SmartClientMedium.__init__(self, base)
+ self._current_request = None
+
+ def accept_bytes(self, bytes):
+ self._accept_bytes(bytes)
+
+ def __del__(self):
+ """The SmartClientStreamMedium knows how to close the stream when it is
+ finished with it.
+ """
+ self.disconnect()
+
+ def _flush(self):
+ """Flush the output stream.
+
+ This method is used by the SmartClientStreamMediumRequest to ensure that
+ all data for a request is sent, to avoid long timeouts or deadlocks.
+ """
+ raise NotImplementedError(self._flush)
+
+ def get_request(self):
+ """See SmartClientMedium.get_request().
+
+ SmartClientStreamMedium always returns a SmartClientStreamMediumRequest
+ for get_request.
+ """
+ return SmartClientStreamMediumRequest(self)
+
+ def reset(self):
+ """We have been disconnected, reset current state.
+
+ This resets things like _current_request and connected state.
+ """
+ self.disconnect()
+ self._current_request = None
+
+
+class SmartSimplePipesClientMedium(SmartClientStreamMedium):
+ """A client medium using simple pipes.
+
+ This client does not manage the pipes: it assumes they will always be open.
+ """
+
+ def __init__(self, readable_pipe, writeable_pipe, base):
+ SmartClientStreamMedium.__init__(self, base)
+ self._readable_pipe = readable_pipe
+ self._writeable_pipe = writeable_pipe
+
+ def _accept_bytes(self, bytes):
+ """See SmartClientStreamMedium.accept_bytes."""
+ try:
+ self._writeable_pipe.write(bytes)
+ except IOError, e:
+ if e.errno in (errno.EINVAL, errno.EPIPE):
+ raise errors.ConnectionReset(
+ "Error trying to write to subprocess:\n%s" % (e,))
+ raise
+ self._report_activity(len(bytes), 'write')
+
+ def _flush(self):
+ """See SmartClientStreamMedium._flush()."""
+ # Note: If flush were to fail, we'd like to raise ConnectionReset, etc.
+ # However, testing shows that even when the child process is
+ # gone, this doesn't error.
+ self._writeable_pipe.flush()
+
+ def _read_bytes(self, count):
+ """See SmartClientStreamMedium._read_bytes."""
+ bytes_to_read = min(count, _MAX_READ_SIZE)
+ bytes = self._readable_pipe.read(bytes_to_read)
+ self._report_activity(len(bytes), 'read')
+ return bytes
+
+
+class SSHParams(object):
+ """A set of parameters for starting a remote bzr via SSH."""
+
+ def __init__(self, host, port=None, username=None, password=None,
+ bzr_remote_path='bzr'):
+ self.host = host
+ self.port = port
+ self.username = username
+ self.password = password
+ self.bzr_remote_path = bzr_remote_path
+
+
+class SmartSSHClientMedium(SmartClientStreamMedium):
+ """A client medium using SSH.
+
+ It delegates IO to a SmartSimplePipesClientMedium or
+ SmartClientAlreadyConnectedSocketMedium (depending on platform).
+ """
+
+ def __init__(self, base, ssh_params, vendor=None):
+ """Creates a client that will connect on the first use.
+
+ :param ssh_params: A SSHParams instance.
+ :param vendor: An optional override for the ssh vendor to use. See
+ bzrlib.transport.ssh for details on ssh vendors.
+ """
+ self._real_medium = None
+ self._ssh_params = ssh_params
+ # for the benefit of progress making a short description of this
+ # transport
+ self._scheme = 'bzr+ssh'
+ # SmartClientStreamMedium stores the repr of this object in its
+ # _DebugCounter so we have to store all the values used in our repr
+ # method before calling the super init.
+ SmartClientStreamMedium.__init__(self, base)
+ self._vendor = vendor
+ self._ssh_connection = None
+
+ def __repr__(self):
+ if self._ssh_params.port is None:
+ maybe_port = ''
+ else:
+ maybe_port = ':%s' % self._ssh_params.port
+ if self._ssh_params.username is None:
+ maybe_user = ''
+ else:
+ maybe_user = '%s@' % self._ssh_params.username
+ return "%s(%s://%s%s%s/)" % (
+ self.__class__.__name__,
+ self._scheme,
+ maybe_user,
+ self._ssh_params.host,
+ maybe_port)
+
+ def _accept_bytes(self, bytes):
+ """See SmartClientStreamMedium.accept_bytes."""
+ self._ensure_connection()
+ self._real_medium.accept_bytes(bytes)
+
+ def disconnect(self):
+ """See SmartClientMedium.disconnect()."""
+ if self._real_medium is not None:
+ self._real_medium.disconnect()
+ self._real_medium = None
+ if self._ssh_connection is not None:
+ self._ssh_connection.close()
+ self._ssh_connection = None
+
+ def _ensure_connection(self):
+ """Connect this medium if not already connected."""
+ if self._real_medium is not None:
+ return
+ if self._vendor is None:
+ vendor = ssh._get_ssh_vendor()
+ else:
+ vendor = self._vendor
+ self._ssh_connection = vendor.connect_ssh(self._ssh_params.username,
+ self._ssh_params.password, self._ssh_params.host,
+ self._ssh_params.port,
+ command=[self._ssh_params.bzr_remote_path, 'serve', '--inet',
+ '--directory=/', '--allow-writes'])
+ io_kind, io_object = self._ssh_connection.get_sock_or_pipes()
+ if io_kind == 'socket':
+ self._real_medium = SmartClientAlreadyConnectedSocketMedium(
+ self.base, io_object)
+ elif io_kind == 'pipes':
+ read_from, write_to = io_object
+ self._real_medium = SmartSimplePipesClientMedium(
+ read_from, write_to, self.base)
+ else:
+ raise AssertionError(
+ "Unexpected io_kind %r from %r"
+ % (io_kind, self._ssh_connection))
+ for hook in transport.Transport.hooks["post_connect"]:
+ hook(self)
+
+ def _flush(self):
+ """See SmartClientStreamMedium._flush()."""
+ self._real_medium._flush()
+
+ def _read_bytes(self, count):
+ """See SmartClientStreamMedium.read_bytes."""
+ if self._real_medium is None:
+ raise errors.MediumNotConnected(self)
+ return self._real_medium.read_bytes(count)
+
+
+# Port 4155 is the default port for bzr://, registered with IANA.
+BZR_DEFAULT_INTERFACE = None
+BZR_DEFAULT_PORT = 4155
+
+
+class SmartClientSocketMedium(SmartClientStreamMedium):
+ """A client medium using a socket.
+
+ This class isn't usable directly. Use one of its subclasses instead.
+ """
+
+ def __init__(self, base):
+ SmartClientStreamMedium.__init__(self, base)
+ self._socket = None
+ self._connected = False
+
+ def _accept_bytes(self, bytes):
+ """See SmartClientMedium.accept_bytes."""
+ self._ensure_connection()
+ osutils.send_all(self._socket, bytes, self._report_activity)
+
+ def _ensure_connection(self):
+ """Connect this medium if not already connected."""
+ raise NotImplementedError(self._ensure_connection)
+
+ def _flush(self):
+ """See SmartClientStreamMedium._flush().
+
+ For sockets we do no flushing. For TCP sockets we may want to turn off
+ TCP_NODELAY and add a means to do a flush, but that can be done in the
+ future.
+ """
+
+ def _read_bytes(self, count):
+ """See SmartClientMedium.read_bytes."""
+ if not self._connected:
+ raise errors.MediumNotConnected(self)
+ return osutils.read_bytes_from_socket(
+ self._socket, self._report_activity)
+
+ def disconnect(self):
+ """See SmartClientMedium.disconnect()."""
+ if not self._connected:
+ return
+ self._socket.close()
+ self._socket = None
+ self._connected = False
+
+
+class SmartTCPClientMedium(SmartClientSocketMedium):
+ """A client medium that creates a TCP connection."""
+
+ def __init__(self, host, port, base):
+ """Creates a client that will connect on the first use."""
+ SmartClientSocketMedium.__init__(self, base)
+ self._host = host
+ self._port = port
+
+ def _ensure_connection(self):
+ """Connect this medium if not already connected."""
+ if self._connected:
+ return
+ if self._port is None:
+ port = BZR_DEFAULT_PORT
+ else:
+ port = int(self._port)
+ try:
+ sockaddrs = socket.getaddrinfo(self._host, port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM, 0, 0)
+ except socket.gaierror, (err_num, err_msg):
+ raise errors.ConnectionError("failed to lookup %s:%d: %s" %
+ (self._host, port, err_msg))
+ # Initialize err in case there are no addresses returned:
+ err = socket.error("no address found for %s" % self._host)
+ for (family, socktype, proto, canonname, sockaddr) in sockaddrs:
+ try:
+ self._socket = socket.socket(family, socktype, proto)
+ self._socket.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_NODELAY, 1)
+ self._socket.connect(sockaddr)
+ except socket.error, err:
+ if self._socket is not None:
+ self._socket.close()
+ self._socket = None
+ continue
+ break
+ if self._socket is None:
+ # socket errors either have a (string) or (errno, string) as their
+ # args.
+ if type(err.args) is str:
+ err_msg = err.args
+ else:
+ err_msg = err.args[1]
+ raise errors.ConnectionError("failed to connect to %s:%d: %s" %
+ (self._host, port, err_msg))
+ self._connected = True
+ for hook in transport.Transport.hooks["post_connect"]:
+ hook(self)
+
+
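+# A minimal sketch (hypothetical host, for illustration only) of wiring a TCP
+# medium to a _SmartClient for a single 'hello' request.
+def _example_tcp_hello(host='example.com', port=None):
+    medium = SmartTCPClientMedium(host, port, 'bzr://%s/' % host)
+    smart_client = client._SmartClient(medium)
+    try:
+        return smart_client.call('hello')
+    finally:
+        medium.disconnect()
+
+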
+class SmartClientAlreadyConnectedSocketMedium(SmartClientSocketMedium):
+ """A client medium for an already connected socket.
+
+ Note that this class will assume it "owns" the socket, so it will close it
+ when its disconnect method is called.
+ """
+
+ def __init__(self, base, sock):
+ SmartClientSocketMedium.__init__(self, base)
+ self._socket = sock
+ self._connected = True
+
+ def _ensure_connection(self):
+ # Already connected, by definition! So nothing to do.
+ pass
+
+
+class SmartClientStreamMediumRequest(SmartClientMediumRequest):
+ """A SmartClientMediumRequest that works with an SmartClientStreamMedium."""
+
+ def __init__(self, medium):
+ SmartClientMediumRequest.__init__(self, medium)
+ # check that we are safe concurrency wise. If some streams start
+ # allowing concurrent requests - i.e. via multiplexing - then this
+ # assert should be moved to SmartClientStreamMedium.get_request,
+ # and the setting/unsetting of _current_request likewise moved into
+ # that class: but it's unneeded overhead for now. RBC 20060922
+ if self._medium._current_request is not None:
+ raise errors.TooManyConcurrentRequests(self._medium)
+ self._medium._current_request = self
+
+ def _accept_bytes(self, bytes):
+ """See SmartClientMediumRequest._accept_bytes.
+
+ This forwards to self._medium._accept_bytes because we are operating
+ on the mediums stream.
+ """
+ self._medium._accept_bytes(bytes)
+
+ def _finished_reading(self):
+ """See SmartClientMediumRequest._finished_reading.
+
+ This clears the _current_request on self._medium to allow a new
+ request to be created.
+ """
+ if self._medium._current_request is not self:
+ raise AssertionError()
+ self._medium._current_request = None
+
+ def _finished_writing(self):
+ """See SmartClientMediumRequest._finished_writing.
+
+ This invokes self._medium._flush to ensure all bytes are transmitted.
+ """
+ self._medium._flush()
diff --git a/bzrlib/smart/message.py b/bzrlib/smart/message.py
new file mode 100644
index 0000000..2322297
--- /dev/null
+++ b/bzrlib/smart/message.py
@@ -0,0 +1,353 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import collections
+from cStringIO import StringIO
+
+from bzrlib import (
+ debug,
+ errors,
+ )
+from bzrlib.trace import mutter
+
+
+class MessageHandler(object):
+ """Base class for handling messages received via the smart protocol.
+
+ As parts of a message are received, the corresponding PART_received method
+ will be called.
+ """
+
+ def __init__(self):
+ self.headers = None
+
+ def headers_received(self, headers):
+ """Called when message headers are received.
+
+ This default implementation just stores them in self.headers.
+ """
+ self.headers = headers
+
+ def byte_part_received(self, byte):
+ """Called when a 'byte' part is received.
+
+ Note that a 'byte' part is a message part consisting of exactly one
+ byte.
+ """
+ raise NotImplementedError(self.byte_part_received)
+
+ def bytes_part_received(self, bytes):
+ """Called when a 'bytes' part is received.
+
+ A 'bytes' message part can contain any number of bytes. It should not
+ be confused with a 'byte' part, which is always a single byte.
+ """
+ raise NotImplementedError(self.bytes_part_received)
+
+ def structure_part_received(self, structure):
+ """Called when a 'structure' part is received.
+
+ :param structure: some structured data, which will be some combination
+ of list, dict, int, and str objects.
+ """
+ raise NotImplementedError(self.structure_part_received)
+
+ def protocol_error(self, exception):
+ """Called when there is a protocol decoding error.
+
+ The default implementation just re-raises the exception.
+ """
+ raise
+
+ def end_received(self):
+ """Called when the end of the message is received."""
+ # No-op by default.
+ pass
+
+
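+# A minimal sketch (hypothetical handler, for illustration only) of the
+# MessageHandler interface: it just records each part as the decoder
+# delivers it.
+class _ExampleRecordingHandler(MessageHandler):
+
+    def __init__(self):
+        MessageHandler.__init__(self)
+        self.parts = []
+
+    def byte_part_received(self, byte):
+        self.parts.append(('byte', byte))
+
+    def bytes_part_received(self, bytes):
+        self.parts.append(('bytes', bytes))
+
+    def structure_part_received(self, structure):
+        self.parts.append(('structure', structure))
+
+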
+class ConventionalRequestHandler(MessageHandler):
+ """A message handler for "conventional" requests.
+
+ "Conventional" is used in the sense described in
+ doc/developers/network-protocol.txt: a simple message with arguments and an
+ optional body.
+
+ Possible states:
+ * args: expecting args
+ * body: expecting body (terminated by receiving a post-body status)
+ * error: expecting post-body error
+ * end: expecting end of message
+ * nothing: finished
+ """
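+
+ # Typical transitions for a successful request carrying a body are
+ # args -> body -> end -> nothing; a request answered directly from its
+ # arguments skips the body state and goes from args straight to end.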
+
+ def __init__(self, request_handler, responder):
+ MessageHandler.__init__(self)
+ self.request_handler = request_handler
+ self.responder = responder
+ self.expecting = 'args'
+ self._should_finish_body = False
+ self._response_sent = False
+
+ def protocol_error(self, exception):
+ if self.responder.response_sent:
+ # We can only send one response to a request, no matter how many
+ # errors happen while processing it.
+ return
+ self.responder.send_error(exception)
+
+ def byte_part_received(self, byte):
+ if self.expecting == 'body':
+ if byte == 'S':
+ # Success. Nothing more to come except the end of message.
+ self.expecting = 'end'
+ elif byte == 'E':
+ # Error. Expect an error structure.
+ self.expecting = 'error'
+ else:
+ raise errors.SmartProtocolError(
+ 'Non-success status byte in request body: %r' % (byte,))
+ else:
+ raise errors.SmartProtocolError(
+ 'Unexpected message part: byte(%r)' % (byte,))
+
+ def structure_part_received(self, structure):
+ if self.expecting == 'args':
+ self._args_received(structure)
+ elif self.expecting == 'error':
+ self._error_received(structure)
+ else:
+ raise errors.SmartProtocolError(
+ 'Unexpected message part: structure(%r)' % (structure,))
+
+ def _args_received(self, args):
+ self.expecting = 'body'
+ self.request_handler.args_received(args)
+ if self.request_handler.finished_reading:
+ self._response_sent = True
+ self.responder.send_response(self.request_handler.response)
+ self.expecting = 'end'
+
+ def _error_received(self, error_args):
+ self.expecting = 'end'
+ self.request_handler.post_body_error_received(error_args)
+
+ def bytes_part_received(self, bytes):
+ if self.expecting == 'body':
+ self._should_finish_body = True
+ self.request_handler.accept_body(bytes)
+ else:
+ raise errors.SmartProtocolError(
+ 'Unexpected message part: bytes(%r)' % (bytes,))
+
+ def end_received(self):
+ if self.expecting not in ['body', 'end']:
+ raise errors.SmartProtocolError(
+ 'End of message received prematurely (while expecting %s)'
+ % (self.expecting,))
+ self.expecting = 'nothing'
+ self.request_handler.end_received()
+ if not self.request_handler.finished_reading:
+ raise errors.SmartProtocolError(
+ "Complete conventional request was received, but request "
+ "handler has not finished reading.")
+ if not self._response_sent:
+ self.responder.send_response(self.request_handler.response)
+
+
+class ResponseHandler(object):
+ """Abstract base class for an object that handles a smart response."""
+
+ def read_response_tuple(self, expect_body=False):
+ """Reads and returns the response tuple for the current request.
+
+ :keyword expect_body: a boolean indicating if a body is expected in the
+ response. Some protocol versions need this information to know
+ when a response is finished. If False, read_body_bytes should
+ *not* be called afterwards. Defaults to False.
+ :returns: tuple of response arguments.
+ """
+ raise NotImplementedError(self.read_response_tuple)
+
+ def read_body_bytes(self, count=-1):
+ """Read and return some bytes from the body.
+
+ :param count: if specified, read up to this many bytes. By default,
+ reads the entire body.
+ :returns: str of bytes from the response body.
+ """
+ raise NotImplementedError(self.read_body_bytes)
+
+ def read_streamed_body(self):
+ """Returns an iterable that reads and returns a series of body chunks.
+ """
+ raise NotImplementedError(self.read_streamed_body)
+
+ def cancel_read_body(self):
+ """Stop expecting a body for this response.
+
+ If expect_body was passed to read_response_tuple, this cancels that
+ expectation (and thus finishes reading the response, allowing a new
+ request to be issued). This is useful if a response turns out to be an
+ error rather than a normal result with a body.
+ """
+ raise NotImplementedError(self.cancel_read_body)
+
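+# A rough client-side usage sketch (illustrative only; 'handler' stands for a
+# concrete ResponseHandler such as ConventionalResponseHandler, and the 'ok'
+# check is merely an example of deciding whether a body follows):
+#
+#     args = handler.read_response_tuple(expect_body=True)
+#     if args[0] == 'ok':
+#         body = handler.read_body_bytes()
+#     else:
+#         handler.cancel_read_body()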
+
+class ConventionalResponseHandler(MessageHandler, ResponseHandler):
+
+ def __init__(self):
+ MessageHandler.__init__(self)
+ self.status = None
+ self.args = None
+ self._bytes_parts = collections.deque()
+ self._body_started = False
+ self._body_stream_status = None
+ self._body = None
+ self._body_error_args = None
+ self.finished_reading = False
+
+ def setProtoAndMediumRequest(self, protocol_decoder, medium_request):
+ self._protocol_decoder = protocol_decoder
+ self._medium_request = medium_request
+
+ def byte_part_received(self, byte):
+ if byte not in ['E', 'S']:
+ raise errors.SmartProtocolError(
+ 'Unknown response status: %r' % (byte,))
+ if self._body_started:
+ if self._body_stream_status is not None:
+ raise errors.SmartProtocolError(
+ 'Unexpected byte part received: %r' % (byte,))
+ self._body_stream_status = byte
+ else:
+ if self.status is not None:
+ raise errors.SmartProtocolError(
+ 'Unexpected byte part received: %r' % (byte,))
+ self.status = byte
+
+ def bytes_part_received(self, bytes):
+ self._body_started = True
+ self._bytes_parts.append(bytes)
+
+ def structure_part_received(self, structure):
+ if type(structure) is not tuple:
+ raise errors.SmartProtocolError(
+ 'Args structure is not a sequence: %r' % (structure,))
+ if not self._body_started:
+ if self.args is not None:
+ raise errors.SmartProtocolError(
+ 'Unexpected structure received: %r (already got %r)'
+ % (structure, self.args))
+ self.args = structure
+ else:
+ if self._body_stream_status != 'E':
+ raise errors.SmartProtocolError(
+ 'Unexpected structure received after body: %r'
+ % (structure,))
+ self._body_error_args = structure
+
+ def _wait_for_response_args(self):
+ while self.args is None and not self.finished_reading:
+ self._read_more()
+
+ def _wait_for_response_end(self):
+ while not self.finished_reading:
+ self._read_more()
+
+ def _read_more(self):
+ next_read_size = self._protocol_decoder.next_read_size()
+ if next_read_size == 0:
+ # a complete request has been read.
+ self.finished_reading = True
+ self._medium_request.finished_reading()
+ return
+ bytes = self._medium_request.read_bytes(next_read_size)
+ if bytes == '':
+ # end of file encountered reading from server
+ if 'hpss' in debug.debug_flags:
+ mutter(
+ 'decoder state: buf[:10]=%r, state_accept=%s',
+ self._protocol_decoder._get_in_buffer()[:10],
+ self._protocol_decoder.state_accept.__name__)
+ raise errors.ConnectionReset(
+ "Unexpected end of message. "
+ "Please check connectivity and permissions, and report a bug "
+ "if problems persist.")
+ self._protocol_decoder.accept_bytes(bytes)
+
+ def protocol_error(self, exception):
+ # Whatever the error is, we're done with this request.
+ self.finished_reading = True
+ self._medium_request.finished_reading()
+ raise
+
+ def read_response_tuple(self, expect_body=False):
+ """Read a response tuple from the wire."""
+ self._wait_for_response_args()
+ if not expect_body:
+ self._wait_for_response_end()
+ if 'hpss' in debug.debug_flags:
+ mutter(' result: %r', self.args)
+ if self.status == 'E':
+ self._wait_for_response_end()
+ _raise_smart_server_error(self.args)
+ return tuple(self.args)
+
+ def read_body_bytes(self, count=-1):
+ """Read bytes from the body, decoding into a byte stream.
+
+ We read all bytes at once to ensure we've checked the trailer for
+ errors, and then feed the buffer back as read_body_bytes is called.
+
+ Like the builtin file.read in Python, a count of -1 (the default) means
+ read the entire body.
+ """
+ # TODO: we don't necessarily need to buffer the full request if count
+ # != -1. (2008/04/30, Andrew Bennetts)
+ if self._body is None:
+ self._wait_for_response_end()
+ body_bytes = ''.join(self._bytes_parts)
+ if 'hpss' in debug.debug_flags:
+ mutter(' %d body bytes read', len(body_bytes))
+ self._body = StringIO(body_bytes)
+ self._bytes_parts = None
+ return self._body.read(count)
+
+ def read_streamed_body(self):
+ while not self.finished_reading:
+ while self._bytes_parts:
+ bytes_part = self._bytes_parts.popleft()
+ if 'hpssdetail' in debug.debug_flags:
+ mutter(' %d byte part read', len(bytes_part))
+ yield bytes_part
+ self._read_more()
+ if self._body_stream_status == 'E':
+ _raise_smart_server_error(self._body_error_args)
+
+ def cancel_read_body(self):
+ self._wait_for_response_end()
+
+
+def _raise_smart_server_error(error_tuple):
+ """Raise exception based on tuple received from smart server
+
+ Specific error translation is handled by bzrlib.remote._translate_error
+ """
+ if error_tuple[0] == 'UnknownMethod':
+ raise errors.UnknownSmartMethod(error_tuple[1])
+ raise errors.ErrorFromSmartServer(error_tuple)
diff --git a/bzrlib/smart/packrepository.py b/bzrlib/smart/packrepository.py
new file mode 100644
index 0000000..c74ebc6
--- /dev/null
+++ b/bzrlib/smart/packrepository.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Server-side pack repository related request implementations."""
+
+from __future__ import absolute_import
+
+from bzrlib.smart.request import (
+ FailedSmartServerResponse,
+ SuccessfulSmartServerResponse,
+ )
+
+
+from bzrlib.smart.repository import (
+ SmartServerRepositoryRequest,
+ )
+
+
+class SmartServerPackRepositoryAutopack(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository):
+ pack_collection = getattr(repository, '_pack_collection', None)
+ if pack_collection is None:
+ # This is a not a pack repo, so asking for an autopack is just a
+ # no-op.
+ return SuccessfulSmartServerResponse(('ok',))
+ repository.lock_write()
+ try:
+ repository._pack_collection.autopack()
+ finally:
+ repository.unlock()
+ return SuccessfulSmartServerResponse(('ok',))
+
+
diff --git a/bzrlib/smart/protocol.py b/bzrlib/smart/protocol.py
new file mode 100644
index 0000000..aed3f10
--- /dev/null
+++ b/bzrlib/smart/protocol.py
@@ -0,0 +1,1385 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Wire-level encoding and decoding of requests and responses for the smart
+client and server.
+"""
+
+from __future__ import absolute_import
+
+import collections
+from cStringIO import StringIO
+import struct
+import sys
+import thread
+import time
+
+import bzrlib
+from bzrlib import (
+ debug,
+ errors,
+ osutils,
+ )
+from bzrlib.smart import message, request
+from bzrlib.trace import log_exception_quietly, mutter
+from bzrlib.bencode import bdecode_as_tuple, bencode
+
+
+# Protocol version strings. These are sent as prefixes of bzr requests and
+# responses to identify the protocol version being used. (There are no version
+# one strings because that version doesn't send any).
+REQUEST_VERSION_TWO = 'bzr request 2\n'
+RESPONSE_VERSION_TWO = 'bzr response 2\n'
+
+MESSAGE_VERSION_THREE = 'bzr message 3 (bzr 1.6)\n'
+RESPONSE_VERSION_THREE = REQUEST_VERSION_THREE = MESSAGE_VERSION_THREE
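+
+# As a rough illustration (verb and argument names below are placeholders): a
+# version-two request is sent as
+#
+#     'bzr request 2\n' 'verb\x01arg1\x01arg2\n'
+#
+# optionally followed by a length-prefixed bulk body ('<len>\n<body>done\n'),
+# while version three uses the single MESSAGE_VERSION_THREE marker in both
+# directions, followed by bencoded headers and tagged message parts.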
+
+
+def _recv_tuple(from_file):
+ req_line = from_file.readline()
+ return _decode_tuple(req_line)
+
+
+def _decode_tuple(req_line):
+ if req_line is None or req_line == '':
+ return None
+ if req_line[-1] != '\n':
+ raise errors.SmartProtocolError("request %r not terminated" % req_line)
+ return tuple(req_line[:-1].split('\x01'))
+
+
+def _encode_tuple(args):
+ """Encode the tuple args to a bytestream."""
+ joined = '\x01'.join(args) + '\n'
+ if type(joined) is unicode:
+ # XXX: We should fix things so this never happens! -AJB, 20100304
+ mutter('response args contain unicode, should be only bytes: %r',
+ joined)
+ joined = joined.encode('ascii')
+ return joined
+
+
+class Requester(object):
+ """Abstract base class for an object that can issue requests on a smart
+ medium.
+ """
+
+ def call(self, *args):
+ """Make a remote call.
+
+ :param args: the arguments of this call.
+ """
+ raise NotImplementedError(self.call)
+
+ def call_with_body_bytes(self, args, body):
+ """Make a remote call with a body.
+
+ :param args: the arguments of this call.
+ :type body: str
+ :param body: the body to send with the request.
+ """
+ raise NotImplementedError(self.call_with_body_bytes)
+
+ def call_with_body_readv_array(self, args, body):
+ """Make a remote call with a readv array.
+
+ :param args: the arguments of this call.
+ :type body: iterable of (start, length) tuples.
+ :param body: the readv ranges to send with this request.
+ """
+ raise NotImplementedError(self.call_with_body_readv_array)
+
+ def set_headers(self, headers):
+ raise NotImplementedError(self.set_headers)
+
+
+class SmartProtocolBase(object):
+ """Methods common to client and server"""
+
+ # TODO: this only actually accommodates a single block; possibly should
+ # support multiple chunks?
+ def _encode_bulk_data(self, body):
+ """Encode body as a bulk data chunk."""
+ return ''.join(('%d\n' % len(body), body, 'done\n'))
+
+ def _serialise_offsets(self, offsets):
+ """Serialise a readv offset list."""
+ txt = []
+ for start, length in offsets:
+ txt.append('%d,%d' % (start, length))
+ return '\n'.join(txt)
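+ # For example, _serialise_offsets([(0, 100), (200, 50)]) returns
+ # '0,100\n200,50' (offset values here are illustrative only).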
+
+
+class SmartServerRequestProtocolOne(SmartProtocolBase):
+ """Server-side encoding and decoding logic for smart version 1."""
+
+ def __init__(self, backing_transport, write_func, root_client_path='/',
+ jail_root=None):
+ self._backing_transport = backing_transport
+ self._root_client_path = root_client_path
+ self._jail_root = jail_root
+ self.unused_data = ''
+ self._finished = False
+ self.in_buffer = ''
+ self._has_dispatched = False
+ self.request = None
+ self._body_decoder = None
+ self._write_func = write_func
+
+ def accept_bytes(self, bytes):
+ """Take bytes, and advance the internal state machine appropriately.
+
+ :param bytes: must be a byte string
+ """
+ if not isinstance(bytes, str):
+ raise ValueError(bytes)
+ self.in_buffer += bytes
+ if not self._has_dispatched:
+ if '\n' not in self.in_buffer:
+ # no command line yet
+ return
+ self._has_dispatched = True
+ try:
+ first_line, self.in_buffer = self.in_buffer.split('\n', 1)
+ first_line += '\n'
+ req_args = _decode_tuple(first_line)
+ self.request = request.SmartServerRequestHandler(
+ self._backing_transport, commands=request.request_handlers,
+ root_client_path=self._root_client_path,
+ jail_root=self._jail_root)
+ self.request.args_received(req_args)
+ if self.request.finished_reading:
+ # trivial request
+ self.unused_data = self.in_buffer
+ self.in_buffer = ''
+ self._send_response(self.request.response)
+ except KeyboardInterrupt:
+ raise
+ except errors.UnknownSmartMethod, err:
+ protocol_error = errors.SmartProtocolError(
+ "bad request %r" % (err.verb,))
+ failure = request.FailedSmartServerResponse(
+ ('error', str(protocol_error)))
+ self._send_response(failure)
+ return
+ except Exception, exception:
+ # everything else: pass to client, flush, and quit
+ log_exception_quietly()
+ self._send_response(request.FailedSmartServerResponse(
+ ('error', str(exception))))
+ return
+
+ if self._has_dispatched:
+ if self._finished:
+ # nothing to do. XXX: this routine should be a single state
+ # machine too.
+ self.unused_data += self.in_buffer
+ self.in_buffer = ''
+ return
+ if self._body_decoder is None:
+ self._body_decoder = LengthPrefixedBodyDecoder()
+ self._body_decoder.accept_bytes(self.in_buffer)
+ self.in_buffer = self._body_decoder.unused_data
+ body_data = self._body_decoder.read_pending_data()
+ self.request.accept_body(body_data)
+ if self._body_decoder.finished_reading:
+ self.request.end_of_body()
+ if not self.request.finished_reading:
+ raise AssertionError("no more body, request not finished")
+ if self.request.response is not None:
+ self._send_response(self.request.response)
+ self.unused_data = self.in_buffer
+ self.in_buffer = ''
+ else:
+ if self.request.finished_reading:
+ raise AssertionError(
+ "no response and we have finished reading.")
+
+ def _send_response(self, response):
+ """Send a smart server response down the output stream."""
+ if self._finished:
+ raise AssertionError('response already sent')
+ args = response.args
+ body = response.body
+ self._finished = True
+ self._write_protocol_version()
+ self._write_success_or_failure_prefix(response)
+ self._write_func(_encode_tuple(args))
+ if body is not None:
+ if not isinstance(body, str):
+ raise ValueError(body)
+ bytes = self._encode_bulk_data(body)
+ self._write_func(bytes)
+
+ def _write_protocol_version(self):
+ """Write any prefixes this protocol requires.
+
+ Version one doesn't send protocol versions.
+ """
+
+ def _write_success_or_failure_prefix(self, response):
+ """Write the protocol specific success/failure prefix.
+
+ For SmartServerRequestProtocolOne this is omitted but we
+ call is_successful to ensure that the response is valid.
+ """
+ response.is_successful()
+
+ def next_read_size(self):
+ if self._finished:
+ return 0
+ if self._body_decoder is None:
+ return 1
+ else:
+ return self._body_decoder.next_read_size()
+
+
+class SmartServerRequestProtocolTwo(SmartServerRequestProtocolOne):
+ r"""Version two of the server side of the smart protocol.
+
+ This prefixes responses with the value of RESPONSE_VERSION_TWO.
+ """
+
+ response_marker = RESPONSE_VERSION_TWO
+ request_marker = REQUEST_VERSION_TWO
+
+ def _write_success_or_failure_prefix(self, response):
+ """Write the protocol specific success/failure prefix."""
+ if response.is_successful():
+ self._write_func('success\n')
+ else:
+ self._write_func('failed\n')
+
+ def _write_protocol_version(self):
+ r"""Write any prefixes this protocol requires.
+
+ Version two sends the value of RESPONSE_VERSION_TWO.
+ """
+ self._write_func(self.response_marker)
+
+ def _send_response(self, response):
+ """Send a smart server response down the output stream."""
+ if (self._finished):
+ raise AssertionError('response already sent')
+ self._finished = True
+ self._write_protocol_version()
+ self._write_success_or_failure_prefix(response)
+ self._write_func(_encode_tuple(response.args))
+ if response.body is not None:
+ if not isinstance(response.body, str):
+ raise AssertionError('body must be a str')
+ if not (response.body_stream is None):
+ raise AssertionError(
+ 'body_stream and body cannot both be set')
+ bytes = self._encode_bulk_data(response.body)
+ self._write_func(bytes)
+ elif response.body_stream is not None:
+ _send_stream(response.body_stream, self._write_func)
+
+
+def _send_stream(stream, write_func):
+ write_func('chunked\n')
+ _send_chunks(stream, write_func)
+ write_func('END\n')
+
+
+def _send_chunks(stream, write_func):
+ for chunk in stream:
+ if isinstance(chunk, str):
+ bytes = "%x\n%s" % (len(chunk), chunk)
+ write_func(bytes)
+ elif isinstance(chunk, request.FailedSmartServerResponse):
+ write_func('ERR\n')
+ _send_chunks(chunk.args, write_func)
+ return
+ else:
+ raise errors.BzrError(
+ 'Chunks must be str or FailedSmartServerResponse, got %r'
+ % chunk)
+
+
+class _NeedMoreBytes(Exception):
+ """Raise this inside a _StatefulDecoder to stop decoding until more bytes
+ have been received.
+ """
+
+ def __init__(self, count=None):
+ """Constructor.
+
+ :param count: the total number of bytes needed by the current state.
+ May be None if the number of bytes needed is unknown.
+ """
+ self.count = count
+
+
+class _StatefulDecoder(object):
+ """Base class for writing state machines to decode byte streams.
+
+ Subclasses should provide a self.state_accept attribute that accepts bytes
+ and, if appropriate, updates self.state_accept to a different function.
+ accept_bytes will call state_accept as often as necessary to make sure the
+ state machine has progressed as far as possible before it returns.
+
+ See ProtocolThreeDecoder for an example subclass.
+ """
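+
+ # A minimal sketch of the pattern a state function follows (illustrative
+ # only; _handle_part and _state_accept_next_part are hypothetical names,
+ # not states defined in this module):
+ #
+ #     def _state_accept_expecting_four_bytes(self):
+ #         if self._in_buffer_len < 4:
+ #             raise _NeedMoreBytes(4)
+ #         in_buf = self._get_in_buffer()
+ #         self._handle_part(in_buf[:4])
+ #         self._set_in_buffer(in_buf[4:])
+ #         self.state_accept = self._state_accept_next_part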
+
+ def __init__(self):
+ self.finished_reading = False
+ self._in_buffer_list = []
+ self._in_buffer_len = 0
+ self.unused_data = ''
+ self.bytes_left = None
+ self._number_needed_bytes = None
+
+ def _get_in_buffer(self):
+ if len(self._in_buffer_list) == 1:
+ return self._in_buffer_list[0]
+ in_buffer = ''.join(self._in_buffer_list)
+ if len(in_buffer) != self._in_buffer_len:
+ raise AssertionError(
+ "Length of buffer did not match expected value: %s != %s"
+ % (self._in_buffer_len, len(in_buffer)))
+ self._in_buffer_list = [in_buffer]
+ return in_buffer
+
+ def _get_in_bytes(self, count):
+ """Grab X bytes from the input_buffer.
+
+ Callers should have already checked that self._in_buffer_len is >=
+ count. Note, this does not consume the bytes from the buffer. The
+ caller will still need to call _get_in_buffer() and then
+ _set_in_buffer() if they actually need to consume the bytes.
+ """
+ # check if we can yield the bytes from just the first entry in our list
+ if len(self._in_buffer_list) == 0:
+ raise AssertionError('Callers must be sure we have buffered bytes'
+ ' before calling _get_in_bytes')
+ if len(self._in_buffer_list[0]) > count:
+ return self._in_buffer_list[0][:count]
+ # We can't yield it from the first buffer, so collapse all buffers, and
+ # yield it from that
+ in_buf = self._get_in_buffer()
+ return in_buf[:count]
+
+ def _set_in_buffer(self, new_buf):
+ if new_buf is not None:
+ self._in_buffer_list = [new_buf]
+ self._in_buffer_len = len(new_buf)
+ else:
+ self._in_buffer_list = []
+ self._in_buffer_len = 0
+
+ def accept_bytes(self, bytes):
+ """Decode as much of bytes as possible.
+
+ If 'bytes' contains too much data it will be appended to
+ self.unused_data.
+
+ finished_reading will be set when no more data is required. Further
+ data will be appended to self.unused_data.
+ """
+ # accept_bytes is allowed to change the state
+ self._number_needed_bytes = None
+ # lsprof puts a very large amount of time on this specific call for
+ # large readv arrays
+ self._in_buffer_list.append(bytes)
+ self._in_buffer_len += len(bytes)
+ try:
+ # Run the function for the current state.
+ current_state = self.state_accept
+ self.state_accept()
+ while current_state != self.state_accept:
+ # The current state has changed. Run the function for the new
+ # current state, so that it can:
+ # - decode any unconsumed bytes left in a buffer, and
+ # - signal how many more bytes are expected (via raising
+ # _NeedMoreBytes).
+ current_state = self.state_accept
+ self.state_accept()
+ except _NeedMoreBytes, e:
+ self._number_needed_bytes = e.count
+
+
+class ChunkedBodyDecoder(_StatefulDecoder):
+ """Decoder for chunked body data.
+
+ This is very similar to HTTP's chunked encoding. See the description of
+ streamed body data in `doc/developers/network-protocol.txt` for details.
+ """
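+
+ # For orientation, a streamed body as decoded here looks like the following
+ # (chunk lengths are hex; the payloads are only an example):
+ #
+ #     'chunked\n'
+ #     '5\nhello'
+ #     'b\nmore chunks'
+ #     'END\n'
+ #
+ # An error part-way through is signalled by an 'ERR\n' line followed by the
+ # error arguments encoded as further chunks, then 'END\n'.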
+
+ def __init__(self):
+ _StatefulDecoder.__init__(self)
+ self.state_accept = self._state_accept_expecting_header
+ self.chunk_in_progress = None
+ self.chunks = collections.deque()
+ self.error = False
+ self.error_in_progress = None
+
+ def next_read_size(self):
+ # Note: the shortest possible chunk is 2 bytes: '0\n', and the
+ # end-of-body marker is 4 bytes: 'END\n'.
+ if self.state_accept == self._state_accept_reading_chunk:
+ # We're expecting more chunk content. So we're expecting at least
+ # the rest of this chunk plus an END chunk.
+ return self.bytes_left + 4
+ elif self.state_accept == self._state_accept_expecting_length:
+ if self._in_buffer_len == 0:
+ # We're expecting a chunk length. There are at least two bytes
+ # left: a digit plus '\n'.
+ return 2
+ else:
+ # We're in the middle of reading a chunk length. So there's at
+ # least one byte left, the '\n' that terminates the length.
+ return 1
+ elif self.state_accept == self._state_accept_reading_unused:
+ return 1
+ elif self.state_accept == self._state_accept_expecting_header:
+ return max(0, len('chunked\n') - self._in_buffer_len)
+ else:
+ raise AssertionError("Impossible state: %r" % (self.state_accept,))
+
+ def read_next_chunk(self):
+ try:
+ return self.chunks.popleft()
+ except IndexError:
+ return None
+
+ def _extract_line(self):
+ in_buf = self._get_in_buffer()
+ pos = in_buf.find('\n')
+ if pos == -1:
+ # We haven't read a complete line yet, so request more bytes before
+ # we continue.
+ raise _NeedMoreBytes(1)
+ line = in_buf[:pos]
+ # Trim the prefix (including '\n' delimiter) from the _in_buffer.
+ self._set_in_buffer(in_buf[pos+1:])
+ return line
+
+ def _finished(self):
+ self.unused_data = self._get_in_buffer()
+ self._in_buffer_list = []
+ self._in_buffer_len = 0
+ self.state_accept = self._state_accept_reading_unused
+ if self.error:
+ error_args = tuple(self.error_in_progress)
+ self.chunks.append(request.FailedSmartServerResponse(error_args))
+ self.error_in_progress = None
+ self.finished_reading = True
+
+ def _state_accept_expecting_header(self):
+ prefix = self._extract_line()
+ if prefix == 'chunked':
+ self.state_accept = self._state_accept_expecting_length
+ else:
+ raise errors.SmartProtocolError(
+ 'Bad chunked body header: "%s"' % (prefix,))
+
+ def _state_accept_expecting_length(self):
+ prefix = self._extract_line()
+ if prefix == 'ERR':
+ self.error = True
+ self.error_in_progress = []
+ self._state_accept_expecting_length()
+ return
+ elif prefix == 'END':
+ # We've read the end-of-body marker.
+ # Any further bytes are unused data, including the bytes left in
+ # the _in_buffer.
+ self._finished()
+ return
+ else:
+ self.bytes_left = int(prefix, 16)
+ self.chunk_in_progress = ''
+ self.state_accept = self._state_accept_reading_chunk
+
+ def _state_accept_reading_chunk(self):
+ in_buf = self._get_in_buffer()
+ in_buffer_len = len(in_buf)
+ self.chunk_in_progress += in_buf[:self.bytes_left]
+ self._set_in_buffer(in_buf[self.bytes_left:])
+ self.bytes_left -= in_buffer_len
+ if self.bytes_left <= 0:
+ # Finished with chunk
+ self.bytes_left = None
+ if self.error:
+ self.error_in_progress.append(self.chunk_in_progress)
+ else:
+ self.chunks.append(self.chunk_in_progress)
+ self.chunk_in_progress = None
+ self.state_accept = self._state_accept_expecting_length
+
+ def _state_accept_reading_unused(self):
+ self.unused_data += self._get_in_buffer()
+ self._in_buffer_list = []
+
+
+class LengthPrefixedBodyDecoder(_StatefulDecoder):
+ """Decodes the length-prefixed bulk data."""
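+
+ # For orientation, the bulk data decoded here is the form produced by
+ # _encode_bulk_data above: a decimal length, '\n', the body bytes, and a
+ # trailing 'done\n'.  For example (payload illustrative only):
+ #
+ #     '5\nhellodone\n'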
+
+ def __init__(self):
+ _StatefulDecoder.__init__(self)
+ self.state_accept = self._state_accept_expecting_length
+ self.state_read = self._state_read_no_data
+ self._body = ''
+ self._trailer_buffer = ''
+
+ def next_read_size(self):
+ if self.bytes_left is not None:
+ # Ideally we want to read all the remainder of the body and the
+ # trailer in one go.
+ return self.bytes_left + 5
+ elif self.state_accept == self._state_accept_reading_trailer:
+ # Just the trailer left
+ return 5 - len(self._trailer_buffer)
+ elif self.state_accept == self._state_accept_expecting_length:
+ # There are still at least 6 bytes left ('\n' to end the length, plus
+ # 'done\n').
+ return 6
+ else:
+ # Reading excess data. Either way, 1 byte at a time is fine.
+ return 1
+
+ def read_pending_data(self):
+ """Return any pending data that has been decoded."""
+ return self.state_read()
+
+ def _state_accept_expecting_length(self):
+ in_buf = self._get_in_buffer()
+ pos = in_buf.find('\n')
+ if pos == -1:
+ return
+ self.bytes_left = int(in_buf[:pos])
+ self._set_in_buffer(in_buf[pos+1:])
+ self.state_accept = self._state_accept_reading_body
+ self.state_read = self._state_read_body_buffer
+
+ def _state_accept_reading_body(self):
+ in_buf = self._get_in_buffer()
+ self._body += in_buf
+ self.bytes_left -= len(in_buf)
+ self._set_in_buffer(None)
+ if self.bytes_left <= 0:
+ # Finished with body
+ if self.bytes_left != 0:
+ self._trailer_buffer = self._body[self.bytes_left:]
+ self._body = self._body[:self.bytes_left]
+ self.bytes_left = None
+ self.state_accept = self._state_accept_reading_trailer
+
+ def _state_accept_reading_trailer(self):
+ self._trailer_buffer += self._get_in_buffer()
+ self._set_in_buffer(None)
+ # TODO: what if the trailer does not match "done\n"? Should this raise
+ # a ProtocolViolation exception?
+ if self._trailer_buffer.startswith('done\n'):
+ self.unused_data = self._trailer_buffer[len('done\n'):]
+ self.state_accept = self._state_accept_reading_unused
+ self.finished_reading = True
+
+ def _state_accept_reading_unused(self):
+ self.unused_data += self._get_in_buffer()
+ self._set_in_buffer(None)
+
+ def _state_read_no_data(self):
+ return ''
+
+ def _state_read_body_buffer(self):
+ result = self._body
+ self._body = ''
+ return result
+
+
+class SmartClientRequestProtocolOne(SmartProtocolBase, Requester,
+ message.ResponseHandler):
+ """The client-side protocol for smart version 1."""
+
+ def __init__(self, request):
+ """Construct a SmartClientRequestProtocolOne.
+
+ :param request: A SmartClientMediumRequest to serialise onto and
+ deserialise from.
+ """
+ self._request = request
+ self._body_buffer = None
+ self._request_start_time = None
+ self._last_verb = None
+ self._headers = None
+
+ def set_headers(self, headers):
+ self._headers = dict(headers)
+
+ def call(self, *args):
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call: %s', repr(args)[1:-1])
+ if getattr(self._request._medium, 'base', None) is not None:
+ mutter(' (to %s)', self._request._medium.base)
+ self._request_start_time = osutils.timer_func()
+ self._write_args(args)
+ self._request.finished_writing()
+ self._last_verb = args[0]
+
+ def call_with_body_bytes(self, args, body):
+ """Make a remote call of args with body bytes 'body'.
+
+ After calling this, call read_response_tuple to find the result out.
+ """
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call w/body: %s (%r...)', repr(args)[1:-1], body[:20])
+ if getattr(self._request._medium, '_path', None) is not None:
+ mutter(' (to %s)', self._request._medium._path)
+ mutter(' %d bytes', len(body))
+ self._request_start_time = osutils.timer_func()
+ if 'hpssdetail' in debug.debug_flags:
+ mutter('hpss body content: %s', body)
+ self._write_args(args)
+ bytes = self._encode_bulk_data(body)
+ self._request.accept_bytes(bytes)
+ self._request.finished_writing()
+ self._last_verb = args[0]
+
+ def call_with_body_readv_array(self, args, body):
+ """Make a remote call with a readv array.
+
+ The body is encoded with one line per readv offset pair. The numbers in
+ each pair are separated by a comma, and no trailing \\n is emitted.
+ """
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call w/readv: %s', repr(args)[1:-1])
+ if getattr(self._request._medium, '_path', None) is not None:
+ mutter(' (to %s)', self._request._medium._path)
+ self._request_start_time = osutils.timer_func()
+ self._write_args(args)
+ readv_bytes = self._serialise_offsets(body)
+ bytes = self._encode_bulk_data(readv_bytes)
+ self._request.accept_bytes(bytes)
+ self._request.finished_writing()
+ if 'hpss' in debug.debug_flags:
+ mutter(' %d bytes in readv request', len(readv_bytes))
+ self._last_verb = args[0]
+
+ def call_with_body_stream(self, args, stream):
+ # Protocols v1 and v2 don't support body streams. So it's safe to
+ # assume that a v1/v2 server doesn't support whatever method we're
+ # trying to call with a body stream.
+ self._request.finished_writing()
+ self._request.finished_reading()
+ raise errors.UnknownSmartMethod(args[0])
+
+ def cancel_read_body(self):
+ """After expecting a body, a response code may indicate that none follows.
+
+ This method lets the domain client inform the protocol that no body
+ will be transmitted. This is a terminal method: after calling it the
+ protocol is not able to be used further.
+ """
+ self._request.finished_reading()
+
+ def _read_response_tuple(self):
+ result = self._recv_tuple()
+ if 'hpss' in debug.debug_flags:
+ if self._request_start_time is not None:
+ mutter(' result: %6.3fs %s',
+ osutils.timer_func() - self._request_start_time,
+ repr(result)[1:-1])
+ self._request_start_time = None
+ else:
+ mutter(' result: %s', repr(result)[1:-1])
+ return result
+
+ def read_response_tuple(self, expect_body=False):
+ """Read a response tuple from the wire.
+
+ This should only be called once.
+ """
+ result = self._read_response_tuple()
+ self._response_is_unknown_method(result)
+ self._raise_args_if_error(result)
+ if not expect_body:
+ self._request.finished_reading()
+ return result
+
+ def _raise_args_if_error(self, result_tuple):
+ # Later protocol versions have an explicit flag in the protocol to say
+ # if an error response is "failed" or not. In version 1 we don't have
+ # that luxury. So here is a complete list of errors that can be
+ # returned in response to existing version 1 smart requests. Responses
+ # starting with these codes are always "failed" responses.
+ v1_error_codes = [
+ 'norepository',
+ 'NoSuchFile',
+ 'FileExists',
+ 'DirectoryNotEmpty',
+ 'ShortReadvError',
+ 'UnicodeEncodeError',
+ 'UnicodeDecodeError',
+ 'ReadOnlyError',
+ 'nobranch',
+ 'NoSuchRevision',
+ 'nosuchrevision',
+ 'LockContention',
+ 'UnlockableTransport',
+ 'LockFailed',
+ 'TokenMismatch',
+ 'ReadError',
+ 'PermissionDenied',
+ ]
+ if result_tuple[0] in v1_error_codes:
+ self._request.finished_reading()
+ raise errors.ErrorFromSmartServer(result_tuple)
+
+ def _response_is_unknown_method(self, result_tuple):
+ """Raise UnknownSmartMethod if the response is an 'unknown method'
+ response to the request.
+
+ :param result_tuple: The response tuple from the server.
+ :raises: UnknownSmartMethod
+ """
+ if (result_tuple == ('error', "Generic bzr smart protocol error: "
+ "bad request '%s'" % self._last_verb) or
+ result_tuple == ('error', "Generic bzr smart protocol error: "
+ "bad request u'%s'" % self._last_verb)):
+ # The response will have no body, so we've finished reading.
+ self._request.finished_reading()
+ raise errors.UnknownSmartMethod(self._last_verb)
+
+ def read_body_bytes(self, count=-1):
+ """Read bytes from the body, decoding into a byte stream.
+
+ We read all bytes at once to ensure we've checked the trailer for
+ errors, and then feed the buffer back as read_body_bytes is called.
+ """
+ if self._body_buffer is not None:
+ return self._body_buffer.read(count)
+ _body_decoder = LengthPrefixedBodyDecoder()
+
+ while not _body_decoder.finished_reading:
+ bytes = self._request.read_bytes(_body_decoder.next_read_size())
+ if bytes == '':
+ # end of file encountered reading from server
+ raise errors.ConnectionReset(
+ "Connection lost while reading response body.")
+ _body_decoder.accept_bytes(bytes)
+ self._request.finished_reading()
+ self._body_buffer = StringIO(_body_decoder.read_pending_data())
+ # XXX: TODO check the trailer result.
+ if 'hpss' in debug.debug_flags:
+ mutter(' %d body bytes read',
+ len(self._body_buffer.getvalue()))
+ return self._body_buffer.read(count)
+
+ def _recv_tuple(self):
+ """Receive a tuple from the medium request."""
+ return _decode_tuple(self._request.read_line())
+
+ def query_version(self):
+ """Return protocol version number of the server."""
+ self.call('hello')
+ resp = self.read_response_tuple()
+ if resp == ('ok', '1'):
+ return 1
+ elif resp == ('ok', '2'):
+ return 2
+ else:
+ raise errors.SmartProtocolError("bad response %r" % (resp,))
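+
+ # On the wire (protocol one) this exchange is simply the request 'hello\n'
+ # answered by the tuple 'ok\x011\n' or 'ok\x012\n'.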
+
+ def _write_args(self, args):
+ self._write_protocol_version()
+ bytes = _encode_tuple(args)
+ self._request.accept_bytes(bytes)
+
+ def _write_protocol_version(self):
+ """Write any prefixes this protocol requires.
+
+ Version one doesn't send protocol versions.
+ """
+
+
+class SmartClientRequestProtocolTwo(SmartClientRequestProtocolOne):
+ """Version two of the client side of the smart protocol.
+
+ This prefixes the request with the value of REQUEST_VERSION_TWO.
+ """
+
+ response_marker = RESPONSE_VERSION_TWO
+ request_marker = REQUEST_VERSION_TWO
+
+ def read_response_tuple(self, expect_body=False):
+ """Read a response tuple from the wire.
+
+ This should only be called once.
+ """
+ version = self._request.read_line()
+ if version != self.response_marker:
+ self._request.finished_reading()
+ raise errors.UnexpectedProtocolVersionMarker(version)
+ response_status = self._request.read_line()
+ result = SmartClientRequestProtocolOne._read_response_tuple(self)
+ self._response_is_unknown_method(result)
+ if response_status == 'success\n':
+ self.response_status = True
+ if not expect_body:
+ self._request.finished_reading()
+ return result
+ elif response_status == 'failed\n':
+ self.response_status = False
+ self._request.finished_reading()
+ raise errors.ErrorFromSmartServer(result)
+ else:
+ raise errors.SmartProtocolError(
+ 'bad protocol status %r' % response_status)
+
+ def _write_protocol_version(self):
+ """Write any prefixes this protocol requires.
+
+ Version two sends the value of REQUEST_VERSION_TWO.
+ """
+ self._request.accept_bytes(self.request_marker)
+
+ def read_streamed_body(self):
+ """Read bytes from the body, decoding into a byte stream.
+ """
+ # Read no more than 64k at a time so that we don't risk error 10055 (no
+ # buffer space available) on Windows.
+ _body_decoder = ChunkedBodyDecoder()
+ while not _body_decoder.finished_reading:
+ bytes = self._request.read_bytes(_body_decoder.next_read_size())
+ if bytes == '':
+ # end of file encountered reading from server
+ raise errors.ConnectionReset(
+ "Connection lost while reading streamed body.")
+ _body_decoder.accept_bytes(bytes)
+ for body_bytes in iter(_body_decoder.read_next_chunk, None):
+ if 'hpss' in debug.debug_flags and type(body_bytes) is str:
+ mutter(' %d byte chunk read',
+ len(body_bytes))
+ yield body_bytes
+ self._request.finished_reading()
+
+
+def build_server_protocol_three(backing_transport, write_func,
+ root_client_path, jail_root=None):
+ request_handler = request.SmartServerRequestHandler(
+ backing_transport, commands=request.request_handlers,
+ root_client_path=root_client_path, jail_root=jail_root)
+ responder = ProtocolThreeResponder(write_func)
+ message_handler = message.ConventionalRequestHandler(request_handler, responder)
+ return ProtocolThreeDecoder(message_handler)
+
+
+class ProtocolThreeDecoder(_StatefulDecoder):
+
+ response_marker = RESPONSE_VERSION_THREE
+ request_marker = REQUEST_VERSION_THREE
+
+ def __init__(self, message_handler, expect_version_marker=False):
+ _StatefulDecoder.__init__(self)
+ self._has_dispatched = False
+ # Initial state
+ if expect_version_marker:
+ self.state_accept = self._state_accept_expecting_protocol_version
+ # We're expecting at least the protocol version marker + some
+ # headers.
+ self._number_needed_bytes = len(MESSAGE_VERSION_THREE) + 4
+ else:
+ self.state_accept = self._state_accept_expecting_headers
+ self._number_needed_bytes = 4
+ self.decoding_failed = False
+ self.request_handler = self.message_handler = message_handler
+
+ def accept_bytes(self, bytes):
+ self._number_needed_bytes = None
+ try:
+ _StatefulDecoder.accept_bytes(self, bytes)
+ except KeyboardInterrupt:
+ raise
+ except errors.SmartMessageHandlerError, exception:
+ # We do *not* set self.decoding_failed here. The message handler
+ # has raised an error, but the decoder is still able to parse bytes
+ # and determine when this message ends.
+ if not isinstance(exception.exc_value, errors.UnknownSmartMethod):
+ log_exception_quietly()
+ self.message_handler.protocol_error(exception.exc_value)
+ # The state machine is ready to continue decoding, but the
+ # exception has interrupted the loop that runs the state machine.
+ # So we call accept_bytes again to restart it.
+ self.accept_bytes('')
+ except Exception, exception:
+ # The decoder itself has raised an exception. We cannot continue
+ # decoding.
+ self.decoding_failed = True
+ if isinstance(exception, errors.UnexpectedProtocolVersionMarker):
+ # This happens during normal operation when the client tries a
+ # protocol version the server doesn't understand, so no need to
+ # log a traceback every time.
+ # Note that this can only happen when
+ # expect_version_marker=True, which is only the case on the
+ # client side.
+ pass
+ else:
+ log_exception_quietly()
+ self.message_handler.protocol_error(exception)
+
+ def _extract_length_prefixed_bytes(self):
+ if self._in_buffer_len < 4:
+ # A length prefix by itself is 4 bytes, and we don't even have that
+ # many yet.
+ raise _NeedMoreBytes(4)
+ (length,) = struct.unpack('!L', self._get_in_bytes(4))
+ end_of_bytes = 4 + length
+ if self._in_buffer_len < end_of_bytes:
+ # We haven't yet read as many bytes as the length-prefix says there
+ # are.
+ raise _NeedMoreBytes(end_of_bytes)
+ # Extract the bytes from the buffer.
+ in_buf = self._get_in_buffer()
+ bytes = in_buf[4:end_of_bytes]
+ self._set_in_buffer(in_buf[end_of_bytes:])
+ return bytes
+
+ def _extract_prefixed_bencoded_data(self):
+ prefixed_bytes = self._extract_length_prefixed_bytes()
+ try:
+ decoded = bdecode_as_tuple(prefixed_bytes)
+ except ValueError:
+ raise errors.SmartProtocolError(
+ 'Bytes %r not bencoded' % (prefixed_bytes,))
+ return decoded
+
+ def _extract_single_byte(self):
+ if self._in_buffer_len == 0:
+ # The buffer is empty
+ raise _NeedMoreBytes(1)
+ in_buf = self._get_in_buffer()
+ one_byte = in_buf[0]
+ self._set_in_buffer(in_buf[1:])
+ return one_byte
+
+ def _state_accept_expecting_protocol_version(self):
+ needed_bytes = len(MESSAGE_VERSION_THREE) - self._in_buffer_len
+ in_buf = self._get_in_buffer()
+ if needed_bytes > 0:
+ # We don't have enough bytes to check if the protocol version
+ # marker is right. But we can check if it is already wrong by
+ # checking that the start of MESSAGE_VERSION_THREE matches what
+ # we've read so far.
+ # [In fact, if the remote end isn't bzr we might never receive
+ # len(MESSAGE_VERSION_THREE) bytes. So if the bytes we have so far
+ # are wrong then we should just raise immediately rather than
+ # stall.]
+ if not MESSAGE_VERSION_THREE.startswith(in_buf):
+ # We have enough bytes to know the protocol version is wrong
+ raise errors.UnexpectedProtocolVersionMarker(in_buf)
+ raise _NeedMoreBytes(len(MESSAGE_VERSION_THREE))
+ if not in_buf.startswith(MESSAGE_VERSION_THREE):
+ raise errors.UnexpectedProtocolVersionMarker(in_buf)
+ self._set_in_buffer(in_buf[len(MESSAGE_VERSION_THREE):])
+ self.state_accept = self._state_accept_expecting_headers
+
+ def _state_accept_expecting_headers(self):
+ decoded = self._extract_prefixed_bencoded_data()
+ if type(decoded) is not dict:
+ raise errors.SmartProtocolError(
+ 'Header object %r is not a dict' % (decoded,))
+ self.state_accept = self._state_accept_expecting_message_part
+ try:
+ self.message_handler.headers_received(decoded)
+ except:
+ raise errors.SmartMessageHandlerError(sys.exc_info())
+
+ def _state_accept_expecting_message_part(self):
+ message_part_kind = self._extract_single_byte()
+ if message_part_kind == 'o':
+ self.state_accept = self._state_accept_expecting_one_byte
+ elif message_part_kind == 's':
+ self.state_accept = self._state_accept_expecting_structure
+ elif message_part_kind == 'b':
+ self.state_accept = self._state_accept_expecting_bytes
+ elif message_part_kind == 'e':
+ self.done()
+ else:
+ raise errors.SmartProtocolError(
+ 'Bad message kind byte: %r' % (message_part_kind,))
+
+ def _state_accept_expecting_one_byte(self):
+ byte = self._extract_single_byte()
+ self.state_accept = self._state_accept_expecting_message_part
+ try:
+ self.message_handler.byte_part_received(byte)
+ except:
+ raise errors.SmartMessageHandlerError(sys.exc_info())
+
+ def _state_accept_expecting_bytes(self):
+ # XXX: this should not buffer whole message part, but instead deliver
+ # the bytes as they arrive.
+ prefixed_bytes = self._extract_length_prefixed_bytes()
+ self.state_accept = self._state_accept_expecting_message_part
+ try:
+ self.message_handler.bytes_part_received(prefixed_bytes)
+ except:
+ raise errors.SmartMessageHandlerError(sys.exc_info())
+
+ def _state_accept_expecting_structure(self):
+ structure = self._extract_prefixed_bencoded_data()
+ self.state_accept = self._state_accept_expecting_message_part
+ try:
+ self.message_handler.structure_part_received(structure)
+ except:
+ raise errors.SmartMessageHandlerError(sys.exc_info())
+
+ def done(self):
+ self.unused_data = self._get_in_buffer()
+ self._set_in_buffer(None)
+ self.state_accept = self._state_accept_reading_unused
+ try:
+ self.message_handler.end_received()
+ except:
+ raise errors.SmartMessageHandlerError(sys.exc_info())
+
+ def _state_accept_reading_unused(self):
+ self.unused_data += self._get_in_buffer()
+ self._set_in_buffer(None)
+
+ def next_read_size(self):
+ if self.state_accept == self._state_accept_reading_unused:
+ return 0
+ elif self.decoding_failed:
+ # An exception occurred while processing this message, probably from
+ # self.message_handler. We're not sure that this state machine is
+ # in a consistent state, so just signal that we're done (i.e. give
+ # up).
+ return 0
+ else:
+ if self._number_needed_bytes is not None:
+ return self._number_needed_bytes - self._in_buffer_len
+ else:
+ raise AssertionError("don't know how many bytes are expected!")
+
+
+class _ProtocolThreeEncoder(object):
+
+ response_marker = request_marker = MESSAGE_VERSION_THREE
+ BUFFER_SIZE = 1024*1024 # 1 MiB buffer before flushing
+
+ def __init__(self, write_func):
+ self._buf = []
+ self._buf_len = 0
+ self._real_write_func = write_func
+
+ def _write_func(self, bytes):
+ # TODO: Another possibility would be to turn this into an async model.
+ # Where we let another thread know that we have some bytes if
+ # they want it, but we don't actually block for it
+ # Note that osutils.send_all always sends 64kB chunks anyway, so
+ # we might just push out smaller bits at a time?
+ self._buf.append(bytes)
+ self._buf_len += len(bytes)
+ if self._buf_len > self.BUFFER_SIZE:
+ self.flush()
+
+ def flush(self):
+ if self._buf:
+ self._real_write_func(''.join(self._buf))
+ del self._buf[:]
+ self._buf_len = 0
+
+ def _serialise_offsets(self, offsets):
+ """Serialise a readv offset list."""
+ txt = []
+ for start, length in offsets:
+ txt.append('%d,%d' % (start, length))
+ return '\n'.join(txt)
+
+ def _write_protocol_version(self):
+ self._write_func(MESSAGE_VERSION_THREE)
+
+ def _write_prefixed_bencode(self, structure):
+ bytes = bencode(structure)
+ self._write_func(struct.pack('!L', len(bytes)))
+ self._write_func(bytes)
+
+ def _write_headers(self, headers):
+ self._write_prefixed_bencode(headers)
+
+ def _write_structure(self, args):
+ self._write_func('s')
+ utf8_args = []
+ for arg in args:
+ if type(arg) is unicode:
+ utf8_args.append(arg.encode('utf8'))
+ else:
+ utf8_args.append(arg)
+ self._write_prefixed_bencode(utf8_args)
+
+ def _write_end(self):
+ self._write_func('e')
+ self.flush()
+
+ def _write_prefixed_body(self, bytes):
+ self._write_func('b')
+ self._write_func(struct.pack('!L', len(bytes)))
+ self._write_func(bytes)
+
+ def _write_chunked_body_start(self):
+ self._write_func('oC')
+
+ def _write_error_status(self):
+ self._write_func('oE')
+
+ def _write_success_status(self):
+ self._write_func('oS')
+
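+# For reference, a version-three request written by
+# ProtocolThreeRequester.call('verb') (with 'verb' as a placeholder and no
+# headers set) works out to roughly:
+#
+#     'bzr message 3 (bzr 1.6)\n'             # protocol marker
+#     '\x00\x00\x00\x02' 'de'                 # length-prefixed bencoded headers ({})
+#     's' '\x00\x00\x00\x08' 'l4:verbe'       # structure part carrying the args
+#     'e'                                     # end of message
+#
+# Responses follow the headers with an 'oS' or 'oE' status part, the result
+# structure, optional 'b'-prefixed body parts, and the final 'e'.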
+
+class ProtocolThreeResponder(_ProtocolThreeEncoder):
+
+ def __init__(self, write_func):
+ _ProtocolThreeEncoder.__init__(self, write_func)
+ self.response_sent = False
+ self._headers = {'Software version': bzrlib.__version__}
+ if 'hpss' in debug.debug_flags:
+ self._thread_id = thread.get_ident()
+ self._response_start_time = None
+
+ def _trace(self, action, message, extra_bytes=None, include_time=False):
+ if self._response_start_time is None:
+ self._response_start_time = osutils.timer_func()
+ if include_time:
+ t = '%5.3fs ' % (time.clock() - self._response_start_time)
+ else:
+ t = ''
+ if extra_bytes is None:
+ extra = ''
+ else:
+ extra = ' ' + repr(extra_bytes[:40])
+ if len(extra) > 33:
+ extra = extra[:29] + extra[-1] + '...'
+ mutter('%12s: [%s] %s%s%s'
+ % (action, self._thread_id, t, message, extra))
+
+ def send_error(self, exception):
+ if self.response_sent:
+ raise AssertionError(
+ "send_error(%s) called, but response already sent."
+ % (exception,))
+ if isinstance(exception, errors.UnknownSmartMethod):
+ failure = request.FailedSmartServerResponse(
+ ('UnknownMethod', exception.verb))
+ self.send_response(failure)
+ return
+ if 'hpss' in debug.debug_flags:
+ self._trace('error', str(exception))
+ self.response_sent = True
+ self._write_protocol_version()
+ self._write_headers(self._headers)
+ self._write_error_status()
+ self._write_structure(('error', str(exception)))
+ self._write_end()
+
+ def send_response(self, response):
+ if self.response_sent:
+ raise AssertionError(
+ "send_response(%r) called, but response already sent."
+ % (response,))
+ self.response_sent = True
+ self._write_protocol_version()
+ self._write_headers(self._headers)
+ if response.is_successful():
+ self._write_success_status()
+ else:
+ self._write_error_status()
+ if 'hpss' in debug.debug_flags:
+ self._trace('response', repr(response.args))
+ self._write_structure(response.args)
+ if response.body is not None:
+ self._write_prefixed_body(response.body)
+ if 'hpss' in debug.debug_flags:
+ self._trace('body', '%d bytes' % (len(response.body),),
+ response.body, include_time=True)
+ elif response.body_stream is not None:
+ count = num_bytes = 0
+ first_chunk = None
+ for exc_info, chunk in _iter_with_errors(response.body_stream):
+ count += 1
+ if exc_info is not None:
+ self._write_error_status()
+ error_struct = request._translate_error(exc_info[1])
+ self._write_structure(error_struct)
+ break
+ else:
+ if isinstance(chunk, request.FailedSmartServerResponse):
+ self._write_error_status()
+ self._write_structure(chunk.args)
+ break
+ num_bytes += len(chunk)
+ if first_chunk is None:
+ first_chunk = chunk
+ self._write_prefixed_body(chunk)
+ self.flush()
+ if 'hpssdetail' in debug.debug_flags:
+ # Not worth timing separately, as _write_func is
+ # actually buffered
+ self._trace('body chunk',
+ '%d bytes' % (len(chunk),),
+ chunk, include_time=False)
+ if 'hpss' in debug.debug_flags:
+ self._trace('body stream',
+ '%d bytes %d chunks' % (num_bytes, count),
+ first_chunk)
+ self._write_end()
+ if 'hpss' in debug.debug_flags:
+ self._trace('response end', '', include_time=True)
+
+
+def _iter_with_errors(iterable):
+ """Handle errors from iterable.next().
+
+ Use like::
+
+ for exc_info, value in _iter_with_errors(iterable):
+ ...
+
+ This is a safer alternative to::
+
+ try:
+ for value in iterable:
+ ...
+ except:
+ ...
+
+ Because the latter will catch errors from the for-loop body, not just
+ iterable.next()
+
+ If an error occurs, exc_info will be an exc_info tuple, and the generator
+ will terminate. Otherwise exc_info will be None, and value will be the
+ value from iterable.next(). Note that KeyboardInterrupt and SystemExit
+ will not be intercepted.
+ """
+ iterator = iter(iterable)
+ while True:
+ try:
+ yield None, iterator.next()
+ except StopIteration:
+ return
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception:
+ mutter('_iter_with_errors caught error')
+ log_exception_quietly()
+ yield sys.exc_info(), None
+ return
+
+
+class ProtocolThreeRequester(_ProtocolThreeEncoder, Requester):
+
+ def __init__(self, medium_request):
+ _ProtocolThreeEncoder.__init__(self, medium_request.accept_bytes)
+ self._medium_request = medium_request
+ self._headers = {}
+ self.body_stream_started = None
+
+ def set_headers(self, headers):
+ self._headers = headers.copy()
+
+ def call(self, *args):
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call: %s', repr(args)[1:-1])
+ base = getattr(self._medium_request._medium, 'base', None)
+ if base is not None:
+ mutter(' (to %s)', base)
+ self._request_start_time = osutils.timer_func()
+ self._write_protocol_version()
+ self._write_headers(self._headers)
+ self._write_structure(args)
+ self._write_end()
+ self._medium_request.finished_writing()
+
+ def call_with_body_bytes(self, args, body):
+ """Make a remote call of args with body bytes 'body'.
+
+ After calling this, call read_response_tuple to find the result out.
+ """
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call w/body: %s (%r...)', repr(args)[1:-1], body[:20])
+ path = getattr(self._medium_request._medium, '_path', None)
+ if path is not None:
+ mutter(' (to %s)', path)
+ mutter(' %d bytes', len(body))
+ self._request_start_time = osutils.timer_func()
+ self._write_protocol_version()
+ self._write_headers(self._headers)
+ self._write_structure(args)
+ self._write_prefixed_body(body)
+ self._write_end()
+ self._medium_request.finished_writing()
+
+ def call_with_body_readv_array(self, args, body):
+ """Make a remote call with a readv array.
+
+ The body is encoded with one line per readv offset pair. The numbers in
+ each pair are separated by a comma, and no trailing \\n is emitted.
+ """
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call w/readv: %s', repr(args)[1:-1])
+ path = getattr(self._medium_request._medium, '_path', None)
+ if path is not None:
+ mutter(' (to %s)', path)
+ self._request_start_time = osutils.timer_func()
+ self._write_protocol_version()
+ self._write_headers(self._headers)
+ self._write_structure(args)
+ readv_bytes = self._serialise_offsets(body)
+ if 'hpss' in debug.debug_flags:
+ mutter(' %d bytes in readv request', len(readv_bytes))
+ self._write_prefixed_body(readv_bytes)
+ self._write_end()
+ self._medium_request.finished_writing()
+
+ def call_with_body_stream(self, args, stream):
+ if 'hpss' in debug.debug_flags:
+ mutter('hpss call w/body stream: %r', args)
+ path = getattr(self._medium_request._medium, '_path', None)
+ if path is not None:
+ mutter(' (to %s)', path)
+ self._request_start_time = osutils.timer_func()
+ self.body_stream_started = False
+ self._write_protocol_version()
+ self._write_headers(self._headers)
+ self._write_structure(args)
+ # TODO: notice if the server has sent an early error reply before we
+ # have finished sending the stream. We would notice at the end
+ # anyway, but if the medium can deliver it early then it's good
+ # to short-circuit the whole request...
+ # Provoke any ConnectionReset failures before we start the body stream.
+ self.flush()
+ self.body_stream_started = True
+ for exc_info, part in _iter_with_errors(stream):
+ if exc_info is not None:
+ # Iterating the stream failed. Cleanly abort the request.
+ self._write_error_status()
+ # Currently the client unconditionally sends ('error',) as the
+ # error args.
+ self._write_structure(('error',))
+ self._write_end()
+ self._medium_request.finished_writing()
+ raise exc_info[0], exc_info[1], exc_info[2]
+ else:
+ self._write_prefixed_body(part)
+ self.flush()
+ self._write_end()
+ self._medium_request.finished_writing()
+
diff --git a/bzrlib/smart/repository.py b/bzrlib/smart/repository.py
new file mode 100644
index 0000000..f8829ef
--- /dev/null
+++ b/bzrlib/smart/repository.py
@@ -0,0 +1,1304 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Server-side repository related request implementations."""
+
+from __future__ import absolute_import
+
+import bz2
+import os
+import Queue
+import sys
+import tempfile
+import threading
+import zlib
+
+from bzrlib import (
+ bencode,
+ errors,
+ estimate_compressed_size,
+ inventory as _mod_inventory,
+ inventory_delta,
+ osutils,
+ pack,
+ trace,
+ ui,
+ vf_search,
+ )
+from bzrlib.bzrdir import BzrDir
+from bzrlib.smart.request import (
+ FailedSmartServerResponse,
+ SmartServerRequest,
+ SuccessfulSmartServerResponse,
+ )
+from bzrlib.repository import _strip_NULL_ghosts, network_format_registry
+from bzrlib import revision as _mod_revision
+from bzrlib.versionedfile import (
+ ChunkedContentFactory,
+ NetworkRecordStream,
+ record_to_fulltext_bytes,
+ )
+
+
+class SmartServerRepositoryRequest(SmartServerRequest):
+ """Common base class for Repository requests."""
+
+ def do(self, path, *args):
+ """Execute a repository request.
+
+ All Repository requests take a path to the repository as their first
+ argument. The repository must be at the exact path given by the
+ client - no searching is done.
+
+ The actual logic is delegated to self.do_repository_request.
+
+        :param path: The path for the repository as received from the
+            client.
+ :return: A SmartServerResponse from self.do_repository_request().
+ """
+ transport = self.transport_from_client_path(path)
+ bzrdir = BzrDir.open_from_transport(transport)
+ # Save the repository for use with do_body.
+ self._repository = bzrdir.open_repository()
+ return self.do_repository_request(self._repository, *args)
+
+ def do_repository_request(self, repository, *args):
+ """Override to provide an implementation for a verb."""
+ # No-op for verbs that take bodies (None as a result indicates a body
+ # is expected)
+ return None
+
+ def recreate_search(self, repository, search_bytes, discard_excess=False):
+ """Recreate a search from its serialised form.
+
+ :param discard_excess: If True, and the search refers to data we don't
+ have, just silently accept that fact - the verb calling
+ recreate_search trusts that clients will look for missing things
+ they expected and get it from elsewhere.
+ """
+ if search_bytes == 'everything':
+ return vf_search.EverythingResult(repository), None
+ lines = search_bytes.split('\n')
+ if lines[0] == 'ancestry-of':
+ heads = lines[1:]
+ search_result = vf_search.PendingAncestryResult(heads, repository)
+ return search_result, None
+ elif lines[0] == 'search':
+ return self.recreate_search_from_recipe(repository, lines[1:],
+ discard_excess=discard_excess)
+ else:
+ return (None, FailedSmartServerResponse(('BadSearch',)))
+
+ def recreate_search_from_recipe(self, repository, lines,
+ discard_excess=False):
+ """Recreate a specific revision search (vs a from-tip search).
+
+ :param discard_excess: If True, and the search refers to data we don't
+ have, just silently accept that fact - the verb calling
+ recreate_search trusts that clients will look for missing things
+ they expected and get it from elsewhere.
+ """
+ start_keys = set(lines[0].split(' '))
+ exclude_keys = set(lines[1].split(' '))
+ revision_count = int(lines[2])
+ repository.lock_read()
+ try:
+ search = repository.get_graph()._make_breadth_first_searcher(
+ start_keys)
+ while True:
+ try:
+ next_revs = search.next()
+ except StopIteration:
+ break
+ search.stop_searching_any(exclude_keys.intersection(next_revs))
+ (started_keys, excludes, included_keys) = search.get_state()
+ if (not discard_excess and len(included_keys) != revision_count):
+                # We got back a different number of revisions than expected;
+                # this is reported as NoSuchRevision, because fewer revisions
+                # indicate missing revisions, and more should never happen as
+                # the excludes list considers ghosts and ensures that ghost
+                # filling races are not a problem.
+ return (None, FailedSmartServerResponse(('NoSuchRevision',)))
+ search_result = vf_search.SearchResult(started_keys, excludes,
+ len(included_keys), included_keys)
+ return (search_result, None)
+ finally:
+ repository.unlock()
+
+
+class SmartServerRepositoryReadLocked(SmartServerRepositoryRequest):
+ """Calls self.do_readlocked_repository_request."""
+
+ def do_repository_request(self, repository, *args):
+ """Read lock a repository for do_readlocked_repository_request."""
+ repository.lock_read()
+ try:
+ return self.do_readlocked_repository_request(repository, *args)
+ finally:
+ repository.unlock()
+
+
+class SmartServerRepositoryBreakLock(SmartServerRepositoryRequest):
+ """Break a repository lock."""
+
+ def do_repository_request(self, repository):
+ repository.break_lock()
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+_lsprof_count = 0
+
+class SmartServerRepositoryGetParentMap(SmartServerRepositoryRequest):
+ """Bzr 1.2+ - get parent data for revisions during a graph search."""
+
+ no_extra_results = False
+
+ def do_repository_request(self, repository, *revision_ids):
+ """Get parent details for some revisions.
+
+        All the parents for revision_ids are returned. In addition, up to 64KB
+        of extra parent data found by performing a breadth first search from
+        revision_ids is returned. The verb takes a body containing the
+        current search state; see do_body for details.
+
+ If 'include-missing:' is in revision_ids, ghosts encountered in the
+ graph traversal for getting parent data are included in the result with
+ a prefix of 'missing:'.
+
+ :param repository: The repository to query in.
+        :param revision_ids: The utf8 encoded revision ids to answer for.
+ """
+ self._revision_ids = revision_ids
+ return None # Signal that we want a body.
+
+ def do_body(self, body_bytes):
+ """Process the current search state and perform the parent lookup.
+
+        :return: A smart server response where the body contains a utf8-encoded
+            flattened list of the parents of the revisions (the same format as
+            Repository.get_revision_graph), bz2-compressed.
+ """
+ repository = self._repository
+ repository.lock_read()
+ try:
+ return self._do_repository_request(body_bytes)
+ finally:
+ repository.unlock()
+
+ def _expand_requested_revs(self, repo_graph, revision_ids, client_seen_revs,
+ include_missing, max_size=65536):
+ result = {}
+ queried_revs = set()
+ estimator = estimate_compressed_size.ZLibEstimator(max_size)
+ next_revs = revision_ids
+ first_loop_done = False
+ while next_revs:
+ queried_revs.update(next_revs)
+ parent_map = repo_graph.get_parent_map(next_revs)
+ current_revs = next_revs
+ next_revs = set()
+ for revision_id in current_revs:
+ missing_rev = False
+ parents = parent_map.get(revision_id)
+ if parents is not None:
+ # adjust for the wire
+ if parents == (_mod_revision.NULL_REVISION,):
+ parents = ()
+ # prepare the next query
+ next_revs.update(parents)
+ encoded_id = revision_id
+ else:
+ missing_rev = True
+ encoded_id = "missing:" + revision_id
+ parents = []
+ if (revision_id not in client_seen_revs and
+ (not missing_rev or include_missing)):
+ # Client does not have this revision, give it to it.
+ # add parents to the result
+ result[encoded_id] = parents
+ # Approximate the serialized cost of this revision_id.
+ line = '%s %s\n' % (encoded_id, ' '.join(parents))
+ estimator.add_content(line)
+            # Get all the directly asked for parents, and then flesh out to
+            # 64K (compressed) or so. We do one level of depth at a time to
+            # stay in sync with the client. (The compressed size is tracked
+            # by the ZLibEstimator created above.)
+ if self.no_extra_results or (first_loop_done and estimator.full()):
+ trace.mutter('size: %d, z_size: %d'
+ % (estimator._uncompressed_size_added,
+ estimator._compressed_size_added))
+ next_revs = set()
+ break
+ # don't query things we've already queried
+ next_revs = next_revs.difference(queried_revs)
+ first_loop_done = True
+ return result
+
+ def _do_repository_request(self, body_bytes):
+ repository = self._repository
+ revision_ids = set(self._revision_ids)
+ include_missing = 'include-missing:' in revision_ids
+ if include_missing:
+ revision_ids.remove('include-missing:')
+ body_lines = body_bytes.split('\n')
+ search_result, error = self.recreate_search_from_recipe(
+ repository, body_lines)
+ if error is not None:
+ return error
+        # TODO: might be nice to start up the search again; but that's not
+        # written or tested yet.
+ client_seen_revs = set(search_result.get_keys())
+ # Always include the requested ids.
+ client_seen_revs.difference_update(revision_ids)
+
+ repo_graph = repository.get_graph()
+ result = self._expand_requested_revs(repo_graph, revision_ids,
+ client_seen_revs, include_missing)
+
+        # Sorting trivially puts lexicographically similar revision ids together.
+ # Compression FTW.
+ lines = []
+ for revision, parents in sorted(result.items()):
+ lines.append(' '.join((revision, ) + tuple(parents)))
+
+ return SuccessfulSmartServerResponse(
+ ('ok', ), bz2.compress('\n'.join(lines)))
+
+
+class SmartServerRepositoryGetRevisionGraph(SmartServerRepositoryReadLocked):
+
+ def do_readlocked_repository_request(self, repository, revision_id):
+ """Return the result of repository.get_revision_graph(revision_id).
+
+ Deprecated as of bzr 1.4, but supported for older clients.
+
+ :param repository: The repository to query in.
+ :param revision_id: The utf8 encoded revision_id to get a graph from.
+        :return: A smart server response where the body contains a utf8-encoded
+            flattened list of the revision graph.
+ """
+ if not revision_id:
+ revision_id = None
+
+ lines = []
+ graph = repository.get_graph()
+ if revision_id:
+ search_ids = [revision_id]
+ else:
+ search_ids = repository.all_revision_ids()
+ search = graph._make_breadth_first_searcher(search_ids)
+ transitive_ids = set()
+ map(transitive_ids.update, list(search))
+ parent_map = graph.get_parent_map(transitive_ids)
+ revision_graph = _strip_NULL_ghosts(parent_map)
+ if revision_id and revision_id not in revision_graph:
+ # Note that we return an empty body, rather than omitting the body.
+ # This way the client knows that it can always expect to find a body
+ # in the response for this method, even in the error case.
+ return FailedSmartServerResponse(('nosuchrevision', revision_id), '')
+
+ for revision, parents in revision_graph.items():
+ lines.append(' '.join((revision, ) + tuple(parents)))
+
+ return SuccessfulSmartServerResponse(('ok', ), '\n'.join(lines))
+
+
+class SmartServerRepositoryGetRevIdForRevno(SmartServerRepositoryReadLocked):
+
+ def do_readlocked_repository_request(self, repository, revno,
+ known_pair):
+ """Find the revid for a given revno, given a known revno/revid pair.
+
+ New in 1.17.
+ """
+ try:
+ found_flag, result = repository.get_rev_id_for_revno(revno, known_pair)
+ except errors.RevisionNotPresent, err:
+ if err.revision_id != known_pair[1]:
+ raise AssertionError(
+ 'get_rev_id_for_revno raised RevisionNotPresent for '
+ 'non-initial revision: ' + err.revision_id)
+ return FailedSmartServerResponse(
+ ('nosuchrevision', err.revision_id))
+ if found_flag:
+ return SuccessfulSmartServerResponse(('ok', result))
+ else:
+ earliest_revno, earliest_revid = result
+ return SuccessfulSmartServerResponse(
+ ('history-incomplete', earliest_revno, earliest_revid))
+
+
+class SmartServerRepositoryGetSerializerFormat(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository):
+ """Return the serializer format for this repository.
+
+ New in 2.5.0.
+
+ :param repository: The repository to query
+ :return: A smart server response ('ok', FORMAT)
+ """
+ serializer = repository.get_serializer_format()
+ return SuccessfulSmartServerResponse(('ok', serializer))
+
+
+class SmartServerRequestHasRevision(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, revision_id):
+ """Return ok if a specific revision is in the repository at path.
+
+ :param repository: The repository to query in.
+ :param revision_id: The utf8 encoded revision_id to lookup.
+ :return: A smart server response of ('yes', ) if the revision is
+ present. ('no', ) if it is missing.
+ """
+ if repository.has_revision(revision_id):
+ return SuccessfulSmartServerResponse(('yes', ))
+ else:
+ return SuccessfulSmartServerResponse(('no', ))
+
+
+class SmartServerRequestHasSignatureForRevisionId(
+ SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, revision_id):
+ """Return ok if a signature is present for a revision.
+
+ Introduced in bzr 2.5.0.
+
+ :param repository: The repository to query in.
+ :param revision_id: The utf8 encoded revision_id to lookup.
+ :return: A smart server response of ('yes', ) if a
+ signature for the revision is present,
+ ('no', ) if it is missing.
+ """
+ try:
+ if repository.has_signature_for_revision_id(revision_id):
+ return SuccessfulSmartServerResponse(('yes', ))
+ else:
+ return SuccessfulSmartServerResponse(('no', ))
+ except errors.NoSuchRevision:
+ return FailedSmartServerResponse(
+ ('nosuchrevision', revision_id))
+
+
+class SmartServerRepositoryGatherStats(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, revid, committers):
+ """Return the result of repository.gather_stats().
+
+ :param repository: The repository to query in.
+ :param revid: utf8 encoded rev id or an empty string to indicate None
+ :param committers: 'yes' or 'no'.
+
+        :return: A SmartServerResponse ('ok',), with an encoded body looking like
+ committers: 1
+ firstrev: 1234.230 0
+ latestrev: 345.700 3600
+ revisions: 2
+
+ But containing only fields returned by the gather_stats() call
+ """
+ if revid == '':
+ decoded_revision_id = None
+ else:
+ decoded_revision_id = revid
+ if committers == 'yes':
+ decoded_committers = True
+ else:
+ decoded_committers = None
+ try:
+ stats = repository.gather_stats(decoded_revision_id,
+ decoded_committers)
+ except errors.NoSuchRevision:
+ return FailedSmartServerResponse(('nosuchrevision', revid))
+
+ body = ''
+ if stats.has_key('committers'):
+ body += 'committers: %d\n' % stats['committers']
+ if stats.has_key('firstrev'):
+ body += 'firstrev: %.3f %d\n' % stats['firstrev']
+ if stats.has_key('latestrev'):
+ body += 'latestrev: %.3f %d\n' % stats['latestrev']
+ if stats.has_key('revisions'):
+ body += 'revisions: %d\n' % stats['revisions']
+ if stats.has_key('size'):
+ body += 'size: %d\n' % stats['size']
+
+ return SuccessfulSmartServerResponse(('ok', ), body)
+
+
+class SmartServerRepositoryGetRevisionSignatureText(
+ SmartServerRepositoryRequest):
+ """Return the signature text of a revision.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, revision_id):
+ """Return the result of repository.get_signature_text().
+
+ :param repository: The repository to query in.
+        :return: A smart server response with the signature text as the
+            body.
+ """
+ try:
+ text = repository.get_signature_text(revision_id)
+ except errors.NoSuchRevision, err:
+ return FailedSmartServerResponse(
+ ('nosuchrevision', err.revision))
+ return SuccessfulSmartServerResponse(('ok', ), text)
+
+
+class SmartServerRepositoryIsShared(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository):
+ """Return the result of repository.is_shared().
+
+ :param repository: The repository to query in.
+ :return: A smart server response of ('yes', ) if the repository is
+ shared, and ('no', ) if it is not.
+ """
+ if repository.is_shared():
+ return SuccessfulSmartServerResponse(('yes', ))
+ else:
+ return SuccessfulSmartServerResponse(('no', ))
+
+
+class SmartServerRepositoryMakeWorkingTrees(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository):
+ """Return the result of repository.make_working_trees().
+
+ Introduced in bzr 2.5.0.
+
+ :param repository: The repository to query in.
+ :return: A smart server response of ('yes', ) if the repository uses
+            working trees, and ('no', ) if it does not.
+ """
+ if repository.make_working_trees():
+ return SuccessfulSmartServerResponse(('yes', ))
+ else:
+ return SuccessfulSmartServerResponse(('no', ))
+
+
+class SmartServerRepositoryLockWrite(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, token=''):
+ # XXX: this probably should not have a token.
+ if token == '':
+ token = None
+ try:
+ token = repository.lock_write(token=token).repository_token
+ except errors.LockContention, e:
+ return FailedSmartServerResponse(('LockContention',))
+ except errors.UnlockableTransport:
+ return FailedSmartServerResponse(('UnlockableTransport',))
+ except errors.LockFailed, e:
+ return FailedSmartServerResponse(('LockFailed',
+ str(e.lock), str(e.why)))
+ if token is not None:
+ repository.leave_lock_in_place()
+ repository.unlock()
+ if token is None:
+ token = ''
+ return SuccessfulSmartServerResponse(('ok', token))
+
+
+class SmartServerRepositoryGetStream(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, to_network_name):
+ """Get a stream for inserting into a to_format repository.
+
+ The request body is 'search_bytes', a description of the revisions
+ being requested.
+
+ In 2.3 this verb added support for search_bytes == 'everything'. Older
+ implementations will respond with a BadSearch error, and clients should
+        catch this and fall back appropriately.
+
+ :param repository: The repository to stream from.
+ :param to_network_name: The network name of the format of the target
+ repository.
+ """
+ self._to_format = network_format_registry.get(to_network_name)
+ if self._should_fake_unknown():
+ return FailedSmartServerResponse(
+ ('UnknownMethod', 'Repository.get_stream'))
+ return None # Signal that we want a body.
+
+ def _should_fake_unknown(self):
+ """Return True if we should return UnknownMethod to the client.
+
+ This is a workaround for bugs in pre-1.19 clients that claim to
+ support receiving streams of CHK repositories. The pre-1.19 client
+ expects inventory records to be serialized in the format defined by
+ to_network_name, but in pre-1.19 (at least) that format definition
+ tries to use the xml5 serializer, which does not correctly handle
+ rich-roots. After 1.19 the client can also accept inventory-deltas
+ (which avoids this issue), and those clients will use the
+ Repository.get_stream_1.19 verb instead of this one.
+ So: if this repository is CHK, and the to_format doesn't match,
+ we should just fake an UnknownSmartMethod error so that the client
+        will fall back to VFS, rather than sending it a stream we know it
+ cannot handle.
+ """
+ from_format = self._repository._format
+ to_format = self._to_format
+ if not from_format.supports_chks:
+ # Source not CHK: that's ok
+ return False
+ if (to_format.supports_chks and
+ from_format.repository_class is to_format.repository_class and
+ from_format._serializer == to_format._serializer):
+ # Source is CHK, but target matches: that's ok
+ # (e.g. 2a->2a, or CHK2->2a)
+ return False
+ # Source is CHK, and target is not CHK or incompatible CHK. We can't
+ # generate a compatible stream.
+ return True
+
+ def do_body(self, body_bytes):
+ repository = self._repository
+ repository.lock_read()
+ try:
+ search_result, error = self.recreate_search(repository, body_bytes,
+ discard_excess=True)
+ if error is not None:
+ repository.unlock()
+ return error
+ source = repository._get_source(self._to_format)
+ stream = source.get_stream(search_result)
+ except Exception:
+ exc_info = sys.exc_info()
+ try:
+ # On non-error, unlocking is done by the body stream handler.
+ repository.unlock()
+ finally:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ return SuccessfulSmartServerResponse(('ok',),
+ body_stream=self.body_stream(stream, repository))
+
+ def body_stream(self, stream, repository):
+ byte_stream = _stream_to_byte_stream(stream, repository._format)
+ try:
+ for bytes in byte_stream:
+ yield bytes
+ except errors.RevisionNotPresent, e:
+ # This shouldn't be able to happen, but as we don't buffer
+ # everything it can in theory happen.
+ repository.unlock()
+ yield FailedSmartServerResponse(('NoSuchRevision', e.revision_id))
+ else:
+ repository.unlock()
+
+
+class SmartServerRepositoryGetStream_1_19(SmartServerRepositoryGetStream):
+ """The same as Repository.get_stream, but will return stream CHK formats to
+ clients.
+
+ See SmartServerRepositoryGetStream._should_fake_unknown.
+
+ New in 1.19.
+ """
+
+ def _should_fake_unknown(self):
+ """Returns False; we don't need to workaround bugs in 1.19+ clients."""
+ return False
+
+
+def _stream_to_byte_stream(stream, src_format):
+ """Convert a record stream to a self delimited byte stream."""
+ pack_writer = pack.ContainerSerialiser()
+ yield pack_writer.begin()
+ yield pack_writer.bytes_record(src_format.network_name(), '')
+ for substream_type, substream in stream:
+ for record in substream:
+ if record.storage_kind in ('chunked', 'fulltext'):
+ serialised = record_to_fulltext_bytes(record)
+ elif record.storage_kind == 'absent':
+ raise ValueError("Absent factory for %s" % (record.key,))
+ else:
+ serialised = record.get_bytes_as(record.storage_kind)
+ if serialised:
+ # Some streams embed the whole stream into the wire
+ # representation of the first record, which means that
+ # later records have no wire representation: we skip them.
+ yield pack_writer.bytes_record(serialised, [(substream_type,)])
+ yield pack_writer.end()
+
+
+class _ByteStreamDecoder(object):
+ """Helper for _byte_stream_to_stream.
+
+ The expected usage of this class is via the function _byte_stream_to_stream
+ which creates a _ByteStreamDecoder, pops off the stream format and then
+ yields the output of record_stream(), the main entry point to
+ _ByteStreamDecoder.
+
+ Broadly this class has to unwrap two layers of iterators:
+ (type, substream)
+ (substream details)
+
+    This is complicated by wishing to return (type, iterator_for_type) while
+    only discovering the type as we read the data for it: we can't simply pass
+    a generator down to the NetworkRecordStream parser. Instead we keep a
+    little local state to seed each NetworkRecordStream instance, and gather
+    the type that we'll be yielding.
+
+ :ivar byte_stream: The byte stream being decoded.
+ :ivar stream_decoder: A pack parser used to decode the bytestream
+ :ivar current_type: The current type, used to join adjacent records of the
+ same type into a single stream.
+ :ivar first_bytes: The first bytes to give the next NetworkRecordStream.
+ """
+
+ def __init__(self, byte_stream, record_counter):
+ """Create a _ByteStreamDecoder."""
+ self.stream_decoder = pack.ContainerPushParser()
+ self.current_type = None
+ self.first_bytes = None
+ self.byte_stream = byte_stream
+ self._record_counter = record_counter
+ self.key_count = 0
+
+ def iter_stream_decoder(self):
+ """Iterate the contents of the pack from stream_decoder."""
+ # dequeue pending items
+ for record in self.stream_decoder.read_pending_records():
+ yield record
+ # Pull bytes of the wire, decode them to records, yield those records.
+ for bytes in self.byte_stream:
+ self.stream_decoder.accept_bytes(bytes)
+ for record in self.stream_decoder.read_pending_records():
+ yield record
+
+ def iter_substream_bytes(self):
+ if self.first_bytes is not None:
+ yield self.first_bytes
+        # If we run out of pack records, signal the outer layer to stop.
+ self.first_bytes = None
+ for record in self.iter_pack_records:
+ record_names, record_bytes = record
+ record_name, = record_names
+ substream_type = record_name[0]
+ if substream_type != self.current_type:
+ # end of a substream, seed the next substream.
+ self.current_type = substream_type
+ self.first_bytes = record_bytes
+ return
+ yield record_bytes
+
+ def record_stream(self):
+ """Yield substream_type, substream from the byte stream."""
+ def wrap_and_count(pb, rc, substream):
+ """Yield records from stream while showing progress."""
+ counter = 0
+ if rc:
+ if self.current_type != 'revisions' and self.key_count != 0:
+                    # As we know the number of revisions now (in self.key_count)
+                    # we can set up and use record_counter (rc).
+ if not rc.is_initialized():
+ rc.setup(self.key_count, self.key_count)
+ for record in substream.read():
+ if rc:
+ if rc.is_initialized() and counter == rc.STEP:
+ rc.increment(counter)
+ pb.update('Estimate', rc.current, rc.max)
+ counter = 0
+ if self.current_type == 'revisions':
+                        # The total record count is proportional to the number
+                        # of revs to fetch. With a remote repository we use
+                        # self.key_count to track the number of revs. Once we
+                        # have the rev count in self.key_count, the progress
+                        # bar changes from 'Estimating..' to 'Estimate' above.
+ self.key_count += 1
+ if counter == rc.STEP:
+ pb.update('Estimating..', self.key_count)
+ counter = 0
+ counter += 1
+ yield record
+
+ self.seed_state()
+ pb = ui.ui_factory.nested_progress_bar()
+ rc = self._record_counter
+ # Make and consume sub generators, one per substream type:
+ while self.first_bytes is not None:
+ substream = NetworkRecordStream(self.iter_substream_bytes())
+ # after substream is fully consumed, self.current_type is set to
+ # the next type, and self.first_bytes is set to the matching bytes.
+ yield self.current_type, wrap_and_count(pb, rc, substream)
+ if rc:
+ pb.update('Done', rc.max, rc.max)
+ pb.finished()
+
+ def seed_state(self):
+ """Prepare the _ByteStreamDecoder to decode from the pack stream."""
+ # Set a single generator we can use to get data from the pack stream.
+ self.iter_pack_records = self.iter_stream_decoder()
+ # Seed the very first subiterator with content; after this each one
+ # seeds the next.
+ list(self.iter_substream_bytes())
+
+
+def _byte_stream_to_stream(byte_stream, record_counter=None):
+ """Convert a byte stream into a format and a stream.
+
+ :param byte_stream: A bytes iterator, as output by _stream_to_byte_stream.
+ :return: (RepositoryFormat, stream_generator)
+ """
+ decoder = _ByteStreamDecoder(byte_stream, record_counter)
+ for bytes in byte_stream:
+ decoder.stream_decoder.accept_bytes(bytes)
+ for record in decoder.stream_decoder.read_pending_records(max=1):
+ record_names, src_format_name = record
+ src_format = network_format_registry.get(src_format_name)
+ return src_format, decoder.record_stream()
+
+
+class SmartServerRepositoryUnlock(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, token):
+ try:
+ repository.lock_write(token=token)
+ except errors.TokenMismatch, e:
+ return FailedSmartServerResponse(('TokenMismatch',))
+ repository.dont_leave_lock_in_place()
+ repository.unlock()
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerRepositoryGetPhysicalLockStatus(SmartServerRepositoryRequest):
+ """Get the physical lock status for a repository.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository):
+ if repository.get_physical_lock_status():
+ return SuccessfulSmartServerResponse(('yes', ))
+ else:
+ return SuccessfulSmartServerResponse(('no', ))
+
+
+class SmartServerRepositorySetMakeWorkingTrees(SmartServerRepositoryRequest):
+
+ def do_repository_request(self, repository, str_bool_new_value):
+ if str_bool_new_value == 'True':
+ new_value = True
+ else:
+ new_value = False
+ repository.set_make_working_trees(new_value)
+ return SuccessfulSmartServerResponse(('ok',))
+
+
+class SmartServerRepositoryTarball(SmartServerRepositoryRequest):
+ """Get the raw repository files as a tarball.
+
+ The returned tarball contains a .bzr control directory which in turn
+ contains a repository.
+
+ This takes one parameter, compression, which currently must be
+ "", "gz", or "bz2".
+
+ This is used to implement the Repository.copy_content_into operation.
+ """
+
+ def do_repository_request(self, repository, compression):
+ tmp_dirname, tmp_repo = self._copy_to_tempdir(repository)
+ try:
+ controldir_name = tmp_dirname + '/.bzr'
+ return self._tarfile_response(controldir_name, compression)
+ finally:
+ osutils.rmtree(tmp_dirname)
+
+ def _copy_to_tempdir(self, from_repo):
+ tmp_dirname = osutils.mkdtemp(prefix='tmpbzrclone')
+ tmp_bzrdir = from_repo.bzrdir._format.initialize(tmp_dirname)
+ tmp_repo = from_repo._format.initialize(tmp_bzrdir)
+ from_repo.copy_content_into(tmp_repo)
+ return tmp_dirname, tmp_repo
+
+ def _tarfile_response(self, tmp_dirname, compression):
+ temp = tempfile.NamedTemporaryFile()
+ try:
+ self._tarball_of_dir(tmp_dirname, compression, temp.file)
+ # all finished; write the tempfile out to the network
+ temp.seek(0)
+ return SuccessfulSmartServerResponse(('ok',), temp.read())
+ # FIXME: Don't read the whole thing into memory here; rather stream
+ # it out from the file onto the network. mbp 20070411
+ finally:
+ temp.close()
+
+ def _tarball_of_dir(self, dirname, compression, ofile):
+ import tarfile
+ filename = os.path.basename(ofile.name)
+ tarball = tarfile.open(fileobj=ofile, name=filename,
+ mode='w|' + compression)
+ try:
+            # The tarfile module only accepts ascii names, and (I guess)
+            # packs them with their 8bit names. We know all the files
+            # within the repository have ASCII names so they should be safe
+            # to pack in.
+ dirname = dirname.encode(sys.getfilesystemencoding())
+            # Python's tarfile module includes the whole path by default, so
+            # override it.
+ if not dirname.endswith('.bzr'):
+ raise ValueError(dirname)
+ tarball.add(dirname, '.bzr') # recursive by default
+ finally:
+ tarball.close()
+
+
+class SmartServerRepositoryInsertStreamLocked(SmartServerRepositoryRequest):
+ """Insert a record stream from a RemoteSink into a repository.
+
+ This gets bytes pushed to it by the network infrastructure and turns that
+ into a bytes iterator using a thread. That is then processed by
+ _byte_stream_to_stream.
+
+ New in 1.14.
+ """
+
+ def do_repository_request(self, repository, resume_tokens, lock_token):
+ """StreamSink.insert_stream for a remote repository."""
+ repository.lock_write(token=lock_token)
+ self.do_insert_stream_request(repository, resume_tokens)
+
+ def do_insert_stream_request(self, repository, resume_tokens):
+ tokens = [token for token in resume_tokens.split(' ') if token]
+ self.tokens = tokens
+ self.repository = repository
+ self.queue = Queue.Queue()
+ self.insert_thread = threading.Thread(target=self._inserter_thread)
+ self.insert_thread.start()
+
+ def do_chunk(self, body_stream_chunk):
+ self.queue.put(body_stream_chunk)
+
+ def _inserter_thread(self):
+ try:
+ src_format, stream = _byte_stream_to_stream(
+ self.blocking_byte_stream())
+ self.insert_result = self.repository._get_sink().insert_stream(
+ stream, src_format, self.tokens)
+ self.insert_ok = True
+ except:
+ self.insert_exception = sys.exc_info()
+ self.insert_ok = False
+
+ def blocking_byte_stream(self):
+ while True:
+ bytes = self.queue.get()
+ if bytes is StopIteration:
+ return
+ else:
+ yield bytes
+
+ def do_end(self):
+ self.queue.put(StopIteration)
+ if self.insert_thread is not None:
+ self.insert_thread.join()
+ if not self.insert_ok:
+ exc_info = self.insert_exception
+ raise exc_info[0], exc_info[1], exc_info[2]
+ write_group_tokens, missing_keys = self.insert_result
+ if write_group_tokens or missing_keys:
+ # bzip needed? missing keys should typically be a small set.
+ # Should this be a streaming body response ?
+ missing_keys = sorted(missing_keys)
+ bytes = bencode.bencode((write_group_tokens, missing_keys))
+ self.repository.unlock()
+ return SuccessfulSmartServerResponse(('missing-basis', bytes))
+ else:
+ self.repository.unlock()
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+class SmartServerRepositoryInsertStream_1_19(SmartServerRepositoryInsertStreamLocked):
+ """Insert a record stream from a RemoteSink into a repository.
+
+ Same as SmartServerRepositoryInsertStreamLocked, except:
+ - the lock token argument is optional
+ - servers that implement this verb accept 'inventory-delta' records in the
+ stream.
+
+ New in 1.19.
+ """
+
+ def do_repository_request(self, repository, resume_tokens, lock_token=None):
+ """StreamSink.insert_stream for a remote repository."""
+ SmartServerRepositoryInsertStreamLocked.do_repository_request(
+ self, repository, resume_tokens, lock_token)
+
+
+class SmartServerRepositoryInsertStream(SmartServerRepositoryInsertStreamLocked):
+ """Insert a record stream from a RemoteSink into an unlocked repository.
+
+ This is the same as SmartServerRepositoryInsertStreamLocked, except it
+ takes no lock_tokens; i.e. it works with an unlocked (or lock-free, e.g.
+ like pack format) repository.
+
+ New in 1.13.
+ """
+
+ def do_repository_request(self, repository, resume_tokens):
+ """StreamSink.insert_stream for a remote repository."""
+ repository.lock_write()
+ self.do_insert_stream_request(repository, resume_tokens)
+
+
+class SmartServerRepositoryAddSignatureText(SmartServerRepositoryRequest):
+ """Add a revision signature text.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token, revision_id,
+ *write_group_tokens):
+ """Add a revision signature text.
+
+ :param repository: Repository to operate on
+ :param lock_token: Lock token
+ :param revision_id: Revision for which to add signature
+ :param write_group_tokens: Write group tokens
+ """
+ self._lock_token = lock_token
+ self._revision_id = revision_id
+ self._write_group_tokens = write_group_tokens
+ return None
+
+ def do_body(self, body_bytes):
+ """Add a signature text.
+
+ :param body_bytes: GPG signature text
+ :return: SuccessfulSmartServerResponse with arguments 'ok' and
+ the list of new write group tokens.
+ """
+ self._repository.lock_write(token=self._lock_token)
+ try:
+ self._repository.resume_write_group(self._write_group_tokens)
+ try:
+ self._repository.add_signature_text(self._revision_id,
+ body_bytes)
+ finally:
+ new_write_group_tokens = self._repository.suspend_write_group()
+ finally:
+ self._repository.unlock()
+ return SuccessfulSmartServerResponse(
+ ('ok', ) + tuple(new_write_group_tokens))
+
+
+class SmartServerRepositoryStartWriteGroup(SmartServerRepositoryRequest):
+ """Start a write group.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token):
+ """Start a write group."""
+ repository.lock_write(token=lock_token)
+ try:
+ repository.start_write_group()
+ try:
+ tokens = repository.suspend_write_group()
+ except errors.UnsuspendableWriteGroup:
+ return FailedSmartServerResponse(('UnsuspendableWriteGroup',))
+ finally:
+ repository.unlock()
+ return SuccessfulSmartServerResponse(('ok', tokens))
+
+
+class SmartServerRepositoryCommitWriteGroup(SmartServerRepositoryRequest):
+ """Commit a write group.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token,
+ write_group_tokens):
+ """Commit a write group."""
+ repository.lock_write(token=lock_token)
+ try:
+ try:
+ repository.resume_write_group(write_group_tokens)
+ except errors.UnresumableWriteGroup, e:
+ return FailedSmartServerResponse(
+ ('UnresumableWriteGroup', e.write_groups, e.reason))
+ try:
+ repository.commit_write_group()
+ except:
+ write_group_tokens = repository.suspend_write_group()
+ # FIXME JRV 2011-11-19: What if the write_group_tokens
+ # have changed?
+ raise
+ finally:
+ repository.unlock()
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+class SmartServerRepositoryAbortWriteGroup(SmartServerRepositoryRequest):
+ """Abort a write group.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token, write_group_tokens):
+ """Abort a write group."""
+ repository.lock_write(token=lock_token)
+ try:
+ try:
+ repository.resume_write_group(write_group_tokens)
+ except errors.UnresumableWriteGroup, e:
+ return FailedSmartServerResponse(
+ ('UnresumableWriteGroup', e.write_groups, e.reason))
+ repository.abort_write_group()
+ finally:
+ repository.unlock()
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+class SmartServerRepositoryCheckWriteGroup(SmartServerRepositoryRequest):
+ """Check that a write group is still valid.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token, write_group_tokens):
+ """Abort a write group."""
+ repository.lock_write(token=lock_token)
+ try:
+ try:
+ repository.resume_write_group(write_group_tokens)
+ except errors.UnresumableWriteGroup, e:
+ return FailedSmartServerResponse(
+ ('UnresumableWriteGroup', e.write_groups, e.reason))
+ else:
+ repository.suspend_write_group()
+ finally:
+ repository.unlock()
+ return SuccessfulSmartServerResponse(('ok', ))
+
+
+class SmartServerRepositoryAllRevisionIds(SmartServerRepositoryRequest):
+ """Retrieve all of the revision ids in a repository.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository):
+ revids = repository.all_revision_ids()
+ return SuccessfulSmartServerResponse(("ok", ), "\n".join(revids))
+
+
+class SmartServerRepositoryReconcile(SmartServerRepositoryRequest):
+ """Reconcile a repository.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token):
+ try:
+ repository.lock_write(token=lock_token)
+ except errors.TokenLockingNotSupported, e:
+ return FailedSmartServerResponse(
+ ('TokenLockingNotSupported', ))
+ try:
+ reconciler = repository.reconcile()
+ finally:
+ repository.unlock()
+ body = [
+ "garbage_inventories: %d\n" % reconciler.garbage_inventories,
+ "inconsistent_parents: %d\n" % reconciler.inconsistent_parents,
+ ]
+ return SuccessfulSmartServerResponse(('ok', ), "".join(body))
+
+
+class SmartServerRepositoryPack(SmartServerRepositoryRequest):
+ """Pack a repository.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository, lock_token, clean_obsolete_packs):
+ self._repository = repository
+ self._lock_token = lock_token
+ if clean_obsolete_packs == 'True':
+ self._clean_obsolete_packs = True
+ else:
+ self._clean_obsolete_packs = False
+ return None
+
+ def do_body(self, body_bytes):
+ if body_bytes == "":
+ hint = None
+ else:
+ hint = body_bytes.splitlines()
+ self._repository.lock_write(token=self._lock_token)
+ try:
+ self._repository.pack(hint, self._clean_obsolete_packs)
+ finally:
+ self._repository.unlock()
+ return SuccessfulSmartServerResponse(("ok", ), )
+
+
+class SmartServerRepositoryIterFilesBytes(SmartServerRepositoryRequest):
+ """Iterate over the contents of files.
+
+    The client sends a list of desired files to stream, one per line;
+    each line is a file id and a revision, separated by \0.
+
+ The server replies with a stream. Each entry is preceded by a header,
+ which can either be:
+
+ * "ok\x00IDX\n" where IDX is the index of the entry in the desired files
+ list sent by the client. This header is followed by the contents of
+      the file, zlib-compressed.
+ * "absent\x00FILEID\x00REVISION\x00IDX" to indicate a text is missing.
+ The client can then raise an appropriate RevisionNotPresent error
+ or check its fallback repositories.
+
+ New in 2.5.
+ """
+
+ def body_stream(self, repository, desired_files):
+ self._repository.lock_read()
+ try:
+ text_keys = {}
+ for i, key in enumerate(desired_files):
+ text_keys[key] = i
+ for record in repository.texts.get_record_stream(text_keys,
+ 'unordered', True):
+ identifier = text_keys[record.key]
+ if record.storage_kind == 'absent':
+ yield "absent\0%s\0%s\0%d\n" % (record.key[0],
+ record.key[1], identifier)
+ # FIXME: Way to abort early?
+ continue
+ yield "ok\0%d\n" % identifier
+ compressor = zlib.compressobj()
+ for bytes in record.get_bytes_as('chunked'):
+ data = compressor.compress(bytes)
+ if data:
+ yield data
+ data = compressor.flush()
+ if data:
+ yield data
+ finally:
+ self._repository.unlock()
+
+ def do_body(self, body_bytes):
+ desired_files = [
+ tuple(l.split("\0")) for l in body_bytes.splitlines()]
+ return SuccessfulSmartServerResponse(('ok', ),
+ body_stream=self.body_stream(self._repository, desired_files))
+
+ def do_repository_request(self, repository):
+ # Signal that we want a body
+ return None
+
+
+class SmartServerRepositoryIterRevisions(SmartServerRepositoryRequest):
+ """Stream a list of revisions.
+
+ The client sends a list of newline-separated revision ids in the
+ body of the request and the server replies with the serializer format,
+    and a stream of zlib-compressed revision texts (using the specified
+ serializer format).
+
+ Any revisions the server does not have are omitted from the stream.
+
+ New in 2.5.
+ """
+
+ def do_repository_request(self, repository):
+ self._repository = repository
+ # Signal there is a body
+ return None
+
+ def do_body(self, body_bytes):
+ revision_ids = body_bytes.split("\n")
+ return SuccessfulSmartServerResponse(
+ ('ok', self._repository.get_serializer_format()),
+ body_stream=self.body_stream(self._repository, revision_ids))
+
+ def body_stream(self, repository, revision_ids):
+ self._repository.lock_read()
+ try:
+ for record in repository.revisions.get_record_stream(
+ [(revid,) for revid in revision_ids], 'unordered', True):
+ if record.storage_kind == 'absent':
+ continue
+ yield zlib.compress(record.get_bytes_as('fulltext'))
+ finally:
+ self._repository.unlock()
+
+
+class SmartServerRepositoryGetInventories(SmartServerRepositoryRequest):
+ """Get the inventory deltas for a set of revision ids.
+
+ This accepts a list of revision ids, and then sends a chain
+    of deltas for the inventories of those revisions. The base of the
+    first delta is the empty inventory.
+
+ The server writes back zlibbed serialized inventory deltas,
+ in the ordering specified. The base for each delta is the
+ inventory generated by the previous delta.
+
+ New in 2.5.
+ """
+
+ def _inventory_delta_stream(self, repository, ordering, revids):
+ prev_inv = _mod_inventory.Inventory(root_id=None,
+ revision_id=_mod_revision.NULL_REVISION)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ repository.supports_rich_root(),
+ repository._format.supports_tree_reference)
+ repository.lock_read()
+ try:
+ for inv, revid in repository._iter_inventories(revids, ordering):
+ if inv is None:
+ continue
+ inv_delta = inv._make_delta(prev_inv)
+ lines = serializer.delta_to_lines(
+ prev_inv.revision_id, inv.revision_id, inv_delta)
+ yield ChunkedContentFactory(inv.revision_id, None, None, lines)
+ prev_inv = inv
+ finally:
+ repository.unlock()
+
+ def body_stream(self, repository, ordering, revids):
+ substream = self._inventory_delta_stream(repository,
+ ordering, revids)
+ return _stream_to_byte_stream([('inventory-deltas', substream)],
+ repository._format)
+
+ def do_body(self, body_bytes):
+ return SuccessfulSmartServerResponse(('ok', ),
+ body_stream=self.body_stream(self._repository, self._ordering,
+ body_bytes.splitlines()))
+
+ def do_repository_request(self, repository, ordering):
+ if ordering == 'unordered':
+ # inventory deltas for a topologically sorted stream
+ # are likely to be smaller
+ ordering = 'topological'
+ self._ordering = ordering
+ # Signal that we want a body
+ return None
diff --git a/bzrlib/smart/request.py b/bzrlib/smart/request.py
new file mode 100644
index 0000000..dd33af7
--- /dev/null
+++ b/bzrlib/smart/request.py
@@ -0,0 +1,776 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Infrastructure for server-side request handlers.
+
+Interesting module attributes:
+ * The request_handlers registry maps verb names to SmartServerRequest
+ classes.
+ * The jail_info threading.local() object is used to prevent accidental
+ opening of BzrDirs outside of the backing transport, or any other
+ transports placed in jail_info.transports. The jail_info is reset on
+ every call into a request handler (which can happen an arbitrary number
+ of times during a request).
+"""
+
+from __future__ import absolute_import
+
+# XXX: The class names are a little confusing: the protocol will instantiate a
+# SmartServerRequestHandler, whose dispatch_command method creates an instance
+# of a SmartServerRequest subclass.
+
+
+import threading
+
+from bzrlib import (
+ debug,
+ errors,
+ osutils,
+ registry,
+ revision,
+ trace,
+ urlutils,
+ )
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import bzrdir
+from bzrlib.bundle import serializer
+
+import tempfile
+import thread
+""")
+
+
+jail_info = threading.local()
+jail_info.transports = None
+
+
+def _install_hook():
+ bzrdir.BzrDir.hooks.install_named_hook(
+ 'pre_open', _pre_open_hook, 'checking server jail')
+
+
+def _pre_open_hook(transport):
+ allowed_transports = getattr(jail_info, 'transports', None)
+ if allowed_transports is None:
+ return
+ abspath = transport.base
+ for allowed_transport in allowed_transports:
+ try:
+ allowed_transport.relpath(abspath)
+ except errors.PathNotChild:
+ continue
+ else:
+ return
+ raise errors.JailBreak(abspath)
+
+
+_install_hook()
+
+
+class SmartServerRequest(object):
+ """Base class for request handlers.
+
+ To define a new request, subclass this class and override the `do` method
+ (and if appropriate, `do_body` as well). Request implementors should take
+ care to call `translate_client_path` and `transport_from_client_path` as
+ appropriate when dealing with paths received from the client.
+ """
+ # XXX: rename this class to BaseSmartServerRequestHandler ? A request
+ # *handler* is a different concept to the request.
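+    # A minimal sketch of a new verb, using a hypothetical 'Example.echo'
+    # name that is not part of bzrlib's registered verbs:
+    #
+    #   class ExampleEchoRequest(SmartServerRequest):
+    #       def do(self, text):
+    #           return SuccessfulSmartServerResponse(('ok', text))
+    #
+    # Such a class would then be registered in the request_handlers registry
+    # so the server can dispatch the verb name to it.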
+
+ def __init__(self, backing_transport, root_client_path='/', jail_root=None):
+ """Constructor.
+
+ :param backing_transport: the base transport to be used when performing
+ this request.
+ :param root_client_path: the client path that maps to the root of
+ backing_transport. This is used to interpret relpaths received
+ from the client. Clients will not be able to refer to paths above
+ this root. If root_client_path is None, then no translation will
+ be performed on client paths. Default is '/'.
+ :param jail_root: if specified, the root of the BzrDir.open jail to use
+ instead of backing_transport.
+ """
+ self._backing_transport = backing_transport
+ if jail_root is None:
+ jail_root = backing_transport
+ self._jail_root = jail_root
+ if root_client_path is not None:
+ if not root_client_path.startswith('/'):
+ root_client_path = '/' + root_client_path
+ if not root_client_path.endswith('/'):
+ root_client_path += '/'
+ self._root_client_path = root_client_path
+ self._body_chunks = []
+
+ def _check_enabled(self):
+ """Raises DisabledMethod if this method is disabled."""
+ pass
+
+ def do(self, *args):
+ """Mandatory extension point for SmartServerRequest subclasses.
+
+ Subclasses must implement this.
+
+ This should return a SmartServerResponse if this command expects to
+ receive no body.
+ """
+ raise NotImplementedError(self.do)
+
+ def execute(self, *args):
+ """Public entry point to execute this request.
+
+ It will return a SmartServerResponse if the command does not expect a
+ body.
+
+ :param args: the arguments of the request.
+ """
+ self._check_enabled()
+ return self.do(*args)
+
+ def do_body(self, body_bytes):
+ """Called if the client sends a body with the request.
+
+ The do() method is still called, and must have returned None.
+
+ Must return a SmartServerResponse.
+ """
+ if body_bytes != '':
+ raise errors.SmartProtocolError('Request does not expect a body')
+
+ def do_chunk(self, chunk_bytes):
+ """Called with each body chunk if the request has a streamed body.
+
+ The do() method is still called, and must have returned None.
+ """
+ self._body_chunks.append(chunk_bytes)
+
+ def do_end(self):
+ """Called when the end of the request has been received."""
+ body_bytes = ''.join(self._body_chunks)
+ self._body_chunks = None
+ return self.do_body(body_bytes)
+
+ def setup_jail(self):
+ jail_info.transports = [self._jail_root]
+
+ def teardown_jail(self):
+ jail_info.transports = None
+
+ def translate_client_path(self, client_path):
+ """Translate a path received from a network client into a local
+ relpath.
+
+ All paths received from the client *must* be translated.
+
+ :param client_path: the path from the client.
+ :returns: a relpath that may be used with self._backing_transport
+ (unlike the untranslated client_path, which must not be used with
+ the backing transport).
+ """
+ if self._root_client_path is None:
+ # no translation necessary!
+ return client_path
+ if not client_path.startswith('/'):
+ client_path = '/' + client_path
+ if client_path + '/' == self._root_client_path:
+ return '.'
+ if client_path.startswith(self._root_client_path):
+ path = client_path[len(self._root_client_path):]
+ relpath = urlutils.joinpath('/', path)
+ if not relpath.startswith('/'):
+ raise ValueError(relpath)
+ return urlutils.escape('.' + relpath)
+ else:
+ raise errors.PathNotChild(client_path, self._root_client_path)
+
+ def transport_from_client_path(self, client_path):
+ """Get a backing transport corresponding to the location referred to by
+ a network client.
+
+ :seealso: translate_client_path
+ :returns: a transport cloned from self._backing_transport
+ """
+ relpath = self.translate_client_path(client_path)
+ return self._backing_transport.clone(relpath)
+
+
+class SmartServerResponse(object):
+ """A response to a client request.
+
+ This base class should not be used. Instead use
+ SuccessfulSmartServerResponse and FailedSmartServerResponse as appropriate.
+ """
+
+ def __init__(self, args, body=None, body_stream=None):
+ """Constructor.
+
+ :param args: tuple of response arguments.
+ :param body: string of a response body.
+ :param body_stream: iterable of bytestrings to be streamed to the
+ client.
+ """
+ self.args = args
+ if body is not None and body_stream is not None:
+ raise errors.BzrError(
+ "'body' and 'body_stream' are mutually exclusive.")
+ self.body = body
+ self.body_stream = body_stream
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+ return (other.args == self.args and
+ other.body == self.body and
+ other.body_stream is self.body_stream)
+
+ def __repr__(self):
+ return "<%s args=%r body=%r>" % (self.__class__.__name__,
+ self.args, self.body)
+
+
+class FailedSmartServerResponse(SmartServerResponse):
+ """A SmartServerResponse for a request which failed."""
+
+ def is_successful(self):
+ """FailedSmartServerResponse are not successful."""
+ return False
+
+
+class SuccessfulSmartServerResponse(SmartServerResponse):
+ """A SmartServerResponse for a successfully completed request."""
+
+ def is_successful(self):
+ """SuccessfulSmartServerResponse are successful."""
+ return True
+
+
+class SmartServerRequestHandler(object):
+ """Protocol logic for smart server.
+
+    This doesn't handle serialisation at all; it just processes requests and
+    creates responses.
+ """
+
+    # IMPORTANT FOR IMPLEMENTORS: It is important that SmartServerRequestHandler
+    # not contain encoding or decoding logic, to allow the wire protocol to vary
+    # from the object protocol: we will want to tweak the wire protocol
+    # separately from the object model, and ideally we will be able to do that
+    # without having a SmartServerRequestHandler subclass for each wire
+    # protocol, but rather just a Protocol subclass.
+
+ # TODO: Better way of representing the body for commands that take it,
+ # and allow it to be streamed into the server.
+
+ def __init__(self, backing_transport, commands, root_client_path,
+ jail_root=None):
+ """Constructor.
+
+ :param backing_transport: a Transport to handle requests for.
+ :param commands: a registry mapping command names to SmartServerRequest
+ subclasses. e.g. bzrlib.transport.smart.vfs.vfs_commands.
+ """
+ self._backing_transport = backing_transport
+ self._root_client_path = root_client_path
+ self._commands = commands
+ if jail_root is None:
+ jail_root = backing_transport
+ self._jail_root = jail_root
+ self.response = None
+ self.finished_reading = False
+ self._command = None
+ if 'hpss' in debug.debug_flags:
+ self._request_start_time = osutils.timer_func()
+ self._thread_id = thread.get_ident()
+
+ def _trace(self, action, message, extra_bytes=None, include_time=False):
+ # It is a bit of a shame that this functionality overlaps with that of
+ # ProtocolThreeRequester._trace. However, there is enough difference
+ # that just putting it in a helper doesn't help a lot. And some state
+ # is taken from the instance.
+ if include_time:
+ t = '%5.3fs ' % (osutils.timer_func() - self._request_start_time)
+ else:
+ t = ''
+ if extra_bytes is None:
+ extra = ''
+ else:
+ extra = ' ' + repr(extra_bytes[:40])
+ if len(extra) > 33:
+ extra = extra[:29] + extra[-1] + '...'
+ trace.mutter('%12s: [%s] %s%s%s'
+ % (action, self._thread_id, t, message, extra))
+
+ def accept_body(self, bytes):
+ """Accept body data."""
+ if self._command is None:
+ # no active command object, so ignore the event.
+ return
+ self._run_handler_code(self._command.do_chunk, (bytes,), {})
+ if 'hpss' in debug.debug_flags:
+ self._trace('accept body',
+ '%d bytes' % (len(bytes),), bytes)
+
+ def end_of_body(self):
+ """No more body data will be received."""
+ self._run_handler_code(self._command.do_end, (), {})
+ # cannot read after this.
+ self.finished_reading = True
+ if 'hpss' in debug.debug_flags:
+ self._trace('end of body', '', include_time=True)
+
+ def _run_handler_code(self, callable, args, kwargs):
+ """Run some handler specific code 'callable'.
+
+        If a result is returned, it is considered to be the command's response;
+        it is assigned to self.response and finished_reading is set to True.
+
+ Any exceptions caught are translated and a response object created
+ from them.
+ """
+ result = self._call_converting_errors(callable, args, kwargs)
+
+ if result is not None:
+ self.response = result
+ self.finished_reading = True
+
+ def _call_converting_errors(self, callable, args, kwargs):
+ """Call callable converting errors to Response objects."""
+ # XXX: most of this error conversion is VFS-related, and thus ought to
+ # be in SmartServerVFSRequestHandler somewhere.
+ try:
+ self._command.setup_jail()
+ try:
+ return callable(*args, **kwargs)
+ finally:
+ self._command.teardown_jail()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception, err:
+ err_struct = _translate_error(err)
+ return FailedSmartServerResponse(err_struct)
+
+ def headers_received(self, headers):
+ # Just a no-op at the moment.
+ if 'hpss' in debug.debug_flags:
+ self._trace('headers', repr(headers))
+
+ def args_received(self, args):
+ cmd = args[0]
+ args = args[1:]
+ try:
+ command = self._commands.get(cmd)
+ except LookupError:
+ if 'hpss' in debug.debug_flags:
+ self._trace('hpss unknown request',
+ cmd, repr(args)[1:-1])
+ raise errors.UnknownSmartMethod(cmd)
+ if 'hpss' in debug.debug_flags:
+ from bzrlib.smart import vfs
+ if issubclass(command, vfs.VfsRequest):
+ action = 'hpss vfs req'
+ else:
+ action = 'hpss request'
+ self._trace(action,
+ '%s %s' % (cmd, repr(args)[1:-1]))
+ self._command = command(
+ self._backing_transport, self._root_client_path, self._jail_root)
+ self._run_handler_code(self._command.execute, args, {})
+
+ def end_received(self):
+ if self._command is None:
+ # no active command object, so ignore the event.
+ return
+ self._run_handler_code(self._command.do_end, (), {})
+ if 'hpss' in debug.debug_flags:
+ self._trace('end', '', include_time=True)
+
+ def post_body_error_received(self, error_args):
+ # Just a no-op at the moment.
+ pass
+
+
+def _translate_error(err):
+ if isinstance(err, errors.NoSuchFile):
+ return ('NoSuchFile', err.path)
+ elif isinstance(err, errors.FileExists):
+ return ('FileExists', err.path)
+ elif isinstance(err, errors.DirectoryNotEmpty):
+ return ('DirectoryNotEmpty', err.path)
+ elif isinstance(err, errors.IncompatibleRepositories):
+ return ('IncompatibleRepositories', str(err.source), str(err.target),
+ str(err.details))
+ elif isinstance(err, errors.ShortReadvError):
+ return ('ShortReadvError', err.path, str(err.offset), str(err.length),
+ str(err.actual))
+ elif isinstance(err, errors.RevisionNotPresent):
+ return ('RevisionNotPresent', err.revision_id, err.file_id)
+ elif isinstance(err, errors.UnstackableRepositoryFormat):
+ return (('UnstackableRepositoryFormat', str(err.format), err.url))
+ elif isinstance(err, errors.UnstackableBranchFormat):
+ return ('UnstackableBranchFormat', str(err.format), err.url)
+ elif isinstance(err, errors.NotStacked):
+ return ('NotStacked',)
+ elif isinstance(err, errors.BzrCheckError):
+ return ('BzrCheckError', err.msg)
+ elif isinstance(err, UnicodeError):
+        # If it is a DecodeError, then most likely we are starting
+        # with a plain string.
+ str_or_unicode = err.object
+ if isinstance(str_or_unicode, unicode):
+ # XXX: UTF-8 might have \x01 (our protocol v1 and v2 separator
+ # byte) in it, so this encoding could cause broken responses.
+ # Newer clients use protocol v3, so will be fine.
+ val = 'u:' + str_or_unicode.encode('utf-8')
+ else:
+ val = 's:' + str_or_unicode.encode('base64')
+ # This handles UnicodeEncodeError or UnicodeDecodeError
+ return (err.__class__.__name__, err.encoding, val, str(err.start),
+ str(err.end), err.reason)
+ elif isinstance(err, errors.TransportNotPossible):
+ if err.msg == "readonly transport":
+ return ('ReadOnlyError', )
+ elif isinstance(err, errors.ReadError):
+ # cannot read the file
+ return ('ReadError', err.path)
+ elif isinstance(err, errors.PermissionDenied):
+ return ('PermissionDenied', err.path, err.extra)
+ elif isinstance(err, errors.TokenMismatch):
+ return ('TokenMismatch', err.given_token, err.lock_token)
+ elif isinstance(err, errors.LockContention):
+ return ('LockContention',)
+ elif isinstance(err, MemoryError):
+ # GZ 2011-02-24: Copy bzrlib.trace -Dmem_dump functionality here?
+ return ('MemoryError',)
+ # Unserialisable error. Log it, and return a generic error
+ trace.log_exception_quietly()
+ return ('error', trace._qualified_exception_name(err.__class__, True),
+ str(err))
+
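A minimal sketch of what _translate_error produces for two common errors; this is illustrative only (not part of the patch) and assumes a standard bzrlib install:

    from bzrlib import errors
    from bzrlib.smart import request

    # Each error is flattened to a tuple of strings for the wire.
    print request._translate_error(errors.NoSuchFile('a/b'))
    # ('NoSuchFile', 'a/b')
    print request._translate_error(errors.LockContention('repo/lock'))
    # ('LockContention',)
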
+
+class HelloRequest(SmartServerRequest):
+ """Answer a version request with the highest protocol version this server
+ supports.
+ """
+
+ def do(self):
+ return SuccessfulSmartServerResponse(('ok', '2'))
+
+
+class GetBundleRequest(SmartServerRequest):
+ """Get a bundle of from the null revision to the specified revision."""
+
+ def do(self, path, revision_id):
+ # open transport relative to our base
+ t = self.transport_from_client_path(path)
+ control, extra_path = bzrdir.BzrDir.open_containing_from_transport(t)
+ repo = control.open_repository()
+ tmpf = tempfile.TemporaryFile()
+ base_revision = revision.NULL_REVISION
+ serializer.write_bundle(repo, revision_id, base_revision, tmpf)
+ tmpf.seek(0)
+ return SuccessfulSmartServerResponse((), tmpf.read())
+
+
+class SmartServerIsReadonly(SmartServerRequest):
+ # XXX: this request method belongs somewhere else.
+
+ def do(self):
+ if self._backing_transport.is_readonly():
+ answer = 'yes'
+ else:
+ answer = 'no'
+ return SuccessfulSmartServerResponse((answer,))
+
+
+# In the 'info' attribute, we store whether this request is 'safe' to retry if
+# we get a disconnect while reading the response. It can have the values:
+# read This is purely a read request, so retrying it is perfectly ok.
+# idem An idempotent write request. Something like 'put' where if you put
+# the same bytes twice you end up with the same final bytes.
+# semi This is a request that isn't strictly idempotent, but doesn't
+# result in corruption if it is retried. This is for things like
+# 'lock' and 'unlock'. If you call lock, it updates the disk
+# structure. If you fail to read the response, you won't be able to
+# use the lock, because you don't have the lock token. Calling lock
+# again will fail, because the lock is already taken. However, we
+# can't tell if the server received our request or not. If it didn't,
+# then retrying the request is fine, as it will actually do what we
+# want. If it did, we will interrupt the current operation, but we
+# are no worse off than interrupting the current operation because of
+# a ConnectionReset.
+# semivfs Similar to semi, but specific to a Virtual FileSystem request.
+# stream This is a request that takes a stream that cannot be restarted if
+# consumed. This request is 'safe' in that if we determine the
+# connection is closed before we consume the stream, we can try
+# again.
+# mutate State is updated in a way that replaying that request results in a
+# different state. For example 'append' writes more bytes to a given
+# file. If append succeeds, it moves the file pointer.
+request_handlers = registry.Registry()
+request_handlers.register_lazy(
+ 'append', 'bzrlib.smart.vfs', 'AppendRequest', info='mutate')
+request_handlers.register_lazy(
+ 'Branch.break_lock', 'bzrlib.smart.branch',
+ 'SmartServerBranchBreakLock', info='idem')
+request_handlers.register_lazy(
+ 'Branch.get_config_file', 'bzrlib.smart.branch',
+ 'SmartServerBranchGetConfigFile', info='read')
+request_handlers.register_lazy(
+ 'Branch.get_parent', 'bzrlib.smart.branch', 'SmartServerBranchGetParent',
+ info='read')
+request_handlers.register_lazy(
+ 'Branch.put_config_file', 'bzrlib.smart.branch',
+ 'SmartServerBranchPutConfigFile', info='idem')
+request_handlers.register_lazy(
+ 'Branch.get_tags_bytes', 'bzrlib.smart.branch',
+ 'SmartServerBranchGetTagsBytes', info='read')
+request_handlers.register_lazy(
+ 'Branch.set_tags_bytes', 'bzrlib.smart.branch',
+ 'SmartServerBranchSetTagsBytes', info='idem')
+request_handlers.register_lazy(
+ 'Branch.heads_to_fetch', 'bzrlib.smart.branch',
+ 'SmartServerBranchHeadsToFetch', info='read')
+request_handlers.register_lazy(
+ 'Branch.get_stacked_on_url', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestGetStackedOnURL', info='read')
+request_handlers.register_lazy(
+ 'Branch.get_physical_lock_status', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestGetPhysicalLockStatus', info='read')
+request_handlers.register_lazy(
+ 'Branch.last_revision_info', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestLastRevisionInfo', info='read')
+request_handlers.register_lazy(
+ 'Branch.lock_write', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestLockWrite', info='semi')
+request_handlers.register_lazy(
+ 'Branch.revision_history', 'bzrlib.smart.branch',
+ 'SmartServerRequestRevisionHistory', info='read')
+request_handlers.register_lazy(
+ 'Branch.set_config_option', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestSetConfigOption', info='idem')
+request_handlers.register_lazy(
+ 'Branch.set_config_option_dict', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestSetConfigOptionDict', info='idem')
+request_handlers.register_lazy(
+ 'Branch.set_last_revision', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestSetLastRevision', info='idem')
+request_handlers.register_lazy(
+ 'Branch.set_last_revision_info', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestSetLastRevisionInfo', info='idem')
+request_handlers.register_lazy(
+ 'Branch.set_last_revision_ex', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestSetLastRevisionEx', info='idem')
+request_handlers.register_lazy(
+ 'Branch.set_parent_location', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestSetParentLocation', info='idem')
+request_handlers.register_lazy(
+ 'Branch.unlock', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestUnlock', info='semi')
+request_handlers.register_lazy(
+ 'Branch.revision_id_to_revno', 'bzrlib.smart.branch',
+ 'SmartServerBranchRequestRevisionIdToRevno', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.checkout_metadir', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestCheckoutMetaDir', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.cloning_metadir', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestCloningMetaDir', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.create_branch', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestCreateBranch', info='semi')
+request_handlers.register_lazy(
+ 'BzrDir.create_repository', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestCreateRepository', info='semi')
+request_handlers.register_lazy(
+ 'BzrDir.find_repository', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestFindRepositoryV1', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.find_repositoryV2', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestFindRepositoryV2', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.find_repositoryV3', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestFindRepositoryV3', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.get_branches', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestGetBranches', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.get_config_file', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestConfigFile', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.destroy_branch', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestDestroyBranch', info='semi')
+request_handlers.register_lazy(
+ 'BzrDir.destroy_repository', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestDestroyRepository', info='semi')
+request_handlers.register_lazy(
+ 'BzrDir.has_workingtree', 'bzrlib.smart.bzrdir',
+ 'SmartServerBzrDirRequestHasWorkingTree', info='read')
+request_handlers.register_lazy(
+ 'BzrDirFormat.initialize', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestInitializeBzrDir', info='semi')
+request_handlers.register_lazy(
+ 'BzrDirFormat.initialize_ex_1.16', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestBzrDirInitializeEx', info='semi')
+request_handlers.register_lazy(
+ 'BzrDir.open', 'bzrlib.smart.bzrdir', 'SmartServerRequestOpenBzrDir',
+ info='read')
+request_handlers.register_lazy(
+ 'BzrDir.open_2.1', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestOpenBzrDir_2_1', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.open_branch', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestOpenBranch', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.open_branchV2', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestOpenBranchV2', info='read')
+request_handlers.register_lazy(
+ 'BzrDir.open_branchV3', 'bzrlib.smart.bzrdir',
+ 'SmartServerRequestOpenBranchV3', info='read')
+request_handlers.register_lazy(
+ 'delete', 'bzrlib.smart.vfs', 'DeleteRequest', info='semivfs')
+request_handlers.register_lazy(
+ 'get', 'bzrlib.smart.vfs', 'GetRequest', info='read')
+request_handlers.register_lazy(
+ 'get_bundle', 'bzrlib.smart.request', 'GetBundleRequest', info='read')
+request_handlers.register_lazy(
+ 'has', 'bzrlib.smart.vfs', 'HasRequest', info='read')
+request_handlers.register_lazy(
+ 'hello', 'bzrlib.smart.request', 'HelloRequest', info='read')
+request_handlers.register_lazy(
+ 'iter_files_recursive', 'bzrlib.smart.vfs', 'IterFilesRecursiveRequest',
+ info='read')
+request_handlers.register_lazy(
+ 'list_dir', 'bzrlib.smart.vfs', 'ListDirRequest', info='read')
+request_handlers.register_lazy(
+ 'mkdir', 'bzrlib.smart.vfs', 'MkdirRequest', info='semivfs')
+request_handlers.register_lazy(
+ 'move', 'bzrlib.smart.vfs', 'MoveRequest', info='semivfs')
+request_handlers.register_lazy(
+ 'put', 'bzrlib.smart.vfs', 'PutRequest', info='idem')
+request_handlers.register_lazy(
+ 'put_non_atomic', 'bzrlib.smart.vfs', 'PutNonAtomicRequest', info='idem')
+request_handlers.register_lazy(
+ 'readv', 'bzrlib.smart.vfs', 'ReadvRequest', info='read')
+request_handlers.register_lazy(
+ 'rename', 'bzrlib.smart.vfs', 'RenameRequest', info='semivfs')
+request_handlers.register_lazy(
+ 'Repository.add_signature_text', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryAddSignatureText', info='idem')
+request_handlers.register_lazy(
+ 'Repository.all_revision_ids', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryAllRevisionIds', info='read')
+request_handlers.register_lazy(
+ 'PackRepository.autopack', 'bzrlib.smart.packrepository',
+ 'SmartServerPackRepositoryAutopack', info='idem')
+request_handlers.register_lazy(
+ 'Repository.break_lock', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryBreakLock', info='idem')
+request_handlers.register_lazy(
+ 'Repository.gather_stats', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGatherStats', info='read')
+request_handlers.register_lazy(
+ 'Repository.get_parent_map', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetParentMap', info='read')
+request_handlers.register_lazy(
+ 'Repository.get_revision_graph', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetRevisionGraph', info='read')
+request_handlers.register_lazy(
+ 'Repository.get_revision_signature_text', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetRevisionSignatureText', info='read')
+request_handlers.register_lazy(
+ 'Repository.has_revision', 'bzrlib.smart.repository',
+ 'SmartServerRequestHasRevision', info='read')
+request_handlers.register_lazy(
+ 'Repository.has_signature_for_revision_id', 'bzrlib.smart.repository',
+ 'SmartServerRequestHasSignatureForRevisionId', info='read')
+request_handlers.register_lazy(
+ 'Repository.insert_stream', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryInsertStream', info='stream')
+request_handlers.register_lazy(
+ 'Repository.insert_stream_1.19', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryInsertStream_1_19', info='stream')
+request_handlers.register_lazy(
+ 'Repository.insert_stream_locked', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryInsertStreamLocked', info='stream')
+request_handlers.register_lazy(
+ 'Repository.is_shared', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryIsShared', info='read')
+request_handlers.register_lazy(
+ 'Repository.iter_files_bytes', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryIterFilesBytes', info='read')
+request_handlers.register_lazy(
+ 'Repository.lock_write', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryLockWrite', info='semi')
+request_handlers.register_lazy(
+ 'Repository.make_working_trees', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryMakeWorkingTrees', info='read')
+request_handlers.register_lazy(
+ 'Repository.set_make_working_trees', 'bzrlib.smart.repository',
+ 'SmartServerRepositorySetMakeWorkingTrees', info='idem')
+request_handlers.register_lazy(
+ 'Repository.unlock', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryUnlock', info='semi')
+request_handlers.register_lazy(
+ 'Repository.get_physical_lock_status', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetPhysicalLockStatus', info='read')
+request_handlers.register_lazy(
+ 'Repository.get_rev_id_for_revno', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetRevIdForRevno', info='read')
+request_handlers.register_lazy(
+ 'Repository.get_stream', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetStream', info='read')
+request_handlers.register_lazy(
+ 'Repository.get_stream_1.19', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetStream_1_19', info='read')
+request_handlers.register_lazy(
+ 'Repository.iter_revisions', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryIterRevisions', info='read')
+request_handlers.register_lazy(
+ 'Repository.pack', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryPack', info='idem')
+request_handlers.register_lazy(
+ 'Repository.start_write_group', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryStartWriteGroup', info='semi')
+request_handlers.register_lazy(
+ 'Repository.commit_write_group', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryCommitWriteGroup', info='semi')
+request_handlers.register_lazy(
+ 'Repository.abort_write_group', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryAbortWriteGroup', info='semi')
+request_handlers.register_lazy(
+ 'Repository.check_write_group', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryCheckWriteGroup', info='read')
+request_handlers.register_lazy(
+ 'Repository.reconcile', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryReconcile', info='idem')
+request_handlers.register_lazy(
+ 'Repository.tarball', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryTarball', info='read')
+request_handlers.register_lazy(
+ 'VersionedFileRepository.get_serializer_format', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetSerializerFormat', info='read')
+request_handlers.register_lazy(
+ 'VersionedFileRepository.get_inventories', 'bzrlib.smart.repository',
+ 'SmartServerRepositoryGetInventories', info='read')
+request_handlers.register_lazy(
+ 'rmdir', 'bzrlib.smart.vfs', 'RmdirRequest', info='semivfs')
+request_handlers.register_lazy(
+ 'stat', 'bzrlib.smart.vfs', 'StatRequest', info='read')
+request_handlers.register_lazy(
+ 'Transport.is_readonly', 'bzrlib.smart.request',
+ 'SmartServerIsReadonly', info='read')
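The registry above is also the extension point for plugins. A hedged sketch (not part of the patch) of registering an extra verb and reading back the retry-safety info of an existing one; the plugin module and class names are hypothetical:

    from bzrlib.smart import request

    # 'example_plugin.smart_verbs' and 'ExamplePingRequest' are made-up names.
    request.request_handlers.register_lazy(
        'ExamplePlugin.ping', 'example_plugin.smart_verbs',
        'ExamplePingRequest', info='read')

    # The info value recorded at registration time can be queried back.
    print request.request_handlers.get_info('hello')
    # 'read'
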
diff --git a/bzrlib/smart/server.py b/bzrlib/smart/server.py
new file mode 100644
index 0000000..8cfac4a
--- /dev/null
+++ b/bzrlib/smart/server.py
@@ -0,0 +1,502 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Server for smart-server protocol."""
+
+from __future__ import absolute_import
+
+import errno
+import os.path
+import socket
+import sys
+import time
+import threading
+
+from bzrlib.hooks import Hooks
+from bzrlib import (
+ errors,
+ trace,
+ transport as _mod_transport,
+)
+from bzrlib.i18n import gettext
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib.smart import (
+ medium,
+ signals,
+ )
+from bzrlib.transport import (
+ chroot,
+ pathfilter,
+ )
+from bzrlib import (
+ config,
+ urlutils,
+ )
+""")
+
+
+class SmartTCPServer(object):
+ """Listens on a TCP socket and accepts connections from smart clients.
+
+ Each connection will be served by a SmartServerSocketStreamMedium running in
+ a thread.
+
+ hooks: An instance of SmartServerHooks.
+ """
+
+ # This is the timeout on the socket we use .accept() on. It is exposed here
+ # so the test suite can set it faster. (thread.interrupt_main() will not
+ # fire a KeyboardInterrupt while we are blocked in socket.accept.)
+ _ACCEPT_TIMEOUT = 1.0
+ _SHUTDOWN_POLL_TIMEOUT = 1.0
+ _LOG_WAITING_TIMEOUT = 10.0
+
+ _timer = time.time
+
+ def __init__(self, backing_transport, root_client_path='/',
+ client_timeout=None):
+ """Construct a new server.
+
+ To actually start it running, call either start_background_thread or
+ serve.
+
+ :param backing_transport: The transport to serve.
+ :param root_client_path: The client path that will correspond to root
+ of backing_transport.
+ :param client_timeout: See SmartServerSocketStreamMedium's timeout
+ parameter.
+ """
+ self.backing_transport = backing_transport
+ self.root_client_path = root_client_path
+ self._client_timeout = client_timeout
+ self._active_connections = []
+ # This is set to indicate we want to wait for clients to finish before
+ # we disconnect.
+ self._gracefully_stopping = False
+
+ def start_server(self, host, port):
+ """Create the server listening socket.
+
+ :param host: Name of the interface to listen on.
+ :param port: TCP port to listen on, or 0 to allocate a transient port.
+ """
+ # let connections time out so that we get a chance to terminate
+ # Keep a reference to the exceptions we want to catch because the socket
+ # module's globals get set to None during interpreter shutdown.
+ from socket import timeout as socket_timeout
+ from socket import error as socket_error
+ self._socket_error = socket_error
+ self._socket_timeout = socket_timeout
+ addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM, 0, socket.AI_PASSIVE)[0]
+
+ (family, socktype, proto, canonname, sockaddr) = addrs
+
+ self._server_socket = socket.socket(family, socktype, proto)
+ # SO_REUSEADDR has a different meaning on Windows
+ if sys.platform != 'win32':
+ self._server_socket.setsockopt(socket.SOL_SOCKET,
+ socket.SO_REUSEADDR, 1)
+ try:
+ self._server_socket.bind(sockaddr)
+ except self._socket_error, message:
+ raise errors.CannotBindAddress(host, port, message)
+ self._sockname = self._server_socket.getsockname()
+ self.port = self._sockname[1]
+ self._server_socket.listen(1)
+ self._server_socket.settimeout(self._ACCEPT_TIMEOUT)
+ # Once we start accept()ing connections, we set started.
+ self._started = threading.Event()
+ # Once we stop accept()ing connections (and are closing the socket) we
+ # set _stopped
+ self._stopped = threading.Event()
+ # Once we have finished waiting for all clients, etc., we set
+ # _fully_stopped.
+ self._fully_stopped = threading.Event()
+
+ def _backing_urls(self):
+ # There are three interesting urls:
+ # The URL the server can be contacted on. (e.g. bzr://host/)
+ # The URL that a commit done on the same machine as the server will
+ # have within the server's space. (e.g. file:///home/user/source)
+ # The URL that will be given to other hooks in the same process -
+ # the URL of the backing transport itself. (e.g. filtered-36195:///)
+ # We need all three because:
+ # * other machines see the first
+ # * local commits on this machine should be able to be mapped to
+ # this server
+ # * commits the server does itself need to be mapped across to this
+ # server.
+ # The latter two urls are different aliases to the server's url,
+ # so we group those in a list - as there might be more aliases
+ # in the future.
+ urls = [self.backing_transport.base]
+ try:
+ urls.append(self.backing_transport.external_url())
+ except errors.InProcessTransport:
+ pass
+ return urls
+
+ def run_server_started_hooks(self, backing_urls=None):
+ if backing_urls is None:
+ backing_urls = self._backing_urls()
+ for hook in SmartTCPServer.hooks['server_started']:
+ hook(backing_urls, self.get_url())
+ for hook in SmartTCPServer.hooks['server_started_ex']:
+ hook(backing_urls, self)
+
+ def run_server_stopped_hooks(self, backing_urls=None):
+ if backing_urls is None:
+ backing_urls = self._backing_urls()
+ for hook in SmartTCPServer.hooks['server_stopped']:
+ hook(backing_urls, self.get_url())
+
+ def _stop_gracefully(self):
+ trace.note(gettext('Requested to stop gracefully'))
+ self._should_terminate = True
+ self._gracefully_stopping = True
+ for handler, _ in self._active_connections:
+ handler._stop_gracefully()
+
+ def _wait_for_clients_to_disconnect(self):
+ self._poll_active_connections()
+ if not self._active_connections:
+ return
+ trace.note(gettext('Waiting for %d client(s) to finish')
+ % (len(self._active_connections),))
+ t_next_log = self._timer() + self._LOG_WAITING_TIMEOUT
+ while self._active_connections:
+ now = self._timer()
+ if now >= t_next_log:
+ trace.note(gettext('Still waiting for %d client(s) to finish')
+ % (len(self._active_connections),))
+ t_next_log = now + self._LOG_WAITING_TIMEOUT
+ self._poll_active_connections(self._SHUTDOWN_POLL_TIMEOUT)
+
+ def serve(self, thread_name_suffix=''):
+ # Note: There is a temptation to do
+ # signals.register_on_hangup(id(self), self._stop_gracefully)
+ # However, that creates a temporary object which is a bound
+ # method. signals._on_sighup is a WeakKeyDictionary so it
+ # immediately gets garbage collected, because nothing else
+ # references it. Instead, we need to keep a real reference to the
+ # bound method for the lifetime of the serve() function.
+ stop_gracefully = self._stop_gracefully
+ signals.register_on_hangup(id(self), stop_gracefully)
+ self._should_terminate = False
+ # Via hooks, we are letting code know that a server has started (and
+ # later stopped).
+ self.run_server_started_hooks()
+ self._started.set()
+ try:
+ try:
+ while not self._should_terminate:
+ try:
+ conn, client_addr = self._server_socket.accept()
+ except self._socket_timeout:
+ # just check if we're asked to stop
+ pass
+ except self._socket_error, e:
+ # if the socket is closed by stop_background_thread
+ # we might get a EBADF here, or if we get a signal we
+ # can get EINTR, any other socket errors should get
+ # logged.
+ if e.args[0] not in (errno.EBADF, errno.EINTR):
+ trace.warning(gettext("listening socket error: %s")
+ % (e,))
+ else:
+ if self._should_terminate:
+ conn.close()
+ break
+ self.serve_conn(conn, thread_name_suffix)
+ # Clean out any threads that have finished processing.
+ self._poll_active_connections()
+ except KeyboardInterrupt:
+ # don't log when CTRL-C'd.
+ raise
+ except Exception, e:
+ trace.report_exception(sys.exc_info(), sys.stderr)
+ raise
+ finally:
+ try:
+ # ensure the server socket is closed.
+ self._server_socket.close()
+ except self._socket_error:
+ # ignore errors on close
+ pass
+ self._stopped.set()
+ signals.unregister_on_hangup(id(self))
+ self.run_server_stopped_hooks()
+ if self._gracefully_stopping:
+ self._wait_for_clients_to_disconnect()
+ self._fully_stopped.set()
+
+ def get_url(self):
+ """Return the url of the server"""
+ return "bzr://%s:%s/" % (self._sockname[0], self._sockname[1])
+
+ def _make_handler(self, conn):
+ return medium.SmartServerSocketStreamMedium(
+ conn, self.backing_transport, self.root_client_path,
+ timeout=self._client_timeout)
+
+ def _poll_active_connections(self, timeout=0.0):
+ """Check to see if any active connections have finished.
+
+ This will iterate through self._active_connections, and update any
+ connections that are finished.
+
+ :param timeout: The timeout to pass to thread.join(). By default, we
+ set it to 0, so that we don't hang if threads are not done yet.
+ :return: None
+ """
+ still_active = []
+ for handler, thread in self._active_connections:
+ thread.join(timeout)
+ if thread.isAlive():
+ still_active.append((handler, thread))
+ self._active_connections = still_active
+
+ def serve_conn(self, conn, thread_name_suffix):
+ # On WIN32 the timeout value from the listening socket propagates to
+ # the newly accepted socket, so make the connection blocking again.
+ conn.setblocking(True)
+ conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ thread_name = 'smart-server-child' + thread_name_suffix
+ handler = self._make_handler(conn)
+ connection_thread = threading.Thread(
+ None, handler.serve, name=thread_name)
+ self._active_connections.append((handler, connection_thread))
+ connection_thread.setDaemon(True)
+ connection_thread.start()
+ return connection_thread
+
+ def start_background_thread(self, thread_name_suffix=''):
+ self._started.clear()
+ self._server_thread = threading.Thread(None,
+ self.serve, args=(thread_name_suffix,),
+ name='server-' + self.get_url())
+ self._server_thread.setDaemon(True)
+ self._server_thread.start()
+ self._started.wait()
+
+ def stop_background_thread(self):
+ self._stopped.clear()
+ # tell the main loop to quit on the next iteration.
+ self._should_terminate = True
+ # close the socket - gives error to connections from here on in,
+ # rather than a connection reset error to connections made during
+ # the period between setting _should_terminate = True and
+ # the current request completing/aborting. It may also break out the
+ # main loop if it was currently in accept() (on some platforms).
+ try:
+ self._server_socket.close()
+ except self._socket_error:
+ # ignore errors on close
+ pass
+ if not self._stopped.isSet():
+ # server has not stopped (though it may be stopping)
+ # it's likely in accept(), so give it a connection
+ temp_socket = socket.socket()
+ temp_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ if not temp_socket.connect_ex(self._sockname):
+ # and close it immediately: we don't choose to send any requests.
+ temp_socket.close()
+ self._stopped.wait()
+ self._server_thread.join()
+
+
+class SmartServerHooks(Hooks):
+ """Hooks for the smart server."""
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially, because by default nothing should get
+ notified.
+ """
+ Hooks.__init__(self, "bzrlib.smart.server", "SmartTCPServer.hooks")
+ self.add_hook('server_started',
+ "Called by the bzr server when it starts serving a directory. "
+ "server_started is called with (backing urls, public url), "
+ "where backing_url is a list of URLs giving the "
+ "server-specific directory locations, and public_url is the "
+ "public URL for the directory being served.", (0, 16))
+ self.add_hook('server_started_ex',
+ "Called by the bzr server when it starts serving a directory. "
+ "server_started is called with (backing_urls, server_obj).",
+ (1, 17))
+ self.add_hook('server_stopped',
+ "Called by the bzr server when it stops serving a directory. "
+ "server_stopped is called with the same parameters as the "
+ "server_started hook: (backing_urls, public_url).", (0, 16))
+ self.add_hook('server_exception',
+ "Called by the bzr server when an exception occurs. "
+ "server_exception is called with the sys.exc_info() tuple "
+ "return true for the hook if the exception has been handled, "
+ "in which case the server will exit normally.", (2, 4))
+
+SmartTCPServer.hooks = SmartServerHooks()
+
+
+def _local_path_for_transport(transport):
+ """Return a local path for transport, if reasonably possible.
+
+ This function works even if transport's url has a "readonly+" prefix,
+ unlike local_path_from_url.
+
+ This essentially recovers the --directory argument the user passed to "bzr
+ serve" from the transport passed to serve_bzr.
+ """
+ try:
+ base_url = transport.external_url()
+ except (errors.InProcessTransport, NotImplementedError):
+ return None
+ else:
+ # Strip readonly prefix
+ if base_url.startswith('readonly+'):
+ base_url = base_url[len('readonly+'):]
+ try:
+ return urlutils.local_path_from_url(base_url)
+ except errors.InvalidURL:
+ return None
+
+
+class BzrServerFactory(object):
+ """Helper class for serve_bzr."""
+
+ def __init__(self, userdir_expander=None, get_base_path=None):
+ self.cleanups = []
+ self.base_path = None
+ self.backing_transport = None
+ if userdir_expander is None:
+ userdir_expander = os.path.expanduser
+ self.userdir_expander = userdir_expander
+ if get_base_path is None:
+ get_base_path = _local_path_for_transport
+ self.get_base_path = get_base_path
+
+ def _expand_userdirs(self, path):
+ """Translate /~/ or /~user/ to e.g. /home/foo, using
+ self.userdir_expander (os.path.expanduser by default).
+
+ If the translated path would fall outside base_path, or the path does
+ not start with ~, then no translation is applied.
+
+ If the path is inside, it is adjusted to be relative to the base path.
+
+ e.g. if base_path is /home, and the expanded path is /home/joe, then
+ the translated path is joe.
+ """
+ result = path
+ if path.startswith('~'):
+ expanded = self.userdir_expander(path)
+ if not expanded.endswith('/'):
+ expanded += '/'
+ if expanded.startswith(self.base_path):
+ result = expanded[len(self.base_path):]
+ return result
+
+ def _make_expand_userdirs_filter(self, transport):
+ return pathfilter.PathFilteringServer(transport, self._expand_userdirs)
+
+ def _make_backing_transport(self, transport):
+ """Chroot transport, and decorate with userdir expander."""
+ self.base_path = self.get_base_path(transport)
+ chroot_server = chroot.ChrootServer(transport)
+ chroot_server.start_server()
+ self.cleanups.append(chroot_server.stop_server)
+ transport = _mod_transport.get_transport_from_url(chroot_server.get_url())
+ if self.base_path is not None:
+ # Decorate the server's backing transport with a filter that can
+ # expand homedirs.
+ expand_userdirs = self._make_expand_userdirs_filter(transport)
+ expand_userdirs.start_server()
+ self.cleanups.append(expand_userdirs.stop_server)
+ transport = _mod_transport.get_transport_from_url(expand_userdirs.get_url())
+ self.transport = transport
+
+ def _get_stdin_stdout(self):
+ return sys.stdin, sys.stdout
+
+ def _make_smart_server(self, host, port, inet, timeout):
+ if timeout is None:
+ c = config.GlobalStack()
+ timeout = c.get('serve.client_timeout')
+ if inet:
+ stdin, stdout = self._get_stdin_stdout()
+ smart_server = medium.SmartServerPipeStreamMedium(
+ stdin, stdout, self.transport, timeout=timeout)
+ else:
+ if host is None:
+ host = medium.BZR_DEFAULT_INTERFACE
+ if port is None:
+ port = medium.BZR_DEFAULT_PORT
+ smart_server = SmartTCPServer(self.transport,
+ client_timeout=timeout)
+ smart_server.start_server(host, port)
+ trace.note(gettext('listening on port: %s') % smart_server.port)
+ self.smart_server = smart_server
+
+ def _change_globals(self):
+ from bzrlib import lockdir, ui
+ # For the duration of this server, no UI output is permitted. Note
+ # that this may cause problems with blackbox tests. This should be
+ # changed with care though, as we don't want to use bandwidth sending
+ # progress over stderr to smart server clients!
+ old_factory = ui.ui_factory
+ old_lockdir_timeout = lockdir._DEFAULT_TIMEOUT_SECONDS
+ def restore_default_ui_factory_and_lockdir_timeout():
+ ui.ui_factory = old_factory
+ lockdir._DEFAULT_TIMEOUT_SECONDS = old_lockdir_timeout
+ self.cleanups.append(restore_default_ui_factory_and_lockdir_timeout)
+ ui.ui_factory = ui.SilentUIFactory()
+ lockdir._DEFAULT_TIMEOUT_SECONDS = 0
+ orig = signals.install_sighup_handler()
+ def restore_signals():
+ signals.restore_sighup_handler(orig)
+ self.cleanups.append(restore_signals)
+
+ def set_up(self, transport, host, port, inet, timeout):
+ self._make_backing_transport(transport)
+ self._make_smart_server(host, port, inet, timeout)
+ self._change_globals()
+
+ def tear_down(self):
+ for cleanup in reversed(self.cleanups):
+ cleanup()
+
+
+def serve_bzr(transport, host=None, port=None, inet=False, timeout=None):
+ """This is the default implementation of 'bzr serve'.
+
+ It creates a TCP or pipe smart server on 'transport', and runs it. The
+ transport will be decorated with a chroot and pathfilter (using
+ os.path.expanduser).
+ """
+ bzr_server = BzrServerFactory()
+ try:
+ bzr_server.set_up(transport, host, port, inet, timeout)
+ bzr_server.smart_server.serve()
+ except:
+ hook_caught_exception = False
+ for hook in SmartTCPServer.hooks['server_exception']:
+ hook_caught_exception = hook(sys.exc_info())
+ if not hook_caught_exception:
+ raise
+ finally:
+ bzr_server.tear_down()
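A rough usage sketch (not part of the patch) of SmartTCPServer driven directly rather than through serve_bzr; the memory transport and loopback address are only examples:

    from bzrlib import transport as _mod_transport
    from bzrlib.smart import server

    backing = _mod_transport.get_transport_from_url('memory:///')
    tcp_server = server.SmartTCPServer(backing, client_timeout=300)
    tcp_server.start_server('127.0.0.1', 0)   # port 0: let the OS pick one
    tcp_server.start_background_thread()
    print tcp_server.get_url()                # e.g. bzr://127.0.0.1:PORT/
    tcp_server.stop_background_thread()
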
diff --git a/bzrlib/smart/signals.py b/bzrlib/smart/signals.py
new file mode 100644
index 0000000..a3967b6
--- /dev/null
+++ b/bzrlib/smart/signals.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Signal handling for the smart server code."""
+
+from __future__ import absolute_import
+
+import signal
+import weakref
+
+from bzrlib import trace
+
+
+# I'm pretty sure this has to be global, since signal handling is per-process.
+_on_sighup = None
+ # TODO: Using a dict means that the order of calls is undefined. We could use a
+# list and then do something like LIFO ordering. A dict was chosen so
+# that you could have a key to easily remove your entry. However, you
+# could just use the callable itself as the indexed part, and even in
+# large cases, we shouldn't have more than 100 or so callbacks
+# registered.
+def _sighup_handler(signal_number, interrupted_frame):
+ """This is the actual function that is registered for handling SIGHUP.
+
+ It will call out to all the registered functions, letting them know that a
+ graceful termination has been requested.
+ """
+ if _on_sighup is None:
+ return
+ trace.mutter('Caught SIGHUP, sending graceful shutdown requests.')
+ for ref in _on_sighup.valuerefs():
+ try:
+ cb = ref()
+ if cb is not None:
+ cb()
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ trace.mutter('Error occurred while running SIGHUP handlers:')
+ trace.log_exception_quietly()
+
+
+def install_sighup_handler():
+ """Setup a handler for the SIGHUP signal."""
+ if getattr(signal, "SIGHUP", None) is None:
+ # If we can't install SIGHUP, there is no reason (yet) to do graceful
+ # shutdown.
+ old_signal = None
+ else:
+ old_signal = signal.signal(signal.SIGHUP, _sighup_handler)
+ old_dict = _setup_on_hangup_dict()
+ return old_signal, old_dict
+
+
+def _setup_on_hangup_dict():
+ """Create something for _on_sighup.
+
+ This is done when we install the sighup handler, and for tests that want to
+ test the functionality. If this hasn't been called, then
+ register_on_hangup is a no-op, as is unregister_on_hangup.
+ """
+ global _on_sighup
+ old = _on_sighup
+ _on_sighup = weakref.WeakValueDictionary()
+ return old
+
+
+def restore_sighup_handler(orig):
+ """Pass in the returned value from install_sighup_handler to reset."""
+ global _on_sighup
+ old_signal, old_dict = orig
+ if old_signal is not None:
+ signal.signal(signal.SIGHUP, old_signal)
+ _on_sighup = old_dict
+
+
+# TODO: Should these be single-use callables? Meaning that once we've triggered
+# SIGHUP and called them, they should auto-remove themselves? I don't
+# think so. Callers need to clean up during shutdown anyway, so that we
+# don't end up with lots of garbage in the _on_sighup dict. On the other
+# hand, we made _on_sighup a WeakValueDictionary in case cleanups didn't
+# get fired properly. Maybe we just assume we don't have to do it?
+def register_on_hangup(identifier, a_callable):
+ """Register for us to call a_callable as part of a graceful shutdown."""
+ if _on_sighup is None:
+ return
+ _on_sighup[identifier] = a_callable
+
+
+def unregister_on_hangup(identifier):
+ """Remove a callback from being called during sighup."""
+ if _on_sighup is None:
+ return
+ try:
+ del _on_sighup[identifier]
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ # This usually runs as a tear-down step. So we don't want to propagate
+ # most exceptions.
+ trace.mutter('Error occurred during unregister_on_hangup:')
+ trace.log_exception_quietly()
+
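A hedged sketch (not part of the patch) of how a long-running service might use these hooks; the identifier is arbitrary, and a strong reference to the callable must be kept because _on_sighup only holds it weakly:

    from bzrlib.smart import signals

    class Service(object):
        def stop_gracefully(self):
            pass  # stop accepting work, flush state, ...

    service = Service()
    orig = signals.install_sighup_handler()
    stop_cb = service.stop_gracefully       # keep the bound method alive
    signals.register_on_hangup(id(service), stop_cb)
    try:
        pass  # ... run the service; SIGHUP now triggers stop_cb ...
    finally:
        signals.unregister_on_hangup(id(service))
        signals.restore_sighup_handler(orig)
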
diff --git a/bzrlib/smart/vfs.py b/bzrlib/smart/vfs.py
new file mode 100644
index 0000000..fe0d687
--- /dev/null
+++ b/bzrlib/smart/vfs.py
@@ -0,0 +1,231 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""VFS operations for the smart server.
+
+This module defines the smart server methods that are low-level file operations
+-- i.e. methods that operate directly on files and directories, rather than
+higher-level concepts like branches and revisions.
+
+These methods, plus 'hello' and 'get_bundle', are version 1 of the smart server
+protocol, as implemented in bzr 0.11 and later.
+"""
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib import errors
+from bzrlib import urlutils
+from bzrlib.smart import request
+
+
+def _deserialise_optional_mode(mode):
+ # XXX: FIXME this should be on the protocol object. Later protocol versions
+ # might serialise modes differently.
+ if mode == '':
+ return None
+ else:
+ return int(mode)
+
+
+def vfs_enabled():
+ """Is the VFS enabled ?
+
+ the VFS is disabled when the BZR_NO_SMART_VFS environment variable is set.
+
+ :return: True if it is enabled.
+ """
+ return 'BZR_NO_SMART_VFS' not in os.environ
+
+
+class VfsRequest(request.SmartServerRequest):
+ """Base class for VFS requests.
+
+ VFS requests are disabled if vfs_enabled() returns False.
+ """
+
+ def _check_enabled(self):
+ if not vfs_enabled():
+ raise errors.DisabledMethod(self.__class__.__name__)
+
+ def translate_client_path(self, relpath):
+ # VFS requests are made with escaped paths so the escaping done in
+ # SmartServerRequest.translate_client_path leads to double escaping.
+ # Remove it here -- the fact that the result is still escaped means
+ # that the str() will not fail on valid input.
+ x = request.SmartServerRequest.translate_client_path(self, relpath)
+ return str(urlutils.unescape(x))
+
+
+class HasRequest(VfsRequest):
+
+ def do(self, relpath):
+ relpath = self.translate_client_path(relpath)
+ r = self._backing_transport.has(relpath) and 'yes' or 'no'
+ return request.SuccessfulSmartServerResponse((r,))
+
+
+class GetRequest(VfsRequest):
+
+ def do(self, relpath):
+ relpath = self.translate_client_path(relpath)
+ backing_bytes = self._backing_transport.get_bytes(relpath)
+ return request.SuccessfulSmartServerResponse(('ok',), backing_bytes)
+
+
+class AppendRequest(VfsRequest):
+
+ def do(self, relpath, mode):
+ relpath = self.translate_client_path(relpath)
+ self._relpath = relpath
+ self._mode = _deserialise_optional_mode(mode)
+
+ def do_body(self, body_bytes):
+ old_length = self._backing_transport.append_bytes(
+ self._relpath, body_bytes, self._mode)
+ return request.SuccessfulSmartServerResponse(('appended', '%d' % old_length))
+
+
+class DeleteRequest(VfsRequest):
+
+ def do(self, relpath):
+ relpath = self.translate_client_path(relpath)
+ self._backing_transport.delete(relpath)
+ return request.SuccessfulSmartServerResponse(('ok', ))
+
+
+class IterFilesRecursiveRequest(VfsRequest):
+
+ def do(self, relpath):
+ if not relpath.endswith('/'):
+ relpath += '/'
+ relpath = self.translate_client_path(relpath)
+ transport = self._backing_transport.clone(relpath)
+ filenames = transport.iter_files_recursive()
+ return request.SuccessfulSmartServerResponse(('names',) + tuple(filenames))
+
+
+class ListDirRequest(VfsRequest):
+
+ def do(self, relpath):
+ if not relpath.endswith('/'):
+ relpath += '/'
+ relpath = self.translate_client_path(relpath)
+ filenames = self._backing_transport.list_dir(relpath)
+ return request.SuccessfulSmartServerResponse(('names',) + tuple(filenames))
+
+
+class MkdirRequest(VfsRequest):
+
+ def do(self, relpath, mode):
+ relpath = self.translate_client_path(relpath)
+ self._backing_transport.mkdir(relpath,
+ _deserialise_optional_mode(mode))
+ return request.SuccessfulSmartServerResponse(('ok',))
+
+
+class MoveRequest(VfsRequest):
+
+ def do(self, rel_from, rel_to):
+ rel_from = self.translate_client_path(rel_from)
+ rel_to = self.translate_client_path(rel_to)
+ self._backing_transport.move(rel_from, rel_to)
+ return request.SuccessfulSmartServerResponse(('ok',))
+
+
+class PutRequest(VfsRequest):
+
+ def do(self, relpath, mode):
+ relpath = self.translate_client_path(relpath)
+ self._relpath = relpath
+ self._mode = _deserialise_optional_mode(mode)
+
+ def do_body(self, body_bytes):
+ self._backing_transport.put_bytes(self._relpath, body_bytes, self._mode)
+ return request.SuccessfulSmartServerResponse(('ok',))
+
+
+class PutNonAtomicRequest(VfsRequest):
+
+ def do(self, relpath, mode, create_parent, dir_mode):
+ relpath = self.translate_client_path(relpath)
+ self._relpath = relpath
+ self._dir_mode = _deserialise_optional_mode(dir_mode)
+ self._mode = _deserialise_optional_mode(mode)
+ # a boolean would be nicer XXX
+ self._create_parent = (create_parent == 'T')
+
+ def do_body(self, body_bytes):
+ self._backing_transport.put_bytes_non_atomic(self._relpath,
+ body_bytes,
+ mode=self._mode,
+ create_parent_dir=self._create_parent,
+ dir_mode=self._dir_mode)
+ return request.SuccessfulSmartServerResponse(('ok',))
+
+
+class ReadvRequest(VfsRequest):
+
+ def do(self, relpath):
+ relpath = self.translate_client_path(relpath)
+ self._relpath = relpath
+
+ def do_body(self, body_bytes):
+ """accept offsets for a readv request."""
+ offsets = self._deserialise_offsets(body_bytes)
+ backing_bytes = ''.join(bytes for offset, bytes in
+ self._backing_transport.readv(self._relpath, offsets))
+ return request.SuccessfulSmartServerResponse(('readv',), backing_bytes)
+
+ def _deserialise_offsets(self, text):
+ # XXX: FIXME this should be on the protocol object.
+ offsets = []
+ for line in text.split('\n'):
+ if not line:
+ continue
+ start, length = line.split(',')
+ offsets.append((int(start), int(length)))
+ return offsets
+
+
+class RenameRequest(VfsRequest):
+
+ def do(self, rel_from, rel_to):
+ rel_from = self.translate_client_path(rel_from)
+ rel_to = self.translate_client_path(rel_to)
+ self._backing_transport.rename(rel_from, rel_to)
+ return request.SuccessfulSmartServerResponse(('ok', ))
+
+
+class RmdirRequest(VfsRequest):
+
+ def do(self, relpath):
+ relpath = self.translate_client_path(relpath)
+ self._backing_transport.rmdir(relpath)
+ return request.SuccessfulSmartServerResponse(('ok', ))
+
+
+class StatRequest(VfsRequest):
+
+ def do(self, relpath):
+ if not relpath.endswith('/'):
+ relpath += '/'
+ relpath = self.translate_client_path(relpath)
+ stat = self._backing_transport.stat(relpath)
+ return request.SuccessfulSmartServerResponse(
+ ('stat', str(stat.st_size), oct(stat.st_mode)))
+
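A minimal sketch (not part of the patch) of exercising one of these handlers directly against a transport; it assumes BZR_NO_SMART_VFS is not set, and the file name is arbitrary:

    from bzrlib import transport as _mod_transport
    from bzrlib.smart import vfs

    backing = _mod_transport.get_transport_from_url('memory:///')
    backing.put_bytes('greeting.txt', 'hello world')

    handler = vfs.GetRequest(backing)        # root_client_path defaults to '/'
    response = handler.execute('/greeting.txt')
    print response.args, response.body
    # ('ok',) hello world
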
diff --git a/bzrlib/smtp_connection.py b/bzrlib/smtp_connection.py
new file mode 100644
index 0000000..d36a01d
--- /dev/null
+++ b/bzrlib/smtp_connection.py
@@ -0,0 +1,190 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A convenience class around smtplib."""
+
+from __future__ import absolute_import
+
+from email import Utils
+import errno
+import smtplib
+import socket
+
+from bzrlib import (
+ config,
+ osutils,
+ )
+from bzrlib.errors import (
+ NoDestinationAddress,
+ SMTPError,
+ DefaultSMTPConnectionRefused,
+ SMTPConnectionRefused,
+ )
+
+
+smtp_password = config.Option('smtp_password', default=None,
+ help='''\
+Password to use for authentication to SMTP server.
+''')
+smtp_server = config.Option('smtp_server', default=None,
+ help='''\
+Hostname of the SMTP server to use for sending email.
+''')
+smtp_username = config.Option('smtp_username', default=None,
+ help='''\
+Username to use for authentication to SMTP server.
+''')
+
+
+class SMTPConnection(object):
+ """Connect to an SMTP server and send an email.
+
+ This is a gateway between bzrlib.config.Config and smtplib.SMTP. It
+ understands the basic bzr SMTP configuration information: smtp_server,
+ smtp_username, and smtp_password.
+ """
+
+ _default_smtp_server = 'localhost'
+
+ def __init__(self, config, _smtp_factory=None):
+ self._smtp_factory = _smtp_factory
+ if self._smtp_factory is None:
+ self._smtp_factory = smtplib.SMTP
+ self._config = config
+ self._config_smtp_server = config.get('smtp_server')
+ self._smtp_server = self._config_smtp_server
+ if self._smtp_server is None:
+ self._smtp_server = self._default_smtp_server
+
+ self._smtp_username = config.get('smtp_username')
+ self._smtp_password = config.get('smtp_password')
+
+ self._connection = None
+
+ def _connect(self):
+ """If we haven't connected, connect and authenticate."""
+ if self._connection is not None:
+ return
+
+ self._create_connection()
+ # FIXME: _authenticate() should only be called when the server has
+ # refused unauthenticated access, so it can safely try to authenticate
+ # with the default username. JRV20090407
+ self._authenticate()
+
+ def _create_connection(self):
+ """Create an SMTP connection."""
+ self._connection = self._smtp_factory()
+ try:
+ self._connection.connect(self._smtp_server)
+ except socket.error, e:
+ if e.args[0] == errno.ECONNREFUSED:
+ if self._config_smtp_server is None:
+ raise DefaultSMTPConnectionRefused(socket.error,
+ self._smtp_server)
+ else:
+ raise SMTPConnectionRefused(socket.error,
+ self._smtp_server)
+ else:
+ raise
+
+ # Say EHLO (falling back to HELO) to query the server's features.
+ code, resp = self._connection.ehlo()
+ if not (200 <= code <= 299):
+ code, resp = self._connection.helo()
+ if not (200 <= code <= 299):
+ raise SMTPError("server refused HELO: %d %s" % (code, resp))
+
+ # Use TLS if the server advertised it:
+ if self._connection.has_extn("starttls"):
+ code, resp = self._connection.starttls()
+ if not (200 <= code <= 299):
+ raise SMTPError("server refused STARTTLS: %d %s" % (code, resp))
+ # Say EHLO again, to check for newly revealed features
+ code, resp = self._connection.ehlo()
+ if not (200 <= code <= 299):
+ raise SMTPError("server refused EHLO: %d %s" % (code, resp))
+
+ def _authenticate(self):
+ """If necessary authenticate yourself to the server."""
+ auth = config.AuthenticationConfig()
+ if self._smtp_username is None:
+ # FIXME: Since _authenticate gets called even when no authentication
+ # is necessary, it's not possible to use the default username
+ # here yet.
+ self._smtp_username = auth.get_user('smtp', self._smtp_server)
+ if self._smtp_username is None:
+ return
+
+ if self._smtp_password is None:
+ self._smtp_password = auth.get_password(
+ 'smtp', self._smtp_server, self._smtp_username)
+
+ # smtplib requires that the username and password be byte
+ # strings. The CRAM-MD5 spec doesn't give any guidance on
+ # encodings, but the SASL PLAIN spec says UTF-8, so that's
+ # what we'll use.
+ username = osutils.safe_utf8(self._smtp_username)
+ password = osutils.safe_utf8(self._smtp_password)
+
+ self._connection.login(username, password)
+
+ @staticmethod
+ def get_message_addresses(message):
+ """Get the origin and destination addresses of a message.
+
+ :param message: A message object supporting get() to access its
+ headers, like email.Message or bzrlib.email_message.EmailMessage.
+ :return: A pair (from_email, to_emails), where from_email is the email
+ address in the From header, and to_emails a list of all the
+ addresses in the To, Cc, and Bcc headers.
+ """
+ from_email = Utils.parseaddr(message.get('From', None))[1]
+ to_full_addresses = []
+ for header in ['To', 'Cc', 'Bcc']:
+ value = message.get(header, None)
+ if value:
+ to_full_addresses.append(value)
+ to_emails = [ pair[1] for pair in
+ Utils.getaddresses(to_full_addresses) ]
+
+ return from_email, to_emails
+
+ def send_email(self, message):
+ """Send an email message.
+
+ The message will be sent to all addresses in the To, Cc and Bcc
+ headers.
+
+ :param message: An email.Message or email.MIMEMultipart object.
+ :return: None
+ """
+ from_email, to_emails = self.get_message_addresses(message)
+
+ if not to_emails:
+ raise NoDestinationAddress
+
+ try:
+ self._connect()
+ self._connection.sendmail(from_email, to_emails,
+ message.as_string())
+ except smtplib.SMTPRecipientsRefused, e:
+ raise SMTPError('server refused recipient: %d %s' %
+ e.recipients.values()[0])
+ except smtplib.SMTPResponseException, e:
+ raise SMTPError('%d %s' % (e.smtp_code, e.smtp_error))
+ except smtplib.SMTPException, e:
+ raise SMTPError(str(e))
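A rough usage sketch (not part of the patch); the addresses are placeholders and the SMTP server is taken from the normal bzr configuration, falling back to localhost if none is set:

    from email.mime.text import MIMEText

    from bzrlib import config
    from bzrlib.smtp_connection import SMTPConnection

    message = MIMEText('body of the mail')
    message['From'] = 'sender@example.com'
    message['To'] = 'recipient@example.com'
    message['Subject'] = 'smart server changes'

    SMTPConnection(config.GlobalStack()).send_email(message)
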
diff --git a/bzrlib/static_tuple.py b/bzrlib/static_tuple.py
new file mode 100644
index 0000000..257eb32
--- /dev/null
+++ b/bzrlib/static_tuple.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Interface thunk for a StaticTuple implementation."""
+
+from __future__ import absolute_import
+
+from bzrlib import debug
+
+try:
+ from bzrlib._static_tuple_c import StaticTuple
+except ImportError, e:
+ from bzrlib import osutils
+ osutils.failed_to_load_extension(e)
+ from bzrlib._static_tuple_py import StaticTuple
+
+
+def expect_static_tuple(obj):
+ """Check if the passed object is a StaticTuple.
+
+ Cast it if necessary, but if the 'static_tuple' debug flag is set, raise an
+ error instead.
+
+ As APIs are improved, we will probably eventually stop calling this as it
+ adds overhead we shouldn't need.
+ """
+ if 'static_tuple' not in debug.debug_flags:
+ return StaticTuple.from_sequence(obj)
+ if type(obj) is not StaticTuple:
+ raise TypeError('We expected a StaticTuple not a %s' % (type(obj),))
+ return obj
+
+
+def as_tuples(obj):
+ """Ensure that the object and any referenced objects are plain tuples.
+
+ :param obj: a list, tuple or StaticTuple
+ :return: a plain tuple instance, with all children also being tuples.
+ """
+ result = []
+ for item in obj:
+ if isinstance(item, (tuple, list, StaticTuple)):
+ item = as_tuples(item)
+ result.append(item)
+ return tuple(result)
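A minimal sketch (not part of the patch) of the thunk in use, independent of which implementation was imported:

    from bzrlib.static_tuple import StaticTuple, as_tuples, expect_static_tuple

    key = expect_static_tuple(('file-id', 'rev-id'))   # plain tuple is cast
    nested = StaticTuple('a', StaticTuple('b', 'c'))
    print as_tuples(nested)
    # ('a', ('b', 'c'))
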
diff --git a/bzrlib/status.py b/bzrlib/status.py
new file mode 100644
index 0000000..e032b40
--- /dev/null
+++ b/bzrlib/status.py
@@ -0,0 +1,483 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import sys
+
+from bzrlib import (
+ delta as _mod_delta,
+ hooks as _mod_hooks,
+ log,
+ osutils,
+ tsort,
+ revision as _mod_revision,
+ )
+import bzrlib.errors as errors
+from bzrlib.trace import mutter, warning
+
+# TODO: when showing single-line logs, truncate to the width of the terminal
+# if known, but only if really going to the terminal (not into a file)
+
+
+def report_changes(to_file, old, new, specific_files,
+ show_short_reporter, show_long_callback,
+ short=False, want_unchanged=False,
+ want_unversioned=False, show_ids=False, classify=True):
+ """Display summary of changes.
+
+ This compares two trees with regard to a list of files, and delegates
+ the display to underlying elements.
+
+ For short output, it creates an iterator on all changes, and lets a given
+ reporter display these changes.
+
+ For standard output, it creates a delta of the changes, and forwards it
+ to a callback.
+
+ :param to_file: If set, write to this file (default stdout.)
+ :param old: Start tree for the comparison
+ :param new: End tree for the comparison
+ :param specific_files: If set, a list of filenames whose status should be
+ shown. It is an error to give a filename that is not in the working
+ tree, or in the working inventory or in the basis inventory.
+ :param show_short_reporter: Reporter in charge of display for short output
+ :param show_long_callback: Callback in charge of display for normal output
+ :param short: If True, gives short SVN-style status lines.
+ :param want_unchanged: Deprecated parameter. If set, includes unchanged
+ files.
+ :param show_ids: If set, includes each file's id.
+ :param want_unversioned: If False, only shows versioned files.
+ :param classify: Add special symbols to indicate file kind.
+ """
+
+ if short:
+ changes = new.iter_changes(old, want_unchanged, specific_files,
+ require_versioned=False, want_unversioned=want_unversioned)
+ _mod_delta.report_changes(changes, show_short_reporter)
+
+ else:
+ delta = new.changes_from(old, want_unchanged=want_unchanged,
+ specific_files=specific_files,
+ want_unversioned=want_unversioned)
+ # filter out unknown files. We may want a tree method for
+ # this
+ delta.unversioned = [unversioned for unversioned in
+ delta.unversioned if not new.is_ignored(unversioned[0])]
+ show_long_callback(to_file, delta,
+ show_ids=show_ids,
+ show_unchanged=want_unchanged,
+ classify=classify)
+
+
+def show_tree_status(wt, show_unchanged=None,
+ specific_files=None,
+ show_ids=False,
+ to_file=None,
+ show_pending=True,
+ revision=None,
+ short=False,
+ verbose=False,
+ versioned=False,
+ classify=True,
+ show_long_callback=_mod_delta.report_delta):
+ """Display summary of changes.
+
+ By default this compares the working tree to a previous revision.
+ If the revision argument is given, summarizes changes between the
+ working tree and another, or between two revisions.
+
+ The result is written out as Unicode and to_file should be able
+ to encode that.
+
+ If showing the status of a working tree, extra information is included
+ about unknown files, conflicts, and pending merges.
+
+ :param show_unchanged: Deprecated parameter. If set, includes unchanged
+ files.
+ :param specific_files: If set, a list of filenames whose status should be
+ shown. It is an error to give a filename that is not in the working
+ tree, or in the working inventory or in the basis inventory.
+ :param show_ids: If set, includes each file's id.
+ :param to_file: If set, write to this file (default stdout.)
+ :param show_pending: If set, write pending merges.
+ :param revision: If None, compare latest revision with working tree
+ If not None, it must be a RevisionSpec list.
+ If one revision, compare with working tree.
+ If two revisions, show status between first and second.
+ :param short: If True, gives short SVN-style status lines.
+ :param verbose: If True, show all merged revisions, not just
+ the merge tips
+ :param versioned: If True, only shows versioned files.
+ :param classify: Add special symbols to indicate file kind.
+ :param show_long_callback: A callback: message = show_long_callback(to_file, delta,
+ show_ids, show_unchanged, indent, filter), only used with the long output
+ """
+ if show_unchanged is not None:
+ warn("show_tree_status with show_unchanged has been deprecated "
+ "since bzrlib 0.9", DeprecationWarning, stacklevel=2)
+
+ if to_file is None:
+ to_file = sys.stdout
+
+ wt.lock_read()
+ try:
+ new_is_working_tree = True
+ if revision is None:
+ if wt.last_revision() != wt.branch.last_revision():
+ warning("working tree is out of date, run 'bzr update'")
+ new = wt
+ old = new.basis_tree()
+ elif len(revision) > 0:
+ try:
+ old = revision[0].as_tree(wt.branch)
+ except errors.NoSuchRevision, e:
+ raise errors.BzrCommandError(str(e))
+ if (len(revision) > 1) and (revision[1].spec is not None):
+ try:
+ new = revision[1].as_tree(wt.branch)
+ new_is_working_tree = False
+ except errors.NoSuchRevision, e:
+ raise errors.BzrCommandError(str(e))
+ else:
+ new = wt
+ old.lock_read()
+ new.lock_read()
+ try:
+ for hook in hooks['pre_status']:
+ hook(StatusHookParams(old, new, to_file, versioned,
+ show_ids, short, verbose, specific_files=specific_files))
+
+ specific_files, nonexistents \
+ = _filter_nonexistent(specific_files, old, new)
+ want_unversioned = not versioned
+
+ # Reporter used for short outputs
+ reporter = _mod_delta._ChangeReporter(output_file=to_file,
+ unversioned_filter=new.is_ignored, classify=classify)
+ report_changes(to_file, old, new, specific_files,
+ reporter, show_long_callback,
+ short=short, want_unchanged=show_unchanged,
+ want_unversioned=want_unversioned, show_ids=show_ids,
+ classify=classify)
+
+ # show the ignored files among specific files (i.e. show the files
+ # identified from input that we choose to ignore).
+ if specific_files is not None:
+ # Ignored files is sorted because specific_files is already sorted
+ ignored_files = [specific for specific in
+ specific_files if new.is_ignored(specific)]
+ if len(ignored_files) > 0 and not short:
+ to_file.write("ignored:\n")
+ prefix = ' '
+ else:
+ prefix = 'I '
+ for ignored_file in ignored_files:
+ to_file.write("%s %s\n" % (prefix, ignored_file))
+
+ # show the new conflicts only for now. XXX: get them from the
+ # delta.
+ conflicts = new.conflicts()
+ if specific_files is not None:
+ conflicts = conflicts.select_conflicts(new, specific_files,
+ ignore_misses=True, recurse=True)[1]
+ if len(conflicts) > 0 and not short:
+ to_file.write("conflicts:\n")
+ for conflict in conflicts:
+ if short:
+ prefix = 'C '
+ else:
+ prefix = ' '
+ to_file.write("%s %s\n" % (prefix, unicode(conflict)))
+ # Show files that were requested but don't exist (and are
+ # not versioned). We don't involve delta in this; these
+ # paths are really the province of just the status
+ # command, since they have more to do with how it was
+ # invoked than with the tree it's operating on.
+ if nonexistents and not short:
+ to_file.write("nonexistent:\n")
+ for nonexistent in nonexistents:
+ # We could calculate prefix outside the loop but, given
+ # how rarely this ought to happen, it's OK and arguably
+ # slightly faster to do it here (ala conflicts above)
+ if short:
+ prefix = 'X '
+ else:
+ prefix = ' '
+ to_file.write("%s %s\n" % (prefix, nonexistent))
+ if (new_is_working_tree and show_pending):
+ show_pending_merges(new, to_file, short, verbose=verbose)
+ if nonexistents:
+ raise errors.PathsDoNotExist(nonexistents)
+ for hook in hooks['post_status']:
+ hook(StatusHookParams(old, new, to_file, versioned,
+ show_ids, short, verbose, specific_files=specific_files))
+ finally:
+ old.unlock()
+ new.unlock()
+ finally:
+ wt.unlock()
+
+
+def _get_sorted_revisions(tip_revision, revision_ids, parent_map):
+ """Get an iterator which will return the revisions in merge sorted order.
+
+ This will build up a list of all nodes, such that only nodes in the list
+ are referenced. It then uses MergeSorter to return them in 'merge-sorted'
+ order.
+
+ :param revision_ids: A set of revision_ids
+ :param parent_map: The parent information for each node. Revisions which
+ are considered ghosts should not be present in the map.
+ :return: iterator from MergeSorter.iter_topo_order()
+ """
+ # MergeSorter requires that all nodes be present in the graph, so get rid
+ # of any references pointing outside of this graph.
+ parent_graph = {}
+ for revision_id in revision_ids:
+ if revision_id not in parent_map: # ghost
+ parent_graph[revision_id] = []
+ else:
+ # Only include parents which are in this sub-graph
+ parent_graph[revision_id] = [p for p in parent_map[revision_id]
+ if p in revision_ids]
+ sorter = tsort.MergeSorter(parent_graph, tip_revision)
+ return sorter.iter_topo_order()
+
+
+def show_pending_merges(new, to_file, short=False, verbose=False):
+ """Write out a display of pending merges in a working tree."""
+ parents = new.get_parent_ids()
+ if len(parents) < 2:
+ return
+
+ term_width = osutils.terminal_width()
+ if term_width is not None:
+ # we need one extra space for terminals that wrap on last char
+ term_width = term_width - 1
+ if short:
+ first_prefix = 'P '
+ sub_prefix = 'P. '
+ else:
+ first_prefix = ' '
+ sub_prefix = ' '
+
+ def show_log_message(rev, prefix):
+ if term_width is None:
+ width = term_width
+ else:
+ width = term_width - len(prefix)
+ log_message = log_formatter.log_string(None, rev, width, prefix=prefix)
+ to_file.write(log_message + '\n')
+
+ pending = parents[1:]
+ branch = new.branch
+ last_revision = parents[0]
+ if not short:
+ if verbose:
+ to_file.write('pending merges:\n')
+ else:
+ to_file.write('pending merge tips:'
+ ' (use -v to see all merge revisions)\n')
+ graph = branch.repository.get_graph()
+ other_revisions = [last_revision]
+ log_formatter = log.LineLogFormatter(to_file)
+ for merge in pending:
+ try:
+ rev = branch.repository.get_revisions([merge])[0]
+ except errors.NoSuchRevision:
+ # If we are missing a revision, just print out the revision id
+ to_file.write(first_prefix + '(ghost) ' + merge + '\n')
+ other_revisions.append(merge)
+ continue
+
+ # Log the merge, as it gets a slightly different formatting
+ show_log_message(rev, first_prefix)
+ if not verbose:
+ continue
+
+ # Find all of the revisions in the merge source, which are not in the
+ # last committed revision.
+ merge_extra = graph.find_unique_ancestors(merge, other_revisions)
+ other_revisions.append(merge)
+ merge_extra.discard(_mod_revision.NULL_REVISION)
+
+ # Get a handle to all of the revisions we will need
+ try:
+ revisions = dict((rev.revision_id, rev) for rev in
+ branch.repository.get_revisions(merge_extra))
+ except errors.NoSuchRevision:
+ # One of the sub nodes is a ghost, check each one
+ revisions = {}
+ for revision_id in merge_extra:
+ try:
+ rev = branch.repository.get_revisions([revision_id])[0]
+ except errors.NoSuchRevision:
+ revisions[revision_id] = None
+ else:
+ revisions[revision_id] = rev
+
+ # Display the revisions brought in by this merge.
+ rev_id_iterator = _get_sorted_revisions(merge, merge_extra,
+ branch.repository.get_parent_map(merge_extra))
+ # Skip the first node
+ num, first, depth, eom = rev_id_iterator.next()
+ if first != merge:
+ raise AssertionError('Somehow we misunderstood how'
+ ' iter_topo_order works %s != %s' % (first, merge))
+ for num, sub_merge, depth, eom in rev_id_iterator:
+ rev = revisions[sub_merge]
+ if rev is None:
+ to_file.write(sub_prefix + '(ghost) ' + sub_merge + '\n')
+ continue
+ show_log_message(revisions[sub_merge], sub_prefix)
+
+
+def _filter_nonexistent(orig_paths, old_tree, new_tree):
+ """Convert orig_paths to two sorted lists and return them.
+
+ The first is the orig_paths list minus the items in the second list,
+ and the second list is paths that are not in either inventory or
+ tree (they don't qualify if they exist in the tree's inventory, or
+ if they exist in the tree but are not versioned.)
+
+ If either of the two lists is empty, return it as an empty list.
+
+ This can be used by operations such as bzr status that can accept
+ unknown or ignored files.
+ """
+ mutter("check paths: %r", orig_paths)
+ if not orig_paths:
+ return orig_paths, []
+ s = old_tree.filter_unversioned_files(orig_paths)
+ s = new_tree.filter_unversioned_files(s)
+ nonexistent = [path for path in s if not new_tree.has_filename(path)]
+ remaining = [path for path in orig_paths if not path in nonexistent]
+ # Sorting the 'remaining' list doesn't have much effect in
+ # practice, since the various status output sections will sort
+ # their groups individually. But for consistency of this
+ # function's API, it's better to sort both than just 'nonexistent'.
+ return sorted(remaining), sorted(nonexistent)
+
+
+class StatusHooks(_mod_hooks.Hooks):
+ """A dictionary mapping hook name to a list of callables for status hooks.
+
+ e.g. ['post_status'] is the list of items to be called when the
+ status command has finished printing the status.
+ """
+
+ def __init__(self):
+ """Create the default hooks.
+
+ These are all empty initially, because by default nothing should get
+ notified.
+ """
+ _mod_hooks.Hooks.__init__(self, "bzrlib.status", "hooks")
+ self.add_hook('post_status',
+ "Called with argument StatusHookParams after Bazaar has "
+ "displayed the status. StatusHookParams has the attributes "
+ "(old_tree, new_tree, to_file, versioned, show_ids, short, "
+ "verbose). The last four arguments correspond to the command "
+ "line options specified by the user for the status command. "
+ "to_file is the output stream for writing.",
+ (2, 3))
+ self.add_hook('pre_status',
+ "Called with argument StatusHookParams before Bazaar "
+ "displays the status. StatusHookParams has the attributes "
+ "(old_tree, new_tree, to_file, versioned, show_ids, short, "
+ "verbose). The last four arguments correspond to the command "
+ "line options specified by the user for the status command. "
+ "to_file is the output stream for writing.",
+ (2, 3))
+
+
+class StatusHookParams(object):
+ """Object holding parameters passed to post_status hooks.
+
+ :ivar old_tree: Start tree (basis tree) for comparison.
+ :ivar new_tree: Working tree.
+ :ivar to_file: If set, write to this file.
+ :ivar versioned: Show only versioned files.
+ :ivar show_ids: Show internal object ids.
+ :ivar short: Use short status indicators.
+ :ivar verbose: Verbose flag.
+ """
+
+ def __init__(self, old_tree, new_tree, to_file, versioned, show_ids,
+ short, verbose, specific_files=None):
+ """Create a group of post_status hook parameters.
+
+ :param old_tree: Start tree (basis tree) for comparison.
+ :param new_tree: Working tree.
+ :param to_file: If set, write to this file.
+ :param versioned: Show only versioned files.
+ :param show_ids: Show internal object ids.
+ :param short: Use short status indicators.
+ :param verbose: Verbose flag.
+ :param specific_files: If set, a list of filenames whose status should be
+ shown. It is an error to give a filename that is not in the working
+ tree, or in the working inventory or in the basis inventory.
+ """
+ self.old_tree = old_tree
+ self.new_tree = new_tree
+ self.to_file = to_file
+ self.versioned = versioned
+ self.show_ids = show_ids
+ self.short = short
+ self.verbose = verbose
+ self.specific_files = specific_files
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ return "<%s(%s, %s, %s, %s, %s, %s, %s, %s)>" % (self.__class__.__name__,
+ self.old_tree, self.new_tree, self.to_file, self.versioned,
+ self.show_ids, self.short, self.verbose, self.specific_files)
+
+
+def _show_shelve_summary(params):
+ """post_status hook to display a summary of shelves.
+
+ :param params: StatusHookParams.
+ """
+ # Don't show shelves if status of specific files is being shown, only if
+ # no file arguments have been passed
+ if params.specific_files:
+ return
+ get_shelf_manager = getattr(params.new_tree, 'get_shelf_manager', None)
+ if get_shelf_manager is None:
+ return
+ manager = get_shelf_manager()
+ shelves = manager.active_shelves()
+ if shelves:
+ singular = '%d shelf exists. '
+ plural = '%d shelves exist. '
+ if len(shelves) == 1:
+ fmt = singular
+ else:
+ fmt = plural
+ params.to_file.write(fmt % len(shelves))
+ params.to_file.write('See "bzr shelve --list" for details.\n')
+
+
+hooks = StatusHooks()
+
+
+hooks.install_named_hook('post_status', _show_shelve_summary,
+ 'bzr status')
+
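
A short sketch of driving this module from outside it: printing a short-format status for the current working tree and registering an extra post_status hook. The hook body and the plugin label are illustrative, and '.' is assumed to be a working tree:

    from bzrlib import status
    from bzrlib.workingtree import WorkingTree

    def _note_status_finished(params):
        # params is the StatusHookParams instance defined above
        params.to_file.write('status finished (short=%s)\n' % params.short)

    status.hooks.install_named_hook('post_status', _note_status_finished,
                                    'status example')

    wt = WorkingTree.open('.')
    status.show_tree_status(wt, short=True)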
diff --git a/bzrlib/store/__init__.py b/bzrlib/store/__init__.py
new file mode 100644
index 0000000..c2a6bef
--- /dev/null
+++ b/bzrlib/store/__init__.py
@@ -0,0 +1,323 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# TODO: Could remember a bias towards whether a particular store is typically
+# compressed or not.
+
+"""
+Stores are the main data-storage mechanism for Bazaar.
+
+A store is a simple write-once container indexed by a universally
+unique ID.
+"""
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib import (
+ errors,
+ versionedfile,
+ )
+from bzrlib.errors import BzrError, UnlistableStore
+from bzrlib.trace import mutter
+
+######################################################################
+# stores
+
+class StoreError(Exception):
+ pass
+
+
+class Store(object):
+ """This class represents the abstract storage layout for saving information.
+
+ Files can be added, but not modified once they are in. Typically
+ the hash is used as the name, or something else known to be unique,
+ such as a UUID.
+ """
+
+ def __len__(self):
+ raise NotImplementedError('Children should define their length')
+
+ def get(self, fileid, suffix=None):
+ """Returns a file reading from a particular entry.
+
+ If suffix is present, retrieve the named suffix for fileid.
+ """
+ raise NotImplementedError
+
+ def __getitem__(self, fileid):
+ """DEPRECATED. Please use .get(fileid) instead."""
+ raise NotImplementedError
+
+ def __iter__(self):
+ raise NotImplementedError
+
+ def add(self, f, fileid):
+ """Add a file object f to the store accessible from the given fileid"""
+ raise NotImplementedError('Children of Store must define their method of adding entries.')
+
+ def has_id(self, fileid, suffix=None):
+ """Return True or false for the presence of fileid in the store.
+
+ suffix, if present, is a per-file suffix, e.g. for digital signature
+ data."""
+ raise NotImplementedError
+
+ def listable(self):
+ """Return True if this store is able to be listed."""
+ return (getattr(self, "__iter__", None) is not None)
+
+ def copy_all_ids(self, store_from, pb=None):
+ """Copy all the file ids from store_from into self."""
+ if not store_from.listable():
+ raise UnlistableStore(store_from)
+ ids = []
+ for count, file_id in enumerate(store_from):
+ if pb:
+ pb.update('listing files', count, count)
+ ids.append(file_id)
+ if pb:
+ pb.clear()
+ mutter('copy_all ids: %r', ids)
+ self.copy_multi(store_from, ids, pb=pb)
+
+ def copy_multi(self, other, ids, pb=None, permit_failure=False):
+ """Copy texts for ids from other into self.
+
+ If an id is present in self, it is skipped. A count of copied
+ ids is returned, which may be less than len(ids).
+
+ :param other: Another Store object
+ :param ids: A list of entry ids to be copied
+ :param pb: A ProgressTask object; if none is given, no progress is reported.
+ :param permit_failure: Allow missing entries to be ignored
+ :return: (n_copied, [failed]) The number of entries copied successfully,
+ followed by a list of entries which could not be copied (because they
+ were missing)
+ """
+ if pb:
+ pb.update('preparing to copy')
+ failed = set()
+ count = 0
+ for fileid in ids:
+ count += 1
+ if self.has_id(fileid):
+ continue
+ try:
+ self._copy_one(fileid, None, other, pb)
+ for suffix in self._suffixes:
+ try:
+ self._copy_one(fileid, suffix, other, pb)
+ except KeyError:
+ pass
+ if pb:
+ pb.update('copy', count, len(ids))
+ except KeyError:
+ if permit_failure:
+ failed.add(fileid)
+ else:
+ raise
+ if pb:
+ pb.clear()
+ return count, failed
+
+ def _copy_one(self, fileid, suffix, other, pb):
+ """Most generic copy-one object routine.
+
+ Subclasses can override this to provide an optimised
+ copy between their own instances. Such overridden routines
+ should call this if they have no optimised facility for a
+ specific 'other'.
+ """
+ mutter('Store._copy_one: %r', fileid)
+ f = other.get(fileid, suffix)
+ self.add(f, fileid, suffix)
+
+
+class TransportStore(Store):
+ """A TransportStore is a Store superclass for Stores that use Transports."""
+
+ def add(self, f, fileid, suffix=None):
+ """Add contents of a file into the store.
+
+ f -- A file-like object
+ """
+ mutter("add store entry %r", fileid)
+ names = self._id_to_names(fileid, suffix)
+ if self._transport.has_any(names):
+ raise BzrError("store %r already contains id %r"
+ % (self._transport.base, fileid))
+
+ # Most of the time, just adding the file will work
+ # if we find a time where it fails, (because the dir
+ # doesn't exist), then create the dir, and try again
+ self._add(names[0], f)
+
+ def _add(self, relpath, f):
+ """Actually add the file to the given location.
+ This should be overridden by children.
+ """
+ raise NotImplementedError('children need to implement this function.')
+
+ def _check_fileid(self, fileid):
+ if type(fileid) != str:
+ raise TypeError('Fileids should be bytestrings: %s %r' % (
+ type(fileid), fileid))
+ if '\\' in fileid or '/' in fileid:
+ raise ValueError("invalid store id %r" % fileid)
+
+ def _id_to_names(self, fileid, suffix):
+ """Return the names in the expected order"""
+ if suffix is not None:
+ fn = self._relpath(fileid, [suffix])
+ else:
+ fn = self._relpath(fileid)
+
+ # FIXME RBC 20051128 this belongs in TextStore.
+ fn_gz = fn + '.gz'
+ if self._compressed:
+ return fn_gz, fn
+ else:
+ return fn, fn_gz
+
+ def has_id(self, fileid, suffix=None):
+ """See Store.has_id."""
+ return self._transport.has_any(self._id_to_names(fileid, suffix))
+
+ def _get_name(self, fileid, suffix=None):
+ """A special check, which returns the name of an existing file.
+
+ This is similar in spirit to 'has_id', but it is designed
+ to return information about which file the store has.
+ """
+ for name in self._id_to_names(fileid, suffix=suffix):
+ if self._transport.has(name):
+ return name
+ return None
+
+ def _get(self, filename):
+ """Return an vanilla file stream for clients to read from.
+
+ This is the body of a template method on 'get', and should be
+ implemented by subclasses.
+ """
+ raise NotImplementedError
+
+ def get(self, fileid, suffix=None):
+ """See Store.get()."""
+ names = self._id_to_names(fileid, suffix)
+ for name in names:
+ try:
+ return self._get(name)
+ except errors.NoSuchFile:
+ pass
+ raise KeyError(fileid)
+
+ def __init__(self, a_transport, prefixed=False, compressed=False,
+ dir_mode=None, file_mode=None,
+ escaped=False):
+ super(TransportStore, self).__init__()
+ self._transport = a_transport
+ self._prefixed = prefixed
+ # FIXME RBC 20051128 this belongs in TextStore.
+ self._compressed = compressed
+ self._suffixes = set()
+ self._escaped = escaped
+
+ # It is okay for these to be None; it just means they
+ # will use the filesystem defaults
+ self._dir_mode = dir_mode
+ self._file_mode = file_mode
+ # Create a key mapper to use
+ if escaped and prefixed:
+ self._mapper = versionedfile.HashEscapedPrefixMapper()
+ elif not escaped and prefixed:
+ self._mapper = versionedfile.HashPrefixMapper()
+ elif self._escaped:
+ raise ValueError(
+ "%r: escaped unprefixed stores are not permitted."
+ % (self,))
+ else:
+ self._mapper = versionedfile.PrefixMapper()
+
+ def _iter_files_recursive(self):
+ """Iterate through the files in the transport."""
+ for quoted_relpath in self._transport.iter_files_recursive():
+ yield quoted_relpath
+
+ def __iter__(self):
+ for relpath in self._iter_files_recursive():
+ # worst case is one of each suffix.
+ name = os.path.basename(relpath)
+ if name.endswith('.gz'):
+ name = name[:-3]
+ skip = False
+ for count in range(len(self._suffixes)):
+ for suffix in self._suffixes:
+ if name.endswith('.' + suffix):
+ skip = True
+ if not skip:
+ yield self._mapper.unmap(name)[0]
+
+ def __len__(self):
+ return len(list(self.__iter__()))
+
+ def _relpath(self, fileid, suffixes=None):
+ self._check_fileid(fileid)
+ if suffixes:
+ for suffix in suffixes:
+ if not suffix in self._suffixes:
+ raise ValueError("Unregistered suffix %r" % suffix)
+ self._check_fileid(suffix)
+ else:
+ suffixes = []
+ path = self._mapper.map((fileid,))
+ full_path = '.'.join([path] + suffixes)
+ return full_path
+
+ def __repr__(self):
+ if self._transport is None:
+ return "%s(None)" % (self.__class__.__name__)
+ else:
+ return "%s(%r)" % (self.__class__.__name__, self._transport.base)
+
+ __str__ = __repr__
+
+ def listable(self):
+ """Return True if this store is able to be listed."""
+ return self._transport.listable()
+
+ def register_suffix(self, suffix):
+ """Register a suffix as being expected in this store."""
+ self._check_fileid(suffix)
+ if suffix == 'gz':
+ raise ValueError('You cannot register the "gz" suffix.')
+ self._suffixes.add(suffix)
+
+ def total_size(self):
+ """Return (count, bytes)
+
+ This is the (compressed) size stored on disk, not the size of
+ the content."""
+ total = 0
+ count = 0
+ for relpath in self._transport.iter_files_recursive():
+ count += 1
+ total += self._transport.stat(relpath).st_size
+
+ return count, total
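
TransportStore leaves _add and _get to subclasses; a rough sketch of a minimal uncompressed subclass (essentially the simple path of TextStore in the next hunk, without gzip support or the mkdir retry) might look like this. The class name is illustrative:

    from bzrlib.store import TransportStore

    class PlainStore(TransportStore):
        """Illustrative store writing entries straight through the transport."""

        def _add(self, relpath, f):
            # No directory-creation retry here; see TextStore._try_put for that.
            self._transport.put_file(relpath, f, mode=self._file_mode)

        def _get(self, relpath):
            return self._transport.get(relpath)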
diff --git a/bzrlib/store/text.py b/bzrlib/store/text.py
new file mode 100644
index 0000000..d0ffcf1
--- /dev/null
+++ b/bzrlib/store/text.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2005, 2006, 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A store that keeps the full text of every version.
+
+This store keeps uncompressed versions of the full text. It does not
+do any sort of delta compression.
+"""
+
+from __future__ import absolute_import
+
+import gzip
+import os
+
+from bzrlib import osutils
+from bzrlib.errors import BzrError, NoSuchFile, FileExists
+import bzrlib.store
+from bzrlib.trace import mutter
+
+
+
+class TextStore(bzrlib.store.TransportStore):
+ """Store that holds files indexed by unique names.
+
+ Files can be added, but not modified once they are in. Typically
+ the hash is used as the name, or something else known to be unique,
+ such as a UUID.
+
+ Files are stored uncompressed, with no delta compression.
+ """
+
+ def _add_compressed(self, fn, f):
+ from cStringIO import StringIO
+ from bzrlib.osutils import pumpfile
+
+ if isinstance(f, basestring):
+ f = StringIO(f)
+
+ sio = StringIO()
+ gf = gzip.GzipFile(mode='wb', fileobj=sio)
+ # if pumpfile handles files that don't fit in ram,
+ # so will this function
+ pumpfile(f, gf)
+ gf.close()
+ sio.seek(0)
+ self._try_put(fn, sio)
+
+ def _add(self, fn, f):
+ if self._compressed:
+ self._add_compressed(fn, f)
+ else:
+ self._try_put(fn, f)
+
+ def _try_put(self, fn, f):
+ try:
+ self._transport.put_file(fn, f, mode=self._file_mode)
+ except NoSuchFile:
+ if not self._prefixed:
+ raise
+ try:
+ self._transport.mkdir(os.path.dirname(fn), mode=self._dir_mode)
+ except FileExists:
+ pass
+ self._transport.put_file(fn, f, mode=self._file_mode)
+
+ def _get(self, fn):
+ if fn.endswith('.gz'):
+ return self._get_compressed(fn)
+ else:
+ return self._transport.get(fn)
+
+ def _copy_one(self, fileid, suffix, other, pb):
+ # TODO: Once the copy_to interface is improved to allow a source
+ # and destination targets, then we can always do the copy
+ # as long as other is a TextStore
+ if not (isinstance(other, TextStore)
+ and other._prefixed == self._prefixed):
+ return super(TextStore, self)._copy_one(fileid, suffix, other, pb)
+
+ mutter('_copy_one: %r, %r', fileid, suffix)
+ path = other._get_name(fileid, suffix)
+ if path is None:
+ raise KeyError(fileid + '-' + str(suffix))
+
+ try:
+ result = other._transport.copy_to([path], self._transport,
+ mode=self._file_mode)
+ except NoSuchFile:
+ if not self._prefixed:
+ raise
+ try:
+ self._transport.mkdir(osutils.dirname(path), mode=self._dir_mode)
+ except FileExists:
+ pass
+ result = other._transport.copy_to([path], self._transport,
+ mode=self._file_mode)
+
+ if result != 1:
+ raise BzrError('Unable to copy file: %r' % (path,))
+
+ def _get_compressed(self, filename):
+ """Returns a file reading from a particular entry."""
+ f = self._transport.get(filename)
+ # gzip.GzipFile.read() requires a tell() function
+ # but some transports return objects that cannot seek
+ # so buffer them in a StringIO instead
+ if getattr(f, 'tell', None) is not None:
+ return gzip.GzipFile(mode='rb', fileobj=f)
+ try:
+ from cStringIO import StringIO
+ sio = StringIO(f.read())
+ return gzip.GzipFile(mode='rb', fileobj=sio)
+ finally:
+ f.close()
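
A usage sketch for TextStore; the transport path and file id are illustrative, and the target directory is assumed to exist already:

    from cStringIO import StringIO

    from bzrlib.store.text import TextStore
    from bzrlib.transport import get_transport

    store = TextStore(get_transport('/tmp/example-store'),
                      prefixed=False, compressed=False)
    store.add(StringIO('hello world\n'), 'example-file-id')
    store.has_id('example-file-id')        # True
    print store.get('example-file-id').read()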
diff --git a/bzrlib/store/versioned/__init__.py b/bzrlib/store/versioned/__init__.py
new file mode 100644
index 0000000..145cec8
--- /dev/null
+++ b/bzrlib/store/versioned/__init__.py
@@ -0,0 +1,243 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# XXX: Needs some consideration of the problems that might occur if there are
+# files whose id differs only in case. That should probably be forbidden.
+
+
+import os
+from warnings import warn
+
+from bzrlib import (
+ errors,
+ osutils,
+ )
+from bzrlib.store import TransportStore
+from bzrlib.trace import mutter
+import bzrlib.ui
+
+
+class VersionedFileStore(TransportStore):
+ """Collection of many versioned files in a transport."""
+
+ # TODO: Rather than passing versionedfile_kwargs, perhaps pass in a
+ # transport factory callable?
+ def __init__(self, transport, prefixed=False, precious=False,
+ dir_mode=None, file_mode=None,
+ versionedfile_class=None,
+ versionedfile_kwargs={},
+ escaped=False):
+ super(VersionedFileStore, self).__init__(transport,
+ dir_mode=dir_mode, file_mode=file_mode,
+ prefixed=prefixed, compressed=False, escaped=escaped)
+ self._precious = precious
+ self._versionedfile_class = versionedfile_class
+ self._versionedfile_kwargs = versionedfile_kwargs
+ # Used for passing get_scope to versioned file constructors;
+ self.get_scope = None
+
+ def filename(self, file_id):
+ """Return the path relative to the transport root."""
+ return self._relpath(file_id)
+
+ def __iter__(self):
+ suffixes = self._versionedfile_class.get_suffixes()
+ ids = set()
+ for relpath in self._iter_files_recursive():
+ for suffix in suffixes:
+ if relpath.endswith(suffix):
+ # TODO: use standard remove_suffix function
+ escaped_id = os.path.basename(relpath[:-len(suffix)])
+ file_id = self._mapper.unmap(escaped_id)[0]
+ if file_id not in ids:
+ ids.add(file_id)
+ yield file_id
+ break # only one suffix can match
+
+ def has_id(self, file_id):
+ suffixes = self._versionedfile_class.get_suffixes()
+ filename = self.filename(file_id)
+ for suffix in suffixes:
+ if not self._transport.has(filename + suffix):
+ return False
+ return True
+
+ def get_empty(self, file_id, transaction):
+ """Get an empty weave, which implies deleting the existing one first."""
+ if self.has_id(file_id):
+ self.delete(file_id, transaction)
+ return self.get_weave_or_empty(file_id, transaction)
+
+ def delete(self, file_id, transaction):
+ """Remove file_id from the store."""
+ suffixes = self._versionedfile_class.get_suffixes()
+ filename = self.filename(file_id)
+ for suffix in suffixes:
+ self._transport.delete(filename + suffix)
+
+ def _get(self, file_id):
+ return self._transport.get(self.filename(file_id))
+
+ def _put(self, file_id, f):
+ fn = self.filename(file_id)
+ try:
+ return self._transport.put_file(fn, f, mode=self._file_mode)
+ except errors.NoSuchFile:
+ if not self._prefixed:
+ raise
+ self._transport.mkdir(os.path.dirname(fn), mode=self._dir_mode)
+ return self._transport.put_file(fn, f, mode=self._file_mode)
+
+ def get_weave(self, file_id, transaction, _filename=None):
+ """Return the VersionedFile for file_id.
+
+ :param _filename: filename that would be returned from self.filename for
+ file_id. This is used to reduce duplicate filename calculations when
+ using 'get_weave_or_empty'. FOR INTERNAL USE ONLY.
+ """
+ if _filename is None:
+ _filename = self.filename(file_id)
+ if transaction.writeable():
+ w = self._versionedfile_class(_filename, self._transport, self._file_mode,
+ get_scope=self.get_scope, **self._versionedfile_kwargs)
+ else:
+ w = self._versionedfile_class(_filename,
+ self._transport,
+ self._file_mode,
+ create=False,
+ access_mode='r',
+ get_scope=self.get_scope,
+ **self._versionedfile_kwargs)
+ return w
+
+ def _make_new_versionedfile(self, file_id, transaction,
+ known_missing=False, _filename=None):
+ """Make a new versioned file.
+
+ :param _filename: filename that would be returned from self.filename for
+ file_id. This is used to reduce duplicate filename calculations when
+ using 'get_weave_or_empty'. FOR INTERNAL USE ONLY.
+ """
+ if not known_missing and self.has_id(file_id):
+ self.delete(file_id, transaction)
+ if _filename is None:
+ _filename = self.filename(file_id)
+ try:
+ # we try without making the directory first because that's optimising
+ # for the common case.
+ weave = self._versionedfile_class(_filename, self._transport, self._file_mode, create=True,
+ get_scope=self.get_scope, **self._versionedfile_kwargs)
+ except errors.NoSuchFile:
+ if not self._prefixed:
+ # unexpected error - NoSuchFile is expected to be raised on a
+ # missing dir only and that only occurs when we are prefixed.
+ raise
+ dirname = osutils.dirname(_filename)
+ self._transport.mkdir(dirname, mode=self._dir_mode)
+ weave = self._versionedfile_class(_filename, self._transport,
+ self._file_mode, create=True,
+ get_scope=self.get_scope,
+ **self._versionedfile_kwargs)
+ return weave
+
+ def get_weave_or_empty(self, file_id, transaction):
+ """Return a weave, or an empty one if it doesn't exist."""
+ # This is typically used from 'commit' and 'fetch/push/pull' where
+ # we scan across many versioned files once. As such the small overhead
+ # of calculating the filename before doing a cache lookup is more than
+ # compensated for by not calculating the filename when making new
+ # versioned files.
+ _filename = self.filename(file_id)
+ try:
+ return self.get_weave(file_id, transaction, _filename=_filename)
+ except errors.NoSuchFile:
+ weave = self._make_new_versionedfile(file_id, transaction,
+ known_missing=True, _filename=_filename)
+ return weave
+
+ def _put_weave(self, file_id, weave, transaction):
+ """Preserved here for upgrades-to-weaves to use."""
+ myweave = self._make_new_versionedfile(file_id, transaction)
+ myweave.insert_record_stream(weave.get_record_stream(
+ [(version,) for version in weave.versions()],
+ 'topological', False))
+
+ def copy_all_ids(self, store_from, pb=None, from_transaction=None,
+ to_transaction=None):
+ """Copy all the file ids from store_from into self."""
+ if from_transaction is None:
+ warn("Please pass from_transaction into "
+ "versioned_store.copy_all_ids.", stacklevel=2)
+ if to_transaction is None:
+ warn("Please pass to_transaction into "
+ "versioned_store.copy_all_ids.", stacklevel=2)
+ if not store_from.listable():
+ raise errors.UnlistableStore(store_from)
+ ids = []
+ for count, file_id in enumerate(store_from):
+ if pb:
+ pb.update('listing files', count, count)
+ ids.append(file_id)
+ if pb:
+ pb.clear()
+ mutter('copy_all ids: %r', ids)
+ self.copy_multi(store_from, ids, pb=pb,
+ from_transaction=from_transaction,
+ to_transaction=to_transaction)
+
+ def copy_multi(self, from_store, file_ids, pb=None, from_transaction=None,
+ to_transaction=None):
+ """Copy all the versions for multiple file_ids from from_store.
+
+ :param from_transaction: required current transaction in from_store.
+ """
+ from bzrlib.transactions import PassThroughTransaction
+ if from_transaction is None:
+ warn("VersionedFileStore.copy_multi without a from_transaction parameter "
+ "is deprecated. Please provide a from_transaction.",
+ DeprecationWarning,
+ stacklevel=2)
+ # we are reading one object - caching is irrelevant.
+ from_transaction = PassThroughTransaction()
+ if to_transaction is None:
+ warn("VersionedFileStore.copy_multi without a to_transaction parameter "
+ "is deprecated. Please provide a to_transaction.",
+ DeprecationWarning,
+ stacklevel=2)
+ # we are copying single objects, and there may be open transactions
+ # so again with the passthrough
+ to_transaction = PassThroughTransaction()
+ pb = bzrlib.ui.ui_factory.nested_progress_bar()
+ try:
+ for count, f in enumerate(file_ids):
+ mutter("copy weave {%s} into %s", f, self)
+ pb.update('copy', count, len(file_ids))
+ # if we have it in cache, it's faster.
+ # joining is fast with knits, and bearable for weaves -
+ # indeed the new case can be optimised if needed.
+ target = self._make_new_versionedfile(f, to_transaction)
+ source = from_store.get_weave(f, from_transaction)
+ target.insert_record_stream(source.get_record_stream(
+ [(version,) for version in source.versions()],
+ 'topological', False))
+ finally:
+ pb.finished()
+
+ def total_size(self):
+ count, bytes = super(VersionedFileStore, self).total_size()
+ return (count / len(self._versionedfile_class.get_suffixes())), bytes
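
A rough sketch of constructing and using a VersionedFileStore. WeaveFile and WriteTransaction are assumed to be compatible with the constructor arguments used here (as in the older weave-based formats), and the transport directory is assumed to exist:

    from bzrlib.store.versioned import VersionedFileStore
    from bzrlib.transactions import WriteTransaction
    from bzrlib.transport import get_transport
    from bzrlib.weave import WeaveFile

    store = VersionedFileStore(get_transport('/tmp/example-weaves'),
                               versionedfile_class=WeaveFile)
    transaction = WriteTransaction()
    # Creates the weave if it is missing, otherwise opens the existing one.
    weave = store.get_weave_or_empty('example-file-id', transaction)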
diff --git a/bzrlib/strace.py b/bzrlib/strace.py
new file mode 100644
index 0000000..b65fa43
--- /dev/null
+++ b/bzrlib/strace.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Support for running strace against the current process."""
+
+from __future__ import absolute_import
+
+import os
+import signal
+import subprocess
+import tempfile
+
+from bzrlib import errors
+
+
+def strace(function, *args, **kwargs):
+ """Invoke strace on function.
+
+ :return: a tuple: function-result, a StraceResult.
+ """
+ return strace_detailed(function, args, kwargs)
+
+
+def strace_detailed(function, args, kwargs, follow_children=True):
+ # FIXME: strace is buggy
+ # (https://bugs.launchpad.net/ubuntu/+source/strace/+bug/103133) and the
+ # test suite hangs if the '-f' is given to strace *and* more than one
+ # thread is running. Using follow_children=False allows the test suite to
+ # disable fork following to work around the bug.
+
+ # capture strace output to a file
+ log_file = tempfile.NamedTemporaryFile()
+ log_file_fd = log_file.fileno()
+ err_file = tempfile.NamedTemporaryFile()
+ pid = os.getpid()
+ # start strace
+ strace_cmd = ['strace', '-r', '-tt', '-p', str(pid), '-o', log_file.name]
+ if follow_children:
+ strace_cmd.append('-f')
+ # need to catch both stdout and stderr to work around
+ # bug 627208
+ proc = subprocess.Popen(strace_cmd,
+ stdout=subprocess.PIPE,
+ stderr=err_file.fileno())
+ # Wait for strace to attach
+ attached_notice = proc.stdout.readline()
+ # Run the function to strace
+ result = function(*args, **kwargs)
+ # stop strace
+ os.kill(proc.pid, signal.SIGQUIT)
+ proc.communicate()
+ # grab the log
+ log_file.seek(0)
+ log = log_file.read()
+ log_file.close()
+ # and stderr
+ err_file.seek(0)
+ err_messages = err_file.read()
+ err_file.close()
+ # and read any errors
+ if err_messages.startswith("attach: ptrace(PTRACE_ATTACH,"):
+ raise StraceError(err_messages=err_messages)
+ return result, StraceResult(log, err_messages)
+
+
+class StraceError(errors.BzrError):
+
+ _fmt = "strace failed: %(err_messages)s"
+
+
+class StraceResult(object):
+ """The result of stracing a function."""
+
+ def __init__(self, raw_log, err_messages):
+ """Create a StraceResult.
+
+ :param raw_log: The output that strace created.
+ """
+ self.raw_log = raw_log
+ self.err_messages = err_messages
+
+
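
A usage sketch; this needs the strace binary installed and permission to ptrace the current process, and the traced function below is illustrative:

    import os

    from bzrlib.strace import strace, strace_detailed

    def _do_some_io():
        return os.listdir('.')

    result, trace = strace(_do_some_io)
    print trace.raw_log[:200]

    # Single-threaded callers can avoid the '-f' hang noted above:
    result, trace = strace_detailed(_do_some_io, (), {}, follow_children=False)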
diff --git a/bzrlib/switch.py b/bzrlib/switch.py
new file mode 100644
index 0000000..6f8e75e
--- /dev/null
+++ b/bzrlib/switch.py
@@ -0,0 +1,175 @@
+# Copyright (C) 2007, 2009-2012 Canonical Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+# Original author: David Allouche
+
+from bzrlib import errors, merge, revision
+from bzrlib.branch import Branch
+from bzrlib.i18n import gettext
+from bzrlib.trace import note
+
+def _run_post_switch_hooks(control_dir, to_branch, force, revision_id):
+ from bzrlib.branch import SwitchHookParams
+ hooks = Branch.hooks['post_switch']
+ if not hooks:
+ return
+ params = SwitchHookParams(control_dir, to_branch, force, revision_id)
+ for hook in hooks:
+ hook(params)
+
+def switch(control_dir, to_branch, force=False, quiet=False, revision_id=None):
+ """Switch the branch associated with a checkout.
+
+ :param control_dir: ControlDir of the checkout to change
+ :param to_branch: branch that the checkout is to reference
+ :param force: skip the check for local commits in a heavy checkout
+ :param revision_id: revision ID to switch to.
+ """
+ _check_pending_merges(control_dir, force)
+ try:
+ source_repository = control_dir.open_branch().repository
+ except errors.NotBranchError:
+ source_repository = to_branch.repository
+ to_branch.lock_read()
+ try:
+ _set_branch_location(control_dir, to_branch, force)
+ finally:
+ to_branch.unlock()
+ tree = control_dir.open_workingtree()
+ _update(tree, source_repository, quiet, revision_id)
+ _run_post_switch_hooks(control_dir, to_branch, force, revision_id)
+
+def _check_pending_merges(control, force=False):
+ """Check that there are no outstanding pending merges before switching.
+
+ :param control: ControlDir of the branch to check
+ """
+ try:
+ tree = control.open_workingtree()
+ except errors.NotBranchError, ex:
+ # Lightweight checkout and branch is no longer there
+ if force:
+ return
+ else:
+ raise ex
+ # XXX: Should the tree be locked for get_parent_ids?
+ existing_pending_merges = tree.get_parent_ids()[1:]
+ if len(existing_pending_merges) > 0:
+ raise errors.BzrCommandError(gettext('Pending merges must be '
+ 'committed or reverted before using switch.'))
+
+
+def _set_branch_location(control, to_branch, force=False):
+ """Set location value of a branch reference.
+
+ :param control: ControlDir of the checkout to change
+ :param to_branch: branch that the checkout is to reference
+ :param force: skip the check for local commits in a heavy checkout
+ """
+ branch_format = control.find_branch_format()
+ if branch_format.get_reference(control) is not None:
+ # Lightweight checkout: update the branch reference
+ branch_format.set_reference(control, None, to_branch)
+ else:
+ b = control.open_branch()
+ bound_branch = b.get_bound_location()
+ if bound_branch is not None:
+ # Heavyweight checkout: check all local commits
+ # have been pushed to the current bound branch then
+ # synchronise the local branch with the new remote branch
+ # and bind to it
+ possible_transports = []
+ try:
+ if not force and _any_local_commits(b, possible_transports):
+ raise errors.BzrCommandError(gettext(
+ 'Cannot switch as local commits found in the checkout. '
+ 'Commit these to the bound branch or use --force to '
+ 'throw them away.'))
+ except errors.BoundBranchConnectionFailure, e:
+ raise errors.BzrCommandError(gettext(
+ 'Unable to connect to current master branch %(target)s: '
+ '%(error)s To switch anyway, use --force.') %
+ e.__dict__)
+ b.lock_write()
+ try:
+ b.set_bound_location(None)
+ b.pull(to_branch, overwrite=True,
+ possible_transports=possible_transports)
+ b.set_bound_location(to_branch.base)
+ b.set_parent(b.get_master_branch().get_parent())
+ finally:
+ b.unlock()
+ else:
+ # If this is a standalone tree and the new branch
+ # is derived from this one, create a lightweight checkout.
+ b.lock_read()
+ try:
+ graph = b.repository.get_graph(to_branch.repository)
+ if (b.bzrdir._format.colocated_branches and
+ (force or graph.is_ancestor(b.last_revision(),
+ to_branch.last_revision()))):
+ b.bzrdir.destroy_branch()
+ b.bzrdir.set_branch_reference(to_branch, name="")
+ else:
+ raise errors.BzrCommandError(gettext('Cannot switch a branch, '
+ 'only a checkout.'))
+ finally:
+ b.unlock()
+
+
+def _any_local_commits(this_branch, possible_transports):
+ """Does this branch have any commits not in the master branch?"""
+ last_rev = revision.ensure_null(this_branch.last_revision())
+ if last_rev != revision.NULL_REVISION:
+ other_branch = this_branch.get_master_branch(possible_transports)
+ this_branch.lock_read()
+ other_branch.lock_read()
+ try:
+ other_last_rev = other_branch.last_revision()
+ graph = this_branch.repository.get_graph(
+ other_branch.repository)
+ if not graph.is_ancestor(last_rev, other_last_rev):
+ return True
+ finally:
+ other_branch.unlock()
+ this_branch.unlock()
+ return False
+
+
+def _update(tree, source_repository, quiet=False, revision_id=None):
+ """Update a working tree to the latest revision of its branch.
+
+ :param tree: the working tree
+ :param source_repository: repository holding the revisions
+ """
+ tree.lock_tree_write()
+ try:
+ to_branch = tree.branch
+ if revision_id is None:
+ revision_id = to_branch.last_revision()
+ if tree.last_revision() == revision_id:
+ if not quiet:
+ note(gettext("Tree is up to date at revision %d."), to_branch.revno())
+ return
+ base_tree = source_repository.revision_tree(tree.last_revision())
+ merge.Merge3Merger(tree, tree, base_tree, to_branch.repository.revision_tree(revision_id))
+ tree.set_last_revision(to_branch.last_revision())
+ if not quiet:
+ note(gettext('Updated to revision %d.') % to_branch.revno())
+ finally:
+ tree.unlock()
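
A usage sketch for switching a checkout to another branch; the paths are illustrative, and BzrDir is used here simply as a concrete ControlDir implementation:

    from bzrlib.branch import Branch
    from bzrlib.bzrdir import BzrDir
    from bzrlib.switch import switch

    control_dir = BzrDir.open('.')             # the checkout to retarget
    new_branch = Branch.open('../other-branch')
    switch(control_dir, new_branch)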
diff --git a/bzrlib/symbol_versioning.py b/bzrlib/symbol_versioning.py
new file mode 100644
index 0000000..2656aa7
--- /dev/null
+++ b/bzrlib/symbol_versioning.py
@@ -0,0 +1,371 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Symbol versioning
+
+The methods here allow for api symbol versioning.
+"""
+
+from __future__ import absolute_import
+
+__all__ = ['deprecated_function',
+ 'deprecated_in',
+ 'deprecated_list',
+ 'deprecated_method',
+ 'DEPRECATED_PARAMETER',
+ 'deprecated_passed',
+ 'set_warning_method',
+ 'warn',
+ ]
+
+
+import warnings
+# Import the 'warn' symbol so bzrlib can call it even if we redefine it
+from warnings import warn
+
+import bzrlib
+
+
+DEPRECATED_PARAMETER = "A deprecated parameter marker."
+
+
+def deprecated_in(version_tuple):
+ """Generate a message that something was deprecated in a release.
+
+ >>> deprecated_in((1, 4, 0))
+ '%s was deprecated in version 1.4.0.'
+ """
+ return ("%%s was deprecated in version %s."
+ % bzrlib._format_version_tuple(version_tuple))
+
+
+def set_warning_method(method):
+ """Set the warning method to be used by this module.
+
+ It should take a message and a warning category as warnings.warn does.
+ """
+ global warn
+ warn = method
+
+
+# TODO - maybe this would be easier to use as one 'smart' method that
+# guesses if it is a method or a class or an attribute? If so, we can
+# add that on top of the primitives, once we have all three written
+# - RBC 20050105
+
+
+def deprecation_string(a_callable, deprecation_version):
+ """Generate an automatic deprecation string for a_callable.
+
+ :param a_callable: The callable to substitute into deprecation_version.
+ :param deprecation_version: A deprecation format warning string. This should
+ have a single %s operator in it. a_callable will be turned into a nice
+ python symbol and then substituted into deprecation_version.
+ """
+ # We also want to handle old-style classes, in particular exceptions, as
+ # they don't have an im_class attribute.
+ if getattr(a_callable, 'im_class', None) is None:
+ symbol = "%s.%s" % (a_callable.__module__,
+ a_callable.__name__)
+ else:
+ symbol = "%s.%s.%s" % (a_callable.im_class.__module__,
+ a_callable.im_class.__name__,
+ a_callable.__name__
+ )
+ return deprecation_version % symbol
+
+
+def deprecated_function(deprecation_version):
+ """Decorate a function so that use of it will trigger a warning."""
+
+ def function_decorator(callable):
+ """This is the function python calls to perform the decoration."""
+
+ def decorated_function(*args, **kwargs):
+ """This is the decorated function."""
+ from bzrlib import trace
+ trace.mutter_callsite(4, "Deprecated function called")
+ warn(deprecation_string(callable, deprecation_version),
+ DeprecationWarning, stacklevel=2)
+ return callable(*args, **kwargs)
+ _populate_decorated(callable, deprecation_version, "function",
+ decorated_function)
+ return decorated_function
+ return function_decorator
+
+
+def deprecated_method(deprecation_version):
+ """Decorate a method so that use of it will trigger a warning.
+
+ To deprecate a static or class method, use
+
+ @staticmethod
+ @deprecated_function
+ def ...
+
+ To deprecate an entire class, decorate __init__.
+ """
+
+ def method_decorator(callable):
+ """This is the function python calls to perform the decoration."""
+
+ def decorated_method(self, *args, **kwargs):
+ """This is the decorated method."""
+ from bzrlib import trace
+ if callable.__name__ == '__init__':
+ symbol = "%s.%s" % (self.__class__.__module__,
+ self.__class__.__name__,
+ )
+ else:
+ symbol = "%s.%s.%s" % (self.__class__.__module__,
+ self.__class__.__name__,
+ callable.__name__
+ )
+ trace.mutter_callsite(4, "Deprecated method called")
+ warn(deprecation_version % symbol, DeprecationWarning, stacklevel=2)
+ return callable(self, *args, **kwargs)
+ _populate_decorated(callable, deprecation_version, "method",
+ decorated_method)
+ return decorated_method
+ return method_decorator
+
+
+def deprecated_passed(parameter_value):
+ """Return True if parameter_value was used."""
+ # FIXME: it might be nice to have a parameter deprecation decorator.
+ # it would need to handle positional and *args and **kwargs parameters,
+ # which means some mechanism to describe how the parameter was being
+ # passed before deprecation, and some way to deprecate parameters that
+ # were not at the end of the arg list. That's needed for __init__ where
+ # we cannot just forward to a new method name. I.e. in the following
+ # examples we would want to have callers that pass any value to 'bad' be
+ # given a warning - because we have applied:
+ # @deprecated_parameter('bad', deprecated_in((1, 5, 0)))
+ #
+ # def __init__(self, bad=None)
+ # def __init__(self, bad, other)
+ # def __init__(self, **kwargs)
+ # RBC 20060116
+ return not parameter_value is DEPRECATED_PARAMETER
+
+
+def _decorate_docstring(callable, deprecation_version, label,
+ decorated_callable):
+ if callable.__doc__:
+ docstring_lines = callable.__doc__.split('\n')
+ else:
+ docstring_lines = []
+ if len(docstring_lines) == 0:
+ decorated_callable.__doc__ = deprecation_version % ("This " + label)
+ elif len(docstring_lines) == 1:
+ decorated_callable.__doc__ = (callable.__doc__
+ + "\n"
+ + "\n"
+ + deprecation_version % ("This " + label)
+ + "\n")
+ else:
+ spaces = len(docstring_lines[-1])
+ new_doc = callable.__doc__
+ new_doc += "\n" + " " * spaces
+ new_doc += deprecation_version % ("This " + label)
+ new_doc += "\n" + " " * spaces
+ decorated_callable.__doc__ = new_doc
+
+
+def _populate_decorated(callable, deprecation_version, label,
+ decorated_callable):
+ """Populate attributes like __name__ and __doc__ on the decorated callable.
+ """
+ _decorate_docstring(callable, deprecation_version, label,
+ decorated_callable)
+ decorated_callable.__module__ = callable.__module__
+ decorated_callable.__name__ = callable.__name__
+ decorated_callable.is_deprecated = True
+
+
+def _dict_deprecation_wrapper(wrapped_method):
+ """Returns a closure that emits a warning and calls the superclass"""
+ def cb(dep_dict, *args, **kwargs):
+ msg = 'access to %s' % (dep_dict._variable_name, )
+ msg = dep_dict._deprecation_version % (msg,)
+ if dep_dict._advice:
+ msg += ' ' + dep_dict._advice
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return wrapped_method(dep_dict, *args, **kwargs)
+ return cb
+
+
+class DeprecatedDict(dict):
+ """A dictionary that complains when read or written."""
+
+ is_deprecated = True
+
+ def __init__(self,
+ deprecation_version,
+ variable_name,
+ initial_value,
+ advice,
+ ):
+ """Create a dict that warns when read or modified.
+
+ :param deprecation_version: string for the warning format to raise,
+ typically from deprecated_in()
+ :param initial_value: The contents of the dict
+ :param variable_name: This allows better warnings to be printed
+ :param advice: String of advice on what callers should do instead
+ of using this variable.
+ """
+ self._deprecation_version = deprecation_version
+ self._variable_name = variable_name
+ self._advice = advice
+ dict.__init__(self, initial_value)
+
+ # This isn't every possible method but it should trap anyone using the
+ # dict -- add more if desired
+ __len__ = _dict_deprecation_wrapper(dict.__len__)
+ __getitem__ = _dict_deprecation_wrapper(dict.__getitem__)
+ __setitem__ = _dict_deprecation_wrapper(dict.__setitem__)
+ __delitem__ = _dict_deprecation_wrapper(dict.__delitem__)
+ keys = _dict_deprecation_wrapper(dict.keys)
+ __contains__ = _dict_deprecation_wrapper(dict.__contains__)
+
+
+def deprecated_list(deprecation_version, variable_name,
+ initial_value, extra=None):
+ """Create a list that warns when modified
+
+ :param deprecation_version: string for the warning format to raise,
+ typically from deprecated_in()
+ :param initial_value: The contents of the list
+ :param variable_name: This allows better warnings to be printed
+ :param extra: Extra info to print when printing a warning
+ """
+
+ subst_text = 'Modifying %s' % (variable_name,)
+ msg = deprecation_version % (subst_text,)
+ if extra:
+ msg += ' ' + extra
+
+ class _DeprecatedList(list):
+ __doc__ = list.__doc__ + msg
+
+ is_deprecated = True
+
+ def _warn_deprecated(self, func, *args, **kwargs):
+ warn(msg, DeprecationWarning, stacklevel=3)
+ return func(self, *args, **kwargs)
+
+ def append(self, obj):
+ """appending to %s is deprecated""" % (variable_name,)
+ return self._warn_deprecated(list.append, obj)
+
+ def insert(self, index, obj):
+ """inserting to %s is deprecated""" % (variable_name,)
+ return self._warn_deprecated(list.insert, index, obj)
+
+ def extend(self, iterable):
+ """extending %s is deprecated""" % (variable_name,)
+ return self._warn_deprecated(list.extend, iterable)
+
+ def remove(self, value):
+ """removing from %s is deprecated""" % (variable_name,)
+ return self._warn_deprecated(list.remove, value)
+
+ def pop(self, index=None):
+ """pop'ing from %s is deprecated""" % (variable_name,)
+ if index:
+ return self._warn_deprecated(list.pop, index)
+ else:
+ # Can't pass None
+ return self._warn_deprecated(list.pop)
+
+ return _DeprecatedList(initial_value)
+
+
+def _check_for_filter(error_only):
+ """Check if there is already a filter for deprecation warnings.
+
+ :param error_only: Only match an 'error' filter
+ :return: True if a filter is found, False otherwise
+ """
+ for filter in warnings.filters:
+ if issubclass(DeprecationWarning, filter[2]):
+ # This filter will affect DeprecationWarning
+ if not error_only or filter[0] == 'error':
+ return True
+ return False
+
+
+def _remove_filter_callable(filter):
+ """Build and returns a callable removing filter from the warnings.
+
+ :param filter: The filter to remove (can be None).
+
+ :return: A callable that will remove filter from warnings.filters.
+ """
+ def cleanup():
+ if filter:
+ warnings.filters.remove(filter)
+ return cleanup
+
+
+def suppress_deprecation_warnings(override=True):
+ """Call this function to suppress all deprecation warnings.
+
+ When this is a final release version, we don't want to annoy users with
+ lots of deprecation warnings. We only want the deprecation warnings when
+ running a dev or release candidate.
+
+ :param override: If True, always set the ignore, if False, only set the
+ ignore if there isn't already a filter.
+
+ :return: A callable to remove the new warnings this added.
+ """
+ if not override and _check_for_filter(error_only=False):
+ # If there is already a filter affecting suppress_deprecation_warnings,
+ # then skip it.
+ filter = None
+ else:
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
+ filter = warnings.filters[0]
+ return _remove_filter_callable(filter)
+
+
+def activate_deprecation_warnings(override=True):
+ """Call this function to activate deprecation warnings.
+
+ When running in a 'final' release we suppress deprecation warnings.
+ However, the test suite wants to see them. So when running selftest, we
+ re-enable the deprecation warnings.
+
+ Note: warnings that have already been issued under 'ignore' will not be
+ reported after this point. The 'warnings' module has already marked them as
+ handled, so they don't get issued again.
+
+ :param override: If False, only add a filter if there isn't an error filter
+ already. (This differs slightly from suppress_deprecation_warnings in
+ that it always overrides everything except -Werror).
+
+ :return: A callable to remove the new warnings this added.
+ """
+ if not override and _check_for_filter(error_only=True):
+ # DeprecationWarnings are already turned into errors, don't downgrade
+ # them to 'default'.
+ filter = None
+ else:
+ warnings.filterwarnings('default', category=DeprecationWarning)
+ filter = warnings.filters[0]
+ return _remove_filter_callable(filter)
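+
+
+ # A minimal usage sketch (illustrative only, not part of the upstream module):
+ # both toggles return a cleanup callable so the caller can undo just the
+ # filter they installed, e.g. around code that imports deprecated modules.
+ def _example_toggle_deprecation_warnings():
+     cleanup = suppress_deprecation_warnings(override=False)
+     try:
+         pass  # code that would otherwise emit DeprecationWarnings
+     finally:
+         cleanup()  # removes only the filter added above, if any was added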
diff --git a/bzrlib/tag.py b/bzrlib/tag.py
new file mode 100644
index 0000000..0cab6dd
--- /dev/null
+++ b/bzrlib/tag.py
@@ -0,0 +1,429 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tag strategies.
+
+These are contained within a branch and normally constructed
+when the branch is opened. Clients should typically do
+
+ Branch.tags.add('name', 'value')
+"""
+
+from __future__ import absolute_import
+
+# NOTE: I was going to call this tags.py, but vim seems to think all files
+# called tags* are ctags files... mbp 20070220.
+
+from bzrlib.registry import Registry
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import itertools
+import re
+import sys
+
+from bzrlib import (
+ bencode,
+ cleanup,
+ errors,
+ symbol_versioning,
+ trace,
+ )
+""")
+
+
+class _Tags(object):
+
+ def __init__(self, branch):
+ self.branch = branch
+
+ def get_tag_dict(self):
+ """Return a dictionary mapping tags to revision ids.
+ """
+ raise NotImplementedError(self.get_tag_dict)
+
+ def get_reverse_tag_dict(self):
+ """Return a dictionary mapping revision ids to list of tags.
+ """
+ raise NotImplementedError(self.get_reverse_tag_dict)
+
+ def merge_to(self, to_tags, overwrite=False, ignore_master=False):
+ """Merge new tags from this tags container into another.
+
+ :param to_tags: Tags container to merge into
+ :param overwrite: Whether to overwrite existing, divergent, tags.
+ :param ignore_master: Do not modify the tags in the target's master
+ branch (if any). Default is false (so the master will be updated).
+ New in bzr 2.3.
+ :return: Tuple with tag updates as dictionary and tag conflicts
+ """
+ raise NotImplementedError(self.merge_to)
+
+ def set_tag(self, tag_name, revision):
+ """Set a tag.
+
+ :param tag_name: Tag name
+ :param revision: Revision id
+ :raise GhostTagsNotSupported: if revision is not present in
+ the branch repository
+ """
+ raise NotImplementedError(self.set_tag)
+
+ def lookup_tag(self, tag_name):
+ """Look up a tag.
+
+ :param tag_name: Tag to look up
+ :raise NoSuchTag: Raised when tag does not exist
+ :return: Matching revision id
+ """
+ raise NotImplementedError(self.lookup_tag)
+
+ def delete_tag(self, tag_name):
+ """Delete a tag.
+
+ :param tag_name: Tag to delete
+ :raise NoSuchTag: Raised when tag does not exist
+ """
+ raise NotImplementedError(self.delete_tag)
+
+ def rename_revisions(self, rename_map):
+ """Replace revision ids according to a rename map.
+
+ :param rename_map: Dictionary mapping old revision ids to
+ new revision ids.
+ """
+ raise NotImplementedError(self.rename_revisions)
+
+ def has_tag(self, tag_name):
+ return tag_name in self.get_tag_dict()
+
+
+class DisabledTags(_Tags):
+ """Tag storage that refuses to store anything.
+
+ This is used by older formats that can't store tags.
+ """
+
+ def _not_supported(self, *a, **k):
+ raise errors.TagsNotSupported(self.branch)
+
+ set_tag = _not_supported
+ get_tag_dict = _not_supported
+ _set_tag_dict = _not_supported
+ lookup_tag = _not_supported
+ delete_tag = _not_supported
+
+ def merge_to(self, to_tags, overwrite=False, ignore_master=False):
+ # we never have anything to copy
+ return {}, []
+
+ def rename_revisions(self, rename_map):
+ # No tags, so nothing to rename
+ pass
+
+ def get_reverse_tag_dict(self):
+ # There aren't any tags, so the reverse mapping is empty.
+ return {}
+
+
+class BasicTags(_Tags):
+ """Tag storage in an unversioned branch control file.
+ """
+
+ def set_tag(self, tag_name, tag_target):
+ """Add a tag definition to the branch.
+
+ Behaviour if the tag is already present is not defined (yet).
+ """
+ # all done with a write lock held, so this looks atomic
+ self.branch.lock_write()
+ try:
+ master = self.branch.get_master_branch()
+ if master is not None:
+ master.tags.set_tag(tag_name, tag_target)
+ td = self.get_tag_dict()
+ td[tag_name] = tag_target
+ self._set_tag_dict(td)
+ finally:
+ self.branch.unlock()
+
+ def lookup_tag(self, tag_name):
+ """Return the referent string of a tag"""
+ td = self.get_tag_dict()
+ try:
+ return td[tag_name]
+ except KeyError:
+ raise errors.NoSuchTag(tag_name)
+
+ def get_tag_dict(self):
+ self.branch.lock_read()
+ try:
+ try:
+ tag_content = self.branch._get_tags_bytes()
+ except errors.NoSuchFile, e:
+ # ugly, but only abentley should see this :)
+ trace.warning('No branch/tags file in %s. '
+ 'This branch was probably created by bzr 0.15pre. '
+ 'Create an empty file to silence this message.'
+ % (self.branch, ))
+ return {}
+ return self._deserialize_tag_dict(tag_content)
+ finally:
+ self.branch.unlock()
+
+ def get_reverse_tag_dict(self):
+ """Returns a dict with revisions as keys
+ and a list of tags for that revision as value"""
+ d = self.get_tag_dict()
+ rev = {}
+ for key in d:
+ try:
+ rev[d[key]].append(key)
+ except KeyError:
+ rev[d[key]] = [key]
+ return rev
+
+ def delete_tag(self, tag_name):
+ """Delete a tag definition.
+ """
+ self.branch.lock_write()
+ try:
+ d = self.get_tag_dict()
+ try:
+ del d[tag_name]
+ except KeyError:
+ raise errors.NoSuchTag(tag_name)
+ master = self.branch.get_master_branch()
+ if master is not None:
+ try:
+ master.tags.delete_tag(tag_name)
+ except errors.NoSuchTag:
+ pass
+ self._set_tag_dict(d)
+ finally:
+ self.branch.unlock()
+
+ def _set_tag_dict(self, new_dict):
+ """Replace all tag definitions
+
+ WARNING: Calling this on an unlocked branch will lock it, and will
+ replace the tags without warning on conflicts.
+
+ :param new_dict: Dictionary from tag name to target.
+ """
+ return self.branch._set_tags_bytes(self._serialize_tag_dict(new_dict))
+
+ def _serialize_tag_dict(self, tag_dict):
+ td = dict((k.encode('utf-8'), v)
+ for k,v in tag_dict.items())
+ return bencode.bencode(td)
+
+ def _deserialize_tag_dict(self, tag_content):
+ """Convert the tag file into a dictionary of tags"""
+ # An empty file is a special case that keeps initialization easy: an
+ # empty definition deserializes to an empty dictionary.
+ if tag_content == '':
+ return {}
+ try:
+ r = {}
+ for k, v in bencode.bdecode(tag_content).items():
+ r[k.decode('utf-8')] = v
+ return r
+ except ValueError, e:
+ raise ValueError("failed to deserialize tag dictionary %r: %s"
+ % (tag_content, e))
+
+ def merge_to(self, to_tags, overwrite=False, ignore_master=False):
+ """Copy tags between repositories if necessary and possible.
+
+ This method has common command-line behaviour about handling
+ error cases.
+
+ All new definitions are copied across, except that tags that already
+ exist keep their existing definitions.
+
+ :param to_tags: Branch to receive these tags
+ :param overwrite: Overwrite conflicting tags in the target branch
+ :param ignore_master: Do not modify the tags in the target's master
+ branch (if any). Default is false (so the master will be updated).
+ New in bzr 2.3.
+
+ :returns: Tuple with tag_updates and tag_conflicts.
+ tag_updates is a dictionary with new tags, None is used for
+ removed tags
+ tag_conflicts is a set of tags that conflicted, each of which is
+ (tagname, source_target, dest_target), or None if no copying was
+ done.
+ """
+ operation = cleanup.OperationWithCleanups(self._merge_to_operation)
+ return operation.run(to_tags, overwrite, ignore_master)
+
+ def _merge_to_operation(self, operation, to_tags, overwrite, ignore_master):
+ add_cleanup = operation.add_cleanup
+ if self.branch == to_tags.branch:
+ return {}, []
+ if not self.branch.supports_tags():
+ # obviously nothing to copy
+ return {}, []
+ source_dict = self.get_tag_dict()
+ if not source_dict:
+ # no tags in the source, and we don't want to clobber anything
+ # that's in the destination
+ return {}, []
+ # We merge_to both master and child individually.
+ #
+ # It's possible for master and child to have differing sets of
+ # tags, in which case it's possible to have different sets of
+ # conflicts. We report the union of both conflict sets. In
+ # that case it's likely the child and master have accepted
+ # different tags from the source, which may be a surprising result, but
+ # it is the best we can do in the circumstances.
+ #
+ # Ideally we'd improve this API to report the different conflicts
+ # more clearly to the caller, but we don't want to break plugins
+ # such as bzr-builddeb that use this API.
+ add_cleanup(to_tags.branch.lock_write().unlock)
+ if ignore_master:
+ master = None
+ else:
+ master = to_tags.branch.get_master_branch()
+ if master is not None:
+ add_cleanup(master.lock_write().unlock)
+ updates, conflicts = self._merge_to(to_tags, source_dict, overwrite)
+ if master is not None:
+ extra_updates, extra_conflicts = self._merge_to(master.tags,
+ source_dict, overwrite)
+ updates.update(extra_updates)
+ conflicts += extra_conflicts
+ # We use set() to remove any duplicate conflicts from the master
+ # branch.
+ return updates, set(conflicts)
+
+ def _merge_to(self, to_tags, source_dict, overwrite):
+ dest_dict = to_tags.get_tag_dict()
+ result, updates, conflicts = self._reconcile_tags(source_dict,
+ dest_dict, overwrite)
+ if result != dest_dict:
+ to_tags._set_tag_dict(result)
+ return updates, conflicts
+
+ def rename_revisions(self, rename_map):
+ """Rename revisions in this tags dictionary.
+
+ :param rename_map: Dictionary mapping old revids to new revids
+ """
+ reverse_tags = self.get_reverse_tag_dict()
+ for revid, names in reverse_tags.iteritems():
+ if revid in rename_map:
+ for name in names:
+ self.set_tag(name, rename_map[revid])
+
+ def _reconcile_tags(self, source_dict, dest_dict, overwrite):
+ """Do a two-way merge of two tag dictionaries.
+
+ * only in source => source value
+ * only in destination => destination value
+ * same definitions => that
+ * different definitions => if overwrite is False, keep destination
+ value and give a warning, otherwise use the source value
+
+ :returns: (result_dict, updates,
+ [(conflicting_tag, source_target, dest_target)])
+ """
+ conflicts = []
+ updates = {}
+ result = dict(dest_dict) # copy
+ for name, target in source_dict.items():
+ if result.get(name) == target:
+ pass
+ elif name not in result or overwrite:
+ updates[name] = target
+ result[name] = target
+ else:
+ conflicts.append((name, target, result[name]))
+ return result, updates, conflicts
+
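+ # A minimal sketch of the merge semantics (illustrative only, not part of the
+ # upstream module). _reconcile_tags never touches the branch, so it can be
+ # exercised with plain dicts; the tag names and revision ids are made up.
+ def _example_reconcile_tags():
+     source = {'release-1.0': 'rev-a', 'release-1.1': 'rev-b'}
+     dest = {'release-1.0': 'rev-a', 'release-1.1': 'rev-c'}
+     result, updates, conflicts = BasicTags(None)._reconcile_tags(
+         source, dest, overwrite=False)
+     # With overwrite=False the divergent tag keeps its destination value, so
+     # result == dest, updates == {} and
+     # conflicts == [('release-1.1', 'rev-b', 'rev-c')].
+     return result, updates, conflicts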
+
+def _merge_tags_if_possible(from_branch, to_branch, ignore_master=False):
+ # Try hard to support merge_to implementations that don't expect
+ # 'ignore_master' (new in bzr 2.3). First, if the flag isn't set then we
+ # can safely avoid passing ignore_master at all.
+ if not ignore_master:
+ from_branch.tags.merge_to(to_branch.tags)
+ return
+ # If the flag is set, try to pass it, but be ready to catch TypeError.
+ try:
+ from_branch.tags.merge_to(to_branch.tags, ignore_master=ignore_master)
+ except TypeError:
+ # Probably this implementation of 'merge_to' is from a plugin that
+ # doesn't expect the 'ignore_master' keyword argument (e.g. bzr-svn
+ # 1.0.4). There's a small risk that the TypeError is actually caused
+ # by a completely different problem (which is why we don't catch it for
+ # the ignore_master=False case), but even then there's probably no harm
+ # in calling a second time.
+ symbol_versioning.warn(
+ symbol_versioning.deprecated_in((2,3)) % (
+ "Tags.merge_to (of %r) that doesn't accept ignore_master kwarg"
+ % (from_branch.tags,),),
+ DeprecationWarning)
+ from_branch.tags.merge_to(to_branch.tags)
+
+
+def sort_natural(branch, tags):
+ """Sort tags, with numeric substrings as numbers.
+
+ :param branch: Branch
+ :param tags: List of tuples with tag name and revision id.
+ """
+ def natural_sort_key(tag):
+ return [f(s) for f,s in
+ zip(itertools.cycle((unicode.lower,int)),
+ re.split('([0-9]+)', tag[0]))]
+ tags.sort(key=natural_sort_key)
+
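+ # A minimal sketch of the sort order (illustrative only, not part of the
+ # upstream module): digit runs compare as numbers, so 'v1.10' sorts after
+ # 'v1.9'. The revision ids are placeholders; the branch argument is unused.
+ def _example_sort_natural():
+     tags = [(u'v1.10', 'rev-c'), (u'v1.2', 'rev-a'), (u'v1.9', 'rev-b')]
+     sort_natural(None, tags)
+     return [name for name, revid in tags]  # [u'v1.2', u'v1.9', u'v1.10']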
+
+def sort_alpha(branch, tags):
+ """Sort tags lexicographically, in place.
+
+ :param branch: Branch
+ :param tags: List of tuples with tag name and revision id.
+ """
+ tags.sort()
+
+
+def sort_time(branch, tags):
+ """Sort tags chronologically, in place.
+
+ :param branch: Branch
+ :param tags: List of tuples with tag name and revision id.
+ """
+ timestamps = {}
+ for tag, revid in tags:
+ try:
+ revobj = branch.repository.get_revision(revid)
+ except errors.NoSuchRevision:
+ timestamp = sys.maxint # place them at the end
+ else:
+ timestamp = revobj.timestamp
+ timestamps[revid] = timestamp
+ tags.sort(key=lambda x: timestamps[x[1]])
+
+
+tag_sort_methods = Registry()
+tag_sort_methods.register("natural", sort_natural,
+ 'Sort numeric substrings as numbers. (default)')
+tag_sort_methods.register("alpha", sort_alpha, 'Sort tags lexicographically.')
+tag_sort_methods.register("time", sort_time, 'Sort tags chronologically.')
+tag_sort_methods.default_key = "natural"
diff --git a/bzrlib/testament.py b/bzrlib/testament.py
new file mode 100644
index 0000000..88fcf43
--- /dev/null
+++ b/bzrlib/testament.py
@@ -0,0 +1,245 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Testament - a summary of a revision for signing.
+
+A testament can be defined as "something that serves as tangible
+proof or evidence." In bzr we use them to allow people to certify
+particular revisions as authentic.
+
+The goal is that if two revisions are semantically equal, then they will
+have a byte-for-byte equal testament. We can define different versions of
+"semantically equal" by using different testament classes; e.g. one that
+includes or ignores file-ids.
+
+We sign a testament rather than the revision XML itself for several reasons.
+The most important is that the form in which the revision is stored
+internally is designed for that purpose, and contains information which need
+not be attested to by the signer. For example the inventory contains the
+last-changed revision for a file, but this is not necessarily something the
+user cares to sign.
+
+Having unnecessary fields signed makes the signatures brittle when the same
+revision is stored in different branches or when the format is upgraded.
+
+Handling upgrades is another motivation for using testaments separate from
+the stored revision. We would like to be able to compare a signature
+generated from an old-format tree to a newer tree, or vice versa. This could
+be done by comparing the revisions, but that would make it unclear exactly
+what is being compared.
+
+Different signing keys might indicate different levels of trust; we can in
+the future extend this to allow signatures indicating not just that a
+particular version is authentic but that it has other properties.
+
+The signature can be applied to either the full testament or to just a
+hash of it.
+
+Testament format 1
+~~~~~~~~~~~~~~~~~~
+
+* timestamps are given as integers to avoid rounding errors
+* parents given in lexicographical order
+* indented-text form similar to log; intended to be human readable
+* paths are given with forward slashes
+* files are named using paths for ease of comparison/debugging
+* the testament uses unix line-endings (\n)
+"""
+
+from __future__ import absolute_import
+
+# XXX: At the moment, clients trust that the graph described in a weave
+# is accurate, but that's not covered by the testament. Perhaps the best
+# fix is when verifying a revision to make sure that every file mentioned
+# in the revision has compatible ancestry links.
+
+# TODO: perhaps write timestamp in a more readable form
+
+# TODO: Perhaps these should just be different formats in which inventories/
+# revisions can be serialized.
+
+from copy import copy
+
+from bzrlib.osutils import (
+ contains_whitespace,
+ contains_linebreaks,
+ sha_strings,
+ )
+from bzrlib.tree import Tree
+
+
+class Testament(object):
+ """Reduced summary of a revision.
+
+ Testaments can be
+
+ - produced from a revision
+ - written to a stream
+ - loaded from a stream
+ - compared to a revision
+ """
+
+ long_header = 'bazaar-ng testament version 1\n'
+ short_header = 'bazaar-ng testament short form 1\n'
+ include_root = False
+
+ @classmethod
+ def from_revision(cls, repository, revision_id):
+ """Produce a new testament from a historical revision."""
+ rev = repository.get_revision(revision_id)
+ tree = repository.revision_tree(revision_id)
+ return cls(rev, tree)
+
+ @classmethod
+ def from_revision_tree(cls, tree):
+ """Produce a new testament from a revision tree."""
+ rev = tree._repository.get_revision(tree.get_revision_id())
+ return cls(rev, tree)
+
+ def __init__(self, rev, tree):
+ """Create a new testament for rev using tree."""
+ self.revision_id = rev.revision_id
+ self.committer = rev.committer
+ self.timezone = rev.timezone or 0
+ self.timestamp = rev.timestamp
+ self.message = rev.message
+ self.parent_ids = rev.parent_ids[:]
+ if not isinstance(tree, Tree):
+ raise TypeError("As of bzr 2.4 Testament.__init__() takes a "
+ "Revision and a Tree.")
+ self.tree = tree
+ self.revprops = copy(rev.properties)
+ if contains_whitespace(self.revision_id):
+ raise ValueError(self.revision_id)
+ if contains_linebreaks(self.committer):
+ raise ValueError(self.committer)
+
+ def as_text_lines(self):
+ """Yield text form as a sequence of lines.
+
+ The result is returned in utf-8, because it should be signed or
+ hashed in that encoding.
+ """
+ r = []
+ a = r.append
+ a(self.long_header)
+ a('revision-id: %s\n' % self.revision_id)
+ a('committer: %s\n' % self.committer)
+ a('timestamp: %d\n' % self.timestamp)
+ a('timezone: %d\n' % self.timezone)
+ # inventory length contains the root, which is not shown here
+ a('parents:\n')
+ for parent_id in sorted(self.parent_ids):
+ if contains_whitespace(parent_id):
+ raise ValueError(parent_id)
+ a(' %s\n' % parent_id)
+ a('message:\n')
+ for l in self.message.splitlines():
+ a(' %s\n' % l)
+ a('inventory:\n')
+ for path, ie in self._get_entries():
+ a(self._entry_to_line(path, ie))
+ r.extend(self._revprops_to_lines())
+ return [line.encode('utf-8') for line in r]
+
+ def _get_entries(self):
+ return ((path, ie) for (path, versioned, kind, file_id, ie) in
+ self.tree.list_files(include_root=self.include_root))
+
+ def _escape_path(self, path):
+ if contains_linebreaks(path):
+ raise ValueError(path)
+ return unicode(path.replace('\\', '/').replace(' ', '\ '))
+
+ def _entry_to_line(self, path, ie):
+ """Turn an inventory entry into a testament line"""
+ if contains_whitespace(ie.file_id):
+ raise ValueError(ie.file_id)
+ content = ''
+ content_spacer=''
+ if ie.kind == 'file':
+ # TODO: avoid switching on kind
+ if not ie.text_sha1:
+ raise AssertionError()
+ content = ie.text_sha1
+ content_spacer = ' '
+ elif ie.kind == 'symlink':
+ if not ie.symlink_target:
+ raise AssertionError()
+ content = self._escape_path(ie.symlink_target)
+ content_spacer = ' '
+
+ l = u' %s %s %s%s%s\n' % (ie.kind, self._escape_path(path),
+ ie.file_id.decode('utf8'),
+ content_spacer, content)
+ return l
+
+ def as_text(self):
+ return ''.join(self.as_text_lines())
+
+ def as_short_text(self):
+ """Return short digest-based testament."""
+ return (self.short_header +
+ 'revision-id: %s\n'
+ 'sha1: %s\n'
+ % (self.revision_id, self.as_sha1()))
+
+ def _revprops_to_lines(self):
+ """Pack up revision properties."""
+ if not self.revprops:
+ return []
+ r = ['properties:\n']
+ for name, value in sorted(self.revprops.items()):
+ if contains_whitespace(name):
+ raise ValueError(name)
+ r.append(' %s:\n' % name)
+ for line in value.splitlines():
+ r.append(u' %s\n' % line)
+ return r
+
+ def as_sha1(self):
+ return sha_strings(self.as_text_lines())
+
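+ # A minimal usage sketch (illustrative only, not part of the upstream module);
+ # 'branch' and 'revision_id' are assumed to refer to an existing branch and a
+ # revision present in its repository.
+ def _example_testament_texts(branch, revision_id):
+     t = Testament.from_revision(branch.repository, revision_id)
+     # The long form lists committer, parents, message and inventory entries;
+     # the short form carries only the SHA1 of the long form.
+     return t.as_text(), t.as_short_text()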
+
+class StrictTestament(Testament):
+ """This testament format is for use as a checksum in bundle format 0.8"""
+
+ long_header = 'bazaar-ng testament version 2.1\n'
+ short_header = 'bazaar-ng testament short form 2.1\n'
+ include_root = False
+
+ def _entry_to_line(self, path, ie):
+ l = Testament._entry_to_line(self, path, ie)[:-1]
+ l += ' ' + ie.revision
+ l += {True: ' yes\n', False: ' no\n'}[ie.executable]
+ return l
+
+
+class StrictTestament3(StrictTestament):
+ """This testament format is for use as a checksum in bundle format 0.9+
+
+ It differs from StrictTestament by including data about the tree root.
+ """
+
+ long_header = 'bazaar testament version 3 strict\n'
+ short_header = 'bazaar testament short form 3 strict\n'
+ include_root = True
+
+ def _escape_path(self, path):
+ if contains_linebreaks(path):
+ raise ValueError(path)
+ if path == '':
+ path = '.'
+ return unicode(path.replace('\\', '/').replace(' ', '\ '))
diff --git a/bzrlib/tests/EncodingAdapter.py b/bzrlib/tests/EncodingAdapter.py
new file mode 100644
index 0000000..a5d4470
--- /dev/null
+++ b/bzrlib/tests/EncodingAdapter.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2006, 2009, 2010, 2011 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Adapter for running test cases against multiple encodings."""
+
+# prefix for micro (1/1000000)
+_mu = u'\xb5'
+
+# greek letter omega, not to be confused with
+# the Ohm sign, u'\u2126'. Though they are probably identical
+# cp437 can handle the first, but not the second
+_omega = u'\u03a9'
+
+# smallest error possible, epsilon
+# cp437 handles u03b5, but not u2208 the 'element of' operator
+_epsilon = u'\u03b5'
+
+# Swedish?
+_erik = u'Erik B\xe5gfors'
+
+# Swedish 'räksmörgås' means shrimp sandwich
+_shrimp_sandwich = u'r\xe4ksm\xf6rg\xe5s'
+
+# Arabic, probably only Unicode encodings can handle this one
+_juju = u'\u062c\u0648\u062c\u0648'
+
+# iso-8859-1 alternative for juju
+_juju_alt = u'j\xfbj\xfa'
+
+# Russian, 'Alexander' in russian
+_alexander = u'\u0410\u043b\u0435\u043a\u0441\u0430\u043d\u0434\u0440'
+# The word 'test' in Russian
+_russian_test = u'\u0422\u0435\u0441\u0442'
+
+# Kanji
+# It is a kanji sequence for nihonjin, or Japanese in English.
+#
+# '\u4eba' being person, '\u65e5' sun and '\u672c' origin. I.e.,
+# sun-origin-person, 'native from the land where the sun rises'. Note, I'm
+# not a fluent speaker, so this is just my crude breakdown.
+#
+# Wouter van Heyst
+_nihonjin = u'\u65e5\u672c\u4eba'
+
+# Czech
+# It's what is usually used for showing how fonts look, because it contains
+# most accented characters, ie. in places where Englishman use 'Quick brown fox
+# jumped over a lazy dog'. The literal translation of the Czech version would
+# be something like 'Yellow horse groaned devilish codes'. Actually originally
+# the last word used to be 'ódy' (odes). The 'k' was added as a pun when using
+# the sentence to check whether one has properly set the encoding.
+_yellow_horse = (u'\u017dlu\u0165ou\u010dk\xfd k\u016f\u0148'
+ u' \xfap\u011bl \u010f\xe1belsk\xe9 k\xf3dy')
+_yellow = u'\u017dlu\u0165ou\u010dk\xfd'
+_someone = u'Some\u016f\u0148\u011b'
+_something = u'\u0165ou\u010dk\xfd'
+
+# Hebrew
+# Shalom -> 'hello' or 'peace', used as a common greeting
+_shalom = u'\u05e9\u05dc\u05d5\u05dd'
+
+
+encoding_scenarios = [
+ # Permutation 1 of utf-8
+ ('utf-8,1', {
+ 'info': {
+ 'committer': _erik,
+ 'message': _yellow_horse,
+ 'filename': _shrimp_sandwich,
+ 'directory': _nihonjin,
+ },
+ 'encoding': 'utf-8',
+ }),
+ # Permutation 2 of utf-8
+ ('utf-8,2', {
+ 'info': {
+ 'committer': _alexander,
+ 'message': u'Testing ' + _mu,
+ 'filename': _shalom,
+ 'directory': _juju,
+ },
+ 'encoding': 'utf-8',
+ }),
+ ('iso-8859-1', {
+ 'info': {
+ 'committer': _erik,
+ 'message': u'Testing ' + _mu,
+ 'filename': _juju_alt,
+ 'directory': _shrimp_sandwich,
+ },
+ 'encoding': 'iso-8859-1',
+ }),
+ ('iso-8859-2', {
+ 'info': {
+ 'committer': _someone,
+ 'message': _yellow_horse,
+ 'filename': _yellow,
+ 'directory': _something,
+ },
+ 'encoding': 'iso-8859-2',
+ }),
+ ('cp1251', {
+ 'info': {
+ 'committer': _alexander,
+ 'message': u'Testing ' + _mu,
+ 'filename': _russian_test,
+ 'directory': _russian_test + 'dir',
+ },
+ 'encoding': 'cp1251',
+ }),
+# The iso-8859-1 tests run on a default windows cp437 installation
+# and it takes a long time to run an extra permutation of the tests
+# But just in case we want to add this back in:
+# ('cp437', {'committer':_erik
+# , 'message':u'Testing ' + _mu
+# , 'filename':'file_' + _omega
+# , 'directory':_epsilon + '_dir',
+# 'encoding': 'cp437'}),
+ ]
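+
+
+ # A minimal sketch (illustrative only, not part of the upstream module): each
+ # scenario pairs a codec with sample unicode values it is expected to handle,
+ # which a parameterised test could verify by trying to encode them.
+ def _example_check_scenarios():
+     failures = []
+     for name, params in encoding_scenarios:
+         encoding = params['encoding']
+         for value in params['info'].values():
+             try:
+                 value.encode(encoding)
+             except UnicodeEncodeError:
+                 failures.append((name, value))
+     return failures  # empty if every sample fits its scenario's codec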
diff --git a/bzrlib/tests/TestUtil.py b/bzrlib/tests/TestUtil.py
new file mode 100644
index 0000000..f54d9bc
--- /dev/null
+++ b/bzrlib/tests/TestUtil.py
@@ -0,0 +1,233 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+# Author: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+import sys
+import logging
+import unittest
+import weakref
+
+from bzrlib import pyutils
+
+# Mark this python module as being part of the implementation
+# of unittest: this gives us better tracebacks where the last
+# shown frame is the test code, not our assertXYZ.
+__unittest = 1
+
+
+class LogCollector(logging.Handler):
+
+ def __init__(self):
+ logging.Handler.__init__(self)
+ self.records=[]
+
+ def emit(self, record):
+ self.records.append(record.getMessage())
+
+
+def makeCollectingLogger():
+ """I make a logger instance that collects its logs for programmatic analysis
+ -> (logger, collector)"""
+ logger=logging.Logger("collector")
+ handler=LogCollector()
+ handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ logger.addHandler(handler)
+ return logger, handler
+
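+ # A minimal usage sketch (illustrative only, not part of the upstream module):
+ # capture log output for later inspection instead of sending it anywhere.
+ def _example_collect_logs():
+     logger, collector = makeCollectingLogger()
+     logger.warning("something odd happened")
+     return collector.records  # ['something odd happened']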
+
+def visitTests(suite, visitor):
+ """A foreign method for visiting the tests in a test suite."""
+ for test in suite._tests:
+ # Abusing types to avoid monkey patching unittest.TestCase.
+ # Maybe that would be better?
+ try:
+ test.visit(visitor)
+ except AttributeError:
+ if isinstance(test, unittest.TestCase):
+ visitor.visitCase(test)
+ elif isinstance(test, unittest.TestSuite):
+ visitor.visitSuite(test)
+ visitTests(test, visitor)
+ else:
+ print "unvisitable non-unittest.TestCase element %r (%r)" % (
+ test, test.__class__)
+
+
+class FailedCollectionCase(unittest.TestCase):
+ """Pseudo-test to run and report failure if given case was uncollected"""
+
+ def __init__(self, case):
+ super(FailedCollectionCase, self).__init__("fail_uncollected")
+ # GZ 2011-09-16: Maybe catch errors from id() method as cases may be
+ # in a bit of a funny state by now.
+ self._problem_case_id = case.id()
+
+ def id(self):
+ if self._problem_case_id[-1:] == ")":
+ return self._problem_case_id[:-1] + ",uncollected)"
+ return self._problem_case_id + "(uncollected)"
+
+ def fail_uncollected(self):
+ self.fail("Uncollected test case: " + self._problem_case_id)
+
+
+class TestSuite(unittest.TestSuite):
+ """I am an extended TestSuite with a visitor interface.
+ This is primarily to allow filtering of tests - and suites or
+ more in the future. An iterator of just tests wouldn't scale..."""
+
+ def visit(self, visitor):
+ """visit the composite. Visiting is depth-first.
+ current callbacks are visitSuite and visitCase."""
+ visitor.visitSuite(self)
+ visitTests(self, visitor)
+
+ def run(self, result):
+ """Run the tests in the suite, discarding references after running."""
+ tests = list(self)
+ tests.reverse()
+ self._tests = []
+ stored_count = 0
+ count_stored_tests = getattr(result, "_count_stored_tests", int)
+ from bzrlib.tests import selftest_debug_flags
+ notify = "uncollected_cases" in selftest_debug_flags
+ while tests:
+ if result.shouldStop:
+ self._tests = reversed(tests)
+ break
+ case = _run_and_collect_case(tests.pop(), result)()
+ new_stored_count = count_stored_tests()
+ if case is not None and isinstance(case, unittest.TestCase):
+ if stored_count == new_stored_count and notify:
+ # Testcase didn't fail, but somehow is still alive
+ FailedCollectionCase(case).run(result)
+ # Adding a new failure so need to reupdate the count
+ new_stored_count = count_stored_tests()
+ # GZ 2011-09-16: Previously zombied the case at this point by
+ # clearing the dict as fallback, skip for now.
+ stored_count = new_stored_count
+ return result
+
+
+def _run_and_collect_case(case, res):
+ """Run test case against result and use weakref to drop the refcount"""
+ case.run(res)
+ return weakref.ref(case)
+
+
+class TestLoader(unittest.TestLoader):
+ """Custom TestLoader to extend the stock python one."""
+
+ suiteClass = TestSuite
+ # Memoize test names by test class dict
+ test_func_names = {}
+
+ def loadTestsFromModuleNames(self, names):
+ """use a custom means to load tests from modules.
+
+ There is an undesirable glitch in the python TestLoader where an
+ import error is ignored. We think this can be solved by ensuring the
+ requested name is resolvable; if it is not, the original error is raised.
+ """
+ result = self.suiteClass()
+ for name in names:
+ result.addTests(self.loadTestsFromModuleName(name))
+ return result
+
+ def loadTestsFromModuleName(self, name):
+ result = self.suiteClass()
+ module = pyutils.get_named_object(name)
+
+ result.addTests(self.loadTestsFromModule(module))
+ return result
+
+ def loadTestsFromModule(self, module):
+ """Load tests from a module object.
+
+ This extension of the python test loader looks for an attribute
+ load_tests in the module object, and if not found falls back to the
+ regular python loadTestsFromModule.
+
+ If a load_tests attribute is found, it is called and the result is
+ returned.
+
+ load_tests should be defined like so:
+ >>> def load_tests(standard_tests, module, loader):
+ ...     pass
+
+ standard_tests is the tests found by the stock TestLoader in the
+ module, module and loader are the module and loader instances.
+
+ For instance, to run every test twice, you might do:
+ >>> def load_tests(standard_tests, module, loader):
+ ...     result = loader.suiteClass()
+ ...     for test in iter_suite_tests(standard_tests):
+ ...         result.addTests([test, test])
+ ...     return result
+ """
+ if sys.version_info < (2, 7):
+ basic_tests = super(TestLoader, self).loadTestsFromModule(module)
+ else:
+ # GZ 2010-07-19: Python 2.7 unittest also uses load_tests but with
+ # a different and incompatible signature
+ basic_tests = super(TestLoader, self).loadTestsFromModule(module,
+ use_load_tests=False)
+ load_tests = getattr(module, "load_tests", None)
+ if load_tests is not None:
+ return load_tests(basic_tests, module, self)
+ else:
+ return basic_tests
+
+ def getTestCaseNames(self, test_case_class):
+ test_fn_names = self.test_func_names.get(test_case_class, None)
+ if test_fn_names is not None:
+ # We already know them
+ return test_fn_names
+
+ test_fn_names = unittest.TestLoader.getTestCaseNames(self,
+ test_case_class)
+ self.test_func_names[test_case_class] = test_fn_names
+ return test_fn_names
+
+
+class FilteredByModuleTestLoader(TestLoader):
+ """A test loader that imports only the needed modules."""
+
+ def __init__(self, needs_module):
+ """Constructor.
+
+ :param needs_module: a callable taking a module name as a
+ parameter and returning True if the module should be loaded.
+ """
+ TestLoader.__init__(self)
+ self.needs_module = needs_module
+
+ def loadTestsFromModuleName(self, name):
+ if self.needs_module(name):
+ return TestLoader.loadTestsFromModuleName(self, name)
+ else:
+ return self.suiteClass()
+
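+ # A minimal usage sketch (illustrative only, not part of the upstream module);
+ # the module name is just an example of a test module to load.
+ def _example_filtered_loader():
+     loader = FilteredByModuleTestLoader(
+         lambda name: name.startswith('bzrlib.tests.test_tag'))
+     # Modules whose names fail the predicate come back as empty suites.
+     return loader.loadTestsFromModuleNames(['bzrlib.tests.test_tag'])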
+
+class TestVisitor(object):
+ """A visitor for Tests"""
+
+ def visitSuite(self, aTestSuite):
+ pass
+
+ def visitCase(self, aTestCase):
+ pass
diff --git a/bzrlib/tests/__init__.py b/bzrlib/tests/__init__.py
new file mode 100644
index 0000000..0aa26e6
--- /dev/null
+++ b/bzrlib/tests/__init__.py
@@ -0,0 +1,4521 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Testing framework extensions"""
+
+from __future__ import absolute_import
+
+# NOTE: Some classes in here use camelCaseNaming() rather than
+# underscore_naming(). That's for consistency with unittest; it's not the
+# general style of bzrlib. Please continue that consistency when adding e.g.
+# new assertFoo() methods.
+
+import atexit
+import codecs
+import copy
+from cStringIO import StringIO
+import difflib
+import doctest
+import errno
+import itertools
+import logging
+import os
+import platform
+import pprint
+import random
+import re
+import shlex
+import site
+import stat
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import traceback
+import unittest
+import warnings
+
+import testtools
+# nb: check this before importing anything else from within it
+_testtools_version = getattr(testtools, '__version__', ())
+if _testtools_version < (0, 9, 5):
+ raise ImportError("need at least testtools 0.9.5: %s is %r"
+ % (testtools.__file__, _testtools_version))
+from testtools import content
+
+import bzrlib
+from bzrlib import (
+ branchbuilder,
+ controldir,
+ chk_map,
+ commands as _mod_commands,
+ config,
+ i18n,
+ debug,
+ errors,
+ hooks,
+ lock as _mod_lock,
+ lockdir,
+ memorytree,
+ osutils,
+ plugin as _mod_plugin,
+ pyutils,
+ ui,
+ urlutils,
+ registry,
+ symbol_versioning,
+ trace,
+ transport as _mod_transport,
+ workingtree,
+ )
+try:
+ import bzrlib.lsprof
+except ImportError:
+ # lsprof not available
+ pass
+from bzrlib.smart import client, request
+from bzrlib.transport import (
+ memory,
+ pathfilter,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_function,
+ deprecated_in,
+ )
+from bzrlib.tests import (
+ fixtures,
+ test_server,
+ TestUtil,
+ treeshape,
+ )
+from bzrlib.ui import NullProgressView
+from bzrlib.ui.text import TextUIFactory
+from bzrlib.tests.features import _CompatabilityThunkFeature
+
+# Mark this python module as being part of the implementation
+# of unittest: this gives us better tracebacks where the last
+# shown frame is the test code, not our assertXYZ.
+__unittest = 1
+
+default_transport = test_server.LocalURLServer
+
+
+_unitialized_attr = object()
+"""A sentinel needed to act as a default value in a method signature."""
+
+
+# Subunit result codes, defined here to prevent a hard dependency on subunit.
+SUBUNIT_SEEK_SET = 0
+SUBUNIT_SEEK_CUR = 1
+
+# These are intentionally brought into this namespace. That way plugins, etc
+# can just "from bzrlib.tests import TestCase, TestLoader, etc"
+TestSuite = TestUtil.TestSuite
+TestLoader = TestUtil.TestLoader
+
+# Tests should run in a clean and clearly defined environment. The goal is to
+# keep them isolated from the running environment as much as possible. The test
+# framework ensures the variables defined below are set (or deleted if the
+# value is None) before a test is run and reset to their original value after
+# the test is run. Generally if some code depends on an environment variable,
+# the tests should start without this variable in the environment. There are a
+# few exceptions but you shouldn't violate this rule lightly.
+isolated_environ = {
+ 'BZR_HOME': None,
+ 'HOME': None,
+ 'XDG_CONFIG_HOME': None,
+ # bzr now uses the Win32 API and doesn't rely on APPDATA, but the
+ # tests do check our impls match APPDATA
+ 'BZR_EDITOR': None, # test_msgeditor manipulates this variable
+ 'VISUAL': None,
+ 'EDITOR': None,
+ 'BZR_EMAIL': None,
+ 'BZREMAIL': None, # may still be present in the environment
+ 'EMAIL': 'jrandom@example.com', # set EMAIL as bzr does not guess
+ 'BZR_PROGRESS_BAR': None,
+ # This should trap leaks to ~/.bzr.log. This occurs when tests use TestCase
+ # as a base class instead of TestCaseInTempDir. Tests inheriting from
+ # TestCase should not use disk resources, BZR_LOG is one.
+ 'BZR_LOG': '/you-should-use-TestCaseInTempDir-if-you-need-a-log-file',
+ 'BZR_PLUGIN_PATH': None,
+ 'BZR_DISABLE_PLUGINS': None,
+ 'BZR_PLUGINS_AT': None,
+ 'BZR_CONCURRENCY': None,
+ # Make sure that any text ui tests are consistent regardless of
+ # the environment the test case is run in; you may want tests that
+ # test other combinations. 'dumb' is a reasonable guess for tests
+ # going to a pipe or a StringIO.
+ 'TERM': 'dumb',
+ 'LINES': '25',
+ 'COLUMNS': '80',
+ 'BZR_COLUMNS': '80',
+ # Disable SSH Agent
+ 'SSH_AUTH_SOCK': None,
+ # Proxies
+ 'http_proxy': None,
+ 'HTTP_PROXY': None,
+ 'https_proxy': None,
+ 'HTTPS_PROXY': None,
+ 'no_proxy': None,
+ 'NO_PROXY': None,
+ 'all_proxy': None,
+ 'ALL_PROXY': None,
+ # Nobody cares about ftp_proxy, FTP_PROXY AFAIK. So far at
+ # least. If you do (care), please update this comment
+ # -- vila 20080401
+ 'ftp_proxy': None,
+ 'FTP_PROXY': None,
+ 'BZR_REMOTE_PATH': None,
+ # Generally speaking, we don't want apport reporting on crashes in
+ # the test environment unless we're specifically testing apport,
+ # so that it doesn't leak into the real system environment. We
+ # use an env var so it propagates to subprocesses.
+ 'APPORT_DISABLE': '1',
+ }
+
+
+def override_os_environ(test, env=None):
+ """Modify os.environ keeping a copy.
+
+ :param test: A test instance
+
+ :param env: A dict containing variable definitions to be installed
+ """
+ if env is None:
+ env = isolated_environ
+ test._original_os_environ = dict([(var, value)
+ for var, value in os.environ.iteritems()])
+ for var, value in env.iteritems():
+ osutils.set_or_unset_env(var, value)
+ if var not in test._original_os_environ:
+ # The var is new, add it with a value of None, so
+ # restore_os_environ will delete it
+ test._original_os_environ[var] = None
+
+
+def restore_os_environ(test):
+ """Restore os.environ to its original state.
+
+ :param test: A test instance previously passed to override_os_environ.
+ """
+ for var, value in test._original_os_environ.iteritems():
+ # Restore the original value (or delete it if the value has been set to
+ # None in override_os_environ).
+ osutils.set_or_unset_env(var, value)
+
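+ # A minimal usage sketch (illustrative only, not part of the upstream module):
+ # the two helpers are meant to be used as a pair, typically from a test's
+ # setUp, so the original environment comes back however the test ends.
+ def _example_isolate_environment(test):
+     override_os_environ(test)  # installs isolated_environ, keeping a copy
+     test.addCleanup(restore_os_environ, test)  # restores the copy afterwards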
+
+def _clear__type_equality_funcs(test):
+ """Cleanup bound methods stored on TestCase instances
+
+ Clear the dict breaking a few (mostly) harmless cycles in the affected
+ unittests released with Python 2.6 and initial Python 2.7 versions.
+
+ For a few revisions between Python 2.7.1 and Python 2.7.2 that annoyingly
+ shipped in Oneiric, an object with no clear method was used, hence the
+ extra complications, see bug 809048 for details.
+ """
+ type_equality_funcs = getattr(test, "_type_equality_funcs", None)
+ if type_equality_funcs is not None:
+ tef_clear = getattr(type_equality_funcs, "clear", None)
+ if tef_clear is None:
+ tef_instance_dict = getattr(type_equality_funcs, "__dict__", None)
+ if tef_instance_dict is not None:
+ tef_clear = tef_instance_dict.clear
+ if tef_clear is not None:
+ tef_clear()
+
+
+class ExtendedTestResult(testtools.TextTestResult):
+ """Accepts, reports and accumulates the results of running tests.
+
+ Compared to the unittest version this class adds support for
+ profiling, benchmarking, stopping as soon as a test fails, and
+ skipping tests. There are further-specialized subclasses for
+ different types of display.
+
+ When a test finishes, in whatever way, it calls one of the addSuccess,
+ addFailure or addError methods. These in turn may redirect to a more
+ specific case for the special test results supported by our extended
+ tests.
+
+ Note that just one of these objects is fed the results from many tests.
+ """
+
+ stop_early = False
+
+ def __init__(self, stream, descriptions, verbosity,
+ bench_history=None,
+ strict=False,
+ ):
+ """Construct new TestResult.
+
+ :param bench_history: Optionally, a writable file object to accumulate
+ benchmark results.
+ """
+ testtools.TextTestResult.__init__(self, stream)
+ if bench_history is not None:
+ from bzrlib.version import _get_bzr_source_tree
+ src_tree = _get_bzr_source_tree()
+ if src_tree:
+ try:
+ revision_id = src_tree.get_parent_ids()[0]
+ except IndexError:
+ # XXX: if this is a brand new tree, do the same as if there
+ # is no branch.
+ revision_id = ''
+ else:
+ # XXX: If there's no branch, what should we do?
+ revision_id = ''
+ bench_history.write("--date %s %s\n" % (time.time(), revision_id))
+ self._bench_history = bench_history
+ self.ui = ui.ui_factory
+ self.num_tests = 0
+ self.error_count = 0
+ self.failure_count = 0
+ self.known_failure_count = 0
+ self.skip_count = 0
+ self.not_applicable_count = 0
+ self.unsupported = {}
+ self.count = 0
+ self._overall_start_time = time.time()
+ self._strict = strict
+ self._first_thread_leaker_id = None
+ self._tests_leaking_threads_count = 0
+ self._traceback_from_test = None
+
+ def stopTestRun(self):
+ run = self.testsRun
+ actionTaken = "Ran"
+ stopTime = time.time()
+ timeTaken = stopTime - self.startTime
+ # GZ 2010-07-19: Seems testtools has no printErrors method, and though
+ # the parent class method is similar have to duplicate
+ self._show_list('ERROR', self.errors)
+ self._show_list('FAIL', self.failures)
+ self.stream.write(self.sep2)
+ self.stream.write("%s %d test%s in %.3fs\n\n" % (actionTaken,
+ run, run != 1 and "s" or "", timeTaken))
+ if not self.wasSuccessful():
+ self.stream.write("FAILED (")
+ failed, errored = map(len, (self.failures, self.errors))
+ if failed:
+ self.stream.write("failures=%d" % failed)
+ if errored:
+ if failed: self.stream.write(", ")
+ self.stream.write("errors=%d" % errored)
+ if self.known_failure_count:
+ if failed or errored: self.stream.write(", ")
+ self.stream.write("known_failure_count=%d" %
+ self.known_failure_count)
+ self.stream.write(")\n")
+ else:
+ if self.known_failure_count:
+ self.stream.write("OK (known_failures=%d)\n" %
+ self.known_failure_count)
+ else:
+ self.stream.write("OK\n")
+ if self.skip_count > 0:
+ skipped = self.skip_count
+ self.stream.write('%d test%s skipped\n' %
+ (skipped, skipped != 1 and "s" or ""))
+ if self.unsupported:
+ for feature, count in sorted(self.unsupported.items()):
+ self.stream.write("Missing feature '%s' skipped %d tests.\n" %
+ (feature, count))
+ if self._strict:
+ ok = self.wasStrictlySuccessful()
+ else:
+ ok = self.wasSuccessful()
+ if self._first_thread_leaker_id:
+ self.stream.write(
+ '%s is leaking threads among %d leaking tests.\n' % (
+ self._first_thread_leaker_id,
+ self._tests_leaking_threads_count))
+ # We don't report the main thread as an active one.
+ self.stream.write(
+ '%d non-main threads were left active in the end.\n'
+ % (len(self._active_threads) - 1))
+
+ def getDescription(self, test):
+ return test.id()
+
+ def _extractBenchmarkTime(self, testCase, details=None):
+ """Add a benchmark time for the current test case."""
+ if details and 'benchtime' in details:
+ return float(''.join(details['benchtime'].iter_bytes()))
+ return getattr(testCase, "_benchtime", None)
+
+ def _elapsedTestTimeString(self):
+ """Return a time string for the overall time the current test has taken."""
+ return self._formatTime(self._delta_to_float(
+ self._now() - self._start_datetime))
+
+ def _testTimeString(self, testCase):
+ benchmark_time = self._extractBenchmarkTime(testCase)
+ if benchmark_time is not None:
+ return self._formatTime(benchmark_time) + "*"
+ else:
+ return self._elapsedTestTimeString()
+
+ def _formatTime(self, seconds):
+ """Format seconds as milliseconds with leading spaces."""
+ # some benchmarks can take thousands of seconds to run, so we need 8
+ # places
+ return "%8dms" % (1000 * seconds)
+
+ def _shortened_test_description(self, test):
+ what = test.id()
+ what = re.sub(r'^bzrlib\.tests\.', '', what)
+ return what
+
+ # GZ 2010-10-04: Cloned tests may end up harmlessly calling this method
+ # multiple times in a row, because the handler is added for
+ # each test but the container list is shared between cases.
+ # See lp:498869 lp:625574 and lp:637725 for background.
+ def _record_traceback_from_test(self, exc_info):
+ """Store the traceback from the passed exc_info tuple until it is needed."""
+ self._traceback_from_test = exc_info[2]
+
+ def startTest(self, test):
+ super(ExtendedTestResult, self).startTest(test)
+ if self.count == 0:
+ self.startTests()
+ self.count += 1
+ self.report_test_start(test)
+ test.number = self.count
+ self._recordTestStartTime()
+ # Make testtools cases give us the real traceback on failure
+ addOnException = getattr(test, "addOnException", None)
+ if addOnException is not None:
+ addOnException(self._record_traceback_from_test)
+ # Only check for thread leaks on bzrlib derived test cases
+ if isinstance(test, TestCase):
+ test.addCleanup(self._check_leaked_threads, test)
+
+ def stopTest(self, test):
+ super(ExtendedTestResult, self).stopTest(test)
+ # Manually break cycles, means touching various private things but hey
+ getDetails = getattr(test, "getDetails", None)
+ if getDetails is not None:
+ getDetails().clear()
+ _clear__type_equality_funcs(test)
+ self._traceback_from_test = None
+
+ def startTests(self):
+ self.report_tests_starting()
+ self._active_threads = threading.enumerate()
+
+ def _check_leaked_threads(self, test):
+ """See if any threads have leaked since last call
+
+ A sample of live threads is stored in the _active_threads attribute,
+ when this method runs it compares the current live threads and any not
+ in the previous sample are treated as having leaked.
+ """
+ now_active_threads = set(threading.enumerate())
+ threads_leaked = now_active_threads.difference(self._active_threads)
+ if threads_leaked:
+ self._report_thread_leak(test, threads_leaked, now_active_threads)
+ self._tests_leaking_threads_count += 1
+ if self._first_thread_leaker_id is None:
+ self._first_thread_leaker_id = test.id()
+ self._active_threads = now_active_threads
+
+ def _recordTestStartTime(self):
+ """Record that a test has started."""
+ self._start_datetime = self._now()
+
+ def addError(self, test, err):
+ """Tell result that test finished with an error.
+
+ Called from the TestCase run() method when the test
+ fails with an unexpected error.
+ """
+ self._post_mortem(self._traceback_from_test)
+ super(ExtendedTestResult, self).addError(test, err)
+ self.error_count += 1
+ self.report_error(test, err)
+ if self.stop_early:
+ self.stop()
+
+ def addFailure(self, test, err):
+ """Tell result that test failed.
+
+ Called from the TestCase run() method when the test
+ fails because e.g. an assert() method failed.
+ """
+ self._post_mortem(self._traceback_from_test)
+ super(ExtendedTestResult, self).addFailure(test, err)
+ self.failure_count += 1
+ self.report_failure(test, err)
+ if self.stop_early:
+ self.stop()
+
+ def addSuccess(self, test, details=None):
+ """Tell result that test completed successfully.
+
+ Called from the TestCase run()
+ """
+ if self._bench_history is not None:
+ benchmark_time = self._extractBenchmarkTime(test, details)
+ if benchmark_time is not None:
+ self._bench_history.write("%s %s\n" % (
+ self._formatTime(benchmark_time),
+ test.id()))
+ self.report_success(test)
+ super(ExtendedTestResult, self).addSuccess(test)
+ test._log_contents = ''
+
+ def addExpectedFailure(self, test, err):
+ self.known_failure_count += 1
+ self.report_known_failure(test, err)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ """Tell result the test unexpectedly passed, counting as a failure
+
+ When the minimum version of testtools required becomes 0.9.8 this
+ can be updated to use the new handling there.
+ """
+ super(ExtendedTestResult, self).addFailure(test, details=details)
+ self.failure_count += 1
+ self.report_unexpected_success(test,
+ "".join(details["reason"].iter_text()))
+ if self.stop_early:
+ self.stop()
+
+ def addNotSupported(self, test, feature):
+ """The test will not be run because of a missing feature.
+ """
+ # this can be called in two different ways: it may be that the
+ # test started running, and then raised (through requireFeature)
+ # UnavailableFeature. Alternatively this method can be called
+ # while probing for features before running the test code proper; in
+ # that case we will see startTest and stopTest, but the test will
+ # never actually run.
+ self.unsupported.setdefault(str(feature), 0)
+ self.unsupported[str(feature)] += 1
+ self.report_unsupported(test, feature)
+
+ def addSkip(self, test, reason):
+ """A test has not run for 'reason'."""
+ self.skip_count += 1
+ self.report_skip(test, reason)
+
+ def addNotApplicable(self, test, reason):
+ self.not_applicable_count += 1
+ self.report_not_applicable(test, reason)
+
+ def _count_stored_tests(self):
+ """Count of tests instances kept alive due to not succeeding"""
+ return self.error_count + self.failure_count + self.known_failure_count
+
+ def _post_mortem(self, tb=None):
+ """Start a PDB post mortem session."""
+ if os.environ.get('BZR_TEST_PDB', None):
+ import pdb
+ pdb.post_mortem(tb)
+
+ def progress(self, offset, whence):
+ """The test is adjusting the count of tests to run."""
+ if whence == SUBUNIT_SEEK_SET:
+ self.num_tests = offset
+ elif whence == SUBUNIT_SEEK_CUR:
+ self.num_tests += offset
+ else:
+ raise errors.BzrError("Unknown whence %r" % whence)
+
+ def report_tests_starting(self):
+ """Display information before the test run begins"""
+ if getattr(sys, 'frozen', None) is None:
+ bzr_path = osutils.realpath(sys.argv[0])
+ else:
+ bzr_path = sys.executable
+ self.stream.write(
+ 'bzr selftest: %s\n' % (bzr_path,))
+ self.stream.write(
+ ' %s\n' % (
+ bzrlib.__path__[0],))
+ self.stream.write(
+ ' bzr-%s python-%s %s\n' % (
+ bzrlib.version_string,
+ bzrlib._format_version_tuple(sys.version_info),
+ platform.platform(aliased=1),
+ ))
+ self.stream.write('\n')
+
+ def report_test_start(self, test):
+ """Display information on the test just about to be run"""
+
+ def _report_thread_leak(self, test, leaked_threads, active_threads):
+ """Display information on a test that leaked one or more threads"""
+ # GZ 2010-09-09: A leak summary reported separately from the general
+ # thread debugging would be nice. Tests under subunit
+ # need something not using stream, perhaps adding a
+ # testtools details object would be fitting.
+ if 'threads' in selftest_debug_flags:
+ self.stream.write('%s is leaking, active is now %d\n' %
+ (test.id(), len(active_threads)))
+
+ def startTestRun(self):
+ self.startTime = time.time()
+
+ def report_success(self, test):
+ pass
+
+ def wasStrictlySuccessful(self):
+ if self.unsupported or self.known_failure_count:
+ return False
+ return self.wasSuccessful()
+
+
+class TextTestResult(ExtendedTestResult):
+ """Displays progress and results of tests in text form"""
+
+ def __init__(self, stream, descriptions, verbosity,
+ bench_history=None,
+ pb=None,
+ strict=None,
+ ):
+ ExtendedTestResult.__init__(self, stream, descriptions, verbosity,
+ bench_history, strict)
+ # We no longer pass them around, but just rely on the UIFactory stack
+ # for state
+ if pb is not None:
+ warnings.warn("Passing pb to TextTestResult is deprecated")
+ self.pb = self.ui.nested_progress_bar()
+ self.pb.show_pct = False
+ self.pb.show_spinner = False
+ self.pb.show_eta = False
+ self.pb.show_count = False
+ self.pb.show_bar = False
+ self.pb.update_latency = 0
+ self.pb.show_transport_activity = False
+
+ def stopTestRun(self):
+ # called when the tests that are going to run have run
+ self.pb.clear()
+ self.pb.finished()
+ super(TextTestResult, self).stopTestRun()
+
+ def report_tests_starting(self):
+ super(TextTestResult, self).report_tests_starting()
+ self.pb.update('[test 0/%d] Starting' % (self.num_tests))
+
+ def _progress_prefix_text(self):
+ # the longer this text, the less space we have to show the test
+ # name...
+ a = '[%d' % self.count # total that have been run
+ # tests skipped as known not to be relevant are not important enough
+ # to show here
+ ## if self.skip_count:
+ ## a += ', %d skip' % self.skip_count
+ ## if self.known_failure_count:
+ ## a += '+%dX' % self.known_failure_count
+ if self.num_tests:
+ a +='/%d' % self.num_tests
+ a += ' in '
+ runtime = time.time() - self._overall_start_time
+ if runtime >= 60:
+ a += '%dm%ds' % (runtime / 60, runtime % 60)
+ else:
+ a += '%ds' % runtime
+ total_fail_count = self.error_count + self.failure_count
+ if total_fail_count:
+ a += ', %d failed' % total_fail_count
+ # if self.unsupported:
+ # a += ', %d missing' % len(self.unsupported)
+ a += ']'
+ return a
+
+ def report_test_start(self, test):
+ self.pb.update(
+ self._progress_prefix_text()
+ + ' '
+ + self._shortened_test_description(test))
+
+ def _test_description(self, test):
+ return self._shortened_test_description(test)
+
+ def report_error(self, test, err):
+ self.stream.write('ERROR: %s\n %s\n' % (
+ self._test_description(test),
+ err[1],
+ ))
+
+ def report_failure(self, test, err):
+ self.stream.write('FAIL: %s\n %s\n' % (
+ self._test_description(test),
+ err[1],
+ ))
+
+ def report_known_failure(self, test, err):
+ pass
+
+ def report_unexpected_success(self, test, reason):
+ self.stream.write('FAIL: %s\n %s: %s\n' % (
+ self._test_description(test),
+ "Unexpected success. Should have failed",
+ reason,
+ ))
+
+ def report_skip(self, test, reason):
+ pass
+
+ def report_not_applicable(self, test, reason):
+ pass
+
+ def report_unsupported(self, test, feature):
+ """test cannot be run because feature is missing."""
+
+
+class VerboseTestResult(ExtendedTestResult):
+ """Produce long output, with one line per test run plus times"""
+
+ def _ellipsize_to_right(self, a_string, final_width):
+ """Truncate and pad a string, keeping the right hand side"""
+ if len(a_string) > final_width:
+ result = '...' + a_string[3-final_width:]
+ else:
+ result = a_string
+ return result.ljust(final_width)
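+
+    # Illustrative sketch of the truncation behaviour, assuming a final
+    # width of 10 (worked example, not part of the method's contract):
+    #
+    #   self._ellipsize_to_right('bzrlib.tests.test_foo.TestFoo.test_bar', 10)
+    #   # -> '...est_bar'   (a '...' prefix plus the last 7 characters)
+    #   self._ellipsize_to_right('short', 10)
+    #   # -> 'short     '   (short names are just left-justified)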
+
+ def report_tests_starting(self):
+ self.stream.write('running %d tests...\n' % self.num_tests)
+ super(VerboseTestResult, self).report_tests_starting()
+
+ def report_test_start(self, test):
+ name = self._shortened_test_description(test)
+ width = osutils.terminal_width()
+ if width is not None:
+ # width needs space for 6 char status, plus 1 for slash, plus an
+ # 11-char time string, plus a trailing blank
+ # when NUMBERED_DIRS: plus 5 chars on test number, plus 1 char on
+ # space
+ self.stream.write(self._ellipsize_to_right(name, width-18))
+ else:
+ self.stream.write(name)
+ self.stream.flush()
+
+ def _error_summary(self, err):
+ indent = ' ' * 4
+ return '%s%s' % (indent, err[1])
+
+ def report_error(self, test, err):
+ self.stream.write('ERROR %s\n%s\n'
+ % (self._testTimeString(test),
+ self._error_summary(err)))
+
+ def report_failure(self, test, err):
+ self.stream.write(' FAIL %s\n%s\n'
+ % (self._testTimeString(test),
+ self._error_summary(err)))
+
+ def report_known_failure(self, test, err):
+ self.stream.write('XFAIL %s\n%s\n'
+ % (self._testTimeString(test),
+ self._error_summary(err)))
+
+ def report_unexpected_success(self, test, reason):
+ self.stream.write(' FAIL %s\n%s: %s\n'
+ % (self._testTimeString(test),
+ "Unexpected success. Should have failed",
+ reason))
+
+ def report_success(self, test):
+ self.stream.write(' OK %s\n' % self._testTimeString(test))
+ for bench_called, stats in getattr(test, '_benchcalls', []):
+ self.stream.write('LSProf output for %s(%s, %s)\n' % bench_called)
+ stats.pprint(file=self.stream)
+ # flush the stream so that we get smooth output. This verbose mode is
+ # used to show the output in PQM.
+ self.stream.flush()
+
+ def report_skip(self, test, reason):
+ self.stream.write(' SKIP %s\n%s\n'
+ % (self._testTimeString(test), reason))
+
+ def report_not_applicable(self, test, reason):
+ self.stream.write(' N/A %s\n %s\n'
+ % (self._testTimeString(test), reason))
+
+ def report_unsupported(self, test, feature):
+ """test cannot be run because feature is missing."""
+ self.stream.write("NODEP %s\n The feature '%s' is not available.\n"
+ %(self._testTimeString(test), feature))
+
+
+class TextTestRunner(object):
+ stop_on_failure = False
+
+ def __init__(self,
+ stream=sys.stderr,
+ descriptions=0,
+ verbosity=1,
+ bench_history=None,
+ strict=False,
+ result_decorators=None,
+ ):
+ """Create a TextTestRunner.
+
+ :param result_decorators: An optional list of decorators to apply
+ to the result object being used by the runner. Decorators are
+ applied left to right - the first element in the list is the
+ innermost decorator.
+ """
+        # stream may claim to know how to write unicode strings, but in
+        # older pythons this goes sufficiently wrong that it is a bad idea
+        # (specifically, a built-in file with encoding 'UTF-8' will still
+        # try to encode using ascii).
+ new_encoding = osutils.get_terminal_encoding()
+ codec = codecs.lookup(new_encoding)
+ if type(codec) is tuple:
+ # Python 2.4
+ encode = codec[0]
+ else:
+ encode = codec.encode
+ # GZ 2010-09-08: Really we don't want to be writing arbitrary bytes,
+ # so should swap to the plain codecs.StreamWriter
+ stream = osutils.UnicodeOrBytesToBytesWriter(encode, stream,
+ "backslashreplace")
+ stream.encoding = new_encoding
+ self.stream = stream
+ self.descriptions = descriptions
+ self.verbosity = verbosity
+ self._bench_history = bench_history
+ self._strict = strict
+ self._result_decorators = result_decorators or []
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ if self.verbosity == 1:
+ result_class = TextTestResult
+ elif self.verbosity >= 2:
+ result_class = VerboseTestResult
+ original_result = result_class(self.stream,
+ self.descriptions,
+ self.verbosity,
+ bench_history=self._bench_history,
+ strict=self._strict,
+ )
+        # Signal to result objects that look at the stop-early policy to stop.
+ original_result.stop_early = self.stop_on_failure
+ result = original_result
+ for decorator in self._result_decorators:
+ result = decorator(result)
+ result.stop_early = self.stop_on_failure
+ result.startTestRun()
+ try:
+ test.run(result)
+ finally:
+ result.stopTestRun()
+ # higher level code uses our extended protocol to determine
+ # what exit code to give.
+ return original_result
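+
+    # Illustrative usage sketch: decorators are applied innermost-first, so
+    # the first element wraps the plain result directly. ForwardingResult is
+    # a hypothetical stand-in for any callable that proxies a result object:
+    #
+    #   runner = TextTestRunner(stream=sys.stderr, verbosity=1,
+    #                           result_decorators=[ForwardingResult])
+    #   result = runner.run(suite)   # returns the undecorated result object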
+
+
+def iter_suite_tests(suite):
+ """Return all tests in a suite, recursing through nested suites"""
+ if isinstance(suite, unittest.TestCase):
+ yield suite
+ elif isinstance(suite, unittest.TestSuite):
+ for item in suite:
+ for r in iter_suite_tests(item):
+ yield r
+ else:
+ raise Exception('unknown type %r for object %r'
+ % (type(suite), suite))
+
+
+TestSkipped = testtools.testcase.TestSkipped
+
+
+class TestNotApplicable(TestSkipped):
+ """A test is not applicable to the situation where it was run.
+
+ This is only normally raised by parameterized tests, if they find that
+ the instance they're constructed upon does not support one aspect
+ of its interface.
+ """
+
+
+# traceback._some_str fails to format exceptions that have the default
+# __str__ which does an implicit ascii conversion. However, repr() on those
+# objects works, for all that it's not quite what the doctor may have ordered.
+def _clever_some_str(value):
+ try:
+ return str(value)
+ except:
+ try:
+ return repr(value).replace('\\n', '\n')
+ except:
+ return '<unprintable %s object>' % type(value).__name__
+
+traceback._some_str = _clever_some_str
+
+
+# deprecated - use self.knownFailure(), or self.expectFailure.
+KnownFailure = testtools.testcase._ExpectedFailure
+
+
+class UnavailableFeature(Exception):
+ """A feature required for this test was not available.
+
+    This can be considered a specialised form of TestSkipped.
+
+ The feature should be used to construct the exception.
+ """
+
+
+class StringIOWrapper(object):
+ """A wrapper around cStringIO which just adds an encoding attribute.
+
+ Internally we can check sys.stdout to see what the output encoding
+ should be. However, cStringIO has no encoding attribute that we can
+ set. So we wrap it instead.
+ """
+ encoding='ascii'
+ _cstring = None
+
+ def __init__(self, s=None):
+ if s is not None:
+ self.__dict__['_cstring'] = StringIO(s)
+ else:
+ self.__dict__['_cstring'] = StringIO()
+
+ def __getattr__(self, name, getattr=getattr):
+ return getattr(self.__dict__['_cstring'], name)
+
+ def __setattr__(self, name, val):
+ if name == 'encoding':
+ self.__dict__['encoding'] = val
+ else:
+ return setattr(self._cstring, name, val)
+
+
+class TestUIFactory(TextUIFactory):
+ """A UI Factory for testing.
+
+ Hide the progress bar but emit note()s.
+ Redirect stdin.
+ Allows get_password to be tested without real tty attached.
+
+ See also CannedInputUIFactory which lets you provide programmatic input in
+ a structured way.
+ """
+ # TODO: Capture progress events at the model level and allow them to be
+ # observed by tests that care.
+ #
+ # XXX: Should probably unify more with CannedInputUIFactory or a
+ # particular configuration of TextUIFactory, or otherwise have a clearer
+ # idea of how they're supposed to be different.
+ # See https://bugs.launchpad.net/bzr/+bug/408213
+
+ def __init__(self, stdout=None, stderr=None, stdin=None):
+ if stdin is not None:
+ # We use a StringIOWrapper to be able to test various
+ # encodings, but the user is still responsible to
+ # encode the string and to set the encoding attribute
+ # of StringIOWrapper.
+ stdin = StringIOWrapper(stdin)
+ super(TestUIFactory, self).__init__(stdin, stdout, stderr)
+
+ def get_non_echoed_password(self):
+ """Get password from stdin without trying to handle the echo mode"""
+ password = self.stdin.readline()
+ if not password:
+ raise EOFError
+ if password[-1] == '\n':
+ password = password[:-1]
+ return password
+
+ def make_progress_view(self):
+ return NullProgressView()
+
+
+def isolated_doctest_setUp(test):
+ override_os_environ(test)
+
+
+def isolated_doctest_tearDown(test):
+ restore_os_environ(test)
+
+
+def IsolatedDocTestSuite(*args, **kwargs):
+ """Overrides doctest.DocTestSuite to handle isolation.
+
+ The method is really a factory and users are expected to use it as such.
+ """
+
+ kwargs['setUp'] = isolated_doctest_setUp
+ kwargs['tearDown'] = isolated_doctest_tearDown
+ return doctest.DocTestSuite(*args, **kwargs)
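+
+# Illustrative usage sketch: a test_suite() function might use the factory
+# like this ('bzrlib.hypothetical' is a made-up module name whose doctests
+# need os.environ isolation):
+#
+#   def test_suite():
+#       return IsolatedDocTestSuite('bzrlib.hypothetical')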
+
+
+class TestCase(testtools.TestCase):
+ """Base class for bzr unit tests.
+
+ Tests that need access to disk resources should subclass
+ TestCaseInTempDir not TestCase.
+
+ Error and debug log messages are redirected from their usual
+ location into a temporary file, the contents of which can be
+ retrieved by _get_log(). We use a real OS file, not an in-memory object,
+ so that it can also capture file IO. When the test completes this file
+ is read into memory and removed from disk.
+
+ There are also convenience functions to invoke bzr's command-line
+ routine, and to build and check bzr trees.
+
+ In addition to the usual method of overriding tearDown(), this class also
+ allows subclasses to register cleanup functions via addCleanup, which are
+ run in order as the object is torn down. It's less likely this will be
+ accidentally overlooked.
+ """
+
+ _log_file = None
+ # record lsprof data when performing benchmark calls.
+ _gather_lsprof_in_benchmarks = False
+
+ def __init__(self, methodName='testMethod'):
+ super(TestCase, self).__init__(methodName)
+ self._directory_isolation = True
+ self.exception_handlers.insert(0,
+ (UnavailableFeature, self._do_unsupported_or_skip))
+ self.exception_handlers.insert(0,
+ (TestNotApplicable, self._do_not_applicable))
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+
+ timeout = config.GlobalStack().get('selftest.timeout')
+ if timeout:
+ timeout_fixture = fixtures.TimeoutFixture(timeout)
+ timeout_fixture.setUp()
+ self.addCleanup(timeout_fixture.cleanUp)
+
+ for feature in getattr(self, '_test_needs_features', []):
+ self.requireFeature(feature)
+ self._cleanEnvironment()
+
+ if bzrlib.global_state is not None:
+ self.overrideAttr(bzrlib.global_state, 'cmdline_overrides',
+ config.CommandLineStore())
+
+ self._silenceUI()
+ self._startLogFile()
+ self._benchcalls = []
+ self._benchtime = None
+ self._clear_hooks()
+ self._track_transports()
+ self._track_locks()
+ self._clear_debug_flags()
+ # Isolate global verbosity level, to make sure it's reproducible
+ # between tests. We should get rid of this altogether: bug 656694. --
+ # mbp 20101008
+ self.overrideAttr(bzrlib.trace, '_verbosity_level', 0)
+ self._log_files = set()
+ # Each key in the ``_counters`` dict holds a value for a different
+ # counter. When the test ends, addDetail() should be used to output the
+ # counter values. This happens in install_counter_hook().
+ self._counters = {}
+ if 'config_stats' in selftest_debug_flags:
+ self._install_config_stats_hooks()
+ # Do not use i18n for tests (unless the test reverses this)
+ i18n.disable_i18n()
+
+ def debug(self):
+ # debug a frame up.
+ import pdb
+        # The sys-preserved stdin/stdout should allow debugging blackbox tests
+ pdb.Pdb(stdin=sys.__stdin__, stdout=sys.__stdout__
+ ).set_trace(sys._getframe().f_back)
+
+ def discardDetail(self, name):
+ """Extend the addDetail, getDetails api so we can remove a detail.
+
+ eg. bzr always adds the 'log' detail at startup, but we don't want to
+ include it for skipped, xfail, etc tests.
+
+ It is safe to call this for a detail that doesn't exist, in case this
+ gets called multiple times.
+ """
+ # We cheat. details is stored in __details which means we shouldn't
+        # touch it. But getDetails() returns the dict directly, so we can
+ # mutate it.
+ details = self.getDetails()
+ if name in details:
+ del details[name]
+
+ def install_counter_hook(self, hooks, name, counter_name=None):
+ """Install a counting hook.
+
+ Any hook can be counted as long as it doesn't need to return a value.
+
+ :param hooks: Where the hook should be installed.
+
+ :param name: The hook name that will be counted.
+
+ :param counter_name: The counter identifier in ``_counters``, defaults
+ to ``name``.
+ """
+ _counters = self._counters # Avoid closing over self
+ if counter_name is None:
+ counter_name = name
+        if counter_name in _counters:
+ raise AssertionError('%s is already used as a counter name'
+ % (counter_name,))
+ _counters[counter_name] = 0
+ self.addDetail(counter_name, content.Content(content.UTF8_TEXT,
+ lambda: ['%d' % (_counters[counter_name],)]))
+ def increment_counter(*args, **kwargs):
+ _counters[counter_name] += 1
+ label = 'count %s calls' % (counter_name,)
+ hooks.install_named_hook(name, increment_counter, label)
+ self.addCleanup(hooks.uninstall_named_hook, name, label)
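+
+    # Illustrative usage sketch: count how often a config option is read
+    # during one test. The hook point name is one of the config.ConfigHooks
+    # names also used by _install_config_stats_hooks below:
+    #
+    #   self.install_counter_hook(config.ConfigHooks, 'get', 'config.get')
+    #   ... exercise the code under test ...
+    #   self.assertEqual(1, self._counters['config.get'])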
+
+ def _install_config_stats_hooks(self):
+        """Install config hooks to count hook calls."""
+ for hook_name in ('get', 'set', 'remove', 'load', 'save'):
+ self.install_counter_hook(config.ConfigHooks, hook_name,
+ 'config.%s' % (hook_name,))
+
+ # The OldConfigHooks are private and need special handling to protect
+ # against recursive tests (tests that run other tests), so we just do
+ # manually what registering them into _builtin_known_hooks will provide
+ # us.
+ self.overrideAttr(config, 'OldConfigHooks', config._OldConfigHooks())
+ for hook_name in ('get', 'set', 'remove', 'load', 'save'):
+ self.install_counter_hook(config.OldConfigHooks, hook_name,
+ 'old_config.%s' % (hook_name,))
+
+ def _clear_debug_flags(self):
+ """Prevent externally set debug flags affecting tests.
+
+ Tests that want to use debug flags can just set them in the
+ debug_flags set during setup/teardown.
+ """
+ # Start with a copy of the current debug flags we can safely modify.
+ self.overrideAttr(debug, 'debug_flags', set(debug.debug_flags))
+ if 'allow_debug' not in selftest_debug_flags:
+ debug.debug_flags.clear()
+ if 'disable_lock_checks' not in selftest_debug_flags:
+ debug.debug_flags.add('strict_locks')
+
+ def _clear_hooks(self):
+ # prevent hooks affecting tests
+ known_hooks = hooks.known_hooks
+ self._preserved_hooks = {}
+ for key, (parent, name) in known_hooks.iter_parent_objects():
+ current_hooks = getattr(parent, name)
+ self._preserved_hooks[parent] = (name, current_hooks)
+ self._preserved_lazy_hooks = hooks._lazy_hooks
+ hooks._lazy_hooks = {}
+ self.addCleanup(self._restoreHooks)
+ for key, (parent, name) in known_hooks.iter_parent_objects():
+ factory = known_hooks.get(key)
+ setattr(parent, name, factory())
+ # this hook should always be installed
+ request._install_hook()
+
+ def disable_directory_isolation(self):
+ """Turn off directory isolation checks."""
+ self._directory_isolation = False
+
+ def enable_directory_isolation(self):
+ """Enable directory isolation checks."""
+ self._directory_isolation = True
+
+ def _silenceUI(self):
+ """Turn off UI for duration of test"""
+ # by default the UI is off; tests can turn it on if they want it.
+ self.overrideAttr(ui, 'ui_factory', ui.SilentUIFactory())
+
+ def _check_locks(self):
+ """Check that all lock take/release actions have been paired."""
+ # We always check for mismatched locks. If a mismatch is found, we
+ # fail unless -Edisable_lock_checks is supplied to selftest, in which
+ # case we just print a warning.
+ # unhook:
+ acquired_locks = [lock for action, lock in self._lock_actions
+ if action == 'acquired']
+ released_locks = [lock for action, lock in self._lock_actions
+ if action == 'released']
+ broken_locks = [lock for action, lock in self._lock_actions
+ if action == 'broken']
+        # trivially, given the tests for lock acquisition and release, if we
+ # have as many in each list, it should be ok. Some lock tests also
+ # break some locks on purpose and should be taken into account by
+ # considering that breaking a lock is just a dirty way of releasing it.
+ if len(acquired_locks) != (len(released_locks) + len(broken_locks)):
+ message = (
+ 'Different number of acquired and '
+ 'released or broken locks.\n'
+ 'acquired=%s\n'
+ 'released=%s\n'
+ 'broken=%s\n' %
+ (acquired_locks, released_locks, broken_locks))
+ if not self._lock_check_thorough:
+ # Rather than fail, just warn
+ print "Broken test %s: %s" % (self, message)
+ return
+ self.fail(message)
+
+ def _track_locks(self):
+ """Track lock activity during tests."""
+ self._lock_actions = []
+ if 'disable_lock_checks' in selftest_debug_flags:
+ self._lock_check_thorough = False
+ else:
+ self._lock_check_thorough = True
+
+ self.addCleanup(self._check_locks)
+ _mod_lock.Lock.hooks.install_named_hook('lock_acquired',
+ self._lock_acquired, None)
+ _mod_lock.Lock.hooks.install_named_hook('lock_released',
+ self._lock_released, None)
+ _mod_lock.Lock.hooks.install_named_hook('lock_broken',
+ self._lock_broken, None)
+
+ def _lock_acquired(self, result):
+ self._lock_actions.append(('acquired', result))
+
+ def _lock_released(self, result):
+ self._lock_actions.append(('released', result))
+
+ def _lock_broken(self, result):
+ self._lock_actions.append(('broken', result))
+
+ def permit_dir(self, name):
+ """Permit a directory to be used by this test. See permit_url."""
+ name_transport = _mod_transport.get_transport_from_path(name)
+ self.permit_url(name)
+ self.permit_url(name_transport.base)
+
+ def permit_url(self, url):
+ """Declare that url is an ok url to use in this test.
+
+ Do this for memory transports, temporary test directory etc.
+
+ Do not do this for the current working directory, /tmp, or any other
+ preexisting non isolated url.
+ """
+ if not url.endswith('/'):
+ url += '/'
+ self._bzr_selftest_roots.append(url)
+
+ def permit_source_tree_branch_repo(self):
+ """Permit the source tree bzr is running from to be opened.
+
+ Some code such as bzrlib.version attempts to read from the bzr branch
+ that bzr is executing from (if any). This method permits that directory
+ to be used in the test suite.
+ """
+ path = self.get_source_path()
+ self.record_directory_isolation()
+ try:
+ try:
+ workingtree.WorkingTree.open(path)
+ except (errors.NotBranchError, errors.NoWorkingTree):
+ raise TestSkipped('Needs a working tree of bzr sources')
+ finally:
+ self.enable_directory_isolation()
+
+ def _preopen_isolate_transport(self, transport):
+ """Check that all transport openings are done in the test work area."""
+ while isinstance(transport, pathfilter.PathFilteringTransport):
+ # Unwrap pathfiltered transports
+ transport = transport.server.backing_transport.clone(
+ transport._filter('.'))
+ url = transport.base
+ # ReadonlySmartTCPServer_for_testing decorates the backing transport
+ # urls it is given by prepending readonly+. This is appropriate as the
+ # client shouldn't know that the server is readonly (or not readonly).
+ # We could register all servers twice, with readonly+ prepending, but
+ # that makes for a long list; this is about the same but easier to
+ # read.
+ if url.startswith('readonly+'):
+ url = url[len('readonly+'):]
+ self._preopen_isolate_url(url)
+
+ def _preopen_isolate_url(self, url):
+ if not self._directory_isolation:
+ return
+ if self._directory_isolation == 'record':
+ self._bzr_selftest_roots.append(url)
+ return
+ # This prevents all transports, including e.g. sftp ones backed on disk
+ # from working unless they are explicitly granted permission. We then
+ # depend on the code that sets up test transports to check that they are
+ # appropriately isolated and enable their use by calling
+ # self.permit_transport()
+ if not osutils.is_inside_any(self._bzr_selftest_roots, url):
+ raise errors.BzrError("Attempt to escape test isolation: %r %r"
+ % (url, self._bzr_selftest_roots))
+
+ def record_directory_isolation(self):
+ """Gather accessed directories to permit later access.
+
+ This is used for tests that access the branch bzr is running from.
+ """
+ self._directory_isolation = "record"
+
+ def start_server(self, transport_server, backing_server=None):
+ """Start transport_server for this test.
+
+ This starts the server, registers a cleanup for it and permits the
+ server's urls to be used.
+ """
+ if backing_server is None:
+ transport_server.start_server()
+ else:
+ transport_server.start_server(backing_server)
+ self.addCleanup(transport_server.stop_server)
+ # Obtain a real transport because if the server supplies a password, it
+ # will be hidden from the base on the client side.
+ t = _mod_transport.get_transport_from_url(transport_server.get_url())
+ # Some transport servers effectively chroot the backing transport;
+ # others like SFTPServer don't - users of the transport can walk up the
+ # transport to read the entire backing transport. This wouldn't matter
+        # except that the workdir that tests are given - and that they
+        # expect the server's url to point at - is one directory under the
+        # safety net. So
+ # Branch operations into the transport will attempt to walk up one
+ # directory. Chrooting all servers would avoid this but also mean that
+ # we wouldn't be testing directly against non-root urls. Alternatively
+ # getting the test framework to start the server with a backing server
+ # at the actual safety net directory would work too, but this then
+ # means that the self.get_url/self.get_transport methods would need
+ # to transform all their results. On balance its cleaner to handle it
+ # here, and permit a higher url when we have one of these transports.
+ if t.base.endswith('/work/'):
+ # we have safety net/test root/work
+ t = t.clone('../..')
+ elif isinstance(transport_server,
+ test_server.SmartTCPServer_for_testing):
+ # The smart server adds a path similar to work, which is traversed
+ # up from by the client. But the server is chrooted - the actual
+ # backing transport is not escaped from, and VFS requests to the
+ # root will error (because they try to escape the chroot).
+ t2 = t.clone('..')
+ while t2.base != t.base:
+ t = t2
+ t2 = t.clone('..')
+ self.permit_url(t.base)
+
+ def _track_transports(self):
+ """Install checks for transport usage."""
+ # TestCase has no safe place it can write to.
+ self._bzr_selftest_roots = []
+ # Currently the easiest way to be sure that nothing is going on is to
+ # hook into bzr dir opening. This leaves a small window of error for
+ # transport tests, but they are well known, and we can improve on this
+ # step.
+ controldir.ControlDir.hooks.install_named_hook("pre_open",
+ self._preopen_isolate_transport, "Check bzr directories are safe.")
+
+ def _ndiff_strings(self, a, b):
+ """Return ndiff between two strings containing lines.
+
+ A trailing newline is added if missing to make the strings
+ print properly."""
+ if b and b[-1] != '\n':
+ b += '\n'
+ if a and a[-1] != '\n':
+ a += '\n'
+ difflines = difflib.ndiff(a.splitlines(True),
+ b.splitlines(True),
+ linejunk=lambda x: False,
+ charjunk=lambda x: False)
+ return ''.join(difflines)
+
+ def assertEqual(self, a, b, message=''):
+ try:
+ if a == b:
+ return
+ except UnicodeError, e:
+ # If we can't compare without getting a UnicodeError, then
+ # obviously they are different
+ trace.mutter('UnicodeError: %s', e)
+ if message:
+ message += '\n'
+ raise AssertionError("%snot equal:\na = %s\nb = %s\n"
+ % (message,
+ pprint.pformat(a), pprint.pformat(b)))
+
+ assertEquals = assertEqual
+
+ def assertEqualDiff(self, a, b, message=None):
+ """Assert two texts are equal, if not raise an exception.
+
+ This is intended for use with multi-line strings where it can
+ be hard to find the differences by eye.
+ """
+ # TODO: perhaps override assertEquals to call this for strings?
+ if a == b:
+ return
+ if message is None:
+ message = "texts not equal:\n"
+ if a + '\n' == b:
+ message = 'first string is missing a final newline.\n'
+ if a == b + '\n':
+ message = 'second string is missing a final newline.\n'
+ raise AssertionError(message +
+ self._ndiff_strings(a, b))
+
+ def assertEqualMode(self, mode, mode_test):
+ self.assertEqual(mode, mode_test,
+ 'mode mismatch %o != %o' % (mode, mode_test))
+
+ def assertEqualStat(self, expected, actual):
+ """assert that expected and actual are the same stat result.
+
+ :param expected: A stat result.
+ :param actual: A stat result.
+ :raises AssertionError: If the expected and actual stat values differ
+ other than by atime.
+ """
+ self.assertEqual(expected.st_size, actual.st_size,
+ 'st_size did not match')
+ self.assertEqual(expected.st_mtime, actual.st_mtime,
+ 'st_mtime did not match')
+ self.assertEqual(expected.st_ctime, actual.st_ctime,
+ 'st_ctime did not match')
+ if sys.platform == 'win32':
+ # On Win32 both 'dev' and 'ino' cannot be trusted. In python2.4 it
+ # is 'dev' that varies, in python 2.5 (6?) it is st_ino that is
+ # odd. We just force it to always be 0 to avoid any problems.
+ self.assertEqual(0, expected.st_dev)
+ self.assertEqual(0, actual.st_dev)
+ self.assertEqual(0, expected.st_ino)
+ self.assertEqual(0, actual.st_ino)
+ else:
+ self.assertEqual(expected.st_dev, actual.st_dev,
+ 'st_dev did not match')
+ self.assertEqual(expected.st_ino, actual.st_ino,
+ 'st_ino did not match')
+ self.assertEqual(expected.st_mode, actual.st_mode,
+ 'st_mode did not match')
+
+ def assertLength(self, length, obj_with_len):
+ """Assert that obj_with_len is of length length."""
+ if len(obj_with_len) != length:
+ self.fail("Incorrect length: wanted %d, got %d for %r" % (
+ length, len(obj_with_len), obj_with_len))
+
+ def assertLogsError(self, exception_class, func, *args, **kwargs):
+ """Assert that `func(*args, **kwargs)` quietly logs a specific error.
+ """
+ captured = []
+ orig_log_exception_quietly = trace.log_exception_quietly
+ try:
+ def capture():
+ orig_log_exception_quietly()
+ captured.append(sys.exc_info()[1])
+ trace.log_exception_quietly = capture
+ func(*args, **kwargs)
+ finally:
+ trace.log_exception_quietly = orig_log_exception_quietly
+ self.assertLength(1, captured)
+ err = captured[0]
+ self.assertIsInstance(err, exception_class)
+ return err
+
+ def assertPositive(self, val):
+ """Assert that val is greater than 0."""
+ self.assertTrue(val > 0, 'expected a positive value, but got %s' % val)
+
+ def assertNegative(self, val):
+ """Assert that val is less than 0."""
+ self.assertTrue(val < 0, 'expected a negative value, but got %s' % val)
+
+ def assertStartsWith(self, s, prefix):
+ if not s.startswith(prefix):
+ raise AssertionError('string %r does not start with %r' % (s, prefix))
+
+ def assertEndsWith(self, s, suffix):
+ """Asserts that s ends with suffix."""
+ if not s.endswith(suffix):
+ raise AssertionError('string %r does not end with %r' % (s, suffix))
+
+ def assertContainsRe(self, haystack, needle_re, flags=0):
+        """Assert that haystack contains a match for needle_re."""
+ if not re.search(needle_re, haystack, flags):
+ if '\n' in haystack or len(haystack) > 60:
+ # a long string, format it in a more readable way
+ raise AssertionError(
+ 'pattern "%s" not found in\n"""\\\n%s"""\n'
+ % (needle_re, haystack))
+ else:
+ raise AssertionError('pattern "%s" not found in "%s"'
+ % (needle_re, haystack))
+
+ def assertNotContainsRe(self, haystack, needle_re, flags=0):
+        """Assert that haystack contains no match for needle_re."""
+ if re.search(needle_re, haystack, flags):
+ raise AssertionError('pattern "%s" found in "%s"'
+ % (needle_re, haystack))
+
+ def assertContainsString(self, haystack, needle):
+ if haystack.find(needle) == -1:
+ self.fail("string %r not found in '''%s'''" % (needle, haystack))
+
+ def assertNotContainsString(self, haystack, needle):
+ if haystack.find(needle) != -1:
+ self.fail("string %r found in '''%s'''" % (needle, haystack))
+
+ def assertSubset(self, sublist, superlist):
+ """Assert that every entry in sublist is present in superlist."""
+ missing = set(sublist) - set(superlist)
+ if len(missing) > 0:
+ raise AssertionError("value(s) %r not present in container %r" %
+ (missing, superlist))
+
+ def assertListRaises(self, excClass, func, *args, **kwargs):
+ """Fail unless excClass is raised when the iterator from func is used.
+
+        Many functions can return generators; this wraps the call in
+        list() to make sure the whole generator is run, and that the
+        proper exception is raised.
+ """
+ try:
+ list(func(*args, **kwargs))
+ except excClass, e:
+ return e
+ else:
+ if getattr(excClass,'__name__', None) is not None:
+ excName = excClass.__name__
+ else:
+ excName = str(excClass)
+ raise self.failureException, "%s not raised" % excName
+
+ def assertRaises(self, excClass, callableObj, *args, **kwargs):
+ """Assert that a callable raises a particular exception.
+
+ :param excClass: As for the except statement, this may be either an
+ exception class, or a tuple of classes.
+ :param callableObj: A callable, will be passed ``*args`` and
+ ``**kwargs``.
+
+ Returns the exception so that you can examine it.
+ """
+ try:
+ callableObj(*args, **kwargs)
+ except excClass, e:
+ return e
+ else:
+ if getattr(excClass,'__name__', None) is not None:
+ excName = excClass.__name__
+ else:
+ # probably a tuple
+ excName = str(excClass)
+ raise self.failureException, "%s not raised" % excName
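+
+    # Illustrative usage sketch; the transport 't' and the missing file
+    # name are hypothetical:
+    #
+    #   err = self.assertRaises(errors.NoSuchFile, t.get, 'nonexistent')
+    #   self.assertContainsRe(str(err), 'nonexistent')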
+
+ def assertIs(self, left, right, message=None):
+ if not (left is right):
+ if message is not None:
+ raise AssertionError(message)
+ else:
+ raise AssertionError("%r is not %r." % (left, right))
+
+ def assertIsNot(self, left, right, message=None):
+ if (left is right):
+ if message is not None:
+ raise AssertionError(message)
+ else:
+ raise AssertionError("%r is %r." % (left, right))
+
+ def assertTransportMode(self, transport, path, mode):
+ """Fail if a path does not have mode "mode".
+
+ If modes are not supported on this transport, the assertion is ignored.
+ """
+ if not transport._can_roundtrip_unix_modebits():
+ return
+ path_stat = transport.stat(path)
+ actual_mode = stat.S_IMODE(path_stat.st_mode)
+ self.assertEqual(mode, actual_mode,
+ 'mode of %r incorrect (%s != %s)'
+ % (path, oct(mode), oct(actual_mode)))
+
+ def assertIsSameRealPath(self, path1, path2):
+ """Fail if path1 and path2 points to different files"""
+ self.assertEqual(osutils.realpath(path1),
+ osutils.realpath(path2),
+ "apparent paths:\na = %s\nb = %s\n," % (path1, path2))
+
+ def assertIsInstance(self, obj, kls, msg=None):
+ """Fail if obj is not an instance of kls
+
+ :param msg: Supplementary message to show if the assertion fails.
+ """
+ if not isinstance(obj, kls):
+ m = "%r is an instance of %s rather than %s" % (
+ obj, obj.__class__, kls)
+ if msg:
+ m += ": " + msg
+ self.fail(m)
+
+ def assertFileEqual(self, content, path):
+ """Fail if path does not contain 'content'."""
+ self.assertPathExists(path)
+ f = file(path, 'rb')
+ try:
+ s = f.read()
+ finally:
+ f.close()
+ self.assertEqualDiff(content, s)
+
+ def assertDocstring(self, expected_docstring, obj):
+ """Fail if obj does not have expected_docstring"""
+ if __doc__ is None:
+ # With -OO the docstring should be None instead
+ self.assertIs(obj.__doc__, None)
+ else:
+ self.assertEqual(expected_docstring, obj.__doc__)
+
+ @symbol_versioning.deprecated_method(symbol_versioning.deprecated_in((2, 4)))
+ def failUnlessExists(self, path):
+ return self.assertPathExists(path)
+
+ def assertPathExists(self, path):
+ """Fail unless path or paths, which may be abs or relative, exist."""
+ if not isinstance(path, basestring):
+ for p in path:
+ self.assertPathExists(p)
+ else:
+ self.assertTrue(osutils.lexists(path),
+ path + " does not exist")
+
+ @symbol_versioning.deprecated_method(symbol_versioning.deprecated_in((2, 4)))
+ def failIfExists(self, path):
+ return self.assertPathDoesNotExist(path)
+
+ def assertPathDoesNotExist(self, path):
+ """Fail if path or paths, which may be abs or relative, exist."""
+ if not isinstance(path, basestring):
+ for p in path:
+ self.assertPathDoesNotExist(p)
+ else:
+ self.assertFalse(osutils.lexists(path),
+ path + " exists")
+
+ def _capture_deprecation_warnings(self, a_callable, *args, **kwargs):
+ """A helper for callDeprecated and applyDeprecated.
+
+ :param a_callable: A callable to call.
+ :param args: The positional arguments for the callable
+ :param kwargs: The keyword arguments for the callable
+ :return: A tuple (warnings, result). result is the result of calling
+ a_callable(``*args``, ``**kwargs``).
+ """
+ local_warnings = []
+ def capture_warnings(msg, cls=None, stacklevel=None):
+ # we've hooked into a deprecation specific callpath,
+            # only deprecations should be getting sent via it.
+ self.assertEqual(cls, DeprecationWarning)
+ local_warnings.append(msg)
+ original_warning_method = symbol_versioning.warn
+ symbol_versioning.set_warning_method(capture_warnings)
+ try:
+ result = a_callable(*args, **kwargs)
+ finally:
+ symbol_versioning.set_warning_method(original_warning_method)
+ return (local_warnings, result)
+
+ def applyDeprecated(self, deprecation_format, a_callable, *args, **kwargs):
+ """Call a deprecated callable without warning the user.
+
+ Note that this only captures warnings raised by symbol_versioning.warn,
+ not other callers that go direct to the warning module.
+
+ To test that a deprecated method raises an error, do something like
+        this (remember that both assertRaises and applyDeprecated delay *args
+ and **kwargs passing)::
+
+ self.assertRaises(errors.ReservedId,
+ self.applyDeprecated,
+ deprecated_in((1, 5, 0)),
+ br.append_revision,
+ 'current:')
+
+ :param deprecation_format: The deprecation format that the callable
+ should have been deprecated with. This is the same type as the
+ parameter to deprecated_method/deprecated_function. If the
+ callable is not deprecated with this format, an assertion error
+ will be raised.
+ :param a_callable: A callable to call. This may be a bound method or
+ a regular function. It will be called with ``*args`` and
+ ``**kwargs``.
+ :param args: The positional arguments for the callable
+ :param kwargs: The keyword arguments for the callable
+ :return: The result of a_callable(``*args``, ``**kwargs``)
+ """
+ call_warnings, result = self._capture_deprecation_warnings(a_callable,
+ *args, **kwargs)
+ expected_first_warning = symbol_versioning.deprecation_string(
+ a_callable, deprecation_format)
+ if len(call_warnings) == 0:
+ self.fail("No deprecation warning generated by call to %s" %
+ a_callable)
+ self.assertEqual(expected_first_warning, call_warnings[0])
+ return result
+
+ def callCatchWarnings(self, fn, *args, **kw):
+ """Call a callable that raises python warnings.
+
+ The caller's responsible for examining the returned warnings.
+
+ If the callable raises an exception, the exception is not
+ caught and propagates up to the caller. In that case, the list
+ of warnings is not available.
+
+ :returns: ([warning_object, ...], fn_result)
+ """
+ # XXX: This is not perfect, because it completely overrides the
+ # warnings filters, and some code may depend on suppressing particular
+ # warnings. It's the easiest way to insulate ourselves from -Werror,
+ # though. -- Andrew, 20071062
+ wlist = []
+ def _catcher(message, category, filename, lineno, file=None, line=None):
+ # despite the name, 'message' is normally(?) a Warning subclass
+ # instance
+ wlist.append(message)
+ saved_showwarning = warnings.showwarning
+ saved_filters = warnings.filters
+ try:
+ warnings.showwarning = _catcher
+ warnings.filters = []
+ result = fn(*args, **kw)
+ finally:
+ warnings.showwarning = saved_showwarning
+ warnings.filters = saved_filters
+ return wlist, result
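+
+    # Illustrative usage sketch; some_function_that_warns is a hypothetical
+    # callable that issues a DeprecationWarning via the warnings module:
+    #
+    #   wlist, result = self.callCatchWarnings(some_function_that_warns, 42)
+    #   self.assertLength(1, wlist)
+    #   self.assertIsInstance(wlist[0], DeprecationWarning)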
+
+ def callDeprecated(self, expected, callable, *args, **kwargs):
+ """Assert that a callable is deprecated in a particular way.
+
+ This is a very precise test for unusual requirements. The
+ applyDeprecated helper function is probably more suited for most tests
+ as it allows you to simply specify the deprecation format being used
+ and will ensure that that is issued for the function being called.
+
+ Note that this only captures warnings raised by symbol_versioning.warn,
+ not other callers that go direct to the warning module. To catch
+ general warnings, use callCatchWarnings.
+
+ :param expected: a list of the deprecation warnings expected, in order
+ :param callable: The callable to call
+ :param args: The positional arguments for the callable
+ :param kwargs: The keyword arguments for the callable
+ """
+ call_warnings, result = self._capture_deprecation_warnings(callable,
+ *args, **kwargs)
+ self.assertEqual(expected, call_warnings)
+ return result
+
+ def _startLogFile(self):
+        """Set up an in-memory target for bzr and testcase log messages"""
+ pseudo_log_file = StringIO()
+ def _get_log_contents_for_weird_testtools_api():
+ return [pseudo_log_file.getvalue().decode(
+ "utf-8", "replace").encode("utf-8")]
+ self.addDetail("log", content.Content(content.ContentType("text",
+ "plain", {"charset": "utf8"}),
+ _get_log_contents_for_weird_testtools_api))
+ self._log_file = pseudo_log_file
+ self._log_memento = trace.push_log_file(self._log_file)
+ self.addCleanup(self._finishLogFile)
+
+ def _finishLogFile(self):
+ """Flush and dereference the in-memory log for this testcase"""
+ if trace._trace_file:
+ # flush the log file, to get all content
+ trace._trace_file.flush()
+ trace.pop_log_file(self._log_memento)
+ # The logging module now tracks references for cleanup so discard ours
+ del self._log_memento
+
+ def thisFailsStrictLockCheck(self):
+ """It is known that this test would fail with -Dstrict_locks.
+
+ By default, all tests are run with strict lock checking unless
+ -Edisable_lock_checks is supplied. However there are some tests which
+ we know fail strict locks at this point that have not been fixed.
+ They should call this function to disable the strict checking.
+
+ This should be used sparingly, it is much better to fix the locking
+ issues rather than papering over the problem by calling this function.
+ """
+ debug.debug_flags.discard('strict_locks')
+
+ def overrideAttr(self, obj, attr_name, new=_unitialized_attr):
+ """Overrides an object attribute restoring it after the test.
+
+ :note: This should be used with discretion; you should think about
+ whether it's better to make the code testable without monkey-patching.
+
+ :param obj: The object that will be mutated.
+
+ :param attr_name: The attribute name we want to preserve/override in
+ the object.
+
+ :param new: The optional value we want to set the attribute to.
+
+ :returns: The actual attr value.
+ """
+ value = getattr(obj, attr_name)
+ # The actual value is captured by the call below
+ self.addCleanup(setattr, obj, attr_name, value)
+ if new is not _unitialized_attr:
+ setattr(obj, attr_name, new)
+ return value
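+
+    # Illustrative usage sketch, mirroring what _silenceUI above does: the
+    # original attribute value is restored by the registered cleanup when
+    # the test finishes, whatever the test does afterwards:
+    #
+    #   old_factory = self.overrideAttr(ui, 'ui_factory',
+    #                                   ui.SilentUIFactory())
+    #   ... run code that consults ui.ui_factory ...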
+
+ def overrideEnv(self, name, new):
+ """Set an environment variable, and reset it after the test.
+
+ :param name: The environment variable name.
+
+ :param new: The value to set the variable to. If None, the
+ variable is deleted from the environment.
+
+ :returns: The actual variable value.
+ """
+ value = osutils.set_or_unset_env(name, new)
+ self.addCleanup(osutils.set_or_unset_env, name, value)
+ return value
+
+ def recordCalls(self, obj, attr_name):
+ """Monkeypatch in a wrapper that will record calls.
+
+ The monkeypatch is automatically removed when the test concludes.
+
+ :param obj: The namespace holding the reference to be replaced;
+ typically a module, class, or object.
+ :param attr_name: A string for the name of the attribute to
+ patch.
+ :returns: A list that will be extended with one item every time the
+ function is called, with a tuple of (args, kwargs).
+ """
+ calls = []
+
+ def decorator(*args, **kwargs):
+ calls.append((args, kwargs))
+ return orig(*args, **kwargs)
+ orig = self.overrideAttr(obj, attr_name, decorator)
+ return calls
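+
+    # Illustrative usage sketch; 'some_module.helper' is a hypothetical
+    # target to be wrapped for the duration of the test:
+    #
+    #   calls = self.recordCalls(some_module, 'helper')
+    #   some_module.helper(1, x=2)
+    #   self.assertEqual([((1,), {'x': 2})], calls)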
+
+ def _cleanEnvironment(self):
+ for name, value in isolated_environ.iteritems():
+ self.overrideEnv(name, value)
+
+ def _restoreHooks(self):
+ for klass, (name, hooks) in self._preserved_hooks.items():
+ setattr(klass, name, hooks)
+ self._preserved_hooks.clear()
+ bzrlib.hooks._lazy_hooks = self._preserved_lazy_hooks
+ self._preserved_lazy_hooks.clear()
+
+ def knownFailure(self, reason):
+ """Declare that this test fails for a known reason
+
+ Tests that are known to fail should generally be using expectedFailure
+ with an appropriate reverse assertion if a change could cause the test
+ to start passing. Conversely if the test has no immediate prospect of
+ succeeding then using skip is more suitable.
+
+ When this method is called while an exception is being handled, that
+ traceback will be used, otherwise a new exception will be thrown to
+ provide one but won't be reported.
+ """
+ self._add_reason(reason)
+ try:
+ exc_info = sys.exc_info()
+ if exc_info != (None, None, None):
+ self._report_traceback(exc_info)
+ else:
+ try:
+ raise self.failureException(reason)
+ except self.failureException:
+ exc_info = sys.exc_info()
+ # GZ 02-08-2011: Maybe cleanup this err.exc_info attribute too?
+ raise testtools.testcase._ExpectedFailure(exc_info)
+ finally:
+ del exc_info
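+
+    # Illustrative usage sketch: record a known shortcoming instead of
+    # failing the run (broken_helper is a hypothetical function that still
+    # misbehaves); called inside the handler, the current traceback is used:
+    #
+    #   try:
+    #       self.assertEqual('expected', broken_helper())
+    #   except AssertionError:
+    #       self.knownFailure('broken_helper still returns the old value')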
+
+ def _suppress_log(self):
+ """Remove the log info from details."""
+ self.discardDetail('log')
+
+ def _do_skip(self, result, reason):
+ self._suppress_log()
+ addSkip = getattr(result, 'addSkip', None)
+ if not callable(addSkip):
+ result.addSuccess(result)
+ else:
+ addSkip(self, reason)
+
+ @staticmethod
+ def _do_known_failure(self, result, e):
+ self._suppress_log()
+ err = sys.exc_info()
+ addExpectedFailure = getattr(result, 'addExpectedFailure', None)
+ if addExpectedFailure is not None:
+ addExpectedFailure(self, err)
+ else:
+ result.addSuccess(self)
+
+ @staticmethod
+ def _do_not_applicable(self, result, e):
+ if not e.args:
+ reason = 'No reason given'
+ else:
+ reason = e.args[0]
+        self._suppress_log()
+ addNotApplicable = getattr(result, 'addNotApplicable', None)
+ if addNotApplicable is not None:
+ result.addNotApplicable(self, reason)
+ else:
+ self._do_skip(result, reason)
+
+ @staticmethod
+ def _report_skip(self, result, err):
+ """Override the default _report_skip.
+
+        We want to strip the 'log' detail. If we wait until _do_skip, it has
+ already been formatted into the 'reason' string, and we can't pull it
+ out again.
+ """
+ self._suppress_log()
+ super(TestCase, self)._report_skip(self, result, err)
+
+ @staticmethod
+ def _report_expected_failure(self, result, err):
+ """Strip the log.
+
+ See _report_skip for motivation.
+ """
+ self._suppress_log()
+ super(TestCase, self)._report_expected_failure(self, result, err)
+
+ @staticmethod
+ def _do_unsupported_or_skip(self, result, e):
+ reason = e.args[0]
+ self._suppress_log()
+ addNotSupported = getattr(result, 'addNotSupported', None)
+ if addNotSupported is not None:
+ result.addNotSupported(self, reason)
+ else:
+ self._do_skip(result, reason)
+
+ def time(self, callable, *args, **kwargs):
+ """Run callable and accrue the time it takes to the benchmark time.
+
+ If lsprofiling is enabled (i.e. by --lsprof-time to bzr selftest) then
+ this will cause lsprofile statistics to be gathered and stored in
+ self._benchcalls.
+ """
+ if self._benchtime is None:
+ self.addDetail('benchtime', content.Content(content.ContentType(
+ "text", "plain"), lambda:[str(self._benchtime)]))
+ self._benchtime = 0
+ start = time.time()
+ try:
+ if not self._gather_lsprof_in_benchmarks:
+ return callable(*args, **kwargs)
+ else:
+ # record this benchmark
+ ret, stats = bzrlib.lsprof.profile(callable, *args, **kwargs)
+ stats.sort()
+ self._benchcalls.append(((callable, args, kwargs), stats))
+ return ret
+ finally:
+ self._benchtime += time.time() - start
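+
+    # Illustrative usage sketch: only the timed call accrues to the
+    # benchmark figure; setup outside self.time() does not. The tree setup
+    # here assumes a TestCaseWithTransport-style subclass:
+    #
+    #   tree = self.make_branch_and_tree('.')   # not timed
+    #   self.time(tree.commit, 'first post')    # accrued to self._benchtime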
+
+ def log(self, *args):
+ trace.mutter(*args)
+
+ def get_log(self):
+ """Get a unicode string containing the log from bzrlib.trace.
+
+ Undecodable characters are replaced.
+ """
+ return u"".join(self.getDetails()['log'].iter_text())
+
+ def requireFeature(self, feature):
+ """This test requires a specific feature is available.
+
+ :raises UnavailableFeature: When feature is not available.
+ """
+ if not feature.available():
+ raise UnavailableFeature(feature)
+
+ def _run_bzr_autosplit(self, args, retcode, encoding, stdin,
+ working_dir):
+ """Run bazaar command line, splitting up a string command line."""
+ if isinstance(args, basestring):
+            # shlex doesn't understand unicode strings,
+ # so args should be plain string (bialix 20070906)
+ args = list(shlex.split(str(args)))
+ return self._run_bzr_core(args, retcode=retcode,
+ encoding=encoding, stdin=stdin, working_dir=working_dir,
+ )
+
+ def _run_bzr_core(self, args, retcode, encoding, stdin,
+ working_dir):
+ # Clear chk_map page cache, because the contents are likely to mask
+ # locking errors.
+ chk_map.clear_cache()
+ if encoding is None:
+ encoding = osutils.get_user_encoding()
+ stdout = StringIOWrapper()
+ stderr = StringIOWrapper()
+ stdout.encoding = encoding
+ stderr.encoding = encoding
+
+ self.log('run bzr: %r', args)
+ # FIXME: don't call into logging here
+ handler = trace.EncodedStreamHandler(stderr, errors="replace",
+ level=logging.INFO)
+ logger = logging.getLogger('')
+ logger.addHandler(handler)
+ old_ui_factory = ui.ui_factory
+ ui.ui_factory = TestUIFactory(stdin=stdin, stdout=stdout, stderr=stderr)
+
+ cwd = None
+ if working_dir is not None:
+ cwd = osutils.getcwd()
+ os.chdir(working_dir)
+
+ try:
+ try:
+ result = self.apply_redirected(
+ ui.ui_factory.stdin,
+ stdout, stderr,
+ _mod_commands.run_bzr_catch_user_errors,
+ args)
+ except KeyboardInterrupt:
+ # Reraise KeyboardInterrupt with contents of redirected stdout
+ # and stderr as arguments, for tests which are interested in
+ # stdout and stderr and are expecting the exception.
+ out = stdout.getvalue()
+ err = stderr.getvalue()
+ if out:
+ self.log('output:\n%r', out)
+ if err:
+ self.log('errors:\n%r', err)
+ raise KeyboardInterrupt(out, err)
+ finally:
+ logger.removeHandler(handler)
+ ui.ui_factory = old_ui_factory
+ if cwd is not None:
+ os.chdir(cwd)
+
+ out = stdout.getvalue()
+ err = stderr.getvalue()
+ if out:
+ self.log('output:\n%r', out)
+ if err:
+ self.log('errors:\n%r', err)
+ if retcode is not None:
+ self.assertEquals(retcode, result,
+ message='Unexpected return code')
+ return result, out, err
+
+ def run_bzr(self, args, retcode=0, encoding=None, stdin=None,
+ working_dir=None, error_regexes=[], output_encoding=None):
+ """Invoke bzr, as if it were run from the command line.
+
+ The argument list should not include the bzr program name - the
+ first argument is normally the bzr command. Arguments may be
+ passed in three ways:
+
+ 1- A list of strings, eg ["commit", "a"]. This is recommended
+ when the command contains whitespace or metacharacters, or
+ is built up at run time.
+
+ 2- A single string, eg "add a". This is the most convenient
+ for hardcoded commands.
+
+ This runs bzr through the interface that catches and reports
+ errors, and with logging set to something approximating the
+ default, so that error reporting can be checked.
+
+ This should be the main method for tests that want to exercise the
+ overall behavior of the bzr application (rather than a unit test
+ or a functional test of the library.)
+
+ This sends the stdout/stderr results into the test's log,
+ where it may be useful for debugging. See also run_captured.
+
+ :keyword stdin: A string to be used as stdin for the command.
+ :keyword retcode: The status code the command should return;
+ default 0.
+ :keyword working_dir: The directory to run the command in
+ :keyword error_regexes: A list of expected error messages. If
+ specified they must be seen in the error output of the command.
+ """
+ retcode, out, err = self._run_bzr_autosplit(
+ args=args,
+ retcode=retcode,
+ encoding=encoding,
+ stdin=stdin,
+ working_dir=working_dir,
+ )
+ self.assertIsInstance(error_regexes, (list, tuple))
+ for regex in error_regexes:
+ self.assertContainsRe(err, regex)
+ return out, err
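+
+    # Illustrative usage sketch (the error text matched below is only an
+    # example, not a guaranteed message):
+    #
+    #   out, err = self.run_bzr(['status'])           # expects retcode 0
+    #   out, err = self.run_bzr(['push'], retcode=3)  # expected failure
+    #   self.assertContainsRe(err, 'No push location known')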
+
+ def run_bzr_error(self, error_regexes, *args, **kwargs):
+ """Run bzr, and check that stderr contains the supplied regexes
+
+ :param error_regexes: Sequence of regular expressions which
+ must each be found in the error output. The relative ordering
+ is not enforced.
+ :param args: command-line arguments for bzr
+ :param kwargs: Keyword arguments which are interpreted by run_bzr
+ This function changes the default value of retcode to be 3,
+ since in most cases this is run when you expect bzr to fail.
+
+ :return: (out, err) The actual output of running the command (in case
+ you want to do more inspection)
+
+ Examples of use::
+
+ # Make sure that commit is failing because there is nothing to do
+ self.run_bzr_error(['no changes to commit'],
+ ['commit', '-m', 'my commit comment'])
+ # Make sure --strict is handling an unknown file, rather than
+ # giving us the 'nothing to do' error
+ self.build_tree(['unknown'])
+ self.run_bzr_error(['Commit refused because there are unknown files'],
+                               ['commit', '--strict', '-m', 'my commit comment'])
+ """
+ kwargs.setdefault('retcode', 3)
+ kwargs['error_regexes'] = error_regexes
+ out, err = self.run_bzr(*args, **kwargs)
+ return out, err
+
+ def run_bzr_subprocess(self, *args, **kwargs):
+ """Run bzr in a subprocess for testing.
+
+ This starts a new Python interpreter and runs bzr in there.
+ This should only be used for tests that have a justifiable need for
+ this isolation: e.g. they are testing startup time, or signal
+ handling, or early startup code, etc. Subprocess code can't be
+ profiled or debugged so easily.
+
+ :keyword retcode: The status code that is expected. Defaults to 0. If
+ None is supplied, the status code is not checked.
+ :keyword env_changes: A dictionary which lists changes to environment
+ variables. A value of None will unset the env variable.
+ The values must be strings. The change will only occur in the
+ child, so you don't need to fix the environment after running.
+ :keyword universal_newlines: Convert CRLF => LF
+ :keyword allow_plugins: By default the subprocess is run with
+ --no-plugins to ensure test reproducibility. Also, it is possible
+ for system-wide plugins to create unexpected output on stderr,
+ which can cause unnecessary test failures.
+ """
+ env_changes = kwargs.get('env_changes', {})
+ working_dir = kwargs.get('working_dir', None)
+ allow_plugins = kwargs.get('allow_plugins', False)
+ if len(args) == 1:
+ if isinstance(args[0], list):
+ args = args[0]
+ elif isinstance(args[0], basestring):
+ args = list(shlex.split(args[0]))
+ else:
+ raise ValueError("passing varargs to run_bzr_subprocess")
+ process = self.start_bzr_subprocess(args, env_changes=env_changes,
+ working_dir=working_dir,
+ allow_plugins=allow_plugins)
+ # We distinguish between retcode=None and retcode not passed.
+ supplied_retcode = kwargs.get('retcode', 0)
+ return self.finish_bzr_subprocess(process, retcode=supplied_retcode,
+ universal_newlines=kwargs.get('universal_newlines', False),
+ process_args=args)
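+
+    # Illustrative usage sketch: run 'bzr --version' in a child interpreter
+    # with a modified environment (the BZR_LOG override is just an example):
+    #
+    #   out, err = self.run_bzr_subprocess('--version',
+    #       env_changes={'BZR_LOG': os.devnull})
+    #   self.assertContainsRe(out, 'Bazaar')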
+
+ def start_bzr_subprocess(self, process_args, env_changes=None,
+ skip_if_plan_to_signal=False,
+ working_dir=None,
+ allow_plugins=False, stderr=subprocess.PIPE):
+ """Start bzr in a subprocess for testing.
+
+ This starts a new Python interpreter and runs bzr in there.
+ This should only be used for tests that have a justifiable need for
+ this isolation: e.g. they are testing startup time, or signal
+ handling, or early startup code, etc. Subprocess code can't be
+ profiled or debugged so easily.
+
+ :param process_args: a list of arguments to pass to the bzr executable,
+ for example ``['--version']``.
+ :param env_changes: A dictionary which lists changes to environment
+ variables. A value of None will unset the env variable.
+ The values must be strings. The change will only occur in the
+ child, so you don't need to fix the environment after running.
+ :param skip_if_plan_to_signal: raise TestSkipped when true and system
+ doesn't support signalling subprocesses.
+ :param allow_plugins: If False (default) pass --no-plugins to bzr.
+ :param stderr: file to use for the subprocess's stderr. Valid values
+ are those valid for the stderr argument of `subprocess.Popen`.
+ Default value is ``subprocess.PIPE``.
+
+ :returns: Popen object for the started process.
+ """
+ if skip_if_plan_to_signal:
+ if os.name != "posix":
+ raise TestSkipped("Sending signals not supported")
+
+ if env_changes is None:
+ env_changes = {}
+ # Because $HOME is set to a tempdir for the context of a test, modules
+ # installed in the user dir will not be found unless $PYTHONUSERBASE
+ # gets set to the computed directory of this parent process.
+ if site.USER_BASE is not None:
+ env_changes["PYTHONUSERBASE"] = site.USER_BASE
+ old_env = {}
+
+ def cleanup_environment():
+ for env_var, value in env_changes.iteritems():
+ old_env[env_var] = osutils.set_or_unset_env(env_var, value)
+
+ def restore_environment():
+ for env_var, value in old_env.iteritems():
+ osutils.set_or_unset_env(env_var, value)
+
+ bzr_path = self.get_bzr_path()
+
+ cwd = None
+ if working_dir is not None:
+ cwd = osutils.getcwd()
+ os.chdir(working_dir)
+
+ try:
+ # win32 subprocess doesn't support preexec_fn
+ # so we will avoid using it on all platforms, just to
+ # make sure the code path is used, and we don't break on win32
+ cleanup_environment()
+ # Include the subprocess's log file in the test details, in case
+ # the test fails due to an error in the subprocess.
+ self._add_subprocess_log(trace._get_bzr_log_filename())
+ command = [sys.executable]
+ # frozen executables don't need the path to bzr
+ if getattr(sys, "frozen", None) is None:
+ command.append(bzr_path)
+ if not allow_plugins:
+ command.append('--no-plugins')
+ command.extend(process_args)
+ process = self._popen(command, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=stderr)
+ finally:
+ restore_environment()
+ if cwd is not None:
+ os.chdir(cwd)
+
+ return process
+
+ def _add_subprocess_log(self, log_file_path):
+ if len(self._log_files) == 0:
+ # Register an addCleanup func. We do this on the first call to
+ # _add_subprocess_log rather than in TestCase.setUp so that this
+ # addCleanup is registered after any cleanups for tempdirs that
+ # subclasses might create, which will probably remove the log file
+ # we want to read.
+ self.addCleanup(self._subprocess_log_cleanup)
+ # self._log_files is a set, so if a log file is reused we won't grab it
+ # twice.
+ self._log_files.add(log_file_path)
+
+ def _subprocess_log_cleanup(self):
+ for count, log_file_path in enumerate(self._log_files):
+ # We use buffer_now=True to avoid holding the file open beyond
+ # the life of this function, which might interfere with e.g.
+ # cleaning tempdirs on Windows.
+ # XXX: Testtools 0.9.5 doesn't have the content_from_file helper
+ #detail_content = content.content_from_file(
+ # log_file_path, buffer_now=True)
+ with open(log_file_path, 'rb') as log_file:
+ log_file_bytes = log_file.read()
+ detail_content = content.Content(content.ContentType("text",
+ "plain", {"charset": "utf8"}), lambda: [log_file_bytes])
+ self.addDetail("start_bzr_subprocess-log-%d" % (count,),
+ detail_content)
+
+ def _popen(self, *args, **kwargs):
+ """Place a call to Popen.
+
+ Allows tests to override this method to intercept the calls made to
+ Popen for introspection.
+ """
+ return subprocess.Popen(*args, **kwargs)
+
+ def get_source_path(self):
+ """Return the path of the directory containing bzrlib."""
+ return os.path.dirname(os.path.dirname(bzrlib.__file__))
+
+ def get_bzr_path(self):
+ """Return the path of the 'bzr' executable for this test suite."""
+ bzr_path = os.path.join(self.get_source_path(), "bzr")
+ if not os.path.isfile(bzr_path):
+ # We are probably installed. Assume sys.argv is the right file
+ bzr_path = sys.argv[0]
+ return bzr_path
+
+ def finish_bzr_subprocess(self, process, retcode=0, send_signal=None,
+ universal_newlines=False, process_args=None):
+ """Finish the execution of process.
+
+ :param process: the Popen object returned from start_bzr_subprocess.
+ :param retcode: The status code that is expected. Defaults to 0. If
+ None is supplied, the status code is not checked.
+ :param send_signal: an optional signal to send to the process.
+ :param universal_newlines: Convert CRLF => LF
+ :returns: (stdout, stderr)
+ """
+ if send_signal is not None:
+ os.kill(process.pid, send_signal)
+ out, err = process.communicate()
+
+ if universal_newlines:
+ out = out.replace('\r\n', '\n')
+ err = err.replace('\r\n', '\n')
+
+ if retcode is not None and retcode != process.returncode:
+ if process_args is None:
+ process_args = "(unknown args)"
+ trace.mutter('Output of bzr %s:\n%s', process_args, out)
+ trace.mutter('Error for bzr %s:\n%s', process_args, err)
+ self.fail('Command bzr %s failed with retcode %s != %s'
+ % (process_args, retcode, process.returncode))
+ return [out, err]
+
+ def check_tree_shape(self, tree, shape):
+ """Compare a tree to a list of expected names.
+
+ Fail if they are not precisely equal.
+ """
+ extras = []
+ shape = list(shape) # copy
+ for path, ie in tree.iter_entries_by_dir():
+ name = path.replace('\\', '/')
+ if ie.kind == 'directory':
+ name = name + '/'
+ if name == "/":
+ pass # ignore root entry
+ elif name in shape:
+ shape.remove(name)
+ else:
+ extras.append(name)
+ if shape:
+ self.fail("expected paths not found in inventory: %r" % shape)
+ if extras:
+ self.fail("unexpected paths found in inventory: %r" % extras)
+
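+ # For illustration only: check_tree_shape compares the tree's inventory
+ # against a flat list of names, with directories marked by a trailing
+ # slash; a hypothetical call could look like:
+ #
+ # self.check_tree_shape(tree, ['a', 'dir/', 'dir/b'])
+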
+ def apply_redirected(self, stdin=None, stdout=None, stderr=None,
+ a_callable=None, *args, **kwargs):
+ """Call callable with redirected std io pipes.
+
+ Returns the return code."""
+ if not callable(a_callable):
+ raise ValueError("a_callable must be callable.")
+ if stdin is None:
+ stdin = StringIO("")
+ if stdout is None:
+ if getattr(self, "_log_file", None) is not None:
+ stdout = self._log_file
+ else:
+ stdout = StringIO()
+ if stderr is None:
+ if getattr(self, "_log_file", None is not None):
+ stderr = self._log_file
+ else:
+ stderr = StringIO()
+ real_stdin = sys.stdin
+ real_stdout = sys.stdout
+ real_stderr = sys.stderr
+ try:
+ sys.stdout = stdout
+ sys.stderr = stderr
+ sys.stdin = stdin
+ return a_callable(*args, **kwargs)
+ finally:
+ sys.stdout = real_stdout
+ sys.stderr = real_stderr
+ sys.stdin = real_stdin
+
+ def reduceLockdirTimeout(self):
+ """Reduce the default lock timeout for the duration of the test, so that
+ if LockContention occurs during a test, it does so quickly.
+
+ Tests that expect to provoke LockContention errors should call this.
+ """
+ self.overrideAttr(lockdir, '_DEFAULT_TIMEOUT_SECONDS', 0)
+
+ def make_utf8_encoded_stringio(self, encoding_type=None):
+ """Return a StringIOWrapper instance, that will encode Unicode
+ input to UTF-8.
+ """
+ if encoding_type is None:
+ encoding_type = 'strict'
+ sio = StringIO()
+ output_encoding = 'utf-8'
+ sio = codecs.getwriter(output_encoding)(sio, errors=encoding_type)
+ sio.encoding = output_encoding
+ return sio
+
+ def disable_verb(self, verb):
+ """Disable a smart server verb for one test."""
+ from bzrlib.smart import request
+ request_handlers = request.request_handlers
+ orig_method = request_handlers.get(verb)
+ orig_info = request_handlers.get_info(verb)
+ request_handlers.remove(verb)
+ self.addCleanup(request_handlers.register, verb, orig_method,
+ info=orig_info)
+
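+ # Illustrative sketch: a test that needs the client to fall back to older
+ # protocol behaviour can disable a smart verb for its duration; the verb
+ # name below is just an example:
+ #
+ # self.disable_verb('Repository.get_parent_map')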
+
+class CapturedCall(object):
+ """A helper for capturing smart server calls for easy debug analysis."""
+
+ def __init__(self, params, prefix_length):
+ """Capture the call with params and skip prefix_length stack frames."""
+ self.call = params
+ import traceback
+ # The last 5 frames are the __init__, the hook frame, and 3 smart
+ # client frames. Beyond this we could get more clever, but this is good
+ # enough for now.
+ stack = traceback.extract_stack()[prefix_length:-5]
+ self.stack = ''.join(traceback.format_list(stack))
+
+ def __str__(self):
+ return self.call.method
+
+ def __repr__(self):
+ return self.call.method
+
+ def stack(self):
+ return self.stack
+
+
+class TestCaseWithMemoryTransport(TestCase):
+ """Common test class for tests that do not need disk resources.
+
+ Tests that need disk resources should derive from TestCaseInTempDir
+ or TestCaseWithTransport.
+
+ TestCaseWithMemoryTransport sets the TEST_ROOT variable for all bzr tests.
+
+ For TestCaseWithMemoryTransport the ``test_home_dir`` is set to the name of
+ a directory which does not exist. This serves to help ensure test isolation
+ is preserved. ``test_dir`` is set to the TEST_ROOT, as is cwd, because they
+ must exist. However, TestCaseWithMemoryTransport does not offer local file
+ defaults for the transport in tests, nor does it obey the command line
+ override, so tests that accidentally write to the common directory should
+ be rare.
+
+ :cvar TEST_ROOT: Directory containing all temporary directories, plus a
+ ``.bzr`` directory that stops us ascending higher into the filesystem.
+ """
+
+ TEST_ROOT = None
+ _TEST_NAME = 'test'
+
+ def __init__(self, methodName='runTest'):
+ # allow test parameterization after test construction and before test
+ # execution. Variables that the parameterizer sets need to be
+ # ones that are not set by setUp, or setUp will trash them.
+ super(TestCaseWithMemoryTransport, self).__init__(methodName)
+ self.vfs_transport_factory = default_transport
+ self.transport_server = None
+ self.transport_readonly_server = None
+ self.__vfs_server = None
+
+ def get_transport(self, relpath=None):
+ """Return a writeable transport.
+
+ This transport is for the test scratch space relative to
+ "self._test_root"
+
+ :param relpath: a path relative to the base url.
+ """
+ t = _mod_transport.get_transport_from_url(self.get_url(relpath))
+ self.assertFalse(t.is_readonly())
+ return t
+
+ def get_readonly_transport(self, relpath=None):
+ """Return a readonly transport for the test scratch space
+
+ This can be used to test that operations which should only need
+ readonly access in fact do not try to write.
+
+ :param relpath: a path relative to the base url.
+ """
+ t = _mod_transport.get_transport_from_url(
+ self.get_readonly_url(relpath))
+ self.assertTrue(t.is_readonly())
+ return t
+
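+ # A minimal sketch (for illustration) of how the two transport helpers
+ # above are used together in a test body:
+ #
+ # t = self.get_transport()
+ # t.put_bytes('file', 'content')
+ # self.assertTrue(self.get_readonly_transport().has('file'))
+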
+ def create_transport_readonly_server(self):
+ """Create a transport server from class defined at init.
+
+ This is mostly a hook for daughter classes.
+ """
+ return self.transport_readonly_server()
+
+ def get_readonly_server(self):
+ """Get the server instance for the readonly transport
+
+ This is useful for some tests with specific servers to do diagnostics.
+ """
+ if self.__readonly_server is None:
+ if self.transport_readonly_server is None:
+ # readonly decorator requested
+ self.__readonly_server = test_server.ReadonlyServer()
+ else:
+ # explicit readonly transport.
+ self.__readonly_server = self.create_transport_readonly_server()
+ self.start_server(self.__readonly_server,
+ self.get_vfs_only_server())
+ return self.__readonly_server
+
+ def get_readonly_url(self, relpath=None):
+ """Get a URL for the readonly transport.
+
+ This will either be backed by '.' or a decorator to the transport
+ used by self.get_url()
+ relpath provides for clients to get a path relative to the base url.
+ These should only be downwards relative, not upwards.
+ """
+ base = self.get_readonly_server().get_url()
+ return self._adjust_url(base, relpath)
+
+ def get_vfs_only_server(self):
+ """Get the vfs only read/write server instance.
+
+ This is useful for some tests with specific servers that need
+ diagnostics.
+
+ For TestCaseWithMemoryTransport this is always a MemoryServer, and there
+ is no means to override it.
+ """
+ if self.__vfs_server is None:
+ self.__vfs_server = memory.MemoryServer()
+ self.start_server(self.__vfs_server)
+ return self.__vfs_server
+
+ def get_server(self):
+ """Get the read/write server instance.
+
+ This is useful for some tests with specific servers that need
+ diagnostics.
+
+ This is built from the self.transport_server factory. If that is None,
+ then self.get_vfs_only_server() is returned.
+ """
+ if self.__server is None:
+ if (self.transport_server is None or self.transport_server is
+ self.vfs_transport_factory):
+ self.__server = self.get_vfs_only_server()
+ else:
+ # bring up a decorated means of access to the vfs only server.
+ self.__server = self.transport_server()
+ self.start_server(self.__server, self.get_vfs_only_server())
+ return self.__server
+
+ def _adjust_url(self, base, relpath):
+ """Get a URL (or maybe a path) for the readwrite transport.
+
+ This will either be backed by '.' or to an equivalent non-file based
+ facility.
+ relpath provides for clients to get a path relative to the base url.
+ These should only be downwards relative, not upwards.
+ """
+ if relpath is not None and relpath != '.':
+ if not base.endswith('/'):
+ base = base + '/'
+ # XXX: Really base should be a url; we did after all call
+ # get_url()! But sometimes it's just a path (from
+ # LocalAbspathServer), and it'd be wrong to append urlescaped data
+ # to a non-escaped local path.
+ if base.startswith('./') or base.startswith('/'):
+ base += relpath
+ else:
+ base += urlutils.escape(relpath)
+ return base
+
+ def get_url(self, relpath=None):
+ """Get a URL (or maybe a path) for the readwrite transport.
+
+ This will either be backed by '.' or to an equivalent non-file based
+ facility.
+ relpath provides for clients to get a path relative to the base url.
+ These should only be downwards relative, not upwards.
+ """
+ base = self.get_server().get_url()
+ return self._adjust_url(base, relpath)
+
+ def get_vfs_only_url(self, relpath=None):
+ """Get a URL (or maybe a path for the plain old vfs transport.
+
+ This will never be a smart protocol. It always has all the
+ capabilities of the local filesystem, but it might actually be a
+ MemoryTransport or some other similar virtual filesystem.
+
+ This is the backing transport (if any) of the server returned by
+ get_url and get_readonly_url.
+
+ :param relpath: provides for clients to get a path relative to the base
+ url. These should only be downwards relative, not upwards.
+ :return: A URL
+ """
+ base = self.get_vfs_only_server().get_url()
+ return self._adjust_url(base, relpath)
+
+ def _create_safety_net(self):
+ """Make a fake bzr directory.
+
+ This prevents any tests propagating up onto the TEST_ROOT directory's
+ real branch.
+ """
+ root = TestCaseWithMemoryTransport.TEST_ROOT
+ # Make sure we get a readable and accessible home for .bzr.log
+ # and/or config files, and not fallback to weird defaults (see
+ # http://pad.lv/825027).
+ self.assertIs(None, os.environ.get('BZR_HOME', None))
+ os.environ['BZR_HOME'] = root
+ wt = controldir.ControlDir.create_standalone_workingtree(root)
+ del os.environ['BZR_HOME']
+ # Hack for speed: remember the raw bytes of the dirstate file so that
+ # we don't need to re-open the wt to check it hasn't changed.
+ TestCaseWithMemoryTransport._SAFETY_NET_PRISTINE_DIRSTATE = (
+ wt.control_transport.get_bytes('dirstate'))
+
+ def _check_safety_net(self):
+ """Check that the safety .bzr directory have not been touched.
+
+ _make_test_root have created a .bzr directory to prevent tests from
+ propagating. This method ensures than a test did not leaked.
+ """
+ root = TestCaseWithMemoryTransport.TEST_ROOT
+ t = _mod_transport.get_transport_from_path(root)
+ self.permit_url(t.base)
+ if (t.get_bytes('.bzr/checkout/dirstate') !=
+ TestCaseWithMemoryTransport._SAFETY_NET_PRISTINE_DIRSTATE):
+ # The current test has modified the .bzr directory; we need to
+ # recreate a new one or all the following tests will fail.
+ # If you need to inspect its content uncomment the following line
+ # import pdb; pdb.set_trace()
+ _rmtree_temp_dir(root + '/.bzr', test_id=self.id())
+ self._create_safety_net()
+ raise AssertionError('%s/.bzr should not be modified' % root)
+
+ def _make_test_root(self):
+ if TestCaseWithMemoryTransport.TEST_ROOT is None:
+ # Watch out for tricky test dir (on OSX /tmp -> /private/tmp)
+ root = osutils.realpath(osutils.mkdtemp(prefix='testbzr-',
+ suffix='.tmp'))
+ TestCaseWithMemoryTransport.TEST_ROOT = root
+
+ self._create_safety_net()
+
+ # The same directory is used by all tests, and we're not
+ # specifically told when all tests are finished. This will do.
+ atexit.register(_rmtree_temp_dir, root)
+
+ self.permit_dir(TestCaseWithMemoryTransport.TEST_ROOT)
+ self.addCleanup(self._check_safety_net)
+
+ def makeAndChdirToTestDir(self):
+ """Create a temporary directories for this one test.
+
+ This must set self.test_home_dir and self.test_dir and chdir to
+ self.test_dir.
+
+ For TestCaseWithMemoryTransport we chdir to the TEST_ROOT for this test.
+ """
+ os.chdir(TestCaseWithMemoryTransport.TEST_ROOT)
+ self.test_dir = TestCaseWithMemoryTransport.TEST_ROOT
+ self.test_home_dir = self.test_dir + "/MemoryTransportMissingHomeDir"
+ self.permit_dir(self.test_dir)
+
+ def make_branch(self, relpath, format=None, name=None):
+ """Create a branch on the transport at relpath."""
+ repo = self.make_repository(relpath, format=format)
+ return repo.bzrdir.create_branch(append_revisions_only=False,
+ name=name)
+
+ def get_default_format(self):
+ return 'default'
+
+ def resolve_format(self, format):
+ """Resolve an object to a ControlDir format object.
+
+ The initial format object can either already be
+ a ControlDirFormat, None (for the default format),
+ or a string with the name of the control dir format.
+
+ :param format: Object to resolve
+ :return: A ControlDirFormat instance
+ """
+ if format is None:
+ format = self.get_default_format()
+ if isinstance(format, basestring):
+ format = controldir.format_registry.make_bzrdir(format)
+ return format
+
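+ # For illustration: resolve_format accepts None, a ControlDirFormat
+ # instance, or a registered format name; the name below is one example:
+ #
+ # fmt = self.resolve_format('2a')
+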
+ def make_bzrdir(self, relpath, format=None):
+ try:
+ # might be a relative or absolute path
+ maybe_a_url = self.get_url(relpath)
+ segments = maybe_a_url.rsplit('/', 1)
+ t = _mod_transport.get_transport(maybe_a_url)
+ if len(segments) > 1 and segments[-1] not in ('', '.'):
+ t.ensure_base()
+ format = self.resolve_format(format)
+ return format.initialize_on_transport(t)
+ except errors.UninitializableFormat:
+ raise TestSkipped("Format %s is not initializable." % format)
+
+ def make_repository(self, relpath, shared=None, format=None):
+ """Create a repository on our default transport at relpath.
+
+ Note that relpath must be a relative path, not a full url.
+ """
+ # FIXME: If you create a remoterepository this returns the underlying
+ # real format, which is incorrect. Actually we should make sure that
+ # RemoteBzrDir returns a RemoteRepository.
+ # maybe mbp 20070410
+ made_control = self.make_bzrdir(relpath, format=format)
+ return made_control.create_repository(shared=shared)
+
+ def make_smart_server(self, path, backing_server=None):
+ if backing_server is None:
+ backing_server = self.get_server()
+ smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(smart_server, backing_server)
+ remote_transport = _mod_transport.get_transport_from_url(
+ smart_server.get_url()).clone(path)
+ return remote_transport
+
+ def make_branch_and_memory_tree(self, relpath, format=None):
+ """Create a branch on the default transport and a MemoryTree for it."""
+ b = self.make_branch(relpath, format=format)
+ return memorytree.MemoryTree.create_on_branch(b)
+
+ def make_branch_builder(self, relpath, format=None):
+ branch = self.make_branch(relpath, format=format)
+ return branchbuilder.BranchBuilder(branch=branch)
+
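+ # Illustrative only: building history entirely in memory with the helper
+ # above (the path, file list and commit message are hypothetical):
+ #
+ # tree = self.make_branch_and_memory_tree('branch')
+ # tree.lock_write()
+ # self.addCleanup(tree.unlock)
+ # tree.add([''])
+ # tree.commit('initial commit')
+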
+ def overrideEnvironmentForTesting(self):
+ test_home_dir = self.test_home_dir
+ if isinstance(test_home_dir, unicode):
+ test_home_dir = test_home_dir.encode(sys.getfilesystemencoding())
+ self.overrideEnv('HOME', test_home_dir)
+ self.overrideEnv('BZR_HOME', test_home_dir)
+
+ def setUp(self):
+ super(TestCaseWithMemoryTransport, self).setUp()
+
+ def _add_disconnect_cleanup(transport):
+ """Schedule disconnection of given transport at test cleanup
+
+ This needs to happen for all connected transports or leaks occur.
+
+ Note reconnections may mean we call disconnect multiple times per
+ transport which is suboptimal but seems harmless.
+ """
+ self.addCleanup(transport.disconnect)
+
+ _mod_transport.Transport.hooks.install_named_hook('post_connect',
+ _add_disconnect_cleanup, None)
+
+ self._make_test_root()
+ self.addCleanup(os.chdir, os.getcwdu())
+ self.makeAndChdirToTestDir()
+ self.overrideEnvironmentForTesting()
+ self.__readonly_server = None
+ self.__server = None
+ self.reduceLockdirTimeout()
+
+ def setup_smart_server_with_call_log(self):
+ """Sets up a smart server as the transport server with a call log."""
+ self.transport_server = test_server.SmartTCPServer_for_testing
+ self.hpss_connections = []
+ self.hpss_calls = []
+ import traceback
+ # Skip the current stack down to the caller of
+ # setup_smart_server_with_call_log
+ prefix_length = len(traceback.extract_stack()) - 2
+ def capture_hpss_call(params):
+ self.hpss_calls.append(
+ CapturedCall(params, prefix_length))
+ def capture_connect(transport):
+ self.hpss_connections.append(transport)
+ client._SmartClient.hooks.install_named_hook(
+ 'call', capture_hpss_call, None)
+ _mod_transport.Transport.hooks.install_named_hook(
+ 'post_connect', capture_connect, None)
+
+ def reset_smart_call_log(self):
+ self.hpss_calls = []
+ self.hpss_connections = []
+
+
+class TestCaseInTempDir(TestCaseWithMemoryTransport):
+ """Derived class that runs a test within a temporary directory.
+
+ This is useful for tests that need to create a branch, etc.
+
+ The directory is created in a slightly complex way: for each
+ Python invocation, a new temporary top-level directory is created.
+ All test cases create their own directory within that. If the
+ tests complete successfully, the directory is removed.
+
+ :ivar test_base_dir: The path of the top-level directory for this
+ test, which contains a home directory and a work directory.
+
+ :ivar test_home_dir: An initially empty directory under test_base_dir
+ which is used as $HOME for this test.
+
+ :ivar test_dir: A directory under test_base_dir used as the current
+ directory when the test proper is run.
+ """
+
+ OVERRIDE_PYTHON = 'python'
+
+ def setUp(self):
+ super(TestCaseInTempDir, self).setUp()
+ # Remove the protection set in isolated_environ, we have a proper
+ # access to disk resources now.
+ self.overrideEnv('BZR_LOG', None)
+
+ def check_file_contents(self, filename, expect):
+ self.log("check contents of file %s" % filename)
+ f = file(filename)
+ try:
+ contents = f.read()
+ finally:
+ f.close()
+ if contents != expect:
+ self.log("expected: %r" % expect)
+ self.log("actually: %r" % contents)
+ self.fail("contents of %s not as expected" % filename)
+
+ def _getTestDirPrefix(self):
+ # create a directory within the top level test directory
+ if sys.platform in ('win32', 'cygwin'):
+ name_prefix = re.sub('[<>*=+",:;_/\\-]', '_', self.id())
+ # windows is likely to have path-length limits so use a short name
+ name_prefix = name_prefix[-30:]
+ else:
+ name_prefix = re.sub('[/]', '_', self.id())
+ return name_prefix
+
+ def makeAndChdirToTestDir(self):
+ """See TestCaseWithMemoryTransport.makeAndChdirToTestDir().
+
+ For TestCaseInTempDir we create a temporary directory based on the test
+ name and then create two subdirs - test and home under it.
+ """
+ name_prefix = osutils.pathjoin(TestCaseWithMemoryTransport.TEST_ROOT,
+ self._getTestDirPrefix())
+ name = name_prefix
+ for i in range(100):
+ if os.path.exists(name):
+ name = name_prefix + '_' + str(i)
+ else:
+ # now create test and home directories within this dir
+ self.test_base_dir = name
+ self.addCleanup(self.deleteTestDir)
+ os.mkdir(self.test_base_dir)
+ break
+ self.permit_dir(self.test_base_dir)
+ # 'sprouting' and 'init' of a branch both walk up the tree to find
+ # stacking policy to honour; create a bzr dir with an unshared
+ # repository (but not a branch - our code would be trying to escape
+ # then!) to stop them, and permit it to be read.
+ # control = controldir.ControlDir.create(self.test_base_dir)
+ # control.create_repository()
+ self.test_home_dir = self.test_base_dir + '/home'
+ os.mkdir(self.test_home_dir)
+ self.test_dir = self.test_base_dir + '/work'
+ os.mkdir(self.test_dir)
+ os.chdir(self.test_dir)
+ # put name of test inside
+ f = file(self.test_base_dir + '/name', 'w')
+ try:
+ f.write(self.id())
+ finally:
+ f.close()
+
+ def deleteTestDir(self):
+ os.chdir(TestCaseWithMemoryTransport.TEST_ROOT)
+ _rmtree_temp_dir(self.test_base_dir, test_id=self.id())
+
+ def build_tree(self, shape, line_endings='binary', transport=None):
+ """Build a test tree according to a pattern.
+
+ shape is a sequence of file specifications. If the final
+ character is '/', a directory is created.
+
+ This assumes that all the elements in the tree being built are new.
+
+ This doesn't add anything to a branch.
+
+ :type shape: list or tuple.
+ :param line_endings: Either 'binary' or 'native'.
+ In binary mode, exact contents are written as given; in native mode,
+ the line endings match the default platform endings.
+ :param transport: A transport to write to, for building trees on VFS's.
+ If the transport is readonly or None, "." is opened automatically.
+ :return: None
+ """
+ if type(shape) not in (list, tuple):
+ raise AssertionError("Parameter 'shape' should be "
+ "a list or a tuple. Got %r instead" % (shape,))
+ # It's OK to just create them using forward slashes on windows.
+ if transport is None or transport.is_readonly():
+ transport = _mod_transport.get_transport_from_path(".")
+ for name in shape:
+ self.assertIsInstance(name, basestring)
+ if name[-1] == '/':
+ transport.mkdir(urlutils.escape(name[:-1]))
+ else:
+ if line_endings == 'binary':
+ end = '\n'
+ elif line_endings == 'native':
+ end = os.linesep
+ else:
+ raise errors.BzrError(
+ 'Invalid line ending request %r' % line_endings)
+ content = "contents of %s%s" % (name.encode('utf-8'), end)
+ transport.put_bytes_non_atomic(urlutils.escape(name), content)
+
+ build_tree_contents = staticmethod(treeshape.build_tree_contents)
+
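+ # A short sketch of the shape format accepted by build_tree above: names
+ # ending in '/' become directories, other names become files containing a
+ # line that mentions their own name (the names here are arbitrary):
+ #
+ # self.build_tree(['dir/', 'dir/file', 'top.txt'])
+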
+ def assertInWorkingTree(self, path, root_path='.', tree=None):
+ """Assert whether path or paths are in the WorkingTree"""
+ if tree is None:
+ tree = workingtree.WorkingTree.open(root_path)
+ if not isinstance(path, basestring):
+ for p in path:
+ self.assertInWorkingTree(p, tree=tree)
+ else:
+ self.assertIsNot(tree.path2id(path), None,
+ path+' not in working tree.')
+
+ def assertNotInWorkingTree(self, path, root_path='.', tree=None):
+ """Assert whether path or paths are not in the WorkingTree"""
+ if tree is None:
+ tree = workingtree.WorkingTree.open(root_path)
+ if not isinstance(path, basestring):
+ for p in path:
+ self.assertNotInWorkingTree(p, tree=tree)
+ else:
+ self.assertIs(tree.path2id(path), None, path+' in working tree.')
+
+
+class TestCaseWithTransport(TestCaseInTempDir):
+ """A test case that provides get_url and get_readonly_url facilities.
+
+ These back onto two transport servers, one for readonly access and one for
+ read write access.
+
+ If no explicit class is provided for readonly access, a
+ ReadonlyTransportDecorator is used instead which allows the use of non disk
+ based read write transports.
+
+ If an explicit class is provided for readonly access, that server and the
+ readwrite one must both define get_url() as resolving to os.getcwd().
+ """
+
+ def get_vfs_only_server(self):
+ """See TestCaseWithMemoryTransport.
+
+ This is useful for some tests with specific servers that need
+ diagnostics.
+ """
+ if self.__vfs_server is None:
+ self.__vfs_server = self.vfs_transport_factory()
+ self.start_server(self.__vfs_server)
+ return self.__vfs_server
+
+ def make_branch_and_tree(self, relpath, format=None):
+ """Create a branch on the transport and a tree locally.
+
+ If the transport is not a LocalTransport, the Tree can't be created on
+ the transport. In that case if the vfs_transport_factory is
+ LocalURLServer the working tree is created in the local
+ directory backing the transport, and the returned tree's branch and
+ repository will also be accessed locally. Otherwise a lightweight
+ checkout is created and returned.
+
+ We do this because we can't physically create a tree in the local
+ path, with a branch reference to the transport_factory url, and
+ a branch + repository in the vfs_transport, unless the vfs_transport
+ namespace is distinct from the local disk - the two branch objects
+ would collide. While we could construct a tree with its branch object
+ pointing at the transport_factory transport in memory, reopening it
+ would behave unexpectedly, and has in the past caused testing bugs
+ when we tried to do it that way.
+
+ :param format: The BzrDirFormat.
+ :returns: the WorkingTree.
+ """
+ # TODO: always use the local disk path for the working tree,
+ # this obviously requires a format that supports branch references
+ # so check for that by checking bzrdir.BzrDirFormat.get_default_format()
+ # RBC 20060208
+ format = self.resolve_format(format=format)
+ if not format.supports_workingtrees:
+ b = self.make_branch(relpath+'.branch', format=format)
+ return b.create_checkout(relpath, lightweight=True)
+ b = self.make_branch(relpath, format=format)
+ try:
+ return b.bzrdir.create_workingtree()
+ except errors.NotLocalUrl:
+ # We can only make working trees locally at the moment. If the
+ # transport can't support them, then we keep the non-disk-backed
+ # branch and create a local checkout.
+ if self.vfs_transport_factory is test_server.LocalURLServer:
+ # the branch is colocated on disk, we cannot create a checkout.
+ # hopefully callers will expect this.
+ local_controldir = controldir.ControlDir.open(
+ self.get_vfs_only_url(relpath))
+ wt = local_controldir.create_workingtree()
+ if wt.branch._format != b._format:
+ wt._branch = b
+ # Make sure that assigning to wt._branch fixes wt.branch,
+ # in case the implementation details of workingtree objects
+ # change.
+ self.assertIs(b, wt.branch)
+ return wt
+ else:
+ return b.create_checkout(relpath, lightweight=True)
+
+ def assertIsDirectory(self, relpath, transport):
+ """Assert that relpath within transport is a directory.
+
+ This may not be possible on all transports; in that case it propagates
+ a TransportNotPossible.
+ """
+ try:
+ mode = transport.stat(relpath).st_mode
+ except errors.NoSuchFile:
+ self.fail("path %s is not a directory; no such file"
+ % (relpath))
+ if not stat.S_ISDIR(mode):
+ self.fail("path %s is not a directory; has mode %#o"
+ % (relpath, mode))
+
+ def assertTreesEqual(self, left, right):
+ """Check that left and right have the same content and properties."""
+ # we use a tree delta to check for equality of the content, and we
+ # manually check for equality of other things such as the parents list.
+ self.assertEqual(left.get_parent_ids(), right.get_parent_ids())
+ differences = left.changes_from(right)
+ self.assertFalse(differences.has_changed(),
+ "Trees %r and %r are different: %r" % (left, right, differences))
+
+ def setUp(self):
+ super(TestCaseWithTransport, self).setUp()
+ self.__vfs_server = None
+
+ def disable_missing_extensions_warning(self):
+ """Some tests expect a precise stderr content.
+
+ There is no point in forcing them to duplicate the extension related
+ warning.
+ """
+ config.GlobalConfig().set_user_option('ignore_missing_extensions', True)
+
+
+class ChrootedTestCase(TestCaseWithTransport):
+ """A support class that provides readonly urls outside the local namespace.
+
+ This is done by checking if self.transport_server is a MemoryServer. If it
+ is, then we are chrooted already; if it is not, then an HttpServer is used
+ for readonly urls.
+
+ TODO RBC 20060127: make this an option to TestCaseWithTransport so it can
+ be used without needing to redo it when a different
+ subclass is in use?
+ """
+
+ def setUp(self):
+ from bzrlib.tests import http_server
+ super(ChrootedTestCase, self).setUp()
+ if not self.vfs_transport_factory == memory.MemoryServer:
+ self.transport_readonly_server = http_server.HttpServer
+
+
+def condition_id_re(pattern):
+ """Create a condition filter which performs a re check on a test's id.
+
+ :param pattern: A regular expression string.
+ :return: A callable that returns True if the re matches.
+ """
+ filter_re = re.compile(pattern, 0)
+ def condition(test):
+ test_id = test.id()
+ return filter_re.search(test_id)
+ return condition
+
+
+def condition_isinstance(klass_or_klass_list):
+ """Create a condition filter which returns isinstance(param, klass).
+
+ :return: A callable which when called with one parameter obj return the
+ result of isinstance(obj, klass_or_klass_list).
+ """
+ def condition(obj):
+ return isinstance(obj, klass_or_klass_list)
+ return condition
+
+
+def condition_id_in_list(id_list):
+ """Create a condition filter which verify that test's id in a list.
+
+ :param id_list: A TestIdList object.
+ :return: A callable that returns True if the test's id appears in the list.
+ """
+ def condition(test):
+ return id_list.includes(test.id())
+ return condition
+
+
+def condition_id_startswith(starts):
+ """Create a condition filter verifying that test's id starts with a string.
+
+ :param starts: A list of string.
+ :return: A callable that returns True if the test's id starts with one of
+ the given strings.
+ """
+ def condition(test):
+ for start in starts:
+ if test.id().startswith(start):
+ return True
+ return False
+ return condition
+
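+# Illustration of how these condition factories plug into the suite filters
+# defined below (the id prefix shown is just an example):
+#
+# condition = condition_id_startswith(['bzrlib.tests.test_osutils'])
+# suite = filter_suite_by_condition(suite, condition)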
+
+def exclude_tests_by_condition(suite, condition):
+ """Create a test suite which excludes some tests from suite.
+
+ :param suite: The suite to get tests from.
+ :param condition: A callable whose result evaluates True when called with a
+ test case which should be excluded from the result.
+ :return: A suite which contains the tests found in suite that fail
+ condition.
+ """
+ result = []
+ for test in iter_suite_tests(suite):
+ if not condition(test):
+ result.append(test)
+ return TestUtil.TestSuite(result)
+
+
+def filter_suite_by_condition(suite, condition):
+ """Create a test suite by filtering another one.
+
+ :param suite: The source suite.
+ :param condition: A callable whose result evaluates True when called with a
+ test case which should be included in the result.
+ :return: A suite which contains the tests found in suite that pass
+ condition.
+ """
+ result = []
+ for test in iter_suite_tests(suite):
+ if condition(test):
+ result.append(test)
+ return TestUtil.TestSuite(result)
+
+
+def filter_suite_by_re(suite, pattern):
+ """Create a test suite by filtering another one.
+
+ :param suite: the source suite
+ :param pattern: pattern that names must match
+ :returns: the newly created suite
+ """
+ condition = condition_id_re(pattern)
+ result_suite = filter_suite_by_condition(suite, condition)
+ return result_suite
+
+
+def filter_suite_by_id_list(suite, test_id_list):
+ """Create a test suite by filtering another one.
+
+ :param suite: The source suite.
+ :param test_id_list: A list of the test ids to keep as strings.
+ :returns: the newly created suite
+ """
+ condition = condition_id_in_list(test_id_list)
+ result_suite = filter_suite_by_condition(suite, condition)
+ return result_suite
+
+
+def filter_suite_by_id_startswith(suite, start):
+ """Create a test suite by filtering another one.
+
+ :param suite: The source suite.
+ :param start: A list of string the test id must start with one of.
+ :returns: the newly created suite
+ """
+ condition = condition_id_startswith(start)
+ result_suite = filter_suite_by_condition(suite, condition)
+ return result_suite
+
+
+def exclude_tests_by_re(suite, pattern):
+ """Create a test suite which excludes some tests from suite.
+
+ :param suite: The suite to get tests from.
+ :param pattern: A regular expression string. Test ids that match this
+ pattern will be excluded from the result.
+ :return: A TestSuite that contains all the tests from suite without the
+ tests that matched pattern. The order of tests is the same as it was in
+ suite.
+ """
+ return exclude_tests_by_condition(suite, condition_id_re(pattern))
+
+
+def preserve_input(something):
+ """A helper for performing test suite transformation chains.
+
+ :param something: Anything you want to preserve.
+ :return: Something.
+ """
+ return something
+
+
+def randomize_suite(suite):
+ """Return a new TestSuite with suite's tests in random order.
+
+ The tests in the input suite are flattened into a single suite in order to
+ accomplish this. Any nested TestSuites are removed to provide global
+ randomness.
+ """
+ tests = list(iter_suite_tests(suite))
+ random.shuffle(tests)
+ return TestUtil.TestSuite(tests)
+
+
+def split_suite_by_condition(suite, condition):
+ """Split a test suite into two by a condition.
+
+ :param suite: The suite to split.
+ :param condition: The condition to match on. Tests that match this
+ condition are returned in the first test suite, ones that do not match
+ are in the second suite.
+ :return: A tuple of two test suites, where the first contains tests from
+ suite matching the condition, and the second contains the remainder
+ from suite. The order within each output suite is the same as it was in
+ suite.
+ """
+ matched = []
+ did_not_match = []
+ for test in iter_suite_tests(suite):
+ if condition(test):
+ matched.append(test)
+ else:
+ did_not_match.append(test)
+ return TestUtil.TestSuite(matched), TestUtil.TestSuite(did_not_match)
+
+
+def split_suite_by_re(suite, pattern):
+ """Split a test suite into two by a regular expression.
+
+ :param suite: The suite to split.
+ :param pattern: A regular expression string. Test ids that match this
+ pattern will be in the first test suite returned, and the others in the
+ second test suite returned.
+ :return: A tuple of two test suites, where the first contains tests from
+ suite matching pattern, and the second contains the remainder from
+ suite. The order within each output suite is the same as it was in
+ suite.
+ """
+ return split_suite_by_condition(suite, condition_id_re(pattern))
+
+
+def run_suite(suite, name='test', verbose=False, pattern=".*",
+ stop_on_failure=False,
+ transport=None, lsprof_timed=None, bench_history=None,
+ matching_tests_first=None,
+ list_only=False,
+ random_seed=None,
+ exclude_pattern=None,
+ strict=False,
+ runner_class=None,
+ suite_decorators=None,
+ stream=None,
+ result_decorators=None,
+ ):
+ """Run a test suite for bzr selftest.
+
+ :param runner_class: The class of runner to use. Must support the
+ constructor arguments passed by run_suite which are more than standard
+ python uses.
+ :return: A boolean indicating success.
+ """
+ TestCase._gather_lsprof_in_benchmarks = lsprof_timed
+ if verbose:
+ verbosity = 2
+ else:
+ verbosity = 1
+ if runner_class is None:
+ runner_class = TextTestRunner
+ if stream is None:
+ stream = sys.stdout
+ runner = runner_class(stream=stream,
+ descriptions=0,
+ verbosity=verbosity,
+ bench_history=bench_history,
+ strict=strict,
+ result_decorators=result_decorators,
+ )
+ runner.stop_on_failure = stop_on_failure
+ if isinstance(suite, unittest.TestSuite):
+ # Empty out _tests list of passed suite and populate new TestSuite
+ suite._tests[:], suite = [], TestSuite(suite)
+ # built in decorator factories:
+ decorators = [
+ random_order(random_seed, runner),
+ exclude_tests(exclude_pattern),
+ ]
+ if matching_tests_first:
+ decorators.append(tests_first(pattern))
+ else:
+ decorators.append(filter_tests(pattern))
+ if suite_decorators:
+ decorators.extend(suite_decorators)
+ # tell the result object how many tests will be running: (except if
+ # --parallel=fork is being used. Robert said he will provide a better
+ # progress design later -- vila 20090817)
+ if fork_decorator not in decorators:
+ decorators.append(CountingDecorator)
+ for decorator in decorators:
+ suite = decorator(suite)
+ if list_only:
+ # Done after test suite decoration to allow randomisation etc
+ # to take effect, though that is of marginal benefit.
+ if verbosity >= 2:
+ stream.write("Listing tests only ...\n")
+ for t in iter_suite_tests(suite):
+ stream.write("%s\n" % (t.id()))
+ return True
+ result = runner.run(suite)
+ if strict:
+ return result.wasStrictlySuccessful()
+ else:
+ return result.wasSuccessful()
+
+
+# A registry where get() returns a suite decorator.
+parallel_registry = registry.Registry()
+
+
+def fork_decorator(suite):
+ if getattr(os, "fork", None) is None:
+ raise errors.BzrCommandError("platform does not support fork,"
+ " try --parallel=subprocess instead.")
+ concurrency = osutils.local_concurrency()
+ if concurrency == 1:
+ return suite
+ from testtools import ConcurrentTestSuite
+ return ConcurrentTestSuite(suite, fork_for_tests)
+parallel_registry.register('fork', fork_decorator)
+
+
+def subprocess_decorator(suite):
+ concurrency = osutils.local_concurrency()
+ if concurrency == 1:
+ return suite
+ from testtools import ConcurrentTestSuite
+ return ConcurrentTestSuite(suite, reinvoke_for_tests)
+parallel_registry.register('subprocess', subprocess_decorator)
+
+
+def exclude_tests(exclude_pattern):
+ """Return a test suite decorator that excludes tests."""
+ if exclude_pattern is None:
+ return identity_decorator
+ def decorator(suite):
+ return ExcludeDecorator(suite, exclude_pattern)
+ return decorator
+
+
+def filter_tests(pattern):
+ if pattern == '.*':
+ return identity_decorator
+ def decorator(suite):
+ return FilterTestsDecorator(suite, pattern)
+ return decorator
+
+
+def random_order(random_seed, runner):
+ """Return a test suite decorator factory for randomising tests order.
+
+ :param random_seed: "now", a string which casts to a long, or a long.
+ :param runner: A test runner with a stream attribute to report on.
+ """
+ if random_seed is None:
+ return identity_decorator
+ def decorator(suite):
+ return RandomDecorator(suite, random_seed, runner.stream)
+ return decorator
+
+
+def tests_first(pattern):
+ if pattern == '.*':
+ return identity_decorator
+ def decorator(suite):
+ return TestFirstDecorator(suite, pattern)
+ return decorator
+
+
+def identity_decorator(suite):
+ """Return suite."""
+ return suite
+
+
+class TestDecorator(TestUtil.TestSuite):
+ """A decorator for TestCase/TestSuite objects.
+
+ Contains the suite passed on construction rather than flattening it.
+ """
+
+ def __init__(self, suite=None):
+ super(TestDecorator, self).__init__()
+ if suite is not None:
+ self.addTest(suite)
+
+ # Don't need subclass run method with suite emptying
+ run = unittest.TestSuite.run
+
+
+class CountingDecorator(TestDecorator):
+ """A decorator which calls result.progress(self.countTestCases)."""
+
+ def run(self, result):
+ progress_method = getattr(result, 'progress', None)
+ if callable(progress_method):
+ progress_method(self.countTestCases(), SUBUNIT_SEEK_SET)
+ return super(CountingDecorator, self).run(result)
+
+
+class ExcludeDecorator(TestDecorator):
+ """A decorator which excludes test matching an exclude pattern."""
+
+ def __init__(self, suite, exclude_pattern):
+ super(ExcludeDecorator, self).__init__(
+ exclude_tests_by_re(suite, exclude_pattern))
+
+
+class FilterTestsDecorator(TestDecorator):
+ """A decorator which filters tests to those matching a pattern."""
+
+ def __init__(self, suite, pattern):
+ super(FilterTestsDecorator, self).__init__(
+ filter_suite_by_re(suite, pattern))
+
+
+class RandomDecorator(TestDecorator):
+ """A decorator which randomises the order of its tests."""
+
+ def __init__(self, suite, random_seed, stream):
+ random_seed = self.actual_seed(random_seed)
+ stream.write("Randomizing test order using seed %s\n\n" %
+ (random_seed,))
+ # Initialise the random number generator.
+ random.seed(random_seed)
+ super(RandomDecorator, self).__init__(randomize_suite(suite))
+
+ @staticmethod
+ def actual_seed(seed):
+ if seed == "now":
+ # We convert the seed to a long to make it reusable across
+ # invocations (because the user can reenter it).
+ return long(time.time())
+ else:
+ # Convert the seed to a long if we can
+ try:
+ return long(seed)
+ except (TypeError, ValueError):
+ pass
+ return seed
+
+
+class TestFirstDecorator(TestDecorator):
+ """A decorator which moves named tests to the front."""
+
+ def __init__(self, suite, pattern):
+ super(TestFirstDecorator, self).__init__()
+ self.addTests(split_suite_by_re(suite, pattern))
+
+
+def partition_tests(suite, count):
+ """Partition suite into count lists of tests."""
+ # This just assigns tests in a round-robin fashion. On one hand this
+ # splits up blocks of related tests that might run faster if they shared
+ # resources, but on the other it avoids assigning blocks of slow tests to
+ # just one partition. So the slowest partition shouldn't be much slower
+ # than the fastest.
+ partitions = [list() for i in range(count)]
+ tests = iter_suite_tests(suite)
+ for partition, test in itertools.izip(itertools.cycle(partitions), tests):
+ partition.append(test)
+ return partitions
+
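+# Round-robin sketch: with count=2, tests [t1, t2, t3, t4] are partitioned
+# as [[t1, t3], [t2, t4]], so long runs of adjacent (possibly slow) tests
+# are not assigned to a single partition.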
+
+def workaround_zealous_crypto_random():
+ """Crypto.Random want to help us being secure, but we don't care here.
+
+ This workaround some test failure related to the sftp server. Once paramiko
+ stop using the controversial API in Crypto.Random, we may get rid of it.
+ """
+ try:
+ from Crypto.Random import atfork
+ atfork()
+ except ImportError:
+ pass
+
+
+def fork_for_tests(suite):
+ """Take suite and start up one runner per CPU by forking()
+
+ :return: An iterable of TestCase-like objects which can each have
+ run(result) called on them to feed tests to result.
+ """
+ concurrency = osutils.local_concurrency()
+ result = []
+ from subunit import ProtocolTestCase
+ from subunit.test_results import AutoTimingTestResultDecorator
+ class TestInOtherProcess(ProtocolTestCase):
+ # Should be in subunit, I think. RBC.
+ def __init__(self, stream, pid):
+ ProtocolTestCase.__init__(self, stream)
+ self.pid = pid
+
+ def run(self, result):
+ try:
+ ProtocolTestCase.run(self, result)
+ finally:
+ pid, status = os.waitpid(self.pid, 0)
+ # GZ 2011-10-18: If status is nonzero, should report to the result
+ # that something went wrong.
+
+ test_blocks = partition_tests(suite, concurrency)
+ # Clear the tests from the original suite so it doesn't keep them alive
+ suite._tests[:] = []
+ for process_tests in test_blocks:
+ process_suite = TestUtil.TestSuite(process_tests)
+ # Also clear each split list so new suite has only reference
+ process_tests[:] = []
+ c2pread, c2pwrite = os.pipe()
+ pid = os.fork()
+ if pid == 0:
+ try:
+ stream = os.fdopen(c2pwrite, 'wb', 1)
+ workaround_zealous_crypto_random()
+ os.close(c2pread)
+ # Leave stderr and stdout open so we can see test noise
+ # Close stdin so that the child goes away if it decides to
+ # read from stdin (otherwise it's a roulette to see what
+ # child actually gets keystrokes for pdb etc).
+ sys.stdin.close()
+ subunit_result = AutoTimingTestResultDecorator(
+ SubUnitBzrProtocolClient(stream))
+ process_suite.run(subunit_result)
+ except:
+ # Try and report traceback on stream, but exit with error even
+ # if stream couldn't be created or something else goes wrong.
+ # The traceback is formatted to a string and written in one go
+ # to avoid interleaving lines from multiple failing children.
+ try:
+ stream.write(traceback.format_exc())
+ finally:
+ os._exit(1)
+ os._exit(0)
+ else:
+ os.close(c2pwrite)
+ stream = os.fdopen(c2pread, 'rb', 1)
+ test = TestInOtherProcess(stream, pid)
+ result.append(test)
+ return result
+
+
+def reinvoke_for_tests(suite):
+ """Take suite and start up one runner per CPU using subprocess().
+
+ :return: An iterable of TestCase-like objects which can each have
+ run(result) called on them to feed tests to result.
+ """
+ concurrency = osutils.local_concurrency()
+ result = []
+ from subunit import ProtocolTestCase
+ class TestInSubprocess(ProtocolTestCase):
+ def __init__(self, process, name):
+ ProtocolTestCase.__init__(self, process.stdout)
+ self.process = process
+ self.process.stdin.close()
+ self.name = name
+
+ def run(self, result):
+ try:
+ ProtocolTestCase.run(self, result)
+ finally:
+ self.process.wait()
+ os.unlink(self.name)
+ # print "pid %d finished" % finished_process
+ test_blocks = partition_tests(suite, concurrency)
+ for process_tests in test_blocks:
+ # ugly; currently reimplements rather than reuses TestCase methods.
+ bzr_path = os.path.dirname(os.path.dirname(bzrlib.__file__))+'/bzr'
+ if not os.path.isfile(bzr_path):
+ # We are probably installed. Assume sys.argv is the right file
+ bzr_path = sys.argv[0]
+ bzr_path = [bzr_path]
+ if sys.platform == "win32":
+ # if we're on windows, we can't execute the bzr script directly
+ bzr_path = [sys.executable] + bzr_path
+ fd, test_list_file_name = tempfile.mkstemp()
+ test_list_file = os.fdopen(fd, 'wb', 1)
+ for test in process_tests:
+ test_list_file.write(test.id() + '\n')
+ test_list_file.close()
+ try:
+ argv = bzr_path + ['selftest', '--load-list', test_list_file_name,
+ '--subunit']
+ if '--no-plugins' in sys.argv:
+ argv.append('--no-plugins')
+ # stderr=subprocess.STDOUT would be ideal, but until we prevent
+ # noise on stderr it can interrupt the subunit protocol.
+ process = subprocess.Popen(argv, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ bufsize=1)
+ test = TestInSubprocess(process, test_list_file_name)
+ result.append(test)
+ except:
+ os.unlink(test_list_file_name)
+ raise
+ return result
+
+
+class ProfileResult(testtools.ExtendedToOriginalDecorator):
+ """Generate profiling data for all activity between start and success.
+
+ The profile data is appended to the test's _benchcalls attribute and can
+ be accessed by the forwarded-to TestResult.
+
+ While it might be cleaner to accumulate this in stopTest, addSuccess is
+ where our existing output support for lsprof is, and this class aims to
+ fit in with that: while it could be moved it's not necessary to accomplish
+ test profiling, nor would it be dramatically cleaner.
+ """
+
+ def startTest(self, test):
+ self.profiler = bzrlib.lsprof.BzrProfiler()
+ # Prevent deadlocks in tests that use lsprof: those tests will
+ # unavoidably fail.
+ bzrlib.lsprof.BzrProfiler.profiler_block = 0
+ self.profiler.start()
+ testtools.ExtendedToOriginalDecorator.startTest(self, test)
+
+ def addSuccess(self, test):
+ stats = self.profiler.stop()
+ try:
+ calls = test._benchcalls
+ except AttributeError:
+ test._benchcalls = []
+ calls = test._benchcalls
+ calls.append(((test.id(), "", ""), stats))
+ testtools.ExtendedToOriginalDecorator.addSuccess(self, test)
+
+ def stopTest(self, test):
+ testtools.ExtendedToOriginalDecorator.stopTest(self, test)
+ self.profiler = None
+
+
+# Controlled by "bzr selftest -E=..." option
+# Currently supported:
+# -Eallow_debug Will no longer clear debug.debug_flags() so it
+# preserves any flags supplied at the command line.
+# -Edisable_lock_checks Turns errors in mismatched locks into simple prints
+# rather than failing tests. And no longer raise
+ # LockContention when fcntl locks are not being used
+# with proper exclusion rules.
+# -Ethreads Will display thread ident at creation/join time to
+# help track thread leaks
+# -Euncollected_cases Display the identity of any test cases that weren't
+# deallocated after being completed.
+# -Econfig_stats Will collect statistics using addDetail
+selftest_debug_flags = set()
+
+
+def selftest(verbose=False, pattern=".*", stop_on_failure=True,
+ transport=None,
+ test_suite_factory=None,
+ lsprof_timed=None,
+ bench_history=None,
+ matching_tests_first=None,
+ list_only=False,
+ random_seed=None,
+ exclude_pattern=None,
+ strict=False,
+ load_list=None,
+ debug_flags=None,
+ starting_with=None,
+ runner_class=None,
+ suite_decorators=None,
+ stream=None,
+ lsprof_tests=False,
+ ):
+ """Run the whole test suite under the enhanced runner"""
+ # XXX: Very ugly way to do this...
+ # Disable warning about old formats because we don't want it to disturb
+ # any blackbox tests.
+ from bzrlib import repository
+ repository._deprecation_warning_done = True
+
+ global default_transport
+ if transport is None:
+ transport = default_transport
+ old_transport = default_transport
+ default_transport = transport
+ global selftest_debug_flags
+ old_debug_flags = selftest_debug_flags
+ if debug_flags is not None:
+ selftest_debug_flags = set(debug_flags)
+ try:
+ if load_list is None:
+ keep_only = None
+ else:
+ keep_only = load_test_id_list(load_list)
+ if starting_with:
+ starting_with = [test_prefix_alias_registry.resolve_alias(start)
+ for start in starting_with]
+ if test_suite_factory is None:
+ # Reduce loading time by loading modules based on the starting_with
+ # patterns.
+ suite = test_suite(keep_only, starting_with)
+ else:
+ suite = test_suite_factory()
+ if starting_with:
+ # But always filter as requested.
+ suite = filter_suite_by_id_startswith(suite, starting_with)
+ result_decorators = []
+ if lsprof_tests:
+ result_decorators.append(ProfileResult)
+ return run_suite(suite, 'testbzr', verbose=verbose, pattern=pattern,
+ stop_on_failure=stop_on_failure,
+ transport=transport,
+ lsprof_timed=lsprof_timed,
+ bench_history=bench_history,
+ matching_tests_first=matching_tests_first,
+ list_only=list_only,
+ random_seed=random_seed,
+ exclude_pattern=exclude_pattern,
+ strict=strict,
+ runner_class=runner_class,
+ suite_decorators=suite_decorators,
+ stream=stream,
+ result_decorators=result_decorators,
+ )
+ finally:
+ default_transport = old_transport
+ selftest_debug_flags = old_debug_flags
+
+
+def load_test_id_list(file_name):
+ """Load a test id list from a text file.
+
+ The format is one test id per line. No special care is taken to impose
+ strict rules; these test ids are used to filter the test suite, so a test
+ id that does not match an existing test will do no harm. This allows users
+ to add comments, leave blank lines, etc.
+ """
+ test_list = []
+ try:
+ ftest = open(file_name, 'rt')
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ raise errors.NoSuchFile(file_name)
+
+ for test_name in ftest.readlines():
+ test_list.append(test_name.strip())
+ ftest.close()
+ return test_list
+
+
+def suite_matches_id_list(test_suite, id_list):
+ """Warns about tests not appearing or appearing more than once.
+
+ :param test_suite: A TestSuite object.
+ :param id_list: The list of test ids that should be found in test_suite.
+
+ :return: (absents, duplicates) absents is a list containing the tests found
+ in id_list but not in test_suite, duplicates is a list containing the
+ tests found multiple times in test_suite.
+
+ When using a predefined test id list, it may occur that some tests no
+ longer exist or that some tests use the same id. This function warns the
+ tester about potential problems in their workflow (test lists are volatile)
+ or in the test suite itself (using the same id for several tests does not
+ help to localize defects).
+ """
+ # Build a dict counting id occurrences
+ tests = dict()
+ for test in iter_suite_tests(test_suite):
+ id = test.id()
+ tests[id] = tests.get(id, 0) + 1
+
+ not_found = []
+ duplicates = []
+ for id in id_list:
+ occurs = tests.get(id, 0)
+ if not occurs:
+ not_found.append(id)
+ elif occurs > 1:
+ duplicates.append(id)
+
+ return not_found, duplicates
+
+
+class TestIdList(object):
+ """Test id list to filter a test suite.
+
+ Relying on the assumption that test ids are built as:
+ <module>[.<class>.<method>][(<param>+)], <module> being in python dotted
+ notation, this class offers methods to:
+ - avoid building a test suite for modules not referred to in the test list,
+ - keep only the tests listed from the module test suite.
+ """
+
+ def __init__(self, test_id_list):
+ # When a test suite needs to be filtered against us we compare test ids
+ # for equality, so a simple dict offers a quick and simple solution.
+ self.tests = dict().fromkeys(test_id_list, True)
+
+ # While unittest.TestCase has ids like:
+ # <module>.<class>.<method>[(<param+)],
+ # doctest.DocTestCase can have ids like:
+ # <module>
+ # <module>.<class>
+ # <module>.<function>
+ # <module>.<class>.<method>
+
+ # Since we can't predict a test class from its name only, we settle on
+ # a simple constraint: a test id always begins with its module name.
+
+ modules = {}
+ for test_id in test_id_list:
+ parts = test_id.split('.')
+ mod_name = parts.pop(0)
+ modules[mod_name] = True
+ for part in parts:
+ mod_name += '.' + part
+ modules[mod_name] = True
+ self.modules = modules
+
+ def refers_to(self, module_name):
+ """Is there tests for the module or one of its sub modules."""
+ return self.modules.has_key(module_name)
+
+ def includes(self, test_id):
+ return self.tests.has_key(test_id)
+
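+# Illustrative usage of TestIdList (the test id shown is an example):
+#
+# id_list = TestIdList(['bzrlib.tests.test_sampler.DemoTest.test_nothing'])
+# id_list.refers_to('bzrlib.tests.test_sampler') # True
+# id_list.includes('bzrlib.tests.test_sampler.DemoTest.test_nothing') # True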
+
+class TestPrefixAliasRegistry(registry.Registry):
+ """A registry for test prefix aliases.
+
+ This helps implement shortcuts for the --starting-with selftest
+ option. Overriding existing prefixes is not allowed but not fatal (a
+ warning will be emitted).
+ """
+
+ def register(self, key, obj, help=None, info=None,
+ override_existing=False):
+ """See Registry.register.
+
+ Trying to override an existing alias causes a warning to be emitted,
+ not a fatal exception.
+ """
+ try:
+ super(TestPrefixAliasRegistry, self).register(
+ key, obj, help=help, info=info, override_existing=False)
+ except KeyError:
+ actual = self.get(key)
+ trace.note(
+ 'Test prefix alias %s is already used for %s, ignoring %s'
+ % (key, actual, obj))
+
+ def resolve_alias(self, id_start):
+ """Replace the alias by the prefix in the given string.
+
+ Using an unknown prefix is an error, to help catch typos.
+ """
+ parts = id_start.split('.')
+ try:
+ parts[0] = self.get(parts[0])
+ except KeyError:
+ raise errors.BzrCommandError(
+ '%s is not a known test prefix alias' % parts[0])
+ return '.'.join(parts)
+
+
+test_prefix_alias_registry = TestPrefixAliasRegistry()
+"""Registry of test prefix aliases."""
+
+
+# This alias allows detecting typos ('bzrlin.') by making all valid test ids
+# appear prefixed ('bzrlib.' is "replaced" by 'bzrlib.').
+test_prefix_alias_registry.register('bzrlib', 'bzrlib')
+
+# Obvious highest-level prefixes, feel free to add your own via a plugin
+test_prefix_alias_registry.register('bd', 'bzrlib.doc')
+test_prefix_alias_registry.register('bu', 'bzrlib.utils')
+test_prefix_alias_registry.register('bt', 'bzrlib.tests')
+test_prefix_alias_registry.register('bb', 'bzrlib.tests.blackbox')
+test_prefix_alias_registry.register('bp', 'bzrlib.plugins')
+
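+# For illustration, with the registrations above an alias prefix expands to
+# its full module path, e.g.:
+#
+# test_prefix_alias_registry.resolve_alias('bt.test_osutils')
+# # -> 'bzrlib.tests.test_osutils'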
+
+def _test_suite_testmod_names():
+ """Return the standard list of test module names to test."""
+ return [
+ 'bzrlib.doc',
+ 'bzrlib.tests.blackbox',
+ 'bzrlib.tests.commands',
+ 'bzrlib.tests.per_branch',
+ 'bzrlib.tests.per_bzrdir',
+ 'bzrlib.tests.per_controldir',
+ 'bzrlib.tests.per_controldir_colo',
+ 'bzrlib.tests.per_foreign_vcs',
+ 'bzrlib.tests.per_interrepository',
+ 'bzrlib.tests.per_intertree',
+ 'bzrlib.tests.per_inventory',
+ 'bzrlib.tests.per_interbranch',
+ 'bzrlib.tests.per_lock',
+ 'bzrlib.tests.per_merger',
+ 'bzrlib.tests.per_transport',
+ 'bzrlib.tests.per_tree',
+ 'bzrlib.tests.per_pack_repository',
+ 'bzrlib.tests.per_repository',
+ 'bzrlib.tests.per_repository_chk',
+ 'bzrlib.tests.per_repository_reference',
+ 'bzrlib.tests.per_repository_vf',
+ 'bzrlib.tests.per_uifactory',
+ 'bzrlib.tests.per_versionedfile',
+ 'bzrlib.tests.per_workingtree',
+ 'bzrlib.tests.test__annotator',
+ 'bzrlib.tests.test__bencode',
+ 'bzrlib.tests.test__btree_serializer',
+ 'bzrlib.tests.test__chk_map',
+ 'bzrlib.tests.test__dirstate_helpers',
+ 'bzrlib.tests.test__groupcompress',
+ 'bzrlib.tests.test__known_graph',
+ 'bzrlib.tests.test__rio',
+ 'bzrlib.tests.test__simple_set',
+ 'bzrlib.tests.test__static_tuple',
+ 'bzrlib.tests.test__walkdirs_win32',
+ 'bzrlib.tests.test_ancestry',
+ 'bzrlib.tests.test_annotate',
+ 'bzrlib.tests.test_api',
+ 'bzrlib.tests.test_atomicfile',
+ 'bzrlib.tests.test_bad_files',
+ 'bzrlib.tests.test_bisect_multi',
+ 'bzrlib.tests.test_branch',
+ 'bzrlib.tests.test_branchbuilder',
+ 'bzrlib.tests.test_btree_index',
+ 'bzrlib.tests.test_bugtracker',
+ 'bzrlib.tests.test_bundle',
+ 'bzrlib.tests.test_bzrdir',
+ 'bzrlib.tests.test__chunks_to_lines',
+ 'bzrlib.tests.test_cache_utf8',
+ 'bzrlib.tests.test_chk_map',
+ 'bzrlib.tests.test_chk_serializer',
+ 'bzrlib.tests.test_chunk_writer',
+ 'bzrlib.tests.test_clean_tree',
+ 'bzrlib.tests.test_cleanup',
+ 'bzrlib.tests.test_cmdline',
+ 'bzrlib.tests.test_commands',
+ 'bzrlib.tests.test_commit',
+ 'bzrlib.tests.test_commit_merge',
+ 'bzrlib.tests.test_config',
+ 'bzrlib.tests.test_conflicts',
+ 'bzrlib.tests.test_controldir',
+ 'bzrlib.tests.test_counted_lock',
+ 'bzrlib.tests.test_crash',
+ 'bzrlib.tests.test_decorators',
+ 'bzrlib.tests.test_delta',
+ 'bzrlib.tests.test_debug',
+ 'bzrlib.tests.test_diff',
+ 'bzrlib.tests.test_directory_service',
+ 'bzrlib.tests.test_dirstate',
+ 'bzrlib.tests.test_email_message',
+ 'bzrlib.tests.test_eol_filters',
+ 'bzrlib.tests.test_errors',
+ 'bzrlib.tests.test_estimate_compressed_size',
+ 'bzrlib.tests.test_export',
+ 'bzrlib.tests.test_export_pot',
+ 'bzrlib.tests.test_extract',
+ 'bzrlib.tests.test_features',
+ 'bzrlib.tests.test_fetch',
+ 'bzrlib.tests.test_fixtures',
+ 'bzrlib.tests.test_fifo_cache',
+ 'bzrlib.tests.test_filters',
+ 'bzrlib.tests.test_filter_tree',
+ 'bzrlib.tests.test_ftp_transport',
+ 'bzrlib.tests.test_foreign',
+ 'bzrlib.tests.test_generate_docs',
+ 'bzrlib.tests.test_generate_ids',
+ 'bzrlib.tests.test_globbing',
+ 'bzrlib.tests.test_gpg',
+ 'bzrlib.tests.test_graph',
+ 'bzrlib.tests.test_groupcompress',
+ 'bzrlib.tests.test_hashcache',
+ 'bzrlib.tests.test_help',
+ 'bzrlib.tests.test_hooks',
+ 'bzrlib.tests.test_http',
+ 'bzrlib.tests.test_http_response',
+ 'bzrlib.tests.test_https_ca_bundle',
+ 'bzrlib.tests.test_https_urllib',
+ 'bzrlib.tests.test_i18n',
+ 'bzrlib.tests.test_identitymap',
+ 'bzrlib.tests.test_ignores',
+ 'bzrlib.tests.test_index',
+ 'bzrlib.tests.test_import_tariff',
+ 'bzrlib.tests.test_info',
+ 'bzrlib.tests.test_inv',
+ 'bzrlib.tests.test_inventory_delta',
+ 'bzrlib.tests.test_knit',
+ 'bzrlib.tests.test_lazy_import',
+ 'bzrlib.tests.test_lazy_regex',
+ 'bzrlib.tests.test_library_state',
+ 'bzrlib.tests.test_lock',
+ 'bzrlib.tests.test_lockable_files',
+ 'bzrlib.tests.test_lockdir',
+ 'bzrlib.tests.test_log',
+ 'bzrlib.tests.test_lru_cache',
+ 'bzrlib.tests.test_lsprof',
+ 'bzrlib.tests.test_mail_client',
+ 'bzrlib.tests.test_matchers',
+ 'bzrlib.tests.test_memorytree',
+ 'bzrlib.tests.test_merge',
+ 'bzrlib.tests.test_merge3',
+ 'bzrlib.tests.test_merge_core',
+ 'bzrlib.tests.test_merge_directive',
+ 'bzrlib.tests.test_mergetools',
+ 'bzrlib.tests.test_missing',
+ 'bzrlib.tests.test_msgeditor',
+ 'bzrlib.tests.test_multiparent',
+ 'bzrlib.tests.test_mutabletree',
+ 'bzrlib.tests.test_nonascii',
+ 'bzrlib.tests.test_options',
+ 'bzrlib.tests.test_osutils',
+ 'bzrlib.tests.test_osutils_encodings',
+ 'bzrlib.tests.test_pack',
+ 'bzrlib.tests.test_patch',
+ 'bzrlib.tests.test_patches',
+ 'bzrlib.tests.test_permissions',
+ 'bzrlib.tests.test_plugins',
+ 'bzrlib.tests.test_progress',
+ 'bzrlib.tests.test_pyutils',
+ 'bzrlib.tests.test_read_bundle',
+ 'bzrlib.tests.test_reconcile',
+ 'bzrlib.tests.test_reconfigure',
+ 'bzrlib.tests.test_registry',
+ 'bzrlib.tests.test_remote',
+ 'bzrlib.tests.test_rename_map',
+ 'bzrlib.tests.test_repository',
+ 'bzrlib.tests.test_revert',
+ 'bzrlib.tests.test_revision',
+ 'bzrlib.tests.test_revisionspec',
+ 'bzrlib.tests.test_revisiontree',
+ 'bzrlib.tests.test_rio',
+ 'bzrlib.tests.test_rules',
+ 'bzrlib.tests.test_url_policy_open',
+ 'bzrlib.tests.test_sampler',
+ 'bzrlib.tests.test_scenarios',
+ 'bzrlib.tests.test_script',
+ 'bzrlib.tests.test_selftest',
+ 'bzrlib.tests.test_serializer',
+ 'bzrlib.tests.test_setup',
+ 'bzrlib.tests.test_sftp_transport',
+ 'bzrlib.tests.test_shelf',
+ 'bzrlib.tests.test_shelf_ui',
+ 'bzrlib.tests.test_smart',
+ 'bzrlib.tests.test_smart_add',
+ 'bzrlib.tests.test_smart_request',
+ 'bzrlib.tests.test_smart_signals',
+ 'bzrlib.tests.test_smart_transport',
+ 'bzrlib.tests.test_smtp_connection',
+ 'bzrlib.tests.test_source',
+ 'bzrlib.tests.test_ssh_transport',
+ 'bzrlib.tests.test_status',
+ 'bzrlib.tests.test_store',
+ 'bzrlib.tests.test_strace',
+ 'bzrlib.tests.test_subsume',
+ 'bzrlib.tests.test_switch',
+ 'bzrlib.tests.test_symbol_versioning',
+ 'bzrlib.tests.test_tag',
+ 'bzrlib.tests.test_test_server',
+ 'bzrlib.tests.test_testament',
+ 'bzrlib.tests.test_textfile',
+ 'bzrlib.tests.test_textmerge',
+ 'bzrlib.tests.test_cethread',
+ 'bzrlib.tests.test_timestamp',
+ 'bzrlib.tests.test_trace',
+ 'bzrlib.tests.test_transactions',
+ 'bzrlib.tests.test_transform',
+ 'bzrlib.tests.test_transport',
+ 'bzrlib.tests.test_transport_log',
+ 'bzrlib.tests.test_tree',
+ 'bzrlib.tests.test_treebuilder',
+ 'bzrlib.tests.test_treeshape',
+ 'bzrlib.tests.test_tsort',
+ 'bzrlib.tests.test_tuned_gzip',
+ 'bzrlib.tests.test_ui',
+ 'bzrlib.tests.test_uncommit',
+ 'bzrlib.tests.test_upgrade',
+ 'bzrlib.tests.test_upgrade_stacked',
+ 'bzrlib.tests.test_urlutils',
+ 'bzrlib.tests.test_utextwrap',
+ 'bzrlib.tests.test_version',
+ 'bzrlib.tests.test_version_info',
+ 'bzrlib.tests.test_versionedfile',
+ 'bzrlib.tests.test_vf_search',
+ 'bzrlib.tests.test_weave',
+ 'bzrlib.tests.test_whitebox',
+ 'bzrlib.tests.test_win32utils',
+ 'bzrlib.tests.test_workingtree',
+ 'bzrlib.tests.test_workingtree_4',
+ 'bzrlib.tests.test_wsgi',
+ 'bzrlib.tests.test_xml',
+ ]
+
+
+def _test_suite_modules_to_doctest():
+ """Return the list of modules to doctest."""
+ if __doc__ is None:
+ # GZ 2009-03-31: No docstrings with -OO so there's nothing to doctest
+ return []
+ return [
+ 'bzrlib',
+ 'bzrlib.branchbuilder',
+ 'bzrlib.decorators',
+ 'bzrlib.inventory',
+ 'bzrlib.iterablefile',
+ 'bzrlib.lockdir',
+ 'bzrlib.merge3',
+ 'bzrlib.option',
+ 'bzrlib.pyutils',
+ 'bzrlib.symbol_versioning',
+ 'bzrlib.tests',
+ 'bzrlib.tests.fixtures',
+ 'bzrlib.timestamp',
+ 'bzrlib.transport.http',
+ 'bzrlib.version_info_formats.format_custom',
+ ]
+
+
+def test_suite(keep_only=None, starting_with=None):
+ """Build and return TestSuite for the whole of bzrlib.
+
+ :param keep_only: A list of test ids limiting the suite returned.
+
+    :param starting_with: A list of test id prefixes limiting the suite
+        returned to the tests starting with one of them.
+
+ This function can be replaced if you need to change the default test
+ suite on a global basis, but it is not encouraged.
+ """
+
+ loader = TestUtil.TestLoader()
+
+ if keep_only is not None:
+ id_filter = TestIdList(keep_only)
+ if starting_with:
+        # We take precedence over keep_only because *at loading time* using
+        # both options means we will load fewer tests for the same final
+        # result.
+ def interesting_module(name):
+ for start in starting_with:
+ if (
+ # Either the module name starts with the specified string
+ name.startswith(start)
+ # or it may contain tests starting with the specified string
+ or start.startswith(name)
+ ):
+ return True
+ return False
+ loader = TestUtil.FilteredByModuleTestLoader(interesting_module)
+
+ elif keep_only is not None:
+ loader = TestUtil.FilteredByModuleTestLoader(id_filter.refers_to)
+ def interesting_module(name):
+ return id_filter.refers_to(name)
+
+ else:
+ loader = TestUtil.TestLoader()
+ def interesting_module(name):
+ # No filtering, all modules are interesting
+ return True
+
+ suite = loader.suiteClass()
+
+ # modules building their suite with loadTestsFromModuleNames
+ suite.addTest(loader.loadTestsFromModuleNames(_test_suite_testmod_names()))
+
+ for mod in _test_suite_modules_to_doctest():
+ if not interesting_module(mod):
+ # No tests to keep here, move along
+ continue
+ try:
+ # note that this really does mean "report only" -- doctest
+ # still runs the rest of the examples
+ doc_suite = IsolatedDocTestSuite(
+ mod, optionflags=doctest.REPORT_ONLY_FIRST_FAILURE)
+ except ValueError, e:
+ print '**failed to get doctest for: %s\n%s' % (mod, e)
+ raise
+ if len(doc_suite._tests) == 0:
+ raise errors.BzrError("no doctests found in %s" % (mod,))
+ suite.addTest(doc_suite)
+
+ default_encoding = sys.getdefaultencoding()
+ for name, plugin in _mod_plugin.plugins().items():
+ if not interesting_module(plugin.module.__name__):
+ continue
+ plugin_suite = plugin.test_suite()
+ # We used to catch ImportError here and turn it into just a warning,
+ # but really if you don't have --no-plugins this should be a failure.
+ # mbp 20080213 - see http://bugs.launchpad.net/bugs/189771
+ if plugin_suite is None:
+ plugin_suite = plugin.load_plugin_tests(loader)
+ if plugin_suite is not None:
+ suite.addTest(plugin_suite)
+ if default_encoding != sys.getdefaultencoding():
+ trace.warning(
+ 'Plugin "%s" tried to reset default encoding to: %s', name,
+ sys.getdefaultencoding())
+ reload(sys)
+ sys.setdefaultencoding(default_encoding)
+
+ if keep_only is not None:
+ # Now that the referred modules have loaded their tests, keep only the
+ # requested ones.
+ suite = filter_suite_by_id_list(suite, id_filter)
+ # Do some sanity checks on the id_list filtering
+ not_found, duplicates = suite_matches_id_list(suite, keep_only)
+ if starting_with:
+            # The tester has used both keep_only and starting_with, so they
+            # are already aware that some tests are excluded from the list;
+            # there is no need to tell them which.
+ pass
+ else:
+            # Some tests mentioned in the list are not in the test suite. The
+            # list may be out of date; report it to the tester.
+ for id in not_found:
+ trace.warning('"%s" not found in the test suite', id)
+ for id in duplicates:
+ trace.warning('"%s" is used as an id by several tests', id)
+
+ return suite
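+# A brief usage sketch (an editorial addition, not from the original source):
+# callers typically build the full suite with test_suite() and can narrow it
+# either by exact ids or by id prefixes, e.g.
+#   suite = test_suite(starting_with=['bzrlib.tests.test_sampler'])
+# which, as described above, only loads the modules that may contain such
+# tests.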
+
+
+def multiply_scenarios(*scenarios):
+ """Multiply two or more iterables of scenarios.
+
+ It is safe to pass scenario generators or iterators.
+
+ :returns: A list of compound scenarios: the cross-product of all
+ scenarios, with the names concatenated and the parameters
+ merged together.
+ """
+ return reduce(_multiply_two_scenarios, map(list, scenarios))
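+# For example (a hedged sketch of the behaviour documented above), two
+# two-element scenario lists multiply into four compound scenarios:
+#   multiply_scenarios(
+#       [('a', {'x': 1}), ('b', {'x': 2})],
+#       [('fast', {'y': 1}), ('slow', {'y': 2})])
+# yields scenarios named 'a,fast', 'a,slow', 'b,fast' and 'b,slow', each
+# carrying the merged parameter dicts.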
+
+
+def _multiply_two_scenarios(scenarios_left, scenarios_right):
+ """Multiply two sets of scenarios.
+
+ :returns: the cartesian product of the two sets of scenarios, that is
+ a scenario for every possible combination of a left scenario and a
+ right scenario.
+ """
+ return [
+ ('%s,%s' % (left_name, right_name),
+ dict(left_dict.items() + right_dict.items()))
+ for left_name, left_dict in scenarios_left
+ for right_name, right_dict in scenarios_right]
+
+
+def multiply_tests(tests, scenarios, result):
+ """Multiply tests_list by scenarios into result.
+
+ This is the core workhorse for test parameterisation.
+
+ Typically the load_tests() method for a per-implementation test suite will
+ call multiply_tests and return the result.
+
+ :param tests: The tests to parameterise.
+ :param scenarios: The scenarios to apply: pairs of (scenario_name,
+ scenario_param_dict).
+ :param result: A TestSuite to add created tests to.
+
+ This returns the passed in result TestSuite with the cross product of all
+ the tests repeated once for each scenario. Each test is adapted by adding
+ the scenario name at the end of its id(), and updating the test object's
+ __dict__ with the scenario_param_dict.
+
+ >>> import bzrlib.tests.test_sampler
+ >>> r = multiply_tests(
+ ... bzrlib.tests.test_sampler.DemoTest('test_nothing'),
+ ... [('one', dict(param=1)),
+ ... ('two', dict(param=2))],
+ ... TestUtil.TestSuite())
+ >>> tests = list(iter_suite_tests(r))
+ >>> len(tests)
+ 2
+ >>> tests[0].id()
+ 'bzrlib.tests.test_sampler.DemoTest.test_nothing(one)'
+ >>> tests[0].param
+ 1
+ >>> tests[1].param
+ 2
+ """
+ for test in iter_suite_tests(tests):
+ apply_scenarios(test, scenarios, result)
+ return result
+
+
+def apply_scenarios(test, scenarios, result):
+ """Apply the scenarios in scenarios to test and add to result.
+
+ :param test: The test to apply scenarios to.
+ :param scenarios: An iterable of scenarios to apply to test.
+ :return: result
+ :seealso: apply_scenario
+ """
+ for scenario in scenarios:
+ result.addTest(apply_scenario(test, scenario))
+ return result
+
+
+def apply_scenario(test, scenario):
+ """Copy test and apply scenario to it.
+
+ :param test: A test to adapt.
+    :param scenario: A tuple describing the scenario.
+ The first element of the tuple is the new test id.
+ The second element is a dict containing attributes to set on the
+ test.
+ :return: The adapted test.
+ """
+ new_id = "%s(%s)" % (test.id(), scenario[0])
+ new_test = clone_test(test, new_id)
+ for name, value in scenario[1].items():
+ setattr(new_test, name, value)
+ return new_test
+
+
+def clone_test(test, new_id):
+ """Clone a test giving it a new id.
+
+ :param test: The test to clone.
+ :param new_id: The id to assign to it.
+ :return: The new test.
+ """
+ new_test = copy.copy(test)
+ new_test.id = lambda: new_id
+ # XXX: Workaround <https://bugs.launchpad.net/testtools/+bug/637725>, which
+ # causes cloned tests to share the 'details' dict. This makes it hard to
+ # read the test output for parameterized tests, because tracebacks will be
+ # associated with irrelevant tests.
+ try:
+ details = new_test._TestCase__details
+ except AttributeError:
+ # must be a different version of testtools than expected. Do nothing.
+ pass
+ else:
+ # Reset the '__details' dict.
+ new_test._TestCase__details = {}
+ return new_test
+
+
+def permute_tests_for_extension(standard_tests, loader, py_module_name,
+ ext_module_name):
+ """Helper for permutating tests against an extension module.
+
+ This is meant to be used inside a modules 'load_tests()' function. It will
+ create 2 scenarios, and cause all tests in the 'standard_tests' to be run
+ against both implementations. Setting 'test.module' to the appropriate
+ module. See bzrlib.tests.test__chk_map.load_tests as an example.
+
+ :param standard_tests: A test suite to permute
+ :param loader: A TestLoader
+ :param py_module_name: The python path to a python module that can always
+ be loaded, and will be considered the 'python' implementation. (eg
+ 'bzrlib._chk_map_py')
+ :param ext_module_name: The python path to an extension module. If the
+ module cannot be loaded, a single test will be added, which notes that
+ the module is not available. If it can be loaded, all standard_tests
+ will be run against that module.
+ :return: (suite, feature) suite is a test-suite that has all the permuted
+ tests. feature is the Feature object that can be used to determine if
+ the module is available.
+ """
+
+ from bzrlib.tests.features import ModuleAvailableFeature
+ py_module = pyutils.get_named_object(py_module_name)
+ scenarios = [
+ ('python', {'module': py_module}),
+ ]
+ suite = loader.suiteClass()
+ feature = ModuleAvailableFeature(ext_module_name)
+ if feature.available():
+ scenarios.append(('C', {'module': feature.module}))
+ else:
+ # the compiled module isn't available, so we add a failing test
+ class FailWithoutFeature(TestCase):
+ def test_fail(self):
+ self.requireFeature(feature)
+ suite.addTest(loader.loadTestsFromTestCase(FailWithoutFeature))
+ result = multiply_tests(standard_tests, scenarios, suite)
+ return result, feature
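+# A minimal sketch of the intended call site (mirroring the example module
+# named in the docstring above; an editorial illustration, not original
+# source):
+#
+#   def load_tests(standard_tests, module, loader):
+#       suite, _ = permute_tests_for_extension(
+#           standard_tests, loader,
+#           'bzrlib._chk_map_py', 'bzrlib._chk_map_pyx')
+#       return suite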
+
+
+def _rmtree_temp_dir(dirname, test_id=None):
+    # If LANG=C we probably have created some bogus paths
+    # which rmtree(unicode) will fail to delete,
+    # so make sure we are using rmtree(str) to delete everything,
+    # except on win32, where rmtree(str) will fail
+    # since it doesn't have the property of byte-stream paths
+    # (they are either ascii or mbcs).
+ if sys.platform == 'win32':
+ # make sure we are using the unicode win32 api
+ dirname = unicode(dirname)
+ else:
+ dirname = dirname.encode(sys.getfilesystemencoding())
+ try:
+ osutils.rmtree(dirname)
+ except OSError, e:
+ # We don't want to fail here because some useful display will be lost
+ # otherwise. Polluting the tmp dir is bad, but not giving all the
+ # possible info to the test runner is even worse.
+        if test_id is not None:
+ ui.ui_factory.clear_term()
+ sys.stderr.write('\nWhile running: %s\n' % (test_id,))
+ # Ugly, but the last thing we want here is fail, so bear with it.
+ printable_e = str(e).decode(osutils.get_user_encoding(), 'replace'
+ ).encode('ascii', 'replace')
+ sys.stderr.write('Unable to remove testing dir %s\n%s'
+ % (os.path.basename(dirname), printable_e))
+
+
+def probe_unicode_in_user_encoding():
+ """Try to encode several unicode strings to use in unicode-aware tests.
+    Return the first successful match.
+
+ :return: (unicode value, encoded plain string value) or (None, None)
+ """
+ possible_vals = [u'm\xb5', u'\xe1', u'\u0410']
+ for uni_val in possible_vals:
+ try:
+ str_val = uni_val.encode(osutils.get_user_encoding())
+ except UnicodeEncodeError:
+ # Try a different character
+ pass
+ else:
+ return uni_val, str_val
+ return None, None
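+# For instance (illustrative only): under a latin-1 user encoding the first
+# candidate succeeds and (u'm\xb5', 'm\xb5') is returned, while under a
+# pure-ascii encoding every candidate fails to encode and (None, None) is
+# returned.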
+
+
+def probe_bad_non_ascii(encoding):
+ """Try to find [bad] character with code [128..255]
+ that cannot be decoded to unicode in some encoding.
+ Return None if all non-ascii characters is valid
+ for given encoding.
+ """
+ for i in xrange(128, 256):
+ char = chr(i)
+ try:
+ char.decode(encoding)
+ except UnicodeDecodeError:
+ return char
+ return None
+
+
+# Only define SubUnitBzrRunner if subunit is available.
+try:
+ from subunit import TestProtocolClient
+ from subunit.test_results import AutoTimingTestResultDecorator
+ class SubUnitBzrProtocolClient(TestProtocolClient):
+
+ def stopTest(self, test):
+ super(SubUnitBzrProtocolClient, self).stopTest(test)
+ _clear__type_equality_funcs(test)
+
+ def addSuccess(self, test, details=None):
+ # The subunit client always includes the details in the subunit
+ # stream, but we don't want to include it in ours.
+ if details is not None and 'log' in details:
+ del details['log']
+ return super(SubUnitBzrProtocolClient, self).addSuccess(
+ test, details)
+
+ class SubUnitBzrRunner(TextTestRunner):
+ def run(self, test):
+ result = AutoTimingTestResultDecorator(
+ SubUnitBzrProtocolClient(self.stream))
+ test.run(result)
+ return result
+except ImportError:
+ pass
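+# Editorial note (hedged): when the 'subunit' library can be imported, the
+# SubUnitBzrRunner defined above becomes available as an alternative test
+# runner that emits results as a subunit stream; when the import fails, the
+# name is simply left undefined and the plain TextTestRunner remains the
+# default.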
+
+
+# API compatibility for old plugins; see bug 892622.
+for name in [
+ 'Feature',
+ 'HTTPServerFeature',
+ 'ModuleAvailableFeature',
+ 'HTTPSServerFeature', 'SymlinkFeature', 'HardlinkFeature',
+ 'OsFifoFeature', 'UnicodeFilenameFeature',
+ 'ByteStringNamedFilesystem', 'UTF8Filesystem',
+ 'BreakinFeature', 'CaseInsCasePresFilenameFeature',
+ 'CaseInsensitiveFilesystemFeature', 'case_sensitive_filesystem_feature',
+ 'posix_permissions_feature',
+ ]:
+ globals()[name] = _CompatabilityThunkFeature(
+ symbol_versioning.deprecated_in((2, 5, 0)),
+ 'bzrlib.tests', name,
+ name, 'bzrlib.tests.features')
+
+
+for (old_name, new_name) in [
+ ('UnicodeFilename', 'UnicodeFilenameFeature'),
+ ]:
+    globals()[old_name] = _CompatabilityThunkFeature(
+ symbol_versioning.deprecated_in((2, 5, 0)),
+ 'bzrlib.tests', old_name,
+ new_name, 'bzrlib.tests.features')
diff --git a/bzrlib/tests/blackbox/__init__.py b/bzrlib/tests/blackbox/__init__.py
new file mode 100644
index 0000000..6d52e42
--- /dev/null
+++ b/bzrlib/tests/blackbox/__init__.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr.
+
+These check that it behaves properly when it's invoked through the regular
+command-line interface. This doesn't actually run a new interpreter but
+rather starts again from the run_bzr function.
+"""
+
+
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+from bzrlib import tests
+
+
+def load_tests(basic_tests, module, loader):
+ suite = loader.suiteClass()
+ # add the tests for this module
+ suite.addTests(basic_tests)
+
+ prefix = 'bzrlib.tests.blackbox.'
+ testmod_names = [
+ 'test_add',
+ 'test_added',
+ 'test_alias',
+ 'test_aliases',
+ 'test_ancestry',
+ 'test_annotate',
+ 'test_branch',
+ 'test_branches',
+ 'test_break_lock',
+ 'test_bound_branches',
+ 'test_bundle_info',
+ 'test_cat',
+ 'test_cat_revision',
+ 'test_check',
+ 'test_checkout',
+ 'test_clean_tree',
+ 'test_command_encoding',
+ 'test_commit',
+ 'test_config',
+ 'test_conflicts',
+ 'test_debug',
+ 'test_deleted',
+ 'test_diff',
+ 'test_dump_btree',
+ 'test_dpush',
+ 'test_exceptions',
+ 'test_export',
+ 'test_export_pot',
+ 'test_filesystem_cicp',
+ 'test_filtered_view_ops',
+ 'test_find_merge_base',
+ 'test_help',
+ 'test_hooks',
+ 'test_ignore',
+ 'test_ignored',
+ 'test_info',
+ 'test_init',
+ 'test_inventory',
+ 'test_join',
+ 'test_locale',
+ 'test_log',
+ 'test_logformats',
+ 'test_lookup_revision',
+ 'test_ls',
+ 'test_lsprof',
+ 'test_merge',
+ 'test_merge_directive',
+ 'test_missing',
+ 'test_mkdir',
+ 'test_modified',
+ 'test_mv',
+ 'test_nick',
+ 'test_non_ascii',
+ 'test_outside_wt',
+ 'test_pack',
+ 'test_pull',
+ 'test_push',
+ 'test_reconcile',
+ 'test_reconfigure',
+ 'test_reference',
+ 'test_remerge',
+ 'test_remove',
+ 'test_re_sign',
+ 'test_remember_option',
+ 'test_remove_tree',
+ 'test_repair_workingtree',
+ 'test_resolve',
+ 'test_revert',
+ 'test_revno',
+ 'test_revision_history',
+ 'test_revision_info',
+ 'test_rmbranch',
+ 'test_script',
+ 'test_selftest',
+ 'test_send',
+ 'test_serve',
+ 'test_shared_repository',
+ 'test_shell_complete',
+ 'test_shelve',
+ 'test_sign_my_commits',
+ 'test_split',
+ 'test_status',
+ 'test_switch',
+ 'test_tags',
+ 'test_testament',
+ 'test_too_much',
+ 'test_uncommit',
+ 'test_unknowns',
+ 'test_update',
+ 'test_upgrade',
+ 'test_version',
+ 'test_version_info',
+ 'test_versioning',
+ 'test_view',
+ 'test_whoami',
+ ]
+ # add the tests for the sub modules
+ suite.addTests(loader.loadTestsFromModuleNames(
+ [prefix + module_name for module_name in testmod_names]))
+ return suite
+
+
+class ExternalBase(tests.TestCaseWithTransport):
+ """Don't use this class anymore, use TestCaseWithTransport or similar"""
+
+ @deprecated_method(deprecated_in((2, 2, 0)))
+ def check_output(self, output, *args):
+ """Verify that the expected output matches what bzr says.
+
+ The output is supplied first, so that you can supply a variable
+ number of arguments to bzr.
+ """
+ self.assertEquals(self.run_bzr(*args)[0], output)
diff --git a/bzrlib/tests/blackbox/test_add.py b/bzrlib/tests/blackbox/test_add.py
new file mode 100644
index 0000000..38c71d1
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_add.py
@@ -0,0 +1,267 @@
+# Copyright (C) 2006, 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests of the 'bzr add' command."""
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ script,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
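+# Editorial note (hedged): binding load_tests to load_tests_apply_scenarios
+# lets the test loader expand the 'scenarios' attribute declared on TestAdd
+# below, so each test method runs once per listed branch/tree format.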
+
+
+class TestAdd(tests.TestCaseWithTransport):
+
+ scenarios = [
+ ('pre-views', {'branch_tree_format': 'pack-0.92'}),
+ ('view-aware', {'branch_tree_format': '2a'}),
+ ]
+
+ def make_branch_and_tree(self, dir):
+ return super(TestAdd, self).make_branch_and_tree(
+ dir, format=self.branch_tree_format)
+
+ def test_add_reports(self):
+ """add command prints the names of added files."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['top.txt', 'dir/', 'dir/sub.txt', 'CVS'])
+ self.build_tree_contents([('.bzrignore', 'CVS\n')])
+ out = self.run_bzr('add')[0]
+ # the ordering is not defined at the moment
+ results = sorted(out.rstrip('\n').split('\n'))
+ self.assertEquals(['adding .bzrignore',
+ 'adding dir',
+ 'adding dir/sub.txt',
+ 'adding top.txt'],
+ results)
+ out = self.run_bzr('add -v')[0]
+ results = sorted(out.rstrip('\n').split('\n'))
+ self.assertEquals(['ignored CVS matching "CVS"'],
+ results)
+
+ def test_add_quiet_is(self):
+ """add -q does not print the names of added files."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['top.txt', 'dir/', 'dir/sub.txt'])
+ out = self.run_bzr('add -q')[0]
+ # the ordering is not defined at the moment
+ results = sorted(out.rstrip('\n').split('\n'))
+ self.assertEquals([''], results)
+
+ def test_add_in_unversioned(self):
+ """Try to add a file in an unversioned directory.
+
+ "bzr add" should add the parent(s) as necessary.
+ """
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['inertiatic/', 'inertiatic/esp'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'inertiatic\n')
+ self.run_bzr('add inertiatic/esp')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+
+ # Multiple unversioned parents
+ self.build_tree(['veil/', 'veil/cerpin/', 'veil/cerpin/taxt'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'veil\n')
+ self.run_bzr('add veil/cerpin/taxt')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+
+ # Check whacky paths work
+ self.build_tree(['cicatriz/', 'cicatriz/esp'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'cicatriz\n')
+ self.run_bzr('add inertiatic/../cicatriz/esp')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+
+ def test_add_no_recurse(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['inertiatic/', 'inertiatic/esp'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'inertiatic\n')
+ self.run_bzr('add -N inertiatic')
+ self.assertEquals(self.run_bzr('unknowns')[0], 'inertiatic/esp\n')
+
+ def test_add_in_versioned(self):
+ """Try to add a file in a versioned directory.
+
+ "bzr add" should do this happily.
+ """
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['inertiatic/', 'inertiatic/esp'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'inertiatic\n')
+ self.run_bzr('add --no-recurse inertiatic')
+ self.assertEquals(self.run_bzr('unknowns')[0], 'inertiatic/esp\n')
+ self.run_bzr('add inertiatic/esp')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+
+ def test_subdir_add(self):
+ """Add in subdirectory should add only things from there down"""
+ eq = self.assertEqual
+ ass = self.assertTrue
+
+ t = self.make_branch_and_tree('.')
+ b = t.branch
+ self.build_tree(['src/', 'README'])
+
+ eq(sorted(t.unknowns()),
+ ['README', 'src'])
+
+ self.run_bzr('add src')
+
+ self.build_tree(['src/foo.c'])
+
+ # add with no arguments in a subdirectory gets only files below that
+ # subdirectory
+ self.run_bzr('add', working_dir='src')
+ self.assertEquals('README\n',
+ self.run_bzr('unknowns', working_dir='src')[0])
+ # reopen to see the new changes
+ t = t.bzrdir.open_workingtree('src')
+ versioned = [path for path, entry in t.iter_entries_by_dir()]
+ self.assertEquals(versioned, ['', 'src', 'src/foo.c'])
+
+ # add from the parent directory should pick up all file names
+ self.run_bzr('add')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+ self.run_bzr('check')
+
+ def test_add_missing(self):
+ """bzr add foo where foo is missing should error."""
+ self.make_branch_and_tree('.')
+ self.run_bzr('add missing-file', retcode=3)
+
+ def test_add_from(self):
+ base_tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/a', 'base/b/', 'base/b/c'])
+ base_tree.add(['a', 'b', 'b/c'])
+ base_tree.commit('foo')
+
+ new_tree = self.make_branch_and_tree('new')
+ self.build_tree(['new/a', 'new/b/', 'new/b/c', 'd'])
+
+ out, err = self.run_bzr('add --file-ids-from ../base',
+ working_dir='new')
+ self.assertEqual('', err)
+ self.assertEqualDiff('adding a w/ file id from a\n'
+ 'adding b w/ file id from b\n'
+ 'adding b/c w/ file id from b/c\n',
+ out)
+ new_tree = new_tree.bzrdir.open_workingtree()
+ self.assertEqual(base_tree.path2id('a'), new_tree.path2id('a'))
+ self.assertEqual(base_tree.path2id('b'), new_tree.path2id('b'))
+ self.assertEqual(base_tree.path2id('b/c'), new_tree.path2id('b/c'))
+
+ def test_add_from_subdir(self):
+ base_tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/a', 'base/b/', 'base/b/c', 'base/b/d'])
+ base_tree.add(['a', 'b', 'b/c', 'b/d'])
+ base_tree.commit('foo')
+
+ new_tree = self.make_branch_and_tree('new')
+ self.build_tree(['new/c', 'new/d'])
+
+ out, err = self.run_bzr('add --file-ids-from ../base/b',
+ working_dir='new')
+ self.assertEqual('', err)
+ self.assertEqualDiff('adding c w/ file id from b/c\n'
+ 'adding d w/ file id from b/d\n',
+ out)
+
+ new_tree = new_tree.bzrdir.open_workingtree('new')
+ self.assertEqual(base_tree.path2id('b/c'), new_tree.path2id('c'))
+ self.assertEqual(base_tree.path2id('b/d'), new_tree.path2id('d'))
+
+ def test_add_dry_run(self):
+ """Test a dry run add, make sure nothing is added."""
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['inertiatic/', 'inertiatic/esp'])
+ self.assertEqual(list(wt.unknowns()), ['inertiatic'])
+ self.run_bzr('add --dry-run')
+ self.assertEqual(list(wt.unknowns()), ['inertiatic'])
+
+ def test_add_control_dir(self):
+ """The control dir and its content should be refused."""
+ self.make_branch_and_tree('.')
+ err = self.run_bzr('add .bzr', retcode=3)[1]
+ self.assertContainsRe(err, r'ERROR:.*\.bzr.*control file')
+ err = self.run_bzr('add .bzr/README', retcode=3)[1]
+ self.assertContainsRe(err, r'ERROR:.*\.bzr.*control file')
+ self.build_tree(['.bzr/crescent'])
+ err = self.run_bzr('add .bzr/crescent', retcode=3)[1]
+ self.assertContainsRe(err, r'ERROR:.*\.bzr.*control file')
+
+ def test_add_via_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.make_branch_and_tree('source')
+ self.build_tree(['source/top.txt'])
+ os.symlink('source', 'link')
+ out = self.run_bzr(['add', 'link/top.txt'])[0]
+ self.assertEquals(out, 'adding top.txt\n')
+
+ def test_add_symlink_to_abspath(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.make_branch_and_tree('tree')
+ os.symlink(osutils.abspath('target'), 'tree/link')
+ out = self.run_bzr(['add', 'tree/link'])[0]
+ self.assertEquals(out, 'adding link\n')
+
+ def test_add_not_child(self):
+ # https://bugs.launchpad.net/bzr/+bug/98735
+ sr = script.ScriptRunner()
+ self.make_branch_and_tree('tree1')
+ self.make_branch_and_tree('tree2')
+ self.build_tree(['tree1/a', 'tree2/b'])
+ sr.run_script(self, '''
+ $ bzr add tree1/a tree2/b
+ 2>bzr: ERROR: Path "...tree2/b" is not a child of path "...tree1"
+ ''')
+
+ def test_add_multiple_files_in_unicode_cwd(self):
+ """Adding multiple files in a non-ascii cwd, see lp:686611"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self.make_branch_and_tree(u"\xA7")
+ self.build_tree([u"\xA7/a", u"\xA7/b"])
+ out, err = self.run_bzr(["add", "a", "b"], working_dir=u"\xA7")
+ self.assertEquals(out, "adding a\n" "adding b\n")
+ self.assertEquals(err, "")
+
+ def test_add_skip_large_files(self):
+ """Test skipping files larger than add.maximum_file_size"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['small.txt', 'big.txt', 'big2.txt'])
+ self.build_tree_contents([('small.txt', '0\n')])
+ self.build_tree_contents([('big.txt', '01234567890123456789\n')])
+ self.build_tree_contents([('big2.txt', '01234567890123456789\n')])
+ tree.branch.get_config_stack().set('add.maximum_file_size', 5)
+ out = self.run_bzr('add')[0]
+ results = sorted(out.rstrip('\n').split('\n'))
+ self.assertEquals(['adding small.txt'], results)
+ # named items never skipped, even if over max
+ out, err = self.run_bzr(["add", "big2.txt"])
+ results = sorted(out.rstrip('\n').split('\n'))
+ self.assertEquals(['adding big2.txt'], results)
+ self.assertEquals("", err)
+ tree.branch.get_config_stack().set('add.maximum_file_size', 30)
+ out = self.run_bzr('add')[0]
+ results = sorted(out.rstrip('\n').split('\n'))
+ self.assertEquals(['adding big.txt'], results)
diff --git a/bzrlib/tests/blackbox/test_added.py b/bzrlib/tests/blackbox/test_added.py
new file mode 100644
index 0000000..28420c1
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_added.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for 'bzr added', which shows newly-added files."""
+
+import os
+
+from bzrlib.branch import Branch
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestAdded(TestCaseWithTransport):
+
+ def test_added(self):
+ """Test that 'added' command reports added files"""
+ self._test_added('a', 'a\n')
+
+ def test_added_with_spaces(self):
+ """Test that 'added' command reports added files with spaces in their names quoted"""
+ self._test_added('a filename with spaces', '"a filename with spaces"\n')
+
+ def test_added_null_separator(self):
+ """Test that added uses its null operator properly"""
+ self._test_added('a', 'a\0', null=True)
+
+ def _test_added(self, name, output, null=False):
+
+ def check_added(expected, null=False):
+ command = 'added'
+
+ if null:
+ command += ' --null'
+
+ out, err = self.run_bzr(command)
+ self.assertEquals(out, expected)
+ self.assertEquals(err, '')
+
+ # in empty directory, nothing added
+ tree = self.make_branch_and_tree('.')
+ check_added('')
+
+ # with unknown file, still nothing added
+ self.build_tree_contents([(name, 'contents of %s\n' % (name))])
+ check_added('')
+
+ # after add, shows up in list
+ # bug report 20060119 by Nathan McCallum -- 'bzr added' causes
+ # NameError
+ tree.add(name)
+ check_added(output, null)
+
+ # after commit, now no longer listed
+ tree.commit(message='add "%s"' % (name))
+ check_added('')
+
+ def test_added_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ tree.add('README')
+ out, err = self.run_bzr(['added', '--directory=a'])
+ self.assertEquals('README\n', out)
diff --git a/bzrlib/tests/blackbox/test_alias.py b/bzrlib/tests/blackbox/test_alias.py
new file mode 100644
index 0000000..b795287
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_alias.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests of the 'bzr alias' command."""
+
+from bzrlib import (
+ config,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestAlias(tests.TestCaseWithTransport):
+
+ def test_list_alias_with_none(self):
+ """Calling alias with no parameters lists existing aliases."""
+ out, err = self.run_bzr('alias')
+ self.assertEquals('', out)
+
+ def test_list_unknown_alias(self):
+ out, err = self.run_bzr('alias commit')
+ self.assertEquals('bzr alias: commit: not found\n', out)
+
+ def test_add_alias_outputs_nothing(self):
+ out, err = self.run_bzr('alias commit="commit --strict"')
+ self.assertEquals('', out)
+
+ def test_add_alias_visible(self):
+ """Adding an alias makes it ..."""
+ self.run_bzr('alias commit="commit --strict"')
+ out, err = self.run_bzr('alias commit')
+ self.assertEquals('bzr alias commit="commit --strict"\n', out)
+
+ def test_unicode_alias(self):
+ """Unicode aliases should work (Bug #529930)"""
+ # XXX: strictly speaking, lack of unicode filenames doesn't imply that
+ # unicode command lines aren't available.
+ self.requireFeature(features.UnicodeFilenameFeature)
+ file_name = u'foo\xb6'
+
+ tree = self.make_branch_and_tree('.')
+ self.build_tree([file_name])
+ tree.add(file_name)
+ tree.commit('added')
+
+ config.GlobalConfig.from_string(
+ u'[ALIASES]\nust=st %s\n' % (file_name,), save=True)
+
+ out, err = self.run_bzr('ust')
+ self.assertEquals(err, '')
+ self.assertEquals(out, '')
+
+ def test_alias_listing_alphabetical(self):
+ self.run_bzr('alias commit="commit --strict"')
+ self.run_bzr('alias ll="log --short"')
+ self.run_bzr('alias add="add -q"')
+
+ out, err = self.run_bzr('alias')
+ self.assertEquals(
+ 'bzr alias add="add -q"\n'
+ 'bzr alias commit="commit --strict"\n'
+ 'bzr alias ll="log --short"\n',
+ out)
+
+ def test_remove_unknown_alias(self):
+ out, err = self.run_bzr('alias --remove fooix', retcode=3)
+ self.assertEquals('bzr: ERROR: The alias "fooix" does not exist.\n',
+ err)
+
+ def test_remove_known_alias(self):
+ self.run_bzr('alias commit="commit --strict"')
+ out, err = self.run_bzr('alias commit')
+ self.assertEquals('bzr alias commit="commit --strict"\n', out)
+ # No output when removing an existing alias.
+ out, err = self.run_bzr('alias --remove commit')
+ self.assertEquals('', out)
+        # Now it's gone.
+ out, err = self.run_bzr('alias commit')
+ self.assertEquals("bzr alias: commit: not found\n", out)
diff --git a/bzrlib/tests/blackbox/test_aliases.py b/bzrlib/tests/blackbox/test_aliases.py
new file mode 100644
index 0000000..b31c11d
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_aliases.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+"""Black-box tests for bzr aliases.
+"""
+
+import os
+
+from bzrlib import config
+from bzrlib.branch import Branch
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.trace import mutter
+
+
+class TestAliases(TestCaseWithTransport):
+
+ def test_aliases(self):
+
+ def bzr(args, **kwargs):
+ return self.run_bzr(args, **kwargs)[0]
+
+ def bzr_catch_error(args, **kwargs):
+ return self.run_bzr(args, **kwargs)[1]
+
+
+ conf = config.GlobalConfig.from_string('''[ALIASES]
+c=cat
+c1=cat -r 1
+c2=cat -r 1 -r2
+''', save=True)
+
+ str1 = 'foo\n'
+ str2 = 'bar\n'
+
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('a', str1)])
+ tree.add('a')
+ tree.commit(message='1')
+
+ self.assertEquals(bzr('c a'), str1)
+
+ self.build_tree_contents([('a', str2)])
+ tree.commit(message='2')
+
+ self.assertEquals(bzr('c a'), str2)
+ self.assertEquals(bzr('c1 a'), str1)
+ self.assertEquals(bzr('c1 --revision 2 a'), str2)
+
+ # If --no-aliases isn't working, we will not get retcode=3
+ bzr('--no-aliases c a', retcode=3)
+
+ # If --no-aliases breaks all of bzr, we also get retcode=3
+ # So we need to catch the output as well
+ self.assertEquals(bzr_catch_error('--no-aliases c a',
+ retcode=None),
+ 'bzr: ERROR: unknown command "c"\n')
+
+ bzr('c -r1 -r2', retcode=3)
+ bzr('c1 -r1 -r2', retcode=3)
+ bzr('c2', retcode=3)
+ bzr('c2 -r1', retcode=3)
diff --git a/bzrlib/tests/blackbox/test_ancestry.py b/bzrlib/tests/blackbox/test_ancestry.py
new file mode 100644
index 0000000..c55e7cf
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_ancestry.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib.tests import TestCaseWithTransport
+
+class TestAncestry(TestCaseWithTransport):
+
+ def _build_branches(self):
+ a_wt = self.make_branch_and_tree('A')
+ self.build_tree_contents([('A/foo', '1111\n')])
+ a_wt.add('foo')
+ a_wt.commit('added foo',rev_id='A1')
+
+ b_wt = a_wt.bzrdir.sprout('B').open_workingtree()
+ self.build_tree_contents([('B/foo', '1111\n22\n')])
+ b_wt.commit('modified B/foo',rev_id='B1')
+
+ self.build_tree_contents([('A/foo', '000\n1111\n')])
+ a_wt.commit('modified A/foo',rev_id='A2')
+
+ a_wt.merge_from_branch(b_wt.branch, b_wt.last_revision(),
+ b_wt.branch.get_rev_id(1))
+ a_wt.commit('merged B into A',rev_id='A3')
+ return a_wt, b_wt
+
+ def _check_ancestry(self, location='', result=None):
+ out = self.run_bzr(['ancestry', location])[0]
+ if result is not None:
+ self.assertEqualDiff(result, out)
+ else:
+ # A2 and B1 can be in either order, because they are parallel, and
+ # thus their topological order is not defined
+ result = "A1\nB1\nA2\nA3\n"
+ if result != out:
+ result = "A1\nA2\nB1\nA3\n"
+ self.assertEqualDiff(result, out)
+
+ def test_ancestry(self):
+ """Tests 'ancestry' command"""
+ self._build_branches()
+ os.chdir('A')
+ self._check_ancestry()
+
+ def test_ancestry_with_location(self):
+ """Tests 'ancestry' command with a specified location."""
+ self._build_branches()
+ self._check_ancestry('A')
+
+ def test_ancestry_with_repo_branch(self):
+ """Tests 'ancestry' command with a location that is a
+ repository branch."""
+ a_tree = self._build_branches()[0]
+
+ self.make_repository('repo', shared=True)
+
+ a_tree.bzrdir.sprout('repo/A')
+ self._check_ancestry('repo/A')
+
+ def test_ancestry_with_checkout(self):
+ """Tests 'ancestry' command with a location that is a
+ checkout of a repository branch."""
+ a_tree = self._build_branches()[0]
+ self.make_repository('repo', shared=True)
+ repo_branch = a_tree.bzrdir.sprout('repo/A').open_branch()
+ repo_branch.create_checkout('A-checkout')
+ self._check_ancestry('A-checkout')
+
+ def test_ancestry_with_lightweight_checkout(self):
+ """Tests 'ancestry' command with a location that is a
+ lightweight checkout of a repository branch."""
+ a_tree = self._build_branches()[0]
+ self.make_repository('repo', shared=True)
+ repo_branch = a_tree.bzrdir.sprout('repo/A').open_branch()
+ repo_branch.create_checkout('A-checkout', lightweight=True)
+ self._check_ancestry('A-checkout')
+
+ def test_ancestry_with_truncated_checkout(self):
+ """Tests 'ancestry' command with a location that is a
+ checkout of a repository branch with a shortened revision history."""
+ a_tree = self._build_branches()[0]
+ self.make_repository('repo', shared=True)
+ repo_branch = a_tree.bzrdir.sprout('repo/A').open_branch()
+ repo_branch.create_checkout('A-checkout',
+ revision_id=repo_branch.get_rev_id(2))
+ self._check_ancestry('A-checkout', "A1\nA2\n")
+
+ def test_ancestry_with_truncated_lightweight_checkout(self):
+ """Tests 'ancestry' command with a location that is a lightweight
+ checkout of a repository branch with a shortened revision history."""
+ a_tree = self._build_branches()[0]
+ self.make_repository('repo', shared=True)
+ repo_branch = a_tree.bzrdir.sprout('repo/A').open_branch()
+ repo_branch.create_checkout('A-checkout',
+ revision_id=repo_branch.get_rev_id(2),
+ lightweight=True)
+ self._check_ancestry('A-checkout', "A1\nA2\n")
diff --git a/bzrlib/tests/blackbox/test_annotate.py b/bzrlib/tests/blackbox/test_annotate.py
new file mode 100644
index 0000000..badfe88
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_annotate.py
@@ -0,0 +1,333 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr.
+
+These check that it behaves properly when it's invoked through the regular
+command-line interface. This doesn't actually run a new interpreter but
+rather starts again from the run_bzr function.
+"""
+
+
+from bzrlib import (
+ config,
+ tests,
+ )
+
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.urlutils import joinpath
+
+
+class TestAnnotate(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestAnnotate, self).setUp()
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ self.build_tree_contents([('hello.txt', 'my helicopter\n'),
+ ('nomail.txt', 'nomail\n')])
+ wt.add(['hello.txt'])
+ self.revision_id_1 = wt.commit('add hello',
+ committer='test@user',
+ timestamp=1165960000.00, timezone=0)
+ wt.add(['nomail.txt'])
+ self.revision_id_2 = wt.commit('add nomail',
+ committer='no mail',
+ timestamp=1165970000.00, timezone=0)
+ self.build_tree_contents([('hello.txt', 'my helicopter\n'
+ 'your helicopter\n')])
+ self.revision_id_3 = wt.commit('mod hello',
+ committer='user@test',
+ timestamp=1166040000.00, timezone=0)
+ self.build_tree_contents([('hello.txt', 'my helicopter\n'
+ 'your helicopter\n'
+ 'all of\n'
+ 'our helicopters\n'
+ )])
+ self.revision_id_4 = wt.commit('mod hello',
+ committer='user@test',
+ timestamp=1166050000.00, timezone=0)
+
+ def test_help_annotate(self):
+ """Annotate command exists"""
+ out, err = self.run_bzr('--no-plugins annotate --help')
+
+ def test_annotate_cmd(self):
+ out, err = self.run_bzr('annotate hello.txt')
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+1 test@us | my helicopter
+3 user@te | your helicopter
+4 user@te | all of
+ | our helicopters
+''', out)
+
+ def test_annotate_cmd_full(self):
+ out, err = self.run_bzr('annotate hello.txt --all')
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+1 test@us | my helicopter
+3 user@te | your helicopter
+4 user@te | all of
+4 user@te | our helicopters
+''', out)
+
+ def test_annotate_cmd_long(self):
+ out, err = self.run_bzr('annotate hello.txt --long')
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+1 test@user 20061212 | my helicopter
+3 user@test 20061213 | your helicopter
+4 user@test 20061213 | all of
+ | our helicopters
+''', out)
+
+ def test_annotate_cmd_show_ids(self):
+ out, err = self.run_bzr('annotate hello.txt --show-ids')
+ max_len = max([len(self.revision_id_1),
+ len(self.revision_id_3),
+ len(self.revision_id_4)])
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+%*s | my helicopter
+%*s | your helicopter
+%*s | all of
+%*s | our helicopters
+''' % (max_len, self.revision_id_1,
+ max_len, self.revision_id_3,
+ max_len, self.revision_id_4,
+ max_len, '',
+ )
+, out)
+
+ def test_no_mail(self):
+ out, err = self.run_bzr('annotate nomail.txt')
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+2 no mail | nomail
+''', out)
+
+ def test_annotate_cmd_revision(self):
+ out, err = self.run_bzr('annotate hello.txt -r1')
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+1 test@us | my helicopter
+''', out)
+
+ def test_annotate_cmd_revision3(self):
+ out, err = self.run_bzr('annotate hello.txt -r3')
+ self.assertEqual('', err)
+ self.assertEqualDiff('''\
+1 test@us | my helicopter
+3 user@te | your helicopter
+''', out)
+
+ def test_annotate_cmd_unknown_revision(self):
+ out, err = self.run_bzr('annotate hello.txt -r 10',
+ retcode=3)
+ self.assertEqual('', out)
+ self.assertContainsRe(err, "Requested revision: '10' does not exist")
+
+ def test_annotate_cmd_two_revisions(self):
+ out, err = self.run_bzr('annotate hello.txt -r1..2',
+ retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual('bzr: ERROR: bzr annotate --revision takes'
+ ' exactly one revision identifier\n',
+ err)
+
+
+class TestSimpleAnnotate(tests.TestCaseWithTransport):
+ """Annotate tests with no complex setup."""
+
+ def _setup_edited_file(self, relpath='.'):
+ """Create a tree with a locally edited file."""
+ tree = self.make_branch_and_tree(relpath)
+ file_relpath = joinpath(relpath, 'file')
+ self.build_tree_contents([(file_relpath, 'foo\ngam\n')])
+ tree.add('file')
+ tree.commit('add file', committer="test@host", rev_id="rev1")
+ self.build_tree_contents([(file_relpath, 'foo\nbar\ngam\n')])
+ return tree
+
+ def test_annotate_cmd_revspec_branch(self):
+ tree = self._setup_edited_file('trunk')
+ tree.branch.create_checkout(self.get_url('work'), lightweight=True)
+ out, err = self.run_bzr(['annotate', 'file', '-r', 'branch:../trunk'],
+ working_dir='work')
+ self.assertEqual('', err)
+ self.assertEqual(
+ '1 test@ho | foo\n'
+ ' | gam\n',
+ out)
+
+ def test_annotate_edited_file(self):
+ tree = self._setup_edited_file()
+ self.overrideEnv('BZR_EMAIL', 'current@host2')
+ out, err = self.run_bzr('annotate file')
+ self.assertEqual(
+ '1 test@ho | foo\n'
+ '2? current | bar\n'
+ '1 test@ho | gam\n',
+ out)
+
+ def test_annotate_edited_file_no_default(self):
+ # Ensure that when no username is available annotate still works.
+ self.overrideEnv('EMAIL', None)
+ self.overrideEnv('BZR_EMAIL', None)
+ # Also, make sure that it's not inferred from mailname.
+ self.overrideAttr(config, '_auto_user_id',
+ lambda: (None, None))
+ tree = self._setup_edited_file()
+ out, err = self.run_bzr('annotate file')
+ self.assertEqual(
+ '1 test@ho | foo\n'
+ '2? local u | bar\n'
+ '1 test@ho | gam\n',
+ out)
+
+ def test_annotate_edited_file_show_ids(self):
+ tree = self._setup_edited_file()
+ self.overrideEnv('BZR_EMAIL', 'current@host2')
+ out, err = self.run_bzr('annotate file --show-ids')
+ self.assertEqual(
+ ' rev1 | foo\n'
+ 'current: | bar\n'
+ ' rev1 | gam\n',
+ out)
+
+ def _create_merged_file(self):
+ """Create a file with a pending merge and local edit."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('file', 'foo\ngam\n')])
+ tree.add('file')
+ tree.commit('add file', rev_id="rev1", committer="test@host")
+ # right side
+ self.build_tree_contents([('file', 'foo\nbar\ngam\n')])
+ tree.commit("right", rev_id="rev1.1.1", committer="test@host")
+ tree.pull(tree.branch, True, "rev1")
+ # left side
+ self.build_tree_contents([('file', 'foo\nbaz\ngam\n')])
+ tree.commit("left", rev_id="rev2", committer="test@host")
+ # merge
+ tree.merge_from_branch(tree.branch, "rev1.1.1")
+ # edit the file to be 'resolved' and have a further local edit
+ self.build_tree_contents([('file', 'local\nfoo\nbar\nbaz\ngam\n')])
+ return tree
+
+ def test_annotated_edited_merged_file_revnos(self):
+ wt = self._create_merged_file()
+ out, err = self.run_bzr(['annotate', 'file'])
+ email = config.extract_email_address(
+ wt.branch.get_config_stack().get('email'))
+ self.assertEqual(
+ '3? %-7s | local\n'
+ '1 test@ho | foo\n'
+ '1.1.1 test@ho | bar\n'
+ '2 test@ho | baz\n'
+ '1 test@ho | gam\n' % email[:7],
+ out)
+
+ def test_annotated_edited_merged_file_ids(self):
+ self._create_merged_file()
+ out, err = self.run_bzr(['annotate', 'file', '--show-ids'])
+ self.assertEqual(
+ 'current: | local\n'
+ ' rev1 | foo\n'
+ 'rev1.1.1 | bar\n'
+ ' rev2 | baz\n'
+ ' rev1 | gam\n',
+ out)
+
+ def test_annotate_empty_file(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('empty', '')])
+ tree.add('empty')
+ tree.commit('add empty file')
+ out, err = self.run_bzr(['annotate', 'empty'])
+ self.assertEqual('', out)
+
+ def test_annotate_removed_file(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('empty', '')])
+ tree.add('empty')
+ tree.commit('add empty file')
+ # delete the file.
+ tree.remove('empty')
+ tree.commit('remove empty file')
+ out, err = self.run_bzr(['annotate', '-r1', 'empty'])
+ self.assertEqual('', out)
+
+ def test_annotate_empty_file_show_ids(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('empty', '')])
+ tree.add('empty')
+ tree.commit('add empty file')
+ out, err = self.run_bzr(['annotate', '--show-ids', 'empty'])
+ self.assertEqual('', out)
+
+ def test_annotate_nonexistant_file(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ tree.add(['file'])
+ tree.commit('add a file')
+ out, err = self.run_bzr(['annotate', 'doesnotexist'], retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual("bzr: ERROR: doesnotexist is not versioned.\n", err)
+
+ def test_annotate_without_workingtree(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('empty', '')])
+ tree.add('empty')
+ tree.commit('add empty file')
+ bzrdir = tree.branch.bzrdir
+ bzrdir.destroy_workingtree()
+ self.assertFalse(bzrdir.has_workingtree())
+ out, err = self.run_bzr(['annotate', 'empty'])
+ self.assertEqual('', out)
+
+ def test_annotate_directory(self):
+ """Test --directory option"""
+ wt = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/hello.txt', 'my helicopter\n')])
+ wt.add(['hello.txt'])
+ wt.commit('commit', committer='test@user')
+ out, err = self.run_bzr(['annotate', '-d', 'a', 'hello.txt'])
+ self.assertEqualDiff('1 test@us | my helicopter\n', out)
+
+
+class TestSmartServerAnnotate(tests.TestCaseWithTransport):
+
+ def test_simple_annotate(self):
+ self.setup_smart_server_with_call_log()
+ wt = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/hello.txt', 'my helicopter\n')])
+ wt.add(['hello.txt'])
+ wt.commit('commit', committer='test@user')
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['annotate', "-d", self.get_url('branch'),
+ "hello.txt"])
+        # This figure represents the amount of work to perform this use case.
+        # It is entirely ok to reduce this number if a test fails due to
+        # rpc_count being too low. If rpc_count increases, more network
+        # roundtrips have become necessary for this use case. Please do not
+        # adjust this number upwards without agreement from bzr's network
+        # support maintainers.
+ self.assertLength(16, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.expectFailure("annotate accesses inventories, which require VFS access",
+ self.assertThat, self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_bound_branches.py b/bzrlib/tests/blackbox/test_bound_branches.py
new file mode 100644
index 0000000..1395101
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_bound_branches.py
@@ -0,0 +1,386 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests of bound branches (binding, unbinding, commit, etc) command."""
+
+from bzrlib import (
+ branch,
+ controldir,
+ errors,
+ tests,
+ )
+from bzrlib.tests import script
+
+
+class TestBoundBranches(tests.TestCaseWithTransport):
+
+ def create_branches(self):
+ base_tree = self.make_branch_and_tree('base')
+ base_tree.lock_write()
+ self.build_tree(['base/a', 'base/b'])
+ base_tree.add(['a', 'b'])
+ base_tree.commit('init')
+ base_tree.unlock()
+
+ child_tree = base_tree.branch.create_checkout('child')
+
+ self.check_revno(1, 'child')
+ d = controldir.ControlDir.open('child')
+ self.assertNotEqual(None, d.open_branch().get_master_branch())
+
+ return base_tree, child_tree
+
+ def check_revno(self, val, loc='.'):
+ self.assertEqual(
+ val, controldir.ControlDir.open(loc).open_branch().last_revision_info()[0])
+
+ def test_simple_binding(self):
+ tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/a', 'base/b'])
+ tree.add('a', 'b')
+ tree.commit(message='init')
+
+ tree.bzrdir.sprout('child')
+
+ self.run_bzr('bind ../base', working_dir='child')
+
+ d = controldir.ControlDir.open('child')
+ self.assertNotEqual(None, d.open_branch().get_master_branch())
+
+ self.run_bzr('unbind', working_dir='child')
+ self.assertEqual(None, d.open_branch().get_master_branch())
+
+ self.run_bzr('unbind', retcode=3, working_dir='child')
+
+ def test_bind_branch6(self):
+ branch1 = self.make_branch('branch1', format='dirstate-tags')
+ error = self.run_bzr('bind', retcode=3, working_dir='branch1')[1]
+ self.assertEndsWith(
+ error, 'No location supplied and no previous location known\n')
+
+ def setup_rebind(self, format):
+ branch1 = self.make_branch('branch1')
+ branch2 = self.make_branch('branch2', format=format)
+ branch2.bind(branch1)
+ branch2.unbind()
+
+ def test_rebind_branch6(self):
+ self.setup_rebind('dirstate-tags')
+ self.run_bzr('bind', working_dir='branch2')
+ b = branch.Branch.open('branch2')
+ self.assertEndsWith(b.get_bound_location(), '/branch1/')
+
+ def test_rebind_branch5(self):
+ self.setup_rebind('knit')
+ error = self.run_bzr('bind', retcode=3, working_dir='branch2')[1]
+ self.assertEndsWith(
+ error, 'No location supplied. This format does not remember'
+ ' old locations.\n')
+
+ def test_bound_commit(self):
+ child_tree = self.create_branches()[1]
+
+ self.build_tree_contents([('child/a', 'new contents')])
+ child_tree.commit(message='child')
+
+ self.check_revno(2, 'child')
+
+ # Make sure it committed on the parent
+ self.check_revno(2, 'base')
+
+ def test_bound_fail(self):
+ # Make sure commit fails if out of date.
+ base_tree, child_tree = self.create_branches()
+
+ self.build_tree_contents([
+ ('base/a', 'new base contents\n' ),
+ ('child/b', 'new b child contents\n')])
+ base_tree.commit(message='base')
+ self.check_revno(2, 'base')
+
+ self.check_revno(1, 'child')
+ self.assertRaises(errors.BoundBranchOutOfDate, child_tree.commit,
+ message='child')
+ self.check_revno(1, 'child')
+
+ child_tree.update()
+ self.check_revno(2, 'child')
+
+ child_tree.commit(message='child')
+ self.check_revno(3, 'child')
+ self.check_revno(3, 'base')
+
+ def test_double_binding(self):
+ child_tree = self.create_branches()[1]
+ child_tree.bzrdir.sprout('child2')
+
+ # Double binding succeeds, but committing to child2 should fail
+ self.run_bzr('bind ../child', working_dir='child2')
+
+ # Open the child2 tree afresh, as 'bind' modified it on disk
+ child2_tree = controldir.ControlDir.open('child2').open_workingtree()
+ self.assertRaises(errors.CommitToDoubleBoundBranch,
+ child2_tree.commit, message='child2', allow_pointless=True)
+
+ def test_unbinding(self):
+ base_tree, child_tree = self.create_branches()
+
+ self.build_tree_contents([
+ ('base/a', 'new base contents\n' ),
+ ('child/b', 'new b child contents\n')])
+
+ base_tree.commit(message='base')
+ self.check_revno(2, 'base')
+
+ self.check_revno(1, 'child')
+ self.run_bzr("commit -m child", retcode=3, working_dir='child')
+ self.check_revno(1, 'child')
+ self.run_bzr('unbind', working_dir='child')
+ # Refresh the child tree/branch objects as 'unbind' modified them
+ child_tree = child_tree.bzrdir.open_workingtree()
+ child_tree.commit(message='child')
+ self.check_revno(2, 'child')
+
+ def test_commit_remote_bound(self):
+ # It is not possible to commit to a branch
+ # which is bound to a branch which is bound
+ base_tree, child_tree = self.create_branches()
+ base_tree.bzrdir.sprout('newbase')
+
+ # There is no way to know that B has already
+ # been bound by someone else, otherwise it
+ # might be nice if this would fail
+ self.run_bzr('bind ../newbase', working_dir='base')
+
+ self.run_bzr('commit -m failure --unchanged', retcode=3,
+ working_dir='child')
+
+ def test_pull_updates_both(self):
+ base_tree = self.create_branches()[0]
+ newchild_tree = base_tree.bzrdir.sprout('newchild').open_workingtree()
+ self.build_tree_contents([('newchild/b', 'newchild b contents\n')])
+ newchild_tree.commit(message='newchild')
+ self.check_revno(2, 'newchild')
+
+ # The pull should succeed, and update
+ # the bound parent branch
+ self.run_bzr('pull ../newchild', working_dir='child')
+ self.check_revno(2, 'child')
+ self.check_revno(2, 'base')
+
+ def test_pull_local_updates_local(self):
+ base_tree = self.create_branches()[0]
+ newchild_tree = base_tree.bzrdir.sprout('newchild').open_workingtree()
+ self.build_tree_contents([('newchild/b', 'newchild b contents\n')])
+ newchild_tree.commit(message='newchild')
+ self.check_revno(2, 'newchild')
+
+ # The pull should succeed, and update
+ # the bound parent branch
+ self.run_bzr('pull ../newchild --local', working_dir='child')
+ self.check_revno(2, 'child')
+ self.check_revno(1, 'base')
+
+ def test_bind_diverged(self):
+ base_tree, child_tree = self.create_branches()
+ base_branch = base_tree.branch
+ child_branch = child_tree.branch
+
+ self.run_bzr('unbind', working_dir='child')
+
+ # Refresh the child tree/branch objects as 'unbind' modified them
+ child_tree = child_tree.bzrdir.open_workingtree()
+ child_tree.commit(message='child', allow_pointless=True)
+ self.check_revno(2, 'child')
+
+ self.check_revno(1, 'base')
+ base_tree.commit(message='base', allow_pointless=True)
+ self.check_revno(2, 'base')
+
+ # These branches have diverged, but bind should succeed anyway
+ self.run_bzr('bind ../base', working_dir='child')
+
+ # Refresh the child tree/branch objects as 'bind' modified them
+ child_tree = child_tree.bzrdir.open_workingtree()
+ # This should turn the local commit into a merge
+ child_tree.update()
+ child_tree.commit(message='merged')
+ self.check_revno(3, 'child')
+ self.assertEquals(child_tree.branch.last_revision(),
+ base_tree.branch.last_revision())
+
+ def test_bind_parent_ahead(self):
+ base_tree = self.create_branches()[0]
+
+ self.run_bzr('unbind', working_dir='child')
+
+ base_tree.commit(message='base', allow_pointless=True)
+
+ self.check_revno(1, 'child')
+ self.run_bzr('bind ../base', working_dir='child')
+
+ # binding does not pull data:
+ self.check_revno(1, 'child')
+ self.run_bzr('unbind', working_dir='child')
+
+ # Check and make sure it also works if the parent is ahead by multiple revisions
+ base_tree.commit(message='base 3', allow_pointless=True)
+ base_tree.commit(message='base 4', allow_pointless=True)
+ base_tree.commit(message='base 5', allow_pointless=True)
+ self.check_revno(5, 'base')
+
+ self.check_revno(1, 'child')
+ self.run_bzr('bind ../base', working_dir='child')
+ self.check_revno(1, 'child')
+
+ def test_bind_child_ahead(self):
+ # test binding when the master branch's history is a prefix of the
+ # child's - it should bind ok but the revision histories should not
+ # be altered
+ child_tree = self.create_branches()[1]
+
+ self.run_bzr('unbind', working_dir='child')
+ # Refresh the child tree/branch objects as 'unbind' modified them
+ child_tree = child_tree.bzrdir.open_workingtree()
+ child_tree.commit(message='child', allow_pointless=True)
+ self.check_revno(2, 'child')
+ self.check_revno(1, 'base')
+
+ self.run_bzr('bind ../base', working_dir='child')
+ self.check_revno(1, 'base')
+
+ # Check and make sure it also works if the child is ahead by multiple revisions
+ self.run_bzr('unbind', working_dir='child')
+ child_tree.commit(message='child 3', allow_pointless=True)
+ child_tree.commit(message='child 4', allow_pointless=True)
+ child_tree.commit(message='child 5', allow_pointless=True)
+ self.check_revno(5, 'child')
+
+ self.check_revno(1, 'base')
+ self.run_bzr('bind ../base', working_dir='child')
+ self.check_revno(1, 'base')
+
+ def test_bind_fail_if_missing(self):
+ """We should not be able to bind to a missing branch."""
+ tree = self.make_branch_and_tree('tree_1')
+ tree.commit('dummy commit')
+ self.run_bzr_error(['Not a branch.*no-such-branch/'],
+ ['bind', '../no-such-branch'],
+ working_dir='tree_1')
+ self.assertIs(None, tree.branch.get_bound_location())
+
+ def test_commit_after_merge(self):
+ base_tree, child_tree = self.create_branches()
+
+ # We want merge to be able to be a local-only
+ # operation, because it can be performed without violating
+ # the binding invariants.
+ # But the commit afterwards must not be allowed to fail.
+ other_tree = child_tree.bzrdir.sprout('other').open_workingtree()
+ other_branch = other_tree.branch
+
+ self.build_tree_contents([('other/c', 'file c\n')])
+ other_tree.add('c')
+ other_tree.commit(message='adding c')
+ new_rev_id = other_branch.last_revision()
+
+ child_tree.merge_from_branch(other_branch)
+
+ self.assertPathExists('child/c')
+ self.assertEqual([new_rev_id], child_tree.get_parent_ids()[1:])
+
+ # Make sure the local branch has the installed revision
+ self.assertTrue(child_tree.branch.repository.has_revision(new_rev_id))
+
+ # And make sure that the base tree does not
+ self.assertFalse(base_tree.branch.repository.has_revision(new_rev_id))
+
+ # Commit should succeed, and cause merged revisions to
+ # be pulled into base
+ self.run_bzr(['commit', '-m', 'merge other'], working_dir='child')
+ self.check_revno(2, 'child')
+ self.check_revno(2, 'base')
+ self.assertTrue(base_tree.branch.repository.has_revision(new_rev_id))
+
+ def test_pull_overwrite(self):
+ # XXX: This test should be moved to branch-implementations/test_pull
+ child_tree = self.create_branches()[1]
+
+ other_tree = child_tree.bzrdir.sprout('other').open_workingtree()
+
+ self.build_tree_contents([('other/a', 'new contents\n')])
+ other_tree.commit(message='changed a')
+ self.check_revno(2, 'other')
+ self.build_tree_contents([
+ ('other/a', 'new contents\nand then some\n')])
+ other_tree.commit(message='another a')
+ self.check_revno(3, 'other')
+ self.build_tree_contents([
+ ('other/a', 'new contents\nand then some\nand some more\n')])
+ other_tree.commit('yet another a')
+ self.check_revno(4, 'other')
+
+ self.build_tree_contents([('child/a', 'also changed a\n')])
+ child_tree.commit(message='child modified a')
+
+ self.check_revno(2, 'child')
+ self.check_revno(2, 'base')
+
+ self.run_bzr('pull --overwrite ../other', working_dir='child')
+
+ # both the local and master should have been updated.
+ self.check_revno(4, 'child')
+ self.check_revno(4, 'base')
+
+ def test_bind_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/a', 'base/b'])
+ tree.add('a', 'b')
+ tree.commit(message='init')
+ branch = tree.branch
+ tree.bzrdir.sprout('child')
+ self.run_bzr('bind --directory=child base')
+ d = controldir.ControlDir.open('child')
+ self.assertNotEqual(None, d.open_branch().get_master_branch())
+ self.run_bzr('unbind -d child')
+ self.assertEqual(None, d.open_branch().get_master_branch())
+ self.run_bzr('unbind --directory child', retcode=3)
+
+
+class TestBind(script.TestCaseWithTransportAndScript):
+
+ def test_bind_when_bound(self):
+ self.run_script("""
+$ bzr init trunk
+...
+$ bzr init copy
+...
+$ cd copy
+$ bzr bind ../trunk
+$ bzr bind
+2>bzr: ERROR: Branch is already bound
+""")
+
+ def test_bind_before_bound(self):
+ self.run_script("""
+$ bzr init trunk
+...
+$ cd trunk
+$ bzr bind
+2>bzr: ERROR: No location supplied and no previous location known
+""")
diff --git a/bzrlib/tests/blackbox/test_branch.py b/bzrlib/tests/blackbox/test_branch.py
new file mode 100644
index 0000000..8a20f7b
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_branch.py
@@ -0,0 +1,657 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr branch."""
+
+import os
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ controldir,
+ errors,
+ revision as _mod_revision,
+ tests,
+ )
+from bzrlib.repofmt.knitrepo import RepositoryFormatKnit1
+from bzrlib.tests import (
+ fixtures,
+ test_server,
+ )
+from bzrlib.tests.features import (
+ HardlinkFeature,
+ )
+from bzrlib.tests.blackbox import test_switch
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.tests.script import run_script
+from bzrlib.urlutils import local_path_to_url, strip_trailing_slash
+from bzrlib.workingtree import WorkingTree
+
+
+class TestBranch(tests.TestCaseWithTransport):
+
+ def example_branch(self, path='.', format=None):
+ tree = self.make_branch_and_tree(path, format=format)
+ self.build_tree_contents([(path + '/hello', 'foo')])
+ tree.add('hello')
+ tree.commit(message='setup')
+ self.build_tree_contents([(path + '/goodbye', 'baz')])
+ tree.add('goodbye')
+ tree.commit(message='setup')
+ return tree
+
+ def test_branch(self):
+ """Branch from one branch to another."""
+ self.example_branch('a')
+ self.run_bzr('branch a b')
+ b = branch.Branch.open('b')
+ self.run_bzr('branch a c -r 1')
+ # previously was erroneously created by branching
+ self.assertFalse(b._transport.has('branch-name'))
+ b.bzrdir.open_workingtree().commit(message='foo', allow_pointless=True)
+
+ def test_branch_no_to_location(self):
+ """The to_location is derived from the source branch name."""
+ os.mkdir("something")
+ a = self.example_branch('something/a').branch
+ self.run_bzr('branch something/a')
+ b = branch.Branch.open('a')
+ self.assertEquals(b.last_revision_info(), a.last_revision_info())
+
+ def test_into_colocated(self):
+ """Branch from a branch into a colocated branch."""
+ self.example_branch('a')
+ out, err = self.run_bzr(
+ 'init --format=development-colo file:b,branch=orig')
+ self.assertEqual(
+ """Created a lightweight checkout (format: development-colo)\n""",
+ out)
+ self.assertEqual('', err)
+ out, err = self.run_bzr(
+ 'branch a file:b,branch=thiswasa')
+ self.assertEqual('', out)
+ self.assertEqual('Branched 2 revisions.\n', err)
+ out, err = self.run_bzr('branches b')
+ self.assertEqual(" orig\n thiswasa\n", out)
+ self.assertEqual('', err)
+ out, err = self.run_bzr('branch a file:b,branch=orig', retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual(
+ 'bzr: ERROR: Already a branch: "file:b,branch=orig".\n', err)
+
+ def test_from_colocated(self):
+ """Branch from a colocated branch into a regular branch."""
+ tree = self.example_branch('a', format='development-colo')
+ tree.bzrdir.create_branch(name='somecolo')
+ out, err = self.run_bzr('branch %s,branch=somecolo' %
+ local_path_to_url('a'))
+ self.assertEqual('', out)
+ self.assertEqual('Branched 0 revisions.\n', err)
+ self.assertPathExists("somecolo")
+
+ def test_branch_broken_pack(self):
+ """branching with a corrupted pack file."""
+ self.example_branch('a')
+ # add some corruption
+ packs_dir = 'a/.bzr/repository/packs/'
+ fname = packs_dir + os.listdir(packs_dir)[0]
+ with open(fname, 'rb+') as f:
+ # Start from the end of the file to avoid choosing a place bigger
+ # than the file itself.
+ f.seek(-5, os.SEEK_END)
+ c = f.read(1)
+ f.seek(-5, os.SEEK_END)
+ # Make sure we inject a value different from the one we just read
+ if c == '\xFF':
+ corrupt = '\x00'
+ else:
+ corrupt = '\xFF'
+ f.write(corrupt) # make sure we corrupt something
+ self.run_bzr_error(['Corruption while decompressing repository file'],
+ 'branch a b', retcode=3)
+
+ def test_branch_switch_no_branch(self):
+ # No branch in the current directory:
+ # => new branch will be created, but switch fails
+ self.example_branch('a')
+ self.make_repository('current')
+ self.run_bzr_error(['No WorkingTree exists for'],
+ 'branch --switch ../a ../b', working_dir='current')
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.last_revision(), b.last_revision())
+
+ def test_branch_switch_no_wt(self):
+ # No working tree in the current directory:
+ # => new branch will be created, but switch fails and the current
+ # branch is unmodified
+ self.example_branch('a')
+ self.make_branch('current')
+ self.run_bzr_error(['No WorkingTree exists for'],
+ 'branch --switch ../a ../b', working_dir='current')
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.last_revision(), b.last_revision())
+ work = branch.Branch.open('current')
+ self.assertEqual(work.last_revision(), _mod_revision.NULL_REVISION)
+
+ def test_branch_switch_no_checkout(self):
+ # Standalone branch in the current directory:
+ # => new branch will be created, but switch fails and the current
+ # branch is unmodified
+ self.example_branch('a')
+ tree = self.make_branch_and_tree('current')
+ c1 = tree.commit('some diverged change')
+ self.run_bzr_error(['Cannot switch a branch, only a checkout'],
+ 'branch --switch ../a ../b', working_dir='current')
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.last_revision(), b.last_revision())
+ work = branch.Branch.open('current')
+ self.assertEqual(work.last_revision(), c1)
+
+ def test_branch_into_empty_dir(self):
+ t = self.example_branch('source')
+ self.make_bzrdir('target')
+ self.run_bzr("branch source target")
+ self.assertEquals(2, len(t.branch.repository.all_revision_ids()))
+
+ def test_branch_switch_checkout(self):
+ # Checkout in the current directory:
+ # => new branch will be created and checkout bound to the new branch
+ self.example_branch('a')
+ self.run_bzr('checkout a current')
+ out, err = self.run_bzr('branch --switch ../a ../b',
+ working_dir='current')
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.last_revision(), b.last_revision())
+ work = WorkingTree.open('current')
+ self.assertEndsWith(work.branch.get_bound_location(), '/b/')
+ self.assertContainsRe(err, "Switched to branch: .*/b/")
+
+ def test_branch_switch_lightweight_checkout(self):
+ # Lightweight checkout in the current directory:
+ # => new branch will be created and lightweight checkout pointed to
+ # the new branch
+ self.example_branch('a')
+ self.run_bzr('checkout --lightweight a current')
+ out, err = self.run_bzr('branch --switch ../a ../b',
+ working_dir='current')
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.last_revision(), b.last_revision())
+ work = WorkingTree.open('current')
+ self.assertEndsWith(work.branch.base, '/b/')
+ self.assertContainsRe(err, "Switched to branch: .*/b/")
+
+ def test_branch_only_copies_history(self):
+ # Knit branches should only push the history for the current revision.
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = RepositoryFormatKnit1()
+ shared_repo = self.make_repository('repo', format=format, shared=True)
+ shared_repo.set_make_working_trees(True)
+
+ def make_shared_tree(path):
+ shared_repo.bzrdir.root_transport.mkdir(path)
+ controldir.ControlDir.create_branch_convenience('repo/' + path)
+ return WorkingTree.open('repo/' + path)
+ tree_a = make_shared_tree('a')
+ self.build_tree(['repo/a/file'])
+ tree_a.add('file')
+ tree_a.commit('commit a-1', rev_id='a-1')
+ f = open('repo/a/file', 'ab')
+ f.write('more stuff\n')
+ f.close()
+ tree_a.commit('commit a-2', rev_id='a-2')
+
+ tree_b = make_shared_tree('b')
+ self.build_tree(['repo/b/file'])
+ tree_b.add('file')
+ tree_b.commit('commit b-1', rev_id='b-1')
+
+ self.assertTrue(shared_repo.has_revision('a-1'))
+ self.assertTrue(shared_repo.has_revision('a-2'))
+ self.assertTrue(shared_repo.has_revision('b-1'))
+
+ # Now that we have a repository with shared files, make sure
+ # that things aren't copied out by a 'branch'
+ self.run_bzr('branch repo/b branch-b')
+ pushed_tree = WorkingTree.open('branch-b')
+ pushed_repo = pushed_tree.branch.repository
+ self.assertFalse(pushed_repo.has_revision('a-1'))
+ self.assertFalse(pushed_repo.has_revision('a-2'))
+ self.assertTrue(pushed_repo.has_revision('b-1'))
+
+ def test_branch_hardlink(self):
+ self.requireFeature(HardlinkFeature)
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1'])
+ source.add('file1')
+ source.commit('added file')
+ out, err = self.run_bzr(['branch', 'source', 'target', '--hardlink'])
+ source_stat = os.stat('source/file1')
+ target_stat = os.stat('target/file1')
+ self.assertEqual(source_stat, target_stat)
+
+ def test_branch_files_from(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1'])
+ source.add('file1')
+ source.commit('added file')
+ out, err = self.run_bzr('branch source target --files-from source')
+ self.assertPathExists('target/file1')
+
+ def test_branch_files_from_hardlink(self):
+ self.requireFeature(HardlinkFeature)
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1'])
+ source.add('file1')
+ source.commit('added file')
+ source.bzrdir.sprout('second')
+ out, err = self.run_bzr('branch source target --files-from second'
+ ' --hardlink')
+ source_stat = os.stat('source/file1')
+ second_stat = os.stat('second/file1')
+ target_stat = os.stat('target/file1')
+ self.assertNotEqual(source_stat, target_stat)
+ self.assertEqual(second_stat, target_stat)
+
+ def test_branch_standalone(self):
+ shared_repo = self.make_repository('repo', shared=True)
+ self.example_branch('source')
+ self.run_bzr('branch --standalone source repo/target')
+ b = branch.Branch.open('repo/target')
+ expected_repo_path = os.path.abspath('repo/target/.bzr/repository')
+ self.assertEqual(strip_trailing_slash(b.repository.base),
+ strip_trailing_slash(local_path_to_url(expected_repo_path)))
+
+ def test_branch_no_tree(self):
+ self.example_branch('source')
+ self.run_bzr('branch --no-tree source target')
+ self.assertPathDoesNotExist('target/hello')
+ self.assertPathDoesNotExist('target/goodbye')
+
+ def test_branch_into_existing_dir(self):
+ self.example_branch('a')
+ # existing dir with similar files but no .bzr dir
+ self.build_tree_contents([('b/',)])
+ self.build_tree_contents([('b/hello', 'bar')])  # different content
+ self.build_tree_contents([('b/goodbye', 'baz')])  # same content
+ # fails without --use-existing-dir
+ out, err = self.run_bzr('branch a b', retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual('bzr: ERROR: Target directory "b" already exists.\n',
+ err)
+ # force operation
+ self.run_bzr('branch a b --use-existing-dir')
+ # check conflicts
+ self.assertPathExists('b/hello.moved')
+ self.assertPathDoesNotExist('b/goodbye.moved')
+ # we can't branch into branch
+ out, err = self.run_bzr('branch a b --use-existing-dir', retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual('bzr: ERROR: Already a branch: "b".\n', err)
+
+ def test_branch_bind(self):
+ self.example_branch('a')
+ out, err = self.run_bzr('branch a b --bind')
+ self.assertEndsWith(err, "New branch bound to a\n")
+ b = branch.Branch.open('b')
+ self.assertEndsWith(b.get_bound_location(), '/a/')
+
+ def test_branch_with_post_branch_init_hook(self):
+ calls = []
+ branch.Branch.hooks.install_named_hook('post_branch_init',
+ calls.append, None)
+ self.assertLength(0, calls)
+ self.example_branch('a')
+ self.assertLength(1, calls)
+ self.run_bzr('branch a b')
+ self.assertLength(2, calls)
+
+ def test_checkout_with_post_branch_init_hook(self):
+ calls = []
+ branch.Branch.hooks.install_named_hook('post_branch_init',
+ calls.append, None)
+ self.assertLength(0, calls)
+ self.example_branch('a')
+ self.assertLength(1, calls)
+ self.run_bzr('checkout a b')
+ self.assertLength(2, calls)
+
+ def test_lightweight_checkout_with_post_branch_init_hook(self):
+ calls = []
+ branch.Branch.hooks.install_named_hook('post_branch_init',
+ calls.append, None)
+ self.assertLength(0, calls)
+ self.example_branch('a')
+ self.assertLength(1, calls)
+ self.run_bzr('checkout --lightweight a b')
+ self.assertLength(2, calls)
+
+ def test_branch_fetches_all_tags(self):
+ builder = self.make_branch_builder('source')
+ source = fixtures.build_branch_with_non_ancestral_rev(builder)
+ source.tags.set_tag('tag-a', 'rev-2')
+ source.get_config_stack().set('branch.fetch_tags', True)
+ # Now source has a tag not in its ancestry. Make a branch from it.
+ self.run_bzr('branch source new-branch')
+ new_branch = branch.Branch.open('new-branch')
+ # The tag is present, and so is its revision.
+ self.assertEqual('rev-2', new_branch.tags.lookup_tag('tag-a'))
+ new_branch.repository.get_revision('rev-2')
+
+
+class TestBranchStacked(tests.TestCaseWithTransport):
+ """Tests for branch --stacked"""
+
+ def assertRevisionInRepository(self, repo_path, revid):
+ """Check that a revision is in a repo, disregarding stacking."""
+ repo = controldir.ControlDir.open(repo_path).open_repository()
+ self.assertTrue(repo.has_revision(revid))
+
+ def assertRevisionNotInRepository(self, repo_path, revid):
+ """Check that a revision is not in a repo, disregarding stacking."""
+ repo = controldir.ControlDir.open(repo_path).open_repository()
+ self.assertFalse(repo.has_revision(revid))
+
+ def assertRevisionsInBranchRepository(self, revid_list, branch_path):
+ repo = branch.Branch.open(branch_path).repository
+ self.assertEqual(set(revid_list),
+ repo.has_revisions(revid_list))
+
+ def test_branch_stacked_branch_not_stacked(self):
+ """Branching a stacked branch is not stacked by default"""
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('target',
+ format='1.9')
+ trunk_tree.commit('mainline')
+ # and a branch from it which is stacked
+ branch_tree = self.make_branch_and_tree('branch',
+ format='1.9')
+ branch_tree.branch.set_stacked_on_url(trunk_tree.branch.base)
+ # with some work on it
+ work_tree = trunk_tree.branch.bzrdir.sprout('local').open_workingtree()
+ work_tree.commit('moar work plz')
+ work_tree.branch.push(branch_tree.branch)
+ # branching our local branch gives us a new stacked branch pointing at
+ # mainline.
+ out, err = self.run_bzr(['branch', 'branch', 'newbranch'])
+ self.assertEqual('', out)
+ self.assertEqual('Branched 2 revisions.\n',
+ err)
+ # it should have preserved the branch format, and so it should be
+ # capable of supporting stacking, but not actually have a stacked_on
+ # branch configured
+ self.assertRaises(errors.NotStacked,
+ controldir.ControlDir.open('newbranch').open_branch().get_stacked_on_url)
+
+ def test_branch_stacked_branch_stacked(self):
+ """Asking to stack on a stacked branch does work"""
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('target',
+ format='1.9')
+ trunk_revid = trunk_tree.commit('mainline')
+ # and a branch from it which is stacked
+ branch_tree = self.make_branch_and_tree('branch',
+ format='1.9')
+ branch_tree.branch.set_stacked_on_url(trunk_tree.branch.base)
+ # with some work on it
+ work_tree = trunk_tree.branch.bzrdir.sprout('local').open_workingtree()
+ branch_revid = work_tree.commit('moar work plz')
+ work_tree.branch.push(branch_tree.branch)
+ # you can chain branches on from there
+ out, err = self.run_bzr(['branch', 'branch', '--stacked', 'branch2'])
+ self.assertEqual('', out)
+ self.assertEqual('Created new stacked branch referring to %s.\n' %
+ branch_tree.branch.base, err)
+ self.assertEqual(branch_tree.branch.base,
+ branch.Branch.open('branch2').get_stacked_on_url())
+ branch2_tree = WorkingTree.open('branch2')
+ branch2_revid = work_tree.commit('work on second stacked branch')
+ work_tree.branch.push(branch2_tree.branch)
+ self.assertRevisionInRepository('branch2', branch2_revid)
+ self.assertRevisionsInBranchRepository(
+ [trunk_revid, branch_revid, branch2_revid],
+ 'branch2')
+
+ def test_branch_stacked(self):
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('mainline',
+ format='1.9')
+ original_revid = trunk_tree.commit('mainline')
+ self.assertRevisionInRepository('mainline', original_revid)
+ # and a branch from it which is stacked
+ out, err = self.run_bzr(['branch', '--stacked', 'mainline',
+ 'newbranch'])
+ self.assertEqual('', out)
+ self.assertEqual('Created new stacked branch referring to %s.\n' %
+ trunk_tree.branch.base, err)
+ self.assertRevisionNotInRepository('newbranch', original_revid)
+ new_branch = branch.Branch.open('newbranch')
+ self.assertEqual(trunk_tree.branch.base,
+ new_branch.get_stacked_on_url())
+
+ def test_branch_stacked_from_smart_server(self):
+ # We can branch stacking on a smart server
+ self.transport_server = test_server.SmartTCPServer_for_testing
+ trunk = self.make_branch('mainline', format='1.9')
+ out, err = self.run_bzr(
+ ['branch', '--stacked', self.get_url('mainline'), 'shallow'])
+
+ def test_branch_stacked_from_non_stacked_format(self):
+ """The origin format doesn't support stacking"""
+ trunk = self.make_branch('trunk', format='pack-0.92')
+ out, err = self.run_bzr(
+ ['branch', '--stacked', 'trunk', 'shallow'])
+ # We should notify the user that we upgraded their format
+ self.assertEqualDiff(
+ 'Source repository format does not support stacking, using format:\n'
+ ' Packs 5 (adds stacking support, requires bzr 1.6)\n'
+ 'Source branch format does not support stacking, using format:\n'
+ ' Branch format 7\n'
+ 'Doing on-the-fly conversion from RepositoryFormatKnitPack1() to RepositoryFormatKnitPack5().\n'
+ 'This may take some time. Upgrade the repositories to the same format for better performance.\n'
+ 'Created new stacked branch referring to %s.\n' % (trunk.base,),
+ err)
+
+ def test_branch_stacked_from_rich_root_non_stackable(self):
+ trunk = self.make_branch('trunk', format='rich-root-pack')
+ out, err = self.run_bzr(
+ ['branch', '--stacked', 'trunk', 'shallow'])
+ # We should notify the user that we upgraded their format
+ self.assertEqualDiff(
+ 'Source repository format does not support stacking, using format:\n'
+ ' Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)\n'
+ 'Source branch format does not support stacking, using format:\n'
+ ' Branch format 7\n'
+ 'Doing on-the-fly conversion from RepositoryFormatKnitPack4() to RepositoryFormatKnitPack5RichRoot().\n'
+ 'This may take some time. Upgrade the repositories to the same format for better performance.\n'
+ 'Created new stacked branch referring to %s.\n' % (trunk.base,),
+ err)
+
+
+class TestSmartServerBranching(tests.TestCaseWithTransport):
+
+ def test_branch_from_trivial_branch_to_same_server_branch_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['branch', self.get_url('from'),
+ self.get_url('target')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(2, self.hpss_connections)
+ self.assertLength(33, self.hpss_calls)
+ self.expectFailure("branching to the same branch requires VFS access",
+ self.assertThat, self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_branch_from_trivial_branch_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['branch', self.get_url('from'),
+ 'local-target'])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+ self.assertLength(10, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+
+ def test_branch_from_trivial_stacked_branch_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('trunk')
+ for count in range(8):
+ t.commit(message='commit %d' % count)
+ tree2 = t.branch.bzrdir.sprout('feature', stacked=True
+ ).open_workingtree()
+ local_tree = t.branch.bzrdir.sprout('local-working').open_workingtree()
+ local_tree.commit('feature change')
+ local_tree.branch.push(tree2.branch)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['branch', self.get_url('feature'),
+ 'local-target'])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(15, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_branch_from_branch_with_tags(self):
+ self.setup_smart_server_with_call_log()
+ builder = self.make_branch_builder('source')
+ source = fixtures.build_branch_with_non_ancestral_rev(builder)
+ source.get_config_stack().set('branch.fetch_tags', True)
+ source.tags.set_tag('tag-a', 'rev-2')
+ source.tags.set_tag('tag-missing', 'missing-rev')
+ # Now source has a tag not in its ancestry. Make a branch from it.
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['branch', self.get_url('source'), 'target'])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(10, self.hpss_calls)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+ self.assertLength(1, self.hpss_connections)
+
+ def test_branch_to_stacked_from_trivial_branch_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['branch', '--stacked', self.get_url('from'),
+ 'local-target'])
+ # XXX: the number of hpss calls for this case isn't deterministic yet,
+ # so we can't easily assert about the number of calls.
+ #self.assertLength(XXX, self.hpss_calls)
+ # We can assert that none of the calls were readv requests for rix
+ # files, though (demonstrating that at least get_parent_map calls are
+ # not using VFS RPCs).
+ readvs_of_rix_files = [
+ c for c in self.hpss_calls
+ if c.call.method == 'readv' and c.call.args[-1].endswith('.rix')]
+ self.assertLength(1, self.hpss_connections)
+ self.assertLength(0, readvs_of_rix_files)
+ self.expectFailure("branching to stacked requires VFS access",
+ self.assertThat, self.hpss_calls, ContainsNoVfsCalls)
+
+
+class TestRemoteBranch(TestCaseWithSFTPServer):
+
+ def setUp(self):
+ super(TestRemoteBranch, self).setUp()
+ tree = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/file', 'file content\n')])
+ tree.add('file')
+ tree.commit('file created')
+
+ def test_branch_local_remote(self):
+ self.run_bzr(['branch', 'branch', self.get_url('remote')])
+ t = self.get_transport()
+ # Ensure that no working tree was created remotely
+ self.assertFalse(t.has('remote/file'))
+
+ def test_branch_remote_remote(self):
+ # Light cheat: we access the branch remotely
+ self.run_bzr(['branch', self.get_url('branch'),
+ self.get_url('remote')])
+ t = self.get_transport()
+ # Ensure that no working tree was created remotely
+ self.assertFalse(t.has('remote/file'))
+
+
+class TestDeprecatedAliases(tests.TestCaseWithTransport):
+
+ def test_deprecated_aliases(self):
+ """bzr branch can be called clone or get, but those names are
+ deprecated.
+
+ See bug 506265.
+ """
+ for command in ['clone', 'get']:
+ run_script(self, """
+ $ bzr %(command)s A B
+ 2>The command 'bzr %(command)s' has been deprecated in bzr 2.4. Please use 'bzr branch' instead.
+ 2>bzr: ERROR: Not a branch...
+ """ % locals())
+
+
+class TestBranchParentLocation(test_switch.TestSwitchParentLocationBase):
+
+ def _checkout_and_branch(self, option=''):
+ self.script_runner.run_script(self, '''
+ $ bzr checkout %(option)s repo/trunk checkout
+ $ cd checkout
+ $ bzr branch --switch ../repo/trunk ../repo/branched
+ 2>Branched 0 revisions.
+ 2>Tree is up to date at revision 0.
+ 2>Switched to branch:...branched...
+ $ cd ..
+ ''' % locals())
+ bound_branch = branch.Branch.open_containing('checkout')[0]
+ master_branch = branch.Branch.open_containing('repo/branched')[0]
+ return (bound_branch, master_branch)
+
+ def test_branch_switch_parent_lightweight(self):
+ """Lightweight checkout using bzr branch --switch."""
+ bb, mb = self._checkout_and_branch(option='--lightweight')
+ self.assertParent('repo/trunk', bb)
+ self.assertParent('repo/trunk', mb)
+
+ def test_branch_switch_parent_heavyweight(self):
+ """Heavyweight checkout using bzr branch --switch."""
+ bb, mb = self._checkout_and_branch()
+ self.assertParent('repo/trunk', bb)
+ self.assertParent('repo/trunk', mb)
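TestBranchStacked above checks for stacking by calling get_stacked_on_url and expecting errors.NotStacked when a branch has no stacked-on location. The same probe can be shown in isolation as a small helper; stacked_on_location is a hypothetical name used only for illustration, not something defined in this changeset.

from bzrlib import branch, errors


def stacked_on_location(branch_url):
    # Return the stacked-on URL of a branch, or None if it is not stacked
    # (or its format cannot stack at all).
    b = branch.Branch.open(branch_url)
    try:
        return b.get_stacked_on_url()
    except (errors.NotStacked, errors.UnstackableBranchFormat):
        return None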
diff --git a/bzrlib/tests/blackbox/test_branches.py b/bzrlib/tests/blackbox/test_branches.py
new file mode 100644
index 0000000..cfeb7e7
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_branches.py
@@ -0,0 +1,93 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr branches."""
+
+from bzrlib.bzrdir import BzrDir
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestBranches(TestCaseWithTransport):
+
+ def test_no_colocated_support(self):
+ # Listing the branches in a control directory without colocated branch
+ # support.
+ self.run_bzr('init a')
+ out, err = self.run_bzr('branches a')
+ self.assertEquals(out, "* (default)\n")
+
+ def test_no_branch(self):
+ # Listing the branches in a control directory without branches.
+ self.run_bzr('init-repo a')
+ out, err = self.run_bzr('branches a')
+ self.assertEquals(out, "")
+
+ def test_default_current_dir(self):
+ # "bzr branches" list the branches in the current directory
+ # if no location was specified.
+ self.run_bzr('init-repo a')
+ out, err = self.run_bzr('branches', working_dir='a')
+ self.assertEquals(out, "")
+
+ def test_recursive_current(self):
+ self.run_bzr('init .')
+ self.assertEquals(".\n", self.run_bzr('branches --recursive')[0])
+
+ def test_recursive(self):
+ self.run_bzr('init source')
+ self.run_bzr('init source/subsource')
+ self.run_bzr('checkout --lightweight source checkout')
+ self.run_bzr('init checkout/subcheckout')
+ self.run_bzr('init checkout/.bzr/subcheckout')
+ out = self.run_bzr('branches --recursive')[0]
+ lines = out.split('\n')
+ self.assertIs(True, 'source' in lines, lines)
+ self.assertIs(True, 'source/subsource' in lines, lines)
+ self.assertIs(True, 'checkout/subcheckout' in lines, lines)
+ self.assertIs(True, 'checkout' not in lines, lines)
+
+ def test_indicates_non_branch(self):
+ t = self.make_branch_and_tree('a', format='development-colo')
+ t.bzrdir.create_branch(name='another')
+ t.bzrdir.create_branch(name='colocated')
+ out, err = self.run_bzr('branches a')
+ self.assertEquals(out, "* (default)\n"
+ " another\n"
+ " colocated\n")
+
+ def test_indicates_branch(self):
+ t = self.make_repository('a', format='development-colo')
+ t.bzrdir.create_branch(name='another')
+ branch = t.bzrdir.create_branch(name='colocated')
+ t.bzrdir.set_branch_reference(target_branch=branch)
+ out, err = self.run_bzr('branches a')
+ self.assertEquals(out, " another\n"
+ "* colocated\n")
+
+ def test_shared_repos(self):
+ self.make_repository('a', shared=True)
+ BzrDir.create_branch_convenience('a/branch1')
+ b = BzrDir.create_branch_convenience('a/branch2')
+ b.create_checkout(lightweight=True, to_location='b')
+ out, err = self.run_bzr('branches b')
+ self.assertEquals(out, " branch1\n"
+ "* branch2\n")
+
+ def test_standalone_branch(self):
+ self.make_branch('a')
+ out, err = self.run_bzr('branches a')
+ self.assertEquals(out, "* (default)\n")
diff --git a/bzrlib/tests/blackbox/test_break_lock.py b/bzrlib/tests/blackbox/test_break_lock.py
new file mode 100644
index 0000000..c4bd48a
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_break_lock.py
@@ -0,0 +1,143 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for lock-breaking user interface"""
+
+from bzrlib import (
+ branch,
+ config,
+ controldir,
+ errors,
+ osutils,
+ tests,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.tests.script import (
+ run_script,
+ )
+
+
+class TestBreakLock(tests.TestCaseWithTransport):
+
+ # General principle for break-lock: all the elements that might be locked
+ # by a bzr operation on PATH are candidates that break-lock may unlock.
+ # So, pathologically, if we have a lightweight checkout A of branch B, which
+ # is bound to location C, the following things should be checked for locks
+ # to break:
+ # wt = WorkingTree(A)
+ # wt.branch
+ # wt.branch.repository
+ # wt.branch.get_master_branch()
+ # wt.branch.get_master_branch().repository
+ # so for smoke tests all we need is a bound branch with a checkout of that
+ # and we can then use different urls to test individual cases, for as much
+ # granularity as needed.
+
+ def setUp(self):
+ super(TestBreakLock, self).setUp()
+ self.build_tree(
+ ['master-repo/',
+ 'master-repo/master-branch/',
+ 'repo/',
+ 'repo/branch/',
+ 'checkout/'])
+ controldir.ControlDir.create('master-repo').create_repository()
+ self.master_branch = controldir.ControlDir.create_branch_convenience(
+ 'master-repo/master-branch')
+ controldir.ControlDir.create('repo').create_repository()
+ local_branch = controldir.ControlDir.create_branch_convenience('repo/branch')
+ local_branch.bind(self.master_branch)
+ checkoutdir = controldir.ControlDir.create('checkout')
+ checkoutdir.set_branch_reference(local_branch)
+ self.wt = checkoutdir.create_workingtree()
+
+ def test_break_lock_help(self):
+ out, err = self.run_bzr('break-lock --help')
+ # shouldn't fail and should not produce error output
+ self.assertEqual('', err)
+
+ def test_break_lock_no_interaction(self):
+ """With --force, the user isn't asked for confirmation"""
+ self.master_branch.lock_write()
+ run_script(self, """
+ $ bzr break-lock --force master-repo/master-branch
+ Broke lock ...master-branch/.bzr/...
+ """)
+ # lock should now be dead
+ self.assertRaises(errors.LockBroken, self.master_branch.unlock)
+
+ def test_break_lock_everything_locked(self):
+ ### if everything is locked, we should be able to unlock the lot.
+ # However, we don't test breaking the working tree because we
+ # cannot accurately do so right now: the dirstate lock is held
+ # by an OS lock, and we would need to spawn a separate process to lock it
+ # and then kill -9 it.
+ # sketch of test:
+ # lock most of the dir:
+ self.wt.branch.lock_write()
+ self.master_branch.lock_write()
+ # run the break-lock
+ # we need 5 yes's - wt, branch, repo, bound branch, bound repo.
+ self.run_bzr('break-lock checkout', stdin="y\ny\ny\ny\n")
+ # a new branch instance should be lockable
+ br = branch.Branch.open('checkout')
+ br.lock_write()
+ br.unlock()
+ # and a new instance of the master branch
+ mb = br.get_master_branch()
+ mb.lock_write()
+ mb.unlock()
+ self.assertRaises(errors.LockBroken, self.wt.unlock)
+ self.assertRaises(errors.LockBroken, self.master_branch.unlock)
+
+
+class TestConfigBreakLock(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigBreakLock, self).setUp()
+ self.config_file_name = './my.conf'
+ self.build_tree_contents([(self.config_file_name,
+ '[DEFAULT]\none=1\n')])
+ self.config = config.LockableConfig(file_name=self.config_file_name)
+ self.config.lock_write()
+
+ def test_create_pending_lock(self):
+ self.addCleanup(self.config.unlock)
+ self.assertTrue(self.config._lock.is_held)
+
+ def test_break_lock(self):
+ self.run_bzr('break-lock --config %s'
+ % osutils.dirname(self.config_file_name),
+ stdin="y\n")
+ self.assertRaises(errors.LockBroken, self.config.unlock)
+
+
+class TestSmartServerBreakLock(tests.TestCaseWithTransport):
+
+ def test_simple_branch_break_lock(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ t.branch.lock_write()
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['break-lock', '--force', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertLength(5, self.hpss_calls)
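The comment at the top of TestBreakLock enumerates what a break-lock on PATH may need to unlock: the working tree, its branch, that branch's repository, and, for a bound branch, the master branch and its repository. That enumeration can be written down as a plain helper; lock_candidates is a hypothetical name used only for illustration.

from bzrlib import workingtree


def lock_candidates(path):
    # Everything 'bzr break-lock PATH' may have to unlock, per the comment
    # in TestBreakLock above.
    wt = workingtree.WorkingTree.open(path)
    candidates = [wt, wt.branch, wt.branch.repository]
    master = wt.branch.get_master_branch()
    if master is not None:
        candidates.extend([master, master.repository])
    return candidates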
diff --git a/bzrlib/tests/blackbox/test_bundle_info.py b/bzrlib/tests/blackbox/test_bundle_info.py
new file mode 100644
index 0000000..58851bc
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_bundle_info.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2007, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ merge_directive,
+ tests,
+ )
+
+
+class TestBundleInfo(tests.TestCaseWithTransport):
+
+ def test_bundle_info(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ source.add('foo')
+ source.commit('added file', rev_id='rev1')
+ bundle = open('bundle', 'wb')
+ try:
+ source.branch.repository.create_bundle('rev1', 'null:', bundle,
+ '4')
+ finally:
+ bundle.close()
+ info = self.run_bzr('bundle-info bundle')[0]
+ # there might be either one file, or two, depending on whether the
+ # tree root counts...
+ self.assertContainsRe(info, 'file: [12] .0 multiparent.')
+ self.assertContainsRe(info, 'nicks: source')
+ self.assertNotContainsRe(info, 'foo')
+ self.run_bzr_error(['--verbose requires a merge directive'],
+ 'bundle-info -v bundle')
+ target = self.make_branch('target')
+ md = merge_directive.MergeDirective2.from_objects(
+ source.branch.repository, 'rev1', 0, 0, 'target',
+ base_revision_id='null:')
+ directive = open('directive', 'wb')
+ try:
+ directive.writelines(md.to_lines())
+ finally:
+ directive.close()
+ info = self.run_bzr('bundle-info -v directive')[0]
+ self.assertContainsRe(info, 'foo')
diff --git a/bzrlib/tests/blackbox/test_cat.py b/bzrlib/tests/blackbox/test_cat.py
new file mode 100644
index 0000000..67b81d7
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_cat.py
@@ -0,0 +1,241 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr cat.
+"""
+
+from bzrlib import tests
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.transport import memory
+
+
+class TestCat(tests.TestCaseWithTransport):
+
+ def test_cat(self):
+ tree = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/a', 'foo\n')])
+ tree.add('a')
+ # 'bzr cat' without an option should cat the last revision
+ self.run_bzr(['cat', 'a'], retcode=3, working_dir='branch')
+
+ tree.commit(message='1')
+ self.build_tree_contents([('branch/a', 'baz\n')])
+
+ self.assertEquals('foo\n',
+ self.run_bzr(['cat', 'a'], working_dir='branch')[0])
+
+ # On Windows, we used to have a bug where newlines got changed into
+ # crlf, whereas cat ought to write out the file exactly as it's
+ # recorded (by default). That problem can't be reproduced in-process,
+ # so we need at least one test here that runs bzr in a subprocess.
+ self.assertEquals('foo\n',
+ self.run_bzr_subprocess(['cat', 'a'],
+ working_dir='branch')[0])
+
+ tree.commit(message='2')
+ self.assertEquals(
+ 'baz\n', self.run_bzr(['cat', 'a'], working_dir='branch')[0])
+ self.assertEquals(
+ 'foo\n', self.run_bzr(['cat', 'a', '-r', '1'],
+ working_dir='branch')[0])
+ self.assertEquals(
+ 'baz\n', self.run_bzr(['cat', 'a', '-r', '-1'],
+ working_dir='branch')[0])
+
+ rev_id = tree.branch.last_revision()
+
+ self.assertEquals(
+ 'baz\n', self.run_bzr(['cat', 'a', '-r', 'revid:%s' % rev_id],
+ working_dir='branch')[0])
+
+ self.assertEquals('foo\n',
+ self.run_bzr(['cat', 'branch/a',
+ '-r', 'revno:1:branch'])[0])
+ self.run_bzr(['cat', 'a'], retcode=3)
+ self.run_bzr(['cat', 'a', '-r', 'revno:1:branch-that-does-not-exist'],
+ retcode=3)
+
+ def test_cat_different_id(self):
+ """'cat' works with old and new files"""
+ self.disable_missing_extensions_warning()
+ tree = self.make_branch_and_tree('.')
+ # the files are named after their path in the revision and
+ # current trees later in the test case
+ # a-rev-tree is special because it appears in both the revision
+ # tree and the working tree
+ self.build_tree_contents([('a-rev-tree', 'foo\n'),
+ ('c-rev', 'baz\n'), ('d-rev', 'bar\n'), ('e-rev', 'qux\n')])
+ tree.lock_write()
+ try:
+ tree.add(['a-rev-tree', 'c-rev', 'd-rev', 'e-rev'])
+ tree.commit('add test files', rev_id='first')
+ # remove currently uses self._write_inventory -
+ # work around that for now.
+ tree.flush()
+ tree.remove(['d-rev'])
+ tree.rename_one('a-rev-tree', 'b-tree')
+ tree.rename_one('c-rev', 'a-rev-tree')
+ tree.rename_one('e-rev', 'old-rev')
+ self.build_tree_contents([('e-rev', 'new\n')])
+ tree.add(['e-rev'])
+ finally:
+ # calling bzr as another process requires a free lock on win32
+ tree.unlock()
+
+ # 'b-tree' is not present in the old tree.
+ self.run_bzr_error(["^bzr: ERROR: u?'b-tree' "
+ "is not present in revision .+$"],
+ 'cat b-tree --name-from-revision')
+
+ # get to the old file automatically
+ out, err = self.run_bzr('cat d-rev')
+ self.assertEqual('bar\n', out)
+ self.assertEqual('', err)
+
+ out, err = \
+ self.run_bzr('cat a-rev-tree --name-from-revision')
+ self.assertEqual('foo\n', out)
+ self.assertEqual('', err)
+
+ out, err = self.run_bzr('cat a-rev-tree')
+ self.assertEqual('baz\n', out)
+ self.assertEqual('', err)
+
+ # the actual file-id for e-rev doesn't exist in the old tree
+ out, err = self.run_bzr('cat e-rev -rrevid:first')
+ self.assertEqual('qux\n', out)
+ self.assertEqual('', err)
+
+ def test_remote_cat(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['README'])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+
+ url = self.get_readonly_url() + '/README'
+ out, err = self.run_bzr(['cat', url])
+ self.assertEqual('contents of README\n', out)
+
+ def test_cat_branch_revspec(self):
+ wt = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+ wt = self.make_branch_and_tree('b')
+
+ out, err = self.run_bzr(['cat', '-r', 'branch:../a', 'README'],
+ working_dir='b')
+ self.assertEqual('contents of a/README\n', out)
+
+ def test_cat_filters(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['README'])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+ url = self.get_readonly_url() + '/README'
+
+ # Test unfiltered output
+ out, err = self.run_bzr(['cat', url])
+ self.assertEqual('contents of README\n', out)
+
+ # Test --filters option is legal but has no impact if no filters
+ out, err = self.run_bzr(['cat', '--filters', url])
+ self.assertEqual('contents of README\n', out)
+
+ def test_cat_filters_applied(self):
+ # Test filtering applied to output. This is tricky to do in a
+ # subprocess because we really need to patch in a plugin that
+ # registers the filters. Instead, we patch in a custom
+ # filter_stack and use run_bzr() ...
+ from cStringIO import StringIO
+ from bzrlib.commands import run_bzr
+ from bzrlib.tests.test_filters import _stack_2
+ from bzrlib.trace import mutter
+ from bzrlib.tree import Tree
+ wt = self.make_branch_and_tree('.')
+ self.build_tree_contents([
+ ('README', "junk\nline 1 of README\nline 2 of README\n"),
+ ])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+ url = self.get_readonly_url() + '/README'
+ real_content_filter_stack = Tree._content_filter_stack
+ def _custom_content_filter_stack(tree, path=None, file_id=None):
+ return _stack_2
+ Tree._content_filter_stack = _custom_content_filter_stack
+ try:
+ out, err = self.run_bzr(['cat', url, '--filters'])
+ # The filter stack will remove the first line and swapcase the rest
+ self.assertEqual('LINE 1 OF readme\nLINE 2 OF readme\n', out)
+ self.assertEqual('', err)
+ finally:
+ Tree._content_filter_stack = real_content_filter_stack
+
+ def test_cat_no_working_tree(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['README'])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+ wt.branch.bzrdir.destroy_workingtree()
+
+ url = self.get_readonly_url() + '/README'
+ out, err = self.run_bzr(['cat', url])
+ self.assertEqual('contents of README\n', out)
+
+ def test_cat_nonexistent_branch(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ self.run_bzr_error(['^bzr: ERROR: Not a branch'],
+ ['cat', self.get_url()])
+
+ def test_cat_directory(self):
+ wt = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+
+ out, err = self.run_bzr(['cat', '--directory=a', 'README'])
+ self.assertEqual('contents of a/README\n', out)
+
+ def test_cat_remote_directory(self):
+ wt = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ wt.add('README')
+ wt.commit('Making sure there is a basis_tree available')
+
+ url = self.get_readonly_url() + '/a'
+ out, err = self.run_bzr(['cat', '-d', url, 'README'])
+ self.assertEqual('contents of a/README\n', out)
+
+
+class TestSmartServerCat(tests.TestCaseWithTransport):
+
+ def test_simple_branch_cat(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['cat', "%s/foo" % self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(9, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
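
Note on test_cat_filters_applied above: the patched filter stack is expected to drop the
first line and swapcase the remainder. A minimal, self-contained sketch of that
transformation (the real _stack_2 lives in bzrlib.tests.test_filters and may be built
differently):

    def drop_first_and_swapcase(text):
        # Discard everything up to and including the first newline, then invert
        # the case of the rest -- the behaviour the assertions in
        # test_cat_filters_applied expect.
        return text.split('\n', 1)[1].swapcase()

    assert (drop_first_and_swapcase('junk\nline 1 of README\nline 2 of README\n')
            == 'LINE 1 OF readme\nLINE 2 OF readme\n')
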
diff --git a/bzrlib/tests/blackbox/test_cat_revision.py b/bzrlib/tests/blackbox/test_cat_revision.py
new file mode 100644
index 0000000..ce826e9
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_cat_revision.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestCatRevision(TestCaseWithTransport):
+
+ def test_cat_unicode_revision(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('This revision', rev_id='abcd')
+ output, errors = self.run_bzr(['cat-revision', u'abcd'])
+ self.assertContainsRe(output, 'This revision')
+ self.assertEqual('', errors)
+
+ def test_cat_revision(self):
+ """Test bzr cat-revision.
+ """
+ wt = self.make_branch_and_tree('.')
+ r = wt.branch.repository
+
+ wt.commit('Commit one', rev_id='a@r-0-1')
+ wt.commit('Commit two', rev_id='a@r-0-2')
+ wt.commit('Commit three', rev_id='a@r-0-3')
+
+ r.lock_read()
+ try:
+ revs = {}
+ for i in (1, 2, 3):
+ revid = "a@r-0-%d" % i
+ stream = r.revisions.get_record_stream([(revid,)], 'unordered',
+ False)
+ revs[i] = stream.next().get_bytes_as('fulltext')
+ finally:
+ r.unlock()
+
+ for i in [1, 2, 3]:
+ self.assertEqual(revs[i],
+ self.run_bzr('cat-revision -r revid:a@r-0-%d' % i)[0])
+ self.assertEqual(revs[i],
+ self.run_bzr('cat-revision a@r-0-%d' % i)[0])
+ self.assertEqual(revs[i],
+ self.run_bzr('cat-revision -r %d' % i)[0])
+
+ def test_cat_no_such_revid(self):
+ tree = self.make_branch_and_tree('.')
+ err = self.run_bzr('cat-revision abcd', retcode=3)[1]
+ self.assertContainsRe(err, 'The repository .* contains no revision abcd.')
+
+ def test_cat_revision_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ tree.commit('This revision', rev_id='abcd')
+ output, errors = self.run_bzr(['cat-revision', '-d', 'a', u'abcd'])
+ self.assertContainsRe(output, 'This revision')
+ self.assertEqual('', errors)
+
+ def test_cat_tree_less_branch(self):
+ tree = self.make_branch_and_tree('a')
+ tree.commit('This revision', rev_id='abcd')
+ tree.bzrdir.destroy_workingtree()
+ output, errors = self.run_bzr(['cat-revision', '-d', 'a', u'abcd'])
+ self.assertContainsRe(output, 'This revision')
+ self.assertEqual('', errors)
diff --git a/bzrlib/tests/blackbox/test_check.py b/bzrlib/tests/blackbox/test_check.py
new file mode 100644
index 0000000..4ac824a
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_check.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the 'check' CLI command."""
+
+from bzrlib.tests import ChrootedTestCase
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestCheck(TestCaseWithTransport):
+
+ def test_check_no_tree(self):
+ self.make_branch('.')
+ self.run_bzr('check')
+
+ def test_check_initial_tree(self):
+ self.make_branch_and_tree('.')
+ self.run_bzr('check')
+
+ def test_check_one_commit_tree(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('hallelujah')
+ out, err = self.run_bzr('check')
+ self.assertContainsRe(err, r"Checking working tree at '.*'\.\n")
+ self.assertContainsRe(err, r"Checking repository at '.*'\.\n")
+ # the root directory may be in the texts for rich root formats
+ self.assertContainsRe(err, r"checked repository.*\n"
+ r" 1 revisions\n"
+ r" [01] file-ids\n"
+ )
+ self.assertContainsRe(err, r"Checking branch at '.*'\.\n")
+ self.assertContainsRe(err, r"checked branch.*")
+
+ def test_check_branch(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('foo')
+ out, err = self.run_bzr('check --branch')
+ self.assertContainsRe(err, r"^Checking branch at '.*'\.\n"
+ r"checked branch.*")
+
+ def test_check_repository(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('foo')
+ out, err = self.run_bzr('check --repo')
+ self.assertContainsRe(err, r"^Checking repository at '.*'\.\n"
+ r"checked repository.*\n"
+ r" 1 revisions\n"
+ r" [01] file-ids\n"
+ )
+
+ def test_check_tree(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('foo')
+ out, err = self.run_bzr('check --tree')
+ self.assertContainsRe(err, r"^Checking working tree at '.*'\.\n$")
+
+ def test_partial_check(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('foo')
+ out, err = self.run_bzr('check --tree --branch')
+ self.assertContainsRe(err, r"^Checking working tree at '.*'\.\n"
+ r"Checking branch at '.*'\.\n"
+ r"checked branch.*")
+
+ def test_check_missing_tree(self):
+ branch = self.make_branch('.')
+ out, err = self.run_bzr('check --tree')
+ self.assertEqual(err, "No working tree found at specified location.\n")
+
+ def test_check_missing_partial(self):
+ branch = self.make_branch('.')
+ out, err = self.run_bzr('check --tree --branch')
+ self.assertContainsRe(err,
+ r"Checking branch at '.*'\.\n"
+ r"No working tree found at specified location\.\n"
+ r"checked branch.*")
+
+ def test_check_missing_branch_in_shared_repo(self):
+ self.make_repository('shared', shared=True)
+ out, err = self.run_bzr('check --branch shared')
+ self.assertEqual(err, "No branch found at specified location.\n")
+
+
+class ChrootedCheckTests(ChrootedTestCase):
+
+ def test_check_missing_branch(self):
+ out, err = self.run_bzr(
+ 'check --branch %s' % self.get_readonly_url(''))
+ self.assertEqual(err, "No branch found at specified location.\n")
+
+ def test_check_missing_repository(self):
+ out, err = self.run_bzr('check --repo %s' % self.get_readonly_url(''))
+ self.assertEqual(err, "No repository found at specified location.\n")
+
+ def test_check_missing_everything(self):
+ out, err = self.run_bzr('check %s' % self.get_readonly_url(''))
+ self.assertEqual(err, "No working tree found at specified location.\n"
+ "No branch found at specified location.\n"
+ "No repository found at specified location.\n")
diff --git a/bzrlib/tests/blackbox/test_checkout.py b/bzrlib/tests/blackbox/test_checkout.py
new file mode 100644
index 0000000..81f818b
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_checkout.py
@@ -0,0 +1,228 @@
+# Copyright (C) 2006, 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the 'checkout' CLI command."""
+
+import os
+
+from bzrlib import (
+ branch as _mod_branch,
+ bzrdir,
+ controldir,
+ errors,
+ workingtree,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.tests.features import (
+ HardlinkFeature,
+ )
+
+
+class TestCheckout(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestCheckout, self).setUp()
+ tree = controldir.ControlDir.create_standalone_workingtree('branch')
+ tree.commit('1', rev_id='1', allow_pointless=True)
+ self.build_tree(['branch/added_in_2'])
+ tree.add('added_in_2')
+ tree.commit('2', rev_id='2')
+
+ def test_checkout_makes_bound_branch(self):
+ self.run_bzr('checkout branch checkout')
+ # if we have a checkout, the branch base should be 'branch'
+ source = controldir.ControlDir.open('branch')
+ result = controldir.ControlDir.open('checkout')
+ self.assertEqual(source.open_branch().bzrdir.root_transport.base,
+ result.open_branch().get_bound_location())
+
+ def test_checkout_light_makes_checkout(self):
+ self.run_bzr('checkout --lightweight branch checkout')
+ # if we have a checkout, the branch base should be 'branch'
+ source = controldir.ControlDir.open('branch')
+ result = controldir.ControlDir.open('checkout')
+ self.assertEqual(source.open_branch().bzrdir.root_transport.base,
+ result.open_branch().bzrdir.root_transport.base)
+
+ def test_checkout_dash_r(self):
+ out, err = self.run_bzr(['checkout', '-r', '-2', 'branch', 'checkout'])
+ # the working tree should now be at revision '1' with the content
+ # from 1.
+ result = controldir.ControlDir.open('checkout')
+ self.assertEqual(['1'], result.open_workingtree().get_parent_ids())
+ self.assertPathDoesNotExist('checkout/added_in_2')
+
+ def test_checkout_light_dash_r(self):
+ out, err = self.run_bzr(['checkout','--lightweight', '-r', '-2',
+ 'branch', 'checkout'])
+ # the working tree should now be at revision '1' with the content
+ # from 1.
+ result = controldir.ControlDir.open('checkout')
+ self.assertEqual(['1'], result.open_workingtree().get_parent_ids())
+ self.assertPathDoesNotExist('checkout/added_in_2')
+
+ def test_checkout_into_empty_dir(self):
+ self.make_bzrdir('checkout')
+ out, err = self.run_bzr(['checkout', 'branch', 'checkout'])
+ result = controldir.ControlDir.open('checkout')
+ tree = result.open_workingtree()
+ branch = result.open_branch()
+
+ def test_checkout_reconstitutes_working_trees(self):
+ # doing a 'bzr checkout' in the directory of a branch with no tree,
+ # or a 'bzr checkout path' where path names a directory containing
+ # a branch with no tree, will reconstitute the tree.
+ os.mkdir('treeless-branch')
+ branch = controldir.ControlDir.create_branch_convenience(
+ 'treeless-branch',
+ force_new_tree=False,
+ format=bzrdir.BzrDirMetaFormat1())
+ # check no tree was created
+ self.assertRaises(errors.NoWorkingTree, branch.bzrdir.open_workingtree)
+ out, err = self.run_bzr('checkout treeless-branch')
+ # we should have a tree now
+ branch.bzrdir.open_workingtree()
+ # with no diff
+ out, err = self.run_bzr('diff treeless-branch')
+
+ # now test with no parameters
+ branch = controldir.ControlDir.create_branch_convenience(
+ '.',
+ force_new_tree=False,
+ format=bzrdir.BzrDirMetaFormat1())
+ # check no tree was created
+ self.assertRaises(errors.NoWorkingTree, branch.bzrdir.open_workingtree)
+ out, err = self.run_bzr('checkout')
+ # we should have a tree now
+ branch.bzrdir.open_workingtree()
+ # with no diff
+ out, err = self.run_bzr('diff')
+
+ def _test_checkout_existing_dir(self, lightweight):
+ source = self.make_branch_and_tree('source')
+ self.build_tree_contents([('source/file1', 'content1'),
+ ('source/file2', 'content2'),])
+ source.add(['file1', 'file2'])
+ source.commit('added files')
+ self.build_tree_contents([('target/', ''),
+ ('target/file1', 'content1'),
+ ('target/file2', 'content3'),])
+ cmd = ['checkout', 'source', 'target']
+ if lightweight:
+ cmd.append('--lightweight')
+ self.run_bzr('checkout source target')
+ # files with unique content should be moved
+ self.assertPathExists('target/file2.moved')
+ # files with content matching tree should not be moved
+ self.assertPathDoesNotExist('target/file1.moved')
+
+ def test_checkout_existing_dir_heavy(self):
+ self._test_checkout_existing_dir(False)
+
+ def test_checkout_existing_dir_lightweight(self):
+ self._test_checkout_existing_dir(True)
+
+ def test_checkout_in_branch_with_r(self):
+ branch = _mod_branch.Branch.open('branch')
+ branch.bzrdir.destroy_workingtree()
+ self.run_bzr('checkout -r 1', working_dir='branch')
+ tree = workingtree.WorkingTree.open('branch')
+ self.assertEqual('1', tree.last_revision())
+ branch.bzrdir.destroy_workingtree()
+ self.run_bzr('checkout -r 0', working_dir='branch')
+ self.assertEqual('null:', tree.last_revision())
+
+ def test_checkout_files_from(self):
+ branch = _mod_branch.Branch.open('branch')
+ self.run_bzr(['checkout', 'branch', 'branch2', '--files-from',
+ 'branch'])
+
+ def test_checkout_hardlink(self):
+ self.requireFeature(HardlinkFeature)
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1'])
+ source.add('file1')
+ source.commit('added file')
+ out, err = self.run_bzr('checkout source target --hardlink')
+ source_stat = os.stat('source/file1')
+ target_stat = os.stat('target/file1')
+ self.assertEqual(source_stat, target_stat)
+
+ def test_checkout_hardlink_files_from(self):
+ self.requireFeature(HardlinkFeature)
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1'])
+ source.add('file1')
+ source.commit('added file')
+ source.bzrdir.sprout('second')
+ out, err = self.run_bzr('checkout source target --hardlink'
+ ' --files-from second')
+ second_stat = os.stat('second/file1')
+ target_stat = os.stat('target/file1')
+ self.assertEqual(second_stat, target_stat)
+
+ def test_colo_checkout(self):
+ source = self.make_branch_and_tree('source', format='development-colo')
+ self.build_tree(['source/file1'])
+ source.add('file1')
+ source.commit('added file')
+ target = source.bzrdir.sprout('file:second,branch=somebranch',
+ create_tree_if_local=False)
+ out, err = self.run_bzr('checkout file:,branch=somebranch .',
+ working_dir='second')
+ # We should always be creating a lightweight checkout for colocated
+ # branches.
+ self.assertEquals(
+ target.open_branch(name='somebranch').base,
+ target.get_branch_reference(name=""))
+
+
+class TestSmartServerCheckout(TestCaseWithTransport):
+
+ def test_heavyweight_checkout(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['checkout', self.get_url('from'), 'target'])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(10, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_lightweight_checkout(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['checkout', '--lightweight', self.get_url('from'),
+ 'target'])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(13, self.hpss_calls)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
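
test_checkout_hardlink above compares the full os.stat() results of source and target to
prove they are hard links. A tiny standalone illustration of why that works on POSIX
(hard-linked names share one inode), independent of bzrlib:

    import os
    import tempfile

    d = tempfile.mkdtemp()
    a = os.path.join(d, 'a')
    b = os.path.join(d, 'b')
    open(a, 'w').close()
    os.link(a, b)          # create a second name for the same file
    sa, sb = os.stat(a), os.stat(b)
    # Same inode and device, and a link count of 2 -- which is why comparing
    # the stat results of 'source/file1' and 'target/file1' is a sufficient
    # hardlink check in the test.
    assert (sa.st_ino, sa.st_dev) == (sb.st_ino, sb.st_dev)
    assert sa.st_nlink == 2
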
diff --git a/bzrlib/tests/blackbox/test_clean_tree.py b/bzrlib/tests/blackbox/test_clean_tree.py
new file mode 100644
index 0000000..793fe6f
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_clean_tree.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+
+"""Tests of the 'bzr clean-tree' command."""
+
+
+import os
+
+from bzrlib import ignores
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.script import run_script
+
+
+class TestBzrTools(TestCaseWithTransport):
+
+ @staticmethod
+ def touch(filename):
+ my_file = open(filename, 'wb')
+ try:
+ my_file.write('')
+ finally:
+ my_file.close()
+
+ def test_clean_tree(self):
+ self.run_bzr('init')
+ self.run_bzr('ignore *~')
+ self.run_bzr('ignore *.pyc')
+ self.touch('name')
+ self.touch('name~')
+ self.assertPathExists('name~')
+ self.touch('name.pyc')
+ self.run_bzr('clean-tree --force')
+ self.assertPathExists('name~')
+ self.assertPathDoesNotExist('name')
+ self.touch('name')
+ self.run_bzr('clean-tree --detritus --force')
+ self.assertPathExists('name')
+ self.assertPathDoesNotExist('name~')
+ self.assertPathExists('name.pyc')
+ self.run_bzr('clean-tree --ignored --force')
+ self.assertPathExists('name')
+ self.assertPathDoesNotExist('name.pyc')
+ self.run_bzr('clean-tree --unknown --force')
+ self.assertPathDoesNotExist('name')
+ self.touch('name')
+ self.touch('name~')
+ self.touch('name.pyc')
+ self.run_bzr('clean-tree --unknown --ignored --force')
+ self.assertPathDoesNotExist('name')
+ self.assertPathDoesNotExist('name~')
+ self.assertPathDoesNotExist('name.pyc')
+
+ def test_clean_tree_nested_bzrdir(self):
+ # clean-tree should not blindly delete nested bzrdirs (branches)
+ # bug https://bugs.launchpad.net/bzr/+bug/572098
+ # so it will play well with scmproj/bzr-externals plugins.
+ wt1 = self.make_branch_and_tree('.')
+ wt2 = self.make_branch_and_tree('foo')
+ wt3 = self.make_branch_and_tree('bar')
+ ignores.tree_ignores_add_patterns(wt1, ['./foo'])
+ self.run_bzr(['clean-tree', '--unknown', '--force'])
+ self.assertPathExists('foo')
+ self.assertPathExists('bar')
+ self.run_bzr(['clean-tree', '--ignored', '--force'])
+ self.assertPathExists('foo')
+ self.assertPathExists('bar')
+
+ def test_clean_tree_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/added', 'a/unknown', 'a/ignored'])
+ tree.add('added')
+ self.run_bzr('clean-tree -d a --unknown --ignored --force')
+ self.assertPathDoesNotExist('a/unknown')
+ self.assertPathDoesNotExist('a/ignored')
+ self.assertPathExists('a/added')
+
+ def test_clean_tree_interactive(self):
+ wt = self.make_branch_and_tree('.')
+ self.touch('bar')
+ self.touch('foo')
+ run_script(self, """
+ $ bzr clean-tree
+ bar
+ foo
+ 2>Are you sure you wish to delete these? ([y]es, [n]o): no
+ <n
+ Canceled
+ """)
+ self.assertPathExists('bar')
+ self.assertPathExists('foo')
+ run_script(self, """
+ $ bzr clean-tree
+ bar
+ foo
+ 2>Are you sure you wish to delete these? ([y]es, [n]o): yes
+ <y
+ 2>deleting paths:
+ 2> bar
+ 2> foo
+ """)
+ self.assertPathDoesNotExist('bar')
+ self.assertPathDoesNotExist('foo')
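
Taken together, the assertions in test_clean_tree above imply that each clean-tree flag
removes only its own class of files, and that --unknown is the effective default when no
class flag is given. Summarised (inferred from this test alone, not from bzr's
documentation):

    # File deleted by 'bzr clean-tree --force <flag>' in the test above:
    flag_removes = {
        '--unknown':  'name',      # unversioned, not ignored (also the default)
        '--detritus': 'name~',     # backup/temporary cruft
        '--ignored':  'name.pyc',  # matches the '*.pyc' ignore pattern
    }
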
diff --git a/bzrlib/tests/blackbox/test_command_encoding.py b/bzrlib/tests/blackbox/test_command_encoding.py
new file mode 100644
index 0000000..981b3ac
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_command_encoding.py
@@ -0,0 +1,120 @@
+# Copyright (C) 2005, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the Command.encoding_type interface."""
+
+from bzrlib.tests import TestCase
+from bzrlib.commands import Command, register_command, plugin_cmds
+
+
+class cmd_echo_exact(Command):
+ """This command just repeats what it is given.
+
+ It decodes the argument, and then writes it to stdout.
+ """
+
+ takes_args = ['text']
+ encoding_type = 'exact'
+
+ def run(self, text=None):
+ self.outf.write(text)
+
+
+class cmd_echo_strict(cmd_echo_exact):
+ """Raise a UnicodeError for unrepresentable characters."""
+
+ encoding_type = 'strict'
+
+
+class cmd_echo_replace(cmd_echo_exact):
+ """Replace bogus unicode characters."""
+
+ encoding_type = 'replace'
+
+
+class TestCommandEncoding(TestCase):
+
+ def test_exact(self):
+ def bzr(*args, **kwargs):
+ return self.run_bzr(*args, **kwargs)[0]
+
+ register_command(cmd_echo_exact)
+ try:
+ self.assertEqual('foo', bzr('echo-exact foo'))
+ # 'exact' should fail to encode the unicode string
+ self.assertRaises(UnicodeEncodeError,
+ bzr,
+ ['echo-exact', u'foo\xb5'])
+ # Previously a non-ascii bytestring was also tested, as 'exact'
+ # outputs bytes untouched, but needed buggy argv parsing to work
+ finally:
+ plugin_cmds.remove('echo-exact')
+
+ def test_strict_utf8(self):
+ def bzr(*args, **kwargs):
+ kwargs['encoding'] = 'utf-8'
+ return self.run_bzr(*args, **kwargs)[0]
+
+ register_command(cmd_echo_strict)
+ try:
+ self.assertEqual('foo', bzr('echo-strict foo'))
+ self.assertEqual(u'foo\xb5'.encode('utf-8'),
+ bzr(['echo-strict', u'foo\xb5']))
+ finally:
+ plugin_cmds.remove('echo-strict')
+
+ def test_strict_ascii(self):
+ def bzr(*args, **kwargs):
+ kwargs['encoding'] = 'ascii'
+ return self.run_bzr(*args, **kwargs)[0]
+
+ register_command(cmd_echo_strict)
+ try:
+ self.assertEqual('foo', bzr('echo-strict foo'))
+ # ascii can't encode \xb5
+ self.assertRaises(UnicodeEncodeError,
+ bzr,
+ ['echo-strict', u'foo\xb5'])
+ finally:
+ plugin_cmds.remove('echo-strict')
+
+ def test_replace_utf8(self):
+ def bzr(*args, **kwargs):
+ kwargs['encoding'] = 'utf-8'
+ return self.run_bzr(*args, **kwargs)[0]
+
+ register_command(cmd_echo_replace)
+ try:
+ self.assertEqual('foo', bzr('echo-replace foo'))
+ self.assertEqual(u'foo\xb5'.encode('utf-8'),
+ bzr(['echo-replace', u'foo\xb5']))
+ finally:
+ plugin_cmds.remove('echo-replace')
+
+ def test_replace_ascii(self):
+ def bzr(*args, **kwargs):
+ kwargs['encoding'] = 'ascii'
+ return self.run_bzr(*args, **kwargs)[0]
+
+ register_command(cmd_echo_replace)
+ try:
+ self.assertEqual('foo', bzr('echo-replace foo'))
+ # ascii can't encode \xb5
+ self.assertEqual('foo?', bzr(['echo-replace', u'foo\xb5']))
+ finally:
+ plugin_cmds.remove('echo-replace')
+
+
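
The three echo commands above reduce to how a unicode argument is encoded for the chosen
output encoding. A quick standalone illustration with plain Python codecs (no bzrlib
involved), matching the expected values in TestCommandEncoding:

    text = u'foo\xb5'
    # utf-8 can represent the micro sign ...
    assert text.encode('utf-8') == 'foo\xc2\xb5'
    # ... ascii cannot, so 'strict' raises and 'replace' degrades it to '?'
    assert text.encode('ascii', 'replace') == 'foo?'
    try:
        text.encode('ascii', 'strict')
    except UnicodeEncodeError:
        pass
    else:
        raise AssertionError('expected UnicodeEncodeError')
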
diff --git a/bzrlib/tests/blackbox/test_commit.py b/bzrlib/tests/blackbox/test_commit.py
new file mode 100644
index 0000000..6be29c3
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_commit.py
@@ -0,0 +1,893 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the commit CLI of bzr."""
+
+import doctest
+import os
+import re
+import sys
+
+from testtools.matchers import DocTestMatches
+
+from bzrlib import (
+ config,
+ osutils,
+ ignores,
+ msgeditor,
+ )
+from bzrlib.controldir import ControlDir
+from bzrlib.tests import (
+ test_foreign,
+ features,
+ )
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestCommit(TestCaseWithTransport):
+
+ def test_05_empty_commit(self):
+ """Commit of tree with no versioned files should fail"""
+ # If forced, it should succeed, but this is not tested here.
+ self.make_branch_and_tree('.')
+ self.build_tree(['hello.txt'])
+ out,err = self.run_bzr('commit -m empty', retcode=3)
+ self.assertEqual('', out)
+ # Two ugly bits here.
+ # 1) We really don't want 'aborting commit write group' anymore.
+ # 2) bzr: ERROR: is a really long line, so we wrap it with '\'
+ self.assertThat(
+ err,
+ DocTestMatches("""\
+Committing to: ...
+bzr: ERROR: No changes to commit.\
+ Please 'bzr add' the files you want to commit,\
+ or use --unchanged to force an empty commit.
+""", flags=doctest.ELLIPSIS|doctest.REPORT_UDIFF))
+
+ def test_commit_success(self):
+ """Successful commit should not leave behind a bzr-commit-* file"""
+ self.make_branch_and_tree('.')
+ self.run_bzr('commit --unchanged -m message')
+ self.assertEqual('', self.run_bzr('unknowns')[0])
+
+ # same for unicode messages
+ self.run_bzr(["commit", "--unchanged", "-m", u'foo\xb5'])
+ self.assertEqual('', self.run_bzr('unknowns')[0])
+
+ def test_commit_lossy_native(self):
+ """A --lossy option to commit is supported."""
+ self.make_branch_and_tree('.')
+ self.run_bzr('commit --lossy --unchanged -m message')
+ self.assertEqual('', self.run_bzr('unknowns')[0])
+
+ def test_commit_lossy_foreign(self):
+ test_foreign.register_dummy_foreign_for_test(self)
+ self.make_branch_and_tree('.',
+ format=test_foreign.DummyForeignVcsDirFormat())
+ self.run_bzr('commit --lossy --unchanged -m message')
+ output = self.run_bzr('revision-info')[0]
+ self.assertTrue(output.startswith('1 dummy-'))
+
+ def test_commit_with_path(self):
+ """Commit tree with path of root specified"""
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/a_file'])
+ a_tree.add('a_file')
+ self.run_bzr(['commit', '-m', 'first commit', 'a'])
+
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('b/a_file', 'changes in b')])
+ self.run_bzr(['commit', '-m', 'first commit in b', 'b'])
+
+ self.build_tree_contents([('a/a_file', 'new contents')])
+ self.run_bzr(['commit', '-m', 'change in a', 'a'])
+
+ b_tree.merge_from_branch(a_tree.branch)
+ self.assertEqual(len(b_tree.conflicts()), 1)
+ self.run_bzr('resolved b/a_file')
+ self.run_bzr(['commit', '-m', 'merge into b', 'b'])
+
+ def test_10_verbose_commit(self):
+ """Add one file and examine verbose commit output"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['hello.txt'])
+ tree.add("hello.txt")
+ out,err = self.run_bzr('commit -m added')
+ self.assertEqual('', out)
+ self.assertContainsRe(err, '^Committing to: .*\n'
+ 'added hello.txt\n'
+ 'Committed revision 1.\n$',)
+
+ def prepare_simple_history(self):
+ """Prepare and return a working tree with one commit of one file"""
+ # Commit with modified file should say so
+ wt = ControlDir.create_standalone_workingtree('.')
+ self.build_tree(['hello.txt', 'extra.txt'])
+ wt.add(['hello.txt'])
+ wt.commit(message='added')
+ return wt
+
+ def test_verbose_commit_modified(self):
+ # Verbose commit of modified file should say so
+ wt = self.prepare_simple_history()
+ self.build_tree_contents([('hello.txt', 'new contents')])
+ out, err = self.run_bzr('commit -m modified')
+ self.assertEqual('', out)
+ self.assertContainsRe(err, '^Committing to: .*\n'
+ 'modified hello\.txt\n'
+ 'Committed revision 2\.\n$')
+
+ def test_unicode_commit_message_is_filename(self):
+ """Unicode commit message same as a filename (Bug #563646).
+ """
+ self.requireFeature(features.UnicodeFilenameFeature)
+ file_name = u'\N{euro sign}'
+ self.run_bzr(['init'])
+ with open(file_name, 'w') as f: f.write('hello world')
+ self.run_bzr(['add'])
+ out, err = self.run_bzr(['commit', '-m', file_name])
+ reflags = re.MULTILINE|re.DOTALL|re.UNICODE
+ te = osutils.get_terminal_encoding()
+ self.assertContainsRe(err.decode(te),
+ u'The commit message is a file name:',
+ flags=reflags)
+
+ # Run the same test with a filename that causes an encode
+ # error for the terminal encoding. We do this
+ # by forcing a terminal encoding of ascii for
+ # osutils.get_terminal_encoding, which is used
+ # by ui.text.show_warning
+ default_get_terminal_enc = osutils.get_terminal_encoding
+ try:
+ osutils.get_terminal_encoding = lambda trace=None: 'ascii'
+ file_name = u'foo\u1234'
+ with open(file_name, 'w') as f: f.write('hello world')
+ self.run_bzr(['add'])
+ out, err = self.run_bzr(['commit', '-m', file_name])
+ reflags = re.MULTILINE|re.DOTALL|re.UNICODE
+ te = osutils.get_terminal_encoding()
+ self.assertContainsRe(err.decode(te, 'replace'),
+ u'The commit message is a file name:',
+ flags=reflags)
+ finally:
+ osutils.get_terminal_encoding = default_get_terminal_enc
+
+ def test_non_ascii_file_unversioned_utf8(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["f"])
+ tree.add(["f"])
+ out, err = self.run_bzr(["commit", "-m", "Wrong filename", u"\xa7"],
+ encoding="utf-8", retcode=3)
+ self.assertContainsRe(err, "(?m)not versioned: \"\xc2\xa7\"$")
+
+ def test_non_ascii_file_unversioned_iso_8859_5(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["f"])
+ tree.add(["f"])
+ out, err = self.run_bzr(["commit", "-m", "Wrong filename", u"\xa7"],
+ encoding="iso-8859-5", retcode=3)
+ self.expectFailure("Error messages are always written as UTF-8",
+ self.assertNotContainsString, err, "\xc2\xa7")
+ self.assertContainsRe(err, "(?m)not versioned: \"\xfd\"$")
+
+ def test_warn_about_forgotten_commit_message(self):
+ """Test that the lack of -m parameter is caught"""
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['one', 'two'])
+ wt.add(['two'])
+ out, err = self.run_bzr('commit -m one two')
+ self.assertContainsRe(err, "The commit message is a file name")
+
+ def test_verbose_commit_renamed(self):
+ # Verbose commit of renamed file should say so
+ wt = self.prepare_simple_history()
+ wt.rename_one('hello.txt', 'gutentag.txt')
+ out, err = self.run_bzr('commit -m renamed')
+ self.assertEqual('', out)
+ self.assertContainsRe(err, '^Committing to: .*\n'
+ 'renamed hello\.txt => gutentag\.txt\n'
+ 'Committed revision 2\.$\n')
+
+ def test_verbose_commit_moved(self):
+ # Verbose commit of file moved to new directory should say so
+ wt = self.prepare_simple_history()
+ os.mkdir('subdir')
+ wt.add(['subdir'])
+ wt.rename_one('hello.txt', 'subdir/hello.txt')
+ out, err = self.run_bzr('commit -m renamed')
+ self.assertEqual('', out)
+ self.assertEqual(set([
+ 'Committing to: %s/' % osutils.getcwd(),
+ 'added subdir',
+ 'renamed hello.txt => subdir/hello.txt',
+ 'Committed revision 2.',
+ '',
+ ]), set(err.split('\n')))
+
+ def test_verbose_commit_with_unknown(self):
+ """Unknown files should not be listed by default in verbose output"""
+ # Is that really the best policy?
+ wt = ControlDir.create_standalone_workingtree('.')
+ self.build_tree(['hello.txt', 'extra.txt'])
+ wt.add(['hello.txt'])
+ out,err = self.run_bzr('commit -m added')
+ self.assertEqual('', out)
+ self.assertContainsRe(err, '^Committing to: .*\n'
+ 'added hello\.txt\n'
+ 'Committed revision 1\.\n$')
+
+ def test_verbose_commit_with_unchanged(self):
+ """Unchanged files should not be listed by default in verbose output"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['hello.txt', 'unchanged.txt'])
+ tree.add('unchanged.txt')
+ self.run_bzr('commit -m unchanged unchanged.txt')
+ tree.add("hello.txt")
+ out,err = self.run_bzr('commit -m added')
+ self.assertEqual('', out)
+ self.assertContainsRe(err, '^Committing to: .*\n'
+ 'added hello\.txt\n'
+ 'Committed revision 2\.$\n')
+
+ def test_verbose_commit_includes_master_location(self):
+ """Location of master is displayed when committing to bound branch"""
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/b'])
+ a_tree.add('b')
+ a_tree.commit(message='Initial message')
+
+ b_tree = a_tree.branch.create_checkout('b')
+ expected = "%s/" % (osutils.abspath('a'), )
+ out, err = self.run_bzr('commit -m blah --unchanged', working_dir='b')
+ self.assertEqual(err, 'Committing to: %s\n'
+ 'Committed revision 2.\n' % expected)
+
+ def test_commit_sanitizes_CR_in_message(self):
+ # See bug #433779, basically Emacs likes to pass '\r\n' style line
+ # endings to 'bzr commit -m ""' which breaks because we don't allow
+ # '\r' in commit messages. (Mostly because of issues where XML style
+ # formats arbitrarily strip it out of the data while parsing.)
+ # To make life easier for users, we just always translate '\r\n' =>
+ # '\n'. And '\r' => '\n'.
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/b'])
+ a_tree.add('b')
+ self.run_bzr(['commit',
+ '-m', 'a string\r\n\r\nwith mixed\r\rendings\n'],
+ working_dir='a')
+ rev_id = a_tree.branch.last_revision()
+ rev = a_tree.branch.repository.get_revision(rev_id)
+ self.assertEqualDiff('a string\n\nwith mixed\n\nendings\n',
+ rev.message)
+
+ def test_commit_merge_reports_all_modified_files(self):
+ # the commit command should show all the files that are shown by
+ # bzr diff or bzr status when committing, even when they were not
+ # changed by the user but rather through doing a merge.
+ this_tree = self.make_branch_and_tree('this')
+ # we need a bunch of files and dirs, to perform one action on each.
+ self.build_tree([
+ 'this/dirtorename/',
+ 'this/dirtoreparent/',
+ 'this/dirtoleave/',
+ 'this/dirtoremove/',
+ 'this/filetoreparent',
+ 'this/filetorename',
+ 'this/filetomodify',
+ 'this/filetoremove',
+ 'this/filetoleave']
+ )
+ this_tree.add([
+ 'dirtorename',
+ 'dirtoreparent',
+ 'dirtoleave',
+ 'dirtoremove',
+ 'filetoreparent',
+ 'filetorename',
+ 'filetomodify',
+ 'filetoremove',
+ 'filetoleave']
+ )
+ this_tree.commit('create_files')
+ other_dir = this_tree.bzrdir.sprout('other')
+ other_tree = other_dir.open_workingtree()
+ other_tree.lock_write()
+ # perform the needed actions on the files and dirs.
+ try:
+ other_tree.rename_one('dirtorename', 'renameddir')
+ other_tree.rename_one('dirtoreparent', 'renameddir/reparenteddir')
+ other_tree.rename_one('filetorename', 'renamedfile')
+ other_tree.rename_one('filetoreparent',
+ 'renameddir/reparentedfile')
+ other_tree.remove(['dirtoremove', 'filetoremove'])
+ self.build_tree_contents([
+ ('other/newdir/',),
+ ('other/filetomodify', 'new content'),
+ ('other/newfile', 'new file content')])
+ other_tree.add('newfile')
+ other_tree.add('newdir/')
+ other_tree.commit('modify all sample files and dirs.')
+ finally:
+ other_tree.unlock()
+ this_tree.merge_from_branch(other_tree.branch)
+ out, err = self.run_bzr('commit -m added', working_dir='this')
+ self.assertEqual('', out)
+ self.assertEqual(set([
+ 'Committing to: %s/' % osutils.pathjoin(osutils.getcwd(), 'this'),
+ 'modified filetomodify',
+ 'added newdir',
+ 'added newfile',
+ 'renamed dirtorename => renameddir',
+ 'renamed filetorename => renamedfile',
+ 'renamed dirtoreparent => renameddir/reparenteddir',
+ 'renamed filetoreparent => renameddir/reparentedfile',
+ 'deleted dirtoremove',
+ 'deleted filetoremove',
+ 'Committed revision 2.',
+ ''
+ ]), set(err.split('\n')))
+
+ def test_empty_commit_message(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('foo.c', 'int main() {}')])
+ tree.add('foo.c')
+ self.run_bzr('commit -m ""')
+
+ def test_other_branch_commit(self):
+ # this branch is to ensure consistent behaviour, whether we're run
+ # inside a branch, or not.
+ outer_tree = self.make_branch_and_tree('.')
+ inner_tree = self.make_branch_and_tree('branch')
+ self.build_tree_contents([
+ ('branch/foo.c', 'int main() {}'),
+ ('branch/bar.c', 'int main() {}')])
+ inner_tree.add(['foo.c', 'bar.c'])
+ # can't commit files in different trees; sane error
+ self.run_bzr('commit -m newstuff branch/foo.c .', retcode=3)
+ # can commit to branch - records foo.c only
+ self.run_bzr('commit -m newstuff branch/foo.c')
+ # can commit to branch - records bar.c
+ self.run_bzr('commit -m newstuff branch')
+ # No changes left
+ self.run_bzr_error(["No changes to commit"], 'commit -m newstuff branch')
+
+ def test_out_of_date_tree_commit(self):
+ # check we get an error code and a clear message committing with an out
+ # of date checkout
+ tree = self.make_branch_and_tree('branch')
+ # make a checkout
+ checkout = tree.branch.create_checkout('checkout', lightweight=True)
+ # commit to the original branch to make the checkout out of date
+ tree.commit('message branch', allow_pointless=True)
+ # now a commit to the checkout should emit
+ # ERROR: Out of date with the branch, 'bzr update' is suggested
+ output = self.run_bzr('commit --unchanged -m checkout_message '
+ 'checkout', retcode=3)
+ self.assertEqual(output,
+ ('',
+ "bzr: ERROR: Working tree is out of date, please "
+ "run 'bzr update'.\n"))
+
+ def test_local_commit_unbound(self):
+ # a --local commit on an unbound branch is an error
+ self.make_branch_and_tree('.')
+ out, err = self.run_bzr('commit --local', retcode=3)
+ self.assertEqualDiff('', out)
+ self.assertEqualDiff('bzr: ERROR: Cannot perform local-only commits '
+ 'on unbound branches.\n', err)
+
+ def test_commit_a_text_merge_in_a_checkout(self):
+ # checkouts perform multiple actions in a transaction across bound
+ # branches and their master, and have been observed to fail in the
+ # past. This is a user story reported to fail in bug #43959 where
+ # a merge done in a checkout (using the update command) failed to
+ # commit correctly.
+ trunk = self.make_branch_and_tree('trunk')
+
+ u1 = trunk.branch.create_checkout('u1')
+ self.build_tree_contents([('u1/hosts', 'initial contents\n')])
+ u1.add('hosts')
+ self.run_bzr('commit -m add-hosts u1')
+
+ u2 = trunk.branch.create_checkout('u2')
+ self.build_tree_contents([('u2/hosts', 'altered in u2\n')])
+ self.run_bzr('commit -m checkin-from-u2 u2')
+
+ # make an offline commit
+ self.build_tree_contents([('u1/hosts', 'first offline change in u1\n')])
+ self.run_bzr('commit -m checkin-offline --local u1')
+
+ # now try to pull in online work from u2, and then commit our offline
+ # work as a merge
+ # retcode 1 as we expect a text conflict
+ self.run_bzr('update u1', retcode=1)
+ self.assertFileEqual('''\
+<<<<<<< TREE
+first offline change in u1
+=======
+altered in u2
+>>>>>>> MERGE-SOURCE
+''',
+ 'u1/hosts')
+
+ self.run_bzr('resolved u1/hosts')
+ # add a text change here to represent resolving the merge conflicts in
+ # favour of a new version of the file not identical to either the u1
+ # version or the u2 version.
+ self.build_tree_contents([('u1/hosts', 'merge resolution\n')])
+ self.run_bzr('commit -m checkin-merge-of-the-offline-work-from-u1 u1')
+
+ def test_commit_exclude_excludes_modified_files(self):
+ """Commit -x foo should ignore changes to foo."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'c'])
+ tree.smart_add(['.'])
+ out, err = self.run_bzr(['commit', '-m', 'test', '-x', 'b'])
+ self.assertFalse('added b' in out)
+ self.assertFalse('added b' in err)
+ # If b was excluded it will still be 'added' in status.
+ out, err = self.run_bzr(['added'])
+ self.assertEqual('b\n', out)
+ self.assertEqual('', err)
+
+ def test_commit_exclude_twice_uses_both_rules(self):
+ """Commit -x foo -x bar should ignore changes to foo and bar."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'c'])
+ tree.smart_add(['.'])
+ out, err = self.run_bzr(['commit', '-m', 'test', '-x', 'b', '-x', 'c'])
+ self.assertFalse('added b' in out)
+ self.assertFalse('added c' in out)
+ self.assertFalse('added b' in err)
+ self.assertFalse('added c' in err)
+ # If b was excluded it will still be 'added' in status.
+ out, err = self.run_bzr(['added'])
+ self.assertTrue('b\n' in out)
+ self.assertTrue('c\n' in out)
+ self.assertEqual('', err)
+
+ def test_commit_respects_spec_for_removals(self):
+ """Commit with a file spec should only commit removals that match"""
+ t = self.make_branch_and_tree('.')
+ self.build_tree(['file-a', 'dir-a/', 'dir-a/file-b'])
+ t.add(['file-a', 'dir-a', 'dir-a/file-b'])
+ t.commit('Create')
+ t.remove(['file-a', 'dir-a/file-b'])
+ result = self.run_bzr('commit . -m removed-file-b',
+ working_dir='dir-a')[1]
+ self.assertNotContainsRe(result, 'file-a')
+ result = self.run_bzr('status', working_dir='dir-a')[0]
+ self.assertContainsRe(result, 'removed:\n file-a')
+
+ def test_strict_commit(self):
+ """Commit with --strict works if everything is known"""
+ ignores._set_user_ignores([])
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add('a')
+ # A simple change should just work
+ self.run_bzr('commit --strict -m adding-a', working_dir='tree')
+
+ def test_strict_commit_no_changes(self):
+ """commit --strict gives "no changes" if there is nothing to commit"""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add('a')
+ tree.commit('adding a')
+
+ # With no changes, it should just be 'no changes'
+ # Make sure that commit is failing because there is nothing to do
+ self.run_bzr_error(['No changes to commit'],
+ 'commit --strict -m no-changes',
+ working_dir='tree')
+
+ # But --strict doesn't care if you supply --unchanged
+ self.run_bzr('commit --strict --unchanged -m no-changes',
+ working_dir='tree')
+
+ def test_strict_commit_unknown(self):
+ """commit --strict fails if a file is unknown"""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add('a')
+ tree.commit('adding a')
+
+ # Add one file so there is a change, but forget the other
+ self.build_tree(['tree/b', 'tree/c'])
+ tree.add('b')
+ self.run_bzr_error(['Commit refused because there are unknown files'],
+ 'commit --strict -m add-b',
+ working_dir='tree')
+
+ # --no-strict overrides --strict
+ self.run_bzr('commit --strict -m add-b --no-strict',
+ working_dir='tree')
+
+ def test_fixes_bug_output(self):
+ """commit --fixes=lp:23452 succeeds without output."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ output, err = self.run_bzr(
+ 'commit -m hello --fixes=lp:23452 tree/hello.txt')
+ self.assertEqual('', output)
+ self.assertContainsRe(err, 'Committing to: .*\n'
+ 'added hello\.txt\n'
+ 'Committed revision 1\.\n')
+
+ def test_no_bugs_no_properties(self):
+ """If no bugs are fixed, the bugs property is not set.
+
+ see https://beta.launchpad.net/bzr/+bug/109613
+ """
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr( 'commit -m hello tree/hello.txt')
+ # Get the revision properties, ignoring the branch-nick property, which
+ # we don't care about for this test.
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = dict(last_rev.properties)
+ del properties['branch-nick']
+ self.assertFalse('bugs' in properties)
+
+ def test_fixes_bug_sets_property(self):
+ """commit --fixes=lp:234 sets the lp:234 revprop to 'fixed'."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr('commit -m hello --fixes=lp:234 tree/hello.txt')
+
+ # Get the revision properties, ignoring the branch-nick property, which
+ # we don't care about for this test.
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = dict(last_rev.properties)
+ del properties['branch-nick']
+
+ self.assertEqual({'bugs': 'https://launchpad.net/bugs/234 fixed'},
+ properties)
+
+ def test_fixes_multiple_bugs_sets_properties(self):
+ """--fixes can be used more than once to show that bugs are fixed."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr('commit -m hello --fixes=lp:123 --fixes=lp:235'
+ ' tree/hello.txt')
+
+ # Get the revision properties, ignoring the branch-nick property, which
+ # we don't care about for this test.
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = dict(last_rev.properties)
+ del properties['branch-nick']
+
+ self.assertEqual(
+ {'bugs': 'https://launchpad.net/bugs/123 fixed\n'
+ 'https://launchpad.net/bugs/235 fixed'},
+ properties)
+
+ def test_fixes_bug_with_alternate_trackers(self):
+ """--fixes can be used on a properly configured branch to mark bug
+ fixes on multiple trackers.
+ """
+ tree = self.make_branch_and_tree('tree')
+ tree.branch.get_config().set_user_option(
+ 'trac_twisted_url', 'http://twistedmatrix.com/trac')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr('commit -m hello --fixes=lp:123 --fixes=twisted:235 tree/')
+
+ # Get the revision properties, ignoring the branch-nick property, which
+ # we don't care about for this test.
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = dict(last_rev.properties)
+ del properties['branch-nick']
+
+ self.assertEqual(
+ {'bugs': 'https://launchpad.net/bugs/123 fixed\n'
+ 'http://twistedmatrix.com/trac/ticket/235 fixed'},
+ properties)
+
+ def test_fixes_unknown_bug_prefix(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr_error(
+ ["Unrecognized bug %s. Commit refused." % 'xxx:123'],
+ 'commit -m add-b --fixes=xxx:123',
+ working_dir='tree')
+
+ def test_fixes_bug_with_default_tracker(self):
+ """commit --fixes=234 uses the default bug tracker."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr_error(
+ ["bzr: ERROR: No tracker specified for bug 123. Use the form "
+ "'tracker:id' or specify a default bug tracker using the "
+ "`bugtracker` option.\n"
+ "See \"bzr help bugs\" for more information on this feature. "
+ "Commit refused."],
+ 'commit -m add-b --fixes=123',
+ working_dir='tree')
+ tree.branch.get_config_stack().set("bugtracker", "lp")
+ self.run_bzr('commit -m hello --fixes=234 tree/hello.txt')
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ self.assertEqual('https://launchpad.net/bugs/234 fixed',
+ last_rev.properties['bugs'])
+
+ def test_fixes_invalid_bug_number(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr_error(
+ ["Did not understand bug identifier orange: Must be an integer. "
+ "See \"bzr help bugs\" for more information on this feature.\n"
+ "Commit refused."],
+ 'commit -m add-b --fixes=lp:orange',
+ working_dir='tree')
+
+ def test_fixes_invalid_argument(self):
+ """Raise an appropriate error when the fixes argument isn't tag:id."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr_error(
+ [r"Invalid bug orange:apples:bananas. Must be in the form of "
+ r"'tracker:id'\. See \"bzr help bugs\" for more information on "
+ r"this feature.\nCommit refused\."],
+ 'commit -m add-b --fixes=orange:apples:bananas',
+ working_dir='tree')
+
+ def test_no_author(self):
+ """If the author is not specified, the author property is not set."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr( 'commit -m hello tree/hello.txt')
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = last_rev.properties
+ self.assertFalse('author' in properties)
+
+ def test_author_sets_property(self):
+ """commit --author='John Doe <jdoe@example.com>' sets the author
+ revprop.
+ """
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ self.run_bzr(["commit", '-m', 'hello',
+ '--author', u'John D\xf6 <jdoe@example.com>',
+ "tree/hello.txt"])
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = last_rev.properties
+ self.assertEqual(u'John D\xf6 <jdoe@example.com>', properties['authors'])
+
+ def test_author_no_email(self):
+ """Author's name without an email address is allowed, too."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit -m hello --author='John Doe' "
+ "tree/hello.txt")
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = last_rev.properties
+ self.assertEqual('John Doe', properties['authors'])
+
+ def test_multiple_authors(self):
+ """Multiple authors can be specyfied, and all are stored."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit -m hello --author='John Doe' "
+ "--author='Jane Rey' tree/hello.txt")
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ properties = last_rev.properties
+ self.assertEqual('John Doe\nJane Rey', properties['authors'])
+
+ def test_commit_time(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit -m hello "
+ "--commit-time='2009-10-10 08:00:00 +0100' tree/hello.txt")
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ self.assertEqual(
+ 'Sat 2009-10-10 08:00:00 +0100',
+ osutils.format_date(last_rev.timestamp, last_rev.timezone))
+
+ def test_commit_time_bad_time(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit -m hello "
+ "--commit-time='NOT A TIME' tree/hello.txt", retcode=3)
+ self.assertStartsWith(
+ err, "bzr: ERROR: Could not parse --commit-time:")
+
+ def test_commit_time_missing_tz(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit -m hello "
+ "--commit-time='2009-10-10 08:00:00' tree/hello.txt", retcode=3)
+ self.assertStartsWith(
+ err, "bzr: ERROR: Could not parse --commit-time:")
+ # Test that it is actually checking and does not simply crash with
+ # some other exception
+ self.assertContainsString(err, "missing a timezone offset")
+
+ def test_partial_commit_with_renames_in_tree(self):
+ # this test illustrates bug #140419
+ t = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/a', 'test'])
+ t.add(['dir/', 'dir/a', 'test'])
+ t.commit('initial commit')
+ # important part: file dir/a should change parent
+ # and should appear before old parent
+ # then during partial commit we have error
+ # parent_id {dir-XXX} not in inventory
+ t.rename_one('dir/a', 'a')
+ self.build_tree_contents([('test', 'changes in test')])
+ # partial commit
+ out, err = self.run_bzr('commit test -m "partial commit"')
+ self.assertEquals('', out)
+ self.assertContainsRe(err, r'modified test\nCommitted revision 2.')
+
+ def test_commit_readonly_checkout(self):
+ # https://bugs.launchpad.net/bzr/+bug/129701
+ # "UnlockableTransport error trying to commit in checkout of readonly
+ # branch"
+ self.make_branch('master')
+ master = ControlDir.open_from_transport(
+ self.get_readonly_transport('master')).open_branch()
+ master.create_checkout('checkout')
+ out, err = self.run_bzr(['commit', '--unchanged', '-mfoo', 'checkout'],
+ retcode=3)
+ self.assertContainsRe(err,
+ r'^bzr: ERROR: Cannot lock.*readonly transport')
+
+ def setup_editor(self):
+ # Test that commit template hooks work
+ if sys.platform == "win32":
+ f = file('fed.bat', 'w')
+ f.write('@rem dummy fed')
+ f.close()
+ self.overrideEnv('BZR_EDITOR', "fed.bat")
+ else:
+ f = file('fed.sh', 'wb')
+ f.write('#!/bin/sh\n')
+ f.close()
+ os.chmod('fed.sh', 0755)
+ self.overrideEnv('BZR_EDITOR', "./fed.sh")
+
+ def setup_commit_with_template(self):
+ self.setup_editor()
+ msgeditor.hooks.install_named_hook("commit_message_template",
+ lambda commit_obj, msg: "save me some typing\n", None)
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ return tree
+
+ def test_edit_empty_message(self):
+ tree = self.make_branch_and_tree('tree')
+ self.setup_editor()
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit tree/hello.txt", retcode=3,
+ stdin="y\n")
+ self.assertContainsRe(err,
+ "bzr: ERROR: Empty commit message specified")
+
+ def test_commit_hook_template_accepted(self):
+ tree = self.setup_commit_with_template()
+ out, err = self.run_bzr("commit tree/hello.txt", stdin="y\n")
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ self.assertEqual('save me some typing\n', last_rev.message)
+
+ def test_commit_hook_template_rejected(self):
+ tree = self.setup_commit_with_template()
+ expected = tree.last_revision()
+ out, err = self.run_bzr_error(["Empty commit message specified."
+ " Please specify a commit message with either"
+ " --message or --file or leave a blank message"
+ " with --message \"\"."],
+ "commit tree/hello.txt", stdin="n\n")
+ self.assertEqual(expected, tree.last_revision())
+
+ def test_set_commit_message(self):
+ msgeditor.hooks.install_named_hook("set_commit_message",
+ lambda commit_obj, msg: "save me some typing\n", None)
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello.txt'])
+ tree.add('hello.txt')
+ out, err = self.run_bzr("commit tree/hello.txt")
+ last_rev = tree.branch.repository.get_revision(tree.last_revision())
+ self.assertEqual('save me some typing\n', last_rev.message)
+
+ def test_commit_without_username(self):
+ """Ensure commit error if username is not set.
+ """
+ self.run_bzr(['init', 'foo'])
+ with open('foo/foo.txt', 'w') as f:
+ f.write('hello')
+ self.run_bzr(['add'], working_dir='foo')
+ self.overrideEnv('EMAIL', None)
+ self.overrideEnv('BZR_EMAIL', None)
+ # Also, make sure that it's not inferred from mailname.
+ self.overrideAttr(config, '_auto_user_id',
+ lambda: (None, None))
+ self.run_bzr_error(
+ ['Unable to determine your name'],
+ ['commit', '-m', 'initial'], working_dir='foo')
+
+ def test_commit_recursive_checkout(self):
+ """Ensure that a commit to a recursive checkout fails cleanly.
+ """
+ self.run_bzr(['init', 'test_branch'])
+ self.run_bzr(['checkout', 'test_branch', 'test_checkout'])
+ self.run_bzr(['bind', '.'], working_dir='test_checkout') # bind to self
+ with open('test_checkout/foo.txt', 'w') as f:
+ f.write('hello')
+ self.run_bzr(['add'], working_dir='test_checkout')
+ out, err = self.run_bzr_error(
+ ['Branch.*test_checkout.*appears to be bound to itself'],
+ ['commit', '-m', 'addedfoo'], working_dir='test_checkout')
+
+ def test_mv_dirs_non_ascii(self):
+ """Move directory with non-ascii name and containing files.
+
+ Regression test for bug 185211.
+ """
+ tree = self.make_branch_and_tree('.')
+ self.build_tree([u'abc\xa7/', u'abc\xa7/foo'])
+
+ tree.add([u'abc\xa7/', u'abc\xa7/foo'])
+ tree.commit('checkin')
+
+ tree.rename_one(u'abc\xa7','abc')
+
+ self.run_bzr('ci -m "non-ascii mv"')
+
+
+class TestSmartServerCommit(TestCaseWithTransport):
+
+ def test_commit_to_lightweight(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ out, err = self.run_bzr(['checkout', '--lightweight', self.get_url('from'),
+ 'target'])
+ self.reset_smart_call_log()
+ self.build_tree(['target/afile'])
+ self.run_bzr(['add', 'target/afile'])
+ out, err = self.run_bzr(['commit', '-m', 'do something', 'target'])
+        # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(211, self.hpss_calls)
+ self.assertLength(2, self.hpss_connections)
+ self.expectFailure("commit still uses VFS calls",
+ self.assertThat, self.hpss_calls, ContainsNoVfsCalls)
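As an aside, the two msgeditor hooks exercised above differ in whether the
editor still runs: "commit_message_template" only pre-fills the editor buffer,
while "set_commit_message" fixes the message outright. A minimal sketch of
installing the template hook, assuming only the hook names and signatures used
in the tests above (illustrative note, not part of this diff):

    from bzrlib import msgeditor

    def suggest_template(commit_obj, start_message):
        # Pre-fill the editor buffer; the user can still edit or reject it.
        return "prefilled message\n"

    msgeditor.hooks.install_named_hook(
        "commit_message_template", suggest_template, "example template hook")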
diff --git a/bzrlib/tests/blackbox/test_config.py b/bzrlib/tests/blackbox/test_config.py
new file mode 100644
index 0000000..4a92281
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_config.py
@@ -0,0 +1,384 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr config."""
+
+from bzrlib import (
+ config,
+ tests,
+ )
+from bzrlib.tests import (
+ script,
+ test_config as _t_config,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestWithoutConfig(tests.TestCaseWithTransport):
+
+ def test_config_all(self):
+ out, err = self.run_bzr(['config'])
+ self.assertEquals('', out)
+ self.assertEquals('', err)
+
+ def test_remove_unknown_option(self):
+ self.run_bzr_error(['The "file" configuration option does not exist',],
+ ['config', '--remove', 'file'])
+
+ def test_all_remove_exclusive(self):
+ self.run_bzr_error(['--all and --remove are mutually exclusive.',],
+ ['config', '--remove', '--all'])
+
+ def test_all_set_exclusive(self):
+ self.run_bzr_error(['Only one option can be set.',],
+ ['config', '--all', 'hello=world'])
+
+ def test_remove_no_option(self):
+ self.run_bzr_error(['--remove expects an option to remove.',],
+ ['config', '--remove'])
+
+ def test_unknown_option(self):
+ self.run_bzr_error(['The "file" configuration option does not exist',],
+ ['config', 'file'])
+
+ def test_unexpected_regexp(self):
+ self.run_bzr_error(
+ ['The "\*file" configuration option does not exist',],
+ ['config', '*file'])
+
+ def test_wrong_regexp(self):
+ self.run_bzr_error(
+ ['Invalid pattern\(s\) found. "\*file" nothing to repeat',],
+ ['config', '--all', '*file'])
+
+
+
+class TestConfigDisplay(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigDisplay, self).setUp()
+ _t_config.create_configs(self)
+
+ def test_multiline_all_values(self):
+ self.bazaar_config.set_user_option('multiline', '1\n2\n')
+ # Fallout from bug 710410, the triple quotes have been toggled
+ script.run_script(self, '''\
+ $ bzr config -d tree
+ bazaar:
+ [DEFAULT]
+ multiline = """1
+ 2
+ """
+ ''')
+
+ def test_multiline_value_only(self):
+ self.bazaar_config.set_user_option('multiline', '1\n2\n')
+ # Fallout from bug 710410, the triple quotes have been toggled
+ script.run_script(self, '''\
+ $ bzr config -d tree multiline
+ """1
+ 2
+ """
+ ''')
+
+ def test_list_value_all(self):
+ config.option_registry.register(config.ListOption('list'))
+ self.addCleanup(config.option_registry.remove, 'list')
+ self.bazaar_config.set_user_option('list', [1, 'a', 'with, a comma'])
+ script.run_script(self, '''\
+ $ bzr config -d tree
+ bazaar:
+ [DEFAULT]
+ list = 1, a, "with, a comma"
+ ''')
+
+ def test_list_value_one(self):
+ config.option_registry.register(config.ListOption('list'))
+ self.addCleanup(config.option_registry.remove, 'list')
+ self.bazaar_config.set_user_option('list', [1, 'a', 'with, a comma'])
+ script.run_script(self, '''\
+ $ bzr config -d tree list
+ 1, a, "with, a comma"
+ ''')
+
+ def test_registry_value_all(self):
+ self.bazaar_config.set_user_option('bzr.transform.orphan_policy',
+ u'move')
+ script.run_script(self, '''\
+ $ bzr config -d tree
+ bazaar:
+ [DEFAULT]
+ bzr.transform.orphan_policy = move
+ ''')
+
+ def test_registry_value_one(self):
+ self.bazaar_config.set_user_option('bzr.transform.orphan_policy',
+ u'move')
+ script.run_script(self, '''\
+ $ bzr config -d tree bzr.transform.orphan_policy
+ move
+ ''')
+
+ def test_bazaar_config(self):
+ self.bazaar_config.set_user_option('hello', 'world')
+ script.run_script(self, '''\
+ $ bzr config -d tree
+ bazaar:
+ [DEFAULT]
+ hello = world
+ ''')
+
+ def test_locations_config_for_branch(self):
+ self.locations_config.set_user_option('hello', 'world')
+ self.branch_config.set_user_option('hello', 'you')
+ script.run_script(self, '''\
+ $ bzr config -d tree
+ locations:
+ [.../tree]
+ hello = world
+ branch:
+ hello = you
+ ''')
+
+ def test_locations_config_outside_branch(self):
+ self.bazaar_config.set_user_option('hello', 'world')
+ self.locations_config.set_user_option('hello', 'world')
+ script.run_script(self, '''\
+ $ bzr config
+ bazaar:
+ [DEFAULT]
+ hello = world
+ ''')
+
+ def test_cmd_line(self):
+ self.bazaar_config.set_user_option('hello', 'world')
+ script.run_script(self, '''\
+ $ bzr config -Ohello=bzr
+ cmdline:
+ hello = bzr
+ bazaar:
+ [DEFAULT]
+ hello = world
+ ''')
+
+
+class TestConfigDisplayWithPolicy(tests.TestCaseWithTransport):
+
+ def test_location_with_policy(self):
+ # LocationConfig is the only one dealing with policies so far.
+ self.make_branch_and_tree('tree')
+ config_text = """\
+[%(dir)s]
+url = dir
+url:policy = appendpath
+[%(dir)s/tree]
+url = tree
+""" % {'dir': self.test_dir}
+        # We don't use the config object directly, so we save it to disk
+ config.LocationConfig.from_string(config_text, 'tree', save=True)
+        # Policies are displayed with their options since they are part of
+        # their definition. Likewise, the path is not appended: we are just
+        # presenting the relevant portions of the config files.
+ script.run_script(self, '''\
+ $ bzr config -d tree --all url
+ locations:
+ [.../work/tree]
+ url = tree
+ [.../work]
+ url = dir
+ url:policy = appendpath
+ ''')
+
+
+class TestConfigActive(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigActive, self).setUp()
+ _t_config.create_configs_with_file_option(self)
+
+ def test_active_in_locations(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree file
+ locations
+ ''')
+
+ def test_active_in_bazaar(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope bazaar file
+ bazaar
+ ''')
+
+ def test_active_in_branch(self):
+ # We need to delete the locations definition that overrides the branch
+ # one
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope locations --remove file
+ $ bzr config -d tree file
+ branch
+ ''')
+
+
+class TestConfigSetOption(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigSetOption, self).setUp()
+ _t_config.create_configs(self)
+
+ def test_unknown_config(self):
+ self.run_bzr_error(['The "moon" configuration does not exist'],
+ ['config', '--scope', 'moon', 'hello=world'])
+
+ def test_bazaar_config_outside_branch(self):
+ script.run_script(self, '''\
+ $ bzr config --scope bazaar hello=world
+ $ bzr config -d tree --all hello
+ bazaar:
+ [DEFAULT]
+ hello = world
+ ''')
+
+ def test_bazaar_config_inside_branch(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope bazaar hello=world
+ $ bzr config -d tree --all hello
+ bazaar:
+ [DEFAULT]
+ hello = world
+ ''')
+
+ def test_locations_config_inside_branch(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope locations hello=world
+ $ bzr config -d tree --all hello
+ locations:
+ [.../work/tree]
+ hello = world
+ ''')
+
+ def test_branch_config_default(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree hello=world
+ $ bzr config -d tree --all hello
+ branch:
+ hello = world
+ ''')
+
+ def test_branch_config_forcing_branch(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope branch hello=world
+ $ bzr config -d tree --all hello
+ branch:
+ hello = world
+ ''')
+
+
+class TestConfigRemoveOption(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigRemoveOption, self).setUp()
+ _t_config.create_configs_with_file_option(self)
+
+ def test_unknown_config(self):
+ self.run_bzr_error(['The "moon" configuration does not exist'],
+ ['config', '--scope', 'moon', '--remove', 'file'])
+
+ def test_bazaar_config_outside_branch(self):
+ script.run_script(self, '''\
+ $ bzr config --scope bazaar --remove file
+ $ bzr config -d tree --all file
+ locations:
+ [.../work/tree]
+ file = locations
+ branch:
+ file = branch
+ ''')
+
+ def test_bazaar_config_inside_branch(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope bazaar --remove file
+ $ bzr config -d tree --all file
+ locations:
+ [.../work/tree]
+ file = locations
+ branch:
+ file = branch
+ ''')
+
+ def test_locations_config_inside_branch(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope locations --remove file
+ $ bzr config -d tree --all file
+ branch:
+ file = branch
+ bazaar:
+ [DEFAULT]
+ file = bazaar
+ ''')
+
+ def test_branch_config_default(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope locations --remove file
+ $ bzr config -d tree --all file
+ branch:
+ file = branch
+ bazaar:
+ [DEFAULT]
+ file = bazaar
+ ''')
+ script.run_script(self, '''\
+ $ bzr config -d tree --remove file
+ $ bzr config -d tree --all file
+ bazaar:
+ [DEFAULT]
+ file = bazaar
+ ''')
+
+ def test_branch_config_forcing_branch(self):
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope branch --remove file
+ $ bzr config -d tree --all file
+ locations:
+ [.../work/tree]
+ file = locations
+ bazaar:
+ [DEFAULT]
+ file = bazaar
+ ''')
+ script.run_script(self, '''\
+ $ bzr config -d tree --scope locations --remove file
+ $ bzr config -d tree --all file
+ bazaar:
+ [DEFAULT]
+ file = bazaar
+ ''')
+
+
+class TestSmartServerConfig(tests.TestCaseWithTransport):
+
+ def test_simple_branch_config(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['config', '-d', self.get_url('branch')])
+        # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(5, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
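For context on the appendpath policy shown in TestConfigDisplayWithPolicy: an
"<option>:policy = appendpath" entry in locations.conf is meant to append the
path from the matching section to the queried location onto the option value.
A rough sketch of that behaviour, using only the LocationConfig.from_string
helper already used above (illustrative assumption, not part of this diff):

    from bzrlib import config

    text = ("[/work]\n"
            "url = http://example.com/base\n"
            "url:policy = appendpath\n")
    c = config.LocationConfig.from_string(text, '/work/tree')
    # With appendpath, the extra path below [/work] ('tree') is expected to
    # be appended, yielding http://example.com/base/tree.
    print c.get_user_option('url')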
diff --git a/bzrlib/tests/blackbox/test_conflicts.py b/bzrlib/tests/blackbox/test_conflicts.py
new file mode 100644
index 0000000..3f6be64
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_conflicts.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ conflicts,
+ tests,
+ workingtree,
+ )
+from bzrlib.tests import script, features
+
+
+def make_tree_with_conflicts(test, this_path='this', other_path='other',
+ prefix='my'):
+ this_tree = test.make_branch_and_tree(this_path)
+ test.build_tree_contents([
+ ('%s/%sfile' % (this_path, prefix), 'this content\n'),
+ ('%s/%s_other_file' % (this_path, prefix), 'this content\n'),
+ ('%s/%sdir/' % (this_path, prefix),),
+ ])
+ this_tree.add(prefix+'file')
+ this_tree.add(prefix+'_other_file')
+ this_tree.add(prefix+'dir')
+ this_tree.commit(message="new")
+ other_tree = this_tree.bzrdir.sprout(other_path).open_workingtree()
+ test.build_tree_contents([
+ ('%s/%sfile' % (other_path, prefix), 'contentsb\n'),
+ ('%s/%s_other_file' % (other_path, prefix), 'contentsb\n'),
+ ])
+ other_tree.rename_one(prefix+'dir', prefix+'dir2')
+ other_tree.commit(message="change")
+ test.build_tree_contents([
+ ('%s/%sfile' % (this_path, prefix), 'contentsa2\n'),
+ ('%s/%s_other_file' % (this_path, prefix), 'contentsa2\n'),
+ ])
+ this_tree.rename_one(prefix+'dir', prefix+'dir3')
+ this_tree.commit(message='change')
+ this_tree.merge_from_branch(other_tree.branch)
+ return this_tree, other_tree
+
+
+class TestConflicts(script.TestCaseWithTransportAndScript):
+
+ def setUp(self):
+ super(TestConflicts, self).setUp()
+ make_tree_with_conflicts(self, 'branch', 'other')
+
+ def test_conflicts(self):
+ self.run_script("""\
+$ cd branch
+$ bzr conflicts
+Text conflict in my_other_file
+Path conflict: mydir3 / mydir2
+Text conflict in myfile
+""")
+
+ def test_conflicts_text(self):
+ self.run_script("""\
+$ cd branch
+$ bzr conflicts --text
+my_other_file
+myfile
+""")
+
+ def test_conflicts_directory(self):
+ self.run_script("""\
+$ bzr conflicts -d branch
+Text conflict in my_other_file
+Path conflict: mydir3 / mydir2
+Text conflict in myfile
+""")
+
+
+class TestUnicodePaths(tests.TestCaseWithTransport):
+ """Unicode characters in conflicts should be displayed properly"""
+
+ _test_needs_features = [features.UnicodeFilenameFeature]
+ encoding = "UTF-8"
+
+ def _as_output(self, text):
+ return text
+
+ def test_messages(self):
+ """Conflict messages involving non-ascii paths are displayed okay"""
+ make_tree_with_conflicts(self, "branch", prefix=u"\xA7")
+ out, err = self.run_bzr(["conflicts", "-d", "branch"],
+ encoding=self.encoding)
+ self.assertEqual(out.decode(self.encoding),
+ u"Text conflict in \xA7_other_file\n"
+ u"Path conflict: \xA7dir3 / \xA7dir2\n"
+ u"Text conflict in \xA7file\n")
+ self.assertEqual(err, "")
+
+ def test_text_conflict_paths(self):
+ """Text conflicts on non-ascii paths are displayed okay"""
+ make_tree_with_conflicts(self, "branch", prefix=u"\xA7")
+ out, err = self.run_bzr(["conflicts", "-d", "branch", "--text"],
+ encoding=self.encoding)
+ self.assertEqual(out.decode(self.encoding),
+ u"\xA7_other_file\n"
+ u"\xA7file\n")
+ self.assertEqual(err, "")
+
+
+class TestUnicodePathsOnAsciiTerminal(TestUnicodePaths):
+ """Undisplayable unicode characters in conflicts should be escaped"""
+
+ encoding = "ascii"
+
+ def setUp(self):
+ self.skip("Need to decide if replacing is the desired behaviour")
+
+ def _as_output(self, text):
+ return text.encode(self.encoding, "replace")
diff --git a/bzrlib/tests/blackbox/test_debug.py b/bzrlib/tests/blackbox/test_debug.py
new file mode 100644
index 0000000..2acbdd4
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_debug.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Blackbox tests for -D debug options"""
+
+import os
+import signal
+import sys
+import time
+
+from bzrlib import debug, tests
+
+
+class TestDebugOption(tests.TestCaseInTempDir):
+
+ def test_dash_derror(self):
+ """With -Derror, tracebacks are shown even for user errors"""
+ out, err = self.run_bzr("-Derror branch nonexistent-location",
+ retcode=3)
+ # error output should contain a traceback; we used to look for code in
+ # here but it may be missing if the source is not in sync with the
+ # pyc file.
+ self.assertContainsRe(err, "Traceback \\(most recent call last\\)")
+
+ def test_dash_dlock(self):
+ # With -Dlock, locking and unlocking is recorded into the log
+ self.run_bzr("-Dlock init foo")
+ self.assertContainsRe(self.get_log(), "lock_write")
+
+
+class TestDebugBytes(tests.TestCaseWithTransport):
+
+ def test_bytes_reports_activity(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/one'])
+ tree.add('one')
+ rev_id = tree.commit('first')
+ remote_trans = self.make_smart_server('.')
+ # I would like to avoid run_bzr_subprocess here, but we need it to be
+ # connected to a real TextUIFactory. The NullProgressView always
+ # ignores transport activity.
+ env = {'BZR_PROGRESS_BAR': 'text'}
+ out, err = self.run_bzr_subprocess('branch -Dbytes %s/tree target'
+ % (remote_trans.base,),
+ env_changes=env)
+ self.assertContainsRe(err, 'Branched 1 revision')
+ self.assertContainsRe(err, 'Transferred:.*kB')
diff --git a/bzrlib/tests/blackbox/test_deleted.py b/bzrlib/tests/blackbox/test_deleted.py
new file mode 100644
index 0000000..b40ba27
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_deleted.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for 'bzr deleted', which shows newly deleted files."""
+
+import os
+
+from bzrlib.branch import Branch
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestDeleted(TestCaseWithTransport):
+
+ def test_deleted_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ tree.add('README')
+ tree.commit('r1')
+ tree.remove('README')
+ out, err = self.run_bzr(['deleted', '--directory=a'])
+ self.assertEquals('README\n', out)
diff --git a/bzrlib/tests/blackbox/test_diff.py b/bzrlib/tests/blackbox/test_diff.py
new file mode 100644
index 0000000..9a33dbb
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_diff.py
@@ -0,0 +1,424 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr diff."""
+
+import os
+import re
+
+from bzrlib import (
+ tests,
+ workingtree,
+ )
+from bzrlib.diff import (
+ DiffTree,
+ format_registry as diff_format_registry,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+def subst_dates(string):
+ """Replace date strings with constant values."""
+ return re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [-\+]\d{4}',
+ 'YYYY-MM-DD HH:MM:SS +ZZZZ', string)
+
+
+class DiffBase(tests.TestCaseWithTransport):
+ """Base class with common setup method"""
+
+ def make_example_branch(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([
+ ('hello', 'foo\n'),
+ ('goodbye', 'baz\n')])
+ tree.add(['hello'])
+ tree.commit('setup')
+ tree.add(['goodbye'])
+ tree.commit('setup')
+ return tree
+
+
+class TestDiff(DiffBase):
+
+ def test_diff(self):
+ tree = self.make_example_branch()
+ self.build_tree_contents([('hello', 'hello world!')])
+ tree.commit(message='fixing hello')
+ output = self.run_bzr('diff -r 2..3', retcode=1)[0]
+ self.assert_('\n+hello world!' in output)
+ output = self.run_bzr('diff -c 3', retcode=1)[0]
+ self.assert_('\n+hello world!' in output)
+ output = self.run_bzr('diff -r last:3..last:1', retcode=1)[0]
+ self.assert_('\n+baz' in output)
+ output = self.run_bzr('diff -c last:2', retcode=1)[0]
+ self.assert_('\n+baz' in output)
+ self.build_tree(['moo'])
+ tree.add('moo')
+ os.unlink('moo')
+ self.run_bzr('diff')
+
+ def test_diff_prefix(self):
+ """diff --prefix appends to filenames in output"""
+ self.make_example_branch()
+ self.build_tree_contents([('hello', 'hello world!\n')])
+ out, err = self.run_bzr('diff --prefix old/:new/', retcode=1)
+ self.assertEquals(err, '')
+ self.assertEqualDiff(subst_dates(out), '''\
+=== modified file 'hello'
+--- old/hello\tYYYY-MM-DD HH:MM:SS +ZZZZ
++++ new/hello\tYYYY-MM-DD HH:MM:SS +ZZZZ
+@@ -1,1 +1,1 @@
+-foo
++hello world!
+
+''')
+
+ def test_diff_illegal_prefix_value(self):
+ # There was an error in error reporting for this option
+ out, err = self.run_bzr('diff --prefix old/', retcode=3)
+ self.assertContainsRe(err,
+ '--prefix expects two values separated by a colon')
+
+ def test_diff_p1(self):
+ """diff -p1 produces lkml-style diffs"""
+ self.make_example_branch()
+ self.build_tree_contents([('hello', 'hello world!\n')])
+ out, err = self.run_bzr('diff -p1', retcode=1)
+ self.assertEquals(err, '')
+ self.assertEqualDiff(subst_dates(out), '''\
+=== modified file 'hello'
+--- old/hello\tYYYY-MM-DD HH:MM:SS +ZZZZ
++++ new/hello\tYYYY-MM-DD HH:MM:SS +ZZZZ
+@@ -1,1 +1,1 @@
+-foo
++hello world!
+
+''')
+
+ def test_diff_p0(self):
+ """diff -p0 produces diffs with no prefix"""
+ self.make_example_branch()
+ self.build_tree_contents([('hello', 'hello world!\n')])
+ out, err = self.run_bzr('diff -p0', retcode=1)
+ self.assertEquals(err, '')
+ self.assertEqualDiff(subst_dates(out), '''\
+=== modified file 'hello'
+--- hello\tYYYY-MM-DD HH:MM:SS +ZZZZ
++++ hello\tYYYY-MM-DD HH:MM:SS +ZZZZ
+@@ -1,1 +1,1 @@
+-foo
++hello world!
+
+''')
+
+ def test_diff_nonexistent(self):
+ # Get an error from a file that does not exist at all
+ # (Malone #3619)
+ self.make_example_branch()
+ out, err = self.run_bzr('diff does-not-exist', retcode=3,
+ error_regexes=('not versioned.*does-not-exist',))
+
+ def test_diff_illegal_revision_specifiers(self):
+ out, err = self.run_bzr('diff -r 1..23..123', retcode=3,
+ error_regexes=('one or two revision specifiers',))
+
+ def test_diff_using_and_format(self):
+ out, err = self.run_bzr('diff --format=default --using=mydi', retcode=3,
+ error_regexes=('are mutually exclusive',))
+
+ def test_diff_nonexistent_revision(self):
+ out, err = self.run_bzr('diff -r 123', retcode=3,
+ error_regexes=("Requested revision: '123' does not "
+ "exist in branch:",))
+
+ def test_diff_nonexistent_dotted_revision(self):
+ out, err = self.run_bzr('diff -r 1.1', retcode=3)
+ self.assertContainsRe(err,
+ "Requested revision: '1.1' does not exist in branch:")
+
+ def test_diff_nonexistent_dotted_revision_change(self):
+ out, err = self.run_bzr('diff -c 1.1', retcode=3)
+ self.assertContainsRe(err,
+ "Requested revision: '1.1' does not exist in branch:")
+
+ def test_diff_unversioned(self):
+ # Get an error when diffing a non-versioned file.
+ # (Malone #3619)
+ self.make_example_branch()
+ self.build_tree(['unversioned-file'])
+ out, err = self.run_bzr('diff unversioned-file', retcode=3)
+ self.assertContainsRe(err, 'not versioned.*unversioned-file')
+
+ # TODO: What should diff say for a file deleted in working tree?
+
+ def example_branches(self):
+ branch1_tree = self.make_branch_and_tree('branch1')
+ self.build_tree(['branch1/file'], line_endings='binary')
+ self.build_tree(['branch1/file2'], line_endings='binary')
+ branch1_tree.add('file')
+ branch1_tree.add('file2')
+ branch1_tree.commit(message='add file and file2')
+ branch2_tree = branch1_tree.bzrdir.sprout('branch2').open_workingtree()
+ self.build_tree_contents([('branch2/file', 'new content\n')])
+ branch2_tree.commit(message='update file')
+ return branch1_tree, branch2_tree
+
+ def check_b2_vs_b1(self, cmd):
+ # Compare branch2 vs branch1 using cmd and check the result
+ out, err = self.run_bzr(cmd, retcode=1)
+ self.assertEquals('', err)
+ self.assertEquals("=== modified file 'file'\n"
+ "--- file\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"
+ "+++ file\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"
+ "@@ -1,1 +1,1 @@\n"
+ "-new content\n"
+ "+contents of branch1/file\n"
+ "\n", subst_dates(out))
+
+ def check_b1_vs_b2(self, cmd):
+ # Compare branch1 vs branch2 using cmd and check the result
+ out, err = self.run_bzr(cmd, retcode=1)
+ self.assertEquals('', err)
+ self.assertEqualDiff("=== modified file 'file'\n"
+ "--- file\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"
+ "+++ file\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"
+ "@@ -1,1 +1,1 @@\n"
+ "-contents of branch1/file\n"
+ "+new content\n"
+ "\n", subst_dates(out))
+
+ def check_no_diffs(self, cmd):
+ # Check that running cmd returns an empty diff
+ out, err = self.run_bzr(cmd, retcode=0)
+ self.assertEquals('', err)
+ self.assertEquals('', out)
+
+ def test_diff_branches(self):
+ self.example_branches()
+ # should open branch1 and diff against branch2,
+ self.check_b2_vs_b1('diff -r branch:branch2 branch1')
+ # Compare two working trees using various syntax forms
+ self.check_b2_vs_b1('diff --old branch2 --new branch1')
+ self.check_b2_vs_b1('diff --old branch2 branch1')
+ self.check_b2_vs_b1('diff branch2 --new branch1')
+ # Test with a selected file that was changed
+ self.check_b2_vs_b1('diff --old branch2 --new branch1 file')
+ self.check_b2_vs_b1('diff --old branch2 branch1/file')
+ self.check_b2_vs_b1('diff branch2/file --new branch1')
+ # Test with a selected file that was not changed
+ self.check_no_diffs('diff --old branch2 --new branch1 file2')
+ self.check_no_diffs('diff --old branch2 branch1/file2')
+ self.check_no_diffs('diff branch2/file2 --new branch1')
+
+ def test_diff_branches_no_working_trees(self):
+ branch1_tree, branch2_tree = self.example_branches()
+ # Compare a working tree to a branch without a WT
+ dir1 = branch1_tree.bzrdir
+ dir1.destroy_workingtree()
+ self.assertFalse(dir1.has_workingtree())
+ self.check_b2_vs_b1('diff --old branch2 --new branch1')
+ self.check_b2_vs_b1('diff --old branch2 branch1')
+ self.check_b2_vs_b1('diff branch2 --new branch1')
+ # Compare a branch without a WT to one with a WT
+ self.check_b1_vs_b2('diff --old branch1 --new branch2')
+ self.check_b1_vs_b2('diff --old branch1 branch2')
+ self.check_b1_vs_b2('diff branch1 --new branch2')
+ # Compare a branch with a WT against another without a WT
+ dir2 = branch2_tree.bzrdir
+ dir2.destroy_workingtree()
+ self.assertFalse(dir2.has_workingtree())
+ self.check_b1_vs_b2('diff --old branch1 --new branch2')
+ self.check_b1_vs_b2('diff --old branch1 branch2')
+ self.check_b1_vs_b2('diff branch1 --new branch2')
+
+ def test_diff_revno_branches(self):
+ self.example_branches()
+ branch2_tree = workingtree.WorkingTree.open_containing('branch2')[0]
+ self.build_tree_contents([('branch2/file', 'even newer content')])
+ branch2_tree.commit(message='update file once more')
+
+ out, err = self.run_bzr('diff -r revno:1:branch2..revno:1:branch1',
+ )
+ self.assertEquals('', err)
+ self.assertEquals('', out)
+ out, err = self.run_bzr('diff -r revno:2:branch2..revno:1:branch1',
+ retcode=1)
+ self.assertEquals('', err)
+ self.assertEqualDiff("=== modified file 'file'\n"
+ "--- file\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"
+ "+++ file\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"
+ "@@ -1,1 +1,1 @@\n"
+ "-new content\n"
+ "+contents of branch1/file\n"
+ "\n", subst_dates(out))
+
+ def example_branch2(self):
+ branch1_tree = self.make_branch_and_tree('branch1')
+ self.build_tree_contents([('branch1/file1', 'original line\n')])
+ branch1_tree.add('file1')
+ branch1_tree.commit(message='first commit')
+ self.build_tree_contents([('branch1/file1', 'repo line\n')])
+ branch1_tree.commit(message='second commit')
+ return branch1_tree
+
+ def test_diff_to_working_tree(self):
+ self.example_branch2()
+ self.build_tree_contents([('branch1/file1', 'new line')])
+ output = self.run_bzr('diff -r 1.. branch1', retcode=1)
+ self.assertContainsRe(output[0], '\n\\-original line\n\\+new line\n')
+
+ def test_diff_to_working_tree_in_subdir(self):
+ self.example_branch2()
+ self.build_tree_contents([('branch1/file1', 'new line')])
+ os.mkdir('branch1/dir1')
+ output = self.run_bzr('diff -r 1..', retcode=1,
+ working_dir='branch1/dir1')
+ self.assertContainsRe(output[0], '\n\\-original line\n\\+new line\n')
+
+ def test_diff_across_rename(self):
+ """The working tree path should always be considered for diffing"""
+ tree = self.make_example_branch()
+ self.run_bzr('diff -r 0..1 hello', retcode=1)
+ tree.rename_one('hello', 'hello1')
+ self.run_bzr('diff hello1', retcode=1)
+ self.run_bzr('diff -r 0..1 hello1', retcode=1)
+
+ def test_diff_to_branch_no_working_tree(self):
+ branch1_tree = self.example_branch2()
+ dir1 = branch1_tree.bzrdir
+ dir1.destroy_workingtree()
+ self.assertFalse(dir1.has_workingtree())
+ output = self.run_bzr('diff -r 1.. branch1', retcode=1)
+ self.assertContainsRe(output[0], '\n\\-original line\n\\+repo line\n')
+
+ def test_custom_format(self):
+ class BooDiffTree(DiffTree):
+
+ def show_diff(self, specific_files, extra_trees=None):
+ self.to_file.write("BOO!\n")
+ return super(BooDiffTree, self).show_diff(specific_files,
+ extra_trees)
+
+ diff_format_registry.register("boo", BooDiffTree, "Scary diff format")
+ self.addCleanup(diff_format_registry.remove, "boo")
+ self.make_example_branch()
+ self.build_tree_contents([('hello', 'hello world!\n')])
+ output = self.run_bzr('diff --format=boo', retcode=1)
+ self.assertTrue("BOO!" in output[0])
+ output = self.run_bzr('diff -Fboo', retcode=1)
+ self.assertTrue("BOO!" in output[0])
+
+
+class TestCheckoutDiff(TestDiff):
+
+ def make_example_branch(self):
+ tree = super(TestCheckoutDiff, self).make_example_branch()
+ tree = tree.branch.create_checkout('checkout')
+ os.chdir('checkout')
+ return tree
+
+ def example_branch2(self):
+ tree = super(TestCheckoutDiff, self).example_branch2()
+ os.mkdir('checkouts')
+ tree = tree.branch.create_checkout('checkouts/branch1')
+ os.chdir('checkouts')
+ return tree
+
+ def example_branches(self):
+ branch1_tree, branch2_tree = super(TestCheckoutDiff,
+ self).example_branches()
+ os.mkdir('checkouts')
+ branch1_tree = branch1_tree.branch.create_checkout('checkouts/branch1')
+ branch2_tree = branch2_tree.branch.create_checkout('checkouts/branch2')
+ os.chdir('checkouts')
+ return branch1_tree, branch2_tree
+
+
+class TestDiffLabels(DiffBase):
+
+ def test_diff_label_removed(self):
+ tree = super(TestDiffLabels, self).make_example_branch()
+ tree.remove('hello', keep_files=False)
+ diff = self.run_bzr('diff', retcode=1)
+ self.assertTrue("=== removed file 'hello'" in diff[0])
+
+ def test_diff_label_added(self):
+ tree = super(TestDiffLabels, self).make_example_branch()
+ self.build_tree_contents([('barbar', 'barbar')])
+ tree.add('barbar')
+ diff = self.run_bzr('diff', retcode=1)
+ self.assertTrue("=== added file 'barbar'" in diff[0])
+
+ def test_diff_label_modified(self):
+ super(TestDiffLabels, self).make_example_branch()
+ self.build_tree_contents([('hello', 'barbar')])
+ diff = self.run_bzr('diff', retcode=1)
+ self.assertTrue("=== modified file 'hello'" in diff[0])
+
+ def test_diff_label_renamed(self):
+ tree = super(TestDiffLabels, self).make_example_branch()
+ tree.rename_one('hello', 'gruezi')
+ diff = self.run_bzr('diff', retcode=1)
+ self.assertTrue("=== renamed file 'hello' => 'gruezi'" in diff[0])
+
+
+class TestExternalDiff(DiffBase):
+
+ def test_external_diff(self):
+ """Test that we can spawn an external diff process"""
+ self.disable_missing_extensions_warning()
+        # We have to use run_bzr_subprocess, because we need to
+        # test writing directly to stdout (there was a bug in
+        # subprocess.py that we had to work around).
+        # However, an external 'diff' may not be available.
+ self.make_example_branch()
+ self.overrideEnv('BZR_PROGRESS_BAR', 'none')
+ out, err = self.run_bzr_subprocess('diff -r 1 --diff-options -ub',
+ universal_newlines=True,
+ retcode=None)
+ if 'Diff is not installed on this machine' in err:
+ raise tests.TestSkipped("No external 'diff' is available")
+ self.assertEqual('', err)
+ # We have to skip the stuff in the middle, because it depends
+ # on time.time()
+ self.assertStartsWith(out, "=== added file 'goodbye'\n"
+ "--- goodbye\t1970-01-01 00:00:00 +0000\n"
+ "+++ goodbye\t")
+ self.assertEndsWith(out, "\n@@ -0,0 +1 @@\n"
+ "+baz\n\n")
+
+ def test_external_diff_options_and_using(self):
+ """Test that the options are passed correctly to an external diff process"""
+ self.requireFeature(features.diff_feature)
+ self.make_example_branch()
+ self.build_tree_contents([('hello', 'Foo\n')])
+ out, err = self.run_bzr('diff --diff-options -i --using diff',
+ retcode=1)
+ self.assertEquals("=== modified file 'hello'\n", out)
+ self.assertEquals('', err)
+
+
+class TestDiffOutput(DiffBase):
+
+ def test_diff_output(self):
+ # check that output doesn't mangle line-endings
+ self.make_example_branch()
+ self.build_tree_contents([('hello', 'hello world!\n')])
+ output = self.run_bzr_subprocess('diff', retcode=1)[0]
+ self.assert_('\n+hello world!\n' in output)
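A quick illustration of the subst_dates helper defined at the top of
test_diff.py: it only normalises timestamps so that diff headers can be
compared verbatim across runs. Sketch (illustrative, not part of this diff):

    sample = "--- hello\t2012-08-22 15:47:16 +0100\n"
    print subst_dates(sample)
    # -> "--- hello\tYYYY-MM-DD HH:MM:SS +ZZZZ\n"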
diff --git a/bzrlib/tests/blackbox/test_dpush.py b/bzrlib/tests/blackbox/test_dpush.py
new file mode 100644
index 0000000..32f05d5
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_dpush.py
@@ -0,0 +1,174 @@
+# Copyright (C) 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr dpush."""
+
+
+from bzrlib import (
+ branch,
+ tests,
+ )
+from bzrlib.tests import (
+ script,
+ test_foreign,
+ )
+from bzrlib.tests.blackbox import test_push
+from bzrlib.tests.scenarios import (
+ load_tests_apply_scenarios,
+ )
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestDpush(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestDpush, self).setUp()
+ test_foreign.register_dummy_foreign_for_test(self)
+
+ def make_dummy_builder(self, relpath):
+ builder = self.make_branch_builder(
+ relpath, format=test_foreign.DummyForeignVcsDirFormat())
+ builder.build_snapshot('revid', None,
+ [('add', ('', 'TREE_ROOT', 'directory', None)),
+ ('add', ('foo', 'fooid', 'file', 'bar'))])
+ return builder
+
+ def test_dpush_native(self):
+ target_tree = self.make_branch_and_tree("dp")
+ source_tree = self.make_branch_and_tree("dc")
+ output, error = self.run_bzr("dpush -d dc dp", retcode=3)
+ self.assertEquals("", output)
+ self.assertContainsRe(error,
+ 'in the same VCS, lossy push not necessary. Please use regular '
+ 'push.')
+
+ def test_dpush(self):
+ branch = self.make_dummy_builder('d').get_branch()
+
+ dc = branch.bzrdir.sprout('dc', force_new_repo=True)
+        self.build_tree_contents([("dc/foo", "blaaaa")])
+ dc.open_workingtree().commit('msg')
+
+ script.run_script(self, """
+ $ bzr dpush -d dc d
+ 2>Doing on-the-fly conversion from DummyForeignVcsRepositoryFormat() to RepositoryFormat2a().
+ 2>This may take some time. Upgrade the repositories to the same format for better performance.
+ 2>Pushed up to revision 2.
+ $ bzr status dc
+ """)
+
+ def test_dpush_new(self):
+ b = self.make_dummy_builder('d').get_branch()
+
+ dc = b.bzrdir.sprout('dc', force_new_repo=True)
+ self.build_tree_contents([("dc/foofile", "blaaaa")])
+ dc_tree = dc.open_workingtree()
+ dc_tree.add("foofile")
+ dc_tree.commit("msg")
+
+ script.run_script(self, '''
+ $ bzr dpush -d dc d
+ 2>Doing on-the-fly conversion from DummyForeignVcsRepositoryFormat() to RepositoryFormat2a().
+ 2>This may take some time. Upgrade the repositories to the same format for better performance.
+ 2>Pushed up to revision 2.
+ $ bzr revno dc
+ 2
+ $ bzr status dc
+ ''')
+
+ def test_dpush_wt_diff(self):
+ b = self.make_dummy_builder('d').get_branch()
+
+ dc = b.bzrdir.sprout('dc', force_new_repo=True)
+ self.build_tree_contents([("dc/foofile", "blaaaa")])
+ dc_tree = dc.open_workingtree()
+ dc_tree.add("foofile")
+ newrevid = dc_tree.commit('msg')
+
+ self.build_tree_contents([("dc/foofile", "blaaaal")])
+ script.run_script(self, '''
+ $ bzr dpush -d dc d --no-strict
+ 2>Doing on-the-fly conversion from DummyForeignVcsRepositoryFormat() to RepositoryFormat2a().
+ 2>This may take some time. Upgrade the repositories to the same format for better performance.
+ 2>Pushed up to revision 2.
+ ''')
+ self.assertFileEqual("blaaaal", "dc/foofile")
+ # if the dummy vcs wasn't that dummy we could uncomment the line below
+ # self.assertFileEqual("blaaaa", "d/foofile")
+ script.run_script(self, '''
+ $ bzr status dc
+ modified:
+ foofile
+ ''')
+
+ def test_diverged(self):
+ builder = self.make_dummy_builder('d')
+
+ b = builder.get_branch()
+
+ dc = b.bzrdir.sprout('dc', force_new_repo=True)
+ dc_tree = dc.open_workingtree()
+
+ self.build_tree_contents([("dc/foo", "bar")])
+ dc_tree.commit('msg1')
+
+ builder.build_snapshot('revid2', None,
+ [('modify', ('fooid', 'blie'))])
+
+ output, error = self.run_bzr("dpush -d dc d", retcode=3)
+ self.assertEquals(output, "")
+ self.assertContainsRe(error, "have diverged")
+
+
+class TestDpushStrictMixin(object):
+
+ def setUp(self):
+ test_foreign.register_dummy_foreign_for_test(self)
+ # Create an empty branch where we will be able to push
+ self.foreign = self.make_branch(
+ 'to', format=test_foreign.DummyForeignVcsDirFormat())
+
+ def set_config_push_strict(self, value):
+ br = branch.Branch.open('local')
+ br.get_config_stack().set('dpush_strict', value)
+
+ _default_command = ['dpush', '../to']
+
+
+class TestDpushStrictWithoutChanges(TestDpushStrictMixin,
+ test_push.TestPushStrictWithoutChanges):
+
+ def setUp(self):
+ test_push.TestPushStrictWithoutChanges.setUp(self)
+ TestDpushStrictMixin.setUp(self)
+
+
+class TestDpushStrictWithChanges(TestDpushStrictMixin,
+ test_push.TestPushStrictWithChanges):
+
+ scenarios = test_push.strict_push_change_scenarios
+
+ _changes_type = None # Set by load_tests
+
+ def setUp(self):
+ test_push.TestPushStrictWithChanges.setUp(self)
+ TestDpushStrictMixin.setUp(self)
+
+ def test_push_with_revision(self):
+ raise tests.TestNotApplicable('dpush does not handle --revision')
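The strict-push mixins above drive a single config knob. A minimal sketch of
what set_config_push_strict does, assuming a branch at 'local' as created by
the shared test_push scenarios (illustrative, not part of this diff):

    from bzrlib import branch

    br = branch.Branch.open('local')
    # 'false' lets dpush proceed even with uncommitted changes in the
    # working tree; 'true' makes it refuse, which the mixin scenarios cover.
    br.get_config_stack().set('dpush_strict', 'false')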
diff --git a/bzrlib/tests/blackbox/test_dump_btree.py b/bzrlib/tests/blackbox/test_dump_btree.py
new file mode 100644
index 0000000..1e1f844
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_dump_btree.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests of the 'bzr dump-btree' command."""
+
+from bzrlib import (
+ btree_index,
+ tests,
+ )
+from bzrlib.tests import (
+ http_server,
+ )
+
+
+class TestDumpBtree(tests.TestCaseWithTransport):
+
+ def create_sample_btree_index(self):
+ builder = btree_index.BTreeBuilder(
+ reference_lists=1, key_elements=2)
+ builder.add_node(('test', 'key1'), 'value', ((('ref', 'entry'),),))
+ builder.add_node(('test', 'key2'), 'value2', ((('ref', 'entry2'),),))
+ builder.add_node(('test2', 'key3'), 'value3', ((('ref', 'entry3'),),))
+ out_f = builder.finish()
+ try:
+ self.build_tree_contents([('test.btree', out_f.read())])
+ finally:
+ out_f.close()
+
+ def test_dump_btree_smoke(self):
+ self.create_sample_btree_index()
+ out, err = self.run_bzr('dump-btree test.btree')
+ self.assertEqualDiff(
+ "(('test', 'key1'), 'value', ((('ref', 'entry'),),))\n"
+ "(('test', 'key2'), 'value2', ((('ref', 'entry2'),),))\n"
+ "(('test2', 'key3'), 'value3', ((('ref', 'entry3'),),))\n",
+ out)
+
+ def test_dump_btree_http_smoke(self):
+ self.transport_readonly_server = http_server.HttpServer
+ self.create_sample_btree_index()
+ url = self.get_readonly_url('test.btree')
+ out, err = self.run_bzr(['dump-btree', url])
+ self.assertEqualDiff(
+ "(('test', 'key1'), 'value', ((('ref', 'entry'),),))\n"
+ "(('test', 'key2'), 'value2', ((('ref', 'entry2'),),))\n"
+ "(('test2', 'key3'), 'value3', ((('ref', 'entry3'),),))\n",
+ out)
+
+ def test_dump_btree_raw_smoke(self):
+ self.create_sample_btree_index()
+ out, err = self.run_bzr('dump-btree test.btree --raw')
+ self.assertEqualDiff(
+ 'Root node:\n'
+ 'B+Tree Graph Index 2\n'
+ 'node_ref_lists=1\n'
+ 'key_elements=2\n'
+ 'len=3\n'
+ 'row_lengths=1\n'
+ '\n'
+ 'Page 0\n'
+ 'type=leaf\n'
+ 'test\0key1\0ref\0entry\0value\n'
+ 'test\0key2\0ref\0entry2\0value2\n'
+ 'test2\0key3\0ref\0entry3\0value3\n'
+ '\n',
+ out)
+
+ def test_dump_btree_no_refs_smoke(self):
+ # A BTree index with no ref lists (such as *.cix) can be dumped without
+ # errors.
+ builder = btree_index.BTreeBuilder(
+ reference_lists=0, key_elements=2)
+ builder.add_node(('test', 'key1'), 'value')
+ out_f = builder.finish()
+ try:
+ self.build_tree_contents([('test.btree', out_f.read())])
+ finally:
+ out_f.close()
+ out, err = self.run_bzr('dump-btree test.btree')
+
+ def create_sample_empty_btree_index(self):
+ builder = btree_index.BTreeBuilder(
+ reference_lists=1, key_elements=2)
+ out_f = builder.finish()
+ try:
+ self.build_tree_contents([('test.btree', out_f.read())])
+ finally:
+ out_f.close()
+
+ def test_dump_empty_btree_smoke(self):
+ self.create_sample_empty_btree_index()
+ out, err = self.run_bzr('dump-btree test.btree')
+ self.assertEqualDiff("", out)
+
+ def test_dump_empty_btree_http_smoke(self):
+ self.transport_readonly_server = http_server.HttpServer
+ self.create_sample_empty_btree_index()
+ url = self.get_readonly_url('test.btree')
+ out, err = self.run_bzr(['dump-btree', url])
+ self.assertEqualDiff("", out)
+
+ def test_dump_empty_btree_raw_smoke(self):
+ self.create_sample_empty_btree_index()
+ out, err = self.run_bzr('dump-btree test.btree --raw')
+ self.assertEqualDiff(
+ 'Root node:\n'
+ 'B+Tree Graph Index 2\n'
+ 'node_ref_lists=1\n'
+ 'key_elements=2\n'
+ 'len=0\n'
+ 'row_lengths=\n'
+ '\n'
+ 'Page 0\n'
+ '(empty)\n',
+ out)
+
diff --git a/bzrlib/tests/blackbox/test_exceptions.py b/bzrlib/tests/blackbox/test_exceptions.py
new file mode 100644
index 0000000..9124662
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_exceptions.py
@@ -0,0 +1,167 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for display of exceptions."""
+
+import os
+import re
+
+from bzrlib import (
+ bzrdir,
+ config,
+ controldir,
+ errors,
+ osutils,
+ repository,
+ tests,
+ )
+from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
+
+
+class TestExceptionReporting(tests.TestCaseInTempDir):
+
+ def test_exception_exitcode(self):
+ # we must use a subprocess, because the normal in-memory mechanism
+ # allows errors to propagate up through the test suite
+ out, err = self.run_bzr_subprocess(['assert-fail'],
+ universal_newlines=True,
+ retcode=errors.EXIT_INTERNAL_ERROR)
+ self.assertEqual(4, errors.EXIT_INTERNAL_ERROR)
+ self.assertContainsRe(err,
+ r'exceptions\.AssertionError: always fails\n')
+ self.assertContainsRe(err, r'Bazaar has encountered an internal error')
+
+ def test_undecodable_argv(self):
+ """A user error must be reported if argv is not in the locale encoding
+
+        A subprocess with an ASCII-only locale in its environment is used so
+        the test can run regardless of the locale the test suite itself uses.
+ """
+ if os.name != "posix":
+ raise tests.TestNotApplicable("Needs system beholden to C locales")
+ out, err = self.run_bzr_subprocess(["\xa0"],
+ env_changes={"LANG": "C", "LC_ALL": "C"},
+ universal_newlines=True,
+ retcode=errors.EXIT_ERROR)
+ self.assertContainsRe(err, r"^bzr: ERROR: .*'\\xa0'.* unsupported",
+ flags=re.MULTILINE)
+ self.assertEquals(out, "")
+
+ def test_utf8_default_fs_enc(self):
+ """In the C locale bzr treats a posix filesystem as UTF-8 encoded"""
+ if os.name != "posix":
+ raise tests.TestNotApplicable("Needs system beholden to C locales")
+ out, err = self.run_bzr_subprocess(["init", "file:%C2%A7"],
+ env_changes={"LANG": "C", "LC_ALL": "C"})
+ self.assertContainsRe(out, "^Created a standalone tree .*$")
+
+
+class TestOptParseBugHandling(tests.TestCase):
+ "Test that we handle http://bugs.python.org/issue2931"
+
+ def test_nonascii_optparse(self):
+ """Reasonable error raised when non-ascii in option name"""
+ error_re = 'Only ASCII permitted in option names'
+ out = self.run_bzr_error([error_re], ['st',u'-\xe4'])
+
+
+class TestObsoleteRepoFormat(RepositoryFormat2a):
+
+ @classmethod
+ def get_format_string(cls):
+ return "Test Obsolete Repository Format"
+
+ def is_deprecated(self):
+ return True
+
+
+class TestDeprecationWarning(tests.TestCaseWithTransport):
+ """The deprecation warning is controlled via a global variable:
+ repository._deprecation_warning_done. As such, it can be emitted only once
+ during a bzr invocation, no matter how many repositories are involved.
+
+ It would be better if it was a repo attribute instead but that's far more
+ work than I want to do right now -- vila 20091215.
+ """
+
+ def setUp(self):
+ super(TestDeprecationWarning, self).setUp()
+ self.addCleanup(repository.format_registry.remove,
+ TestObsoleteRepoFormat)
+ repository.format_registry.register(TestObsoleteRepoFormat)
+ self.addCleanup(controldir.format_registry.remove, "testobsolete")
+ bzrdir.register_metadir(controldir.format_registry, "testobsolete",
+ "bzrlib.tests.blackbox.test_exceptions.TestObsoleteRepoFormat",
+ branch_format='bzrlib.branch.BzrBranchFormat7',
+ tree_format='bzrlib.workingtree_4.WorkingTreeFormat6',
+ deprecated=True,
+ help='Same as 2a, but with an obsolete repo format.')
+ self.disable_deprecation_warning()
+
+ def enable_deprecation_warning(self, repo=None):
+ """repo is not used yet since _deprecation_warning_done is a global"""
+ repository._deprecation_warning_done = False
+
+ def disable_deprecation_warning(self, repo=None):
+ """repo is not used yet since _deprecation_warning_done is a global"""
+ repository._deprecation_warning_done = True
+
+ def make_obsolete_repo(self, path):
+ # We don't want the deprecation raising during the repo creation
+ format = controldir.format_registry.make_bzrdir("testobsolete")
+ tree = self.make_branch_and_tree(path, format=format)
+ return tree
+
+ def check_warning(self, present):
+ if present:
+ check = self.assertContainsRe
+ else:
+ check = self.assertNotContainsRe
+ check(self.get_log(), 'WARNING.*bzr upgrade')
+
+ def test_repository_deprecation_warning(self):
+ """Old formats give a warning"""
+ self.make_obsolete_repo('foo')
+ self.enable_deprecation_warning()
+ out, err = self.run_bzr('status', working_dir='foo')
+ self.check_warning(True)
+
+ def test_repository_deprecation_warning_suppressed_global(self):
+ """Old formats give a warning"""
+ conf = config.GlobalStack()
+ conf.set('suppress_warnings', 'format_deprecation')
+ self.make_obsolete_repo('foo')
+ self.enable_deprecation_warning()
+ out, err = self.run_bzr('status', working_dir='foo')
+ self.check_warning(False)
+
+ def test_repository_deprecation_warning_suppressed_locations(self):
+ """Old formats give a warning"""
+ self.make_obsolete_repo('foo')
+ conf = config.LocationStack(osutils.pathjoin(self.test_dir, 'foo'))
+ conf.set('suppress_warnings', 'format_deprecation')
+ self.enable_deprecation_warning()
+ out, err = self.run_bzr('status', working_dir='foo')
+ self.check_warning(False)
+
+ def test_repository_deprecation_warning_suppressed_branch(self):
+ """Old formats give a warning"""
+ tree = self.make_obsolete_repo('foo')
+ conf = tree.branch.get_config_stack()
+ conf.set('suppress_warnings', 'format_deprecation')
+ self.enable_deprecation_warning()
+ out, err = self.run_bzr('status', working_dir='foo')
+ self.check_warning(False)
diff --git a/bzrlib/tests/blackbox/test_export.py b/bzrlib/tests/blackbox/test_export.py
new file mode 100644
index 0000000..be63c84
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_export.py
@@ -0,0 +1,453 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr export.
+"""
+
+from StringIO import StringIO
+import os
+import stat
+import tarfile
+import time
+import zipfile
+
+
+from bzrlib import (
+ export,
+ )
+from bzrlib.tests import (
+ features,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestExport(TestCaseWithTransport):
+
+    # On Windows, if we fail to set the binary bit and a '\r' or '\n'
+    # ends up in the data stream, we will get corruption. Add a fair amount
+    # of random data to help ensure there is at least one such byte.
+ _file_content = ('!\r\n\t\n \r'
+ + 'r29trp9i1r8k0e24c2o7mcx2isrlaw7toh1hv2mtst3o1udkl36v9xn2z8kt\n'
+ 'tvjn7e3i9cj1qs1rw9gcye9w72cbdueiufw8nky7bs08lwggir59d62knecp\n'
+ '7s0537r8sg3e8hdnidji49rswo47c3j8190nh8emef2b6j1mf5kdq45nt3f5\n'
+ '1sz9u7fuzrm4w8bebre7p62sh5os2nkj2iiyuk9n0w0pjpdulu9k2aajejah\n'
+ 'ini90ny40qzs12ajuy0ua6l178n93lvy2atqngnntsmtlmqx7yhp0q9a1xr4\n'
+ '1n69kgbo6qu9osjpqq83446r00jijtcstzybfqwm1lnt9spnri2j07bt7bbh\n'
+ 'rf3ejatdxta83te2s0pt9rc4hidgy3d2pc53p4wscdt2b1dfxdj9utf5m17f\n'
+ 'f03oofcau950o090vyx6m72vfkywo7gp3ajzi6uk02dwqwtumq4r44xx6ho7\n'
+ 'nhynborjdjep5j53f9548msb7gd3x9a1xveb4s8zfo6cbdw2kdngcrbakwu8\n'
+ 'ql5a8l94gplkwr7oypw5nt1gj5i3xwadyjfr3lb61tfkz31ba7uda9knb294\n'
+ '1azhfta0q3ry9x36lxyanvhp0g5z0t5a0i4wnoc8p4htexi915y1cnw4nznn\n'
+ 'aj70dvp88ifiblv2bsp98hz570teinj8g472ddxni9ydmazfzwtznbf3hrg6\n'
+ '84gigirjt6n2yagf70036m8d73cz0jpcighpjtxsmbgzbxx7nb4ewq6jbgnc\n'
+ 'hux1b0qtsdi0zfhj6g1otf5jcldmtdvuon8y1ttszkqw3ograwi25yl921hy\n'
+ 'izgscmfha9xdhxxabs07b40secpw22ah9iwpbmsns6qz0yr6fswto3ft2ez5\n'
+ 'ngn48pdfxj1pw246drmj1y2ll5af5w7cz849rapzd9ih7qvalw358co0yzrs\n'
+ 'xan9291d1ivjku4o5gjrsnmllrqwxwy86pcivinbmlnzasa9v3o22lgv4uyd\n'
+ 'q8kw77bge3hr5rr5kzwjxk223bkmo3z9oju0954undsz8axr3kb3730otrcr\n'
+ '9cwhu37htnizdwxmpoc5qmobycfm7ubbykfumv6zgkl6b8zlslwl7a8b81vz\n'
+ '3weqkvv5csfza9xvwypr6lo0t03fwp0ihmci3m1muh0lf2u30ze0hjag691j\n'
+ '27fjtd3e3zbiin5n2hq21iuo09ukbs73r5rt7vaw6axvoilvdciir9ugjh2c\n'
+ 'na2b8dr0ptftoyhyxv1iwg661y338e28fhz4xxwgv3hnoe98ydfa1oou45vj\n'
+ 'ln74oac2keqt0agbylrqhfscin7ireae2bql7z2le823ksy47ud57z8ctomp\n'
+ '31s1vwbczdjwqp0o2jc7mkrurvzg8mj2zwcn2iily4gcl4sy4fsh4rignlyz\n')
+
+ def make_basic_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/a', self._file_content)])
+ tree.add('a')
+ tree.commit('1')
+ return tree
+
+ def make_tree_with_extra_bzr_files(self):
+ tree = self.make_basic_tree()
+ self.build_tree_contents([('tree/.bzrrules', '')])
+ self.build_tree(['tree/.bzr-adir/', 'tree/.bzr-adir/afile'])
+ tree.add(['.bzrrules', '.bzr-adir/', '.bzr-adir/afile'])
+
+ self.run_bzr('ignore something -d tree')
+ tree.commit('2')
+ return tree
+
+ def test_tar_export_ignores_bzr(self):
+ tree = self.make_tree_with_extra_bzr_files()
+
+ self.assertTrue(tree.has_filename('.bzrignore'))
+ self.assertTrue(tree.has_filename('.bzrrules'))
+ self.assertTrue(tree.has_filename('.bzr-adir'))
+ self.assertTrue(tree.has_filename('.bzr-adir/afile'))
+ self.run_bzr('export test.tar.gz -d tree')
+ ball = tarfile.open('test.tar.gz')
+ # Make sure the tarball contains 'a', but does not contain
+ # '.bzrignore'.
+ self.assertEqual(['test/a'],
+ sorted(ball.getnames()))
+
+ def test_tar_export_unicode_filename(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('tar')
+ # FIXME: using fname = u'\xe5.txt' below triggers a bug revealed since
+ # bzr.dev revno 4216 but more related to OSX/working trees/unicode than
+ # export itself --vila 20090406
+ fname = u'\N{Euro Sign}.txt'
+ self.build_tree(['tar/' + fname])
+ tree.add([fname])
+ tree.commit('first')
+
+ self.run_bzr('export test.tar -d tar')
+ ball = tarfile.open('test.tar')
+ # all paths are prefixed with the base name of the tarball
+ self.assertEqual(['test/' + fname.encode('utf8')],
+ sorted(ball.getnames()))
+
+ def test_tar_export_unicode_basedir(self):
+ """Test for bug #413406"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ basedir = u'\N{euro sign}'
+ os.mkdir(basedir)
+ self.run_bzr(['init', basedir])
+ self.run_bzr(['export', '--format', 'tgz', u'test.tar.gz',
+ '-d', basedir])
+
+ def test_zip_export_ignores_bzr(self):
+ tree = self.make_tree_with_extra_bzr_files()
+
+ self.assertTrue(tree.has_filename('.bzrignore'))
+ self.assertTrue(tree.has_filename('.bzrrules'))
+ self.assertTrue(tree.has_filename('.bzr-adir'))
+ self.assertTrue(tree.has_filename('.bzr-adir/afile'))
+ self.run_bzr('export test.zip -d tree')
+
+ zfile = zipfile.ZipFile('test.zip')
+ # Make sure the zipfile contains 'a', but does not contain
+ # '.bzrignore'.
+ self.assertEqual(['test/a'], sorted(zfile.namelist()))
+
+ # TODO: This really looks like something that should be using permutation
+ # testing, though the actual setup and teardown functions are pretty
+ # different for each export format.
+ def assertZipANameAndContent(self, zfile, root=''):
+ """The file should only contain name 'a' and _file_content"""
+ fname = root + 'a'
+ self.assertEqual([fname], sorted(zfile.namelist()))
+ zfile.testzip()
+ self.assertEqualDiff(self._file_content, zfile.read(fname))
+
+ def test_zip_export_stdout(self):
+ tree = self.make_basic_tree()
+ contents = self.run_bzr('export -d tree --format=zip -')[0]
+ self.assertZipANameAndContent(zipfile.ZipFile(StringIO(contents)))
+
+ def test_zip_export_file(self):
+ tree = self.make_basic_tree()
+ self.run_bzr('export -d tree test.zip')
+ self.assertZipANameAndContent(zipfile.ZipFile('test.zip'),
+ root='test/')
+
+ def assertTarANameAndContent(self, ball, root=''):
+ fname = root + 'a'
+ tar_info = ball.next()
+ self.assertEqual(fname, tar_info.name)
+ self.assertEqual(tarfile.REGTYPE, tar_info.type)
+ self.assertEqual(len(self._file_content), tar_info.size)
+ f = ball.extractfile(tar_info)
+ if self._file_content != f.read():
+ self.fail('File content has been corrupted.'
+ ' Check that all streams are handled in binary mode.')
+ # There should be no other files in the tarball
+ self.assertIs(None, ball.next())
+
+ def run_tar_export_disk_and_stdout(self, extension, tarfile_flags):
+ tree = self.make_basic_tree()
+ fname = 'test.%s' % (extension,)
+ self.run_bzr('export -d tree %s' % (fname,))
+ mode = 'r|%s' % (tarfile_flags,)
+ ball = tarfile.open(fname, mode=mode)
+ self.assertTarANameAndContent(ball, root='test/')
+ content = self.run_bzr('export -d tree --format=%s -' % (extension,))[0]
+ ball = tarfile.open(mode=mode, fileobj=StringIO(content))
+ self.assertTarANameAndContent(ball, root='')
+
+ def test_tar_export(self):
+ self.run_tar_export_disk_and_stdout('tar', '')
+
+ def test_tgz_export(self):
+ self.run_tar_export_disk_and_stdout('tgz', 'gz')
+
+ def test_tbz2_export(self):
+ self.run_tar_export_disk_and_stdout('tbz2', 'bz2')
+
+ def test_zip_export_unicode(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('zip')
+ fname = u'\N{Euro Sign}.txt'
+ self.build_tree(['zip/' + fname])
+ tree.add([fname])
+ tree.commit('first')
+
+ os.chdir('zip')
+ self.run_bzr('export test.zip')
+ zfile = zipfile.ZipFile('test.zip')
+ # all paths are prefixed with the base name of the zipfile
+ self.assertEqual(['test/' + fname.encode('utf8')],
+ sorted(zfile.namelist()))
+
+ def test_zip_export_directories(self):
+ tree = self.make_branch_and_tree('zip')
+ self.build_tree(['zip/a', 'zip/b/', 'zip/b/c', 'zip/d/'])
+ tree.add(['a', 'b', 'b/c', 'd'])
+ tree.commit('init')
+
+ os.chdir('zip')
+ self.run_bzr('export test.zip')
+ zfile = zipfile.ZipFile('test.zip')
+ names = sorted(zfile.namelist())
+
+ # even on win32, zipfile.ZipFile changes all names to use
+ # forward slashes
+ self.assertEqual(['test/a', 'test/b/', 'test/b/c', 'test/d/'], names)
+
+ file_attr = stat.S_IFREG | export.zip_exporter.FILE_PERMISSIONS
+ dir_attr = (stat.S_IFDIR | export.zip_exporter.ZIP_DIRECTORY_BIT |
+ export.zip_exporter.DIR_PERMISSIONS)
+
+ a_info = zfile.getinfo(names[0])
+ self.assertEqual(file_attr, a_info.external_attr)
+
+ b_info = zfile.getinfo(names[1])
+ self.assertEqual(dir_attr, b_info.external_attr)
+
+ c_info = zfile.getinfo(names[2])
+ self.assertEqual(file_attr, c_info.external_attr)
+
+ d_info = zfile.getinfo(names[3])
+ self.assertEqual(dir_attr, d_info.external_attr)
+
+ def test_dir_export(self):
+ tree = self.make_branch_and_tree('dir')
+ self.build_tree(['dir/a'])
+ tree.add('a')
+ self.build_tree_contents([('dir/.bzrrules', '')])
+ tree.add('.bzrrules')
+ self.build_tree(['dir/.bzr-adir/', 'dir/.bzr-adir/afile'])
+ tree.add(['.bzr-adir/', '.bzr-adir/afile'])
+
+ os.chdir('dir')
+ self.run_bzr('ignore something')
+ tree.commit('1')
+
+ self.assertTrue(tree.has_filename('.bzrignore'))
+ self.assertTrue(tree.has_filename('.bzrrules'))
+ self.assertTrue(tree.has_filename('.bzr-adir'))
+ self.assertTrue(tree.has_filename('.bzr-adir/afile'))
+ self.run_bzr('export direxport')
+
+ files = sorted(os.listdir('direxport'))
+ # Make sure the exported directory contains 'a', but does not contain
+ # '.bzrignore'.
+ self.assertEqual(['a'], files)
+
+ def example_branch(self):
+ """Create a branch a 'branch' containing hello and goodbye."""
+ tree = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/hello', 'foo')])
+ tree.add('hello')
+ tree.commit('setup')
+
+ self.build_tree_contents([('branch/goodbye', 'baz')])
+ tree.add('goodbye')
+ tree.commit('setup')
+ return tree
+
+ def test_basic_directory_export(self):
+ self.example_branch()
+ os.chdir('branch')
+
+ # Directory exports
+ self.run_bzr('export ../latest')
+ self.assertEqual(['goodbye', 'hello'], sorted(os.listdir('../latest')))
+ self.check_file_contents('../latest/goodbye', 'baz')
+ self.run_bzr('export ../first -r 1')
+ self.assertEqual(['hello'], sorted(os.listdir('../first')))
+ self.check_file_contents('../first/hello', 'foo')
+
+ # Even with .gz and .bz2 it is still a directory
+ self.run_bzr('export ../first.gz -r 1')
+ self.check_file_contents('../first.gz/hello', 'foo')
+ self.run_bzr('export ../first.bz2 -r 1')
+ self.check_file_contents('../first.bz2/hello', 'foo')
+
+ def test_basic_tarfile_export(self):
+ self.example_branch()
+ os.chdir('branch')
+
+ self.run_bzr('export ../first.tar -r 1')
+ self.assertTrue(os.path.isfile('../first.tar'))
+ tf = tarfile.open('../first.tar')
+ try:
+ self.assertEqual(['first/hello'], sorted(tf.getnames()))
+ self.assertEqual('foo', tf.extractfile('first/hello').read())
+ finally:
+ tf.close()
+
+ self.run_bzr('export ../first.tar.gz -r 1')
+ self.assertTrue(os.path.isfile('../first.tar.gz'))
+ self.run_bzr('export ../first.tbz2 -r 1')
+ self.assertTrue(os.path.isfile('../first.tbz2'))
+ self.run_bzr('export ../first.tar.bz2 -r 1')
+ self.assertTrue(os.path.isfile('../first.tar.bz2'))
+ self.run_bzr('export ../first.tar.tbz2 -r 1')
+ self.assertTrue(os.path.isfile('../first.tar.tbz2'))
+
+ tf = tarfile.open('../first.tar.tbz2', 'r:bz2')
+ try:
+ self.assertEqual(['first.tar/hello'], sorted(tf.getnames()))
+ self.assertEqual('foo', tf.extractfile('first.tar/hello').read())
+ finally:
+ tf.close()
+ self.run_bzr('export ../first2.tar -r 1 --root pizza')
+ tf = tarfile.open('../first2.tar')
+ try:
+ self.assertEqual(['pizza/hello'], sorted(tf.getnames()))
+ self.assertEqual('foo', tf.extractfile('pizza/hello').read())
+ finally:
+ tf.close()
+
+ def test_basic_zipfile_export(self):
+ self.example_branch()
+ os.chdir('branch')
+
+ self.run_bzr('export ../first.zip -r 1')
+ self.assertPathExists('../first.zip')
+ zf = zipfile.ZipFile('../first.zip')
+ try:
+ self.assertEqual(['first/hello'], sorted(zf.namelist()))
+ self.assertEqual('foo', zf.read('first/hello'))
+ finally:
+ zf.close()
+
+ self.run_bzr('export ../first2.zip -r 1 --root pizza')
+ zf = zipfile.ZipFile('../first2.zip')
+ try:
+ self.assertEqual(['pizza/hello'], sorted(zf.namelist()))
+ self.assertEqual('foo', zf.read('pizza/hello'))
+ finally:
+ zf.close()
+
+ self.run_bzr('export ../first-zip --format=zip -r 1')
+ zf = zipfile.ZipFile('../first-zip')
+ try:
+ self.assertEqual(['first-zip/hello'], sorted(zf.namelist()))
+ self.assertEqual('foo', zf.read('first-zip/hello'))
+ finally:
+ zf.close()
+
+ def test_export_from_outside_branch(self):
+ self.example_branch()
+
+ # Use directory exports to test specifying the branch location
+ self.run_bzr('export latest branch')
+ self.assertEqual(['goodbye', 'hello'], sorted(os.listdir('latest')))
+ self.check_file_contents('latest/goodbye', 'baz')
+ self.run_bzr('export first -r 1 branch')
+ self.assertEqual(['hello'], sorted(os.listdir('first')))
+ self.check_file_contents('first/hello', 'foo')
+
+ def test_export_partial_tree(self):
+ tree = self.example_branch()
+ self.build_tree(['branch/subdir/', 'branch/subdir/foo.txt'])
+ tree.smart_add(['branch'])
+ tree.commit('more setup')
+ out, err = self.run_bzr('export exported branch/subdir')
+ self.assertEqual(['foo.txt'], os.listdir('exported'))
+
+ def test_dir_export_per_file_timestamps(self):
+ tree = self.example_branch()
+ self.build_tree_contents([('branch/har', 'foo')])
+ tree.add('har')
+ # Earliest allowable date on FAT32 filesystems is 1980-01-01
+ tree.commit('setup', timestamp=315532800)
+ self.run_bzr('export --per-file-timestamps t branch')
+ har_st = os.stat('t/har')
+ self.assertEquals(315532800, har_st.st_mtime)
+
+ def test_dir_export_partial_tree_per_file_timestamps(self):
+ tree = self.example_branch()
+ self.build_tree(['branch/subdir/', 'branch/subdir/foo.txt'])
+ tree.smart_add(['branch'])
+ # Earliest allowable date on FAT32 filesystems is 1980-01-01
+ tree.commit('setup', timestamp=315532800)
+ self.run_bzr('export --per-file-timestamps tpart branch/subdir')
+ foo_st = os.stat('tpart/foo.txt')
+ self.assertEquals(315532800, foo_st.st_mtime)
+
+ def test_export_directory(self):
+ """Test --directory option"""
+ self.example_branch()
+ self.run_bzr(['export', '--directory=branch', 'latest'])
+ self.assertEqual(['goodbye', 'hello'], sorted(os.listdir('latest')))
+ self.check_file_contents('latest/goodbye', 'baz')
+
+ def test_export_uncommitted(self):
+ """Test --uncommitted option"""
+ self.example_branch()
+ os.chdir('branch')
+ self.build_tree_contents([('goodbye', 'uncommitted data')])
+ self.run_bzr(['export', '--uncommitted', 'latest'])
+ self.check_file_contents('latest/goodbye', 'uncommitted data')
+
+ def test_export_uncommitted_no_tree(self):
+ """Test --uncommitted option only works with a working tree."""
+ tree = self.example_branch()
+ tree.bzrdir.destroy_workingtree()
+ os.chdir('branch')
+ self.run_bzr_error(
+ ['bzr: ERROR: --uncommitted requires a working tree'],
+ 'export --uncommitted latest')
+
+ def test_zip_export_per_file_timestamps(self):
+ tree = self.example_branch()
+ self.build_tree_contents([('branch/har', 'foo')])
+ tree.add('har')
+ # Earliest allowable date on FAT32 filesystems is 1980-01-01
+ timestamp = 347151600
+ tree.commit('setup', timestamp=timestamp)
+ self.run_bzr('export --per-file-timestamps test.zip branch')
+ zfile = zipfile.ZipFile('test.zip')
+ info = zfile.getinfo("test/har")
+ self.assertEquals(time.localtime(timestamp)[:6], info.date_time)
+
+
+class TestSmartServerExport(TestCaseWithTransport):
+
+ def test_simple_export(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['export', "foo.tar.gz", self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(7, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_export_pot.py b/bzrlib/tests/blackbox/test_export_pot.py
new file mode 100644
index 0000000..02524fe
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_export_pot.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""External tests of 'bzr export-pot'"""
+import os
+
+from bzrlib import ignores, osutils
+from bzrlib.tests import TestCaseWithMemoryTransport
+from bzrlib.tests.features import ModuleAvailableFeature
+
+class TestExportPot(TestCaseWithMemoryTransport):
+
+ def test_export_pot(self):
+ out, err = self.run_bzr("export-pot")
+ self.assertContainsRe(err, 'Exporting messages from builtin command: add')
+ self.assertContainsRe(out, "help of 'change' option\n"\
+ "msgid \"Select changes introduced by the specified revision.")
+
+ def test_export_pot_plugin(self):
+ self.requireFeature(ModuleAvailableFeature('bzrlib.plugins.launchpad'))
+ out, err = self.run_bzr("export-pot --plugin=launchpad")
+ self.assertContainsRe(err, 'Exporting messages from plugin command: launchpad-login in launchpad')
+ self.assertContainsRe(out, 'msgid "Show or set the Launchpad user ID."')
diff --git a/bzrlib/tests/blackbox/test_filesystem_cicp.py b/bzrlib/tests/blackbox/test_filesystem_cicp.py
new file mode 100644
index 0000000..3956692
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_filesystem_cicp.py
@@ -0,0 +1,282 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests variations of case-insensitive and case-preserving file-systems."""
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+from bzrlib.tests import KnownFailure
+from bzrlib.osutils import canonical_relpath, pathjoin
+from bzrlib.tests.script import run_script
+from bzrlib.tests.features import (
+ CaseInsCasePresFilenameFeature,
+ )
+
+
+
+class TestCICPBase(tests.TestCaseWithTransport):
+ """Base class for tests on a case-insensitive, case-preserving filesystem.
+ """
+
+ _test_needs_features = [CaseInsCasePresFilenameFeature]
+
+ def _make_mixed_case_tree(self):
+ """Make a working tree with mixed-case filenames."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case parent and base name
+ self.build_tree(['CamelCaseParent/', 'lowercaseparent/'])
+ self.build_tree_contents([('CamelCaseParent/CamelCase', 'camel case'),
+ ('lowercaseparent/lowercase', 'lower case'),
+ ('lowercaseparent/mixedCase', 'mixedCasecase'),
+ ])
+ return wt
+
+
+class TestAdd(TestCICPBase):
+
+ def test_add_simple(self):
+ """Test add always uses the case of the filename reported by the os."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case name
+ self.build_tree(['CamelCase'])
+ run_script(self, """
+ $ bzr add camelcase
+ adding CamelCase
+ """)
+
+ def test_add_subdir(self):
+ """test_add_simple but with subdirectories tested too."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case parent and base name
+ self.build_tree(['CamelCaseParent/', 'CamelCaseParent/CamelCase'])
+ run_script(self, """
+ $ bzr add camelcaseparent/camelcase
+ adding CamelCaseParent
+ adding CamelCaseParent/CamelCase
+ """)
+
+ def test_add_implied(self):
+ """test add with no args sees the correct names."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case parent and base name
+ self.build_tree(['CamelCaseParent/', 'CamelCaseParent/CamelCase'])
+ run_script(self, """
+ $ bzr add
+ adding CamelCaseParent
+ adding CamelCaseParent/CamelCase
+ """)
+
+ def test_re_add(self):
+ """Test than when a file has 'unintentionally' changed case, we can't
+ add a new entry using the new case."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case name
+ self.build_tree(['MixedCase'])
+ run_script(self, """
+ $ bzr add MixedCase
+ adding MixedCase
+ """)
+ # 'accidentally' rename the file on disk
+ osutils.rename('MixedCase', 'mixedcase')
+ run_script(self, """
+ $ bzr add mixedcase
+ """)
+
+ def test_re_add_dir(self):
+ # Like test_re_add, but the operation is performed on a directory.
+ """Test that when a directory has 'unintentionally' changed case, we
+ can't add a new entry using the new case."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case name
+ self.build_tree(['MixedCaseParent/', 'MixedCaseParent/MixedCase'])
+ run_script(self, """
+ $ bzr add MixedCaseParent
+ adding MixedCaseParent
+ adding MixedCaseParent/MixedCase
+ """)
+ # 'accidentally' rename the directory on disk
+ osutils.rename('MixedCaseParent', 'mixedcaseparent')
+ run_script(self, """
+ $ bzr add mixedcaseparent
+ """)
+
+ def test_add_not_found(self):
+ """Test add when the input file doesn't exist."""
+ wt = self.make_branch_and_tree('.')
+ # create a file on disk with the mixed-case name
+ self.build_tree(['MixedCaseParent/', 'MixedCaseParent/MixedCase'])
+ expected_fname = pathjoin(wt.basedir, "MixedCaseParent", "notfound")
+ run_script(self, """
+ $ bzr add mixedcaseparent/notfound
+ 2>bzr: ERROR: No such file: %s
+ """ % (repr(expected_fname),))
+
+
+class TestMove(TestCICPBase):
+
+ def test_mv_newname(self):
+ wt = self._make_mixed_case_tree()
+ run_script(self, """
+ $ bzr add -q
+ $ bzr ci -qm message
+ $ bzr mv camelcaseparent/camelcase camelcaseparent/NewCamelCase
+ CamelCaseParent/CamelCase => CamelCaseParent/NewCamelCase
+ """)
+
+ def test_mv_newname_after(self):
+ wt = self._make_mixed_case_tree()
+ # In this case we can specify the incorrect case for the destination,
+ # as we use --after, so the file-system is sniffed.
+ run_script(self, """
+ $ bzr add -q
+ $ bzr ci -qm message
+ $ mv CamelCaseParent/CamelCase CamelCaseParent/NewCamelCase
+ $ bzr mv --after camelcaseparent/camelcase camelcaseparent/newcamelcase
+ CamelCaseParent/CamelCase => CamelCaseParent/NewCamelCase
+ """)
+
+ def test_mv_newname_exists(self):
+ # test a mv, but when the target already exists with a name that
+ # differs only by case.
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+ run_script(self, """
+ $ bzr mv camelcaseparent/camelcase LOWERCASEPARENT/LOWERCASE
+ 2>bzr: ERROR: Could not move CamelCase => lowercase: \
+lowercaseparent/lowercase is already versioned.
+ """)
+
+ def test_mv_newname_exists_after(self):
+ # test a 'mv --after', but when the target already exists with a name
+ # that differs only by case. Note that this is somewhat unlikely
+ # but still reasonable.
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+ # Remove the source and create a destination file on disk with a different case.
+ # bzr should report that the filename is already versioned.
+ os.unlink('CamelCaseParent/CamelCase')
+ osutils.rename('lowercaseparent/lowercase', 'lowercaseparent/LOWERCASE')
+ run_script(self, """
+ $ bzr mv --after camelcaseparent/camelcase LOWERCASEPARENT/LOWERCASE
+ 2>bzr: ERROR: Could not move CamelCase => lowercase: \
+lowercaseparent/lowercase is already versioned.
+ """)
+
+ def test_mv_newname_root(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+ run_script(self, """
+ $ bzr mv camelcaseparent NewCamelCaseParent
+ CamelCaseParent => NewCamelCaseParent
+ """)
+
+ def test_mv_newname_root_after(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+ # In this case we can specify the incorrect case for the destination,
+ # as we use --after, so the file-system is sniffed.
+ run_script(self, """
+ $ mv CamelCaseParent NewCamelCaseParent
+ $ bzr mv --after camelcaseparent NewCamelCaseParent
+ CamelCaseParent => NewCamelCaseParent
+ """)
+
+ def test_mv_newcase(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+
+ # perform a mv to the new case - we expect bzr to accept the new
+ # name, as specified, and rename the file on the file-system too.
+ run_script(self, """
+ $ bzr mv camelcaseparent/camelcase camelcaseparent/camelCase
+ CamelCaseParent/CamelCase => CamelCaseParent/camelCase
+ """)
+ self.failUnlessEqual(canonical_relpath(wt.basedir, 'camelcaseparent/camelcase'),
+ 'CamelCaseParent/camelCase')
+
+ def test_mv_newcase_after(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+
+ # perform a mv to the new case - we must ensure the file-system has the
+ # new case first.
+ osutils.rename('CamelCaseParent/CamelCase', 'CamelCaseParent/camelCase')
+ run_script(self, """
+ $ bzr mv --after camelcaseparent/camelcase camelcaseparent/camelCase
+ CamelCaseParent/CamelCase => CamelCaseParent/camelCase
+ """)
+ # bzr should not have renamed the file to a different case
+ self.failUnlessEqual(canonical_relpath(wt.basedir, 'camelcaseparent/camelcase'),
+ 'CamelCaseParent/camelCase')
+
+ def test_mv_multiple(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+ run_script(self, """
+ $ bzr mv LOWercaseparent/LOWercase LOWercaseparent/MIXEDCase camelcaseparent
+ lowercaseparent/lowercase => CamelCaseParent/lowercase
+ lowercaseparent/mixedCase => CamelCaseParent/mixedCase
+ """)
+
+
+class TestMisc(TestCICPBase):
+
+ def test_status(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ run_script(self, """
+ $ bzr status camelcaseparent/camelcase LOWERCASEPARENT/LOWERCASE
+ added:
+ CamelCaseParent/
+ CamelCaseParent/CamelCase
+ lowercaseparent/
+ lowercaseparent/lowercase
+ """)
+
+ def test_ci(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+
+ got = self.run_bzr('ci -m message camelcaseparent LOWERCASEPARENT')[1]
+ for expected in ['CamelCaseParent', 'lowercaseparent',
+ 'CamelCaseParent/CamelCase', 'lowercaseparent/lowercase']:
+ self.assertContainsRe(got, 'added ' + expected + '\n')
+
+ def test_rm(self):
+ wt = self._make_mixed_case_tree()
+ self.run_bzr('add')
+ self.run_bzr('ci -m message')
+
+ got = self.run_bzr('rm camelcaseparent LOWERCASEPARENT')[1]
+ for expected in ['lowercaseparent/lowercase', 'CamelCaseParent/CamelCase']:
+ self.assertContainsRe(got, 'deleted ' + expected + '\n')
+
+
+ # The following commands need tests and/or cicp lovin':
+ # update, remove, file_id, file_path, diff, log, touching_revisions, ls,
+ # ignore, cat, revert, resolve.
diff --git a/bzrlib/tests/blackbox/test_filtered_view_ops.py b/bzrlib/tests/blackbox/test_filtered_view_ops.py
new file mode 100644
index 0000000..460e472
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_filtered_view_ops.py
@@ -0,0 +1,198 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests that an enabled view is reported and impacts expected commands."""
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+
+
+class TestViewFileOperations(tests.TestCaseWithTransport):
+
+ def make_abc_tree_with_ab_view(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'c'])
+ wt.views.set_view('my', ['a', 'b'])
+ return wt
+
+ def test_view_on_status(self):
+ wt = self.make_abc_tree_with_ab_view()
+ out, err = self.run_bzr('status')
+ self.assertEquals('Ignoring files outside view. View is a, b\n', err)
+ self.assertEquals('unknown:\n a\n b\n', out)
+
+ def test_view_on_status_selected(self):
+ wt = self.make_abc_tree_with_ab_view()
+ out, err = self.run_bzr('status a')
+ self.assertEquals('', err)
+ self.assertEquals('unknown:\n a\n', out)
+ out, err = self.run_bzr('status c', retcode=3)
+ self.assertEquals('bzr: ERROR: Specified file "c" is outside the '
+ 'current view: a, b\n', err)
+ self.assertEquals('', out)
+
+ def test_view_on_add(self):
+ wt = self.make_abc_tree_with_ab_view()
+ out, err = self.run_bzr('add')
+ self.assertEquals('Ignoring files outside view. View is a, b\n', err)
+ self.assertEquals('adding a\nadding b\n', out)
+
+ def test_view_on_add_selected(self):
+ wt = self.make_abc_tree_with_ab_view()
+ out, err = self.run_bzr('add a')
+ self.assertEquals('', err)
+ self.assertEquals('adding a\n', out)
+ out, err = self.run_bzr('add c', retcode=3)
+ self.assertEquals('bzr: ERROR: Specified file "c" is outside the '
+ 'current view: a, b\n', err)
+ self.assertEquals('', out)
+
+ def test_view_on_diff(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('diff', retcode=1)
+ self.assertEquals('*** Ignoring files outside view. View is a, b\n', err)
+
+ def test_view_on_diff_selected(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('diff a', retcode=1)
+ self.assertEquals('', err)
+ self.assertStartsWith(out, "=== added file 'a'\n")
+ out, err = self.run_bzr('diff c', retcode=3)
+ self.assertEquals('bzr: ERROR: Specified file "c" is outside the '
+ 'current view: a, b\n', err)
+ self.assertEquals('', out)
+
+ def test_view_on_commit(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('commit -m "testing commit"')
+ err_lines = err.splitlines()
+ self.assertEquals('Ignoring files outside view. View is a, b', err_lines[0])
+ self.assertStartsWith(err_lines[1], 'Committing to:')
+ self.assertEquals('added a', err_lines[2])
+ self.assertEquals('added b', err_lines[3])
+ self.assertEquals('Committed revision 1.', err_lines[4])
+ self.assertEquals('', out)
+
+ def test_view_on_commit_selected(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('commit -m "file in view" a')
+ err_lines = err.splitlines()
+ self.assertStartsWith(err_lines[0], 'Committing to:')
+ self.assertEquals('added a', err_lines[1])
+ self.assertEquals('Committed revision 1.', err_lines[2])
+ self.assertEquals('', out)
+ out, err = self.run_bzr('commit -m "file out of view" c', retcode=3)
+ self.assertEquals('bzr: ERROR: Specified file "c" is outside the '
+ 'current view: a, b\n', err)
+ self.assertEquals('', out)
+
+ def test_view_on_remove_selected(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('remove --keep a')
+ self.assertEquals('removed a\n', err)
+ self.assertEquals('', out)
+ out, err = self.run_bzr('remove --keep c', retcode=3)
+ self.assertEquals('bzr: ERROR: Specified file "c" is outside the '
+ 'current view: a, b\n', err)
+ self.assertEquals('', out)
+
+ def test_view_on_revert(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('revert')
+ err_lines = err.splitlines()
+ self.assertEquals('Ignoring files outside view. View is a, b', err_lines[0])
+ self.assertEquals('- a', err_lines[1])
+ self.assertEquals('- b', err_lines[2])
+ self.assertEquals('', out)
+
+ def test_view_on_revert_selected(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('revert a')
+ self.assertEquals('- a\n', err)
+ self.assertEquals('', out)
+ out, err = self.run_bzr('revert c', retcode=3)
+ self.assertEquals('bzr: ERROR: Specified file "c" is outside the '
+ 'current view: a, b\n', err)
+ self.assertEquals('', out)
+
+ def test_view_on_ls(self):
+ wt = self.make_abc_tree_with_ab_view()
+ self.run_bzr('add')
+ out, err = self.run_bzr('ls')
+ out_lines = out.splitlines()
+ self.assertEquals('Ignoring files outside view. View is a, b\n', err)
+ self.assertEquals('a', out_lines[0])
+ self.assertEquals('b', out_lines[1])
+
+
+class TestViewTreeOperations(tests.TestCaseWithTransport):
+
+ def make_abc_tree_and_clone_with_ab_view(self):
+ # Build the first tree
+ wt1 = self.make_branch_and_tree('tree_1')
+ self.build_tree(['tree_1/a', 'tree_1/b', 'tree_1/c'])
+ wt1.add(['a', 'b', 'c'])
+ wt1.commit("adding a b c")
+ # Build the second tree and give it a view
+ wt2 = wt1.bzrdir.sprout('tree_2').open_workingtree()
+ wt2.views.set_view('my', ['a', 'b'])
+ # Commit a change to the first tree
+ self.build_tree_contents([
+ ('tree_1/a', 'changed a\n'),
+ ('tree_1/c', 'changed c\n'),
+ ])
+ wt1.commit("changing a c")
+ return wt1, wt2
+
+ def test_view_on_pull(self):
+ tree_1, tree_2 = self.make_abc_tree_and_clone_with_ab_view()
+ out, err = self.run_bzr('pull -d tree_2 tree_1')
+ self.assertEqualDiff(
+ "Operating on whole tree but only reporting on 'my' view.\n"
+ " M a\n"
+ "All changes applied successfully.\n", err)
+ self.assertEqualDiff("Now on revision 2.\n", out)
+
+ def test_view_on_update(self):
+ tree_1, tree_2 = self.make_abc_tree_and_clone_with_ab_view()
+ self.run_bzr("bind ../tree_1", working_dir='tree_2')
+ out, err = self.run_bzr('update', working_dir='tree_2')
+ self.assertEqualDiff(
+ """Operating on whole tree but only reporting on 'my' view.
+ M a
+All changes applied successfully.
+Updated to revision 2 of branch %s
+""" % osutils.pathjoin(self.test_dir, 'tree_1'),
+ err)
+ self.assertEqual("", out)
+
+ def test_view_on_merge(self):
+ tree_1, tree_2 = self.make_abc_tree_and_clone_with_ab_view()
+ out, err = self.run_bzr('merge -d tree_2 tree_1')
+ self.assertEqualDiff(
+ "Operating on whole tree but only reporting on 'my' view.\n"
+ " M a\n"
+ "All changes applied successfully.\n", err)
+ self.assertEqual("", out)
diff --git a/bzrlib/tests/blackbox/test_find_merge_base.py b/bzrlib/tests/blackbox/test_find_merge_base.py
new file mode 100644
index 0000000..4960fe1
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_find_merge_base.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+import os
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestFindMergeBase(TestCaseWithTransport):
+
+ def test_find_merge_base(self):
+ a_tree = self.make_branch_and_tree('a')
+ a_tree.commit(message='foo', allow_pointless=True)
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ q = self.run_bzr('find-merge-base a b')[0]
+ a_tree.commit(message='bar', allow_pointless=True)
+ b_tree.commit(message='baz', allow_pointless=True)
+ r = self.run_bzr('find-merge-base b a')[0]
+ self.assertEqual(q, r)
+
+ def test_find_null_merge_base(self):
+ tree = self.make_branch_and_tree('foo')
+ tree.commit('message')
+ tree2 = self.make_branch_and_tree('bar')
+ r = self.run_bzr('find-merge-base foo bar')[0]
+ self.assertEqual('merge base is revision null:\n', r)
diff --git a/bzrlib/tests/blackbox/test_help.py b/bzrlib/tests/blackbox/test_help.py
new file mode 100644
index 0000000..237462c
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_help.py
@@ -0,0 +1,205 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr help.
+"""
+
+
+from bzrlib import (
+ config,
+ i18n,
+ tests,
+ )
+
+from bzrlib.tests.test_i18n import ZzzTranslations
+
+
+class TestHelp(tests.TestCaseWithTransport):
+
+ def test_help_basic(self):
+ for cmd in ['--help', 'help', '-h', '-?']:
+ output = self.run_bzr(cmd)[0]
+ line1 = output.split('\n')[0]
+ if not line1.startswith('Bazaar'):
+ self.fail("bad output from bzr %s:\n%r" % (cmd, output))
+ # see https://launchpad.net/products/bzr/+bug/35940, -h doesn't work
+
+ def test_help_topics(self):
+ """Smoketest for 'bzr help topics'"""
+ out, err = self.run_bzr('help topics')
+ self.assertContainsRe(out, 'basic')
+ self.assertContainsRe(out, 'topics')
+ self.assertContainsRe(out, 'commands')
+ self.assertContainsRe(out, 'revisionspec')
+
+ def test_help_revisionspec(self):
+ """Smoke test for 'bzr help revisionspec'"""
+ out, err = self.run_bzr('help revisionspec')
+ self.assertContainsRe(out, 'revno:')
+ self.assertContainsRe(out, 'date:')
+ self.assertContainsRe(out, 'revid:')
+ self.assertContainsRe(out, 'last:')
+ self.assertContainsRe(out, 'before:')
+ self.assertContainsRe(out, 'ancestor:')
+ self.assertContainsRe(out, 'branch:')
+
+ def test_help_checkouts(self):
+ """Smoke test for 'bzr help checkouts'"""
+ out, err = self.run_bzr('help checkouts')
+ self.assertContainsRe(out, 'checkout')
+ self.assertContainsRe(out, 'lightweight')
+
+ def test_help_urlspec(self):
+ """Smoke test for 'bzr help urlspec'"""
+ out, err = self.run_bzr('help urlspec')
+ self.assertContainsRe(out, 'aftp://')
+ self.assertContainsRe(out, 'bzr://')
+ self.assertContainsRe(out, 'bzr\+ssh://')
+ self.assertContainsRe(out, 'file://')
+ self.assertContainsRe(out, 'ftp://')
+ self.assertContainsRe(out, 'http://')
+ self.assertContainsRe(out, 'https://')
+ self.assertContainsRe(out, 'sftp://')
+
+ def test_help_repositories(self):
+ """Smoke test for 'bzr help repositories'"""
+ out, err = self.run_bzr('help repositories')
+ from bzrlib.help_topics import help_as_plain_text, _repositories
+ expected = help_as_plain_text(_repositories)
+ self.assertEqual(expected, out)
+
+ def test_help_working_trees(self):
+ """Smoke test for 'bzr help working-trees'"""
+ out, err = self.run_bzr('help working-trees')
+ from bzrlib.help_topics import help_as_plain_text, _working_trees
+ expected = help_as_plain_text(_working_trees)
+ self.assertEqual(expected, out)
+
+ def test_help_status_flags(self):
+ """Smoke test for 'bzr help status-flags'"""
+ out, err = self.run_bzr('help status-flags')
+ from bzrlib.help_topics import help_as_plain_text, _status_flags
+ expected = help_as_plain_text(_status_flags)
+ self.assertEqual(expected, out)
+
+ def test_help_commands(self):
+ dash_help = self.run_bzr('--help commands')[0]
+ commands = self.run_bzr('help commands')[0]
+ hidden = self.run_bzr('help hidden-commands')[0]
+ long_help = self.run_bzr('help --long')[0]
+ qmark_long = self.run_bzr('? --long')[0]
+ qmark_cmds = self.run_bzr('? commands')[0]
+ self.assertEquals(dash_help, commands)
+ self.assertEquals(dash_help, long_help)
+ self.assertEquals(dash_help, qmark_long)
+ self.assertEquals(dash_help, qmark_cmds)
+
+ def test_help_width_zero(self):
+ self.overrideEnv('BZR_COLUMNS', '0')
+ self.run_bzr('help commands')
+
+ def test_hidden(self):
+ help_commands = self.run_bzr('help commands')[0]
+ help_hidden = self.run_bzr('help hidden-commands')[0]
+
+ def extract_cmd_names(help_output):
+ # keep only the command names to avoid matching on help text
+ # (otherwise there is a high risk of a test failing when a plugin
+ # gets installed)
+ cmds = []
+ for line in help_output.split('\n'):
+ if line.startswith(' '):
+ continue # help on more than one line
+ cmd = line.split(' ')[0]
+ if line:
+ cmds.append(cmd)
+ return cmds
+ commands = extract_cmd_names(help_commands)
+ hidden = extract_cmd_names(help_hidden)
+ self.assertTrue('commit' in commands)
+ self.assertTrue('commit' not in hidden)
+ self.assertTrue('rocks' in hidden)
+ self.assertTrue('rocks' not in commands)
+
+ def test_help_detail(self):
+ dash_h = self.run_bzr('diff -h')[0]
+ help_x = self.run_bzr('help diff')[0]
+ self.assertEquals(dash_h, help_x)
+ self.assertContainsRe(help_x, "Purpose:")
+ self.assertContainsRe(help_x, "Usage:")
+ self.assertContainsRe(help_x, "Options:")
+ self.assertContainsRe(help_x, "Description:")
+ self.assertContainsRe(help_x, "Examples:")
+ self.assertContainsRe(help_x, "See also:")
+ self.assertContainsRe(help_x, "Aliases:")
+
+ def test_help_usage(self):
+ usage = self.run_bzr('diff --usage')[0]
+ self.assertContainsRe(usage, "Purpose:")
+ self.assertContainsRe(usage, "Usage:")
+ self.assertContainsRe(usage, "Options:")
+ self.assertNotContainsRe(usage, "Description:")
+ self.assertNotContainsRe(usage, "Examples:")
+ self.assertContainsRe(usage, "See also:")
+ self.assertContainsRe(usage, "Aliases:")
+
+ def test_help_help(self):
+ help = self.run_bzr('help help')[0]
+ qmark = self.run_bzr('? ?')[0]
+ self.assertEquals(help, qmark)
+ for line in help.split('\n'):
+ if '--long' in line:
+ self.assertContainsRe(line,
+ r'Show help on all commands\.')
+
+ def test_help_with_aliases(self):
+ original = self.run_bzr('help cat')[0]
+
+ conf = config.GlobalConfig.from_string('''[ALIASES]
+c=cat
+cat=cat
+''', save=True)
+
+ expected = original + "'bzr cat' is an alias for 'bzr cat'.\n"
+ self.assertEqual(expected, self.run_bzr('help cat')[0])
+
+ self.assertEqual("'bzr c' is an alias for 'bzr cat'.\n",
+ self.run_bzr('help c')[0])
+
+
+class TestTranslatedHelp(tests.TestCaseWithTransport):
+ """Tests for display of translated help topics"""
+
+ def setUp(self):
+ super(TestTranslatedHelp, self).setUp()
+ self.overrideAttr(i18n, '_translations', ZzzTranslations())
+
+ def test_help_command_utf8(self):
+ out, err = self.run_bzr(["help", "push"], encoding="utf-8")
+ self.assertContainsRe(out, "zz\xc3\xa5{{:See also:")
+
+ def test_help_switch_utf8(self):
+ out, err = self.run_bzr(["push", "--help"], encoding="utf-8")
+ self.assertContainsRe(out, "zz\xc3\xa5{{:See also:")
+
+ def test_help_command_ascii(self):
+ out, err = self.run_bzr(["help", "push"], encoding="ascii")
+ self.assertContainsRe(out, "zz\\?{{:See also:")
+
+ def test_help_switch_ascii(self):
+ out, err = self.run_bzr(["push", "--help"], encoding="ascii")
+ self.assertContainsRe(out, "zz\\?{{:See also:")
diff --git a/bzrlib/tests/blackbox/test_hooks.py b/bzrlib/tests/blackbox/test_hooks.py
new file mode 100644
index 0000000..2d49a06
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_hooks.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for commands related to hooks"""
+
+from bzrlib.branch import Branch
+from bzrlib.tests import TestCaseWithTransport
+
+def _foo_hook():
+ pass
+
+class TestHooks(TestCaseWithTransport):
+
+ def _check_hooks_output(self, command_output, hooks):
+ for hook_type in Branch.hooks:
+ s = "\n ".join(hooks.get(hook_type, ["<no hooks installed>"]))
+ self.assert_("%s:\n %s" % (hook_type, s) in command_output)
+
+ def test_hooks_with_no_hooks(self):
+ self.make_branch('.')
+ out, err = self.run_bzr('hooks')
+ self.assertEqual(err, "")
+ for hook_type in Branch.hooks:
+ self._check_hooks_output(out, {})
+
+ def test_hooks_with_unnamed_hook(self):
+ self.make_branch('.')
+ def foo(): return
+ Branch.hooks.install_named_hook('post_push', foo, None)
+ out, err = self.run_bzr('hooks')
+ self._check_hooks_output(out, {'post_push': ["No hook name"]})
+
+ def test_hooks_with_named_hook(self):
+ self.make_branch('.')
+ def foo(): return
+ name = "Foo Bar Hook"
+ Branch.hooks.install_named_hook('post_push', foo, name)
+ out, err = self.run_bzr('hooks')
+ self._check_hooks_output(out, {'post_push': [name]})
+
+ def test_hooks_no_branch(self):
+ self.run_bzr('hooks')
+
+ def test_hooks_lazy_with_unnamed_hook(self):
+ self.make_branch('.')
+ def foo(): return
+ Branch.hooks.install_named_hook_lazy('post_push',
+ 'bzrlib.tests.blackbox.test_hooks',
+ '_foo_hook',
+ None)
+ out, err = self.run_bzr('hooks')
+ self._check_hooks_output(out, {'post_push': ["No hook name"]})
+
+ def test_hooks_lazy_with_named_hook(self):
+ self.make_branch('.')
+ def foo(): return
+ Branch.hooks.install_named_hook_lazy('post_push',
+ 'bzrlib.tests.blackbox.test_hooks',
+ '_foo_hook',
+ 'hook has a name')
+ out, err = self.run_bzr('hooks')
+ self._check_hooks_output(out, {'post_push': ["hook has a name"]})
diff --git a/bzrlib/tests/blackbox/test_ignore.py b/bzrlib/tests/blackbox/test_ignore.py
new file mode 100644
index 0000000..1ea8087
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_ignore.py
@@ -0,0 +1,180 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""UI tests for bzr ignore."""
+
+
+from cStringIO import StringIO
+import os
+import re
+import sys
+
+from bzrlib import (
+ ignores,
+ osutils,
+ )
+import bzrlib
+from bzrlib.branch import Branch
+import bzrlib.bzrdir as bzrdir
+from bzrlib.errors import BzrCommandError
+from bzrlib.osutils import (
+ pathjoin,
+ )
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.workingtree import WorkingTree
+
+
+class TestCommands(TestCaseWithTransport):
+
+ def test_ignore_absolutes(self):
+ """'ignore' with an absolute path returns an error"""
+ self.make_branch_and_tree('.')
+ self.run_bzr_error(('bzr: ERROR: NAME_PATTERN should not '
+ 'be an absolute path\n',),
+ 'ignore /crud')
+
+ def test_ignore_directories(self):
+ """ignoring a directory should ignore directory tree.
+
+ Also check that trailing slashes on directories are stripped.
+ """
+ self.run_bzr('init')
+ self.build_tree(['dir1/', 'dir1/foo',
+ 'dir2/', 'dir2/bar',
+ 'dir3/', 'dir3/baz'])
+ self.run_bzr(['ignore', 'dir1', 'dir2/', 'dir4\\'])
+ self.check_file_contents('.bzrignore', 'dir1\ndir2\ndir4\n')
+ self.assertEquals(self.run_bzr('unknowns')[0], 'dir3\n')
+
+ def test_ignore_patterns(self):
+ tree = self.make_branch_and_tree('.')
+
+ self.assertEquals(list(tree.unknowns()), [])
+
+ # is_ignored() will now create the user global ignore file
+ # if it doesn't exist, so make sure we ignore it in our tests
+ ignores._set_user_ignores(['*.tmp'])
+
+ self.build_tree_contents(
+ [('foo.tmp', '.tmp files are ignored by default')])
+ self.assertEquals(list(tree.unknowns()), [])
+
+ self.build_tree_contents([('foo.c', 'int main() {}')])
+ self.assertEquals(list(tree.unknowns()), ['foo.c'])
+
+ tree.add('foo.c')
+ self.assertEquals(list(tree.unknowns()), [])
+
+ # 'ignore' works when creating the .bzrignore file
+ self.build_tree_contents([('foo.blah', 'blah')])
+ self.assertEquals(list(tree.unknowns()), ['foo.blah'])
+ self.run_bzr('ignore *.blah')
+ self.assertEquals(list(tree.unknowns()), [])
+ self.check_file_contents('.bzrignore', '*.blah\n')
+
+ # 'ignore' works when the .bzrignore file already exists
+ self.build_tree_contents([('garh', 'garh')])
+ self.assertEquals(list(tree.unknowns()), ['garh'])
+ self.run_bzr('ignore garh')
+ self.assertEquals(list(tree.unknowns()), [])
+ self.check_file_contents('.bzrignore', '*.blah\ngarh\n')
+
+ def test_ignore_multiple_arguments(self):
+ """'ignore' works with multiple arguments"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a','b','c','d'])
+ self.assertEquals(list(tree.unknowns()), ['a', 'b', 'c', 'd'])
+ self.run_bzr('ignore a b c')
+ self.assertEquals(list(tree.unknowns()), ['d'])
+ self.check_file_contents('.bzrignore', 'a\nb\nc\n')
+
+ def test_ignore_no_arguments(self):
+ """'ignore' with no arguments returns an error"""
+ self.make_branch_and_tree('.')
+ self.run_bzr_error(('bzr: ERROR: ignore requires at least one '
+ 'NAME_PATTERN or --default-rules.\n',),
+ 'ignore')
+
+ def test_ignore_default_rules(self):
+ out, err = self.run_bzr(['ignore', '--default-rules'])
+ reference_set = set(ignores.USER_DEFAULTS)
+ output_set = set(out.rstrip().split('\n'))
+ self.assertEqual(reference_set, output_set)
+ self.assertEqual('', err)
+
+ def test_ignore_versioned_file(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a','b'])
+ tree.add('a')
+
+ # test a single versioned file
+ out, err = self.run_bzr('ignore a')
+ self.assertEqual(out,
+ "Warning: the following files are version controlled"\
+ " and match your ignore pattern:\na\n"\
+ "These files will continue to be version controlled"\
+ " unless you 'bzr remove' them.\n")
+
+ # test a single unversioned file
+ out, err = self.run_bzr('ignore b')
+ self.assertEqual(out, '')
+
+ # test wildcards
+ tree.add('b')
+ out, err = self.run_bzr('ignore *')
+ self.assertEqual(out,
+ "Warning: the following files are version controlled"\
+ " and match your ignore pattern:\n.bzrignore\na\nb\n"\
+ "These files will continue to be version controlled"\
+ " unless you 'bzr remove' them.\n")
+
+ def test_ignored_versioned_file_matching_new_pattern(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b'])
+ tree.add(['a', 'b'])
+ self.run_bzr('ignore *')
+
+ # If only the given pattern is used then only 'b' should match in
+ # this case.
+ out, err = self.run_bzr('ignore b')
+ self.assertEqual(out,
+ "Warning: the following files are version controlled"\
+ " and match your ignore pattern:\nb\n"\
+ "These files will continue to be version controlled"\
+ " unless you 'bzr remove' them.\n")
+
+ def test_ignore_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.run_bzr(['ignore', '--directory=a', 'README'])
+ self.check_file_contents('a/.bzrignore', 'README\n')
+
+ def test_ignored_invalid_pattern(self):
+ """Ensure graceful handling for invalid ignore pattern.
+
+ Test case for #300062.
+ Invalid pattern should show clear error message.
+ Invalid pattern should not be added to .bzrignore file.
+ """
+ tree = self.make_branch_and_tree('.')
+ out, err = self.run_bzr(['ignore', 'RE:*.cpp', 'foo', 'RE:['], 3)
+ self.assertEqual(out, '')
+ self.assertContainsRe(err,
+ 'Invalid ignore pattern.*RE:\*\.cpp.*RE:\[', re.DOTALL)
+ self.assertNotContainsRe(err, 'foo', re.DOTALL)
+ self.assertFalse(os.path.isfile('.bzrignore'))
+
diff --git a/bzrlib/tests/blackbox/test_ignored.py b/bzrlib/tests/blackbox/test_ignored.py
new file mode 100644
index 0000000..381dbaf
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_ignored.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2006, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests of the 'bzr ignored' command."""
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestIgnored(TestCaseWithTransport):
+
+ def test_ignored_added_file(self):
+ """'bzr ignored' should not list versioned files."""
+ # This test could be dropped in favour of a more general UI test at some
+ # point, as it is actually testing the internals layer and should not be.
+ # There are no other 'ignored' tests though, so it should be retained
+ # until some are written.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo.pyc'])
+ # ensure that foo.pyc is ignored
+ self.build_tree_contents([('.bzrignore', 'foo.pyc')])
+ self.assertTrue(tree.is_ignored('foo.pyc'))
+ # now add it and check the ui does not show it.
+ tree.add('foo.pyc')
+ out, err = self.run_bzr('ignored')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_ignored_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/README', 'contents'),
+ ('a/.bzrignore', 'README')])
+ out, err = self.run_bzr(['ignored', '--directory=a'])
+ self.assertStartsWith(out, 'README')
diff --git a/bzrlib/tests/blackbox/test_info.py b/bzrlib/tests/blackbox/test_info.py
new file mode 100644
index 0000000..12d0000
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_info.py
@@ -0,0 +1,1583 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the info command of bzr."""
+
+import shutil
+import sys
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ controldir,
+ errors,
+ info,
+ osutils,
+ tests,
+ upgrade,
+ urlutils,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.transport import memory
+
+
+class TestInfo(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestInfo, self).setUp()
+ self._repo_strings = "2a"
+
+ def test_info_non_existing(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ location = self.get_url()
+ out, err = self.run_bzr('info '+location, retcode=3)
+ self.assertEqual(out, '')
+ self.assertEqual(err, 'bzr: ERROR: Not a branch: "%s".\n' % location)
+
+ def test_info_empty_controldir(self):
+ self.make_bzrdir('ctrl')
+ out, err = self.run_bzr('info ctrl')
+ self.assertEquals(out,
+ 'Empty control directory (format: 2a or pack-0.92)\n'
+ 'Location:\n'
+ ' control directory: ctrl\n')
+ self.assertEquals(err, '')
+
+ def test_info_empty_controldir_verbose(self):
+ self.make_bzrdir('ctrl')
+ out, err = self.run_bzr('info -v ctrl')
+ self.assertEqualDiff(out,
+ 'Empty control directory (format: 2a or pack-0.92)\n'
+ 'Location:\n'
+ ' control directory: ctrl\n\n'
+ 'Format:\n'
+ ' control: Meta directory format 1\n\n'
+ 'Control directory:\n'
+ ' 0 branches\n')
+ self.assertEquals(err, '')
+
+ def test_info_dangling_branch_reference(self):
+ br = self.make_branch('target')
+ br.create_checkout('from', lightweight=True)
+ shutil.rmtree('target')
+ out, err = self.run_bzr('info from')
+ self.assertEquals(out,
+ 'Dangling branch reference (format: 2a or pack-0.92)\n'
+ 'Location:\n'
+ ' control directory: from\n'
+ ' checkout of branch: target\n')
+ self.assertEquals(err, '')
+
+ def test_info_standalone(self):
+ transport = self.get_transport()
+
+ # Create initial standalone branch
+ tree1 = self.make_branch_and_tree('standalone', 'knit')
+ self.build_tree(['standalone/a'])
+ tree1.add('a')
+ branch1 = tree1.branch
+
+ out, err = self.run_bzr('info standalone')
+ self.assertEqualDiff(
+"""Standalone tree (format: knit)
+Location:
+ branch root: standalone
+""", out)
+ self.assertEqual('', err)
+
+ # Standalone branch - verbose mode
+ out, err = self.run_bzr('info standalone -v')
+ self.assertEqualDiff(
+"""Standalone tree (format: knit)
+Location:
+ branch root: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: Knit repository format 1
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 1 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""", out)
+ self.assertEqual('', err)
+
+ # Standalone branch - really verbose mode
+ out, err = self.run_bzr('info standalone -vv')
+ self.assertEqualDiff(
+"""Standalone tree (format: knit)
+Location:
+ branch root: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: Knit repository format 1
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 1 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+ 0 committers
+
+Repository:
+ 0 revisions
+""", out)
+ self.assertEqual('', err)
+ tree1.commit('commit one')
+ rev = branch1.repository.get_revision(branch1.last_revision())
+ datestring_first = osutils.format_date(rev.timestamp, rev.timezone)
+
+ # Branch standalone with push location
+ branch2 = branch1.bzrdir.sprout('branch').open_branch()
+ branch2.set_push_location(branch1.bzrdir.root_transport.base)
+
+ out, err = self.run_bzr('info branch')
+ self.assertEqualDiff(
+"""Standalone tree (format: knit)
+Location:
+ branch root: branch
+
+Related branches:
+ push branch: standalone
+ parent branch: standalone
+""", out)
+ self.assertEqual('', err)
+
+ out, err = self.run_bzr('info branch --verbose')
+ self.assertEqualDiff(
+"""Standalone tree (format: knit)
+Location:
+ branch root: branch
+
+Related branches:
+ push branch: standalone
+ parent branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: Knit repository format 1
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Branch and bind to standalone, needs upgrade to metadir
+ # (creates backup as unknown)
+ branch1.bzrdir.sprout('bound')
+ knit1_format = controldir.format_registry.make_bzrdir('knit')
+ upgrade.upgrade('bound', knit1_format)
+ branch3 = controldir.ControlDir.open('bound').open_branch()
+ branch3.bind(branch1)
+ bound_tree = branch3.bzrdir.open_workingtree()
+ out, err = self.run_bzr('info -v bound')
+ self.assertEqualDiff(
+"""Checkout (format: knit)
+Location:
+ checkout root: bound
+ checkout of branch: standalone
+
+Related branches:
+ parent branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: %s
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (bound_tree._format.get_format_description(),
+ branch3._format.get_format_description(),
+ branch3.repository._format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Checkout standalone (same as above, but does not have parent set)
+ branch4 = controldir.ControlDir.create_branch_convenience('checkout',
+ format=knit1_format)
+ branch4.bind(branch1)
+ branch4.bzrdir.open_workingtree().update()
+ out, err = self.run_bzr('info checkout --verbose')
+ self.assertEqualDiff(
+"""Checkout (format: knit)
+Location:
+ checkout root: checkout
+ checkout of branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (branch4.repository._format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Lightweight checkout (same as above, different branch and repository)
+ tree5 = branch1.create_checkout('lightcheckout', lightweight=True)
+ branch5 = tree5.branch
+ out, err = self.run_bzr('info -v lightcheckout')
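+ # The reported format name depends on whether the old 'metaweave' format
+ # is still registered.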
+ if "metaweave" in controldir.format_registry:
+ format_description = "knit or metaweave"
+ else:
+ format_description = "knit"
+ self.assertEqualDiff(
+"""Lightweight checkout (format: %s)
+Location:
+ light checkout root: lightcheckout
+ checkout of branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: Knit repository format 1
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (format_description, datestring_first, datestring_first,), out)
+ self.assertEqual('', err)
+
+ # Update initial standalone branch
+ self.build_tree(['standalone/b'])
+ tree1.add('b')
+ tree1.commit('commit two')
+ rev = branch1.repository.get_revision(branch1.last_revision())
+ datestring_last = osutils.format_date(rev.timestamp, rev.timezone)
+
+ # An out-of-date branched standalone branch will not be detected
+ out, err = self.run_bzr('info -v branch')
+ self.assertEqualDiff(
+"""Standalone tree (format: knit)
+Location:
+ branch root: branch
+
+Related branches:
+ push branch: standalone
+ parent branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: Knit repository format 1
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Out of date bound branch
+ out, err = self.run_bzr('info -v bound')
+ self.assertEqualDiff(
+"""Checkout (format: knit)
+Location:
+ checkout root: bound
+ checkout of branch: standalone
+
+Related branches:
+ parent branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Branch is out of date: missing 1 revision.
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (branch3.repository._format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Out of date checkout
+ out, err = self.run_bzr('info -v checkout')
+ self.assertEqualDiff(
+"""Checkout (format: knit)
+Location:
+ checkout root: checkout
+ checkout of branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Branch is out of date: missing 1 revision.
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (branch4.repository._format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Out of date lightweight checkout
+ out, err = self.run_bzr('info lightcheckout --verbose')
+ self.assertEqualDiff(
+"""Lightweight checkout (format: %s)
+Location:
+ light checkout root: lightcheckout
+ checkout of branch: standalone
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: Branch format 5
+ repository: Knit repository format 1
+
+Control directory:
+ 1 branches
+
+Working tree is out of date: missing 1 revision.
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 2 revisions
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 2 revisions
+""" % (format_description, datestring_first, datestring_last,), out)
+ self.assertEqual('', err)
+
+ def test_info_standalone_no_tree(self):
+ # create standalone branch without a working tree
+ format = controldir.format_registry.make_bzrdir('default')
+ branch = self.make_branch('branch')
+ repo = branch.repository
+ out, err = self.run_bzr('info branch -v')
+ self.assertEqualDiff(
+"""Standalone branch (format: %s)
+Location:
+ branch root: branch
+
+Format:
+ control: Meta directory format 1
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % (info.describe_format(repo.bzrdir, repo, branch, None),
+ format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ def test_info_shared_repository(self):
+ format = controldir.format_registry.make_bzrdir('knit')
+ transport = self.get_transport()
+
+ # Create shared repository
+ repo = self.make_repository('repo', shared=True, format=format)
+ repo.set_make_working_trees(False)
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Shared repository (format: dirstate or dirstate-tags or knit)
+Location:
+ shared repository: %s
+
+Format:
+ control: Meta directory format 1
+ repository: %s
+
+Control directory:
+ 0 branches
+
+Repository:
+ 0 revisions
+""" % ('repo', format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Create branch inside shared repository
+ repo.bzrdir.root_transport.mkdir('branch')
+ branch1 = controldir.ControlDir.create_branch_convenience(
+ 'repo/branch', format=format)
+ out, err = self.run_bzr('info -v repo/branch')
+ self.assertEqualDiff(
+"""Repository branch (format: dirstate or knit)
+Location:
+ shared repository: repo
+ repository branch: repo/branch
+
+Format:
+ control: Meta directory format 1
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Create lightweight checkout
+ transport.mkdir('tree')
+ transport.mkdir('tree/lightcheckout')
+ tree2 = branch1.create_checkout('tree/lightcheckout',
+ lightweight=True)
+ branch2 = tree2.branch
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout', tree2,
+ shared_repo=repo, repo_branch=branch1, verbose=True)
+
+ # Create normal checkout
+ tree3 = branch1.create_checkout('tree/checkout')
+ self.assertCheckoutStatusOutput('tree/checkout --verbose', tree3,
+ verbose=True,
+ light_checkout=False, repo_branch=branch1)
+ # Update lightweight checkout
+ self.build_tree(['tree/lightcheckout/a'])
+ tree2.add('a')
+ tree2.commit('commit one')
+ rev = repo.get_revision(branch2.last_revision())
+ datestring_first = osutils.format_date(rev.timestamp, rev.timezone)
+ out, err = self.run_bzr('info tree/lightcheckout --verbose')
+ self.assertEqualDiff(
+"""Lightweight checkout (format: %s)
+Location:
+ light checkout root: tree/lightcheckout
+ checkout of branch: repo/branch
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 6
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (self._repo_strings, format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Out of date checkout
+ out, err = self.run_bzr('info -v tree/checkout')
+ self.assertEqualDiff(
+"""Checkout (format: unnamed)
+Location:
+ checkout root: tree/checkout
+ checkout of branch: repo/branch
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 6
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Branch is out of date: missing 1 revision.
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Update checkout
+ tree3.update()
+ self.build_tree(['tree/checkout/b'])
+ tree3.add('b')
+ out, err = self.run_bzr('info tree/checkout --verbose')
+ self.assertEqualDiff(
+"""Checkout (format: unnamed)
+Location:
+ checkout root: tree/checkout
+ checkout of branch: repo/branch
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 6
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 1 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+ tree3.commit('commit two')
+
+ # Out of date lightweight checkout
+ rev = repo.get_revision(branch1.last_revision())
+ datestring_last = osutils.format_date(rev.timestamp, rev.timezone)
+ out, err = self.run_bzr('info tree/lightcheckout --verbose')
+ self.assertEqualDiff(
+"""Lightweight checkout (format: %s)
+Location:
+ light checkout root: tree/lightcheckout
+ checkout of branch: repo/branch
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 6
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Working tree is out of date: missing 1 revision.
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 2 revisions
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 2 revisions
+""" % (self._repo_strings, format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ datestring_first, datestring_last,
+ ), out)
+ self.assertEqual('', err)
+
+ # Show info about shared branch
+ out, err = self.run_bzr('info repo/branch --verbose')
+ self.assertEqualDiff(
+"""Repository branch (format: dirstate or knit)
+Location:
+ shared repository: repo
+ repository branch: repo/branch
+
+Format:
+ control: Meta directory format 1
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+Branch history:
+ 2 revisions
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 2 revisions
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ datestring_first, datestring_last,
+ ), out)
+ self.assertEqual('', err)
+
+ # Show info about repository with revisions
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Shared repository (format: dirstate or dirstate-tags or knit)
+Location:
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ repository: %s
+
+Control directory:
+ 0 branches
+
+Repository:
+ 2 revisions
+""" % (format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ def test_info_shared_repository_with_trees(self):
+ format = controldir.format_registry.make_bzrdir('knit')
+ transport = self.get_transport()
+
+ # Create shared repository with working trees
+ repo = self.make_repository('repo', shared=True, format=format)
+ repo.set_make_working_trees(True)
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Shared repository with trees (format: dirstate or dirstate-tags or knit)
+Location:
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ repository: %s
+
+Control directory:
+ 0 branches
+
+Create working tree for new branches inside the repository.
+
+Repository:
+ 0 revisions
+""" % (format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Create two branches
+ repo.bzrdir.root_transport.mkdir('branch1')
+ branch1 = controldir.ControlDir.create_branch_convenience('repo/branch1',
+ format=format)
+ branch2 = branch1.bzrdir.sprout('repo/branch2').open_branch()
+
+ # Empty first branch
+ out, err = self.run_bzr('info repo/branch1 --verbose')
+ self.assertEqualDiff(
+"""Repository tree (format: knit)
+Location:
+ shared repository: repo
+ repository branch: repo/branch1
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Update first branch
+ self.build_tree(['repo/branch1/a'])
+ tree1 = branch1.bzrdir.open_workingtree()
+ tree1.add('a')
+ tree1.commit('commit one')
+ rev = repo.get_revision(branch1.last_revision())
+ datestring_first = osutils.format_date(rev.timestamp, rev.timezone)
+ out, err = self.run_bzr('info -v repo/branch1')
+ self.assertEqualDiff(
+"""Repository tree (format: knit)
+Location:
+ shared repository: repo
+ repository branch: repo/branch1
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Out of date second branch
+ out, err = self.run_bzr('info repo/branch2 --verbose')
+ self.assertEqualDiff(
+"""Repository tree (format: knit)
+Location:
+ shared repository: repo
+ repository branch: repo/branch2
+
+Related branches:
+ parent branch: repo/branch1
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 1 revision
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Update second branch
+ tree2 = branch2.bzrdir.open_workingtree()
+ tree2.pull(branch1)
+ out, err = self.run_bzr('info -v repo/branch2')
+ self.assertEqualDiff(
+"""Repository tree (format: knit)
+Location:
+ shared repository: repo
+ repository branch: repo/branch2
+
+Related branches:
+ parent branch: repo/branch1
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 1 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 1 revision
+ 0 days old
+ first revision: %s
+ latest revision: %s
+
+Repository:
+ 1 revision
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ datestring_first, datestring_first,
+ ), out)
+ self.assertEqual('', err)
+
+ # Show info about repository with revisions
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Shared repository with trees (format: dirstate or dirstate-tags or knit)
+Location:
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ repository: %s
+
+Control directory:
+ 0 branches
+
+Create working tree for new branches inside the repository.
+
+Repository:
+ 1 revision
+""" % (format.repository_format.get_format_description(),
+ ),
+ out)
+ self.assertEqual('', err)
+
+ def test_info_shared_repository_with_tree_in_root(self):
+ format = controldir.format_registry.make_bzrdir('knit')
+ transport = self.get_transport()
+
+ # Create shared repository with working trees
+ repo = self.make_repository('repo', shared=True, format=format)
+ repo.set_make_working_trees(True)
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Shared repository with trees (format: dirstate or dirstate-tags or knit)
+Location:
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ repository: %s
+
+Control directory:
+ 0 branches
+
+Create working tree for new branches inside the repository.
+
+Repository:
+ 0 revisions
+""" % (format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ # Create branch in root of repository
+ control = repo.bzrdir
+ branch = control.create_branch()
+ control.create_workingtree()
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Repository tree (format: knit)
+Location:
+ shared repository: repo
+ repository branch: repo
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 3
+ branch: %s
+ repository: %s
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""" % (format.get_branch_format().get_format_description(),
+ format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ def test_info_repository_hook(self):
+ format = controldir.format_registry.make_bzrdir('knit')
+ def repo_info(repo, stats, outf):
+ outf.write("more info\n")
+ info.hooks.install_named_hook('repository', repo_info, None)
+ # Create shared repository with working trees
+ repo = self.make_repository('repo', shared=True, format=format)
+ out, err = self.run_bzr('info -v repo')
+ self.assertEqualDiff(
+"""Shared repository with trees (format: dirstate or dirstate-tags or knit)
+Location:
+ shared repository: repo
+
+Format:
+ control: Meta directory format 1
+ repository: %s
+
+Control directory:
+ 0 branches
+
+Create working tree for new branches inside the repository.
+
+Repository:
+ 0 revisions
+more info
+""" % (format.repository_format.get_format_description(),
+ ), out)
+ self.assertEqual('', err)
+
+ def test_info_unshared_repository_with_colocated_branches(self):
+ format = controldir.format_registry.make_bzrdir('development-colo')
+ transport = self.get_transport()
+
+ # Create unshared repository
+ repo = self.make_repository('repo', shared=False, format=format)
+ repo.set_make_working_trees(True)
+ repo.bzrdir.create_branch(name='foo')
+ out, err = self.run_bzr('info repo')
+ self.assertEqualDiff(
+"""Unshared repository with trees and colocated branches (format: development-colo)
+Location:
+ repository: repo
+""", out)
+ self.assertEqual('', err)
+
+ def assertCheckoutStatusOutput(self,
+ command_string, lco_tree, shared_repo=None,
+ repo_branch=None,
+ tree_locked=False,
+ branch_locked=False, repo_locked=False,
+ verbose=False,
+ light_checkout=True,
+ checkout_root=None):
+ """Check the output of info in a checkout.
+
+ This is not quite a mirror of the info code: rather than using the
+ tree being examined to predict output, it uses a bunch of flags which
+ allow us, the test writers, to document what *should* be present in
+ the output. Removing this separation would remove the value of the
+ tests.
+
+ :param command_string: arguments to pass to 'bzr info', typically the path to the checkout.
+ :param lco_tree: the tree object for the light checkout.
+ :param shared_repo: A shared repository is in use, expect that in
+ the output.
+ :param repo_branch: A branch in a shared repository for non light
+ checkouts.
+ :param tree_locked: If true, expect the tree to be locked.
+ :param branch_locked: If true, expect the branch to be locked.
+ :param repo_locked: If true, expect the repository to be locked.
+ Note that the lco_tree.branch.repository is inspected, and if it is not
+ actually locked then this parameter is overridden. This is because
+ pack repositories do not have any public API for obtaining an
+ exclusive repository-wide lock.
+ :param verbose: verbosity level: 2 or higher to show committers
+ """
+ def friendly_location(url):
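+ # Show the location relative to the current directory when possible.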
+ path = urlutils.unescape_for_display(url, 'ascii')
+ try:
+ return osutils.relpath(osutils.getcwd(), path)
+ except errors.PathNotChild:
+ return path
+
+ if tree_locked:
+ # We expect this to fail because of locking errors.
+ # (A write-locked file cannot be read-locked
+ # in a different process -- either on win32 or on linux).
+ # This should be removed when the locking errors are fixed.
+ self.expectFailure('OS locks are exclusive '
+ 'for different processes (Bug #174055)',
+ self.run_bzr_subprocess,
+ 'info ' + command_string)
+ out, err = self.run_bzr('info %s' % command_string)
+ description = {
+ (True, True): 'Lightweight checkout',
+ (True, False): 'Repository checkout',
+ (False, True): 'Lightweight checkout',
+ (False, False): 'Checkout',
+ }[(shared_repo is not None, light_checkout)]
+ format = {True: self._repo_strings,
+ False: 'unnamed'}[light_checkout]
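+ # Pack repositories have no public API for an exclusive repository-wide
+ # lock (see docstring), so fall back to the actual physical lock status.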
+ if repo_locked:
+ repo_locked = lco_tree.branch.repository.get_physical_lock_status()
+ if repo_locked or branch_locked or tree_locked:
+ def locked_message(a_bool):
+ if a_bool:
+ return 'locked'
+ else:
+ return 'unlocked'
+ expected_lock_output = (
+ "\n"
+ "Lock status:\n"
+ " working tree: %s\n"
+ " branch: %s\n"
+ " repository: %s\n" % (
+ locked_message(tree_locked),
+ locked_message(branch_locked),
+ locked_message(repo_locked)))
+ else:
+ expected_lock_output = ''
+ tree_data = ''
+ extra_space = ''
+ if light_checkout:
+ tree_data = (" light checkout root: %s\n" %
+ friendly_location(lco_tree.bzrdir.root_transport.base))
+ extra_space = ' '
+ if lco_tree.branch.get_bound_location() is not None:
+ tree_data += ("%s checkout root: %s\n" % (extra_space,
+ friendly_location(lco_tree.branch.bzrdir.root_transport.base)))
+ if shared_repo is not None:
+ branch_data = (
+ " checkout of branch: %s\n"
+ " shared repository: %s\n" %
+ (friendly_location(repo_branch.bzrdir.root_transport.base),
+ friendly_location(shared_repo.bzrdir.root_transport.base)))
+ elif repo_branch is not None:
+ branch_data = (
+ "%s checkout of branch: %s\n" %
+ (extra_space,
+ friendly_location(repo_branch.bzrdir.root_transport.base)))
+ else:
+ branch_data = (" checkout of branch: %s\n" %
+ lco_tree.branch.bzrdir.root_transport.base)
+
+ if verbose >= 2:
+ verbose_info = ' 0 committers\n'
+ else:
+ verbose_info = ''
+
+ self.assertEqualDiff(
+"""%s (format: %s)
+Location:
+%s%s
+Format:
+ control: Meta directory format 1
+ working tree: %s
+ branch: %s
+ repository: %s
+%s
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+
+Branch history:
+ 0 revisions
+%s
+Repository:
+ 0 revisions
+""" % (description,
+ format,
+ tree_data,
+ branch_data,
+ lco_tree._format.get_format_description(),
+ lco_tree.branch._format.get_format_description(),
+ lco_tree.branch.repository._format.get_format_description(),
+ expected_lock_output,
+ verbose_info,
+ ), out)
+ self.assertEqual('', err)
+
+ def test_info_locking(self):
+ transport = self.get_transport()
+ # Create shared repository with a branch
+ repo = self.make_repository('repo', shared=True,
+ format=bzrdir.BzrDirMetaFormat1())
+ repo.set_make_working_trees(False)
+ repo.bzrdir.root_transport.mkdir('branch')
+ repo_branch = controldir.ControlDir.create_branch_convenience(
+ 'repo/branch', format=bzrdir.BzrDirMetaFormat1())
+ # Do a heavy checkout
+ transport.mkdir('tree')
+ transport.mkdir('tree/checkout')
+ co_branch = controldir.ControlDir.create_branch_convenience(
+ 'tree/checkout', format=bzrdir.BzrDirMetaFormat1())
+ co_branch.bind(repo_branch)
+ # Do a light checkout of the heavy one
+ transport.mkdir('tree/lightcheckout')
+ lco_dir = bzrdir.BzrDirMetaFormat1().initialize('tree/lightcheckout')
+ lco_dir.set_branch_reference(co_branch)
+ lco_dir.create_workingtree()
+ lco_tree = lco_dir.open_workingtree()
+
+ # Test all permutations of locking the working tree, branch and repository
+ # W B R
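+ # (W = working tree, B = branch, R = repository; U = unlocked, L = locked)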
+
+ # U U U
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout', lco_tree,
+ repo_branch=repo_branch,
+ verbose=True, light_checkout=True)
+ # U U L
+ lco_tree.branch.repository.lock_write()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree, repo_branch=repo_branch,
+ repo_locked=True, verbose=True, light_checkout=True)
+ finally:
+ lco_tree.branch.repository.unlock()
+ # U L L
+ lco_tree.branch.lock_write()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree,
+ branch_locked=True,
+ repo_locked=True,
+ repo_branch=repo_branch,
+ verbose=True)
+ finally:
+ lco_tree.branch.unlock()
+ # L L L
+ lco_tree.lock_write()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree, repo_branch=repo_branch,
+ tree_locked=True,
+ branch_locked=True,
+ repo_locked=True,
+ verbose=True)
+ finally:
+ lco_tree.unlock()
+ # L L U
+ lco_tree.lock_write()
+ lco_tree.branch.repository.unlock()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree, repo_branch=repo_branch,
+ tree_locked=True,
+ branch_locked=True,
+ verbose=True)
+ finally:
+ lco_tree.branch.repository.lock_write()
+ lco_tree.unlock()
+ # L U U
+ lco_tree.lock_write()
+ lco_tree.branch.unlock()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree, repo_branch=repo_branch,
+ tree_locked=True,
+ verbose=True)
+ finally:
+ lco_tree.branch.lock_write()
+ lco_tree.unlock()
+ # L U L
+ lco_tree.lock_write()
+ lco_tree.branch.unlock()
+ lco_tree.branch.repository.lock_write()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree, repo_branch=repo_branch,
+ tree_locked=True,
+ repo_locked=True,
+ verbose=True)
+ finally:
+ lco_tree.branch.repository.unlock()
+ lco_tree.branch.lock_write()
+ lco_tree.unlock()
+ # U L U
+ lco_tree.branch.lock_write()
+ lco_tree.branch.repository.unlock()
+ try:
+ self.assertCheckoutStatusOutput('-v tree/lightcheckout',
+ lco_tree, repo_branch=repo_branch,
+ branch_locked=True,
+ verbose=True)
+ finally:
+ lco_tree.branch.repository.lock_write()
+ lco_tree.branch.unlock()
+
+ if sys.platform == 'win32':
+ self.knownFailure('Win32 cannot run "bzr info"'
+ ' when the tree is locked.')
+
+ def test_info_stacked(self):
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('mainline',
+ format='1.6')
+ trunk_tree.commit('mainline')
+ # and a branch from it which is stacked
+ new_dir = trunk_tree.bzrdir.sprout('newbranch', stacked=True)
+ out, err = self.run_bzr('info newbranch')
+ self.assertEqual(
+"""Standalone tree (format: 1.6)
+Location:
+ branch root: newbranch
+
+Related branches:
+ parent branch: mainline
+ stacked on: mainline
+""", out)
+ self.assertEqual("", err)
+
+ def test_info_revinfo_optional(self):
+ tree = self.make_branch_and_tree('.')
+ def last_revision_info(self):
+ raise errors.UnsupportedOperation(last_revision_info, self)
+ self.overrideAttr(
+ branch.Branch, "last_revision_info", last_revision_info)
+ out, err = self.run_bzr('info -v .')
+ self.assertEqual(
+"""Standalone tree (format: 2a)
+Location:
+ branch root: .
+
+Format:
+ control: Meta directory format 1
+ working tree: Working tree format 6
+ branch: Branch format 7
+ repository: Repository format 2a - rich roots, group compression and chk inventories
+
+Control directory:
+ 1 branches
+
+In the working tree:
+ 0 unchanged
+ 0 modified
+ 0 added
+ 0 removed
+ 0 renamed
+ 0 unknown
+ 0 ignored
+ 0 versioned subdirectories
+""", out)
+ self.assertEqual("", err)
+
+ def test_info_shows_colocated_branches(self):
+ bzrdir = self.make_branch('.', format='development-colo').bzrdir
+ bzrdir.create_branch(name="colo1")
+ bzrdir.create_branch(name="colo2")
+ bzrdir.create_branch(name="colo3")
+ out, err = self.run_bzr('info -v .')
+ self.assertEqualDiff(
+"""Standalone branch (format: development-colo)
+Location:
+ branch root: .
+
+Format:
+ control: Meta directory format 1 with support for colocated branches
+ branch: Branch format 7
+ repository: Repository format 2a - rich roots, group compression and chk inventories
+
+Control directory:
+ 4 branches
+
+Branch history:
+ 0 revisions
+
+Repository:
+ 0 revisions
+""", out)
+ self.assertEqual("", err)
+
+
+class TestSmartServerInfo(tests.TestCaseWithTransport):
+
+ def test_simple_branch_info(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['info', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(10, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_verbose_branch_info(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['info', '-v', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(14, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_init.py b/bzrlib/tests/blackbox/test_init.py
new file mode 100644
index 0000000..7031aba
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_init.py
@@ -0,0 +1,236 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Test 'bzr init'"""
+
+import os
+import re
+
+from bzrlib import (
+ branch as _mod_branch,
+ config as _mod_config,
+ osutils,
+ urlutils,
+ )
+from bzrlib.bzrdir import BzrDirMetaFormat1
+from bzrlib.tests import TestSkipped
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.workingtree import WorkingTree
+
+
+class TestInit(TestCaseWithTransport):
+
+ def setUp(self):
+ TestCaseWithTransport.setUp(self)
+ self._default_label = '2a'
+
+ def test_init_with_format(self):
+ # Verify bzr init --format constructs something plausible
+ t = self.get_transport()
+ self.run_bzr('init --format default')
+ self.assertIsDirectory('.bzr', t)
+ self.assertIsDirectory('.bzr/checkout', t)
+ self.assertIsDirectory('.bzr/checkout/lock', t)
+
+ def test_init_format_2a(self):
+ """Smoke test for constructing a format 2a repository."""
+ out, err = self.run_bzr('init --format=2a')
+ self.assertEqual("""Created a standalone tree (format: 2a)\n""",
+ out)
+ self.assertEqual('', err)
+
+ def test_init_colocated(self):
+ """Smoke test for constructing a colocated branch."""
+ out, err = self.run_bzr('init --format=development-colo file:,branch=abranch')
+ self.assertEqual("""Created a lightweight checkout (format: development-colo)\n""",
+ out)
+ self.assertEqual('', err)
+ out, err = self.run_bzr('branches')
+ self.assertEqual(" abranch\n", out)
+ self.assertEqual('', err)
+
+ def test_init_at_repository_root(self):
+ # bzr init at the root of a repository should create a branch
+ # and working tree even when creation of working trees is disabled.
+ t = self.get_transport()
+ t.mkdir('repo')
+ format = BzrDirMetaFormat1()
+ newdir = format.initialize(t.abspath('repo'))
+ repo = newdir.create_repository(shared=True)
+ repo.set_make_working_trees(False)
+ out, err = self.run_bzr('init repo')
+ self.assertEqual("""Created a repository tree (format: %s)
+Using shared repository: %s
+""" % (self._default_label, urlutils.local_path_from_url(
+ repo.bzrdir.root_transport.external_url())), out)
+ cwd = osutils.getcwd()
+ self.assertEndsWith(out, cwd + '/repo/\n')
+ self.assertEqual('', err)
+ newdir.open_branch()
+ newdir.open_workingtree()
+
+ def test_init_branch(self):
+ out, err = self.run_bzr('init')
+ self.assertEqual("Created a standalone tree (format: %s)\n" % (
+ self._default_label,), out)
+ self.assertEqual('', err)
+
+ # Can it handle subdirectories of branches too?
+ out, err = self.run_bzr('init subdir1')
+ self.assertEqual("Created a standalone tree (format: %s)\n" % (
+ self._default_label,), out)
+ self.assertEqual('', err)
+ WorkingTree.open('subdir1')
+
+ self.run_bzr_error(['Parent directory of subdir2/nothere does not exist'],
+ 'init subdir2/nothere')
+ out, err = self.run_bzr('init subdir2/nothere', retcode=3)
+ self.assertEqual('', out)
+
+ os.mkdir('subdir2')
+ out, err = self.run_bzr('init subdir2')
+ self.assertEqual("Created a standalone tree (format: %s)\n" % (
+ self._default_label,), out)
+ self.assertEqual('', err)
+ # init an existing branch.
+ out, err = self.run_bzr('init subdir2', retcode=3)
+ self.assertEqual('', out)
+ self.assertTrue(err.startswith('bzr: ERROR: Already a branch:'))
+
+ def test_init_branch_quiet(self):
+ out, err = self.run_bzr('init -q')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_init_existing_branch(self):
+ self.run_bzr('init')
+ out, err = self.run_bzr('init', retcode=3)
+ self.assertContainsRe(err, 'Already a branch')
+ # don't suggest making a checkout, there's already a working tree
+ self.assertFalse(re.search(r'checkout', err))
+
+ def test_init_existing_without_workingtree(self):
+ # make a repository
+ repo = self.make_repository('.', shared=True)
+ repo.set_make_working_trees(False)
+ # make a branch; by default without a working tree
+ self.run_bzr('init subdir')
+ # fail
+ out, err = self.run_bzr('init subdir', retcode=3)
+ # suggests using checkout
+ self.assertContainsRe(err,
+ 'ontains a branch.*but no working tree.*checkout')
+
+ def test_no_defaults(self):
+ """Init creates no default ignore rules."""
+ self.run_bzr('init')
+ self.assertFalse(os.path.exists('.bzrignore'))
+
+ def test_init_unicode(self):
+ # Make sure getcwd can handle unicode filenames
+ try:
+ os.mkdir(u'mu-\xb5')
+ except UnicodeError:
+ raise TestSkipped("Unable to create Unicode filename")
+ # try to init unicode dir
+ self.run_bzr(['init', '-q', u'mu-\xb5'])
+
+ def create_simple_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('one', rev_id='r1')
+ return tree
+
+ def test_init_create_prefix(self):
+ """'bzr init --create-prefix; will create leading directories."""
+ tree = self.create_simple_tree()
+
+ self.run_bzr_error(['Parent directory of ../new/tree does not exist'],
+ 'init ../new/tree', working_dir='tree')
+ self.run_bzr('init ../new/tree --create-prefix', working_dir='tree')
+ self.assertPathExists('new/tree/.bzr')
+
+ def test_init_default_format_option(self):
+ """bzr init should read default format from option default_format"""
+ conf = _mod_config.GlobalConfig.from_string('''
+[DEFAULT]
+default_format = 1.9
+''', save=True)
+ out, err = self.run_bzr_subprocess('init')
+ self.assertContainsRe(out, '1.9')
+
+ def test_init_no_tree(self):
+ """'bzr init --no-tree' creates a branch with no working tree."""
+ out, err = self.run_bzr('init --no-tree')
+ self.assertStartsWith(out, 'Created a standalone branch')
+
+
+class TestSFTPInit(TestCaseWithSFTPServer):
+
+ def test_init(self):
+ # init on a remote url should succeed.
+ out, err = self.run_bzr(['init', '--pack-0.92', self.get_url()])
+ self.assertEqual(out,
+ """Created a standalone branch (format: pack-0.92)\n""")
+ self.assertEqual('', err)
+
+ def test_init_existing_branch(self):
+ # when there is already a branch present, make mention
+ self.make_branch('.')
+
+ # rely on SFTPServer get_url() pointing at '.'
+ out, err = self.run_bzr_error(['Already a branch'],
+ ['init', self.get_url()])
+
+ # make sure using 'bzr checkout' is not suggested
+ # for remote locations missing a working tree
+ self.assertFalse(re.search(r'use bzr checkout', err))
+
+ def test_init_existing_branch_with_workingtree(self):
+ # don't distinguish between the branch having a working tree or not
+ # when the branch itself is remote.
+ self.make_branch_and_tree('.')
+
+ # rely on SFTPServer get_url() pointing at '.'
+ self.run_bzr_error(['Already a branch'], ['init', self.get_url()])
+
+ def test_init_append_revisions_only(self):
+ self.run_bzr('init --dirstate-tags normal_branch6')
+ branch = _mod_branch.Branch.open('normal_branch6')
+ self.assertEqual(None, branch.get_append_revisions_only())
+ self.run_bzr('init --append-revisions-only --dirstate-tags branch6')
+ branch = _mod_branch.Branch.open('branch6')
+ self.assertEqual(True, branch.get_append_revisions_only())
+ self.run_bzr_error(['cannot be set to append-revisions-only'],
+ 'init --append-revisions-only --knit knit')
+
+ def test_init_without_username(self):
+ """Ensure init works if username is not set.
+ """
+ # bzr makes a user-specified whoami mandatory for operations
+ # like commit, as the whoami is recorded. init, however, is not so final
+ # and uses the whoami only in a lock file. Without a whoami the login
+ # name is used. This test ensures that init passes even when no whoami
+ # is available.
+ self.overrideEnv('EMAIL', None)
+ self.overrideEnv('BZR_EMAIL', None)
+ out, err = self.run_bzr(['init', 'foo'])
+ self.assertEqual(err, '')
+ self.assertTrue(os.path.exists('foo'))
+
diff --git a/bzrlib/tests/blackbox/test_inventory.py b/bzrlib/tests/blackbox/test_inventory.py
new file mode 100644
index 0000000..4d55c2e
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_inventory.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for 'bzr inventory'."""
+
+import os
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestInventory(TestCaseWithTransport):
+
+ def setUp(self):
+ TestCaseWithTransport.setUp(self)
+
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('init', rev_id='one')
+ self.tree = tree
+
+ def assertInventoryEqual(self, expected, args=None, **kwargs):
+ """Test that the output of 'bzr inventory' is as expected.
+
+ Any arguments supplied will be passed to run_bzr.
+ """
+ command = 'inventory'
+ if args is not None:
+ command += ' ' + args
+ out, err = self.run_bzr(command, **kwargs)
+ self.assertEqual(expected, out)
+ self.assertEqual('', err)
+
+ def test_inventory(self):
+ self.assertInventoryEqual('a\nb\nb/c\n')
+
+ def test_inventory_kind(self):
+ self.assertInventoryEqual('a\nb/c\n', '--kind file')
+ self.assertInventoryEqual('b\n', '--kind directory')
+
+ def test_inventory_show_ids(self):
+ expected = ''.join(('%-50s %s\n' % (path, file_id))
+ for path, file_id in
+ [('a', 'a-id'),
+ ('b', 'b-id'),
+ ('b/c', 'c-id')
+ ]
+ )
+ self.assertInventoryEqual(expected, '--show-ids')
+
+ def test_inventory_specific_files(self):
+ self.assertInventoryEqual('a\n', 'a')
+ self.assertInventoryEqual('b\nb/c\n', 'b b/c')
+ # 'bzr inventory' recurses into subdirectories
+ self.assertInventoryEqual('b\nb/c\n', 'b')
+
+ def test_inventory_mixed(self):
+ """Test that we get expected results when mixing parameters"""
+ a_line = '%-50s %s\n' % ('a', 'a-id')
+ b_line = '%-50s %s\n' % ('b', 'b-id')
+ c_line = '%-50s %s\n' % ('b/c', 'c-id')
+
+ self.assertInventoryEqual('', '--kind directory a')
+ self.assertInventoryEqual(a_line + c_line, '--kind file --show-ids')
+ self.assertInventoryEqual(c_line, '--kind file --show-ids b b/c')
+
+ def test_in_subdir(self):
+ os.chdir('b')
+ # TODO: jam 20060922 Maybe inventory should return the paths as
+ # relative to '.', rather than relative to root
+
+ # a plain 'inventory' returns all files
+ self.assertInventoryEqual('a\nb\nb/c\n')
+ # But passing '.' will only return paths underneath here
+ self.assertInventoryEqual('b\nb/c\n', '.')
+
+
+ def test_inventory_revision(self):
+ self.build_tree(['b/d', 'e'])
+ self.tree.add(['b/d', 'e'], ['d-id', 'e-id'])
+ self.tree.commit('add files')
+
+ self.tree.rename_one('b/d', 'd')
+ self.tree.commit('rename b/d => d')
+
+ # Passing just -r returns the inventory of that revision
+ self.assertInventoryEqual('a\nb\nb/c\n', '-r 1')
+ self.assertInventoryEqual('a\nb\nb/c\nb/d\ne\n', '-r 2')
+
+ # Passing a path will lookup the path in the old and current locations
+ self.assertInventoryEqual('b/d\n', '-r 2 b/d')
+ self.assertInventoryEqual('b/d\n', '-r 2 d')
+
+ self.tree.rename_one('e', 'b/e')
+ self.tree.commit('rename e => b/e')
+
+ # When supplying just a directory, paths that are now,
+ # or used to be, in that directory are shown
+ self.assertInventoryEqual('b\nb/c\nb/d\ne\n', '-r 2 b')
+
+ def test_missing_file(self):
+ self.run_bzr_error([r'Path\(s\) are not versioned: no-such-file'],
+ 'inventory no-such-file')
diff --git a/bzrlib/tests/blackbox/test_join.py b/bzrlib/tests/blackbox/test_join.py
new file mode 100644
index 0000000..f62f9ab
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_join.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2006, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ workingtree,
+ )
+
+
+class TestJoin(tests.TestCaseWithTransport):
+
+ def make_trees(self):
+ base_tree = self.make_branch_and_tree('tree',
+ format='development-subtree')
+ base_tree.commit('empty commit')
+ self.build_tree(['tree/subtree/', 'tree/subtree/file1'])
+ sub_tree = self.make_branch_and_tree('tree/subtree')
+ sub_tree.set_root_id('subtree-root-id')
+ sub_tree.add('file1', 'file1-id')
+ sub_tree.commit('added file1')
+ return base_tree, sub_tree
+
+ def check_success(self, path):
+ base_tree = workingtree.WorkingTree.open(path)
+ self.assertEqual('file1-id', base_tree.path2id('subtree/file1'))
+
+ def test_join(self):
+ base_tree, sub_tree = self.make_trees()
+ self.run_bzr('join tree/subtree')
+ self.check_success('tree')
+
+ def test_join_dot(self):
+ base_tree, sub_tree = self.make_trees()
+ self.run_bzr('join .', working_dir='tree/subtree')
+ self.check_success('tree')
+
+ def test_join_error(self):
+ base_tree, sub_tree = self.make_trees()
+ os.mkdir('tree/subtree2')
+ osutils.rename('tree/subtree', 'tree/subtree2/subtree')
+ self.run_bzr_error(
+ ('Cannot join .*subtree. Parent directory is not versioned',),
+ 'join tree/subtree2/subtree')
+ # disabled because this gives an ugly error at present -- mbp 20070306
+ ## self.run_bzr_error(
+ ## ('Cannot join .*subtree. Parent directory is not versioned',),
+ ## 'join', '--reference', 'tree/subtree2/subtree')
+ self.run_bzr_error(('Not a branch:.*subtree2',),
+ 'join tree/subtree2')
+
+ def test_join_reference(self):
+ """Join can add a reference if --reference is supplied"""
+ base_tree, sub_tree = self.make_trees()
+ self.run_bzr('join . --reference', working_dir='tree/subtree')
+ sub_tree.lock_read()
+ self.addCleanup(sub_tree.unlock)
+ self.assertEqual('file1-id', sub_tree.path2id('file1'))
+ self.assertTrue(sub_tree.has_id('file1-id'))
+ self.assertEqual('subtree-root-id', sub_tree.path2id(''))
+ self.assertEqual('', sub_tree.id2path('subtree-root-id'))
+ self.assertIs(None, base_tree.path2id('subtree/file1'))
+ base_tree.lock_read()
+ self.addCleanup(base_tree.unlock)
+ self.assertFalse(base_tree.has_id('file1-id'))
+ self.assertEqual('subtree-root-id', base_tree.path2id('subtree'))
+ self.assertEqual('subtree', base_tree.id2path('subtree-root-id'))
+
+ def test_references_check_repository_support(self):
+ """Users are stopped from adding a reference that can't be committed."""
+ # in 0.15 the default format has a dirstate workingtree, which can
+ # support tree references, but the default repository format
+ # cannot.
+ tree = self.make_branch_and_tree('tree', format='dirstate')
+ tree2 = self.make_branch_and_tree('tree/subtree')
+ out, err = self.run_bzr('join --reference tree/subtree',
+ retcode=3)
+ self.assertContainsRe(err, r"Can't join trees")
+ self.assertContainsRe(err, r"use bzr upgrade")
diff --git a/bzrlib/tests/blackbox/test_locale.py b/bzrlib/tests/blackbox/test_locale.py
new file mode 100644
index 0000000..d595e84
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_locale.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2006, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that bzr handles locales in a reasonable way"""
+
+import sys
+
+from bzrlib import (
+ tests,
+ )
+
+
+class TestLocale(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestLocale, self).setUp()
+
+ if sys.platform in ('win32',):
+ raise tests.TestSkipped('Windows does not respond to the LANG'
+ ' env variable')
+
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add('a')
+ tree.commit(u'Unicode \xb5 commit', rev_id='r1',
+ committer=u'\u062c\u0648\u062c\u0648'
+ u' Meinel <juju@info.com>',
+ timestamp=1156451297.96, timezone=0)
+ self.tree = tree
+
+ def test_log_C(self):
+ self.disable_missing_extensions_warning()
+ # C is not necessarily the default locale, so set both LANG and LC_ALL
+ # explicitly because LC_ALL is preferred on (some?) Linux systems but
+ # only LANG is respected on Windows.
+ out, err = self.run_bzr_subprocess(
+ '--no-aliases --no-plugins log -q --log-format=long tree',
+ env_changes={'LANG': 'C', 'BZR_PROGRESS_BAR':'none',
+ 'LC_ALL': 'C', 'LC_CTYPE':None, 'LANGUAGE':None})
+ self.assertEqual('', err)
+ self.assertEqualDiff("""\
+------------------------------------------------------------
+revno: 1
+committer: ???? Meinel <juju@info.com>
+branch nick: tree
+timestamp: Thu 2006-08-24 20:28:17 +0000
+message:
+ Unicode ? commit
+""", out)
+
+ def test_log_BOGUS(self):
+ out, err = self.run_bzr_subprocess(
+ '--no-aliases --no-plugins log -q --log-format=long tree',
+ env_changes={'LANG':'BOGUS', 'BZR_PROGRESS_BAR':'none',
+ 'LC_ALL':None, 'LC_CTYPE':None, 'LANGUAGE':None})
+ self.assertStartsWith(err, 'bzr: warning: unsupported locale setting')
+ self.assertEqualDiff("""\
+------------------------------------------------------------
+revno: 1
+committer: ???? Meinel <juju@info.com>
+branch nick: tree
+timestamp: Thu 2006-08-24 20:28:17 +0000
+message:
+ Unicode ? commit
+""", out)
+
+
+class TestMultibyteCodecs(tests.TestCaseWithTransport):
+ """Tests for quirks of multibyte encodings and their python codecs"""
+
+ def test_plugins_mbcs(self):
+ """Ensure the plugins command works with cjkcodecs, see lp:754082"""
+ self.disable_missing_extensions_warning()
+ out, err = self.run_bzr(["plugins"], encoding="EUC-JP")
+ # The output is tested in bt.test_plugins rather than here
+ self.assertEqual("", err)
diff --git a/bzrlib/tests/blackbox/test_log.py b/bzrlib/tests/blackbox/test_log.py
new file mode 100644
index 0000000..02a95bd
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_log.py
@@ -0,0 +1,1133 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr log."""
+
+from itertools import izip
+import os
+
+from bzrlib import (
+ branchbuilder,
+ errors,
+ log,
+ osutils,
+ tests,
+ )
+from bzrlib.tests import (
+ test_log,
+ features,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestLog(tests.TestCaseWithTransport, test_log.TestLogMixin):
+
+ def make_minimal_branch(self, path='.', format=None):
+ tree = self.make_branch_and_tree(path, format=format)
+ self.build_tree([path + '/hello.txt'])
+ tree.add('hello.txt')
+ tree.commit(message='message1')
+ return tree
+
+ def make_linear_branch(self, path='.', format=None):
+ tree = self.make_branch_and_tree(path, format=format)
+ self.build_tree(
+ [path + '/hello.txt', path + '/goodbye.txt', path + '/meep.txt'])
+ tree.add('hello.txt')
+ tree.commit(message='message1')
+ tree.add('goodbye.txt')
+ tree.commit(message='message2')
+ tree.add('meep.txt')
+ tree.commit(message='message3')
+ return tree
+
+ def make_merged_branch(self, path='.', format=None):
+ tree = self.make_linear_branch(path, format)
+ tree2 = tree.bzrdir.sprout('tree2',
+ revision_id=tree.branch.get_rev_id(1)).open_workingtree()
+ tree2.commit(message='tree2 message2')
+ tree2.commit(message='tree2 message3')
+ tree.merge_from_branch(tree2.branch)
+ tree.commit(message='merge')
+ return tree
+
+
+class TestLogWithLogCatcher(TestLog):
+
+ def setUp(self):
+ super(TestLogWithLogCatcher, self).setUp()
+ # Capture log formatter creations
+ class MyLogFormatter(test_log.LogCatcher):
+
+ def __new__(klass, *args, **kwargs):
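+ # 'self' here is the enclosing test case, captured from setUp's
+ # closure, so the test can later read the revisions the catcher saw.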
+ self.log_catcher = test_log.LogCatcher(*args, **kwargs)
+ # Always return our own log formatter
+ return self.log_catcher
+ # Break cycle with closure over self on cleanup by removing method
+ self.addCleanup(setattr, MyLogFormatter, "__new__", None)
+
+ def getme(branch):
+ # Always return our own log formatter class hijacking the
+ # default behavior (which requires setting up a config
+ # variable)
+ return MyLogFormatter
+ self.overrideAttr(log.log_formatter_registry, 'get_default', getme)
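+ # Net effect of the two overrides above: any code asking the registry
+ # for the default log formatter gets our LogCatcher instance instead,
+ # so the revisions passed to the formatter can later be inspected via
+ # get_captured_revisions() below.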
+
+ def get_captured_revisions(self):
+ return self.log_catcher.revisions
+
+ def assertLogRevnos(self, args, expected_revnos, working_dir='.',
+ out='', err=''):
+ actual_out, actual_err = self.run_bzr(['log'] + args,
+ working_dir=working_dir)
+ self.assertEqual(out, actual_out)
+ self.assertEqual(err, actual_err)
+ self.assertEqual(expected_revnos,
+ [r.revno for r in self.get_captured_revisions()])
+
+ def assertLogRevnosAndDepths(self, args, expected_revnos_and_depths,
+ working_dir='.'):
+ self.run_bzr(['log'] + args, working_dir=working_dir)
+ self.assertEqual(expected_revnos_and_depths,
+ [(r.revno, r.merge_depth)
+ for r in self.get_captured_revisions()])
+
+
+class TestLogRevSpecs(TestLogWithLogCatcher):
+
+ def test_log_no_revspec(self):
+ self.make_linear_branch()
+ self.assertLogRevnos([], ['3', '2', '1'])
+
+ def test_log_null_end_revspec(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r1..'], ['3', '2', '1'])
+
+ def test_log_null_begin_revspec(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r..3'], ['3', '2', '1'])
+
+ def test_log_null_both_revspecs(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r..'], ['3', '2', '1'])
+
+ def test_log_negative_begin_revspec_full_log(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r-3..'], ['3', '2', '1'])
+
+ def test_log_negative_both_revspec_full_log(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r-3..-1'], ['3', '2', '1'])
+
+ def test_log_negative_both_revspec_partial(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r-3..-2'], ['2', '1'])
+
+ def test_log_negative_begin_revspec(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r-2..'], ['3', '2'])
+
+ def test_log_positive_revspecs(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r1..3'], ['3', '2', '1'])
+
+ def test_log_dotted_revspecs(self):
+ self.make_merged_branch()
+ self.assertLogRevnos(['-n0', '-r1..1.1.1'], ['1.1.1', '1'])
+
+ def test_log_limit(self):
+ tree = self.make_branch_and_tree('.')
+ # We want more commits than the initial log batch size
+ for pos in range(10):
+ tree.commit("%s" % pos)
+ self.assertLogRevnos(['--limit', '2'], ['10', '9'])
+
+ def test_log_limit_short(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-l', '2'], ['3', '2'])
+
+ def test_log_change_revno(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-c1'], ['1'])
+
+ def test_branch_revspec(self):
+ foo = self.make_branch_and_tree('foo')
+ bar = self.make_branch_and_tree('bar')
+ self.build_tree(['foo/foo.txt', 'bar/bar.txt'])
+ foo.add('foo.txt')
+ bar.add('bar.txt')
+ foo.commit(message='foo')
+ bar.commit(message='bar')
+ self.run_bzr('log -r branch:../bar', working_dir='foo')
+ self.assertEqual([bar.branch.get_rev_id(1)],
+ [r.rev.revision_id
+ for r in self.get_captured_revisions()])
+
+
+class TestLogExcludeCommonAncestry(TestLogWithLogCatcher):
+
+ def test_exclude_common_ancestry_simple_revnos(self):
+ self.make_linear_branch()
+ self.assertLogRevnos(['-r1..3', '--exclude-common-ancestry'],
+ ['3', '2'])
+
+
+class TestLogMergedLinearAncestry(TestLogWithLogCatcher):
+
+ def setUp(self):
+ super(TestLogMergedLinearAncestry, self).setUp()
+ # FIXME: Using a MemoryTree would be even better here (but until we
+ # stop calling run_bzr, there is no point) --vila 100118.
+ builder = branchbuilder.BranchBuilder(self.get_transport())
+ builder.start_series()
+ # 1
+ # | \
+ # 2 1.1.1
+ # | / |
+ # 3 1.1.2
+ # | |
+ # | 1.1.3
+ # | / |
+ # 4 1.1.4
+ # | /
+ # 5
+ # | \
+ # | 5.1.1
+ # | /
+ # 6
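+ #
+ # (A dotted revno 'x.y.z' names the z-th commit on the y-th branch
+ # forked from mainline revision x; e.g. 1.1.1 above is the first commit
+ # on the first branch forked from revision 1.)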
+
+ # mainline
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('2', ['1'], [])
+ # branch
+ builder.build_snapshot('1.1.1', ['1'], [])
+ # merge branch into mainline
+ builder.build_snapshot('3', ['2', '1.1.1'], [])
+ # new commits in branch
+ builder.build_snapshot('1.1.2', ['1.1.1'], [])
+ builder.build_snapshot('1.1.3', ['1.1.2'], [])
+ # merge branch into mainline
+ builder.build_snapshot('4', ['3', '1.1.3'], [])
+ # merge mainline into branch
+ builder.build_snapshot('1.1.4', ['1.1.3', '4'], [])
+ # merge branch into mainline
+ builder.build_snapshot('5', ['4', '1.1.4'], [])
+ builder.build_snapshot('5.1.1', ['5'], [])
+ builder.build_snapshot('6', ['5', '5.1.1'], [])
+ builder.finish_series()
+
+ def test_n0(self):
+ self.assertLogRevnos(['-n0', '-r1.1.1..1.1.4'],
+ ['1.1.4', '4', '1.1.3', '1.1.2', '3', '1.1.1'])
+
+ def test_n0_forward(self):
+ self.assertLogRevnos(['-n0', '-r1.1.1..1.1.4', '--forward'],
+ ['3', '1.1.1', '4', '1.1.2', '1.1.3', '1.1.4'])
+
+ def test_n1(self):
+ # starting from 1.1.4 we follow the left-hand ancestry
+ self.assertLogRevnos(['-n1', '-r1.1.1..1.1.4'],
+ ['1.1.4', '1.1.3', '1.1.2', '1.1.1'])
+
+ def test_n1_forward(self):
+ self.assertLogRevnos(['-n1', '-r1.1.1..1.1.4', '--forward'],
+ ['1.1.1', '1.1.2', '1.1.3', '1.1.4'])
+
+ def test_fallback_when_end_rev_is_not_on_mainline(self):
+ self.assertLogRevnos(['-n1', '-r1.1.1..5.1.1'],
+ # We don't get 1.1.1 because we say -n1
+ ['5.1.1', '5', '4', '3'])
+
+
+class Test_GenerateAllRevisions(TestLogWithLogCatcher):
+
+ def setUp(self):
+ super(Test_GenerateAllRevisions, self).setUp()
+ builder = self.make_branch_with_many_merges()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ self.branch = b
+
+ def make_branch_with_many_merges(self, path='.', format=None):
+ builder = branchbuilder.BranchBuilder(self.get_transport())
+ builder.start_series()
+ # The graph below may look a bit complicated (and it may well be, but
+ # I've banged my head against it enough), but the bug requires at least
+ # dotted revnos *and* merged revisions below that.
+ # 1
+ # | \
+ # 2 1.1.1
+ # | X
+ # 3 2.1.1
+ # | | \
+ # | 2.1.2 2.2.1
+ # | | X
+ # | 2.1.3 \
+ # | / /
+ # 4 /
+ # | /
+ # 5 -----/
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('2', ['1'], [])
+ builder.build_snapshot('1.1.1', ['1'], [])
+ builder.build_snapshot('2.1.1', ['2'], [])
+ builder.build_snapshot('3', ['2', '1.1.1'], [])
+ builder.build_snapshot('2.1.2', ['2.1.1'], [])
+ builder.build_snapshot('2.2.1', ['2.1.1'], [])
+ builder.build_snapshot('2.1.3', ['2.1.2', '2.2.1'], [])
+ builder.build_snapshot('4', ['3', '2.1.3'], [])
+ builder.build_snapshot('5', ['4', '2.1.2'], [])
+ builder.finish_series()
+ return builder
+
+ def test_not_an_ancestor(self):
+ self.assertRaises(errors.BzrCommandError,
+ log._generate_all_revisions,
+ self.branch, '1.1.1', '2.1.3', 'reverse',
+ delayed_graph_generation=True)
+
+ def test_wrong_order(self):
+ self.assertRaises(errors.BzrCommandError,
+ log._generate_all_revisions,
+ self.branch, '5', '2.1.3', 'reverse',
+ delayed_graph_generation=True)
+
+ def test_no_start_rev_id_with_end_rev_id_being_a_merge(self):
+ revs = log._generate_all_revisions(
+ self.branch, None, '2.1.3',
+ 'reverse', delayed_graph_generation=True)
+
+
+class TestLogRevSpecsWithPaths(TestLogWithLogCatcher):
+
+ def test_log_revno_n_path_wrong_namespace(self):
+ self.make_linear_branch('branch1')
+ self.make_linear_branch('branch2')
+ # There is no guarantee that a path exists between two arbitrary
+ # revisions.
+ self.run_bzr("log -r revno:2:branch1..revno:3:branch2", retcode=3)
+
+ def test_log_revno_n_path_correct_order(self):
+ self.make_linear_branch('branch2')
+ self.assertLogRevnos(['-rrevno:1:branch2..revno:3:branch2'],
+ ['3', '2','1'])
+
+ def test_log_revno_n_path(self):
+ self.make_linear_branch('branch2')
+ self.assertLogRevnos(['-rrevno:1:branch2'],
+ ['1'])
+ rev_props = self.log_catcher.revisions[0].rev.properties
+ self.assertEqual('branch2', rev_props['branch-nick'])
+
+
+class TestLogErrors(TestLog):
+
+ def test_log_zero_revspec(self):
+ self.make_minimal_branch()
+ self.run_bzr_error(['bzr: ERROR: Logging revision 0 is invalid.'],
+ ['log', '-r0'])
+
+ def test_log_zero_begin_revspec(self):
+ self.make_linear_branch()
+ self.run_bzr_error(['bzr: ERROR: Logging revision 0 is invalid.'],
+ ['log', '-r0..2'])
+
+ def test_log_zero_end_revspec(self):
+ self.make_linear_branch()
+ self.run_bzr_error(['bzr: ERROR: Logging revision 0 is invalid.'],
+ ['log', '-r-2..0'])
+
+ def test_log_nonexistent_revno(self):
+ self.make_minimal_branch()
+ self.run_bzr_error(["bzr: ERROR: Requested revision: '1234' "
+ "does not exist in branch:"],
+ ['log', '-r1234'])
+
+ def test_log_nonexistent_dotted_revno(self):
+ self.make_minimal_branch()
+ self.run_bzr_error(["bzr: ERROR: Requested revision: '123.123' "
+ "does not exist in branch:"],
+ ['log', '-r123.123'])
+
+ def test_log_change_nonexistent_revno(self):
+ self.make_minimal_branch()
+ self.run_bzr_error(["bzr: ERROR: Requested revision: '1234' "
+ "does not exist in branch:"],
+ ['log', '-c1234'])
+
+ def test_log_change_nonexistent_dotted_revno(self):
+ self.make_minimal_branch()
+ self.run_bzr_error(["bzr: ERROR: Requested revision: '123.123' "
+ "does not exist in branch:"],
+ ['log', '-c123.123'])
+
+ def test_log_change_single_revno_only(self):
+ self.make_minimal_branch()
+ self.run_bzr_error(['bzr: ERROR: Option --change does not'
+ ' accept revision ranges'],
+ ['log', '--change', '2..3'])
+
+ def test_log_change_incompatible_with_revision(self):
+ self.run_bzr_error(['bzr: ERROR: --revision and --change'
+ ' are mutually exclusive'],
+ ['log', '--change', '2', '--revision', '3'])
+
+ def test_log_nonexistent_file(self):
+ self.make_minimal_branch()
+ # files that don't exist in either the basis tree or working tree
+ # should give an error
+ out, err = self.run_bzr('log does-not-exist', retcode=3)
+ self.assertContainsRe(err,
+ 'Path unknown at end or start of revision range: '
+ 'does-not-exist')
+
+ def test_log_reversed_revspecs(self):
+ self.make_linear_branch()
+ self.run_bzr_error(('bzr: ERROR: Start revision must be older than '
+ 'the end revision.\n',),
+ ['log', '-r3..1'])
+
+ def test_log_reversed_dotted_revspecs(self):
+ self.make_merged_branch()
+ self.run_bzr_error(('bzr: ERROR: Start revision not found in '
+ 'history of end revision.\n',),
+ "log -r 1.1.1..1")
+
+ def test_log_bad_message_re(self):
+ """Bad --message argument gives a sensible message
+
+ See https://bugs.launchpad.net/bzr/+bug/251352
+ """
+ self.make_minimal_branch()
+ out, err = self.run_bzr(['log', '-m', '*'], retcode=3)
+ self.assertContainsRe(err, "ERROR.*Invalid pattern.*nothing to repeat")
+ self.assertNotContainsRe(err, "Unprintable exception")
+ self.assertEqual(out, '')
+
+ def test_log_unsupported_timezone(self):
+ self.make_linear_branch()
+ self.run_bzr_error(['bzr: ERROR: Unsupported timezone format "foo", '
+ 'options are "utc", "original", "local".'],
+ ['log', '--timezone', 'foo'])
+
+ def test_log_exclude_ancestry_no_range(self):
+ self.make_linear_branch()
+ self.run_bzr_error(['bzr: ERROR: --exclude-common-ancestry'
+ ' requires -r with two revisions'],
+ ['log', '--exclude-common-ancestry'])
+
+ def test_log_exclude_ancestry_single_revision(self):
+ self.make_merged_branch()
+ self.run_bzr_error(['bzr: ERROR: --exclude-common-ancestry'
+ ' requires two different revisions'],
+ ['log', '--exclude-common-ancestry',
+ '-r1.1.1..1.1.1'])
+
+class TestLogTags(TestLog):
+
+ def test_log_with_tags(self):
+ tree = self.make_linear_branch(format='dirstate-tags')
+ branch = tree.branch
+ branch.tags.set_tag('tag1', branch.get_rev_id(1))
+ branch.tags.set_tag('tag1.1', branch.get_rev_id(1))
+ branch.tags.set_tag('tag3', branch.last_revision())
+
+ log = self.run_bzr("log -r-1")[0]
+ self.assertTrue('tags: tag3' in log)
+
+ log = self.run_bzr("log -r1")[0]
+ # We can't know the order of tags in the output since dicts are
+ # unordered, so we need to check both possibilities
+ self.assertContainsRe(log, r'tags: (tag1, tag1\.1|tag1\.1, tag1)')
+
+ def test_merged_log_with_tags(self):
+ branch1_tree = self.make_linear_branch('branch1',
+ format='dirstate-tags')
+ branch1 = branch1_tree.branch
+ branch2_tree = branch1_tree.bzrdir.sprout('branch2').open_workingtree()
+ branch1_tree.commit(message='foobar', allow_pointless=True)
+ branch1.tags.set_tag('tag1', branch1.last_revision())
+ # tags don't propagate if we don't merge
+ self.run_bzr('merge ../branch1', working_dir='branch2')
+ branch2_tree.commit(message='merge branch 1')
+ log = self.run_bzr("log -n0 -r-1", working_dir='branch2')[0]
+ self.assertContainsRe(log, r' tags: tag1')
+ log = self.run_bzr("log -n0 -r3.1.1", working_dir='branch2')[0]
+ self.assertContainsRe(log, r'tags: tag1')
+
+
+class TestLogSignatures(TestLog):
+
+ def test_log_with_signatures(self):
+ self.requireFeature(features.gpgme)
+
+ tree = self.make_linear_branch(format='dirstate-tags')
+
+ log = self.run_bzr("log --signatures")[0]
+ self.assertTrue('signature: no signature' in log)
+
+ def test_log_without_signatures(self):
+ self.requireFeature(features.gpgme)
+
+ tree = self.make_linear_branch(format='dirstate-tags')
+
+ log = self.run_bzr("log")[0]
+ self.assertFalse('signature: no signature' in log)
+
+
+class TestLogVerbose(TestLog):
+
+ def setUp(self):
+ super(TestLogVerbose, self).setUp()
+ self.make_minimal_branch()
+
+ def assertUseShortDeltaFormat(self, cmd):
+ log = self.run_bzr(cmd)[0]
+ # Check that we use the short status format
+ self.assertContainsRe(log, '(?m)^\s*A hello.txt$')
+ self.assertNotContainsRe(log, '(?m)^\s*added:$')
+
+ def assertUseLongDeltaFormat(self, cmd):
+ log = self.run_bzr(cmd)[0]
+ # Check that we use the long status format
+ self.assertNotContainsRe(log, '(?m)^\s*A hello.txt$')
+ self.assertContainsRe(log, '(?m)^\s*added:$')
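+ # (The two regexes above reflect how the deltas render: the short status
+ # format lists changes as single flag lines such as 'A  hello.txt', while
+ # the long format groups them under headings such as 'added:'.)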
+
+ def test_log_short_verbose(self):
+ self.assertUseShortDeltaFormat(['log', '--short', '-v'])
+
+ def test_log_s_verbose(self):
+ self.assertUseShortDeltaFormat(['log', '-S', '-v'])
+
+ def test_log_short_verbose_verbose(self):
+ self.assertUseLongDeltaFormat(['log', '--short', '-vv'])
+
+ def test_log_long_verbose(self):
+ # Check that we use the long status format, ignoring the verbosity
+ # level
+ self.assertUseLongDeltaFormat(['log', '--long', '-v'])
+
+ def test_log_long_verbose_verbose(self):
+ # Check that we use the long status format, ignoring the verbosity
+ # level
+ self.assertUseLongDeltaFormat(['log', '--long', '-vv'])
+
+
+class TestLogMerges(TestLogWithLogCatcher):
+
+ def setUp(self):
+ super(TestLogMerges, self).setUp()
+ self.make_branches_with_merges()
+
+ def make_branches_with_merges(self):
+ level0 = self.make_branch_and_tree('level0')
+ self.wt_commit(level0, 'in branch level0')
+ level1 = level0.bzrdir.sprout('level1').open_workingtree()
+ self.wt_commit(level1, 'in branch level1')
+ level2 = level1.bzrdir.sprout('level2').open_workingtree()
+ self.wt_commit(level2, 'in branch level2')
+ level1.merge_from_branch(level2.branch)
+ self.wt_commit(level1, 'merge branch level2')
+ level0.merge_from_branch(level1.branch)
+ self.wt_commit(level0, 'merge branch level1')
+
+ def test_merges_are_indented_by_level(self):
+ self.run_bzr(['log', '-n0'], working_dir='level0')
+ revnos_and_depth = [(r.revno, r.merge_depth)
+ for r in self.get_captured_revisions()]
+ self.assertEqual([('2', 0), ('1.1.2', 1), ('1.2.1', 2), ('1.1.1', 1),
+ ('1', 0)],
+ [(r.revno, r.merge_depth)
+ for r in self.get_captured_revisions()])
+
+ def test_force_merge_revisions_off(self):
+ self.assertLogRevnos(['-n1'], ['2', '1'], working_dir='level0')
+
+ def test_force_merge_revisions_on(self):
+ self.assertLogRevnos(['-n0'], ['2', '1.1.2', '1.2.1', '1.1.1', '1'],
+ working_dir='level0')
+
+ def test_include_merges(self):
+ # Confirm --include-merges gives the same output as -n0
+ msg = ("The option '--include-merges' to 'bzr log' "
+ "has been deprecated in bzr 2.5. "
+ "Please use '--include-merged' instead.\n")
+ self.assertLogRevnos(['--include-merges'],
+ ['2', '1.1.2', '1.2.1', '1.1.1', '1'],
+ working_dir='level0', err=msg)
+ self.assertLogRevnos(['--include-merges'],
+ ['2', '1.1.2', '1.2.1', '1.1.1', '1'],
+ working_dir='level0', err=msg)
+ out_im, err_im = self.run_bzr('log --include-merges',
+ working_dir='level0')
+ out_n0, err_n0 = self.run_bzr('log -n0', working_dir='level0')
+ self.assertEqual(msg, err_im)
+ self.assertEqual('', err_n0)
+ self.assertEqual(out_im, out_n0)
+
+ def test_include_merged(self):
+ # Confirm --include-merged gives the same output as -n0
+ expected = ['2', '1.1.2', '1.2.1', '1.1.1', '1']
+ self.assertLogRevnos(['--include-merged'],
+ expected, working_dir='level0')
+ self.assertLogRevnos(['--include-merged'],
+ expected, working_dir='level0')
+
+ def test_force_merge_revisions_N(self):
+ self.assertLogRevnos(['-n2'],
+ ['2', '1.1.2', '1.1.1', '1'],
+ working_dir='level0')
+
+ def test_merges_single_merge_rev(self):
+ self.assertLogRevnosAndDepths(['-n0', '-r1.1.2'],
+ [('1.1.2', 0), ('1.2.1', 1)],
+ working_dir='level0')
+
+ def test_merges_partial_range(self):
+ self.assertLogRevnosAndDepths(
+ ['-n0', '-r1.1.1..1.1.2'],
+ [('1.1.2', 0), ('1.2.1', 1), ('1.1.1', 0)],
+ working_dir='level0')
+
+ def test_merges_partial_range_ignore_before_lower_bound(self):
+ """Dont show revisions before the lower bound's merged revs"""
+ self.assertLogRevnosAndDepths(
+ ['-n0', '-r1.1.2..2'],
+ [('2', 0), ('1.1.2', 1), ('1.2.1', 2)],
+ working_dir='level0')
+
+ def test_omit_merges_with_sidelines(self):
+ self.assertLogRevnos(['--omit-merges', '-n0'], ['1.2.1', '1.1.1', '1'],
+ working_dir='level0')
+
+ def test_omit_merges_without_sidelines(self):
+ self.assertLogRevnos(['--omit-merges', '-n1'], ['1'],
+ working_dir='level0')
+
+
+class TestLogDiff(TestLogWithLogCatcher):
+
+ # FIXME: We need specific tests for each LogFormatter about how the diffs
+ # are displayed: --long indents them by depth, --short uses a fixed
+ # indent and --line doesn't display them. -- vila 10019
+
+ def setUp(self):
+ super(TestLogDiff, self).setUp()
+ self.make_branch_with_diffs()
+
+ def make_branch_with_diffs(self):
+ level0 = self.make_branch_and_tree('level0')
+ self.build_tree(['level0/file1', 'level0/file2'])
+ level0.add('file1')
+ level0.add('file2')
+ self.wt_commit(level0, 'in branch level0')
+
+ level1 = level0.bzrdir.sprout('level1').open_workingtree()
+ self.build_tree_contents([('level1/file2', 'hello\n')])
+ self.wt_commit(level1, 'in branch level1')
+ level0.merge_from_branch(level1.branch)
+ self.wt_commit(level0, 'merge branch level1')
+
+ def _diff_file1_revno1(self):
+ return """=== added file 'file1'
+--- file1\t1970-01-01 00:00:00 +0000
++++ file1\t2005-11-22 00:00:00 +0000
+@@ -0,0 +1,1 @@
++contents of level0/file1
+
+"""
+
+ def _diff_file2_revno2(self):
+ return """=== modified file 'file2'
+--- file2\t2005-11-22 00:00:00 +0000
++++ file2\t2005-11-22 00:00:01 +0000
+@@ -1,1 +1,1 @@
+-contents of level0/file2
++hello
+
+"""
+
+ def _diff_file2_revno1_1_1(self):
+ return """=== modified file 'file2'
+--- file2\t2005-11-22 00:00:00 +0000
++++ file2\t2005-11-22 00:00:01 +0000
+@@ -1,1 +1,1 @@
+-contents of level0/file2
++hello
+
+"""
+
+ def _diff_file2_revno1(self):
+ return """=== added file 'file2'
+--- file2\t1970-01-01 00:00:00 +0000
++++ file2\t2005-11-22 00:00:00 +0000
+@@ -0,0 +1,1 @@
++contents of level0/file2
+
+"""
+
+ def assertLogRevnosAndDiff(self, args, expected,
+ working_dir='.'):
+ self.run_bzr(['log', '-p'] + args, working_dir=working_dir)
+ expected_revnos_and_depths = [
+ (revno, depth) for revno, depth, diff in expected]
+ # Check the revnos and depths first to make debugging easier
+ self.assertEqual(expected_revnos_and_depths,
+ [(r.revno, r.merge_depth)
+ for r in self.get_captured_revisions()])
+ # Now check the diffs, adding the revno in case of failure
+ fmt = 'In revno %s\n%s'
+ for expected_rev, actual_rev in izip(expected,
+ self.get_captured_revisions()):
+ revno, depth, expected_diff = expected_rev
+ actual_diff = actual_rev.diff
+ self.assertEqualDiff(fmt % (revno, expected_diff),
+ fmt % (revno, actual_diff))
+
+ def test_log_diff_with_merges(self):
+ self.assertLogRevnosAndDiff(
+ ['-n0'],
+ [('2', 0, self._diff_file2_revno2()),
+ ('1.1.1', 1, self._diff_file2_revno1_1_1()),
+ ('1', 0, self._diff_file1_revno1()
+ + self._diff_file2_revno1())],
+ working_dir='level0')
+
+ def test_log_diff_file1(self):
+ self.assertLogRevnosAndDiff(['-n0', 'file1'],
+ [('1', 0, self._diff_file1_revno1())],
+ working_dir='level0')
+
+ def test_log_diff_file2(self):
+ self.assertLogRevnosAndDiff(['-n1', 'file2'],
+ [('2', 0, self._diff_file2_revno2()),
+ ('1', 0, self._diff_file2_revno1())],
+ working_dir='level0')
+
+
+class TestLogUnicodeDiff(TestLog):
+
+ def test_log_show_diff_non_ascii(self):
+ # Smoke test for bug #328007 UnicodeDecodeError on 'log -p'
+ message = u'Message with \xb5'
+ body = 'Body with \xb5\n'
+ wt = self.make_branch_and_tree('.')
+ self.build_tree_contents([('foo', body)])
+ wt.add('foo')
+ wt.commit(message=message)
+ # check that command won't fail with unicode error
+ # don't care about exact output because we have other tests for this
+ out,err = self.run_bzr('log -p --long')
+ self.assertNotEqual('', out)
+ self.assertEqual('', err)
+ out,err = self.run_bzr('log -p --short')
+ self.assertNotEqual('', out)
+ self.assertEqual('', err)
+ out,err = self.run_bzr('log -p --line')
+ self.assertNotEqual('', out)
+ self.assertEqual('', err)
+
+
+class TestLogEncodings(tests.TestCaseInTempDir):
+
+ _mu = u'\xb5'
+ _message = u'Message with \xb5'
+
+ # Encodings which can encode mu
+ good_encodings = [
+ 'utf-8',
+ 'latin-1',
+ 'iso-8859-1',
+ 'cp437', # Common windows encoding
+ 'cp1251', # Russian windows encoding
+ 'cp1258', # Common windows encoding
+ ]
+ # Encodings which cannot encode mu
+ bad_encodings = [
+ 'ascii',
+ 'iso-8859-2',
+ 'koi8_r',
+ ]
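+ # (Whether an encoding belongs in good_encodings or bad_encodings can be
+ # checked directly, e.g. u'\xb5'.encode('latin-1') succeeds while
+ # u'\xb5'.encode('ascii') raises UnicodeEncodeError.)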
+
+ def setUp(self):
+ super(TestLogEncodings, self).setUp()
+ self.overrideAttr(osutils, '_cached_user_encoding')
+
+ def create_branch(self):
+ bzr = self.run_bzr
+ bzr('init')
+ self.build_tree_contents([('a', 'some stuff\n')])
+ bzr('add a')
+ bzr(['commit', '-m', self._message])
+
+ def try_encoding(self, encoding, fail=False):
+ bzr = self.run_bzr
+ if fail:
+ self.assertRaises(UnicodeEncodeError,
+ self._mu.encode, encoding)
+ encoded_msg = self._message.encode(encoding, 'replace')
+ else:
+ encoded_msg = self._message.encode(encoding)
+
+ old_encoding = osutils._cached_user_encoding
+ # This test requires that 'run_bzr' uses the current
+ # bzrlib, because we override user_encoding, and expect
+ # it to be used
+ try:
+ osutils._cached_user_encoding = 'ascii'
+ # We should be able to handle any encoding
+ out, err = bzr('log', encoding=encoding)
+ if not fail:
+ # Make sure we wrote mu as we expected it to exist
+ self.assertNotEqual(-1, out.find(encoded_msg))
+ out_unicode = out.decode(encoding)
+ self.assertNotEqual(-1, out_unicode.find(self._message))
+ else:
+ self.assertNotEqual(-1, out.find('Message with ?'))
+ finally:
+ osutils._cached_user_encoding = old_encoding
+
+ def test_log_handles_encoding(self):
+ self.create_branch()
+
+ for encoding in self.good_encodings:
+ self.try_encoding(encoding)
+
+ def test_log_handles_bad_encoding(self):
+ self.create_branch()
+
+ for encoding in self.bad_encodings:
+ self.try_encoding(encoding, fail=True)
+
+ def test_stdout_encoding(self):
+ bzr = self.run_bzr
+ osutils._cached_user_encoding = "cp1251"
+
+ bzr('init')
+ self.build_tree(['a'])
+ bzr('add a')
+ bzr(['commit', '-m', u'\u0422\u0435\u0441\u0442'])
+ stdout, stderr = self.run_bzr('log', encoding='cp866')
+
+ message = stdout.splitlines()[-1]
+
+ # explanation of the check:
+ # u'\u0422\u0435\u0441\u0442' is the word 'Test' in Russian
+ # in cp866 encoding this is the string '\x92\xa5\xe1\xe2'
+ # in cp1251 encoding this is the string '\xd2\xe5\xf1\xf2'
+ # This test checks that the output of the log command is
+ # encoded to sys.stdout.encoding
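+ # For instance, the first character alone already differs:
+ # u'\u0422'.encode('cp866') == '\x92', while u'\u0422'.encode('cp1251') == '\xd2'.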
+ test_in_cp866 = '\x92\xa5\xe1\xe2'
+ test_in_cp1251 = '\xd2\xe5\xf1\xf2'
+ # Make sure the log string is encoded in cp866
+ self.assertEquals(test_in_cp866, message[2:])
+ # Make sure the cp1251 string is not found anywhere
+ self.assertEquals(-1, stdout.find(test_in_cp1251))
+
+
+class TestLogFile(TestLogWithLogCatcher):
+
+ def test_log_local_branch_file(self):
+ """We should be able to log files in local treeless branches"""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add('file')
+ tree.commit('revision 1')
+ tree.bzrdir.destroy_workingtree()
+ self.run_bzr('log tree/file')
+
+ def prepare_tree(self, complex=False):
+ # The complex configuration includes deletes and renames
+ tree = self.make_branch_and_tree('parent')
+ self.build_tree(['parent/file1', 'parent/file2', 'parent/file3'])
+ tree.add('file1')
+ tree.commit('add file1')
+ tree.add('file2')
+ tree.commit('add file2')
+ tree.add('file3')
+ tree.commit('add file3')
+ child_tree = tree.bzrdir.sprout('child').open_workingtree()
+ self.build_tree_contents([('child/file2', 'hello')])
+ child_tree.commit(message='branch 1')
+ tree.merge_from_branch(child_tree.branch)
+ tree.commit(message='merge child branch')
+ if complex:
+ tree.remove('file2')
+ tree.commit('remove file2')
+ tree.rename_one('file3', 'file4')
+ tree.commit('file3 is now called file4')
+ tree.remove('file1')
+ tree.commit('remove file1')
+ os.chdir('parent')
+
+ # FIXME: It would be good to parametrize the following tests against all
+ # formatters. But the revision selection is not *currently* part of the
+ # LogFormatter contract, so using LogCatcher is sufficient -- vila 100118
+ def test_log_file1(self):
+ self.prepare_tree()
+ self.assertLogRevnos(['-n0', 'file1'], ['1'])
+
+ def test_log_file2(self):
+ self.prepare_tree()
+ # file2 full history
+ self.assertLogRevnos(['-n0', 'file2'], ['4', '3.1.1', '2'])
+ # file2 in a merge revision
+ self.assertLogRevnos(['-n0', '-r3.1.1', 'file2'], ['3.1.1'])
+ # file2 in a mainline revision
+ self.assertLogRevnos(['-n0', '-r4', 'file2'], ['4', '3.1.1'])
+ # file2 since a revision
+ self.assertLogRevnos(['-n0', '-r3..', 'file2'], ['4', '3.1.1'])
+ # file2 up to a revision
+ self.assertLogRevnos(['-n0', '-r..3', 'file2'], ['2'])
+
+ def test_log_file3(self):
+ self.prepare_tree()
+ self.assertLogRevnos(['-n0', 'file3'], ['3'])
+
+ def test_log_file_historical_missing(self):
+ # Check logging a deleted file gives an error if the
+ # file isn't found at the end or start of the revision range
+ self.prepare_tree(complex=True)
+ err_msg = "Path unknown at end or start of revision range: file2"
+ err = self.run_bzr('log file2', retcode=3)[1]
+ self.assertContainsRe(err, err_msg)
+
+ def test_log_file_historical_end(self):
+ # Check logging a deleted file is ok if the file existed
+ # at the end of the revision range
+ self.prepare_tree(complex=True)
+ self.assertLogRevnos(['-n0', '-r..4', 'file2'], ['4', '3.1.1', '2'])
+
+ def test_log_file_historical_start(self):
+ # Check logging a deleted file is ok if the file existed
+ # at the start of the revision range
+ self.prepare_tree(complex=True)
+ self.assertLogRevnos(['file1'], ['1'])
+
+ def test_log_file_renamed(self):
+ """File matched against revision range, not current tree."""
+ self.prepare_tree(complex=True)
+
+ # Check logging a renamed file gives an error by default
+ err_msg = "Path unknown at end or start of revision range: file3"
+ err = self.run_bzr('log file3', retcode=3)[1]
+ self.assertContainsRe(err, err_msg)
+
+ # Check we can see a renamed file if we give the right end revision
+ self.assertLogRevnos(['-r..4', 'file3'], ['3'])
+
+
+class TestLogMultiple(TestLogWithLogCatcher):
+
+ def prepare_tree(self):
+ tree = self.make_branch_and_tree('parent')
+ self.build_tree([
+ 'parent/file1',
+ 'parent/file2',
+ 'parent/dir1/',
+ 'parent/dir1/file5',
+ 'parent/dir1/dir2/',
+ 'parent/dir1/dir2/file3',
+ 'parent/file4'])
+ tree.add('file1')
+ tree.commit('add file1')
+ tree.add('file2')
+ tree.commit('add file2')
+ tree.add(['dir1', 'dir1/dir2', 'dir1/dir2/file3'])
+ tree.commit('add file3')
+ tree.add('file4')
+ tree.commit('add file4')
+ tree.add('dir1/file5')
+ tree.commit('add file5')
+ child_tree = tree.bzrdir.sprout('child').open_workingtree()
+ self.build_tree_contents([('child/file2', 'hello')])
+ child_tree.commit(message='branch 1')
+ tree.merge_from_branch(child_tree.branch)
+ tree.commit(message='merge child branch')
+ os.chdir('parent')
+
+ def test_log_files(self):
+ """The log for multiple file should only list revs for those files"""
+ self.prepare_tree()
+ self.assertLogRevnos(['file1', 'file2', 'dir1/dir2/file3'],
+ ['6', '5.1.1', '3', '2', '1'])
+
+ def test_log_directory(self):
+ """The log for a directory should show all nested files."""
+ self.prepare_tree()
+ self.assertLogRevnos(['dir1'], ['5', '3'])
+
+ def test_log_nested_directory(self):
+ """The log for a directory should show all nested files."""
+ self.prepare_tree()
+ self.assertLogRevnos(['dir1/dir2'], ['3'])
+
+ def test_log_in_nested_directory(self):
+ """The log for a directory should show all nested files."""
+ self.prepare_tree()
+ os.chdir("dir1")
+ self.assertLogRevnos(['.'], ['5', '3'])
+
+ def test_log_files_and_directories(self):
+ """Logging files and directories together should be fine."""
+ self.prepare_tree()
+ self.assertLogRevnos(['file4', 'dir1/dir2'], ['4', '3'])
+
+ def test_log_files_and_dirs_in_nested_directory(self):
+ """The log for a directory should show all nested files."""
+ self.prepare_tree()
+ os.chdir("dir1")
+ self.assertLogRevnos(['dir2', 'file5'], ['5', '3'])
+
+
+class MainlineGhostTests(TestLogWithLogCatcher):
+
+ def setUp(self):
+ super(MainlineGhostTests, self).setUp()
+ tree = self.make_branch_and_tree('')
+ tree.set_parent_ids(["spooky"], allow_leftmost_as_ghost=True)
+ tree.add('')
+ tree.commit('msg1', rev_id='rev1')
+ tree.commit('msg2', rev_id='rev2')
+
+ def test_log_range(self):
+ self.assertLogRevnos(["-r1..2"], ["2", "1"])
+
+ def test_log_norange(self):
+ self.assertLogRevnos([], ["2", "1"])
+
+ def test_log_range_open_begin(self):
+ self.knownFailure("log with ghosts fails. bug #726466")
+ (stdout, stderr) = self.run_bzr(['log', '-r..2'], retcode=3)
+ self.assertEqual(["2", "1"],
+ [r.revno for r in self.get_captured_revisions()])
+ self.assertEquals("bzr: ERROR: Further revision history missing.", stderr)
+
+ def test_log_range_open_end(self):
+ self.assertLogRevnos(["-r1.."], ["2", "1"])
+
+
+class TestLogMatch(TestLogWithLogCatcher):
+
+ def prepare_tree(self):
+ tree = self.make_branch_and_tree('')
+ self.build_tree(
+ ['/hello.txt', '/goodbye.txt'])
+ tree.add('hello.txt')
+ tree.commit(message='message1', committer='committer1', authors=['author1'])
+ tree.add('goodbye.txt')
+ tree.commit(message='message2', committer='committer2', authors=['author2'])
+
+ def test_message(self):
+ self.prepare_tree()
+ self.assertLogRevnos(["-m", "message1"], ["1"])
+ self.assertLogRevnos(["-m", "message2"], ["2"])
+ self.assertLogRevnos(["-m", "message"], ["2", "1"])
+ self.assertLogRevnos(["-m", "message1", "-m", "message2"], ["2", "1"])
+ self.assertLogRevnos(["--match-message", "message1"], ["1"])
+ self.assertLogRevnos(["--match-message", "message2"], ["2"])
+ self.assertLogRevnos(["--match-message", "message"], ["2", "1"])
+ self.assertLogRevnos(["--match-message", "message1",
+ "--match-message", "message2"], ["2", "1"])
+ self.assertLogRevnos(["--message", "message1"], ["1"])
+ self.assertLogRevnos(["--message", "message2"], ["2"])
+ self.assertLogRevnos(["--message", "message"], ["2", "1"])
+ self.assertLogRevnos(["--match-message", "message1",
+ "--message", "message2"], ["2", "1"])
+ self.assertLogRevnos(["--message", "message1",
+ "--match-message", "message2"], ["2", "1"])
+
+ def test_committer(self):
+ self.prepare_tree()
+ self.assertLogRevnos(["-m", "committer1"], ["1"])
+ self.assertLogRevnos(["-m", "committer2"], ["2"])
+ self.assertLogRevnos(["-m", "committer"], ["2", "1"])
+ self.assertLogRevnos(["-m", "committer1", "-m", "committer2"],
+ ["2", "1"])
+ self.assertLogRevnos(["--match-committer", "committer1"], ["1"])
+ self.assertLogRevnos(["--match-committer", "committer2"], ["2"])
+ self.assertLogRevnos(["--match-committer", "committer"], ["2", "1"])
+ self.assertLogRevnos(["--match-committer", "committer1",
+ "--match-committer", "committer2"], ["2", "1"])
+
+ def test_author(self):
+ self.prepare_tree()
+ self.assertLogRevnos(["-m", "author1"], ["1"])
+ self.assertLogRevnos(["-m", "author2"], ["2"])
+ self.assertLogRevnos(["-m", "author"], ["2", "1"])
+ self.assertLogRevnos(["-m", "author1", "-m", "author2"],
+ ["2", "1"])
+ self.assertLogRevnos(["--match-author", "author1"], ["1"])
+ self.assertLogRevnos(["--match-author", "author2"], ["2"])
+ self.assertLogRevnos(["--match-author", "author"], ["2", "1"])
+ self.assertLogRevnos(["--match-author", "author1",
+ "--match-author", "author2"], ["2", "1"])
+
+
+class TestSmartServerLog(tests.TestCaseWithTransport):
+
+ def test_standard_log(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['log', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertLength(9, self.hpss_calls)
+
+ def test_verbose_log(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['log', '-v', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(10, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_per_file(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['log', '-v', self.get_url('branch') + "/foo"])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(14, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_logformats.py b/bzrlib/tests/blackbox/test_logformats.py
new file mode 100644
index 0000000..4e6bc7f
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_logformats.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2005, 2006, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for default log_formats/log_formatters
+"""
+
+
+import os
+
+from bzrlib import (
+ config,
+ tests,
+ workingtree,
+ )
+
+
+class TestLogFormats(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestLogFormats, self).setUp()
+
+ # Create a config file with some useful variables
+ conf_path = config.config_filename()
+ if os.path.isfile(conf_path):
+ # Something is wrong in the environment,
+ # we risk overwriting the user's config
+ self.fail("%s exists" % conf_path)
+
+ config.ensure_config_dir_exists()
+ f = open(conf_path,'wb')
+ try:
+ f.write("""[DEFAULT]
+email=Joe Foo <joe@foo.com>
+log_format=line
+""")
+ finally:
+ f.close()
+
+ def _make_simple_branch(self, relpath='.'):
+ wt = self.make_branch_and_tree(relpath)
+ wt.commit('first revision')
+ wt.commit('second revision')
+ return wt
+
+ def test_log_default_format(self):
+ self._make_simple_branch()
+ # only the line formatter is this short, one line per revision
+ log = self.run_bzr('log')[0]
+ self.assertEquals(2, len(log.splitlines()))
+
+ def test_log_format_arg(self):
+ self._make_simple_branch()
+ log = self.run_bzr(['log', '--log-format', 'short'])[0]
+
+ def test_missing_default_format(self):
+ wt = self._make_simple_branch('a')
+ self.run_bzr(['branch', 'a', 'b'])
+ wt.commit('third revision')
+ wt.commit('fourth revision')
+
+ missing = self.run_bzr('missing', retcode=1, working_dir='b')[0]
+ # one line for 'Using save location'
+ # one line for 'You are missing 2 revision(s)'
+ # one line per missing revision (the line log format is used as
+ # configured)
+ self.assertEquals(4, len(missing.splitlines()))
+
+ def test_missing_format_arg(self):
+ wt = self._make_simple_branch('a')
+ self.run_bzr(['branch', 'a', 'b'])
+ wt.commit('third revision')
+ wt.commit('fourth revision')
+
+ missing = self.run_bzr(['missing', '--log-format', 'short'],
+ retcode=1, working_dir='b')[0]
+ # one line for 'Using save location'
+ # one line for 'You are missing 2 revision(s)'
+ # three lines per missing revision
+ self.assertEquals(8, len(missing.splitlines()))
+
+ def test_logformat_gnu_changelog(self):
+ # from http://launchpad.net/bugs/29582/
+ wt = self.make_branch_and_tree('.')
+ wt.commit('first revision', timestamp=1236045060,
+ timezone=0) # Aka UTC
+
+ log, err = self.run_bzr(['log', '--log-format', 'gnu-changelog',
+ '--timezone=utc'])
+ self.assertEquals('', err)
+ expected = """2009-03-03 Joe Foo <joe@foo.com>
+
+\tfirst revision
+
+"""
+ self.assertEqualDiff(expected, log)
+
+ def test_logformat_line_wide(self):
+ """Author field should get larger for column widths over 80"""
+ wt = self.make_branch_and_tree('.')
+ wt.commit('revision with a long author', committer='Person with'
+ ' long name SENTINEL')
+ log, err = self.run_bzr('log --line')
+ self.assertNotContainsString(log, 'SENTINEL')
+ self.overrideEnv('BZR_COLUMNS', '116')
+ log, err = self.run_bzr('log --line')
+ self.assertContainsString(log, 'SENT...')
+ self.overrideEnv('BZR_COLUMNS', '0')
+ log, err = self.run_bzr('log --line')
+ self.assertContainsString(log, 'SENTINEL')
+
diff --git a/bzrlib/tests/blackbox/test_lookup_revision.py b/bzrlib/tests/blackbox/test_lookup_revision.py
new file mode 100644
index 0000000..c94a844
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_lookup_revision.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr lookup-revision.
+"""
+
+from bzrlib import tests
+
+class TestLookupRevision(tests.TestCaseWithTransport):
+
+ def test_lookup_revision_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ tree.commit('This revision', rev_id='abcd')
+ out, err = self.run_bzr(['lookup-revision', '-d', 'a', '1'])
+ self.assertEqual('abcd\n', out)
+ self.assertEqual('', err)
diff --git a/bzrlib/tests/blackbox/test_ls.py b/bzrlib/tests/blackbox/test_ls.py
new file mode 100644
index 0000000..5906862
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_ls.py
@@ -0,0 +1,262 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""External tests of 'bzr ls'"""
+
+from bzrlib import (
+ ignores,
+ tests,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestLS(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestLS, self).setUp()
+
+ # Create a simple branch that can be used in testing
+ ignores._set_user_ignores(['user-ignore'])
+
+ self.wt = self.make_branch_and_tree('.')
+ self.build_tree_contents([
+ ('.bzrignore', '*.pyo\n'),
+ ('a', 'hello\n'),
+ ])
+
+ def ls_equals(self, value, args=None, recursive=True, working_dir=None):
+ command = 'ls'
+ if args is not None:
+ command += ' ' + args
+ if recursive:
+ command += ' -R'
+ out, err = self.run_bzr(command, working_dir=working_dir)
+ self.assertEqual('', err)
+ self.assertEqualDiff(value, out)
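+ # e.g. ls_equals('.bzrignore\na\n', '--unknown') runs 'bzr ls --unknown -R'
+ # and requires its stdout to match the given listing exactly.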
+
+ def test_ls_null_verbose(self):
+ # Can't supply both
+ self.run_bzr_error(['Cannot set both --verbose and --null'],
+ 'ls --verbose --null')
+
+ def test_ls_basic(self):
+ """Test the abilities of 'bzr ls'"""
+ self.ls_equals('.bzrignore\na\n')
+ self.ls_equals('.bzrignore\na\n', './')
+ self.ls_equals('? .bzrignore\n'
+ '? a\n',
+ '--verbose')
+ self.ls_equals('.bzrignore\n'
+ 'a\n',
+ '--unknown')
+ self.ls_equals('', '--ignored')
+ self.ls_equals('', '--versioned')
+ self.ls_equals('', '-V')
+ self.ls_equals('.bzrignore\n'
+ 'a\n',
+ '--unknown --ignored --versioned')
+ self.ls_equals('.bzrignore\n'
+ 'a\n',
+ '--unknown --ignored -V')
+ self.ls_equals('', '--ignored --versioned')
+ self.ls_equals('', '--ignored -V')
+ self.ls_equals('.bzrignore\0a\0', '--null')
+
+ def test_ls_added(self):
+ self.wt.add(['a'])
+ self.ls_equals('? .bzrignore\n'
+ 'V a\n',
+ '--verbose')
+ self.wt.commit('add')
+
+ self.build_tree(['subdir/'])
+ self.ls_equals('? .bzrignore\n'
+ 'V a\n'
+ '? subdir/\n'
+ , '--verbose')
+ self.build_tree(['subdir/b'])
+ self.wt.add(['subdir/', 'subdir/b', '.bzrignore'])
+ self.ls_equals('V .bzrignore\n'
+ 'V a\n'
+ 'V subdir/\n'
+ 'V subdir/b\n'
+ , '--verbose')
+
+ def test_show_ids(self):
+ self.build_tree(['subdir/'])
+ self.wt.add(['a', 'subdir'], ['a-id', 'subdir-id'])
+ self.ls_equals(
+ '.bzrignore \n'
+ 'a a-id\n'
+ 'subdir/ subdir-id\n',
+ '--show-ids')
+ self.ls_equals(
+ '? .bzrignore\n'
+ 'V a a-id\n'
+ 'V subdir/ subdir-id\n',
+ '--show-ids --verbose')
+ self.ls_equals('.bzrignore\0\0'
+ 'a\0a-id\0'
+ 'subdir\0subdir-id\0', '--show-ids --null')
+
+ def test_ls_no_recursive(self):
+ self.build_tree(['subdir/', 'subdir/b'])
+ self.wt.add(['a', 'subdir/', 'subdir/b', '.bzrignore'])
+
+ self.ls_equals('.bzrignore\n'
+ 'a\n'
+ 'subdir/\n'
+ , recursive=False)
+
+ self.ls_equals('V .bzrignore\n'
+ 'V a\n'
+ 'V subdir/\n'
+ , '--verbose', recursive=False)
+
+ # Check what happens in a sub-directory
+ self.ls_equals('b\n', working_dir='subdir')
+ self.ls_equals('b\0', '--null', working_dir='subdir')
+ self.ls_equals('subdir/b\n', '--from-root', working_dir='subdir')
+ self.ls_equals('subdir/b\0', '--from-root --null',
+ working_dir='subdir')
+ self.ls_equals('subdir/b\n', '--from-root', recursive=False,
+ working_dir='subdir')
+
+ def test_ls_path(self):
+ """If a path is specified, files are listed with that prefix"""
+ self.build_tree(['subdir/', 'subdir/b'])
+ self.wt.add(['subdir', 'subdir/b'])
+ self.ls_equals('subdir/b\n' ,
+ 'subdir')
+ self.ls_equals('../.bzrignore\n'
+ '../a\n'
+ '../subdir/\n'
+ '../subdir/b\n' ,
+ '..', working_dir='subdir')
+ self.ls_equals('../.bzrignore\0'
+ '../a\0'
+ '../subdir\0'
+ '../subdir/b\0' ,
+ '.. --null', working_dir='subdir')
+ self.ls_equals('? ../.bzrignore\n'
+ '? ../a\n'
+ 'V ../subdir/\n'
+ 'V ../subdir/b\n' ,
+ '.. --verbose', working_dir='subdir')
+ self.run_bzr_error(['cannot specify both --from-root and PATH'],
+ 'ls --from-root ..', working_dir='subdir')
+
+ def test_ls_revision(self):
+ self.wt.add(['a'])
+ self.wt.commit('add')
+
+ self.build_tree(['subdir/'])
+
+ # Check what happens when we supply a specific revision
+ self.ls_equals('a\n', '--revision 1')
+ self.ls_equals('V a\n'
+ , '--verbose --revision 1')
+
+ self.ls_equals('', '--revision 1', working_dir='subdir')
+
+ def test_ls_branch(self):
+ """If a branch is specified, files are listed from it"""
+ self.build_tree(['subdir/', 'subdir/b'])
+ self.wt.add(['subdir', 'subdir/b'])
+ self.wt.commit('committing')
+ branch = self.make_branch('branchdir')
+ branch.pull(self.wt.branch)
+ self.ls_equals('branchdir/subdir/\n'
+ 'branchdir/subdir/b\n',
+ 'branchdir')
+ self.ls_equals('branchdir/subdir/\n'
+ 'branchdir/subdir/b\n',
+ 'branchdir --revision 1')
+
+ def test_ls_ignored(self):
+ # Now try to do ignored files.
+ self.wt.add(['a', '.bzrignore'])
+
+ self.build_tree(['blah.py', 'blah.pyo', 'user-ignore'])
+ self.ls_equals('.bzrignore\n'
+ 'a\n'
+ 'blah.py\n'
+ 'blah.pyo\n'
+ 'user-ignore\n'
+ )
+ self.ls_equals('V .bzrignore\n'
+ 'V a\n'
+ '? blah.py\n'
+ 'I blah.pyo\n'
+ 'I user-ignore\n'
+ , '--verbose')
+ self.ls_equals('blah.pyo\n'
+ 'user-ignore\n'
+ , '--ignored')
+ self.ls_equals('blah.py\n'
+ , '--unknown')
+ self.ls_equals('.bzrignore\n'
+ 'a\n'
+ , '--versioned')
+ self.ls_equals('.bzrignore\n'
+ 'a\n'
+ , '-V')
+
+ def test_kinds(self):
+ self.build_tree(['subdir/'])
+ self.ls_equals('.bzrignore\n'
+ 'a\n',
+ '--kind=file')
+ self.ls_equals('subdir/\n',
+ '--kind=directory')
+ self.ls_equals('',
+ '--kind=symlink')
+ self.run_bzr_error(['invalid kind specified'], 'ls --kind=pile')
+
+ def test_ls_path_nonrecursive(self):
+ self.ls_equals('%s/.bzrignore\n'
+ '%s/a\n'
+ % (self.test_dir, self.test_dir),
+ self.test_dir, recursive=False)
+
+ def test_ls_directory(self):
+ """Test --directory option"""
+ self.wt = self.make_branch_and_tree('dir')
+ self.build_tree(['dir/sub/', 'dir/sub/file'])
+ self.wt.add(['sub', 'sub/file'])
+ self.wt.commit('commit')
+ self.ls_equals('sub/\nsub/file\n', '--directory=dir')
+ self.ls_equals('sub/file\n', '-d dir sub')
+
+
+class TestSmartServerLs(tests.TestCaseWithTransport):
+
+ def test_simple_ls(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['ls', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(6, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_lsprof.py b/bzrlib/tests/blackbox/test_lsprof.py
new file mode 100644
index 0000000..675d630
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_lsprof.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import tests
+from bzrlib.tests import features
+
+
+class TestLSProf(tests.TestCaseInTempDir):
+
+ _test_needs_features = [features.lsprof_feature]
+
+ def test_file(self):
+ out, err = self.run_bzr('--lsprof-file output.callgrind rocks')
+ self.assertNotContainsRe(out, 'Profile data written to')
+ self.assertContainsRe(err, 'Profile data written to')
+
+ def test_stdout(self):
+ out, err = self.run_bzr('--lsprof rocks')
+ self.assertContainsRe(out, 'CallCount')
+ self.assertNotContainsRe(err, 'Profile data written to')
diff --git a/bzrlib/tests/blackbox/test_merge.py b/bzrlib/tests/blackbox/test_merge.py
new file mode 100644
index 0000000..bfe71f1
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_merge.py
@@ -0,0 +1,763 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: Aaron Bentley <aaron.bentley@utoronto.ca>
+
+"""Black-box tests for bzr merge.
+"""
+
+import doctest
+import os
+
+from testtools import matchers
+
+from bzrlib import (
+ branch,
+ conflicts,
+ controldir,
+ merge_directive,
+ osutils,
+ tests,
+ urlutils,
+ workingtree,
+ )
+from bzrlib.tests import (
+ scenarios,
+ script,
+ )
+
+
+load_tests = scenarios.load_tests_apply_scenarios
+
+
+class TestMerge(tests.TestCaseWithTransport):
+
+ def example_branch(self, path='.'):
+ tree = self.make_branch_and_tree(path)
+ self.build_tree_contents([
+ (osutils.pathjoin(path, 'hello'), 'foo'),
+ (osutils.pathjoin(path, 'goodbye'), 'baz')])
+ tree.add('hello')
+ tree.commit(message='setup')
+ tree.add('goodbye')
+ tree.commit(message='setup')
+ return tree
+
+ def create_conflicting_branches(self):
+ """Create two branches which have overlapping modifications.
+
+ :return: (tree, other_branch) Where merging other_branch causes a file
+ conflict.
+ """
+ builder = self.make_branch_builder('branch')
+ builder.build_snapshot('rev1', None,
+ [('add', ('', 'root-id', 'directory', None)),
+ ('add', ('fname', 'f-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('rev2other', ['rev1'],
+ [('modify', ('f-id', 'a\nB\nD\n'))])
+ other = builder.get_branch().bzrdir.sprout('other').open_branch()
+ builder.build_snapshot('rev2this', ['rev1'],
+ [('modify', ('f-id', 'a\nB\nC\n'))])
+ tree = builder.get_branch().create_checkout('tree', lightweight=True)
+ return tree, other
+
+ def test_merge_reprocess(self):
+ d = controldir.ControlDir.create_standalone_workingtree('.')
+ d.commit('h')
+ self.run_bzr('merge . --reprocess --merge-type weave')
+
+ def test_merge(self):
+ a_tree = self.example_branch('a')
+ ancestor = a_tree.branch.revno()
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('b/goodbye', 'quux')])
+ b_tree.commit(message="more u's are always good")
+
+ self.build_tree_contents([('a/hello', 'quuux')])
+ # We can't merge when there are in-tree changes
+ self.run_bzr('merge ../b', retcode=3, working_dir='a')
+ a = workingtree.WorkingTree.open('a')
+ a_tip = a.commit("Like an epidemic of u's")
+
+ def run_merge_then_revert(args, retcode=None, working_dir='a'):
+ self.run_bzr(['merge', '../b', '-r', 'last:1..last:1'] + args,
+ retcode=retcode, working_dir=working_dir)
+ if retcode != 3:
+ a_tree.revert(backups=False)
+
+ run_merge_then_revert(['--merge-type', 'bloof'], retcode=3)
+ run_merge_then_revert(['--merge-type', 'merge3'])
+ run_merge_then_revert(['--merge-type', 'weave'])
+ run_merge_then_revert(['--merge-type', 'lca'])
+ self.run_bzr_error(['Show-base is not supported for this merge type'],
+ 'merge ../b -r last:1..last:1 --merge-type weave'
+ ' --show-base', working_dir='a')
+ a_tree.revert(backups=False)
+ self.run_bzr('merge ../b -r last:1..last:1 --reprocess',
+ working_dir='a')
+ a_tree.revert(backups=False)
+ self.run_bzr('merge ../b -r last:1', working_dir='a')
+ self.check_file_contents('a/goodbye', 'quux')
+ # Merging a branch pulls its revision into the tree
+ b = branch.Branch.open('b')
+ b_tip = b.last_revision()
+ self.assertTrue(a.branch.repository.has_revision(b_tip))
+ self.assertEqual([a_tip, b_tip], a.get_parent_ids())
+ a_tree.revert(backups=False)
+ out, err = self.run_bzr('merge -r revno:1:./hello', retcode=3,
+ working_dir='a')
+ self.assertTrue("Not a branch" in err)
+ self.run_bzr('merge -r revno:%d:./..revno:%d:../b'
+ %(ancestor,b.revno()), working_dir='a')
+ self.assertEquals(a.get_parent_ids(),
+ [a.branch.last_revision(), b.last_revision()])
+ self.check_file_contents('a/goodbye', 'quux')
+ a_tree.revert(backups=False)
+ self.run_bzr('merge -r revno:%d:../b'%b.revno(), working_dir='a')
+ self.assertEquals(a.get_parent_ids(),
+ [a.branch.last_revision(), b.last_revision()])
+ a_tip = a.commit('merged')
+ self.run_bzr('merge ../b -r last:1', working_dir='a')
+ self.assertEqual([a_tip], a.get_parent_ids())
+
+ def test_merge_defaults_to_reprocess(self):
+ tree, other = self.create_conflicting_branches()
+ # The default merge algorithm should enable 'reprocess' because
+ # 'show-base' is not set
+ self.run_bzr('merge ../other', working_dir='tree',
+ retcode=1)
+ self.assertEqualDiff('a\n'
+ 'B\n'
+ '<<<<<<< TREE\n'
+ 'C\n'
+ '=======\n'
+ 'D\n'
+ '>>>>>>> MERGE-SOURCE\n',
+ tree.get_file_text('f-id'))
+
+ def test_merge_explicit_reprocess_show_base(self):
+ tree, other = self.create_conflicting_branches()
+ # Explicitly setting both --reprocess and --show-base is an error
+ self.run_bzr_error(['Cannot do conflict reduction and show base'],
+ 'merge ../other --reprocess --show-base',
+ working_dir='tree')
+
+ def test_merge_override_reprocess(self):
+ tree, other = self.create_conflicting_branches()
+ # Explicitly disable reprocess
+ self.run_bzr('merge ../other --no-reprocess', working_dir='tree',
+ retcode=1)
+ self.assertEqualDiff('a\n'
+ '<<<<<<< TREE\n'
+ 'B\n'
+ 'C\n'
+ '=======\n'
+ 'B\n'
+ 'D\n'
+ '>>>>>>> MERGE-SOURCE\n',
+ tree.get_file_text('f-id'))
+
+ def test_merge_override_show_base(self):
+ tree, other = self.create_conflicting_branches()
+ # Setting '--show-base' will auto-disable '--reprocess'
+ self.run_bzr('merge ../other --show-base', working_dir='tree',
+ retcode=1)
+ self.assertEqualDiff('a\n'
+ '<<<<<<< TREE\n'
+ 'B\n'
+ 'C\n'
+ '||||||| BASE-REVISION\n'
+ 'b\n'
+ 'c\n'
+ '=======\n'
+ 'B\n'
+ 'D\n'
+ '>>>>>>> MERGE-SOURCE\n',
+ tree.get_file_text('f-id'))
+
+ def test_merge_with_missing_file(self):
+ """Merge handles missing file conflicts"""
+ self.build_tree_contents([
+ ('a/',),
+ ('a/sub/',),
+ ('a/sub/a.txt', 'hello\n'),
+ ('a/b.txt', 'hello\n'),
+ ('a/sub/c.txt', 'hello\n')])
+ a_tree = self.make_branch_and_tree('a')
+ a_tree.add(['sub', 'b.txt', 'sub/c.txt', 'sub/a.txt'])
+ a_tree.commit(message='added a')
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([
+ ('a/sub/a.txt', 'hello\nthere\n'),
+ ('a/b.txt', 'hello\nthere\n'),
+ ('a/sub/c.txt', 'hello\nthere\n')])
+ a_tree.commit(message='Added there')
+ os.remove('a/sub/a.txt')
+ os.remove('a/sub/c.txt')
+ os.rmdir('a/sub')
+ os.remove('a/b.txt')
+ a_tree.commit(message='Removed a.txt')
+ self.build_tree_contents([
+ ('b/sub/a.txt', 'hello\nsomething\n'),
+ ('b/b.txt', 'hello\nsomething\n'),
+ ('b/sub/c.txt', 'hello\nsomething\n')])
+ b_tree.commit(message='Modified a.txt')
+
+ self.run_bzr('merge ../a/', retcode=1, working_dir='b')
+ self.assertPathExists('b/sub/a.txt.THIS')
+ self.assertPathExists('b/sub/a.txt.BASE')
+
+ self.run_bzr('merge ../b/', retcode=1, working_dir='a')
+ self.assertPathExists('a/sub/a.txt.OTHER')
+ self.assertPathExists('a/sub/a.txt.BASE')
+
+ def test_conflict_leaves_base_this_other_files(self):
+ tree, other = self.create_conflicting_branches()
+ self.run_bzr('merge ../other', working_dir='tree',
+ retcode=1)
+ self.assertFileEqual('a\nb\nc\n', 'tree/fname.BASE')
+ self.assertFileEqual('a\nB\nD\n', 'tree/fname.OTHER')
+ self.assertFileEqual('a\nB\nC\n', 'tree/fname.THIS')
+
+ def test_weave_conflict_leaves_base_this_other_files(self):
+ tree, other = self.create_conflicting_branches()
+ self.run_bzr('merge ../other --weave', working_dir='tree',
+ retcode=1)
+ self.assertFileEqual('a\nb\nc\n', 'tree/fname.BASE')
+ self.assertFileEqual('a\nB\nD\n', 'tree/fname.OTHER')
+ self.assertFileEqual('a\nB\nC\n', 'tree/fname.THIS')
+
+ def test_merge_remember(self):
+ """Merge changes from one branch to another, test submit location."""
+ tree_a = self.make_branch_and_tree('branch_a')
+ branch_a = tree_a.branch
+ self.build_tree(['branch_a/a'])
+ tree_a.add('a')
+ tree_a.commit('commit a')
+ branch_b = branch_a.bzrdir.sprout('branch_b').open_branch()
+ tree_b = branch_b.bzrdir.open_workingtree()
+ branch_c = branch_a.bzrdir.sprout('branch_c').open_branch()
+ tree_c = branch_c.bzrdir.open_workingtree()
+ self.build_tree(['branch_a/b'])
+ tree_a.add('b')
+ tree_a.commit('commit b')
+ self.build_tree(['branch_c/c'])
+ tree_c.add('c')
+ tree_c.commit('commit c')
+ # reset parent
+ parent = branch_b.get_parent()
+ branch_b.set_parent(None)
+ self.assertEqual(None, branch_b.get_parent())
+ # test merge for failure without parent set
+ out = self.run_bzr('merge', retcode=3, working_dir='branch_b')
+ self.assertEquals(out,
+ ('','bzr: ERROR: No location specified or remembered\n'))
+
+ # test uncommitted changes
+ self.build_tree(['branch_b/d'])
+ tree_b.add('d')
+ self.run_bzr_error(['Working tree ".*" has uncommitted changes'],
+ 'merge', working_dir='branch_b')
+
+ # merge should now pass and implicitly remember merge location
+ tree_b.commit('commit d')
+ out, err = self.run_bzr('merge ../branch_a', working_dir='branch_b')
+
+ base = urlutils.local_path_from_url(branch_a.base)
+ self.assertEndsWith(err, '+N b\nAll changes applied successfully.\n')
+ # re-open branch as external run_bzr modified it
+ branch_b = branch_b.bzrdir.open_branch()
+ self.assertEquals(osutils.abspath(branch_b.get_submit_branch()),
+ osutils.abspath(parent))
+ # test implicit --remember when committing new file
+ self.build_tree(['branch_b/e'])
+ tree_b.add('e')
+ tree_b.commit('commit e')
+ out, err = self.run_bzr('merge', working_dir='branch_b')
+ self.assertStartsWith(
+ err, 'Merging from remembered submit location %s\n' % (base,))
+ # re-open tree as external run_bzr modified it
+ tree_b = branch_b.bzrdir.open_workingtree()
+ tree_b.commit('merge branch_a')
+ # test explicit --remember
+ out, err = self.run_bzr('merge ../branch_c --remember',
+ working_dir='branch_b')
+ self.assertEquals(out, '')
+ self.assertEquals(err, '+N c\nAll changes applied successfully.\n')
+ # re-open branch as external run_bzr modified it
+ branch_b = branch_b.bzrdir.open_branch()
+ self.assertEquals(osutils.abspath(branch_b.get_submit_branch()),
+ osutils.abspath(branch_c.bzrdir.root_transport.base))
+ # re-open tree as external run_bzr modified it
+ tree_b = branch_b.bzrdir.open_workingtree()
+ tree_b.commit('merge branch_c')
+
+ def test_merge_bundle(self):
+ from bzrlib.testament import Testament
+ tree_a = self.make_branch_and_tree('branch_a')
+ self.build_tree_contents([('branch_a/a', 'hello')])
+ tree_a.add('a')
+ tree_a.commit('message')
+
+ tree_b = tree_a.bzrdir.sprout('branch_b').open_workingtree()
+ self.build_tree_contents([('branch_a/a', 'hey there')])
+ tree_a.commit('message')
+
+ self.build_tree_contents([('branch_b/a', 'goodbye')])
+ tree_b.commit('message')
+ self.run_bzr('bundle ../branch_a -o ../bundle', working_dir='branch_b')
+ self.run_bzr('merge ../bundle', retcode=1, working_dir='branch_a')
+ testament_a = Testament.from_revision(tree_a.branch.repository,
+ tree_b.get_parent_ids()[0])
+ testament_b = Testament.from_revision(tree_b.branch.repository,
+ tree_b.get_parent_ids()[0])
+ self.assertEqualDiff(testament_a.as_text(),
+ testament_b.as_text())
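+ # Clear the conflicts left by the merge above so the commit below can proceed.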
+ tree_a.set_conflicts(conflicts.ConflictList())
+ tree_a.commit('message')
+ # it is legal to attempt to merge an already-merged bundle
+ err = self.run_bzr('merge ../bundle', working_dir='branch_a')[1]
+ # but it does nothing
+ self.assertFalse(tree_a.changes_from(tree_a.basis_tree()).has_changed())
+ self.assertEqual('Nothing to do.\n', err)
+
+ def test_merge_uncommitted(self):
+ """Check that merge --uncommitted behaves properly"""
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/file_1', 'a/file_2'])
+ tree_a.add(['file_1', 'file_2'])
+ tree_a.commit('commit 1')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ self.assertPathExists('b/file_1')
+ tree_a.rename_one('file_1', 'file_i')
+ tree_a.commit('commit 2')
+ tree_a.rename_one('file_2', 'file_ii')
+ self.run_bzr('merge a --uncommitted -d b')
+ self.assertPathExists('b/file_1')
+ self.assertPathExists('b/file_ii')
+ tree_b.revert()
+ self.run_bzr_error(('Cannot use --uncommitted and --revision',),
+ 'merge /a --uncommitted -r1 -d b')
+
+ def test_merge_uncommitted_file(self):
+ """It should be possible to merge changes from a single file."""
+ tree_a = self.make_branch_and_tree('tree_a')
+ tree_a.commit('initial commit')
+ tree_a.bzrdir.sprout('tree_b')
+ self.build_tree(['tree_a/file1', 'tree_a/file2'])
+ tree_a.add(['file1', 'file2'])
+ self.run_bzr(['merge', '--uncommitted', '../tree_a/file1'],
+ working_dir='tree_b')
+ self.assertPathExists('tree_b/file1')
+ self.assertPathDoesNotExist('tree_b/file2')
+
+ def test_merge_nonexistent_file(self):
+ """It should not be possible to merge changes from a file which
+ does not exist."""
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'bar\n')])
+ tree_a.add(['file'])
+ tree_a.commit('commit 1')
+ self.run_bzr_error(('Path\(s\) do not exist: non/existing',),
+ ['merge', 'non/existing'], working_dir='tree_a')
+
+ def pullable_branch(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/file', 'bar\n')])
+ tree_a.add(['file'])
+ self.id1 = tree_a.commit('commit 1')
+
+ tree_b = self.make_branch_and_tree('b')
+ tree_b.pull(tree_a.branch)
+ self.build_tree_contents([('b/file', 'foo\n')])
+ self.id2 = tree_b.commit('commit 2')
+
+ def test_merge_pull(self):
+ self.pullable_branch()
+ (out, err) = self.run_bzr('merge --pull ../b', working_dir='a')
+ self.assertContainsRe(out, 'Now on revision 2\\.')
+ tree_a = workingtree.WorkingTree.open('a')
+ self.assertEqual([self.id2], tree_a.get_parent_ids())
+
+ def test_merge_pull_preview(self):
+ self.pullable_branch()
+ (out, err) = self.run_bzr('merge --pull --preview -d a b')
+ self.assertThat(out, matchers.DocTestMatches(
+"""=== modified file 'file'
+--- file\t...
++++ file\t...
+@@ -1,1 +1,1 @@
+-bar
++foo
+
+""", doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+ tree_a = workingtree.WorkingTree.open('a')
+ self.assertEqual([self.id1], tree_a.get_parent_ids())
+
+ def test_merge_kind_change(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'content_1')])
+ tree_a.add('file', 'file-id')
+ tree_a.commit('added file')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ os.unlink('tree_a/file')
+ self.build_tree(['tree_a/file/'])
+ tree_a.commit('changed file to directory')
+ self.run_bzr('merge ../tree_a', working_dir='tree_b')
+ self.assertEqual('directory', osutils.file_kind('tree_b/file'))
+ tree_b.revert()
+ self.assertEqual('file', osutils.file_kind('tree_b/file'))
+ self.build_tree_contents([('tree_b/file', 'content_2')])
+ tree_b.commit('content change')
+ self.run_bzr('merge ../tree_a', retcode=1, working_dir='tree_b')
+ self.assertEqual(tree_b.conflicts(),
+ [conflicts.ContentsConflict('file',
+ file_id='file-id')])
+
+ def test_directive_cherrypick(self):
+ source = self.make_branch_and_tree('source')
+ source.commit("nothing")
+ # see https://bugs.launchpad.net/bzr/+bug/409688 - trying to
+ # cherrypick from one branch into another unrelated branch with a
+ # different root id will give shape conflicts. As a workaround we
+ # make sure they share the same root id.
+ target = source.bzrdir.sprout('target').open_workingtree()
+ self.build_tree(['source/a'])
+ source.add('a')
+ source.commit('Added a', rev_id='rev1')
+ self.build_tree(['source/b'])
+ source.add('b')
+ source.commit('Added b', rev_id='rev2')
+ target.commit('empty commit')
+ self.write_directive('directive', source.branch, 'target', 'rev2',
+ 'rev1')
+ out, err = self.run_bzr('merge -d target directive')
+ self.assertPathDoesNotExist('target/a')
+ self.assertPathExists('target/b')
+ self.assertContainsRe(err, 'Performing cherrypick')
+
+ def write_directive(self, filename, source, target, revision_id,
+ base_revision_id=None, mangle_patch=False):
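+ """Write a merge directive from source into the given file.
+
+ If mangle_patch is set, corrupt the preview patch so that it no
+ longer matches the actual changes.
+ """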
+ md = merge_directive.MergeDirective2.from_objects(
+ source.repository, revision_id, 0, 0, target,
+ base_revision_id=base_revision_id)
+ if mangle_patch:
+ md.patch = 'asdf\n'
+ self.build_tree_contents([(filename, ''.join(md.to_lines()))])
+
+ def test_directive_verify_warning(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/a'])
+ source.add('a')
+ source.commit('Added a', rev_id='rev1')
+ target = self.make_branch_and_tree('target')
+ target.commit('empty commit')
+ self.write_directive('directive', source.branch, 'target', 'rev1')
+ err = self.run_bzr('merge -d target directive')[1]
+ self.assertNotContainsRe(err, 'Preview patch does not match changes')
+ target.revert()
+ self.write_directive('directive', source.branch, 'target', 'rev1',
+ mangle_patch=True)
+ err = self.run_bzr('merge -d target directive')[1]
+ self.assertContainsRe(err, 'Preview patch does not match changes')
+
+ def test_merge_arbitrary(self):
+ target = self.make_branch_and_tree('target')
+ target.commit('empty')
+ # We need a revision that has no integer revno
+ branch_a = target.bzrdir.sprout('branch_a').open_workingtree()
+ self.build_tree(['branch_a/file1'])
+ branch_a.add('file1')
+ branch_a.commit('added file1', rev_id='rev2a')
+ branch_b = target.bzrdir.sprout('branch_b').open_workingtree()
+ self.build_tree(['branch_b/file2'])
+ branch_b.add('file2')
+ branch_b.commit('added file2', rev_id='rev2b')
+ branch_b.merge_from_branch(branch_a.branch)
+ self.assertPathExists('branch_b/file1')
+ branch_b.commit('merged branch_a', rev_id='rev3b')
+
+ # It works if the revid has an integer revno
+ self.run_bzr('merge -d target -r revid:rev2a branch_a')
+ self.assertPathExists('target/file1')
+ self.assertPathDoesNotExist('target/file2')
+ target.revert()
+
+ # It should work if the revid has no integer revno
+ self.run_bzr('merge -d target -r revid:rev2a branch_b')
+ self.assertPathExists('target/file1')
+ self.assertPathDoesNotExist('target/file2')
+
+ def assertDirectoryContent(self, directory, entries, message=''):
+ """Assert whether entries (file or directories) exist in a directory.
+
+ It also checks that there are no extra entries.
+ """
+ ondisk = os.listdir(directory)
+ if set(ondisk) == set(entries):
+ return
+ if message:
+ message += '\n'
+ raise AssertionError(
+ '%s"%s" directory content is different:\na = %s\nb = %s\n'
+ % (message, directory, sorted(entries), sorted(ondisk)))
+
+ def test_cherrypicking_merge(self):
+ # make source branch
+ source = self.make_branch_and_tree('source')
+ for f in ('a', 'b', 'c', 'd'):
+ self.build_tree(['source/'+f])
+ source.add(f)
+ source.commit('added '+f, rev_id='rev_'+f)
+ # target branch
+ target = source.bzrdir.sprout('target', 'rev_a').open_workingtree()
+ self.assertDirectoryContent('target', ['.bzr', 'a'])
+ # pick 1 revision
+ self.run_bzr('merge -d target -r revid:rev_b..revid:rev_c source')
+ self.assertDirectoryContent('target', ['.bzr', 'a', 'c'])
+ target.revert()
+ # pick 2 revisions
+ self.run_bzr('merge -d target -r revid:rev_b..revid:rev_d source')
+ self.assertDirectoryContent('target', ['.bzr', 'a', 'c', 'd'])
+ target.revert()
+ # pick 1 revision with option --changes
+ self.run_bzr('merge -d target -c revid:rev_d source')
+ self.assertDirectoryContent('target', ['.bzr', 'a', 'd'])
+
+ def test_merge_criss_cross(self):
+ tree_a = self.make_branch_and_tree('a')
+ tree_a.commit('', rev_id='rev1')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ tree_a.commit('', rev_id='rev2a')
+ tree_b.commit('', rev_id='rev2b')
+ tree_a.merge_from_branch(tree_b.branch)
+ tree_b.merge_from_branch(tree_a.branch)
+ tree_a.commit('', rev_id='rev3a')
+ tree_b.commit('', rev_id='rev3b')
+ graph = tree_a.branch.repository.get_graph(tree_b.branch.repository)
+ out, err = self.run_bzr(['merge', '-d', 'a', 'b'])
+ self.assertContainsRe(err, 'Warning: criss-cross merge encountered.')
+
+ def test_merge_from_submit(self):
+ tree_a = self.make_branch_and_tree('a')
+ tree_a.commit('test')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ tree_c = tree_a.bzrdir.sprout('c').open_workingtree()
+ out, err = self.run_bzr(['merge', '-d', 'c'])
+ self.assertContainsRe(err,
+ 'Merging from remembered parent location .*a\/')
+ tree_c.branch.lock_write()
+ try:
+ tree_c.branch.set_submit_branch(tree_b.bzrdir.root_transport.base)
+ finally:
+ tree_c.branch.unlock()
+ out, err = self.run_bzr(['merge', '-d', 'c'])
+ self.assertContainsRe(err,
+ 'Merging from remembered submit location .*b\/')
+
+ def test_remember_sets_submit(self):
+ tree_a = self.make_branch_and_tree('a')
+ tree_a.commit('rev1')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ self.assertIs(tree_b.branch.get_submit_branch(), None)
+
+ # Remember should not happen if using default from parent
+ out, err = self.run_bzr(['merge', '-d', 'b'])
+ refreshed = workingtree.WorkingTree.open('b')
+ self.assertIs(refreshed.branch.get_submit_branch(), None)
+
+ # Remember should happen if user supplies location
+ out, err = self.run_bzr(['merge', '-d', 'b', 'a'])
+ refreshed = workingtree.WorkingTree.open('b')
+ self.assertEqual(refreshed.branch.get_submit_branch(),
+ tree_a.bzrdir.root_transport.base)
+
+ def test_no_remember_dont_set_submit(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/file', "a\n")])
+ tree_a.add('file')
+ tree_a.commit('rev1')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ self.assertIs(tree_b.branch.get_submit_branch(), None)
+
+ # Remember should not happen if using default from parent
+ out, err = self.run_bzr(['merge', '-d', 'b', '--no-remember'])
+ self.assertEquals(None, tree_b.branch.get_submit_branch())
+
+ # Remember should not happen if user supplies location but ask for not
+ # remembering it
+ out, err = self.run_bzr(['merge', '-d', 'b', '--no-remember', 'a'])
+ self.assertEqual(None, tree_b.branch.get_submit_branch())
+
+ def test_weave_cherrypick(self):
+ this_tree = self.make_branch_and_tree('this')
+ self.build_tree_contents([('this/file', "a\n")])
+ this_tree.add('file')
+ this_tree.commit('rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('other/file', "a\nb\n")])
+ other_tree.commit('rev2b')
+ self.build_tree_contents([('other/file', "c\na\nb\n")])
+ other_tree.commit('rev3b')
+ self.run_bzr('merge --weave -d this other -r -2..-1')
+ self.assertFileEqual('c\na\n', 'this/file')
+
+ def test_lca_merge_criss_cross(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/file', 'base-contents\n')])
+ tree_a.add('file')
+ tree_a.commit('', rev_id='rev1')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('a/file',
+ 'base-contents\nthis-contents\n')])
+ tree_a.commit('', rev_id='rev2a')
+ self.build_tree_contents([('b/file',
+ 'base-contents\nother-contents\n')])
+ tree_b.commit('', rev_id='rev2b')
+ tree_a.merge_from_branch(tree_b.branch)
+ self.build_tree_contents([('a/file',
+ 'base-contents\nthis-contents\n')])
+ tree_a.set_conflicts(conflicts.ConflictList())
+ tree_b.merge_from_branch(tree_a.branch)
+ self.build_tree_contents([('b/file',
+ 'base-contents\nother-contents\n')])
+ tree_b.set_conflicts(conflicts.ConflictList())
+ tree_a.commit('', rev_id='rev3a')
+ tree_b.commit('', rev_id='rev3b')
+ out, err = self.run_bzr(['merge', '-d', 'a', 'b', '--lca'], retcode=1)
+ self.assertFileEqual('base-contents\n<<<<<<< TREE\nthis-contents\n'
+ '=======\nother-contents\n>>>>>>> MERGE-SOURCE\n',
+ 'a/file')
+
+ def test_merge_preview(self):
+ this_tree = self.make_branch_and_tree('this')
+ this_tree.commit('rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('other/file', 'new line')])
+ other_tree.add('file')
+ other_tree.commit('rev2a')
+ this_tree.commit('rev2b')
+ out, err = self.run_bzr(['merge', '-d', 'this', 'other', '--preview'])
+ self.assertContainsRe(out, '\+new line')
+ self.assertNotContainsRe(err, '\+N file\n')
+ this_tree.lock_read()
+ self.addCleanup(this_tree.unlock)
+ self.assertEqual([],
+ list(this_tree.iter_changes(this_tree.basis_tree())))
+
+ def test_merge_missing_second_revision_spec(self):
+ """Merge uses branch basis when the second revision is unspecified."""
+ this = self.make_branch_and_tree('this')
+ this.commit('rev1')
+ other = self.make_branch_and_tree('other')
+ self.build_tree(['other/other_file'])
+ other.add('other_file')
+ other.commit('rev1b')
+ self.run_bzr('merge -d this other -r0..')
+ self.assertPathExists('this/other_file')
+
+ def test_merge_interactive_unlocks_branch(self):
+ this = self.make_branch_and_tree('this')
+ this.commit('empty commit')
+ other = this.bzrdir.sprout('other').open_workingtree()
+ other.commit('empty commit 2')
+ self.run_bzr('merge -i -d this other')
+ this.lock_write()
+ this.unlock()
+
+ def test_merge_fetches_tags(self):
+ """Tags are updated by merge, and revisions named in those tags are
+ fetched.
+ """
+ # Make a source, sprout a target off it
+ builder = self.make_branch_builder('source')
+ builder.build_commit(message="Rev 1", rev_id='rev-1')
+ source = builder.get_branch()
+ target_bzrdir = source.bzrdir.sprout('target')
+ # Add a non-ancestry tag to source
+ builder.build_commit(message="Rev 2a", rev_id='rev-2a')
+ source.tags.set_tag('tag-a', 'rev-2a')
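+ # Rewind the branch tip to rev-1 so rev-2a stays reachable only via the tag.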
+ source.set_last_revision_info(1, 'rev-1')
+ source.get_config_stack().set('branch.fetch_tags', True)
+ builder.build_commit(message="Rev 2b", rev_id='rev-2b')
+ # Merge from source
+ self.run_bzr('merge -d target source')
+ target = target_bzrdir.open_branch()
+ # The tag is present, and so is its revision.
+ self.assertEqual('rev-2a', target.tags.lookup_tag('tag-a'))
+ target.repository.get_revision('rev-2a')
+
+
+class TestMergeRevisionRange(tests.TestCaseWithTransport):
+
+ scenarios = (('whole-tree', dict(context='.')),
+ ('file-only', dict(context='a')))
+
+ def setUp(self):
+ super(TestMergeRevisionRange, self).setUp()
+ self.tree = self.make_branch_and_tree(".")
+ self.tree.commit('initial commit')
+ for f in ("a", "b"):
+ self.build_tree([f])
+ self.tree.add(f)
+ self.tree.commit("added " + f)
+
+ def test_merge_reversed_revision_range(self):
+ self.run_bzr("merge -r 2..1 " + self.context)
+ self.assertPathDoesNotExist("a")
+ self.assertPathExists("b")
+
+
+class TestMergeScript(script.TestCaseWithTransportAndScript):
+
+ def test_merge_empty_branch(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/a'])
+ source.add('a')
+ source.commit('Added a', rev_id='rev1')
+ target = self.make_branch_and_tree('target')
+ self.run_script("""\
+$ bzr merge -d target source
+2>bzr: ERROR: Merging into empty branches not currently supported, https://bugs.launchpad.net/bzr/+bug/308562
+""")
+
+
+class TestMergeForce(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestMergeForce, self).setUp()
+ self.tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ self.tree_a.add(['foo'])
+ self.tree_a.commit('add file')
+ self.tree_b = self.tree_a.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('a/foo', 'change 1')])
+ self.tree_a.commit('change file')
+ self.tree_b.merge_from_branch(self.tree_a.branch)
+
+ def test_merge_force(self):
+ self.tree_a.commit('empty change to allow merge to run')
+ # Second merge on top of the uncommitted one
+ self.run_bzr(['merge', '../a', '--force'], working_dir='b')
+
+ def test_merge_with_uncommitted_changes(self):
+ self.run_bzr_error(['Working tree .* has uncommitted changes'],
+ ['merge', '../a'], working_dir='b')
+
+ def test_merge_with_pending_merges(self):
+ # Revert the changes keeping the pending merge
+ self.run_bzr(['revert', 'b'])
+ self.run_bzr_error(['Working tree .* has uncommitted changes'],
+ ['merge', '../a'], working_dir='b')
diff --git a/bzrlib/tests/blackbox/test_merge_directive.py b/bzrlib/tests/blackbox/test_merge_directive.py
new file mode 100644
index 0000000..322f48d
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_merge_directive.py
@@ -0,0 +1,261 @@
+# Copyright (C) 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import smtplib
+
+from bzrlib import (
+ gpg,
+ merge_directive,
+ tests,
+ workingtree,
+ )
+
+
+EMAIL1 = """From: "J. Random Hacker" <jrandom@example.com>
+Subject: bar
+To: pqm@example.com
+User-Agent: Bazaar \(.*\)
+
+# Bazaar merge directive format 2 \\(Bazaar 0.90\\)
+# revision_id: bar-id
+# target_branch: ../tree2
+# testament_sha1: .*
+# timestamp: .*
+# source_branch: .
+#"""
+
+
+class TestMergeDirective(tests.TestCaseWithTransport):
+
+ def prepare_merge_directive(self):
+ self.tree1 = self.make_branch_and_tree('tree1')
+ self.build_tree_contents([('tree1/file', 'a\nb\nc\nd\n')])
+ self.tree1.branch.get_config_stack().set(
+ 'email', 'J. Random Hacker <jrandom@example.com>')
+ self.tree1.add('file')
+ self.tree1.commit('foo', rev_id='foo-id')
+ self.tree2 = self.tree1.bzrdir.sprout('tree2').open_workingtree()
+ self.build_tree_contents([('tree1/file', 'a\nb\nc\nd\ne\n')])
+ self.tree1.commit('bar', rev_id='bar-id')
+ os.chdir('tree1')
+ return self.tree1, self.tree2
+
+ def test_merge_directive(self):
+ self.prepare_merge_directive()
+ md_text = self.run_bzr('merge-directive ../tree2')[0]
+ self.assertContainsRe(md_text, "\\+e")
+ md_text = self.run_bzr('merge-directive -r -2 ../tree2')[0]
+ self.assertNotContainsRe(md_text, "\\+e")
+ md_text = self.run_bzr('merge-directive -r -1..-2 ../tree2')[0]
+ md2 = merge_directive.MergeDirective.from_lines(
+ md_text.splitlines(True))
+ self.assertEqual('foo-id', md2.revision_id)
+ self.assertEqual('bar-id', md2.base_revision_id)
+
+ def test_submit_branch(self):
+ self.prepare_merge_directive()
+ self.run_bzr_error(('No submit branch',), 'merge-directive', retcode=3)
+ self.run_bzr('merge-directive ../tree2')
+
+ def test_public_branch(self):
+ self.prepare_merge_directive()
+ self.run_bzr_error(('No public branch',),
+ 'merge-directive --diff ../tree2', retcode=3)
+ md_text = self.run_bzr('merge-directive ../tree2')[0]
+ self.assertNotContainsRe(md_text, 'source_branch:')
+ self.run_bzr('merge-directive --diff ../tree2 .')
+ self.run_bzr('merge-directive --diff')[0]
+ self.assertNotContainsRe(md_text, 'source_branch:')
+
+ def test_patch_types(self):
+ self.prepare_merge_directive()
+ md_text = self.run_bzr('merge-directive ../tree2')[0]
+ self.assertContainsRe(md_text, "# Begin bundle")
+ self.assertContainsRe(md_text, "\\+e")
+ md_text = self.run_bzr('merge-directive ../tree2 --diff .')[0]
+ self.assertNotContainsRe(md_text, "# Begin bundle")
+ self.assertContainsRe(md_text, "\\+e")
+ md_text = self.run_bzr('merge-directive --plain')[0]
+ self.assertNotContainsRe(md_text, "\\+e")
+
+ def test_message(self):
+ self.prepare_merge_directive()
+ md_text = self.run_bzr('merge-directive ../tree2')[0]
+ self.assertNotContainsRe(md_text, 'message: Message for merge')
+ md_text = self.run_bzr('merge-directive -m Message_for_merge')[0]
+ self.assertContainsRe(md_text, 'message: Message_for_merge')
+
+ def test_signing(self):
+ self.prepare_merge_directive()
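+ # Substitute the loopback GPG strategy so signing needs no real key and
+ # produces the pseudo-signed marker checked below.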
+ old_strategy = gpg.GPGStrategy
+ gpg.GPGStrategy = gpg.LoopbackGPGStrategy
+ try:
+ md_text = self.run_bzr('merge-directive --sign ../tree2')[0]
+ finally:
+ gpg.GPGStrategy = old_strategy
+ self.assertContainsRe(md_text, '^-----BEGIN PSEUDO-SIGNED CONTENT')
+
+ def run_bzr_fakemail(self, *args, **kwargs):
+ sendmail_calls = []
+ def sendmail(self, from_, to, message):
+ sendmail_calls.append((self, from_, to, message))
+ connect_calls = []
+ def connect(self, host='localhost', port=0):
+ connect_calls.append((self, host, port))
+ def has_extn(self, extension):
+ return False
+ def ehlo(self):
+ return (200, 'Ok')
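+ # Monkey-patch smtplib.SMTP with the stubs above so no real SMTP traffic
+ # occurs; the calls are recorded and the originals restored afterwards.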
+ old_sendmail = smtplib.SMTP.sendmail
+ smtplib.SMTP.sendmail = sendmail
+ old_connect = smtplib.SMTP.connect
+ smtplib.SMTP.connect = connect
+ old_ehlo = smtplib.SMTP.ehlo
+ smtplib.SMTP.ehlo = ehlo
+ old_has_extn = smtplib.SMTP.has_extn
+ smtplib.SMTP.has_extn = has_extn
+ try:
+ result = self.run_bzr(*args, **kwargs)
+ finally:
+ smtplib.SMTP.sendmail = old_sendmail
+ smtplib.SMTP.connect = old_connect
+ smtplib.SMTP.ehlo = old_ehlo
+ smtplib.SMTP.has_extn = old_has_extn
+ return result + (connect_calls, sendmail_calls)
+
+ def test_mail_default(self):
+ tree1, tree2 = self.prepare_merge_directive()
+ md_text, err, connect_calls, sendmail_calls = \
+ self.run_bzr_fakemail(['merge-directive', '--mail-to',
+ 'pqm@example.com', '--plain', '../tree2',
+ '.'])
+ self.assertEqual('', md_text)
+ self.assertEqual(1, len(connect_calls))
+ call = connect_calls[0]
+ self.assertEqual(('localhost', 0), call[1:3])
+ self.assertEqual(1, len(sendmail_calls))
+ call = sendmail_calls[0]
+ self.assertEqual(('jrandom@example.com', ['pqm@example.com']),
+ call[1:3])
+ self.assertContainsRe(call[3], EMAIL1)
+
+ def test_pull_raw(self):
+ self.prepare_merge_directive()
+ self.tree1.commit('baz', rev_id='baz-id')
+ md_text = self.run_bzr(['merge-directive', self.tree2.basedir,
+ '-r', '2', self.tree1.basedir, '--plain'])[0]
+ self.build_tree_contents([('../directive', md_text)])
+ os.chdir('../tree2')
+ self.run_bzr('pull ../directive')
+ wt = workingtree.WorkingTree.open('.')
+ self.assertEqual('bar-id', wt.last_revision())
+
+ def test_pull_user_r(self):
+ """If the user supplies -r, an error is emitted"""
+ self.prepare_merge_directive()
+ self.tree1.commit('baz', rev_id='baz-id')
+ md_text = self.run_bzr(['merge-directive', self.tree2.basedir,
+ self.tree1.basedir, '--plain'])[0]
+ self.build_tree_contents([('../directive', md_text)])
+ os.chdir('../tree2')
+ self.run_bzr_error(
+ ('Cannot use -r with merge directives or bundles',),
+ 'pull -r 2 ../directive')
+
+ def test_pull_bundle(self):
+ self.prepare_merge_directive()
+ self.tree1.commit('baz', rev_id='baz-id')
+ md_text = self.run_bzr(['merge-directive', self.tree2.basedir,
+ '-r', '2', '/dev/null', '--bundle'])[0]
+ self.build_tree_contents([('../directive', md_text)])
+ os.chdir('../tree2')
+ self.run_bzr('pull ../directive')
+ wt = workingtree.WorkingTree.open('.')
+ self.assertEqual('bar-id', wt.last_revision())
+
+ def test_merge_raw(self):
+ self.prepare_merge_directive()
+ self.tree1.commit('baz', rev_id='baz-id')
+ md_text = self.run_bzr(['merge-directive', self.tree2.basedir,
+ '-r', '2', self.tree1.basedir, '--plain'])[0]
+ self.build_tree_contents([('../directive', md_text)])
+ os.chdir('../tree2')
+ self.run_bzr('merge ../directive')
+ wt = workingtree.WorkingTree.open('.')
+ self.assertEqual('bar-id', wt.get_parent_ids()[1])
+
+ def test_merge_user_r(self):
+ """If the user supplies -r, an error is emitted"""
+ self.prepare_merge_directive()
+ self.tree1.commit('baz', rev_id='baz-id')
+ md_text = self.run_bzr(['merge-directive', self.tree2.basedir,
+ self.tree1.basedir, '--plain'])[0]
+ self.build_tree_contents([('../directive', md_text)])
+ os.chdir('../tree2')
+ self.run_bzr_error(
+ ('Cannot use -r with merge directives or bundles',),
+ 'merge -r 2 ../directive')
+
+ def test_merge_bundle(self):
+ self.prepare_merge_directive()
+ self.tree1.commit('baz', rev_id='baz-id')
+ md_text = self.run_bzr(['merge-directive', self.tree2.basedir,
+ '-r', '2', '/dev/null', '--bundle'])[0]
+ self.build_tree_contents([('../directive', md_text)])
+ os.chdir('../tree2')
+ self.run_bzr('merge ../directive')
+ wt = workingtree.WorkingTree.open('.')
+ self.assertEqual('bar-id', wt.get_parent_ids()[1])
+
+ def test_mail_uses_config(self):
+ tree1, tree2 = self.prepare_merge_directive()
+ br = tree1.branch
+ br.get_config_stack().set('smtp_server', 'bogushost')
+ md_text, err, connect_calls, sendmail_calls = \
+ self.run_bzr_fakemail('merge-directive --mail-to'
+ ' pqm@example.com --plain ../tree2 .')
+ call = connect_calls[0]
+ self.assertEqual(('bogushost', 0), call[1:3])
+
+ def test_no_common_ancestor(self):
+ foo = self.make_branch_and_tree('foo')
+ foo.commit('rev1')
+ bar = self.make_branch_and_tree('bar')
+ self.run_bzr('merge-directive ../bar', working_dir='foo')
+
+ def test_no_commits(self):
+ foo = self.make_branch_and_tree('foo')
+ bar = self.make_branch_and_tree('bar')
+ self.run_bzr_error(('No revisions to bundle.', ),
+ 'merge-directive ../bar', working_dir='foo')
+
+ def test_encoding_exact(self):
+ tree1, tree2 = self.prepare_merge_directive()
+ tree1.commit(u'messag\xe9')
+ self.run_bzr('merge-directive ../tree2') # no exception raised
+
+ def test_merge_directive_directory(self):
+ """Test --directory option"""
+ import re
+ re_timestamp = re.compile(r'^# timestamp: .*', re.M)
+ self.prepare_merge_directive()
+ md1 = self.run_bzr('merge-directive ../tree2')[0]
+ md1 = re_timestamp.sub('# timestamp: XXX', md1)
+ os.chdir('..')
+ md2 = self.run_bzr('merge-directive --directory tree1 tree2')[0]
+ md2 = re_timestamp.sub('# timestamp: XXX', md2)
+ self.assertEqualDiff(md1.replace('../tree2', 'tree2'), md2)
diff --git a/bzrlib/tests/blackbox/test_missing.py b/bzrlib/tests/blackbox/test_missing.py
new file mode 100644
index 0000000..8e3217c
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_missing.py
@@ -0,0 +1,262 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for bzr missing."""
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+
+
+class TestMissing(tests.TestCaseWithTransport):
+
+ def assertMessages(self, out, must_have=(), must_not_have=()):
+ """Check if commit messages are in or not in the output"""
+ for m in must_have:
+ self.assertContainsRe(out, r'\nmessage:\n %s\n' % m)
+ for m in must_not_have:
+ self.assertNotContainsRe(out, r'\nmessage:\n %s\n' % m)
+
+ def test_missing_quiet(self):
+ # <https://bugs.launchpad.net/bzr/+bug/284748>
+ # create a source branch
+ #
+ # XXX: This still needs a test that missing is quiet when there are
+ # missing revisions.
+ a_tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('a', 'initial\n')])
+ a_tree.add('a')
+ a_tree.commit(message='initial')
+
+ out, err = self.run_bzr('missing -q .')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_missing(self):
+ missing_one = "You are missing 1 revision:"
+ extra_one = "You have 1 extra revision:"
+
+ # create a source branch
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/a', 'initial\n')])
+ a_tree.add('a')
+ a_tree.commit(message='initial')
+
+ # clone and add a differing revision
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('b/a', 'initial\nmore\n')])
+ b_tree.commit(message='more')
+
+ def run_missing(args, retcode=1, working_dir=None):
+ out, err = self.run_bzr(['missing'] + args,
+ retcode=retcode, working_dir=working_dir)
+ # we do not expect any error output.
+ self.assertEqual('', err)
+ return out.splitlines()
+
+ def run_missing_a(args, retcode=1):
+ return run_missing(['../a'] + args,
+ retcode=retcode, working_dir='b')
+
+ def run_missing_b(args, retcode=1):
+ return run_missing(['../b'] + args,
+ retcode=retcode, working_dir='a')
+
+ # run missing in a against b
+ # this should not require missing to take out a write lock on a
+ # or b. So we take a write lock on both to test that at the same
+ # time. This may let the test pass while the default branch is an
+ # os-locking branch, but it will trigger failures with lockdir based
+ # branches.
+ a_branch = a_tree.branch
+ a_branch.lock_write()
+ b_branch = b_tree.branch
+ b_branch.lock_write()
+
+ lines = run_missing_b([])
+ # we're missing the extra revision here
+ self.assertEqual(missing_one, lines[0])
+ # and we expect 8 lines of output which we trust at the moment to be
+ # good.
+ self.assertEqual(8, len(lines))
+ # unlock the branches for the rest of the test
+ a_branch.unlock()
+ b_branch.unlock()
+
+ # get extra revision from b
+ a_tree.merge_from_branch(b_branch)
+ a_tree.commit(message='merge')
+
+ # compare again, but now we have the 'merge' commit extra
+ lines = run_missing_b([])
+ self.assertEqual(extra_one, lines[0])
+ self.assertLength(8, lines)
+
+ lines2 = run_missing_b(['--mine-only'])
+ self.assertEqual(lines, lines2)
+
+ lines3 = run_missing_b(['--theirs-only'], retcode=0)
+ self.assertEqualDiff('Other branch has no new revisions.', lines3[0])
+
+ # relative to a, missing the 'merge' commit
+ lines = run_missing_a([])
+ self.assertEqual(missing_one, lines[0])
+ self.assertLength(8, lines)
+
+ lines2 = run_missing_a(['--theirs-only'])
+ self.assertEqual(lines, lines2)
+
+ lines3 = run_missing_a(['--mine-only'], retcode=0)
+ self.assertEqualDiff('This branch has no new revisions.', lines3[0])
+
+ lines4 = run_missing_a(['--short'])
+ self.assertLength(4, lines4)
+
+ lines4a = run_missing_a(['-S'])
+ self.assertEqual(lines4, lines4a)
+
+ lines5 = run_missing_a(['--line'])
+ self.assertLength(2, lines5)
+
+ lines6 = run_missing_a(['--reverse'])
+ self.assertEqual(lines6, lines)
+
+ lines7 = run_missing_a(['--show-ids'])
+ self.assertLength(11, lines7)
+
+ lines8 = run_missing_a(['--verbose'])
+ self.assertEqual("modified:", lines8[-2])
+ self.assertEqual(" a", lines8[-1])
+
+ self.assertEqualDiff('Other branch has no new revisions.',
+ run_missing_b(['--theirs-only'], retcode=0)[0])
+
+ # after a pull we're back on track
+ b_tree.pull(a_branch)
+ self.assertEqualDiff("Branches are up to date.",
+ run_missing_b([], retcode=0)[0])
+ self.assertEqualDiff('Branches are up to date.',
+ run_missing_a([], retcode=0)[0])
+ # If you supply mine or theirs you only know one side is up to date
+ self.assertEqualDiff('This branch has no new revisions.',
+ run_missing_a(['--mine-only'], retcode=0)[0])
+ self.assertEqualDiff('Other branch has no new revisions.',
+ run_missing_a(['--theirs-only'], retcode=0)[0])
+
+ def test_missing_filtered(self):
+ # create a source branch
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/a', 'initial\n')])
+ a_tree.add('a')
+ a_tree.commit(message='r1')
+ # clone and add differing revisions
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+
+ for i in range(2, 6):
+ a_tree.commit(message='a%d' % i)
+ b_tree.commit(message='b%d' % i)
+
+ # local
+ out,err = self.run_bzr('missing ../b --my-revision 3',
+ retcode=1, working_dir='a')
+ self.assertMessages(out, ('a3', 'b2', 'b3', 'b4', 'b5'), ('a2', 'a4'))
+
+ out,err = self.run_bzr('missing ../b --my-revision 3..4',
+ retcode=1, working_dir='a')
+ self.assertMessages(out, ('a3', 'a4'), ('a2', 'a5'))
+
+ #remote
+ out,err = self.run_bzr('missing ../b -r 3',
+ retcode=1, working_dir='a')
+ self.assertMessages(out, ('a2', 'a3', 'a4', 'a5', 'b3'), ('b2', 'b4'))
+
+ out,err = self.run_bzr('missing ../b -r 3..4',
+ retcode=1, working_dir='a')
+ self.assertMessages(out, ('b3', 'b4'), ('b2', 'b5'))
+
+ #both
+ out,err = self.run_bzr('missing ../b --my-revision 3..4 -r 3..4',
+ retcode=1, working_dir='a')
+ self.assertMessages(out, ('a3', 'a4', 'b3', 'b4'),
+ ('a2', 'a5', 'b2', 'b5'))
+
+ def test_missing_check_last_location(self):
+ # check that last location shown as filepath not file URL
+
+ # create a source branch
+ wt = self.make_branch_and_tree('a')
+ b = wt.branch
+ self.build_tree(['a/foo'])
+ wt.add('foo')
+ wt.commit('initial')
+
+ location = osutils.getcwd() + '/a/'
+
+ # clone
+ b.bzrdir.sprout('b')
+
+ # check last location
+ lines, err = self.run_bzr('missing', working_dir='b')
+ self.assertEquals('Using saved parent location: %s\n'
+ 'Branches are up to date.\n' % location,
+ lines)
+ self.assertEquals('', err)
+
+ def test_missing_directory(self):
+ """Test --directory option"""
+
+ # create a source branch
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/a', 'initial\n')])
+ a_tree.add('a')
+ a_tree.commit(message='initial')
+
+ # clone and add a differing revision
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('b/a', 'initial\nmore\n')])
+ b_tree.commit(message='more')
+
+ out2, err2 = self.run_bzr('missing --directory a b', retcode=1)
+ out1, err1 = self.run_bzr('missing ../b', retcode=1, working_dir='a')
+ self.assertEqualDiff(out1, out2)
+ self.assertEqualDiff(err1, err2)
+
+ def test_missing_tags(self):
+ """Test showing tags"""
+
+ # create a source branch
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/a', 'initial\n')])
+ a_tree.add('a')
+ a_tree.commit(message='initial')
+
+ # clone and add a differing revision
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('b/a', 'initial\nmore\n')])
+ b_tree.commit(message='more')
+ b_tree.branch.tags.set_tag('a-tag', b_tree.last_revision())
+
+ for log_format in ['long', 'short', 'line']:
+ out, err = self.run_bzr(
+ 'missing --log-format={0} ../a'.format(log_format),
+ working_dir='b', retcode=1)
+ self.assertContainsString(out, 'a-tag')
+
+ out, err = self.run_bzr(
+ 'missing --log-format={0} ../b'.format(log_format),
+ working_dir='a', retcode=1)
+ self.assertContainsString(out, 'a-tag')
diff --git a/bzrlib/tests/blackbox/test_mkdir.py b/bzrlib/tests/blackbox/test_mkdir.py
new file mode 100644
index 0000000..ff4a743
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_mkdir.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr mkdir.
+"""
+
+import os
+from bzrlib import tests
+
+
+class TestMkdir(tests.TestCaseWithTransport):
+
+ def test_mkdir(self):
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr(['mkdir', 'somedir'])
+ self.assertEquals(tree.kind(tree.path2id('somedir')), "directory")
+
+ def test_mkdir_multi(self):
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr(['mkdir', 'somedir', 'anotherdir'])
+ self.assertEquals(tree.kind(tree.path2id('somedir')), "directory")
+ self.assertEquals(tree.kind(tree.path2id('anotherdir')), "directory")
+
+ def test_mkdir_parents(self):
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr(['mkdir', '-p', 'somedir/foo'])
+ self.assertEquals(tree.kind(tree.path2id('somedir/foo')), "directory")
+
+ def test_mkdir_parents_existing_versioned_dir(self):
+ tree = self.make_branch_and_tree('.')
+ tree.mkdir('somedir')
+ self.assertEquals(tree.kind(tree.path2id('somedir')), "directory")
+ self.run_bzr(['mkdir', '-p', 'somedir'])
+
+ def test_mkdir_parents_existing_unversioned_dir(self):
+ tree = self.make_branch_and_tree('.')
+ os.mkdir('somedir')
+ self.run_bzr(['mkdir', '-p', 'somedir'])
+ self.assertEquals(tree.kind(tree.path2id('somedir')), "directory")
+
+ def test_mkdir_parents_with_unversioned_parent(self):
+ tree = self.make_branch_and_tree('.')
+ os.mkdir('somedir')
+ self.run_bzr(['mkdir', '-p', 'somedir/foo'])
+ self.assertEquals(tree.kind(tree.path2id('somedir/foo')), "directory")
diff --git a/bzrlib/tests/blackbox/test_modified.py b/bzrlib/tests/blackbox/test_modified.py
new file mode 100644
index 0000000..b35c0ac
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_modified.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for 'bzr modified', which shows modified files."""
+
+import os
+
+from bzrlib.branch import Branch
+from bzrlib.tests import TestCaseWithTransport
+
+class TestModified(TestCaseWithTransport):
+
+ def test_modified(self):
+ """Test that 'modified' command reports modified files"""
+ self._test_modified('a', 'a')
+
+ def test_modified_with_spaces(self):
+ """Test that 'modified' command reports modified files with spaces in their names quoted"""
+ self._test_modified('a filename with spaces', '"a filename with spaces"')
+
+ def _test_modified(self, name, output):
+
+ def check_modified(expected, null=False):
+ command = 'modified'
+ if null:
+ command += ' --null'
+ out, err = self.run_bzr(command)
+ self.assertEquals(out, expected)
+ self.assertEquals(err, '')
+
+ # in empty directory, nothing modified
+ tree = self.make_branch_and_tree('.')
+ check_modified('')
+
+ # with unknown file, still nothing modified
+ self.build_tree_contents([(name, 'contents of %s\n' % (name))])
+ check_modified('')
+
+ # after add, not modified
+ tree.add(name)
+ check_modified('')
+
+ # after commit, not modified
+ tree.commit(message='add %s' % output)
+ check_modified('')
+
+ # modify the file
+ self.build_tree_contents([(name, 'changed\n')])
+ check_modified(output + '\n')
+
+ # check null seps - use the unquoted raw name here
+ check_modified(name + '\0', null=True)
+
+ # now commit the file and it's no longer modified
+ tree.commit(message='modified %s' %(name))
+ check_modified('')
+
+ def test_modified_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ tree.add('README')
+ tree.commit('r1')
+ self.build_tree_contents([('a/README', 'changed\n')])
+ out, err = self.run_bzr(['modified', '--directory=a'])
+ self.assertEquals('README\n', out)
diff --git a/bzrlib/tests/blackbox/test_mv.py b/bzrlib/tests/blackbox/test_mv.py
new file mode 100644
index 0000000..0b18317
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_mv.py
@@ -0,0 +1,531 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test for 'bzr mv'"""
+
+import os
+
+import bzrlib.branch
+from bzrlib import (
+ osutils,
+ workingtree,
+ )
+
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.features import (
+ CaseInsensitiveFilesystemFeature,
+ SymlinkFeature,
+ UnicodeFilenameFeature,
+ )
+
+
+class TestMove(TestCaseWithTransport):
+
+ def assertMoved(self, from_path, to_path):
+ """Assert that to_path exists and is versioned, but from_path is not."""
+ self.assertPathDoesNotExist(from_path)
+ self.assertNotInWorkingTree(from_path)
+
+ self.assertPathExists(to_path)
+ self.assertInWorkingTree(to_path)
+
+ def test_mv_modes(self):
+ """Test two modes of operation for mv"""
+ tree = self.make_branch_and_tree('.')
+ files = self.build_tree(['a', 'c', 'subdir/'])
+ tree.add(['a', 'c', 'subdir'])
+
+ self.run_bzr('mv a b')
+ self.assertMoved('a','b')
+
+ self.run_bzr('mv b subdir')
+ self.assertMoved('b','subdir/b')
+
+ self.run_bzr('mv subdir/b a')
+ self.assertMoved('subdir/b','a')
+
+ self.run_bzr('mv a c subdir')
+ self.assertMoved('a','subdir/a')
+ self.assertMoved('c','subdir/c')
+
+ self.run_bzr('mv subdir/a subdir/newa')
+ self.assertMoved('subdir/a','subdir/newa')
+
+ def test_mv_unversioned(self):
+ self.build_tree(['unversioned.txt'])
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not rename unversioned.txt => elsewhere."
+ " .*unversioned.txt is not versioned\.$"],
+ 'mv unversioned.txt elsewhere')
+
+ def test_mv_nonexisting(self):
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not rename doesnotexist => somewhereelse."
+ " .*doesnotexist is not versioned\.$"],
+ 'mv doesnotexist somewhereelse')
+
+ def test_mv_unqualified(self):
+ self.run_bzr_error(['^bzr: ERROR: missing file argument$'], 'mv')
+
+ def test_mv_invalid(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['test.txt', 'sub1/'])
+ tree.add(['test.txt'])
+
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not move to sub1: sub1 is not versioned\.$"],
+ 'mv test.txt sub1')
+
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not move test.txt => .*hello.txt: "
+ "sub1 is not versioned\.$"],
+ 'mv test.txt sub1/hello.txt')
+
+ def test_mv_dirs(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['hello.txt', 'sub1/'])
+ tree.add(['hello.txt', 'sub1'])
+
+ self.run_bzr('mv sub1 sub2')
+ self.assertMoved('sub1','sub2')
+
+ self.run_bzr('mv hello.txt sub2')
+ self.assertMoved('hello.txt','sub2/hello.txt')
+
+ self.build_tree(['sub1/'])
+ tree.add(['sub1'])
+ self.run_bzr('mv sub2/hello.txt sub1')
+ self.assertMoved('sub2/hello.txt','sub1/hello.txt')
+
+ self.run_bzr('mv sub2 sub1')
+ self.assertMoved('sub2','sub1/sub2')
+
+ def test_mv_relative(self):
+ self.build_tree(['sub1/', 'sub1/sub2/', 'sub1/hello.txt'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['sub1', 'sub1/sub2', 'sub1/hello.txt'])
+
+ self.run_bzr('mv ../hello.txt .', working_dir='sub1/sub2')
+ self.assertPathExists('sub1/sub2/hello.txt')
+
+ self.run_bzr('mv sub2/hello.txt .', working_dir='sub1')
+ self.assertMoved('sub1/sub2/hello.txt','sub1/hello.txt')
+
+ def test_mv_change_case_file(self):
+ # test for bug #77740 (mv unable change filename case on Windows)
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['test.txt'])
+ tree.add(['test.txt'])
+ self.run_bzr('mv test.txt Test.txt')
+ # we can't use failUnlessExists on case-insensitive filesystem
+ # so try to check shape of the tree
+ shape = sorted(os.listdir(u'.'))
+ self.assertEqual(['.bzr', 'Test.txt'], shape)
+ self.assertInWorkingTree('Test.txt')
+ self.assertNotInWorkingTree('test.txt')
+
+ def test_mv_change_case_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo/'])
+ tree.add(['foo'])
+ self.run_bzr('mv foo Foo')
+ # we can't use failUnlessExists on case-insensitive filesystem
+ # so try to check shape of the tree
+ shape = sorted(os.listdir(u'.'))
+ self.assertEqual(['.bzr', 'Foo'], shape)
+ self.assertInWorkingTree('Foo')
+ self.assertNotInWorkingTree('foo')
+
+ def test_mv_change_case_dir_w_files(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo/', 'foo/bar'])
+ tree.add(['foo'])
+ self.run_bzr('mv foo Foo')
+ # we can't use failUnlessExists on case-insensitive filesystem
+ # so try to check shape of the tree
+ shape = sorted(os.listdir(u'.'))
+ self.assertEqual(['.bzr', 'Foo'], shape)
+ self.assertInWorkingTree('Foo')
+ self.assertNotInWorkingTree('foo')
+
+ def test_mv_file_to_wrong_case_dir(self):
+ self.requireFeature(CaseInsensitiveFilesystemFeature)
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo/', 'bar'])
+ tree.add(['foo', 'bar'])
+ out, err = self.run_bzr('mv bar Foo', retcode=3)
+ self.assertEquals('', out)
+ self.assertEquals(
+ 'bzr: ERROR: Could not move to Foo: Foo is not versioned.\n',
+ err)
+
+ def test_mv_smoke_aliases(self):
+ # just test that aliases for mv exist; if their behaviour is changed in
+ # the future, then extend the tests.
+ self.build_tree(['a'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a'])
+
+ self.run_bzr('move a b')
+ self.run_bzr('rename b a')
+
+ def test_mv_no_root(self):
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr_error(
+ ["bzr: ERROR: can not move root of branch"],
+ 'mv . a')
+
+ def test_mv_through_symlinks(self):
+ self.requireFeature(SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b'])
+ os.symlink('a', 'c')
+ os.symlink('.', 'd')
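+ # 'c' is a symlink to the versioned directory 'a', so 'mv c/b b' must
+ # resolve the source path through the symlink.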
+ tree.add(['a', 'a/b', 'c'], ['a-id', 'b-id', 'c-id'])
+ self.run_bzr('mv c/b b')
+ tree = workingtree.WorkingTree.open('.')
+ self.assertEqual('b-id', tree.path2id('b'))
+
+ def test_mv_already_moved_file(self):
+ """Test bzr mv original_file to moved_file.
+
+ Tests if a file which has already been moved by an external tool
+ is handled correctly by bzr mv.
+ Setup: a is in the working tree, b does not exist.
+ User does: mv a b; bzr mv a b
+ """
+ self.build_tree(['a'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a'])
+
+ osutils.rename('a', 'b')
+ self.run_bzr('mv a b')
+ self.assertMoved('a','b')
+
+ def test_mv_already_moved_file_to_versioned_target(self):
+ """Test bzr mv existing_file to versioned_file.
+
+ Tests if an attempt to move an existing versioned file
+ to another versioned file will fail.
+ Setup: a and b are in the working tree.
+ User does: rm b; mv a b; bzr mv a b
+ """
+ self.build_tree(['a', 'b'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a', 'b'])
+
+ os.remove('b')
+ osutils.rename('a', 'b')
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not move a => b. b is already versioned\.$"],
+ 'mv a b')
+ #check that nothing changed
+ self.assertPathDoesNotExist('a')
+ self.assertPathExists('b')
+
+ def test_mv_already_moved_file_into_subdir(self):
+ """Test bzr mv original_file to versioned_directory/file.
+
+ Tests if a file which has already been moved into a versioned
+ directory by an external tool is handled correctly by bzr mv.
+ Setup: a and sub/ are in the working tree.
+ User does: mv a sub/a; bzr mv a sub/a
+ """
+ self.build_tree(['a', 'sub/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a', 'sub'])
+
+ osutils.rename('a', 'sub/a')
+ self.run_bzr('mv a sub/a')
+ self.assertMoved('a','sub/a')
+
+ def test_mv_already_moved_file_into_unversioned_subdir(self):
+ """Test bzr mv original_file to unversioned_directory/file.
+
+ Tests if an attempt to move an existing versioned file
+ into an unversioned directory will fail.
+ Setup: a is in the working tree, sub/ is not.
+ User does: mv a sub/a; bzr mv a sub/a
+ """
+ self.build_tree(['a', 'sub/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a'])
+
+ osutils.rename('a', 'sub/a')
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not move a => a: sub is not versioned\.$"],
+ 'mv a sub/a')
+ self.assertPathDoesNotExist('a')
+ self.assertPathExists('sub/a')
+
+ def test_mv_already_moved_files_into_subdir(self):
+ """Test bzr mv original_files to versioned_directory.
+
+ Tests if files which have already been moved into a versioned
+ directory by an external tool are handled correctly by bzr mv.
+ Setup: a1, a2, sub are in the working tree.
+ User does: mv a1 sub/.; bzr mv a1 a2 sub
+ """
+ self.build_tree(['a1', 'a2', 'sub/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1', 'a2', 'sub'])
+
+ osutils.rename('a1', 'sub/a1')
+ self.run_bzr('mv a1 a2 sub')
+ self.assertMoved('a1','sub/a1')
+ self.assertMoved('a2','sub/a2')
+
+ def test_mv_already_moved_files_into_unversioned_subdir(self):
+ """Test bzr mv original_file to unversioned_directory.
+
+ Tests if an attempt to move existing versioned files
+ into an unversioned directory will fail.
+ Setup: a1, a2 are in the working tree, sub is not.
+ User does: mv a1 sub/.; bzr mv a1 a2 sub
+ """
+ self.build_tree(['a1', 'a2', 'sub/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1', 'a2'])
+
+ osutils.rename('a1', 'sub/a1')
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not move to sub. sub is not versioned\.$"],
+ 'mv a1 a2 sub')
+ self.assertPathDoesNotExist('a1')
+ self.assertPathExists('sub/a1')
+ self.assertPathExists('a2')
+ self.assertPathDoesNotExist('sub/a2')
+
+ def test_mv_already_moved_file_forcing_after(self):
+ """Test bzr mv versioned_file to unversioned_file.
+
+ Tests if an attempt to move an existing versioned file to an existing
+ unversioned file will fail, informing the user to use the --after
+ option to force this.
+ Setup: a is in the working tree, b not versioned.
+ User does: mv a b; touch a; bzr mv a b
+ """
+ self.build_tree(['a', 'b'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a'])
+
+ osutils.rename('a', 'b')
+ self.build_tree(['a']) #touch a
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not rename a => b because both files exist."
+ " \(Use --after to tell bzr about a rename that has already"
+ " happened\)$"],
+ 'mv a b')
+ self.assertPathExists('a')
+ self.assertPathExists('b')
+
+ def test_mv_already_moved_file_using_after(self):
+ """Test bzr mv --after versioned_file to unversioned_file.
+
+ Tests if an existing versioned file can be forced to move to an
+ existing unversioned file using the --after option, with the result
+ that Bazaar considers the unversioned_file to be moved from
+ versioned_file and versioned_file becomes unversioned.
+ Setup: a is in the working tree and b exists.
+ User does: mv a b; touch a; bzr mv a b --after
+ Resulting in a => b and a is unknown.
+ """
+ self.build_tree(['a', 'b'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a'])
+ osutils.rename('a', 'b')
+ self.build_tree(['a']) #touch a
+
+ self.run_bzr('mv a b --after')
+ self.assertPathExists('a')
+ self.assertNotInWorkingTree('a')  # a should be unknown now.
+ self.assertPathExists('b')
+ self.assertInWorkingTree('b')
+
+ def test_mv_already_moved_files_forcing_after(self):
+ """Test bzr mv versioned_files to directory/unversioned_file.
+
+ Tests if an attempt to move an existing versioned file to an existing
+ unversioned file in some other directory will fail, informing the user
+ to use the --after option to force this.
+
+ Setup: a1, a2, sub are versioned and in the working tree,
+ sub/a1, sub/a2 are in working tree.
+ User does: mv a* sub; touch a1; touch a2; bzr mv a1 a2 sub
+ """
+ self.build_tree(['a1', 'a2', 'sub/', 'sub/a1', 'sub/a2'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1', 'a2', 'sub'])
+ osutils.rename('a1', 'sub/a1')
+ osutils.rename('a2', 'sub/a2')
+ self.build_tree(['a1'])  # touch a1
+ self.build_tree(['a2'])  # touch a2
+
+ self.run_bzr_error(
+ ["^bzr: ERROR: Could not rename a1 => sub/a1 because both files"
+ " exist. \(Use --after to tell bzr about a rename that has already"
+ " happened\)$"],
+ 'mv a1 a2 sub')
+ self.assertPathExists('a1')
+ self.assertPathExists('a2')
+ self.assertPathExists('sub/a1')
+ self.assertPathExists('sub/a2')
+
+ def test_mv_already_moved_files_using_after(self):
+ """Test bzr mv --after versioned_file to directory/unversioned_file.
+
+ Tests if an existing versioned file can be forced to move to an
+ existing unversioned file in some other directory using the --after
+ option, with the result that bazaar considers
+ directory/unversioned_file to have been moved from versioned_file and
+ versioned_file becomes unversioned.
+
+ Setup: a1, a2, sub are versioned and in the working tree,
+ sub/a1, sub/a2 are in working tree.
+ User does: mv a* sub; touch a1; touch a2; bzr mv a1 a2 sub --after
+ """
+ self.build_tree(['a1', 'a2', 'sub/', 'sub/a1', 'sub/a2'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1', 'a2', 'sub'])
+ osutils.rename('a1', 'sub/a1')
+ osutils.rename('a2', 'sub/a2')
+ self.build_tree(['a1'])  # touch a1
+ self.build_tree(['a2'])  # touch a2
+
+ self.run_bzr('mv a1 a2 sub --after')
+ self.assertPathExists('a1')
+ self.assertPathExists('a2')
+ self.assertPathExists('sub/a1')
+ self.assertPathExists('sub/a2')
+ self.assertInWorkingTree('sub/a1')
+ self.assertInWorkingTree('sub/a2')
+
+ def test_mv_already_moved_directory(self):
+ """Use `bzr mv a b` to mark a directory as renamed.
+
+ https://bugs.launchpad.net/bzr/+bug/107967/
+ """
+ self.build_tree(['a/', 'c/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a', 'c'])
+ osutils.rename('a', 'b')
+ osutils.rename('c', 'd')
+ # mv a b should work just like it does for already renamed files
+ self.run_bzr('mv a b')
+ self.assertPathDoesNotExist('a')
+ self.assertNotInWorkingTree('a')
+ self.assertPathExists('b')
+ self.assertInWorkingTree('b')
+ # and --after should work, too (technically it's ignored)
+ self.run_bzr('mv --after c d')
+ self.assertPathDoesNotExist('c')
+ self.assertNotInWorkingTree('c')
+ self.assertPathExists('d')
+ self.assertInWorkingTree('d')
+
+ def make_abcd_tree(self):
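+ """Commit files a and c, then rename them to b and d on disk only."""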
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/c'])
+ tree.add(['a', 'c'])
+ tree.commit('record old names')
+ osutils.rename('tree/a', 'tree/b')
+ osutils.rename('tree/c', 'tree/d')
+ return tree
+
+ def test_mv_auto(self):
+ self.make_abcd_tree()
+ out, err = self.run_bzr('mv --auto', working_dir='tree')
+ self.assertEqual(out, '')
+ self.assertEqual(err, 'a => b\nc => d\n')
+ tree = workingtree.WorkingTree.open('tree')
+ self.assertIsNot(None, tree.path2id('b'))
+ self.assertIsNot(None, tree.path2id('d'))
+
+ def test_mv_auto_one_path(self):
+ self.make_abcd_tree()
+ out, err = self.run_bzr('mv --auto tree')
+ self.assertEqual(out, '')
+ self.assertEqual(err, 'a => b\nc => d\n')
+ tree = workingtree.WorkingTree.open('tree')
+ self.assertIsNot(None, tree.path2id('b'))
+ self.assertIsNot(None, tree.path2id('d'))
+
+ def test_mv_auto_two_paths(self):
+ self.make_abcd_tree()
+ out, err = self.run_bzr('mv --auto tree tree2', retcode=3)
+ self.assertEqual('bzr: ERROR: Only one path may be specified to'
+ ' --auto.\n', err)
+
+ def test_mv_auto_dry_run(self):
+ self.make_abcd_tree()
+ out, err = self.run_bzr('mv --auto --dry-run', working_dir='tree')
+ self.assertEqual(out, '')
+ self.assertEqual(err, 'a => b\nc => d\n')
+ tree = workingtree.WorkingTree.open('tree')
+ self.assertIsNot(None, tree.path2id('a'))
+ self.assertIsNot(None, tree.path2id('c'))
+
+ def test_mv_no_auto_dry_run(self):
+ self.make_abcd_tree()
+ out, err = self.run_bzr('mv c d --dry-run',
+ working_dir='tree', retcode=3)
+ self.assertEqual('bzr: ERROR: --dry-run requires --auto.\n', err)
+
+ def test_mv_auto_after(self):
+ self.make_abcd_tree()
+ out, err = self.run_bzr('mv --auto --after', working_dir='tree',
+ retcode=3)
+ self.assertEqual('bzr: ERROR: --after cannot be specified with'
+ ' --auto.\n', err)
+
+ def test_mv_quiet(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['aaa'])
+ tree.add(['aaa'])
+ out, err = self.run_bzr('mv --quiet aaa bbb')
+ self.assertEqual(out, '')
+ self.assertEqual(err, '')
+
+ def test_mv_readonly_lightweight_checkout(self):
+ branch = self.make_branch('foo')
+ branch = bzrlib.branch.Branch.open(self.get_readonly_url('foo'))
+ tree = branch.create_checkout('tree', lightweight=True)
+ self.build_tree(['tree/path'])
+ tree.add('path')
+ # If this fails, the tree is trying to acquire a branch lock, which it
+ # shouldn't.
+ self.run_bzr(['mv', 'tree/path', 'tree/path2'])
+
+ def test_mv_unversioned_non_ascii(self):
+ """Clear error on mv of an unversioned non-ascii file, see lp:707954"""
+ self.requireFeature(UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree([u"\xA7"])
+ out, err = self.run_bzr_error(["Could not rename", "not versioned"],
+ ["mv", u"\xA7", "b"])
+
+ def test_mv_removed_non_ascii(self):
+ """Clear error on mv of a removed non-ascii file, see lp:898541"""
+ self.requireFeature(UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree([u"\xA7"])
+ tree.add([u"\xA7"])
+ tree.commit(u"Adding \xA7")
+ os.remove(u"\xA7")
+ out, err = self.run_bzr_error(["Could not rename", "not exist"],
+ ["mv", u"\xA7", "b"])
diff --git a/bzrlib/tests/blackbox/test_nick.py b/bzrlib/tests/blackbox/test_nick.py
new file mode 100644
index 0000000..a7b7ee2
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_nick.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2006-2010, 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for bzr nick."""
+
+import bzrlib
+from bzrlib import (
+ branch,
+ osutils,
+ tests,
+ )
+
+
+class TestNick(tests.TestCaseWithTransport):
+
+ def assertNick(self, expected, working_dir='.', explicit=None,
+ directory=None):
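+ """Check the output of 'bzr nick', optionally verifying whether the
+ nick is explicitly set in the branch configuration."""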
+ cmd = ['nick']
+ if directory is not None:
+ cmd.extend(['--directory', directory])
+ # The nick command outputs the nick on a single line
+ actual = self.run_bzr(cmd, working_dir=working_dir)[0][:-1]
+ self.assertEquals(expected, actual)
+ if explicit is not None:
+ br = branch.Branch.open(working_dir)
+ conf = br.get_config()
+ self.assertEquals(explicit, conf.has_explicit_nickname())
+ if explicit:
+ self.assertEquals(expected, conf._get_explicit_nickname())
+
+ def test_nick_command(self):
+ """bzr nick for viewing, setting nicknames"""
+ self.make_branch_and_tree('me.dev')
+ self.assertNick('me.dev', working_dir='me.dev')
+ # set the nickname
+ self.run_bzr("nick moo", working_dir='me.dev')
+ self.assertNick('moo', working_dir='me.dev')
+
+ def test_autonick_urlencoded(self):
+ # https://bugs.launchpad.net/bzr/+bug/66857 -- nick was printed
+ # urlencoded but shouldn't be
+ self.make_branch_and_tree('!repo')
+ self.assertNick('!repo', working_dir='!repo')
+
+ def test_bound_nick(self):
+ """Bind should not update implicit nick."""
+ base = self.make_branch_and_tree('base')
+ child = self.make_branch_and_tree('child')
+ self.assertNick('child', working_dir='child', explicit=False)
+
+ self.run_bzr('bind ../base', working_dir='child')
+ self.assertNick(base.branch.nick, working_dir='child', explicit=False)
+
+ def test_bound_nick_explicit(self):
+ """Bind should update explicit nick."""
+ base = self.make_branch_and_tree('base')
+ child = self.make_branch_and_tree('child')
+ self.run_bzr("nick explicit_nick", working_dir='child')
+ self.assertNick('explicit_nick', working_dir='child', explicit=True)
+ self.run_bzr('bind ../base', working_dir='child')
+ self.assertNick(base.branch.nick, working_dir='child', explicit=True)
+
+ def test_boundless_nick(self):
+ """Nick defaults to implicit local nick when bound branch is AWOL"""
+ base = self.make_branch_and_tree('base')
+ child = self.make_branch_and_tree('child')
+ self.run_bzr('bind ../base', working_dir='child')
+ self.assertNick(base.branch.nick, working_dir='child', explicit=False)
+ osutils.rmtree('base')
+ self.assertNick('child', working_dir='child', explicit=False)
+
+ def test_nick_directory(self):
+ """Test --directory option"""
+ self.make_branch_and_tree('me.dev')
+ self.assertNick('me.dev', directory='me.dev')
+ self.run_bzr(['nick', '-d', 'me.dev', 'moo'])
+ self.assertNick('moo', directory='me.dev')
diff --git a/bzrlib/tests/blackbox/test_non_ascii.py b/bzrlib/tests/blackbox/test_non_ascii.py
new file mode 100644
index 0000000..c6b5ffc
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_non_ascii.py
@@ -0,0 +1,551 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for bzr handling non-ascii characters."""
+
+import os
+import sys
+
+from bzrlib import (
+ osutils,
+ tests,
+ urlutils,
+ )
+from bzrlib.tests import EncodingAdapter
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestNonAscii(tests.TestCaseWithTransport):
+ """Test that bzr handles files/committers/etc which are non-ascii."""
+
+ scenarios = EncodingAdapter.encoding_scenarios
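+ # Each scenario supplies self.encoding and a self.info dict with sample
+ # 'committer', 'message', 'filename' and 'directory' values for that
+ # encoding.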
+
+ def setUp(self):
+ super(TestNonAscii, self).setUp()
+ self._check_can_encode_paths()
+
+ self.overrideAttr(osutils, '_cached_user_encoding', self.encoding)
+ email = self.info['committer'] + ' <joe@foo.com>'
+ self.overrideEnv('BZR_EMAIL', email.encode(osutils.get_user_encoding()))
+ self.create_base()
+
+ def run_bzr_decode(self, args, encoding=None, fail=False, retcode=None,
+ working_dir=None):
+ """Run bzr and decode the output into a particular encoding.
+
+ Returns a string containing the stdout output from bzr.
+
+ :param fail: If true, the operation is expected to fail with
+ a UnicodeError.
+ """
+ if encoding is None:
+ encoding = osutils.get_user_encoding()
+ try:
+ out = self.run_bzr(args,
+ output_encoding=encoding, encoding=encoding,
+ retcode=retcode, working_dir=working_dir)[0]
+ return out.decode(encoding)
+ except UnicodeError, e:
+ if not fail:
+ raise
+ else:
+ # This command, run from the regular command line, will give a
+ # traceback to the user. That's not really good for a situation
+ # that can be provoked just by the interaction of their input data
+ # and locale, as some of these are. What would be better?
+ if fail:
+ self.fail("Expected UnicodeError not raised")
+
+ def _check_OSX_can_roundtrip(self, path, fs_enc=None):
+ """Stop the test if it's about to fail or errors out.
+
+ Until we get proper support on OSX for accented paths (in fact, any
+ path whose NFD decomposition is different than the NFC one), this is
+ the best way to keep test active (as opposed to disabling them
+ completely). This is a stop gap. The tests should at least be rewritten
+ so that the failing ones are clearly separated from the passing ones.
+ """
+ if fs_enc is None:
+ fs_enc = osutils._fs_enc
+ if sys.platform == 'darwin':
+ encoded = path.encode(fs_enc)
+ import unicodedata
+ normal_thing = unicodedata.normalize('NFD', path)
+ mac_encoded = normal_thing.encode(fs_enc)
+ if mac_encoded != encoded:
+ self.knownFailure(
+ 'Unable to roundtrip path %r on OSX filesystem'
+ ' using encoding "%s"'
+ % (path, fs_enc))
+
+ def _check_can_encode_paths(self):
+ fs_enc = osutils._fs_enc
+ terminal_enc = osutils.get_terminal_encoding()
+ fname = self.info['filename']
+ dir_name = self.info['directory']
+ for thing in [fname, dir_name]:
+ try:
+ thing.encode(fs_enc)
+ except UnicodeEncodeError:
+ raise tests.TestSkipped(
+ 'Unable to represent path %r in filesystem encoding "%s"'
+ % (thing, fs_enc))
+ try:
+ thing.encode(terminal_enc)
+ except UnicodeEncodeError:
+ raise tests.TestSkipped(
+ 'Unable to represent path %r in terminal encoding "%s"'
+ ' (even though it is valid in filesystem encoding "%s")'
+ % (thing, terminal_enc, fs_enc))
+
+ def create_base(self):
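+ """Create a tree with three commits: an ascii file 'a', a file 'b'
+ with non-ascii content, and a file with a non-ascii name."""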
+ wt = self.make_branch_and_tree('.')
+ self.build_tree_contents([('a', 'foo\n')])
+ wt.add('a')
+ wt.commit('adding a')
+
+ self.build_tree_contents(
+ [('b', 'non-ascii \xFF\xFF\xFC\xFB\x00 in b\n')])
+ wt.add('b')
+ wt.commit(self.info['message'])
+
+ fname = self.info['filename']
+ self.build_tree_contents([(fname, 'unicode filename\n')])
+ wt.add(fname)
+ wt.commit(u'And a unicode file\n')
+ self.wt = wt
+ # FIXME: We don't check that the add went well, in fact, it doesn't on
+ # OSX (when LC_ALL is set correctly) because the added path doesn't
+ # match the one used on OSX. But checking here will require more
+ # invasive changes than adding the _check_OSX_can_roundtrip(), so I
+ # punt for now -- vila 20090702
+
+ def test_status(self):
+ self.build_tree_contents(
+ [(self.info['filename'], 'changed something\n')])
+ txt = self.run_bzr_decode('status')
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertEqual(u'modified:\n %s\n' % (self.info['filename'],), txt)
+
+ txt = self.run_bzr_decode('status', encoding='ascii')
+ expected = u'modified:\n %s\n' % (
+ self.info['filename'].encode('ascii', 'replace'),)
+ self.assertEqual(expected, txt)
+
+ def test_cat(self):
+ # bzr cat shouldn't change the contents
+ # using run_bzr since that doesn't decode
+ txt = self.run_bzr('cat b')[0]
+ self.assertEqual('non-ascii \xFF\xFF\xFC\xFB\x00 in b\n', txt)
+
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ txt = self.run_bzr(['cat', self.info['filename']])[0]
+ self.assertEqual('unicode filename\n', txt)
+
+ def test_cat_revision(self):
+ committer = self.info['committer']
+ txt = self.run_bzr_decode('cat-revision -r 1')
+ self.assertTrue(committer in txt,
+ 'failed to find %r in %r' % (committer, txt))
+
+ msg = self.info['message']
+ txt = self.run_bzr_decode('cat-revision -r 2')
+ self.assertTrue(msg in txt, 'failed to find %r in %r' % (msg, txt))
+
+ def test_mkdir(self):
+ txt = self.run_bzr_decode(['mkdir', self.info['directory']])
+ self.assertEqual(u'added %s\n' % self.info['directory'], txt)
+
+ # The text should be garbled, but the command should succeed
+ txt = self.run_bzr_decode(['mkdir', self.info['directory'] + '2'],
+ encoding='ascii')
+ expected = u'added %s2\n' % (self.info['directory'],)
+ expected = expected.encode('ascii', 'replace')
+ self.assertEqual(expected, txt)
+
+ def test_relpath(self):
+ txt = self.run_bzr_decode(['relpath', self.info['filename']])
+ self.assertEqual(self.info['filename'] + '\n', txt)
+
+ self.run_bzr_decode(['relpath', self.info['filename']],
+ encoding='ascii', fail=True)
+
+ def test_inventory(self):
+ txt = self.run_bzr_decode('inventory')
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertEqual(['a', 'b', self.info['filename']],
+ txt.splitlines())
+
+ # inventory should fail if unable to encode
+ self.run_bzr_decode('inventory', encoding='ascii', fail=True)
+
+ # We don't really care about the ids themselves,
+ # but the command shouldn't fail
+ txt = self.run_bzr_decode('inventory --show-ids')
+
+ def test_revno(self):
+ # There isn't a lot to test here, since revno should always
+ # be an integer
+ self.assertEqual('3\n', self.run_bzr_decode('revno'))
+ self.assertEqual('3\n', self.run_bzr_decode('revno', encoding='ascii'))
+
+ def test_revision_info(self):
+ self.run_bzr_decode('revision-info -r 1')
+
+ # TODO: jam 20060105 If we support revisions with non-ascii characters,
+ # this should be strict and fail.
+ self.run_bzr_decode('revision-info -r 1', encoding='ascii')
+
+ def test_mv(self):
+ fname1 = self.info['filename']
+ fname2 = self.info['filename'] + '2'
+ dirname = self.info['directory']
+
+ # fname1 already exists
+ self.run_bzr_decode(['mv', 'a', fname1], fail=True)
+
+ txt = self.run_bzr_decode(['mv', 'a', fname2])
+ self.assertEqual(u'a => %s\n' % fname2, txt)
+ self.assertPathDoesNotExist('a')
+ self.assertPathExists(fname2)
+
+ # After 'mv' we need to re-open the working tree
+ self.wt = self.wt.bzrdir.open_workingtree()
+ self.wt.commit('renamed to non-ascii')
+
+ os.mkdir(dirname)
+ self.wt.add(dirname)
+ txt = self.run_bzr_decode(['mv', fname1, fname2, dirname])
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertEqual([u'%s => %s/%s' % (fname1, dirname, fname1),
+ u'%s => %s/%s' % (fname2, dirname, fname2)],
+ txt.splitlines())
+
+ # The rename should still succeed
+ newpath = u'%s/%s' % (dirname, fname2)
+ txt = self.run_bzr_decode(['mv', newpath, 'a'], encoding='ascii')
+ self.assertPathExists('a')
+ self.assertEqual(newpath.encode('ascii', 'replace') + ' => a\n', txt)
+
+ def test_branch(self):
+ # We should be able to branch into a directory that
+ # has a unicode name, even if we can't display the name
+ self.run_bzr_decode(['branch', u'.', self.info['directory']])
+ self.run_bzr_decode(['branch', u'.', self.info['directory'] + '2'],
+ encoding='ascii')
+
+ def test_pull(self):
+ # Make sure we can pull from paths that can't be encoded
+ dirname1 = self.info['directory']
+ dirname2 = self.info['directory'] + '2'
+ url1 = urlutils.local_path_to_url(dirname1)
+ url2 = urlutils.local_path_to_url(dirname2)
+ out_bzrdir = self.wt.bzrdir.sprout(url1)
+ out_bzrdir.sprout(url2)
+
+ self.build_tree_contents(
+ [(osutils.pathjoin(dirname1, "a"), 'different text\n')])
+ self.wt.commit('mod a')
+
+ txt = self.run_bzr_decode('pull', working_dir=dirname2)
+
+ expected = osutils.pathjoin(osutils.getcwd(), dirname1)
+ self.assertEqual(u'Using saved parent location: %s/\n'
+ 'No revisions or tags to pull.\n' % (expected,), txt)
+
+ self.build_tree_contents(
+ [(osutils.pathjoin(dirname1, 'a'), 'and yet more\n')])
+ self.wt.commit(u'modifying a by ' + self.info['committer'])
+
+ # We should be able to pull, even if our encoding is bad
+ self.run_bzr_decode('pull --verbose', encoding='ascii',
+ working_dir=dirname2)
+
+ def test_push(self):
+ # TODO: Test push to an SFTP location
+ # Make sure we can pull from paths that can't be encoded
+ # TODO: jam 20060427 For drastically improving performance, we probably
+ # could create a local repository, so it wouldn't have to copy
+ # the files around as much.
+ # Note that the tests don't actually fail, but if we don't set this
+ # flag, we end up getting "Lock was not Unlocked" warnings
+
+ dirname = self.info['directory']
+ self.run_bzr_decode(['push', dirname])
+
+ self.build_tree_contents([('a', 'adding more text\n')])
+ self.wt.commit('added some stuff')
+
+ # TODO: check the output text is properly encoded
+ self.run_bzr_decode('push')
+
+ self.build_tree_contents(
+ [('a', 'and a bit more: \n%s\n' % (dirname.encode('utf-8'),))])
+
+ self.wt.commit('Added some ' + dirname)
+ self.run_bzr_decode('push --verbose', encoding='ascii')
+
+ self.run_bzr_decode(['push', '--verbose', dirname + '2'])
+
+ self.run_bzr_decode(['push', '--verbose', dirname + '3'],
+ encoding='ascii')
+
+ self.run_bzr_decode(['push', '--verbose', '--create-prefix',
+ dirname + '4/' + dirname + '5'])
+ self.run_bzr_decode(['push', '--verbose', '--create-prefix',
+ dirname + '6/' + dirname + '7'], encoding='ascii')
+
+ def test_renames(self):
+ fname = self.info['filename'] + '2'
+ self.wt.rename_one('a', fname)
+ txt = self.run_bzr_decode('renames')
+ self.assertEqual(u'a => %s\n' % fname, txt)
+
+ self.run_bzr_decode('renames', fail=True, encoding='ascii')
+
+ def test_remove(self):
+ fname = self.info['filename']
+ txt = self.run_bzr_decode(['remove', fname], encoding='ascii')
+
+ def test_remove_verbose(self):
+ fname = self.info['filename']
+ txt = self.run_bzr_decode(['remove', '--verbose', fname],
+ encoding='ascii')
+
+ def test_file_id(self):
+ fname = self.info['filename']
+ txt = self.run_bzr_decode(['file-id', fname])
+
+ # TODO: jam 20060106 We don't support non-ascii file ids yet,
+ # so there is nothing which would fail in ascii encoding
+ # This *should* be retcode=3
+ txt = self.run_bzr_decode(['file-id', fname], encoding='ascii')
+
+ def test_file_path(self):
+ # Create a directory structure
+ fname = self.info['filename']
+ dirname = self.info['directory']
+ self.build_tree_contents([
+ ('base/', ),
+ (osutils.pathjoin('base', '%s/' % (dirname,)), )])
+ self.wt.add('base')
+ self.wt.add('base/'+dirname)
+ path = osutils.pathjoin('base', dirname, fname)
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.wt.rename_one(fname, path)
+ self.wt.commit('moving things around')
+
+ txt = self.run_bzr_decode(['file-path', path])
+
+ # TODO: jam 20060106 We don't support non-ascii file ids yet,
+ # so there is nothing which would fail in ascii encoding
+ # This *should* be retcode=3
+ txt = self.run_bzr_decode(['file-path', path], encoding='ascii')
+
+ def test_revision_history(self):
+ # TODO: jam 20060106 We don't support non-ascii revision ids yet,
+ # so there is nothing which would fail in ascii encoding
+ txt = self.run_bzr_decode('revision-history')
+
+ def test_ancestry(self):
+ # TODO: jam 20060106 We don't support non-ascii revision ids yet,
+ # so there is nothing which would fail in ascii encoding
+ txt = self.run_bzr_decode('ancestry')
+
+ def test_diff(self):
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ # TODO: jam 20060106 diff is a difficult one to test, because it
+ # shouldn't encode the file contents, but it needs some sort
+ # of encoding for the paths, etc which are displayed.
+ self.build_tree_contents([(self.info['filename'], 'newline\n')])
+ txt = self.run_bzr('diff', retcode=1)[0]
+
+ def test_deleted(self):
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ fname = self.info['filename']
+ os.remove(fname)
+ self.wt.remove(fname)
+
+ txt = self.run_bzr_decode('deleted')
+ self.assertEqual(fname+'\n', txt)
+
+ txt = self.run_bzr_decode('deleted --show-ids')
+ self.assertTrue(txt.startswith(fname))
+
+ # Deleted should fail if it cannot decode, because it gives the
+ # exact paths, which might be used by a front end
+ self.run_bzr_decode('deleted', encoding='ascii', fail=True)
+
+ def test_modified(self):
+ fname = self.info['filename']
+ self.build_tree_contents([(fname, 'modified\n')])
+
+ txt = self.run_bzr_decode('modified')
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertEqual('"'+fname+'"'+'\n', txt)
+
+ self.run_bzr_decode('modified', encoding='ascii', fail=True)
+
+ def test_added(self):
+ fname = self.info['filename'] + '2'
+ self.build_tree_contents([(fname, 'added\n')])
+ self.wt.add(fname)
+
+ txt = self.run_bzr_decode('added')
+ self.assertEqual('"'+fname+'"'+'\n', txt)
+
+ self.run_bzr_decode('added', encoding='ascii', fail=True)
+
+ def test_root(self):
+ dirname = self.info['directory']
+ url = urlutils.local_path_to_url(dirname)
+ self.run_bzr_decode('root')
+
+ self.wt.bzrdir.sprout(url)
+
+ txt = self.run_bzr_decode('root', working_dir=dirname)
+ self.assertTrue(txt.endswith(dirname+'\n'))
+
+ txt = self.run_bzr_decode('root', encoding='ascii', fail=True,
+ working_dir=dirname)
+
+ def test_log(self):
+ fname = self.info['filename']
+
+ txt = self.run_bzr_decode('log')
+ self.assertNotEqual(-1, txt.find(self.info['committer']))
+ self.assertNotEqual(-1, txt.find(self.info['message']))
+
+ txt = self.run_bzr_decode('log --verbose')
+ # FIXME: iso-8859-2 test shouldn't be skipped here --vila 20090702
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertNotEqual(-1, txt.find(fname))
+
+ # Make sure log doesn't fail even if we can't write out
+ txt = self.run_bzr_decode('log --verbose', encoding='ascii')
+ self.assertEqual(-1, txt.find(fname))
+ self.assertNotEqual(-1, txt.find(fname.encode('ascii', 'replace')))
+
+ def test_touching_revisions(self):
+ fname = self.info['filename']
+ txt = self.run_bzr_decode(['touching-revisions', fname])
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertEqual(u' 3 added %s\n' % (fname,), txt)
+
+ fname2 = self.info['filename'] + '2'
+ self.wt.rename_one(fname, fname2)
+ self.wt.commit(u'Renamed %s => %s' % (fname, fname2))
+
+ txt = self.run_bzr_decode(['touching-revisions', fname2])
+ expected_txt = (u' 3 added %s\n'
+ u' 4 renamed %s => %s\n'
+ % (fname, fname, fname2))
+ self.assertEqual(expected_txt, txt)
+
+ self.run_bzr_decode(['touching-revisions', fname2], encoding='ascii',
+ fail=True)
+
+ def test_ls(self):
+ txt = self.run_bzr_decode('ls')
+ self.assertEqual(sorted(['a', 'b', self.info['filename']]),
+ sorted(txt.splitlines()))
+ txt = self.run_bzr_decode('ls --null')
+ self.assertEqual(sorted(['', 'a', 'b', self.info['filename']]),
+ sorted(txt.split('\0')))
+
+ txt = self.run_bzr_decode('ls', encoding='ascii', fail=True)
+ txt = self.run_bzr_decode('ls --null', encoding='ascii', fail=True)
+
+ def test_unknowns(self):
+ fname = self.info['filename'] + '2'
+ self.build_tree_contents([(fname, 'unknown\n')])
+
+ # TODO: jam 20060112 bzr unknowns is the only one which
+ # quotes paths; do we really want it to?
+ # awilkins 20080521 added and modified do it now as well
+ txt = self.run_bzr_decode('unknowns')
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ self.assertEqual(u'"%s"\n' % (fname,), txt)
+
+ self.run_bzr_decode('unknowns', encoding='ascii', fail=True)
+
+ def test_ignore(self):
+ fname2 = self.info['filename'] + '2.txt'
+ self.build_tree_contents([(fname2, 'ignored\n')])
+
+ def check_unknowns(expected):
+ self.assertEqual(expected, list(self.wt.unknowns()))
+
+ self._check_OSX_can_roundtrip(self.info['filename'])
+ check_unknowns([fname2])
+
+ self.run_bzr_decode(['ignore', './' + fname2])
+ check_unknowns([])
+
+ fname3 = self.info['filename'] + '3.txt'
+ self.build_tree_contents([(fname3, 'unknown 3\n')])
+ check_unknowns([fname3])
+
+ # Ignore should not care what the encoding is
+ # (right now it doesn't print anything)
+ self.run_bzr_decode(['ignore', fname3], encoding='ascii')
+ check_unknowns([])
+
+ # Now try a wildcard match
+ fname4 = self.info['filename'] + '4.txt'
+ self.build_tree_contents([(fname4, 'unknown 4\n')])
+ self.run_bzr_decode('ignore *.txt')
+ check_unknowns([])
+
+ # and a different wildcard that matches everything
+ os.remove('.bzrignore')
+ self.run_bzr_decode(['ignore', self.info['filename'] + '*'])
+ check_unknowns([])
+
+ def test_missing(self):
+ # create empty tree as reference for missing
+ self.make_branch_and_tree('empty-tree')
+
+ msg = self.info['message']
+
+ txt = self.run_bzr_decode('missing empty-tree')
+ self.assertNotEqual(-1, txt.find(self.info['committer']))
+ self.assertNotEqual(-1, txt.find(msg))
+
+ # Make sure missing doesn't fail even if we can't write out
+ txt = self.run_bzr_decode('missing empty-tree', encoding='ascii')
+ self.assertEqual(-1, txt.find(msg))
+ self.assertNotEqual(-1, txt.find(msg.encode('ascii', 'replace')))
+
+ def test_info(self):
+ self.run_bzr_decode(['branch', u'.', self.info['directory']])
+ self.run_bzr_decode(['info', self.info['directory']])
+ self.run_bzr_decode(['info', self.info['directory']],
+ encoding='ascii')
+
+ def test_ignored(self):
+ fname = self.info['filename'] + '1.txt'
+ self.build_tree_contents([(fname, 'ignored\n')])
+ self.run_bzr(['ignore', fname])
+ txt = self.run_bzr_decode(['ignored'])
+ self.assertEqual(txt, '%-50s %s\n' % (fname, fname))
+ txt = self.run_bzr_decode(['ignored'], encoding='ascii')
+ fname = fname.encode('ascii', 'replace')
+ self.assertEqual(txt, '%-50s %s\n' % (fname, fname))
diff --git a/bzrlib/tests/blackbox/test_outside_wt.py b/bzrlib/tests/blackbox/test_outside_wt.py
new file mode 100644
index 0000000..6d53377
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_outside_wt.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2006 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for running bzr outside of a working tree."""
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ transport,
+ urlutils,
+ )
+
+
+class TestOutsideWT(tests.ChrootedTestCase):
+ """Test that bzr gives proper errors outside of a working tree."""
+
+ def test_cwd_log(self):
+ # Watch out for tricky test dir (on OSX /tmp -> /private/tmp)
+ tmp_dir = osutils.realpath(osutils.mkdtemp())
+ # We expect a read-to-root attempt to occur.
+ self.permit_url('file:///')
+ self.addCleanup(osutils.rmtree, tmp_dir)
+ out, err = self.run_bzr('log', retcode=3, working_dir=tmp_dir)
+ self.assertEqual(u'bzr: ERROR: Not a branch: "%s/".\n'
+ % (tmp_dir,),
+ err)
+
+ def test_url_log(self):
+ url = self.get_readonly_url() + 'subdir/'
+ out, err = self.run_bzr(['log', url], retcode=3)
+ self.assertEqual(u'bzr: ERROR: Not a branch:'
+ u' "%s".\n' % url, err)
+
+ def test_diff_outside_tree(self):
+ tree = self.make_branch_and_tree('branch1')
+ tree.commit('nothing')
+ tree.commit('nothing')
+ # A directory we can run commands from which we hope is not contained
+ # in a bzr tree (though if there is one at or above $TEMPDIR, this is
+ # false and may cause test failures).
+ # Watch out for tricky test dir (on OSX /tmp -> /private/tmp)
+ tmp_dir = osutils.realpath(osutils.mkdtemp())
+ self.addCleanup(osutils.rmtree, tmp_dir)
+ # We expect a read-to-root attempt to occur.
+ self.permit_url('file:///')
+ expected_error = u'bzr: ERROR: Not a branch: "%s/branch2/".\n' % tmp_dir
+ # -r X..Y
+ out, err = self.run_bzr('diff -r revno:2:branch2..revno:1', retcode=3,
+ working_dir=tmp_dir)
+ self.assertEqual('', out)
+ self.assertEqual(expected_error, err)
+ # -r X
+ out, err = self.run_bzr('diff -r revno:2:branch2', retcode=3,
+ working_dir=tmp_dir)
+ self.assertEqual('', out)
+ self.assertEqual(expected_error, err)
+ # -r X..
+ out, err = self.run_bzr('diff -r revno:2:branch2..', retcode=3,
+ working_dir=tmp_dir)
+ self.assertEqual('', out)
+ self.assertEqual(expected_error, err)
+ # no -r at all.
+ out, err = self.run_bzr('diff', retcode=3, working_dir=tmp_dir)
+ self.assertEqual('', out)
+ self.assertEqual(u'bzr: ERROR: Not a branch: "%s/".\n' % tmp_dir, err)
diff --git a/bzrlib/tests/blackbox/test_pack.py b/bzrlib/tests/blackbox/test_pack.py
new file mode 100644
index 0000000..60f4a96
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_pack.py
@@ -0,0 +1,106 @@
+# Copyright (C) 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests of the 'bzr pack' command."""
+import os
+
+from bzrlib import tests
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestPack(tests.TestCaseWithTransport):
+
+ def _make_versioned_file(self, path, line_prefix='line', total_lines=10):
+ self._make_file(path, line_prefix, total_lines, versioned=True)
+
+ def _make_file(self, path, line_prefix, total_lines, versioned):
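+ """Write total_lines numbered lines to path; optionally add and commit."""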
+ text = ''
+ for i in range(total_lines):
+ text += line_prefix + str(i+1) + "\n"
+
+ with open(path, 'w') as f:
+ f.write(text)
+ if versioned:
+ self.run_bzr(['add', path])
+ self.run_bzr(['ci', '-m', '"' + path + '"'])
+
+ def _update_file(self, path, text, checkin=True):
+ """append text to file 'path' and check it in"""
+ with open(path, 'a') as f:
+ f.write(text)
+
+ if checkin:
+ self.run_bzr(['ci', path, '-m', '"' + path + '"'])
+
+ def test_pack_silent(self):
+ """pack command has no intrinsic output."""
+ self.make_branch('.')
+ out, err = self.run_bzr('pack')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_pack_accepts_branch_url(self):
+ """pack command accepts the url to a branch."""
+ self.make_branch('branch')
+ out, err = self.run_bzr('pack branch')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_pack_accepts_repo_url(self):
+ """pack command accepts the url to a branch."""
+ self.make_repository('repository')
+ out, err = self.run_bzr('pack repository')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_pack_clean_obsolete_packs(self):
+ """Ensure --clean-obsolete-packs removes obsolete pack files
+ """
+ wt = self.make_branch_and_tree('.')
+ t = wt.branch.repository.bzrdir.transport
+
+ # do multiple commits to ensure that obsolete packs are created
+ # by 'bzr pack'
+ self._make_versioned_file('file0.txt')
+ for i in range(5):
+ self._update_file('file0.txt', 'HELLO %d\n' % i)
+
+ out, err = self.run_bzr(['pack', '--clean-obsolete-packs'])
+
+ pack_names = t.list_dir('repository/obsolete_packs')
+ self.assertTrue(len(pack_names) == 0)
+
+
+class TestSmartServerPack(tests.TestCaseWithTransport):
+
+ def test_simple_pack(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['pack', self.get_url('branch')])
+ # This figure represents the number of HPSS calls needed to perform
+ # this use case. It is entirely ok to reduce this number if a test
+ # fails due to rpc_count being too low. If rpc_count increases, more
+ # network roundtrips have become necessary for this use case. Please
+ # do not adjust this number upwards without agreement from bzr's
+ # network support maintainers.
+ self.assertLength(6, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_pull.py b/bzrlib/tests/blackbox/test_pull.py
new file mode 100644
index 0000000..74d3226
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_pull.py
@@ -0,0 +1,589 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr pull."""
+
+import os
+import sys
+
+from bzrlib import (
+ branch,
+ debug,
+ osutils,
+ remote,
+ tests,
+ uncommit,
+ urlutils,
+ workingtree,
+ )
+
+from bzrlib.directory_service import directories
+from bzrlib.tests import (
+ fixtures,
+ script,
+ )
+
+
+class TestPull(tests.TestCaseWithTransport):
+
+ def example_branch(self, path='.'):
+ tree = self.make_branch_and_tree(path)
+ self.build_tree_contents([
+ (osutils.pathjoin(path, 'hello'), 'foo'),
+ (osutils.pathjoin(path, 'goodbye'), 'baz')])
+ tree.add('hello')
+ tree.commit(message='setup')
+ tree.add('goodbye')
+ tree.commit(message='setup')
+ return tree
+
+ def test_pull(self):
+ """Pull changes from one branch to another."""
+ a_tree = self.example_branch('a')
+ base_rev = a_tree.branch.last_revision()
+ self.run_bzr('pull', retcode=3, working_dir='a')
+ self.run_bzr('missing', retcode=3, working_dir='a')
+ self.run_bzr('missing .', working_dir='a')
+ self.run_bzr('missing', working_dir='a')
+ # this will work on windows because we check for the same branch
+ # in pull - if it fails, it is a regression
+ self.run_bzr('pull', working_dir='a')
+ self.run_bzr('pull /', retcode=3, working_dir='a')
+ if sys.platform not in ('win32', 'cygwin'):
+ self.run_bzr('pull', working_dir='a')
+
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.run_bzr('pull', working_dir='b')
+ os.mkdir('b/subdir')
+ b_tree.add('subdir')
+ new_rev = b_tree.commit(message='blah', allow_pointless=True)
+
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.last_revision(), base_rev)
+ self.assertEqual(b.last_revision(), new_rev)
+
+ self.run_bzr('pull ../b', working_dir='a')
+ self.assertEqual(a.last_revision(), b.last_revision())
+ a_tree.commit(message='blah2', allow_pointless=True)
+ b_tree.commit(message='blah3', allow_pointless=True)
+ # no overwrite
+ self.run_bzr('pull ../a', retcode=3, working_dir='b')
+ b_tree.bzrdir.sprout('overwriteme')
+ self.run_bzr('pull --overwrite ../a', working_dir='overwriteme')
+ overwritten = branch.Branch.open('overwriteme')
+ self.assertEqual(overwritten.last_revision(),
+ a.last_revision())
+ a_tree.merge_from_branch(b_tree.branch)
+ a_tree.commit(message="blah4", allow_pointless=True)
+
+ self.run_bzr('pull ../../a', working_dir='b/subdir')
+ self.assertEqual(a.last_revision(), b.last_revision())
+ sub_tree = workingtree.WorkingTree.open_containing('b/subdir')[0]
+ sub_tree.commit(message="blah5", allow_pointless=True)
+ sub_tree.commit(message="blah6", allow_pointless=True)
+ self.run_bzr('pull ../a', working_dir='b')
+ a_tree.commit(message="blah7", allow_pointless=True)
+ a_tree.merge_from_branch(b_tree.branch)
+ a_tree.commit(message="blah8", allow_pointless=True)
+ self.run_bzr('pull ../b', working_dir='a')
+ self.run_bzr('pull ../b', working_dir='a')
+
+ def test_pull_dash_d(self):
+ self.example_branch('a')
+ self.make_branch_and_tree('b')
+ self.make_branch_and_tree('c')
+ # pull into that branch
+ self.run_bzr('pull -d b a')
+ # pull into a branch specified by a url
+ c_url = urlutils.local_path_to_url('c')
+ self.assertStartsWith(c_url, 'file://')
+ self.run_bzr(['pull', '-d', c_url, 'a'])
+
+ def test_pull_revision(self):
+ """Pull some changes from one branch to another."""
+ a_tree = self.example_branch('a')
+ self.build_tree_contents([
+ ('a/hello2', 'foo'),
+ ('a/goodbye2', 'baz')])
+ a_tree.add('hello2')
+ a_tree.commit(message="setup")
+ a_tree.add('goodbye2')
+ a_tree.commit(message="setup")
+
+ b_tree = a_tree.bzrdir.sprout('b',
+ revision_id=a_tree.branch.get_rev_id(1)).open_workingtree()
+ self.run_bzr('pull -r 2', working_dir='b')
+ a = branch.Branch.open('a')
+ b = branch.Branch.open('b')
+ self.assertEqual(a.revno(), 4)
+ self.assertEqual(b.revno(), 2)
+ self.run_bzr('pull -r 3', working_dir='b')
+ self.assertEqual(b.revno(), 3)
+ self.run_bzr('pull -r 4', working_dir='b')
+ self.assertEqual(a.last_revision(), b.last_revision())
+
+ def test_pull_tags(self):
+ """Tags are updated by pull, and revisions named in those tags are
+ fetched.
+ """
+ # Make a source, sprout a target off it
+ builder = self.make_branch_builder('source')
+ source = fixtures.build_branch_with_non_ancestral_rev(builder)
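+ # The fixture puts 'rev-2' in the source repository outside the
+ # branch's ancestry; the tag set below is the only reference to it.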
+ source.get_config_stack().set('branch.fetch_tags', True)
+ target_bzrdir = source.bzrdir.sprout('target')
+ source.tags.set_tag('tag-a', 'rev-2')
+ # Pull from source
+ self.run_bzr('pull -d target source')
+ target = target_bzrdir.open_branch()
+ # The tag is present, and so is its revision.
+ self.assertEqual('rev-2', target.tags.lookup_tag('tag-a'))
+ target.repository.get_revision('rev-2')
+
+ def test_overwrite_uptodate(self):
+ # Make sure pull --overwrite overwrites
+ # even if the target branch has merged
+ # everything already.
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/foo', 'original\n')])
+ a_tree.add('foo')
+ a_tree.commit(message='initial commit')
+
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+
+ self.build_tree_contents([('a/foo', 'changed\n')])
+ a_tree.commit(message='later change')
+
+ self.build_tree_contents([('a/foo', 'a third change')])
+ a_tree.commit(message='a third change')
+
+ self.assertEqual(a_tree.branch.last_revision_info()[0], 3)
+
+ b_tree.merge_from_branch(a_tree.branch)
+ b_tree.commit(message='merge')
+
+ self.assertEqual(b_tree.branch.last_revision_info()[0], 2)
+
+ self.run_bzr('pull --overwrite ../a', working_dir='b')
+ last_revinfo_b = b_tree.branch.last_revision_info()
+ self.assertEqual(last_revinfo_b[0], 3)
+ self.assertEqual(last_revinfo_b[1], a_tree.branch.last_revision())
+
+ def test_overwrite_children(self):
+ # Make sure pull --overwrite sets the revision-history
+ # to be identical to the pull source, even if we have convergence
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/foo', 'original\n')])
+ a_tree.add('foo')
+ a_tree.commit(message='initial commit')
+
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+
+ self.build_tree_contents([('a/foo', 'changed\n')])
+ a_tree.commit(message='later change')
+
+ self.build_tree_contents([('a/foo', 'a third change')])
+ a_tree.commit(message='a third change')
+
+ self.assertEqual(a_tree.branch.last_revision_info()[0], 3)
+
+ b_tree.merge_from_branch(a_tree.branch)
+ b_tree.commit(message='merge')
+
+ self.assertEqual(b_tree.branch.last_revision_info()[0], 2)
+
+ self.build_tree_contents([('a/foo', 'a fourth change\n')])
+ a_tree.commit(message='a fourth change')
+
+ rev_info_a = a_tree.branch.last_revision_info()
+ self.assertEqual(rev_info_a[0], 4)
+
+ # With convergence, we could just pull over the
+ # new change, but with --overwrite, we want to switch our history
+ self.run_bzr('pull --overwrite ../a', working_dir='b')
+ rev_info_b = b_tree.branch.last_revision_info()
+ self.assertEqual(rev_info_b[0], 4)
+ self.assertEqual(rev_info_b, rev_info_a)
+
+ def test_pull_remember(self):
+ """Pull changes from one branch to another and test parent location."""
+ t = self.get_transport()
+ tree_a = self.make_branch_and_tree('branch_a')
+ branch_a = tree_a.branch
+ self.build_tree(['branch_a/a'])
+ tree_a.add('a')
+ tree_a.commit('commit a')
+ tree_b = branch_a.bzrdir.sprout('branch_b').open_workingtree()
+ branch_b = tree_b.branch
+ tree_c = branch_a.bzrdir.sprout('branch_c').open_workingtree()
+ branch_c = tree_c.branch
+ self.build_tree(['branch_a/b'])
+ tree_a.add('b')
+ tree_a.commit('commit b')
+ # reset parent
+ parent = branch_b.get_parent()
+ branch_b = branch.Branch.open('branch_b')
+ branch_b.set_parent(None)
+ self.assertEqual(None, branch_b.get_parent())
+ # test pull for failure without parent set
+ out = self.run_bzr('pull', retcode=3, working_dir='branch_b')
+ self.assertEqual(out,
+ ('','bzr: ERROR: No pull location known or specified.\n'))
+ # test implicit --remember when no parent set, this pull conflicts
+ self.build_tree(['branch_b/d'])
+ tree_b.add('d')
+ tree_b.commit('commit d')
+ out = self.run_bzr('pull ../branch_a', retcode=3,
+ working_dir='branch_b')
+ self.assertEqual(out,
+ ('','bzr: ERROR: These branches have diverged.'
+ ' Use the missing command to see how.\n'
+ 'Use the merge command to reconcile them.\n'))
+ tree_b = tree_b.bzrdir.open_workingtree()
+ branch_b = tree_b.branch
+ self.assertEqual(parent, branch_b.get_parent())
+ # test implicit --remember after resolving previous failure
+ uncommit.uncommit(branch=branch_b, tree=tree_b)
+ t.delete('branch_b/d')
+ self.run_bzr('pull', working_dir='branch_b')
+ # Refresh the branch object as 'pull' modified it
+ branch_b = branch_b.bzrdir.open_branch()
+ self.assertEqual(branch_b.get_parent(), parent)
+ # test explicit --remember
+ self.run_bzr('pull ../branch_c --remember', working_dir='branch_b')
+ # Refresh the branch object as 'pull' modified it
+ branch_b = branch_b.bzrdir.open_branch()
+ self.assertEqual(branch_c.bzrdir.root_transport.base,
+ branch_b.get_parent())
+
+ def test_pull_bundle(self):
+ from bzrlib.testament import Testament
+ # Build up 2 trees and prepare for a pull
+ tree_a = self.make_branch_and_tree('branch_a')
+ with open('branch_a/a', 'wb') as f:
+ f.write('hello')
+ tree_a.add('a')
+ tree_a.commit('message')
+
+ tree_b = tree_a.bzrdir.sprout('branch_b').open_workingtree()
+
+ # Make a change to 'a' that 'b' can pull
+ with open('branch_a/a', 'wb') as f:
+ f.write('hey there')
+ tree_a.commit('message')
+
+ # Create the bundle for 'b' to pull
+ self.run_bzr('bundle ../branch_b -o ../bundle', working_dir='branch_a')
+
+ out, err = self.run_bzr('pull ../bundle', working_dir='branch_b')
+ self.assertEqual(out,
+ 'Now on revision 2.\n')
+ self.assertEqual(err,
+ ' M a\nAll changes applied successfully.\n')
+
+ self.assertEqualDiff(tree_a.branch.last_revision(),
+ tree_b.branch.last_revision())
+
+ testament_a = Testament.from_revision(tree_a.branch.repository,
+ tree_a.get_parent_ids()[0])
+ testament_b = Testament.from_revision(tree_b.branch.repository,
+ tree_b.get_parent_ids()[0])
+ self.assertEqualDiff(testament_a.as_text(),
+ testament_b.as_text())
+
+ # it is legal to attempt to pull an already-merged bundle
+ out, err = self.run_bzr('pull ../bundle', working_dir='branch_b')
+ self.assertEqual(err, '')
+ self.assertEqual(out, 'No revisions or tags to pull.\n')
+
+ def test_pull_verbose_no_files(self):
+ """Pull --verbose should not list modified files"""
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree(['tree_a/foo'])
+ tree_a.add('foo')
+ tree_a.commit('bar')
+ tree_b = self.make_branch_and_tree('tree_b')
+ out = self.run_bzr('pull --verbose -d tree_b tree_a')[0]
+ self.assertContainsRe(out, 'bar')
+ self.assertNotContainsRe(out, 'added:')
+ self.assertNotContainsRe(out, 'foo')
+
+ def test_pull_quiet(self):
+ """Check that bzr pull --quiet does not print anything"""
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree(['tree_a/foo'])
+ tree_a.add('foo')
+ revision_id = tree_a.commit('bar')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ out, err = self.run_bzr('pull --quiet -d tree_b')
+ self.assertEqual(out, '')
+ self.assertEqual(err, '')
+ self.assertEqual(tree_b.last_revision(), revision_id)
+ self.build_tree(['tree_a/moo'])
+ tree_a.add('moo')
+ revision_id = tree_a.commit('quack')
+ out, err = self.run_bzr('pull --quiet -d tree_b')
+ self.assertEqual(out, '')
+ self.assertEqual(err, '')
+ self.assertEqual(tree_b.last_revision(), revision_id)
+
+ def test_pull_from_directory_service(self):
+ source = self.make_branch_and_tree('source')
+ source.commit('commit 1')
+ target = source.bzrdir.sprout('target').open_workingtree()
+ source_last = source.commit('commit 2')
+ class FooService(object):
+ """A directory service that always returns source"""
+
+ def look_up(self, name, url):
+ return 'source'
+ directories.register('foo:', FooService, 'Testing directory service')
+ self.addCleanup(directories.remove, 'foo:')
+ self.run_bzr('pull foo:bar -d target')
+ self.assertEqual(source_last, target.last_revision())
+
+ def test_pull_verbose_defaults_to_long(self):
+ tree = self.example_branch('source')
+ target = self.make_branch_and_tree('target')
+ out = self.run_bzr('pull -v source -d target')[0]
+ self.assertContainsRe(out,
+ r'revno: 1\ncommitter: .*\nbranch nick: source')
+ self.assertNotContainsRe(out, r'\n {4}1 .*\n {6}setup\n')
+
+ def test_pull_verbose_uses_default_log(self):
+ tree = self.example_branch('source')
+ target = self.make_branch_and_tree('target')
+ target.branch.get_config_stack().set('log_format', 'short')
+ out = self.run_bzr('pull -v source -d target')[0]
+ self.assertContainsRe(out, r'\n {4}1 .*\n {6}setup\n')
+ self.assertNotContainsRe(
+ out, r'revno: 1\ncommitter: .*\nbranch nick: source')
+
+ def test_pull_smart_bound_branch(self):
+ self.setup_smart_server_with_call_log()
+ parent = self.make_branch_and_tree('parent')
+ parent.commit(message='first commit')
+ child = parent.bzrdir.sprout('child').open_workingtree()
+ child.commit(message='second commit')
+ checkout = parent.branch.create_checkout('checkout')
+ self.run_bzr(['pull', self.get_url('child')], working_dir='checkout')
+
+ def test_pull_smart_stacked_streaming_acceptance(self):
+ """'bzr pull -r 123' works on stacked, smart branches, even when the
+ revision specified by the revno is only present in the fallback
+ repository.
+
+ See <https://launchpad.net/bugs/380314>
+ """
+ self.setup_smart_server_with_call_log()
+ # Make a stacked-on branch with two commits so that the
+ # revision-history can't be determined just by looking at the parent
+ # field in the revision in the stacked repo.
+ parent = self.make_branch_and_tree('parent', format='1.9')
+ parent.commit(message='first commit')
+ parent.commit(message='second commit')
+ local = parent.bzrdir.sprout('local').open_workingtree()
+ local.commit(message='local commit')
+ local.branch.create_clone_on_transport(
+ self.get_transport('stacked'), stacked_on=self.get_url('parent'))
+ empty = self.make_branch_and_tree('empty', format='1.9')
+ self.reset_smart_call_log()
+ self.run_bzr(['pull', '-r', '1', self.get_url('stacked')],
+ working_dir='empty')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(19, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ remote = branch.Branch.open('stacked')
+ self.assertEndsWith(remote.get_stacked_on_url(), '/parent')
+
+ def test_pull_cross_format_warning(self):
+ """You get a warning for probably slow cross-format pulls.
+ """
+ # this is assumed to be going through InterDifferingSerializer
+ from_tree = self.make_branch_and_tree('from', format='2a')
+ to_tree = self.make_branch_and_tree('to', format='1.14-rich-root')
+ from_tree.commit(message='first commit')
+ out, err = self.run_bzr(['pull', '-d', 'to', 'from'])
+ self.assertContainsRe(err,
+ "(?m)Doing on-the-fly conversion")
+
+ def test_pull_cross_format_warning_no_IDS(self):
+ """You get a warning for probably slow cross-format pulls.
+ """
+ # this simulates what would happen across the network, where
+ # interdifferingserializer is not active
+
+ debug.debug_flags.add('IDS_never')
+ # TestCase takes care of restoring them
+
+ from_tree = self.make_branch_and_tree('from', format='2a')
+ to_tree = self.make_branch_and_tree('to', format='1.14-rich-root')
+ from_tree.commit(message='first commit')
+ out, err = self.run_bzr(['pull', '-d', 'to', 'from'])
+ self.assertContainsRe(err,
+ "(?m)Doing on-the-fly conversion")
+
+ def test_pull_cross_format_from_network(self):
+ self.setup_smart_server_with_call_log()
+ from_tree = self.make_branch_and_tree('from', format='2a')
+ to_tree = self.make_branch_and_tree('to', format='1.14-rich-root')
+ self.assertIsInstance(from_tree.branch, remote.RemoteBranch)
+ from_tree.commit(message='first commit')
+ out, err = self.run_bzr(['pull', '-d', 'to',
+ from_tree.branch.bzrdir.root_transport.base])
+ self.assertContainsRe(err,
+ "(?m)Doing on-the-fly conversion")
+
+ def test_pull_to_experimental_format_warning(self):
+ """You get a warning for pulling into experimental formats.
+ """
+ from_tree = self.make_branch_and_tree('from', format='development-subtree')
+ to_tree = self.make_branch_and_tree('to', format='development-subtree')
+ from_tree.commit(message='first commit')
+ out, err = self.run_bzr(['pull', '-d', 'to', 'from'])
+ self.assertContainsRe(err,
+ "(?m)Fetching into experimental format")
+
+ def test_pull_cross_to_experimental_format_warning(self):
+ """You get a warning for pulling into experimental formats.
+ """
+ from_tree = self.make_branch_and_tree('from', format='2a')
+ to_tree = self.make_branch_and_tree('to', format='development-subtree')
+ from_tree.commit(message='first commit')
+ out, err = self.run_bzr(['pull', '-d', 'to', 'from'])
+ self.assertContainsRe(err,
+ "(?m)Fetching into experimental format")
+
+ def test_pull_show_base(self):
+ """bzr pull supports --show-base
+
+ see https://bugs.launchpad.net/bzr/+bug/202374"""
+ # create two trees with conflicting changes, set up the conflict, and
+ # check that the conflicted file looks correct
+ a_tree = self.example_branch('a')
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+
+ with open(osutils.pathjoin('a', 'hello'), 'wt') as f:
+ f.write('fee')
+ a_tree.commit('fee')
+
+ with open(osutils.pathjoin('b', 'hello'), 'wt') as f:
+ f.write('fie')
+
+ out, err = self.run_bzr(['pull', '-d', 'b', 'a', '--show-base'])
+
+ # check for message here
+ self.assertEqual(
+ err,
+ ' M hello\nText conflict in hello\n1 conflicts encountered.\n')
+
+ self.assertEqualDiff('<<<<<<< TREE\n'
+ 'fie||||||| BASE-REVISION\n'
+ 'foo=======\n'
+ 'fee>>>>>>> MERGE-SOURCE\n',
+ open(osutils.pathjoin('b', 'hello')).read())
+
+ def test_pull_show_base_working_tree_only(self):
+ """--show-base only allowed if there's a working tree
+
+ see https://bugs.launchpad.net/bzr/+bug/202374"""
+ # create a branch, see that --show-base fails
+ self.make_branch('from')
+ self.make_branch('to')
+ out = self.run_bzr(['pull', '-d', 'to', 'from', '--show-base'], retcode=3)
+ self.assertEqual(
+ out, ('','bzr: ERROR: Need working tree for --show-base.\n'))
+
+ def test_pull_tag_conflicts(self):
+ """pulling tags with conflicts will change the exit code"""
+ # create two branches whose 'mytag' values conflict
+ from_tree = self.make_branch_and_tree('from')
+ from_tree.branch.tags.set_tag("mytag", "somerevid")
+ to_tree = self.make_branch_and_tree('to')
+ to_tree.branch.tags.set_tag("mytag", "anotherrevid")
+ out = self.run_bzr(['pull', '-d', 'to', 'from'], retcode=1)
+ self.assertEqual(out,
+ ('No revisions to pull.\nConflicting tags:\n mytag\n', ''))
+
+ def test_pull_tag_notification(self):
+ """pulling tags with conflicts will change the exit code"""
+ # create a branch, see that --show-base fails
+ from_tree = self.make_branch_and_tree('from')
+ from_tree.branch.tags.set_tag("mytag", "somerevid")
+ to_tree = self.make_branch_and_tree('to')
+ out = self.run_bzr(['pull', '-d', 'to', 'from'])
+ self.assertEqual(out,
+ ('1 tag(s) updated.\n', ''))
+
+ def test_overwrite_tags(self):
+ """--overwrite-tags only overwrites tags, not revisions."""
+ from_tree = self.make_branch_and_tree('from')
+ from_tree.branch.tags.set_tag("mytag", "somerevid")
+ to_tree = self.make_branch_and_tree('to')
+ to_tree.branch.tags.set_tag("mytag", "anotherrevid")
+ revid1 = to_tree.commit('my commit')
+ out = self.run_bzr(['pull', '-d', 'to', 'from'], retcode=1)
+ self.assertEquals(out,
+ ('No revisions to pull.\nConflicting tags:\n mytag\n', ''))
+ out = self.run_bzr(['pull', '-d', 'to', '--overwrite-tags', 'from'])
+ self.assertEquals(out, ('1 tag(s) updated.\n', ''))
+
+ self.assertEquals(to_tree.branch.tags.lookup_tag('mytag'),
+ 'somerevid')
+ self.assertEquals(to_tree.branch.last_revision(), revid1)
+
+ def test_pull_tag_overwrite(self):
+ """pulling tags with --overwrite only reports changed tags."""
+ # create two branches that already carry the same tag
+ from_tree = self.make_branch_and_tree('from')
+ from_tree.branch.tags.set_tag("mytag", "somerevid")
+ to_tree = self.make_branch_and_tree('to')
+ to_tree.branch.tags.set_tag("mytag", "somerevid")
+ out = self.run_bzr(['pull', '--overwrite', '-d', 'to', 'from'])
+ self.assertEqual(out,
+ ('No revisions or tags to pull.\n', ''))
+
+
+class TestPullOutput(script.TestCaseWithTransportAndScript):
+
+ def test_pull_log_format(self):
+ self.run_script("""
+ $ bzr init trunk
+ Created a standalone tree (format: 2a)
+ $ cd trunk
+ $ echo foo > file
+ $ bzr add
+ adding file
+ $ bzr commit -m 'we need some foo'
+ 2>Committing to:...trunk/
+ 2>added file
+ 2>Committed revision 1.
+ $ cd ..
+ $ bzr init feature
+ Created a standalone tree (format: 2a)
+ $ cd feature
+ $ bzr pull -v ../trunk -Olog_format=line
+ Now on revision 1.
+ Added Revisions:
+ 1: jrandom@example.com ...we need some foo
+ 2>+N file
+ 2>All changes applied successfully.
+ """)
diff --git a/bzrlib/tests/blackbox/test_push.py b/bzrlib/tests/blackbox/test_push.py
new file mode 100644
index 0000000..e68e77f
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_push.py
@@ -0,0 +1,919 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr push."""
+
+import re
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ controldir,
+ errors,
+ osutils,
+ tests,
+ transport,
+ uncommit,
+ urlutils,
+ workingtree
+ )
+from bzrlib.repofmt import knitrepo
+from bzrlib.tests import (
+ http_server,
+ scenarios,
+ script,
+ test_foreign,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.transport import memory
+
+
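+# load_tests_apply_scenarios multiplies any test class in this module that
+# defines a 'scenarios' attribute (here TestPushStrictWithChanges) into one
+# test case per scenario.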
+load_tests = scenarios.load_tests_apply_scenarios
+
+
+class TestPush(tests.TestCaseWithTransport):
+
+ def test_push_error_on_vfs_http(self):
+ """ pushing a branch to a HTTP server fails cleanly. """
+ # the trunk is published on a web server
+ self.transport_readonly_server = http_server.HttpServer
+ self.make_branch('source')
+ public_url = self.get_readonly_url('target')
+ self.run_bzr_error(['http does not support mkdir'],
+ ['push', public_url],
+ working_dir='source')
+
+ def test_push_suggests_parent_alias(self):
+ """Push suggests using :parent if there is a known parent branch."""
+ tree_a = self.make_branch_and_tree('a')
+ tree_a.commit('this is a commit')
+ tree_b = self.make_branch_and_tree('b')
+
+ # If there is no parent location set, :parent isn't mentioned.
+ out = self.run_bzr('push', working_dir='a', retcode=3)
+ self.assertEquals(out,
+ ('','bzr: ERROR: No push location known or specified.\n'))
+
+ # If there is a parent location set, the error suggests :parent.
+ tree_a.branch.set_parent(tree_b.branch.base)
+ out = self.run_bzr('push', working_dir='a', retcode=3)
+ self.assertEquals(out,
+ ('','bzr: ERROR: No push location known or specified. '
+ 'To push to the parent branch '
+ '(at %s), use \'bzr push :parent\'.\n' %
+ urlutils.unescape_for_display(tree_b.branch.base, 'utf-8')))
+
+ def test_push_remember(self):
+ """Push changes from one branch to another and test push location."""
+ transport = self.get_transport()
+ tree_a = self.make_branch_and_tree('branch_a')
+ branch_a = tree_a.branch
+ self.build_tree(['branch_a/a'])
+ tree_a.add('a')
+ tree_a.commit('commit a')
+ tree_b = branch_a.bzrdir.sprout('branch_b').open_workingtree()
+ branch_b = tree_b.branch
+ tree_c = branch_a.bzrdir.sprout('branch_c').open_workingtree()
+ branch_c = tree_c.branch
+ self.build_tree(['branch_a/b'])
+ tree_a.add('b')
+ tree_a.commit('commit b')
+ self.build_tree(['branch_b/c'])
+ tree_b.add('c')
+ tree_b.commit('commit c')
+ # initial push location must be empty
+ self.assertEqual(None, branch_b.get_push_location())
+
+ # test push for failure without push location set
+ out = self.run_bzr('push', working_dir='branch_a', retcode=3)
+ self.assertEquals(out,
+ ('','bzr: ERROR: No push location known or specified.\n'))
+
+ # the location is not remembered if the push cannot actually happen
+ self.run_bzr('push path/which/doesnt/exist',
+ working_dir='branch_a', retcode=3)
+ out = self.run_bzr('push', working_dir='branch_a', retcode=3)
+ self.assertEquals(
+ ('', 'bzr: ERROR: No push location known or specified.\n'),
+ out)
+
+ # test implicit --remember when no push location set, push fails
+ out = self.run_bzr('push ../branch_b',
+ working_dir='branch_a', retcode=3)
+ self.assertEquals(out,
+ ('','bzr: ERROR: These branches have diverged. '
+ 'See "bzr help diverged-branches" for more information.\n'))
+ # Refresh the branch as 'push' modified it
+ branch_a = branch_a.bzrdir.open_branch()
+ self.assertEquals(osutils.abspath(branch_a.get_push_location()),
+ osutils.abspath(branch_b.bzrdir.root_transport.base))
+
+ # test implicit --remember after resolving previous failure
+ uncommit.uncommit(branch=branch_b, tree=tree_b)
+ transport.delete('branch_b/c')
+ out, err = self.run_bzr('push', working_dir='branch_a')
+ # Refresh the branch as 'push' modified it
+ branch_a = branch_a.bzrdir.open_branch()
+ path = branch_a.get_push_location()
+ self.assertEqual(err,
+ 'Using saved push location: %s\n'
+ 'All changes applied successfully.\n'
+ 'Pushed up to revision 2.\n'
+ % urlutils.local_path_from_url(path))
+ self.assertEqual(path,
+ branch_b.bzrdir.root_transport.base)
+ # test explicit --remember
+ self.run_bzr('push ../branch_c --remember', working_dir='branch_a')
+ # Refresh the branch as 'push' modified it
+ branch_a = branch_a.bzrdir.open_branch()
+ self.assertEquals(branch_a.get_push_location(),
+ branch_c.bzrdir.root_transport.base)
+
+ def test_push_without_tree(self):
+ # bzr push from a branch that does not have a checkout should work.
+ b = self.make_branch('.')
+ out, err = self.run_bzr('push pushed-location')
+ self.assertEqual('', out)
+ self.assertEqual('Created new branch.\n', err)
+ b2 = branch.Branch.open('pushed-location')
+ self.assertEndsWith(b2.base, 'pushed-location/')
+
+ def test_push_no_tree(self):
+ # bzr push --no-tree of a branch with working trees
+ b = self.make_branch_and_tree('push-from')
+ self.build_tree(['push-from/file'])
+ b.add('file')
+ b.commit('commit 1')
+ out, err = self.run_bzr('push --no-tree -d push-from push-to')
+ self.assertEqual('', out)
+ self.assertEqual('Created new branch.\n', err)
+ self.assertPathDoesNotExist('push-to/file')
+
+ def test_push_new_branch_revision_count(self):
+ # bzr push of a branch with revisions to a new location
+ # should print the number of revisions equal to the length of the
+ # local branch.
+ t = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ t.add('file')
+ t.commit('commit 1')
+ out, err = self.run_bzr('push -d tree pushed-to')
+ self.assertEqual('', out)
+ self.assertEqual('Created new branch.\n', err)
+
+ def test_push_quiet(self):
+ # test that using -q makes output quiet
+ t = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ t.add('file')
+ t.commit('commit 1')
+ self.run_bzr('push -d tree pushed-to')
+ # Refresh the branch as 'push' modified it and get the push location
+ push_loc = t.branch.bzrdir.open_branch().get_push_location()
+ out, err = self.run_bzr('push', working_dir="tree")
+ self.assertEqual('Using saved push location: %s\n'
+ 'No new revisions or tags to push.\n' %
+ urlutils.local_path_from_url(push_loc), err)
+ out, err = self.run_bzr('push -q', working_dir="tree")
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_push_only_pushes_history(self):
+ # Knit branches should only push the history for the current revision.
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit1()
+ shared_repo = self.make_repository('repo', format=format, shared=True)
+ shared_repo.set_make_working_trees(True)
+
+ def make_shared_tree(path):
+ shared_repo.bzrdir.root_transport.mkdir(path)
+ controldir.ControlDir.create_branch_convenience('repo/' + path)
+ return workingtree.WorkingTree.open('repo/' + path)
+ tree_a = make_shared_tree('a')
+ self.build_tree(['repo/a/file'])
+ tree_a.add('file')
+ tree_a.commit('commit a-1', rev_id='a-1')
+ f = open('repo/a/file', 'ab')
+ f.write('more stuff\n')
+ f.close()
+ tree_a.commit('commit a-2', rev_id='a-2')
+
+ tree_b = make_shared_tree('b')
+ self.build_tree(['repo/b/file'])
+ tree_b.add('file')
+ tree_b.commit('commit b-1', rev_id='b-1')
+
+ self.assertTrue(shared_repo.has_revision('a-1'))
+ self.assertTrue(shared_repo.has_revision('a-2'))
+ self.assertTrue(shared_repo.has_revision('b-1'))
+
+ # Now that we have a repository with shared files, make sure
+ # that things aren't copied out by a 'push'
+ self.run_bzr('push ../../push-b', working_dir='repo/b')
+ pushed_tree = workingtree.WorkingTree.open('push-b')
+ pushed_repo = pushed_tree.branch.repository
+ self.assertFalse(pushed_repo.has_revision('a-1'))
+ self.assertFalse(pushed_repo.has_revision('a-2'))
+ self.assertTrue(pushed_repo.has_revision('b-1'))
+
+ def test_push_funky_id(self):
+ t = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/filename'])
+ t.add('filename', 'funky-chars<>%&;"\'')
+ t.commit('commit filename')
+ self.run_bzr('push -d tree new-tree')
+
+ def test_push_dash_d(self):
+ t = self.make_branch_and_tree('from')
+ t.commit(allow_pointless=True,
+ message='first commit')
+ self.run_bzr('push -d from to-one')
+ self.assertPathExists('to-one')
+ self.run_bzr('push -d %s %s'
+ % tuple(map(urlutils.local_path_to_url, ['from', 'to-two'])))
+ self.assertPathExists('to-two')
+
+ def test_push_repository_no_branch_doesnt_fetch_all_revs(self):
+ # See https://bugs.launchpad.net/bzr/+bug/465517
+ target_repo = self.make_repository('target')
+ source = self.make_branch_builder('source')
+ source.start_series()
+ source.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ source.build_snapshot('B', ['A'], [])
+ source.build_snapshot('C', ['A'], [])
+ source.finish_series()
+ self.run_bzr('push target -d source')
+ self.addCleanup(target_repo.lock_read().unlock)
+ # We should have pushed 'C', but not 'B', since it isn't in the
+ # ancestry
+ self.assertEqual([('A',), ('C',)], sorted(target_repo.revisions.keys()))
+
+ def test_push_smart_non_stacked_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ t.commit(allow_pointless=True, message='first commit')
+ self.reset_smart_call_log()
+ self.run_bzr(['push', self.get_url('to-one')], working_dir='from')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(9, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_push_smart_stacked_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ parent = self.make_branch_and_tree('parent', format='1.9')
+ parent.commit(message='first commit')
+ local = parent.bzrdir.sprout('local').open_workingtree()
+ local.commit(message='local commit')
+ self.reset_smart_call_log()
+ self.run_bzr(['push', '--stacked', '--stacked-on', '../parent',
+ self.get_url('public')], working_dir='local')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(15, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+ remote = branch.Branch.open('public')
+ self.assertEndsWith(remote.get_stacked_on_url(), '/parent')
+
+ def test_push_smart_tags_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ rev_id = t.commit(allow_pointless=True, message='first commit')
+ t.branch.tags.set_tag('new-tag', rev_id)
+ self.reset_smart_call_log()
+ self.run_bzr(['push', self.get_url('to-one')], working_dir='from')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(11, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_push_smart_incremental_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ rev_id1 = t.commit(allow_pointless=True, message='first commit')
+ rev_id2 = t.commit(allow_pointless=True, message='second commit')
+ self.run_bzr(
+ ['push', self.get_url('to-one'), '-r1'], working_dir='from')
+ self.reset_smart_call_log()
+ self.run_bzr(['push', self.get_url('to-one')], working_dir='from')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(11, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_push_smart_with_default_stacking_url_path_segment(self):
+ # If the default stacked-on location is a path element then branches
+ # we push there over the smart server are stacked and their
+ # stacked_on_url is that exact path segment. Added to nail bug 385132.
+ self.setup_smart_server_with_call_log()
+ self.make_branch('stack-on', format='1.9')
+ self.make_bzrdir('.').get_config().set_default_stack_on(
+ '/stack-on')
+ self.make_branch('from', format='1.9')
+ out, err = self.run_bzr(['push', '-d', 'from', self.get_url('to')])
+ b = branch.Branch.open(self.get_url('to'))
+ self.assertEqual('/extra/stack-on', b.get_stacked_on_url())
+
+ def test_push_smart_with_default_stacking_relative_path(self):
+ # If the default stacked-on location is a relative path then branches
+ # we push there over the smart server are stacked and their
+ # stacked_on_url is a relative path. Added to nail bug 385132.
+ self.setup_smart_server_with_call_log()
+ self.make_branch('stack-on', format='1.9')
+ self.make_bzrdir('.').get_config().set_default_stack_on('stack-on')
+ self.make_branch('from', format='1.9')
+ out, err = self.run_bzr(['push', '-d', 'from', self.get_url('to')])
+ b = branch.Branch.open(self.get_url('to'))
+ self.assertEqual('../stack-on', b.get_stacked_on_url())
+
+ def create_simple_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('one', rev_id='r1')
+ return tree
+
+ def test_push_create_prefix(self):
+ """'bzr push --create-prefix' will create leading directories."""
+ tree = self.create_simple_tree()
+
+ self.run_bzr_error(['Parent directory of ../new/tree does not exist'],
+ 'push ../new/tree',
+ working_dir='tree')
+ self.run_bzr('push ../new/tree --create-prefix',
+ working_dir='tree')
+ new_tree = workingtree.WorkingTree.open('new/tree')
+ self.assertEqual(tree.last_revision(), new_tree.last_revision())
+ self.assertPathExists('new/tree/a')
+
+ def test_push_use_existing(self):
+ """'bzr push --use-existing-dir' can push into an existing dir.
+
+ By default, 'bzr push' will not use an existing, non-versioned dir.
+ """
+ tree = self.create_simple_tree()
+ self.build_tree(['target/'])
+
+ self.run_bzr_error(['Target directory ../target already exists',
+ 'Supply --use-existing-dir',
+ ],
+ 'push ../target', working_dir='tree')
+
+ self.run_bzr('push --use-existing-dir ../target',
+ working_dir='tree')
+
+ new_tree = workingtree.WorkingTree.open('target')
+ self.assertEqual(tree.last_revision(), new_tree.last_revision())
+ # The push should have created target/a
+ self.assertPathExists('target/a')
+
+ def test_push_use_existing_into_empty_bzrdir(self):
+ """'bzr push --use-existing-dir' into a dir with an empty .bzr dir
+ fails.
+ """
+ tree = self.create_simple_tree()
+ self.build_tree(['target/', 'target/.bzr/'])
+ self.run_bzr_error(
+ ['Target directory ../target already contains a .bzr directory, '
+ 'but it is not valid.'],
+ 'push ../target --use-existing-dir', working_dir='tree')
+
+ def test_push_onto_repo(self):
+ """We should be able to 'bzr push' into an existing bzrdir."""
+ tree = self.create_simple_tree()
+ repo = self.make_repository('repo', shared=True)
+
+ self.run_bzr('push ../repo',
+ working_dir='tree')
+
+ # Pushing onto an existing bzrdir will create a repository and
+ # branch as needed, but will only create a working tree if there was
+ # no BzrDir before.
+ self.assertRaises(errors.NoWorkingTree,
+ workingtree.WorkingTree.open, 'repo')
+ new_branch = branch.Branch.open('repo')
+ self.assertEqual(tree.last_revision(), new_branch.last_revision())
+
+ def test_push_onto_just_bzrdir(self):
+ """We don't handle when the target is just a bzrdir.
+
+ Because you shouldn't be able to create *just* a bzrdir in the wild.
+ """
+ # TODO: jam 20070109 Maybe it would be better to create the repository
+ # at this point
+ tree = self.create_simple_tree()
+ a_bzrdir = self.make_bzrdir('dir')
+
+ self.run_bzr_error(['At ../dir you have a valid .bzr control'],
+ 'push ../dir',
+ working_dir='tree')
+
+ def test_push_with_revisionspec(self):
+ """We should be able to push a revision older than the tip."""
+ tree_from = self.make_branch_and_tree('from')
+ tree_from.commit("One.", rev_id="from-1")
+ tree_from.commit("Two.", rev_id="from-2")
+
+ self.run_bzr('push -r1 ../to', working_dir='from')
+
+ tree_to = workingtree.WorkingTree.open('to')
+ repo_to = tree_to.branch.repository
+ self.assertTrue(repo_to.has_revision('from-1'))
+ self.assertFalse(repo_to.has_revision('from-2'))
+ self.assertEqual(tree_to.branch.last_revision_info()[1], 'from-1')
+
+ self.run_bzr_error(
+ ['bzr: ERROR: bzr push --revision '
+ 'takes exactly one revision identifier\n'],
+ 'push -r0..2 ../to', working_dir='from')
+
+ def create_trunk_and_feature_branch(self):
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('target',
+ format='1.9')
+ trunk_tree.commit('mainline')
+ # and a branch from it
+ branch_tree = self.make_branch_and_tree('branch',
+ format='1.9')
+ branch_tree.pull(trunk_tree.branch)
+ branch_tree.branch.set_parent(trunk_tree.branch.base)
+ # with some work on it
+ branch_tree.commit('moar work plz')
+ return trunk_tree, branch_tree
+
+ def assertPublished(self, branch_revid, stacked_on):
+ """Assert that the branch 'published' has been published correctly."""
+ published_branch = branch.Branch.open('published')
+ # The published branch refers to the mainline
+ self.assertEqual(stacked_on, published_branch.get_stacked_on_url())
+ # and the branch's work was pushed
+ self.assertTrue(published_branch.repository.has_revision(branch_revid))
+
+ def test_push_new_branch_stacked_on(self):
+ """Pushing a new branch with --stacked-on creates a stacked branch."""
+ trunk_tree, branch_tree = self.create_trunk_and_feature_branch()
+ # we publish branch_tree with a reference to the mainline.
+ out, err = self.run_bzr(['push', '--stacked-on', trunk_tree.branch.base,
+ self.get_url('published')], working_dir='branch')
+ self.assertEqual('', out)
+ self.assertEqual('Created new stacked branch referring to %s.\n' %
+ trunk_tree.branch.base, err)
+ self.assertPublished(branch_tree.last_revision(),
+ trunk_tree.branch.base)
+
+ def test_push_new_branch_stacked_uses_parent_when_no_public_url(self):
+ """When the parent has no public url the parent is used as-is."""
+ trunk_tree, branch_tree = self.create_trunk_and_feature_branch()
+ # now we do a stacked push, which should determine the public location
+ # for us.
+ out, err = self.run_bzr(['push', '--stacked',
+ self.get_url('published')], working_dir='branch')
+ self.assertEqual('', out)
+ self.assertEqual('Created new stacked branch referring to %s.\n' %
+ trunk_tree.branch.base, err)
+ self.assertPublished(branch_tree.last_revision(),
+ trunk_tree.branch.base)
+
+ def test_push_new_branch_stacked_uses_parent_public(self):
+ """Pushing a new branch with --stacked creates a stacked branch."""
+ trunk_tree, branch_tree = self.create_trunk_and_feature_branch()
+ # the trunk is published on a web server
+ self.transport_readonly_server = http_server.HttpServer
+ trunk_public = self.make_branch('public_trunk', format='1.9')
+ trunk_public.pull(trunk_tree.branch)
+ trunk_public_url = self.get_readonly_url('public_trunk')
+ br = trunk_tree.branch
+ br.set_public_branch(trunk_public_url)
+ # now we do a stacked push, which should determine the public location
+ # for us.
+ out, err = self.run_bzr(['push', '--stacked',
+ self.get_url('published')], working_dir='branch')
+ self.assertEqual('', out)
+ self.assertEqual('Created new stacked branch referring to %s.\n' %
+ trunk_public_url, err)
+ self.assertPublished(branch_tree.last_revision(), trunk_public_url)
+
+ def test_push_new_branch_stacked_no_parent(self):
+ """Pushing with --stacked and no parent branch errors."""
+ branch = self.make_branch_and_tree('branch', format='1.9')
+ # now we do a stacked push, which should fail as the place to refer to
+ # cannot be determined.
+ out, err = self.run_bzr_error(
+ ['Could not determine branch to refer to\\.'], ['push', '--stacked',
+ self.get_url('published')], working_dir='branch')
+ self.assertEqual('', out)
+ self.assertFalse(self.get_transport('published').has('.'))
+
+ def test_push_notifies_default_stacking(self):
+ self.make_branch('stack_on', format='1.6')
+ self.make_bzrdir('.').get_config().set_default_stack_on('stack_on')
+ self.make_branch('from', format='1.6')
+ out, err = self.run_bzr('push -d from to')
+ self.assertContainsRe(err,
+ 'Using default stacking branch stack_on at .*')
+
+ def test_push_stacks_with_default_stacking_if_target_is_stackable(self):
+ self.make_branch('stack_on', format='1.6')
+ self.make_bzrdir('.').get_config().set_default_stack_on('stack_on')
+ self.make_branch('from', format='pack-0.92')
+ out, err = self.run_bzr('push -d from to')
+ b = branch.Branch.open('to')
+ self.assertEqual('../stack_on', b.get_stacked_on_url())
+
+ def test_push_does_not_change_format_with_default_if_target_cannot(self):
+ self.make_branch('stack_on', format='pack-0.92')
+ self.make_bzrdir('.').get_config().set_default_stack_on('stack_on')
+ self.make_branch('from', format='pack-0.92')
+ out, err = self.run_bzr('push -d from to')
+ b = branch.Branch.open('to')
+ self.assertRaises(errors.UnstackableBranchFormat, b.get_stacked_on_url)
+
+ def test_push_doesnt_create_broken_branch(self):
+ """Pushing a new standalone branch works even when there's a default
+ stacking policy at the destination.
+
+ The new branch will preserve the repo format (even if it isn't the
+ default for the branch), and will be stacked when the repo format
+ allows (which means that the branch format isn't necessarily preserved).
+ """
+ self.make_repository('repo', shared=True, format='1.6')
+ builder = self.make_branch_builder('repo/local', format='pack-0.92')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('filename', 'f-id', 'file', 'content\n'))])
+ builder.build_snapshot('rev-2', ['rev-1'], [])
+ builder.build_snapshot('rev-3', ['rev-2'],
+ [('modify', ('f-id', 'new-content\n'))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ # Push rev-1 to "trunk", so that we can stack on it.
+ self.run_bzr('push -d repo/local trunk -r 1')
+ # Set a default stacking policy so that new branches will automatically
+ # stack on trunk.
+ self.make_bzrdir('.').get_config().set_default_stack_on('trunk')
+ # Push rev-2 to a new branch "remote". It will be stacked on "trunk".
+ out, err = self.run_bzr('push -d repo/local remote -r 2')
+ self.assertContainsRe(
+ err, 'Using default stacking branch trunk at .*')
+ # Push rev-3 onto "remote". If "remote" is not stacked and is missing the
+ # fulltext record for f-id @ rev-1, then this will fail.
+ out, err = self.run_bzr('push -d repo/local remote -r 3')
+
+ def test_push_verbose_shows_log(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('rev1')
+ out, err = self.run_bzr('push -v -d source target')
+ # initial push contains log
+ self.assertContainsRe(out, 'rev1')
+ tree.commit('rev2')
+ out, err = self.run_bzr('push -v -d source target')
+ # subsequent push contains log
+ self.assertContainsRe(out, 'rev2')
+ # subsequent log is accurate
+ self.assertNotContainsRe(out, 'rev1')
+
+ def test_push_from_subdir(self):
+ t = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/dir/', 'tree/dir/file'])
+ t.add('dir', 'dir/file')
+ t.commit('r1')
+ out, err = self.run_bzr('push ../../pushloc', working_dir='tree/dir')
+ self.assertEqual('', out)
+ self.assertEqual('Created new branch.\n', err)
+
+ def test_overwrite_tags(self):
+ """--overwrite-tags only overwrites tags, not revisions."""
+ from_tree = self.make_branch_and_tree('from')
+ from_tree.branch.tags.set_tag("mytag", "somerevid")
+ to_tree = self.make_branch_and_tree('to')
+ to_tree.branch.tags.set_tag("mytag", "anotherrevid")
+ revid1 = to_tree.commit('my commit')
+ out = self.run_bzr(['push', '-d', 'from', 'to'])
+ self.assertEquals(out,
+ ('Conflicting tags:\n mytag\n', 'No new revisions to push.\n'))
+ out = self.run_bzr(['push', '-d', 'from', '--overwrite-tags', 'to'])
+ self.assertEquals(out, ('', '1 tag updated.\n'))
+ self.assertEquals(to_tree.branch.tags.lookup_tag('mytag'),
+ 'somerevid')
+ self.assertEquals(to_tree.branch.last_revision(), revid1)
+
+
+class RedirectingMemoryTransport(memory.MemoryTransport):
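+ """Memory transport that issues redirects for selected paths.
+
+ mkdir() under /source/ redirects permanently to /target/, and anything
+ under /infinite-loop/ redirects back to itself, so the push tests below
+ can exercise both successful redirect handling and the redirect limit.
+ """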
+
+ def mkdir(self, relpath, mode=None):
+ if self._cwd == '/source/':
+ raise errors.RedirectRequested(self.abspath(relpath),
+ self.abspath('../target'),
+ is_permanent=True)
+ elif self._cwd == '/infinite-loop/':
+ raise errors.RedirectRequested(self.abspath(relpath),
+ self.abspath('../infinite-loop'),
+ is_permanent=True)
+ else:
+ return super(RedirectingMemoryTransport, self).mkdir(
+ relpath, mode)
+
+ def get(self, relpath):
+ if self.clone(relpath)._cwd == '/infinite-loop/':
+ raise errors.RedirectRequested(self.abspath(relpath),
+ self.abspath('../infinite-loop'),
+ is_permanent=True)
+ else:
+ return super(RedirectingMemoryTransport, self).get(relpath)
+
+ def _redirected_to(self, source, target):
+ # We do accept redirections
+ return transport.get_transport(target)
+
+
+class RedirectingMemoryServer(memory.MemoryServer):
+
+ def start_server(self):
+ self._dirs = {'/': None}
+ self._files = {}
+ self._locks = {}
+ self._scheme = 'redirecting-memory+%s:///' % id(self)
+ transport.register_transport(self._scheme, self._memory_factory)
+
+ def _memory_factory(self, url):
+ result = RedirectingMemoryTransport(url)
+ result._dirs = self._dirs
+ result._files = self._files
+ result._locks = self._locks
+ return result
+
+ def stop_server(self):
+ transport.unregister_transport(self._scheme, self._memory_factory)
+
+
+class TestPushRedirect(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ tests.TestCaseWithTransport.setUp(self)
+ self.memory_server = RedirectingMemoryServer()
+ self.start_server(self.memory_server)
+ # Make the branch and tree that we'll be pushing.
+ t = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ t.add('file')
+ t.commit('commit 1')
+
+ def test_push_redirects_on_mkdir(self):
+ """If the push requires a mkdir, push respects redirect requests.
+
+ This is added primarily to handle lp:/ URI support, so that users can
+ push to new branches by specifying lp:/ URIs.
+ """
+ destination_url = self.memory_server.get_url() + 'source'
+ self.run_bzr(['push', '-d', 'tree', destination_url])
+
+ local_revision = branch.Branch.open('tree').last_revision()
+ remote_revision = branch.Branch.open(
+ self.memory_server.get_url() + 'target').last_revision()
+ self.assertEqual(remote_revision, local_revision)
+
+ def test_push_gracefully_handles_too_many_redirects(self):
+ """Push fails gracefully if the mkdir generates a large number of
+ redirects.
+ """
+ destination_url = self.memory_server.get_url() + 'infinite-loop'
+ out, err = self.run_bzr_error(
+ ['Too many redirections trying to make %s\\.\n'
+ % re.escape(destination_url)],
+ ['push', '-d', 'tree', destination_url], retcode=3)
+ self.assertEqual('', out)
+
+
+class TestPushStrictMixin(object):
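+ """Helpers shared by the strict-push test cases below.
+
+ Subclasses mix this into TestCaseWithTransport and use assertPushFails
+ and assertPushSucceeds to check how --strict, --no-strict and the
+ push_strict configuration option interact with uncommitted changes.
+ """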
+
+ def make_local_branch_and_tree(self):
+ self.tree = self.make_branch_and_tree('local')
+ self.build_tree_contents([('local/file', 'initial')])
+ self.tree.add('file')
+ self.tree.commit('adding file', rev_id='added')
+ self.build_tree_contents([('local/file', 'modified')])
+ self.tree.commit('modify file', rev_id='modified')
+
+ def set_config_push_strict(self, value):
+ br = branch.Branch.open('local')
+ br.get_config_stack().set('push_strict', value)
+
+ _default_command = ['push', '../to']
+ _default_wd = 'local'
+ _default_errors = ['Working tree ".*/local/" has uncommitted '
+ 'changes \(See bzr status\)\.',]
+ _default_additional_error = 'Use --no-strict to force the push.\n'
+ _default_additional_warning = 'Uncommitted changes will not be pushed.'
+
+
+ def assertPushFails(self, args):
+ out, err = self.run_bzr_error(self._default_errors,
+ self._default_command + args,
+ working_dir=self._default_wd, retcode=3)
+ self.assertContainsRe(err, self._default_additional_error)
+
+ def assertPushSucceeds(self, args, with_warning=False, revid_to_push=None):
+ if with_warning:
+ error_regexes = self._default_errors
+ else:
+ error_regexes = []
+ out, err = self.run_bzr(self._default_command + args,
+ working_dir=self._default_wd,
+ error_regexes=error_regexes)
+ if with_warning:
+ self.assertContainsRe(err, self._default_additional_warning)
+ else:
+ self.assertNotContainsRe(err, self._default_additional_warning)
+ branch_from = branch.Branch.open(self._default_wd)
+ if revid_to_push is None:
+ revid_to_push = branch_from.last_revision()
+ branch_to = branch.Branch.open('to')
+ repo_to = branch_to.repository
+ self.assertTrue(repo_to.has_revision(revid_to_push))
+ self.assertEqual(revid_to_push, branch_to.last_revision())
+
+
+
+class TestPushStrictWithoutChanges(tests.TestCaseWithTransport,
+ TestPushStrictMixin):
+
+ def setUp(self):
+ super(TestPushStrictWithoutChanges, self).setUp()
+ self.make_local_branch_and_tree()
+
+ def test_push_default(self):
+ self.assertPushSucceeds([])
+
+ def test_push_strict(self):
+ self.assertPushSucceeds(['--strict'])
+
+ def test_push_no_strict(self):
+ self.assertPushSucceeds(['--no-strict'])
+
+ def test_push_config_var_strict(self):
+ self.set_config_push_strict('true')
+ self.assertPushSucceeds([])
+
+ def test_push_config_var_no_strict(self):
+ self.set_config_push_strict('false')
+ self.assertPushSucceeds([])
+
+
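+# Each scenario names the setup method on TestPushStrictWithChanges that
+# introduces the corresponding kind of local change.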
+strict_push_change_scenarios = [
+ ('uncommitted',
+ dict(_changes_type='_uncommitted_changes')),
+ ('pending-merges',
+ dict(_changes_type='_pending_merges')),
+ ('out-of-sync-trees',
+ dict(_changes_type='_out_of_sync_trees')),
+ ]
+
+
+class TestPushStrictWithChanges(tests.TestCaseWithTransport,
+ TestPushStrictMixin):
+
+ scenarios = strict_push_change_scenarios
+ _changes_type = None # Set by load_tests
+
+ def setUp(self):
+ super(TestPushStrictWithChanges, self).setUp()
+ # Apply the changes defined in load_tests: one of _uncommitted_changes,
+ # _pending_merges or _out_of_sync_trees
+ getattr(self, self._changes_type)()
+
+ def _uncommitted_changes(self):
+ self.make_local_branch_and_tree()
+ # Make a change without committing it
+ self.build_tree_contents([('local/file', 'in progress')])
+
+ def _pending_merges(self):
+ self.make_local_branch_and_tree()
+ # Create 'other' branch containing a new file
+ other_bzrdir = self.tree.bzrdir.sprout('other')
+ other_tree = other_bzrdir.open_workingtree()
+ self.build_tree_contents([('other/other-file', 'other')])
+ other_tree.add('other-file')
+ other_tree.commit('other commit', rev_id='other')
+ # Merge and revert, leaving a pending merge
+ self.tree.merge_from_branch(other_tree.branch)
+ self.tree.revert(filenames=['other-file'], backups=False)
+
+ def _out_of_sync_trees(self):
+ self.make_local_branch_and_tree()
+ self.run_bzr(['checkout', '--lightweight', 'local', 'checkout'])
+ # Make a change and commit it
+ self.build_tree_contents([('local/file', 'modified in local')])
+ self.tree.commit('modify file', rev_id='modified-in-local')
+ # Exercise commands from the checkout directory
+ self._default_wd = 'checkout'
+ self._default_errors = ["Working tree is out of date, please run"
+ " 'bzr update'\.",]
+
+ def test_push_default(self):
+ self.assertPushSucceeds([], with_warning=True)
+
+ def test_push_with_revision(self):
+ self.assertPushSucceeds(['-r', 'revid:added'], revid_to_push='added')
+
+ def test_push_no_strict(self):
+ self.assertPushSucceeds(['--no-strict'])
+
+ def test_push_strict_with_changes(self):
+ self.assertPushFails(['--strict'])
+
+ def test_push_respect_config_var_strict(self):
+ self.set_config_push_strict('true')
+ self.assertPushFails([])
+
+ def test_push_bogus_config_var_ignored(self):
+ self.set_config_push_strict("I don't want you to be strict")
+ self.assertPushSucceeds([], with_warning=True)
+
+ def test_push_no_strict_command_line_override_config(self):
+ self.set_config_push_strict('yES')
+ self.assertPushFails([])
+ self.assertPushSucceeds(['--no-strict'])
+
+ def test_push_strict_command_line_override_config(self):
+ self.set_config_push_strict('oFF')
+ self.assertPushFails(['--strict'])
+ self.assertPushSucceeds([])
+
+
+class TestPushForeign(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestPushForeign, self).setUp()
+ test_foreign.register_dummy_foreign_for_test(self)
+
+ def make_dummy_builder(self, relpath):
+ builder = self.make_branch_builder(
+ relpath, format=test_foreign.DummyForeignVcsDirFormat())
+ builder.build_snapshot('revid', None,
+ [('add', ('', 'TREE_ROOT', 'directory', None)),
+ ('add', ('foo', 'fooid', 'file', 'bar'))])
+ return builder
+
+ def test_no_roundtripping(self):
+ target_branch = self.make_dummy_builder('dp').get_branch()
+ source_tree = self.make_branch_and_tree("dc")
+ output, error = self.run_bzr("push -d dc dp", retcode=3)
+ self.assertEquals("", output)
+ self.assertEquals(error, "bzr: ERROR: It is not possible to losslessly"
+ " push to dummy. You may want to use dpush instead.\n")
+
+
+class TestPushOutput(script.TestCaseWithTransportAndScript):
+
+ def test_push_log_format(self):
+ self.run_script("""
+ $ bzr init trunk
+ Created a standalone tree (format: 2a)
+ $ cd trunk
+ $ echo foo > file
+ $ bzr add
+ adding file
+ $ bzr commit -m 'we need some foo'
+ 2>Committing to:...trunk/
+ 2>added file
+ 2>Committed revision 1.
+ $ bzr init ../feature
+ Created a standalone tree (format: 2a)
+ $ bzr push -v ../feature -Olog_format=line
+ Added Revisions:
+ 1: jrandom@example.com ...we need some foo
+ 2>All changes applied successfully.
+ 2>Pushed up to revision 1.
+ """)
diff --git a/bzrlib/tests/blackbox/test_re_sign.py b/bzrlib/tests/blackbox/test_re_sign.py
new file mode 100644
index 0000000..77a5414
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_re_sign.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr re-sign.
+"""
+
+from bzrlib import (
+ gpg,
+ tests,
+ )
+from bzrlib.controldir import ControlDir
+from bzrlib.testament import Testament
+
+
+class ReSign(tests.TestCaseInTempDir):
+
+ def monkey_patch_gpg(self):
+ """Monkey patch the gpg signing strategy to be a loopback.
+
+ This also registers the cleanup, so that we will revert to
+ the original gpg strategy when done.
+ """
+ # monkey patch gpg signing mechanism
+ self.overrideAttr(gpg, 'GPGStrategy', gpg.LoopbackGPGStrategy)
+
+ def setup_tree(self):
+ wt = ControlDir.create_standalone_workingtree('.')
+ wt.commit("base A", allow_pointless=True, rev_id='A')
+ wt.commit("base B", allow_pointless=True, rev_id='B')
+ wt.commit("base C", allow_pointless=True, rev_id='C')
+
+ return wt
+
+ def assertEqualSignature(self, repo, revision_id):
+ """Assert a signature is stored correctly in repository."""
+ self.assertEqual(
+ '-----BEGIN PSEUDO-SIGNED CONTENT-----\n' +
+ Testament.from_revision(repo, revision_id).as_short_text() +
+ '-----END PSEUDO-SIGNED CONTENT-----\n',
+ repo.get_signature_text(revision_id))
+
+ def test_resign(self):
+ # Test re-signing of data.
+ wt = self.setup_tree()
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+ self.run_bzr('re-sign -r revid:A')
+
+ self.assertEqualSignature(repo, 'A')
+
+ self.run_bzr('re-sign B')
+ self.assertEqualSignature(repo, 'B')
+
+ def test_resign_range(self):
+ wt = self.setup_tree()
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+ self.run_bzr('re-sign -r 1..')
+ self.assertEqualSignature(repo, 'A')
+ self.assertEqualSignature(repo, 'B')
+ self.assertEqualSignature(repo, 'C')
+
+ def test_resign_multiple(self):
+ wt = self.setup_tree()
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+ self.run_bzr('re-sign A B C')
+ self.assertEqualSignature(repo, 'A')
+ self.assertEqualSignature(repo, 'B')
+ self.assertEqualSignature(repo, 'C')
+
+ def test_resign_directory(self):
+ """Test --directory option"""
+ wt = ControlDir.create_standalone_workingtree('a')
+ wt.commit("base A", allow_pointless=True, rev_id='A')
+ wt.commit("base B", allow_pointless=True, rev_id='B')
+ wt.commit("base C", allow_pointless=True, rev_id='C')
+ repo = wt.branch.repository
+ self.monkey_patch_gpg()
+ self.run_bzr('re-sign --directory=a -r revid:A')
+ self.assertEqualSignature(repo, 'A')
+ self.run_bzr('re-sign -d a B')
+ self.assertEqualSignature(repo, 'B')
diff --git a/bzrlib/tests/blackbox/test_reconcile.py b/bzrlib/tests/blackbox/test_reconcile.py
new file mode 100644
index 0000000..1b30217
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_reconcile.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black box tests for the reconcile command."""
+
+
+from bzrlib import (
+ controldir,
+ inventory,
+ tests,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TrivialTest(tests.TestCaseWithTransport):
+
+ def test_trivial_reconcile(self):
+ t = controldir.ControlDir.create_standalone_workingtree('.')
+ (out, err) = self.run_bzr('reconcile')
+ if t.branch.repository._reconcile_backsup_inventory:
+ does_backup_text = "Inventory ok.\n"
+ else:
+ does_backup_text = ""
+ self.assertEqualDiff(out, "Reconciling branch %s\n"
+ "revision_history ok.\n"
+ "Reconciling repository %s\n"
+ "%s"
+ "Reconciliation complete.\n" %
+ (t.branch.base,
+ t.bzrdir.root_transport.base,
+ does_backup_text))
+ self.assertEqualDiff(err, "")
+
+ def test_does_something_reconcile(self):
+ t = controldir.ControlDir.create_standalone_workingtree('.')
+ # an empty inventory with no revision will trigger reconciliation.
+ repo = t.branch.repository
+ inv = inventory.Inventory(revision_id='missing')
+ inv.root.revision = 'missing'
+ repo.lock_write()
+ repo.start_write_group()
+ repo.add_inventory('missing', inv, [])
+ repo.commit_write_group()
+ repo.unlock()
+ (out, err) = self.run_bzr('reconcile')
+ if repo._reconcile_backsup_inventory:
+ does_backup_text = (
+ "Backup Inventory created.\n"
+ "Inventory regenerated.\n")
+ else:
+ does_backup_text = ""
+ expected = ("Reconciling branch %s\n"
+ "revision_history ok.\n"
+ "Reconciling repository %s\n"
+ "%s"
+ "Reconciliation complete.\n" %
+ (t.branch.base,
+ t.bzrdir.root_transport.base,
+ does_backup_text))
+ self.assertEqualDiff(expected, out)
+ self.assertEqualDiff(err, "")
+
+
+class TestSmartServerReconcile(tests.TestCaseWithTransport):
+
+ def test_simple_reconcile(self):
+ self.setup_smart_server_with_call_log()
+ self.make_branch('branch')
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['reconcile', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(10, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_reconfigure.py b/bzrlib/tests/blackbox/test_reconfigure.py
new file mode 100644
index 0000000..c655b97
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_reconfigure.py
@@ -0,0 +1,271 @@
+# Copyright (C) 2007-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ controldir,
+ errors,
+ tests,
+ workingtree,
+ )
+from bzrlib.tests.script import TestCaseWithTransportAndScript
+
+
+class TestReconfigure(TestCaseWithTransportAndScript):
+
+ def test_no_type(self):
+ branch = self.make_branch('branch')
+ self.run_bzr_error(['No target configuration specified'],
+ 'reconfigure branch')
+
+ def test_branch_to_tree(self):
+ branch = self.make_branch('branch')
+ self.run_bzr('reconfigure --tree branch')
+ tree = workingtree.WorkingTree.open('branch')
+
+ def test_tree_to_branch(self):
+ tree = self.make_branch_and_tree('tree')
+ self.run_bzr('reconfigure --branch tree')
+ self.assertRaises(errors.NoWorkingTree,
+ workingtree.WorkingTree.open, 'tree')
+
+ def test_branch_to_specified_checkout(self):
+ branch = self.make_branch('branch')
+ parent = self.make_branch('parent')
+ self.run_bzr('reconfigure branch --checkout --bind-to parent')
+
+ def test_force(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add('file')
+ self.run_bzr_error(['Working tree ".*" has uncommitted changes'],
+ 'reconfigure --branch tree')
+ self.run_bzr('reconfigure --force --branch tree')
+
+ def test_lightweight_checkout_to_checkout(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ self.run_bzr('reconfigure --checkout checkout')
+
+ def test_lightweight_checkout_to_tree(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ self.run_bzr('reconfigure --tree checkout')
+
+ def test_no_args(self):
+ branch = self.make_branch('branch')
+ self.run_bzr_error(['No target configuration specified'],
+ 'reconfigure', working_dir='branch')
+
+ def test_checkout_to_lightweight_checkout(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout')
+ self.run_bzr('reconfigure --lightweight-checkout checkout')
+
+ def test_standalone_to_use_shared(self):
+ self.build_tree(['repo/'])
+ tree = self.make_branch_and_tree('repo/tree')
+ repo = self.make_repository('repo', shared=True)
+ self.run_bzr('reconfigure --use-shared', working_dir='repo/tree')
+ tree = workingtree.WorkingTree.open('repo/tree')
+ self.assertNotEqual(tree.bzrdir.root_transport.base,
+ tree.branch.repository.bzrdir.root_transport.base)
+
+ def test_use_shared_to_standalone(self):
+ repo = self.make_repository('repo', shared=True)
+ branch = controldir.ControlDir.create_branch_convenience('repo/tree')
+ self.assertNotEqual(branch.bzrdir.root_transport.base,
+ branch.repository.bzrdir.root_transport.base)
+ self.run_bzr('reconfigure --standalone', working_dir='repo/tree')
+ tree = workingtree.WorkingTree.open('repo/tree')
+ self.assertEqual(tree.bzrdir.root_transport.base,
+ tree.branch.repository.bzrdir.root_transport.base)
+
+ def test_make_with_trees(self):
+ repo = self.make_repository('repo', shared=True)
+ repo.set_make_working_trees(False)
+ self.run_bzr('reconfigure --with-trees', working_dir='repo')
+ self.assertIs(True, repo.make_working_trees())
+
+ def test_make_with_trees_already_trees(self):
+ repo = self.make_repository('repo', shared=True)
+ repo.set_make_working_trees(True)
+ self.run_bzr_error([" already creates working trees"],
+ 'reconfigure --with-trees repo')
+
+ def test_make_without_trees(self):
+ repo = self.make_repository('repo', shared=True)
+ repo.set_make_working_trees(True)
+ self.run_bzr('reconfigure --with-no-trees', working_dir='repo')
+ self.assertIs(False, repo.make_working_trees())
+
+ def test_make_without_trees_already_no_trees(self):
+ repo = self.make_repository('repo', shared=True)
+ repo.set_make_working_trees(False)
+ self.run_bzr_error([" already doesn't create working trees"],
+ 'reconfigure --with-no-trees repo')
+
+ def test_make_with_trees_nonshared_repo(self):
+ branch = self.make_branch('branch')
+ self.run_bzr_error(
+ ["Requested reconfiguration of '.*' is not supported"],
+ 'reconfigure --with-trees branch')
+
+ def test_make_without_trees_leaves_tree_alone(self):
+ repo = self.make_repository('repo', shared=True)
+ branch = controldir.ControlDir.create_branch_convenience('repo/branch')
+ tree = workingtree.WorkingTree.open('repo/branch')
+ self.build_tree(['repo/branch/foo'])
+ tree.add('foo')
+ self.run_bzr('reconfigure --with-no-trees --force',
+ working_dir='repo/branch')
+ self.assertPathExists('repo/branch/foo')
+ tree = workingtree.WorkingTree.open('repo/branch')
+
+ def test_shared_format_to_standalone(self, format=None):
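+ # This also doubles as a helper for the format-specific tests below,
+ # which call it with an explicit repository format.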
+ repo = self.make_repository('repo', shared=True, format=format)
+ branch = controldir.ControlDir.create_branch_convenience('repo/tree')
+ self.assertNotEqual(branch.bzrdir.root_transport.base,
+ branch.repository.bzrdir.root_transport.base)
+ tree = workingtree.WorkingTree.open('repo/tree')
+ self.build_tree_contents([('repo/tree/file', 'foo\n')])
+ tree.add(['file'])
+ tree.commit('added file')
+ self.run_bzr('reconfigure --standalone', working_dir='repo/tree')
+ tree = workingtree.WorkingTree.open('repo/tree')
+ self.build_tree_contents([('repo/tree/file', 'bar\n')])
+ self.check_file_contents('repo/tree/file', 'bar\n')
+ self.run_bzr('revert', working_dir='repo/tree')
+ self.check_file_contents('repo/tree/file', 'foo\n')
+ self.assertEqual(tree.bzrdir.root_transport.base,
+ tree.branch.repository.bzrdir.root_transport.base)
+
+ def test_shared_knit_to_standalone(self):
+ self.test_shared_format_to_standalone('knit')
+
+ def test_shared_pack092_to_standalone(self):
+ self.test_shared_format_to_standalone('pack-0.92')
+
+ def test_shared_rich_root_pack_to_standalone(self):
+ self.test_shared_format_to_standalone('rich-root-pack')
+
+ def test_lightweight_format_checkout_to_tree(self, format=None):
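+ # Like test_shared_format_to_standalone above, this is also reused by
+ # the format-specific lightweight-checkout tests that follow.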
+ branch = self.make_branch('branch', format=format)
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ tree = workingtree.WorkingTree.open('checkout')
+ self.build_tree_contents([('checkout/file', 'foo\n')])
+ tree.add(['file'])
+ tree.commit('added file')
+ self.run_bzr('reconfigure --tree', working_dir='checkout')
+ tree = workingtree.WorkingTree.open('checkout')
+ self.build_tree_contents([('checkout/file', 'bar\n')])
+ self.check_file_contents('checkout/file', 'bar\n')
+ self.run_bzr('revert', working_dir='checkout')
+ self.check_file_contents('checkout/file', 'foo\n')
+
+ def test_lightweight_knit_checkout_to_tree(self):
+ self.test_lightweight_format_checkout_to_tree('knit')
+
+ def test_lightweight_pack092_checkout_to_tree(self):
+ self.test_lightweight_format_checkout_to_tree('pack-0.92')
+
+ def test_lightweight_rich_root_pack_checkout_to_tree(self):
+ self.test_lightweight_format_checkout_to_tree('rich-root-pack')
+
+ def test_branch_and_use_shared(self):
+ self.run_script("""\
+$ bzr init -q branch
+$ echo foo > branch/foo
+$ bzr add -q branch/foo
+$ bzr commit -q -m msg branch
+$ bzr init-repo -q .
+$ bzr reconfigure --branch --use-shared branch
+$ bzr info branch
+Repository branch (format: ...)
+Location:
+ shared repository: .
+ repository branch: branch
+""")
+
+ def test_use_shared_and_branch(self):
+ self.run_script("""\
+$ bzr init -q branch
+$ echo foo > branch/foo
+$ bzr add -q branch/foo
+$ bzr commit -q -m msg branch
+$ bzr init-repo -q .
+$ bzr reconfigure --use-shared --branch branch
+$ bzr info branch
+Repository branch (format: ...)
+Location:
+ shared repository: .
+ repository branch: branch
+""")
+
+
+class TestReconfigureStacking(tests.TestCaseWithTransport):
+
+ def test_reconfigure_stacking(self):
+ """Test a fairly realistic scenario for stacking:
+
+ * make a branch with some history
+ * branch it
+ * make the second branch stacked on the first
+ * commit in the second
+ * then make the second unstacked, so it has to fill in history from
+ the original fallback lying underneath its original content
+
+ See discussion in <https://bugs.launchpad.net/bzr/+bug/391411>
+ """
+ # there are also per_branch tests that exercise remote operation etc
+ tree_1 = self.make_branch_and_tree('b1', format='2a')
+ self.build_tree(['b1/foo'])
+ tree_1.add(['foo'])
+ tree_1.commit('add foo')
+ branch_1 = tree_1.branch
+ # now branch and commit again
+ bzrdir_2 = tree_1.bzrdir.sprout('b2')
+ tree_2 = bzrdir_2.open_workingtree()
+ branch_2 = tree_2.branch
+ # now reconfigure to be stacked
+ out, err = self.run_bzr('reconfigure --stacked-on b1 b2')
+ self.assertContainsRe(out, '^.*/b2/ is now stacked on ../b1\n$')
+ self.assertEquals('', err)
+ # can also give the absolute URL of the branch, and it gets stored
+ # as a relative path if possible
+ out, err = self.run_bzr('reconfigure --stacked-on %s b2'
+ % (self.get_url('b1'),))
+ self.assertContainsRe(out, '^.*/b2/ is now stacked on ../b1\n$')
+ self.assertEquals('', err)
+ # Refresh the branch as 'reconfigure' modified it
+ branch_2 = branch_2.bzrdir.open_branch()
+ # It should be given a relative URL to the destination, if possible,
+ # because that's most likely to work across different transports
+ self.assertEquals('../b1', branch_2.get_stacked_on_url())
+ # commit, and it should be stored into b2's repo
+ self.build_tree_contents([('foo', 'new foo')])
+ tree_2.commit('update foo')
+ # Now turn it off again
+ out, err = self.run_bzr('reconfigure --unstacked b2')
+ self.assertContainsRe(out,
+ '^.*/b2/ is now not stacked\n$')
+ self.assertEquals('', err)
+ # Refresh the branch as 'reconfigure' modified it
+ branch_2 = branch_2.bzrdir.open_branch()
+ self.assertRaises(errors.NotStacked, branch_2.get_stacked_on_url)
+
+ # XXX: Needs a test for reconfiguring stacking and shape at the same time;
+ # no branch at location; stacked-on is not a branch; quiet mode.
+ # -- mbp 20090706
diff --git a/bzrlib/tests/blackbox/test_reference.py b/bzrlib/tests/blackbox/test_reference.py
new file mode 100644
index 0000000..fb49b00
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_reference.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ )
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestReference(TestCaseWithTransport):
+
+ def get_default_format(self):
+ format = controldir.format_registry.make_bzrdir('1.9')
+ format.set_branch_format(_mod_branch.BzrBranchFormat8())
+ return format
+
+ def test_no_args_lists(self):
+ branch = self.make_branch('branch')
+ branch.set_reference_info('file-id', 'path', 'http://example.org')
+ branch.set_reference_info('file-id2', 'lath', 'http://example.org/2')
+ out, err = self.run_bzr('reference', working_dir='branch')
+ lines = out.splitlines()
+ self.assertEqual('lath http://example.org/2', lines[0])
+ self.assertEqual('path http://example.org', lines[1])
+
+ def make_tree_with_reference(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/newpath'])
+ tree.add('newpath', 'file-id')
+ tree.branch.set_reference_info('file-id', 'path', 'http://example.org')
+ tree.branch.set_reference_info('file-id2', 'lath',
+ 'http://example.org/2')
+ return tree
+
+ def test_uses_working_tree_location(self):
+ tree = self.make_tree_with_reference()
+ out, err = self.run_bzr('reference', working_dir='tree')
+ self.assertContainsRe(out, 'newpath http://example.org\n')
+
+ def test_uses_basis_tree_location(self):
+ tree = self.make_tree_with_reference()
+ tree.commit('add newpath')
+ tree.bzrdir.destroy_workingtree()
+ out, err = self.run_bzr('reference', working_dir='tree')
+ self.assertContainsRe(out, 'newpath http://example.org\n')
+
+ def test_one_arg_displays(self):
+ tree = self.make_tree_with_reference()
+ out, err = self.run_bzr('reference newpath', working_dir='tree')
+ self.assertEqual('newpath http://example.org\n', out)
+
+ def test_one_arg_uses_containing_tree(self):
+ tree = self.make_tree_with_reference()
+ out, err = self.run_bzr('reference tree/newpath')
+ self.assertEqual('newpath http://example.org\n', out)
+
+ def test_two_args_sets(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add('file', 'file-id')
+ out, err = self.run_bzr('reference tree/file http://example.org')
+ path, location = tree.branch.get_reference_info('file-id')
+ self.assertEqual('http://example.org', location)
+ self.assertEqual('file', path)
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_missing_file(self):
+ tree = self.make_branch_and_tree('tree')
+ out, err = self.run_bzr('reference file http://example.org',
+ working_dir='tree', retcode=3)
+ self.assertEqual('bzr: ERROR: file is not versioned.\n', err)
diff --git a/bzrlib/tests/blackbox/test_remember_option.py b/bzrlib/tests/blackbox/test_remember_option.py
new file mode 100644
index 0000000..91ec87c
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_remember_option.py
@@ -0,0 +1,208 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ branch,
+ urlutils,
+ )
+from bzrlib.tests import (
+ script,
+ )
+
+
+class TestRememberMixin(object):
+ """Check that --remember and --no-remember control whether locations are saved."""
+
+ # the command to run (expecting additional arguments from the tests)
+ command = []
+ # the dir where the command should be run (it should contain a branch for
+ # which the tested locations are/will be set)
+ working_dir = None
+ # argument list for the first command invocation
+ first_use_args = []
+ # argument list for the next command invocation
+ next_uses_args = []
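+ # Subclasses must also provide setup_next_uses() and assertLocations(), which the tests below rely on.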
+
+ def do_command(self, *args):
+ # We always expect the same result here and care only about the
+ # arguments used and their consequences on the remembered locations
+ out, err = self.run_bzr(self.command + list(args),
+ working_dir=self.working_dir)
+
+ def test_first_use_no_option(self):
+ self.do_command(*self.first_use_args)
+ self.assertLocations(self.first_use_args)
+
+ def test_first_use_remember(self):
+ self.do_command('--remember', *self.first_use_args)
+ self.assertLocations(self.first_use_args)
+
+ def test_first_use_no_remember(self):
+ self.do_command('--no-remember', *self.first_use_args)
+ self.assertLocations([])
+
+ def test_next_uses_no_option(self):
+ self.setup_next_uses()
+ self.do_command(*self.next_uses_args)
+ self.assertLocations(self.first_use_args)
+
+ def test_next_uses_remember(self):
+ self.setup_next_uses()
+ self.do_command('--remember', *self.next_uses_args)
+ self.assertLocations(self.next_uses_args)
+
+ def test_next_uses_no_remember(self):
+ self.setup_next_uses()
+ self.do_command('--no-remember', *self.next_uses_args)
+ self.assertLocations(self.first_use_args)
+
+
+class TestSendRemember(script.TestCaseWithTransportAndScript,
+ TestRememberMixin):
+
+ working_dir = 'work'
+ command = ['send', '-o-',]
+ first_use_args = ['../parent', '../grand_parent',]
+ next_uses_args = ['../new_parent', '../new_grand_parent']
+
+ def setUp(self):
+ super(TestSendRemember, self).setUp()
+ self.run_script('''
+ $ bzr init grand_parent
+ $ cd grand_parent
+ $ echo grand_parent > file
+ $ bzr add
+ $ bzr commit -m 'initial commit'
+ $ cd ..
+ $ bzr branch grand_parent parent
+ $ cd parent
+ $ echo parent > file
+ $ bzr commit -m 'parent'
+ $ cd ..
+ $ bzr branch parent %(working_dir)s
+ $ cd %(working_dir)s
+ $ echo %(working_dir)s > file
+ $ bzr commit -m '%(working_dir)s'
+ $ cd ..
+ ''' % {'working_dir': self.working_dir},
+ null_output_matches_anything=True)
+
+ def setup_next_uses(self):
+ # Do a first send that remembers the locations
+ self.do_command(*self.first_use_args)
+ # Now create some new targets
+ self.run_script('''
+ $ bzr branch grand_parent new_grand_parent
+ $ bzr branch parent new_parent
+ ''',
+ null_output_matches_anything=True)
+
+ def assertLocations(self, expected_locations):
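+ # For 'send', the remembered locations are the submit branch and the public branch, in that order.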
+ if not expected_locations:
+ expected_submit_branch, expected_public_branch = None, None
+ else:
+ expected_submit_branch, expected_public_branch = expected_locations
+ br, _ = branch.Branch.open_containing(self.working_dir)
+ self.assertEquals(expected_submit_branch, br.get_submit_branch())
+ self.assertEquals(expected_public_branch, br.get_public_branch())
+
+
+class TestPushRemember(script.TestCaseWithTransportAndScript,
+ TestRememberMixin):
+
+ working_dir = 'work'
+ command = ['push',]
+ first_use_args = ['../target',]
+ next_uses_args = ['../new_target']
+
+ def setUp(self):
+ super(TestPushRemember, self).setUp()
+ self.run_script('''
+ $ bzr init %(working_dir)s
+ $ cd %(working_dir)s
+ $ echo some content > file
+ $ bzr add
+ $ bzr commit -m 'initial commit'
+ $ cd ..
+ ''' % {'working_dir': self.working_dir},
+ null_output_matches_anything=True)
+
+ def setup_next_uses(self):
+ # Do a first push that remembers the location
+ self.do_command(*self.first_use_args)
+ # Now create some new content
+ self.run_script('''
+ $ cd %(working_dir)s
+ $ echo new content > file
+ $ bzr commit -m 'new content'
+ $ cd ..
+ ''' % {'working_dir': self.working_dir},
+ null_output_matches_anything=True)
+
+ def assertLocations(self, expected_locations):
+ br, _ = branch.Branch.open_containing(self.working_dir)
+ if not expected_locations:
+ self.assertEquals(None, br.get_push_location())
+ else:
+ expected_push_location = expected_locations[0]
+ push_location = urlutils.relative_url(br.base,
+ br.get_push_location())
+ self.assertIsSameRealPath(expected_push_location, push_location)
+
+
+class TestPullRemember(script.TestCaseWithTransportAndScript,
+ TestRememberMixin):
+
+ working_dir = 'work'
+ command = ['pull',]
+ first_use_args = ['../parent',]
+ next_uses_args = ['../new_parent']
+
+ def setUp(self):
+ super(TestPullRemember, self).setUp()
+ self.run_script('''
+ $ bzr init parent
+ $ cd parent
+ $ echo parent > file
+ $ bzr add
+ $ bzr commit -m 'initial commit'
+ $ cd ..
+ $ bzr init %(working_dir)s
+ ''' % {'working_dir': self.working_dir},
+ null_output_matches_anything=True)
+
+ def setup_next_uses(self):
+ # Do a first pull that remembers the location
+ self.do_command(*self.first_use_args)
+ # Now create some new content
+ self.run_script('''
+ $ bzr branch parent new_parent
+ $ cd new_parent
+ $ echo new parent > file
+ $ bzr commit -m 'new parent'
+ $ cd ..
+ ''' % {'working_dir': self.working_dir},
+ null_output_matches_anything=True)
+
+ def assertLocations(self, expected_locations):
+ br, _ = branch.Branch.open_containing(self.working_dir)
+ if not expected_locations:
+ self.assertEquals(None, br.get_parent())
+ else:
+ expected_pull_location = expected_locations[0]
+ pull_location = urlutils.relative_url(br.base, br.get_parent())
+ self.assertIsSameRealPath(expected_pull_location, pull_location)
diff --git a/bzrlib/tests/blackbox/test_remerge.py b/bzrlib/tests/blackbox/test_remerge.py
new file mode 100644
index 0000000..d3dcc47
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_remerge.py
@@ -0,0 +1,125 @@
+# Copyright (C) 2006, 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.workingtree import WorkingTree
+
+
+class TestRemerge(TestCaseWithTransport):
+
+ def make_file(self, name, contents):
+ with open(name, 'wb') as f:
+ f.write(contents)
+
+ def create_conflicts(self):
+ """Create a conflicted tree"""
+ os.mkdir('base')
+ self.make_file('base/hello', "hi world")
+ self.make_file('base/answer', "42")
+ self.run_bzr('init', working_dir='base')
+ self.run_bzr('add', working_dir='base')
+ self.run_bzr('commit -m base', working_dir='base')
+ self.run_bzr('branch base other')
+ self.run_bzr('branch base this')
+ self.make_file('other/hello', "Hello.")
+ self.make_file('other/answer', "Is anyone there?")
+ self.run_bzr('commit -m other', working_dir='other')
+ self.make_file('this/hello', "Hello, world")
+ self.run_bzr('mv answer question', working_dir='this')
+ self.make_file('this/question',
+ "What do you get when you multiply six times nine?")
+ self.run_bzr('commit -m this', working_dir='this')
+
+ def test_remerge(self):
+ """Remerge command works as expected"""
+ self.create_conflicts()
+ self.run_bzr('merge ../other --show-base',
+ retcode=1, working_dir='this')
+ conflict_text = open('this/hello').read()
+ self.assertTrue('|||||||' in conflict_text)
+ self.assertTrue('hi world' in conflict_text)
+
+ self.run_bzr_error(['conflicts encountered'], 'remerge',
+ retcode=1, working_dir='this')
+ with open('this/hello') as f:
+ conflict_text = f.read()
+ self.assertFalse('|||||||' in conflict_text)
+ self.assertFalse('hi world' in conflict_text)
+
+ os.unlink('this/hello.OTHER')
+ os.unlink('this/question.OTHER')
+
+ self.run_bzr_error(['jello is not versioned'],
+ 'remerge jello --merge-type weave', working_dir='this')
+ self.run_bzr_error(['conflicts encountered'],
+ 'remerge hello --merge-type weave',
+ retcode=1, working_dir='this')
+
+ self.assertPathExists('this/hello.OTHER')
+ self.assertPathDoesNotExist('this/question.OTHER')
+
+ file_id = self.run_bzr('file-id hello', working_dir='this')[0]
+ self.run_bzr_error(['hello.THIS is not versioned'],
+ 'file-id hello.THIS', working_dir='this')
+
+ self.run_bzr_error(['conflicts encountered'],
+ 'remerge --merge-type weave',
+ retcode=1, working_dir='this')
+
+ self.assertPathExists('this/hello.OTHER')
+ self.assertPathExists('this/hello.BASE')
+ with open('this/hello') as f:
+ conflict_text = f.read()
+ self.assertFalse('|||||||' in conflict_text)
+ self.assertFalse('hi world' in conflict_text)
+
+ self.run_bzr_error(['Showing base is not supported.*Weave'],
+ 'remerge . --merge-type weave --show-base',
+ working_dir='this')
+ self.run_bzr_error(["Can't reprocess and show base"],
+ 'remerge . --show-base --reprocess',
+ working_dir='this')
+ self.run_bzr_error(['conflicts encountered'],
+ 'remerge . --merge-type weave --reprocess',
+ retcode=1, working_dir='this')
+ self.run_bzr_error(['conflicts encountered'],
+ 'remerge hello --show-base',
+ retcode=1, working_dir='this')
+ self.run_bzr_error(['conflicts encountered'],
+ 'remerge hello --reprocess',
+ retcode=1, working_dir='this')
+
+ self.run_bzr('resolve --all', working_dir='this')
+ self.run_bzr('commit -m done', working_dir='this')
+
+ self.run_bzr_error(['remerge only works after normal merges',
+ 'Not cherrypicking or multi-merges'],
+ 'remerge', working_dir='this')
+
+ def test_conflicts(self):
+ self.create_conflicts()
+ self.run_bzr('merge ../other', retcode=1, working_dir='this')
+ wt = WorkingTree.open('this')
+ self.assertEqual(2, len(wt.conflicts()))
+ self.run_bzr('remerge', retcode=1, working_dir='this')
+ wt = WorkingTree.open('this')
+ self.assertEqual(2, len(wt.conflicts()))
+ self.run_bzr('remerge hello', retcode=1, working_dir='this')
+ wt = WorkingTree.open('this')
+ self.assertEqual(2, len(wt.conflicts()))
diff --git a/bzrlib/tests/blackbox/test_remove.py b/bzrlib/tests/blackbox/test_remove.py
new file mode 100644
index 0000000..a67ee02
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_remove.py
@@ -0,0 +1,289 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+import sys
+
+from bzrlib.tests import (
+ script,
+ features,
+ TestCaseWithTransport,
+ TestSkipped,
+ )
+from bzrlib.workingtree import WorkingTree
+from bzrlib import osutils
+
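+# Shorthand fixtures used throughout this module: file 'a', directory 'b/'
+# containing file 'b/c', and directory 'd/'; '_id' is the suffix used to
+# build the corresponding file ids.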
+_id='-id'
+a='a'
+b='b/'
+c='b/c'
+d='d/'
+files=(a, b, c, d)
+
+
+class TestRemove(TestCaseWithTransport):
+
+ def _make_tree_and_add(self, paths):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ try:
+ self.build_tree(paths)
+ for path in paths:
+ file_id=str(path).replace('/', '_') + _id
+ tree.add(path, file_id)
+ finally:
+ tree.unlock()
+ return tree
+
+ def assertFilesDeleted(self, files):
+ for f in files:
+ id=f+_id
+ self.assertNotInWorkingTree(f)
+ self.assertPathDoesNotExist(f)
+
+ def assertFilesUnversioned(self, files):
+ for f in files:
+ self.assertNotInWorkingTree(f)
+ self.assertPathExists(f)
+
+ def changeFile(self, file_name):
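+ # Append to the file so the tree sees it as modified.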
+ f = file(file_name, 'ab')
+ f.write("\nsome other new content!")
+ f.close()
+
+ def run_bzr_remove_changed_files(self, files_to_remove, working_dir=None):
+ self.run_bzr(['remove'] + list(files_to_remove),
+ working_dir=working_dir)
+
+ def test_remove_new_no_files_specified(self):
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr_error(["bzr: ERROR: No matching files."], 'remove --new')
+ self.run_bzr_error(["bzr: ERROR: No matching files."], 'remove --new .')
+
+ def test_remove_no_files_specified(self):
+ tree = self._make_tree_and_add(['foo'])
+ out, err = self.run_bzr(['rm'])
+ self.assertEqual('', err)
+ self.assertEqual('', out)
+ self.assertInWorkingTree('foo', tree=tree)
+ self.assertPathExists('foo')
+
+ def test_remove_no_files_specified_missing_dir_and_contents(self):
+ tree = self._make_tree_and_add(
+ ['foo', 'dir/', 'dir/missing/', 'dir/missing/child'])
+ self.get_transport('.').delete_tree('dir/missing')
+ out, err = self.run_bzr(['rm'])
+ self.assertEqual('', out)
+ self.assertEqual(
+ 'removed dir/missing/child\n'
+ 'removed dir/missing\n',
+ err)
+ # non-missing paths not touched:
+ self.assertInWorkingTree('foo', tree=tree)
+ self.assertPathExists('foo')
+ self.assertInWorkingTree('dir', tree=tree)
+ self.assertPathExists('dir')
+ # missing files unversioned
+ self.assertNotInWorkingTree('dir/missing', tree=tree)
+ self.assertNotInWorkingTree('dir/missing/child', tree=tree)
+
+ def test_remove_no_files_specified_already_deleted(self):
+ tree = self._make_tree_and_add(['foo', 'bar'])
+ tree.commit('save foo and bar')
+ os.unlink('bar')
+ self.run_bzr(['rm'])
+ self.assertEqual(None, tree.path2id('bar'))
+ # Running rm with a deleted file does not error.
+ out, err = self.run_bzr(['rm'])
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_remove_no_files_specified_missing_file(self):
+ tree = self._make_tree_and_add(['foo', 'bar'])
+ os.unlink('bar')
+ out, err = self.run_bzr(['rm'])
+ self.assertEqual('', out)
+ self.assertEqual('removed bar\n', err)
+ # non-missing files not touched:
+ self.assertInWorkingTree('foo', tree=tree)
+ self.assertPathExists('foo')
+ # missing files unversioned
+ self.assertNotInWorkingTree('bar', tree=tree)
+
+ def test_remove_no_files_specified_missing_link(self):
+ self.requireFeature(features.SymlinkFeature)
+ tree = self._make_tree_and_add(['foo'])
+ os.symlink('foo', 'linkname')
+ tree.add(['linkname'])
+ os.unlink('linkname')
+ out, err = self.run_bzr(['rm'])
+ self.assertEqual('', out)
+ self.assertEqual('removed linkname\n', err)
+ # non-missing files not touched:
+ self.assertInWorkingTree('foo', tree=tree)
+ self.assertPathExists('foo')
+ # missing files unversioned
+ self.assertNotInWorkingTree('linkname', tree=tree)
+
+ def test_rm_one_file(self):
+ tree = self._make_tree_and_add([a])
+ self.run_bzr("commit -m 'added a'")
+ self.run_bzr('rm a', error_regexes=["deleted a"])
+ self.assertFilesDeleted([a])
+
+ def test_remove_one_file(self):
+ tree = self._make_tree_and_add([a])
+ self.run_bzr("commit -m 'added a'")
+ self.run_bzr('remove a', error_regexes=["deleted a"])
+ self.assertFilesDeleted([a])
+
+ def test_remove_keep_one_file(self):
+ tree = self._make_tree_and_add([a])
+ self.run_bzr('remove --keep a', error_regexes=["removed a"])
+ self.assertFilesUnversioned([a])
+
+ def test_remove_one_deleted_file(self):
+ tree = self._make_tree_and_add([a])
+ self.run_bzr("commit -m 'added a'")
+ os.unlink(a)
+ self.assertInWorkingTree(a)
+ self.run_bzr('remove a')
+ self.assertNotInWorkingTree(a)
+
+ def test_remove_invalid_files(self):
+ self.build_tree(files)
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr(['remove', '.', 'xyz', 'abc/def'])
+
+ def test_remove_unversioned_files(self):
+ self.build_tree(files)
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr_remove_changed_files(files)
+
+ def test_remove_changed_files(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr("commit -m 'added files'")
+ self.changeFile(a)
+ self.changeFile(c)
+ self.run_bzr_remove_changed_files(files)
+
+ def test_remove_changed_ignored_files(self):
+ tree = self._make_tree_and_add(['a'])
+ self.run_bzr(['ignore', 'a'])
+ self.run_bzr_remove_changed_files(['a'])
+
+ def test_remove_changed_files_from_child_dir(self):
+ if sys.platform == 'win32':
+ raise TestSkipped("Windows unable to remove '.' directory")
+ tree = self._make_tree_and_add(files)
+ self.run_bzr("commit -m 'added files'")
+ self.changeFile(a)
+ self.changeFile(c)
+ self.run_bzr_remove_changed_files(
+ ['../a', 'c', '.', '../d'], working_dir='b')
+ self.assertNotInWorkingTree(files)
+ self.assertPathDoesNotExist(files)
+
+ def test_remove_keep_unversioned_files(self):
+ self.build_tree(files)
+ tree = self.make_branch_and_tree('.')
+ self.run_bzr('remove --keep a', error_regexes=["a is not versioned."])
+ self.assertFilesUnversioned(files)
+
+ def test_remove_no_backup_unversioned_files(self):
+ self.build_tree(files)
+ tree = self.make_branch_and_tree('.')
+ script.ScriptRunner().run_script(self, '''
+ $ bzr remove --no-backup a b/ b/c d/
+ 2>deleted d
+ 2>removed b/c (but kept a copy: b/c.~1~)
+ 2>deleted b
+ 2>deleted a
+ ''')
+ self.assertFilesDeleted(files)
+
+ def test_remove_force_unversioned_files(self):
+ self.build_tree(files)
+ tree = self.make_branch_and_tree('.')
+ script.ScriptRunner().run_script(self, '''
+ $ bzr remove --force a b/ b/c d/
+ 2>(The --force option is deprecated, rather use --no-backup in future.)
+ 2>deleted d
+ 2>removed b/c (but kept a copy: b/c.~1~)
+ 2>deleted b
+ 2>deleted a
+ ''')
+ self.assertFilesDeleted(files)
+
+ def test_remove_deleted_files(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr("commit -m 'added files'")
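+ # Delete in reverse path order so children go before their parent directories.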
+ my_files = sorted(files, reverse=True)
+ for f in my_files:
+ osutils.delete_any(f)
+ self.assertInWorkingTree(files)
+ self.assertPathDoesNotExist(files)
+ self.run_bzr('remove ' + ' '.join(files))
+ self.assertNotInWorkingTree(a)
+ self.assertPathDoesNotExist(files)
+
+ def test_remove_non_existing_files(self):
+ tree = self._make_tree_and_add([])
+ self.run_bzr(['remove', 'b'])
+
+ def test_remove_keep_non_existing_files(self):
+ tree = self._make_tree_and_add([])
+ self.run_bzr('remove --keep b', error_regexes=["b is not versioned."])
+
+ def test_remove_files(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr("commit -m 'added files'")
+ self.run_bzr('remove a b b/c d',
+ error_regexes=["deleted a", "deleted b", "deleted b/c",
+ "deleted d"])
+ self.assertFilesDeleted(files)
+
+ def test_remove_keep_files(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr("commit -m 'added files'")
+ self.run_bzr('remove --keep a b b/c d',
+ error_regexes=["removed a", "removed b", "removed b/c",
+ "removed d"])
+ self.assertFilesUnversioned(files)
+
+ def test_remove_with_new(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr('remove --new --keep',
+ error_regexes=["removed a", "removed b", "removed b/c"])
+ self.assertFilesUnversioned(files)
+
+ def test_remove_with_new_in_dir1(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr('remove --new --keep b b/c',
+ error_regexes=["removed b", "removed b/c"])
+ tree = WorkingTree.open('.')
+ self.assertInWorkingTree(a)
+ self.assertEqual(tree.path2id(a), a + _id)
+ self.assertFilesUnversioned([b,c])
+
+ def test_remove_with_new_in_dir2(self):
+ tree = self._make_tree_and_add(files)
+ self.run_bzr('remove --new --keep .',
+ error_regexes=["removed a", "removed b", "removed b/c"])
+ tree = WorkingTree.open('.')
+ self.assertFilesUnversioned(files)
diff --git a/bzrlib/tests/blackbox/test_remove_tree.py b/bzrlib/tests/blackbox/test_remove_tree.py
new file mode 100644
index 0000000..75745bd
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_remove_tree.py
@@ -0,0 +1,170 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr remove-tree."""
+
+import os
+
+from bzrlib import shelf
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestRemoveTree(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestRemoveTree, self).setUp()
+ self.tree = self.make_branch_and_tree('branch1')
+ self.build_tree(['branch1/foo'])
+ self.tree.add('foo')
+ self.tree.commit('1')
+ self.assertPathExists('branch1/foo')
+
+ # Success modes
+
+ def test_remove_tree_original_branch(self):
+ self.run_bzr('remove-tree', working_dir='branch1')
+ self.assertPathDoesNotExist('branch1/foo')
+
+ def test_remove_tree_original_branch_explicit(self):
+ self.run_bzr('remove-tree branch1')
+ self.assertPathDoesNotExist('branch1/foo')
+
+ def test_remove_tree_multiple_branch_explicit(self):
+ self.tree.bzrdir.sprout('branch2')
+ self.run_bzr('remove-tree branch1 branch2')
+ self.assertPathDoesNotExist('branch1/foo')
+ self.assertPathDoesNotExist('branch2/foo')
+
+ def test_remove_tree_sprouted_branch(self):
+ self.tree.bzrdir.sprout('branch2')
+ self.assertPathExists('branch2/foo')
+ self.run_bzr('remove-tree', working_dir='branch2')
+ self.assertPathDoesNotExist('branch2/foo')
+
+ def test_remove_tree_sprouted_branch_explicit(self):
+ self.tree.bzrdir.sprout('branch2')
+ self.assertPathExists('branch2/foo')
+ self.run_bzr('remove-tree branch2')
+ self.assertPathDoesNotExist('branch2/foo')
+
+ def test_remove_tree_checkout(self):
+ self.tree.branch.create_checkout('branch2', lightweight=False)
+ self.assertPathExists('branch2/foo')
+ self.run_bzr('remove-tree', working_dir='branch2')
+ self.assertPathDoesNotExist('branch2/foo')
+ self.assertPathExists('branch1/foo')
+
+ def test_remove_tree_checkout_explicit(self):
+ self.tree.branch.create_checkout('branch2', lightweight=False)
+ self.assertPathExists('branch2/foo')
+ self.run_bzr('remove-tree branch2')
+ self.assertPathDoesNotExist('branch2/foo')
+ self.assertPathExists('branch1/foo')
+
+ # Failure modes
+
+ def test_remove_tree_lightweight_checkout(self):
+ self.tree.branch.create_checkout('branch2', lightweight=True)
+ self.assertPathExists('branch2/foo')
+ output = self.run_bzr_error(
+ ["You cannot remove the working tree from a lightweight checkout"],
+ 'remove-tree', retcode=3, working_dir='branch2')
+ self.assertPathExists('branch2/foo')
+ self.assertPathExists('branch1/foo')
+
+ def test_remove_tree_lightweight_checkout_explicit(self):
+ self.tree.branch.create_checkout('branch2', lightweight=True)
+ self.assertPathExists('branch2/foo')
+ output = self.run_bzr_error(
+ ["You cannot remove the working tree from a lightweight checkout"],
+ 'remove-tree branch2', retcode=3)
+ self.assertPathExists('branch2/foo')
+ self.assertPathExists('branch1/foo')
+
+ def test_remove_tree_empty_dir(self):
+ os.mkdir('branch2')
+ output = self.run_bzr_error(
+ ["Not a branch"], 'remove-tree', retcode=3, working_dir='branch2')
+
+ def test_remove_tree_repeatedly(self):
+ self.run_bzr('remove-tree branch1')
+ self.assertPathDoesNotExist('branch1/foo')
+ output = self.run_bzr_error(["No working tree to remove"],
+ 'remove-tree branch1', retcode=3)
+
+ def test_remove_tree_remote_path(self):
+ # TODO: I can't think of a way to implement this...
+ pass
+
+ def test_remove_tree_uncommitted_changes(self):
+ self.build_tree(['branch1/bar'])
+ self.tree.add('bar')
+ output = self.run_bzr_error(["Working tree .* has uncommitted changes"],
+ 'remove-tree branch1', retcode=3)
+
+ def test_remove_tree_uncommitted_changes_force(self):
+ self.build_tree(['branch1/bar'])
+ self.tree.add('bar')
+ self.run_bzr('remove-tree branch1 --force')
+ self.assertPathDoesNotExist('branch1/foo')
+ self.assertPathExists('branch1/bar')
+
+ def test_remove_tree_pending_merges(self):
+ self.run_bzr(['branch', 'branch1', 'branch2'])
+ self.build_tree(['branch1/bar'])
+ self.tree.add('bar')
+ self.tree.commit('2')
+ self.assertPathExists('branch1/bar')
+ self.run_bzr(['merge', '../branch1'], working_dir='branch2')
+ self.assertPathExists('branch2/bar')
+ self.run_bzr(['revert', '.'], working_dir='branch2')
+ self.assertPathDoesNotExist('branch2/bar')
+ output = self.run_bzr_error(["Working tree .* has uncommitted changes"],
+ 'remove-tree branch2', retcode=3)
+
+ def test_remove_tree_pending_merges_force(self):
+ self.run_bzr(['branch', 'branch1', 'branch2'])
+ self.build_tree(['branch1/bar'])
+ self.tree.add('bar')
+ self.tree.commit('2')
+ self.assertPathExists('branch1/bar')
+ self.run_bzr(['merge', '../branch1'], working_dir='branch2')
+ self.assertPathExists('branch2/bar')
+ self.run_bzr(['revert', '.'], working_dir='branch2')
+ self.assertPathDoesNotExist('branch2/bar')
+ self.run_bzr('remove-tree branch2 --force')
+ self.assertPathDoesNotExist('branch2/foo')
+ self.assertPathDoesNotExist('branch2/bar')
+
+ def test_remove_tree_shelved_changes(self):
+ # https://bugs.launchpad.net/bzr/+bug/586639
+ tree = self.make_branch_and_tree('.')
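+ # Shelve a change (an empty one is enough) so the tree has shelved changes for remove-tree to detect.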
+ creator = shelf.ShelfCreator(tree, tree.basis_tree(), [])
+ self.addCleanup(creator.finalize)
+ shelf_id = tree.get_shelf_manager().shelve_changes(creator, 'Foo')
+ output = self.run_bzr_error(["Working tree .* has shelved changes"],
+ 'remove-tree', retcode=3)
+
+ def test_remove_tree_shelved_changes_force(self):
+ tree = self.make_branch_and_tree('.')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree(), [])
+ self.addCleanup(creator.finalize)
+ shelf_id = tree.get_shelf_manager().shelve_changes(creator, 'Foo')
+ self.run_bzr('remove-tree --force')
+ self.run_bzr('checkout')
+ # Ensure shelf is empty
+ self.assertIs(None, tree.get_shelf_manager().last_shelf())
diff --git a/bzrlib/tests/blackbox/test_repair_workingtree.py b/bzrlib/tests/blackbox/test_repair_workingtree.py
new file mode 100644
index 0000000..9709385
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_repair_workingtree.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ workingtree,
+ )
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestRepairWorkingTree(TestCaseWithTransport):
+
+ def break_dirstate(self, tree, completely=False):
+ """Write garbage into the dirstate file."""
+ # This test assumes that the format uses a DirState file, which we then
+ # manually corrupt. If we change the way to get at that dirstate file,
+ # then we can update how this is done
+ self.assertIsNot(None, getattr(tree, 'current_dirstate', None))
+ tree.lock_read()
+ try:
+ dirstate = tree.current_dirstate()
+ dirstate_path = dirstate._filename
+ self.assertPathExists(dirstate_path)
+ finally:
+ tree.unlock()
+ # We have to have the tree unlocked at this point, so we can safely
+ # mutate the state file on all platforms.
+ if completely:
+ f = open(dirstate_path, 'wb')
+ else:
+ f = open(dirstate_path, 'ab')
+ try:
+ f.write('garbage-at-end-of-file\n')
+ finally:
+ f.close()
+
+ def make_initial_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo', 'tree/dir/', 'tree/dir/bar'])
+ tree.add(['foo', 'dir', 'dir/bar'])
+ tree.commit('first')
+ return tree
+
+ def test_repair_refuses_uncorrupted(self):
+ tree = self.make_initial_tree()
+ # If the tree doesn't appear to be corrupt, we refuse, but prompt the
+ # user to let them know that:
+ # a) they may want to use 'bzr revert' instead of repair-workingtree
+ # b) they can use --force if they really want to do this
+ self.run_bzr_error(['The tree does not appear to be corrupt',
+ '"bzr revert"',
+ '--force'],
+ 'repair-workingtree -d tree')
+
+ def test_repair_forced(self):
+ tree = self.make_initial_tree()
+ tree.rename_one('dir', 'alt_dir')
+ self.assertIsNot(None, tree.path2id('alt_dir'))
+ self.run_bzr('repair-workingtree -d tree --force')
+ # This requires the tree has reloaded the working state
+ self.assertIs(None, tree.path2id('alt_dir'))
+ self.assertPathExists('tree/alt_dir')
+
+ def test_repair_corrupted_dirstate(self):
+ tree = self.make_initial_tree()
+ self.break_dirstate(tree)
+ self.run_bzr('repair-workingtree -d tree')
+ tree = workingtree.WorkingTree.open('tree')
+ # At this point, check should be happy
+ tree.check_state()
+
+ def test_repair_naive_destroyed_fails(self):
+ tree = self.make_initial_tree()
+ self.break_dirstate(tree, completely=True)
+ self.run_bzr_error(['the header appears corrupt, try passing'],
+ 'repair-workingtree -d tree')
+
+ def test_repair_destroyed_with_revs_passes(self):
+ tree = self.make_initial_tree()
+ self.break_dirstate(tree, completely=True)
+ self.run_bzr('repair-workingtree -d tree -r -1')
+ tree = workingtree.WorkingTree.open('tree')
+ # At this point, check should be happy
+ tree.check_state()
diff --git a/bzrlib/tests/blackbox/test_resolve.py b/bzrlib/tests/blackbox/test_resolve.py
new file mode 100644
index 0000000..44a525f
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_resolve.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ conflicts,
+ tests,
+ )
+from bzrlib.tests import script
+from bzrlib.tests.blackbox import test_conflicts
+
+
+class TestResolve(script.TestCaseWithTransportAndScript):
+
+ def setUp(self):
+ super(TestResolve, self).setUp()
+ test_conflicts.make_tree_with_conflicts(self, 'branch', 'other')
+
+ def test_resolve_one_by_one(self):
+ self.run_script("""\
+$ cd branch
+$ bzr conflicts
+Text conflict in my_other_file
+Path conflict: mydir3 / mydir2
+Text conflict in myfile
+$ bzr resolve myfile
+2>1 conflict resolved, 2 remaining
+$ bzr resolve my_other_file
+2>1 conflict resolved, 1 remaining
+$ bzr resolve mydir2
+2>1 conflict resolved, 0 remaining
+""")
+
+ def test_resolve_all(self):
+ self.run_script("""\
+$ cd branch
+$ bzr resolve --all
+2>3 conflicts resolved, 0 remaining
+$ bzr conflicts
+""")
+
+ def test_resolve_from_subdir(self):
+ self.run_script("""\
+$ mkdir branch/subdir
+$ cd branch/subdir
+$ bzr resolve ../myfile
+2>1 conflict resolved, 2 remaining
+""")
+
+ def test_resolve_via_directory_option(self):
+ self.run_script("""\
+$ bzr resolve -d branch myfile
+2>1 conflict resolved, 2 remaining
+""")
+
+ def test_resolve_all_via_directory_option(self):
+ self.run_script("""\
+$ bzr resolve -d branch --all
+2>3 conflicts resolved, 0 remaining
+$ bzr conflicts -d branch
+""")
+
+
+class TestBug788000(script.TestCaseWithTransportAndScript):
+
+ def test_bug_788000(self):
+ self.run_script('''\
+$ bzr init a
+$ mkdir a/dir
+$ echo foo > a/dir/file
+$ bzr add a/dir
+$ cd a
+$ bzr commit -m one
+$ cd ..
+$ bzr clone a b
+$ echo bar > b/dir/file
+$ cd a
+$ rm -r dir
+$ bzr commit -m two
+$ cd ../b
+''',
+ null_output_matches_anything=True)
+
+ self.run_script('''\
+$ bzr pull
+Using saved parent location:...
+Now on revision 2.
+2>RM dir/file => dir/file.THIS
+2>Conflict: can't delete dir because it is not empty. Not deleting.
+2>Conflict because dir is not versioned, but has versioned children...
+2>Contents conflict in dir/file
+2>3 conflicts encountered.
+''')
+ self.run_script('''\
+$ bzr resolve --take-other
+2>deleted dir/file.THIS
+2>deleted dir
+2>3 conflicts resolved, 0 remaining
+''')
+
+
+class TestResolveAuto(tests.TestCaseWithTransport):
+
+ def test_auto_resolve(self):
+ """Text conflicts can be resolved automatically"""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file',
+ '<<<<<<<\na\n=======\n>>>>>>>\n')])
+ tree.add('file', 'file_id')
+ self.assertEqual(tree.kind('file_id'), 'file')
+ file_conflict = conflicts.TextConflict('file', file_id='file_id')
+ tree.set_conflicts(conflicts.ConflictList([file_conflict]))
+ note = self.run_bzr('resolve', retcode=1, working_dir='tree')[1]
+ self.assertContainsRe(note, '0 conflicts auto-resolved.')
+ self.assertContainsRe(note,
+ 'Remaining conflicts:\nText conflict in file')
+ self.build_tree_contents([('tree/file', 'a\n')])
+ note = self.run_bzr('resolve', working_dir='tree')[1]
+ self.assertContainsRe(note, 'All conflicts resolved.')
diff --git a/bzrlib/tests/blackbox/test_revert.py b/bzrlib/tests/blackbox/test_revert.py
new file mode 100644
index 0000000..84b54fa
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_revert.py
@@ -0,0 +1,211 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for bzr revert."""
+
+import os
+
+import bzrlib.osutils
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.trace import mutter
+from bzrlib.workingtree import WorkingTree
+
+
+class TestRevert(TestCaseWithTransport):
+
+ def _prepare_tree(self):
+ self.run_bzr('init')
+ self.run_bzr('mkdir dir')
+
+ f = file('dir/file', 'wb')
+ f.write('spam')
+ f.close()
+ self.run_bzr('add dir/file')
+
+ self.run_bzr('commit -m1')
+
+ # modify file
+ f = file('dir/file', 'wb')
+ f.write('eggs')
+ f.close()
+
+ # check status
+ self.assertEquals('modified:\n dir/file\n', self.run_bzr('status')[0])
+
+ def _prepare_rename_mod_tree(self):
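+ # Build two directory trees and rename files between them so revert has a set of renames to undo.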
+ self.build_tree(['a/', 'a/b', 'a/c', 'a/d/', 'a/d/e', 'f/', 'f/g',
+ 'f/h', 'f/i'])
+ self.run_bzr('init')
+ self.run_bzr('add')
+ self.run_bzr('commit -m 1')
+ wt = WorkingTree.open('.')
+ wt.rename_one('a/b', 'f/b')
+ wt.rename_one('a/d/e', 'f/e')
+ wt.rename_one('a/d', 'f/d')
+ wt.rename_one('f/g', 'a/g')
+ wt.rename_one('f/h', 'h')
+ wt.rename_one('f', 'j')
+
+ def helper(self, param=''):
+ self._prepare_tree()
+ # change dir
+ # revert to default revision for file in subdir does work
+ os.chdir('dir')
+ mutter('cd dir\n')
+
+ self.assertEquals('1\n', self.run_bzr('revno')[0])
+ self.run_bzr('revert %s file' % param)
+ self.assertEquals('spam', open('file', 'rb').read())
+
+ def test_revert_in_subdir(self):
+ self.helper()
+
+ def test_revert_to_revision_in_subdir(self):
+ # test case for bug #29424:
+ # revert to specific revision for file in subdir does not work
+ self.helper('-r 1')
+
+ def test_revert_in_checkout(self):
+ os.mkdir('brach')
+ os.chdir('brach')
+ self._prepare_tree()
+ self.run_bzr('checkout --lightweight . ../sprach')
+ self.run_bzr('commit -m more')
+ os.chdir('../sprach')
+ self.assertEqual('', self.run_bzr('status')[0])
+ self.run_bzr('revert')
+ self.assertEqual('', self.run_bzr('status')[0])
+
+ def test_revert_dirname(self):
+ """Test that revert DIRECTORY does what's expected"""
+ self._prepare_rename_mod_tree()
+ self.run_bzr('revert a')
+ self.assertPathExists('a/b')
+ self.assertPathExists('a/d')
+ self.assertPathDoesNotExist('a/g')
+ self.expectFailure(
+ "j is in the delta revert applies because j was renamed too",
+ self.assertPathExists, 'j')
+ self.assertPathExists('h')
+ self.run_bzr('revert f')
+ self.assertPathDoesNotExist('j')
+ self.assertPathDoesNotExist('h')
+ self.assertPathExists('a/d/e')
+
+ def test_revert_chatter(self):
+ self._prepare_rename_mod_tree()
+ chatter = self.run_bzr('revert')[1]
+ self.assertEqualDiff(
+ 'R a/g => f/g\n'
+ 'R h => f/h\n'
+ 'R j/ => f/\n'
+ 'R j/b => a/b\n'
+ 'R j/d/ => a/d/\n'
+ 'R j/e => a/d/e\n',
+ chatter)
+
+ def test_revert(self):
+ self.run_bzr('init')
+
+ with file('hello', 'wt') as f: f.write('foo')
+ self.run_bzr('add hello')
+ self.run_bzr('commit -m setup hello')
+
+ with file('goodbye', 'wt') as f: f.write('baz')
+ self.run_bzr('add goodbye')
+ self.run_bzr('commit -m setup goodbye')
+
+ with file('hello', 'wt') as f: f.write('bar')
+ with file('goodbye', 'wt') as f: f.write('qux')
+ self.run_bzr('revert hello')
+ self.check_file_contents('hello', 'foo')
+ self.check_file_contents('goodbye', 'qux')
+ self.run_bzr('revert')
+ self.check_file_contents('goodbye', 'baz')
+
+ os.mkdir('revertdir')
+ self.run_bzr('add revertdir')
+ self.run_bzr('commit -m f')
+ os.rmdir('revertdir')
+ self.run_bzr('revert')
+
+ if bzrlib.osutils.has_symlinks():
+ os.symlink('/unlikely/to/exist', 'symlink')
+ self.run_bzr('add symlink')
+ self.run_bzr('commit -m f')
+ os.unlink('symlink')
+ self.run_bzr('revert')
+ self.assertPathExists('symlink')
+ os.unlink('symlink')
+ os.symlink('a-different-path', 'symlink')
+ self.run_bzr('revert')
+ self.assertEqual('/unlikely/to/exist',
+ os.readlink('symlink'))
+ else:
+ self.log("skipping revert symlink tests")
+
+ with file('hello', 'wt') as f: f.write('xyz')
+ self.run_bzr('commit -m xyz hello')
+ self.run_bzr('revert -r 1 hello')
+ self.check_file_contents('hello', 'foo')
+ self.run_bzr('revert hello')
+ self.check_file_contents('hello', 'xyz')
+ os.chdir('revertdir')
+ self.run_bzr('revert')
+ os.chdir('..')
+
+ def test_revert_newly_added(self):
+ # this tests that the UI reports reverting a newly added file
+ # correctly (such files are not deleted)
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ tree.add(['file'])
+ out, err = self.run_bzr('revert')
+ self.assertEqual('', out)
+ self.assertEqual('- file\n', err)
+
+ def test_revert_removing_file(self):
+ # this tests that reverting to a revision which did not have the file
+ # reports the file as being deleted.
+ tree = self.make_branch_and_tree('.')
+ tree.commit('empty commit')
+ self.build_tree(['file'])
+ tree.add(['file'])
+ tree.commit('add file')
+ out, err = self.run_bzr('revert -r -2')
+ self.assertEqual('', out)
+ self.assertEqual('-D file\n', err)
+
+ def test_revert_forget_merges(self):
+ # revert --forget-merges removes any pending merges into the tree, but
+ # leaves the files unchanged
+ tree = self.make_branch_and_tree('.')
+ # forget-merges before first commit, though pointless, does not fail
+ self.run_bzr(['revert', '--forget-merges'])
+ self.build_tree(['file'])
+ first_rev_id = tree.commit('initial commit')
+ self.build_tree_contents([('file', 'new content')])
+ existing_parents = tree.get_parent_ids()
+ self.assertEquals([first_rev_id], existing_parents)
+ merged_parents = existing_parents + ['merged-in-rev']
+ tree.set_parent_ids(merged_parents)
+ self.assertEquals(merged_parents, tree.get_parent_ids())
+ self.run_bzr(['revert', '--forget-merges'])
+ self.assertEquals([first_rev_id], tree.get_parent_ids())
+ # changed files are not reverted
+ self.assertFileEqual('new content', 'file')
+ # you can give it the path of a tree
+ self.run_bzr(['revert', '--forget-merges', tree.abspath('.')])
diff --git a/bzrlib/tests/blackbox/test_revision_history.py b/bzrlib/tests/blackbox/test_revision_history.py
new file mode 100644
index 0000000..f8d5ea1
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_revision_history.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2006, 2007, 2009, 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ branch,
+ tests,
+ )
+
+
+class TestRevisionHistory(tests.TestCaseWithTransport):
+
+ def _build_branch(self):
+ # setup a standalone branch with three commits
+ tree = self.make_branch_and_tree('test')
+ with open('test/foo', 'wb') as f:
+ f.write('1111\n')
+ tree.add('foo')
+ tree.commit('added foo', rev_id='revision_1')
+ with open('test/foo', 'wb') as f:
+ f.write('2222\n')
+ tree.commit('updated foo', rev_id='revision_2')
+ with open('test/foo', 'wb') as f:
+ f.write('3333\n')
+ tree.commit('updated foo again', rev_id='revision_3')
+ return tree
+
+ def _check_revision_history(self, location='', working_dir=None):
+ rh = self.run_bzr(['revision-history', location],
+ working_dir=working_dir)[0]
+ self.assertEqual(rh, 'revision_1\nrevision_2\nrevision_3\n')
+
+ def test_revision_history(self):
+ """No location"""
+ self._build_branch()
+ self._check_revision_history(working_dir='test')
+
+ def test_revision_history_with_location(self):
+ """With a specified location."""
+ self._build_branch()
+ self._check_revision_history('test')
+
+ def test_revision_history_with_repo_branch(self):
+ """With a repository branch location."""
+ self._build_branch()
+ self.run_bzr('init-repo repo')
+ self.run_bzr('branch test repo/test')
+ self._check_revision_history('repo/test')
+
+ def test_revision_history_with_checkout(self):
+ """With a repository branch checkout location."""
+ self._build_branch()
+ self.run_bzr('init-repo repo')
+ self.run_bzr('branch test repo/test')
+ self.run_bzr('checkout repo/test test-checkout')
+ self._check_revision_history('test-checkout')
+
+ def test_revision_history_with_lightweight_checkout(self):
+ """With a repository branch lightweight checkout location."""
+ self._build_branch()
+ self.run_bzr('init-repo repo')
+ self.run_bzr('branch test repo/test')
+ self.run_bzr('checkout --lightweight repo/test test-checkout')
+ self._check_revision_history('test-checkout')
diff --git a/bzrlib/tests/blackbox/test_revision_info.py b/bzrlib/tests/blackbox/test_revision_info.py
new file mode 100644
index 0000000..a560d98
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_revision_info.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib.errors import BzrCommandError, NoSuchRevision
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.workingtree import WorkingTree
+
+
+class TestRevisionInfo(TestCaseWithTransport):
+
+ def check_output(self, output, *args):
+ """Verify that the expected output matches what bzr says.
+
+ The output is supplied first, so that you can supply a variable
+ number of arguments to bzr.
+ """
+ self.assertEquals(self.run_bzr(*args)[0], output)
+
+ def test_revision_info(self):
+ """Test that 'bzr revision-info' reports the correct thing."""
+ wt = self.make_branch_and_tree('.')
+
+ # Make history with a non-mainline rev
+ wt.commit('Commit one', rev_id='a@r-0-1')
+ wt.commit('Commit two', rev_id='a@r-0-1.1.1')
+ wt.set_parent_ids(['a@r-0-1', 'a@r-0-1.1.1'])
+ wt.branch.set_last_revision_info(1, 'a@r-0-1')
+ wt.commit('Commit three', rev_id='a@r-0-2')
+
+ # This is expected to work even if the working tree is removed
+ wt.bzrdir.destroy_workingtree()
+
+ # Expected return values
+ values = {
+ '1' : '1 a@r-0-1\n',
+ '1.1.1': '1.1.1 a@r-0-1.1.1\n',
+ '2' : '2 a@r-0-2\n'
+ }
+
+ # Make sure with no arg it defaults to the head
+ self.check_output(values['2'], 'revision-info')
+
+ # Check the results of just specifying a numeric revision
+ self.check_output(values['1'], 'revision-info 1')
+ self.check_output(values['1.1.1'], 'revision-info 1.1.1')
+ self.check_output(values['2'], 'revision-info 2')
+ self.check_output(values['1']+values['2'], 'revision-info 1 2')
+ self.check_output(' '+values['1']+
+ values['1.1.1']+
+ ' '+values['2'],
+ 'revision-info 1 1.1.1 2')
+ self.check_output(values['2']+values['1'], 'revision-info 2 1')
+
+ # Check as above, only using the '--revision' syntax
+
+ self.check_output(values['1'], 'revision-info -r 1')
+ self.check_output(values['1.1.1'], 'revision-info --revision 1.1.1')
+ self.check_output(values['2'], 'revision-info -r 2')
+ self.check_output(values['1']+values['2'], 'revision-info -r 1..2')
+ self.check_output(' '+values['1']+
+ values['1.1.1']+
+ ' '+values['2'],
+ 'revision-info -r 1..1.1.1..2')
+ self.check_output(values['2']+values['1'], 'revision-info -r 2..1')
+
+ # Now try some more advanced revision specifications
+
+ self.check_output(values['1'], 'revision-info -r revid:a@r-0-1')
+ self.check_output(values['1.1.1'],
+ 'revision-info --revision revid:a@r-0-1.1.1')
+
+ def test_revision_info_explicit_branch_dir(self):
+ """Test that 'bzr revision-info' honors the '-d' option."""
+ wt = self.make_branch_and_tree('branch')
+
+ wt.commit('Commit one', rev_id='a@r-0-1')
+ self.check_output('1 a@r-0-1\n', 'revision-info -d branch')
+
+ def test_revision_info_tree(self):
+ # Make branch and checkout
+ wt = self.make_branch_and_tree('branch')
+ wt.commit('Commit one', rev_id='a@r-0-1')
+
+ # Make checkout and move the branch forward
+ wt.branch.create_checkout('checkout', lightweight=True)
+ wt.commit('Commit two', rev_id='a@r-0-2')
+
+ # Make sure the checkout gives the right answer for branch and
+ # tree
+ self.check_output('2 a@r-0-2\n', 'revision-info -d checkout')
+ self.check_output('1 a@r-0-1\n', 'revision-info --tree -d checkout')
+
+ def test_revision_info_tree_no_working_tree(self):
+ # Make branch with no tree
+ b = self.make_branch('branch')
+
+ # Try getting the --tree revision-info
+ out,err = self.run_bzr('revision-info --tree -d branch', retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual('bzr: ERROR: No WorkingTree exists for "branch".\n',
+ err)
+
+ def test_revision_info_not_in_history(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.finish_series()
+ self.check_output(' 1 A-id\n??? B-id\n 2 C-id\n',
+ 'revision-info -d branch'
+ ' revid:A-id revid:B-id revid:C-id')
diff --git a/bzrlib/tests/blackbox/test_revno.py b/bzrlib/tests/blackbox/test_revno.py
new file mode 100644
index 0000000..df30e9e
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_revno.py
@@ -0,0 +1,179 @@
+# Copyright (C) 2005, 2006, 2007, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr revno.
+"""
+
+import os
+
+from bzrlib import tests
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestRevno(tests.TestCaseWithTransport):
+
+ def test_revno(self):
+
+ def bzr(*args, **kwargs):
+ return self.run_bzr(*args, **kwargs)[0]
+
+ os.mkdir('a')
+ os.chdir('a')
+ bzr('init')
+ self.assertEquals(int(bzr('revno')), 0)
+
+ with open('foo', 'wb') as f: f.write('foo\n')
+ bzr('add foo')
+ bzr('commit -m foo')
+ self.assertEquals(int(bzr('revno')), 1)
+
+ os.mkdir('baz')
+ bzr('add baz')
+ bzr('commit -m baz')
+ self.assertEquals(int(bzr('revno')), 2)
+
+ os.chdir('..')
+ self.assertEquals(int(bzr('revno a')), 2)
+ self.assertEquals(int(bzr('revno a/baz')), 2)
+
+ def test_revno_tree(self):
+ # Make branch and checkout
+ wt = self.make_branch_and_tree('branch')
+ checkout = wt.branch.create_checkout('checkout', lightweight=True)
+
+ # Get the checkout out of date
+ self.build_tree(['branch/file'])
+ wt.add(['file'])
+ wt.commit('mkfile')
+
+ # Make sure revno says we're on 1
+ out,err = self.run_bzr('revno checkout')
+ self.assertEqual('', err)
+ self.assertEqual('1\n', out)
+
+ # Make sure --tree knows it's still on 0
+ out,err = self.run_bzr('revno --tree checkout')
+ self.assertEqual('', err)
+ self.assertEqual('0\n', out)
+
+ def test_revno_tree_no_tree(self):
+ # Make treeless branch
+ b = self.make_branch('branch')
+
+ # Try getting its --tree revno
+ out,err = self.run_bzr('revno --tree branch', retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual('bzr: ERROR: No WorkingTree exists for "branch".\n',
+ err)
+
+ def test_dotted_revno_tree(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id', 'B-id'], [])
+ builder.finish_series()
+ b = builder.get_branch()
+ co_b = b.create_checkout('checkout_b', lightweight=True,
+ revision_id='B-id')
+ out, err = self.run_bzr('revno checkout_b')
+ self.assertEqual('', err)
+ self.assertEqual('2\n', out)
+ out, err = self.run_bzr('revno --tree checkout_b')
+ self.assertEqual('', err)
+ self.assertEqual('1.1.1\n', out)
+
+ def test_stale_revno_tree(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.finish_series()
+ b = builder.get_branch()
+ # The branch is now at "C-id", but the checkout is still at "B-id"
+ # which is no longer in the history
+ co_b = b.create_checkout('checkout_b', lightweight=True,
+ revision_id='B-id')
+ out, err = self.run_bzr('revno checkout_b')
+ self.assertEqual('', err)
+ self.assertEqual('2\n', out)
+ out, err = self.run_bzr('revno --tree checkout_b')
+ self.assertEqual('', err)
+ self.assertEqual('???\n', out)
+
+ def test_revno_with_revision(self):
+ wt = self.make_branch_and_tree('.')
+ revid1 = wt.commit('rev1')
+ revid2 = wt.commit('rev2')
+
+ out, err = self.run_bzr('revno -r-2 .')
+ self.assertEqual('1\n', out)
+
+ out, err = self.run_bzr('revno -rrevid:%s .' % revid1)
+ self.assertEqual('1\n', out)
+
+ def test_revno_and_tree_mutually_exclusive(self):
+ wt = self.make_branch_and_tree('.')
+ out, err = self.run_bzr('revno -r-2 --tree .', retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual(
+ 'bzr: ERROR: --tree and --revision can not be used together\n',
+ err)
+
+
+class TestSmartServerRevno(tests.TestCaseWithTransport):
+
+ def test_simple_branch_revno(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ revid = t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['revno', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertLength(6, self.hpss_calls)
+
+ def test_simple_branch_revno_lookup(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ revid1 = t.commit("message")
+ revid2 = t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['revno', '-rrevid:' + revid1,
+ self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(5, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_rmbranch.py b/bzrlib/tests/blackbox/test_rmbranch.py
new file mode 100644
index 0000000..c943f59
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_rmbranch.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr rmbranch."""
+
+from bzrlib import (
+ controldir,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestRemoveBranch(TestCaseWithTransport):
+
+ def example_tree(self, path='.', format=None):
+ tree = self.make_branch_and_tree(path, format=format)
+ self.build_tree_contents([(path + '/hello', 'foo')])
+ tree.add('hello')
+ tree.commit(message='setup')
+ self.build_tree_contents([(path + '/goodbye', 'baz')])
+ tree.add('goodbye')
+ tree.commit(message='setup')
+ return tree
+
+ def test_remove_local(self):
+ # Remove a local branch.
+ tree = self.example_tree('a')
+ self.run_bzr_error(['Branch is active. Use --force to remove it.\n'],
+ 'rmbranch a')
+ self.run_bzr('rmbranch --force a')
+ dir = controldir.ControlDir.open('a')
+ self.assertFalse(dir.has_branch())
+ self.assertPathExists('a/hello')
+ self.assertPathExists('a/goodbye')
+
+ def test_no_branch(self):
+ # No branch in the current directory.
+ self.make_repository('a')
+ self.run_bzr_error(['Not a branch'],
+ 'rmbranch a')
+
+ def test_no_tree(self):
+ # removing the active branch is possible if there is no tree
+ tree = self.example_tree('a')
+ tree.bzrdir.destroy_workingtree()
+ self.run_bzr('rmbranch', working_dir='a')
+ dir = controldir.ControlDir.open('a')
+ self.assertFalse(dir.has_branch())
+
+ def test_no_arg(self):
+ # location argument defaults to current directory
+ self.example_tree('a')
+ self.run_bzr_error(['Branch is active. Use --force to remove it.\n'],
+ 'rmbranch a')
+ self.run_bzr('rmbranch --force', working_dir='a')
+ dir = controldir.ControlDir.open('a')
+ self.assertFalse(dir.has_branch())
+
+ def test_remove_colo(self):
+ # Remove a colocated branch.
+ tree = self.example_tree('a')
+ tree.bzrdir.create_branch(name="otherbranch")
+ self.assertTrue(tree.bzrdir.has_branch('otherbranch'))
+ self.run_bzr('rmbranch %s,branch=otherbranch' % tree.bzrdir.user_url)
+ dir = controldir.ControlDir.open('a')
+ self.assertFalse(dir.has_branch('otherbranch'))
+ self.assertTrue(dir.has_branch())
+
+ def test_remove_colo_directory(self):
+ # Remove a colocated branch.
+ tree = self.example_tree('a')
+ tree.bzrdir.create_branch(name="otherbranch")
+ self.assertTrue(tree.bzrdir.has_branch('otherbranch'))
+ self.run_bzr('rmbranch otherbranch -d %s' % tree.bzrdir.user_url)
+ dir = controldir.ControlDir.open('a')
+ self.assertFalse(dir.has_branch('otherbranch'))
+ self.assertTrue(dir.has_branch())
+
+ def test_remove_active_colo_branch(self):
+ # Remove a colocated branch.
+ dir = self.make_repository('a').bzrdir
+ branch = dir.create_branch('otherbranch')
+ branch.create_checkout('a')
+ self.run_bzr_error(['Branch is active. Use --force to remove it.\n'],
+ 'rmbranch otherbranch -d %s' % branch.bzrdir.user_url)
+ self.assertTrue(dir.has_branch('otherbranch'))
+ self.run_bzr('rmbranch --force otherbranch -d %s' % branch.bzrdir.user_url)
+ self.assertFalse(dir.has_branch('otherbranch'))
+
+
+class TestSmartServerRemoveBranch(TestCaseWithTransport):
+
+ def test_simple_remove_branch(self):
+ self.setup_smart_server_with_call_log()
+ self.make_branch('branch')
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['rmbranch', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(5, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_script.py b/bzrlib/tests/blackbox/test_script.py
new file mode 100644
index 0000000..b015837
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_script.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Blacbox tests for the test-script command."""
+
+import os
+
+from bzrlib import (
+ tests,
+ )
+from bzrlib.tests import (
+ script,
+ )
+
+
+class TestTestScript(tests.TestCaseInTempDir):
+
+ def test_unknown_file(self):
+ self.run_bzr(['test-script', 'I-do-not-exist'], retcode=3)
+
+ def test_empty_file(self):
+ self.build_tree_contents([('script', '')])
+ out, err = self.run_bzr(['test-script', 'script'])
+ out_lines = out.splitlines()
+ self.assertStartsWith(out_lines[-3], 'Ran 1 test in ')
+ self.assertEquals('OK', out_lines[-1])
+ self.assertEquals('', err)
+
+ def test_simple_file(self):
+ self.build_tree_contents([('script', '''
+$ echo hello world
+hello world
+''')])
+ out, err = self.run_bzr(['test-script', 'script'])
+ out_lines = out.splitlines()
+ self.assertStartsWith(out_lines[-3], 'Ran 1 test in ')
+ self.assertEquals('OK', out_lines[-1])
+ self.assertEquals('', err)
+
+ def test_null_output(self):
+ self.build_tree_contents([('script', '''
+$ echo hello world
+''')])
+ out, err = self.run_bzr(['test-script', 'script', '--null-output'])
+ out_lines = out.splitlines()
+ self.assertStartsWith(out_lines[-3], 'Ran 1 test in ')
+ self.assertEquals('OK', out_lines[-1])
+ self.assertEquals('', err)
+
+ def test_failing_script(self):
+ self.build_tree_contents([('script', '''
+$ echo hello foo
+hello bar
+''')])
+ out, err = self.run_bzr(['test-script', 'script'], retcode=1)
+ out_lines = out.splitlines()
+ self.assertStartsWith(out_lines[-3], 'Ran 1 test in ')
+ self.assertEquals('FAILED (failures=1)', out_lines[-1])
+ self.assertEquals('', err)
diff --git a/bzrlib/tests/blackbox/test_selftest.py b/bzrlib/tests/blackbox/test_selftest.py
new file mode 100644
index 0000000..343de62
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_selftest.py
@@ -0,0 +1,161 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""UI tests for the test framework."""
+
+import os
+
+from bzrlib import (
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+from bzrlib.transport import memory
+
+class SelfTestPatch:
+
+ def get_params_passed_to_core(self, cmdline):
+ params = []
+ def selftest(*args, **kwargs):
+ """Capture the arguments selftest was run with."""
+ params.append((args, kwargs))
+ return True
+ # Yes this prevents using threads to run the test suite in parallel,
+ # however we don't have a clean dependency injector for commands,
+ # and even if we did - we'd still be testing that the glue is wired
+ # up correctly. XXX: TODO: Solve this testing problem.
+ original_selftest = tests.selftest
+ tests.selftest = selftest
+ try:
+ self.run_bzr(cmdline)
+ return params[0]
+ finally:
+ tests.selftest = original_selftest
+
+
+class TestOptions(tests.TestCase, SelfTestPatch):
+
+ def test_load_list(self):
+ params = self.get_params_passed_to_core('selftest --load-list foo')
+ self.assertEqual('foo', params[1]['load_list'])
+
+ def test_transport_set_to_sftp(self):
+ # Test that we can pass a transport to the selftest core - sftp
+ # version.
+ self.requireFeature(features.paramiko)
+ from bzrlib.tests import stub_sftp
+ params = self.get_params_passed_to_core('selftest --transport=sftp')
+ self.assertEqual(stub_sftp.SFTPAbsoluteServer,
+ params[1]["transport"])
+
+ def test_transport_set_to_memory(self):
+ # Test that we can pass a transport to the selftest core - memory
+ # version.
+ params = self.get_params_passed_to_core('selftest --transport=memory')
+ self.assertEqual(memory.MemoryServer, params[1]["transport"])
+
+ def test_parameters_passed_to_core(self):
+ params = self.get_params_passed_to_core('selftest --list-only')
+ self.assertTrue("list_only" in params[1])
+ params = self.get_params_passed_to_core('selftest --list-only selftest')
+ self.assertTrue("list_only" in params[1])
+ params = self.get_params_passed_to_core(['selftest', '--list-only',
+ '--exclude', 'selftest'])
+ self.assertTrue("list_only" in params[1])
+ params = self.get_params_passed_to_core(['selftest', '--list-only',
+ 'selftest', '--randomize', 'now'])
+ self.assertSubset(["list_only", "random_seed"], params[1])
+
+ def test_starting_with(self):
+ params = self.get_params_passed_to_core('selftest --starting-with foo')
+ self.assertEqual(['foo'], params[1]['starting_with'])
+
+ def test_starting_with_multiple_argument(self):
+ params = self.get_params_passed_to_core(
+ 'selftest --starting-with foo --starting-with bar')
+ self.assertEqual(['foo', 'bar'], params[1]['starting_with'])
+
+ def test_subunit(self):
+ self.requireFeature(features.subunit)
+ params = self.get_params_passed_to_core('selftest --subunit')
+ self.assertEqual(tests.SubUnitBzrRunner, params[1]['runner_class'])
+
+ def _parse_test_list(self, lines, newlines_in_header=0):
+ "Parse a list of lines into a tuple of 3 lists (header,body,footer)."
+ in_header = newlines_in_header != 0
+ in_footer = False
+ header = []
+ body = []
+ footer = []
+ header_newlines_found = 0
+ for line in lines:
+ if in_header:
+ if line == '':
+ header_newlines_found += 1
+ if header_newlines_found >= newlines_in_header:
+ in_header = False
+ continue
+ header.append(line)
+ elif not in_footer:
+ if line.startswith('-------'):
+ in_footer = True
+ else:
+ body.append(line)
+ else:
+ footer.append(line)
+ # If the last body line is blank, drop it off the list
+ if len(body) > 0 and body[-1] == '':
+ body.pop()
+ return (header,body,footer)
+
+ def test_list_only(self):
+ # check that bzr selftest --list-only outputs no ui noise
+ def selftest(*args, **kwargs):
+ """Capture the arguments selftest was run with."""
+ return True
+ def outputs_nothing(cmdline):
+ out,err = self.run_bzr(cmdline)
+ (header,body,footer) = self._parse_test_list(out.splitlines())
+ num_tests = len(body)
+ self.assertLength(0, header)
+ self.assertLength(0, footer)
+ self.assertEqual('', err)
+ # Yes this prevents using threads to run the test suite in parallel,
+ # however we don't have a clean dependency injector for commands,
+ # and even if we did - we'd still be testing that the glue is wired
+ # up correctly. XXX: TODO: Solve this testing problem.
+ original_selftest = tests.selftest
+ tests.selftest = selftest
+ try:
+ outputs_nothing('selftest --list-only')
+ outputs_nothing('selftest --list-only selftest')
+ outputs_nothing(['selftest', '--list-only', '--exclude', 'selftest'])
+ finally:
+ tests.selftest = original_selftest
+
+ def test_lsprof_tests(self):
+ params = self.get_params_passed_to_core('selftest --lsprof-tests')
+ self.assertEqual(True, params[1]["lsprof_tests"])
+
+ def test_parallel_fork_unsupported(self):
+ if getattr(os, "fork", None) is not None:
+ self.addCleanup(setattr, os, "fork", os.fork)
+ del os.fork
+ out, err = self.run_bzr(["selftest", "--parallel=fork", "-s", "bt.x"],
+ retcode=3)
+ self.assertIn("platform does not support fork", err)
+ self.assertFalse(out)
diff --git a/bzrlib/tests/blackbox/test_send.py b/bzrlib/tests/blackbox/test_send.py
new file mode 100644
index 0000000..f3d7512
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_send.py
@@ -0,0 +1,466 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+# Authors: Aaron Bentley
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ branch,
+ merge_directive,
+ tests,
+ )
+from bzrlib.controldir import ControlDir
+from bzrlib.bundle import serializer
+from bzrlib.transport import memory
+from bzrlib.tests import (
+ scenarios,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+load_tests = scenarios.load_tests_apply_scenarios
+
+
+class TestSendMixin(object):
+
+ _default_command = ['send', '-o-']
+ _default_wd = 'branch'
+
+ def run_send(self, args, cmd=None, rc=0, wd=None, err_re=None):
+ if cmd is None: cmd = self._default_command
+ if wd is None: wd = self._default_wd
+ if err_re is None: err_re = []
+ return self.run_bzr(cmd + args, retcode=rc,
+ working_dir=wd,
+ error_regexes=err_re)
+
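+ # get_MD runs the send command (by default 'send -o-' in 'branch') and
+ # parses its stdout back into a MergeDirective for direct assertions.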
+ def get_MD(self, args, cmd=None, wd='branch'):
+ out = StringIO(self.run_send(args, cmd=cmd, wd=wd)[0])
+ return merge_directive.MergeDirective.from_lines(out)
+
+ def assertBundleContains(self, revs, args, cmd=None, wd='branch'):
+ md = self.get_MD(args, cmd=cmd, wd=wd)
+ br = serializer.read_bundle(StringIO(md.get_raw_bundle()))
+ self.assertEqual(set(revs), set(r.revision_id for r in br.revisions))
+
+
+class TestSend(tests.TestCaseWithTransport, TestSendMixin):
+
+ def setUp(self):
+ super(TestSend, self).setUp()
+ grandparent_tree = ControlDir.create_standalone_workingtree(
+ 'grandparent')
+ self.build_tree_contents([('grandparent/file1', 'grandparent')])
+ grandparent_tree.add('file1')
+ grandparent_tree.commit('initial commit', rev_id='rev1')
+
+ parent_bzrdir = grandparent_tree.bzrdir.sprout('parent')
+ parent_tree = parent_bzrdir.open_workingtree()
+ parent_tree.commit('next commit', rev_id='rev2')
+
+ branch_tree = parent_tree.bzrdir.sprout('branch').open_workingtree()
+ self.build_tree_contents([('branch/file1', 'branch')])
+ branch_tree.commit('last commit', rev_id='rev3')
+
+ def assertFormatIs(self, fmt_string, md):
+ self.assertEqual(fmt_string, md.get_raw_bundle().splitlines()[0])
+
+ def test_uses_parent(self):
+ """Parent location is used as a basis by default"""
+ errmsg = self.run_send([], rc=3, wd='grandparent')[1]
+ self.assertContainsRe(errmsg, 'No submit branch known or specified')
+ stdout, stderr = self.run_send([])
+ self.assertEqual(stderr.count('Using saved parent location'), 1)
+ self.assertBundleContains(['rev3'], [])
+
+ def test_bundle(self):
+ """Bundle works like send, except -o is not required"""
+ errmsg = self.run_send([], cmd=['bundle'], rc=3, wd='grandparent')[1]
+ self.assertContainsRe(errmsg, 'No submit branch known or specified')
+ stdout, stderr = self.run_send([], cmd=['bundle'])
+ self.assertEqual(stderr.count('Using saved parent location'), 1)
+ self.assertBundleContains(['rev3'], [], cmd=['bundle'])
+
+ def test_uses_submit(self):
+ """Submit location can be used and set"""
+ self.assertBundleContains(['rev3'], [])
+ self.assertBundleContains(['rev3', 'rev2'], ['../grandparent'])
+ # submit location should be auto-remembered
+ self.assertBundleContains(['rev3', 'rev2'], [])
+
+ self.run_send(['../parent'])
+ # We still point to ../grandparent
+ self.assertBundleContains(['rev3', 'rev2'], [])
+ # Remember parent now
+ self.run_send(['../parent', '--remember'])
+ # Now we point to parent
+ self.assertBundleContains(['rev3'], [])
+
+ err = self.run_send(['--remember'], rc=3)[1]
+ self.assertContainsRe(err,
+ '--remember requires a branch to be specified.')
+
+ def test_revision_branch_interaction(self):
+ self.assertBundleContains(['rev3', 'rev2'], ['../grandparent'])
+ self.assertBundleContains(['rev2'], ['../grandparent', '-r-2'])
+ self.assertBundleContains(['rev3', 'rev2'],
+ ['../grandparent', '-r-2..-1'])
+ md = self.get_MD(['-r-2..-1'])
+ self.assertEqual('rev2', md.base_revision_id)
+ self.assertEqual('rev3', md.revision_id)
+
+ def test_output(self):
+ # check output for consistency
+ # win32 stdout converts LF to CRLF,
+ # which would break patch-based bundles
+ self.assertBundleContains(['rev3'], [])
+
+ def test_no_common_ancestor(self):
+ foo = self.make_branch_and_tree('foo')
+ foo.commit('rev a')
+ bar = self.make_branch_and_tree('bar')
+ bar.commit('rev b')
+ self.run_send(['--from', 'foo', '../bar'], wd='foo')
+
+ def test_content_options(self):
+ """--no-patch and --no-bundle should work and be independant"""
+ md = self.get_MD([])
+ self.assertIsNot(None, md.bundle)
+ self.assertIsNot(None, md.patch)
+
+ md = self.get_MD(['--format=0.9'])
+ self.assertIsNot(None, md.bundle)
+ self.assertIsNot(None, md.patch)
+
+ md = self.get_MD(['--no-patch'])
+ self.assertIsNot(None, md.bundle)
+ self.assertIs(None, md.patch)
+ self.run_bzr_error(['Format 0.9 does not permit bundle with no patch'],
+ ['send', '--no-patch', '--format=0.9', '-o-'],
+ working_dir='branch')
+ md = self.get_MD(['--no-bundle', '.', '.'])
+ self.assertIs(None, md.bundle)
+ self.assertIsNot(None, md.patch)
+
+ md = self.get_MD(['--no-bundle', '--format=0.9', '../parent',
+ '.'])
+ self.assertIs(None, md.bundle)
+ self.assertIsNot(None, md.patch)
+
+ md = self.get_MD(['--no-bundle', '--no-patch', '.', '.'])
+ self.assertIs(None, md.bundle)
+ self.assertIs(None, md.patch)
+
+ md = self.get_MD(['--no-bundle', '--no-patch', '--format=0.9',
+ '../parent', '.'])
+ self.assertIs(None, md.bundle)
+ self.assertIs(None, md.patch)
+
+ def test_from_option(self):
+ self.run_bzr('send', retcode=3)
+ md = self.get_MD(['--from', 'branch'])
+ self.assertEqual('rev3', md.revision_id)
+ md = self.get_MD(['-f', 'branch'])
+ self.assertEqual('rev3', md.revision_id)
+
+ def test_output_option(self):
+ stdout = self.run_bzr('send -f branch --output file1')[0]
+ self.assertEqual('', stdout)
+ md_file = open('file1', 'rb')
+ self.addCleanup(md_file.close)
+ self.assertContainsRe(md_file.read(), 'rev3')
+ stdout = self.run_bzr('send -f branch --output -')[0]
+ self.assertContainsRe(stdout, 'rev3')
+
+ def test_note_revisions(self):
+ stderr = self.run_send([])[1]
+ self.assertEndsWith(stderr, '\nBundling 1 revision.\n')
+
+ def test_mailto_option(self):
+ b = branch.Branch.open('branch')
+ b.get_config_stack().set('mail_client', 'editor')
+ self.run_bzr_error(
+ ('No mail-to address \\(--mail-to\\) or output \\(-o\\) specified',
+ ), 'send -f branch')
+ b.get_config_stack().set('mail_client', 'bogus')
+ self.run_send([])
+ self.run_bzr_error(('Bad value "bogus" for option "mail_client"',),
+ 'send -f branch --mail-to jrandom@example.org')
+ b.get_config_stack().set('submit_to', 'jrandom@example.org')
+ self.run_bzr_error(('Bad value "bogus" for option "mail_client"',),
+ 'send -f branch')
+
+ def test_mailto_child_option(self):
+ """Make sure that child_submit_to is used."""
+ b = branch.Branch.open('branch')
+ b.get_config_stack().set('mail_client', 'bogus')
+ parent = branch.Branch.open('parent')
+ parent.get_config_stack().set('child_submit_to', 'somebody@example.org')
+ self.run_bzr_error(('Bad value "bogus" for option "mail_client"',),
+ 'send -f branch')
+
+ def test_format(self):
+ md = self.get_MD(['--format=4'])
+ self.assertIs(merge_directive.MergeDirective2, md.__class__)
+ self.assertFormatIs('# Bazaar revision bundle v4', md)
+
+ md = self.get_MD(['--format=0.9'])
+ self.assertFormatIs('# Bazaar revision bundle v0.9', md)
+
+ md = self.get_MD(['--format=0.9'], cmd=['bundle'])
+ self.assertFormatIs('# Bazaar revision bundle v0.9', md)
+ self.assertIs(merge_directive.MergeDirective, md.__class__)
+
+ self.run_bzr_error(['Bad value .* for option .format.'],
+ 'send -f branch -o- --format=0.999')[0]
+
+ def test_format_child_option(self):
+ br = branch.Branch.open('parent')
+ conf = br.get_config_stack()
+ conf.set('child_submit_format', '4')
+ md = self.get_MD([])
+ self.assertIs(merge_directive.MergeDirective2, md.__class__)
+
+ conf.set('child_submit_format', '0.9')
+ md = self.get_MD([])
+ self.assertFormatIs('# Bazaar revision bundle v0.9', md)
+
+ md = self.get_MD([], cmd=['bundle'])
+ self.assertFormatIs('# Bazaar revision bundle v0.9', md)
+ self.assertIs(merge_directive.MergeDirective, md.__class__)
+
+ conf.set('child_submit_format', '0.999')
+ self.run_bzr_error(["No such send format '0.999'"],
+ 'send -f branch -o-')[0]
+
+ def test_message_option(self):
+ self.run_bzr('send', retcode=3)
+ md = self.get_MD([])
+ self.assertIs(None, md.message)
+ md = self.get_MD(['-m', 'my message'])
+ self.assertEqual('my message', md.message)
+
+ def test_omitted_revision(self):
+ md = self.get_MD(['-r-2..'])
+ self.assertEqual('rev2', md.base_revision_id)
+ self.assertEqual('rev3', md.revision_id)
+ md = self.get_MD(['-r..3', '--from', 'branch', 'grandparent'], wd='.')
+ self.assertEqual('rev1', md.base_revision_id)
+ self.assertEqual('rev3', md.revision_id)
+
+ def test_nonexistent_branch(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ location = self.get_url('absentdir/')
+ out, err = self.run_bzr(["send", "--from", location], retcode=3)
+ self.assertEqual(out, '')
+ self.assertEqual(err, 'bzr: ERROR: Not a branch: "%s".\n' % location)
+
+
+class TestSendStrictMixin(TestSendMixin):
+
+ def make_parent_and_local_branches(self):
+ # Create a 'parent' branch as the base
+ self.parent_tree = ControlDir.create_standalone_workingtree('parent')
+ self.build_tree_contents([('parent/file', 'parent')])
+ self.parent_tree.add('file')
+ self.parent_tree.commit('first commit', rev_id='parent')
+ # Branch 'local' from parent and do a change
+ local_bzrdir = self.parent_tree.bzrdir.sprout('local')
+ self.local_tree = local_bzrdir.open_workingtree()
+ self.build_tree_contents([('local/file', 'local')])
+ self.local_tree.commit('second commit', rev_id='local')
+
+ _default_command = ['send', '-o-', '../parent']
+ _default_wd = 'local'
+ _default_sent_revs = ['local']
+ _default_errors = ['Working tree ".*/local/" has uncommitted '
+ 'changes \(See bzr status\)\.',]
+ _default_additional_error = 'Use --no-strict to force the send.\n'
+ _default_additional_warning = 'Uncommitted changes will not be sent.'
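+ # Behaviour exercised by the subclasses below: with uncommitted or
+ # out-of-sync changes a plain 'send' warns but succeeds, '--strict' (or
+ # send_strict=true) fails with the error above, and '--no-strict' forces
+ # the send.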
+
+ def set_config_send_strict(self, value):
+ br = branch.Branch.open('local')
+ br.get_config_stack().set('send_strict', value)
+
+ def assertSendFails(self, args):
+ out, err = self.run_send(args, rc=3, err_re=self._default_errors)
+ self.assertContainsRe(err, self._default_additional_error)
+
+ def assertSendSucceeds(self, args, revs=None, with_warning=False):
+ if with_warning:
+ err_re = self._default_errors
+ else:
+ err_re = []
+ if revs is None:
+ revs = self._default_sent_revs
+ out, err = self.run_send(args, err_re=err_re)
+ if len(revs) == 1:
+ bundling_revs = 'Bundling %d revision.\n' % len(revs)
+ else:
+ bundling_revs = 'Bundling %d revisions.\n' % len(revs)
+ if with_warning:
+ self.assertContainsRe(err, self._default_additional_warning)
+ self.assertEndsWith(err, bundling_revs)
+ else:
+ self.assertEquals(bundling_revs, err)
+ md = merge_directive.MergeDirective.from_lines(StringIO(out))
+ self.assertEqual('parent', md.base_revision_id)
+ br = serializer.read_bundle(StringIO(md.get_raw_bundle()))
+ self.assertEqual(set(revs), set(r.revision_id for r in br.revisions))
+
+
+class TestSendStrictWithoutChanges(tests.TestCaseWithTransport,
+ TestSendStrictMixin):
+
+ def setUp(self):
+ super(TestSendStrictWithoutChanges, self).setUp()
+ self.make_parent_and_local_branches()
+
+ def test_send_without_workingtree(self):
+ ControlDir.open("local").destroy_workingtree()
+ self.assertSendSucceeds([])
+
+ def test_send_default(self):
+ self.assertSendSucceeds([])
+
+ def test_send_strict(self):
+ self.assertSendSucceeds(['--strict'])
+
+ def test_send_no_strict(self):
+ self.assertSendSucceeds(['--no-strict'])
+
+ def test_send_config_var_strict(self):
+ self.set_config_send_strict('true')
+ self.assertSendSucceeds([])
+
+ def test_send_config_var_no_strict(self):
+ self.set_config_send_strict('false')
+ self.assertSendSucceeds([])
+
+
+class TestSendStrictWithChanges(tests.TestCaseWithTransport,
+ TestSendStrictMixin):
+
+ # These are textually the same as test_push.strict_push_change_scenarios,
+ # but since the functions are reimplemented here, the definitions are left
+ # here too.
+ scenarios = [
+ ('uncommitted',
+ dict(_changes_type='_uncommitted_changes')),
+ ('pending_merges',
+ dict(_changes_type='_pending_merges')),
+ ('out-of-sync-trees',
+ dict(_changes_type='_out_of_sync_trees')),
+ ]
+
+ _changes_type = None # Set by load_tests
+
+ def setUp(self):
+ super(TestSendStrictWithChanges, self).setUp()
+ # load_tests sets _changes_type to the name of the method we want to
+ # call now
+ do_changes_func = getattr(self, self._changes_type)
+ do_changes_func()
+
+ def _uncommitted_changes(self):
+ self.make_parent_and_local_branches()
+ # Make a change without committing it
+ self.build_tree_contents([('local/file', 'modified')])
+
+ def _pending_merges(self):
+ self.make_parent_and_local_branches()
+ # Create 'other' branch containing a new file
+ other_bzrdir = self.parent_tree.bzrdir.sprout('other')
+ other_tree = other_bzrdir.open_workingtree()
+ self.build_tree_contents([('other/other-file', 'other')])
+ other_tree.add('other-file')
+ other_tree.commit('other commit', rev_id='other')
+ # Merge and revert, leaving a pending merge
+ self.local_tree.merge_from_branch(other_tree.branch)
+ self.local_tree.revert(filenames=['other-file'], backups=False)
+
+ def _out_of_sync_trees(self):
+ self.make_parent_and_local_branches()
+ self.run_bzr(['checkout', '--lightweight', 'local', 'checkout'])
+ # Make a change and commit it
+ self.build_tree_contents([('local/file', 'modified in local')])
+ self.local_tree.commit('modify file', rev_id='modified-in-local')
+ # Exercise commands from the checkout directory
+ self._default_wd = 'checkout'
+ self._default_errors = ["Working tree is out of date, please run"
+ " 'bzr update'\.",]
+ self._default_sent_revs = ['modified-in-local', 'local']
+
+ def test_send_default(self):
+ self.assertSendSucceeds([], with_warning=True)
+
+ def test_send_with_revision(self):
+ self.assertSendSucceeds(['-r', 'revid:local'], revs=['local'])
+
+ def test_send_no_strict(self):
+ self.assertSendSucceeds(['--no-strict'])
+
+ def test_send_strict_with_changes(self):
+ self.assertSendFails(['--strict'])
+
+ def test_send_respect_config_var_strict(self):
+ self.set_config_send_strict('true')
+ self.assertSendFails([])
+ self.assertSendSucceeds(['--no-strict'])
+
+ def test_send_bogus_config_var_ignored(self):
+ self.set_config_send_strict("I'm unsure")
+ self.assertSendSucceeds([], with_warning=True)
+
+ def test_send_no_strict_command_line_override_config(self):
+ self.set_config_send_strict('true')
+ self.assertSendFails([])
+ self.assertSendSucceeds(['--no-strict'])
+
+ def test_send_strict_command_line_override_config(self):
+ self.set_config_send_strict('false')
+ self.assertSendSucceeds([])
+ self.assertSendFails(['--strict'])
+
+
+class TestBundleStrictWithoutChanges(TestSendStrictWithoutChanges):
+
+ _default_command = ['bundle-revisions', '../parent']
+
+
+class TestSmartServerSend(tests.TestCaseWithTransport):
+
+ def test_send(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ local = t.bzrdir.sprout('local-branch').open_workingtree()
+ self.build_tree_contents([('branch/foo', 'thenewcontents')])
+ local.commit("anothermessage")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(
+ ['send', '-o', 'x.diff', self.get_url('branch')], working_dir='local-branch')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(7, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_serve.py b/bzrlib/tests/blackbox/test_serve.py
new file mode 100644
index 0000000..c709e4e
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_serve.py
@@ -0,0 +1,450 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests of the bzr serve command."""
+
+import os
+import signal
+import sys
+import thread
+import threading
+
+from bzrlib import (
+ builtins,
+ config,
+ errors,
+ osutils,
+ revision as _mod_revision,
+ trace,
+ transport,
+ urlutils,
+ )
+from bzrlib.branch import Branch
+from bzrlib.controldir import ControlDir
+from bzrlib.smart import client, medium
+from bzrlib.smart.server import (
+ BzrServerFactory,
+ SmartTCPServer,
+ )
+from bzrlib.tests import (
+ TestCaseWithMemoryTransport,
+ TestCaseWithTransport,
+ )
+from bzrlib.transport import remote
+
+
+class TestBzrServeBase(TestCaseWithTransport):
+
+ def run_bzr_serve_then_func(self, serve_args, retcode=0, func=None,
+ *func_args, **func_kwargs):
+ """Run 'bzr serve', and run the given func in a thread once the server
+ has started.
+
+ When 'func' terminates, the server will be terminated too.
+
+ Returns stdout and stderr.
+ """
+ def on_server_start_thread(tcp_server):
+ """This runs concurrently with the server thread.
+
+ The server is interrupted as soon as ``func`` finishes, even if an
+ exception is encountered.
+ """
+ try:
+ # Run func if set
+ self.tcp_server = tcp_server
+ if func is not None:
+ try:
+ func(*func_args, **func_kwargs)
+ except Exception, e:
+ # Log errors to make some test failures a little less
+ # mysterious.
+ trace.mutter('func broke: %r', e)
+ finally:
+ # Then stop the server
+ trace.mutter('interrupting...')
+ thread.interrupt_main()
+ # When the hook is fired, it just starts ``on_server_start_thread`` and
+ # returns.
+ def on_server_start(backing_urls, tcp_server):
+ t = threading.Thread(
+ target=on_server_start_thread, args=(tcp_server,))
+ t.start()
+ # install hook
+ SmartTCPServer.hooks.install_named_hook(
+ 'server_started_ex', on_server_start,
+ 'run_bzr_serve_then_func hook')
+ # It seems thread.interrupt_main() will not raise KeyboardInterrupt
+ # until after socket.accept returns. So we set the timeout low to make
+ # the test faster.
+ self.overrideAttr(SmartTCPServer, '_ACCEPT_TIMEOUT', 0.1)
+ # start a TCP server
+ try:
+ out, err = self.run_bzr(['serve'] + list(serve_args),
+ retcode=retcode)
+ except KeyboardInterrupt, e:
+ out, err = e.args
+ return out, err
+
+
+class TestBzrServe(TestBzrServeBase):
+
+ def setUp(self):
+ super(TestBzrServe, self).setUp()
+ self.disable_missing_extensions_warning()
+
+ def test_server_exception_with_hook(self):
+ """Catch exception from the server in the server_exception hook.
+
+ We use ``run_bzr_serve_then_func`` without a ``func`` so the server
+ will receive a KeyboardInterrupt exception we want to catch.
+ """
+ def hook(exception):
+ if exception[0] is KeyboardInterrupt:
+ sys.stderr.write('catching KeyboardInterrupt\n')
+ return True
+ else:
+ return False
+ SmartTCPServer.hooks.install_named_hook(
+ 'server_exception', hook,
+ 'test_server_except_hook hook')
+ args = ['--listen', 'localhost', '--port', '0', '--quiet']
+ out, err = self.run_bzr_serve_then_func(args, retcode=0)
+ self.assertEqual('catching KeyboardInterrupt\n', err)
+
+ def test_server_exception_no_hook(self):
+ """test exception without hook returns error"""
+ args = []
+ out, err = self.run_bzr_serve_then_func(args, retcode=3)
+
+ def assertInetServerShutsdownCleanly(self, process):
+ """Shutdown the server process looking for errors."""
+ # Shutdown the server: the server should shut down when it cannot read
+ # from stdin anymore.
+ process.stdin.close()
+ # Hide stdin from the subprocess module, so it won't fail to close it.
+ process.stdin = None
+ result = self.finish_bzr_subprocess(process)
+ self.assertEqual('', result[0])
+ self.assertEqual('', result[1])
+
+ def assertServerFinishesCleanly(self, process):
+ """Shutdown the bzr serve instance process looking for errors."""
+ # Shutdown the server
+ result = self.finish_bzr_subprocess(process, retcode=3,
+ send_signal=signal.SIGINT)
+ self.assertEqual('', result[0])
+ self.assertEqual('bzr: interrupted\n', result[1])
+
+ def make_read_requests(self, branch):
+ """Do some read only requests."""
+ branch.lock_read()
+ try:
+ branch.repository.all_revision_ids()
+ self.assertEqual(_mod_revision.NULL_REVISION,
+ _mod_revision.ensure_null(branch.last_revision()))
+ finally:
+ branch.unlock()
+
+ def start_server_inet(self, extra_options=()):
+ """Start a bzr server subprocess using the --inet option.
+
+ :param extra_options: extra options to give the server.
+ :return: a tuple with the bzr process handle for passing to
+ finish_bzr_subprocess, a client for the server, and a transport.
+ """
+ # Serve from the current directory
+ args = ['serve', '--inet']
+ args.extend(extra_options)
+ process = self.start_bzr_subprocess(args)
+
+ # Connect to the server
+ # We use this URL because, while it is not a valid URL for connecting to
+ # this server instance, the transport needs a URL.
+ url = 'bzr://localhost/'
+ self.permit_url(url)
+ client_medium = medium.SmartSimplePipesClientMedium(
+ process.stdout, process.stdin, url)
+ transport = remote.RemoteTransport(url, medium=client_medium)
+ return process, transport
+
+ def start_server_port(self, extra_options=()):
+ """Start a bzr server subprocess.
+
+ :param extra_options: extra options to give the server.
+ :return: a tuple with the bzr process handle for passing to
+ finish_bzr_subprocess, and the base url for the server.
+ """
+ # Serve from the current directory
+ args = ['serve', '--listen', 'localhost', '--port', '0']
+ args.extend(extra_options)
+ process = self.start_bzr_subprocess(args, skip_if_plan_to_signal=True)
+ port_line = process.stderr.readline()
+ prefix = 'listening on port: '
+ self.assertStartsWith(port_line, prefix)
+ port = int(port_line[len(prefix):])
+ url = 'bzr://localhost:%d/' % port
+ self.permit_url(url)
+ return process, url
+
+ def test_bzr_serve_quiet(self):
+ self.make_branch('.')
+ args = ['--listen', 'localhost', '--port', '0', '--quiet']
+ out, err = self.run_bzr_serve_then_func(args, retcode=3)
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_bzr_serve_inet_readonly(self):
+ """bzr server should provide a read only filesystem by default."""
+ process, transport = self.start_server_inet()
+ self.assertRaises(errors.TransportNotPossible, transport.mkdir, 'adir')
+ self.assertInetServerShutsdownCleanly(process)
+
+ def test_bzr_serve_inet_readwrite(self):
+ # Make a branch
+ self.make_branch('.')
+
+ process, transport = self.start_server_inet(['--allow-writes'])
+
+ # We get a working branch, and can create a directory
+ branch = ControlDir.open_from_transport(transport).open_branch()
+ self.make_read_requests(branch)
+ transport.mkdir('adir')
+ self.assertInetServerShutsdownCleanly(process)
+
+ def test_bzr_serve_port_readonly(self):
+ """bzr server should provide a read only filesystem by default."""
+ process, url = self.start_server_port()
+ t = transport.get_transport_from_url(url)
+ self.assertRaises(errors.TransportNotPossible, t.mkdir, 'adir')
+ self.assertServerFinishesCleanly(process)
+
+ def test_bzr_serve_port_readwrite(self):
+ # Make a branch
+ self.make_branch('.')
+
+ process, url = self.start_server_port(['--allow-writes'])
+
+ # Connect to the server
+ branch = Branch.open(url)
+ self.make_read_requests(branch)
+ self.assertServerFinishesCleanly(process)
+
+ def test_bzr_serve_supports_protocol(self):
+ # Make a branch
+ self.make_branch('.')
+
+ process, url = self.start_server_port(['--allow-writes',
+ '--protocol=bzr'])
+
+ # Connect to the server
+ branch = Branch.open(url)
+ self.make_read_requests(branch)
+ self.assertServerFinishesCleanly(process)
+
+ def test_bzr_serve_dhpss(self):
+ # This is a smoke test that the server doesn't crash when run with
+ # -Dhpss, and does drop some hpss logging to the file.
+ self.make_branch('.')
+ log_fname = os.getcwd() + '/server.log'
+ self.overrideEnv('BZR_LOG', log_fname)
+ process, transport = self.start_server_inet(['-Dhpss'])
+ branch = ControlDir.open_from_transport(transport).open_branch()
+ self.make_read_requests(branch)
+ self.assertInetServerShutsdownCleanly(process)
+ f = open(log_fname, 'rb')
+ content = f.read()
+ f.close()
+ self.assertContainsRe(content, r'hpss request: \[[0-9-]+\]')
+
+ def test_bzr_serve_supports_configurable_timeout(self):
+ gs = config.GlobalStack()
+ gs.set('serve.client_timeout', 0.2)
+ process, url = self.start_server_port()
+ self.build_tree_contents([('a_file', 'contents\n')])
+ # We can connect and issue a request
+ t = transport.get_transport_from_url(url)
+ self.assertEqual('contents\n', t.get_bytes('a_file'))
+ # However, if we just wait for more content from the server, it will
+ # eventually disconnect us.
+ # TODO: Use something like signal.alarm() so that if the server doesn't
+ # properly handle the timeout, we end up failing the test instead
+ # of hanging forever.
+ m = t.get_smart_medium()
+ m.read_bytes(1)
+ # Now, we wait for timeout to trigger
+ err = process.stderr.readline()
+ self.assertEqual(
+ 'Connection Timeout: disconnecting client after 0.2 seconds\n',
+ err)
+ self.assertServerFinishesCleanly(process)
+
+ def test_bzr_serve_supports_client_timeout(self):
+ process, url = self.start_server_port(['--client-timeout=0.1'])
+ self.build_tree_contents([('a_file', 'contents\n')])
+ # We can connect and issue a request
+ t = transport.get_transport_from_url(url)
+ self.assertEqual('contents\n', t.get_bytes('a_file'))
+ # However, if we just wait for more content from the server, it will
+ # eventually disconnect us.
+ # TODO: Use something like signal.alarm() so that if the server doesn't
+ # properly handle the timeout, we end up failing the test instead
+ # of hanging forever.
+ m = t.get_smart_medium()
+ m.read_bytes(1)
+ # Now, we wait for timeout to trigger
+ err = process.stderr.readline()
+ self.assertEqual(
+ 'Connection Timeout: disconnecting client after 0.1 seconds\n',
+ err)
+ self.assertServerFinishesCleanly(process)
+
+ def test_bzr_serve_graceful_shutdown(self):
+ big_contents = 'a'*64*1024
+ self.build_tree_contents([('bigfile', big_contents)])
+ process, url = self.start_server_port(['--client-timeout=1.0'])
+ t = transport.get_transport_from_url(url)
+ m = t.get_smart_medium()
+ c = client._SmartClient(m)
+ # Start, but don't finish a response
+ resp, response_handler = c.call_expecting_body('get', 'bigfile')
+ self.assertEqual(('ok',), resp)
+ # Note: process.send_signal is a Python 2.6ism
+ process.send_signal(signal.SIGHUP)
+ # Wait for the server to notice the signal, and then read the actual
+ # body of the response. That way we know that it is waiting for the
+ # request to finish
+ self.assertEqual('Requested to stop gracefully\n',
+ process.stderr.readline())
+ self.assertEqual('Waiting for 1 client(s) to finish\n',
+ process.stderr.readline())
+ body = response_handler.read_body_bytes()
+ if body != big_contents:
+ self.fail('Failed to properly read the contents of "bigfile"')
+ # Now that our request is finished, the medium should notice it has
+ # been disconnected.
+ self.assertEqual('', m.read_bytes(1))
+ # And the server should be stopping
+ self.assertEqual(0, process.wait())
+
+
+class TestCmdServeChrooting(TestBzrServeBase):
+
+ def test_serve_tcp(self):
+ """'bzr serve' wraps the given --directory in a ChrootServer.
+
+ So requests that search up through the parent directories (like
+ find_repositoryV3) will give "not found" responses, rather than
+ InvalidURLJoin or jail break errors.
+ """
+ t = self.get_transport()
+ t.mkdir('server-root')
+ self.run_bzr_serve_then_func(
+ ['--listen', '127.0.0.1', '--port', '0',
+ '--directory', t.local_abspath('server-root'),
+ '--allow-writes'],
+ func=self.when_server_started)
+ # The when_server_started method issued a find_repositoryV3 that should
+ # fail with 'norepository' because there are no repositories inside the
+ # --directory.
+ self.assertEqual(('norepository',), self.client_resp)
+
+ def when_server_started(self):
+ # Connect to the TCP server and issue some requests and see what comes
+ # back.
+ client_medium = medium.SmartTCPClientMedium(
+ '127.0.0.1', self.tcp_server.port,
+ 'bzr://localhost:%d/' % (self.tcp_server.port,))
+ smart_client = client._SmartClient(client_medium)
+ resp = smart_client.call('mkdir', 'foo', '')
+ resp = smart_client.call('BzrDirFormat.initialize', 'foo/')
+ try:
+ resp = smart_client.call('BzrDir.find_repositoryV3', 'foo/')
+ except errors.ErrorFromSmartServer, e:
+ resp = e.error_tuple
+ self.client_resp = resp
+ client_medium.disconnect()
+
+
+class TestUserdirExpansion(TestCaseWithMemoryTransport):
+
+ @staticmethod
+ def fake_expanduser(path):
+ """A simple, environment-independent, function for the duration of this
+ test.
+
+ Paths starting with a path segment of '~user' will expand to start with
+ '/home/user/'. Every other path will be unchanged.
+ """
+ if path.split('/', 1)[0] == '~user':
+ return '/home/user' + path[len('~user'):]
+ return path
+
+ def make_test_server(self, base_path='/'):
+ """Make and start a BzrServerFactory, backed by a memory transport, and
+ create '/home/user' in that transport.
+ """
+ bzr_server = BzrServerFactory(
+ self.fake_expanduser, lambda t: base_path)
+ mem_transport = self.get_transport()
+ mem_transport.mkdir_multi(['home', 'home/user'])
+ bzr_server.set_up(mem_transport, None, None, inet=True, timeout=4.0)
+ self.addCleanup(bzr_server.tear_down)
+ return bzr_server
+
+ def test_bzr_serve_expands_userdir(self):
+ bzr_server = self.make_test_server()
+ self.assertTrue(bzr_server.smart_server.backing_transport.has('~user'))
+
+ def test_bzr_serve_does_not_expand_userdir_outside_base(self):
+ bzr_server = self.make_test_server('/foo')
+ self.assertFalse(bzr_server.smart_server.backing_transport.has('~user'))
+
+ def test_get_base_path(self):
+ """cmd_serve will turn the --directory option into a LocalTransport
+ (optionally decorated with 'readonly+'). BzrServerFactory can
+ determine the original --directory from that transport.
+ """
+ # URLs always include the trailing slash, and get_base_path returns it
+ base_dir = osutils.abspath('/a/b/c') + '/'
+ base_url = urlutils.local_path_to_url(base_dir) + '/'
+ # Define a fake 'protocol' to capture the transport that cmd_serve
+ # passes to serve_bzr.
+ def capture_transport(transport, host, port, inet, timeout):
+ self.bzr_serve_transport = transport
+ cmd = builtins.cmd_serve()
+ # Read-only
+ cmd.run(directory=base_dir, protocol=capture_transport)
+ server_maker = BzrServerFactory()
+ self.assertEqual(
+ 'readonly+%s' % base_url, self.bzr_serve_transport.base)
+ self.assertEqual(
+ base_dir, server_maker.get_base_path(self.bzr_serve_transport))
+ # Read-write
+ cmd.run(directory=base_dir, protocol=capture_transport,
+ allow_writes=True)
+ server_maker = BzrServerFactory()
+ self.assertEqual(base_url, self.bzr_serve_transport.base)
+ self.assertEqual(base_dir,
+ server_maker.get_base_path(self.bzr_serve_transport))
+ # Read-only, from a URL
+ cmd.run(directory=base_url, protocol=capture_transport)
+ server_maker = BzrServerFactory()
+ self.assertEqual(
+ 'readonly+%s' % base_url, self.bzr_serve_transport.base)
+ self.assertEqual(
+ base_dir, server_maker.get_base_path(self.bzr_serve_transport))
diff --git a/bzrlib/tests/blackbox/test_shared_repository.py b/bzrlib/tests/blackbox/test_shared_repository.py
new file mode 100644
index 0000000..ae2a243
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_shared_repository.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for repositories with shared branches"""
+
+import os
+
+from bzrlib.bzrdir import BzrDirMetaFormat1
+from bzrlib.controldir import ControlDir
+import bzrlib.errors as errors
+from bzrlib.tests import TestCaseInTempDir
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestSharedRepo(TestCaseInTempDir):
+
+ def test_make_repository(self):
+ out, err = self.run_bzr("init-repository a")
+ self.assertEqual(out,
+"""Shared repository with trees (format: 2a)
+Location:
+ shared repository: a
+""")
+ self.assertEqual(err, "")
+ dir = ControlDir.open('a')
+ self.assertIs(dir.open_repository().is_shared(), True)
+ self.assertRaises(errors.NotBranchError, dir.open_branch)
+ self.assertRaises(errors.NoWorkingTree, dir.open_workingtree)
+
+ def test_make_repository_quiet(self):
+ out, err = self.run_bzr("init-repository a -q")
+ self.assertEqual(out, "")
+ self.assertEqual(err, "")
+ dir = ControlDir.open('a')
+ self.assertIs(dir.open_repository().is_shared(), True)
+ self.assertRaises(errors.NotBranchError, dir.open_branch)
+ self.assertRaises(errors.NoWorkingTree, dir.open_workingtree)
+
+ def test_init_repo_existing_dir(self):
+ """Make repo in existing directory.
+
+ (Malone #38331)
+ """
+ out, err = self.run_bzr("init-repository .")
+ dir = ControlDir.open('.')
+ self.assertTrue(dir.open_repository())
+
+ def test_init(self):
+ self.run_bzr("init-repo a")
+ self.run_bzr("init --format=default a/b")
+ dir = ControlDir.open('a')
+ self.assertIs(dir.open_repository().is_shared(), True)
+ self.assertRaises(errors.NotBranchError, dir.open_branch)
+ self.assertRaises(errors.NoWorkingTree, dir.open_workingtree)
+ bdir = ControlDir.open('a/b')
+ bdir.open_branch()
+ self.assertRaises(errors.NoRepositoryPresent, bdir.open_repository)
+ wt = bdir.open_workingtree()
+
+ def test_branch(self):
+ self.run_bzr("init-repo a")
+ self.run_bzr("init --format=default a/b")
+ self.run_bzr('branch a/b a/c')
+ cdir = ControlDir.open('a/c')
+ cdir.open_branch()
+ self.assertRaises(errors.NoRepositoryPresent, cdir.open_repository)
+ cdir.open_workingtree()
+
+ def test_branch_tree(self):
+ self.run_bzr("init-repo --trees a")
+ self.run_bzr("init --format=default b")
+ with file('b/hello', 'wt') as f: f.write('bar')
+ self.run_bzr("add b/hello")
+ self.run_bzr("commit -m bar b/hello")
+
+ self.run_bzr('branch b a/c')
+ cdir = ControlDir.open('a/c')
+ cdir.open_branch()
+ self.assertRaises(errors.NoRepositoryPresent, cdir.open_repository)
+ self.assertPathExists('a/c/hello')
+ cdir.open_workingtree()
+
+ def test_trees_default(self):
+ # 0.15 switched to trees by default
+ self.run_bzr("init-repo repo")
+ repo = ControlDir.open("repo").open_repository()
+ self.assertEqual(True, repo.make_working_trees())
+
+ def test_trees_argument(self):
+ # Supplying the --trees argument should be harmless;
+ # as it was previously non-default, we need to get it right.
+ self.run_bzr("init-repo --trees trees")
+ repo = ControlDir.open("trees").open_repository()
+ self.assertEqual(True, repo.make_working_trees())
+
+ def test_no_trees_argument(self):
+ # --no-trees should make it so that there is no working tree
+ self.run_bzr("init-repo --no-trees notrees")
+ repo = ControlDir.open("notrees").open_repository()
+ self.assertEqual(False, repo.make_working_trees())
+
+ def test_init_repo_smart_acceptance(self):
+ # The amount of hpss calls made on init-repo to a smart server should
+ # be fixed.
+ self.setup_smart_server_with_call_log()
+ self.run_bzr(['init-repo', self.get_url('repo')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(11, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_notification_on_branch_from_repository(self):
+ out, err = self.run_bzr("init-repository -q a")
+ self.assertEqual(out, "")
+ self.assertEqual(err, "")
+ dir = ControlDir.open('a')
+ dir.open_repository() # there is a repository there
+ e = self.assertRaises(errors.NotBranchError, dir.open_branch)
+ self.assertContainsRe(str(e), "location is a repository")
+
+ def test_notification_on_branch_from_nonrepository(self):
+ fmt = BzrDirMetaFormat1()
+ t = self.get_transport()
+ t.mkdir('a')
+ dir = fmt.initialize_on_transport(t.clone('a'))
+ self.assertRaises(errors.NoRepositoryPresent, dir.open_repository)
+ e = self.assertRaises(errors.NotBranchError, dir.open_branch)
+ self.assertNotContainsRe(str(e), "location is a repository")
+
+ def test_init_repo_with_post_repo_init_hook(self):
+ calls = []
+ ControlDir.hooks.install_named_hook('post_repo_init', calls.append, None)
+ self.assertLength(0, calls)
+ self.run_bzr("init-repository a")
+ self.assertLength(1, calls)
+
+ def test_init_repo_without_username(self):
+ """Ensure init-repo works if username is not set.
+ """
+ # bzr makes a user-specified whoami mandatory for operations like
+ # commit, as whoami is recorded. init-repo, however, is not so final
+ # and uses whoami only in a lock file. Without whoami the login name
+ # is used. This test ensures that init-repo passes even when whoami
+ # is not available.
+ self.overrideEnv('EMAIL', None)
+ self.overrideEnv('BZR_EMAIL', None)
+ out, err = self.run_bzr(['init-repo', 'foo'])
+ self.assertEqual(err, '')
+ self.assertTrue(os.path.exists('foo'))
diff --git a/bzrlib/tests/blackbox/test_shell_complete.py b/bzrlib/tests/blackbox/test_shell_complete.py
new file mode 100644
index 0000000..cef70ae
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_shell_complete.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2011 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for 'bzr shell-complete'."""
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestShellComplete(TestCaseWithTransport):
+
+ def test_shell_complete(self):
+ self.run_bzr("shell-complete")
diff --git a/bzrlib/tests/blackbox/test_shelve.py b/bzrlib/tests/blackbox/test_shelve.py
new file mode 100644
index 0000000..a370822
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_shelve.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib import shelf
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.script import ScriptRunner
+
+
+class TestShelveList(TestCaseWithTransport):
+
+ def test_no_shelved_changes(self):
+ tree = self.make_branch_and_tree('.')
+ err = self.run_bzr('shelve --list')[1]
+ self.assertEqual('No shelved changes.\n', err)
+
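+ # Helper: build a ShelfCreator comparing the working tree against its
+ # basis; tests then store it via get_shelf_manager().shelve_changes().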
+ def make_creator(self, tree):
+ creator = shelf.ShelfCreator(tree, tree.basis_tree(), [])
+ self.addCleanup(creator.finalize)
+ return creator
+
+ def test_shelve_one(self):
+ tree = self.make_branch_and_tree('.')
+ creator = self.make_creator(tree)
+ shelf_id = tree.get_shelf_manager().shelve_changes(creator, 'Foo')
+ out, err = self.run_bzr('shelve --list', retcode=1)
+ self.assertEqual('', err)
+ self.assertEqual(' 1: Foo\n', out)
+
+ def test_shelve_list_via_directory(self):
+ tree = self.make_branch_and_tree('tree')
+ creator = self.make_creator(tree)
+ shelf_id = tree.get_shelf_manager().shelve_changes(creator, 'Foo')
+ out, err = self.run_bzr('shelve -d tree --list', retcode=1)
+ self.assertEqual('', err)
+ self.assertEqual(' 1: Foo\n', out)
+
+ def test_shelve_no_message(self):
+ tree = self.make_branch_and_tree('.')
+ creator = self.make_creator(tree)
+ shelf_id = tree.get_shelf_manager().shelve_changes(creator)
+ out, err = self.run_bzr('shelve --list', retcode=1)
+ self.assertEqual('', err)
+ self.assertEqual(' 1: <no message>\n', out)
+
+ def test_shelf_order(self):
+ tree = self.make_branch_and_tree('.')
+ creator = self.make_creator(tree)
+ tree.get_shelf_manager().shelve_changes(creator, 'Foo')
+ creator = self.make_creator(tree)
+ tree.get_shelf_manager().shelve_changes(creator, 'Bar')
+ out, err = self.run_bzr('shelve --list', retcode=1)
+ self.assertEqual('', err)
+ self.assertEqual(' 2: Bar\n 1: Foo\n', out)
+
+ def test_shelve_destroy(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ tree.add('file')
+ self.run_bzr('shelve --all --destroy')
+ self.assertPathDoesNotExist('file')
+ self.assertIs(None, tree.get_shelf_manager().last_shelf())
+
+ def test_unshelve_keep(self):
+ # https://bugs.launchpad.net/bzr/+bug/492091
+ tree = self.make_branch_and_tree('.')
+ # shelve is apparently unhappy working with a tree that has no root yet
+ tree.commit('make root')
+ self.build_tree(['file'])
+
+ sr = ScriptRunner()
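+ # In test scripts, '$' lines are commands to run, '2>' lines are the
+ # expected stderr, and other lines are the expected stdout.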
+ sr.run_script(self, '''
+$ bzr add file
+adding file
+$ bzr shelve --all -m Foo
+2>Selected changes:
+2>-D file
+2>Changes shelved with id "1".
+$ bzr shelve --list
+ 1: Foo
+$ bzr unshelve --keep
+2>Using changes with id "1".
+2>Message: Foo
+2>+N file
+2>All changes applied successfully.
+$ bzr shelve --list
+ 1: Foo
+$ cat file
+contents of file
+''')
+
+
+class TestUnshelvePreview(TestCaseWithTransport):
+
+ def test_non_ascii(self):
+ """Test that we can show a non-ascii diff that would result from unshelving"""
+
+ init_content = u'Initial: \u0418\u0437\u043d\u0430\u0447\n'.encode('utf-8')
+ more_content = u'More: \u0415\u0449\u0451\n'.encode('utf-8')
+ next_content = init_content + more_content
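+ # expected hunk body: the original line kept as context (leading space)
+ # followed by the new line marked as added ('+')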
+ diff_part = '@@ -1,1 +1,2 @@\n %s+%s' % (init_content, more_content)
+
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('a_file', init_content)])
+ tree.add('a_file')
+ tree.commit(message='committed')
+ self.build_tree_contents([('a_file', next_content)])
+ self.run_bzr(['shelve', '--all'])
+ out, err = self.run_bzr(['unshelve', '--preview'], encoding='latin-1')
+
+ self.assertContainsString(out, diff_part)
+
+
+class TestShelveRelpath(TestCaseWithTransport):
+
+ def test_shelve_in_subdir(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file', 'tree/dir/'])
+ tree.add('file')
+ os.chdir('tree/dir')
+ self.run_bzr('shelve --all ../file')
+
+ def test_shelve_via_directory(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file', 'tree/dir/'])
+ tree.add('file')
+ self.run_bzr('shelve -d tree/dir --all ../file')
+
+
+class TestShelveUnshelve(TestCaseWithTransport):
+
+ def test_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/a', 'initial\n')])
+ tree.add('a')
+ tree.commit(message='committed')
+ self.build_tree_contents([('tree/a', 'initial\nmore\n')])
+ self.run_bzr('shelve -d tree --all')
+ self.assertFileEqual('initial\n', 'tree/a')
+ self.run_bzr('unshelve --directory tree')
+ self.assertFileEqual('initial\nmore\n', 'tree/a')
diff --git a/bzrlib/tests/blackbox/test_sign_my_commits.py b/bzrlib/tests/blackbox/test_sign_my_commits.py
new file mode 100644
index 0000000..79376e2
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_sign_my_commits.py
@@ -0,0 +1,190 @@
+# Copyright (C) 2005 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for bzr sign-my-commits."""
+
+from bzrlib import (
+ gpg,
+ tests,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class SignMyCommits(tests.TestCaseWithTransport):
+
+ def monkey_patch_gpg(self):
+ """Monkey patch the gpg signing strategy to be a loopback.
+
+ This also registers the cleanup, so that we will revert to
+ the original gpg strategy when done.
+ """
+ # monkey patch gpg signing mechanism
+ self.overrideAttr(gpg, 'GPGStrategy', gpg.LoopbackGPGStrategy)
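+ # overrideAttr also restores the original GPGStrategy when the test ends.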
+
+ def setup_tree(self, location='.'):
+ wt = self.make_branch_and_tree(location)
+ wt.commit("base A", allow_pointless=True, rev_id='A')
+ wt.commit("base B", allow_pointless=True, rev_id='B')
+ wt.commit("base C", allow_pointless=True, rev_id='C')
+ wt.commit("base D", allow_pointless=True, rev_id='D',
+ committer='Alternate <alt@foo.com>')
+ wt.add_parent_tree_id("aghost")
+ wt.commit("base E", allow_pointless=True, rev_id='E')
+ return wt
+
+ def assertUnsigned(self, repo, revision_id):
+ """Assert that revision_id is not signed in repo."""
+ self.assertFalse(repo.has_signature_for_revision_id(revision_id))
+
+ def assertSigned(self, repo, revision_id):
+ """Assert that revision_id is signed in repo."""
+ self.assertTrue(repo.has_signature_for_revision_id(revision_id))
+
+ def test_sign_my_commits(self):
+ # Test re-signing of data.
+ wt = self.setup_tree()
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+
+ self.assertUnsigned(repo, 'A')
+ self.assertUnsigned(repo, 'B')
+ self.assertUnsigned(repo, 'C')
+ self.assertUnsigned(repo, 'D')
+
+ self.run_bzr('sign-my-commits')
+
+ self.assertSigned(repo, 'A')
+ self.assertSigned(repo, 'B')
+ self.assertSigned(repo, 'C')
+ self.assertUnsigned(repo, 'D')
+
+ def test_sign_my_commits_location(self):
+ wt = self.setup_tree('other')
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+
+ self.run_bzr('sign-my-commits other')
+
+ self.assertSigned(repo, 'A')
+ self.assertSigned(repo, 'B')
+ self.assertSigned(repo, 'C')
+ self.assertUnsigned(repo, 'D')
+
+ def test_sign_diff_committer(self):
+ wt = self.setup_tree()
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+
+ self.run_bzr(['sign-my-commits', '.', 'Alternate <alt@foo.com>'])
+
+ self.assertUnsigned(repo, 'A')
+ self.assertUnsigned(repo, 'B')
+ self.assertUnsigned(repo, 'C')
+ self.assertSigned(repo, 'D')
+
+ def test_sign_dry_run(self):
+ wt = self.setup_tree()
+ repo = wt.branch.repository
+
+ self.monkey_patch_gpg()
+
+ out = self.run_bzr('sign-my-commits --dry-run')[0]
+
+ outlines = out.splitlines()
+ self.assertEquals(5, len(outlines))
+ self.assertEquals('Signed 4 revisions.', outlines[-1])
+ self.assertUnsigned(repo, 'A')
+ self.assertUnsigned(repo, 'B')
+ self.assertUnsigned(repo, 'C')
+ self.assertUnsigned(repo, 'D')
+ self.assertUnsigned(repo, 'E')
+
+ def test_verify_commits(self):
+ wt = self.setup_tree()
+ self.monkey_patch_gpg()
+ self.run_bzr('sign-my-commits')
+ out = self.run_bzr('verify-signatures', retcode=1)
+ self.assertEquals(('4 commits with valid signatures\n'
+ '0 commits with key now expired\n'
+ '0 commits with unknown keys\n'
+ '0 commits not valid\n'
+ '1 commit not signed\n', ''), out)
+
+ def test_verify_commits_acceptable_key(self):
+ wt = self.setup_tree()
+ self.monkey_patch_gpg()
+ self.run_bzr('sign-my-commits')
+ out = self.run_bzr(['verify-signatures', '--acceptable-keys=foo,bar'],
+ retcode=1)
+ self.assertEquals(('4 commits with valid signatures\n'
+ '0 commits with key now expired\n'
+ '0 commits with unknown keys\n'
+ '0 commits not valid\n'
+ '1 commit not signed\n', ''), out)
+
+
+class TestSmartServerSignMyCommits(tests.TestCaseWithTransport):
+
+ def monkey_patch_gpg(self):
+ """Monkey patch the gpg signing strategy to be a loopback.
+
+ This also registers the cleanup, so that we will revert to
+ the original gpg strategy when done.
+ """
+ # monkey patch gpg signing mechanism
+ self.overrideAttr(gpg, 'GPGStrategy', gpg.LoopbackGPGStrategy)
+
+ def test_sign_single_commit(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ self.monkey_patch_gpg()
+ out, err = self.run_bzr(['sign-my-commits', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(15, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_verify_commits(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.monkey_patch_gpg()
+ out, err = self.run_bzr(['sign-my-commits', self.get_url('branch')])
+ self.reset_smart_call_log()
+ self.run_bzr('sign-my-commits')
+ out = self.run_bzr(['verify-signatures', self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(10, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_split.py b/bzrlib/tests/blackbox/test_split.py
new file mode 100644
index 0000000..22793ff
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_split.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import tests, workingtree
+from bzrlib.repofmt.knitrepo import RepositoryFormatKnit4
+from bzrlib.repofmt.knitpack_repo import RepositoryFormatKnitPack4
+
+
+class TestSplit(tests.TestCaseWithTransport):
+
+ def test_split(self):
+ self.build_tree(['a/', 'a/b/', 'a/b/c'])
+ wt = self.make_branch_and_tree('a', format='rich-root-pack')
+ wt.add(['b', 'b/c'])
+ wt.commit('rev1')
+ self.run_bzr('split a/b')
+ self.run_bzr_error(('.* is not versioned',), 'split q', working_dir='a')
+
+ def test_split_repo_failure(self):
+ repo = self.make_repository('branch', shared=True, format='knit')
+ a_branch = repo.bzrdir.create_branch()
+ self.build_tree(['a/', 'a/b/', 'a/b/c/', 'a/b/c/d'])
+ wt = a_branch.create_checkout('a', lightweight=True)
+ wt.add(['b', 'b/c', 'b/c/d'], ['b-id', 'c-id', 'd-id'])
+ wt.commit('added files')
+ self.run_bzr_error(('must upgrade your branch at .*a',), 'split a/b')
+
+ def test_split_tree_failure(self):
+ tree = self.make_branch_and_tree('tree', format='pack-0.92')
+ self.build_tree(['tree/subtree/'])
+ tree.add('subtree')
+ tree.commit('added subtree')
+ self.run_bzr_error(('must upgrade your branch at .*tree', 'rich roots'),
+ 'split tree/subtree')
+
+ def split_formats(self, format, repo_format):
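+ # helper: split subdirectory 'a' into its own tree and check that the
+ # resulting tree's repository uses the expected rich-root format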
+ tree = self.make_branch_and_tree('rich-root', format=format)
+ self.build_tree(['rich-root/a/'])
+ tree.add('a')
+ self.run_bzr(['split', 'rich-root/a'])
+ subtree = workingtree.WorkingTree.open('rich-root/a')
+ self.assertIsInstance(subtree.branch.repository._format,
+ repo_format)
+
+ def test_split_rich_root(self):
+ self.split_formats('rich-root', RepositoryFormatKnit4)
+
+ def test_split_rich_root_pack(self):
+ self.split_formats('rich-root-pack', RepositoryFormatKnitPack4)
diff --git a/bzrlib/tests/blackbox/test_status.py b/bzrlib/tests/blackbox/test_status.py
new file mode 100644
index 0000000..ad73fcf
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_status.py
@@ -0,0 +1,804 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests of status command.
+
+Most of these depend on the particular formatting used.
+As such they really are blackbox tests even though some of the
+tests are not using self.capture. If we add tests for the programmatic
+interface later, they will be non-blackbox tests.
+"""
+
+from cStringIO import StringIO
+import codecs
+from os import mkdir, chdir, rmdir, unlink
+import sys
+from tempfile import TemporaryFile
+
+from bzrlib import (
+ bzrdir,
+ conflicts,
+ errors,
+ osutils,
+ status,
+ )
+import bzrlib.branch
+from bzrlib.osutils import pathjoin
+from bzrlib.revisionspec import RevisionSpec
+from bzrlib.status import show_tree_status
+from bzrlib.tests import TestCaseWithTransport, TestSkipped
+from bzrlib.workingtree import WorkingTree
+
+
+class BranchStatus(TestCaseWithTransport):
+
+ def setUp(self):
+ super(BranchStatus, self).setUp()
+ # As TestCase.setUp clears all hooks, we install this default
+ # post_status hook handler for the test.
+ status.hooks.install_named_hook('post_status',
+ status._show_shelve_summary,
+ 'bzr status')
+
+ def assertStatus(self, expected_lines, working_tree, specific_files=None,
+ revision=None, short=False, pending=True, verbose=False):
+ """Run status in working_tree and look for output.
+
+ :param expected_lines: The lines to look for.
+ :param working_tree: The tree to run status in.
+ """
+ output_string = self.status_string(working_tree, specific_files, revision, short,
+ pending, verbose)
+ self.assertEqual(expected_lines, output_string.splitlines(True))
+
+ def status_string(self, wt, specific_files=None, revision=None,
+ short=False, pending=True, verbose=False):
+ # use a real file rather than StringIO because it doesn't handle
+ # Unicode very well.
+ tof = codecs.getwriter('utf-8')(TemporaryFile())
+ show_tree_status(wt, specific_files=specific_files, to_file=tof,
+ revision=revision, short=short, show_pending=pending,
+ verbose=verbose)
+ tof.seek(0)
+ return tof.read().decode('utf-8')
+
+ def test_branch_status(self):
+ """Test basic branch status"""
+ wt = self.make_branch_and_tree('.')
+
+ # status with no commits or files - it must
+ # work and show no output. We do this with no
+ # commits to be sure that it's not going to fail
+ # as a corner case.
+ self.assertStatus([], wt)
+
+ self.build_tree(['hello.c', 'bye.c'])
+ self.assertStatus([
+ 'unknown:\n',
+ ' bye.c\n',
+ ' hello.c\n',
+ ],
+ wt)
+ self.assertStatus([
+ '? bye.c\n',
+ '? hello.c\n',
+ ],
+ wt, short=True)
+
+ # add a commit to allow showing pending merges.
+ wt.commit('create a parent to allow testing merge output')
+
+ wt.add_parent_tree_id('pending@pending-0-0')
+ self.assertStatus([
+ 'unknown:\n',
+ ' bye.c\n',
+ ' hello.c\n',
+ 'pending merge tips: (use -v to see all merge revisions)\n',
+ ' (ghost) pending@pending-0-0\n',
+ ],
+ wt)
+ self.assertStatus([
+ 'unknown:\n',
+ ' bye.c\n',
+ ' hello.c\n',
+ 'pending merges:\n',
+ ' (ghost) pending@pending-0-0\n',
+ ],
+ wt, verbose=True)
+ self.assertStatus([
+ '? bye.c\n',
+ '? hello.c\n',
+ 'P (ghost) pending@pending-0-0\n',
+ ],
+ wt, short=True)
+ self.assertStatus([
+ 'unknown:\n',
+ ' bye.c\n',
+ ' hello.c\n',
+ ],
+ wt, pending=False)
+ self.assertStatus([
+ '? bye.c\n',
+ '? hello.c\n',
+ ],
+ wt, short=True, pending=False)
+
+ def test_branch_status_revisions(self):
+ """Tests branch status with revisions"""
+ wt = self.make_branch_and_tree('.')
+
+ self.build_tree(['hello.c', 'bye.c'])
+ wt.add('hello.c')
+ wt.add('bye.c')
+ wt.commit('Test message')
+
+ revs = [RevisionSpec.from_string('0')]
+ self.assertStatus([
+ 'added:\n',
+ ' bye.c\n',
+ ' hello.c\n'
+ ],
+ wt,
+ revision=revs)
+
+ self.build_tree(['more.c'])
+ wt.add('more.c')
+ wt.commit('Another test message')
+
+ revs.append(RevisionSpec.from_string('1'))
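+ # with two revision specs, status reports the changes between revision 0
+ # and revision 1, so more.c (added in revision 2) is not listed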
+ self.assertStatus([
+ 'added:\n',
+ ' bye.c\n',
+ ' hello.c\n',
+ ],
+ wt,
+ revision=revs)
+
+ def test_pending(self):
+ """Pending merges display works, including Unicode"""
+ mkdir("./branch")
+ wt = self.make_branch_and_tree('branch')
+ b = wt.branch
+ wt.commit("Empty commit 1")
+ b_2_dir = b.bzrdir.sprout('./copy')
+ b_2 = b_2_dir.open_branch()
+ wt2 = b_2_dir.open_workingtree()
+ wt.commit(u"\N{TIBETAN DIGIT TWO} Empty commit 2")
+ wt2.merge_from_branch(wt.branch)
+ message = self.status_string(wt2, verbose=True)
+ self.assertStartsWith(message, "pending merges:\n")
+ self.assertEndsWith(message, "Empty commit 2\n")
+ wt2.commit("merged")
+ # must be long to make sure we see the ellipsis at the end
+ wt.commit("Empty commit 3 " +
+ "blah blah blah blah " * 100)
+ wt2.merge_from_branch(wt.branch)
+ message = self.status_string(wt2, verbose=True)
+ self.assertStartsWith(message, "pending merges:\n")
+ self.assert_("Empty commit 3" in message)
+ self.assertEndsWith(message, "...\n")
+
+ def test_tree_status_ignores(self):
+ """Tests branch status with ignores"""
+ wt = self.make_branch_and_tree('.')
+ self.run_bzr('ignore *~')
+ wt.commit('commit .bzrignore')
+ self.build_tree(['foo.c', 'foo.c~'])
+ self.assertStatus([
+ 'unknown:\n',
+ ' foo.c\n',
+ ],
+ wt)
+ self.assertStatus([
+ '? foo.c\n',
+ ],
+ wt, short=True)
+
+ def test_tree_status_specific_files(self):
+ """Tests branch status with given specific files"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+
+ self.build_tree(['directory/','directory/hello.c',
+ 'bye.c','test.c','dir2/',
+ 'missing.c'])
+ wt.add('directory')
+ wt.add('test.c')
+ wt.commit('testing')
+ wt.add('missing.c')
+ unlink('missing.c')
+
+ self.assertStatus([
+ 'missing:\n',
+ ' missing.c\n',
+ 'unknown:\n',
+ ' bye.c\n',
+ ' dir2/\n',
+ ' directory/hello.c\n'
+ ],
+ wt)
+
+ self.assertStatus([
+ '? bye.c\n',
+ '? dir2/\n',
+ '+! missing.c\n',
+ '? directory/hello.c\n'
+ ],
+ wt, short=True)
+
+ tof = StringIO()
+ self.assertRaises(errors.PathsDoNotExist,
+ show_tree_status,
+ wt, specific_files=['bye.c','test.c','absent.c'],
+ to_file=tof)
+
+ tof = StringIO()
+ show_tree_status(wt, specific_files=['directory'], to_file=tof)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(),
+ ['unknown:\n',
+ ' directory/hello.c\n'
+ ])
+ tof = StringIO()
+ show_tree_status(wt, specific_files=['directory'], to_file=tof,
+ short=True)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(), ['? directory/hello.c\n'])
+
+ tof = StringIO()
+ show_tree_status(wt, specific_files=['dir2'], to_file=tof)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(),
+ ['unknown:\n',
+ ' dir2/\n'
+ ])
+ tof = StringIO()
+ show_tree_status(wt, specific_files=['dir2'], to_file=tof, short=True)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(), ['? dir2/\n'])
+
+ tof = StringIO()
+ revs = [RevisionSpec.from_string('0'), RevisionSpec.from_string('1')]
+ show_tree_status(wt, specific_files=['test.c'], to_file=tof,
+ short=True, revision=revs)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(), ['+N test.c\n'])
+
+ tof = StringIO()
+ show_tree_status(wt, specific_files=['missing.c'], to_file=tof)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(),
+ ['missing:\n',
+ ' missing.c\n'])
+
+ tof = StringIO()
+ show_tree_status(wt, specific_files=['missing.c'], to_file=tof,
+ short=True)
+ tof.seek(0)
+ self.assertEquals(tof.readlines(),
+ ['+! missing.c\n'])
+
+ def test_specific_files_conflicts(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir2/'])
+ tree.add('dir2')
+ tree.commit('added dir2')
+ tree.set_conflicts(conflicts.ConflictList(
+ [conflicts.ContentsConflict('foo')]))
+ tof = StringIO()
+ show_tree_status(tree, specific_files=['dir2'], to_file=tof)
+ self.assertEqualDiff('', tof.getvalue())
+ tree.set_conflicts(conflicts.ConflictList(
+ [conflicts.ContentsConflict('dir2')]))
+ tof = StringIO()
+ show_tree_status(tree, specific_files=['dir2'], to_file=tof)
+ self.assertEqualDiff('conflicts:\n Contents conflict in dir2\n',
+ tof.getvalue())
+
+ tree.set_conflicts(conflicts.ConflictList(
+ [conflicts.ContentsConflict('dir2/file1')]))
+ tof = StringIO()
+ show_tree_status(tree, specific_files=['dir2'], to_file=tof)
+ self.assertEqualDiff('conflicts:\n Contents conflict in dir2/file1\n',
+ tof.getvalue())
+
+ def _prepare_nonexistent(self):
+ wt = self.make_branch_and_tree('.')
+ self.assertStatus([], wt)
+ self.build_tree(['FILE_A', 'FILE_B', 'FILE_C', 'FILE_D', 'FILE_E', ])
+ wt.add('FILE_A')
+ wt.add('FILE_B')
+ wt.add('FILE_C')
+ wt.add('FILE_D')
+ wt.add('FILE_E')
+ wt.commit('Create five empty files.')
+ with open('FILE_B', 'w') as f: f.write('Modification to file FILE_B.')
+ with open('FILE_C', 'w') as f: f.write('Modification to file FILE_C.')
+ unlink('FILE_E') # FILE_E will be versioned but missing
+ with open('FILE_Q', 'w') as f: f.write('FILE_Q is added but not committed.')
+ wt.add('FILE_Q') # FILE_Q will be added but not committed
+ open('UNVERSIONED_BUT_EXISTING', 'w')
+ return wt
+
+ def test_status_nonexistent_file(self):
+ # files that don't exist in either the basis tree or working tree
+ # should give an error
+ wt = self._prepare_nonexistent()
+ self.assertStatus([
+ 'removed:\n',
+ ' FILE_E\n',
+ 'added:\n',
+ ' FILE_Q\n',
+ 'modified:\n',
+ ' FILE_B\n',
+ ' FILE_C\n',
+ 'unknown:\n',
+ ' UNVERSIONED_BUT_EXISTING\n',
+ ],
+ wt)
+ self.assertStatus([
+ ' M FILE_B\n',
+ ' M FILE_C\n',
+ ' D FILE_E\n',
+ '+N FILE_Q\n',
+ '? UNVERSIONED_BUT_EXISTING\n',
+ ],
+ wt, short=True)
+
+ # Okay, everything's looking good with the existent files.
+ # Let's see what happens when we throw in non-existent files.
+
+ # bzr st [--short] NONEXISTENT '
+ expected = [
+ 'nonexistent:\n',
+ ' NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status NONEXISTENT', retcode=3)
+ self.assertEqual(expected, out.splitlines(True))
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'NONEXISTENT.*')
+ expected = [
+ 'X: NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status --short NONEXISTENT', retcode=3)
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'NONEXISTENT.*')
+
+ def test_status_nonexistent_file_with_others(self):
+ # bzr st [--short] NONEXISTENT ...others..
+ wt = self._prepare_nonexistent()
+ expected = [
+ 'removed:\n',
+ ' FILE_E\n',
+ 'modified:\n',
+ ' FILE_B\n',
+ ' FILE_C\n',
+ 'nonexistent:\n',
+ ' NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status NONEXISTENT '
+ 'FILE_A FILE_B FILE_C FILE_D FILE_E',
+ retcode=3)
+ self.assertEqual(expected, out.splitlines(True))
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'NONEXISTENT.*')
+ expected = [
+ ' D FILE_E\n',
+ ' M FILE_C\n',
+ ' M FILE_B\n',
+ 'X NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status --short NONEXISTENT '
+ 'FILE_A FILE_B FILE_C FILE_D FILE_E',
+ retcode=3)
+ self.assertEqual(expected, out.splitlines(True))
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'NONEXISTENT.*')
+
+ def test_status_multiple_nonexistent_files(self):
+ # bzr st [--short] NONEXISTENT ... ANOTHER_NONEXISTENT ...
+ wt = self._prepare_nonexistent()
+ expected = [
+ 'removed:\n',
+ ' FILE_E\n',
+ 'modified:\n',
+ ' FILE_B\n',
+ ' FILE_C\n',
+ 'nonexistent:\n',
+ ' ANOTHER_NONEXISTENT\n',
+ ' NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status NONEXISTENT '
+ 'FILE_A FILE_B ANOTHER_NONEXISTENT '
+ 'FILE_C FILE_D FILE_E', retcode=3)
+ self.assertEqual(expected, out.splitlines(True))
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'ANOTHER_NONEXISTENT NONEXISTENT.*')
+ expected = [
+ ' D FILE_E\n',
+ ' M FILE_C\n',
+ ' M FILE_B\n',
+ 'X ANOTHER_NONEXISTENT\n',
+ 'X NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status --short NONEXISTENT '
+ 'FILE_A FILE_B ANOTHER_NONEXISTENT '
+ 'FILE_C FILE_D FILE_E', retcode=3)
+ self.assertEqual(expected, out.splitlines(True))
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'ANOTHER_NONEXISTENT NONEXISTENT.*')
+
+ def test_status_nonexistent_file_with_unversioned(self):
+ # bzr st [--short] NONEXISTENT A B UNVERSIONED_BUT_EXISTING C D E Q
+ wt = self._prepare_nonexistent()
+ expected = [
+ 'removed:\n',
+ ' FILE_E\n',
+ 'added:\n',
+ ' FILE_Q\n',
+ 'modified:\n',
+ ' FILE_B\n',
+ ' FILE_C\n',
+ 'unknown:\n',
+ ' UNVERSIONED_BUT_EXISTING\n',
+ 'nonexistent:\n',
+ ' NONEXISTENT\n',
+ ]
+ out, err = self.run_bzr('status NONEXISTENT '
+ 'FILE_A FILE_B UNVERSIONED_BUT_EXISTING '
+ 'FILE_C FILE_D FILE_E FILE_Q', retcode=3)
+ self.assertEqual(expected, out.splitlines(True))
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'NONEXISTENT.*')
+ expected = [
+ '+N FILE_Q\n',
+ '? UNVERSIONED_BUT_EXISTING\n',
+ ' D FILE_E\n',
+ ' M FILE_C\n',
+ ' M FILE_B\n',
+ 'X NONEXISTENT\n',
+ ]
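+ # sort both expected and actual lines so the comparison does not depend
+ # on the ordering of the short-status output for this mix of entries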
+ expected.sort()
+ out, err = self.run_bzr('status --short NONEXISTENT '
+ 'FILE_A FILE_B UNVERSIONED_BUT_EXISTING '
+ 'FILE_C FILE_D FILE_E FILE_Q', retcode=3)
+ actual = out.splitlines(True)
+ actual.sort()
+ self.assertEqual(expected, actual)
+ self.assertContainsRe(err,
+ r'.*ERROR: Path\(s\) do not exist: '
+ 'NONEXISTENT.*')
+
+ def test_status_out_of_date(self):
+ """Simulate status of out-of-date tree after remote push"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('a', 'foo\n')])
+ tree.lock_write()
+ try:
+ tree.add(['a'])
+ tree.commit('add test file')
+ # simulate what happens after a remote push
+ tree.set_last_revision("0")
+ finally:
+ # before running other commands we should unlock the tree
+ tree.unlock()
+ out, err = self.run_bzr('status')
+ self.assertEqual("working tree is out of date, run 'bzr update'\n",
+ err)
+
+ def test_status_on_ignored(self):
+ """Tests branch status on an unversioned file which is considered ignored.
+
+ See https://bugs.launchpad.net/bzr/+bug/40103
+ """
+ tree = self.make_branch_and_tree('.')
+
+ self.build_tree(['test1.c', 'test1.c~', 'test2.c~'])
+ result = self.run_bzr('status')[0]
+ self.assertContainsRe(result, "unknown:\n test1.c\n")
+ short_result = self.run_bzr('status --short')[0]
+ self.assertContainsRe(short_result, "\? test1.c\n")
+
+ result = self.run_bzr('status test1.c')[0]
+ self.assertContainsRe(result, "unknown:\n test1.c\n")
+ short_result = self.run_bzr('status --short test1.c')[0]
+ self.assertContainsRe(short_result, "\? test1.c\n")
+
+ result = self.run_bzr('status test1.c~')[0]
+ self.assertContainsRe(result, "ignored:\n test1.c~\n")
+ short_result = self.run_bzr('status --short test1.c~')[0]
+ self.assertContainsRe(short_result, "I test1.c~\n")
+
+ result = self.run_bzr('status test1.c~ test2.c~')[0]
+ self.assertContainsRe(result, "ignored:\n test1.c~\n test2.c~\n")
+ short_result = self.run_bzr('status --short test1.c~ test2.c~')[0]
+ self.assertContainsRe(short_result, "I test1.c~\nI test2.c~\n")
+
+ result = self.run_bzr('status test1.c test1.c~ test2.c~')[0]
+ self.assertContainsRe(result, "unknown:\n test1.c\nignored:\n test1.c~\n test2.c~\n")
+ short_result = self.run_bzr('status --short test1.c test1.c~ test2.c~')[0]
+ self.assertContainsRe(short_result, "\? test1.c\nI test1.c~\nI test2.c~\n")
+
+ def test_status_write_lock(self):
+ """Test that status works without fetching history and
+ having a write lock.
+
+ See https://bugs.launchpad.net/bzr/+bug/149270
+ """
+ mkdir('branch1')
+ wt = self.make_branch_and_tree('branch1')
+ b = wt.branch
+ wt.commit('Empty commit 1')
+ wt2 = b.bzrdir.sprout('branch2').open_workingtree()
+ wt2.commit('Empty commit 2')
+ out, err = self.run_bzr('status branch1 -rbranch:branch2')
+ self.assertEqual('', out)
+
+ def test_status_with_shelves(self):
+ """Ensure that _show_shelve_summary handler works.
+ """
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['hello.c'])
+ wt.add('hello.c')
+ self.run_bzr(['shelve', '--all', '-m', 'foo'])
+ self.build_tree(['bye.c'])
+ wt.add('bye.c')
+ self.assertStatus([
+ 'added:\n',
+ ' bye.c\n',
+ '1 shelf exists. See "bzr shelve --list" for details.\n',
+ ],
+ wt)
+ self.run_bzr(['shelve', '--all', '-m', 'bar'])
+ self.build_tree(['eggs.c', 'spam.c'])
+ wt.add('eggs.c')
+ wt.add('spam.c')
+ self.assertStatus([
+ 'added:\n',
+ ' eggs.c\n',
+ ' spam.c\n',
+ '2 shelves exist. See "bzr shelve --list" for details.\n',
+ ],
+ wt)
+ self.assertStatus([
+ 'added:\n',
+ ' spam.c\n',
+ ],
+ wt,
+ specific_files=['spam.c'])
+
+
+class CheckoutStatus(BranchStatus):
+
+ def setUp(self):
+ super(CheckoutStatus, self).setUp()
+ mkdir('codir')
+ chdir('codir')
+
+ def make_branch_and_tree(self, relpath):
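+ # Overridden so the inherited BranchStatus tests run against a
+ # lightweight checkout that references a branch created one level up.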
+ source = self.make_branch(pathjoin('..', relpath))
+ checkout = bzrdir.BzrDirMetaFormat1().initialize(relpath)
+ checkout.set_branch_reference(source)
+ return checkout.create_workingtree()
+
+
+class TestStatus(TestCaseWithTransport):
+
+ def test_status_plain(self):
+ tree = self.make_branch_and_tree('.')
+
+ self.build_tree(['hello.txt'])
+ result = self.run_bzr("status")[0]
+ self.assertContainsRe(result, "unknown:\n hello.txt\n")
+
+ tree.add("hello.txt")
+ result = self.run_bzr("status")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n")
+
+ tree.commit(message="added")
+ result = self.run_bzr("status -r 0..1")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n")
+
+ result = self.run_bzr("status -c 1")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n")
+
+ self.build_tree(['world.txt'])
+ result = self.run_bzr("status -r 0")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n" \
+ "unknown:\n world.txt\n")
+ result2 = self.run_bzr("status -r 0..")[0]
+ self.assertEquals(result2, result)
+
+ def test_status_short(self):
+ tree = self.make_branch_and_tree('.')
+
+ self.build_tree(['hello.txt'])
+ result = self.run_bzr("status --short")[0]
+ self.assertContainsRe(result, "[?] hello.txt\n")
+
+ tree.add("hello.txt")
+ result = self.run_bzr("status --short")[0]
+ self.assertContainsRe(result, "[+]N hello.txt\n")
+
+ tree.commit(message="added")
+ result = self.run_bzr("status --short -r 0..1")[0]
+ self.assertContainsRe(result, "[+]N hello.txt\n")
+
+ self.build_tree(['world.txt'])
+ result = self.run_bzr("status -S -r 0")[0]
+ self.assertContainsRe(result, "[+]N hello.txt\n" \
+ "[?] world.txt\n")
+ result2 = self.run_bzr("status -S -r 0..")[0]
+ self.assertEquals(result2, result)
+
+ def test_status_versioned(self):
+ tree = self.make_branch_and_tree('.')
+
+ self.build_tree(['hello.txt'])
+ result = self.run_bzr("status --versioned")[0]
+ self.assertNotContainsRe(result, "unknown:\n hello.txt\n")
+
+ tree.add("hello.txt")
+ result = self.run_bzr("status --versioned")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n")
+
+ tree.commit("added")
+ result = self.run_bzr("status --versioned -r 0..1")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n")
+
+ self.build_tree(['world.txt'])
+ result = self.run_bzr("status --versioned -r 0")[0]
+ self.assertContainsRe(result, "added:\n hello.txt\n")
+ self.assertNotContainsRe(result, "unknown:\n world.txt\n")
+ result2 = self.run_bzr("status --versioned -r 0..")[0]
+ self.assertEquals(result2, result)
+
+ def test_status_SV(self):
+ tree = self.make_branch_and_tree('.')
+
+ self.build_tree(['hello.txt'])
+ result = self.run_bzr("status -SV")[0]
+ self.assertNotContainsRe(result, "hello.txt")
+
+ tree.add("hello.txt")
+ result = self.run_bzr("status -SV")[0]
+ self.assertContainsRe(result, "[+]N hello.txt\n")
+
+ tree.commit(message="added")
+ result = self.run_bzr("status -SV -r 0..1")[0]
+ self.assertContainsRe(result, "[+]N hello.txt\n")
+
+ self.build_tree(['world.txt'])
+ result = self.run_bzr("status -SV -r 0")[0]
+ self.assertContainsRe(result, "[+]N hello.txt\n")
+
+ result2 = self.run_bzr("status -SV -r 0..")[0]
+ self.assertEquals(result2, result)
+
+ def assertStatusContains(self, pattern, short=False):
+ """Run status, and assert it contains the given pattern"""
+ if short:
+ result = self.run_bzr("status --short")[0]
+ else:
+ result = self.run_bzr("status")[0]
+ self.assertContainsRe(result, pattern)
+
+ def test_kind_change_plain(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ tree.add('file')
+ tree.commit('added file')
+ unlink('file')
+ self.build_tree(['file/'])
+ self.assertStatusContains('kind changed:\n file \(file => directory\)')
+ tree.rename_one('file', 'directory')
+ self.assertStatusContains('renamed:\n file/ => directory/\n' \
+ 'modified:\n directory/\n')
+ rmdir('directory')
+ self.assertStatusContains('removed:\n file\n')
+
+ def test_kind_change_short(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ tree.add('file')
+ tree.commit('added file')
+ unlink('file')
+ self.build_tree(['file/'])
+ self.assertStatusContains('K file => file/',
+ short=True)
+ tree.rename_one('file', 'directory')
+ self.assertStatusContains('RK file => directory/',
+ short=True)
+ rmdir('directory')
+ self.assertStatusContains('RD file => directory',
+ short=True)
+
+ def test_status_illegal_revision_specifiers(self):
+ out, err = self.run_bzr('status -r 1..23..123', retcode=3)
+ self.assertContainsRe(err, 'one or two revision specifiers')
+
+ def test_status_no_pending(self):
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/a'])
+ a_tree.add('a')
+ a_tree.commit('a')
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree(['b/b'])
+ b_tree.add('b')
+ b_tree.commit('b')
+
+ self.run_bzr('merge ../b', working_dir='a')
+ out, err = self.run_bzr('status --no-pending', working_dir='a')
+ self.assertEquals(out, "added:\n b\n")
+
+ def test_pending_specific_files(self):
+ """With a specific file list, pending merges are not shown."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/a', 'content of a\n')])
+ tree.add('a')
+ r1_id = tree.commit('one')
+ alt = tree.bzrdir.sprout('alt').open_workingtree()
+ self.build_tree_contents([('alt/a', 'content of a\nfrom alt\n')])
+ alt_id = alt.commit('alt')
+ tree.merge_from_branch(alt.branch)
+ output = self.make_utf8_encoded_stringio()
+ show_tree_status(tree, to_file=output)
+ self.assertContainsRe(output.getvalue(), 'pending merge')
+ out, err = self.run_bzr('status tree/a')
+ self.assertNotContainsRe(out, 'pending merge')
+
+
+class TestStatusEncodings(TestCaseWithTransport):
+
+ def make_uncommitted_tree(self):
+ """Build a branch with uncommitted unicode named changes in the cwd."""
+ working_tree = self.make_branch_and_tree(u'.')
+ filename = u'hell\u00d8'
+ try:
+ self.build_tree_contents([(filename, 'contents of hello')])
+ except UnicodeEncodeError:
+ raise TestSkipped("can't build unicode working tree in "
+ "filesystem encoding %s" % sys.getfilesystemencoding())
+ working_tree.add(filename)
+ return working_tree
+
+ def test_stdout_ascii(self):
+ self.overrideAttr(osutils, '_cached_user_encoding', 'ascii')
+ working_tree = self.make_uncommitted_tree()
+ stdout, stderr = self.run_bzr("status")
+
+ self.assertEquals(stdout, """\
+added:
+ hell?
+""")
+
+ def test_stdout_latin1(self):
+ self.overrideAttr(osutils, '_cached_user_encoding', 'latin-1')
+ working_tree = self.make_uncommitted_tree()
+ stdout, stderr = self.run_bzr('status')
+
+ self.assertEquals(stdout, u"""\
+added:
+ hell\u00d8
+""".encode('latin-1'))
+
diff --git a/bzrlib/tests/blackbox/test_switch.py b/bzrlib/tests/blackbox/test_switch.py
new file mode 100644
index 0000000..f5f9893
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_switch.py
@@ -0,0 +1,494 @@
+# Copyright (C) 2007-2012 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the switch command of bzr."""
+
+import os
+
+from bzrlib.controldir import ControlDir
+from bzrlib import (
+ osutils,
+ urlutils,
+ branch,
+ )
+from bzrlib.workingtree import WorkingTree
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ script,
+ )
+from bzrlib.tests.features import UnicodeFilenameFeature
+from bzrlib.directory_service import directories
+
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+
+
+class TestSwitch(TestCaseWithTransport):
+
+ def _create_sample_tree(self):
+ tree = self.make_branch_and_tree('branch-1')
+ self.build_tree(['branch-1/file-1', 'branch-1/file-2'])
+ tree.add('file-1')
+ tree.commit('rev1')
+ tree.add('file-2')
+ tree.commit('rev2')
+ return tree
+
+ def test_switch_up_to_date_light_checkout(self):
+ self.make_branch_and_tree('branch')
+ self.run_bzr('branch branch branch2')
+ self.run_bzr('checkout --lightweight branch checkout')
+ os.chdir('checkout')
+ out, err = self.run_bzr('switch ../branch2')
+ self.assertContainsRe(err, 'Tree is up to date at revision 0.\n')
+ self.assertContainsRe(err, 'Switched to branch: .*/branch2.\n')
+ self.assertEqual('', out)
+
+ def test_switch_out_of_date_light_checkout(self):
+ self.make_branch_and_tree('branch')
+ self.run_bzr('branch branch branch2')
+ self.build_tree(['branch2/file'])
+ self.run_bzr('add branch2/file')
+ self.run_bzr('commit -m add-file branch2')
+ self.run_bzr('checkout --lightweight branch checkout')
+ os.chdir('checkout')
+ out, err = self.run_bzr('switch ../branch2')
+ #self.assertContainsRe(err, '\+N file')
+ self.assertContainsRe(err, 'Updated to revision 1.\n')
+ self.assertContainsRe(err, 'Switched to branch: .*/branch2.\n')
+ self.assertEqual('', out)
+
+ def _test_switch_nick(self, lightweight):
+ """Check that the nick gets switched too."""
+ tree1 = self.make_branch_and_tree('branch1')
+ tree2 = self.make_branch_and_tree('branch2')
+ tree2.pull(tree1.branch)
+ checkout = tree1.branch.create_checkout('checkout',
+ lightweight=lightweight)
+ self.assertEqual(checkout.branch.nick, tree1.branch.nick)
+ self.assertEqual(checkout.branch.get_config().has_explicit_nickname(),
+ False)
+ self.run_bzr('switch branch2', working_dir='checkout')
+
+ # we need to get the tree again, otherwise we don't get the new branch
+ checkout = WorkingTree.open('checkout')
+ self.assertEqual(checkout.branch.nick, tree2.branch.nick)
+ self.assertEqual(checkout.branch.get_config().has_explicit_nickname(),
+ False)
+
+ def test_switch_nick(self):
+ self._test_switch_nick(lightweight=False)
+
+ def test_switch_nick_lightweight(self):
+ self._test_switch_nick(lightweight=True)
+
+ def _test_switch_explicit_nick(self, lightweight):
+ """Check that the nick gets switched too."""
+ tree1 = self.make_branch_and_tree('branch1')
+ tree2 = self.make_branch_and_tree('branch2')
+ tree2.pull(tree1.branch)
+ checkout = tree1.branch.create_checkout('checkout',
+ lightweight=lightweight)
+ self.assertEqual(checkout.branch.nick, tree1.branch.nick)
+ checkout.branch.nick = "explicit_nick"
+ self.assertEqual(checkout.branch.nick, "explicit_nick")
+ self.assertEqual(checkout.branch.get_config()._get_explicit_nickname(),
+ "explicit_nick")
+ self.run_bzr('switch branch2', working_dir='checkout')
+
+ # we need to get the tree again, otherwise we don't get the new branch
+ checkout = WorkingTree.open('checkout')
+ self.assertEqual(checkout.branch.nick, tree2.branch.nick)
+ self.assertEqual(checkout.branch.get_config()._get_explicit_nickname(),
+ tree2.branch.nick)
+
+ def test_switch_explicit_nick(self):
+ self._test_switch_explicit_nick(lightweight=False)
+
+ def test_switch_explicit_nick_lightweight(self):
+ self._test_switch_explicit_nick(lightweight=True)
+
+ def test_switch_finds_relative_branch(self):
+ """Switch will find 'foo' relative to the branch the checkout is of."""
+ self.build_tree(['repo/'])
+ tree1 = self.make_branch_and_tree('repo/brancha')
+ tree1.commit('foo')
+ tree2 = self.make_branch_and_tree('repo/branchb')
+ tree2.pull(tree1.branch)
+ branchb_id = tree2.commit('bar')
+ checkout = tree1.branch.create_checkout('checkout', lightweight=True)
+ self.run_bzr(['switch', 'branchb'], working_dir='checkout')
+ self.assertEqual(branchb_id, checkout.last_revision())
+ checkout = checkout.bzrdir.open_workingtree()
+ self.assertEqual(tree2.branch.base, checkout.branch.base)
+
+ def test_switch_finds_relative_bound_branch(self):
+ """Using switch on a heavy checkout should find master sibling
+
+ The behaviour of lighweight and heavy checkouts should be
+ consistent when using the convenient "switch to sibling" feature
+ Both should switch to a sibling of the branch
+ they are bound to, and not a sibling of themself"""
+
+ self.build_tree(['repo/',
+ 'heavyco/'])
+ tree1 = self.make_branch_and_tree('repo/brancha')
+ tree1.commit('foo')
+ tree2 = self.make_branch_and_tree('repo/branchb')
+ tree2.pull(tree1.branch)
+ branchb_id = tree2.commit('bar')
+ checkout = tree1.branch.create_checkout('heavyco/a', lightweight=False)
+ self.run_bzr(['switch', 'branchb'], working_dir='heavyco/a')
+ # Refresh checkout as 'switch' modified it
+ checkout = checkout.bzrdir.open_workingtree()
+ self.assertEqual(branchb_id, checkout.last_revision())
+ self.assertEqual(tree2.branch.base,
+ checkout.branch.get_bound_location())
+
+ def test_switch_finds_relative_unicode_branch(self):
+ """Switch will find 'foo' relative to the branch the checkout is of."""
+ self.requireFeature(UnicodeFilenameFeature)
+ self.build_tree(['repo/'])
+ tree1 = self.make_branch_and_tree('repo/brancha')
+ tree1.commit('foo')
+ tree2 = self.make_branch_and_tree(u'repo/branch\xe9')
+ tree2.pull(tree1.branch)
+ branchb_id = tree2.commit('bar')
+ checkout = tree1.branch.create_checkout('checkout', lightweight=True)
+ self.run_bzr(['switch', u'branch\xe9'], working_dir='checkout')
+ self.assertEqual(branchb_id, checkout.last_revision())
+ checkout = checkout.bzrdir.open_workingtree()
+ self.assertEqual(tree2.branch.base, checkout.branch.base)
+
+ def test_switch_revision(self):
+ tree = self._create_sample_tree()
+ checkout = tree.branch.create_checkout('checkout', lightweight=True)
+ self.run_bzr(['switch', 'branch-1', '-r1'], working_dir='checkout')
+ self.assertPathExists('checkout/file-1')
+ self.assertPathDoesNotExist('checkout/file-2')
+
+ def test_switch_into_colocated(self):
+ # Create a new colocated branch from an existing non-colocated branch.
+ tree = self.make_branch_and_tree('.', format='development-colo')
+ self.build_tree(['file-1', 'file-2'])
+ tree.add('file-1')
+ revid1 = tree.commit('rev1')
+ tree.add('file-2')
+ revid2 = tree.commit('rev2')
+ self.run_bzr(['switch', '-b', 'anotherbranch'])
+ self.assertEquals(
+ set(['', 'anotherbranch']),
+ set(tree.branch.bzrdir.get_branches().keys()))
+
+ def test_switch_into_unrelated_colocated(self):
+ # Create a new colocated branch from an existing non-colocated branch.
+ tree = self.make_branch_and_tree('.', format='development-colo')
+ self.build_tree(['file-1', 'file-2'])
+ tree.add('file-1')
+ revid1 = tree.commit('rev1')
+ tree.add('file-2')
+ revid2 = tree.commit('rev2')
+ tree.bzrdir.create_branch(name='foo')
+ self.run_bzr_error(['Cannot switch a branch, only a checkout.'],
+ 'switch foo')
+ self.run_bzr(['switch', '--force', 'foo'])
+
+ def test_switch_existing_colocated(self):
+ # Create a branch branch-1 that initially is a checkout of 'foo'
+ # Use switch to change it to 'anotherbranch'
+ repo = self.make_repository('branch-1', format='development-colo')
+ target_branch = repo.bzrdir.create_branch(name='foo')
+ repo.bzrdir.set_branch_reference(target_branch)
+ tree = repo.bzrdir.create_workingtree()
+ self.build_tree(['branch-1/file-1', 'branch-1/file-2'])
+ tree.add('file-1')
+ revid1 = tree.commit('rev1')
+ tree.add('file-2')
+ revid2 = tree.commit('rev2')
+ otherbranch = tree.bzrdir.create_branch(name='anotherbranch')
+ otherbranch.generate_revision_history(revid1)
+ self.run_bzr(['switch', 'anotherbranch'], working_dir='branch-1')
+ tree = WorkingTree.open("branch-1")
+ self.assertEquals(tree.last_revision(), revid1)
+ self.assertEquals(tree.branch.control_url, otherbranch.control_url)
+
+ def test_switch_new_colocated(self):
+ # Create a branch branch-1 that initially is a checkout of 'foo'
+ # Use switch to create 'anotherbranch' which derives from that
+ repo = self.make_repository('branch-1', format='development-colo')
+ target_branch = repo.bzrdir.create_branch(name='foo')
+ repo.bzrdir.set_branch_reference(target_branch)
+ tree = repo.bzrdir.create_workingtree()
+ self.build_tree(['branch-1/file-1', 'branch-1/file-2'])
+ tree.add('file-1')
+ revid1 = tree.commit('rev1')
+ self.run_bzr(['switch', '-b', 'anotherbranch'], working_dir='branch-1')
+ bzrdir = ControlDir.open("branch-1")
+ self.assertEquals(
+ set([b.name for b in bzrdir.list_branches()]),
+ set(["foo", "anotherbranch"]))
+ self.assertEquals(bzrdir.open_branch().name, "anotherbranch")
+ self.assertEquals(bzrdir.open_branch().last_revision(), revid1)
+
+ def test_switch_new_colocated_unicode(self):
+ # Create a branch branch-1 that initially is a checkout of 'foo'
+ # Use switch to create 'branch\xe9' which derives from that
+ self.requireFeature(UnicodeFilenameFeature)
+ repo = self.make_repository('branch-1', format='development-colo')
+ target_branch = repo.bzrdir.create_branch(name='foo')
+ repo.bzrdir.set_branch_reference(target_branch)
+ tree = repo.bzrdir.create_workingtree()
+ self.build_tree(['branch-1/file-1', 'branch-1/file-2'])
+ tree.add('file-1')
+ revid1 = tree.commit('rev1')
+ self.run_bzr(['switch', '-b', u'branch\xe9'], working_dir='branch-1')
+ bzrdir = ControlDir.open("branch-1")
+ self.assertEquals(
+ set([b.name for b in bzrdir.list_branches()]),
+ set(["foo", u"branch\xe9"]))
+ self.assertEquals(bzrdir.open_branch().name, u"branch\xe9")
+ self.assertEquals(bzrdir.open_branch().last_revision(), revid1)
+
+ def test_switch_only_revision(self):
+ tree = self._create_sample_tree()
+ checkout = tree.branch.create_checkout('checkout', lightweight=True)
+ self.assertPathExists('checkout/file-1')
+ self.assertPathExists('checkout/file-2')
+ self.run_bzr(['switch', '-r1'], working_dir='checkout')
+ self.assertPathExists('checkout/file-1')
+ self.assertPathDoesNotExist('checkout/file-2')
+ # Check that we don't accept a range
+ self.run_bzr_error(
+ ['bzr switch --revision takes exactly one revision identifier'],
+ ['switch', '-r0..2'], working_dir='checkout')
+
+ def prepare_lightweight_switch(self):
+ branch = self.make_branch('branch')
+ branch.create_checkout('tree', lightweight=True)
+ osutils.rename('branch', 'branch1')
+
+ def test_switch_lightweight_after_branch_moved(self):
+ self.prepare_lightweight_switch()
+ self.run_bzr('switch --force ../branch1', working_dir='tree')
+ branch_location = WorkingTree.open('tree').branch.base
+ self.assertEndsWith(branch_location, 'branch1/')
+
+ def test_switch_lightweight_after_branch_moved_relative(self):
+ self.prepare_lightweight_switch()
+ self.run_bzr('switch --force branch1', working_dir='tree')
+ branch_location = WorkingTree.open('tree').branch.base
+ self.assertEndsWith(branch_location, 'branch1/')
+
+ def test_create_branch_no_branch(self):
+ self.prepare_lightweight_switch()
+ self.run_bzr_error(['cannot create branch without source branch'],
+ 'switch --create-branch ../branch2', working_dir='tree')
+
+ def test_create_branch(self):
+ branch = self.make_branch('branch')
+ tree = branch.create_checkout('tree', lightweight=True)
+ tree.commit('one', rev_id='rev-1')
+ self.run_bzr('switch --create-branch ../branch2', working_dir='tree')
+ tree = WorkingTree.open('tree')
+ self.assertEndsWith(tree.branch.base, '/branch2/')
+
+ def test_create_branch_local(self):
+ branch = self.make_branch('branch')
+ tree = branch.create_checkout('tree', lightweight=True)
+ tree.commit('one', rev_id='rev-1')
+ self.run_bzr('switch --create-branch branch2', working_dir='tree')
+ tree = WorkingTree.open('tree')
+ # The new branch should have been created at the same level as
+ # 'branch', because we did not have a '/' segment
+ self.assertEqual(branch.base[:-1] + '2/', tree.branch.base)
+
+ def test_create_branch_short_name(self):
+ branch = self.make_branch('branch')
+ tree = branch.create_checkout('tree', lightweight=True)
+ tree.commit('one', rev_id='rev-1')
+ self.run_bzr('switch -b branch2', working_dir='tree')
+ tree = WorkingTree.open('tree')
+ # The new branch should have been created at the same level as
+ # 'branch', because we did not have a '/' segment
+ self.assertEqual(branch.base[:-1] + '2/', tree.branch.base)
+
+ def test_create_branch_directory_services(self):
+ branch = self.make_branch('branch')
+ tree = branch.create_checkout('tree', lightweight=True)
+ class FooLookup(object):
+ def look_up(self, name, url):
+ return 'foo-'+name
+ directories.register('foo:', FooLookup, 'Create branches named foo-')
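+ # the 'foo:' directory service makes 'foo:branch2' resolve to the
+ # relative path 'foo-branch2'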
+ self.addCleanup(directories.remove, 'foo:')
+ self.run_bzr('switch -b foo:branch2', working_dir='tree')
+ tree = WorkingTree.open('tree')
+ self.assertEndsWith(tree.branch.base, 'foo-branch2/')
+
+ def test_switch_with_post_switch_hook(self):
+ from bzrlib import branch as _mod_branch
+ calls = []
+ _mod_branch.Branch.hooks.install_named_hook('post_switch',
+ calls.append, None)
+ self.make_branch_and_tree('branch')
+ self.run_bzr('branch branch branch2')
+ self.run_bzr('checkout branch checkout')
+ os.chdir('checkout')
+ self.assertLength(0, calls)
+ out, err = self.run_bzr('switch ../branch2')
+ self.assertLength(1, calls)
+
+ def test_switch_lightweight_co_with_post_switch_hook(self):
+ from bzrlib import branch as _mod_branch
+ calls = []
+ _mod_branch.Branch.hooks.install_named_hook('post_switch',
+ calls.append, None)
+ self.make_branch_and_tree('branch')
+ self.run_bzr('branch branch branch2')
+ self.run_bzr('checkout --lightweight branch checkout')
+ os.chdir('checkout')
+ self.assertLength(0, calls)
+ out, err = self.run_bzr('switch ../branch2')
+ self.assertLength(1, calls)
+
+ def test_switch_lightweight_directory(self):
+ """Test --directory option"""
+
+ # create a source branch
+ a_tree = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/a', 'initial\n')])
+ a_tree.add('a')
+ a_tree.commit(message='initial')
+
+ # clone and add a differing revision
+ b_tree = a_tree.bzrdir.sprout('b').open_workingtree()
+ self.build_tree_contents([('b/a', 'initial\nmore\n')])
+ b_tree.commit(message='more')
+
+ self.run_bzr('checkout --lightweight a checkout')
+ self.run_bzr('switch --directory checkout b')
+ self.assertFileEqual('initial\nmore\n', 'checkout/a')
+
+
+class TestSwitchParentLocationBase(TestCaseWithTransport):
+
+ def setUp(self):
+ """Set up a repository and branch ready for testing."""
+ super(TestSwitchParentLocationBase, self).setUp()
+ self.script_runner = script.ScriptRunner()
+ self.script_runner.run_script(self, '''
+ $ bzr init-repo --no-trees repo
+ Shared repository...
+ Location:
+ shared repository: repo
+ $ bzr init repo/trunk
+ Created a repository branch...
+ Using shared repository: ...
+ ''')
+
+ def assertParent(self, expected_parent, branch):
+ """Verify that the parent is not None and is set correctly."""
+ actual_parent = branch.get_parent()
+ self.assertIsSameRealPath(urlutils.local_path_to_url(expected_parent),
+ actual_parent)
+
+
+class TestSwitchParentLocation(TestSwitchParentLocationBase):
+
+ def _checkout_and_switch(self, option=''):
+ self.script_runner.run_script(self, '''
+ $ bzr checkout %(option)s repo/trunk checkout
+ $ cd checkout
+ $ bzr switch --create-branch switched
+ 2>Tree is up to date at revision 0.
+ 2>Switched to branch:...switched...
+ $ cd ..
+ ''' % locals())
+ bound_branch = branch.Branch.open_containing('checkout')[0]
+ master_branch = branch.Branch.open_containing('repo/switched')[0]
+ return (bound_branch, master_branch)
+
+ def test_switch_parent_lightweight(self):
+ """Lightweight checkout using bzr switch."""
+ bb, mb = self._checkout_and_switch(option='--lightweight')
+ self.assertParent('repo/trunk', bb)
+ self.assertParent('repo/trunk', mb)
+
+ def test_switch_parent_heavyweight(self):
+ """Heavyweight checkout using bzr switch."""
+ bb, mb = self._checkout_and_switch()
+ self.assertParent('repo/trunk', bb)
+ self.assertParent('repo/trunk', mb)
+
+
+class TestSwitchDoesntOpenMasterBranch(TestCaseWithTransport):
+ # See https://bugs.launchpad.net/bzr/+bug/812285
+ # "bzr switch --create-branch" can point the new branch's parent to the
+ # master branch, but it doesn't have to open it to do so.
+
+ def test_switch_create_doesnt_open_master_branch(self):
+ master = self.make_branch_and_tree('master')
+ master.commit('one')
+ # Note: not a lightweight checkout
+ checkout = master.branch.create_checkout('checkout')
+ opened = []
+ def open_hook(branch):
+ # Just append the final directory of the branch
+ name = branch.base.rstrip('/').rsplit('/', 1)[1]
+ opened.append(name)
+ branch.Branch.hooks.install_named_hook('open', open_hook,
+ 'open_hook_logger')
+ self.run_bzr('switch --create-branch -d checkout feature')
+ # We only open the master branch 1 time.
+ # This test should be cleaner to write, but see bug:
+ # https://bugs.launchpad.net/bzr/+bug/812295
+ self.assertEqual(1, opened.count('master'))
+
+
+class TestSmartServerSwitch(TestCaseWithTransport):
+
+ def test_switch_lightweight(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(9):
+ t.commit(message='commit %d' % count)
+ out, err = self.run_bzr(['checkout', '--lightweight', self.get_url('from'),
+ 'target'])
+ self.reset_smart_call_log()
+ self.run_bzr(['switch', self.get_url('from')], working_dir='target')
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(24, self.hpss_calls)
+ self.assertLength(4, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_tags.py b/bzrlib/tests/blackbox/test_tags.py
new file mode 100644
index 0000000..cc74792
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_tags.py
@@ -0,0 +1,448 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for commands related to tags"""
+
+from bzrlib import (
+ errors,
+ tag,
+ transform,
+ )
+from bzrlib.branch import (
+ Branch,
+ )
+from bzrlib.tests import (
+ script,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.workingtree import WorkingTree
+
+
+class TestTagging(TestCaseWithTransport):
+
+ def test_tag_command_help(self):
+ out, err = self.run_bzr('help tag')
+ self.assertContainsRe(out, 'Create, remove or modify a tag')
+
+ def test_cannot_tag_range(self):
+ out, err = self.run_bzr('tag -r1..10 name', retcode=3)
+ self.assertContainsRe(err,
+ "Tags can only be placed on a single revision")
+
+ def test_no_tag_name(self):
+ out, err = self.run_bzr('tag -d branch', retcode=3)
+ self.assertContainsRe(err, 'Please specify a tag name.')
+
+ def test_automatic_tag_name(self):
+ def get_tag_name(branch, revid):
+ return "mytag"
+ Branch.hooks.install_named_hook('automatic_tag_name',
+ get_tag_name, 'get tag name')
+ out, err = self.run_bzr('tag -d branch')
+ self.assertContainsRe(err, 'Created tag mytag.')
+
+ def test_tag_current_rev(self):
+ t = self.make_branch_and_tree('branch')
+ t.commit(allow_pointless=True, message='initial commit',
+ rev_id='first-revid')
+ # make a tag through the command line
+ out, err = self.run_bzr('tag -d branch NEWTAG')
+ self.assertContainsRe(err, 'Created tag NEWTAG.')
+ # tag should be observable through the api
+ self.assertEquals(t.branch.tags.get_tag_dict(),
+ dict(NEWTAG='first-revid'))
+ # can also create tags using -r
+ self.run_bzr('tag -d branch tag2 -r1')
+ self.assertEquals(t.branch.tags.lookup_tag('tag2'), 'first-revid')
+ # regression test: make sure a unicode revision from the user
+ # gets turned into a str object properly. The use of a unicode
+ # object for the revid is intentional.
+ self.run_bzr(['tag', '-d', 'branch', 'tag3', u'-rrevid:first-revid'])
+ self.assertEquals(t.branch.tags.lookup_tag('tag3'), 'first-revid')
+ # can also delete an existing tag
+ out, err = self.run_bzr('tag --delete -d branch tag2')
+ # cannot replace an existing tag normally
+ out, err = self.run_bzr('tag -d branch NEWTAG -r0', retcode=3)
+ self.assertContainsRe(err, 'Tag NEWTAG already exists\\.')
+ # ... but can if you use --force
+ out, err = self.run_bzr('tag -d branch NEWTAG --force -r0')
+ self.assertEquals("Updated tag NEWTAG.\n", err)
+
+ def test_tag_same_revision(self):
+ t = self.make_branch_and_tree('branch')
+ t.commit(allow_pointless=True, message='initial commit',
+ rev_id='first-revid')
+ t.commit(allow_pointless=True, message='second commit',
+ rev_id='second-revid')
+ out, err = self.run_bzr('tag -rrevid:first-revid -d branch NEWTAG')
+ out, err = self.run_bzr('tag -rrevid:first-revid -d branch NEWTAG')
+ self.assertContainsRe(err,
+ 'Tag NEWTAG already exists for that revision\\.')
+ out, err = self.run_bzr('tag -rrevid:second-revid -d branch NEWTAG',
+ retcode=3)
+ self.assertContainsRe(err, 'Tag NEWTAG already exists\\.')
+
+ def test_tag_delete_requires_name(self):
+ out, err = self.run_bzr('tag -d branch', retcode=3)
+ self.assertContainsRe(err, 'Please specify a tag name\\.')
+
+ def test_branch_push_pull_merge_copies_tags(self):
+ t = self.make_branch_and_tree('branch1')
+ t.commit(allow_pointless=True, message='initial commit',
+ rev_id='first-revid')
+ b1 = t.branch
+ b1.tags.set_tag('tag1', 'first-revid')
+ # branching copies the tag across
+ self.run_bzr('branch branch1 branch2')
+ b2 = Branch.open('branch2')
+ self.assertEquals(b2.tags.lookup_tag('tag1'), 'first-revid')
+ # make a new tag and pull it
+ b1.tags.set_tag('tag2', 'twa')
+ self.run_bzr('pull -d branch2 branch1')
+ self.assertEquals(b2.tags.lookup_tag('tag2'), 'twa')
+ # make a new tag and push it
+ b1.tags.set_tag('tag3', 'san')
+ self.run_bzr('push -d branch1 branch2')
+ self.assertEquals(b2.tags.lookup_tag('tag3'), 'san')
+ # make a new tag and merge it
+ t.commit(allow_pointless=True, message='second commit',
+ rev_id='second-revid')
+ t2 = WorkingTree.open('branch2')
+ t2.commit(allow_pointless=True, message='commit in second')
+ b1.tags.set_tag('tag4', 'second-revid')
+ self.run_bzr('merge -d branch2 branch1')
+ self.assertEquals(b2.tags.lookup_tag('tag4'), 'second-revid')
+ # pushing to a new location copies the tag across
+ self.run_bzr('push -d branch1 branch3')
+ b3 = Branch.open('branch3')
+ self.assertEquals(b3.tags.lookup_tag('tag1'), 'first-revid')
+
+ def make_master_and_checkout(self):
+ builder = self.make_branch_builder('master')
+ builder.build_commit(message='Initial commit.', rev_id='rev-1')
+ master = builder.get_branch()
+ child = master.create_checkout(self.get_url('child'))
+ return master, child
+
+ def make_fork(self, branch):
+ fork = branch.create_clone_on_transport(self.get_transport('fork'))
+ self.addCleanup(fork.lock_write().unlock)
+ with transform.TransformPreview(fork.basis_tree()) as tt:
+ tt.commit(fork, message='Commit in fork.', revision_id='fork-0')
+ with transform.TransformPreview(fork.basis_tree()) as tt:
+ tt.commit(fork, message='Commit in fork.', revision_id='fork-1')
+ return fork
+
+ def test_merge_without_commit_does_not_propagate_tags_to_master(self):
+ """'bzr merge' alone does not propagate tags to a master branch.
+
+ (If the user runs 'bzr commit', then that is when the tags from the
+ merge are propagated.)
+ """
+ master, child = self.make_master_and_checkout()
+ fork = self.make_fork(master)
+ fork.tags.set_tag('new-tag', fork.last_revision())
+ self.run_bzr(['merge', '../fork'], working_dir='child')
+ self.assertEqual({}, master.tags.get_tag_dict())
+
+ def test_commit_in_heavyweight_checkout_copies_tags_to_master(self):
+ master, child = self.make_master_and_checkout()
+ fork = self.make_fork(master)
+ fork.tags.set_tag('new-tag', fork.last_revision())
+ fork.tags.set_tag('non-ancestry-tag', 'fork-0')
+ fork.tags.set_tag('absent-tag', 'absent-rev')
+ script.run_script(self, """
+ $ cd child
+ $ bzr merge ../fork
+ $ bzr commit -m "Merge fork."
+ 2>Committing to: .../master/
+ 2>Committed revision 2.
+ """, null_output_matches_anything=True)
+ # Merge copied the tag to child and commit propagated it to master
+ expected_tag_dict = {
+ 'new-tag': fork.last_revision(),
+ 'non-ancestry-tag': 'fork-0',
+ 'absent-tag': 'absent-rev',
+ }
+ self.assertEqual(expected_tag_dict, child.branch.tags.get_tag_dict())
+ self.assertEqual(expected_tag_dict, master.tags.get_tag_dict())
+ # Revisions not in ancestry but named in tags are present
+ child.branch.repository.get_revision('fork-0')
+ master.repository.get_revision('fork-0')
+
+ def test_commit_in_heavyweight_checkout_reports_tag_conflict(self):
+ master, child = self.make_master_and_checkout()
+ fork = self.make_fork(master)
+ fork.tags.set_tag('new-tag', fork.last_revision())
+ master_r1 = master.last_revision()
+ master.tags.set_tag('new-tag', master_r1)
+ script.run_script(self, """
+ $ cd child
+ $ bzr merge ../fork
+ $ bzr commit -m "Merge fork."
+ 2>Committing to: .../master/
+ 2>Conflicting tags in bound branch:
+ 2> new-tag
+ 2>Committed revision 2.
+ """, null_output_matches_anything=True)
+ # Merge copied the tag to child. master's conflicting tag is unchanged.
+ self.assertEqual(
+ {'new-tag': fork.last_revision()}, child.branch.tags.get_tag_dict())
+ self.assertEqual(
+ {'new-tag': master_r1}, master.tags.get_tag_dict())
+
+ def test_list_tags(self):
+ tree1 = self.make_branch_and_tree('branch1')
+ tree1.commit(allow_pointless=True, message='revision 1',
+ rev_id='revid-1', timestamp=10)
+ tree1.commit(allow_pointless=True, message='revision 2',
+ rev_id='revid-2', timestamp=15)
+
+ b1 = tree1.branch
+ # note how the tag for revid-1 sorts after the one for revid-2
+ b1.tags.set_tag(u'tag1\u30d0', 'revid-2')
+ b1.tags.set_tag(u'tag10\u30d0', 'missing') # not present in repository
+ b1.tags.set_tag(u'tag2\u30d0', 'revid-1')
+
+ # natural order
+ out, err = self.run_bzr('tags -d branch1',
+ encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, (u'^tag1\u30d0 *2\ntag2\u30d0 *1\n' +
+ u'tag10\u30d0 *\\?\n').encode('utf-8'))
+
+ # lexicographical order
+ out, err = self.run_bzr('tags --sort=alpha -d branch1',
+ encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, (u'^tag10\u30d0 *\\?\ntag1\u30d0 *2\n' +
+ u'tag2\u30d0 *1\n').encode('utf-8'))
+
+ out, err = self.run_bzr('tags --sort=alpha --show-ids -d branch1',
+ encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, (u'^tag10\u30d0 *missing\n' +
+ u'tag1\u30d0 *revid-2\ntag2\u30d0 *revid-1\n').encode('utf-8'))
+
+ # chronological order
+ out, err = self.run_bzr('tags --sort=time -d branch1',
+ encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, (u'^tag2\u30d0 *1\ntag1\u30d0 *2\n' +
+ u'tag10\u30d0 *\\?\n').encode('utf-8'))
+
+ out, err = self.run_bzr('tags --sort=time --show-ids -d branch1',
+ encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, (u'^tag2\u30d0 *revid-1\n' +
+ u'tag1\u30d0 *revid-2\ntag10\u30d0 *missing\n').encode('utf-8'))
+
+ # now test dotted revnos
+ tree2 = tree1.bzrdir.sprout('branch2').open_workingtree()
+ tree1.commit(allow_pointless=True, message='revision 3 in branch1',
+ rev_id='revid-3a')
+ tree2.commit(allow_pointless=True, message='revision 3 in branch2',
+ rev_id='revid-3b')
+
+ b2 = tree2.branch
+ b2.tags.set_tag('tagD', 'revid-3b')
+ self.run_bzr('merge -d branch1 branch2')
+ tree1.commit('merge', rev_id='revid-4')
+
+ out, err = self.run_bzr('tags -d branch1', encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, r'tagD *2\.1\.1\n')
+ out, err = self.run_bzr('tags -d branch2', encoding='utf-8')
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, r'tagD *3\n')
+
+ def test_list_tags_dotted_revnos_unsupported(self):
+ tree = self.make_branch_and_tree('branch')
+ rev1 = tree.commit("rev1")
+ tree.branch.tags.set_tag("mytag", rev1)
+ def revision_id_to_dotted_revno(self, revid):
+ raise errors.UnsupportedOperation(revision_id_to_dotted_revno, self)
+ self.overrideAttr(Branch, "revision_id_to_dotted_revno",
+ revision_id_to_dotted_revno)
+ out, err = self.run_bzr('tags -d branch', encoding='utf-8')
+ self.assertEquals(out, 'mytag ?\n')
+
+ def test_list_tags_revision_filtering(self):
+ tree1 = self.make_branch_and_tree('.')
+ tree1.commit(allow_pointless=True, message='revision 1',
+ rev_id='revid-1')
+ tree1.commit(allow_pointless=True, message='revision 2',
+ rev_id='revid-2')
+ tree1.commit(allow_pointless=True, message='revision 3',
+ rev_id='revid-3')
+ tree1.commit(allow_pointless=True, message='revision 4',
+ rev_id='revid-4')
+ b1 = tree1.branch
+ b1.tags.set_tag(u'tag 1', 'revid-1')
+ b1.tags.set_tag(u'tag 2', 'revid-2')
+ b1.tags.set_tag(u'tag 3', 'revid-3')
+ b1.tags.set_tag(u'tag 4', 'revid-4')
+ self._check_tag_filter('', (1, 2, 3, 4))
+ self._check_tag_filter('-r ..', (1, 2, 3, 4))
+ self._check_tag_filter('-r ..2', (1, 2))
+ self._check_tag_filter('-r 2..', (2, 3, 4))
+ self._check_tag_filter('-r 2..3', (2, 3))
+ self._check_tag_filter('-r 3..2', ())
+ self.run_bzr_error(args="tags -r 123",
+ error_regexes=["bzr: ERROR: Requested revision: '123' "
+ "does not exist in branch:"])
+ self.run_bzr_error(args="tags -r ..123",
+ error_regexes=["bzr: ERROR: Requested revision: '123' "
+ "does not exist in branch:"])
+ self.run_bzr_error(args="tags -r 123.123",
+ error_regexes=["bzr: ERROR: Requested revision: '123.123' "
+ "does not exist in branch:"])
+
+ def test_sort_tags_custom(self):
+ def sort_by_dots(branch, tags):
+ def sort_key((tag, revid)):
+ return tag.count(".")
+ tags.sort(key=sort_key)
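+ # (sort_key uses tuple parameter unpacking -- "def sort_key((tag, revid))"
+ # -- which is Python 2 only syntax, removed by PEP 3113.)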
+
+ # Register a custom sort method
+ tag.tag_sort_methods.register("dots", sort_by_dots, "Sort by dots.")
+ self.addCleanup(tag.tag_sort_methods.remove, "dots")
+
+ tree1 = self.make_branch_and_tree('branch1')
+ tree1.commit(allow_pointless=True, message='revision 1',
+ rev_id='revid-1', timestamp=10)
+ tree1.commit(allow_pointless=True, message='revision 2',
+ rev_id='revid-2', timestamp=15)
+
+ b1 = tree1.branch
+
+ b1.tags.set_tag(u'tag..', 'revid-2')
+ b1.tags.set_tag(u'tag....', 'missing') # not present in repository
+ b1.tags.set_tag(u'tag.', 'revid-1')
+ b1.tags.set_tag(u'tag...', 'revid-1')
+ b1.tags.set_tag(u'tag....', 'revid-1')
+
+ # sorted by number of dots
+ out, err = self.run_bzr('tags --sort=dots -d branch1')
+ self.assertEquals(err, '')
+ self.assertEquals([
+ 'tag. 1',
+ 'tag.. 2',
+ 'tag... 1',
+ 'tag.... 1'],
+ out.splitlines())
+
+ def _check_tag_filter(self, argstr, expected_revnos):
+ #upper bound of laziness
+ out, err = self.run_bzr('tags ' + argstr)
+ self.assertEquals(err, '')
+ self.assertContainsRe(out, "^" + ''.join(["tag %s +%s\n" % (
+ revno, revno) for revno in expected_revnos]) + "$")
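+ # e.g. expected_revnos (1, 2) builds the pattern "^tag 1 +1\ntag 2 +2\n$";
+ # each tag in this test is named after the revno it points at.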
+
+ def test_conflicting_tags(self):
+ # setup two empty branches with different tags
+ t1 = self.make_branch_and_tree('one')
+ t2 = self.make_branch_and_tree('two')
+ b1 = t1.branch
+ b2 = t2.branch
+ tagname = u'\u30d0zaar'
+ b1.tags.set_tag(tagname, 'revid1')
+ b2.tags.set_tag(tagname, 'revid2')
+ # push should give a warning about the tags
+ out, err = self.run_bzr('push -d one two', encoding='utf-8')
+ self.assertContainsRe(out,
+ 'Conflicting tags:\n.*' + tagname.encode('utf-8'))
+ # pull should give a warning about the tags
+ out, err = self.run_bzr('pull -d one two', encoding='utf-8',
+ retcode=1)
+ self.assertContainsRe(out,
+ 'Conflicting tags:\n.*' + tagname.encode('utf-8'))
+ # merge should give a warning about the tags -- not implemented yet
+ ## out, err = self.run_bzr('merge -d one two', encoding='utf-8')
+ ## self.assertContainsRe(out,
+ ## 'Conflicting tags:\n.*' + tagname.encode('utf-8'))
+
+ def test_tag_quiet(self):
+ t1 = self.make_branch_and_tree('')
+ out, err = self.run_bzr('tag --quiet test1')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_tag_delete_quiet(self):
+ t1 = self.make_branch_and_tree('')
+ self.run_bzr('tag test1')
+ out, err = self.run_bzr('tag --delete --quiet test1')
+ self.assertEqual('', out)
+ self.assertEqual('', err)
+
+ def test_tags_with_mainline_ghosts(self):
+ tree = self.make_branch_and_tree('tree1')
+ tree.set_parent_ids(["spooky"], allow_leftmost_as_ghost=True)
+ tree.add('')
+ tree.commit('msg1', rev_id='rev1')
+ tree.commit('msg2', rev_id='rev2')
+ tree.branch.tags.set_tag('unknown', 'out-of-mainline')
+ tree.branch.tags.set_tag('ghost', 'spooky')
+ tree.branch.tags.set_tag('tag1', 'rev1')
+ tree.branch.tags.set_tag('tag2', 'rev2')
+
+ out, err = self.run_bzr('tags -d tree1', encoding='utf-8')
+ self.assertEqual(out,
+ 'ghost ?\n'
+ 'tag1 1\n'
+ 'tag2 2\n'
+ 'unknown ?\n')
+ self.assertEqual('', err)
+
+
+class TestSmartServerCat(TestCaseWithTransport):
+
+ def test_set_tag(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['tag', "-d", self.get_url('branch'), "tagname"])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(9, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
+
+ def test_show_tags(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'thecontents')])
+ t.add("foo")
+ t.commit("message")
+ t.branch.tags.set_tag("sometag", "rev1")
+ t.branch.tags.set_tag("sometag", "rev2")
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['tags', "-d", self.get_url('branch')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(6, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_testament.py b/bzrlib/tests/blackbox/test_testament.py
new file mode 100644
index 0000000..a3ca787
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_testament.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Blackbox tests for the 'bzr testament' command"""
+
+
+from bzrlib.tests.test_testament import (
+ REV_1_SHORT,
+ REV_1_SHORT_STRICT,
+ REV_2_TESTAMENT,
+ TestamentSetup,
+ )
+
+
+class TestTestament(TestamentSetup):
+ """Run blackbox tests on 'bzr testament'"""
+
+ def test_testament_command(self):
+ """Testament containing a file and a directory."""
+ out, err = self.run_bzr('testament --long')
+ self.assertEqualDiff(err, '')
+ self.assertEqualDiff(out, REV_2_TESTAMENT)
+
+ def test_testament_command_2(self):
+ """Command getting short testament of previous version."""
+ out, err = self.run_bzr('testament -r1')
+ self.assertEqualDiff(err, '')
+ self.assertEqualDiff(out, REV_1_SHORT)
+
+ def test_testament_command_3(self):
+ """Command getting short testament of previous version."""
+ out, err = self.run_bzr('testament -r1 --strict')
+ self.assertEqualDiff(err, '')
+ self.assertEqualDiff(out, REV_1_SHORT_STRICT)
+
diff --git a/bzrlib/tests/blackbox/test_too_much.py b/bzrlib/tests/blackbox/test_too_much.py
new file mode 100644
index 0000000..8cf2f61
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_too_much.py
@@ -0,0 +1,622 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Mr. Smoketoomuch: I'm sorry?
+# Mr. Bounder: You'd better cut down a little then.
+# Mr. Smoketoomuch: Oh, I see! Smoke too much so I'd better cut down a little
+# then!
+
+"""Black-box tests for bzr.
+
+These check that it behaves properly when it's invoked through the regular
+command-line interface. This doesn't actually run a new interpreter but
+rather starts again from the run_bzr function.
+"""
+
+
+# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+# Note: Please don't add new tests here, it's too big and bulky. Instead add
+# them into small suites in bzrlib.tests.blackbox.test_FOO for the particular
+# UI command/aspect that is being tested.
+
+
+from cStringIO import StringIO
+import os
+import re
+import sys
+
+import bzrlib
+from bzrlib import (
+ osutils,
+ )
+from bzrlib.branch import Branch
+from bzrlib.errors import BzrCommandError
+from bzrlib.tests.http_utils import TestCaseWithWebserver
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.workingtree import WorkingTree
+
+
+class TestCommands(TestCaseWithTransport):
+
+ def test_invalid_commands(self):
+ self.run_bzr("pants", retcode=3)
+ self.run_bzr("--pants off", retcode=3)
+ self.run_bzr("diff --message foo", retcode=3)
+
+ def test_revert(self):
+ self.run_bzr('init')
+
+ with file('hello', 'wt') as f: f.write('foo')
+ self.run_bzr('add hello')
+ self.run_bzr('commit -m setup hello')
+
+ with file('goodbye', 'wt') as f: f.write('baz')
+ self.run_bzr('add goodbye')
+ self.run_bzr('commit -m setup goodbye')
+
+ with file('hello', 'wt') as f: f.write('bar')
+ with file('goodbye', 'wt') as f: f.write('qux')
+ self.run_bzr('revert hello')
+ self.check_file_contents('hello', 'foo')
+ self.check_file_contents('goodbye', 'qux')
+ self.run_bzr('revert')
+ self.check_file_contents('goodbye', 'baz')
+
+ os.mkdir('revertdir')
+ self.run_bzr('add revertdir')
+ self.run_bzr('commit -m f')
+ os.rmdir('revertdir')
+ self.run_bzr('revert')
+
+ if osutils.has_symlinks():
+ os.symlink('/unlikely/to/exist', 'symlink')
+ self.run_bzr('add symlink')
+ self.run_bzr('commit -m f')
+ os.unlink('symlink')
+ self.run_bzr('revert')
+ self.assertPathExists('symlink')
+ os.unlink('symlink')
+ os.symlink('a-different-path', 'symlink')
+ self.run_bzr('revert')
+ self.assertEqual('/unlikely/to/exist',
+ os.readlink('symlink'))
+ else:
+ self.log("skipping revert symlink tests")
+
+ with file('hello', 'wt') as f: f.write('xyz')
+ self.run_bzr('commit -m xyz hello')
+ self.run_bzr('revert -r 1 hello')
+ self.check_file_contents('hello', 'foo')
+ self.run_bzr('revert hello')
+ self.check_file_contents('hello', 'xyz')
+ os.chdir('revertdir')
+ self.run_bzr('revert')
+ os.chdir('..')
+
+ def example_branch(test):
+ test.run_bzr('init')
+ with file('hello', 'wt') as f: f.write('foo')
+ test.run_bzr('add hello')
+ test.run_bzr('commit -m setup hello')
+ with file('goodbye', 'wt') as f: f.write('baz')
+ test.run_bzr('add goodbye')
+ test.run_bzr('commit -m setup goodbye')
+
+ def test_pull_verbose(self):
+ """Pull changes from one branch to another and watch the output."""
+
+ os.mkdir('a')
+ os.chdir('a')
+
+ self.example_branch()
+
+ os.chdir('..')
+ self.run_bzr('branch a b')
+ os.chdir('b')
+ with open('b', 'wb') as f: f.write('else\n')
+ self.run_bzr('add b')
+ self.run_bzr(['commit', '-m', 'added b'])
+
+ os.chdir('../a')
+ out = self.run_bzr('pull --verbose ../b')[0]
+ self.assertNotEqual(out.find('Added Revisions:'), -1)
+ self.assertNotEqual(out.find('message:\n added b'), -1)
+ self.assertNotEqual(out.find('added b'), -1)
+
+ # Check that --overwrite --verbose prints out the removed entries
+ self.run_bzr('commit -m foo --unchanged')
+ os.chdir('../b')
+ self.run_bzr('commit -m baz --unchanged')
+ self.run_bzr('pull ../a', retcode=3)
+ out = self.run_bzr('pull --overwrite --verbose ../a')[0]
+
+ remove_loc = out.find('Removed Revisions:')
+ self.assertNotEqual(remove_loc, -1)
+ added_loc = out.find('Added Revisions:')
+ self.assertNotEqual(added_loc, -1)
+
+ removed_message = out.find('message:\n baz')
+ self.assertNotEqual(removed_message, -1)
+ self.assertTrue(remove_loc < removed_message < added_loc)
+
+ added_message = out.find('message:\n foo')
+ self.assertNotEqual(added_message, -1)
+ self.assertTrue(added_loc < added_message)
+
+ def test_locations(self):
+ """Using and remembering different locations"""
+ os.mkdir('a')
+ os.chdir('a')
+ self.run_bzr('init')
+ self.run_bzr('commit -m unchanged --unchanged')
+ self.run_bzr('pull', retcode=3)
+ self.run_bzr('merge', retcode=3)
+ self.run_bzr('branch . ../b')
+ os.chdir('../b')
+ self.run_bzr('pull')
+ self.run_bzr('branch . ../c')
+ self.run_bzr('pull ../c')
+ self.run_bzr('merge')
+ os.chdir('../a')
+ self.run_bzr('pull ../b')
+ self.run_bzr('pull')
+ self.run_bzr('pull ../c')
+ self.run_bzr('branch ../c ../d')
+ osutils.rmtree('../c')
+ self.run_bzr('pull')
+ os.chdir('../b')
+ self.run_bzr('pull')
+ os.chdir('../d')
+ self.run_bzr('pull', retcode=3)
+ self.run_bzr('pull ../a --remember')
+ self.run_bzr('pull')
+
+ def test_unknown_command(self):
+ """Handling of unknown command."""
+ out, err = self.run_bzr('fluffy-badger', retcode=3)
+ self.assertEquals(out, '')
+ err.index('unknown command')
+
+ def create_conflicts(self):
+ """Create a conflicted tree"""
+ os.mkdir('base')
+ os.chdir('base')
+ with file('hello', 'wb') as f: f.write("hi world")
+ with file('answer', 'wb') as f: f.write("42")
+ self.run_bzr('init')
+ self.run_bzr('add')
+ self.run_bzr('commit -m base')
+ self.run_bzr('branch . ../other')
+ self.run_bzr('branch . ../this')
+ os.chdir('../other')
+ with file('hello', 'wb') as f: f.write("Hello.")
+ with file('answer', 'wb') as f: f.write("Is anyone there?")
+ self.run_bzr('commit -m other')
+ os.chdir('../this')
+ with file('hello', 'wb') as f: f.write("Hello, world")
+ self.run_bzr('mv answer question')
+ with file('question', 'wb') as f: f.write("What do you get when you multiply six"
+ "times nine?")
+ self.run_bzr('commit -m this')
+
+ def test_status(self):
+ os.mkdir('branch1')
+ os.chdir('branch1')
+ self.run_bzr('init')
+ self.run_bzr('commit --unchanged --message f')
+ self.run_bzr('branch . ../branch2')
+ self.run_bzr('branch . ../branch3')
+ self.run_bzr('commit --unchanged --message peter')
+ os.chdir('../branch2')
+ self.run_bzr('merge ../branch1')
+ self.run_bzr('commit --unchanged --message pumpkin')
+ os.chdir('../branch3')
+ self.run_bzr('merge ../branch2')
+ message = self.run_bzr('status')[0]
+
+
+ def test_conflicts(self):
+ """Handling of merge conflicts"""
+ self.create_conflicts()
+ self.run_bzr('merge ../other --show-base', retcode=1)
+ conflict_text = file('hello').read()
+ self.assert_('<<<<<<<' in conflict_text)
+ self.assert_('>>>>>>>' in conflict_text)
+ self.assert_('=======' in conflict_text)
+ self.assert_('|||||||' in conflict_text)
+ self.assert_('hi world' in conflict_text)
+ self.run_bzr('revert')
+ self.run_bzr('resolve --all')
+ self.run_bzr('merge ../other', retcode=1)
+ conflict_text = file('hello').read()
+ self.assert_('|||||||' not in conflict_text)
+ self.assert_('hi world' not in conflict_text)
+ result = self.run_bzr('conflicts')[0]
+ self.assertEquals(result, "Text conflict in hello\nText conflict in"
+ " question\n")
+ result = self.run_bzr('status')[0]
+ self.assert_("conflicts:\n Text conflict in hello\n"
+ " Text conflict in question\n" in result, result)
+ self.run_bzr('resolve hello')
+ result = self.run_bzr('conflicts')[0]
+ self.assertEquals(result, "Text conflict in question\n")
+ self.run_bzr('commit -m conflicts', retcode=3)
+ self.run_bzr('resolve --all')
+ result = self.run_bzr('conflicts')[0]
+ self.run_bzr('commit -m conflicts')
+ self.assertEquals(result, "")
+
+ def test_push(self):
+ # create a source branch
+ os.mkdir('my-branch')
+ os.chdir('my-branch')
+ self.example_branch()
+
+ # with no push target, fail
+ self.run_bzr('push', retcode=3)
+ # with an explicit target it works
+ self.run_bzr('push ../output-branch')
+ # with an implicit target it works
+ self.run_bzr('push')
+ # nothing missing
+ self.run_bzr('missing ../output-branch')
+ # advance this branch
+ self.run_bzr('commit --unchanged -m unchanged')
+
+ os.chdir('../output-branch')
+ # There is no longer a difference as long as we have
+ # access to the working tree
+ self.run_bzr('diff')
+
+ # But we should be missing a revision
+ self.run_bzr('missing ../my-branch', retcode=1)
+
+ # diverge the branches
+ self.run_bzr('commit --unchanged -m unchanged')
+ os.chdir('../my-branch')
+ # cannot push now
+ self.run_bzr('push', retcode=3)
+ # and there are differences
+ self.run_bzr('missing ../output-branch', retcode=1)
+ self.run_bzr('missing --verbose ../output-branch', retcode=1)
+ # but we can force a push
+ self.run_bzr('push --overwrite')
+ # nothing missing
+ self.run_bzr('missing ../output-branch')
+
+ # pushing to a new dir with no parent should fail
+ self.run_bzr('push ../missing/new-branch', retcode=3)
+ # unless we provide --create-prefix
+ self.run_bzr('push --create-prefix ../missing/new-branch')
+ # nothing missing
+ self.run_bzr('missing ../missing/new-branch')
+
+ def test_external_command(self):
+ """Test that external commands can be run by setting the path
+ """
+ # We don't at present run bzr in a subprocess for blackbox tests, and so
+ # don't really capture stdout, only the internal python stream.
+ # Therefore we don't use a subcommand that produces any output or does
+ # anything -- we just check that it can be run successfully.
+ cmd_name = 'test-command'
+ if sys.platform == 'win32':
+ cmd_name += '.bat'
+ self.overrideEnv('BZRPATH', None)
+
+ f = file(cmd_name, 'wb')
+ if sys.platform == 'win32':
+ f.write('@echo off\n')
+ else:
+ f.write('#!/bin/sh\n')
+ # f.write('echo Hello from test-command')
+ f.close()
+ os.chmod(cmd_name, 0755)
+
+ # It should not find the command in the local
+ # directory by default, since it is not in my path
+ self.run_bzr(cmd_name, retcode=3)
+
+ # Now put it into my path
+ self.overrideEnv('BZRPATH', '.')
+ self.run_bzr(cmd_name)
+
+ # Make sure empty path elements are ignored
+ self.overrideEnv('BZRPATH', os.pathsep)
+ self.run_bzr(cmd_name, retcode=3)
+
+
+def listdir_sorted(dir):
+ L = os.listdir(dir)
+ L.sort()
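+ # i.e. the directory entries in sorted order (same as sorted(os.listdir(dir)))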
+ return L
+
+
+class OldTests(TestCaseWithTransport):
+ """old tests moved from ./testbzr."""
+
+ def test_bzr(self):
+ from os import chdir, mkdir
+ from os.path import exists
+
+ progress = self.log
+
+ progress("basic branch creation")
+ mkdir('branch1')
+ chdir('branch1')
+ self.run_bzr('init')
+
+ self.assertIsSameRealPath(self.run_bzr('root')[0].rstrip(),
+ osutils.pathjoin(self.test_dir, 'branch1'))
+
+ progress("status of new file")
+
+ f = file('test.txt', 'wt')
+ f.write('hello world!\n')
+ f.close()
+
+ self.assertEquals(self.run_bzr('unknowns')[0], 'test.txt\n')
+
+ out = self.run_bzr("status")[0]
+ self.assertEquals(out, 'unknown:\n test.txt\n')
+
+ f = file('test2.txt', 'wt')
+ f.write('goodbye cruel world...\n')
+ f.close()
+
+ out = self.run_bzr("status test.txt")[0]
+ self.assertEquals(out, "unknown:\n test.txt\n")
+
+ out = self.run_bzr("status")[0]
+ self.assertEquals(out, ("unknown:\n" " test.txt\n" " test2.txt\n"))
+
+ os.unlink('test2.txt')
+
+ progress("command aliases")
+ out = self.run_bzr("st")[0]
+ self.assertEquals(out, ("unknown:\n" " test.txt\n"))
+
+ out = self.run_bzr("stat")[0]
+ self.assertEquals(out, ("unknown:\n" " test.txt\n"))
+
+ progress("command help")
+ self.run_bzr("help st")
+ self.run_bzr("help")
+ self.run_bzr("help commands")
+ self.run_bzr("help slartibartfast", retcode=3)
+
+ out = self.run_bzr("help ci")[0]
+ out.index('Aliases: ci, checkin\n')
+
+ f = file('hello.txt', 'wt')
+ f.write('some nice new content\n')
+ f.close()
+
+ self.run_bzr("add hello.txt")
+
+ f = file('msg.tmp', 'wt')
+ f.write('this is my new commit\nand it has multiple lines, for fun')
+ f.close()
+
+ self.run_bzr('commit -F msg.tmp')
+
+ self.assertEquals(self.run_bzr('revno')[0], '1\n')
+ self.run_bzr('export -r 1 export-1.tmp')
+ self.run_bzr('export export.tmp')
+
+ self.run_bzr('log')
+ self.run_bzr('log -v')
+ self.run_bzr('log -v --forward')
+ self.run_bzr('log -m', retcode=3)
+ log_out = self.run_bzr('log -m commit')[0]
+ self.assert_("this is my new commit\n and" in log_out)
+ self.assert_("rename nested" not in log_out)
+ self.assert_('revision-id' not in log_out)
+ self.assert_('revision-id' in self.run_bzr('log --show-ids -m commit')[0])
+
+ log_out = self.run_bzr('log --line')[0]
+ # determine the widest line we want
+ max_width = osutils.terminal_width()
+ if max_width is not None:
+ for line in log_out.splitlines():
+ self.assert_(len(line) <= max_width - 1, len(line))
+ self.assert_("this is my new commit and" not in log_out)
+ self.assert_("this is my new commit" in log_out)
+
+ progress("file with spaces in name")
+ mkdir('sub directory')
+ with file('sub directory/file with spaces ', 'wt') as f: f.write('see how this works\n')
+ self.run_bzr('add .')
+ self.run_bzr('diff', retcode=1)
+ self.run_bzr('commit -m add-spaces')
+ self.run_bzr('check')
+
+ self.run_bzr('log')
+ self.run_bzr('log --forward')
+
+ self.run_bzr('info')
+
+ if osutils.has_symlinks():
+ progress("symlinks")
+ mkdir('symlinks')
+ chdir('symlinks')
+ self.run_bzr('init')
+ os.symlink("NOWHERE1", "link1")
+ self.run_bzr('add link1')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+ self.run_bzr(['commit', '-m', '1: added symlink link1'])
+
+ mkdir('d1')
+ self.run_bzr('add d1')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+ os.symlink("NOWHERE2", "d1/link2")
+ self.assertEquals(self.run_bzr('unknowns')[0], 'd1/link2\n')
+ # is d1/link2 found when adding d1
+ self.run_bzr('add d1')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+ os.symlink("NOWHERE3", "d1/link3")
+ self.assertEquals(self.run_bzr('unknowns')[0], 'd1/link3\n')
+ self.run_bzr(['commit', '-m', '2: added dir, symlink'])
+
+ self.run_bzr('rename d1 d2')
+ self.run_bzr('move d2/link2 .')
+ self.run_bzr('move link1 d2')
+ self.assertEquals(os.readlink("./link2"), "NOWHERE2")
+ self.assertEquals(os.readlink("d2/link1"), "NOWHERE1")
+ self.run_bzr('add d2/link3')
+ self.run_bzr('diff', retcode=1)
+ self.run_bzr(['commit', '-m',
+ '3: rename of dir, move symlinks, add link3'])
+
+ os.unlink("link2")
+ os.symlink("TARGET 2", "link2")
+ os.unlink("d2/link1")
+ os.symlink("TARGET 1", "d2/link1")
+ self.run_bzr('diff', retcode=1)
+ self.assertEquals(self.run_bzr("relpath d2/link1")[0], "d2/link1\n")
+ self.run_bzr(['commit', '-m', '4: retarget of two links'])
+
+ self.run_bzr('remove --keep d2/link1')
+ self.assertEquals(self.run_bzr('unknowns')[0], 'd2/link1\n')
+ self.run_bzr(['commit', '-m', '5: remove d2/link1'])
+ # try with the rm alias
+ self.run_bzr('add d2/link1')
+ self.run_bzr(['commit', '-m', '6: add d2/link1'])
+ self.run_bzr('rm --keep d2/link1')
+ self.assertEquals(self.run_bzr('unknowns')[0], 'd2/link1\n')
+ self.run_bzr(['commit', '-m', '7: remove d2/link1'])
+
+ os.mkdir("d1")
+ self.run_bzr('add d1')
+ self.run_bzr('rename d2/link3 d1/link3new')
+ self.assertEquals(self.run_bzr('unknowns')[0], 'd2/link1\n')
+ self.run_bzr(['commit', '-m',
+ '8: remove d2/link1, move/rename link3'])
+
+ self.run_bzr('check')
+
+ self.run_bzr('export -r 1 exp1.tmp')
+ chdir("exp1.tmp")
+ self.assertEquals(listdir_sorted("."), [ "link1" ])
+ self.assertEquals(os.readlink("link1"), "NOWHERE1")
+ chdir("..")
+
+ self.run_bzr('export -r 2 exp2.tmp')
+ chdir("exp2.tmp")
+ self.assertEquals(listdir_sorted("."), [ "d1", "link1" ])
+ chdir("..")
+
+ self.run_bzr('export -r 3 exp3.tmp')
+ chdir("exp3.tmp")
+ self.assertEquals(listdir_sorted("."), [ "d2", "link2" ])
+ self.assertEquals(listdir_sorted("d2"), [ "link1", "link3" ])
+ self.assertEquals(os.readlink("d2/link1"), "NOWHERE1")
+ self.assertEquals(os.readlink("link2") , "NOWHERE2")
+ chdir("..")
+
+ self.run_bzr('export -r 4 exp4.tmp')
+ chdir("exp4.tmp")
+ self.assertEquals(listdir_sorted("."), [ "d2", "link2" ])
+ self.assertEquals(os.readlink("d2/link1"), "TARGET 1")
+ self.assertEquals(os.readlink("link2") , "TARGET 2")
+ self.assertEquals(listdir_sorted("d2"), [ "link1", "link3" ])
+ chdir("..")
+
+ self.run_bzr('export -r 5 exp5.tmp')
+ chdir("exp5.tmp")
+ self.assertEquals(listdir_sorted("."), [ "d2", "link2" ])
+ self.assert_(os.path.islink("link2"))
+ self.assert_(listdir_sorted("d2")== [ "link3" ])
+ chdir("..")
+
+ self.run_bzr('export -r 8 exp6.tmp')
+ chdir("exp6.tmp")
+ self.assertEqual(listdir_sorted("."), [ "d1", "d2", "link2"])
+ self.assertEquals(listdir_sorted("d1"), [ "link3new" ])
+ self.assertEquals(listdir_sorted("d2"), [])
+ self.assertEquals(os.readlink("d1/link3new"), "NOWHERE3")
+ chdir("..")
+ else:
+ progress("skipping symlink tests")
+
+
+class RemoteTests(object):
+ """Test bzr ui commands against remote branches."""
+
+ def test_branch(self):
+ os.mkdir('from')
+ wt = self.make_branch_and_tree('from')
+ branch = wt.branch
+ wt.commit('empty commit for nonsense', allow_pointless=True)
+ url = self.get_readonly_url('from')
+ self.run_bzr(['branch', url, 'to'])
+ branch = Branch.open('to')
+ self.assertEqual(1, branch.last_revision_info()[0])
+ # the parent of branch 'to' should be set to 'from'
+ self.assertEqual(url + '/', branch.get_parent())
+
+ def test_log(self):
+ self.build_tree(['branch/', 'branch/file'])
+ self.run_bzr('init branch')[0]
+ self.run_bzr('add branch/file')[0]
+ self.run_bzr('commit -m foo branch')[0]
+ url = self.get_readonly_url('branch/file')
+ output = self.run_bzr('log %s' % url)[0]
+ self.assertEqual(8, len(output.split('\n')))
+
+ def test_check(self):
+ self.build_tree(['branch/', 'branch/file'])
+ self.run_bzr('init branch')[0]
+ self.run_bzr('add branch/file')[0]
+ self.run_bzr('commit -m foo branch')[0]
+ url = self.get_readonly_url('branch/')
+ self.run_bzr(['check', url])
+
+ def test_push(self):
+ # create a source branch
+ os.mkdir('my-branch')
+ os.chdir('my-branch')
+ self.run_bzr('init')
+ with file('hello', 'wt') as f: f.write('foo')
+ self.run_bzr('add hello')
+ self.run_bzr('commit -m setup')
+
+ # with an explicit target it works
+ self.run_bzr(['push', self.get_url('output-branch')])
+
+
+class HTTPTests(TestCaseWithWebserver, RemoteTests):
+ """Test various commands against a HTTP server."""
+
+
+class SFTPTestsAbsolute(TestCaseWithSFTPServer, RemoteTests):
+ """Test various commands against a SFTP server using abs paths."""
+
+
+class SFTPTestsAbsoluteSibling(TestCaseWithSFTPServer, RemoteTests):
+ """Test various commands against a SFTP server using abs paths."""
+
+ def setUp(self):
+ super(SFTPTestsAbsoluteSibling, self).setUp()
+ self._override_home = '/dev/noone/runs/tests/here'
+
+
+class SFTPTestsRelative(TestCaseWithSFTPServer, RemoteTests):
+ """Test various commands against a SFTP server using homedir rel paths."""
+
+ def setUp(self):
+ super(SFTPTestsRelative, self).setUp()
+ self._get_remote_is_absolute = False
diff --git a/bzrlib/tests/blackbox/test_uncommit.py b/bzrlib/tests/blackbox/test_uncommit.py
new file mode 100644
index 0000000..1f10579
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_uncommit.py
@@ -0,0 +1,316 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test the uncommit command."""
+
+import os
+
+from bzrlib import uncommit
+from bzrlib.bzrdir import BzrDirMetaFormat1
+from bzrlib.errors import BoundBranchOutOfDate
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.matchers import ContainsNoVfsCalls
+from bzrlib.tests.script import (
+ run_script,
+ ScriptRunner,
+ )
+
+
+class TestUncommit(TestCaseWithTransport):
+
+ def create_simple_tree(self):
+ wt = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/c'])
+ wt.add(['a', 'b', 'c'])
+ wt.commit('initial commit', rev_id='a1')
+
+ self.build_tree_contents([('tree/a', 'new contents of a\n')])
+ wt.commit('second commit', rev_id='a2')
+
+ return wt
+
+ def test_uncommit(self):
+ """Test uncommit functionality."""
+ wt = self.create_simple_tree()
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit --dry-run --force')
+ self.assertContainsRe(out, 'Dry-run')
+ self.assertNotContainsRe(out, 'initial commit')
+ self.assertContainsRe(out, 'second commit')
+
+ # Nothing has changed
+ self.assertEqual(['a2'], wt.get_parent_ids())
+
+ # Uncommit, don't prompt
+ out, err = self.run_bzr('uncommit --force')
+ self.assertNotContainsRe(out, 'initial commit')
+ self.assertContainsRe(out, 'second commit')
+
+ # This should look like we are back in revno 1
+ self.assertEqual(['a1'], wt.get_parent_ids())
+ out, err = self.run_bzr('status')
+ self.assertEquals(out, 'modified:\n a\n')
+
+ def test_uncommit_interactive(self):
+ """Uncommit seeks confirmation, and doesn't proceed without it."""
+ wt = self.create_simple_tree()
+ os.chdir('tree')
+ run_script(self, """
+ $ bzr uncommit
+ ...
+ The above revision(s) will be removed.
+ 2>Uncommit these revisions? ([y]es, [n]o): no
+ <n
+ Canceled
+ """)
+ self.assertEqual(['a2'], wt.get_parent_ids())
+
+ def test_uncommit_no_history(self):
+ wt = self.make_branch_and_tree('tree')
+ out, err = self.run_bzr('uncommit --force', retcode=1)
+ self.assertEqual('', err)
+ self.assertEqual('No revisions to uncommit.\n', out)
+
+ def test_uncommit_checkout(self):
+ wt = self.create_simple_tree()
+ checkout_tree = wt.branch.create_checkout('checkout')
+
+ self.assertEqual(['a2'], checkout_tree.get_parent_ids())
+
+ os.chdir('checkout')
+ out, err = self.run_bzr('uncommit --dry-run --force')
+ self.assertContainsRe(out, 'Dry-run')
+ self.assertNotContainsRe(out, 'initial commit')
+ self.assertContainsRe(out, 'second commit')
+
+ self.assertEqual(['a2'], checkout_tree.get_parent_ids())
+
+ out, err = self.run_bzr('uncommit --force')
+ self.assertNotContainsRe(out, 'initial commit')
+ self.assertContainsRe(out, 'second commit')
+
+ # uncommit in a checkout should uncommit the parent branch
+ # (but doesn't affect the other working tree)
+ self.assertEquals(['a1'], checkout_tree.get_parent_ids())
+ self.assertEquals('a1', wt.branch.last_revision())
+ self.assertEquals(['a2'], wt.get_parent_ids())
+
+ def test_uncommit_bound(self):
+ os.mkdir('a')
+ a = BzrDirMetaFormat1().initialize('a')
+ a.create_repository()
+ a.create_branch()
+ t_a = a.create_workingtree()
+ t_a.commit('commit 1')
+ t_a.commit('commit 2')
+ t_a.commit('commit 3')
+ b = t_a.branch.create_checkout('b').branch
+ uncommit.uncommit(b)
+ self.assertEqual(b.last_revision_info()[0], 2)
+ self.assertEqual(t_a.branch.last_revision_info()[0], 2)
+ # update A's tree to not have the uncommitted revision referenced.
+ t_a.update()
+ t_a.commit('commit 3b')
+ self.assertRaises(BoundBranchOutOfDate, uncommit.uncommit, b)
+ b.pull(t_a.branch)
+ uncommit.uncommit(b)
+
+ def test_uncommit_bound_local(self):
+ t_a = self.make_branch_and_tree('a')
+ rev_id1 = t_a.commit('commit 1')
+ rev_id2 = t_a.commit('commit 2')
+ rev_id3 = t_a.commit('commit 3')
+ b = t_a.branch.create_checkout('b').branch
+
+ out, err = self.run_bzr(['uncommit', '--local', 'b', '--force'])
+ self.assertEqual(rev_id3, t_a.last_revision())
+ self.assertEqual((3, rev_id3), t_a.branch.last_revision_info())
+ self.assertEqual((2, rev_id2), b.last_revision_info())
+
+ def test_uncommit_revision(self):
+ wt = self.create_simple_tree()
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit -r1 --force')
+
+ self.assertNotContainsRe(out, 'initial commit')
+ self.assertContainsRe(out, 'second commit')
+ self.assertEqual(['a1'], wt.get_parent_ids())
+ self.assertEqual('a1', wt.branch.last_revision())
+
+ def test_uncommit_neg_1(self):
+ wt = self.create_simple_tree()
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit -r -1', retcode=1)
+ self.assertEqual('No revisions to uncommit.\n', out)
+
+ def test_uncommit_merges(self):
+ wt = self.create_simple_tree()
+
+ tree2 = wt.bzrdir.sprout('tree2').open_workingtree()
+
+ tree2.commit('unchanged', rev_id='b3')
+ tree2.commit('unchanged', rev_id='b4')
+
+ wt.merge_from_branch(tree2.branch)
+ wt.commit('merge b4', rev_id='a3')
+
+ self.assertEqual(['a3'], wt.get_parent_ids())
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit --force')
+
+ self.assertEqual(['a2', 'b4'], wt.get_parent_ids())
+
+ def test_uncommit_pending_merge(self):
+ wt = self.create_simple_tree()
+ tree2 = wt.bzrdir.sprout('tree2').open_workingtree()
+ tree2.commit('unchanged', rev_id='b3')
+
+ wt.branch.fetch(tree2.branch)
+ wt.set_pending_merges(['b3'])
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit --force')
+ self.assertEqual(['a1', 'b3'], wt.get_parent_ids())
+
+ def test_uncommit_multiple_merge(self):
+ wt = self.create_simple_tree()
+
+ tree2 = wt.bzrdir.sprout('tree2').open_workingtree()
+ tree2.commit('unchanged', rev_id='b3')
+
+ tree3 = wt.bzrdir.sprout('tree3').open_workingtree()
+ tree3.commit('unchanged', rev_id='c3')
+
+ wt.merge_from_branch(tree2.branch)
+ wt.commit('merge b3', rev_id='a3')
+
+ wt.merge_from_branch(tree3.branch)
+ wt.commit('merge c3', rev_id='a4')
+
+ self.assertEqual(['a4'], wt.get_parent_ids())
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit --force -r 2')
+
+ self.assertEqual(['a2', 'b3', 'c3'], wt.get_parent_ids())
+
+ def test_uncommit_merge_plus_pending(self):
+ wt = self.create_simple_tree()
+
+ tree2 = wt.bzrdir.sprout('tree2').open_workingtree()
+ tree2.commit('unchanged', rev_id='b3')
+ tree3 = wt.bzrdir.sprout('tree3').open_workingtree()
+ tree3.commit('unchanged', rev_id='c3')
+
+ wt.branch.fetch(tree2.branch)
+ wt.set_pending_merges(['b3'])
+ wt.commit('merge b3', rev_id='a3')
+
+
+ wt.merge_from_branch(tree3.branch)
+
+ self.assertEqual(['a3', 'c3'], wt.get_parent_ids())
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit --force -r 2')
+
+ self.assertEqual(['a2', 'b3', 'c3'], wt.get_parent_ids())
+
+ def test_uncommit_shows_log_with_revision_id(self):
+ wt = self.create_simple_tree()
+
+ script = ScriptRunner()
+ script.run_script(self, """
+$ cd tree
+$ bzr uncommit --force
+ 2 ...
+ second commit
+...
+The above revision(s) will be removed.
+You can restore the old tip by running:
+ bzr pull . -r revid:a2
+""")
+
+ def test_uncommit_octopus_merge(self):
+ # Check that uncommit keeps the pending merges in the same order
+ # though it will also filter out ones in the ancestry
+ wt = self.create_simple_tree()
+
+ tree2 = wt.bzrdir.sprout('tree2').open_workingtree()
+ tree3 = wt.bzrdir.sprout('tree3').open_workingtree()
+
+ tree2.commit('unchanged', rev_id='b3')
+ tree3.commit('unchanged', rev_id='c3')
+
+ wt.merge_from_branch(tree2.branch)
+ wt.merge_from_branch(tree3.branch, force=True)
+ wt.commit('merge b3, c3', rev_id='a3')
+
+ tree2.commit('unchanged', rev_id='b4')
+ tree3.commit('unchanged', rev_id='c4')
+
+ wt.merge_from_branch(tree3.branch)
+ wt.merge_from_branch(tree2.branch, force=True)
+ wt.commit('merge b4, c4', rev_id='a4')
+
+ self.assertEqual(['a4'], wt.get_parent_ids())
+
+ os.chdir('tree')
+ out, err = self.run_bzr('uncommit --force -r 2')
+
+ self.assertEqual(['a2', 'c4', 'b4'], wt.get_parent_ids())
+
+ def test_uncommit_nonascii(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit(u'\u1234 message')
+ out, err = self.run_bzr('uncommit --force tree', encoding='ascii')
+ self.assertContainsRe(out, r'\? message')
+
+ def test_uncommit_removes_tags(self):
+ tree = self.make_branch_and_tree('tree')
+ revid = tree.commit('message')
+ tree.branch.tags.set_tag("atag", revid)
+ out, err = self.run_bzr('uncommit --force tree')
+ self.assertEquals({}, tree.branch.tags.get_tag_dict())
+
+ def test_uncommit_keep_tags(self):
+ tree = self.make_branch_and_tree('tree')
+ revid = tree.commit('message')
+ tree.branch.tags.set_tag("atag", revid)
+ out, err = self.run_bzr('uncommit --keep-tags --force tree')
+ self.assertEquals({"atag": revid}, tree.branch.tags.get_tag_dict())
+
+
+class TestSmartServerUncommit(TestCaseWithTransport):
+
+ def test_uncommit(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('from')
+ for count in range(2):
+ t.commit(message='commit %d' % count)
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['uncommit', '--force', self.get_url('from')])
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertLength(14, self.hpss_calls)
+ self.assertLength(1, self.hpss_connections)
+ self.assertThat(self.hpss_calls, ContainsNoVfsCalls)
diff --git a/bzrlib/tests/blackbox/test_unknowns.py b/bzrlib/tests/blackbox/test_unknowns.py
new file mode 100644
index 0000000..fccf7bb
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_unknowns.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for 'bzr unknowns', which shows unknown files."""
+
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestUnknowns(TestCaseWithTransport):
+
+ def test_unknowns(self):
+ """Test that 'unknown' command reports unknown files"""
+
+ # in empty directory, no unknowns
+ tree = self.make_branch_and_tree('.')
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+
+ # single unknown file
+ self.build_tree_contents([('a', 'contents of a\n')])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'a\n')
+
+ # multiple unknown files, including one with a space in its name
+ self.build_tree(['b', 'c', 'd e'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'a\nb\nc\n"d e"\n')
+
+ # after add, file no longer shown
+ tree.add(['a', 'd e'])
+ self.assertEquals(self.run_bzr('unknowns')[0], 'b\nc\n')
+
+ # after all added, none shown
+ tree.add(['b', 'c'])
+ self.assertEquals(self.run_bzr('unknowns')[0], '')
+
+ def test_unknowns_directory(self):
+ """Test --directory option"""
+ tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/README'])
+ out, err = self.run_bzr(['unknowns', '--directory=a'])
+ self.assertEquals('README\n', out)
diff --git a/bzrlib/tests/blackbox/test_update.py b/bzrlib/tests/blackbox/test_update.py
new file mode 100644
index 0000000..1ac5588
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_update.py
@@ -0,0 +1,482 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the update command of bzr."""
+
+import os
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ osutils,
+ tests,
+ workingtree,
+ )
+from bzrlib.tests.script import ScriptRunner
+
+
+class TestUpdate(tests.TestCaseWithTransport):
+
+ def test_update_standalone_trivial(self):
+ self.make_branch_and_tree('.')
+ out, err = self.run_bzr('update')
+ self.assertEqual(
+ 'Tree is up to date at revision 0 of branch %s\n' % self.test_dir,
+ err)
+ self.assertEqual('', out)
+
+ def test_update_quiet(self):
+ self.make_branch_and_tree('.')
+ out, err = self.run_bzr('update --quiet')
+ self.assertEqual('', err)
+ self.assertEqual('', out)
+
+ def test_update_standalone_trivial_with_alias_up(self):
+ self.make_branch_and_tree('.')
+ out, err = self.run_bzr('up')
+ self.assertEqual('Tree is up to date at revision 0 of branch %s\n'
+ % self.test_dir,
+ err)
+ self.assertEqual('', out)
+
+ def test_update_up_to_date_light_checkout(self):
+ self.make_branch_and_tree('branch')
+ self.run_bzr('checkout --lightweight branch checkout')
+ out, err = self.run_bzr('update checkout')
+ self.assertEqual('Tree is up to date at revision 0 of branch %s\n'
+ % osutils.pathjoin(self.test_dir, 'branch'),
+ err)
+ self.assertEqual('', out)
+
+ def test_update_up_to_date_checkout(self):
+ self.make_branch_and_tree('branch')
+ self.run_bzr('checkout branch checkout')
+ sr = ScriptRunner()
+ sr.run_script(self, '''
+$ bzr update checkout
+2>Tree is up to date at revision 0 of branch .../branch
+''')
+
+ def test_update_out_of_date_standalone_tree(self):
+ # FIXME the default format has to change for this to pass
+ # because it currently uses the branch last-revision marker.
+ self.make_branch_and_tree('branch')
+ # make a checkout
+ self.run_bzr('checkout --lightweight branch checkout')
+ self.build_tree(['checkout/file'])
+ self.run_bzr('add checkout/file')
+ self.run_bzr('commit -m add-file checkout')
+ # now branch should be out of date
+ out,err = self.run_bzr('update branch')
+ self.assertEqual('', out)
+ self.assertEqualDiff("""+N file
+All changes applied successfully.
+Updated to revision 1 of branch %s
+""" % osutils.pathjoin(self.test_dir, 'branch',),
+ err)
+ self.assertPathExists('branch/file')
+
+ def test_update_out_of_date_light_checkout(self):
+ self.make_branch_and_tree('branch')
+ # make two checkouts
+ self.run_bzr('checkout --lightweight branch checkout')
+ self.run_bzr('checkout --lightweight branch checkout2')
+ self.build_tree(['checkout/file'])
+ self.run_bzr('add checkout/file')
+ self.run_bzr('commit -m add-file checkout')
+ # now checkout2 should be out of date
+ out,err = self.run_bzr('update checkout2')
+ self.assertEqualDiff('''+N file
+All changes applied successfully.
+Updated to revision 1 of branch %s
+''' % osutils.pathjoin(self.test_dir, 'branch',),
+ err)
+ self.assertEqual('', out)
+
+ def test_update_conflicts_returns_2(self):
+ self.make_branch_and_tree('branch')
+ # make two checkouts
+ self.run_bzr('checkout --lightweight branch checkout')
+ self.build_tree(['checkout/file'])
+ self.run_bzr('add checkout/file')
+ self.run_bzr('commit -m add-file checkout')
+ self.run_bzr('checkout --lightweight branch checkout2')
+ # now alter file in checkout
+ a_file = file('checkout/file', 'wt')
+ a_file.write('Foo')
+ a_file.close()
+ self.run_bzr('commit -m change-file checkout')
+ # now checkout2 should be out of date
+ # make a local change to file
+ a_file = file('checkout2/file', 'wt')
+ a_file.write('Bar')
+ a_file.close()
+ out,err = self.run_bzr('update checkout2', retcode=1)
+ self.assertEqualDiff(''' M file
+Text conflict in file
+1 conflicts encountered.
+Updated to revision 2 of branch %s
+''' % osutils.pathjoin(self.test_dir, 'branch',),
+ err)
+ self.assertEqual('', out)
+
+ def test_smoke_update_checkout_bound_branch_local_commits(self):
+ # smoke test for doing an update of a checkout of a bound
+ # branch with local commits.
+ master = self.make_branch_and_tree('master')
+ master.commit('first commit')
+ # make a bound branch
+ self.run_bzr('checkout master child')
+ # check that out
+ self.run_bzr('checkout --lightweight child checkout')
+ # get a WorkingTree object for the checkout to manipulate
+ wt = workingtree.WorkingTree.open('checkout')
+ # change master
+ a_file = file('master/file', 'wt')
+ a_file.write('Foo')
+ a_file.close()
+ master.add(['file'])
+ master_tip = master.commit('add file')
+ # change child
+ a_file = file('child/file_b', 'wt')
+ a_file.write('Foo')
+ a_file.close()
+ # get an object form of child
+ child = workingtree.WorkingTree.open('child')
+ child.add(['file_b'])
+ child_tip = child.commit('add file_b', local=True)
+ # change checkout
+ a_file = file('checkout/file_c', 'wt')
+ a_file.write('Foo')
+ a_file.close()
+ wt.add(['file_c'])
+
+ # now, update checkout ->
+ # get all three files and a pending merge.
+ out, err = self.run_bzr('update checkout')
+ self.assertEqual('', out)
+ self.assertEqualDiff("""+N file_b
+All changes applied successfully.
++N file
+All changes applied successfully.
+Updated to revision 2 of branch %s
+Your local commits will now show as pending merges with 'bzr status', and can be committed with 'bzr commit'.
+""" % osutils.pathjoin(self.test_dir, 'master',),
+ err)
+ self.assertEqual([master_tip, child_tip], wt.get_parent_ids())
+ self.assertPathExists('checkout/file')
+ self.assertPathExists('checkout/file_b')
+ self.assertPathExists('checkout/file_c')
+ self.assertTrue(wt.has_filename('file_c'))
+
+ def test_update_with_merges(self):
+ # Test that 'bzr update' works correctly when you have
+ # an update in the master tree, and a lightweight checkout
+ # which has merged another branch
+ master = self.make_branch_and_tree('master')
+ self.build_tree(['master/file'])
+ master.add(['file'])
+ master.commit('one', rev_id='m1')
+
+ self.build_tree(['checkout1/'])
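+ # Build a lightweight checkout by hand: a branch reference plus a working tree at m1.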
+ checkout_dir = bzrdir.BzrDirMetaFormat1().initialize('checkout1')
+ checkout_dir.set_branch_reference(master.branch)
+ checkout1 = checkout_dir.create_workingtree('m1')
+
+ # Create a second branch, with an extra commit
+ other = master.bzrdir.sprout('other').open_workingtree()
+ self.build_tree(['other/file2'])
+ other.add(['file2'])
+ other.commit('other2', rev_id='o2')
+
+ # Create a new commit in the master branch
+ self.build_tree(['master/file3'])
+ master.add(['file3'])
+ master.commit('f3', rev_id='m2')
+
+ # Merge the other branch into checkout
+ os.chdir('checkout1')
+ self.run_bzr('merge ../other')
+
+ self.assertEqual(['o2'], checkout1.get_parent_ids()[1:])
+
+ # At this point, 'commit' should fail, because we are out of date
+ self.run_bzr_error(["please run 'bzr update'"],
+ 'commit -m merged')
+
+ # This should not report about local commits being pending
+ # merges, because they were real merges
+ out, err = self.run_bzr('update')
+ self.assertEqual('', out)
+ self.assertEqualDiff('''+N file3
+All changes applied successfully.
+Updated to revision 2 of branch %s
+''' % osutils.pathjoin(self.test_dir, 'master',),
+ err)
+ # The pending merges should still be there
+ self.assertEqual(['o2'], checkout1.get_parent_ids()[1:])
+
+ def test_readonly_lightweight_update(self):
+ """Update a light checkout of a readonly branch"""
+ tree = self.make_branch_and_tree('branch')
+ readonly_branch = branch.Branch.open(self.get_readonly_url('branch'))
+ checkout = readonly_branch.create_checkout('checkout',
+ lightweight=True)
+ tree.commit('empty commit')
+ self.run_bzr('update checkout')
+
+ def test_update_with_merge_merged_to_master(self):
+ # Test that 'bzr update' works correctly when you have
+ # an update in the master tree, and a [lightweight or otherwise]
+ # checkout which has merged a revision that is already merged to master.
+ master = self.make_branch_and_tree('master')
+ self.build_tree(['master/file'])
+ master.add(['file'])
+ master.commit('one', rev_id='m1')
+
+ self.build_tree(['checkout1/'])
+ checkout_dir = bzrdir.BzrDirMetaFormat1().initialize('checkout1')
+ checkout_dir.set_branch_reference(master.branch)
+ checkout1 = checkout_dir.create_workingtree('m1')
+
+ # Create a second branch, with an extra commit
+ other = master.bzrdir.sprout('other').open_workingtree()
+ self.build_tree(['other/file2'])
+ other.add(['file2'])
+ other.commit('other2', rev_id='o2')
+
+ # Merge the other branch into checkout - 'start reviewing a patch'
+ checkout1.merge_from_branch(other.branch)
+ self.assertEqual(['o2'], checkout1.get_parent_ids()[1:])
+
+ # Create a new commit in the master branch - 'someone else lands it'
+ master.merge_from_branch(other.branch)
+ master.commit('f3', rev_id='m2')
+
+ # This should not report about local commits being pending
+ # merges, because they were real merges (but are now gone).
+ # It should perhaps report on them.
+ out, err = self.run_bzr('update', working_dir='checkout1')
+ self.assertEqual('', out)
+ self.assertEqualDiff('''All changes applied successfully.
+Updated to revision 2 of branch %s
+''' % osutils.pathjoin(self.test_dir, 'master',),
+ err)
+ # The pending merge should be gone, since it is now part of the basis
+ self.assertEqual([], checkout1.get_parent_ids()[1:])
+
+ def test_update_dash_r(self):
+ master = self.make_branch_and_tree('master')
+ os.chdir('master')
+ self.build_tree(['./file1'])
+ master.add(['file1'])
+ master.commit('one', rev_id='m1')
+ self.build_tree(['./file2'])
+ master.add(['file2'])
+ master.commit('two', rev_id='m2')
+
+ sr = ScriptRunner()
+ sr.run_script(self, '''
+$ bzr update -r 1
+2>-D file2
+2>All changes applied successfully.
+2>Updated to revision 1 of .../master
+''')
+ self.assertPathExists('./file1')
+ self.assertPathDoesNotExist('./file2')
+ self.assertEquals(['m1'], master.get_parent_ids())
+
+ def test_update_dash_r_outside_history(self):
+ """Ensure that we can update -r to dotted revisions.
+ """
+ master = self.make_branch_and_tree('master')
+ self.build_tree(['master/file1'])
+ master.add(['file1'])
+ master.commit('one', rev_id='m1')
+
+ # Create a second branch, with extra commits
+ other = master.bzrdir.sprout('other').open_workingtree()
+ self.build_tree(['other/file2', 'other/file3'])
+ other.add(['file2'])
+ other.commit('other2', rev_id='o2')
+ other.add(['file3'])
+ other.commit('other3', rev_id='o3')
+
+ os.chdir('master')
+ self.run_bzr('merge ../other')
+ master.commit('merge', rev_id='merge')
+
+ # Switch to o2. file3 was added only in o3 and should be deleted.
+ out, err = self.run_bzr('update -r revid:o2')
+ self.assertContainsRe(err, '-D\s+file3')
+ self.assertContainsRe(err, 'All changes applied successfully\.')
+ self.assertContainsRe(err, 'Updated to revision 1.1.1 of branch .*')
+
+ # Switch back to latest
+ out, err = self.run_bzr('update')
+ self.assertContainsRe(err, '\+N\s+file3')
+ self.assertContainsRe(err, 'All changes applied successfully\.')
+ self.assertContainsRe(err, 'Updated to revision 2 of branch .*')
+
+ def test_update_dash_r_in_master(self):
+ # Test that 'bzr update' works correctly when you have
+ # an update in the master tree,
+ master = self.make_branch_and_tree('master')
+ self.build_tree(['master/file1'])
+ master.add(['file1'])
+ master.commit('one', rev_id='m1')
+
+ self.run_bzr('checkout master checkout')
+
+ # add a revision in the master.
+ self.build_tree(['master/file2'])
+ master.add(['file2'])
+ master.commit('two', rev_id='m2')
+
+ os.chdir('checkout')
+ sr = ScriptRunner()
+ sr.run_script(self, '''
+$ bzr update -r revid:m2
+2>+N file2
+2>All changes applied successfully.
+2>Updated to revision 2 of branch .../master
+''')
+
+ def test_update_show_base(self):
+ """bzr update support --show-base
+
+ see https://bugs.launchpad.net/bzr/+bug/202374"""
+
+ tree=self.make_branch_and_tree('.')
+
+ f = open('hello','wt')
+ f.write('foo')
+ f.close()
+ tree.add('hello')
+ tree.commit('fie')
+
+ f = open('hello','wt')
+ f.write('fee')
+ f.close()
+ tree.commit('fee')
+
+ # tree.update() gives 'no such revision' here, so use the command line instead
+ self.run_bzr(['update', '-r1'])
+
+ # create a conflict
+ f = open('hello','wt')
+ f.write('fie')
+ f.close()
+
+ out, err = self.run_bzr(['update', '--show-base'], retcode=1)
+
+ # check for conflict notification
+ self.assertContainsString(err,
+ ' M hello\nText conflict in hello\n1 conflicts encountered.\n')
+
+ self.assertEqualDiff('<<<<<<< TREE\n'
+ 'fie||||||| BASE-REVISION\n'
+ 'foo=======\n'
+ 'fee>>>>>>> MERGE-SOURCE\n',
+ open('hello').read())
+
+ def test_update_checkout_prevent_double_merge(self):
+ """"Launchpad bug 113809 in bzr "update performs two merges"
+ https://launchpad.net/bugs/113809"""
+ master = self.make_branch_and_tree('master')
+ self.build_tree_contents([('master/file', 'initial contents\n')])
+ master.add(['file'])
+ master.commit('one', rev_id='m1')
+
+ checkout = master.branch.create_checkout('checkout')
+ lightweight = checkout.branch.create_checkout('lightweight',
+ lightweight=True)
+
+ # time to create a mess
+ # add a commit to the master
+ self.build_tree_contents([('master/file', 'master\n')])
+ master.commit('two', rev_id='m2')
+ self.build_tree_contents([('master/file', 'master local changes\n')])
+
+ # local commit on the checkout
+ self.build_tree_contents([('checkout/file', 'checkout\n')])
+ checkout.commit('tree', rev_id='c2', local=True)
+ self.build_tree_contents([('checkout/file',
+ 'checkout local changes\n')])
+
+ # lightweight
+ self.build_tree_contents([('lightweight/file',
+ 'lightweight local changes\n')])
+
+ # now update (and get conflicts)
+ out, err = self.run_bzr('update lightweight', retcode=1)
+ self.assertEqual('', out)
+ # NB: these conflicts are actually in the source code
+ self.assertFileEqual('''\
+<<<<<<< TREE
+lightweight local changes
+=======
+checkout
+>>>>>>> MERGE-SOURCE
+''',
+ 'lightweight/file')
+
+ # resolve it
+ self.build_tree_contents([('lightweight/file',
+ 'lightweight+checkout\n')])
+ self.run_bzr('resolve lightweight/file')
+
+ # check we get the second conflict
+ out, err = self.run_bzr('update lightweight', retcode=1)
+ self.assertEqual('', out)
+ # NB: these conflicts are actually in the source code
+ self.assertFileEqual('''\
+<<<<<<< TREE
+lightweight+checkout
+=======
+master
+>>>>>>> MERGE-SOURCE
+''',
+ 'lightweight/file')
+
+
+ def test_no_upgrade_single_file(self):
+ """There's one basis revision per tree.
+
+ Since you can't actually change the basis for a single file at the
+ moment, we don't let you think you can.
+
+ See bug 557886.
+ """
+ self.make_branch_and_tree('.')
+ self.build_tree_contents([('a/',),
+ ('a/file', 'content')])
+ sr = ScriptRunner()
+ sr.run_script(self, '''
+ $ bzr update ./a
+ 2>bzr: ERROR: bzr update can only update a whole tree, not a file or subdirectory
+ $ bzr update ./a/file
+ 2>bzr: ERROR: bzr update can only update a whole tree, not a file or subdirectory
+ $ bzr update .
+ 2>Tree is up to date at revision 0 of branch ...
+ $ cd a
+ $ bzr update .
+ 2>bzr: ERROR: bzr update can only update a whole tree, not a file or subdirectory
+ # however, you can update the whole tree from a subdirectory
+ $ bzr update
+ 2>Tree is up to date at revision 0 of branch ...
+ ''')
diff --git a/bzrlib/tests/blackbox/test_upgrade.py b/bzrlib/tests/blackbox/test_upgrade.py
new file mode 100644
index 0000000..a0f2525
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_upgrade.py
@@ -0,0 +1,296 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black box tests for the upgrade ui."""
+import os
+import stat
+
+from bzrlib import (
+ bzrdir,
+ controldir,
+ lockable_files,
+ ui,
+ urlutils,
+ )
+from bzrlib.tests import (
+ features,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.repofmt.knitpack_repo import RepositoryFormatKnitPack1
+
+
+class OldBzrDir(bzrdir.BzrDirMeta1):
+ """An test bzr dir implementation"""
+
+ def needs_format_conversion(self, format):
+ return not isinstance(format, self.__class__)
+
+
+class ConvertOldTestToMeta(controldir.Converter):
+ """A trivial converter, used for testing."""
+
+ def convert(self, to_convert, pb):
+ ui.ui_factory.note('starting upgrade from old test format to 2a')
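+ # Overwrite the control dir's format marker so it now reads as a Meta1 dir.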
+ to_convert.control_transport.put_bytes(
+ 'branch-format',
+ bzrdir.BzrDirMetaFormat1().get_format_string(),
+ mode=to_convert._get_file_mode())
+ return controldir.ControlDir.open(to_convert.user_url)
+
+
+class OldBzrDirFormat(bzrdir.BzrDirMetaFormat1):
+
+ _lock_class = lockable_files.TransportLock
+
+ def get_converter(self, format=None):
+ return ConvertOldTestToMeta()
+
+ @classmethod
+ def get_format_string(cls):
+ return "Ancient Test Format"
+
+ def _open(self, transport):
+ return OldBzrDir(transport, self)
+
+
+class TestWithUpgradableBranches(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestWithUpgradableBranches, self).setUp()
+
+ def make_current_format_branch_and_checkout(self):
+ current_tree = self.make_branch_and_tree('current_format_branch',
+ format='default')
+ current_tree.branch.create_checkout(
+ self.get_url('current_format_checkout'), lightweight=True)
+
+ def test_readonly_url_error(self):
+ self.make_branch_and_tree("old_format_branch", format="knit")
+ (out, err) = self.run_bzr(
+ ['upgrade', self.get_readonly_url("old_format_branch")], retcode=3)
+ err_msg = 'Upgrade URL cannot work with readonly URLs.'
+ self.assertEqualDiff('conversion error: %s\nbzr: ERROR: %s\n'
+ % (err_msg, err_msg),
+ err)
+
+ def test_upgrade_up_to_date(self):
+ self.make_current_format_branch_and_checkout()
+ # when up to date we should get a message to that effect
+ burl = self.get_transport('current_format_branch').local_abspath(".")
+ (out, err) = self.run_bzr('upgrade current_format_branch', retcode=0)
+ self.assertEqual(
+ 'Upgrading branch %s/ ...\n'
+ 'The branch format %s is already at the most recent format.\n'
+ % (burl, 'Meta directory format 1'),
+ out)
+
+ def test_upgrade_up_to_date_checkout_warns_branch_left_alone(self):
+ self.make_current_format_branch_and_checkout()
+ # when upgrading a checkout, the branch location and a suggestion
+ # to upgrade it should be emitted even if the checkout is up to
+ # date
+ burl = self.get_transport('current_format_branch').local_abspath(".")
+ curl = self.get_transport('current_format_checkout').local_abspath(".")
+ (out, err) = self.run_bzr('upgrade current_format_checkout', retcode=0)
+ self.assertEqual(
+ 'Upgrading branch %s/ ...\nThis is a checkout.'
+ ' The branch (%s/) needs to be upgraded separately.\n'
+ 'The branch format %s is already at the most recent format.\n'
+ % (curl, burl, 'Meta directory format 1'),
+ out)
+
+ def test_upgrade_checkout(self):
+ # upgrading a checkout should work
+ pass
+
+ def test_upgrade_repository_scans_branches(self):
+ # we should get individual upgrade notes for each branch, even for
+ # the anonymous branch
+ pass
+
+ def test_upgrade_branch_in_repo(self):
+ # upgrading a branch in a repo should warn about not upgrading the repo
+ pass
+
+ def test_upgrade_control_dir(self):
+ old_format = OldBzrDirFormat()
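+ # Register the old format with the prober so bzr can recognise it;
+ # the cleanups undo the registration and restore the default format.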
+ self.addCleanup(bzrdir.BzrProber.formats.remove,
+ old_format.get_format_string())
+ bzrdir.BzrProber.formats.register(old_format.get_format_string(),
+ old_format)
+ self.addCleanup(controldir.ControlDirFormat._set_default_format,
+ controldir.ControlDirFormat.get_default_format())
+
+ # setup an old format branch we can upgrade from.
+ path = 'old_format_branch'
+ self.make_branch_and_tree(path, format=old_format)
+ transport = self.get_transport(path)
+ url = transport.base
+ display_url = transport.local_abspath('.')
+ # check --format takes effect
+ controldir.ControlDirFormat._set_default_format(old_format)
+ backup_dir = 'backup.bzr.~1~'
+ (out, err) = self.run_bzr(
+ ['upgrade', '--format=2a', url])
+ self.assertEqualDiff("""Upgrading branch %s/ ...
+starting upgrade of %s/
+making backup of %s/.bzr
+ to %s/%s
+starting upgrade from old test format to 2a
+finished
+""" % (display_url, display_url, display_url, display_url, backup_dir), out)
+ self.assertEqualDiff("", err)
+ self.assertTrue(isinstance(
+ controldir.ControlDir.open(self.get_url(path))._format,
+ bzrdir.BzrDirMetaFormat1))
+
+ def test_upgrade_explicit_knit(self):
+ # users can force an upgrade from a knit branch to the metadir
+ # pack-0.92 format instead of going all the way to 2a.
+ self.make_branch_and_tree('branch', format='knit')
+ transport = self.get_transport('branch')
+ url = transport.base
+ display_url = transport.local_abspath('.')
+ # check --format takes effect
+ backup_dir = 'backup.bzr.~1~'
+ (out, err) = self.run_bzr(
+ ['upgrade', '--format=pack-0.92', url])
+ self.assertEqualDiff("""Upgrading branch %s/ ...
+starting upgrade of %s/
+making backup of %s/.bzr
+ to %s/%s
+starting repository conversion
+repository converted
+finished
+""" % (display_url, display_url, display_url, display_url, backup_dir),
+ out)
+ self.assertEqualDiff("", err)
+ converted_dir = controldir.ControlDir.open(self.get_url('branch'))
+ self.assertTrue(isinstance(converted_dir._format,
+ bzrdir.BzrDirMetaFormat1))
+ self.assertTrue(isinstance(converted_dir.open_repository()._format,
+ RepositoryFormatKnitPack1))
+
+ def test_upgrade_repo(self):
+ self.run_bzr('init-repository --format=pack-0.92 repo')
+ self.run_bzr('upgrade --format=2a repo')
+
+ def assertLegalOption(self, option_str):
+ # Confirm that an option is legal. (Lower level tests are
+ # expected to validate the actual functionality.)
+ self.run_bzr('init --format=pack-0.92 branch-foo')
+ self.run_bzr('upgrade --format=2a branch-foo %s' % (option_str,))
+
+ def assertBranchFormat(self, dir, format):
+ branch = controldir.ControlDir.open_tree_or_branch(self.get_url(dir))[1]
+ branch_format = branch._format
+ meta_format = controldir.format_registry.make_bzrdir(format)
+ expected_format = meta_format.get_branch_format()
+ self.assertEqual(expected_format, branch_format)
+
+ def test_upgrade_clean_supported(self):
+ self.assertLegalOption('--clean')
+ self.assertBranchFormat('branch-foo', '2a')
+ backup_bzr_dir = os.path.join("branch-foo", "backup.bzr.~1~")
+ self.assertFalse(os.path.exists(backup_bzr_dir))
+
+ def test_upgrade_dry_run_supported(self):
+ self.assertLegalOption('--dry-run')
+ self.assertBranchFormat('branch-foo', 'pack-0.92')
+
+ def test_upgrade_permission_check(self):
+ """'backup.bzr' should retain permissions of .bzr. Bug #262450"""
+ self.requireFeature(features.posix_permissions_feature)
+ old_perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ backup_dir = 'backup.bzr.~1~'
+ self.run_bzr('init --format=1.6')
+ os.chmod('.bzr', old_perms)
+ self.run_bzr('upgrade')
+ new_perms = os.stat(backup_dir).st_mode & 0777
+ self.assertTrue(new_perms == old_perms)
+
+ def test_upgrade_with_existing_backup_dir(self):
+ self.make_branch_and_tree("old_format_branch", format="knit")
+ t = self.get_transport("old_format_branch")
+ url = t.base
+ display_url = t.local_abspath('.')
+ backup_dir1 = 'backup.bzr.~1~'
+ backup_dir2 = 'backup.bzr.~2~'
+ # explicitly create backup_dir1. bzr should create the .~2~ directory
+ # as backup
+ t.mkdir(backup_dir1)
+ (out, err) = self.run_bzr(
+ ['upgrade', '--format=2a', url])
+ self.assertEqualDiff("""Upgrading branch %s/ ...
+starting upgrade of %s/
+making backup of %s/.bzr
+ to %s/%s
+starting repository conversion
+repository converted
+finished
+""" % (display_url, display_url, display_url, display_url, backup_dir2), out)
+ self.assertEqualDiff("", err)
+ self.assertTrue(isinstance(
+ controldir.ControlDir.open(self.get_url("old_format_branch"))._format,
+ bzrdir.BzrDirMetaFormat1))
+ self.assertTrue(t.has(backup_dir2))
+
+
+class SFTPTests(TestCaseWithSFTPServer):
+ """Tests for upgrade over sftp."""
+
+ def test_upgrade_url(self):
+ self.run_bzr('init --format=pack-0.92')
+ t = self.get_transport()
+ url = t.base
+ display_url = urlutils.unescape_for_display(url,
+ 'utf-8')
+ out, err = self.run_bzr(['upgrade', '--format=2a', url])
+ backup_dir = 'backup.bzr.~1~'
+ self.assertEqualDiff("""Upgrading branch %s ...
+starting upgrade of %s
+making backup of %s.bzr
+ to %s%s
+starting repository conversion
+repository converted
+finished
+""" % (display_url, display_url, display_url, display_url, backup_dir), out)
+ self.assertEqual('', err)
+
+
+class UpgradeRecommendedTests(TestCaseWithTransport):
+
+ def test_recommend_upgrade_wt4(self):
+ # using a deprecated format gives a warning
+ self.run_bzr('init --knit a')
+ out, err = self.run_bzr('status a')
+ self.assertContainsRe(err, 'bzr upgrade .*[/\\\\]a')
+
+ def test_no_upgrade_recommendation_from_bzrdir(self):
+ # we should only get a recommendation to upgrade when we're accessing
+ # the actual workingtree, not when we only open a bzrdir that contains
+ # an old workngtree
+ self.run_bzr('init --knit a')
+ out, err = self.run_bzr('revno a')
+ if err.find('upgrade') > -1:
+ self.fail("message shouldn't suggest upgrade:\n%s" % err)
+
+ def test_upgrade_shared_repo(self):
+ repo = self.make_repository('repo', format='2a', shared=True)
+ branch = self.make_branch_and_tree('repo/branch', format="pack-0.92")
+ self.get_transport('repo/branch/.bzr/repository').delete_tree('.')
+ out, err = self.run_bzr(['upgrade'], working_dir='repo/branch')
diff --git a/bzrlib/tests/blackbox/test_version.py b/bzrlib/tests/blackbox/test_version.py
new file mode 100644
index 0000000..cf1d2c5
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_version.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2007, 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Black-box tests for bzr version."""
+
+import os
+import sys
+
+import bzrlib
+from bzrlib import osutils, trace
+from bzrlib.tests import (
+ probe_unicode_in_user_encoding,
+ TestCase,
+ TestCaseInTempDir,
+ TestSkipped,
+ )
+
+
+class TestVersion(TestCase):
+
+ def test_main_version(self):
+ """Check output from version command and master option is reasonable"""
+ # output is intentionally passed through to stdout so that we
+ # can see the version being tested
+ self.permit_source_tree_branch_repo()
+ output = self.run_bzr('version')[0]
+ self.log('bzr version output:')
+ self.log(output)
+ self.assert_(output.startswith('Bazaar (bzr) '))
+ self.assertNotEqual(output.index('Canonical'), -1)
+ # make sure --version is consistent
+ tmp_output = self.run_bzr('--version')[0]
+ self.assertEquals(output, tmp_output)
+
+ def test_version(self):
+ self.permit_source_tree_branch_repo()
+ out = self.run_bzr("version")[0]
+ self.assertTrue(len(out) > 0)
+ self.assertEqualDiff(out.splitlines()[0],
+ "Bazaar (bzr) %s" % bzrlib.__version__)
+ self.assertContainsRe(out, r"(?m)^ Python interpreter:")
+ self.assertContainsRe(out, r"(?m)^ Python standard library:")
+ self.assertContainsRe(out, r"(?m)^ bzrlib:")
+ self.assertContainsRe(out, r"(?m)^ Bazaar configuration:")
+ self.assertContainsRe(out, r'(?m)^ Bazaar log file:.*\.bzr\.log')
+
+ def test_version_short(self):
+ self.permit_source_tree_branch_repo()
+ out = self.run_bzr(["version", "--short"])[0]
+ self.assertEqualDiff(out, bzrlib.version_string + '\n')
+
+
+class TestVersionUnicodeOutput(TestCaseInTempDir):
+
+ def _check(self, args):
+ self.permit_source_tree_branch_repo()
+ # Even though the trace._bzr_log_filename variable is only used to
+ # keep the actual log filename, and changing it in selftest doesn't
+ # change the main .bzr.log location (so it is pretty safe), we still
+ # run these tests in a separate temp dir with a relative unicode path.
+ old_trace_file = trace._bzr_log_filename
+ trace._bzr_log_filename = u'\u1234/.bzr.log'
+ try:
+ out = self.run_bzr(args)[0]
+ finally:
+ trace._bzr_log_filename = old_trace_file
+ self.assertTrue(len(out) > 0)
+ self.assertContainsRe(out, r'(?m)^ Bazaar log file:.*bzr\.log')
+
+ def test_command(self):
+ self._check("version")
+
+ def test_flag(self):
+ self._check("--version")
+
+ def test_unicode_bzr_home(self):
+ uni_val, str_val = probe_unicode_in_user_encoding()
+ if uni_val is None:
+ raise TestSkipped('Cannot find a unicode character that works in'
+ ' encoding %s' % (osutils.get_user_encoding(),))
+
+ self.overrideEnv('BZR_HOME', str_val)
+ self.permit_source_tree_branch_repo()
+ out = self.run_bzr("version")[0]
+ self.assertTrue(len(out) > 0)
+ self.assertContainsRe(out, r"(?m)^ Bazaar configuration: " + str_val)
+
+
+class TestVersionBzrLogLocation(TestCaseInTempDir):
+
+ def test_simple(self):
+ bzr_log = 'my.bzr.log'
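+ # BZR_LOG redirects where bzr writes its debug log file.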
+ self.overrideEnv('BZR_LOG', bzr_log)
+ default_log = os.path.join(os.environ['BZR_HOME'], '.bzr.log')
+ self.assertPathDoesNotExist([default_log, bzr_log])
+ out = self.run_bzr_subprocess('version')[0]
+ self.assertTrue(len(out) > 0)
+ self.assertContainsRe(out, r"(?m)^ Bazaar log file: " + bzr_log)
+ self.assertPathDoesNotExist(default_log)
+ self.assertPathExists(bzr_log)
+
+ def test_dev_null(self):
+ # This test uses a subprocess to cause the log opening logic to
+ # execute. It would be better to just execute that logic directly.
+ if sys.platform == 'win32':
+ bzr_log = 'NUL'
+ else:
+ bzr_log = '/dev/null'
+ self.overrideEnv('BZR_LOG', bzr_log)
+ default_log = os.path.join(os.environ['BZR_HOME'], '.bzr.log')
+ self.assertPathDoesNotExist(default_log)
+ out = self.run_bzr_subprocess('version')[0]
+ self.assertTrue(len(out) > 0)
+ self.assertContainsRe(out, r"(?m)^ Bazaar log file: " + bzr_log)
+ self.assertPathDoesNotExist(default_log)
+
+ def test_unicode_bzr_log(self):
+ uni_val = u"\xa7"
+ enc = osutils.get_user_encoding()
+ try:
+ str_val = uni_val.encode(enc)
+ except UnicodeEncodeError:
+ self.skip("Test string %r unrepresentable in user encoding %s" % (
+ uni_val, enc))
+ self.overrideEnv('BZR_HOME', self.test_base_dir)
+ self.overrideEnv("BZR_LOG",
+ os.path.join(self.test_base_dir, uni_val).encode(enc))
+ out, err = self.run_bzr_subprocess("version")
+ uni_out = out.decode(enc)
+ self.assertContainsRe(uni_out, u"(?m)^ Bazaar log file: .*/\xa7$")
+
+
diff --git a/bzrlib/tests/blackbox/test_version_info.py b/bzrlib/tests/blackbox/test_version_info.py
new file mode 100644
index 0000000..94cfb0d
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_version_info.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Blackbox tests for version_info"""
+
+import os
+
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.version_info_formats import VersionInfoBuilder
+
+
+class TestVersionInfo(TestCaseWithTransport):
+
+ def test_invalid_format(self):
+ self.run_bzr('version-info --format quijibo', retcode=3)
+
+ def create_tree(self):
+ wt = self.make_branch_and_tree('branch')
+
+ self.build_tree(['branch/a'])
+ wt.add('a')
+ wt.commit('adding a', rev_id='r1')
+
+ self.build_tree(['branch/b'])
+ wt.add('b')
+ wt.commit('adding b', rev_id='r2')
+ return wt
+
+ def test_basic(self):
+ wt = self.create_tree()
+
+ txt = self.run_bzr('version-info branch')[0]
+ self.assertContainsRe(txt, 'date:')
+ self.assertContainsRe(txt, 'build-date:')
+ self.assertContainsRe(txt, 'revno: 2')
+ self.assertContainsRe(txt, 'revision-id: ' + wt.branch.last_revision())
+
+ def test_all(self):
+ """'--all' includes clean, revision history, and file revisions"""
+ wt = self.create_tree()
+ txt = self.run_bzr('version-info branch --all')[0]
+ self.assertContainsRe(txt, 'date:')
+ self.assertContainsRe(txt, 'revno: 2')
+ self.assertContainsRe(txt, 'revision-id: ' + wt.branch.last_revision())
+ self.assertContainsRe(txt, 'clean: True')
+ self.assertContainsRe(txt, 'revisions:')
+ for rev_id in wt.branch.repository.all_revision_ids():
+ self.assertContainsRe(txt, 'id: ' + rev_id)
+ self.assertContainsRe(txt, 'message: adding a')
+ self.assertContainsRe(txt, 'message: adding b')
+ self.assertContainsRe(txt, 'file-revisions:')
+ self.assertContainsRe(txt, 'path: a')
+ self.assertContainsRe(txt, 'path: b')
+
+ def test_clean(self):
+ """Test that --check-clean includes the right info"""
+ self.create_tree()
+
+ txt = self.run_bzr('version-info branch --check-clean')[0]
+ self.assertContainsRe(txt, 'clean: True')
+
+ self.build_tree_contents([('branch/c', 'now unclean\n')])
+ txt = self.run_bzr('version-info branch --check-clean')[0]
+ self.assertContainsRe(txt, 'clean: False')
+
+ txt = self.run_bzr('version-info branch --check-clean'
+ ' --include-file-revisions')[0]
+ self.assertContainsRe(txt, 'revision: unversioned')
+
+ os.remove('branch/c')
+
+ def test_no_working_tree(self):
+ tree = self.create_tree()
+ branch = self.make_branch('just_branch')
+ branch.pull(tree.branch)
+
+ txt = self.run_bzr('version-info just_branch')[0]
+ self.assertStartsWith(txt, 'revision-id: r2\n')
+
+ def assertEqualNoBuildDate(self, text1, text2):
+ """Compare 2 texts, but ignore the build-date field.
+
+ build-date is the current timestamp, accurate to seconds. But the
+ clock is always ticking, and it may have ticked between the time
+ that text1 and text2 were generated.
+ """
+ lines1 = text1.splitlines(True)
+ lines2 = text2.splitlines(True)
+ for line1, line2 in zip(lines1, lines2):
+ if line1.startswith('build-date: '):
+ self.assertStartsWith(line2, 'build-date: ')
+ else:
+ self.assertEqual(line1, line2)
+ self.assertEqual(len(lines1), len(lines2))
+
+ def test_no_branch(self):
+ """Test that bzr defaults to the local working directory"""
+ self.create_tree()
+
+ txt1 = self.run_bzr('version-info branch')[0]
+
+ os.chdir('branch')
+ txt2 = self.run_bzr('version-info')[0]
+ self.assertEqualNoBuildDate(txt1, txt2)
+
+ def test_rio(self):
+ """Test that we can pass --format=rio"""
+ self.create_tree()
+
+ txt = self.run_bzr('version-info branch')[0]
+ txt1 = self.run_bzr('version-info --format rio branch')[0]
+ txt2 = self.run_bzr('version-info --format=rio branch')[0]
+ self.assertEqualNoBuildDate(txt, txt1)
+ self.assertEqualNoBuildDate(txt, txt2)
+
+ def test_python(self):
+ """Test that we can do --format=python"""
+ self.create_tree()
+
+ txt = self.run_bzr('version-info --format python branch')[0]
+
+ self.assertContainsRe(txt, 'version_info = {')
+
+ def test_custom_without_template(self):
+ wt = self.make_branch_and_tree('branch')
+ out, err = self.run_bzr('version-info --custom', retcode=3)
+ self.assertContainsRe(err, r'ERROR: No template specified\.')
+
+ def test_custom_implies_all(self):
+ self.create_tree()
+ out, err = self.run_bzr('version-info --custom --template='
+ '"{revno} {branch_nick} {clean}\n" branch')
+ self.assertEqual("2 branch 1\n", out)
+ self.assertEqual("", err)
+ self.build_tree_contents([('branch/c', 'now unclean\n')])
+ out, err = self.run_bzr('version-info --custom --template='
+ '"{revno} {branch_nick} {clean}\n" branch')
+ self.assertEqual("2 branch 0\n", out)
+ self.assertEqual("", err)
+
+ def test_custom_no_clean_in_template(self):
+ def should_not_be_called(self):
+ raise AssertionError("Method on %r should not have been used" % (self,))
+ self.overrideAttr(VersionInfoBuilder, "_extract_file_revisions",
+ should_not_be_called)
+ self.create_tree()
+ out, err = self.run_bzr('version-info --custom --template=r{revno} branch')
+ self.assertEqual("r2", out)
+ self.assertEqual("", err)
+
+ def test_non_ascii(self):
+ """Test that we can output non-ascii data"""
+
+ commit_message = u'Non-ascii message with character not in latin-1: \u1234'
+
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a_file'])
+ tree.add('a_file')
+ tree.commit(commit_message)
+ out, err = self.run_bzr(
+ ['version-info', '--include-history'], encoding='latin-1')
+
+ self.assertContainsString(out, commit_message.encode('utf-8'))
+
+ def test_revision(self):
+ tree = self.create_tree()
+ branch = self.make_branch('just_branch')
+ branch.pull(tree.branch)
+
+ txt = self.run_bzr('version-info -r1 just_branch')[0]
+ self.assertStartsWith(txt, 'revision-id: r1\n')
diff --git a/bzrlib/tests/blackbox/test_versioning.py b/bzrlib/tests/blackbox/test_versioning.py
new file mode 100644
index 0000000..48895ed
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_versioning.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2005, 2006, 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests of simple versioning operations"""
+
+# TODO: test trying to commit within a directory that is not yet added
+
+
+import os
+
+from bzrlib.branch import Branch
+from bzrlib.osutils import pathjoin
+from bzrlib.tests import TestCaseInTempDir, TestCaseWithTransport
+from bzrlib.trace import mutter
+from bzrlib.workingtree import WorkingTree
+
+
+class TestMkdir(TestCaseWithTransport):
+
+ def test_mkdir_fails_cleanly(self):
+ """'mkdir' fails cleanly when no working tree is available.
+ https://bugs.launchpad.net/bzr/+bug/138600
+ """
+ # Since there is a safety working tree above us, we create a bare repo
+ # here locally.
+ shared_repo = self.make_repository('.')
+ self.run_bzr(['mkdir', 'abc'], retcode=3)
+ self.assertPathDoesNotExist('abc')
+
+ def test_mkdir(self):
+ """Basic 'bzr mkdir' operation"""
+
+ self.make_branch_and_tree('.')
+ self.run_bzr(['mkdir', 'foo'])
+ self.assert_(os.path.isdir('foo'))
+
+ self.run_bzr(['mkdir', 'foo'], retcode=3)
+
+ wt = WorkingTree.open('.')
+
+ delta = wt.changes_from(wt.basis_tree())
+
+ self.log('delta.added = %r' % delta.added)
+
+ self.assertEquals(len(delta.added), 1)
+ self.assertEquals(delta.added[0][0], 'foo')
+ self.assertFalse(delta.modified)
+
+ def test_mkdir_in_subdir(self):
+ """'bzr mkdir' operation in subdirectory"""
+
+ self.make_branch_and_tree('.')
+ self.run_bzr(['mkdir', 'dir'])
+ self.assert_(os.path.isdir('dir'))
+
+ self.log('Run mkdir in subdir')
+ self.run_bzr(['mkdir', 'subdir'], working_dir='dir')
+ self.assert_(os.path.isdir('dir/subdir'))
+
+ wt = WorkingTree.open('.')
+
+ delta = wt.changes_from(wt.basis_tree())
+
+ self.log('delta.added = %r' % delta.added)
+
+ self.assertEquals(len(delta.added), 2)
+ self.assertEquals(delta.added[0][0], 'dir')
+ self.assertEquals(delta.added[1][0], pathjoin('dir','subdir'))
+ self.assertFalse(delta.modified)
+
+ def test_mkdir_w_nested_trees(self):
+ """'bzr mkdir' with nested trees"""
+
+ self.make_branch_and_tree('.')
+ self.make_branch_and_tree('a')
+ self.make_branch_and_tree('a/b')
+
+ self.run_bzr(['mkdir', 'dir', 'a/dir', 'a/b/dir'])
+ self.assertTrue(os.path.isdir('dir'))
+ self.assertTrue(os.path.isdir('a/dir'))
+ self.assertTrue(os.path.isdir('a/b/dir'))
+
+ wt = WorkingTree.open('.')
+ wt_a = WorkingTree.open('a')
+ wt_b = WorkingTree.open('a/b')
+
+ delta = wt.changes_from(wt.basis_tree())
+ self.assertEquals(len(delta.added), 1)
+ self.assertEquals(delta.added[0][0], 'dir')
+ self.assertFalse(delta.modified)
+
+ delta = wt_a.changes_from(wt_a.basis_tree())
+ self.assertEquals(len(delta.added), 1)
+ self.assertEquals(delta.added[0][0], 'dir')
+ self.assertFalse(delta.modified)
+
+ delta = wt_b.changes_from(wt_b.basis_tree())
+ self.assertEquals(len(delta.added), 1)
+ self.assertEquals(delta.added[0][0], 'dir')
+ self.assertFalse(delta.modified)
+
+ def test_mkdir_quiet(self):
+ """'bzr mkdir --quiet' should not print a status message"""
+
+ self.make_branch_and_tree('.')
+ out, err = self.run_bzr(['mkdir', '--quiet', 'foo'])
+ self.assertEquals('', err)
+ self.assertEquals('', out)
+
+
+class SubdirCommit(TestCaseWithTransport):
+
+ def test_subdir_commit(self):
+ """Test committing a subdirectory, and committing a directory."""
+ tree = self.make_branch_and_tree('.')
+ b = tree.branch
+ self.build_tree(['a/', 'b/'])
+ def set_contents(contents):
+ self.build_tree_contents([
+ ('a/one', contents),
+ ('b/two', contents),
+ ('top', contents),
+ ])
+ set_contents('old contents')
+ tree.smart_add(['.'])
+ tree.commit('first revision')
+ set_contents('new contents')
+
+ mutter('start selective subdir commit')
+ self.run_bzr(['commit', 'a', '-m', 'commit a only'])
+
+ new = b.repository.revision_tree(b.get_rev_id(2))
+ new.lock_read()
+
+ def get_text_by_path(tree, path):
+ return tree.get_file_text(tree.path2id(path), path)
+
+ self.assertEqual(get_text_by_path(new, 'b/two'), 'old contents')
+ self.assertEqual(get_text_by_path(new, 'top'), 'old contents')
+ self.assertEqual(get_text_by_path(new, 'a/one'), 'new contents')
+ new.unlock()
+
+ # commit from here should do nothing
+ self.run_bzr(['commit', '.', '-m', 'commit subdir only', '--unchanged'],
+ working_dir='a')
+ v3 = b.repository.revision_tree(b.get_rev_id(3))
+ v3.lock_read()
+ self.assertEqual(get_text_by_path(v3, 'b/two'), 'old contents')
+ self.assertEqual(get_text_by_path(v3, 'top'), 'old contents')
+ self.assertEqual(get_text_by_path(v3, 'a/one'), 'new contents')
+ v3.unlock()
+
+ # commit in subdirectory commits whole tree
+ self.run_bzr(['commit', '-m', 'commit whole tree from subdir'],
+ working_dir='a')
+ v4 = b.repository.revision_tree(b.get_rev_id(4))
+ v4.lock_read()
+ self.assertEqual(get_text_by_path(v4, 'b/two'), 'new contents')
+ self.assertEqual(get_text_by_path(v4, 'top'), 'new contents')
+ v4.unlock()
+
+ # TODO: factor out some kind of assert_tree_state() method
diff --git a/bzrlib/tests/blackbox/test_view.py b/bzrlib/tests/blackbox/test_view.py
new file mode 100644
index 0000000..9ef05d9
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_view.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the view command"""
+
+from bzrlib import bzrdir
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.workingtree import WorkingTree
+
+
+class TestViewUI(TestCaseWithTransport):
+
+ def test_view_command_help(self):
+ out, err = self.run_bzr('help view')
+ self.assertContainsRe(out, 'Manage filtered views')
+
+ def test_define_view(self):
+ wt = self.make_branch_and_tree('.')
+ # Check definition of a new view
+ out, err = self.run_bzr('view a b c')
+ self.assertEquals(out, "Using 'my' view: a, b, c\n")
+ out, err = self.run_bzr('view e f --name foo')
+ self.assertEquals(out, "Using 'foo' view: e, f\n")
+ # Check re-definition of an existing view
+ out, err = self.run_bzr('view p q')
+ self.assertEquals(out, "Using 'foo' view: p, q\n")
+ out, err = self.run_bzr('view r s --name my')
+ self.assertEquals(out, "Using 'my' view: r, s\n")
+ # Check attempts to define the 'off' view are prevented
+ out, err = self.run_bzr('view a --name off', retcode=3)
+ self.assertContainsRe(err, "Cannot change the 'off' pseudo view")
+
+ def test_list_view(self):
+ wt = self.make_branch_and_tree('.')
+ # Check list of the current view
+ out, err = self.run_bzr('view')
+ self.assertEquals(out, "No current view.\n")
+ self.run_bzr('view a b c')
+ out, err = self.run_bzr('view')
+ self.assertEquals(out, "'my' view is: a, b, c\n")
+ # Check list of a named view
+ self.run_bzr('view e f --name foo')
+ out, err = self.run_bzr('view --name my')
+ self.assertEquals(out, "'my' view is: a, b, c\n")
+ out, err = self.run_bzr('view --name foo')
+ self.assertEquals(out, "'foo' view is: e, f\n")
+ # Check list of all views
+ out, err = self.run_bzr('view --all')
+ self.assertEquals(out.splitlines(), [
+ "Views defined:",
+ "=> foo e, f",
+ " my a, b, c",
+ ])
+ # Check list of an unknown view
+ out, err = self.run_bzr('view --name bar', retcode=3)
+ self.assertContainsRe(err, "No such view")
+
+ def test_delete_view(self):
+ wt = self.make_branch_and_tree('.')
+ # Check delete of the current view
+ out, err = self.run_bzr('view --delete', retcode=3)
+ self.assertContainsRe(err, "No current view to delete")
+ self.run_bzr('view a b c')
+ out, err = self.run_bzr('view --delete')
+ self.assertEquals(out, "Deleted 'my' view.\n")
+ # Check delete of a named view
+ self.run_bzr('view e f --name foo')
+ out, err = self.run_bzr('view --name foo --delete')
+ self.assertEquals(out, "Deleted 'foo' view.\n")
+ # Check delete of all views
+ out, err = self.run_bzr('view --delete --all')
+ self.assertEquals(out, "Deleted all views.\n")
+ # Check delete of an unknown view
+ out, err = self.run_bzr('view --delete --name bar', retcode=3)
+ self.assertContainsRe(err, "No such view")
+ # Check bad usage is reported to the user
+ out, err = self.run_bzr('view --delete --switch x', retcode=3)
+ self.assertContainsRe(err,
+ "Both --delete and --switch specified")
+ out, err = self.run_bzr('view --delete a b c', retcode=3)
+ self.assertContainsRe(err, "Both --delete and a file list specified")
+
+ def test_switch_view(self):
+ wt = self.make_branch_and_tree('.')
+ # Check switching to a named view
+ self.run_bzr('view a b c')
+ self.run_bzr('view e f --name foo')
+ out, err = self.run_bzr('view --switch my')
+ self.assertEquals(out, "Using 'my' view: a, b, c\n")
+ # Check switching off the current view does not delete it
+ out, err = self.run_bzr('view --switch off')
+ self.assertEquals(out, "Disabled 'my' view.\n")
+ # Check error reporting when attempt to switch off again
+ out, err = self.run_bzr('view --switch off', retcode=3)
+ self.assertContainsRe(err, "No current view to disable")
+ # Check bad usage is reported to the user
+ out, err = self.run_bzr('view --switch x --all', retcode=3)
+ self.assertContainsRe(err, "Both --switch and --all specified")
diff --git a/bzrlib/tests/blackbox/test_whoami.py b/bzrlib/tests/blackbox/test_whoami.py
new file mode 100644
index 0000000..65e72d0
--- /dev/null
+++ b/bzrlib/tests/blackbox/test_whoami.py
@@ -0,0 +1,151 @@
+# Copyright (C) 2006, 2007, 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Black-box tests for bzr whoami."""
+
+import bzrlib
+from bzrlib import (
+ branch,
+ config,
+ errors,
+ tests,
+ )
+
+
+class TestWhoami(tests.TestCaseWithTransport):
+
+ def assertWhoAmI(self, expected, *cmd_args, **kwargs):
+ out, err = self.run_bzr(('whoami',) + cmd_args, **kwargs)
+ self.assertEquals('', err)
+ lines = out.splitlines()
+ self.assertLength(1, lines)
+ self.assertEquals(expected, lines[0].rstrip())
+
+ def test_whoami_no_args_no_conf(self):
+ # this should always identify something, if only "john@localhost"
+ out = self.run_bzr("whoami")[0]
+ self.assertTrue(len(out) > 0)
+ self.assertEquals(1, out.count('@'))
+
+ def test_whoami_email_no_args(self):
+ out = self.run_bzr("whoami --email")[0]
+ self.assertTrue(len(out) > 0)
+ self.assertEquals(1, out.count('@'))
+
+ def test_whoami_email_arg(self):
+ # whoami --email is mutually exclusive with any arguments
+ out = self.run_bzr("whoami --email 'foo <foo@example.com>'", 3)[0]
+ self.assertEquals("", out)
+
+ def set_branch_email(self, b, email):
+ b.get_config_stack().set('email', email)
+
+ def test_whoami_branch(self):
+ """branch specific user identity works."""
+ wt = self.make_branch_and_tree('.')
+ b = bzrlib.branch.Branch.open('.')
+ self.set_branch_email(b, 'Branch Identity <branch@identi.ty>')
+ self.assertWhoAmI('Branch Identity <branch@identi.ty>')
+ self.assertWhoAmI('branch@identi.ty', '--email')
+
+ # Verify that the environment variable overrides the value
+ # in the file
+ self.overrideEnv('BZR_EMAIL', 'Different ID <other@environ.ment>')
+ self.assertWhoAmI('Different ID <other@environ.ment>')
+ self.assertWhoAmI('other@environ.ment', '--email')
+
+ def test_whoami_utf8(self):
+ """verify that an identity can be in utf-8."""
+ self.run_bzr(['whoami', u'Branch Identity \u20ac <branch@identi.ty>'],
+ encoding='utf-8')
+ self.assertWhoAmI('Branch Identity \xe2\x82\xac <branch@identi.ty>',
+ encoding='utf-8')
+ self.assertWhoAmI('branch@identi.ty', '--email')
+
+ def test_whoami_ascii(self):
+ """
+ verify that whoami doesn't totally break when the identity is utf-8
+ and the output encoding is ascii.
+ """
+ wt = self.make_branch_and_tree('.')
+ b = bzrlib.branch.Branch.open('.')
+ self.set_branch_email(b, u'Branch Identity \u20ac <branch@identi.ty>')
+ self.assertWhoAmI('Branch Identity ? <branch@identi.ty>',
+ encoding='ascii')
+ self.assertWhoAmI('branch@identi.ty', '--email',
+ encoding='ascii')
+
+ def test_warning(self):
+ """verify that a warning is displayed if no email is given."""
+ self.make_branch_and_tree('.')
+ display = self.run_bzr(['whoami', 'Branch Identity'])[1]
+ self.assertEquals('"Branch Identity" does not seem to contain an '
+ 'email address. This is allowed, but not '
+ 'recommended.\n', display)
+
+ def test_whoami_not_set(self):
+ """Ensure whoami error if username is not set and not inferred.
+ """
+ self.overrideEnv('EMAIL', None)
+ self.overrideEnv('BZR_EMAIL', None)
+ # Also, make sure that it's not inferred from mailname.
+ self.overrideAttr(config, '_auto_user_id', lambda: (None, None))
+ out, err = self.run_bzr(['whoami'], 3)
+ self.assertContainsRe(err, 'Unable to determine your name')
+
+ def test_whoami_directory(self):
+ """Test --directory option."""
+ wt = self.make_branch_and_tree('subdir')
+ self.set_branch_email(wt.branch, 'Branch Identity <branch@identi.ty>')
+ self.assertWhoAmI('Branch Identity <branch@identi.ty>',
+ '--directory', 'subdir')
+ self.run_bzr(['whoami', '--directory', 'subdir', '--branch',
+ 'Changed Identity <changed@identi.ty>'])
+ # Refresh wt as 'whoami' modified it
+ wt = wt.bzrdir.open_workingtree()
+ c = wt.branch.get_config_stack()
+ self.assertEquals('Changed Identity <changed@identi.ty>',
+ c.get('email'))
+
+ def test_whoami_remote_directory(self):
+ """Test --directory option with a remote directory."""
+ wt = self.make_branch_and_tree('subdir')
+ self.set_branch_email(wt.branch, 'Branch Identity <branch@identi.ty>')
+ url = self.get_readonly_url() + '/subdir'
+ self.assertWhoAmI('Branch Identity <branch@identi.ty>',
+ '--directory', url)
+ url = self.get_url('subdir')
+ self.run_bzr(['whoami', '--directory', url, '--branch',
+ 'Changed Identity <changed@identi.ty>'])
+ # The identity has been set in the branch config (but not the global
+ # config)
+ c = branch.Branch.open(url).get_config_stack()
+ self.assertEquals('Changed Identity <changed@identi.ty>',
+ c.get('email'))
+ # Ensuring that the value does not come from the bazaar.conf file
+ # itself requires some isolation setup
+ self.overrideEnv('BZR_EMAIL', None)
+ self.overrideEnv('EMAIL', None)
+ self.overrideAttr(config, '_auto_user_id', lambda: (None, None))
+ global_conf = config.GlobalStack()
+ self.assertRaises(errors.NoWhoami, global_conf.get, 'email')
+
+ def test_whoami_nonbranch_directory(self):
+ """Test --directory mentioning a non-branch directory."""
+ wt = self.build_tree(['subdir/'])
+ out, err = self.run_bzr("whoami --directory subdir", retcode=3)
+ self.assertContainsRe(err, 'ERROR: Not a branch')
diff --git a/bzrlib/tests/commands/__init__.py b/bzrlib/tests/commands/__init__.py
new file mode 100644
index 0000000..26cff99
--- /dev/null
+++ b/bzrlib/tests/commands/__init__.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Commands behaviour tests for bzr.
+
+Test the internal behaviour of the commands (the blackbox tests are intended to
+test the usage of the commands).
+"""
+
+# FIXME: If the separation described above from the blackbox tests is not worth
+# it, all the tests defined below should be moved to blackbox instead.
+
+def load_tests(basic_tests, module, loader):
+ suite = loader.suiteClass()
+ # add the tests for this module
+ suite.addTests(basic_tests)
+
+ testmod_names = [
+ 'bzrlib.tests.commands.test_branch',
+ 'bzrlib.tests.commands.test_cat',
+ 'bzrlib.tests.commands.test_checkout',
+ 'bzrlib.tests.commands.test_commit',
+ 'bzrlib.tests.commands.test_init',
+ 'bzrlib.tests.commands.test_init_repository',
+ 'bzrlib.tests.commands.test_merge',
+ 'bzrlib.tests.commands.test_missing',
+ 'bzrlib.tests.commands.test_pull',
+ 'bzrlib.tests.commands.test_push',
+ 'bzrlib.tests.commands.test_update',
+ 'bzrlib.tests.commands.test_revert',
+ ]
+ # add the tests for the sub modules
+ suite.addTests(loader.loadTestsFromModuleNames(testmod_names))
+
+ return suite
diff --git a/bzrlib/tests/commands/test_branch.py b/bzrlib/tests/commands/test_branch.py
new file mode 100644
index 0000000..bb48d11
--- /dev/null
+++ b/bzrlib/tests/commands/test_branch.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib.builtins import cmd_branch
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestBranch(TestCaseWithConnectionHookedTransport):
+
+ def setUp(self):
+ super(TestBranch, self).setUp()
+ self.make_branch_and_tree('branch')
+ self.start_logging_connections()
+
+ def test_branch_remote_local(self):
+ cmd = cmd_branch()
+ cmd.run(self.get_url('branch'), 'local')
+ self.assertEquals(1, len(self.connections))
+
+ def test_branch_local_remote(self):
+ cmd = cmd_branch()
+ cmd.run('branch', self.get_url('remote'))
+ self.assertEquals(1, len(self.connections))
+
+ def test_branch_remote_remote(self):
+ cmd = cmd_branch()
+ cmd.run(self.get_url('branch'), self.get_url('remote'))
+ self.assertEquals(2, len(self.connections))
+
diff --git a/bzrlib/tests/commands/test_cat.py b/bzrlib/tests/commands/test_cat.py
new file mode 100644
index 0000000..20ca9f6
--- /dev/null
+++ b/bzrlib/tests/commands/test_cat.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import sys
+
+from bzrlib.builtins import cmd_cat
+from bzrlib.tests import StringIOWrapper
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestCat(TestCaseWithConnectionHookedTransport):
+
+ def setUp(self):
+ super(TestCat, self).setUp()
+ # Redirect sys.stdout as this is what cat uses
+ self.outf = StringIOWrapper()
+ self.overrideAttr(sys, 'stdout', self.outf)
+
+ def test_cat(self):
+ # FIXME: sftp raises ReadError instead of NoSuchFile when probing for
+ # branch/foo/.bzr/branch-format when used with the paramiko test
+ # server.
+ from bzrlib.tests import TestSkipped
+ raise TestSkipped('SFTPTransport raises incorrect exception'
+ ' when reading from paramiko server')
+ wt1 = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/foo', 'foo')])
+ wt1.add('foo')
+ wt1.commit('add foo')
+
+ self.start_logging_connections()
+
+ cmd = cmd_cat()
+ cmd.run(self.get_url('branch/foo'))
+ self.assertEquals(1, len(self.connections))
+ self.assertEquals('foo', self.outf.getvalue())
+
diff --git a/bzrlib/tests/commands/test_checkout.py b/bzrlib/tests/commands/test_checkout.py
new file mode 100644
index 0000000..2f38b04
--- /dev/null
+++ b/bzrlib/tests/commands/test_checkout.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.builtins import cmd_checkout
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestCheckout(TestCaseWithConnectionHookedTransport):
+
+ def test_checkout(self):
+ self.make_branch_and_tree('branch1')
+
+ self.start_logging_connections()
+
+ cmd = cmd_checkout()
+ cmd.run(self.get_url('branch1'), 'local')
+ self.assertEquals(1, len(self.connections))
+
+ def test_checkout_lightweight(self):
+ self.make_branch_and_tree('branch1')
+
+ self.start_logging_connections()
+
+ cmd = cmd_checkout()
+ cmd.run(self.get_url('branch1'), 'local', lightweight=True)
+ self.assertEquals(1, len(self.connections))
+
diff --git a/bzrlib/tests/commands/test_commit.py b/bzrlib/tests/commands/test_commit.py
new file mode 100644
index 0000000..587e2a8
--- /dev/null
+++ b/bzrlib/tests/commands/test_commit.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+from bzrlib import (
+ branch,
+ builtins,
+ errors,
+ )
+from bzrlib.tests import transport_util
+
+
+class TestCommitWithBoundBranch(
+ transport_util.TestCaseWithConnectionHookedTransport):
+
+ def setUp(self):
+ super(TestCommitWithBoundBranch, self).setUp()
+ self.master_wt = self.make_branch_and_tree('master')
+ self.local_wt = self.make_branch_and_tree('local')
+
+ master_branch = branch.Branch.open(self.get_url('master'))
+ self.local_wt.branch.bind(master_branch)
+
+ def test_commit_mine_modified(self):
+
+ self.start_logging_connections()
+
+ commit = builtins.cmd_commit()
+ # commit does not provide a directory parameter, so we have to change
+ # dir manually
+ os.chdir('local')
+ commit.run(message=u'empty commit', unchanged=True)
+ self.assertEquals(1, len(self.connections))
+
+ def test_commit_both_modified(self):
+ self.master_wt.commit('empty commit on master')
+ self.start_logging_connections()
+
+ commit = builtins.cmd_commit()
+ # commit does not provide a directory parameter, so we have to change
+ # dir manually
+ os.chdir('local')
+ self.assertRaises(errors.BoundBranchOutOfDate, commit.run,
+ message=u'empty commit', unchanged=True)
+ self.assertEquals(1, len(self.connections))
+
+ def test_commit_local(self):
+ """Commits with --local should not connect to the master!"""
+ self.start_logging_connections()
+
+ commit = builtins.cmd_commit()
+ # commit does not provide a directory parameter, so we have to change
+ # dir manually
+ os.chdir('local')
+ commit.run(message=u'empty commit', unchanged=True, local=True)
+
+ # it shouldn't open any connections
+ self.assertEquals(0, len(self.connections))
diff --git a/bzrlib/tests/commands/test_init.py b/bzrlib/tests/commands/test_init.py
new file mode 100644
index 0000000..c4eebdd
--- /dev/null
+++ b/bzrlib/tests/commands/test_init.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import tests
+from bzrlib.builtins import cmd_init
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestInit(TestCaseWithConnectionHookedTransport):
+
+ def setUp(self):
+ super(TestInit, self).setUp()
+ self.start_logging_connections()
+
+ def test_init(self):
+ cmd = cmd_init()
+ # We don't care about the output but 'outf' should be defined
+ cmd.outf = tests.StringIOWrapper()
+ cmd.run(self.get_url())
+ self.assertEquals(1, len(self.connections))
+
diff --git a/bzrlib/tests/commands/test_init_repository.py b/bzrlib/tests/commands/test_init_repository.py
new file mode 100644
index 0000000..d5463c8
--- /dev/null
+++ b/bzrlib/tests/commands/test_init_repository.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import tests
+from bzrlib.builtins import cmd_init_repository
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestInitRepository(TestCaseWithConnectionHookedTransport):
+
+ def setUp(self):
+ super(TestInitRepository, self).setUp()
+ self.start_logging_connections()
+
+ def test_init_repository(self):
+ cmd = cmd_init_repository()
+ # We don't care about the output but 'outf' should be defined
+ cmd.outf = tests.StringIOWrapper()
+ cmd.run(self.get_url())
+ self.assertEquals(1, len(self.connections))
+
diff --git a/bzrlib/tests/commands/test_merge.py b/bzrlib/tests/commands/test_merge.py
new file mode 100644
index 0000000..ab440dc
--- /dev/null
+++ b/bzrlib/tests/commands/test_merge.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib.builtins import cmd_merge
+from bzrlib.tests import StringIOWrapper
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestMerge(TestCaseWithConnectionHookedTransport):
+
+ def test_merge(self):
+ wt1 = self.make_branch_and_tree('branch1')
+ wt1.commit('empty commit')
+ wt2 = self.make_branch_and_tree('branch2')
+ wt2.pull(wt1.branch)
+ wt2.commit('empty commit too')
+
+ self.start_logging_connections()
+
+ cmd = cmd_merge()
+ # We don't care about the output but 'outf' should be defined
+ cmd.outf = StringIOWrapper()
+ cmd.run(self.get_url('branch1'), directory='branch2')
+ self.assertEquals(1, len(self.connections))
diff --git a/bzrlib/tests/commands/test_missing.py b/bzrlib/tests/commands/test_missing.py
new file mode 100644
index 0000000..cde9512
--- /dev/null
+++ b/bzrlib/tests/commands/test_missing.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib.builtins import cmd_missing
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestMissing(TestCaseWithConnectionHookedTransport):
+
+ def test_missing(self):
+ wt1 = self.make_branch_and_tree('.')
+ wt1.commit('empty commit')
+ wt2 = self.make_branch_and_tree('branch2')
+ wt2.pull(wt1.branch)
+ wt2.commit('empty commit too')
+
+ self.start_logging_connections()
+
+ cmd = cmd_missing()
+ # We don't care about the output but 'outf' should be defined
+ cmd.outf = self.make_utf8_encoded_stringio()
+ cmd.run(self.get_url('branch2'))
+ self.assertEquals(1, len(self.connections))
+
diff --git a/bzrlib/tests/commands/test_pull.py b/bzrlib/tests/commands/test_pull.py
new file mode 100644
index 0000000..932df93
--- /dev/null
+++ b/bzrlib/tests/commands/test_pull.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ branch,
+ builtins,
+ tests,
+ )
+from bzrlib.tests import transport_util
+
+
+class TestPull(transport_util.TestCaseWithConnectionHookedTransport):
+
+ def test_pull(self):
+ wt1 = self.make_branch_and_tree('branch1')
+ tip = wt1.commit('empty commit')
+ wt2 = self.make_branch_and_tree('branch2')
+
+ self.start_logging_connections()
+
+ cmd = builtins.cmd_pull()
+ # We don't care about the output but 'outf' should be defined
+ cmd.outf = tests.StringIOWrapper()
+ cmd.run(self.get_url('branch1'), directory='branch2')
+ self.assertEquals(1, len(self.connections))
+
+ def test_pull_with_bound_branch(self):
+
+ master_wt = self.make_branch_and_tree('master')
+ local_wt = self.make_branch_and_tree('local')
+ master_branch = branch.Branch.open(self.get_url('master'))
+ local_wt.branch.bind(master_branch)
+
+ remote_wt = self.make_branch_and_tree('remote')
+ remote_wt.commit('empty commit')
+
+ self.start_logging_connections()
+
+ pull = builtins.cmd_pull()
+ # We don't care about the output but 'outf' should be defined
+ pull.outf = tests.StringIOWrapper()
+ pull.run(self.get_url('remote'), directory='local')
+ self.assertEquals(2, len(self.connections))
+
diff --git a/bzrlib/tests/commands/test_push.py b/bzrlib/tests/commands/test_push.py
new file mode 100644
index 0000000..6f46bfe
--- /dev/null
+++ b/bzrlib/tests/commands/test_push.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import tests
+from bzrlib.builtins import cmd_push
+from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport
+
+
+class TestPush(TestCaseWithConnectionHookedTransport):
+
+ def test_push(self):
+ self.make_branch_and_tree('branch')
+
+ self.start_logging_connections()
+
+ cmd = cmd_push()
+ # We don't care about the output but 'outf' should be defined
+ cmd.outf = tests.StringIOWrapper()
+ cmd.run(self.get_url('remote'), directory='branch')
+ self.assertEquals(1, len(self.connections))
+
+ def test_push_onto_stacked(self):
+ self.make_branch_and_tree('base', format='1.9')
+ self.make_branch_and_tree('source', format='1.9')
+
+ self.start_logging_connections()
+
+ cmd = cmd_push()
+ cmd.outf = tests.StringIOWrapper()
+ cmd.run(self.get_url('remote'), directory='source',
+ stacked_on=self.get_url('base'))
+ self.assertEqual(1, len(self.connections))
diff --git a/bzrlib/tests/commands/test_revert.py b/bzrlib/tests/commands/test_revert.py
new file mode 100644
index 0000000..c1ab41a
--- /dev/null
+++ b/bzrlib/tests/commands/test_revert.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+from bzrlib import (
+ branch,
+ builtins,
+ errors,
+ lock,
+ )
+from bzrlib.tests import (
+ transport_util,
+ TestCaseInTempDir,
+ )
+
+
+class TestRevert(TestCaseInTempDir):
+
+ def setUp(self):
+ super(TestRevert, self).setUp()
+
+ def test_revert_tree_write_lock_and_branch_read_lock(self):
+
+ # install lock hooks to find out about cmd_revert's locking actions
+ locks_acquired = []
+ locks_released = []
+ lock.Lock.hooks.install_named_hook('lock_acquired',
+ locks_acquired.append, None)
+ lock.Lock.hooks.install_named_hook('lock_released',
+ locks_released.append, None)
+
+ # execute the revert command (There is nothing to actually revert,
+ # but locks are acquired either way.)
+ revert = builtins.cmd_revert()
+ revert.run()
+
+ # make sure that only one lock is acquired and released.
+ self.assertLength(1, locks_acquired)
+ self.assertLength(1, locks_released)
+
+ # make sure that the nonces are the same, since otherwise
+ # this would not be the same lock.
+ self.assertEqual(locks_acquired[0].details, locks_released[0].details)
+
+ # make sure that the locks are checkout locks.
+ self.assertEndsWith(locks_acquired[0].lock_url, "/checkout/lock")
+ self.assertEndsWith(locks_released[0].lock_url, "/checkout/lock")
+
diff --git a/bzrlib/tests/commands/test_update.py b/bzrlib/tests/commands/test_update.py
new file mode 100644
index 0000000..057d919
--- /dev/null
+++ b/bzrlib/tests/commands/test_update.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ branch,
+ builtins,
+ tests,
+ )
+from bzrlib.tests import transport_util
+
+
+class TestUpdate(transport_util.TestCaseWithConnectionHookedTransport):
+
+ def test_update(self):
+ remote_wt = self.make_branch_and_tree('remote')
+ local_wt = self.make_branch_and_tree('local')
+
+ remote_branch = branch.Branch.open(self.get_url('remote'))
+ local_wt.branch.bind(remote_branch)
+
+ remote_wt.commit('empty commit')
+
+ self.start_logging_connections()
+
+ update = builtins.cmd_update()
+ # update needs the encoding from outf to print URLs
+ update.outf = tests.StringIOWrapper()
+ # update calls it 'dir' where other commands call it 'directory'
+ update.run(dir='local')
+ self.assertEquals(1, len(self.connections))
+
diff --git a/bzrlib/tests/fake_command.py b/bzrlib/tests/fake_command.py
new file mode 100644
index 0000000..a9a9a78
--- /dev/null
+++ b/bzrlib/tests/fake_command.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.tests import test_commands
+test_commands.lazy_command_imported = True
+
+
+class cmd_fake(object):
+
+ pass
diff --git a/bzrlib/tests/features.py b/bzrlib/tests/features.py
new file mode 100644
index 0000000..b38a5e3
--- /dev/null
+++ b/bzrlib/tests/features.py
@@ -0,0 +1,494 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A collection of commonly used 'Features' to optionally run tests.
+"""
+
+import errno
+import os
+import subprocess
+import stat
+import sys
+import tempfile
+
+from bzrlib import (
+ osutils,
+ symbol_versioning,
+ )
+
+
+class Feature(object):
+ """An operating system Feature."""
+
+ def __init__(self):
+ self._available = None
+
+ def available(self):
+ """Is the feature available?
+
+ :return: True if the feature is available.
+ """
+ if self._available is None:
+ self._available = self._probe()
+ return self._available
+
+ def _probe(self):
+ """Implement this method in concrete features.
+
+ :return: True if the feature is available.
+ """
+ raise NotImplementedError
+
+ def __str__(self):
+ if getattr(self, 'feature_name', None):
+ return self.feature_name()
+ return self.__class__.__name__
+
+
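
Feature.available() caches the result of _probe(), so a concrete feature only needs to implement _probe() and, optionally, feature_name(). A minimal sketch of a new feature following the module's class-plus-instance convention (the probe body is purely illustrative and relies on the os and tempfile imports already at the top of this module):

class _WritableTempDirFeature(Feature):
    """Illustrative only: is the default temporary directory writable?"""

    def _probe(self):
        # Runs at most once; available() caches the result.
        return os.access(tempfile.gettempdir(), os.W_OK)

    def feature_name(self):
        return 'writable temporary directory'

WritableTempDirFeature = _WritableTempDirFeature()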
+class _SymlinkFeature(Feature):
+
+ def _probe(self):
+ return osutils.has_symlinks()
+
+ def feature_name(self):
+ return 'symlinks'
+
+SymlinkFeature = _SymlinkFeature()
+
+
+class _HardlinkFeature(Feature):
+
+ def _probe(self):
+ return osutils.has_hardlinks()
+
+ def feature_name(self):
+ return 'hardlinks'
+
+HardlinkFeature = _HardlinkFeature()
+
+
+class _OsFifoFeature(Feature):
+
+ def _probe(self):
+ return getattr(os, 'mkfifo', None)
+
+ def feature_name(self):
+ return 'filesystem fifos'
+
+OsFifoFeature = _OsFifoFeature()
+
+
+class _UnicodeFilenameFeature(Feature):
+ """Does the filesystem support Unicode filenames?"""
+
+ def _probe(self):
+ try:
+ # Check for character combinations unlikely to be covered by any
+ # single non-unicode encoding. We use the characters
+ # - greek small letter alpha (U+03B1) and
+ # - braille pattern dots-123456 (U+283F).
+ os.stat(u'\u03b1\u283f')
+ except UnicodeEncodeError:
+ return False
+ except (IOError, OSError):
+ # The filesystem allows the Unicode filename but the file doesn't
+ # exist.
+ return True
+ else:
+ # The filesystem allows the Unicode filename and the file exists,
+ # for some reason.
+ return True
+
+UnicodeFilenameFeature = _UnicodeFilenameFeature()
+
+
+class _CompatabilityThunkFeature(Feature):
+ """This feature is just a thunk to another feature.
+
+ It issues a deprecation warning if it is accessed, to let you know that you
+ should really use a different feature.
+ """
+
+ def __init__(self, dep_version, module, name,
+ replacement_name, replacement_module=None):
+ super(_CompatabilityThunkFeature, self).__init__()
+ self._module = module
+ if replacement_module is None:
+ replacement_module = module
+ self._replacement_module = replacement_module
+ self._name = name
+ self._replacement_name = replacement_name
+ self._dep_version = dep_version
+ self._feature = None
+
+ def _ensure(self):
+ if self._feature is None:
+ from bzrlib import pyutils
+ depr_msg = self._dep_version % ('%s.%s'
+ % (self._module, self._name))
+ use_msg = ' Use %s.%s instead.' % (self._replacement_module,
+ self._replacement_name)
+ symbol_versioning.warn(depr_msg + use_msg, DeprecationWarning,
+ stacklevel=5)
+ # Import the new feature and use it as a replacement for the
+ # deprecated one.
+ self._feature = pyutils.get_named_object(
+ self._replacement_module, self._replacement_name)
+
+ def _probe(self):
+ self._ensure()
+ return self._feature._probe()
+
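
A thunk like this is instantiated from the module that used to export the feature, pointing at its new home. A hedged sketch follows; the names and version string are only an example, and dep_version must contain a single '%s' as _ensure() expects:

# Illustrative only: keep an old name importable while warning about it.
SomeOldFeature = _CompatabilityThunkFeature(
    '%s was deprecated in version 2.5.0.',  # dep_version format string
    'bzrlib.tests',                         # module that used to hold it
    'SomeOldFeature',                       # old attribute name
    'SomeNewFeature',                       # replacement attribute name
    replacement_module='bzrlib.tests.features')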
+
+class ModuleAvailableFeature(Feature):
+ """This is a feature than describes a module we want to be available.
+
+ Declare the name of the module in __init__(), and then after probing, the
+ module will be available as 'self.module'.
+
+ :ivar module: The module if it is available, else None.
+ """
+
+ def __init__(self, module_name):
+ super(ModuleAvailableFeature, self).__init__()
+ self.module_name = module_name
+
+ def _probe(self):
+ sentinel = object()
+ module = sys.modules.get(self.module_name, sentinel)
+ if module is sentinel:
+ try:
+ self._module = __import__(self.module_name, {}, {}, [''])
+ return True
+ except ImportError:
+ return False
+ else:
+ self._module = module
+ return True
+
+ @property
+ def module(self):
+ if self.available():
+ return self._module
+ return None
+
+ def feature_name(self):
+ return self.module_name
+
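
Once probed, the feature exposes the imported module through .module. A hedged sketch of a test depending on it, assuming the _test_needs_features hook on bzrlib's TestCase (which skips the test when a listed feature is unavailable):

from bzrlib import tests
from bzrlib.tests import features


class TestNeedsParamiko(tests.TestCase):
    # Illustrative only, not part of the patch.

    _test_needs_features = [features.paramiko]

    def test_module_is_exposed(self):
        # .module is the module object imported during the probe.
        self.assertTrue(hasattr(features.paramiko.module, 'Transport'))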
+
+class _HTTPSServerFeature(Feature):
+ """Some tests want an https Server, check if one is available.
+
+ Right now, the only way this is available is under python2.6 which provides
+ an ssl module.
+ """
+
+ def _probe(self):
+ try:
+ import ssl
+ return True
+ except ImportError:
+ return False
+
+ def feature_name(self):
+ return 'HTTPSServer'
+
+
+HTTPSServerFeature = _HTTPSServerFeature()
+
+
+class _ByteStringNamedFilesystem(Feature):
+ """Is the filesystem based on bytes?"""
+
+ def _probe(self):
+ if os.name == "posix":
+ return True
+ return False
+
+ByteStringNamedFilesystem = _ByteStringNamedFilesystem()
+
+
+class _UTF8Filesystem(Feature):
+ """Is the filesystem UTF-8?"""
+
+ def _probe(self):
+ if osutils._fs_enc.upper() in ('UTF-8', 'UTF8'):
+ return True
+ return False
+
+UTF8Filesystem = _UTF8Filesystem()
+
+
+class _BreakinFeature(Feature):
+ """Does this platform support the breakin feature?"""
+
+ def _probe(self):
+ from bzrlib import breakin
+ if breakin.determine_signal() is None:
+ return False
+ if sys.platform == 'win32':
+ # Windows doesn't have os.kill, and we catch the SIGBREAK signal.
+ # We trigger SIGBREAK via a Console api so we need ctypes to
+ # access the function
+ try:
+ import ctypes
+ except ImportError:
+ return False
+ return True
+
+ def feature_name(self):
+ return "SIGQUIT or SIGBREAK w/ctypes on win32"
+
+
+BreakinFeature = _BreakinFeature()
+
+
+class _CaseInsCasePresFilenameFeature(Feature):
+ """Is the file-system case insensitive, but case-preserving?"""
+
+ def _probe(self):
+ fileno, name = tempfile.mkstemp(prefix='MixedCase')
+ try:
+ # first check truly case-preserving for created files, then check
+ # case insensitive when opening existing files.
+ name = osutils.normpath(name)
+ base, rel = osutils.split(name)
+ found_rel = osutils.canonical_relpath(base, name)
+ return (found_rel == rel
+ and os.path.isfile(name.upper())
+ and os.path.isfile(name.lower()))
+ finally:
+ os.close(fileno)
+ os.remove(name)
+
+ def feature_name(self):
+ return "case-insensitive case-preserving filesystem"
+
+CaseInsCasePresFilenameFeature = _CaseInsCasePresFilenameFeature()
+
+
+class _CaseInsensitiveFilesystemFeature(Feature):
+ """Check if underlying filesystem is case-insensitive but *not* case
+ preserving.
+ """
+ # Note that on Windows, Cygwin, MacOS etc, the file-systems are far
+ # more likely to be case preserving, so this case is rare.
+
+ def _probe(self):
+ if CaseInsCasePresFilenameFeature.available():
+ return False
+
+ from bzrlib import tests
+
+ if tests.TestCaseWithMemoryTransport.TEST_ROOT is None:
+ root = osutils.mkdtemp(prefix='testbzr-', suffix='.tmp')
+ tests.TestCaseWithMemoryTransport.TEST_ROOT = root
+ else:
+ root = tests.TestCaseWithMemoryTransport.TEST_ROOT
+ tdir = osutils.mkdtemp(prefix='case-sensitive-probe-', suffix='',
+ dir=root)
+ name_a = osutils.pathjoin(tdir, 'a')
+ name_A = osutils.pathjoin(tdir, 'A')
+ os.mkdir(name_a)
+ result = osutils.isdir(name_A)
+ tests._rmtree_temp_dir(tdir)
+ return result
+
+ def feature_name(self):
+ return 'case-insensitive filesystem'
+
+CaseInsensitiveFilesystemFeature = _CaseInsensitiveFilesystemFeature()
+
+
+class _CaseSensitiveFilesystemFeature(Feature):
+
+ def _probe(self):
+ if CaseInsCasePresFilenameFeature.available():
+ return False
+ elif CaseInsensitiveFilesystemFeature.available():
+ return False
+ else:
+ return True
+
+ def feature_name(self):
+ return 'case-sensitive filesystem'
+
+# new coding style is for feature instances to be lowercase
+case_sensitive_filesystem_feature = _CaseSensitiveFilesystemFeature()
+
+
+class _NotRunningAsRoot(Feature):
+
+ def _probe(self):
+ try:
+ uid = os.getuid()
+ except AttributeError:
+ # If there is no uid, chances are there is no root either
+ return True
+ return uid != 0
+
+ def feature_name(self):
+ return 'Not running as root'
+
+
+not_running_as_root = _NotRunningAsRoot()
+
+apport = ModuleAvailableFeature('apport')
+gpgme = ModuleAvailableFeature('gpgme')
+lzma = ModuleAvailableFeature('lzma')
+meliae = ModuleAvailableFeature('meliae.scanner')
+paramiko = ModuleAvailableFeature('paramiko')
+pycurl = ModuleAvailableFeature('pycurl')
+pywintypes = ModuleAvailableFeature('pywintypes')
+subunit = ModuleAvailableFeature('subunit')
+testtools = ModuleAvailableFeature('testtools')
+
+compiled_patiencediff_feature = ModuleAvailableFeature(
+ 'bzrlib._patiencediff_c')
+lsprof_feature = ModuleAvailableFeature('bzrlib.lsprof')
+
+
+class _BackslashDirSeparatorFeature(Feature):
+
+ def _probe(self):
+ try:
+ os.lstat(os.getcwd() + '\\')
+ except OSError:
+ return False
+ else:
+ return True
+
+ def feature_name(self):
+ return "Filesystem treats '\\' as a directory separator."
+
+backslashdir_feature = _BackslashDirSeparatorFeature()
+
+
+class _ChownFeature(Feature):
+ """os.chown is supported"""
+
+ def _probe(self):
+ return os.name == 'posix' and hasattr(os, 'chown')
+
+chown_feature = _ChownFeature()
+
+
+class ExecutableFeature(Feature):
+ """Feature testing whether an executable of a given name is on the PATH."""
+
+ def __init__(self, name):
+ super(ExecutableFeature, self).__init__()
+ self.name = name
+ self._path = None
+
+ @property
+ def path(self):
+ # This is a property, so accessing path ensures _probe was called
+ self.available()
+ return self._path
+
+ def _probe(self):
+ self._path = osutils.find_executable_on_path(self.name)
+ return self._path is not None
+
+ def feature_name(self):
+ return '%s executable' % self.name
+
+
+bash_feature = ExecutableFeature('bash')
+diff_feature = ExecutableFeature('diff')
+sed_feature = ExecutableFeature('sed')
+msgmerge_feature = ExecutableFeature('msgmerge')
+
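
Besides gating tests, these instances can locate the binary itself through .path, which triggers the probe if it has not run yet. A small hedged sketch using the sed_feature defined above:

def _describe_sed():
    # Illustrative only, not part of the patch.
    if sed_feature.available():
        return 'sed found at %s' % sed_feature.path
    return 'sed is not on PATH'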
+
+class _PosixPermissionsFeature(Feature):
+
+ def _probe(self):
+ def has_perms():
+ # Create temporary file and check if specified perms are
+ # maintained.
+ write_perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ f = tempfile.mkstemp(prefix='bzr_perms_chk_')
+ fd, name = f
+ os.close(fd)
+ osutils.chmod_if_possible(name, write_perms)
+
+ read_perms = os.stat(name).st_mode & 0777
+ os.unlink(name)
+ return (write_perms == read_perms)
+
+ return (os.name == 'posix') and has_perms()
+
+ def feature_name(self):
+ return 'POSIX permissions support'
+
+
+posix_permissions_feature = _PosixPermissionsFeature()
+
+
+class _StraceFeature(Feature):
+
+ def _probe(self):
+ try:
+ proc = subprocess.Popen(['strace'],
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ proc.communicate()
+ return True
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # strace is not installed
+ return False
+ else:
+ raise
+
+ def feature_name(self):
+ return 'strace'
+
+
+strace_feature = _StraceFeature()
+
+
+class _AttribFeature(Feature):
+
+ def _probe(self):
+ if (sys.platform not in ('cygwin', 'win32')):
+ return False
+ try:
+ proc = subprocess.Popen(['attrib', '.'], stdout=subprocess.PIPE)
+ except OSError, e:
+ return False
+ return (0 == proc.wait())
+
+ def feature_name(self):
+ return 'attrib Windows command-line tool'
+
+
+AttribFeature = _AttribFeature()
+
+
+class Win32Feature(Feature):
+ """Feature testing whether we're running selftest on Windows
+ or Windows-like platform.
+ """
+
+ def _probe(self):
+ return sys.platform == 'win32'
+
+ def feature_name(self):
+ return "win32 platform"
+
+
+win32_feature = Win32Feature()
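
Tests consume these objects either declaratively through _test_needs_features or imperatively through requireFeature. A hedged sketch of both styles, assuming those two TestCase hooks:

import os

from bzrlib import tests
from bzrlib.tests import features


class TestNeedingSymlinks(tests.TestCaseInTempDir):
    # Illustrative only, not part of the patch.

    # Declarative: every test in the class is skipped without the feature.
    _test_needs_features = [features.SymlinkFeature]

    def test_roundtrip(self):
        # Imperative: a single test can also require an extra feature.
        self.requireFeature(features.UnicodeFilenameFeature)
        os.symlink('target', u'\u03b1')
        self.assertEqual('target', os.readlink(u'\u03b1'))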
diff --git a/bzrlib/tests/file_utils.py b/bzrlib/tests/file_utils.py
new file mode 100644
index 0000000..ea6b870
--- /dev/null
+++ b/bzrlib/tests/file_utils.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+
+class FakeReadFile(object):
+ """A file-like object that can be given predefined content and read
+ like a file. The maximum read size and the number of reads are recorded."""
+
+ def __init__(self, data):
+ """Initialize the mock file object with the provided data."""
+ self.data = StringIO(data)
+ self.max_read_size = None
+ self.read_count = 0
+
+ def read(self, size=-1):
+ """Reads size characters from the input (or the rest of the string if
+ size is -1)."""
+ data = self.data.read(size)
+ self.max_read_size = max(self.max_read_size, len(data))
+ self.read_count += 1
+ return data
+
+ def get_max_read_size(self):
+ """Returns the maximum read size or None if no reads have occured."""
+ return self.max_read_size
+
+ def get_read_count(self):
+ """Returns the number of calls to read."""
+ return self.read_count
+
+ def reset_read_count(self):
+ """Clears the read count."""
+ self.read_count = 0
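
For illustration, the counters behave as follows (hedged sketch, not part of the patch):

def _demo_fake_read_file():
    f = FakeReadFile('some predefined content')
    f.read(4)    # returns 'some'
    f.read()     # returns the remaining ' predefined content'
    assert f.get_read_count() == 2
    assert f.get_max_read_size() == 19   # len(' predefined content')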
diff --git a/bzrlib/tests/fixtures.py b/bzrlib/tests/fixtures.py
new file mode 100644
index 0000000..33591d9
--- /dev/null
+++ b/bzrlib/tests/fixtures.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Fixtures that can be used within tests.
+
+Fixtures can be created during a test as a way to separate out creation of
+objects to test. Fixture objects can hold some state so that different
+objects created during a test instance can be related. Normally a fixture
+should live only for the duration of a single test, and its tearDown method
+should be passed to `addCleanup` on the test.
+"""
+
+
+import itertools
+
+
+def generate_unicode_names():
+ """Generate a sequence of arbitrary unique unicode names.
+
+ By default they are not representable in ascii.
+
+ >>> gen = generate_unicode_names()
+ >>> n1 = gen.next()
+ >>> n2 = gen.next()
+ >>> type(n1)
+ <type 'unicode'>
+ >>> n1 == n2
+ False
+ >>> n1.encode('ascii', 'replace') == n1
+ False
+ """
+ # include a mathematical symbol unlikely to be in 8-bit encodings
+ return (u"\N{SINE WAVE}%d" % x for x in itertools.count())
+
+
+interesting_encodings = [
+ ('iso-8859-1', False),
+ ('ascii', False),
+ ('cp850', False),
+ ('utf-8', True),
+ ('ucs-2', True),
+ ]
+
+
+def generate_unicode_encodings(universal_encoding=None):
+ """Return a generator of unicode encoding names.
+
+ These can be passed to Python encode/decode/etc.
+
+ :param universal_encoding: True/False/None tristate to say whether the
+ generated encodings either can or cannot encode all unicode
+ strings.
+
+ >>> n1 = generate_unicode_names().next()
+ >>> enc = generate_unicode_encodings(universal_encoding=True).next()
+ >>> enc2 = generate_unicode_encodings(universal_encoding=False).next()
+ >>> n1.encode(enc).decode(enc) == n1
+ True
+ >>> try:
+ ... n1.encode(enc2).decode(enc2)
+ ... except UnicodeError:
+ ... print 'fail'
+ fail
+ """
+ # TODO: check they're supported on this platform?
+ if universal_encoding is not None:
+ e = [n for (n, u) in interesting_encodings if u == universal_encoding]
+ else:
+ e = [n for (n, u) in interesting_encodings]
+ return itertools.cycle(iter(e))
+
+
+class RecordingContextManager(object):
+ """A context manager that records."""
+
+ def __init__(self):
+ self._calls = []
+
+ def __enter__(self):
+ self._calls.append('__enter__')
+ return self # This is bound to the 'as' clause in a with statement.
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._calls.append('__exit__')
+ return False # propagate exceptions.
+
+
+def build_branch_with_non_ancestral_rev(branch_builder):
+ """Builds a branch with a rev not in the ancestry of the tip.
+
+ This is the revision graph::
+
+ rev-2
+ |
+ rev-1
+ |
+ (null)
+
+ The branch tip is 'rev-1'. 'rev-2' is present in the branch's repository,
+ but is not part of rev-1's ancestry.
+
+ :param branch_builder: A BranchBuilder (e.g. from
+ TestCaseWithMemoryTransport.make_branch_builder).
+ :returns: the new branch
+ """
+ # Make a sequence of two commits
+ branch_builder.build_commit(message="Rev 1", rev_id='rev-1')
+ branch_builder.build_commit(message="Rev 2", rev_id='rev-2')
+ # Move the branch tip back to the first commit
+ source = branch_builder.get_branch()
+ source.set_last_revision_info(1, 'rev-1')
+ return source
+
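
A hedged sketch of how a test would use this helper, assuming make_branch_builder from TestCaseWithMemoryTransport:

from bzrlib import tests
from bzrlib.tests import fixtures


class TestNonAncestralFixture(tests.TestCaseWithMemoryTransport):
    # Illustrative only, not part of the patch.

    def test_tip_and_extra_rev(self):
        builder = self.make_branch_builder('source')
        branch = fixtures.build_branch_with_non_ancestral_rev(builder)
        # The tip is rev-1, but rev-2 is still present in the repository.
        self.assertEqual('rev-1', branch.last_revision())
        self.assertTrue(branch.repository.has_revision('rev-2'))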
+
+def make_branch_and_populated_tree(testcase):
+ """Make a simple branch and tree.
+
+ The tree holds some added but uncommitted files.
+ """
+ # TODO: Either accept or return the names of the files, so the caller
+ # doesn't need to be bound to the particular files created? -- mbp
+ # 20110705
+ tree = testcase.make_branch_and_tree('t')
+ testcase.build_tree_contents([('t/hello', 'hello world')])
+ tree.add(['hello'], ['hello-id'])
+ return tree
+
+
+class TimeoutFixture(object):
+ """Kill a test with sigalarm if it runs too long.
+
+ Only works on Unix at present.
+ """
+
+ def __init__(self, timeout_secs):
+ import signal
+ self.timeout_secs = timeout_secs
+ self.alarm_fn = getattr(signal, 'alarm', None)
+
+ def setUp(self):
+ if self.alarm_fn is not None:
+ self.alarm_fn(self.timeout_secs)
+
+ def cleanUp(self):
+ if self.alarm_fn is not None:
+ self.alarm_fn(0)
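
As the module docstring says, a fixture's cleanup should be handed to addCleanup. A hedged sketch of wiring the timeout into a test:

from bzrlib import tests
from bzrlib.tests import fixtures


class TestUnderTimeout(tests.TestCase):
    # Illustrative only, not part of the patch.

    def test_bounded_runtime(self):
        fixture = fixtures.TimeoutFixture(30)
        fixture.setUp()                     # arms SIGALRM on Unix
        self.addCleanup(fixture.cleanUp)    # disarms it again
        self.assertTrue(True)               # test body runs under the budget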
diff --git a/bzrlib/tests/ftp_server/__init__.py b/bzrlib/tests/ftp_server/__init__.py
new file mode 100644
index 0000000..dd8f37b
--- /dev/null
+++ b/bzrlib/tests/ftp_server/__init__.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+Facilities to use ftp test servers.
+"""
+
+import sys
+
+from bzrlib import tests
+from bzrlib.tests import (
+ features,
+ )
+
+
+try:
+ from bzrlib.tests.ftp_server import medusa_based
+ # medusa is bogus starting with python2.6, since we don't support earlier
+ # pythons anymore, it's currently useless. There is hope though that the
+ # unicode bugs get fixed in the future so we leave it disabled until
+ # then. Keeping the framework in place means that only the following line
+ # will need to be changed. The last tests were conducted with medusa-2.0
+ # -- vila 20110607
+ medusa_available = False
+except ImportError:
+ medusa_available = False
+
+
+try:
+ from bzrlib.tests.ftp_server import pyftpdlib_based
+ if pyftpdlib_based.pyftplib_version >= (0, 7, 0):
+ pyftpdlib_available = True
+ else:
+ # 0.6.0 breaks SITE CHMOD
+ pyftpdlib_available = False
+except ImportError:
+ pyftpdlib_available = False
+
+
+class _FTPServerFeature(features.Feature):
+ """Some tests want an FTP Server, check if one is available.
+
+ Right now, the only way this is available is if one of the following is
+ installed:
+
+ - 'medusa': http://www.amk.ca/python/code/medusa.html
+ - 'pyftpdlib': http://code.google.com/p/pyftpdlib/
+ """
+
+ def _probe(self):
+ return medusa_available or pyftpdlib_available
+
+ def feature_name(self):
+ return 'FTPServer'
+
+
+FTPServerFeature = _FTPServerFeature()
+
+
+class UnavailableFTPTestServer(object):
+ """Dummy ftp test server.
+
+ This allows the test suite to report the number of tests needing that
+ feature. We raise UnavailableFeature from methods before the test server
+ is used. Doing so in the setUp method has bad side-effects (tearDown is
+ never called).
+ """
+
+ def start_server(self, vfs_server=None):
+ pass
+
+ def stop_server(self):
+ pass
+
+ def get_url(self):
+ raise tests.UnavailableFeature(FTPServerFeature)
+
+ def get_bogus_url(self):
+ raise tests.UnavailableFeature(FTPServerFeature)
+
+
+if medusa_available:
+ FTPTestServer = medusa_based.FTPTestServer
+elif pyftpdlib_available:
+ FTPTestServer = pyftpdlib_based.FTPTestServer
+else:
+ FTPTestServer = UnavailableFTPTestServer
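
A test wanting one of these servers can gate itself on FTPServerFeature and drive whichever backend was selected through the common TestServer interface. A hedged sketch, assuming _test_needs_features and TestCaseWithTransport from bzrlib.tests:

from bzrlib import tests
from bzrlib.tests import ftp_server


class TestOverFTP(tests.TestCaseWithTransport):
    # Illustrative only, not part of the patch.

    _test_needs_features = [ftp_server.FTPServerFeature]

    def test_server_lifecycle(self):
        server = ftp_server.FTPTestServer()
        server.start_server()
        self.addCleanup(server.stop_server)
        self.assertTrue(server.get_url().startswith('ftp://'))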
diff --git a/bzrlib/tests/ftp_server/medusa_based.py b/bzrlib/tests/ftp_server/medusa_based.py
new file mode 100644
index 0000000..022a9a5
--- /dev/null
+++ b/bzrlib/tests/ftp_server/medusa_based.py
@@ -0,0 +1,294 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+"""
+FTP test server.
+
+Based on medusa: http://www.amk.ca/python/code/medusa.html
+"""
+
+import asyncore
+import errno
+import os
+import select
+import stat
+import sys
+import threading
+
+import medusa
+import medusa.filesys
+import medusa.ftp_server
+
+from bzrlib import (
+ tests,
+ trace,
+ )
+from bzrlib.tests import test_server
+
+
+class test_filesystem(medusa.filesys.os_filesystem):
+ """A custom filesystem wrapper to add missing functionalities."""
+
+ def chmod(self, path, mode):
+ p = self.normalize(self.path_module.join (self.wd, path))
+ return os.chmod(self.translate(p), mode)
+
+
+class test_authorizer(object):
+ """A custom Authorizer object for running the test suite.
+
+ The reason we cannot use dummy_authorizer is that it sets the
+ channel to read-only, which we don't always want to do.
+ """
+
+ def __init__(self, root):
+ self.root = root
+ # If secured_user is set secured_password will be checked
+ self.secured_user = None
+ self.secured_password = None
+
+ def authorize(self, channel, username, password):
+ """Return (success, reply_string, filesystem)"""
+ channel.persona = -1, -1
+ if username == 'anonymous':
+ channel.read_only = 1
+ else:
+ channel.read_only = 0
+
+ # Check secured_user if set
+ if (self.secured_user is not None
+ and username == self.secured_user
+ and password != self.secured_password):
+ return 0, 'Password invalid.', None
+ else:
+ return 1, 'OK.', test_filesystem(self.root)
+
+
+class ftp_channel(medusa.ftp_server.ftp_channel):
+ """Customized ftp channel"""
+
+ def log(self, message):
+ """Redirect logging requests."""
+ trace.mutter('ftp_channel: %s', message)
+
+ def log_info(self, message, type='info'):
+ """Redirect logging requests."""
+ trace.mutter('ftp_channel %s: %s', type, message)
+
+ def cmd_rnfr(self, line):
+ """Prepare for renaming a file."""
+ self._renaming = line[1]
+ self.respond('350 Ready for RNTO')
+ # TODO: jam 20060516 in testing, the ftp server seems to
+ # check that the file already exists, or it sends
+ # 550 RNFR command failed
+
+ def cmd_rnto(self, line):
+ """Rename a file based on the target given.
+
+ rnto must be called after calling rnfr.
+ """
+ if not self._renaming:
+ self.respond('503 RNFR required first.')
+ return
+ pfrom = self.filesystem.translate(self._renaming)
+ self._renaming = None
+ pto = self.filesystem.translate(line[1])
+ if os.path.exists(pto):
+ self.respond('550 RNTO failed: file exists')
+ return
+ try:
+ os.rename(pfrom, pto)
+ except (IOError, OSError), e:
+ # TODO: jam 20060516 return custom responses based on
+ # why the command failed
+ # (bialix 20070418) str(e) on Python 2.5 @ Windows
+ # sometimes doesn't provide the expected error message,
+ # so we obtain it via os.strerror()
+ self.respond('550 RNTO failed: %s' % os.strerror(e.errno))
+ except:
+ self.respond('550 RNTO failed')
+ # For a test server, we will go ahead and just die
+ raise
+ else:
+ self.respond('250 Rename successful.')
+
+ def cmd_size(self, line):
+ """Return the size of a file
+
+ This is overloaded to help the test suite determine if the
+ target is a directory.
+ """
+ filename = line[1]
+ if not self.filesystem.isfile(filename):
+ if self.filesystem.isdir(filename):
+ self.respond('550 "%s" is a directory' % (filename,))
+ else:
+ self.respond('550 "%s" is not a file' % (filename,))
+ else:
+ self.respond('213 %d'
+ % (self.filesystem.stat(filename)[stat.ST_SIZE]),)
+
+ def cmd_mkd(self, line):
+ """Create a directory.
+
+ Overloaded because default implementation does not distinguish
+ *why* it cannot make a directory.
+ """
+ if len (line) != 2:
+ self.command_not_understood(''.join(line))
+ else:
+ path = line[1]
+ try:
+ self.filesystem.mkdir (path)
+ self.respond ('257 MKD command successful.')
+ except (IOError, OSError), e:
+ # (bialix 20070418) str(e) on Python 2.5 @ Windows
+ # sometimes doesn't provide the expected error message,
+ # so we obtain it via os.strerror()
+ self.respond ('550 error creating directory: %s' %
+ os.strerror(e.errno))
+ except:
+ self.respond ('550 error creating directory.')
+
+ def cmd_site(self, line):
+ """Site specific commands."""
+ command, args = line[1].split(' ', 1)
+ if command.lower() == 'chmod':
+ try:
+ mode, path = args.split()
+ mode = int(mode, 8)
+ except ValueError:
+ # We catch both malformed line and malformed mode with the same
+ # ValueError.
+ self.command_not_understood(' '.join(line))
+ return
+ try:
+ # Yes path and mode are reversed
+ self.filesystem.chmod(path, mode)
+ self.respond('200 SITE CHMOD command successful')
+ except AttributeError:
+ # The chmod method is not available in read-only and will raise
+ # AttributeError since a different filesystem is used in that
+ # case
+ self.command_not_authorized(' '.join(line))
+ else:
+ # Another site specific command was requested. We don't know that
+ # one
+ self.command_not_understood(' '.join(line))
+
+
+class ftp_server(medusa.ftp_server.ftp_server):
+ """Customize the behavior of the Medusa ftp_server.
+
+ There are a few warts on the ftp_server, based on how it expects
+ to be used.
+ """
+ _renaming = None
+ ftp_channel_class = ftp_channel
+
+ def __init__(self, *args, **kwargs):
+ trace.mutter('Initializing ftp_server: %r, %r', args, kwargs)
+ medusa.ftp_server.ftp_server.__init__(self, *args, **kwargs)
+
+ def log(self, message):
+ """Redirect logging requests."""
+ trace.mutter('ftp_server: %s', message)
+
+ def log_info(self, message, type='info'):
+ """Override the asyncore.log_info so we don't stipple the screen."""
+ trace.mutter('ftp_server %s: %s', type, message)
+
+
+class FTPTestServer(test_server.TestServer):
+ """Common code for FTP server facilities."""
+
+ no_unicode_support = True
+
+ def __init__(self):
+ self._root = None
+ self._ftp_server = None
+ self._port = None
+ self._async_thread = None
+ # ftp server logs
+ self.logs = []
+
+ def get_url(self):
+ """Calculate an ftp url to this server."""
+ return 'ftp://foo:bar@localhost:%d/' % (self._port)
+
+ def get_bogus_url(self):
+ """Return a URL which cannot be connected to."""
+ return 'ftp://127.0.0.1:1'
+
+ def log(self, message):
+ """This is used by medusa.ftp_server to log connections, etc."""
+ self.logs.append(message)
+
+ def start_server(self, vfs_server=None):
+ if not (vfs_server is None or isinstance(vfs_server,
+ test_server.LocalURLServer)):
+ raise AssertionError(
+ "FTPServer currently assumes local transport, got %s" % vfs_server)
+ self._root = os.getcwdu()
+ self._ftp_server = ftp_server(
+ authorizer=test_authorizer(root=self._root),
+ ip='localhost',
+ port=0, # bind to a random port
+ resolver=None,
+ logger_object=self # Use FTPServer.log() for messages
+ )
+ self._port = self._ftp_server.getsockname()[1]
+ # Don't let it loop forever, or handle an infinite number of requests.
+ # In this case it will run for 1000s, or 10000 requests
+ self._async_thread = threading.Thread(
+ target=FTPTestServer._asyncore_loop_ignore_EBADF,
+ kwargs={'timeout':0.1, 'count':10000})
+ if 'threads' in tests.selftest_debug_flags:
+ sys.stderr.write('Thread started: %s\n'
+ % (self._async_thread.ident,))
+ self._async_thread.setDaemon(True)
+ self._async_thread.start()
+
+ def stop_server(self):
+ self._ftp_server.close()
+ asyncore.close_all()
+ self._async_thread.join()
+ if 'threads' in tests.selftest_debug_flags:
+ sys.stderr.write('Thread joined: %s\n'
+ % (self._async_thread.ident,))
+
+ @staticmethod
+ def _asyncore_loop_ignore_EBADF(*args, **kwargs):
+ """Ignore EBADF during server shutdown.
+
+ We close the socket to get the server to shutdown, but this causes
+ select.select() to raise EBADF.
+ """
+ try:
+ asyncore.loop(*args, **kwargs)
+ # FIXME: If we reach that point, we should raise an exception
+ # explaining that the 'count' parameter in setUp is too low or
+ # testers may wonder why their test just sits there waiting for a
+ # server that is already dead. Note that if the tester waits too
+ # long under pdb the server will also die.
+ except select.error, e:
+ if e.args[0] != errno.EBADF:
+ raise
+
+ def add_user(self, user, password):
+ """Add a user with write access."""
+ authorizer = self._ftp_server.authorizer
+ authorizer.secured_user = user
+ authorizer.secured_password = password
+
diff --git a/bzrlib/tests/ftp_server/pyftpdlib_based.py b/bzrlib/tests/ftp_server/pyftpdlib_based.py
new file mode 100644
index 0000000..70ee296
--- /dev/null
+++ b/bzrlib/tests/ftp_server/pyftpdlib_based.py
@@ -0,0 +1,208 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+"""
+FTP test server.
+
+Based on pyftpdlib: http://code.google.com/p/pyftpdlib/
+"""
+
+import errno
+import os
+from pyftpdlib import ftpserver
+import select
+import sys
+import threading
+
+
+from bzrlib import (
+ osutils,
+ tests,
+ trace,
+ )
+from bzrlib.tests import test_server
+
+
+# Convert the pyftplib string version into a tuple to avoid traps in string
+# comparison.
+pyftplib_version = tuple(map(int, ftpserver.__ver__.split('.')))
+
+
+class AnonymousWithWriteAccessAuthorizer(ftpserver.DummyAuthorizer):
+
+ def _check_permissions(self, username, perm):
+ # Like base implementation but don't warn about write permissions
+ # assigned to anonymous, since that's exactly our purpose.
+ for p in perm:
+ if p not in self.read_perms + self.write_perms:
+ raise ftpserver.AuthorizerError('No such permission "%s"' %p)
+
+
+class BzrConformingFS(ftpserver.AbstractedFS):
+
+ def chmod(self, path, mode):
+ return os.chmod(path, mode)
+
+ def listdir(self, path):
+ """List the content of a directory."""
+ return [osutils.safe_utf8(s) for s in os.listdir(path)]
+
+ def fs2ftp(self, fspath):
+ p = ftpserver.AbstractedFS.fs2ftp(self, osutils.safe_unicode(fspath))
+ return osutils.safe_utf8(p)
+
+ def ftp2fs(self, ftppath):
+ p = osutils.safe_unicode(ftppath)
+ return ftpserver.AbstractedFS.ftp2fs(self, p)
+
+class BzrConformingFTPHandler(ftpserver.FTPHandler):
+
+ abstracted_fs = BzrConformingFS
+
+ def __init__(self, conn, server):
+ ftpserver.FTPHandler.__init__(self, conn, server)
+ self.authorizer = server.authorizer
+
+ def ftp_SIZE(self, path):
+ # bzr is overly picky here, but we want to make the test suite pass
+ # first. This may need to be revisited -- vila 20090226
+ line = self.fs.fs2ftp(path)
+ if self.fs.isdir(self.fs.realpath(path)):
+ why = "%s is a directory" % line
+ self.log('FAIL SIZE "%s". %s.' % (line, why))
+ self.respond("550 %s." %why)
+ else:
+ ftpserver.FTPHandler.ftp_SIZE(self, path)
+
+ def ftp_NLST(self, path):
+ # bzr is overly picky here, but we want to make the test suite pass
+ # first. This may need to be revisited -- vila 20090226
+ line = self.fs.fs2ftp(path)
+ if self.fs.isfile(self.fs.realpath(path)):
+ why = "Not a directory: %s" % line
+ self.log('FAIL NLST "%s". %s.' % (line, why))
+ self.respond("550 %s." %why)
+ else:
+ ftpserver.FTPHandler.ftp_NLST(self, path)
+
+ def log_cmd(self, cmd, arg, respcode, respstr):
+ # The base class version chokes on unicode; the alternative is to just
+ # provide an empty implementation and rely on the client to do
+ # the logging for debugging purposes. Not worth the trouble so far
+ # -- vila 20110607
+ if cmd in ("DELE", "RMD", "RNFR", "RNTO", "MKD"):
+ line = '"%s" %s' % (' '.join([cmd, unicode(arg)]).strip(), respcode)
+ self.log(line)
+
+
+# An empty password is valid, hence the arg is neither mandatory nor forbidden
+ftpserver.proto_cmds['PASS']['arg'] = None
+
+class ftp_server(ftpserver.FTPServer):
+
+ def __init__(self, address, handler, authorizer):
+ ftpserver.FTPServer.__init__(self, address, handler)
+ self.authorizer = authorizer
+ # Worth backporting upstream ?
+ self.addr = self.socket.getsockname()
+
+
+class FTPTestServer(test_server.TestServer):
+ """Common code for FTP server facilities."""
+
+ def __init__(self):
+ self._root = None
+ self._ftp_server = None
+ self._port = None
+ self._async_thread = None
+ # ftp server logs
+ self.logs = []
+ self._ftpd_running = False
+
+ def get_url(self):
+ """Calculate an ftp url to this server."""
+ return 'ftp://anonymous@localhost:%d/' % (self._port)
+
+ def get_bogus_url(self):
+ """Return a URL which cannot be connected to."""
+ return 'ftp://127.0.0.1:1/'
+
+ def log(self, message):
+ """This is used by ftp_server to log connections, etc."""
+ self.logs.append(message)
+
+ def start_server(self, vfs_server=None):
+ if not (vfs_server is None or isinstance(vfs_server,
+ test_server.LocalURLServer)):
+ raise AssertionError(
+ "FTPServer currently assumes local transport, got %s"
+ % vfs_server)
+ self._root = os.getcwdu()
+
+ address = ('localhost', 0) # bind to a random port
+ authorizer = AnonymousWithWriteAccessAuthorizer()
+ authorizer.add_anonymous(self._root, perm='elradfmwM')
+ self._ftp_server = ftp_server(address, BzrConformingFTPHandler,
+ authorizer)
+ # This is hacky as hell, will not work if we need two servers working
+ # at the same time, but that's the best we can do so far...
+ # FIXME: At least log and logline could be overridden in the handler?
+ # -- vila 20090227
+ ftpserver.log = self.log
+ ftpserver.logline = self.log
+ ftpserver.logerror = self.log
+
+ self._port = self._ftp_server.socket.getsockname()[1]
+ self._ftpd_starting = threading.Lock()
+ self._ftpd_starting.acquire() # So it can be released by the server
+ self._ftpd_thread = threading.Thread(target=self._run_server,)
+ self._ftpd_thread.start()
+ if 'threads' in tests.selftest_debug_flags:
+ sys.stderr.write('Thread started: %s\n'
+ % (self._ftpd_thread.ident,))
+ # Wait for the server thread to start (i.e release the lock)
+ self._ftpd_starting.acquire()
+ self._ftpd_starting.release()
+
+ def stop_server(self):
+ """See bzrlib.transport.Server.stop_server."""
+ # Tell the server to stop, but also close the server socket for tests
+ # that start the server but never initiate a connection. Closing the
+ # socket should be done first though, to avoid further connections.
+ self._ftp_server.close()
+ self._ftpd_running = False
+ self._ftpd_thread.join()
+ if 'threads' in tests.selftest_debug_flags:
+ sys.stderr.write('Thread joined: %s\n'
+ % (self._ftpd_thread.ident,))
+
+ def _run_server(self):
+ """Run the server until stop_server is called.
+
+ Shut it down properly then.
+ """
+ self._ftpd_running = True
+ self._ftpd_starting.release()
+ while self._ftpd_running:
+ try:
+ self._ftp_server.serve_forever(timeout=0.1, count=1)
+ except select.error, e:
+ if e.args[0] != errno.EBADF:
+ raise
+ self._ftp_server.close_all(ignore_all=True)
+
+ def add_user(self, user, password):
+ """Add a user with write access."""
+ self._ftp_server.authorizer.add_user(user, password, self._root,
+ perm='elradfmwM')
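+
+# A minimal usage sketch (illustrative; the surrounding bzrlib test framework
+# is assumed to manage the fixture lifecycle, and 'joe'/'secret' are
+# placeholder credentials):
+#   server = FTPTestServer()
+#   server.start_server()
+#   try:
+#       server.add_user('joe', 'secret')
+#       url = server.get_url()  # 'ftp://anonymous@localhost:<port>/'
+#   finally:
+#       server.stop_server()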
diff --git a/bzrlib/tests/http_server.py b/bzrlib/tests/http_server.py
new file mode 100644
index 0000000..eaaa5e8
--- /dev/null
+++ b/bzrlib/tests/http_server.py
@@ -0,0 +1,516 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import errno
+import httplib
+import os
+import posixpath
+import random
+import re
+import SimpleHTTPServer
+import socket
+import urlparse
+
+from bzrlib import urlutils
+from bzrlib.tests import test_server
+
+
+class BadWebserverPath(ValueError):
+ def __str__(self):
+ return 'path %s is not in %s' % self.args
+
+
+class TestingHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+ """Handles one request.
+
+ A TestingHTTPRequestHandler is instantiated for every request received by
+ the associated server. Note that 'request' here is inherited from the base
+ TCPServer class; for the HTTP server it is really a connection, which will
+ itself handle one or several HTTP requests.
+ """
+ # Default protocol version
+ protocol_version = 'HTTP/1.1'
+
+ # The Message-like class used to parse the request headers
+ MessageClass = httplib.HTTPMessage
+
+ def setup(self):
+ SimpleHTTPServer.SimpleHTTPRequestHandler.setup(self)
+ self._cwd = self.server._home_dir
+ tcs = self.server.test_case_server
+ if tcs.protocol_version is not None:
+ # If the test server forced a protocol version, use it
+ self.protocol_version = tcs.protocol_version
+
+ def log_message(self, format, *args):
+ tcs = self.server.test_case_server
+ tcs.log('webserver - %s - - [%s] %s "%s" "%s"',
+ self.address_string(),
+ self.log_date_time_string(),
+ format % args,
+ self.headers.get('referer', '-'),
+ self.headers.get('user-agent', '-'))
+
+ def handle(self):
+ SimpleHTTPServer.SimpleHTTPRequestHandler.handle(self)
+ # Some clients (pycurl, I'm looking at you) are pickier than others
+ # and require that the socket itself is closed
+ # (SocketServer.StreamRequestHandler only closes the two associated
+ # 'makefile' objects).
+ self.connection.close()
+
+ def handle_one_request(self):
+ """Handle a single HTTP request.
+
+ We catch all socket errors occurring when the client closes the
+ connection early, to avoid polluting the test results.
+ """
+ try:
+ self._handle_one_request()
+ except socket.error, e:
+ # Any socket error should close the connection, but some errors are
+ # due to the client closing early and we don't want to pollute test
+ # results, so we raise only the others.
+ self.close_connection = 1
+ if (len(e.args) == 0
+ or e.args[0] not in (errno.EPIPE, errno.ECONNRESET,
+ errno.ECONNABORTED, errno.EBADF)):
+ raise
+
+ error_content_type = 'text/plain'
+ error_message_format = '''\
+Error code: %(code)s.
+Message: %(message)s.
+'''
+
+ def send_error(self, code, message=None):
+ """Send and log an error reply.
+
+ We redefine the python-provided version to be able to set a
+ ``Content-Length`` header as some http/1.1 clients complain otherwise
+ (see bug #568421).
+
+ :param code: The HTTP error code.
+
+ :param message: The explanation of the error code. Defaults to a short
+ entry.
+ """
+
+ if message is None:
+ try:
+ message = self.responses[code][0]
+ except KeyError:
+ message = '???'
+ self.log_error("code %d, message %s", code, message)
+ content = (self.error_message_format %
+ {'code': code, 'message': message})
+ self.send_response(code, message)
+ self.send_header("Content-Type", self.error_content_type)
+ self.send_header("Content-Length", "%d" % len(content))
+ self.send_header('Connection', 'close')
+ self.end_headers()
+ if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
+ self.wfile.write(content)
+
+ def _handle_one_request(self):
+ SimpleHTTPServer.SimpleHTTPRequestHandler.handle_one_request(self)
+
+ _range_regexp = re.compile(r'^(?P<start>\d+)-(?P<end>\d+)?$')
+ _tail_regexp = re.compile(r'^-(?P<tail>\d+)$')
+
+ def _parse_ranges(self, ranges_header, file_size):
+ """Parse the range header value and returns ranges.
+
+ RFC2616 14.35 says that syntactically invalid range specifiers MUST be
+ ignored. In that case, we return None instead of a range list.
+
+ :param ranges_header: The 'Range' header value.
+
+ :param file_size: The size of the requested file.
+
+ :return: A list of (start, end) tuples or None if some invalid range
+ specifier is encountered.
+ """
+ if not ranges_header.startswith('bytes='):
+ # Syntactically invalid header
+ return None
+
+ tail = None
+ ranges = []
+ ranges_header = ranges_header[len('bytes='):]
+ for range_str in ranges_header.split(','):
+ range_match = self._range_regexp.match(range_str)
+ if range_match is not None:
+ start = int(range_match.group('start'))
+ end_match = range_match.group('end')
+ if end_match is None:
+ # RFC2616 says end is optional and defaults to file_size
+ end = file_size
+ else:
+ end = int(end_match)
+ if start > end:
+ # Syntactically invalid range
+ return None
+ ranges.append((start, end))
+ else:
+ tail_match = self._tail_regexp.match(range_str)
+ if tail_match is not None:
+ tail = int(tail_match.group('tail'))
+ else:
+ # Syntactically invalid range
+ return None
+ if tail is not None:
+ # Normalize tail into ranges
+ ranges.append((max(0, file_size - tail), file_size))
+
+ checked_ranges = []
+ for start, end in ranges:
+ if start >= file_size:
+ # RFC2616 14.35, ranges are invalid if start >= file_size
+ return None
+ # RFC2616 14.35, end values should be truncated
+ # to file_size -1 if they exceed it
+ end = min(end, file_size - 1)
+ checked_ranges.append((start, end))
+ return checked_ranges
+
+ def _header_line_length(self, keyword, value):
+ header_line = '%s: %s\r\n' % (keyword, value)
+ return len(header_line)
+
+ def send_head(self):
+ """Overrides base implementation to work around a bug in python2.5."""
+ path = self.translate_path(self.path)
+ if os.path.isdir(path) and not self.path.endswith('/'):
+ # redirect browser - doing basically what apache does when the
+ # DirectorySlash option is On, which is quite common (braindead, but
+ # common)
+ self.send_response(301)
+ self.send_header("Location", self.path + "/")
+ # Indicates that the body is empty for HTTP/1.1 clients
+ self.send_header('Content-Length', '0')
+ self.end_headers()
+ return None
+
+ return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
+
+ def send_range_content(self, file, start, length):
+ file.seek(start)
+ self.wfile.write(file.read(length))
+
+ def get_single_range(self, file, file_size, start, end):
+ self.send_response(206)
+ length = end - start + 1
+ self.send_header('Accept-Ranges', 'bytes')
+ self.send_header("Content-Length", "%d" % length)
+
+ self.send_header("Content-Type", 'application/octet-stream')
+ self.send_header("Content-Range", "bytes %d-%d/%d" % (start,
+ end,
+ file_size))
+ self.end_headers()
+ self.send_range_content(file, start, length)
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ self.send_response(206)
+ self.send_header('Accept-Ranges', 'bytes')
+ boundary = '%d' % random.randint(0,0x7FFFFFFF)
+ self.send_header('Content-Type',
+ 'multipart/byteranges; boundary=%s' % boundary)
+ boundary_line = '--%s\r\n' % boundary
+ # Calculate the Content-Length
+ content_length = 0
+ for (start, end) in ranges:
+ content_length += len(boundary_line)
+ content_length += self._header_line_length(
+ 'Content-type', 'application/octet-stream')
+ content_length += self._header_line_length(
+ 'Content-Range', 'bytes %d-%d/%d' % (start, end, file_size))
+ content_length += len('\r\n') # end headers
+ content_length += end - start + 1
+ content_length += len(boundary_line)
+ self.send_header('Content-length', content_length)
+ self.end_headers()
+
+ # Send the multipart body
+ for (start, end) in ranges:
+ self.wfile.write(boundary_line)
+ self.send_header('Content-type', 'application/octet-stream')
+ self.send_header('Content-Range', 'bytes %d-%d/%d'
+ % (start, end, file_size))
+ self.end_headers()
+ self.send_range_content(file, start, end - start + 1)
+ # Final boundary
+ self.wfile.write(boundary_line)
+
+ def do_GET(self):
+ """Serve a GET request.
+
+ Handles the Range header.
+ """
+ # Update statistics
+ self.server.test_case_server.GET_request_nb += 1
+
+ path = self.translate_path(self.path)
+ ranges_header_value = self.headers.get('Range')
+ if ranges_header_value is None or os.path.isdir(path):
+ # Let the mother class handle most cases
+ return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
+
+ try:
+ # Always read in binary mode. Opening files in text
+ # mode may cause newline translations, making the
+ # actual size of the content transmitted *less* than
+ # the content-length!
+ f = open(path, 'rb')
+ except IOError:
+ self.send_error(404, "File not found")
+ return
+
+ file_size = os.fstat(f.fileno())[6]
+ ranges = self._parse_ranges(ranges_header_value, file_size)
+ if not ranges:
+ # RFC2616 14.16 and 14.35 say that when a server
+ # encounters unsatisfiable range specifiers, it
+ # SHOULD return a 416.
+ f.close()
+ # FIXME: We SHOULD send a Content-Range header too,
+ # but the implementation of send_error does not
+ # allow that. So far.
+ self.send_error(416, "Requested range not satisfiable")
+ return
+
+ if len(ranges) == 1:
+ (start, end) = ranges[0]
+ self.get_single_range(f, file_size, start, end)
+ else:
+ self.get_multiple_ranges(f, file_size, ranges)
+ f.close()
+
+ def translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ If the server requires it, proxy the path before the usual translation
+ """
+ if self.server.test_case_server.proxy_requests:
+ # We need to act as a proxy and accept absolute urls,
+ # which SimpleHTTPRequestHandler (parent) is not
+ # ready for. So we just drop the protocol://host:port
+ # part in front of the request-url (because we know
+ # we would not forward the request to *another*
+ # proxy).
+
+ # So we do what SimpleHTTPRequestHandler.translate_path
+ # does beginning with python 2.4.3: abandon query
+ # parameters, scheme, host, port, etc. (which ensures we
+ # provide the right behaviour on all python versions).
+ path = urlparse.urlparse(path)[2]
+ # And now, we can apply *our* trick to proxy files
+ path += '-proxied'
+
+ return self._translate_path(path)
+
+ def _translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ Note that we're translating http URLs here, not file URLs.
+ The URL root location is the server's startup directory.
+ Components that mean special things to the local file system
+ (e.g. drive or directory names) are ignored. (XXX They should
+ probably be diagnosed.)
+
+ Override from python standard library to stop it calling os.getcwd()
+ """
+ # abandon query parameters
+ path = urlparse.urlparse(path)[2]
+ path = posixpath.normpath(urlutils.unquote(path))
+ path = path.decode('utf-8')
+ words = path.split('/')
+ words = filter(None, words)
+ path = self._cwd
+ for num, word in enumerate(words):
+ if num == 0:
+ drive, word = os.path.splitdrive(word)
+ head, word = os.path.split(word)
+ if word in (os.curdir, os.pardir): continue
+ path = os.path.join(path, word)
+ return path
+
+
+class TestingHTTPServerMixin:
+
+ def __init__(self, test_case_server):
+ # test_case_server can be used to communicate between the
+ # tests and the server (or the request handler and the
+ # server), allowing dynamic behaviors to be defined from
+ # the test cases.
+ self.test_case_server = test_case_server
+ self._home_dir = test_case_server._home_dir
+
+
+class TestingHTTPServer(test_server.TestingTCPServer, TestingHTTPServerMixin):
+
+ def __init__(self, server_address, request_handler_class,
+ test_case_server):
+ test_server.TestingTCPServer.__init__(self, server_address,
+ request_handler_class)
+ TestingHTTPServerMixin.__init__(self, test_case_server)
+
+
+class TestingThreadingHTTPServer(test_server.TestingThreadingTCPServer,
+ TestingHTTPServerMixin):
+ """A threading HTTP test server for HTTP 1.1.
+
+ Since tests can initiate several concurrent connections to the same http
+ server, we need an independent connection for each of them. We achieve that
+ by spawning a new thread for each connection.
+ """
+ def __init__(self, server_address, request_handler_class,
+ test_case_server):
+ test_server.TestingThreadingTCPServer.__init__(self, server_address,
+ request_handler_class)
+ TestingHTTPServerMixin.__init__(self, test_case_server)
+
+
+class HttpServer(test_server.TestingTCPServerInAThread):
+ """A test server for http transports.
+
+ Subclasses can provide a specific request handler.
+ """
+
+ # The real servers depending on the protocol
+ http_server_class = {'HTTP/1.0': TestingHTTPServer,
+ 'HTTP/1.1': TestingThreadingHTTPServer,
+ }
+
+ # Whether or not we proxy the requests (see
+ # TestingHTTPRequestHandler.translate_path).
+ proxy_requests = False
+
+ # used to form the url that connects to this server
+ _url_protocol = 'http'
+
+ def __init__(self, request_handler=TestingHTTPRequestHandler,
+ protocol_version=None):
+ """Constructor.
+
+ :param request_handler: a class that will be instantiated to handle an
+ http connection (one or several requests).
+
+ :param protocol_version: if specified, will override the protocol
+ version of the request handler.
+ """
+ # Depending on the protocol version, we will create the appropriate
+ # server
+ if protocol_version is None:
+ # Use the request handler one
+ proto_vers = request_handler.protocol_version
+ else:
+ # Use our own, it will be used to override the request handler
+ # one too.
+ proto_vers = protocol_version
+ # Get the appropriate server class for the required protocol
+ serv_cls = self.http_server_class.get(proto_vers, None)
+ if serv_cls is None:
+ raise httplib.UnknownProtocol(proto_vers)
+ self.host = 'localhost'
+ self.port = 0
+ super(HttpServer, self).__init__((self.host, self.port),
+ serv_cls,
+ request_handler)
+ self.protocol_version = proto_vers
+ # Allows tests to verify number of GET requests issued
+ self.GET_request_nb = 0
+ self._http_base_url = None
+ self.logs = []
+
+ def create_server(self):
+ return self.server_class(
+ (self.host, self.port), self.request_handler_class, self)
+
+ def _get_remote_url(self, path):
+ path_parts = path.split(os.path.sep)
+ if os.path.isabs(path):
+ if path_parts[:len(self._local_path_parts)] != \
+ self._local_path_parts:
+ raise BadWebserverPath(path, self.test_dir)
+ remote_path = '/'.join(path_parts[len(self._local_path_parts):])
+ else:
+ remote_path = '/'.join(path_parts)
+
+ return self._http_base_url + remote_path
+
+ def log(self, format, *args):
+ """Capture Server log output."""
+ self.logs.append(format % args)
+
+ def start_server(self, backing_transport_server=None):
+ """See bzrlib.transport.Server.start_server.
+
+ :param backing_transport_server: The transport that requests over this
+ protocol should be forwarded to. Note that this is currently not
+ supported for HTTP.
+ """
+ # XXX: TODO: make the server back onto vfs_server rather than local
+ # disk.
+ if not (backing_transport_server is None
+ or isinstance(backing_transport_server,
+ test_server.LocalURLServer)):
+ raise AssertionError(
+ "HTTPServer currently assumes local transport, got %s" %
+ backing_transport_server)
+ self._home_dir = os.getcwdu()
+ self._local_path_parts = self._home_dir.split(os.path.sep)
+ self.logs = []
+
+ super(HttpServer, self).start_server()
+ self._http_base_url = '%s://%s:%s/' % (
+ self._url_protocol, self.host, self.port)
+
+ def get_url(self):
+ """See bzrlib.transport.Server.get_url."""
+ return self._get_remote_url(self._home_dir)
+
+ def get_bogus_url(self):
+ """See bzrlib.transport.Server.get_bogus_url."""
+ # this is chosen to try to prevent trouble with proxies, weird dns,
+ # etc
+ return self._url_protocol + '://127.0.0.1:1/'
+
+
+class HttpServer_urllib(HttpServer):
+ """Subclass of HttpServer that gives http+urllib urls.
+
+ This is for use in testing: connections to this server will always go
+ through urllib where possible.
+ """
+
+ # urls returned by this server should require the urllib client impl
+ _url_protocol = 'http+urllib'
+
+
+class HttpServer_PyCurl(HttpServer):
+ """Subclass of HttpServer that gives http+pycurl urls.
+
+ This is for use in testing: connections to this server will always go
+ through pycurl where possible.
+ """
+
+ # We don't care about checking the pycurl availability as
+ # this server will be required only when pycurl is present
+
+ # urls returned by this server should require the pycurl client impl
+ _url_protocol = 'http+pycurl'
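+
+# A minimal usage sketch (illustrative; stop_server() is assumed to be
+# provided by the TestingTCPServerInAThread base class):
+#   server = HttpServer(protocol_version='HTTP/1.1')
+#   server.start_server()
+#   try:
+#       url = server.get_url()  # e.g. 'http://localhost:<port>/'
+#   finally:
+#       server.stop_server()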
diff --git a/bzrlib/tests/http_utils.py b/bzrlib/tests/http_utils.py
new file mode 100644
index 0000000..bb96b81
--- /dev/null
+++ b/bzrlib/tests/http_utils.py
@@ -0,0 +1,557 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+import re
+import urllib2
+
+
+from bzrlib import (
+ errors,
+ osutils,
+ tests,
+ transport,
+ )
+from bzrlib.smart import (
+ medium,
+ )
+from bzrlib.tests import http_server
+from bzrlib.transport import chroot
+
+
+class HTTPServerWithSmarts(http_server.HttpServer):
+ """HTTPServerWithSmarts extends the HttpServer with POST methods that will
+ trigger a smart server to execute with a transport rooted at the rootdir of
+ the HTTP server.
+ """
+
+ def __init__(self, protocol_version=None):
+ http_server.HttpServer.__init__(self, SmartRequestHandler,
+ protocol_version=protocol_version)
+
+
+class SmartRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Extend TestingHTTPRequestHandler to support smart client POSTs.
+
+ XXX: This duplicates a fair bit of the logic in bzrlib.transport.http.wsgi.
+ """
+
+ def do_POST(self):
+ """Hand the request off to a smart server instance."""
+ backing = transport.get_transport_from_path(
+ self.server.test_case_server._home_dir)
+ chroot_server = chroot.ChrootServer(backing)
+ chroot_server.start_server()
+ try:
+ t = transport.get_transport_from_url(chroot_server.get_url())
+ self.do_POST_inner(t)
+ finally:
+ chroot_server.stop_server()
+
+ def do_POST_inner(self, chrooted_transport):
+ self.send_response(200)
+ self.send_header("Content-type", "application/octet-stream")
+ if not self.path.endswith('.bzr/smart'):
+ raise AssertionError(
+ 'POST to path not ending in .bzr/smart: %r' % (self.path,))
+ t = chrooted_transport.clone(self.path[:-len('.bzr/smart')])
+ # if this fails, we should return 400 bad request, but failure is
+ # failure for now - RBC 20060919
+ data_length = int(self.headers['Content-Length'])
+ # TODO: We might like to support streaming responses. 1.0 allows no
+ # Content-length in this case, so for integrity we should perform our
+ # own chunking within the stream.
+ # 1.1 allows chunked responses, and in this case we could chunk using
+ # the HTTP chunking as this will allow HTTP persistence safely, even if
+ # we have to stop early due to error, but we would also have to use the
+ # HTTP trailer facility which may not be widely available.
+ request_bytes = self.rfile.read(data_length)
+ protocol_factory, unused_bytes = medium._get_protocol_factory_for_bytes(
+ request_bytes)
+ out_buffer = StringIO()
+ smart_protocol_request = protocol_factory(t, out_buffer.write, '/')
+ # Perhaps there should be a SmartServerHTTPMedium that takes care of
+ # feeding the bytes in the http request to the smart_protocol_request,
+ # but for now it's simpler to just feed the bytes directly.
+ smart_protocol_request.accept_bytes(unused_bytes)
+ if not (smart_protocol_request.next_read_size() == 0):
+ raise errors.SmartProtocolError(
+ "not finished reading, but all data sent to protocol.")
+ self.send_header("Content-Length", str(len(out_buffer.getvalue())))
+ self.end_headers()
+ self.wfile.write(out_buffer.getvalue())
+
+
+class TestCaseWithWebserver(tests.TestCaseWithTransport):
+ """A support class that provides readonly urls that are http://.
+
+ This is done by forcing the readonly server to be an http
+ one. This will currently fail if the primary transport is not
+ backed by regular disk files.
+ """
+
+ # These attributes can be overridden or parametrized by daughter classes if
+ # needed, but must exist so that the create_transport_readonly_server()
+ # method (or any method creating an http(s) server) can propagate them.
+ _protocol_version = None
+ _url_protocol = 'http'
+
+ def setUp(self):
+ super(TestCaseWithWebserver, self).setUp()
+ self.transport_readonly_server = http_server.HttpServer
+
+ def create_transport_readonly_server(self):
+ server = self.transport_readonly_server(
+ protocol_version=self._protocol_version)
+ server._url_protocol = self._url_protocol
+ return server
+
+
+class TestCaseWithTwoWebservers(TestCaseWithWebserver):
+ """A support class providing readonly urls on two servers that are http://.
+
+ We set up two webservers to allow various tests involving
+ proxies or redirections from one server to the other.
+ """
+ def setUp(self):
+ super(TestCaseWithTwoWebservers, self).setUp()
+ self.transport_secondary_server = http_server.HttpServer
+ self.__secondary_server = None
+
+ def create_transport_secondary_server(self):
+ """Create a transport server from class defined at init.
+
+ This is mostly a hook for daughter classes.
+ """
+ server = self.transport_secondary_server(
+ protocol_version=self._protocol_version)
+ server._url_protocol = self._url_protocol
+ return server
+
+ def get_secondary_server(self):
+ """Get the server instance for the secondary transport."""
+ if self.__secondary_server is None:
+ self.__secondary_server = self.create_transport_secondary_server()
+ self.start_server(self.__secondary_server)
+ return self.__secondary_server
+
+ def get_secondary_url(self, relpath=None):
+ base = self.get_secondary_server().get_url()
+ return self._adjust_url(base, relpath)
+
+ def get_secondary_transport(self, relpath=None):
+ t = transport.get_transport_from_url(self.get_secondary_url(relpath))
+ self.assertTrue(t.is_readonly())
+ return t
+
+
+class ProxyServer(http_server.HttpServer):
+ """A proxy test server for http transports."""
+
+ proxy_requests = True
+
+
+class RedirectRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Redirect all request to the specified server"""
+
+ def parse_request(self):
+ """Redirect a single HTTP request to another host"""
+ valid = http_server.TestingHTTPRequestHandler.parse_request(self)
+ if valid:
+ tcs = self.server.test_case_server
+ code, target = tcs.is_redirected(self.path)
+ if code is not None and target is not None:
+ # Redirect as instructed
+ self.send_response(code)
+ self.send_header('Location', target)
+ # We do not send a body
+ self.send_header('Content-Length', '0')
+ self.end_headers()
+ return False # The job is done
+ else:
+ # We let the parent class serve the request
+ pass
+ return valid
+
+
+class HTTPServerRedirecting(http_server.HttpServer):
+ """An HttpServer redirecting to another server """
+
+ def __init__(self, request_handler=RedirectRequestHandler,
+ protocol_version=None):
+ http_server.HttpServer.__init__(self, request_handler,
+ protocol_version=protocol_version)
+ # redirections is a list of tuples (source, target, code)
+ # - source is a regexp for the paths requested
+ # - target is a replacement for re.sub describing where
+ # the request will be redirected
+ # - code is the http status code associated with the
+ # redirection (301 permanent, 302 temporary, etc.)
+ self.redirections = []
+
+ def redirect_to(self, host, port):
+ """Redirect all requests to a specific host:port"""
+ self.redirections = [('(.*)',
+ r'http://%s:%s\1' % (host, port) ,
+ 301)]
+
+ def is_redirected(self, path):
+ """Is the path redirected by this server.
+
+ :param path: the requested relative path
+
+ :returns: a tuple (code, target) if a matching
+ redirection is found, (None, None) otherwise.
+ """
+ code = None
+ target = None
+ for (rsource, rtarget, rcode) in self.redirections:
+ target, match = re.subn(rsource, rtarget, path)
+ if match:
+ code = rcode
+ break # The first match wins
+ else:
+ target = None
+ return code, target
+
+
+class TestCaseWithRedirectedWebserver(TestCaseWithTwoWebservers):
+ """A support class providing redirections from one server to another.
+
+ We set up two webservers to allow various tests involving
+ redirections.
+ The 'old' server is redirected to the 'new' server.
+ """
+
+ def setUp(self):
+ super(TestCaseWithRedirectedWebserver, self).setUp()
+ # The redirections will point to the new server
+ self.new_server = self.get_readonly_server()
+ # The requests to the old server will be redirected to the new server
+ self.old_server = self.get_secondary_server()
+
+ def create_transport_secondary_server(self):
+ """Create the secondary server redirecting to the primary server"""
+ new = self.get_readonly_server()
+ redirecting = HTTPServerRedirecting(
+ protocol_version=self._protocol_version)
+ redirecting.redirect_to(new.host, new.port)
+ redirecting._url_protocol = self._url_protocol
+ return redirecting
+
+ def get_old_url(self, relpath=None):
+ base = self.old_server.get_url()
+ return self._adjust_url(base, relpath)
+
+ def get_old_transport(self, relpath=None):
+ t = transport.get_transport_from_url(self.get_old_url(relpath))
+ self.assertTrue(t.is_readonly())
+ return t
+
+ def get_new_url(self, relpath=None):
+ base = self.new_server.get_url()
+ return self._adjust_url(base, relpath)
+
+ def get_new_transport(self, relpath=None):
+ t = transport.get_transport_from_url(self.get_new_url(relpath))
+ self.assertTrue(t.is_readonly())
+ return t
+
+
+class AuthRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Requires an authentication to process requests.
+
+ This is intended to be used with a server that always and
+ only uses one authentication scheme (implemented by daughter
+ classes).
+ """
+
+ # The following attributes should be defined in the server
+ # - auth_header_sent: the header name sent to require auth
+ # - auth_header_recv: the header received containing auth
+ # - auth_error_code: the error code to indicate auth required
+
+ def _require_authentication(self):
+ # Note that we must update test_case_server *before*
+ # sending the error or the client may try to read it
+ # before we have sent the whole error back.
+ tcs = self.server.test_case_server
+ tcs.auth_required_errors += 1
+ self.send_response(tcs.auth_error_code)
+ self.send_header_auth_reqed()
+ # We do not send a body
+ self.send_header('Content-Length', '0')
+ self.end_headers()
+ return
+
+ def do_GET(self):
+ if self.authorized():
+ return http_server.TestingHTTPRequestHandler.do_GET(self)
+ else:
+ return self._require_authentication()
+
+ def do_HEAD(self):
+ if self.authorized():
+ return http_server.TestingHTTPRequestHandler.do_HEAD(self)
+ else:
+ return self._require_authentication()
+
+
+class BasicAuthRequestHandler(AuthRequestHandler):
+ """Implements the basic authentication of a request"""
+
+ def authorized(self):
+ tcs = self.server.test_case_server
+ if tcs.auth_scheme != 'basic':
+ return False
+
+ auth_header = self.headers.get(tcs.auth_header_recv, None)
+ if auth_header:
+ scheme, raw_auth = auth_header.split(' ', 1)
+ if scheme.lower() == tcs.auth_scheme:
+ user, password = raw_auth.decode('base64').split(':')
+ return tcs.authorized(user, password)
+
+ return False
+
+ def send_header_auth_reqed(self):
+ tcs = self.server.test_case_server
+ self.send_header(tcs.auth_header_sent,
+ 'Basic realm="%s"' % tcs.auth_realm)
+
+
+# FIXME: We could send an Authentication-Info header too when
+# the authentication is successful
+
+class DigestAuthRequestHandler(AuthRequestHandler):
+ """Implements the digest authentication of a request.
+
+ We need persistence for some attributes and that can't be
+ achieved here since we get instantiated for each request. We
+ rely on the DigestAuthServer to take care of them.
+ """
+
+ def authorized(self):
+ tcs = self.server.test_case_server
+
+ auth_header = self.headers.get(tcs.auth_header_recv, None)
+ if auth_header is None:
+ return False
+ scheme, auth = auth_header.split(None, 1)
+ if scheme.lower() == tcs.auth_scheme:
+ auth_dict = urllib2.parse_keqv_list(urllib2.parse_http_list(auth))
+
+ return tcs.digest_authorized(auth_dict, self.command)
+
+ return False
+
+ def send_header_auth_reqed(self):
+ tcs = self.server.test_case_server
+ header = 'Digest realm="%s", ' % tcs.auth_realm
+ header += 'nonce="%s", algorithm="%s", qop="auth"' % (tcs.auth_nonce,
+ 'MD5')
+ self.send_header(tcs.auth_header_sent, header)
+
+
+class DigestAndBasicAuthRequestHandler(DigestAuthRequestHandler):
+ """Implements a digest and basic authentication of a request.
+
+ I.e. the server proposes both schemes and the client should choose the best
+ one it can handle, which, in that case, should be digest, the only scheme
+ accepted here.
+ """
+
+ def send_header_auth_reqed(self):
+ tcs = self.server.test_case_server
+ self.send_header(tcs.auth_header_sent,
+ 'Basic realm="%s"' % tcs.auth_realm)
+ header = 'Digest realm="%s", ' % tcs.auth_realm
+ header += 'nonce="%s", algorithm="%s", qop="auth"' % (tcs.auth_nonce,
+ 'MD5')
+ self.send_header(tcs.auth_header_sent, header)
+
+
+class AuthServer(http_server.HttpServer):
+ """Extends HttpServer with a dictionary of passwords.
+
+ This is used as a base class for various schemes which should
+ all use or redefine the associated AuthRequestHandler.
+
+ Note that no users are defined by default, so add_user should
+ be called before issuing the first request.
+ """
+
+ # The following attributes should be set by daughter classes
+ # and are used by AuthRequestHandler.
+ auth_header_sent = None
+ auth_header_recv = None
+ auth_error_code = None
+ auth_realm = "Thou should not pass"
+
+ def __init__(self, request_handler, auth_scheme,
+ protocol_version=None):
+ http_server.HttpServer.__init__(self, request_handler,
+ protocol_version=protocol_version)
+ self.auth_scheme = auth_scheme
+ self.password_of = {}
+ self.auth_required_errors = 0
+
+ def add_user(self, user, password):
+ """Declare a user with an associated password.
+
+ The password can be empty; in that case use an empty string
+ (''), not None.
+ """
+ self.password_of[user] = password
+
+ def authorized(self, user, password):
+ """Check that the given user provided the right password"""
+ expected_password = self.password_of.get(user, None)
+ return expected_password is not None and password == expected_password
+
+
+# FIXME: There is some code duplication with
+# _urllib2_wrappers.py.DigestAuthHandler. If that duplication
+# grows, it may require a refactoring. Also, we don't implement
+# SHA algorithm nor MD5-sess here, but that does not seem worth
+# it.
+class DigestAuthServer(AuthServer):
+ """A digest authentication server"""
+
+ auth_nonce = 'now!'
+
+ def __init__(self, request_handler, auth_scheme,
+ protocol_version=None):
+ AuthServer.__init__(self, request_handler, auth_scheme,
+ protocol_version=protocol_version)
+
+ def digest_authorized(self, auth, command):
+ nonce = auth['nonce']
+ if nonce != self.auth_nonce:
+ return False
+ realm = auth['realm']
+ if realm != self.auth_realm:
+ return False
+ user = auth['username']
+ if user not in self.password_of:
+ return False
+ algorithm = auth['algorithm']
+ if algorithm != 'MD5':
+ return False
+ qop = auth['qop']
+ if qop != 'auth':
+ return False
+
+ password = self.password_of[user]
+
+ # Recalculate the response_digest to compare with the one
+ # sent by the client
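+ # following RFC 2617 for qop="auth":
+ # response = KD(H(A1), nonce ":" nc ":" cnonce ":" qop ":" H(A2))
+ # where A1 = user ":" realm ":" password and A2 = method ":" uri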
+ A1 = '%s:%s:%s' % (user, realm, password)
+ A2 = '%s:%s' % (command, auth['uri'])
+
+ H = lambda x: osutils.md5(x).hexdigest()
+ KD = lambda secret, data: H("%s:%s" % (secret, data))
+
+ nonce_count = int(auth['nc'], 16)
+
+ ncvalue = '%08x' % nonce_count
+
+ cnonce = auth['cnonce']
+ noncebit = '%s:%s:%s:%s:%s' % (nonce, ncvalue, cnonce, qop, H(A2))
+ response_digest = KD(H(A1), noncebit)
+
+ return response_digest == auth['response']
+
+
+class HTTPAuthServer(AuthServer):
+ """An HTTP server requiring authentication"""
+
+ def init_http_auth(self):
+ self.auth_header_sent = 'WWW-Authenticate'
+ self.auth_header_recv = 'Authorization'
+ self.auth_error_code = 401
+
+
+class ProxyAuthServer(AuthServer):
+ """A proxy server requiring authentication"""
+
+ def init_proxy_auth(self):
+ self.proxy_requests = True
+ self.auth_header_sent = 'Proxy-Authenticate'
+ self.auth_header_recv = 'Proxy-Authorization'
+ self.auth_error_code = 407
+
+
+class HTTPBasicAuthServer(HTTPAuthServer):
+ """An HTTP server requiring basic authentication"""
+
+ def __init__(self, protocol_version=None):
+ HTTPAuthServer.__init__(self, BasicAuthRequestHandler, 'basic',
+ protocol_version=protocol_version)
+ self.init_http_auth()
+
+
+class HTTPDigestAuthServer(DigestAuthServer, HTTPAuthServer):
+ """An HTTP server requiring digest authentication"""
+
+ def __init__(self, protocol_version=None):
+ DigestAuthServer.__init__(self, DigestAuthRequestHandler, 'digest',
+ protocol_version=protocol_version)
+ self.init_http_auth()
+
+
+class HTTPBasicAndDigestAuthServer(DigestAuthServer, HTTPAuthServer):
+ """An HTTP server requiring basic or digest authentication"""
+
+ def __init__(self, protocol_version=None):
+ DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler,
+ 'basicdigest',
+ protocol_version=protocol_version)
+ self.init_http_auth()
+ # We really accept Digest only
+ self.auth_scheme = 'digest'
+
+
+class ProxyBasicAuthServer(ProxyAuthServer):
+ """A proxy server requiring basic authentication"""
+
+ def __init__(self, protocol_version=None):
+ ProxyAuthServer.__init__(self, BasicAuthRequestHandler, 'basic',
+ protocol_version=protocol_version)
+ self.init_proxy_auth()
+
+
+class ProxyDigestAuthServer(DigestAuthServer, ProxyAuthServer):
+ """A proxy server requiring basic authentication"""
+
+ def __init__(self, protocol_version=None):
+ ProxyAuthServer.__init__(self, DigestAuthRequestHandler, 'digest',
+ protocol_version=protocol_version)
+ self.init_proxy_auth()
+
+
+class ProxyBasicAndDigestAuthServer(DigestAuthServer, ProxyAuthServer):
+ """An proxy server requiring basic or digest authentication"""
+
+ def __init__(self, protocol_version=None):
+ DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler,
+ 'basicdigest',
+ protocol_version=protocol_version)
+ self.init_proxy_auth()
+ # We really accept Digest only
+ self.auth_scheme = 'digest'
+
+
diff --git a/bzrlib/tests/https_server.py b/bzrlib/tests/https_server.py
new file mode 100644
index 0000000..d4fac6e
--- /dev/null
+++ b/bzrlib/tests/https_server.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""HTTPS test server, available when ssl python module is available"""
+
+import ssl
+import sys
+
+from bzrlib.tests import (
+ http_server,
+ ssl_certs,
+ test_server,
+ )
+
+
+class TestingHTTPSServerMixin:
+
+ def __init__(self, key_file, cert_file):
+ self.key_file = key_file
+ self.cert_file = cert_file
+
+ def _get_ssl_request(self, sock, addr):
+ """Wrap the socket with SSL"""
+ ssl_sock = ssl.wrap_socket(sock, server_side=True,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ do_handshake_on_connect=False)
+ return ssl_sock, addr
+
+ def verify_request(self, request, client_address):
+ """Verify the request.
+
+ Return True if we should proceed with this request, False if we should
+ not even touch a single byte in the socket!
+ """
+ serving = test_server.TestingTCPServerMixin.verify_request(
+ self, request, client_address)
+ if serving:
+ try:
+ request.do_handshake()
+ except ssl.SSLError, e:
+ # FIXME: We probably want more tests to capture which ssl
+ # errors are worth reporting, but mostly our tests want an https
+ # server that works -- vila 2012-01-19
+ return False
+ return serving
+
+ def ignored_exceptions_during_shutdown(self, e):
+ if (sys.version_info < (2, 7) and isinstance(e, TypeError)
+ and e.args[0] == "'member_descriptor' object is not callable"):
+ # Before python-2.7 (where this is fixed, as it also is in some
+ # Ubuntu 2.6 builds), the ssl socket fails to raise a socket.error
+ # when trying to read from a closed socket. This is rarely observed
+ # in practice but still makes valid selftest runs fail if not caught.
+ return True
+ base = test_server.TestingTCPServerMixin
+ return base.ignored_exceptions_during_shutdown(self, e)
+
+
+class TestingHTTPSServer(TestingHTTPSServerMixin,
+ http_server.TestingHTTPServer):
+
+ def __init__(self, server_address, request_handler_class,
+ test_case_server, key_file, cert_file):
+ TestingHTTPSServerMixin.__init__(self, key_file, cert_file)
+ http_server.TestingHTTPServer.__init__(
+ self, server_address, request_handler_class, test_case_server)
+
+ def get_request(self):
+ sock, addr = http_server.TestingHTTPServer.get_request(self)
+ return self._get_ssl_request(sock, addr)
+
+
+class TestingThreadingHTTPSServer(TestingHTTPSServerMixin,
+ http_server.TestingThreadingHTTPServer):
+
+ def __init__(self, server_address, request_handler_class,
+ test_case_server, key_file, cert_file):
+ TestingHTTPSServerMixin.__init__(self, key_file, cert_file)
+ http_server.TestingThreadingHTTPServer.__init__(
+ self, server_address, request_handler_class, test_case_server)
+
+ def get_request(self):
+ sock, addr = http_server.TestingThreadingHTTPServer.get_request(self)
+ return self._get_ssl_request(sock, addr)
+
+
+class HTTPSServer(http_server.HttpServer):
+
+ _url_protocol = 'https'
+
+ # The real servers depending on the protocol
+ http_server_class = {'HTTP/1.0': TestingHTTPSServer,
+ 'HTTP/1.1': TestingThreadingHTTPSServer,
+ }
+
+ # Provides usable defaults since an https server requires both a
+ # private key and a certificate to work.
+ def __init__(self, request_handler=http_server.TestingHTTPRequestHandler,
+ protocol_version=None,
+ key_file=ssl_certs.build_path('server_without_pass.key'),
+ cert_file=ssl_certs.build_path('server.crt')):
+ http_server.HttpServer.__init__(self, request_handler=request_handler,
+ protocol_version=protocol_version)
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.temp_files = []
+
+ def create_server(self):
+ return self.server_class(
+ (self.host, self.port), self.request_handler_class, self,
+ self.key_file, self.cert_file)
+
+
+class HTTPSServer_urllib(HTTPSServer):
+ """Subclass of HTTPSServer that gives https+urllib urls.
+
+ This is for use in testing: connections to this server will always go
+ through urllib where possible.
+ """
+
+ # urls returned by this server should require the urllib client impl
+ _url_protocol = 'https+urllib'
+
+
+class HTTPSServer_PyCurl(HTTPSServer):
+ """Subclass of HTTPSServer that gives http+pycurl urls.
+
+ This is for use in testing: connections to this server will always go
+ through pycurl where possible.
+ """
+
+ # We don't care about checking the pycurl availability as
+ # this server will be required only when pycurl is present
+
+ # urls returned by this server should require the pycurl client impl
+ _url_protocol = 'https+pycurl'
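+
+# Illustrative note: HTTPSServer is driven exactly like HttpServer, but it
+# wraps each connection in SSL and, by default, uses the test key and
+# certificate shipped in bzrlib.tests.ssl_certs (server_without_pass.key and
+# server.crt).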
diff --git a/bzrlib/tests/lock_helpers.py b/bzrlib/tests/lock_helpers.py
new file mode 100644
index 0000000..8ba01b0
--- /dev/null
+++ b/bzrlib/tests/lock_helpers.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Helper functions/classes for testing locking"""
+
+from bzrlib import errors
+from bzrlib.decorators import only_raises
+
+
+class TestPreventLocking(errors.LockError):
+ """A test exception for forcing locking failure: %(message)s"""
+
+
+class LockWrapper(object):
+ """A wrapper which lets us set locking ability.
+
+ This also lets us record what objects were locked in what order,
+ to ensure that locking happens correctly.
+ """
+
+ def __init__(self, sequence, other, other_id):
+ """Wrap a locking policy around a given object.
+
+ :param sequence: A list object where we should record actions
+ :param other: The object to control policy on
+ :param other_id: Something to identify the object by
+ """
+ self.__dict__['_sequence'] = sequence
+ self.__dict__['_other'] = other
+ self.__dict__['_other_id'] = other_id
+ self.__dict__['_allow_write'] = True
+ self.__dict__['_allow_read'] = True
+ self.__dict__['_allow_unlock'] = True
+
+ def __eq__(self, other):
+ # Branch objects look for controlfiles == repo.controlfiles.
+ if type(other) is LockWrapper:
+ return self._other == other._other
+ return False
+
+ def __getattr__(self, attr):
+ return getattr(self._other, attr)
+
+ def __setattr__(self, attr, val):
+ return setattr(self._other, attr, val)
+
+ def lock_read(self):
+ self._sequence.append((self._other_id, 'lr', self._allow_read))
+ if self._allow_read:
+ return self._other.lock_read()
+ raise TestPreventLocking('lock_read disabled')
+
+ def lock_write(self, token=None):
+ self._sequence.append((self._other_id, 'lw', self._allow_write))
+ if self._allow_write:
+ return self._other.lock_write()
+ raise TestPreventLocking('lock_write disabled')
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ self._sequence.append((self._other_id, 'ul', self._allow_unlock))
+ if self._allow_unlock:
+ return self._other.unlock()
+ raise TestPreventLocking('unlock disabled')
+
+ def disable_lock_read(self):
+ """Make a lock_read call fail"""
+ self.__dict__['_allow_read'] = False
+
+ def disable_unlock(self):
+ """Make an unlock call fail"""
+ self.__dict__['_allow_unlock'] = False
+
+ def disable_lock_write(self):
+ """Make a lock_write call fail"""
+ self.__dict__['_allow_write'] = False
+
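+# A minimal usage sketch (illustrative; assumes a test owning a lockable
+# object `branch` and a shared `sequence` list):
+#   wrapped = LockWrapper(sequence, branch, 'b')
+#   wrapped.disable_lock_write()
+#   wrapped.lock_write()  # raises TestPreventLocking and records
+#                         # ('b', 'lw', False) in sequence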
+
diff --git a/bzrlib/tests/matchers.py b/bzrlib/tests/matchers.py
new file mode 100644
index 0000000..f62952b
--- /dev/null
+++ b/bzrlib/tests/matchers.py
@@ -0,0 +1,244 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Matchers for bzrlib.
+
+Primarily test support: Matchers are used by self.assertThat in the bzrlib
+test suite. A matcher is a stateful test helper which can be used to determine
+if a passed object 'matches', much like a regex. If the object does not match,
+the mismatch can be described in a human-readable fashion. assertThat then
+raises if a mismatch occurs, showing the description as the assertion error.
+
+Matchers are designed to be more reusable and composable than layered
+assertions in Test Case objects, so they are recommended for new testing work.
+"""
+
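+# Typical use inside a TestCase (illustrative sketch using the matchers
+# defined below):
+#   self.assertThat(branch, RevisionHistoryMatches(['rev-1', 'rev-2']))
+#   self.assertThat(branch.lock_read, ReturnsUnlockable(branch))
+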
+__all__ = [
+ 'HasLayout',
+ 'MatchesAncestry',
+ 'ContainsNoVfsCalls',
+ 'ReturnsUnlockable',
+ 'RevisionHistoryMatches',
+ ]
+
+from bzrlib import (
+ osutils,
+ revision as _mod_revision,
+ )
+from bzrlib import lazy_import
+lazy_import.lazy_import(globals(),
+"""
+from bzrlib.smart.request import request_handlers as smart_request_handlers
+from bzrlib.smart import vfs
+""")
+
+from testtools.matchers import Equals, Mismatch, Matcher
+
+
+class ReturnsUnlockable(Matcher):
+ """A matcher that checks for the pattern we want lock* methods to have:
+
+ They should return an object with an unlock() method.
+ Calling that method should unlock the original object.
+
+ :ivar lockable_thing: The object which can be locked that will be
+ inspected.
+ """
+
+ def __init__(self, lockable_thing):
+ Matcher.__init__(self)
+ self.lockable_thing = lockable_thing
+
+ def __str__(self):
+ return ('ReturnsUnlockable(lockable_thing=%s)' %
+ self.lockable_thing)
+
+ def match(self, lock_method):
+ lock_method().unlock()
+ if self.lockable_thing.is_locked():
+ return _IsLocked(self.lockable_thing)
+ return None
+
+
+class _IsLocked(Mismatch):
+ """Something is locked."""
+
+ def __init__(self, lockable_thing):
+ self.lockable_thing = lockable_thing
+
+ def describe(self):
+ return "%s is locked" % self.lockable_thing
+
+
+class _AncestryMismatch(Mismatch):
+ """Ancestry matching mismatch."""
+
+ def __init__(self, tip_revision, got, expected):
+ self.tip_revision = tip_revision
+ self.got = got
+ self.expected = expected
+
+ def describe(self):
+ return "mismatched ancestry for revision %r was %r, expected %r" % (
+ self.tip_revision, self.got, self.expected)
+
+
+class MatchesAncestry(Matcher):
+ """A matcher that checks the ancestry of a particular revision.
+
+ :ivar repository: Repository in which to check the ancestry
+ :ivar revision_id: Revision id of the revision
+ """
+
+ def __init__(self, repository, revision_id):
+ Matcher.__init__(self)
+ self.repository = repository
+ self.revision_id = revision_id
+
+ def __str__(self):
+ return ('MatchesAncestry(repository=%r, revision_id=%r)' % (
+ self.repository, self.revision_id))
+
+ def match(self, expected):
+ self.repository.lock_read()
+ try:
+ graph = self.repository.get_graph()
+ got = [r for r, p in graph.iter_ancestry([self.revision_id])]
+ if _mod_revision.NULL_REVISION in got:
+ got.remove(_mod_revision.NULL_REVISION)
+ finally:
+ self.repository.unlock()
+ if sorted(got) != sorted(expected):
+ return _AncestryMismatch(self.revision_id, sorted(got),
+ sorted(expected))
+
+
+class HasLayout(Matcher):
+ """A matcher that checks if a tree has a specific layout.
+
+ :ivar entries: List of expected entries, as (path, file_id) pairs.
+ """
+
+ def __init__(self, entries):
+ Matcher.__init__(self)
+ self.entries = entries
+
+ def get_tree_layout(self, tree):
+ """Get the (path, file_id) pairs for the current tree."""
+ tree.lock_read()
+ try:
+ for path, ie in tree.iter_entries_by_dir():
+ if ie.parent_id is None:
+ yield (u"", ie.file_id)
+ else:
+ yield (path+ie.kind_character(), ie.file_id)
+ finally:
+ tree.unlock()
+
+ @staticmethod
+ def _strip_unreferenced_directories(entries):
+ """Strip all directories that don't (in)directly contain any files.
+
+ :param entries: List of path strings or (path, ie) tuples to process
+ """
+ directories = []
+ for entry in entries:
+ if isinstance(entry, basestring):
+ path = entry
+ else:
+ path = entry[0]
+ if not path or path[-1] == "/":
+ # directory
+ directories.append((path, entry))
+ else:
+ # Yield the referenced parent directories
+ for dirpath, direntry in directories:
+ if osutils.is_inside(dirpath, path):
+ yield direntry
+ directories = []
+ yield entry
+
+ def __str__(self):
+ return 'HasLayout(%r)' % self.entries
+
+ def match(self, tree):
+ actual = list(self.get_tree_layout(tree))
+ if self.entries and isinstance(self.entries[0], basestring):
+ actual = [path for (path, fileid) in actual]
+ if not tree.has_versioned_directories():
+ entries = list(self._strip_unreferenced_directories(self.entries))
+ else:
+ entries = self.entries
+ return Equals(entries).match(actual)
+
+
+class RevisionHistoryMatches(Matcher):
+ """A matcher that checks if a branch has a specific revision history.
+
+ :ivar history: Revision history, as list of revisions. Oldest first.
+ """
+
+ def __init__(self, history):
+ Matcher.__init__(self)
+ self.expected = history
+
+ def __str__(self):
+ return 'RevisionHistoryMatches(%r)' % self.expected
+
+ def match(self, branch):
+ branch.lock_read()
+ try:
+ graph = branch.repository.get_graph()
+ history = list(graph.iter_lefthand_ancestry(
+ branch.last_revision(), [_mod_revision.NULL_REVISION]))
+ history.reverse()
+ finally:
+ branch.unlock()
+ return Equals(self.expected).match(history)
+
+
+class _NoVfsCallsMismatch(Mismatch):
+ """Mismatch describing a list of HPSS calls which includes VFS requests."""
+
+ def __init__(self, vfs_calls):
+ self.vfs_calls = vfs_calls
+
+ def describe(self):
+ return "no VFS calls expected, got: %s" % ",".join([
+ "%s(%s)" % (c.method,
+ ", ".join([repr(a) for a in c.args])) for c in self.vfs_calls])
+
+
+class ContainsNoVfsCalls(Matcher):
+ """Ensure that none of the specified calls are HPSS calls."""
+
+ def __str__(self):
+ return 'ContainsNoVfsCalls()'
+
+ @classmethod
+ def match(cls, hpss_calls):
+ vfs_calls = []
+ for call in hpss_calls:
+ try:
+ request_method = smart_request_handlers.get(call.call.method)
+ except KeyError:
+ # A method we don't know about doesn't count as a VFS method.
+ continue
+ if issubclass(request_method, vfs.VfsRequest):
+ vfs_calls.append(call.call)
+ if len(vfs_calls) == 0:
+ return None
+ return _NoVfsCallsMismatch(vfs_calls)
diff --git a/bzrlib/tests/per_branch/__init__.py b/bzrlib/tests/per_branch/__init__.py
new file mode 100644
index 0000000..5f0719f
--- /dev/null
+++ b/bzrlib/tests/per_branch/__init__.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# and others
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Branch implementation tests for bzr.
+
+These test the conformance of all the branch variations to the expected API.
+Specific tests for individual formats are in the `tests/test_branch` file
+rather than in `tests/per_branch/*.py`.
+"""
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.branch import format_registry
+from bzrlib.remote import RemoteBranchFormat
+from bzrlib.tests import test_server
+from bzrlib.tests.per_controldir.test_controldir import TestCaseWithControlDir
+from bzrlib.transport import memory
+
+
+def make_scenarios(transport_server, transport_readonly_server,
+ formats, vfs_transport_factory=None, name_suffix=''):
+ """Transform the input formats to a list of scenarios.
+
+ :param formats: A list of (branch_format, bzrdir_format).
+ """
+ result = []
+ for branch_format, bzrdir_format in formats:
+ # some branches don't have separate format objects,
+ # so we have a conditional here to handle them.
+ scenario_name = getattr(branch_format, '__name__',
+ branch_format.__class__.__name__)
+ scenario_name += name_suffix
+ scenario = (scenario_name, {
+ "transport_server":transport_server,
+ "transport_readonly_server":transport_readonly_server,
+ "bzrdir_format":bzrdir_format,
+ "branch_format":branch_format,
+ })
+ result.append(scenario)
+ return result
+
+
+class TestCaseWithBranch(TestCaseWithControlDir):
+ """This helper will be parameterised in each per_branch test."""
+
+ def setUp(self):
+ super(TestCaseWithBranch, self).setUp()
+ self.branch = None
+
+ def get_branch(self):
+ if self.branch is None:
+ self.branch = self.make_branch('abranch')
+ return self.branch
+
+ def get_default_format(self):
+ format = self.bzrdir_format
+ self.assertEquals(format.get_branch_format(), self.branch_format)
+ return format
+
+ def make_branch(self, relpath, format=None):
+ try:
+ return super(TestCaseWithBranch, self).make_branch(relpath, format)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable('Uninitializable branch format')
+
+ def create_tree_with_merge(self):
+ """Create a branch with a simple ancestry.
+
+ The graph should look like:
+ digraph H {
+ "rev-1" -> "rev-2" -> "rev-3";
+ "rev-1" -> "rev-1.1.1" -> "rev-3";
+ }
+
+ Or in ASCII:
+ 1
+ |\
+ 2 1.1.1
+ |/
+ 3
+ """
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ try:
+ tree.add('')
+ tree.commit('first', rev_id='rev-1')
+ tree.commit('second', rev_id='rev-1.1.1')
+ # Uncommit that last commit and switch to the other line
+ tree.branch.set_last_revision_info(1, 'rev-1')
+ tree.set_parent_ids(['rev-1'])
+ tree.commit('alt-second', rev_id='rev-2')
+ tree.set_parent_ids(['rev-2', 'rev-1.1.1'])
+ tree.commit('third', rev_id='rev-3')
+ finally:
+ tree.unlock()
+
+ return tree
+
+
+def branch_scenarios():
+ """ """
+ # Generate a list of branch formats and their associated bzrdir formats to
+ # use.
+ combinations = [(format, format._matchingbzrdir) for format in
+ format_registry._get_all()]
+ scenarios = make_scenarios(
+ # None here will cause the default vfs transport server to be used.
+ None,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ combinations)
+ # Add RemoteBranch tests, which need a special server.
+ remote_branch_format = RemoteBranchFormat()
+ scenarios.extend(make_scenarios(
+ test_server.SmartTCPServer_for_testing,
+ test_server.ReadonlySmartTCPServer_for_testing,
+ [(remote_branch_format, remote_branch_format._matchingbzrdir)],
+ memory.MemoryServer,
+ name_suffix='-default'))
+ # Also add tests for RemoteBranch with HPSS protocol v2 (i.e. bzr <1.6)
+ # server.
+ scenarios.extend(make_scenarios(
+ test_server.SmartTCPServer_for_testing_v2_only,
+ test_server.ReadonlySmartTCPServer_for_testing_v2_only,
+ [(remote_branch_format, remote_branch_format._matchingbzrdir)],
+ memory.MemoryServer,
+ name_suffix='-v2'))
+ return scenarios
+
+
+def load_tests(standard_tests, module, loader):
+ per_branch_mod_names = [
+ 'branch',
+ 'break_lock',
+ 'check',
+ 'config',
+ 'create_checkout',
+ 'create_clone',
+ 'commit',
+ 'dotted_revno_to_revision_id',
+ 'get_revision_id_to_revno_map',
+ 'hooks',
+ 'http',
+ 'iter_merge_sorted_revisions',
+ 'last_revision_info',
+ 'locking',
+ 'parent',
+ 'permissions',
+ 'pull',
+ 'push',
+ 'reconcile',
+ 'revision_id_to_dotted_revno',
+ 'revision_id_to_revno',
+ 'sprout',
+ 'stacking',
+ 'tags',
+ 'uncommit',
+ 'update',
+ ]
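+ # Each module listed above is loaded and its tests are multiplied by
+ # the branch scenarios, so every test runs once per branch format.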
+ sub_tests = loader.loadTestsFromModuleNames(
+ ['bzrlib.tests.per_branch.test_' + name
+ for name in per_branch_mod_names])
+ return tests.multiply_tests(sub_tests, branch_scenarios(), standard_tests)
diff --git a/bzrlib/tests/per_branch/test_branch.py b/bzrlib/tests/per_branch/test_branch.py
new file mode 100644
index 0000000..186b171
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_branch.py
@@ -0,0 +1,1059 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch implementations - tests a branch format."""
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ config,
+ delta as _mod_delta,
+ errors,
+ merge,
+ osutils,
+ urlutils,
+ transport,
+ remote,
+ repository,
+ revision,
+ symbol_versioning,
+ tests,
+ )
+from bzrlib.tests import (
+ per_branch,
+ )
+from bzrlib.tests.http_server import HttpServer
+from bzrlib.transport import memory
+
+
+class TestTestCaseWithBranch(per_branch.TestCaseWithBranch):
+
+ def test_branch_format_matches_bzrdir_branch_format(self):
+ bzrdir_branch_format = self.bzrdir_format.get_branch_format()
+ self.assertIs(self.branch_format.__class__,
+ bzrdir_branch_format.__class__)
+
+ def test_make_branch_gets_expected_format(self):
+ branch = self.make_branch('.')
+ self.assertIs(self.branch_format.__class__,
+ branch._format.__class__)
+
+
+class TestBranch(per_branch.TestCaseWithBranch):
+
+ def test_create_tree_with_merge(self):
+ tree = self.create_tree_with_merge()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ graph = tree.branch.repository.get_graph()
+ ancestry_graph = graph.get_parent_map(
+ tree.branch.repository.all_revision_ids())
+ self.assertEqual({'rev-1':('null:',),
+ 'rev-2':('rev-1', ),
+ 'rev-1.1.1':('rev-1', ),
+ 'rev-3':('rev-2', 'rev-1.1.1', ),
+ }, ancestry_graph)
+
+ def test_revision_ids_are_utf8(self):
+ wt = self.make_branch_and_tree('tree')
+ wt.commit('f', rev_id='rev1')
+ wt.commit('f', rev_id='rev2')
+ wt.commit('f', rev_id='rev3')
+
+ br = self.get_branch()
+ br.fetch(wt.branch)
+ br.generate_revision_history('rev3')
+ for revision_id in ['rev3', 'rev2', 'rev1']:
+ self.assertIsInstance(revision_id, str)
+ last = br.last_revision()
+ self.assertEqual('rev3', last)
+ self.assertIsInstance(last, str)
+ revno, last = br.last_revision_info()
+ self.assertEqual(3, revno)
+ self.assertEqual('rev3', last)
+ self.assertIsInstance(last, str)
+
+ def test_fetch_revisions(self):
+ """Test fetch-revision operation."""
+ wt = self.make_branch_and_tree('b1')
+ b1 = wt.branch
+ self.build_tree_contents([('b1/foo', 'hello')])
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('lala!', rev_id='revision-1', allow_pointless=False)
+
+ b2 = self.make_branch('b2')
+ b2.fetch(b1)
+
+ rev = b2.repository.get_revision('revision-1')
+ tree = b2.repository.revision_tree('revision-1')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(tree.get_file_text('foo-id'), 'hello')
+
+ def test_get_revision_delta(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ tree_a.add('foo', 'file1')
+ tree_a.commit('rev1', rev_id='rev1')
+ self.build_tree(['a/vla'])
+ tree_a.add('vla', 'file2')
+ tree_a.commit('rev2', rev_id='rev2')
+
+ delta = self.applyDeprecated(symbol_versioning.deprecated_in(
+ (2, 5, 0)), tree_a.branch.get_revision_delta, 1)
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([('foo', 'file1', 'file')], delta.added)
+ delta = self.applyDeprecated(symbol_versioning.deprecated_in(
+ (2, 5, 0)), tree_a.branch.get_revision_delta, 2)
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([('vla', 'file2', 'file')], delta.added)
+
+ def get_unbalanced_tree_pair(self):
+ """Return two branches, a and b, with one file in a."""
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/b', 'b')])
+ tree_a.add('b')
+ tree_a.commit("silly commit", rev_id='A')
+
+ tree_b = self.make_branch_and_tree('b')
+ return tree_a, tree_b
+
+ def get_balanced_branch_pair(self):
+ """Returns br_a, br_b as with one commit in a, and b has a's stores."""
+ tree_a, tree_b = self.get_unbalanced_tree_pair()
+ tree_b.branch.repository.fetch(tree_a.branch.repository)
+ return tree_a, tree_b
+
+ def test_clone_partial(self):
+ """Copy only part of the history of a branch."""
+ # TODO: RBC 20060208 test with a revision not on revision-history.
+ # what should that behaviour be ? Emailed the list.
+ # First, make a branch with two commits.
+ wt_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/one'])
+ wt_a.add(['one'])
+ wt_a.commit('commit one', rev_id='1')
+ self.build_tree(['a/two'])
+ wt_a.add(['two'])
+ wt_a.commit('commit two', rev_id='2')
+ # Now make a copy of the repository.
+ repo_b = self.make_repository('b')
+ wt_a.branch.repository.copy_content_into(repo_b)
+ # wt_a might be a lightweight checkout, so get a hold of the actual
+ # branch (because you can't do a partial clone of a lightweight
+ # checkout).
+ branch = wt_a.branch.bzrdir.open_branch()
+ # Then clone the branch into the new repository, specifying a revision
+ # ID; the new branch's history will stop at that revision.
+ br_b = branch.clone(repo_b.bzrdir, revision_id='1')
+ self.assertEqual('1', br_b.last_revision())
+
+ def get_parented_branch(self):
+ wt_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/one'])
+ wt_a.add(['one'])
+ wt_a.commit('commit one', rev_id='1')
+
+ branch_b = wt_a.branch.bzrdir.sprout('b', revision_id='1').open_branch()
+ self.assertEqual(wt_a.branch.base, branch_b.get_parent())
+ return branch_b
+
+ def test_clone_branch_nickname(self):
+ # test that the nick name is always preserved
+ raise tests.TestSkipped('XXX branch cloning is not yet tested.')
+
+ def test_clone_branch_parent(self):
+ # test that the parent is always preserved
+ branch_b = self.get_parented_branch()
+ repo_c = self.make_repository('c')
+ branch_b.repository.copy_content_into(repo_c)
+ branch_c = branch_b.clone(repo_c.bzrdir)
+ self.assertNotEqual(None, branch_c.get_parent())
+ self.assertEqual(branch_b.get_parent(), branch_c.get_parent())
+
+ # We can also set a specific parent, and it should be honored
+ random_parent = 'http://example.com/path/to/branch'
+ branch_b.set_parent(random_parent)
+ repo_d = self.make_repository('d')
+ branch_b.repository.copy_content_into(repo_d)
+ branch_d = branch_b.clone(repo_d.bzrdir)
+ self.assertEqual(random_parent, branch_d.get_parent())
+
+ def test_submit_branch(self):
+ """Submit location can be queried and set"""
+ branch = self.make_branch('branch')
+ self.assertEqual(branch.get_submit_branch(), None)
+ branch.set_submit_branch('sftp://example.com')
+ self.assertEqual(branch.get_submit_branch(), 'sftp://example.com')
+ branch.set_submit_branch('sftp://example.net')
+ self.assertEqual(branch.get_submit_branch(), 'sftp://example.net')
+
+ def test_public_branch(self):
+ """public location can be queried and set"""
+ branch = self.make_branch('branch')
+ self.assertEqual(branch.get_public_branch(), None)
+ branch.set_public_branch('sftp://example.com')
+ self.assertEqual(branch.get_public_branch(), 'sftp://example.com')
+ branch.set_public_branch('sftp://example.net')
+ self.assertEqual(branch.get_public_branch(), 'sftp://example.net')
+ branch.set_public_branch(None)
+ self.assertEqual(branch.get_public_branch(), None)
+
+ def test_record_initial_ghost(self):
+ """Branches should support having ghosts."""
+ wt = self.make_branch_and_tree('.')
+ if not wt.branch.repository._format.supports_ghosts:
+ raise tests.TestNotApplicable("repository format does not "
+ "support ghosts")
+ wt.set_parent_ids(['non:existent@rev--ision--0--2'],
+ allow_leftmost_as_ghost=True)
+ self.assertEqual(['non:existent@rev--ision--0--2'],
+ wt.get_parent_ids())
+ rev_id = wt.commit('commit against a ghost first parent.')
+ rev = wt.branch.repository.get_revision(rev_id)
+ self.assertEqual(rev.parent_ids, ['non:existent@rev--ision--0--2'])
+ # parent_sha1s is not populated now, WTF. rbc 20051003
+ self.assertEqual(len(rev.parent_sha1s), 0)
+
+ def test_record_two_ghosts(self):
+ """Recording with all ghosts works."""
+ wt = self.make_branch_and_tree('.')
+ if not wt.branch.repository._format.supports_ghosts:
+ raise tests.TestNotApplicable("repository format does not "
+ "support ghosts")
+ wt.set_parent_ids([
+ 'foo@azkhazan-123123-abcabc',
+ 'wibble@fofof--20050401--1928390812',
+ ],
+ allow_leftmost_as_ghost=True)
+ rev_id = wt.commit("commit from ghost base with one merge")
+ # the revision should have been committed with two parents
+ rev = wt.branch.repository.get_revision(rev_id)
+ self.assertEqual(['foo@azkhazan-123123-abcabc',
+ 'wibble@fofof--20050401--1928390812'],
+ rev.parent_ids)
+
+ def test_bad_revision(self):
+ self.assertRaises(errors.InvalidRevisionId,
+ self.get_branch().repository.get_revision,
+ None)
+
+ def test_nicks_bzr(self):
+ """Test the behaviour of branch nicks specific to bzr branches.
+
+ Nicknames are implicitly the name of the branch's directory, unless an
+ explicit nickname is set. That is, an explicit nickname always
+ overrides the implicit one.
+
+ """
+ t = self.get_transport()
+ branch = self.make_branch('bzr.dev')
+ if not isinstance(branch, _mod_branch.BzrBranch):
+ raise tests.TestNotApplicable("not a bzr branch format")
+ # The nick will be 'bzr.dev', because there is no explicit nick set.
+ self.assertEqual(branch.nick, 'bzr.dev')
+ # Move the branch to a different directory, 'bzr.ab'. Now that branch
+ # will report its nick as 'bzr.ab'.
+ t.move('bzr.dev', 'bzr.ab')
+ branch = _mod_branch.Branch.open(self.get_url('bzr.ab'))
+ self.assertEqual(branch.nick, 'bzr.ab')
+ # Set the branch nick explicitly. This will ensure there's a branch
+ # config file in the branch.
+ branch.nick = "Aaron's branch"
+ if not isinstance(branch, remote.RemoteBranch):
+ self.assertTrue(branch._transport.has("branch.conf"))
+ # Because the nick has been set explicitly, the nick is now always
+ # "Aaron's branch", regardless of directory name.
+ self.assertEqual(branch.nick, "Aaron's branch")
+ t.move('bzr.ab', 'integration')
+ branch = _mod_branch.Branch.open(self.get_url('integration'))
+ self.assertEqual(branch.nick, "Aaron's branch")
+ branch.nick = u"\u1234"
+ self.assertEqual(branch.nick, u"\u1234")
+
+ def test_nicks(self):
+ """Test explicit and implicit branch nicknames.
+
+ A nickname is always available, whether set explicitly or not.
+ """
+ t = self.get_transport()
+ branch = self.make_branch('bzr.dev')
+ # An implicit nick name is set; what it is exactly depends on the
+ # format.
+ self.assertIsInstance(branch.nick, basestring)
+ # Set the branch nick explicitly.
+ branch.nick = "Aaron's branch"
+ # Because the nick has been set explicitly, the nick is now always
+ # "Aaron's branch".
+ self.assertEqual(branch.nick, "Aaron's branch")
+ branch.nick = u"\u1234"
+ self.assertEqual(branch.nick, u"\u1234")
+
+ def test_commit_nicks(self):
+ """Nicknames are committed to the revision"""
+ wt = self.make_branch_and_tree('bzr.dev')
+ branch = wt.branch
+ branch.nick = "My happy branch"
+ wt.commit('My commit respect da nick.')
+ committed = branch.repository.get_revision(branch.last_revision())
+ self.assertEqual(committed.properties["branch-nick"],
+ "My happy branch")
+
+ def test_create_colocated(self):
+ try:
+ repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ return
+ if not repo.bzrdir._format.colocated_branches:
+ raise tests.TestNotApplicable(
+ "control dir does not support colocated branches")
+ self.assertEquals(0, len(repo.bzrdir.list_branches()))
+ if not self.bzrdir_format.colocated_branches:
+ raise tests.TestNotApplicable("control dir format does not support "
+ "colocated branches")
+ try:
+ child_branch1 = self.branch_format.initialize(repo.bzrdir,
+ name='branch1')
+ except errors.UninitializableFormat:
+ # branch references are not default init'able and
+ # not all bzrdirs support colocated branches.
+ return
+ self.assertEquals(1, len(repo.bzrdir.list_branches()))
+ self.branch_format.initialize(repo.bzrdir, name='branch2')
+ self.assertEquals(2, len(repo.bzrdir.list_branches()))
+
+ def test_create_append_revisions_only(self):
+ try:
+ repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ return
+ for val in (True, False):
+ try:
+ branch = self.branch_format.initialize(repo.bzrdir,
+ append_revisions_only=val)
+ except (errors.UninitializableFormat, errors.UpgradeRequired):
+ # branch references are not default init'able and
+ # not all branches support append_revisions_only
+ return
+ self.assertEquals(val, branch.get_append_revisions_only())
+ repo.bzrdir.destroy_branch()
+
+ def test_get_set_append_revisions_only(self):
+ branch = self.make_branch('.')
+ if branch._format.supports_set_append_revisions_only():
+ branch.set_append_revisions_only(True)
+ self.assertTrue(branch.get_append_revisions_only())
+ branch.set_append_revisions_only(False)
+ self.assertFalse(branch.get_append_revisions_only())
+ else:
+ self.assertRaises(errors.UpgradeRequired,
+ branch.set_append_revisions_only, True)
+ self.assertFalse(branch.get_append_revisions_only())
+
+ def test_create_open_branch_uses_repository(self):
+ try:
+ repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ raise tests.TestNotApplicable("requires shared repository support")
+ child_transport = repo.bzrdir.root_transport.clone('child')
+ child_transport.mkdir('.')
+ try:
+ child_dir = self.bzrdir_format.initialize_on_transport(child_transport)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable("control dir format not initializable")
+ try:
+ child_branch = self.branch_format.initialize(child_dir)
+ except errors.UninitializableFormat:
+ # branch references are not default init'able.
+ return
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ child_branch.repository.bzrdir.root_transport.base)
+ child_branch = _mod_branch.Branch.open(self.get_url('child'))
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ child_branch.repository.bzrdir.root_transport.base)
+
+ def test_format_description(self):
+ tree = self.make_branch_and_tree('tree')
+ text = tree.branch._format.get_format_description()
+ self.assertTrue(len(text))
+
+ def test_get_commit_builder(self):
+ branch = self.make_branch(".")
+ branch.lock_write()
+ builder = branch.get_commit_builder([])
+ self.assertIsInstance(builder, repository.CommitBuilder)
+ branch.repository.commit_write_group()
+ branch.unlock()
+
+ def test_generate_revision_history(self):
+ """Create a fake revision history easily."""
+ tree = self.make_branch_and_tree('.')
+ rev1 = tree.commit('foo')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ graph = tree.branch.repository.get_graph()
+ orig_history = list(
+ graph.iter_lefthand_ancestry(
+ tree.branch.last_revision(), [revision.NULL_REVISION]))
+ rev2 = tree.commit('bar', allow_pointless=True)
+ tree.branch.generate_revision_history(rev1)
+ self.assertEqual(orig_history, list(
+ graph.iter_lefthand_ancestry(
+ tree.branch.last_revision(), [revision.NULL_REVISION])))
+
+ def test_generate_revision_history_NULL_REVISION(self):
+ tree = self.make_branch_and_tree('.')
+ rev1 = tree.commit('foo')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.branch.generate_revision_history(revision.NULL_REVISION)
+ self.assertEqual(revision.NULL_REVISION, tree.branch.last_revision())
+
+ def test_create_checkout(self):
+ tree_a = self.make_branch_and_tree('a')
+ branch_a = tree_a.branch
+ checkout_b = branch_a.create_checkout('b')
+ self.assertEqual('null:', checkout_b.last_revision())
+ checkout_b.commit('rev1', rev_id='rev1')
+ self.assertEqual('rev1', branch_a.last_revision())
+ self.assertNotEqual(checkout_b.branch.base, branch_a.base)
+
+ checkout_c = branch_a.create_checkout('c', lightweight=True)
+ self.assertEqual('rev1', checkout_c.last_revision())
+ checkout_c.commit('rev2', rev_id='rev2')
+ self.assertEqual('rev2', branch_a.last_revision())
+ self.assertEqual(checkout_c.branch.base, branch_a.base)
+
+ checkout_d = branch_a.create_checkout('d', lightweight=True)
+ self.assertEqual('rev2', checkout_d.last_revision())
+ checkout_e = branch_a.create_checkout('e')
+ self.assertEqual('rev2', checkout_e.last_revision())
+
+ def test_create_anonymous_lightweight_checkout(self):
+ """A lightweight checkout from a readonly branch should succeed."""
+ tree_a = self.make_branch_and_tree('a')
+ rev_id = tree_a.commit('put some content in the branch')
+ # open the branch via a readonly transport
+ url = self.get_readonly_url(urlutils.basename(tree_a.branch.base))
+ t = transport.get_transport_from_url(url)
+ if not tree_a.branch.bzrdir._format.supports_transport(t):
+ raise tests.TestNotApplicable("format does not support transport")
+ source_branch = _mod_branch.Branch.open(url)
+ # sanity check that the test will be valid
+ self.assertRaises((errors.LockError, errors.TransportNotPossible),
+ source_branch.lock_write)
+ checkout = source_branch.create_checkout('c', lightweight=True)
+ self.assertEqual(rev_id, checkout.last_revision())
+
+ def test_create_anonymous_heavyweight_checkout(self):
+ """A regular checkout from a readonly branch should succeed."""
+ tree_a = self.make_branch_and_tree('a')
+ rev_id = tree_a.commit('put some content in the branch')
+ # open the branch via a readonly transport
+ url = self.get_readonly_url(
+ osutils.basename(tree_a.branch.base.rstrip('/')))
+ t = transport.get_transport_from_url(url)
+ if not tree_a.branch.bzrdir._format.supports_transport(t):
+ raise tests.TestNotApplicable("format does not support transport")
+ source_branch = _mod_branch.Branch.open(url)
+ # sanity check that the test will be valid
+ self.assertRaises((errors.LockError, errors.TransportNotPossible),
+ source_branch.lock_write)
+ checkout = source_branch.create_checkout('c')
+ self.assertEqual(rev_id, checkout.last_revision())
+
+ def test_heads_to_fetch(self):
+ # heads_to_fetch is a method that returns a collection of revids that
+ # need to be fetched to copy this branch into another repo. At a
+ # minimum this will include the tip.
+ # (In native formats, this is the tip + tags, but other formats may
+ # have other revs needed)
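+ # (Only the tip is guaranteed to be in must_fetch; should_fetch
+ # typically carries optional extras such as tagged revisions.)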
+ tree = self.make_branch_and_tree('a')
+ tree.commit('first commit', rev_id='rev1')
+ tree.commit('second commit', rev_id='rev2')
+ must_fetch, should_fetch = tree.branch.heads_to_fetch()
+ self.assertTrue('rev2' in must_fetch)
+
+ def test_heads_to_fetch_not_null_revision(self):
+ # NULL_REVISION does not appear in the result of heads_to_fetch, even
+ # for an empty branch.
+ tree = self.make_branch_and_tree('a')
+ must_fetch, should_fetch = tree.branch.heads_to_fetch()
+ self.assertFalse(revision.NULL_REVISION in must_fetch)
+ self.assertFalse(revision.NULL_REVISION in should_fetch)
+
+
+class TestBranchFormat(per_branch.TestCaseWithBranch):
+
+ def test_branch_format_network_name(self):
+ br = self.make_branch('.')
+ format = br._format
+ network_name = format.network_name()
+ self.assertIsInstance(network_name, str)
+ # We want to test that the network_name matches the actual format on
+ # disk. For local branches that means that using network_name as a key
+ # in the registry gives back the same format. For remote branches we
+ # check that the network_name of the RemoteBranchFormat we have locally
+ # matches the actual format present on disk.
+ if isinstance(format, remote.RemoteBranchFormat):
+ br._ensure_real()
+ real_branch = br._real_branch
+ self.assertEqual(real_branch._format.network_name(), network_name)
+ else:
+ registry = _mod_branch.network_format_registry
+ looked_up_format = registry.get(network_name)
+ self.assertEqual(format.__class__, looked_up_format.__class__)
+
+ def test_get_config_calls(self):
+ # Smoke test that all branches succeed in getting a config
+ br = self.make_branch('.')
+ br.get_config()
+ br.get_config_stack()
+
+
+class ChrootedTests(per_branch.TestCaseWithBranch):
+ """A support class that provides readonly urls outside the local namespace.
+
+ This is done by checking if self.transport_server is a MemoryServer. if it
+ is then we are chrooted already, if it is not then an HttpServer is used
+ for readonly urls.
+ """
+
+ def setUp(self):
+ super(ChrootedTests, self).setUp()
+ if not self.vfs_transport_factory == memory.MemoryServer:
+ self.transport_readonly_server = HttpServer
+
+ def test_open_containing(self):
+ self.assertRaises(errors.NotBranchError,
+ _mod_branch.Branch.open_containing,
+ self.get_readonly_url(''))
+ self.assertRaises(errors.NotBranchError,
+ _mod_branch.Branch.open_containing,
+ self.get_readonly_url('g/p/q'))
+ branch = self.make_branch('.')
+ if not branch.bzrdir._format.supports_transport(
+ transport.get_transport_from_url(self.get_readonly_url('.'))):
+ raise tests.TestNotApplicable("format does not support transport")
+ branch, relpath = _mod_branch.Branch.open_containing(
+ self.get_readonly_url(''))
+ self.assertEqual('', relpath)
+ branch, relpath = _mod_branch.Branch.open_containing(
+ self.get_readonly_url('g/p/q'))
+ self.assertEqual('g/p/q', relpath)
+
+
+class InstrumentedTransaction(object):
+
+ def finish(self):
+ self.calls.append('finish')
+
+ def __init__(self):
+ self.calls = []
+
+
+class TestDecorator(object):
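+ """Stub that records the lock/unlock calls made by the branch lock decorators."""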
+
+ def __init__(self):
+ self._calls = []
+
+ def lock_read(self):
+ self._calls.append('lr')
+
+ def lock_write(self):
+ self._calls.append('lw')
+
+ def unlock(self):
+ self._calls.append('ul')
+
+ @_mod_branch.needs_read_lock
+ def do_with_read(self):
+ return 1
+
+ @_mod_branch.needs_read_lock
+ def except_with_read(self):
+ raise RuntimeError
+
+ @_mod_branch.needs_write_lock
+ def do_with_write(self):
+ return 2
+
+ @_mod_branch.needs_write_lock
+ def except_with_write(self):
+ raise RuntimeError
+
+
+class TestDecorators(tests.TestCase):
+
+ def test_needs_read_lock(self):
+ branch = TestDecorator()
+ self.assertEqual(1, branch.do_with_read())
+ self.assertEqual(['lr', 'ul'], branch._calls)
+
+ def test_excepts_in_read_lock(self):
+ branch = TestDecorator()
+ self.assertRaises(RuntimeError, branch.except_with_read)
+ self.assertEqual(['lr', 'ul'], branch._calls)
+
+ def test_needs_write_lock(self):
+ branch = TestDecorator()
+ self.assertEqual(2, branch.do_with_write())
+ self.assertEqual(['lw', 'ul'], branch._calls)
+
+ def test_excepts_in_write_lock(self):
+ branch = TestDecorator()
+ self.assertRaises(RuntimeError, branch.except_with_write)
+ self.assertEqual(['lw', 'ul'], branch._calls)
+
+
+class TestBranchPushLocations(per_branch.TestCaseWithBranch):
+
+ def test_get_push_location_unset(self):
+ self.assertEqual(None, self.get_branch().get_push_location())
+
+ def test_get_push_location_exact(self):
+ b = self.get_branch()
+ config.LocationConfig.from_string(
+ '[%s]\npush_location=foo\n' % (b.base,), b.base, save=True)
+ self.assertEqual("foo", self.get_branch().get_push_location())
+
+ def test_set_push_location(self):
+ branch = self.get_branch()
+ branch.set_push_location('foo')
+ self.assertEqual('foo', branch.get_push_location())
+
+
+class TestChildSubmitFormats(per_branch.TestCaseWithBranch):
+
+ def test_get_child_submit_format_default(self):
+ submit_format = self.get_branch().get_child_submit_format()
+ self.assertTrue(submit_format is None or
+ isinstance(submit_format, str))
+
+ def test_get_child_submit_format(self):
+ branch = self.get_branch()
+ branch.get_config_stack().set('child_submit_format', '10')
+ branch = self.get_branch()
+ self.assertEqual('10', branch.get_child_submit_format())
+
+
+class TestFormat(per_branch.TestCaseWithBranch):
+ """Tests for the format itself."""
+
+ def test_get_reference(self):
+ """get_reference on all regular branches should return None."""
+ if not self.branch_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ return
+ made_branch = self.make_branch('.')
+ self.assertEqual(None,
+ made_branch._format.get_reference(made_branch.bzrdir))
+
+ def test_set_reference(self):
+ """set_reference on all regular branches should be callable."""
+ if not self.branch_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ return
+ this_branch = self.make_branch('this')
+ other_branch = self.make_branch('other')
+ try:
+ this_branch._format.set_reference(this_branch.bzrdir, None,
+ other_branch)
+ except NotImplementedError:
+ # that's ok
+ pass
+ else:
+ ref = this_branch._format.get_reference(this_branch.bzrdir)
+ self.assertEqual(ref, other_branch.base)
+
+ def test_format_initialize_find_open(self):
+ # loopback test to check the current format initializes to itself.
+ if not self.branch_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ return
+ # supported formats must be able to init and open
+ t = self.get_transport()
+ readonly_t = transport.get_transport_from_url(self.get_readonly_url())
+ made_branch = self.make_branch('.')
+ self.assertIsInstance(made_branch, _mod_branch.Branch)
+
+ # find it via bzrdir opening:
+ opened_control = controldir.ControlDir.open(readonly_t.base)
+ direct_opened_branch = opened_control.open_branch()
+ self.assertEqual(direct_opened_branch.__class__, made_branch.__class__)
+ self.assertEqual(opened_control, direct_opened_branch.bzrdir)
+ self.assertIsInstance(direct_opened_branch._format,
+ self.branch_format.__class__)
+
+ # find it via Branch.open
+ opened_branch = _mod_branch.Branch.open(readonly_t.base)
+ self.assertIsInstance(opened_branch, made_branch.__class__)
+ self.assertEqual(made_branch._format.__class__,
+ opened_branch._format.__class__)
+ # if it has a unique id string, can we probe for it?
+ try:
+ self.branch_format.get_format_string()
+ except NotImplementedError:
+ return
+ self.assertEqual(self.branch_format,
+ opened_control.find_branch_format())
+
+
+class TestBound(per_branch.TestCaseWithBranch):
+
+ def test_bind_unbind(self):
+ branch = self.make_branch('1')
+ branch2 = self.make_branch('2')
+ try:
+ branch.bind(branch2)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable('Format does not support binding')
+ self.assertTrue(branch.unbind())
+ self.assertFalse(branch.unbind())
+ self.assertIs(None, branch.get_bound_location())
+
+ def test_old_bound_location(self):
+ branch = self.make_branch('branch1')
+ try:
+ self.assertIs(None, branch.get_old_bound_location())
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable(
+ 'Format does not store old bound locations')
+ branch2 = self.make_branch('branch2')
+ branch.bind(branch2)
+ self.assertIs(None, branch.get_old_bound_location())
+ branch.unbind()
+ self.assertContainsRe(branch.get_old_bound_location(), '\/branch2\/$')
+
+ def test_bind_diverged(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ tree_a.commit('rev1a')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ tree_a.commit('rev2a')
+ tree_b.commit('rev2b')
+ try:
+ tree_b.branch.bind(tree_a.branch)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable('Format does not support binding')
+
+ def test_unbind_clears_cached_master_branch(self):
+ """b.unbind clears any cached value of b.get_master_branch."""
+ master = self.make_branch('master')
+ branch = self.make_branch('branch')
+ try:
+ branch.bind(master)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable('Format does not support binding')
+ self.addCleanup(branch.lock_write().unlock)
+ self.assertNotEqual(None, branch.get_master_branch())
+ branch.unbind()
+ self.assertEqual(None, branch.get_master_branch())
+
+ def test_bind_clears_cached_master_branch(self):
+ """b.bind clears any cached value of b.get_master_branch."""
+ master1 = self.make_branch('master1')
+ master2 = self.make_branch('master2')
+ branch = self.make_branch('branch')
+ try:
+ branch.bind(master1)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable('Format does not support binding')
+ self.addCleanup(branch.lock_write().unlock)
+ self.assertNotEqual(None, branch.get_master_branch())
+ branch.bind(master2)
+ self.assertEqual('.', urlutils.relative_url(self.get_url('master2'),
+ branch.get_master_branch().base))
+
+ def test_set_bound_location_clears_cached_master_branch(self):
+ """b.set_bound_location clears any cached value of b.get_master_branch.
+ """
+ master1 = self.make_branch('master1')
+ master2 = self.make_branch('master2')
+ branch = self.make_branch('branch')
+ try:
+ branch.bind(master1)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable('Format does not support binding')
+ self.addCleanup(branch.lock_write().unlock)
+ self.assertNotEqual(None, branch.get_master_branch())
+ branch.set_bound_location(self.get_url('master2'))
+ self.assertEqual('.', urlutils.relative_url(self.get_url('master2'),
+ branch.get_master_branch().base))
+
+
+class TestStrict(per_branch.TestCaseWithBranch):
+
+ def test_strict_history(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ try:
+ tree1.branch.set_append_revisions_only(True)
+ except errors.UpgradeRequired:
+ raise tests.TestSkipped('Format does not support strict history')
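+ # With append_revisions_only set, the branch may only append to its
+ # existing mainline; a pull that would rewrite it is expected to raise
+ # AppendRevisionsOnlyViolation below.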
+ tree1.commit('empty commit')
+ tree2 = tree1.bzrdir.sprout('tree2').open_workingtree()
+ tree2.commit('empty commit 2')
+ tree1.pull(tree2.branch)
+ tree1.commit('empty commit 3')
+ tree2.commit('empty commit 4')
+ self.assertRaises(errors.DivergedBranches, tree1.pull, tree2.branch)
+ tree2.merge_from_branch(tree1.branch)
+ tree2.commit('empty commit 5')
+ self.assertRaises(errors.AppendRevisionsOnlyViolation, tree1.pull,
+ tree2.branch)
+ tree3 = tree1.bzrdir.sprout('tree3').open_workingtree()
+ tree3.merge_from_branch(tree2.branch)
+ tree3.commit('empty commit 6')
+ tree2.pull(tree3.branch)
+
+
+class TestIgnoreFallbacksParameter(per_branch.TestCaseWithBranch):
+
+ def make_branch_with_fallback(self):
+ fallback = self.make_branch('fallback')
+ if not fallback._format.supports_stacking():
+ raise tests.TestNotApplicable("format does not support stacking")
+ stacked = self.make_branch('stacked')
+ stacked.set_stacked_on_url(fallback.base)
+ return stacked
+
+ def test_fallbacks_not_opened(self):
+ stacked = self.make_branch_with_fallback()
+ self.get_transport('').rename('fallback', 'moved')
+ reopened_dir = controldir.ControlDir.open(stacked.base)
+ reopened = reopened_dir.open_branch(ignore_fallbacks=True)
+ self.assertEqual([], reopened.repository._fallback_repositories)
+
+ def test_fallbacks_are_opened(self):
+ stacked = self.make_branch_with_fallback()
+ reopened_dir = controldir.ControlDir.open(stacked.base)
+ reopened = reopened_dir.open_branch(ignore_fallbacks=False)
+ self.assertLength(1, reopened.repository._fallback_repositories)
+
+
+class TestReferenceLocation(per_branch.TestCaseWithBranch):
+
+ def test_reference_parent(self):
+ tree = self.make_branch_and_tree('tree')
+ subtree = self.make_branch_and_tree('tree/subtree')
+ subtree.set_root_id('subtree-id')
+ try:
+ tree.add_reference(subtree)
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Tree cannot hold references.')
+ reference_parent = tree.branch.reference_parent(
+ 'subtree-id',
+ urlutils.relative_url(tree.branch.user_url, subtree.branch.user_url))
+ self.assertEqual(subtree.branch.base, reference_parent.base)
+
+ def test_reference_parent_accepts_possible_transports(self):
+ tree = self.make_branch_and_tree('tree')
+ subtree = self.make_branch_and_tree('tree/subtree')
+ subtree.set_root_id('subtree-id')
+ try:
+ tree.add_reference(subtree)
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Tree cannot hold references.')
+ reference_parent = tree.branch.reference_parent('subtree-id',
+ urlutils.relative_url(
+ tree.branch.user_url, subtree.branch.user_url),
+ possible_transports=[subtree.bzrdir.root_transport])
+
+ def test_get_reference_info(self):
+ branch = self.make_branch('branch')
+ try:
+ path, loc = branch.get_reference_info('file-id')
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ self.assertIs(None, path)
+ self.assertIs(None, loc)
+
+ def test_set_reference_info(self):
+ branch = self.make_branch('branch')
+ try:
+ branch.set_reference_info('file-id', 'path/to/location',
+ 'path/to/file')
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+
+ def test_set_get_reference_info(self):
+ branch = self.make_branch('branch')
+ try:
+ branch.set_reference_info('file-id', 'path/to/file',
+ 'path/to/location')
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ # Create a new instance to ensure storage is permanent
+ branch = _mod_branch.Branch.open('branch')
+ tree_path, branch_location = branch.get_reference_info('file-id')
+ self.assertEqual('path/to/location', branch_location)
+
+ def test_set_null_reference_info(self):
+ branch = self.make_branch('branch')
+ try:
+ branch.set_reference_info('file-id', 'path/to/file',
+ 'path/to/location')
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ branch.set_reference_info('file-id', None, None)
+ tree_path, branch_location = branch.get_reference_info('file-id')
+ self.assertIs(None, tree_path)
+ self.assertIs(None, branch_location)
+
+ def test_set_null_reference_info_when_null(self):
+ branch = self.make_branch('branch')
+ try:
+ tree_path, branch_location = branch.get_reference_info('file-id')
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ self.assertIs(None, tree_path)
+ self.assertIs(None, branch_location)
+ branch.set_reference_info('file-id', None, None)
+
+ def test_set_null_requires_two_nones(self):
+ branch = self.make_branch('branch')
+ try:
+ e = self.assertRaises(ValueError, branch.set_reference_info,
+ 'file-id', 'path', None)
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ self.assertEqual('tree_path must be None when branch_location is'
+ ' None.', str(e))
+ e = self.assertRaises(ValueError, branch.set_reference_info,
+ 'file-id', None, 'location')
+ self.assertEqual('branch_location must be None when tree_path is'
+ ' None.', str(e))
+
+ def make_branch_with_reference(self, location, reference_location,
+ file_id='file-id'):
+ branch = self.make_branch(location)
+ try:
+ branch.set_reference_info(file_id, 'path/to/file',
+ reference_location)
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ return branch
+
+ def test_reference_parent_from_reference_info_(self):
+ referenced_branch = self.make_branch('reference_branch')
+ branch = self.make_branch_with_reference('branch',
+ referenced_branch.base)
+ parent = branch.reference_parent('file-id', 'path/to/file')
+ self.assertEqual(parent.base, referenced_branch.base)
+
+ def test_branch_relative_reference_location(self):
+ branch = self.make_branch('branch')
+ try:
+ branch.set_reference_info('file-id', 'path/to/file',
+ '../reference_branch')
+ except errors.UnsupportedOperation:
+ raise tests.TestNotApplicable('Branch cannot hold references.')
+ referenced_branch = self.make_branch('reference_branch')
+ parent = branch.reference_parent('file-id', 'path/to/file')
+ self.assertEqual(parent.base, referenced_branch.base)
+
+ def test_sprout_copies_reference_location(self):
+ branch = self.make_branch_with_reference('branch', '../reference')
+ new_branch = branch.bzrdir.sprout('new-branch').open_branch()
+ self.assertEqual('../reference',
+ new_branch.get_reference_info('file-id')[1])
+
+ def test_clone_copies_reference_location(self):
+ branch = self.make_branch_with_reference('branch', '../reference')
+ new_branch = branch.bzrdir.clone('new-branch').open_branch()
+ self.assertEqual('../reference',
+ new_branch.get_reference_info('file-id')[1])
+
+ def test_copied_locations_are_rebased(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ new_branch = branch.bzrdir.sprout('branch/new-branch').open_branch()
+ self.assertEqual('../reference',
+ new_branch.get_reference_info('file-id')[1])
+
+ def test_update_references_retains_old_references(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ new_branch = self.make_branch_with_reference(
+ 'new_branch', 'reference', 'file-id2')
+ new_branch.update_references(branch)
+ self.assertEqual('reference',
+ branch.get_reference_info('file-id')[1])
+
+ def test_update_references_retains_known_references(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ new_branch = self.make_branch_with_reference(
+ 'new_branch', 'reference2')
+ new_branch.update_references(branch)
+ self.assertEqual('reference',
+ branch.get_reference_info('file-id')[1])
+
+ def test_update_references_skips_known_references(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ new_branch = branch.bzrdir.sprout('branch/new-branch').open_branch()
+ new_branch.set_reference_info('file-id', '../foo', '../foo')
+ new_branch.update_references(branch)
+ self.assertEqual('reference',
+ branch.get_reference_info('file-id')[1])
+
+ def test_pull_updates_references(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ new_branch = branch.bzrdir.sprout('branch/new-branch').open_branch()
+ new_branch.set_reference_info('file-id2', '../foo', '../foo')
+ branch.pull(new_branch)
+ self.assertEqual('foo',
+ branch.get_reference_info('file-id2')[1])
+
+ def test_push_updates_references(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ new_branch = branch.bzrdir.sprout('branch/new-branch').open_branch()
+ new_branch.set_reference_info('file-id2', '../foo', '../foo')
+ new_branch.push(branch)
+ self.assertEqual('foo',
+ branch.get_reference_info('file-id2')[1])
+
+ def test_merge_updates_references(self):
+ branch = self.make_branch_with_reference('branch', 'reference')
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('foo')
+ branch.pull(tree.branch)
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ checkout.commit('bar')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ merger = merge.Merger.from_revision_ids(None, tree,
+ branch.last_revision(),
+ other_branch=branch)
+ merger.merge_type = merge.Merge3Merger
+ merger.do_merge()
+ self.assertEqual('../branch/reference',
+ tree.branch.get_reference_info('file-id')[1])
+
+
+class TestBranchControlComponent(per_branch.TestCaseWithBranch):
+ """Branch implementations adequately implement ControlComponent."""
+
+ def test_urls(self):
+ br = self.make_branch('branch')
+ self.assertIsInstance(br.user_url, str)
+ self.assertEqual(br.user_url, br.user_transport.base)
+ # for all current bzrdir implementations the user dir must be
+ # above the control dir but we might need to relax that?
+ self.assertEqual(br.control_url.find(br.user_url), 0)
+ self.assertEqual(br.control_url, br.control_transport.base)
diff --git a/bzrlib/tests/per_branch/test_break_lock.py b/bzrlib/tests/per_branch/test_break_lock.py
new file mode 100644
index 0000000..3f20a7d
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_break_lock.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch break-lock behaviour."""
+
+from bzrlib import (
+ branch as _mod_branch,
+ errors,
+ ui,
+ tests,
+ )
+from bzrlib.tests import per_branch
+
+
+class TestBreakLock(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ super(TestBreakLock, self).setUp()
+ self.unused_branch = self.make_branch('branch')
+ self.branch = _mod_branch.Branch.open(self.unused_branch.base)
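+ # Two Branch objects for the same location: locks are taken via
+ # self.branch and broken via self.unused_branch, simulating a second
+ # process breaking a stale lock.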
+
+ def test_unlocked(self):
+ # break lock when nothing is locked should just return
+ try:
+ self.branch.break_lock()
+ except NotImplementedError:
+ pass
+
+ def test_unlocked_repo_locked(self):
+ # break_lock on the branch should also try the repository, even
+ # if the branch itself isn't locked
+ token = self.branch.repository.lock_write().repository_token
+ if token is None:
+ self.branch.repository.unlock()
+ raise tests.TestNotApplicable(
+ 'Repository does not use physical locks.')
+ self.branch.repository.leave_lock_in_place()
+ self.branch.repository.unlock()
+ other_instance = self.branch.repository.bzrdir.open_repository()
+ if not other_instance.get_physical_lock_status():
+ raise tests.TestNotApplicable(
+ 'Repository does not lock persistently.')
+ ui.ui_factory = ui.CannedInputUIFactory([True])
+ try:
+ self.unused_branch.break_lock()
+ except NotImplementedError:
+ # branch does not support break_lock
+ self.branch.repository.unlock()
+ return
+ self.assertRaises(errors.LockBroken, self.branch.repository.unlock)
+
+ def test_locked(self):
+ # break_lock when locked should unlock the branch and repo
+ self.branch.lock_write()
+ ui.ui_factory = ui.CannedInputUIFactory([True, True])
+ try:
+ self.unused_branch.break_lock()
+ except NotImplementedError:
+ # branch does not support break_lock
+ self.branch.unlock()
+ return
+ self.assertRaises(errors.LockBroken, self.branch.unlock)
+
+ def test_unlocks_master_branch(self):
+ # break_lock when the master branch is locked should offer to
+ # unlock it.
+ master = self.make_branch('master')
+ try:
+ self.branch.bind(master)
+ except errors.UpgradeRequired:
+ # this branch does not support binding.
+ return
+ master.lock_write()
+ ui.ui_factory = ui.CannedInputUIFactory([True, True])
+ try:
+ fresh = _mod_branch.Branch.open(self.unused_branch.base)
+ fresh.break_lock()
+ except NotImplementedError:
+ # branch does not support break_lock
+ master.unlock()
+ return
+ self.assertRaises(errors.LockBroken, master.unlock)
+ # can we lock it now?
+ master.lock_write()
+ master.unlock()
+
diff --git a/bzrlib/tests/per_branch/test_check.py b/bzrlib/tests/per_branch/test_check.py
new file mode 100644
index 0000000..02112fe
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_check.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch implementations - test check() functionality"""
+
+from StringIO import StringIO
+
+from bzrlib import errors, tests, ui
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+class TestBranchCheck(TestCaseWithBranch):
+
+ def test_check_detects_invalid_revhistory(self):
+ # Different formats have different ways of handling invalid revision
+ # histories, so the setup portion is customized
+ tree = self.make_branch_and_tree('test')
+ r1 = tree.commit('one')
+ r2 = tree.commit('two')
+ r3 = tree.commit('three')
+ r4 = tree.commit('four')
+ # create an alternate branch
+ tree.set_parent_ids([r1])
+ tree.branch.set_last_revision_info(1, r1)
+ r2b = tree.commit('two-b')
+
+ # now go back and merge the commit
+ tree.set_parent_ids([r4, r2b])
+ tree.branch.set_last_revision_info(4, r4)
+
+ r5 = tree.commit('five')
+ # Now, try to set an invalid history
+ if getattr(tree.branch, "_set_revision_history", None) is not None:
+ tree.branch._set_revision_history([r1, r2b, r5])
+ else:
+ tree.branch.set_last_revision_info(3, r5)
+
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ refs = self.make_refs(tree.branch)
+ result = tree.branch.check(refs)
+ ui.ui_factory = tests.TestUIFactory(stdout=StringIO())
+ result.report_results(True)
+ self.assertContainsRe(ui.ui_factory.stdout.getvalue(),
+ 'revno does not match len')
+
+ def test_check_branch_report_results(self):
+ """Checking a branch produces results which can be printed"""
+ branch = self.make_branch('.')
+ branch.lock_read()
+ self.addCleanup(branch.unlock)
+ result = branch.check(self.make_refs(branch))
+ # reports results through logging
+ result.report_results(verbose=True)
+ result.report_results(verbose=False)
+
+ def test__get_check_refs(self):
+ tree = self.make_branch_and_tree('.')
+ revid = tree.commit('foo')
+ self.assertEqual(
+ set([('revision-existence', revid), ('lefthand-distance', revid)]),
+ set(tree.branch._get_check_refs()))
+
+ def make_refs(self, branch):
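+ """Build the refs dict that branch.check() expects to be passed."""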
+ needed_refs = branch._get_check_refs()
+ refs = {}
+ distances = set()
+ existences = set()
+ for ref in needed_refs:
+ kind, value = ref
+ if kind == 'lefthand-distance':
+ distances.add(value)
+ elif kind == 'revision-existence':
+ existences.add(value)
+ else:
+ raise AssertionError(
+ 'unknown ref kind for ref %s' % ref)
+ node_distances = branch.repository.get_graph().find_lefthand_distances(
+ distances)
+ for key, distance in node_distances.iteritems():
+ refs[('lefthand-distance', key)] = distance
+ if key in existences and distance > 0:
+ refs[('revision-existence', key)] = True
+ existences.remove(key)
+ parent_map = branch.repository.get_graph().get_parent_map(existences)
+ for key in parent_map:
+ refs[('revision-existence', key)] = True
+ existences.remove(key)
+ for key in existences:
+ refs[('revision-existence', key)] = False
+ return refs
diff --git a/bzrlib/tests/per_branch/test_commit.py b/bzrlib/tests/per_branch/test_commit.py
new file mode 100644
index 0000000..de34dca
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_commit.py
@@ -0,0 +1,222 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the contract of commit on branches."""
+
+from bzrlib import (
+ branch,
+ delta,
+ errors,
+ revision,
+ transport,
+ )
+from bzrlib.tests import per_branch
+
+
+class TestCommit(per_branch.TestCaseWithBranch):
+
+ def test_commit_nicks(self):
+ """Nicknames are committed to the revision"""
+ self.get_transport().mkdir('bzr.dev')
+ wt = self.make_branch_and_tree('bzr.dev')
+ branch = wt.branch
+ branch.nick = "My happy branch"
+ wt.commit('My commit respect da nick.')
+ committed = branch.repository.get_revision(branch.last_revision())
+ self.assertEqual(committed.properties["branch-nick"],
+ "My happy branch")
+
+
+class TestCommitHook(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ super(TestCommitHook, self).setUp()
+
+ def capture_post_commit_hook(self, local, master, old_revno,
+ old_revid, new_revno, new_revid):
+ """Capture post commit hook calls to self.hook_calls.
+
+ The call is logged, as is some state of the two branches.
+ """
+ if local:
+ local_locked = local.is_locked()
+ local_base = local.base
+ else:
+ local_locked = None
+ local_base = None
+ self.hook_calls.append(
+ ('post_commit', local_base, master.base, old_revno, old_revid,
+ new_revno, new_revid, local_locked, master.is_locked()))
+
+ def capture_pre_commit_hook(self, local, master, old_revno, old_revid,
+ new_revno, new_revid,
+ tree_delta, future_tree):
+ self.hook_calls.append(('pre_commit', old_revno, old_revid,
+ new_revno, new_revid, tree_delta))
+
+ def test_post_commit_to_origin(self):
+ tree = self.make_branch_and_memory_tree('branch')
+ branch.Branch.hooks.install_named_hook(
+ 'post_commit', self.capture_post_commit_hook, None)
+ tree.lock_write()
+ tree.add('')
+ revid = tree.commit('a revision')
+ # should have had one notification, from origin, and
+ # have the branch locked at notification time.
+ self.assertEqual([
+ ('post_commit', None, tree.branch.base, 0, revision.NULL_REVISION,
+ 1, revid, None, True)
+ ],
+ self.hook_calls)
+ tree.unlock()
+
+ def test_post_commit_bound(self):
+ master = self.make_branch('master')
+ tree = self.make_branch_and_memory_tree('local')
+ try:
+ tree.branch.bind(master)
+ except errors.UpgradeRequired:
+ # can't bind this format, the test is irrelevant.
+ return
+ branch.Branch.hooks.install_named_hook(
+ 'post_commit', self.capture_post_commit_hook, None)
+ tree.lock_write()
+ tree.add('')
+ revid = tree.commit('a revision')
+ # with a bound branch, local is set.
+ self.assertEqual([
+ ('post_commit', tree.branch.base, master.base, 0,
+ revision.NULL_REVISION, 1, revid, True, True)
+ ],
+ self.hook_calls)
+ tree.unlock()
+
+ def test_post_commit_not_to_origin(self):
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ tree.add('')
+ revid = tree.commit('first revision')
+ branch.Branch.hooks.install_named_hook(
+ 'post_commit', self.capture_post_commit_hook, None)
+ revid2 = tree.commit('second revision')
+ # having already committed once on this branch, we should get the
+ # before and after revnos and revids correctly.
+ self.assertEqual([
+ ('post_commit', None, tree.branch.base, 1, revid, 2, revid2,
+ None, True)
+ ],
+ self.hook_calls)
+ tree.unlock()
+
+ def test_pre_commit_passes(self):
+ empty_delta = delta.TreeDelta()
+ root_delta = delta.TreeDelta()
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ tree.add('')
+ root_delta.added = [('', tree.path2id(''), 'directory')]
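+ # The only change in the first commit is the added tree root, so its
+ # delta holds just that entry; the second commit changes nothing and
+ # should therefore report an empty delta.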
+ branch.Branch.hooks.install_named_hook(
+ "pre_commit", self.capture_pre_commit_hook, None)
+ revid1 = tree.commit('first revision')
+ revid2 = tree.commit('second revision')
+ self.assertEqual([
+ ('pre_commit', 0, revision.NULL_REVISION, 1, revid1, root_delta),
+ ('pre_commit', 1, revid1, 2, revid2, empty_delta)
+ ],
+ self.hook_calls)
+ tree.unlock()
+
+ def test_pre_commit_fails(self):
+ empty_delta = delta.TreeDelta()
+ root_delta = delta.TreeDelta()
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ tree.add('')
+ root_delta.added = [('', tree.path2id(''), 'directory')]
+ class PreCommitException(Exception): pass
+ def hook_func(local, master,
+ old_revno, old_revid, new_revno, new_revid,
+ tree_delta, future_tree):
+ raise PreCommitException(new_revid)
+ branch.Branch.hooks.install_named_hook(
+ "pre_commit", self.capture_pre_commit_hook, None)
+ branch.Branch.hooks.install_named_hook("pre_commit", hook_func, None)
+ revids = [None, None, None]
+ # this commit will raise an exception
+ # so the commit is rolled back and revno unchanged
+ err = self.assertRaises(PreCommitException, tree.commit, 'message')
+ # we have to record the revid to use in assertEqual later
+ revids[0] = str(err)
+ # unregister all pre_commit hooks
+ branch.Branch.hooks["pre_commit"] = []
+ # and re-register the capture hook
+ branch.Branch.hooks.install_named_hook(
+ "pre_commit", self.capture_pre_commit_hook, None)
+ # now these commits should go through
+ for i in range(1, 3):
+ revids[i] = tree.commit('message')
+ self.assertEqual([
+ ('pre_commit', 0, revision.NULL_REVISION, 1, revids[0], root_delta),
+ ('pre_commit', 0, revision.NULL_REVISION, 1, revids[1], root_delta),
+ ('pre_commit', 1, revids[1], 2, revids[2], empty_delta)
+ ],
+ self.hook_calls)
+ tree.unlock()
+
+ def test_pre_commit_delta(self):
+ # This tests the TreeDelta object passed to pre_commit hook.
+ # This does not try to validate data correctness in the delta.
+ self.build_tree(['rootfile', 'dir/', 'dir/subfile'])
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ try:
+ # setting up a playground
+ tree.set_root_id('root_id')
+ tree.add('rootfile', 'rootfile_id')
+ tree.put_file_bytes_non_atomic('rootfile_id', 'abc')
+ tree.add('dir', 'dir_id')
+ tree.add('dir/subfile', 'dir_subfile_id')
+ tree.mkdir('to_be_unversioned', 'to_be_unversioned_id')
+ tree.put_file_bytes_non_atomic('dir_subfile_id', 'def')
+ revid1 = tree.commit('first revision')
+ finally:
+ tree.unlock()
+
+ tree.lock_write()
+ try:
+ # making changes
+ tree.put_file_bytes_non_atomic('rootfile_id', 'jkl')
+ tree.rename_one('dir/subfile', 'dir/subfile_renamed')
+ tree.unversion(['to_be_unversioned_id'])
+ tree.mkdir('added_dir', 'added_dir_id')
+ # start to capture pre_commit delta
+ branch.Branch.hooks.install_named_hook(
+ "pre_commit", self.capture_pre_commit_hook, None)
+ revid2 = tree.commit('second revision')
+ finally:
+ tree.unlock()
+
+ expected_delta = delta.TreeDelta()
+ expected_delta.added = [('added_dir', 'added_dir_id', 'directory')]
+ expected_delta.removed = [('to_be_unversioned',
+ 'to_be_unversioned_id', 'directory')]
+ expected_delta.renamed = [('dir/subfile', 'dir/subfile_renamed',
+ 'dir_subfile_id', 'file', False, False)]
+ expected_delta.modified=[('rootfile', 'rootfile_id', 'file', True,
+ False)]
+ self.assertEqual([('pre_commit', 1, revid1, 2, revid2,
+ expected_delta)], self.hook_calls)
diff --git a/bzrlib/tests/per_branch/test_config.py b/bzrlib/tests/per_branch/test_config.py
new file mode 100644
index 0000000..35044e3
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_config.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.get_config behaviour."""
+
+from bzrlib import (
+ branch,
+ errors,
+ remote,
+ tests,
+ )
+from bzrlib.tests import per_branch
+
+
+class TestGetConfig(per_branch.TestCaseWithBranch):
+
+ def test_set_user_option_with_dict(self):
+ b = self.make_branch('b')
+ config = b.get_config()
+ value_dict = {
+ 'ascii': 'abcd', u'unicode \N{WATCH}': u'foo \N{INTERROBANG}'}
+ config.set_user_option('name', value_dict.copy())
+ self.assertEqual(value_dict, config.get_user_option('name'))
+
+ def test_set_submit_branch(self):
+ # Make sure setting a config option persists on disk
+ b = self.make_branch('.')
+ b.set_submit_branch('foo')
+ # Refresh the branch
+ b = branch.Branch.open('.')
+ self.assertEquals('foo', b.get_submit_branch())
+
+
diff --git a/bzrlib/tests/per_branch/test_create_checkout.py b/bzrlib/tests/per_branch/test_create_checkout.py
new file mode 100644
index 0000000..f49be1c
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_create_checkout.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the Branch.create_checkout"""
+
+from bzrlib.tests import per_branch
+
+
+class TestCreateCheckout(per_branch.TestCaseWithBranch):
+
+ def test_checkout_format_lightweight(self):
+ """Make sure the new light checkout uses the desired branch format."""
+ a_branch = self.make_branch('branch')
+ tree = a_branch.create_checkout('checkout', lightweight=True)
+ # All branches can define the format they want checkouts made in.
+ # This checks it is honoured.
+ expected_format = a_branch._get_checkout_format(lightweight=True)
+ self.assertEqual(expected_format.get_branch_format().network_name(),
+ tree.branch._format.network_name())
+
+ def test_checkout_format_heavyweight(self):
+ """Make sure the new heavy checkout uses the desired branch format."""
+ a_branch = self.make_branch('branch')
+ tree = a_branch.create_checkout('checkout', lightweight=False)
+ # All branches can define the format they want checkouts made in.
+ # This checks it is honoured.
+ expected_format = a_branch._get_checkout_format(lightweight=False)
+ self.assertEqual(expected_format.get_branch_format().network_name(),
+ tree.branch._format.network_name())
+
+ def test_create_revision_checkout(self):
+ """Test that we can create a checkout from an earlier revision."""
+ tree1 = self.make_branch_and_tree('base')
+ self.build_tree(['base/a'])
+ tree1.add(['a'], ['a-id'])
+ tree1.commit('first', rev_id='rev-1')
+ self.build_tree(['base/b'])
+ tree1.add(['b'], ['b-id'])
+ tree1.commit('second', rev_id='rev-2')
+
+ tree2 = tree1.branch.create_checkout('checkout', revision_id='rev-1')
+ self.assertEqual('rev-1', tree2.last_revision())
+ self.assertPathExists('checkout/a')
+ self.assertPathDoesNotExist('checkout/b')
+
+ def test_create_lightweight_checkout(self):
+ """We should be able to make a lightweight checkout."""
+ tree1 = self.make_branch_and_tree('base')
+ tree2 = tree1.branch.create_checkout('checkout', lightweight=True)
+ self.assertNotEqual(tree1.basedir, tree2.basedir)
+ self.assertEqual(tree1.branch.base, tree2.branch.base)
+
+ def test_create_checkout_exists(self):
+ """We shouldn't fail if the directory already exists."""
+ tree1 = self.make_branch_and_tree('base')
+ self.build_tree(['checkout/'])
+ tree2 = tree1.branch.create_checkout('checkout', lightweight=True)
diff --git a/bzrlib/tests/per_branch/test_create_clone.py b/bzrlib/tests/per_branch/test_create_clone.py
new file mode 100644
index 0000000..a30965d
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_create_clone.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2009-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.create_clone behaviour."""
+
+from bzrlib import (
+ branch,
+ errors,
+ remote,
+ tests,
+ )
+from bzrlib.tests import per_branch
+
+
+class TestCreateClone(per_branch.TestCaseWithBranch):
+
+ def test_create_clone_on_transport_missing_parent_dir(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ source = tree.branch
+ target_transport = self.get_transport('subdir').clone('target')
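+ # 'subdir' does not exist, so cloning into subdir/target without
+ # create_prefix must fail and must not create 'subdir' as a side effect.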
+ self.assertRaises(errors.NoSuchFile,
+ tree.branch.create_clone_on_transport, target_transport)
+ self.assertFalse(self.get_transport('.').has('subdir'))
+
+ def test_create_clone_on_transport_missing_parent_dir_create(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ source = tree.branch
+ target_transport = self.get_transport('subdir').clone('target')
+ result = tree.branch.create_clone_on_transport(target_transport,
+ create_prefix=True)
+ self.assertEqual(source.last_revision(), result.last_revision())
+ self.assertEqual(target_transport.base,
+ result.bzrdir.root_transport.base)
+
+ def test_create_clone_on_transport_use_existing_dir_false(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ source = tree.branch
+ target_transport = self.get_transport('target')
+ target_transport.create_prefix()
+ self.assertRaises(errors.FileExists,
+ tree.branch.create_clone_on_transport, target_transport)
+ self.assertFalse(target_transport.has(".bzr"))
+
+ def test_create_clone_on_transport_use_existing_dir_true(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ source = tree.branch
+ target_transport = self.get_transport('target')
+ target_transport.create_prefix()
+ result = tree.branch.create_clone_on_transport(target_transport,
+ use_existing_dir=True)
+ self.assertEqual(source.last_revision(), result.last_revision())
+
+ def test_create_clone_on_transport_no_revision_id(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ source = tree.branch
+ target_transport = self.get_transport('target')
+ result = tree.branch.create_clone_on_transport(target_transport)
+ self.assertEqual(source.last_revision(), result.last_revision())
+
+ def test_create_clone_on_transport_revision_id(self):
+ tree = self.make_branch_and_tree('source')
+ old_revid = tree.commit('a commit')
+ source_tip = tree.commit('a second commit')
+ source = tree.branch
+ target_transport = self.get_transport('target')
+ result = tree.branch.create_clone_on_transport(target_transport,
+ revision_id=old_revid)
+ self.assertEqual(old_revid, result.last_revision())
+ result.lock_read()
+ self.addCleanup(result.unlock)
+ self.assertFalse(result.repository.has_revision(source_tip))
+
+ def test_create_clone_on_transport_stacked(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ trunk = tree.branch.create_clone_on_transport(
+ self.get_transport('trunk'))
+ revid = tree.commit('a second commit')
+ source = tree.branch
+ target_transport = self.get_transport('target')
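+ # Formats that cannot stack raise UnstackableBranchFormat; treat that as
+ # 'not applicable' unless the format claims full versioned-file support,
+ # in which case it is a real failure.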
+ try:
+ result = tree.branch.create_clone_on_transport(target_transport,
+ stacked_on=trunk.base)
+ except errors.UnstackableBranchFormat:
+ if not trunk.repository._format.supports_full_versioned_files:
+ raise tests.TestNotApplicable("can not stack on format")
+ raise
+ self.assertEqual(revid, result.last_revision())
+ self.assertEqual(trunk.base, result.get_stacked_on_url())
+
+ def test_create_clone_of_multiple_roots(self):
+ try:
+ builder = self.make_branch_builder('local')
+ except (errors.TransportNotPossible, errors.UninitializableFormat):
+ raise tests.TestNotApplicable('format not directly constructable')
+ builder.start_series()
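+ # 'rev1' and 'other' are both roots (no parents); 'rev3' merges the two
+ # ancestries, so the clone has to cope with a graph that has more than
+ # one root.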
+ builder.build_snapshot('rev1', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('rev2', ['rev1'], [])
+ builder.build_snapshot('other', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('rev3', ['rev2', 'other'], [])
+ builder.finish_series()
+ local = builder.get_branch()
+ local.bzrdir.clone(self.get_url('remote'), revision_id='rev3')
+
+ def assertBranchHookBranchIsStacked(self, pre_change_params):
+ # Calling get_stacked_on_url is the check: it raises if the branch
+ # passed to the hook is not stacked.
+ pre_change_params.branch.get_stacked_on_url()
+ self.hook_calls.append(pre_change_params)
+
+ def test_create_clone_on_transport_stacked_hooks_get_stacked_branch(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ trunk = tree.branch.create_clone_on_transport(
+ self.get_transport('trunk'))
+ revid = tree.commit('a second commit')
+ target_transport = self.get_transport('target')
+ self.hook_calls = []
+ branch.Branch.hooks.install_named_hook(
+ 'pre_change_branch_tip', self.assertBranchHookBranchIsStacked, None)
+ try:
+ result = tree.branch.create_clone_on_transport(target_transport,
+ stacked_on=trunk.base)
+ except errors.UnstackableBranchFormat:
+ if not trunk.repository._format.supports_full_versioned_files:
+ raise tests.TestNotApplicable("can not stack on format")
+ raise
+ self.assertEqual(revid, result.last_revision())
+ self.assertEqual(trunk.base, result.get_stacked_on_url())
+ # Smart servers invoke hooks on both sides
+ if isinstance(result, remote.RemoteBranch):
+ expected_calls = 2
+ else:
+ expected_calls = 1
+ self.assertEqual(expected_calls, len(self.hook_calls))
diff --git a/bzrlib/tests/per_branch/test_dotted_revno_to_revision_id.py b/bzrlib/tests/per_branch/test_dotted_revno_to_revision_id.py
new file mode 100644
index 0000000..6dcf145
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_dotted_revno_to_revision_id.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.dotted_revno_to_revision_id()"""
+
+from bzrlib import errors
+
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+class TestDottedRevnoToRevisionId(TestCaseWithBranch):
+
+ def test_lookup_revision_id_by_dotted(self):
+ tree = self.create_tree_with_merge()
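+ # create_tree_with_merge gives a mainline of rev-1, rev-2 and rev-3 plus
+ # a merged revision rev-1.1.1, which the dotted lookups below rely on.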
+ the_branch = tree.branch
+ the_branch.lock_read()
+ self.addCleanup(the_branch.unlock)
+ self.assertEqual('null:', the_branch.dotted_revno_to_revision_id((0,)))
+ self.assertEqual('rev-1', the_branch.dotted_revno_to_revision_id((1,)))
+ self.assertEqual('rev-2', the_branch.dotted_revno_to_revision_id((2,)))
+ self.assertEqual('rev-3', the_branch.dotted_revno_to_revision_id((3,)))
+ self.assertEqual('rev-1.1.1', the_branch.dotted_revno_to_revision_id(
+ (1,1,1)))
+ self.assertRaises(errors.NoSuchRevision,
+ the_branch.dotted_revno_to_revision_id, (1,0,2))
+ # Test reverse caching
+ self.assertEqual(None,
+ the_branch._partial_revision_id_to_revno_cache.get('rev-1'))
+ self.assertEqual('rev-1', the_branch.dotted_revno_to_revision_id((1,),
+ _cache_reverse=True))
+ self.assertEqual((1,),
+ the_branch._partial_revision_id_to_revno_cache.get('rev-1'))
diff --git a/bzrlib/tests/per_branch/test_get_revision_id_to_revno_map.py b/bzrlib/tests/per_branch/test_get_revision_id_to_revno_map.py
new file mode 100644
index 0000000..1218816
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_get_revision_id_to_revno_map.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.get_revision_id_to_revno_map()"""
+
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+class TestRevisionIdToDottedRevno(TestCaseWithBranch):
+
+ def test_simple_revno(self):
+ tree = self.create_tree_with_merge()
+ # Re-open the branch so we make sure we start fresh.
+ # see bug #162486
+ the_branch = tree.bzrdir.open_branch()
+
+ self.assertEqual({'rev-1':(1,), 'rev-2':(2,), 'rev-3':(3,),
+ 'rev-1.1.1':(1,1,1)
+ }, the_branch.get_revision_id_to_revno_map())
+
+
+class TestCaching(TestCaseWithBranch):
+ """Tests for the caching of branches' dotted revno generation.
+
+ When locked, branches should avoid regenerating the revision_id => dotted
+ revno mapping.
+
+ When not locked, obviously the revision_id => dotted revno mapping will
+ need to be regenerated or reread each time.
+
+ We test whether get_revision_id_to_revno_map is using the cache by
+ instrumenting the branch's _gen_revno_map method, which it calls to build
+ the mapping.
+ """
+
+ def get_instrumented_branch(self):
+ """Get a branch and monkey patch it to log calls to _gen_revno_map.
+
+ :returns: a tuple of (the branch, list that calls will be logged to)
+ """
+ tree = self.create_tree_with_merge()
+ calls = []
+ real_func = tree.branch._gen_revno_map
+ def wrapper():
+ calls.append('_gen_revno_map')
+ return real_func()
+ tree.branch._gen_revno_map = wrapper
+ return tree.branch, calls
+
+ def test_unlocked(self):
+ """Repeated calls will call _gen_revno_map each time."""
+ branch, calls = self.get_instrumented_branch()
+ # Repeatedly call get_revision_id_to_revno_map.
+ branch.get_revision_id_to_revno_map()
+ branch.get_revision_id_to_revno_map()
+ branch.get_revision_id_to_revno_map()
+ self.assertEqual(['_gen_revno_map']*3, calls)
+
+ def test_locked(self):
+ """Repeated calls will only call _gen_revno_map once.
+ """
+ branch, calls = self.get_instrumented_branch()
+ # Lock the branch, then repeatedly call get_revision_id_to_revno_map.
+ branch.lock_read()
+ try:
+ branch.get_revision_id_to_revno_map()
+ self.assertEqual(['_gen_revno_map'], calls)
+ finally:
+ branch.unlock()
+
+ def test_set_last_revision_info_when_locked(self):
+ """Calling set_last_revision_info should reset the cache."""
+ branch, calls = self.get_instrumented_branch()
+ branch.lock_write()
+ try:
+ self.assertEqual({'rev-1':(1,), 'rev-2':(2,), 'rev-3':(3,),
+ 'rev-1.1.1':(1,1,1)
+ }, branch.get_revision_id_to_revno_map())
+ branch.set_last_revision_info(2, 'rev-2')
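+ # Setting the tip invalidates the cached map; the next call regenerates
+ # it for the shortened history and the call after that hits the
+ # refreshed cache, so _gen_revno_map runs twice in total.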
+ self.assertEqual({'rev-1':(1,), 'rev-2':(2,)},
+ branch.get_revision_id_to_revno_map())
+ self.assertEqual({'rev-1':(1,), 'rev-2':(2,)},
+ branch.get_revision_id_to_revno_map())
+ self.assertEqual(['_gen_revno_map']*2, calls)
+ finally:
+ branch.unlock()
diff --git a/bzrlib/tests/per_branch/test_hooks.py b/bzrlib/tests/per_branch/test_hooks.py
new file mode 100644
index 0000000..17cbfc1
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_hooks.py
@@ -0,0 +1,347 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests that branch classes implement hook callouts correctly."""
+
+from bzrlib import (
+ branch as _mod_branch,
+ errors,
+ remote,
+ revision,
+ tests,
+ )
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests import test_server
+
+class ChangeBranchTipTestCase(tests.TestCaseWithMemoryTransport):
+ """Base TestCase for testing pre/post_change_branch_tip hooks."""
+
+ def install_logging_hook(self, prefix):
+ """Add a hook that logs calls made to it.
+
+ :returns: the list that the calls will be appended to.
+ """
+ hook_calls = []
+ _mod_branch.Branch.hooks.install_named_hook(
+ prefix + '_change_branch_tip', hook_calls.append, None)
+ return hook_calls
+
+ def make_branch_with_revision_ids(self, *revision_ids):
+ """Makes a branch with the given commits."""
+ tree = self.make_branch_and_memory_tree('source')
+ tree.lock_write()
+ tree.add('')
+ for revision_id in revision_ids:
+ tree.commit(u'Message of ' + revision_id.decode('utf8'),
+ rev_id=revision_id)
+ tree.unlock()
+ branch = tree.branch
+ return branch
+
+ def assertHookCalls(self, expected_params, branch, hook_calls=None,
+ pre=False):
+ if hook_calls is None:
+ hook_calls = self.hook_calls
+ if isinstance(branch, remote.RemoteBranch):
+ # For a remote branch, both the server and the client will raise
+ # this hook, and we see both in the test environment. The remote
+ # (server-side) calls come in between the client's - the client does
+ # pre, the server does pre, the server does post, the client does post.
+ if pre:
+ offset = 0
+ else:
+ offset = 1
+ self.assertEqual(expected_params, hook_calls[offset])
+ self.assertEqual(2, len(hook_calls))
+ else:
+ self.assertEqual([expected_params], hook_calls)
+
+
+class TestOpen(tests.TestCaseWithMemoryTransport):
+
+ def capture_hook(self, branch):
+ self.hook_calls.append(branch)
+
+ def install_hook(self):
+ self.hook_calls = []
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'open', self.capture_hook, None)
+
+ def test_create(self):
+ self.install_hook()
+ b = self.make_branch('.')
+ if isinstance(b, remote.RemoteBranch):
+ # RemoteBranch creation:
+ if (self.transport_readonly_server
+ == test_server.ReadonlySmartTCPServer_for_testing_v2_only):
+ # Older servers:
+ self.assertEqual(3, len(self.hook_calls))
+ # creates the branch via the VFS (for older servers)
+ self.assertEqual(b._real_branch, self.hook_calls[0])
+ # creates a RemoteBranch object
+ self.assertEqual(b, self.hook_calls[1])
+ # get_stacked_on_url RPC
+ self.assertRealBranch(self.hook_calls[2])
+ else:
+ self.assertEqual(2, len(self.hook_calls))
+ # create_branch RPC
+ self.assertRealBranch(self.hook_calls[0])
+ # create RemoteBranch locally
+ self.assertEqual(b, self.hook_calls[1])
+ else:
+ self.assertEqual([b], self.hook_calls)
+
+ def test_open(self):
+ branch_url = self.make_branch('.').bzrdir.root_transport.base
+ self.install_hook()
+ b = _mod_branch.Branch.open(branch_url)
+ if isinstance(b, remote.RemoteBranch):
+ self.assertEqual(3, len(self.hook_calls))
+ # open_branchV2 RPC
+ self.assertRealBranch(self.hook_calls[0])
+ # create RemoteBranch locally
+ self.assertEqual(b, self.hook_calls[1])
+ # get_stacked_on_url RPC
+ self.assertRealBranch(self.hook_calls[2])
+ else:
+ self.assertEqual([b], self.hook_calls)
+
+ def assertRealBranch(self, b):
+ # Branches opened on the server don't have comparable URLs, so we just
+ # assert that it is not a RemoteBranch.
+ self.assertIsInstance(b, _mod_branch.Branch)
+ self.assertFalse(isinstance(b, remote.RemoteBranch))
+
+
+class TestPreChangeBranchTip(ChangeBranchTipTestCase):
+ """Tests for pre_change_branch_tip hook.
+
+ Most of these tests are very similar to the tests in
+ TestPostChangeBranchTip.
+ """
+
+ def test_hook_runs_before_change(self):
+ """The hook runs *before* the branch's last_revision_info has changed.
+ """
+ branch = self.make_branch_with_revision_ids('revid-one')
+ def assertBranchAtRevision1(params):
+ self.assertEquals(
+ (1, 'revid-one'), params.branch.last_revision_info())
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'pre_change_branch_tip', assertBranchAtRevision1, None)
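+ # The hook must observe the old tip; only after it returns does the tip
+ # actually move to (0, null:).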
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+
+ def test_hook_failure_prevents_change(self):
+ """If a hook raises an exception, the change does not take effect."""
+ branch = self.make_branch_with_revision_ids(
+ 'one-\xc2\xb5', 'two-\xc2\xb5')
+ class PearShapedError(Exception):
+ pass
+ def hook_that_raises(params):
+ raise PearShapedError()
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'pre_change_branch_tip', hook_that_raises, None)
+ hook_failed_exc = self.assertRaises(
+ PearShapedError,
+ branch.set_last_revision_info, 0, revision.NULL_REVISION)
+ # The revision info is unchanged.
+ self.assertEqual((2, 'two-\xc2\xb5'), branch.last_revision_info())
+
+ def test_empty_history(self):
+ branch = self.make_branch('source')
+ hook_calls = self.install_logging_hook('pre')
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+ expected_params = _mod_branch.ChangeBranchTipParams(
+ branch, 0, 0, revision.NULL_REVISION, revision.NULL_REVISION)
+ self.assertHookCalls(expected_params, branch, hook_calls, pre=True)
+
+ def test_nonempty_history(self):
+ # some branches require that their history be set to a revision in the
+ # repository, so we need to make a branch with non-empty history for
+ # this test.
+ branch = self.make_branch_with_revision_ids(
+ 'one-\xc2\xb5', 'two-\xc2\xb5')
+ hook_calls = self.install_logging_hook('pre')
+ branch.set_last_revision_info(1, 'one-\xc2\xb5')
+ expected_params = _mod_branch.ChangeBranchTipParams(
+ branch, 2, 1, 'two-\xc2\xb5', 'one-\xc2\xb5')
+ self.assertHookCalls(expected_params, branch, hook_calls, pre=True)
+
+ def test_branch_is_locked(self):
+ branch = self.make_branch('source')
+ def assertBranchIsLocked(params):
+ self.assertTrue(params.branch.is_locked())
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'pre_change_branch_tip', assertBranchIsLocked, None)
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+
+ def test_calls_all_hooks_no_errors(self):
+ """If multiple hooks are registered, all are called (if none raise
+ errors).
+ """
+ branch = self.make_branch('source')
+ hook_calls_1 = self.install_logging_hook('pre')
+ hook_calls_2 = self.install_logging_hook('pre')
+ self.assertIsNot(hook_calls_1, hook_calls_2)
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+ # Both hooks are called.
+ if isinstance(branch, remote.RemoteBranch):
+ count = 2
+ else:
+ count = 1
+ self.assertEqual(len(hook_calls_1), count)
+ self.assertEqual(len(hook_calls_2), count)
+
+ def test_explicit_reject_by_hook(self):
+ """If a hook raises TipChangeRejected, the change does not take effect.
+
+ TipChangeRejected exceptions are propagated, not wrapped in HookFailed.
+ """
+ branch = self.make_branch_with_revision_ids(
+ 'one-\xc2\xb5', 'two-\xc2\xb5')
+ def hook_that_rejects(params):
+ raise errors.TipChangeRejected('rejection message')
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'pre_change_branch_tip', hook_that_rejects, None)
+ self.assertRaises(
+ errors.TipChangeRejected,
+ branch.set_last_revision_info, 0, revision.NULL_REVISION)
+ # The revision info is unchanged.
+ self.assertEqual((2, 'two-\xc2\xb5'), branch.last_revision_info())
+
+
+class TestPostChangeBranchTip(ChangeBranchTipTestCase):
+ """Tests for post_change_branch_tip hook.
+
+ Most of these tests are very similar to the tests in
+ TestPreChangeBranchTip.
+ """
+
+ def test_hook_runs_after_change(self):
+ """The hook runs *after* the branch's last_revision_info has changed.
+ """
+ branch = self.make_branch_with_revision_ids('revid-one')
+ def assertBranchAtRevision1(params):
+ self.assertEquals(
+ (0, revision.NULL_REVISION), params.branch.last_revision_info())
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'post_change_branch_tip', assertBranchAtRevision1, None)
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+
+ def test_empty_history(self):
+ branch = self.make_branch('source')
+ hook_calls = self.install_logging_hook('post')
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+ expected_params = _mod_branch.ChangeBranchTipParams(
+ branch, 0, 0, revision.NULL_REVISION, revision.NULL_REVISION)
+ self.assertHookCalls(expected_params, branch, hook_calls)
+
+ def test_nonempty_history(self):
+ # some branches require that their history be set to a revision in the
+ # repository, so we need to make a branch with non-empty history for
+ # this test.
+ branch = self.make_branch_with_revision_ids(
+ 'one-\xc2\xb5', 'two-\xc2\xb5')
+ hook_calls = self.install_logging_hook('post')
+ branch.set_last_revision_info(1, 'one-\xc2\xb5')
+ expected_params = _mod_branch.ChangeBranchTipParams(
+ branch, 2, 1, 'two-\xc2\xb5', 'one-\xc2\xb5')
+ self.assertHookCalls(expected_params, branch, hook_calls)
+
+ def test_branch_is_locked(self):
+ """The branch passed to the hook is locked."""
+ branch = self.make_branch('source')
+ def assertBranchIsLocked(params):
+ self.assertTrue(params.branch.is_locked())
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'post_change_branch_tip', assertBranchIsLocked, None)
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+
+ def test_calls_all_hooks_no_errors(self):
+ """If multiple hooks are registered, all are called (if none raise
+ errors).
+ """
+ branch = self.make_branch('source')
+ hook_calls_1 = self.install_logging_hook('post')
+ hook_calls_2 = self.install_logging_hook('post')
+ self.assertIsNot(hook_calls_1, hook_calls_2)
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+ # Both hooks are called.
+ if isinstance(branch, remote.RemoteBranch):
+ count = 2
+ else:
+ count = 1
+ self.assertEqual(len(hook_calls_1), count)
+ self.assertEqual(len(hook_calls_2), count)
+
+
+class TestAllMethodsThatChangeTipWillRunHooks(ChangeBranchTipTestCase):
+ """Every method of Branch that changes a branch tip will invoke the
+ pre/post_change_branch_tip hooks.
+ """
+
+ def setUp(self):
+ ChangeBranchTipTestCase.setUp(self)
+ self.installPreAndPostHooks()
+
+ def installPreAndPostHooks(self):
+ self.pre_hook_calls = self.install_logging_hook('pre')
+ self.post_hook_calls = self.install_logging_hook('post')
+
+ def resetHookCalls(self):
+ del self.pre_hook_calls[:], self.post_hook_calls[:]
+
+ def assertPreAndPostHooksWereInvoked(self, branch, smart_enabled):
+ """assert that both pre and post hooks were called
+
+ :param smart_enabled: The method invoked is one that should be
+ smart server ready.
+ """
+ # Check for the number of invocations expected. One invocation is
+ # local, one is remote (if the branch is remote).
+ if smart_enabled and isinstance(branch, remote.RemoteBranch):
+ length = 2
+ else:
+ length = 1
+ self.assertEqual(length, len(self.pre_hook_calls))
+ self.assertEqual(length, len(self.post_hook_calls))
+
+ def test_set_last_revision_info(self):
+ branch = self.make_branch('')
+ branch.set_last_revision_info(0, revision.NULL_REVISION)
+ self.assertPreAndPostHooksWereInvoked(branch, True)
+
+ def test_generate_revision_history(self):
+ branch = self.make_branch('')
+ branch.generate_revision_history(revision.NULL_REVISION)
+ # NB: for HPSS protocols < v3, the server does not invoke branch tip
+ # change events on generate_revision_history, as the change is done
+ # directly by the client over the VFS.
+ self.assertPreAndPostHooksWereInvoked(branch, True)
+
+ def test_pull(self):
+ source_branch = self.make_branch_with_revision_ids('rev-1', 'rev-2')
+ self.resetHookCalls()
+ destination_branch = self.make_branch('destination')
+ destination_branch.pull(source_branch)
+ self.assertPreAndPostHooksWereInvoked(destination_branch, False)
+
+ def test_push(self):
+ source_branch = self.make_branch_with_revision_ids('rev-1', 'rev-2')
+ self.resetHookCalls()
+ destination_branch = self.make_branch('destination')
+ source_branch.push(destination_branch)
+ self.assertPreAndPostHooksWereInvoked(destination_branch, True)
diff --git a/bzrlib/tests/per_branch/test_http.py b/bzrlib/tests/per_branch/test_http.py
new file mode 100644
index 0000000..9a387d4
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_http.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test branches with inaccessible parents."""
+
+from bzrlib import (
+ branch,
+ errors,
+ )
+from bzrlib.tests import (
+ per_branch,
+ test_server,
+ )
+
+
+class InaccessibleParentTests(per_branch.TestCaseWithBranch):
+ """Tests with branches with "inaccessible" parents.
+
+ An "inaccessible" parent location is one that cannot be represented, e.g. if
+ a child branch says its parent is at "../../my-parent", but that child is at
+ "http://host/one" then that parent location is inaccessible. These
+ branches' get_parent method will raise InaccessibleParent.
+ """
+
+ def setUp(self):
+ super(InaccessibleParentTests, self).setUp()
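+ # Serve the branches via a chroot-ed readonly server so that a parent
+ # URL pointing above the served directory really is inaccessible.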
+ if self.transport_server in (test_server.LocalURLServer, None):
+ self.transport_readonly_server = test_server.TestingChrootServer
+
+ def get_branch_with_invalid_parent(self):
+ """Get a branch whose get_parent will raise InaccessibleParent."""
+ self.build_tree(
+ ['parent/', 'parent/path/', 'parent/path/to/',
+ 'child/', 'child/path/', 'child/path/to/'],
+ transport=self.get_transport())
+ self.make_branch('parent/path/to/a').bzrdir.sprout(self.get_url('child/path/to/b'))
+
+ # The child branch internally will have recorded that its parent is at
+ # "../../../../parent/path/to/a" or similar. So we move the child
+ # branch up several directories, so that its parent path will point to
+ # somewhere outside the directory served by the HTTP server. Thus its
+ # parent is now inaccessible.
+ self.get_transport().rename('child/path/to/b', 'b')
+ branch_b = branch.Branch.open(self.get_readonly_url('b'))
+ return branch_b
+
+ def test_get_parent_invalid(self):
+ # When you have a branch whose parent URL cannot be calculated, this
+ # exception will be raised.
+ branch_b = self.get_branch_with_invalid_parent()
+ self.assertRaises(errors.InaccessibleParent, branch_b.get_parent)
+
+ def test_clone_invalid_parent(self):
+ # If clone can't determine the location of the parent of the branch
+ # being cloned, then the new branch will have no parent set.
+ branch_b = self.get_branch_with_invalid_parent()
+ branch_c = branch_b.bzrdir.clone('c').open_branch()
+ self.assertEqual(None, branch_c.get_parent())
+
+ def test_sprout_invalid_parent(self):
+ # A sprouted branch will have a parent of the branch it was sprouted
+ # from, even if that branch has an invalid parent.
+ branch_b = self.get_branch_with_invalid_parent()
+ branch_c = branch_b.bzrdir.sprout('c').open_branch()
+ self.assertEqual(branch_b.base, branch_c.get_parent())
diff --git a/bzrlib/tests/per_branch/test_iter_merge_sorted_revisions.py b/bzrlib/tests/per_branch/test_iter_merge_sorted_revisions.py
new file mode 100644
index 0000000..c016961
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_iter_merge_sorted_revisions.py
@@ -0,0 +1,305 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.iter_merge_sorted_revisions()"""
+
+from bzrlib import (
+ errors,
+ revision,
+ tests,
+ )
+
+from bzrlib.tests import per_branch
+
+
+class TestIterMergeSortedRevisionsSimpleGraph(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ super(TestIterMergeSortedRevisionsSimpleGraph, self).setUp()
+ builder = self.make_builder_with_merges('.')
+ self.branch = builder.get_branch()
+ self.branch.lock_read()
+ self.addCleanup(self.branch.unlock)
+
+ def make_builder_with_merges(self, relpath):
+ try:
+ builder = self.make_branch_builder(relpath)
+ except (errors.TransportNotPossible, errors.UninitializableFormat):
+ raise tests.TestNotApplicable('format not directly constructable')
+ builder.start_series()
+ # 1
+ # |\
+ # 2 |
+ # | |
+ # | 1.1.1
+ # |/
+ # 3
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', '')),])
+ builder.build_snapshot('1.1.1', ['1'], [])
+ builder.build_snapshot('2', ['1'], [])
+ builder.build_snapshot('3', ['2', '1.1.1'], [])
+ builder.finish_series()
+ return builder
+
+ def assertIterRevids(self, expected, *args, **kwargs):
+ # We don't care about depths and revnos here, only about returning the
+ # right revids.
+ revids = [revid for (revid, depth, revno, eom) in
+ self.branch.iter_merge_sorted_revisions(*args, **kwargs)]
+ self.assertEqual(expected, revids)
+
+ def test_merge_sorted(self):
+ self.assertIterRevids(['3', '1.1.1', '2', '1'])
+
+ def test_merge_sorted_range(self):
+ self.assertIterRevids(['1.1.1'],
+ start_revision_id='1.1.1', stop_revision_id='1')
+
+ def test_merge_sorted_range_start_only(self):
+ self.assertIterRevids(['1.1.1', '1'],
+ start_revision_id='1.1.1')
+
+ def test_merge_sorted_range_stop_exclude(self):
+ self.assertIterRevids(['3', '1.1.1', '2'], stop_revision_id='1')
+
+ def test_merge_sorted_range_stop_include(self):
+ self.assertIterRevids(['3', '1.1.1', '2'],
+ stop_revision_id='2', stop_rule='include')
+
+ def test_merge_sorted_range_stop_with_merges(self):
+ self.assertIterRevids(['3', '1.1.1'],
+ stop_revision_id='3', stop_rule='with-merges')
+
+ def test_merge_sorted_range_stop_with_merges_can_show_non_parents(self):
+ # 1.1.1 gets logged before the end revision is reached, so it is
+ # returned even though 1.1.1 is not a parent of 2.
+ self.assertIterRevids(['3', '1.1.1', '2'],
+ stop_revision_id='2', stop_rule='with-merges')
+
+ def test_merge_sorted_range_stop_with_merges_ignore_non_parents(self):
+ # 2 is not a parent of 1.1.1 so it must not be returned
+ self.assertIterRevids(['3', '1.1.1'],
+ stop_revision_id='1.1.1', stop_rule='with-merges')
+
+ def test_merge_sorted_single_stop_exclude(self):
+ # from X..X exclusive is an empty result
+ self.assertIterRevids([], start_revision_id='3', stop_revision_id='3')
+
+ def test_merge_sorted_single_stop_include(self):
+ # from X..X inclusive is [X]
+ self.assertIterRevids(['3'],
+ start_revision_id='3', stop_revision_id='3',
+ stop_rule='include')
+
+ def test_merge_sorted_single_stop_with_merges(self):
+ self.assertIterRevids(['3', '1.1.1'],
+ start_revision_id='3', stop_revision_id='3',
+ stop_rule='with-merges')
+
+ def test_merge_sorted_forward(self):
+ self.assertIterRevids(['1', '2', '1.1.1', '3'], direction='forward')
+
+ def test_merge_sorted_range_forward(self):
+ self.assertIterRevids(['1.1.1'],
+ start_revision_id='1.1.1', stop_revision_id='1',
+ direction='forward')
+
+ def test_merge_sorted_range_start_only_forward(self):
+ self.assertIterRevids(['1', '1.1.1'],
+ start_revision_id='1.1.1', direction='forward')
+
+ def test_merge_sorted_range_stop_exclude_forward(self):
+ self.assertIterRevids(['2', '1.1.1', '3'],
+ stop_revision_id='1', direction='forward')
+
+ def test_merge_sorted_range_stop_include_forward(self):
+ self.assertIterRevids(['2', '1.1.1', '3'],
+ stop_revision_id='2', stop_rule='include',
+ direction='forward')
+
+ def test_merge_sorted_range_stop_with_merges_forward(self):
+ self.assertIterRevids(['1.1.1', '3'],
+ stop_revision_id='3', stop_rule='with-merges',
+ direction='forward')
+
+
+class TestIterMergeSortedRevisionsBushyGraph(per_branch.TestCaseWithBranch):
+
+ def make_branch_builder(self, relpath):
+ try:
+ builder = super(TestIterMergeSortedRevisionsBushyGraph,
+ self).make_branch_builder(relpath)
+ except (errors.TransportNotPossible, errors.UninitializableFormat):
+ raise tests.TestNotApplicable('format not directly constructable')
+ return builder
+
+ def make_branch_with_embedded_merges(self, relpath='.'):
+ builder = self.make_branch_builder(relpath)
+ # 1
+ # |\
+ # | 1.1.1
+ # | /
+ # 2
+ # | \
+ # 3 |
+ # | |
+ # | 2.1.1
+ # | | \
+ # | 2.1.2 |
+ # | | |
+ # | | 2.2.1
+ # | | /
+ # | 2.1.3
+ # |/
+ # 4
+ builder.start_series()
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', '')),])
+ builder.build_snapshot('1.1.1', ['1'], [])
+ builder.build_snapshot('2', ['1', '1.1.1'], [])
+ builder.build_snapshot('2.1.1', ['2'], [])
+ builder.build_snapshot('2.1.2', ['2.1.1'], [])
+ builder.build_snapshot('2.2.1', ['2.1.1'], [])
+ builder.build_snapshot('2.1.3', ['2.1.2', '2.2.1'], [])
+ builder.build_snapshot('3', ['2'], [])
+ builder.build_snapshot('4', ['3', '2.1.3'], [])
+ builder.finish_series()
+ br = builder.get_branch()
+ br.lock_read()
+ self.addCleanup(br.unlock)
+ return br
+
+ def make_branch_with_different_depths_merges(self, relpath='.'):
+ builder = self.make_branch_builder(relpath)
+ # 1
+ # |\
+ # | 1.1.1
+ # | /
+ # 2
+ # | \
+ # 3 |
+ # | |
+ # | 2.1.1
+ # | | \
+ # | 2.1.2 |
+ # | | |
+ # | | 2.2.1
+ # | | /
+ # | 2.1.3
+ # |/
+ # 4
+ builder.start_series()
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', '')),])
+ builder.build_snapshot('2', ['1'], [])
+ builder.build_snapshot('1.1.1', ['1'], [])
+ builder.build_snapshot('1.1.2', ['1.1.1'], [])
+ builder.build_snapshot('1.2.1', ['1.1.1'], [])
+ builder.build_snapshot('1.2.2', ['1.2.1'], [])
+ builder.build_snapshot('1.3.1', ['1.2.1'], [])
+ builder.build_snapshot('1.3.2', ['1.3.1'], [])
+ builder.build_snapshot('1.4.1', ['1.3.1'], [])
+ builder.build_snapshot('1.3.3', ['1.3.2', '1.4.11'], [])
+ builder.build_snapshot('1.2.3', ['1.2.2', '1.3.3'], [])
+ builder.build_snapshot('2.1.1', ['2'], [])
+ builder.build_snapshot('2.1.2', ['2.1.1'], [])
+ builder.build_snapshot('2.2.1', ['2.1.1'], [])
+ builder.build_snapshot('2.1.3', ['2.1.2', '2.2.1'], [])
+ builder.build_snapshot('3', ['2', '1.2.3'], [])
+ # .. to bring them all and ... bind them
+ builder.build_snapshot('4', ['3', '2.1.3'],
+ [])
+ builder.finish_series()
+ br = builder.get_branch()
+ br.lock_read()
+ self.addCleanup(br.unlock)
+ return br
+
+ def make_branch_with_alternate_ancestries(self, relpath='.'):
+ # See test_merge_sorted_exclude_ancestry below for how this differs from
+ # bt.test_log.TestLogExcludeAncestry, which has its own
+ # make_branch_with_alternate_ancestries and
+ # test_merge_sorted_exclude_ancestry; see the FIXME in assertLogRevnos
+ # there too.
+ builder = self.make_branch_builder(relpath)
+ # 1
+ # |\
+ # | 1.1.1
+ # | /| \
+ # 2 | |
+ # | | 1.2.1
+ # | | /
+ # | 1.1.2
+ # | /
+ # 3
+ builder.start_series()
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', '')),])
+ builder.build_snapshot('1.1.1', ['1'], [])
+ builder.build_snapshot('2', ['1', '1.1.1'], [])
+ builder.build_snapshot('1.2.1', ['1.1.1'], [])
+ builder.build_snapshot('1.1.2', ['1.1.1', '1.2.1'], [])
+ builder.build_snapshot('3', ['2', '1.1.2'], [])
+ builder.finish_series()
+ br = builder.get_branch()
+ br.lock_read()
+ self.addCleanup(br.unlock)
+ return br
+
+ def assertIterRevids(self, expected, branch, *args, **kwargs):
+ # We don't care about depths and revnos here, only about returning the
+ # right revids.
+ revs = list(branch.iter_merge_sorted_revisions(*args, **kwargs))
+ revids = [revid for (revid, depth, revno, eom) in revs]
+ self.assertEqual(expected, revids)
+
+ def test_merge_sorted_starting_at_embedded_merge(self):
+ branch = self.make_branch_with_embedded_merges()
+ self.assertIterRevids(['4', '2.1.3', '2.2.1', '2.1.2', '2.1.1',
+ '3', '2', '1.1.1', '1'],
+ branch)
+ # 3 and 2.1.2 are not part of 2.2.1 ancestry and should not appear
+ self.assertIterRevids(['2.2.1', '2.1.1', '2', '1.1.1', '1'],
+ branch, start_revision_id='2.2.1',
+ stop_rule='with-merges')
+
+ def test_merge_sorted_with_different_depths_merge(self):
+ branch = self.make_branch_with_different_depths_merges()
+ self.assertIterRevids(['4', '2.1.3', '2.2.1', '2.1.2', '2.1.1',
+ '3',
+ '1.2.3', '1.3.3', '1.3.2', '1.3.1',
+ '1.2.2', '1.2.1', '1.1.1',
+ '2', '1'],
+ branch)
+ # 3 (and its descendants) and 2.1.2 are not part of 2.2.1 ancestry and
+ # should not appear
+ self.assertIterRevids(['2.2.1', '2.1.1', '2', '1'],
+ branch, start_revision_id='2.2.1',
+ stop_rule='with-merges')
+
+ def test_merge_sorted_exclude_ancestry(self):
+ branch = self.make_branch_with_alternate_ancestries()
+ self.assertIterRevids(['3', '1.1.2', '1.2.1', '2', '1.1.1', '1'],
+ branch)
+ # '2' is not part of the ancestry, even though merge_sort order would
+ # make it appear before 1.1.1.
+ self.assertIterRevids(['1.1.2', '1.2.1'],
+ branch,
+ stop_rule='with-merges-without-common-ancestry',
+ start_revision_id='1.1.2',
+ stop_revision_id='1.1.1')
+
diff --git a/bzrlib/tests/per_branch/test_last_revision_info.py b/bzrlib/tests/per_branch/test_last_revision_info.py
new file mode 100644
index 0000000..6471071
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_last_revision_info.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2004, 2005, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.last_revision_info."""
+
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestLastRevisionInfo(TestCaseWithTransport):
+
+ def test_empty_branch(self):
+ # on an empty branch we want (0, NULL_REVISION)
+ branch = self.make_branch('branch')
+ self.assertEqual((0, NULL_REVISION), branch.last_revision_info())
+
+ def test_non_empty_branch(self):
+ # after the second commit we want (2, 'second-revid')
+ tree = self.make_branch_and_tree('branch')
+ tree.commit('1st post')
+ revid = tree.commit('2st post', allow_pointless=True)
+ self.assertEqual((2, revid), tree.branch.last_revision_info())
+
+ def test_import(self):
+ # importing and setting last revision
+ tree1 = self.make_branch_and_tree('branch1')
+ tree1.commit('1st post')
+ revid = tree1.commit('2st post', allow_pointless=True)
+ branch2 = self.make_branch('branch2')
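+ # import_last_revision_info_and_tags should both fetch the revision into
+ # branch2's repository and set branch2's tip to it.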
+ self.assertEquals((2, revid),
+ branch2.import_last_revision_info_and_tags(tree1.branch, 2, revid))
+ self.assertEqual((2, revid), branch2.last_revision_info())
+ self.assertTrue(branch2.repository.has_revision(revid))
+
+ def test_import_lossy(self):
+ # importing with lossy=True works
+ tree1 = self.make_branch_and_tree('branch1')
+ tree1.commit('1st post')
+ revid = tree1.commit('2st post', allow_pointless=True)
+ branch2 = self.make_branch('branch2')
+ ret = branch2.import_last_revision_info_and_tags(tree1.branch, 2,
+ revid, lossy=True)
+ self.assertIsInstance(ret, tuple)
+ self.assertIsInstance(ret[0], int)
+ self.assertIsInstance(ret[1], str)
+
+ def test_same_repo(self):
+ # importing and setting last revision within the same repo
+ tree = self.make_branch_and_tree('branch1')
+ tree.commit('1st post')
+ revid = tree.commit('2st post', allow_pointless=True)
+ tree.branch.set_last_revision_info(0, NULL_REVISION)
+ tree.branch.import_last_revision_info_and_tags(tree.branch, 2, revid)
+ self.assertEqual((2, revid), tree.branch.last_revision_info())
diff --git a/bzrlib/tests/per_branch/test_locking.py b/bzrlib/tests/per_branch/test_locking.py
new file mode 100644
index 0000000..70a7b8f
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_locking.py
@@ -0,0 +1,539 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test locks across all branch implementations"""
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.tests import (
+ lock_helpers,
+ per_branch,
+ )
+from bzrlib.tests.matchers import *
+
+
+class TestBranchLocking(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ super(TestBranchLocking, self).setUp()
+ self.reduceLockdirTimeout()
+
+ def get_instrumented_branch(self):
+ """Get a Branch object which has been instrumented"""
+ # TODO: jam 20060630 It may be that not all formats have a
+ # 'control_files' member. So we should fail gracefully if
+ # not there. But assuming it has them lets us test the exact
+ # lock/unlock order.
+ self.locks = []
+ b = lock_helpers.LockWrapper(self.locks, self.get_branch(), 'b')
+ b.repository = lock_helpers.LockWrapper(self.locks, b.repository, 'r')
+ bcf = getattr(b, "control_files", None)
+ rcf = getattr(b.repository, 'control_files', None)
+ if rcf is None:
+ self.combined_control = False
+ else:
+ # Look out for branch types that reuse their control files
+ self.combined_control = bcf is rcf and bcf is not None
+ try:
+ b.control_files = lock_helpers.LockWrapper(
+ self.locks, b.control_files, 'bc')
+ except AttributeError:
+ # RemoteBranch seems to trigger this.
+ raise tests.TestSkipped(
+ 'Could not instrument branch control files.')
+ if self.combined_control:
+ # instrument the repository control files too to ensure they are
+ # worked with correctly. When they are not shared, we trust the
+ # repository API and only instrument the repository itself.
+ b.repository.control_files = lock_helpers.LockWrapper(
+ self.locks, b.repository.control_files, 'rc')
+ return b
+
+ def test_01_lock_read(self):
+ # Test that locking occurs in the correct order
+ b = self.get_instrumented_branch()
+
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+ b.lock_read()
+ try:
+ self.assertTrue(b.is_locked())
+ self.assertTrue(b.repository.is_locked())
+ finally:
+ b.unlock()
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+
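+ # Each entry logged by the LockWrapper objects above is
+ # (id, operation, success): 'b' = branch, 'r' = repository, 'bc'/'rc' =
+ # their control files; 'lr'/'lw'/'ul' = lock_read/lock_write/unlock.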
+ if self.combined_control:
+ self.assertEqual([('b', 'lr', True),
+ ('r', 'lr', True),
+ ('rc', 'lr', True),
+ ('bc', 'lr', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', True),
+ ('r', 'ul', True),
+ ('rc', 'ul', True),
+ ], self.locks)
+ else:
+ self.assertEqual([('b', 'lr', True),
+ ('r', 'lr', True),
+ ('bc', 'lr', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', True),
+ ('r', 'ul', True),
+ ], self.locks)
+
+ def test_02_lock_write(self):
+ # Test that locking occurs in the correct order
+ b = self.get_instrumented_branch()
+
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+ b.lock_write()
+ try:
+ self.assertTrue(b.is_locked())
+ self.assertTrue(b.repository.is_locked())
+ finally:
+ b.unlock()
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+
+ if self.combined_control:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('rc', 'lw', True),
+ ('bc', 'lw', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', True),
+ ('r', 'ul', True),
+ ('rc', 'ul', True),
+ ], self.locks)
+ else:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('bc', 'lw', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', True),
+ ('r', 'ul', True),
+ ], self.locks)
+
+ def test_03_lock_fail_unlock_repo(self):
+ # Make sure branch.unlock() is called, even if there is a
+ # failure while unlocking the repository.
+ b = self.get_instrumented_branch()
+ b.repository.disable_unlock()
+
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+ b.lock_write()
+ try:
+ self.assertTrue(b.is_locked())
+ self.assertTrue(b.repository.is_locked())
+ self.assertLogsError(lock_helpers.TestPreventLocking, b.unlock)
+ if self.combined_control:
+ self.assertTrue(b.is_locked())
+ else:
+ self.assertFalse(b.is_locked())
+ self.assertTrue(b.repository.is_locked())
+
+ # We unlock the branch control files, even if
+ # we fail to unlock the repository
+ if self.combined_control:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('rc', 'lw', True),
+ ('bc', 'lw', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', True),
+ ('r', 'ul', False),
+ ], self.locks)
+ else:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('bc', 'lw', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', True),
+ ('r', 'ul', False),
+ ], self.locks)
+
+ finally:
+ # For cleanup purposes, make sure we are unlocked
+ b.repository._other.unlock()
+
+ def test_04_lock_fail_unlock_control(self):
+ # Make sure repository.unlock() is not called, if we fail to unlock
+ # self leaving ourselves still locked, so that attempts to recover
+ # don't encounter an unlocked repository.
+ b = self.get_instrumented_branch()
+ b.control_files.disable_unlock()
+
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+ b.lock_write()
+ try:
+ self.assertTrue(b.is_locked())
+ self.assertTrue(b.repository.is_locked())
+ self.assertLogsError(lock_helpers.TestPreventLocking, b.unlock)
+ self.assertTrue(b.is_locked())
+ self.assertTrue(b.repository.is_locked())
+
+ # We unlock the repository even if
+ # we fail to unlock the control files
+ if self.combined_control:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('rc', 'lw', True),
+ ('bc', 'lw', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', False),
+ ('r', 'ul', True),
+ ('rc', 'ul', True),
+ ], self.locks)
+ else:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('bc', 'lw', True),
+ ('b', 'ul', True),
+ ('bc', 'ul', False),
+ ], self.locks)
+
+ finally:
+ # For cleanup purposes, make sure we are unlocked
+ b.control_files._other.unlock()
+
+ def test_05_lock_read_fail_repo(self):
+ # Test that the branch is not locked if it cannot lock the repository
+ b = self.get_instrumented_branch()
+ b.repository.disable_lock_read()
+
+ self.assertRaises(lock_helpers.TestPreventLocking, b.lock_read)
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+
+ self.assertEqual([('b', 'lr', True),
+ ('r', 'lr', False),
+ ], self.locks)
+
+ def test_06_lock_write_fail_repo(self):
+ # Test that the branch is not locked if it cannot lock the repository
+ b = self.get_instrumented_branch()
+ b.repository.disable_lock_write()
+
+ self.assertRaises(lock_helpers.TestPreventLocking, b.lock_write)
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', False),
+ ], self.locks)
+
+ def test_07_lock_read_fail_control(self):
+ # Test the repository is unlocked if we can't lock self
+ b = self.get_instrumented_branch()
+ b.control_files.disable_lock_read()
+
+ self.assertRaises(lock_helpers.TestPreventLocking, b.lock_read)
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+
+ if self.combined_control:
+ self.assertEqual([('b', 'lr', True),
+ ('r', 'lr', True),
+ ('rc', 'lr', True),
+ ('bc', 'lr', False),
+ ('r', 'ul', True),
+ ('rc', 'ul', True),
+ ], self.locks)
+ else:
+ self.assertEqual([('b', 'lr', True),
+ ('r', 'lr', True),
+ ('bc', 'lr', False),
+ ('r', 'ul', True),
+ ], self.locks)
+
+ def test_08_lock_write_fail_control(self):
+ # Test the repository is unlocked if we can't lock self
+ b = self.get_instrumented_branch()
+ b.control_files.disable_lock_write()
+
+ self.assertRaises(lock_helpers.TestPreventLocking, b.lock_write)
+ self.assertFalse(b.is_locked())
+ self.assertFalse(b.repository.is_locked())
+ if self.combined_control:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('rc', 'lw', True),
+ ('bc', 'lw', False),
+ ('r', 'ul', True),
+ ('rc', 'ul', True),
+ ], self.locks)
+ else:
+ self.assertEqual([('b', 'lw', True),
+ ('r', 'lw', True),
+ ('bc', 'lw', False),
+ ('r', 'ul', True),
+ ], self.locks)
+
+ def test_lock_write_returns_None_refuses_token(self):
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ try:
+ if token is not None:
+ # This test does not apply, because this lockable supports
+ # tokens.
+ return
+ self.assertRaises(errors.TokenLockingNotSupported,
+ branch.lock_write, token='token')
+ finally:
+ branch.unlock()
+
+ def test_reentering_lock_write_raises_on_token_mismatch(self):
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ different_branch_token = token + 'xxx'
+ # Re-using the same lockable instance with a different branch token
+ # will raise TokenMismatch.
+ self.assertRaises(errors.TokenMismatch,
+ branch.lock_write,
+ token=different_branch_token)
+ finally:
+ branch.unlock()
+
+ def test_lock_write_with_nonmatching_token(self):
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+ # This test does not apply, because this branch refuses
+ # tokens.
+ return
+ different_branch_token = token + 'xxx'
+
+ new_branch = branch.bzrdir.open_branch()
+ # We only want to test the relocking abilities of branch, so use the
+ # existing repository object which is already locked.
+ new_branch.repository = branch.repository
+ self.assertRaises(errors.TokenMismatch,
+ new_branch.lock_write,
+ token=different_branch_token)
+ finally:
+ branch.unlock()
+
+
+ def test_lock_write_with_matching_token(self):
+ """Test that a branch can be locked with a token, if it is already
+ locked by that token."""
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+ # This test does not apply, because this branch refuses tokens.
+ return
+ # The same instance will accept a second lock_write if the specified
+ # token matches.
+ branch.lock_write(token=token)
+ branch.unlock()
+ # Calling lock_write on a new instance for the same lockable will
+ # also succeed.
+ new_branch = branch.bzrdir.open_branch()
+ # We only want to test the relocking abilities of branch, so use the
+ # existing repository object which is already locked.
+ new_branch.repository = branch.repository
+ new_branch.lock_write(token=token)
+ new_branch.unlock()
+ finally:
+ branch.unlock()
+
+ def test_unlock_after_lock_write_with_token(self):
+ # If lock_write did not physically acquire the lock (because it was
+ # passed some tokens), then unlock should not physically release it.
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ new_branch = branch.bzrdir.open_branch()
+ # We only want to test the relocking abilities of branch, so use the
+ # existing repository object which is already locked.
+ new_branch.repository = branch.repository
+ new_branch.lock_write(token=token)
+ new_branch.unlock()
+            # The physical lock should still be held: it was taken by the
+            # original branch instance, and unlocking the token-reusing
+            # instance must not physically release it.
+            self.assertTrue(branch.get_physical_lock_status())
+ finally:
+ branch.unlock()
+
+ def test_lock_write_with_token_fails_when_unlocked(self):
+ # First, lock and then unlock to get superficially valid tokens. This
+ # mimics a likely programming error, where a caller accidentally tries
+ # to lock with a token that is no longer valid (because the original
+ # lock was released).
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ branch.unlock()
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ self.assertRaises(errors.TokenMismatch, branch.lock_write, token=token)
+
+ def test_lock_write_reenter_with_token(self):
+ branch = self.make_branch('b')
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ # Relock with a token.
+ branch.lock_write(token=token)
+ branch.unlock()
+ finally:
+ branch.unlock()
+ # The lock should be unlocked on disk. Verify that with a new lock
+ # instance.
+ new_branch = branch.bzrdir.open_branch()
+ # Calling lock_write now should work, rather than raise LockContention.
+ new_branch.lock_write()
+ new_branch.unlock()
+
+ def test_leave_lock_in_place(self):
+ branch = self.make_branch('b')
+ # Lock the branch, then use leave_lock_in_place so that when we
+ # unlock the branch the lock is still held on disk.
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+                # This test does not apply, because this branch refuses lock
+                # tokens.
+ self.assertRaises(NotImplementedError,
+ branch.leave_lock_in_place)
+ return
+ branch.leave_lock_in_place()
+ finally:
+ branch.unlock()
+        # We should be unable to relock the branch.
+ self.assertRaises(errors.LockContention, branch.lock_write)
+ # Cleanup
+ branch.lock_write(token)
+ branch.dont_leave_lock_in_place()
+ branch.unlock()
+
+ def test_dont_leave_lock_in_place(self):
+ branch = self.make_branch('b')
+ # Create a lock on disk.
+ token = branch.lock_write().branch_token
+ try:
+ if token is None:
+ # This test does not apply, because this branch refuses lock
+ # tokens.
+ self.assertRaises(NotImplementedError,
+ branch.dont_leave_lock_in_place)
+ return
+ try:
+ branch.leave_lock_in_place()
+ except NotImplementedError:
+ # This branch doesn't support this API.
+ return
+ try:
+ branch.repository.leave_lock_in_place()
+ except NotImplementedError:
+ # This repo doesn't support leaving locks around,
+ # assume it is essentially lock-free.
+ repo_token = None
+ else:
+                repo_token = branch.repository.lock_write().repository_token
+ branch.repository.unlock()
+ finally:
+ branch.unlock()
+ # Reacquire the lock (with a different branch object) by using the
+ # tokens.
+ new_branch = branch.bzrdir.open_branch()
+ if repo_token is not None:
+ # We have to explicitly lock the repository first.
+ new_branch.repository.lock_write(token=repo_token)
+ new_branch.lock_write(token=token)
+ if repo_token is not None:
+ # Now we don't need our own repository lock anymore (the branch is
+ # holding it for us).
+ new_branch.repository.unlock()
+ # Call dont_leave_lock_in_place, so that the lock will be released by
+ # this instance, even though the lock wasn't originally acquired by it.
+ new_branch.dont_leave_lock_in_place()
+ if repo_token is not None:
+ new_branch.repository.dont_leave_lock_in_place()
+ new_branch.unlock()
+ # Now the branch (and repository) is unlocked. Test this by locking it
+ # without tokens.
+ branch.lock_write()
+ branch.unlock()
+
+ def test_lock_read_then_unlock(self):
+ # Calling lock_read then unlocking should work without errors.
+ branch = self.make_branch('b')
+ branch.lock_read()
+ branch.unlock()
+
+ def test_lock_read_returns_unlockable(self):
+ branch = self.make_branch('b')
+ self.assertThat(branch.lock_read, ReturnsUnlockable(branch))
+
+ def test_lock_write_locks_repo_too(self):
+ branch = self.make_branch('b')
+ branch = branch.bzrdir.open_branch()
+ branch.lock_write()
+ try:
+ # The branch should have asked the repository to lock.
+ self.assertTrue(branch.repository.is_write_locked())
+ # Does the repository type actually lock?
+ if not branch.repository.get_physical_lock_status():
+                # The repository uses no physical locking, so there is
+                # nothing more to check.
+ return
+ # Now the branch.repository is physically locked, so we can't lock
+ # it with a new repository instance.
+ new_repo = branch.bzrdir.open_repository()
+ self.assertRaises(errors.LockContention, new_repo.lock_write)
+ # We can call lock_write on the original repository object though,
+ # because it is already locked.
+ branch.repository.lock_write()
+ branch.repository.unlock()
+ finally:
+ branch.unlock()
+
+ def test_lock_write_returns_unlockable(self):
+ branch = self.make_branch('b')
+ self.assertThat(branch.lock_write, ReturnsUnlockable(branch))
+
+ def test_lock_write_raises_in_lock_read(self):
+ branch = self.make_branch('b')
+ branch.lock_read()
+ self.addCleanup(branch.unlock)
+        self.assertRaises(errors.ReadOnlyError, branch.lock_write)
+
+ def test_lock_and_unlock_leaves_repo_unlocked(self):
+ branch = self.make_branch('b')
+ branch.lock_write()
+ branch.unlock()
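+        # Unlocking the branch should have released the repository lock too,
+        # so a further repository.unlock has nothing to release.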
+ self.assertRaises(errors.LockNotHeld, branch.repository.unlock)
+
diff --git a/bzrlib/tests/per_branch/test_parent.py b/bzrlib/tests/per_branch/test_parent.py
new file mode 100644
index 0000000..500aa09
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_parent.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for Branch parent URL"""
+
+import sys
+
+import bzrlib.errors
+from bzrlib.osutils import getcwd
+from bzrlib.tests import (
+    TestCaseWithTransport,
+    TestNotApplicable,
+    TestSkipped,
+    )
+from bzrlib import urlutils
+
+
+class TestParent(TestCaseWithTransport):
+
+ def test_no_default_parent(self):
+ """Branches should have no parent by default"""
+ b = self.make_branch('.')
+ self.assertEqual(None, b.get_parent())
+
+ def test_set_get_parent(self):
+ """Set, re-get and reset the parent"""
+ b = self.make_branch('subdir')
+ url = 'http://example.com/bzr/bzr.dev'
+ b.set_parent(url)
+ self.assertEqual(url, b.get_parent())
+ self.assertEqual(url, b._get_parent_location())
+
+ b.set_parent(None)
+ self.assertEqual(None, b.get_parent())
+
+ b.set_parent('../other_branch')
+
+ expected_parent = urlutils.join(self.get_url('subdir'),
+ '../other_branch')
+ self.assertEqual(expected_parent, b.get_parent())
+ path = urlutils.join(self.get_url('subdir'), '../yanb')
+ b.set_parent(path)
+ self.assertEqual('../yanb', b._get_parent_location())
+ self.assertEqual(path, b.get_parent())
+
+
+ self.assertRaises(bzrlib.errors.InvalidURL, b.set_parent, u'\xb5')
+ b.set_parent(urlutils.escape(u'\xb5'))
+ self.assertEqual('%C2%B5', b._get_parent_location())
+
+ self.assertEqual(b.base + '%C2%B5', b.get_parent())
+
+ # Handle the case for older style absolute local paths
+ if sys.platform == 'win32':
+ # TODO: jam 20060515 Do we want to special case Windows local
+ # paths as well? Nobody has complained about it.
+ pass
+ else:
+ b.lock_write()
+ b._set_parent_location('/local/abs/path')
+ b.unlock()
+ self.assertEqual('file:///local/abs/path', b.get_parent())
+
+ def test_get_invalid_parent(self):
+ b = self.make_branch('.')
+
+ cwd = getcwd()
+ n_dirs = len(cwd.split('/'))
+
+ # Force the relative path to be something invalid
+ # This should attempt to go outside the filesystem
+ path = ('../'*(n_dirs+5)) + 'foo'
+ b.lock_write()
+ b._set_parent_location(path)
+ b.unlock()
+
+        # With an invalid branch parent, get_parent raises InaccessibleParent
+ self.assertRaises(bzrlib.errors.InaccessibleParent, b.get_parent)
+
+ def test_win32_set_parent_on_another_drive(self):
+ if sys.platform != 'win32':
+ raise TestSkipped('windows-specific test')
+ b = self.make_branch('.')
+ base_url = b.bzrdir.transport.abspath('.')
+ if not base_url.startswith('file:///'):
+ raise TestNotApplicable('this test should be run with local base')
+ base = urlutils.local_path_from_url(base_url)
+ other = 'file:///D:/path'
+ if base[0] != 'C':
+ other = 'file:///C:/path'
+ b.set_parent(other)
+ self.assertEquals(other, b._get_parent_location())
diff --git a/bzrlib/tests/per_branch/test_permissions.py b/bzrlib/tests/per_branch/test_permissions.py
new file mode 100644
index 0000000..5740276
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_permissions.py
@@ -0,0 +1,137 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for bzr setting permissions.
+
+Files in the branch control directory (.bzr or .bzr/branch) should inherit
+the .bzr directory permissions.
+So if the directory is group writable, the files and subdirs should be as well.
+"""
+
+# TODO: jam 20051215 Currently the default behavior for 'bzr branch' is just
+#                    defined by the local umask. This isn't terrible, but is it
+#                    the truly desired behavior?
+
+import os
+import sys
+import stat
+
+from bzrlib import tests
+from bzrlib.branch import BzrBranch
+from bzrlib.controldir import ControlDir
+from bzrlib.remote import RemoteBranchFormat
+from bzrlib.tests.test_permissions import check_mode_r
+
+
+class _NullPermsStat(object):
+    """A class that proxies a stat result and strips permissions."""
+
+ def __init__(self, orig_stat):
+ self._orig_stat = orig_stat
+ # We strip all permission bits from st_mode
+ self.st_mode = orig_stat.st_mode & ~07777
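+        # e.g. a regular file with mode 0100644 is reported as 0100000,
+        # keeping only the file-type bits.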
+
+ def __getattr__(self, name):
+ return getattr(self._orig_stat, name)
+
+
+class TestPermissions(tests.TestCaseWithTransport):
+
+ def test_new_branch(self):
+ if isinstance(self.branch_format, RemoteBranchFormat):
+            # Remote branch formats have no permission logic in them; there's
+ # nothing to test here.
+ raise tests.TestNotApplicable('Remote branches have no'
+ ' permission logic')
+ if sys.platform == 'win32':
+ raise tests.TestNotApplicable('chmod has no effect on win32')
+ os.mkdir('a')
+ mode = stat.S_IMODE(os.stat('a').st_mode)
+ t = self.make_branch_and_tree('.')
+        # Also, these are BzrBranch format-specific things.
+ if not isinstance(t.branch, BzrBranch):
+ raise tests.TestNotApplicable(
+ "Only applicable to bzr branches")
+ b = t.branch
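+        # Directories inherit the parent directory's mode; files drop the
+        # setuid/setgid/sticky and execute bits (mode & ~07111).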
+ self.assertEqualMode(mode, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(mode & ~07111, b.bzrdir._get_file_mode())
+ self.assertEqualMode(mode, b.control_files._dir_mode)
+ self.assertEqualMode(mode & ~07111, b.control_files._file_mode)
+
+ os.mkdir('d')
+ os.chmod('d', 0700)
+ b = self.make_branch('d')
+ self.assertEqualMode(0700, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(0600, b.bzrdir._get_file_mode())
+ self.assertEqualMode(0700, b.control_files._dir_mode)
+ self.assertEqualMode(0600, b.control_files._file_mode)
+ check_mode_r(self, 'd/.bzr', 00600, 00700)
+
+ def test_new_branch_group_sticky_bit(self):
+ if isinstance(self.branch_format, RemoteBranchFormat):
+            # Remote branch formats have no permission logic in them; there's
+ # nothing to test here.
+ raise tests.TestNotApplicable('Remote branches have no'
+ ' permission logic')
+ if sys.platform == 'win32':
+ raise tests.TestNotApplicable('chmod has no effect on win32')
+        elif sys.platform == 'darwin' or 'freebsd' in sys.platform:
+            # Mac OS X and FreeBSD-based platforms create temp dirs with the
+            # 'wheel' group, which users are not likely to be in, and this
+            # prevents us from setting the sgid bit.
+ os.chown(self.test_dir, os.getuid(), os.getgid())
+ t = self.make_branch_and_tree('.')
+ b = t.branch
+        # Also, these are BzrBranch format-specific things.
+ if not isinstance(b, BzrBranch):
+ raise tests.TestNotApplicable(
+ "Only applicable to bzr branches")
+ os.mkdir('b')
+ os.chmod('b', 02777)
+ b = self.make_branch('b')
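+        # A 02777 parent directory yields 00666 files: 02777 & ~07111 == 00666.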
+ self.assertEqualMode(02777, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(00666, b.bzrdir._get_file_mode())
+ self.assertEqualMode(02777, b.control_files._dir_mode)
+ self.assertEqualMode(00666, b.control_files._file_mode)
+ check_mode_r(self, 'b/.bzr', 00666, 02777)
+
+ os.mkdir('c')
+ os.chmod('c', 02750)
+ b = self.make_branch('c')
+ self.assertEqualMode(02750, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(00640, b.bzrdir._get_file_mode())
+ self.assertEqualMode(02750, b.control_files._dir_mode)
+ self.assertEqualMode(00640, b.control_files._file_mode)
+ check_mode_r(self, 'c/.bzr', 00640, 02750)
+
+ def test_mode_0(self):
+ """Test when a transport returns null permissions for .bzr"""
+ if isinstance(self.branch_format, RemoteBranchFormat):
+            # Remote branch formats have no permission logic in them; there's
+ # nothing to test here.
+ raise tests.TestNotApplicable('Remote branches have no'
+ ' permission logic')
+ self.make_branch_and_tree('.')
+ bzrdir = ControlDir.open('.')
+ # Monkey patch the transport
+ _orig_stat = bzrdir.transport.stat
+ def null_perms_stat(*args, **kwargs):
+ result = _orig_stat(*args, **kwargs)
+ return _NullPermsStat(result)
+ bzrdir.transport.stat = null_perms_stat
+ self.assertIs(None, bzrdir._get_dir_mode())
+ self.assertIs(None, bzrdir._get_file_mode())
diff --git a/bzrlib/tests/per_branch/test_pull.py b/bzrlib/tests/per_branch/test_pull.py
new file mode 100644
index 0000000..7fa92e5
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_pull.py
@@ -0,0 +1,274 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.pull behaviour."""
+
+from bzrlib import (
+ branch,
+ controldir,
+ errors,
+ memorytree,
+ revision,
+ )
+from bzrlib.tests import (
+ fixtures,
+ per_branch,
+ TestNotApplicable,
+ )
+
+
+class TestPull(per_branch.TestCaseWithBranch):
+
+ def test_pull_convergence_simple(self):
+ # when revisions are pulled, the left-most accessible parents must
+ # become the revision-history.
+ parent = self.make_branch_and_tree('parent')
+ parent.commit('1st post', rev_id='P1', allow_pointless=True)
+ mine = parent.bzrdir.sprout('mine').open_workingtree()
+ mine.commit('my change', rev_id='M1', allow_pointless=True)
+ parent.merge_from_branch(mine.branch)
+ parent.commit('merge my change', rev_id='P2')
+ mine.pull(parent.branch)
+ self.assertEqual('P2', mine.branch.last_revision())
+
+ def test_pull_merged_indirect(self):
+ # it should be possible to do a pull from one branch into another
+ # when the tip of the target was merged into the source branch
+ # via a third branch - so its buried in the ancestry and is not
+ # directly accessible.
+ parent = self.make_branch_and_tree('parent')
+ parent.commit('1st post', rev_id='P1', allow_pointless=True)
+ mine = parent.bzrdir.sprout('mine').open_workingtree()
+ mine.commit('my change', rev_id='M1', allow_pointless=True)
+ other = parent.bzrdir.sprout('other').open_workingtree()
+ other.merge_from_branch(mine.branch)
+ other.commit('merge my change', rev_id='O2')
+ parent.merge_from_branch(other.branch)
+ parent.commit('merge other', rev_id='P2')
+ mine.pull(parent.branch)
+ self.assertEqual('P2', mine.branch.last_revision())
+
+ def test_pull_updates_checkout_and_master(self):
+ """Pulling into a checkout updates the checkout and the master branch"""
+ master_tree = self.make_branch_and_tree('master')
+ rev1 = master_tree.commit('master')
+ checkout = master_tree.branch.create_checkout('checkout')
+
+ other = master_tree.branch.bzrdir.sprout('other').open_workingtree()
+ rev2 = other.commit('other commit')
+ # now pull, which should update both checkout and master.
+ checkout.branch.pull(other.branch)
+ self.assertEqual(rev2, checkout.branch.last_revision())
+ self.assertEqual(rev2, master_tree.branch.last_revision())
+
+ def test_pull_local_updates_checkout_only(self):
+ """Pulling --local into a checkout updates the checkout and not the
+ master branch"""
+ master_tree = self.make_branch_and_tree('master')
+ rev1 = master_tree.commit('master')
+ checkout = master_tree.branch.create_checkout('checkout')
+
+ other = master_tree.branch.bzrdir.sprout('other').open_workingtree()
+ rev2 = other.commit('other commit')
+ # now pull local, which should update checkout but not master.
+        checkout.branch.pull(other.branch, local=True)
+ self.assertEqual(rev2, checkout.branch.last_revision())
+ self.assertEqual(rev1, master_tree.branch.last_revision())
+
+ def test_pull_local_raises_LocalRequiresBoundBranch_on_unbound(self):
+ """Pulling --local into a branch that is not bound should fail."""
+ master_tree = self.make_branch_and_tree('branch')
+ rev1 = master_tree.commit('master')
+
+ other = master_tree.branch.bzrdir.sprout('other').open_workingtree()
+ rev2 = other.commit('other commit')
+ # now pull --local, which should raise LocalRequiresBoundBranch error.
+ self.assertRaises(errors.LocalRequiresBoundBranch,
+                          master_tree.branch.pull, other.branch, local=True)
+ self.assertEqual(rev1, master_tree.branch.last_revision())
+
+ def test_pull_returns_result(self):
+ parent = self.make_branch_and_tree('parent')
+ parent.commit('1st post', rev_id='P1')
+ mine = parent.bzrdir.sprout('mine').open_workingtree()
+ mine.commit('my change', rev_id='M1')
+ result = parent.branch.pull(mine.branch)
+ self.assertIsNot(None, result)
+ self.assertIs(mine.branch, result.source_branch)
+ self.assertIs(parent.branch, result.target_branch)
+ self.assertIs(parent.branch, result.master_branch)
+ self.assertIs(None, result.local_branch)
+ self.assertEqual(1, result.old_revno)
+ self.assertEqual('P1', result.old_revid)
+ self.assertEqual(2, result.new_revno)
+ self.assertEqual('M1', result.new_revid)
+ self.assertEqual([], result.tag_conflicts)
+
+ def test_pull_overwrite(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ tree_a.commit('message 1')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ tree_a.commit('message 2', rev_id='rev2a')
+ tree_b.commit('message 2', rev_id='rev2b')
+ self.assertRaises(errors.DivergedBranches, tree_a.pull, tree_b.branch)
+ self.assertRaises(errors.DivergedBranches,
+ tree_a.branch.pull, tree_b.branch,
+ overwrite=False, stop_revision='rev2b')
+ # It should not have updated the branch tip, but it should have fetched
+ # the revision if the repository supports "invisible" revisions
+ self.assertEqual('rev2a', tree_a.branch.last_revision())
+ if tree_a.branch.repository._format.supports_unreferenced_revisions:
+ self.assertTrue(tree_a.branch.repository.has_revision('rev2b'))
+ tree_a.branch.pull(tree_b.branch, overwrite=True,
+ stop_revision='rev2b')
+ self.assertEqual('rev2b', tree_a.branch.last_revision())
+ self.assertEqual(tree_b.branch.last_revision(),
+ tree_a.branch.last_revision())
+
+ def test_pull_merges_and_fetches_tags(self):
+ """Tags are updated by br.pull(source), and revisions named in those
+ tags are fetched.
+ """
+ # Make a source, sprout a target off it
+ try:
+ builder = self.make_branch_builder('source')
+ except errors.UninitializableFormat:
+            raise TestNotApplicable('uninitializable format')
+ source = fixtures.build_branch_with_non_ancestral_rev(builder)
+ target = source.bzrdir.sprout('target').open_branch()
+ # Add a tag to the source, then pull from source
+ try:
+ source.tags.set_tag('tag-a', 'rev-2')
+ except errors.TagsNotSupported:
+ raise TestNotApplicable('format does not support tags.')
+ source.get_config_stack().set('branch.fetch_tags', True)
+ target.pull(source)
+ # The tag is present, and so is its revision.
+ self.assertEqual('rev-2', target.tags.lookup_tag('tag-a'))
+ target.repository.get_revision('rev-2')
+
+ def test_pull_stop_revision_merges_and_fetches_tags(self):
+ """br.pull(source, stop_revision=REV) updates and fetches tags."""
+ # Make a source, sprout a target off it
+ try:
+ builder = self.make_branch_builder('source')
+ except errors.UninitializableFormat:
+            raise TestNotApplicable('uninitializable format')
+ source = fixtures.build_branch_with_non_ancestral_rev(builder)
+ target = source.bzrdir.sprout('target').open_branch()
+ # Add a new commit to the ancestry
+ builder.build_commit(message="Rev 2 again", rev_id='rev-2-again')
+ # Add a tag to the source, then pull rev-2-again from source
+ try:
+ source.tags.set_tag('tag-a', 'rev-2')
+ except errors.TagsNotSupported:
+ raise TestNotApplicable('format does not support tags.')
+ source.get_config_stack().set('branch.fetch_tags', True)
+ target.pull(source, 'rev-2-again')
+ # The tag is present, and so is its revision.
+ self.assertEqual('rev-2', target.tags.lookup_tag('tag-a'))
+ target.repository.get_revision('rev-2')
+
+
+class TestPullHook(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ super(TestPullHook, self).setUp()
+
+ def capture_post_pull_hook(self, result):
+ """Capture post pull hook calls to self.hook_calls.
+
+ The call is logged, as is some state of the two branches.
+ """
+ if result.local_branch:
+ local_locked = result.local_branch.is_locked()
+ local_base = result.local_branch.base
+ else:
+ local_locked = None
+ local_base = None
+ self.hook_calls.append(
+ ('post_pull', result.source_branch, local_base,
+ result.master_branch.base, result.old_revno,
+ result.old_revid,
+ result.new_revno, result.new_revid,
+ result.source_branch.is_locked(), local_locked,
+ result.master_branch.is_locked()))
+
+ def test_post_pull_empty_history(self):
+ target = self.make_branch('target')
+ source = self.make_branch('source')
+ branch.Branch.hooks.install_named_hook(
+ 'post_pull', self.capture_post_pull_hook, None)
+ target.pull(source)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_pull', source, None, target.base, 0, revision.NULL_REVISION,
+ 0, revision.NULL_REVISION, True, None, True)
+ ],
+ self.hook_calls)
+
+ def test_post_pull_bound_branch(self):
+ # pulling to a bound branch should pass in the master branch to the
+ # hook, allowing the correct number of emails to be sent, while still
+ # allowing hooks that want to modify the target to do so to both
+ # instances.
+ target = self.make_branch('target')
+ local = self.make_branch('local')
+ try:
+ local.bind(target)
+ except errors.UpgradeRequired:
+            # We can't bind this format to itself; typically it is the local
+ # branch that doesn't support binding. As of May 2007
+ # remotebranches can't be bound. Let's instead make a new local
+ # branch of the default type, which does allow binding.
+ # See https://bugs.launchpad.net/bzr/+bug/112020
+ local = controldir.ControlDir.create_branch_convenience('local2')
+ local.bind(target)
+ source = self.make_branch('source')
+ branch.Branch.hooks.install_named_hook(
+ 'post_pull', self.capture_post_pull_hook, None)
+ local.pull(source)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_pull', source, local.base, target.base, 0,
+ revision.NULL_REVISION, 0, revision.NULL_REVISION,
+ True, True, True)
+ ],
+ self.hook_calls)
+
+ def test_post_pull_nonempty_history(self):
+ target = self.make_branch_and_memory_tree('target')
+ target.lock_write()
+ target.add('')
+ rev1 = target.commit('rev 1')
+ target.unlock()
+ sourcedir = target.bzrdir.clone(self.get_url('source'))
+ source = memorytree.MemoryTree.create_on_branch(sourcedir.open_branch())
+ rev2 = source.commit('rev 2')
+ branch.Branch.hooks.install_named_hook(
+ 'post_pull', self.capture_post_pull_hook, None)
+ target.branch.pull(source.branch)
+        # the pull should fire the hook with the old and new revision info, and
+        # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_pull', source.branch, None, target.branch.base, 1, rev1,
+ 2, rev2, True, None, True)
+ ],
+ self.hook_calls)
diff --git a/bzrlib/tests/per_branch/test_push.py b/bzrlib/tests/per_branch/test_push.py
new file mode 100644
index 0000000..b837b4a
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_push.py
@@ -0,0 +1,461 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.push behaviour."""
+
+from cStringIO import StringIO
+import os
+
+from bzrlib import (
+ branch,
+ builtins,
+ controldir,
+ check,
+ errors,
+ memorytree,
+ push,
+ revision,
+ symbol_versioning,
+ tests,
+ transport,
+ )
+from bzrlib.smart import (
+ client,
+ )
+from bzrlib.tests import (
+ per_branch,
+ test_server,
+ )
+
+
+class TestPush(per_branch.TestCaseWithBranch):
+
+ def test_push_convergence_simple(self):
+ # when revisions are pushed, the left-most accessible parents must
+ # become the revision-history.
+ mine = self.make_branch_and_tree('mine')
+ mine.commit('1st post', rev_id='P1', allow_pointless=True)
+ other = mine.bzrdir.sprout('other').open_workingtree()
+ other.commit('my change', rev_id='M1', allow_pointless=True)
+ mine.merge_from_branch(other.branch)
+ mine.commit('merge my change', rev_id='P2')
+ result = mine.branch.push(other.branch)
+ self.assertEqual('P2', other.branch.last_revision())
+ # result object contains some structured data
+ self.assertEqual(result.old_revid, 'M1')
+ self.assertEqual(result.new_revid, 'P2')
+
+ def test_push_merged_indirect(self):
+ # it should be possible to do a push from one branch into another
+ # when the tip of the target was merged into the source branch
+ # via a third branch - so its buried in the ancestry and is not
+ # directly accessible.
+ mine = self.make_branch_and_tree('mine')
+ mine.commit('1st post', rev_id='P1', allow_pointless=True)
+ target = mine.bzrdir.sprout('target').open_workingtree()
+ target.commit('my change', rev_id='M1', allow_pointless=True)
+ other = mine.bzrdir.sprout('other').open_workingtree()
+ other.merge_from_branch(target.branch)
+ other.commit('merge my change', rev_id='O2')
+ mine.merge_from_branch(other.branch)
+ mine.commit('merge other', rev_id='P2')
+ mine.branch.push(target.branch)
+ self.assertEqual('P2', target.branch.last_revision())
+
+ def test_push_to_checkout_updates_master(self):
+ """Pushing into a checkout updates the checkout and the master branch"""
+ master_tree = self.make_branch_and_tree('master')
+ checkout = self.make_branch_and_tree('checkout')
+ try:
+ checkout.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+            # Can't bind this format, so the test is irrelevant.
+ return
+ rev1 = checkout.commit('master')
+
+ other = master_tree.branch.bzrdir.sprout('other').open_workingtree()
+ rev2 = other.commit('other commit')
+ # now push, which should update both checkout and master.
+ other.branch.push(checkout.branch)
+ self.assertEqual(rev2, checkout.branch.last_revision())
+ self.assertEqual(rev2, master_tree.branch.last_revision())
+
+ def test_push_raises_specific_error_on_master_connection_error(self):
+ master_tree = self.make_branch_and_tree('master')
+ checkout = self.make_branch_and_tree('checkout')
+ try:
+ checkout.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+            # Can't bind this format, so the test is irrelevant.
+ return
+ other = master_tree.branch.bzrdir.sprout('other').open_workingtree()
+ # move the branch out of the way on disk to cause a connection
+ # error.
+ os.rename('master', 'master_gone')
+ # try to push, which should raise a BoundBranchConnectionFailure.
+ self.assertRaises(errors.BoundBranchConnectionFailure,
+ other.branch.push, checkout.branch)
+
+ def test_push_new_tag_to_bound_branch(self):
+ master = self.make_branch('master')
+ bound = self.make_branch('bound')
+ try:
+ bound.bind(master)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable(
+ 'Format does not support bound branches')
+ other = bound.bzrdir.sprout('other').open_branch()
+ try:
+ other.tags.set_tag('new-tag', 'some-rev')
+ except errors.TagsNotSupported:
+ raise tests.TestNotApplicable('Format does not support tags')
+ other.push(bound)
+ self.assertEqual({'new-tag': 'some-rev'}, bound.tags.get_tag_dict())
+ self.assertEqual({'new-tag': 'some-rev'}, master.tags.get_tag_dict())
+
+ def test_push_uses_read_lock(self):
+ """Push should only need a read lock on the source side."""
+ source = self.make_branch_and_tree('source')
+ target = self.make_branch('target')
+
+ self.build_tree(['source/a'])
+ source.add(['a'])
+ source.commit('a')
+
+ source.branch.lock_read()
+ try:
+ target.lock_write()
+ try:
+ source.branch.push(target, stop_revision=source.last_revision())
+ finally:
+ target.unlock()
+ finally:
+ source.branch.unlock()
+
+ def test_push_within_repository(self):
+ """Push from one branch to another inside the same repository."""
+ try:
+ repo = self.make_repository('repo', shared=True)
+ except (errors.IncompatibleFormat, errors.UninitializableFormat):
+ # This Branch format cannot create shared repositories
+ return
+ if not repo._format.supports_nesting_repositories:
+ return
+ # This is a little bit trickier because make_branch_and_tree will not
+ # re-use a shared repository.
+ a_bzrdir = self.make_bzrdir('repo/tree')
+ try:
+ a_branch = self.branch_format.initialize(a_bzrdir)
+ except (errors.UninitializableFormat):
+ # Cannot create these branches
+ return
+ try:
+ tree = a_branch.bzrdir.create_workingtree()
+ except errors.NotLocalUrl:
+ if self.vfs_transport_factory is test_server.LocalURLServer:
+                # The branch is colocated on disk, so we cannot create a
+                # checkout there; hopefully callers will expect this.
+ local_controldir = controldir.ControlDir.open(
+ self.get_vfs_only_url('repo/tree'))
+ tree = local_controldir.create_workingtree()
+ else:
+ tree = a_branch.create_checkout('repo/tree', lightweight=True)
+ self.build_tree(['repo/tree/a'])
+ tree.add(['a'])
+ tree.commit('a')
+
+ to_bzrdir = self.make_bzrdir('repo/branch')
+ to_branch = self.branch_format.initialize(to_bzrdir)
+ tree.branch.push(to_branch)
+
+ self.assertEqual(tree.branch.last_revision(),
+ to_branch.last_revision())
+
+ def test_push_overwrite_with_older_mainline_rev(self):
+ """Pushing an older mainline revision with overwrite.
+
+ This was <https://bugs.launchpad.net/bzr/+bug/386576>.
+ """
+ source = self.make_branch_and_tree('source')
+ target = self.make_branch('target')
+
+ source.commit('1st commit')
+ source.commit('2nd commit', rev_id='rev-2')
+ source.commit('3rd commit')
+ source.branch.push(target)
+ source.branch.push(target, stop_revision='rev-2', overwrite=True)
+ self.assertEqual('rev-2', target.last_revision())
+
+ def test_push_overwrite_of_non_tip_with_stop_revision(self):
+ """Combining the stop_revision and overwrite options works.
+
+ This was <https://bugs.launchpad.net/bzr/+bug/234229>.
+ """
+ source = self.make_branch_and_tree('source')
+ target = self.make_branch('target')
+
+ source.commit('1st commit')
+ source.branch.push(target)
+ source.commit('2nd commit', rev_id='rev-2')
+ source.commit('3rd commit')
+
+ source.branch.push(target, stop_revision='rev-2', overwrite=True)
+ self.assertEqual('rev-2', target.last_revision())
+
+ def test_push_repository_no_branch_doesnt_fetch_all_revs(self):
+ # See https://bugs.launchpad.net/bzr/+bug/465517
+ t = self.get_transport('target')
+ t.ensure_base()
+ bzrdir = self.bzrdir_format.initialize_on_transport(t)
+ try:
+ bzrdir.open_branch()
+ except errors.NotBranchError:
+ pass
+ else:
+ raise tests.TestNotApplicable('older formats can\'t have a repo'
+ ' without a branch')
+ try:
+ source = self.make_branch_builder('source',
+ format=self.bzrdir_format)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable('cannot initialize this format')
+ source.start_series()
+ source.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ source.build_snapshot('B', ['A'], [])
+ source.build_snapshot('C', ['A'], [])
+ source.finish_series()
+ b = source.get_branch()
+        # Note: We can't read lock the source branch. Some formats take a write
+        # lock to 'set_push_location', which breaks if only a read lock is held.
+ self.addCleanup(b.lock_write().unlock)
+ repo = bzrdir.create_repository()
+ # This means 'push the source branch into this dir'
+ bzrdir.push_branch(b)
+ self.addCleanup(repo.lock_read().unlock)
+ # We should have pushed 'C', but not 'B', since it isn't in the
+ # ancestry
+ self.assertEqual(['A', 'C'], sorted(repo.all_revision_ids()))
+
+ def test_push_with_default_stacking_does_not_create_broken_branch(self):
+ """Pushing a new standalone branch works even when there's a default
+ stacking policy at the destination.
+
+ The new branch will preserve the repo format (even if it isn't the
+ default for the branch), and will be stacked when the repo format
+        allows (which means that the branch format isn't necessarily preserved).
+ """
+ if self.bzrdir_format.fixed_components:
+ raise tests.TestNotApplicable('Not a metadir format.')
+ if isinstance(self.branch_format, branch.BranchReferenceFormat):
+ # This test could in principle apply to BranchReferenceFormat, but
+ # make_branch_builder doesn't support it.
+ raise tests.TestSkipped(
+ "BranchBuilder can't make reference branches.")
+ # Make a branch called "local" in a stackable repository
+ # The branch has 3 revisions:
+ # - rev-1, adds a file
+ # - rev-2, no changes
+ # - rev-3, modifies the file.
+ repo = self.make_repository('repo', shared=True, format='1.6')
+ builder = self.make_branch_builder('repo/local')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('filename', 'f-id', 'file', 'content\n'))])
+ builder.build_snapshot('rev-2', ['rev-1'], [])
+ builder.build_snapshot('rev-3', ['rev-2'],
+ [('modify', ('f-id', 'new-content\n'))])
+ builder.finish_series()
+ trunk = builder.get_branch()
+ # Sprout rev-1 to "trunk", so that we can stack on it.
+ trunk.bzrdir.sprout(self.get_url('trunk'), revision_id='rev-1')
+ # Set a default stacking policy so that new branches will automatically
+ # stack on trunk.
+ self.make_bzrdir('.').get_config().set_default_stack_on('trunk')
+ # Push rev-2 to a new branch "remote". It will be stacked on "trunk".
+ output = StringIO()
+ push._show_push_branch(trunk, 'rev-2', self.get_url('remote'), output)
+        # Push rev-3 onto "remote". If "remote" is not stacked and is missing the
+ # fulltext record for f-id @ rev-1, then this will fail.
+ remote_branch = branch.Branch.open(self.get_url('remote'))
+ trunk.push(remote_branch)
+ check.check_dwim(remote_branch.base, False, True, True)
+
+
+class TestPushHook(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ super(TestPushHook, self).setUp()
+
+ def capture_post_push_hook(self, result):
+ """Capture post push hook calls to self.hook_calls.
+
+ The call is logged, as is some state of the two branches.
+ """
+ if result.local_branch:
+ local_locked = result.local_branch.is_locked()
+ local_base = result.local_branch.base
+ else:
+ local_locked = None
+ local_base = None
+ self.hook_calls.append(
+ ('post_push', result.source_branch, local_base,
+ result.master_branch.base,
+ result.old_revno, result.old_revid,
+ result.new_revno, result.new_revid,
+ result.source_branch.is_locked(), local_locked,
+ result.master_branch.is_locked()))
+
+ def test_post_push_empty_history(self):
+ target = self.make_branch('target')
+ source = self.make_branch('source')
+ branch.Branch.hooks.install_named_hook(
+ 'post_push', self.capture_post_push_hook, None)
+ source.push(target)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_push', source, None, target.base, 0, revision.NULL_REVISION,
+ 0, revision.NULL_REVISION, True, None, True)
+ ],
+ self.hook_calls)
+
+ def test_post_push_bound_branch(self):
+ # pushing to a bound branch should pass in the master branch to the
+ # hook, allowing the correct number of emails to be sent, while still
+ # allowing hooks that want to modify the target to do so to both
+ # instances.
+ target = self.make_branch('target')
+ local = self.make_branch('local')
+ try:
+ local.bind(target)
+ except errors.UpgradeRequired:
+            # We can't bind this format to itself; typically it is the local
+ # branch that doesn't support binding. As of May 2007
+ # remotebranches can't be bound. Let's instead make a new local
+ # branch of the default type, which does allow binding.
+ # See https://bugs.launchpad.net/bzr/+bug/112020
+ local = controldir.ControlDir.create_branch_convenience('local2')
+ local.bind(target)
+ source = self.make_branch('source')
+ branch.Branch.hooks.install_named_hook(
+ 'post_push', self.capture_post_push_hook, None)
+ source.push(local)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_push', source, local.base, target.base, 0,
+ revision.NULL_REVISION, 0, revision.NULL_REVISION,
+ True, True, True)
+ ],
+ self.hook_calls)
+
+ def test_post_push_nonempty_history(self):
+ target = self.make_branch_and_memory_tree('target')
+ target.lock_write()
+ target.add('')
+ rev1 = target.commit('rev 1')
+ target.unlock()
+ sourcedir = target.bzrdir.clone(self.get_url('source'))
+ source = memorytree.MemoryTree.create_on_branch(sourcedir.open_branch())
+ rev2 = source.commit('rev 2')
+ branch.Branch.hooks.install_named_hook(
+ 'post_push', self.capture_post_push_hook, None)
+ source.branch.push(target.branch)
+        # the push should fire the hook with the old and new revision info, and
+        # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_push', source.branch, None, target.branch.base, 1, rev1,
+ 2, rev2, True, None, True)
+ ],
+ self.hook_calls)
+
+
+class EmptyPushSmartEffortTests(per_branch.TestCaseWithBranch):
+ """Tests that a push of 0 revisions should make a limited number of smart
+ protocol RPCs.
+ """
+
+ def setUp(self):
+ # Skip some scenarios that don't apply to these tests.
+ if (self.transport_server is not None
+ and issubclass(self.transport_server,
+ test_server.SmartTCPServer_for_testing)):
+ raise tests.TestNotApplicable(
+ 'Does not apply when remote backing branch is also '
+ 'a smart branch')
+ if not self.branch_format.supports_leaving_lock():
+ raise tests.TestNotApplicable(
+ 'Branch format is not usable via HPSS.')
+ super(EmptyPushSmartEffortTests, self).setUp()
+ # Create a smart server that publishes whatever the backing VFS server
+ # does.
+ self.smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(self.smart_server, self.get_server())
+ # Make two empty branches, 'empty' and 'target'.
+ self.empty_branch = self.make_branch('empty')
+ self.make_branch('target')
+ # Log all HPSS calls into self.hpss_calls.
+ client._SmartClient.hooks.install_named_hook(
+ 'call', self.capture_hpss_call, None)
+ self.hpss_calls = []
+
+ def capture_hpss_call(self, params):
+ self.hpss_calls.append(params.method)
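+        # Each recorded entry is a smart verb name, e.g. 'Branch.lock_write'.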
+
+ def test_empty_branch_api(self):
+ """The branch_obj.push API should make a limited number of HPSS calls.
+ """
+ t = transport.get_transport_from_url(self.smart_server.get_url()).clone('target')
+ target = branch.Branch.open_from_transport(t)
+ self.empty_branch.push(target)
+ self.assertEqual(
+ ['BzrDir.open_2.1',
+ 'BzrDir.open_branchV3',
+ 'BzrDir.find_repositoryV3',
+ 'Branch.get_stacked_on_url',
+ 'Branch.lock_write',
+ 'Branch.last_revision_info',
+ 'Branch.unlock'],
+ self.hpss_calls)
+
+ def test_empty_branch_command(self):
+ """The 'bzr push' command should make a limited number of HPSS calls.
+ """
+ cmd = builtins.cmd_push()
+ cmd.outf = tests.StringIOWrapper()
+ cmd.run(
+ directory=self.get_url('empty'),
+ location=self.smart_server.get_url() + 'target')
+ # HPSS calls as of 2008/09/22:
+ # [BzrDir.open, BzrDir.open_branch, BzrDir.find_repositoryV2,
+ # Branch.get_stacked_on_url, get, get, Branch.lock_write,
+ # Branch.last_revision_info, Branch.unlock]
+ self.assertTrue(len(self.hpss_calls) <= 9, self.hpss_calls)
+
+
+class TestLossyPush(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ super(TestLossyPush, self).setUp()
+
+ def test_lossy_push_raises_same_vcs(self):
+ target = self.make_branch('target')
+ source = self.make_branch('source')
+ self.assertRaises(errors.LossyPushToSameVCS, source.push, target, lossy=True)
diff --git a/bzrlib/tests/per_branch/test_reconcile.py b/bzrlib/tests/per_branch/test_reconcile.py
new file mode 100644
index 0000000..61fe0ac
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_reconcile.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch implementations - test reconcile() functionality"""
+
+from bzrlib import errors, reconcile
+from bzrlib.branch import BzrBranch
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests.per_branch import TestCaseWithBranch
+from bzrlib.tests import TestNotApplicable
+
+
+class TestBranchReconcile(TestCaseWithBranch):
+
+ def test_reconcile_fixes_invalid_revhistory(self):
+ if not isinstance(self.branch_format, BzrBranch):
+ raise TestNotApplicable("test only applies to bzr formats")
+ # Different formats have different ways of handling invalid revision
+ # histories, so the setup portion is customized
+ tree = self.make_branch_and_tree('test')
+ r1 = tree.commit('one')
+ r2 = tree.commit('two')
+ r3 = tree.commit('three')
+ r4 = tree.commit('four')
+ # create an alternate branch
+ tree.set_parent_ids([r1])
+ tree.branch.set_last_revision_info(1, r1)
+ r2b = tree.commit('two-b')
+
+ # now go back and merge the commit
+ tree.set_parent_ids([r4, r2b])
+ tree.branch.set_last_revision_info(4, r4)
+
+ r5 = tree.commit('five')
+ # Now, try to set an invalid history
+ try:
+ self.applyDeprecated(deprecated_in((2, 4, 0)),
+ tree.branch.set_revision_history, [r1, r2b, r5])
+ if tree.branch.last_revision_info() != (3, r5):
+ # RemoteBranch silently corrects an impossible revision
+ # history given to set_revision_history. It can be tricked
+ # with set_last_revision_info though.
+ tree.branch.set_last_revision_info(3, r5)
+ except errors.NotLefthandHistory:
+ # Branch5 allows set_revision_history to be wrong
+ # Branch6 raises NotLefthandHistory, but we can force bogus stuff
+ # with set_last_revision_info
+ tree.branch.set_last_revision_info(3, r5)
+
+ self.assertEqual((3, r5), tree.branch.last_revision_info())
+ reconciler = tree.branch.reconcile()
+ self.assertEqual((5, r5), tree.branch.last_revision_info())
+ self.assertIs(True, reconciler.fixed_history)
+
+ def test_reconcile_returns_reconciler(self):
+ a_branch = self.make_branch('a_branch')
+ result = a_branch.reconcile()
+ self.assertIsInstance(result, reconcile.BranchReconciler)
+ # No history to fix
+ self.assertIs(False, result.fixed_history)
+
+ def test_reconcile_supports_thorough(self):
+ a_branch = self.make_branch('a_branch')
+ a_branch.reconcile(thorough=False)
+ a_branch.reconcile(thorough=True)
+
+ def test_reconcile_handles_ghosts_in_revhistory(self):
+ tree = self.make_branch_and_tree('test')
+ if not tree.branch.repository._format.supports_ghosts:
+ raise TestNotApplicable("repository format does not support ghosts")
+ tree.set_parent_ids(["spooky"], allow_leftmost_as_ghost=True)
+ r1 = tree.commit('one')
+ r2 = tree.commit('two')
+ tree.branch.set_last_revision_info(2, r2)
+
+ reconciler = tree.branch.reconcile()
+ self.assertEquals(r2, tree.branch.last_revision())
diff --git a/bzrlib/tests/per_branch/test_revision_id_to_dotted_revno.py b/bzrlib/tests/per_branch/test_revision_id_to_dotted_revno.py
new file mode 100644
index 0000000..74299c9
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_revision_id_to_dotted_revno.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.revision_id_to_dotted_revno()"""
+
+from bzrlib import errors
+
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+class TestRevisionIdToDottedRevno(TestCaseWithBranch):
+
+ def test_lookup_dotted_revno(self):
+ tree = self.create_tree_with_merge()
+ the_branch = tree.branch
+ self.assertEqual((0,), the_branch.revision_id_to_dotted_revno('null:'))
+ self.assertEqual((1,), the_branch.revision_id_to_dotted_revno('rev-1'))
+ self.assertEqual((2,), the_branch.revision_id_to_dotted_revno('rev-2'))
+ self.assertEqual((3,), the_branch.revision_id_to_dotted_revno('rev-3'))
+ self.assertEqual((1,1,1), the_branch.revision_id_to_dotted_revno(
+ 'rev-1.1.1'))
+ self.assertRaises(errors.NoSuchRevision,
+ the_branch.revision_id_to_dotted_revno, 'rev-1.0.2')
diff --git a/bzrlib/tests/per_branch/test_revision_id_to_revno.py b/bzrlib/tests/per_branch/test_revision_id_to_revno.py
new file mode 100644
index 0000000..7e65d86
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_revision_id_to_revno.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.revision_id_to_revno()"""
+
+from bzrlib import errors
+from bzrlib.tests import TestNotApplicable
+
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+class TestRevisionIdToRevno(TestCaseWithBranch):
+
+ def test_simple_revno(self):
+ tree = self.create_tree_with_merge()
+ the_branch = tree.branch
+
+ self.assertEqual(0, the_branch.revision_id_to_revno('null:'))
+ self.assertEqual(1, the_branch.revision_id_to_revno('rev-1'))
+ self.assertEqual(2, the_branch.revision_id_to_revno('rev-2'))
+ self.assertEqual(3, the_branch.revision_id_to_revno('rev-3'))
+
+ self.assertRaises(errors.NoSuchRevision,
+ the_branch.revision_id_to_revno, 'rev-none')
+ # revision_id_to_revno is defined as returning only integer revision
+ # numbers, so non-mainline revisions get NoSuchRevision raised
+ self.assertRaises(errors.NoSuchRevision,
+ the_branch.revision_id_to_revno, 'rev-1.1.1')
+
+ def test_mainline_ghost(self):
+ tree = self.make_branch_and_tree('tree1')
+ if not tree.branch.repository._format.supports_ghosts:
+ raise TestNotApplicable("repository format does not support ghosts")
+ tree.set_parent_ids(["spooky"], allow_leftmost_as_ghost=True)
+ tree.add('')
+ tree.commit('msg1', rev_id='rev1')
+ tree.commit('msg2', rev_id='rev2')
+ # Some older branch formats store the full known revision history
+ # and thus can't distinguish between not being able to find a revno because of
+ # a ghost and the revision not being on the mainline. As such,
+ # allow both NoSuchRevision and GhostRevisionsHaveNoRevno here.
+ self.assertRaises((errors.NoSuchRevision, errors.GhostRevisionsHaveNoRevno),
+ tree.branch.revision_id_to_revno, "unknown")
+ self.assertEquals(1, tree.branch.revision_id_to_revno("rev1"))
+ self.assertEquals(2, tree.branch.revision_id_to_revno("rev2"))
+
+ def test_empty(self):
+ branch = self.make_branch('.')
+ self.assertRaises(errors.NoSuchRevision,
+ branch.revision_id_to_revno, "unknown")
+ self.assertEquals(0, branch.revision_id_to_revno('null:'))
diff --git a/bzrlib/tests/per_branch/test_sprout.py b/bzrlib/tests/per_branch/test_sprout.py
new file mode 100644
index 0000000..6b7b153
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_sprout.py
@@ -0,0 +1,224 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.sprout()"""
+
+import os
+from bzrlib import (
+ branch as _mod_branch,
+ errors,
+ osutils,
+ remote,
+ revision as _mod_revision,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+class TestSprout(TestCaseWithBranch):
+
+ def test_sprout_branch_nickname(self):
+        # Test that the nick name is always reset.
+ raise tests.TestSkipped('XXX branch sprouting is not yet tested.')
+
+ def test_sprout_branch_parent(self):
+ source = self.make_branch('source')
+ target = source.bzrdir.sprout(self.get_url('target')).open_branch()
+ self.assertEqual(source.bzrdir.root_transport.base, target.get_parent())
+
+ def test_sprout_uses_bzrdir_branch_format(self):
+ # branch.sprout(bzrdir) is defined as using the branch format selected
+ # by bzrdir; format preservation is achieved by parameterising the
+ # bzrdir during bzrdir.sprout, which is where stacking compatibility
+ # checks are done. So this test tests that each implementation of
+ # Branch.sprout delegates appropriately to the bzrdir which the
+ # branch is being created in, rather than testing that the result is
+ # in the format that we are testing (which is what would happen if
+ # the branch did not delegate appropriately).
+ if isinstance(self.branch_format, _mod_branch.BranchReferenceFormat):
+ raise tests.TestNotApplicable('cannot sprout to a reference')
+ # Start with a format that is unlikely to be the target format
+        # (We call the super class to allow overriding the format of creation.)
+ source = tests.TestCaseWithTransport.make_branch(self, 'old-branch',
+ format='knit')
+ target_bzrdir = self.make_bzrdir('target')
+ target_bzrdir.create_repository()
+ result_format = self.branch_format
+ if isinstance(target_bzrdir, remote.RemoteBzrDir):
+ # for a remote bzrdir, we need to parameterise it with a branch
+ # format, as, after creation, the newly opened remote objects
+ # do not have one unless a branch was created at the time.
+            # We use branch format 6 because it's not the default, and it's not
+            # metaweave either.
+ target_bzrdir._format.set_branch_format(_mod_branch.BzrBranchFormat6())
+ result_format = target_bzrdir._format.get_branch_format()
+ target = source.sprout(target_bzrdir)
+ if isinstance(target, remote.RemoteBranch):
+ # we have to look at the real branch to see whether RemoteBranch
+ # did the right thing.
+ target._ensure_real()
+ target = target._real_branch
+ if isinstance(result_format, remote.RemoteBranchFormat):
+ # Unwrap a parameterised RemoteBranchFormat for comparison.
+ result_format = result_format._custom_format
+ self.assertIs(result_format.__class__, target._format.__class__)
+
+ def test_sprout_partial(self):
+ # test sprouting with a prefix of the revision-history.
+ # also needs not-on-revision-history behaviour defined.
+ wt_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/one'])
+ wt_a.add(['one'])
+ wt_a.commit('commit one', rev_id='1')
+ self.build_tree(['a/two'])
+ wt_a.add(['two'])
+ wt_a.commit('commit two', rev_id='2')
+ repo_b = self.make_repository('b')
+ repo_a = wt_a.branch.repository
+ repo_a.copy_content_into(repo_b)
+ br_b = wt_a.branch.sprout(repo_b.bzrdir, revision_id='1')
+ self.assertEqual('1', br_b.last_revision())
+
+ def test_sprout_partial_not_in_revision_history(self):
+ """We should be able to sprout from any revision in ancestry."""
+ wt = self.make_branch_and_tree('source')
+ self.build_tree(['source/a'])
+ wt.add('a')
+ wt.commit('rev1', rev_id='rev1')
+ wt.commit('rev2-alt', rev_id='rev2-alt')
+ wt.set_parent_ids(['rev1'])
+ wt.branch.set_last_revision_info(1, 'rev1')
+ wt.commit('rev2', rev_id='rev2')
+ wt.set_parent_ids(['rev2', 'rev2-alt'])
+ wt.commit('rev3', rev_id='rev3')
+
+ repo = self.make_repository('target')
+ repo.fetch(wt.branch.repository)
+ branch2 = wt.branch.sprout(repo.bzrdir, revision_id='rev2-alt')
+ self.assertEqual((2, 'rev2-alt'), branch2.last_revision_info())
+ self.assertEqual('rev2-alt', branch2.last_revision())
+
+ def test_sprout_preserves_tags(self):
+ """Sprout preserves tags, even tags of absent revisions."""
+ try:
+ builder = self.make_branch_builder('source')
+ except errors.UninitializableFormat:
+ raise tests.TestSkipped('Uninitializable branch format')
+ builder.build_commit(message="Rev 1", rev_id='rev-1')
+ source = builder.get_branch()
+ try:
+ source.tags.set_tag('tag-a', 'missing-rev')
+ except (errors.TagsNotSupported, errors.GhostTagsNotSupported):
+ raise tests.TestNotApplicable(
+ 'Branch format does not support tags or tags to ghosts.')
+ # Now source has a tag pointing to an absent revision. Sprout it.
+ target_bzrdir = self.make_repository('target').bzrdir
+ new_branch = source.sprout(target_bzrdir)
+ # The tag is present in the target
+ self.assertEqual('missing-rev', new_branch.tags.lookup_tag('tag-a'))
+
+ def test_sprout_from_any_repo_revision(self):
+ """We should be able to sprout from any revision."""
+ wt = self.make_branch_and_tree('source')
+ self.build_tree(['source/a'])
+ wt.add('a')
+ wt.commit('rev1a', rev_id='rev1a')
+ # simulated uncommit
+ wt.branch.set_last_revision_info(0, _mod_revision.NULL_REVISION)
+ wt.set_last_revision(_mod_revision.NULL_REVISION)
+ wt.revert()
+ wt.commit('rev1b', rev_id='rev1b')
+ wt2 = wt.bzrdir.sprout('target',
+ revision_id='rev1a').open_workingtree()
+ self.assertEqual('rev1a', wt2.last_revision())
+ self.assertPathExists('target/a')
+
+ def test_sprout_with_unicode_symlink(self):
+ # this tests bug #272444
+        # Since the trigger function seems to be set_parent_trees, there is
+        # also a similar test, test_unicode_symlink, in the TestSetParents
+        # class in per_workingtree/test_parents.py.
+ self.requireFeature(features.SymlinkFeature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+
+ tree = self.make_branch_and_tree('tree1')
+
+ # The link points to a file whose name is an omega
+ # U+03A9 GREEK CAPITAL LETTER OMEGA
+ # UTF-8: ce a9 UTF-16BE: 03a9 Decimal: &#937;
+ target = u'\u03a9'
+ link_name = u'\N{Euro Sign}link'
+ os.symlink(target, 'tree1/' + link_name)
+        tree.add([link_name], ['link-id'])
+
+ revision = tree.commit('added a link to a Unicode target')
+ tree.bzrdir.sprout('dest')
+ self.assertEqual(target, osutils.readlink('dest/' + link_name))
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # Check that the symlink target is safely round-tripped in the trees.
+ self.assertEqual(target, tree.get_symlink_target('link-id'))
+ self.assertEqual(target,
+ tree.basis_tree().get_symlink_target('link-id'))
+
+ def test_sprout_with_ghost_in_mainline(self):
+ tree = self.make_branch_and_tree('tree1')
+ if not tree.branch.repository._format.supports_ghosts:
+ raise tests.TestNotApplicable(
+ "repository format does not support ghosts in mainline")
+ tree.set_parent_ids(["spooky"], allow_leftmost_as_ghost=True)
+ tree.add('')
+ tree.commit('msg1', rev_id='rev1')
+ tree.commit('msg2', rev_id='rev2')
+ tree.bzrdir.sprout('target', revision_id='rev1')
+
+ def assertBranchHookBranchIsStacked(self, pre_change_params):
+        # Calling get_stacked_on_url raises if the branch is not stacked, so
+        # the call itself is the assertion.
+ pre_change_params.branch.get_stacked_on_url()
+ self.hook_calls.append(pre_change_params)
+
+ def test_sprout_stacked_hooks_get_stacked_branch(self):
+ tree = self.make_branch_and_tree('source')
+ tree.commit('a commit')
+ revid = tree.commit('a second commit')
+ source = tree.branch
+ target_transport = self.get_transport('target')
+ self.hook_calls = []
+ _mod_branch.Branch.hooks.install_named_hook("pre_change_branch_tip",
+ self.assertBranchHookBranchIsStacked, None)
+ try:
+ dir = source.bzrdir.sprout(target_transport.base,
+ source.last_revision(), possible_transports=[target_transport],
+ source_branch=source, stacked=True)
+ except errors.UnstackableBranchFormat:
+ if not self.branch_format.supports_stacking():
+ raise tests.TestNotApplicable(
+ "Format doesn't auto stack successfully.")
+ else:
+ raise
+ result = dir.open_branch()
+ self.assertEqual(revid, result.last_revision())
+ self.assertEqual(source.base, result.get_stacked_on_url())
+ # Smart servers invoke hooks on both sides
+ if isinstance(result, remote.RemoteBranch):
+ expected_calls = 2
+ else:
+ expected_calls = 1
+ self.assertEqual(expected_calls, len(self.hook_calls))
+
diff --git a/bzrlib/tests/per_branch/test_stacking.py b/bzrlib/tests/per_branch/test_stacking.py
new file mode 100644
index 0000000..877e44b
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_stacking.py
@@ -0,0 +1,589 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Branch.get_stacked_on_url and set_stacked_on_url."""
+
+from bzrlib import (
+ branch,
+ controldir,
+ check,
+ errors,
+ )
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import fixtures, TestNotApplicable, transport_util
+from bzrlib.tests.per_branch import TestCaseWithBranch
+
+
+unstackable_format_errors = (
+ errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat,
+ )
+
+
+class TestStacking(TestCaseWithBranch):
+
+ def check_lines_added_or_present(self, stacked_branch, revid):
+ # similar to a failure seen in bug 288751 by mbp 20081120
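+        # Extracting the inventory lines for revid verifies that the stacked
+        # repository can reconstruct that text without error.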
+ stacked_repo = stacked_branch.repository
+ stacked_repo.lock_read()
+ try:
+ list(stacked_repo.inventories.iter_lines_added_or_present_in_keys(
+ [(revid,)]))
+ finally:
+ stacked_repo.unlock()
+
+ def test_get_set_stacked_on_url(self):
+ # branches must either:
+ # raise UnstackableBranchFormat or
+ # raise UnstackableRepositoryFormat or
+ # permit stacking to be done and then return the stacked location.
+ branch = self.make_branch('branch')
+ target = self.make_branch('target')
+ try:
+ branch.set_stacked_on_url(target.base)
+ except unstackable_format_errors:
+ # if the set failed, so must the get
+ self.assertRaises(unstackable_format_errors, branch.get_stacked_on_url)
+ self.assertFalse(branch._format.supports_stacking())
+ return
+ self.assertTrue(branch._format.supports_stacking())
+ # now we have a stacked branch:
+ self.assertEqual(target.base, branch.get_stacked_on_url())
+ branch.set_stacked_on_url(None)
+ self.assertRaises(errors.NotStacked, branch.get_stacked_on_url)
+
+ def test_get_set_stacked_on_relative(self):
+ # Branches can be stacked on other branches using relative paths.
+ branch = self.make_branch('branch')
+ target = self.make_branch('target')
+ try:
+ branch.set_stacked_on_url('../target')
+ except unstackable_format_errors:
+ # if the set failed, so must the get
+ self.assertRaises(unstackable_format_errors, branch.get_stacked_on_url)
+ return
+ self.assertEqual('../target', branch.get_stacked_on_url())
+
+ def test_set_stacked_on_same_branch_raises(self):
+        # Stacking a branch on itself raises an error and doesn't execute the
+        # change. Reported in bug 376243.
+ branch = self.make_branch('branch')
+ try:
+ self.assertRaises(errors.UnstackableLocationError,
+ branch.set_stacked_on_url, '../branch')
+ except unstackable_format_errors:
+ # if the set failed, so must the get
+ self.assertRaises(unstackable_format_errors, branch.get_stacked_on_url)
+ return
+ self.assertRaises(errors.NotStacked, branch.get_stacked_on_url)
+
+ def test_set_stacked_on_same_branch_after_being_stacked_raises(self):
+        # Stacking a branch on itself raises an error and doesn't execute the
+        # change.
+ branch = self.make_branch('branch')
+ target = self.make_branch('target')
+ try:
+ branch.set_stacked_on_url('../target')
+ except unstackable_format_errors:
+ # if the set failed, so must the get
+ self.assertRaises(unstackable_format_errors, branch.get_stacked_on_url)
+ return
+ self.assertRaises(errors.UnstackableLocationError,
+ branch.set_stacked_on_url, '../branch')
+ self.assertEqual('../target', branch.get_stacked_on_url())
+
+ def assertRevisionInRepository(self, repo_path, revid):
+ """Check that a revision is in a repository, disregarding stacking."""
+ repo = controldir.ControlDir.open(repo_path).open_repository()
+ self.assertTrue(repo.has_revision(revid))
+
+ def assertRevisionNotInRepository(self, repo_path, revid):
+ """Check that a revision is not in a repository, disregarding stacking."""
+ repo = controldir.ControlDir.open(repo_path).open_repository()
+ self.assertFalse(repo.has_revision(revid))
+
+ def test_get_graph_stacked(self):
+ """A stacked repository shows the graph of its parent."""
+ trunk_tree = self.make_branch_and_tree('mainline')
+ trunk_revid = trunk_tree.commit('mainline')
+ # make a new branch, and stack on the existing one. we don't use
+ # sprout(stacked=True) here because if that is buggy and copies data
+ # it would cause a false pass of this test.
+ new_branch = self.make_branch('new_branch')
+ try:
+ new_branch.set_stacked_on_url(trunk_tree.branch.base)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ # reading the graph from the stacked branch's repository should see
+ # data from the stacked-on branch
+ new_repo = new_branch.repository
+ new_repo.lock_read()
+ try:
+ self.assertEqual(new_repo.get_parent_map([trunk_revid]),
+ {trunk_revid: (NULL_REVISION, )})
+ finally:
+ new_repo.unlock()
+
+ def test_sprout_stacked(self):
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('mainline')
+ trunk_revid = trunk_tree.commit('mainline')
+ # and make branch from it which is stacked
+ try:
+ new_dir = trunk_tree.bzrdir.sprout('newbranch', stacked=True)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ # stacked repository
+ self.assertRevisionNotInRepository('newbranch', trunk_revid)
+ tree = new_dir.open_branch().create_checkout('local')
+ new_branch_revid = tree.commit('something local')
+ self.assertRevisionNotInRepository(
+ trunk_tree.branch.base, new_branch_revid)
+ self.assertRevisionInRepository('newbranch', new_branch_revid)
+
+ def test_sprout_stacked_from_smart_server(self):
+ # We have a mainline
+ trunk_tree = self.make_branch_and_tree('mainline')
+ trunk_revid = trunk_tree.commit('mainline')
+ # Make sure that we can make a stacked branch from it
+ try:
+ trunk_tree.bzrdir.sprout('testbranch', stacked=True)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ # Now serve the original mainline from a smart server
+ remote_transport = self.make_smart_server('mainline')
+ remote_bzrdir = controldir.ControlDir.open_from_transport(remote_transport)
+ # and make branch from the smart server which is stacked
+ new_dir = remote_bzrdir.sprout('newbranch', stacked=True)
+ # stacked repository
+ self.assertRevisionNotInRepository('newbranch', trunk_revid)
+ tree = new_dir.open_branch().create_checkout('local')
+ new_branch_revid = tree.commit('something local')
+ self.assertRevisionNotInRepository(trunk_tree.branch.user_url,
+ new_branch_revid)
+ self.assertRevisionInRepository('newbranch', new_branch_revid)
+
+ def test_unstack_fetches(self):
+ """Removing the stacked-on branch pulls across all data"""
+ try:
+ builder = self.make_branch_builder('trunk')
+ except errors.UninitializableFormat:
+            raise TestNotApplicable('uninitializable format')
+ # We have a mainline
+ trunk = fixtures.build_branch_with_non_ancestral_rev(builder)
+ mainline_revid = 'rev-1'
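+        # The fixture builds 'rev-1' as the branch tip, plus a 'rev-2' that is
+        # present in the repository but not in the tip's ancestry; the tag set
+        # below points at 'rev-2'.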
+ # and make branch from it which is stacked (with no tags)
+ try:
+ new_dir = trunk.bzrdir.sprout(self.get_url('newbranch'), stacked=True)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ # stacked repository
+ self.assertRevisionNotInRepository('newbranch', mainline_revid)
+ # TODO: we'd like to commit in the stacked repository; that requires
+ # some care (maybe a BranchBuilder) if it's remote and has no
+ # workingtree
+ ##newbranch_revid = new_dir.open_workingtree().commit('revision in '
+ ##'newbranch')
+ # now when we unstack that should implicitly fetch, to make sure that
+ # the branch will still work
+ new_branch = new_dir.open_branch()
+ try:
+ new_branch.tags.set_tag('tag-a', 'rev-2')
+ except errors.TagsNotSupported:
+ tags_supported = False
+ else:
+ tags_supported = True
+ new_branch.set_stacked_on_url(None)
+ self.assertRevisionInRepository('newbranch', mainline_revid)
+ # of course it's still in the mainline
+ self.assertRevisionInRepository('trunk', mainline_revid)
+ if tags_supported:
+ # the tagged revision in trunk is now in newbranch too
+ self.assertRevisionInRepository('newbranch', 'rev-2')
+ # and now we're no longer stacked
+ self.assertRaises(errors.NotStacked, new_branch.get_stacked_on_url)
+
+ def test_unstack_already_locked(self):
+ """Removing the stacked-on branch with an already write-locked branch
+ works.
+
+ This was bug 551525.
+ """
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir()
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ stacked_branch = stacked_bzrdir.open_branch()
+ stacked_branch.lock_write()
+ stacked_branch.set_stacked_on_url(None)
+ stacked_branch.unlock()
+
+ def test_unstack_already_multiple_locked(self):
+ """Unstacking a branch preserves the lock count (even though it
+ replaces the br.repository object).
+
+ This is a more extreme variation of test_unstack_already_locked.
+ """
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir()
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ stacked_branch = stacked_bzrdir.open_branch()
+ stacked_branch.lock_write()
+ stacked_branch.lock_write()
+ stacked_branch.lock_write()
+ stacked_branch.set_stacked_on_url(None)
+ stacked_branch.unlock()
+ stacked_branch.unlock()
+ stacked_branch.unlock()
+
+ def make_stacked_bzrdir(self, in_directory=None):
+ """Create a stacked branch and return its bzrdir.
+
+ :param in_directory: If not None, create a directory of this
+ name and create the stacking and stacked-on bzrdirs in
+ this directory.
+ """
+ if in_directory is not None:
+ self.get_transport().mkdir(in_directory)
+ prefix = in_directory + '/'
+ else:
+ prefix = ''
+ tree = self.make_branch_and_tree(prefix + 'stacked-on')
+ tree.commit('Added foo')
+ stacked_bzrdir = tree.branch.bzrdir.sprout(
+ self.get_url(prefix + 'stacked'), tree.branch.last_revision(),
+ stacked=True)
+ return stacked_bzrdir
+
+ def test_clone_from_stacked_branch_preserve_stacking(self):
+ # We can clone from the bzrdir of a stacked branch. If
+ # preserve_stacking is True, the cloned branch is stacked on the
+ # same branch as the original.
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir()
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ cloned_bzrdir = stacked_bzrdir.clone('cloned', preserve_stacking=True)
+ try:
+ self.assertEqual(
+ stacked_bzrdir.open_branch().get_stacked_on_url(),
+ cloned_bzrdir.open_branch().get_stacked_on_url())
+ except unstackable_format_errors, e:
+ pass
+
+ def test_clone_from_branch_stacked_on_relative_url_preserve_stacking(self):
+ # If a branch's stacked-on url is relative, we can still clone
+ # from it with preserve_stacking True and get a branch stacked
+ # on an appropriately adjusted relative url.
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir(in_directory='dir')
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ stacked_bzrdir.open_branch().set_stacked_on_url('../stacked-on')
+ cloned_bzrdir = stacked_bzrdir.clone(
+ self.get_url('cloned'), preserve_stacking=True)
+ self.assertEqual(
+ '../dir/stacked-on',
+ cloned_bzrdir.open_branch().get_stacked_on_url())
+
+ def test_clone_from_stacked_branch_no_preserve_stacking(self):
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir()
+ except unstackable_format_errors, e:
+ # not a testable combination.
+ raise TestNotApplicable(e)
+ cloned_unstacked_bzrdir = stacked_bzrdir.clone('cloned-unstacked',
+ preserve_stacking=False)
+ unstacked_branch = cloned_unstacked_bzrdir.open_branch()
+ self.assertRaises((errors.NotStacked, errors.UnstackableBranchFormat),
+ unstacked_branch.get_stacked_on_url)
+
+ def test_no_op_preserve_stacking(self):
+ """With no stacking, preserve_stacking should be a no-op."""
+ branch = self.make_branch('source')
+ cloned_bzrdir = branch.bzrdir.clone('cloned', preserve_stacking=True)
+ self.assertRaises((errors.NotStacked, errors.UnstackableBranchFormat),
+ cloned_bzrdir.open_branch().get_stacked_on_url)
+
+ def make_stacked_on_matching(self, source):
+ if source.repository.supports_rich_root():
+ if source.repository._format.supports_chks:
+ format = "2a"
+ else:
+ format = "1.9-rich-root"
+ else:
+ format = "1.9"
+ return self.make_branch('stack-on', format)
+
+ def test_sprout_stacking_policy_handling(self):
+ """Obey policy where possible, ignore otherwise."""
+ if self.bzrdir_format.fixed_components:
+ raise TestNotApplicable('Branch format 4 does not autoupgrade.')
+ source = self.make_branch('source')
+ stack_on = self.make_stacked_on_matching(source)
+ parent_bzrdir = self.make_bzrdir('.', format='default')
+ parent_bzrdir.get_config().set_default_stack_on('stack-on')
+ target = source.bzrdir.sprout('target').open_branch()
+ # When we sprout we upgrade the branch when there is a default stack_on
+ # set by a config *and* the targeted branch supports stacking.
+ if stack_on._format.supports_stacking():
+ self.assertEqual('../stack-on', target.get_stacked_on_url())
+ else:
+ self.assertRaises(
+ errors.UnstackableBranchFormat, target.get_stacked_on_url)
+
+ def test_clone_stacking_policy_handling(self):
+ """Obey policy where possible, ignore otherwise."""
+ if self.bzrdir_format.fixed_components:
+ raise TestNotApplicable('Branch format 4 does not autoupgrade.')
+ source = self.make_branch('source')
+ stack_on = self.make_stacked_on_matching(source)
+ parent_bzrdir = self.make_bzrdir('.', format='default')
+ parent_bzrdir.get_config().set_default_stack_on('stack-on')
+ target = source.bzrdir.clone('target').open_branch()
+ # When we clone we upgrade the branch when there is a default stack_on
+ # set by a config *and* the targeted branch supports stacking.
+ if stack_on._format.supports_stacking():
+ self.assertEqual('../stack-on', target.get_stacked_on_url())
+ else:
+ self.assertRaises(
+ errors.UnstackableBranchFormat, target.get_stacked_on_url)
+
+ def test_sprout_to_smart_server_stacking_policy_handling(self):
+ """Obey policy where possible, ignore otherwise."""
+ if not self.branch_format.supports_leaving_lock():
+ raise TestNotApplicable('Branch format is not usable via HPSS.')
+ source = self.make_branch('source')
+ stack_on = self.make_stacked_on_matching(source)
+ parent_bzrdir = self.make_bzrdir('.', format='default')
+ parent_bzrdir.get_config().set_default_stack_on('stack-on')
+ url = self.make_smart_server('target').base
+ target = source.bzrdir.sprout(url).open_branch()
+ # When we sprout we upgrade the branch when there is a default stack_on
+ # set by a config *and* the targeted branch supports stacking.
+ if stack_on._format.supports_stacking():
+ self.assertEqual('../stack-on', target.get_stacked_on_url())
+ else:
+ self.assertRaises(
+ errors.UnstackableBranchFormat, target.get_stacked_on_url)
+
+ def prepare_stacked_on_fetch(self):
+ stack_on = self.make_branch_and_tree('stack-on')
+ stack_on.commit('first commit', rev_id='rev1')
+ try:
+ stacked_dir = stack_on.bzrdir.sprout('stacked', stacked=True)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable('Format does not support stacking.')
+ unstacked = self.make_repository('unstacked')
+ return stacked_dir.open_workingtree(), unstacked
+
+ def test_fetch_copies_from_stacked_on(self):
+ stacked, unstacked = self.prepare_stacked_on_fetch()
+ unstacked.fetch(stacked.branch.repository, 'rev1')
+ unstacked.get_revision('rev1')
+
+ def test_fetch_copies_from_stacked_on_and_stacked(self):
+ stacked, unstacked = self.prepare_stacked_on_fetch()
+ tree = stacked.branch.create_checkout('local')
+ tree.commit('second commit', rev_id='rev2')
+ unstacked.fetch(stacked.branch.repository, 'rev2')
+ unstacked.get_revision('rev1')
+ unstacked.get_revision('rev2')
+ self.check_lines_added_or_present(stacked.branch, 'rev1')
+ self.check_lines_added_or_present(stacked.branch, 'rev2')
+
+ def test_autopack_when_stacked(self):
+ # in bzr.dev as of 20080730, autopack was reported to fail in stacked
+ # repositories because of problems with text deltas spanning physical
+ # repository boundaries. however, i didn't actually get this test to
+ # fail on that code. -- mbp
+ # see https://bugs.launchpad.net/bzr/+bug/252821
+ stack_on = self.make_branch_and_tree('stack-on')
+ if not stack_on.branch._format.supports_stacking():
+ raise TestNotApplicable("%r does not support stacking"
+ % self.branch_format)
+ text_lines = ['line %d blah blah blah\n' % i for i in range(20)]
+ self.build_tree_contents([('stack-on/a', ''.join(text_lines))])
+ stack_on.add('a')
+ stack_on.commit('base commit')
+ stacked_dir = stack_on.bzrdir.sprout('stacked', stacked=True)
+ stacked_branch = stacked_dir.open_branch()
+ local_tree = stack_on.bzrdir.sprout('local').open_workingtree()
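+        # Push a series of single-line changes from the local branch into the
+        # stacked branch, then pack its repository and check it; this
+        # exercises text deltas that may span the stacking boundary.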
+ for i in range(20):
+ text_lines[0] = 'changed in %d\n' % i
+ self.build_tree_contents([('local/a', ''.join(text_lines))])
+ local_tree.commit('commit %d' % i)
+ local_tree.branch.push(stacked_branch)
+ stacked_branch.repository.pack()
+ check.check_dwim(stacked_branch.base, False, True, True)
+
+ def test_pull_delta_when_stacked(self):
+ if not self.branch_format.supports_stacking():
+ raise TestNotApplicable("%r does not support stacking"
+ % self.branch_format)
+ stack_on = self.make_branch_and_tree('stack-on')
+ text_lines = ['line %d blah blah blah\n' % i for i in range(20)]
+ self.build_tree_contents([('stack-on/a', ''.join(text_lines))])
+ stack_on.add('a')
+ stack_on.commit('base commit')
+ # make a stacked branch from the mainline
+ stacked_dir = stack_on.bzrdir.sprout('stacked', stacked=True)
+ stacked_tree = stacked_dir.open_workingtree()
+ # make a second non-stacked branch from the mainline
+ other_dir = stack_on.bzrdir.sprout('other')
+ other_tree = other_dir.open_workingtree()
+ text_lines[9] = 'changed in other\n'
+ self.build_tree_contents([('other/a', ''.join(text_lines))])
+ stacked_revid = other_tree.commit('commit in other')
+ # this should have generated a delta; try to pull that across
+ # bug 252821 caused a RevisionNotPresent here...
+ stacked_tree.pull(other_tree.branch)
+ stacked_tree.branch.repository.pack()
+ check.check_dwim(stacked_tree.branch.base, False, True, True)
+ self.check_lines_added_or_present(stacked_tree.branch, stacked_revid)
+
+ def test_fetch_revisions_with_file_changes(self):
+ # Fetching revisions including file changes into a stacked branch
+ # works without error.
+ # Make the source tree.
+ src_tree = self.make_branch_and_tree('src')
+ self.build_tree_contents([('src/a', 'content')])
+ src_tree.add('a')
+ src_tree.commit('first commit')
+
+ # Make the stacked-on branch.
+ src_tree.bzrdir.sprout('stacked-on')
+
+ # Make a branch stacked on it.
+ target = self.make_branch('target')
+ try:
+ target.set_stacked_on_url('../stacked-on')
+ except unstackable_format_errors, e:
+ raise TestNotApplicable('Format does not support stacking.')
+
+ # Change the source branch.
+ self.build_tree_contents([('src/a', 'new content')])
+ src_tree.commit('second commit', rev_id='rev2')
+
+ # Fetch changes to the target.
+ target.fetch(src_tree.branch)
+ rtree = target.repository.revision_tree('rev2')
+ rtree.lock_read()
+ self.addCleanup(rtree.unlock)
+ self.assertEqual(
+ 'new content',
+ rtree.get_file_text(rtree.path2id('a'), 'a'))
+ self.check_lines_added_or_present(target, 'rev2')
+
+ def test_transform_fallback_location_hook(self):
+ # The 'transform_fallback_location' branch hook allows us to inspect
+ # and transform the URL of the fallback location for the branch.
+ stack_on = self.make_branch('stack-on')
+ stacked = self.make_branch('stacked')
+ try:
+ stacked.set_stacked_on_url('../stack-on')
+ except unstackable_format_errors, e:
+ raise TestNotApplicable('Format does not support stacking.')
+ self.get_transport().rename('stack-on', 'new-stack-on')
+ hook_calls = []
+ def hook(stacked_branch, url):
+ hook_calls.append(url)
+ return '../new-stack-on'
+ branch.Branch.hooks.install_named_hook(
+ 'transform_fallback_location', hook, None)
+ branch.Branch.open('stacked')
+ self.assertEqual(['../stack-on'], hook_calls)
+
+ def test_stack_on_repository_branch(self):
+ # Stacking should work when the repo isn't co-located with the
+ # stack-on branch.
+ try:
+ repo = self.make_repository('repo', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable()
+ if not repo._format.supports_nesting_repositories:
+ raise TestNotApplicable()
+ # Avoid make_branch, which produces standalone branches.
+ bzrdir = self.make_bzrdir('repo/stack-on')
+ try:
+ b = bzrdir.create_branch()
+ except errors.UninitializableFormat:
+ raise TestNotApplicable()
+ transport = self.get_transport('stacked')
+ b.bzrdir.clone_on_transport(transport, stacked_on=b.base)
+ # Ensure that opening the branch doesn't raise.
+ branch.Branch.open(transport.base)
+
+ def test_revision_history_of_stacked(self):
+ # See <https://launchpad.net/bugs/380314>.
+ stack_on = self.make_branch_and_tree('stack-on')
+ stack_on.commit('first commit', rev_id='rev1')
+ try:
+ stacked_dir = stack_on.bzrdir.sprout(
+ self.get_url('stacked'), stacked=True)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable('Format does not support stacking.')
+ try:
+ stacked = stacked_dir.open_workingtree()
+ except errors.NoWorkingTree:
+ stacked = stacked_dir.open_branch().create_checkout(
+ 'stacked-checkout', lightweight=True)
+ tree = stacked.branch.create_checkout('local')
+ tree.commit('second commit', rev_id='rev2')
+ # Sanity check: stacked's repo should not contain rev1, otherwise this
+ # test isn't testing what it's supposed to.
+ repo = stacked.branch.repository.bzrdir.open_repository()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({}, repo.get_parent_map(['rev1']))
+ # revision_history should work, even though the history is spread over
+ # multiple repositories.
+ self.assertEquals((2, 'rev2'), stacked.branch.last_revision_info())
+
+
+class TestStackingConnections(
+ transport_util.TestCaseWithConnectionHookedTransport):
+
+ def setUp(self):
+ super(TestStackingConnections, self).setUp()
+ try:
+ base_tree = self.make_branch_and_tree('base',
+ format=self.bzrdir_format)
+ except errors.UninitializableFormat, e:
+ raise TestNotApplicable(e)
+ stacked = self.make_branch('stacked', format=self.bzrdir_format)
+ try:
+ stacked.set_stacked_on_url(base_tree.branch.base)
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ base_tree.commit('first', rev_id='rev-base')
+ stacked.set_last_revision_info(1, 'rev-base')
+ stacked_relative = self.make_branch('stacked_relative',
+ format=self.bzrdir_format)
+ stacked_relative.set_stacked_on_url(base_tree.branch.user_url)
+        stacked_relative.set_last_revision_info(1, 'rev-base')
+ self.start_logging_connections()
+
+ def test_open_stacked(self):
+ b = branch.Branch.open(self.get_url('stacked'))
+ rev = b.repository.get_revision('rev-base')
+ self.assertEqual(1, len(self.connections))
+
+ def test_open_stacked_relative(self):
+ b = branch.Branch.open(self.get_url('stacked_relative'))
+ rev = b.repository.get_revision('rev-base')
+ self.assertEqual(1, len(self.connections))
diff --git a/bzrlib/tests/per_branch/test_tags.py b/bzrlib/tests/per_branch/test_tags.py
new file mode 100644
index 0000000..7556dba
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_tags.py
@@ -0,0 +1,508 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tags stored within a branch
+
+The tags are actually in the Branch.tags namespace, but these are
+1:1 with Branch implementations so can be tested from here.
+"""
+
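+# A minimal sketch of the API exercised below (assuming a format that
+# supports tags, such as the default):
+#
+#   b = branch.Branch.open('some-branch')
+#   b.tags.set_tag('release-1.0', 'some-revision-id')
+#   b.tags.lookup_tag('release-1.0')   # -> 'some-revision-id'
+#   b.tags.get_tag_dict()              # -> {'release-1.0': 'some-revision-id'}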
+from bzrlib import (
+ branch,
+ controldir,
+ errors,
+ tests,
+ )
+from bzrlib.tests import per_branch
+
+
+class TestBranchTags(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ super(TestBranchTags, self).setUp()
+ # formats that don't support tags can skip the rest of these
+ # tests...
+ branch = self.make_branch('probe')
+ if not branch._format.supports_tags():
+ raise tests.TestSkipped(
+ "format %s doesn't support tags" % branch._format)
+
+ def make_branch_with_revisions(self, relpath, revisions):
+ builder = self.make_branch_builder(relpath)
+ builder.start_series()
+ for revid in revisions:
+ builder.build_commit(rev_id=revid)
+ builder.finish_series()
+ return builder.get_branch()
+
+ def test_tags_initially_empty(self):
+ b = self.make_branch('b')
+ tags = b.tags.get_tag_dict()
+ self.assertEqual(tags, {})
+
+ def test_make_and_lookup_tag(self):
+ b = self.make_branch_with_revisions('b',
+ ['target-revid-1', 'target-revid-2'])
+ b.tags.set_tag('tag-name', 'target-revid-1')
+ b.tags.set_tag('other-name', 'target-revid-2')
+ # then reopen the branch and see they're still there
+ b = branch.Branch.open('b')
+ self.assertEqual(b.tags.get_tag_dict(),
+ {'tag-name': 'target-revid-1',
+ 'other-name': 'target-revid-2',
+ })
+ # read one at a time
+ result = b.tags.lookup_tag('tag-name')
+ self.assertEqual(result, 'target-revid-1')
+ # and try has_tag
+ self.assertTrue(b.tags.has_tag('tag-name'))
+ self.assertFalse(b.tags.has_tag('imaginary'))
+
+ def test_reverse_tag_dict(self):
+ b = self.make_branch_with_revisions('b',
+ ['target-revid-1', 'target-revid-2'])
+ b.tags.set_tag('tag-name', 'target-revid-1')
+ b.tags.set_tag('other-name', 'target-revid-2')
+ # then reopen the branch and check reverse map id->tags list
+ b = branch.Branch.open('b')
+ self.assertEqual(dict(b.tags.get_reverse_tag_dict()),
+ {'target-revid-1': ['tag-name'],
+ 'target-revid-2': ['other-name'],
+ })
+
+ def test_ghost_tag(self):
+ b = self.make_branch('b')
+ if not b._format.supports_tags_referencing_ghosts():
+ self.assertRaises(errors.GhostTagsNotSupported,
+ b.tags.set_tag, "ghost", "idontexist")
+ else:
+ b.tags.set_tag("ghost", "idontexist")
+ self.assertEquals("idontexist", b.tags.lookup_tag("ghost"))
+
+ def test_no_such_tag(self):
+ b = self.make_branch('b')
+ try:
+ b.tags.lookup_tag('bosko')
+ except errors.NoSuchTag, e:
+ self.assertEquals(e.tag_name, 'bosko')
+ self.assertEquals(str(e), 'No such tag: bosko')
+ else:
+ self.fail("didn't get expected exception")
+
+ def test_merge_tags(self):
+ b1 = self.make_branch_with_revisions('b1', ['revid', 'revid-1'])
+ b2 = self.make_branch_with_revisions('b2', ['revid', 'revid-2'])
+ # if there are tags in the source and not the destination, then they
+ # just go across
+ b1.tags.set_tag('tagname', 'revid')
+ b1.tags.merge_to(b2.tags)
+ self.assertEquals(b2.tags.lookup_tag('tagname'), 'revid')
+ # if a tag is in the destination and not in the source, it is not
+ # removed when we merge them
+ b2.tags.set_tag('in-destination', 'revid')
+ updates, conflicts = b1.tags.merge_to(b2.tags)
+ self.assertEquals(list(conflicts), [])
+ self.assertEquals(updates, {})
+ self.assertEquals(b2.tags.lookup_tag('in-destination'), 'revid')
+ # if there's a conflicting tag, it's reported -- the command line
+ # interface will say "these tags couldn't be copied"
+ b1.tags.set_tag('conflicts', 'revid-1')
+ b2.tags.set_tag('conflicts', 'revid-2')
+ updates, conflicts = b1.tags.merge_to(b2.tags)
+ self.assertEquals(list(conflicts),
+ [('conflicts', 'revid-1', 'revid-2')])
+ # and it keeps the same value
+ self.assertEquals(updates, {})
+ self.assertEquals(b2.tags.lookup_tag('conflicts'), 'revid-2')
+
+ def test_unicode_tag(self):
+ tag_name = u'\u3070'
+ # in anticipation of the planned change to treating revision ids as
+ # just 8bit strings
+ revid = ('revid' + tag_name).encode('utf-8')
+ b1 = self.make_branch_with_revisions('b', [revid])
+ b1.tags.set_tag(tag_name, revid)
+ self.assertEquals(b1.tags.lookup_tag(tag_name), revid)
+
+ def test_delete_tag(self):
+ tag_name = u'\N{GREEK SMALL LETTER ALPHA}'
+ revid = ('revid' + tag_name).encode('utf-8')
+ b = self.make_branch_with_revisions('b', [revid])
+ b.tags.set_tag(tag_name, revid)
+ # now try to delete it
+ b.tags.delete_tag(tag_name)
+ # now you can't look it up
+ self.assertRaises(errors.NoSuchTag,
+ b.tags.lookup_tag, tag_name)
+ # and it's not in the dictionary
+ self.assertEquals(b.tags.get_tag_dict(), {})
+ # and you can't remove it a second time
+ self.assertRaises(errors.NoSuchTag,
+ b.tags.delete_tag, tag_name)
+ # or remove a tag that never existed
+ self.assertRaises(errors.NoSuchTag,
+ b.tags.delete_tag, tag_name + '2')
+
+ def test_merge_empty_tags(self):
+        # you can merge tags between two instances, since neither has tags
+ b1 = self.make_branch('b1')
+ b2 = self.make_branch('b2')
+ b1.tags.merge_to(b2.tags)
+
+ def test_read_lock_caches_tags(self):
+ """Tags are read from a branch only once during a read-lock."""
+ # Open the same branch twice. Read-lock one, and then mutate the tags
+ # in the second. The read-locked branch never re-reads the tags, so it
+ # never observes the changed/new tags.
+ b1 = self.make_branch_with_revisions('b',
+ ['rev-1', 'rev-1-changed', 'rev-2'])
+ b1.tags.set_tag('one', 'rev-1')
+ b2 = controldir.ControlDir.open('b').open_branch()
+ b1.lock_read()
+ self.assertEqual({'one': 'rev-1'}, b1.tags.get_tag_dict())
+ # Add a tag and modify a tag in b2. b1 is read-locked and has already
+ # read the tags, so it is unaffected.
+ b2.tags.set_tag('one', 'rev-1-changed')
+ b2.tags.set_tag('two', 'rev-2')
+ self.assertEqual({'one': 'rev-1'}, b1.tags.get_tag_dict())
+ b1.unlock()
+ # Once unlocked the cached value is forgotten, so now the latest tags
+ # will be retrieved.
+ self.assertEqual(
+ {'one': 'rev-1-changed', 'two': 'rev-2'}, b1.tags.get_tag_dict())
+
+ def test_unlocked_does_not_cache_tags(self):
+ """Unlocked branches do not cache tags."""
+ # Open the same branch twice.
+ b1 = self.make_branch_with_revisions('b',
+ ['rev-1', 'rev-1-changed', 'rev-2'])
+ b1.tags.set_tag('one', 'rev-1')
+ b2 = b1.bzrdir.open_branch()
+ self.assertEqual({'one': 'rev-1'}, b1.tags.get_tag_dict())
+ # Add a tag and modify a tag in b2. b1 isn't locked, so it will
+ # immediately return the new tags too.
+ b2.tags.set_tag('one', 'rev-1-changed')
+ b2.tags.set_tag('two', 'rev-2')
+ self.assertEqual(
+ {'one': 'rev-1-changed', 'two': 'rev-2'}, b1.tags.get_tag_dict())
+
+ def test_cached_tag_dict_not_accidentally_mutable(self):
+ """When there's a cached version of the tags, b.tags.get_tag_dict
+ returns a copy of the cached data so that callers cannot accidentally
+ corrupt the cache.
+ """
+ b = self.make_branch_with_revisions('b',
+ ['rev-1', 'rev-2', 'rev-3'])
+ b.tags.set_tag('one', 'rev-1')
+ self.addCleanup(b.lock_read().unlock)
+ # The first time the data returned will not be in the cache
+ tags_dict = b.tags.get_tag_dict()
+ tags_dict['two'] = 'rev-2'
+ # The second time the data comes from the cache
+ tags_dict = b.tags.get_tag_dict()
+ tags_dict['three'] = 'rev-3'
+ # The get_tag_dict() result should still be unchanged, even though we
+ # mutated its earlier return values.
+ self.assertEqual({'one': 'rev-1'}, b.tags.get_tag_dict())
+
+ def make_write_locked_branch_with_one_tag(self):
+ b = self.make_branch_with_revisions('b',
+ ['rev-1', 'rev-1-changed', 'rev-2'])
+ b.tags.set_tag('one', 'rev-1')
+ self.addCleanup(b.lock_write().unlock)
+ # Populate the cache
+ b.tags.get_tag_dict()
+ return b
+
+    def test_set_tag_invalidates_cache(self):
+ b = self.make_write_locked_branch_with_one_tag()
+ b.tags.set_tag('one', 'rev-1-changed')
+ self.assertEqual({'one': 'rev-1-changed'}, b.tags.get_tag_dict())
+
+    def test_delete_tag_invalidates_cache(self):
+ b = self.make_write_locked_branch_with_one_tag()
+ b.tags.delete_tag('one')
+ self.assertEqual({}, b.tags.get_tag_dict())
+
+    def test_merge_to_invalidates_cache(self):
+ b1 = self.make_write_locked_branch_with_one_tag()
+ b2 = self.make_branch_with_revisions('b2', ['rev-2', 'rev-1'])
+ b2.tags.set_tag('two', 'rev-2')
+ b2.tags.merge_to(b1.tags)
+ self.assertEqual(
+ {'one': 'rev-1', 'two': 'rev-2'}, b1.tags.get_tag_dict())
+
+    def test_rename_revisions_invalidates_cache(self):
+ b = self.make_write_locked_branch_with_one_tag()
+ b.tags.rename_revisions({'rev-1': 'rev-1-changed'})
+ self.assertEqual({'one': 'rev-1-changed'}, b.tags.get_tag_dict())
+
+
+class TestTagsMergeToInCheckouts(per_branch.TestCaseWithBranch):
+ """Tests for checkout.branch.tags.merge_to.
+
+ In particular this exercises variations in tag conflicts in the master
+ branch and/or the checkout (child). It may seem strange to have different
+ tags in the child and master, but 'bzr merge' intentionally updates the
+ child and not the master (instead the next 'bzr commit', if the user
+ decides to commit, will update the master). Also, merge_to in bzr < 2.3
+ didn't propagate changes to the master, and current bzr versions may find
+    themselves operating on checkouts touched by older bzrs.
+
+ So we need to make sure bzr copes gracefully with differing tags in the
+ master versus the child.
+
+ See also <https://bugs.launchpad.net/bzr/+bug/603395>.
+ """
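+    # merge_to returns (tag_updates, tag_conflicts): a dict mapping tag names
+    # to the values that were set in the destination, and a list of
+    # (tag name, source value, destination value) tuples for tags that could
+    # not be copied. The tests below rely on this shape.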
+
+ def setUp(self):
+ super(TestTagsMergeToInCheckouts, self).setUp()
+ branch1 = self.make_branch('tags-probe')
+ if not branch1._format.supports_tags():
+ raise tests.TestSkipped(
+ "format %s doesn't support tags" % branch1._format)
+ branch2 = self.make_branch('bind-probe')
+ try:
+ branch2.bind(branch1)
+ except errors.UpgradeRequired:
+ raise tests.TestNotApplicable(
+ "format %s doesn't support bound branches" % branch2._format)
+
+ def test_merge_to_propagates_tags(self):
+ """merge_to(child) also merges tags to the master."""
+ master = self.make_branch('master')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ other.tags.merge_to(child.tags)
+ self.assertEquals('rev-1', child.tags.lookup_tag('foo'))
+ self.assertEquals('rev-1', master.tags.lookup_tag('foo'))
+
+ def test_ignore_master_disables_tag_propagation(self):
+ """merge_to(child, ignore_master=True) does not merge tags to the
+ master.
+ """
+ master = self.make_branch('master')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ other.tags.merge_to(child.tags, ignore_master=True)
+ self.assertEquals('rev-1', child.tags.lookup_tag('foo'))
+ self.assertRaises(errors.NoSuchTag, master.tags.lookup_tag, 'foo')
+
+ def test_merge_to_overwrite_conflict_in_master(self):
+ """merge_to(child, overwrite=True) overwrites any conflicting tags in
+ the master.
+ """
+ master = self.make_branch('master')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ master.tags.set_tag('foo', 'rev-2')
+ tag_updates, tag_conflicts = other.tags.merge_to(child.tags, overwrite=True)
+ self.assertEquals('rev-1', child.tags.lookup_tag('foo'))
+ self.assertEquals('rev-1', master.tags.lookup_tag('foo'))
+ self.assertEquals({"foo": "rev-1"}, tag_updates)
+ self.assertLength(0, tag_conflicts)
+
+ def test_merge_to_overwrite_conflict_in_child_and_master(self):
+ """merge_to(child, overwrite=True) overwrites any conflicting tags in
+ both the child and the master.
+ """
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-2')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ tag_updates, tag_conflicts = other.tags.merge_to(
+ child.tags, overwrite=True)
+ self.assertEquals('rev-1', child.tags.lookup_tag('foo'))
+ self.assertEquals('rev-1', master.tags.lookup_tag('foo'))
+ self.assertEquals({u'foo': 'rev-1'}, tag_updates)
+ self.assertLength(0, tag_conflicts)
+
+ def test_merge_to_conflict_in_child_only(self):
+ """When new_tags.merge_to(child.tags) conflicts with the child but not
+ the master, a conflict is reported and the child receives the new tag.
+ """
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-2')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ master.tags.delete_tag('foo')
+ tag_updates, tag_conflicts = other.tags.merge_to(child.tags)
+ # Conflict in child, so it is unchanged.
+ self.assertEquals('rev-2', child.tags.lookup_tag('foo'))
+ # No conflict in the master, so the 'foo' tag equals other's value here.
+ self.assertEquals('rev-1', master.tags.lookup_tag('foo'))
+ # The conflict is reported.
+ self.assertEquals([(u'foo', 'rev-1', 'rev-2')], list(tag_conflicts))
+ self.assertEquals({u'foo': 'rev-1'}, tag_updates)
+
+ def test_merge_to_conflict_in_master_only(self):
+ """When new_tags.merge_to(child.tags) conflicts with the master but not
+ the child, a conflict is reported and the child receives the new tag.
+ """
+ master = self.make_branch('master')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ master.tags.set_tag('foo', 'rev-2')
+ tag_updates, tag_conflicts = other.tags.merge_to(child.tags)
+ # No conflict in the child, so the 'foo' tag equals other's value here.
+ self.assertEquals('rev-1', child.tags.lookup_tag('foo'))
+ # Conflict in master, so it is unchanged.
+ self.assertEquals('rev-2', master.tags.lookup_tag('foo'))
+ # The conflict is reported.
+ self.assertEquals({u'foo': 'rev-1'}, tag_updates)
+ self.assertEquals([(u'foo', 'rev-1', 'rev-2')], list(tag_conflicts))
+
+ def test_merge_to_same_conflict_in_master_and_child(self):
+ """When new_tags.merge_to(child.tags) conflicts the same way with the
+ master and the child a single conflict is reported.
+ """
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-2')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ tag_updates, tag_conflicts = other.tags.merge_to(child.tags)
+ # Both master and child conflict, so both stay as rev-2
+ self.assertEquals('rev-2', child.tags.lookup_tag('foo'))
+ self.assertEquals('rev-2', master.tags.lookup_tag('foo'))
+ # The conflict is reported exactly once, even though it occurs in both
+ # master and child.
+ self.assertEquals({}, tag_updates)
+ self.assertEquals([(u'foo', 'rev-1', 'rev-2')], list(tag_conflicts))
+
+ def test_merge_to_different_conflict_in_master_and_child(self):
+ """When new_tags.merge_to(child.tags) conflicts differently in the
+ master and the child both conflicts are reported.
+ """
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-2')
+ other = self.make_branch('other')
+ other.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ # We use the private method _set_tag_dict because normally bzr tries to
+ # avoid this scenario.
+ child.tags._set_tag_dict({'foo': 'rev-3'})
+ tag_updates, tag_conflicts = other.tags.merge_to(child.tags)
+ # Both master and child conflict, so both stay as they were.
+ self.assertEquals('rev-3', child.tags.lookup_tag('foo'))
+ self.assertEquals('rev-2', master.tags.lookup_tag('foo'))
+ # Both conflicts are reported.
+ self.assertEquals({}, tag_updates)
+ self.assertEquals(
+ [(u'foo', 'rev-1', 'rev-2'), (u'foo', 'rev-1', 'rev-3')],
+ sorted(tag_conflicts))
+
+
+class TestUnsupportedTags(per_branch.TestCaseWithBranch):
+ """Formats that don't support tags should give reasonable errors."""
+
+ def setUp(self):
+ super(TestUnsupportedTags, self).setUp()
+ branch = self.make_branch('probe')
+ if branch._format.supports_tags():
+ raise tests.TestSkipped("Format %s declares that tags are supported"
+ % branch._format)
+ # it's covered by TestBranchTags
+
+ def test_tag_methods_raise(self):
+ b = self.make_branch('b')
+ self.assertRaises(errors.TagsNotSupported,
+ b.tags.set_tag, 'foo', 'bar')
+ self.assertRaises(errors.TagsNotSupported,
+ b.tags.lookup_tag, 'foo')
+ self.assertRaises(errors.TagsNotSupported,
+ b.tags.set_tag, 'foo', 'bar')
+ self.assertRaises(errors.TagsNotSupported,
+ b.tags.delete_tag, 'foo')
+
+ def test_merge_empty_tags(self):
+        # you can merge tags between two instances, since neither has tags
+ b1 = self.make_branch('b1')
+ b2 = self.make_branch('b2')
+ b1.tags.merge_to(b2.tags)
+
+
+class AutomaticTagNameTests(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ super(AutomaticTagNameTests, self).setUp()
+ if isinstance(self.branch_format, branch.BranchReferenceFormat):
+ # This test could in principle apply to BranchReferenceFormat, but
+ # make_branch_builder doesn't support it.
+ raise tests.TestSkipped(
+ "BranchBuilder can't make reference branches.")
+ self.builder = self.make_branch_builder('.')
+ self.builder.build_snapshot('foo', None,
+ [('add', ('', None, 'directory', None))],
+ message='foo')
+ self.branch = self.builder.get_branch()
+ if not self.branch._format.supports_tags():
+ raise tests.TestSkipped(
+ "format %s doesn't support tags" % self.branch._format)
+
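+    # Branch.automatic_tag_name(revid) consults the 'automatic_tag_name'
+    # hooks in installation order and returns the first non-None result, or
+    # None if no hook supplies a name.
+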
+ def test_no_functions(self):
+ rev = self.branch.last_revision()
+ self.assertEquals(None, self.branch.automatic_tag_name(rev))
+
+ def test_returns_tag_name(self):
+ def get_tag_name(br, revid):
+ return "foo"
+ branch.Branch.hooks.install_named_hook('automatic_tag_name',
+ get_tag_name, 'get tag name foo')
+ self.assertEquals("foo", self.branch.automatic_tag_name(
+ self.branch.last_revision()))
+
+ def test_uses_first_return(self):
+ get_tag_name_1 = lambda br, revid: "foo1"
+ get_tag_name_2 = lambda br, revid: "foo2"
+ branch.Branch.hooks.install_named_hook('automatic_tag_name',
+ get_tag_name_1, 'tagname1')
+ branch.Branch.hooks.install_named_hook('automatic_tag_name',
+ get_tag_name_2, 'tagname2')
+ self.assertEquals("foo1", self.branch.automatic_tag_name(
+ self.branch.last_revision()))
+
+ def test_ignores_none(self):
+ get_tag_name_1 = lambda br, revid: None
+ get_tag_name_2 = lambda br, revid: "foo2"
+ branch.Branch.hooks.install_named_hook('automatic_tag_name',
+ get_tag_name_1, 'tagname1')
+ branch.Branch.hooks.install_named_hook('automatic_tag_name',
+ get_tag_name_2, 'tagname2')
+ self.assertEquals("foo2", self.branch.automatic_tag_name(
+ self.branch.last_revision()))
diff --git a/bzrlib/tests/per_branch/test_uncommit.py b/bzrlib/tests/per_branch/test_uncommit.py
new file mode 100644
index 0000000..49b0b04
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_uncommit.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the contract of uncommit on branches.
+
+Note that uncommit currently is not a branch method; it should be.
+"""
+
+from bzrlib import (
+ branch,
+ errors,
+ uncommit,
+ )
+from bzrlib.tests import per_branch
+
+
+class TestUncommitHook(per_branch.TestCaseWithBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ super(TestUncommitHook, self).setUp()
+
+ def capture_post_uncommit_hook(self, local, master, old_revno,
+ old_revid, new_revno, new_revid):
+ """Capture post uncommit hook calls to self.hook_calls.
+
+ The call is logged, as is some state of the two branches.
+ """
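+        # The logged tuple is: (hook name, local branch base or None, master
+        # base, old revno, old revid, new revno, new revid, whether the local
+        # branch was locked or None, whether the master was locked).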
+ if local:
+ local_locked = local.is_locked()
+ local_base = local.base
+ else:
+ local_locked = None
+ local_base = None
+ self.hook_calls.append(
+ ('post_uncommit', local_base, master.base, old_revno, old_revid,
+ new_revno, new_revid, local_locked, master.is_locked()))
+
+ def test_post_uncommit_to_origin(self):
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ tree.add('')
+ revid = tree.commit('a revision')
+ tree.unlock()
+ branch.Branch.hooks.install_named_hook('post_uncommit',
+ self.capture_post_uncommit_hook, None)
+ uncommit.uncommit(tree.branch)
+ # with nothing left we should still get a notification, and
+ # have the branch locked at notification time.
+ self.assertEqual([
+ ('post_uncommit', None, tree.branch.base, 1, revid,
+ 0, None, None, True)
+ ],
+ self.hook_calls)
+
+ def test_post_uncommit_bound(self):
+ master = self.make_branch('master')
+ tree = self.make_branch_and_memory_tree('local')
+ try:
+ tree.branch.bind(master)
+ except errors.UpgradeRequired:
+            # can't bind this format, the test is irrelevant.
+ return
+ tree.lock_write()
+ tree.add('')
+ revid = tree.commit('a revision')
+ tree.unlock()
+ branch.Branch.hooks.install_named_hook(
+ 'post_uncommit', self.capture_post_uncommit_hook, None)
+ uncommit.uncommit(tree.branch)
+ # with nothing left we should still get a notification, and
+ # have the branch locked at notification time.
+ self.assertEqual([
+ ('post_uncommit', tree.branch.base, master.base, 1, revid,
+ 0, None, True, True)
+ ],
+ self.hook_calls)
+
+ def test_post_uncommit_not_to_origin(self):
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ tree.add('')
+ revid = tree.commit('first revision')
+ revid2 = tree.commit('second revision')
+ revid3 = tree.commit('third revision')
+ tree.unlock()
+ branch.Branch.hooks.install_named_hook(
+ 'post_uncommit', self.capture_post_uncommit_hook, None)
+ uncommit.uncommit(tree.branch, revno=2)
+ # having uncommitted from up the branch, we should get the
+ # before and after revnos and revids correctly.
+ self.assertEqual([
+ ('post_uncommit', None, tree.branch.base, 3, revid3,
+ 1, revid, None, True)
+ ],
+ self.hook_calls)
diff --git a/bzrlib/tests/per_branch/test_update.py b/bzrlib/tests/per_branch/test_update.py
new file mode 100644
index 0000000..7fefb89
--- /dev/null
+++ b/bzrlib/tests/per_branch/test_update.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.update()"""
+
+from bzrlib import (
+    branch,
+    errors,
+    revision as _mod_revision,
+    tests,
+    )
+from bzrlib.tests import per_branch
+
+
+class TestUpdate(per_branch.TestCaseWithBranch):
+
+ def test_update_unbound_works(self):
+ b = self.make_branch('.')
+ b.update()
+ self.assertEqual(_mod_revision.NULL_REVISION,
+ _mod_revision.ensure_null(b.last_revision()))
+
+ def test_update_prefix_returns_none(self):
+        # update in a branch when it's a prefix of the master should
+ # indicate that no local changes were present.
+ master_tree = self.make_branch_and_tree('master')
+ child_tree = self.make_branch_and_tree('child')
+ try:
+ child_tree.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+            # old branch, can't test.
+ return
+ # commit to the child to make the last rev not-None.
+ child_tree.commit('foo', rev_id='foo', allow_pointless=True)
+ # update the master so we can commit there.
+ master_tree.update()
+ # commit to the master making the child tree out of date and a prefix.
+ master_tree.commit('bar', rev_id='bar', allow_pointless=True)
+ self.assertEqual(None, child_tree.branch.update())
+
+ def test_update_local_commits_returns_old_tip(self):
+        # update in a branch when it's not a prefix of the master should
+ # return the previous tip and reset the revision history.
+ master_tree = self.make_branch_and_tree('master')
+ child_tree = self.make_branch_and_tree('child')
+ try:
+ child_tree.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+            # old branch, can't test.
+ return
+ # commit to the child to make the last rev not-None and skew it from master.
+ child_tree.commit('foo', rev_id='foo', allow_pointless=True, local=True)
+ # commit to the master making the child tree out of date and not a prefix.
+ master_tree.commit('bar', rev_id='bar', allow_pointless=True)
+ self.assertEqual('foo', child_tree.branch.update())
+ self.assertEqual('bar', child_tree.branch.last_revision())
+
+ def test_update_in_checkout_of_readonly(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ rev1 = tree1.commit('one')
+ try:
+ tree1.branch.tags.set_tag('test-tag', rev1)
+ except errors.TagsNotSupported:
+ # Tags not supported
+ raise tests.TestNotApplicable("only triggered from branches with"
+ " tags")
+ readonly_branch1 = branch.Branch.open('readonly+' + tree1.branch.base)
+ tree2 = tree1.bzrdir.sprout('tree2').open_workingtree()
+ try:
+ tree2.branch.bind(readonly_branch1)
+ except errors.UpgradeRequired:
+            # old branch, can't test.
+ raise tests.TestNotApplicable("only triggered in bound branches")
+ rev2 = tree1.commit('two')
+ tree2.update()
+ self.assertEqual(rev2, tree2.branch.last_revision())
diff --git a/bzrlib/tests/per_bzrdir/__init__.py b/bzrlib/tests/per_bzrdir/__init__.py
new file mode 100644
index 0000000..d075193
--- /dev/null
+++ b/bzrlib/tests/per_bzrdir/__init__.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# Jelmer Vernooij <jelmer.vernooij@canonical.com>
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""BzrDir implementation tests for bzr.
+
+These test the conformance of all the bzrdir variations to the expected API.
+Specific tests for individual formats are in the tests/test_bzrdir.py file
+rather than in tests/per_bzrdir/*.py. Generic control directory tests not
+specific to BzrDir are in tests/per_controldir/*.py.
+"""
+
+from bzrlib.bzrdir import BzrDirFormat
+from bzrlib.controldir import ControlDirFormat
+from bzrlib.tests import (
+ default_transport,
+ multiply_tests,
+ test_server,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.per_controldir import make_scenarios
+from bzrlib.transport import memory
+
+
+class TestCaseWithBzrDir(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestCaseWithBzrDir, self).setUp()
+ self.bzrdir = None
+
+ def get_bzrdir(self):
+ if self.bzrdir is None:
+ self.bzrdir = self.make_bzrdir(None)
+ return self.bzrdir
+
+ def get_default_format(self):
+ return self.bzrdir_format
+
+
+def load_tests(standard_tests, module, loader):
+ test_per_bzrdir = [
+ 'bzrlib.tests.per_bzrdir.test_bzrdir',
+ ]
+ submod_tests = loader.loadTestsFromModuleNames(test_per_bzrdir)
+ formats = [format for format in ControlDirFormat.known_formats()
+ if isinstance(format, BzrDirFormat)]
+ scenarios = make_scenarios(
+ default_transport,
+ None,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ formats)
+ # This will always add scenarios using the smart server.
+ from bzrlib.remote import RemoteBzrDirFormat
+ # test the remote server behaviour when backed with a MemoryTransport
+ # Once for the current version
+ scenarios.extend(make_scenarios(
+ memory.MemoryServer,
+ test_server.SmartTCPServer_for_testing,
+ test_server.ReadonlySmartTCPServer_for_testing,
+ [(RemoteBzrDirFormat())],
+ name_suffix='-default'))
+ # And once with < 1.6 - the 'v2' protocol.
+ scenarios.extend(make_scenarios(
+ memory.MemoryServer,
+ test_server.SmartTCPServer_for_testing_v2_only,
+ test_server.ReadonlySmartTCPServer_for_testing_v2_only,
+ [(RemoteBzrDirFormat())],
+ name_suffix='-v2'))
+ # add the tests for the sub modules
+ return multiply_tests(submod_tests, scenarios, standard_tests)
diff --git a/bzrlib/tests/per_bzrdir/test_bzrdir.py b/bzrlib/tests/per_bzrdir/test_bzrdir.py
new file mode 100644
index 0000000..28009b0
--- /dev/null
+++ b/bzrlib/tests/per_bzrdir/test_bzrdir.py
@@ -0,0 +1,693 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrdir implementations - tests a bzrdir format."""
+
+import errno
+from stat import S_ISDIR
+
+import bzrlib.branch
+from bzrlib import (
+ bzrdir,
+ controldir,
+ errors,
+ repository,
+ revision as _mod_revision,
+ transport,
+ workingtree,
+ )
+from bzrlib.remote import RemoteBzrDirFormat
+from bzrlib.tests import (
+ TestNotApplicable,
+ TestSkipped,
+ )
+from bzrlib.tests.per_bzrdir import TestCaseWithBzrDir
+from bzrlib.transport.local import (
+ LocalTransport,
+ )
+
+
+class AnonymousTestBranchFormat(bzrlib.branch.BranchFormat):
+ """An anonymous branch format (does not have a format string)"""
+
+ def get_format_string(self):
+ raise NotImplementedError(self.get_format_string)
+
+
+class IdentifiableTestBranchFormat(bzrlib.branch.BranchFormat):
+ """An identifable branch format (has a format string)"""
+
+ def get_format_string(self):
+ return "I have an identity"
+
+
+class AnonymousTestRepositoryFormat(repository.RepositoryFormat):
+ """An anonymous branch format (does not have a format string)"""
+
+ def get_format_string(self):
+ raise NotImplementedError(self.get_format_string)
+
+
+class IdentifiableTestRepositoryFormat(repository.RepositoryFormat):
+ """An identifable branch format (has a format string)"""
+
+ def get_format_string(self):
+ return "I have an identity"
+
+
+class AnonymousTestWorkingTreeFormat(workingtree.WorkingTreeFormat):
+ """An anonymous branch format (does not have a format string)"""
+
+ def get_format_string(self):
+ raise NotImplementedError(self.get_format_string)
+
+
+class IdentifiableTestWorkingTreeFormat(workingtree.WorkingTreeFormat):
+ """An identifable branch format (has a format string)"""
+
+ def get_format_string(self):
+ return "I have an identity"
+
+
+class TestBzrDir(TestCaseWithBzrDir):
+
+ # Many of these tests test for disk equality rather than checking
+ # for semantic equivalence. This works well for some tests but
+ # is not good at handling changes in representation or the addition
+ # or removal of control data. It would be nice, for instance, to:
+ # sprout a new branch, check by hand that the nickname has been reset,
+ # and then set the nickname to match the source branch, at which point
+ # a semantic equivalence check should pass.
+
+ def assertDirectoriesEqual(self, source, target, ignore_list=[]):
+ """Assert that the content of source and target are identical.
+
+ Paths in ignore_list will be completely ignored.
+
+ We ignore paths that represent data which is allowed to change during
+ a clone or sprout: for instance, inventory.knit contains gzip fragments
+ which have timestamps in them, and as we have read the inventory from
+ the source knit, the already-read data is recompressed rather than
+ reading it again, which leads to changed timestamps. This is ok though,
+ because the inventory.kndx file is not ignored, and the integrity of
+ knit joins is tested by test_knit and test_versionedfile.
+
+ :seealso: assertRepositoryHasSameItems, which checks repositories for
+ equivalence by value rather than by representation.
+ """
+ files = []
+ directories = ['.']
+ while directories:
+ dir = directories.pop()
+ for path in set(source.list_dir(dir) + target.list_dir(dir)):
+ path = dir + '/' + path
+ if path in ignore_list:
+ continue
+ try:
+ stat = source.stat(path)
+ except errors.NoSuchFile:
+ self.fail('%s not in source' % path)
+ if S_ISDIR(stat.st_mode):
+ self.assertTrue(S_ISDIR(target.stat(path).st_mode))
+ directories.append(path)
+ else:
+ self.assertEqualDiff(source.get_bytes(path),
+ target.get_bytes(path),
+ "text for file %r differs:\n" % path)
+
+ def assertRepositoryHasSameItems(self, left_repo, right_repo):
+ """require left_repo and right_repo to contain the same data."""
+ # XXX: TODO: Doesn't work yet, because we need to be able to compare
+ # local repositories to remote ones... but this is an as-yet unsolved
+ # aspect of format management and the Remote protocols...
+ # self.assertEqual(left_repo._format.__class__,
+ # right_repo._format.__class__)
+ left_repo.lock_read()
+ try:
+ right_repo.lock_read()
+ try:
+ # revs
+ all_revs = left_repo.all_revision_ids()
+ self.assertEqual(left_repo.all_revision_ids(),
+ right_repo.all_revision_ids())
+ for rev_id in left_repo.all_revision_ids():
+ self.assertEqual(left_repo.get_revision(rev_id),
+ right_repo.get_revision(rev_id))
+ # Assert the revision trees (and thus the inventories) are equal
+ sort_key = lambda rev_tree: rev_tree.get_revision_id()
+ rev_trees_a = sorted(
+ left_repo.revision_trees(all_revs), key=sort_key)
+ rev_trees_b = sorted(
+ right_repo.revision_trees(all_revs), key=sort_key)
+ for tree_a, tree_b in zip(rev_trees_a, rev_trees_b):
+ self.assertEqual([], list(tree_a.iter_changes(tree_b)))
+ # texts
+ text_index = left_repo._generate_text_key_index()
+ self.assertEqual(text_index,
+ right_repo._generate_text_key_index())
+ desired_files = []
+ for file_id, revision_id in text_index.iterkeys():
+ desired_files.append(
+ (file_id, revision_id, (file_id, revision_id)))
+ left_texts = [(identifier, "".join(bytes_iterator)) for
+ (identifier, bytes_iterator) in
+ left_repo.iter_files_bytes(desired_files)]
+ right_texts = [(identifier, "".join(bytes_iterator)) for
+ (identifier, bytes_iterator) in
+ right_repo.iter_files_bytes(desired_files)]
+ left_texts.sort()
+ right_texts.sort()
+ self.assertEqual(left_texts, right_texts)
+ # signatures
+ for rev_id in all_revs:
+ try:
+ left_text = left_repo.get_signature_text(rev_id)
+ except errors.NoSuchRevision:
+ continue
+ right_text = right_repo.get_signature_text(rev_id)
+ self.assertEqual(left_text, right_text)
+ finally:
+ right_repo.unlock()
+ finally:
+ left_repo.unlock()
+
+ def sproutOrSkip(self, from_bzrdir, to_url, revision_id=None,
+ force_new_repo=False, accelerator_tree=None,
+ create_tree_if_local=True):
+ """Sprout from_bzrdir into to_url, or raise TestSkipped.
+
+ A simple wrapper for from_bzrdir.sprout that translates NotLocalUrl into
+ TestSkipped. Returns the newly sprouted bzrdir.
+ """
+ to_transport = transport.get_transport(to_url)
+ if not isinstance(to_transport, LocalTransport):
+ raise TestSkipped('Cannot sprout to remote bzrdirs.')
+ target = from_bzrdir.sprout(to_url, revision_id=revision_id,
+ force_new_repo=force_new_repo,
+ possible_transports=[to_transport],
+ accelerator_tree=accelerator_tree,
+ create_tree_if_local=create_tree_if_local)
+ return target
+
+ def skipIfNoWorkingTree(self, a_bzrdir):
+ """Raises TestSkipped if a_bzrdir doesn't have a working tree.
+
+ If the bzrdir does have a workingtree, this is a no-op.
+ """
+ try:
+ a_bzrdir.open_workingtree()
+ except (errors.NotLocalUrl, errors.NoWorkingTree):
+ raise TestSkipped("bzrdir on transport %r has no working tree"
+ % a_bzrdir.transport)
+
+ def createWorkingTreeOrSkip(self, a_bzrdir):
+ """Create a working tree on a_bzrdir, or raise TestSkipped.
+
+ A simple wrapper for create_workingtree that translates NotLocalUrl into
+ TestSkipped. Returns the newly created working tree.
+ """
+ try:
+ # This passes in many named options to make sure they're
+ # understood by subclasses: see
+ # <https://bugs.launchpad.net/bzr/+bug/524627>.
+ return a_bzrdir.create_workingtree(
+ revision_id=None,
+ from_branch=None,
+ accelerator_tree=None,
+ hardlink=False)
+ except errors.NotLocalUrl:
+ raise TestSkipped("cannot make working tree with transport %r"
+ % a_bzrdir.transport)
+
+ def test_clone_bzrdir_repository_under_shared_force_new_repo(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ dir = self.make_bzrdir('source')
+ repo = dir.create_repository()
+ repo.fetch(tree.branch.repository)
+ self.assertTrue(repo.has_revision('1'))
+ try:
+ self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ return
+ target = dir.clone(self.get_url('target/child'), force_new_repo=True)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/repository',
+ ])
+ self.assertRepositoryHasSameItems(tree.branch.repository, repo)
+
+ def test_clone_bzrdir_branch_and_repo(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.branch.copy_content_into(source)
+ dir = source.bzrdir
+ target = dir.clone(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ [
+ './.bzr/basis-inventory-cache',
+ './.bzr/checkout/stat-cache',
+ './.bzr/merge-hashes',
+ './.bzr/repository',
+ './.bzr/stat-cache',
+ ])
+ self.assertRepositoryHasSameItems(
+ tree.branch.repository, target.open_repository())
+
+ def test_clone_on_transport(self):
+ a_dir = self.make_bzrdir('source')
+ target_transport = a_dir.root_transport.clone('..').clone('target')
+ target = a_dir.clone_on_transport(target_transport)
+ self.assertNotEqual(a_dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(a_dir.root_transport, target.root_transport,
+ ['./.bzr/merge-hashes'])
+
+ def test_clone_bzrdir_empty(self):
+ dir = self.make_bzrdir('source')
+ target = dir.clone(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/merge-hashes'])
+
+ def test_clone_bzrdir_empty_force_new_ignored(self):
+ # the force_new_repo parameter should have no effect on an empty
+ # bzrdir's clone logic
+ dir = self.make_bzrdir('source')
+ target = dir.clone(self.get_url('target'), force_new_repo=True)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/merge-hashes'])
+
+ def test_clone_bzrdir_repository(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['foo'], transport=tree.bzrdir.transport.clone('..'))
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ dir = self.make_bzrdir('source')
+ repo = dir.create_repository()
+ repo.fetch(tree.branch.repository)
+ self.assertTrue(repo.has_revision('1'))
+ target = dir.clone(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ [
+ './.bzr/merge-hashes',
+ './.bzr/repository',
+ ])
+ self.assertRepositoryHasSameItems(tree.branch.repository,
+ target.open_repository())
+
+ def test_clone_bzrdir_tree_branch_repo(self):
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+ dir = tree.bzrdir
+ target = dir.clone(self.get_url('target'))
+ self.skipIfNoWorkingTree(target)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/stat-cache',
+ './.bzr/checkout/dirstate',
+ './.bzr/checkout/stat-cache',
+ './.bzr/checkout/merge-hashes',
+ './.bzr/merge-hashes',
+ './.bzr/repository',
+ ])
+ self.assertRepositoryHasSameItems(tree.branch.repository,
+ target.open_branch().repository)
+ target.open_workingtree().revert()
+
+ def test_revert_inventory(self):
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+ dir = tree.bzrdir
+ target = dir.clone(self.get_url('target'))
+ self.skipIfNoWorkingTree(target)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/stat-cache',
+ './.bzr/checkout/dirstate',
+ './.bzr/checkout/stat-cache',
+ './.bzr/checkout/merge-hashes',
+ './.bzr/merge-hashes',
+ './.bzr/repository',
+ ])
+ self.assertRepositoryHasSameItems(tree.branch.repository,
+ target.open_branch().repository)
+
+ target.open_workingtree().revert()
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/stat-cache',
+ './.bzr/checkout/dirstate',
+ './.bzr/checkout/stat-cache',
+ './.bzr/checkout/merge-hashes',
+ './.bzr/merge-hashes',
+ './.bzr/repository',
+ ])
+ self.assertRepositoryHasSameItems(tree.branch.repository,
+ target.open_branch().repository)
+
+ def test_clone_bzrdir_tree_branch_reference(self):
+ # a tree with a branch reference (aka a checkout)
+ # should stay a checkout on clone.
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ return
+ self.createWorkingTreeOrSkip(dir)
+ target = dir.clone(self.get_url('target'))
+ self.skipIfNoWorkingTree(target)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/stat-cache',
+ './.bzr/checkout/stat-cache',
+ './.bzr/checkout/merge-hashes',
+ './.bzr/merge-hashes',
+ './.bzr/repository/inventory.knit',
+ ])
+
+ def test_clone_bzrdir_branch_and_repo_into_shared_repo_force_new_repo(self):
+ # with force_new_repo, cloning into a shared repo creates a new repository.
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.branch.copy_content_into(source)
+ try:
+ self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ return
+ dir = source.bzrdir
+ target = dir.clone(self.get_url('target/child'), force_new_repo=True)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ repo = target.open_repository()
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ ['./.bzr/repository',
+ ])
+ self.assertRepositoryHasSameItems(tree.branch.repository, repo)
+
+ def test_clone_bzrdir_branch_reference(self):
+ # cloning should preserve the reference status of the branch in a bzrdir
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ return
+ target = dir.clone(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport)
+
+ def test_sprout_bzrdir_repository(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['foo'], transport=tree.bzrdir.transport.clone('..'))
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ dir = self.make_bzrdir('source')
+ repo = dir.create_repository()
+ repo.fetch(tree.branch.repository)
+ self.assertTrue(repo.has_revision('1'))
+ try:
+ self.assertTrue(
+ _mod_revision.is_null(_mod_revision.ensure_null(
+ dir.open_branch().last_revision())))
+ except errors.NotBranchError:
+ pass
+ target = dir.sprout(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ # testing inventory isn't reasonable for repositories
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ [
+ './.bzr/branch',
+ './.bzr/checkout',
+ './.bzr/inventory',
+ './.bzr/parent',
+ './.bzr/repository/inventory.knit',
+ ])
+ try:
+ local_inventory = dir.transport.local_abspath('inventory')
+ except errors.NotLocalUrl:
+ return
+ try:
+ # If we happen to have a tree, we'll guarantee everything
+ # except for the tree root is the same.
+ inventory_f = file(local_inventory, 'rb')
+ self.addCleanup(inventory_f.close)
+ self.assertContainsRe(inventory_f.read(),
+ '<inventory format="5">\n</inventory>\n')
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def test_sprout_bzrdir_branch_and_repo(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.bzrdir.open_branch().copy_content_into(source)
+ dir = source.bzrdir
+ target = dir.sprout(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ target_repo = target.open_repository()
+ self.assertRepositoryHasSameItems(source.repository, target_repo)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ [
+ './.bzr/basis-inventory-cache',
+ './.bzr/branch/branch.conf',
+ './.bzr/branch/parent',
+ './.bzr/checkout',
+ './.bzr/checkout/inventory',
+ './.bzr/checkout/stat-cache',
+ './.bzr/inventory',
+ './.bzr/parent',
+ './.bzr/repository',
+ './.bzr/stat-cache',
+ './foo',
+ ])
+
+ def test_sprout_bzrdir_tree_branch_repo(self):
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['foo'], transport=tree.bzrdir.transport.clone('..'))
+ tree.add('foo')
+ tree.commit('revision 1')
+ dir = tree.bzrdir
+ target = self.sproutOrSkip(dir, self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertDirectoriesEqual(dir.root_transport, target.root_transport,
+ [
+ './.bzr/branch',
+ './.bzr/checkout/dirstate',
+ './.bzr/checkout/stat-cache',
+ './.bzr/checkout/inventory',
+ './.bzr/inventory',
+ './.bzr/parent',
+ './.bzr/repository',
+ './.bzr/stat-cache',
+ ])
+ self.assertRepositoryHasSameItems(
+ tree.branch.repository, target.open_repository())
+
+
+ def test_retire_bzrdir(self):
+ bd = self.make_bzrdir('.')
+ transport = bd.root_transport
+ # must not overwrite existing directories
+ self.build_tree(['.bzr.retired.0/', '.bzr.retired.0/junk',],
+ transport=transport)
+ self.assertTrue(transport.has('.bzr'))
+ bd.retire_bzrdir()
+ self.assertFalse(transport.has('.bzr'))
+ self.assertTrue(transport.has('.bzr.retired.1'))
+
+ def test_retire_bzrdir_limited(self):
+ bd = self.make_bzrdir('.')
+ transport = bd.root_transport
+ # must not overwrite existing directories
+ self.build_tree(['.bzr.retired.0/', '.bzr.retired.0/junk',],
+ transport=transport)
+ self.assertTrue(transport.has('.bzr'))
+ self.assertRaises((errors.FileExists, errors.DirectoryNotEmpty),
+ bd.retire_bzrdir, limit=0)
+
+ def test_get_branch_transport(self):
+ dir = self.make_bzrdir('.')
+ # without a format, get_branch_transport gives us a transport
+ # which -may- point to an existing dir.
+ self.assertTrue(isinstance(dir.get_branch_transport(None),
+ transport.Transport))
+ # with a given format, either the bzr dir supports identifiable
+ # branches, or it supports anonymous branch formats, but not both.
+ anonymous_format = AnonymousTestBranchFormat()
+ identifiable_format = IdentifiableTestBranchFormat()
+ try:
+ found_transport = dir.get_branch_transport(anonymous_format)
+ self.assertRaises(errors.IncompatibleFormat,
+ dir.get_branch_transport,
+ identifiable_format)
+ except errors.IncompatibleFormat:
+ found_transport = dir.get_branch_transport(identifiable_format)
+ self.assertTrue(isinstance(found_transport, transport.Transport))
+ # and the dir which has been initialized for us must exist.
+ found_transport.list_dir('.')
+
+ def test_get_repository_transport(self):
+ dir = self.make_bzrdir('.')
+ # without a format, get_repository_transport gives us a transport
+ # which -may- point to an existing dir.
+ self.assertTrue(isinstance(dir.get_repository_transport(None),
+ transport.Transport))
+ # with a given format, either the bzr dir supports identifiable
+ # repositories, or it supports anonymous repository formats, but not both.
+ anonymous_format = AnonymousTestRepositoryFormat()
+ identifiable_format = IdentifiableTestRepositoryFormat()
+ try:
+ found_transport = dir.get_repository_transport(anonymous_format)
+ self.assertRaises(errors.IncompatibleFormat,
+ dir.get_repository_transport,
+ identifiable_format)
+ except errors.IncompatibleFormat:
+ found_transport = dir.get_repository_transport(identifiable_format)
+ self.assertTrue(isinstance(found_transport, transport.Transport))
+ # and the dir which has been initialized for us must exist.
+ found_transport.list_dir('.')
+
+ def test_get_workingtree_transport(self):
+ dir = self.make_bzrdir('.')
+ # without a format, get_workingtree_transport gives us a transport
+ # which -may- point to an existing dir.
+ self.assertTrue(isinstance(dir.get_workingtree_transport(None),
+ transport.Transport))
+ # with a given format, either the bzr dir supports identifiable
+ # trees, or it supports anonymous tree formats, but not both.
+ anonymous_format = AnonymousTestWorkingTreeFormat()
+ identifiable_format = IdentifiableTestWorkingTreeFormat()
+ try:
+ found_transport = dir.get_workingtree_transport(anonymous_format)
+ self.assertRaises(errors.IncompatibleFormat,
+ dir.get_workingtree_transport,
+ identifiable_format)
+ except errors.IncompatibleFormat:
+ found_transport = dir.get_workingtree_transport(identifiable_format)
+ self.assertTrue(isinstance(found_transport, transport.Transport))
+ # and the dir which has been initialized for us must exist.
+ found_transport.list_dir('.')
+
+ def assertInitializeEx(self, t, need_meta=False, **kwargs):
+ """Execute initialize_on_transport_ex and check it succeeded correctly.
+
+ This involves checking that the disk objects were created, that they
+ open with the same format as the one returned, and that they have the
+ expected disk format.
+
+ :param t: The transport to initialize on.
+ :param **kwargs: Additional arguments to pass to
+ initialize_on_transport_ex.
+ :return: the resulting repo, control dir tuple.
+ """
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("control dir format is not "
+ "initializable")
+ repo, control, require_stacking, repo_policy = \
+ self.bzrdir_format.initialize_on_transport_ex(t, **kwargs)
+ if repo is not None:
+ # Repositories are open write-locked
+ self.assertTrue(repo.is_write_locked())
+ self.addCleanup(repo.unlock)
+ self.assertIsInstance(control, bzrdir.BzrDir)
+ opened = bzrdir.BzrDir.open(t.base)
+ expected_format = self.bzrdir_format
+ if need_meta and expected_format.fixed_components:
+ # Pre-metadir formats change when we are making something that
+ # needs a metaformat, because clone is used for push.
+ expected_format = bzrdir.BzrDirMetaFormat1()
+ if not isinstance(expected_format, RemoteBzrDirFormat):
+ self.assertEqual(control._format.network_name(),
+ expected_format.network_name())
+ self.assertEqual(control._format.network_name(),
+ opened._format.network_name())
+ self.assertEqual(control.__class__, opened.__class__)
+ return repo, control
+
+ def test_format_initialize_on_transport_ex_default_stack_on(self):
+ # When initialize_on_transport_ex uses a stacked-on branch because of
+ # a stacking policy on the target, the location of the fallback
+ # repository is the same as the external location of the stacked-on
+ # branch.
+ balloon = self.make_bzrdir('balloon')
+ if isinstance(balloon._format, bzrdir.BzrDirMetaFormat1):
+ stack_on = self.make_branch('stack-on', format='1.9')
+ else:
+ stack_on = self.make_branch('stack-on')
+ if not stack_on.repository._format.supports_nesting_repositories:
+ raise TestNotApplicable("requires nesting repositories")
+ config = self.make_bzrdir('.').get_config()
+ try:
+ config.set_default_stack_on('stack-on')
+ except errors.BzrError:
+ raise TestNotApplicable('Only relevant for stackable formats.')
+ # Initialize a bzrdir subject to the policy.
+ t = self.get_transport('stacked')
+ repo_fmt = controldir.format_registry.make_bzrdir('1.9')
+ repo_name = repo_fmt.repository_format.network_name()
+ repo, control = self.assertInitializeEx(
+ t, need_meta=True, repo_format_name=repo_name, stacked_on=None)
+ # self.addCleanup(repo.unlock)
+ # There's one fallback repo, with a public location.
+ self.assertLength(1, repo._fallback_repositories)
+ fallback_repo = repo._fallback_repositories[0]
+ self.assertEqual(
+ stack_on.base, fallback_repo.bzrdir.root_transport.base)
+ # The bzrdir creates a branch in stacking-capable format.
+ new_branch = control.create_branch()
+ self.assertTrue(new_branch._format.supports_stacking())
+
+ def test_no_leftover_dirs(self):
+ # bug 886196: development-colo uses a branch-lock directory
+ # in the user directory rather than the control directory.
+ if not self.bzrdir_format.colocated_branches:
+ raise TestNotApplicable(
+ "format does not support colocated branches")
+ branch = self.make_branch('.', format='development-colo')
+ branch.bzrdir.create_branch(name="another-colocated-branch")
+ self.assertEquals(
+ branch.bzrdir.user_transport.list_dir("."),
+ [".bzr"])
+
+ def test_get_branches(self):
+ repo = self.make_repository('branch-1')
+ if not repo.bzrdir._format.colocated_branches:
+ raise TestNotApplicable('Format does not support colocation')
+ target_branch = repo.bzrdir.create_branch(name='foo')
+ repo.bzrdir.set_branch_reference(target_branch)
+ self.assertEqual(set(["", 'foo']),
+ set(repo.bzrdir.get_branches().keys()))
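
The clone and sprout tests above lean heavily on assertDirectoriesEqual, which walks both control directories over bzrlib transports and compares file bytes while skipping an ignore list of paths that legitimately differ (stat caches, merge hashes, and so on). As a rough, hedged sketch of the same idea on a plain local filesystem (assert_dirs_equal is a hypothetical helper, not part of bzrlib, and unlike the real method it does not notice files present only in the target):

    import os

    def assert_dirs_equal(source, target, ignore=()):
        # Simplified local-filesystem analogue of assertDirectoriesEqual:
        # walk the source tree and require every non-ignored file to be
        # present in the target with identical bytes.
        for dirpath, dirnames, filenames in os.walk(source):
            for filename in filenames:
                relpath = os.path.relpath(
                    os.path.join(dirpath, filename), source)
                if relpath in ignore:
                    continue
                with open(os.path.join(source, relpath), 'rb') as f:
                    source_bytes = f.read()
                with open(os.path.join(target, relpath), 'rb') as f:
                    target_bytes = f.read()
                assert source_bytes == target_bytes, relpath

The ignore lists in the tests above (for example './.bzr/merge-hashes' and './.bzr/repository') play the same role: they exclude files whose contents are allowed to change during a clone or sprout, while assertRepositoryHasSameItems covers repository contents by value.
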
diff --git a/bzrlib/tests/per_controldir/__init__.py b/bzrlib/tests/per_controldir/__init__.py
new file mode 100644
index 0000000..16cb19a
--- /dev/null
+++ b/bzrlib/tests/per_controldir/__init__.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""ControlDir implementation tests for bzr.
+
+These test the conformance of all the controldir variations to the expected API.
+Specific tests for individual formats are in the tests/test_bzrdir.py file
+rather than in tests/per_controldir/*.py.
+"""
+
+from bzrlib.controldir import ControlDirFormat
+from bzrlib.tests import (
+ default_transport,
+ multiply_tests,
+ test_server,
+ TestCaseWithTransport,
+ )
+from bzrlib.transport import memory
+
+
+def make_scenarios(vfs_factory, transport_server, transport_readonly_server,
+ formats, name_suffix=''):
+ """Transform the input to a list of scenarios.
+
+ :param formats: A list of bzrdir_format objects.
+ :param vfs_factory: A factory to create a Transport Server which has
+ all the VFS methods working, and is writable.
+ """
+ result = []
+ for format in formats:
+ scenario_name = format.__class__.__name__
+ scenario_name += name_suffix
+ scenario = (scenario_name, {
+ "vfs_transport_factory": vfs_factory,
+ "transport_server": transport_server,
+ "transport_readonly_server": transport_readonly_server,
+ "bzrdir_format": format,
+ })
+ result.append(scenario)
+ return result
+
+
+class TestCaseWithControlDir(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestCaseWithControlDir, self).setUp()
+ self.bzrdir = None
+
+ def get_bzrdir(self):
+ if self.bzrdir is None:
+ self.bzrdir = self.make_bzrdir(None)
+ return self.bzrdir
+
+ def get_default_format(self):
+ return self.bzrdir_format
+
+
+def load_tests(standard_tests, module, loader):
+ test_per_controldir = [
+ 'bzrlib.tests.per_controldir.test_controldir',
+ 'bzrlib.tests.per_controldir.test_format',
+ 'bzrlib.tests.per_controldir.test_push',
+ ]
+ submod_tests = loader.loadTestsFromModuleNames(test_per_controldir)
+ formats = ControlDirFormat.known_formats()
+ scenarios = make_scenarios(
+ default_transport,
+ None,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ formats)
+ # This will always add scenarios using the smart server.
+ from bzrlib.remote import RemoteBzrDirFormat
+ # test the remote server behaviour when backed with a MemoryTransport
+ # Once for the current version
+ scenarios.extend(make_scenarios(
+ memory.MemoryServer,
+ test_server.SmartTCPServer_for_testing,
+ test_server.ReadonlySmartTCPServer_for_testing,
+ [(RemoteBzrDirFormat())],
+ name_suffix='-default'))
+ # And once with < 1.6 - the 'v2' protocol.
+ scenarios.extend(make_scenarios(
+ memory.MemoryServer,
+ test_server.SmartTCPServer_for_testing_v2_only,
+ test_server.ReadonlySmartTCPServer_for_testing_v2_only,
+ [(RemoteBzrDirFormat())],
+ name_suffix='-v2'))
+ # add the tests for the sub modules
+ return multiply_tests(submod_tests, scenarios, standard_tests)
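
For reference, here is a small usage sketch of the make_scenarios helper defined above. The format classes are stand-ins purely for illustration; in the real load_tests hook the formats are ControlDirFormat instances and the first three arguments are transport factories and servers rather than None.

    from bzrlib.tests.per_controldir import make_scenarios

    class FormatA(object):
        """Stand-in format; real callers pass ControlDirFormat objects."""

    class FormatB(object):
        """Another stand-in, used only so the scenario names differ."""

    scenarios = make_scenarios(None, None, None, [FormatA(), FormatB()],
                               name_suffix='-demo')
    for name, attrs in scenarios:
        print name, sorted(attrs.keys())
    # FormatA-demo ['bzrdir_format', 'transport_readonly_server',
    #               'transport_server', 'vfs_transport_factory']
    # FormatB-demo ['bzrdir_format', 'transport_readonly_server',
    #               'transport_server', 'vfs_transport_factory']

Each scenario name is the format's class name plus the optional suffix, and the attribute dict is what multiply_tests applies to every copied test case.
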
diff --git a/bzrlib/tests/per_controldir/test_controldir.py b/bzrlib/tests/per_controldir/test_controldir.py
new file mode 100644
index 0000000..621f81b
--- /dev/null
+++ b/bzrlib/tests/per_controldir/test_controldir.py
@@ -0,0 +1,1766 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for control directory implementations - tests a controldir format."""
+
+from itertools import izip
+
+import bzrlib.branch
+from bzrlib import (
+ bzrdir as _mod_bzrdir,
+ check,
+ controldir,
+ errors,
+ gpg,
+ osutils,
+ revision as _mod_revision,
+ transport,
+ ui,
+ urlutils,
+ workingtree,
+ )
+from bzrlib.tests import (
+ fixtures,
+ ChrootedTestCase,
+ TestNotApplicable,
+ TestSkipped,
+ )
+from bzrlib.tests.per_controldir import TestCaseWithControlDir
+from bzrlib.transport.local import LocalTransport
+from bzrlib.ui import (
+ CannedInputUIFactory,
+ )
+from bzrlib.remote import (
+ RemoteBzrDir,
+ RemoteBzrDirFormat,
+ RemoteRepository,
+ )
+
+
+class TestControlDir(TestCaseWithControlDir):
+
+ def skipIfNoWorkingTree(self, a_bzrdir):
+ """Raises TestSkipped if a_bzrdir doesn't have a working tree.
+
+ If the bzrdir does have a workingtree, this is a no-op.
+ """
+ try:
+ a_bzrdir.open_workingtree()
+ except (errors.NotLocalUrl, errors.NoWorkingTree):
+ raise TestSkipped("bzrdir on transport %r has no working tree"
+ % a_bzrdir.transport)
+
+ def openWorkingTreeIfLocal(self, a_bzrdir):
+ """If a_bzrdir is on a local transport, call open_workingtree() on it.
+ """
+ if not isinstance(a_bzrdir.root_transport, LocalTransport):
+ # it's not local, but that's ok
+ return
+ a_bzrdir.open_workingtree()
+
+ def createWorkingTreeOrSkip(self, a_bzrdir):
+ """Create a working tree on a_bzrdir, or raise TestSkipped.
+
+ A simple wrapper for create_workingtree that translates NotLocalUrl into
+ TestSkipped. Returns the newly created working tree.
+ """
+ try:
+ return a_bzrdir.create_workingtree()
+ except (errors.NotLocalUrl, errors.UnsupportedOperation):
+ raise TestSkipped("cannot make working tree with transport %r"
+ % a_bzrdir.transport)
+
+ def sproutOrSkip(self, from_bzrdir, to_url, revision_id=None,
+ force_new_repo=False, accelerator_tree=None,
+ create_tree_if_local=True):
+ """Sprout from_bzrdir into to_url, or raise TestSkipped.
+
+ A simple wrapper for from_bzrdir.sprout that translates NotLocalUrl into
+ TestSkipped. Returns the newly sprouted bzrdir.
+ """
+ to_transport = transport.get_transport(to_url)
+ if not isinstance(to_transport, LocalTransport):
+ raise TestSkipped('Cannot sprout to remote bzrdirs.')
+ target = from_bzrdir.sprout(to_url, revision_id=revision_id,
+ force_new_repo=force_new_repo,
+ possible_transports=[to_transport],
+ accelerator_tree=accelerator_tree,
+ create_tree_if_local=create_tree_if_local)
+ return target
+
+ def test_uninitializable(self):
+ if self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is initializable")
+ t = self.get_transport()
+ self.assertRaises(errors.UninitializableFormat,
+ self.bzrdir_format.initialize, t.base)
+
+ def test_multiple_initialization(self):
+ # loopback test to check the current format initializes to itself.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ self.bzrdir_format.initialize('.')
+ self.assertRaises(errors.AlreadyControlDirError,
+ self.bzrdir_format.initialize, '.')
+
+ def test_create_null_workingtree(self):
+ dir = self.make_bzrdir('dir1')
+ dir.create_repository()
+ dir.create_branch()
+ try:
+ wt = dir.create_workingtree(revision_id=_mod_revision.NULL_REVISION)
+ except (errors.NotLocalUrl, errors.UnsupportedOperation):
+ raise TestSkipped("cannot make working tree with transport %r"
+ % dir.transport)
+ self.assertEqual([], wt.get_parent_ids())
+
+ def test_destroy_workingtree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add('file')
+ tree.commit('first commit')
+ bzrdir = tree.bzrdir
+ try:
+ bzrdir.destroy_workingtree()
+ except errors.UnsupportedOperation:
+ raise TestSkipped('Format does not support destroying tree')
+ self.assertPathDoesNotExist('tree/file')
+ self.assertRaises(errors.NoWorkingTree, bzrdir.open_workingtree)
+ bzrdir.create_workingtree()
+ self.assertPathExists('tree/file')
+ bzrdir.destroy_workingtree_metadata()
+ self.assertPathExists('tree/file')
+ self.assertRaises(errors.NoWorkingTree, bzrdir.open_workingtree)
+
+ def test_destroy_branch(self):
+ branch = self.make_branch('branch')
+ bzrdir = branch.bzrdir
+ try:
+ bzrdir.destroy_branch()
+ except (errors.UnsupportedOperation, errors.TransportNotPossible):
+ raise TestNotApplicable('Format does not support destroying branch')
+ self.assertRaises(errors.NotBranchError, bzrdir.open_branch)
+ bzrdir.create_branch()
+ bzrdir.open_branch()
+
+ def test_destroy_branch_no_branch(self):
+ branch = self.make_repository('branch')
+ bzrdir = branch.bzrdir
+ try:
+ self.assertRaises(errors.NotBranchError, bzrdir.destroy_branch)
+ except (errors.UnsupportedOperation, errors.TransportNotPossible):
+ raise TestNotApplicable('Format does not support destroying branch')
+
+ def test_destroy_repository(self):
+ repo = self.make_repository('repository')
+ bzrdir = repo.bzrdir
+ try:
+ bzrdir.destroy_repository()
+ except (errors.UnsupportedOperation, errors.TransportNotPossible):
+ raise TestNotApplicable('Format does not support destroying'
+ ' repository')
+ self.assertRaises(errors.NoRepositoryPresent,
+ bzrdir.destroy_repository)
+ self.assertRaises(errors.NoRepositoryPresent, bzrdir.open_repository)
+ bzrdir.create_repository()
+ bzrdir.open_repository()
+
+ def test_open_workingtree_raises_no_working_tree(self):
+ """ControlDir.open_workingtree() should raise NoWorkingTree (rather than
+ e.g. NotLocalUrl) if there is no working tree.
+ """
+ dir = self.make_bzrdir('source')
+ vfs_dir = controldir.ControlDir.open(self.get_vfs_only_url('source'))
+ if vfs_dir.has_workingtree():
+ # This ControlDir format doesn't support ControlDirs without
+ # working trees, so this test is irrelevant.
+ raise TestNotApplicable("format does not support "
+ "control directories without working tree")
+ self.assertRaises(errors.NoWorkingTree, dir.open_workingtree)
+
+ def test_clone_bzrdir_repository_under_shared(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['foo'], transport=tree.bzrdir.transport.clone('..'))
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ dir = self.make_bzrdir('source')
+ repo = dir.create_repository()
+ if not repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("repository format does not support "
+ "nesting")
+ repo.fetch(tree.branch.repository)
+ self.assertTrue(repo.has_revision('1'))
+ try:
+ self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("repository format does not support "
+ "shared repositories")
+ target = dir.clone(self.get_url('target/child'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertRaises(errors.NoRepositoryPresent, target.open_repository)
+
+ def test_clone_bzrdir_repository_branch_both_under_shared(self):
+ # Create a shared repository
+ try:
+ shared_repo = self.make_repository('shared', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("repository format does not support "
+ "shared repositories")
+ if not shared_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("format does not support nesting "
+ "repositories")
+ # Make a branch, 'commit_tree', and working tree outside of the shared
+ # repository, and commit some revisions to it.
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['foo'], transport=tree.bzrdir.root_transport)
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ # Copy the content (i.e. revisions) from the 'commit_tree' branch's
+ # repository into the shared repository.
+ tree.branch.repository.copy_content_into(shared_repo)
+ # Make a branch 'source' inside the shared repository.
+ dir = self.make_bzrdir('shared/source')
+ dir.create_branch()
+ # Clone 'source' to 'target', also inside the shared repository.
+ target = dir.clone(self.get_url('shared/target'))
+ # 'source', 'target', and the shared repo all have distinct bzrdirs.
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertNotEqual(dir.transport.base, shared_repo.bzrdir.transport.base)
+ # The shared repository will contain revisions from the 'commit_tree'
+ # repository, even revisions that are not part of the history of the
+ # 'commit_tree' branch.
+ self.assertTrue(shared_repo.has_revision('1'))
+
+ def test_clone_bzrdir_repository_branch_only_source_under_shared(self):
+ try:
+ shared_repo = self.make_repository('shared', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("repository format does not support "
+ "shared repositories")
+ if not shared_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("format does not support nesting "
+ "repositories")
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.branch.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ tree.branch.repository.copy_content_into(shared_repo)
+ if shared_repo.make_working_trees():
+ shared_repo.set_make_working_trees(False)
+ self.assertFalse(shared_repo.make_working_trees())
+ self.assertTrue(shared_repo.has_revision('1'))
+ dir = self.make_bzrdir('shared/source')
+ dir.create_branch()
+ target = dir.clone(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertNotEqual(dir.transport.base, shared_repo.bzrdir.transport.base)
+ branch = target.open_branch()
+ self.assertTrue(branch.repository.has_revision('1'))
+ self.assertFalse(branch.repository.make_working_trees())
+ self.assertTrue(branch.repository.is_shared())
+
+ def test_clone_bzrdir_repository_revision(self):
+ # test for revision limiting, [smoke test, not corner case checks].
+ # make a repository with some revisions,
+ # and clone it with a revision limit.
+ #
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.branch.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ source = self.make_repository('source')
+ tree.branch.repository.copy_content_into(source)
+ dir = source.bzrdir
+ target = dir.clone(self.get_url('target'), revision_id='2')
+ raise TestSkipped('revision limiting not strict yet')
+
+ def test_clone_bzrdir_branch_and_repo_fixed_user_id(self):
+ # Bug #430868 is about an email containing '.sig'
+ self.overrideEnv('BZR_EMAIL', 'murphy@host.sighup.org')
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ rev1 = tree.commit('revision 1')
+ tree_repo = tree.branch.repository
+ tree_repo.lock_write()
+ tree_repo.start_write_group()
+ tree_repo.sign_revision(rev1, gpg.LoopbackGPGStrategy(None))
+ tree_repo.commit_write_group()
+ tree_repo.unlock()
+ target = self.make_branch('target')
+ tree.branch.repository.copy_content_into(target.repository)
+ tree.branch.copy_content_into(target)
+ self.assertTrue(target.repository.has_revision(rev1))
+ self.assertEqual(
+ tree_repo.get_signature_text(rev1),
+ target.repository.get_signature_text(rev1))
+
+ def test_clone_bzrdir_branch_and_repo_into_shared_repo(self):
+ # by default cloning into a shared repo uses the shared repo.
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.branch.copy_content_into(source)
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("repository format does not support "
+ "shared repositories")
+ if not shared_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("format does not support nesting "
+ "repositories")
+ dir = source.bzrdir
+ target = dir.clone(self.get_url('target/child'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertRaises(errors.NoRepositoryPresent, target.open_repository)
+ self.assertEqual(source.last_revision(),
+ target.open_branch().last_revision())
+
+ def test_clone_bzrdir_branch_revision(self):
+ # test for revision limiting, [smoke test, not corner case checks].
+ # make a branch with some revisions,
+ # and clone it with a revision limit.
+ #
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.branch.copy_content_into(source)
+ dir = source.bzrdir
+ target = dir.clone(self.get_url('target'), revision_id='1')
+ self.assertEqual('1', target.open_branch().last_revision())
+
+ def test_clone_on_transport_preserves_repo_format(self):
+ if self.bzrdir_format == controldir.format_registry.make_bzrdir('default'):
+ format = 'knit'
+ else:
+ format = None
+ source_branch = self.make_branch('source', format=format)
+ # Ensure no format data is cached
+ a_dir = bzrlib.branch.Branch.open_from_transport(
+ self.get_transport('source')).bzrdir
+ target_transport = self.get_transport('target')
+ target_bzrdir = a_dir.clone_on_transport(target_transport)
+ target_repo = target_bzrdir.open_repository()
+ source_branch = bzrlib.branch.Branch.open(
+ self.get_vfs_only_url('source'))
+ if isinstance(target_repo, RemoteRepository):
+ target_repo._ensure_real()
+ target_repo = target_repo._real_repository
+ self.assertEqual(target_repo._format, source_branch.repository._format)
+
+ def test_clone_bzrdir_tree_revision(self):
+ # test for revision limiting, [smoke test, not corner case checks].
+ # make a tree with a revision with a last-revision
+ # and clone it with a revision limit.
+ # This smoke test just checks the revision-id is right. Tree specific
+ # tests will check corner cases.
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ dir = tree.bzrdir
+ target = dir.clone(self.get_url('target'), revision_id='1')
+ self.skipIfNoWorkingTree(target)
+ self.assertEqual(['1'], target.open_workingtree().get_parent_ids())
+
+ def test_clone_bzrdir_into_notrees_repo(self):
+ """Cloning into a no-trees repo should not create a working tree"""
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1')
+
+ try:
+ repo = self.make_repository('repo', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable('must support shared repositories')
+ if repo.make_working_trees():
+ repo.set_make_working_trees(False)
+ self.assertFalse(repo.make_working_trees())
+
+ a_dir = tree.bzrdir.clone(self.get_url('repo/a'))
+ a_branch = a_dir.open_branch()
+ # If the new control dir actually uses the repository, it should
+ # not have a working tree.
+ if not a_branch.repository.has_same_location(repo):
+ raise TestNotApplicable('new control dir does not use repository')
+ self.assertRaises(errors.NoWorkingTree, a_dir.open_workingtree)
+
+ def test_clone_respects_stacked(self):
+ branch = self.make_branch('parent')
+ child_transport = self.get_transport('child')
+ try:
+ child = branch.bzrdir.clone_on_transport(child_transport,
+ stacked_on=branch.base)
+ except (errors.UnstackableBranchFormat,
+ errors.UnstackableRepositoryFormat):
+ raise TestNotApplicable("branch or repository format does "
+ "not support stacking")
+ self.assertEqual(child.open_branch().get_stacked_on_url(), branch.base)
+
+ def test_set_branch_reference(self):
+ """set_branch_reference creates a branch reference"""
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ reference = dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("control directory does not "
+ "support branch references")
+ self.assertEqual(
+ referenced_branch.bzrdir.root_transport.abspath('') + '/',
+ dir.get_branch_reference())
+
+ def test_set_branch_reference_on_existing_reference(self):
+ """set_branch_reference creates a branch reference"""
+ referenced_branch1 = self.make_branch('old-referenced')
+ referenced_branch2 = self.make_branch('new-referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ reference = dir.set_branch_reference(referenced_branch1)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("control directory does not "
+ "support branch references")
+ reference = dir.set_branch_reference(referenced_branch2)
+ self.assertEqual(
+ referenced_branch2.bzrdir.root_transport.abspath('') + '/',
+ dir.get_branch_reference())
+
+ def test_set_branch_reference_on_existing_branch(self):
+ """set_branch_reference creates a branch reference"""
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_branch('source').bzrdir
+ try:
+ reference = dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("control directory does not "
+ "support branch references")
+ self.assertEqual(
+ referenced_branch.bzrdir.root_transport.abspath('') + '/',
+ dir.get_branch_reference())
+
+ def test_get_branch_reference_on_reference(self):
+ """get_branch_reference should return the right url."""
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("control directory does not "
+ "support branch references")
+ self.assertEqual(referenced_branch.bzrdir.root_transport.abspath('') + '/',
+ dir.get_branch_reference())
+
+ def test_get_branch_reference_on_non_reference(self):
+ """get_branch_reference should return None for non-reference branches."""
+ branch = self.make_branch('referenced')
+ self.assertEqual(None, branch.bzrdir.get_branch_reference())
+
+ def test_get_branch_reference_no_branch(self):
+ """get_branch_reference should not mask NotBranchErrors."""
+ dir = self.make_bzrdir('source')
+ if dir.has_branch():
+ # this format does not support branchless bzrdirs.
+ raise TestNotApplicable("format does not support "
+ "branchless control directories")
+ self.assertRaises(errors.NotBranchError, dir.get_branch_reference)
+
+ def test_sprout_bzrdir_empty(self):
+ dir = self.make_bzrdir('source')
+ target = dir.sprout(self.get_url('target'))
+ self.assertNotEqual(dir.control_transport.base, target.control_transport.base)
+ # creates a new repository, branch and tree
+ target.open_repository()
+ target.open_branch()
+ self.openWorkingTreeIfLocal(target)
+
+ def test_sprout_bzrdir_empty_under_shared_repo(self):
+ # sprouting an empty dir into a repo uses the repo
+ dir = self.make_bzrdir('source')
+ try:
+ self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ target = dir.sprout(self.get_url('target/child'))
+ self.assertRaises(errors.NoRepositoryPresent, target.open_repository)
+ target.open_branch()
+ try:
+ target.open_workingtree()
+ except errors.NoWorkingTree:
+ # Some bzrdirs can never have working trees.
+ repo = target.find_repository()
+ self.assertFalse(repo.bzrdir._format.supports_workingtrees)
+
+ def test_sprout_bzrdir_empty_under_shared_repo_force_new(self):
+ # the force_new_repo parameter should force use of a new repo in an empty
+ # bzrdir's sprout logic
+ dir = self.make_bzrdir('source')
+ try:
+ self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ target = dir.sprout(self.get_url('target/child'), force_new_repo=True)
+ target.open_repository()
+ target.open_branch()
+ self.openWorkingTreeIfLocal(target)
+
+ def test_sprout_bzrdir_with_repository_to_shared(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ source = self.make_repository('source')
+ tree.branch.repository.copy_content_into(source)
+ dir = source.bzrdir
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support "
+ "shared repositories")
+ target = dir.sprout(self.get_url('target/child'))
+ self.assertNotEqual(dir.user_transport.base, target.user_transport.base)
+ self.assertTrue(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_repository_branch_both_under_shared(self):
+ try:
+ shared_repo = self.make_repository('shared', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ if not shared_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("format does not support nesting "
+ "repositories")
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ tree.branch.repository.copy_content_into(shared_repo)
+ dir = self.make_bzrdir('shared/source')
+ dir.create_branch()
+ target = dir.sprout(self.get_url('shared/target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertNotEqual(dir.transport.base, shared_repo.bzrdir.transport.base)
+ self.assertTrue(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_repository_branch_only_source_under_shared(self):
+ try:
+ shared_repo = self.make_repository('shared', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ if not shared_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("format does not support nesting "
+ "repositories")
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ tree.branch.repository.copy_content_into(shared_repo)
+ if shared_repo.make_working_trees():
+ shared_repo.set_make_working_trees(False)
+ self.assertFalse(shared_repo.make_working_trees())
+ self.assertTrue(shared_repo.has_revision('1'))
+ dir = self.make_bzrdir('shared/source')
+ dir.create_branch()
+ target = dir.sprout(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ self.assertNotEqual(dir.transport.base, shared_repo.bzrdir.transport.base)
+ branch = target.open_branch()
+ # The sprouted bzrdir has a branch, so only revisions referenced by
+ # that branch are copied, rather than the whole repository. It's an
+ # empty branch, so none are copied.
+ self.assertEqual([], branch.repository.all_revision_ids())
+ if branch.bzrdir._format.supports_workingtrees:
+ self.assertTrue(branch.repository.make_working_trees())
+ self.assertFalse(branch.repository.is_shared())
+
+ def test_sprout_bzrdir_repository_under_shared_force_new_repo(self):
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.bzrdir.open_branch().generate_revision_history(
+ _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ source = self.make_repository('source')
+ tree.branch.repository.copy_content_into(source)
+ dir = source.bzrdir
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ target = dir.sprout(self.get_url('target/child'), force_new_repo=True)
+ self.assertNotEqual(
+ dir.control_transport.base,
+ target.control_transport.base)
+ self.assertFalse(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_repository_revision(self):
+ # test for revision limiting, [smoke test, not corner case checks].
+ # make a repository with some revisions,
+ # and sprout it with a revision limit.
+ #
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ br = tree.bzrdir.open_branch()
+ br.set_last_revision_info(0, _mod_revision.NULL_REVISION)
+ tree.set_parent_trees([])
+ tree.commit('revision 2', rev_id='2')
+ source = self.make_repository('source')
+ tree.branch.repository.copy_content_into(source)
+ dir = source.bzrdir
+ target = self.sproutOrSkip(dir, self.get_url('target'), revision_id='2')
+ raise TestSkipped('revision limiting not strict yet')
+
+ def test_sprout_bzrdir_branch_and_repo_shared(self):
+ # sprouting a branch with a repo into a shared repo uses the shared
+ # repo
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.bzrdir.open_branch().copy_content_into(source)
+ dir = source.bzrdir
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ target = dir.sprout(self.get_url('target/child'))
+ self.assertTrue(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_branch_and_repo_shared_force_new_repo(self):
+ # sprouting a branch with a repo into a shared repo with
+ # force_new_repo does not use the shared repo
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.bzrdir.open_branch().copy_content_into(source)
+ dir = source.bzrdir
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ target = dir.sprout(self.get_url('target/child'), force_new_repo=True)
+ self.assertNotEqual(dir.control_transport.base, target.control_transport.base)
+ self.assertFalse(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_branch_reference(self):
+ # sprouting should create a repository if needed and a sprouted branch.
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support branch "
+ "references")
+ self.assertRaises(errors.NoRepositoryPresent, dir.open_repository)
+ target = dir.sprout(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ # we want target to have a branch that is in-place.
+ self.assertEqual(target, target.open_branch().bzrdir)
+ # and as we don't support repositories being detached yet, a repo in
+ # place
+ target.open_repository()
+
+ def test_sprout_bzrdir_branch_reference_shared(self):
+ # sprouting should create a repository if needed and a sprouted branch.
+ referenced_tree = self.make_branch_and_tree('referenced')
+ referenced_tree.commit('1', rev_id='1', allow_pointless=True)
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_tree.branch)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support branch "
+ "references")
+ self.assertRaises(errors.NoRepositoryPresent, dir.open_repository)
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support "
+ "shared repositories")
+ target = dir.sprout(self.get_url('target/child'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ # we want target to have a branch that is in-place.
+ self.assertEqual(target, target.open_branch().bzrdir)
+ # and we want no repository as the target is shared
+ self.assertRaises(errors.NoRepositoryPresent,
+ target.open_repository)
+ # and we want revision '1' in the shared repo
+ self.assertTrue(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_branch_reference_shared_force_new_repo(self):
+ # sprouting should create a repository if needed and a sprouted branch.
+ referenced_tree = self.make_branch_and_tree('referenced')
+ referenced_tree.commit('1', rev_id='1', allow_pointless=True)
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_tree.branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("format does not support "
+ "branch references")
+ self.assertRaises(errors.NoRepositoryPresent, dir.open_repository)
+ try:
+ shared_repo = self.make_repository('target', shared=True)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ target = dir.sprout(self.get_url('target/child'), force_new_repo=True)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ # we want target to have a branch that is in-place.
+ self.assertEqual(target, target.open_branch().bzrdir)
+ # and we want revision '1' in the new repo
+ self.assertTrue(target.open_repository().has_revision('1'))
+ # but not the shared one
+ self.assertFalse(shared_repo.has_revision('1'))
+
+ def test_sprout_bzrdir_branch_revision(self):
+ # test for revision limiting, [smoke test, not corner case checks].
+ # make a repository with some revisions,
+ # and sprout it with a revision limit.
+ #
+ tree = self.make_branch_and_tree('commit_tree')
+ self.build_tree(['commit_tree/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ source = self.make_branch('source')
+ tree.branch.repository.copy_content_into(source.repository)
+ tree.bzrdir.open_branch().copy_content_into(source)
+ dir = source.bzrdir
+ target = dir.sprout(self.get_url('target'), revision_id='1')
+ self.assertEqual('1', target.open_branch().last_revision())
+
+ def test_sprout_bzrdir_branch_with_tags(self):
+ # when sprouting a branch all revisions named in the tags are copied
+ # too.
+ builder = self.make_branch_builder('source')
+ source = fixtures.build_branch_with_non_ancestral_rev(builder)
+ try:
+ source.tags.set_tag('tag-a', 'rev-2')
+ except errors.TagsNotSupported:
+ raise TestNotApplicable('Branch format does not support tags.')
+ source.get_config_stack().set('branch.fetch_tags', True)
+ # Now source has a tag not in its ancestry. Sprout its controldir.
+ dir = source.bzrdir
+ target = dir.sprout(self.get_url('target'))
+ # The tag is present, and so is its revision.
+ new_branch = target.open_branch()
+ self.assertEqual('rev-2', new_branch.tags.lookup_tag('tag-a'))
+ new_branch.repository.get_revision('rev-2')
+
+ def test_sprout_bzrdir_branch_with_absent_tag(self):
+ # tags referencing absent revisions are copied (and those absent
+ # revisions do not prevent the sprout.)
+ builder = self.make_branch_builder('source')
+ builder.build_commit(message="Rev 1", rev_id='rev-1')
+ source = builder.get_branch()
+ try:
+ source.tags.set_tag('tag-a', 'missing-rev')
+ except (errors.TagsNotSupported, errors.GhostTagsNotSupported):
+ raise TestNotApplicable('Branch format does not support tags '
+ 'or tags referencing ghost revisions.')
+ # Now source has a tag pointing to an absent revision. Sprout its
+ # controldir.
+ dir = source.bzrdir
+ target = dir.sprout(self.get_url('target'))
+ # The tag is present in the target
+ new_branch = target.open_branch()
+ self.assertEqual('missing-rev', new_branch.tags.lookup_tag('tag-a'))
+
+ def test_sprout_bzrdir_passing_source_branch_with_absent_tag(self):
+ # tags referencing absent revisions are copied (and those absent
+ # revisions do not prevent the sprout.)
+ builder = self.make_branch_builder('source')
+ builder.build_commit(message="Rev 1", rev_id='rev-1')
+ source = builder.get_branch()
+ try:
+ source.tags.set_tag('tag-a', 'missing-rev')
+ except (errors.TagsNotSupported, errors.GhostTagsNotSupported):
+ raise TestNotApplicable('Branch format does not support tags '
+ 'or tags referencing missing revisions.')
+ # Now source has a tag pointing to an absent revision. Sprout its
+ # controldir.
+ dir = source.bzrdir
+ target = dir.sprout(self.get_url('target'), source_branch=source)
+ # The tag is present in the target
+ new_branch = target.open_branch()
+ self.assertEqual('missing-rev', new_branch.tags.lookup_tag('tag-a'))
+
+ def test_sprout_bzrdir_passing_rev_not_source_branch_copies_tags(self):
+ # dir.sprout(..., revision_id='rev1') copies rev1, and all the tags of
+ # the branch at that bzrdir, the ancestry of all of those, but no other
+ # revs (not even the tip of the source branch).
+ builder = self.make_branch_builder('source')
+ builder.build_commit(message="Base", rev_id='base-rev')
+ # Make three parallel lines of ancestry off this base.
+ source = builder.get_branch()
+ builder.build_commit(message="Rev A1", rev_id='rev-a1')
+ builder.build_commit(message="Rev A2", rev_id='rev-a2')
+ builder.build_commit(message="Rev A3", rev_id='rev-a3')
+ source.set_last_revision_info(1, 'base-rev')
+ builder.build_commit(message="Rev B1", rev_id='rev-b1')
+ builder.build_commit(message="Rev B2", rev_id='rev-b2')
+ builder.build_commit(message="Rev B3", rev_id='rev-b3')
+ source.set_last_revision_info(1, 'base-rev')
+ builder.build_commit(message="Rev C1", rev_id='rev-c1')
+ builder.build_commit(message="Rev C2", rev_id='rev-c2')
+ builder.build_commit(message="Rev C3", rev_id='rev-c3')
+ # Set the branch tip to A2
+ source.set_last_revision_info(3, 'rev-a2')
+ try:
+ # Create a tag for B2, and for an absent rev
+ source.tags.set_tag('tag-non-ancestry', 'rev-b2')
+ except errors.TagsNotSupported:
+ raise TestNotApplicable('Branch format does not support tags')
+ try:
+ source.tags.set_tag('tag-absent', 'absent-rev')
+ except errors.GhostTagsNotSupported:
+ has_ghost_tag = False
+ else:
+ has_ghost_tag = True
+ source.get_config_stack().set('branch.fetch_tags', True)
+ # And ask sprout for C2
+ dir = source.bzrdir
+ target = dir.sprout(self.get_url('target'), revision_id='rev-c2')
+ # The tags are present
+ new_branch = target.open_branch()
+ if has_ghost_tag:
+ self.assertEqual(
+ {'tag-absent': 'absent-rev', 'tag-non-ancestry': 'rev-b2'},
+ new_branch.tags.get_tag_dict())
+ else:
+ self.assertEqual(
+ {'tag-non-ancestry': 'rev-b2'},
+ new_branch.tags.get_tag_dict())
+ # And the revs for B2's and C2's ancestries are present, but no
+ # others (not even A2, the tip of the source branch).
+ self.assertEqual(
+ ['base-rev', 'rev-b1', 'rev-b2', 'rev-c1', 'rev-c2'],
+ sorted(new_branch.repository.all_revision_ids()))
+
+ def test_sprout_bzrdir_tree_branch_reference(self):
+ # sprouting should create a repository if needed and a sprouted branch.
+ # the tree state should not be copied.
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("format does not support "
+ "branch references")
+ self.assertRaises(errors.NoRepositoryPresent, dir.open_repository)
+ tree = self.createWorkingTreeOrSkip(dir)
+ self.build_tree(['source/subdir/'])
+ tree.add('subdir')
+ target = dir.sprout(self.get_url('target'))
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ # we want target to have a branch that is in-place.
+ self.assertEqual(target, target.open_branch().bzrdir)
+ # and as we don't support repositories being detached yet, a repo in
+ # place
+ target.open_repository()
+ result_tree = target.open_workingtree()
+ self.assertFalse(result_tree.has_filename('subdir'))
+
+ def test_sprout_bzrdir_tree_branch_reference_revision(self):
+ # sprouting should create a repository if needed and a sprouted branch.
+ # the tree state should not be copied but the revision changed,
+ # and likewise the new branch should be truncated too
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('source')
+ try:
+ dir.set_branch_reference(referenced_branch)
+ except errors.IncompatibleFormat:
+ # this is ok too, not all formats have to support references.
+ raise TestNotApplicable("format does not support "
+ "branch references")
+ self.assertRaises(errors.NoRepositoryPresent, dir.open_repository)
+ tree = self.createWorkingTreeOrSkip(dir)
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ target = dir.sprout(self.get_url('target'), revision_id='1')
+ self.skipIfNoWorkingTree(target)
+ self.assertNotEqual(dir.transport.base, target.transport.base)
+ # we want target to have a branch that is in-place.
+ self.assertEqual(target, target.open_branch().bzrdir)
+ # and as we don't support repositories being detached yet, a repo in
+ # place
+ target.open_repository()
+ # we trust that the working tree sprouting works via the other tests.
+ self.assertEqual(['1'], target.open_workingtree().get_parent_ids())
+ self.assertEqual('1', target.open_branch().last_revision())
+
+ def test_sprout_bzrdir_tree_revision(self):
+ # test for revision limiting, [smoke test, not corner case checks].
+ # make a tree with some revisions and a last-revision
+ # and sprout it with a revision limit.
+ # This smoke test just checks the revision-id is right. Tree specific
+ # tests will check corner cases.
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ dir = tree.bzrdir
+ target = self.sproutOrSkip(dir, self.get_url('target'), revision_id='1')
+ self.assertEqual(['1'], target.open_workingtree().get_parent_ids())
+
+ def test_sprout_takes_accelerator(self):
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ dir = tree.bzrdir
+ target = self.sproutOrSkip(dir, self.get_url('target'),
+ accelerator_tree=tree)
+ self.assertEqual(['2'], target.open_workingtree().get_parent_ids())
+
+ def test_sprout_branch_no_tree(self):
+ tree = self.make_branch_and_tree('source')
+ self.build_tree(['source/foo'])
+ tree.add('foo')
+ tree.commit('revision 1', rev_id='1')
+ tree.commit('revision 2', rev_id='2', allow_pointless=True)
+ dir = tree.bzrdir
+ try:
+ target = dir.sprout(self.get_url('target'),
+ create_tree_if_local=False)
+ except errors.MustHaveWorkingTree:
+ raise TestNotApplicable("control dir format requires working tree")
+ self.assertPathDoesNotExist('target/foo')
+ self.assertEqual(tree.branch.last_revision(),
+ target.open_branch().last_revision())
+
+ def test_sprout_with_revision_id_uses_default_stack_on(self):
+ # Make a branch with three commits to stack on.
+ builder = self.make_branch_builder('stack-on')
+ builder.start_series()
+ builder.build_commit(message='Rev 1.', rev_id='rev-1')
+ builder.build_commit(message='Rev 2.', rev_id='rev-2')
+ builder.build_commit(message='Rev 3.', rev_id='rev-3')
+ builder.finish_series()
+ stack_on = builder.get_branch()
+ # Make a bzrdir with a default stacking policy to stack on that branch.
+ config = self.make_bzrdir('policy-dir').get_config()
+ try:
+ config.set_default_stack_on(self.get_url('stack-on'))
+ except errors.BzrError:
+ raise TestNotApplicable('Only relevant for stackable formats.')
+ # Sprout the stacked-on branch into the bzrdir.
+ sprouted = stack_on.bzrdir.sprout(
+ self.get_url('policy-dir/sprouted'), revision_id='rev-3')
+ # Not all revisions are copied into the sprouted repository.
+ repo = sprouted.open_repository()
+ self.addCleanup(repo.lock_read().unlock)
+ self.assertEqual(None, repo.get_parent_map(['rev-1']).get('rev-1'))
+
+ def test_format_initialize_find_open(self):
+ # loopback test to check the current format initializes to itself.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ # for remote formats, there must be no prior assumption about the
+ # network name to use - it's possible that this may somehow have got
+ # in through an unisolated test though - see
+ # <https://bugs.launchpad.net/bzr/+bug/504102>
+ self.assertEquals(getattr(self.bzrdir_format,
+ '_network_name', None),
+ None)
+ # supported formats must be able to init and open
+ t = self.get_transport()
+ readonly_t = self.get_readonly_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ self.assertIsInstance(made_control, controldir.ControlDir)
+ if isinstance(self.bzrdir_format, RemoteBzrDirFormat):
+ return
+ self.assertEqual(self.bzrdir_format,
+ controldir.ControlDirFormat.find_format(readonly_t))
+ direct_opened_dir = self.bzrdir_format.open(readonly_t)
+ opened_dir = controldir.ControlDir.open(t.base)
+ self.assertEqual(made_control._format,
+ opened_dir._format)
+ self.assertEqual(direct_opened_dir._format,
+ opened_dir._format)
+ self.assertIsInstance(opened_dir, controldir.ControlDir)
+
+ def test_format_initialize_on_transport_ex(self):
+ t = self.get_transport('dir')
+ self.assertInitializeEx(t)
+
+ def test_format_initialize_on_transport_ex_use_existing_dir_True(self):
+ t = self.get_transport('dir')
+ t.ensure_base()
+ self.assertInitializeEx(t, use_existing_dir=True)
+
+ def test_format_initialize_on_transport_ex_use_existing_dir_False(self):
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport('dir')
+ t.ensure_base()
+ self.assertRaises(errors.FileExists,
+ self.bzrdir_format.initialize_on_transport_ex, t,
+ use_existing_dir=False)
+
+ def test_format_initialize_on_transport_ex_create_prefix_True(self):
+ t = self.get_transport('missing/dir')
+ self.assertInitializeEx(t, create_prefix=True)
+
+ def test_format_initialize_on_transport_ex_create_prefix_False(self):
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport('missing/dir')
+ self.assertRaises(errors.NoSuchFile, self.assertInitializeEx, t,
+ create_prefix=False)
+
+ def test_format_initialize_on_transport_ex_force_new_repo_True(self):
+ t = self.get_transport('repo')
+ repo_fmt = controldir.format_registry.make_bzrdir('1.9')
+ repo_name = repo_fmt.repository_format.network_name()
+ repo = repo_fmt.initialize_on_transport_ex(t,
+ repo_format_name=repo_name, shared_repo=True)[0]
+ made_repo, control = self.assertInitializeEx(t.clone('branch'),
+ force_new_repo=True, repo_format_name=repo_name)
+ self.assertNotEqual(repo.bzrdir.root_transport.base,
+ made_repo.bzrdir.root_transport.base)
+
+ def test_format_initialize_on_transport_ex_force_new_repo_False(self):
+ t = self.get_transport('repo')
+ repo_fmt = controldir.format_registry.make_bzrdir('1.9')
+ repo_name = repo_fmt.repository_format.network_name()
+ repo = repo_fmt.initialize_on_transport_ex(t,
+ repo_format_name=repo_name, shared_repo=True)[0]
+ made_repo, control = self.assertInitializeEx(t.clone('branch'),
+ force_new_repo=False, repo_format_name=repo_name)
+ if not control._format.fixed_components:
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ made_repo.bzrdir.root_transport.base)
+
+ def test_format_initialize_on_transport_ex_repo_fmt_name_None(self):
+ t = self.get_transport('dir')
+ repo, control = self.assertInitializeEx(t)
+ self.assertEqual(None, repo)
+
+ def test_format_initialize_on_transport_ex_repo_fmt_name_followed(self):
+ t = self.get_transport('dir')
+ # 1.6 is likely to never be default
+ fmt = controldir.format_registry.make_bzrdir('1.6')
+ repo_name = fmt.repository_format.network_name()
+ repo, control = self.assertInitializeEx(t, repo_format_name=repo_name)
+ if self.bzrdir_format.fixed_components:
+ # must stay with the all-in-one-format.
+ repo_name = self.bzrdir_format.network_name()
+ self.assertEqual(repo_name, repo._format.network_name())
+
+ def assertInitializeEx(self, t, **kwargs):
+ """Execute initialize_on_transport_ex and check it succeeded correctly.
+
+ This involves checking that the disk objects were created, that they
+ open with the same format as was returned, and that they have the
+ expected disk format.
+
+ :param t: The transport to initialize on.
+ :param **kwargs: Additional arguments to pass to
+ initialize_on_transport_ex.
+ :return: the resulting repo, control dir tuple.
+ """
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("control dir format is not "
+ "initializable")
+ repo, control, require_stacking, repo_policy = \
+ self.bzrdir_format.initialize_on_transport_ex(t, **kwargs)
+ if repo is not None:
+ # Repositories are opened write-locked
+ self.assertTrue(repo.is_write_locked())
+ self.addCleanup(repo.unlock)
+ self.assertIsInstance(control, controldir.ControlDir)
+ opened = controldir.ControlDir.open(t.base)
+ expected_format = self.bzrdir_format
+ if not isinstance(expected_format, RemoteBzrDirFormat):
+ self.assertEqual(control._format.network_name(),
+ expected_format.network_name())
+ self.assertEqual(control._format.network_name(),
+ opened._format.network_name())
+ self.assertEqual(control.__class__, opened.__class__)
+ return repo, control
+
+ def test_format_network_name(self):
+ # All control formats must have a network name.
+ dir = self.make_bzrdir('.')
+ format = dir._format
+ # We want to test that the network_name matches the actual format on
+ # disk. For local control dirs that means that using network_name as a
+ # key in the registry gives back the same format. For remote objects
+ # we check that the network_name of the RemoteBzrDirFormat we have
+ # locally matches the actual format present on disk.
+ if isinstance(format, RemoteBzrDirFormat):
+ dir._ensure_real()
+ real_dir = dir._real_bzrdir
+ network_name = format.network_name()
+ self.assertEqual(real_dir._format.network_name(), network_name)
+ else:
+ registry = controldir.network_format_registry
+ network_name = format.network_name()
+ looked_up_format = registry.get(network_name)
+ self.assertTrue(
+ issubclass(format.__class__, looked_up_format.__class__))
+ # The network name must be a byte string.
+ self.assertIsInstance(network_name, str)
+
+ def test_open_not_bzrdir(self):
+ # test the format's specific behaviour for no-content or similar dirs.
+ self.assertRaises(errors.NotBranchError,
+ self.bzrdir_format.open,
+ transport.get_transport_from_url(self.get_readonly_url()))
+
+ def test_create_branch(self):
+ # a bzrdir can construct a branch and repository for itself.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ made_branch = made_control.create_branch()
+ self.assertIsInstance(made_branch, bzrlib.branch.Branch)
+ self.assertEqual(made_control, made_branch.bzrdir)
+
+ def test_create_branch_append_revisions_only(self):
+ # a bzrdir can construct a branch and repository for itself.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ try:
+ made_branch = made_control.create_branch(
+ append_revisions_only=True)
+ except errors.UpgradeRequired:
+ raise TestNotApplicable("format does not support "
+ "append_revisions_only setting")
+ self.assertIsInstance(made_branch, bzrlib.branch.Branch)
+ self.assertEquals(True, made_branch.get_append_revisions_only())
+ self.assertEqual(made_control, made_branch.bzrdir)
+
+ def test_open_branch(self):
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ made_branch = made_control.create_branch()
+ opened_branch = made_control.open_branch()
+ self.assertEqual(made_control, opened_branch.bzrdir)
+ self.assertIsInstance(opened_branch, made_branch.__class__)
+ self.assertIsInstance(opened_branch._format, made_branch._format.__class__)
+
+ def test_list_branches(self):
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ made_branch = made_control.create_branch()
+ branches = made_control.list_branches()
+ self.assertEquals(1, len(branches))
+ self.assertEquals(made_branch.base, branches[0].base)
+ try:
+ made_control.destroy_branch()
+ except errors.UnsupportedOperation:
+ pass # Not all bzrdirs support destroying branches
+ else:
+ self.assertEquals([], made_control.list_branches())
+
+ def test_get_branches(self):
+ repo = self.make_repository('branch-1')
+ target_branch = repo.bzrdir.create_branch()
+ self.assertEqual([""], repo.bzrdir.get_branches().keys())
+
+ def test_create_repository(self):
+ # a bzrdir can construct a repository for itself.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ # Check that we have a repository object.
+ made_repo.has_revision('foo')
+ self.assertEqual(made_control, made_repo.bzrdir)
+
+ def test_create_repository_shared(self):
+ # a bzrdir can create a shared repository or
+ # fail appropriately
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ try:
+ made_repo = made_control.create_repository(shared=True)
+ except errors.IncompatibleFormat:
+ # Old bzrdir formats don't support shared repositories
+ # and should raise IncompatibleFormat
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ self.assertTrue(made_repo.is_shared())
+
+ def test_create_repository_nonshared(self):
+ # a bzrdir can create a non-shared repository
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ try:
+ made_repo = made_control.create_repository(shared=False)
+ except errors.IncompatibleFormat:
+ # Some control dir formats don't support non-shared repositories
+ # and should raise IncompatibleFormat
+ raise TestNotApplicable("format does not support shared "
+ "repositories")
+ self.assertFalse(made_repo.is_shared())
+
+ def test_open_repository(self):
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ opened_repo = made_control.open_repository()
+ self.assertEqual(made_control, opened_repo.bzrdir)
+ self.assertIsInstance(opened_repo, made_repo.__class__)
+ self.assertIsInstance(opened_repo._format, made_repo._format.__class__)
+
+ def test_create_workingtree(self):
+ # a bzrdir can construct a working tree for itself.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ made_branch = made_control.create_branch()
+ made_tree = self.createWorkingTreeOrSkip(made_control)
+ self.assertIsInstance(made_tree, workingtree.WorkingTree)
+ self.assertEqual(made_control, made_tree.bzrdir)
+
+ def test_create_workingtree_revision(self):
+ # a bzrdir can construct a working tree for itself @ a specific revision.
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ source = self.make_branch_and_tree('source')
+ source.commit('a', rev_id='a', allow_pointless=True)
+ source.commit('b', rev_id='b', allow_pointless=True)
+ t.mkdir('new')
+ t_new = t.clone('new')
+ made_control = self.bzrdir_format.initialize_on_transport(t_new)
+ source.branch.repository.clone(made_control)
+ source.branch.clone(made_control)
+ try:
+ made_tree = made_control.create_workingtree(revision_id='a')
+ except (errors.NotLocalUrl, errors.UnsupportedOperation):
+ raise TestSkipped("Can't make working tree on transport %r" % t)
+ self.assertEqual(['a'], made_tree.get_parent_ids())
+
+ def test_open_workingtree(self):
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ # this has to be tested with local access as we still support creating
+ # format 6 bzrdirs
+ t = self.get_transport()
+ try:
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ made_branch = made_control.create_branch()
+ made_tree = made_control.create_workingtree()
+ except (errors.NotLocalUrl, errors.UnsupportedOperation):
+ raise TestSkipped("Can't initialize %r on transport %r"
+ % (self.bzrdir_format, t))
+ opened_tree = made_control.open_workingtree()
+ self.assertEqual(made_control, opened_tree.bzrdir)
+ self.assertIsInstance(opened_tree, made_tree.__class__)
+ self.assertIsInstance(opened_tree._format, made_tree._format.__class__)
+
+ def test_get_selected_branch(self):
+ # The segment parameters are accessible from the root transport
+ # if a URL with segment parameters is opened.
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ try:
+ made_control = self.bzrdir_format.initialize(t.base)
+ except (errors.NotLocalUrl, errors.UnsupportedOperation):
+ raise TestSkipped("Can't initialize %r on transport %r"
+ % (self.bzrdir_format, t))
+ dir = controldir.ControlDir.open(t.base+",branch=foo")
+ self.assertEquals({"branch": "foo"},
+ dir.user_transport.get_segment_parameters())
+ self.assertEquals("foo", dir._get_selected_branch())
+
+ def test_get_selected_branch_none_selected(self):
+ # _get_selected_branch defaults to the empty string
+ if not self.bzrdir_format.is_initializable():
+ raise TestNotApplicable("format is not initializable")
+ t = self.get_transport()
+ try:
+ made_control = self.bzrdir_format.initialize(t.base)
+ except (errors.NotLocalUrl, errors.UnsupportedOperation):
+ raise TestSkipped("Can't initialize %r on transport %r"
+ % (self.bzrdir_format, t))
+ dir = controldir.ControlDir.open(t.base)
+ self.assertEqual(u"", dir._get_selected_branch())
+
+ def test_root_transport(self):
+ dir = self.make_bzrdir('.')
+ self.assertEqual(dir.root_transport.base,
+ self.get_transport().base)
+
+ def test_find_repository_no_repo_under_standalone_branch(self):
+ # finding a repo stops at standalone branches even if there is a
+ # higher repository available.
+ try:
+ repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ # need a shared repository to test this.
+ raise TestNotApplicable("requires shared repository support")
+ if not repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("requires nesting repositories")
+ url = self.get_url('intermediate')
+ t = self.get_transport()
+ t.mkdir('intermediate')
+ t.mkdir('intermediate/child')
+ made_control = self.bzrdir_format.initialize(url)
+ made_control.create_repository()
+ innermost_control = self.bzrdir_format.initialize(
+ self.get_url('intermediate/child'))
+ try:
+ child_repo = innermost_control.open_repository()
+ # if there is a repository, then the format cannot ever hit this
+ # code path.
+ return
+ except errors.NoRepositoryPresent:
+ pass
+ self.assertRaises(errors.NoRepositoryPresent,
+ innermost_control.find_repository)
+
+ def test_find_repository_containing_shared_repository(self):
+ # find repo inside a shared repo with an empty control dir
+ # returns the shared repo.
+ try:
+ repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ # need a shared repository to test this.
+ raise TestNotApplicable("requires format with shared repository "
+ "support")
+ if not repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("requires support for nesting "
+ "repositories")
+ url = self.get_url('childbzrdir')
+ self.get_transport().mkdir('childbzrdir')
+ made_control = self.bzrdir_format.initialize(url)
+ try:
+ child_repo = made_control.open_repository()
+ # if there is a repository, then the format cannot ever hit this
+ # code path.
+ return
+ except errors.NoRepositoryPresent:
+ pass
+ found_repo = made_control.find_repository()
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ found_repo.bzrdir.root_transport.base)
+
+ def test_find_repository_standalone_with_containing_shared_repository(self):
+ # find repo inside a standalone repo inside a shared repo finds the
+ # standalone repo
+ try:
+ containing_repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ # need a shared repository to test this.
+ raise TestNotApplicable("requires support for shared "
+ "repositories")
+ if not containing_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("format does not support "
+ "nesting repositories")
+ child_repo = self.make_repository('childrepo')
+ opened_control = controldir.ControlDir.open(self.get_url('childrepo'))
+ found_repo = opened_control.find_repository()
+ self.assertEqual(child_repo.bzrdir.root_transport.base,
+ found_repo.bzrdir.root_transport.base)
+
+ def test_find_repository_shared_within_shared_repository(self):
+ # find repo at a shared repo inside a shared repo finds the inner repo
+ try:
+ containing_repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ # need a shared repository to test this.
+ raise TestNotApplicable("requires support for shared "
+ "repositories")
+ if not containing_repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("requires support for nesting "
+ "repositories")
+ url = self.get_url('childrepo')
+ self.get_transport().mkdir('childrepo')
+ child_control = self.bzrdir_format.initialize(url)
+ child_repo = child_control.create_repository(shared=True)
+ opened_control = controldir.ControlDir.open(self.get_url('childrepo'))
+ found_repo = opened_control.find_repository()
+ self.assertEqual(child_repo.bzrdir.root_transport.base,
+ found_repo.bzrdir.root_transport.base)
+ self.assertNotEqual(child_repo.bzrdir.root_transport.base,
+ containing_repo.bzrdir.root_transport.base)
+
+ def test_find_repository_with_nested_dirs_works(self):
+ # find repo inside a bzrdir inside a bzrdir inside a shared repo
+ # finds the outer shared repo.
+ try:
+ repo = self.make_repository('.', shared=True)
+ except errors.IncompatibleFormat:
+ # need a shared repository to test this.
+ raise TestNotApplicable("requires support for shared "
+ "repositories")
+ if not repo._format.supports_nesting_repositories:
+ raise TestNotApplicable("requires support for nesting "
+ "repositories")
+ url = self.get_url('intermediate')
+ t = self.get_transport()
+ t.mkdir('intermediate')
+ t.mkdir('intermediate/child')
+ made_control = self.bzrdir_format.initialize(url)
+ try:
+ child_repo = made_control.open_repository()
+ # if there is a repository, then the format cannot ever hit this
+ # code path.
+ return
+ except errors.NoRepositoryPresent:
+ pass
+ innermost_control = self.bzrdir_format.initialize(
+ self.get_url('intermediate/child'))
+ try:
+ child_repo = innermost_control.open_repository()
+ # if there is a repository, then the format cannot ever hit this
+ # code path.
+ return
+ except errors.NoRepositoryPresent:
+ pass
+ found_repo = innermost_control.find_repository()
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ found_repo.bzrdir.root_transport.base)
+
+ def test_can_and_needs_format_conversion(self):
+ # check that we can ask an instance if it's upgradable
+ dir = self.make_bzrdir('.')
+ if dir.can_convert_format():
+ # if it's updatable by default there must be an updater
+ # (we force the latest known format as downgrades may not be
+ # available)
+ self.assertTrue(isinstance(dir._format.get_converter(
+ format=dir._format), controldir.Converter))
+ dir.needs_format_conversion(
+ controldir.ControlDirFormat.get_default_format())
+
+ def test_backup_copies_existing(self):
+ tree = self.make_branch_and_tree('test')
+ self.build_tree(['test/a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('some data to be copied.')
+ old_url, new_url = tree.bzrdir.backup_bzrdir()
+ old_path = urlutils.local_path_from_url(old_url)
+ new_path = urlutils.local_path_from_url(new_url)
+ self.assertPathExists(old_path)
+ self.assertPathExists(new_path)
+ for (((dir_relpath1, _), entries1),
+ ((dir_relpath2, _), entries2)) in izip(
+ osutils.walkdirs(old_path),
+ osutils.walkdirs(new_path)):
+ self.assertEquals(dir_relpath1, dir_relpath2)
+ for f1, f2 in zip(entries1, entries2):
+ self.assertEquals(f1[0], f2[0])
+ self.assertEquals(f1[2], f2[2])
+ if f1[2] == "file":
+ osutils.compare_files(open(f1[4]), open(f2[4]))
+
+ def test_upgrade_new_instance(self):
+ """Does an available updater work?"""
+ dir = self.make_bzrdir('.')
+ # for now, upgrade is not ready for partial bzrdirs.
+ dir.create_repository()
+ dir.create_branch()
+ self.createWorkingTreeOrSkip(dir)
+ if dir.can_convert_format():
+ # if it's updatable by default there must be an updater
+ # (we force the latest known format as downgrades may not be
+ # available)
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ dir._format.get_converter(format=dir._format).convert(dir, pb)
+ finally:
+ pb.finished()
+ # and it should pass 'check' now.
+ check.check_dwim(self.get_url('.'), False, True, True)
+
+ def test_format_description(self):
+ dir = self.make_bzrdir('.')
+ text = dir._format.get_format_description()
+ self.assertTrue(len(text))
+
+
+class TestBreakLock(TestCaseWithControlDir):
+
+ def test_break_lock_empty(self):
+ # break lock on an empty bzrdir should work silently.
+ dir = self.make_bzrdir('.')
+ try:
+ dir.break_lock()
+ except NotImplementedError:
+ pass
+
+ def test_break_lock_repository(self):
+ # break lock with just a repo should unlock the repo.
+ repo = self.make_repository('.')
+ repo.lock_write()
+ lock_repo = repo.bzrdir.open_repository()
+ if not lock_repo.get_physical_lock_status():
+ # This bzrdir's default repository does not physically lock things
+ # and thus this interaction cannot be tested at the interface
+ # level.
+ repo.unlock()
+ raise TestNotApplicable("format does not physically lock")
+ # only one yes needed here: it should only be unlocking
+ # the repo
+ bzrlib.ui.ui_factory = CannedInputUIFactory([True])
+ try:
+ repo.bzrdir.break_lock()
+ except NotImplementedError:
+ # this bzrdir does not implement break_lock - so we can't test it.
+ repo.unlock()
+ raise TestNotApplicable("format does not support breaking locks")
+ lock_repo.lock_write()
+ lock_repo.unlock()
+ self.assertRaises(errors.LockBroken, repo.unlock)
+
+ def test_break_lock_branch(self):
+ # break lock with just a branch should unlock the branch
+ # and not directly try the repository.
+ # we test this by making a branch reference to a branch
+ # and repository in another bzrdir
+ # for pre-metadir formats this will fail, that's ok.
+ master = self.make_branch('branch')
+ thisdir = self.make_bzrdir('this')
+ try:
+ thisdir.set_branch_reference(master)
+ except errors.IncompatibleFormat:
+ raise TestNotApplicable("format does not support "
+ "branch references")
+ unused_repo = thisdir.create_repository()
+ master.lock_write()
+ unused_repo.lock_write()
+ try:
+ # two yes's : branch and repository. If the repo in this
+ # dir is inappropriately accessed, 3 will be needed, and
+ # we'll see that because the stream will be fully consumed
+ bzrlib.ui.ui_factory = CannedInputUIFactory([True, True, True])
+ # determine if the repository will have been locked;
+ this_repo_locked = \
+ thisdir.open_repository().get_physical_lock_status()
+ master.bzrdir.break_lock()
+ if this_repo_locked:
+ # only two ys should have been read
+ self.assertEqual([True],
+ bzrlib.ui.ui_factory.responses)
+ else:
+ # only one y should have been read
+ self.assertEqual([True, True],
+ bzrlib.ui.ui_factory.responses)
+ # we should be able to lock a newly opened branch now
+ branch = master.bzrdir.open_branch()
+ branch.lock_write()
+ branch.unlock()
+ if this_repo_locked:
+ # we should not be able to lock the repository in thisdir as
+ # its still held by the explicit lock we took, and the break
+ # lock should not have touched it.
+ repo = thisdir.open_repository()
+ self.assertRaises(errors.LockContention, repo.lock_write)
+ finally:
+ unused_repo.unlock()
+ self.assertRaises(errors.LockBroken, master.unlock)
+
+ def test_break_lock_tree(self):
+ # break lock with a tree should unlock the tree but not try the
+ # branch explicitly. However this is very hard to test for as we
+ # don't have a tree reference class, nor is one needed;
+ # the worst case if this code unlocks twice is an extra question
+ # being asked.
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ # three yes's : tree, branch and repository.
+ bzrlib.ui.ui_factory = CannedInputUIFactory([True, True, True])
+ try:
+ tree.bzrdir.break_lock()
+ except (NotImplementedError, errors.LockActive):
+ # bzrdir does not support break_lock
+ # or one of the locked objects (currently only tree does this)
+ # raised a LockActive because we do still have a live locked
+ # object.
+ tree.unlock()
+ raise TestNotApplicable("format does not support breaking locks")
+ self.assertEqual([True],
+ bzrlib.ui.ui_factory.responses)
+ lock_tree = tree.bzrdir.open_workingtree()
+ lock_tree.lock_write()
+ lock_tree.unlock()
+ self.assertRaises(errors.LockBroken, tree.unlock)
+
+
+class TestTransportConfig(TestCaseWithControlDir):
+
+ def test_get_config(self):
+ my_dir = self.make_bzrdir('.')
+ config = my_dir.get_config()
+ try:
+ config.set_default_stack_on('http://example.com')
+ except errors.BzrError, e:
+ if 'Cannot set config' in str(e):
+ self.assertFalse(
+ isinstance(my_dir, (_mod_bzrdir.BzrDirMeta1, RemoteBzrDir)),
+ "%r should support configs" % my_dir)
+ raise TestNotApplicable(
+ 'This BzrDir format does not support configs.')
+ else:
+ raise
+ self.assertEqual('http://example.com', config.get_default_stack_on())
+ my_dir2 = controldir.ControlDir.open(self.get_url('.'))
+ config2 = my_dir2.get_config()
+ self.assertEqual('http://example.com', config2.get_default_stack_on())
+
+
+class ChrootedControlDirTests(ChrootedTestCase):
+
+ def test_find_repository_no_repository(self):
+ # loopback test to check that the current format correctly fails to
+ # find a shared repository.
+ if not self.bzrdir_format.is_initializable():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise TestNotApplicable("format is not initializable")
+ # supported formats must be able to init and open
+ # - do the vfs initialisation over the basic vfs transport
+ # XXX: TODO this should become a 'bzrdirlocation' api call.
+ url = self.get_vfs_only_url('subdir')
+ transport.get_transport_from_url(self.get_vfs_only_url()).mkdir('subdir')
+ made_control = self.bzrdir_format.initialize(self.get_url('subdir'))
+ try:
+ repo = made_control.open_repository()
+ # if there is a repository, then the format cannot ever hit this
+ # code path.
+ return
+ except errors.NoRepositoryPresent:
+ pass
+ made_control = controldir.ControlDir.open(self.get_readonly_url('subdir'))
+ self.assertRaises(errors.NoRepositoryPresent,
+ made_control.find_repository)
+
+
+class TestControlDirControlComponent(TestCaseWithControlDir):
+ """ControlDir implementations adequately implement ControlComponent."""
+
+ def test_urls(self):
+ bd = self.make_bzrdir('bd')
+ self.assertIsInstance(bd.user_url, str)
+ self.assertEqual(bd.user_url, bd.user_transport.base)
+ # for all current bzrdir implementations the user dir must be
+ # above the control dir but we might need to relax that?
+ self.assertEqual(bd.control_url.find(bd.user_url), 0)
+ self.assertEqual(bd.control_url, bd.control_transport.base)
diff --git a/bzrlib/tests/per_controldir/test_format.py b/bzrlib/tests/per_controldir/test_format.py
new file mode 100644
index 0000000..4536190
--- /dev/null
+++ b/bzrlib/tests/per_controldir/test_format.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for control directory formats."""
+
+from bzrlib import (
+ errors,
+ )
+
+from bzrlib.tests.per_controldir import TestCaseWithControlDir
+
+
+class TestControlDir(TestCaseWithControlDir):
+
+ def test_get_format_description(self):
+ self.assertIsInstance(self.bzrdir_format.get_format_description(),
+ str)
+
+ def test_is_supported(self):
+ self.assertIsInstance(self.bzrdir_format.is_supported(), bool)
+
+ def test_upgrade_recommended(self):
+ self.assertIsInstance(self.bzrdir_format.upgrade_recommended, bool)
+
+ def test_supports_transport(self):
+ self.assertIsInstance(
+ self.bzrdir_format.supports_transport(self.get_transport()), bool)
+
+ def test_check_support_status(self):
+ if not self.bzrdir_format.is_supported():
+ self.assertRaises(errors.UnsupportedFormatError,
+ self.bzrdir_format.check_support_status, False)
+ else:
+ self.bzrdir_format.check_support_status(True)
+ self.bzrdir_format.check_support_status(False)
diff --git a/bzrlib/tests/per_controldir/test_push.py b/bzrlib/tests/per_controldir/test_push.py
new file mode 100644
index 0000000..f63f87d
--- /dev/null
+++ b/bzrlib/tests/per_controldir/test_push.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrdir implementations - push."""
+
+from bzrlib.tests.per_controldir import (
+ TestCaseWithControlDir,
+ )
+
+
+class TestPush(TestCaseWithControlDir):
+
+ def create_simple_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('one', rev_id='r1')
+ return tree
+
+ def test_push_new_branch(self):
+ tree = self.create_simple_tree()
+ dir = self.make_repository('dir').bzrdir
+ result = dir.push_branch(tree.branch)
+ self.assertEquals(tree.branch, result.source_branch)
+ self.assertEquals(dir.open_branch().base, result.target_branch.base)
+ self.assertEquals(dir.open_branch().base,
+ tree.branch.get_push_location())
+
+ def test_push_new_empty(self):
+ tree = self.make_branch_and_tree('tree')
+ dir = self.make_repository('dir').bzrdir
+ result = dir.push_branch(tree.branch)
+ self.assertEquals(tree.branch.base, result.source_branch.base)
+ self.assertEquals(dir.open_branch().base,
+ result.target_branch.base)
+
+ def test_push_incremental(self):
+ tree = self.create_simple_tree()
+ dir = self.make_repository('dir').bzrdir
+ dir.push_branch(tree.branch)
+ self.build_tree(['tree/b'])
+ tree.add(['b'])
+ tree.commit('two', rev_id='r2')
+ result = dir.push_branch(tree.branch)
+ self.assertEquals(tree.last_revision(),
+ result.branch_push_result.new_revid)
+ self.assertEquals(2, result.branch_push_result.new_revno)
+ self.assertEquals(tree.branch.base, result.source_branch.base)
+ self.assertEquals(dir.open_branch().base, result.target_branch.base)
diff --git a/bzrlib/tests/per_controldir_colo/__init__.py b/bzrlib/tests/per_controldir_colo/__init__.py
new file mode 100644
index 0000000..8797886
--- /dev/null
+++ b/bzrlib/tests/per_controldir_colo/__init__.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""BzrDir implementation tests for colocated branch support.
+
+These tests check the conformance of the colocated branches support.
+All bzrdir formats are tested - those that do not support colocated branches
+have the test_unsupported tests run; the others have the test_supported tests
+run.
+"""
+
+from bzrlib.controldir import ControlDirFormat
+from bzrlib.tests import (
+ default_transport,
+ multiply_tests,
+ test_server,
+ )
+from bzrlib.tests.per_controldir import (
+ TestCaseWithControlDir,
+ make_scenarios,
+ )
+from bzrlib.transport import memory
+
+
+def load_tests(standard_tests, module, loader):
+ colo_supported_formats = []
+ colo_unsupported_formats = []
+ # This will always add scenarios using the smart server.
+ from bzrlib.remote import RemoteBzrDirFormat
+ for format in ControlDirFormat.known_formats():
+ if isinstance(format, RemoteBzrDirFormat):
+ continue
+ if format.colocated_branches:
+ colo_supported_formats.append(format)
+ else:
+ colo_unsupported_formats.append(format)
+ supported_scenarios = make_scenarios(default_transport, None, None,
+ colo_supported_formats)
+ unsupported_scenarios = make_scenarios(default_transport, None, None,
+ colo_unsupported_formats)
+ # test the remote server behaviour when backed with a MemoryTransport
+ # Once for the current version
+ unsupported_scenarios.extend(make_scenarios(
+ memory.MemoryServer,
+ test_server.SmartTCPServer_for_testing,
+ test_server.ReadonlySmartTCPServer_for_testing,
+ [(RemoteBzrDirFormat())],
+ name_suffix='-default'))
+ # And once with < 1.6 - the 'v2' protocol.
+ unsupported_scenarios.extend(make_scenarios(
+ memory.MemoryServer,
+ test_server.SmartTCPServer_for_testing_v2_only,
+ test_server.ReadonlySmartTCPServer_for_testing_v2_only,
+ [(RemoteBzrDirFormat())],
+ name_suffix='-v2'))
+
+ result = loader.suiteClass()
+ supported_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_controldir_colo.test_supported'])
+ unsupported_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_controldir_colo.test_unsupported'])
+ multiply_tests(supported_tests, supported_scenarios, result)
+ multiply_tests(unsupported_tests, unsupported_scenarios, result)
+ return result
diff --git a/bzrlib/tests/per_controldir_colo/test_supported.py b/bzrlib/tests/per_controldir_colo/test_supported.py
new file mode 100644
index 0000000..c6a9cc7
--- /dev/null
+++ b/bzrlib/tests/per_controldir_colo/test_supported.py
@@ -0,0 +1,158 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzr directories that support colocated branches."""
+
+from bzrlib.branch import Branch
+from bzrlib import (
+ errors,
+ tests,
+ urlutils,
+ )
+from bzrlib.tests import (
+ per_controldir,
+ )
+from bzrlib.tests.features import (
+ UnicodeFilenameFeature,
+ )
+
+
+class TestColocatedBranchSupport(per_controldir.TestCaseWithControlDir):
+
+ def test_destroy_colocated_branch(self):
+ branch = self.make_branch('branch')
+ bzrdir = branch.bzrdir
+ colo_branch = bzrdir.create_branch('colo')
+ try:
+ bzrdir.destroy_branch("colo")
+ except (errors.UnsupportedOperation, errors.TransportNotPossible):
+ raise tests.TestNotApplicable('Format does not support destroying branch')
+ self.assertRaises(errors.NotBranchError, bzrdir.open_branch,
+ "colo")
+
+ def test_create_colo_branch(self):
+ # a bzrdir can construct a branch and repository for itself.
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise tests.TestNotApplicable('Control dir format not supported')
+ t = self.get_transport()
+ try:
+ made_control = self.bzrdir_format.initialize(t.base)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable(
+ 'Control dir does not support creating new branches.')
+ made_control.create_repository()
+ made_branch = made_control.create_branch("colo")
+ self.assertIsInstance(made_branch, Branch)
+ self.assertEquals("colo", made_branch.name)
+ self.assertEqual(made_control, made_branch.bzrdir)
+
+ def test_open_by_url(self):
+ # colocated branches can be opened by their branch-specific URLs.
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise tests.TestNotApplicable('Control dir format not supported')
+ t = self.get_transport()
+ try:
+ made_control = self.bzrdir_format.initialize(t.base)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable(
+ 'Control dir does not support creating new branches.')
+ made_control.create_repository()
+ made_branch = made_control.create_branch(name="colo")
+ other_branch = made_control.create_branch(name="othercolo")
+ self.assertIsInstance(made_branch, Branch)
+ self.assertEqual(made_control, made_branch.bzrdir)
+ self.assertNotEqual(made_branch.user_url, other_branch.user_url)
+ self.assertNotEqual(made_branch.control_url, other_branch.control_url)
+ re_made_branch = Branch.open(made_branch.user_url)
+ self.assertEquals(re_made_branch.name, "colo")
+ self.assertEqual(made_branch.control_url, re_made_branch.control_url)
+ self.assertEqual(made_branch.user_url, re_made_branch.user_url)
+
+ def test_sprout_into_colocated(self):
+ # a bzrdir can construct a branch and repository for itself.
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise tests.TestNotApplicable('Control dir format not supported')
+ from_tree = self.make_branch_and_tree('from')
+ revid = from_tree.commit("rev1")
+ try:
+ other_branch = self.make_branch("to")
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable(
+ 'Control dir does not support creating new branches.')
+ to_dir = from_tree.bzrdir.sprout(
+ urlutils.join_segment_parameters(
+ other_branch.bzrdir.user_url, {"branch": "target"}))
+ to_branch = to_dir.open_branch(name="target")
+ self.assertEquals(revid, to_branch.last_revision())
+
+ def test_unicode(self):
+ self.requireFeature(UnicodeFilenameFeature)
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise tests.TestNotApplicable('Control dir format not supported')
+ t = self.get_transport()
+ try:
+ made_control = self.bzrdir_format.initialize(t.base)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable(
+ 'Control dir does not support creating new branches.')
+ made_control.create_repository()
+ made_branch = made_control.create_branch(name=u"col\xe9")
+ self.assertTrue(
+ u"col\xe9" in [b.name for b in made_control.list_branches()])
+ made_branch = Branch.open(made_branch.user_url)
+ self.assertEquals(u"col\xe9", made_branch.name)
+ made_control.destroy_branch(u"col\xe9")
+
+ def test_get_branches(self):
+ repo = self.make_repository('branch-1')
+ target_branch = repo.bzrdir.create_branch(name='foo')
+ self.assertEqual(['foo'], repo.bzrdir.get_branches().keys())
+ self.assertEqual(target_branch.base,
+ repo.bzrdir.get_branches()['foo'].base)
+
+ def test_branch_name_with_slash(self):
+ repo = self.make_repository('branch-1')
+ try:
+ target_branch = repo.bzrdir.create_branch(name='foo/bar')
+ except errors.InvalidBranchName:
+ raise tests.TestNotApplicable(
+ "format does not support branches with / in their name")
+ self.assertEqual(['foo/bar'], repo.bzrdir.get_branches().keys())
+ self.assertEqual(
+ target_branch.base, repo.bzrdir.open_branch(name='foo/bar').base)
+
+ def test_branch_reference(self):
+ referenced = self.make_branch('referenced')
+ repo = self.make_repository('repo')
+ try:
+ repo.bzrdir.set_branch_reference(referenced, name='foo')
+ except errors.IncompatibleFormat:
+ raise tests.TestNotApplicable(
+ 'Control dir does not support creating branch references.')
+ self.assertEquals(referenced.base,
+ repo.bzrdir.get_branch_reference('foo'))
diff --git a/bzrlib/tests/per_controldir_colo/test_unsupported.py b/bzrlib/tests/per_controldir_colo/test_unsupported.py
new file mode 100644
index 0000000..490f685
--- /dev/null
+++ b/bzrlib/tests/per_controldir_colo/test_unsupported.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bazaar control directories that do not support colocated branches.
+
+Colocated branch support is optional, and when it is not supported the methods
+and attributes added for colocated branch support should fail in known ways.
+"""
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.tests import (
+ per_controldir,
+ )
+
+
+class TestNoColocatedSupport(per_controldir.TestCaseWithControlDir):
+
+ def make_bzrdir_with_repo(self):
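+ """Create a control dir with a repository, or raise TestNotApplicable if the format cannot be initialized here."""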
+ # a bzrdir can construct a branch and repository for itself.
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ raise tests.TestNotApplicable('Control dir format not supported')
+ t = self.get_transport()
+ try:
+ made_control = self.make_bzrdir('.', format=self.bzrdir_format)
+ except errors.UninitializableFormat:
+ raise tests.TestNotApplicable('Control dir format not initializable')
+ self.assertEquals(made_control._format, self.bzrdir_format)
+ made_repo = made_control.create_repository()
+ return made_control
+
+ def test_destroy_colocated_branch(self):
+ branch = self.make_branch('branch')
+ # Colocated branches should not be supported *or*
+ # destroy_branch should not be supported at all
+ self.assertRaises(
+ (errors.NoColocatedBranchSupport, errors.UnsupportedOperation),
+ branch.bzrdir.destroy_branch, 'colo')
+
+ def test_create_colo_branch(self):
+ made_control = self.make_bzrdir_with_repo()
+ self.assertRaises(errors.NoColocatedBranchSupport,
+ made_control.create_branch, "colo")
+
+ def test_open_branch(self):
+ made_control = self.make_bzrdir_with_repo()
+ self.assertRaises(errors.NoColocatedBranchSupport,
+ made_control.open_branch, name="colo")
+
+ def test_get_branch_reference(self):
+ made_control = self.make_bzrdir_with_repo()
+ self.assertRaises(errors.NoColocatedBranchSupport,
+ made_control.get_branch_reference, "colo")
+
+ def test_set_branch_reference(self):
+ referenced = self.make_branch('referenced')
+ made_control = self.make_bzrdir_with_repo()
+ self.assertRaises(errors.NoColocatedBranchSupport,
+ made_control.set_branch_reference, referenced, name="colo")
+
+ def test_get_branches(self):
+ made_control = self.make_bzrdir_with_repo()
+ made_control.create_branch()
+ self.assertEqual(made_control.get_branches().keys(),
+ [""])
diff --git a/bzrlib/tests/per_foreign_vcs/__init__.py b/bzrlib/tests/per_foreign_vcs/__init__.py
new file mode 100644
index 0000000..78b5d55
--- /dev/null
+++ b/bzrlib/tests/per_foreign_vcs/__init__.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests specific to foreign branch implementations.
+
+"""
+
+from bzrlib import (
+ foreign,
+ tests,
+ )
+
+
+def vcs_scenarios():
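+ """Return one test scenario per registered foreign VCS, exposing its branch and repository formats and test factories."""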
+ scenarios = []
+ for name, vcs in foreign.foreign_vcs_registry.iteritems():
+ scenarios.append((vcs.__class__.__name__, {
+ "branch_factory": vcs.branch_format.get_foreign_tests_branch_factory(),
+ "repository_factory": vcs.repository_format.get_foreign_tests_repository_factory(),
+ "branch_format": vcs.branch_format,
+ "repository_format": vcs.repository_format,
+ }))
+ return scenarios
+
+
+def load_tests(standard_tests, module, loader):
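+ """Load the per-VCS test modules and multiply them across the foreign VCS scenarios."""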
+ result = loader.suiteClass()
+ per_vcs_mod_names = [
+ 'branch',
+ 'repository',
+ ]
+ sub_tests = loader.loadTestsFromModuleNames(
+ ['bzrlib.tests.per_foreign_vcs.test_' + name
+ for name in per_vcs_mod_names])
+ tests.multiply_tests(sub_tests, vcs_scenarios(), result)
+ return result
diff --git a/bzrlib/tests/per_foreign_vcs/test_branch.py b/bzrlib/tests/per_foreign_vcs/test_branch.py
new file mode 100644
index 0000000..68bdaa5
--- /dev/null
+++ b/bzrlib/tests/per_foreign_vcs/test_branch.py
@@ -0,0 +1,145 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests specific to Branch implementations that use foreign VCS'es."""
+
+
+from bzrlib.errors import (
+ IncompatibleFormat,
+ UnstackableBranchFormat,
+ )
+from bzrlib.revision import (
+ NULL_REVISION,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+
+
+class ForeignBranchFactory(object):
+ """Factory of branches for ForeignBranchTests."""
+
+ def make_empty_branch(self, transport):
+ """Create an empty branch with no commits in it."""
+ raise NotImplementedError(self.make_empty_branch)
+
+ def make_branch(self, transport):
+ """Create *some* branch, may be empty or not."""
+ return self.make_empty_branch(transport)
+
+
+class ForeignBranchTests(TestCaseWithTransport):
+ """Basic tests for foreign branch implementations.
+
+ These tests mainly make sure that the implementation covers the required
+ bits of the API and returns reasonable values.
+ """
+ branch_factory = None # Set to an instance of ForeignBranchFactory by scenario
+
+ def make_empty_branch(self):
+ return self.branch_factory.make_empty_branch(self.get_transport())
+
+ def make_branch(self):
+ return self.branch_factory.make_branch(self.get_transport())
+
+ def test_set_parent(self):
+ """Test that setting the parent works."""
+ branch = self.make_branch()
+ branch.set_parent("foobar")
+
+ def test_set_push_location(self):
+ """Test that setting the push location works."""
+ branch = self.make_branch()
+ branch.set_push_location("http://bar/bloe")
+
+ def test_repr_type(self):
+ branch = self.make_branch()
+ self.assertIsInstance(repr(branch), str)
+
+ def test_get_parent(self):
+ """Test that getting the parent location works, and returns None."""
+ # TODO: Allow this to be non-None when foreign branches add support
+ # for storing this URL.
+ branch = self.make_branch()
+ self.assertIs(None, branch.get_parent())
+
+ def test_get_push_location(self):
+ """Test that getting the push location works, and returns None."""
+ # TODO: Allow this to be non-None when foreign branches add support
+ # for storing this URL.
+ branch = self.make_branch()
+ self.assertIs(None, branch.get_push_location())
+
+ def test_attributes(self):
+ """Check that various required attributes are present."""
+ branch = self.make_branch()
+ self.assertIsNot(None, getattr(branch, "repository", None))
+ self.assertIsNot(None, getattr(branch, "mapping", None))
+ self.assertIsNot(None, getattr(branch, "_format", None))
+ self.assertIsNot(None, getattr(branch, "base", None))
+
+ def test__get_nick(self):
+ """Make sure _get_nick is implemented and returns a string."""
+ branch = self.make_branch()
+ self.assertIsInstance(branch._get_nick(local=False), str)
+ self.assertIsInstance(branch._get_nick(local=True), str)
+
+ def test_null_revid_revno(self):
+ """null: should return revno 0."""
+ branch = self.make_branch()
+ self.assertEquals(0, branch.revision_id_to_revno(NULL_REVISION))
+
+ def test_get_stacked_on_url(self):
+ """Test that get_stacked_on_url() behaves as expected.
+
+ Inter-Format stacking doesn't work yet, so all foreign implementations
+ should raise UnstackableBranchFormat at the moment.
+ """
+ branch = self.make_branch()
+ self.assertRaises(UnstackableBranchFormat,
+ branch.get_stacked_on_url)
+
+ def test_get_physical_lock_status(self):
+ branch = self.make_branch()
+ self.assertFalse(branch.get_physical_lock_status())
+
+ def test_last_revision_empty_branch(self):
+ branch = self.make_empty_branch()
+ self.assertEquals(NULL_REVISION, branch.last_revision())
+ self.assertEquals(0, branch.revno())
+ self.assertEquals((0, NULL_REVISION), branch.last_revision_info())
+
+
+class ForeignBranchFormatTests(TestCaseWithTransport):
+ """Basic tests for foreign branch format objects."""
+
+ branch_format = None # Set to a BranchFormat instance by adapter
+
+ def test_initialize(self):
+ """Test this format is not initializable.
+
+ Remote branches may be initializable on their own, but none currently
+ support living in .bzr/branch.
+ """
+ bzrdir = self.make_bzrdir('dir')
+ self.assertRaises(IncompatibleFormat, self.branch_format.initialize, bzrdir)
+
+ def test_get_format_description_type(self):
+ self.assertIsInstance(self.branch_format.get_format_description(), str)
+
+ def test_network_name(self):
+ self.assertIsInstance(self.branch_format.network_name(), str)
diff --git a/bzrlib/tests/per_foreign_vcs/test_repository.py b/bzrlib/tests/per_foreign_vcs/test_repository.py
new file mode 100644
index 0000000..55543b2
--- /dev/null
+++ b/bzrlib/tests/per_foreign_vcs/test_repository.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests specific to Repository implementations that use foreign VCS'es."""
+
+
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+
+
+class TestRepositoryFormat(TestCase):
+
+ def test_format_string(self):
+ self.assertRaises(NotImplementedError,
+ self.repository_format.get_format_string)
+
+ def test_network_name(self):
+ self.assertIsInstance(self.repository_format.network_name(),
+ str)
+
+ def test_format_description(self):
+ self.assertIsInstance(self.repository_format.get_format_description(),
+ str)
+
+
+class ForeignRepositoryFactory(object):
+ """Factory of repository for ForeignRepositoryTests."""
+
+ def make_repository(self, transport):
+ """Create a new, valid, repository. May or may not contain
+ data."""
+ raise NotImplementedError(self.make_repository)
+
+
+class ForeignRepositoryTests(TestCaseWithTransport):
+ """Basic tests for foreign repository implementations.
+
+ These tests mainly make sure that the implementation covers the required
+ bits of the API and returns semi-reasonable values, that are
+ at least of the expected types and in the expected ranges.
+ """
+
+ # XXX: Some of these tests could be moved into a common testcase for
+ # both native and foreign repositories.
+
+ repository_factory = None # Set to an instance of ForeignRepositoryFactory by the scenario
+
+ def make_repository(self):
+ return self.repository_factory.make_repository(self.get_transport())
+
+ def test_make_working_trees(self):
+ """Test that Repository.make_working_trees() returns a boolean."""
+ repo = self.make_repository()
+ self.assertIsInstance(repo.make_working_trees(), bool)
+
+ def test_get_physical_lock_status(self):
+ """Test that a new repository is not locked by default."""
+ repo = self.make_repository()
+ self.assertFalse(repo.get_physical_lock_status())
+
+ def test_is_shared(self):
+ """Test that is_shared() returns a bool."""
+ repo = self.make_repository()
+ self.assertIsInstance(repo.is_shared(), bool)
+
+ def test_gather_stats(self):
+ """Test that gather_stats() will at least return a dictionary
+ with the required keys."""
+ repo = self.make_repository()
+ stats = repo.gather_stats()
+ self.assertIsInstance(stats, dict)
diff --git a/bzrlib/tests/per_interbranch/__init__.py b/bzrlib/tests/per_interbranch/__init__.py
new file mode 100644
index 0000000..f3d984e
--- /dev/null
+++ b/bzrlib/tests/per_interbranch/__init__.py
@@ -0,0 +1,191 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""InterBranch implementation tests for bzr.
+
+These test the conformance of all the interbranch variations to the
+expected API including generally applicable corner cases.
+Specific tests for individual formats are in the tests for the formats
+themselves rather than in tests/per_interbranch/*.py.
+"""
+
+
+from bzrlib import (
+ branchbuilder,
+ )
+from bzrlib.branch import (
+ GenericInterBranch,
+ InterBranch,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ multiply_tests,
+ )
+
+
+def make_scenarios(test_list):
+ """Transform the input test list to a list of scenarios.
+
+ :param test_list: A list of tuples:
+ (interbranch_class, branch_format_from, branch_format_to).
+ """
+ result = []
+ for interbranch_class, branch_format_from, branch_format_to in test_list:
+ id = '%s,%s,%s' % (interbranch_class.__name__,
+ branch_format_from.__class__.__name__,
+ branch_format_to.__class__.__name__)
+ scenario = (id,
+ {
+ "branch_format_from": branch_format_from,
+ "interbranch_class": interbranch_class,
+ "branch_format_to": branch_format_to,
+ })
+ result.append(scenario)
+ return result
+
+
+def default_test_list():
+ """Generate the default list of interbranch permutations to test."""
+ result = []
+ # test each registered InterBranch optimiser with the branch format
+ # combinations it advertises for testing.
+ for optimiser_class in InterBranch._optimisers:
+ for format_from_test, format_to_test in \
+ optimiser_class._get_branch_formats_to_test():
+ result.append((optimiser_class, format_from_test, format_to_test))
+ # if there are specific combinations we want to use, we can add them
+ # here.
+ return result
+
+
+class TestCaseWithInterBranch(TestCaseWithTransport):
+
+ def make_from_branch(self, relpath):
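+ """Create a branch at relpath using the 'from' branch format."""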
+ return self.make_branch(relpath, format=self.branch_format_from._matchingbzrdir)
+
+ def make_from_branch_and_memory_tree(self, relpath):
+ """Create a branch on the default transport and a MemoryTree for it."""
+ self.assertEquals(
+ self.branch_format_from._matchingbzrdir.get_branch_format(),
+ self.branch_format_from)
+ return self.make_branch_and_memory_tree(
+ relpath, format=self.branch_format_from._matchingbzrdir)
+
+ def make_from_branch_and_tree(self, relpath):
+ """Create a branch on the default transport and a working tree for it."""
+ self.assertEquals(
+ self.branch_format_from._matchingbzrdir.get_branch_format(),
+ self.branch_format_from)
+ return self.make_branch_and_tree(relpath,
+ format=self.branch_format_from._matchingbzrdir)
+
+ def make_from_branch_builder(self, relpath):
+ self.assertEquals(
+ self.branch_format_from._matchingbzrdir.get_branch_format(),
+ self.branch_format_from)
+ return branchbuilder.BranchBuilder(self.get_transport(relpath),
+ format=self.branch_format_from._matchingbzrdir)
+
+ def make_to_branch(self, relpath):
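+ """Create a branch at relpath using the 'to' branch format."""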
+ self.assertEquals(
+ self.branch_format_to._matchingbzrdir.get_branch_format(),
+ self.branch_format_to)
+ return self.make_branch(relpath, format=self.branch_format_to._matchingbzrdir)
+
+ def make_to_branch_and_memory_tree(self, relpath):
+ """Create a branch on the default transport and a MemoryTree for it."""
+ self.assertEquals(
+ self.branch_format_to._matchingbzrdir.get_branch_format(),
+ self.branch_format_to)
+ return self.make_branch_and_memory_tree(
+ relpath, format=self.branch_format_to._matchingbzrdir)
+
+ def make_to_branch_and_tree(self, relpath):
+ """Create a branch on the default transport and a working tree for it."""
+ self.assertEquals(
+ self.branch_format_to._matchingbzrdir.get_branch_format(),
+ self.branch_format_to)
+ return self.make_branch_and_tree(relpath,
+ format=self.branch_format_to._matchingbzrdir)
+
+ def _sprout(self, origdir, to_url, format):
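+ """Copy origdir's branch to a new branch at to_url in the given control dir format, returning a working tree (or lightweight checkout) for it."""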
+ if format.supports_workingtrees:
+ newbranch = self.make_branch(to_url, format=format)
+ else:
+ newbranch = self.make_branch(to_url+".branch", format=format)
+ origbranch = origdir.open_branch()
+ newbranch.repository.fetch(origbranch.repository)
+ origbranch.copy_content_into(newbranch)
+ if format.supports_workingtrees:
+ wt = newbranch.bzrdir.create_workingtree()
+ else:
+ wt = newbranch.create_checkout(to_url, lightweight=True)
+ return wt
+
+ def sprout_to(self, origdir, to_url):
+ """Sprout a bzrdir, using to_format for the new branch."""
+ wt = self._sprout(origdir, to_url, self.branch_format_to._matchingbzrdir)
+ self.assertEquals(wt.branch._format, self.branch_format_to)
+ return wt.bzrdir
+
+ def sprout_from(self, origdir, to_url):
+ """Sprout a bzrdir, using from_format for the new bzrdir."""
+ wt = self._sprout(origdir, to_url,
+ self.branch_format_from._matchingbzrdir)
+ self.assertEquals(wt.branch._format, self.branch_format_from)
+ return wt.bzrdir
+
+
+class StubWithFormat(object):
+ """A stub object used to check that convenience methods call Inter's."""
+
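+ # Sentinel format object; StubMatchingInter.is_compatible matches on it.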
+ _format = object()
+
+
+class StubMatchingInter(object):
+ """An inter for tests.
+
+ This is not a subclass of InterBranch so that missing methods are caught
+ and added rather than actually trying to do something.
+ """
+
+ _uses = []
+
+ def __init__(self, source, target):
+ self.source = source
+ self.target = target
+
+ @classmethod
+ def is_compatible(klass, source, target):
+ return StubWithFormat._format in (source._format, target._format)
+
+ def copy_content_into(self, *args, **kwargs):
+ self.__class__._uses.append(
+ (self, 'copy_content_into', args, kwargs))
+
+
+def load_tests(standard_tests, module, loader):
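+ """Multiply the per_interbranch test modules across all InterBranch scenarios."""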
+ submod_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_interbranch.test_fetch',
+ 'bzrlib.tests.per_interbranch.test_get',
+ 'bzrlib.tests.per_interbranch.test_copy_content_into',
+ 'bzrlib.tests.per_interbranch.test_pull',
+ 'bzrlib.tests.per_interbranch.test_push',
+ ])
+ scenarios = make_scenarios(default_test_list())
+ return multiply_tests(submod_tests, scenarios, standard_tests)
diff --git a/bzrlib/tests/per_interbranch/test_copy_content_into.py b/bzrlib/tests/per_interbranch/test_copy_content_into.py
new file mode 100644
index 0000000..d957dce
--- /dev/null
+++ b/bzrlib/tests/per_interbranch/test_copy_content_into.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.branch.InterBranch.copy_content_into."""
+
+from bzrlib import branch
+from bzrlib.tests.per_interbranch import (
+ StubMatchingInter,
+ StubWithFormat,
+ TestCaseWithInterBranch,
+ )
+
+
+class TestCopyContentInto(TestCaseWithInterBranch):
+
+ def test_contract_convenience_method(self):
+ self.tree1 = self.make_from_branch_and_tree('tree1')
+ rev1 = self.tree1.commit('one')
+ branch2 = self.make_to_branch('tree2')
+ branch2.repository.fetch(self.tree1.branch.repository)
+ self.tree1.branch.copy_content_into(branch2, revision_id=rev1)
+
+ def test_inter_is_used(self):
+ self.tree1 = self.make_from_branch_and_tree('tree1')
+ self.addCleanup(branch.InterBranch.unregister_optimiser,
+ StubMatchingInter)
+ branch.InterBranch.register_optimiser(StubMatchingInter)
+ del StubMatchingInter._uses[:]
+ self.tree1.branch.copy_content_into(StubWithFormat(), revision_id='54')
+ self.assertLength(1, StubMatchingInter._uses)
+ use = StubMatchingInter._uses[0]
+ self.assertEqual('copy_content_into', use[1])
+ self.assertEqual('54', use[3]['revision_id'])
+ del StubMatchingInter._uses[:]
diff --git a/bzrlib/tests/per_interbranch/test_fetch.py b/bzrlib/tests/per_interbranch/test_fetch.py
new file mode 100644
index 0000000..aba9c3b
--- /dev/null
+++ b/bzrlib/tests/per_interbranch/test_fetch.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for InterBranch.fetch."""
+
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests.per_interbranch import (
+ TestCaseWithInterBranch,
+ )
+
+
+class TestInterBranchFetch(TestCaseWithInterBranch):
+
+ def test_fetch_revisions(self):
+ """Test fetch-revision operation."""
+ wt = self.make_from_branch_and_tree('b1')
+ b1 = wt.branch
+ self.build_tree_contents([('b1/foo', 'hello')])
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('lala!', rev_id='revision-1', allow_pointless=False)
+
+ b2 = self.make_to_branch('b2')
+ b2.fetch(b1)
+
+ # fetch does not update the last revision
+ self.assertEquals(NULL_REVISION, b2.last_revision())
+
+ rev = b2.repository.get_revision('revision-1')
+ tree = b2.repository.revision_tree('revision-1')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(tree.get_file_text('foo-id'), 'hello')
+
+ def test_fetch_revisions_limit(self):
+ """Test fetch-revision operation."""
+ builder = self.make_branch_builder('b1',
+ format=self.branch_format_from._matchingbzrdir)
+ builder.start_series()
+ builder.build_commit(rev_id='revision-1')
+ builder.build_commit(rev_id='revision-2')
+ builder.build_commit(rev_id='revision-3')
+ builder.finish_series()
+ b1 = builder.get_branch()
+ b2 = self.make_to_branch('b2')
+ b2.fetch(b1, limit=1)
+
+ # fetch does not update the last revision
+ self.assertEquals(NULL_REVISION, b2.last_revision())
+
+ self.assertEquals(
+ set(['revision-1']),
+ b2.repository.has_revisions(
+ ['revision-1', 'revision-2', 'revision-3']))
+
+ def test_fetch_revisions_limit_incremental(self):
+ """Test incremental fetch-revision operation with limit."""
+ wt = self.make_from_branch_and_tree('b1')
+ b1 = wt.branch
+ self.build_tree_contents([('b1/foo', 'hello')])
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('lala!', rev_id='revision-1', allow_pointless=False)
+
+ b2 = self.make_to_branch('b2')
+ b2.fetch(b1, limit=1)
+
+ self.assertEquals(
+ set(['revision-1']),
+ b2.repository.has_revisions(
+ ['revision-1', 'revision-2', 'revision-3']))
+
+ wt.commit('hmm', rev_id='revision-2')
+ wt.commit('hmmm', rev_id='revision-3')
+
+ b2.fetch(b1, limit=1)
+
+ # fetch does not update the last revision
+ self.assertEquals(NULL_REVISION, b2.last_revision())
+
+ self.assertEquals(
+ set(['revision-1', 'revision-2']),
+ b2.repository.has_revisions(
+ ['revision-1', 'revision-2', 'revision-3']))
diff --git a/bzrlib/tests/per_interbranch/test_get.py b/bzrlib/tests/per_interbranch/test_get.py
new file mode 100644
index 0000000..1b848f2
--- /dev/null
+++ b/bzrlib/tests/per_interbranch/test_get.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.branch.InterBranch.get."""
+
+from bzrlib import (
+ branch,
+ )
+from bzrlib.tests.per_interbranch import (
+ TestCaseWithInterBranch,
+ )
+
+
+class TestInterBranchGet(TestCaseWithInterBranch):
+
+ def test_gets_right_inter(self):
+ self.tree1 = self.make_from_branch_and_tree('tree1')
+ branch2 = self.make_to_branch('tree2')
+ self.assertIs(branch.InterBranch.get(
+ self.tree1.branch, branch2).__class__,
+ self.interbranch_class)
diff --git a/bzrlib/tests/per_interbranch/test_pull.py b/bzrlib/tests/per_interbranch/test_pull.py
new file mode 100644
index 0000000..5d56d93
--- /dev/null
+++ b/bzrlib/tests/per_interbranch/test_pull.py
@@ -0,0 +1,210 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for InterBranch.pull behaviour."""
+
+from bzrlib.branch import Branch
+from bzrlib.controldir import ControlDir
+from bzrlib import errors
+from bzrlib.memorytree import MemoryTree
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests.per_interbranch import TestCaseWithInterBranch
+
+
+# The tests here are based on the tests in
+# bzrlib.tests.per_branch.test_pull
+
+
+class TestPull(TestCaseWithInterBranch):
+
+ def test_pull_convergence_simple(self):
+ # when revisions are pulled, the left-most accessible parents must
+ # become the revision-history.
+ parent = self.make_from_branch_and_tree('parent')
+ parent.commit('1st post', rev_id='P1', allow_pointless=True)
+ mine = self.sprout_to(parent.bzrdir, 'mine').open_workingtree()
+ mine.commit('my change', rev_id='M1', allow_pointless=True)
+ parent.merge_from_branch(mine.branch)
+ parent.commit('merge my change', rev_id='P2')
+ mine.pull(parent.branch)
+ self.assertEqual('P2', mine.branch.last_revision())
+
+ def test_pull_merged_indirect(self):
+ # it should be possible to do a pull from one branch into another
+ # when the tip of the target was merged into the source branch
+ # via a third branch - so it's buried in the ancestry and is not
+ # directly accessible.
+ parent = self.make_from_branch_and_tree('parent')
+ parent.commit('1st post', rev_id='P1', allow_pointless=True)
+ mine = self.sprout_to(parent.bzrdir, 'mine').open_workingtree()
+ mine.commit('my change', rev_id='M1', allow_pointless=True)
+ other = self.sprout_to(parent.bzrdir, 'other').open_workingtree()
+ other.merge_from_branch(mine.branch)
+ other.commit('merge my change', rev_id='O2')
+ parent.merge_from_branch(other.branch)
+ parent.commit('merge other', rev_id='P2')
+ mine.pull(parent.branch)
+ self.assertEqual('P2', mine.branch.last_revision())
+
+ def test_pull_updates_checkout_and_master(self):
+ """Pulling into a checkout updates the checkout and the master branch"""
+ master_tree = self.make_from_branch_and_tree('master')
+ rev1 = master_tree.commit('master')
+ checkout = master_tree.branch.create_checkout('checkout')
+ other = self.sprout_to(master_tree.branch.bzrdir, 'other').open_workingtree()
+ rev2 = other.commit('other commit')
+ # now pull, which should update both checkout and master.
+ checkout.branch.pull(other.branch)
+ self.assertEqual(rev2, checkout.branch.last_revision())
+ self.assertEqual(rev2, master_tree.branch.last_revision())
+
+ def test_pull_raises_specific_error_on_master_connection_error(self):
+ master_tree = self.make_from_branch_and_tree('master')
+ checkout = master_tree.branch.create_checkout('checkout')
+ other = self.sprout_to(master_tree.branch.bzrdir, 'other').open_branch()
+ # move the branch out of the way on disk to cause a connection
+ # error.
+ master_tree.branch.bzrdir.destroy_branch()
+ # try to pull, which should raise a BoundBranchConnectionFailure.
+ self.assertRaises(errors.BoundBranchConnectionFailure,
+ checkout.branch.pull, other)
+
+ def test_pull_returns_result(self):
+ parent = self.make_from_branch_and_tree('parent')
+ parent.commit('1st post', rev_id='P1')
+ mine = self.sprout_to(parent.bzrdir, 'mine').open_workingtree()
+ mine.commit('my change', rev_id='M1')
+ result = parent.branch.pull(mine.branch)
+ self.assertIsNot(None, result)
+ self.assertIs(mine.branch, result.source_branch)
+ self.assertIs(parent.branch, result.target_branch)
+ self.assertIs(parent.branch, result.master_branch)
+ self.assertIs(None, result.local_branch)
+ self.assertEqual(1, result.old_revno)
+ self.assertEqual('P1', result.old_revid)
+ self.assertEqual(2, result.new_revno)
+ self.assertEqual('M1', result.new_revid)
+ self.assertEqual([], result.tag_conflicts)
+
+ def test_pull_overwrite(self):
+ tree_a = self.make_from_branch_and_tree('tree_a')
+ tree_a.commit('message 1')
+ tree_b = self.sprout_to(tree_a.bzrdir, 'tree_b').open_workingtree()
+ tree_a.commit('message 2', rev_id='rev2a')
+ tree_b.commit('message 2', rev_id='rev2b')
+ self.assertRaises(errors.DivergedBranches, tree_a.pull, tree_b.branch)
+ self.assertRaises(errors.DivergedBranches,
+ tree_a.branch.pull, tree_b.branch,
+ overwrite=False, stop_revision='rev2b')
+ # It should not have updated the branch tip, but it should have fetched
+ # the revision if the repository supports "invisible" revisions.
+ self.assertEqual('rev2a', tree_a.branch.last_revision())
+ if tree_a.branch.repository._format.supports_unreferenced_revisions:
+ self.assertTrue(tree_a.branch.repository.has_revision('rev2b'))
+ tree_a.branch.pull(tree_b.branch, overwrite=True,
+ stop_revision='rev2b')
+ self.assertEqual('rev2b', tree_a.branch.last_revision())
+ self.assertEqual(tree_b.branch.last_revision(),
+ tree_a.branch.last_revision())
+
+
+class TestPullHook(TestCaseWithInterBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ TestCaseWithInterBranch.setUp(self)
+
+ def capture_post_pull_hook(self, result):
+ """Capture post pull hook calls to self.hook_calls.
+
+ The call is logged, as is some state of the two branches.
+ """
+ if result.local_branch:
+ local_locked = result.local_branch.is_locked()
+ local_base = result.local_branch.base
+ else:
+ local_locked = None
+ local_base = None
+ self.hook_calls.append(
+ ('post_pull', result.source_branch, local_base,
+ result.master_branch.base, result.old_revno,
+ result.old_revid,
+ result.new_revno, result.new_revid,
+ result.source_branch.is_locked(), local_locked,
+ result.master_branch.is_locked()))
+
+ def test_post_pull_empty_history(self):
+ target = self.make_to_branch('target')
+ source = self.make_from_branch('source')
+ Branch.hooks.install_named_hook('post_pull',
+ self.capture_post_pull_hook, None)
+ target.pull(source)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_pull', source, None, target.base, 0, NULL_REVISION,
+ 0, NULL_REVISION, True, None, True)
+ ],
+ self.hook_calls)
+
+ def test_post_pull_bound_branch(self):
+ # pulling to a bound branch should pass in the master branch to the
+ # hook, allowing the correct number of emails to be sent, while still
+ # allowing hooks that want to modify the target to do so to both
+ # instances.
+ target = self.make_to_branch('target')
+ local = self.make_from_branch('local')
+ try:
+ local.bind(target)
+ except errors.UpgradeRequired:
+ # We can't bind this format to itself - typically it is the local
+ # branch that doesn't support binding. As of May 2007
+ # remotebranches can't be bound. Let's instead make a new local
+ # branch of the default type, which does allow binding.
+ # See https://bugs.launchpad.net/bzr/+bug/112020
+ local = ControlDir.create_branch_convenience('local2')
+ local.bind(target)
+ source = self.make_from_branch('source')
+ Branch.hooks.install_named_hook('post_pull',
+ self.capture_post_pull_hook, None)
+ local.pull(source)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_pull', source, local.base, target.base, 0, NULL_REVISION,
+ 0, NULL_REVISION, True, True, True)
+ ],
+ self.hook_calls)
+
+ def test_post_pull_nonempty_history(self):
+ target = self.make_to_branch_and_memory_tree('target')
+ target.lock_write()
+ target.add('')
+ rev1 = target.commit('rev 1')
+ target.unlock()
+ sourcedir = target.bzrdir.clone(self.get_url('source'))
+ source = MemoryTree.create_on_branch(sourcedir.open_branch())
+ rev2 = source.commit('rev 2')
+ Branch.hooks.install_named_hook('post_pull',
+ self.capture_post_pull_hook, None)
+ target.branch.pull(source.branch)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_pull', source.branch, None, target.branch.base, 1, rev1,
+ 2, rev2, True, None, True)
+ ],
+ self.hook_calls)
diff --git a/bzrlib/tests/per_interbranch/test_push.py b/bzrlib/tests/per_interbranch/test_push.py
new file mode 100644
index 0000000..47cbc34
--- /dev/null
+++ b/bzrlib/tests/per_interbranch/test_push.py
@@ -0,0 +1,379 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for branch.push behaviour."""
+
+from cStringIO import StringIO
+
+from testtools.matchers import (
+ Equals,
+ MatchesAny,
+ )
+
+from bzrlib import (
+ branch,
+ check,
+ controldir,
+ errors,
+ push,
+ symbol_versioning,
+ tests,
+ vf_repository,
+ )
+from bzrlib.branch import Branch
+from bzrlib.controldir import ControlDir
+from bzrlib.memorytree import MemoryTree
+from bzrlib.revision import NULL_REVISION
+from bzrlib.smart.repository import SmartServerRepositoryGetParentMap
+from bzrlib.tests.per_interbranch import (
+ TestCaseWithInterBranch,
+ )
+from bzrlib.tests import test_server
+
+
+# These tests are based on similar tests in
+# bzrlib.tests.per_branch.test_push.
+
+
+class TestPush(TestCaseWithInterBranch):
+
+ def test_push_convergence_simple(self):
+ # when revisions are pushed, the left-most accessible parents must
+ # become the revision-history.
+ mine = self.make_from_branch_and_tree('mine')
+ mine.commit('1st post', rev_id='P1', allow_pointless=True)
+ other = self.sprout_to(mine.bzrdir, 'other').open_workingtree()
+ other.commit('my change', rev_id='M1', allow_pointless=True)
+ mine.merge_from_branch(other.branch)
+ mine.commit('merge my change', rev_id='P2')
+ result = mine.branch.push(other.branch)
+ self.assertEqual('P2', other.branch.last_revision())
+ # result object contains some structured data
+ self.assertEqual(result.old_revid, 'M1')
+ self.assertEqual(result.new_revid, 'P2')
+
+ def test_push_merged_indirect(self):
+ # it should be possible to do a push from one branch into another
+ # when the tip of the target was merged into the source branch
+ # via a third branch - so it's buried in the ancestry and is not
+ # directly accessible.
+ mine = self.make_from_branch_and_tree('mine')
+ mine.commit('1st post', rev_id='P1', allow_pointless=True)
+ target = self.sprout_to(mine.bzrdir, 'target').open_workingtree()
+ target.commit('my change', rev_id='M1', allow_pointless=True)
+ other = self.sprout_to(mine.bzrdir, 'other').open_workingtree()
+ other.merge_from_branch(target.branch)
+ other.commit('merge my change', rev_id='O2')
+ mine.merge_from_branch(other.branch)
+ mine.commit('merge other', rev_id='P2')
+ mine.branch.push(target.branch)
+ self.assertEqual('P2', target.branch.last_revision())
+
+ def test_push_to_checkout_updates_master(self):
+ """Pushing into a checkout updates the checkout and the master branch"""
+ master_tree = self.make_to_branch_and_tree('master')
+ checkout = self.make_to_branch_and_tree('checkout')
+ try:
+ checkout.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+ # can't bind this format, the test is irrelevant.
+ return
+ rev1 = checkout.commit('master')
+
+ other_bzrdir = self.sprout_from(master_tree.branch.bzrdir, 'other')
+ other = other_bzrdir.open_workingtree()
+ rev2 = other.commit('other commit')
+ # now push, which should update both checkout and master.
+ other.branch.push(checkout.branch)
+ self.assertEqual(rev2, checkout.branch.last_revision())
+ self.assertEqual(rev2, master_tree.branch.last_revision())
+
+ def test_push_raises_specific_error_on_master_connection_error(self):
+ master_tree = self.make_to_branch_and_tree('master')
+ checkout = self.make_to_branch_and_tree('checkout')
+ try:
+ checkout.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+ # can't bind this format, the test is irrelevant.
+ return
+ other_bzrdir = self.sprout_from(master_tree.branch.bzrdir, 'other')
+ other = other_bzrdir.open_workingtree()
+ # move the branch out of the way on disk to cause a connection
+ # error.
+ master_tree.bzrdir.destroy_branch()
+ # try to push, which should raise a BoundBranchConnectionFailure.
+ self.assertRaises(errors.BoundBranchConnectionFailure,
+ other.branch.push, checkout.branch)
+
+ def test_push_uses_read_lock(self):
+ """Push should only need a read lock on the source side."""
+ source = self.make_from_branch_and_tree('source')
+ target = self.make_to_branch('target')
+
+ self.build_tree(['source/a'])
+ source.add(['a'])
+ source.commit('a')
+
+ source.branch.lock_read()
+ try:
+ target.lock_write()
+ try:
+ source.branch.push(target, stop_revision=source.last_revision())
+ finally:
+ target.unlock()
+ finally:
+ source.branch.unlock()
+
+ def test_push_within_repository(self):
+ """Push from one branch to another inside the same repository."""
+ try:
+ repo = self.make_repository('repo', shared=True)
+ except (errors.IncompatibleFormat, errors.UninitializableFormat):
+ # This Branch format cannot create shared repositories
+ return
+ # This is a little bit trickier because make_from_branch_and_tree will not
+ # re-use a shared repository.
+ try:
+ a_branch = self.make_from_branch('repo/tree')
+ except (errors.UninitializableFormat):
+ # Cannot create these branches
+ return
+ try:
+ tree = a_branch.bzrdir.create_workingtree()
+ except errors.UnsupportedOperation:
+ self.assertFalse(a_branch.bzrdir._format.supports_workingtrees)
+ tree = a_branch.create_checkout('repo/tree', lightweight=True)
+ except errors.NotLocalUrl:
+ if self.vfs_transport_factory is test_server.LocalURLServer:
+ # the branch is colocated on disk, so we cannot create a checkout.
+ # hopefully callers will expect this.
+ local_controldir = controldir.ControlDir.open(self.get_vfs_only_url('repo/tree'))
+ tree = local_controldir.create_workingtree()
+ else:
+ tree = a_branch.create_checkout('repo/tree', lightweight=True)
+ self.build_tree(['repo/tree/a'])
+ tree.add(['a'])
+ tree.commit('a')
+
+ to_branch = self.make_to_branch('repo/branch')
+ tree.branch.push(to_branch)
+
+ self.assertEqual(tree.branch.last_revision(),
+ to_branch.last_revision())
+
+ def test_push_overwrite_of_non_tip_with_stop_revision(self):
+ """Combining the stop_revision and overwrite options works.
+
+ This was <https://bugs.launchpad.net/bzr/+bug/234229>.
+ """
+ source = self.make_from_branch_and_tree('source')
+ target = self.make_to_branch('target')
+
+ source.commit('1st commit')
+ source.branch.push(target)
+ source.commit('2nd commit', rev_id='rev-2')
+ source.commit('3rd commit')
+
+ source.branch.push(target, stop_revision='rev-2', overwrite=True)
+ self.assertEqual('rev-2', target.last_revision())
+
+ def test_push_with_default_stacking_does_not_create_broken_branch(self):
+ """Pushing a new standalone branch works even when there's a default
+ stacking policy at the destination.
+
+ The new branch will preserve the repo format (even if it isn't the
+ default for the branch), and will be stacked when the repo format
+ allows (which means that the branch format isn't necessarily preserved).
+ """
+ if isinstance(self.branch_format_from, branch.BranchReferenceFormat):
+ # This test could in principle apply to BranchReferenceFormat, but
+ # make_branch_builder doesn't support it.
+ raise tests.TestSkipped(
+ "BranchBuilder can't make reference branches.")
+ # Make a branch called "local" in a stackable repository
+ # The branch has 3 revisions:
+ # - rev-1, adds a file
+ # - rev-2, no changes
+ # - rev-3, modifies the file.
+ repo = self.make_repository('repo', shared=True, format='1.6')
+ builder = self.make_from_branch_builder('repo/local')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('filename', 'f-id', 'file', 'content\n'))])
+ builder.build_snapshot('rev-2', ['rev-1'], [])
+ builder.build_snapshot('rev-3', ['rev-2'],
+ [('modify', ('f-id', 'new-content\n'))])
+ builder.finish_series()
+ trunk = builder.get_branch()
+ # Sprout rev-1 to "trunk", so that we can stack on it.
+ trunk.bzrdir.sprout(self.get_url('trunk'), revision_id='rev-1')
+ # Set a default stacking policy so that new branches will automatically
+ # stack on trunk.
+ self.make_bzrdir('.').get_config().set_default_stack_on('trunk')
+ # Push rev-2 to a new branch "remote". It will be stacked on "trunk".
+ output = StringIO()
+ push._show_push_branch(trunk, 'rev-2', self.get_url('remote'), output)
+ # Push rev-3 onto "remote". If "remote" not stacked and is missing the
+ # fulltext record for f-id @ rev-1, then this will fail.
+ remote_branch = Branch.open(self.get_url('remote'))
+ trunk.push(remote_branch)
+ check.check_dwim(remote_branch.base, False, True, True)
+
+ def test_no_get_parent_map_after_insert_stream(self):
+ # Effort test for bug 331823
+ self.setup_smart_server_with_call_log()
+ # Make a local branch with four revisions. Four revisions because:
+ # one to push, one there for _walk_to_common_revisions to find, one we
+ # don't want to access, one for luck :)
+ if isinstance(self.branch_format_from, branch.BranchReferenceFormat):
+ # This test could in principle apply to BranchReferenceFormat, but
+ # make_branch_builder doesn't support it.
+ raise tests.TestSkipped(
+ "BranchBuilder can't make reference branches.")
+ try:
+ builder = self.make_from_branch_builder('local')
+ except (errors.TransportNotPossible, errors.UninitializableFormat):
+ raise tests.TestNotApplicable('format not directly constructable')
+ builder.start_series()
+ builder.build_snapshot('first', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('second', ['first'], [])
+ builder.build_snapshot('third', ['second'], [])
+ builder.build_snapshot('fourth', ['third'], [])
+ builder.finish_series()
+ local = branch.Branch.open(self.get_vfs_only_url('local'))
+ # Initial push of three revisions
+ remote_bzrdir = local.bzrdir.sprout(
+ self.get_url('remote'), revision_id='third')
+ remote = remote_bzrdir.open_branch()
+ # Push fourth revision
+ self.reset_smart_call_log()
+ self.disableOptimisticGetParentMap()
+ self.assertFalse(local.is_locked())
+ local.push(remote)
+ hpss_call_names = [item.call.method for item in self.hpss_calls]
+ self.assertTrue('Repository.insert_stream_1.19' in hpss_call_names)
+ insert_stream_idx = hpss_call_names.index(
+ 'Repository.insert_stream_1.19')
+ calls_after_insert_stream = hpss_call_names[insert_stream_idx:]
+ # After inserting the stream the client has no reason to query the
+ # remote graph any further.
+ bzr_core_trace = Equals(
+ ['Repository.insert_stream_1.19', 'Repository.insert_stream_1.19',
+ 'Branch.set_last_revision_info', 'Branch.unlock'])
+ bzr_loom_trace = Equals(
+ ['Repository.insert_stream_1.19', 'Repository.insert_stream_1.19',
+ 'Branch.set_last_revision_info', 'get', 'Branch.unlock'])
+ self.assertThat(calls_after_insert_stream,
+ MatchesAny(bzr_core_trace, bzr_loom_trace))
+
+ def disableOptimisticGetParentMap(self):
+ # Tweak some class variables to stop remote get_parent_map calls asking
+ # for or receiving more data than the caller asked for.
+ self.overrideAttr(vf_repository.InterVersionedFileRepository,
+ '_walk_to_common_revisions_batch_size', 1)
+ self.overrideAttr(SmartServerRepositoryGetParentMap,
+ 'no_extra_results', True)
+
+
+class TestPushHook(TestCaseWithInterBranch):
+
+ def setUp(self):
+ self.hook_calls = []
+ TestCaseWithInterBranch.setUp(self)
+
+ def capture_post_push_hook(self, result):
+ """Capture post push hook calls to self.hook_calls.
+
+ The call is logged, as is some state of the two branches.
+ """
+ if result.local_branch:
+ local_locked = result.local_branch.is_locked()
+ local_base = result.local_branch.base
+ else:
+ local_locked = None
+ local_base = None
+ self.hook_calls.append(
+ ('post_push', result.source_branch, local_base,
+ result.master_branch.base,
+ result.old_revno, result.old_revid,
+ result.new_revno, result.new_revid,
+ result.source_branch.is_locked(), local_locked,
+ result.master_branch.is_locked()))
+
+ def test_post_push_empty_history(self):
+ target = self.make_to_branch('target')
+ source = self.make_from_branch('source')
+ Branch.hooks.install_named_hook('post_push',
+ self.capture_post_push_hook, None)
+ source.push(target)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_push', source, None, target.base, 0, NULL_REVISION,
+ 0, NULL_REVISION, True, None, True)
+ ],
+ self.hook_calls)
+
+ def test_post_push_bound_branch(self):
+ # pushing to a bound branch should pass in the master branch to the
+ # hook, allowing the correct number of emails to be sent, while still
+ # allowing hooks that want to modify the target to do so to both
+ # instances.
+ target = self.make_to_branch('target')
+ local = self.make_from_branch('local')
+ try:
+ local.bind(target)
+ except errors.UpgradeRequired:
+ # We can't bind this format to itself - typically it is the local
+ # branch that doesn't support binding. As of May 2007
+ # remotebranches can't be bound. Let's instead make a new local
+ # branch of the default type, which does allow binding.
+ # See https://bugs.launchpad.net/bzr/+bug/112020
+ local = ControlDir.create_branch_convenience('local2')
+ local.bind(target)
+ source = self.make_from_branch('source')
+ Branch.hooks.install_named_hook('post_push',
+ self.capture_post_push_hook, None)
+ source.push(local)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_push', source, local.base, target.base, 0, NULL_REVISION,
+ 0, NULL_REVISION, True, True, True)
+ ],
+ self.hook_calls)
+
+ def test_post_push_nonempty_history(self):
+ target = self.make_to_branch_and_tree('target')
+ target.lock_write()
+ target.add('')
+ rev1 = target.commit('rev 1')
+ target.unlock()
+ sourcedir = target.branch.bzrdir.clone(self.get_url('source'))
+ source = MemoryTree.create_on_branch(sourcedir.open_branch())
+ rev2 = source.commit('rev 2')
+ Branch.hooks.install_named_hook('post_push',
+ self.capture_post_push_hook, None)
+ source.branch.push(target.branch)
+ # with nothing there we should still get a notification, and
+ # have both branches locked at the notification time.
+ self.assertEqual([
+ ('post_push', source.branch, None, target.branch.base, 1, rev1,
+ 2, rev2, True, None, True)
+ ],
+ self.hook_calls)
diff --git a/bzrlib/tests/per_interrepository/__init__.py b/bzrlib/tests/per_interrepository/__init__.py
new file mode 100644
index 0000000..80cae94
--- /dev/null
+++ b/bzrlib/tests/per_interrepository/__init__.py
@@ -0,0 +1,211 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""InterRepository implementation tests for bzr.
+
+These test the conformance of all the interrepository variations to the
+expected API including generally applicable corner cases.
+Specific tests for individual formats are in the tests/test_repository.py file
+rather than in tests/per_interrepository/*.py.
+"""
+
+
+from bzrlib import (
+ pyutils,
+ transport,
+ )
+from bzrlib.errors import (
+ FileExists,
+ UninitializableFormat,
+ )
+
+from bzrlib.repository import (
+ format_registry,
+ InterRepository,
+ )
+from bzrlib.tests import (
+ TestSkipped,
+ default_transport,
+ multiply_tests,
+ )
+from bzrlib.tests.per_controldir.test_controldir import TestCaseWithControlDir
+from bzrlib.vf_repository import (
+ InterDifferingSerializer,
+ )
+
+
+def make_scenarios(transport_server, transport_readonly_server, formats):
+ """Transform the input formats to a list of scenarios.
+
+ :param formats: A list of tuples:
+ (label, repository_format, repository_format_to, extra_setup).
+ """
+ result = []
+ for label, repository_format, repository_format_to, extra_setup in formats:
+ id = '%s,%s,%s' % (label, repository_format.__class__.__name__,
+ repository_format_to.__class__.__name__)
+ scenario = (id,
+ {"transport_server": transport_server,
+ "transport_readonly_server": transport_readonly_server,
+ "repository_format": repository_format,
+ "repository_format_to": repository_format_to,
+ "extra_setup": extra_setup,
+ })
+ result.append(scenario)
+ return result
+
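+# Illustrative note, not part of the original module: each scenario produced
+# by make_scenarios() above is an (id, parameter_dict) pair suitable for
+# multiply_tests(). For a hypothetical input tuple
+# ('Example', fmt_from, fmt_to, None) the generated id would be
+# 'Example,<fmt_from class name>,<fmt_to class name>', with fmt_from carried
+# as "repository_format" and fmt_to as "repository_format_to".
+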
+
+def default_test_list():
+ """Generate the default list of interrepo permutations to test."""
+ from bzrlib.repofmt import (
+ groupcompress_repo,
+ knitrepo,
+ knitpack_repo,
+ )
+ result = []
+ def add_combo(interrepo_cls, from_format, to_format, extra_setup=None,
+ label=None):
+ if label is None:
+ label = interrepo_cls.__name__
+ result.append((label, from_format, to_format, extra_setup))
+ # test the default InterRepository between format 6 and the current
+ # default format.
+ # XXX: robertc 20060220 reinstate this when there are two supported
+ # formats which do not have an optimal code path between them.
+ #result.append((InterRepository,
+ # RepositoryFormat6(),
+ # RepositoryFormatKnit1()))
+ for optimiser_class in InterRepository._optimisers:
+ format_to_test = optimiser_class._get_repo_format_to_test()
+ if format_to_test is not None:
+ add_combo(optimiser_class, format_to_test, format_to_test)
+ # if there are specific combinations we want to use, we can add them
+ # here. We want to test rich root upgrading.
+ # XXX: although we attach InterRepository class names to these scenarios,
+ # there's nothing asserting that these labels correspond to what is
+ # actually used.
+ def force_known_graph(testcase):
+ from bzrlib.fetch import Inter1and2Helper
+ testcase.overrideAttr(Inter1and2Helper, 'known_graph_threshold', -1)
+ # Gather extra scenarios from the repository implementations,
+ # as InterRepositories can be used by Repository implementations
+ # they aren't aware of.
+ for module_name in format_registry._get_all_modules():
+ module = pyutils.get_named_object(module_name)
+ try:
+ get_extra_interrepo_test_combinations = getattr(
+ module,
+ "get_extra_interrepo_test_combinations")
+ except AttributeError:
+ continue
+ for (interrepo_cls, from_format, to_format) in (
+ get_extra_interrepo_test_combinations()):
+ add_combo(interrepo_cls, from_format, to_format)
+ add_combo(InterRepository,
+ knitrepo.RepositoryFormatKnit1(),
+ knitrepo.RepositoryFormatKnit3())
+ add_combo(knitrepo.InterKnitRepo,
+ knitrepo.RepositoryFormatKnit1(),
+ knitpack_repo.RepositoryFormatKnitPack1())
+ add_combo(knitrepo.InterKnitRepo,
+ knitpack_repo.RepositoryFormatKnitPack1(),
+ knitrepo.RepositoryFormatKnit1())
+ add_combo(knitrepo.InterKnitRepo,
+ knitrepo.RepositoryFormatKnit3(),
+ knitpack_repo.RepositoryFormatKnitPack3())
+ add_combo(knitrepo.InterKnitRepo,
+ knitpack_repo.RepositoryFormatKnitPack3(),
+ knitrepo.RepositoryFormatKnit3())
+ add_combo(knitrepo.InterKnitRepo,
+ knitpack_repo.RepositoryFormatKnitPack3(),
+ knitpack_repo.RepositoryFormatKnitPack4())
+ add_combo(InterDifferingSerializer,
+ knitpack_repo.RepositoryFormatKnitPack1(),
+ knitpack_repo.RepositoryFormatKnitPack6RichRoot())
+ add_combo(InterDifferingSerializer,
+ knitpack_repo.RepositoryFormatKnitPack1(),
+ knitpack_repo.RepositoryFormatKnitPack6RichRoot(),
+ force_known_graph,
+ label='InterDifferingSerializer+get_known_graph_ancestry')
+ add_combo(InterDifferingSerializer,
+ knitpack_repo.RepositoryFormatKnitPack6RichRoot(),
+ groupcompress_repo.RepositoryFormat2a())
+ add_combo(InterDifferingSerializer,
+ groupcompress_repo.RepositoryFormat2a(),
+ knitpack_repo.RepositoryFormatKnitPack6RichRoot())
+ return result
+
+
+class TestCaseWithInterRepository(TestCaseWithControlDir):
+
+ def setUp(self):
+ super(TestCaseWithInterRepository, self).setUp()
+ if self.extra_setup:
+ self.extra_setup(self)
+
+ def get_default_format(self):
+ self.assertEquals(
+ self.repository_format._matchingbzrdir.repository_format,
+ self.repository_format)
+ return self.repository_format._matchingbzrdir
+
+ def make_branch(self, relpath, format=None):
+ repo = self.make_repository(relpath, format=format)
+ return repo.bzrdir.create_branch()
+
+ def make_bzrdir(self, relpath, format=None):
+ try:
+ url = self.get_url(relpath)
+ segments = url.split('/')
+ if segments and segments[-1] not in ('', '.'):
+ parent = '/'.join(segments[:-1])
+ t = transport.get_transport(parent)
+ try:
+ t.mkdir(segments[-1])
+ except FileExists:
+ pass
+ if format is None:
+ format = self.repository_format._matchingbzrdir
+ return format.initialize(url)
+ except UninitializableFormat:
+ raise TestSkipped("Format %s is not initializable." % format)
+
+ def make_repository(self, relpath, format=None):
+ made_control = self.make_bzrdir(relpath, format=format)
+ return self.repository_format.initialize(made_control)
+
+ def make_to_repository(self, relpath):
+ made_control = self.make_bzrdir(relpath,
+ self.repository_format_to._matchingbzrdir)
+ return self.repository_format_to.initialize(made_control)
+
+
+def load_tests(standard_tests, module, loader):
+ submod_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_interrepository.test_fetch',
+ 'bzrlib.tests.per_interrepository.test_interrepository',
+ ])
+ scenarios = make_scenarios(
+ default_transport,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ default_test_list()
+ )
+ return multiply_tests(submod_tests, scenarios, standard_tests)
diff --git a/bzrlib/tests/per_interrepository/test_fetch.py b/bzrlib/tests/per_interrepository/test_fetch.py
new file mode 100644
index 0000000..96338f5
--- /dev/null
+++ b/bzrlib/tests/per_interrepository/test_fetch.py
@@ -0,0 +1,553 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import sys
+
+from bzrlib import (
+ errors,
+ inventory,
+ osutils,
+ repository,
+ versionedfile,
+ )
+from bzrlib.errors import (
+ NoSuchRevision,
+ )
+from bzrlib.vf_search import (
+ SearchResult,
+ )
+from bzrlib.revision import (
+ NULL_REVISION,
+ Revision,
+ )
+from bzrlib.tests import (
+ TestNotApplicable,
+ )
+from bzrlib.tests.per_interrepository import (
+ TestCaseWithInterRepository,
+ )
+from bzrlib.tests.per_interrepository.test_interrepository import (
+ check_repo_format_for_funky_id_on_win32
+ )
+
+
+
+class TestInterRepository(TestCaseWithInterRepository):
+
+ def disable_commit_write_group_paranoia(self, repo):
+ pack_coll = getattr(repo, '_pack_collection', None)
+ if pack_coll is not None:
+ # Monkey-patch the pack collection instance to allow storing
+ # incomplete revisions.
+ pack_coll._check_new_inventories = lambda: []
+
+ def test_fetch(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ tree_a.add('foo', 'file1')
+ tree_a.commit('rev1', rev_id='rev1')
+ def check_push_rev1(repo):
+ # ensure the revision is missing.
+ self.assertRaises(NoSuchRevision, repo.get_revision, 'rev1')
+ # fetch with a limit of NULL_REVISION
+ repo.fetch(tree_a.branch.repository,
+ revision_id=NULL_REVISION)
+ # nothing should have been pushed
+ self.assertFalse(repo.has_revision('rev1'))
+ # fetch with a default limit (grab everything)
+ repo.fetch(tree_a.branch.repository)
+ # check that b now has all the data from a's first commit.
+ rev = repo.get_revision('rev1')
+ tree = repo.revision_tree('rev1')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ tree.get_file_text('file1')
+ for file_id in tree.all_file_ids():
+ if tree.kind(file_id) == "file":
+ tree.get_file(file_id).read()
+
+ # make a target repository
+ repo_b = self.make_to_repository('b')
+ check_push_rev1(repo_b)
+
+ def test_fetch_inconsistent_last_changed_entries(self):
+ """If an inventory has odd data we should still get what it references.
+
+ This verifies that we fetch a file text created in a revision that is not
+ being fetched, but is referenced from the revision we are fetching, when
+ the adjacent revisions to the one being fetched do not reference that text.
+ """
+ tree = self.make_branch_and_tree('source')
+ revid = tree.commit('old')
+ to_repo = self.make_to_repository('to_repo')
+ to_repo.fetch(tree.branch.repository, revid)
+ # Make a broken revision and fetch it.
+ source = tree.branch.repository
+ source.lock_write()
+ self.addCleanup(source.unlock)
+ source.start_write_group()
+ try:
+ # We need two revisions: OLD and NEW. NEW will claim to need a file
+ # 'FOO' changed in 'OLD'. OLD will not have that file at all.
+ source.texts.insert_record_stream([
+ versionedfile.FulltextContentFactory(('foo', revid), (), None,
+ 'contents')])
+ basis = source.revision_tree(revid)
+ parent_id = basis.path2id('')
+ entry = inventory.make_entry('file', 'foo-path', parent_id, 'foo')
+ entry.revision = revid
+ entry.text_size = len('contents')
+ entry.text_sha1 = osutils.sha_string('contents')
+ inv_sha1, _ = source.add_inventory_by_delta(revid, [
+ (None, 'foo-path', 'foo', entry)], 'new', [revid])
+ rev = Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=inv_sha1,
+ revision_id='new',
+ parent_ids=[revid])
+ source.add_revision(rev.revision_id, rev)
+ except:
+ source.abort_write_group()
+ raise
+ else:
+ source.commit_write_group()
+ to_repo.fetch(source, 'new')
+ to_repo.lock_read()
+ self.addCleanup(to_repo.unlock)
+ self.assertEqual('contents',
+ to_repo.texts.get_record_stream([('foo', revid)],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+
+ def test_fetch_from_stacked_smart(self):
+ self.setup_smart_server_with_call_log()
+ self.test_fetch_from_stacked()
+
+ def test_fetch_from_stacked_smart_old(self):
+ self.setup_smart_server_with_call_log()
+ self.disable_verb('Repository.get_stream_1.19')
+ self.test_fetch_from_stacked()
+
+ def test_fetch_from_stacked(self):
+ """Fetch from a stacked branch succeeds."""
+ if not self.repository_format.supports_external_lookups:
+ raise TestNotApplicable("Need stacking support in the source.")
+ builder = self.make_branch_builder('full-branch')
+ builder.start_series()
+ builder.build_snapshot('first', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('second', ['first'], [
+ ('modify', ('file-id', 'second content\n'))])
+ builder.build_snapshot('third', ['second'], [
+ ('modify', ('file-id', 'third content\n'))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ repo = self.make_repository('stacking-base')
+ trunk = repo.bzrdir.create_branch()
+ trunk.repository.fetch(branch.repository, 'second')
+ repo = self.make_repository('stacked')
+ stacked_branch = repo.bzrdir.create_branch()
+ stacked_branch.set_stacked_on_url(trunk.base)
+ stacked_branch.repository.fetch(branch.repository, 'third')
+ target = self.make_to_repository('target')
+ target.fetch(stacked_branch.repository, 'third')
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ all_revs = set(['first', 'second', 'third'])
+ self.assertEqual(all_revs, set(target.get_parent_map(all_revs)))
+
+ def test_fetch_parent_inventories_at_stacking_boundary_smart(self):
+ self.setup_smart_server_with_call_log()
+ self.test_fetch_parent_inventories_at_stacking_boundary()
+
+ def test_fetch_parent_inventories_at_stacking_boundary_smart_old(self):
+ self.setup_smart_server_with_call_log()
+ self.disable_verb('Repository.insert_stream_1.19')
+ try:
+ self.test_fetch_parent_inventories_at_stacking_boundary()
+ except errors.ConnectionReset:
+ self.knownFailure("Random spurious failure, see bug 874153")
+
+ def test_fetch_parent_inventories_at_stacking_boundary(self):
+ """Fetch to a stacked branch copies inventories for parents of
+ revisions at the stacking boundary.
+
+ This is necessary so that the server is able to determine the file-ids
+ altered by all revisions it contains, which means that it needs both
+ the inventory for any revision it has, and the inventories of all that
+ revision's parents.
+
+ However, we should also skip any revisions which are ghosts in the
+ parents.
+ """
+ if not self.repository_format_to.supports_external_lookups:
+ raise TestNotApplicable("Need stacking support in the target.")
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
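+ # Revision graph built below (diagram added for clarity):
+ #
+ #        base
+ #       /    \
+ #    left    right
+ #       \    /
+ #       merge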
+ builder.build_snapshot('base', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('left', ['base'], [
+ ('modify', ('file-id', 'left content\n'))])
+ builder.build_snapshot('right', ['base'], [
+ ('modify', ('file-id', 'right content\n'))])
+ builder.build_snapshot('merge', ['left', 'right'], [
+ ('modify', ('file-id', 'left and right content\n'))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ repo = self.make_to_repository('trunk')
+ trunk = repo.bzrdir.create_branch()
+ trunk.repository.fetch(branch.repository, 'left')
+ trunk.repository.fetch(branch.repository, 'right')
+ repo = self.make_to_repository('stacked')
+ stacked_branch = repo.bzrdir.create_branch()
+ stacked_branch.set_stacked_on_url(trunk.base)
+ stacked_branch.repository.fetch(branch.repository, 'merge')
+ unstacked_repo = stacked_branch.bzrdir.open_repository()
+ unstacked_repo.lock_read()
+ self.addCleanup(unstacked_repo.unlock)
+ self.assertFalse(unstacked_repo.has_revision('left'))
+ self.assertFalse(unstacked_repo.has_revision('right'))
+ self.assertEqual(
+ set([('left',), ('right',), ('merge',)]),
+ unstacked_repo.inventories.keys())
+ # And the basis inventories have been copied correctly
+ trunk.lock_read()
+ self.addCleanup(trunk.unlock)
+ left_tree, right_tree = trunk.repository.revision_trees(
+ ['left', 'right'])
+ stacked_branch.lock_read()
+ self.addCleanup(stacked_branch.unlock)
+ (stacked_left_tree,
+ stacked_right_tree) = stacked_branch.repository.revision_trees(
+ ['left', 'right'])
+ self.assertEqual(
+ left_tree.root_inventory, stacked_left_tree.root_inventory)
+ self.assertEqual(
+ right_tree.root_inventory, stacked_right_tree.root_inventory)
+
+ # Finally, it's not enough to see that the basis inventories are
+ # present. The texts introduced in merge (and only those) should be
+ # present, and also generating a stream should succeed without blowing
+ # up.
+ self.assertTrue(unstacked_repo.has_revision('merge'))
+ expected_texts = set([('file-id', 'merge')])
+ if stacked_branch.repository.texts.get_parent_map([('root-id',
+ 'merge')]):
+ # If a (root-id,merge) text exists, it should be in the stacked
+ # repo.
+ expected_texts.add(('root-id', 'merge'))
+ self.assertEqual(expected_texts, unstacked_repo.texts.keys())
+ self.assertCanStreamRevision(unstacked_repo, 'merge')
+
+ def assertCanStreamRevision(self, repo, revision_id):
+ exclude_keys = set(repo.all_revision_ids()) - set([revision_id])
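+ # Note added for clarity: the SearchResult arguments below appear to be
+ # the start keys, the excluded keys, the expected key count and the keys
+ # themselves - i.e. search for just this one revision, excluding
+ # everything the repository already has.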
+ search = SearchResult([revision_id], exclude_keys, 1, [revision_id])
+ source = repo._get_source(repo._format)
+ for substream_kind, substream in source.get_stream(search):
+ # Consume the substream
+ list(substream)
+
+ def test_fetch_across_stacking_boundary_ignores_ghost(self):
+ if not self.repository_format_to.supports_external_lookups:
+ raise TestNotApplicable("Need stacking support in the target.")
+ to_repo = self.make_to_repository('to')
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
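+ # Revision graph built below (diagram added for clarity); 'ghost' is a
+ # parent of 'third' but is never actually created:
+ #
+ #   base -> second -> third
+ #              ghost ---^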
+ builder.build_snapshot('base', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('second', ['base'], [
+ ('modify', ('file-id', 'second content\n'))])
+ builder.build_snapshot('third', ['second', 'ghost'], [
+ ('modify', ('file-id', 'third content\n'))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ repo = self.make_to_repository('trunk')
+ trunk = repo.bzrdir.create_branch()
+ trunk.repository.fetch(branch.repository, 'second')
+ repo = self.make_to_repository('stacked')
+ stacked_branch = repo.bzrdir.create_branch()
+ stacked_branch.set_stacked_on_url(trunk.base)
+ stacked_branch.repository.fetch(branch.repository, 'third')
+ unstacked_repo = stacked_branch.bzrdir.open_repository()
+ unstacked_repo.lock_read()
+ self.addCleanup(unstacked_repo.unlock)
+ self.assertFalse(unstacked_repo.has_revision('second'))
+ self.assertFalse(unstacked_repo.has_revision('ghost'))
+ self.assertEqual(
+ set([('second',), ('third',)]),
+ unstacked_repo.inventories.keys())
+ # And the basis inventories have been copied correctly
+ trunk.lock_read()
+ self.addCleanup(trunk.unlock)
+ second_tree = trunk.repository.revision_tree('second')
+ stacked_branch.lock_read()
+ self.addCleanup(stacked_branch.unlock)
+ stacked_second_tree = stacked_branch.repository.revision_tree('second')
+ self.assertEqual(second_tree, stacked_second_tree)
+ # Finally, it's not enough to see that the basis inventories are
+ # present. The texts introduced in 'third' (and only those) should be
+ # present, and also generating a stream should succeed without blowing
+ # up.
+ self.assertTrue(unstacked_repo.has_revision('third'))
+ expected_texts = set([('file-id', 'third')])
+ if stacked_branch.repository.texts.get_parent_map([('root-id',
+ 'third')]):
+ # If a (root-id,third) text exists, it should be in the stacked
+ # repo.
+ expected_texts.add(('root-id', 'third'))
+ self.assertEqual(expected_texts, unstacked_repo.texts.keys())
+ self.assertCanStreamRevision(unstacked_repo, 'third')
+
+ def test_fetch_from_stacked_to_stacked_copies_parent_inventories(self):
+ """Fetch from a stacked branch copies inventories for parents of
+ revisions at the stacking boundary.
+
+ Specifically, fetch will copy the parent inventories from the
+ source for which the corresponding revisions are not present. This
+ will happen even when the source repository has no fallbacks configured
+ (as is the case during upgrade).
+ """
+ if not self.repository_format.supports_external_lookups:
+ raise TestNotApplicable("Need stacking support in the source.")
+ if not self.repository_format_to.supports_external_lookups:
+ raise TestNotApplicable("Need stacking support in the target.")
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('base', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('left', ['base'], [
+ ('modify', ('file-id', 'left content\n'))])
+ builder.build_snapshot('right', ['base'], [
+ ('modify', ('file-id', 'right content\n'))])
+ builder.build_snapshot('merge', ['left', 'right'], [
+ ('modify', ('file-id', 'left and right content\n'))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ repo = self.make_repository('old-trunk')
+ # Make a pair of equivalent trunk repos in the from and to formats.
+ old_trunk = repo.bzrdir.create_branch()
+ old_trunk.repository.fetch(branch.repository, 'left')
+ old_trunk.repository.fetch(branch.repository, 'right')
+ repo = self.make_to_repository('new-trunk')
+ new_trunk = repo.bzrdir.create_branch()
+ new_trunk.repository.fetch(branch.repository, 'left')
+ new_trunk.repository.fetch(branch.repository, 'right')
+ # Make the source: a repo stacked on old_trunk containing just the data
+ # for 'merge'.
+ repo = self.make_repository('old-stacked')
+ old_stacked_branch = repo.bzrdir.create_branch()
+ old_stacked_branch.set_stacked_on_url(old_trunk.base)
+ old_stacked_branch.repository.fetch(branch.repository, 'merge')
+ # Make the target, a repo stacked on new_trunk.
+ repo = self.make_to_repository('new-stacked')
+ new_stacked_branch = repo.bzrdir.create_branch()
+ new_stacked_branch.set_stacked_on_url(new_trunk.base)
+ old_unstacked_repo = old_stacked_branch.bzrdir.open_repository()
+ new_unstacked_repo = new_stacked_branch.bzrdir.open_repository()
+ # Reopen the source and target repos without any fallbacks, and fetch
+ # 'merge'.
+ new_unstacked_repo.fetch(old_unstacked_repo, 'merge')
+ # Now check the results. new_unstacked_repo should contain all the
+ # data necessary to stream 'merge' (i.e. the parent inventories).
+ new_unstacked_repo.lock_read()
+ self.addCleanup(new_unstacked_repo.unlock)
+ self.assertFalse(new_unstacked_repo.has_revision('left'))
+ self.assertFalse(new_unstacked_repo.has_revision('right'))
+ self.assertEqual(
+ set([('left',), ('right',), ('merge',)]),
+ new_unstacked_repo.inventories.keys())
+ # And the basis inventories have been copied correctly
+ new_trunk.lock_read()
+ self.addCleanup(new_trunk.unlock)
+ left_tree, right_tree = new_trunk.repository.revision_trees(
+ ['left', 'right'])
+ new_stacked_branch.lock_read()
+ self.addCleanup(new_stacked_branch.unlock)
+ (stacked_left_tree,
+ stacked_right_tree) = new_stacked_branch.repository.revision_trees(
+ ['left', 'right'])
+ self.assertEqual(left_tree, stacked_left_tree)
+ self.assertEqual(right_tree, stacked_right_tree)
+ # Finally, it's not enough to see that the basis inventories are
+ # present. The texts introduced in merge (and only those) should be
+ # present, and also generating a stream should succeed without blowing
+ # up.
+ self.assertTrue(new_unstacked_repo.has_revision('merge'))
+ expected_texts = set([('file-id', 'merge')])
+ if new_stacked_branch.repository.texts.get_parent_map([('root-id',
+ 'merge')]):
+ # If a (root-id,merge) text exists, it should be in the stacked
+ # repo.
+ expected_texts.add(('root-id', 'merge'))
+ self.assertEqual(expected_texts, new_unstacked_repo.texts.keys())
+ self.assertCanStreamRevision(new_unstacked_repo, 'merge')
+
+ def test_fetch_missing_basis_text(self):
+ """If fetching a delta, we should die if a basis is not present."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('one', rev_id='rev-one')
+ self.build_tree_contents([('tree/a', 'new contents\n')])
+ tree.commit('two', rev_id='rev-two')
+
+ to_repo = self.make_to_repository('to_repo')
+ # We build a broken revision so that we can test the fetch code dies
+ # properly. So copy the inventory and revision, but not the text.
+ to_repo.lock_write()
+ try:
+ to_repo.start_write_group()
+ try:
+ inv = tree.branch.repository.get_inventory('rev-one')
+ to_repo.add_inventory('rev-one', inv, [])
+ rev = tree.branch.repository.get_revision('rev-one')
+ to_repo.add_revision('rev-one', rev, inv=inv)
+ self.disable_commit_write_group_paranoia(to_repo)
+ to_repo.commit_write_group()
+ except:
+ to_repo.abort_write_group(suppress_errors=True)
+ raise
+ finally:
+ to_repo.unlock()
+
+ # Implementations can either ensure that the target of the delta is
+ # reconstructable, or raise an exception (which stream based copies
+ # generally do).
+ try:
+ to_repo.fetch(tree.branch.repository, 'rev-two')
+ except (errors.BzrCheckError, errors.RevisionNotPresent), e:
+ # If an exception is raised, the revision should not be in the
+ # target.
+ #
+ # Can also just raise a generic check error; stream insertion
+ # does this to include all the missing data
+ self.assertRaises((errors.NoSuchRevision, errors.RevisionNotPresent),
+ to_repo.revision_tree, 'rev-two')
+ else:
+ # If no exception is raised, then the text should be
+ # available.
+ to_repo.lock_read()
+ try:
+ rt = to_repo.revision_tree('rev-two')
+ self.assertEqual('new contents\n',
+ rt.get_file_text('a-id'))
+ finally:
+ to_repo.unlock()
+
+ def test_fetch_missing_revision_same_location_fails(self):
+ repo_a = self.make_repository('.')
+ repo_b = repository.Repository.open('.')
+ self.assertRaises(errors.NoSuchRevision,
+ repo_b.fetch, repo_a, revision_id='XXX')
+
+ def test_fetch_same_location_trivial_works(self):
+ repo_a = self.make_repository('.')
+ repo_b = repository.Repository.open('.')
+ repo_a.fetch(repo_b)
+
+ def test_fetch_missing_text_other_location_fails(self):
+ source_tree = self.make_branch_and_tree('source')
+ source = source_tree.branch.repository
+ target = self.make_to_repository('target')
+
+ # start by adding a file so the data knit for the file exists in
+ # repositories that have specific files for each fileid.
+ self.build_tree(['source/id'])
+ source_tree.add(['id'], ['id'])
+ source_tree.commit('a', rev_id='a')
+ # now we manually insert a revision with an inventory referencing
+ # file 'id' at revision 'b', but we do not insert revision b.
+ # this should ensure that the new versions of files are being checked
+ # for during pull operations
+ inv = source.get_inventory('a')
+ source.lock_write()
+ self.addCleanup(source.unlock)
+ source.start_write_group()
+ inv['id'].revision = 'b'
+ inv.revision_id = 'b'
+ sha1 = source.add_inventory('b', inv, ['a'])
+ rev = Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=sha1,
+ revision_id='b')
+ rev.parent_ids = ['a']
+ source.add_revision('b', rev)
+ self.disable_commit_write_group_paranoia(source)
+ source.commit_write_group()
+ self.assertRaises(errors.RevisionNotPresent, target.fetch, source)
+ self.assertFalse(target.has_revision('b'))
+
+ def test_fetch_funky_file_id(self):
+ from_tree = self.make_branch_and_tree('tree')
+ if sys.platform == 'win32':
+ from_repo = from_tree.branch.repository
+ check_repo_format_for_funky_id_on_win32(from_repo)
+ self.build_tree(['tree/filename'])
+ from_tree.add('filename', 'funky-chars<>%&;"\'')
+ from_tree.commit('commit filename')
+ to_repo = self.make_to_repository('to')
+ to_repo.fetch(from_tree.branch.repository, from_tree.get_parent_ids()[0])
+
+ def test_fetch_revision_hash(self):
+ """Ensure that inventory hashes are updated by fetch"""
+ from_tree = self.make_branch_and_tree('tree')
+ from_tree.commit('foo', rev_id='foo-id')
+ to_repo = self.make_to_repository('to')
+ to_repo.fetch(from_tree.branch.repository)
+ recorded_inv_sha1 = to_repo.get_revision('foo-id').inventory_sha1
+ to_repo.lock_read()
+ self.addCleanup(to_repo.unlock)
+ stream = to_repo.inventories.get_record_stream([('foo-id',)],
+ 'unordered', True)
+ bytes = stream.next().get_bytes_as('fulltext')
+ computed_inv_sha1 = osutils.sha_string(bytes)
+ self.assertEqual(computed_inv_sha1, recorded_inv_sha1)
+
+
+class TestFetchDependentData(TestCaseWithInterRepository):
+
+ def test_reference(self):
+ from_tree = self.make_branch_and_tree('tree')
+ to_repo = self.make_to_repository('to')
+ if (not from_tree.supports_tree_reference() or
+ not from_tree.branch.repository._format.supports_tree_reference or
+ not to_repo._format.supports_tree_reference):
+ raise TestNotApplicable("Need subtree support.")
+ subtree = self.make_branch_and_tree('tree/subtree')
+ subtree.commit('subrev 1')
+ from_tree.add_reference(subtree)
+ tree_rev = from_tree.commit('foo')
+ # now from_tree's subtree entry has a last-modified revision of the rev
+ # id of the commit for foo, and a reference revision of the rev id of
+ # the commit for subrev 1
+ to_repo.fetch(from_tree.branch.repository, tree_rev)
+ # to_repo should have a file_graph for from_tree.path2id('subtree') and
+ # revid tree_rev.
+ file_id = from_tree.path2id('subtree')
+ to_repo.lock_read()
+ try:
+ self.assertEqual({(file_id, tree_rev):()},
+ to_repo.texts.get_parent_map([(file_id, tree_rev)]))
+ finally:
+ to_repo.unlock()
diff --git a/bzrlib/tests/per_interrepository/test_interrepository.py b/bzrlib/tests/per_interrepository/test_interrepository.py
new file mode 100644
index 0000000..ec466d1
--- /dev/null
+++ b/bzrlib/tests/per_interrepository/test_interrepository.py
@@ -0,0 +1,205 @@
+# Copyright (C) 2006-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for InterRepository implementations."""
+
+import sys
+
+import bzrlib
+import bzrlib.errors as errors
+import bzrlib.gpg
+from bzrlib.inventory import Inventory
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import (
+ TestNotApplicable,
+ TestSkipped,
+ )
+from bzrlib.tests.matchers import MatchesAncestry
+from bzrlib.tests.per_interrepository import (
+ TestCaseWithInterRepository,
+ )
+
+
+def check_repo_format_for_funky_id_on_win32(repo):
+ if not repo._format.supports_funky_characters and sys.platform == 'win32':
+ raise TestSkipped("funky chars not allowed on this platform in repository"
+ " %s" % repo.__class__.__name__)
+
+
+class TestInterRepository(TestCaseWithInterRepository):
+
+ def test_interrepository_get_returns_correct_optimiser(self):
+ # we assume the optimising code paths are triggered
+ # by the type of the repo not the transport - at this point.
+ # we may need to update this test if this changes.
+ #
+ # XXX: This code tests that we get an InterRepository when we try to
+ # convert between the two repositories that it wants to be tested with
+ # -- but that's not necessarily correct. So for now this is disabled.
+ # mbp 20070206
+ ## source_repo = self.make_repository("source")
+ ## target_repo = self.make_to_repository("target")
+ ## interrepo = repository.InterRepository.get(source_repo, target_repo)
+ ## self.assertEqual(self.interrepo_class, interrepo.__class__)
+ pass
+
+
+class TestCaseWithComplexRepository(TestCaseWithInterRepository):
+
+ def setUp(self):
+ super(TestCaseWithComplexRepository, self).setUp()
+ tree_a = self.make_branch_and_tree('a')
+ self.bzrdir = tree_a.branch.bzrdir
+ # add a corrupt inventory 'orphan'
+ tree_a.branch.repository.lock_write()
+ tree_a.branch.repository.start_write_group()
+ inv_file = tree_a.branch.repository.inventories
+ inv_file.add_lines(('orphan',), [], [])
+ tree_a.branch.repository.commit_write_group()
+ tree_a.branch.repository.unlock()
+ # add a real revision 'rev1'
+ tree_a.commit('rev1', rev_id='rev1', allow_pointless=True)
+ # add a real revision 'rev2' based on rev1
+ tree_a.commit('rev2', rev_id='rev2', allow_pointless=True)
+ # and sign 'rev2'
+ tree_a.branch.repository.lock_write()
+ tree_a.branch.repository.start_write_group()
+ tree_a.branch.repository.sign_revision('rev2',
+ bzrlib.gpg.LoopbackGPGStrategy(None))
+ tree_a.branch.repository.commit_write_group()
+ tree_a.branch.repository.unlock()
+
+ def test_search_missing_revision_ids(self):
+ # revision ids in repository A but not B are returned, fake ones
+ # are stripped. (fake meaning no revision object, but an inventory
+ # as some formats keyed off inventory data in the past.)
+ # make a repository to compare against that claims to have rev1
+ repo_b = self.make_to_repository('rev1_only')
+ repo_a = self.bzrdir.open_repository()
+ repo_b.fetch(repo_a, 'rev1')
+ # check the test will be valid
+ self.assertFalse(repo_b.has_revision('rev2'))
+ result = repo_b.search_missing_revision_ids(repo_a)
+ self.assertEqual(set(['rev2']), result.get_keys())
+ self.assertEqual(('search', set(['rev2']), set(['rev1']), 1),
+ result.get_recipe())
+
+ def test_search_missing_revision_ids_absent_requested_raises(self):
+ # Asking for missing revisions with a tip that is itself absent in the
+ # source raises NoSuchRevision.
+ repo_b = self.make_to_repository('target')
+ repo_a = self.bzrdir.open_repository()
+ # No pizza revisions anywhere
+ self.assertFalse(repo_a.has_revision('pizza'))
+ self.assertFalse(repo_b.has_revision('pizza'))
+ # Asking specifically for an absent revision errors.
+ self.assertRaises(errors.NoSuchRevision,
+ repo_b.search_missing_revision_ids, repo_a, revision_ids=['pizza'],
+ find_ghosts=True)
+ self.assertRaises(errors.NoSuchRevision,
+ repo_b.search_missing_revision_ids, repo_a, revision_ids=['pizza'],
+ find_ghosts=False)
+ self.callDeprecated(
+ ['search_missing_revision_ids(revision_id=...) was deprecated in '
+ '2.4. Use revision_ids=[...] instead.'],
+ self.assertRaises, errors.NoSuchRevision,
+ repo_b.search_missing_revision_ids, repo_a, revision_id='pizza',
+ find_ghosts=False)
+
+ def test_search_missing_revision_ids_revision_limited(self):
+ # revision ids in repository A that are not referenced by the
+ # requested revision are not returned.
+ # make a repository to compare against that is empty
+ repo_b = self.make_to_repository('empty')
+ repo_a = self.bzrdir.open_repository()
+ result = repo_b.search_missing_revision_ids(
+ repo_a, revision_ids=['rev1'])
+ self.assertEqual(set(['rev1']), result.get_keys())
+ self.assertEqual(('search', set(['rev1']), set([NULL_REVISION]), 1),
+ result.get_recipe())
+
+ def test_search_missing_revision_ids_limit(self):
+ # The limit= argument makes search_missing_revision_ids() limit
+ # the results to the first X topo-sorted revisions.
+ repo_b = self.make_to_repository('rev1_only')
+ repo_a = self.bzrdir.open_repository()
+ # check the test will be valid
+ self.assertFalse(repo_b.has_revision('rev2'))
+ result = repo_b.search_missing_revision_ids(repo_a, limit=1)
+ self.assertEqual(('search', set(['rev1']), set(['null:']), 1),
+ result.get_recipe())
+
+ def test_fetch_fetches_signatures_too(self):
+ from_repo = self.bzrdir.open_repository()
+ from_signature = from_repo.get_signature_text('rev2')
+ to_repo = self.make_to_repository('target')
+ to_repo.fetch(from_repo)
+ to_signature = to_repo.get_signature_text('rev2')
+ self.assertEqual(from_signature, to_signature)
+
+
+class TestCaseWithGhosts(TestCaseWithInterRepository):
+
+ def test_fetch_all_fixes_up_ghost(self):
+ # we want two repositories at this point:
+ # one with a revision that is a ghost in the other
+ # repository.
+ # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
+ # 'references' is present in both repositories, and 'tip' is present
+ # just in has_ghost.
+ #                 has_ghost       missing_ghost
+ #------------------------------------------------
+ # 'ghost'                         -
+ # 'references'                    'references'
+ # 'tip'                           -
+ # In this test we fetch 'tip', which (with find_ghosts=True) should also fetch 'ghost'.
+ has_ghost = self.make_repository('has_ghost')
+ missing_ghost = self.make_repository('missing_ghost')
+ if [True, True] != [repo._format.supports_ghosts for repo in
+ (has_ghost, missing_ghost)]:
+ raise TestNotApplicable("Need ghost support.")
+
+ def add_commit(repo, revision_id, parent_ids):
+ repo.lock_write()
+ repo.start_write_group()
+ inv = Inventory(revision_id=revision_id)
+ inv.root.revision = revision_id
+ root_id = inv.root.file_id
+ sha1 = repo.add_inventory(revision_id, inv, parent_ids)
+ repo.texts.add_lines((root_id, revision_id), [], [])
+ rev = bzrlib.revision.Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=sha1,
+ revision_id=revision_id)
+ rev.parent_ids = parent_ids
+ repo.add_revision(revision_id, rev)
+ repo.commit_write_group()
+ repo.unlock()
+ add_commit(has_ghost, 'ghost', [])
+ add_commit(has_ghost, 'references', ['ghost'])
+ add_commit(missing_ghost, 'references', ['ghost'])
+ add_commit(has_ghost, 'tip', ['references'])
+ missing_ghost.fetch(has_ghost, 'tip', find_ghosts=True)
+ # missing ghost now has tip and ghost.
+ rev = missing_ghost.get_revision('tip')
+ inv = missing_ghost.get_inventory('tip')
+ rev = missing_ghost.get_revision('ghost')
+ inv = missing_ghost.get_inventory('ghost')
+ # rev must not be corrupt now
+ self.assertThat(['ghost', 'references', 'tip'],
+ MatchesAncestry(missing_ghost, 'tip'))
diff --git a/bzrlib/tests/per_intertree/__init__.py b/bzrlib/tests/per_intertree/__init__.py
new file mode 100644
index 0000000..012529a
--- /dev/null
+++ b/bzrlib/tests/per_intertree/__init__.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""InterTree implementation tests for bzr.
+
+These test the conformance of all the InterTree variations to the expected API.
+Specific tests for individual variations are in other places such as:
+ - tests/test_workingtree.py
+"""
+
+import bzrlib
+from bzrlib import (
+ revisiontree,
+ tests,
+ )
+from bzrlib.transform import TransformPreview
+from bzrlib.tests import (
+ default_transport,
+ multiply_tests,
+ )
+from bzrlib.tests.per_tree import (
+ return_parameter,
+ revision_tree_from_workingtree,
+ TestCaseWithTree,
+ )
+from bzrlib.tree import InterTree
+from bzrlib.workingtree_3 import WorkingTreeFormat3
+from bzrlib.workingtree_4 import WorkingTreeFormat4
+
+
+def return_provided_trees(test_case, source, target):
+ """Return the source and target tree unaltered."""
+ return source, target
+
+
+class TestCaseWithTwoTrees(TestCaseWithTree):
+
+ def not_applicable_if_cannot_represent_unversioned(self, tree):
+ if isinstance(tree, revisiontree.RevisionTree):
+ # The locked test trees conversion could not preserve the
+ # unversioned file status. This is normal (e.g. InterDirstateTree
+ # falls back to InterTree if the basis is not a
+ # DirstateRevisionTree, and revision trees cannot have unversioned
+ # files).
+ raise tests.TestNotApplicable('cannot represent unversioned files')
+
+ def not_applicable_if_missing_in(self, relpath, tree):
+ if not tree.path2id(relpath):
+ # The locked test trees conversion could not preserve the missing
+ # file status. This is normal (e.g. InterDirstateTree falls back
+ # to InterTree if the basis is not a DirstateRevisionTree, and
+ # revision trees cannot have missing files).
+ raise tests.TestNotApplicable('cannot represent missing files')
+
+ def make_to_branch_and_tree(self, relpath):
+ """Make a to_workingtree_format branch and tree."""
+ made_control = self.make_bzrdir(relpath,
+ format=self.workingtree_format_to._matchingbzrdir)
+ made_control.create_repository()
+ made_control.create_branch()
+ return self.workingtree_format_to.initialize(made_control)
+
+
+def make_scenarios(transport_server, transport_readonly_server, formats):
+ """Transform the input formats to a list of scenarios.
+
+ :param formats: A list of tuples:
+ (label,
+ intertree_class,
+ workingtree_format,
+ workingtree_format_to,
+ mutable_trees_to_test_trees)
+ """
+ result = []
+ for (label, intertree_class,
+ workingtree_format,
+ workingtree_format_to,
+ mutable_trees_to_test_trees) in formats:
+ scenario = (label, {
+ "transport_server": transport_server,
+ "transport_readonly_server": transport_readonly_server,
+ "bzrdir_format":workingtree_format._matchingbzrdir,
+ "workingtree_format":workingtree_format,
+ "intertree_class":intertree_class,
+ "workingtree_format_to":workingtree_format_to,
+ # mutable_trees_to_test_trees takes two trees and converts them to
+ # whatever relationship the optimiser under test requires.
+ "mutable_trees_to_test_trees":mutable_trees_to_test_trees,
+ # workingtree_to_test_tree is set to disable changing individual
+ # trees: instead the mutable_trees_to_test_trees helper is used.
+ "_workingtree_to_test_tree": return_parameter,
+ })
+ result.append(scenario)
+ return result
+
+
+def mutable_trees_to_preview_trees(test_case, source, target):
+ preview = TransformPreview(target)
+ test_case.addCleanup(preview.finalize)
+ return source, preview.get_preview_tree()
+
+def mutable_trees_to_revision_trees(test_case, source, target):
+ """Convert both trees to repository based revision trees."""
+ return (revision_tree_from_workingtree(test_case, source),
+ revision_tree_from_workingtree(test_case, target))
+
+
+def load_tests(standard_tests, module, loader):
+ default_tree_format = WorkingTreeFormat3()
+ submod_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_intertree.test_compare',
+ 'bzrlib.tests.per_intertree.test_file_content_matches',
+ ])
+ test_intertree_permutations = [
+ # test InterTree with two default-format working trees.
+ (InterTree.__name__, InterTree,
+ default_tree_format, default_tree_format,
+ return_provided_trees)]
+ for optimiser in InterTree._optimisers:
+ if optimiser is revisiontree.InterCHKRevisionTree:
+ # XXX: we shouldn't use an Intertree object to detect inventories
+ # -- vila 20090311
+ chk_tree_format = WorkingTreeFormat4()
+ chk_tree_format._get_matchingbzrdir = \
+ lambda:bzrlib.controldir.format_registry.make_bzrdir('2a')
+ test_intertree_permutations.append(
+ (InterTree.__name__ + "(CHKInventory)",
+ InterTree,
+ chk_tree_format,
+ chk_tree_format,
+ mutable_trees_to_revision_trees))
+ elif optimiser is bzrlib.workingtree_4.InterDirStateTree:
+ # It's a little ugly to be conditional here, but less so than having
+ # the optimiser listed twice.
+ # Add once, compiled version
+ test_intertree_permutations.append(
+ (optimiser.__name__ + "(C)",
+ optimiser,
+ optimiser._matching_from_tree_format,
+ optimiser._matching_to_tree_format,
+ optimiser.make_source_parent_tree_compiled_dirstate))
+ # python version
+ test_intertree_permutations.append(
+ (optimiser.__name__ + "(PY)",
+ optimiser,
+ optimiser._matching_from_tree_format,
+ optimiser._matching_to_tree_format,
+ optimiser.make_source_parent_tree_python_dirstate))
+ elif (optimiser._matching_from_tree_format is not None and
+ optimiser._matching_to_tree_format is not None):
+ test_intertree_permutations.append(
+ (optimiser.__name__,
+ optimiser,
+ optimiser._matching_from_tree_format,
+ optimiser._matching_to_tree_format,
+ optimiser._test_mutable_trees_to_test_trees))
+ # PreviewTree does not have an InterTree optimiser class.
+ test_intertree_permutations.append(
+ (InterTree.__name__ + "(PreviewTree)",
+ InterTree,
+ default_tree_format,
+ default_tree_format,
+ mutable_trees_to_preview_trees))
+ scenarios = make_scenarios(
+ default_transport,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ test_intertree_permutations)
+ # add the tests for the sub modules to the standard tests.
+ return multiply_tests(submod_tests, scenarios, standard_tests)
diff --git a/bzrlib/tests/per_intertree/test_compare.py b/bzrlib/tests/per_intertree/test_compare.py
new file mode 100644
index 0000000..76bc131
--- /dev/null
+++ b/bzrlib/tests/per_intertree/test_compare.py
@@ -0,0 +1,1846 @@
+# Copyright (C) 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the InterTree.compare() function."""
+
+import os
+import shutil
+
+from bzrlib import (
+ errors,
+ mutabletree,
+ tests,
+ )
+from bzrlib.osutils import has_symlinks
+from bzrlib.tests.per_intertree import TestCaseWithTwoTrees
+from bzrlib.tests import (
+ features,
+ )
+
+# TODO: test the include_root option.
+# TODO: test that renaming a directory x->y does not emit a rename for the
+# child x/a->y/a.
+# TODO: test that renaming a directory x->y does not emit a rename for the child
+# x/a -> y/a when a supplied_files argument gives either 'x/' or 'y/a'
+# -> that is, when the renamed parent is not processed by the function.
+# TODO: test items are only emitted once when a specific_files list names a dir
+# whose parent is now a child.
+# TODO: test comparisons between trees with different root ids. mbp 20070301
+#
+# TODO: More comparisons between trees with subtrees in different states.
+#
+# TODO: Many tests start out by setting the tree roots ids the same, maybe
+# that should just be the default for these tests, by changing
+# make_branch_and_tree. mbp 20070307
+
+class TestCompare(TestCaseWithTwoTrees):
+
+ def test_compare_empty_trees(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_no_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_empty_to_abc_content(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([('a', 'a-id', 'file'),
+ ('b', 'b-id', 'directory'),
+ ('b/c', 'c-id', 'file'),
+ ], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_dangling(self):
+ # This test depends on the ability for some trees to have a difference
+ # between a 'versioned present' and 'versioned not present' (aka
+ # dangling) file. In this test there are two trees each with a separate
+ # dangling file, and the dangling files should be considered absent for
+ # the test.
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['2/a'])
+ tree2.add('a')
+ os.unlink('2/a')
+ self.build_tree(['1/b'])
+ tree1.add('b')
+ os.unlink('1/b')
+ # the conversion to test trees here will leave the trees intact for the
+ # default intertree, but may perform a commit for other tree types,
+ # which may reduce the validity of the test. XXX: Think about how to
+ # address this.
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_abc_content_to_empty(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_no_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([('a', 'a-id', 'file'),
+ ('b', 'b-id', 'directory'),
+ ('b/c', 'c-id', 'file'),
+ ], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_content_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_2(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([('a', 'a-id', 'file', True, False)], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_meta_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_3(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([('b/c', 'c-id', 'file', False, True)], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_file_rename(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_4(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([('a', 'd', 'a-id', 'file', False, False)], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_file_rename_and_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_5(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([('a', 'd', 'a-id', 'file', True, False)], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_file_rename_and_meta_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_6(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([('b/c', 'e', 'c-id', 'file', False, True)], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_empty_to_abc_content_a_only(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare(specific_files=['a'])
+ self.assertEqual([('a', 'a-id', 'file')], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_empty_to_abc_content_a_and_c_only(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare(
+ specific_files=['a', 'b/c'])
+ self.assertEqual(
+ [('a', 'a-id', 'file'), (u'b', 'b-id', 'directory'),
+ ('b/c', 'c-id', 'file')],
+ d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_empty_to_abc_content_c_only(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare(
+ specific_files=['b/c'])
+ self.assertEqual(
+ [(u'b', 'b-id', 'directory'), ('b/c', 'c-id', 'file')], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_empty_to_abc_content_b_only(self):
+ """Restricting to a dir matches the children of the dir."""
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare(specific_files=['b'])
+ self.assertEqual(
+ [('b', 'b-id', 'directory'), ('b/c', 'c-id', 'file')],
+ d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_unchanged_with_renames_and_modifications(self):
+ """want_unchanged should generate a list of unchanged entries."""
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_5(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare(want_unchanged=True)
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([('a', 'd', 'a-id', 'file', True, False)], d.renamed)
+ self.assertEqual(
+ [(u'b', 'b-id', 'directory'), (u'b/c', 'c-id', 'file')],
+ d.unchanged)
+
+ def test_extra_trees_finds_ids(self):
+ """Ask for a delta between two trees with a path present in a third."""
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_3(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare(specific_files=['b'])
+ # the type of tree-3 does not matter - it is used as a lookup, not
+ # a dispatch. XXX: For dirstate it does speak to the optimisability of
+ # the lookup, in merged trees it can be fast-pathed. We probably want
+ # two tests: one as is, and one with it as a pending merge.
+ tree3 = self.make_branch_and_tree('3')
+ tree3 = self.get_tree_no_parents_abc_content_6(tree3)
+ tree3.lock_read()
+ self.addCleanup(tree3.unlock)
+ # tree 3 has 'e' which is 'c-id'. Tree 1 has c-id at b/c, and Tree 2
+ # has c-id at b/c with its exec flag toggled.
+ # without extra_trees, we should get no modifications from this
+ # so do one, to be sure the test is valid.
+ d = self.intertree_class(tree1, tree2).compare(
+ specific_files=['e'])
+ self.assertEqual([], d.modified)
+ # now give it an additional lookup:
+ d = self.intertree_class(tree1, tree2).compare(
+ specific_files=['e'], extra_trees=[tree3])
+ self.assertEqual([], d.added)
+ self.assertEqual([('b/c', 'c-id', 'file', False, True)], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+
+ def test_require_versioned(self):
+ # this is not quite a robust test, as it passes in missing paths
+ # rather than present-but-not-versioned paths. At the moment there is
+ # no mechanism for managing the test trees (which are readonly) to
+ # get present-but-not-versioned files for trees that can do that.
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ self.assertRaises(errors.PathsNotVersionedError,
+ self.intertree_class(tree1, tree2).compare,
+ specific_files=['d'],
+ require_versioned=True)
+
+ def test_default_ignores_unversioned_files(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree1/a', 'tree1/c',
+ 'tree2/a', 'tree2/b', 'tree2/c'])
+ tree1.add(['a', 'c'], ['a-id', 'c-id'])
+ tree2.add(['a', 'c'], ['a-id', 'c-id'])
+
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ d = self.intertree_class(tree1, tree2).compare()
+ self.assertEqual([], d.added)
+ self.assertEqual([(u'a', 'a-id', 'file', True, False),
+ (u'c', 'c-id', 'file', True, False)], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+ self.assertEqual([], d.unversioned)
+
+ def test_unversioned_paths_in_tree(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree2/file', 'tree2/dir/'])
+ if has_symlinks():
+ os.symlink('target', 'tree2/link')
+ links_supported = True
+ else:
+ links_supported = False
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+ d = self.intertree_class(tree1, tree2).compare(want_unversioned=True)
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.modified)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.unchanged)
+ expected_unversioned = [(u'dir', None, 'directory'),
+ (u'file', None, 'file')]
+ if links_supported:
+ expected_unversioned.append((u'link', None, 'symlink'))
+ self.assertEqual(expected_unversioned, d.unversioned)
+
+
+class TestIterChanges(TestCaseWithTwoTrees):
+ """Test the comparison iterator"""
+
+ def assertEqualIterChanges(self, left_changes, right_changes):
+ """Assert that left_changes == right_changes.
+
+ :param left_changes: A list of the output from iter_changes.
+ :param right_changes: A list of the output from iter_changes.
+ """
+ left_changes = sorted(left_changes)
+ right_changes = sorted(right_changes)
+ if left_changes == right_changes:
+ return
+ # setify to get item by item differences, but we can only do this
+ # when all the ids are unique on both sides.
+ left_dict = dict((item[0], item) for item in left_changes)
+ right_dict = dict((item[0], item) for item in right_changes)
+ if (len(left_dict) != len(left_changes) or
+ len(right_dict) != len(right_changes)):
+ # Can't do a direct comparison. We could do a sequence diff, but
+ # for now just do a regular assertEqual.
+ self.assertEqual(left_changes, right_changes)
+ keys = set(left_dict).union(set(right_dict))
+ different = []
+ same = []
+ for key in keys:
+ left_item = left_dict.get(key)
+ right_item = right_dict.get(key)
+ if left_item == right_item:
+ same.append(str(left_item))
+ else:
+ different.append(" %s\n %s" % (left_item, right_item))
+ self.fail("iter_changes output different. Unchanged items:\n" +
+ "\n".join(same) + "\nChanged items:\n" + "\n".join(different))
+
+ def do_iter_changes(self, tree1, tree2, **extra_args):
+ """Helper to run iter_changes from tree1 to tree2.
+
+ :param tree1, tree2: The source and target trees. These will be locked
+ automatically.
+ :param **extra_args: Extra args to pass to iter_changes. This is not
+ inspected by this test helper.
+ """
+ tree1.lock_read()
+ tree2.lock_read()
+ try:
+ # sort order of output is not strictly defined
+ return sorted(self.intertree_class(tree1, tree2)
+ .iter_changes(**extra_args))
+ finally:
+ tree1.unlock()
+ tree2.unlock()
+
+ def check_has_changes(self, expected, tree1, tree2):
+ # has_changes is defined for mutable trees only
+ if not isinstance(tree2, mutabletree.MutableTree):
+ if isinstance(tree1, mutabletree.MutableTree):
+ # Let's switch the trees since has_changes() is commutative
+ # (where we can apply it)
+ tree2, tree1 = tree1, tree2
+ else:
+ # Neither tree can be used
+ return
+ tree1.lock_read()
+ try:
+ tree2.lock_read()
+ try:
+ self.assertEqual(expected, tree2.has_changes(tree1))
+ finally:
+ tree2.unlock()
+ finally:
+ tree1.unlock()
+
+ def mutable_trees_to_locked_test_trees(self, tree1, tree2):
+ """Convert the working trees into test trees.
+
+ Read lock them, and add the unlock to the cleanup.
+ """
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ tree1.lock_read()
+ self.addCleanup(tree1.unlock)
+ tree2.lock_read()
+ self.addCleanup(tree2.unlock)
+ return tree1, tree2
+
+ def make_tree_with_special_names(self):
+ """Create a tree with filenames chosen to exercise the walk order."""
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ paths, path_ids = self._create_special_names(tree2, 'tree2')
+ tree2.commit('initial', rev_id='rev-1')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ return (tree1, tree2, paths, path_ids)
+
+ def make_trees_with_special_names(self):
+ """Both trees will use the special names.
+
+ But the contents will differ for each file.
+ """
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ paths, path_ids = self._create_special_names(tree1, 'tree1')
+ paths, path_ids = self._create_special_names(tree2, 'tree2')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ return (tree1, tree2, paths, path_ids)
+
+ def _create_special_names(self, tree, base_path):
+ """Create a tree with paths that expose differences in sort orders."""
+ # Each directory will have a single file named 'f' inside
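+ # (Names such as 'a-a' sort before 'a/a' when compared as plain
+ # strings, since '-' < '/', but after all of 'a's children when paths
+ # are compared directory by directory, which is what exposes
+ # differences in walk order.)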
+ dirs = ['a',
+ 'a-a',
+ 'a/a',
+ 'a/a-a',
+ 'a/a/a',
+ 'a/a/a-a',
+ 'a/a/a/a',
+ 'a/a/a/a-a',
+ 'a/a/a/a/a',
+ ]
+ with_slashes = []
+ paths = []
+ path_ids = []
+ for d in dirs:
+ with_slashes.append(base_path + '/' + d + '/')
+ with_slashes.append(base_path + '/' + d + '/f')
+ paths.append(d)
+ paths.append(d+'/f')
+ path_ids.append(d.replace('/', '_') + '-id')
+ path_ids.append(d.replace('/', '_') + '_f-id')
+ self.build_tree(with_slashes)
+ tree.add(paths, path_ids)
+ return paths, path_ids
+
+ def test_compare_empty_trees(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_no_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ self.assertEqual([], self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(False, tree1, tree2)
+
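+ # The helpers below construct the entries that do_iter_changes is
+ # expected to yield: 8-tuples of (file_id, (source_path, target_path),
+ # changed_content, versioned, parent_id, name, kind, executable), where
+ # the path pair and the last five fields are all (source, target) pairs.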
+ def added(self, tree, file_id):
+ path, entry = self.get_path_entry(tree, file_id)
+ return (file_id, (None, path), True, (False, True), (None, entry.parent_id),
+ (None, entry.name), (None, entry.kind),
+ (None, entry.executable))
+
+ @staticmethod
+ def get_path_entry(tree, file_id):
+ iterator = tree.iter_entries_by_dir(specific_file_ids=[file_id])
+ return iterator.next()
+
+ def content_changed(self, tree, file_id):
+ path, entry = self.get_path_entry(tree, file_id)
+ return (file_id, (path, path), True, (True, True),
+ (entry.parent_id, entry.parent_id),
+ (entry.name, entry.name), (entry.kind, entry.kind),
+ (entry.executable, entry.executable))
+
+ def kind_changed(self, from_tree, to_tree, file_id):
+ from_path, old_entry = self.get_path_entry(from_tree, file_id)
+ path, new_entry = self.get_path_entry(to_tree, file_id)
+ return (file_id, (from_path, path), True, (True, True),
+ (old_entry.parent_id, new_entry.parent_id),
+ (old_entry.name, new_entry.name),
+ (old_entry.kind, new_entry.kind),
+ (old_entry.executable, new_entry.executable))
+
+ def missing(self, file_id, from_path, to_path, parent_id, kind):
+ _, from_basename = os.path.split(from_path)
+ _, to_basename = os.path.split(to_path)
+ # missing files have both paths, but no kind.
+ return (file_id, (from_path, to_path), True, (True, True),
+ (parent_id, parent_id),
+ (from_basename, to_basename), (kind, None), (False, False))
+
+ def deleted(self, tree, file_id):
+ entry = tree.root_inventory[file_id]
+ path = tree.id2path(file_id)
+ return (file_id, (path, None), True, (True, False), (entry.parent_id, None),
+ (entry.name, None), (entry.kind, None),
+ (entry.executable, None))
+
+ def renamed(self, from_tree, to_tree, file_id, content_changed):
+ from_path, from_entry = self.get_path_entry(from_tree, file_id)
+ to_path, to_entry = self.get_path_entry(to_tree, file_id)
+ return (file_id, (from_path, to_path), content_changed, (True, True),
+ (from_entry.parent_id, to_entry.parent_id),
+ (from_entry.name, to_entry.name),
+ (from_entry.kind, to_entry.kind),
+ (from_entry.executable, to_entry.executable))
+
+ def unchanged(self, tree, file_id):
+ path, entry = self.get_path_entry(tree, file_id)
+ parent = entry.parent_id
+ name = entry.name
+ kind = entry.kind
+ executable = entry.executable
+ return (file_id, (path, path), False, (True, True),
+ (parent, parent), (name, name), (kind, kind),
+ (executable, executable))
+
+ def unversioned(self, tree, path):
+ """Create an unversioned result."""
+ _, basename = os.path.split(path)
+ kind = tree._comparison_data(None, path)[0]
+ return (None, (None, path), True, (False, False), (None, None),
+ (None, basename), (None, kind),
+ (None, False))
+
+ def test_empty_to_abc_content(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ expected_results = sorted([
+ self.added(tree2, 'root-id'),
+ self.added(tree2, 'a-id'),
+ self.added(tree2, 'b-id'),
+ self.added(tree2, 'c-id'),
+ self.deleted(tree1, 'empty-root-id')])
+ self.assertEqual(expected_results, self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_empty_specific_files(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.assertEqual([],
+ self.do_iter_changes(tree1, tree2, specific_files=[]))
+
+ def test_no_specific_files(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ expected_results = sorted([
+ self.added(tree2, 'root-id'),
+ self.added(tree2, 'a-id'),
+ self.added(tree2, 'b-id'),
+ self.added(tree2, 'c-id'),
+ self.deleted(tree1, 'empty-root-id')])
+ self.assertEqual(expected_results, self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_empty_to_abc_content_a_only(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.assertEqual(
+ sorted([self.added(tree2, 'root-id'),
+ self.added(tree2, 'a-id'),
+ self.deleted(tree1, 'empty-root-id')]),
+ self.do_iter_changes(tree1, tree2, specific_files=['a']))
+
+ def test_abc_content_to_empty_a_only(self):
+ # For deletes we don't need to pick up parents.
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_no_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.assertEqual(
+ [self.deleted(tree1, 'a-id')],
+ self.do_iter_changes(tree1, tree2, specific_files=['a']))
+
+ def test_abc_content_to_empty_b_only(self):
+ # When b stops being a directory we have to pick up b/c as well.
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_no_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.assertEqual(
+ [self.deleted(tree1, 'b-id'), self.deleted(tree1, 'c-id')],
+ self.do_iter_changes(tree1, tree2, specific_files=['b']))
+
+ def test_empty_to_abc_content_a_and_c_only(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_no_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ expected_result = sorted([self.added(tree2, 'root-id'),
+ self.added(tree2, 'a-id'), self.added(tree2, 'b-id'),
+ self.added(tree2, 'c-id'), self.deleted(tree1, 'empty-root-id')])
+ self.assertEqual(expected_result,
+ self.do_iter_changes(tree1, tree2, specific_files=['a', 'b/c']))
+
+ def test_abc_content_to_empty(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_no_content(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ expected_results = sorted([
+ self.added(tree2, 'empty-root-id'),
+ self.deleted(tree1, 'root-id'), self.deleted(tree1, 'a-id'),
+ self.deleted(tree1, 'b-id'), self.deleted(tree1, 'c-id')])
+ self.assertEqual(
+ expected_results,
+ self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_content_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_2(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ root_id = tree1.path2id('')
+ self.assertEqual([('a-id', ('a', 'a'), True, (True, True),
+ (root_id, root_id), ('a', 'a'),
+ ('file', 'file'), (False, False))],
+ self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_meta_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_3(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ self.assertEqual([('c-id', ('b/c', 'b/c'), False, (True, True),
+ ('b-id', 'b-id'), ('c', 'c'), ('file', 'file'),
+ (False, True))],
+ self.do_iter_changes(tree1, tree2))
+
+ def test_empty_dir(self):
+ """an empty dir should not cause glitches to surrounding files."""
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ # the pathname is chosen to fall between 'a' and 'b'.
+ self.build_tree(['1/a-empty/', '2/a-empty/'])
+ tree1.add(['a-empty'], ['a-empty'])
+ tree2.add(['a-empty'], ['a-empty'])
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ expected = []
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+
+ def test_file_rename(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_4(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ root_id = tree1.path2id('')
+ self.assertEqual([('a-id', ('a', 'd'), False, (True, True),
+ (root_id, root_id), ('a', 'd'), ('file', 'file'),
+ (False, False))],
+ self.do_iter_changes(tree1, tree2))
+
+ def test_file_rename_and_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_5(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ root_id = tree1.path2id('')
+ self.assertEqual([('a-id', ('a', 'd'), True, (True, True),
+ (root_id, root_id), ('a', 'd'), ('file', 'file'),
+ (False, False))],
+ self.do_iter_changes(tree1, tree2))
+
+ def test_specific_content_modification_grabs_parents(self):
+ # When the only direct change to a specified file is a content change,
+ # and it's in a reparented subtree, the parents are grabbed.
+ tree1 = self.make_branch_and_tree('1')
+ tree1.mkdir('changing', 'parent-id')
+ tree1.mkdir('changing/unchanging', 'mid-id')
+ tree1.add(['changing/unchanging/file'], ['file-id'], ['file'])
+ tree1.put_file_bytes_non_atomic('file-id', 'a file')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree2.mkdir('changed', 'parent-id')
+ tree2.mkdir('changed/unchanging', 'mid-id')
+ tree2.add(['changed/unchanging/file'], ['file-id'], ['file'])
+ tree2.put_file_bytes_non_atomic('file-id', 'changed content')
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ # parent-id has changed, as has file-id
+ root_id = tree1.path2id('')
+ self.assertEqualIterChanges(
+ [self.renamed(tree1, tree2, 'parent-id', False),
+ self.renamed(tree1, tree2, 'file-id', True)],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=['changed/unchanging/file']))
+
+ def test_specific_content_modification_grabs_parents_root_changes(self):
+ # When the only direct change to a specified file is a content change,
+ # and it's in a reparented subtree, the parents are grabbed, even if
+ # that includes the root.
+ tree1 = self.make_branch_and_tree('1')
+ tree1.set_root_id('old')
+ tree1.mkdir('changed', 'parent-id')
+ tree1.mkdir('changed/unchanging', 'mid-id')
+ tree1.add(['changed/unchanging/file'], ['file-id'], ['file'])
+ tree1.put_file_bytes_non_atomic('file-id', 'a file')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id('new')
+ tree2.mkdir('changed', 'parent-id')
+ tree2.mkdir('changed/unchanging', 'mid-id')
+ tree2.add(['changed/unchanging/file'], ['file-id'], ['file'])
+ tree2.put_file_bytes_non_atomic('file-id', 'changed content')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # old is gone, new is added, parent-id has changed (reparented), as has
+ # file-id (content)
+ root_id = tree1.path2id('')
+ self.assertEqualIterChanges(
+ [self.renamed(tree1, tree2, 'parent-id', False),
+ self.added(tree2, 'new'),
+ self.deleted(tree1, 'old'),
+ self.renamed(tree1, tree2, 'file-id', True)],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=['changed/unchanging/file']))
+
+ def test_specific_with_rename_under_new_dir_reports_new_dir(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_7(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ # d(d-id) is new, e is b-id renamed.
+ root_id = tree1.path2id('')
+ self.assertEqualIterChanges(
+ [self.renamed(tree1, tree2, 'b-id', False),
+ self.added(tree2, 'd-id')],
+ self.do_iter_changes(tree1, tree2, specific_files=['d/e']))
+
+ def test_specific_with_rename_under_dir_under_new_dir_reports_new_dir(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_7(tree2)
+ tree2.rename_one('a', 'd/e/a')
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ # d is new, d/e is b-id renamed, d/e/a is a-id renamed
+ root_id = tree1.path2id('')
+ self.assertEqualIterChanges(
+ [self.renamed(tree1, tree2, 'b-id', False),
+ self.added(tree2, 'd-id'),
+ self.renamed(tree1, tree2, 'a-id', False)],
+ self.do_iter_changes(tree1, tree2, specific_files=['d/e/a']))
+
+ def test_specific_old_parent_same_path_new_parent(self):
+ # when a parent is new at its path, if the path was used in the source
+ # it must be emitted as a change.
+ tree1 = self.make_branch_and_tree('1')
+ tree1.add(['a'], ['a-id'], ['file'])
+ tree1.put_file_bytes_non_atomic('a-id', 'a file')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree2.mkdir('a', 'b-id')
+ tree2.add(['a/c'], ['c-id'], ['file'])
+ tree2.put_file_bytes_non_atomic('c-id', 'another file')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # a-id is gone, b-id and c-id are added.
+ self.assertEqualIterChanges(
+ [self.deleted(tree1, 'a-id'),
+ self.added(tree2, 'b-id'),
+ self.added(tree2, 'c-id')],
+ self.do_iter_changes(tree1, tree2, specific_files=['a/c']))
+
+ def test_specific_old_parent_becomes_file(self):
+ # When an old parent included because of a path conflict becomes a
+ # non-directory, all of its children have to be included in the delta.
+ tree1 = self.make_branch_and_tree('1')
+ tree1.mkdir('a', 'a-old-id')
+ tree1.mkdir('a/reparented', 'reparented-id')
+ tree1.mkdir('a/deleted', 'deleted-id')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree2.mkdir('a', 'a-new-id')
+ tree2.mkdir('a/reparented', 'reparented-id')
+ tree2.add(['b'], ['a-old-id'], ['file'])
+ tree2.put_file_bytes_non_atomic('a-old-id', '')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # a-old-id is kind-changed, a-new-id is added, reparented-id is renamed,
+ # deleted-id is gone
+ self.assertEqualIterChanges(
+ [self.kind_changed(tree1, tree2, 'a-old-id'),
+ self.added(tree2, 'a-new-id'),
+ self.renamed(tree1, tree2, 'reparented-id', False),
+ self.deleted(tree1, 'deleted-id')],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=['a/reparented']))
+
+ def test_specific_old_parent_is_deleted(self):
+ # When an old parent included because of a path conflict is removed,
+ # all of its children have to be included in the delta.
+ tree1 = self.make_branch_and_tree('1')
+ tree1.mkdir('a', 'a-old-id')
+ tree1.mkdir('a/reparented', 'reparented-id')
+ tree1.mkdir('a/deleted', 'deleted-id')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree2.mkdir('a', 'a-new-id')
+ tree2.mkdir('a/reparented', 'reparented-id')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # a-old-id is gone, a-new-id is added, reparented-id is renamed,
+ # deleted-id is gone
+ self.assertEqualIterChanges(
+ [self.deleted(tree1, 'a-old-id'),
+ self.added(tree2, 'a-new-id'),
+ self.renamed(tree1, tree2, 'reparented-id', False),
+ self.deleted(tree1, 'deleted-id')],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=['a/reparented']))
+
+ def test_specific_old_parent_child_collides_with_unselected_new(self):
+ # When the child of an old parent (included because of a path conflict)
+ # ends up colliding at its path with some unselected item in the source,
+ # that item also needs to be included (because otherwise the output of
+ # applying the delta to the source would have two items at that path).
+ tree1 = self.make_branch_and_tree('1')
+ tree1.mkdir('a', 'a-old-id')
+ tree1.mkdir('a/reparented', 'reparented-id')
+ tree1.mkdir('collides', 'collides-id')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree2.mkdir('a', 'a-new-id')
+ tree2.mkdir('a/selected', 'selected-id')
+ tree2.mkdir('collides', 'reparented-id')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # a-old-id is gone, a-new-id is added, reparented-id is renamed,
+ # collides-id is gone, selected-id is new.
+ self.assertEqualIterChanges(
+ [self.deleted(tree1, 'a-old-id'),
+ self.added(tree2, 'a-new-id'),
+ self.renamed(tree1, tree2, 'reparented-id', False),
+ self.deleted(tree1, 'collides-id'),
+ self.added(tree2, 'selected-id')],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=['a/selected']))
+
+ def test_specific_old_parent_child_dir_stops_being_dir(self):
+ # When the child of an old parent also stops being a directory, its
+ # children must also be included. This test checks that downward
+ # recursion is done appropriately by starting at a child of the root of
+ # a deleted subtree (a/reparented), and checking that a sibling
+ # directory (a/deleted) has its children included in the delta.
+ tree1 = self.make_branch_and_tree('1')
+ tree1.mkdir('a', 'a-old-id')
+ tree1.mkdir('a/reparented', 'reparented-id-1')
+ tree1.mkdir('a/deleted', 'deleted-id-1')
+ tree1.mkdir('a/deleted/reparented', 'reparented-id-2')
+ tree1.mkdir('a/deleted/deleted', 'deleted-id-2')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree2.set_root_id(tree1.get_root_id())
+ tree2.mkdir('a', 'a-new-id')
+ tree2.mkdir('a/reparented', 'reparented-id-1')
+ tree2.mkdir('reparented', 'reparented-id-2')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # a-old-id is gone, a-new-id is added, reparented-id-1, -2 are renamed,
+ # deleted-id-1 and -2 are gone.
+ self.assertEqualIterChanges(
+ [self.deleted(tree1, 'a-old-id'),
+ self.added(tree2, 'a-new-id'),
+ self.renamed(tree1, tree2, 'reparented-id-1', False),
+ self.renamed(tree1, tree2, 'reparented-id-2', False),
+ self.deleted(tree1, 'deleted-id-1'),
+ self.deleted(tree1, 'deleted-id-2')],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=['a/reparented']))
+
+ def test_file_rename_and_meta_modification(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_6(tree2)
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ root_id = tree1.path2id('')
+ self.assertEqual([('c-id', ('b/c', 'e'), False, (True, True),
+ ('b-id', root_id), ('c', 'e'), ('file', 'file'),
+ (False, True))],
+ self.do_iter_changes(tree1, tree2))
+
+ def test_file_becomes_unversionable_bug_438569(self):
+ # This isn't strictly an intertree problem, but it's the intertree code
+ # path that triggers all stat cache updates on both xml and dirstate
+ # trees.
+ # In bug 438569, a file becoming a fifo causes an assert. FIFOs are
+ # not versionable or diffable. For now, we simply stop cold when they
+ # are detected (because we don't know how far through the code the
+ # assumption 'fifos do not exist' goes). In future we could report
+ # the kind change and have commit refuse to go further, or something
+ # similar. One particular reason for choosing this approach is that
+ # there is no minikind for 'fifo' in dirstate today, so we can't
+ # actually update records that way.
+ # To add confusion, the totally generic code path works - but it
+ # doesn't update persistent metadata. So this test permits InterTrees
+ # to either work, or fail with BadFileKindError.
+ self.requireFeature(features.OsFifoFeature)
+ tree1 = self.make_branch_and_tree('1')
+ self.build_tree(['1/a'])
+ tree1.set_root_id('root-id')
+ tree1.add(['a'], ['a-id'])
+ tree2 = self.make_branch_and_tree('2')
+ os.mkfifo('2/a')
+ tree2.add(['a'], ['a-id'], ['file'])
+ try:
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ except (KeyError,):
+ raise tests.TestNotApplicable(
+ "Cannot represent a FIFO in this case %s" % self.id())
+ try:
+ self.do_iter_changes(tree1, tree2)
+ except errors.BadFileKindError:
+ pass
+
+ def test_missing_in_target(self):
+ """Test with the target files versioned but absent from disk."""
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content(tree2)
+ os.unlink('2/a')
+ shutil.rmtree('2/b')
+ # TODO: have a symlink here?
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ self.not_applicable_if_missing_in('a', tree2)
+ self.not_applicable_if_missing_in('b', tree2)
+ root_id = tree1.path2id('')
+ expected = sorted([
+ self.missing('a-id', 'a', 'a', root_id, 'file'),
+ self.missing('b-id', 'b', 'b', root_id, 'directory'),
+ self.missing('c-id', 'b/c', 'b/c', 'b-id', 'file'),
+ ])
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+
+ def test_missing_and_renamed(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree1/file'])
+ tree1.add(['file'], ['file-id'])
+ self.build_tree(['tree2/directory/'])
+ tree2.add(['directory'], ['file-id'])
+ os.rmdir('tree2/directory')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_missing_in('directory', tree2)
+
+ root_id = tree1.path2id('')
+ expected = sorted([
+ self.missing('file-id', 'file', 'directory', root_id, 'file'),
+ ])
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+
+ def test_only_in_source_and_missing(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree1/file'])
+ tree1.add(['file'], ['file-id'])
+ os.unlink('tree1/file')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_missing_in('file', tree1)
+ root_id = tree1.path2id('')
+ expected = [('file-id', ('file', None), False, (True, False),
+ (root_id, None), ('file', None), (None, None), (False, None))]
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+
+ def test_only_in_target_and_missing(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree2/file'])
+ tree2.add(['file'], ['file-id'])
+ os.unlink('tree2/file')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_missing_in('file', tree2)
+ root_id = tree1.path2id('')
+ expected = [('file-id', (None, 'file'), False, (False, True),
+ (None, root_id), (None, 'file'), (None, None), (None, False))]
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+
+ def test_only_in_target_missing_subtree_specific_bug_367632(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree2/a-dir/', 'tree2/a-dir/a-file'])
+ tree2.add(['a-dir', 'a-dir/a-file'], ['dir-id', 'file-id'])
+ os.unlink('tree2/a-dir/a-file')
+ os.rmdir('tree2/a-dir')
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_missing_in('a-dir', tree2)
+ root_id = tree1.path2id('')
+ expected = [
+ ('dir-id', (None, 'a-dir'), False, (False, True),
+ (None, root_id), (None, 'a-dir'), (None, None), (None, False)),
+ ('file-id', (None, 'a-dir/a-file'), False, (False, True),
+ (None, 'dir-id'), (None, 'a-file'), (None, None), (None, False))
+ ]
+ # bug 367632 showed that specifying the root broke some code paths,
+ # so we check this contract with and without it.
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2, specific_files=['']))
+
+ def test_unchanged_with_renames_and_modifications(self):
+ """want_unchanged should generate a list of unchanged entries."""
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ tree1 = self.get_tree_no_parents_abc_content(tree1)
+ tree2 = self.get_tree_no_parents_abc_content_5(tree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ root_id = tree1.path2id('')
+ self.assertEqual(sorted([self.unchanged(tree1, root_id),
+ self.unchanged(tree1, 'b-id'),
+ ('a-id', ('a', 'd'), True, (True, True),
+ (root_id, root_id), ('a', 'd'), ('file', 'file'),
+ (False, False)), self.unchanged(tree1, 'c-id')]),
+ self.do_iter_changes(tree1, tree2, include_unchanged=True))
+
+ def test_compare_subtrees(self):
+ tree1 = self.make_branch_and_tree('1')
+ if not tree1.supports_tree_reference():
+ return
+ tree1.set_root_id('root-id')
+ subtree1 = self.make_branch_and_tree('1/sub')
+ subtree1.set_root_id('subtree-id')
+ tree1.add_reference(subtree1)
+
+ tree2 = self.make_to_branch_and_tree('2')
+ if not tree2.supports_tree_reference():
+ return
+ tree2.set_root_id('root-id')
+ subtree2 = self.make_to_branch_and_tree('2/sub')
+ subtree2.set_root_id('subtree-id')
+ tree2.add_reference(subtree2)
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ self.assertEqual([], list(tree2.iter_changes(tree1)))
+ subtree1.commit('commit', rev_id='commit-a')
+ self.assertEqual([
+ ('root-id',
+ (u'', u''),
+ False,
+ (True, True),
+ (None, None),
+ (u'', u''),
+ ('directory', 'directory'),
+ (False, False)),
+ ('subtree-id',
+ ('sub', 'sub',),
+ False,
+ (True, True),
+ ('root-id', 'root-id'),
+ ('sub', 'sub'),
+ ('tree-reference', 'tree-reference'),
+ (False, False))],
+ list(tree2.iter_changes(tree1,
+ include_unchanged=True)))
+
+ def test_disk_in_subtrees_skipped(self):
+ """subtrees are considered not-in-the-current-tree.
+
+ This test tests the trivial case, where the basis has no paths in the
+ current trees subtree.
+ """
+ tree1 = self.make_branch_and_tree('1')
+ tree1.set_root_id('root-id')
+ tree2 = self.make_to_branch_and_tree('2')
+ if not tree2.supports_tree_reference():
+ return
+ tree2.set_root_id('root-id')
+ subtree2 = self.make_to_branch_and_tree('2/sub')
+ subtree2.set_root_id('subtree-id')
+ tree2.add_reference(subtree2)
+ self.build_tree(['2/sub/file'])
+ subtree2.add(['file'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # this should filter correctly from above
+ self.assertEqual([self.added(tree2, 'subtree-id')],
+ self.do_iter_changes(tree1, tree2, want_unversioned=True))
+ # and when the path is named
+ self.assertEqual([self.added(tree2, 'subtree-id')],
+ self.do_iter_changes(tree1, tree2, specific_files=['sub'],
+ want_unversioned=True))
+
+ def test_default_ignores_unversioned_files(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree1/a', 'tree1/c',
+ 'tree2/a', 'tree2/b', 'tree2/c'])
+ tree1.add(['a', 'c'], ['a-id', 'c-id'])
+ tree2.add(['a', 'c'], ['a-id', 'c-id'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ # We should ignore the fact that 'b' exists in tree-2
+ # because the want_unversioned parameter was not given.
+ expected = sorted([
+ self.content_changed(tree2, 'a-id'),
+ self.content_changed(tree2, 'c-id'),
+ ])
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_unversioned_paths_in_tree(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree2/file', 'tree2/dir/'])
+ if has_symlinks():
+ os.symlink('target', 'tree2/link')
+ links_supported = True
+ else:
+ links_supported = False
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+ expected = [
+ self.unversioned(tree2, 'file'),
+ self.unversioned(tree2, 'dir'),
+ ]
+ if links_supported:
+ expected.append(self.unversioned(tree2, 'link'))
+ expected = sorted(expected)
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2,
+ want_unversioned=True))
+
+ def test_unversioned_paths_in_tree_specific_files(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ self.build_tree(['tree2/file', 'tree2/dir/'])
+ if has_symlinks():
+ os.symlink('target', 'tree2/link')
+ links_supported = True
+ else:
+ links_supported = False
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+ expected = [
+ self.unversioned(tree2, 'file'),
+ self.unversioned(tree2, 'dir'),
+ ]
+ specific_files=['file', 'dir']
+ if links_supported:
+ expected.append(self.unversioned(tree2, 'link'))
+ specific_files.append('link')
+ expected = sorted(expected)
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2,
+ specific_files=specific_files, require_versioned=False,
+ want_unversioned=True))
+
+ def test_unversioned_paths_in_target_matching_source_old_names(self):
+ # it's likely that naive implementations of unversioned file support
+ # will fail if the path was versioned, but is not any more,
+ # due to a rename, not due to unversioning it.
+ # That is, if the old tree has a versioned file 'foo', and
+ # the new tree has the same file but versioned as 'bar', and also
+ # has an unknown file 'foo', we should get back output for
+ # both foo and bar.
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree2/file', 'tree2/dir/',
+ 'tree1/file', 'tree2/movedfile',
+ 'tree1/dir/', 'tree2/moveddir/'])
+ if has_symlinks():
+ os.symlink('target', 'tree1/link')
+ os.symlink('target', 'tree2/link')
+ os.symlink('target', 'tree2/movedlink')
+ links_supported = True
+ else:
+ links_supported = False
+ tree1.add(['file', 'dir'], ['file-id', 'dir-id'])
+ tree2.add(['movedfile', 'moveddir'], ['file-id', 'dir-id'])
+ if links_supported:
+ tree1.add(['link'], ['link-id'])
+ tree2.add(['movedlink'], ['link-id'])
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+ root_id = tree1.path2id('')
+ expected = [
+ self.renamed(tree1, tree2, 'dir-id', False),
+ self.renamed(tree1, tree2, 'file-id', True),
+ self.unversioned(tree2, 'file'),
+ self.unversioned(tree2, 'dir'),
+ ]
+ specific_files=['file', 'dir']
+ if links_supported:
+ expected.append(self.renamed(tree1, tree2, 'link-id', False))
+ expected.append(self.unversioned(tree2, 'link'))
+ specific_files.append('link')
+ expected = sorted(expected)
+ # run once with, and once without specific files, to catch
+ # potentially different code paths.
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2,
+ require_versioned=False,
+ want_unversioned=True))
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2,
+ specific_files=specific_files, require_versioned=False,
+ want_unversioned=True))
+
+ def test_similar_filenames(self):
+ """Test when we have a few files with similar names."""
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+
+ # The trees are actually identical, but they happen to contain
+ # similarly named files.
+ self.build_tree(['tree1/a/',
+ 'tree1/a/b/',
+ 'tree1/a/b/c/',
+ 'tree1/a/b/c/d/',
+ 'tree1/a-c/',
+ 'tree1/a-c/e/',
+ 'tree2/a/',
+ 'tree2/a/b/',
+ 'tree2/a/b/c/',
+ 'tree2/a/b/c/d/',
+ 'tree2/a-c/',
+ 'tree2/a-c/e/',
+ ])
+ tree1.add(['a', 'a/b', 'a/b/c', 'a/b/c/d', 'a-c', 'a-c/e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'a-c-id', 'e-id'])
+ tree2.add(['a', 'a/b', 'a/b/c', 'a/b/c/d', 'a-c', 'a-c/e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'a-c-id', 'e-id'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+
+ self.assertEqual([], self.do_iter_changes(tree1, tree2,
+ want_unversioned=True))
+ expected = sorted([
+ self.unchanged(tree2, tree2.get_root_id()),
+ self.unchanged(tree2, 'a-id'),
+ self.unchanged(tree2, 'b-id'),
+ self.unchanged(tree2, 'c-id'),
+ self.unchanged(tree2, 'd-id'),
+ self.unchanged(tree2, 'a-c-id'),
+ self.unchanged(tree2, 'e-id'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ want_unversioned=True,
+ include_unchanged=True))
+
+ def test_unversioned_subtree_only_emits_root(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree2/dir/', 'tree2/dir/file'])
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+ expected = [
+ self.unversioned(tree2, 'dir'),
+ ]
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2,
+ want_unversioned=True))
+
+ def make_trees_with_symlinks(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree1/fromfile', 'tree1/fromdir/'])
+ self.build_tree(['tree2/tofile', 'tree2/todir/', 'tree2/unknown'])
+ os.symlink('original', 'tree1/changed')
+ os.symlink('original', 'tree1/removed')
+ os.symlink('original', 'tree1/tofile')
+ os.symlink('original', 'tree1/todir')
+ # we make the unchanged link point at unknown to catch incorrect
+ # symlink-following code in the specified_files test.
+ os.symlink('unknown', 'tree1/unchanged')
+ os.symlink('new', 'tree2/added')
+ os.symlink('new', 'tree2/changed')
+ os.symlink('new', 'tree2/fromfile')
+ os.symlink('new', 'tree2/fromdir')
+ os.symlink('unknown', 'tree2/unchanged')
+ from_paths_and_ids = [
+ 'fromdir',
+ 'fromfile',
+ 'changed',
+ 'removed',
+ 'todir',
+ 'tofile',
+ 'unchanged',
+ ]
+ to_paths_and_ids = [
+ 'added',
+ 'fromdir',
+ 'fromfile',
+ 'changed',
+ 'todir',
+ 'tofile',
+ 'unchanged',
+ ]
+ tree1.add(from_paths_and_ids, from_paths_and_ids)
+ tree2.add(to_paths_and_ids, to_paths_and_ids)
+ return self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ def test_versioned_symlinks(self):
+ self.requireFeature(features.SymlinkFeature)
+ tree1, tree2 = self.make_trees_with_symlinks()
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+ root_id = tree1.path2id('')
+ expected = [
+ self.unchanged(tree1, tree1.path2id('')),
+ self.added(tree2, 'added'),
+ self.content_changed(tree2, 'changed'),
+ self.kind_changed(tree1, tree2, 'fromdir'),
+ self.kind_changed(tree1, tree2, 'fromfile'),
+ self.deleted(tree1, 'removed'),
+ self.unchanged(tree2, 'unchanged'),
+ self.unversioned(tree2, 'unknown'),
+ self.kind_changed(tree1, tree2, 'todir'),
+ self.kind_changed(tree1, tree2, 'tofile'),
+ ]
+ expected = sorted(expected)
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2, include_unchanged=True,
+ want_unversioned=True))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_versioned_symlinks_specific_files(self):
+ self.requireFeature(features.SymlinkFeature)
+ tree1, tree2 = self.make_trees_with_symlinks()
+ root_id = tree1.path2id('')
+ expected = [
+ self.added(tree2, 'added'),
+ self.content_changed(tree2, 'changed'),
+ self.kind_changed(tree1, tree2, 'fromdir'),
+ self.kind_changed(tree1, tree2, 'fromfile'),
+ self.deleted(tree1, 'removed'),
+ self.kind_changed(tree1, tree2, 'todir'),
+ self.kind_changed(tree1, tree2, 'tofile'),
+ ]
+ expected = sorted(expected)
+ # we should get back just the changed links. We pass in 'unchanged' to
+ # make sure that it is correctly not returned - and neither is the
+ # unknown path 'unknown' which it points at.
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2,
+ specific_files=['added', 'changed', 'fromdir', 'fromfile',
+ 'removed', 'unchanged', 'todir', 'tofile']))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_tree_with_special_names(self):
+ tree1, tree2, paths, path_ids = self.make_tree_with_special_names()
+ expected = sorted(self.added(tree2, f_id) for f_id in path_ids)
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_trees_with_special_names(self):
+ tree1, tree2, paths, path_ids = self.make_trees_with_special_names()
+ expected = sorted(self.content_changed(tree2, f_id) for f_id in path_ids
+ if f_id.endswith('_f-id'))
+ self.assertEqual(expected, self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_trees_with_deleted_dir(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ tree2.set_root_id(tree1.get_root_id())
+ self.build_tree(['tree1/a', 'tree1/b/', 'tree1/b/c',
+ 'tree1/b/d/', 'tree1/b/d/e', 'tree1/f/', 'tree1/f/g',
+ 'tree2/a', 'tree2/f/', 'tree2/f/g'])
+ tree1.add(['a', 'b', 'b/c', 'b/d/', 'b/d/e', 'f', 'f/g'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'e-id', 'f-id', 'g-id'])
+ tree2.add(['a', 'f', 'f/g'], ['a-id', 'f-id', 'g-id'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ # We should notice that 'b' and all its children are deleted
+ expected = [
+ self.content_changed(tree2, 'a-id'),
+ self.content_changed(tree2, 'g-id'),
+ self.deleted(tree1, 'b-id'),
+ self.deleted(tree1, 'c-id'),
+ self.deleted(tree1, 'd-id'),
+ self.deleted(tree1, 'e-id'),
+ ]
+ self.assertEqualIterChanges(expected,
+ self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_added_unicode(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # u'\u03b1' == GREEK SMALL LETTER ALPHA
+ # u'\u03c9' == GREEK SMALL LETTER OMEGA
+ a_id = u'\u03b1-id'.encode('utf8')
+ added_id = u'\u03c9_added_id'.encode('utf8')
+ try:
+ self.build_tree([u'tree1/\u03b1/',
+ u'tree2/\u03b1/',
+ u'tree2/\u03b1/\u03c9-added',
+ ])
+ except UnicodeError:
+ raise tests.TestSkipped("Could not create Unicode files.")
+ tree1.add([u'\u03b1'], [a_id])
+ tree2.add([u'\u03b1', u'\u03b1/\u03c9-added'], [a_id, added_id])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ self.assertEqual([self.added(tree2, added_id)],
+ self.do_iter_changes(tree1, tree2))
+ self.assertEqual([self.added(tree2, added_id)],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=[u'\u03b1']))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_deleted_unicode(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # u'\u03b1' == GREEK SMALL LETTER ALPHA
+ # u'\u03c9' == GREEK SMALL LETTER OMEGA
+ a_id = u'\u03b1-id'.encode('utf8')
+ deleted_id = u'\u03c9_deleted_id'.encode('utf8')
+ try:
+ self.build_tree([u'tree1/\u03b1/',
+ u'tree1/\u03b1/\u03c9-deleted',
+ u'tree2/\u03b1/',
+ ])
+ except UnicodeError:
+ raise tests.TestSkipped("Could not create Unicode files.")
+ tree1.add([u'\u03b1', u'\u03b1/\u03c9-deleted'], [a_id, deleted_id])
+ tree2.add([u'\u03b1'], [a_id])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ self.assertEqual([self.deleted(tree1, deleted_id)],
+ self.do_iter_changes(tree1, tree2))
+ self.assertEqual([self.deleted(tree1, deleted_id)],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=[u'\u03b1']))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_modified_unicode(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # u'\u03b1' == GREEK SMALL LETTER ALPHA
+ # u'\u03c9' == GREEK SMALL LETTER OMEGA
+ a_id = u'\u03b1-id'.encode('utf8')
+ mod_id = u'\u03c9_mod_id'.encode('utf8')
+ try:
+ self.build_tree([u'tree1/\u03b1/',
+ u'tree1/\u03b1/\u03c9-modified',
+ u'tree2/\u03b1/',
+ u'tree2/\u03b1/\u03c9-modified',
+ ])
+ except UnicodeError:
+ raise tests.TestSkipped("Could not create Unicode files.")
+ tree1.add([u'\u03b1', u'\u03b1/\u03c9-modified'], [a_id, mod_id])
+ tree2.add([u'\u03b1', u'\u03b1/\u03c9-modified'], [a_id, mod_id])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ self.assertEqual([self.content_changed(tree1, mod_id)],
+ self.do_iter_changes(tree1, tree2))
+ self.assertEqual([self.content_changed(tree1, mod_id)],
+ self.do_iter_changes(tree1, tree2,
+ specific_files=[u'\u03b1']))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_renamed_unicode(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # u'\u03b1' == GREEK SMALL LETTER ALPHA
+ # u'\u03c9' == GREEK SMALL LETTER OMEGA
+ a_id = u'\u03b1-id'.encode('utf8')
+ rename_id = u'\u03c9_rename_id'.encode('utf8')
+ try:
+ self.build_tree([u'tree1/\u03b1/',
+ u'tree2/\u03b1/',
+ ])
+ except UnicodeError:
+ raise tests.TestSkipped("Could not create Unicode files.")
+ self.build_tree_contents([(u'tree1/\u03c9-source', 'contents\n'),
+ (u'tree2/\u03b1/\u03c9-target', 'contents\n'),
+ ])
+ tree1.add([u'\u03b1', u'\u03c9-source'], [a_id, rename_id])
+ tree2.add([u'\u03b1', u'\u03b1/\u03c9-target'], [a_id, rename_id])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ self.assertEqual([self.renamed(tree1, tree2, rename_id, False)],
+ self.do_iter_changes(tree1, tree2))
+ self.assertEqualIterChanges(
+ [self.renamed(tree1, tree2, rename_id, False)],
+ self.do_iter_changes(tree1, tree2, specific_files=[u'\u03b1']))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_unchanged_unicode(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+ # u'\u03b1' == GREEK SMALL LETTER ALPHA
+ # u'\u03c9' == GREEK SMALL LETTER OMEGA
+ a_id = u'\u03b1-id'.encode('utf8')
+ subfile_id = u'\u03c9-subfile-id'.encode('utf8')
+ rootfile_id = u'\u03c9-root-id'.encode('utf8')
+ try:
+ self.build_tree([u'tree1/\u03b1/',
+ u'tree2/\u03b1/',
+ ])
+ except UnicodeError:
+ raise tests.TestSkipped("Could not create Unicode files.")
+ self.build_tree_contents([
+ (u'tree1/\u03b1/\u03c9-subfile', 'sub contents\n'),
+ (u'tree2/\u03b1/\u03c9-subfile', 'sub contents\n'),
+ (u'tree1/\u03c9-rootfile', 'root contents\n'),
+ (u'tree2/\u03c9-rootfile', 'root contents\n'),
+ ])
+ tree1.add([u'\u03b1', u'\u03b1/\u03c9-subfile', u'\u03c9-rootfile'],
+ [a_id, subfile_id, rootfile_id])
+ tree2.add([u'\u03b1', u'\u03b1/\u03c9-subfile', u'\u03c9-rootfile'],
+ [a_id, subfile_id, rootfile_id])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ expected = sorted([
+ self.unchanged(tree1, root_id),
+ self.unchanged(tree1, a_id),
+ self.unchanged(tree1, subfile_id),
+ self.unchanged(tree1, rootfile_id),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ include_unchanged=True))
+
+ # We should also be able to select just a subset
+ expected = sorted([
+ self.unchanged(tree1, a_id),
+ self.unchanged(tree1, subfile_id),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2, specific_files=[u'\u03b1'],
+ include_unchanged=True))
+
+ def test_unknown_unicode(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+ # u'\u03b1' == GREEK SMALL LETTER ALPHA
+ # u'\u03c9' == GREEK SMALL LETTER OMEGA
+ a_id = u'\u03b1-id'.encode('utf8')
+ try:
+ self.build_tree([u'tree1/\u03b1/',
+ u'tree2/\u03b1/',
+ u'tree2/\u03b1/unknown_dir/',
+ u'tree2/\u03b1/unknown_file',
+ u'tree2/\u03b1/unknown_dir/file',
+ u'tree2/\u03c9-unknown_root_file',
+ ])
+ except UnicodeError:
+ raise tests.TestSkipped("Could not create Unicode files.")
+ tree1.add([u'\u03b1'], [a_id])
+ tree2.add([u'\u03b1'], [a_id])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+
+ expected = sorted([
+ self.unversioned(tree2, u'\u03b1/unknown_dir'),
+ self.unversioned(tree2, u'\u03b1/unknown_file'),
+ self.unversioned(tree2, u'\u03c9-unknown_root_file'),
+ # a/unknown_dir/file should not be included because we should not
+ # recurse into unknown_dir
+ # self.unversioned(tree2, 'a/unknown_dir/file'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ require_versioned=False,
+ want_unversioned=True))
+ self.assertEqual([], # Without want_unversioned we should get nothing
+ self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(False, tree1, tree2)
+
+ # We should also be able to select just a subset
+ expected = sorted([
+ self.unversioned(tree2, u'\u03b1/unknown_dir'),
+ self.unversioned(tree2, u'\u03b1/unknown_file'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ specific_files=[u'\u03b1'],
+ require_versioned=False,
+ want_unversioned=True))
+ self.assertEqual([], # Without want_unversioned we should get nothing
+ self.do_iter_changes(tree1, tree2,
+ specific_files=[u'\u03b1']))
+
+ def test_unknown_empty_dir(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # Start with 2 identical trees
+ self.build_tree(['tree1/a/', 'tree1/b/',
+ 'tree2/a/', 'tree2/b/'])
+ self.build_tree_contents([('tree1/b/file', 'contents\n'),
+ ('tree2/b/file', 'contents\n')])
+ tree1.add(['a', 'b', 'b/file'], ['a-id', 'b-id', 'b-file-id'])
+ tree2.add(['a', 'b', 'b/file'], ['a-id', 'b-id', 'b-file-id'])
+
+ # Now create some unknowns in tree2
+ # We should find both a/file and a/dir as unknown, but we shouldn't
+ # recurse into a/dir to find that a/dir/subfile is also unknown.
+ self.build_tree(['tree2/a/file', 'tree2/a/dir/', 'tree2/a/dir/subfile'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+
+ expected = sorted([
+ self.unversioned(tree2, u'a/file'),
+ self.unversioned(tree2, u'a/dir'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ require_versioned=False,
+ want_unversioned=True))
+
+ def test_rename_over_deleted(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # The final changes should be:
+ # touch a b c d
+ # add a b c d
+ # commit
+ # rm a d
+ # mv b a
+ # mv c d
+ self.build_tree_contents([
+ ('tree1/a', 'a contents\n'),
+ ('tree1/b', 'b contents\n'),
+ ('tree1/c', 'c contents\n'),
+ ('tree1/d', 'd contents\n'),
+ ('tree2/a', 'b contents\n'),
+ ('tree2/d', 'c contents\n'),
+ ])
+ tree1.add(['a', 'b', 'c', 'd'], ['a-id', 'b-id', 'c-id', 'd-id'])
+ tree2.add(['a', 'd'], ['b-id', 'c-id'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ expected = sorted([
+ self.deleted(tree1, 'a-id'),
+ self.deleted(tree1, 'd-id'),
+ self.renamed(tree1, tree2, 'b-id', False),
+ self.renamed(tree1, tree2, 'c-id', False),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2))
+ self.check_has_changes(True, tree1, tree2)
+
+ def test_deleted_and_unknown(self):
+ """Test a file marked removed, but still present on disk."""
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # The final changes should be:
+ # bzr add a b c
+ # bzr rm --keep b
+ self.build_tree_contents([
+ ('tree1/a', 'a contents\n'),
+ ('tree1/b', 'b contents\n'),
+ ('tree1/c', 'c contents\n'),
+ ('tree2/a', 'a contents\n'),
+ ('tree2/b', 'b contents\n'),
+ ('tree2/c', 'c contents\n'),
+ ])
+ tree1.add(['a', 'b', 'c'], ['a-id', 'b-id', 'c-id'])
+ tree2.add(['a', 'c'], ['a-id', 'c-id'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_cannot_represent_unversioned(tree2)
+
+ expected = sorted([
+ self.deleted(tree1, 'b-id'),
+ self.unversioned(tree2, 'b'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ want_unversioned=True))
+ expected = sorted([
+ self.deleted(tree1, 'b-id'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ want_unversioned=False))
+
+ def test_renamed_and_added(self):
+ """Test when we have renamed a file, and put another in its place."""
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # The final changes are:
+ # bzr add b c
+ # bzr mv b a
+ # bzr mv c d
+ # bzr add b c
+
+ self.build_tree_contents([
+ ('tree1/b', 'b contents\n'),
+ ('tree1/c', 'c contents\n'),
+ ('tree2/a', 'b contents\n'),
+ ('tree2/b', 'new b contents\n'),
+ ('tree2/c', 'new c contents\n'),
+ ('tree2/d', 'c contents\n'),
+ ])
+ tree1.add(['b', 'c'], ['b1-id', 'c1-id'])
+ tree2.add(['a', 'b', 'c', 'd'], ['b1-id', 'b2-id', 'c2-id', 'c1-id'])
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+
+ expected = sorted([
+ self.renamed(tree1, tree2, 'b1-id', False),
+ self.renamed(tree1, tree2, 'c1-id', False),
+ self.added(tree2, 'b2-id'),
+ self.added(tree2, 'c2-id'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ want_unversioned=True))
+
+ def test_renamed_and_unknown(self):
+ """A file was moved on the filesystem, but not in bzr."""
+ tree1 = self.make_branch_and_tree('tree1')
+ tree2 = self.make_to_branch_and_tree('tree2')
+ root_id = tree1.get_root_id()
+ tree2.set_root_id(root_id)
+
+ # The final changes are:
+ # bzr add a b
+ # mv a a2
+
+ self.build_tree_contents([
+ ('tree1/a', 'a contents\n'),
+ ('tree1/b', 'b contents\n'),
+ ('tree2/a', 'a contents\n'),
+ ('tree2/b', 'b contents\n'),
+ ])
+ tree1.add(['a', 'b'], ['a-id', 'b-id'])
+ tree2.add(['a', 'b'], ['a-id', 'b-id'])
+ os.rename('tree2/a', 'tree2/a2')
+
+ tree1, tree2 = self.mutable_trees_to_locked_test_trees(tree1, tree2)
+ self.not_applicable_if_missing_in('a', tree2)
+
+ expected = sorted([
+ self.missing('a-id', 'a', 'a', tree2.get_root_id(), 'file'),
+ self.unversioned(tree2, 'a2'),
+ ])
+ self.assertEqual(expected,
+ self.do_iter_changes(tree1, tree2,
+ want_unversioned=True))
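The iter_changes variants exercised above follow the same calling pattern an
application would use. A minimal sketch, assuming an existing working tree
checked out at 'wt' (nothing here is specific to the test harness):

    from bzrlib import workingtree

    wt = workingtree.WorkingTree.open('wt')
    wt.lock_read()
    basis = wt.basis_tree()
    basis.lock_read()
    try:
        # want_unversioned=True also reports files present on disk but not
        # tracked by bzr; require_versioned=False keeps that from erroring.
        for change in wt.iter_changes(basis, want_unversioned=True,
                                      require_versioned=False):
            print change
    finally:
        basis.unlock()
        wt.unlock()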
diff --git a/bzrlib/tests/per_intertree/test_file_content_matches.py b/bzrlib/tests/per_intertree/test_file_content_matches.py
new file mode 100644
index 0000000..2f626db
--- /dev/null
+++ b/bzrlib/tests/per_intertree/test_file_content_matches.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the InterTree.file_content_matches() function."""
+
+from bzrlib.tests.per_intertree import TestCaseWithTwoTrees
+
+
+class TestFileContentMatches(TestCaseWithTwoTrees):
+
+ def test_same_contents_and_verifier(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ self.build_tree_contents([
+ ('1/file', 'apples'),
+ ('2/file', 'apples'),
+ ])
+ tree1.add('file', 'file-id-1')
+ tree2.add('file', 'file-id-2')
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ inter = self.intertree_class(tree1, tree2)
+ self.assertTrue(inter.file_content_matches('file-id-1', 'file-id-2'))
+
+ def test_different_contents_and_same_verifier(self):
+ tree1 = self.make_branch_and_tree('1')
+ tree2 = self.make_to_branch_and_tree('2')
+ self.build_tree_contents([
+ ('1/file', 'apples'),
+ ('2/file', 'oranges'),
+ ])
+ tree1.add('file', 'file-id-1')
+ tree2.add('file', 'file-id-2')
+ tree1, tree2 = self.mutable_trees_to_test_trees(self, tree1, tree2)
+ inter = self.intertree_class(tree1, tree2)
+ self.assertFalse(inter.file_content_matches('file-id-1', 'file-id-2'))
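A rough sketch of the same call pattern outside the harness, assuming two
already read-locked trees; InterTree.get() picks an optimised implementation
for the pair, which is what self.intertree_class stands in for above:

    from bzrlib.tree import InterTree

    inter = InterTree.get(tree1, tree2)
    # True when the two texts match, compared via a cheap verifier such as
    # sha1 where possible instead of reading the full contents.
    same = inter.file_content_matches('file-id-1', 'file-id-2')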
diff --git a/bzrlib/tests/per_inventory/__init__.py b/bzrlib/tests/per_inventory/__init__.py
new file mode 100644
index 0000000..dd97a3d
--- /dev/null
+++ b/bzrlib/tests/per_inventory/__init__.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for different inventory implementations"""
+
+from bzrlib import (
+ groupcompress,
+ tests,
+ )
+
+def load_tests(basic_tests, module, loader):
+ """Generate suite containing all parameterized tests"""
+ modules_to_test = [
+ 'bzrlib.tests.per_inventory.basics',
+ ]
+ from bzrlib.inventory import Inventory, CHKInventory
+
+ def inv_to_chk_inv(test, inv):
+ """CHKInventory needs a backing VF, so we create one."""
+ factory = groupcompress.make_pack_factory(True, True, 1)
+ trans = test.get_transport('chk-inv')
+ trans.ensure_base()
+ vf = factory(trans)
+ # We intentionally use a non-standard maximum_size, so that we are more
+ # likely to trigger splits, and get increased test coverage.
+ chk_inv = CHKInventory.from_inventory(vf, inv,
+ maximum_size=100,
+ search_key_name='hash-255-way')
+ return chk_inv
+ scenarios = [('Inventory', {'_inventory_class': Inventory,
+ '_inv_to_test_inv': lambda test, inv: inv
+ }),
+ ('CHKInventory', {'_inventory_class': CHKInventory,
+ '_inv_to_test_inv': inv_to_chk_inv,
+ })]
+ # add the tests for the sub modules
+ return tests.multiply_tests(
+ loader.loadTestsFromModuleNames(modules_to_test),
+ scenarios, basic_tests)
+
+
+class TestCaseWithInventory(tests.TestCaseWithMemoryTransport):
+
+ _inventory_class = None # set by load_tests
+ _inv_to_test_inv = None # set by load_tests
+
+ def make_test_inventory(self):
+ """Return an instance of the Inventory class under test."""
+ return self._inventory_class()
+
+ def inv_to_test_inv(self, inv):
+ """Convert a regular Inventory object into an inventory under test."""
+ return self._inv_to_test_inv(self, inv)
+
diff --git a/bzrlib/tests/per_inventory/basics.py b/bzrlib/tests/per_inventory/basics.py
new file mode 100644
index 0000000..9d49841
--- /dev/null
+++ b/bzrlib/tests/per_inventory/basics.py
@@ -0,0 +1,343 @@
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for different inventory implementations"""
+
+# NOTE: Don't import Inventory here, to make sure that we don't accidentally
+# hardcode that when we should be using self.make_test_inventory
+
+from bzrlib import (
+ errors,
+ inventory,
+ osutils,
+ )
+
+from bzrlib.inventory import (
+ InventoryDirectory,
+ InventoryEntry,
+ InventoryFile,
+ InventoryLink,
+ TreeReference,
+ )
+
+from bzrlib.tests.per_inventory import TestCaseWithInventory
+
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ )
+
+
+class TestInventory(TestCaseWithInventory):
+
+ def make_init_inventory(self):
+ inv = inventory.Inventory('tree-root')
+ inv.revision = 'initial-rev'
+ inv.root.revision = 'initial-rev'
+ return self.inv_to_test_inv(inv)
+
+ def make_file(self, file_id, name, parent_id, content='content\n',
+ revision='new-test-rev'):
+ ie = InventoryFile(file_id, name, parent_id)
+ ie.text_sha1 = osutils.sha_string(content)
+ ie.text_size = len(content)
+ ie.revision = revision
+ return ie
+
+ def make_link(self, file_id, name, parent_id, target='link-target\n'):
+ ie = InventoryLink(file_id, name, parent_id)
+ ie.symlink_target = target
+ return ie
+
+ def prepare_inv_with_nested_dirs(self):
+ inv = inventory.Inventory('tree-root')
+ for args in [('src', 'directory', 'src-id'),
+ ('doc', 'directory', 'doc-id'),
+ ('src/hello.c', 'file', 'hello-id'),
+ ('src/bye.c', 'file', 'bye-id'),
+ ('zz', 'file', 'zz-id'),
+ ('src/sub/', 'directory', 'sub-id'),
+ ('src/zz.c', 'file', 'zzc-id'),
+ ('src/sub/a', 'file', 'a-id'),
+ ('Makefile', 'file', 'makefile-id')]:
+ ie = inv.add_path(*args)
+ if args[1] == 'file':
+ ie.text_sha1 = osutils.sha_string('content\n')
+ ie.text_size = len('content\n')
+ return self.inv_to_test_inv(inv)
+
+
+class TestInventoryCreateByApplyDelta(TestInventory):
+ """A subset of the inventory delta application tests.
+
+ See test_inv which has comprehensive delta application tests for
+ inventories, dirstate, and repository based inventories.
+ """
+ def test_add(self):
+ inv = self.make_init_inventory()
+ inv = inv.create_by_apply_delta([
+ (None, "a", "a-id", self.make_file('a-id', 'a', 'tree-root')),
+ ], 'new-test-rev')
+ self.assertEqual('a', inv.id2path('a-id'))
+
+ def test_delete(self):
+ inv = self.make_init_inventory()
+ inv = inv.create_by_apply_delta([
+ (None, "a", "a-id", self.make_file('a-id', 'a', 'tree-root')),
+ ], 'new-rev-1')
+ self.assertEqual('a', inv.id2path('a-id'))
+ inv = inv.create_by_apply_delta([
+ ("a", None, "a-id", None),
+ ], 'new-rev-2')
+ self.assertRaises(errors.NoSuchId, inv.id2path, 'a-id')
+
+ def test_rename(self):
+ inv = self.make_init_inventory()
+ inv = inv.create_by_apply_delta([
+ (None, "a", "a-id", self.make_file('a-id', 'a', 'tree-root')),
+ ], 'new-rev-1')
+ self.assertEqual('a', inv.id2path('a-id'))
+ a_ie = inv['a-id']
+ b_ie = self.make_file(a_ie.file_id, "b", a_ie.parent_id)
+ inv = inv.create_by_apply_delta([("a", "b", "a-id", b_ie)], 'new-rev-2')
+ self.assertEqual("b", inv.id2path('a-id'))
+
+ def test_illegal(self):
+ # A file-id cannot appear in a delta more than once
+ inv = self.make_init_inventory()
+ self.assertRaises(errors.InconsistentDelta, inv.create_by_apply_delta, [
+ (None, "a", "id-1", self.make_file('id-1', 'a', 'tree-root')),
+ (None, "b", "id-1", self.make_file('id-1', 'b', 'tree-root')),
+ ], 'new-rev-1')
+
+
+class TestInventoryReads(TestInventory):
+
+ def test_is_root(self):
+ """Ensure our root-checking code is accurate."""
+ inv = self.make_init_inventory()
+ self.assertTrue(inv.is_root('tree-root'))
+ self.assertFalse(inv.is_root('booga'))
+ ie = inv['tree-root'].copy()
+ ie.file_id = 'booga'
+ inv = inv.create_by_apply_delta([("", None, "tree-root", None),
+ (None, "", "booga", ie)], 'new-rev-2')
+ self.assertFalse(inv.is_root('TREE_ROOT'))
+ self.assertTrue(inv.is_root('booga'))
+
+ def test_ids(self):
+ """Test detection of files within selected directories."""
+ inv = inventory.Inventory('TREE_ROOT')
+ for args in [('src', 'directory', 'src-id'),
+ ('doc', 'directory', 'doc-id'),
+ ('src/hello.c', 'file'),
+ ('src/bye.c', 'file', 'bye-id'),
+ ('Makefile', 'file')]:
+ ie = inv.add_path(*args)
+ if args[1] == 'file':
+ ie.text_sha1 = osutils.sha_string('content\n')
+ ie.text_size = len('content\n')
+ inv = self.inv_to_test_inv(inv)
+ self.assertEqual(inv.path2id('src'), 'src-id')
+ self.assertEqual(inv.path2id('src/bye.c'), 'bye-id')
+
+ def test_non_directory_children(self):
+ """Test path2id when a parent directory has no children"""
+ inv = inventory.Inventory('tree-root')
+        inv.add(self.make_file('file-id', 'file', 'tree-root'))
+        inv.add(self.make_link('link-id', 'link', 'tree-root'))
+ self.assertIs(None, inv.path2id('file/subfile'))
+ self.assertIs(None, inv.path2id('link/subfile'))
+
+ def test_iter_entries(self):
+ inv = self.prepare_inv_with_nested_dirs()
+
+ # Test all entries
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('Makefile', 'makefile-id'),
+ ('doc', 'doc-id'),
+ ('src', 'src-id'),
+ ('src/bye.c', 'bye-id'),
+ ('src/hello.c', 'hello-id'),
+ ('src/sub', 'sub-id'),
+ ('src/sub/a', 'a-id'),
+ ('src/zz.c', 'zzc-id'),
+ ('zz', 'zz-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries()])
+
+ # Test a subdirectory
+ self.assertEqual([
+ ('bye.c', 'bye-id'),
+ ('hello.c', 'hello-id'),
+ ('sub', 'sub-id'),
+ ('sub/a', 'a-id'),
+ ('zz.c', 'zzc-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries(
+ from_dir='src-id')])
+
+ # Test not recursing at the root level
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('Makefile', 'makefile-id'),
+ ('doc', 'doc-id'),
+ ('src', 'src-id'),
+ ('zz', 'zz-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries(
+ recursive=False)])
+
+ # Test not recursing at a subdirectory level
+ self.assertEqual([
+ ('bye.c', 'bye-id'),
+ ('hello.c', 'hello-id'),
+ ('sub', 'sub-id'),
+ ('zz.c', 'zzc-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries(
+ from_dir='src-id', recursive=False)])
+
+ def test_iter_just_entries(self):
+ inv = self.prepare_inv_with_nested_dirs()
+ self.assertEqual([
+ 'a-id',
+ 'bye-id',
+ 'doc-id',
+ 'hello-id',
+ 'makefile-id',
+ 'src-id',
+ 'sub-id',
+ 'tree-root',
+ 'zz-id',
+ 'zzc-id',
+ ], sorted([ie.file_id for ie in inv.iter_just_entries()]))
+
+ def test_iter_entries_by_dir(self):
+        inv = self.prepare_inv_with_nested_dirs()
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('Makefile', 'makefile-id'),
+ ('doc', 'doc-id'),
+ ('src', 'src-id'),
+ ('zz', 'zz-id'),
+ ('src/bye.c', 'bye-id'),
+ ('src/hello.c', 'hello-id'),
+ ('src/sub', 'sub-id'),
+ ('src/zz.c', 'zzc-id'),
+ ('src/sub/a', 'a-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir()])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('Makefile', 'makefile-id'),
+ ('doc', 'doc-id'),
+ ('src', 'src-id'),
+ ('zz', 'zz-id'),
+ ('src/bye.c', 'bye-id'),
+ ('src/hello.c', 'hello-id'),
+ ('src/sub', 'sub-id'),
+ ('src/zz.c', 'zzc-id'),
+ ('src/sub/a', 'a-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir(
+ specific_file_ids=('a-id', 'zzc-id', 'doc-id', 'tree-root',
+ 'hello-id', 'bye-id', 'zz-id', 'src-id', 'makefile-id',
+ 'sub-id'))])
+
+ self.assertEqual([
+ ('Makefile', 'makefile-id'),
+ ('doc', 'doc-id'),
+ ('zz', 'zz-id'),
+ ('src/bye.c', 'bye-id'),
+ ('src/hello.c', 'hello-id'),
+ ('src/zz.c', 'zzc-id'),
+ ('src/sub/a', 'a-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir(
+ specific_file_ids=('a-id', 'zzc-id', 'doc-id',
+ 'hello-id', 'bye-id', 'zz-id', 'makefile-id'))])
+
+ self.assertEqual([
+ ('Makefile', 'makefile-id'),
+ ('src/bye.c', 'bye-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir(
+ specific_file_ids=('bye-id', 'makefile-id'))])
+
+ self.assertEqual([
+ ('Makefile', 'makefile-id'),
+ ('src/bye.c', 'bye-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir(
+ specific_file_ids=('bye-id', 'makefile-id'))])
+
+ self.assertEqual([
+ ('src/bye.c', 'bye-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir(
+ specific_file_ids=('bye-id',))])
+
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('src', 'src-id'),
+ ('src/bye.c', 'bye-id'),
+ ], [(path, ie.file_id) for path, ie in inv.iter_entries_by_dir(
+ specific_file_ids=('bye-id',), yield_parents=True)])
+
+
+class TestInventoryFiltering(TestInventory):
+
+ def test_inv_filter_empty(self):
+ inv = self.prepare_inv_with_nested_dirs()
+ new_inv = inv.filter([])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ], [(path, ie.file_id) for path, ie in new_inv.iter_entries()])
+
+ def test_inv_filter_files(self):
+ inv = self.prepare_inv_with_nested_dirs()
+ new_inv = inv.filter(['zz-id', 'hello-id', 'a-id'])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('src', 'src-id'),
+ ('src/hello.c', 'hello-id'),
+ ('src/sub', 'sub-id'),
+ ('src/sub/a', 'a-id'),
+ ('zz', 'zz-id'),
+ ], [(path, ie.file_id) for path, ie in new_inv.iter_entries()])
+
+ def test_inv_filter_dirs(self):
+ inv = self.prepare_inv_with_nested_dirs()
+ new_inv = inv.filter(['doc-id', 'sub-id'])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('doc', 'doc-id'),
+ ('src', 'src-id'),
+ ('src/sub', 'sub-id'),
+ ('src/sub/a', 'a-id'),
+ ], [(path, ie.file_id) for path, ie in new_inv.iter_entries()])
+
+ def test_inv_filter_files_and_dirs(self):
+ inv = self.prepare_inv_with_nested_dirs()
+ new_inv = inv.filter(['makefile-id', 'src-id'])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('Makefile', 'makefile-id'),
+ ('src', 'src-id'),
+ ('src/bye.c', 'bye-id'),
+ ('src/hello.c', 'hello-id'),
+ ('src/sub', 'sub-id'),
+ ('src/sub/a', 'a-id'),
+ ('src/zz.c', 'zzc-id'),
+ ], [(path, ie.file_id) for path, ie in new_inv.iter_entries()])
+
+ def test_inv_filter_entry_not_present(self):
+ inv = self.prepare_inv_with_nested_dirs()
+ new_inv = inv.filter(['not-present-id'])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ], [(path, ie.file_id) for path, ie in new_inv.iter_entries()])
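The inventory delta tuples used throughout are (old_path, new_path, file_id,
new_entry). A minimal stand-alone sketch of the same pattern, with
hypothetical ids and content, applying a delta to a plain Inventory:

    from bzrlib import inventory, osutils

    inv = inventory.Inventory('tree-root')
    inv.root.revision = 'rev-0'
    ie = inventory.InventoryFile('a-id', 'a', 'tree-root')
    ie.text_sha1 = osutils.sha_string('content\n')
    ie.text_size = len('content\n')
    ie.revision = 'rev-1'
    # (old_path=None, new_path='a') means the entry is being added.
    new_inv = inv.create_by_apply_delta([(None, 'a', 'a-id', ie)], 'rev-1')
    assert new_inv.id2path('a-id') == 'a'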
diff --git a/bzrlib/tests/per_lock/__init__.py b/bzrlib/tests/per_lock/__init__.py
new file mode 100644
index 0000000..d306d44
--- /dev/null
+++ b/bzrlib/tests/per_lock/__init__.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""OS Lock implementation tests for bzr.
+
+These test the conformance of all the lock variations to the expected API.
+"""
+
+from copy import deepcopy
+
+from bzrlib import (
+ lock,
+ tests,
+ )
+
+
+class TestCaseWithLock(tests.TestCaseWithTransport):
+ pass
+
+
+def make_scenarios(lock_classes):
+ result = []
+ for name, write_lock, read_lock in lock_classes:
+ result.append(
+ (name, {'write_lock': write_lock, 'read_lock': read_lock}))
+ return result
+
+
+def load_tests(standard_tests, module, loader):
+ submod_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_lock.test_lock',
+ 'bzrlib.tests.per_lock.test_temporary_write_lock',
+ ])
+ scenarios = make_scenarios(lock._lock_classes)
+ # add the tests for the sub modules
+ return tests.multiply_tests(submod_tests, scenarios,
+ standard_tests)
diff --git a/bzrlib/tests/per_lock/test_lock.py b/bzrlib/tests/per_lock/test_lock.py
new file mode 100644
index 0000000..336a73a
--- /dev/null
+++ b/bzrlib/tests/per_lock/test_lock.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for OS level locks."""
+
+from bzrlib import (
+ errors,
+ osutils,
+ )
+
+from bzrlib.tests import (
+ features,
+ )
+from bzrlib.tests.per_lock import TestCaseWithLock
+
+
+class TestLock(TestCaseWithLock):
+
+ def setUp(self):
+ super(TestLock, self).setUp()
+ self.build_tree(['a-file'])
+
+ def test_read_lock(self):
+ """Smoke test for read locks."""
+ a_lock = self.read_lock('a-file')
+ self.addCleanup(a_lock.unlock)
+ # The lock file should be opened for reading
+ txt = a_lock.f.read()
+ self.assertEqual('contents of a-file\n', txt)
+
+ def test_create_if_needed_read(self):
+ """We will create the file if it doesn't exist yet."""
+ a_lock = self.read_lock('other-file')
+ self.addCleanup(a_lock.unlock)
+ txt = a_lock.f.read()
+ self.assertEqual('', txt)
+
+ def test_create_if_needed_write(self):
+ """We will create the file if it doesn't exist yet."""
+ a_lock = self.write_lock('other-file')
+ self.addCleanup(a_lock.unlock)
+ txt = a_lock.f.read()
+ self.assertEqual('', txt)
+ a_lock.f.seek(0)
+ a_lock.f.write('foo\n')
+ a_lock.f.seek(0)
+ txt = a_lock.f.read()
+ self.assertEqual('foo\n', txt)
+
+ def test_readonly_file(self):
+ """If the file is readonly, we can take a read lock.
+
+ But we shouldn't be able to take a write lock.
+ """
+ self.requireFeature(features.not_running_as_root)
+ osutils.make_readonly('a-file')
+ # Make sure the file is read-only (on all platforms)
+ self.assertRaises(IOError, open, 'a-file', 'rb+')
+ a_lock = self.read_lock('a-file')
+ a_lock.unlock()
+
+ self.assertRaises(errors.LockFailed, self.write_lock, 'a-file')
+
+ def test_write_lock(self):
+ """Smoke test for write locks."""
+ a_lock = self.write_lock('a-file')
+ self.addCleanup(a_lock.unlock)
+ # You should be able to read and write to the lock file.
+ txt = a_lock.f.read()
+ self.assertEqual('contents of a-file\n', txt)
+ # Win32 requires that you call seek() when switching between a read
+ # operation and a write operation.
+ a_lock.f.seek(0, 2)
+ a_lock.f.write('more content\n')
+ a_lock.f.seek(0)
+ txt = a_lock.f.read()
+ self.assertEqual('contents of a-file\nmore content\n', txt)
+
+ def test_multiple_read_locks(self):
+ """You can take out more than one read lock on the same file."""
+ a_lock = self.read_lock('a-file')
+ self.addCleanup(a_lock.unlock)
+ b_lock = self.read_lock('a-file')
+ self.addCleanup(b_lock.unlock)
+
+ def test_multiple_write_locks_exclude(self):
+ """Taking out more than one write lock should fail."""
+ a_lock = self.write_lock('a-file')
+ self.addCleanup(a_lock.unlock)
+ # Taking out a lock on a locked file should raise LockContention
+ self.assertRaises(errors.LockContention, self.write_lock, 'a-file')
+
+ def _disabled_test_read_then_write_excludes(self):
+ """If a file is read-locked, taking out a write lock should fail."""
+ a_lock = self.read_lock('a-file')
+ self.addCleanup(a_lock.unlock)
+ # Taking out a lock on a locked file should raise LockContention
+ self.assertRaises(errors.LockContention, self.write_lock, 'a-file')
+
+ def test_read_unlock_write(self):
+ """Make sure that unlocking allows us to lock write"""
+ a_lock = self.read_lock('a-file')
+ a_lock.unlock()
+ a_lock = self.write_lock('a-file')
+ a_lock.unlock()
+
+ # TODO: jam 20070319 fcntl read locks are not currently fully
+ # mutually exclusive with write locks. This will be fixed
+ # in the next release.
+ def _disabled_test_write_then_read_excludes(self):
+ """If a file is write-locked, taking out a read lock should fail.
+
+ The file is exclusively owned by the write lock, so we shouldn't be
+ able to take out a shared read lock.
+ """
+ a_lock = self.write_lock('a-file')
+ self.addCleanup(a_lock.unlock)
+ # Taking out a lock on a locked file should raise LockContention
+ self.assertRaises(errors.LockContention, self.read_lock, 'a-file')
+
+ # TODO: jam 20070319 fcntl write locks are not currently fully
+ # mutually exclusive with read locks. This will be fixed
+ # in the next release.
+ def _disabled_test_write_unlock_read(self):
+ """If we have removed the write lock, we can grab a read lock."""
+ a_lock = self.write_lock('a-file')
+ a_lock.unlock()
+ a_lock = self.read_lock('a-file')
+ a_lock.unlock()
+
+ def _disabled_test_multiple_read_unlock_write(self):
+ """We can only grab a write lock if all read locks are done."""
+ a_lock = b_lock = c_lock = None
+ try:
+ a_lock = self.read_lock('a-file')
+ b_lock = self.read_lock('a-file')
+ self.assertRaises(errors.LockContention, self.write_lock, 'a-file')
+ a_lock.unlock()
+ a_lock = None
+ self.assertRaises(errors.LockContention, self.write_lock, 'a-file')
+ b_lock.unlock()
+ b_lock = None
+ c_lock = self.write_lock('a-file')
+ c_lock.unlock()
+ c_lock = None
+ finally:
+ # Cleanup as needed
+ if a_lock is not None:
+ a_lock.unlock()
+ if b_lock is not None:
+ b_lock.unlock()
+ if c_lock is not None:
+ c_lock.unlock()
+
+
+class TestLockUnicodePath(TestCaseWithLock):
+
+ _test_needs_features = [features.UnicodeFilenameFeature]
+
+ def test_read_lock(self):
+ self.build_tree([u'\u1234'])
+ u_lock = self.read_lock(u'\u1234')
+ self.addCleanup(u_lock.unlock)
+
+ def test_write_lock(self):
+ self.build_tree([u'\u1234'])
+ u_lock = self.write_lock(u'\u1234')
+ self.addCleanup(u_lock.unlock)
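The behaviour under test (any number of readers, at most one writer) mirrors
the underlying OS advisory locks. A rough standard-library illustration on
Unix, independent of the bzrlib lock classes, assuming 'a-file' already
exists:

    import fcntl

    f = open('a-file', 'rb+')
    # Shared (read) lock: other processes may also take LOCK_SH.
    fcntl.lockf(f, fcntl.LOCK_SH | fcntl.LOCK_NB)
    # Exclusive (write) lock: with LOCK_NB this raises IOError if another
    # process holds a conflicting lock, otherwise the lock is upgraded.
    fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    fcntl.lockf(f, fcntl.LOCK_UN)
    f.close()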
diff --git a/bzrlib/tests/per_lock/test_temporary_write_lock.py b/bzrlib/tests/per_lock/test_temporary_write_lock.py
new file mode 100644
index 0000000..0c32a37
--- /dev/null
+++ b/bzrlib/tests/per_lock/test_temporary_write_lock.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for temporarily upgrading to a WriteLock."""
+
+from bzrlib import (
+ errors,
+ )
+
+from bzrlib.tests.per_lock import TestCaseWithLock
+
+
+class TestTemporaryWriteLock(TestCaseWithLock):
+
+ def setUp(self):
+ super(TestTemporaryWriteLock, self).setUp()
+ self.build_tree(['a-file'])
+
+ def test_can_upgrade_and_write(self):
+ """With only one lock, we should be able to write lock and switch back."""
+ a_lock = self.read_lock('a-file')
+ try:
+ success, t_write_lock = a_lock.temporary_write_lock()
+ self.assertTrue(success, "We failed to grab a write lock.")
+ try:
+ self.assertEqual('contents of a-file\n',
+ t_write_lock.f.read())
+ # We should be able to write to the file.
+ t_write_lock.f.seek(0)
+ t_write_lock.f.write('new contents for a-file\n')
+ t_write_lock.f.seek(0)
+ self.assertEqual('new contents for a-file\n',
+ t_write_lock.f.read())
+ finally:
+ a_lock = t_write_lock.restore_read_lock()
+ finally:
+ a_lock.unlock()
+
+ def test_is_write_locked(self):
+ """With a temporary write lock, we cannot grab another lock."""
+ a_lock = self.read_lock('a-file')
+ try:
+ success, t_write_lock = a_lock.temporary_write_lock()
+ self.assertTrue(success, "We failed to grab a write lock.")
+ try:
+ self.assertRaises(errors.LockContention,
+ self.write_lock, 'a-file')
+ # TODO: jam 20070319 fcntl read locks are not currently fully
+ # mutually exclusive with write locks. This will be fixed
+ # in the next release.
+ # self.assertRaises(errors.LockContention,
+ # self.read_lock, 'a-file')
+ finally:
+ a_lock = t_write_lock.restore_read_lock()
+ # Now we only have a read lock, so we should be able to grab
+ # another read lock, but not a write lock
+ # TODO: jam 20070319 fcntl write locks are not currently fully
+ # mutually exclusive with read locks. This will be fixed
+ # in the next release.
+ # self.assertRaises(errors.LockContention,
+ # self.write_lock, 'a-file')
+ b_lock = self.read_lock('a-file')
+ b_lock.unlock()
+ finally:
+ a_lock.unlock()
+
+ def test_fails_when_locked(self):
+ """We can't upgrade to a write lock if something else locks."""
+ a_lock = self.read_lock('a-file')
+ try:
+ b_lock = self.read_lock('a-file')
+ try:
+ success, alt_lock = a_lock.temporary_write_lock()
+ self.assertFalse(success)
+ # Now, 'alt_lock' should be a read-lock on the file. It should
+ # either be the same object as a_lock, or a new object.
+ # If it is a new object, a_lock should be unlocked. (we don't
+ # want to end up with 2 locks on the file)
+ self.assertTrue((alt_lock is a_lock) or (a_lock.f is None))
+ # set a_lock = alt_lock so that cleanup works correctly
+ a_lock = alt_lock
+
+ # We should not be able to grab a write lock
+ # but we should be able to grab another read lock
+ # TODO: jam 20070319 fcntl write locks are not currently fully
+ # mutually exclusive with read locks. This will be fixed
+ # in the next release.
+ # self.assertRaises(errors.LockContention,
+ # self.write_lock, 'a-file')
+ c_lock = self.read_lock('a-file')
+ c_lock.unlock()
+ finally:
+ b_lock.unlock()
+ finally:
+ a_lock.unlock()
+
+ # TODO: jam 20070314 to truly test these, we should be spawning an external
+ # process, and having it lock/unlock/try lock on request.
+
+ # TODO: jam 20070314 Test that the write lock can fail if another process
+ # holds a read lock. And that we recover properly.
diff --git a/bzrlib/tests/per_merger.py b/bzrlib/tests/per_merger.py
new file mode 100644
index 0000000..c9c41b3
--- /dev/null
+++ b/bzrlib/tests/per_merger.py
@@ -0,0 +1,417 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation tests for bzrlib.merge.Merger."""
+
+import os
+
+from bzrlib.conflicts import TextConflict
+from bzrlib import (
+ errors,
+ merge as _mod_merge,
+ )
+from bzrlib.tests import (
+ multiply_tests,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.test_merge_core import MergeBuilder
+from bzrlib.transform import TreeTransform
+
+
+
+def load_tests(standard_tests, module, loader):
+ """Multiply tests for tranport implementations."""
+ result = loader.suiteClass()
+ scenarios = [
+ (name, {'merge_type': merger})
+ for name, merger in _mod_merge.merge_type_registry.items()]
+ return multiply_tests(standard_tests, scenarios, result)
+
+
+class TestMergeImplementation(TestCaseWithTransport):
+
+ def do_merge(self, target_tree, source_tree, **kwargs):
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ target_tree, source_tree.last_revision(),
+ other_branch=source_tree.branch)
+        merger.merge_type = self.merge_type
+ for name, value in kwargs.items():
+ setattr(merger, name, value)
+ merger.do_merge()
+
+ def test_merge_specific_file(self):
+ this_tree = self.make_branch_and_tree('this')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ self.build_tree_contents([
+ ('this/file1', 'a\nb\n'),
+ ('this/file2', 'a\nb\n')
+ ])
+ this_tree.add(['file1', 'file2'])
+ this_tree.commit('Added files')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([
+ ('other/file1', 'a\nb\nc\n'),
+ ('other/file2', 'a\nb\nc\n')
+ ])
+ other_tree.commit('modified both')
+ self.build_tree_contents([
+ ('this/file1', 'd\na\nb\n'),
+ ('this/file2', 'd\na\nb\n')
+ ])
+ this_tree.commit('modified both')
+ self.do_merge(this_tree, other_tree, interesting_files=['file1'])
+ self.assertFileEqual('d\na\nb\nc\n', 'this/file1')
+ self.assertFileEqual('d\na\nb\n', 'this/file2')
+
+ def test_merge_move_and_change(self):
+ this_tree = self.make_branch_and_tree('this')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ self.build_tree_contents([
+ ('this/file1', 'line 1\nline 2\nline 3\nline 4\n'),
+ ])
+ this_tree.add('file1',)
+ this_tree.commit('Added file')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([
+ ('other/file1', 'line 1\nline 2 to 2.1\nline 3\nline 4\n'),
+ ])
+ other_tree.commit('Changed 2 to 2.1')
+ self.build_tree_contents([
+ ('this/file1', 'line 1\nline 3\nline 2\nline 4\n'),
+ ])
+ this_tree.commit('Swapped 2 & 3')
+ self.do_merge(this_tree, other_tree)
+ if self.merge_type is _mod_merge.LCAMerger:
+ self.expectFailure(
+ "lca merge doesn't conflict for move and change",
+ self.assertFileEqual,
+ 'line 1\n'
+ '<<<<<<< TREE\n'
+ 'line 3\n'
+ 'line 2\n'
+ '=======\n'
+ 'line 2 to 2.1\n'
+ 'line 3\n'
+ '>>>>>>> MERGE-SOURCE\n'
+ 'line 4\n', 'this/file1')
+ else:
+ self.assertFileEqual('line 1\n'
+ '<<<<<<< TREE\n'
+ 'line 3\n'
+ 'line 2\n'
+ '=======\n'
+ 'line 2 to 2.1\n'
+ 'line 3\n'
+ '>>>>>>> MERGE-SOURCE\n'
+ 'line 4\n', 'this/file1')
+
+ def test_modify_conflicts_with_delete(self):
+ # If one side deletes a line, and the other modifies that line, then
+ # the modification should be considered a conflict
+ builder = self.make_branch_builder('test')
+ builder.start_series()
+ builder.build_snapshot('BASE-id', None,
+ [('add', ('', None, 'directory', None)),
+ ('add', ('foo', 'foo-id', 'file', 'a\nb\nc\nd\ne\n')),
+ ])
+ # Delete 'b\n'
+ builder.build_snapshot('OTHER-id', ['BASE-id'],
+ [('modify', ('foo-id', 'a\nc\nd\ne\n'))])
+ # Modify 'b\n', add 'X\n'
+ builder.build_snapshot('THIS-id', ['BASE-id'],
+ [('modify', ('foo-id', 'a\nb2\nc\nd\nX\ne\n'))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ this_tree = branch.bzrdir.create_workingtree()
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ other_tree = this_tree.bzrdir.sprout('other',
+ 'OTHER-id').open_workingtree()
+ self.do_merge(this_tree, other_tree)
+ if self.merge_type is _mod_merge.LCAMerger:
+ self.expectFailure("lca merge doesn't track deleted lines",
+ self.assertFileEqual,
+ 'a\n'
+ '<<<<<<< TREE\n'
+ 'b2\n'
+ '=======\n'
+ '>>>>>>> MERGE-SOURCE\n'
+ 'c\n'
+ 'd\n'
+ 'X\n'
+ 'e\n', 'test/foo')
+ else:
+ self.assertFileEqual(
+ 'a\n'
+ '<<<<<<< TREE\n'
+ 'b2\n'
+ '=======\n'
+ '>>>>>>> MERGE-SOURCE\n'
+ 'c\n'
+ 'd\n'
+ 'X\n'
+ 'e\n', 'test/foo')
+
+ def get_limbodir_deletiondir(self, wt):
+ transform = TreeTransform(wt)
+ limbodir = transform._limbodir
+ deletiondir = transform._deletiondir
+ transform.finalize()
+ return (limbodir, deletiondir)
+
+ def test_merge_with_existing_limbo_empty(self):
+ """Empty limbo dir is just cleaned up - see bug 427773"""
+ wt = self.make_branch_and_tree('this')
+ (limbodir, deletiondir) = self.get_limbodir_deletiondir(wt)
+ os.mkdir(limbodir)
+ self.do_merge(wt, wt)
+
+ def test_merge_with_existing_limbo_non_empty(self):
+ wt = self.make_branch_and_tree('this')
+ (limbodir, deletiondir) = self.get_limbodir_deletiondir(wt)
+ os.mkdir(limbodir)
+ os.mkdir(os.path.join(limbodir, 'something'))
+ self.assertRaises(errors.ExistingLimbo, self.do_merge, wt, wt)
+ self.assertRaises(errors.LockError, wt.unlock)
+
+ def test_merge_with_pending_deletion_empty(self):
+ wt = self.make_branch_and_tree('this')
+ (limbodir, deletiondir) = self.get_limbodir_deletiondir(wt)
+ os.mkdir(deletiondir)
+ self.do_merge(wt, wt)
+
+ def test_merge_with_pending_deletion_non_empty(self):
+ """Also see bug 427773"""
+ wt = self.make_branch_and_tree('this')
+ (limbodir, deletiondir) = self.get_limbodir_deletiondir(wt)
+ os.mkdir(deletiondir)
+ os.mkdir(os.path.join(deletiondir, 'something'))
+ self.assertRaises(errors.ExistingPendingDeletion, self.do_merge, wt, wt)
+ self.assertRaises(errors.LockError, wt.unlock)
+
+
+class TestHookMergeFileContent(TestCaseWithTransport):
+ """Tests that the 'merge_file_content' hook is invoked."""
+
+ def setUp(self):
+ TestCaseWithTransport.setUp(self)
+ self.hook_log = []
+
+ def install_hook_inactive(self):
+ def inactive_factory(merger):
+ # This hook is never active
+ self.hook_log.append(('inactive',))
+ return None
+ _mod_merge.Merger.hooks.install_named_hook(
+ 'merge_file_content', inactive_factory, 'test hook (inactive)')
+
+ def install_hook_noop(self):
+ test = self
+ class HookNA(_mod_merge.AbstractPerFileMerger):
+ def merge_contents(self, merge_params):
+ # This hook unconditionally does nothing.
+ test.hook_log.append(('no-op',))
+ return 'not_applicable', None
+ def hook_na_factory(merger):
+ return HookNA(merger)
+ _mod_merge.Merger.hooks.install_named_hook(
+ 'merge_file_content', hook_na_factory, 'test hook (no-op)')
+
+ def install_hook_success(self):
+ test = self
+ class HookSuccess(_mod_merge.AbstractPerFileMerger):
+ def merge_contents(self, merge_params):
+ test.hook_log.append(('success',))
+ if merge_params.file_id == '1':
+ return 'success', ['text-merged-by-hook']
+ return 'not_applicable', None
+ def hook_success_factory(merger):
+ return HookSuccess(merger)
+ _mod_merge.Merger.hooks.install_named_hook(
+ 'merge_file_content', hook_success_factory, 'test hook (success)')
+
+ def install_hook_conflict(self):
+ test = self
+ class HookConflict(_mod_merge.AbstractPerFileMerger):
+ def merge_contents(self, merge_params):
+ test.hook_log.append(('conflict',))
+ if merge_params.file_id == '1':
+ return ('conflicted',
+ ['text-with-conflict-markers-from-hook'])
+ return 'not_applicable', None
+ def hook_conflict_factory(merger):
+ return HookConflict(merger)
+ _mod_merge.Merger.hooks.install_named_hook(
+            'merge_file_content', hook_conflict_factory, 'test hook (conflict)')
+
+ def install_hook_delete(self):
+ test = self
+ class HookDelete(_mod_merge.AbstractPerFileMerger):
+ def merge_contents(self, merge_params):
+ test.hook_log.append(('delete',))
+ if merge_params.file_id == '1':
+ return 'delete', None
+ return 'not_applicable', None
+ def hook_delete_factory(merger):
+ return HookDelete(merger)
+ _mod_merge.Merger.hooks.install_named_hook(
+ 'merge_file_content', hook_delete_factory, 'test hook (delete)')
+
+ def install_hook_log_lines(self):
+ """Install a hook that saves the get_lines for the this, base and other
+ versions of the file.
+ """
+ test = self
+ class HookLogLines(_mod_merge.AbstractPerFileMerger):
+ def merge_contents(self, merge_params):
+ test.hook_log.append((
+ 'log_lines',
+ merge_params.this_lines,
+ merge_params.other_lines,
+ merge_params.base_lines,
+ ))
+ return 'not_applicable', None
+ def hook_log_lines_factory(merger):
+ return HookLogLines(merger)
+ _mod_merge.Merger.hooks.install_named_hook(
+ 'merge_file_content', hook_log_lines_factory,
+ 'test hook (log_lines)')
+
+ def make_merge_builder(self):
+ builder = MergeBuilder(self.test_base_dir)
+ self.addCleanup(builder.cleanup)
+ return builder
+
+ def create_file_needing_contents_merge(self, builder, file_id):
+ builder.add_file(file_id, builder.tree_root, "name1", "text1", True)
+ builder.change_contents(file_id, other="text4", this="text3")
+
+ def test_change_vs_change(self):
+ """Hook is used for (changed, changed)"""
+ self.install_hook_success()
+ builder = self.make_merge_builder()
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_contents("1", other="text4", this="text3")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(conflicts, [])
+ self.assertEqual(
+ builder.this.get_file('1').read(), 'text-merged-by-hook')
+
+ def test_change_vs_deleted(self):
+ """Hook is used for (changed, deleted)"""
+ self.install_hook_success()
+ builder = self.make_merge_builder()
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_contents("1", this="text2")
+ builder.remove_file("1", other=True)
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(conflicts, [])
+ self.assertEqual(
+ builder.this.get_file('1').read(), 'text-merged-by-hook')
+
+ def test_result_can_be_delete(self):
+ """A hook's result can be the deletion of a file."""
+ self.install_hook_delete()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(conflicts, [])
+ self.assertRaises(errors.NoSuchId, builder.this.id2path, '1')
+ self.assertEqual([], list(builder.this.list_files()))
+
+ def test_result_can_be_conflict(self):
+ """A hook's result can be a conflict."""
+ self.install_hook_conflict()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(conflicts, [TextConflict('name1', file_id='1')])
+ # The hook still gets to set the file contents in this case, so that it
+ # can insert custom conflict markers.
+ self.assertEqual(
+ builder.this.get_file('1').read(),
+ 'text-with-conflict-markers-from-hook')
+
+ def test_can_access_this_other_and_base_versions(self):
+ """The hook function can call params.merger.get_lines to access the
+ THIS/OTHER/BASE versions of the file.
+ """
+ self.install_hook_log_lines()
+ builder = self.make_merge_builder()
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_contents("1", this="text2", other="text3")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(
+ [('log_lines', ['text2'], ['text3'], ['text1'])], self.hook_log)
+
+ def test_chain_when_not_active(self):
+ """When a hook function returns None, merging still works."""
+ self.install_hook_inactive()
+ self.install_hook_success()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(conflicts, [])
+ self.assertEqual(
+ builder.this.get_file('1').read(), 'text-merged-by-hook')
+ self.assertEqual([('inactive',), ('success',)], self.hook_log)
+
+ def test_chain_when_not_applicable(self):
+ """When a hook function returns not_applicable, the next function is
+ tried (when one exists).
+ """
+ self.install_hook_noop()
+ self.install_hook_success()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual(conflicts, [])
+ self.assertEqual(
+ builder.this.get_file('1').read(), 'text-merged-by-hook')
+ self.assertEqual([('no-op',), ('success',)], self.hook_log)
+
+ def test_chain_stops_after_success(self):
+ """When a hook function returns success, no later functions are tried.
+ """
+ self.install_hook_success()
+ self.install_hook_noop()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual([('success',)], self.hook_log)
+
+ def test_chain_stops_after_conflict(self):
+ """When a hook function returns conflict, no later functions are tried.
+ """
+ self.install_hook_conflict()
+ self.install_hook_noop()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual([('conflict',)], self.hook_log)
+
+ def test_chain_stops_after_delete(self):
+ """When a hook function returns delete, no later functions are tried.
+ """
+ self.install_hook_delete()
+ self.install_hook_noop()
+ builder = self.make_merge_builder()
+ self.create_file_needing_contents_merge(builder, "1")
+ conflicts = builder.merge(self.merge_type)
+ self.assertEqual([('delete',)], self.hook_log)
+
diff --git a/bzrlib/tests/per_pack_repository.py b/bzrlib/tests/per_pack_repository.py
new file mode 100644
index 0000000..4fa4e09
--- /dev/null
+++ b/bzrlib/tests/per_pack_repository.py
@@ -0,0 +1,1162 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for pack repositories.
+
+These tests are repeated for all pack-based repository formats.
+"""
+
+from stat import S_ISDIR
+
+from bzrlib.btree_index import BTreeGraphIndex
+from bzrlib.index import GraphIndex
+from bzrlib import (
+ controldir,
+ errors,
+ inventory,
+ osutils,
+ repository,
+ revision as _mod_revision,
+ tests,
+ transport,
+ ui,
+ )
+from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
+from bzrlib.smart import (
+ client,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ TestNotApplicable,
+ )
+from bzrlib.transport import (
+ memory,
+ )
+from bzrlib.tests import test_server
+
+
+class TestPackRepository(TestCaseWithTransport):
+ """Tests to be repeated across all pack-based formats.
+
+ The following are populated from the test scenario:
+
+    :ivar format_name: Registered name of the format to test.
+ :ivar format_string: On-disk format marker.
+ :ivar format_supports_external_lookups: Boolean.
+ """
+
+ def get_format(self):
+ return controldir.format_registry.make_bzrdir(self.format_name)
+
+ def test_attribute__fetch_order(self):
+ """Packs do not need ordered data retrieval."""
+ format = self.get_format()
+ repo = self.make_repository('.', format=format)
+ self.assertEqual('unordered', repo._format._fetch_order)
+
+ def test_attribute__fetch_uses_deltas(self):
+ """Packs reuse deltas."""
+ format = self.get_format()
+ repo = self.make_repository('.', format=format)
+ if isinstance(format.repository_format, RepositoryFormat2a):
+ # TODO: This is currently a workaround. CHK format repositories
+ # ignore the 'deltas' flag, but during conversions, we can't
+ # do unordered delta fetches. Remove this clause once we
+ # improve the inter-format fetching.
+ self.assertEqual(False, repo._format._fetch_uses_deltas)
+ else:
+ self.assertEqual(True, repo._format._fetch_uses_deltas)
+
+ def test_disk_layout(self):
+ format = self.get_format()
+ repo = self.make_repository('.', format=format)
+ # in case of side effects of locking.
+ repo.lock_write()
+ repo.unlock()
+ t = repo.bzrdir.get_repository_transport(None)
+ self.check_format(t)
+ # XXX: no locks left when unlocked at the moment
+ # self.assertEqualDiff('', t.get('lock').read())
+ self.check_databases(t)
+
+ def check_format(self, t):
+ self.assertEqualDiff(
+ self.format_string, # from scenario
+ t.get('format').read())
+
+ def assertHasNoKndx(self, t, knit_name):
+ """Assert that knit_name has no index on t."""
+ self.assertFalse(t.has(knit_name + '.kndx'))
+
+ def assertHasNoKnit(self, t, knit_name):
+ """Assert that knit_name exists on t."""
+ # no default content
+ self.assertFalse(t.has(knit_name + '.knit'))
+
+ def check_databases(self, t):
+ """check knit content for a repository."""
+ # check conversion worked
+ self.assertHasNoKndx(t, 'inventory')
+ self.assertHasNoKnit(t, 'inventory')
+ self.assertHasNoKndx(t, 'revisions')
+ self.assertHasNoKnit(t, 'revisions')
+ self.assertHasNoKndx(t, 'signatures')
+ self.assertHasNoKnit(t, 'signatures')
+ self.assertFalse(t.has('knits'))
+ # revision-indexes file-container directory
+ self.assertEqual([],
+ list(self.index_class(t, 'pack-names', None).iter_all_entries()))
+ self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
+ self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
+ self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
+ self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))
+
+ def test_shared_disk_layout(self):
+ format = self.get_format()
+ repo = self.make_repository('.', shared=True, format=format)
+ # we want:
+ t = repo.bzrdir.get_repository_transport(None)
+ self.check_format(t)
+ # XXX: no locks left when unlocked at the moment
+ # self.assertEqualDiff('', t.get('lock').read())
+ # We should have a 'shared-storage' marker file.
+ self.assertEqualDiff('', t.get('shared-storage').read())
+ self.check_databases(t)
+
+ def test_shared_no_tree_disk_layout(self):
+ format = self.get_format()
+ repo = self.make_repository('.', shared=True, format=format)
+ repo.set_make_working_trees(False)
+ # we want:
+ t = repo.bzrdir.get_repository_transport(None)
+ self.check_format(t)
+ # XXX: no locks left when unlocked at the moment
+ # self.assertEqualDiff('', t.get('lock').read())
+ # We should have a 'shared-storage' marker file.
+ self.assertEqualDiff('', t.get('shared-storage').read())
+ # We should have a marker for the no-working-trees flag.
+ self.assertEqualDiff('', t.get('no-working-trees').read())
+ # The marker should go when we toggle the setting.
+ repo.set_make_working_trees(True)
+ self.assertFalse(t.has('no-working-trees'))
+ self.check_databases(t)
+
+ def test_adding_revision_creates_pack_indices(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ trans = tree.branch.repository.bzrdir.get_repository_transport(None)
+ self.assertEqual([],
+ list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
+ tree.commit('foobarbaz')
+ index = self.index_class(trans, 'pack-names', None)
+ index_nodes = list(index.iter_all_entries())
+ self.assertEqual(1, len(index_nodes))
+ node = index_nodes[0]
+ name = node[1][0]
+ # the pack sizes should be listed in the index
+ pack_value = node[2]
+ sizes = [int(digits) for digits in pack_value.split(' ')]
+ for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
+ stat = trans.stat('indices/%s%s' % (name, suffix))
+ self.assertEqual(size, stat.st_size)
+
+ def test_pulling_nothing_leads_to_no_new_names(self):
+ format = self.get_format()
+ tree1 = self.make_branch_and_tree('1', format=format)
+ tree2 = self.make_branch_and_tree('2', format=format)
+ tree1.branch.repository.fetch(tree2.branch.repository)
+ trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
+ self.assertEqual([],
+ list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
+
+ def test_commit_across_pack_shape_boundary_autopacks(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ trans = tree.branch.repository.bzrdir.get_repository_transport(None)
+ # This test could be a little cheaper by replacing the packs
+ # attribute on the repository to allow a different pack distribution
+ # and max packs policy - so we are checking the policy is honoured
+ # in the test. But for now 11 commits is not a big deal in a single
+ # test.
+ for x in range(9):
+ tree.commit('commit %s' % x)
+ # there should be 9 packs:
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(9, len(list(index.iter_all_entries())))
+ # insert some files in obsolete_packs which should be removed by pack.
+ trans.put_bytes('obsolete_packs/foo', '123')
+ trans.put_bytes('obsolete_packs/bar', '321')
+ # committing one more should coalesce to 1 of 10.
+ tree.commit('commit triggering pack')
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(1, len(list(index.iter_all_entries())))
+ # packing should not damage data
+ tree = tree.bzrdir.open_workingtree()
+ check_result = tree.branch.repository.check(
+ [tree.branch.last_revision()])
+ nb_files = 5 # .pack, .rix, .iix, .tix, .six
+ if tree.branch.repository._format.supports_chks:
+ nb_files += 1 # .cix
+ # We should have 10 x nb_files files in the obsolete_packs directory.
+ obsolete_files = list(trans.list_dir('obsolete_packs'))
+ self.assertFalse('foo' in obsolete_files)
+ self.assertFalse('bar' in obsolete_files)
+ self.assertEqual(10 * nb_files, len(obsolete_files))
+ # XXX: Todo check packs obsoleted correctly - old packs and indices
+ # in the obsolete_packs directory.
+ large_pack_name = list(index.iter_all_entries())[0][1][0]
+ # finally, committing again should not touch the large pack.
+ tree.commit('commit not triggering pack')
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(2, len(list(index.iter_all_entries())))
+ pack_names = [node[1][0] for node in index.iter_all_entries()]
+ self.assertTrue(large_pack_name in pack_names)
+
+ def test_commit_write_group_returns_new_pack_names(self):
+ # This test doesn't need real disk.
+ self.vfs_transport_factory = memory.MemoryServer
+ format = self.get_format()
+ repo = self.make_repository('foo', format=format)
+ repo.lock_write()
+ try:
+ # All current pack repository styles autopack at 10 revisions; and
+ # autopack as well as regular commit write group needs to return
+ # the new pack name. Looping is a little ugly, but we don't have a
+ # clean way to test both the autopack logic and the normal code
+ # path without doing this loop.
+ for pos in range(10):
+ revid = str(pos)
+ repo.start_write_group()
+ try:
+ inv = inventory.Inventory(revision_id=revid)
+ inv.root.revision = revid
+ repo.texts.add_lines((inv.root.file_id, revid), [], [])
+ rev = _mod_revision.Revision(timestamp=0, timezone=None,
+ committer="Foo Bar <foo@example.com>", message="Message",
+ revision_id=revid)
+ rev.parent_ids = ()
+ repo.add_revision(revid, rev, inv=inv)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ old_names = repo._pack_collection._names.keys()
+ result = repo.commit_write_group()
+ cur_names = repo._pack_collection._names.keys()
+ new_names = list(set(cur_names) - set(old_names))
+ self.assertEqual(new_names, result)
+ finally:
+ repo.unlock()
+
+ def test_fail_obsolete_deletion(self):
+ # failing to delete obsolete packs is not fatal
+ format = self.get_format()
+ server = test_server.FakeNFSServer()
+ self.start_server(server)
+ t = transport.get_transport_from_url(server.get_url())
+ bzrdir = self.get_format().initialize_on_transport(t)
+ repo = bzrdir.create_repository()
+ repo_transport = bzrdir.get_repository_transport(None)
+ self.assertTrue(repo_transport.has('obsolete_packs'))
+ # these files are in use by another client and typically can't be deleted
+ repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
+ repo._pack_collection._clear_obsolete_packs()
+ self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
+
+ def test_pack_collection_sets_sibling_indices(self):
+ """The CombinedGraphIndex objects in the pack collection are all
+ siblings of each other, so that search-order reorderings will be copied
+ to each other.
+ """
+ repo = self.make_repository('repo')
+ pack_coll = repo._pack_collection
+ indices = set([pack_coll.revision_index, pack_coll.inventory_index,
+ pack_coll.text_index, pack_coll.signature_index])
+ if pack_coll.chk_index is not None:
+ indices.add(pack_coll.chk_index)
+ combined_indices = set(idx.combined_index for idx in indices)
+ for combined_index in combined_indices:
+ self.assertEqual(
+ combined_indices.difference([combined_index]),
+ combined_index._sibling_indices)
+
+ def test_pack_after_two_commits_packs_everything(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ trans = tree.branch.repository.bzrdir.get_repository_transport(None)
+ tree.commit('start')
+ tree.commit('more work')
+ tree.branch.repository.pack()
+ # there should be 1 pack:
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(1, len(list(index.iter_all_entries())))
+ self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
+
+ def test_pack_preserves_all_inventories(self):
+ # This is related to bug:
+ # https://bugs.launchpad.net/bzr/+bug/412198
+ # Stacked repositories need to keep the inventory for parents, even
+    # after a pack operation. However, that is harder to test directly, so we
+    # just test that all inventory texts are preserved.
+ format = self.get_format()
+ builder = self.make_branch_builder('source', format=format)
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ builder.build_snapshot('B-id', None, [
+ ('add', ('file', 'file-id', 'file', 'B content\n'))])
+ builder.build_snapshot('C-id', None, [
+ ('modify', ('file-id', 'C content\n'))])
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ repo = self.make_repository('repo', shared=True, format=format)
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.fetch(b.repository, revision_id='B-id')
+ inv = b.repository.iter_inventories(['C-id']).next()
+ repo.start_write_group()
+ repo.add_inventory('C-id', inv, ['B-id'])
+ repo.commit_write_group()
+ self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
+ sorted(repo.inventories.keys()))
+ repo.pack()
+ self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
+ sorted(repo.inventories.keys()))
+ # Content should be preserved as well
+ self.assertEqual(inv, repo.iter_inventories(['C-id']).next())
+
+ def test_pack_layout(self):
+ # Test that the ordering of revisions in pack repositories is
+ # tip->ancestor
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ trans = tree.branch.repository.bzrdir.get_repository_transport(None)
+ tree.commit('start', rev_id='1')
+ tree.commit('more work', rev_id='2')
+ tree.branch.repository.pack()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ pack = tree.branch.repository._pack_collection.get_pack_by_name(
+ tree.branch.repository._pack_collection.names()[0])
+ # revision access tends to be tip->ancestor, so ordering that way on
+ # disk is a good idea.
+ for _1, key, val, refs in pack.revision_index.iter_all_entries():
+ if type(format.repository_format) is RepositoryFormat2a:
+ # group_start, group_len, internal_start, internal_len
+ pos = map(int, val.split())
+ else:
+ # eol_flag, start, len
+ pos = int(val[1:].split()[0])
+ if key == ('1',):
+ pos_1 = pos
+ else:
+ pos_2 = pos
+ self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
+ % (pos_1, pos_2))
+
+ def test_pack_repositories_support_multiple_write_locks(self):
+ format = self.get_format()
+ self.make_repository('.', shared=True, format=format)
+ r1 = repository.Repository.open('.')
+ r2 = repository.Repository.open('.')
+ r1.lock_write()
+ self.addCleanup(r1.unlock)
+ r2.lock_write()
+ r2.unlock()
+
+ def _add_text(self, repo, fileid):
+ """Add a text to the repository within a write group."""
+ repo.texts.add_lines((fileid, 'samplerev+'+fileid), [],
+ ['samplerev+'+fileid])
+
+ def test_concurrent_writers_merge_new_packs(self):
+ format = self.get_format()
+ self.make_repository('.', shared=True, format=format)
+ r1 = repository.Repository.open('.')
+ r2 = repository.Repository.open('.')
+ r1.lock_write()
+ try:
+ # access enough data to load the names list
+ list(r1.all_revision_ids())
+ r2.lock_write()
+ try:
+ # access enough data to load the names list
+ list(r2.all_revision_ids())
+ r1.start_write_group()
+ try:
+ r2.start_write_group()
+ try:
+ self._add_text(r1, 'fileidr1')
+ self._add_text(r2, 'fileidr2')
+ except:
+ r2.abort_write_group()
+ raise
+ except:
+ r1.abort_write_group()
+ raise
+ # both r1 and r2 have open write groups with data in them
+ # created while the other's write group was open.
+ # Commit both which requires a merge to the pack-names.
+ try:
+ r1.commit_write_group()
+ except:
+ r1.abort_write_group()
+ r2.abort_write_group()
+ raise
+ r2.commit_write_group()
+ # tell r1 to reload from disk
+ r1._pack_collection.reset()
+ # Now both repositories should know about both names
+ r1._pack_collection.ensure_loaded()
+ r2._pack_collection.ensure_loaded()
+ self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
+ self.assertEqual(2, len(r1._pack_collection.names()))
+ finally:
+ r2.unlock()
+ finally:
+ r1.unlock()
+
+ def test_concurrent_writer_second_preserves_dropping_a_pack(self):
+ format = self.get_format()
+ self.make_repository('.', shared=True, format=format)
+ r1 = repository.Repository.open('.')
+ r2 = repository.Repository.open('.')
+ # add a pack to drop
+ r1.lock_write()
+ try:
+ r1.start_write_group()
+ try:
+ self._add_text(r1, 'fileidr1')
+ except:
+ r1.abort_write_group()
+ raise
+ else:
+ r1.commit_write_group()
+ r1._pack_collection.ensure_loaded()
+ name_to_drop = r1._pack_collection.all_packs()[0].name
+ finally:
+ r1.unlock()
+ r1.lock_write()
+ try:
+ # access enough data to load the names list
+ list(r1.all_revision_ids())
+ r2.lock_write()
+ try:
+ # access enough data to load the names list
+ list(r2.all_revision_ids())
+ r1._pack_collection.ensure_loaded()
+ try:
+ r2.start_write_group()
+ try:
+ # in r1, drop the pack
+ r1._pack_collection._remove_pack_from_memory(
+ r1._pack_collection.get_pack_by_name(name_to_drop))
+ # in r2, add a pack
+ self._add_text(r2, 'fileidr2')
+ except:
+ r2.abort_write_group()
+ raise
+ except:
+ r1._pack_collection.reset()
+ raise
+ # r1 has a changed names list, and r2 has an open write group with
+ # changes.
+ # save r1, and then commit the r2 write group, which requires a
+ # merge to the pack-names, which should not reinstate
+ # name_to_drop
+ try:
+ r1._pack_collection._save_pack_names()
+ r1._pack_collection.reset()
+ except:
+ r2.abort_write_group()
+ raise
+ try:
+ r2.commit_write_group()
+ except:
+ r2.abort_write_group()
+ raise
+ # Now both repositories should know about just one name.
+ r1._pack_collection.ensure_loaded()
+ r2._pack_collection.ensure_loaded()
+ self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
+ self.assertEqual(1, len(r1._pack_collection.names()))
+ self.assertFalse(name_to_drop in r1._pack_collection.names())
+ finally:
+ r2.unlock()
+ finally:
+ r1.unlock()
+
+ def test_concurrent_pack_triggers_reload(self):
+ # create 2 packs, which we will then collapse
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ try:
+ rev1 = tree.commit('one')
+ rev2 = tree.commit('two')
+ r2 = repository.Repository.open('tree')
+ r2.lock_read()
+ try:
+ # Now r2 has read the pack-names file, but will need to reload
+ # it after r1 has repacked
+ tree.branch.repository.pack()
+ self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2]))
+ finally:
+ r2.unlock()
+ finally:
+ tree.unlock()
+
+ def test_concurrent_pack_during_get_record_reloads(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ try:
+ rev1 = tree.commit('one')
+ rev2 = tree.commit('two')
+ keys = [(rev1,), (rev2,)]
+ r2 = repository.Repository.open('tree')
+ r2.lock_read()
+ try:
+ # At this point, we will start grabbing a record stream, and
+ # trigger a repack mid-way
+ packed = False
+ result = {}
+ record_stream = r2.revisions.get_record_stream(keys,
+ 'unordered', False)
+ for record in record_stream:
+ result[record.key] = record
+ if not packed:
+ tree.branch.repository.pack()
+ packed = True
+ # The first record will be found in the original location, but
+ # after the pack, we have to reload to find the next record
+ self.assertEqual(sorted(keys), sorted(result.keys()))
+ finally:
+ r2.unlock()
+ finally:
+ tree.unlock()
+
+ def test_concurrent_pack_during_autopack(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ try:
+ for i in xrange(9):
+ tree.commit('rev %d' % (i,))
+ r2 = repository.Repository.open('tree')
+ r2.lock_write()
+ try:
+ # Monkey patch so that pack occurs while the other repo is
+ # autopacking. This is slightly bad, but all current pack
+ # repository implementations have a _pack_collection, and we
+ # test that it gets triggered. So if a future format changes
+ # things, the test will fail rather than succeed accidentally.
+ autopack_count = [0]
+ r1 = tree.branch.repository
+ orig = r1._pack_collection.pack_distribution
+ def trigger_during_auto(*args, **kwargs):
+ ret = orig(*args, **kwargs)
+ if not autopack_count[0]:
+ r2.pack()
+ autopack_count[0] += 1
+ return ret
+ r1._pack_collection.pack_distribution = trigger_during_auto
+ tree.commit('autopack-rev')
+ # This triggers 2 autopacks. The first one causes r2.pack() to
+ # fire, but r2 doesn't see the new pack file yet. The
+ # autopack restarts and sees there are 2 files and there
+ # should be only 1 for 10 commits. So it goes ahead and
+ # finishes autopacking.
+ self.assertEqual([2], autopack_count)
+ finally:
+ r2.unlock()
+ finally:
+ tree.unlock()
+
+ def test_lock_write_does_not_physically_lock(self):
+ repo = self.make_repository('.', format=self.get_format())
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ self.assertFalse(repo.get_physical_lock_status())
+
+ def prepare_for_break_lock(self):
+ # Set up the global ui factory state so that a break-lock method call
+ # will find usable input in the input stream.
+ ui.ui_factory = ui.CannedInputUIFactory([True])
+
+ def test_break_lock_breaks_physical_lock(self):
+ repo = self.make_repository('.', format=self.get_format())
+ repo._pack_collection.lock_names()
+ repo.control_files.leave_in_place()
+ repo.unlock()
+ repo2 = repository.Repository.open('.')
+ self.assertTrue(repo.get_physical_lock_status())
+ self.prepare_for_break_lock()
+ repo2.break_lock()
+ self.assertFalse(repo.get_physical_lock_status())
+
+ def test_broken_physical_locks_error_on__unlock_names_lock(self):
+ repo = self.make_repository('.', format=self.get_format())
+ repo._pack_collection.lock_names()
+ self.assertTrue(repo.get_physical_lock_status())
+ repo2 = repository.Repository.open('.')
+ self.prepare_for_break_lock()
+ repo2.break_lock()
+ self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)
+
+ def test_fetch_without_find_ghosts_ignores_ghosts(self):
+ # we want two repositories at this point:
+ # one with a revision that is a ghost in the other
+ # repository.
+ # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
+ # 'references' is present in both repositories, and 'tip' is present
+ # just in has_ghost.
+ # has_ghost missing_ghost
+ #------------------------------
+ # 'ghost' -
+ # 'references' 'references'
+ # 'tip' -
+ # In this test we fetch 'tip' which should not fetch 'ghost'
+ has_ghost = self.make_repository('has_ghost', format=self.get_format())
+ missing_ghost = self.make_repository('missing_ghost',
+ format=self.get_format())
+
+ def add_commit(repo, revision_id, parent_ids):
+ repo.lock_write()
+ repo.start_write_group()
+ inv = inventory.Inventory(revision_id=revision_id)
+ inv.root.revision = revision_id
+ root_id = inv.root.file_id
+ sha1 = repo.add_inventory(revision_id, inv, [])
+ repo.texts.add_lines((root_id, revision_id), [], [])
+ rev = _mod_revision.Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=sha1,
+ revision_id=revision_id)
+ rev.parent_ids = parent_ids
+ repo.add_revision(revision_id, rev)
+ repo.commit_write_group()
+ repo.unlock()
+ add_commit(has_ghost, 'ghost', [])
+ add_commit(has_ghost, 'references', ['ghost'])
+ add_commit(missing_ghost, 'references', ['ghost'])
+ add_commit(has_ghost, 'tip', ['references'])
+ missing_ghost.fetch(has_ghost, 'tip')
+ # missing ghost now has tip and not ghost.
+ rev = missing_ghost.get_revision('tip')
+ inv = missing_ghost.get_inventory('tip')
+ self.assertRaises(errors.NoSuchRevision,
+ missing_ghost.get_revision, 'ghost')
+ self.assertRaises(errors.NoSuchRevision,
+ missing_ghost.get_inventory, 'ghost')
+
+ def make_write_ready_repo(self):
+ format = self.get_format()
+ if isinstance(format.repository_format, RepositoryFormat2a):
+ raise TestNotApplicable("No missing compression parents")
+ repo = self.make_repository('.', format=format)
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ self.addCleanup(repo.abort_write_group)
+ return repo
+
+ def test_missing_inventories_compression_parent_prevents_commit(self):
+ repo = self.make_write_ready_repo()
+ key = ('junk',)
+ repo.inventories._index._missing_compression_parents.add(key)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+ def test_missing_revisions_compression_parent_prevents_commit(self):
+ repo = self.make_write_ready_repo()
+ key = ('junk',)
+ repo.revisions._index._missing_compression_parents.add(key)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+ def test_missing_signatures_compression_parent_prevents_commit(self):
+ repo = self.make_write_ready_repo()
+ key = ('junk',)
+ repo.signatures._index._missing_compression_parents.add(key)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+ def test_missing_text_compression_parent_prevents_commit(self):
+ repo = self.make_write_ready_repo()
+ key = ('some', 'junk')
+ repo.texts._index._missing_compression_parents.add(key)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+ def test_supports_external_lookups(self):
+ repo = self.make_repository('.', format=self.get_format())
+ self.assertEqual(self.format_supports_external_lookups,
+ repo._format.supports_external_lookups)
+
+ def _lock_write(self, write_lockable):
+ """Lock write_lockable, add a cleanup and return the result.
+
+ :param write_lockable: An object with a lock_write method.
+ :return: The result of write_lockable.lock_write().
+ """
+ result = write_lockable.lock_write()
+ self.addCleanup(result.unlock)
+ return result
+
+ def test_abort_write_group_does_not_raise_when_suppressed(self):
+ """Similar to per_repository.test_write_group's test of the same name.
+
+ Also requires that the exception is logged.
+ """
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo', format=self.get_format())
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ # Damage the repository on the filesystem
+ self.get_transport('').rename('repo', 'foo')
+ # abort_write_group will not raise an error
+ self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
+ # But it does log an error
+ log = self.get_log()
+ self.assertContainsRe(log, 'abort_write_group failed')
+ self.assertContainsRe(log, r'INFO bzr: ERROR \(ignored\):')
+ if token is not None:
+ repo.leave_lock_in_place()
+
+ def test_abort_write_group_does_raise_when_not_suppressed(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo', format=self.get_format())
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ # Damage the repository on the filesystem
+ self.get_transport('').rename('repo', 'foo')
+ # abort_write_group will not raise an error
+ self.assertRaises(Exception, repo.abort_write_group)
+ if token is not None:
+ repo.leave_lock_in_place()
+
+ def test_suspend_write_group(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo', format=self.get_format())
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ expected_pack_name = wg_tokens[0] + '.pack'
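+ # One index per kind of data accompanies the suspended pack: .rix
+ # (revisions), .iix (inventories), .tix (texts), .six (signatures) and,
+ # for CHK-using formats, .cix (chk content).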
+ expected_names = [wg_tokens[0] + ext for ext in
+ ('.rix', '.iix', '.tix', '.six')]
+ if repo.chk_bytes is not None:
+ expected_names.append(wg_tokens[0] + '.cix')
+ expected_names.append(expected_pack_name)
+ upload_transport = repo._pack_collection._upload_transport
+ limbo_files = upload_transport.list_dir('')
+ self.assertEqual(sorted(expected_names), sorted(limbo_files))
+ md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
+ self.assertEqual(wg_tokens[0], md5.hexdigest())
+
+ def test_resume_chk_bytes(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo', format=self.get_format())
+ if repo.chk_bytes is None:
+ raise TestNotApplicable('no chk_bytes for this repository')
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ text = 'a bit of text\n'
+ key = ('sha1:' + osutils.sha_string(text),)
+ repo.chk_bytes.add_lines(key, (), [text])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = repo.bzrdir.open_repository()
+ same_repo.lock_write()
+ self.addCleanup(same_repo.unlock)
+ same_repo.resume_write_group(wg_tokens)
+ self.assertEqual([key], list(same_repo.chk_bytes.keys()))
+ self.assertEqual(
+ text, same_repo.chk_bytes.get_record_stream([key],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+ same_repo.abort_write_group()
+ self.assertEqual([], list(same_repo.chk_bytes.keys()))
+
+ def test_resume_write_group_then_abort(self):
+ # Create a repo, start a write group, insert some data, suspend.
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo', format=self.get_format())
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ # Get a fresh repository object for the repo on the filesystem.
+ same_repo = repo.bzrdir.open_repository()
+ # Resume
+ same_repo.lock_write()
+ self.addCleanup(same_repo.unlock)
+ same_repo.resume_write_group(wg_tokens)
+ same_repo.abort_write_group()
+ self.assertEqual(
+ [], same_repo._pack_collection._upload_transport.list_dir(''))
+ self.assertEqual(
+ [], same_repo._pack_collection._pack_transport.list_dir(''))
+
+ def test_commit_resumed_write_group(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo', format=self.get_format())
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ # Get a fresh repository object for the repo on the filesystem.
+ same_repo = repo.bzrdir.open_repository()
+ # Resume
+ same_repo.lock_write()
+ self.addCleanup(same_repo.unlock)
+ same_repo.resume_write_group(wg_tokens)
+ same_repo.commit_write_group()
+ expected_pack_name = wg_tokens[0] + '.pack'
+ expected_names = [wg_tokens[0] + ext for ext in
+ ('.rix', '.iix', '.tix', '.six')]
+ if repo.chk_bytes is not None:
+ expected_names.append(wg_tokens[0] + '.cix')
+ self.assertEqual(
+ [], same_repo._pack_collection._upload_transport.list_dir(''))
+ index_names = repo._pack_collection._index_transport.list_dir('')
+ self.assertEqual(sorted(expected_names), sorted(index_names))
+ pack_names = repo._pack_collection._pack_transport.list_dir('')
+ self.assertEqual([expected_pack_name], pack_names)
+
+ def test_resume_malformed_token(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ # Make a repository with a suspended write group
+ repo = self.make_repository('repo', format=self.get_format())
+ token = self._lock_write(repo).repository_token
+ repo.start_write_group()
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ # Make a new repository
+ new_repo = self.make_repository('new_repo', format=self.get_format())
+ token = self._lock_write(new_repo).repository_token
+ hacked_wg_token = (
+ '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
+ self.assertRaises(
+ errors.UnresumableWriteGroup,
+ new_repo.resume_write_group, [hacked_wg_token])
+
+
+class TestPackRepositoryStacking(TestCaseWithTransport):
+
+ """Tests for stacking pack repositories"""
+
+ def setUp(self):
+ if not self.format_supports_external_lookups:
+ raise TestNotApplicable("%r doesn't support stacking"
+ % (self.format_name,))
+ super(TestPackRepositoryStacking, self).setUp()
+
+ def get_format(self):
+ return controldir.format_registry.make_bzrdir(self.format_name)
+
+ def test_stack_checks_rich_root_compatibility(self):
+ # early versions of the packing code relied on pack internals to
+ # stack, but the current version should be able to stack on any
+ # format.
+ #
+ # TODO: Possibly this should be run per-repository-format and raise
+ # TestNotApplicable on formats that don't support stacking. -- mbp
+ # 20080729
+ repo = self.make_repository('repo', format=self.get_format())
+ if repo.supports_rich_root():
+ # can only stack on repositories that have compatible internal
+ # metadata
+ if getattr(repo._format, 'supports_tree_reference', False):
+ matching_format_name = 'pack-0.92-subtree'
+ else:
+ if repo._format.supports_chks:
+ matching_format_name = '2a'
+ else:
+ matching_format_name = 'rich-root-pack'
+ mismatching_format_name = 'pack-0.92'
+ else:
+ # We don't have a non-rich-root CHK format.
+ if repo._format.supports_chks:
+ raise AssertionError("no non-rich-root CHK formats known")
+ else:
+ matching_format_name = 'pack-0.92'
+ mismatching_format_name = 'pack-0.92-subtree'
+ base = self.make_repository('base', format=matching_format_name)
+ repo.add_fallback_repository(base)
+ # you can't stack on something with incompatible data
+ bad_repo = self.make_repository('mismatch',
+ format=mismatching_format_name)
+ e = self.assertRaises(errors.IncompatibleRepositories,
+ repo.add_fallback_repository, bad_repo)
+ self.assertContainsRe(str(e),
+ r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
+ r'.*Repository.*/repo/.*\n'
+ r'different rich-root support')
+
+ def test_stack_checks_serializers_compatibility(self):
+ repo = self.make_repository('repo', format=self.get_format())
+ if getattr(repo._format, 'supports_tree_reference', False):
+ # can only stack on repositories that have compatible internal
+ # metadata
+ matching_format_name = 'pack-0.92-subtree'
+ mismatching_format_name = 'rich-root-pack'
+ else:
+ if repo.supports_rich_root():
+ if repo._format.supports_chks:
+ matching_format_name = '2a'
+ else:
+ matching_format_name = 'rich-root-pack'
+ mismatching_format_name = 'pack-0.92-subtree'
+ else:
+ raise TestNotApplicable('No formats use non-v5 serializer'
+ ' without having rich-root also set')
+ base = self.make_repository('base', format=matching_format_name)
+ repo.add_fallback_repository(base)
+ # you can't stack on something with incompatible data
+ bad_repo = self.make_repository('mismatch',
+ format=mismatching_format_name)
+ e = self.assertRaises(errors.IncompatibleRepositories,
+ repo.add_fallback_repository, bad_repo)
+ self.assertContainsRe(str(e),
+ r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
+ r'.*Repository.*/repo/.*\n'
+ r'different serializers')
+
+ def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
+ base = self.make_branch_and_tree('base', format=self.get_format())
+ base.commit('foo')
+ referencing = self.make_branch_and_tree('repo', format=self.get_format())
+ referencing.branch.repository.add_fallback_repository(base.branch.repository)
+ local_tree = referencing.branch.create_checkout('local')
+ local_tree.commit('bar')
+ new_instance = referencing.bzrdir.open_repository()
+ new_instance.lock_read()
+ self.addCleanup(new_instance.unlock)
+ new_instance._pack_collection.ensure_loaded()
+ self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
+
+ def test_autopack_only_considers_main_repo_packs(self):
+ format = self.get_format()
+ base = self.make_branch_and_tree('base', format=format)
+ base.commit('foo')
+ tree = self.make_branch_and_tree('repo', format=format)
+ tree.branch.repository.add_fallback_repository(base.branch.repository)
+ trans = tree.branch.repository.bzrdir.get_repository_transport(None)
+ # This test could be a little cheaper by replacing the packs
+ # attribute on the repository to allow a different pack distribution
+ # and max packs policy - so we are checking the policy is honoured
+ # in the test. But for now 11 commits is not a big deal in a single
+ # test.
+ local_tree = tree.branch.create_checkout('local')
+ for x in range(9):
+ local_tree.commit('commit %s' % x)
+ # there should be 9 packs:
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(9, len(list(index.iter_all_entries())))
+ # committing one more should coalesce to 1 of 10.
+ local_tree.commit('commit triggering pack')
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(1, len(list(index.iter_all_entries())))
+ # packing should not damage data
+ tree = tree.bzrdir.open_workingtree()
+ check_result = tree.branch.repository.check(
+ [tree.branch.last_revision()])
+ nb_files = 5 # .pack, .rix, .iix, .tix, .six
+ if tree.branch.repository._format.supports_chks:
+ nb_files += 1 # .cix
+ # We should have 10 x nb_files files in the obsolete_packs directory.
+ obsolete_files = list(trans.list_dir('obsolete_packs'))
+ self.assertFalse('foo' in obsolete_files)
+ self.assertFalse('bar' in obsolete_files)
+ self.assertEqual(10 * nb_files, len(obsolete_files))
+ # XXX: Todo check packs obsoleted correctly - old packs and indices
+ # in the obsolete_packs directory.
+ large_pack_name = list(index.iter_all_entries())[0][1][0]
+ # finally, committing again should not touch the large pack.
+ local_tree.commit('commit not triggering pack')
+ index = self.index_class(trans, 'pack-names', None)
+ self.assertEqual(2, len(list(index.iter_all_entries())))
+ pack_names = [node[1][0] for node in index.iter_all_entries()]
+ self.assertTrue(large_pack_name in pack_names)
+
+
+class TestKeyDependencies(TestCaseWithTransport):
+
+ def get_format(self):
+ return controldir.format_registry.make_bzrdir(self.format_name)
+
+ def create_source_and_target(self):
+ builder = self.make_branch_builder('source', format=self.get_format())
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
+ builder.finish_series()
+ repo = self.make_repository('target', format=self.get_format())
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ return b.repository, repo
+
+ def test_key_dependencies_cleared_on_abort(self):
+ source_repo, target_repo = self.create_source_and_target()
+ target_repo.start_write_group()
+ try:
+ stream = source_repo.revisions.get_record_stream([('B-id',)],
+ 'unordered', True)
+ target_repo.revisions.insert_record_stream(stream)
+ key_refs = target_repo.revisions._index._key_dependencies
+ self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
+ finally:
+ target_repo.abort_write_group()
+ self.assertEqual([], sorted(key_refs.get_referrers()))
+
+ def test_key_dependencies_cleared_on_suspend(self):
+ source_repo, target_repo = self.create_source_and_target()
+ target_repo.start_write_group()
+ try:
+ stream = source_repo.revisions.get_record_stream([('B-id',)],
+ 'unordered', True)
+ target_repo.revisions.insert_record_stream(stream)
+ key_refs = target_repo.revisions._index._key_dependencies
+ self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
+ finally:
+ target_repo.suspend_write_group()
+ self.assertEqual([], sorted(key_refs.get_referrers()))
+
+ def test_key_dependencies_cleared_on_commit(self):
+ source_repo, target_repo = self.create_source_and_target()
+ target_repo.start_write_group()
+ try:
+ # Copy all texts, inventories, and chks so that nothing is missing
+ # for revision B-id.
+ for vf_name in ['texts', 'chk_bytes', 'inventories']:
+ source_vf = getattr(source_repo, vf_name, None)
+ if source_vf is None:
+ continue
+ target_vf = getattr(target_repo, vf_name)
+ stream = source_vf.get_record_stream(
+ source_vf.keys(), 'unordered', True)
+ target_vf.insert_record_stream(stream)
+ # Copy just revision B-id
+ stream = source_repo.revisions.get_record_stream(
+ [('B-id',)], 'unordered', True)
+ target_repo.revisions.insert_record_stream(stream)
+ key_refs = target_repo.revisions._index._key_dependencies
+ self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
+ finally:
+ target_repo.commit_write_group()
+ self.assertEqual([], sorted(key_refs.get_referrers()))
+
+
+class TestSmartServerAutopack(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestSmartServerAutopack, self).setUp()
+ # Create a smart server that publishes whatever the backing VFS server
+ # does.
+ self.smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(self.smart_server, self.get_server())
+ # Log all HPSS calls into self.hpss_calls.
+ client._SmartClient.hooks.install_named_hook(
+ 'call', self.capture_hpss_call, None)
+ self.hpss_calls = []
+
+ def capture_hpss_call(self, params):
+ self.hpss_calls.append(params.method)
+
+ def get_format(self):
+ return controldir.format_registry.make_bzrdir(self.format_name)
+
+ def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
+ # Make local and remote repos
+ format = self.get_format()
+ tree = self.make_branch_and_tree('local', format=format)
+ self.make_branch_and_tree('remote', format=format)
+ remote_branch_url = self.smart_server.get_url() + 'remote'
+ remote_branch = controldir.ControlDir.open(remote_branch_url).open_branch()
+ # Make 9 local revisions, and push them one at a time to the remote
+ # repo to produce 9 pack files.
+ for x in range(9):
+ tree.commit('commit %s' % x)
+ tree.branch.push(remote_branch)
+ # Make one more push to trigger an autopack
+ self.hpss_calls = []
+ tree.commit('commit triggering pack')
+ tree.branch.push(remote_branch)
+ autopack_calls = len([call for call in self.hpss_calls if call ==
+ 'PackRepository.autopack'])
+ streaming_calls = len([call for call in self.hpss_calls if call in
+ ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
+ if autopack_calls:
+ # Non streaming server
+ self.assertEqual(1, autopack_calls)
+ self.assertEqual(0, streaming_calls)
+ else:
+ # Streaming was used, which autopacks on the remote end.
+ self.assertEqual(0, autopack_calls)
+ # NB: The 2 calls are because of the sanity check that the server
+ # supports the verb (see remote.py:RemoteSink.insert_stream for
+ # details).
+ self.assertEqual(2, streaming_calls)
+
+
+def load_tests(basic_tests, module, loader):
+ # these give the bzrdir canned format name, and the repository on-disk
+ # format string
+ scenarios_params = [
+ dict(format_name='pack-0.92',
+ format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
+ format_supports_external_lookups=False,
+ index_class=GraphIndex),
+ dict(format_name='pack-0.92-subtree',
+ format_string="Bazaar pack repository format 1 "
+ "with subtree support (needs bzr 0.92)\n",
+ format_supports_external_lookups=False,
+ index_class=GraphIndex),
+ dict(format_name='1.6',
+ format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
+ format_supports_external_lookups=True,
+ index_class=GraphIndex),
+ dict(format_name='1.6.1-rich-root',
+ format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
+ "(bzr 1.6.1)\n",
+ format_supports_external_lookups=True,
+ index_class=GraphIndex),
+ dict(format_name='1.9',
+ format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
+ format_supports_external_lookups=True,
+ index_class=BTreeGraphIndex),
+ dict(format_name='1.9-rich-root',
+ format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
+ "(bzr 1.9)\n",
+ format_supports_external_lookups=True,
+ index_class=BTreeGraphIndex),
+ dict(format_name='2a',
+ format_string="Bazaar repository format 2a "
+ "(needs bzr 1.16 or later)\n",
+ format_supports_external_lookups=True,
+ index_class=BTreeGraphIndex),
+ ]
+ # name of the scenario is the format name
+ scenarios = [(s['format_name'], s) for s in scenarios_params]
+ return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
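+
+# Roughly, multiply_tests runs each test above once per named scenario
+# ('pack-0.92', '1.6', '2a', ...), applying the scenario dict as attributes
+# on each test instance; that is where self.format_name, self.index_class
+# and self.format_supports_external_lookups come from.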
diff --git a/bzrlib/tests/per_repository/__init__.py b/bzrlib/tests/per_repository/__init__.py
new file mode 100644
index 0000000..9cbbae8
--- /dev/null
+++ b/bzrlib/tests/per_repository/__init__.py
@@ -0,0 +1,137 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# and others
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Repository implementation tests for bzr.
+
+These test the conformance of all the repository variations to the expected API.
+Specific tests for individual formats are in the tests/test_repository.py file
+rather than in tests/per_branch/*.py.
+"""
+
+from bzrlib import (
+ repository,
+ )
+from bzrlib.remote import RemoteRepositoryFormat
+from bzrlib.tests import (
+ default_transport,
+ multiply_tests,
+ test_server,
+ )
+from bzrlib.tests.per_controldir.test_controldir import TestCaseWithControlDir
+from bzrlib.transport import memory
+
+
+def formats_to_scenarios(formats, transport_server, transport_readonly_server,
+ vfs_transport_factory=None):
+ """Transform the input formats to a list of scenarios.
+
+ :param formats: A list of (scenario_name_suffix, repo_format)
+ where the scenario_name_suffix is to be appended to the format
+ name, and the repo_format is a RepositoryFormat subclass
+ instance.
+ :returns: Scenarios of [(scenario_name, {parameter_name: value})]
+ """
+ result = []
+ for scenario_name_suffix, repository_format in formats:
+ scenario_name = repository_format.__class__.__name__
+ scenario_name += scenario_name_suffix
+ scenario = (scenario_name,
+ {"transport_server":transport_server,
+ "transport_readonly_server":transport_readonly_server,
+ "bzrdir_format":repository_format._matchingbzrdir,
+ "repository_format":repository_format,
+ })
+ # Only override the test's vfs_transport_factory if one was
+ # specified, otherwise just leave the default in place.
+ if vfs_transport_factory:
+ scenario[1]['vfs_transport_factory'] = vfs_transport_factory
+ result.append(scenario)
+ return result
+
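+# For example, the ('-default', RemoteRepositoryFormat()) entry used below
+# yields a scenario named 'RemoteRepositoryFormat-default' whose dict carries
+# transport_server, transport_readonly_server, bzrdir_format and
+# repository_format (plus vfs_transport_factory when one is supplied).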
+
+def all_repository_format_scenarios():
+ """Return a list of test scenarios for parameterising repository tests.
+ """
+ all_formats = repository.format_registry._get_all()
+ # format_scenarios is all the implementations of Repository; i.e. all disk
+ # formats plus RemoteRepository.
+ format_scenarios = formats_to_scenarios(
+ [('', format) for format in all_formats],
+ default_transport,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None)
+ format_scenarios.extend(formats_to_scenarios(
+ [('-default', RemoteRepositoryFormat())],
+ test_server.SmartTCPServer_for_testing,
+ test_server.ReadonlySmartTCPServer_for_testing,
+ memory.MemoryServer))
+ format_scenarios.extend(formats_to_scenarios(
+ [('-v2', RemoteRepositoryFormat())],
+ test_server.SmartTCPServer_for_testing_v2_only,
+ test_server.ReadonlySmartTCPServer_for_testing_v2_only,
+ memory.MemoryServer))
+ return format_scenarios
+
+
+class TestCaseWithRepository(TestCaseWithControlDir):
+
+ def get_default_format(self):
+ format = self.repository_format._matchingbzrdir
+ self.assertEquals(format.repository_format, self.repository_format)
+ return format
+
+ def make_repository(self, relpath, shared=None, format=None):
+ format = self.resolve_format(format)
+ repo = super(TestCaseWithRepository, self).make_repository(
+ relpath, shared=shared, format=format)
+ if format is None or format.repository_format is self.repository_format:
+ # Create a repository of the type we are trying to test.
+ if getattr(self, "repository_to_test_repository", None):
+ repo = self.repository_to_test_repository(repo)
+ return repo
+
+
+def load_tests(standard_tests, module, loader):
+ prefix = 'bzrlib.tests.per_repository.'
+ test_repository_modules = [
+ 'test_add_fallback_repository',
+ 'test_break_lock',
+ 'test_check',
+ 'test_commit_builder',
+ 'test_fetch',
+ 'test_file_graph',
+ 'test_get_parent_map',
+ 'test_has_same_location',
+ 'test_has_revisions',
+ 'test_locking',
+ 'test_pack',
+ 'test_reconcile',
+ 'test_refresh_data',
+ 'test_repository',
+ 'test_revision',
+ 'test_signatures',
+ 'test_statistics',
+ 'test_write_group',
+ ]
+ # Parameterize per_repository test modules by format.
+ submod_tests = loader.loadTestsFromModuleNames(
+ [prefix + module_name for module_name in test_repository_modules])
+ format_scenarios = all_repository_format_scenarios()
+ return multiply_tests(submod_tests, format_scenarios, standard_tests)
diff --git a/bzrlib/tests/per_repository/test_add_fallback_repository.py b/bzrlib/tests/per_repository/test_add_fallback_repository.py
new file mode 100644
index 0000000..6c9597c
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_add_fallback_repository.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Repository.add_fallback_repository."""
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import TestNotApplicable
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestAddFallbackRepository(TestCaseWithRepository):
+
+ def test_add_fallback_repository(self):
+ repo = self.make_repository('repo')
+ tree = self.make_branch_and_tree('branch')
+ if not repo._format.supports_external_lookups:
+ self.assertRaises(errors.UnstackableRepositoryFormat,
+ repo.add_fallback_repository, tree.branch.repository)
+ raise TestNotApplicable
+ repo.add_fallback_repository(tree.branch.repository)
+ # the repository has been added correctly if we can query against it.
+ revision_id = tree.commit('1st post')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ # can see all revisions
+ self.assertEqual(set([revision_id]), set(repo.all_revision_ids()))
+ # and can also query the parent map, either on the revisions
+ # versionedfiles, which uses tuple keys...
+ self.assertEqual({(revision_id,): ()},
+ repo.revisions.get_parent_map([(revision_id,)]))
+ # ... or on the repository directly...
+ self.assertEqual({revision_id: (NULL_REVISION,)},
+ repo.get_parent_map([revision_id]))
+ # ... or on the repository's graph.
+ self.assertEqual({revision_id: (NULL_REVISION,)},
+ repo.get_graph().get_parent_map([revision_id]))
+ # ... or on the repository's graph, when there is another repository.
+ other = self.make_repository('other')
+ other.lock_read()
+ self.addCleanup(other.unlock)
+ self.assertEqual({revision_id: (NULL_REVISION,)},
+ repo.get_graph(other).get_parent_map([revision_id]))
diff --git a/bzrlib/tests/per_repository/test_break_lock.py b/bzrlib/tests/per_repository/test_break_lock.py
new file mode 100644
index 0000000..e06539e
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_break_lock.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository break-lock."""
+
+from bzrlib import (
+ errors,
+ ui,
+ )
+from bzrlib.tests import per_repository
+
+
+class TestBreakLock(per_repository.TestCaseWithRepository):
+
+ def setUp(self):
+ super(TestBreakLock, self).setUp()
+ self.unused_repo = self.make_repository('.')
+ self.repo = self.unused_repo.bzrdir.open_repository()
+ ui.ui_factory = ui.CannedInputUIFactory([True])
+
+ def test_unlocked(self):
+ # break lock when nothing is locked should just return
+ try:
+ self.repo.break_lock()
+ except NotImplementedError:
+ pass
+
+ def test_locked(self):
+ # break_lock when the repository is locked should break the physical
+ # lock, so that the original holder's unlock raises LockBroken.
+ self.repo.lock_write()
+ self.assertEqual(self.repo.get_physical_lock_status(),
+ self.unused_repo.get_physical_lock_status())
+ if not self.unused_repo.get_physical_lock_status():
+ # 'lock_write' has not taken a physical mutex out.
+ self.repo.unlock()
+ return
+ self.unused_repo.break_lock()
+ self.assertRaises(errors.LockBroken, self.repo.unlock)
diff --git a/bzrlib/tests/per_repository/test_check.py b/bzrlib/tests/per_repository/test_check.py
new file mode 100644
index 0000000..a26141c
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_check.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Test operations that check the repository for corruption"""
+
+from bzrlib import (
+ revision as _mod_revision,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestCleanRepository(TestCaseWithRepository):
+
+ def test_new_repo(self):
+ branch = self.make_branch('foo')
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ self.overrideEnv('BZR_EMAIL', 'foo@sample.com')
+ builder = branch.get_commit_builder([], branch.get_config_stack())
+ list(builder.record_iter_changes(None, _mod_revision.NULL_REVISION, [
+ ('TREE_ROOT', (None, ''), True, (False, True), (None, None),
+ (None, ''), (None, 'directory'), (None, False))]))
+ builder.finish_inventory()
+ rev_id = builder.commit('first post')
+ result = branch.repository.check(None, check_repo=True)
+ result.report_results(True)
+ log = self.get_log()
+ self.assertFalse('Missing' in log, "Something was missing in %r" % log)
diff --git a/bzrlib/tests/per_repository/test_commit_builder.py b/bzrlib/tests/per_repository/test_commit_builder.py
new file mode 100644
index 0000000..6560b66
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_commit_builder.py
@@ -0,0 +1,1443 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository commit builder."""
+
+import os
+
+from bzrlib import (
+ config,
+ errors,
+ inventory,
+ osutils,
+ repository,
+ revision as _mod_revision,
+ tests,
+ )
+from bzrlib.tests import per_repository
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestCommitBuilder(per_repository.TestCaseWithRepository):
+
+ def test_get_commit_builder(self):
+ branch = self.make_branch('.')
+ branch.repository.lock_write()
+ builder = branch.repository.get_commit_builder(
+ branch, [], branch.get_config_stack())
+ self.assertIsInstance(builder, repository.CommitBuilder)
+ self.assertTrue(builder.random_revid)
+ branch.repository.commit_write_group()
+ branch.repository.unlock()
+
+ def record_root(self, builder, tree):
+ if builder.record_root_entry is True:
+ tree.lock_read()
+ try:
+ ie = tree.root_inventory.root
+ finally:
+ tree.unlock()
+ parent_tree = tree.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ parent_invs = []
+ builder.record_entry_contents(ie, parent_invs, '', tree,
+ tree.path_content_summary(''))
+
+ def test_finish_inventory_with_record_root(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't support "
+ "record_entry_contents")
+ repo = tree.branch.repository
+ self.record_root(builder, tree)
+ builder.finish_inventory()
+ repo.commit_write_group()
+ finally:
+ tree.unlock()
+
+ def test_finish_inventory_record_iter_changes(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ try:
+ list(builder.record_iter_changes(tree, tree.last_revision(),
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ except:
+ builder.abort()
+ raise
+ repo = tree.branch.repository
+ repo.commit_write_group()
+ finally:
+ tree.unlock()
+
+ def test_abort_record_entry_contents(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't support "
+ "record_entry_contents")
+ self.record_root(builder, tree)
+ builder.finish_inventory()
+ builder.abort()
+ finally:
+ tree.unlock()
+
+ def test_abort_record_iter_changes(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ try:
+ basis = tree.basis_tree()
+ last_rev = tree.last_revision()
+ changes = tree.iter_changes(basis)
+ list(builder.record_iter_changes(tree, last_rev, changes))
+ builder.finish_inventory()
+ finally:
+ builder.abort()
+ finally:
+ tree.unlock()
+
+ def test_commit_lossy(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([], lossy=True)
+ list(builder.record_iter_changes(tree, tree.last_revision(),
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ rev_id = builder.commit('foo bar blah')
+ finally:
+ tree.unlock()
+ rev = tree.branch.repository.get_revision(rev_id)
+ self.assertEqual('foo bar blah', rev.message)
+
+ def test_commit_message(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ list(builder.record_iter_changes(tree, tree.last_revision(),
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ rev_id = builder.commit('foo bar blah')
+ finally:
+ tree.unlock()
+ rev = tree.branch.repository.get_revision(rev_id)
+ self.assertEqual('foo bar blah', rev.message)
+
+ def test_updates_branch(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ list(builder.record_iter_changes(tree, tree.last_revision(),
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ will_update_branch = builder.updates_branch
+ rev_id = builder.commit('might update the branch')
+ finally:
+ tree.unlock()
+ actually_updated_branch = (tree.branch.last_revision() == rev_id)
+ self.assertEquals(actually_updated_branch, will_update_branch)
+
+ def test_commit_with_revision_id_record_entry_contents(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ # use a unicode revision id to test more corner cases.
+ # The repository layer is meant to handle this.
+ revision_id = u'\xc8abc'.encode('utf8')
+ try:
+ try:
+ builder = tree.branch.get_commit_builder([],
+ revision_id=revision_id)
+ except errors.NonAsciiRevisionId:
+ revision_id = 'abc'
+ builder = tree.branch.get_commit_builder([],
+ revision_id=revision_id)
+ except errors.CannotSetRevisionId:
+ # This format doesn't support supplied revision ids
+ return
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't support "
+ "record_entry_contents")
+ self.assertFalse(builder.random_revid)
+ self.record_root(builder, tree)
+ builder.finish_inventory()
+ self.assertEqual(revision_id, builder.commit('foo bar'))
+ finally:
+ tree.unlock()
+ self.assertTrue(tree.branch.repository.has_revision(revision_id))
+ # the revision id must be set on the inventory when saving it. This
+ # does not precisely test that - a repository that wants to can add it
+ # on deserialisation, but that's all the current contract guarantees
+ # anyway.
+ self.assertEqual(revision_id,
+ tree.branch.repository.get_inventory(revision_id).revision_id)
+
+ def test_commit_with_revision_id_record_iter_changes(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ # use a unicode revision id to test more corner cases.
+ # The repository layer is meant to handle this.
+ revision_id = u'\xc8abc'.encode('utf8')
+ try:
+ try:
+ builder = tree.branch.get_commit_builder([],
+ revision_id=revision_id)
+ except errors.NonAsciiRevisionId:
+ revision_id = 'abc'
+ builder = tree.branch.get_commit_builder([],
+ revision_id=revision_id)
+ except errors.CannotSetRevisionId:
+ # This format doesn't support supplied revision ids
+ return
+ self.assertFalse(builder.random_revid)
+ try:
+ list(builder.record_iter_changes(tree, tree.last_revision(),
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ except:
+ builder.abort()
+ raise
+ self.assertEqual(revision_id, builder.commit('foo bar'))
+ finally:
+ tree.unlock()
+ self.assertTrue(tree.branch.repository.has_revision(revision_id))
+ # the revision id must be set on the inventory when saving it. This
+ # does not precisely test that - a repository that wants to can add it
+ # on deserialisation, but that's all the current contract guarantees
+ # anyway.
+ self.assertEqual(revision_id,
+ tree.branch.repository.revision_tree(revision_id).get_revision_id())
+
+ def test_commit_without_root_errors(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ def do_commit():
+ try:
+ list(builder.record_iter_changes(
+ tree, tree.last_revision(), []))
+ builder.finish_inventory()
+ except:
+ builder.abort()
+ raise
+ else:
+ builder.commit("msg")
+ self.assertRaises(errors.RootMissing, do_commit)
+ finally:
+ tree.unlock()
+
+ def test_commit_without_root_or_record_iter_changes_errors(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ self.build_tree(['foo'])
+ tree.add('foo', 'foo-id')
+ builder = tree.branch.get_commit_builder([])
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't support "
+ "record_entry_contents")
+ entry = tree.root_inventory['foo-id']
+ self.assertRaises(errors.RootMissing,
+ builder.record_entry_contents, entry, [], 'foo', tree,
+ tree.path_content_summary('foo'))
+ builder.abort()
+ finally:
+ tree.unlock()
+
+ def test_commit_unchanged_root_record_entry_contents(self):
+ tree = self.make_branch_and_tree(".")
+ old_revision_id = tree.commit('')
+ tree.lock_write()
+ parent_tree = tree.basis_tree()
+ parent_tree.lock_read()
+ self.addCleanup(parent_tree.unlock)
+ builder = tree.branch.get_commit_builder([old_revision_id])
+ try:
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't support "
+ "record_entry_contents")
+ builder.will_record_deletes()
+ ie = inventory.make_entry('directory', '', None,
+ tree.get_root_id())
+ delta, version_recorded, fs_hash = builder.record_entry_contents(
+ ie, [parent_tree.root_inventory], '', tree,
+ tree.path_content_summary(''))
+ # Regardless of repository root behaviour we should consider this a
+ # pointless commit.
+ self.assertFalse(builder.any_changes())
+ self.assertFalse(version_recorded)
+ # if the repository format recorded a new root revision, that
+ # should be in the delta
+ got_new_revision = ie.revision != old_revision_id
+ if got_new_revision:
+ self.assertEqual(('', '', ie.file_id, ie), delta)
+ # The delta should be tracked
+ self.assertEqual(delta, builder.get_basis_delta()[-1])
+ else:
+ self.assertEqual(None, delta)
+ # Directories do not get hashed.
+ self.assertEqual(None, fs_hash)
+ builder.abort()
+ except:
+ builder.abort()
+ tree.unlock()
+ raise
+ else:
+ tree.unlock()
+
+ def test_commit_unchanged_root_record_iter_changes(self):
+ tree = self.make_branch_and_tree(".")
+ old_revision_id = tree.commit('')
+ tree.lock_write()
+ builder = tree.branch.get_commit_builder([old_revision_id])
+ try:
+ list(builder.record_iter_changes(tree, old_revision_id, []))
+ # Regardless of repository root behaviour we should consider this a
+ # pointless commit.
+ self.assertFalse(builder.any_changes())
+ builder.finish_inventory()
+ builder.commit('')
+ builder_tree = builder.revision_tree()
+ new_root_id = builder_tree.get_root_id()
+ new_root_revision = builder_tree.get_file_revision(new_root_id)
+ if tree.branch.repository.supports_rich_root():
+ # We should not have seen a new root revision
+ self.assertEqual(old_revision_id, new_root_revision)
+ else:
+ # We should see a new root revision
+ self.assertNotEqual(old_revision_id, new_root_revision)
+ finally:
+ tree.unlock()
+
+ def test_commit_record_entry_contents(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't "
+ "support record_entry_contents")
+ self.record_root(builder, tree)
+ builder.finish_inventory()
+ rev_id = builder.commit('foo bar')
+ finally:
+ tree.unlock()
+ self.assertNotEqual(None, rev_id)
+ self.assertTrue(tree.branch.repository.has_revision(rev_id))
+ # the revision id must be set on the inventory when saving it. This does not
+ # precisely test that - a repository that wants to can add it on deserialisation,
+ # but that's all the current contract guarantees anyway.
+ self.assertEqual(rev_id, tree.branch.repository.get_inventory(rev_id).revision_id)
+
+ def test_get_basis_delta(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["foo"])
+ tree.add(["foo"], ["foo-id"])
+ old_revision_id = tree.commit("added foo")
+ tree.lock_write()
+ try:
+ self.build_tree(['bar'])
+ tree.add(['bar'], ['bar-id'])
+ basis = tree.branch.repository.revision_tree(old_revision_id)
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ builder = tree.branch.get_commit_builder([old_revision_id])
+ total_delta = []
+ try:
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't "
+ "support record_entry_contents")
+ parent_invs = [basis.root_inventory]
+ builder.will_record_deletes()
+ if builder.record_root_entry:
+ ie = basis.root_inventory.root.copy()
+ delta, _, _ = builder.record_entry_contents(ie, parent_invs,
+ '', tree, tree.path_content_summary(''))
+ if delta is not None:
+ total_delta.append(delta)
+ delta = builder.record_delete("foo", "foo-id")
+ total_delta.append(delta)
+ new_bar = inventory.make_entry('file', 'bar',
+ parent_id=tree.get_root_id(), file_id='bar-id')
+ delta, _, _ = builder.record_entry_contents(new_bar, parent_invs,
+ 'bar', tree, tree.path_content_summary('bar'))
+ total_delta.append(delta)
+ # All actions should have been recorded in the basis_delta
+ self.assertEqual(total_delta, builder.get_basis_delta())
+ builder.finish_inventory()
+ builder.commit('delete foo, add bar')
+ except:
+ tree.branch.repository.abort_write_group()
+ raise
+ finally:
+ tree.unlock()
+
+ def test_get_basis_delta_without_notification(self):
+ tree = self.make_branch_and_tree(".")
+ old_revision_id = tree.commit('')
+ tree.lock_write()
+ try:
+ parent_tree = tree.basis_tree()
+ parent_tree.lock_read()
+ self.addCleanup(parent_tree.unlock)
+ builder = tree.branch.get_commit_builder([old_revision_id])
+ # It is an error to expect builder.get_basis_delta() to be correct,
+ # if you have not also called will_record_deletes() to indicate you
+ # will be calling record_delete() when appropriate
+ self.assertRaises(AssertionError, builder.get_basis_delta)
+ tree.branch.repository.abort_write_group()
+ finally:
+ tree.unlock()
+
+ def test_record_delete(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["foo"])
+ tree.add(["foo"], ["foo-id"])
+ rev_id = tree.commit("added foo")
+ # Remove the inventory details for foo-id, because
+ # record_entry_contents ends up copying root verbatim.
+ tree.unversion(["foo-id"])
+ tree.lock_write()
+ try:
+ basis = tree.branch.repository.revision_tree(rev_id)
+ builder = tree.branch.get_commit_builder([rev_id])
+ try:
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't "
+ "support record_entry_contents")
+ builder.will_record_deletes()
+ if builder.record_root_entry is True:
+ parent_invs = [basis.root_inventory]
+ del basis.root_inventory.root.children['foo']
+ builder.record_entry_contents(basis.root_inventory.root,
+ parent_invs, '', tree, tree.path_content_summary(''))
+ # the delta should be returned, and recorded in _basis_delta
+ delta = builder.record_delete("foo", "foo-id")
+ self.assertEqual(("foo", None, "foo-id", None), delta)
+ self.assertEqual(delta, builder.get_basis_delta()[-1])
+ builder.finish_inventory()
+ rev_id2 = builder.commit('delete foo')
+ except:
+ tree.branch.repository.abort_write_group()
+ raise
+ finally:
+ tree.unlock()
+ rev_tree = builder.revision_tree()
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ self.assertFalse(rev_tree.path2id('foo'))
+
+ def test_record_delete_record_iter_changes(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["foo"])
+ tree.add(["foo"], ["foo-id"])
+ rev_id = tree.commit("added foo")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([rev_id])
+ try:
+ builder.will_record_deletes()
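+ # The tuple below follows the Tree.iter_changes change layout:
+ # (file_id, (source_path, target_path), changed_content, versioned,
+ # parent_id, name, kind, executable), where the last five fields are
+ # (source, target) pairs; here it describes deleting the file 'foo'.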
+ delete_change = ('foo-id', ('foo', None), True, (True, False),
+ (tree.path2id(''), None), ('foo', None), ('file', None),
+ (False, None))
+ list(builder.record_iter_changes(tree, rev_id,
+ [delete_change]))
+ self.assertEqual(("foo", None, "foo-id", None),
+ builder.get_basis_delta()[0])
+ self.assertTrue(builder.any_changes())
+ builder.finish_inventory()
+ rev_id2 = builder.commit('delete foo')
+ except:
+ builder.abort()
+ raise
+ finally:
+ tree.unlock()
+ rev_tree = builder.revision_tree()
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ self.assertFalse(rev_tree.path2id('foo'))
+
+ def test_record_delete_without_notification(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["foo"])
+ tree.add(["foo"], ["foo-id"])
+ rev_id = tree.commit("added foo")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([rev_id])
+ try:
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't "
+ "support record_entry_contents")
+ self.record_root(builder, tree)
+ self.assertRaises(AssertionError,
+ builder.record_delete, "foo", "foo-id")
+ finally:
+ tree.branch.repository.abort_write_group()
+ finally:
+ tree.unlock()
+
+ def test_revision_tree_record_entry_contents(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't "
+ "support record_entry_contents")
+ self.record_root(builder, tree)
+ builder.finish_inventory()
+ rev_id = builder.commit('foo bar')
+ finally:
+ tree.unlock()
+ rev_tree = builder.revision_tree()
+ # Just a couple simple tests to ensure that it actually follows
+ # the RevisionTree api.
+ self.assertEqual(rev_id, rev_tree.get_revision_id())
+ self.assertEqual([], rev_tree.get_parent_ids())
+
+ def test_revision_tree_record_iter_changes(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ builder = tree.branch.get_commit_builder([])
+ try:
+ list(builder.record_iter_changes(tree,
+ _mod_revision.NULL_REVISION,
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ rev_id = builder.commit('foo bar')
+ except:
+ builder.abort()
+ raise
+ rev_tree = builder.revision_tree()
+ # Just a couple simple tests to ensure that it actually follows
+ # the RevisionTree api.
+ self.assertEqual(rev_id, rev_tree.get_revision_id())
+ self.assertEqual((), tuple(rev_tree.get_parent_ids()))
+ finally:
+ tree.unlock()
+
+ def test_root_entry_has_revision(self):
+ # test the root revision created and put in the basis
+ # has the right rev id.
+ # XXX: RBC 20081118 - this test is too big, it depends on the exact
+ # behaviour of tree methods and so on; it should be written to the
+ # commit builder interface directly.
+ tree = self.make_branch_and_tree('.')
+ rev_id = tree.commit('message')
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ self.addCleanup(basis_tree.unlock)
+ self.assertEqual(rev_id,
+ basis_tree.get_file_revision(basis_tree.get_root_id()))
+
+ def _get_revtrees(self, tree, revision_ids):
+ tree.lock_read()
+ try:
+ trees = list(tree.branch.repository.revision_trees(revision_ids))
+ for _tree in trees:
+ _tree.lock_read()
+ self.addCleanup(_tree.unlock)
+ return trees
+ finally:
+ tree.unlock()
+
+ def test_last_modified_revision_after_commit_root_unchanged(self):
+ # committing without changing the root does not change the
+ # last modified except on non-rich-root repositories.
+ tree = self.make_branch_and_tree('.')
+ rev1 = tree.commit('')
+ rev2 = tree.commit('')
+ tree1, tree2 = self._get_revtrees(tree, [rev1, rev2])
+ self.assertEqual(rev1, tree1.get_file_revision(tree1.get_root_id()))
+ if tree.branch.repository.supports_rich_root():
+ self.assertEqual(rev1,
+ tree2.get_file_revision(tree2.get_root_id()))
+ else:
+ self.assertEqual(rev2,
+ tree2.get_file_revision(tree2.get_root_id()))
+
+ def _add_commit_check_unchanged(self, tree, name, mini_commit=None):
+ tree.add([name], [name + 'id'])
+ self._commit_check_unchanged(tree, name, name + 'id',
+ mini_commit=mini_commit)
+
+ def _commit_check_unchanged(self, tree, name, file_id, mini_commit=None):
+ rev1 = tree.commit('')
+ if mini_commit is None:
+ mini_commit = self.mini_commit
+ rev2 = mini_commit(tree, name, name, False, False)
+ tree1, tree2 = self._get_revtrees(tree, [rev1, rev2])
+ self.assertEqual(rev1, tree1.get_file_revision(file_id))
+ self.assertEqual(rev1, tree2.get_file_revision(file_id))
+ expected_graph = {}
+ expected_graph[(file_id, rev1)] = ()
+ self.assertFileGraph(expected_graph, tree, (file_id, rev1))
+
+ def test_last_modified_revision_after_commit_dir_unchanged(self):
+ # committing without changing a dir does not change the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ self._add_commit_check_unchanged(tree, 'dir')
+
+ def test_last_modified_revision_after_commit_dir_unchanged_ric(self):
+ # committing without changing a dir does not change the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ self._add_commit_check_unchanged(tree, 'dir',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_commit_dir_contents_unchanged(self):
+ # committing without changing a dir does not change the last modified
+ # of the dir even if the dir's contents are changed.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ tree.add(['dir'], ['dirid'])
+ rev1 = tree.commit('')
+ self.build_tree(['dir/content'])
+ tree.add(['dir/content'], ['contentid'])
+ rev2 = tree.commit('')
+ tree1, tree2 = self._get_revtrees(tree, [rev1, rev2])
+ self.assertEqual(rev1, tree1.get_file_revision('dirid'))
+ self.assertEqual(rev1, tree2.get_file_revision('dirid'))
+ file_id = 'dirid'
+ expected_graph = {}
+ expected_graph[(file_id, rev1)] = ()
+ self.assertFileGraph(expected_graph, tree, (file_id, rev1))
+
+ def test_last_modified_revision_after_commit_file_unchanged(self):
+ # committing without changing a file does not change the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ self._add_commit_check_unchanged(tree, 'file')
+
+ def test_last_modified_revision_after_commit_file_unchanged_ric(self):
+ # committing without changing a file does not change the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ self._add_commit_check_unchanged(tree, 'file',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_commit_link_unchanged(self):
+ # committing without changing a link does not change the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ self._add_commit_check_unchanged(tree, 'link')
+
+ def test_last_modified_revision_after_commit_link_unchanged_ric(self):
+ # committing without changing a link does not change the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ self._add_commit_check_unchanged(tree, 'link',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_commit_reference_unchanged(self):
+ # committing without changing a subtree does not change the last
+ # modified.
+ tree = self.make_branch_and_tree('.')
+ subtree = self.make_reference('reference')
+ try:
+ tree.add_reference(subtree)
+ self._commit_check_unchanged(tree, 'reference',
+ subtree.get_root_id())
+ except errors.UnsupportedOperation:
+ return
+
+ def test_last_modified_revision_after_commit_reference_unchanged_ric(self):
+ # committing without changing a subtree does not change the last
+ # modified.
+ tree = self.make_branch_and_tree('.')
+ subtree = self.make_reference('reference')
+ try:
+ tree.add_reference(subtree)
+ self._commit_check_unchanged(tree, 'reference',
+ subtree.get_root_id(),
+ mini_commit=self.mini_commit_record_iter_changes)
+ except errors.UnsupportedOperation:
+ return
+
+ def _add_commit_renamed_check_changed(self, tree, name,
+ expect_fs_hash=False, mini_commit=None):
+ def rename():
+ tree.rename_one(name, 'new_' + name)
+ self._add_commit_change_check_changed(tree, name, rename,
+ expect_fs_hash=expect_fs_hash, mini_commit=mini_commit)
+
+ def _commit_renamed_check_changed(self, tree, name, file_id,
+ expect_fs_hash=False, mini_commit=None):
+ def rename():
+ tree.rename_one(name, 'new_' + name)
+ self._commit_change_check_changed(tree, name, file_id, rename,
+ expect_fs_hash=expect_fs_hash, mini_commit=mini_commit)
+
+ def test_last_modified_revision_after_rename_dir_changes(self):
+ # renaming a dir changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ self._add_commit_renamed_check_changed(tree, 'dir')
+
+ def test_last_modified_revision_after_rename_dir_changes_ric(self):
+ # renaming a dir changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ self._add_commit_renamed_check_changed(tree, 'dir',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_rename_file_changes(self):
+ # renaming a file changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ self._add_commit_renamed_check_changed(tree, 'file',
+ expect_fs_hash=True)
+
+ def test_last_modified_revision_after_rename_file_changes_ric(self):
+ # renaming a file changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ self._add_commit_renamed_check_changed(tree, 'file',
+ expect_fs_hash=True,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_rename_link_changes(self):
+ # renaming a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ self._add_commit_renamed_check_changed(tree, 'link')
+
+ def test_last_modified_revision_after_rename_link_changes_ric(self):
+ # renaming a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ self._add_commit_renamed_check_changed(tree, 'link',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_rename_ref_changes(self):
+ # renaming a reference changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ subtree = self.make_reference('reference')
+ try:
+ tree.add_reference(subtree)
+ self._commit_renamed_check_changed(tree, 'reference',
+ subtree.get_root_id())
+ except errors.UnsupportedOperation:
+ return
+
+ def test_last_modified_revision_after_rename_ref_changes_ric(self):
+ # renaming a reference changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ subtree = self.make_reference('reference')
+ try:
+ tree.add_reference(subtree)
+ self._commit_renamed_check_changed(tree, 'reference',
+ subtree.get_root_id(),
+ mini_commit=self.mini_commit_record_iter_changes)
+ except errors.UnsupportedOperation:
+ return
+
+ def _add_commit_reparent_check_changed(self, tree, name,
+ expect_fs_hash=False, mini_commit=None):
+ self.build_tree(['newparent/'])
+ tree.add(['newparent'])
+ def reparent():
+ tree.rename_one(name, 'newparent/new_' + name)
+ self._add_commit_change_check_changed(tree, name, reparent,
+ expect_fs_hash=expect_fs_hash, mini_commit=mini_commit)
+
+ def test_last_modified_revision_after_reparent_dir_changes(self):
+ # reparenting a dir changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ self._add_commit_reparent_check_changed(tree, 'dir')
+
+ def test_last_modified_revision_after_reparent_dir_changes_ric(self):
+ # reparenting a dir changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/'])
+ self._add_commit_reparent_check_changed(tree, 'dir',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_reparent_file_changes(self):
+ # reparenting a file changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ self._add_commit_reparent_check_changed(tree, 'file',
+ expect_fs_hash=True)
+
+ def test_last_modified_revision_after_reparent_file_changes_ric(self):
+ # reparenting a file changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ self._add_commit_reparent_check_changed(tree, 'file',
+ expect_fs_hash=True,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_reparent_link_changes(self):
+ # reparenting a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ self._add_commit_reparent_check_changed(tree, 'link')
+
+ def test_last_modified_revision_after_reparent_link_changes_ric(self):
+ # reparenting a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ self._add_commit_reparent_check_changed(tree, 'link',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def _add_commit_change_check_changed(self, tree, name, changer,
+ expect_fs_hash=False, mini_commit=None, file_id=None):
+ if file_id is None:
+ file_id = name + 'id'
+ tree.add([name], [file_id])
+ self._commit_change_check_changed(
+ tree, name, file_id,
+ changer, expect_fs_hash=expect_fs_hash, mini_commit=mini_commit)
+
+ def _commit_change_check_changed(self, tree, name, file_id, changer,
+ expect_fs_hash=False, mini_commit=None):
+ rev1 = tree.commit('')
+ changer()
+ if mini_commit is None:
+ mini_commit = self.mini_commit
+ rev2 = mini_commit(tree, name, tree.id2path(file_id),
+ expect_fs_hash=expect_fs_hash)
+ tree1, tree2 = self._get_revtrees(tree, [rev1, rev2])
+ self.assertEqual(rev1, tree1.get_file_revision(file_id))
+ self.assertEqual(rev2, tree2.get_file_revision(file_id))
+ expected_graph = {}
+ expected_graph[(file_id, rev1)] = ()
+ expected_graph[(file_id, rev2)] = ((file_id, rev1),)
+ self.assertFileGraph(expected_graph, tree, (file_id, rev2))
+
+ def mini_commit(self, tree, name, new_name, records_version=True,
+ delta_against_basis=True, expect_fs_hash=False):
+ """Perform a miniature commit looking for record entry results.
+
+ :param tree: The tree to commit.
+ :param name: The path in the basis tree of the tree being committed.
+ :param new_name: The path in the tree being committed.
+ :param records_version: True if the commit of new_name is expected to
+ record a new version.
+ :param delta_against_basis: True if the commit of new_name is expected
+ to have a delta against the basis.
+ :param expect_fs_hash: True or false to indicate whether we expect a
+ file hash to be returned from the record_entry_contents call.
+ """
+ tree.lock_write()
+ try:
+ # mini manual commit here so we can check the return of
+ # record_entry_contents.
+ parent_ids = tree.get_parent_ids()
+ builder = tree.branch.get_commit_builder(parent_ids)
+ try:
+ if not builder.supports_record_entry_contents:
+ raise tests.TestNotApplicable("CommitBuilder doesn't "
+ "support record_entry_contents")
+ builder.will_record_deletes()
+ parent_tree = tree.basis_tree()
+ parent_tree.lock_read()
+ self.addCleanup(parent_tree.unlock)
+ parent_invs = [parent_tree.root_inventory]
+ for parent_id in parent_ids[1:]:
+ parent_invs.append(tree.branch.repository.revision_tree(
+ parent_id).root_inventory)
+ # root
+ builder.record_entry_contents(
+ inventory.make_entry('directory', '', None,
+ tree.get_root_id()), parent_invs, '', tree,
+ tree.path_content_summary(''))
+ def commit_id(file_id):
+ old_ie = tree.root_inventory[file_id]
+ path = tree.id2path(file_id)
+ ie = inventory.make_entry(tree.kind(file_id), old_ie.name,
+ old_ie.parent_id, file_id)
+ content_summary = tree.path_content_summary(path)
+ if content_summary[0] == 'tree-reference':
+ content_summary = content_summary[:3] + (
+ tree.get_reference_revision(file_id),)
+ return builder.record_entry_contents(ie, parent_invs, path,
+ tree, content_summary)
+
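+ # If the changed entry is not directly below the root, record its
+ # containing directory first so that the parent exists in the new
+ # inventory before the child entry is recorded.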
+ file_id = tree.path2id(new_name)
+ parent_id = tree.root_inventory[file_id].parent_id
+ if parent_id != tree.get_root_id():
+ commit_id(parent_id)
+ # because a change of some sort is meant to have occurred,
+ # recording the entry must return True.
+ delta, version_recorded, fs_hash = commit_id(file_id)
+ if records_version:
+ self.assertTrue(version_recorded)
+ else:
+ self.assertFalse(version_recorded)
+ if expect_fs_hash:
+ tree_file_stat = tree.get_file_with_stat(file_id)
+ tree_file_stat[0].close()
+ self.assertEqual(2, len(fs_hash))
+ self.assertEqual(tree.get_file_sha1(file_id), fs_hash[0])
+ self.assertEqualStat(tree_file_stat[1], fs_hash[1])
+ else:
+ self.assertEqual(None, fs_hash)
+ new_entry = builder.new_inventory[file_id]
+ if delta_against_basis:
+ expected_delta = (name, new_name, file_id, new_entry)
+ # The delta should be recorded
+ self.assertEqual(expected_delta,
+ builder.get_basis_delta()[-1])
+ else:
+ expected_delta = None
+ self.assertEqual(expected_delta, delta)
+ builder.finish_inventory()
+ except:
+ builder.abort()
+ raise
+ else:
+ rev2 = builder.commit('')
+ except:
+ tree.unlock()
+ raise
+ try:
+ tree.set_parent_ids([rev2])
+ finally:
+ tree.unlock()
+ return rev2
+
+ def mini_commit_record_iter_changes(self, tree, name, new_name,
+ records_version=True, delta_against_basis=True, expect_fs_hash=False):
+ """Perform a miniature commit looking for record entry results.
+
+ This version uses the record_iter_changes interface.
+
+ :param tree: The tree to commit.
+ :param name: The path in the basis tree of the tree being committed.
+ :param new_name: The path in the tree being committed.
+ :param records_version: True if the commit of new_name is expected to
+ record a new version.
+ :param delta_against_basis: True if the commit of new_name is expected
+ to have a delta against the basis.
+ :param expect_fs_hash: If true, looks for an fs hash in the output of
+ record_iter_changes.
+ """
+ tree.lock_write()
+ try:
+ # mini manual commit here so we can check the return of
+ # record_iter_changes.
+ parent_ids = tree.get_parent_ids()
+ builder = tree.branch.get_commit_builder(parent_ids)
+ builder.will_record_deletes()
+ parent_tree = tree.basis_tree()
+ parent_tree.lock_read()
+ self.addCleanup(parent_tree.unlock)
+ parent_trees = [parent_tree]
+ for parent_id in parent_ids[1:]:
+ parent_trees.append(tree.branch.repository.revision_tree(
+ parent_id))
+ changes = list(tree.iter_changes(parent_tree))
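+ # record_iter_changes yields (file_id, relpath, (sha1, stat)) tuples
+ # for file texts it hashed while recording; for other kinds of change
+ # nothing is yielded.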
+ result = list(builder.record_iter_changes(tree, parent_ids[0],
+ changes))
+ file_id = tree.path2id(new_name)
+ if expect_fs_hash:
+ tree_file_stat = tree.get_file_with_stat(file_id)
+ tree_file_stat[0].close()
+ self.assertLength(1, result)
+ result = result[0]
+ self.assertEqual(result[:2], (file_id, new_name))
+ self.assertEqual(result[2][0], tree.get_file_sha1(file_id))
+ self.assertEqualStat(result[2][1], tree_file_stat[1])
+ else:
+ self.assertEqual([], result)
+ self.assertIs(None, builder.new_inventory)
+ builder.finish_inventory()
+ if tree.branch.repository._format.supports_full_versioned_files:
+ inv_key = (builder._new_revision_id,)
+ inv_sha1 = tree.branch.repository.inventories.get_sha1s(
+ [inv_key])[inv_key]
+ self.assertEqual(inv_sha1, builder.inv_sha1)
+ self.assertIs(None, builder.new_inventory)
+ rev2 = builder.commit('')
+ delta = builder.get_basis_delta()
+ delta_dict = dict((change[2], change) for change in delta)
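+ # A new version was recorded for file_id iff the basis delta contains
+ # an entry for it whose new inventory entry was committed in rev2.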
+ version_recorded = (file_id in delta_dict and
+ delta_dict[file_id][3] is not None and
+ delta_dict[file_id][3].revision == rev2)
+ if records_version:
+ self.assertTrue(version_recorded)
+ else:
+ self.assertFalse(version_recorded)
+
+ new_inventory = builder.revision_tree().root_inventory
+ new_entry = new_inventory[file_id]
+ if delta_against_basis:
+ expected_delta = (name, new_name, file_id, new_entry)
+ self.assertEqual(expected_delta, delta_dict[file_id])
+ else:
+ expected_delta = None
+ self.assertFalse(version_recorded)
+ tree.set_parent_ids([rev2])
+ except:
+ builder.abort()
+ tree.unlock()
+ raise
+ else:
+ tree.unlock()
+ return rev2
+
+ def assertFileGraph(self, expected_graph, tree, tip):
+ # all the changes that have occurred should be in the ancestry
+ # (closest to a public per-file graph API we have today)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ g = dict(tree.branch.repository.get_file_graph().iter_ancestry([tip]))
+ self.assertEqual(expected_graph, g)
+
+ def test_last_modified_revision_after_content_file_changes(self):
+ # altering a file changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ def change_file():
+ tree.put_file_bytes_non_atomic('fileid', 'new content')
+ self._add_commit_change_check_changed(tree, 'file', change_file,
+ expect_fs_hash=True)
+
+ def test_last_modified_revision_after_content_file_changes_ric(self):
+ # altering a file changes the last modified.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ def change_file():
+ tree.put_file_bytes_non_atomic('fileid', 'new content')
+ self._add_commit_change_check_changed(tree, 'file', change_file,
+ expect_fs_hash=True,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_content_link_changes(self):
+ # changing a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'link')
+ def change_link():
+ os.unlink('link')
+ os.symlink('newtarget', 'link')
+ self._add_commit_change_check_changed(tree, 'link', change_link)
+
+ def _test_last_mod_rev_after_content_link_changes_ric(
+ self, link, target, newtarget, file_id=None):
+ if file_id is None:
+ file_id = link
+ # changing a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink(target, link)
+ def change_link():
+ os.unlink(link)
+ os.symlink(newtarget, link)
+ self._add_commit_change_check_changed(
+ tree, link, change_link,
+ mini_commit=self.mini_commit_record_iter_changes,
+ file_id=file_id)
+
+ def test_last_modified_rev_after_content_link_changes_ric(self):
+ self._test_last_mod_rev_after_content_link_changes_ric(
+ 'link', 'target', 'newtarget')
+
+ def test_last_modified_rev_after_content_unicode_link_changes_ric(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self._test_last_mod_rev_after_content_link_changes_ric(
+ u'li\u1234nk', u'targ\N{Euro Sign}t', u'n\N{Euro Sign}wtarget',
+ file_id=u'li\u1234nk'.encode('UTF-8'))
+
+ def _commit_sprout(self, tree, name):
+ tree.add([name], [name + 'id'])
+ rev_id = tree.commit('')
+ return rev_id, tree.bzrdir.sprout('t2').open_workingtree()
+
+ def _rename_in_tree(self, tree, name):
+ tree.rename_one(name, 'new_' + name)
+ return tree.commit('')
+
+ def _commit_sprout_rename_merge(self, tree1, name, expect_fs_hash=False,
+ mini_commit=None):
+ """Do a rename in both trees."""
+ rev1, tree2 = self._commit_sprout(tree1, name)
+ # change both sides equally
+ rev2 = self._rename_in_tree(tree1, name)
+ rev3 = self._rename_in_tree(tree2, name)
+ tree1.merge_from_branch(tree2.branch)
+ if mini_commit is None:
+ mini_commit = self.mini_commit
+ rev4 = mini_commit(tree1, 'new_' + name, 'new_' + name,
+ expect_fs_hash=expect_fs_hash)
+ tree3, = self._get_revtrees(tree1, [rev4])
+ self.assertEqual(rev4, tree3.get_file_revision(name + 'id'))
+ file_id = name + 'id'
+ expected_graph = {}
+ expected_graph[(file_id, rev1)] = ()
+ expected_graph[(file_id, rev2)] = ((file_id, rev1),)
+ expected_graph[(file_id, rev3)] = ((file_id, rev1),)
+ expected_graph[(file_id, rev4)] = ((file_id, rev2), (file_id, rev3),)
+ self.assertFileGraph(expected_graph, tree1, (file_id, rev4))
+
+ def test_last_modified_revision_after_merge_dir_changes(self):
+ # merging a dir changes the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/dir/'])
+ self._commit_sprout_rename_merge(tree1, 'dir')
+
+ def test_last_modified_revision_after_merge_dir_changes_ric(self):
+ # merging a dir changes the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/dir/'])
+ self._commit_sprout_rename_merge(tree1, 'dir',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_merge_file_changes(self):
+ # merging a file changes the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/file'])
+ self._commit_sprout_rename_merge(tree1, 'file', expect_fs_hash=True)
+
+ def test_last_modified_revision_after_merge_file_changes_ric(self):
+ # merging a file changes the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/file'])
+ self._commit_sprout_rename_merge(tree1, 'file', expect_fs_hash=True,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_merge_link_changes(self):
+ # merging a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree1 = self.make_branch_and_tree('t1')
+ os.symlink('target', 't1/link')
+ self._commit_sprout_rename_merge(tree1, 'link')
+
+ def test_last_modified_revision_after_merge_link_changes_ric(self):
+ # merging a link changes the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree1 = self.make_branch_and_tree('t1')
+ os.symlink('target', 't1/link')
+ self._commit_sprout_rename_merge(tree1, 'link',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def _commit_sprout_rename_merge_converged(self, tree1, name,
+ mini_commit=None):
+ # Make a merge which just incorporates a change from a branch:
+ # The per-file graph is a straight line, and no alteration occurs
+ # in the inventory.
+ # Part 1: change in the merged branch.
+ rev1, tree2 = self._commit_sprout(tree1, name)
+ # change on the other side to merge back
+ rev2 = self._rename_in_tree(tree2, name)
+ tree1.merge_from_branch(tree2.branch)
+ if mini_commit is None:
+ mini_commit = self.mini_commit
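+ # The merge commit must not create a new per-file version: the last
+ # modified revision stays at rev2 and the per-file graph remains a
+ # straight line ending at rev2.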
+ def _check_graph(in_tree, changed_in_tree):
+ rev3 = mini_commit(in_tree, name, 'new_' + name, False,
+ delta_against_basis=changed_in_tree)
+ tree3, = self._get_revtrees(in_tree, [rev2])
+ self.assertEqual(rev2, tree3.get_file_revision(name + 'id'))
+ file_id = name + 'id'
+ expected_graph = {}
+ expected_graph[(file_id, rev1)] = ()
+ expected_graph[(file_id, rev2)] = ((file_id, rev1),)
+ self.assertFileGraph(expected_graph, in_tree, (file_id, rev2))
+ _check_graph(tree1, True)
+ # Part 2: change in the merged into branch - we use tree2 that has a
+ # change to name, branch tree1 and give it an unrelated change, then
+ # merge that to t2.
+ other_tree = tree1.bzrdir.sprout('t3').open_workingtree()
+ other_rev = other_tree.commit('')
+ tree2.merge_from_branch(other_tree.branch)
+ _check_graph(tree2, False)
+
+ def _commit_sprout_make_merge(self, tree1, make, mini_commit=None):
+ # Make a merge which incorporates the addition of a new object to
+ # another branch. The per-file graph shows no additional change
+ # in the merge because it's a straight line.
+ rev1 = tree1.commit('')
+ tree2 = tree1.bzrdir.sprout('t2').open_workingtree()
+ # make and commit on the other side to merge back
+ make('t2/name')
+ file_id = 'nameid'
+ tree2.add(['name'], [file_id])
+ rev2 = tree2.commit('')
+ tree1.merge_from_branch(tree2.branch)
+ if mini_commit is None:
+ mini_commit = self.mini_commit
+ rev3 = mini_commit(tree1, None, 'name', False)
+ tree3, = self._get_revtrees(tree1, [rev2])
+ # in rev2's tree, name should have last been changed in rev2
+ self.assertEqual(rev2, tree3.get_file_revision(file_id))
+ expected_graph = {}
+ expected_graph[(file_id, rev2)] = ()
+ self.assertFileGraph(expected_graph, tree1, (file_id, rev2))
+
+ def test_last_modified_revision_after_converged_merge_dir_unchanged(self):
+ # merging a dir that changed preserves the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/dir/'])
+ self._commit_sprout_rename_merge_converged(tree1, 'dir')
+
+ def test_last_modified_revision_after_converged_merge_dir_unchanged_ric(self):
+ # merging a dir that changed preserves the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/dir/'])
+ self._commit_sprout_rename_merge_converged(tree1, 'dir',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_converged_merge_file_unchanged(self):
+ # merging a file that changed preserves the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/file'])
+ self._commit_sprout_rename_merge_converged(tree1, 'file')
+
+ def test_last_modified_revision_after_converged_merge_file_unchanged_ric(self):
+ # merging a file that changed preserves the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self.build_tree(['t1/file'])
+ self._commit_sprout_rename_merge_converged(tree1, 'file',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_converged_merge_link_unchanged(self):
+ # merging a link that changed preserves the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree1 = self.make_branch_and_tree('t1')
+ os.symlink('target', 't1/link')
+ self._commit_sprout_rename_merge_converged(tree1, 'link')
+
+ def test_last_modified_revision_after_converged_merge_link_unchanged_ric(self):
+ # merging a link that changed preserves the last modified.
+ self.requireFeature(features.SymlinkFeature)
+ tree1 = self.make_branch_and_tree('t1')
+ os.symlink('target', 't1/link')
+ self._commit_sprout_rename_merge_converged(tree1, 'link',
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_merge_new_dir_unchanged(self):
+ # merging a new dir does not change the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self._commit_sprout_make_merge(tree1, self.make_dir)
+
+ def test_last_modified_revision_after_merge_new_dir_unchanged_ric(self):
+ # merging a new dir does not change the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self._commit_sprout_make_merge(tree1, self.make_dir,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_merge_new_file_unchanged(self):
+ # merging a new file does not change the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self._commit_sprout_make_merge(tree1, self.make_file)
+
+ def test_last_modified_revision_after_merge_new_file_unchanged_ric(self):
+ # merging a new file does not change the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self._commit_sprout_make_merge(tree1, self.make_file,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_revision_after_merge_new_link_unchanged(self):
+ # merging a new link does not change the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self._commit_sprout_make_merge(tree1, self.make_link)
+
+ def test_last_modified_revision_after_merge_new_link_unchanged_ric(self):
+ # merging a new link does not change the last modified.
+ tree1 = self.make_branch_and_tree('t1')
+ self._commit_sprout_make_merge(tree1, self.make_link,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def make_dir(self, name):
+ self.build_tree([name + '/'])
+
+ def make_file(self, name):
+ self.build_tree([name])
+
+ def make_link(self, name):
+ self.requireFeature(features.SymlinkFeature)
+ os.symlink('target', name)
+
+ def make_reference(self, name):
+ tree = self.make_branch_and_tree(name, format='1.9-rich-root')
+ tree.commit('foo')
+ return tree
+
+ def _check_kind_change(self, make_before, make_after, expect_fs_hash=False,
+ mini_commit=None):
+ tree = self.make_branch_and_tree('.')
+ path = 'name'
+ make_before(path)
+
+ def change_kind():
+ if osutils.file_kind(path) == "directory":
+ osutils.rmtree(path)
+ else:
+ osutils.delete_any(path)
+ make_after(path)
+
+ self._add_commit_change_check_changed(tree, path, change_kind,
+ expect_fs_hash=expect_fs_hash, mini_commit=mini_commit)
+
+ def test_last_modified_dir_file(self):
+ self._check_kind_change(self.make_dir, self.make_file,
+ expect_fs_hash=True)
+
+ def test_last_modified_dir_file_ric(self):
+ try:
+ self._check_kind_change(self.make_dir, self.make_file,
+ expect_fs_hash=True,
+ mini_commit=self.mini_commit_record_iter_changes)
+ except errors.UnsupportedKindChange:
+ raise tests.TestSkipped(
+ "tree does not support changing entry kind from "
+ "directory to file")
+
+ def test_last_modified_dir_link(self):
+ self._check_kind_change(self.make_dir, self.make_link)
+
+ def test_last_modified_dir_link_ric(self):
+ try:
+ self._check_kind_change(self.make_dir, self.make_link,
+ mini_commit=self.mini_commit_record_iter_changes)
+ except errors.UnsupportedKindChange:
+ raise tests.TestSkipped(
+ "tree does not support changing entry kind from "
+ "directory to link")
+
+ def test_last_modified_link_file(self):
+ self._check_kind_change(self.make_link, self.make_file,
+ expect_fs_hash=True)
+
+ def test_last_modified_link_file_ric(self):
+ self._check_kind_change(self.make_link, self.make_file,
+ expect_fs_hash=True,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_link_dir(self):
+ self._check_kind_change(self.make_link, self.make_dir)
+
+ def test_last_modified_link_dir_ric(self):
+ self._check_kind_change(self.make_link, self.make_dir,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_file_dir(self):
+ self._check_kind_change(self.make_file, self.make_dir)
+
+ def test_last_modified_file_dir_ric(self):
+ self._check_kind_change(self.make_file, self.make_dir,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_last_modified_file_link(self):
+ self._check_kind_change(self.make_file, self.make_link)
+
+ def test_last_modified_file_link_ric(self):
+ self._check_kind_change(self.make_file, self.make_link,
+ mini_commit=self.mini_commit_record_iter_changes)
+
+ def test_get_commit_builder_with_invalid_revprops(self):
+ branch = self.make_branch('.')
+ branch.repository.lock_write()
+ self.addCleanup(branch.repository.unlock)
+ self.assertRaises(ValueError, branch.repository.get_commit_builder,
+ branch, [], branch.get_config_stack(),
+ revprops={'invalid': u'property\rwith\r\ninvalid chars'})
+
+ def test_commit_builder_commit_with_invalid_message(self):
+ branch = self.make_branch('.')
+ branch.repository.lock_write()
+ self.addCleanup(branch.repository.unlock)
+ builder = branch.repository.get_commit_builder(branch, [],
+ branch.get_config_stack())
+ self.addCleanup(branch.repository.abort_write_group)
+ self.assertRaises(ValueError, builder.commit,
+ u'Invalid\r\ncommit message\r\n')
+
+ def test_non_ascii_str_committer_rejected(self):
+ """Ensure an error is raised on a non-ascii byte string committer"""
+ branch = self.make_branch('.')
+ branch.repository.lock_write()
+ self.addCleanup(branch.repository.unlock)
+ self.assertRaises(UnicodeDecodeError,
+ branch.repository.get_commit_builder,
+ branch, [], branch.get_config_stack(),
+ committer="Erik B\xe5gfors <erik@example.com>")
+
+ def test_stacked_repositories_reject_commit_builder(self):
+ # As per bug 375013, committing to stacked repositories is currently
+ # broken if we aren't in a chk repository. So old repositories with
+ # fallbacks refuse to hand out a commit builder.
+ repo_basis = self.make_repository('basis')
+ branch = self.make_branch('local')
+ repo_local = branch.repository
+ try:
+ repo_local.add_fallback_repository(repo_basis)
+ except errors.UnstackableRepositoryFormat:
+ raise tests.TestNotApplicable("not a stackable format.")
+ self.addCleanup(repo_local.lock_write().unlock)
+ if not repo_local._format.supports_chks:
+ self.assertRaises(errors.BzrError, repo_local.get_commit_builder,
+ branch, [], branch.get_config_stack())
+ else:
+ builder = repo_local.get_commit_builder(branch, [],
+ branch.get_config_stack())
+ builder.abort()
+
+ def test_committer_no_username(self):
+ # Ensure that when no username is available but a committer is
+ # supplied, commit works.
+ self.overrideEnv('EMAIL', None)
+ self.overrideEnv('BZR_EMAIL', None)
+ # Also, make sure that it's not inferred from mailname.
+ self.overrideAttr(config, '_auto_user_id',
+ lambda: (None, None))
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ try:
+ # Make sure no username is available.
+ self.assertRaises(errors.NoWhoami, tree.branch.get_commit_builder,
+ [])
+ builder = tree.branch.get_commit_builder(
+ [], committer='me@example.com')
+ try:
+ list(builder.record_iter_changes(tree, tree.last_revision(),
+ tree.iter_changes(tree.basis_tree())))
+ builder.finish_inventory()
+ except:
+ builder.abort()
+ raise
+ repo = tree.branch.repository
+ repo.commit_write_group()
+ finally:
+ tree.unlock()
diff --git a/bzrlib/tests/per_repository/test_fetch.py b/bzrlib/tests/per_repository/test_fetch.py
new file mode 100644
index 0000000..3ddbc9d
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_fetch.py
@@ -0,0 +1,369 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for fetch between repositories of the same type."""
+
+from bzrlib import (
+ controldir,
+ errors,
+ gpg,
+ remote,
+ repository,
+ )
+from bzrlib.inventory import ROOT_ID
+from bzrlib.tests import (
+ TestNotApplicable,
+ TestSkipped,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestFetchSameRepository(TestCaseWithRepository):
+
+ def test_fetch(self):
+ # smoke test fetch to ensure that the convenience function works.
+ # it is defined as a convenience function with the underlying
+ # functionality provided by an InterRepository
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ tree_a.add('foo', 'file1')
+ tree_a.commit('rev1', rev_id='rev1')
+ # fetch with a default limit (grab everything)
+ repo = self.make_repository('b')
+ if (tree_a.branch.repository.supports_rich_root() and not
+ repo.supports_rich_root()):
+ raise TestSkipped('Cannot fetch from model2 to model1')
+ repo.fetch(tree_a.branch.repository,
+ revision_id=None)
+
+ def test_fetch_fails_in_write_group(self):
+ # fetch() manages a write group itself, fetching within one isn't safe.
+ repo = self.make_repository('a')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ self.addCleanup(repo.abort_write_group)
+ # Don't need a specific class - not expecting flow control based on
+ # this.
+ self.assertRaises(errors.BzrError, repo.fetch, repo)
+
+ def test_fetch_to_knit3(self):
+ # create a repository of the sort we are testing.
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ tree_a.add('foo', 'file1')
+ tree_a.commit('rev1', rev_id='rev1')
+ # create a knit-3 based format to fetch into
+ f = controldir.format_registry.make_bzrdir('development-subtree')
+ try:
+ format = tree_a.branch.repository._format
+ format.check_conversion_target(f.repository_format)
+ # if we cannot convert data to knit3, skip the test.
+ except errors.BadConversionTarget, e:
+ raise TestSkipped(str(e))
+ self.get_transport().mkdir('b')
+ b_bzrdir = f.initialize(self.get_url('b'))
+ knit3_repo = b_bzrdir.create_repository()
+ # fetch with a default limit (grab everything)
+ knit3_repo.fetch(tree_a.branch.repository, revision_id=None)
+ # Reopen to avoid any in-memory caching - ensure it's reading from
+ # disk.
+ knit3_repo = b_bzrdir.open_repository()
+ rev1_tree = knit3_repo.revision_tree('rev1')
+ rev1_tree.lock_read()
+ try:
+ lines = rev1_tree.get_file_lines(rev1_tree.get_root_id())
+ finally:
+ rev1_tree.unlock()
+ self.assertEqual([], lines)
+ b_branch = b_bzrdir.create_branch()
+ b_branch.pull(tree_a.branch)
+ try:
+ tree_b = b_bzrdir.create_workingtree()
+ except errors.NotLocalUrl:
+ try:
+ tree_b = b_branch.create_checkout('b', lightweight=True)
+ except errors.NotLocalUrl:
+ raise TestSkipped("cannot make working tree with transport %r"
+ % b_bzrdir.transport)
+ tree_b.commit('no change', rev_id='rev2')
+ rev2_tree = knit3_repo.revision_tree('rev2')
+ self.assertEqual(
+ 'rev1',
+ rev2_tree.get_file_revision(rev2_tree.get_root_id()))
+
+ def do_test_fetch_to_rich_root_sets_parents_correctly(self, result,
+ snapshots, root_id=ROOT_ID, allow_lefthand_ghost=False):
+ """Assert that result is the parents of 'tip' after fetching snapshots.
+
+ This helper constructs a 1.9 format source, and a test-format target
+ and fetches the result of building snapshots in the source, then
+ asserts that the parents of tip are result.
+
+ :param result: A parents list for the inventories.get_parent_map call.
+ :param snapshots: An iterable of snapshot parameters for
+ BranchBuilder.build_snapshot.
+ '"""
+ # This overlaps slightly with the tests for commit builder about graph
+ # consistency.
+ # Cases:
+ repo = self.make_repository('target')
+ remote_format = isinstance(repo, remote.RemoteRepository)
+ if not repo._format.rich_root_data and not remote_format:
+ return # not relevant
+ builder = self.make_branch_builder('source', format='1.9')
+ builder.start_series()
+ for revision_id, parent_ids, actions in snapshots:
+ builder.build_snapshot(revision_id, parent_ids, actions,
+ allow_leftmost_as_ghost=allow_lefthand_ghost)
+ builder.finish_series()
+ source = builder.get_branch()
+ if remote_format and not repo._format.rich_root_data:
+ # use a manual rich root format to ensure the code path is tested.
+ repo = self.make_repository('remote-target',
+ format='1.9-rich-root')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
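+ # After fetching, the per-file graph for the root in the target must
+ # show exactly the parents passed in as 'result'.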
+ repo.fetch(source.repository)
+ graph = repo.get_file_graph()
+ self.assertEqual(result,
+ graph.get_parent_map([(root_id, 'tip')])[(root_id, 'tip')])
+
+ def test_fetch_to_rich_root_set_parent_no_parents(self):
+ # No parents rev -> No parents
+ self.do_test_fetch_to_rich_root_sets_parents_correctly((),
+ [('tip', None, [('add', ('', ROOT_ID, 'directory', ''))]),
+ ])
+
+ def test_fetch_to_rich_root_set_parent_1_parent(self):
+ # 1 parent rev -> 1 parent
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ ((ROOT_ID, 'base'),),
+ [('base', None, [('add', ('', ROOT_ID, 'directory', ''))]),
+ ('tip', None, []),
+ ])
+
+ def test_fetch_to_rich_root_set_parent_1_ghost_parent(self):
+ # 1 ghost parent -> No parents
+ if not self.repository_format.supports_ghosts:
+ raise TestNotApplicable("repository format does not support "
+ "ghosts")
+ self.do_test_fetch_to_rich_root_sets_parents_correctly((),
+ [('tip', ['ghost'], [('add', ('', ROOT_ID, 'directory', ''))]),
+ ], allow_lefthand_ghost=True)
+
+ def test_fetch_to_rich_root_set_parent_2_head_parents(self):
+ # 2 parents both heads -> 2 parents
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ ((ROOT_ID, 'left'), (ROOT_ID, 'right')),
+ [('base', None, [('add', ('', ROOT_ID, 'directory', ''))]),
+ ('left', None, []),
+ ('right', ['base'], []),
+ ('tip', ['left', 'right'], []),
+ ])
+
+ def test_fetch_to_rich_root_set_parent_2_parents_1_head(self):
+ # 2 parents one head -> 1 parent
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ ((ROOT_ID, 'right'),),
+ [('left', None, [('add', ('', ROOT_ID, 'directory', ''))]),
+ ('right', None, []),
+ ('tip', ['left', 'right'], []),
+ ])
+
+ def test_fetch_to_rich_root_set_parent_1_parent_different_id_gone(self):
+ # 1 parent different fileid, ours missing -> no parents
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ (),
+ [('base', None, [('add', ('', ROOT_ID, 'directory', ''))]),
+ ('tip', None, [('unversion', ROOT_ID),
+ ('add', ('', 'my-root', 'directory', '')),
+ ]),
+ ], root_id='my-root')
+
+ def test_fetch_to_rich_root_set_parent_1_parent_different_id_moved(self):
+ # 1 parent different fileid, ours moved -> 1 parent
+ # (and that parent honours the changing revid of the other location)
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ (('my-root', 'origin'),),
+ [('origin', None, [('add', ('', ROOT_ID, 'directory', '')),
+ ('add', ('child', 'my-root', 'directory', ''))]),
+ ('base', None, []),
+ ('tip', None, [('unversion', 'my-root'),
+ ('unversion', ROOT_ID),
+ ('flush', None),
+ ('add', ('', 'my-root', 'directory', '')),
+ ]),
+ ], root_id='my-root')
+
+ def test_fetch_to_rich_root_set_parent_2_parent_1_different_id_gone(self):
+ # 2 parents, 1 different fileid, our second missing -> 1 parent
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ (('my-root', 'right'),),
+ [('base', None, [('add', ('', ROOT_ID, 'directory', ''))]),
+ ('right', None, [('unversion', ROOT_ID),
+ ('add', ('', 'my-root', 'directory', ''))]),
+ ('tip', ['base', 'right'], [('unversion', ROOT_ID),
+ ('add', ('', 'my-root', 'directory', '')),
+ ]),
+ ], root_id='my-root')
+
+ def test_fetch_to_rich_root_set_parent_2_parent_2_different_id_moved(self):
+ # 2 parents, 1 different fileid, our second moved -> 2 parents
+ # (and that parent honours the changing revid of the other location)
+ self.do_test_fetch_to_rich_root_sets_parents_correctly(
+ (('my-root', 'right'),),
+ # 'my-root' at 'child'.
+ [('origin', None, [('add', ('', ROOT_ID, 'directory', '')),
+ ('add', ('child', 'my-root', 'directory', ''))]),
+ ('base', None, []),
+ # 'my-root' at root
+ ('right', None, [('unversion', 'my-root'),
+ ('unversion', ROOT_ID),
+ ('flush', None),
+ ('add', ('', 'my-root', 'directory', ''))]),
+ ('tip', ['base', 'right'], [('unversion', 'my-root'),
+ ('unversion', ROOT_ID),
+ ('flush', None),
+ ('add', ('', 'my-root', 'directory', '')),
+ ]),
+ ], root_id='my-root')
+
+ def test_fetch_all_from_self(self):
+ tree = self.make_branch_and_tree('.')
+ rev_id = tree.commit('one')
+ # This needs to be a new copy of the repository; if this changes, the
+ # test needs to be rewritten.
+ repo = tree.branch.repository.bzrdir.open_repository()
+ # This fetch should be a no-op see bug #158333
+ tree.branch.repository.fetch(repo, None)
+
+ def test_fetch_from_self(self):
+ tree = self.make_branch_and_tree('.')
+ rev_id = tree.commit('one')
+ repo = tree.branch.repository.bzrdir.open_repository()
+ # This fetch should be a no-op see bug #158333
+ tree.branch.repository.fetch(repo, rev_id)
+
+ def test_fetch_missing_from_self(self):
+ tree = self.make_branch_and_tree('.')
+ rev_id = tree.commit('one')
+ # Even though the fetch() is a NO-OP it should assert the revision id
+ # is present
+ repo = tree.branch.repository.bzrdir.open_repository()
+ self.assertRaises(errors.NoSuchRevision, tree.branch.repository.fetch,
+ repo, 'no-such-revision')
+
+ def makeARepoWithSignatures(self):
+ wt = self.make_branch_and_tree('a-repo-with-sigs')
+ wt.commit('rev1', allow_pointless=True, rev_id='rev1')
+ repo = wt.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ repo.sign_revision('rev1', gpg.LoopbackGPGStrategy(None))
+ except errors.UnsupportedOperation:
+ self.assertFalse(repo._format.supports_revision_signatures)
+ raise TestNotApplicable("repository format does not support signatures")
+ repo.commit_write_group()
+ repo.unlock()
+ return repo
+
+ def test_fetch_copies_signatures(self):
+ source_repo = self.makeARepoWithSignatures()
+ target_repo = self.make_repository('target')
+ target_repo.fetch(source_repo, revision_id=None)
+ self.assertEqual(
+ source_repo.get_signature_text('rev1'),
+ target_repo.get_signature_text('rev1'))
+
+ def make_repository_with_one_revision(self):
+ wt = self.make_branch_and_tree('source')
+ wt.commit('rev1', allow_pointless=True, rev_id='rev1')
+ return wt.branch.repository
+
+ def test_fetch_revision_already_exists(self):
+ # Make a repository with one revision.
+ source_repo = self.make_repository_with_one_revision()
+ # Fetch that revision into a second repository.
+ target_repo = self.make_repository('target')
+ target_repo.fetch(source_repo, revision_id='rev1')
+ # Now fetch again; there will be nothing to do. This should work
+ # without causing any errors.
+ target_repo.fetch(source_repo, revision_id='rev1')
+
+ def test_fetch_all_same_revisions_twice(self):
+ # Blind-fetching all the same revisions twice should succeed and be a
+ # no-op the second time.
+ repo = self.make_repository('repo')
+ tree = self.make_branch_and_tree('tree')
+ revision_id = tree.commit('test')
+ repo.fetch(tree.branch.repository)
+ repo.fetch(tree.branch.repository)
+
+ def make_simple_branch_with_ghost(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
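+ # 'ghost-id' is named as a parent of B-id but never built, so it
+ # remains a ghost in the resulting branch.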
+ builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
+ builder.finish_series()
+ source_b = builder.get_branch()
+ source_b.lock_read()
+ self.addCleanup(source_b.unlock)
+ return source_b
+
+ def test_fetch_with_ghost(self):
+ source_b = self.make_simple_branch_with_ghost()
+ target = self.make_repository('target')
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ target.fetch(source_b.repository, revision_id='B-id')
+
+ def test_fetch_into_smart_with_ghost(self):
+ trans = self.make_smart_server('target')
+ source_b = self.make_simple_branch_with_ghost()
+ if not source_b.bzrdir._format.supports_transport(trans):
+ raise TestNotApplicable("format does not support transport")
+ target = self.make_repository('target')
+ # Re-open the repository over the smart protocol
+ target = repository.Repository.open(trans.base)
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ try:
+ target.fetch(source_b.repository, revision_id='B-id')
+ except errors.TokenLockingNotSupported:
+ # The code inside fetch() that tries to lock and then fails also
+ # causes weird problems with 'lock_not_held' later on...
+ target.lock_read()
+ self.knownFailure('some repositories fail to fetch'
+ ' via the smart server because of locking issues.')
+
+ def test_fetch_from_smart_with_ghost(self):
+ trans = self.make_smart_server('source')
+ source_b = self.make_simple_branch_with_ghost()
+ if not source_b.bzrdir._format.supports_transport(trans):
+ raise TestNotApplicable("format does not support transport")
+ target = self.make_repository('target')
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ # Re-open the repository over the smart protocol
+ source = repository.Repository.open(trans.base)
+ source.lock_read()
+ self.addCleanup(source.unlock)
+ target.fetch(source, revision_id='B-id')
+
diff --git a/bzrlib/tests/per_repository/test_file_graph.py b/bzrlib/tests/per_repository/test_file_graph.py
new file mode 100644
index 0000000..0483561
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_file_graph.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the per file graph API."""
+
+
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestPerFileGraph(TestCaseWithRepository):
+
+ def test_file_graph(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([("a", "contents")])
+ tree.add(["a"], ["fileid"])
+ revid1 = tree.commit("msg")
+ self.build_tree_contents([("a", "new contents")])
+ revid2 = tree.commit("msg")
+ self.addCleanup(tree.lock_read().unlock)
+ graph = tree.branch.repository.get_file_graph()
+ self.assertEquals({
+ ("fileid", revid2): (("fileid", revid1),), ("fileid", revid1):()},
+ graph.get_parent_map([("fileid", revid2), ("fileid", revid1)]))
diff --git a/bzrlib/tests/per_repository/test_get_parent_map.py b/bzrlib/tests/per_repository/test_get_parent_map.py
new file mode 100644
index 0000000..452b7ae
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_get_parent_map.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the get_parent_map API."""
+
+from bzrlib import revision
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestGetParentMap(TestCaseWithRepository):
+
+ def test_missing_revision(self):
+ tree = self.make_branch_and_tree('.')
+ repo = tree.branch.repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({}, repo.get_parent_map(['non-existant']))
+
+ def test_multiple_parents(self):
+ tree = self.make_branch_and_tree('.')
+ rev1 = tree.commit('first')
+ rev2 = tree.commit('second')
+ tree.set_parent_ids([rev1, rev2])
+ tree.branch.set_last_revision_info(1, rev1)
+ rev3 = tree.commit('third')
+ repo = tree.branch.repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({rev3:(rev1, rev2)},
+ repo.get_parent_map([rev3]))
+ self.assertEqual({rev1:(revision.NULL_REVISION,),
+ rev2:(rev1,),
+ rev3:(rev1, rev2),
+ }, repo.get_parent_map([rev1, rev2, rev3]))
+
+ def test_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ rev1 = tree.commit('first')
+ repo = tree.branch.repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({rev1:(revision.NULL_REVISION,)},
+ repo.get_parent_map([rev1]))
+
+ def test_none(self):
+ tree = self.make_branch_and_tree('.')
+ rev1 = tree.commit('first')
+ repo = tree.branch.repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertRaises(ValueError,
+ repo.get_parent_map, [None])
+
+ def test_null_revision(self):
+ tree = self.make_branch_and_tree('.')
+ repo = tree.branch.repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({revision.NULL_REVISION:()},
+ repo.get_parent_map([revision.NULL_REVISION]))
diff --git a/bzrlib/tests/per_repository/test_has_revisions.py b/bzrlib/tests/per_repository/test_has_revisions.py
new file mode 100644
index 0000000..adaf4e7
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_has_revisions.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for implementations of Repository.has_revisions."""
+
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestHasRevisions(TestCaseWithRepository):
+
+ def test_empty_list(self):
+ repo = self.make_repository('.')
+ self.assertEqual(set(), repo.has_revisions([]))
+
+ def test_superset(self):
+ tree = self.make_branch_and_tree('.')
+ repo = tree.branch.repository
+ rev1 = tree.commit('1')
+ rev2 = tree.commit('2')
+ rev3 = tree.commit('3')
+ self.assertEqual(set([rev1, rev3]),
+ repo.has_revisions([rev1, rev3, 'foobar:']))
+
+ def test_NULL(self):
+ # NULL_REVISION is always present. So for
+ # compatibility with 'has_revision' we make this work.
+ repo = self.make_repository('.')
+ self.assertEqual(set([NULL_REVISION]),
+ repo.has_revisions([NULL_REVISION]))
diff --git a/bzrlib/tests/per_repository/test_has_same_location.py b/bzrlib/tests/per_repository/test_has_same_location.py
new file mode 100644
index 0000000..99834d0
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_has_same_location.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for implementations of Repository.has_same_location."""
+
+from bzrlib import (
+ controldir,
+ transport,
+ )
+from bzrlib.remote import (
+ RemoteRepositoryFormat,
+ )
+from bzrlib.tests import (
+ TestNotApplicable,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestHasSameLocation(TestCaseWithRepository):
+ """Tests for Repository.has_same_location method."""
+
+ def assertSameRepo(self, a, b):
+ """Asserts that two objects are the same repository.
+
+ This method does the comparison both ways (`a.has_same_location(b)` as
+ well as `b.has_same_location(a)`) to make sure both objects'
+ `has_same_location` methods give the same results.
+ """
+ self.assertTrue(a.has_same_location(b),
+ "%r is not the same repository as %r" % (a, b))
+ self.assertTrue(b.has_same_location(a),
+ "%r is the same as %r, but not vice versa" % (a, b))
+
+ def assertDifferentRepo(self, a, b):
+ """Asserts that two objects are the not same repository.
+
+ This method does the comparison both ways (`a.has_same_location(b)` as
+ well as `b.has_same_location(a)`) to make sure both objects'
+ `has_same_location` methods give the same results.
+
+ :seealso: assertSameRepo
+ """
+ self.assertFalse(a.has_same_location(b),
+ "%r is the same repository as %r" % (a, b))
+ self.assertFalse(b.has_same_location(a),
+ "%r is the same as %r, but not vice versa" % (b, a))
+
+ def test_same_repo_instance(self):
+ """A repository object is the same repository as itself."""
+ repo = self.make_repository('.')
+ self.assertSameRepo(repo, repo)
+
+ def test_same_repo_location(self):
+ """Different repository objects for the same location are the same."""
+ repo = self.make_repository('.')
+ reopened_repo = repo.bzrdir.open_repository()
+ self.assertFalse(
+ repo is reopened_repo,
+ "This test depends on reopened_repo being a different instance of "
+ "the same repo.")
+ self.assertSameRepo(repo, reopened_repo)
+
+ def test_different_repos_not_equal(self):
+ """Repositories at different locations are not the same."""
+ repo_one = self.make_repository('one')
+ repo_two = self.make_repository('two')
+ self.assertDifferentRepo(repo_one, repo_two)
+
+ def test_same_bzrdir_different_control_files_not_equal(self):
+ """Repositories in the same bzrdir, but with different control files,
+ are not the same.
+
+ This can happen e.g. when upgrading a repository. This test mimics how
+ CopyConverter creates a second repository in one bzrdir.
+ """
+ repo = self.make_repository('repo')
+ if repo.control_transport.base == repo.bzrdir.control_transport.base:
+ raise TestNotApplicable(
+ "%r has repository files directly in the bzrdir"
+ % (repo,))
+ # This test only applies to repository formats where the repo
+ # control_files are separate from other bzrdir files, i.e. metadir
+ # formats.
+ repo.control_transport.copy_tree('.', '../repository.backup')
+ backup_transport = repo.control_transport.clone('../repository.backup')
+ if isinstance(repo._format, RemoteRepositoryFormat):
+ raise TestNotApplicable("remote repositories don't support overriding "
+ "transport")
+ backup_repo = repo._format.open(repo.bzrdir,
+ _override_transport=backup_transport)
+ self.assertDifferentRepo(repo, backup_repo)
+
+ def test_different_format_not_equal(self):
+ """Different format repositories are comparable and not the same.
+
+ Comparing different format repository objects should give a negative
+ result, rather than trigger an exception (which could happen with a
+ naive __eq__ implementation, e.g. due to missing attributes).
+ """
+ repo = self.make_repository('repo')
+ other_repo = self.make_repository('other', format='default')
+ if repo._format == other_repo._format:
+ # We're testing the default format! So we have to use a non-default
+ # format for other_repo.
+ transport.get_transport_from_url(
+ self.get_vfs_only_url()).delete_tree('other')
+ other_repo = self.make_repository('other', format='knit')
+ # Make sure the other_repo is not a RemoteRepository.
+ other_bzrdir = controldir.ControlDir.open(self.get_vfs_only_url('other'))
+ other_repo = other_bzrdir.open_repository()
+ self.assertDifferentRepo(repo, other_repo)
+
+
diff --git a/bzrlib/tests/per_repository/test_locking.py b/bzrlib/tests/per_repository/test_locking.py
new file mode 100644
index 0000000..f506f20
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_locking.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Repository.is_write_locked()."""
+
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestIsWriteLocked(TestCaseWithRepository):
+
+ def test_not_locked(self):
+ repo = self.make_repository('.')
+ self.assertFalse(repo.is_write_locked())
+
+ def test_read_locked(self):
+ repo = self.make_repository('.')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertFalse(repo.is_write_locked())
+
+ def test_write_locked(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ self.assertTrue(repo.is_write_locked())
+
+
+class TestIsLocked(TestCaseWithRepository):
+
+ def test_not_locked(self):
+ repo = self.make_repository('.')
+ self.assertFalse(repo.is_locked())
+
+ def test_read_locked(self):
+ repo = self.make_repository('.')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertTrue(repo.is_locked())
+
+ def test_write_locked(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ self.assertTrue(repo.is_locked())
diff --git a/bzrlib/tests/per_repository/test_pack.py b/bzrlib/tests/per_repository/test_pack.py
new file mode 100644
index 0000000..30e1c52
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_pack.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository packing."""
+
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestPack(TestCaseWithRepository):
+
+ def test_pack_empty_does_not_error(self):
+ repo = self.make_repository('.')
+ repo.pack()
+
+ def test_pack_accepts_opaque_hint(self):
+ # Where some of a repository's data is known to be suboptimally packed,
+ # we permit packing just that data via a hint. If the hint cannot be
+ # interpreted it is ignored.
+ tree = self.make_branch_and_tree('tree')
+ rev1 = tree.commit('1')
+ rev2 = tree.commit('2')
+ rev3 = tree.commit('3')
+ rev4 = tree.commit('4')
+ tree.branch.repository.pack(hint=[rev3, rev4])
diff --git a/bzrlib/tests/per_repository/test_reconcile.py b/bzrlib/tests/per_repository/test_reconcile.py
new file mode 100644
index 0000000..7f1a437
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_reconcile.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for reconciliation of repositories."""
+
+
+from bzrlib.tests.per_repository import (
+ TestCaseWithRepository,
+ )
+
+
+class TestRepeatedReconcile(TestCaseWithRepository):
+
+ def test_trivial_two_reconciles_no_error(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('first post')
+ tree.branch.repository.reconcile(thorough=True)
+ tree.branch.repository.reconcile(thorough=True)
diff --git a/bzrlib/tests/per_repository/test_refresh_data.py b/bzrlib/tests/per_repository/test_refresh_data.py
new file mode 100644
index 0000000..328f851
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_refresh_data.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Repository.refresh_data."""
+
+from bzrlib import (
+ repository,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestRefreshData(TestCaseWithRepository):
+
+ def test_refresh_data_unlocked(self):
+ # While not interesting, it should not error.
+ repo = self.make_repository('.')
+ repo.refresh_data()
+
+ def test_refresh_data_read_locked(self):
+ # While not interesting, it should not error.
+ repo = self.make_repository('.')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ repo.refresh_data()
+
+ def test_refresh_data_write_locked(self):
+ # While not interesting, it should not error.
+ repo = self.make_repository('.')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.refresh_data()
+
+ def test_refresh_data_in_write_group(self):
+ # refresh_data may either succeed or raise IsInWriteGroupError during a
+ # write group.
+ repo = self.make_repository('.')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ self.addCleanup(repo.abort_write_group)
+ try:
+ repo.refresh_data()
+ except repository.IsInWriteGroupError:
+ # This is ok.
+ pass
+ else:
+ # This is ok too.
+ pass
diff --git a/bzrlib/tests/per_repository/test_repository.py b/bzrlib/tests/per_repository/test_repository.py
new file mode 100644
index 0000000..2e89ccb
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_repository.py
@@ -0,0 +1,1002 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository implementations - tests a repository format."""
+
+from cStringIO import StringIO
+import re
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ delta as _mod_delta,
+ errors,
+ gpg,
+ info,
+ inventory,
+ remote,
+ repository,
+ revision as _mod_revision,
+ tests,
+ transport,
+ upgrade,
+ workingtree,
+ )
+from bzrlib.repofmt import (
+ knitpack_repo,
+ )
+from bzrlib.tests import (
+ per_repository,
+ test_server,
+ )
+from bzrlib.tests.matchers import *
+
+
+class TestRepositoryMakeBranchAndTree(per_repository.TestCaseWithRepository):
+
+ def test_repository_format(self):
+ # make sure the repository on tree.branch is of the desired format,
+ # because developers use this API to set up the tree, branch and
+ # repository for their tests: having it not give the right repository
+ # type would invalidate the tests.
+ tree = self.make_branch_and_tree('repo')
+ self.assertIsInstance(tree.branch.repository._format,
+ self.repository_format.__class__)
+
+
+class TestRepository(per_repository.TestCaseWithRepository):
+
+ def assertFormatAttribute(self, attribute, allowed_values):
+ """Assert that the format has an attribute 'attribute'."""
+ repo = self.make_repository('repo')
+ self.assertSubset([getattr(repo._format, attribute)], allowed_values)
+
+ def test_attribute_fast_deltas(self):
+ """Test the format.fast_deltas attribute."""
+ self.assertFormatAttribute('fast_deltas', (True, False))
+
+ def test_attribute_supports_nesting_repositories(self):
+ """Test the format.supports_nesting_repositories."""
+ self.assertFormatAttribute('supports_nesting_repositories',
+ (True, False))
+
+ def test_attribute_supports_unreferenced_revisions(self):
+ """Test the format.supports_unreferenced_revisions."""
+ self.assertFormatAttribute('supports_unreferenced_revisions',
+ (True, False))
+
+ def test_attribute__fetch_reconcile(self):
+ """Test the _fetch_reconcile attribute."""
+ self.assertFormatAttribute('_fetch_reconcile', (True, False))
+
+ def test_attribute_format_experimental(self):
+ self.assertFormatAttribute('experimental', (True, False))
+
+ def test_attribute_format_pack_compresses(self):
+ self.assertFormatAttribute('pack_compresses', (True, False))
+
+ def test_attribute_format_supports_full_versioned_files(self):
+ self.assertFormatAttribute('supports_full_versioned_files',
+ (True, False))
+
+ def test_attribute_format_supports_funky_characters(self):
+ self.assertFormatAttribute('supports_funky_characters',
+ (True, False))
+
+ def test_attribute_format_supports_leaving_lock(self):
+ self.assertFormatAttribute('supports_leaving_lock',
+ (True, False))
+
+ def test_attribute_format_versioned_directories(self):
+ self.assertFormatAttribute('supports_versioned_directories', (True, False))
+
+ def test_attribute_format_revision_graph_can_have_wrong_parents(self):
+ self.assertFormatAttribute('revision_graph_can_have_wrong_parents',
+ (True, False))
+
+ def test_format_is_deprecated(self):
+ repo = self.make_repository('repo')
+ self.assertSubset([repo._format.is_deprecated()], (True, False))
+
+ def test_format_is_supported(self):
+ repo = self.make_repository('repo')
+ self.assertSubset([repo._format.is_supported()], (True, False))
+
+ def test_clone_to_default_format(self):
+ # TODO: Test that cloning a repository to the current default
+ # format preserves all the information, such as signatures
+ # [not tested yet].
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ tree_a.add('foo', 'file1')
+ tree_a.commit('rev1', rev_id='rev1')
+ bzrdirb = self.make_bzrdir('b')
+ repo_b = tree_a.branch.repository.clone(bzrdirb)
+ tree_b = repo_b.revision_tree('rev1')
+ tree_b.lock_read()
+ self.addCleanup(tree_b.unlock)
+ tree_b.get_file_text('file1')
+ rev1 = repo_b.get_revision('rev1')
+
+ def test_supports_rich_root(self):
+ tree = self.make_branch_and_tree('a')
+ tree.commit('')
+ second_revision = tree.commit('')
+ rev_tree = tree.branch.repository.revision_tree(second_revision)
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
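+ # Rich-root formats record the revision in which the root last changed
+ # (the first commit here); other formats stamp the root on every commit.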
+ root_revision = rev_tree.get_file_revision(rev_tree.get_root_id())
+ rich_root = (root_revision != second_revision)
+ self.assertEqual(rich_root,
+ tree.branch.repository.supports_rich_root())
+
+ def test_clone_specific_format(self):
+ """todo"""
+
+ def test_format_initialize_find_open(self):
+ # loopback test to check the current format initializes to itself.
+ if not self.repository_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ return
+ # supported formats must be able to init and open
+ t = self.get_transport()
+ readonly_t = self.get_readonly_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = self.repository_format.initialize(made_control)
+ self.assertEqual(made_control, made_repo.bzrdir)
+
+ # find it via controldir opening:
+ opened_control = controldir.ControlDir.open(readonly_t.base)
+ direct_opened_repo = opened_control.open_repository()
+ self.assertEqual(direct_opened_repo.__class__, made_repo.__class__)
+ self.assertEqual(opened_control, direct_opened_repo.bzrdir)
+
+ self.assertIsInstance(direct_opened_repo._format,
+ self.repository_format.__class__)
+ # find it via Repository.open
+ opened_repo = repository.Repository.open(readonly_t.base)
+ self.assertIsInstance(opened_repo, made_repo.__class__)
+ self.assertEqual(made_repo._format.__class__,
+ opened_repo._format.__class__)
+ # if it has a unique id string, can we probe for it ?
+ try:
+ self.repository_format.get_format_string()
+ except NotImplementedError:
+ return
+ self.assertEqual(self.repository_format,
+ repository.RepositoryFormatMetaDir.find_format(opened_control))
+
+ def test_format_matchingbzrdir(self):
+ self.assertEqual(self.repository_format,
+ self.repository_format._matchingbzrdir.repository_format)
+ self.assertEqual(self.repository_format,
+ self.bzrdir_format.repository_format)
+
+ def test_format_network_name(self):
+ repo = self.make_repository('r')
+ format = repo._format
+ network_name = format.network_name()
+ self.assertIsInstance(network_name, str)
+ # We want to test that the network_name matches the actual format on
+ # disk. For local repositories, that means that using network_name as
+ # a key in the registry gives back the same format. For remote
+ # repositories, that means that the network_name of the
+ # RemoteRepositoryFormat we have locally matches the actual format
+ # present on the remote side.
+ if isinstance(format, remote.RemoteRepositoryFormat):
+ repo._ensure_real()
+ real_repo = repo._real_repository
+ self.assertEqual(real_repo._format.network_name(), network_name)
+ else:
+ registry = repository.network_format_registry
+ looked_up_format = registry.get(network_name)
+ self.assertEqual(format.__class__, looked_up_format.__class__)
+
+ def test_create_repository(self):
+ # bzrdir can construct a repository for itself.
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ return
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ made_repo = made_control.create_repository()
+ # Check that we have a repository object.
+ made_repo.has_revision('foo')
+ self.assertEqual(made_control, made_repo.bzrdir)
+
+ def test_create_repository_shared(self):
+ # bzrdir can construct a shared repository.
+ if not self.bzrdir_format.is_supported():
+ # unsupported formats are not loopback testable
+ # because the default open will not open them and
+ # they may not be initializable.
+ return
+ t = self.get_transport()
+ made_control = self.bzrdir_format.initialize(t.base)
+ try:
+ made_repo = made_control.create_repository(shared=True)
+ except errors.IncompatibleFormat:
+ # not all repository formats understand being shared, or
+ # may only be shared in some circumstances.
+ return
+ # Check that we have a repository object.
+ made_repo.has_revision('foo')
+ self.assertEqual(made_control, made_repo.bzrdir)
+ self.assertTrue(made_repo.is_shared())
+
+ def test_revision_tree(self):
+ wt = self.make_branch_and_tree('.')
+ wt.set_root_id('fixed-root')
+ wt.commit('lala!', rev_id='revision-1', allow_pointless=True)
+ tree = wt.branch.repository.revision_tree('revision-1')
+ tree.lock_read()
+ try:
+ self.assertEqual('revision-1',
+ tree.get_file_revision(tree.get_root_id()))
+ expected = inventory.InventoryDirectory('fixed-root', '', None)
+ expected.revision = 'revision-1'
+ self.assertEqual([('', 'V', 'directory', 'fixed-root', expected)],
+ list(tree.list_files(include_root=True)))
+ finally:
+ tree.unlock()
+ tree = self.callDeprecated(['NULL_REVISION should be used for the null'
+ ' revision instead of None, as of bzr 0.91.'],
+ wt.branch.repository.revision_tree, None)
+ tree.lock_read()
+ try:
+ self.assertEqual([], list(tree.list_files(include_root=True)))
+ finally:
+ tree.unlock()
+ tree = wt.branch.repository.revision_tree(_mod_revision.NULL_REVISION)
+ tree.lock_read()
+ try:
+ self.assertEqual([], list(tree.list_files(include_root=True)))
+ finally:
+ tree.unlock()
+
+ def test_get_revision_delta(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo'])
+ tree_a.add('foo', 'file1')
+ tree_a.commit('rev1', rev_id='rev1')
+ self.build_tree(['a/vla'])
+ tree_a.add('vla', 'file2')
+ tree_a.commit('rev2', rev_id='rev2')
+
+ delta = tree_a.branch.repository.get_revision_delta('rev1')
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([('foo', 'file1', 'file')], delta.added)
+ delta = tree_a.branch.repository.get_revision_delta('rev2')
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([('vla', 'file2', 'file')], delta.added)
+
+ def test_clone_bzrdir_repository_revision(self):
+ # make a repository with some revisions,
+ # and clone it, this should not have unreferenced revisions.
+ # also: test cloning with a revision id of NULL_REVISION -> empty repo.
+ raise tests.TestSkipped('revision limiting is not implemented yet.')
+
+ def test_clone_repository_basis_revision(self):
+ raise tests.TestSkipped(
+ 'the use of a basis should not add noise data to the result.')
+
+ def test_clone_shared_no_tree(self):
+ # cloning a shared repository keeps it shared
+ # and preserves the make_working_tree setting.
+ made_control = self.make_bzrdir('source')
+ try:
+ made_repo = made_control.create_repository(shared=True)
+ except errors.IncompatibleFormat:
+ # not all repository formats understand being shared, or
+ # may only be shared in some circumstances.
+ return
+ try:
+ made_repo.set_make_working_trees(False)
+ except errors.UnsupportedOperation:
+ # the repository does not support having its tree-making flag
+ # toggled.
+ return
+ result = made_control.clone(self.get_url('target'))
+ # Check that we have a repository object.
+ made_repo.has_revision('foo')
+
+ self.assertEqual(made_control, made_repo.bzrdir)
+ self.assertTrue(result.open_repository().is_shared())
+ self.assertFalse(result.open_repository().make_working_trees())
+
+ def test_upgrade_preserves_signatures(self):
+ if not self.repository_format.supports_revision_signatures:
+ raise tests.TestNotApplicable(
+ "repository does not support signing revisions")
+ wt = self.make_branch_and_tree('source')
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ repo = wt.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ repo.sign_revision('A', gpg.LoopbackGPGStrategy(None))
+ except errors.UnsupportedOperation:
+ self.assertFalse(repo._format.supports_revision_signatures)
+ raise tests.TestNotApplicable("signatures not supported by repository format")
+ repo.commit_write_group()
+ repo.unlock()
+ old_signature = repo.get_signature_text('A')
+ try:
+ old_format = controldir.ControlDirFormat.get_default_format()
+ # This gives metadir branches something they can convert to.
+ # It would be nice to have a 'latest' vs 'default' concept.
+ format = controldir.format_registry.make_bzrdir(
+ 'development-subtree')
+ upgrade.upgrade(repo.bzrdir.root_transport.base, format=format)
+ except errors.UpToDateFormat:
+ # this is in the most current format already.
+ return
+ except errors.BadConversionTarget, e:
+ raise tests.TestSkipped(str(e))
+ wt = workingtree.WorkingTree.open(wt.basedir)
+ new_signature = wt.branch.repository.get_signature_text('A')
+ self.assertEqual(old_signature, new_signature)
+
+ def test_format_description(self):
+ repo = self.make_repository('.')
+ text = repo._format.get_format_description()
+ self.assertTrue(len(text))
+
+ def test_format_supports_external_lookups(self):
+ repo = self.make_repository('.')
+ self.assertSubset(
+ [repo._format.supports_external_lookups], (True, False))
+
+ def assertMessageRoundtrips(self, message):
+ """Assert that message roundtrips to a repository and back intact."""
+ tree = self.make_branch_and_tree('.')
+ tree.commit(message, rev_id='a', allow_pointless=True)
+ rev = tree.branch.repository.get_revision('a')
+ serializer = getattr(tree.branch.repository, "_serializer", None)
+ if serializer is not None and serializer.squashes_xml_invalid_characters:
+ # we have to escape this manually, as we don't try to
+ # round-trip XML-invalid characters in the xml-based serializers.
+ escaped_message, escape_count = re.subn(
+ u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
+ lambda match: match.group(0).encode('unicode_escape'),
+ message)
+ self.assertEqual(rev.message, escaped_message)
+ else:
+ self.assertEqual(rev.message, message)
+ # insist the class is unicode no matter what came in for
+ # consistency.
+ self.assertIsInstance(rev.message, unicode)
+
+ def test_commit_unicode_message(self):
+ # a simple unicode message should be preserved
+ self.assertMessageRoundtrips(u'foo bar gamm\xae plop')
+
+ def test_commit_unicode_control_characters(self):
+ # a unicode message with control characters should roundtrip too.
+ unichars = [unichr(x) for x in range(256)]
+ # '\r' is not directly allowed anymore, as it used to be translated
+ # into '\n' anyway
+ unichars[ord('\r')] = u'\n'
+ self.assertMessageRoundtrips(
+ u"All 8-bit chars: " + ''.join(unichars))
+
+ def test_check_repository(self):
+ """Check a fairly simple repository's history"""
+ tree = self.make_branch_and_tree('.')
+ tree.commit('initial empty commit', rev_id='a-rev',
+ allow_pointless=True)
+ result = tree.branch.repository.check()
+ # writes to log; should accept both verbose and non-verbose
+ result.report_results(verbose=True)
+ result.report_results(verbose=False)
+
+ def test_get_revisions(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('initial empty commit', rev_id='a-rev',
+ allow_pointless=True)
+ tree.commit('second empty commit', rev_id='b-rev',
+ allow_pointless=True)
+ tree.commit('third empty commit', rev_id='c-rev',
+ allow_pointless=True)
+ repo = tree.branch.repository
+ revision_ids = ['a-rev', 'b-rev', 'c-rev']
+ revisions = repo.get_revisions(revision_ids)
+ self.assertEqual(len(revisions), 3)
+ zipped = zip(revisions, revision_ids)
+ self.assertEqual(len(zipped), 3)
+ for revision, revision_id in zipped:
+ self.assertEqual(revision.revision_id, revision_id)
+ self.assertEqual(revision, repo.get_revision(revision_id))
+
+ def test_root_entry_has_revision(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('message', rev_id='rev_id')
+ rev_tree = tree.branch.repository.revision_tree(tree.last_revision())
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ root_id = rev_tree.get_root_id()
+ self.assertEqual('rev_id', rev_tree.get_file_revision(root_id))
+
+ def test_pointless_commit(self):
+ tree = self.make_branch_and_tree('.')
+ self.assertRaises(errors.PointlessCommit, tree.commit, 'pointless',
+ allow_pointless=False)
+ tree.commit('pointless', allow_pointless=True)
+
+ def test_format_attributes(self):
+ """All repository formats should have some basic attributes."""
+ # create a repository to get a real format instance, not the
+ # template from the test suite parameterization.
+ repo = self.make_repository('.')
+ repo._format.rich_root_data
+ repo._format.supports_tree_reference
+
+ def test_iter_files_bytes(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file1', 'foo'),
+ ('tree/file2', 'bar')])
+ tree.add(['file1', 'file2'], ['file1-id', 'file2-id'])
+ tree.commit('rev1', rev_id='rev1')
+ self.build_tree_contents([('tree/file1', 'baz')])
+ tree.commit('rev2', rev_id='rev2')
+ repository = tree.branch.repository
+ repository.lock_read()
+ self.addCleanup(repository.unlock)
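+ # iter_files_bytes yields (identifier, chunk_iterator) pairs; join the
+ # chunks to recover each file text.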
+ extracted = dict((i, ''.join(b)) for i, b in
+ repository.iter_files_bytes(
+ [('file1-id', 'rev1', 'file1-old'),
+ ('file1-id', 'rev2', 'file1-new'),
+ ('file2-id', 'rev1', 'file2'),
+ ]))
+ self.assertEqual('foo', extracted['file1-old'])
+ self.assertEqual('bar', extracted['file2'])
+ self.assertEqual('baz', extracted['file1-new'])
+ self.assertRaises(errors.RevisionNotPresent, list,
+ repository.iter_files_bytes(
+ [('file1-id', 'rev3', 'file1-notpresent')]))
+ self.assertRaises((errors.RevisionNotPresent, errors.NoSuchId), list,
+ repository.iter_files_bytes(
+ [('file3-id', 'rev3', 'file1-notpresent')]))
+
+ def test_get_graph(self):
+ """Bare-bones smoketest that all repositories implement get_graph."""
+ repo = self.make_repository('repo')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ repo.get_graph()
+
+ def test_graph_ghost_handling(self):
+ tree = self.make_branch_and_tree('here')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('initial commit', rev_id='rev1')
+ tree.add_parent_tree_id('ghost')
+ tree.commit('commit-with-ghost', rev_id='rev2')
+ graph = tree.branch.repository.get_graph()
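+ # Ghosts are omitted from the parent map, but still appear in the
+ # parent lists of revisions that reference them.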
+ parents = graph.get_parent_map(['ghost', 'rev2'])
+ self.assertTrue('ghost' not in parents)
+ self.assertEqual(parents['rev2'], ('rev1', 'ghost'))
+
+ def test_get_known_graph_ancestry(self):
+ tree = self.make_branch_and_tree('here')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ # A
+ # |\
+ # | B
+ # |/
+ # C
+ tree.commit('initial commit', rev_id='A')
+ tree_other = tree.bzrdir.sprout('there').open_workingtree()
+ tree_other.commit('another', rev_id='B')
+ tree.merge_from_branch(tree_other.branch)
+ tree.commit('another', rev_id='C')
+ kg = tree.branch.repository.get_known_graph_ancestry(
+ ['C'])
+ self.assertEqual(['C'], list(kg.heads(['A', 'B', 'C'])))
+ self.assertEqual(['A', 'B', 'C'], list(kg.topo_sort()))
+
+ def test_parent_map_type(self):
+ tree = self.make_branch_and_tree('here')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('initial commit', rev_id='rev1')
+ tree.commit('next commit', rev_id='rev2')
+ graph = tree.branch.repository.get_graph()
+ parents = graph.get_parent_map(
+ [_mod_revision.NULL_REVISION, 'rev1', 'rev2'])
+ for value in parents.values():
+ self.assertIsInstance(value, tuple)
+
+ def test_implements_revision_graph_can_have_wrong_parents(self):
+ """All repositories should implement
+ revision_graph_can_have_wrong_parents, so that check and reconcile can
+ work correctly.
+ """
+ repo = self.make_repository('.')
+ # This should work, not raise NotImplementedError:
+ if not repo._format.revision_graph_can_have_wrong_parents:
+ return
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ # This repo must also implement
+ # _find_inconsistent_revision_parents and
+ # _check_for_inconsistent_revision_parents. So calling these
+ # should not raise NotImplementedError.
+ list(repo._find_inconsistent_revision_parents())
+ repo._check_for_inconsistent_revision_parents()
+
+ def test_add_signature_text(self):
+ builder = self.make_branch_builder('.')
+ builder.start_series()
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_write()
+ self.addCleanup(b.unlock)
+ if b.repository._format.supports_revision_signatures:
+ b.repository.start_write_group()
+ b.repository.add_signature_text('A', 'This might be a signature')
+ b.repository.commit_write_group()
+ self.assertEqual('This might be a signature',
+ b.repository.get_signature_text('A'))
+ else:
+ b.repository.start_write_group()
+ self.addCleanup(b.repository.abort_write_group)
+ self.assertRaises(errors.UnsupportedOperation,
+ b.repository.add_signature_text, 'A',
+ 'This might be a signature')
+
+ # XXX: this helper is duplicated from tests.test_repository
+ def make_remote_repository(self, path, shared=None):
+ """Make a RemoteRepository object backed by a real repository that will
+ be created at the given path."""
+ repo = self.make_repository(path, shared=shared)
+ smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(smart_server, self.get_server())
+ remote_transport = transport.get_transport_from_url(
+ smart_server.get_url()).clone(path)
+ if not repo.bzrdir._format.supports_transport(remote_transport):
+ raise tests.TestNotApplicable(
+ "format does not support transport")
+ remote_bzrdir = controldir.ControlDir.open_from_transport(
+ remote_transport)
+ remote_repo = remote_bzrdir.open_repository()
+ return remote_repo
+
+ def test_sprout_from_hpss_preserves_format(self):
+ """repo.sprout from a smart server preserves the repository format."""
+ remote_repo = self.make_remote_repository('remote')
+ local_bzrdir = self.make_bzrdir('local')
+ try:
+ local_repo = remote_repo.sprout(local_bzrdir)
+ except errors.TransportNotPossible:
+ raise tests.TestNotApplicable(
+ "Cannot lock_read old formats like AllInOne over HPSS.")
+ remote_backing_repo = controldir.ControlDir.open(
+ self.get_vfs_only_url('remote')).open_repository()
+ self.assertEqual(
+ remote_backing_repo._format.network_name(),
+ local_repo._format.network_name())
+
+ def test_sprout_branch_from_hpss_preserves_repo_format(self):
+ """branch.sprout from a smart server preserves the repository format.
+ """
+ if not self.repository_format.supports_leaving_lock:
+ raise tests.TestNotApplicable(
+ "Format can not be used over HPSS")
+ remote_repo = self.make_remote_repository('remote')
+ remote_branch = remote_repo.bzrdir.create_branch()
+ try:
+ local_bzrdir = remote_branch.bzrdir.sprout('local')
+ except errors.TransportNotPossible:
+ raise tests.TestNotApplicable(
+ "Cannot lock_read old formats like AllInOne over HPSS.")
+ local_repo = local_bzrdir.open_repository()
+ remote_backing_repo = controldir.ControlDir.open(
+ self.get_vfs_only_url('remote')).open_repository()
+ self.assertEqual(remote_backing_repo._format, local_repo._format)
+
+ def test_sprout_branch_from_hpss_preserves_shared_repo_format(self):
+ """branch.sprout from a smart server preserves the repository format of
+ a branch from a shared repository.
+ """
+ if not self.repository_format.supports_leaving_lock:
+ raise tests.TestNotApplicable(
+ "Format can not be used over HPSS")
+ # Make a shared repo
+ remote_repo = self.make_remote_repository('remote', shared=True)
+ remote_backing_repo = controldir.ControlDir.open(
+ self.get_vfs_only_url('remote')).open_repository()
+ # Make a branch in that repo in an old format that isn't the default
+ # branch format for the repo.
+ from bzrlib.branchfmt.fullhistory import BzrBranchFormat5
+ format = remote_backing_repo.bzrdir.cloning_metadir()
+ format._branch_format = BzrBranchFormat5()
+ remote_transport = remote_repo.bzrdir.root_transport.clone('branch')
+ controldir.ControlDir.create_branch_convenience(
+ remote_transport.base, force_new_repo=False, format=format)
+ remote_branch = controldir.ControlDir.open_from_transport(
+ remote_transport).open_branch()
+ try:
+ local_bzrdir = remote_branch.bzrdir.sprout('local')
+ except errors.TransportNotPossible:
+ raise tests.TestNotApplicable(
+ "Cannot lock_read old formats like AllInOne over HPSS.")
+ local_repo = local_bzrdir.open_repository()
+ self.assertEqual(remote_backing_repo._format, local_repo._format)
+
+ def test_clone_to_hpss(self):
+ if not self.repository_format.supports_leaving_lock:
+ raise tests.TestNotApplicable(
+ "Cannot lock pre_metadir_formats remotely.")
+ remote_transport = self.make_smart_server('remote')
+ local_branch = self.make_branch('local')
+ remote_branch = local_branch.create_clone_on_transport(remote_transport)
+ self.assertEqual(
+ local_branch.repository._format.supports_external_lookups,
+ remote_branch.repository._format.supports_external_lookups)
+
+ def test_clone_stacking_policy_upgrades(self):
+ """Cloning an unstackable branch format to somewhere with a default
+ stack-on branch upgrades branch and repo to match the target and honour
+ the policy.
+ """
+ try:
+ repo = self.make_repository('repo', shared=True)
+ except errors.IncompatibleFormat:
+ raise tests.TestNotApplicable('Cannot make a shared repository')
+ if repo.bzrdir._format.fixed_components:
+ self.knownFailure(
+ "pre metadir branches do not upgrade on push "
+ "with stacking policy")
+ if isinstance(repo._format,
+ knitpack_repo.RepositoryFormatKnitPack5RichRootBroken):
+ raise tests.TestNotApplicable("unsupported format")
+ # Make a source branch in 'repo' in an unstackable branch format
+ bzrdir_format = self.repository_format._matchingbzrdir
+ transport = self.get_transport('repo/branch')
+ transport.mkdir('.')
+ target_bzrdir = bzrdir_format.initialize_on_transport(transport)
+ branch = _mod_branch.BzrBranchFormat6().initialize(target_bzrdir)
+ # Ensure that stack_on will be stackable and match the serializer of
+ # repo.
+ if isinstance(repo, remote.RemoteRepository):
+ repo._ensure_real()
+ info_repo = repo._real_repository
+ else:
+ info_repo = repo
+ format_description = info.describe_format(info_repo.bzrdir,
+ info_repo, None, None)
+ formats = format_description.split(' or ')
+ stack_on_format = formats[0]
+ if stack_on_format in ["pack-0.92", "dirstate", "metaweave"]:
+ stack_on_format = "1.9"
+ elif stack_on_format in ["dirstate-with-subtree", "rich-root",
+ "rich-root-pack", "pack-0.92-subtree"]:
+ stack_on_format = "1.9-rich-root"
+ # formats not tested for above are already stackable, so we can use the
+ # format as-is.
+ stack_on = self.make_branch('stack-on-me', format=stack_on_format)
+ self.make_bzrdir('.').get_config().set_default_stack_on('stack-on-me')
+ target = branch.bzrdir.clone(self.get_url('target'))
+ # The target branch supports stacking.
+ self.assertTrue(target.open_branch()._format.supports_stacking())
+ if isinstance(repo, remote.RemoteRepository):
+ repo._ensure_real()
+ repo = repo._real_repository
+ target_repo = target.open_repository()
+ if isinstance(target_repo, remote.RemoteRepository):
+ target_repo._ensure_real()
+ target_repo = target_repo._real_repository
+ # The repository format is unchanged if it could already stack, or the
+ # same as the stack on.
+ if repo._format.supports_external_lookups:
+ self.assertEqual(repo._format, target_repo._format)
+ else:
+ self.assertEqual(stack_on.repository._format, target_repo._format)
+
+ def test__make_parents_provider(self):
+ """Repositories must have a _make_parents_provider method that returns
+ an object with a get_parent_map method.
+ """
+ repo = self.make_repository('repo')
+ repo._make_parents_provider().get_parent_map
+
+ def make_repository_and_foo_bar(self, shared=None):
+ made_control = self.make_bzrdir('repository')
+ repo = made_control.create_repository(shared=shared)
+ if not repo._format.supports_nesting_repositories:
+ raise tests.TestNotApplicable("repository does not support "
+ "nesting repositories")
+ controldir.ControlDir.create_branch_convenience(
+ self.get_url('repository/foo'), force_new_repo=False)
+ controldir.ControlDir.create_branch_convenience(
+ self.get_url('repository/bar'), force_new_repo=True)
+ baz = self.make_bzrdir('repository/baz')
+ qux = self.make_branch('repository/baz/qux')
+ quxx = self.make_branch('repository/baz/qux/quxx')
+ return repo
+
+ def test_find_branches(self):
+ repo = self.make_repository_and_foo_bar()
+ branches = repo.find_branches()
+ self.assertContainsRe(branches[-1].base, 'repository/foo/$')
+ self.assertContainsRe(branches[-3].base, 'repository/baz/qux/$')
+ self.assertContainsRe(branches[-2].base, 'repository/baz/qux/quxx/$')
+ # in some formats, creating a repo creates a branch
+ if len(branches) == 6:
+ self.assertContainsRe(branches[-4].base, 'repository/baz/$')
+ self.assertContainsRe(branches[-5].base, 'repository/bar/$')
+ self.assertContainsRe(branches[-6].base, 'repository/$')
+ else:
+ self.assertEqual(4, len(branches))
+ self.assertContainsRe(branches[-4].base, 'repository/bar/$')
+
+ def test_find_branches_using(self):
+ try:
+ repo = self.make_repository_and_foo_bar(shared=True)
+ except errors.IncompatibleFormat:
+ raise tests.TestNotApplicable
+ branches = repo.find_branches(using=True)
+ self.assertContainsRe(branches[-1].base, 'repository/foo/$')
+ # in some formats, creating a repo creates a branch
+ if len(branches) == 2:
+ self.assertContainsRe(branches[-2].base, 'repository/$')
+ else:
+ self.assertEqual(1, len(branches))
+
+ def test_find_branches_using_standalone(self):
+ branch = self.make_branch('branch')
+ if not branch.repository._format.supports_nesting_repositories:
+ raise tests.TestNotApplicable("format does not support nesting "
+ "repositories")
+ contained = self.make_branch('branch/contained')
+ branches = branch.repository.find_branches(using=True)
+ self.assertEqual([branch.base], [b.base for b in branches])
+ branches = branch.repository.find_branches(using=False)
+ self.assertEqual([branch.base, contained.base],
+ [b.base for b in branches])
+
+ def test_find_branches_using_empty_standalone_repo(self):
+ try:
+ repo = self.make_repository('repo', shared=False)
+ except errors.IncompatibleFormat:
+ raise tests.TestNotApplicable("format does not support standalone "
+ "repositories")
+ try:
+ repo.bzrdir.open_branch()
+ except errors.NotBranchError:
+ self.assertEqual([], repo.find_branches(using=True))
+ else:
+ self.assertEqual([repo.bzrdir.root_transport.base],
+ [b.base for b in repo.find_branches(using=True)])
+
+ def test_set_get_make_working_trees_true(self):
+ repo = self.make_repository('repo')
+ try:
+ repo.set_make_working_trees(True)
+ except (errors.RepositoryUpgradeRequired, errors.UnsupportedOperation), e:
+ raise tests.TestNotApplicable('Format does not support this flag.')
+ self.assertTrue(repo.make_working_trees())
+
+ def test_set_get_make_working_trees_false(self):
+ repo = self.make_repository('repo')
+ try:
+ repo.set_make_working_trees(False)
+ except (errors.RepositoryUpgradeRequired, errors.UnsupportedOperation), e:
+ raise tests.TestNotApplicable('Format does not support this flag.')
+ self.assertFalse(repo.make_working_trees())
+
+
+class TestRepositoryLocking(per_repository.TestCaseWithRepository):
+
+ def test_leave_lock_in_place(self):
+ repo = self.make_repository('r')
+ # Lock the repository, then use leave_lock_in_place so that when we
+ # unlock the repository the lock is still held on disk.
+ token = repo.lock_write().repository_token
+ try:
+ if token is None:
+ # This test does not apply, because this repository refuses lock
+ # tokens.
+ self.assertRaises(NotImplementedError, repo.leave_lock_in_place)
+ return
+ repo.leave_lock_in_place()
+ finally:
+ repo.unlock()
+ # We should be unable to relock the repo.
+ self.assertRaises(errors.LockContention, repo.lock_write)
+ # Cleanup
+ repo.lock_write(token)
+ repo.dont_leave_lock_in_place()
+ repo.unlock()
+
+ def test_dont_leave_lock_in_place(self):
+ repo = self.make_repository('r')
+ # Create a lock on disk.
+ token = repo.lock_write().repository_token
+ try:
+ if token is None:
+ # This test does not apply, because this repository refuses lock
+ # tokens.
+ self.assertRaises(NotImplementedError,
+ repo.dont_leave_lock_in_place)
+ return
+ try:
+ repo.leave_lock_in_place()
+ except NotImplementedError:
+ # This repository doesn't support this API.
+ return
+ finally:
+ repo.unlock()
+ # Reacquire the lock (with a different repository object) by using the
+ # token.
+ new_repo = repo.bzrdir.open_repository()
+ new_repo.lock_write(token=token)
+ # Call dont_leave_lock_in_place, so that the lock will be released by
+ # this instance, even though the lock wasn't originally acquired by it.
+ new_repo.dont_leave_lock_in_place()
+ new_repo.unlock()
+ # Now the repository is unlocked. Test this by locking it (without a
+ # token).
+ repo.lock_write()
+ repo.unlock()
+
+ def test_lock_read_then_unlock(self):
+ # Calling lock_read then unlocking should work without errors.
+ repo = self.make_repository('r')
+ repo.lock_read()
+ repo.unlock()
+
+ def test_lock_read_returns_unlockable(self):
+ repo = self.make_repository('r')
+ self.assertThat(repo.lock_read, ReturnsUnlockable(repo))
+
+ def test_lock_write_returns_unlockable(self):
+ repo = self.make_repository('r')
+ self.assertThat(repo.lock_write, ReturnsUnlockable(repo))
+
+
+# FIXME: document why this is a TestCaseWithTransport rather than a
+# TestCaseWithRepository
+class TestEscaping(tests.TestCaseWithTransport):
+ """Test that repositories can be stored correctly on VFAT transports.
+
+ Makes sure we have proper escaping of invalid characters, etc.
+
+ It'd be better to test all operations on the FakeVFATTransportDecorator,
+ but working trees go straight to the os not through the Transport layer.
+ Therefore we build some history first in the regular way and then
+ check it's safe to access for vfat.
+ """
+
+ def test_on_vfat(self):
+ # don't bother with remote repository testing, because this test is
+ # about local disk layout/support.
+ if isinstance(self.repository_format, remote.RemoteRepositoryFormat):
+ return
+ self.transport_server = test_server.FakeVFATServer
+ FOO_ID = 'foo<:>ID'
+ REV_ID = 'revid-1'
+ # this always makes a default format repository, which is wrong:
+ # it should be a TestCaseWithRepository in order to get the
+ # format under test.
+ wt = self.make_branch_and_tree('repo')
+ self.build_tree(["repo/foo"], line_endings='binary')
+ # add a file with an id containing weird characters
+ wt.add(['foo'], [FOO_ID])
+ wt.commit('this is my new commit', rev_id=REV_ID)
+ # now access over vfat; should be safe
+ branch = controldir.ControlDir.open(self.get_url('repo')).open_branch()
+ revtree = branch.repository.revision_tree(REV_ID)
+ revtree.lock_read()
+ self.addCleanup(revtree.unlock)
+ contents = revtree.get_file_text(FOO_ID)
+ self.assertEqual(contents, 'contents of repo/foo\n')
+
+ def test_create_bundle(self):
+ wt = self.make_branch_and_tree('repo')
+ self.build_tree(['repo/file1'])
+ wt.add('file1')
+ wt.commit('file1', rev_id='rev1')
+ fileobj = StringIO()
+ wt.branch.repository.create_bundle(
+ 'rev1', _mod_revision.NULL_REVISION, fileobj)
+
+
+class TestRepositoryControlComponent(per_repository.TestCaseWithRepository):
+ """Repository implementations adequately implement ControlComponent."""
+
+ def test_urls(self):
+ repo = self.make_repository('repo')
+ self.assertIsInstance(repo.user_url, str)
+ self.assertEqual(repo.user_url, repo.user_transport.base)
+ # for all current bzrdir implementations the user dir must be
+ # above the control dir but we might need to relax that?
+ self.assertEqual(repo.control_url.find(repo.user_url), 0)
+ self.assertEqual(repo.control_url, repo.control_transport.base)
+
+
+class TestDeltaRevisionFiltered(per_repository.TestCaseWithRepository):
+
+ def setUp(self):
+ super(TestDeltaRevisionFiltered, self).setUp()
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/foo', 'a/bar/', 'a/bar/b1', 'a/bar/b2', 'a/baz'])
+ tree_a.add(['foo', 'bar', 'bar/b1', 'bar/b2', 'baz'],
+ ['foo-id', 'bar-id', 'b1-id', 'b2-id', 'baz-id'])
+ tree_a.commit('rev1', rev_id='rev1')
+ self.build_tree(['a/bar/b3'])
+ tree_a.add('bar/b3', 'b3-id')
+ tree_a.commit('rev2', rev_id='rev2')
+ self.repository = tree_a.branch.repository
+
+ def test_multiple_files(self):
+ # Test multiple files
+ delta = self.repository.get_revision_delta('rev1',
+ specific_fileids=['foo-id', 'baz-id'])
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([
+ ('baz', 'baz-id', 'file'),
+ ('foo', 'foo-id', 'file'),
+ ], delta.added)
+
+ def test_directory(self):
+ # Test a directory
+ delta = self.repository.get_revision_delta('rev1',
+ specific_fileids=['bar-id'])
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([
+ ('bar', 'bar-id', 'directory'),
+ ('bar/b1', 'b1-id', 'file'),
+ ('bar/b2', 'b2-id', 'file'),
+ ], delta.added)
+
+ def test_unrelated(self):
+ # Try another revision
+ delta = self.repository.get_revision_delta('rev2',
+ specific_fileids=['foo-id'])
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([], delta.added)
+
+ def test_file_in_directory(self):
+ # Test a file in a directory, both of which were added
+ delta = self.repository.get_revision_delta('rev1',
+ specific_fileids=['b2-id'])
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ self.assertEqual([
+ ('bar', 'bar-id', 'directory'),
+ ('bar/b2', 'b2-id', 'file'),
+ ], delta.added)
+
+ def test_file_in_unchanged_directory(self):
+ delta = self.repository.get_revision_delta('rev2',
+ specific_fileids=['b3-id'])
+ self.assertIsInstance(delta, _mod_delta.TreeDelta)
+ if delta.added == [
+ ('bar', 'bar-id', 'directory'),
+ ('bar/b3', 'b3-id', 'file')]:
+ self.knownFailure("bzr incorrectly reports 'bar' as added - "
+ "bug 878217")
+ self.assertEqual([
+ ('bar/b3', 'b3-id', 'file'),
+ ], delta.added)
diff --git a/bzrlib/tests/per_repository/test_revision.py b/bzrlib/tests/per_repository/test_revision.py
new file mode 100644
index 0000000..d00e4e0
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_revision.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for revision properties."""
+
+from bzrlib.tests.per_repository import (
+ TestCaseWithRepository,
+ )
+
+class TestRevProps(TestCaseWithRepository):
+
+ def test_simple_revprops(self):
+ """Simple revision properties"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ b.nick = 'Nicholas'
+ props = dict(flavor='choc-mint',
+ condiment='orange\n mint\n\tcandy',
+ empty='',
+ non_ascii=u'\xb5')
+ wt.commit(message='initial null commit',
+ revprops=props,
+ allow_pointless=True,
+ rev_id='test@user-1')
+ rev = b.repository.get_revision('test@user-1')
+ self.assertTrue('flavor' in rev.properties)
+ self.assertEqual(rev.properties['flavor'], 'choc-mint')
+ self.assertEqual([('branch-nick', 'Nicholas'),
+ ('condiment', 'orange\n mint\n\tcandy'),
+ ('empty', ''),
+ ('flavor', 'choc-mint'),
+ ('non_ascii', u'\xb5'),
+ ], sorted(rev.properties.items()))
+
+ def test_invalid_revprops(self):
+ """Invalid revision properties"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ self.assertRaises(ValueError,
+ wt.commit,
+ message='invalid',
+ revprops={'what a silly property': 'fine'})
+ self.assertRaises(ValueError,
+ wt.commit,
+ message='invalid',
+ revprops=dict(number=13))
+
+
+class TestRevisionAttributes(TestCaseWithRepository):
+ """Test that revision attributes are correct."""
+
+ def test_revision_accessors(self):
+ """Make sure the values that come out of a revision are the
+ same as the ones that go in.
+ """
+ tree1 = self.make_branch_and_tree("br1")
+
+ # create a revision
+ rev1 = tree1.commit(message="quux", allow_pointless=True,
+ committer="jaq",
+ revprops={'empty':'',
+ 'value':'one',
+ 'unicode':u'\xb5',
+ 'multiline':'foo\nbar\n\n'
+ })
+ self.assertEqual(tree1.branch.last_revision(), rev1)
+ rev_a = tree1.branch.repository.get_revision(
+ tree1.branch.last_revision())
+
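+ # Recreate the same revision in a second branch and check that every
+ # field survives the round trip.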
+ tree2 = self.make_branch_and_tree("br2")
+ tree2.commit(message=rev_a.message,
+ timestamp=rev_a.timestamp,
+ timezone=rev_a.timezone,
+ committer=rev_a.committer,
+ rev_id=rev_a.revision_id,
+ revprops=rev_a.properties,
+ allow_pointless=True, # there's nothing in this commit
+ strict=True,
+ verbose=True)
+ rev_b = tree2.branch.repository.get_revision(
+ tree2.branch.last_revision())
+
+ self.assertEqual(rev_a.message, rev_b.message)
+ self.assertEqual(rev_a.timestamp, rev_b.timestamp)
+ self.assertEqual(rev_a.timezone, rev_b.timezone)
+ self.assertEqual(rev_a.committer, rev_b.committer)
+ self.assertEqual(rev_a.revision_id, rev_b.revision_id)
+ self.assertEqual(rev_a.properties, rev_b.properties)
+
+ def test_zero_timezone(self):
+ tree1 = self.make_branch_and_tree("br1")
+
+ # create a revision
+ tree1.commit(message="quux", timezone=0, rev_id='r1')
+ rev_a = tree1.branch.repository.get_revision('r1')
+ self.assertEqual(0, rev_a.timezone)
diff --git a/bzrlib/tests/per_repository/test_signatures.py b/bzrlib/tests/per_repository/test_signatures.py
new file mode 100644
index 0000000..d6737a4
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_signatures.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository revision signatures."""
+
+from bzrlib import (
+ errors,
+ gpg,
+ tests,
+ urlutils,
+ )
+
+from bzrlib.testament import Testament
+from bzrlib.tests import per_repository
+
+class TestSignatures(per_repository.TestCaseWithRepository):
+
+ def setUp(self):
+ super(TestSignatures, self).setUp()
+ if not self.repository_format.supports_revision_signatures:
+ raise tests.TestNotApplicable(
+ "repository does not support signing revisions")
+
+# TODO 20051003 RBC:
+# compare the gpg-to-sign info for a commit with a ghost and
+# an identical tree without a ghost
+# fetch missing should rewrite the TOC of weaves to list newly available parents.
+
+ def test_sign_existing_revision(self):
+ wt = self.make_branch_and_tree('.')
+ wt.commit("base", allow_pointless=True, rev_id='A')
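+        # LoopbackGPGStrategy never invokes an external gpg binary; it simply
+        # wraps the text in PSEUDO-SIGNED markers, as the assertion below shows.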
+ strategy = gpg.LoopbackGPGStrategy(None)
+ repo = wt.branch.repository
+ self.addCleanup(repo.lock_write().unlock)
+ repo.start_write_group()
+ repo.sign_revision('A', strategy)
+ repo.commit_write_group()
+ self.assertEqual('-----BEGIN PSEUDO-SIGNED CONTENT-----\n' +
+ Testament.from_revision(repo,
+ 'A').as_short_text() +
+ '-----END PSEUDO-SIGNED CONTENT-----\n',
+ repo.get_signature_text('A'))
+
+ def test_store_signature(self):
+ wt = self.make_branch_and_tree('.')
+ branch = wt.branch
+ branch.lock_write()
+ try:
+ branch.repository.start_write_group()
+ try:
+ branch.repository.store_revision_signature(
+ gpg.LoopbackGPGStrategy(None), 'FOO', 'A')
+ except errors.NoSuchRevision:
+ branch.repository.abort_write_group()
+ raise tests.TestNotApplicable(
+                    "repository does not support signing non-present "
+                    "revisions")
+ except:
+ branch.repository.abort_write_group()
+ raise
+ else:
+ branch.repository.commit_write_group()
+ finally:
+ branch.unlock()
+ # A signature without a revision should not be accessible.
+ self.assertRaises(errors.NoSuchRevision,
+ branch.repository.has_signature_for_revision_id,
+ 'A')
+ wt.commit("base", allow_pointless=True, rev_id='A')
+ self.assertEqual('-----BEGIN PSEUDO-SIGNED CONTENT-----\n'
+ 'FOO-----END PSEUDO-SIGNED CONTENT-----\n',
+ branch.repository.get_signature_text('A'))
+
+ def test_clone_preserves_signatures(self):
+ wt = self.make_branch_and_tree('source')
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ repo = wt.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ repo.sign_revision('A', gpg.LoopbackGPGStrategy(None))
+ repo.commit_write_group()
+ repo.unlock()
+ #FIXME: clone should work to urls,
+ # wt.clone should work to disks.
+ self.build_tree(['target/'])
+ d2 = repo.bzrdir.clone(urlutils.local_path_to_url('target'))
+ self.assertEqual(repo.get_signature_text('A'),
+ d2.open_repository().get_signature_text('A'))
+
+ def test_verify_revision_signature_not_signed(self):
+ wt = self.make_branch_and_tree('.')
+ wt.commit("base", allow_pointless=True, rev_id='A')
+ strategy = gpg.LoopbackGPGStrategy(None)
+ self.assertEquals(
+ (gpg.SIGNATURE_NOT_SIGNED, None),
+ wt.branch.repository.verify_revision_signature('A', strategy))
+
+ def test_verify_revision_signature(self):
+ wt = self.make_branch_and_tree('.')
+ wt.commit("base", allow_pointless=True, rev_id='A')
+ strategy = gpg.LoopbackGPGStrategy(None)
+ repo = wt.branch.repository
+ self.addCleanup(repo.lock_write().unlock)
+ repo.start_write_group()
+ repo.sign_revision('A', strategy)
+ repo.commit_write_group()
+ self.assertEqual('-----BEGIN PSEUDO-SIGNED CONTENT-----\n' +
+ Testament.from_revision(repo,
+ 'A').as_short_text() +
+ '-----END PSEUDO-SIGNED CONTENT-----\n',
+ repo.get_signature_text('A'))
+ self.assertEquals(
+ (gpg.SIGNATURE_VALID, None, ),
+ repo.verify_revision_signature('A', strategy))
+
+ def test_verify_revision_signatures(self):
+ wt = self.make_branch_and_tree('.')
+ wt.commit("base", allow_pointless=True, rev_id='A')
+ wt.commit("second", allow_pointless=True, rev_id='B')
+ strategy = gpg.LoopbackGPGStrategy(None)
+ repo = wt.branch.repository
+ self.addCleanup(repo.lock_write().unlock)
+ repo.start_write_group()
+ repo.sign_revision('A', strategy)
+ repo.commit_write_group()
+ self.assertEqual('-----BEGIN PSEUDO-SIGNED CONTENT-----\n' +
+ Testament.from_revision(repo,
+ 'A').as_short_text() +
+ '-----END PSEUDO-SIGNED CONTENT-----\n',
+ repo.get_signature_text('A'))
+ self.assertEquals(
+ [('A', gpg.SIGNATURE_VALID, None),
+ ('B', gpg.SIGNATURE_NOT_SIGNED, None)],
+ list(repo.verify_revision_signatures(['A', 'B'], strategy)))
+
+
+class TestUnsupportedSignatures(per_repository.TestCaseWithRepository):
+
+ def test_sign_revision(self):
+ if self.repository_format.supports_revision_signatures:
+ raise tests.TestNotApplicable(
+ "repository supports signing revisions")
+ wt = self.make_branch_and_tree('source')
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ repo = wt.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ self.assertRaises(errors.UnsupportedOperation,
+ repo.sign_revision, 'A', gpg.LoopbackGPGStrategy(None))
+ repo.commit_write_group()
diff --git a/bzrlib/tests/per_repository/test_statistics.py b/bzrlib/tests/per_repository/test_statistics.py
new file mode 100644
index 0000000..cbbe0bf
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_statistics.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository statistic-gathering apis."""
+
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestGatherStats(TestCaseWithRepository):
+
+ def test_gather_stats(self):
+ """First smoke test covering the refactoring into the Repository api."""
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ # three commits: one to be included by reference, one to be
+ # requested, and one to be in the repository but [mostly] ignored.
+ rev1 = tree.commit('first post', committer='person 1',
+ timestamp=1170491381, timezone=0)
+ rev2 = tree.commit('second post', committer='person 2',
+ timestamp=1171491381, timezone=0)
+ rev3 = tree.commit('third post', committer='person 3',
+ timestamp=1172491381, timezone=0)
+ tree.unlock()
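+        # rev3 is in the repository but outside rev2's ancestry, so the date
+        # assertions below stop at rev2 while the revision count still
+        # reflects all three commits.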
+ # now, in the same repository, asking for stats with/without the
+ # committers flag generates the same date information.
+ stats = tree.branch.repository.gather_stats(rev2, committers=False)
+ # this test explicitly only checks for certain keys
+ # in the dictionary, as implementations are allowed to
+ # provide arbitrary data in other keys.
+ self.assertEqual(stats['firstrev'], (1170491381.0, 0))
+ self.assertEqual(stats['latestrev'], (1171491381.0, 0))
+ self.assertEqual(stats['revisions'], 3)
+ stats = tree.branch.repository.gather_stats(rev2, committers=True)
+ self.assertEquals(2, stats["committers"])
+ self.assertEquals((1170491381.0, 0), stats["firstrev"])
+ self.assertEquals((1171491381.0, 0), stats["latestrev"])
+ self.assertEquals(3, stats["revisions"])
+
+ def test_gather_stats_empty_repo(self):
+        """Even an empty repository reports a revision count (of zero)."""
+ tree = self.make_branch_and_memory_tree('.')
+ # now ask for global repository stats.
+ stats = tree.branch.repository.gather_stats()
+ self.assertEquals(0, stats['revisions'])
+ self.assertFalse("committers" in stats)
+ self.assertFalse("firstrev" in stats)
+ self.assertFalse("latestrev" in stats)
diff --git a/bzrlib/tests/per_repository/test_write_group.py b/bzrlib/tests/per_repository/test_write_group.py
new file mode 100644
index 0000000..dab677b
--- /dev/null
+++ b/bzrlib/tests/per_repository/test_write_group.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository write groups."""
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.tests import (
+ per_repository,
+ test_server,
+ )
+from bzrlib.transport import memory
+
+
+class TestWriteGroup(per_repository.TestCaseWithRepository):
+
+ def test_start_write_group_unlocked_needs_write_lock(self):
+ repo = self.make_repository('.')
+ self.assertRaises(errors.NotWriteLocked, repo.start_write_group)
+
+ def test_start_write_group_read_locked_needs_write_lock(self):
+ repo = self.make_repository('.')
+ repo.lock_read()
+ try:
+ self.assertRaises(errors.NotWriteLocked, repo.start_write_group)
+ finally:
+ repo.unlock()
+
+ def test_start_write_group_write_locked_gets_None(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ self.assertEqual(None, repo.start_write_group())
+ repo.commit_write_group()
+ repo.unlock()
+
+ def test_start_write_group_twice_errors(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ # don't need a specific exception for now - this is
+ # really to be sure it's used right, not for signalling
+ # semantic information.
+ self.assertRaises(errors.BzrError, repo.start_write_group)
+ finally:
+ repo.commit_write_group()
+ repo.unlock()
+
+ def test_commit_write_group_does_not_error(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo.start_write_group()
+ # commit_write_group can either return None (for repositories without
+ # isolated transactions) or a hint for pack(). So we only check it
+ # works in this interface test, because all repositories are exercised.
+ repo.commit_write_group()
+ repo.unlock()
+
+ def test_unlock_in_write_group(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo.start_write_group()
+ # don't need a specific exception for now - this is
+ # really to be sure it's used right, not for signalling
+ # semantic information.
+ self.assertLogsError(errors.BzrError, repo.unlock)
+ # after this error occurs, the repository is unlocked, and the write
+ # group is gone. you've had your chance, and you blew it. ;-)
+ self.assertFalse(repo.is_locked())
+ self.assertRaises(errors.BzrError, repo.commit_write_group)
+ self.assertRaises(errors.BzrError, repo.unlock)
+
+ def test_is_in_write_group(self):
+ repo = self.make_repository('.')
+ self.assertFalse(repo.is_in_write_group())
+ repo.lock_write()
+ repo.start_write_group()
+ self.assertTrue(repo.is_in_write_group())
+ repo.commit_write_group()
+ self.assertFalse(repo.is_in_write_group())
+ # abort also removes the in_write_group status.
+ repo.start_write_group()
+ self.assertTrue(repo.is_in_write_group())
+ repo.abort_write_group()
+ self.assertFalse(repo.is_in_write_group())
+ repo.unlock()
+
+ def test_abort_write_group_gets_None(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo.start_write_group()
+ self.assertEqual(None, repo.abort_write_group())
+ repo.unlock()
+
+ def test_abort_write_group_does_not_raise_when_suppressed(self):
+ if self.transport_server is test_server.LocalURLServer:
+ self.transport_server = None
+ self.vfs_transport_factory = memory.MemoryServer
+ repo = self.make_repository('repo')
+ token = repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ # Damage the repository on the filesystem
+ t = self.get_transport('')
+ t.rename('repo', 'foo')
+ self.addCleanup(t.rename, 'foo', 'repo')
+ # abort_write_group will not raise an error, because either an
+ # exception was not generated, or the exception was caught and
+ # suppressed. See also test_pack_repository's test of the same name.
+ self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
diff --git a/bzrlib/tests/per_repository_chk/__init__.py b/bzrlib/tests/per_repository_chk/__init__.py
new file mode 100644
index 0000000..e6fc426
--- /dev/null
+++ b/bzrlib/tests/per_repository_chk/__init__.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Repository implementation tests for CHK support.
+
+These tests check the conformance of the chk index support that some
+repositories provide.  All repository formats are tested - those that do not
+support chk indices have the test_unsupported tests run; the others have the
+test_supported tests run.
+"""
+
+from bzrlib import (
+ repository,
+ remote,
+ )
+from bzrlib.repofmt.knitpack_repo import (
+ RepositoryFormatKnitPack5,
+ )
+from bzrlib.repofmt.groupcompress_repo import (
+ RepositoryFormat2a,
+ )
+from bzrlib.tests import (
+ multiply_tests,
+ )
+from bzrlib.tests.per_repository import (
+ all_repository_format_scenarios,
+ TestCaseWithRepository,
+ )
+
+
+class TestCaseWithRepositoryCHK(TestCaseWithRepository):
+
+ def make_repository(self, path, format=None):
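+        # Create the repository with the inherited helper, then re-open it by
+        # URL: for remote scenarios this yields a repository over the chosen
+        # backing format (see the comment in load_tests below).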
+ TestCaseWithRepository.make_repository(self, path, format=format)
+ return repository.Repository.open(self.get_transport(path).base)
+
+
+def load_tests(standard_tests, module, loader):
+ supported_scenarios = []
+ unsupported_scenarios = []
+ for test_name, scenario_info in all_repository_format_scenarios():
+ format = scenario_info['repository_format']
+        # For remote repositories, we test both with and without a chk-capable
+        # backing format: change the format we use to create the repo to direct
+ # formats, and then the overridden make_repository in
+ # TestCaseWithRepositoryCHK will give a re-opened RemoteRepository
+ # with the chosen backing format.
+ if isinstance(format, remote.RemoteRepositoryFormat):
+ with_support = dict(scenario_info)
+ with_support['repository_format'] = RepositoryFormat2a()
+ supported_scenarios.append((test_name + "(Supported)", with_support))
+ no_support = dict(scenario_info)
+ no_support['repository_format'] = RepositoryFormatKnitPack5()
+ unsupported_scenarios.append((test_name + "(Not Supported)", no_support))
+ elif format.supports_chks:
+ supported_scenarios.append((test_name, scenario_info))
+ else:
+ unsupported_scenarios.append((test_name, scenario_info))
+ result = loader.suiteClass()
+ supported_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_repository_chk.test_supported'])
+ unsupported_tests = loader.loadTestsFromModuleNames([
+ 'bzrlib.tests.per_repository_chk.test_unsupported'])
+ multiply_tests(supported_tests, supported_scenarios, result)
+ multiply_tests(unsupported_tests, unsupported_scenarios, result)
+ return result
diff --git a/bzrlib/tests/per_repository_chk/test_supported.py b/bzrlib/tests/per_repository_chk/test_supported.py
new file mode 100644
index 0000000..a6a4e61
--- /dev/null
+++ b/bzrlib/tests/per_repository_chk/test_supported.py
@@ -0,0 +1,395 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repositories that support CHK indices."""
+
+from bzrlib import (
+ btree_index,
+ errors,
+ osutils,
+ repository,
+ )
+from bzrlib.remote import RemoteRepository
+from bzrlib.versionedfile import VersionedFiles
+from bzrlib.tests import TestNotApplicable
+from bzrlib.tests.per_repository_chk import TestCaseWithRepositoryCHK
+
+
+class TestCHKSupport(TestCaseWithRepositoryCHK):
+
+ def test_chk_bytes_attribute_is_VersionedFiles(self):
+ repo = self.make_repository('.')
+ self.assertIsInstance(repo.chk_bytes, VersionedFiles)
+
+ def test_add_bytes_to_chk_bytes_store(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
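+                # The chk store is content-addressed: passing (None,) as the
+                # key makes add_lines store the text under its sha1.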
+                sha1, length, _ = repo.chk_bytes.add_lines((None,),
+ None, ["foo\n", "bar\n"], random_id=True)
+ self.assertEqual('4e48e2c9a3d2ca8a708cb0cc545700544efb5021',
+ sha1)
+ self.assertEqual(
+ set([('sha1:4e48e2c9a3d2ca8a708cb0cc545700544efb5021',)]),
+ repo.chk_bytes.keys())
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ finally:
+ repo.unlock()
+ # And after an unlock/lock pair
+ repo.lock_read()
+ try:
+ self.assertEqual(
+ set([('sha1:4e48e2c9a3d2ca8a708cb0cc545700544efb5021',)]),
+ repo.chk_bytes.keys())
+ finally:
+ repo.unlock()
+ # and reopening
+ repo = repo.bzrdir.open_repository()
+ repo.lock_read()
+ try:
+ self.assertEqual(
+ set([('sha1:4e48e2c9a3d2ca8a708cb0cc545700544efb5021',)]),
+ repo.chk_bytes.keys())
+ finally:
+ repo.unlock()
+
+ def test_pack_preserves_chk_bytes_store(self):
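+        # Hand-built serialised chk records: a leaf node, plus an internal
+        # node whose single entry refers to the leaf by its sha1 key.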
+ leaf_lines = ["chkleaf:\n", "0\n", "1\n", "0\n", "\n"]
+ leaf_sha1 = osutils.sha_strings(leaf_lines)
+ node_lines = ["chknode:\n", "0\n", "1\n", "1\n", "foo\n",
+ "\x00sha1:%s\n" % (leaf_sha1,)]
+ node_sha1 = osutils.sha_strings(node_lines)
+ expected_set = set([('sha1:' + leaf_sha1,), ('sha1:' + node_sha1,)])
+ repo = self.make_repository('.')
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ # Internal node pointing at a leaf.
+ repo.chk_bytes.add_lines((None,), None, node_lines, random_id=True)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ repo.start_write_group()
+ try:
+ # Leaf in a separate pack.
+ repo.chk_bytes.add_lines((None,), None, leaf_lines, random_id=True)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ repo.pack()
+ self.assertEqual(expected_set, repo.chk_bytes.keys())
+ finally:
+ repo.unlock()
+ # and reopening
+ repo = repo.bzrdir.open_repository()
+ repo.lock_read()
+ try:
+ self.assertEqual(expected_set, repo.chk_bytes.keys())
+ finally:
+ repo.unlock()
+
+ def test_chk_bytes_are_fully_buffered(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ try:
+            sha1, length, _ = repo.chk_bytes.add_lines((None,),
+ None, ["foo\n", "bar\n"], random_id=True)
+ self.assertEqual('4e48e2c9a3d2ca8a708cb0cc545700544efb5021',
+ sha1)
+ self.assertEqual(
+ set([('sha1:4e48e2c9a3d2ca8a708cb0cc545700544efb5021',)]),
+ repo.chk_bytes.keys())
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ # This may not always be correct if we change away from BTreeGraphIndex
+        # in the future. But for now, let's check that chk_bytes are fully
+ # buffered
+ index = repo.chk_bytes._index._graph_index._indices[0]
+ self.assertIsInstance(index, btree_index.BTreeGraphIndex)
+ self.assertIs(type(index._leaf_node_cache), dict)
+ # Re-opening the repository should also have a repo with everything
+ # fully buffered
+ repo2 = repository.Repository.open(self.get_url())
+ repo2.lock_read()
+ self.addCleanup(repo2.unlock)
+ index = repo2.chk_bytes._index._graph_index._indices[0]
+ self.assertIsInstance(index, btree_index.BTreeGraphIndex)
+ self.assertIs(type(index._leaf_node_cache), dict)
+
+
+class TestCommitWriteGroupIntegrityCheck(TestCaseWithRepositoryCHK):
+ """Tests that commit_write_group prevents various kinds of invalid data
+ from being committed to a CHK repository.
+ """
+
+ def reopen_repo_and_resume_write_group(self, repo):
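+        """Suspend the current write group, reopen the repository, and
+        resume the write group so callers can re-run checks after a resume.
+        """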
+ resume_tokens = repo.suspend_write_group()
+ repo.unlock()
+ reopened_repo = repo.bzrdir.open_repository()
+ reopened_repo.lock_write()
+ self.addCleanup(reopened_repo.unlock)
+ reopened_repo.resume_write_group(resume_tokens)
+ return reopened_repo
+
+ def test_missing_chk_root_for_inventory(self):
+ """commit_write_group fails with BzrCheckError when the chk root record
+ for a new inventory is missing.
+ """
+ repo = self.make_repository('damaged-repo')
+ builder = self.make_branch_builder('simple-branch')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ repo.lock_write()
+ repo.start_write_group()
+ # Now, add the objects manually
+ text_keys = [('file-id', 'A-id'), ('root-id', 'A-id')]
+ # Directly add the texts, inventory, and revision object for 'A-id' --
+ # but don't add the chk_bytes.
+ src_repo = b.repository
+ repo.texts.insert_record_stream(src_repo.texts.get_record_stream(
+ text_keys, 'unordered', True))
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('A-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('A-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # avoid the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+ def test_missing_chk_root_for_unchanged_inventory(self):
+ """commit_write_group fails with BzrCheckError when the chk root record
+ for a new inventory is missing, even if the parent inventory is present
+ and has identical content (i.e. the same chk root).
+
+ A stacked repository containing only a revision with an identical
+ inventory to its parent will still have the chk root records for those
+ inventories.
+
+ (In principle the chk records are unnecessary in this case, but in
+ practice bzr 2.0rc1 (at least) expects to find them.)
+ """
+ repo = self.make_repository('damaged-repo')
+ # Make a branch where the last two revisions have identical
+ # inventories.
+ builder = self.make_branch_builder('simple-branch')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', None, [])
+ builder.build_snapshot('C-id', None, [])
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ # check our setup: B-id and C-id should have identical chk root keys.
+ inv_b = b.repository.get_inventory('B-id')
+ inv_c = b.repository.get_inventory('C-id')
+ if not isinstance(repo, RemoteRepository):
+ # Remote repositories always return plain inventories
+ self.assertEqual(inv_b.id_to_entry.key(), inv_c.id_to_entry.key())
+ # Now, manually insert objects for a stacked repo with only revision
+ # C-id:
+ # We need ('revisions', 'C-id'), ('inventories', 'C-id'),
+ # ('inventories', 'B-id'), and the corresponding chk roots for those
+ # inventories.
+ repo.lock_write()
+ repo.start_write_group()
+ src_repo = b.repository
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('B-id',), ('C-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('C-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # avoid the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+ def test_missing_chk_leaf_for_inventory(self):
+        """commit_write_group fails with BzrCheckError when a non-root chk
+        record referenced by a new revision's inventory is missing.
+ """
+ repo = self.make_repository('damaged-repo')
+ if isinstance(repo, RemoteRepository):
+ raise TestNotApplicable(
+ "Unable to obtain CHKInventory from remote repo")
+ b = self.make_branch_with_multiple_chk_nodes()
+ src_repo = b.repository
+ src_repo.lock_read()
+ self.addCleanup(src_repo.unlock)
+ # Now, manually insert objects for a stacked repo with only revision
+        # C-id, *except* drop one of the non-root chk records.
+ inv_b = src_repo.get_inventory('B-id')
+ inv_c = src_repo.get_inventory('C-id')
+ chk_root_keys_only = [
+ inv_b.id_to_entry.key(), inv_b.parent_id_basename_to_file_id.key(),
+ inv_c.id_to_entry.key(), inv_c.parent_id_basename_to_file_id.key()]
+ all_chks = src_repo.chk_bytes.keys()
+ # Pick a non-root key to drop
+ key_to_drop = all_chks.difference(chk_root_keys_only).pop()
+ all_chks.discard(key_to_drop)
+ repo.lock_write()
+ repo.start_write_group()
+ repo.chk_bytes.insert_record_stream(
+ src_repo.chk_bytes.get_record_stream(
+ all_chks, 'unordered', True))
+ repo.texts.insert_record_stream(
+ src_repo.texts.get_record_stream(
+ src_repo.texts.keys(), 'unordered', True))
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('B-id',), ('C-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('C-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # avoid the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+ def test_missing_chk_root_for_parent_inventory(self):
+ """commit_write_group fails with BzrCheckError when the chk root record
+ for a parent inventory of a new revision is missing.
+ """
+ repo = self.make_repository('damaged-repo')
+ if isinstance(repo, RemoteRepository):
+ raise TestNotApplicable(
+ "Unable to obtain CHKInventory from remote repo")
+ b = self.make_branch_with_multiple_chk_nodes()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ # Now, manually insert objects for a stacked repo with only revision
+ # C-id, *except* the chk root entry for the parent inventory.
+ # We need ('revisions', 'C-id'), ('inventories', 'C-id'),
+ # ('inventories', 'B-id'), and the corresponding chk roots for those
+ # inventories.
+ inv_c = b.repository.get_inventory('C-id')
+ chk_keys_for_c_only = [
+ inv_c.id_to_entry.key(), inv_c.parent_id_basename_to_file_id.key()]
+ repo.lock_write()
+ repo.start_write_group()
+ src_repo = b.repository
+ repo.chk_bytes.insert_record_stream(
+ src_repo.chk_bytes.get_record_stream(
+ chk_keys_for_c_only, 'unordered', True))
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('B-id',), ('C-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('C-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # avoid the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+ def make_branch_with_multiple_chk_nodes(self):
+ # add and modify files with very long file-ids, so that the chk map
+ # will need more than just a root node.
+ builder = self.make_branch_builder('simple-branch')
+ file_adds = []
+ file_modifies = []
+ for char in 'abc':
+ name = char * 10000
+ file_adds.append(
+ ('add', ('file-' + name, 'file-%s-id' % name, 'file',
+ 'content %s\n' % name)))
+ file_modifies.append(
+ ('modify', ('file-%s-id' % name, 'new content %s\n' % name)))
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None))] +
+ file_adds)
+ builder.build_snapshot('B-id', None, [])
+ builder.build_snapshot('C-id', None, file_modifies)
+ return builder.get_branch()
+
+ def test_missing_text_record(self):
+ """commit_write_group fails with BzrCheckError when a text is missing.
+ """
+ repo = self.make_repository('damaged-repo')
+ b = self.make_branch_with_multiple_chk_nodes()
+ src_repo = b.repository
+ src_repo.lock_read()
+ self.addCleanup(src_repo.unlock)
+ # Now, manually insert objects for a stacked repo with only revision
+ # C-id, *except* drop one changed text.
+ all_texts = src_repo.texts.keys()
+ all_texts.remove(('file-%s-id' % ('c'*10000,), 'C-id'))
+ repo.lock_write()
+ repo.start_write_group()
+ repo.chk_bytes.insert_record_stream(
+ src_repo.chk_bytes.get_record_stream(
+ src_repo.chk_bytes.keys(), 'unordered', True))
+ repo.texts.insert_record_stream(
+ src_repo.texts.get_record_stream(
+ all_texts, 'unordered', True))
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('B-id',), ('C-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('C-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # avoid the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+
+
+
diff --git a/bzrlib/tests/per_repository_chk/test_unsupported.py b/bzrlib/tests/per_repository_chk/test_unsupported.py
new file mode 100644
index 0000000..029283c
--- /dev/null
+++ b/bzrlib/tests/per_repository_chk/test_unsupported.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repositories that do not support CHK indices.
+
+CHK support is optional, and when it is not supported the methods and
+attributes added for CHK support should fail in known ways.
+"""
+
+from bzrlib.tests.per_repository_chk import TestCaseWithRepositoryCHK
+
+
+class TestNoCHKSupport(TestCaseWithRepositoryCHK):
+
+ def test_chk_bytes_attribute_is_None(self):
+ repo = self.make_repository('.')
+ self.assertEqual(None, repo.chk_bytes)
diff --git a/bzrlib/tests/per_repository_reference/__init__.py b/bzrlib/tests/per_repository_reference/__init__.py
new file mode 100644
index 0000000..f9b57d0
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/__init__.py
@@ -0,0 +1,137 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Repository implementation tests for external reference repositories.
+
+These tests check the conformance of repositories which refer to other
+repositories for some data, and are run for each repository format supporting
+this.
+"""
+
+from bzrlib import (
+ errors,
+ remote,
+ urlutils,
+ )
+from bzrlib.controldir import ControlDir
+from bzrlib.tests import multiply_tests
+from bzrlib.tests.per_repository import (
+ all_repository_format_scenarios,
+ TestCaseWithRepository,
+ )
+
+
+class TestCaseWithExternalReferenceRepository(TestCaseWithRepository):
+
+ def make_referring(self, relpath, a_repository):
+ """Get a new repository that refers to a_repository.
+
+ :param relpath: The path to create the repository at.
+ :param a_repository: A repository to refer to.
+ """
+ repo = self.make_repository(relpath)
+ repo.add_fallback_repository(self.readonly_repository(a_repository))
+ return repo
+
+ def readonly_repository(self, repo):
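+        # Re-open the repository over the read-only transport, giving a
+        # fallback that the tests can only read from.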
+ relpath = urlutils.basename(repo.bzrdir.user_url.rstrip('/'))
+ return ControlDir.open_from_transport(
+ self.get_readonly_transport(relpath)).open_repository()
+
+
+class TestCorrectFormat(TestCaseWithExternalReferenceRepository):
+
+ def test_repository_format(self):
+ # make sure the repository on tree.branch is of the desired format,
+        # because developers use this api to set up the tree, branch and
+ # repository for their tests: having it not give the right repository
+ # type would invalidate the tests.
+ tree = self.make_branch_and_tree('repo')
+ repo = self.make_referring('referring', tree.branch.repository)
+ self.assertIsInstance(repo._format,
+ self.repository_format.__class__)
+
+
+class TestIncompatibleStacking(TestCaseWithRepository):
+
+ def make_repo_and_incompatible_fallback(self):
+ referring = self.make_repository('referring')
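+        # Pick a fallback from the other serializer family: '2a' repositories
+        # use chks while '1.9' ones do not, so the pair can never stack.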
+ if referring._format.supports_chks:
+ different_fmt = '1.9'
+ else:
+ different_fmt = '2a'
+ fallback = self.make_repository('fallback', format=different_fmt)
+ return referring, fallback
+
+ def test_add_fallback_repository_rejects_incompatible(self):
+ # Repository.add_fallback_repository raises IncompatibleRepositories
+ # if you take two repositories in different serializations and try to
+ # stack them.
+ referring, fallback = self.make_repo_and_incompatible_fallback()
+ self.assertRaises(errors.IncompatibleRepositories,
+ referring.add_fallback_repository, fallback)
+
+ def test_add_fallback_doesnt_leave_fallback_locked(self):
+ # Bug #835035. If the referring repository is locked, it wants to lock
+ # the fallback repository. But if they are incompatible, the referring
+ # repository won't take ownership of the fallback, and thus should not
+        # leave the fallback in a locked state.
+ referring, fallback = self.make_repo_and_incompatible_fallback()
+ self.addCleanup(referring.lock_read().unlock)
+ # Assert precondition.
+ self.assertFalse(fallback.is_locked())
+ # Assert action.
+ self.assertRaises(errors.IncompatibleRepositories,
+ referring.add_fallback_repository, fallback)
+ # Assert postcondition.
+ self.assertFalse(fallback.is_locked())
+
+
+def external_reference_test_scenarios():
+ """Generate test scenarios for repositories supporting external references.
+ """
+ result = []
+ for test_name, scenario_info in all_repository_format_scenarios():
+ format = scenario_info['repository_format']
+ if (isinstance(format, remote.RemoteRepositoryFormat)
+ or format.supports_external_lookups):
+ result.append((test_name, scenario_info))
+ return result
+
+
+def load_tests(standard_tests, module, loader):
+ module_list = [
+ 'bzrlib.tests.per_repository_reference.test_add_inventory',
+ 'bzrlib.tests.per_repository_reference.test_add_revision',
+ 'bzrlib.tests.per_repository_reference.test_add_signature_text',
+ 'bzrlib.tests.per_repository_reference.test_all_revision_ids',
+ 'bzrlib.tests.per_repository_reference.test_break_lock',
+ 'bzrlib.tests.per_repository_reference.test_check',
+ 'bzrlib.tests.per_repository_reference.test_commit_with_stacking',
+ 'bzrlib.tests.per_repository_reference.test_default_stacking',
+ 'bzrlib.tests.per_repository_reference.test_fetch',
+ 'bzrlib.tests.per_repository_reference.test_get_record_stream',
+ 'bzrlib.tests.per_repository_reference.test_get_rev_id_for_revno',
+ 'bzrlib.tests.per_repository_reference.test_graph',
+ 'bzrlib.tests.per_repository_reference.test_initialize',
+ 'bzrlib.tests.per_repository_reference.test__make_parents_provider',
+ 'bzrlib.tests.per_repository_reference.test_unlock',
+ ]
+ # Parameterize per_repository_reference test modules by format.
+ standard_tests.addTests(loader.loadTestsFromModuleNames(module_list))
+ return multiply_tests(standard_tests, external_reference_test_scenarios(),
+ loader.suiteClass())
diff --git a/bzrlib/tests/per_repository_reference/test__make_parents_provider.py b/bzrlib/tests/per_repository_reference/test__make_parents_provider.py
new file mode 100644
index 0000000..0646a68
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test__make_parents_provider.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for _make_parents_provider on stacked repositories."""
+
+
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class Test_MakeParentsProvider(TestCaseWithRepository):
+
+ def test_add_fallback_after_make_pp(self):
+ """Fallbacks added after _make_parents_provider are used by that
+ provider.
+ """
+ referring_repo = self.make_repository('repo')
+ pp = referring_repo._make_parents_provider()
+ # Initially referring_repo has no revisions and no fallbacks
+ self.addCleanup(referring_repo.lock_read().unlock)
+ self.assertEqual({}, pp.get_parent_map(['revid2']))
+ # Add a fallback repo with a commit
+ wt_a = self.make_branch_and_tree('fallback')
+ wt_a.commit('first commit', rev_id='revid1')
+ wt_a.commit('second commit', rev_id='revid2')
+ fallback_repo = wt_a.branch.repository
+ referring_repo.add_fallback_repository(fallback_repo)
+        # Now revid2 appears in pp's results, with revid1 as its parent.
+ self.assertEqual(('revid1',), pp.get_parent_map(['revid2'])['revid2'])
+
+
diff --git a/bzrlib/tests/per_repository_reference/test_add_inventory.py b/bzrlib/tests/per_repository_reference/test_add_inventory.py
new file mode 100644
index 0000000..d461a36
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_add_inventory.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for add_inventory on a repository with external references."""
+
+from bzrlib import errors
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestAddInventory(TestCaseWithExternalReferenceRepository):
+
+ def test_add_inventory_goes_to_repo(self):
+ # adding an inventory only writes to the repository add_inventory is
+ # called on.
+ tree = self.make_branch_and_tree('sample')
+ revid = tree.commit('one')
+ inv = tree.branch.repository.get_inventory(revid)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ repo.add_inventory(revid, inv, [])
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ finally:
+ repo.unlock()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ inv2 = repo.get_inventory(revid)
+ content1 = dict((file_id, inv[file_id]) for file_id in inv)
+ content2 = dict((file_id, inv[file_id]) for file_id in inv2)
+ self.assertEqual(content1, content2)
+ self.assertRaises(errors.NoSuchRevision, base.get_inventory, revid)
diff --git a/bzrlib/tests/per_repository_reference/test_add_revision.py b/bzrlib/tests/per_repository_reference/test_add_revision.py
new file mode 100644
index 0000000..ec9b4b0
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_add_revision.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for add_revision on a repository with external references."""
+
+from bzrlib import errors
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestAddRevision(TestCaseWithExternalReferenceRepository):
+
+ def test_add_revision_goes_to_repo(self):
+ # adding a revision only writes to the repository add_revision is
+ # called on.
+ tree = self.make_branch_and_tree('sample')
+ revid = tree.commit('one')
+ inv = tree.branch.repository.get_inventory(revid)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ rev = tree.branch.repository.get_revision(revid)
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ rev = tree.branch.repository.get_revision(revid)
+ repo.texts.add_lines((inv.root.file_id, revid), [], [])
+ repo.add_revision(revid, rev, inv=inv)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ finally:
+ repo.unlock()
+ rev2 = repo.get_revision(revid)
+ self.assertEqual(rev, rev2)
+ self.assertRaises(errors.NoSuchRevision, base.get_revision, revid)
diff --git a/bzrlib/tests/per_repository_reference/test_add_signature_text.py b/bzrlib/tests/per_repository_reference/test_add_signature_text.py
new file mode 100644
index 0000000..1a09b92
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_add_signature_text.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for add_signature_text on a repository with external references."""
+
+from bzrlib import errors
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestAddSignatureText(TestCaseWithExternalReferenceRepository):
+
+ def test_add_signature_text_goes_to_repo(self):
+ # adding a signature only writes to the repository add_signature_text
+ # is called on.
+ tree = self.make_branch_and_tree('sample')
+ revid = tree.commit('one')
+ inv = tree.branch.repository.get_inventory(revid)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ rev = tree.branch.repository.get_revision(revid)
+ repo.texts.add_lines((inv.root.file_id, revid), [], [])
+ repo.add_revision(revid, rev, inv=inv)
+ repo.add_signature_text(revid, "text")
+ repo.commit_write_group()
+ except:
+ repo.abort_write_group()
+ raise
+ finally:
+ repo.unlock()
+ repo.get_signature_text(revid)
+ self.assertRaises(errors.NoSuchRevision, base.get_signature_text,
+ revid)
diff --git a/bzrlib/tests/per_repository_reference/test_all_revision_ids.py b/bzrlib/tests/per_repository_reference/test_all_revision_ids.py
new file mode 100644
index 0000000..ceeb2e2
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_all_revision_ids.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for all_revision_ids on a repository with external references."""
+
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestAllRevisionIds(TestCaseWithExternalReferenceRepository):
+
+ def test_all_revision_ids_empty(self):
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ self.assertEqual(set([]), set(repo.all_revision_ids()))
+
+ def test_all_revision_ids_from_base(self):
+ tree = self.make_branch_and_tree('base')
+ revid = tree.commit('one')
+ repo = self.make_referring('referring', tree.branch.repository)
+ self.assertEqual(set([revid]), set(repo.all_revision_ids()))
+
+ def test_all_revision_ids_from_repo(self):
+ tree = self.make_branch_and_tree('spare')
+ revid = tree.commit('one')
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ repo.fetch(tree.branch.repository, revid)
+ self.assertEqual(set([revid]), set(repo.all_revision_ids()))
+
+ def test_all_revision_ids_from_both(self):
+ tree = self.make_branch_and_tree('spare')
+ revid = tree.commit('one')
+ base_tree = self.make_branch_and_tree('base')
+ revid2 = base_tree.commit('two')
+ repo = self.make_referring('referring', base_tree.branch.repository)
+ repo.fetch(tree.branch.repository, revid)
+ self.assertEqual(set([revid, revid2]), set(repo.all_revision_ids()))
+
+ def test_duplicate_ids_do_not_affect_length(self):
+ tree = self.make_branch_and_tree('spare')
+ revid = tree.commit('one')
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ repo.fetch(tree.branch.repository, revid)
+ base.fetch(tree.branch.repository, revid)
+ self.assertEqual(set([revid]), set(repo.all_revision_ids()))
+ self.assertEqual(1, len(repo.all_revision_ids()))
+
diff --git a/bzrlib/tests/per_repository_reference/test_break_lock.py b/bzrlib/tests/per_repository_reference/test_break_lock.py
new file mode 100644
index 0000000..c63ae96
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_break_lock.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for break_lock on a repository with external references."""
+
+from bzrlib import (
+ errors,
+ ui,
+ )
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestBreakLock(TestCaseWithExternalReferenceRepository):
+
+ def test_break_lock(self):
+ base = self.make_repository('base')
+ repo = self.make_referring('referring', base)
+ unused_repo = repo.bzrdir.open_repository()
+ base.lock_write()
+ self.addCleanup(base.unlock)
+        # break_lock on a locked repository should break that lock.
+ repo.lock_write()
+ self.assertEqual(repo.get_physical_lock_status(),
+ unused_repo.get_physical_lock_status())
+ if not unused_repo.get_physical_lock_status():
+ # 'lock_write' has not taken a physical mutex out.
+ repo.unlock()
+ return
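+        # break_lock asks for confirmation; the canned UI factory answers
+        # 'yes' so the test stays non-interactive.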
+ ui.ui_factory = ui.CannedInputUIFactory([True])
+ unused_repo.break_lock()
+ self.assertRaises(errors.LockBroken, repo.unlock)
diff --git a/bzrlib/tests/per_repository_reference/test_check.py b/bzrlib/tests/per_repository_reference/test_check.py
new file mode 100644
index 0000000..d5e623d
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_check.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for check on a repository with external references."""
+
+import bzrlib.ui
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestCheck(TestCaseWithExternalReferenceRepository):
+
+ def test_check_file_graph_across_external_boundary_ok(self):
+ tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/file'])
+ tree.add(['file'], ['file-id'])
+ rev1_id = tree.commit('one')
+ referring = self.make_branch_and_tree('referring')
+ readonly_base = self.readonly_repository(tree.branch.repository)
+ referring.branch.repository.add_fallback_repository(readonly_base)
+ local_tree = referring.branch.create_checkout('local')
+ self.build_tree_contents([('local/file', 'change')])
+ rev2_id = local_tree.commit('two')
+ check_result = referring.branch.repository.check(
+ referring.branch.repository.all_revision_ids())
+ check_result.report_results(verbose=False)
+ self.assertFalse("inconsistent parents" in self.get_log())
diff --git a/bzrlib/tests/per_repository_reference/test_commit_with_stacking.py b/bzrlib/tests/per_repository_reference/test_commit_with_stacking.py
new file mode 100644
index 0000000..4d00756
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_commit_with_stacking.py
@@ -0,0 +1,220 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ errors,
+ remote,
+ tests,
+ urlutils,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestCaseWithStackedTarget(TestCaseWithRepository):
+
+ r1_key = ('rev1-id',)
+ r2_key = ('rev2-id',)
+
+ def make_stacked_target(self):
+ base_tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/f1.txt'])
+ base_tree.add(['f1.txt'], ['f1.txt-id'])
+ base_tree.commit('initial', rev_id=self.r1_key[0])
+ self.build_tree(['base/f2.txt'])
+ base_tree.add(['f2.txt'], ['f2.txt-id'])
+ base_tree.commit('base adds f2', rev_id=self.r2_key[0])
+ stacked_url = urlutils.join(base_tree.branch.base, '../stacked')
+ stacked_bzrdir = base_tree.bzrdir.sprout(stacked_url,
+ stacked=True)
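+        # sprout(..., stacked=True) creates a branch that keeps a stacking
+        # reference back to 'base' instead of copying all of its history.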
+ if isinstance(stacked_bzrdir, remote.RemoteBzrDir):
+ stacked_branch = stacked_bzrdir.open_branch()
+ stacked_tree = stacked_branch.create_checkout('stacked',
+ lightweight=True)
+ else:
+ stacked_tree = stacked_bzrdir.open_workingtree()
+ return base_tree, stacked_tree
+
+
+class TestCommitWithStacking(TestCaseWithStackedTarget):
+
+ def setUp(self):
+ super(TestCommitWithStacking, self).setUp()
+ format = self.repository_format
+ if (not (isinstance(format, remote.RemoteRepositoryFormat)
+ or format.supports_chks)):
+ raise tests.TestNotApplicable('stacked commit only supported'
+ ' for chk repositories')
+
+ def get_only_repo(self, tree):
+ """Open just the repository used by this tree.
+
+ This returns a read locked Repository object without any stacking
+ fallbacks.
+ """
+ repo = tree.branch.repository.bzrdir.open_repository()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ return repo
+
+ def assertPresent(self, expected, vf, keys):
+ """Check which of the supplied keys are present."""
+ parent_map = vf.get_parent_map(keys)
+ self.assertEqual(sorted(expected), sorted(parent_map))
+
+ def test_simple_commit(self):
+ base_tree, stacked_tree = self.make_stacked_target()
+ self.assertEqual(1,
+ len(stacked_tree.branch.repository._fallback_repositories))
+ self.build_tree_contents([('stacked/f1.txt', 'new content\n')])
+ stacked_tree.commit('new content', rev_id='new-rev-id')
+ # We open the repository without fallbacks to ensure the data really
+ # is present locally
+ stacked_only_repo = self.get_only_repo(stacked_tree)
+ # We should have the immediate parent inventory available, but not the
+ # grandparent's
+ self.assertPresent([self.r2_key],
+ stacked_only_repo.inventories, [self.r1_key, self.r2_key])
+ # And we should be able to pull this revision into another stacked
+ # branch
+ stacked2_branch = base_tree.bzrdir.sprout('stacked2',
+ stacked=True).open_branch()
+ stacked2_branch.repository.fetch(stacked_only_repo,
+ revision_id='new-rev-id')
+
+ def test_merge_commit(self):
+ base_tree, stacked_tree = self.make_stacked_target()
+ self.build_tree_contents([('base/f1.txt', 'new content\n')])
+ r3_key = ('rev3-id',)
+ base_tree.commit('second base', rev_id=r3_key[0])
+ to_be_merged_tree = base_tree.bzrdir.sprout('merged'
+ ).open_workingtree()
+ self.build_tree(['merged/f2.txt'])
+ to_be_merged_tree.add(['f2.txt'], ['f2.txt-id'])
+ to_merge_key = ('to-merge-rev-id',)
+ to_be_merged_tree.commit('new-to-be-merged', rev_id=to_merge_key[0])
+ stacked_tree.merge_from_branch(to_be_merged_tree.branch)
+ merged_key = ('merged-rev-id',)
+ stacked_tree.commit('merge', rev_id=merged_key[0])
+ # to-merge isn't in base, so it should be in stacked.
+ # rev3-id is a parent of a revision we have, so we should have the
+ # inventory, but not the revision.
+ # merged has a parent of r2, so we should also have r2's
+ # inventory-but-not-revision.
+ # Nothing has r1 directly, so we shouldn't have anything present for it
+ stacked_only_repo = self.get_only_repo(stacked_tree)
+ all_keys = [self.r1_key, self.r2_key, r3_key, to_merge_key, merged_key]
+ self.assertPresent([to_merge_key, merged_key],
+ stacked_only_repo.revisions, all_keys)
+ self.assertPresent([self.r2_key, r3_key, to_merge_key, merged_key],
+ stacked_only_repo.inventories, all_keys)
+
+ def test_merge_from_master(self):
+ base_tree, stacked_tree = self.make_stacked_target()
+ self.build_tree_contents([('base/f1.txt', 'new content\n')])
+ r3_key = ('rev3-id',)
+ base_tree.commit('second base', rev_id=r3_key[0])
+ stacked_tree.merge_from_branch(base_tree.branch)
+ merged_key = ('merged-rev-id',)
+ stacked_tree.commit('merge', rev_id=merged_key[0])
+ all_keys = [self.r1_key, self.r2_key, r3_key, merged_key]
+ # We shouldn't have any of the base revisions in the local repo, but we
+ # should have both base inventories.
+ stacked_only_repo = self.get_only_repo(stacked_tree)
+ self.assertPresent([merged_key],
+ stacked_only_repo.revisions, all_keys)
+ self.assertPresent([self.r2_key, r3_key, merged_key],
+ stacked_only_repo.inventories, all_keys)
+
+ def test_multi_stack(self):
+ """base + stacked + stacked-on-stacked"""
+ base_tree, stacked_tree = self.make_stacked_target()
+ self.build_tree(['stacked/f3.txt'])
+ stacked_tree.add(['f3.txt'], ['f3.txt-id'])
+ stacked_key = ('stacked-rev-id',)
+ stacked_tree.commit('add f3', rev_id=stacked_key[0])
+ stacked_only_repo = self.get_only_repo(stacked_tree)
+ self.assertPresent([self.r2_key], stacked_only_repo.inventories,
+ [self.r1_key, self.r2_key])
+ # This ensures we get a Remote URL, rather than a local one.
+ stacked2_url = urlutils.join(base_tree.branch.base, '../stacked2')
+ stacked2_bzrdir = stacked_tree.bzrdir.sprout(stacked2_url,
+ revision_id=self.r1_key[0],
+ stacked=True)
+ if isinstance(stacked2_bzrdir, remote.RemoteBzrDir):
+ stacked2_branch = stacked2_bzrdir.open_branch()
+ stacked2_tree = stacked2_branch.create_checkout('stacked2',
+ lightweight=True)
+ else:
+ stacked2_tree = stacked2_bzrdir.open_workingtree()
+ # stacked2 is stacked on stacked, but its content is rev1, so
+ # it needs to pull the basis information from a fallback-of-fallback.
+ self.build_tree(['stacked2/f3.txt'])
+ stacked2_only_repo = self.get_only_repo(stacked2_tree)
+ self.assertPresent([], stacked2_only_repo.inventories,
+ [self.r1_key, self.r2_key])
+ stacked2_tree.add(['f3.txt'], ['f3.txt-id'])
+ stacked2_tree.commit('add f3', rev_id='stacked2-rev-id')
+ # We added data to this read-locked repo, so refresh it
+ stacked2_only_repo.refresh_data()
+ self.assertPresent([self.r1_key], stacked2_only_repo.inventories,
+ [self.r1_key, self.r2_key])
+
+ def test_commit_with_ghosts_fails(self):
+ base_tree, stacked_tree = self.make_stacked_target()
+ stacked_tree.set_parent_ids([stacked_tree.last_revision(),
+ 'ghost-rev-id'])
+ self.assertRaises(errors.BzrError,
+ stacked_tree.commit, 'failed_commit')
+
+ def test_commit_with_ghost_in_ancestry(self):
+ base_tree, stacked_tree = self.make_stacked_target()
+ self.build_tree_contents([('base/f1.txt', 'new content\n')])
+ r3_key = ('rev3-id',)
+ base_tree.commit('second base', rev_id=r3_key[0])
+ to_be_merged_tree = base_tree.bzrdir.sprout('merged'
+ ).open_workingtree()
+ self.build_tree(['merged/f2.txt'])
+ to_be_merged_tree.add(['f2.txt'], ['f2.txt-id'])
+ ghost_key = ('ghost-rev-id',)
+ to_be_merged_tree.set_parent_ids([r3_key[0], ghost_key[0]])
+ to_merge_key = ('to-merge-rev-id',)
+ to_be_merged_tree.commit('new-to-be-merged', rev_id=to_merge_key[0])
+ stacked_tree.merge_from_branch(to_be_merged_tree.branch)
+ merged_key = ('merged-rev-id',)
+ stacked_tree.commit('merge', rev_id=merged_key[0])
+ # Compared with test_merge_commit, the fetch for 'merge_from_branch' should
+ # already have handled that 'ghost-rev-id' is a ghost, and commit
+ # should not try to fill it in at this point.
+ stacked_only_repo = self.get_only_repo(stacked_tree)
+ all_keys = [self.r1_key, self.r2_key, r3_key, to_merge_key, merged_key,
+ ghost_key]
+ self.assertPresent([to_merge_key, merged_key],
+ stacked_only_repo.revisions, all_keys)
+ self.assertPresent([self.r2_key, r3_key, to_merge_key, merged_key],
+ stacked_only_repo.inventories, all_keys)
+
+
+class TestCommitStackedFailsAppropriately(TestCaseWithStackedTarget):
+
+ def test_stacked_commit_fails_on_old_formats(self):
+ base_tree, stacked_tree = self.make_stacked_target()
+ format = stacked_tree.branch.repository._format
+ if format.supports_chks:
+ stacked_tree.commit('should succeed')
+ else:
+ self.assertRaises(errors.BzrError,
+ stacked_tree.commit, 'unsupported format')
diff --git a/bzrlib/tests/per_repository_reference/test_default_stacking.py b/bzrlib/tests/per_repository_reference/test_default_stacking.py
new file mode 100644
index 0000000..e6d00cd
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_default_stacking.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestDefaultStackingPolicy(TestCaseWithRepository):
+
+ def test_sprout_to_smart_server_stacking_policy_handling(self):
+ """Obey policy where possible, ignore otherwise."""
+ stack_on = self.make_branch('stack-on')
+ parent_bzrdir = self.make_bzrdir('.', format='default')
+ parent_bzrdir.get_config().set_default_stack_on('stack-on')
+ source = self.make_branch('source')
+ url = self.make_smart_server('target').abspath('')
+ target = source.bzrdir.sprout(url).open_branch()
+ self.assertEqual('../stack-on', target.get_stacked_on_url())
+ self.assertEqual(
+ source._format.network_name(), target._format.network_name())
+
diff --git a/bzrlib/tests/per_repository_reference/test_fetch.py b/bzrlib/tests/per_repository_reference/test_fetch.py
new file mode 100644
index 0000000..fff129b
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_fetch.py
@@ -0,0 +1,186 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ branch,
+ vf_search,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestFetchBase(TestCaseWithRepository):
+
+ def make_source_branch(self):
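+ """Make a 'source' branch with three revisions (A-id, B-id, C-id) that build up the content of file 'a'."""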
+ # It would be nice if there was a way to force this to be memory-only
+ builder = self.make_branch_builder('source')
+ content = ['content lines\n'
+ 'for the first revision\n'
+ 'which is a marginal amount of content\n'
+ ]
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('a', 'a-id', 'file', ''.join(content))),
+ ])
+ content.append('and some more lines for B\n')
+ builder.build_snapshot('B-id', ['A-id'], [
+ ('modify', ('a-id', ''.join(content)))])
+ content.append('and yet even more content for C\n')
+ builder.build_snapshot('C-id', ['B-id'], [
+ ('modify', ('a-id', ''.join(content)))])
+ builder.finish_series()
+ source_b = builder.get_branch()
+ source_b.lock_read()
+ self.addCleanup(source_b.unlock)
+ return content, source_b
+
+
+class TestFetch(TestFetchBase):
+
+ def test_sprout_from_stacked_with_short_history(self):
+ content, source_b = self.make_source_branch()
+ # Split the generated content into a base branch, and a stacked branch
+ # Use 'make_branch' which gives us a bzr:// branch when appropriate,
+ # rather than creating a branch-on-disk
+ stack_b = self.make_branch('stack-on')
+ stack_b.pull(source_b, stop_revision='B-id')
+ target_b = self.make_branch('target')
+ target_b.set_stacked_on_url('../stack-on')
+ target_b.pull(source_b, stop_revision='C-id')
+ # At this point, we should have a target branch, with 1 revision, on
+ # top of the source.
+ final_b = self.make_branch('final')
+ final_b.pull(target_b)
+ final_b.lock_read()
+ self.addCleanup(final_b.unlock)
+ self.assertEqual('C-id', final_b.last_revision())
+ text_keys = [('a-id', 'A-id'), ('a-id', 'B-id'), ('a-id', 'C-id')]
+ stream = final_b.repository.texts.get_record_stream(text_keys,
+ 'unordered', True)
+ records = sorted([(r.key, r.get_bytes_as('fulltext')) for r in stream])
+ self.assertEqual([
+ (('a-id', 'A-id'), ''.join(content[:-2])),
+ (('a-id', 'B-id'), ''.join(content[:-1])),
+ (('a-id', 'C-id'), ''.join(content)),
+ ], records)
+
+ def test_sprout_from_smart_stacked_with_short_history(self):
+ content, source_b = self.make_source_branch()
+ transport = self.make_smart_server('server')
+ transport.ensure_base()
+ url = transport.abspath('')
+ stack_b = source_b.bzrdir.sprout(url + '/stack-on', revision_id='B-id')
+ # self.make_branch only takes relative paths, so we do it the 'hard'
+ # way
+ target_transport = transport.clone('target')
+ target_transport.ensure_base()
+ target_bzrdir = self.bzrdir_format.initialize_on_transport(
+ target_transport)
+ target_bzrdir.create_repository()
+ target_b = target_bzrdir.create_branch()
+ target_b.set_stacked_on_url('../stack-on')
+ target_b.pull(source_b, stop_revision='C-id')
+ # Now we should be able to branch from the remote location to a local
+ # location
+ final_b = target_b.bzrdir.sprout('final').open_branch()
+ self.assertEqual('C-id', final_b.last_revision())
+
+ # bzrdir.sprout() has slightly different code paths if you supply a
+ # revision_id versus not. If you supply revision_id, then you get a
+ # PendingAncestryResult for the search, versus a SearchResult...
+ final2_b = target_b.bzrdir.sprout('final2',
+ revision_id='C-id').open_branch()
+ self.assertEqual('C-id', final2_b.last_revision())
+
+ def make_source_with_ghost_and_stacked_target(self):
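+ """Make a source branch whose tip (B-id) has a ghost parent, plus a
+ 'base' branch holding A-id and an empty 'stacked' branch on top of it.
+ """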
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
+ builder.finish_series()
+ source_b = builder.get_branch()
+ source_b.lock_read()
+ self.addCleanup(source_b.unlock)
+ base = self.make_branch('base')
+ base.pull(source_b, stop_revision='A-id')
+ stacked = self.make_branch('stacked')
+ stacked.set_stacked_on_url('../base')
+ return source_b, base, stacked
+
+ def test_fetch_with_ghost_stacked(self):
+ (source_b, base,
+ stacked) = self.make_source_with_ghost_and_stacked_target()
+ stacked.pull(source_b, stop_revision='B-id')
+
+ def test_fetch_into_smart_stacked_with_ghost(self):
+ (source_b, base,
+ stacked) = self.make_source_with_ghost_and_stacked_target()
+ # Now, create a smart server on 'stacked' and re-open to force the
+ # target to be a smart target
+ trans = self.make_smart_server('stacked')
+ stacked = branch.Branch.open(trans.base)
+ stacked.lock_write()
+ self.addCleanup(stacked.unlock)
+ stacked.pull(source_b, stop_revision='B-id')
+
+ def test_fetch_to_stacked_from_smart_with_ghost(self):
+ (source_b, base,
+ stacked) = self.make_source_with_ghost_and_stacked_target()
+ # Now, create a smart server on 'source' and re-open to force the
+ # target to be a smart target
+ trans = self.make_smart_server('source')
+ source_b = branch.Branch.open(trans.base)
+ source_b.lock_read()
+ self.addCleanup(source_b.unlock)
+ stacked.pull(source_b, stop_revision='B-id')
+
+
+class TestFetchFromRepoWithUnconfiguredFallbacks(TestFetchBase):
+
+ def make_stacked_source_repo(self):
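+ """Return the repository of a 'stacked' branch (on 'stack-on', which holds up to B-id) pulled up to C-id."""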
+ _, source_b = self.make_source_branch()
+ # Use 'make_branch' which gives us a bzr:// branch when appropriate,
+ # rather than creating a branch-on-disk
+ stack_b = self.make_branch('stack-on')
+ stack_b.pull(source_b, stop_revision='B-id')
+ stacked_b = self.make_branch('stacked')
+ stacked_b.set_stacked_on_url('../stack-on')
+ stacked_b.pull(source_b, stop_revision='C-id')
+ return stacked_b.repository
+
+ def test_fetch_everything_includes_parent_invs(self):
+ stacked = self.make_stacked_source_repo()
+ repo_missing_fallbacks = stacked.bzrdir.open_repository()
+ self.addCleanup(repo_missing_fallbacks.lock_read().unlock)
+ target = self.make_repository('target')
+ self.addCleanup(target.lock_write().unlock)
+ target.fetch(
+ repo_missing_fallbacks,
+ fetch_spec=vf_search.EverythingResult(repo_missing_fallbacks))
+ self.assertEqual(repo_missing_fallbacks.revisions.keys(),
+ target.revisions.keys())
+ self.assertEqual(repo_missing_fallbacks.inventories.keys(),
+ target.inventories.keys())
+ self.assertEqual(['C-id'],
+ sorted(k[-1] for k in target.revisions.keys()))
+ self.assertEqual(['B-id', 'C-id'],
+ sorted(k[-1] for k in target.inventories.keys()))
+
+
+
diff --git a/bzrlib/tests/per_repository_reference/test_get_record_stream.py b/bzrlib/tests/per_repository_reference/test_get_record_stream.py
new file mode 100644
index 0000000..2d0369c
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_get_record_stream.py
@@ -0,0 +1,201 @@
+# Copyright (C) 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests that get_record_stream() behaves itself properly when stacked."""
+
+from bzrlib import (
+ errors,
+ knit,
+ )
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestGetRecordStream(TestCaseWithExternalReferenceRepository):
+
+ def setUp(self):
+ super(TestGetRecordStream, self).setUp()
+ builder = self.make_branch_builder('all')
+ builder.start_series()
+ # Graph of revisions:
+ #
+ # A
+ # |\
+ # B C
+ # |/|
+ # D E
+ # |\|
+ # F G
+ # These can be split up among the different repos as desired
+ #
+
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'f-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('B', ['A'], [
+ ('modify', ('f-id', 'initial content\n'
+ 'and B content\n')),
+ ])
+ builder.build_snapshot('C', ['A'], [
+ ('modify', ('f-id', 'initial content\n'
+ 'and C content\n')),
+ ])
+ builder.build_snapshot('D', ['B', 'C'], [
+ ('modify', ('f-id', 'initial content\n'
+ 'and B content\n'
+ 'and C content\n')),
+ ])
+ builder.build_snapshot('E', ['C'], [
+ ('modify', ('f-id', 'initial content\n'
+ 'and C content\n'
+ 'and E content\n')),
+ ])
+ builder.build_snapshot('F', ['D'], [
+ ('modify', ('f-id', 'initial content\n'
+ 'and B content\n'
+ 'and C content\n'
+ 'and F content\n')),
+ ])
+ builder.build_snapshot('G', ['E', 'D'], [
+ ('modify', ('f-id', 'initial content\n'
+ 'and B content\n'
+ 'and C content\n'
+ 'and E content\n')),
+ ])
+ builder.finish_series()
+ self.all_repo = builder.get_branch().repository
+ self.all_repo.lock_read()
+ self.addCleanup(self.all_repo.unlock)
+ self.base_repo = self.make_repository('base')
+ self.stacked_repo = self.make_referring('referring', self.base_repo)
+
+ def make_simple_split(self):
+ """Set up the repositories so that everything is in base except F"""
+ self.base_repo.fetch(self.all_repo, revision_id='G')
+ self.stacked_repo.fetch(self.all_repo, revision_id='F')
+
+ def make_complex_split(self):
+ """intermix the revisions so that base holds left stacked holds right.
+
+ base will hold
+ A B D F (and C because it is a parent of D)
+ referring will hold
+ C E G (only)
+ """
+ self.base_repo.fetch(self.all_repo, revision_id='B')
+ self.stacked_repo.fetch(self.all_repo, revision_id='C')
+ self.base_repo.fetch(self.all_repo, revision_id='F')
+ self.stacked_repo.fetch(self.all_repo, revision_id='G')
+
+ def test_unordered_fetch_simple_split(self):
+ self.make_simple_split()
+ keys = [('f-id', r) for r in 'ABCDF']
+ self.stacked_repo.lock_read()
+ self.addCleanup(self.stacked_repo.unlock)
+ stream = self.stacked_repo.texts.get_record_stream(
+ keys, 'unordered', False)
+ record_keys = set()
+ for record in stream:
+ if record.storage_kind == 'absent':
+ raise ValueError('absent record: %s' % (record.key,))
+ record_keys.add(record.key)
+ # everything should be present; we don't care about the order
+ self.assertEqual(keys, sorted(record_keys))
+
+ def test_unordered_fetch_complex_split(self):
+ self.make_complex_split()
+ keys = [('f-id', r) for r in 'ABCDEG']
+ self.stacked_repo.lock_read()
+ self.addCleanup(self.stacked_repo.unlock)
+ stream = self.stacked_repo.texts.get_record_stream(
+ keys, 'unordered', False)
+ record_keys = set()
+ for record in stream:
+ if record.storage_kind == 'absent':
+ raise ValueError('absent record: %s' % (record.key,))
+ record_keys.add(record.key)
+ # everything should be present; we don't care about the order
+ self.assertEqual(keys, sorted(record_keys))
+
+ def test_ordered_no_closure(self):
+ self.make_complex_split()
+ # Topological ordering allows B & C and D & E to be returned with
+ # either one first, so the required ordering is:
+ # [A (B C) (D E) G]
+ keys = [('f-id', r) for r in 'ABCDEG']
+ alt_1 = [('f-id', r) for r in 'ACBDEG']
+ alt_2 = [('f-id', r) for r in 'ABCEDG']
+ alt_3 = [('f-id', r) for r in 'ACBEDG']
+ self.stacked_repo.lock_read()
+ self.addCleanup(self.stacked_repo.unlock)
+ stream = self.stacked_repo.texts.get_record_stream(
+ keys, 'topological', False)
+ record_keys = []
+ for record in stream:
+ if record.storage_kind == 'absent':
+ raise ValueError('absent record: %s' % (record.key,))
+ record_keys.append(record.key)
+ self.assertTrue(record_keys in (keys, alt_1, alt_2, alt_3))
+
+ def test_ordered_fulltext_simple(self):
+ self.make_simple_split()
+ # This is a common case in asking to annotate a file that exists on a
+ # stacked branch.
+ # See https://bugs.launchpad.net/bzr/+bug/393366
+ # Topological ordering allows B & C and D & E to be returned with
+ # either one first, so the required ordering is:
+ # [A (B C) D F]
+ keys = [('f-id', r) for r in 'ABCDF']
+ alt_1 = [('f-id', r) for r in 'ACBDF']
+ self.stacked_repo.lock_read()
+ self.addCleanup(self.stacked_repo.unlock)
+ stream = self.stacked_repo.texts.get_record_stream(
+ keys, 'topological', True)
+ record_keys = []
+ for record in stream:
+ if record.storage_kind == 'absent':
+ raise ValueError('absent record: %s' % (record.key,))
+ record_keys.append(record.key)
+ self.assertTrue(record_keys in (keys, alt_1))
+
+ def test_ordered_fulltext_complex(self):
+ self.make_complex_split()
+ # Topological ordering allows B & C and D & E to be returned with
+ # either one first, so the required ordering is:
+ # [A (B C) (D E) G]
+ keys = [('f-id', r) for r in 'ABCDEG']
+ alt_1 = [('f-id', r) for r in 'ACBDEG']
+ alt_2 = [('f-id', r) for r in 'ABCEDG']
+ alt_3 = [('f-id', r) for r in 'ACBEDG']
+ self.stacked_repo.lock_read()
+ self.addCleanup(self.stacked_repo.unlock)
+ stream = self.stacked_repo.texts.get_record_stream(
+ keys, 'topological', True)
+ record_keys = []
+ for record in stream:
+ if record.storage_kind == 'absent':
+ raise ValueError('absent record: %s' % (record.key,))
+ record_keys.append(record.key)
+ # Note that currently --2a format repositories do this correctly, but
+ # KnitPack format repositories do not.
+ if isinstance(self.stacked_repo.texts, knit.KnitVersionedFiles):
+ # See https://bugs.launchpad.net/bzr/+bug/399884
+ self.expectFailure('KVF does not weave fulltexts from fallback'
+ ' repositories to preserve perfect order',
+ self.assertTrue, record_keys in (keys, alt_1, alt_2, alt_3))
+ self.assertTrue(record_keys in (keys, alt_1, alt_2, alt_3))
diff --git a/bzrlib/tests/per_repository_reference/test_get_rev_id_for_revno.py b/bzrlib/tests/per_repository_reference/test_get_rev_id_for_revno.py
new file mode 100644
index 0000000..f160bf6
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_get_rev_id_for_revno.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for get_rev_id_for_revno on a repository with external references."""
+
+from bzrlib import errors
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+class TestGetRevIdForRevno(TestCaseWithExternalReferenceRepository):
+
+ def test_uses_fallback(self):
+ tree = self.make_branch_and_tree('base')
+ base = tree.branch.repository
+ revid = tree.commit('one')
+ revid2 = tree.commit('two')
+ spare_tree = tree.bzrdir.sprout('spare').open_workingtree()
+ revid3 = spare_tree.commit('three')
+ branch = spare_tree.branch.create_clone_on_transport(
+ self.get_transport('referring'),
+ stacked_on=tree.branch.base)
+ repo = branch.repository
+ # Sanity check: now repo has 'revid3', and base has 'revid' + 'revid2'
+ self.assertEqual(set([revid3]),
+ set(repo.bzrdir.open_repository().all_revision_ids()))
+ self.assertEqual(set([revid2, revid]),
+ set(base.bzrdir.open_repository().all_revision_ids()))
+ # get_rev_id_for_revno will find revno 1 == 'revid', even though
+ # that revision can only be found in the fallback.
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual(
+ (True, revid), repo.get_rev_id_for_revno(1, (3, revid3)))
diff --git a/bzrlib/tests/per_repository_reference/test_graph.py b/bzrlib/tests/per_repository_reference/test_graph.py
new file mode 100644
index 0000000..e60dab9
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_graph.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for graph operations on stacked repositories."""
+
+
+from bzrlib import (
+ remote,
+ tests,
+ urlutils,
+ )
+from bzrlib.tests.per_repository import TestCaseWithRepository
+
+
+class TestGraph(TestCaseWithRepository):
+
+ def test_get_known_graph_ancestry_stacked(self):
+ """get_known_graph_ancestry works correctly on stacking.
+
+ See <https://bugs.launchpad.net/bugs/715000>.
+ """
+ branch_a, branch_b, branch_c, revid_1 = self.make_double_stacked_branches()
+ for br in [branch_a, branch_b, branch_c]:
+ self.assertEquals(
+ [revid_1],
+ br.repository.get_known_graph_ancestry([revid_1]).topo_sort())
+
+ def make_double_stacked_branches(self):
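+ """Make branch 'a' with one commit, 'b' stacked on 'a' and 'c' stacked on 'b'."""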
+ wt_a = self.make_branch_and_tree('a')
+ branch_a = wt_a.branch
+ branch_b = self.make_branch('b')
+ branch_b.set_stacked_on_url(
+ urlutils.relative_url(branch_b.base, branch_a.base))
+ branch_c = self.make_branch('c')
+ branch_c.set_stacked_on_url(
+ urlutils.relative_url(branch_c.base, branch_b.base))
+ revid_1 = wt_a.commit('first commit')
+ return branch_a, branch_b, branch_c, revid_1
+
+ def make_stacked_branch_with_long_history(self):
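+ """Make a 'master' branch holding A-E and a 'stacked' branch (on master) holding F locally."""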
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None))])
+ builder.build_snapshot('B', ['A'], [])
+ builder.build_snapshot('C', ['B'], [])
+ builder.build_snapshot('D', ['C'], [])
+ builder.build_snapshot('E', ['D'], [])
+ builder.build_snapshot('F', ['E'], [])
+ source_b = builder.get_branch()
+ master_b = self.make_branch('master')
+ master_b.pull(source_b, stop_revision='E')
+ stacked_b = self.make_branch('stacked')
+ stacked_b.set_stacked_on_url('../master')
+ stacked_b.pull(source_b, stop_revision='F')
+ builder.finish_series()
+ return master_b, stacked_b
+
+ def assertParentMapCalls(self, expected):
+ """Check that self.hpss_calls has the expected get_parent_map calls."""
+ get_parent_map_calls = []
+ for c in self.hpss_calls:
+ # Right now, the only RPCs that get called are get_parent_map. If
+ # this changes in the future, we can change this to:
+ # if c.call.method != 'Repository.get_parent_map':
+ # continue
+ self.assertEqual('Repository.get_parent_map', c.call.method)
+ args = c.call.args
+ location = args[0]
+ self.assertEqual('include-missing:', args[1])
+ revisions = sorted(args[2:])
+ get_parent_map_calls.append((location, revisions))
+ self.assertEqual(expected, get_parent_map_calls)
+
+ def test_doesnt_call_get_parent_map_on_all_fallback_revs(self):
+ if not isinstance(self.repository_format,
+ remote.RemoteRepositoryFormat):
+ raise tests.TestNotApplicable('only for RemoteRepository')
+ # bug #388269
+ master_b, stacked_b = self.make_stacked_branch_with_long_history()
+ self.addCleanup(stacked_b.lock_read().unlock)
+ self.make_repository('target_repo', shared=True)
+ target_b = self.make_branch('target_repo/branch')
+ self.addCleanup(target_b.lock_write().unlock)
+ self.setup_smart_server_with_call_log()
+ res = target_b.repository.search_missing_revision_ids(
+ stacked_b.repository, revision_ids=['F'],
+ find_ghosts=False)
+ self.assertParentMapCalls([
+ # One call to stacked to start, which returns F=>E, and that E
+ # itself is missing, so when we step, we won't look for it.
+ ('extra/stacked/', ['F']),
+ # One fallback call to extra/master, which will return the rest of
+ # the history.
+ ('extra/master/', ['E']),
+ # And then one get_parent_map call to the target, to see if it
+ # already has any of these revisions.
+ ('extra/target_repo/branch/', ['A', 'B', 'C', 'D', 'E', 'F']),
+ ])
+ # Before bug #388269 was fixed, there would be a bunch of extra calls
+ # to 'extra/stacked', ['D'] then ['C'], then ['B'], then ['A'].
+ # One-at-a-time for the rest of the ancestry.
diff --git a/bzrlib/tests/per_repository_reference/test_initialize.py b/bzrlib/tests/per_repository_reference/test_initialize.py
new file mode 100644
index 0000000..d8260f1
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_initialize.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for initializing a repository with external references."""
+
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestInitialize(TestCaseWithExternalReferenceRepository):
+
+ def initialize_and_check_on_transport(self, base, trans):
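+ """Initialize a repository stacked on 'base' at trans and check that it gets exactly one fallback repository."""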
+ network_name = base.repository._format.network_name()
+ result = self.bzrdir_format.initialize_on_transport_ex(
+ trans, use_existing_dir=False, create_prefix=False,
+ stacked_on='../base', stack_on_pwd=base.base,
+ repo_format_name=network_name)
+ result_repo, a_bzrdir, require_stacking, repo_policy = result
+ self.addCleanup(result_repo.unlock)
+ self.assertEqual(1, len(result_repo._fallback_repositories))
+ return result_repo
+
+ def test_initialize_on_transport_ex(self):
+ base = self.make_branch('base')
+ trans = self.get_transport('stacked')
+ repo = self.initialize_and_check_on_transport(base, trans)
+ self.assertEqual(base.repository._format.network_name(),
+ repo._format.network_name())
+
+ def test_remote_initialize_on_transport_ex(self):
+ # All formats can be initialized appropriately over bzr://
+ base = self.make_branch('base')
+ trans = self.make_smart_server('stacked')
+ repo = self.initialize_and_check_on_transport(base, trans)
+ network_name = base.repository._format.network_name()
+ self.assertEqual(network_name, repo._format.network_name())
diff --git a/bzrlib/tests/per_repository_reference/test_unlock.py b/bzrlib/tests/per_repository_reference/test_unlock.py
new file mode 100644
index 0000000..2736c4d
--- /dev/null
+++ b/bzrlib/tests/per_repository_reference/test_unlock.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for locking/unlocking a repository with external references."""
+
+from bzrlib import (
+ branch,
+ errors,
+ tests,
+ )
+from bzrlib.tests.per_repository_reference import (
+ TestCaseWithExternalReferenceRepository,
+ )
+
+
+class TestUnlock(TestCaseWithExternalReferenceRepository):
+
+ def create_stacked_branch(self):
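+ """Make 'base' holding A-id and B-id and 'stacked' (on base) holding C-id locally; return (base, stacked)."""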
+ builder = self.make_branch_builder('source',
+ format=self.bzrdir_format)
+ builder.start_series()
+ repo = builder.get_branch().repository
+ if not repo._format.supports_external_lookups:
+ raise tests.TestNotApplicable('format does not support stacking')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'contents\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [
+ ('modify', ('file-id', 'new-content\n'))])
+ builder.build_snapshot('C-id', ['B-id'], [
+ ('modify', ('file-id', 'yet more content\n'))])
+ builder.finish_series()
+ source_b = builder.get_branch()
+ source_b.lock_read()
+ self.addCleanup(source_b.unlock)
+ base = self.make_branch('base')
+ base.pull(source_b, stop_revision='B-id')
+ stacked = self.make_branch('stacked')
+ stacked.set_stacked_on_url('../base')
+ stacked.pull(source_b, stop_revision='C-id')
+
+ return base, stacked
+
+ def test_unlock_unlocks_fallback(self):
+ base = self.make_branch('base')
+ stacked = self.make_branch('stacked')
+ repo = stacked.repository
+ stacked.set_stacked_on_url('../base')
+ self.assertEqual(1, len(repo._fallback_repositories))
+ fallback_repo = repo._fallback_repositories[0]
+ self.assertFalse(repo.is_locked())
+ self.assertFalse(fallback_repo.is_locked())
+ repo.lock_read()
+ self.assertTrue(repo.is_locked())
+ self.assertTrue(fallback_repo.is_locked())
+ repo.unlock()
+ self.assertFalse(repo.is_locked())
+ self.assertFalse(fallback_repo.is_locked())
+ repo.lock_write()
+ self.assertTrue(repo.is_locked())
+ self.assertTrue(fallback_repo.is_locked())
+ repo.unlock()
+ self.assertFalse(repo.is_locked())
+ self.assertFalse(fallback_repo.is_locked())
diff --git a/bzrlib/tests/per_repository_vf/__init__.py b/bzrlib/tests/per_repository_vf/__init__.py
new file mode 100644
index 0000000..7b1461a
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/__init__.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Implementation tests for VersionedFile-based repositories.
+
+For more generic per-repository tests, see bzrlib.tests.per_repository.
+"""
+
+from bzrlib.tests.per_repository import (
+ all_repository_format_scenarios,
+ TestCaseWithRepository,
+ )
+
+
+def all_repository_vf_format_scenarios():
+ scenarios = []
+ for test_name, scenario_info in all_repository_format_scenarios():
+ format = scenario_info['repository_format']
+ if format.supports_full_versioned_files:
+ scenarios.append((test_name, scenario_info))
+ return scenarios
+
+
+def load_tests(basic_tests, module, loader):
+ testmod_names = [
+ 'test_add_inventory_by_delta',
+ 'test_check',
+ 'test_check_reconcile',
+ 'test_find_text_key_references',
+ 'test__generate_text_key_index',
+ 'test_fetch',
+ 'test_fileid_involved',
+ 'test_merge_directive',
+ 'test_reconcile',
+ 'test_refresh_data',
+ 'test_repository',
+ 'test_write_group',
+ ]
+ basic_tests.addTest(loader.loadTestsFromModuleNames(
+ ["%s.%s" % (__name__, tmn) for tmn in testmod_names]))
+ return basic_tests
diff --git a/bzrlib/tests/per_repository_vf/helpers.py b/bzrlib/tests/per_repository_vf/helpers.py
new file mode 100644
index 0000000..02496a4
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/helpers.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Helper classes for repository implementation tests."""
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ inventory,
+ osutils,
+ revision as _mod_revision,
+ )
+from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
+from bzrlib.tests.per_repository import TestCaseWithRepository
+from bzrlib.tests import TestNotApplicable
+
+
+class TestCaseWithBrokenRevisionIndex(TestCaseWithRepository):
+
+ def make_repo_with_extra_ghost_index(self):
+ """Make a corrupt repository.
+
+ It will contain one revision, 'revision-id'. The knit index will claim
+ that it has one parent, 'incorrect-parent', but the revision text will
+ claim it has no parents.
+
+ Note: only the *cache* of the knit index is corrupted. Thus the
+ corruption will only last while the repository is locked. For this
+ reason, the returned repo is locked.
+ """
+ if not isinstance(self.repository_format, RepositoryFormatKnit):
+ # XXX: Broken revision graphs can happen to weaves too, but they're
+ # pretty deprecated. Ideally these tests should apply to any repo
+ # where repo.revision_graph_can_have_wrong_parents() is True, but
+ # at the moment we only know how to corrupt knit repos.
+ raise TestNotApplicable(
+ "%s isn't a knit format" % self.repository_format)
+
+ repo = self.make_repository('broken')
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ inv = inventory.Inventory(revision_id='revision-id')
+ inv.root.revision = 'revision-id'
+ inv_sha1 = repo.add_inventory('revision-id', inv, [])
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((root_id, 'revision-id'), [], [])
+ revision = _mod_revision.Revision('revision-id',
+ committer='jrandom@example.com', timestamp=0,
+ inventory_sha1=inv_sha1, timezone=0, message='message',
+ parent_ids=[])
+ # Manually add the revision text using the RevisionStore API, with
+ # bad parents.
+ rev_text = repo._serializer.write_revision_to_string(revision)
+ repo.revisions.add_lines((revision.revision_id,),
+ [('incorrect-parent',)],
+ osutils.split_lines(rev_text))
+ except:
+ repo.abort_write_group()
+ repo.unlock()
+ raise
+ else:
+ repo.commit_write_group()
+ repo.unlock()
+
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ return repo
+
diff --git a/bzrlib/tests/per_repository_vf/test__generate_text_key_index.py b/bzrlib/tests/per_repository_vf/test__generate_text_key_index.py
new file mode 100644
index 0000000..23a34b6
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test__generate_text_key_index.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the _generate_text_key_index API."""
+
+
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestGenerateTextKeyIndex(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_empty(self):
+ repo = self.make_repository('.')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({}, repo._generate_text_key_index())
diff --git a/bzrlib/tests/per_repository_vf/test_add_inventory_by_delta.py b/bzrlib/tests/per_repository_vf/test_add_inventory_by_delta.py
new file mode 100644
index 0000000..a950eb6
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_add_inventory_by_delta.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Repository.add_inventory_by_delta."""
+
+from bzrlib import errors, revision
+from bzrlib.tests.per_repository_vf import (
+ all_repository_vf_format_scenarios,
+ TestCaseWithRepository
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestAddInventoryByDelta(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def _get_repo_in_write_group(self, path='repository'):
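+ """Make a repository at path, lock it for write and start a write group."""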
+ repo = self.make_repository(path)
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ return repo
+
+ def test_basis_missing_errors(self):
+ repo = self._get_repo_in_write_group()
+ try:
+ self.assertRaises(errors.NoSuchRevision,
+ repo.add_inventory_by_delta, "missing-revision", [],
+ "new-revision", ["missing-revision"])
+ finally:
+ repo.abort_write_group()
+
+ def test_not_in_write_group_errors(self):
+ repo = self.make_repository('repository')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ self.assertRaises(AssertionError, repo.add_inventory_by_delta,
+ "missing-revision", [], "new-revision", ["missing-revision"])
+
+ def make_inv_delta(self, old, new):
+ """Make an inventory delta from two inventories."""
+ by_id = getattr(old, '_byid', None)
+ if by_id is None:
+ old_ids = set(entry.file_id for entry in old.iter_just_entries())
+ else:
+ old_ids = set(by_id)
+ by_id = getattr(new, '_byid', None)
+ if by_id is None:
+ new_ids = set(entry.file_id for entry in new.iter_just_entries())
+ else:
+ new_ids = set(by_id)
+
+ adds = new_ids - old_ids
+ deletes = old_ids - new_ids
+ common = old_ids.intersection(new_ids)
+ delta = []
+ for file_id in deletes:
+ delta.append((old.id2path(file_id), None, file_id, None))
+ for file_id in adds:
+ delta.append((None, new.id2path(file_id), file_id, new[file_id]))
+ for file_id in common:
+ if old[file_id] != new[file_id]:
+ delta.append((old.id2path(file_id), new.id2path(file_id),
+ file_id, new[file_id]))
+ return delta
+
+ def test_same_validator(self):
+ # Adding an inventory via delta or direct results in the same
+ # validator.
+ tree = self.make_branch_and_tree('tree')
+ revid = tree.commit("empty post")
+ # tree.basis_tree() always uses a plain Inventory from the dirstate; we
+ # want the same format of inventory as we have in the repository
+ revtree = tree.branch.repository.revision_tree(
+ tree.branch.last_revision())
+ tree.basis_tree()
+ revtree.lock_read()
+ self.addCleanup(revtree.unlock)
+ old_inv = tree.branch.repository.revision_tree(revision.NULL_REVISION).root_inventory
+ new_inv = revtree.root_inventory
+ delta = self.make_inv_delta(old_inv, new_inv)
+ repo_direct = self._get_repo_in_write_group('direct')
+ add_validator = repo_direct.add_inventory(revid, new_inv, [])
+ repo_direct.commit_write_group()
+ repo_delta = self._get_repo_in_write_group('delta')
+ try:
+ delta_validator, inv = repo_delta.add_inventory_by_delta(
+ revision.NULL_REVISION, delta, revid, [])
+ except:
+ repo_delta.abort_write_group()
+ raise
+ else:
+ repo_delta.commit_write_group()
+ self.assertEqual(add_validator, delta_validator)
+ self.assertEqual(list(new_inv.iter_entries()), list(inv.iter_entries()))
diff --git a/bzrlib/tests/per_repository_vf/test_check.py b/bzrlib/tests/per_repository_vf/test_check.py
new file mode 100644
index 0000000..57d7cec
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_check.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test operations that check the repository for corruption"""
+
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.tests import (
+ TestNotApplicable,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.per_repository_vf.helpers import (
+ TestCaseWithBrokenRevisionIndex,
+ )
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestFindInconsistentRevisionParents(TestCaseWithBrokenRevisionIndex):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test__find_inconsistent_revision_parents(self):
+ """_find_inconsistent_revision_parents finds revisions with broken
+ parents.
+ """
+ repo = self.make_repo_with_extra_ghost_index()
+ self.assertEqual(
+ [('revision-id', ('incorrect-parent',), ())],
+ list(repo._find_inconsistent_revision_parents()))
+
+ def test__check_for_inconsistent_revision_parents(self):
+ """_check_for_inconsistent_revision_parents raises BzrCheckError if
+ there are any revisions with inconsistent parents.
+ """
+ repo = self.make_repo_with_extra_ghost_index()
+ self.assertRaises(
+ errors.BzrCheckError,
+ repo._check_for_inconsistent_revision_parents)
+
+ def test__check_for_inconsistent_revision_parents_on_clean_repo(self):
+ """_check_for_inconsistent_revision_parents does nothing if there are
+ no broken revisions.
+ """
+ repo = self.make_repository('empty-repo')
+ if not repo._format.revision_graph_can_have_wrong_parents:
+ raise TestNotApplicable(
+ '%r cannot have corrupt revision index.' % repo)
+ repo.lock_read()
+ try:
+ repo._check_for_inconsistent_revision_parents() # nothing happens
+ finally:
+ repo.unlock()
+
+ def test_check_reports_bad_ancestor(self):
+ repo = self.make_repo_with_extra_ghost_index()
+ # XXX: check requires a non-empty revision IDs list, but it ignores the
+ # contents of it!
+ check_object = repo.check(['ignored'])
+ check_object.report_results(verbose=False)
+ self.assertContainsRe(self.get_log(),
+ '1 revisions have incorrect parents in the revision index')
+ check_object.report_results(verbose=True)
+ self.assertContainsRe(
+ self.get_log(),
+ "revision-id has wrong parents in index: "
+ r"\('incorrect-parent',\) should be \(\)")
+
+
+class TestCallbacks(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_callback_tree_and_branch(self):
+ # use a real tree to get actual refs that will work
+ tree = self.make_branch_and_tree('foo')
+ revid = tree.commit('foo')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ needed_refs = {}
+ for ref in tree._get_check_refs():
+ needed_refs.setdefault(ref, []).append(tree)
+ for ref in tree.branch._get_check_refs():
+ needed_refs.setdefault(ref, []).append(tree.branch)
+ self.tree_check = tree._check
+ self.branch_check = tree.branch.check
+ self.overrideAttr(tree, "_check", self.tree_callback)
+ self.overrideAttr(tree.branch, "check", self.branch_callback)
+ self.callbacks = []
+ tree.branch.repository.check([revid], callback_refs=needed_refs)
+ self.assertNotEqual([], self.callbacks)
+
+ def tree_callback(self, refs):
+ self.callbacks.append(('tree', refs))
+ return self.tree_check(refs)
+
+ def branch_callback(self, refs):
+ self.callbacks.append(('branch', refs))
+ return self.branch_check(refs)
+
+
+class TestNoSpuriousInconsistentAncestors(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_two_files_different_versions_no_inconsistencies_bug_165071(self):
+ """Two files, with different versions can be clean."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.smart_add(['.'])
+ revid1 = tree.commit('1')
+ self.build_tree(['bar'])
+ tree.smart_add(['.'])
+ revid2 = tree.commit('2')
+ check_object = tree.branch.repository.check([revid1, revid2])
+ check_object.report_results(verbose=True)
+ self.assertContainsRe(self.get_log(), "0 unreferenced text versions")
+
+
+
diff --git a/bzrlib/tests/per_repository_vf/test_check_reconcile.py b/bzrlib/tests/per_repository_vf/test_check_reconcile.py
new file mode 100644
index 0000000..8afc8bf
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_check_reconcile.py
@@ -0,0 +1,978 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests that use BrokenRepoScenario objects.
+
+That is, tests for reconcile and check.
+"""
+
+from bzrlib import osutils
+
+from bzrlib.inventory import (
+ Inventory,
+ InventoryFile,
+ )
+from bzrlib.revision import (
+ NULL_REVISION,
+ Revision,
+ )
+from bzrlib.tests import (
+ TestNotApplicable,
+ multiply_scenarios,
+ )
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class BrokenRepoScenario(object):
+ """Base class for defining scenarios for testing check and reconcile.
+
+ A subclass needs to define the following methods:
+ :populate_repository: a method to use to populate a repository with
+ sample revisions, inventories and file versions.
+ :all_versions_after_reconcile: all the versions in repository after
+ reconcile. run_test verifies that the text of each of these
+ versions of the file is unchanged by the reconcile.
+ :populated_parents: a list of (parents list, revision). Each version
+ of the file is verified to have the given parents before running
+ the reconcile. i.e. this is used to assert that the repo from the
+ factory is what we expect.
+ :corrected_parents: a list of (parents list, revision). Each version
+ of the file is verified to have the given parents after the
+ reconcile. i.e. this is used to assert that reconcile made the
+ changes we expect it to make.
+
+ A subclass may define the following optional method as well:
+ :corrected_fulltexts: a list of file versions that should be stored as
+ fulltexts (not deltas) after reconcile. run_test will verify that
+ this occurs.
+ """
+
+ def __init__(self, test_case):
+ self.test_case = test_case
+
+ def make_one_file_inventory(self, repo, revision, parents,
+ inv_revision=None, root_revision=None,
+ file_contents=None, make_file_version=True):
+ return self.test_case.make_one_file_inventory(
+ repo, revision, parents, inv_revision=inv_revision,
+ root_revision=root_revision, file_contents=file_contents,
+ make_file_version=make_file_version)
+
+ def add_revision(self, repo, revision_id, inv, parent_ids):
+ return self.test_case.add_revision(repo, revision_id, inv, parent_ids)
+
+ def corrected_fulltexts(self):
+ return []
+
+ def repository_text_key_index(self):
+ result = {}
+ if self.versioned_root:
+ result.update(self.versioned_repository_text_keys())
+ result.update(self.repository_text_keys())
+ return result
+
+
+class UndamagedRepositoryScenario(BrokenRepoScenario):
+ """A scenario where the repository has no damage.
+
+ It has a single revision, 'rev1a', with a single file.
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('rev1a', )
+
+ def populated_parents(self):
+ return (((), 'rev1a'), )
+
+ def corrected_parents(self):
+ # Same as the populated parents, because there was nothing wrong.
+ return self.populated_parents()
+
+ def check_regexes(self, repo):
+ return ["0 unreferenced text versions"]
+
+ def populate_repository(self, repo):
+ # make rev1a: A well-formed revision, containing 'a-file'
+ inv = self.make_one_file_inventory(
+ repo, 'rev1a', [], root_revision='rev1a')
+ self.add_revision(repo, 'rev1a', inv, [])
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'rev1a'): True})
+ result.update({('a-file-id', 'rev1a'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'rev1a'):[NULL_REVISION]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'rev1a'):[NULL_REVISION]}
+
+
+class FileParentIsNotInRevisionAncestryScenario(BrokenRepoScenario):
+ """A scenario where a revision 'rev2' has 'a-file' with a
+ parent 'rev1b' that is not in the revision ancestry.
+
+ Reconcile should remove 'rev1b' from the parents list of 'a-file' in
+ 'rev2', preserving 'rev1a' as a parent.
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('rev1a', 'rev2')
+
+ def populated_parents(self):
+ return (
+ ((), 'rev1a'),
+ ((), 'rev1b'), # Will be gc'd
+ (('rev1a', 'rev1b'), 'rev2')) # Will have parents trimmed
+
+ def corrected_parents(self):
+ return (
+ ((), 'rev1a'),
+ (None, 'rev1b'),
+ (('rev1a',), 'rev2'))
+
+ def check_regexes(self, repo):
+ return [r"\* a-file-id version rev2 has parents \('rev1a', 'rev1b'\) "
+ r"but should have \('rev1a',\)",
+ "1 unreferenced text versions",
+ ]
+
+ def populate_repository(self, repo):
+ # make rev1a: A well-formed revision, containing 'a-file'
+ inv = self.make_one_file_inventory(
+ repo, 'rev1a', [], root_revision='rev1a')
+ self.add_revision(repo, 'rev1a', inv, [])
+
+ # make rev1b, which has no Revision, but has an Inventory, and
+ # a-file
+ inv = self.make_one_file_inventory(
+ repo, 'rev1b', [], root_revision='rev1b')
+ repo.add_inventory('rev1b', inv, [])
+
+ # make rev2, with a-file.
+ # a-file has 'rev1b' as an ancestor, even though 'rev1b' is not in
+ # rev2's revision ancestry, making it an unreferenced ancestor
+ inv = self.make_one_file_inventory(
+ repo, 'rev2', ['rev1a', 'rev1b'])
+ self.add_revision(repo, 'rev2', inv, ['rev1a'])
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'rev1a'): True,
+ ('TREE_ROOT', 'rev2'): True})
+ result.update({('a-file-id', 'rev1a'): True,
+ ('a-file-id', 'rev2'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'rev1a'):[NULL_REVISION],
+ ('a-file-id', 'rev2'):[('a-file-id', 'rev1a')]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'rev1a'):[NULL_REVISION],
+ ('TREE_ROOT', 'rev2'):[('TREE_ROOT', 'rev1a')]}
+
+
+class FileParentHasInaccessibleInventoryScenario(BrokenRepoScenario):
+ """A scenario where a revision 'rev3' containing 'a-file' modified in
+ 'rev3', and with a parent which is in the revision ancestory, but whose
+ inventory cannot be accessed at all.
+
+ Reconcile should remove the file version parent whose inventory is
+ inaccessible (i.e. remove 'rev1c' from the parents of a-file's rev3).
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('rev2', 'rev3')
+
+ def populated_parents(self):
+ return (
+ ((), 'rev2'),
+ (('rev1c',), 'rev3'))
+
+ def corrected_parents(self):
+ return (
+ ((), 'rev2'),
+ ((), 'rev3'))
+
+ def check_regexes(self, repo):
+ return [r"\* a-file-id version rev3 has parents "
+ r"\('rev1c',\) but should have \(\)",
+ ]
+
+ def populate_repository(self, repo):
+ # make rev2, with a-file
+ # a-file is sane
+ inv = self.make_one_file_inventory(repo, 'rev2', [])
+ self.add_revision(repo, 'rev2', inv, [])
+
+ # make ghost revision rev1c, with a version of a-file present so
+ # that we generate a knit delta against this version. In real life
+ # the ghost might never have been present or rev3 might have been
+ # generated against a revision that was present at the time. So
+ # currently we have the full history of a-file present even though
+ # the inventory and revision objects are not.
+ self.make_one_file_inventory(repo, 'rev1c', [])
+
+ # make rev3 with a-file
+ # a-file refers to 'rev1c', which is a ghost in this repository, so
+ # a-file cannot have rev1c as its ancestor.
+ inv = self.make_one_file_inventory(repo, 'rev3', ['rev1c'])
+ self.add_revision(repo, 'rev3', inv, ['rev1c', 'rev1a'])
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'rev2'): True,
+ ('TREE_ROOT', 'rev3'): True})
+ result.update({('a-file-id', 'rev2'): True,
+ ('a-file-id', 'rev3'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'rev2'):[NULL_REVISION],
+ ('a-file-id', 'rev3'):[NULL_REVISION]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'rev2'):[NULL_REVISION],
+ ('TREE_ROOT', 'rev3'):[NULL_REVISION]}
+
+
+class FileParentsNotReferencedByAnyInventoryScenario(BrokenRepoScenario):
+ """A scenario where a repository with file 'a-file' which has extra
+ per-file versions that are not referenced by any inventory (even though
+ they have the same ID as actual revisions). The inventory of 'rev2'
+ references 'rev1a' of 'a-file', but there is a 'rev2' of 'some-file' stored
+ and erroneously referenced by later per-file versions (revisions 'rev4' and
+ 'rev5').
+
+ Reconcile should remove the file parents that are not referenced by any
+ inventory.
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('rev1a', 'rev2c', 'rev4', 'rev5')
+
+ def populated_parents(self):
+ return [
+ (('rev1a',), 'rev2'),
+ (('rev1a',), 'rev2b'),
+ (('rev2',), 'rev3'),
+ (('rev2',), 'rev4'),
+ (('rev2', 'rev2c'), 'rev5')]
+
+ def corrected_parents(self):
+ return (
+ # rev2 and rev2b have been removed.
+ (None, 'rev2'),
+ (None, 'rev2b'),
+ # rev3's accessible parent inventories all have rev1a as the last
+ # modifier.
+ (('rev1a',), 'rev3'),
+ # rev1a features in both rev4's parents but should only appear once
+ # in the result
+ (('rev1a',), 'rev4'),
+ # rev2c is the head of rev1a and rev2c, the per-file last-modified
+ # revisions provided by the inventories.
+ (('rev2c',), 'rev5'))
+
+ def check_regexes(self, repo):
+ if repo.supports_rich_root():
+ # TREE_ROOT will be wrong, but we're not testing it, so just adjust
+ # the expected count of errors.
+ count = 9
+ else:
+ count = 3
+ return [
+ # will be gc'd
+ r"unreferenced version: {rev2} in a-file-id",
+ r"unreferenced version: {rev2b} in a-file-id",
+ # will be corrected
+ r"a-file-id version rev3 has parents \('rev2',\) "
+ r"but should have \('rev1a',\)",
+ r"a-file-id version rev5 has parents \('rev2', 'rev2c'\) "
+ r"but should have \('rev2c',\)",
+ r"a-file-id version rev4 has parents \('rev2',\) "
+ r"but should have \('rev1a',\)",
+ "%d inconsistent parents" % count,
+ ]
+
+ def populate_repository(self, repo):
+ # make rev1a: A well-formed revision, containing 'a-file'
+ inv = self.make_one_file_inventory(
+ repo, 'rev1a', [], root_revision='rev1a')
+ self.add_revision(repo, 'rev1a', inv, [])
+
+ # make rev2, with a-file.
+ # a-file is unmodified from rev1a, and an unreferenced rev2 file
+ # version is present in the repository.
+ self.make_one_file_inventory(
+ repo, 'rev2', ['rev1a'], inv_revision='rev1a')
+ self.add_revision(repo, 'rev2', inv, ['rev1a'])
+
+ # make rev3 with a-file
+ # a-file has 'rev2' as its ancestor, but the file revision recorded in
+ # rev2's inventory was rev1a, so this is inconsistent with rev2's
+ # inventory - the parent should be rev1a. At the revision level rev1c
+ # is not present - it is a ghost, so only the details from rev1a are
+ # available for determining whether a delta is acceptable, or a full
+ # text is needed, and what the correct parents are.
+ inv = self.make_one_file_inventory(repo, 'rev3', ['rev2'])
+ self.add_revision(repo, 'rev3', inv, ['rev1c', 'rev1a'])
+
+ # In rev2b, the true last-modifying-revision of a-file is rev1a,
+ # inherited from rev2, but there is a version rev2b of the file, which
+ # reconcile could remove, leaving no rev2b. Most importantly,
+ # revisions descending from rev2b should not have per-file parents of
+ # a-file-rev2b.
+ # ??? This is to test deduplication in fixing rev4
+ inv = self.make_one_file_inventory(
+ repo, 'rev2b', ['rev1a'], inv_revision='rev1a')
+ self.add_revision(repo, 'rev2b', inv, ['rev1a'])
+
+ # rev4 tests that when the last-modified revision of a file is the
+ # same in multiple parent revisions, it only appears once in the
+ # generated per-file parents list: rev2 and rev2b both descend from
+ # rev1a and do not change the file a-file, so there should be no
+ # version of a-file 'rev2' or 'rev2b', but rev4 does change a-file,
+ # and is a merge of rev2 and rev2b, so it should end up with a parent
+ # of just rev1a - the starting file parents list is simply completely
+ # wrong.
+ inv = self.make_one_file_inventory(repo, 'rev4', ['rev2'])
+ self.add_revision(repo, 'rev4', inv, ['rev2', 'rev2b'])
+
+ # rev2c changes a-file from rev1a, so the version of a-file it
+ # introduces is a head revision when rev5 is checked.
+ inv = self.make_one_file_inventory(repo, 'rev2c', ['rev1a'])
+ self.add_revision(repo, 'rev2c', inv, ['rev1a'])
+
+ # rev5 descends from rev2 and rev2c; as rev2 does not alter a-file,
+ # but rev2c does, this should use rev2c as the parent for the per
+ # file history, even though more than one per-file parent is
+ # available, because we use the heads of the revision parents for
+ # the inventory modification revisions of the file to determine the
+ # parents for the per file graph.
+ inv = self.make_one_file_inventory(repo, 'rev5', ['rev2', 'rev2c'])
+ self.add_revision(repo, 'rev5', inv, ['rev2', 'rev2c'])
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'rev1a'): True,
+ ('TREE_ROOT', 'rev2'): True,
+ ('TREE_ROOT', 'rev2b'): True,
+ ('TREE_ROOT', 'rev2c'): True,
+ ('TREE_ROOT', 'rev3'): True,
+ ('TREE_ROOT', 'rev4'): True,
+ ('TREE_ROOT', 'rev5'): True})
+ result.update({('a-file-id', 'rev1a'): True,
+ ('a-file-id', 'rev2c'): True,
+ ('a-file-id', 'rev3'): True,
+ ('a-file-id', 'rev4'): True,
+ ('a-file-id', 'rev5'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'rev1a'): [NULL_REVISION],
+ ('a-file-id', 'rev2c'): [('a-file-id', 'rev1a')],
+ ('a-file-id', 'rev3'): [('a-file-id', 'rev1a')],
+ ('a-file-id', 'rev4'): [('a-file-id', 'rev1a')],
+ ('a-file-id', 'rev5'): [('a-file-id', 'rev2c')]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'rev1a'): [NULL_REVISION],
+ ('TREE_ROOT', 'rev2'): [('TREE_ROOT', 'rev1a')],
+ ('TREE_ROOT', 'rev2b'): [('TREE_ROOT', 'rev1a')],
+ ('TREE_ROOT', 'rev2c'): [('TREE_ROOT', 'rev1a')],
+ ('TREE_ROOT', 'rev3'): [('TREE_ROOT', 'rev1a')],
+ ('TREE_ROOT', 'rev4'):
+ [('TREE_ROOT', 'rev2'), ('TREE_ROOT', 'rev2b')],
+ ('TREE_ROOT', 'rev5'):
+ [('TREE_ROOT', 'rev2'), ('TREE_ROOT', 'rev2c')]}
+
+
+class UnreferencedFileParentsFromNoOpMergeScenario(BrokenRepoScenario):
+ """
+ rev1a and rev1b have identical contents for a-file.
+ rev2 has parents [rev1a, rev1b].
+ There is an a-file:rev2 file version, not referenced by the inventory.
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('rev1a', 'rev1b', 'rev2', 'rev4')
+
+ def populated_parents(self):
+ return (
+ ((), 'rev1a'),
+ ((), 'rev1b'),
+ (('rev1a', 'rev1b'), 'rev2'),
+ (None, 'rev3'),
+ (('rev2',), 'rev4'),
+ )
+
+ def corrected_parents(self):
+ return (
+ ((), 'rev1a'),
+ ((), 'rev1b'),
+ ((), 'rev2'),
+ (None, 'rev3'),
+ (('rev2',), 'rev4'),
+ )
+
+ def corrected_fulltexts(self):
+ return ['rev2']
+
+ def check_regexes(self, repo):
+ return []
+
+ def populate_repository(self, repo):
+ # make rev1a: A well-formed revision, containing 'a-file'
+ inv1a = self.make_one_file_inventory(
+ repo, 'rev1a', [], root_revision='rev1a')
+ self.add_revision(repo, 'rev1a', inv1a, [])
+
+ # make rev1b: A well-formed revision, containing 'a-file'
+ # rev1b of a-file has the exact same contents as rev1a.
+ file_contents = repo.texts.get_record_stream([('a-file-id', 'rev1a')],
+ "unordered", False).next().get_bytes_as('fulltext')
+ inv = self.make_one_file_inventory(
+ repo, 'rev1b', [], root_revision='rev1b',
+ file_contents=file_contents)
+ self.add_revision(repo, 'rev1b', inv, [])
+
+ # make rev2, a merge of rev1a and rev1b, with a-file.
+ # a-file is unmodified from rev1a and rev1b, but a new version is
+ # wrongly present anyway.
+ inv = self.make_one_file_inventory(
+ repo, 'rev2', ['rev1a', 'rev1b'], inv_revision='rev1a',
+ file_contents=file_contents)
+ self.add_revision(repo, 'rev2', inv, ['rev1a', 'rev1b'])
+
+ # rev3: a-file unchanged from rev2, but wrongly referencing rev2 of the
+ # file in its inventory.
+ inv = self.make_one_file_inventory(
+ repo, 'rev3', ['rev2'], inv_revision='rev2',
+ file_contents=file_contents, make_file_version=False)
+ self.add_revision(repo, 'rev3', inv, ['rev2'])
+
+ # rev4: a modification of a-file on top of rev3.
+ inv = self.make_one_file_inventory(repo, 'rev4', ['rev2'])
+ self.add_revision(repo, 'rev4', inv, ['rev3'])
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'rev1a'): True,
+ ('TREE_ROOT', 'rev1b'): True,
+ ('TREE_ROOT', 'rev2'): True,
+ ('TREE_ROOT', 'rev3'): True,
+ ('TREE_ROOT', 'rev4'): True})
+ result.update({('a-file-id', 'rev1a'): True,
+ ('a-file-id', 'rev1b'): True,
+ ('a-file-id', 'rev2'): False,
+ ('a-file-id', 'rev4'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'rev1a'): [NULL_REVISION],
+ ('a-file-id', 'rev1b'): [NULL_REVISION],
+ ('a-file-id', 'rev2'): [NULL_REVISION],
+ ('a-file-id', 'rev4'): [('a-file-id', 'rev2')]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'rev1a'): [NULL_REVISION],
+ ('TREE_ROOT', 'rev1b'): [NULL_REVISION],
+ ('TREE_ROOT', 'rev2'):
+ [('TREE_ROOT', 'rev1a'), ('TREE_ROOT', 'rev1b')],
+ ('TREE_ROOT', 'rev3'): [('TREE_ROOT', 'rev2')],
+ ('TREE_ROOT', 'rev4'): [('TREE_ROOT', 'rev3')]}
+
+
+class TooManyParentsScenario(BrokenRepoScenario):
+ """A scenario where 'broken-revision' of 'a-file' claims to have parents
+ ['good-parent', 'bad-parent']. However 'bad-parent' is in the ancestry of
+ 'good-parent', so the correct parent list for that file version is just
+ ['good-parent'].
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('bad-parent', 'good-parent', 'broken-revision')
+
+ def populated_parents(self):
+ return (
+ ((), 'bad-parent'),
+ (('bad-parent',), 'good-parent'),
+ (('good-parent', 'bad-parent'), 'broken-revision'))
+
+ def corrected_parents(self):
+ return (
+ ((), 'bad-parent'),
+ (('bad-parent',), 'good-parent'),
+ (('good-parent',), 'broken-revision'))
+
+ def check_regexes(self, repo):
+ if repo.supports_rich_root():
+ # TREE_ROOT will be wrong, but we're not testing it, so just adjust
+ # the expected count of errors.
+ count = 3
+ else:
+ count = 1
+ return (
+ ' %d inconsistent parents' % count,
+ (r" \* a-file-id version broken-revision has parents "
+ r"\('good-parent', 'bad-parent'\) but "
+ r"should have \('good-parent',\)"))
+
+ def populate_repository(self, repo):
+ inv = self.make_one_file_inventory(
+ repo, 'bad-parent', (), root_revision='bad-parent')
+ self.add_revision(repo, 'bad-parent', inv, ())
+
+ inv = self.make_one_file_inventory(
+ repo, 'good-parent', ('bad-parent',))
+ self.add_revision(repo, 'good-parent', inv, ('bad-parent',))
+
+ inv = self.make_one_file_inventory(
+ repo, 'broken-revision', ('good-parent', 'bad-parent'))
+ self.add_revision(repo, 'broken-revision', inv, ('good-parent',))
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'bad-parent'): True,
+ ('TREE_ROOT', 'broken-revision'): True,
+ ('TREE_ROOT', 'good-parent'): True})
+ result.update({('a-file-id', 'bad-parent'): True,
+ ('a-file-id', 'broken-revision'): True,
+ ('a-file-id', 'good-parent'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'bad-parent'): [NULL_REVISION],
+ ('a-file-id', 'broken-revision'):
+ [('a-file-id', 'good-parent')],
+ ('a-file-id', 'good-parent'): [('a-file-id', 'bad-parent')]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'bad-parent'): [NULL_REVISION],
+ ('TREE_ROOT', 'broken-revision'):
+ [('TREE_ROOT', 'good-parent')],
+ ('TREE_ROOT', 'good-parent'): [('TREE_ROOT', 'bad-parent')]}
+
+
+class ClaimedFileParentDidNotModifyFileScenario(BrokenRepoScenario):
+ """A scenario where the file parent is the same as the revision parent, but
+ should not be because that revision did not modify the file.
+
+ Specifically, the parent revision of 'current' is
+ 'modified-something-else', which does not modify 'a-file', but the
+ 'current' version of 'a-file' erroneously claims that
+ 'modified-something-else' is the parent file version.
+ """
+
+ def all_versions_after_reconcile(self):
+ return ('basis', 'current')
+
+ def populated_parents(self):
+ return (
+ ((), 'basis'),
+ (('basis',), 'modified-something-else'),
+ (('modified-something-else',), 'current'))
+
+ def corrected_parents(self):
+ return (
+ ((), 'basis'),
+ (None, 'modified-something-else'),
+ (('basis',), 'current'))
+
+ def check_regexes(self, repo):
+ if repo.supports_rich_root():
+ # TREE_ROOT will be wrong, but we're not testing it, so just adjust
+ # the expected count of errors.
+ count = 3
+ else:
+ count = 1
+ return (
+ "%d inconsistent parents" % count,
+ r"\* a-file-id version current has parents "
+ r"\('modified-something-else',\) but should have \('basis',\)",
+ )
+
+ def populate_repository(self, repo):
+ inv = self.make_one_file_inventory(repo, 'basis', ())
+ self.add_revision(repo, 'basis', inv, ())
+
+ # 'modified-something-else' is a correctly recorded revision, but it
+ # does not modify the file we are looking at, so the inventory for that
+ # file in this revision points to 'basis'.
+ inv = self.make_one_file_inventory(
+ repo, 'modified-something-else', ('basis',), inv_revision='basis')
+ self.add_revision(repo, 'modified-something-else', inv, ('basis',))
+
+ # The 'current' revision has 'modified-something-else' as its parent,
+ # but the 'current' version of 'a-file' should have 'basis' as its
+ # parent.
+ inv = self.make_one_file_inventory(
+ repo, 'current', ('modified-something-else',))
+ self.add_revision(repo, 'current', inv, ('modified-something-else',))
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'basis'): True,
+ ('TREE_ROOT', 'current'): True,
+ ('TREE_ROOT', 'modified-something-else'): True})
+ result.update({('a-file-id', 'basis'): True,
+ ('a-file-id', 'current'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'basis'): [NULL_REVISION],
+ ('a-file-id', 'current'): [('a-file-id', 'basis')]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'basis'): ['null:'],
+ ('TREE_ROOT', 'current'):
+ [('TREE_ROOT', 'modified-something-else')],
+ ('TREE_ROOT', 'modified-something-else'):
+ [('TREE_ROOT', 'basis')]}
+
+
+class IncorrectlyOrderedParentsScenario(BrokenRepoScenario):
+ """A scenario where the set parents of a version of a file are correct, but
+ the order of those parents is incorrect.
+
+ This defines a 'broken-revision-1-2' and a 'broken-revision-2-1' which both
+ have their file version parents reversed compared to the revision parents,
+ which is invalid. (We use two revisions with opposite orderings of the
+ same parents to make sure that accidentally relying on dictionary/set
+ ordering cannot make the test pass; the assumption is that while dict/set
+ iteration order is arbitrary, it is also consistent within a single test).
+ """
+
+ def all_versions_after_reconcile(self):
+ return ['parent-1', 'parent-2', 'broken-revision-1-2',
+ 'broken-revision-2-1']
+
+ def populated_parents(self):
+ return (
+ ((), 'parent-1'),
+ ((), 'parent-2'),
+ (('parent-2', 'parent-1'), 'broken-revision-1-2'),
+ (('parent-1', 'parent-2'), 'broken-revision-2-1'))
+
+ def corrected_parents(self):
+ return (
+ ((), 'parent-1'),
+ ((), 'parent-2'),
+ (('parent-1', 'parent-2'), 'broken-revision-1-2'),
+ (('parent-2', 'parent-1'), 'broken-revision-2-1'))
+
+ def check_regexes(self, repo):
+ if repo.supports_rich_root():
+ # TREE_ROOT will be wrong, but we're not testing it, so just adjust
+ # the expected count of errors.
+ count = 4
+ else:
+ count = 2
+ return (
+ "%d inconsistent parents" % count,
+ r"\* a-file-id version broken-revision-1-2 has parents "
+ r"\('parent-2', 'parent-1'\) but should have "
+ r"\('parent-1', 'parent-2'\)",
+ r"\* a-file-id version broken-revision-2-1 has parents "
+ r"\('parent-1', 'parent-2'\) but should have "
+ r"\('parent-2', 'parent-1'\)")
+
+ def populate_repository(self, repo):
+ inv = self.make_one_file_inventory(repo, 'parent-1', [])
+ self.add_revision(repo, 'parent-1', inv, [])
+
+ inv = self.make_one_file_inventory(repo, 'parent-2', [])
+ self.add_revision(repo, 'parent-2', inv, [])
+
+ inv = self.make_one_file_inventory(
+ repo, 'broken-revision-1-2', ['parent-2', 'parent-1'])
+ self.add_revision(
+ repo, 'broken-revision-1-2', inv, ['parent-1', 'parent-2'])
+
+ inv = self.make_one_file_inventory(
+ repo, 'broken-revision-2-1', ['parent-1', 'parent-2'])
+ self.add_revision(
+ repo, 'broken-revision-2-1', inv, ['parent-2', 'parent-1'])
+ self.versioned_root = repo.supports_rich_root()
+
+ def repository_text_key_references(self):
+ result = {}
+ if self.versioned_root:
+ result.update({('TREE_ROOT', 'broken-revision-1-2'): True,
+ ('TREE_ROOT', 'broken-revision-2-1'): True,
+ ('TREE_ROOT', 'parent-1'): True,
+ ('TREE_ROOT', 'parent-2'): True})
+ result.update({('a-file-id', 'broken-revision-1-2'): True,
+ ('a-file-id', 'broken-revision-2-1'): True,
+ ('a-file-id', 'parent-1'): True,
+ ('a-file-id', 'parent-2'): True})
+ return result
+
+ def repository_text_keys(self):
+ return {('a-file-id', 'broken-revision-1-2'):
+ [('a-file-id', 'parent-1'), ('a-file-id', 'parent-2')],
+ ('a-file-id', 'broken-revision-2-1'):
+ [('a-file-id', 'parent-2'), ('a-file-id', 'parent-1')],
+ ('a-file-id', 'parent-1'): [NULL_REVISION],
+ ('a-file-id', 'parent-2'): [NULL_REVISION]}
+
+ def versioned_repository_text_keys(self):
+ return {('TREE_ROOT', 'broken-revision-1-2'):
+ [('TREE_ROOT', 'parent-1'), ('TREE_ROOT', 'parent-2')],
+ ('TREE_ROOT', 'broken-revision-2-1'):
+ [('TREE_ROOT', 'parent-2'), ('TREE_ROOT', 'parent-1')],
+ ('TREE_ROOT', 'parent-1'): [NULL_REVISION],
+ ('TREE_ROOT', 'parent-2'): [NULL_REVISION]}
+
+
+all_broken_scenario_classes = [
+ UndamagedRepositoryScenario,
+ FileParentIsNotInRevisionAncestryScenario,
+ FileParentHasInaccessibleInventoryScenario,
+ FileParentsNotReferencedByAnyInventoryScenario,
+ TooManyParentsScenario,
+ ClaimedFileParentDidNotModifyFileScenario,
+ IncorrectlyOrderedParentsScenario,
+ UnreferencedFileParentsFromNoOpMergeScenario,
+ ]
+
+
+def broken_scenarios_for_all_formats():
+ format_scenarios = all_repository_vf_format_scenarios()
+ # test_check_reconcile needs to be parameterized by format *and* by broken
+ # repository scenario.
+ broken_scenarios = [(s.__name__, {'scenario_class': s})
+ for s in all_broken_scenario_classes]
+ return multiply_scenarios(format_scenarios, broken_scenarios)
+
+
+class TestFileParentReconciliation(TestCaseWithRepository):
+ """Tests for how reconcile corrects errors in parents of file versions."""
+
+ scenarios = broken_scenarios_for_all_formats()
+
+ def make_populated_repository(self, factory):
+ """Create a new repository populated by the given factory."""
+ repo = self.make_repository('broken-repo')
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ factory(repo)
+ repo.commit_write_group()
+ return repo
+ except:
+ repo.abort_write_group()
+ raise
+ finally:
+ repo.unlock()
+
+ def add_revision(self, repo, revision_id, inv, parent_ids):
+ """Add a revision with a given inventory and parents to a repository.
+
+ :param repo: a repository.
+ :param revision_id: the revision ID for the new revision.
+ :param inv: an inventory (such as created by
+ `make_one_file_inventory`).
+ :param parent_ids: the parents for the new revision.
+ """
+ inv.revision_id = revision_id
+ inv.root.revision = revision_id
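+ # Rich-root formats version the tree root as a text, so give the root
+ # an (empty) text before adding the inventory.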
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((root_id, revision_id), [], [])
+ repo.add_inventory(revision_id, inv, parent_ids)
+ revision = Revision(revision_id, committer='jrandom@example.com',
+ timestamp=0, inventory_sha1='', timezone=0, message='foo',
+ parent_ids=parent_ids)
+ repo.add_revision(revision_id, revision, inv)
+
+ def make_one_file_inventory(self, repo, revision, parents,
+ inv_revision=None, root_revision=None,
+ file_contents=None, make_file_version=True):
+ """Make an inventory containing a version of a file with ID 'a-file'.
+
+ The file's ID will be 'a-file-id', and its filename will be 'a file name',
+ stored at the tree root.
+
+ :param repo: a repository to add the new file version to.
+ :param revision: the revision ID of the new inventory.
+ :param parents: the parents for this revision of 'a-file'.
+ :param inv_revision: if not None, the revision ID to store in the
+ inventory entry. Otherwise, this defaults to revision.
+ :param root_revision: if not None, the inventory's root.revision will
+ be set to this.
+ :param file_contents: if not None, the contents of this file version.
+ Otherwise a unique default (based on revision ID) will be
+ generated.
+ :param make_file_version: if True (the default), add the file text to
+ the repository's text store; if False, only the inventory entry is
+ created.
+ """
+ inv = Inventory(revision_id=revision)
+ if root_revision is not None:
+ inv.root.revision = root_revision
+ file_id = 'a-file-id'
+ entry = InventoryFile(file_id, 'a file name', 'TREE_ROOT')
+ if inv_revision is not None:
+ entry.revision = inv_revision
+ else:
+ entry.revision = revision
+ entry.text_size = 0
+ if file_contents is None:
+ file_contents = '%sline\n' % entry.revision
+ entry.text_sha1 = osutils.sha_string(file_contents)
+ inv.add(entry)
+ if make_file_version:
+ repo.texts.add_lines((file_id, revision),
+ [(file_id, parent) for parent in parents], [file_contents])
+ return inv
+
+ def require_repo_suffers_text_parent_corruption(self, repo):
+ if not repo._reconcile_fixes_text_parents:
+ raise TestNotApplicable(
+ "Format does not support text parent reconciliation")
+
+ def file_parents(self, repo, revision_id):
+ key = ('a-file-id', revision_id)
+ parent_map = repo.texts.get_parent_map([key])
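+ # Text keys are (file_id, revision_id) tuples; return only the
+ # revision ids of the parents.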
+ return tuple(parent[-1] for parent in parent_map[key])
+
+ def assertFileVersionAbsent(self, repo, revision_id):
+ self.assertEqual({},
+ repo.texts.get_parent_map([('a-file-id', revision_id)]))
+
+ def assertParentsMatch(self, expected_parents_for_versions, repo,
+ when_description):
+ for expected_parents, version in expected_parents_for_versions:
+ if expected_parents is None:
+ self.assertFileVersionAbsent(repo, version)
+ else:
+ found_parents = self.file_parents(repo, version)
+ self.assertEqual(expected_parents, found_parents,
+ "%s reconcile %s has parents %s, should have %s."
+ % (when_description, version, found_parents,
+ expected_parents))
+
+ def prepare_test_repository(self):
+ """Prepare a repository to test with from the test scenario.
+
+ :return: A repository, and the scenario instance.
+ """
+ scenario = self.scenario_class(self)
+ repo = self.make_populated_repository(scenario.populate_repository)
+ self.require_repo_suffers_text_parent_corruption(repo)
+ return repo, scenario
+
+ def shas_for_versions_of_file(self, repo, versions):
+ """Get the SHA-1 hashes of the versions of 'a-file' in the repository.
+
+ :param repo: the repository to get the hashes from.
+ :param versions: a list of versions to get hashes for.
+
+ :returns: A dict of `{version: hash}`.
+ """
+ keys = [('a-file-id', version) for version in versions]
+ return repo.texts.get_sha1s(keys)
+
+ def test_reconcile_behaviour(self):
+ """Populate a repository and reconcile it, verifying the state before
+ and after.
+ """
+ repo, scenario = self.prepare_test_repository()
+ repo.lock_read()
+ try:
+ self.assertParentsMatch(scenario.populated_parents(), repo,
+ 'before')
+ vf_shas = self.shas_for_versions_of_file(
+ repo, scenario.all_versions_after_reconcile())
+ finally:
+ repo.unlock()
+ result = repo.reconcile(thorough=True)
+ repo.lock_read()
+ try:
+ self.assertParentsMatch(scenario.corrected_parents(), repo,
+ 'after')
+ # The contents of the versions in the versionedfile should be the
+ # same after the reconcile.
+ self.assertEqual(
+ vf_shas,
+ self.shas_for_versions_of_file(
+ repo, scenario.all_versions_after_reconcile()))
+
+ # Scenario.corrected_fulltexts contains texts which the test wants
+ # to assert are now fulltexts. However this is an abstraction
+ # violation; really we care that:
+ # - the text is reconstructable
+ # - it has an empty parents list
+ # (we specify it this way because a store can use arbitrary
+ # compression pointers in principle.)
+ for file_version in scenario.corrected_fulltexts():
+ key = ('a-file-id', file_version)
+ self.assertEqual({key:()}, repo.texts.get_parent_map([key]))
+ self.assertIsInstance(
+ repo.texts.get_record_stream([key], 'unordered',
+ True).next().get_bytes_as('fulltext'),
+ str)
+ finally:
+ repo.unlock()
+
+ def test_check_behaviour(self):
+ """Populate a repository and check it, and verify the output."""
+ repo, scenario = self.prepare_test_repository()
+ check_result = repo.check()
+ check_result.report_results(verbose=True)
+ log = self.get_log()
+ for pattern in scenario.check_regexes(repo):
+ self.assertContainsRe(log, pattern)
+
+ def test_find_text_key_references(self):
+ """Test that find_text_key_references finds erroneous references."""
+ repo, scenario = self.prepare_test_repository()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual(scenario.repository_text_key_references(),
+ repo.find_text_key_references())
+
+ def test__generate_text_key_index(self):
+ """Test that the generated text key index has all entries."""
+ repo, scenario = self.prepare_test_repository()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual(scenario.repository_text_key_index(),
+ repo._generate_text_key_index())
diff --git a/bzrlib/tests/per_repository_vf/test_fetch.py b/bzrlib/tests/per_repository_vf/test_fetch.py
new file mode 100644
index 0000000..9fd9a0f
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_fetch.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for fetch between repositories of the same type."""
+
+from bzrlib import (
+ vf_search,
+ )
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestSource(TestCaseWithRepository):
+ """Tests for/about the results of Repository._get_source."""
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_no_absent_records_in_stream_with_ghosts(self):
+ # XXX: Arguably should be in per_interrepository but
+ # doesn't actually gain coverage there; need a specific set of
+ # permutations to cover it.
+ # bug lp:376255 was reported about this.
+ builder = self.make_branch_builder('repo')
+ builder.start_series()
+ builder.build_snapshot('tip', ['ghost'],
+ [('add', ('', 'ROOT_ID', 'directory', ''))],
+ allow_leftmost_as_ghost=True)
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ repo = b.repository
+ source = repo._get_source(repo._format)
+ search = vf_search.PendingAncestryResult(['tip'], repo)
+ stream = source.get_stream(search)
+ for substream_type, substream in stream:
+ for record in substream:
+ self.assertNotEqual('absent', record.storage_kind,
+ "Absent record for %s" % (((substream_type,) + record.key),))
diff --git a/bzrlib/tests/per_repository_vf/test_fileid_involved.py b/bzrlib/tests/per_repository_vf/test_fileid_involved.py
new file mode 100644
index 0000000..6f07902
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_fileid_involved.py
@@ -0,0 +1,432 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import sys
+import time
+
+from bzrlib import (
+ errors,
+ inventory,
+ remote,
+ revision as _mod_revision,
+ tests,
+ transform,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class FileIdInvolvedWGhosts(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def create_branch_with_ghost_text(self):
+ builder = self.make_branch_builder('ghost')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('a', 'a-file-id', 'file', 'some content\n'))])
+ b = builder.get_branch()
+ old_rt = b.repository.revision_tree('A-id')
+ new_inv = inventory.mutable_inventory_from_tree(old_rt)
+ new_inv.revision_id = 'B-id'
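+ # Point a-file's last-modified revision at a ghost so that its text
+ # parent is not present in the repository.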
+ new_inv['a-file-id'].revision = 'ghost-id'
+ new_rev = _mod_revision.Revision('B-id',
+ timestamp=time.time(),
+ timezone=0,
+ message='Committing against a ghost',
+ committer='Joe Foo <joe@foo.com>',
+ properties={},
+ parent_ids=('A-id', 'ghost-id'),
+ )
+ b.lock_write()
+ self.addCleanup(b.unlock)
+ b.repository.start_write_group()
+ b.repository.add_revision('B-id', new_rev, new_inv)
+ self.disable_commit_write_group_paranoia(b.repository)
+ b.repository.commit_write_group()
+ return b
+
+ def disable_commit_write_group_paranoia(self, repo):
+ if isinstance(repo, remote.RemoteRepository):
+ # We can't easily disable the checks in a remote repo.
+ repo.abort_write_group()
+ raise tests.TestSkipped(
+ "repository format does not support storing revisions with "
+ "missing texts.")
+ pack_coll = getattr(repo, '_pack_collection', None)
+ if pack_coll is not None:
+ # Monkey-patch the pack collection instance to allow storing
+ # incomplete revisions.
+ pack_coll._check_new_inventories = lambda: []
+
+ def test_file_ids_include_ghosts(self):
+ b = self.create_branch_with_ghost_text()
+ repo = b.repository
+ self.assertEqual(
+ {'a-file-id':set(['ghost-id'])},
+ repo.fileids_altered_by_revision_ids(['B-id']))
+
+ def test_file_ids_uses_fallbacks(self):
+ builder = self.make_branch_builder('source',
+ format=self.bzrdir_format)
+ repo = builder.get_branch().repository
+ if not repo._format.supports_external_lookups:
+ raise tests.TestNotApplicable('format does not support stacking')
+ builder.start_series()
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'contents\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [
+ ('modify', ('file-id', 'new-content\n'))])
+ builder.build_snapshot('C-id', ['B-id'], [
+ ('modify', ('file-id', 'yet more content\n'))])
+ builder.finish_series()
+ source_b = builder.get_branch()
+ source_b.lock_read()
+ self.addCleanup(source_b.unlock)
+ base = self.make_branch('base')
+ base.pull(source_b, stop_revision='B-id')
+ stacked = self.make_branch('stacked')
+ stacked.set_stacked_on_url('../base')
+ stacked.pull(source_b, stop_revision='C-id')
+
+ stacked.lock_read()
+ self.addCleanup(stacked.unlock)
+ repo = stacked.repository
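+ # 'A-id' should only be present in the fallback ('base') repository,
+ # so the lookup has to follow the stacking fallback to find it.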
+ keys = {'file-id': set(['A-id'])}
+ if stacked.repository.supports_rich_root():
+ keys['root-id'] = set(['A-id'])
+ self.assertEqual(keys, repo.fileids_altered_by_revision_ids(['A-id']))
+
+
+class FileIdInvolvedBase(TestCaseWithRepository):
+
+ def touch(self, tree, filename):
+ # use the tree's transport so as not to depend on the tree's location or type.
+ tree.bzrdir.root_transport.append_bytes(filename, "appended line\n")
+
+ def compare_tree_fileids(self, branch, old_rev, new_rev):
+ old_tree = self.branch.repository.revision_tree(old_rev)
+ new_tree = self.branch.repository.revision_tree(new_rev)
+ delta = new_tree.changes_from(old_tree)
+
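+ # Collect the ids of every file touched by the delta: added, renamed
+ # and modified entries.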
+ l2 = [id for path, id, kind in delta.added] + \
+ [id for oldpath, newpath, id, kind, text_modified, \
+ meta_modified in delta.renamed] + \
+ [id for path, id, kind, text_modified, meta_modified in \
+ delta.modified]
+ return set(l2)
+
+
+class TestFileIdInvolved(FileIdInvolvedBase):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def setUp(self):
+ super(TestFileIdInvolved, self).setUp()
+ # create three branches, and merge it
+ #
+ # ,-->J------>K (branch2)
+ # / \
+ # A --->B --->C---->D-->G (main)
+ # \ / /
+ # '--->E---+---->F (branch1)
+
+ # A changes:
+ # B changes: 'a-file-id-2006-01-01-abcd'
+ # C changes: Nothing (perfect merge)
+ # D changes: 'b-file-id-2006-01-01-defg'
+ # E changes: 'file-d'
+ # F changes: 'file-d'
+ # G changes: 'b-file-id-2006-01-01-defg'
+ # J changes: 'b-file-id-2006-01-01-defg'
+ # K changes: 'c-funky<file-id>quiji%bo'
+
+ main_wt = self.make_branch_and_tree('main')
+ main_branch = main_wt.branch
+ self.build_tree(["main/a","main/b","main/c"])
+
+ main_wt.add(['a', 'b', 'c'], ['a-file-id-2006-01-01-abcd',
+ 'b-file-id-2006-01-01-defg',
+ 'c-funky<file-id>quiji%bo'])
+ try:
+ main_wt.commit("Commit one", rev_id="rev-A")
+ except errors.IllegalPath:
+ # TODO: jam 20060701 Consider raising a different exception
+ # newer formats do support this, and nothing can be done to
+ # correct this test - it's not a bug.
+ if sys.platform == 'win32':
+ raise tests.TestSkipped('Old repository formats do not'
+ ' support file ids with <> on win32')
+ # This is not a known error condition
+ raise
+
+ #-------- end A -----------
+
+ bt1 = self.make_branch_and_tree('branch1')
+ bt1.pull(main_branch)
+ b1 = bt1.branch
+ self.build_tree(["branch1/d"])
+ bt1.add(['d'], ['file-d'])
+ bt1.commit("branch1, Commit one", rev_id="rev-E")
+
+ #-------- end E -----------
+
+ self.touch(main_wt, "a")
+ main_wt.commit("Commit two", rev_id="rev-B")
+
+ #-------- end B -----------
+
+ bt2 = self.make_branch_and_tree('branch2')
+ bt2.pull(main_branch)
+ branch2_branch = bt2.branch
+ set_executability(bt2, 'b', True)
+ bt2.commit("branch2, Commit one", rev_id="rev-J")
+
+ #-------- end J -----------
+
+ main_wt.merge_from_branch(b1)
+ main_wt.commit("merge branch1, rev-11", rev_id="rev-C")
+
+ #-------- end C -----------
+
+ bt1.rename_one("d","e")
+ bt1.commit("branch1, commit two", rev_id="rev-F")
+
+ #-------- end F -----------
+
+ self.touch(bt2, "c")
+ bt2.commit("branch2, commit two", rev_id="rev-K")
+
+ #-------- end K -----------
+
+ main_wt.merge_from_branch(b1)
+ self.touch(main_wt, "b")
+ # D gets some funky characters to make sure the unescaping works
+ main_wt.commit("merge branch1, rev-12", rev_id="rev-<D>")
+
+ # end D
+
+ main_wt.merge_from_branch(branch2_branch)
+ main_wt.commit("merge branch1, rev-22", rev_id="rev-G")
+
+ # end G
+ self.branch = main_branch
+
+ def test_fileids_altered_between_two_revs(self):
+ self.branch.lock_read()
+ self.addCleanup(self.branch.unlock)
+ self.branch.repository.fileids_altered_by_revision_ids(["rev-J","rev-K"])
+ self.assertEqual(
+ {'b-file-id-2006-01-01-defg':set(['rev-J']),
+ 'c-funky<file-id>quiji%bo':set(['rev-K'])
+ },
+ self.branch.repository.fileids_altered_by_revision_ids(["rev-J","rev-K"]))
+
+ self.assertEqual(
+ {'b-file-id-2006-01-01-defg': set(['rev-<D>']),
+ 'file-d': set(['rev-F']),
+ },
+ self.branch.repository.fileids_altered_by_revision_ids(['rev-<D>', 'rev-F']))
+
+ self.assertEqual(
+ {
+ 'b-file-id-2006-01-01-defg': set(['rev-<D>', 'rev-G', 'rev-J']),
+ 'c-funky<file-id>quiji%bo': set(['rev-K']),
+ 'file-d': set(['rev-F']),
+ },
+ self.branch.repository.fileids_altered_by_revision_ids(
+ ['rev-<D>', 'rev-G', 'rev-F', 'rev-K', 'rev-J']))
+
+ self.assertEqual(
+ {'a-file-id-2006-01-01-abcd': set(['rev-B']),
+ 'b-file-id-2006-01-01-defg': set(['rev-<D>', 'rev-G', 'rev-J']),
+ 'c-funky<file-id>quiji%bo': set(['rev-K']),
+ 'file-d': set(['rev-F']),
+ },
+ self.branch.repository.fileids_altered_by_revision_ids(
+ ['rev-G', 'rev-F', 'rev-C', 'rev-B', 'rev-<D>', 'rev-K', 'rev-J']))
+
+ def fileids_altered_by_revision_ids(self, revision_ids):
+ """This is a wrapper to strip TREE_ROOT if it occurs"""
+ repo = self.branch.repository
+ root_id = self.branch.basis_tree().get_root_id()
+ result = repo.fileids_altered_by_revision_ids(revision_ids)
+ if root_id in result:
+ del result[root_id]
+ return result
+
+ def test_fileids_altered_by_revision_ids(self):
+ self.branch.lock_read()
+ self.addCleanup(self.branch.unlock)
+ self.assertEqual(
+ {'a-file-id-2006-01-01-abcd':set(['rev-A']),
+ 'b-file-id-2006-01-01-defg': set(['rev-A']),
+ 'c-funky<file-id>quiji%bo': set(['rev-A']),
+ },
+ self.fileids_altered_by_revision_ids(["rev-A"]))
+ self.assertEqual(
+ {'a-file-id-2006-01-01-abcd':set(['rev-B'])
+ },
+ self.branch.repository.fileids_altered_by_revision_ids(["rev-B"]))
+ self.assertEqual(
+ {'b-file-id-2006-01-01-defg':set(['rev-<D>'])
+ },
+ self.branch.repository.fileids_altered_by_revision_ids(["rev-<D>"]))
+
+ def test_fileids_involved_full_compare(self):
+ # this tests that the result of each fileid_involved calculation
+ # along a revision history selects only the fileids selected by
+ # comparing the trees - no less, and no more. This is correct
+ # because in our sample data we do not revert any file ids along
+ # the revision history.
+ self.branch.lock_read()
+ self.addCleanup(self.branch.unlock)
+ pp=[]
+ graph = self.branch.repository.get_graph()
+ history = list(graph.iter_lefthand_ancestry(self.branch.last_revision(),
+ [_mod_revision.NULL_REVISION]))
+ history.reverse()
+
+ if len(history) < 2:
+ return
+
+ for start in range(0,len(history)-1):
+ start_id = history[start]
+ for end in range(start+1,len(history)):
+ end_id = history[end]
+ unique_revs = graph.find_unique_ancestors(end_id, [start_id])
+ l1 = self.branch.repository.fileids_altered_by_revision_ids(
+ unique_revs)
+ l1 = set(l1.keys())
+ l2 = self.compare_tree_fileids(self.branch, start_id, end_id)
+ self.assertEquals(l1, l2)
+
+
+class TestFileIdInvolvedNonAscii(FileIdInvolvedBase):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_utf8_file_ids_and_revision_ids(self):
+ main_wt = self.make_branch_and_tree('main')
+ main_branch = main_wt.branch
+ self.build_tree(["main/a"])
+
+ file_id = u'a-f\xedle-id'.encode('utf8')
+ main_wt.add(['a'], [file_id])
+ revision_id = u'r\xe9v-a'.encode('utf8')
+ try:
+ main_wt.commit('a', rev_id=revision_id)
+ except errors.NonAsciiRevisionId:
+ raise tests.TestSkipped('non-ascii revision ids not supported by %s'
+ % self.repository_format)
+
+ repo = main_wt.branch.repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ file_ids = repo.fileids_altered_by_revision_ids([revision_id])
+ root_id = main_wt.basis_tree().get_root_id()
+ if root_id in file_ids:
+ self.assertEqual({file_id:set([revision_id]),
+ root_id:set([revision_id])
+ }, file_ids)
+ else:
+ self.assertEqual({file_id:set([revision_id])}, file_ids)
+
+
+class TestFileIdInvolvedSuperset(FileIdInvolvedBase):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def setUp(self):
+ super(TestFileIdInvolvedSuperset, self).setUp()
+
+ self.branch = None
+ main_wt = self.make_branch_and_tree('main')
+ main_branch = main_wt.branch
+ self.build_tree(["main/a","main/b","main/c"])
+
+ main_wt.add(['a', 'b', 'c'], ['a-file-id-2006-01-01-abcd',
+ 'b-file-id-2006-01-01-defg',
+ 'c-funky<file-id>quiji\'"%bo'])
+ try:
+ main_wt.commit("Commit one", rev_id="rev-A")
+ except errors.IllegalPath:
+ # TODO: jam 20060701 Consider raising a different exception
+ # newer formats do support this, and nothing can be done to
+ # correct this test - it's not a bug.
+ if sys.platform == 'win32':
+ raise tests.TestSkipped('Old repository formats do not'
+ ' support file ids with <> on win32')
+ # This is not a known error condition
+ raise
+
+ branch2_wt = self.make_branch_and_tree('branch2')
+ branch2_wt.pull(main_branch)
+ branch2_bzrdir = branch2_wt.bzrdir
+ branch2_branch = branch2_bzrdir.open_branch()
+ set_executability(branch2_wt, 'b', True)
+ branch2_wt.commit("branch2, Commit one", rev_id="rev-J")
+
+ main_wt.merge_from_branch(branch2_branch)
+ set_executability(main_wt, 'b', False)
+ main_wt.commit("merge branch1, rev-22", rev_id="rev-G")
+
+ # end G
+ self.branch = main_branch
+
+ def test_fileid_involved_full_compare2(self):
+ # this tests that fileids_altered_by_revision_ids returns
+ # more information than compare_tree can, because it
+ # sees each change rather than the aggregate delta.
+ self.branch.lock_read()
+ self.addCleanup(self.branch.unlock)
+ graph = self.branch.repository.get_graph()
+ history = list(graph.iter_lefthand_ancestry(self.branch.last_revision(),
+ [_mod_revision.NULL_REVISION]))
+ history.reverse()
+ old_rev = history[0]
+ new_rev = history[1]
+ unique_revs = graph.find_unique_ancestors(new_rev, [old_rev])
+
+ l1 = self.branch.repository.fileids_altered_by_revision_ids(
+ unique_revs)
+ l1 = set(l1.keys())
+
+ l2 = self.compare_tree_fileids(self.branch, old_rev, new_rev)
+ self.assertNotEqual(l2, l1)
+ self.assertSubset(l2, l1)
+
+
+def set_executability(wt, path, executable=True):
+ """Set the executable bit for the file at path in the working tree
+
+ os.chmod() doesn't work on Windows. But TreeTransform can mark or
+ unmark a file as executable.
+ """
+ file_id = wt.path2id(path)
+ tt = transform.TreeTransform(wt)
+ try:
+ tt.set_executability(executable, tt.trans_id_tree_file_id(file_id))
+ tt.apply()
+ finally:
+ tt.finalize()
diff --git a/bzrlib/tests/per_repository_vf/test_find_text_key_references.py b/bzrlib/tests/per_repository_vf/test_find_text_key_references.py
new file mode 100644
index 0000000..69bdc40
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_find_text_key_references.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Test the find_text_key_references API."""
+
+
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestFindTextKeyReferences(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_empty(self):
+ repo = self.make_repository('.')
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({}, repo.find_text_key_references())
diff --git a/bzrlib/tests/per_repository_vf/test_merge_directive.py b/bzrlib/tests/per_repository_vf/test_merge_directive.py
new file mode 100644
index 0000000..9f12d5e
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_merge_directive.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for how merge directives interact with various repository formats.
+
+Bundles contain the serialized form, so changes in serialization based on the
+repository format affect the final bundle.
+"""
+
+from bzrlib import (
+ chk_map,
+ merge_directive,
+ )
+
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestMergeDirective(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def make_two_branches(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('f', 'f-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('B', 'A', [
+ ('modify', ('f-id', 'new content\n')),
+ ])
+ builder.finish_series()
+ b1 = builder.get_branch()
+ b2 = b1.bzrdir.sprout('target', revision_id='A').open_branch()
+ return b1, b2
+
+ def create_merge_directive(self, source_branch, submit_url):
+ return merge_directive.MergeDirective2.from_objects(
+ source_branch.repository,
+ source_branch.last_revision(),
+ time=1247775710, timezone=0,
+ target_branch=submit_url)
+
+ def test_create_merge_directive(self):
+ source_branch, target_branch = self.make_two_branches()
+ directive = self.create_merge_directive(source_branch,
+ target_branch.base)
+ self.assertIsInstance(directive, merge_directive.MergeDirective2)
+
+
+ def test_create_and_install_directive(self):
+ source_branch, target_branch = self.make_two_branches()
+ directive = self.create_merge_directive(source_branch,
+ target_branch.base)
+ chk_map.clear_cache()
+ directive.install_revisions(target_branch.repository)
+ rt = target_branch.repository.revision_tree('B')
+ rt.lock_read()
+ self.assertEqualDiff('new content\n', rt.get_file_text('f-id'))
+ rt.unlock()
diff --git a/bzrlib/tests/per_repository_vf/test_reconcile.py b/bzrlib/tests/per_repository_vf/test_reconcile.py
new file mode 100644
index 0000000..0a626fa
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_reconcile.py
@@ -0,0 +1,442 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for reconciliation of repositories."""
+
+import bzrlib
+from bzrlib import (
+ errors,
+ )
+from bzrlib.bzrdir import BzrDir
+from bzrlib.inventory import Inventory
+from bzrlib.reconcile import reconcile, Reconciler
+from bzrlib.revision import Revision
+from bzrlib.tests import TestSkipped
+
+from bzrlib.tests.per_repository_vf.helpers import (
+ TestCaseWithBrokenRevisionIndex,
+ )
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.matchers import MatchesAncestry
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.uncommit import uncommit
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestReconcile(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def checkUnreconciled(self, d, reconciler):
+ """Check that d did not get reconciled."""
+ # nothing should have been fixed yet:
+ self.assertEqual(0, reconciler.inconsistent_parents)
+ # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+ self.checkNoBackupInventory(d)
+
+ def checkNoBackupInventory(self, aBzrDir):
+ """Check that there is no backup inventory in aBzrDir."""
+ repo = aBzrDir.open_repository()
+ for path in repo.control_transport.list_dir('.'):
+ self.assertFalse('inventory.backup' in path)
+
+
+class TestBadRevisionParents(TestCaseWithBrokenRevisionIndex):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_aborts_if_bad_parents_in_index(self):
+ """Reconcile refuses to proceed if the revision index is wrong when
+ checked against the revision texts, so that it does not generate broken
+ data.
+
+ Ideally reconcile would fix this, but until we implement that we just
+ make sure we safely detect this problem.
+ """
+ repo = self.make_repo_with_extra_ghost_index()
+ reconciler = repo.reconcile(thorough=True)
+ self.assertTrue(reconciler.aborted,
+ "reconcile should have aborted due to bad parents.")
+
+ def test_does_not_abort_on_clean_repo(self):
+ repo = self.make_repository('.')
+ reconciler = repo.reconcile(thorough=True)
+ self.assertFalse(reconciler.aborted,
+ "reconcile should not have aborted on an unbroken repository.")
+
+
+class TestsNeedingReweave(TestReconcile):
+
+ def setUp(self):
+ super(TestsNeedingReweave, self).setUp()
+
+ t = self.get_transport()
+ # an empty inventory with no revision for testing with.
+ repo = self.make_repository('inventory_without_revision')
+ repo.lock_write()
+ repo.start_write_group()
+ inv = Inventory(revision_id='missing')
+ inv.root.revision = 'missing'
+ repo.add_inventory('missing', inv, [])
+ repo.commit_write_group()
+ repo.unlock()
+
+ def add_commit(repo, revision_id, parent_ids):
+ repo.lock_write()
+ repo.start_write_group()
+ inv = Inventory(revision_id=revision_id)
+ inv.root.revision = revision_id
+ root_id = inv.root.file_id
+ sha1 = repo.add_inventory(revision_id, inv, parent_ids)
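+ # Give the root an (empty) text so the text referenced by the root
+ # inventory entry actually exists.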
+ repo.texts.add_lines((root_id, revision_id), [], [])
+ rev = bzrlib.revision.Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=sha1,
+ revision_id=revision_id)
+ rev.parent_ids = parent_ids
+ repo.add_revision(revision_id, rev)
+ repo.commit_write_group()
+ repo.unlock()
+ # an empty inventory with no revision for testing with.
+ # this is referenced by 'references_missing' to let us test
+ # that all the cached data is correctly converted into ghost links
+ # and the referenced inventory still cleaned.
+ repo = self.make_repository('inventory_without_revision_and_ghost')
+ repo.lock_write()
+ repo.start_write_group()
+ repo.add_inventory('missing', inv, [])
+ repo.commit_write_group()
+ repo.unlock()
+ add_commit(repo, 'references_missing', ['missing'])
+
+ # an inventory with no parents while the revision has parents,
+ # i.e. a ghost.
+ repo = self.make_repository('inventory_one_ghost')
+ add_commit(repo, 'ghost', ['the_ghost'])
+
+ # an inventory with a ghost that can be corrected now.
+ t.copy_tree('inventory_one_ghost', 'inventory_ghost_present')
+ bzrdir_url = self.get_url('inventory_ghost_present')
+ bzrdir = BzrDir.open(bzrdir_url)
+ repo = bzrdir.open_repository()
+ add_commit(repo, 'the_ghost', [])
+
+ def checkEmptyReconcile(self, **kwargs):
+ """Check a reconcile on an empty repository."""
+ self.make_repository('empty')
+ d = BzrDir.open(self.get_url('empty'))
+ # calling on an empty repository should do nothing
+ reconciler = d.find_repository().reconcile(**kwargs)
+ # no inconsistent parents should have been found
+ self.assertEqual(0, reconciler.inconsistent_parents)
+ # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+ # and no backup weave should have been needed/made.
+ self.checkNoBackupInventory(d)
+
+ def test_reconcile_empty(self):
+        # in an empty repo, there's nothing to do.
+ self.checkEmptyReconcile()
+
+ def test_repo_has_reconcile_does_inventory_gc_attribute(self):
+ repo = self.make_repository('repo')
+ self.assertNotEqual(None, repo._reconcile_does_inventory_gc)
+
+ def test_reconcile_empty_thorough(self):
+ # reconcile should accept thorough=True
+ self.checkEmptyReconcile(thorough=True)
+
+ def test_convenience_reconcile_inventory_without_revision_reconcile(self):
+ # smoke test for the all in one ui tool
+ bzrdir_url = self.get_url('inventory_without_revision')
+ bzrdir = BzrDir.open(bzrdir_url)
+ repo = bzrdir.open_repository()
+ if not repo._reconcile_does_inventory_gc:
+ raise TestSkipped('Irrelevant test')
+ reconcile(bzrdir)
+ # now the backup should have it but not the current inventory
+ repo = bzrdir.open_repository()
+ self.check_missing_was_removed(repo)
+
+ def test_reweave_inventory_without_revision(self):
+ # an excess inventory on its own is only reconciled by using thorough
+ d_url = self.get_url('inventory_without_revision')
+ d = BzrDir.open(d_url)
+ repo = d.open_repository()
+ if not repo._reconcile_does_inventory_gc:
+ raise TestSkipped('Irrelevant test')
+ self.checkUnreconciled(d, repo.reconcile())
+ reconciler = repo.reconcile(thorough=True)
+ # no bad parents
+ self.assertEqual(0, reconciler.inconsistent_parents)
+ # and one garbage inventory
+ self.assertEqual(1, reconciler.garbage_inventories)
+ self.check_missing_was_removed(repo)
+
+ def check_thorough_reweave_missing_revision(self, aBzrDir, reconcile,
+ **kwargs):
+ # actual low level test.
+ repo = aBzrDir.open_repository()
+ if not repo.has_revision('missing'):
+ # the repo handles ghosts without corruption, so reconcile has
+ # nothing to do here. Specifically, this test has the inventory
+ # 'missing' present and the revision 'missing' missing, so clearly
+ # 'missing' cannot be reported in the present ancestry -> missing
+ # is something that can be filled as a ghost.
+ expected_inconsistent_parents = 0
+ else:
+ expected_inconsistent_parents = 1
+ reconciler = reconcile(**kwargs)
+ # some number of inconsistent parents should have been found
+ self.assertEqual(expected_inconsistent_parents,
+ reconciler.inconsistent_parents)
+        # and one garbage inventory
+ self.assertEqual(1, reconciler.garbage_inventories)
+ # now the backup should have it but not the current inventory
+ repo = aBzrDir.open_repository()
+ self.check_missing_was_removed(repo)
+ # and the parent list for 'references_missing' should have that
+ # revision a ghost now.
+ self.assertFalse(repo.has_revision('missing'))
+
+ def check_missing_was_removed(self, repo):
+ if repo._reconcile_backsup_inventory:
+ backed_up = False
+ for path in repo.control_transport.list_dir('.'):
+ if 'inventory.backup' in path:
+ backed_up = True
+ self.assertTrue(backed_up)
+ # Not clear how to do this at an interface level:
+ # self.assertTrue('missing' in backup.versions())
+ self.assertRaises(errors.NoSuchRevision, repo.get_inventory, 'missing')
+
+ def test_reweave_inventory_without_revision_reconciler(self):
+ # smoke test for the all in one Reconciler class,
+ # other tests use the lower level repo.reconcile()
+ d_url = self.get_url('inventory_without_revision_and_ghost')
+ d = BzrDir.open(d_url)
+ if not d.open_repository()._reconcile_does_inventory_gc:
+ raise TestSkipped('Irrelevant test')
+ def reconcile():
+ reconciler = Reconciler(d)
+ reconciler.reconcile()
+ return reconciler
+ self.check_thorough_reweave_missing_revision(d, reconcile)
+
+ def test_reweave_inventory_without_revision_and_ghost(self):
+ # actual low level test.
+ d_url = self.get_url('inventory_without_revision_and_ghost')
+ d = BzrDir.open(d_url)
+ repo = d.open_repository()
+ if not repo._reconcile_does_inventory_gc:
+ raise TestSkipped('Irrelevant test')
+        # nothing should have been altered yet: inventories without
+        # revisions do not incur data loss for the current format
+ self.check_thorough_reweave_missing_revision(d, repo.reconcile,
+ thorough=True)
+
+ def test_reweave_inventory_preserves_a_revision_with_ghosts(self):
+ d = BzrDir.open(self.get_url('inventory_one_ghost'))
+ reconciler = d.open_repository().reconcile(thorough=True)
+ # no inconsistent parents should have been found:
+ # the lack of a parent for ghost is normal
+ self.assertEqual(0, reconciler.inconsistent_parents)
+        # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+ # now the current inventory should still have 'ghost'
+ repo = d.open_repository()
+ repo.get_inventory('ghost')
+ self.assertThat(['ghost', 'the_ghost'], MatchesAncestry(repo, 'ghost'))
+
+ def test_reweave_inventory_fixes_ancestryfor_a_present_ghost(self):
+ d = BzrDir.open(self.get_url('inventory_ghost_present'))
+ repo = d.open_repository()
+ m = MatchesAncestry(repo, 'ghost')
+ if m.match(['the_ghost', 'ghost']) is None:
+ # the repo handles ghosts without corruption, so reconcile has
+ # nothing to do
+ return
+ self.assertThat(['ghost'], m)
+ reconciler = repo.reconcile()
+ # this is a data corrupting error, so a normal reconcile should fix it.
+        # one inconsistent parent should have been found: the
+        # available but not referenced parent for ghost.
+ self.assertEqual(1, reconciler.inconsistent_parents)
+ # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+ # now the current inventory should still have 'ghost'
+ repo = d.open_repository()
+ repo.get_inventory('ghost')
+ repo.get_inventory('the_ghost')
+ self.assertThat(['the_ghost', 'ghost'],
+ MatchesAncestry(repo, 'ghost'))
+ self.assertThat(['the_ghost'],
+ MatchesAncestry(repo, 'the_ghost'))
+
+ def test_text_from_ghost_revision(self):
+ repo = self.make_repository('text-from-ghost')
+ inv = Inventory(revision_id='final-revid')
+ inv.root.revision = 'root-revid'
+ ie = inv.add_path('bla', 'file', 'myfileid')
+ ie.revision = 'ghostrevid'
+ ie.text_size = 42
+ ie.text_sha1 = "bee68c8acd989f5f1765b4660695275948bf5c00"
+ rev = bzrlib.revision.Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ revision_id='final-revid')
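+        # The file text added below names a parent ('ghost-text-parent') that
+        # is never inserted, so the thorough reconcile at the end has to cope
+        # with a ghost text parent.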
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ repo.add_revision('final-revid', rev, inv)
+ try:
+ repo.texts.add_lines(('myfileid', 'ghostrevid'),
+ (('myfileid', 'ghost-text-parent'),),
+ ["line1\n", "line2\n"])
+ except errors.RevisionNotPresent:
+ raise TestSkipped("text ghost parents not supported")
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((inv.root.file_id, inv.root.revision),
+ [], [])
+ finally:
+ repo.commit_write_group()
+ finally:
+ repo.unlock()
+ repo.reconcile(thorough=True)
+
+
+class TestReconcileWithIncorrectRevisionCache(TestReconcile):
+    """Ancestry data cached in knits and weaves should be reconcilable.
+
+ This class tests that reconcile can correct invalid caches (such as after
+ a reconcile).
+ """
+
+ def setUp(self):
+ self.reduceLockdirTimeout()
+ super(TestReconcileWithIncorrectRevisionCache, self).setUp()
+
+ t = self.get_transport()
+        # We need a revision with two parents in the wrong order, which
+        # should trigger reinsertion, and another with the first parent
+        # correct but the other two swapped, which should not trigger
+        # reinsertion. These need to be in different repositories so that
+        # we don't trigger a reconcile based on the other case. There is
+        # no API to construct a broken knit repository at this point; if
+        # we ever encounter a bad graph in a knit repo we should add a
+        # lower-level API to allow constructing such cases.
+
+ # first off the common logic:
+ self.first_tree = self.make_branch_and_tree('wrong-first-parent')
+ self.second_tree = self.make_branch_and_tree(
+ 'reversed-secondary-parents')
+ for t in [self.first_tree, self.second_tree]:
+ t.commit('1', rev_id='1')
+ uncommit(t.branch, tree=t)
+ t.commit('2', rev_id='2')
+ uncommit(t.branch, tree=t)
+ t.commit('3', rev_id='3')
+ uncommit(t.branch, tree=t)
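+        # After this loop revisions '1', '2' and '3' all exist in each
+        # repository as independent parentless revisions (each uncommit resets
+        # the tree), ready to be wired up as parents in deliberately
+        # mismatched orders below.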
+ #second_tree = self.make_branch_and_tree('reversed-secondary-parents')
+ #second_tree.pull(tree) # XXX won't copy the repo?
+ repo_secondary = self.second_tree.branch.repository
+
+        # now set up the wrong-first-parent case
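+        # the inventory is stored with parents ('2', '1') while the revision
+        # lists ('1', '2'); a thorough reconcile should rewrite the cached
+        # order to match the revision.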
+ repo = self.first_tree.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ inv = Inventory(revision_id='wrong-first-parent')
+ inv.root.revision = 'wrong-first-parent'
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((root_id, 'wrong-first-parent'), [], [])
+ sha1 = repo.add_inventory('wrong-first-parent', inv, ['2', '1'])
+ rev = Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=sha1,
+ revision_id='wrong-first-parent')
+ rev.parent_ids = ['1', '2']
+ repo.add_revision('wrong-first-parent', rev)
+ repo.commit_write_group()
+ repo.unlock()
+
+        # now set up the wrong-secondary-parent case
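+        # here the first parent matches but the second and third are swapped
+        # in the inventory (('1', '3', '2') vs the revision's ('1', '2', '3'));
+        # this ordering difference is ignored by reconcile.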
+ repo = repo_secondary
+ repo.lock_write()
+ repo.start_write_group()
+ inv = Inventory(revision_id='wrong-secondary-parent')
+ inv.root.revision = 'wrong-secondary-parent'
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((root_id, 'wrong-secondary-parent'), [], [])
+ sha1 = repo.add_inventory('wrong-secondary-parent', inv, ['1', '3', '2'])
+ rev = Revision(timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ message="Message",
+ inventory_sha1=sha1,
+ revision_id='wrong-secondary-parent')
+ rev.parent_ids = ['1', '2', '3']
+ repo.add_revision('wrong-secondary-parent', rev)
+ repo.commit_write_group()
+ repo.unlock()
+
+ def test_reconcile_wrong_order(self):
+ # a wrong order in primary parents is optionally correctable
+ repo = self.first_tree.branch.repository
+ repo.lock_read()
+ try:
+ g = repo.get_graph()
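+            # if the graph already reports the revision's own parent order,
+            # this format does not cache inventory parent order separately and
+            # there is nothing to reconcile here.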
+ if g.get_parent_map(['wrong-first-parent'])['wrong-first-parent'] \
+ == ('1', '2'):
+                raise TestSkipped('wrong-first-parent is not set up for testing')
+ finally:
+ repo.unlock()
+ self.checkUnreconciled(repo.bzrdir, repo.reconcile())
+        # nothing should have been altered yet: inventories without
+        # revisions do not incur data loss for the current format
+ reconciler = repo.reconcile(thorough=True)
+ # these show up as inconsistent parents
+ self.assertEqual(1, reconciler.inconsistent_parents)
+ # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+ # and should have been fixed:
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ g = repo.get_graph()
+ self.assertEqual(
+ {'wrong-first-parent':('1', '2')},
+ g.get_parent_map(['wrong-first-parent']))
+
+ def test_reconcile_wrong_order_secondary_inventory(self):
+ # a wrong order in the parents for inventories is ignored.
+ repo = self.second_tree.branch.repository
+ self.checkUnreconciled(repo.bzrdir, repo.reconcile())
+ self.checkUnreconciled(repo.bzrdir, repo.reconcile(thorough=True))
diff --git a/bzrlib/tests/per_repository_vf/test_refresh_data.py b/bzrlib/tests/per_repository_vf/test_refresh_data.py
new file mode 100644
index 0000000..9eebc27
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_refresh_data.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for VersionedFileRepository.refresh_data."""
+
+from bzrlib import (
+    errors,
+    repository,
+    )
+
+from bzrlib.tests import TestSkipped
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestRefreshData(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def fetch_new_revision_into_concurrent_instance(self, repo, token):
+ """Create a new revision (revid 'new-rev') and fetch it into a
+ concurrent instance of repo.
+ """
+ source = self.make_branch_and_memory_tree('source')
+ source.lock_write()
+ self.addCleanup(source.unlock)
+ source.add([''], ['root-id'])
+ revid = source.commit('foo', rev_id='new-rev')
+ # Force data reading on weaves/knits
+ repo.all_revision_ids()
+ repo.revisions.keys()
+ repo.inventories.keys()
+ # server repo is the instance a smart server might hold for this
+ # repository.
+ server_repo = repo.bzrdir.open_repository()
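+        # Lock the second instance with the same physical lock token so it can
+        # insert data while the original instance still holds the lock.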
+ try:
+ server_repo.lock_write(token)
+ except errors.TokenLockingNotSupported:
+ raise TestSkipped('Cannot concurrently insert into repo format %r'
+ % self.repository_format)
+ try:
+ server_repo.fetch(source.branch.repository, revid)
+ finally:
+ server_repo.unlock()
+
+ def test_refresh_data_after_fetch_new_data_visible_in_write_group(self):
+ tree = self.make_branch_and_memory_tree('target')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['root-id'])
+ tree.commit('foo', rev_id='commit-in-target')
+ repo = tree.branch.repository
+ token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ self.addCleanup(repo.abort_write_group)
+ self.fetch_new_revision_into_concurrent_instance(repo, token)
+ # Call refresh_data. It either fails with IsInWriteGroupError, or it
+ # succeeds and the new revisions are visible.
+ try:
+ repo.refresh_data()
+ except repository.IsInWriteGroupError:
+ pass
+ else:
+ self.assertEqual(
+ ['commit-in-target', 'new-rev'],
+ sorted(repo.all_revision_ids()))
+
+ def test_refresh_data_after_fetch_new_data_visible(self):
+ repo = self.make_repository('target')
+ token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ self.fetch_new_revision_into_concurrent_instance(repo, token)
+ repo.refresh_data()
+ self.assertNotEqual({}, repo.get_graph().get_parent_map(['new-rev']))
+
+
diff --git a/bzrlib/tests/per_repository_vf/test_repository.py b/bzrlib/tests/per_repository_vf/test_repository.py
new file mode 100644
index 0000000..3388743
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_repository.py
@@ -0,0 +1,466 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository implementations - tests a repository format."""
+
+from bzrlib import (
+ errors,
+ gpg,
+ inventory,
+ repository as _mod_repository,
+ revision as _mod_revision,
+ tests,
+ versionedfile,
+ vf_repository,
+ )
+
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests.matchers import MatchesAncestry
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestRepository(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def assertFormatAttribute(self, attribute, allowed_values):
+        """Assert that the format attribute 'attribute' is in allowed_values."""
+ repo = self.make_repository('repo')
+ self.assertSubset([getattr(repo._format, attribute)], allowed_values)
+
+ def test_attribute__fetch_order(self):
+ """Test the _fetch_order attribute."""
+ self.assertFormatAttribute('_fetch_order', ('topological', 'unordered'))
+
+ def test_attribute__fetch_uses_deltas(self):
+ """Test the _fetch_uses_deltas attribute."""
+ self.assertFormatAttribute('_fetch_uses_deltas', (True, False))
+
+ def test_attribute_inventories_store(self):
+ """Test the existence of the inventories attribute."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
+ self.assertIsInstance(repo.inventories, versionedfile.VersionedFiles)
+
+ def test_attribute_inventories_basics(self):
+ """Test basic aspects of the inventories attribute."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
+ rev_id = (tree.commit('a'),)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(set([rev_id]), set(repo.inventories.keys()))
+
+ def test_attribute_revision_store(self):
+ """Test the existence of the revisions attribute."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
+ self.assertIsInstance(repo.revisions,
+ versionedfile.VersionedFiles)
+
+ def test_attribute_revision_store_basics(self):
+ """Test the basic behaviour of the revisions attribute."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
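+        # keys in the revisions store are 1-tuples of revision ids, hence the
+        # trailing commas around the tree.commit() results below.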
+ repo.lock_write()
+ try:
+ self.assertEqual(set(), set(repo.revisions.keys()))
+ revid = (tree.commit("foo"),)
+ self.assertEqual(set([revid]), set(repo.revisions.keys()))
+ self.assertEqual({revid:()},
+ repo.revisions.get_parent_map([revid]))
+ finally:
+ repo.unlock()
+ tree2 = self.make_branch_and_tree('tree2')
+ tree2.pull(tree.branch)
+ left_id = (tree2.commit('left'),)
+ right_id = (tree.commit('right'),)
+ tree.merge_from_branch(tree2.branch)
+ merge_id = (tree.commit('merged'),)
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual(set([revid, left_id, right_id, merge_id]),
+ set(repo.revisions.keys()))
+ self.assertEqual({revid:(), left_id:(revid,), right_id:(revid,),
+ merge_id:(right_id, left_id)},
+ repo.revisions.get_parent_map(repo.revisions.keys()))
+
+ def test_attribute_signature_store(self):
+ """Test the existence of the signatures attribute."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
+ self.assertIsInstance(repo.signatures,
+ versionedfile.VersionedFiles)
+
+ def test_exposed_versioned_files_are_marked_dirty(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ signatures = repo.signatures
+ revisions = repo.revisions
+ inventories = repo.inventories
+ repo.unlock()
+ self.assertRaises(errors.ObjectNotLocked,
+ signatures.keys)
+ self.assertRaises(errors.ObjectNotLocked,
+ revisions.keys)
+ self.assertRaises(errors.ObjectNotLocked,
+ inventories.keys)
+ self.assertRaises(errors.ObjectNotLocked,
+ signatures.add_lines, ('foo',), [], [])
+ self.assertRaises(errors.ObjectNotLocked,
+ revisions.add_lines, ('foo',), [], [])
+ self.assertRaises(errors.ObjectNotLocked,
+ inventories.add_lines, ('foo',), [], [])
+
+ def test__get_sink(self):
+ repo = self.make_repository('repo')
+ sink = repo._get_sink()
+ self.assertIsInstance(sink, vf_repository.StreamSink)
+
+ def test_get_serializer_format(self):
+ repo = self.make_repository('.')
+ format = repo.get_serializer_format()
+ self.assertEqual(repo._serializer.format_num, format)
+
+ def test_add_revision_inventory_sha1(self):
+ inv = inventory.Inventory(revision_id='A')
+ inv.root.revision = 'A'
+ inv.root.file_id = 'fixed-root'
+        # Insert the inventory on its own into an identical repository, to get
+ # its sha1.
+ reference_repo = self.make_repository('reference_repo')
+ reference_repo.lock_write()
+ reference_repo.start_write_group()
+ inv_sha1 = reference_repo.add_inventory('A', inv, [])
+ reference_repo.abort_write_group()
+ reference_repo.unlock()
+ # Now insert a revision with this inventory, and it should get the same
+ # sha1.
+ repo = self.make_repository('repo')
+ repo.lock_write()
+ repo.start_write_group()
+ root_id = inv.root.file_id
+ repo.texts.add_lines(('fixed-root', 'A'), [], [])
+ repo.add_revision('A', _mod_revision.Revision(
+ 'A', committer='B', timestamp=0,
+ timezone=0, message='C'), inv=inv)
+ repo.commit_write_group()
+ repo.unlock()
+ repo.lock_read()
+ self.assertEquals(inv_sha1, repo.get_revision('A').inventory_sha1)
+ repo.unlock()
+
+ def test_install_revisions(self):
+ wt = self.make_branch_and_tree('source')
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ repo = wt.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ repo.sign_revision('A', gpg.LoopbackGPGStrategy(None))
+ repo.commit_write_group()
+ repo.unlock()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ repo2 = self.make_repository('repo2')
+ revision = repo.get_revision('A')
+ tree = repo.revision_tree('A')
+ signature = repo.get_signature_text('A')
+ repo2.lock_write()
+ self.addCleanup(repo2.unlock)
+ vf_repository.install_revisions(repo2, [(revision, tree, signature)])
+ self.assertEqual(revision, repo2.get_revision('A'))
+ self.assertEqual(signature, repo2.get_signature_text('A'))
+
+ def test_attribute_text_store(self):
+ """Test the existence of the texts attribute."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
+ self.assertIsInstance(repo.texts,
+ versionedfile.VersionedFiles)
+
+ def test_iter_inventories_is_ordered(self):
+ # just a smoke test
+ tree = self.make_branch_and_tree('a')
+ first_revision = tree.commit('')
+ second_revision = tree.commit('')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ revs = (first_revision, second_revision)
+ invs = tree.branch.repository.iter_inventories(revs)
+ for rev_id, inv in zip(revs, invs):
+ self.assertEqual(rev_id, inv.revision_id)
+ self.assertIsInstance(inv, inventory.CommonInventory)
+
+ def test_item_keys_introduced_by(self):
+ # Make a repo with one revision and one versioned file.
+ tree = self.make_branch_and_tree('t')
+ self.build_tree(['t/foo'])
+ tree.add('foo', 'file1')
+ tree.commit('message', rev_id='rev_id')
+ repo = tree.branch.repository
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ repo.sign_revision('rev_id', gpg.LoopbackGPGStrategy(None))
+ except errors.UnsupportedOperation:
+ signature_texts = []
+ else:
+ signature_texts = ['rev_id']
+ repo.commit_write_group()
+ repo.unlock()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+
+ # Item keys will be in this order, for maximum convenience for
+        # generating data to insert into a knit repository:
+ # * files
+ # * inventory
+ # * signatures
+ # * revisions
+ expected_item_keys = [
+ ('file', 'file1', ['rev_id']),
+ ('inventory', None, ['rev_id']),
+ ('signatures', None, signature_texts),
+ ('revisions', None, ['rev_id'])]
+ item_keys = list(repo.item_keys_introduced_by(['rev_id']))
+ item_keys = [
+ (kind, file_id, list(versions))
+ for (kind, file_id, versions) in item_keys]
+
+ if repo.supports_rich_root():
+            # it from item_keys so we can compare the remainder against
+            # expected_item_keys.
+ # expected_record_names.
+ # Note that the file keys can be in any order, so this test is
+ # written to allow that.
+ inv = repo.get_inventory('rev_id')
+ root_item_key = ('file', inv.root.file_id, ['rev_id'])
+ self.assertTrue(root_item_key in item_keys)
+ item_keys.remove(root_item_key)
+
+ self.assertEqual(expected_item_keys, item_keys)
+
+ def test_attribute_text_store_basics(self):
+ """Test the basic behaviour of the text store."""
+ tree = self.make_branch_and_tree('tree')
+ repo = tree.branch.repository
+ file_id = "Foo:Bar"
+ file_key = (file_id,)
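+        # text keys are (file_id, revision_id) tuples; the expected key set and
+        # parent map are built up alongside the commits below.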
+ tree.lock_write()
+ try:
+ self.assertEqual(set(), set(repo.texts.keys()))
+ tree.add(['foo'], [file_id], ['file'])
+ tree.put_file_bytes_non_atomic(file_id, 'content\n')
+ try:
+ rev_key = (tree.commit("foo"),)
+ except errors.IllegalPath:
+ raise tests.TestNotApplicable(
+ 'file_id %r cannot be stored on this'
+ ' platform for this repo format' % (file_id,))
+ if repo._format.rich_root_data:
+ root_commit = (tree.get_root_id(),) + rev_key
+ keys = set([root_commit])
+ parents = {root_commit:()}
+ else:
+ keys = set()
+ parents = {}
+ keys.add(file_key + rev_key)
+ parents[file_key + rev_key] = ()
+ self.assertEqual(keys, set(repo.texts.keys()))
+ self.assertEqual(parents,
+ repo.texts.get_parent_map(repo.texts.keys()))
+ finally:
+ tree.unlock()
+ tree2 = self.make_branch_and_tree('tree2')
+ tree2.pull(tree.branch)
+ tree2.put_file_bytes_non_atomic('Foo:Bar', 'right\n')
+ right_key = (tree2.commit('right'),)
+ keys.add(file_key + right_key)
+ parents[file_key + right_key] = (file_key + rev_key,)
+ tree.put_file_bytes_non_atomic('Foo:Bar', 'left\n')
+ left_key = (tree.commit('left'),)
+ keys.add(file_key + left_key)
+ parents[file_key + left_key] = (file_key + rev_key,)
+ tree.merge_from_branch(tree2.branch)
+ tree.put_file_bytes_non_atomic('Foo:Bar', 'merged\n')
+ try:
+ tree.auto_resolve()
+ except errors.UnsupportedOperation:
+ pass
+ merge_key = (tree.commit('merged'),)
+ keys.add(file_key + merge_key)
+ parents[file_key + merge_key] = (file_key + left_key,
+ file_key + right_key)
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual(keys, set(repo.texts.keys()))
+ self.assertEqual(parents, repo.texts.get_parent_map(repo.texts.keys()))
+
+
+class TestCaseWithComplexRepository(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def setUp(self):
+ super(TestCaseWithComplexRepository, self).setUp()
+ tree_a = self.make_branch_and_tree('a')
+ self.bzrdir = tree_a.branch.bzrdir
+ # add a corrupt inventory 'orphan'
+ # this may need some generalising for knits.
+ tree_a.lock_write()
+ try:
+ tree_a.branch.repository.start_write_group()
+ try:
+ inv_file = tree_a.branch.repository.inventories
+ inv_file.add_lines(('orphan',), [], [])
+            except:
+                tree_a.branch.repository.abort_write_group()
+                raise
+            else:
+                tree_a.branch.repository.commit_write_group()
+ finally:
+ tree_a.unlock()
+ # add a real revision 'rev1'
+ tree_a.commit('rev1', rev_id='rev1', allow_pointless=True)
+ # add a real revision 'rev2' based on rev1
+ tree_a.commit('rev2', rev_id='rev2', allow_pointless=True)
+ # add a reference to a ghost
+ tree_a.add_parent_tree_id('ghost1')
+ try:
+ tree_a.commit('rev3', rev_id='rev3', allow_pointless=True)
+ except errors.RevisionNotPresent:
+ raise tests.TestNotApplicable(
+ "Cannot test with ghosts for this format.")
+ # add another reference to a ghost, and a second ghost.
+ tree_a.add_parent_tree_id('ghost1')
+ tree_a.add_parent_tree_id('ghost2')
+ tree_a.commit('rev4', rev_id='rev4', allow_pointless=True)
+
+ def test_revision_trees(self):
+ revision_ids = ['rev1', 'rev2', 'rev3', 'rev4']
+ repository = self.bzrdir.open_repository()
+ repository.lock_read()
+ self.addCleanup(repository.unlock)
+ trees1 = list(repository.revision_trees(revision_ids))
+ trees2 = [repository.revision_tree(t) for t in revision_ids]
+ self.assertEqual(len(trees1), len(trees2))
+ for tree1, tree2 in zip(trees1, trees2):
+ self.assertFalse(tree2.changes_from(tree1).has_changed())
+
+ def test_get_deltas_for_revisions(self):
+ repository = self.bzrdir.open_repository()
+ repository.lock_read()
+ self.addCleanup(repository.unlock)
+ revisions = [repository.get_revision(r) for r in
+ ['rev1', 'rev2', 'rev3', 'rev4']]
+ deltas1 = list(repository.get_deltas_for_revisions(revisions))
+ deltas2 = [repository.get_revision_delta(r.revision_id) for r in
+ revisions]
+ self.assertEqual(deltas1, deltas2)
+
+ def test_all_revision_ids(self):
+ # all_revision_ids -> all revisions
+ self.assertEqual(set(['rev1', 'rev2', 'rev3', 'rev4']),
+ set(self.bzrdir.open_repository().all_revision_ids()))
+
+ def test_reserved_id(self):
+ repo = self.make_repository('repository')
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ self.assertRaises(errors.ReservedId, repo.add_inventory,
+ 'reserved:', None, None)
+ self.assertRaises(errors.ReservedId, repo.add_inventory_by_delta,
+ "foo", [], 'reserved:', None)
+ self.assertRaises(errors.ReservedId, repo.add_revision,
+ 'reserved:', None)
+ finally:
+ repo.abort_write_group()
+ repo.unlock()
+
+
+class TestCaseWithCorruptRepository(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def setUp(self):
+ super(TestCaseWithCorruptRepository, self).setUp()
+        # an inventory with no parents, but whose revision has parents,
+        # i.e. a ghost.
+ repo = self.make_repository('inventory_with_unnecessary_ghost')
+ repo.lock_write()
+ repo.start_write_group()
+ inv = inventory.Inventory(revision_id = 'ghost')
+ inv.root.revision = 'ghost'
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((root_id, 'ghost'), [], [])
+ sha1 = repo.add_inventory('ghost', inv, [])
+ rev = _mod_revision.Revision(
+ timestamp=0, timezone=None, committer="Foo Bar <foo@example.com>",
+ message="Message", inventory_sha1=sha1, revision_id='ghost')
+ rev.parent_ids = ['the_ghost']
+ try:
+ repo.add_revision('ghost', rev)
+ except (errors.NoSuchRevision, errors.RevisionNotPresent):
+ raise tests.TestNotApplicable(
+ "Cannot test with ghosts for this format.")
+
+ inv = inventory.Inventory(revision_id = 'the_ghost')
+ inv.root.revision = 'the_ghost'
+ if repo.supports_rich_root():
+ root_id = inv.root.file_id
+ repo.texts.add_lines((root_id, 'the_ghost'), [], [])
+ sha1 = repo.add_inventory('the_ghost', inv, [])
+ rev = _mod_revision.Revision(
+ timestamp=0, timezone=None, committer="Foo Bar <foo@example.com>",
+ message="Message", inventory_sha1=sha1, revision_id='the_ghost')
+ rev.parent_ids = []
+ repo.add_revision('the_ghost', rev)
+        # check it is set up usefully
+ inv_weave = repo.inventories
+ possible_parents = (None, (('ghost',),))
+ self.assertSubset(inv_weave.get_parent_map([('ghost',)])[('ghost',)],
+ possible_parents)
+ repo.commit_write_group()
+ repo.unlock()
+
+ def test_corrupt_revision_access_asserts_if_reported_wrong(self):
+ repo_url = self.get_url('inventory_with_unnecessary_ghost')
+ repo = _mod_repository.Repository.open(repo_url)
+ m = MatchesAncestry(repo, 'ghost')
+ reported_wrong = False
+ try:
+ if m.match(['the_ghost', 'ghost']) is not None:
+ reported_wrong = True
+ except errors.CorruptRepository:
+ # caught the bad data:
+ return
+ if not reported_wrong:
+ return
+ self.assertRaises(errors.CorruptRepository, repo.get_revision, 'ghost')
+
+ def test_corrupt_revision_get_revision_reconcile(self):
+ repo_url = self.get_url('inventory_with_unnecessary_ghost')
+ repo = _mod_repository.Repository.open(repo_url)
+ repo.get_revision_reconcile('ghost')
diff --git a/bzrlib/tests/per_repository_vf/test_write_group.py b/bzrlib/tests/per_repository_vf/test_write_group.py
new file mode 100644
index 0000000..28d04b1
--- /dev/null
+++ b/bzrlib/tests/per_repository_vf/test_write_group.py
@@ -0,0 +1,656 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for repository write groups."""
+
+import sys
+
+from bzrlib import (
+ branch,
+ controldir,
+ errors,
+ memorytree,
+ remote,
+ tests,
+ versionedfile,
+ )
+from bzrlib.tests.per_repository_vf import (
+ TestCaseWithRepository,
+ all_repository_vf_format_scenarios,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestGetMissingParentInventories(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def test_empty_get_missing_parent_inventories(self):
+ """A new write group has no missing parent inventories."""
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo.start_write_group()
+ try:
+ self.assertEqual(set(), set(repo.get_missing_parent_inventories()))
+ finally:
+ repo.commit_write_group()
+ repo.unlock()
+
+ def branch_trunk_and_make_tree(self, trunk_repo, relpath):
+ tree = self.make_branch_and_memory_tree('branch')
+ trunk_repo.lock_read()
+ self.addCleanup(trunk_repo.unlock)
+ tree.branch.repository.fetch(trunk_repo, revision_id='rev-1')
+ tree.set_parent_ids(['rev-1'])
+ return tree
+
+ def make_first_commit(self, repo):
+ trunk = repo.bzrdir.create_branch()
+ tree = memorytree.MemoryTree.create_on_branch(trunk)
+ tree.lock_write()
+ tree.add([''], ['TREE_ROOT'], ['directory'])
+ tree.add(['dir'], ['dir-id'], ['directory'])
+ tree.add(['filename'], ['file-id'], ['file'])
+ tree.put_file_bytes_non_atomic('file-id', 'content\n')
+ tree.commit('Trunk commit', rev_id='rev-0')
+ tree.commit('Trunk commit', rev_id='rev-1')
+ tree.unlock()
+
+ def make_new_commit_in_new_repo(self, trunk_repo, parents=None):
+ tree = self.branch_trunk_and_make_tree(trunk_repo, 'branch')
+ tree.set_parent_ids(parents)
+ tree.commit('Branch commit', rev_id='rev-2')
+ branch_repo = tree.branch.repository
+ branch_repo.lock_read()
+ self.addCleanup(branch_repo.unlock)
+ return branch_repo
+
+ def make_stackable_repo(self, relpath='trunk'):
+ if isinstance(self.repository_format, remote.RemoteRepositoryFormat):
+ # RemoteRepository by default builds a default format real
+            # repository, but the default format is unstackable. So explicitly
+ # make a stackable real repository and use that.
+ repo = self.make_repository(relpath, format='1.9')
+ dir = controldir.ControlDir.open(self.get_url(relpath))
+ repo = dir.open_repository()
+ else:
+ repo = self.make_repository(relpath)
+ if not repo._format.supports_external_lookups:
+ raise tests.TestNotApplicable('format not stackable')
+ repo.bzrdir._format.set_branch_format(branch.BzrBranchFormat7())
+ return repo
+
+ def reopen_repo_and_resume_write_group(self, repo):
+ try:
+ resume_tokens = repo.suspend_write_group()
+ except errors.UnsuspendableWriteGroup:
+ # If we got this far, and this repo does not support resuming write
+ # groups, then get_missing_parent_inventories works in all
+ # cases this repo supports.
+ repo.unlock()
+ return
+ repo.unlock()
+ reopened_repo = repo.bzrdir.open_repository()
+ reopened_repo.lock_write()
+ self.addCleanup(reopened_repo.unlock)
+ reopened_repo.resume_write_group(resume_tokens)
+ return reopened_repo
+
+ def test_ghost_revision(self):
+ """A parent inventory may be absent if all the needed texts are present.
+ i.e., a ghost revision isn't (necessarily) considered to be a missing
+ parent inventory.
+ """
+ # Make a trunk with one commit.
+ trunk_repo = self.make_stackable_repo()
+ self.make_first_commit(trunk_repo)
+ trunk_repo.lock_read()
+ self.addCleanup(trunk_repo.unlock)
+ # Branch the trunk, add a new commit.
+ branch_repo = self.make_new_commit_in_new_repo(
+ trunk_repo, parents=['rev-1', 'ghost-rev'])
+ inv = branch_repo.get_inventory('rev-2')
+ # Make a new repo stacked on trunk, and then copy into it:
+ # - all texts in rev-2
+ # - the new inventory (rev-2)
+ # - the new revision (rev-2)
+ repo = self.make_stackable_repo('stacked')
+ repo.lock_write()
+ repo.start_write_group()
+        # Add all texts from the rev-2 inventory. Note that this has to exclude
+ # the root if the repo format does not support rich roots.
+ rich_root = branch_repo._format.rich_root_data
+ all_texts = [
+ (ie.file_id, ie.revision) for ie in inv.iter_just_entries()
+ if rich_root or inv.id2path(ie.file_id) != '']
+ repo.texts.insert_record_stream(
+ branch_repo.texts.get_record_stream(all_texts, 'unordered', False))
+ # Add inventory and revision for rev-2.
+ repo.add_inventory('rev-2', inv, ['rev-1', 'ghost-rev'])
+ repo.revisions.insert_record_stream(
+ branch_repo.revisions.get_record_stream(
+ [('rev-2',)], 'unordered', False))
+ # Now, no inventories are reported as missing, even though there is a
+ # ghost.
+ self.assertEqual(set(), repo.get_missing_parent_inventories())
+ # Resuming the write group does not affect
+ # get_missing_parent_inventories.
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertEqual(set(), reopened_repo.get_missing_parent_inventories())
+ reopened_repo.abort_write_group()
+
+ def test_get_missing_parent_inventories(self):
+ """A stacked repo with a single revision and inventory (no parent
+ inventory) in it must have all the texts in its inventory (even if not
+        changed w.r.t. the absent parent), otherwise it will report missing
+ texts/parent inventory.
+
+        The core of this test is that a file was changed in rev-1, but in a
+        stacked repo that only has rev-2.
+ """
+ # Make a trunk with one commit.
+ trunk_repo = self.make_stackable_repo()
+ self.make_first_commit(trunk_repo)
+ trunk_repo.lock_read()
+ self.addCleanup(trunk_repo.unlock)
+ # Branch the trunk, add a new commit.
+ branch_repo = self.make_new_commit_in_new_repo(
+ trunk_repo, parents=['rev-1'])
+ inv = branch_repo.get_inventory('rev-2')
+ # Make a new repo stacked on trunk, and copy the new commit's revision
+ # and inventory records to it.
+ repo = self.make_stackable_repo('stacked')
+ repo.lock_write()
+ repo.start_write_group()
+ # Insert a single fulltext inv (using add_inventory because it's
+ # simpler than insert_record_stream)
+ repo.add_inventory('rev-2', inv, ['rev-1'])
+ repo.revisions.insert_record_stream(
+ branch_repo.revisions.get_record_stream(
+ [('rev-2',)], 'unordered', False))
+ # There should be no missing compression parents
+ self.assertEqual(set(),
+ repo.inventories.get_missing_compression_parent_keys())
+ self.assertEqual(
+ set([('inventories', 'rev-1')]),
+ repo.get_missing_parent_inventories())
+ # Resuming the write group does not affect
+ # get_missing_parent_inventories.
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertEqual(
+ set([('inventories', 'rev-1')]),
+ reopened_repo.get_missing_parent_inventories())
+ # Adding the parent inventory satisfies get_missing_parent_inventories.
+ reopened_repo.inventories.insert_record_stream(
+ branch_repo.inventories.get_record_stream(
+ [('rev-1',)], 'unordered', False))
+ self.assertEqual(
+ set(), reopened_repo.get_missing_parent_inventories())
+ reopened_repo.abort_write_group()
+
+ def test_get_missing_parent_inventories_check(self):
+ builder = self.make_branch_builder('test')
+ builder.build_snapshot('A-id', ['ghost-parent-id'], [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))],
+ allow_leftmost_as_ghost=True)
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ repo = self.make_repository('test-repo')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ self.addCleanup(repo.abort_write_group)
+ # Now, add the objects manually
+ text_keys = [('file-id', 'A-id')]
+ if repo.supports_rich_root():
+ text_keys.append(('root-id', 'A-id'))
+ # Directly add the texts, inventory, and revision object for 'A-id'
+ repo.texts.insert_record_stream(b.repository.texts.get_record_stream(
+ text_keys, 'unordered', True))
+ repo.add_revision('A-id', b.repository.get_revision('A-id'),
+ b.repository.get_inventory('A-id'))
+ get_missing = repo.get_missing_parent_inventories
+ if repo._format.supports_external_lookups:
+ self.assertEqual(set([('inventories', 'ghost-parent-id')]),
+ get_missing(check_for_missing_texts=False))
+ self.assertEqual(set(), get_missing(check_for_missing_texts=True))
+ self.assertEqual(set(), get_missing())
+ else:
+ # If we don't support external lookups, we always return empty
+ self.assertEqual(set(), get_missing(check_for_missing_texts=False))
+ self.assertEqual(set(), get_missing(check_for_missing_texts=True))
+ self.assertEqual(set(), get_missing())
+
+ def test_insert_stream_passes_resume_info(self):
+ repo = self.make_repository('test-repo')
+ if (not repo._format.supports_external_lookups or
+ isinstance(repo, remote.RemoteRepository)):
+ raise tests.TestNotApplicable(
+ 'only valid for direct connections to resumable repos')
+ # log calls to get_missing_parent_inventories, so that we can assert it
+ # is called with the correct parameters
+ call_log = []
+ orig = repo.get_missing_parent_inventories
+ def get_missing(check_for_missing_texts=True):
+ call_log.append(check_for_missing_texts)
+ return orig(check_for_missing_texts=check_for_missing_texts)
+ repo.get_missing_parent_inventories = get_missing
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ sink = repo._get_sink()
+ sink.insert_stream((), repo._format, [])
+ self.assertEqual([False], call_log)
+ del call_log[:]
+ repo.start_write_group()
+ # We need to insert something, or suspend_write_group won't actually
+ # create a token
+ repo.texts.insert_record_stream([versionedfile.FulltextContentFactory(
+ ('file-id', 'rev-id'), (), None, 'lines\n')])
+ tokens = repo.suspend_write_group()
+ self.assertNotEqual([], tokens)
+ sink.insert_stream((), repo._format, tokens)
+ self.assertEqual([True], call_log)
+
+ def test_insert_stream_without_locking_fails_without_lock(self):
+ repo = self.make_repository('test-repo')
+ sink = repo._get_sink()
+ stream = [('texts', [versionedfile.FulltextContentFactory(
+ ('file-id', 'rev-id'), (), None, 'lines\n')])]
+ self.assertRaises(errors.ObjectNotLocked,
+ sink.insert_stream_without_locking, stream, repo._format)
+
+ def test_insert_stream_without_locking_fails_without_write_group(self):
+ repo = self.make_repository('test-repo')
+ self.addCleanup(repo.lock_write().unlock)
+ sink = repo._get_sink()
+ stream = [('texts', [versionedfile.FulltextContentFactory(
+ ('file-id', 'rev-id'), (), None, 'lines\n')])]
+ self.assertRaises(errors.BzrError,
+ sink.insert_stream_without_locking, stream, repo._format)
+
+ def test_insert_stream_without_locking(self):
+ repo = self.make_repository('test-repo')
+ self.addCleanup(repo.lock_write().unlock)
+ repo.start_write_group()
+ sink = repo._get_sink()
+ stream = [('texts', [versionedfile.FulltextContentFactory(
+ ('file-id', 'rev-id'), (), None, 'lines\n')])]
+ missing_keys = sink.insert_stream_without_locking(stream, repo._format)
+ repo.commit_write_group()
+ self.assertEqual(set(), missing_keys)
+
+
+class TestResumeableWriteGroup(TestCaseWithRepository):
+
+ scenarios = all_repository_vf_format_scenarios()
+
+ def make_write_locked_repo(self, relpath='repo'):
+ repo = self.make_repository(relpath)
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ return repo
+
+ def reopen_repo(self, repo):
+ same_repo = repo.bzrdir.open_repository()
+ same_repo.lock_write()
+ self.addCleanup(same_repo.unlock)
+ return same_repo
+
+ def require_suspendable_write_groups(self, reason):
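+        # Probe with a throwaway repository: if suspend_write_group raises
+        # UnsuspendableWriteGroup, mark the calling test as not applicable.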
+ repo = self.make_repository('__suspend_test')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ try:
+ wg_tokens = repo.suspend_write_group()
+ except errors.UnsuspendableWriteGroup:
+ repo.abort_write_group()
+ raise tests.TestNotApplicable(reason)
+
+ def test_suspend_write_group(self):
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
+ try:
+ wg_tokens = repo.suspend_write_group()
+ except errors.UnsuspendableWriteGroup:
+ # The contract for repos that don't support suspending write groups
+ # is that suspend_write_group raises UnsuspendableWriteGroup, but
+ # is otherwise a no-op. So we can still e.g. abort the write group
+ # as usual.
+ self.assertTrue(repo.is_in_write_group())
+ repo.abort_write_group()
+ else:
+ # After suspending a write group we are no longer in a write group
+ self.assertFalse(repo.is_in_write_group())
+ # suspend_write_group returns a list of tokens, which are strs. If
+ # no other write groups were resumed, there will only be one token.
+ self.assertEqual(1, len(wg_tokens))
+ self.assertIsInstance(wg_tokens[0], str)
+ # See also test_pack_repository's test of the same name.
+
+ def test_resume_write_group_then_abort(self):
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ try:
+ wg_tokens = repo.suspend_write_group()
+ except errors.UnsuspendableWriteGroup:
+ # If the repo does not support suspending write groups, it doesn't
+ # support resuming them either.
+ repo.abort_write_group()
+ self.assertRaises(
+ errors.UnsuspendableWriteGroup, repo.resume_write_group, [])
+ else:
+ #self.assertEqual([], list(repo.texts.keys()))
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ self.assertEqual([text_key], list(same_repo.texts.keys()))
+ self.assertTrue(same_repo.is_in_write_group())
+ same_repo.abort_write_group()
+ self.assertEqual([], list(repo.texts.keys()))
+ # See also test_pack_repository's test of the same name.
+
+ def test_multiple_resume_write_group(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ first_key = ('file-id', 'revid')
+ repo.texts.add_lines(first_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ self.assertTrue(same_repo.is_in_write_group())
+ second_key = ('file-id', 'second-revid')
+ same_repo.texts.add_lines(second_key, (first_key,), ['more lines'])
+ try:
+ new_wg_tokens = same_repo.suspend_write_group()
+ except:
+ e = sys.exc_info()
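+            # abort quietly, then re-raise the original exception with its
+            # traceback (Python 2 three-argument raise)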
+ same_repo.abort_write_group(suppress_errors=True)
+ raise e[0], e[1], e[2]
+ self.assertEqual(2, len(new_wg_tokens))
+ self.assertSubset(wg_tokens, new_wg_tokens)
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(new_wg_tokens)
+ both_keys = set([first_key, second_key])
+ self.assertEqual(both_keys, same_repo.texts.keys())
+ same_repo.abort_write_group()
+
+ def test_no_op_suspend_resume(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ new_wg_tokens = same_repo.suspend_write_group()
+ self.assertEqual(wg_tokens, new_wg_tokens)
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ self.assertEqual([text_key], list(same_repo.texts.keys()))
+ same_repo.abort_write_group()
+
+ def test_read_after_suspend_fails(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test suspend on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ self.assertEqual([], list(repo.texts.keys()))
+
+ def test_read_after_second_suspend_fails(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test suspend on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ same_repo.suspend_write_group()
+ self.assertEqual([], list(same_repo.texts.keys()))
+
+ def test_read_after_resume_abort_fails(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test suspend on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ same_repo.abort_write_group()
+ self.assertEqual([], list(same_repo.texts.keys()))
+
+ def test_cannot_resume_aborted_write_group(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ same_repo.abort_write_group()
+ same_repo = self.reopen_repo(repo)
+ self.assertRaises(
+ errors.UnresumableWriteGroup, same_repo.resume_write_group,
+ wg_tokens)
+
+ def test_commit_resumed_write_group_no_new_data(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ same_repo.commit_write_group()
+ self.assertEqual([text_key], list(same_repo.texts.keys()))
+ self.assertEqual(
+ 'lines', same_repo.texts.get_record_stream([text_key],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+ self.assertRaises(
+ errors.UnresumableWriteGroup, same_repo.resume_write_group,
+ wg_tokens)
+
+ def test_commit_resumed_write_group_plus_new_data(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ first_key = ('file-id', 'revid')
+ repo.texts.add_lines(first_key, (), ['lines'])
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ second_key = ('file-id', 'second-revid')
+ same_repo.texts.add_lines(second_key, (first_key,), ['more lines'])
+ same_repo.commit_write_group()
+ self.assertEqual(
+ set([first_key, second_key]), set(same_repo.texts.keys()))
+ self.assertEqual(
+ 'lines', same_repo.texts.get_record_stream([first_key],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+ self.assertEqual(
+ 'more lines', same_repo.texts.get_record_stream([second_key],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+
+ def make_source_with_delta_record(self):
+ # Make a source repository with a delta record in it.
+ source_repo = self.make_write_locked_repo('source')
+ source_repo.start_write_group()
+ key_base = ('file-id', 'base')
+ key_delta = ('file-id', 'delta')
+ def text_stream():
+ yield versionedfile.FulltextContentFactory(
+ key_base, (), None, 'lines\n')
+ yield versionedfile.FulltextContentFactory(
+ key_delta, (key_base,), None, 'more\nlines\n')
+ source_repo.texts.insert_record_stream(text_stream())
+ source_repo.commit_write_group()
+ return source_repo
+
+ def test_commit_resumed_write_group_with_missing_parents(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ source_repo = self.make_source_with_delta_record()
+ key_base = ('file-id', 'base')
+ key_delta = ('file-id', 'delta')
+ # Start a write group, insert just a delta.
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ stream = source_repo.texts.get_record_stream(
+ [key_delta], 'unordered', False)
+ repo.texts.insert_record_stream(stream)
+        # It's either not committable due to the missing compression parent, or
+ # the stacked location has already filled in the fulltext.
+ try:
+ repo.commit_write_group()
+ except errors.BzrCheckError:
+ # It refused to commit because we have a missing parent
+ pass
+ else:
+ same_repo = self.reopen_repo(repo)
+ same_repo.lock_read()
+ record = same_repo.texts.get_record_stream([key_delta],
+ 'unordered', True).next()
+ self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext'))
+ return
+        # Merely suspending and resuming doesn't make it committable either.
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ self.assertRaises(
+ errors.BzrCheckError, same_repo.commit_write_group)
+ same_repo.abort_write_group()
+
+ def test_commit_resumed_write_group_adding_missing_parents(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ source_repo = self.make_source_with_delta_record()
+ key_base = ('file-id', 'base')
+ key_delta = ('file-id', 'delta')
+ # Start a write group.
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ # Add some content so this isn't an empty write group (which may return
+ # 0 tokens)
+ text_key = ('file-id', 'revid')
+ repo.texts.add_lines(text_key, (), ['lines'])
+ # Suspend it, then resume it.
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ # Add a record with a missing compression parent
+ stream = source_repo.texts.get_record_stream(
+ [key_delta], 'unordered', False)
+ same_repo.texts.insert_record_stream(stream)
+ # Just like if we'd added that record without a suspend/resume cycle,
+ # commit_write_group fails.
+ try:
+ same_repo.commit_write_group()
+ except errors.BzrCheckError:
+ pass
+ else:
+ # If the commit_write_group didn't fail, that is because the
+ # insert_record_stream already gave it a fulltext.
+ same_repo = self.reopen_repo(repo)
+ same_repo.lock_read()
+ record = same_repo.texts.get_record_stream([key_delta],
+ 'unordered', True).next()
+ self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext'))
+ return
+ same_repo.abort_write_group()
+
+ def test_add_missing_parent_after_resume(self):
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ source_repo = self.make_source_with_delta_record()
+ key_base = ('file-id', 'base')
+ key_delta = ('file-id', 'delta')
+ # Start a write group, insert just a delta.
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ stream = source_repo.texts.get_record_stream(
+ [key_delta], 'unordered', False)
+ repo.texts.insert_record_stream(stream)
+ # Suspend it, then resume it.
+ wg_tokens = repo.suspend_write_group()
+ same_repo = self.reopen_repo(repo)
+ same_repo.resume_write_group(wg_tokens)
+ # Fill in the missing compression parent.
+ stream = source_repo.texts.get_record_stream(
+ [key_base], 'unordered', False)
+ same_repo.texts.insert_record_stream(stream)
+ same_repo.commit_write_group()
+
+ def test_suspend_empty_initial_write_group(self):
+ """Suspending a write group with no writes returns an empty token
+ list.
+ """
+ self.require_suspendable_write_groups(
+ 'Cannot test suspend on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.start_write_group()
+ wg_tokens = repo.suspend_write_group()
+ self.assertEqual([], wg_tokens)
+
+ def test_resume_empty_initial_write_group(self):
+ """Resuming an empty token list is equivalent to start_write_group."""
+ self.require_suspendable_write_groups(
+ 'Cannot test resume on repo that does not support suspending')
+ repo = self.make_write_locked_repo()
+ repo.resume_write_group([])
+ repo.abort_write_group()
diff --git a/bzrlib/tests/per_transport.py b/bzrlib/tests/per_transport.py
new file mode 100644
index 0000000..820dcc8
--- /dev/null
+++ b/bzrlib/tests/per_transport.py
@@ -0,0 +1,1872 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Transport implementations.
+
+Transport implementations tested here are supplied by
+TransportTestProviderAdapter.
+"""
+
+import itertools
+import os
+from cStringIO import StringIO
+from StringIO import StringIO as pyStringIO
+import stat
+import sys
+
+from bzrlib import (
+ errors,
+ osutils,
+ pyutils,
+ tests,
+ transport as _mod_transport,
+ urlutils,
+ )
+from bzrlib.errors import (ConnectionError,
+ FileExists,
+ InvalidURL,
+ NoSuchFile,
+ PathError,
+ TransportNotPossible,
+ )
+from bzrlib.osutils import getcwd
+from bzrlib.smart import medium
+from bzrlib.tests import (
+ TestSkipped,
+ TestNotApplicable,
+ multiply_tests,
+ )
+from bzrlib.tests import test_server
+from bzrlib.tests.test_transport import TestTransportImplementation
+from bzrlib.transport import (
+ ConnectedTransport,
+ Transport,
+ _get_transport_modules,
+ )
+from bzrlib.transport.memory import MemoryTransport
+from bzrlib.transport.remote import RemoteTransport
+
+
+def get_transport_test_permutations(module):
+ """Get the permutations module wants to have tested."""
+ if getattr(module, 'get_test_permutations', None) is None:
+ raise AssertionError(
+ "transport module %s doesn't provide get_test_permutations()"
+ % module.__name__)
+ return module.get_test_permutations()
+
+
+def transport_test_permutations():
+ """Return a list of the klass, server_factory pairs to test."""
+ result = []
+ for module in _get_transport_modules():
+ try:
+ permutations = get_transport_test_permutations(
+ pyutils.get_named_object(module))
+ for (klass, server_factory) in permutations:
+ scenario = ('%s,%s' % (klass.__name__, server_factory.__name__),
+ {"transport_class":klass,
+ "transport_server":server_factory})
+ result.append(scenario)
+ except errors.DependencyNotPresent, e:
+ # Continue even if a dependency prevents us
+ # from adding this test
+ pass
+ return result
+
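+# Each scenario above is a (name, parameter-dict) pair; for a hypothetical
+# transport class FooTransport served by FooServer it would look like
+#   ('FooTransport,FooServer',
+#    {'transport_class': FooTransport, 'transport_server': FooServer})
+# and load_tests() below runs every TransportTests method once per scenario.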
+
+def load_tests(standard_tests, module, loader):
+ """Multiply tests for tranport implementations."""
+ result = loader.suiteClass()
+ scenarios = transport_test_permutations()
+ return multiply_tests(standard_tests, scenarios, result)
+
+
+class TransportTests(TestTransportImplementation):
+
+ def setUp(self):
+ super(TransportTests, self).setUp()
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+
+ def check_transport_contents(self, content, transport, relpath):
+ """Check that transport.get_bytes(relpath) == content."""
+ self.assertEqualDiff(content, transport.get_bytes(relpath))
+
+ def test_ensure_base_missing(self):
+ """.ensure_base() should create the directory if it doesn't exist"""
+ t = self.get_transport()
+ t_a = t.clone('a')
+ if t_a.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t_a.ensure_base)
+ return
+ self.assertTrue(t_a.ensure_base())
+ self.assertTrue(t.has('a'))
+
+ def test_ensure_base_exists(self):
+ """.ensure_base() should just be happy if it already exists"""
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+
+ t.mkdir('a')
+ t_a = t.clone('a')
+ # ensure_base returns False if it didn't create the base
+ self.assertFalse(t_a.ensure_base())
+
+ def test_ensure_base_missing_parent(self):
+ """.ensure_base() will fail if the parent dir doesn't exist"""
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+
+ t_a = t.clone('a')
+ t_b = t_a.clone('b')
+ self.assertRaises(NoSuchFile, t_b.ensure_base)
+
+ def test_external_url(self):
+ """.external_url either works or raises InProcessTransport."""
+ t = self.get_transport()
+ try:
+ t.external_url()
+ except errors.InProcessTransport:
+ pass
+
+ def test_has(self):
+ t = self.get_transport()
+
+ files = ['a', 'b', 'e', 'g', '%']
+ self.build_tree(files, transport=t)
+ self.assertEqual(True, t.has('a'))
+ self.assertEqual(False, t.has('c'))
+ self.assertEqual(True, t.has(urlutils.escape('%')))
+ self.assertEqual(list(t.has_multi(['a', 'b', 'c', 'd',
+ 'e', 'f', 'g', 'h'])),
+ [True, True, False, False,
+ True, False, True, False])
+ self.assertEqual(True, t.has_any(['a', 'b', 'c']))
+ self.assertEqual(False, t.has_any(['c', 'd', 'f',
+ urlutils.escape('%%')]))
+ self.assertEqual(list(t.has_multi(iter(['a', 'b', 'c', 'd',
+ 'e', 'f', 'g', 'h']))),
+ [True, True, False, False,
+ True, False, True, False])
+ self.assertEqual(False, t.has_any(['c', 'c', 'c']))
+ self.assertEqual(True, t.has_any(['b', 'b', 'b']))
+
+ def test_has_root_works(self):
+ if self.transport_server is test_server.SmartTCPServer_for_testing:
+ raise TestNotApplicable(
+ "SmartTCPServer_for_testing intentionally does not allow "
+ "access to /.")
+ current_transport = self.get_transport()
+ self.assertTrue(current_transport.has('/'))
+ root = current_transport.clone('/')
+ self.assertTrue(root.has(''))
+
+ def test_get(self):
+ t = self.get_transport()
+
+ files = ['a', 'b', 'e', 'g']
+ contents = ['contents of a\n',
+ 'contents of b\n',
+ 'contents of e\n',
+ 'contents of g\n',
+ ]
+ self.build_tree(files, transport=t, line_endings='binary')
+ self.check_transport_contents('contents of a\n', t, 'a')
+ content_f = t.get_multi(files)
+ # Use itertools.izip() instead of zip() or map(), since those fully
+ # evaluate their inputs; the transport requests should be issued and
+ # handled sequentially (we don't want to force the transport to buffer).
+ for content, f in itertools.izip(contents, content_f):
+ self.assertEqual(content, f.read())
+
+ content_f = t.get_multi(iter(files))
+ # Use itertools.izip() for the same reason
+ for content, f in itertools.izip(contents, content_f):
+ self.assertEqual(content, f.read())
+
+ def test_get_unknown_file(self):
+ t = self.get_transport()
+ files = ['a', 'b']
+ contents = ['contents of a\n',
+ 'contents of b\n',
+ ]
+ self.build_tree(files, transport=t, line_endings='binary')
+ self.assertRaises(NoSuchFile, t.get, 'c')
+ def iterate_and_close(func, *args):
+ for f in func(*args):
+ # We call f.read() here because things like paramiko actually
+ # spawn a thread to prefetch the content, which we want to
+ # consume before we close the handle.
+ content = f.read()
+ f.close()
+ self.assertRaises(NoSuchFile, iterate_and_close,
+ t.get_multi, ['a', 'b', 'c'])
+ self.assertRaises(NoSuchFile, iterate_and_close,
+ t.get_multi, iter(['a', 'b', 'c']))
+
+ def test_get_directory_read_gives_ReadError(self):
+ """consistent errors for read() on a file returned by get()."""
+ t = self.get_transport()
+ if t.is_readonly():
+ self.build_tree(['a directory/'])
+ else:
+ t.mkdir('a%20directory')
+ # getting the file must either work or fail with a PathError
+ try:
+ a_file = t.get('a%20directory')
+ except (errors.PathError, errors.RedirectRequested):
+ # early failure, return immediately.
+ return
+ # having got a file, read() must either work (e.g. http reading a dir
+ # listing) or fail with ReadError
+ try:
+ a_file.read()
+ except errors.ReadError:
+ pass
+
+ def test_get_bytes(self):
+ t = self.get_transport()
+
+ files = ['a', 'b', 'e', 'g']
+ contents = ['contents of a\n',
+ 'contents of b\n',
+ 'contents of e\n',
+ 'contents of g\n',
+ ]
+ self.build_tree(files, transport=t, line_endings='binary')
+ self.check_transport_contents('contents of a\n', t, 'a')
+
+ for content, fname in zip(contents, files):
+ self.assertEqual(content, t.get_bytes(fname))
+
+ def test_get_bytes_unknown_file(self):
+ t = self.get_transport()
+ self.assertRaises(NoSuchFile, t.get_bytes, 'c')
+
+ def test_get_with_open_write_stream_sees_all_content(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ handle = t.open_write_stream('foo')
+ try:
+ handle.write('b')
+ self.assertEqual('b', t.get_bytes('foo'))
+ finally:
+ handle.close()
+
+ def test_get_bytes_with_open_write_stream_sees_all_content(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ handle = t.open_write_stream('foo')
+ try:
+ handle.write('b')
+ self.assertEqual('b', t.get_bytes('foo'))
+ f = t.get('foo')
+ try:
+ self.assertEqual('b', f.read())
+ finally:
+ f.close()
+ finally:
+ handle.close()
+
+ def test_put_bytes(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.put_bytes, 'a', 'some text for a\n')
+ return
+
+ t.put_bytes('a', 'some text for a\n')
+ self.assertTrue(t.has('a'))
+ self.check_transport_contents('some text for a\n', t, 'a')
+
+ # The contents should be overwritten
+ t.put_bytes('a', 'new text for a\n')
+ self.check_transport_contents('new text for a\n', t, 'a')
+
+ self.assertRaises(NoSuchFile,
+ t.put_bytes, 'path/doesnt/exist/c', 'contents')
+
+ def test_put_bytes_non_atomic(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.put_bytes_non_atomic, 'a', 'some text for a\n')
+ return
+
+ self.assertFalse(t.has('a'))
+ t.put_bytes_non_atomic('a', 'some text for a\n')
+ self.assertTrue(t.has('a'))
+ self.check_transport_contents('some text for a\n', t, 'a')
+ # Put also replaces contents
+ t.put_bytes_non_atomic('a', 'new\ncontents for\na\n')
+ self.check_transport_contents('new\ncontents for\na\n', t, 'a')
+
+ # Make sure we can create another file
+ t.put_bytes_non_atomic('d', 'contents for\nd\n')
+ # And overwrite 'a' with empty contents
+ t.put_bytes_non_atomic('a', '')
+ self.check_transport_contents('contents for\nd\n', t, 'd')
+ self.check_transport_contents('', t, 'a')
+
+ self.assertRaises(NoSuchFile, t.put_bytes_non_atomic, 'no/such/path',
+ 'contents\n')
+ # Now test the create_parent flag
+ self.assertRaises(NoSuchFile, t.put_bytes_non_atomic, 'dir/a',
+ 'contents\n')
+ self.assertFalse(t.has('dir/a'))
+ t.put_bytes_non_atomic('dir/a', 'contents for dir/a\n',
+ create_parent_dir=True)
+ self.check_transport_contents('contents for dir/a\n', t, 'dir/a')
+
+ # But we still get NoSuchFile if we can't make the parent dir
+ self.assertRaises(NoSuchFile, t.put_bytes_non_atomic, 'not/there/a',
+ 'contents\n',
+ create_parent_dir=True)
+
+ def test_put_bytes_permissions(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+ if not t._can_roundtrip_unix_modebits():
+ # Can't roundtrip, so no need to run this test
+ return
+ t.put_bytes('mode644', 'test text\n', mode=0644)
+ self.assertTransportMode(t, 'mode644', 0644)
+ t.put_bytes('mode666', 'test text\n', mode=0666)
+ self.assertTransportMode(t, 'mode666', 0666)
+ t.put_bytes('mode600', 'test text\n', mode=0600)
+ self.assertTransportMode(t, 'mode600', 0600)
+ # Yes, you can put_bytes a file such that it becomes readonly
+ t.put_bytes('mode400', 'test text\n', mode=0400)
+ self.assertTransportMode(t, 'mode400', 0400)
+
+ # The default permissions should be based on the current umask
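+ # (e.g. with a umask of 022, 0666 & ~umask gives 0644)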
+ umask = osutils.get_umask()
+ t.put_bytes('nomode', 'test text\n', mode=None)
+ self.assertTransportMode(t, 'nomode', 0666 & ~umask)
+
+ def test_put_bytes_non_atomic_permissions(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+ if not t._can_roundtrip_unix_modebits():
+ # Can't roundtrip, so no need to run this test
+ return
+ t.put_bytes_non_atomic('mode644', 'test text\n', mode=0644)
+ self.assertTransportMode(t, 'mode644', 0644)
+ t.put_bytes_non_atomic('mode666', 'test text\n', mode=0666)
+ self.assertTransportMode(t, 'mode666', 0666)
+ t.put_bytes_non_atomic('mode600', 'test text\n', mode=0600)
+ self.assertTransportMode(t, 'mode600', 0600)
+ t.put_bytes_non_atomic('mode400', 'test text\n', mode=0400)
+ self.assertTransportMode(t, 'mode400', 0400)
+
+ # The default permissions should be based on the current umask
+ umask = osutils.get_umask()
+ t.put_bytes_non_atomic('nomode', 'test text\n', mode=None)
+ self.assertTransportMode(t, 'nomode', 0666 & ~umask)
+
+ # We should also be able to set the mode for a parent directory
+ # when it is created
+ t.put_bytes_non_atomic('dir700/mode664', 'test text\n', mode=0664,
+ dir_mode=0700, create_parent_dir=True)
+ self.assertTransportMode(t, 'dir700', 0700)
+ t.put_bytes_non_atomic('dir770/mode664', 'test text\n', mode=0664,
+ dir_mode=0770, create_parent_dir=True)
+ self.assertTransportMode(t, 'dir770', 0770)
+ t.put_bytes_non_atomic('dir777/mode664', 'test text\n', mode=0664,
+ dir_mode=0777, create_parent_dir=True)
+ self.assertTransportMode(t, 'dir777', 0777)
+
+ def test_put_file(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.put_file, 'a', StringIO('some text for a\n'))
+ return
+
+ result = t.put_file('a', StringIO('some text for a\n'))
+ # put_file returns the length of the data written
+ self.assertEqual(16, result)
+ self.assertTrue(t.has('a'))
+ self.check_transport_contents('some text for a\n', t, 'a')
+ # Put also replaces contents
+ result = t.put_file('a', StringIO('new\ncontents for\na\n'))
+ self.assertEqual(19, result)
+ self.check_transport_contents('new\ncontents for\na\n', t, 'a')
+ self.assertRaises(NoSuchFile,
+ t.put_file, 'path/doesnt/exist/c',
+ StringIO('contents'))
+
+ def test_put_file_non_atomic(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.put_file_non_atomic, 'a', StringIO('some text for a\n'))
+ return
+
+ self.assertFalse(t.has('a'))
+ t.put_file_non_atomic('a', StringIO('some text for a\n'))
+ self.assertTrue(t.has('a'))
+ self.check_transport_contents('some text for a\n', t, 'a')
+ # Put also replaces contents
+ t.put_file_non_atomic('a', StringIO('new\ncontents for\na\n'))
+ self.check_transport_contents('new\ncontents for\na\n', t, 'a')
+
+ # Make sure we can create another file
+ t.put_file_non_atomic('d', StringIO('contents for\nd\n'))
+ # And overwrite 'a' with empty contents
+ t.put_file_non_atomic('a', StringIO(''))
+ self.check_transport_contents('contents for\nd\n', t, 'd')
+ self.check_transport_contents('', t, 'a')
+
+ self.assertRaises(NoSuchFile, t.put_file_non_atomic, 'no/such/path',
+ StringIO('contents\n'))
+ # Now test the create_parent flag
+ self.assertRaises(NoSuchFile, t.put_file_non_atomic, 'dir/a',
+ StringIO('contents\n'))
+ self.assertFalse(t.has('dir/a'))
+ t.put_file_non_atomic('dir/a', StringIO('contents for dir/a\n'),
+ create_parent_dir=True)
+ self.check_transport_contents('contents for dir/a\n', t, 'dir/a')
+
+ # But we still get NoSuchFile if we can't make the parent dir
+ self.assertRaises(NoSuchFile, t.put_file_non_atomic, 'not/there/a',
+ StringIO('contents\n'),
+ create_parent_dir=True)
+
+ def test_put_file_permissions(self):
+
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+ if not t._can_roundtrip_unix_modebits():
+ # Can't roundtrip, so no need to run this test
+ return
+ t.put_file('mode644', StringIO('test text\n'), mode=0644)
+ self.assertTransportMode(t, 'mode644', 0644)
+ t.put_file('mode666', StringIO('test text\n'), mode=0666)
+ self.assertTransportMode(t, 'mode666', 0666)
+ t.put_file('mode600', StringIO('test text\n'), mode=0600)
+ self.assertTransportMode(t, 'mode600', 0600)
+ # Yes, you can put a file such that it becomes readonly
+ t.put_file('mode400', StringIO('test text\n'), mode=0400)
+ self.assertTransportMode(t, 'mode400', 0400)
+ # The default permissions should be based on the current umask
+ umask = osutils.get_umask()
+ t.put_file('nomode', StringIO('test text\n'), mode=None)
+ self.assertTransportMode(t, 'nomode', 0666 & ~umask)
+
+ def test_put_file_non_atomic_permissions(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+ if not t._can_roundtrip_unix_modebits():
+ # Can't roundtrip, so no need to run this test
+ return
+ t.put_file_non_atomic('mode644', StringIO('test text\n'), mode=0644)
+ self.assertTransportMode(t, 'mode644', 0644)
+ t.put_file_non_atomic('mode666', StringIO('test text\n'), mode=0666)
+ self.assertTransportMode(t, 'mode666', 0666)
+ t.put_file_non_atomic('mode600', StringIO('test text\n'), mode=0600)
+ self.assertTransportMode(t, 'mode600', 0600)
+ # Yes, you can put_file_non_atomic a file such that it becomes readonly
+ t.put_file_non_atomic('mode400', StringIO('test text\n'), mode=0400)
+ self.assertTransportMode(t, 'mode400', 0400)
+
+ # The default permissions should be based on the current umask
+ umask = osutils.get_umask()
+ t.put_file_non_atomic('nomode', StringIO('test text\n'), mode=None)
+ self.assertTransportMode(t, 'nomode', 0666 & ~umask)
+
+ # We should also be able to set the mode for a parent directory
+ # when it is created
+ sio = StringIO()
+ t.put_file_non_atomic('dir700/mode664', sio, mode=0664,
+ dir_mode=0700, create_parent_dir=True)
+ self.assertTransportMode(t, 'dir700', 0700)
+ t.put_file_non_atomic('dir770/mode664', sio, mode=0664,
+ dir_mode=0770, create_parent_dir=True)
+ self.assertTransportMode(t, 'dir770', 0770)
+ t.put_file_non_atomic('dir777/mode664', sio, mode=0664,
+ dir_mode=0777, create_parent_dir=True)
+ self.assertTransportMode(t, 'dir777', 0777)
+
+ def test_put_bytes_unicode(self):
+ # Expect put_bytes to raise AssertionError or UnicodeEncodeError if
+ # given unicode "bytes". UnicodeEncodeError doesn't really make sense
+ # (we don't want to encode unicode here at all, callers should be
+ # strictly passing bytes to put_bytes), but we allow it for backwards
+ # compatibility. At some point we should use a specific exception.
+ # See https://bugs.launchpad.net/bzr/+bug/106898.
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ unicode_string = u'\u1234'
+ self.assertRaises(
+ (AssertionError, UnicodeEncodeError),
+ t.put_bytes, 'foo', unicode_string)
+
+ def test_put_file_unicode(self):
+ # Like put_bytes, except with a StringIO.StringIO of a unicode string.
+ # This situation can happen (and has) if code is careless about the type
+ # of "string" it initialises/writes to a StringIO with. We cannot use
+ # cStringIO, because it never returns unicode from read.
+ # Like put_bytes, UnicodeEncodeError isn't quite the right exception to
+ # raise, but we raise it for hysterical raisins.
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ unicode_file = pyStringIO(u'\u1234')
+ self.assertRaises(UnicodeEncodeError, t.put_file, 'foo', unicode_file)
+
+ def test_mkdir(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ # cannot mkdir on readonly transports. We're not testing for
+ # cache coherency because cache behaviour is not currently
+ # defined for the transport interface.
+ self.assertRaises(TransportNotPossible, t.mkdir, '.')
+ self.assertRaises(TransportNotPossible, t.mkdir, 'new_dir')
+ self.assertRaises(TransportNotPossible, t.mkdir_multi, ['new_dir'])
+ self.assertRaises(TransportNotPossible, t.mkdir, 'path/doesnt/exist')
+ return
+ # Test mkdir
+ t.mkdir('dir_a')
+ self.assertEqual(t.has('dir_a'), True)
+ self.assertEqual(t.has('dir_b'), False)
+
+ t.mkdir('dir_b')
+ self.assertEqual(t.has('dir_b'), True)
+
+ t.mkdir_multi(['dir_c', 'dir_d'])
+
+ t.mkdir_multi(iter(['dir_e', 'dir_f']))
+ self.assertEqual(list(t.has_multi(
+ ['dir_a', 'dir_b', 'dir_c', 'dir_q',
+ 'dir_d', 'dir_e', 'dir_f', 'dir_b'])),
+ [True, True, True, False,
+ True, True, True, True])
+
+ # we were testing that a local mkdir followed by a transport
+ # mkdir failed this way, but given that we *in one process* do not
+ # concurrently fiddle with disk dirs and then use transport to do
+ # things, the win here seems marginal compared to the constraint on
+ # the interface. RBC 20051227
+ t.mkdir('dir_g')
+ self.assertRaises(FileExists, t.mkdir, 'dir_g')
+
+ # Test get/put in sub-directories
+ t.put_bytes('dir_a/a', 'contents of dir_a/a')
+ t.put_file('dir_b/b', StringIO('contents of dir_b/b'))
+ self.check_transport_contents('contents of dir_a/a', t, 'dir_a/a')
+ self.check_transport_contents('contents of dir_b/b', t, 'dir_b/b')
+
+ # mkdir of a dir with an absent parent
+ self.assertRaises(NoSuchFile, t.mkdir, 'missing/dir')
+
+ def test_mkdir_permissions(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ if not t._can_roundtrip_unix_modebits():
+ # no sense testing on this transport
+ return
+ # Test mkdir with a mode
+ t.mkdir('dmode755', mode=0755)
+ self.assertTransportMode(t, 'dmode755', 0755)
+ t.mkdir('dmode555', mode=0555)
+ self.assertTransportMode(t, 'dmode555', 0555)
+ t.mkdir('dmode777', mode=0777)
+ self.assertTransportMode(t, 'dmode777', 0777)
+ t.mkdir('dmode700', mode=0700)
+ self.assertTransportMode(t, 'dmode700', 0700)
+ t.mkdir_multi(['mdmode755'], mode=0755)
+ self.assertTransportMode(t, 'mdmode755', 0755)
+
+ # Default mode should be based on umask
+ umask = osutils.get_umask()
+ t.mkdir('dnomode', mode=None)
+ self.assertTransportMode(t, 'dnomode', 0777 & ~umask)
+
+ def test_opening_a_file_stream_creates_file(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ handle = t.open_write_stream('foo')
+ try:
+ self.assertEqual('', t.get_bytes('foo'))
+ finally:
+ handle.close()
+
+ def test_opening_a_file_stream_can_set_mode(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ if not t._can_roundtrip_unix_modebits():
+ # Can't roundtrip, so no need to run this test
+ return
+ def check_mode(name, mode, expected):
+ handle = t.open_write_stream(name, mode=mode)
+ handle.close()
+ self.assertTransportMode(t, name, expected)
+ check_mode('mode644', 0644, 0644)
+ check_mode('mode666', 0666, 0666)
+ check_mode('mode600', 0600, 0600)
+ # The default permissions should be based on the current umask
+ check_mode('nomode', None, 0666 & ~osutils.get_umask())
+
+ def test_copy_to(self):
+ # FIXME: test: same server to same server (partly done)
+ # same protocol two servers
+ # and different protocols (done for now except for MemoryTransport).
+ # - RBC 20060122
+
+ def simple_copy_files(transport_from, transport_to):
+ files = ['a', 'b', 'c', 'd']
+ self.build_tree(files, transport=transport_from)
+ self.assertEqual(4, transport_from.copy_to(files, transport_to))
+ for f in files:
+ self.check_transport_contents(transport_to.get_bytes(f),
+ transport_from, f)
+
+ t = self.get_transport()
+ temp_transport = MemoryTransport('memory:///')
+ simple_copy_files(t, temp_transport)
+ if not t.is_readonly():
+ t.mkdir('copy_to_simple')
+ t2 = t.clone('copy_to_simple')
+ simple_copy_files(t, t2)
+
+
+ # Test that copying into a missing directory raises
+ # NoSuchFile
+ if t.is_readonly():
+ self.build_tree(['e/', 'e/f'])
+ else:
+ t.mkdir('e')
+ t.put_bytes('e/f', 'contents of e')
+ self.assertRaises(NoSuchFile, t.copy_to, ['e/f'], temp_transport)
+ temp_transport.mkdir('e')
+ t.copy_to(['e/f'], temp_transport)
+
+ del temp_transport
+ temp_transport = MemoryTransport('memory:///')
+
+ files = ['a', 'b', 'c', 'd']
+ t.copy_to(iter(files), temp_transport)
+ for f in files:
+ self.check_transport_contents(temp_transport.get_bytes(f),
+ t, f)
+ del temp_transport
+
+ for mode in (0666, 0644, 0600, 0400):
+ temp_transport = MemoryTransport("memory:///")
+ t.copy_to(files, temp_transport, mode=mode)
+ for f in files:
+ self.assertTransportMode(temp_transport, f, mode)
+
+ def test_create_prefix(self):
+ t = self.get_transport()
+ sub = t.clone('foo').clone('bar')
+ try:
+ sub.create_prefix()
+ except TransportNotPossible:
+ self.assertTrue(t.is_readonly())
+ else:
+ self.assertTrue(t.has('foo/bar'))
+
+ def test_append_file(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.append_file, 'a', 'add\nsome\nmore\ncontents\n')
+ return
+ t.put_bytes('a', 'diff\ncontents for\na\n')
+ t.put_bytes('b', 'contents\nfor b\n')
+
+ self.assertEqual(20,
+ t.append_file('a', StringIO('add\nsome\nmore\ncontents\n')))
+
+ self.check_transport_contents(
+ 'diff\ncontents for\na\nadd\nsome\nmore\ncontents\n',
+ t, 'a')
+
+ # a file with no parent should fail..
+ self.assertRaises(NoSuchFile,
+ t.append_file, 'missing/path', StringIO('content'))
+
+ # And we can create new files, too
+ self.assertEqual(0,
+ t.append_file('c', StringIO('some text\nfor a missing file\n')))
+ self.check_transport_contents('some text\nfor a missing file\n',
+ t, 'c')
+
+ def test_append_bytes(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.append_bytes, 'a', 'add\nsome\nmore\ncontents\n')
+ return
+
+ self.assertEqual(0, t.append_bytes('a', 'diff\ncontents for\na\n'))
+ self.assertEqual(0, t.append_bytes('b', 'contents\nfor b\n'))
+
+ self.assertEqual(20,
+ t.append_bytes('a', 'add\nsome\nmore\ncontents\n'))
+
+ self.check_transport_contents(
+ 'diff\ncontents for\na\nadd\nsome\nmore\ncontents\n',
+ t, 'a')
+
+ # a file with no parent should fail..
+ self.assertRaises(NoSuchFile,
+ t.append_bytes, 'missing/path', 'content')
+
+ def test_append_multi(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+ t.put_bytes('a', 'diff\ncontents for\na\n'
+ 'add\nsome\nmore\ncontents\n')
+ t.put_bytes('b', 'contents\nfor b\n')
+
+ self.assertEqual((43, 15),
+ t.append_multi([('a', StringIO('and\nthen\nsome\nmore\n')),
+ ('b', StringIO('some\nmore\nfor\nb\n'))]))
+
+ self.check_transport_contents(
+ 'diff\ncontents for\na\n'
+ 'add\nsome\nmore\ncontents\n'
+ 'and\nthen\nsome\nmore\n',
+ t, 'a')
+ self.check_transport_contents(
+ 'contents\nfor b\n'
+ 'some\nmore\nfor\nb\n',
+ t, 'b')
+
+ self.assertEqual((62, 31),
+ t.append_multi(iter([('a', StringIO('a little bit more\n')),
+ ('b', StringIO('from an iterator\n'))])))
+ self.check_transport_contents(
+ 'diff\ncontents for\na\n'
+ 'add\nsome\nmore\ncontents\n'
+ 'and\nthen\nsome\nmore\n'
+ 'a little bit more\n',
+ t, 'a')
+ self.check_transport_contents(
+ 'contents\nfor b\n'
+ 'some\nmore\nfor\nb\n'
+ 'from an iterator\n',
+ t, 'b')
+
+ self.assertEqual((80, 0),
+ t.append_multi([('a', StringIO('some text in a\n')),
+ ('d', StringIO('missing file r\n'))]))
+
+ self.check_transport_contents(
+ 'diff\ncontents for\na\n'
+ 'add\nsome\nmore\ncontents\n'
+ 'and\nthen\nsome\nmore\n'
+ 'a little bit more\n'
+ 'some text in a\n',
+ t, 'a')
+ self.check_transport_contents('missing file r\n', t, 'd')
+
+ def test_append_file_mode(self):
+ """Check that append accepts a mode parameter"""
+ # check append accepts a mode
+ t = self.get_transport()
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.append_file, 'f', StringIO('f'), mode=None)
+ return
+ t.append_file('f', StringIO('f'), mode=None)
+
+ def test_append_bytes_mode(self):
+ # check append_bytes accepts a mode
+ t = self.get_transport()
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible,
+ t.append_bytes, 'f', 'f', mode=None)
+ return
+ t.append_bytes('f', 'f', mode=None)
+
+ def test_delete(self):
+ # TODO: Test Transport.delete
+ t = self.get_transport()
+
+ # Not much to do with a readonly transport
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible, t.delete, 'missing')
+ return
+
+ t.put_bytes('a', 'a little bit of text\n')
+ self.assertTrue(t.has('a'))
+ t.delete('a')
+ self.assertFalse(t.has('a'))
+
+ self.assertRaises(NoSuchFile, t.delete, 'a')
+
+ t.put_bytes('a', 'a text\n')
+ t.put_bytes('b', 'b text\n')
+ t.put_bytes('c', 'c text\n')
+ self.assertEqual([True, True, True],
+ list(t.has_multi(['a', 'b', 'c'])))
+ t.delete_multi(['a', 'c'])
+ self.assertEqual([False, True, False],
+ list(t.has_multi(['a', 'b', 'c'])))
+ self.assertFalse(t.has('a'))
+ self.assertTrue(t.has('b'))
+ self.assertFalse(t.has('c'))
+
+ self.assertRaises(NoSuchFile,
+ t.delete_multi, ['a', 'b', 'c'])
+
+ self.assertRaises(NoSuchFile,
+ t.delete_multi, iter(['a', 'b', 'c']))
+
+ t.put_bytes('a', 'another a text\n')
+ t.put_bytes('c', 'another c text\n')
+ t.delete_multi(iter(['a', 'b', 'c']))
+
+ # We should have deleted everything
+ # SftpServer creates control files in the
+ # working directory, so we can just do a
+ # plain "listdir".
+ # self.assertEqual([], os.listdir('.'))
+
+ def test_recommended_page_size(self):
+ """Transports recommend a page size for partial access to files."""
+ t = self.get_transport()
+ self.assertIsInstance(t.recommended_page_size(), int)
+
+ def test_rmdir(self):
+ t = self.get_transport()
+ # Not much to do with a readonly transport
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible, t.rmdir, 'missing')
+ return
+ t.mkdir('adir')
+ t.mkdir('adir/bdir')
+ t.rmdir('adir/bdir')
+ # ftp may not be able to raise NoSuchFile for lack of
+ # details when failing
+ self.assertRaises((NoSuchFile, PathError), t.rmdir, 'adir/bdir')
+ t.rmdir('adir')
+ self.assertRaises((NoSuchFile, PathError), t.rmdir, 'adir')
+
+ def test_rmdir_not_empty(self):
+ """Deleting a non-empty directory raises an exception
+
+ sftp (and possibly others) don't give us a specific "directory not
+ empty" exception -- we can just see that the operation failed.
+ """
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ t.mkdir('adir')
+ t.mkdir('adir/bdir')
+ self.assertRaises(PathError, t.rmdir, 'adir')
+
+ def test_rmdir_empty_but_similar_prefix(self):
+ """rmdir does not get confused by sibling paths.
+
+ A naive implementation of MemoryTransport would refuse to rmdir
+ ".bzr/branch" if there is a ".bzr/branch-format" directory, because it
+ uses "path.startswith(dir)" on all file paths to determine if directory
+ is empty.
+ """
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ t.mkdir('foo')
+ t.put_bytes('foo-bar', '')
+ t.mkdir('foo-baz')
+ t.rmdir('foo')
+ self.assertRaises((NoSuchFile, PathError), t.rmdir, 'foo')
+ self.assertTrue(t.has('foo-bar'))
+
+ def test_rename_dir_succeeds(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ raise TestSkipped("transport is readonly")
+ t.mkdir('adir')
+ t.mkdir('adir/asubdir')
+ t.rename('adir', 'bdir')
+ self.assertTrue(t.has('bdir/asubdir'))
+ self.assertFalse(t.has('adir'))
+
+ def test_rename_dir_nonempty(self):
+ """Attempting to replace a nonemtpy directory should fail"""
+ t = self.get_transport()
+ if t.is_readonly():
+ raise TestSkipped("transport is readonly")
+ t.mkdir('adir')
+ t.mkdir('adir/asubdir')
+ t.mkdir('bdir')
+ t.mkdir('bdir/bsubdir')
+ # any kind of PathError would be OK, though we normally expect
+ # DirectoryNotEmpty
+ self.assertRaises(PathError, t.rename, 'bdir', 'adir')
+ # nothing was changed so it should still be as before
+ self.assertTrue(t.has('bdir/bsubdir'))
+ self.assertFalse(t.has('adir/bdir'))
+ self.assertFalse(t.has('adir/bsubdir'))
+
+ def test_rename_across_subdirs(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ raise TestNotApplicable("transport is readonly")
+ t.mkdir('a')
+ t.mkdir('b')
+ ta = t.clone('a')
+ tb = t.clone('b')
+ ta.put_bytes('f', 'aoeu')
+ ta.rename('f', '../b/f')
+ self.assertTrue(tb.has('f'))
+ self.assertFalse(ta.has('f'))
+ self.assertTrue(t.has('b/f'))
+
+ def test_delete_tree(self):
+ t = self.get_transport()
+
+ # Not much to do with a readonly transport
+ if t.is_readonly():
+ self.assertRaises(TransportNotPossible, t.delete_tree, 'missing')
+ return
+
+ # and does it like listing?
+ t.mkdir('adir')
+ try:
+ t.delete_tree('adir')
+ except TransportNotPossible:
+ # ok, this transport does not support delete_tree
+ return
+
+ # did it delete that trivial case?
+ self.assertRaises(NoSuchFile, t.stat, 'adir')
+
+ self.build_tree(['adir/',
+ 'adir/file',
+ 'adir/subdir/',
+ 'adir/subdir/file',
+ 'adir/subdir2/',
+ 'adir/subdir2/file',
+ ], transport=t)
+
+ t.delete_tree('adir')
+ # adir should be gone now.
+ self.assertRaises(NoSuchFile, t.stat, 'adir')
+
+ def test_move(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+
+ # TODO: I would like to use os.listdir() to
+ # make sure there are no extra files, but SftpServer
+ # creates control files in the working directory
+ # perhaps all of this could be done in a subdirectory
+
+ t.put_bytes('a', 'a first file\n')
+ self.assertEquals([True, False], list(t.has_multi(['a', 'b'])))
+
+ t.move('a', 'b')
+ self.assertTrue(t.has('b'))
+ self.assertFalse(t.has('a'))
+
+ self.check_transport_contents('a first file\n', t, 'b')
+ self.assertEquals([False, True], list(t.has_multi(['a', 'b'])))
+
+ # Overwrite a file
+ t.put_bytes('c', 'c this file\n')
+ t.move('c', 'b')
+ self.assertFalse(t.has('c'))
+ self.check_transport_contents('c this file\n', t, 'b')
+
+ # TODO: Try to write a test for atomicity
+ # TODO: Test moving into a non-existent subdirectory
+ # TODO: Test Transport.move_multi
+
+ def test_copy(self):
+ t = self.get_transport()
+
+ if t.is_readonly():
+ return
+
+ t.put_bytes('a', 'a file\n')
+ t.copy('a', 'b')
+ self.check_transport_contents('a file\n', t, 'b')
+
+ self.assertRaises(NoSuchFile, t.copy, 'c', 'd')
+ os.mkdir('c')
+ # What should the assert be if you try to copy a
+ # file over a directory?
+ #self.assertRaises(Something, t.copy, 'a', 'c')
+ t.put_bytes('d', 'text in d\n')
+ t.copy('d', 'b')
+ self.check_transport_contents('text in d\n', t, 'b')
+
+ # TODO: test copy_multi
+
+ def test_connection_error(self):
+ """ConnectionError is raised when connection is impossible.
+
+ The error should be raised from the first operation on the transport.
+ """
+ try:
+ url = self._server.get_bogus_url()
+ except NotImplementedError:
+ raise TestSkipped("Transport %s has no bogus URL support." %
+ self._server.__class__)
+ t = _mod_transport.get_transport_from_url(url)
+ self.assertRaises((ConnectionError, NoSuchFile), t.get, '.bzr/branch')
+
+ def test_stat(self):
+ # TODO: Test stat, just try once, and if it throws, stop testing
+ from stat import S_ISDIR, S_ISREG
+
+ t = self.get_transport()
+
+ try:
+ st = t.stat('.')
+ except TransportNotPossible, e:
+ # This transport cannot stat
+ return
+
+ paths = ['a', 'b/', 'b/c', 'b/d/', 'b/d/e']
+ sizes = [14, 0, 16, 0, 18]
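+ # (these sizes correspond to the 'contents of <path>\n' text build_tree writes)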
+ self.build_tree(paths, transport=t, line_endings='binary')
+
+ for path, size in zip(paths, sizes):
+ st = t.stat(path)
+ if path.endswith('/'):
+ self.assertTrue(S_ISDIR(st.st_mode))
+ # directory sizes are meaningless
+ else:
+ self.assertTrue(S_ISREG(st.st_mode))
+ self.assertEqual(size, st.st_size)
+
+ remote_stats = list(t.stat_multi(paths))
+ remote_iter_stats = list(t.stat_multi(iter(paths)))
+
+ self.assertRaises(NoSuchFile, t.stat, 'q')
+ self.assertRaises(NoSuchFile, t.stat, 'b/a')
+
+ self.assertListRaises(NoSuchFile, t.stat_multi, ['a', 'c', 'd'])
+ self.assertListRaises(NoSuchFile, t.stat_multi, iter(['a', 'c', 'd']))
+ self.build_tree(['subdir/', 'subdir/file'], transport=t)
+ subdir = t.clone('subdir')
+ st = subdir.stat('./file')
+ st = subdir.stat('.')
+
+ def test_hardlink(self):
+ from stat import ST_NLINK
+
+ t = self.get_transport()
+
+ source_name = "original_target"
+ link_name = "target_link"
+
+ self.build_tree([source_name], transport=t)
+
+ try:
+ t.hardlink(source_name, link_name)
+
+ self.assertTrue(t.has(source_name))
+ self.assertTrue(t.has(link_name))
+
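+ # a hard link makes both names refer to the same underlying file,
+ # so the link count reported by stat should now be 2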
+ st = t.stat(link_name)
+ self.assertEqual(st[ST_NLINK], 2)
+ except TransportNotPossible:
+ raise TestSkipped("Transport %s does not support hardlinks." %
+ self._server.__class__)
+
+ def test_symlink(self):
+ from stat import S_ISLNK
+
+ t = self.get_transport()
+
+ source_name = "original_target"
+ link_name = "target_link"
+
+ self.build_tree([source_name], transport=t)
+
+ try:
+ t.symlink(source_name, link_name)
+
+ self.assertTrue(t.has(source_name))
+ self.assertTrue(t.has(link_name))
+
+ st = t.stat(link_name)
+ self.assertTrue(S_ISLNK(st.st_mode),
+ "expected symlink, got mode %o" % st.st_mode)
+ except TransportNotPossible:
+ raise TestSkipped("Transport %s does not support symlinks." %
+ self._server.__class__)
+ except IOError:
+ self.knownFailure("Paramiko fails to create symlinks during tests")
+
+ def test_list_dir(self):
+ # TODO: Test list_dir, just try once, and if it throws, stop testing
+ t = self.get_transport()
+
+ if not t.listable():
+ self.assertRaises(TransportNotPossible, t.list_dir, '.')
+ return
+
+ def sorted_list(d, transport):
+ l = list(transport.list_dir(d))
+ l.sort()
+ return l
+
+ self.assertEqual([], sorted_list('.', t))
+ # c2 is precisely one letter longer than c here to test that
+ # suffixing is not confused.
+ # a%25b checks that quoting is done consistently across transports
+ tree_names = ['a', 'a%25b', 'b', 'c/', 'c/d', 'c/e', 'c2/']
+
+ if not t.is_readonly():
+ self.build_tree(tree_names, transport=t)
+ else:
+ self.build_tree(tree_names)
+
+ self.assertEqual(
+ ['a', 'a%2525b', 'b', 'c', 'c2'], sorted_list('', t))
+ self.assertEqual(
+ ['a', 'a%2525b', 'b', 'c', 'c2'], sorted_list('.', t))
+ self.assertEqual(['d', 'e'], sorted_list('c', t))
+
+ # Cloning the transport produces an equivalent listing
+ self.assertEqual(['d', 'e'], sorted_list('', t.clone('c')))
+
+ if not t.is_readonly():
+ t.delete('c/d')
+ t.delete('b')
+ else:
+ os.unlink('c/d')
+ os.unlink('b')
+
+ self.assertEqual(['a', 'a%2525b', 'c', 'c2'], sorted_list('.', t))
+ self.assertEqual(['e'], sorted_list('c', t))
+
+ self.assertListRaises(PathError, t.list_dir, 'q')
+ self.assertListRaises(PathError, t.list_dir, 'c/f')
+ # 'a' is a file, list_dir should raise an error
+ self.assertListRaises(PathError, t.list_dir, 'a')
+
+ def test_list_dir_result_is_url_escaped(self):
+ t = self.get_transport()
+ if not t.listable():
+ raise TestSkipped("transport not listable")
+
+ if not t.is_readonly():
+ self.build_tree(['a/', 'a/%'], transport=t)
+ else:
+ self.build_tree(['a/', 'a/%'])
+
+ names = list(t.list_dir('a'))
+ self.assertEqual(['%25'], names)
+ self.assertIsInstance(names[0], str)
+
+ def test_clone_preserve_info(self):
+ t1 = self.get_transport()
+ if not isinstance(t1, ConnectedTransport):
+ raise TestSkipped("not a connected transport")
+
+ t2 = t1.clone('subdir')
+ self.assertEquals(t1._parsed_url.scheme, t2._parsed_url.scheme)
+ self.assertEquals(t1._parsed_url.user, t2._parsed_url.user)
+ self.assertEquals(t1._parsed_url.password, t2._parsed_url.password)
+ self.assertEquals(t1._parsed_url.host, t2._parsed_url.host)
+ self.assertEquals(t1._parsed_url.port, t2._parsed_url.port)
+
+ def test__reuse_for(self):
+ t = self.get_transport()
+ if not isinstance(t, ConnectedTransport):
+ raise TestSkipped("not a connected transport")
+
+ def new_url(scheme=None, user=None, password=None,
+ host=None, port=None, path=None):
+ """Build a new url from t.base changing only parts of it.
+
+ Only the parameters different from None will be changed.
+ """
+ if scheme is None: scheme = t._parsed_url.scheme
+ if user is None: user = t._parsed_url.user
+ if password is None: password = t._parsed_url.password
+ if host is None: host = t._parsed_url.host
+ if port is None: port = t._parsed_url.port
+ if path is None: path = t._parsed_url.path
+ return str(urlutils.URL(scheme, user, password, host, port, path))
+
+ if t._parsed_url.scheme == 'ftp':
+ scheme = 'sftp'
+ else:
+ scheme = 'ftp'
+ self.assertIsNot(t, t._reuse_for(new_url(scheme=scheme)))
+ if t._parsed_url.user == 'me':
+ user = 'you'
+ else:
+ user = 'me'
+ self.assertIsNot(t, t._reuse_for(new_url(user=user)))
+ # passwords are not taken into account because:
+ # - it makes no sense to have two different valid passwords for the
+ # same user
+ # - _password in ConnectedTransport is intended to collect what the
+ # user specified from the command-line and there are cases where the
+ # new url can contain no password (if the url was built from an
+ # existing transport.base for example)
+ # - password are considered part of the credentials provided at
+ # connection creation time and as such may not be present in the url
+ # (they may be typed by the user when prompted for example)
+ self.assertIs(t, t._reuse_for(new_url(password='from space')))
+ # We will not connect, so we can use an invalid host
+ self.assertIsNot(t, t._reuse_for(new_url(host=t._parsed_url.host + 'bar')))
+ if t._parsed_url.port == 1234:
+ port = 4321
+ else:
+ port = 1234
+ self.assertIsNot(t, t._reuse_for(new_url(port=port)))
+ # No point in trying to reuse a transport for a local URL
+ self.assertIs(None, t._reuse_for('/valid_but_not_existing'))
+
+ def test_connection_sharing(self):
+ t = self.get_transport()
+ if not isinstance(t, ConnectedTransport):
+ raise TestSkipped("not a connected transport")
+
+ c = t.clone('subdir')
+ # Some transports will create the connection only when needed
+ t.has('surely_not') # Force connection
+ self.assertIs(t._get_connection(), c._get_connection())
+
+ # Temporary failure, we need to create a new dummy connection
+ new_connection = None
+ t._set_connection(new_connection)
+ # Check that both transports use the same connection
+ self.assertIs(new_connection, t._get_connection())
+ self.assertIs(new_connection, c._get_connection())
+
+ def test_reuse_connection_for_various_paths(self):
+ t = self.get_transport()
+ if not isinstance(t, ConnectedTransport):
+ raise TestSkipped("not a connected transport")
+
+ t.has('surely_not') # Force connection
+ self.assertIsNot(None, t._get_connection())
+
+ subdir = t._reuse_for(t.base + 'whatever/but/deep/down/the/path')
+ self.assertIsNot(t, subdir)
+ self.assertIs(t._get_connection(), subdir._get_connection())
+
+ home = subdir._reuse_for(t.base + 'home')
+ self.assertIs(t._get_connection(), home._get_connection())
+ self.assertIs(subdir._get_connection(), home._get_connection())
+
+ def test_clone(self):
+ # TODO: Test that clone moves up and down the filesystem
+ t1 = self.get_transport()
+
+ self.build_tree(['a', 'b/', 'b/c'], transport=t1)
+
+ self.assertTrue(t1.has('a'))
+ self.assertTrue(t1.has('b/c'))
+ self.assertFalse(t1.has('c'))
+
+ t2 = t1.clone('b')
+ self.assertEqual(t1.base + 'b/', t2.base)
+
+ self.assertTrue(t2.has('c'))
+ self.assertFalse(t2.has('a'))
+
+ t3 = t2.clone('..')
+ self.assertTrue(t3.has('a'))
+ self.assertFalse(t3.has('c'))
+
+ self.assertFalse(t1.has('b/d'))
+ self.assertFalse(t2.has('d'))
+ self.assertFalse(t3.has('b/d'))
+
+ if t1.is_readonly():
+ self.build_tree_contents([('b/d', 'newfile\n')])
+ else:
+ t2.put_bytes('d', 'newfile\n')
+
+ self.assertTrue(t1.has('b/d'))
+ self.assertTrue(t2.has('d'))
+ self.assertTrue(t3.has('b/d'))
+
+ def test_clone_to_root(self):
+ orig_transport = self.get_transport()
+ # Repeatedly go up to a parent directory until we're at the root
+ # directory of this transport
+ root_transport = orig_transport
+ new_transport = root_transport.clone("..")
+ # as we are walking up directories, the path must be
+ # getting shorter, except at the top
+ self.assertTrue(len(new_transport.base) < len(root_transport.base)
+ or new_transport.base == root_transport.base)
+ while new_transport.base != root_transport.base:
+ root_transport = new_transport
+ new_transport = root_transport.clone("..")
+ # as we are walking up directories, the path must be
+ # getting shorter, except at the top
+ self.assertTrue(len(new_transport.base) < len(root_transport.base)
+ or new_transport.base == root_transport.base)
+
+ # Cloning to "/" should take us to exactly the same location.
+ self.assertEqual(root_transport.base, orig_transport.clone("/").base)
+ # the abspath of "/" from the original transport should be the same
+ # as the base at the root:
+ self.assertEqual(orig_transport.abspath("/"), root_transport.base)
+
+ # At the root, the URL must still end with / as it's a directory
+ self.assertEqual(root_transport.base[-1], '/')
+
+ def test_clone_from_root(self):
+ """At the root, cloning to a simple dir should just do string append."""
+ orig_transport = self.get_transport()
+ root_transport = orig_transport.clone('/')
+ self.assertEqual(root_transport.base + '.bzr/',
+ root_transport.clone('.bzr').base)
+
+ def test_base_url(self):
+ t = self.get_transport()
+ self.assertEqual('/', t.base[-1])
+
+ def test_relpath(self):
+ t = self.get_transport()
+ self.assertEqual('', t.relpath(t.base))
+ # base ends with /
+ self.assertEqual('', t.relpath(t.base[:-1]))
+ # subdirs which don't exist should still give relpaths.
+ self.assertEqual('foo', t.relpath(t.base + 'foo'))
+ # trailing slash should be the same.
+ self.assertEqual('foo', t.relpath(t.base + 'foo/'))
+
+ def test_relpath_at_root(self):
+ t = self.get_transport()
+ # clone all the way to the top
+ new_transport = t.clone('..')
+ while new_transport.base != t.base:
+ t = new_transport
+ new_transport = t.clone('..')
+ # we must be able to get a relpath below the root
+ self.assertEqual('', t.relpath(t.base))
+ # and a deeper one should work too
+ self.assertEqual('foo/bar', t.relpath(t.base + 'foo/bar'))
+
+ def test_abspath(self):
+ # smoke test for abspath. Corner cases for backends like unix fs's
+ # that have aliasing problems like symlinks should go in backend
+ # specific test cases.
+ transport = self.get_transport()
+
+ self.assertEqual(transport.base + 'relpath',
+ transport.abspath('relpath'))
+
+ # This should work without raising an error.
+ transport.abspath("/")
+
+ # the abspath of "/" and "/foo/.." should result in the same location
+ self.assertEqual(transport.abspath("/"), transport.abspath("/foo/.."))
+
+ self.assertEqual(transport.clone("/").abspath('foo'),
+ transport.abspath("/foo"))
+
+ # GZ 2011-01-26: Test in per_transport but not using self.get_transport?
+ def test_win32_abspath(self):
+ # Note: we tried to set sys.platform='win32' so we could test on
+ # other platforms too, but then osutils does platform specific
+ # things at import time which defeated us...
+ if sys.platform != 'win32':
+ raise TestSkipped(
+ 'Testing drive letters in abspath implemented only for win32')
+
+ # smoke test for abspath on win32.
+ # a transport based on 'file:///' never fully qualifies the drive.
+ transport = _mod_transport.get_transport_from_url("file:///")
+ self.assertEqual(transport.abspath("/"), "file:///")
+
+ # but a transport that starts with a drive spec must keep it.
+ transport = _mod_transport.get_transport_from_url("file:///C:/")
+ self.assertEqual(transport.abspath("/"), "file:///C:/")
+
+ def test_local_abspath(self):
+ transport = self.get_transport()
+ try:
+ p = transport.local_abspath('.')
+ except (errors.NotLocalUrl, TransportNotPossible), e:
+ # should be formattable
+ s = str(e)
+ else:
+ self.assertEqual(getcwd(), p)
+
+ def test_abspath_at_root(self):
+ t = self.get_transport()
+ # clone all the way to the top
+ new_transport = t.clone('..')
+ while new_transport.base != t.base:
+ t = new_transport
+ new_transport = t.clone('..')
+ # we must be able to get an abspath of the root when we ask for
+ # t.abspath('..') - this is due to our choice that clone('..')
+ # should return the root from the root, combined with the desire that
+ # the url from clone('..') and from abspath('..') should be the same.
+ self.assertEqual(t.base, t.abspath('..'))
+ # '' should give us the root
+ self.assertEqual(t.base, t.abspath(''))
+ # and a path should append to the url
+ self.assertEqual(t.base + 'foo', t.abspath('foo'))
+
+ def test_iter_files_recursive(self):
+ transport = self.get_transport()
+ if not transport.listable():
+ self.assertRaises(TransportNotPossible,
+ transport.iter_files_recursive)
+ return
+ self.build_tree(['isolated/',
+ 'isolated/dir/',
+ 'isolated/dir/foo',
+ 'isolated/dir/bar',
+ 'isolated/dir/b%25z', # make sure quoting is correct
+ 'isolated/bar'],
+ transport=transport)
+ paths = set(transport.iter_files_recursive())
+ # nb the directories are not converted
+ self.assertEqual(paths,
+ set(['isolated/dir/foo',
+ 'isolated/dir/bar',
+ 'isolated/dir/b%2525z',
+ 'isolated/bar']))
+ sub_transport = transport.clone('isolated')
+ paths = set(sub_transport.iter_files_recursive())
+ self.assertEqual(paths,
+ set(['dir/foo', 'dir/bar', 'dir/b%2525z', 'bar']))
+
+ def test_copy_tree(self):
+ # TODO: test file contents and permissions are preserved. This test was
+ # added just to ensure that quoting was handled correctly.
+ # -- David Allouche 2006-08-11
+ transport = self.get_transport()
+ if not transport.listable():
+ self.assertRaises(TransportNotPossible,
+ transport.iter_files_recursive)
+ return
+ if transport.is_readonly():
+ return
+ self.build_tree(['from/',
+ 'from/dir/',
+ 'from/dir/foo',
+ 'from/dir/bar',
+ 'from/dir/b%25z', # make sure quoting is correct
+ 'from/bar'],
+ transport=transport)
+ transport.copy_tree('from', 'to')
+ paths = set(transport.iter_files_recursive())
+ self.assertEqual(paths,
+ set(['from/dir/foo',
+ 'from/dir/bar',
+ 'from/dir/b%2525z',
+ 'from/bar',
+ 'to/dir/foo',
+ 'to/dir/bar',
+ 'to/dir/b%2525z',
+ 'to/bar',]))
+
+ def test_copy_tree_to_transport(self):
+ transport = self.get_transport()
+ if not transport.listable():
+ self.assertRaises(TransportNotPossible,
+ transport.iter_files_recursive)
+ return
+ if transport.is_readonly():
+ return
+ self.build_tree(['from/',
+ 'from/dir/',
+ 'from/dir/foo',
+ 'from/dir/bar',
+ 'from/dir/b%25z', # make sure quoting is correct
+ 'from/bar'],
+ transport=transport)
+ from_transport = transport.clone('from')
+ to_transport = transport.clone('to')
+ to_transport.ensure_base()
+ from_transport.copy_tree_to_transport(to_transport)
+ paths = set(transport.iter_files_recursive())
+ self.assertEqual(paths,
+ set(['from/dir/foo',
+ 'from/dir/bar',
+ 'from/dir/b%2525z',
+ 'from/bar',
+ 'to/dir/foo',
+ 'to/dir/bar',
+ 'to/dir/b%2525z',
+ 'to/bar',]))
+
+ def test_unicode_paths(self):
+ """Test that we can read/write files with Unicode names."""
+ t = self.get_transport()
+
+ # With FAT32 and certain encodings on win32
+ # '\xe5' and '\xe4' actually map to the same file
+ # adding a suffix kicks in the 'preserving but insensitive'
+ # route, and maintains the right files
+ files = [u'\xe5.1', # a w/ circle iso-8859-1
+ u'\xe4.2', # a w/ dots iso-8859-1
+ u'\u017d', # Z with caron iso-8859-2
+ u'\u062c', # Arabic j
+ u'\u0410', # Russian A
+ u'\u65e5', # Kanji 'day'
+ ]
+
+ no_unicode_support = getattr(self._server, 'no_unicode_support', False)
+ if no_unicode_support:
+ self.knownFailure("test server cannot handle unicode paths")
+
+ try:
+ self.build_tree(files, transport=t, line_endings='binary')
+ except UnicodeError:
+ raise TestSkipped("cannot handle unicode paths in current encoding")
+
+ # A plain unicode string is not a valid url
+ for fname in files:
+ self.assertRaises(InvalidURL, t.get, fname)
+
+ for fname in files:
+ fname_utf8 = fname.encode('utf-8')
+ contents = 'contents of %s\n' % (fname_utf8,)
+ self.check_transport_contents(contents, t, urlutils.escape(fname))
+
+ def test_connect_twice_is_same_content(self):
+ # check that our server (whatever it is) is accessible reliably
+ # via get_transport and multiple connections share content.
+ transport = self.get_transport()
+ if transport.is_readonly():
+ return
+ transport.put_bytes('foo', 'bar')
+ transport3 = self.get_transport()
+ self.check_transport_contents('bar', transport3, 'foo')
+
+ # now opening at a relative url should give us a sane result:
+ transport.mkdir('newdir')
+ transport5 = self.get_transport('newdir')
+ transport6 = transport5.clone('..')
+ self.check_transport_contents('bar', transport6, 'foo')
+
+ def test_lock_write(self):
+ """Test transport-level write locks.
+
+ These are deprecated and transports may decline to support them.
+ """
+ transport = self.get_transport()
+ if transport.is_readonly():
+ self.assertRaises(TransportNotPossible, transport.lock_write, 'foo')
+ return
+ transport.put_bytes('lock', '')
+ try:
+ lock = transport.lock_write('lock')
+ except TransportNotPossible:
+ return
+ # TODO make this consistent on all platforms:
+ # self.assertRaises(LockError, transport.lock_write, 'lock')
+ lock.unlock()
+
+ def test_lock_read(self):
+ """Test transport-level read locks.
+
+ These are deprecated and transports may decline to support them.
+ """
+ transport = self.get_transport()
+ if transport.is_readonly():
+ file('lock', 'w').close()
+ else:
+ transport.put_bytes('lock', '')
+ try:
+ lock = transport.lock_read('lock')
+ except TransportNotPossible:
+ return
+ # TODO make this consistent on all platforms:
+ # self.assertRaises(LockError, transport.lock_read, 'lock')
+ lock.unlock()
+
+ def test_readv(self):
+ transport = self.get_transport()
+ if transport.is_readonly():
+ with file('a', 'w') as f: f.write('0123456789')
+ else:
+ transport.put_bytes('a', '0123456789')
+
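+ # readv yields (offset, data) tuples, one per requested range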
+ d = list(transport.readv('a', ((0, 1),)))
+ self.assertEqual(d[0], (0, '0'))
+
+ d = list(transport.readv('a', ((0, 1), (1, 1), (3, 2), (9, 1))))
+ self.assertEqual(d[0], (0, '0'))
+ self.assertEqual(d[1], (1, '1'))
+ self.assertEqual(d[2], (3, '34'))
+ self.assertEqual(d[3], (9, '9'))
+
+ def test_readv_out_of_order(self):
+ transport = self.get_transport()
+ if transport.is_readonly():
+ with file('a', 'w') as f: f.write('0123456789')
+ else:
+ transport.put_bytes('a', '01234567890')
+
+ d = list(transport.readv('a', ((1, 1), (9, 1), (0, 1), (3, 2))))
+ self.assertEqual(d[0], (1, '1'))
+ self.assertEqual(d[1], (9, '9'))
+ self.assertEqual(d[2], (0, '0'))
+ self.assertEqual(d[3], (3, '34'))
+
+ def test_readv_with_adjust_for_latency(self):
+ transport = self.get_transport()
+ # the adjust for latency flag expands the data region returned
+ # according to a per-transport heuristic, so testing is a little
+ # tricky as we need more data than the largest combining that our
+ # transports do. To accommodate this we generate random data and
+ # cross-reference the returned data with the random data. To avoid doing
+ # multiple large random byte lookups we do several tests on the same
+ # backing data.
+ content = osutils.rand_bytes(200*1024)
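+ # (200kB: intentionally larger than any read coalescing the transports do)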
+ content_size = len(content)
+ if transport.is_readonly():
+ self.build_tree_contents([('a', content)])
+ else:
+ transport.put_bytes('a', content)
+ def check_result_data(result_vector):
+ for item in result_vector:
+ data_len = len(item[1])
+ self.assertEqual(content[item[0]:item[0] + data_len], item[1])
+
+ # start corner case
+ result = list(transport.readv('a', ((0, 30),),
+ adjust_for_latency=True, upper_limit=content_size))
+ # we expect 1 result, from 0, to something > 30
+ self.assertEqual(1, len(result))
+ self.assertEqual(0, result[0][0])
+ self.assertTrue(len(result[0][1]) >= 30)
+ check_result_data(result)
+ # end of file corner case
+ result = list(transport.readv('a', ((204700, 100),),
+ adjust_for_latency=True, upper_limit=content_size))
+ # we expect 1 result, from 204800 - its length, to the end
+ self.assertEqual(1, len(result))
+ data_len = len(result[0][1])
+ self.assertEqual(204800-data_len, result[0][0])
+ self.assertTrue(data_len >= 100)
+ check_result_data(result)
+ # out of order ranges are made in order
+ result = list(transport.readv('a', ((204700, 100), (0, 50)),
+ adjust_for_latency=True, upper_limit=content_size))
+ # we expect 2 results, in order, start and end.
+ self.assertEqual(2, len(result))
+ # start
+ data_len = len(result[0][1])
+ self.assertEqual(0, result[0][0])
+ self.assertTrue(data_len >= 30)
+ # end
+ data_len = len(result[1][1])
+ self.assertEqual(204800-data_len, result[1][0])
+ self.assertTrue(data_len >= 100)
+ check_result_data(result)
+ # close ranges get combined (even if out of order)
+ for request_vector in [((400,50), (800, 234)), ((800, 234), (400,50))]:
+ result = list(transport.readv('a', request_vector,
+ adjust_for_latency=True, upper_limit=content_size))
+ self.assertEqual(1, len(result))
+ data_len = len(result[0][1])
+ # minimum length spans from 400 to 1034, i.e. 634 bytes
+ self.assertTrue(data_len >= 634)
+ # must contain the region 400 to 1034
+ self.assertTrue(result[0][0] <= 400)
+ self.assertTrue(result[0][0] + data_len >= 1034)
+ check_result_data(result)
+
+ def test_readv_with_adjust_for_latency_with_big_file(self):
+ transport = self.get_transport()
+ # test from observed failure case.
+ if transport.is_readonly():
+ with file('a', 'w') as f: f.write('a'*1024*1024)
+ else:
+ transport.put_bytes('a', 'a'*1024*1024)
+ broken_vector = [(465219, 800), (225221, 800), (445548, 800),
+ (225037, 800), (221357, 800), (437077, 800), (947670, 800),
+ (465373, 800), (947422, 800)]
+ results = list(transport.readv('a', broken_vector, True, 1024*1024))
+ found_items = [False]*9
+ for pos, (start, length) in enumerate(broken_vector):
+ # check the range is covered by the result
+ for offset, data in results:
+ if offset <= start and start + length <= offset + len(data):
+ found_items[pos] = True
+ self.assertEqual([True]*9, found_items)
+
+ def test_get_with_open_write_stream_sees_all_content(self):
+ t = self.get_transport()
+ if t.is_readonly():
+ return
+ handle = t.open_write_stream('foo')
+ try:
+ handle.write('bcd')
+ self.assertEqual([(0, 'b'), (2, 'd')], list(t.readv('foo', ((0,1), (2,1)))))
+ finally:
+ handle.close()
+
+ def test_get_smart_medium(self):
+ """All transports must either give a smart medium, or know they can't.
+ """
+ transport = self.get_transport()
+ try:
+ client_medium = transport.get_smart_medium()
+ self.assertIsInstance(client_medium, medium.SmartClientMedium)
+ except errors.NoSmartMedium:
+            # getting NoSmartMedium is fine too: the transport knows it can't
+ pass
+
+ def test_readv_short_read(self):
+ transport = self.get_transport()
+ if transport.is_readonly():
+ with file('a', 'w') as f: f.write('0123456789')
+ else:
+ transport.put_bytes('a', '01234567890')
+
+        # This is intentionally reading off the end of the file,
+        # since we are sure the file does not extend that far
+ self.assertListRaises((errors.ShortReadvError, errors.InvalidRange,
+ # Can be raised by paramiko
+ AssertionError),
+ transport.readv, 'a', [(1,1), (8,10)])
+
+ # This is trying to seek past the end of the file, it should
+ # also raise a special error
+ self.assertListRaises((errors.ShortReadvError, errors.InvalidRange),
+ transport.readv, 'a', [(12,2)])
+
+ def test_no_segment_parameters(self):
+ """Segment parameters should be stripped and stored in
+ transport.segment_parameters."""
+ transport = self.get_transport("foo")
+ self.assertEquals({}, transport.get_segment_parameters())
+
+ def test_segment_parameters(self):
+ """Segment parameters should be stripped and stored in
+ transport.get_segment_parameters()."""
+ base_url = self._server.get_url()
+ parameters = {"key1": "val1", "key2": "val2"}
+ url = urlutils.join_segment_parameters(base_url, parameters)
+ transport = _mod_transport.get_transport_from_url(url)
+ self.assertEquals(parameters, transport.get_segment_parameters())
+
+ def test_set_segment_parameters(self):
+ """Segment parameters can be set and show up in base."""
+ transport = self.get_transport("foo")
+ orig_base = transport.base
+ transport.set_segment_parameter("arm", "board")
+ self.assertEquals("%s,arm=board" % orig_base, transport.base)
+ self.assertEquals({"arm": "board"}, transport.get_segment_parameters())
+ transport.set_segment_parameter("arm", None)
+ transport.set_segment_parameter("nonexistant", None)
+ self.assertEquals({}, transport.get_segment_parameters())
+ self.assertEquals(orig_base, transport.base)
+
+ def test_stat_symlink(self):
+        # if a transport points directly to a symlink (and supports symlinks
+        # at all) you can tell this. This helps with bug 32669.
+ t = self.get_transport()
+ try:
+ t.symlink('target', 'link')
+ except TransportNotPossible:
+ raise TestSkipped("symlinks not supported")
+ t2 = t.clone('link')
+ st = t2.stat('')
+ self.assertTrue(stat.S_ISLNK(st.st_mode))
+
+ def test_abspath_url_unquote_unreserved(self):
+ """URLs from abspath should have unreserved characters unquoted
+
+ Need consistent quoting notably for tildes, see lp:842223 for more.
+ """
+ t = self.get_transport()
+ needlessly_escaped_dir = "%2D%2E%30%39%41%5A%5F%61%7A%7E/"
+ self.assertEqual(t.base + "-.09AZ_az~",
+ t.abspath(needlessly_escaped_dir))
+
+ def test_clone_url_unquote_unreserved(self):
+ """Base URL of a cloned branch needs unreserved characters unquoted
+
+ Cloned transports should be prefix comparable for things like the
+ isolation checking of tests, see lp:842223 for more.
+ """
+ t1 = self.get_transport()
+ needlessly_escaped_dir = "%2D%2E%30%39%41%5A%5F%61%7A%7E/"
+ self.build_tree([needlessly_escaped_dir], transport=t1)
+ t2 = t1.clone(needlessly_escaped_dir)
+ self.assertEqual(t1.base + "-.09AZ_az~/", t2.base)
+
+ def test_hook_post_connection_one(self):
+ """Fire post_connect hook after a ConnectedTransport is first used"""
+ log = []
+ Transport.hooks.install_named_hook("post_connect", log.append, None)
+ t = self.get_transport()
+ self.assertEqual([], log)
+ t.has("non-existant")
+ if isinstance(t, RemoteTransport):
+ self.assertEqual([t.get_smart_medium()], log)
+ elif isinstance(t, ConnectedTransport):
+ self.assertEqual([t], log)
+ else:
+ self.assertEqual([], log)
+
+ def test_hook_post_connection_multi(self):
+ """Fire post_connect hook once per unshared underlying connection"""
+ log = []
+ Transport.hooks.install_named_hook("post_connect", log.append, None)
+ t1 = self.get_transport()
+ t2 = t1.clone(".")
+ t3 = self.get_transport()
+ self.assertEqual([], log)
+ t1.has("x")
+ t2.has("x")
+ t3.has("x")
+ if isinstance(t1, RemoteTransport):
+ self.assertEqual([t.get_smart_medium() for t in [t1, t3]], log)
+ elif isinstance(t1, ConnectedTransport):
+ self.assertEqual([t1, t3], log)
+ else:
+ self.assertEqual([], log)
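The readv calls exercised above rely on the per-transport "adjust for
latency" heuristic: requested ranges are expanded and nearby ranges are
coalesced into larger regions. A minimal standalone sketch of that expected
behaviour, using only calls that appear in the tests above (the file name,
sizes and offsets here are illustrative):

    from bzrlib import transport as _mod_transport

    t = _mod_transport.get_transport_from_url('memory:///')
    t.put_bytes('f', 'x' * 2048)
    requests = [(0, 10), (60, 10)]
    regions = list(t.readv('f', requests, adjust_for_latency=True,
                           upper_limit=2048))
    # Every requested range must be covered by some returned region; ranges
    # this close together are typically merged into a single region.
    for start, length in requests:
        assert any(offset <= start and start + length <= offset + len(data)
                   for offset, data in regions)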
diff --git a/bzrlib/tests/per_tree/__init__.py b/bzrlib/tests/per_tree/__init__.py
new file mode 100644
index 0000000..48affb6
--- /dev/null
+++ b/bzrlib/tests/per_tree/__init__.py
@@ -0,0 +1,407 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tree implementation tests for bzr.
+
+These test the conformance of all the tree variations to the expected API.
+Specific tests for individual variations are in other places such as:
+ - tests/per_workingtree/*.py.
+ - tests/test_tree.py
+ - tests/test_revision.py
+ - tests/test_workingtree.py
+"""
+
+from bzrlib import (
+ errors,
+ tests,
+ transform,
+ )
+from bzrlib.tests.per_controldir.test_controldir import TestCaseWithControlDir
+from bzrlib.tests.per_workingtree import (
+ make_scenarios as wt_make_scenarios,
+ make_scenario as wt_make_scenario,
+ )
+from bzrlib.revisiontree import RevisionTree
+from bzrlib.transform import TransformPreview
+from bzrlib.tests import (
+ features,
+ )
+from bzrlib.workingtree import (
+ format_registry,
+ )
+from bzrlib.workingtree_4 import (
+ DirStateRevisionTree,
+ WorkingTreeFormat4,
+ WorkingTreeFormat5,
+ )
+
+
+def return_parameter(testcase, something):
+ """A trivial thunk to return its input."""
+ return something
+
+
+def revision_tree_from_workingtree(testcase, tree):
+ """Create a revision tree from a working tree."""
+ revid = tree.commit('save tree', allow_pointless=True, recursive=None)
+ return tree.branch.repository.revision_tree(revid)
+
+
+def _dirstate_tree_from_workingtree(testcase, tree):
+ revid = tree.commit('save tree', allow_pointless=True, recursive=None)
+ return tree.basis_tree()
+
+
+def preview_tree_pre(testcase, tree):
+ tt = TransformPreview(tree)
+ testcase.addCleanup(tt.finalize)
+ preview_tree = tt.get_preview_tree()
+ preview_tree.set_parent_ids(tree.get_parent_ids())
+ return preview_tree
+
+
+def preview_tree_post(testcase, tree):
+ basis = tree.basis_tree()
+ tt = TransformPreview(basis)
+ testcase.addCleanup(tt.finalize)
+ tree.lock_read()
+ testcase.addCleanup(tree.unlock)
+ pp = None
+ transform._prepare_revert_transform(basis, tree, tt, None, False, None,
+ basis, {})
+ preview_tree = tt.get_preview_tree()
+ preview_tree.set_parent_ids(tree.get_parent_ids())
+ return preview_tree
+
+
+class TestTreeImplementationSupport(tests.TestCaseWithTransport):
+
+ def test_revision_tree_from_workingtree(self):
+ tree = self.make_branch_and_tree('.')
+ tree = revision_tree_from_workingtree(self, tree)
+ self.assertIsInstance(tree, RevisionTree)
+
+
+class TestCaseWithTree(TestCaseWithControlDir):
+
+ def make_branch_and_tree(self, relpath):
+        made_control = self.make_bzrdir(relpath,
+            format=self.workingtree_format._matchingbzrdir)
+ made_control.create_repository()
+ made_control.create_branch()
+ return self.workingtree_format.initialize(made_control)
+
+ def workingtree_to_test_tree(self, tree):
+ return self._workingtree_to_test_tree(self, tree)
+
+ def _convert_tree(self, tree, converter=None):
+ """helper to convert using the converter or a supplied one."""
+ # convert that to the final shape
+ if converter is None:
+ converter = self.workingtree_to_test_tree
+ return converter(tree)
+
+ def get_tree_no_parents_no_content(self, empty_tree, converter=None):
+ """Make a tree with no parents and no contents from empty_tree.
+
+ :param empty_tree: A working tree with no content and no parents to
+ modify.
+ """
+ empty_tree.set_root_id('empty-root-id')
+ return self._convert_tree(empty_tree, converter)
+
+ def _make_abc_tree(self, tree):
+ """setup an abc content tree."""
+ files = ['a', 'b/', 'b/c']
+ self.build_tree(files, line_endings='binary',
+ transport=tree.bzrdir.root_transport)
+ tree.set_root_id('root-id')
+ tree.add(files, ['a-id', 'b-id', 'c-id'])
+
+ def get_tree_no_parents_abc_content(self, tree, converter=None):
+ """return a test tree with a, b/, b/c contents."""
+ self._make_abc_tree(tree)
+ return self._convert_tree(tree, converter)
+
+ def get_tree_no_parents_abc_content_2(self, tree, converter=None):
+ """return a test tree with a, b/, b/c contents.
+
+ This variation changes the content of 'a' to foobar\n.
+ """
+ self._make_abc_tree(tree)
+ f = open(tree.basedir + '/a', 'wb')
+ try:
+ f.write('foobar\n')
+ finally:
+ f.close()
+ return self._convert_tree(tree, converter)
+
+ def get_tree_no_parents_abc_content_3(self, tree, converter=None):
+ """return a test tree with a, b/, b/c contents.
+
+ This variation changes the executable flag of b/c to True.
+ """
+ self._make_abc_tree(tree)
+ tt = transform.TreeTransform(tree)
+ trans_id = tt.trans_id_tree_path('b/c')
+ tt.set_executability(True, trans_id)
+ tt.apply()
+ return self._convert_tree(tree, converter)
+
+ def get_tree_no_parents_abc_content_4(self, tree, converter=None):
+ """return a test tree with d, b/, b/c contents.
+
+ This variation renames a to d.
+ """
+ self._make_abc_tree(tree)
+ tree.rename_one('a', 'd')
+ return self._convert_tree(tree, converter)
+
+ def get_tree_no_parents_abc_content_5(self, tree, converter=None):
+ """return a test tree with d, b/, b/c contents.
+
+ This variation renames a to d and alters its content to 'bar\n'.
+ """
+ self._make_abc_tree(tree)
+ tree.rename_one('a', 'd')
+ f = open(tree.basedir + '/d', 'wb')
+ try:
+ f.write('bar\n')
+ finally:
+ f.close()
+ return self._convert_tree(tree, converter)
+
+ def get_tree_no_parents_abc_content_6(self, tree, converter=None):
+ """return a test tree with a, b/, e contents.
+
+ This variation renames b/c to e, and makes it executable.
+ """
+ self._make_abc_tree(tree)
+ tt = transform.TreeTransform(tree)
+ trans_id = tt.trans_id_tree_path('b/c')
+ parent_trans_id = tt.trans_id_tree_path('')
+ tt.adjust_path('e', parent_trans_id, trans_id)
+ tt.set_executability(True, trans_id)
+ tt.apply()
+ return self._convert_tree(tree, converter)
+
+ def get_tree_no_parents_abc_content_7(self, tree, converter=None):
+ """return a test tree with a, b/, d/e contents.
+
+ This variation adds a dir 'd' ('d-id'), renames b to d/e.
+ """
+ self._make_abc_tree(tree)
+ self.build_tree(['d/'], transport=tree.bzrdir.root_transport)
+ tree.add(['d'], ['d-id'])
+ tt = transform.TreeTransform(tree)
+ trans_id = tt.trans_id_tree_path('b')
+ parent_trans_id = tt.trans_id_tree_path('d')
+ tt.adjust_path('e', parent_trans_id, trans_id)
+ tt.apply()
+ return self._convert_tree(tree, converter)
+
+ def get_tree_with_subdirs_and_all_content_types(self):
+ """Return a test tree with subdirs and all content types.
+ See get_tree_with_subdirs_and_all_supported_content_types for details.
+ """
+ return self.get_tree_with_subdirs_and_all_supported_content_types(True)
+
+ def get_tree_with_subdirs_and_all_supported_content_types(self, symlinks):
+ """Return a test tree with subdirs and all supported content types.
+        Some content types may not be created on some platforms
+        (like symlinks on native win32).
+
+        :param symlinks: control whether a symlink should be created in the
+                         tree. Note: if you wish to set this parameter
+                         automatically depending on the underlying system,
+                         please use the value returned by the
+                         bzrlib.osutils.has_symlinks() function.
+
+ The returned tree has the following inventory:
+ [('', inventory.ROOT_ID),
+ ('0file', '2file'),
+ ('1top-dir', '1top-dir'),
+ (u'2utf\u1234file', u'0utf\u1234file'),
+ ('symlink', 'symlink'), # only if symlinks arg is True
+ ('1top-dir/0file-in-1topdir', '1file-in-1topdir'),
+ ('1top-dir/1dir-in-1topdir', '0dir-in-1topdir')]
+        where each component has the type of its name -
+        i.e. '1file..' is a file.
+
+        Note that the order of the paths and file ids is deliberately
+        mismatched to ensure that the result order is path based.
+ """
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('.')
+ paths = ['0file',
+ '1top-dir/',
+ u'2utf\u1234file',
+ '1top-dir/0file-in-1topdir',
+ '1top-dir/1dir-in-1topdir/'
+ ]
+ ids = [
+ '2file',
+ '1top-dir',
+ u'0utf\u1234file'.encode('utf8'),
+ '1file-in-1topdir',
+ '0dir-in-1topdir'
+ ]
+ self.build_tree(paths)
+ tree.add(paths, ids)
+ tt = transform.TreeTransform(tree)
+ if symlinks:
+ root_transaction_id = tt.trans_id_tree_path('')
+ tt.new_symlink('symlink',
+ root_transaction_id, 'link-target', 'symlink')
+ tt.apply()
+ return self.workingtree_to_test_tree(tree)
+
+ def get_tree_with_utf8(self, tree):
+ """Generate a tree with a utf8 revision and unicode paths."""
+ self._create_tree_with_utf8(tree)
+ return self.workingtree_to_test_tree(tree)
+
+ def _create_tree_with_utf8(self, tree):
+ """Generate a tree with a utf8 revision and unicode paths."""
+ self.requireFeature(features.UnicodeFilenameFeature)
+        # We avoid combining characters in file names here; normalization
+        # checks (as performed by some file systems, e.g. OSX) are outside the
+        # scope of these tests. We use the euro sign \N{Euro Sign} or \u20ac in
+        # unicode strings or '\xe2\x82\xac' (its utf-8 encoding) in raw strings.
+ paths = [u'',
+ u'fo\N{Euro Sign}o',
+ u'ba\N{Euro Sign}r/',
+ u'ba\N{Euro Sign}r/ba\N{Euro Sign}z',
+ ]
+ # bzr itself does not create unicode file ids, but we want them for
+ # testing.
+ file_ids = ['TREE_ROOT',
+ 'fo\xe2\x82\xaco-id',
+ 'ba\xe2\x82\xacr-id',
+ 'ba\xe2\x82\xacz-id',
+ ]
+ self.build_tree(paths[1:])
+ if tree.get_root_id() is None:
+ # Some trees do not have a root yet.
+ tree.add(paths, file_ids)
+ else:
+ # Some trees will already have a root
+ tree.set_root_id(file_ids[0])
+ tree.add(paths[1:], file_ids[1:])
+ try:
+ tree.commit(u'in\xedtial', rev_id=u'r\xe9v-1'.encode('utf8'))
+ except errors.NonAsciiRevisionId:
+ raise tests.TestSkipped('non-ascii revision ids not supported')
+
+ def get_tree_with_merged_utf8(self, tree):
+ """Generate a tree with utf8 ancestors."""
+ self._create_tree_with_utf8(tree)
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ self.build_tree([u'tree2/ba\N{Euro Sign}r/qu\N{Euro Sign}x'])
+ tree2.add([u'ba\N{Euro Sign}r/qu\N{Euro Sign}x'],
+ [u'qu\N{Euro Sign}x-id'.encode('utf-8')])
+ tree2.commit(u'to m\xe9rge', rev_id=u'r\xe9v-2'.encode('utf8'))
+
+ tree.merge_from_branch(tree2.branch)
+ tree.commit(u'm\xe9rge', rev_id=u'r\xe9v-3'.encode('utf8'))
+ return self.workingtree_to_test_tree(tree)
+
+
+def make_scenarios(transport_server, transport_readonly_server, formats):
+ """Generate test suites for each Tree implementation in bzrlib.
+
+ Currently this covers all working tree formats, and RevisionTree and
+ DirStateRevisionTree by committing a working tree to create the revision
+ tree.
+ """
+ scenarios = wt_make_scenarios(transport_server, transport_readonly_server,
+ formats)
+ # now adjust the scenarios and add the non-working-tree tree scenarios.
+ for scenario in scenarios:
+ # for working tree format tests, preserve the tree
+ scenario[1]["_workingtree_to_test_tree"] = return_parameter
+ # add RevisionTree scenario
+ workingtree_format = format_registry.get_default()
+ scenarios.append((RevisionTree.__name__,
+ create_tree_scenario(transport_server, transport_readonly_server,
+ workingtree_format, revision_tree_from_workingtree,)))
+
+ # also test WorkingTree4/5's RevisionTree implementation which is
+ # specialised.
+ # XXX: Ask igc if WT5 revision tree actually is different.
+ scenarios.append((DirStateRevisionTree.__name__ + ",WT4",
+ create_tree_scenario(transport_server, transport_readonly_server,
+ WorkingTreeFormat4(), _dirstate_tree_from_workingtree)))
+ scenarios.append((DirStateRevisionTree.__name__ + ",WT5",
+ create_tree_scenario(transport_server, transport_readonly_server,
+ WorkingTreeFormat5(), _dirstate_tree_from_workingtree)))
+ scenarios.append(("PreviewTree", create_tree_scenario(transport_server,
+ transport_readonly_server, workingtree_format, preview_tree_pre)))
+ scenarios.append(("PreviewTreePost", create_tree_scenario(transport_server,
+ transport_readonly_server, workingtree_format, preview_tree_post)))
+ return scenarios
+
+
+def create_tree_scenario(transport_server, transport_readonly_server,
+ workingtree_format, converter):
+ """Create a scenario for the specified converter
+
+ :param converter: A function that converts a workingtree into the
+ desired format.
+ :param workingtree_format: The particular workingtree format to
+ convert from.
+ :return: a (name, options) tuple, where options is a dict of values
+ to be used as members of the TestCase.
+ """
+ scenario_options = wt_make_scenario(transport_server,
+ transport_readonly_server,
+ workingtree_format)
+ scenario_options["_workingtree_to_test_tree"] = converter
+ return scenario_options
+
+
+def load_tests(standard_tests, module, loader):
+ per_tree_mod_names = [
+ 'annotate_iter',
+ 'export',
+ 'get_file_mtime',
+ 'get_file_with_stat',
+ 'get_root_id',
+ 'get_symlink_target',
+ 'ids',
+ 'inv',
+ 'iter_search_rules',
+ 'is_executable',
+ 'list_files',
+ 'locking',
+ 'path_content_summary',
+ 'revision_tree',
+ 'test_trees',
+ 'tree',
+ 'walkdirs',
+ ]
+ submod_tests = loader.loadTestsFromModuleNames(
+ ['bzrlib.tests.per_tree.test_' + name
+ for name in per_tree_mod_names])
+ scenarios = make_scenarios(
+ tests.default_transport,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ format_registry._get_all())
+ # add the tests for the sub modules
+ return tests.multiply_tests(submod_tests, scenarios, standard_tests)
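Each scenario produced by make_scenarios is a (name, options) pair whose
options dict ends up as attributes on a copy of every test, which is how
workingtree_to_test_tree above can call self._workingtree_to_test_tree. A
hypothetical, simplified sketch of that multiplication step (bzrlib's real
multiply_tests does the equivalent with its own helpers):

    import copy
    import unittest

    def apply_scenario_sketch(test, scenario):
        # Clone the test and attach every scenario option as an attribute,
        # e.g. _workingtree_to_test_tree or workingtree_format.
        name, options = scenario
        new_test = copy.deepcopy(test)
        for attr, value in options.items():
            setattr(new_test, attr, value)
        return new_test

    def multiply_sketch(suite, scenarios):
        # A full implementation would also flatten nested suites and give
        # each copy a distinct test id; this only shows the fan-out.
        result = unittest.TestSuite()
        for test in suite:
            for scenario in scenarios:
                result.addTest(apply_scenario_sketch(test, scenario))
        return result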
diff --git a/bzrlib/tests/per_tree/test_annotate_iter.py b/bzrlib/tests/per_tree/test_annotate_iter.py
new file mode 100644
index 0000000..358d0f6
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_annotate_iter.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all Tree's implement .annotate_iter()"""
+
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestAnnotate(TestCaseWithTree):
+
+ def get_simple_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/one', 'first\ncontent\n')])
+ tree.add(['one'], ['one-id'])
+ tree.commit('one', rev_id='one')
+ self.build_tree_contents([('tree/one', 'second\ncontent\n')])
+ tree.commit('two', rev_id='two')
+ return self._convert_tree(tree)
+
+ def get_tree_with_ghost(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/one', 'first\ncontent\n')])
+ tree.add(['one'], ['one-id'])
+ tree.commit('one', rev_id='one')
+ tree.set_parent_ids(['one', 'ghost-one'])
+ self.build_tree_contents([('tree/one', 'second\ncontent\n')])
+ tree.commit('two', rev_id='two')
+ return self._convert_tree(tree)
+
+ def test_annotate_simple(self):
+ tree = self.get_simple_tree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([('two', 'second\n'), ('one', 'content\n')],
+ list(tree.annotate_iter('one-id')))
+
+ def test_annotate_with_ghost(self):
+ tree = self.get_tree_with_ghost()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([('two', 'second\n'), ('one', 'content\n')],
+ list(tree.annotate_iter('one-id')))
diff --git a/bzrlib/tests/per_tree/test_export.py b/bzrlib/tests/per_tree/test_export.py
new file mode 100644
index 0000000..cb9afca
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_export.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import tarfile
+import zipfile
+
+from bzrlib.export import export
+from bzrlib import osutils
+from bzrlib import tests
+from bzrlib.tests.per_tree import TestCaseWithTree
+from bzrlib.tests import (
+ features,
+ )
+
+
+class ExportTest(object):
+
+ def prepare_export(self):
+ work_a = self.make_branch_and_tree('wta')
+ self.build_tree_contents(
+ [('wta/file', 'a\nb\nc\nd\n'), ('wta/dir', '')])
+ work_a.add('file', 'file-id')
+ work_a.add('dir', 'dir-id')
+ work_a.commit('add file')
+ tree_a = self.workingtree_to_test_tree(work_a)
+ export(tree_a, 'output', self.exporter)
+
+ def prepare_symlink_export(self):
+ self.requireFeature(features.SymlinkFeature)
+ work_a = self.make_branch_and_tree('wta')
+ os.symlink('target', 'wta/link')
+ work_a.add('link', 'link-id')
+ work_a.commit('add link')
+ tree_a = self.workingtree_to_test_tree(work_a)
+ export(tree_a, 'output', self.exporter)
+
+ def test_export(self):
+ self.prepare_export()
+ names = self.get_export_names()
+ self.assertIn('output/file', names)
+ self.assertIn('output/dir', names)
+
+ def test_export_symlink(self):
+ self.prepare_symlink_export()
+ names = self.get_export_names()
+ self.assertIn('output/link', names)
+
+
+class TestTar(ExportTest, TestCaseWithTree):
+
+ exporter = 'tar'
+
+ def get_export_names(self):
+ tf = tarfile.open('output')
+ try:
+ return tf.getnames()
+ finally:
+ tf.close()
+
+
+class TestZip(ExportTest, TestCaseWithTree):
+
+ exporter = 'zip'
+
+ def get_export_names(self):
+ zf = zipfile.ZipFile('output')
+ try:
+ return zf.namelist()
+ finally:
+ zf.close()
+
+ def test_export_symlink(self):
+ self.prepare_symlink_export()
+ names = self.get_export_names()
+ self.assertIn('output/link.lnk', names)
+
+
+class TestDir(ExportTest, TestCaseWithTree):
+
+ exporter = 'dir'
+
+ def get_export_names(self):
+ return [osutils.pathjoin('output', name)
+ for name in os.listdir('output')]
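All three exporters above go through the same export(tree, dest, format)
entry point. A minimal sketch of driving it directly against a working tree,
using only the call shape shown in prepare_export (the checkout path and
output names are illustrative):

    from bzrlib import workingtree
    from bzrlib.export import export

    # Assumes an existing working tree such as the 'wta' tree built above.
    wt = workingtree.WorkingTree.open('wta')
    wt.lock_read()
    try:
        export(wt, 'output.tar', 'tar')   # tarball, as exercised by TestTar
        export(wt, 'output-dir', 'dir')   # plain directory, as in TestDir
    finally:
        wt.unlock()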
diff --git a/bzrlib/tests/per_tree/test_get_file_mtime.py b/bzrlib/tests/per_tree/test_get_file_mtime.py
new file mode 100644
index 0000000..c2d24ad
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_get_file_mtime.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all Tree's implement get_file_mtime"""
+
+import time
+
+from bzrlib import errors
+
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestGetFileMTime(TestCaseWithTree):
+
+ def get_basic_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/one'])
+ tree.add(['one'], ['one-id'])
+ return self._convert_tree(tree)
+
+ def test_get_file_mtime(self):
+ now = time.time()
+ tree = self.get_basic_tree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # Committed trees return the time of the commit that last changed the
+ # file, working trees return the on-disk time.
+ mtime_file_id = tree.get_file_mtime(file_id='one-id')
+ self.assertIsInstance(mtime_file_id, (float, int))
+ self.assertTrue(now - 5 < mtime_file_id < now + 5,
+ 'now: %f, mtime_file_id: %f' % (now, mtime_file_id ))
+ mtime_path = tree.get_file_mtime(file_id='one-id', path='one')
+ self.assertEqual(mtime_file_id, mtime_path)
+
+ def test_nonexistant(self):
+ tree = self.get_basic_tree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertRaises(errors.NoSuchId,
+ tree.get_file_mtime, file_id='unexistant')
diff --git a/bzrlib/tests/per_tree/test_get_file_with_stat.py b/bzrlib/tests/per_tree/test_get_file_with_stat.py
new file mode 100644
index 0000000..fc397a0
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_get_file_with_stat.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all WorkingTree's implement get_file_with_stat."""
+
+import os
+
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+
+class TestGetFileWithStat(TestCaseWithTree):
+
+ def test_get_file_with_stat_id_only(self):
+ work_tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ work_tree.add(['foo'], ['foo-id'])
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ file_obj, statvalue = tree.get_file_with_stat('foo-id')
+ self.addCleanup(file_obj.close)
+ if statvalue is not None:
+ expected = os.lstat('foo')
+ self.assertEqualStat(expected, statvalue)
+ self.assertEqual(["contents of foo\n"], file_obj.readlines())
+
+ def test_get_file_with_stat_id_and_path(self):
+ work_tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ work_tree.add(['foo'], ['foo-id'])
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ file_obj, statvalue = tree.get_file_with_stat('foo-id', 'foo')
+ self.addCleanup(file_obj.close)
+ if statvalue is not None:
+ expected = os.lstat('foo')
+ self.assertEqualStat(expected, statvalue)
+ self.assertEqual(["contents of foo\n"], file_obj.readlines())
diff --git a/bzrlib/tests/per_tree/test_get_root_id.py b/bzrlib/tests/per_tree/test_get_root_id.py
new file mode 100644
index 0000000..5ba9753
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_get_root_id.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Tree.get_root_id()"""
+
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestGetRootID(TestCaseWithTree):
+
+ def make_tree_with_default_root_id(self):
+ tree = self.make_branch_and_tree('tree')
+ return self._convert_tree(tree)
+
+ def make_tree_with_fixed_root_id(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.set_root_id('custom-tree-root-id')
+ return self._convert_tree(tree)
+
+ def test_get_root_id_default(self):
+ tree = self.make_tree_with_default_root_id()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertIsNot(None, tree.get_root_id())
+
+ def test_get_root_id_fixed(self):
+ tree = self.make_tree_with_fixed_root_id()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('custom-tree-root-id', tree.get_root_id())
+
diff --git a/bzrlib/tests/per_tree/test_get_symlink_target.py b/bzrlib/tests/per_tree/test_get_symlink_target.py
new file mode 100644
index 0000000..26b229b
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_get_symlink_target.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all Tree's implement get_symlink_target"""
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+from bzrlib.tests import per_tree
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestGetSymlinkTarget(per_tree.TestCaseWithTree):
+
+ def get_tree_with_symlinks(self):
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('tree')
+ os.symlink('foo', 'tree/link')
+ os.symlink('../bar', 'tree/rel_link')
+ os.symlink('/baz/bing', 'tree/abs_link')
+
+ tree.add(['link', 'rel_link', 'abs_link'],
+ ['link-id', 'rel-link-id', 'abs-link-id'])
+ return self._convert_tree(tree)
+
+ def test_get_symlink_target(self):
+ tree = self.get_tree_with_symlinks()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('foo', tree.get_symlink_target('link-id'))
+ self.assertEqual('../bar', tree.get_symlink_target('rel-link-id'))
+ self.assertEqual('/baz/bing', tree.get_symlink_target('abs-link-id'))
+ self.assertEqual('foo', tree.get_symlink_target('link-id', 'link'))
+
+ def test_get_unicode_symlink_target(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('tree')
+ target = u'targ\N{Euro Sign}t'
+ os.symlink(target, u'tree/\u03b2_link'.encode(osutils._fs_enc))
+ tree.add([u'\u03b2_link'], ['link-id'])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ actual = tree.get_symlink_target('link-id')
+ self.assertEqual(target, actual)
+
diff --git a/bzrlib/tests/per_tree/test_ids.py b/bzrlib/tests/per_tree/test_ids.py
new file mode 100644
index 0000000..cdcc470
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_ids.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+class IdTests(TestCaseWithTree):
+
+ def setUp(self):
+ super(IdTests, self).setUp()
+ work_a = self.make_branch_and_tree('wta')
+ self.build_tree(['wta/bla', 'wta/dir/', 'wta/dir/file'])
+ work_a.add(['bla', 'dir', 'dir/file'], ['bla-id', 'dir-id', 'file-id'])
+ work_a.commit('add files')
+ self.tree_a = self.workingtree_to_test_tree(work_a)
+
+ def test_path2id(self):
+ self.assertEquals('bla-id', self.tree_a.path2id('bla'))
+ self.assertEquals('dir-id', self.tree_a.path2id('dir'))
+ self.assertIs(None, self.tree_a.path2id('idontexist'))
+
+ def test_path2id_list(self):
+ self.assertEquals('bla-id', self.tree_a.path2id(['bla']))
+ self.assertEquals('dir-id', self.tree_a.path2id(['dir']))
+ self.assertEquals('file-id', self.tree_a.path2id(['dir', 'file']))
+ self.assertEquals(self.tree_a.get_root_id(),
+ self.tree_a.path2id([]))
+ self.assertIs(None, self.tree_a.path2id(['idontexist']))
+ self.assertIs(None, self.tree_a.path2id(['dir', 'idontexist']))
+
+ def test_id2path(self):
+ self.addCleanup(self.tree_a.lock_read().unlock)
+ self.assertEquals('bla', self.tree_a.id2path('bla-id'))
+ self.assertEquals('dir', self.tree_a.id2path('dir-id'))
+ self.assertEquals('dir/file', self.tree_a.id2path('file-id'))
+ self.assertRaises(errors.NoSuchId, self.tree_a.id2path, 'nonexistant')
diff --git a/bzrlib/tests/per_tree/test_inv.py b/bzrlib/tests/per_tree/test_inv.py
new file mode 100644
index 0000000..64c595b
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_inv.py
@@ -0,0 +1,173 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of inventories of trees."""
+
+
+from bzrlib import (
+ tests,
+ )
+from bzrlib.tests import (
+ per_tree,
+ )
+from bzrlib.mutabletree import MutableTree
+from bzrlib.tests import TestSkipped
+from bzrlib.tree import InventoryTree
+from bzrlib.transform import _PreviewTree
+from bzrlib.uncommit import uncommit
+from bzrlib.tests import (
+ features,
+ )
+
+
+def get_entry(tree, file_id):
+ return tree.iter_entries_by_dir([file_id]).next()[1]
+
+
+class TestInventoryWithSymlinks(per_tree.TestCaseWithTree):
+
+ _test_needs_features = [features.SymlinkFeature]
+
+ def setUp(self):
+ per_tree.TestCaseWithTree.setUp(self)
+ self.tree = self.get_tree_with_subdirs_and_all_content_types()
+ self.tree.lock_read()
+ self.addCleanup(self.tree.unlock)
+
+ def test_symlink_target(self):
+ if isinstance(self.tree, (MutableTree, _PreviewTree)):
+ raise TestSkipped(
+ 'symlinks not accurately represented in working trees and'
+ ' preview trees')
+ entry = get_entry(self.tree, self.tree.path2id('symlink'))
+ self.assertEqual(entry.symlink_target, 'link-target')
+
+ def test_symlink_target_tree(self):
+ self.assertEqual('link-target',
+ self.tree.get_symlink_target('symlink'))
+
+ def test_kind_symlink(self):
+ self.assertEqual('symlink', self.tree.kind('symlink'))
+ self.assertIs(None, self.tree.get_file_size('symlink'))
+
+ def test_symlink(self):
+ entry = get_entry(self.tree, self.tree.path2id('symlink'))
+ self.assertEqual(entry.kind, 'symlink')
+ self.assertEqual(None, entry.text_size)
+
+
+class TestInventory(per_tree.TestCaseWithTree):
+
+ def test_paths2ids_recursive(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/dir/', 'tree/dir/file'])
+ work_tree.add(['dir', 'dir/file'], ['dir-id', 'file-id'])
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(set(['dir-id', 'file-id']), tree.paths2ids(['dir']))
+
+ def test_paths2ids_forget_old(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ work_tree.add('file', 'first-id')
+ work_tree.commit('commit old state')
+ work_tree.remove('file')
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(set([]), tree.paths2ids(['file'],
+ require_versioned=False))
+
+ def _make_canonical_test_tree(self, commit=True):
+ # make a tree used by all the 'canonical' tests below.
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/dir/', 'tree/dir/file'])
+ work_tree.add(['dir', 'dir/file'])
+ if commit:
+ work_tree.commit('commit 1')
+ # XXX: this isn't actually guaranteed to return the class we want to
+ # test -- mbp 2010-02-12
+ return work_tree
+
+ def test_canonical_path(self):
+ work_tree = self._make_canonical_test_tree()
+ if not isinstance(work_tree, InventoryTree):
+ raise tests.TestNotApplicable(
+ "test not applicable on non-inventory tests")
+ self.assertEqual('dir/file',
+ work_tree.get_canonical_inventory_path('Dir/File'))
+
+ def test_canonical_path_before_commit(self):
+        work_tree = self._make_canonical_test_tree(False) # note: not committed
+        if not isinstance(work_tree, InventoryTree):
+            raise tests.TestNotApplicable(
+                "test not applicable on non-inventory trees")
+ self.assertEqual('dir/file',
+ work_tree.get_canonical_inventory_path('Dir/File'))
+
+ def test_canonical_path_dir(self):
+ # check it works when asked for just the directory portion.
+ work_tree = self._make_canonical_test_tree()
+ if not isinstance(work_tree, InventoryTree):
+ raise tests.TestNotApplicable(
+ "test not applicable on non-inventory tests")
+ self.assertEqual('dir', work_tree.get_canonical_inventory_path('Dir'))
+
+ def test_canonical_path_root(self):
+ work_tree = self._make_canonical_test_tree()
+ if not isinstance(work_tree, InventoryTree):
+ raise tests.TestNotApplicable(
+ "test not applicable on non-inventory tests")
+ self.assertEqual('', work_tree.get_canonical_inventory_path(''))
+ self.assertEqual('/', work_tree.get_canonical_inventory_path('/'))
+
+ def test_canonical_path_invalid_all(self):
+ work_tree = self._make_canonical_test_tree()
+ if not isinstance(work_tree, InventoryTree):
+ raise tests.TestNotApplicable(
+ "test not applicable on non-inventory tests")
+ self.assertEqual('foo/bar',
+ work_tree.get_canonical_inventory_path('foo/bar'))
+
+ def test_canonical_invalid_child(self):
+ work_tree = self._make_canonical_test_tree()
+ if not isinstance(work_tree, InventoryTree):
+ raise tests.TestNotApplicable(
+ "test not applicable on non-inventory tests")
+ self.assertEqual('dir/None',
+ work_tree.get_canonical_inventory_path('Dir/None'))
+
+ def test_canonical_tree_name_mismatch(self):
+ # see <https://bugs.launchpad.net/bzr/+bug/368931>
+ # some of the trees we want to use can only exist on a disk, not in
+ # memory - therefore we can only test this if the filesystem is
+ # case-sensitive.
+ self.requireFeature(features.case_sensitive_filesystem_feature)
+ work_tree = self.make_branch_and_tree('.')
+ self.build_tree(['test/', 'test/file', 'Test'])
+ work_tree.add(['test/', 'test/file', 'Test'])
+
+ test_tree = self._convert_tree(work_tree)
+ if not isinstance(test_tree, InventoryTree):
+ raise tests.TestNotApplicable(
+ "test not applicable on non-inventory tests")
+ test_tree.lock_read()
+ self.addCleanup(test_tree.unlock)
+
+ self.assertEqual(['test', 'test/file', 'Test', 'test/foo', 'Test/foo'],
+ test_tree.get_canonical_inventory_paths(
+ ['test', 'test/file', 'Test', 'test/foo', 'Test/foo']))
diff --git a/bzrlib/tests/per_tree/test_is_executable.py b/bzrlib/tests/per_tree/test_is_executable.py
new file mode 100644
index 0000000..61519ca
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_is_executable.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.tests import (
+ per_tree,
+ )
+from bzrlib.tests.features import (
+ SymlinkFeature,
+ )
+
+
+class TestIsExecutable(per_tree.TestCaseWithTree):
+
+ def test_is_executable_dir(self):
+ tree = self.get_tree_with_subdirs_and_all_supported_content_types(
+ False)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(False, tree.is_executable('1top-dir'))
+
+ def test_is_executable_symlink(self):
+ self.requireFeature(SymlinkFeature)
+ tree = self.get_tree_with_subdirs_and_all_content_types()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(False, tree.is_executable('symlink'))
diff --git a/bzrlib/tests/per_tree/test_iter_search_rules.py b/bzrlib/tests/per_tree/test_iter_search_rules.py
new file mode 100644
index 0000000..1fb7476
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_iter_search_rules.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all Tree's implement iter_search_rules."""
+
+from bzrlib import (
+ rules,
+ )
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestIterSearchRules(TestCaseWithTree):
+
+ def make_per_user_searcher(self, text):
+ """Make a _RulesSearcher from a string"""
+ return rules._IniBasedRulesSearcher(text.splitlines(True))
+
+ def make_tree_with_rules(self, text):
+ tree = self.make_branch_and_tree('.')
+ if text is not None:
+ self.fail("No method for in-tree rules agreed on yet.")
+ text_utf8 = text.encode('utf-8')
+ self.build_tree_contents([(rules.RULES_TREE_FILENAME, text_utf8)])
+ tree.add(rules.RULES_TREE_FILENAME)
+ tree.commit("add rules file")
+ result = self._convert_tree(tree)
+ result.lock_read()
+ self.addCleanup(result.unlock)
+ return result
+
+ def test_iter_search_rules_no_tree(self):
+ per_user = self.make_per_user_searcher(
+ "[name ./a.txt]\nfoo=baz\n"
+ "[name *.txt]\nfoo=bar\na=True\n")
+ tree = self.make_tree_with_rules(None)
+ result = list(tree.iter_search_rules(['a.txt', 'dir/a.txt'],
+ _default_searcher=per_user))
+ self.assertEquals((('foo', 'baz'),), result[0])
+ self.assertEquals((('foo', 'bar'), ('a', 'True')), result[1])
+
+ def _disabled_test_iter_search_rules_just_tree(self):
+ per_user = self.make_per_user_searcher('')
+ tree = self.make_tree_with_rules(
+ "[name ./a.txt]\n"
+ "foo=baz\n"
+ "[name *.txt]\n"
+ "foo=bar\n"
+ "a=True\n")
+ result = list(tree.iter_search_rules(['a.txt', 'dir/a.txt'],
+ _default_searcher=per_user))
+ self.assertEquals((('foo', 'baz'),), result[0])
+ self.assertEquals((('foo', 'bar'), ('a', 'True')), result[1])
+
+ def _disabled_test_iter_search_rules_tree_and_per_user(self):
+ per_user = self.make_per_user_searcher(
+ "[name ./a.txt]\nfoo=baz\n"
+ "[name *.txt]\nfoo=bar\na=True\n")
+ tree = self.make_tree_with_rules(
+ "[name ./a.txt]\n"
+ "foo=qwerty\n")
+ result = list(tree.iter_search_rules(['a.txt', 'dir/a.txt'],
+ _default_searcher=per_user))
+ self.assertEquals((('foo', 'qwerty'),), result[0])
+ self.assertEquals((('foo', 'bar'), ('a', 'True')), result[1])
diff --git a/bzrlib/tests/per_tree/test_list_files.py b/bzrlib/tests/per_tree/test_list_files.py
new file mode 100644
index 0000000..572d8e7
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_list_files.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all trees support Tree.list_files()"""
+
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestListFiles(TestCaseWithTree):
+
+ def test_list_files_with_root(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ expected = [('', 'V', 'directory', 'root-id'),
+ ('a', 'V', 'file', 'a-id'),
+ ('b', 'V', 'directory', 'b-id'),
+ ('b/c', 'V', 'file', 'c-id'),
+ ]
+ tree.lock_read()
+ try:
+ actual = [(path, status, kind, file_id)
+ for path, status, kind, file_id, ie in
+ tree.list_files(include_root=True)]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected, actual)
+
+ def test_list_files_no_root(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ expected = [('a', 'V', 'file', 'a-id'),
+ ('b', 'V', 'directory', 'b-id'),
+ ('b/c', 'V', 'file', 'c-id'),
+ ]
+ tree.lock_read()
+ try:
+ actual = [(path, status, kind, file_id)
+ for path, status, kind, file_id, ie in
+ tree.list_files()]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected, actual)
+
+ def test_list_files_with_root_no_recurse(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ expected = [('', 'V', 'directory', 'root-id'),
+ ('a', 'V', 'file', 'a-id'),
+ ('b', 'V', 'directory', 'b-id'),
+ ]
+ tree.lock_read()
+ try:
+ actual = [(path, status, kind, file_id)
+ for path, status, kind, file_id, ie in
+ tree.list_files(include_root=True, recursive=False)]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected, actual)
+
+ def test_list_files_no_root_no_recurse(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ expected = [('a', 'V', 'file', 'a-id'),
+ ('b', 'V', 'directory', 'b-id'),
+ ]
+ tree.lock_read()
+ try:
+ actual = [(path, status, kind, file_id)
+ for path, status, kind, file_id, ie in
+ tree.list_files(recursive=False)]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected, actual)
+
+ def test_list_files_from_dir(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ expected = [('c', 'V', 'file', 'c-id'),
+ ]
+ tree.lock_read()
+ try:
+ actual = [(path, status, kind, file_id)
+ for path, status, kind, file_id, ie in
+ tree.list_files(from_dir='b')]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected, actual)
+
+ def test_list_files_from_dir_no_recurse(self):
+ # The test trees don't have much nesting so test with an explicit root
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ expected = [('a', 'V', 'file', 'a-id'),
+ ('b', 'V', 'directory', 'b-id'),
+ ]
+ tree.lock_read()
+ try:
+ actual = [(path, status, kind, file_id)
+ for path, status, kind, file_id, ie in
+ tree.list_files(from_dir='', recursive=False)]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected, actual)
diff --git a/bzrlib/tests/per_tree/test_locking.py b/bzrlib/tests/per_tree/test_locking.py
new file mode 100644
index 0000000..886cfc8
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_locking.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all trees support Tree.lock_read()"""
+
+from bzrlib.tests.matchers import *
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestLocking(TestCaseWithTree):
+
+ def test_lock_read(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.workingtree_to_test_tree(work_tree)
+ self.assertThat(tree.lock_read, ReturnsUnlockable(tree))
diff --git a/bzrlib/tests/per_tree/test_path_content_summary.py b/bzrlib/tests/per_tree/test_path_content_summary.py
new file mode 100644
index 0000000..aea3913
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_path_content_summary.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all Trees implement path_content_summary."""
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ transform,
+ )
+
+from bzrlib.tests import (
+ features,
+ per_tree,
+ )
+from bzrlib.tests.features import (
+ SymlinkFeature,
+ )
+
+
+class TestPathContentSummary(per_tree.TestCaseWithTree):
+
+ def _convert_tree(self, tree):
+ result = per_tree.TestCaseWithTree._convert_tree(self, tree)
+ result.lock_read()
+ self.addCleanup(result.unlock)
+ return result
+
+ def check_content_summary_size(self, tree, summary, expected_size):
+        # If the tree supports content filters, then it's allowed to leave out
+        # the size because it might be difficult to compute. Otherwise, it
+        # must be present and correct.
+ returned_size = summary[1]
+ if returned_size == expected_size or (
+ tree.supports_content_filtering()
+ and returned_size is None):
+ pass
+ else:
+ self.fail("invalid size in summary: %r" % (returned_size,))
+
+ def test_symlink_content_summary(self):
+ self.requireFeature(SymlinkFeature)
+ tree = self.make_branch_and_tree('tree')
+ os.symlink('target', 'tree/path')
+ tree.add(['path'])
+ summary = self._convert_tree(tree).path_content_summary('path')
+ self.assertEqual(('symlink', None, None, 'target'), summary)
+
+ def test_unicode_symlink_content_summary(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('tree')
+ os.symlink('target', u'tree/\u03b2-path'.encode(osutils._fs_enc))
+ tree.add([u'\u03b2-path'])
+ summary = self._convert_tree(tree).path_content_summary(u'\u03b2-path')
+ self.assertEqual(('symlink', None, None, 'target'), summary)
+
+ def test_unicode_symlink_target_summary(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('tree')
+ os.symlink(u'tree/\u03b2-path'.encode(osutils._fs_enc), 'tree/link')
+ tree.add(['link'])
+ summary = self._convert_tree(tree).path_content_summary('link')
+ self.assertEqual(('symlink', None, None, u'tree/\u03b2-path'), summary)
+
+ def test_missing_content_summary(self):
+ tree = self.make_branch_and_tree('tree')
+ summary = self._convert_tree(tree).path_content_summary('path')
+ self.assertEqual(('missing', None, None, None), summary)
+
+ def test_file_content_summary_executable(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/path'])
+ tree.add(['path'])
+ tt = transform.TreeTransform(tree)
+ self.addCleanup(tt.finalize)
+ tt.set_executability(True, tt.trans_id_tree_path('path'))
+ tt.apply()
+ summary = self._convert_tree(tree).path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ self.assertEqual('file', summary[0])
+ self.check_content_summary_size(tree, summary, 22)
+ # executable
+ self.assertEqual(True, summary[2])
+ # may have hash,
+ self.assertSubset((summary[3],),
+ (None, '0c352290ae1c26ca7f97d5b2906c4624784abd60'))
+
+ def test_file_content_summary_not_versioned(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/path'])
+ tree = self._convert_tree(tree)
+ summary = tree.path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ if isinstance(tree, (per_tree.DirStateRevisionTree,
+ per_tree.RevisionTree)):
+ self.assertEqual('missing', summary[0])
+ self.assertIs(None, summary[2])
+ self.assertIs(None, summary[3])
+ elif isinstance(tree, transform._PreviewTree):
+            self.expectFailure('PreviewTree returns "missing" for unversioned '
+ 'files', self.assertEqual, 'file', summary[0])
+ self.assertEqual('file', summary[0])
+ else:
+ self.assertEqual('file', summary[0])
+ self.check_content_summary_size(tree, summary, 22)
+ self.assertEqual(False, summary[2])
+ self.assertSubset((summary[3],),
+ (None, '0c352290ae1c26ca7f97d5b2906c4624784abd60'))
+
+ def test_file_content_summary_non_exec(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/path'])
+ tree.add(['path'])
+ summary = self._convert_tree(tree).path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ self.assertEqual('file', summary[0])
+ self.check_content_summary_size(tree, summary, 22)
+ # not executable
+ self.assertEqual(False, summary[2])
+ # may have hash,
+ self.assertSubset((summary[3],),
+ (None, '0c352290ae1c26ca7f97d5b2906c4624784abd60'))
+
+ def test_dir_content_summary(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/path/'])
+ tree.add(['path'])
+ summary = self._convert_tree(tree).path_content_summary('path')
+ self.assertEqual(('directory', None, None, None), summary)
+
+ def test_tree_content_summary(self):
+ tree = self.make_branch_and_tree('tree')
+ if not tree.branch.repository._format.supports_tree_reference:
+ raise tests.TestNotApplicable("Tree references not supported.")
+ subtree = self.make_branch_and_tree('tree/path')
+ tree.add(['path'])
+ summary = self._convert_tree(tree).path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ self.assertEqual('tree-reference', summary[0])
diff --git a/bzrlib/tests/per_tree/test_revision_tree.py b/bzrlib/tests/per_tree/test_revision_tree.py
new file mode 100644
index 0000000..9c13444
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_revision_tree.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Tree.revision_tree."""
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.tests import per_tree
+
+
+class TestRevisionTree(per_tree.TestCaseWithTree):
+
+ def create_tree_no_parents_no_content(self):
+ tree = self.make_branch_and_tree('.')
+ return self.get_tree_no_parents_no_content(tree)
+
+ def test_get_random_tree_raises(self):
+ test_tree = self.create_tree_no_parents_no_content()
+ self.assertRaises(errors.NoSuchRevision, test_tree.revision_tree,
+ 'this-should-not-exist')
diff --git a/bzrlib/tests/per_tree/test_test_trees.py b/bzrlib/tests/per_tree/test_test_trees.py
new file mode 100644
index 0000000..aa435b2
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_test_trees.py
@@ -0,0 +1,307 @@
+# Copyright (C) 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the test trees used by the per_tree tests."""
+
+from bzrlib.tests import per_tree
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestTreeShapes(per_tree.TestCaseWithTree):
+
+ def test_empty_tree_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_no_content(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ self.assertEqual(['empty-root-id'], list(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'empty-root-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+
+ def test_abc_tree_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_abc_content(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ self.assertEqual(
+ set(['root-id', 'a-id', 'b-id', 'c-id']),
+ set(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'root-id'), ('a', 'a-id'), ('b', 'b-id'), ('b/c', 'c-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+ self.assertEqualDiff('contents of a\n', tree.get_file_text('a-id'))
+ self.assertFalse(tree.is_executable('c-id', path='b/c'))
+
+ def test_abc_tree_content_2_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_abc_content_2(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ self.assertEqual(
+ set(['root-id', 'a-id', 'b-id', 'c-id']),
+ set(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'root-id'), ('a', 'a-id'), ('b', 'b-id'), ('b/c', 'c-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+ self.assertEqualDiff('foobar\n', tree.get_file_text('a-id'))
+ self.assertFalse(tree.is_executable('c-id'))
+
+ def test_abc_tree_content_3_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_abc_content_3(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ self.assertEqual(
+ set(['root-id', 'a-id', 'b-id', 'c-id']),
+ set(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'root-id'), ('a', 'a-id'), ('b', 'b-id'), ('b/c', 'c-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+ self.assertEqualDiff('contents of a\n', tree.get_file_text('a-id'))
+ self.assertTrue(tree.is_executable('c-id'))
+
+ def test_abc_tree_content_4_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_abc_content_4(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ self.assertEqual(
+ set(['root-id', 'a-id', 'b-id', 'c-id']),
+ set(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'root-id'), ('b', 'b-id'), ('d', 'a-id'), ('b/c', 'c-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+ self.assertEqualDiff('contents of a\n', tree.get_file_text('a-id'))
+ self.assertFalse(tree.is_executable('c-id'))
+
+ def test_abc_tree_content_5_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_abc_content_5(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ self.assertEqual(
+ set(['root-id', 'a-id', 'b-id', 'c-id']),
+ set(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'root-id'), ('b', 'b-id'), ('d', 'a-id'), ('b/c', 'c-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+ self.assertEqualDiff('bar\n', tree.get_file_text('a-id'))
+ self.assertFalse(tree.is_executable('c-id'))
+
+ def test_abc_tree_content_6_no_parents(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_no_parents_abc_content_6(tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ self.assertEqual(
+ set(['root-id', 'a-id', 'b-id', 'c-id']),
+ set(tree.all_file_ids()))
+ self.assertEqual(
+ [('', 'root-id'), ('a', 'a-id'), ('b', 'b-id'), ('e', 'c-id')],
+ [(path, node.file_id) for path, node in tree.iter_entries_by_dir()])
+ self.assertEqualDiff('contents of a\n', tree.get_file_text('a-id'))
+ self.assertTrue(tree.is_executable('c-id'))
+
+ def test_tree_with_subdirs_and_all_content_types(self):
+ # currently this test tree requires unicode. It might be better for it
+ # to simply omit the single unicode file when dealing with a
+ # non-unicode filesystem.
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.get_tree_with_subdirs_and_all_content_types()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ tree_root = tree.path2id('')
+ self.assertEqual(
+ set([tree_root,
+ '2file',
+ '1top-dir',
+ '1file-in-1topdir',
+ '0dir-in-1topdir',
+ u'0utf\u1234file'.encode('utf8'),
+ 'symlink',
+ ]),
+ set(tree.all_file_ids()))
+ # note that the order of the paths and fileids is deliberately
+ # mismatched to ensure that the result order is path based.
+ self.assertEqual(
+ [('', tree_root, 'directory'),
+ ('0file', '2file', 'file'),
+ ('1top-dir', '1top-dir', 'directory'),
+ (u'2utf\u1234file', u'0utf\u1234file'.encode('utf8'), 'file'),
+ ('symlink', 'symlink', 'symlink'),
+ ('1top-dir/0file-in-1topdir', '1file-in-1topdir', 'file'),
+ ('1top-dir/1dir-in-1topdir', '0dir-in-1topdir', 'directory')],
+ [(path, node.file_id, node.kind) for path, node in tree.iter_entries_by_dir()])
+
+ def test_tree_with_subdirs_and_all_content_types_wo_symlinks(self):
+ # currently this test tree requires unicode. It might be better for it
+ # to simply omit the single unicode file when dealing with a
+ # non-unicode filesystem.
+ tree = self.get_tree_with_subdirs_and_all_supported_content_types(False)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual([], tree.get_parent_ids())
+ self.assertEqual([], tree.conflicts())
+ self.assertEqual([], list(tree.unknowns()))
+ # __iter__ has no strongly defined order
+ tree_root = tree.path2id('')
+ self.assertEqual(
+ set([tree_root,
+ '2file',
+ '1top-dir',
+ '1file-in-1topdir',
+ '0dir-in-1topdir',
+ u'0utf\u1234file'.encode('utf8'),
+ ]),
+ set(tree.all_file_ids()))
+ # note that the order of the paths and fileids is deliberately
+ # mismatched to ensure that the result order is path based.
+ self.assertEqual(
+ [('', tree_root, 'directory'),
+ ('0file', '2file', 'file'),
+ ('1top-dir', '1top-dir', 'directory'),
+ (u'2utf\u1234file', u'0utf\u1234file'.encode('utf8'), 'file'),
+ ('1top-dir/0file-in-1topdir', '1file-in-1topdir', 'file'),
+ ('1top-dir/1dir-in-1topdir', '0dir-in-1topdir', 'directory')],
+ [(path, node.file_id, node.kind) for path, node in tree.iter_entries_by_dir()])
+
+ def test_tree_with_utf8(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_with_utf8(tree)
+
+ revision_id = u'r\xe9v-1'.encode('utf8')
+ root_id = 'TREE_ROOT'
+ bar_id = u'ba\N{Euro Sign}r-id'.encode('utf8')
+ foo_id = u'fo\N{Euro Sign}o-id'.encode('utf8')
+ baz_id = u'ba\N{Euro Sign}z-id'.encode('utf8')
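+ # Each expected entry is (path, file id, parent id, last-changed revision).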
+ path_and_ids = [(u'', root_id, None, None),
+ (u'ba\N{Euro Sign}r', bar_id, root_id, revision_id),
+ (u'fo\N{Euro Sign}o', foo_id, root_id, revision_id),
+ (u'ba\N{Euro Sign}r/ba\N{Euro Sign}z',
+ baz_id, bar_id, revision_id),
+ ]
+ tree.lock_read()
+ try:
+ path_entries = list(tree.iter_entries_by_dir())
+ finally:
+ tree.unlock()
+
+ for expected, (path, ie) in zip(path_and_ids, path_entries):
+ self.assertEqual(expected[0], path) # Paths should match
+ self.assertIsInstance(path, unicode)
+ self.assertEqual(expected[1], ie.file_id)
+ self.assertIsInstance(ie.file_id, str)
+ self.assertEqual(expected[2], ie.parent_id)
+ if expected[2] is not None:
+ self.assertIsInstance(ie.parent_id, str)
+ # WorkingTrees return None for the last modified revision
+ if ie.revision is not None:
+ self.assertIsInstance(ie.revision, str)
+ if expected[0] != '':
+ # Some trees will preserve the revision id of the tree root,
+ # but not all will
+ self.assertEqual(revision_id, ie.revision)
+ self.assertEqual(len(path_and_ids), len(path_entries))
+ get_revision_id = getattr(tree, 'get_revision_id', None)
+ if get_revision_id is not None:
+ self.assertIsInstance(get_revision_id(), str)
+ last_revision = getattr(tree, 'last_revision', None)
+ if last_revision is not None:
+ self.assertIsInstance(last_revision(), str)
+
+ def test_tree_with_merged_utf8(self):
+ tree = self.make_branch_and_tree('.')
+ tree = self.get_tree_with_merged_utf8(tree)
+
+ revision_id_1 = u'r\xe9v-1'.encode('utf8')
+ revision_id_2 = u'r\xe9v-2'.encode('utf8')
+ root_id = 'TREE_ROOT'
+ bar_id = u'ba\N{Euro Sign}r-id'.encode('utf8')
+ foo_id = u'fo\N{Euro Sign}o-id'.encode('utf8')
+ baz_id = u'ba\N{Euro Sign}z-id'.encode('utf8')
+ qux_id = u'qu\N{Euro Sign}x-id'.encode('utf8')
+ path_and_ids = [(u'', root_id, None, None),
+ (u'ba\N{Euro Sign}r', bar_id, root_id, revision_id_1),
+ (u'fo\N{Euro Sign}o', foo_id, root_id, revision_id_1),
+ (u'ba\N{Euro Sign}r/ba\N{Euro Sign}z',
+ baz_id, bar_id, revision_id_1),
+ (u'ba\N{Euro Sign}r/qu\N{Euro Sign}x',
+ qux_id, bar_id, revision_id_2),
+ ]
+ tree.lock_read()
+ try:
+ path_entries = list(tree.iter_entries_by_dir())
+ finally:
+ tree.unlock()
+
+ for (epath, efid, eparent, erev), (path, ie) in zip(path_and_ids,
+ path_entries):
+ self.assertEqual(epath, path) # Paths should match
+ self.assertIsInstance(path, unicode)
+ self.assertEqual(efid, ie.file_id)
+ self.assertIsInstance(ie.file_id, str)
+ self.assertEqual(eparent, ie.parent_id)
+ if eparent is not None:
+ self.assertIsInstance(ie.parent_id, str)
+ # WorkingTrees return None for the last modified revision
+ if ie.revision is not None:
+ self.assertIsInstance(ie.revision, str)
+ if epath == '':
+ # Some trees will preserve the revision id of the tree root,
+ # but not all will
+ continue
+ self.assertEqual(erev, ie.revision)
+ self.assertEqual(len(path_and_ids), len(path_entries))
+ get_revision_id = getattr(tree, 'get_revision_id', None)
+ if get_revision_id is not None:
+ self.assertIsInstance(get_revision_id(), str)
+ last_revision = getattr(tree, 'last_revision', None)
+ if last_revision is not None:
+ self.assertIsInstance(last_revision(), str)
diff --git a/bzrlib/tests/per_tree/test_tree.py b/bzrlib/tests/per_tree/test_tree.py
new file mode 100644
index 0000000..8d375ca
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_tree.py
@@ -0,0 +1,362 @@
+# Copyright (C) 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ errors,
+ conflicts,
+ osutils,
+ revisiontree,
+ tests,
+ workingtree_4,
+ )
+from bzrlib.tests import TestSkipped
+from bzrlib.tests.per_tree import TestCaseWithTree
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ )
+
+
+class TestAnnotate(TestCaseWithTree):
+
+ def test_annotate(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ tree_revision = getattr(tree, 'get_revision_id', lambda: 'current:')()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ for revision, line in tree.annotate_iter('a-id'):
+ self.assertEqual('contents of a\n', line)
+ self.assertEqual(tree_revision, revision)
+ tree_revision = getattr(tree, 'get_revision_id', lambda: 'random:')()
+ for revision, line in tree.annotate_iter('a-id', 'random:'):
+ self.assertEqual('contents of a\n', line)
+ self.assertEqual(tree_revision, revision)
+
+
+class TestPlanFileMerge(TestCaseWithTree):
+
+ def test_plan_file_merge(self):
+ work_a = self.make_branch_and_tree('wta')
+ self.build_tree_contents([('wta/file', 'a\nb\nc\nd\n')])
+ work_a.add('file', 'file-id')
+ work_a.commit('base version')
+ work_b = work_a.bzrdir.sprout('wtb').open_workingtree()
+ self.build_tree_contents([('wta/file', 'b\nc\nd\ne\n')])
+ tree_a = self.workingtree_to_test_tree(work_a)
+ tree_a.lock_read()
+ self.addCleanup(tree_a.unlock)
+ self.build_tree_contents([('wtb/file', 'a\nc\nd\nf\n')])
+ tree_b = self.workingtree_to_test_tree(work_b)
+ tree_b.lock_read()
+ self.addCleanup(tree_b.unlock)
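+ # plan_file_merge labels each line of the merged text relative to the
+ # common base 'a\nb\nc\nd\n': 'killed-a'/'killed-b' are base lines removed
+ # in tree_a/tree_b, 'new-a'/'new-b' are lines introduced by each side, and
+ # 'unchanged' lines are common to both.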
+ self.assertEqual([
+ ('killed-a', 'a\n'),
+ ('killed-b', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('new-a', 'e\n'),
+ ('new-b', 'f\n'),
+ ], list(tree_a.plan_file_merge('file-id', tree_b)))
+
+
+class TestReference(TestCaseWithTree):
+
+ def skip_if_no_reference(self, tree):
+ if not getattr(tree, 'supports_tree_reference', lambda: False)():
+ raise tests.TestNotApplicable('Tree references not supported')
+
+ def create_nested(self):
+ work_tree = self.make_branch_and_tree('wt')
+ work_tree.lock_write()
+ try:
+ self.skip_if_no_reference(work_tree)
+ subtree = self.make_branch_and_tree('wt/subtree')
+ subtree.set_root_id('sub-root')
+ subtree.commit('foo', rev_id='sub-1')
+ work_tree.add_reference(subtree)
+ finally:
+ work_tree.unlock()
+ tree = self._convert_tree(work_tree)
+ self.skip_if_no_reference(tree)
+ return tree
+
+ def test_get_reference_revision(self):
+ tree = self.create_nested()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ path = tree.id2path('sub-root')
+ self.assertEqual('sub-1',
+ tree.get_reference_revision('sub-root', path))
+
+ def test_iter_references(self):
+ tree = self.create_nested()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ entry = tree.root_inventory['sub-root']
+ self.assertEqual([(u'subtree', 'sub-root')],
+ list(tree.iter_references()))
+
+ def test_get_root_id(self):
+ # trees should return some kind of root id; it can be None
+ tree = self.make_branch_and_tree('tree')
+ root_id = tree.get_root_id()
+ if root_id is not None:
+ self.assertIsInstance(root_id, str)
+
+
+class TestFileIds(TestCaseWithTree):
+
+ def test_id2path(self):
+ # translate from file-id back to path
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ tree.lock_read()
+ try:
+ self.assertEqual(u'a', tree.id2path('a-id'))
+ # other ids give an error - don't return None for this case
+ self.assertRaises(errors.NoSuchId, tree.id2path, 'a')
+ finally:
+ tree.unlock()
+
+ def test_all_file_ids(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(tree.all_file_ids(),
+ set(['b-id', 'root-id', 'c-id', 'a-id']))
+
+
+class TestStoredKind(TestCaseWithTree):
+
+ def test_stored_kind(self):
+ tree = self.make_branch_and_tree('tree')
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('file', tree.stored_kind('a-id'))
+ self.assertEqual('directory', tree.stored_kind('b-id'))
+
+
+class TestFileContent(TestCaseWithTree):
+
+ def test_get_file(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content_2(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # Test lookup without path works
+ file_without_path = tree.get_file('a-id')
+ try:
+ lines = file_without_path.readlines()
+ self.assertEqual(['foobar\n'], lines)
+ finally:
+ file_without_path.close()
+ # Test lookup with path works
+ file_with_path = tree.get_file('a-id', path='a')
+ try:
+ lines = file_with_path.readlines()
+ self.assertEqual(['foobar\n'], lines)
+ finally:
+ file_with_path.close()
+
+ def test_get_file_text(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content_2(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # test read by file-id
+ self.assertEqual('foobar\n', tree.get_file_text('a-id'))
+ # test read by path
+ self.assertEqual('foobar\n', tree.get_file_text('a-id', path='a'))
+
+ def test_get_file_lines(self):
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self.get_tree_no_parents_abc_content_2(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # test read by file-id
+ self.assertEqual(['foobar\n'], tree.get_file_lines('a-id'))
+ # test read by path
+ self.assertEqual(['foobar\n'], tree.get_file_lines('a-id', path='a'))
+
+ def test_get_file_lines_multi_line_breaks(self):
+ work_tree = self.make_branch_and_tree('wt')
+ self.build_tree_contents([('wt/foobar', 'a\rb\nc\r\nd')])
+ work_tree.add('foobar', 'foobar-id')
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(['a\rb\n', 'c\r\n', 'd'],
+ tree.get_file_lines('foobar-id'))
+
+
+class TestExtractFilesBytes(TestCaseWithTree):
+
+ def test_iter_files_bytes(self):
+ work_tree = self.make_branch_and_tree('wt')
+ self.build_tree_contents([('wt/foo', 'foo'),
+ ('wt/bar', 'bar'),
+ ('wt/baz', 'baz')])
+ work_tree.add(['foo', 'bar', 'baz'], ['foo-id', 'bar-id', 'baz-id'])
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ extracted = dict((i, ''.join(b)) for i, b in
+ tree.iter_files_bytes([('foo-id', 'id1'),
+ ('bar-id', 'id2'),
+ ('baz-id', 'id3')]))
+ self.assertEqual('foo', extracted['id1'])
+ self.assertEqual('bar', extracted['id2'])
+ self.assertEqual('baz', extracted['id3'])
+ self.assertRaises(errors.NoSuchId, lambda: list(
+ tree.iter_files_bytes(
+ [('qux-id', 'file1-notpresent')])))
+
+
+class TestConflicts(TestCaseWithTree):
+
+ def test_conflicts(self):
+ """Tree.conflicts() should return a ConflictList instance."""
+ work_tree = self.make_branch_and_tree('wt')
+ tree = self._convert_tree(work_tree)
+ self.assertIsInstance(tree.conflicts(), conflicts.ConflictList)
+
+
+class TestIterEntriesByDir(TestCaseWithTree):
+
+ def test_iteration_order(self):
+ work_tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b/', 'a/b/c', 'a/d/', 'a/d/e', 'f/', 'f/g'])
+ work_tree.add(['a', 'a/b', 'a/b/c', 'a/d', 'a/d/e', 'f', 'f/g'])
+ tree = self._convert_tree(work_tree)
+ output_order = [p for p, e in tree.iter_entries_by_dir()]
+ self.assertEqual(['', 'a', 'f', 'a/b', 'a/d', 'a/b/c', 'a/d/e', 'f/g'],
+ output_order)
+
+
+class TestIterChildEntries(TestCaseWithTree):
+
+ def test_iteration_order(self):
+ work_tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b/', 'a/b/c', 'a/d/', 'a/d/e', 'f/', 'f/g'])
+ work_tree.add(['a', 'a/b', 'a/b/c', 'a/d', 'a/d/e', 'f', 'f/g'])
+ tree = self._convert_tree(work_tree)
+ output = [e.name for e in
+ tree.iter_child_entries(tree.get_root_id())]
+ self.assertEqual(set(['a', 'f']), set(output))
+ output = [e.name for e in
+ tree.iter_child_entries(tree.path2id('a'))]
+ self.assertEqual(set(['b', 'd']), set(output))
+
+ def test_does_not_exist(self):
+ work_tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/'])
+ work_tree.add(['a'])
+ tree = self._convert_tree(work_tree)
+ self.assertRaises(errors.NoSuchId, lambda:
+ list(tree.iter_child_entries('unknown')))
+
+
+class TestHasId(TestCaseWithTree):
+
+ def test_has_id(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ work_tree.add('file', 'file-id')
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertTrue(tree.has_id('file-id'))
+ self.assertFalse(tree.has_id('dir-id'))
+
+ def test___contains__(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ work_tree.add('file', 'file-id')
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertTrue(
+ self.applyDeprecated(
+ deprecated_in((2, 4, 0)),
+ tree.__contains__, 'file-id'))
+ self.assertFalse(
+ self.applyDeprecated(
+ deprecated_in((2, 4, 0)),
+ tree.__contains__, 'dir-id'))
+
+
+class TestExtras(TestCaseWithTree):
+
+ def test_extras(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file', 'tree/versioned-file'])
+ work_tree.add(['file', 'versioned-file'])
+ work_tree.commit('add files')
+ work_tree.remove('file')
+ tree = self._convert_tree(work_tree)
+ if isinstance(tree,
+ (revisiontree.RevisionTree,
+ workingtree_4.DirStateRevisionTree)):
+ expected = []
+ else:
+ expected = ['file']
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(expected, list(tree.extras()))
+
+
+class TestGetFileSha1(TestCaseWithTree):
+
+ def test_get_file_sha1(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'file content')])
+ work_tree.add('file', 'file-id')
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ expected = osutils.sha_strings('file content')
+ self.assertEqual(expected, tree.get_file_sha1('file-id'))
+
+
+class TestGetFileVerifier(TestCaseWithTree):
+
+ def test_get_file_verifier(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('tree/file1', 'file content'),
+ ('tree/file2', 'file content')])
+ work_tree.add(['file1', 'file2'], ['file-id-1', 'file-id-2'])
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ (kind, data) = tree.get_file_verifier('file-id-1')
+ self.assertEquals(
+ tree.get_file_verifier('file-id-1'),
+ tree.get_file_verifier('file-id-2'))
+ if kind == "SHA1":
+ expected = osutils.sha_strings('file content')
+ self.assertEqual(expected, data)
+
+
+class TestHasVersionedDirectories(TestCaseWithTree):
+
+ def test_has_versioned_directories(self):
+ work_tree = self.make_branch_and_tree('tree')
+ tree = self._convert_tree(work_tree)
+ self.assertSubset([tree.has_versioned_directories()], (True, False))
diff --git a/bzrlib/tests/per_tree/test_walkdirs.py b/bzrlib/tests/per_tree/test_walkdirs.py
new file mode 100644
index 0000000..81e35e1
--- /dev/null
+++ b/bzrlib/tests/per_tree/test_walkdirs.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the generic Tree.walkdirs interface."""
+
+import os
+
+from bzrlib import tests
+from bzrlib.osutils import has_symlinks
+from bzrlib.tests.per_tree import TestCaseWithTree
+
+
+class TestWalkdirs(TestCaseWithTree):
+
+ def get_all_subdirs_expected(self, tree, symlinks):
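+ # Each dirblock is ((directory relpath, directory file id), [rows]), where
+ # a row is (relpath, basename, disk kind, stat result, file id, versioned
+ # kind); the tests below normalise the stat column to None before comparing.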
+ dirblocks = [
+ (('', tree.path2id('')),
+ [('0file', '0file', 'file', None, '2file', 'file'),
+ ('1top-dir', '1top-dir', 'directory', None,
+ '1top-dir', 'directory'),
+ (u'2utf\u1234file', u'2utf\u1234file', 'file', None,
+ u'0utf\u1234file'.encode('utf8'), 'file'),
+ ]),
+ (('1top-dir', '1top-dir'),
+ [('1top-dir/0file-in-1topdir', '0file-in-1topdir',
+ 'file', None, '1file-in-1topdir', 'file'),
+ ('1top-dir/1dir-in-1topdir', '1dir-in-1topdir',
+ 'directory', None, '0dir-in-1topdir', 'directory'),
+ ]),
+ (('1top-dir/1dir-in-1topdir', '0dir-in-1topdir'),
+ []),
+ ]
+ if symlinks:
+ dirblocks[0][1].append(('symlink', 'symlink', 'symlink', None,
+ 'symlink', 'symlink'))
+ return dirblocks
+
+ def test_walkdir_root(self):
+ tree = self.get_tree_with_subdirs_and_all_supported_content_types(
+ has_symlinks())
+ tree.lock_read()
+ expected_dirblocks = self.get_all_subdirs_expected(tree, has_symlinks())
+ # test that it's iterable by iterating
+ result = []
+ for dirinfo, block in tree.walkdirs():
+ newblock = []
+ for row in block:
+ if row[4] is not None:
+ newblock.append(row[0:3] + (None,) + row[4:])
+ else:
+ newblock.append(row)
+ result.append((dirinfo, newblock))
+ tree.unlock()
+ # check each return value for debugging ease.
+ for pos, item in enumerate(expected_dirblocks):
+ self.assertEqual(item, result[pos])
+ self.assertEqual(len(expected_dirblocks), len(result))
+
+ def test_walkdir_subtree(self):
+ tree = self.get_tree_with_subdirs_and_all_supported_content_types(has_symlinks())
+ # test that it's iterable by iterating
+ result = []
+ tree.lock_read()
+ expected_dirblocks = self.get_all_subdirs_expected(tree, has_symlinks())[1:]
+ for dirinfo, block in tree.walkdirs('1top-dir'):
+ newblock = []
+ for row in block:
+ if row[4] is not None:
+ newblock.append(row[0:3] + (None,) + row[4:])
+ else:
+ newblock.append(row)
+ result.append((dirinfo, newblock))
+ tree.unlock()
+ # check each return value for debugging ease.
+ for pos, item in enumerate(expected_dirblocks):
+ self.assertEqual(item, result[pos])
+ self.assertEqual(len(expected_dirblocks), len(result))
+
+ def test_walkdir_versioned_kind(self):
+ work_tree = self.make_branch_and_tree('tree')
+ work_tree.set_root_id('tree-root')
+ self.build_tree(['tree/file', 'tree/dir/'])
+ work_tree.add(['file', 'dir'], ['file-id', 'dir-id'])
+ os.unlink('tree/file')
+ os.rmdir('tree/dir')
+ tree = self._convert_tree(work_tree)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ if tree.path2id('file') is None:
+ raise tests.TestNotApplicable(
+ 'Tree type cannot represent dangling ids.')
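+ # The on-disk kind is reported as 'unknown' because the file and directory
+ # were deleted above; the versioned-kind column still records what the
+ # tree has versioned.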
+ expected = [(('', 'tree-root'), [
+ ('dir', 'dir', 'unknown', None, 'dir-id', 'directory'),
+ ('file', 'file', 'unknown', None, 'file-id', 'file')]),
+ (('dir', 'dir-id'), [])]
+ self.assertEqual(expected, list(tree.walkdirs()))
diff --git a/bzrlib/tests/per_uifactory/__init__.py b/bzrlib/tests/per_uifactory/__init__.py
new file mode 100644
index 0000000..1a72f6e
--- /dev/null
+++ b/bzrlib/tests/per_uifactory/__init__.py
@@ -0,0 +1,274 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests run per UIFactory."""
+
+# Testing UIFactories is a bit interesting because we require they all support a
+# common interface, but the way they implement it can vary very widely. Between
+# text, batch-mode, graphical and other potential UIFactories, the requirements
+# to set up a factory, to make it respond to requests, and to simulate user
+# input can vary a lot.
+#
+# We therefore want tests that allow the evaluation of the result to vary per
+# implementation, but we want to check that the supported facilities are the
+# same across all UIFactories, unless they're specifically skipped.
+#
+# Our normal approach is to use test scenarios but that seems to just end up
+# creating test-like objects inside the scenario. Therefore we fall back to
+# the older method of putting the common tests in a mixin.
+#
+# Plugins that add new UIFactories can create their own subclasses.
+
+
+from cStringIO import StringIO
+import unittest
+
+
+from bzrlib import (
+ tests,
+ transport,
+ ui,
+ )
+
+
+class UIFactoryTestMixin(object):
+ """Common tests for UIFactories.
+
+ These are supposed to be expressed with no assumptions about how the
+ UIFactory implements the method, only that it does implement them (or
+ fails cleanly), and that the concrete subclass will make arrangements to
+ build a factory and to examine its behaviour.
+
+ Note that this is *not* a TestCase, because it can't be directly run, but
+ the concrete subclasses should be.
+ """
+
+ def test_be_quiet(self):
+ self.factory.be_quiet(True)
+ self.assertEquals(True, self.factory.is_quiet())
+ self.factory.be_quiet(False)
+ self.assertEquals(False, self.factory.is_quiet())
+
+ def test_confirm_action(self):
+ # confirm_action should be answered by every ui factory; even
+ # noninteractive ones should have a reasonable default
+ self._load_responses([True])
+ result = self.factory.confirm_action(u'Break a lock?',
+ 'bzr.lock.break.confirm',
+ {})
+ # will be true either because we read it from the input or because
+ # that's the default
+ self.assertEquals(result, True)
+
+ def test_note(self):
+ self.factory.note("a note to the user")
+ self._check_note("a note to the user")
+
+ def test_show_error(self):
+ msg = 'an error occurred'
+ self.factory.show_error(msg)
+ self._check_show_error(msg)
+
+ def test_show_message(self):
+ msg = 'a message'
+ self.factory.show_message(msg)
+ self._check_show_message(msg)
+
+ def test_show_warning(self):
+ msg = 'a warning'
+ self.factory.show_warning(msg)
+ self._check_show_warning(msg)
+
+ def test_make_output_stream(self):
+ # All UIs must now be able to at least accept output, even if they
+ # just discard it.
+ output_stream = self.factory.make_output_stream()
+ output_stream.write('hello!')
+
+ def test_transport_activity(self):
+ # It doesn't matter what the implementation does, we just want to make
+ # sure the interface is there
+ t = transport.get_transport_from_url('memory:///')
+ self.factory.report_transport_activity(t, 1000, 'write')
+ self.factory.report_transport_activity(t, 2000, 'read')
+ self.factory.report_transport_activity(t, 4000, None)
+ self.factory.log_transport_activity()
+ self._check_log_transport_activity_noarg()
+ self.factory.log_transport_activity(display=True)
+ self._check_log_transport_activity_display()
+
+ def test_no_transport_activity(self):
+ # No activity to report
+ t = transport.get_transport_from_url('memory:///')
+ self.factory.log_transport_activity(display=True)
+ self._check_log_transport_activity_display_no_bytes()
+
+
+class TestTextUIFactory(tests.TestCase, UIFactoryTestMixin):
+
+ def setUp(self):
+ super(TestTextUIFactory, self).setUp()
+ self.stdin = StringIO()
+ self.stdout = StringIO()
+ self.stderr = StringIO()
+ self.factory = ui.text.TextUIFactory(self.stdin, self.stdout,
+ self.stderr)
+
+ def _check_note(self, note_text):
+ self.assertEquals("%s\n" % note_text,
+ self.stdout.getvalue())
+
+ def _check_show_error(self, msg):
+ self.assertEquals("bzr: error: %s\n" % msg,
+ self.stderr.getvalue())
+ self.assertEquals("", self.stdout.getvalue())
+
+ def _check_show_message(self, msg):
+ self.assertEquals("%s\n" % msg,
+ self.stdout.getvalue())
+ self.assertEquals("", self.stderr.getvalue())
+
+ def _check_show_warning(self, msg):
+ self.assertEquals("bzr: warning: %s\n" % msg,
+ self.stderr.getvalue())
+ self.assertEquals("", self.stdout.getvalue())
+
+ def _check_log_transport_activity_noarg(self):
+ self.assertEqual('', self.stdout.getvalue())
+ self.assertContainsRe(self.stderr.getvalue(), r'\d+kB\s+\dkB/s |')
+ self.assertNotContainsRe(self.stderr.getvalue(), r'Transferred:')
+
+ def _check_log_transport_activity_display(self):
+ self.assertEqual('', self.stdout.getvalue())
+ # Without a TTY, we shouldn't display anything
+ self.assertEqual('', self.stderr.getvalue())
+
+ def _check_log_transport_activity_display_no_bytes(self):
+ self.assertEqual('', self.stdout.getvalue())
+ # Without a TTY, we shouldn't display anything
+ self.assertEqual('', self.stderr.getvalue())
+
+ def _load_responses(self, responses):
+ self.factory.stdin.seek(0)
+ self.factory.stdin.writelines([(r and "y\n" or "n\n") for r in responses])
+ self.factory.stdin.seek(0)
+
+
+class TestTTYTextUIFactory(TestTextUIFactory):
+
+ def setUp(self):
+ super(TestTTYTextUIFactory, self).setUp()
+
+ class TTYStringIO(object):
+ """Thunk over to StringIO() for everything but 'isatty'"""
+
+ def __init__(self):
+ self.__dict__['_sio'] = StringIO()
+
+ def isatty(self):
+ return True
+
+ def __getattr__(self, name):
+ return getattr(self._sio, name)
+
+ def __setattr__(self, name, value):
+ return setattr(self._sio, name, value)
+
+ # Remove 'TERM' == 'dumb' which causes us to *not* treat output as a
+ # real terminal, even though isatty returns True
+ self.overrideEnv('TERM', None)
+ self.stderr = TTYStringIO()
+ self.stdout = TTYStringIO()
+ self.factory = ui.text.TextUIFactory(self.stdin, self.stdout,
+ self.stderr)
+
+ def _check_log_transport_activity_display(self):
+ self.assertEqual('', self.stdout.getvalue())
+ # Displaying the result should write to the progress stream using
+ # base-10 units (see HACKING.txt).
+ self.assertContainsRe(self.stderr.getvalue(),
+ r'Transferred: 7kB'
+ r' \(\d+\.\dkB/s r:2kB w:1kB u:4kB\)')
+
+ def _check_log_transport_activity_display_no_bytes(self):
+ self.assertEqual('', self.stdout.getvalue())
+ # Without actual bytes transferred, we should report nothing
+ self.assertEqual('', self.stderr.getvalue())
+
+
+class TestSilentUIFactory(tests.TestCase, UIFactoryTestMixin):
+ # discards output, therefore tests for output expect nothing
+
+ def setUp(self):
+ super(TestSilentUIFactory, self).setUp()
+ self.factory = ui.SilentUIFactory()
+
+ def _check_note(self, note_text):
+ # it's just discarded
+ pass
+
+ def _check_show_error(self, msg):
+ pass
+
+ def _check_show_message(self, msg):
+ pass
+
+ def _check_show_warning(self, msg):
+ pass
+
+ def _check_log_transport_activity_noarg(self):
+ pass
+
+ def _check_log_transport_activity_display(self):
+ pass
+
+ def _check_log_transport_activity_display_no_bytes(self):
+ pass
+
+ def _load_responses(self, responses):
+ pass
+
+
+class TestCannedInputUIFactory(tests.TestCase, UIFactoryTestMixin):
+ # discards output, reads input from variables
+
+ def setUp(self):
+ super(TestCannedInputUIFactory, self).setUp()
+ self.factory = ui.CannedInputUIFactory([])
+
+ def _check_note(self, note_text):
+ pass
+
+ def _check_show_error(self, msg):
+ pass
+
+ def _check_show_message(self, msg):
+ pass
+
+ def _check_show_warning(self, msg):
+ pass
+
+ def _check_log_transport_activity_noarg(self):
+ pass
+
+ def _check_log_transport_activity_display(self):
+ pass
+
+ def _check_log_transport_activity_display_no_bytes(self):
+ pass
+
+ def _load_responses(self, responses):
+ self.factory.responses.extend(responses)
diff --git a/bzrlib/tests/per_versionedfile.py b/bzrlib/tests/per_versionedfile.py
new file mode 100644
index 0000000..1f06c30
--- /dev/null
+++ b/bzrlib/tests/per_versionedfile.py
@@ -0,0 +1,2858 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# Authors:
+# Johan Rydberg <jrydberg@gnu.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+# TODO: might be nice to create a versionedfile with some type of corruption
+# considered typical and check that it can be detected/corrected.
+
+from gzip import GzipFile
+from itertools import chain, izip
+from StringIO import StringIO
+
+from bzrlib import (
+ errors,
+ graph as _mod_graph,
+ groupcompress,
+ knit as _mod_knit,
+ osutils,
+ progress,
+ transport,
+ ui,
+ )
+from bzrlib.errors import (
+ RevisionNotPresent,
+ RevisionAlreadyPresent,
+ )
+from bzrlib.knit import (
+ cleanup_pack_knit,
+ make_file_factory,
+ make_pack_factory,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithMemoryTransport,
+ TestNotApplicable,
+ TestSkipped,
+ )
+from bzrlib.tests.http_utils import TestCaseWithWebserver
+from bzrlib.transport.memory import MemoryTransport
+import bzrlib.versionedfile as versionedfile
+from bzrlib.versionedfile import (
+ ConstantMapper,
+ HashEscapedPrefixMapper,
+ PrefixMapper,
+ VirtualVersionedFiles,
+ make_versioned_files_factory,
+ )
+from bzrlib.weave import WeaveFile
+from bzrlib.weavefile import write_weave
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+def get_diamond_vf(f, trailing_eol=True, left_only=False):
+ """Get a diamond graph to exercise deltas and merges.
+
+ :param trailing_eol: If True end the last line with \n.
+ """
+ parents = {
+ 'origin': (),
+ 'base': (('origin',),),
+ 'left': (('base',),),
+ 'right': (('base',),),
+ 'merged': (('left',), ('right',)),
+ }
+ # insert a diamond graph to exercise deltas and merges.
+ if trailing_eol:
+ last_char = '\n'
+ else:
+ last_char = ''
+ f.add_lines('origin', [], ['origin' + last_char])
+ f.add_lines('base', ['origin'], ['base' + last_char])
+ f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
+ if not left_only:
+ f.add_lines('right', ['base'],
+ ['base\n', 'right' + last_char])
+ f.add_lines('merged', ['left', 'right'],
+ ['base\n', 'left\n', 'right\n', 'merged' + last_char])
+ return f, parents
+
+
+def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
+ nograph=False, nokeys=False):
+ """Get a diamond graph to exercise deltas and merges.
+
+ This creates a 5-node graph in files. If files supports 2-length keys two
+ graphs are made to exercise the support for multiple ids.
+
+ :param trailing_eol: If True end the last line with \n.
+ :param key_length: The length of keys in files. Currently supports length 1
+ and 2 keys.
+ :param left_only: If True do not add the right and merged nodes.
+ :param nograph: If True, do not provide parents to the add_lines calls;
+ this is useful for tests that need inserted data but have graphless
+ stores.
+ :param nokeys: If True, pass None in as the key for all insertions.
+ Currently implies nograph.
+ :return: The results of the add_lines calls.
+ """
+ if nokeys:
+ nograph = True
+ if key_length == 1:
+ prefixes = [()]
+ else:
+ prefixes = [('FileA',), ('FileB',)]
+ # insert a diamond graph to exercise deltas and merges.
+ if trailing_eol:
+ last_char = '\n'
+ else:
+ last_char = ''
+ result = []
+ def get_parents(suffix_list):
+ if nograph:
+ return ()
+ else:
+ result = [prefix + suffix for suffix in suffix_list]
+ return result
+ def get_key(suffix):
+ if nokeys:
+ return (None, )
+ else:
+ return (suffix,)
+ # we loop over each key because that spreads the inserts across prefixes,
+ # which is how commit operates.
+ for prefix in prefixes:
+ result.append(files.add_lines(prefix + get_key('origin'), (),
+ ['origin' + last_char]))
+ for prefix in prefixes:
+ result.append(files.add_lines(prefix + get_key('base'),
+ get_parents([('origin',)]), ['base' + last_char]))
+ for prefix in prefixes:
+ result.append(files.add_lines(prefix + get_key('left'),
+ get_parents([('base',)]),
+ ['base\n', 'left' + last_char]))
+ if not left_only:
+ for prefix in prefixes:
+ result.append(files.add_lines(prefix + get_key('right'),
+ get_parents([('base',)]),
+ ['base\n', 'right' + last_char]))
+ for prefix in prefixes:
+ result.append(files.add_lines(prefix + get_key('merged'),
+ get_parents([('left',), ('right',)]),
+ ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
+ return result
+
+
+class VersionedFileTestMixIn(object):
+ """A mixin test class for testing VersionedFiles.
+
+ This is not an adaptor-style test at this point because
+ there's no dynamic substitution of versioned file implementations,
+ they are strictly controlled by their owning repositories.
+ """
+
+ def get_transaction(self):
+ if not hasattr(self, '_transaction'):
+ self._transaction = None
+ return self._transaction
+
+ def test_add(self):
+ f = self.get_file()
+ f.add_lines('r0', [], ['a\n', 'b\n'])
+ f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
+ def verify_file(f):
+ versions = f.versions()
+ self.assertTrue('r0' in versions)
+ self.assertTrue('r1' in versions)
+ self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
+ self.assertEquals(f.get_text('r0'), 'a\nb\n')
+ self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
+ self.assertEqual(2, len(f))
+ self.assertEqual(2, f.num_versions())
+
+ self.assertRaises(RevisionNotPresent,
+ f.add_lines, 'r2', ['foo'], [])
+ self.assertRaises(RevisionAlreadyPresent,
+ f.add_lines, 'r1', [], [])
+ verify_file(f)
+ # this checks that reopen with create=True does not break anything.
+ f = self.reopen_file(create=True)
+ verify_file(f)
+
+ def test_adds_with_parent_texts(self):
+ f = self.get_file()
+ parent_texts = {}
+ _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
+ try:
+ _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
+ ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
+ except NotImplementedError:
+ # if the format doesn't support ghosts, just add normally.
+ _, _, parent_texts['r1'] = f.add_lines('r1',
+ ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
+ f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
+ self.assertNotEqual(None, parent_texts['r0'])
+ self.assertNotEqual(None, parent_texts['r1'])
+ def verify_file(f):
+ versions = f.versions()
+ self.assertTrue('r0' in versions)
+ self.assertTrue('r1' in versions)
+ self.assertTrue('r2' in versions)
+ self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
+ self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
+ self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
+ self.assertEqual(3, f.num_versions())
+ origins = f.annotate('r1')
+ self.assertEquals(origins[0][0], 'r0')
+ self.assertEquals(origins[1][0], 'r1')
+ origins = f.annotate('r2')
+ self.assertEquals(origins[0][0], 'r1')
+ self.assertEquals(origins[1][0], 'r2')
+
+ verify_file(f)
+ f = self.reopen_file()
+ verify_file(f)
+
+ def test_add_unicode_content(self):
+ # unicode content is not permitted in versioned files.
+ # versioned files version sequences of bytes only.
+ vf = self.get_file()
+ self.assertRaises(errors.BzrBadParameterUnicode,
+ vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
+ self.assertRaises(
+ (errors.BzrBadParameterUnicode, NotImplementedError),
+ vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])
+
+ def test_add_follows_left_matching_blocks(self):
+ """If we change left_matching_blocks, delta changes
+
+ Note: There are multiple correct deltas in this case, because
+ we start with 1 "a" and we get 3.
+ """
+ vf = self.get_file()
+ if isinstance(vf, WeaveFile):
+ raise TestSkipped("WeaveFile ignores left_matching_blocks")
+ vf.add_lines('1', [], ['a\n'])
+ vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
+ left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
+ self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
+ vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
+ left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
+ self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))
+
+ def test_inline_newline_throws(self):
+ # \n characters are not permitted inside lines being added (only at the end)
+ vf = self.get_file()
+ self.assertRaises(errors.BzrBadParameterContainsNewline,
+ vf.add_lines, 'a', [], ['a\n\n'])
+ self.assertRaises(
+ (errors.BzrBadParameterContainsNewline, NotImplementedError),
+ vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
+ # but inline CR's are allowed
+ vf.add_lines('a', [], ['a\r\n'])
+ try:
+ vf.add_lines_with_ghosts('b', [], ['a\r\n'])
+ except NotImplementedError:
+ pass
+
+ def test_add_reserved(self):
+ vf = self.get_file()
+ self.assertRaises(errors.ReservedId,
+ vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])
+
+ def test_add_lines_nostoresha(self):
+ """When nostore_sha is supplied using old content raises."""
+ vf = self.get_file()
+ empty_text = ('a', [])
+ sample_text_nl = ('b', ["foo\n", "bar\n"])
+ sample_text_no_nl = ('c', ["foo\n", "bar"])
+ shas = []
+ for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
+ sha, _, _ = vf.add_lines(version, [], lines)
+ shas.append(sha)
+ # we now have a copy of all the lines in the vf.
+ for sha, (version, lines) in zip(
+ shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+ self.assertRaises(errors.ExistingContent,
+ vf.add_lines, version + "2", [], lines,
+ nostore_sha=sha)
+ # and no new version should have been added.
+ self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
+ version + "2")
+
+ def test_add_lines_with_ghosts_nostoresha(self):
+ """When nostore_sha is supplied using old content raises."""
+ vf = self.get_file()
+ empty_text = ('a', [])
+ sample_text_nl = ('b', ["foo\n", "bar\n"])
+ sample_text_no_nl = ('c', ["foo\n", "bar"])
+ shas = []
+ for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
+ sha, _, _ = vf.add_lines(version, [], lines)
+ shas.append(sha)
+ # we now have a copy of all the lines in the vf.
+ # is the test applicable to this vf implementation?
+ try:
+ vf.add_lines_with_ghosts('d', [], [])
+ except NotImplementedError:
+ raise TestSkipped("add_lines_with_ghosts is optional")
+ for sha, (version, lines) in zip(
+ shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+ self.assertRaises(errors.ExistingContent,
+ vf.add_lines_with_ghosts, version + "2", [], lines,
+ nostore_sha=sha)
+ # and no new version should have been added.
+ self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
+ version + "2")
+
+ def test_add_lines_return_value(self):
+ # add_lines should return the sha1 and the text size.
+ vf = self.get_file()
+ empty_text = ('a', [])
+ sample_text_nl = ('b', ["foo\n", "bar\n"])
+ sample_text_no_nl = ('c', ["foo\n", "bar"])
+ # check results for the three cases:
+ for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
+ # the first two elements are the same for all versioned files:
+ # - the digest and the size of the text. For some versioned files
+ # additional data is returned in additional tuple elements.
+ result = vf.add_lines(version, [], lines)
+ self.assertEqual(3, len(result))
+ self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
+ result[0:2])
+ # parents should not affect the result:
+ lines = sample_text_nl[1]
+ self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
+ vf.add_lines('d', ['b', 'c'], lines)[0:2])
+
+ def test_get_reserved(self):
+ vf = self.get_file()
+ self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
+ self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
+ self.assertRaises(errors.ReservedId, vf.get_text, 'b:')
+
+ def test_add_unchanged_last_line_noeol_snapshot(self):
+ """Add a text with an unchanged last line with no eol should work."""
+ # Test adding this in a number of chain lengths; because the interface
+ # for VersionedFile does not allow forcing a specific chain length, we
+ # just use a small base to get the first snapshot, then a much longer
+ # first line for the next add (which will make the third add snapshot)
+ # and so on. 20 has been chosen as an arbitrary figure - knits use 200
+ # as a capped delta length, but ideally we would have some way of
+ # tuning the test to the store (e.g. keep going until a snapshot
+ # happens).
+ for length in range(20):
+ version_lines = {}
+ vf = self.get_file('case-%d' % length)
+ prefix = 'step-%d'
+ parents = []
+ for step in range(length):
+ version = prefix % step
+ lines = (['prelude \n'] * step) + ['line']
+ vf.add_lines(version, parents, lines)
+ version_lines[version] = lines
+ parents = [version]
+ vf.add_lines('no-eol', parents, ['line'])
+ vf.get_texts(version_lines.keys())
+ self.assertEqualDiff('line', vf.get_text('no-eol'))
+
+ def test_get_texts_eol_variation(self):
+ # similar to the failure in <http://bugs.launchpad.net/234748>
+ vf = self.get_file()
+ sample_text_nl = ["line\n"]
+ sample_text_no_nl = ["line"]
+ versions = []
+ version_lines = {}
+ parents = []
+ for i in range(4):
+ version = 'v%d' % i
+ if i % 2:
+ lines = sample_text_nl
+ else:
+ lines = sample_text_no_nl
+ # left_matching_blocks is an internal API; it operates on the
+ # *internal* representation for a knit, which is with *all* lines
+ # being normalised to end with \n - even the final line in a no_nl
+ # file. Using it here ensures that a broken internal implementation
+ # (which is what this test tests) will generate a correct line
+ # delta (which is to say, an empty delta).
+ vf.add_lines(version, parents, lines,
+ left_matching_blocks=[(0, 0, 1)])
+ parents = [version]
+ versions.append(version)
+ version_lines[version] = lines
+ vf.check()
+ vf.get_texts(versions)
+ vf.get_texts(reversed(versions))
+
+ def test_add_lines_with_matching_blocks_noeol_last_line(self):
+ """Add a text with an unchanged last line with no eol should work."""
+ from bzrlib import multiparent
+ # Hand verified sha1 of the text we're adding.
+ sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
+ # Create a mpdiff which adds a new line before the trailing line, and
+ # reuse the last line unaltered (which can cause annotation reuse).
+ # Test adding this in two situations:
+ # On top of a new insertion
+ vf = self.get_file('fulltext')
+ vf.add_lines('noeol', [], ['line'])
+ vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
+ left_matching_blocks=[(0, 1, 1)])
+ self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
+ # On top of a delta
+ vf = self.get_file('delta')
+ vf.add_lines('base', [], ['line'])
+ vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
+ vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
+ left_matching_blocks=[(1, 1, 1)])
+ self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
+
+ def test_make_mpdiffs(self):
+ from bzrlib import multiparent
+ vf = self.get_file('foo')
+ sha1s = self._setup_for_deltas(vf)
+ new_vf = self.get_file('bar')
+ for version in multiparent.topo_iter(vf):
+ mpdiff = vf.make_mpdiffs([version])[0]
+ new_vf.add_mpdiffs([(version, vf.get_parent_map([version])[version],
+ vf.get_sha1s([version])[version], mpdiff)])
+ self.assertEqualDiff(vf.get_text(version),
+ new_vf.get_text(version))
+
+ def test_make_mpdiffs_with_ghosts(self):
+ vf = self.get_file('foo')
+ try:
+ vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
+ except NotImplementedError:
+ # old Weave formats do not allow ghosts
+ return
+ self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])
+
+ def _setup_for_deltas(self, f):
+ self.assertFalse(f.has_version('base'))
+ # add texts that should trip the knit maximum delta chain threshold
+ # as well as doing parallel chains of data in knits.
+ # this is done by two chains of 25 insertions
+ f.add_lines('base', [], ['line\n'])
+ f.add_lines('noeol', ['base'], ['line'])
+ # detailed eol tests:
+ # shared last line with parent no-eol
+ f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
+ # differing last line with parent, both no-eol
+ f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
+ # add eol following a noneol parent, change content
+ f.add_lines('eol', ['noeol'], ['phone\n'])
+ # add eol following a noneol parent, no change content
+ f.add_lines('eolline', ['noeol'], ['line\n'])
+ # noeol with no parents:
+ f.add_lines('noeolbase', [], ['line'])
+ # noeol preceding its leftmost parent in the output:
+ # this is done by making it a merge of two parents with no common
+ # ancestry: noeolbase and noeol with the
+ # later-inserted parent the leftmost.
+ f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
+ # two identical eol texts
+ f.add_lines('noeoldup', ['noeol'], ['line'])
+ next_parent = 'base'
+ text_name = 'chain1-'
+ text = ['line\n']
+ sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
+ 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
+ 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
+ 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
+ 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
+ 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
+ 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
+ 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
+ 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
+ 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
+ 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
+ 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
+ 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
+ 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
+ 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
+ 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
+ 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
+ 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
+ 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
+ 19:'1ebed371807ba5935958ad0884595126e8c4e823',
+ 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
+ 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
+ 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
+ 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
+ 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
+ 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
+ }
+ for depth in range(26):
+ new_version = text_name + '%s' % depth
+ text = text + ['line\n']
+ f.add_lines(new_version, [next_parent], text)
+ next_parent = new_version
+ next_parent = 'base'
+ text_name = 'chain2-'
+ text = ['line\n']
+ for depth in range(26):
+ new_version = text_name + '%s' % depth
+ text = text + ['line\n']
+ f.add_lines(new_version, [next_parent], text)
+ next_parent = new_version
+ return sha1s
+
+ def test_ancestry(self):
+ f = self.get_file()
+ self.assertEqual([], f.get_ancestry([]))
+ f.add_lines('r0', [], ['a\n', 'b\n'])
+ f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
+ f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
+ f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
+ f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
+ self.assertEqual([], f.get_ancestry([]))
+ versions = f.get_ancestry(['rM'])
+ # there are some possibilities:
+ # r0 r1 r2 rM r3
+ # r0 r1 r2 r3 rM
+ # etc
+ # so we check indexes
+ r0 = versions.index('r0')
+ r1 = versions.index('r1')
+ r2 = versions.index('r2')
+ self.assertFalse('r3' in versions)
+ rM = versions.index('rM')
+ self.assertTrue(r0 < r1)
+ self.assertTrue(r0 < r2)
+ self.assertTrue(r1 < rM)
+ self.assertTrue(r2 < rM)
+
+ self.assertRaises(RevisionNotPresent,
+ f.get_ancestry, ['rM', 'rX'])
+
+ self.assertEqual(set(f.get_ancestry('rM')),
+ set(f.get_ancestry('rM', topo_sorted=False)))
+
+ def test_mutate_after_finish(self):
+ self._transaction = 'before'
+ f = self.get_file()
+ self._transaction = 'after'
+ self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
+ self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])
+
+ def test_copy_to(self):
+ f = self.get_file()
+ f.add_lines('0', [], ['a\n'])
+ t = MemoryTransport()
+ f.copy_to('foo', t)
+ for suffix in self.get_factory().get_suffixes():
+ self.assertTrue(t.has('foo' + suffix))
+
+ def test_get_suffixes(self):
+ f = self.get_file()
+ # and should be a list
+ self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))
+
+ def test_get_parent_map(self):
+ f = self.get_file()
+ f.add_lines('r0', [], ['a\n', 'b\n'])
+ self.assertEqual(
+ {'r0':()}, f.get_parent_map(['r0']))
+ f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
+ self.assertEqual(
+ {'r1':('r0',)}, f.get_parent_map(['r1']))
+ self.assertEqual(
+ {'r0':(),
+ 'r1':('r0',)},
+ f.get_parent_map(['r0', 'r1']))
+ f.add_lines('r2', [], ['a\n', 'b\n'])
+ f.add_lines('r3', [], ['a\n', 'b\n'])
+ f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
+ self.assertEqual(
+ {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
+ self.assertEqual({}, f.get_parent_map('y'))
+ self.assertEqual(
+ {'r0':(),
+ 'r1':('r0',)},
+ f.get_parent_map(['r0', 'y', 'r1']))
+
+ def test_annotate(self):
+ f = self.get_file()
+ f.add_lines('r0', [], ['a\n', 'b\n'])
+ f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
+ origins = f.annotate('r1')
+ self.assertEquals(origins[0][0], 'r1')
+ self.assertEquals(origins[1][0], 'r0')
+
+ self.assertRaises(RevisionNotPresent,
+ f.annotate, 'foo')
+
+ def test_detection(self):
+ # Test weaves detect corruption.
+ #
+ # Weaves contain a checksum of their texts.
+ # When a text is extracted, this checksum should be
+ # verified.
+
+ w = self.get_file_corrupted_text()
+
+ self.assertEqual('hello\n', w.get_text('v1'))
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.check)
+
+ w = self.get_file_corrupted_checksum()
+
+ self.assertEqual('hello\n', w.get_text('v1'))
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.check)
+
+ def get_file_corrupted_text(self):
+ """Return a versioned file with corrupt text but valid metadata."""
+ raise NotImplementedError(self.get_file_corrupted_text)
+
+ def reopen_file(self, name='foo'):
+ """Open the versioned file from disk again."""
+ raise NotImplementedError(self.reopen_file)
+
+ def test_iter_lines_added_or_present_in_versions(self):
+ # test that we get at least an equal set of the lines added by
+ # versions in the weave
+ # the ordering here is to make a tree so that dumb searches have
+ # more changes to muck up.
+
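+ # Record every update() call so the test can assert on the exact
+ # sequence of (msg, current, total) progress updates.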
+ class InstrumentedProgress(progress.ProgressTask):
+
+ def __init__(self):
+ progress.ProgressTask.__init__(self)
+ self.updates = []
+
+ def update(self, msg=None, current=None, total=None):
+ self.updates.append((msg, current, total))
+
+ vf = self.get_file()
+ # add a base to get included
+ vf.add_lines('base', [], ['base\n'])
+ # add an ancestor to be included on one side
+ vf.add_lines('lancestor', [], ['lancestor\n'])
+ # add an ancestor to be included on the other side
+ vf.add_lines('rancestor', ['base'], ['rancestor\n'])
+ # add a child of rancestor with no eofile-nl
+ vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
+ # add a child of lancestor and base to join the two roots
+ vf.add_lines('otherchild',
+ ['lancestor', 'base'],
+ ['base\n', 'lancestor\n', 'otherchild\n'])
+ def iter_with_versions(versions, expected):
+ # now we need to see what lines are returned, and how often.
+ lines = {}
+ progress = InstrumentedProgress()
+ # iterate over the lines
+ for line in vf.iter_lines_added_or_present_in_versions(versions,
+ pb=progress):
+ lines.setdefault(line, 0)
+ lines[line] += 1
+ if []!= progress.updates:
+ self.assertEqual(expected, progress.updates)
+ return lines
+ lines = iter_with_versions(['child', 'otherchild'],
+ [('Walking content', 0, 2),
+ ('Walking content', 1, 2),
+ ('Walking content', 2, 2)])
+ # we must see child and otherchild
+ self.assertTrue(lines[('child\n', 'child')] > 0)
+ self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
+ # we don't care if we got more than that.
+
+ # test all lines
+ lines = iter_with_versions(None, [('Walking content', 0, 5),
+ ('Walking content', 1, 5),
+ ('Walking content', 2, 5),
+ ('Walking content', 3, 5),
+ ('Walking content', 4, 5),
+ ('Walking content', 5, 5)])
+ # all lines must be seen at least once
+ self.assertTrue(lines[('base\n', 'base')] > 0)
+ self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
+ self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
+ self.assertTrue(lines[('child\n', 'child')] > 0)
+ self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
+
+ def test_add_lines_with_ghosts(self):
+ # some versioned file formats allow lines to be added with parent
+ # information that is greater than that stored in the format. Formats
+ # that do not support this need to raise NotImplementedError on the
+ # add_lines_with_ghosts api.
+ vf = self.get_file()
+ # add a revision with ghost parents
+ # The preferred form is utf8, but we should translate when needed
+ parent_id_unicode = u'b\xbfse'
+ parent_id_utf8 = parent_id_unicode.encode('utf8')
+ try:
+ vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
+ except NotImplementedError:
+ # check the other ghost apis are also not implemented
+ self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
+ self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
+ return
+ vf = self.reopen_file()
+ # test key graph related apis: get_ancestry, _graph, get_parents,
+ # has_version
+ # - these are ghost unaware and must not reflect ghosts
+ self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
+ self.assertFalse(vf.has_version(parent_id_utf8))
+ # we have _with_ghost apis to give us ghost information.
+ self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
+ self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
+ # if we add the revision that was previously a ghost, it should correct
+ # the results of the prior apis
+ vf.add_lines(parent_id_utf8, [], [])
+ self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
+ self.assertEqual({'notbxbfse':(parent_id_utf8,)},
+ vf.get_parent_map(['notbxbfse']))
+ self.assertTrue(vf.has_version(parent_id_utf8))
+ # we have _with_ghost apis to give us ghost information.
+ self.assertEqual([parent_id_utf8, 'notbxbfse'],
+ vf.get_ancestry_with_ghosts(['notbxbfse']))
+ self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
+
+ def test_add_lines_with_ghosts_after_normal_revs(self):
+ # some versioned file formats allow lines to be added with parent
+ # information that is greater than that stored in the format. Formats
+ # that do not support this need to raise NotImplementedError on the
+ # add_lines_with_ghosts api.
+ vf = self.get_file()
+ # probe for ghost support
+ try:
+ vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
+ except NotImplementedError:
+ return
+ vf.add_lines_with_ghosts('references_ghost',
+ ['base', 'a_ghost'],
+ ['line\n', 'line_b\n', 'line_c\n'])
+ origins = vf.annotate('references_ghost')
+ self.assertEquals(('base', 'line\n'), origins[0])
+ self.assertEquals(('base', 'line_b\n'), origins[1])
+ self.assertEquals(('references_ghost', 'line_c\n'), origins[2])
+
+ def test_readonly_mode(self):
+ t = self.get_transport()
+ factory = self.get_factory()
+ vf = factory('id', t, 0777, create=True, access_mode='w')
+ vf = factory('id', t, access_mode='r')
+ self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
+ self.assertRaises(errors.ReadOnlyError,
+ vf.add_lines_with_ghosts,
+ 'base',
+ [],
+ [])
+
+ def test_get_sha1s(self):
+ # check the sha1 data is available
+ vf = self.get_file()
+ # a simple file
+ vf.add_lines('a', [], ['a\n'])
+ # the same file, different metadata
+ vf.add_lines('b', ['a'], ['a\n'])
+ # a file differing only in last newline.
+ vf.add_lines('c', [], ['a'])
+ self.assertEqual({
+ 'a': '3f786850e387550fdab836ed7e6dc881de23001b',
+ 'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
+ 'b': '3f786850e387550fdab836ed7e6dc881de23001b',
+ },
+ vf.get_sha1s(['a', 'c', 'b']))
+
+
+class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):
+
+ def get_file(self, name='foo'):
+ return WeaveFile(name, self.get_transport(),
+ create=True,
+ get_scope=self.get_transaction)
+
+ def get_file_corrupted_text(self):
+ w = WeaveFile('foo', self.get_transport(),
+ create=True,
+ get_scope=self.get_transaction)
+ w.add_lines('v1', [], ['hello\n'])
+ w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
+
+ # We are going to invasively corrupt the text.
+ # First make sure the internals of the weave are as we expect.
+ self.assertEqual([('{', 0)
+ , 'hello\n'
+ , ('}', None)
+ , ('{', 1)
+ , 'there\n'
+ , ('}', None)
+ ], w._weave)
+
+ self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
+ , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
+ ], w._sha1s)
+ w.check()
+
+ # Corrupted
+ w._weave[4] = 'There\n'
+ return w
+
+ def get_file_corrupted_checksum(self):
+ w = self.get_file_corrupted_text()
+ # Correct the text back, so only the checksum will be wrong
+ w._weave[4] = 'there\n'
+ self.assertEqual('hello\nthere\n', w.get_text('v2'))
+
+ # Invalid checksum, first digit changed
+ w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
+ return w
+
+ def reopen_file(self, name='foo', create=False):
+ return WeaveFile(name, self.get_transport(),
+ create=create,
+ get_scope=self.get_transaction)
+
+ def test_no_implicit_create(self):
+ self.assertRaises(errors.NoSuchFile,
+ WeaveFile,
+ 'foo',
+ self.get_transport(),
+ get_scope=self.get_transaction)
+
+ def get_factory(self):
+ return WeaveFile
+
+
+class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ TestCaseWithMemoryTransport.setUp(self)
+ mapper = PrefixMapper()
+ factory = make_file_factory(True, mapper)
+ self.vf1 = factory(self.get_transport('root-1'))
+ self.vf2 = factory(self.get_transport('root-2'))
+ self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root')
+ self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])
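+ # As the tests below exercise, only keys whose last element ends with ':'
+ # may be added to the plan-merge vf itself; other keys are found via the
+ # fallback versioned files.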
+
+ def test_add_lines(self):
+ self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
+ self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
+ ('root', 'a'), [], [])
+ self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
+ ('root', 'a:'), None, [])
+ self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
+ ('root', 'a:'), [], None)
+
+ def setup_abcde(self):
+ self.vf1.add_lines(('root', 'A'), [], ['a'])
+ self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
+ self.vf2.add_lines(('root', 'C'), [], ['c'])
+ self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
+ self.plan_merge_vf.add_lines(('root', 'E:'),
+ [('root', 'B'), ('root', 'D')], ['e'])
+
+ def test_get_parents(self):
+ self.setup_abcde()
+ self.assertEqual({('root', 'B'):(('root', 'A'),)},
+ self.plan_merge_vf.get_parent_map([('root', 'B')]))
+ self.assertEqual({('root', 'D'):(('root', 'C'),)},
+ self.plan_merge_vf.get_parent_map([('root', 'D')]))
+ self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
+ self.plan_merge_vf.get_parent_map([('root', 'E:')]))
+ self.assertEqual({},
+ self.plan_merge_vf.get_parent_map([('root', 'F')]))
+ self.assertEqual({
+ ('root', 'B'):(('root', 'A'),),
+ ('root', 'D'):(('root', 'C'),),
+ ('root', 'E:'):(('root', 'B'),('root', 'D')),
+ },
+ self.plan_merge_vf.get_parent_map(
+ [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))
+
+ def test_get_record_stream(self):
+ self.setup_abcde()
+ def get_record(suffix):
+ return self.plan_merge_vf.get_record_stream(
+ [('root', suffix)], 'unordered', True).next()
+ self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
+ self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
+ self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
+ self.assertEqual('absent', get_record('F').storage_kind)
+
+
+class TestReadonlyHttpMixin(object):
+
+ def get_transaction(self):
+ return 1
+
+ def test_readonly_http_works(self):
+ # we should be able to read from http with a versioned file.
+ vf = self.get_file()
+ # try an empty file access
+ readonly_vf = self.get_factory()('foo',
+ transport.get_transport_from_url(self.get_readonly_url('.')))
+ self.assertEqual([], readonly_vf.versions())
+
+ def test_readonly_http_works_with_feeling(self):
+ # we should be able to read from http with a versioned file.
+ vf = self.get_file()
+ # now with feeling.
+ vf.add_lines('1', [], ['a\n'])
+ vf.add_lines('2', ['1'], ['b\n', 'a\n'])
+ readonly_vf = self.get_factory()('foo',
+ transport.get_transport_from_url(self.get_readonly_url('.')))
+ self.assertEqual(['1', '2'], vf.versions())
+ self.assertEqual(['1', '2'], readonly_vf.versions())
+ for version in readonly_vf.versions():
+ readonly_vf.get_lines(version)
+
+
+class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
+
+ def get_file(self):
+ return WeaveFile('foo', self.get_transport(),
+ create=True,
+ get_scope=self.get_transaction)
+
+ def get_factory(self):
+ return WeaveFile
+
+
+class MergeCasesMixin(object):
+
+ def doMerge(self, base, a, b, mp):
+ from cStringIO import StringIO
+ from textwrap import dedent
+
+ def addcrlf(x):
+ return x + '\n'
+
+ w = self.get_file()
+ w.add_lines('text0', [], map(addcrlf, base))
+ w.add_lines('text1', ['text0'], map(addcrlf, a))
+ w.add_lines('text2', ['text0'], map(addcrlf, b))
+
+ self.log_contents(w)
+
+ self.log('merge plan:')
+ p = list(w.plan_merge('text1', 'text2'))
+ for state, line in p:
+ if line:
+ self.log('%12s | %s' % (state, line[:-1]))
+
+ self.log('merge:')
+ mt = StringIO()
+ mt.writelines(w.weave_merge(p))
+ mt.seek(0)
+ self.log(mt.getvalue())
+
+ mp = map(addcrlf, mp)
+ self.assertEqual(mt.readlines(), mp)
+
+
+ def testOneInsert(self):
+ self.doMerge([],
+ ['aa'],
+ [],
+ ['aa'])
+
+ def testSeparateInserts(self):
+ self.doMerge(['aaa', 'bbb', 'ccc'],
+ ['aaa', 'xxx', 'bbb', 'ccc'],
+ ['aaa', 'bbb', 'yyy', 'ccc'],
+ ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
+
+ def testSameInsert(self):
+ self.doMerge(['aaa', 'bbb', 'ccc'],
+ ['aaa', 'xxx', 'bbb', 'ccc'],
+ ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
+ ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
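+
+ # Expected result for testOverlappedInsert; subclasses may override it
+ # (TestWeaveMerge below expects explicit conflict markers instead).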
+ overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']
+ def testOverlappedInsert(self):
+ self.doMerge(['aaa', 'bbb'],
+ ['aaa', 'xxx', 'yyy', 'bbb'],
+ ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)
+
+ # really it ought to reduce this to
+ # ['aaa', 'xxx', 'yyy', 'bbb']
+
+
+ def testClashReplace(self):
+ self.doMerge(['aaa'],
+ ['xxx'],
+ ['yyy', 'zzz'],
+ ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
+ '>>>>>>> '])
+
+ def testNonClashInsert1(self):
+ self.doMerge(['aaa'],
+ ['xxx', 'aaa'],
+ ['yyy', 'zzz'],
+ ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
+ '>>>>>>> '])
+
+ def testNonClashInsert2(self):
+ self.doMerge(['aaa'],
+ ['aaa'],
+ ['yyy', 'zzz'],
+ ['yyy', 'zzz'])
+
+
+ def testDeleteAndModify(self):
+ """Clashing delete and modification.
+
+ If one side modifies a region and the other deletes it then
+ there should be a conflict with one side blank.
+ """
+
+ #######################################
+ # skipped, not working yet
+ return
+
+ self.doMerge(['aaa', 'bbb', 'ccc'],
+ ['aaa', 'ddd', 'ccc'],
+ ['aaa', 'ccc'],
+ ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])
+
+ def _test_merge_from_strings(self, base, a, b, expected):
+ w = self.get_file()
+ w.add_lines('text0', [], base.splitlines(True))
+ w.add_lines('text1', ['text0'], a.splitlines(True))
+ w.add_lines('text2', ['text0'], b.splitlines(True))
+ self.log('merge plan:')
+ p = list(w.plan_merge('text1', 'text2'))
+ for state, line in p:
+ if line:
+ self.log('%12s | %s' % (state, line[:-1]))
+ self.log('merge result:')
+ result_text = ''.join(w.weave_merge(p))
+ self.log(result_text)
+ self.assertEqualDiff(result_text, expected)
+
+ def test_weave_merge_conflicts(self):
+ # does weave merge properly handle plans that end with unchanged?
+ result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
+ self.assertEqual(result, 'hello\n')
+
+ def test_deletion_extended(self):
+ """One side deletes, the other deletes more.
+ """
+ base = """\
+ line 1
+ line 2
+ line 3
+ """
+ a = """\
+ line 1
+ line 2
+ """
+ b = """\
+ line 1
+ """
+ result = """\
+ line 1
+<<<<<<<\x20
+ line 2
+=======
+>>>>>>>\x20
+ """
+ self._test_merge_from_strings(base, a, b, result)
+
+ def test_deletion_overlap(self):
+ """Delete overlapping regions with no other conflict.
+
+ Arguably it'd be better to treat these as agreement, rather than
+ conflict, but for now conflict is safer.
+ """
+ base = """\
+ start context
+ int a() {}
+ int b() {}
+ int c() {}
+ end context
+ """
+ a = """\
+ start context
+ int a() {}
+ end context
+ """
+ b = """\
+ start context
+ int c() {}
+ end context
+ """
+ result = """\
+ start context
+<<<<<<<\x20
+ int a() {}
+=======
+ int c() {}
+>>>>>>>\x20
+ end context
+ """
+ self._test_merge_from_strings(base, a, b, result)
+
+ def test_agreement_deletion(self):
+ """Agree to delete some lines, without conflicts."""
+ base = """\
+ start context
+ base line 1
+ base line 2
+ end context
+ """
+ a = """\
+ start context
+ base line 1
+ end context
+ """
+ b = """\
+ start context
+ base line 1
+ end context
+ """
+ result = """\
+ start context
+ base line 1
+ end context
+ """
+ self._test_merge_from_strings(base, a, b, result)
+
+ def test_sync_on_deletion(self):
+ """Specific case of merge where we can synchronize incorrectly.
+
+ A previous version of the weave merge concluded that the two versions
+ agreed on deleting line 2, and this could be a synchronization point.
+ Line 1 was then considered in isolation, and thought to be deleted on
+ both sides.
+
+ It's better to consider the whole thing as a disagreement region.
+ """
+ base = """\
+ start context
+ base line 1
+ base line 2
+ end context
+ """
+ a = """\
+ start context
+ base line 1
+ a's replacement line 2
+ end context
+ """
+ b = """\
+ start context
+ b replaces
+ both lines
+ end context
+ """
+ result = """\
+ start context
+<<<<<<<\x20
+ base line 1
+ a's replacement line 2
+=======
+ b replaces
+ both lines
+>>>>>>>\x20
+ end context
+ """
+ self._test_merge_from_strings(base, a, b, result)
+
+
+class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):
+
+ def get_file(self, name='foo'):
+ return WeaveFile(name, self.get_transport(),
+ create=True)
+
+ def log_contents(self, w):
+ self.log('weave is:')
+ tmpf = StringIO()
+ write_weave(w, tmpf)
+ self.log(tmpf.getvalue())
+
+ overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
+ 'xxx', '>>>>>>> ', 'bbb']
+
+
+class TestContentFactoryAdaption(TestCaseWithMemoryTransport):
+
+ def test_select_adaptor(self):
+ """Test expected adapters exist."""
+ # One scenario for each lookup combination we expect to use.
+ # Each is source_kind, requested_kind, adapter class
+ scenarios = [
+ ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
+ ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
+ ('knit-annotated-delta-gz', 'knit-delta-gz',
+ _mod_knit.DeltaAnnotatedToUnannotated),
+ ('knit-annotated-delta-gz', 'fulltext',
+ _mod_knit.DeltaAnnotatedToFullText),
+ ('knit-annotated-ft-gz', 'knit-ft-gz',
+ _mod_knit.FTAnnotatedToUnannotated),
+ ('knit-annotated-ft-gz', 'fulltext',
+ _mod_knit.FTAnnotatedToFullText),
+ ]
+ for source, requested, klass in scenarios:
+ adapter_factory = versionedfile.adapter_registry.get(
+ (source, requested))
+ adapter = adapter_factory(None)
+ self.assertIsInstance(adapter, klass)
+
+ def get_knit(self, annotated=True):
+ mapper = ConstantMapper('knit')
+ transport = self.get_transport()
+ return make_file_factory(annotated, mapper)(transport)
+
+ def helpGetBytes(self, f, ft_adapter, delta_adapter):
+ """Grab the interested adapted texts for tests."""
+ # origin is a fulltext
+ entries = f.get_record_stream([('origin',)], 'unordered', False)
+ base = entries.next()
+ ft_data = ft_adapter.get_bytes(base)
+ # merged is both a delta and multiple parents.
+ entries = f.get_record_stream([('merged',)], 'unordered', False)
+ merged = entries.next()
+ delta_data = delta_adapter.get_bytes(merged)
+ return ft_data, delta_data
+
+ def test_deannotation_noeol(self):
+ """Test converting annotated knits to unannotated knits."""
+ # we need a full text, and a delta
+ f = self.get_knit()
+ get_diamond_files(f, 1, trailing_eol=False)
+ ft_data, delta_data = self.helpGetBytes(f,
+ _mod_knit.FTAnnotatedToUnannotated(None),
+ _mod_knit.DeltaAnnotatedToUnannotated(None))
+ self.assertEqual(
+ 'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
+ 'origin\n'
+ 'end origin\n',
+ GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
+ self.assertEqual(
+ 'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
+ '1,2,3\nleft\nright\nmerged\nend merged\n',
+ GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
+
+ def test_deannotation(self):
+ """Test converting annotated knits to unannotated knits."""
+ # we need a full text, and a delta
+ f = self.get_knit()
+ get_diamond_files(f, 1)
+ ft_data, delta_data = self.helpGetBytes(f,
+ _mod_knit.FTAnnotatedToUnannotated(None),
+ _mod_knit.DeltaAnnotatedToUnannotated(None))
+ self.assertEqual(
+ 'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
+ 'origin\n'
+ 'end origin\n',
+ GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
+ self.assertEqual(
+ 'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
+ '2,2,2\nright\nmerged\nend merged\n',
+ GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
+
+ def test_annotated_to_fulltext_no_eol(self):
+ """Test adapting annotated knits to full texts (for -> weaves)."""
+ # we need a full text, and a delta
+ f = self.get_knit()
+ get_diamond_files(f, 1, trailing_eol=False)
+ # Reconstructing a full text requires a backing versioned file, and it
+ # must have the base lines requested from it.
+ logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
+ ft_data, delta_data = self.helpGetBytes(f,
+ _mod_knit.FTAnnotatedToFullText(None),
+ _mod_knit.DeltaAnnotatedToFullText(logged_vf))
+ self.assertEqual('origin', ft_data)
+ self.assertEqual('base\nleft\nright\nmerged', delta_data)
+ self.assertEqual([('get_record_stream', [('left',)], 'unordered',
+ True)], logged_vf.calls)
+
+ def test_annotated_to_fulltext(self):
+ """Test adapting annotated knits to full texts (for -> weaves)."""
+ # we need a full text, and a delta
+ f = self.get_knit()
+ get_diamond_files(f, 1)
+ # Reconstructing a full text requires a backing versioned file, and it
+ # must have the base lines requested from it.
+ logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
+ ft_data, delta_data = self.helpGetBytes(f,
+ _mod_knit.FTAnnotatedToFullText(None),
+ _mod_knit.DeltaAnnotatedToFullText(logged_vf))
+ self.assertEqual('origin\n', ft_data)
+ self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
+ self.assertEqual([('get_record_stream', [('left',)], 'unordered',
+ True)], logged_vf.calls)
+
+ def test_unannotated_to_fulltext(self):
+ """Test adapting unannotated knits to full texts.
+
+ This is used for -> weaves, and for -> annotated knits.
+ """
+ # we need a full text, and a delta
+ f = self.get_knit(annotated=False)
+ get_diamond_files(f, 1)
+ # Reconstructing a full text requires a backing versioned file, and it
+ # must have the base lines requested from it.
+ logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
+ ft_data, delta_data = self.helpGetBytes(f,
+ _mod_knit.FTPlainToFullText(None),
+ _mod_knit.DeltaPlainToFullText(logged_vf))
+ self.assertEqual('origin\n', ft_data)
+ self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
+ self.assertEqual([('get_record_stream', [('left',)], 'unordered',
+ True)], logged_vf.calls)
+
+ def test_unannotated_to_fulltext_no_eol(self):
+ """Test adapting unannotated knits to full texts.
+
+ This is used for -> weaves, and for -> annotated knits.
+ """
+ # we need a full text, and a delta
+ f = self.get_knit(annotated=False)
+ get_diamond_files(f, 1, trailing_eol=False)
+ # Reconstructing a full text requires a backing versioned file, and it
+ # must have the base lines requested from it.
+ logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
+ ft_data, delta_data = self.helpGetBytes(f,
+ _mod_knit.FTPlainToFullText(None),
+ _mod_knit.DeltaPlainToFullText(logged_vf))
+ self.assertEqual('origin', ft_data)
+ self.assertEqual('base\nleft\nright\nmerged', delta_data)
+ self.assertEqual([('get_record_stream', [('left',)], 'unordered',
+ True)], logged_vf.calls)
+
+
+class TestKeyMapper(TestCaseWithMemoryTransport):
+ """Tests for various key mapping logic."""
+
+ def test_identity_mapper(self):
+ mapper = versionedfile.ConstantMapper("inventory")
+ self.assertEqual("inventory", mapper.map(('foo@ar',)))
+ self.assertEqual("inventory", mapper.map(('quux',)))
+
+ def test_prefix_mapper(self):
+ # format5: plain
+ mapper = versionedfile.PrefixMapper()
+ self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
+ self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
+ self.assertEqual(('file-id',), mapper.unmap("file-id"))
+ self.assertEqual(('new-id',), mapper.unmap("new-id"))
+
+ def test_hash_prefix_mapper(self):
+ # format6: hash + plain
+ mapper = versionedfile.HashPrefixMapper()
+ self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
+ self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
+ self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
+ self.assertEqual(('new-id',), mapper.unmap("45/new-id"))
+
+ def test_hash_escaped_mapper(self):
+ # knit1: hash + escaped
+ mapper = versionedfile.HashEscapedPrefixMapper()
+ self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
+ self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
+ "revision-id")))
+ self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
+ "revision-id")))
+ self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
+ self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
+
+
+class TestVersionedFiles(TestCaseWithMemoryTransport):
+ """Tests for the multiple-file variant of VersionedFile."""
+
+ # We want to be sure of behaviour for:
+ # weaves prefix layout (weave texts)
+ # individually named weaves (weave inventories)
+ # annotated knits - prefix|hash|hash-escape layout, we test the third only
+ # as it is the most complex mapper.
+ # individually named knits
+ # individual no-graph knits in packs (signatures)
+ # individual graph knits in packs (inventories)
+ # individual graph nocompression knits in packs (revisions)
+ # plain text knits in packs (texts)
+ len_one_scenarios = [
+ ('weave-named', {
+ 'cleanup':None,
+ 'factory':make_versioned_files_factory(WeaveFile,
+ ConstantMapper('inventory')),
+ 'graph':True,
+ 'key_length':1,
+ 'support_partial_insertion': False,
+ }),
+ ('named-knit', {
+ 'cleanup':None,
+ 'factory':make_file_factory(False, ConstantMapper('revisions')),
+ 'graph':True,
+ 'key_length':1,
+ 'support_partial_insertion': False,
+ }),
+ ('named-nograph-nodelta-knit-pack', {
+ 'cleanup':cleanup_pack_knit,
+ 'factory':make_pack_factory(False, False, 1),
+ 'graph':False,
+ 'key_length':1,
+ 'support_partial_insertion': False,
+ }),
+ ('named-graph-knit-pack', {
+ 'cleanup':cleanup_pack_knit,
+ 'factory':make_pack_factory(True, True, 1),
+ 'graph':True,
+ 'key_length':1,
+ 'support_partial_insertion': True,
+ }),
+ ('named-graph-nodelta-knit-pack', {
+ 'cleanup':cleanup_pack_knit,
+ 'factory':make_pack_factory(True, False, 1),
+ 'graph':True,
+ 'key_length':1,
+ 'support_partial_insertion': False,
+ }),
+ ('groupcompress-nograph', {
+ 'cleanup':groupcompress.cleanup_pack_group,
+ 'factory':groupcompress.make_pack_factory(False, False, 1),
+ 'graph': False,
+ 'key_length':1,
+ 'support_partial_insertion':False,
+ }),
+ ]
+ len_two_scenarios = [
+ ('weave-prefix', {
+ 'cleanup':None,
+ 'factory':make_versioned_files_factory(WeaveFile,
+ PrefixMapper()),
+ 'graph':True,
+ 'key_length':2,
+ 'support_partial_insertion': False,
+ }),
+ ('annotated-knit-escape', {
+ 'cleanup':None,
+ 'factory':make_file_factory(True, HashEscapedPrefixMapper()),
+ 'graph':True,
+ 'key_length':2,
+ 'support_partial_insertion': False,
+ }),
+ ('plain-knit-pack', {
+ 'cleanup':cleanup_pack_knit,
+ 'factory':make_pack_factory(True, True, 2),
+ 'graph':True,
+ 'key_length':2,
+ 'support_partial_insertion': True,
+ }),
+ ('groupcompress', {
+ 'cleanup':groupcompress.cleanup_pack_group,
+ 'factory':groupcompress.make_pack_factory(True, False, 1),
+ 'graph': True,
+ 'key_length':1,
+ 'support_partial_insertion':False,
+ }),
+ ]
+
+ scenarios = len_one_scenarios + len_two_scenarios
+
+ def get_versionedfiles(self, relpath='files'):
+ transport = self.get_transport(relpath)
+ if relpath != '.':
+ transport.mkdir('.')
+ files = self.factory(transport)
+ if self.cleanup is not None:
+ self.addCleanup(self.cleanup, files)
+ return files
+
+ def get_simple_key(self, suffix):
+ """Return a key for the object under test."""
+ if self.key_length == 1:
+ return (suffix,)
+ else:
+ return ('FileA',) + (suffix,)
+
+ def test_add_fallback_implies_without_fallbacks(self):
+ f = self.get_versionedfiles('files')
+ if getattr(f, 'add_fallback_versioned_files', None) is None:
+ raise TestNotApplicable("%s doesn't support fallbacks"
+ % (f.__class__.__name__,))
+ g = self.get_versionedfiles('fallback')
+ key_a = self.get_simple_key('a')
+ g.add_lines(key_a, [], ['\n'])
+ f.add_fallback_versioned_files(g)
+ self.assertTrue(key_a in f.get_parent_map([key_a]))
+ self.assertFalse(key_a in f.without_fallbacks().get_parent_map([key_a]))
+
+ def test_add_lines(self):
+ f = self.get_versionedfiles()
+ key0 = self.get_simple_key('r0')
+ key1 = self.get_simple_key('r1')
+ key2 = self.get_simple_key('r2')
+ keyf = self.get_simple_key('foo')
+ f.add_lines(key0, [], ['a\n', 'b\n'])
+ if self.graph:
+ f.add_lines(key1, [key0], ['b\n', 'c\n'])
+ else:
+ f.add_lines(key1, [], ['b\n', 'c\n'])
+ keys = f.keys()
+ self.assertTrue(key0 in keys)
+ self.assertTrue(key1 in keys)
+ records = []
+ for record in f.get_record_stream([key0, key1], 'unordered', True):
+ records.append((record.key, record.get_bytes_as('fulltext')))
+ records.sort()
+ self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
+
+ def test__add_text(self):
+ f = self.get_versionedfiles()
+ key0 = self.get_simple_key('r0')
+ key1 = self.get_simple_key('r1')
+ key2 = self.get_simple_key('r2')
+ keyf = self.get_simple_key('foo')
+ f._add_text(key0, [], 'a\nb\n')
+ if self.graph:
+ f._add_text(key1, [key0], 'b\nc\n')
+ else:
+ f._add_text(key1, [], 'b\nc\n')
+ keys = f.keys()
+ self.assertTrue(key0 in keys)
+ self.assertTrue(key1 in keys)
+ records = []
+ for record in f.get_record_stream([key0, key1], 'unordered', True):
+ records.append((record.key, record.get_bytes_as('fulltext')))
+ records.sort()
+ self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
+
+ def test_annotate(self):
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ if self.key_length == 1:
+ prefix = ()
+ else:
+ prefix = ('FileA',)
+ # introduced full text
+ origins = files.annotate(prefix + ('origin',))
+ self.assertEqual([
+ (prefix + ('origin',), 'origin\n')],
+ origins)
+ # a delta
+ origins = files.annotate(prefix + ('base',))
+ self.assertEqual([
+ (prefix + ('base',), 'base\n')],
+ origins)
+ # a merge
+ origins = files.annotate(prefix + ('merged',))
+ if self.graph:
+ self.assertEqual([
+ (prefix + ('base',), 'base\n'),
+ (prefix + ('left',), 'left\n'),
+ (prefix + ('right',), 'right\n'),
+ (prefix + ('merged',), 'merged\n')
+ ],
+ origins)
+ else:
+ # Without a graph everything is new.
+ self.assertEqual([
+ (prefix + ('merged',), 'base\n'),
+ (prefix + ('merged',), 'left\n'),
+ (prefix + ('merged',), 'right\n'),
+ (prefix + ('merged',), 'merged\n')
+ ],
+ origins)
+ self.assertRaises(RevisionNotPresent,
+ files.annotate, prefix + ('missing-key',))
+
+ def test_check_no_parameters(self):
+ files = self.get_versionedfiles()
+
+ def test_check_progressbar_parameter(self):
+ """A progress bar can be supplied because check can be a generator."""
+ pb = ui.ui_factory.nested_progress_bar()
+ self.addCleanup(pb.finished)
+ files = self.get_versionedfiles()
+ files.check(progress_bar=pb)
+
+ def test_check_with_keys_becomes_generator(self):
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ keys = files.keys()
+ entries = files.check(keys=keys)
+ seen = set()
+ # Texts output should be fulltexts.
+ self.capture_stream(files, entries, seen.add,
+ files.get_parent_map(keys), require_fulltext=True)
+ # All texts should be output.
+ self.assertEqual(set(keys), seen)
+
+ def test_clear_cache(self):
+ files = self.get_versionedfiles()
+ files.clear_cache()
+
+ def test_construct(self):
+ """Each parameterised test can be constructed on a transport."""
+ files = self.get_versionedfiles()
+
+ def get_diamond_files(self, files, trailing_eol=True, left_only=False,
+ nokeys=False):
+ return get_diamond_files(files, self.key_length,
+ trailing_eol=trailing_eol, nograph=not self.graph,
+ left_only=left_only, nokeys=nokeys)
+
+ def _add_content_nostoresha(self, add_lines):
+ """When nostore_sha is supplied using old content raises."""
+ vf = self.get_versionedfiles()
+ empty_text = ('a', [])
+ sample_text_nl = ('b', ["foo\n", "bar\n"])
+ sample_text_no_nl = ('c', ["foo\n", "bar"])
+ shas = []
+ for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
+ if add_lines:
+ sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
+ lines)
+ else:
+ sha, _, _ = vf._add_text(self.get_simple_key(version), [],
+ ''.join(lines))
+ shas.append(sha)
+ # we now have a copy of all the lines in the vf.
+ for sha, (version, lines) in zip(
+ shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+ new_key = self.get_simple_key(version + "2")
+ self.assertRaises(errors.ExistingContent,
+ vf.add_lines, new_key, [], lines,
+ nostore_sha=sha)
+ self.assertRaises(errors.ExistingContent,
+ vf._add_text, new_key, [], ''.join(lines),
+ nostore_sha=sha)
+ # and no new version should have been added.
+ record = vf.get_record_stream([new_key], 'unordered', True).next()
+ self.assertEqual('absent', record.storage_kind)
+
+ def test_add_lines_nostoresha(self):
+ self._add_content_nostoresha(add_lines=True)
+
+ def test__add_text_nostoresha(self):
+ self._add_content_nostoresha(add_lines=False)
+
+ def test_add_lines_return(self):
+ files = self.get_versionedfiles()
+ # save code by using the stock data insertion helper.
+ adds = self.get_diamond_files(files)
+ results = []
+ # We can only validate the first 2 elements returned from add_lines.
+ for add in adds:
+ self.assertEqual(3, len(add))
+ results.append(add[:2])
+ if self.key_length == 1:
+ self.assertEqual([
+ ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+ ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+ ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+ ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+ ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
+ results)
+ elif self.key_length == 2:
+ self.assertEqual([
+ ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+ ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+ ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+ ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+ ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+ ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+ ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+ ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+ ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
+ ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
+ results)
+
+ def test_add_lines_no_key_generates_chk_key(self):
+ files = self.get_versionedfiles()
+ # save code by using the stock data insertion helper.
+ adds = self.get_diamond_files(files, nokeys=True)
+ results = []
+ # We can only validate the first 2 elements returned from add_lines.
+ for add in adds:
+ self.assertEqual(3, len(add))
+ results.append(add[:2])
+ if self.key_length == 1:
+ self.assertEqual([
+ ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+ ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+ ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+ ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+ ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
+ results)
+ # Check the added items got CHK keys.
+ self.assertEqual(set([
+ ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
+ ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
+ ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
+ ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
+ ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
+ ]),
+ files.keys())
+ elif self.key_length == 2:
+ self.assertEqual([
+ ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+ ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+ ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+ ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+ ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+ ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+ ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+ ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+ ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
+ ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
+ results)
+ # Check the added items got CHK keys.
+ self.assertEqual(set([
+ ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
+ ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
+ ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
+ ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
+ ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
+ ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
+ ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
+ ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
+ ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
+ ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
+ ]),
+ files.keys())
+
+ def test_empty_lines(self):
+ """Empty files can be stored."""
+ f = self.get_versionedfiles()
+ key_a = self.get_simple_key('a')
+ f.add_lines(key_a, [], [])
+ self.assertEqual('',
+ f.get_record_stream([key_a], 'unordered', True
+ ).next().get_bytes_as('fulltext'))
+ key_b = self.get_simple_key('b')
+ f.add_lines(key_b, self.get_parents([key_a]), [])
+ self.assertEqual('',
+ f.get_record_stream([key_b], 'unordered', True
+ ).next().get_bytes_as('fulltext'))
+
+ def test_newline_only(self):
+ f = self.get_versionedfiles()
+ key_a = self.get_simple_key('a')
+ f.add_lines(key_a, [], ['\n'])
+ self.assertEqual('\n',
+ f.get_record_stream([key_a], 'unordered', True
+ ).next().get_bytes_as('fulltext'))
+ key_b = self.get_simple_key('b')
+ f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
+ self.assertEqual('\n',
+ f.get_record_stream([key_b], 'unordered', True
+ ).next().get_bytes_as('fulltext'))
+
+ def test_get_known_graph_ancestry(self):
+ f = self.get_versionedfiles()
+ if not self.graph:
+ raise TestNotApplicable('ancestry info only relevant with graph.')
+ key_a = self.get_simple_key('a')
+ key_b = self.get_simple_key('b')
+ key_c = self.get_simple_key('c')
+ # A
+ # |\
+ # | B
+ # |/
+ # C
+ f.add_lines(key_a, [], ['\n'])
+ f.add_lines(key_b, [key_a], ['\n'])
+ f.add_lines(key_c, [key_a, key_b], ['\n'])
+ kg = f.get_known_graph_ancestry([key_c])
+ self.assertIsInstance(kg, _mod_graph.KnownGraph)
+ self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
+
+ def test_known_graph_with_fallbacks(self):
+ f = self.get_versionedfiles('files')
+ if not self.graph:
+ raise TestNotApplicable('ancestry info only relevant with graph.')
+ if getattr(f, 'add_fallback_versioned_files', None) is None:
+ raise TestNotApplicable("%s doesn't support fallbacks"
+ % (f.__class__.__name__,))
+ key_a = self.get_simple_key('a')
+ key_b = self.get_simple_key('b')
+ key_c = self.get_simple_key('c')
+ # A only in fallback
+ # |\
+ # | B
+ # |/
+ # C
+ g = self.get_versionedfiles('fallback')
+ g.add_lines(key_a, [], ['\n'])
+ f.add_fallback_versioned_files(g)
+ f.add_lines(key_b, [key_a], ['\n'])
+ f.add_lines(key_c, [key_a, key_b], ['\n'])
+ kg = f.get_known_graph_ancestry([key_c])
+ self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
+
+ def test_get_record_stream_empty(self):
+ """An empty stream can be requested without error."""
+ f = self.get_versionedfiles()
+ entries = f.get_record_stream([], 'unordered', False)
+ self.assertEqual([], list(entries))
+
+ def assertValidStorageKind(self, storage_kind):
+ """Assert that storage_kind is a valid storage_kind."""
+ self.assertSubset([storage_kind],
+ ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
+ 'knit-ft', 'knit-delta', 'chunked', 'fulltext',
+ 'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
+ 'knit-delta-gz',
+ 'knit-delta-closure', 'knit-delta-closure-ref',
+ 'groupcompress-block', 'groupcompress-block-ref'])
+
+ def capture_stream(self, f, entries, on_seen, parents,
+ require_fulltext=False):
+ """Capture a stream for testing."""
+ for factory in entries:
+ on_seen(factory.key)
+ self.assertValidStorageKind(factory.storage_kind)
+ if factory.sha1 is not None:
+ self.assertEqual(f.get_sha1s([factory.key])[factory.key],
+ factory.sha1)
+ self.assertEqual(parents[factory.key], factory.parents)
+ self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
+ str)
+ if require_fulltext:
+ factory.get_bytes_as('fulltext')
+
+ def test_get_record_stream_interface(self):
+ """each item in a stream has to provide a regular interface."""
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ keys, _ = self.get_keys_and_sort_order()
+ parent_map = files.get_parent_map(keys)
+ entries = files.get_record_stream(keys, 'unordered', False)
+ seen = set()
+ self.capture_stream(files, entries, seen.add, parent_map)
+ self.assertEqual(set(keys), seen)
+
+ def get_keys_and_sort_order(self):
+ """Get diamond test keys list, and their sort ordering."""
+ if self.key_length == 1:
+ keys = [('merged',), ('left',), ('right',), ('base',)]
+ sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
+ else:
+ keys = [
+ ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+ ('FileA', 'base'),
+ ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+ ('FileB', 'base'),
+ ]
+ sort_order = {
+ ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
+ ('FileA', 'base'):0,
+ ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
+ ('FileB', 'base'):0,
+ }
+ return keys, sort_order
+
+ def get_keys_and_groupcompress_sort_order(self):
+ """Get diamond test keys list, and their groupcompress sort ordering."""
+ if self.key_length == 1:
+ keys = [('merged',), ('left',), ('right',), ('base',)]
+ sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
+ else:
+ keys = [
+ ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+ ('FileA', 'base'),
+ ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+ ('FileB', 'base'),
+ ]
+ sort_order = {
+ ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
+ ('FileA', 'base'):2,
+ ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
+ ('FileB', 'base'):5,
+ }
+ return keys, sort_order
+
+ def test_get_record_stream_interface_ordered(self):
+ """each item in a stream has to provide a regular interface."""
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ keys, sort_order = self.get_keys_and_sort_order()
+ parent_map = files.get_parent_map(keys)
+ entries = files.get_record_stream(keys, 'topological', False)
+ seen = []
+ self.capture_stream(files, entries, seen.append, parent_map)
+ self.assertStreamOrder(sort_order, seen, keys)
+
+ def test_get_record_stream_interface_ordered_with_delta_closure(self):
+ """each item must be accessible as a fulltext."""
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ keys, sort_order = self.get_keys_and_sort_order()
+ parent_map = files.get_parent_map(keys)
+ entries = files.get_record_stream(keys, 'topological', True)
+ seen = []
+ for factory in entries:
+ seen.append(factory.key)
+ self.assertValidStorageKind(factory.storage_kind)
+ self.assertSubset([factory.sha1],
+ [None, files.get_sha1s([factory.key])[factory.key]])
+ self.assertEqual(parent_map[factory.key], factory.parents)
+ # self.assertEqual(files.get_text(factory.key),
+ ft_bytes = factory.get_bytes_as('fulltext')
+ self.assertIsInstance(ft_bytes, str)
+ chunked_bytes = factory.get_bytes_as('chunked')
+ self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))
+
+ self.assertStreamOrder(sort_order, seen, keys)
+
+ def test_get_record_stream_interface_groupcompress(self):
+ """each item in a stream has to provide a regular interface."""
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ keys, sort_order = self.get_keys_and_groupcompress_sort_order()
+ parent_map = files.get_parent_map(keys)
+ entries = files.get_record_stream(keys, 'groupcompress', False)
+ seen = []
+ self.capture_stream(files, entries, seen.append, parent_map)
+ self.assertStreamOrder(sort_order, seen, keys)
+
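+ # Helper: each key must be seen exactly once and, for graph formats, the
+ # sort positions within a given key prefix must never decrease.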
+ def assertStreamOrder(self, sort_order, seen, keys):
+ self.assertEqual(len(set(seen)), len(keys))
+ if self.key_length == 1:
+ lows = {():0}
+ else:
+ lows = {('FileA',):0, ('FileB',):0}
+ if not self.graph:
+ self.assertEqual(set(keys), set(seen))
+ else:
+ for key in seen:
+ sort_pos = sort_order[key]
+ self.assertTrue(sort_pos >= lows[key[:-1]],
+ "Out of order in sorted stream: %r, %r" % (key, seen))
+ lows[key[:-1]] = sort_pos
+
+ def test_get_record_stream_unknown_storage_kind_raises(self):
+ """Asking for a storage kind that the stream cannot supply raises."""
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ if self.key_length == 1:
+ keys = [('merged',), ('left',), ('right',), ('base',)]
+ else:
+ keys = [
+ ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+ ('FileA', 'base'),
+ ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+ ('FileB', 'base'),
+ ]
+ parent_map = files.get_parent_map(keys)
+ entries = files.get_record_stream(keys, 'unordered', False)
+ # We track the keys seen because we should be able to try one storage
+ # kind, fail, and then ask for one that works and continue.
+ seen = set()
+ for factory in entries:
+ seen.add(factory.key)
+ self.assertValidStorageKind(factory.storage_kind)
+ if factory.sha1 is not None:
+ self.assertEqual(files.get_sha1s([factory.key])[factory.key],
+ factory.sha1)
+ self.assertEqual(parent_map[factory.key], factory.parents)
+ # currently no stream emits mpdiff
+ self.assertRaises(errors.UnavailableRepresentation,
+ factory.get_bytes_as, 'mpdiff')
+ self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
+ str)
+ self.assertEqual(set(keys), seen)
+
+ def test_get_record_stream_missing_records_are_absent(self):
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ if self.key_length == 1:
+ keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
+ else:
+ keys = [
+ ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+ ('FileA', 'absent'), ('FileA', 'base'),
+ ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+ ('FileB', 'absent'), ('FileB', 'base'),
+ ('absent', 'absent'),
+ ]
+ parent_map = files.get_parent_map(keys)
+ entries = files.get_record_stream(keys, 'unordered', False)
+ self.assertAbsentRecord(files, keys, parent_map, entries)
+ entries = files.get_record_stream(keys, 'topological', False)
+ self.assertAbsentRecord(files, keys, parent_map, entries)
+
+ def assertRecordHasContent(self, record, bytes):
+ """Assert that record has the bytes bytes."""
+ self.assertEqual(bytes, record.get_bytes_as('fulltext'))
+ self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))
+
+ def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
+ files = self.get_versionedfiles()
+ key = self.get_simple_key('foo')
+ files.add_lines(key, (), ['my text\n', 'content'])
+ stream = files.get_record_stream([key], 'unordered', False)
+ record = stream.next()
+ if record.storage_kind in ('chunked', 'fulltext'):
+ # chunked and fulltext representations are for direct use, not wire
+ # serialisation: check they can be used directly. To send such
+ # records over the wire, translation will be needed.
+ self.assertRecordHasContent(record, "my text\ncontent")
+ else:
+ bytes = [record.get_bytes_as(record.storage_kind)]
+ network_stream = versionedfile.NetworkRecordStream(bytes).read()
+ source_record = record
+ records = []
+ for record in network_stream:
+ records.append(record)
+ self.assertEqual(source_record.storage_kind,
+ record.storage_kind)
+ self.assertEqual(source_record.parents, record.parents)
+ self.assertEqual(
+ source_record.get_bytes_as(source_record.storage_kind),
+ record.get_bytes_as(record.storage_kind))
+ self.assertEqual(1, len(records))
+
+ def assertStreamMetaEqual(self, records, expected, stream):
+ """Assert that streams expected and stream have the same records.
+
+ :param records: A list to collect the seen records.
+ :return: A generator of the records in stream.
+ """
+ # We make assertions during copying to catch things early for
+ # easier debugging.
+ for record, ref_record in izip(stream, expected):
+ records.append(record)
+ self.assertEqual(ref_record.key, record.key)
+ self.assertEqual(ref_record.storage_kind, record.storage_kind)
+ self.assertEqual(ref_record.parents, record.parents)
+ yield record
+
+ def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
+ stream):
+ """Convert a stream to a bytes iterator.
+
+ :param skipped_records: A list with one element to increment when a
+ record is skipped.
+ :param full_texts: A dict from key->fulltext representation, for
+ checking chunked or fulltext stored records.
+ :param stream: A record_stream.
+ :return: An iterator over the bytes of each record.
+ """
+ for record in stream:
+ if record.storage_kind in ('chunked', 'fulltext'):
+ skipped_records[0] += 1
+ # check the content is correct for direct use.
+ self.assertRecordHasContent(record, full_texts[record.key])
+ else:
+ yield record.get_bytes_as(record.storage_kind)
+
+ def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
+ files = self.get_versionedfiles()
+ target_files = self.get_versionedfiles('target')
+ key = self.get_simple_key('ft')
+ key_delta = self.get_simple_key('delta')
+ files.add_lines(key, (), ['my text\n', 'content'])
+ if self.graph:
+ delta_parents = (key,)
+ else:
+ delta_parents = ()
+ files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+ local = files.get_record_stream([key, key_delta], 'unordered', False)
+ ref = files.get_record_stream([key, key_delta], 'unordered', False)
+ skipped_records = [0]
+ full_texts = {
+ key: "my text\ncontent",
+ key_delta: "different\ncontent\n",
+ }
+ byte_stream = self.stream_to_bytes_or_skip_counter(
+ skipped_records, full_texts, local)
+ network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
+ records = []
+ # insert the stream from the network into a versioned files object so we can
+ # check the content was carried across correctly without doing delta
+ # inspection.
+ target_files.insert_record_stream(
+ self.assertStreamMetaEqual(records, ref, network_stream))
+ # No duplicates on the wire thank you!
+ self.assertEqual(2, len(records) + skipped_records[0])
+ if len(records):
+ # if any content was copied, it must all have been.
+ self.assertIdenticalVersionedFile(files, target_files)
+
+ def test_get_record_stream_native_formats_are_wire_ready_delta(self):
+ # copy a delta over the wire
+ files = self.get_versionedfiles()
+ target_files = self.get_versionedfiles('target')
+ key = self.get_simple_key('ft')
+ key_delta = self.get_simple_key('delta')
+ files.add_lines(key, (), ['my text\n', 'content'])
+ if self.graph:
+ delta_parents = (key,)
+ else:
+ delta_parents = ()
+ files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+ # Copy the basis text across so we can reconstruct the delta during
+ # insertion into target.
+ target_files.insert_record_stream(files.get_record_stream([key],
+ 'unordered', False))
+ local = files.get_record_stream([key_delta], 'unordered', False)
+ ref = files.get_record_stream([key_delta], 'unordered', False)
+ skipped_records = [0]
+ full_texts = {
+ key_delta: "different\ncontent\n",
+ }
+ byte_stream = self.stream_to_bytes_or_skip_counter(
+ skipped_records, full_texts, local)
+ network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
+ records = []
+ # insert the stream from the network into a versioned files object so we can
+ # check the content was carried across correctly without doing delta
+ # inspection during check_stream.
+ target_files.insert_record_stream(
+ self.assertStreamMetaEqual(records, ref, network_stream))
+ # No duplicates on the wire thank you!
+ self.assertEqual(1, len(records) + skipped_records[0])
+ if len(records):
+ # if any content was copied, it must all have been
+ self.assertIdenticalVersionedFile(files, target_files)
+
+ def test_get_record_stream_wire_ready_delta_closure_included(self):
+ # copy a delta over the wire with the ability to get its full text.
+ files = self.get_versionedfiles()
+ key = self.get_simple_key('ft')
+ key_delta = self.get_simple_key('delta')
+ files.add_lines(key, (), ['my text\n', 'content'])
+ if self.graph:
+ delta_parents = (key,)
+ else:
+ delta_parents = ()
+ files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+ local = files.get_record_stream([key_delta], 'unordered', True)
+ ref = files.get_record_stream([key_delta], 'unordered', True)
+ skipped_records = [0]
+ full_texts = {
+ key_delta: "different\ncontent\n",
+ }
+ byte_stream = self.stream_to_bytes_or_skip_counter(
+ skipped_records, full_texts, local)
+ network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
+ records = []
+ # insert the stream from the network into a versioned files object so we can
+ # check the content was carried across correctly without doing delta
+ # inspection during check_stream.
+ for record in self.assertStreamMetaEqual(records, ref, network_stream):
+ # we have to be able to get the full text out:
+ self.assertRecordHasContent(record, full_texts[record.key])
+ # No duplicates on the wire thank you!
+ self.assertEqual(1, len(records) + skipped_records[0])
+
+ def assertAbsentRecord(self, files, keys, parents, entries):
+ """Helper for test_get_record_stream_missing_records_are_absent."""
+ seen = set()
+ for factory in entries:
+ seen.add(factory.key)
+ if factory.key[-1] == 'absent':
+ self.assertEqual('absent', factory.storage_kind)
+ self.assertEqual(None, factory.sha1)
+ self.assertEqual(None, factory.parents)
+ else:
+ self.assertValidStorageKind(factory.storage_kind)
+ if factory.sha1 is not None:
+ sha1 = files.get_sha1s([factory.key])[factory.key]
+ self.assertEqual(sha1, factory.sha1)
+ self.assertEqual(parents[factory.key], factory.parents)
+ self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
+ str)
+ self.assertEqual(set(keys), seen)
+
+ def test_filter_absent_records(self):
+        """Requested missing records can be filtered out trivially."""
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ keys, _ = self.get_keys_and_sort_order()
+ parent_map = files.get_parent_map(keys)
+ # Add an absent record in the middle of the present keys. (We don't ask
+ # for just absent keys to ensure that content before and after the
+ # absent keys is still delivered).
+ present_keys = list(keys)
+ if self.key_length == 1:
+ keys.insert(2, ('extra',))
+ else:
+ keys.insert(2, ('extra', 'extra'))
+ entries = files.get_record_stream(keys, 'unordered', False)
+ seen = set()
+ self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
+ parent_map)
+ self.assertEqual(set(present_keys), seen)
+
+ def get_mapper(self):
+ """Get a mapper suitable for the key length of the test interface."""
+ if self.key_length == 1:
+ return ConstantMapper('source')
+ else:
+ return HashEscapedPrefixMapper()
+
+ def get_parents(self, parents):
+ """Get parents, taking self.graph into consideration."""
+ if self.graph:
+ return parents
+ else:
+ return None
+
+ def test_get_annotator(self):
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ origin_key = self.get_simple_key('origin')
+ base_key = self.get_simple_key('base')
+ left_key = self.get_simple_key('left')
+ right_key = self.get_simple_key('right')
+ merged_key = self.get_simple_key('merged')
+ # annotator = files.get_annotator()
+ # introduced full text
+ origins, lines = files.get_annotator().annotate(origin_key)
+ self.assertEqual([(origin_key,)], origins)
+ self.assertEqual(['origin\n'], lines)
+ # a delta
+ origins, lines = files.get_annotator().annotate(base_key)
+ self.assertEqual([(base_key,)], origins)
+ # a merge
+ origins, lines = files.get_annotator().annotate(merged_key)
+ if self.graph:
+ self.assertEqual([
+ (base_key,),
+ (left_key,),
+ (right_key,),
+ (merged_key,),
+ ], origins)
+ else:
+ # Without a graph everything is new.
+ self.assertEqual([
+ (merged_key,),
+ (merged_key,),
+ (merged_key,),
+ (merged_key,),
+ ], origins)
+ self.assertRaises(RevisionNotPresent,
+ files.get_annotator().annotate, self.get_simple_key('missing-key'))
+
+ def test_get_parent_map(self):
+ files = self.get_versionedfiles()
+ if self.key_length == 1:
+ parent_details = [
+ (('r0',), self.get_parents(())),
+ (('r1',), self.get_parents((('r0',),))),
+ (('r2',), self.get_parents(())),
+ (('r3',), self.get_parents(())),
+ (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
+ ]
+ else:
+ parent_details = [
+ (('FileA', 'r0'), self.get_parents(())),
+ (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
+ (('FileA', 'r2'), self.get_parents(())),
+ (('FileA', 'r3'), self.get_parents(())),
+ (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
+ ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
+ ]
+ for key, parents in parent_details:
+ files.add_lines(key, parents, [])
+ # immediately after adding it should be queryable.
+ self.assertEqual({key:parents}, files.get_parent_map([key]))
+ # We can ask for an empty set
+ self.assertEqual({}, files.get_parent_map([]))
+ # We can ask for many keys
+ all_parents = dict(parent_details)
+ self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
+ # Absent keys are just not included in the result.
+ keys = all_parents.keys()
+ if self.key_length == 1:
+ keys.insert(1, ('missing',))
+ else:
+ keys.insert(1, ('missing', 'missing'))
+ # Absent keys are just ignored
+ self.assertEqual(all_parents, files.get_parent_map(keys))
+
+ def test_get_sha1s(self):
+ files = self.get_versionedfiles()
+ self.get_diamond_files(files)
+ if self.key_length == 1:
+ keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
+ else:
+ # ask for shas from different prefixes.
+ keys = [
+ ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
+ ('FileA', 'merged'), ('FileB', 'right'),
+ ]
+ self.assertEqual({
+ keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
+ keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
+ keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
+ keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
+ keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
+ },
+ files.get_sha1s(keys))
+
+ def test_insert_record_stream_empty(self):
+ """Inserting an empty record stream should work."""
+ files = self.get_versionedfiles()
+ files.insert_record_stream([])
+
+ def assertIdenticalVersionedFile(self, expected, actual):
+        """Assert that expected and actual have the same contents."""
+ self.assertEqual(set(actual.keys()), set(expected.keys()))
+ actual_parents = actual.get_parent_map(actual.keys())
+ if self.graph:
+ self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
+ else:
+ for key, parents in actual_parents.items():
+ self.assertEqual(None, parents)
+ for key in actual.keys():
+ actual_text = actual.get_record_stream(
+ [key], 'unordered', True).next().get_bytes_as('fulltext')
+ expected_text = expected.get_record_stream(
+ [key], 'unordered', True).next().get_bytes_as('fulltext')
+ self.assertEqual(actual_text, expected_text)
+
+ def test_insert_record_stream_fulltexts(self):
+ """Any file should accept a stream of fulltexts."""
+ files = self.get_versionedfiles()
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ # weaves always output fulltexts.
+ source = make_versioned_files_factory(WeaveFile, mapper)(
+ source_transport)
+ self.get_diamond_files(source, trailing_eol=False)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_fulltexts_noeol(self):
+ """Any file should accept a stream of fulltexts."""
+ files = self.get_versionedfiles()
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ # weaves always output fulltexts.
+ source = make_versioned_files_factory(WeaveFile, mapper)(
+ source_transport)
+ self.get_diamond_files(source, trailing_eol=False)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_annotated_knits(self):
+        """Any file should accept a stream from annotated knits."""
+ files = self.get_versionedfiles()
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ source = make_file_factory(True, mapper)(source_transport)
+ self.get_diamond_files(source)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_annotated_knits_noeol(self):
+        """Any file should accept a stream from annotated knits."""
+ files = self.get_versionedfiles()
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ source = make_file_factory(True, mapper)(source_transport)
+ self.get_diamond_files(source, trailing_eol=False)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_plain_knits(self):
+ """Any file should accept a stream from plain knits."""
+ files = self.get_versionedfiles()
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ source = make_file_factory(False, mapper)(source_transport)
+ self.get_diamond_files(source)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_plain_knits_noeol(self):
+ """Any file should accept a stream from plain knits."""
+ files = self.get_versionedfiles()
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ source = make_file_factory(False, mapper)(source_transport)
+ self.get_diamond_files(source, trailing_eol=False)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_existing_keys(self):
+ """Inserting keys already in a file should not error."""
+ files = self.get_versionedfiles()
+ source = self.get_versionedfiles('source')
+ self.get_diamond_files(source)
+        # insert some keys into files first.
+ self.get_diamond_files(files, left_only=True)
+ stream = source.get_record_stream(source.keys(), 'topological',
+ False)
+ files.insert_record_stream(stream)
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_missing_keys(self):
+ """Inserting a stream with absent keys should raise an error."""
+ files = self.get_versionedfiles()
+ source = self.get_versionedfiles('source')
+ stream = source.get_record_stream([('missing',) * self.key_length],
+ 'topological', False)
+ self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
+ stream)
+
+ def test_insert_record_stream_out_of_order(self):
+ """An out of order stream can either error or work."""
+ files = self.get_versionedfiles()
+ source = self.get_versionedfiles('source')
+ self.get_diamond_files(source)
+ if self.key_length == 1:
+ origin_keys = [('origin',)]
+ end_keys = [('merged',), ('left',)]
+ start_keys = [('right',), ('base',)]
+ else:
+ origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
+ end_keys = [('FileA', 'merged',), ('FileA', 'left',),
+ ('FileB', 'merged',), ('FileB', 'left',)]
+ start_keys = [('FileA', 'right',), ('FileA', 'base',),
+ ('FileB', 'right',), ('FileB', 'base',)]
+ origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
+ end_entries = source.get_record_stream(end_keys, 'topological', False)
+ start_entries = source.get_record_stream(start_keys, 'topological', False)
+ entries = chain(origin_entries, end_entries, start_entries)
+ try:
+ files.insert_record_stream(entries)
+ except RevisionNotPresent:
+ # Must not have corrupted the file.
+ files.check()
+ else:
+ self.assertIdenticalVersionedFile(source, files)
+
+ def test_insert_record_stream_long_parent_chain_out_of_order(self):
+ """An out of order stream can either error or work."""
+ if not self.graph:
+ raise TestNotApplicable('ancestry info only relevant with graph.')
+ # Create a reasonably long chain of records based on each other, where
+ # most will be deltas.
+ source = self.get_versionedfiles('source')
+ parents = ()
+ keys = []
+ content = [('same same %d\n' % n) for n in range(500)]
+ for letter in 'abcdefghijklmnopqrstuvwxyz':
+ key = ('key-' + letter,)
+ if self.key_length == 2:
+ key = ('prefix',) + key
+ content.append('content for ' + letter + '\n')
+ source.add_lines(key, parents, content)
+ keys.append(key)
+ parents = (key,)
+ # Create a stream of these records, excluding the first record that the
+ # rest ultimately depend upon, and insert it into a new vf.
+ streams = []
+ for key in reversed(keys):
+ streams.append(source.get_record_stream([key], 'unordered', False))
+ deltas = chain(*streams[:-1])
+ files = self.get_versionedfiles()
+ try:
+ files.insert_record_stream(deltas)
+ except RevisionNotPresent:
+ # Must not have corrupted the file.
+ files.check()
+ else:
+            # Must report either just the first key as a missing parent, or
+            # no key at all as missing (for nodelta scenarios).
+ missing = set(files.get_missing_compression_parent_keys())
+ missing.discard(keys[0])
+ self.assertEqual(set(), missing)
+
+ def get_knit_delta_source(self):
+ """Get a source that can produce a stream with knit delta records,
+ regardless of this test's scenario.
+ """
+ mapper = self.get_mapper()
+ source_transport = self.get_transport('source')
+ source_transport.mkdir('.')
+ source = make_file_factory(False, mapper)(source_transport)
+ get_diamond_files(source, self.key_length, trailing_eol=True,
+ nograph=False, left_only=False)
+ return source
+
+ def test_insert_record_stream_delta_missing_basis_no_corruption(self):
+ """Insertion where a needed basis is not included notifies the caller
+ of the missing basis. In the meantime a record missing its basis is
+ not added.
+ """
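+        # The test below exercises both allowed behaviours: formats that
+        # support partial insertion accept the stream and report the absent
+        # basis via get_missing_compression_parent_keys(); other formats
+        # refuse the stream with RevisionNotPresent and must remain
+        # uncorrupted.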
+ source = self.get_knit_delta_source()
+ keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
+ entries = source.get_record_stream(keys, 'unordered', False)
+ files = self.get_versionedfiles()
+ if self.support_partial_insertion:
+ self.assertEqual([],
+ list(files.get_missing_compression_parent_keys()))
+ files.insert_record_stream(entries)
+ missing_bases = files.get_missing_compression_parent_keys()
+ self.assertEqual(set([self.get_simple_key('left')]),
+ set(missing_bases))
+ self.assertEqual(set(keys), set(files.get_parent_map(keys)))
+ else:
+ self.assertRaises(
+ errors.RevisionNotPresent, files.insert_record_stream, entries)
+ files.check()
+
+ def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
+ """Insertion where a needed basis is not included notifies the caller
+ of the missing basis. That basis can be added in a second
+ insert_record_stream call that does not need to repeat records present
+ in the previous stream. The record(s) that required that basis are
+ fully inserted once their basis is no longer missing.
+ """
+ if not self.support_partial_insertion:
+ raise TestNotApplicable(
+ 'versioned file scenario does not support partial insertion')
+ source = self.get_knit_delta_source()
+ entries = source.get_record_stream([self.get_simple_key('origin'),
+ self.get_simple_key('merged')], 'unordered', False)
+ files = self.get_versionedfiles()
+ files.insert_record_stream(entries)
+ missing_bases = files.get_missing_compression_parent_keys()
+ self.assertEqual(set([self.get_simple_key('left')]),
+ set(missing_bases))
+ # 'merged' is inserted (although a commit of a write group involving
+ # this versionedfiles would fail).
+ merged_key = self.get_simple_key('merged')
+ self.assertEqual(
+ [merged_key], files.get_parent_map([merged_key]).keys())
+ # Add the full delta closure of the missing records
+ missing_entries = source.get_record_stream(
+ missing_bases, 'unordered', True)
+ files.insert_record_stream(missing_entries)
+ # Now 'merged' is fully inserted (and a commit would succeed).
+ self.assertEqual([], list(files.get_missing_compression_parent_keys()))
+ self.assertEqual(
+ [merged_key], files.get_parent_map([merged_key]).keys())
+ files.check()
+
+ def test_iter_lines_added_or_present_in_keys(self):
+        # Test that we get at least an equal set of the lines added by
+        # versions in the store.
+        # The ordering here is to make a tree so that dumb searches have
+        # more changes to muck up.
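+        # Graph constructed below: 'base' and 'lancestor' are roots,
+        # 'rancestor' and 'otherchild' descend from 'base' (the latter also
+        # from 'lancestor'), and 'child' descends from 'rancestor'.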
+
+ class InstrumentedProgress(progress.ProgressTask):
+
+ def __init__(self):
+ progress.ProgressTask.__init__(self)
+ self.updates = []
+
+ def update(self, msg=None, current=None, total=None):
+ self.updates.append((msg, current, total))
+
+ files = self.get_versionedfiles()
+ # add a base to get included
+ files.add_lines(self.get_simple_key('base'), (), ['base\n'])
+        # add an ancestor to be included on one side
+ files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
+        # add an ancestor to be included on the other side
+ files.add_lines(self.get_simple_key('rancestor'),
+ self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
+ # add a child of rancestor with no eofile-nl
+ files.add_lines(self.get_simple_key('child'),
+ self.get_parents([self.get_simple_key('rancestor')]),
+ ['base\n', 'child\n'])
+ # add a child of lancestor and base to join the two roots
+ files.add_lines(self.get_simple_key('otherchild'),
+ self.get_parents([self.get_simple_key('lancestor'),
+ self.get_simple_key('base')]),
+ ['base\n', 'lancestor\n', 'otherchild\n'])
+ def iter_with_keys(keys, expected):
+ # now we need to see what lines are returned, and how often.
+ lines = {}
+ progress = InstrumentedProgress()
+ # iterate over the lines
+ for line in files.iter_lines_added_or_present_in_keys(keys,
+ pb=progress):
+ lines.setdefault(line, 0)
+ lines[line] += 1
+            if [] != progress.updates:
+ self.assertEqual(expected, progress.updates)
+ return lines
+ lines = iter_with_keys(
+ [self.get_simple_key('child'), self.get_simple_key('otherchild')],
+ [('Walking content', 0, 2),
+ ('Walking content', 1, 2),
+ ('Walking content', 2, 2)])
+ # we must see child and otherchild
+ self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
+ self.assertTrue(
+ lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
+        # we don't care if we got more than that.
+
+ # test all lines
+ lines = iter_with_keys(files.keys(),
+ [('Walking content', 0, 5),
+ ('Walking content', 1, 5),
+ ('Walking content', 2, 5),
+ ('Walking content', 3, 5),
+ ('Walking content', 4, 5),
+ ('Walking content', 5, 5)])
+ # all lines must be seen at least once
+ self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
+ self.assertTrue(
+ lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
+ self.assertTrue(
+ lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
+ self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
+ self.assertTrue(
+ lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
+
+ def test_make_mpdiffs(self):
+ from bzrlib import multiparent
+ files = self.get_versionedfiles('source')
+ # add texts that should trip the knit maximum delta chain threshold
+ # as well as doing parallel chains of data in knits.
+        # this is done by two chains of 26 insertions
+ files.add_lines(self.get_simple_key('base'), [], ['line\n'])
+ files.add_lines(self.get_simple_key('noeol'),
+ self.get_parents([self.get_simple_key('base')]), ['line'])
+ # detailed eol tests:
+ # shared last line with parent no-eol
+ files.add_lines(self.get_simple_key('noeolsecond'),
+ self.get_parents([self.get_simple_key('noeol')]),
+ ['line\n', 'line'])
+ # differing last line with parent, both no-eol
+ files.add_lines(self.get_simple_key('noeolnotshared'),
+ self.get_parents([self.get_simple_key('noeolsecond')]),
+ ['line\n', 'phone'])
+ # add eol following a noneol parent, change content
+ files.add_lines(self.get_simple_key('eol'),
+ self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
+ # add eol following a noneol parent, no change content
+ files.add_lines(self.get_simple_key('eolline'),
+ self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
+ # noeol with no parents:
+ files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
+        # noeol preceding its leftmost parent in the output:
+        # this is done by making it a merge of two parents with no common
+        # ancestry: noeolbase and noeol, with the
+        # later-inserted parent the leftmost.
+ files.add_lines(self.get_simple_key('eolbeforefirstparent'),
+ self.get_parents([self.get_simple_key('noeolbase'),
+ self.get_simple_key('noeol')]),
+ ['line'])
+ # two identical eol texts
+ files.add_lines(self.get_simple_key('noeoldup'),
+ self.get_parents([self.get_simple_key('noeol')]), ['line'])
+ next_parent = self.get_simple_key('base')
+ text_name = 'chain1-'
+ text = ['line\n']
+ sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
+ 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
+ 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
+ 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
+ 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
+ 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
+ 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
+ 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
+ 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
+ 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
+ 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
+ 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
+ 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
+ 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
+ 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
+ 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
+ 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
+ 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
+ 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
+ 19:'1ebed371807ba5935958ad0884595126e8c4e823',
+ 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
+ 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
+ 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
+ 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
+ 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
+ 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
+ }
+ for depth in range(26):
+ new_version = self.get_simple_key(text_name + '%s' % depth)
+ text = text + ['line\n']
+ files.add_lines(new_version, self.get_parents([next_parent]), text)
+ next_parent = new_version
+ next_parent = self.get_simple_key('base')
+ text_name = 'chain2-'
+ text = ['line\n']
+ for depth in range(26):
+ new_version = self.get_simple_key(text_name + '%s' % depth)
+ text = text + ['line\n']
+ files.add_lines(new_version, self.get_parents([next_parent]), text)
+ next_parent = new_version
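+        # Round-trip every text through a multi-parent diff: make_mpdiffs()
+        # computes the diff against each key's parents, add_mpdiffs()
+        # reconstructs the fulltext in 'target', and the two fulltexts must
+        # match.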
+ target = self.get_versionedfiles('target')
+ for key in multiparent.topo_iter_keys(files, files.keys()):
+ mpdiff = files.make_mpdiffs([key])[0]
+ parents = files.get_parent_map([key])[key] or []
+ target.add_mpdiffs(
+ [(key, parents, files.get_sha1s([key])[key], mpdiff)])
+ self.assertEqualDiff(
+ files.get_record_stream([key], 'unordered',
+ True).next().get_bytes_as('fulltext'),
+ target.get_record_stream([key], 'unordered',
+ True).next().get_bytes_as('fulltext')
+ )
+
+ def test_keys(self):
+ # While use is discouraged, versions() is still needed by aspects of
+ # bzr.
+ files = self.get_versionedfiles()
+ self.assertEqual(set(), set(files.keys()))
+ if self.key_length == 1:
+ key = ('foo',)
+ else:
+ key = ('foo', 'bar',)
+ files.add_lines(key, (), [])
+ self.assertEqual(set([key]), set(files.keys()))
+
+
+class VirtualVersionedFilesTests(TestCase):
+ """Basic tests for the VirtualVersionedFiles implementations."""
+
+ def _get_parent_map(self, keys):
+ ret = {}
+ for k in keys:
+ if k in self._parent_map:
+ ret[k] = self._parent_map[k]
+ return ret
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._lines = {}
+ self._parent_map = {}
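+        # VirtualVersionedFiles wraps two callables (a parent-map lookup and
+        # a per-key line fetch) into a read-only VersionedFiles view, so the
+        # mutating operations tested below are expected to raise
+        # NotImplementedError.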
+ self.texts = VirtualVersionedFiles(self._get_parent_map,
+ self._lines.get)
+
+ def test_add_lines(self):
+ self.assertRaises(NotImplementedError,
+ self.texts.add_lines, "foo", [], [])
+
+ def test_add_mpdiffs(self):
+ self.assertRaises(NotImplementedError,
+ self.texts.add_mpdiffs, [])
+
+ def test_check_noerrors(self):
+ self.texts.check()
+
+ def test_insert_record_stream(self):
+ self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
+ [])
+
+ def test_get_sha1s_nonexistent(self):
+ self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))
+
+ def test_get_sha1s(self):
+ self._lines["key"] = ["dataline1", "dataline2"]
+ self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
+ self.texts.get_sha1s([("key",)]))
+
+ def test_get_parent_map(self):
+ self._parent_map = {"G": ("A", "B")}
+ self.assertEquals({("G",): (("A",),("B",))},
+ self.texts.get_parent_map([("G",), ("L",)]))
+
+ def test_get_record_stream(self):
+ self._lines["A"] = ["FOO", "BAR"]
+ it = self.texts.get_record_stream([("A",)], "unordered", True)
+ record = it.next()
+ self.assertEquals("chunked", record.storage_kind)
+ self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
+ self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))
+
+ def test_get_record_stream_absent(self):
+ it = self.texts.get_record_stream([("A",)], "unordered", True)
+ record = it.next()
+ self.assertEquals("absent", record.storage_kind)
+
+ def test_iter_lines_added_or_present_in_keys(self):
+ self._lines["A"] = ["FOO", "BAR"]
+ self._lines["B"] = ["HEY"]
+ self._lines["C"] = ["Alberta"]
+ it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
+ self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
+ sorted(list(it)))
+
+
+class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):
+
+ def get_ordering_vf(self, key_priority):
+ builder = self.make_branch_builder('test')
+ builder.start_series()
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None))])
+ builder.build_snapshot('B', ['A'], [])
+ builder.build_snapshot('C', ['B'], [])
+ builder.build_snapshot('D', ['C'], [])
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ vf = b.repository.inventories
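+        # key_priority maps key -> sort weight; for 'unordered' requests the
+        # decorator yields records in ascending weight, with keys missing
+        # from the map sorted to the front (see the tests below).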
+ return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)
+
+ def test_get_empty(self):
+ vf = self.get_ordering_vf({})
+ self.assertEqual([], vf.calls)
+
+ def test_get_record_stream_topological(self):
+ vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
+ request_keys = [('B',), ('C',), ('D',), ('A',)]
+ keys = [r.key for r in vf.get_record_stream(request_keys,
+ 'topological', False)]
+ # We should have gotten the keys in topological order
+ self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
+ # And recorded that the request was made
+ self.assertEqual([('get_record_stream', request_keys, 'topological',
+ False)], vf.calls)
+
+ def test_get_record_stream_ordered(self):
+ vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
+ request_keys = [('B',), ('C',), ('D',), ('A',)]
+ keys = [r.key for r in vf.get_record_stream(request_keys,
+ 'unordered', False)]
+ # They should be returned based on their priority
+ self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
+ # And the request recorded
+ self.assertEqual([('get_record_stream', request_keys, 'unordered',
+ False)], vf.calls)
+
+ def test_get_record_stream_implicit_order(self):
+ vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
+ request_keys = [('B',), ('C',), ('D',), ('A',)]
+ keys = [r.key for r in vf.get_record_stream(request_keys,
+ 'unordered', False)]
+ # A and C are not in the map, so they get sorted to the front. A comes
+ # before C alphabetically, so it comes back first
+ self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
+ # And the request recorded
+ self.assertEqual([('get_record_stream', request_keys, 'unordered',
+ False)], vf.calls)
diff --git a/bzrlib/tests/per_workingtree/__init__.py b/bzrlib/tests/per_workingtree/__init__.py
new file mode 100644
index 0000000..4dc2612
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/__init__.py
@@ -0,0 +1,145 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""WorkingTree implementation tests for bzr.
+
+This tests the conformance of all the workingtree variations to the expected API.
+Specific tests for individual formats are in the tests/test_workingtree file
+rather than in tests/per_workingtree/*.py.
+"""
+
+from bzrlib import (
+ branchbuilder,
+ tests,
+ workingtree,
+ )
+from bzrlib.tests import per_controldir
+
+
+def make_scenarios(transport_server, transport_readonly_server, formats):
+ result = []
+ for workingtree_format in formats:
+ result.append((workingtree_format.__class__.__name__,
+ make_scenario(transport_server,
+ transport_readonly_server,
+ workingtree_format)))
+ return result
+
+
+def make_scenario(transport_server, transport_readonly_server,
+ workingtree_format):
+ return {
+ "transport_server": transport_server,
+ "transport_readonly_server": transport_readonly_server,
+ "bzrdir_format": workingtree_format._matchingbzrdir,
+ "workingtree_format": workingtree_format,
+ }
+
+
+def wt_scenarios():
+ """Returns the scenarios for all registered working trees.
+
+    This can be used by plugins that want to define tests against these working
+ trees.
+ """
+ scenarios = make_scenarios(
+ tests.default_transport,
+ # None here will cause a readonly decorator to be created
+ # by the TestCaseWithTransport.get_readonly_transport method.
+ None,
+ workingtree.format_registry._get_all()
+ )
+ return scenarios
+
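+# A minimal sketch of how a plugin might consume wt_scenarios() (the names
+# here are illustrative only):
+#
+#   def load_tests(standard_tests, module, loader):
+#       result = loader.suiteClass()
+#       return tests.multiply_tests(standard_tests, wt_scenarios(), result)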
+
+class TestCaseWithWorkingTree(per_controldir.TestCaseWithControlDir):
+
+ def make_branch_and_tree(self, relpath, format=None):
+ made_control = self.make_bzrdir(relpath, format=format)
+ made_control.create_repository()
+ made_control.create_branch()
+ return self.workingtree_format.initialize(made_control)
+
+ def make_branch_builder(self, relpath, format=None):
+ if format is None:
+ format = self.workingtree_format.get_controldir_for_branch()
+ builder = branchbuilder.BranchBuilder(self.get_transport(relpath),
+ format=format)
+ return builder
+
+
+def load_tests(standard_tests, module, loader):
+ test_names = [
+ 'add_reference',
+ 'add',
+ 'annotate_iter',
+ 'basis_inventory',
+ 'basis_tree',
+ 'break_lock',
+ 'changes_from',
+ 'check',
+ 'check_state',
+ 'content_filters',
+ 'commit',
+ 'eol_conversion',
+ 'executable',
+ 'flush',
+ 'get_file_mtime',
+ 'get_parent_ids',
+ 'inv',
+ 'is_control_filename',
+ 'is_ignored',
+ 'locking',
+ 'merge_from_branch',
+ 'mkdir',
+ 'move',
+ 'nested_specifics',
+ 'parents',
+ 'paths2ids',
+ 'pull',
+ 'put_file',
+ 'readonly',
+ 'read_working_inventory',
+ 'remove',
+ 'rename_one',
+ 'revision_tree',
+ 'set_root_id',
+ 'smart_add',
+ 'symlinks',
+ 'uncommit',
+ 'unversion',
+ 'views',
+ 'walkdirs',
+ 'workingtree',
+ ]
+ test_workingtree_implementations = [
+ 'bzrlib.tests.per_workingtree.test_' + name for
+ name in test_names]
+
+ scenarios = wt_scenarios()
+
+ # add the tests for the sub modules
+ return tests.multiply_tests(
+ loader.loadTestsFromModuleNames(test_workingtree_implementations),
+ scenarios, standard_tests)
+
+
+class TestWtScenarios(tests.TestCase):
+
+ def test_protect_wt_scenarios(self):
+ # Just make sure we don't accidentally delete the helper again
+ scenarios = wt_scenarios()
diff --git a/bzrlib/tests/per_workingtree/test_add.py b/bzrlib/tests/per_workingtree/test_add.py
new file mode 100644
index 0000000..8b02637
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_add.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'WorkingTree.add'"""
+
+from bzrlib import (
+ errors,
+ inventory,
+ tests,
+ )
+from bzrlib.tests.matchers import HasLayout
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestAdd(TestCaseWithWorkingTree):
+
+ def assertTreeLayout(self, expected, tree):
+ """Check that the tree has the correct layout."""
+ self.assertThat(tree, HasLayout(expected))
+
+ def test_add_one(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['one'])
+ tree.add('one', 'one-id')
+ root_id = tree.get_root_id()
+
+ self.assertTreeLayout([('', root_id), ('one', 'one-id')], tree)
+
+ def test_add_existing_id(self):
+ """Adding an entry with a pre-existing id raises DuplicateFileId"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b'])
+ tree.add(['a'], ['an-id'])
+ self.assertRaises(errors.DuplicateFileId,
+ tree.add, ['b'], ['an-id'])
+ root_id = tree.get_root_id()
+ # And the entry should not have been added.
+ self.assertTreeLayout([('', root_id), ('a', 'an-id')], tree)
+
+ def test_add_old_id(self):
+ """We can add an old id, as long as it doesn't exist now."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b'])
+ tree.add(['a'], ['an-id'])
+ tree.commit('first', rev_id='rev-1')
+ root_id = tree.get_root_id()
+        # Unversion the entry so its file id is free to be reused for 'b'.
+ tree.unversion(['an-id'])
+ tree.add(['b'], ['an-id'])
+ self.assertTreeLayout([('', root_id), ('b', 'an-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'an-id')],
+ tree.basis_tree())
+
+ def test_add_one_list(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['one'])
+ tree.add(['one'], ['one-id'])
+ root_id = tree.get_root_id()
+
+ self.assertTreeLayout([('', root_id), ('one', 'one-id')], tree)
+
+ def test_add_one_new_id(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['one'])
+ tree.add(['one'])
+ root_id = tree.get_root_id()
+ one_id = tree.path2id('one')
+
+ self.assertTreeLayout([('', root_id), ('one', one_id)], tree)
+
+ def test_add_unicode(self):
+ tree = self.make_branch_and_tree('.')
+ try:
+ self.build_tree([u'f\xf6'])
+ except UnicodeError:
+ raise tests.TestSkipped('Filesystem does not support filename.')
+ tree.add([u'f\xf6'])
+ root_id = tree.get_root_id()
+ foo_id = tree.path2id(u'f\xf6')
+
+ self.assertTreeLayout([('', root_id), (u'f\xf6', foo_id)], tree)
+
+ def test_add_subdir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/subdir/', 'dir/subdir/foo'])
+ tree.add(['dir'], ['dir-id'])
+ tree.add(['dir/subdir'], ['subdir-id'])
+ tree.add(['dir/subdir/foo'], ['foo-id'])
+ root_id = tree.get_root_id()
+
+ self.assertTreeLayout([('', root_id), ('dir/', 'dir-id'),
+ ('dir/subdir/', 'subdir-id'),
+ ('dir/subdir/foo', 'foo-id')], tree)
+
+ def test_add_multiple(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'dir/', 'dir/subdir/', 'dir/subdir/foo'])
+ tree.add(['a', 'b', 'dir', 'dir/subdir', 'dir/subdir/foo'],
+ ['a-id', 'b-id', 'dir-id', 'subdir-id', 'foo-id'])
+ root_id = tree.get_root_id()
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b', 'b-id'),
+ ('dir/', 'dir-id'), ('dir/subdir/', 'subdir-id'),
+ ('dir/subdir/foo', 'foo-id')], tree)
+
+ def test_add_invalid(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/subdir/', 'dir/subdir/foo'])
+ root_id = tree.get_root_id()
+
+ self.assertRaises(errors.NotVersionedError,
+ tree.add, ['dir/subdir'])
+ self.assertTreeLayout([('', root_id)], tree)
+
+ def test_add_after_remove(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/subdir/', 'dir/subdir/foo'])
+ root_id = tree.get_root_id()
+ tree.add(['dir'], ['dir-id'])
+ tree.commit('dir', rev_id='rev-1')
+ tree.unversion(['dir-id'])
+ self.assertRaises(errors.NotVersionedError,
+ tree.add, ['dir/subdir'])
+
+ def test_add_root(self):
+ # adding the root should be a no-op, or at least not
+ # do anything whacky.
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ tree.add('')
+ self.assertEqual([tree.path2id('')], list(tree.all_file_ids()))
+ # the root should have been changed to be a new unique root.
+ self.assertNotEqual(inventory.ROOT_ID, tree.path2id(''))
+ tree.unlock()
+
+ def test_add_previously_added(self):
+ # adding a path that was previously added should work
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.add(['foo'], ['foo-id'])
+ tree.unversion(['foo-id'])
+ tree.add(['foo'], ['foo-id'])
+ self.assertEqual('foo-id', tree.path2id('foo'))
+
+ def test_add_present_in_basis(self):
+ # adding a path that was present in the basis should work.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.add(['foo'], ['foo-id'])
+ tree.commit('add foo')
+ tree.unversion(['foo-id'])
+ tree.add(['foo'], ['foo-id'])
+ self.assertEqual('foo-id', tree.path2id('foo'))
diff --git a/bzrlib/tests/per_workingtree/test_add_reference.py b/bzrlib/tests/per_workingtree/test_add_reference.py
new file mode 100644
index 0000000..a6f7112
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_add_reference.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2006-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib import errors, tests, workingtree
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestBasisInventory(TestCaseWithWorkingTree):
+
+ def make_trees(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.set_root_id('root-id')
+ self.build_tree(['tree/file1'])
+ tree.add('file1', 'file1-id')
+ sub_tree = self.make_branch_and_tree('tree/sub-tree')
+ sub_tree.set_root_id('sub-tree-root-id')
+ sub_tree.commit('commit', rev_id='sub_1')
+ return tree, sub_tree
+
+ def _references_unsupported(self, tree):
+ if not tree.supports_tree_reference():
+ raise tests.TestNotApplicable(
+ 'Tree format does not support references')
+ else:
+ self.fail('%r does not support references but should'
+ % (tree, ))
+
+ def make_nested_trees(self):
+ tree, sub_tree = self.make_trees()
+ try:
+ tree.add_reference(sub_tree)
+ except errors.UnsupportedOperation:
+ self._references_unsupported(tree)
+ return tree, sub_tree
+
+ def test_add_reference(self):
+ self.make_nested_trees()
+ tree = workingtree.WorkingTree.open('tree')
+ tree.lock_write()
+ try:
+ self.assertEqual(tree.path2id('sub-tree'), 'sub-tree-root-id')
+ self.assertEqual(tree.kind('sub-tree-root-id'), 'tree-reference')
+ tree.commit('commit reference')
+ basis = tree.basis_tree()
+ basis.lock_read()
+ try:
+ sub_tree = tree.get_nested_tree('sub-tree-root-id')
+ self.assertEqual(sub_tree.last_revision(),
+ tree.get_reference_revision('sub-tree-root-id'))
+ finally:
+ basis.unlock()
+ finally:
+ tree.unlock()
+
+ def test_add_reference_same_root(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file1'])
+ tree.add('file1', 'file1-id')
+ tree.set_root_id('root-id')
+ sub_tree = self.make_branch_and_tree('tree/sub-tree')
+ sub_tree.set_root_id('root-id')
+ try:
+ self.assertRaises(errors.BadReferenceTarget, tree.add_reference,
+ sub_tree)
+ except errors.UnsupportedOperation:
+ self._references_unsupported(tree)
+
+ def test_root_present(self):
+ """Subtree root is present, though not the working tree root"""
+ tree, sub_tree = self.make_trees()
+ sub_tree.set_root_id('file1-id')
+ try:
+ self.assertRaises(errors.BadReferenceTarget, tree.add_reference,
+ sub_tree)
+ except errors.UnsupportedOperation:
+ self._references_unsupported(tree)
+
+ def test_add_non_subtree(self):
+ tree, sub_tree = self.make_trees()
+ os.rename('tree/sub-tree', 'sibling')
+ sibling = workingtree.WorkingTree.open('sibling')
+ try:
+ self.assertRaises(errors.BadReferenceTarget, tree.add_reference,
+ sibling)
+ except errors.UnsupportedOperation:
+ self._references_unsupported(tree)
+
+ def test_get_nested_tree(self):
+ tree, sub_tree = self.make_nested_trees()
+ tree.lock_read()
+ try:
+ sub_tree2 = tree.get_nested_tree('sub-tree-root-id')
+ self.assertEqual(sub_tree.basedir, sub_tree2.basedir)
+ sub_tree2 = tree.get_nested_tree('sub-tree-root-id', 'sub-tree')
+ finally:
+ tree.unlock()
diff --git a/bzrlib/tests/per_workingtree/test_annotate_iter.py b/bzrlib/tests/per_workingtree/test_annotate_iter.py
new file mode 100644
index 0000000..b9cce24
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_annotate_iter.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'WorkingTree.annotate_iter'"""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestAnnotateIter(TestCaseWithWorkingTree):
+
+ def make_single_rev_tree(self):
+ builder = self.make_branch_builder('branch')
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'initial content\n')),
+ ])
+ b = builder.get_branch()
+ tree = b.create_checkout('tree', lightweight=True)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ return tree
+
+ def test_annotate_same_as_parent(self):
+ tree = self.make_single_rev_tree()
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-1', 'initial content\n')],
+ annotations)
+
+ def test_annotate_mod_from_parent(self):
+ tree = self.make_single_rev_tree()
+ self.build_tree_contents([('tree/file',
+ 'initial content\nnew content\n')])
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-1', 'initial content\n'),
+ ('current:', 'new content\n'),
+ ], annotations)
+
+ def test_annotate_merge_parents(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('modify', ('file-id', 'initial content\ncontent in 2\n')),
+ ])
+ builder.build_snapshot('rev-3', ['rev-1'], [
+ ('modify', ('file-id', 'initial content\ncontent in 3\n')),
+ ])
+ builder.finish_series()
+ b = builder.get_branch()
+ tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.set_parent_ids(['rev-2', 'rev-3'])
+ self.build_tree_contents([('tree/file',
+ 'initial content\ncontent in 2\n'
+ 'content in 3\nnew content\n')])
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-1', 'initial content\n'),
+ ('rev-2', 'content in 2\n'),
+ ('rev-3', 'content in 3\n'),
+ ('current:', 'new content\n'),
+ ], annotations)
+
+ def test_annotate_merge_parent_no_file(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None)),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('add', ('file', 'file-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('rev-3', ['rev-1'], [])
+ builder.finish_series()
+ b = builder.get_branch()
+ tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.set_parent_ids(['rev-2', 'rev-3'])
+ self.build_tree_contents([('tree/file',
+ 'initial content\nnew content\n')])
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-2', 'initial content\n'),
+ ('current:', 'new content\n'),
+ ], annotations)
+
+ def test_annotate_merge_parent_was_directory(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None)),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('add', ('file', 'file-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('rev-3', ['rev-1'], [
+ ('add', ('a_dir', 'file-id', 'directory', None)),
+ ])
+ builder.finish_series()
+ b = builder.get_branch()
+ tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.set_parent_ids(['rev-2', 'rev-3'])
+ self.build_tree_contents([('tree/file',
+ 'initial content\nnew content\n')])
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-2', 'initial content\n'),
+ ('current:', 'new content\n'),
+ ], annotations)
+
+ def test_annotate_same_as_merge_parent(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ])
+ builder.build_snapshot('rev-3', ['rev-1'], [
+ ('modify', ('file-id', 'initial content\ncontent in 3\n')),
+ ])
+ builder.finish_series()
+ b = builder.get_branch()
+ tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.set_parent_ids(['rev-2', 'rev-3'])
+ self.build_tree_contents([('tree/file',
+ 'initial content\ncontent in 3\n')])
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-1', 'initial content\n'),
+ ('rev-3', 'content in 3\n'),
+ ], annotations)
+
+ def test_annotate_same_as_merge_parent_supersedes(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'initial content\n')),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('modify', ('file-id', 'initial content\nnew content\n')),
+ ])
+ builder.build_snapshot('rev-3', ['rev-2'], [
+ ('modify', ('file-id', 'initial content\ncontent in 3\n')),
+ ])
+ builder.build_snapshot('rev-4', ['rev-3'], [
+ ('modify', ('file-id', 'initial content\nnew content\n')),
+ ])
+ # In this case, the content locally is the same as content in basis
+ # tree, but the merge revision states that *it* should win
+ builder.finish_series()
+ b = builder.get_branch()
+ tree = b.create_checkout('tree', revision_id='rev-2', lightweight=True)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.set_parent_ids(['rev-2', 'rev-4'])
+ annotations = tree.annotate_iter('file-id')
+ self.assertEqual([('rev-1', 'initial content\n'),
+ ('rev-4', 'new content\n'),
+ ], annotations)
+
diff --git a/bzrlib/tests/per_workingtree/test_basis_inventory.py b/bzrlib/tests/per_workingtree/test_basis_inventory.py
new file mode 100644
index 0000000..4da259c
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_basis_inventory.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2004, 2005, 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.tests import TestNotApplicable
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+import bzrlib.workingtree_4
+import bzrlib.xml6
+import bzrlib.xml7
+
+
+class TestBasisInventory(TestCaseWithWorkingTree):
+
+ def test_create(self):
+ # This test is not applicable to DirState based trees: the basis is
+        # not stored separately; it is mandatory.
+ if isinstance(self.workingtree_format,
+ bzrlib.workingtree_4.DirStateWorkingTreeFormat):
+ raise TestNotApplicable("not applicable to %r"
+ % (self.workingtree_format,))
+ # TODO: jam 20051218 this probably should add more than just
+ # a couple files to the inventory
+
+ # Make sure the basis file is created by a commit
+ t = self.make_branch_and_tree('.')
+ b = t.branch
+ with open('a', 'wb') as f: f.write('a\n')
+ t.add('a')
+ t.commit('a', rev_id='r1')
+
+ self.assertTrue(t._transport.has('basis-inventory-cache'))
+
+ basis_inv = t.basis_tree().root_inventory
+ self.assertEquals('r1', basis_inv.revision_id)
+
+ store_inv = b.repository.get_inventory('r1')
+ self.assertEqual([], store_inv._make_delta(basis_inv))
+
+ with open('b', 'wb') as f: f.write('b\n')
+ t.add('b')
+ t.commit('b', rev_id='r2')
+
+ self.assertTrue(t._transport.has('basis-inventory-cache'))
+
+ basis_inv_txt = t.read_basis_inventory()
+ basis_inv = bzrlib.xml7.serializer_v7.read_inventory_from_string(basis_inv_txt)
+ self.assertEquals('r2', basis_inv.revision_id)
+ store_inv = b.repository.get_inventory('r2')
+
+ self.assertEqual([], store_inv._make_delta(basis_inv))
+
+ def test_wrong_format(self):
+ """WorkingTree.basis safely ignores junk basis inventories"""
+ # This test is not applicable to DirState based trees: the basis is
+ # not separate and ignorable.
+ if isinstance(self.workingtree_format,
+ bzrlib.workingtree_4.DirStateWorkingTreeFormat):
+ raise TestNotApplicable("not applicable to %r"
+ % (self.workingtree_format,))
+ t = self.make_branch_and_tree('.')
+ b = t.branch
+ with open('a', 'wb') as f: f.write('a\n')
+ t.add('a')
+ t.commit('a', rev_id='r1')
+ t._transport.put_bytes('basis-inventory-cache', 'booga')
+ t.basis_tree()
+ t._transport.put_bytes('basis-inventory-cache', '<xml/>')
+ t.basis_tree()
+ t._transport.put_bytes('basis-inventory-cache', '<inventory />')
+ t.basis_tree()
+ t._transport.put_bytes('basis-inventory-cache',
+ '<inventory format="pi"/>')
+ t.basis_tree()
diff --git a/bzrlib/tests/per_workingtree/test_basis_tree.py b/bzrlib/tests/per_workingtree/test_basis_tree.py
new file mode 100644
index 0000000..91cb4cd
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_basis_tree.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that WorkingTree.basis_tree() yields a valid tree."""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestBasisTree(TestCaseWithWorkingTree):
+
+ def test_emtpy_tree(self):
+ """A working tree with no parents."""
+ tree = self.make_branch_and_tree('tree')
+ basis_tree = tree.basis_tree()
+
+ basis_tree.lock_read()
+ try:
+ self.assertEqual([], list(basis_tree.list_files(include_root=True)))
+ finally:
+ basis_tree.unlock()
+
+ def test_same_tree(self):
+ """Test basis_tree when working tree hasn't been modified."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file', 'dir/', 'dir/subfile'])
+ tree.add(['file', 'dir', 'dir/subfile'])
+ revision_id = tree.commit('initial tree')
+
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ try:
+ self.assertEqual(revision_id, basis_tree.get_revision_id())
+ # list_files() may return in either dirblock or sorted order
+ # TODO: jam 20070215 Should list_files have an explicit order?
+ self.assertEqual(['', 'dir', 'dir/subfile', 'file'],
+ sorted(info[0] for info in basis_tree.list_files(True)))
+ finally:
+ basis_tree.unlock()
+
+ def test_altered_tree(self):
+ """Test basis really is basis after working has been modified."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file', 'dir/', 'dir/subfile'])
+ tree.add(['file', 'dir', 'dir/subfile'])
+ revision_id = tree.commit('initial tree')
+
+ self.build_tree(['new file', 'new dir/'])
+ tree.rename_one('file', 'dir/new file')
+ tree.unversion([tree.path2id('dir/subfile')])
+ tree.add(['new file', 'new dir'])
+
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ try:
+ self.assertEqual(revision_id, basis_tree.get_revision_id())
+ # list_files() may return in either dirblock or sorted order
+ # TODO: jam 20070215 Should list_files have an explicit order?
+ self.assertEqual(['', 'dir', 'dir/subfile', 'file'],
+ sorted(info[0] for info in basis_tree.list_files(True)))
+ finally:
+ basis_tree.unlock()
diff --git a/bzrlib/tests/per_workingtree/test_break_lock.py b/bzrlib/tests/per_workingtree/test_break_lock.py
new file mode 100644
index 0000000..c881cbb
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_break_lock.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ errors,
+ ui,
+ )
+from bzrlib.tests import TestNotApplicable
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestBreakLock(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ super(TestBreakLock, self).setUp()
+ self.unused_workingtree = self.make_branch_and_tree('.')
+ self.workingtree = self.unused_workingtree.bzrdir.open_workingtree()
+
+ def test_unlocked(self):
+ # break lock when nothing is locked should just return
+ try:
+ self.workingtree.break_lock()
+ except NotImplementedError:
+ pass
+
+ def test_unlocked_repo_locked(self):
+ # break lock on the workingtree should try on the branch even
+ # if the workingtree isn't locked - and the easiest way
+ # to see if that happened is to lock the repo.
+ self.workingtree.branch.repository.lock_write()
+ ui.ui_factory = ui.CannedInputUIFactory([True])
+ try:
+ self.unused_workingtree.break_lock()
+ except NotImplementedError:
+ # workingtree does not support break_lock
+ self.workingtree.branch.repository.unlock()
+ return
+ if ui.ui_factory.responses == [True]:
+ raise TestNotApplicable("repository does not physically lock.")
+ self.assertRaises(errors.LockBroken,
+ self.workingtree.branch.repository.unlock)
+
+ def test_locked(self):
+        # break_lock when locked should break the lock.
+ self.workingtree.lock_write()
+ ui.ui_factory = ui.CannedInputUIFactory([True, True, True])
+ try:
+ self.unused_workingtree.break_lock()
+ except (NotImplementedError, errors.LockActive):
+ # workingtree does not support break_lock,
+ # or does not support breaking a lock held by an alive
+ # object/process.
+ self.workingtree.unlock()
+ return
+ self.assertRaises(errors.LockBroken, self.workingtree.unlock)
diff --git a/bzrlib/tests/per_workingtree/test_changes_from.py b/bzrlib/tests/per_workingtree/test_changes_from.py
new file mode 100644
index 0000000..65a1e12
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_changes_from.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test Tree.changes_from() for WorkingTree specific scenarios."""
+
+from bzrlib import revision
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestChangesFrom(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ super(TestChangesFrom, self).setUp()
+ self.tree = self.make_branch_and_tree('tree')
+ files = ['a', 'b/', 'b/c']
+ self.build_tree(files, transport=self.tree.bzrdir.root_transport)
+ self.tree.add(files, ['a-id', 'b-id', 'c-id'])
+ self.tree.commit('initial tree')
+
+ def test_unknown(self):
+ self.build_tree(['tree/unknown'])
+ # Unknowns are not reported by changes_from
+ d = self.tree.changes_from(self.tree.basis_tree())
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.modified)
+
+ def test_unknown_specific_file(self):
+ self.build_tree(['tree/unknown'])
+ empty_tree = self.tree.branch.repository.revision_tree(
+ revision.NULL_REVISION)
+
+ # If a specific_files list is present, even if none of the
+ # files are versioned, only paths that are present in the list
+ # should be compared
+ d = self.tree.changes_from(empty_tree, specific_files=['unknown'])
+ self.assertEqual([], d.added)
+ self.assertEqual([], d.removed)
+ self.assertEqual([], d.renamed)
+ self.assertEqual([], d.modified)
diff --git a/bzrlib/tests/per_workingtree/test_check.py b/bzrlib/tests/per_workingtree/test_check.py
new file mode 100644
index 0000000..bfd2495
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_check.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for checking of trees."""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+from bzrlib.workingtree import InventoryWorkingTree
+from bzrlib.tests import TestNotApplicable
+
+
+class TestCheck(TestCaseWithWorkingTree):
+
+ def test__get_check_refs_new(self):
+ tree = self.make_branch_and_tree('tree')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable(
+ "_get_check_refs only relevant for inventory working trees")
+ self.assertEqual(set([('trees', 'null:')]),
+ set(tree._get_check_refs()))
+
+ def test__get_check_refs_basis(self):
+ # with a basis, all current bzr trees cache it and so need the
+ # inventory to cross-check.
+ tree = self.make_branch_and_tree('tree')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable(
+ "_get_check_refs only relevant for inventory working trees")
+ revid = tree.commit('first post')
+ self.assertEqual(set([('trees', revid)]),
+ set(tree._get_check_refs()))
+
+ def test__check_with_refs(self):
+ # _check can be called with a dict of the things required.
+ tree = self.make_branch_and_tree('tree')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable(
+ "_get_check_refs only relevant for inventory working trees")
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ revid = tree.commit('first post')
+ needed_refs = tree._get_check_refs()
+ repo = tree.branch.repository
+ for ref in needed_refs:
+ kind, revid = ref
+ refs = {}
+ if kind == 'trees':
+ refs[ref] = repo.revision_tree(revid)
+ else:
+ self.fail('unknown ref kind')
+ tree._check(refs)
diff --git a/bzrlib/tests/per_workingtree/test_check_state.py b/bzrlib/tests/per_workingtree/test_check_state.py
new file mode 100644
index 0000000..475ed36
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_check_state.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTree.check_state."""
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+
+class TestCaseWithState(TestCaseWithWorkingTree):
+
+ def make_tree_with_broken_dirstate(self, path):
+ tree = self.make_branch_and_tree(path)
+ self.break_dirstate(tree)
+ return tree
+
+ def break_dirstate(self, tree, completely=False):
+ """Write garbage into the dirstate file."""
+ if getattr(tree, 'current_dirstate', None) is None:
+ raise tests.TestNotApplicable(
+ 'Only applies to dirstate-based trees')
+ tree.lock_read()
+ try:
+ dirstate = tree.current_dirstate()
+ dirstate_path = dirstate._filename
+ self.assertPathExists(dirstate_path)
+ finally:
+ tree.unlock()
+ # We have to have the tree unlocked at this point, so we can safely
+ # mutate the state file on all platforms.
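+ # (opening with 'wb' truncates the file completely; 'ab' merely appends
+ # garbage to the end)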
+ if completely:
+ f = open(dirstate_path, 'wb')
+ else:
+ f = open(dirstate_path, 'ab')
+ try:
+ f.write('garbage-at-end-of-file\n')
+ finally:
+ f.close()
+
+
+class TestCheckState(TestCaseWithState):
+
+ def test_check_state(self):
+ tree = self.make_branch_and_tree('tree')
+ # Everything should be fine with an unmodified tree, no exception
+ # should be raised.
+ tree.check_state()
+
+ def test_check_broken_dirstate(self):
+ tree = self.make_tree_with_broken_dirstate('tree')
+ self.assertRaises(errors.BzrError, tree.check_state)
+
+
+class TestResetState(TestCaseWithState):
+
+ def make_initial_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo', 'tree/dir/', 'tree/dir/bar'])
+ tree.add(['foo', 'dir', 'dir/bar'])
+ tree.commit('initial')
+ return tree
+
+ def test_reset_state_forgets_changes(self):
+ tree = self.make_initial_tree()
+ foo_id = tree.path2id('foo')
+ tree.rename_one('foo', 'baz')
+ self.assertEqual(None, tree.path2id('foo'))
+ self.assertEqual(foo_id, tree.path2id('baz'))
+ tree.reset_state()
+ # After reset, we should have forgotten about the rename in the tree
+ # state, but we won't have moved the file back on disk.
+ self.assertEqual(foo_id, tree.path2id('foo'))
+ self.assertEqual(None, tree.path2id('baz'))
+ self.assertPathDoesNotExist('tree/foo')
+ self.assertPathExists('tree/baz')
+
+ def test_reset_state_handles_corrupted_dirstate(self):
+ tree = self.make_initial_tree()
+ rev_id = tree.last_revision()
+ self.break_dirstate(tree)
+ tree.reset_state()
+ tree.check_state()
+ self.assertEqual(rev_id, tree.last_revision())
+
+ def test_reset_state_handles_destroyed_dirstate(self):
+ # If you pass the revision_id, we can handle a completely destroyed
+ # dirstate file.
+ tree = self.make_initial_tree()
+ rev_id = tree.last_revision()
+ self.break_dirstate(tree, completely=True)
+ tree.reset_state(revision_ids=[rev_id])
+ tree.check_state()
+ self.assertEqual(rev_id, tree.last_revision())
diff --git a/bzrlib/tests/per_workingtree/test_commit.py b/bzrlib/tests/per_workingtree/test_commit.py
new file mode 100644
index 0000000..53e8459
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_commit.py
@@ -0,0 +1,587 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib import (
+ branch,
+ conflicts,
+ controldir,
+ errors,
+ mutabletree,
+ osutils,
+ revision as _mod_revision,
+ tests,
+ ui,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.tests.testui import ProgressRecordingUIFactory
+
+
+class TestCommit(TestCaseWithWorkingTree):
+
+ def test_autodelete_renamed(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/dir/', 'a/dir/f1', 'a/dir/f2'])
+ tree_a.add(['dir', 'dir/f1', 'dir/f2'], ['dir-id', 'f1-id', 'f2-id'])
+ rev_id1 = tree_a.commit('init')
+ # Start off by renaming entries,
+ # but then actually auto delete the whole tree
+ # https://bugs.launchpad.net/bzr/+bug/114615
+ tree_a.rename_one('dir/f1', 'dir/a')
+ tree_a.rename_one('dir/f2', 'dir/z')
+ osutils.rmtree('a/dir')
+ tree_a.commit('autoremoved')
+
+ tree_a.lock_read()
+ try:
+ root_id = tree_a.get_root_id()
+ paths = [(path, ie.file_id)
+ for path, ie in tree_a.iter_entries_by_dir()]
+ finally:
+ tree_a.unlock()
+ # The only paths left should be the root
+ self.assertEqual([('', root_id)], paths)
+
+ def test_no_autodelete_renamed_away(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/dir/', 'a/dir/f1', 'a/dir/f2', 'a/dir2/'])
+ tree_a.add(['dir', 'dir/f1', 'dir/f2', 'dir2'],
+ ['dir-id', 'f1-id', 'f2-id', 'dir2-id'])
+ rev_id1 = tree_a.commit('init')
+ # Rename one entry out of this directory
+ tree_a.rename_one('dir/f1', 'dir2/a')
+ osutils.rmtree('a/dir')
+ tree_a.commit('autoremoved')
+
+ tree_a.lock_read()
+ try:
+ root_id = tree_a.get_root_id()
+ paths = [(path, ie.file_id)
+ for path, ie in tree_a.iter_entries_by_dir()]
+ finally:
+ tree_a.unlock()
+ # The only paths left should be the root
+ self.assertEqual([('', root_id), ('dir2', 'dir2-id'),
+ ('dir2/a', 'f1-id'),
+ ], paths)
+
+ def test_no_autodelete_alternate_renamed(self):
+ # Test for bug #114615
+ tree_a = self.make_branch_and_tree('A')
+ self.build_tree(['A/a/', 'A/a/m', 'A/a/n'])
+ tree_a.add(['a', 'a/m', 'a/n'], ['a-id', 'm-id', 'n-id'])
+ tree_a.commit('init')
+
+ tree_a.lock_read()
+ try:
+ root_id = tree_a.get_root_id()
+ finally:
+ tree_a.unlock()
+
+ tree_b = tree_a.bzrdir.sprout('B').open_workingtree()
+ self.build_tree(['B/xyz/'])
+ tree_b.add(['xyz'], ['xyz-id'])
+ tree_b.rename_one('a/m', 'xyz/m')
+ osutils.rmtree('B/a')
+ tree_b.commit('delete in B')
+
+ paths = [(path, ie.file_id)
+ for path, ie in tree_b.iter_entries_by_dir()]
+ self.assertEqual([('', root_id),
+ ('xyz', 'xyz-id'),
+ ('xyz/m', 'm-id'),
+ ], paths)
+
+ self.build_tree_contents([('A/a/n', 'new contents for n\n')])
+ tree_a.commit('change n in A')
+
+ # Merging from A should introduce conflicts because 'n' was modified
+ # (in A) and removed (in B), so 'a' needs to be restored.
+ num_conflicts = tree_b.merge_from_branch(tree_a.branch)
+ self.assertEqual(3, num_conflicts)
+ paths = [(path, ie.file_id)
+ for path, ie in tree_b.iter_entries_by_dir()]
+ self.assertEqual([('', root_id),
+ ('a', 'a-id'),
+ ('xyz', 'xyz-id'),
+ ('a/n.OTHER', 'n-id'),
+ ('xyz/m', 'm-id'),
+ ], paths)
+ osutils.rmtree('B/a')
+ try:
+ # bzr resolve --all
+ tree_b.set_conflicts(conflicts.ConflictList())
+ except errors.UnsupportedOperation:
+ # On WT2, set_conflicts is unsupported, but the rmtree has the same
+ # effect.
+ pass
+ tree_b.commit('autoremove a, without touching xyz/m')
+ paths = [(path, ie.file_id)
+ for path, ie in tree_b.iter_entries_by_dir()]
+ self.assertEqual([('', root_id),
+ ('xyz', 'xyz-id'),
+ ('xyz/m', 'm-id'),
+ ], paths)
+
+ def test_commit_exclude_pending_merge_fails(self):
+ """Excludes are a form of partial commit."""
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ wt.add('foo')
+ wt.commit('commit one')
+ wt2 = wt.bzrdir.sprout('to').open_workingtree()
+ wt2.commit('change_right')
+ wt.merge_from_branch(wt2.branch)
+ try:
+ self.assertRaises(errors.CannotCommitSelectedFileMerge,
+ wt.commit, 'test', exclude=['foo'])
+ except errors.ExcludesUnsupported:
+ raise tests.TestNotApplicable("excludes not supported by this "
+ "repository format")
+
+ def test_commit_exclude_exclude_changed_is_pointless(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.smart_add(['.'])
+ tree.commit('setup test')
+ self.build_tree_contents([('a', 'new contents for "a"\n')])
+ try:
+ self.assertRaises(errors.PointlessCommit, tree.commit, 'test',
+ exclude=['a'], allow_pointless=False)
+ except errors.ExcludesUnsupported:
+ raise tests.TestNotApplicable("excludes not supported by this "
+ "repository format")
+
+ def test_commit_exclude_excludes_modified_files(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'c'])
+ tree.smart_add(['.'])
+ try:
+ tree.commit('test', exclude=['b', 'c'])
+ except errors.ExcludesUnsupported:
+ raise tests.TestNotApplicable("excludes not supported by this "
+ "repository format")
+ # If b was excluded it will still be 'added' in status.
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ changes = list(tree.iter_changes(tree.basis_tree()))
+ self.assertEqual(2, len(changes))
+ self.assertEqual((None, 'b'), changes[0][1])
+ self.assertEqual((None, 'c'), changes[1][1])
+
+ def test_commit_exclude_subtree_of_selected(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b'])
+ tree.smart_add(['.'])
+ try:
+ tree.commit('test', specific_files=['a'], exclude=['a/b'])
+ except errors.ExcludesUnsupported:
+ raise tests.TestNotApplicable("excludes not supported by this "
+ "repository format")
+ # If a/b was excluded it will still be 'added' in status.
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ changes = list(tree.iter_changes(tree.basis_tree()))
+ self.assertEqual(1, len(changes))
+ self.assertEqual((None, 'a/b'), changes[0][1])
+
+ def test_commit_sets_last_revision(self):
+ tree = self.make_branch_and_tree('tree')
+ committed_id = tree.commit('foo', rev_id='foo')
+ self.assertEqual(['foo'], tree.get_parent_ids())
+ # the commit should have returned the same id we asked for.
+ self.assertEqual('foo', committed_id)
+
+ def test_commit_returns_revision_id(self):
+ tree = self.make_branch_and_tree('.')
+ committed_id = tree.commit('message')
+ self.assertTrue(tree.branch.repository.has_revision(committed_id))
+ self.assertNotEqual(None, committed_id)
+
+ def test_commit_local_unbound(self):
+ # using the library api to do a local commit on unbound branches is
+ # also an error
+ tree = self.make_branch_and_tree('tree')
+ self.assertRaises(errors.LocalRequiresBoundBranch,
+ tree.commit,
+ 'foo',
+ local=True)
+
+ def test_commit_merged_kind_change(self):
+ """Test merging a kind change.
+
+ Test making a kind change in a working tree, and then merging that
+ from another. When committed it should commit the new kind.
+ """
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(['a'])
+ wt.commit('commit one')
+ wt2 = wt.bzrdir.sprout('to').open_workingtree()
+ os.remove('a')
+ os.mkdir('a')
+ wt.commit('changed kind')
+ wt2.merge_from_branch(wt.branch)
+ wt2.commit('merged kind change')
+
+ def test_commit_aborted_does_not_apply_automatic_changes_bug_282402(self):
+ wt = self.make_branch_and_tree('.')
+ wt.add(['a'], ['a-id'], ['file'])
+ def fail_message(obj):
+ raise errors.BzrCommandError("empty commit message")
+ self.assertRaises(errors.BzrCommandError, wt.commit,
+ message_callback=fail_message)
+ self.assertEqual('a', wt.id2path('a-id'))
+
+ def test_local_commit_ignores_master(self):
+ # a --local commit does not require access to the master branch
+ # at all, or even for it to exist.
+ # we test this by setting up a bound branch and then corrupting
+ # the master.
+ master = self.make_branch('master')
+ tree = self.make_branch_and_tree('tree')
+ try:
+ tree.branch.bind(master)
+ except errors.UpgradeRequired:
+ # older format.
+ return
+ master.bzrdir.transport.put_bytes('branch-format', 'garbage')
+ del master
+ # check it's corrupted.
+ self.assertRaises(errors.UnknownFormatError,
+ controldir.ControlDir.open,
+ 'master')
+ tree.commit('foo', rev_id='foo', local=True)
+
+ def test_local_commit_does_not_push_to_master(self):
+ # a --local commit does not require access to the master branch
+ # at all, or even for it to exist.
+ # we test that even when it's available it does not push to it.
+ master = self.make_branch('master')
+ tree = self.make_branch_and_tree('tree')
+ try:
+ tree.branch.bind(master)
+ except errors.UpgradeRequired:
+ # older format.
+ return
+ tree.commit('foo', rev_id='foo', local=True)
+ self.assertFalse(master.repository.has_revision('foo'))
+ self.assertEqual(_mod_revision.NULL_REVISION,
+ (_mod_revision.ensure_null(master.last_revision())))
+
+ def test_record_initial_ghost(self):
+ """The working tree needs to record ghosts during commit."""
+ wt = self.make_branch_and_tree('.')
+ wt.set_parent_ids(['non:existent@rev--ision--0--2'],
+ allow_leftmost_as_ghost=True)
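+ # allow_leftmost_as_ghost lets the first parent be a revision that is not
+ # present in the repository (a ghost)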
+ rev_id = wt.commit('commit against a ghost first parent.')
+ rev = wt.branch.repository.get_revision(rev_id)
+ self.assertEqual(rev.parent_ids, ['non:existent@rev--ision--0--2'])
+ # parent_sha1s is not populated now, WTF. rbc 20051003
+ self.assertEqual(len(rev.parent_sha1s), 0)
+
+ def test_record_two_ghosts(self):
+ """The working tree should preserve all the parents during commit."""
+ wt = self.make_branch_and_tree('.')
+ wt.set_parent_ids([
+ 'foo@azkhazan-123123-abcabc',
+ 'wibble@fofof--20050401--1928390812',
+ ],
+ allow_leftmost_as_ghost=True)
+ rev_id = wt.commit("commit from ghost base with one merge")
+ # the revision should have been committed with two parents
+ rev = wt.branch.repository.get_revision(rev_id)
+ self.assertEqual(['foo@azkhazan-123123-abcabc',
+ 'wibble@fofof--20050401--1928390812'],
+ rev.parent_ids)
+
+ def test_commit_deleted_subtree_and_files_updates_workingtree(self):
+ """The working trees inventory may be adjusted by commit."""
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ self.build_tree(['a', 'b/', 'b/c', 'd'])
+ wt.add(['a', 'b', 'b/c', 'd'], ['a-id', 'b-id', 'c-id', 'd-id'])
+ this_dir = self.get_transport()
+ this_dir.delete_tree('b')
+ this_dir.delete('d')
+ # now we have a tree with a through d in the inventory, but only
+ # a present on disk. After commit b-id, c-id and d-id should be
+ # missing from the inventory, within the same tree transaction.
+ wt.commit('commit stuff')
+ self.assertTrue(wt.has_id('a-id'))
+ self.assertFalse(wt.has_or_had_id('b-id'))
+ self.assertFalse(wt.has_or_had_id('c-id'))
+ self.assertFalse(wt.has_or_had_id('d-id'))
+ self.assertTrue(wt.has_filename('a'))
+ self.assertFalse(wt.has_filename('b'))
+ self.assertFalse(wt.has_filename('b/c'))
+ self.assertFalse(wt.has_filename('d'))
+ wt.unlock()
+ # the changes should have persisted to disk - reopen the workingtree
+ # to be sure.
+ wt = wt.bzrdir.open_workingtree()
+ wt.lock_read()
+ self.assertTrue(wt.has_id('a-id'))
+ self.assertFalse(wt.has_or_had_id('b-id'))
+ self.assertFalse(wt.has_or_had_id('c-id'))
+ self.assertFalse(wt.has_or_had_id('d-id'))
+ self.assertTrue(wt.has_filename('a'))
+ self.assertFalse(wt.has_filename('b'))
+ self.assertFalse(wt.has_filename('b/c'))
+ self.assertFalse(wt.has_filename('d'))
+ wt.unlock()
+
+ def test_commit_deleted_subtree_with_removed(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c', 'd'])
+ wt.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ wt.commit('first')
+ wt.remove('b/c')
+ this_dir = self.get_transport()
+ this_dir.delete_tree('b')
+ wt.lock_write()
+ wt.commit('commit deleted rename')
+ self.assertTrue(wt.has_id('a-id'))
+ self.assertFalse(wt.has_or_had_id('b-id'))
+ self.assertFalse(wt.has_or_had_id('c-id'))
+ self.assertTrue(wt.has_filename('a'))
+ self.assertFalse(wt.has_filename('b'))
+ self.assertFalse(wt.has_filename('b/c'))
+ wt.unlock()
+
+ def test_commit_move_new(self):
+ wt = self.make_branch_and_tree('first')
+ wt.commit('first')
+ wt2 = wt.bzrdir.sprout('second').open_workingtree()
+ self.build_tree(['second/name1'])
+ wt2.add('name1', 'name1-id')
+ wt2.commit('second')
+ wt.merge_from_branch(wt2.branch)
+ wt.rename_one('name1', 'name2')
+ wt.commit('third')
+ wt.path2id('name1-id')
+
+ def test_nested_commit(self):
+ """Commit in multiply-nested trees"""
+ tree = self.make_branch_and_tree('.')
+ if not tree.supports_tree_reference():
+ # inapplicable test.
+ return
+ subtree = self.make_branch_and_tree('subtree')
+ subsubtree = self.make_branch_and_tree('subtree/subtree')
+ subtree.add(['subtree'])
+ tree.add(['subtree'])
+ # use allow_pointless=False to ensure that the deepest tree, which
+ # has no commits made to it, does not get a pointless commit.
+ rev_id = tree.commit('added reference', allow_pointless=False)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # the deepest subtree has not changed, so no commit should take place.
+ self.assertEqual('null:', subsubtree.last_revision())
+ # the intermediate tree should have committed a pointer to the current
+ # subtree revision.
+ sub_basis = subtree.basis_tree()
+ sub_basis.lock_read()
+ self.addCleanup(sub_basis.unlock)
+ self.assertEqual(subsubtree.last_revision(),
+ sub_basis.get_reference_revision(sub_basis.path2id('subtree')))
+ # the intermediate tree has changed, so should have had a commit
+ # take place.
+ self.assertNotEqual(None, subtree.last_revision())
+ # the outer tree should have committed a pointer to the current
+ # subtree revision.
+ basis = tree.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ self.assertEqual(subtree.last_revision(),
+ basis.get_reference_revision(basis.path2id('subtree')))
+ # the outer tree must have changed too.
+ self.assertNotEqual(None, rev_id)
+
+ def test_nested_commit_second_commit_detects_changes(self):
+ """Commit with a nested tree picks up the correct child revid."""
+ tree = self.make_branch_and_tree('.')
+ if not tree.supports_tree_reference():
+ # inapplicable test.
+ return
+ subtree = self.make_branch_and_tree('subtree')
+ tree.add(['subtree'])
+ self.build_tree(['subtree/file'])
+ subtree.add(['file'], ['file-id'])
+ rev_id = tree.commit('added reference', allow_pointless=False)
+ tree.get_reference_revision(tree.path2id('subtree'))
+ child_revid = subtree.last_revision()
+ # now change the child tree
+ self.build_tree_contents([('subtree/file', 'new-content')])
+ # and commit in the parent should commit the child and grab its revid,
+ # we test with allow_pointless=False here so that we are simulating
+ # what users will see.
+ rev_id2 = tree.commit('changed subtree only', allow_pointless=False)
+ # the child tree has changed, so should have had a commit
+ # take place.
+ self.assertNotEqual(None, subtree.last_revision())
+ self.assertNotEqual(child_revid, subtree.last_revision())
+ # the outer tree should have committed a pointer to the current
+ # subtree revision.
+ basis = tree.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ self.assertEqual(subtree.last_revision(),
+ basis.get_reference_revision(basis.path2id('subtree')))
+ self.assertNotEqual(rev_id, rev_id2)
+
+ def test_nested_pointless_commits_are_pointless(self):
+ tree = self.make_branch_and_tree('.')
+ if not tree.supports_tree_reference():
+ # inapplicable test.
+ return
+ subtree = self.make_branch_and_tree('subtree')
+ tree.add(['subtree'])
+ # record the reference.
+ rev_id = tree.commit('added reference')
+ child_revid = subtree.last_revision()
+ # now do a no-op commit with allow_pointless=False
+ self.assertRaises(errors.PointlessCommit, tree.commit, '',
+ allow_pointless=False)
+ self.assertEqual(child_revid, subtree.last_revision())
+ self.assertEqual(rev_id, tree.last_revision())
+
+
+class TestCommitProgress(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ super(TestCommitProgress, self).setUp()
+ ui.ui_factory = ProgressRecordingUIFactory()
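+ # use a UI factory that records progress calls instead of displaying them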
+
+ def test_commit_progress_steps(self):
+ # during commit we get one progress update for every entry in the
+ # inventory, then one for the inventory itself, and one for the
+ # revision insertions.
+ # first we need a test commit to do. Let's set up a branch with
+ # 3 files, and alter one in a selected-file commit. This exercises
+ # a number of cases quickly. We should also test things like
+ # selective commits which exclude newly added files.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'c'])
+ tree.add(['a', 'b', 'c'])
+ tree.commit('first post')
+ f = file('b', 'wt')
+ f.write('new content')
+ f.close()
+ # set a progress bar that captures the calls so we can see what is
+ # emitted
+ factory = ProgressRecordingUIFactory()
+ ui.ui_factory = factory
+ # TODO RBC 20060421 it would be nice to merge the reporter output
+ # into the factory for this test - just make the test ui factory
+ # pun as a reporter. Then we can check the ordering is right.
+ tree.commit('second post', specific_files=['b'])
+ # 5 steps, the first of which is reported 2 times, once per dir
+ self.assertEqual(
+ [('update', 1, 5, 'Collecting changes [0] - Stage'),
+ ('update', 1, 5, 'Collecting changes [1] - Stage'),
+ ('update', 2, 5, 'Saving data locally - Stage'),
+ ('update', 3, 5, 'Running pre_commit hooks - Stage'),
+ ('update', 4, 5, 'Updating the working tree - Stage'),
+ ('update', 5, 5, 'Running post_commit hooks - Stage')],
+ factory._calls
+ )
+
+ def test_commit_progress_shows_post_hook_names(self):
+ tree = self.make_branch_and_tree('.')
+ # set a progress bar that captures the calls so we can see what is
+ # emitted
+ factory = ProgressRecordingUIFactory()
+ ui.ui_factory = factory
+ def a_hook(_, _2, _3, _4, _5, _6):
+ pass
+ branch.Branch.hooks.install_named_hook('post_commit', a_hook,
+ 'hook name')
+ tree.commit('first post')
+ self.assertEqual(
+ [('update', 1, 5, 'Collecting changes [0] - Stage'),
+ ('update', 1, 5, 'Collecting changes [1] - Stage'),
+ ('update', 2, 5, 'Saving data locally - Stage'),
+ ('update', 3, 5, 'Running pre_commit hooks - Stage'),
+ ('update', 4, 5, 'Updating the working tree - Stage'),
+ ('update', 5, 5, 'Running post_commit hooks - Stage'),
+ ('update', 5, 5, 'Running post_commit hooks [hook name] - Stage'),
+ ],
+ factory._calls
+ )
+
+ def test_commit_progress_shows_pre_hook_names(self):
+ tree = self.make_branch_and_tree('.')
+ # set a progress bar that captures the calls so we can see what is
+ # emitted
+ factory = ProgressRecordingUIFactory()
+ ui.ui_factory = factory
+ def a_hook(_, _2, _3, _4, _5, _6, _7, _8):
+ pass
+ branch.Branch.hooks.install_named_hook('pre_commit', a_hook,
+ 'hook name')
+ tree.commit('first post')
+ self.assertEqual(
+ [('update', 1, 5, 'Collecting changes [0] - Stage'),
+ ('update', 1, 5, 'Collecting changes [1] - Stage'),
+ ('update', 2, 5, 'Saving data locally - Stage'),
+ ('update', 3, 5, 'Running pre_commit hooks - Stage'),
+ ('update', 3, 5, 'Running pre_commit hooks [hook name] - Stage'),
+ ('update', 4, 5, 'Updating the working tree - Stage'),
+ ('update', 5, 5, 'Running post_commit hooks - Stage'),
+ ],
+ factory._calls
+ )
+
+ def test_start_commit_hook(self):
+ """Make sure a start commit hook can modify the tree that is
+ committed."""
+ def start_commit_hook_adds_file(tree):
+ with open(tree.abspath("newfile"), 'w') as f: f.write("data")
+ tree.add(["newfile"])
+ def restoreDefaults():
+ mutabletree.MutableTree.hooks['start_commit'] = []
+ self.addCleanup(restoreDefaults)
+ tree = self.make_branch_and_tree('.')
+ mutabletree.MutableTree.hooks.install_named_hook(
+ 'start_commit',
+ start_commit_hook_adds_file,
+ None)
+ revid = tree.commit('first post')
+ committed_tree = tree.basis_tree()
+ self.assertTrue(committed_tree.has_filename("newfile"))
+
+ def test_post_commit_hook(self):
+ """Make sure a post_commit hook is called after a commit."""
+ def post_commit_hook_test_params(params):
+ self.assertTrue(isinstance(params,
+ mutabletree.PostCommitHookParams))
+ self.assertTrue(isinstance(params.mutable_tree,
+ mutabletree.MutableTree))
+ with open(tree.abspath("newfile"), 'w') as f: f.write("data")
+ params.mutable_tree.add(["newfile"])
+ tree = self.make_branch_and_tree('.')
+ mutabletree.MutableTree.hooks.install_named_hook(
+ 'post_commit',
+ post_commit_hook_test_params,
+ None)
+ self.assertFalse(tree.has_filename("newfile"))
+ revid = tree.commit('first post')
+ self.assertTrue(tree.has_filename("newfile"))
+ committed_tree = tree.basis_tree()
+ self.assertFalse(committed_tree.has_filename("newfile"))
diff --git a/bzrlib/tests/per_workingtree/test_content_filters.py b/bzrlib/tests/per_workingtree/test_content_filters.py
new file mode 100644
index 0000000..afa7591
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_content_filters.py
@@ -0,0 +1,377 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for content filtering conformance"""
+
+import os
+
+from bzrlib.controldir import ControlDir
+from bzrlib.filters import ContentFilter
+from bzrlib.switch import switch
+from bzrlib.workingtree import WorkingTree
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+def _converter_helper(chunks, fn):
+ result = []
+ for chunk in chunks:
+ result.append(getattr(chunk, fn)())
+ return iter(result)
+
+
+def _swapcase(chunks, context=None):
+ """A converter that swaps the case of text."""
+ return _converter_helper(chunks, 'swapcase')
+
+
+def _uppercase(chunks, context=None):
+ """A converter that converts text to uppercase."""
+ return _converter_helper(chunks, 'upper')
+
+
+def _lowercase(chunks, context=None):
+ """A converter that converts text to lowercase."""
+ return _converter_helper(chunks, 'lower')
+
+
+_trailer_string = '\nend string\n'
+
+
+def _append_text(chunks, context=None):
+ """A content filter that appends a string to the end of the file.
+
+ This tests filters that change the length."""
+ return chunks + [_trailer_string]
+
+
+def _remove_appended_text(chunks, context=None):
+ """Remove the appended text."""
+
+ text = ''.join(chunks)
+ if text.endswith(_trailer_string):
+ text = text[:-len(_trailer_string)]
+ return [text]
+
+
+class TestWorkingTreeWithContentFilters(TestCaseWithWorkingTree):
+
+ def create_cf_tree(self, txt_reader, txt_writer, dir='.'):
+ tree = self.make_branch_and_tree(dir)
+ def _content_filter_stack(path=None, file_id=None):
+ if path.endswith('.txt'):
+ return [ContentFilter(txt_reader, txt_writer)]
+ else:
+ return []
+ tree._content_filter_stack = _content_filter_stack
+ self.build_tree_contents([
+ (dir + '/file1.txt', 'Foo Txt'),
+ (dir + '/file2.bin', 'Foo Bin')])
+ tree.add(['file1.txt', 'file2.bin'])
+ tree.commit('commit raw content')
+ txt_fileid = tree.path2id('file1.txt')
+ bin_fileid = tree.path2id('file2.bin')
+ return tree, txt_fileid, bin_fileid
+
+ def create_cf_tree_with_two_revisions(self, txt_reader, txt_writer,
+ dir='.'):
+ tree = self.make_branch_and_tree(dir)
+ def _content_filter_stack(path=None, file_id=None):
+ if path.endswith('.txt'):
+ return [ContentFilter(txt_reader, txt_writer)]
+ else:
+ return []
+ tree._content_filter_stack = _content_filter_stack
+ self.build_tree_contents([
+ (dir + '/file1.txt', 'Foo Txt'),
+ (dir + '/file2.bin', 'Foo Bin'),
+ (dir + '/file3.txt', 'Bar Txt'),
+ ])
+ tree.add(['file1.txt', 'file2.bin', 'file3.txt'])
+ tree.commit('commit raw content')
+ fileid_1 = tree.path2id('file1.txt')
+ fileid_2 = tree.path2id('file2.bin')
+ fileid_3 = tree.path2id('file3.txt')
+ # Commit another revision with various changes. We make sure
+ # the change includes a modification, an addition and a deletion.
+ # Renames are more complex and need a separate set of tests later.
+ self.build_tree_contents([
+ (dir + '/file1.txt', 'Foo ROCKS!'),
+ (dir + '/file4.txt', 'Hello World'),
+ ])
+ tree.add(['file4.txt'])
+ tree.remove(['file3.txt'], keep_files=False)
+ tree.commit("change, add and rename stuff")
+ fileid_4 = tree.path2id('file4.txt')
+ return tree, fileid_1, fileid_2, fileid_3, fileid_4
+
+ def patch_in_content_filter(self):
+ # Patch in a custom, symmetric content filter stack. It's pretty gross
+ # that we need to monkey-patch a class method to do this, but it's
+ # the easiest way currently given we don't have easy access to the
+ # WorkingTree after it is created but before the filter stack is used
+ # to populate content.
+ def new_stack(tree, path=None, file_id=None):
+ if path.endswith('.txt'):
+ return [ContentFilter(_swapcase, _swapcase)]
+ else:
+ return []
+ self.overrideAttr(WorkingTree, '_content_filter_stack', new_stack)
+
+ def assert_basis_content(self, expected_content, branch, file_id):
+ # Note: We need to use try/finally here instead of addCleanup()
+ # as the latter leaves the read lock in place too long
+ basis = branch.basis_tree()
+ basis.lock_read()
+ try:
+ self.assertEqual(expected_content, basis.get_file_text(file_id))
+ finally:
+ basis.unlock()
+
+ def test_symmetric_content_filtering(self):
+ # test handling when read then write gives back the initial content
+ tree, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=_swapcase, txt_writer=_swapcase)
+ # Check that the basis tree has the expected content
+ basis = tree.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ if tree.supports_content_filtering():
+ expected = "fOO tXT"
+ else:
+ expected = "Foo Txt"
+ self.assertEqual(expected, basis.get_file_text(txt_fileid))
+ self.assertEqual('Foo Bin', basis.get_file_text(bin_fileid))
+ # Check that the working tree has the original content
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('Foo Txt', tree.get_file(txt_fileid,
+ filtered=False).read())
+ self.assertEqual('Foo Bin', tree.get_file(bin_fileid,
+ filtered=False).read())
+
+ def test_readonly_content_filtering(self):
+ # test handling with a read filter but no write filter
+ tree, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=_uppercase, txt_writer=None)
+ # Check that the basis tree has the expected content
+ basis = tree.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ if tree.supports_content_filtering():
+ expected = "FOO TXT"
+ else:
+ expected = "Foo Txt"
+ self.assertEqual(expected, basis.get_file_text(txt_fileid))
+ self.assertEqual('Foo Bin', basis.get_file_text(bin_fileid))
+ # We expect the workingtree content to be unchanged (for now at least)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('Foo Txt', tree.get_file(txt_fileid,
+ filtered=False).read())
+ self.assertEqual('Foo Bin', tree.get_file(bin_fileid,
+ filtered=False).read())
+
+ def test_branch_source_filtered_target_not(self):
+ # Create a source branch with content filtering
+ source, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=_uppercase, txt_writer=_lowercase, dir='source')
+ if not source.supports_content_filtering():
+ return
+ self.assertFileEqual("Foo Txt", 'source/file1.txt')
+ self.assert_basis_content("FOO TXT", source, txt_fileid)
+
+ # Now branch it
+ self.run_bzr('branch source target')
+ target = WorkingTree.open('target')
+ # Even though the content in source and target is different
+ # due to different filters, iter_changes should be clean
+ self.assertFileEqual("FOO TXT", 'target/file1.txt')
+ changes = target.changes_from(source.basis_tree())
+ self.assertFalse(changes.has_changed())
+
+ def test_branch_source_not_filtered_target_is(self):
+ # Create a source branch with content filtering
+ source, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=None, txt_writer=None, dir='source')
+ if not source.supports_content_filtering():
+ return
+ self.assertFileEqual("Foo Txt", 'source/file1.txt')
+ self.assert_basis_content("Foo Txt", source, txt_fileid)
+
+ # Now patch in content filtering and branch the source
+ self.patch_in_content_filter()
+ self.run_bzr('branch source target')
+ target = WorkingTree.open('target')
+ # Even though the content in source and target is different
+ # due to different filters, iter_changes should be clean
+ self.assertFileEqual("fOO tXT", 'target/file1.txt')
+ changes = target.changes_from(source.basis_tree())
+ self.assertFalse(changes.has_changed())
+
+ def test_path_content_summary(self):
+ """path_content_summary should always talk about the canonical form."""
+ # see https://bugs.launchpad.net/bzr/+bug/415508
+ #
+ # set up a tree where the canonical form has a string added to the
+ # end
+ source, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=_append_text,
+ txt_writer=_remove_appended_text,
+ dir='source')
+ if not source.supports_content_filtering():
+ return
+ source.lock_read()
+ self.addCleanup(source.unlock)
+
+ expected_canonical_form = 'Foo Txt\nend string\n'
+ self.assertEquals(source.get_file(txt_fileid, filtered=True).read(),
+ expected_canonical_form)
+ self.assertEquals(source.get_file(txt_fileid, filtered=False).read(),
+ 'Foo Txt')
+
+ # results are: kind, size, executable, sha1_or_link_target
+ result = source.path_content_summary('file1.txt')
+
+ self.assertEquals(result,
+ ('file', None, False, None))
+
+ # we could give back the length of the canonical form, but in general
+ # that will be expensive to compute, so it's acceptable to just return
+ # None.
+
+ def test_content_filtering_applied_on_pull(self):
+ # Create a source branch with two revisions
+ source, fileid_1, fileid_2, fileid_3, fileid_4 = \
+ self.create_cf_tree_with_two_revisions(txt_reader=None,
+ txt_writer=None, dir='source')
+ if not source.supports_content_filtering():
+ return
+ self.assertFileEqual("Foo ROCKS!", 'source/file1.txt')
+ self.assert_basis_content("Foo ROCKS!", source, fileid_1)
+
+ # Now patch in content filtering and branch from revision 1
+ self.patch_in_content_filter()
+ self.run_bzr('branch -r1 source target')
+ target = WorkingTree.open('target')
+ self.assert_basis_content("Foo Txt", target, fileid_1)
+ self.assertFileEqual("fOO tXT", 'target/file1.txt')
+ self.assert_basis_content("Foo Bin", target, fileid_2)
+ self.assertFileEqual("Foo Bin", 'target/file2.bin')
+ self.assert_basis_content("Bar Txt", target, fileid_3)
+ self.assertFileEqual("bAR tXT", 'target/file3.txt')
+
+ # Pull the latter change and check the target tree is updated
+ self.run_bzr('pull -d target')
+ self.assert_basis_content("Foo ROCKS!", target, fileid_1)
+ self.assertFileEqual("fOO rocks!", 'target/file1.txt')
+ self.assert_basis_content("Foo Bin", target, fileid_2)
+ self.assert_basis_content("Hello World", target, fileid_4)
+ self.assertFileEqual("hELLO wORLD", 'target/file4.txt')
+
+ def test_content_filtering_applied_on_merge(self):
+ # Create a source branch with two revisions
+ source, fileid_1, fileid_2, fileid_3, fileid_4 = \
+ self.create_cf_tree_with_two_revisions(txt_reader=None,
+ txt_writer=None, dir='source')
+ if not source.supports_content_filtering():
+ return
+ self.assert_basis_content("Foo ROCKS!", source, fileid_1)
+ self.assertFileEqual("Foo ROCKS!", 'source/file1.txt')
+ self.assert_basis_content("Foo Bin", source, fileid_2)
+ self.assert_basis_content("Hello World", source, fileid_4)
+ self.assertFileEqual("Hello World", 'source/file4.txt')
+
+ # Now patch in content filtering and branch from revision 1
+ self.patch_in_content_filter()
+ self.run_bzr('branch -r1 source target')
+ target = WorkingTree.open('target')
+ self.assert_basis_content("Foo Txt", target, fileid_1)
+ self.assertFileEqual("fOO tXT", 'target/file1.txt')
+ self.assertFileEqual("Foo Bin", 'target/file2.bin')
+ self.assertFileEqual("bAR tXT", 'target/file3.txt')
+
+ # Merge the latter change and check the target tree is updated
+ self.run_bzr('merge -d target source')
+ self.assertFileEqual("fOO rocks!", 'target/file1.txt')
+ self.assertFileEqual("hELLO wORLD", 'target/file4.txt')
+
+ # Commit the merge and check the right content is stored
+ target.commit("merge file1.txt changes from source")
+ self.assert_basis_content("Foo ROCKS!", target, fileid_1)
+ self.assert_basis_content("Hello World", target, fileid_4)
+
+ def test_content_filtering_applied_on_switch(self):
+ # Create a source branch with two revisions
+ source, fileid_1, fileid_2, fileid_3, fileid_4 = \
+ self.create_cf_tree_with_two_revisions(txt_reader=None,
+ txt_writer=None, dir='branch-a')
+ if not source.supports_content_filtering():
+ return
+
+ # Now patch in content filtering and branch from revision 1
+ self.patch_in_content_filter()
+ self.run_bzr('branch -r1 branch-a branch-b')
+
+ # Now create a lightweight checkout referring to branch-b
+ self.run_bzr('checkout --lightweight branch-b checkout')
+ self.assertFileEqual("fOO tXT", 'checkout/file1.txt')
+
+ # Switch it to branch-b and check the tree is updated
+ checkout_control_dir = ControlDir.open_containing('checkout')[0]
+ switch(checkout_control_dir, source.branch)
+ self.assertFileEqual("fOO rocks!", 'checkout/file1.txt')
+ self.assertFileEqual("hELLO wORLD", 'checkout/file4.txt')
+
+ def test_content_filtering_applied_on_revert_delete(self):
+ # Create a source branch with content filtering
+ source, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=_uppercase, txt_writer=_lowercase, dir='source')
+ if not source.supports_content_filtering():
+ return
+ self.assertFileEqual("Foo Txt", 'source/file1.txt')
+ self.assert_basis_content("FOO TXT", source, txt_fileid)
+
+ # Now delete the file, revert it and check the content
+ os.unlink('source/file1.txt')
+ self.assertFalse(os.path.exists('source/file1.txt'))
+ source.revert(['file1.txt'])
+ self.assertTrue(os.path.exists('source/file1.txt'))
+ # Note: we don't get back exactly what was in the tree
+ # previously because lower(upper(text)) is a lossy transformation
+ self.assertFileEqual("foo txt", 'source/file1.txt')
+
+ def test_content_filtering_applied_on_revert_rename(self):
+ # Create a source branch with content filtering
+ source, txt_fileid, bin_fileid = self.create_cf_tree(
+ txt_reader=_uppercase, txt_writer=_lowercase, dir='source')
+ if not source.supports_content_filtering():
+ return
+ self.assertFileEqual("Foo Txt", 'source/file1.txt')
+ self.assert_basis_content("FOO TXT", source, txt_fileid)
+
+ # Now modify & rename a file, revert it and check the content
+ self.build_tree_contents([
+ ('source/file1.txt', 'Foo Txt with new content')])
+ source.rename_one('file1.txt', 'file1.bin')
+ self.assertTrue(os.path.exists('source/file1.bin'))
+ self.assertFalse(os.path.exists('source/file1.txt'))
+ self.assertFileEqual("Foo Txt with new content", 'source/file1.bin')
+ source.revert(['file1.bin'])
+ self.assertFalse(os.path.exists('source/file1.bin'))
+ self.assertTrue(os.path.exists('source/file1.txt'))
+ # Note: we don't get back exactly what was in the tree
+ # previously because lower(upper(text)) is a lossy transformation
+ self.assertFileEqual("foo txt", 'source/file1.txt')
diff --git a/bzrlib/tests/per_workingtree/test_eol_conversion.py b/bzrlib/tests/per_workingtree/test_eol_conversion.py
new file mode 100644
index 0000000..271b0d2
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_eol_conversion.py
@@ -0,0 +1,335 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for eol conversion."""
+
+import sys
+from cStringIO import StringIO
+
+from bzrlib import rules, status
+from bzrlib.tests import TestSkipped
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.workingtree import WorkingTree
+
+
+# Sample files
+_sample_text = """hello\nworld\r\n"""
+_sample_text_on_win = """hello\r\nworld\r\n"""
+_sample_text_on_unix = """hello\nworld\n"""
+_sample_binary = """hello\nworld\r\n\x00"""
+_sample_clean_lf = _sample_text_on_unix
+_sample_clean_crlf = _sample_text_on_win
+
+
+# Lists of formats for each storage policy
+_LF_IN_REPO = ['native', 'lf', 'crlf']
+_CRLF_IN_REPO = [ '%s-with-crlf-in-repo' % (f,) for f in _LF_IN_REPO]
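+ # i.e. ['native-with-crlf-in-repo', 'lf-with-crlf-in-repo', 'crlf-with-crlf-in-repo']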
+
+
+class TestEolConversion(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ # formats that don't support content filtering can skip these tests
+ fmt = self.workingtree_format
+ f = getattr(fmt, 'supports_content_filtering')
+ if f is None:
+ raise TestSkipped("format %s doesn't declare whether it "
+ "supports content filtering, assuming not" % fmt)
+ if not f():
+ raise TestSkipped("format %s doesn't support content filtering"
+ % fmt)
+ TestCaseWithWorkingTree.setUp(self)
+
+ def patch_rules_searcher(self, eol):
+ """Patch in a custom rules searcher with a given eol setting."""
+ if eol is None:
+ WorkingTree._get_rules_searcher = self.real_rules_searcher
+ else:
+ def custom_eol_rules_searcher(tree, default_searcher):
+ return rules._IniBasedRulesSearcher([
+ '[name *]\n',
+ 'eol=%s\n' % eol,
+ ])
+ WorkingTree._get_rules_searcher = custom_eol_rules_searcher
+
+ def prepare_tree(self, content, eol=None):
+ """Prepare a working tree and commit some content."""
+ self.real_rules_searcher = self.overrideAttr(
+ WorkingTree, '_get_rules_searcher')
+ self.patch_rules_searcher(eol)
+ t = self.make_branch_and_tree('tree1')
+ self.build_tree_contents([('tree1/file1', content)])
+ t.add('file1', 'file1-id')
+ t.commit("add file1")
+ basis = t.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ return t, basis
+
+ def assertNewContentForSetting(self, wt, eol, expected_unix,
+ expected_win, roundtrip):
+ """Clone a working tree and check the convenience content.
+
+ If roundtrip is True, status and commit should see no changes.
+ """
+ if expected_win is None:
+ expected_win = expected_unix
+ self.patch_rules_searcher(eol)
+ wt2 = wt.bzrdir.sprout('tree-%s' % eol).open_workingtree()
+ # To see exactly what got written to disk, we need an unfiltered read
+ content = wt2.get_file('file1-id', filtered=False).read()
+ if sys.platform == 'win32':
+ self.assertEqual(expected_win, content)
+ else:
+ self.assertEqual(expected_unix, content)
+ # Confirm that status thinks nothing has changed if the text roundtrips
+ if roundtrip:
+ status_io = StringIO()
+ status.show_tree_status(wt2, to_file=status_io)
+ self.assertEqual('', status_io.getvalue())
+
+ def assertContent(self, wt, basis, expected_raw, expected_unix,
+ expected_win, roundtrip_to=None):
+ """Check the committed content and content in cloned trees.
+
+ :param roundtrip_to: the set of formats (excluding exact) we
+ can round-trip to or None for all
+ """
+ basis_content = basis.get_file('file1-id').read()
+ self.assertEqual(expected_raw, basis_content)
+
+ # No setting and exact should always roundtrip
+ self.assertNewContentForSetting(wt, None,
+ expected_raw, expected_raw, roundtrip=True)
+ self.assertNewContentForSetting(wt, 'exact',
+ expected_raw, expected_raw, roundtrip=True)
+
+ # Roundtripping is otherwise dependent on whether the original
+ # text is clean - mixed line endings will prevent it. It also
+ # depends on whether the format in the repository is being changed.
+ if roundtrip_to is None:
+ roundtrip_to = _LF_IN_REPO + _CRLF_IN_REPO
+ self.assertNewContentForSetting(wt, 'native',
+ expected_unix, expected_win, 'native' in roundtrip_to)
+ self.assertNewContentForSetting(wt, 'lf',
+ expected_unix, expected_unix, 'lf' in roundtrip_to)
+ self.assertNewContentForSetting(wt, 'crlf',
+ expected_win, expected_win, 'crlf' in roundtrip_to)
+ self.assertNewContentForSetting(wt, 'native-with-crlf-in-repo',
+ expected_unix, expected_win,
+ 'native-with-crlf-in-repo' in roundtrip_to)
+ self.assertNewContentForSetting(wt, 'lf-with-crlf-in-repo',
+ expected_unix, expected_unix,
+ 'lf-with-crlf-in-repo' in roundtrip_to)
+ self.assertNewContentForSetting(wt, 'crlf-with-crlf-in-repo',
+ expected_win, expected_win,
+ 'crlf-with-crlf-in-repo' in roundtrip_to)
+
+ # Test binary files. These always roundtrip.
+
+ def test_eol_no_rules_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary)
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_exact_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary, eol='exact')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_native_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary, eol='native')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_lf_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary, eol='lf')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_crlf_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary, eol='crlf')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_native_with_crlf_in_repo_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary,
+ eol='native-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_lf_with_crlf_in_repo_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary,
+ eol='lf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ def test_eol_crlf_with_crlf_in_repo_binary(self):
+ wt, basis = self.prepare_tree(_sample_binary,
+ eol='crlf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_binary, _sample_binary,
+ _sample_binary)
+
+ # Test text with mixed line endings ("dirty text").
+ # This doesn't roundtrip so status always thinks something has changed.
+
+ def test_eol_no_rules_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text)
+ self.assertContent(wt, basis, _sample_text,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_exact_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text, eol='exact')
+ self.assertContent(wt, basis, _sample_text,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_native_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text, eol='native')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_lf_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text, eol='lf')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_crlf_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text, eol='crlf')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_native_with_crlf_in_repo_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text,
+ eol='native-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_lf_with_crlf_in_repo_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text,
+ eol='lf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ def test_eol_crlf_with_crlf_in_repo_dirty(self):
+ wt, basis = self.prepare_tree(_sample_text,
+ eol='crlf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win, roundtrip_to=[])
+
+ # Test text with clean line endings, either always lf or always crlf.
+ # This selectively roundtrips (based on what's stored in the repo).
+
+ def test_eol_no_rules_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf)
+ self.assertContent(wt, basis, _sample_clean_lf,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_no_rules_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf)
+ self.assertContent(wt, basis, _sample_clean_crlf,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_exact_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf, eol='exact')
+ self.assertContent(wt, basis, _sample_clean_lf,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_exact_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf, eol='exact')
+ self.assertContent(wt, basis, _sample_clean_crlf,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_native_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf, eol='native')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_native_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf, eol='native')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_lf_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf, eol='lf')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_lf_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf, eol='lf')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_crlf_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf, eol='crlf')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_crlf_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf, eol='crlf')
+ self.assertContent(wt, basis, _sample_text_on_unix,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_LF_IN_REPO)
+
+ def test_eol_native_with_crlf_in_repo_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf,
+ eol='native-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_native_with_crlf_in_repo_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf,
+ eol='native-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_lf_with_crlf_in_repo_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf,
+ eol='lf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_lf_with_crlf_in_repo_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf,
+ eol='lf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_crlf_with_crlf_in_repo_clean_lf(self):
+ wt, basis = self.prepare_tree(_sample_clean_lf,
+ eol='crlf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
+
+ def test_eol_crlf_with_crlf_in_repo_clean_crlf(self):
+ wt, basis = self.prepare_tree(_sample_clean_crlf,
+ eol='crlf-with-crlf-in-repo')
+ self.assertContent(wt, basis, _sample_text_on_win,
+ _sample_text_on_unix, _sample_text_on_win,
+ roundtrip_to=_CRLF_IN_REPO)
diff --git a/bzrlib/tests/per_workingtree/test_executable.py b/bzrlib/tests/per_workingtree/test_executable.py
new file mode 100644
index 0000000..2ecef54
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_executable.py
@@ -0,0 +1,195 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test the executable bit under various working tree formats."""
+
+import os
+
+from bzrlib import (
+ osutils,
+ )
+from bzrlib.transform import TreeTransform
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestExecutable(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ super(TestExecutable, self).setUp()
+ self.a_id = "a-20051208024829-849e76f7968d7a86"
+ self.b_id = "b-20051208024829-849e76f7968d7a86"
+ wt = self.make_branch_and_tree('b1')
+ b = wt.branch
+ tt = TreeTransform(wt)
+ tt.new_file('a', tt.root, 'a test\n', self.a_id, True)
+ tt.new_file('b', tt.root, 'b test\n', self.b_id, False)
+ tt.apply()
+
+ self.wt = wt
+
+ def check_exist(self, tree):
+ """Just check that both files have the right executable bits set"""
+ tree.lock_read()
+ self.assertTrue(tree.is_executable(self.a_id),
+ "'a' lost the execute bit")
+ self.assertFalse(tree.is_executable(self.b_id),
+ "'b' gained an execute bit")
+ tree.unlock()
+
+ def check_empty(self, tree, ignore_inv=False):
+ """Check that the files are truly missing
+ :param ignore_inv: If you just delete files from a working tree
+ the inventory still shows them, so don't assert that
+ the inventory is empty, just that the tree doesn't have them
+ """
+ tree.lock_read()
+ if not ignore_inv:
+ self.assertEqual(
+ [('', tree.root_inventory.root)],
+ list(tree.root_inventory.iter_entries()))
+ self.assertFalse(tree.has_id(self.a_id))
+ self.assertFalse(tree.has_filename('a'))
+ self.assertFalse(tree.has_id(self.b_id))
+ self.assertFalse(tree.has_filename('b'))
+ tree.unlock()
+
+ def commit_and_branch(self):
+ """Commit the current tree, and create a second tree"""
+ self.wt.commit('adding a,b', rev_id='r1')
+ # Now make sure that 'bzr branch' also preserves the
+ # executable bit
+ # TODO: Maybe this should be a blackbox test
+ dir2 = self.wt.branch.bzrdir.clone('b2', revision_id='r1')
+ wt2 = dir2.open_workingtree()
+ self.assertEqual(['r1'], wt2.get_parent_ids())
+ self.assertEqual('r1', wt2.branch.last_revision())
+ return wt2
+
+ def test_01_is_executable(self):
+ """Make sure that the tree was created and has the executable bit set"""
+ self.check_exist(self.wt)
+
+ def test_02_stays_executable(self):
+ """reopen the tree and ensure it stuck."""
+ self.wt = self.wt.bzrdir.open_workingtree()
+ self.check_exist(self.wt)
+
+ def test_03_after_commit(self):
+ """Commit the change, and check the history"""
+ self.wt.commit('adding a,b', rev_id='r1')
+
+ rev_tree = self.wt.branch.repository.revision_tree('r1')
+ self.check_exist(rev_tree)
+
+ def test_04_after_removed(self):
+ """Make sure reverting removed files brings them back correctly"""
+ self.wt.commit('adding a,b', rev_id='r1')
+
+ # Make sure the entries are gone
+ os.remove('b1/a')
+ os.remove('b1/b')
+ self.check_empty(self.wt, ignore_inv=True)
+
+ # Make sure that revert is able to bring them back,
+ # and sets 'a' back to being executable
+
+ rev_tree = self.wt.branch.repository.revision_tree('r1')
+
+ self.wt.revert(['a', 'b'], rev_tree, backups=False)
+ self.check_exist(self.wt)
+
+ def test_05_removed_and_committed(self):
+ """Check that reverting to an earlier commit restores them"""
+ self.wt.commit('adding a,b', rev_id='r1')
+
+ # Now remove them again, and make sure that after a
+ # commit, they are still marked correctly
+ os.remove('b1/a')
+ os.remove('b1/b')
+ self.wt.commit('removed', rev_id='r2')
+
+ self.check_empty(self.wt)
+
+ rev_tree = self.wt.branch.repository.revision_tree('r1')
+ # Now revert back to the previous commit
+ self.wt.revert(old_tree=rev_tree, backups=False)
+
+ self.check_exist(self.wt)
+
+ def test_06_branch(self):
+ """branch b1=>b2 should preserve the executable bits"""
+ # TODO: Maybe this should be a blackbox test
+ wt2 = self.commit_and_branch()
+
+ self.check_exist(wt2)
+
+ def test_07_pull(self):
+ """Test that pull will handle bits correctly"""
+ wt2 = self.commit_and_branch()
+
+ os.remove('b1/a')
+ os.remove('b1/b')
+ self.wt.commit('removed', rev_id='r2')
+
+ # now wt2 can pull and the files should be removed
+
+ # Make sure pull will delete the files
+ wt2.pull(self.wt.branch)
+ self.assertEqual(['r2'], wt2.get_parent_ids())
+ self.assertEqual('r2', wt2.branch.last_revision())
+ self.check_empty(wt2)
+
+ # Now restore the files on the first branch and commit
+ # so that the second branch can pull the changes
+ # and make sure that the executable bit has been copied
+ rev_tree = self.wt.branch.repository.revision_tree('r1')
+ self.wt.revert(old_tree=rev_tree, backups=False)
+ self.wt.commit('resurrected', rev_id='r3')
+
+ self.check_exist(self.wt)
+
+ wt2.pull(self.wt.branch)
+ self.assertEqual(['r3'], wt2.get_parent_ids())
+ self.assertEqual('r3', wt2.branch.last_revision())
+ self.check_exist(wt2)
+
+ def test_08_no_op_revert(self):
+ """Just do a simple revert without anything changed
+
+ The bits shouldn't swap.
+ """
+ self.wt.commit('adding a,b', rev_id='r1')
+ rev_tree = self.wt.branch.repository.revision_tree('r1')
+ self.wt.revert(old_tree=rev_tree, backups=False)
+ self.check_exist(self.wt)
+
+ def test_commit_with_exec_from_basis(self):
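+ # Force the tree to take its notion of executability from the basis
+ # inventory rather than from a filesystem stat, then commit.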
+ self.wt._is_executable_from_path_and_stat = \
+ self.wt._is_executable_from_path_and_stat_from_basis
+ rev_id1 = self.wt.commit('one')
+ rev_tree1 = self.wt.branch.repository.revision_tree(rev_id1)
+ a_executable = rev_tree1.root_inventory[self.a_id].executable
+ b_executable = rev_tree1.root_inventory[self.b_id].executable
+ self.assertIsNot(None, a_executable)
+ self.assertTrue(a_executable)
+ self.assertIsNot(None, b_executable)
+ self.assertFalse(b_executable)
+
+ def test_use_exec_from_basis(self):
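+ # Pretend the filesystem cannot record an executable bit; is_executable
+ # should then report the value stored in the basis tree.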
+ self.wt._supports_executable = lambda: False
+ self.addCleanup(self.wt.lock_read().unlock)
+ self.assertTrue(self.wt.is_executable(self.a_id))
+ self.assertFalse(self.wt.is_executable(self.b_id))
diff --git a/bzrlib/tests/per_workingtree/test_flush.py b/bzrlib/tests/per_workingtree/test_flush.py
new file mode 100644
index 0000000..2d6210d
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_flush.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTree.flush."""
+
+import sys
+from bzrlib import errors
+from bzrlib.tests import TestSkipped
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestFlush(TestCaseWithWorkingTree):
+
+ def test_flush_fresh_tree(self):
+ tree = self.make_branch_and_tree('t1')
+ tree.lock_write()
+ try:
+ tree.flush()
+ finally:
+ tree.unlock()
+
+ def test_flush_when_inventory_is_modified(self):
+ if sys.platform == "win32":
+ raise TestSkipped("don't use oslocks on win32 in unix manner")
+ # This takes a write lock on the source tree, then opens a second copy
+ # and tries to grab a read lock. This works on Unix and is a reasonable
+ # way to detect when the file is actually written to, but it won't work
+ # (as a test) on Windows. It might be nice to instead stub out the
+ # functions used to write and that way do both less work and also be
+ # able to execute on Windows.
+ self.thisFailsStrictLockCheck()
+ # when doing a flush the inventory should be written if needed.
+ # we test that by changing the inventory (using
+ # _set_inventory for now until add etc have lazy writes of
+ # the inventory on unlock).
+ tree = self.make_branch_and_tree('tree')
+ # prepare for a series of changes that will modify the
+ # inventory
+ tree.lock_write()
+ try:
+ old_root = tree.get_root_id()
+ tree.set_root_id('new-root')
+ # to detect that the inventory is written by flush, we
+ # first check that it was not written yet.
+ reference_tree = tree.bzrdir.open_workingtree()
+ self.assertEqual(old_root, reference_tree.get_root_id())
+ # now flush the tree which should write the inventory.
+ tree.flush()
+ # and check it was written using another reference tree
+ reference_tree = tree.bzrdir.open_workingtree()
+ self.assertEqual('new-root', reference_tree.get_root_id())
+ finally:
+ tree.unlock()
+
+ def test_flush_with_read_lock_fails(self):
+ """Flush cannot be used during a read lock."""
+ tree = self.make_branch_and_tree('t1')
+ tree.lock_read()
+ try:
+ self.assertRaises(errors.NotWriteLocked, tree.flush)
+ finally:
+ tree.unlock()
+
+ def test_flush_with_no_lock_fails(self):
+ tree = self.make_branch_and_tree('t1')
+ self.assertRaises(errors.NotWriteLocked, tree.flush)
diff --git a/bzrlib/tests/per_workingtree/test_get_file_mtime.py b/bzrlib/tests/per_workingtree/test_get_file_mtime.py
new file mode 100644
index 0000000..39c152e
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_get_file_mtime.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that all WorkingTree's implement get_file_mtime."""
+
+import os
+
+from bzrlib import errors
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestGetFileMTime(TestCaseWithWorkingTree):
+ """Test WorkingTree.get_file_mtime.
+
+ These are more involved because we need to handle files which have been
+ renamed, etc.
+ """
+
+ def make_basic_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/one'])
+ tree.add(['one'], ['one-id'])
+ return tree
+
+ def test_get_file_mtime(self):
+ tree = self.make_basic_tree()
+
+ st = os.lstat('tree/one')
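+ # The tree should report the same mtime as the filesystem, whether the
+ # file is looked up by id alone or by id plus path.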
+ tree.lock_read()
+ try:
+ mtime_file_id = tree.get_file_mtime(file_id='one-id')
+ self.assertIsInstance(mtime_file_id, (float, int))
+ self.assertAlmostEqual(st.st_mtime, mtime_file_id)
+ mtime_path = tree.get_file_mtime(file_id='one-id', path='one')
+ self.assertAlmostEqual(mtime_file_id, mtime_path)
+ finally:
+ tree.unlock()
+
+ def test_after_commit(self):
+ """Committing shouldn't change the mtime."""
+ tree = self.make_basic_tree()
+
+ st = os.lstat('tree/one')
+ tree.commit('one', rev_id='rev-1')
+
+ tree.lock_read()
+ try:
+ mtime = tree.get_file_mtime(file_id='one-id')
+ self.assertAlmostEqual(st.st_mtime, mtime)
+
+ mtime = tree.get_file_mtime(file_id='one-id', path='one')
+ self.assertAlmostEqual(st.st_mtime, mtime)
+ finally:
+ tree.unlock()
+
+ def test_get_renamed_time(self):
+ """We should handle renamed files."""
+ tree = self.make_basic_tree()
+
+ tree.rename_one('one', 'two')
+ st = os.lstat('tree/two')
+
+ tree.lock_read()
+ try:
+ mtime = tree.get_file_mtime(file_id='one-id')
+ self.assertAlmostEqual(st.st_mtime, mtime)
+ mtime = tree.get_file_mtime(file_id='one-id', path='two')
+ self.assertAlmostEqual(st.st_mtime, mtime)
+ finally:
+ tree.unlock()
+
+ def test_get_renamed_in_subdir_time(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/d/', 'tree/d/a'])
+ tree.add(['d', 'd/a'], ['d-id', 'a-id'])
+ tree.commit('1', rev_id='rev-1')
+
+ tree.rename_one('d', 'e')
+
+ st = os.lstat('tree/e/a')
+ tree.lock_read()
+ try:
+ mtime = tree.get_file_mtime(file_id='a-id')
+ self.assertAlmostEqual(st.st_mtime, mtime)
+ mtime = tree.get_file_mtime(file_id='a-id', path='e/a')
+ self.assertAlmostEqual(st.st_mtime, mtime)
+ finally:
+ tree.unlock()
+
+ def test_missing(self):
+ tree = self.make_basic_tree()
+
+ os.remove('tree/one')
+ tree.lock_read()
+ try:
+ self.assertRaises(errors.FileTimestampUnavailable,
+ tree.get_file_mtime, file_id='one-id')
+ finally:
+ tree.unlock()
+
diff --git a/bzrlib/tests/per_workingtree/test_get_parent_ids.py b/bzrlib/tests/per_workingtree/test_get_parent_ids.py
new file mode 100644
index 0000000..cf4e355
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_get_parent_ids.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'workingtree.get_parent_ids'"""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestGetParentIds(TestCaseWithWorkingTree):
+
+ def test_get_parent_ids(self):
+ t = self.make_branch_and_tree('t1')
+ self.assertEqual([], t.get_parent_ids())
+ rev1_id = t.commit('foo', allow_pointless=True)
+ self.assertEqual([rev1_id], t.get_parent_ids())
+ t2 = t.bzrdir.sprout('t2').open_workingtree()
+ rev2_id = t2.commit('foo', allow_pointless=True)
+ self.assertEqual([rev2_id], t2.get_parent_ids())
+ t.merge_from_branch(t2.branch)
+ self.assertEqual([rev1_id, rev2_id], t.get_parent_ids())
+ for parent_id in t.get_parent_ids():
+ self.assertIsInstance(parent_id, str)
+
+ def test_pending_merges(self):
+ """Test the correspondence between set pending merges and get_parent_ids."""
+ wt = self.make_branch_and_tree('.')
+ self.assertEqual([], wt.get_parent_ids())
+ # the first pending merge replaces the 'last revision' because
+ # 'last revision' is shorthand for 'left most parent'
+ wt.add_pending_merge('foo@azkhazan-123123-abcabc')
+ self.assertEqual(['foo@azkhazan-123123-abcabc'], wt.get_parent_ids())
+ # adding a merge which is already in the parents list gets ignored.
+ wt.add_pending_merge('foo@azkhazan-123123-abcabc')
+ self.assertEqual(['foo@azkhazan-123123-abcabc'], wt.get_parent_ids())
+ # adding a different merge results in it being appended to the list -
+ # order is preserved.
+ wt.add_pending_merge('wibble@fofof--20050401--1928390812')
+ self.assertEqual(['foo@azkhazan-123123-abcabc',
+ 'wibble@fofof--20050401--1928390812'],
+ wt.get_parent_ids())
diff --git a/bzrlib/tests/per_workingtree/test_inv.py b/bzrlib/tests/per_workingtree/test_inv.py
new file mode 100644
index 0000000..6351597
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_inv.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of inventories of working trees."""
+
+
+import os
+
+from bzrlib import inventory, tests
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestRevert(TestCaseWithWorkingTree):
+
+ def test_dangling_id(self):
+ wt = self.make_branch_and_tree('b1')
+ wt.lock_tree_write()
+ self.addCleanup(wt.unlock)
+ self.assertEqual(len(wt.all_file_ids()), 1)
+ with open('b1/a', 'wb') as f: f.write('a test\n')
+ wt.add('a')
+ self.assertEqual(len(wt.all_file_ids()), 2)
+ wt.flush() # workaround revert doing wt._write_inventory for now.
+ os.unlink('b1/a')
+ wt.revert()
+ self.assertEqual(len(wt.all_file_ids()), 1)
+
+
+class TestApplyInventoryDelta(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ super(TestApplyInventoryDelta, self).setUp()
+ if not self.bzrdir_format.repository_format.supports_full_versioned_files:
+ raise tests.TestNotApplicable(
+ "format does not support inventory deltas")
+
+ def test_add(self):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ root_id = wt.get_root_id()
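+ # The delta lists the child 'bar/foo' before its parent directory
+ # 'bar'; apply_inventory_delta must cope with that ordering.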
+ wt.apply_inventory_delta([(None, 'bar/foo', 'foo-id',
+ inventory.InventoryFile('foo-id', 'foo', parent_id='bar-id')),
+ (None, 'bar', 'bar-id', inventory.InventoryDirectory('bar-id',
+ 'bar', parent_id=root_id))])
+ self.assertEqual('bar/foo', wt.id2path('foo-id'))
+ self.assertEqual('bar', wt.id2path('bar-id'))
+
+ def test_remove(self):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ self.build_tree(['foo/', 'foo/bar'])
+ wt.add(['foo', 'foo/bar'], ['foo-id', 'bar-id'])
+ wt.apply_inventory_delta([('foo', None, 'foo-id', None),
+ ('foo/bar', None, 'bar-id', None)])
+ self.assertIs(None, wt.path2id('foo'))
+
+ def test_rename_dir_with_children(self):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ root_id = wt.get_root_id()
+ self.addCleanup(wt.unlock)
+ self.build_tree(['foo/', 'foo/bar'])
+ wt.add(['foo', 'foo/bar'],
+ ['foo-id', 'bar-id'])
+ wt.apply_inventory_delta([('foo', 'baz', 'foo-id',
+ inventory.InventoryDirectory('foo-id', 'baz', root_id))])
+ # foo/bar should have followed the rename of its parent to baz/bar
+ self.assertEqual('baz', wt.id2path('foo-id'))
+ self.assertEqual('baz/bar', wt.id2path('bar-id'))
+
+ def test_rename_dir_with_children_with_children(self):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ root_id = wt.get_root_id()
+ self.addCleanup(wt.unlock)
+ self.build_tree(['foo/', 'foo/bar/', 'foo/bar/baz'])
+ wt.add(['foo', 'foo/bar', 'foo/bar/baz'],
+ ['foo-id', 'bar-id', 'baz-id'])
+ wt.apply_inventory_delta([('foo', 'quux', 'foo-id',
+ inventory.InventoryDirectory('foo-id', 'quux', root_id))])
+ # foo/bar/baz should have followed the rename of its parent's
+ # parent to quux/bar/baz
+ self.assertEqual('quux/bar/baz', wt.id2path('baz-id'))
+
+ def test_rename_file(self):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ self.build_tree(['foo/', 'foo/bar', 'baz/'])
+ wt.add(['foo', 'foo/bar', 'baz'],
+ ['foo-id', 'bar-id', 'baz-id'])
+ wt.apply_inventory_delta([('foo/bar', 'baz/bar', 'bar-id',
+ inventory.InventoryFile('bar-id', 'bar', 'baz-id'))])
+ self.assertEqual('baz/bar', wt.id2path('bar-id'))
+
+ def test_rename_swap(self):
+ """Test the swap-names edge case.
+
+ foo and bar should swap names, but retain their children. If this
+ works, any simpler rename ought to work.
+ """
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ root_id = wt.get_root_id()
+ self.addCleanup(wt.unlock)
+ self.build_tree(['foo/', 'foo/bar', 'baz/', 'baz/qux'])
+ wt.add(['foo', 'foo/bar', 'baz', 'baz/qux'],
+ ['foo-id', 'bar-id', 'baz-id', 'qux-id'])
+ wt.apply_inventory_delta([('foo', 'baz', 'foo-id',
+ inventory.InventoryDirectory('foo-id', 'baz', root_id)),
+ ('baz', 'foo', 'baz-id',
+ inventory.InventoryDirectory('baz-id', 'foo', root_id))])
+ self.assertEqual('baz/bar', wt.id2path('bar-id'))
+ self.assertEqual('foo/qux', wt.id2path('qux-id'))
+
+ def test_child_rename_ordering(self):
+ """Test the rename-parent, move child edge case.
+
+ (A naive implementation may move the parent first, and then be
+ unable to find the child.)
+ """
+ wt = self.make_branch_and_tree('.')
+ root_id = wt.get_root_id()
+ self.build_tree(['dir/', 'dir/child', 'other/'])
+ wt.add(['dir', 'dir/child', 'other'],
+ ['dir-id', 'child-id', 'other-id'])
+ # this delta moves dir-id to dir2 and reparents
+ # child-id to a parent of other-id
+ wt.apply_inventory_delta([('dir', 'dir2', 'dir-id',
+ inventory.InventoryDirectory('dir-id', 'dir2', root_id)),
+ ('dir/child', 'other/child', 'child-id',
+ inventory.InventoryFile('child-id', 'child', 'other-id'))])
+ self.assertEqual('dir2', wt.id2path('dir-id'))
+ self.assertEqual('other/child', wt.id2path('child-id'))
+
+ def test_replace_root(self):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+
+ root_id = wt.get_root_id()
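+ # Remove the current root entry and install a fresh root directory
+ # with the fixed id 'root-id' in a single delta.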
+ wt.apply_inventory_delta([('', None, root_id, None),
+ (None, '', 'root-id',
+ inventory.InventoryDirectory('root-id', '', None))])
+
+
+class TestTreeReference(TestCaseWithWorkingTree):
+
+ def test_tree_reference_matches_inv(self):
+ base = self.make_branch_and_tree('base')
+ if base.branch.repository._format.supports_full_versioned_files:
+ raise tests.TestNotApplicable(
+ "format does not support inventory deltas")
+ if not base.supports_tree_reference():
+ raise tests.TestNotApplicable("wt doesn't support nested trees")
+ # We add it as a directory, but it becomes a tree-reference
+ base.add(['subdir'], ['subdir-id'], ['directory'])
+ subdir = self.make_branch_and_tree('base/subdir')
+ self.addCleanup(base.lock_read().unlock)
+ # Note: we aren't strict about ie.kind being 'directory' here, what we
+ # are strict about is that wt.inventory should match
+ # wt.current_dirstate()'s idea about what files are where.
+ ie = base.inventory['subdir-id']
+ self.assertEqual('directory', ie.kind)
+ path, ie = base.iter_entries_by_dir(['subdir-id']).next()
+ self.assertEqual('subdir', path)
+ self.assertEqual('tree-reference', ie.kind)
diff --git a/bzrlib/tests/per_workingtree/test_is_control_filename.py b/bzrlib/tests/per_workingtree/test_is_control_filename.py
new file mode 100644
index 0000000..861a4e4
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_is_control_filename.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.osutils import basename
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestIsControlFilename(TestCaseWithWorkingTree):
+
+ def validate_tree_is_controlfilename(self, tree):
+ """check that 'tree' obeys the contract for is_control_filename."""
+ bzrdirname = basename(tree.bzrdir.transport.base[:-1])
+ self.assertTrue(tree.is_control_filename(bzrdirname))
+ self.assertTrue(tree.is_control_filename(bzrdirname + '/subdir'))
+ self.assertFalse(tree.is_control_filename('dir/' + bzrdirname))
+ self.assertFalse(tree.is_control_filename('dir/' + bzrdirname + '/sub'))
+
+ def test_dotbzr_is_control_in_cwd(self):
+ tree = self.make_branch_and_tree('.')
+ self.validate_tree_is_controlfilename(tree)
+
+ def test_dotbzr_is_control_in_subdir(self):
+ tree = self.make_branch_and_tree('subdir')
+ self.validate_tree_is_controlfilename(tree)
+
diff --git a/bzrlib/tests/per_workingtree/test_is_ignored.py b/bzrlib/tests/per_workingtree/test_is_ignored.py
new file mode 100644
index 0000000..9983571
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_is_ignored.py
@@ -0,0 +1,224 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import config, ignores
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestIsIgnored(TestCaseWithWorkingTree):
+
+ def _set_user_ignore_content(self, ignores):
+ """Create user ignore file and set its content to ignores."""
+ config.ensure_config_dir_exists()
+ user_ignore_file = config.user_ignore_config_filename()
+ f = open(user_ignore_file, 'wb')
+ try:
+ f.write(ignores)
+ finally:
+ f.close()
+
+ def test_is_ignored(self):
+ tree = self.make_branch_and_tree('.')
+ # This will break if a tree changes the ignore file format. That is fine
+ # because at the moment tree format is orthogonal to user data, and
+ # .bzrignore is user data so must not be changed by a tree format.
+ self.build_tree_contents([
+ ('.bzrignore', './rootdir\n'
+ 'randomfile*\n'
+ '*bar\n'
+ '!bazbar\n'
+ '?foo\n'
+ '*.~*\n'
+ 'dir1/*f1\n'
+ 'dir1/?f2\n'
+ 'RE:dir2/.*\.wombat\n'
+ 'path/from/ro?t\n'
+ '**/piffle.py\n'
+ '!b/piffle.py\n'
+ 'unicode\xc2\xb5\n' # u'\xb5'.encode('utf8')
+ 'dos\r\n'
+ '\n' # empty line
+ '#comment\n'
+ ' xx \n' # whitespace
+ )])
+ # We set the user ignore file to '' so its patterns are not used
+ # instead of .bzrignore. For example, if we don't do this,
+ # 'foo.~1~' matches the default user ignore pattern '*~' instead
+ # of '*.~*' from .bzrignore, as the assertions below expect.
+ self._set_user_ignore_content('')
+ # is_ignored returns the matching ignore regex when a path is ignored.
+ # we check some expected matches for each rule, and one or more
+ # relevant not-matches that look plausible as cases for bugs.
+ self.assertEqual('./rootdir', tree.is_ignored('rootdir'))
+ self.assertEqual(None, tree.is_ignored('foo/rootdir'))
+ self.assertEqual(None, tree.is_ignored('rootdirtrailer'))
+
+ self.assertEqual('randomfile*', tree.is_ignored('randomfile'))
+ self.assertEqual('randomfile*', tree.is_ignored('randomfiles'))
+ self.assertEqual('randomfile*', tree.is_ignored('foo/randomfiles'))
+ self.assertEqual(None, tree.is_ignored('randomfil'))
+ self.assertEqual(None, tree.is_ignored('foo/randomfil'))
+
+ self.assertEqual("path/from/ro?t", tree.is_ignored('path/from/root'))
+ self.assertEqual("path/from/ro?t", tree.is_ignored('path/from/roat'))
+ self.assertEqual(None, tree.is_ignored('roat'))
+
+ self.assertEqual('**/piffle.py', tree.is_ignored('piffle.py'))
+ self.assertEqual('**/piffle.py', tree.is_ignored('a/piffle.py'))
+ self.assertEqual(None, tree.is_ignored('b/piffle.py')) # exclusion
+ self.assertEqual('**/piffle.py', tree.is_ignored('foo/bar/piffle.py'))
+ self.assertEqual(None, tree.is_ignored('p/iffle.py'))
+
+ self.assertEqual(u'unicode\xb5', tree.is_ignored(u'unicode\xb5'))
+ self.assertEqual(u'unicode\xb5', tree.is_ignored(u'subdir/unicode\xb5'))
+ self.assertEqual(None, tree.is_ignored(u'unicode\xe5'))
+ self.assertEqual(None, tree.is_ignored(u'unicode'))
+ self.assertEqual(None, tree.is_ignored(u'\xb5'))
+
+ self.assertEqual('dos', tree.is_ignored('dos'))
+ self.assertEqual(None, tree.is_ignored('dosfoo'))
+
+ self.assertEqual('*bar', tree.is_ignored('foobar'))
+ self.assertEqual('*bar', tree.is_ignored(r'foo\nbar'))
+ self.assertEqual('*bar', tree.is_ignored('bar'))
+ self.assertEqual('*bar', tree.is_ignored('.bar'))
+
+ self.assertEqual(None, tree.is_ignored('bazbar')) # exclusion
+
+ self.assertEqual('?foo', tree.is_ignored('afoo'))
+ self.assertEqual('?foo', tree.is_ignored('.foo'))
+
+ self.assertEqual('*.~*', tree.is_ignored('blah.py.~1~'))
+
+ self.assertEqual('dir1/*f1', tree.is_ignored('dir1/foof1'))
+ self.assertEqual('dir1/*f1', tree.is_ignored('dir1/f1'))
+ self.assertEqual('dir1/*f1', tree.is_ignored('dir1/.f1'))
+
+ self.assertEqual('dir1/?f2', tree.is_ignored('dir1/ff2'))
+ self.assertEqual('dir1/?f2', tree.is_ignored('dir1/.f2'))
+
+ self.assertEqual('RE:dir2/.*\.wombat', tree.is_ignored('dir2/foo.wombat'))
+ self.assertEqual(None, tree.is_ignored('dir2/foo'))
+
+ # Blank lines and comments should be ignored
+ self.assertEqual(None, tree.is_ignored(''))
+ self.assertEqual(None, tree.is_ignored('test/'))
+
+ self.assertEqual(None, tree.is_ignored('#comment'))
+
+ # Whitespace should not be stripped
+ self.assertEqual(' xx ', tree.is_ignored(' xx '))
+ self.assertEqual(' xx ', tree.is_ignored('subdir/ xx '))
+ self.assertEqual(None, tree.is_ignored('xx'))
+ self.assertEqual(None, tree.is_ignored('xx '))
+ self.assertEqual(None, tree.is_ignored(' xx'))
+ self.assertEqual(None, tree.is_ignored('subdir/xx '))
+
+ def test_global_ignored(self):
+ tree = self.make_branch_and_tree('.')
+
+ config.ensure_config_dir_exists()
+ user_ignore_file = config.user_ignore_config_filename()
+ self._set_user_ignore_content(
+ '*.py[co]\n'
+ './.shelf\n'
+ '# comment line\n'
+ '\n' # Blank line
+ '\r\n' # Blank DOS line
+ ' * \n' # Leading and trailing spaces
+ 'crlf\r\n' # dos style line
+ '*\xc3\xa5*\n' # u'\xe5'.encode('utf8')
+ )
+
+ # Rooted
+ self.assertEqual('./.shelf', tree.is_ignored('.shelf'))
+ self.assertEqual(None, tree.is_ignored('foo/.shelf'))
+
+ # Glob style
+ self.assertEqual('*.py[co]', tree.is_ignored('foo.pyc'))
+ self.assertEqual('*.py[co]', tree.is_ignored('foo.pyo'))
+ self.assertEqual(None, tree.is_ignored('foo.py'))
+
+ # Glob in subdir
+ self.assertEqual('*.py[co]', tree.is_ignored('bar/foo.pyc'))
+ self.assertEqual('*.py[co]', tree.is_ignored('bar/foo.pyo'))
+ self.assertEqual(None, tree.is_ignored('bar/foo.py'))
+
+ # Unicode
+ self.assertEqual(u'*\xe5*', tree.is_ignored(u'b\xe5gfors'))
+ self.assertEqual(u'*\xe5*', tree.is_ignored(u'\xe5gfors'))
+ self.assertEqual(u'*\xe5*', tree.is_ignored(u'\xe5'))
+ self.assertEqual(u'*\xe5*', tree.is_ignored(u'b\xe5'))
+ self.assertEqual(u'*\xe5*', tree.is_ignored(u'b/\xe5'))
+
+ # Whitespace
+ self.assertEqual(' * ', tree.is_ignored(' bbb '))
+ self.assertEqual(' * ', tree.is_ignored('subdir/ bbb '))
+ self.assertEqual(None, tree.is_ignored('bbb '))
+ self.assertEqual(None, tree.is_ignored(' bbb'))
+
+ # Dos lines
+ self.assertEqual('crlf', tree.is_ignored('crlf'))
+ self.assertEqual('crlf', tree.is_ignored('subdir/crlf'))
+
+ # Comment line should be ignored
+ self.assertEqual(None, tree.is_ignored('# comment line'))
+
+ # Blank line should also be ignored
+ self.assertEqual(None, tree.is_ignored(''))
+ self.assertEqual(None, tree.is_ignored('baz/'))
+
+ def test_mixed_is_ignored(self):
+ tree = self.make_branch_and_tree('.')
+ ignores._set_user_ignores(['*.py[co]', './.shelf'])
+ self.build_tree_contents([('.bzrignore', './rootdir\n*.swp\n')])
+
+ self.assertEqual('*.py[co]', tree.is_ignored('foo.pyc'))
+ self.assertEqual('./.shelf', tree.is_ignored('.shelf'))
+ self.assertEqual('./rootdir', tree.is_ignored('rootdir'))
+ self.assertEqual('*.swp', tree.is_ignored('foo.py.swp'))
+ self.assertEqual('*.swp', tree.is_ignored('.foo.py.swp'))
+ self.assertEqual(None, tree.is_ignored('.foo.py.swo'))
+
+ def test_runtime_ignores(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('.bzrignore', '')])
+ ignores._set_user_ignores([])
+
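+ # Save the process-wide runtime ignore set so we can restore it and
+ # avoid leaking state into other tests.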
+ orig_runtime = ignores._runtime_ignores
+ try:
+ ignores._runtime_ignores = set()
+ self.assertEqual(None, tree.is_ignored('foobar.py'))
+
+ tree._flush_ignore_list_cache()
+ ignores.add_runtime_ignores(['./foobar.py'])
+ self.assertEqual(set(['./foobar.py']), ignores.get_runtime_ignores())
+ self.assertEqual('./foobar.py', tree.is_ignored('foobar.py'))
+ finally:
+ ignores._runtime_ignores = orig_runtime
+
+ def test_ignore_caching(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['ignoreme'])
+
+ self.assertEqual(None, tree.is_ignored('ignoreme'))
+
+ # Bug #129694 specifically references WorkingTree.unknowns()
+ tree.unknowns()
+
+ self.build_tree_contents([('.bzrignore', 'ignoreme')])
+ self.assertEqual('ignoreme', tree.is_ignored('ignoreme'))
diff --git a/bzrlib/tests/per_workingtree/test_locking.py b/bzrlib/tests/per_workingtree/test_locking.py
new file mode 100644
index 0000000..692c8c7
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_locking.py
@@ -0,0 +1,292 @@
+# Copyright (C) 2006, 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the (un)lock interfaces on all working tree implemenations."""
+
+import sys
+
+from bzrlib import (
+ branch,
+ errors,
+ )
+from bzrlib.tests import TestSkipped
+from bzrlib.tests.matchers import *
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestWorkingTreeLocking(TestCaseWithWorkingTree):
+
+ def test_trivial_lock_read_unlock(self):
+ """Locking and unlocking should work trivially."""
+ wt = self.make_branch_and_tree('.')
+
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ wt.lock_read()
+ try:
+ self.assertTrue(wt.is_locked())
+ self.assertTrue(wt.branch.is_locked())
+ finally:
+ wt.unlock()
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+
+ def test_lock_read_returns_unlocker(self):
+ wt = self.make_branch_and_tree('.')
+ self.assertThat(wt.lock_read, ReturnsUnlockable(wt))
+
+ def test_trivial_lock_write_unlock(self):
+ """Locking for write and unlocking should work trivially."""
+ wt = self.make_branch_and_tree('.')
+
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ wt.lock_write()
+ try:
+ self.assertTrue(wt.is_locked())
+ self.assertTrue(wt.branch.is_locked())
+ finally:
+ wt.unlock()
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+
+ def test_lock_write_returns_unlocker(self):
+ wt = self.make_branch_and_tree('.')
+ self.assertThat(wt.lock_write, ReturnsUnlockable(wt))
+
+ def test_trivial_lock_tree_write_unlock(self):
+ """Locking for tree write is ok when the branch is not locked."""
+ wt = self.make_branch_and_tree('.')
+
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ wt.lock_tree_write()
+ try:
+ self.assertTrue(wt.is_locked())
+ self.assertTrue(wt.branch.is_locked())
+ finally:
+ wt.unlock()
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+
+ def test_lock_tree_write_returns_unlocker(self):
+ wt = self.make_branch_and_tree('.')
+ self.assertThat(wt.lock_tree_write, ReturnsUnlockable(wt))
+
+ def test_trivial_lock_tree_write_branch_read_locked(self):
+ """It is ok to lock_tree_write when the branch is read locked."""
+ wt = self.make_branch_and_tree('.')
+
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ wt.branch.lock_read()
+ try:
+ wt.lock_tree_write()
+ except errors.ReadOnlyError:
+ # When ReadOnlyError is raised, it indicates that the
+ # workingtree shares its lock with the branch, which is what
+ # the git/hg/bzr0.6 formats do.
+ # In this case, no lock should have been taken - but the tree
+ # will have been locked because they share a lock. Unlocking
+ # just the branch should make everything match again correctly.
+ wt.branch.unlock()
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ return
+ try:
+ self.assertTrue(wt.is_locked())
+ self.assertTrue(wt.branch.is_locked())
+ finally:
+ wt.unlock()
+ self.assertFalse(wt.is_locked())
+ self.assertTrue(wt.branch.is_locked())
+ wt.branch.unlock()
+
+ def _test_unlock_with_lock_method(self, methodname):
+ """Create a tree and then test its unlocking behaviour.
+
+ :param methodname: The lock method to use to establish locks.
+ """
+ if sys.platform == "win32":
+ raise TestSkipped("don't use oslocks on win32 in unix manner")
+ # This helper takes a write lock on the source tree, then opens a
+ # second copy and tries to grab a read lock. This works on Unix and is
+ # a reasonable way to detect when the file is actually written to, but
+ # it won't work (as a test) on Windows. It might be nice to instead
+ # stub out the functions used to write and that way do both less work
+ # and also be able to execute on Windows.
+ self.thisFailsStrictLockCheck()
+ # when unlocking the last lock count from tree_write_lock,
+ # the tree should do a flush().
+ # we test that by changing the inventory using set_root_id
+ tree = self.make_branch_and_tree('tree')
+ # prepare for a series of changes that will modify the
+ # inventory
+ getattr(tree, methodname)()
+ # Note that we don't have a try:finally here, for two reasons.
+ # Firstly, errors will only be reported if the test fails, and
+ # when it fails that's fine as long as the test suite cleanup still
+ # works, which it will as the lock objects are released (that's
+ # where the warning comes from). Secondly, it is hard in this test
+ # to be sure that we've got the right interactions between
+ # try:finally and the lock/unlocks we are doing.
+ getattr(tree, methodname)()
+ # this should really do something within the public api
+ # e.g. mkdir('foo') but all the mutating methods at the
+ # moment trigger inventory writes and thus will not
+ # let us trigger a read-when-dirty situation.
+ old_root = tree.get_root_id()
+ tree.set_root_id('new-root')
+ # to detect that the inventory is written by unlock, we
+ # first check that it was not written yet.
+ # TODO: This requires taking a read lock while we are holding the above
+ # write lock, which shouldn't actually be possible
+ reference_tree = tree.bzrdir.open_workingtree()
+ self.assertEqual(old_root, reference_tree.get_root_id())
+ # now unlock the second held lock, which should do nothing.
+ tree.unlock()
+ reference_tree = tree.bzrdir.open_workingtree()
+ self.assertEqual(old_root, reference_tree.get_root_id())
+ # unlocking the first lock we took will now flush.
+ tree.unlock()
+ # and check it was written using another reference tree
+ reference_tree = tree.bzrdir.open_workingtree()
+ self.assertEqual('new-root', reference_tree.get_root_id())
+
+ def test_unlock_from_tree_write_lock_flushes(self):
+ self._test_unlock_with_lock_method("lock_tree_write")
+
+ def test_unlock_from_write_lock_flushes(self):
+ self._test_unlock_with_lock_method("lock_write")
+
+ def test_unlock_branch_failures(self):
+ """If the branch unlock fails the tree must still unlock."""
+ # The public interface for WorkingTree requires a branch, but
+ # does not require that the working tree use the branch - it is
+ # implementation-specific how the WorkingTree, Branch, and Repository
+ # hang together.
+ # In order to test that implementations which *do* unlock via the branch
+ # do so correctly, we unlock the branch after locking the working tree.
+ # The next unlock on the working tree should trigger a LockNotHeld exception
+ # from the branch object, which must be exposed to the caller. To meet
+ # our object model - where locking a tree locks its branch, and
+ # unlocking a branch does not unlock a working tree, *even* for
+ # all-in-one implementations like bzr 0.6, git, and hg, implementations
+ # must have some separate counter for each object, so our explicit
+ # unlock should trigger some error on all implementations, and
+ # requiring that to be LockNotHeld seems reasonable.
+ #
+ # we use this approach rather than decorating the Branch, because the
+ # public interface of WorkingTree does not permit altering the branch
+ # object - and we cannot tell which attribute might allow us to
+ # backdoor-in and change it reliably. For implementation-specific tests
+ # we can do such skullduggery, but not for interface-specific tests.
+ # And it's simpler :)
+ wt = self.make_branch_and_tree('.')
+
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ wt.lock_write()
+ self.assertTrue(wt.is_locked())
+ self.assertTrue(wt.branch.is_locked())
+
+ # manually unlock the branch, preparing a LockNotHeld error.
+ wt.branch.unlock()
+ # the branch *may* still be locked here, if it's an all-in-one
+ # implementation because there is a single lock object with three
+ # references on it, and unlocking the branch only drops this by two
+ self.assertRaises(errors.LockNotHeld, wt.unlock)
+ # but now, the tree must be unlocked
+ self.assertFalse(wt.is_locked())
+ # and the branch too.
+ self.assertFalse(wt.branch.is_locked())
+
+ def test_failing_to_lock_branch_does_not_lock(self):
+ """If the branch cannot be locked, dont lock the tree."""
+ # Many implementations treat read-locks as non-blocking, but some
+ # treat them as blocking with writes.. Accordingly we test this by
+ # opening the branch twice, and locking the branch for write in the
+ # second instance. Our lock contract requires separate instances to
+ # mutually exclude if a lock is exclusive at all: If we get no error
+ # locking, the test still passes.
+ wt = self.make_branch_and_tree('.')
+ branch_copy = branch.Branch.open('.')
+ branch_copy.lock_write()
+ try:
+ try:
+ wt.lock_read()
+ except errors.LockError:
+ # any error here means the locks are exclusive in some
+ # manner
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ return
+ else:
+ # no error - the branch allows read locks while writes
+ # are taken, just pass.
+ wt.unlock()
+ finally:
+ branch_copy.unlock()
+
+ def test_failing_to_lock_write_branch_does_not_lock(self):
+ """If the branch cannot be write locked, dont lock the tree."""
+ # all implementations of branch are required to treat write
+ # locks as blocking (compare to repositories which are not required
+ # to do so).
+ # Accordingly we test this by opening the branch twice, and locking the
+ # branch for write in the second instance. Our lock contract requires
+ # separate instances to mutually exclude.
+ wt = self.make_branch_and_tree('.')
+ branch_copy = branch.Branch.open('.')
+ branch_copy.lock_write()
+ try:
+ try:
+ self.assertRaises(errors.LockError, wt.lock_write)
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ finally:
+ if wt.is_locked():
+ wt.unlock()
+ finally:
+ branch_copy.unlock()
+
+ def test_failing_to_lock_tree_write_branch_does_not_lock(self):
+ """If the branch cannot be read locked, dont lock the tree."""
+ # Many implementations treat read-locks as non-blocking, but some
+ # treat them as blocking with writes.. Accordingly we test this by
+ # opening the branch twice, and locking the branch for write in the
+ # second instance. Our lock contract requires separate instances to
+ # mutually exclude if a lock is exclusive at all: If we get no error
+ # locking, the test still passes.
+ wt = self.make_branch_and_tree('.')
+ branch_copy = branch.Branch.open('.')
+
+ branch_copy.lock_write()
+ try:
+ try:
+ wt.lock_tree_write()
+ except errors.LockError:
+ # any error here means the locks are exclusive in some
+ # manner
+ self.assertFalse(wt.is_locked())
+ self.assertFalse(wt.branch.is_locked())
+ return
+ else:
+ # no error - the branch allows read locks while writes
+ # are taken, just pass.
+ wt.unlock()
+ finally:
+ branch_copy.unlock()
diff --git a/bzrlib/tests/per_workingtree/test_merge_from_branch.py b/bzrlib/tests/per_workingtree/test_merge_from_branch.py
new file mode 100644
index 0000000..d8e149f
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_merge_from_branch.py
@@ -0,0 +1,254 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the WorkingTree.merge_from_branch api."""
+
+import os
+
+from bzrlib import (
+ conflicts,
+ errors,
+ merge,
+ )
+from bzrlib.tests import per_workingtree
+
+
+class TestMergeFromBranch(per_workingtree.TestCaseWithWorkingTree):
+
+ def create_two_trees_for_merging(self):
+ """Create two trees that can be merged from.
+
+ This sets self.tree_from, self.first_rev, self.tree_to, self.second_rev
+ and self.to_second_rev.
+ """
+ self.tree_from = self.make_branch_and_tree('from')
+ self.first_rev = self.tree_from.commit('first post')
+ self.tree_to = self.tree_from.bzrdir.sprout('to').open_workingtree()
+ self.second_rev = self.tree_from.commit('second rev', allow_pointless=True)
+ self.to_second_rev = self.tree_to.commit('second rev', allow_pointless=True)
+
+ def test_smoking_merge(self):
+ """Smoke test of merge_from_branch."""
+ self.create_two_trees_for_merging()
+ self.tree_to.merge_from_branch(self.tree_from.branch)
+ self.assertEqual([self.to_second_rev, self.second_rev],
+ self.tree_to.get_parent_ids())
+
+ def test_merge_to_revision(self):
+ """Merge from a branch to a revision that is not the tip."""
+ self.create_two_trees_for_merging()
+ self.third_rev = self.tree_from.commit('real_tip')
+ self.tree_to.merge_from_branch(self.tree_from.branch,
+ to_revision=self.second_rev)
+ self.assertEqual([self.to_second_rev, self.second_rev],
+ self.tree_to.get_parent_ids())
+
+ def test_compare_after_merge(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'text-a')])
+ tree_a.add('file')
+ tree_a.commit('added file')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ os.unlink('tree_a/file')
+ tree_a.commit('deleted file')
+ self.build_tree_contents([('tree_b/file', 'text-b')])
+ tree_b.commit('changed file')
+ tree_a.merge_from_branch(tree_b.branch)
+ tree_a.lock_read()
+ self.addCleanup(tree_a.unlock)
+ list(tree_a.iter_changes(tree_a.basis_tree()))
+
+ def test_merge_empty(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'text-a')])
+ tree_a.add('file')
+ tree_a.commit('added file')
+ tree_b = self.make_branch_and_tree('treeb')
+ self.assertRaises(errors.NoCommits, tree_a.merge_from_branch,
+ tree_b.branch)
+ tree_b.merge_from_branch(tree_a.branch)
+
+ def test_merge_base(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'text-a')])
+ tree_a.add('file')
+ tree_a.commit('added file', rev_id='rev_1')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ os.unlink('tree_a/file')
+ tree_a.commit('deleted file')
+ self.build_tree_contents([('tree_b/file', 'text-b')])
+ tree_b.commit('changed file')
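+ # With from_revision equal to tree_b's tip there is nothing left to
+ # merge, so the call is rejected as pointless.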
+ self.assertRaises(errors.PointlessMerge, tree_a.merge_from_branch,
+ tree_b.branch, from_revision=tree_b.branch.last_revision())
+ tree_a.merge_from_branch(tree_b.branch, from_revision='rev_1')
+ tree_a.lock_read()
+ self.addCleanup(tree_a.unlock)
+ changes = list(tree_a.iter_changes(tree_a.basis_tree()))
+ self.assertEqual(1, len(changes))
+
+ def test_merge_type(self):
+ this = self.make_branch_and_tree('this')
+ self.build_tree_contents([('this/foo', 'foo')])
+ this.add('foo', 'foo-id')
+ this.commit('added foo')
+ other = this.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('other/foo', 'bar')])
+ other.commit('content -> bar')
+ self.build_tree_contents([('this/foo', 'baz')])
+ this.commit('content -> baz')
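+ # A merger that resolves every text merge to the literal content
+ # 'qux', making it easy to verify that merge_type was honoured.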
+ class QuxMerge(merge.Merge3Merger):
+ def text_merge(self, file_id, trans_id):
+ self.tt.create_file('qux', trans_id)
+ this.merge_from_branch(other.branch, merge_type=QuxMerge)
+ self.assertEqual('qux', this.get_file_text('foo-id'))
+
+
+class TestMergedBranch(per_workingtree.TestCaseWithWorkingTree):
+
+ def make_inner_branch(self):
+ bld_inner = self.make_branch_builder('inner')
+ bld_inner.start_series()
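+ # Revision graph: '1' is the base, '4' and '5' extend one line of
+ # development, while '3' and '2' are independent revisions built on '1'.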
+ bld_inner.build_snapshot(
+ '1', None,
+ [('add', ('', 'inner-root-id', 'directory', '')),
+ ('add', ('dir', 'dir-id', 'directory', '')),
+ ('add', ('dir/file1', 'file1-id', 'file', 'file1 content\n')),
+ ('add', ('file3', 'file3-id', 'file', 'file3 content\n')),
+ ])
+ bld_inner.build_snapshot(
+ '4', ['1'],
+ [('add', ('file4', 'file4-id', 'file', 'file4 content\n'))
+ ])
+ bld_inner.build_snapshot(
+ '5', ['4'], [('rename', ('file4', 'dir/file4'))])
+ bld_inner.build_snapshot(
+ '3', ['1'], [('modify', ('file3-id', 'new file3 contents\n')),])
+ bld_inner.build_snapshot(
+ '2', ['1'],
+ [('add', ('dir/file2', 'file2-id', 'file', 'file2 content\n')),
+ ])
+ bld_inner.finish_series()
+ br = bld_inner.get_branch()
+ return br
+
+ def assertTreeLayout(self, expected, tree):
+ tree.lock_read()
+ try:
+ actual = [e[0] for e in tree.list_files()]
+ # list_files doesn't guarantee order
+ actual = sorted(actual)
+ self.assertEqual(expected, actual)
+ finally:
+ tree.unlock()
+
+ def make_outer_tree(self):
+ outer = self.make_branch_and_tree('outer')
+ self.build_tree_contents([('outer/foo', 'foo')])
+ outer.add('foo', 'foo-id')
+ outer.commit('added foo')
+ inner = self.make_inner_branch()
+ outer.merge_from_branch(inner, to_revision='1', from_revision='null:')
+ # Retain the original root id.
+ outer.set_root_id(outer.basis_tree().get_root_id())
+ outer.commit('merge inner branch')
+ outer.mkdir('dir-outer', 'dir-outer-id')
+ outer.move(['dir', 'file3'], to_dir='dir-outer')
+ outer.commit('rename imported dir and file3 to dir-outer')
+ return outer, inner
+
+ def test_file1_deleted_in_dir(self):
+ outer, inner = self.make_outer_tree()
+ outer.remove(['dir-outer/dir/file1'], keep_files=False)
+ outer.commit('delete file1')
+ outer.merge_from_branch(inner)
+ outer.commit('merge the rest')
+ self.assertTreeLayout(['dir-outer',
+ 'dir-outer/dir',
+ 'dir-outer/dir/file2',
+ 'dir-outer/file3',
+ 'foo'],
+ outer)
+
+ def test_file3_deleted_in_root(self):
+ # Reproduce bug #375898
+ outer, inner = self.make_outer_tree()
+ outer.remove(['dir-outer/file3'], keep_files=False)
+ outer.commit('delete file3')
+ outer.merge_from_branch(inner)
+ outer.commit('merge the rest')
+ self.assertTreeLayout(['dir-outer',
+ 'dir-outer/dir',
+ 'dir-outer/dir/file1',
+ 'dir-outer/dir/file2',
+ 'foo'],
+ outer)
+
+
+ def test_file3_in_root_conflicted(self):
+ outer, inner = self.make_outer_tree()
+ outer.remove(['dir-outer/file3'], keep_files=False)
+ outer.commit('delete file3')
+ nb_conflicts = outer.merge_from_branch(inner, to_revision='3')
+ self.assertEqual(4, nb_conflicts)
+ self.assertTreeLayout(['dir-outer',
+ 'dir-outer/dir',
+ 'dir-outer/dir/file1',
+ # Ideally the conflict helpers should be in
+ # dir-outer/dir but since we can't easily find
+ # back the file3 -> outer-dir/dir rename, root
+ # is good enough -- vila 20100401
+ 'file3.BASE',
+ 'file3.OTHER',
+ 'foo'],
+ outer)
+
+ def test_file4_added_in_root(self):
+ outer, inner = self.make_outer_tree()
+ nb_conflicts = outer.merge_from_branch(inner, to_revision='4')
+ # file4 could not be added to its original root, so it gets added to
+ # the new root with a conflict.
+ self.assertEqual(1, nb_conflicts)
+ self.assertTreeLayout(['dir-outer',
+ 'dir-outer/dir',
+ 'dir-outer/dir/file1',
+ 'dir-outer/file3',
+ 'file4',
+ 'foo'],
+ outer)
+
+ def test_file4_added_then_renamed(self):
+ outer, inner = self.make_outer_tree()
+ # 1 conflict, because file4 can't be put into the old root
+ self.assertEqual(1, outer.merge_from_branch(inner, to_revision='4'))
+ try:
+ outer.set_conflicts(conflicts.ConflictList())
+ except errors.UnsupportedOperation:
+ # WT2 doesn't have a separate list of conflicts to clear. It
+ # actually says there is a conflict, but happily forgets all about
+ # it.
+ pass
+ outer.commit('added file4')
+ # And now file4 gets renamed into an existing dir
+ nb_conflicts = outer.merge_from_branch(inner, to_revision='5')
+ self.assertEqual(1, nb_conflicts)
+ self.assertTreeLayout(['dir-outer',
+ 'dir-outer/dir',
+ 'dir-outer/dir/file1',
+ 'dir-outer/dir/file4',
+ 'dir-outer/file3',
+ 'foo'],
+ outer)
diff --git a/bzrlib/tests/per_workingtree/test_mkdir.py b/bzrlib/tests/per_workingtree/test_mkdir.py
new file mode 100644
index 0000000..7242c78
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_mkdir.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'workingtree.put_mkdir'"""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestMkdir(TestCaseWithWorkingTree):
+
+ def test_mkdir_no_id(self):
+ t = self.make_branch_and_tree('t1')
+ t.lock_write()
+ self.addCleanup(t.unlock)
+ file_id = t.mkdir('path')
+ self.assertEqual('directory', t.kind(file_id))
+
+ def test_mkdir_with_id(self):
+ t = self.make_branch_and_tree('t1')
+ t.lock_write()
+ self.addCleanup(t.unlock)
+ file_id = t.mkdir('path', 'my-id')
+ self.assertEqual('my-id', file_id)
+ self.assertEqual('directory', t.kind(file_id))
diff --git a/bzrlib/tests/per_workingtree/test_move.py b/bzrlib/tests/per_workingtree/test_move.py
new file mode 100644
index 0000000..aa1c421
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_move.py
@@ -0,0 +1,563 @@
+# Copyright (C) 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'WorkingTree.move'"""
+
+import os
+
+from bzrlib import (
+ errors,
+ osutils,
+ tests,
+ )
+
+from bzrlib.tests.matchers import HasLayout
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestMove(TestCaseWithWorkingTree):
+
+ def assertTreeLayout(self, expected, tree):
+ """Check that the tree has the correct layout."""
+ self.assertThat(tree, HasLayout(expected))
+
+ def test_move_via_rm_and_add(self):
+ """Move by remove and add-with-id"""
+ self.build_tree(['a1', 'b1'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1'], ids=['a1-id'])
+ tree.commit('initial commit')
+ tree.remove('a1', force=True, keep_files=False)
+ tree.add(['b1'], ids=['a1-id'])
+ tree._validate()
+
+ def test_move_correct_call_named(self):
+ """tree.move has the deprecated parameter 'to_name'.
+ It has been replaced by 'to_dir' for consistency.
+        Test the new API using a named parameter.
+ """
+ self.build_tree(['a1', 'sub1/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1', 'sub1'])
+ tree.commit('initial commit')
+ self.assertEqual([('a1', 'sub1/a1')],
+ tree.move(['a1'], to_dir='sub1', after=False))
+ tree._validate()
+
+ def test_move_correct_call_unnamed(self):
+ """tree.move has the deprecated parameter 'to_name'.
+ It has been replaced by 'to_dir' for consistency.
+        Test the new API using an unnamed (positional) parameter.
+ """
+ self.build_tree(['a1', 'sub1/'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a1', 'sub1'])
+ tree.commit('initial commit')
+ self.assertEqual([('a1', 'sub1/a1')],
+ tree.move(['a1'], 'sub1', after=False))
+ tree._validate()
+
+ def test_move_target_not_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['a'], 'not-a-dir')
+ tree._validate()
+
+ def test_move_non_existent(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/'])
+ tree.add(['a'])
+ tree.commit('initial', rev_id='rev-1')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['not-a-file'], 'a')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['not-a-file'], '')
+ tree._validate()
+
+ def test_move_target_not_versioned(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'b'])
+ tree.add(['b'])
+ tree.commit('initial', rev_id='rev-1')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['b'], 'a')
+ tree._validate()
+
+ def test_move_unversioned(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'b'])
+ tree.add(['a'])
+ tree.commit('initial', rev_id='rev-1')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['b'], 'a')
+ tree._validate()
+
+ def test_move_multi_unversioned(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'b', 'c', 'd'])
+ tree.add(['a', 'c', 'd'], ['a-id', 'c-id', 'd-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['c', 'b', 'd'], 'a')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['b', 'c', 'd'], 'a')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['d', 'c', 'b'], 'a')
+ if osutils.lexists('a/c'):
+ # If 'c' was actually moved, then 'd' should have also been moved
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'),
+ ('a/c', 'c-id'), ('a/d', 'd-id')], tree)
+ else:
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c', 'c-id'),
+ ('d', 'd-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c', 'c-id'),
+ ('d', 'd-id')], tree.basis_tree())
+ tree._validate()
+
+ def test_move_over_deleted(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'b'])
+ tree.add(['a', 'a/b', 'b'], ['a-id', 'ab-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+
+ root_id = tree.get_root_id()
+ tree.remove(['a/b'], keep_files=False)
+ self.assertEqual([('b', 'a/b')], tree.move(['b'], 'a'))
+ self.assertTreeLayout([('', root_id),
+ ('a/', 'a-id'),
+ ('a/b', 'b-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_subdir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree.basis_tree())
+ a_contents = tree.get_file_text('a-id')
+ self.assertEqual([('a', 'b/a')],
+ tree.move(['a'], 'b'))
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('b/a', 'a-id'),
+ ('b/c', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree.basis_tree())
+ self.assertPathDoesNotExist('a')
+ self.assertFileEqual(a_contents, 'b/a')
+ tree._validate()
+
+ def test_move_parent_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ c_contents = tree.get_file_text('c-id')
+ self.assertEqual([('b/c', 'c')],
+ tree.move(['b/c'], ''))
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('c', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree.basis_tree())
+ self.assertPathDoesNotExist('b/c')
+ self.assertFileEqual(c_contents, 'c')
+ tree._validate()
+
+ def test_move_fail_consistent(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/a', 'c'])
+ tree.add(['a', 'b', 'c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ # Target already exists
+ self.assertRaises(errors.RenameFailedFilesExist,
+ tree.move, ['c', 'a'], 'b')
+ # 'c' may or may not have been moved, but either way the tree should
+ # maintain a consistent state.
+ if osutils.lexists('c'):
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('c', 'c-id')], tree)
+ else:
+ self.assertPathExists('b/c')
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('c', 'c-id')], tree.basis_tree())
+ tree._validate()
+
+ def test_move_onto_self(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['b/', 'b/a'])
+ tree.add(['b', 'b/a'], ['b-id', 'a-id'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['b/a'], 'b')
+ tree._validate()
+
+ def test_move_onto_self_root(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['a'], 'a')
+ tree._validate()
+
+ def test_move_after(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b/a')
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ # We don't need after=True as long as source is missing and target
+ # exists.
+ self.assertEqual([('a', 'b/a')],
+ tree.move(['a'], 'b'))
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('b/a', 'a-id')],
+ tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ tree._validate()
+
+ def test_move_after_with_after(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b/a')
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ # Passing after=True should work as well
+ self.assertEqual([('a', 'b/a')],
+ tree.move(['a'], 'b', after=True))
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('b/a', 'a-id')],
+ tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ tree._validate()
+
+ def test_move_after_no_target(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+        # Passing after=True when the file hasn't been moved raises an
+        # exception.
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ['a'], 'b', after=True)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ tree._validate()
+
+ def test_move_after_source_and_dest(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/a'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ # TODO: jam 20070225 I would usually use 'rb', but assertFileEqual
+ # uses 'r'.
+ a_file = open('a', 'r')
+ try:
+ a_text = a_file.read()
+ finally:
+ a_file.close()
+ ba_file = open('b/a', 'r')
+ try:
+ ba_text = ba_file.read()
+ finally:
+ ba_file.close()
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ self.assertRaises(errors.RenameFailedFilesExist,
+ tree.move, ['a'], 'b', after=False)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ self.assertFileEqual(a_text, 'a')
+ self.assertFileEqual(ba_text, 'b/a')
+ # But you can pass after=True
+ self.assertEqual([('a', 'b/a')],
+ tree.move(['a'], 'b', after=True))
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('b/a', 'a-id')],
+ tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ # But it shouldn't actually move anything
+ self.assertFileEqual(a_text, 'a')
+ self.assertFileEqual(ba_text, 'b/a')
+ tree._validate()
+
+ def test_move_directory(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c/', 'a/c/d', 'e/'])
+ tree.add(['a', 'a/b', 'a/c', 'a/c/d', 'e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'e-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ self.assertEqual([('a', 'e/a')],
+ tree.move(['a'], 'e'))
+ self.assertTreeLayout([('', root_id), ('e/', 'e-id'), ('e/a/', 'a-id'),
+ ('e/a/b', 'b-id'), ('e/a/c/', 'c-id'),
+ ('e/a/c/d', 'd-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('e/', 'e-id'),
+ ('a/b', 'b-id'), ('a/c/', 'c-id'),
+ ('a/c/d', 'd-id')], tree.basis_tree())
+ tree._validate()
+
+ def test_move_directory_into_parent(self):
+ if not self.workingtree_format.supports_versioned_directories:
+ raise tests.TestNotApplicable(
+ "test requires versioned directories")
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['c/', 'c/b/', 'c/b/d/'])
+ tree.add(['c', 'c/b', 'c/b/d'],
+ ['c-id', 'b-id', 'd-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ self.assertEqual([('c/b', 'b')],
+ tree.move(['c/b'], ''))
+ self.assertTreeLayout([('', root_id),
+ ('b/', 'b-id'),
+ ('c/', 'c-id'),
+ ('b/d/', 'd-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_directory_with_children_in_subdir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c/', 'd/'])
+ tree.add(['a', 'a/b', 'a/c', 'd'],
+ ['a-id', 'b-id', 'c-id', 'd-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ tree.rename_one('a/b', 'a/c/b')
+ self.assertTreeLayout([('', root_id),
+ ('a/', 'a-id'),
+ ('d/', 'd-id'),
+ ('a/c/', 'c-id'),
+ ('a/c/b', 'b-id'),
+ ], tree)
+ self.assertEqual([('a', 'd/a')],
+ tree.move(['a'], 'd'))
+ self.assertTreeLayout([('', root_id),
+ ('d/', 'd-id'),
+ ('d/a/', 'a-id'),
+ ('d/a/c/', 'c-id'),
+ ('d/a/c/b', 'b-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_directory_with_deleted_children(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c', 'a/d', 'b/'])
+ tree.add(['a', 'b', 'a/b', 'a/c', 'a/d'],
+ ['a-id', 'b-id', 'ab-id', 'ac-id', 'ad-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ tree.remove(['a/b', 'a/d'])
+
+ self.assertEqual([('a', 'b/a')],
+ tree.move(['a'], 'b'))
+ self.assertTreeLayout([('', root_id),
+ ('b/', 'b-id'),
+ ('b/a/', 'a-id'),
+ ('b/a/c', 'ac-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_directory_with_new_children(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/c', 'b/'])
+ tree.add(['a', 'b', 'a/c'], ['a-id', 'b-id', 'ac-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ self.build_tree(['a/b', 'a/d'])
+ tree.add(['a/b', 'a/d'], ['ab-id', 'ad-id'])
+
+ self.assertEqual([('a', 'b/a')],
+ tree.move(['a'], 'b'))
+ self.assertTreeLayout([('', root_id),
+ ('b/', 'b-id'),
+ ('b/a/', 'a-id'),
+ ('b/a/b', 'ab-id'),
+ ('b/a/c', 'ac-id'),
+ ('b/a/d', 'ad-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_directory_with_moved_children(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c', 'd', 'e/'])
+ tree.add(['a', 'a/b', 'a/c', 'd', 'e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'e-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ self.assertEqual([('a/b', 'b')],
+ tree.move(['a/b'], ''))
+ self.assertTreeLayout([('', root_id),
+ ('a/', 'a-id'),
+ ('b', 'b-id'),
+ ('d', 'd-id'),
+ ('e/', 'e-id'),
+ ('a/c', 'c-id'),
+ ], tree)
+ self.assertEqual([('d', 'a/d')],
+ tree.move(['d'], 'a'))
+ self.assertTreeLayout([('', root_id),
+ ('a/', 'a-id'),
+ ('b', 'b-id'),
+ ('e/', 'e-id'),
+ ('a/c', 'c-id'),
+ ('a/d', 'd-id'),
+ ], tree)
+ self.assertEqual([('a', 'e/a')],
+ tree.move(['a'], 'e'))
+ self.assertTreeLayout([('', root_id),
+ ('b', 'b-id'),
+ ('e/', 'e-id'),
+ ('e/a/', 'a-id'),
+ ('e/a/c', 'c-id'),
+ ('e/a/d', 'd-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_directory_with_renamed_child(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c', 'd/'])
+ tree.add(['a', 'a/b', 'a/c', 'd'],
+ ['a-id', 'b-id', 'c-id', 'd-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ tree.rename_one('a/b', 'a/d')
+ self.assertTreeLayout([('', root_id),
+ ('a/', 'a-id'),
+ ('d/', 'd-id'),
+ ('a/c', 'c-id'),
+ ('a/d', 'b-id'),
+ ], tree)
+ self.assertEqual([('a', 'd/a')],
+ tree.move(['a'], 'd'))
+ self.assertTreeLayout([('', root_id),
+ ('d/', 'd-id'),
+ ('d/a/', 'a-id'),
+ ('d/a/c', 'c-id'),
+ ('d/a/d', 'b-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_directory_with_swapped_children(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c', 'a/d', 'e/'])
+ tree.add(['a', 'a/b', 'a/c', 'a/d', 'e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'e-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ tree.rename_one('a/b', 'a/bb')
+ tree.rename_one('a/d', 'a/b')
+ tree.rename_one('a/bb', 'a/d')
+ self.assertTreeLayout([('', root_id),
+ ('a/', 'a-id'),
+ ('e/', 'e-id'),
+ ('a/b', 'd-id'),
+ ('a/c', 'c-id'),
+ ('a/d', 'b-id'),
+ ], tree)
+ self.assertEqual([('a', 'e/a')],
+ tree.move(['a'], 'e'))
+ self.assertTreeLayout([('', root_id),
+ ('e/', 'e-id'),
+ ('e/a/', 'a-id'),
+ ('e/a/b', 'd-id'),
+ ('e/a/c', 'c-id'),
+ ('e/a/d', 'b-id'),
+ ], tree)
+ tree._validate()
+
+ def test_move_moved(self):
+ """Moving a moved entry works as expected."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'c/'])
+ tree.add(['a', 'a/b', 'c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ self.assertEqual([('a/b', 'c/b')],
+ tree.move(['a/b'], 'c'))
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c/', 'c-id'),
+ ('c/b', 'b-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c/', 'c-id'),
+ ('a/b', 'b-id')], tree.basis_tree())
+
+ self.assertEqual([('c/b', 'b')],
+ tree.move(['c/b'], ''))
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('b', 'b-id'),
+ ('c/', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c/', 'c-id'),
+ ('a/b', 'b-id')], tree.basis_tree())
+ tree._validate()
+
+ def test_move_to_unversioned_non_ascii_dir(self):
+ """Check error when moving to unversioned non-ascii directory"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["a", u"\xA7/"])
+ tree.add(["a"])
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, ["a"], u"\xA7")
+ self.assertIsInstance(e.extra, errors.NotVersionedError)
+ self.assertEqual(e.extra.path, u"\xA7")
+
+ def test_move_unversioned_non_ascii(self):
+ """Check error when moving an unversioned non-ascii file"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree([u"\xA7", "dir/"])
+ tree.add("dir")
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.move, [u"\xA7"], "dir")
+ self.assertIsInstance(e.extra, errors.NotVersionedError)
+ self.assertEqual(e.extra.path, u"\xA7")
diff --git a/bzrlib/tests/per_workingtree/test_nested_specifics.py b/bzrlib/tests/per_workingtree/test_nested_specifics.py
new file mode 100644
index 0000000..7e5e5ca
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_nested_specifics.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2007 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ inventory,
+ )
+from bzrlib.tests import TestNotApplicable
+from bzrlib.transform import TreeTransform
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestNestedSupport(TestCaseWithWorkingTree):
+
+ def make_branch_and_tree(self, path):
+ tree = TestCaseWithWorkingTree.make_branch_and_tree(self, path)
+ if not tree.supports_tree_reference():
+ raise TestNotApplicable('Tree references not supported')
+ return tree
+
+ def test_set_get_tree_reference(self):
+ """This tests that setting a tree reference is persistent."""
+ tree = self.make_branch_and_tree('.')
+ transform = TreeTransform(tree)
+ trans_id = transform.new_directory('reference', transform.root,
+ 'subtree-id')
+ transform.set_tree_reference('subtree-revision', trans_id)
+ transform.apply()
+ tree = tree.bzrdir.open_workingtree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('subtree-revision',
+ tree.root_inventory['subtree-id'].reference_revision)
+
+ def test_extract_while_locked(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['subtree/'])
+ tree.add(['subtree'], ['subtree-id'])
+ subtree = tree.extract('subtree-id')
+
+ def prepare_with_subtree(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ subtree = self.make_branch_and_tree('subtree')
+ tree.add(['subtree'], ['subtree-id'])
+ return tree
+
+ def test_kind_does_not_autodetect_subtree(self):
+ tree = self.prepare_with_subtree()
+ self.assertEqual('directory', tree.kind('subtree-id'))
+
+ def test_comparison_data_does_not_autodetect_subtree(self):
+ tree = self.prepare_with_subtree()
+ ie = inventory.InventoryDirectory('subtree-id', 'subtree',
+ tree.path2id(''))
+ self.assertEqual('directory',
+ tree._comparison_data(ie, 'subtree')[0])
+
+ def test_inventory_does_not_autodetect_subtree(self):
+ tree = self.prepare_with_subtree()
+ self.assertEqual('directory', tree.kind('subtree-id'))
+
+ def test_iter_entries_by_dir_autodetects_subtree(self):
+ tree = self.prepare_with_subtree()
+ path, ie = tree.iter_entries_by_dir(['subtree-id']).next()
+ self.assertEqual('tree-reference', ie.kind)
diff --git a/bzrlib/tests/per_workingtree/test_parents.py b/bzrlib/tests/per_workingtree/test_parents.py
new file mode 100644
index 0000000..ffbf21e
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_parents.py
@@ -0,0 +1,764 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests of the parent related functions of WorkingTrees."""
+
+from cStringIO import StringIO
+import os
+
+from bzrlib import (
+ errors,
+ osutils,
+ revision as _mod_revision,
+ tests,
+ )
+from bzrlib.inventory import (
+ Inventory,
+ InventoryFile,
+ InventoryDirectory,
+ InventoryLink,
+ )
+from bzrlib.revisiontree import InventoryRevisionTree
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.tests import (
+ features,
+ )
+from bzrlib.uncommit import uncommit
+
+
+class TestParents(TestCaseWithWorkingTree):
+
+ def assertConsistentParents(self, expected, tree):
+ """Check that the parents found are as expected.
+
+ This test helper also checks that they are consistent with
+ the pre-get_parent_ids() api - which is now deprecated.
+ """
+ self.assertEqual(expected, tree.get_parent_ids())
+ if expected == []:
+ self.assertEqual(_mod_revision.NULL_REVISION,
+ _mod_revision.ensure_null(tree.last_revision()))
+ else:
+ self.assertEqual(expected[0], tree.last_revision())
+
+
+class TestGetParents(TestParents):
+
+ def test_get_parents(self):
+ t = self.make_branch_and_tree('.')
+ self.assertEqual([], t.get_parent_ids())
+
+
+class TestSetParents(TestParents):
+
+ def test_set_no_parents(self):
+ t = self.make_branch_and_tree('.')
+ t.set_parent_trees([])
+ self.assertEqual([], t.get_parent_ids())
+ # now give it a real parent, and then set it to no parents again.
+ t.commit('first post')
+ t.set_parent_trees([])
+ self.assertConsistentParents([], t)
+
+ def test_set_null_parent(self):
+ t = self.make_branch_and_tree('.')
+ self.assertRaises(errors.ReservedId, t.set_parent_ids, ['null:'],
+ allow_leftmost_as_ghost=True)
+ self.assertRaises(errors.ReservedId, t.set_parent_trees,
+ [('null:', None)], allow_leftmost_as_ghost=True)
+
+ def test_set_one_ghost_parent_rejects(self):
+ t = self.make_branch_and_tree('.')
+ self.assertRaises(errors.GhostRevisionUnusableHere,
+ t.set_parent_trees, [('missing-revision-id', None)])
+
+ def test_set_one_ghost_parent_force(self):
+ t = self.make_branch_and_tree('.')
+ t.set_parent_trees([('missing-revision-id', None)],
+ allow_leftmost_as_ghost=True)
+ self.assertConsistentParents(['missing-revision-id'], t)
+
+ def test_set_two_parents_one_ghost(self):
+ t = self.make_branch_and_tree('.')
+ revision_in_repo = t.commit('first post')
+ # remove the tree's history
+ uncommit(t.branch, tree=t)
+ rev_tree = t.branch.repository.revision_tree(revision_in_repo)
+ t.set_parent_trees([(revision_in_repo, rev_tree),
+ ('another-missing', None)])
+ self.assertConsistentParents([revision_in_repo, 'another-missing'], t)
+
+ def test_set_three_parents(self):
+ t = self.make_branch_and_tree('.')
+ first_revision = t.commit('first post')
+ uncommit(t.branch, tree=t)
+ second_revision = t.commit('second post')
+ uncommit(t.branch, tree=t)
+ third_revision = t.commit('third post')
+ uncommit(t.branch, tree=t)
+ rev_tree1 = t.branch.repository.revision_tree(first_revision)
+ rev_tree2 = t.branch.repository.revision_tree(second_revision)
+ rev_tree3 = t.branch.repository.revision_tree(third_revision)
+ t.set_parent_trees([(first_revision, rev_tree1),
+ (second_revision, rev_tree2),
+ (third_revision, rev_tree3)])
+ self.assertConsistentParents(
+ [first_revision, second_revision, third_revision], t)
+
+ def test_set_no_parents_ids(self):
+ t = self.make_branch_and_tree('.')
+ t.set_parent_ids([])
+ self.assertEqual([], t.get_parent_ids())
+ # now give it a real parent, and then set it to no parents again.
+ t.commit('first post')
+ t.set_parent_ids([])
+ self.assertConsistentParents([], t)
+
+ def test_set_one_ghost_parent_ids_rejects(self):
+ t = self.make_branch_and_tree('.')
+ self.assertRaises(errors.GhostRevisionUnusableHere,
+ t.set_parent_ids, ['missing-revision-id'])
+
+ def test_set_one_ghost_parent_ids_force(self):
+ t = self.make_branch_and_tree('.')
+ t.set_parent_ids(['missing-revision-id'],
+ allow_leftmost_as_ghost=True)
+ self.assertConsistentParents(['missing-revision-id'], t)
+
+ def test_set_two_parents_one_ghost_ids(self):
+ t = self.make_branch_and_tree('.')
+ revision_in_repo = t.commit('first post')
+ # remove the tree's history
+ uncommit(t.branch, tree=t)
+ rev_tree = t.branch.repository.revision_tree(revision_in_repo)
+ t.set_parent_ids([revision_in_repo, 'another-missing'])
+ self.assertConsistentParents([revision_in_repo, 'another-missing'], t)
+
+ def test_set_three_parents_ids(self):
+ t = self.make_branch_and_tree('.')
+ first_revision = t.commit('first post')
+ uncommit(t.branch, tree=t)
+ second_revision = t.commit('second post')
+ uncommit(t.branch, tree=t)
+ third_revision = t.commit('third post')
+ uncommit(t.branch, tree=t)
+ rev_tree1 = t.branch.repository.revision_tree(first_revision)
+ rev_tree2 = t.branch.repository.revision_tree(second_revision)
+ rev_tree3 = t.branch.repository.revision_tree(third_revision)
+ t.set_parent_ids([first_revision, second_revision, third_revision])
+ self.assertConsistentParents(
+ [first_revision, second_revision, third_revision], t)
+
+ def test_set_duplicate_parent_ids(self):
+ t = self.make_branch_and_tree('.')
+ rev1 = t.commit('first post')
+ uncommit(t.branch, tree=t)
+ rev2 = t.commit('second post')
+ uncommit(t.branch, tree=t)
+ rev3 = t.commit('third post')
+ uncommit(t.branch, tree=t)
+ t.set_parent_ids([rev1, rev2, rev2, rev3])
+ # We strip the duplicate, but preserve the ordering
+ self.assertConsistentParents([rev1, rev2, rev3], t)
+
+ def test_set_duplicate_parent_trees(self):
+ t = self.make_branch_and_tree('.')
+ rev1 = t.commit('first post')
+ uncommit(t.branch, tree=t)
+ rev2 = t.commit('second post')
+ uncommit(t.branch, tree=t)
+ rev3 = t.commit('third post')
+ uncommit(t.branch, tree=t)
+ rev_tree1 = t.branch.repository.revision_tree(rev1)
+ rev_tree2 = t.branch.repository.revision_tree(rev2)
+ rev_tree3 = t.branch.repository.revision_tree(rev3)
+ t.set_parent_trees([(rev1, rev_tree1), (rev2, rev_tree2),
+ (rev2, rev_tree2), (rev3, rev_tree3)])
+ # We strip the duplicate, but preserve the ordering
+ self.assertConsistentParents([rev1, rev2, rev3], t)
+
+ def test_set_parent_ids_in_ancestry(self):
+ t = self.make_branch_and_tree('.')
+ rev1 = t.commit('first post')
+ rev2 = t.commit('second post')
+ rev3 = t.commit('third post')
+ # Reset the tree, back to rev1
+ t.set_parent_ids([rev1])
+ t.branch.set_last_revision_info(1, rev1)
+ self.assertConsistentParents([rev1], t)
+ t.set_parent_ids([rev1, rev2, rev3])
+ # rev2 is in the ancestry of rev3, so it will be filtered out
+ self.assertConsistentParents([rev1, rev3], t)
+ # Order should be preserved, and the first revision should always be
+ # kept
+ t.set_parent_ids([rev2, rev3, rev1])
+ self.assertConsistentParents([rev2, rev3], t)
+
+ def test_set_parent_trees_in_ancestry(self):
+ t = self.make_branch_and_tree('.')
+ rev1 = t.commit('first post')
+ rev2 = t.commit('second post')
+ rev3 = t.commit('third post')
+ # Reset the tree, back to rev1
+ t.set_parent_ids([rev1])
+ t.branch.set_last_revision_info(1, rev1)
+ self.assertConsistentParents([rev1], t)
+ rev_tree1 = t.branch.repository.revision_tree(rev1)
+ rev_tree2 = t.branch.repository.revision_tree(rev2)
+ rev_tree3 = t.branch.repository.revision_tree(rev3)
+ t.set_parent_trees([(rev1, rev_tree1), (rev2, rev_tree2),
+ (rev3, rev_tree3)])
+ # rev2 is in the ancestry of rev3, so it will be filtered out
+ self.assertConsistentParents([rev1, rev3], t)
+ # Order should be preserved, and the first revision should always be
+ # kept
+ t.set_parent_trees([(rev2, rev_tree2), (rev1, rev_tree1),
+ (rev3, rev_tree3)])
+ self.assertConsistentParents([rev2, rev3], t)
+
+ def test_unicode_symlink(self):
+ # this tests bug #272444
+ self.requireFeature(features.SymlinkFeature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+
+ tree = self.make_branch_and_tree('tree1')
+
+ # The link points to a file whose name is an omega
+ # U+03A9 GREEK CAPITAL LETTER OMEGA
+ # UTF-8: ce a9 UTF-16BE: 03a9 Decimal: &#937;
+ target = u'\u03a9'
+ link_name = u'\N{Euro Sign}link'
+ os.symlink(target, 'tree1/' + link_name)
+ tree.add([link_name], ['link-id'])
+
+ revision1 = tree.commit('added a link to a Unicode target')
+ revision2 = tree.commit('this revision will be discarded')
+ tree.set_parent_ids([revision1])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ # Check that the symlink target is safely round-tripped in the trees.
+ self.assertEqual(target, tree.get_symlink_target('link-id'))
+ basis = tree.basis_tree()
+ self.assertEqual(target, basis.get_symlink_target('link-id'))
+
+
+class TestAddParent(TestParents):
+
+ def test_add_first_parent_id(self):
+ """Test adding the first parent id"""
+ tree = self.make_branch_and_tree('.')
+ first_revision = tree.commit('first post')
+ uncommit(tree.branch, tree=tree)
+ tree.add_parent_tree_id(first_revision)
+ self.assertConsistentParents([first_revision], tree)
+
+ def test_add_first_parent_id_ghost_rejects(self):
+ """Test adding the first parent id - as a ghost"""
+ tree = self.make_branch_and_tree('.')
+ self.assertRaises(errors.GhostRevisionUnusableHere,
+ tree.add_parent_tree_id, 'first-revision')
+
+ def test_add_first_parent_id_ghost_force(self):
+ """Test adding the first parent id - as a ghost"""
+ tree = self.make_branch_and_tree('.')
+ tree.add_parent_tree_id('first-revision', allow_leftmost_as_ghost=True)
+ self.assertConsistentParents(['first-revision'], tree)
+
+ def test_add_second_parent_id_with_ghost_first(self):
+ """Test adding the second parent when the first is a ghost."""
+ tree = self.make_branch_and_tree('.')
+ tree.add_parent_tree_id('first-revision', allow_leftmost_as_ghost=True)
+ tree.add_parent_tree_id('second')
+ self.assertConsistentParents(['first-revision', 'second'], tree)
+
+ def test_add_second_parent_id(self):
+ """Test adding the second parent id"""
+ tree = self.make_branch_and_tree('.')
+ first_revision = tree.commit('first post')
+ uncommit(tree.branch, tree=tree)
+ second_revision = tree.commit('second post')
+ tree.add_parent_tree_id(first_revision)
+ self.assertConsistentParents([second_revision, first_revision], tree)
+
+ def test_add_second_parent_id_ghost(self):
+ """Test adding the second parent id - as a ghost"""
+ tree = self.make_branch_and_tree('.')
+ first_revision = tree.commit('first post')
+ tree.add_parent_tree_id('second')
+ self.assertConsistentParents([first_revision, 'second'], tree)
+
+ def test_add_first_parent_tree(self):
+ """Test adding the first parent id"""
+ tree = self.make_branch_and_tree('.')
+ first_revision = tree.commit('first post')
+ uncommit(tree.branch, tree=tree)
+ tree.add_parent_tree((first_revision,
+ tree.branch.repository.revision_tree(first_revision)))
+ self.assertConsistentParents([first_revision], tree)
+
+ def test_add_first_parent_tree_ghost_rejects(self):
+ """Test adding the first parent id - as a ghost"""
+ tree = self.make_branch_and_tree('.')
+ self.assertRaises(errors.GhostRevisionUnusableHere,
+ tree.add_parent_tree, ('first-revision', None))
+
+ def test_add_first_parent_tree_ghost_force(self):
+ """Test adding the first parent id - as a ghost"""
+ tree = self.make_branch_and_tree('.')
+ tree.add_parent_tree(('first-revision', None),
+ allow_leftmost_as_ghost=True)
+ self.assertConsistentParents(['first-revision'], tree)
+
+ def test_add_second_parent_tree(self):
+ """Test adding the second parent id"""
+ tree = self.make_branch_and_tree('.')
+ first_revision = tree.commit('first post')
+ uncommit(tree.branch, tree=tree)
+ second_revision = tree.commit('second post')
+ tree.add_parent_tree((first_revision,
+ tree.branch.repository.revision_tree(first_revision)))
+ self.assertConsistentParents([second_revision, first_revision], tree)
+
+ def test_add_second_parent_tree_ghost(self):
+ """Test adding the second parent id - as a ghost"""
+ tree = self.make_branch_and_tree('.')
+ first_revision = tree.commit('first post')
+ tree.add_parent_tree(('second', None))
+ self.assertConsistentParents([first_revision, 'second'], tree)
+
+
+class UpdateToOneParentViaDeltaTests(TestCaseWithWorkingTree):
+ """Tests for the update_basis_by_delta call.
+
+    This is intuitively defined as 'apply an inventory delta to the basis and
+    discard other parents', but for trees whose inventory is not managed as a
+    tree-by-id, the implementation requires tests that roughly duplicate those
+    for apply_inventory_delta on the main tree.
+ """
+
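+    # A sketch of the inventory-delta format these tests rely on (see
+    # make_inv_delta below): each item is a 4-tuple
+    # (old_path, new_path, file_id, new_entry), where old_path is None for an
+    # add and new_path/new_entry are None for a delete. For example, renaming
+    # 'A' to 'B' and deleting 'C' would produce:
+    #   [('A', 'B', 'dir-id-A', new_shape['dir-id-A']),
+    #    ('C', None, 'file-id-C', None)]
+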
+ def assertDeltaApplicationResultsInExpectedBasis(self, tree, revid, delta,
+ expected_inventory):
+ tree.lock_write()
+ try:
+ tree.update_basis_by_delta(revid, delta)
+ finally:
+ tree.unlock()
+ # check the last revision was adjusted to rev_id
+ self.assertEqual(revid, tree.last_revision())
+ # check the parents are what we expect
+ self.assertEqual([revid], tree.get_parent_ids())
+ # check that the basis tree has the inventory we expect from applying
+ # the delta.
+ result_basis = tree.basis_tree()
+ result_basis.lock_read()
+ try:
+ self.assertEqual(expected_inventory, result_basis.root_inventory)
+ finally:
+ result_basis.unlock()
+
+ def make_inv_delta(self, old, new):
+ """Make an inventory delta from two inventories."""
+ old_ids = set(old._byid.iterkeys())
+ new_ids = set(new._byid.iterkeys())
+ adds = new_ids - old_ids
+ deletes = old_ids - new_ids
+ common = old_ids.intersection(new_ids)
+ delta = []
+ for file_id in deletes:
+ delta.append((old.id2path(file_id), None, file_id, None))
+ for file_id in adds:
+ delta.append((None, new.id2path(file_id), file_id, new[file_id]))
+ for file_id in common:
+ if old[file_id] != new[file_id]:
+ delta.append((old.id2path(file_id), new.id2path(file_id),
+ file_id, new[file_id]))
+ return delta
+
+ def fake_up_revision(self, tree, revid, shape):
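+        """Record 'shape' in the tree's repository as revision 'revid'.
+
+        File texts are synthesised ('a' * text_size) via a fake tree, since
+        only the inventory shape matters for these tests.
+        """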
+
+ class ShapeTree(InventoryRevisionTree):
+
+ def __init__(self, shape):
+ self._repository = tree.branch.repository
+ self._inventory = shape
+
+ def get_file_text(self, file_id, path=None):
+ ie = self.root_inventory[file_id]
+ if ie.kind != "file":
+ return ""
+ return 'a' * ie.text_size
+
+ def get_file(self, file_id, path=None):
+ return StringIO(self.get_file_text(file_id))
+
+ tree.lock_write()
+ try:
+ if shape.root.revision is None:
+ shape.root.revision = revid
+ builder = tree.branch.get_commit_builder(
+ parents=[],
+ timestamp=0,
+ timezone=None,
+ committer="Foo Bar <foo@example.com>",
+ revision_id=revid)
+ shape_tree = ShapeTree(shape)
+ base_tree = tree.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ changes = shape_tree.iter_changes(
+ base_tree)
+ list(builder.record_iter_changes(shape_tree,
+ base_tree.get_revision_id(), changes))
+ builder.finish_inventory()
+ builder.commit("Message")
+ finally:
+ tree.unlock()
+
+ def add_entry(self, inv, rev_id, entry):
+ entry.revision = rev_id
+ inv.add(entry)
+
+ def add_dir(self, inv, rev_id, file_id, parent_id, name):
+ new_dir = InventoryDirectory(file_id, name, parent_id)
+ self.add_entry(inv, rev_id, new_dir)
+
+ def add_file(self, inv, rev_id, file_id, parent_id, name, sha, size):
+ new_file = InventoryFile(file_id, name, parent_id)
+ new_file.text_sha1 = sha
+ new_file.text_size = size
+ self.add_entry(inv, rev_id, new_file)
+
+ def add_link(self, inv, rev_id, file_id, parent_id, name, target):
+ new_link = InventoryLink(file_id, name, parent_id)
+ new_link.symlink_target = target
+ self.add_entry(inv, rev_id, new_link)
+
+ def add_new_root(self, new_shape, old_revid, new_revid):
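+        # Rich-root formats can keep the root's last-changed revision when the
+        # root itself is unchanged; other formats record the new revision on
+        # the root at every commit.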
+ if self.bzrdir_format.repository_format.rich_root_data:
+ self.add_dir(new_shape, old_revid, 'root-id', None, '')
+ else:
+ self.add_dir(new_shape, new_revid, 'root-id', None, '')
+
+ def assertTransitionFromBasisToShape(self, basis_shape, basis_revid,
+ new_shape, new_revid, extra_parent=None):
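+        """Assert that applying the basis_shape -> new_shape delta via
+        update_basis_by_delta leaves the tree with new_shape (at new_revid)
+        as its basis.
+        """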
+ # set the inventory revision ids.
+ basis_shape.revision_id = basis_revid
+ new_shape.revision_id = new_revid
+ delta = self.make_inv_delta(basis_shape, new_shape)
+ tree = self.make_branch_and_tree('tree')
+ # the shapes need to be in the tree's repository to be able to set them
+ # as a parent, but the file content is not needed.
+ if basis_revid is not None:
+ self.fake_up_revision(tree, basis_revid, basis_shape)
+ parents = [basis_revid]
+ if extra_parent is not None:
+ parents.append(extra_parent)
+ tree.set_parent_ids(parents)
+ self.fake_up_revision(tree, new_revid, new_shape)
+ # give tree an inventory of new_shape
+ tree._write_inventory(new_shape)
+ self.assertDeltaApplicationResultsInExpectedBasis(tree, new_revid,
+ delta, new_shape)
+ # The tree should be internally consistent; while this is a moderately
+ # large hammer, this is a particularly sensitive area of code, so the
+ # extra assurance is well worth it.
+ tree._validate()
+ osutils.rmtree('tree')
+
+ def test_no_parents_just_root(self):
+ """Test doing an empty commit - no parent, set a root only."""
+ basis_shape = Inventory(root_id=None) # empty tree
+ new_shape = Inventory() # tree with a root
+ self.assertTransitionFromBasisToShape(basis_shape, None, new_shape,
+ 'new_parent')
+
+ def test_no_parents_full_tree(self):
+ """Test doing a regular initial commit with files and dirs."""
+ basis_shape = Inventory(root_id=None) # empty tree
+ revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_dir(new_shape, revid, 'root-id', None, '')
+ self.add_link(new_shape, revid, 'link-id', 'root-id', 'link', 'target')
+ self.add_file(new_shape, revid, 'file-id', 'root-id', 'file', '1' * 32,
+ 12)
+ self.add_dir(new_shape, revid, 'dir-id', 'root-id', 'dir')
+ self.add_file(new_shape, revid, 'subfile-id', 'dir-id', 'subfile',
+ '2' * 32, 24)
+ self.assertTransitionFromBasisToShape(basis_shape, None, new_shape,
+ revid)
+
+ def test_file_content_change(self):
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_file(basis_shape, old_revid, 'file-id', 'root-id', 'file',
+ '1' * 32, 12)
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_file(new_shape, new_revid, 'file-id', 'root-id', 'file',
+ '2' * 32, 24)
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_link_content_change(self):
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_link(basis_shape, old_revid, 'link-id', 'root-id', 'link',
+ 'old-target')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_link(new_shape, new_revid, 'link-id', 'root-id', 'link',
+ 'new-target')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_kind_changes(self):
+ def do_file(inv, revid):
+ self.add_file(inv, revid, 'path-id', 'root-id', 'path', '1' * 32,
+ 12)
+
+ def do_link(inv, revid):
+ self.add_link(inv, revid, 'path-id', 'root-id', 'path', 'target')
+
+ def do_dir(inv, revid):
+ self.add_dir(inv, revid, 'path-id', 'root-id', 'path')
+
+ for old_factory in (do_file, do_link, do_dir):
+ for new_factory in (do_file, do_link, do_dir):
+ if old_factory == new_factory:
+ continue
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ old_factory(basis_shape, old_revid)
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ new_factory(new_shape, new_revid)
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_content_from_second_parent_is_dropped(self):
+ left_revid = 'left-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, left_revid, 'root-id', None, '')
+ self.add_link(basis_shape, left_revid, 'link-id', 'root-id', 'link',
+ 'left-target')
+ # the right shape has content - file, link, subdir with a child,
+ # that should all be discarded by the call.
+ right_revid = 'right-parent'
+ right_shape = Inventory(root_id=None)
+ self.add_dir(right_shape, left_revid, 'root-id', None, '')
+ self.add_link(right_shape, right_revid, 'link-id', 'root-id', 'link',
+ 'some-target')
+ self.add_dir(right_shape, right_revid, 'subdir-id', 'root-id', 'dir')
+ self.add_file(right_shape, right_revid, 'file-id', 'subdir-id', 'file',
+ '2' * 32, 24)
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, left_revid, new_revid)
+ self.add_link(new_shape, new_revid, 'link-id', 'root-id', 'link',
+ 'new-target')
+ self.assertTransitionFromBasisToShape(basis_shape, left_revid,
+ new_shape, new_revid, right_revid)
+
+ def test_parent_id_changed(self):
+ # test that when the only change to an entry is its parent id changing
+ # that it is handled correctly (that is it keeps the same path)
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'orig-parent-id', 'root-id', 'dir')
+ self.add_dir(basis_shape, old_revid, 'dir-id', 'orig-parent-id', 'dir')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'new-parent-id', 'root-id', 'dir')
+ self.add_dir(new_shape, new_revid, 'dir-id', 'new-parent-id', 'dir')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_name_changed(self):
+        # test that when the only change to an entry is its name changing,
+        # it is handled correctly (that is, it keeps the same parent id)
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'parent-id', 'root-id', 'origdir')
+ self.add_dir(basis_shape, old_revid, 'dir-id', 'parent-id', 'olddir')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'parent-id', 'root-id', 'newdir')
+ self.add_dir(new_shape, new_revid, 'dir-id', 'parent-id', 'newdir')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_parent_child_swap(self):
+ # test a A->A/B and A/B->A path swap.
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_dir(basis_shape, old_revid, 'dir-id-B', 'dir-id-A', 'B')
+ self.add_link(basis_shape, old_revid, 'link-id-C', 'dir-id-B', 'C', 'C')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'dir-id-B', 'root-id', 'A')
+ self.add_dir(new_shape, new_revid, 'dir-id-A', 'dir-id-B', 'B')
+ self.add_link(new_shape, new_revid, 'link-id-C', 'dir-id-A', 'C', 'C')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_parent_deleted_child_renamed(self):
+ # test a A->None and A/B->A.
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_dir(basis_shape, old_revid, 'dir-id-B', 'dir-id-A', 'B')
+ self.add_link(basis_shape, old_revid, 'link-id-C', 'dir-id-B', 'C', 'C')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'dir-id-B', 'root-id', 'A')
+ self.add_link(new_shape, old_revid, 'link-id-C', 'dir-id-B', 'C', 'C')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_dir_to_root(self):
+ # test a A->''.
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_link(basis_shape, old_revid, 'link-id-B', 'dir-id-A', 'B', 'B')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_dir(new_shape, new_revid, 'dir-id-A', None, '')
+ self.add_link(new_shape, old_revid, 'link-id-B', 'dir-id-A', 'B', 'B')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_path_swap(self):
+ # test a A->B and B->A path swap.
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_dir(basis_shape, old_revid, 'dir-id-B', 'root-id', 'B')
+ self.add_link(basis_shape, old_revid, 'link-id-C', 'root-id', 'C', 'C')
+ self.add_link(basis_shape, old_revid, 'link-id-D', 'root-id', 'D', 'D')
+ self.add_file(basis_shape, old_revid, 'file-id-E', 'root-id', 'E',
+ '1' * 32, 12)
+ self.add_file(basis_shape, old_revid, 'file-id-F', 'root-id', 'F',
+ '2' * 32, 24)
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'dir-id-A', 'root-id', 'B')
+ self.add_dir(new_shape, new_revid, 'dir-id-B', 'root-id', 'A')
+ self.add_link(new_shape, new_revid, 'link-id-C', 'root-id', 'D', 'C')
+ self.add_link(new_shape, new_revid, 'link-id-D', 'root-id', 'C', 'D')
+ self.add_file(new_shape, new_revid, 'file-id-E', 'root-id', 'F',
+ '1' * 32, 12)
+ self.add_file(new_shape, new_revid, 'file-id-F', 'root-id', 'E',
+ '2' * 32, 24)
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_adds(self):
+ # test adding paths and dirs, including adding to a newly added dir.
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+        # with a root, so it's a commit after the first.
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_link(new_shape, new_revid, 'link-id-B', 'root-id', 'B', 'C')
+ self.add_file(new_shape, new_revid, 'file-id-C', 'root-id', 'C',
+ '1' * 32, 12)
+ self.add_file(new_shape, new_revid, 'file-id-D', 'dir-id-A', 'D',
+ '2' * 32, 24)
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_removes(self):
+        # test removing paths, including paths that are inside other removed
+        # paths.
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_link(basis_shape, old_revid, 'link-id-B', 'root-id', 'B', 'C')
+ self.add_file(basis_shape, old_revid, 'file-id-C', 'root-id', 'C',
+ '1' * 32, 12)
+ self.add_file(basis_shape, old_revid, 'file-id-D', 'dir-id-A', 'D',
+ '2' * 32, 24)
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_move_to_added_dir(self):
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_link(basis_shape, old_revid, 'link-id-B', 'root-id', 'B', 'C')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_dir(new_shape, new_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_link(new_shape, new_revid, 'link-id-B', 'dir-id-A', 'B', 'C')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_move_from_removed_dir(self):
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_link(basis_shape, old_revid, 'link-id-B', 'dir-id-A', 'B', 'C')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ self.add_link(new_shape, new_revid, 'link-id-B', 'root-id', 'B', 'C')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
+
+ def test_move_moves_children_recursively(self):
+ old_revid = 'old-parent'
+ basis_shape = Inventory(root_id=None)
+ self.add_dir(basis_shape, old_revid, 'root-id', None, '')
+ self.add_dir(basis_shape, old_revid, 'dir-id-A', 'root-id', 'A')
+ self.add_dir(basis_shape, old_revid, 'dir-id-B', 'dir-id-A', 'B')
+ self.add_link(basis_shape, old_revid, 'link-id-C', 'dir-id-B', 'C', 'D')
+ new_revid = 'new-parent'
+ new_shape = Inventory(root_id=None)
+ self.add_new_root(new_shape, old_revid, new_revid)
+ # the moved path:
+ self.add_dir(new_shape, new_revid, 'dir-id-A', 'root-id', 'B')
+ # unmoved children.
+ self.add_dir(new_shape, old_revid, 'dir-id-B', 'dir-id-A', 'B')
+ self.add_link(new_shape, old_revid, 'link-id-C', 'dir-id-B', 'C', 'D')
+ self.assertTransitionFromBasisToShape(basis_shape, old_revid,
+ new_shape, new_revid)
diff --git a/bzrlib/tests/per_workingtree/test_paths2ids.py b/bzrlib/tests/per_workingtree/test_paths2ids.py
new file mode 100644
index 0000000..1b19a5f
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_paths2ids.py
@@ -0,0 +1,201 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTree.paths2ids.
+
+This API probably needs to be exposed as a tree implementation test, but these
+initial tests are for the specific cases being refactored from
+find_ids_across_trees.
+"""
+
+from operator import attrgetter
+
+from bzrlib import errors
+from bzrlib.tests import features
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+# TODO: This needs an additional test: do a merge, then do a
+# paths2ids(trees=left parent only), and also with (trees=all parents) to check
+# that only the requested trees are considered - i.e. have an unversioned path
+# in the unlisted tree, or an extra file that moves into the selected path but
+# should not be returned
+
+# TODO: test that supplying paths with duplication - i.e. foo, foo, foo/bar -
+# does not result in garbage out.
+
+# TODO: Are we meant to raise the precise unversioned paths when some are
+# unversioned - if so, test this.
+
+class TestPaths2Ids(TestCaseWithWorkingTree):
+
+ def assertExpectedIds(self, ids, tree, paths, trees=None,
+ require_versioned=True):
+ """Run paths2ids for tree, and check the result."""
+ tree.lock_read()
+ if trees:
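+            # apply(f) calls f() under Python 2, so this read-locks every
+            # extra tree, queries, then unlocks them all again.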
+ map(apply, map(attrgetter('lock_read'), trees))
+ result = tree.paths2ids(paths, trees,
+ require_versioned=require_versioned)
+ map(apply, map(attrgetter('unlock'), trees))
+ else:
+ result = tree.paths2ids(paths,
+ require_versioned=require_versioned)
+ self.assertEqual(set(ids), result)
+ tree.unlock()
+
+ def test_paths_none_result_none(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_read()
+ self.assertEqual(None, tree.paths2ids(None))
+ tree.unlock()
+
+ def test_find_single_root(self):
+ tree = self.make_branch_and_tree('tree')
+ self.assertExpectedIds([tree.path2id('')], tree, [''])
+
+ def test_find_tree_and_clone_roots(self):
+ tree = self.make_branch_and_tree('tree')
+ clone = tree.bzrdir.clone('clone').open_workingtree()
+ clone.lock_tree_write()
+ clone_root_id = 'new-id'
+ clone.set_root_id(clone_root_id)
+ tree_root_id = tree.path2id('')
+ clone.unlock()
+ self.assertExpectedIds([tree_root_id, clone_root_id], tree, [''], [clone])
+
+ def test_find_tree_basis_roots(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('basis')
+ basis = tree.basis_tree()
+ basis_root_id = basis.path2id('')
+ tree.lock_tree_write()
+ tree_root_id = 'new-id'
+ tree.set_root_id(tree_root_id)
+ tree.unlock()
+ self.assertExpectedIds([tree_root_id, basis_root_id], tree, [''], [basis])
+
+ def test_find_children_of_moved_directories(self):
+ """Check the basic nasty corner case that path2ids should handle.
+
+ This is the following situation:
+ basis:
+ / ROOT
+ /dir dir
+ /dir/child-moves child-moves
+ /dir/child-stays child-stays
+ /dir/child-goes child-goes
+
+ current tree:
+ / ROOT
+ /child-moves child-moves
+ /newdir newdir
+ /newdir/dir dir
+ /newdir/dir/child-stays child-stays
+ /newdir/dir/new-child new-child
+
+ In English: we move a directory under a directory that was a sibling,
+ and at the same time remove some of its children (or move them out of
+ the directory), and give it a new child that was previously absent or
+ was a sibling.
+
+ current_tree.paths2ids(['newdir'], [basis]) is meant to handle this
+ correctly: that is, it should return the ids:
+ newdir because it was provided
+ dir, because it is under newdir in current
+ child-moves because it is under dir in old
+ child-stays either because it is under newdir/dir in current, or under dir in old
+ child-goes because it is under dir in old.
+ new-child because it is under dir in new.
+
+ Symmetrically, current_tree.paths2ids(['dir'], [basis]) is meant to show
+ new-child, even though it is not under the path 'dir' in current, because
+ it is under a path selected by 'dir' in basis:
+ dir because it is selected in basis.
+ child-moves because it is under dir in old
+ child-stays either because it is under newdir/dir in current, or under dir in old
+ child-goes because it is under dir in old.
+ new-child because it is under dir in new.
+ """
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(
+ ['tree/dir/', 'tree/dir/child-moves', 'tree/dir/child-stays',
+ 'tree/dir/child-goes'])
+ tree.add(['dir', 'dir/child-moves', 'dir/child-stays', 'dir/child-goes'],
+ ['dir', 'child-moves', 'child-stays', 'child-goes'])
+ tree.commit('create basis')
+ basis = tree.basis_tree()
+ tree.unversion(['child-goes'])
+ tree.rename_one('dir/child-moves', 'child-moves')
+ self.build_tree(['tree/newdir/'])
+ tree.add(['newdir'], ['newdir'])
+ tree.rename_one('dir/child-stays', 'child-stays')
+ tree.rename_one('dir', 'newdir/dir')
+ tree.rename_one('child-stays', 'newdir/dir/child-stays')
+ self.build_tree(['tree/newdir/dir/new-child'])
+ tree.add(['newdir/dir/new-child'], ['new-child'])
+ self.assertExpectedIds(
+ ['newdir', 'dir', 'child-moves', 'child-stays', 'child-goes',
+ 'new-child'], tree, ['newdir'], [basis])
+ self.assertExpectedIds(
+ ['dir', 'child-moves', 'child-stays', 'child-goes', 'new-child'],
+ tree, ['dir'], [basis])
+
+ def test_unversioned_one_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/unversioned'])
+ self.assertExpectedIds([], tree, ['unversioned'], require_versioned=False)
+ tree.lock_read()
+ self.assertRaises(errors.PathsNotVersionedError, tree.paths2ids, ['unversioned'])
+ tree.unlock()
+
+ def test_unversioned_in_one_of_multiple_trees(self):
+ # in this test, the path is unversioned in only one tree, and thus
+ # should not raise an error: it must be unversioned in *all* trees to
+ # error.
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('make basis')
+ basis = tree.basis_tree()
+ self.build_tree(['tree/in-one'])
+ tree.add(['in-one'], ['in-one'])
+ self.assertExpectedIds(['in-one'], tree, ['in-one'], [basis])
+
+ def test_unversioned_all_of_multiple_trees(self):
+ # in this test, the path is unversioned in every tree: with
+ # require_versioned=False no ids are returned, and with the default
+ # require_versioned=True an error must be raised, because the path is
+ # unversioned in *all* trees.
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('make basis')
+ basis = tree.basis_tree()
+ self.assertExpectedIds([], tree, ['unversioned'], [basis],
+ require_versioned=False)
+ tree.lock_read()
+ basis.lock_read()
+ self.assertRaises(errors.PathsNotVersionedError, tree.paths2ids,
+ ['unversioned'], [basis])
+ self.assertRaises(errors.PathsNotVersionedError, basis.paths2ids,
+ ['unversioned'], [tree])
+ basis.unlock()
+ tree.unlock()
+
+ def test_unversioned_non_ascii_one_tree(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('.')
+ self.build_tree([u"\xa7"])
+ self.assertExpectedIds([], tree, [u"\xa7"], require_versioned=False)
+ self.addCleanup(tree.lock_read().unlock)
+ e = self.assertRaises(errors.PathsNotVersionedError,
+ tree.paths2ids, [u"\xa7"])
+ self.assertEqual([u"\xa7"], e.paths)
diff --git a/bzrlib/tests/per_workingtree/test_pull.py b/bzrlib/tests/per_workingtree/test_pull.py
new file mode 100644
index 0000000..6734eb4
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_pull.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import tests
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import per_workingtree
+
+
+class TestPull(per_workingtree.TestCaseWithWorkingTree):
+
+ def get_pullable_trees(self):
+ self.build_tree(['from/', 'from/file', 'to/'])
+ tree = self.make_branch_and_tree('from')
+ tree.add('file')
+ tree.commit('foo', rev_id='A')
+ tree_b = self.make_branch_and_tree('to')
+ return tree, tree_b
+
+ def test_pull_null(self):
+ tree_a, tree_b = self.get_pullable_trees()
+ root_id = tree_a.get_root_id()
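+ # Pulling back to the null revision (even with overwrite) must not
+ # change the tree's root id.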
+ tree_a.pull(tree_b.branch, stop_revision=NULL_REVISION, overwrite=True)
+ self.assertEquals(root_id, tree_a.get_root_id())
+
+ def test_pull(self):
+ tree_a, tree_b = self.get_pullable_trees()
+ tree_b.pull(tree_a.branch)
+ self.assertTrue(tree_b.branch.repository.has_revision('A'))
+ self.assertEqual(['A'], tree_b.get_parent_ids())
+
+ def test_pull_overwrites(self):
+ tree_a, tree_b = self.get_pullable_trees()
+ tree_b.commit('foo', rev_id='B')
+ self.assertEqual('B', tree_b.branch.last_revision())
+ tree_b.pull(tree_a.branch, overwrite=True)
+ self.assertTrue(tree_b.branch.repository.has_revision('A'))
+ self.assertTrue(tree_b.branch.repository.has_revision('B'))
+ self.assertEqual(['A'], tree_b.get_parent_ids())
+
+ def test_pull_merges_tree_content(self):
+ tree_a, tree_b = self.get_pullable_trees()
+ tree_b.pull(tree_a.branch)
+ self.assertFileEqual('contents of from/file\n', 'to/file')
+
+ def test_pull_changes_root_id(self):
+ tree = self.make_branch_and_tree('from')
+ tree.set_root_id('first_root_id')
+ self.build_tree(['from/file'])
+ tree.add(['file'])
+ tree.commit('first')
+ to_tree = tree.bzrdir.sprout('to').open_workingtree()
+ self.assertEqual('first_root_id', to_tree.get_root_id())
+ tree.set_root_id('second_root_id')
+ tree.commit('second')
+ to_tree.pull(tree.branch)
+ self.assertEqual('second_root_id', to_tree.get_root_id())
+
+
+class TestPullWithOrphans(per_workingtree.TestCaseWithWorkingTree):
+
+ def make_branch_deleting_dir(self, relpath=None):
+ if relpath is None:
+ relpath = 'trunk'
+ builder = self.make_branch_builder(relpath)
+ builder.start_series()
+
+ # Create an empty trunk
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('2', ['1'], [
+ ('add', ('dir', 'dir-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'trunk content\n')),])
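+ # Revision 3 deletes the directory again; pulling it over a working
+ # tree that still has unversioned files inside 'dir' is what can
+ # create orphans.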
+ builder.build_snapshot('3', ['2'], [
+ ('unversion', 'dir-id'),])
+ builder.finish_series()
+ return builder.get_branch()
+
+ def test_pull_orphans(self):
+ if not self.workingtree_format.missing_parent_conflicts:
+ raise tests.TestSkipped(
+ '%r does not support missing parent conflicts' %
+ self.workingtree_format)
+ trunk = self.make_branch_deleting_dir('trunk')
+ work = trunk.bzrdir.sprout('work', revision_id='2').open_workingtree()
+ work.branch.get_config_stack().set(
+ 'bzr.transform.orphan_policy', 'move')
+ # Add some unversioned files in dir
+ self.build_tree(['work/dir/foo',
+ 'work/dir/subdir/',
+ 'work/dir/subdir/foo'])
+ work.pull(trunk)
+ self.assertLength(0, work.conflicts())
+ # The directory removal should succeed
+ self.assertPathDoesNotExist('work/dir')
diff --git a/bzrlib/tests/per_workingtree/test_put_file.py b/bzrlib/tests/per_workingtree/test_put_file.py
new file mode 100644
index 0000000..d76f337
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_put_file.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'workingtree.put_file*'"""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestPutFileBytesNonAtomic(TestCaseWithWorkingTree):
+
+ def test_put_new_file(self):
+ t = self.make_branch_and_tree('t1')
+ t.add(['foo'], ids=['foo-id'], kinds=['file'])
+ t.put_file_bytes_non_atomic('foo-id', 'barshoom')
+ self.assertEqual('barshoom', t.get_file('foo-id').read())
+
+ def test_put_existing_file(self):
+ t = self.make_branch_and_tree('t1')
+ t.add(['foo'], ids=['foo-id'], kinds=['file'])
+ t.put_file_bytes_non_atomic('foo-id', 'first-content')
+ t.put_file_bytes_non_atomic('foo-id', 'barshoom')
+ self.assertEqual('barshoom', t.get_file('foo-id').read())
+
diff --git a/bzrlib/tests/per_workingtree/test_read_working_inventory.py b/bzrlib/tests/per_workingtree/test_read_working_inventory.py
new file mode 100644
index 0000000..f94b3d4
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_read_working_inventory.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTree.read_working_inventory."""
+
+from bzrlib import errors, inventory
+from bzrlib.tests import TestNotApplicable
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.workingtree import InventoryWorkingTree
+
+
+class TestReadWorkingInventory(TestCaseWithWorkingTree):
+
+ def test_trivial_read(self):
+ tree = self.make_branch_and_tree('t1')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable("read_working_inventory not usable on "
+ "non-inventory working trees")
+ tree.lock_read()
+ self.assertIsInstance(tree.read_working_inventory(), inventory.Inventory)
+ tree.unlock()
+
+ def test_read_after_inventory_modification(self):
+ tree = self.make_branch_and_tree('tree')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable("read_working_inventory not usable on "
+ "non-inventory working trees")
+ # prepare for a series of changes that will modify the
+ # inventory
+ tree.lock_write()
+ try:
+ tree.set_root_id('new-root')
+ # having dirtied the inventory, we can now expect an
+ # InventoryModified exception when doing a read_working_inventory()
+ # OR, the call can be ignored and the changes preserved
+ try:
+ tree.read_working_inventory()
+ except errors.InventoryModified:
+ pass
+ else:
+ self.assertEqual('new-root', tree.path2id(''))
+ finally:
+ tree.unlock()
diff --git a/bzrlib/tests/per_workingtree/test_readonly.py b/bzrlib/tests/per_workingtree/test_readonly.py
new file mode 100644
index 0000000..f1a3614
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_readonly.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that WorkingTrees don't fail if they are in a readonly dir."""
+
+import os
+import sys
+import time
+
+from bzrlib import (
+ hashcache,
+ tests,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+from bzrlib.workingtree import InventoryWorkingTree
+
+
+class TestReadonly(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ if not self.platform_supports_readonly_dirs():
+ raise tests.TestSkipped('platform does not support readonly'
+ ' directories.')
+ super(TestReadonly, self).setUp()
+
+ def platform_supports_readonly_dirs(self):
+ if sys.platform in ('win32', 'cygwin'):
+ # Setting a directory to readonly in windows or cygwin doesn't seem
+ # to have any effect. You can still create files in subdirectories.
+ # TODO: jam 20061219 We could cheat and set just the hashcache file
+ # to readonly, which would make it fail when we try to delete
+ # or rewrite it. But that is a lot of cheating...
+ return False
+ return True
+
+ def _set_all_dirs(self, basedir, readonly=True):
+ """Recursively set all directories beneath this one."""
+ if readonly:
+ mode = 0555
+ else:
+ mode = 0755
+
+ for root, dirs, files in os.walk(basedir, topdown=False):
+ for d in dirs:
+ path = os.path.join(root, d)
+ os.chmod(path, mode)
+
+ def set_dirs_readonly(self, basedir):
+ """Set all directories readonly, and have it cleanup on test exit."""
+ self.addCleanup(self._set_all_dirs, basedir, readonly=False)
+ self._set_all_dirs(basedir, readonly=True)
+
+ def create_basic_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b/', 'tree/b/c'])
+ tree.add(['a', 'b', 'b/c'])
+ tree.commit('creating an initial tree.')
+ return tree
+
+ def _custom_cutoff_time(self):
+ """We need to fake the cutoff time."""
+ return time.time() + 10
+
+ def test_readonly_unclean(self):
+ """Even if the tree is unclean, we should still handle readonly dirs."""
+ # First create a tree
+ tree = self.create_basic_tree()
+ if not isinstance(tree, InventoryWorkingTree):
+ raise tests.TestNotApplicable("requires inventory working tree")
+
+ # XXX: *Ugly* *ugly* hack, we need the hashcache to think it is out of
+ # date, but we don't want to actually wait 3 seconds doing nothing.
+ # WorkingTree formats that don't have a _hashcache should update this
+ # test so that they pass. For now, we just assert that we have the
+ # right type of objects available.
+ the_hashcache = getattr(tree, '_hashcache', None)
+ if the_hashcache is not None:
+ self.assertIsInstance(the_hashcache, hashcache.HashCache)
+ the_hashcache._cutoff_time = self._custom_cutoff_time
+ hack_dirstate = False
+ else:
+ # DirState trees don't have a HashCache, but they do have the same
+ # function as part of the DirState. However, until the tree is
+ # locked, we don't have a DirState to modify
+ hack_dirstate = True
+
+ # Make it a little dirty
+ self.build_tree_contents([('tree/a', 'new contents of a\n')])
+
+ # Make it readonly, and do some operations and then unlock
+ self.set_dirs_readonly('tree')
+
+ tree.lock_read()
+ try:
+ if hack_dirstate:
+ tree._dirstate._sha_cutoff_time = self._custom_cutoff_time
+ # Make sure we check all the files
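+ # The returned values are unimportant; the point is that computing
+ # sizes and sha1s must not fail merely because the directories (and
+ # any on-disk cache) are read-only.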
+ for file_id in tree.all_file_ids():
+ size = tree.get_file_size(file_id)
+ sha1 = tree.get_file_sha1(file_id)
+ finally:
+ tree.unlock()
diff --git a/bzrlib/tests/per_workingtree/test_remove.py b/bzrlib/tests/per_workingtree/test_remove.py
new file mode 100644
index 0000000..4832d81
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_remove.py
@@ -0,0 +1,345 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'WorkingTree.remove'"""
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib import ignores, osutils
+
+class TestRemove(TestCaseWithWorkingTree):
+ """Tests WorkingTree.remove"""
+
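+ # 'files' is the tree shape to build, 'rfiles' lists the same paths in
+ # child-before-parent order (safe for deleting or renaming bottom-up),
+ # and 'backup_files' are the backup names bzr is expected to create when
+ # removed content needs to be preserved.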
+ files = ['a', 'b/', 'b/c', 'd/']
+ rfiles = ['b/c', 'b', 'a', 'd']
+ backup_files = ['a.~1~', 'b.~1~/', 'b.~1~/c.~1~', 'd.~1~/']
+
+ def get_tree(self, files):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(files)
+ self.assertPathExists(files)
+ return tree
+
+ def get_committed_tree(self, files, message="Committing"):
+ tree = self.get_tree(files)
+ tree.add(files)
+ tree.commit(message)
+ self.assertInWorkingTree(files)
+ return tree
+
+ def assertRemovedAndDeleted(self, files):
+ self.assertNotInWorkingTree(files)
+ self.assertPathDoesNotExist(files)
+
+ def assertRemovedAndNotDeleted(self, files):
+ self.assertNotInWorkingTree(files)
+ self.assertPathExists(files)
+
+ def test_remove_keep(self):
+ """Check that files and directories are unversioned but not deleted."""
+ tree = self.get_tree(TestRemove.files)
+ tree.add(TestRemove.files)
+ self.assertInWorkingTree(TestRemove.files)
+
+ tree.remove(TestRemove.files)
+ self.assertRemovedAndNotDeleted(TestRemove.files)
+
+ def test_remove_keep_subtree(self):
+ """Check that a directory is unversioned but not deleted."""
+ tree = self.make_branch_and_tree('.')
+ subtree = self.make_branch_and_tree('subtree')
+ tree.add('subtree', 'subtree-id')
+
+ tree.remove('subtree')
+ self.assertRemovedAndNotDeleted('subtree')
+
+ def test_remove_unchanged_files(self):
+ """Check that unchanged files are removed and deleted."""
+ tree = self.get_committed_tree(TestRemove.files)
+ tree.remove(TestRemove.files, keep_files=False)
+ self.assertRemovedAndDeleted(TestRemove.files)
+ tree._validate()
+
+ def test_remove_added_files(self):
+ """Removal of newly added files must back them up."""
+ tree = self.get_tree(TestRemove.files)
+ tree.add(TestRemove.files)
+ self.assertInWorkingTree(TestRemove.files)
+ tree.remove(TestRemove.files, keep_files=False)
+ self.assertNotInWorkingTree(TestRemove.files)
+ self.assertPathExists(TestRemove.backup_files)
+ tree._validate()
+
+ def test_remove_changed_file(self):
+ """Removal of changed files must back it up."""
+ tree = self.get_committed_tree(['a'])
+ self.build_tree_contents([('a', "some other new content!")])
+ self.assertInWorkingTree('a')
+ tree.remove('a', keep_files=False)
+ self.assertNotInWorkingTree(TestRemove.files)
+ self.assertPathExists('a.~1~')
+ tree._validate()
+
+ def test_remove_deleted_files(self):
+ """Check that files are removed if they don't exist any more."""
+ tree = self.get_committed_tree(TestRemove.files)
+ for f in TestRemove.rfiles:
+ osutils.delete_any(f)
+ self.assertInWorkingTree(TestRemove.files)
+ self.assertPathDoesNotExist(TestRemove.files)
+ tree.remove(TestRemove.files, keep_files=False)
+ self.assertRemovedAndDeleted(TestRemove.files)
+ tree._validate()
+
+ def test_remove_renamed_files(self):
+ """Check that files are removed even if they are renamed."""
+ tree = self.get_committed_tree(TestRemove.files)
+
+ for f in TestRemove.rfiles:
+ tree.rename_one(f,f+'x')
+ rfilesx = ['bx/cx', 'bx', 'ax', 'dx']
+ self.assertInWorkingTree(rfilesx)
+ self.assertPathExists(rfilesx)
+
+ tree.remove(rfilesx, keep_files=False)
+ self.assertRemovedAndDeleted(rfilesx)
+ tree._validate()
+
+ def test_remove_renamed_changed_files(self):
+ """Check that files that are renamed and changed are backed up."""
+ tree = self.get_committed_tree(TestRemove.files)
+
+ for f in TestRemove.rfiles:
+ tree.rename_one(f,f+'x')
+ rfilesx = ['bx/cx', 'bx', 'ax', 'dx']
+ self.build_tree_contents([('ax','changed and renamed!'),
+ ('bx/cx','changed and renamed!')])
+ self.assertInWorkingTree(rfilesx)
+ self.assertPathExists(rfilesx)
+
+ tree.remove(rfilesx, keep_files=False)
+ self.assertNotInWorkingTree(rfilesx)
+ self.assertPathExists(['bx.~1~/cx.~1~', 'bx.~1~', 'ax.~1~'])
+ self.assertPathDoesNotExist('dx.~1~') # unchanged file
+ tree._validate()
+
+ def test_force_remove_changed_files(self):
+ """Check that changed files are removed and deleted when forced."""
+ tree = self.get_tree(TestRemove.files)
+ tree.add(TestRemove.files)
+ self.assertInWorkingTree(TestRemove.files)
+
+ tree.remove(TestRemove.files, keep_files=False, force=True)
+ self.assertRemovedAndDeleted(TestRemove.files)
+ self.assertPathDoesNotExist(['a.~1~', 'b.~1~/', 'b.~1~/c', 'd.~1~/'])
+ tree._validate()
+
+ def test_remove_unknown_files(self):
+ """Unknown files shuld be backed up"""
+ tree = self.get_tree(TestRemove.files)
+ tree.remove(TestRemove.files, keep_files=False)
+ self.assertRemovedAndDeleted(TestRemove.files)
+ self.assertPathExists(TestRemove.backup_files)
+ tree._validate()
+
+ def test_remove_nonexisting_files(self):
+ """Try to delete non-existing files."""
+ tree = self.get_tree(TestRemove.files)
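+ # None of these removals should raise, even though the paths either do
+ # not exist or (for '') refer to the tree root.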
+ tree.remove([''], keep_files=False)
+ tree.remove(['xyz', 'abc/def'], keep_files=False)
+ tree._validate()
+
+ def test_remove_unchanged_directory(self):
+ """Unchanged directories should be deleted."""
+ files = ['b/', 'b/c', 'b/sub_directory/', 'b/sub_directory/with_file']
+ tree = self.get_committed_tree(files)
+ tree.remove('b', keep_files=False)
+ self.assertRemovedAndDeleted('b')
+ tree._validate()
+
+ def test_remove_absent_directory(self):
+ """Removing a absent directory succeeds without corruption (#150438)."""
+ paths = ['a/', 'a/b']
+ tree = self.get_committed_tree(paths)
+ self.get_transport('.').delete_tree('a')
+ tree.remove(['a'])
+ self.assertRemovedAndDeleted('b')
+ tree._validate()
+
+ def test_remove_unknown_ignored_files(self):
+ """Unknown ignored files should be deleted."""
+ tree = self.get_committed_tree(['b/'])
+ ignores.add_runtime_ignores(["*ignored*"])
+
+ self.build_tree(['unknown_ignored_file'])
+ self.assertNotEquals(None, tree.is_ignored('unknown_ignored_file'))
+ tree.remove('unknown_ignored_file', keep_files=False)
+ self.assertRemovedAndDeleted('unknown_ignored_file')
+
+ self.build_tree(['b/unknown_ignored_file', 'b/unknown_ignored_dir/'])
+ self.assertNotEquals(None, tree.is_ignored('b/unknown_ignored_file'))
+ self.assertNotEquals(None, tree.is_ignored('b/unknown_ignored_dir'))
+ tree.remove('b', keep_files=False)
+ self.assertRemovedAndDeleted('b')
+ tree._validate()
+
+ def test_remove_changed_ignored_files(self):
+ """Changed ignored files should be backed up."""
+ files = ['an_ignored_file']
+ tree = self.get_tree(files)
+ tree.add(files)
+ ignores.add_runtime_ignores(["*ignored*"])
+ self.assertInWorkingTree(files)
+ self.assertNotEquals(None, tree.is_ignored(files[0]))
+
+ tree.remove(files, keep_files=False)
+ self.assertNotInWorkingTree(files)
+ self.assertPathExists('an_ignored_file.~1~')
+ tree._validate()
+
+ def test_dont_remove_directory_with_unknowns(self):
+ """Directories with unknowns should be backed up."""
+ directories = ['a/', 'b/', 'c/', 'c/c/']
+ tree = self.get_committed_tree(directories)
+
+ self.build_tree(['a/unknown_file'])
+ tree.remove('a', keep_files=False)
+ self.assertPathExists('a.~1~/unknown_file')
+
+ self.build_tree(['b/unknown_directory'])
+ tree.remove('b', keep_files=False)
+ self.assertPathExists('b.~1~/unknown_directory')
+
+ self.build_tree(['c/c/unknown_file'])
+ tree.remove('c/c', keep_files=False)
+ self.assertPathExists('c/c.~1~/unknown_file')
+
+ tree.remove('c', keep_files=False)
+ self.assertPathExists('c.~1~/')
+
+ self.assertNotInWorkingTree(directories)
+ tree._validate()
+
+ def test_force_remove_directory_with_unknowns(self):
+ """Unchanged non-empty directories should be deleted when forced."""
+ files = ['b/', 'b/c']
+ tree = self.get_committed_tree(files)
+
+ other_files = ['b/unknown_file', 'b/sub_directory/',
+ 'b/sub_directory/with_file', 'b/sub_directory/sub_directory/']
+ self.build_tree(other_files)
+
+ self.assertInWorkingTree(files)
+ self.assertPathExists(files)
+
+ tree.remove('b', keep_files=False, force=True)
+
+ self.assertRemovedAndDeleted(files)
+ self.assertRemovedAndDeleted(other_files)
+ tree._validate()
+
+ def test_remove_directory_with_changed_file(self):
+ """Backup directories with changed files."""
+ files = ['b/', 'b/c']
+ tree = self.get_committed_tree(files)
+ self.build_tree_contents([('b/c', "some other new content!")])
+
+ tree.remove('b', keep_files=False)
+ self.assertPathExists('b.~1~/c.~1~')
+ self.assertNotInWorkingTree(files)
+
+ def test_remove_force_directory_with_changed_file(self):
+ """Delete directories with changed files when forced."""
+ files = ['b/', 'b/c']
+ tree = self.get_committed_tree(files)
+ self.build_tree_contents([('b/c', "some other new content!")])
+
+ # see if we can force it now..
+ tree.remove('b', keep_files=False, force=True)
+ self.assertRemovedAndDeleted(files)
+ tree._validate()
+
+ def test_remove_directory_with_changed_emigrated_file(self):
+ # As per bug #129880
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('somedir/',), ('somedir/file', 'contents')])
+ tree.add(['somedir', 'somedir/file'])
+ tree.commit(message="first")
+ self.build_tree_contents([('somedir/file', 'changed')])
+ tree.rename_one('somedir/file', 'moved-file')
+ tree.remove('somedir', keep_files=False)
+ self.assertNotInWorkingTree('somedir')
+ self.assertPathDoesNotExist('somedir')
+ self.assertInWorkingTree('moved-file')
+ self.assertPathExists('moved-file')
+
+ def test_remove_directory_with_renames(self):
+ """Delete directory with renames in or out."""
+
+ files = ['a/', 'a/file', 'a/directory/', 'b/']
+ files_to_move = ['a/file', 'a/directory/']
+
+ tree = self.get_committed_tree(files)
+ # move stuff from a=>b
+ tree.move(['a/file', 'a/directory'], to_dir='b')
+
+ moved_files = ['b/file', 'b/directory/']
+ self.assertRemovedAndDeleted(files_to_move)
+ self.assertInWorkingTree(moved_files)
+ self.assertPathExists(moved_files)
+
+ # check if it works with renames out
+ tree.remove('a', keep_files=False)
+ self.assertRemovedAndDeleted(['a/'])
+
+ # check if it works with renames in
+ tree.remove('b', keep_files=False)
+ self.assertRemovedAndDeleted(['b/'])
+ tree._validate()
+
+ def test_non_cwd(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/dir/', 'tree/dir/file'])
+ tree.add(['dir', 'dir/file'])
+ tree.commit('add file')
+ tree.remove('dir/', keep_files=False)
+ self.assertPathDoesNotExist('tree/dir/file')
+ self.assertNotInWorkingTree('tree/dir/file', 'tree')
+ tree._validate()
+
+ def test_remove_uncommitted_removed_file(self):
+ # As per bug #152811
+ tree = self.get_committed_tree(['a'])
+ tree.remove('a', keep_files=False)
+ tree.remove('a', keep_files=False)
+ self.assertPathDoesNotExist('a')
+ tree._validate()
+
+ def test_remove_file_and_containing_dir(self):
+ tree = self.get_committed_tree(['config/', 'config/file'])
+ tree.remove('config/file', keep_files=False)
+ tree.remove('config', keep_files=False)
+ self.assertPathDoesNotExist('config/file')
+ self.assertPathDoesNotExist('config')
+ tree._validate()
+
+ def test_remove_dir_before_bzr(self):
+ # As per bug #272648. Note that a file must be present in the directory
+ # or the bug doesn't manifest itself.
+ tree = self.get_committed_tree(['.aaa/', '.aaa/file'])
+ tree.remove('.aaa/', keep_files=False)
+ self.assertPathDoesNotExist('.aaa/file')
+ self.assertPathDoesNotExist('.aaa')
+ tree._validate()
diff --git a/bzrlib/tests/per_workingtree/test_rename_one.py b/bzrlib/tests/per_workingtree/test_rename_one.py
new file mode 100644
index 0000000..d0e5c73
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_rename_one.py
@@ -0,0 +1,420 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for interface conformance of 'WorkingTree.rename_one'"""
+
+import os
+
+from bzrlib import (
+ errors,
+ osutils,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+from bzrlib.tests.matchers import HasLayout
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestRenameOne(TestCaseWithWorkingTree):
+
+ def assertTreeLayout(self, expected, tree):
+ """Check that the tree has the correct layout."""
+ self.assertThat(tree, HasLayout(expected))
+
+ def test_rename_one_target_not_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'a', 'not-a-dir/b')
+
+ def test_rename_one_non_existent(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/'])
+ tree.add(['a'])
+ tree.commit('initial', rev_id='rev-1')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'not-a-file', 'a/failure')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'not-a-file', 'also_not')
+
+ def test_rename_one_target_not_versioned(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'b'])
+ tree.add(['b'])
+ tree.commit('initial', rev_id='rev-1')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'b', 'a/b')
+
+ def test_rename_one_unversioned(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'b'])
+ tree.add(['a'])
+ tree.commit('initial', rev_id='rev-1')
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'b', 'a/b')
+
+ def test_rename_one_samedir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ a_contents = tree.get_file_text('a-id')
+ tree.rename_one('a', 'foo')
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('foo', 'a-id')],
+ tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ self.assertPathDoesNotExist('a')
+ self.assertFileEqual(a_contents, 'foo')
+
+ def test_rename_one_not_localdir(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ a_contents = tree.get_file_text('a-id')
+ tree.rename_one('a', 'b/foo')
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('b/foo', 'a-id')],
+ tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ self.assertPathDoesNotExist('tree/a')
+ self.assertFileEqual(a_contents, 'tree/b/foo')
+
+ def test_rename_one_subdir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree.basis_tree())
+ a_contents = tree.get_file_text('a-id')
+ tree.rename_one('a', 'b/d')
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'), ('b/c', 'c-id'),
+ ('b/d', 'a-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree.basis_tree())
+ self.assertPathDoesNotExist('a')
+ self.assertFileEqual(a_contents, 'b/d')
+
+ def test_rename_one_parent_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ c_contents = tree.get_file_text('c-id')
+ tree.rename_one('b/c', 'd')
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('d', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('b/c', 'c-id')], tree.basis_tree())
+ self.assertPathDoesNotExist('b/c')
+ self.assertFileEqual(c_contents, 'd')
+
+ def test_rename_one_fail_consistent(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/a', 'c'])
+ tree.add(['a', 'b', 'c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ # Target already exists
+ self.assertRaises(errors.RenameFailedFilesExist,
+ tree.rename_one, 'a', 'b/a')
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('c', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id'),
+ ('c', 'c-id')], tree.basis_tree())
+
+ def test_rename_one_onto_existing(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'a', 'b')
+
+ def test_rename_one_onto_self(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['b/', 'b/a'])
+ tree.add(['b', 'b/a'], ['b-id', 'a-id'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'b/a', 'b/a')
+
+ def test_rename_one_onto_self_root(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('initial', rev_id='rev-1')
+
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'a', 'a')
+
+ def test_rename_one_after(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b/foo')
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ # We don't need after=True as long as source is missing and target
+ # exists.
+ tree.rename_one('a', 'b/foo')
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'),
+ ('b/foo', 'a-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+
+ def test_rename_one_after_with_after(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b/foo')
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ # Passing after=True should work as well
+ tree.rename_one('a', 'b/foo', after=True)
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'),
+ ('b/foo', 'a-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+
+ def test_rename_one_after_dest_versioned(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b')
+ tree.add(['b'], ['b-id'])
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b', 'b-id')],
+ tree)
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'a', 'b')
+ self.assertIsInstance(e.extra, errors.AlreadyVersionedError)
+
+ def test_rename_one_after_with_after_dest_versioned(self):
+ """Using after with an already versioned file should fail."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.unlink('a')
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b', 'b-id')],
+ tree)
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'a', 'b', after=True)
+ self.assertIsInstance(e.extra, errors.AlreadyVersionedError)
+
+ def test_rename_one_after_with_after_dest_added(self):
+ """Using after with a newly added file should work."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'], ['a-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b')
+ tree.add(['b'], ['b-id'])
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b', 'b-id')],
+ tree)
+ tree.rename_one('a', 'b', after=True)
+ self.assertTreeLayout([('', root_id), ('b', 'a-id')], tree)
+
+ def test_rename_one_after_source_removed(self):
+ """Rename even if the source was already unversioned."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+ os.rename('a', 'b/foo')
+ tree.remove(['a'])
+
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id')], tree)
+ # We don't need after=True as long as source is missing and target
+ # exists.
+ tree.rename_one('a', 'b/foo')
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'),
+ ('b/foo', 'a-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+
+ def test_rename_one_after_no_target(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ # Passing after=True when the file was never actually renamed on disk
+ # (the target does not exist) raises an exception.
+ self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, 'a', 'b/foo', after=True)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+
+ def test_rename_one_after_source_and_dest(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/foo'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ # TODO: jam 20070225 I would usually use 'rb', but assertFileEqual
+ # uses 'r'.
+ a_file = open('a', 'r')
+ try:
+ a_text = a_file.read()
+ finally:
+ a_file.close()
+ foo_file = open('b/foo', 'r')
+ try:
+ foo_text = foo_file.read()
+ finally:
+ foo_file.close()
+
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ self.assertRaises(errors.RenameFailedFilesExist,
+ tree.rename_one, 'a', 'b/foo', after=False)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree)
+ self.assertFileEqual(a_text, 'a')
+ self.assertFileEqual(foo_text, 'b/foo')
+ # But you can pass after=True
+ tree.rename_one('a', 'b/foo', after=True)
+ self.assertTreeLayout([('', root_id), ('b/', 'b-id'),
+ ('b/foo', 'a-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a', 'a-id'), ('b/', 'b-id')],
+ tree.basis_tree())
+ # But it shouldn't actually move anything
+ self.assertFileEqual(a_text, 'a')
+ self.assertFileEqual(foo_text, 'b/foo')
+
+ def test_rename_one_directory(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c/', 'a/c/d', 'e/'])
+ tree.add(['a', 'a/b', 'a/c', 'a/c/d', 'e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'e-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ tree.rename_one('a', 'e/f')
+ self.assertTreeLayout([('', root_id), ('e/', 'e-id'), ('e/f/', 'a-id'),
+ ('e/f/b', 'b-id'), ('e/f/c/', 'c-id'),
+ ('e/f/c/d', 'd-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('e/', 'e-id'),
+ ('a/b', 'b-id'), ('a/c/', 'c-id'),
+ ('a/c/d', 'd-id')], tree.basis_tree())
+
+ def test_rename_one_moved(self):
+ """Moving a moved entry works as expected."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'c/'])
+ tree.add(['a', 'a/b', 'c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial', rev_id='rev-1')
+ root_id = tree.get_root_id()
+
+ tree.rename_one('a/b', 'c/foo')
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c/', 'c-id'),
+ ('c/foo', 'b-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c/', 'c-id'),
+ ('a/b', 'b-id')], tree.basis_tree())
+
+ tree.rename_one('c/foo', 'bar')
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('bar', 'b-id'),
+ ('c/', 'c-id')], tree)
+ self.assertTreeLayout([('', root_id), ('a/', 'a-id'), ('c/', 'c-id'),
+ ('a/b', 'b-id')], tree.basis_tree())
+
+ def test_rename_to_denormalised_fails(self):
+ if osutils.normalizes_filenames():
+ raise tests.TestNotApplicable('OSX normalizes filenames')
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add(['a'])
+ self.assertRaises((errors.InvalidNormalization, UnicodeEncodeError),
+ tree.rename_one, 'a', u'ba\u030arry')
+
+ def test_rename_unversioned_non_ascii(self):
+ """Check error when renaming an unversioned non-ascii file"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree([u"\xA7"])
+ e = self.assertRaises(errors.BzrRenameFailedError,
+ tree.rename_one, u"\xA7", "b")
+ self.assertIsInstance(e.extra, errors.NotVersionedError)
+ self.assertEqual(e.extra.path, u"\xA7")
+
+ def test_rename_into_unversioned_non_ascii_dir(self):
+ """Check error when renaming into unversioned non-ascii directory"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["a", u"\xA7/"])
+ tree.add(["a"])
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, "a", u"\xA7/a")
+ self.assertIsInstance(e.extra, errors.NotVersionedError)
+ self.assertEqual(e.extra.path, u"\xA7")
+
+ def test_rename_over_already_versioned_non_ascii(self):
+ """Check error renaming over an already versioned non-ascii file"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["a", u"\xA7"])
+ tree.add(["a", u"\xA7"])
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, "a", u"\xA7")
+ self.assertIsInstance(e.extra, errors.AlreadyVersionedError)
+ self.assertEqual(e.extra.path, u"\xA7")
+
+ def test_rename_after_non_existant_non_ascii(self):
+ """Check error renaming after move with missing non-ascii file"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["a"])
+ tree.add(["a"])
+ e = self.assertRaises(errors.BzrMoveFailedError,
+ tree.rename_one, "a", u"\xA7", after=True)
+ self.assertIsInstance(e.extra, errors.NoSuchFile)
+ self.assertEqual(e.extra.path, u"\xA7")
diff --git a/bzrlib/tests/per_workingtree/test_revision_tree.py b/bzrlib/tests/per_workingtree/test_revision_tree.py
new file mode 100644
index 0000000..4a42822
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_revision_tree.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTree.revision_tree.
+
+These tests are in addition to the tests from
+per_tree.test_revision_tree which cover the behaviour expected from
+all Trees. WorkingTrees implement the revision_tree api to allow access to
+cached data, but we don't require that all WorkingTrees have such a cache,
+so these tests are testing that when there is a cache, it performs correctly.
+"""
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.tests import per_workingtree
+
+
+class TestRevisionTree(per_workingtree.TestCaseWithWorkingTree):
+
+ def test_get_zeroth_basis_tree_via_revision_tree(self):
+ tree = self.make_branch_and_tree('.')
+ try:
+ revision_tree = tree.revision_tree(tree.last_revision())
+ except errors.NoSuchRevision:
+ # it's ok for a working tree not to cache trees, so just return.
+ return
+ basis_tree = tree.basis_tree()
+ self.assertTreesEqual(revision_tree, basis_tree)
+
+ def test_get_nonzeroth_basis_tree_via_revision_tree(self):
+ tree = self.make_branch_and_tree('.')
+ revision1 = tree.commit('first post')
+ revision_tree = tree.revision_tree(revision1)
+ basis_tree = tree.basis_tree()
+ self.assertTreesEqual(revision_tree, basis_tree)
+
+ def test_get_pending_merge_revision_tree(self):
+ tree = self.make_branch_and_tree('tree1')
+ tree.commit('first post')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ revision1 = tree2.commit('commit in branch', allow_pointless=True)
+ tree.merge_from_branch(tree2.branch)
+ try:
+ cached_revision_tree = tree.revision_tree(revision1)
+ except errors.NoSuchRevision:
+ # it's ok for a working tree not to cache trees, so just return.
+ return
+ real_revision_tree = tree2.basis_tree()
+ self.assertTreesEqual(real_revision_tree, cached_revision_tree)
+
+ def test_get_uncached_basis_via_revision_tree(self):
+ # The basis_tree method returns an empty tree when you ask for the
+ # basis if the basis is not cached and is a ghost. However the
+ # revision_tree method should always raise when a requested tree is
+ # not cached, so we force this by setting a basis that is a ghost and
+ # thus cannot be cached.
+ tree = self.make_branch_and_tree('.')
+ tree.set_parent_ids(['a-ghost'], allow_leftmost_as_ghost=True)
+ self.assertRaises(errors.NoSuchRevision, tree.revision_tree, 'a-ghost')
+
+ def test_revision_tree_different_root_id(self):
+ """A revision tree might have a very different root."""
+ tree = self.make_branch_and_tree('tree1')
+ tree.set_root_id('one')
+ rev1 = tree.commit('first post')
+ tree.set_root_id('two')
+ try:
+ cached_revision_tree = tree.revision_tree(rev1)
+ except errors.NoSuchRevision:
+ # it's ok for a working tree not to cache trees, so just return.
+ return
+ repository_revision_tree = tree.branch.repository.revision_tree(rev1)
+ self.assertTreesEqual(repository_revision_tree, cached_revision_tree)
+
+
+class TestRevisionTreeKind(per_workingtree.TestCaseWithWorkingTree):
+
+ def make_branch_with_merged_deletions(self, relpath='tree'):
+ tree = self.make_branch_and_tree(relpath)
+ files = ['a', 'b/', 'b/c']
+ self.build_tree(files, line_endings='binary',
+ transport=tree.bzrdir.root_transport)
+ tree.set_root_id('root-id')
+ tree.add(files, ['a-id', 'b-id', 'c-id'])
+ tree.commit('a, b and b/c', rev_id='base')
+ tree2 = tree.bzrdir.sprout(relpath + '2').open_workingtree()
+ # Delete 'a' in tree
+ tree.remove('a', keep_files=False)
+ tree.commit('remove a', rev_id='this')
+ # Delete 'c' in tree2
+ tree2.remove('b/c', keep_files=False)
+ tree2.remove('b', keep_files=False)
+ tree2.commit('remove b/c', rev_id='other')
+ # Merge tree2 into tree
+ tree.merge_from_branch(tree2.branch)
+ return tree
+
+ def test_kind_parent_tree(self):
+ tree = self.make_branch_with_merged_deletions()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ parents = tree.get_parent_ids()
+ self.assertEqual(['this', 'other'], parents)
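+ # In 'this' (parents[0]) 'a' was removed, so looking up its kind must
+ # fail, while 'b' and 'b/c' are still present.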
+ basis = tree.revision_tree(parents[0])
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ self.assertRaises(errors.NoSuchId, basis.kind, 'a-id')
+ self.assertEqual(['directory', 'file'],
+ [basis.kind('b-id'), basis.kind('c-id')])
+ try:
+ other = tree.revision_tree(parents[1])
+ except errors.NoSuchRevisionInTree:
+ raise tests.TestNotApplicable(
+ 'Tree type %s caches only the basis revision tree.'
+ % type(tree))
+ other.lock_read()
+ self.addCleanup(other.unlock)
+ self.assertRaises(errors.NoSuchId, other.kind, 'b-id')
+ self.assertRaises(errors.NoSuchId, other.kind, 'c-id')
+ self.assertEqual('file', other.kind('a-id'))
diff --git a/bzrlib/tests/per_workingtree/test_set_root_id.py b/bzrlib/tests/per_workingtree/test_set_root_id.py
new file mode 100644
index 0000000..40c46b6
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_set_root_id.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTree.set_root_id"""
+
+import sys
+
+from bzrlib import errors
+from bzrlib.tests import TestSkipped
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestSetRootId(TestCaseWithWorkingTree):
+
+ def test_set_and_read_unicode(self):
+ if sys.platform == "win32":
+ raise TestSkipped("don't use oslocks on win32 in unix manner")
+ # This test tests that setting the root doesn't flush, so it
+ # deliberately tests concurrent access that isn't possible on windows.
+ self.thisFailsStrictLockCheck()
+ tree = self.make_branch_and_tree('a-tree')
+ # setting the root id allows it to be read via get_root_id.
+ root_id = u'\xe5n-id'.encode('utf8')
+ tree.lock_write()
+ try:
+ old_id = tree.get_root_id()
+ tree.set_root_id(root_id)
+ self.assertEqual(root_id, tree.get_root_id())
+ # set root id should not have triggered a flush of the tree,
+ # so check a new tree sees the old state.
+ reference_tree = tree.bzrdir.open_workingtree()
+ self.assertEqual(old_id, reference_tree.get_root_id())
+ finally:
+ tree.unlock()
+ # having unlocked the tree, the value should have been
+ # preserved into the next lock, which is an implicit read
+ # lock around the get_root_id call.
+ self.assertEqual(root_id, tree.get_root_id())
+ # and if we get a new working tree instance, then the value
+ # should still be retained
+ tree = tree.bzrdir.open_workingtree()
+ self.assertEqual(root_id, tree.get_root_id())
+ tree._validate()
+
+ def test_set_root_id(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ orig_root_id = tree.get_root_id()
+ self.assertNotEqual('custom-root-id', orig_root_id)
+ self.assertEqual('', tree.id2path(orig_root_id))
+ self.assertRaises(errors.NoSuchId, tree.id2path, 'custom-root-id')
+ tree.set_root_id('custom-root-id')
+ self.assertEqual('custom-root-id', tree.get_root_id())
+ self.assertEqual('custom-root-id', tree.path2id(''))
+ self.assertEqual('', tree.id2path('custom-root-id'))
+ self.assertRaises(errors.NoSuchId, tree.id2path, orig_root_id)
+ tree._validate()
diff --git a/bzrlib/tests/per_workingtree/test_smart_add.py b/bzrlib/tests/per_workingtree/test_smart_add.py
new file mode 100644
index 0000000..e341d03
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_smart_add.py
@@ -0,0 +1,364 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that we can use smart_add on all Tree implementations."""
+
+from cStringIO import StringIO
+import os
+import sys
+
+from bzrlib import (
+ errors,
+ ignores,
+ osutils,
+ tests,
+ trace,
+ )
+from bzrlib.tests import (
+ features,
+ per_workingtree,
+ test_smart_add,
+ )
+
+
+class TestSmartAddTree(per_workingtree.TestCaseWithWorkingTree):
+
+ def test_single_file(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ tree.smart_add(['tree'])
+
+ tree.lock_read()
+ try:
+ files = [(path, status, kind)
+ for path, status, kind, file_id, parent_id
+ in tree.list_files(include_root=True)]
+ finally:
+ tree.unlock()
+ self.assertEqual([('', 'V', 'directory'), ('a', 'V', 'file')],
+ files)
+
+ def assertFilenameSkipped(self, filename):
+ tree = self.make_branch_and_tree('tree')
+ try:
+ self.build_tree(['tree/'+filename])
+ except errors.NoSuchFile:
+ if sys.platform == 'win32':
+ raise tests.TestNotApplicable('Cannot create files named %r on'
+ ' win32' % (filename,))
+ tree.smart_add(['tree'])
+ self.assertEqual(None, tree.path2id(filename))
+
+ def test_path_containing_newline_skips(self):
+ self.assertFilenameSkipped('a\nb')
+
+ def test_path_containing_carriagereturn_skips(self):
+ self.assertFilenameSkipped('a\rb')
+
+ def test_save_false(self):
+ """Dry-run add doesn't permanently affect the tree."""
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ try:
+ self.build_tree(['file'])
+ wt.smart_add(['file'], save=False)
+ # the file should not be added - no id.
+ self.assertEqual(wt.path2id('file'), None)
+ finally:
+ wt.unlock()
+ # and the disk state should be the same - reopen to check.
+ wt = wt.bzrdir.open_workingtree()
+ self.assertEqual(wt.path2id('file'), None)
+
+ def test_add_dot_from_root(self):
+ """Test adding . from the root of the tree."""
+ paths = ("original/", "original/file1", "original/file2")
+ self.build_tree(paths)
+ wt = self.make_branch_and_tree('.')
+ wt.smart_add((u".",))
+ for path in paths:
+ self.assertNotEqual(wt.path2id(path), None)
+
+ def test_skip_nested_trees(self):
+ """Test smart-adding a nested tree ignors it and warns."""
+ wt = self.make_branch_and_tree('.')
+ nested_wt = self.make_branch_and_tree('nested')
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ wt.smart_add((u".",))
+ self.assertIs(wt.path2id("nested"), None)
+ self.assertEquals(
+ ['skipping nested tree %r' % nested_wt.basedir], warnings)
+
+ def test_add_dot_from_subdir(self):
+ """Test adding . from a subdir of the tree."""
+ paths = ("original/", "original/file1", "original/file2")
+ self.build_tree(paths)
+ wt = self.make_branch_and_tree('.')
+ wt.smart_add((u".",))
+ for path in paths:
+ self.assertNotEqual(wt.path2id(path), None)
+
+ def test_add_tree_from_above_tree(self):
+ """Test adding a tree from above the tree."""
+ paths = ("original/", "original/file1", "original/file2")
+ branch_paths = ("branch/", "branch/original/", "branch/original/file1",
+ "branch/original/file2")
+ self.build_tree(branch_paths)
+ wt = self.make_branch_and_tree('branch')
+ wt.smart_add(("branch",))
+ for path in paths:
+ self.assertNotEqual(wt.path2id(path), None)
+
+ def test_add_above_tree_preserves_tree(self):
+ """Test nested trees are not affect by an add above them."""
+ paths = ("original/", "original/file1", "original/file2")
+ child_paths = ("path",)
+ full_child_paths = ("original/child", "original/child/path")
+ build_paths = ("original/", "original/file1", "original/file2",
+ "original/child/", "original/child/path")
+
+ self.build_tree(build_paths)
+ wt = self.make_branch_and_tree('.')
+ child_tree = self.make_branch_and_tree('original/child')
+ wt.smart_add((".",))
+ for path in paths:
+ self.assertNotEqual((path, wt.path2id(path)),
+ (path, None))
+ for path in full_child_paths:
+ self.assertEqual((path, wt.path2id(path)),
+ (path, None))
+ for path in child_paths:
+ self.assertEqual(child_tree.path2id(path), None)
+
+ def test_add_paths(self):
+ """Test smart-adding a list of paths."""
+ paths = ("file1", "file2")
+ self.build_tree(paths)
+ wt = self.make_branch_and_tree('.')
+ wt.smart_add(paths)
+ for path in paths:
+ self.assertNotEqual(wt.path2id(path), None)
+
+ def test_add_ignored_nested_paths(self):
+ """Test smart-adding a list of paths which includes ignored ones."""
+ wt = self.make_branch_and_tree('.')
+ tree_shape = ("adir/", "adir/CVS/", "adir/CVS/afile", "adir/CVS/afile2")
+ add_paths = ("adir/CVS", "adir/CVS/afile", "adir")
+ expected_paths = ("adir", "adir/CVS", "adir/CVS/afile", "adir/CVS/afile2")
+ self.build_tree(tree_shape)
+ wt.smart_add(add_paths)
+ for path in expected_paths:
+ self.assertNotEqual(wt.path2id(path), None,
+ "No id added for %s" % path)
+
+ def test_add_non_existant(self):
+ """Test smart-adding a file that does not exist."""
+ wt = self.make_branch_and_tree('.')
+ self.assertRaises(errors.NoSuchFile, wt.smart_add, ['non-existant-file'])
+
+ def test_returns_and_ignores(self):
+ """Correctly returns added/ignored files"""
+ wt = self.make_branch_and_tree('.')
+ # The default ignore list includes '*.py[co]', but not CVS
+ ignores._set_user_ignores(['*.py[co]'])
+ self.build_tree(['inertiatic/', 'inertiatic/esp', 'inertiatic/CVS',
+ 'inertiatic/foo.pyc'])
+ added, ignored = wt.smart_add(u'.')
+ self.assertSubset(('inertiatic', 'inertiatic/esp', 'inertiatic/CVS'),
+ added)
+ self.assertSubset(('*.py[co]',), ignored)
+ self.assertSubset(('inertiatic/foo.pyc',), ignored['*.py[co]'])
+
+ def test_add_multiple_dirs(self):
+ """Test smart adding multiple directories at once."""
+ added_paths = ['file1', 'file2',
+ 'dir1/', 'dir1/file3',
+ 'dir1/subdir2/', 'dir1/subdir2/file4',
+ 'dir2/', 'dir2/file5',
+ ]
+ not_added = ['file6', 'dir3/', 'dir3/file7', 'dir3/file8']
+ self.build_tree(added_paths)
+ self.build_tree(not_added)
+
+ wt = self.make_branch_and_tree('.')
+ wt.smart_add(['file1', 'file2', 'dir1', 'dir2'])
+
+ for path in added_paths:
+ self.assertNotEqual(None, wt.path2id(path.rstrip('/')),
+ 'Failed to add path: %s' % (path,))
+ for path in not_added:
+ self.assertEqual(None, wt.path2id(path.rstrip('/')),
+ 'Accidentally added path: %s' % (path,))
+
+ def test_add_file_in_unknown_dir(self):
+ # Test that parent directory addition is implicit
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/subdir/', 'dir/subdir/foo'])
+ tree.smart_add(['dir/subdir/foo'])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(['', 'dir', 'dir/subdir', 'dir/subdir/foo'],
+ [path for path, ie in tree.iter_entries_by_dir()])
+
+ def test_add_dir_bug_251864(self):
+ """Added file turning into a dir should be detected on add dir
+
+ Similar to bug 205636 but with automatic adding of directory contents.
+ """
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["dir"]) # whoops, make a file called dir
+ tree.smart_add(["dir"])
+ os.remove("dir")
+ self.build_tree(["dir/", "dir/file"])
+ tree.smart_add(["dir"])
+ tree.commit("Add dir contents")
+ self.addCleanup(tree.lock_read().unlock)
+ self.assertEqual([(u"dir", "directory"), (u"dir/file", "file")],
+ [(t[0], t[2]) for t in tree.list_files()])
+ self.assertFalse(list(tree.iter_changes(tree.basis_tree())))
+
+ def test_add_subdir_file_bug_205636(self):
+ """Added file turning into a dir should be detected on add dir/file"""
+ tree = self.make_branch_and_tree(".")
+ self.build_tree(["dir"]) # whoops, make a file called dir
+ tree.smart_add(["dir"])
+ os.remove("dir")
+ self.build_tree(["dir/", "dir/file"])
+ tree.smart_add(["dir/file"])
+ tree.commit("Add file in dir")
+ self.addCleanup(tree.lock_read().unlock)
+ self.assertEqual([(u"dir", "directory"), (u"dir/file", "file")],
+ [(t[0], t[2]) for t in tree.list_files()])
+ self.assertFalse(list(tree.iter_changes(tree.basis_tree())))
+
+ def test_custom_ids(self):
+ sio = StringIO()
+ action = test_smart_add.AddCustomIDAction(to_file=sio,
+ should_print=True)
+ self.build_tree(['file1', 'dir1/', 'dir1/file2'])
+
+ wt = self.make_branch_and_tree('.')
+ wt.smart_add(['.'], action=action)
+ # The order of adds is not strictly fixed:
+ sio.seek(0)
+ lines = sorted(sio.readlines())
+ self.assertEqualDiff(['added dir1 with id directory-dir1\n',
+ 'added dir1/file2 with id file-dir1%file2\n',
+ 'added file1 with id file-file1\n',
+ ], lines)
+ wt.lock_read()
+ self.addCleanup(wt.unlock)
+ self.assertEqual([('', wt.path2id('')),
+ ('dir1', 'directory-dir1'),
+ ('file1', 'file-file1'),
+ ('dir1/file2', 'file-dir1%file2'),
+ ], [(path, ie.file_id) for path, ie
+ in wt.iter_entries_by_dir()])
+
+
+class TestSmartAddConflictRelatedFiles(per_workingtree.TestCaseWithWorkingTree):
+
+ def make_tree_with_text_conflict(self):
+ tb = self.make_branch_and_tree('base')
+ self.build_tree_contents([('base/file', 'content in base')])
+ tb.add('file')
+ tb.commit('Adding file')
+
+ t1 = tb.bzrdir.sprout('t1').open_workingtree()
+
+ self.build_tree_contents([('base/file', 'content changed in base')])
+ tb.commit('Changing file in base')
+
+ self.build_tree_contents([('t1/file', 'content in t1')])
+ t1.commit('Changing file in t1')
+ t1.merge_from_branch(tb.branch)
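+ # The merge conflicts on 'file', leaving file.BASE, file.THIS and file.OTHER alongside it in t1.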
+ return t1
+
+ def test_cant_add_generated_files_implicitly(self):
+ t = self.make_tree_with_text_conflict()
+ added, ignored = t.smart_add([t.basedir])
+ self.assertEqual(([], {}), (added, ignored))
+
+ def test_can_add_generated_files_explicitly(self):
+ fnames = ['file.%s' % s for s in ('BASE', 'THIS', 'OTHER')]
+ t = self.make_tree_with_text_conflict()
+ added, ignored = t.smart_add([t.basedir + '/%s' % f for f in fnames])
+ self.assertEqual((fnames, {}), (added, ignored))
+
+
+class TestSmartAddTreeUnicode(per_workingtree.TestCaseWithWorkingTree):
+
+ _test_needs_features = [features.UnicodeFilenameFeature]
+
+ def setUp(self):
+ super(TestSmartAddTreeUnicode, self).setUp()
+ self.build_tree([u'a\u030a'])
+ self.wt = self.make_branch_and_tree('.')
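+ # Record the current normalized_filename so each test can install its own policy and have the original restored afterwards.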
+ self.overrideAttr(osutils, 'normalized_filename')
+
+ def test_requires_normalized_unicode_filenames_fails_on_unnormalized(self):
+ """Adding unnormalized unicode filenames fail if and only if the
+ workingtree format has the requires_normalized_unicode_filenames flag
+ set and the underlying filesystem doesn't normalize.
+ """
+ osutils.normalized_filename = osutils._accessible_normalized_filename
+ if (self.workingtree_format.requires_normalized_unicode_filenames
+ and sys.platform != 'darwin'):
+ self.assertRaises(
+ errors.NoSuchFile, self.wt.smart_add, [u'a\u030a'])
+ else:
+ self.wt.smart_add([u'a\u030a'])
+
+ def test_accessible_explicit(self):
+ osutils.normalized_filename = osutils._accessible_normalized_filename
+ if self.workingtree_format.requires_normalized_unicode_filenames:
+ raise tests.TestNotApplicable(
+ 'Working tree format smart_add requires normalized unicode '
+ 'filenames')
+ self.wt.smart_add([u'a\u030a'])
+ self.wt.lock_read()
+ self.addCleanup(self.wt.unlock)
+ self.assertEqual([('', 'directory'), (u'\xe5', 'file')],
+ [(path, ie.kind) for path,ie in
+ self.wt.iter_entries_by_dir()])
+
+ def test_accessible_implicit(self):
+ osutils.normalized_filename = osutils._accessible_normalized_filename
+ if self.workingtree_format.requires_normalized_unicode_filenames:
+ raise tests.TestNotApplicable(
+ 'Working tree format smart_add requires normalized unicode '
+ 'filenames')
+ self.wt.smart_add([])
+ self.wt.lock_read()
+ self.addCleanup(self.wt.unlock)
+ self.assertEqual([('', 'directory'), (u'\xe5', 'file')],
+ [(path, ie.kind) for path,ie
+ in self.wt.iter_entries_by_dir()])
+
+ def test_inaccessible_explicit(self):
+ osutils.normalized_filename = osutils._inaccessible_normalized_filename
+ self.assertRaises(errors.InvalidNormalization,
+ self.wt.smart_add, [u'a\u030a'])
+
+ def test_inaccessible_implicit(self):
+ osutils.normalized_filename = osutils._inaccessible_normalized_filename
+ # TODO: jam 20060701 In the future, this should probably
+ # just ignore files that don't fit the normalization
+ # rules, rather than exploding
+ self.assertRaises(errors.InvalidNormalization, self.wt.smart_add, [])
diff --git a/bzrlib/tests/per_workingtree/test_symlinks.py b/bzrlib/tests/per_workingtree/test_symlinks.py
new file mode 100644
index 0000000..72d63dc
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_symlinks.py
@@ -0,0 +1,180 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test symlink support.
+"""
+
+import os
+
+from bzrlib import (
+ osutils,
+ tests,
+ workingtree,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestSmartAddTree(TestCaseWithWorkingTree):
+
+ # See eg <https://bugs.launchpad.net/bzr/+bug/192859>
+
+ _test_needs_features = [features.SymlinkFeature]
+
+ def test_smart_add_symlink(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('tree/link@', 'target'),
+ ])
+ tree.smart_add(['tree/link'])
+ self.assertIsNot(None, tree.path2id('link'))
+ self.assertIs(None, tree.path2id('target'))
+ self.assertEqual('symlink',
+ tree.kind(tree.path2id('link')))
+
+ def test_smart_add_symlink_pointing_outside(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('tree/link@', '../../../../target'),
+ ])
+ tree.smart_add(['tree/link'])
+ self.assertIsNot(None, tree.path2id('link'))
+ self.assertIs(None, tree.path2id('target'))
+ self.assertEqual('symlink',
+ tree.kind(tree.path2id('link')))
+
+ def test_add_file_under_symlink(self):
+ # similar to
+ # https://bugs.launchpad.net/bzr/+bug/192859/comments/3
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('tree/link@', 'dir'),
+ ('tree/dir/',),
+ ('tree/dir/file', 'content'),
+ ])
+ self.assertEquals(
+ tree.smart_add(['tree/link/file']),
+ ([u'dir', u'dir/file'], {}))
+ # should add the actual parent directory, not the apparent parent
+ # (which is actually a symlink)
+ self.assertTrue(tree.path2id('dir/file'))
+ self.assertTrue(tree.path2id('dir'))
+ self.assertIs(None, tree.path2id('link'))
+ self.assertIs(None, tree.path2id('link/file'))
+
+
+class TestKindChanges(TestCaseWithWorkingTree):
+
+ _test_needs_features = [features.SymlinkFeature]
+
+ def test_symlink_changes_to_dir(self):
+ # <https://bugs.launchpad.net/bzr/+bug/192859>:
+ # we had some past problems with the workingtree remembering for too
+ # long what kind of object was at a particular name; we really
+ # shouldn't do that. Operating on the dirstate through passing
+ # inventory deltas rather than mutating the inventory largely avoids
+ # that.
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('tree/a@', 'target')])
+ tree.smart_add(['tree/a'])
+ tree.commit('add symlink')
+ os.unlink('tree/a')
+ self.build_tree_contents([
+ ('tree/a/',),
+ ('tree/a/f', 'content'),
+ ])
+ tree.smart_add(['tree/a/f'])
+ tree.commit('change to dir')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEquals([], list(tree.iter_changes(tree.basis_tree())))
+ if tree._format.supports_versioned_directories:
+ self.assertEquals(
+ ['a', 'a/f'], sorted(info[0] for info in tree.list_files()))
+ else:
+ self.assertEquals([], list(tree.list_files()))
+
+ def test_dir_changes_to_symlink(self):
+ # <https://bugs.launchpad.net/bzr/+bug/192859>:
+ # we had some past problems with the workingtree remembering for too
+ # long what kind of object was at a particular name; we really
+ # shouldn't do that. Operating on the dirstate through passing
+ # inventory deltas rather than mutating the inventory largely avoids
+ # that.
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('tree/a/',),
+ ('tree/a/file', 'content'),
+ ])
+ tree.smart_add(['tree/a'])
+ tree.commit('add dir')
+ osutils.rmtree('tree/a')
+ self.build_tree_contents([
+ ('tree/a@', 'target'),
+ ])
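+ # The commit should record the kind change from directory to symlink without an explicit re-add.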
+ tree.commit('change to symlink')
+
+
+class TestOpenTree(TestCaseWithWorkingTree):
+
+ _test_needs_features = [features.SymlinkFeature]
+
+ def test_open_containing_through_symlink(self):
+ self.make_test_tree()
+ self.check_open_containing('link/content', 'tree', 'content')
+ self.check_open_containing('link/sublink', 'tree', 'sublink')
+ # this next one is a bit debatable, but arguably it's better that
+ # open_containing is only concerned with opening the tree
+ # and then you can deal with symlinks along the way if you want
+ self.check_open_containing('link/sublink/subcontent', 'tree',
+ 'sublink/subcontent')
+
+ def check_open_containing(self, to_open, expected_tree_name,
+ expected_relpath):
+ wt, relpath = workingtree.WorkingTree.open_containing(to_open)
+ self.assertEquals(relpath, expected_relpath)
+ self.assertEndsWith(wt.basedir, expected_tree_name)
+
+ def test_tree_files(self):
+ # not strictly a WorkingTree method, but it should be
+ # probably the root cause for
+ # <https://bugs.launchpad.net/bzr/+bug/128562>
+ self.make_test_tree()
+ self.check_tree_files(['tree/outerlink'],
+ 'tree', ['outerlink'])
+ self.check_tree_files(['link/outerlink'],
+ 'tree', ['outerlink'])
+ self.check_tree_files(['link/sublink/subcontent'],
+ 'tree', ['subdir/subcontent'])
+
+ def check_tree_files(self, to_open, expected_tree, expect_paths):
+ tree, relpaths = workingtree.WorkingTree.open_containing_paths(to_open)
+ self.assertEndsWith(tree.basedir, expected_tree)
+ self.assertEquals(expect_paths, relpaths)
+
+ def make_test_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([
+ ('link@', 'tree'),
+ ('tree/outerlink@', '/not/there'),
+ ('tree/content', 'hello'),
+ ('tree/sublink@', 'subdir'),
+ ('tree/subdir/',),
+ ('tree/subdir/subcontent', 'subcontent stuff')
+ ])
diff --git a/bzrlib/tests/per_workingtree/test_uncommit.py b/bzrlib/tests/per_workingtree/test_uncommit.py
new file mode 100644
index 0000000..c57f016
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_uncommit.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests of the parent related functions of WorkingTrees."""
+
+from bzrlib import (
+ uncommit,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestUncommit(TestCaseWithWorkingTree):
+
+ def test_uncommit_to_null(self):
+ tree = self.make_branch_and_tree('branch')
+ tree.lock_write()
+ revid = tree.commit('a revision')
+ tree.unlock()
+ uncommit.uncommit(tree.branch, tree=tree)
+ self.assertEqual([], tree.get_parent_ids())
diff --git a/bzrlib/tests/per_workingtree/test_unversion.py b/bzrlib/tests/per_workingtree/test_unversion.py
new file mode 100644
index 0000000..b911c2a
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_unversion.py
@@ -0,0 +1,205 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests of the WorkingTree.unversion API."""
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestUnversion(TestCaseWithWorkingTree):
+
+ def test_unversion_requires_write_lock(self):
+ """WT.unversion([]) in a read lock raises ReadOnlyError."""
+ tree = self.make_branch_and_tree('.')
+ tree.lock_read()
+ self.assertRaises(errors.ReadOnlyError, tree.unversion, [])
+ tree.unlock()
+
+ def test_unversion_missing_file(self):
+ """WT.unversion(['missing-id']) raises NoSuchId."""
+ tree = self.make_branch_and_tree('.')
+ self.assertRaises(errors.NoSuchId, tree.unversion, ['missing-id'])
+
+ def test_unversion_parent_and_child_renamed_bug_187207(self):
+ # When unversioning, dirstate trees showed a bug in dealing with
+ # unversioning children of reparented children of unversioned
+ # paths when relocation entries are present and the relocation
+ # points later into the dirstate.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['del/', 'del/sub/', 'del/sub/b'])
+ tree.add(['del', 'del/sub', 'del/sub/b'], ['del', 'sub', 'b'])
+ tree.commit('setup')
+ tree.rename_one('del/sub', 'sub')
+ self.assertEqual('sub/b', tree.id2path('b'))
+ tree.unversion(['del', 'b'])
+ self.assertRaises(errors.NoSuchId, tree.id2path, 'b')
+
+ def test_unversion_several_files(self):
+ """After unversioning several files, they should not be versioned."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b', 'c'])
+ tree.add(['a', 'b', 'c'], ['a-id', 'b-id', 'c-id'])
+ # within a lock unversion should take effect
+ tree.lock_write()
+ tree.unversion(['a-id', 'b-id'])
+ self.assertFalse(tree.has_id('a-id'))
+ self.assertFalse(tree.has_id('b-id'))
+ self.assertTrue(tree.has_id('c-id'))
+ self.assertTrue(tree.has_filename('a'))
+ self.assertTrue(tree.has_filename('b'))
+ self.assertTrue(tree.has_filename('c'))
+ tree.unlock()
+ # the changes should have persisted to disk - reopen the workingtree
+ # to be sure.
+ tree = tree.bzrdir.open_workingtree()
+ tree.lock_read()
+ self.assertFalse(tree.has_id('a-id'))
+ self.assertFalse(tree.has_id('b-id'))
+ self.assertTrue(tree.has_id('c-id'))
+ self.assertTrue(tree.has_filename('a'))
+ self.assertTrue(tree.has_filename('b'))
+ self.assertTrue(tree.has_filename('c'))
+ tree.unlock()
+
+ def test_unversion_subtree(self):
+ """Unversioning the root of a subtree unversions the entire subtree."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'c'])
+ tree.add(['a', 'a/b', 'c'], ['a-id', 'b-id', 'c-id'])
+ # within a lock unversion should take effect
+ tree.lock_write()
+ tree.unversion(['a-id'])
+ self.assertFalse(tree.has_id('a-id'))
+ self.assertFalse(tree.has_id('b-id'))
+ self.assertTrue(tree.has_id('c-id'))
+ self.assertTrue(tree.has_filename('a'))
+ self.assertTrue(tree.has_filename('a/b'))
+ self.assertTrue(tree.has_filename('c'))
+ tree.unlock()
+
+ def test_unversion_subtree_and_children(self):
+ """Passing a child id will raise NoSuchId.
+
+ This is because the parent directory will have already been removed.
+ """
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'a/c', 'd'])
+ tree.add(['a', 'a/b', 'a/c', 'd'], ['a-id', 'b-id', 'c-id', 'd-id'])
+ tree.lock_write()
+ try:
+ tree.unversion(['b-id', 'a-id'])
+ self.assertFalse(tree.has_id('a-id'))
+ self.assertFalse(tree.has_id('b-id'))
+ self.assertFalse(tree.has_id('c-id'))
+ self.assertTrue(tree.has_id('d-id'))
+ # The files are still on disk
+ self.assertTrue(tree.has_filename('a'))
+ self.assertTrue(tree.has_filename('a/b'))
+ self.assertTrue(tree.has_filename('a/c'))
+ self.assertTrue(tree.has_filename('d'))
+ finally:
+ tree.unlock()
+
+ def test_unversion_renamed(self):
+ tree = self.make_branch_and_tree('a')
+ self.build_tree(['a/dir/', 'a/dir/f1', 'a/dir/f2', 'a/dir/f3',
+ 'a/dir2/'])
+ tree.add(['dir', 'dir/f1', 'dir/f2', 'dir/f3', 'dir2'],
+ ['dir-id', 'f1-id', 'f2-id', 'f3-id', 'dir2-id'])
+ rev_id1 = tree.commit('init')
+ # Start off by renaming entries, and then unversion a bunch of entries
+ # https://bugs.launchpad.net/bzr/+bug/114615
+ tree.rename_one('dir/f1', 'dir/a')
+ tree.rename_one('dir/f2', 'dir/z')
+ tree.move(['dir/f3'], 'dir2')
+
+ tree.lock_read()
+ try:
+ root_id = tree.get_root_id()
+ paths = [(path, ie.file_id)
+ for path, ie in tree.iter_entries_by_dir()]
+ finally:
+ tree.unlock()
+ self.assertEqual([('', root_id),
+ ('dir', 'dir-id'),
+ ('dir2', 'dir2-id'),
+ ('dir/a', 'f1-id'),
+ ('dir/z', 'f2-id'),
+ ('dir2/f3', 'f3-id'),
+ ], paths)
+
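+ # Unversioning the directory should also unversion the renamed entries still under it.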
+ tree.unversion(set(['dir-id']))
+ paths = [(path, ie.file_id)
+ for path, ie in tree.iter_entries_by_dir()]
+
+ self.assertEqual([('', root_id),
+ ('dir2', 'dir2-id'),
+ ('dir2/f3', 'f3-id'),
+ ], paths)
+
+ def test_unversion_after_conflicted_merge(self):
+ # Test for bug #114615
+ tree_a = self.make_branch_and_tree('A')
+ self.build_tree(['A/a/', 'A/a/m', 'A/a/n'])
+ tree_a.add(['a', 'a/m', 'a/n'], ['a-id', 'm-id', 'n-id'])
+ tree_a.commit('init')
+
+ tree_a.lock_read()
+ try:
+ root_id = tree_a.get_root_id()
+ finally:
+ tree_a.unlock()
+
+ tree_b = tree_a.bzrdir.sprout('B').open_workingtree()
+ self.build_tree(['B/xyz/'])
+ tree_b.add(['xyz'], ['xyz-id'])
+ tree_b.rename_one('a/m', 'xyz/m')
+ tree_b.unversion(['a-id'])
+ tree_b.commit('delete in B')
+
+ paths = [(path, ie.file_id)
+ for path, ie in tree_b.iter_entries_by_dir()]
+ self.assertEqual([('', root_id),
+ ('xyz', 'xyz-id'),
+ ('xyz/m', 'm-id'),
+ ], paths)
+
+ self.build_tree_contents([('A/a/n', 'new contents for n\n')])
+ tree_a.commit('change n in A')
+
+ # Merging from A should introduce conflicts because 'n' was modified
+ # and removed, so 'a' needs to be restored. We also have a conflict
+ # because 'a' is still an existing directory
+ num_conflicts = tree_b.merge_from_branch(tree_a.branch)
+ self.assertEqual(4, num_conflicts)
+ paths = [(path, ie.file_id)
+ for path, ie in tree_b.iter_entries_by_dir()]
+ self.assertEqual([('', root_id),
+ ('a', 'a-id'),
+ ('xyz', 'xyz-id'),
+ ('a/n.OTHER', 'n-id'),
+ ('xyz/m', 'm-id'),
+ ], paths)
+ tree_b.unversion(['a-id'])
+ paths = [(path, ie.file_id)
+ for path, ie in tree_b.iter_entries_by_dir()]
+ self.assertEqual([('', root_id),
+ ('xyz', 'xyz-id'),
+ ('xyz/m', 'm-id'),
+ ], paths)
diff --git a/bzrlib/tests/per_workingtree/test_views.py b/bzrlib/tests/per_workingtree/test_views.py
new file mode 100644
index 0000000..67e3439
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_views.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Views stored within a working tree.
+
+The views are actually in the WorkingTree.views namespace, but these are
+1:1 with WorkingTree implementations so can be tested from here.
+"""
+
+
+from warnings import warn
+
+from bzrlib import views, errors
+from bzrlib.tests import TestSkipped
+from bzrlib.workingtree import WorkingTree
+
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+
+class TestTreeViews(TestCaseWithWorkingTree):
+
+ def setUp(self):
+ # formats that don't support views can skip the rest of these
+ # tests...
+ fmt = self.workingtree_format
+ f = getattr(fmt, 'supports_views', None)
+ if f is None:
+ raise TestSkipped("format %s doesn't declare whether it "
+ "supports views, assuming not" % fmt)
+ if not f():
+ raise TestSkipped("format %s doesn't support views" % fmt)
+ TestCaseWithWorkingTree.setUp(self)
+
+ def test_views_initially_empty(self):
+ wt = self.make_branch_and_tree('wt')
+ current, views = wt.views.get_view_info()
+ self.assertEqual(None, current)
+ self.assertEqual({}, views)
+
+ def test_set_and_get_view_info(self):
+ wt = self.make_branch_and_tree('wt')
+ view_current = 'view-name'
+ view_dict = {
+ view_current: ['dir-1'],
+ 'other-name': ['dir-2']}
+ wt.views.set_view_info(view_current, view_dict)
+ current, views = wt.views.get_view_info()
+ self.assertEquals(view_current, current)
+ self.assertEquals(view_dict, views)
+ # then reopen the tree and see they're still there
+ wt = WorkingTree.open('wt')
+ current, views = wt.views.get_view_info()
+ self.assertEquals(view_current, current)
+ self.assertEquals(view_dict, views)
+ # test setting a current view which does not exist
+ self.assertRaises(errors.NoSuchView,
+ wt.views.set_view_info, 'yet-another', view_dict)
+ current, views = wt.views.get_view_info()
+ self.assertEquals(view_current, current)
+ self.assertEquals(view_dict, views)
+ # test clearing the current view
+ wt.views.set_view_info(None, view_dict)
+ current, views = wt.views.get_view_info()
+ self.assertEquals(None, current)
+ self.assertEquals(view_dict, views)
+
+ def test_lookup_view(self):
+ wt = self.make_branch_and_tree('wt')
+ view_current = 'view-name'
+ view_dict = {
+ view_current: ['dir-1'],
+ 'other-name': ['dir-2']}
+ wt.views.set_view_info(view_current, view_dict)
+ # test lookup of the default view
+ result = wt.views.lookup_view()
+ self.assertEqual(result, ['dir-1'])
+ # test lookup of a named view
+ result = wt.views.lookup_view('other-name')
+ self.assertEqual(result, ['dir-2'])
+
+ def test_set_view(self):
+ wt = self.make_branch_and_tree('wt')
+ # test that set_view sets the current view by default
+ wt.views.set_view('view-1', ['dir-1'])
+ current, views = wt.views.get_view_info()
+ self.assertEquals('view-1', current)
+ self.assertEquals({'view-1': ['dir-1']}, views)
+ # test adding a view and not making it the current one
+ wt.views.set_view('view-2', ['dir-2'], make_current=False)
+ current, views = wt.views.get_view_info()
+ self.assertEquals('view-1', current)
+ self.assertEquals({'view-1': ['dir-1'], 'view-2': ['dir-2']}, views)
+
+ def test_unicode_view(self):
+ wt = self.make_branch_and_tree('wt')
+ view_name = u'\u3070'
+ view_files = ['foo', 'bar/']
+ view_dict = {view_name: view_files}
+ wt.views.set_view_info(view_name, view_dict)
+ current, views = wt.views.get_view_info()
+ self.assertEquals(view_name, current)
+ self.assertEquals(view_dict, views)
+
+ def test_no_such_view(self):
+ wt = self.make_branch_and_tree('wt')
+ try:
+ wt.views.lookup_view('opaque')
+ except errors.NoSuchView, e:
+ self.assertEquals(e.view_name, 'opaque')
+ self.assertEquals(str(e), 'No such view: opaque.')
+ else:
+ self.fail("didn't get expected exception")
+
+ def test_delete_view(self):
+ wt = self.make_branch_and_tree('wt')
+ view_name = u'\N{GREEK SMALL LETTER ALPHA}'
+ view_files = ['alphas/']
+ wt.views.set_view(view_name, view_files)
+ # now try to delete it
+ wt.views.delete_view(view_name)
+ # now you can't look it up
+ self.assertRaises(errors.NoSuchView,
+ wt.views.lookup_view, view_name)
+ # and it's not in the dictionary
+ self.assertEquals(wt.views.get_view_info()[1], {})
+ # and you can't remove it a second time
+ self.assertRaises(errors.NoSuchView,
+ wt.views.delete_view, view_name)
+ # or remove a view that never existed
+ self.assertRaises(errors.NoSuchView,
+ wt.views.delete_view, view_name + '2')
+
+ def test_check_path_in_view(self):
+ wt = self.make_branch_and_tree('wt')
+ view_current = 'view-name'
+ view_dict = {
+ view_current: ['dir-1'],
+ 'other-name': ['dir-2']}
+ wt.views.set_view_info(view_current, view_dict)
+ self.assertEqual(views.check_path_in_view(wt, 'dir-1'), None)
+ self.assertEqual(views.check_path_in_view(wt, 'dir-1/sub'), None)
+ self.assertRaises(errors.FileOutsideView,
+ views.check_path_in_view, wt, 'dir-2')
+ self.assertRaises(errors.FileOutsideView,
+ views.check_path_in_view, wt, 'dir-2/sub')
+ self.assertRaises(errors.FileOutsideView,
+ views.check_path_in_view, wt, 'other')
+
+
+class TestUnsupportedViews(TestCaseWithWorkingTree):
+ """Formats that don't support views should give reasonable errors."""
+
+ def setUp(self):
+ fmt = self.workingtree_format
+ supported = getattr(fmt, 'supports_views', None)
+ if supported is None:
+ warn("Format %s doesn't declare whether it supports views or not"
+ % fmt)
+ raise TestSkipped('No view support at all')
+ if supported():
+ raise TestSkipped("Format %s declares that views are supported"
+ % fmt)
+ # it's covered by TestTreeViews
+ TestCaseWithWorkingTree.setUp(self)
+
+ def test_view_methods_raise(self):
+ wt = self.make_branch_and_tree('wt')
+ self.assertRaises(errors.ViewsNotSupported,
+ wt.views.set_view_info, 'bar', {'bar': ['bars/']})
+ self.assertRaises(errors.ViewsNotSupported,
+ wt.views.get_view_info)
+ self.assertRaises(errors.ViewsNotSupported,
+ wt.views.lookup_view, 'foo')
+ self.assertRaises(errors.ViewsNotSupported,
+ wt.views.set_view, 'foo', 'bar')
+ self.assertRaises(errors.ViewsNotSupported,
+ wt.views.delete_view, 'foo')
diff --git a/bzrlib/tests/per_workingtree/test_walkdirs.py b/bzrlib/tests/per_workingtree/test_walkdirs.py
new file mode 100644
index 0000000..d754269
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_walkdirs.py
@@ -0,0 +1,266 @@
+# Copyright (C) 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the extra cases that WorkingTree.walkdirs can encounter."""
+
+import os
+
+from bzrlib import transform
+from bzrlib.tests.features import SymlinkFeature
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+
+# tests to write:
+# type mismatches - file to link, dir, dir to file, link, link to file, dir
+
+class DirBlock:
+ """Object representation of the tuples returned by dirstate."""
+
+ def __init__(self, tree, file_path, file_name=None, id=None,
+ inventory_kind=None, stat=None, disk_kind='unknown'):
+ self.file_path = file_path
+ self.abspath = tree.abspath(file_path)
+ self.relpath = tree.relpath(file_path)
+ if file_name is None:
+ file_name = os.path.split(file_path)[-1]
+ if len(file_name) == 0:
+ file_name = os.path.split(file_path)[-2]
+ self.file_name = file_name
+ self.id = id
+ self.inventory_kind = inventory_kind
+ self.stat = stat
+ self.disk_kind = disk_kind
+
+ def as_tuple(self):
+ return (self.relpath, self.file_name, self.disk_kind,
+ self.stat, self.id, self.inventory_kind)
+
+ def as_dir_tuple(self):
+ return (self.relpath, self.id)
+
+ def __str__(self):
+ return """
+file_path = %r
+abspath = %r
+relpath = %r
+file_name = %r
+id = %r
+inventory_kind = %r
+stat = %r
+disk_kind = %r""" % (self.file_path, self.abspath, self.relpath,
+ self.file_name, self.id, self.inventory_kind, self.stat,
+ self.disk_kind)
+
+
+class TestWalkdirs(TestCaseWithWorkingTree):
+
+ added='added'
+ missing='missing'
+ unknown='unknown'
+
+ def get_tree(self, file_status, prefix=None):
+ tree = self.make_branch_and_tree('.')
+ dirblocks = []
+ paths = [
+ file_status + ' file',
+ file_status + ' dir/',
+ file_status + ' dir/a file',
+ file_status + ' empty dir/',
+ ]
+ self.build_tree(paths)
+
+ def add_dirblock(path, kind):
+ dirblock = DirBlock(tree, path)
+ if file_status != self.unknown:
+ dirblock.id = 'a ' + str(path).replace('/','-') + '-id'
+ dirblock.inventory_kind = kind
+ if file_status != self.missing:
+ dirblock.disk_kind = kind
+ dirblock.stat = os.lstat(dirblock.relpath)
+ dirblocks.append(dirblock)
+
+ add_dirblock(paths[0], 'file')
+ add_dirblock(paths[1], 'directory')
+ add_dirblock(paths[2], 'file')
+ add_dirblock(paths[3], 'directory')
+
+ if file_status != self.unknown:
+ tree.add(paths, [db.id for db in dirblocks])
+
+ if file_status == self.missing:
+ # now make the files be missing
+ tree.bzrdir.root_transport.delete(dirblocks[0].relpath)
+ tree.bzrdir.root_transport.delete_tree(dirblocks[1].relpath)
+ tree.bzrdir.root_transport.delete_tree(dirblocks[3].relpath)
+
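+ # Expected output: one ((relpath, dir_id), [entry tuples]) block per directory, children listed in sorted order.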
+ expected_dirblocks = [
+ (('', tree.path2id('')),
+ [dirblocks[1].as_tuple(), dirblocks[3].as_tuple(),
+ dirblocks[0].as_tuple()]
+ ),
+ (dirblocks[1].as_dir_tuple(),
+ [dirblocks[2].as_tuple()]
+ ),
+ (dirblocks[3].as_dir_tuple(),
+ []
+ ),
+ ]
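+ # When a prefix is requested, only the expected block for that directory applies.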
+ if prefix:
+ expected_dirblocks = [e for e in expected_dirblocks
+ if len(e) > 0 and len(e[0]) > 0 and e[0][0] == prefix]
+ return tree, expected_dirblocks
+
+ def _test_walkdir(self, file_status, prefix=""):
+ result = []
+ tree, expected_dirblocks = self.get_tree(file_status, prefix)
+ tree.lock_read()
+ for dirinfo, dirblock in tree.walkdirs(prefix):
+ result.append((dirinfo, list(dirblock)))
+ tree.unlock()
+
+ # check each return value for debugging ease.
+ for pos, item in enumerate(expected_dirblocks):
+ result_pos = []
+ if len(result) > pos:
+ result_pos = result[pos]
+ self.assertEqual(item, result_pos)
+ self.assertEqual(expected_dirblocks, result)
+
+ def test_walkdir_unknowns(self):
+ """unknown files and directories should be reported by walkdirs."""
+ self._test_walkdir(self.unknown)
+
+ def test_walkdir_from_unknown_dir(self):
+ """Doing a walkdir when the requested prefix is unknown but on disk."""
+ self._test_walkdir(self.unknown, 'unknown dir')
+
+ def test_walkdir_missings(self):
+ """missing files and directories should be reported by walkdirs."""
+ self._test_walkdir(self.missing)
+
+ def test_walkdir_from_dir(self):
+ """Doing a walkdir when the requested prefix is known and on disk."""
+ self._test_walkdir(self.added, 'added dir')
+
+ def test_walkdir_from_empty_dir(self):
+ """Doing a walkdir when the requested prefix is empty dir."""
+ self._test_walkdir(self.added, 'added empty dir')
+
+ def test_walkdir_from_missing_dir(self):
+ """Doing a walkdir when the requested prefix is missing but on disk."""
+ self._test_walkdir(self.missing, 'missing dir')
+
+ def test_walkdirs_type_changes(self):
+ """Walkdir shows the actual kinds on disk and the recorded kinds."""
+ self.requireFeature(SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ paths = ['file1', 'file2', 'dir1/', 'dir2/']
+ ids = ['file1', 'file2', 'dir1', 'dir2']
+ self.build_tree(paths)
+ tree.add(paths, ids)
+ tt = transform.TreeTransform(tree)
+ root_transaction_id = tt.trans_id_tree_path('')
+ tt.new_symlink('link1',
+ root_transaction_id, 'link-target', 'link1')
+ tt.new_symlink('link2',
+ root_transaction_id, 'link-target', 'link2')
+ tt.apply()
+ tree.bzrdir.root_transport.delete_tree('dir1')
+ tree.bzrdir.root_transport.delete_tree('dir2')
+ tree.bzrdir.root_transport.delete('file1')
+ tree.bzrdir.root_transport.delete('file2')
+ tree.bzrdir.root_transport.delete('link1')
+ tree.bzrdir.root_transport.delete('link2')
+ changed_paths = ['dir1', 'file1/', 'link1', 'link2/']
+ self.build_tree(changed_paths)
+ os.symlink('target', 'dir2')
+ os.symlink('target', 'file2')
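+ # Every versioned path now has a different kind on disk than the kind recorded in the tree.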
+ dir1_stat = os.lstat('dir1')
+ dir2_stat = os.lstat('dir2')
+ file1_stat = os.lstat('file1')
+ file2_stat = os.lstat('file2')
+ link1_stat = os.lstat('link1')
+ link2_stat = os.lstat('link2')
+ expected_dirblocks = [
+ (('', tree.path2id('')),
+ [('dir1', 'dir1', 'file', dir1_stat, 'dir1', 'directory'),
+ ('dir2', 'dir2', 'symlink', dir2_stat, 'dir2', 'directory'),
+ ('file1', 'file1', 'directory', file1_stat, 'file1', 'file'),
+ ('file2', 'file2', 'symlink', file2_stat, 'file2', 'file'),
+ ('link1', 'link1', 'file', link1_stat, 'link1', 'symlink'),
+ ('link2', 'link2', 'directory', link2_stat, 'link2', 'symlink'),
+ ]
+ ),
+ (('dir1', 'dir1'),
+ [
+ ]
+ ),
+ (('dir2', 'dir2'),
+ [
+ ]
+ ),
+ (('file1', None),
+ [
+ ]
+ ),
+ (('link2', None),
+ [
+ ]
+ ),
+ ]
+ tree.lock_read()
+ result = list(tree.walkdirs())
+ tree.unlock()
+ # check each return value for debugging ease.
+ for pos, item in enumerate(expected_dirblocks):
+ self.assertEqual(item, result[pos])
+ self.assertEqual(len(expected_dirblocks), len(result))
+
+ def test_walkdirs_type_changes_wo_symlinks(self):
+ # similar to test_walkdirs_type_changes
+ # but don't use symlinks for safe testing on win32
+ tree = self.make_branch_and_tree('.')
+ paths = ['file1', 'dir1/']
+ ids = ['file1', 'dir1']
+ self.build_tree(paths)
+ tree.add(paths, ids)
+ tree.bzrdir.root_transport.delete_tree('dir1')
+ tree.bzrdir.root_transport.delete('file1')
+ changed_paths = ['dir1', 'file1/']
+ self.build_tree(changed_paths)
+ dir1_stat = os.lstat('dir1')
+ file1_stat = os.lstat('file1')
+ expected_dirblocks = [
+ (('', tree.path2id('')),
+ [('dir1', 'dir1', 'file', dir1_stat, 'dir1', 'directory'),
+ ('file1', 'file1', 'directory', file1_stat, 'file1', 'file'),
+ ]
+ ),
+ (('dir1', 'dir1'),
+ [
+ ]
+ ),
+ (('file1', None),
+ [
+ ]
+ ),
+ ]
+ tree.lock_read()
+ result = list(tree.walkdirs())
+ tree.unlock()
+ # check each return value for debugging ease.
+ for pos, item in enumerate(expected_dirblocks):
+ self.assertEqual(item, result[pos])
+ self.assertEqual(len(expected_dirblocks), len(result))
diff --git a/bzrlib/tests/per_workingtree/test_workingtree.py b/bzrlib/tests/per_workingtree/test_workingtree.py
new file mode 100644
index 0000000..4177eae
--- /dev/null
+++ b/bzrlib/tests/per_workingtree/test_workingtree.py
@@ -0,0 +1,1234 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+# and others
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+import errno
+import os
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ config,
+ controldir,
+ errors,
+ osutils,
+ revision as _mod_revision,
+ symbol_versioning,
+ tests,
+ trace,
+ urlutils,
+ )
+from bzrlib.errors import (
+ UnsupportedOperation,
+ PathsNotVersionedError,
+ )
+from bzrlib.inventory import Inventory
+from bzrlib.mutabletree import MutableTree
+from bzrlib.osutils import pathjoin, getcwd, has_symlinks
+from bzrlib.tests import (
+ features,
+ TestSkipped,
+ TestNotApplicable,
+ )
+from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
+from bzrlib.workingtree import (
+ TreeDirectory,
+ TreeFile,
+ TreeLink,
+ InventoryWorkingTree,
+ WorkingTree,
+ )
+from bzrlib.conflicts import ConflictList, TextConflict, ContentsConflict
+
+
+class TestWorkingTree(TestCaseWithWorkingTree):
+
+ def test_branch_builder(self):
+ # Just a smoke test that we get a branch at the specified relpath
+ builder = self.make_branch_builder('foobar')
+ br = branch.Branch.open('foobar')
+
+ def test_list_files(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'file'])
+ if has_symlinks():
+ os.symlink('target', 'symlink')
+ tree.lock_read()
+ files = list(tree.list_files())
+ tree.unlock()
+ self.assertEqual(files[0], ('dir', '?', 'directory', None, TreeDirectory()))
+ self.assertEqual(files[1], ('file', '?', 'file', None, TreeFile()))
+ if has_symlinks():
+ self.assertEqual(files[2], ('symlink', '?', 'symlink', None, TreeLink()))
+
+ def test_list_files_sorted(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'file', 'dir/file', 'dir/b',
+ 'dir/subdir/', 'a', 'dir/subfile',
+ 'zz_dir/', 'zz_dir/subfile'])
+ tree.lock_read()
+ files = [(path, kind) for (path, v, kind, file_id, entry)
+ in tree.list_files()]
+ tree.unlock()
+ self.assertEqual([
+ ('a', 'file'),
+ ('dir', 'directory'),
+ ('file', 'file'),
+ ('zz_dir', 'directory'),
+ ], files)
+
+ tree.add(['dir', 'zz_dir'])
+ tree.lock_read()
+ files = [(path, kind) for (path, v, kind, file_id, entry)
+ in tree.list_files()]
+ tree.unlock()
+ self.assertEqual([
+ ('a', 'file'),
+ ('dir', 'directory'),
+ ('dir/b', 'file'),
+ ('dir/file', 'file'),
+ ('dir/subdir', 'directory'),
+ ('dir/subfile', 'file'),
+ ('file', 'file'),
+ ('zz_dir', 'directory'),
+ ('zz_dir/subfile', 'file'),
+ ], files)
+
+ def test_list_files_kind_change(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/filename'])
+ tree.add('filename', 'file-id')
+ os.unlink('tree/filename')
+ self.build_tree(['tree/filename/'])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ result = list(tree.list_files())
+ self.assertEqual(1, len(result))
+ self.assertEqual(('filename', 'V', 'directory', 'file-id'),
+ result[0][:4])
+
+ def test_get_config_stack(self):
+ # Smoke test that all working trees succeed getting a config
+ wt = self.make_branch_and_tree('.')
+ conf = wt.get_config_stack()
+ self.assertIsInstance(conf, config.Stack)
+
+ def test_open_containing(self):
+ branch = self.make_branch_and_tree('.').branch
+ local_base = urlutils.local_path_from_url(branch.base)
+
+ # Empty opens '.'
+ wt, relpath = WorkingTree.open_containing()
+ self.assertEqual('', relpath)
+ self.assertEqual(wt.basedir + '/', local_base)
+
+ # '.' opens this dir
+ wt, relpath = WorkingTree.open_containing(u'.')
+ self.assertEqual('', relpath)
+ self.assertEqual(wt.basedir + '/', local_base)
+
+ # './foo' finds '.' and a relpath of 'foo'
+ wt, relpath = WorkingTree.open_containing('./foo')
+ self.assertEqual('foo', relpath)
+ self.assertEqual(wt.basedir + '/', local_base)
+
+ # abspath(foo) finds '.' and relpath of 'foo'
+ wt, relpath = WorkingTree.open_containing('./foo')
+ wt, relpath = WorkingTree.open_containing(getcwd() + '/foo')
+ self.assertEqual('foo', relpath)
+ self.assertEqual(wt.basedir + '/', local_base)
+
+ # can even be a url: finds '.' and relpath of 'foo'
+ wt, relpath = WorkingTree.open_containing('./foo')
+ wt, relpath = WorkingTree.open_containing(
+ urlutils.local_path_to_url(getcwd() + '/foo'))
+ self.assertEqual('foo', relpath)
+ self.assertEqual(wt.basedir + '/', local_base)
+
+ def test_basic_relpath(self):
+ # for comprehensive relpath tests, see whitebox.py.
+ tree = self.make_branch_and_tree('.')
+ self.assertEqual('child',
+ tree.relpath(pathjoin(getcwd(), 'child')))
+
+ def test_lock_locks_branch(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_read()
+ self.assertEqual('r', tree.branch.peek_lock_mode())
+ tree.unlock()
+ self.assertEqual(None, tree.branch.peek_lock_mode())
+ tree.lock_write()
+ self.assertEqual('w', tree.branch.peek_lock_mode())
+ tree.unlock()
+ self.assertEqual(None, tree.branch.peek_lock_mode())
+
+ def test_revert(self):
+ """Test selected-file revert"""
+ tree = self.make_branch_and_tree('.')
+
+ self.build_tree(['hello.txt'])
+ with file('hello.txt', 'w') as f: f.write('initial hello')
+
+ self.assertRaises(PathsNotVersionedError,
+ tree.revert, ['hello.txt'])
+ tree.add(['hello.txt'])
+ tree.commit('create initial hello.txt')
+
+ self.check_file_contents('hello.txt', 'initial hello')
+ with file('hello.txt', 'w') as f: f.write('new hello')
+ self.check_file_contents('hello.txt', 'new hello')
+
+ # revert file modified since last revision
+ tree.revert(['hello.txt'])
+ self.check_file_contents('hello.txt', 'initial hello')
+ self.check_file_contents('hello.txt.~1~', 'new hello')
+
+ # reverting again does not clobber the backup
+ tree.revert(['hello.txt'])
+ self.check_file_contents('hello.txt', 'initial hello')
+ self.check_file_contents('hello.txt.~1~', 'new hello')
+
+ # backup files are numbered
+ with file('hello.txt', 'w') as f: f.write('new hello2')
+ tree.revert(['hello.txt'])
+ self.check_file_contents('hello.txt', 'initial hello')
+ self.check_file_contents('hello.txt.~1~', 'new hello')
+ self.check_file_contents('hello.txt.~2~', 'new hello2')
+
+ def test_revert_missing(self):
+ # Revert a file that has been deleted since last commit
+ tree = self.make_branch_and_tree('.')
+ with file('hello.txt', 'w') as f: f.write('initial hello')
+ tree.add('hello.txt')
+ tree.commit('added hello.txt')
+ os.unlink('hello.txt')
+ tree.remove('hello.txt')
+ tree.revert(['hello.txt'])
+ self.assertPathExists('hello.txt')
+
+ def test_versioned_files_not_unknown(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['hello.txt'])
+ tree.add('hello.txt')
+ self.assertEquals(list(tree.unknowns()),
+ [])
+
+ def test_unknowns(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['hello.txt',
+ 'hello.txt.~1~'])
+ self.build_tree_contents([('.bzrignore', '*.~*\n')])
+ tree.add('.bzrignore')
+ self.assertEquals(list(tree.unknowns()),
+ ['hello.txt'])
+
+ def test_initialize(self):
+ # initialize should create a working tree and branch in an existing dir
+ t = self.make_branch_and_tree('.')
+ b = branch.Branch.open('.')
+ self.assertEqual(t.branch.base, b.base)
+ t2 = WorkingTree.open('.')
+ self.assertEqual(t.basedir, t2.basedir)
+ self.assertEqual(b.base, t2.branch.base)
+ # TODO maybe we should check the branch format? not sure if its
+ # appropriate here.
+
+ def test_rename_dirs(self):
+ """Test renaming directories and the files within them."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ self.build_tree(['dir/', 'dir/sub/', 'dir/sub/file'])
+ wt.add(['dir', 'dir/sub', 'dir/sub/file'])
+
+ wt.commit('create initial state')
+
+ revid = b.last_revision()
+ self.log('first revision_id is {%s}' % revid)
+
+ tree = b.repository.revision_tree(revid)
+ self.log('contents of tree: %r' % list(tree.iter_entries_by_dir()))
+
+ self.check_tree_shape(tree, ['dir/', 'dir/sub/', 'dir/sub/file'])
+ wt.rename_one('dir', 'newdir')
+
+ wt.lock_read()
+ self.check_tree_shape(wt,
+ ['newdir/', 'newdir/sub/', 'newdir/sub/file'])
+ wt.unlock()
+ wt.rename_one('newdir/sub', 'newdir/newsub')
+ wt.lock_read()
+ self.check_tree_shape(wt, ['newdir/', 'newdir/newsub/',
+ 'newdir/newsub/file'])
+ wt.unlock()
+
+ def test_add_in_unversioned(self):
+ """Try to add a file in an unversioned directory.
+
+ "bzr add" adds the parent as necessary, but simple working tree add
+ doesn't do that.
+ """
+ from bzrlib.errors import NotVersionedError
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo/',
+ 'foo/hello'])
+ if not wt._format.supports_versioned_directories:
+ wt.add('foo/hello')
+ else:
+ self.assertRaises(NotVersionedError,
+ wt.add,
+ 'foo/hello')
+
+ def test_add_missing(self):
+ # adding a missing file -> NoSuchFile
+ wt = self.make_branch_and_tree('.')
+ self.assertRaises(errors.NoSuchFile, wt.add, 'fpp')
+
+ def test_remove_verbose(self):
+ #FIXME the remove api should not print or otherwise depend on the
+ # text UI - RBC 20060124
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['hello'])
+ wt.add(['hello'])
+ wt.commit(message='add hello')
+ stdout = StringIO()
+ stderr = StringIO()
+ self.assertEqual(None, self.apply_redirected(None, stdout, stderr,
+ wt.remove,
+ ['hello'],
+ verbose=True))
+ self.assertEqual('? hello\n', stdout.getvalue())
+ self.assertEqual('', stderr.getvalue())
+
+ def test_clone_trivial(self):
+ wt = self.make_branch_and_tree('source')
+ cloned_dir = wt.bzrdir.clone('target')
+ cloned = cloned_dir.open_workingtree()
+ self.assertEqual(cloned.get_parent_ids(), wt.get_parent_ids())
+
+ def test_clone_empty(self):
+ wt = self.make_branch_and_tree('source')
+ cloned_dir = wt.bzrdir.clone('target', revision_id=_mod_revision.NULL_REVISION)
+ cloned = cloned_dir.open_workingtree()
+ self.assertEqual(cloned.get_parent_ids(), wt.get_parent_ids())
+
+ def test_last_revision(self):
+ wt = self.make_branch_and_tree('source')
+ self.assertEqual([], wt.get_parent_ids())
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ parent_ids = wt.get_parent_ids()
+ self.assertEqual(['A'], parent_ids)
+ for parent_id in parent_ids:
+ self.assertIsInstance(parent_id, str)
+
+ def test_set_last_revision(self):
+ wt = self.make_branch_and_tree('source')
+ # set last-revision to one not in the history
+ wt.set_last_revision('A')
+ # set it back to None for an empty tree.
+ wt.set_last_revision('null:')
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ self.assertEqual(['A'], wt.get_parent_ids())
+ # null: is always in the branch
+ wt.set_last_revision('null:')
+ self.assertEqual([], wt.get_parent_ids())
+ # and now we can set it to 'A'
+ # because some formats mutate the branch to set it on the tree
+ # we need to alter the branch to let this pass.
+ if getattr(wt.branch, "_set_revision_history", None) is None:
+ raise TestSkipped("Branch format does not permit arbitrary"
+ " history")
+ wt.branch._set_revision_history(['A', 'B'])
+ wt.set_last_revision('A')
+ self.assertEqual(['A'], wt.get_parent_ids())
+ self.assertRaises(errors.ReservedId, wt.set_last_revision, 'A:')
+
+ def test_set_last_revision_different_to_branch(self):
+ # working tree formats from the meta-dir format and newer support
+ # setting the last revision on a tree independently of that on the
+ # branch. It's conceivable that some future formats may want to
+ # couple them again (i.e. because it's really a smart server and
+ # the working tree will always match the branch). So we test
+ # that formats where initialising a branch does not initialise a
+ # tree - and thus have separable entities - support skewing the
+ # two things.
+ branch = self.make_branch('tree')
+ try:
+ # if there is a working tree now, this is not supported.
+ branch.bzrdir.open_workingtree()
+ return
+ except errors.NoWorkingTree:
+ pass
+ wt = branch.bzrdir.create_workingtree()
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ wt.set_last_revision(None)
+ self.assertEqual([], wt.get_parent_ids())
+ self.assertEqual('A', wt.branch.last_revision())
+ # and now we can set it back to 'A'
+ wt.set_last_revision('A')
+ self.assertEqual(['A'], wt.get_parent_ids())
+ self.assertEqual('A', wt.branch.last_revision())
+
+ def test_clone_and_commit_preserves_last_revision(self):
+ """Doing a commit into a clone tree does not affect the source."""
+ wt = self.make_branch_and_tree('source')
+ cloned_dir = wt.bzrdir.clone('target')
+ wt.commit('A', allow_pointless=True, rev_id='A')
+ self.assertNotEqual(cloned_dir.open_workingtree().get_parent_ids(),
+ wt.get_parent_ids())
+
+ def test_clone_preserves_content(self):
+ wt = self.make_branch_and_tree('source')
+ self.build_tree(['added', 'deleted', 'notadded'],
+ transport=wt.bzrdir.transport.clone('..'))
+ wt.add('deleted', 'deleted')
+ wt.commit('add deleted')
+ wt.remove('deleted')
+ wt.add('added', 'added')
+ cloned_dir = wt.bzrdir.clone('target')
+ cloned = cloned_dir.open_workingtree()
+ cloned_transport = cloned.bzrdir.transport.clone('..')
+ self.assertFalse(cloned_transport.has('deleted'))
+ self.assertTrue(cloned_transport.has('added'))
+ self.assertFalse(cloned_transport.has('notadded'))
+ self.assertEqual('added', cloned.path2id('added'))
+ self.assertEqual(None, cloned.path2id('deleted'))
+ self.assertEqual(None, cloned.path2id('notadded'))
+
+ def test_basis_tree_returns_last_revision(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ wt.add('foo', 'foo-id')
+ wt.commit('A', rev_id='A')
+ wt.rename_one('foo', 'bar')
+ wt.commit('B', rev_id='B')
+ wt.set_parent_ids(['B'])
+ tree = wt.basis_tree()
+ tree.lock_read()
+ self.assertTrue(tree.has_filename('bar'))
+ tree.unlock()
+ wt.set_parent_ids(['A'])
+ tree = wt.basis_tree()
+ tree.lock_read()
+ self.assertTrue(tree.has_filename('foo'))
+ tree.unlock()
+
+ def test_clone_tree_revision(self):
+ # make a tree with a last-revision,
+ # and clone it with a different last-revision; the clone should switch
+ # to it.
+ #
+ # also test that the content is merged
+ # and conflicts recorded.
+ # This should merge between the trees - local edits should be preserved
+ # but other changes occurred.
+ # we test this by having one file that does
+ # not change between two revisions, and another that does -
+ # if the changed one is not changed, fail,
+ # if the one that did not change has lost a local change, fail.
+ #
+ raise TestSkipped('revision limiting is not implemented yet.')
+
+ def test_initialize_with_revision_id(self):
+ # a bzrdir can construct a working tree for itself at a specific revision.
+ source = self.make_branch_and_tree('source')
+ source.commit('a', rev_id='a', allow_pointless=True)
+ source.commit('b', rev_id='b', allow_pointless=True)
+ self.build_tree(['new/'])
+ made_control = self.bzrdir_format.initialize('new')
+ source.branch.repository.clone(made_control)
+ source.branch.clone(made_control)
+ made_tree = self.workingtree_format.initialize(made_control,
+ revision_id='a')
+ self.assertEqual(['a'], made_tree.get_parent_ids())
+
+ def test_post_build_tree_hook(self):
+ calls = []
+ def track_post_build_tree(tree):
+ calls.append(tree.last_revision())
+ source = self.make_branch_and_tree('source')
+ source.commit('a', rev_id='a', allow_pointless=True)
+ source.commit('b', rev_id='b', allow_pointless=True)
+ self.build_tree(['new/'])
+ made_control = self.bzrdir_format.initialize('new')
+ source.branch.repository.clone(made_control)
+ source.branch.clone(made_control)
+ MutableTree.hooks.install_named_hook("post_build_tree",
+ track_post_build_tree, "Test")
+ made_tree = self.workingtree_format.initialize(made_control,
+ revision_id='a')
+ self.assertEqual(['a'], calls)
+
+ def test_update_sets_last_revision(self):
+ # working tree formats from the meta-dir format and newer support
+ # setting the last revision on a tree independently of that on the
+ # branch. It's conceivable that some future formats may want to
+ # couple them again (i.e. because it's really a smart server and
+ # the working tree will always match the branch). So we test
+ # that formats where initialising a branch does not initialise a
+ # tree - and thus have separable entities - support skewing the
+ # two things.
+ main_branch = self.make_branch('tree')
+ try:
+ # if there is a working tree now, this is not supported.
+ main_branch.bzrdir.open_workingtree()
+ return
+ except errors.NoWorkingTree:
+ pass
+ wt = main_branch.bzrdir.create_workingtree()
+ # create an out of date working tree by making a checkout in this
+ # current format
+ self.build_tree(['checkout/', 'tree/file'])
+ checkout = bzrdir.BzrDirMetaFormat1().initialize('checkout')
+ checkout.set_branch_reference(main_branch)
+ old_tree = self.workingtree_format.initialize(checkout)
+ # now commit to 'tree'
+ wt.add('file')
+ wt.commit('A', rev_id='A')
+ # and update old_tree
+ self.assertEqual(0, old_tree.update())
+ self.assertPathExists('checkout/file')
+ self.assertEqual(['A'], old_tree.get_parent_ids())
+
+ def test_update_sets_root_id(self):
+ """Ensure tree root is set properly by update.
+
+ Since empty trees don't have root_ids, but workingtrees do,
+ an update of a checkout of revision 0 to a new revision, should set
+ the root id.
+ """
+ wt = self.make_branch_and_tree('tree')
+ main_branch = wt.branch
+ # create an out of date working tree by making a checkout in this
+ # current format
+ self.build_tree(['checkout/', 'tree/file'])
+ checkout = main_branch.create_checkout('checkout')
+ # now commit to 'tree'
+ wt.add('file')
+ wt.commit('A', rev_id='A')
+ # and update checkout
+ self.assertEqual(0, checkout.update())
+ self.assertPathExists('checkout/file')
+ self.assertEqual(wt.get_root_id(), checkout.get_root_id())
+ self.assertNotEqual(None, wt.get_root_id())
+
+ def test_update_sets_updated_root_id(self):
+ wt = self.make_branch_and_tree('tree')
+ wt.set_root_id('first_root_id')
+ self.assertEqual('first_root_id', wt.get_root_id())
+ self.build_tree(['tree/file'])
+ wt.add(['file'])
+ wt.commit('first')
+ co = wt.branch.create_checkout('checkout')
+ wt.set_root_id('second_root_id')
+ wt.commit('second')
+ self.assertEqual('second_root_id', wt.get_root_id())
+ self.assertEqual(0, co.update())
+ self.assertEqual('second_root_id', co.get_root_id())
+
+ def test_update_returns_conflict_count(self):
+ # working tree formats from the meta-dir format and newer support
+ # setting the last revision on a tree independently of that on the
+ # branch. It's conceivable that some future formats may want to
+ # couple them again (i.e. because it's really a smart server and
+ # the working tree will always match the branch). So we test
+ # that formats where initialising a branch does not initialise a
+ # tree - and thus have separable entities - support skewing the
+ # two things.
+ main_branch = self.make_branch('tree')
+ try:
+ # if there is a working tree now, this is not supported.
+ main_branch.bzrdir.open_workingtree()
+ return
+ except errors.NoWorkingTree:
+ pass
+ wt = main_branch.bzrdir.create_workingtree()
+ # create an out of date working tree by making a checkout in this
+ # current format
+ self.build_tree(['checkout/', 'tree/file'])
+ checkout = bzrdir.BzrDirMetaFormat1().initialize('checkout')
+ checkout.set_branch_reference(main_branch)
+ old_tree = self.workingtree_format.initialize(checkout)
+ # now commit to 'tree'
+ wt.add('file')
+ wt.commit('A', rev_id='A')
+ # and add a file to the checkout
+ self.build_tree(['checkout/file'])
+ old_tree.add('file')
+ # and update old_tree
+ self.assertEqual(1, old_tree.update())
+ self.assertEqual(['A'], old_tree.get_parent_ids())
+
+ def test_merge_revert(self):
+ from bzrlib.merge import merge_inner
+ this = self.make_branch_and_tree('b1')
+ self.build_tree_contents([('b1/a', 'a test\n'), ('b1/b', 'b test\n')])
+ this.add(['a', 'b'])
+ this.commit(message='')
+ base = this.bzrdir.clone('b2').open_workingtree()
+ self.build_tree_contents([('b2/a', 'b test\n')])
+ other = this.bzrdir.clone('b3').open_workingtree()
+ self.build_tree_contents([('b3/a', 'c test\n'), ('b3/c', 'c test\n')])
+ other.add('c')
+
+ self.build_tree_contents([('b1/b', 'q test\n'), ('b1/d', 'd test\n')])
+ # Note: If we don't lock this before calling merge_inner, then we get a
+ # lock-contention failure. This probably indicates something
+ # weird going on inside merge_inner. Probably something about
+ # calling bt = this_tree.basis_tree() in one lock, and then
+ # locking both this_tree and bt separately, causing a dirstate
+ # locking race.
+ this.lock_write()
+ self.addCleanup(this.unlock)
+ merge_inner(this.branch, other, base, this_tree=this)
+ a = open('b1/a', 'rb')
+ try:
+ self.assertNotEqual(a.read(), 'a test\n')
+ finally:
+ a.close()
+ this.revert()
+ self.assertFileEqual('a test\n', 'b1/a')
+ self.assertPathExists('b1/b.~1~')
+ self.assertPathDoesNotExist('b1/c')
+ self.assertPathDoesNotExist('b1/a.~1~')
+ self.assertPathExists('b1/d')
+
+ def test_update_updates_bound_branch_no_local_commits(self):
+ # doing an update in a tree updates the branch it's bound to, too.
+ master_tree = self.make_branch_and_tree('master')
+ tree = self.make_branch_and_tree('tree')
+ try:
+ tree.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+ # legacy branches cannot bind
+ return
+ master_tree.commit('foo', rev_id='foo', allow_pointless=True)
+ tree.update()
+ self.assertEqual(['foo'], tree.get_parent_ids())
+ self.assertEqual('foo', tree.branch.last_revision())
+
+ def test_update_turns_local_commit_into_merge(self):
+ # doing an update with a few local commits and no master commits
+ # makes pending-merges.
+ # this is done so that 'bzr update; bzr revert' will always produce
+ # an exact copy of the 'logical branch' - the referenced branch for
+ # a checkout, and the master for a bound branch.
+ # it's possible that we should instead have 'bzr update' when there
+ # is nothing new on the master leave the current commits intact and
+ # alter 'revert' to revert to the master always. But for now, it's
+ # good.
+ master_tree = self.make_branch_and_tree('master')
+ master_tip = master_tree.commit('first master commit')
+ tree = self.make_branch_and_tree('tree')
+ try:
+ tree.branch.bind(master_tree.branch)
+ except errors.UpgradeRequired:
+ # legacy branches cannot bind
+ return
+ # sync with master
+ tree.update()
+ # work locally
+ tree.commit('foo', rev_id='foo', allow_pointless=True, local=True)
+ tree.commit('bar', rev_id='bar', allow_pointless=True, local=True)
+ # sync with master preparatory to committing
+ tree.update()
+ # which should have pivoted the local tip into a merge
+ self.assertEqual([master_tip, 'bar'], tree.get_parent_ids())
+ # and the local branch history should match the master's now.
+ self.assertEqual(master_tree.branch.last_revision(),
+ tree.branch.last_revision())
+
+ def test_update_takes_revision_parameter(self):
+ wt = self.make_branch_and_tree('wt')
+ self.build_tree_contents([('wt/a', 'old content')])
+ wt.add(['a'])
+ rev1 = wt.commit('first master commit')
+ self.build_tree_contents([('wt/a', 'new content')])
+ rev2 = wt.commit('second master commit')
+ # https://bugs.launchpad.net/bzr/+bug/45719/comments/20
+ # when adding 'update -r' we should make sure all wt formats support
+ # it
+ conflicts = wt.update(revision=rev1)
+ self.assertFileEqual('old content', 'wt/a')
+ self.assertEqual([rev1], wt.get_parent_ids())
+
+ def test_merge_modified_detects_corruption(self):
+ # FIXME: This doesn't really test that it works; also this is not
+ # implementation-independent. mbp 20070226
+ tree = self.make_branch_and_tree('master')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable("merge-hashes is specific to bzr "
+ "working trees")
+ tree._transport.put_bytes('merge-hashes', 'asdfasdf')
+ self.assertRaises(errors.MergeModifiedFormatError, tree.merge_modified)
+
+ def test_merge_modified(self):
+ # merge_modified stores a map from file id to hash
+ tree = self.make_branch_and_tree('tree')
+ d = {'file-id': osutils.sha_string('hello')}
+ self.build_tree_contents([('tree/somefile', 'hello')])
+ tree.lock_write()
+ try:
+ tree.add(['somefile'], ['file-id'])
+ tree.set_merge_modified(d)
+ mm = tree.merge_modified()
+ self.assertEquals(mm, d)
+ finally:
+ tree.unlock()
+ mm = tree.merge_modified()
+ self.assertEquals(mm, d)
+
+ def test_conflicts(self):
+ from bzrlib.tests.test_conflicts import example_conflicts
+ tree = self.make_branch_and_tree('master')
+ try:
+ tree.set_conflicts(example_conflicts)
+ except UnsupportedOperation:
+ raise TestSkipped('set_conflicts not supported')
+
+ tree2 = WorkingTree.open('master')
+ self.assertEqual(tree2.conflicts(), example_conflicts)
+ tree2._transport.put_bytes('conflicts', '')
+ self.assertRaises(errors.ConflictFormatError,
+ tree2.conflicts)
+ tree2._transport.put_bytes('conflicts', 'a')
+ self.assertRaises(errors.ConflictFormatError,
+ tree2.conflicts)
+
+ def make_merge_conflicts(self):
+ from bzrlib.merge import merge_inner
+ tree = self.make_branch_and_tree('mine')
+ with file('mine/bloo', 'wb') as f: f.write('one')
+ with file('mine/blo', 'wb') as f: f.write('on')
+ tree.add(['bloo', 'blo'])
+ tree.commit("blah", allow_pointless=False)
+ base = tree.branch.repository.revision_tree(tree.last_revision())
+ controldir.ControlDir.open("mine").sprout("other")
+ with file('other/bloo', 'wb') as f: f.write('two')
+ othertree = WorkingTree.open('other')
+ othertree.commit('blah', allow_pointless=False)
+ with file('mine/bloo', 'wb') as f: f.write('three')
+ tree.commit("blah", allow_pointless=False)
+ merge_inner(tree.branch, othertree, base, this_tree=tree)
+ return tree
+
+ def test_merge_conflicts(self):
+ tree = self.make_merge_conflicts()
+ self.assertEqual(len(tree.conflicts()), 1)
+
+ def test_clear_merge_conflicts(self):
+ tree = self.make_merge_conflicts()
+ self.assertEqual(len(tree.conflicts()), 1)
+ try:
+ tree.set_conflicts(ConflictList())
+ except UnsupportedOperation:
+ raise TestSkipped('unsupported operation')
+ self.assertEqual(tree.conflicts(), ConflictList())
+
+ def test_add_conflicts(self):
+ tree = self.make_branch_and_tree('tree')
+ try:
+ tree.add_conflicts([TextConflict('path_a')])
+ except UnsupportedOperation:
+ raise TestSkipped('unsupported operation')
+ self.assertEqual(ConflictList([TextConflict('path_a')]),
+ tree.conflicts())
+ tree.add_conflicts([TextConflict('path_a')])
+ self.assertEqual(ConflictList([TextConflict('path_a')]),
+ tree.conflicts())
+ tree.add_conflicts([ContentsConflict('path_a')])
+ self.assertEqual(ConflictList([ContentsConflict('path_a'),
+ TextConflict('path_a')]),
+ tree.conflicts())
+ tree.add_conflicts([TextConflict('path_b')])
+ self.assertEqual(ConflictList([ContentsConflict('path_a'),
+ TextConflict('path_a'),
+ TextConflict('path_b')]),
+ tree.conflicts())
+
+ def test_revert_clear_conflicts(self):
+ tree = self.make_merge_conflicts()
+ self.assertEqual(len(tree.conflicts()), 1)
+ tree.revert(["blo"])
+ self.assertEqual(len(tree.conflicts()), 1)
+ tree.revert(["bloo"])
+ self.assertEqual(len(tree.conflicts()), 0)
+
+ def test_revert_clear_conflicts2(self):
+ tree = self.make_merge_conflicts()
+ self.assertEqual(len(tree.conflicts()), 1)
+ tree.revert()
+ self.assertEqual(len(tree.conflicts()), 0)
+
+ def test_format_description(self):
+ tree = self.make_branch_and_tree('tree')
+ text = tree._format.get_format_description()
+ self.assertTrue(len(text))
+
+ def test_branch_attribute_is_not_settable(self):
+ # the branch attribute is an aspect of the working tree, not a
+ # configurable attribute
+ tree = self.make_branch_and_tree('tree')
+ def set_branch():
+ tree.branch = tree.branch
+ self.assertRaises(AttributeError, set_branch)
+
+ def test_list_files_versioned_before_ignored(self):
+ """A versioned file matching an ignore rule should not be ignored."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo.pyc'])
+ # ensure that foo.pyc is ignored
+ self.build_tree_contents([('.bzrignore', 'foo.pyc')])
+ tree.add('foo.pyc', 'anid')
+ tree.lock_read()
+ files = sorted(list(tree.list_files()))
+ tree.unlock()
+ self.assertEqual((u'.bzrignore', '?', 'file', None), files[0][:-1])
+ self.assertEqual((u'foo.pyc', 'V', 'file', 'anid'), files[1][:-1])
+ self.assertEqual(2, len(files))
+
+ def test_non_normalized_add_accessible(self):
+ try:
+ self.build_tree([u'a\u030a'])
+ except UnicodeError:
+ raise TestSkipped('Filesystem does not support unicode filenames')
+ tree = self.make_branch_and_tree('.')
+ orig = osutils.normalized_filename
+ osutils.normalized_filename = osutils._accessible_normalized_filename
+ try:
+ tree.add([u'a\u030a'])
+ tree.lock_read()
+ self.assertEqual([('', 'directory'), (u'\xe5', 'file')],
+ [(path, ie.kind) for path,ie in
+ tree.iter_entries_by_dir()])
+ tree.unlock()
+ finally:
+ osutils.normalized_filename = orig
+
+ def test_non_normalized_add_inaccessible(self):
+ try:
+ self.build_tree([u'a\u030a'])
+ except UnicodeError:
+ raise TestSkipped('Filesystem does not support unicode filenames')
+ tree = self.make_branch_and_tree('.')
+ orig = osutils.normalized_filename
+ osutils.normalized_filename = osutils._inaccessible_normalized_filename
+ try:
+ self.assertRaises(errors.InvalidNormalization,
+ tree.add, [u'a\u030a'])
+ finally:
+ osutils.normalized_filename = orig
+
+ def test__write_inventory(self):
+ # The private interface _write_inventory is currently used by transform.
+ tree = self.make_branch_and_tree('.')
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable("_write_inventory does not exist on "
+ "non-inventory working trees")
+ # if we write an inventory and then do a walkdirs we should get back
+ # missing entries, present entries, and unknowns as appropriate.
+ self.build_tree(['present', 'unknown'])
+ inventory = Inventory(tree.get_root_id())
+ inventory.add_path('missing', 'file', 'missing-id')
+ inventory.add_path('present', 'file', 'present-id')
+ # there is no point in being able to write an inventory to an unlocked
+ # tree object - it's a low-level API, not a convenience API.
+ tree.lock_write()
+ tree._write_inventory(inventory)
+ tree.unlock()
+ tree.lock_read()
+ try:
+ present_stat = os.lstat('present')
+ unknown_stat = os.lstat('unknown')
+ expected_results = [
+ (('', tree.get_root_id()),
+ [('missing', 'missing', 'unknown', None, 'missing-id', 'file'),
+ ('present', 'present', 'file', present_stat, 'present-id', 'file'),
+ ('unknown', 'unknown', 'file', unknown_stat, None, None),
+ ]
+ )]
+ self.assertEqual(expected_results, list(tree.walkdirs()))
+ finally:
+ tree.unlock()
+
+ def test_path2id(self):
+ # smoke test for path2id
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.add(['foo'], ['foo-id'])
+ self.assertEqual('foo-id', tree.path2id('foo'))
+ # the next assertion is for backwards compatibility with WorkingTree3,
+ # though it's probably a bad idea, it makes things work. Perhaps
+ # it should raise a deprecation warning?
+ self.assertEqual('foo-id', tree.path2id('foo/'))
+
+ def test_filter_unversioned_files(self):
+ # smoke test for filter_unversioned_files
+ tree = self.make_branch_and_tree('.')
+ paths = ['here-and-versioned', 'here-and-not-versioned',
+ 'not-here-and-versioned', 'not-here-and-not-versioned']
+ tree.add(['here-and-versioned', 'not-here-and-versioned'],
+ kinds=['file', 'file'])
+ self.build_tree(['here-and-versioned', 'here-and-not-versioned'])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual(
+ set(['not-here-and-not-versioned', 'here-and-not-versioned']),
+ tree.filter_unversioned_files(paths))
+
+ def test_detect_real_kind(self):
+ # working trees report the real kind of the file on disk, not the kind
+ # they had when they were first added
+ # create one file of every interesting type
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['file', 'directory/'])
+ names = ['file', 'directory']
+ if has_symlinks():
+ os.symlink('target', 'symlink')
+ names.append('symlink')
+ tree.add(names, [n + '-id' for n in names])
+ # now when we first look, we should see everything with the same kind
+ # with which they were initially added
+ for n in names:
+ actual_kind = tree.kind(n + '-id')
+ self.assertEqual(n, actual_kind)
+ # move them around so the names no longer correspond to the types
+ os.rename(names[0], 'tmp')
+ for i in range(1, len(names)):
+ os.rename(names[i], names[i-1])
+ os.rename('tmp', names[-1])
+ # now look and expect to see the correct types again
+ for i in range(len(names)):
+ actual_kind = tree.kind(names[i-1] + '-id')
+ expected_kind = names[i]
+ self.assertEqual(expected_kind, actual_kind)
+
+ def test_stored_kind_with_missing(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/a', 'tree/b/'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ os.unlink('tree/a')
+ os.rmdir('tree/b')
+ self.assertEqual('file', tree.stored_kind('a-id'))
+ self.assertEqual('directory', tree.stored_kind('b-id'))
+
+ def test_missing_file_sha1(self):
+ """If a file is missing, its sha1 should be reported as None."""
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['file'])
+ tree.add('file', 'file-id')
+ tree.commit('file added')
+ os.unlink('file')
+ self.assertIs(None, tree.get_file_sha1('file-id'))
+
+ def test_no_file_sha1(self):
+ """If a file is not present, get_file_sha1 should raise NoSuchId"""
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.assertRaises(errors.NoSuchId, tree.get_file_sha1, 'file-id')
+ self.build_tree(['file'])
+ tree.add('file', 'file-id')
+ tree.commit('foo')
+ tree.remove('file')
+ self.assertRaises(errors.NoSuchId, tree.get_file_sha1, 'file-id')
+
+ def test_case_sensitive(self):
+ """If filesystem is case-sensitive, tree should report this.
+
+ We check case-sensitivity by creating a file with a lowercase name,
+ then testing whether it exists with an uppercase name.
+ """
+ self.build_tree(['filename'])
+ if os.path.exists('FILENAME'):
+ case_sensitive = False
+ else:
+ case_sensitive = True
+ tree = self.make_branch_and_tree('test')
+ self.assertEqual(case_sensitive, tree.case_sensitive)
+ if not isinstance(tree, InventoryWorkingTree):
+ raise TestNotApplicable("get_format_string is only available "
+ "on bzr working trees")
+ # now we cheat, and make a file that matches the case-sensitive name
+ t = tree.bzrdir.get_workingtree_transport(None)
+ try:
+ content = tree._format.get_format_string()
+ except NotImplementedError:
+ # All-in-one formats didn't have a separate format string.
+ content = tree.bzrdir._format.get_format_string()
+ t.put_bytes(tree._format.case_sensitive_filename, content)
+ tree = tree.bzrdir.open_workingtree()
+ self.assertFalse(tree.case_sensitive)
+
+ def test_supports_executable(self):
+ self.build_tree(['filename'])
+ tree = self.make_branch_and_tree('.')
+ tree.add('filename')
+ self.assertIsInstance(tree._supports_executable(), bool)
+ if tree._supports_executable():
+ tree.lock_read()
+ try:
+ self.assertFalse(tree.is_executable(tree.path2id('filename')))
+ finally:
+ tree.unlock()
+ os.chmod('filename', 0755)
+ self.addCleanup(tree.lock_read().unlock)
+ self.assertTrue(tree.is_executable(tree.path2id('filename')))
+ else:
+ self.addCleanup(tree.lock_read().unlock)
+ self.assertFalse(tree.is_executable(tree.path2id('filename')))
+
+ def test_all_file_ids_with_missing(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/a', 'tree/b'])
+ tree.add(['a', 'b'], ['a-id', 'b-id'])
+ os.unlink('tree/a')
+ self.assertEqual(set(['a-id', 'b-id', tree.get_root_id()]),
+ tree.all_file_ids())
+
+ def test_sprout_hardlink(self):
+ real_os_link = getattr(os, 'link', None)
+ if real_os_link is None:
+ raise TestNotApplicable("This platform doesn't provide os.link")
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file'])
+ source.add('file')
+ source.commit('added file')
+ def fake_link(source, target):
+ raise OSError(errno.EPERM, 'Operation not permitted')
+ os.link = fake_link
+ try:
+ # Hard-link support is optional, so supplying hardlink=True may
+ # or may not raise an exception. But if it does, it must be
+ # HardLinkNotSupported
+ try:
+ source.bzrdir.sprout('target', accelerator_tree=source,
+ hardlink=True)
+ except errors.HardLinkNotSupported:
+ pass
+ finally:
+ os.link = real_os_link
+
+
+class TestWorkingTreeUpdate(TestCaseWithWorkingTree):
+
+ def make_diverged_master_branch(self):
+ """
+ B: wt.branch.last_revision()
+ M: wt.branch.get_master_branch().last_revision()
+ W: wt.last_revision()
+
+
+        1
+        |\
+      B-2 3
+        | |
+        4 5-M
+        |
+        W
+ """
+ format = self.workingtree_format.get_controldir_for_branch()
+ builder = self.make_branch_builder(".", format=format)
+ builder.start_series()
+ # mainline
+ builder.build_snapshot(
+ '1', None,
+ [('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file1', 'file1-id', 'file', 'file1 content\n'))])
+ # branch
+ builder.build_snapshot('2', ['1'], [])
+ builder.build_snapshot(
+ '4', ['2'],
+ [('add', ('file4', 'file4-id', 'file', 'file4 content\n'))])
+ # master
+ builder.build_snapshot('3', ['1'], [])
+ builder.build_snapshot(
+ '5', ['3'],
+ [('add', ('file5', 'file5-id', 'file', 'file5 content\n'))])
+ builder.finish_series()
+ return builder, builder._branch.last_revision()
+
+ def make_checkout_and_master(self, builder, wt_path, master_path, wt_revid,
+ master_revid=None, branch_revid=None):
+ """Build a lightweight checkout and its master branch."""
+ if master_revid is None:
+ master_revid = wt_revid
+ if branch_revid is None:
+ branch_revid = master_revid
+ final_branch = builder.get_branch()
+ # The master branch
+ master = final_branch.bzrdir.sprout(master_path,
+ master_revid).open_branch()
+ # The checkout
+ wt = self.make_branch_and_tree(wt_path)
+ wt.pull(final_branch, stop_revision=wt_revid)
+ wt.branch.pull(final_branch, stop_revision=branch_revid, overwrite=True)
+ try:
+ wt.branch.bind(master)
+ except errors.UpgradeRequired:
+ raise TestNotApplicable(
+ "Can't bind %s" % wt.branch._format.__class__)
+ return wt, master
+
+ def test_update_remove_commit(self):
+ """Update should remove revisions when the branch has removed
+ some commits.
+
+ We want to revert 4, so that starting with the
+ make_diverged_master_branch() graph the final result should be
+ equivalent to:
+
+        1
+        |\
+        3 2
+        | |\
+     MB-5 | 4
+        |/
+        W
+
+ And the changes in 4 have been removed from the WT.
+ """
+ builder, tip = self.make_diverged_master_branch()
+ wt, master = self.make_checkout_and_master(
+ builder, 'checkout', 'master', '4',
+ master_revid=tip, branch_revid='2')
+ # First update the branch
+ old_tip = wt.branch.update()
+ self.assertEqual('2', old_tip)
+ # No conflicts should occur
+ self.assertEqual(0, wt.update(old_tip=old_tip))
+ # We are in sync with the master
+ self.assertEqual(tip, wt.branch.last_revision())
+ # We have the right parents ready to be committed
+ self.assertEqual(['5', '2'], wt.get_parent_ids())
+
+ def test_update_revision(self):
+ builder, tip = self.make_diverged_master_branch()
+ wt, master = self.make_checkout_and_master(
+ builder, 'checkout', 'master', '4',
+ master_revid=tip, branch_revid='2')
+ self.assertEqual(0, wt.update(revision='1'))
+ self.assertEqual('1', wt.last_revision())
+ self.assertEqual(tip, wt.branch.last_revision())
+ self.assertPathExists('checkout/file1')
+ self.assertPathDoesNotExist('checkout/file4')
+ self.assertPathDoesNotExist('checkout/file5')
+
+
+class TestIllegalPaths(TestCaseWithWorkingTree):
+
+ def test_bad_fs_path(self):
+ if osutils.normalizes_filenames():
+ # You *can't* create an illegal filename on OSX.
+ raise tests.TestNotApplicable('OSX normalizes filenames')
+ self.requireFeature(features.UTF8Filesystem)
+ # We require a UTF8 filesystem, because otherwise we would need to get
+ # tricky to figure out how to create an illegal filename.
+ # \xb5 is an illegal path because it should be \xc2\xb5 for UTF-8
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/subdir/'])
+ tree.add('subdir')
+
+ f = open('tree/subdir/m\xb5', 'wb')
+ try:
+ f.write('trivial\n')
+ finally:
+ f.close()
+
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ basis = tree.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+
+ e = self.assertListRaises(errors.BadFilenameEncoding,
+ tree.iter_changes, tree.basis_tree(),
+ want_unversioned=True)
+ # We should display the relative path
+ self.assertEqual('subdir/m\xb5', e.filename)
+ self.assertEqual(osutils._fs_enc, e.fs_encoding)
+
+
+class TestControlComponent(TestCaseWithWorkingTree):
+ """WorkingTree implementations adequately implement ControlComponent."""
+
+ def test_urls(self):
+ wt = self.make_branch_and_tree('wt')
+ self.assertIsInstance(wt.user_url, str)
+ self.assertEqual(wt.user_url, wt.user_transport.base)
+ # for all current bzrdir implementations the user dir must be
+ # above the control dir but we might need to relax that?
+ self.assertEqual(wt.control_url.find(wt.user_url), 0)
+ self.assertEqual(wt.control_url, wt.control_transport.base)
+
+
+class TestWorthSavingLimit(TestCaseWithWorkingTree):
+
+ def make_wt_with_worth_saving_limit(self):
+ wt = self.make_branch_and_tree('wt')
+ if getattr(wt, '_worth_saving_limit', None) is None:
+ raise tests.TestNotApplicable('no _worth_saving_limit for'
+ ' this tree type')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ return wt
+
+ def test_not_set(self):
+ # Default should be 10
+ wt = self.make_wt_with_worth_saving_limit()
+ self.assertEqual(10, wt._worth_saving_limit())
+ ds = wt.current_dirstate()
+ self.assertEqual(10, ds._worth_saving_limit)
+
+ def test_set_in_branch(self):
+ wt = self.make_wt_with_worth_saving_limit()
+ conf = wt.get_config_stack()
+ conf.set('bzr.workingtree.worth_saving_limit', '20')
+ self.assertEqual(20, wt._worth_saving_limit())
+ ds = wt.current_dirstate()
+ self.assertEqual(10, ds._worth_saving_limit)
+
+ def test_invalid(self):
+ wt = self.make_wt_with_worth_saving_limit()
+ conf = wt.get_config_stack()
+ conf.set('bzr.workingtree.worth_saving_limit', 'a')
+ # If the config entry is invalid, default to 10
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ self.assertEqual(10, wt._worth_saving_limit())
+ self.assertLength(1, warnings)
+ self.assertEquals('Value "a" is not valid for'
+ ' "bzr.workingtree.worth_saving_limit"',
+ warnings[0])
+
+
+class TestFormatAttributes(TestCaseWithWorkingTree):
+
+ def test_versioned_directories(self):
+ self.assertSubset(
+ [self.workingtree_format.supports_versioned_directories],
+ (True, False))
diff --git a/bzrlib/tests/scenarios.py b/bzrlib/tests/scenarios.py
new file mode 100644
index 0000000..62c0bf6
--- /dev/null
+++ b/bzrlib/tests/scenarios.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Generate multiple variations in different scenarios.
+
+For a class whose tests should be repeated in varying scenarios, set a
+`scenarios` member to a list of scenarios where it should be repeated.
+
+This is similar to the interface provided by
+<http://launchpad.net/testscenarios/>.
+"""
+
+
+from bzrlib.tests import (
+ iter_suite_tests,
+ multiply_scenarios,
+ multiply_tests,
+ )
+
+
+def load_tests_apply_scenarios(standard_tests, module, loader):
+ """Multiply tests depending on their 'scenarios' attribute.
+
+ This can be assigned to 'load_tests' in any test module to make this
+ automatically work across tests in the module.
+ """
+ result = loader.suiteClass()
+ multiply_tests_by_their_scenarios(standard_tests, result)
+ return result
+
+
+def multiply_tests_by_their_scenarios(some_tests, into_suite):
+ """Multiply the tests in the given suite by their declared scenarios.
+
+ Each test must have a 'scenarios' attribute which is a list of
+ (name, params) pairs.
+
+ :param some_tests: TestSuite or Test.
+ :param into_suite: A TestSuite into which the resulting tests will be
+ inserted.
+ """
+ for test in iter_suite_tests(some_tests):
+ scenarios = getattr(test, 'scenarios', None)
+ if scenarios is None:
+ into_suite.addTest(test)
+ else:
+ multiply_tests(test, test.scenarios, into_suite)
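+
+
+# Illustrative sketch (not part of the original module): a test class that
+# declares a 'scenarios' attribute and a module-level 'load_tests' hook so
+# that each test is repeated once per scenario.  The class name and the
+# 'value' parameter are invented for the example; scenario parameters are
+# applied as attributes on each generated test instance.
+#
+#   from bzrlib import tests
+#   from bzrlib.tests.scenarios import load_tests_apply_scenarios
+#
+#   load_tests = load_tests_apply_scenarios
+#
+#   class TestWithScenarios(tests.TestCase):
+#
+#       scenarios = [
+#           ('small', {'value': 1}),
+#           ('large', {'value': 1000}),
+#           ]
+#
+#       def test_value_is_positive(self):
+#           self.assertTrue(self.value > 0)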
diff --git a/bzrlib/tests/script.py b/bzrlib/tests/script.py
new file mode 100644
index 0000000..4a03bc5
--- /dev/null
+++ b/bzrlib/tests/script.py
@@ -0,0 +1,535 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Shell-like test scripts.
+
+See developers/testing.html for more explanations.
+"""
+
+import doctest
+import errno
+import glob
+import os
+import shlex
+import textwrap
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+
+
+def split(s):
+ """Split a command line respecting quotes."""
+ scanner = shlex.shlex(s)
+ scanner.quotes = '\'"`'
+ scanner.whitespace_split = True
+ for t in list(scanner):
+ yield t
+
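+# For illustration only (behaviour assumed from the non-posix shlex scanner
+# configured above): quoted arguments stay as single tokens and keep their
+# quotes, which ScriptRunner._pre_process_args strips later.
+#
+#   list(split('bzr commit -m "fix bug"'))
+#   => ['bzr', 'commit', '-m', '"fix bug"']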
+
+def _script_to_commands(text, file_name=None):
+ """Turn a script into a list of commands with their associated IOs.
+
+ Each command appears on a line by itself starting with '$ '. It can be
+ associated with an input that will feed it and an expected output.
+
+ Comments start with '#' and run to the end of the line.
+ Empty lines are ignored.
+
+ Input and output are full lines terminated by a '\n'.
+
+ Input lines start with '<'.
+ Output lines start with nothing.
+ Error lines start with '2>'.
+
+ :return: A sequence of ([args], input, output, errors), where the args are
+ split into words, and the input, output, and errors are just strings,
+ typically containing newlines.
+ """
+
+ commands = []
+
+ def add_command(cmd, input, output, error):
+ if cmd is not None:
+ if input is not None:
+ input = ''.join(input)
+ if output is not None:
+ output = ''.join(output)
+ if error is not None:
+ error = ''.join(error)
+ commands.append((cmd, input, output, error))
+
+ cmd_cur = None
+ cmd_line = 1
+ lineno = 0
+ input, output, error = None, None, None
+ text = textwrap.dedent(text)
+ lines = text.split('\n')
+ # to make use of triple-quoted strings easier, we ignore a blank line
+ # right at the start and right at the end; the rest are meaningful
+ if lines and lines[0] == '':
+ del lines[0]
+ if lines and lines[-1] == '':
+ del lines[-1]
+ for line in lines:
+ lineno += 1
+ # Keep a copy for error reporting
+ orig = line
+ comment = line.find('#')
+ if comment >= 0:
+ # Delete comments
+ # NB: this syntax means comments are allowed inside output, which
+ # may be confusing...
+ line = line[0:comment]
+ line = line.rstrip()
+ if line == '':
+ continue
+ if line.startswith('$'):
+ # Time to output the current command
+ add_command(cmd_cur, input, output, error)
+ # And start a new one
+ cmd_cur = list(split(line[1:]))
+ cmd_line = lineno
+ input, output, error = None, None, None
+ elif line.startswith('<'):
+ if input is None:
+ if cmd_cur is None:
+ raise SyntaxError('No command for that input',
+ (file_name, lineno, 1, orig))
+ input = []
+ input.append(line[1:] + '\n')
+ elif line.startswith('2>'):
+ if error is None:
+ if cmd_cur is None:
+ raise SyntaxError('No command for that error',
+ (file_name, lineno, 1, orig))
+ error = []
+ error.append(line[2:] + '\n')
+ else:
+ # can happen if the first line is not recognized as a command, eg
+ # if the prompt has leading whitespace
+ if output is None:
+ if cmd_cur is None:
+ raise SyntaxError('No command for line %r' % (line,),
+ (file_name, lineno, 1, orig))
+ output = []
+ output.append(line + '\n')
+ # Add the last seen command
+ add_command(cmd_cur, input, output, error)
+ return commands
+
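+# Illustrative sketch of the structure produced above (values assumed, not
+# taken from a real run):
+#
+#   _script_to_commands('''
+#   $ echo hello
+#   hello
+#   $ cat
+#   <hello
+#   hello
+#   ''')
+#   => [(['echo', 'hello'], None, 'hello\n', None),
+#       (['cat'], 'hello\n', 'hello\n', None)]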
+
+def _scan_redirection_options(args):
+ """Recognize and process input and output redirections.
+
+ :param args: The command line arguments
+
+ :return: A tuple containing:
+ - The file name redirected from or None
+ - The file name redirected to or None
+ - The mode to open the output file or None
+ - The remaining arguments
+ """
+ def redirected_file_name(direction, name, args):
+ if name == '':
+ try:
+ name = args.pop(0)
+ except IndexError:
+ # We leave the error handling to higher levels; an empty name
+ # can't be legal.
+ name = ''
+ return name
+
+ remaining = []
+ in_name = None
+ out_name, out_mode = None, None
+ while args:
+ arg = args.pop(0)
+ if arg.startswith('<'):
+ in_name = redirected_file_name('<', arg[1:], args)
+ elif arg.startswith('>>'):
+ out_name = redirected_file_name('>>', arg[2:], args)
+ out_mode = 'ab+'
+ elif arg.startswith('>',):
+ out_name = redirected_file_name('>', arg[1:], args)
+ out_mode = 'wb+'
+ else:
+ remaining.append(arg)
+ return in_name, out_name, out_mode, remaining
+
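+# Illustrative sketch (assumed values): redirections are peeled off the
+# argument list before the remaining arguments reach the command.
+#
+#   _scan_redirection_options(['<input', 'foo', '>>log'])
+#   => ('input', 'log', 'ab+', ['foo'])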
+
+class ScriptRunner(object):
+ """Run a shell-like script from a test.
+
+ Can be used as:
+
+ from bzrlib.tests import script
+
+ ...
+
+ def test_bug_nnnnn(self):
+ sr = script.ScriptRunner()
+ sr.run_script(self, '''
+ $ bzr init
+ $ bzr do-this
+ # Boom, error
+ ''')
+ """
+
+ def __init__(self):
+ self.output_checker = doctest.OutputChecker()
+ self.check_options = doctest.ELLIPSIS
+
+ def run_script(self, test_case, text, null_output_matches_anything=False):
+ """Run a shell-like script as a test.
+
+ :param test_case: A TestCase instance that should provide the fail(),
+ assertEqualDiff and _run_bzr_core() methods as well as a 'test_dir'
+ attribute used as a jail root.
+
+ :param text: A shell-like script (see _script_to_commands for syntax).
+
+ :param null_output_matches_anything: For commands with no specified
+ output, ignore any output that does happen, including output on
+ standard error.
+ """
+ self.null_output_matches_anything = null_output_matches_anything
+ for cmd, input, output, error in _script_to_commands(text):
+ self.run_command(test_case, cmd, input, output, error)
+
+ def run_command(self, test_case, cmd, input, output, error):
+ mname = 'do_' + cmd[0]
+ method = getattr(self, mname, None)
+ if method is None:
+ raise SyntaxError('Command not found "%s"' % (cmd[0],),
+ (None, 1, 1, ' '.join(cmd)))
+ if input is None:
+ str_input = ''
+ else:
+ str_input = ''.join(input)
+ args = list(self._pre_process_args(cmd[1:]))
+ retcode, actual_output, actual_error = method(test_case,
+ str_input, args)
+
+ try:
+ self._check_output(output, actual_output, test_case)
+ except AssertionError, e:
+ raise AssertionError(str(e) + " in stdout of command %s" % cmd)
+ try:
+ self._check_output(error, actual_error, test_case)
+ except AssertionError, e:
+ raise AssertionError(str(e) +
+ " in stderr of running command %s" % cmd)
+ if retcode and not error and actual_error:
+ test_case.fail('In \n\t%s\nUnexpected error: %s'
+ % (' '.join(cmd), actual_error))
+ return retcode, actual_output, actual_error
+
+ def _check_output(self, expected, actual, test_case):
+ if not actual:
+ if expected is None:
+ return
+ elif expected == '...\n':
+ return
+ else:
+ test_case.fail('expected output: %r, but found nothing'
+ % (expected,))
+
+ null_output_matches_anything = getattr(
+ self, 'null_output_matches_anything', False)
+ if null_output_matches_anything and expected is None:
+ return
+
+ expected = expected or ''
+ matching = self.output_checker.check_output(
+ expected, actual, self.check_options)
+ if not matching:
+ # Note that we can't use output_checker.output_difference() here
+ # because... the API is broken ('expected' must be a doctest-specific
+ # object whose 'want' attribute would be our 'expected' parameter).
+ # So we just fall back to our good old
+ # assertEqualDiff since we know there *are* differences and the
+ # output should be decently readable.
+ #
+ # As a special case, we allow output that's missing a final
+ # newline to match an expected string that does have one, so that
+ # we can match a prompt printed on one line, then input given on
+ # the next line.
+ if expected == actual + '\n':
+ pass
+ else:
+ test_case.assertEqualDiff(expected, actual)
+
+ def _pre_process_args(self, args):
+ new_args = []
+ for arg in args:
+ # Strip the simple and double quotes since we don't care about
+ # them. We leave the backquotes in place though since they have a
+ # different semantic.
+ if arg[0] in ('"', "'") and arg[0] == arg[-1]:
+ yield arg[1:-1]
+ else:
+ if glob.has_magic(arg):
+ matches = glob.glob(arg)
+ if matches:
+ # We care more about order stability than performance
+ # here
+ matches.sort()
+ for m in matches:
+ yield m
+ else:
+ yield arg
+
+ def _read_input(self, input, in_name):
+ if in_name is not None:
+ infile = open(in_name, 'rb')
+ try:
+ # Command redirection takes precedence over provided input
+ input = infile.read()
+ finally:
+ infile.close()
+ return input
+
+ def _write_output(self, output, out_name, out_mode):
+ if out_name is not None:
+ outfile = open(out_name, out_mode)
+ try:
+ outfile.write(output)
+ finally:
+ outfile.close()
+ output = None
+ return output
+
+ def do_bzr(self, test_case, input, args):
+ retcode, out, err = test_case._run_bzr_core(
+ args, retcode=None, encoding=None, stdin=input, working_dir=None)
+ return retcode, out, err
+
+ def do_cat(self, test_case, input, args):
+ (in_name, out_name, out_mode, args) = _scan_redirection_options(args)
+ if args and in_name is not None:
+ raise SyntaxError('Specify a file OR use redirection')
+
+ inputs = []
+ if input:
+ inputs.append(input)
+ input_names = args
+ if in_name:
+ args.append(in_name)
+ for in_name in input_names:
+ try:
+ inputs.append(self._read_input(None, in_name))
+ except IOError, e:
+ # Some filenames are illegal on Windows and generate EINVAL
+ # rather than just saying the filename doesn't exist
+ if e.errno in (errno.ENOENT, errno.EINVAL):
+ return (1, None,
+ '%s: No such file or directory\n' % (in_name,))
+ raise
+ # Basically cat copies input to output
+ output = ''.join(inputs)
+ # Handle output redirections
+ try:
+ output = self._write_output(output, out_name, out_mode)
+ except IOError, e:
+ # If out_name cannot be created, we may get 'ENOENT', however if
+ # out_name is something like '', we can get EINVAL
+ if e.errno in (errno.ENOENT, errno.EINVAL):
+ return 1, None, '%s: No such file or directory\n' % (out_name,)
+ raise
+ return 0, output, None
+
+ def do_echo(self, test_case, input, args):
+ (in_name, out_name, out_mode, args) = _scan_redirection_options(args)
+ if input or in_name:
+ raise SyntaxError('echo doesn\'t read from stdin')
+ if args:
+ input = ' '.join(args)
+ # Always append a '\n'
+ input += '\n'
+ # Process output
+ output = input
+ # Handle output redirections
+ try:
+ output = self._write_output(output, out_name, out_mode)
+ except IOError, e:
+ if e.errno in (errno.ENOENT, errno.EINVAL):
+ return 1, None, '%s: No such file or directory\n' % (out_name,)
+ raise
+ return 0, output, None
+
+ def _get_jail_root(self, test_case):
+ return test_case.test_dir
+
+ def _ensure_in_jail(self, test_case, path):
+ jail_root = self._get_jail_root(test_case)
+ if not osutils.is_inside(jail_root, osutils.normalizepath(path)):
+ raise ValueError('%s is not inside %s' % (path, jail_root))
+
+ def do_cd(self, test_case, input, args):
+ if len(args) > 1:
+ raise SyntaxError('Usage: cd [dir]')
+ if len(args) == 1:
+ d = args[0]
+ self._ensure_in_jail(test_case, d)
+ else:
+ # The test "home" directory is the root of its jail
+ d = self._get_jail_root(test_case)
+ os.chdir(d)
+ return 0, None, None
+
+ def do_mkdir(self, test_case, input, args):
+ if not args or len(args) != 1:
+ raise SyntaxError('Usage: mkdir dir')
+ d = args[0]
+ self._ensure_in_jail(test_case, d)
+ os.mkdir(d)
+ return 0, None, None
+
+ def do_rm(self, test_case, input, args):
+ err = None
+
+ def error(msg, path):
+ return "rm: cannot remove '%s': %s\n" % (path, msg)
+
+ force, recursive = False, False
+ opts = None
+ if args and args[0][0] == '-':
+ opts = args.pop(0)[1:]
+ if 'f' in opts:
+ force = True
+ opts = opts.replace('f', '', 1)
+ if 'r' in opts:
+ recursive = True
+ opts = opts.replace('r', '', 1)
+ if not args or opts:
+ raise SyntaxError('Usage: rm [-fr] path+')
+ for p in args:
+ self._ensure_in_jail(test_case, p)
+ # FIXME: Should we put that in osutils ?
+ try:
+ os.remove(p)
+ except OSError, e:
+ # Various OSes raise different exceptions (linux: EISDIR,
+ # win32: EACCES, OSX: EPERM) when invoked on a directory
+ if e.errno in (errno.EISDIR, errno.EPERM, errno.EACCES):
+ if recursive:
+ osutils.rmtree(p)
+ else:
+ err = error('Is a directory', p)
+ break
+ elif e.errno == errno.ENOENT:
+ if not force:
+ err = error('No such file or directory', p)
+ break
+ else:
+ raise
+ if err:
+ retcode = 1
+ else:
+ retcode = 0
+ return retcode, None, err
+
+ def do_mv(self, test_case, input, args):
+ err = None
+ def error(msg, src, dst):
+ return "mv: cannot move %s to %s: %s\n" % (src, dst, msg)
+
+ if not args or len(args) != 2:
+ raise SyntaxError("Usage: mv path1 path2")
+ src, dst = args
+ try:
+ real_dst = dst
+ if os.path.isdir(dst):
+ real_dst = os.path.join(dst, os.path.basename(src))
+ os.rename(src, real_dst)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ err = error('No such file or directory', src, dst)
+ else:
+ raise
+ if err:
+ retcode = 1
+ else:
+ retcode = 0
+ return retcode, None, err
+
+
+
+class TestCaseWithMemoryTransportAndScript(tests.TestCaseWithMemoryTransport):
+ """Helper class to experiment shell-like test and memory fs.
+
+ This not intended to be used outside of experiments in implementing memoy
+ based file systems and evolving bzr so that test can use only memory based
+ resources.
+ """
+
+ def setUp(self):
+ super(TestCaseWithMemoryTransportAndScript, self).setUp()
+ self.script_runner = ScriptRunner()
+ # FIXME: See shelf_ui.Shelver._char_based. This allows using shelve in
+ # scripts while providing a line-based input (better solution in
+ # progress). -- vila 2011-09-28
+ self.overrideEnv('INSIDE_EMACS', '1')
+
+ def run_script(self, script, null_output_matches_anything=False):
+ return self.script_runner.run_script(self, script,
+ null_output_matches_anything=null_output_matches_anything)
+
+ def run_command(self, cmd, input, output, error):
+ return self.script_runner.run_command(self, cmd, input, output, error)
+
+
+class TestCaseWithTransportAndScript(tests.TestCaseWithTransport):
+ """Helper class to quickly define shell-like tests.
+
+ Can be used as:
+
+ from bzrlib.tests import script
+
+
+ class TestBug(script.TestCaseWithTransportAndScript):
+
+ def test_bug_nnnnn(self):
+ self.run_script('''
+ $ bzr init
+ $ bzr do-this
+ # Boom, error
+ ''')
+ """
+
+ def setUp(self):
+ super(TestCaseWithTransportAndScript, self).setUp()
+ self.script_runner = ScriptRunner()
+ # FIXME: See shelf_ui.Shelver._char_based. This allows using shelve in
+ # scripts while providing a line-based input (better solution in
+ # progress). -- vila 2011-09-28
+ self.overrideEnv('INSIDE_EMACS', '1')
+
+ def run_script(self, script, null_output_matches_anything=False):
+ return self.script_runner.run_script(self, script,
+ null_output_matches_anything=null_output_matches_anything)
+
+ def run_command(self, cmd, input, output, error):
+ return self.script_runner.run_command(self, cmd, input, output, error)
+
+
+def run_script(test_case, script_string, null_output_matches_anything=False):
+ """Run the given script within a testcase"""
+ return ScriptRunner().run_script(test_case, script_string,
+ null_output_matches_anything=null_output_matches_anything)
+
diff --git a/bzrlib/tests/ssl_certs/__init__.py b/bzrlib/tests/ssl_certs/__init__.py
new file mode 100644
index 0000000..66d7574
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2007-2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""ssl_certs -- provides access to ssl keys and certificates needed by tests"""
+
+from bzrlib import (
+ osutils,
+ )
+
+# Directory containing all ssl files, keys or certificates
+base_dir = osutils.dirname(osutils.realpath(__file__))
+
+
+def build_path(name):
+ """Build and return a path in ssl_certs directory for name"""
+ return osutils.pathjoin(base_dir, name)
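+
+
+# Usage sketch (illustrative only): tests locate the pre-generated files
+# through build_path, for example
+#
+#   key_file = build_path('server_without_pass.key')
+#   cert_file = build_path('server.crt')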
diff --git a/bzrlib/tests/ssl_certs/ca.crt b/bzrlib/tests/ssl_certs/ca.crt
new file mode 100644
index 0000000..ccfb2ee
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/ca.crt
@@ -0,0 +1,38 @@
+-----BEGIN CERTIFICATE-----
+MIIGpzCCBI+gAwIBAgIJAP/mTZ8oqGS7MA0GCSqGSIb3DQEBBQUAMIGTMQswCQYD
+VQQGEwJCWjERMA8GA1UECBMISW50ZXJuZXQxDzANBgNVBAcTBkJhemFhcjEUMBIG
+A1UEChMLRGlzdHJpYnV0ZWQxDDAKBgNVBAsTA1ZDUzEfMB0GA1UEAxMWTWFzdGVy
+IG9mIGNlcnRpZmljYXRlczEbMBkGCSqGSIb3DQEJARYMY2VydEBuby5zcGFtMB4X
+DTA3MTEyNTE0MDEyM1oXDTE3MTEyMjE0MDEyM1owgZMxCzAJBgNVBAYTAkJaMREw
+DwYDVQQIEwhJbnRlcm5ldDEPMA0GA1UEBxMGQmF6YWFyMRQwEgYDVQQKEwtEaXN0
+cmlidXRlZDEMMAoGA1UECxMDVkNTMR8wHQYDVQQDExZNYXN0ZXIgb2YgY2VydGlm
+aWNhdGVzMRswGQYJKoZIhvcNAQkBFgxjZXJ0QG5vLnNwYW0wggIiMA0GCSqGSIb3
+DQEBAQUAA4ICDwAwggIKAoICAQDD7JXMwqUTBC7wv9HjbsAbfPgOGg0s9STxKQiP
+T6P6pscLjC+twHC8BnThTT1wiFMELt95Fnl/RLAy1cDloKLIu+Xx3yrTiyydtBK+
+z5iTPyCqbHXodEFaKPChYN7q+TZD1OK+q9/zN2cT3yPm8wKBOz1MQ8C6y7T1MsYL
+wbJYYULCIN/B8gNcKNp3S1JRWQ5BF08asaaguCqY5mCgJYPzOrJ76gRuTQvpjop5
+tQ6NkxE1ln3gVkIjE4SYLRKGfHgYA9B596YGa/tcdAkLJs7+CXs0csR1BZjJkhgY
+iP34xGuCz8DoRDo2WN8jptcCosQQlYMWCQnSPwt6jUsss6CWEvIFPFnN8iOTETsY
+Pae3KdYZyFGbSA7UEvjXuFHXpN8AVfdd/oIrirIrEc1UQfe2Hyds6ZIJRhpfQxju
+5TTnM8yB6gt03bg8lmaXPlypQ3QepfFccKNstQz5pvF2yzdu0kWxD+wXHX7DR5p0
+dbIsobKzJz7ovnYJ/cTME0dDvwbNLsD5+cXNOkYwEqumP50nL1Fjdw6nVIZbnxZo
+USk7hsSlv6X842JUhEEgLQBN8B6pXO2iFdvLK1MdrQBbSttdbkcizf8SC7Yx7KKs
+IXDglQkGXXYNxLzXM3hWDaNDcd5wF9F97R5ufuyUhO7AhcyrQS4Lh9t8iytfQmwO
+8FB49wIDAQABo4H7MIH4MB0GA1UdDgQWBBRFJPYOM3D/bdKfqYlY+QfWoDmTfzCB
+yAYDVR0jBIHAMIG9gBRFJPYOM3D/bdKfqYlY+QfWoDmTf6GBmaSBljCBkzELMAkG
+A1UEBhMCQloxETAPBgNVBAgTCEludGVybmV0MQ8wDQYDVQQHEwZCYXphYXIxFDAS
+BgNVBAoTC0Rpc3RyaWJ1dGVkMQwwCgYDVQQLEwNWQ1MxHzAdBgNVBAMTFk1hc3Rl
+ciBvZiBjZXJ0aWZpY2F0ZXMxGzAZBgkqhkiG9w0BCQEWDGNlcnRAbm8uc3BhbYIJ
+AP/mTZ8oqGS7MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggIBAA/5KGB4
+XGnrI8BhI+2XDjejHwm4sj+f3NZU8zDpEktT1hwZ+zezWegK40X5fYknQAbeiZEK
+hbmaLnB5K+e+LN/3d4Aloa7JTSeAYGdixANZBZu6a+BYHNkNxoERByrgBuWPyKCW
+9OX1nESw3Fd/ci8Q3rNyFUtds3mgSinjcaGep+9pix6BCDBrlnxb4D2HY7ZTjrmP
+3t2epJq6vP1l+g4jUMMlvegbQ2Bf91cx1r1BoGjxul4iOj/N3kCv+b+CZzEToXKA
+MybQLHmGrhO8CudXUBONIQLoc4YInGkJA+xAv51IWthXc78YFjI06HJUQzK5tfLR
+jGKz7h+McrM/osF6OUNgpPnQK4+Da6li6mmmcbyvHI5qCbVDaC7g2l06Rm/aweNy
+O8jH07UKi4ZRc8b3HorzwU6Lhus3JYZbGDStHHiFBfawnnsZ3pEO6bjPElqYRC1+
+sEmGoH4Qshi2WOoUzcQx7GsNHbMaY4TXBdo74TTm2fKM5zkcXlSX28apdl2Vdg3H
+cemArxUClzfOkr7dJLRKQEcHsqUj7znP0HfdVl/vEHVkCPCV8rrSbXCxk7TbOe90
+SzdheHtXyEqDPdinPYIkb4xFxu6MZc2c5pLRrUVRons9voQsXWhm0BA48nAEUld/
+8HmzmgqJXf1yxgFKgir0OQ6VXIXyFmr4lwDF
+-----END CERTIFICATE-----
diff --git a/bzrlib/tests/ssl_certs/ca.key b/bzrlib/tests/ssl_certs/ca.key
new file mode 100644
index 0000000..19216c5
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/ca.key
@@ -0,0 +1,54 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,79F761A5C7882806
+
+iS1sEvtEAybrvlUtC3yTEjDDyXwmtuxYat844bKc0C9fh27hbCh04gw4AsDAYDlq
+/L5gc4ymHGI2L1ztDvjLVrOiRr78DCejXoIGHmF83Ijokkr/z9X0eJgeOl+akXu9
+F+DkzIGNAJ/+jYAoaU03Zn/3vGhRGGKkXHnI6TE3xRit6s9QU2GjvwzNOM2chB5J
+5IGE0M3uwTPpEVVoeDphDKzsibVGSFDH8z+tfm9rdWkRRmtXDeZFgFr21+qLx4co
+/IqJmFZ8ZkmrD5O8ExO/AcPDYCXtAJMPAnTR52UpeJ4eYbdOG23E95LWsG0VOgej
+AOsoHJV49MCwgaD6dmzDgucoxftYHgr0+wleAQeB/27zQsU4GjrviVGkVhRJB44F
+VqVNaRyA2tPIN2CQAYCWTKIuLZecNG0HBnV8VeDQDuF4hwmCV0Iy5EqC4tDMbfq/
+Rxgllkz+WA/b9tNs0pjyDv17PfbmlHq8vBx+UAgpz5CcCBqEgXkCo3ZEirTM58Ro
+SBXuuNJIV0ULmfNLDmUPOosWbzNgTPWH4QYOEs1iNRyhaOGJ2L9Kx+wiLy8o/n+7
+fz7z69ToX/Y5gBOJI6ZXCkwrbP96jV1mqKwLPYAW7z7koxvGdum2NhpzSY+e01NN
+6BWnSs4benO+acyMAil8As4/SMP1vph+ty0ytKZ+LbLUxNCgFbuERBODlguzF9CE
+Yff8ws288eTA21t4GqRsVldzc5pX/5GNjgGiNP60djDX8LuFi5Do934CAytCYvZg
+ziZZ8F7pazFK+YMFGvOrXobIzLPz0rZIAnT0wAXPCHb/huWGsWlrnuMBMLDevQTb
+r+qd61aiDBel4YN+fMdjYlGEqNdPbEgt0OYJwsDMnOiXrJXwE1Tagbazimb8Yehs
+ocqoTz3SF+luwhd8+56CFgW13/1SP8IWxeb0yM7BbI+/LjDLE7in0GfiZgb42ClX
+Fbeiz8FW7vLsu8FX0g3Kemdy0DOp9sc3ZcS45y6Ij53OD4PQGFG2Xg5w2PiFd6J2
+4LfL77wVCcahyIM9AzCsbzCiPT5zyas+Q3DbNiBvAec28ci/u4Aqzd85iABElbkm
+TGbAbVyMVJt60ADpLVIrlw0N90HR6Kh8OB+JzH9g01oKgnoR0SpQsSPNUapJhed3
+hrw8ETUFgpNobjt6quDjZVpCgo1E+VmX5oCftnpcRbO36DIZj5Uuj2HlUpbIMytS
+uTUxrgLmYEbfa8EQwkPfQFmgbk212OOJntKswVTYTcqi+gEn0vxuWJ/7C2M89pJQ
+D9i4oxH3Op+58JK/N7By7LZxMYsX5FjyQD9/bssj5MgY/6ClYEMQUiqq/cVRLKMx
+rp+hImyWMPvHMNm/Czr/FF7aYW8zMpQ/0BDH3oQDeeA+zHvpTRnB4mNuX+QkPsYz
+uCo965wQsZDmvUUgryQBukBOsP9CpQB0wxUvlaemWKuCmPYaYzr86IxdRbyqF1nj
+uBF5Ut5PYWsCutp9MrBLkq6+kCCAkZsSxlSxsKrtvPWOGgd+9WmjylMwNC1zsMFO
+pkXcLiRAIn7gFvAoDcP4nxtTkCoMamVFjXMdH99h6IhMZGy+ncjK9KmqCDBVFFpr
+BHMOdv4a5RmhnvHKWAxADpIyWgWTwPppw68BNmVIEBANDTNe7QH6+ZMDmluJdDGQ
+RT34hERw23NX5DN+wAQMbmO4Hf5HhSiN/JTMRu+RcIMJ+FxQagFMPSev3Bgy1711
+qsKFnrdK6j0sjgnm366I2M/9sNB6LluERSoKMuqc4kwKiR7S5jOhYSgqneqLzb/Y
+djxrSrpqpO6bXhdG3Dn7mtWuTbbQp44e98xsEfixNIc6u2/RkxDn1SUYk11UWipR
+tQjy4ML4ntmiEo/AIXCVxBOvGpng3aPXs7CLfZrpg01HdIdKE2LQDIpyZCTtg9pI
+BRVGqlUYmqdxb3tHJsa1Q2W21IoisDI0ydZaRA02j9dMJxbjVxaJEGqaJi/78Lcn
+FaaloEDqZN31/xDC5rYQpjczu2CawsLNSuewLmvWM4SeksHEd7ZeBDqNjZBAoFGo
+cfiHwYcgWnFYVacpIeJ3QzHdy5HDQyXXVVZiTaoWjILo2hdhukS5izcCbbPiAZ1b
+H/hUzBI+6M45hIOjZ4yCp1YZrTOtES2diLpET0Zy3+MA8Hs3R0VriTGmWzEsK0ng
+8O13An2tuKUwQPfXkXMqb42PkpVIIIamGhYu5p+IXZ3I2ViY0sI503wCFQvlVNa9
+ijEuP7eigFzX/ANtxcaw9G5rUOPvaY7ppBOf8+69vAV1n6Pph1yymfxBiNB+LS1O
+Ty108vSykU/Xjjc+lUNT8Zb8zlLyrhuJMLnxqa80gHX4py2R8trNIDR3gwhOE6o/
+WIoOAOWLZcJTBAYcrPiNSLAHCKnVWOvMPFn014wmvOVK9Air+mz739URpsJqPcFN
+hrzzLU01mheKR4KlTP8hTotPoU4heNrImzyZFyoX3qe9sH1N6B359gTJacR+CAxf
+5LxONx876kMVjWHr4Fa55qnjSs0lwmb9UNkQa03VPxYbNo71SNv16Tf2TADu9R4N
+O11SElm1Xz64w/f+fHgjOzFvZqAto5L3B6nWCcbza9TJH8EjvkINwXjnPnmwB/+M
+IEy9G893tM3ndO4Ku1YgimPB3ly8NN5FldZcQegy0W9S95ZWk4S1M0pPpZTp59RJ
+YfFIXqt0wALRg1oajLzft1aJ7AVBuctOv6QfJAPKsCuvSfscbBy8+anNDmC2Rbrh
+JA1Q2c6Eyw227sU/4P6Zg4GXCC7CovWryYV9URp0pkN6AJxxnqIVQGxNqnyJvGxE
+M7OKtBMPsXn5WCpBuUmOmw5MVh3lNhtiLMm6Da6C9PwEBbeKZuodzd//ewOcxj6w
+9IwbBymzqgZQw9XlHMx/kJH67fBanHFVAAPdIzr9Gzmh2oDmLR47gLeNTSKYSMwJ
+J/3AFIbKy6OXjetYoY4SWmS6JI3OABjWwnBXGK9AJE05pImhYixxEr8rHZw40BP6
+iM0rSn5bQjYZwyZLfQ29AM7hsaceov6SYRP1jogmHSHaTPcwTu5V2fATIHgvRXLd
+cA9e3JSLsF+b0PAR62jkdfStT1y3gIgvl6sCdV/A1V84/VG6jUpLh7iKyQRhuVK2
+-----END RSA PRIVATE KEY-----
diff --git a/bzrlib/tests/ssl_certs/create_ssls.py b/bzrlib/tests/ssl_certs/create_ssls.py
new file mode 100755
index 0000000..cd37620
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/create_ssls.py
@@ -0,0 +1,268 @@
+#! /usr/bin/env python
+
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""create_ssls.py -- create sll keys and certificates for tests.
+
+The https server requires at least a key and a certificate to start.
+
+SSL keys and certificates are created with openssl which may not be available
+everywhere we want to run the test suite.
+
+To simplify test writing, the necessary keys and certificates are generated by
+this script and used by the tests.
+
+Since creating these test keys and certificates requires a good knowledge of
+openssl and a lot of typing, we record all the needed parameters here.
+
+Since this will be used rarely, no effort has been made to handle exotic
+errors: the basic policy is that openssl should be available in the path and
+the parameters should be correct; any error will abort the script. Feel free
+to enhance that.
+
+This script provides options for building any individual files or two options
+to build the certificate authority files (--ca) or the server files (--server).
+"""
+
+from cStringIO import StringIO
+import optparse
+import os
+from subprocess import (
+ CalledProcessError,
+ Popen,
+ PIPE,
+ )
+import sys
+
+# We want to use the right bzrlib: the one we are part of
+# FIXME: The following is correct but looks a bit ugly
+_dir = os.path.dirname
+our_bzr = _dir(_dir(_dir(_dir(os.path.realpath(__file__)))))
+sys.path.insert(0, our_bzr)
+
+from bzrlib import (
+ osutils,
+ )
+from bzrlib.tests import (
+ ssl_certs,
+ )
+
+def error(s):
+ print s
+ exit(1)
+
+def needs(request, *paths):
+ """Errors out if the specified path does not exists"""
+ missing = [p for p in paths if not os.path.exists(p)]
+ if missing:
+ error('%s needs: %s' % (request, ','.join(missing)))
+
+
+def rm_f(path):
+ """rm -f path"""
+ try:
+ os.unlink(path)
+ except:
+ pass
+
+def _openssl(args, input=None):
+ """Execute a command in a subproces feeding stdin with the provided input.
+
+ :return: (returncode, stdout, stderr)
+ """
+ cmd = ['openssl'] + args
+ proc = Popen(cmd, stdin=PIPE)
+ (stdout, stderr) = proc.communicate(input)
+ if proc.returncode:
+ # Basic error handling, all commands should succeed
+ raise CalledProcessError(proc.returncode, cmd)
+ return proc.returncode, stdout, stderr
+
+
+ssl_params=dict(
+ # Passwords
+ server_pass='I will protect the communications',
+ server_challenge_pass='Challenge for the CA',
+ ca_pass='I am the authority for the whole... localhost',
+ # CA identity
+ ca_country_code='BZ',
+ ca_state='Internet',
+ ca_locality='Bazaar',
+ ca_organization='Distributed',
+ ca_section='VCS',
+ ca_name='Master of certificates',
+ ca_email='cert@no.spam',
+ # Server identity
+ server_country_code='LH',
+ server_state='Internet',
+ server_locality='LocalHost',
+ server_organization='Testing Ltd',
+ server_section='https server',
+ server_name='127.0.0.1', # Always accessed under that name
+ server_email='https_server@locahost',
+ server_optional_company_name='',
+ )
+
+
+def build_ca_key():
+ """Generate an ssl certificate authority private key."""
+ key_path = ssl_certs.build_path('ca.key')
+ rm_f(key_path)
+ _openssl(['genrsa', '-passout', 'stdin', '-des3', '-out', key_path, '4096'],
+ input='%(ca_pass)s\n%(ca_pass)s\n' % ssl_params)
+
+
+def build_ca_certificate():
+ """Generate an ssl certificate authority private key."""
+ key_path = ssl_certs.build_path('ca.key')
+ needs('Building ca.crt', key_path)
+ cert_path = ssl_certs.build_path('ca.crt')
+ rm_f(cert_path)
+ _openssl(['req', '-passin', 'stdin', '-new', '-x509',
+ # Will need to be generated again in 10 years -- vila 20071122
+ '-days', '3650',
+ '-key', key_path, '-out', cert_path],
+ input='%(ca_pass)s\n'
+ '%(ca_country_code)s\n'
+ '%(ca_state)s\n'
+ '%(ca_locality)s\n'
+ '%(ca_organization)s\n'
+ '%(ca_section)s\n'
+ '%(ca_name)s\n'
+ '%(ca_email)s\n'
+ % ssl_params)
+
+
+def build_server_key():
+ """Generate an ssl server private key.
+
+ We generate a key with a password and then copy it without a password so
+ that a server can use it without prompting.
+ """
+ key_path = ssl_certs.build_path('server_with_pass.key')
+ rm_f(key_path)
+ _openssl(['genrsa', '-passout', 'stdin', '-des3', '-out', key_path, '4096'],
+ input='%(server_pass)s\n%(server_pass)s\n' % ssl_params)
+
+ key_nopass_path = ssl_certs.build_path('server_without_pass.key')
+ rm_f(key_nopass_path)
+ _openssl(['rsa', '-passin', 'stdin', '-in', key_path,
+ '-out', key_nopass_path,],
+ input='%(server_pass)s\n' % ssl_params)
+
+
+def build_server_signing_request():
+ """Create a CSR (certificate signing request) to get signed by the CA"""
+ key_path = ssl_certs.build_path('server_with_pass.key')
+ needs('Building server.csr', key_path)
+ server_csr_path = ssl_certs.build_path('server.csr')
+ rm_f(server_csr_path)
+ _openssl(['req', '-passin', 'stdin', '-new', '-key', key_path,
+ '-out', server_csr_path],
+ input='%(server_pass)s\n'
+ '%(server_country_code)s\n'
+ '%(server_state)s\n'
+ '%(server_locality)s\n'
+ '%(server_organization)s\n'
+ '%(server_section)s\n'
+ '%(server_name)s\n'
+ '%(server_email)s\n'
+ '%(server_challenge_pass)s\n'
+ '%(server_optional_company_name)s\n'
+ % ssl_params)
+
+
+def sign_server_certificate():
+ """CA signs server csr"""
+ server_csr_path = ssl_certs.build_path('server.csr')
+ ca_cert_path = ssl_certs.build_path('ca.crt')
+ ca_key_path = ssl_certs.build_path('ca.key')
+ needs('Signing server.crt', server_csr_path, ca_cert_path, ca_key_path)
+ server_cert_path = ssl_certs.build_path('server.crt')
+ rm_f(server_cert_path)
+ _openssl(['x509', '-req', '-passin', 'stdin',
+ # Will need to be generated again in 10 years -- vila 20071122
+ '-days', '3650',
+ '-in', server_csr_path,
+ '-CA', ca_cert_path, '-CAkey', ca_key_path,
+ '-set_serial', '01',
+ '-out', server_cert_path,],
+ input='%(ca_pass)s\n' % ssl_params)
+
+
+def build_ssls(name, options, builders):
+ if options is not None:
+ for item in options:
+ builder = builders.get(item, None)
+ if builder is None:
+ error('%s is not a known %s' % (item, name))
+ builder()
+
+
+opt_parser = optparse.OptionParser(usage="usage: %prog [options]")
+opt_parser.set_defaults(ca=False)
+opt_parser.set_defaults(server=False)
+opt_parser.add_option(
+ "--ca", dest="ca", action="store_true",
+ help="Generate CA key and certificate")
+opt_parser.add_option(
+ "--server", dest="server", action="store_true",
+ help="Generate server key, certificate signing request and certificate")
+opt_parser.add_option(
+ "-k", "--key", dest="keys", action="append", metavar="KEY",
+ help="generate a new KEY (several -k options can be specified)")
+opt_parser.add_option(
+ "-c", "--certificate", dest="certificates", action="append",
+ metavar="CERTIFICATE",
+ help="generate a new CERTIFICATE (several -c options can be specified)")
+opt_parser.add_option(
+ "-r", "--sign-request", dest="signing_requests", action="append",
+ metavar="REQUEST",
+ help="generate a new signing REQUEST (several -r options can be specified)")
+opt_parser.add_option(
+ "-s", "--sign", dest="signings", action="append",
+ metavar="SIGNING",
+ help="generate a new SIGNING (several -s options can be specified)")
+
+
+key_builders = dict(ca=build_ca_key, server=build_server_key,)
+certificate_builders = dict(ca=build_ca_certificate,)
+signing_request_builders = dict(server=build_server_signing_request,)
+signing_builders = dict(server=sign_server_certificate,)
+
+
+if __name__ == '__main__':
+ (Options, args) = opt_parser.parse_args()
+ if (Options.ca or Options.server):
+ if (Options.keys or Options.certificates or Options.signing_requests
+ or Options.signings):
+ error("--ca and --server can't be used with other options")
+ # Handles --ca before --server so that both can be used in the same run
+ # to generate all the files needed by the https test server
+ if Options.ca:
+ build_ca_key()
+ build_ca_certificate()
+ if Options.server:
+ build_server_key()
+ build_server_signing_request()
+ sign_server_certificate()
+ else:
+ build_ssls('key', Options.keys, key_builders)
+ build_ssls('certificate', Options.certificates, certificate_builders)
+ build_ssls('signing request', Options.signing_requests,
+ signing_request_builders)
+ build_ssls('signing', Options.signings, signing_builders)
diff --git a/bzrlib/tests/ssl_certs/server.crt b/bzrlib/tests/ssl_certs/server.crt
new file mode 100644
index 0000000..9fda40b
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/server.crt
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4wCAQEwDQYJKoZIhvcNAQEFBQAwgZMxCzAJBgNVBAYTAkJaMREwDwYD
+VQQIEwhJbnRlcm5ldDEPMA0GA1UEBxMGQmF6YWFyMRQwEgYDVQQKEwtEaXN0cmli
+dXRlZDEMMAoGA1UECxMDVkNTMR8wHQYDVQQDExZNYXN0ZXIgb2YgY2VydGlmaWNh
+dGVzMRswGQYJKoZIhvcNAQkBFgxjZXJ0QG5vLnNwYW0wHhcNMDkxMTI2MTMyMjE4
+WhcNMTkxMTI0MTMyMjE4WjCBmzELMAkGA1UEBhMCTEgxETAPBgNVBAgTCEludGVy
+bmV0MRIwEAYDVQQHEwlMb2NhbEhvc3QxFDASBgNVBAoTC1Rlc3RpbmcgTHRkMRUw
+EwYDVQQLEwxodHRwcyBzZXJ2ZXIxEjAQBgNVBAMTCTEyNy4wLjAuMTEkMCIGCSqG
+SIb3DQEJARYVaHR0cHNfc2VydmVyQGxvY2Fob3N0MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAq2uwbT6Zs/gC5ytKiAZfXGNVMQyGLuetgUrfQTbOL+/p
+4sK97hhWk/qbJRQVy8qQ5o4JKmTwAmf/exGxh94E3eacC9EEIdI3A0I2dbno9qAu
+zBXXacjqcKT/PczIN9U1/+S0e5vOssCtvVFWzdbG7vTg3Shs5BG8y1Ue8CDUtczB
+8gGjbQWkugzAqK8IogeMKkCGmiWFzRubHwL9HH5tYm1w5EHJzx/avepOyTKKBQpF
+A0sza0IYwEIuREMWcN64YPqs/OkLFmGCfpMCZvOmsmwGTxf+MoDNiqBco67xBuaK
+e4pSUf1f1PrbgHpEJq68QuCsRgWFYig0jRkh06ox5beyV9n/bQ/bA7W60JT/NUo9
+uNtFgC2ui2yHA5c3ozW+Muk7tS42r9Sde3gW/81c+kZbVS87EM42XQFMP1stAR8W
+XMwzlVnjrbQ/ERwt+OoGbOt9woHeEKpi9chmFtDR5irY3uJoP9/+9D51glxRGQql
+kpNc/FL4VYbmCZY8QCeEXHaxl1MeuZCI6ovGHojpUvFNQK+7qFOhbK0G8me7o+ly
+P/qCbjTTvom/Kvg9kFtAvwuz6ErWYKh0xZQxNhFl3uGtxycAti0+5O6l/lLUQwv8
+n79IWawWDTy4s8kYfWBiH8NNQBLib6tlllzt7LXtcPJjTkZ1finT5YFqweQCiR8C
+AwEAATANBgkqhkiG9w0BAQUFAAOCAgEAutvkG9R7oxOiRNHhMnDtcMFS92PdgrFU
+eAANojT2eC359K+vzIG19EReq6OEk8pLjbzehVW808hxtdMaMOQc3Np4AHl9Z4Sp
+YEMpRsfIbjwgM02aT7Ig5ZztRjKq78G5WzQq71Q5tzK9gCotGYH5B9QEbEpEjnb0
+UGaGi4bB+/WJsAuxKt5UyvN1h4n6oJYY4/Dy20H0LfnzjGOayuOoRaZt8cBFS0XQ
+8Gp6WAJwl8CJms84XL8IN6Zj6fYCWPP4fT6kiu7VHof+/zK99ajkEKaykl7mkyEk
+NfpaGIgVsPgd6BkWDavbW7DJg0LNlPry+Mh5MNAcHNaGUpJ5mYoazLBDw9rd70ss
+70TgT1te7WyB2yHbKLHdKDtRshfemXZaScVxoWsRrlDJQQn+1CiBWAmuVgZNiAIY
+gqM5kcFiLPP8hjw1GzPcSwjg8uxyM1lY6m1KLwdO2hW+iZCj1vhz//lBBHmYYTqk
+8EwtbWJ7XVQaLVRhAc1HIIA6TSAPiP7Dgn2rhQHcqODYSmfieXPQetlFhXNJG508
+voSrJMpsKQOId+2Ua00Wi6ajsje00vklG7IzvLY7MdeiSxNrXLNGlSjHI9/+ea0E
+lHJQbJUjR/kXNT/2WBnjBkSVclsz5Xx2dAf3kU0AlUA+b962Y/fxlma5wd8PIHx6
+tW5I+uMneHg=
+-----END CERTIFICATE-----
diff --git a/bzrlib/tests/ssl_certs/server.csr b/bzrlib/tests/ssl_certs/server.csr
new file mode 100644
index 0000000..102e062
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/server.csr
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIFBjCCAu4CAQAwgZsxCzAJBgNVBAYTAkxIMREwDwYDVQQIEwhJbnRlcm5ldDES
+MBAGA1UEBxMJTG9jYWxIb3N0MRQwEgYDVQQKEwtUZXN0aW5nIEx0ZDEVMBMGA1UE
+CxMMaHR0cHMgc2VydmVyMRIwEAYDVQQDEwkxMjcuMC4wLjExJDAiBgkqhkiG9w0B
+CQEWFWh0dHBzX3NlcnZlckBsb2NhaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAKtrsG0+mbP4AucrSogGX1xjVTEMhi7nrYFK30E2zi/v6eLCve4Y
+VpP6myUUFcvKkOaOCSpk8AJn/3sRsYfeBN3mnAvRBCHSNwNCNnW56PagLswV12nI
+6nCk/z3MyDfVNf/ktHubzrLArb1RVs3Wxu704N0obOQRvMtVHvAg1LXMwfIBo20F
+pLoMwKivCKIHjCpAhpolhc0bmx8C/Rx+bWJtcORByc8f2r3qTskyigUKRQNLM2tC
+GMBCLkRDFnDeuGD6rPzpCxZhgn6TAmbzprJsBk8X/jKAzYqgXKOu8QbminuKUlH9
+X9T624B6RCauvELgrEYFhWIoNI0ZIdOqMeW3slfZ/20P2wO1utCU/zVKPbjbRYAt
+rotshwOXN6M1vjLpO7UuNq/UnXt4Fv/NXPpGW1UvOxDONl0BTD9bLQEfFlzMM5VZ
+4620PxEcLfjqBmzrfcKB3hCqYvXIZhbQ0eYq2N7iaD/f/vQ+dYJcURkKpZKTXPxS
++FWG5gmWPEAnhFx2sZdTHrmQiOqLxh6I6VLxTUCvu6hToWytBvJnu6Ppcj/6gm40
+076Jvyr4PZBbQL8Ls+hK1mCodMWUMTYRZd7hrccnALYtPuTupf5S1EML/J+/SFms
+Fg08uLPJGH1gYh/DTUAS4m+rZZZc7ey17XDyY05GdX4p0+WBasHkAokfAgMBAAGg
+JTAjBgkqhkiG9w0BCQcxFhMUQ2hhbGxlbmdlIGZvciB0aGUgQ0EwDQYJKoZIhvcN
+AQEFBQADggIBAGeE6ml7EshJvPvOv+I8DrcjzHiAO4+0w/weknWfS/ndAafkLBa7
+5TMFhnBFHQ9/XbvctyNCc5u5OQfRxQwJccV4OUiTPPXqN4UWWSzxIw8zVM/8MdiH
+yaVFYqr2gyAKlcCvKavWNKMXVdeoOFnAwA7rfgtA2mMNM0ZoSJr/PISkzN6ZFyh2
+VSCrVdBZ1pTMslIKqWkRgqeqlrRhEVXZIljrG5+vVVnzfxycPjeN3VD5US8Pr9B0
+im6sbDTn2z5pRTkVWc/cL1F7spxsa67KVqwwHx9GNojo7SY/NNeWQTaTe3uVa/Hp
+VXRAcutovboGVcMhu92Udz+bpn2XpbNZnWJGRkRZRLWG2Z+k/REGhMk+5LuMh/+N
+uumIThiNtCPeg3/esVxlAqDokteqnoE0XHtcBUTacvedgmxASV98Wu+Rx7MnVgTf
+29wGb132ZSRMEzoB0hSm9tjB2DVI/O1+YOcPDhoLHpfkUE+qXQTWPKCvrZCspJp9
+UYkWVTfbRJavXOOi8tRHvt0AfxEnyq3o1H7veqDxL0NMuXvNAL2fEt0Av1zfyHyS
+Ch+NpByF9m2BMG0TUrrMhtb83X+Rb7VOC6sbs8H+ste9DOwh2930T1eFsb0/32R8
+ix0EMI5Yi60RLcL9WOjdQ1JAMeO6+dywL8WpjyAyUegeMxwlzZCMRSvv
+-----END CERTIFICATE REQUEST-----
diff --git a/bzrlib/tests/ssl_certs/server_with_pass.key b/bzrlib/tests/ssl_certs/server_with_pass.key
new file mode 100644
index 0000000..1196af3
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/server_with_pass.key
@@ -0,0 +1,54 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,60937C843264DF9B
+
+1Dt8AebtYSGAI9lxksU1miwsRvvPVbUvm/XQyZrgp6vfEAj/RmQj62x3CBR0XqbF
+HwphoyR1x4IJa0qpGlbrrjAB8YqPKmHC0X1PkeRRa5g+1eaQtbV2YOSrVvpmhAOa
+6eii++ct0AWVkdXq1Aavo2apzMCDghMEUKZrim4Lqxa8RqjcmtPXWEA822rWHPtu
+mmhuZ6uLDMfSsn7jqtiV3fp85toKAf7EY85gWjS4n77LRElGloW8i5y3EBVt+ttD
+Yq2aXgg3XvfC/AyyzGmuTq8FhK6Tt0N5hewJ66kWoXkyIauBEds7fzoHhRrMC6ha
+M4g46rbXIOpt1qWkPNvVYUHDMF4rvf5U+aDJwyYUgyYZleVbfxn1JqxI0MJcmGGt
+gY5s/Ygn388Kk0UbmYdgFFg8I3AFILywPu+OA1C13KPOQUNjSDYxOG8WO53mDhF2
+ENDFXN3Y0K4uxmCcrr/5FnMQiQ+qY9jvqtrkwLyL/vAqHVCriqimIJwPf4zwD71n
+TzUTt0EaqguwTUMM8vuZ3edj60Y8zMuIDPVvYLIjy/tyu7UGLOSu1rtGCWADYhBe
+zJQM+97uMTJFkdHCfs2U+RueC2WJMXDs5Qicfzhn0BYryAP5j5YCW/NWxypaOlHx
++VKN8P0DH7QjMo8am0u8rduhaNQqx/0AvB4DlW0kUNbHYoZH+JL1L79UR9a+8pHy
+vWf1VJH3RalG9+Ws4govVk+giAUvELq27yqUOp3ZwdaHKqEUEU74EHPRPhq5y+qU
+nvICb65Au9KCPNefU7YVjJ9QCSYs2wTFJH6WwyIy3a3oYRW7vfzvg+VtAaAmeEJQ
+h6jxFWwXxjqh5m6LirlCBcCDW1bkgm5Umjq6CT8K+xTAEmY1lqQ2DEe0VVNs2bPS
+nvLxi7JFUHzgRqMJo4ZSeF2sq5sffoI3DDY6aCa5KFi+gowNH29VETU2O1p51OWy
+9xi1aD540HNFS8tCvxMz18xxHgnK+KpV0wtirrjEypx3y4iEe+BA2mp9EKZYYjhq
+mxdfnZRj7U3wSI+1FyVlbnOb665sZEMjzX60IubO4qkjkeL1yZ5l17yLtKtDKlrF
+W+HwjWr9hbAv3MuNa76g6k8BlHWDrdgHIqrPLe0D37Tm6WKjBQ0eGlRC6QSMzdPa
+Efl6EeUCFTvIMiFPO1ZWutJj4SsIexRQBnCPyQgMg1GoVAASuY8/uyaBcWQEqeyG
+R+d0jTIsKouNbbPWjwHdC3PqGkJEoL92xMDhKavKkU4pNNgi9hlSGcAS1+IVCUML
+f3yhWqs+3zAsqrH41IdC9zbhS6E3wHxNlhjmf99fTSyQ3bj3pbcotQYnXKKSAPSB
+H+3NdR4tYeCU53SP1WgsMbN+auFP0ByU1W4U3xjNSYEclnTeJ1bsfbNHEtQG5aRW
+KaRleecn952nQQuTyd+v2jZiucGYKMiOrWPFRBnYYTkKAujITGxg2XmiYUUbfqwd
+e0KFyYOU/8ylpnLh2tjnbs6Re0ytJW9gifkhRDCMQzr+b6jg+hwUnqIE4GV0igNy
+aoR45oTCLMHpLB/h5m+oIcNsPZWpBmJ3uZjHCCh3HVkkjTddBCwU4kbfUkSU+tn1
+A8fD8GFutaaSnKqDqHS09a2Ov7XEwGM7z1WKdWPO/7xkXNhhGmO8m/lQP6Wt4mmQ
+WWkL7Z9UqdoYCz/8X629TI8ueR7mosiz+UxLnwpn1tvPch02z71iQmfhRfxSM44J
+bm4Ro0G1iW7YyemqspRobipi4o/VSg/VdfGTe3a9j3/C398qMw1freH6hgY0dgjV
+Zf6dCJqF8TFOJBWdVT0OUySJs8auCZHDlWslAHqXueu5QX203uWfFtU1c1HEmjZp
+fXCzFUSP4IFk8FOD2/CX3JQF57Hzb7Ityp54BuUG5yn27oBZpuvL/PPLHu+cGDx2
+/6/9D6kV5Wd3xcaqsDPOZqxbY4TcOceYJ53h26fK8esyWfgD2v1DsvV41JdZjBml
+WaObVMbl8aw2WbtJg6qRQvWaRLByEkIzqa93oGP2uwZH2pa2MYLnHhFDv7l+uae7
+tG/B5AylM83PF8Pp13Ayey7nT0hjuGcGIoz/TlYkEE5e516d6xuVeGl2rom1iwCC
+Ib3IWWh2tPR7WaMkFaetWcJ/30NGm6KUAjzqbeTq6QUcDuF1M8mlbyJYqFwSkM4G
+3d3nrSWZtFxtOnY9/Yk6fQyvONwU1QMqfYyCcbJJcCVflHCDtKPh0STwz8+DuGsZ
+sdemYGOdb2CIznlAvDI6rC67BQAGt6jR1mXHpySgfv3gPhQguQZgTD9GqKxAGFxb
+PzeSXXeswhRKb1DpuE/Lx0C0j0nvpsCgV7OO3+lSVe0EphALdvB48FecdAPYdhdZ
+bBsTxtrRVdHSBvux3sTRSfkyvK3Qmf8v7kwmr5kwXp8CRQx7E3dcjSi4W+5pUnOC
+h8D6ziIbgfpyrd4qEqtwPF/tGudH1Na08OfE7QW6tLXmiK6zPiVCQfK3crTikydK
+Qv3DO0QfnPPX5JUiFNbJ4jr1gTd56pm5ffZLPIkC+kGYNXXAO5rsV6Jde4OiaFIw
+WaUkkyBGnTws7Jx9DJLZdi8RQShSyX2lCZIZvV6zYrlz+5TuEjhIxQ8Y7GZe9Cp9
+hi6IZIEFyjwrUG8QR16PmMDzORbkWVba2JRAZZQANHtl475GOod+dxA6sy6FxjPV
+XCvX49W6OSPeFj8sf4pPt5EdFFBWKj3G0aD3AQCzSJj/Cg8kcXrKswUuwU5FwVbE
+uuCEXdwM1kyHARxIOoNBbrYwigXLXsN4owO7mQMU5ri2hvxcJg0BI0YbbCJcIyg5
+77FltynXfuw1j6ZC0346VVVLQ/vqRbkDLBt5hdsYBf93Jd2+oc40necuMbaGpsLz
+RdjEqfRP9SW46ewi9DNwmdQ5Xyw/jn0vseQcQWSvQ6q96Oq8WO1cX52rOeuA0pGC
+JnrQKaUnCCYAZxEeshLXre1Sy48pThCGDNTYw1VAt3YdcMRD65VGnttTlU+7UodE
+xLGAx68ze1e1bkf9DjUVUkzqXO/jTswmWRC5RiEDBacUV6CHEGenSjIg0t+v8qCf
+n9K/sZMYH17Zqh1dbehmudJE9r+8crAYv3t1fq87VEoxQgQaEeQmcD4XTF82Hlz1
+-----END RSA PRIVATE KEY-----
diff --git a/bzrlib/tests/ssl_certs/server_without_pass.key b/bzrlib/tests/ssl_certs/server_without_pass.key
new file mode 100644
index 0000000..029be49
--- /dev/null
+++ b/bzrlib/tests/ssl_certs/server_without_pass.key
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAq2uwbT6Zs/gC5ytKiAZfXGNVMQyGLuetgUrfQTbOL+/p4sK9
+7hhWk/qbJRQVy8qQ5o4JKmTwAmf/exGxh94E3eacC9EEIdI3A0I2dbno9qAuzBXX
+acjqcKT/PczIN9U1/+S0e5vOssCtvVFWzdbG7vTg3Shs5BG8y1Ue8CDUtczB8gGj
+bQWkugzAqK8IogeMKkCGmiWFzRubHwL9HH5tYm1w5EHJzx/avepOyTKKBQpFA0sz
+a0IYwEIuREMWcN64YPqs/OkLFmGCfpMCZvOmsmwGTxf+MoDNiqBco67xBuaKe4pS
+Uf1f1PrbgHpEJq68QuCsRgWFYig0jRkh06ox5beyV9n/bQ/bA7W60JT/NUo9uNtF
+gC2ui2yHA5c3ozW+Muk7tS42r9Sde3gW/81c+kZbVS87EM42XQFMP1stAR8WXMwz
+lVnjrbQ/ERwt+OoGbOt9woHeEKpi9chmFtDR5irY3uJoP9/+9D51glxRGQqlkpNc
+/FL4VYbmCZY8QCeEXHaxl1MeuZCI6ovGHojpUvFNQK+7qFOhbK0G8me7o+lyP/qC
+bjTTvom/Kvg9kFtAvwuz6ErWYKh0xZQxNhFl3uGtxycAti0+5O6l/lLUQwv8n79I
+WawWDTy4s8kYfWBiH8NNQBLib6tlllzt7LXtcPJjTkZ1finT5YFqweQCiR8CAwEA
+AQKCAgEApb6nRMm9hfSsoeYK14EJ3WOyMI3ChEHAUn4ZZY3fSRx5EadoS3m+5K/w
+KKbF+FjxR+tX02TrZ/g0X0OZWxyBqijbAtXjX+xJAt6Xl7bdGPGCrhQER6VbgPUY
+UiHaCZoe70CsfcQF1HJWyvGFs9sSWGPvtAbtWowH9rZkBpxH70x+zkdlQ/mr77R6
+597twvT3QrdHLm5PdCVYFGEoaM+ZZg0ZlapCvgE32F9gNEuohf0LA4dHvVKYafTT
+lW8kmJfQwPxAHP4bmVVI/qeYMAEwUd3/L6SsRRqjIoGxcRaquBhbF96DgAE0EtNH
+P294y8ox/5wLFl0qPeFQBomhWm56dON9HP18XeJs9Xtn2wipFH8W3BN5gZLQOOPe
+ZW7E9lTkzecZa/a6R1Eoqy0vuc6Uh/N+qk8rdHl1JSa8ofTwObKtYq8/f7W05AZp
+lTx7NujH/UH/z5RIKdMtqjVQsOra+LBqU09B3KWOf37M02nbd5eQCubFPlvCulbz
+kWcyBUb74r2jyqnbxqfroSFG09qdEgP5KvGSWJSiyS8yb0HSeTom72jdf1kJtOf7
+D9xZlBSHeP2F09peCUPHGg8NQmhY9BczhoRN8YIl7RPPJaCUHHaRZNFezPXuHEfC
+/cC+gdBnf91aqY4DSovl4/GzYM4qu7nGjYabg34aQ3zi/Bz/d0ECggEBAOMrAeI/
+jjlBvsxE6WzF6/tVc0sjGmxz6Vd9ueRHIlU9ZUP8fYFkczMAGD28ejlZx9HPR192
+h5jKXXmZpxohR3nznJNFr2A7bDN2UYeguj6JbqJaQ0EtgLX05DkBcULmDbidScbw
+XaJ7LNYo6hxA/hvvc6w4jNhiZQeIxD5fzogo/liq4w02CmIKTzn7MAb1+F+VzWah
+QpYsfj4TWFnzLvCI7+ncN6fkxcfrfc/B+G5GfBJhbzT3Pu9MqMeMaBwSGGeUoHdu
+AOJF6jxAzkEP4ouUggaQW9vLYgkVvfeoAH96VHuFzz3o+59cHtc099WwVffWMFUz
+8TwGoyjpryJhPKECggEBAMEtYEqepPRgvAEgEXg01nWqTFEFETOEJ0qUqkswDjtL
+7d1TBF30zGppses4JEt1IDPfec/8oNCDalOq+wyHl3zUKPzwcIyKI5oj+d9gxXie
+FZ3S8B/sMhnmCaE4zdvcT0Wdili2nuJpwur2VjPHI0ExUkaYBKRkDbtg1zwwkewD
+spg/uRHmmPzA4cwEi4PG51RIxaEIGIZA2E12PGswHSxlXIZrH7G+z8Tcalwclowx
+siEbwBAK2ogpot9yz+5wCoZ+kDK2WGwEhkpjwoj+0Z4fBQOd9e7J/cAsaN11DPrq
+3xW4y9hs2CgKSqsZT8CpA1naS4t4Gw5hiQxkvbUOLb8CggEAKvGFYKTNGQvRqsHY
+MqTTsGZ97hVLuHbNBKoSyxGbbuu1Sf3gJ+a6VEjPTFRl8pODT6lUf7pjdcHDm+ot
+CaaC4Y3o6npA5fs1wCC81tvOeWX3kR9pGVglvb/2qMQaCiB9NYoeWUo1GTMZzb5c
+S00J/HPutP7XuUwYdy0mk/f+jgTZ2B8t3uB5ZAE46ugOMzKqaj9iXeVcNhFhKBD2
+IcLz+6qXV+k2aDm6Hcnpta2gbhBLAhqDMEsRDaG7QkXLpIrTLDmPgCQ+bIf0+MDz
+fJ8pH2PKlL0TcGBpPvhqOff7aB3S/KrV+4kgkAaVGe+8bhvwlHJcHq88CjHjBCfw
+4JnrIQKCAQEAu0Cn8U0cqsJGG3UhWfsl04MTXQnjZaN5jsLS2/UmJnZwiTSarBTK
+PXBdBZwzGm3bnEzIwEKrA9QPMjEwDzI0Ns1VOfI7tw+EeTrcUJp2cHgUjhrVpqwG
+2LHyrbPpYHCK6vlODLZhBynrxmSopKIo213a2hErlsphwTubB34xRK1E5m4GkIKN
+05uIhIH3VvJsdHfiEjlNxH2A7n1pDaHi9ZD/glC+tUY4EiRyeP1BNX5Ko+tJXoOW
+15w8hAq/4en7Wem/D1lQFsy/+8X94VvYh0ScUKhCp4hnD2w87kYzIdqgCmdKOW0w
+joeDX502gJOUavm+duAioesLtNbGSYYH8wKCAQEArz1Cp2OIDYroY3FWRw5yN044
+Rs2CZWMTHe3Q4UdHPuMe6oMIXGQVcYO7iPkYcU0dBpzehGXgS7bqTyPLTO8pnVqV
+4uwvUfLInV8mCb9Pcg8vWaar/30nvjFL/IFIYXoHr9+jffLdbp18sNrGJaiqLv/S
+SwgRiHNzhUmg1y66P2sBMsJl9iOGldKkCWiX3lCnxrsM7BJYoRQS+wB+9qEAvmLg
+sn+FQFEXUZf05+eTwFJ1c2KYtaGuT8SQgc86DRlZ/qe1HbteCXpV1pd51Zh+ndgX
+4QneTJUzopJ2FQECnXrMZfgUS5w8qQ4Tpce6kjIyD3FntQ20TMX9ySZTa7dX9Q==
+-----END RSA PRIVATE KEY-----
diff --git a/bzrlib/tests/stub_sftp.py b/bzrlib/tests/stub_sftp.py
new file mode 100644
index 0000000..adea45b
--- /dev/null
+++ b/bzrlib/tests/stub_sftp.py
@@ -0,0 +1,570 @@
+# Copyright (C) 2005, 2006, 2008-2011 Robey Pointer <robey@lag.net>, Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+A stub SFTP server for loopback SFTP testing.
+Adapted from the one in paramiko's unit tests.
+"""
+
+import errno
+import os
+import paramiko
+import socket
+import SocketServer
+import sys
+import time
+
+from bzrlib import (
+ osutils,
+ trace,
+ urlutils,
+ )
+from bzrlib.transport import (
+ ssh,
+ )
+from bzrlib.tests import test_server
+
+
+class StubServer(paramiko.ServerInterface):
+
+ def __init__(self, test_case_server):
+ paramiko.ServerInterface.__init__(self)
+ self.log = test_case_server.log
+
+ def check_auth_password(self, username, password):
+ # all are allowed
+ self.log('sftpserver - authorizing: %s' % (username,))
+ return paramiko.AUTH_SUCCESSFUL
+
+ def check_channel_request(self, kind, chanid):
+ self.log('sftpserver - channel request: %s, %s' % (kind, chanid))
+ return paramiko.OPEN_SUCCEEDED
+
+
+class StubSFTPHandle(paramiko.SFTPHandle):
+
+ def stat(self):
+ try:
+ return paramiko.SFTPAttributes.from_stat(
+ os.fstat(self.readfile.fileno()))
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+
+ def chattr(self, attr):
+ # python doesn't have equivalents to fchown or fchmod, so we have to
+ # use the stored filename
+ trace.mutter('Changing permissions on %s to %s', self.filename, attr)
+ try:
+ paramiko.SFTPServer.set_file_attr(self.filename, attr)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+
+
+class StubSFTPServer(paramiko.SFTPServerInterface):
+
+ def __init__(self, server, root, home=None):
+ paramiko.SFTPServerInterface.__init__(self, server)
+ # All paths are actually relative to 'root'.
+ # this is like implementing chroot().
+ self.root = root
+ if home is None:
+ self.home = ''
+ else:
+ if not home.startswith(self.root):
+ raise AssertionError(
+ "home must be a subdirectory of root (%s vs %s)"
+ % (home, root))
+ self.home = home[len(self.root):]
+ if self.home.startswith('/'):
+ self.home = self.home[1:]
+ server.log('sftpserver - new connection')
+
+ def _realpath(self, path):
+ # paths returned from self.canonicalize() always start with
+ # a path separator. So if 'root' is just '/', this would cause
+ # a double slash at the beginning '//home/dir'.
+ if self.root == '/':
+ return self.canonicalize(path)
+ return self.root + self.canonicalize(path)
+
+ if sys.platform == 'win32':
+ def canonicalize(self, path):
+ # Win32 sftp paths end up looking like
+ # sftp://host@foo/h:/foo/bar
+ # which means absolute paths look like:
+ # /h:/foo/bar
+ # and relative paths stay the same:
+ # foo/bar
+            # win32 needs to use the Unicode APIs, so we require the
+            # paths to be utf8 (Linux just uses bytestreams)
+ thispath = path.decode('utf8')
+ if path.startswith('/'):
+ # Abspath H:/foo/bar
+ return os.path.normpath(thispath[1:])
+ else:
+ return os.path.normpath(os.path.join(self.home, thispath))
+ else:
+ def canonicalize(self, path):
+ if os.path.isabs(path):
+ return osutils.normpath(path)
+ else:
+ return osutils.normpath('/' + os.path.join(self.home, path))
+
+ def chattr(self, path, attr):
+ try:
+ paramiko.SFTPServer.set_file_attr(path, attr)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+ return paramiko.SFTP_OK
+
+ def list_folder(self, path):
+ path = self._realpath(path)
+ try:
+ out = [ ]
+ # TODO: win32 incorrectly lists paths with non-ascii if path is not
+ # unicode. However on unix the server should only deal with
+ # bytestreams and posix.listdir does the right thing
+ if sys.platform == 'win32':
+ flist = [f.encode('utf8') for f in os.listdir(path)]
+ else:
+ flist = os.listdir(path)
+ for fname in flist:
+ attr = paramiko.SFTPAttributes.from_stat(
+ os.stat(osutils.pathjoin(path, fname)))
+ attr.filename = fname
+ out.append(attr)
+ return out
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+
+ def stat(self, path):
+ path = self._realpath(path)
+ try:
+ return paramiko.SFTPAttributes.from_stat(os.stat(path))
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+
+ def lstat(self, path):
+ path = self._realpath(path)
+ try:
+ return paramiko.SFTPAttributes.from_stat(os.lstat(path))
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+
+ def open(self, path, flags, attr):
+ path = self._realpath(path)
+ try:
+ flags |= getattr(os, 'O_BINARY', 0)
+ if getattr(attr, 'st_mode', None):
+ fd = os.open(path, flags, attr.st_mode)
+ else:
+ # os.open() defaults to 0777 which is
+ # an odd default mode for files
+ fd = os.open(path, flags, 0666)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+
+ if (flags & os.O_CREAT) and (attr is not None):
+ attr._flags &= ~attr.FLAG_PERMISSIONS
+ paramiko.SFTPServer.set_file_attr(path, attr)
+ if flags & os.O_WRONLY:
+ fstr = 'wb'
+ elif flags & os.O_RDWR:
+ fstr = 'rb+'
+ else:
+ # O_RDONLY (== 0)
+ fstr = 'rb'
+ try:
+ f = os.fdopen(fd, fstr)
+ except (IOError, OSError), e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+ fobj = StubSFTPHandle()
+ fobj.filename = path
+ fobj.readfile = f
+ fobj.writefile = f
+ return fobj
+
+ def remove(self, path):
+ path = self._realpath(path)
+ try:
+ os.remove(path)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+ return paramiko.SFTP_OK
+
+ def rename(self, oldpath, newpath):
+ oldpath = self._realpath(oldpath)
+ newpath = self._realpath(newpath)
+ try:
+ os.rename(oldpath, newpath)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+ return paramiko.SFTP_OK
+
+ def mkdir(self, path, attr):
+ path = self._realpath(path)
+ try:
+ # Using getattr() in case st_mode is None or 0
+ # both evaluate to False
+ if getattr(attr, 'st_mode', None):
+ os.mkdir(path, attr.st_mode)
+ else:
+ os.mkdir(path)
+ if attr is not None:
+ attr._flags &= ~attr.FLAG_PERMISSIONS
+ paramiko.SFTPServer.set_file_attr(path, attr)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+ return paramiko.SFTP_OK
+
+ def rmdir(self, path):
+ path = self._realpath(path)
+ try:
+ os.rmdir(path)
+ except OSError, e:
+ return paramiko.SFTPServer.convert_errno(e.errno)
+ return paramiko.SFTP_OK
+
+    # removed: symlink, readlink
+    # (nothing in bzr's sftp transport uses those)
+
+
+# ------------- server test implementation --------------
+
+STUB_SERVER_KEY = """
+-----BEGIN RSA PRIVATE KEY-----
+MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
+oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
+d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
+gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
+EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
+soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
+tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
+avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
+4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
+H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
+qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
+HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
+nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
+-----END RSA PRIVATE KEY-----
+"""
+
+
+class SocketDelay(object):
+ """A socket decorator to make TCP appear slower.
+
+    This changes recv, send, and sendall to add a fixed latency to each python
+    call if a new roundtrip is detected. That is, when a recv is called and the
+    flag new_roundtrip is set, latency is charged. Every send and sendall
+    sets this flag.
+
+    In addition, every send, sendall and recv sleeps a bit per character sent
+    to simulate bandwidth.
+
+    Not all methods are implemented; this is deliberate, as this class is not a
+    replacement for the builtin sockets layer. fileno is not implemented, to
+    prevent the proxy from being bypassed.
+ """
+
+ simulated_time = 0
+ _proxied_arguments = dict.fromkeys([
+ "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
+ "setblocking", "setsockopt", "settimeout", "shutdown"])
+
+ def __init__(self, sock, latency, bandwidth=1.0,
+ really_sleep=True):
+ """
+        :param bandwidth: simulated bandwidth (in megabits per second)
+        :param really_sleep: If set to False, the SocketDelay will just
+            increase a counter instead of calling time.sleep. This is useful
+            for unit testing the SocketDelay.
+ """
+ self.sock = sock
+ self.latency = latency
+ self.really_sleep = really_sleep
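+        # e.g. with the default bandwidth of 1.0 megabit/s, time_per_byte is
+        # 1 / (1.0 / 8.0 * 1024 * 1024) ~= 7.6e-6 seconds charged per byte.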
+ self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
+ self.new_roundtrip = False
+
+ def sleep(self, s):
+ if self.really_sleep:
+ time.sleep(s)
+ else:
+ SocketDelay.simulated_time += s
+
+ def __getattr__(self, attr):
+ if attr in SocketDelay._proxied_arguments:
+ return getattr(self.sock, attr)
+ raise AttributeError("'SocketDelay' object has no attribute %r" %
+ attr)
+
+    def dup(self):
+        # Recover the bandwidth (in megabits) from time_per_byte so the
+        # duplicate is configured like the original socket.
+        bandwidth = 8.0 / (self.time_per_byte * 1024 * 1024)
+        return SocketDelay(self.sock.dup(), self.latency, bandwidth,
+                           self.really_sleep)
+
+ def recv(self, *args):
+ data = self.sock.recv(*args)
+ if data and self.new_roundtrip:
+ self.new_roundtrip = False
+ self.sleep(self.latency)
+ self.sleep(len(data) * self.time_per_byte)
+ return data
+
+ def sendall(self, data, flags=0):
+ if not self.new_roundtrip:
+ self.new_roundtrip = True
+ self.sleep(self.latency)
+ self.sleep(len(data) * self.time_per_byte)
+ return self.sock.sendall(data, flags)
+
+ def send(self, data, flags=0):
+ if not self.new_roundtrip:
+ self.new_roundtrip = True
+ self.sleep(self.latency)
+ bytes_sent = self.sock.send(data, flags)
+ self.sleep(bytes_sent * self.time_per_byte)
+ return bytes_sent
+
+
+class TestingSFTPConnectionHandler(SocketServer.BaseRequestHandler):
+
+ def setup(self):
+ self.wrap_for_latency()
+ tcs = self.server.test_case_server
+ ptrans = paramiko.Transport(self.request)
+ self.paramiko_transport = ptrans
+ # Set it to a channel under 'bzr' so that we get debug info
+ ptrans.set_log_channel('bzr.paramiko.transport')
+ ptrans.add_server_key(tcs.get_host_key())
+ ptrans.set_subsystem_handler('sftp', paramiko.SFTPServer,
+ StubSFTPServer, root=tcs._root,
+ home=tcs._server_homedir)
+ server = tcs._server_interface(tcs)
+ # This blocks until the key exchange has been done
+ ptrans.start_server(None, server)
+
+ def finish(self):
+ # Wait for the conversation to finish, when the paramiko.Transport
+ # thread finishes
+ # TODO: Consider timing out after XX seconds rather than hanging.
+ # Also we could check paramiko_transport.active and possibly
+ # paramiko_transport.getException().
+ self.paramiko_transport.join()
+
+ def wrap_for_latency(self):
+ tcs = self.server.test_case_server
+ if tcs.add_latency:
+ # Give the socket (which the request really is) a latency adding
+ # decorator.
+ self.request = SocketDelay(self.request, tcs.add_latency)
+
+
+class TestingSFTPWithoutSSHConnectionHandler(TestingSFTPConnectionHandler):
+
+ def setup(self):
+ self.wrap_for_latency()
+        # Re-import these as locals, so that they're still accessible during
+        # interpreter shutdown (when all module globals get set to None,
+        # leading to confusing errors like
+        # "'NoneType' object has no attribute 'error'").
+ class FakeChannel(object):
+ def get_transport(self):
+ return self
+ def get_log_channel(self):
+ return 'bzr.paramiko'
+ def get_name(self):
+ return '1'
+ def get_hexdump(self):
+ return False
+ def close(self):
+ pass
+
+ tcs = self.server.test_case_server
+ sftp_server = paramiko.SFTPServer(
+ FakeChannel(), 'sftp', StubServer(tcs), StubSFTPServer,
+ root=tcs._root, home=tcs._server_homedir)
+ self.sftp_server = sftp_server
+ sys_stderr = sys.stderr # Used in error reporting during shutdown
+ try:
+ sftp_server.start_subsystem(
+ 'sftp', None, ssh.SocketAsChannelAdapter(self.request))
+ except socket.error, e:
+ if (len(e.args) > 0) and (e.args[0] == errno.EPIPE):
+ # it's okay for the client to disconnect abruptly
+ # (bug in paramiko 1.6: it should absorb this exception)
+ pass
+ else:
+ raise
+ except Exception, e:
+ # This typically seems to happen during interpreter shutdown, so
+ # most of the useful ways to report this error won't work.
+ # Writing the exception type, and then the text of the exception,
+ # seems to be the best we can do.
+ # FIXME: All interpreter shutdown errors should have been related
+ # to daemon threads, cleanup needed -- vila 20100623
+ sys_stderr.write('\nEXCEPTION %r: ' % (e.__class__,))
+ sys_stderr.write('%s\n\n' % (e,))
+
+ def finish(self):
+ self.sftp_server.finish_subsystem()
+
+
+class TestingSFTPServer(test_server.TestingThreadingTCPServer):
+
+ def __init__(self, server_address, request_handler_class, test_case_server):
+ test_server.TestingThreadingTCPServer.__init__(
+ self, server_address, request_handler_class)
+ self.test_case_server = test_case_server
+
+
+class SFTPServer(test_server.TestingTCPServerInAThread):
+ """Common code for SFTP server facilities."""
+
+ def __init__(self, server_interface=StubServer):
+ self.host = '127.0.0.1'
+ self.port = 0
+ super(SFTPServer, self).__init__((self.host, self.port),
+ TestingSFTPServer,
+ TestingSFTPConnectionHandler)
+ self._original_vendor = None
+ self._vendor = ssh.ParamikoVendor()
+ self._server_interface = server_interface
+ self._host_key = None
+ self.logs = []
+ self.add_latency = 0
+ self._homedir = None
+ self._server_homedir = None
+ self._root = None
+
+ def _get_sftp_url(self, path):
+ """Calculate an sftp url to this server for path."""
+ return "sftp://foo:bar@%s:%s/%s" % (self.host, self.port, path)
+
+ def log(self, message):
+ """StubServer uses this to log when a new server is created."""
+ self.logs.append(message)
+
+ def create_server(self):
+ server = self.server_class((self.host, self.port),
+ self.request_handler_class,
+ self)
+ return server
+
+ def get_host_key(self):
+ if self._host_key is None:
+ key_file = osutils.pathjoin(self._homedir, 'test_rsa.key')
+ f = open(key_file, 'w')
+ try:
+ f.write(STUB_SERVER_KEY)
+ finally:
+ f.close()
+ self._host_key = paramiko.RSAKey.from_private_key_file(key_file)
+ return self._host_key
+
+ def start_server(self, backing_server=None):
+ # XXX: TODO: make sftpserver back onto backing_server rather than local
+ # disk.
+ if not (backing_server is None or
+ isinstance(backing_server, test_server.LocalURLServer)):
+ raise AssertionError(
+ 'backing_server should not be %r, because this can only serve '
+ 'the local current working directory.' % (backing_server,))
+ self._original_vendor = ssh._ssh_vendor_manager._cached_ssh_vendor
+ ssh._ssh_vendor_manager._cached_ssh_vendor = self._vendor
+ if sys.platform == 'win32':
+ # Win32 needs to use the UNICODE api
+ self._homedir = os.getcwdu()
+ # Normalize the path or it will be wrongly escaped
+ self._homedir = osutils.normpath(self._homedir)
+ else:
+ # But unix SFTP servers should just deal in bytestreams
+ self._homedir = os.getcwd()
+ if self._server_homedir is None:
+ self._server_homedir = self._homedir
+ self._root = '/'
+ if sys.platform == 'win32':
+ self._root = ''
+ super(SFTPServer, self).start_server()
+
+ def stop_server(self):
+ try:
+ super(SFTPServer, self).stop_server()
+ finally:
+ ssh._ssh_vendor_manager._cached_ssh_vendor = self._original_vendor
+
+ def get_bogus_url(self):
+ """See bzrlib.transport.Server.get_bogus_url."""
+ # this is chosen to try to prevent trouble with proxies, weird dns, etc
+ # we bind a random socket, so that we get a guaranteed unused port
+ # we just never listen on that port
+ s = socket.socket()
+ s.bind(('localhost', 0))
+ return 'sftp://%s:%s/' % s.getsockname()
+
+
+class SFTPFullAbsoluteServer(SFTPServer):
+ """A test server for sftp transports, using absolute urls and ssh."""
+
+ def get_url(self):
+ """See bzrlib.transport.Server.get_url."""
+ homedir = self._homedir
+ if sys.platform != 'win32':
+ # Remove the initial '/' on all platforms but win32
+ homedir = homedir[1:]
+ return self._get_sftp_url(urlutils.escape(homedir))
+
+
+class SFTPServerWithoutSSH(SFTPServer):
+ """An SFTP server that uses a simple TCP socket pair rather than SSH."""
+
+ def __init__(self):
+ super(SFTPServerWithoutSSH, self).__init__()
+ self._vendor = ssh.LoopbackVendor()
+ self.request_handler_class = TestingSFTPWithoutSSHConnectionHandler
+
+    def get_host_key(self):
+ return None
+
+
+class SFTPAbsoluteServer(SFTPServerWithoutSSH):
+ """A test server for sftp transports, using absolute urls."""
+
+ def get_url(self):
+ """See bzrlib.transport.Server.get_url."""
+ homedir = self._homedir
+ if sys.platform != 'win32':
+ # Remove the initial '/' on all platforms but win32
+ homedir = homedir[1:]
+ return self._get_sftp_url(urlutils.escape(homedir))
+
+
+class SFTPHomeDirServer(SFTPServerWithoutSSH):
+ """A test server for sftp transports, using homedir relative urls."""
+
+ def get_url(self):
+ """See bzrlib.transport.Server.get_url."""
+ return self._get_sftp_url("%7E/")
+
+
+class SFTPSiblingAbsoluteServer(SFTPAbsoluteServer):
+ """A test server for sftp transports where only absolute paths will work.
+
+ It does this by serving from a deeply-nested directory that doesn't exist.
+ """
+
+ def create_server(self):
+ # FIXME: Can't we do that in a cleaner way ? -- vila 20100623
+ server = super(SFTPSiblingAbsoluteServer, self).create_server()
+ server._server_homedir = '/dev/noone/runs/tests/here'
+ return server
+
diff --git a/bzrlib/tests/test__annotator.py b/bzrlib/tests/test__annotator.py
new file mode 100644
index 0000000..c786a53
--- /dev/null
+++ b/bzrlib/tests/test__annotator.py
@@ -0,0 +1,375 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Annotators."""
+
+from bzrlib import (
+ annotate,
+ errors,
+ knit,
+ revision,
+ tests,
+ )
+
+
+def load_tests(standard_tests, module, loader):
+    """Parameterize tests for all versions of the annotator."""
+ suite, _ = tests.permute_tests_for_extension(standard_tests, loader,
+ 'bzrlib._annotator_py', 'bzrlib._annotator_pyx')
+ return suite
+
+
+class TestAnnotator(tests.TestCaseWithMemoryTransport):
+
+ module = None # Set by load_tests
+
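+    # Each key is a (file_id, revision_id) tuple; all of the keys below share
+    # the file id 'f-id' and differ only in the revision id, naming the
+    # revisions used in the ancestry graphs sketched in the make_* helpers.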
+ fa_key = ('f-id', 'a-id')
+ fb_key = ('f-id', 'b-id')
+ fc_key = ('f-id', 'c-id')
+ fd_key = ('f-id', 'd-id')
+ fe_key = ('f-id', 'e-id')
+ ff_key = ('f-id', 'f-id')
+
+ def make_no_graph_texts(self):
+ factory = knit.make_pack_factory(False, False, 2)
+ self.vf = factory(self.get_transport())
+ self.ann = self.module.Annotator(self.vf)
+ self.vf.add_lines(self.fa_key, (), ['simple\n', 'content\n'])
+ self.vf.add_lines(self.fb_key, (), ['simple\n', 'new content\n'])
+
+ def make_simple_text(self):
+ # TODO: all we really need is a VersionedFile instance, we'd like to
+ # avoid creating all the intermediate stuff
+ factory = knit.make_pack_factory(True, True, 2)
+ self.vf = factory(self.get_transport())
+ # This assumes nothing special happens during __init__, which may be
+ # valid
+ self.ann = self.module.Annotator(self.vf)
+ # A 'simple|content|'
+ # |
+ # B 'simple|new content|'
+ self.vf.add_lines(self.fa_key, [], ['simple\n', 'content\n'])
+ self.vf.add_lines(self.fb_key, [self.fa_key],
+ ['simple\n', 'new content\n'])
+
+ def make_merge_text(self):
+ self.make_simple_text()
+ # A 'simple|content|'
+ # |\
+ # B | 'simple|new content|'
+ # | |
+ # | C 'simple|from c|content|'
+ # |/
+ # D 'simple|from c|new content|introduced in merge|'
+ self.vf.add_lines(self.fc_key, [self.fa_key],
+ ['simple\n', 'from c\n', 'content\n'])
+ self.vf.add_lines(self.fd_key, [self.fb_key, self.fc_key],
+ ['simple\n', 'from c\n', 'new content\n',
+ 'introduced in merge\n'])
+
+ def make_common_merge_text(self):
+ """Both sides of the merge will have introduced a line."""
+ self.make_simple_text()
+ # A 'simple|content|'
+ # |\
+ # B | 'simple|new content|'
+ # | |
+ # | C 'simple|new content|'
+ # |/
+ # D 'simple|new content|'
+ self.vf.add_lines(self.fc_key, [self.fa_key],
+ ['simple\n', 'new content\n'])
+ self.vf.add_lines(self.fd_key, [self.fb_key, self.fc_key],
+ ['simple\n', 'new content\n'])
+
+ def make_many_way_common_merge_text(self):
+ self.make_simple_text()
+ # A-. 'simple|content|'
+ # |\ \
+ # B | | 'simple|new content|'
+ # | | |
+ # | C | 'simple|new content|'
+ # |/ |
+ # D | 'simple|new content|'
+ # | |
+ # | E 'simple|new content|'
+ # | /
+ # F-' 'simple|new content|'
+ self.vf.add_lines(self.fc_key, [self.fa_key],
+ ['simple\n', 'new content\n'])
+ self.vf.add_lines(self.fd_key, [self.fb_key, self.fc_key],
+ ['simple\n', 'new content\n'])
+ self.vf.add_lines(self.fe_key, [self.fa_key],
+ ['simple\n', 'new content\n'])
+ self.vf.add_lines(self.ff_key, [self.fd_key, self.fe_key],
+ ['simple\n', 'new content\n'])
+
+ def make_merge_and_restored_text(self):
+ self.make_simple_text()
+ # A 'simple|content|'
+ # |\
+ # B | 'simple|new content|'
+ # | |
+ # C | 'simple|content|' # reverted to A
+ # \|
+ # D 'simple|content|'
+ # c reverts back to 'a' for the new content line
+ self.vf.add_lines(self.fc_key, [self.fb_key],
+ ['simple\n', 'content\n'])
+ # d merges 'a' and 'c', to find both claim last modified
+ self.vf.add_lines(self.fd_key, [self.fa_key, self.fc_key],
+ ['simple\n', 'content\n'])
+
+ def assertAnnotateEqual(self, expected_annotation, key, exp_text=None):
+ annotation, lines = self.ann.annotate(key)
+ self.assertEqual(expected_annotation, annotation)
+ if exp_text is None:
+ record = self.vf.get_record_stream([key], 'unordered', True).next()
+ exp_text = record.get_bytes_as('fulltext')
+ self.assertEqualDiff(exp_text, ''.join(lines))
+
+ def test_annotate_missing(self):
+ self.make_simple_text()
+ self.assertRaises(errors.RevisionNotPresent,
+ self.ann.annotate, ('not', 'present'))
+
+ def test_annotate_simple(self):
+ self.make_simple_text()
+ self.assertAnnotateEqual([(self.fa_key,)]*2, self.fa_key)
+ self.assertAnnotateEqual([(self.fa_key,), (self.fb_key,)], self.fb_key)
+
+ def test_annotate_merge_text(self):
+ self.make_merge_text()
+ self.assertAnnotateEqual([(self.fa_key,), (self.fc_key,),
+ (self.fb_key,), (self.fd_key,)],
+ self.fd_key)
+
+ def test_annotate_common_merge_text(self):
+ self.make_common_merge_text()
+ self.assertAnnotateEqual([(self.fa_key,), (self.fb_key, self.fc_key)],
+ self.fd_key)
+
+ def test_annotate_many_way_common_merge_text(self):
+ self.make_many_way_common_merge_text()
+ self.assertAnnotateEqual([(self.fa_key,),
+ (self.fb_key, self.fc_key, self.fe_key)],
+ self.ff_key)
+
+ def test_annotate_merge_and_restored(self):
+ self.make_merge_and_restored_text()
+ self.assertAnnotateEqual([(self.fa_key,), (self.fa_key, self.fc_key)],
+ self.fd_key)
+
+ def test_annotate_flat_simple(self):
+ self.make_simple_text()
+ self.assertEqual([(self.fa_key, 'simple\n'),
+ (self.fa_key, 'content\n'),
+ ], self.ann.annotate_flat(self.fa_key))
+ self.assertEqual([(self.fa_key, 'simple\n'),
+ (self.fb_key, 'new content\n'),
+ ], self.ann.annotate_flat(self.fb_key))
+
+ def test_annotate_flat_merge_and_restored_text(self):
+ self.make_merge_and_restored_text()
+ # fc is a simple dominator of fa
+ self.assertEqual([(self.fa_key, 'simple\n'),
+ (self.fc_key, 'content\n'),
+ ], self.ann.annotate_flat(self.fd_key))
+
+    def test_annotate_flat_common_merge_text(self):
+ self.make_common_merge_text()
+ # there is no common point, so we just pick the lexicographical lowest
+ # and 'b-id' comes before 'c-id'
+ self.assertEqual([(self.fa_key, 'simple\n'),
+ (self.fb_key, 'new content\n'),
+ ], self.ann.annotate_flat(self.fd_key))
+
+    def test_annotate_flat_many_way_common_merge_text(self):
+ self.make_many_way_common_merge_text()
+ self.assertEqual([(self.fa_key, 'simple\n'),
+ (self.fb_key, 'new content\n')],
+ self.ann.annotate_flat(self.ff_key))
+
+ def test_annotate_flat_respects_break_ann_tie(self):
+ tiebreaker = annotate._break_annotation_tie
+ try:
+ calls = []
+ def custom_tiebreaker(annotated_lines):
+ self.assertEqual(2, len(annotated_lines))
+ left = annotated_lines[0]
+ self.assertEqual(2, len(left))
+ self.assertEqual('new content\n', left[1])
+ right = annotated_lines[1]
+ self.assertEqual(2, len(right))
+ self.assertEqual('new content\n', right[1])
+ calls.append((left[0], right[0]))
+ # Our custom tiebreaker takes the *largest* value, rather than
+ # the *smallest* value
+ if left[0] < right[0]:
+ return right
+ else:
+ return left
+ annotate._break_annotation_tie = custom_tiebreaker
+ self.make_many_way_common_merge_text()
+ self.assertEqual([(self.fa_key, 'simple\n'),
+ (self.fe_key, 'new content\n')],
+ self.ann.annotate_flat(self.ff_key))
+ self.assertEqual([(self.fe_key, self.fc_key),
+ (self.fe_key, self.fb_key)], calls)
+ finally:
+ annotate._break_annotation_tie = tiebreaker
+
+
+ def test_needed_keys_simple(self):
+ self.make_simple_text()
+ keys, ann_keys = self.ann._get_needed_keys(self.fb_key)
+ self.assertEqual([self.fa_key, self.fb_key], sorted(keys))
+ self.assertEqual({self.fa_key: 1, self.fb_key: 1},
+ self.ann._num_needed_children)
+ self.assertEqual(set(), ann_keys)
+
+ def test_needed_keys_many(self):
+ self.make_many_way_common_merge_text()
+ keys, ann_keys = self.ann._get_needed_keys(self.ff_key)
+ self.assertEqual([self.fa_key, self.fb_key, self.fc_key,
+ self.fd_key, self.fe_key, self.ff_key,
+ ], sorted(keys))
+ self.assertEqual({self.fa_key: 3,
+ self.fb_key: 1,
+ self.fc_key: 1,
+ self.fd_key: 1,
+ self.fe_key: 1,
+ self.ff_key: 1,
+ }, self.ann._num_needed_children)
+ self.assertEqual(set(), ann_keys)
+
+ def test_needed_keys_with_special_text(self):
+ self.make_many_way_common_merge_text()
+ spec_key = ('f-id', revision.CURRENT_REVISION)
+ spec_text = 'simple\nnew content\nlocally modified\n'
+ self.ann.add_special_text(spec_key, [self.fd_key, self.fe_key],
+ spec_text)
+ keys, ann_keys = self.ann._get_needed_keys(spec_key)
+ self.assertEqual([self.fa_key, self.fb_key, self.fc_key,
+ self.fd_key, self.fe_key,
+ ], sorted(keys))
+ self.assertEqual([spec_key], sorted(ann_keys))
+
+ def test_needed_keys_with_parent_texts(self):
+ self.make_many_way_common_merge_text()
+ # If 'D' and 'E' are already annotated, we don't need to extract all
+ # the texts
+ # D | 'simple|new content|'
+ # | |
+ # | E 'simple|new content|'
+ # | /
+ # F-' 'simple|new content|'
+ self.ann._parent_map[self.fd_key] = (self.fb_key, self.fc_key)
+ self.ann._text_cache[self.fd_key] = ['simple\n', 'new content\n']
+ self.ann._annotations_cache[self.fd_key] = [
+ (self.fa_key,),
+ (self.fb_key, self.fc_key),
+ ]
+ self.ann._parent_map[self.fe_key] = (self.fa_key,)
+ self.ann._text_cache[self.fe_key] = ['simple\n', 'new content\n']
+ self.ann._annotations_cache[self.fe_key] = [
+ (self.fa_key,),
+ (self.fe_key,),
+ ]
+ keys, ann_keys = self.ann._get_needed_keys(self.ff_key)
+ self.assertEqual([self.ff_key], sorted(keys))
+ self.assertEqual({self.fd_key: 1,
+ self.fe_key: 1,
+ self.ff_key: 1,
+ }, self.ann._num_needed_children)
+ self.assertEqual([], sorted(ann_keys))
+
+ def test_record_annotation_removes_texts(self):
+ self.make_many_way_common_merge_text()
+ # Populate the caches
+ for x in self.ann._get_needed_texts(self.ff_key):
+ continue
+ self.assertEqual({self.fa_key: 3,
+ self.fb_key: 1,
+ self.fc_key: 1,
+ self.fd_key: 1,
+ self.fe_key: 1,
+ self.ff_key: 1,
+ }, self.ann._num_needed_children)
+ self.assertEqual([self.fa_key, self.fb_key, self.fc_key,
+ self.fd_key, self.fe_key, self.ff_key,
+ ], sorted(self.ann._text_cache.keys()))
+ self.ann._record_annotation(self.fa_key, [], [])
+ self.ann._record_annotation(self.fb_key, [self.fa_key], [])
+ self.assertEqual({self.fa_key: 2,
+ self.fb_key: 1,
+ self.fc_key: 1,
+ self.fd_key: 1,
+ self.fe_key: 1,
+ self.ff_key: 1,
+ }, self.ann._num_needed_children)
+ self.assertTrue(self.fa_key in self.ann._text_cache)
+ self.assertTrue(self.fa_key in self.ann._annotations_cache)
+ self.ann._record_annotation(self.fc_key, [self.fa_key], [])
+ self.ann._record_annotation(self.fd_key, [self.fb_key, self.fc_key], [])
+ self.assertEqual({self.fa_key: 1,
+ self.fb_key: 0,
+ self.fc_key: 0,
+ self.fd_key: 1,
+ self.fe_key: 1,
+ self.ff_key: 1,
+ }, self.ann._num_needed_children)
+ self.assertTrue(self.fa_key in self.ann._text_cache)
+ self.assertTrue(self.fa_key in self.ann._annotations_cache)
+ self.assertFalse(self.fb_key in self.ann._text_cache)
+ self.assertFalse(self.fb_key in self.ann._annotations_cache)
+ self.assertFalse(self.fc_key in self.ann._text_cache)
+ self.assertFalse(self.fc_key in self.ann._annotations_cache)
+
+ def test_annotate_special_text(self):
+ # Things like WT and PreviewTree want to annotate an arbitrary text
+ # ('current:') so we need a way to add that to the group of files to be
+ # annotated.
+ self.make_many_way_common_merge_text()
+ # A-. 'simple|content|'
+ # |\ \
+ # B | | 'simple|new content|'
+ # | | |
+ # | C | 'simple|new content|'
+ # |/ |
+ # D | 'simple|new content|'
+ # | |
+ # | E 'simple|new content|'
+ # | /
+ # SPEC 'simple|new content|locally modified|'
+ spec_key = ('f-id', revision.CURRENT_REVISION)
+ spec_text = 'simple\nnew content\nlocally modified\n'
+ self.ann.add_special_text(spec_key, [self.fd_key, self.fe_key],
+ spec_text)
+ self.assertAnnotateEqual([(self.fa_key,),
+ (self.fb_key, self.fc_key, self.fe_key),
+ (spec_key,),
+ ], spec_key,
+ exp_text=spec_text)
+
+ def test_no_graph(self):
+ self.make_no_graph_texts()
+ self.assertAnnotateEqual([(self.fa_key,),
+ (self.fa_key,),
+ ], self.fa_key)
+ self.assertAnnotateEqual([(self.fb_key,),
+ (self.fb_key,),
+ ], self.fb_key)
diff --git a/bzrlib/tests/test__bencode.py b/bzrlib/tests/test__bencode.py
new file mode 100644
index 0000000..87cc684
--- /dev/null
+++ b/bzrlib/tests/test__bencode.py
@@ -0,0 +1,217 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bencode structured encoding"""
+
+import sys
+
+from bzrlib import tests
+
+def load_tests(standard_tests, module, loader):
+ suite, _ = tests.permute_tests_for_extension(standard_tests, loader,
+ 'bzrlib.util._bencode_py', 'bzrlib._bencode_pyx')
+ return suite
+
+
+class TestBencodeDecode(tests.TestCase):
+
+ module = None
+
+ def _check(self, expected, source):
+ self.assertEquals(expected, self.module.bdecode(source))
+
+ def _run_check_error(self, exc, bad):
+ """Check that bdecoding a string raises a particular exception."""
+ self.assertRaises(exc, self.module.bdecode, bad)
+
+ def test_int(self):
+ self._check(0, 'i0e')
+ self._check(4, 'i4e')
+ self._check(123456789, 'i123456789e')
+ self._check(-10, 'i-10e')
+ self._check(int('1' * 1000), 'i' + ('1' * 1000) + 'e')
+
+ def test_long(self):
+ self._check(12345678901234567890L, 'i12345678901234567890e')
+ self._check(-12345678901234567890L, 'i-12345678901234567890e')
+
+ def test_malformed_int(self):
+ self._run_check_error(ValueError, 'ie')
+ self._run_check_error(ValueError, 'i-e')
+ self._run_check_error(ValueError, 'i-010e')
+ self._run_check_error(ValueError, 'i-0e')
+ self._run_check_error(ValueError, 'i00e')
+ self._run_check_error(ValueError, 'i01e')
+ self._run_check_error(ValueError, 'i-03e')
+ self._run_check_error(ValueError, 'i')
+ self._run_check_error(ValueError, 'i123')
+ self._run_check_error(ValueError, 'i341foo382e')
+
+ def test_string(self):
+ self._check('', '0:')
+ self._check('abc', '3:abc')
+ self._check('1234567890', '10:1234567890')
+
+ def test_large_string(self):
+ self.assertRaises(ValueError, self.module.bdecode, "2147483639:foo")
+
+ def test_malformed_string(self):
+ self._run_check_error(ValueError, '10:x')
+ self._run_check_error(ValueError, '10:')
+ self._run_check_error(ValueError, '10')
+ self._run_check_error(ValueError, '01:x')
+ self._run_check_error(ValueError, '00:')
+ self._run_check_error(ValueError, '35208734823ljdahflajhdf')
+ self._run_check_error(ValueError, '432432432432432:foo')
+ self._run_check_error(ValueError, ' 1:x') # leading whitespace
+ self._run_check_error(ValueError, '-1:x') # negative
+ self._run_check_error(ValueError, '1 x') # space vs colon
+ self._run_check_error(ValueError, '1x') # missing colon
+ self._run_check_error(ValueError, ('1' * 1000) + ':')
+
+ def test_list(self):
+ self._check([], 'le')
+ self._check(['', '', ''], 'l0:0:0:e')
+ self._check([1, 2, 3], 'li1ei2ei3ee')
+ self._check(['asd', 'xy'], 'l3:asd2:xye')
+ self._check([['Alice', 'Bob'], [2, 3]], 'll5:Alice3:Bobeli2ei3eee')
+
+ def test_list_deepnested(self):
+ self._run_check_error(RuntimeError, ("l" * 10000) + ("e" * 10000))
+
+ def test_malformed_list(self):
+ self._run_check_error(ValueError, 'l')
+ self._run_check_error(ValueError, 'l01:ae')
+ self._run_check_error(ValueError, 'l0:')
+ self._run_check_error(ValueError, 'li1e')
+ self._run_check_error(ValueError, 'l-3:e')
+
+ def test_dict(self):
+ self._check({}, 'de')
+ self._check({'':3}, 'd0:i3ee')
+ self._check({'age': 25, 'eyes': 'blue'}, 'd3:agei25e4:eyes4:bluee')
+ self._check({'spam.mp3': {'author': 'Alice', 'length': 100000}},
+ 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee')
+
+ def test_dict_deepnested(self):
+ # The recursion here provokes CPython into emitting a warning on
+ # stderr, "maximum recursion depth exceeded in __subclasscheck__", due
+ # to running out of stack space while evaluating "except (...):" in
+ # _bencode_py. This is harmless, so we temporarily override stderr to
+ # avoid distracting noise in the test output.
+ self.overrideAttr(sys, 'stderr', self._log_file)
+ self._run_check_error(
+ RuntimeError, ("d0:" * 10000) + 'i1e' + ("e" * 10000))
+
+ def test_malformed_dict(self):
+ self._run_check_error(ValueError, 'd')
+ self._run_check_error(ValueError, 'defoobar')
+ self._run_check_error(ValueError, 'd3:fooe')
+ self._run_check_error(ValueError, 'di1e0:e')
+ self._run_check_error(ValueError, 'd1:b0:1:a0:e')
+ self._run_check_error(ValueError, 'd1:a0:1:a0:e')
+ self._run_check_error(ValueError, 'd0:0:')
+ self._run_check_error(ValueError, 'd0:')
+ self._run_check_error(ValueError, 'd432432432432432432:e')
+
+ def test_empty_string(self):
+ self.assertRaises(ValueError, self.module.bdecode, '')
+
+ def test_junk(self):
+ self._run_check_error(ValueError, 'i6easd')
+ self._run_check_error(ValueError, '2:abfdjslhfld')
+ self._run_check_error(ValueError, '0:0:')
+ self._run_check_error(ValueError, 'leanfdldjfh')
+
+ def test_unknown_object(self):
+ self.assertRaises(ValueError, self.module.bdecode, 'relwjhrlewjh')
+
+ def test_unsupported_type(self):
+ self._run_check_error(TypeError, float(1.5))
+ self._run_check_error(TypeError, None)
+ self._run_check_error(TypeError, lambda x: x)
+ self._run_check_error(TypeError, object)
+ self._run_check_error(TypeError, u"ie")
+
+ def test_decoder_type_error(self):
+ self.assertRaises(TypeError, self.module.bdecode, 1)
+
+
+class TestBencodeEncode(tests.TestCase):
+
+ module = None
+
+ def _check(self, expected, source):
+ self.assertEquals(expected, self.module.bencode(source))
+
+ def test_int(self):
+ self._check('i4e', 4)
+ self._check('i0e', 0)
+ self._check('i-10e', -10)
+
+ def test_long(self):
+ self._check('i12345678901234567890e', 12345678901234567890L)
+ self._check('i-12345678901234567890e', -12345678901234567890L)
+
+ def test_string(self):
+ self._check('0:', '')
+ self._check('3:abc', 'abc')
+ self._check('10:1234567890', '1234567890')
+
+ def test_list(self):
+ self._check('le', [])
+ self._check('li1ei2ei3ee', [1, 2, 3])
+ self._check('ll5:Alice3:Bobeli2ei3eee', [['Alice', 'Bob'], [2, 3]])
+
+ def test_list_as_tuple(self):
+ self._check('le', ())
+ self._check('li1ei2ei3ee', (1, 2, 3))
+ self._check('ll5:Alice3:Bobeli2ei3eee', (('Alice', 'Bob'), (2, 3)))
+
+ def test_list_deep_nested(self):
+ top = []
+ l = top
+ for i in range(10000):
+ l.append([])
+ l = l[0]
+ self.assertRaises(RuntimeError, self.module.bencode,
+ top)
+
+ def test_dict(self):
+ self._check('de', {})
+ self._check('d3:agei25e4:eyes4:bluee', {'age': 25, 'eyes': 'blue'})
+ self._check('d8:spam.mp3d6:author5:Alice6:lengthi100000eee',
+ {'spam.mp3': {'author': 'Alice',
+ 'length': 100000}})
+
+ def test_dict_deep_nested(self):
+ d = top = {}
+ for i in range(10000):
+ d[''] = {}
+ d = d['']
+ self.assertRaises(RuntimeError, self.module.bencode,
+ top)
+
+ def test_bencached(self):
+ self._check('i3e', self.module.Bencached(self.module.bencode(3)))
+
+ def test_invalid_dict(self):
+ self.assertRaises(TypeError, self.module.bencode, {1:"foo"})
+
+ def test_bool(self):
+ self._check('i1e', True)
+ self._check('i0e', False)
+
diff --git a/bzrlib/tests/test__btree_serializer.py b/bzrlib/tests/test__btree_serializer.py
new file mode 100644
index 0000000..e6bf608
--- /dev/null
+++ b/bzrlib/tests/test__btree_serializer.py
@@ -0,0 +1,305 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Direct tests of the btree serializer extension"""
+
+import binascii
+import bisect
+
+from bzrlib import tests
+
+from bzrlib.tests.test_btree_index import compiled_btreeparser_feature
+
+
+class TestBtreeSerializer(tests.TestCase):
+
+ _test_needs_features = [compiled_btreeparser_feature]
+
+ def setUp(self):
+ super(TestBtreeSerializer, self).setUp()
+ self.module = compiled_btreeparser_feature.module
+
+
+class TestHexAndUnhex(TestBtreeSerializer):
+
+ def assertHexlify(self, as_binary):
+ self.assertEqual(binascii.hexlify(as_binary),
+ self.module._py_hexlify(as_binary))
+
+ def assertUnhexlify(self, as_hex):
+ ba_unhex = binascii.unhexlify(as_hex)
+ mod_unhex = self.module._py_unhexlify(as_hex)
+ if ba_unhex != mod_unhex:
+ if mod_unhex is None:
+ mod_hex = '<None>'
+ else:
+ mod_hex = binascii.hexlify(mod_unhex)
+ self.fail('_py_unhexlify returned a different answer'
+ ' from binascii:\n %s\n != %s'
+ % (binascii.hexlify(ba_unhex), mod_hex))
+
+ def assertFailUnhexlify(self, as_hex):
+ # Invalid hex content
+ self.assertIs(None, self.module._py_unhexlify(as_hex))
+
+ def test_to_hex(self):
+ raw_bytes = ''.join(map(chr, range(256)))
+ for i in range(0, 240, 20):
+ self.assertHexlify(raw_bytes[i:i+20])
+ self.assertHexlify(raw_bytes[240:]+raw_bytes[0:4])
+
+ def test_from_hex(self):
+ self.assertUnhexlify('0123456789abcdef0123456789abcdef01234567')
+ self.assertUnhexlify('123456789abcdef0123456789abcdef012345678')
+ self.assertUnhexlify('0123456789ABCDEF0123456789ABCDEF01234567')
+ self.assertUnhexlify('123456789ABCDEF0123456789ABCDEF012345678')
+ hex_chars = binascii.hexlify(''.join(map(chr, range(256))))
+ for i in range(0, 480, 40):
+ self.assertUnhexlify(hex_chars[i:i+40])
+ self.assertUnhexlify(hex_chars[480:]+hex_chars[0:8])
+
+ def test_from_invalid_hex(self):
+ self.assertFailUnhexlify('123456789012345678901234567890123456789X')
+ self.assertFailUnhexlify('12345678901234567890123456789012345678X9')
+
+
+_hex_form = '123456789012345678901234567890abcdefabcd'
+
+class Test_KeyToSha1(TestBtreeSerializer):
+
+ def assertKeyToSha1(self, expected, key):
+ if expected is None:
+ expected_bin = None
+ else:
+ expected_bin = binascii.unhexlify(expected)
+ actual_sha1 = self.module._py_key_to_sha1(key)
+ if expected_bin != actual_sha1:
+ actual_hex_sha1 = None
+ if actual_sha1 is not None:
+ actual_hex_sha1 = binascii.hexlify(actual_sha1)
+ self.fail('_key_to_sha1 returned:\n %s\n != %s'
+ % (actual_hex_sha1, expected))
+
+ def test_simple(self):
+ self.assertKeyToSha1(_hex_form, ('sha1:' + _hex_form,))
+
+ def test_invalid_not_tuple(self):
+ self.assertKeyToSha1(None, _hex_form)
+ self.assertKeyToSha1(None, 'sha1:' + _hex_form)
+
+ def test_invalid_empty(self):
+ self.assertKeyToSha1(None, ())
+
+ def test_invalid_not_string(self):
+ self.assertKeyToSha1(None, (None,))
+ self.assertKeyToSha1(None, (list(_hex_form),))
+
+ def test_invalid_not_sha1(self):
+ self.assertKeyToSha1(None, (_hex_form,))
+ self.assertKeyToSha1(None, ('sha2:' + _hex_form,))
+
+ def test_invalid_not_hex(self):
+ self.assertKeyToSha1(None,
+ ('sha1:abcdefghijklmnopqrstuvwxyz12345678901234',))
+
+
+class Test_Sha1ToKey(TestBtreeSerializer):
+
+ def assertSha1ToKey(self, hex_sha1):
+ bin_sha1 = binascii.unhexlify(hex_sha1)
+ key = self.module._py_sha1_to_key(bin_sha1)
+ self.assertEqual(('sha1:' + hex_sha1,), key)
+
+ def test_simple(self):
+ self.assertSha1ToKey(_hex_form)
+
+
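+# A note on the fixtures below: they follow the leaf layout that
+# _parse_into_chk is fed in these tests -- a 'type=leaf' header line, then
+# one record per line of the form
+# 'sha1:<40 hex chars>\x00\x00<space-separated value fields>'.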
+_one_key_content = """type=leaf
+sha1:123456789012345678901234567890abcdefabcd\x00\x001 2 3 4
+"""
+
+_large_offsets = """type=leaf
+sha1:123456789012345678901234567890abcdefabcd\x00\x0012345678901 1234567890 0 1
+sha1:abcd123456789012345678901234567890abcdef\x00\x002147483648 2147483647 0 1
+sha1:abcdefabcd123456789012345678901234567890\x00\x004294967296 4294967295 4294967294 1
+"""
+
+_multi_key_content = """type=leaf
+sha1:c80c881d4a26984ddce795f6f71817c9cf4480e7\x00\x000 0 0 0
+sha1:c86f7e437faa5a7fce15d1ddcb9eaeaea377667b\x00\x001 1 1 1
+sha1:c8e240de74fb1ed08fa08d38063f6a6a91462a81\x00\x002 2 2 2
+sha1:cda39a3ee5e6b4b0d3255bfef95601890afd8070\x00\x003 3 3 3
+sha1:cdf51e37c269aa94d38f93e537bf6e2020b21406\x00\x004 4 4 4
+sha1:ce0c9035898dd52fc65c41454cec9c4d2611bfb3\x00\x005 5 5 5
+sha1:ce93b4e3c464ffd51732fbd6ded717e9efda28aa\x00\x006 6 6 6
+sha1:cf7a9e24777ec23212c54d7a350bc5bea5477fdb\x00\x007 7 7 7
+"""
+
+_multi_key_same_offset = """type=leaf
+sha1:080c881d4a26984ddce795f6f71817c9cf4480e7\x00\x000 0 0 0
+sha1:c86f7e437faa5a7fce15d1ddcb9eaeaea377667b\x00\x001 1 1 1
+sha1:cd0c9035898dd52fc65c41454cec9c4d2611bfb3\x00\x002 2 2 2
+sha1:cda39a3ee5e6b4b0d3255bfef95601890afd8070\x00\x003 3 3 3
+sha1:cde240de74fb1ed08fa08d38063f6a6a91462a81\x00\x004 4 4 4
+sha1:cdf51e37c269aa94d38f93e537bf6e2020b21406\x00\x005 5 5 5
+sha1:ce7a9e24777ec23212c54d7a350bc5bea5477fdb\x00\x006 6 6 6
+sha1:ce93b4e3c464ffd51732fbd6ded717e9efda28aa\x00\x007 7 7 7
+"""
+
+_common_32_bits = """type=leaf
+sha1:123456784a26984ddce795f6f71817c9cf4480e7\x00\x000 0 0 0
+sha1:1234567874fb1ed08fa08d38063f6a6a91462a81\x00\x001 1 1 1
+sha1:12345678777ec23212c54d7a350bc5bea5477fdb\x00\x002 2 2 2
+sha1:123456787faa5a7fce15d1ddcb9eaeaea377667b\x00\x003 3 3 3
+sha1:12345678898dd52fc65c41454cec9c4d2611bfb3\x00\x004 4 4 4
+sha1:12345678c269aa94d38f93e537bf6e2020b21406\x00\x005 5 5 5
+sha1:12345678c464ffd51732fbd6ded717e9efda28aa\x00\x006 6 6 6
+sha1:12345678e5e6b4b0d3255bfef95601890afd8070\x00\x007 7 7 7
+"""
+
+
+class TestGCCKHSHA1LeafNode(TestBtreeSerializer):
+
+ def assertInvalid(self, bytes):
+ """Ensure that we get a proper error when trying to parse invalid bytes.
+
+ (mostly this is testing that bad input doesn't cause us to segfault)
+ """
+ self.assertRaises((ValueError, TypeError),
+ self.module._parse_into_chk, bytes, 1, 0)
+
+ def test_non_str(self):
+ self.assertInvalid(u'type=leaf\n')
+
+ def test_not_leaf(self):
+ self.assertInvalid('type=internal\n')
+
+ def test_empty_leaf(self):
+ leaf = self.module._parse_into_chk('type=leaf\n', 1, 0)
+ self.assertEqual(0, len(leaf))
+ self.assertEqual([], leaf.all_items())
+ self.assertEqual([], leaf.all_keys())
+ # It should allow any key to be queried
+ self.assertFalse(('key',) in leaf)
+
+ def test_one_key_leaf(self):
+ leaf = self.module._parse_into_chk(_one_key_content, 1, 0)
+ self.assertEqual(1, len(leaf))
+ sha_key = ('sha1:' + _hex_form,)
+ self.assertEqual([sha_key], leaf.all_keys())
+ self.assertEqual([(sha_key, ('1 2 3 4', ()))], leaf.all_items())
+ self.assertTrue(sha_key in leaf)
+
+ def test_large_offsets(self):
+ leaf = self.module._parse_into_chk(_large_offsets, 1, 0)
+ self.assertEqual(['12345678901 1234567890 0 1',
+ '2147483648 2147483647 0 1',
+ '4294967296 4294967295 4294967294 1',
+ ], [x[1][0] for x in leaf.all_items()])
+
+ def test_many_key_leaf(self):
+ leaf = self.module._parse_into_chk(_multi_key_content, 1, 0)
+ self.assertEqual(8, len(leaf))
+ all_keys = leaf.all_keys()
+ self.assertEqual(8, len(leaf.all_keys()))
+ for idx, key in enumerate(all_keys):
+ self.assertEqual(str(idx), leaf[key][0].split()[0])
+
+ def test_common_shift(self):
+ # The keys were deliberately chosen so that the first 5 bits all
+ # overlapped; it also happens that a later bit overlaps.
+ # Note that by 'overlap' we mean that a given bit is either on in all
+ # keys, or off in all keys
+ leaf = self.module._parse_into_chk(_multi_key_content, 1, 0)
+ self.assertEqual(19, leaf.common_shift)
+ # The interesting byte for each key (defined as the 8 bits that come
+ # after the common prefix) is:
+ lst = [1, 13, 28, 180, 190, 193, 210, 239]
+ offsets = leaf._get_offsets()
+ self.assertEqual([bisect.bisect_left(lst, x) for x in range(0, 257)],
+ offsets)
+ for idx, val in enumerate(lst):
+ self.assertEqual(idx, offsets[val])
+ for idx, key in enumerate(leaf.all_keys()):
+ self.assertEqual(str(idx), leaf[key][0].split()[0])
+
+ def test_multi_key_same_offset(self):
+ # there is no common prefix, though there are some common bits
+ leaf = self.module._parse_into_chk(_multi_key_same_offset, 1, 0)
+ self.assertEqual(24, leaf.common_shift)
+ offsets = leaf._get_offsets()
+ # The interesting byte is just the first 8-bits of the key
+ lst = [8, 200, 205, 205, 205, 205, 206, 206]
+ self.assertEqual([bisect.bisect_left(lst, x) for x in range(0, 257)],
+ offsets)
+ for val in lst:
+ self.assertEqual(lst.index(val), offsets[val])
+ for idx, key in enumerate(leaf.all_keys()):
+ self.assertEqual(str(idx), leaf[key][0].split()[0])
+
+ def test_all_common_prefix(self):
+ # The first 32 bits of all hashes are the same. This is going to be
+ # pretty much impossible, but I don't want to fail because of this
+ leaf = self.module._parse_into_chk(_common_32_bits, 1, 0)
+ self.assertEqual(0, leaf.common_shift)
+ lst = [0x78] * 8
+ offsets = leaf._get_offsets()
+ self.assertEqual([bisect.bisect_left(lst, x) for x in range(0, 257)],
+ offsets)
+ for val in lst:
+ self.assertEqual(lst.index(val), offsets[val])
+ for idx, key in enumerate(leaf.all_keys()):
+ self.assertEqual(str(idx), leaf[key][0].split()[0])
+
+ def test_many_entries(self):
+ # Again, this is almost impossible, but we should still work
+ # It would be hard to fit more than 120 entries in a 4k page, much less
+ # more than 256 of them. But hey, weird stuff happens sometimes.
+ lines = ['type=leaf\n']
+ for i in range(500):
+ key_str = 'sha1:%04x%s' % (i, _hex_form[:36])
+ key = (key_str,)
+ lines.append('%s\0\0%d %d %d %d\n' % (key_str, i, i, i, i))
+ bytes = ''.join(lines)
+ leaf = self.module._parse_into_chk(bytes, 1, 0)
+ self.assertEqual(24-7, leaf.common_shift)
+ offsets = leaf._get_offsets()
+ # These are the interesting bits for each entry
+ lst = [x // 2 for x in range(500)]
+ expected_offsets = [x * 2 for x in range(128)] + [255]*129
+ self.assertEqual(expected_offsets, offsets)
+ # We truncate because offsets is an unsigned char. So the bisection
+ # will just say 'greater than the last one' for all the rest
+ lst = lst[:255]
+ self.assertEqual([bisect.bisect_left(lst, x) for x in range(0, 257)],
+ offsets)
+ for val in lst:
+ self.assertEqual(lst.index(val), offsets[val])
+ for idx, key in enumerate(leaf.all_keys()):
+ self.assertEqual(str(idx), leaf[key][0].split()[0])
+
+ def test__sizeof__(self):
+ # We can't use the exact numbers because of platform variations, etc.
+ # But what we really care about is that it does get bigger with more
+ # content.
+ leaf0 = self.module._parse_into_chk('type=leaf\n', 1, 0)
+ leaf1 = self.module._parse_into_chk(_one_key_content, 1, 0)
+ leafN = self.module._parse_into_chk(_multi_key_content, 1, 0)
+ sizeof_1 = leaf1.__sizeof__() - leaf0.__sizeof__()
+ self.assertTrue(sizeof_1 > 0)
+ sizeof_N = leafN.__sizeof__() - leaf0.__sizeof__()
+ self.assertEqual(sizeof_1 * len(leafN), sizeof_N)
diff --git a/bzrlib/tests/test__chk_map.py b/bzrlib/tests/test__chk_map.py
new file mode 100644
index 0000000..509bc88
--- /dev/null
+++ b/bzrlib/tests/test__chk_map.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for _chk_map_*."""
+
+from bzrlib import (
+ chk_map,
+ tests,
+ )
+from bzrlib.static_tuple import StaticTuple
+stuple = StaticTuple
+
+
+def load_tests(standard_tests, module, loader):
+ suite, _ = tests.permute_tests_for_extension(standard_tests, loader,
+ 'bzrlib._chk_map_py', 'bzrlib._chk_map_pyx')
+ return suite
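+
+# permute_tests_for_extension runs each test class below once per available
+# implementation, filling in the ``module`` attribute with either the pure
+# Python module or the compiled one, so both code paths get exercised.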
+
+
+class TestSearchKeys(tests.TestCase):
+
+ module = None # Filled in by test parameterization
+
+ def assertSearchKey16(self, expected, key):
+ self.assertEqual(expected, self.module._search_key_16(key))
+
+ def assertSearchKey255(self, expected, key):
+ actual = self.module._search_key_255(key)
+ self.assertEqual(expected, actual, 'actual: %r' % (actual,))
+
+ def test_simple_16(self):
+ self.assertSearchKey16('8C736521', stuple('foo',))
+ self.assertSearchKey16('8C736521\x008C736521', stuple('foo', 'foo'))
+ self.assertSearchKey16('8C736521\x0076FF8CAA', stuple('foo', 'bar'))
+ self.assertSearchKey16('ED82CD11', stuple('abcd',))
+
+ def test_simple_255(self):
+ self.assertSearchKey255('\x8cse!', stuple('foo',))
+ self.assertSearchKey255('\x8cse!\x00\x8cse!', stuple('foo', 'foo'))
+ self.assertSearchKey255('\x8cse!\x00v\xff\x8c\xaa', stuple('foo', 'bar'))
+ # The standard mapping for these would include '\n', so it should be
+ # mapped to '_'
+ self.assertSearchKey255('\xfdm\x93_\x00P_\x1bL', stuple('<', 'V'))
+
+ def test_255_does_not_include_newline(self):
+ # When mapping via _search_key_255, we should never have the '\n'
+ # character, but all other 255 values should be present
+ chars_used = set()
+ for char_in in range(256):
+ search_key = self.module._search_key_255(stuple(chr(char_in),))
+ chars_used.update(search_key)
+ all_chars = set([chr(x) for x in range(256)])
+ unused_chars = all_chars.symmetric_difference(chars_used)
+ self.assertEqual(set('\n'), unused_chars)
+
+
+class TestDeserialiseLeafNode(tests.TestCase):
+
+ module = None
+
+ def assertDeserialiseErrors(self, text):
+ self.assertRaises((ValueError, IndexError),
+ self.module._deserialise_leaf_node, text, 'not-a-real-sha')
+
+ def test_raises_on_non_leaf(self):
+ self.assertDeserialiseErrors('')
+ self.assertDeserialiseErrors('short\n')
+ self.assertDeserialiseErrors('chknotleaf:\n')
+ self.assertDeserialiseErrors('chkleaf:x\n')
+ self.assertDeserialiseErrors('chkleaf:\n')
+ self.assertDeserialiseErrors('chkleaf:\nnotint\n')
+ self.assertDeserialiseErrors('chkleaf:\n10\n')
+ self.assertDeserialiseErrors('chkleaf:\n10\n256\n')
+ self.assertDeserialiseErrors('chkleaf:\n10\n256\n10\n')
+
+ def test_deserialise_empty(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n10\n1\n0\n\n", stuple("sha1:1234",))
+ self.assertEqual(0, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertIsInstance(node.key(), StaticTuple)
+ self.assertIs(None, node._search_prefix)
+ self.assertIs(None, node._common_serialised_prefix)
+
+ def test_deserialise_items(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo bar",), "baz"), (("quux",), "blarh")],
+ sorted(node.iteritems(None)))
+
+ def test_deserialise_item_with_null_width_1(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n1\n2\n\nfoo\x001\nbar\x00baz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo",), "bar\x00baz"), (("quux",), "blarh")],
+ sorted(node.iteritems(None)))
+
+ def test_deserialise_item_with_null_width_2(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n2\n2\n\nfoo\x001\x001\nbar\x00baz\n"
+ "quux\x00\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo", "1"), "bar\x00baz"), (("quux", ""), "blarh")],
+ sorted(node.iteritems(None)))
+
+ def test_iteritems_selected_one_of_two_items(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("quux",), "blarh")],
+ sorted(node.iteritems(None, [("quux",), ("qaz",)])))
+
+ def test_deserialise_item_with_common_prefix(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n2\n2\nfoo\x00\n1\x001\nbar\x00baz\n2\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo", "1"), "bar\x00baz"), (("foo", "2"), "blarh")],
+ sorted(node.iteritems(None)))
+ self.assertIs(chk_map._unknown, node._search_prefix)
+ self.assertEqual('foo\x00', node._common_serialised_prefix)
+
+ def test_deserialise_multi_line(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n2\n2\nfoo\x00\n1\x002\nbar\nbaz\n2\x002\nblarh\n\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo", "1"), "bar\nbaz"),
+ (("foo", "2"), "blarh\n"),
+ ], sorted(node.iteritems(None)))
+ self.assertIs(chk_map._unknown, node._search_prefix)
+ self.assertEqual('foo\x00', node._common_serialised_prefix)
+
+ def test_key_after_map(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n10\n1\n0\n\n", ("sha1:1234",))
+ node.map(None, ("foo bar",), "baz quux")
+ self.assertEqual(None, node.key())
+
+ def test_key_after_unmap(self):
+ node = self.module._deserialise_leaf_node(
+ "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ node.unmap(None, ("foo bar",))
+ self.assertEqual(None, node.key())
+
+
+class TestDeserialiseInternalNode(tests.TestCase):
+
+ module = None
+
+ def assertDeserialiseErrors(self, text):
+ self.assertRaises((ValueError, IndexError),
+ self.module._deserialise_internal_node, text,
+ stuple('not-a-real-sha',))
+
+ def test_raises_on_non_internal(self):
+ self.assertDeserialiseErrors('')
+ self.assertDeserialiseErrors('short\n')
+ self.assertDeserialiseErrors('chknotnode:\n')
+ self.assertDeserialiseErrors('chknode:x\n')
+ self.assertDeserialiseErrors('chknode:\n')
+ self.assertDeserialiseErrors('chknode:\nnotint\n')
+ self.assertDeserialiseErrors('chknode:\n10\n')
+ self.assertDeserialiseErrors('chknode:\n10\n256\n')
+ self.assertDeserialiseErrors('chknode:\n10\n256\n10\n')
+ # no trailing newline
+ self.assertDeserialiseErrors('chknode:\n10\n256\n0\n1\nfo')
+
+ def test_deserialise_one(self):
+ node = self.module._deserialise_internal_node(
+ "chknode:\n10\n1\n1\n\na\x00sha1:abcd\n", stuple('sha1:1234',))
+ self.assertIsInstance(node, chk_map.InternalNode)
+ self.assertEqual(1, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertEqual('', node._search_prefix)
+ self.assertEqual({'a': ('sha1:abcd',)}, node._items)
+
+ def test_deserialise_with_prefix(self):
+ node = self.module._deserialise_internal_node(
+ "chknode:\n10\n1\n1\npref\na\x00sha1:abcd\n", stuple('sha1:1234',))
+ self.assertIsInstance(node, chk_map.InternalNode)
+ self.assertEqual(1, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertEqual('pref', node._search_prefix)
+ self.assertEqual({'prefa': ('sha1:abcd',)}, node._items)
+
+ node = self.module._deserialise_internal_node(
+ "chknode:\n10\n1\n1\npref\n\x00sha1:abcd\n", stuple('sha1:1234',))
+ self.assertIsInstance(node, chk_map.InternalNode)
+ self.assertEqual(1, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertEqual('pref', node._search_prefix)
+ self.assertEqual({'pref': ('sha1:abcd',)}, node._items)
+
+ def test_deserialise_pref_with_null(self):
+ node = self.module._deserialise_internal_node(
+ "chknode:\n10\n1\n1\npref\x00fo\n\x00sha1:abcd\n",
+ stuple('sha1:1234',))
+ self.assertIsInstance(node, chk_map.InternalNode)
+ self.assertEqual(1, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertEqual('pref\x00fo', node._search_prefix)
+ self.assertEqual({'pref\x00fo': ('sha1:abcd',)}, node._items)
+
+ def test_deserialise_with_null_pref(self):
+ node = self.module._deserialise_internal_node(
+ "chknode:\n10\n1\n1\npref\x00fo\n\x00\x00sha1:abcd\n",
+ stuple('sha1:1234',))
+ self.assertIsInstance(node, chk_map.InternalNode)
+ self.assertEqual(1, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertEqual('pref\x00fo', node._search_prefix)
+ self.assertEqual({'pref\x00fo\x00': ('sha1:abcd',)}, node._items)
+
+
+class Test_BytesToTextKey(tests.TestCase):
+
+ def assertBytesToTextKey(self, key, bytes):
+ self.assertEqual(key,
+ self.module._bytes_to_text_key(bytes))
+
+ def assertBytesToTextKeyRaises(self, bytes):
+ # These are invalid bytes, and we want to make sure the code under test
+ # raises an exception rather than segfaults, etc. We don't particularly
+ # care what exception.
+ self.assertRaises(Exception, self.module._bytes_to_text_key, bytes)
+
+ def test_file(self):
+ self.assertBytesToTextKey(('file-id', 'revision-id'),
+ 'file: file-id\nparent-id\nname\nrevision-id\n'
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709\n100\nN')
+
+ def test_invalid_no_kind(self):
+ self.assertBytesToTextKeyRaises(
+ 'file file-id\nparent-id\nname\nrevision-id\n'
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709\n100\nN')
+
+ def test_invalid_no_space(self):
+ self.assertBytesToTextKeyRaises(
+ 'file:file-id\nparent-id\nname\nrevision-id\n'
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709\n100\nN')
+
+ def test_invalid_too_short_file_id(self):
+ self.assertBytesToTextKeyRaises('file:file-id')
+
+ def test_invalid_too_short_parent_id(self):
+ self.assertBytesToTextKeyRaises('file:file-id\nparent-id')
+
+ def test_invalid_too_short_name(self):
+ self.assertBytesToTextKeyRaises('file:file-id\nparent-id\nname')
+
+ def test_dir(self):
+ self.assertBytesToTextKey(('dir-id', 'revision-id'),
+ 'dir: dir-id\nparent-id\nname\nrevision-id')
diff --git a/bzrlib/tests/test__chunks_to_lines.py b/bzrlib/tests/test__chunks_to_lines.py
new file mode 100644
index 0000000..6a9c636
--- /dev/null
+++ b/bzrlib/tests/test__chunks_to_lines.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests for chunks_to_lines."""
+
+from bzrlib import tests
+from bzrlib.tests import (
+ features,
+ )
+
+
+def load_tests(standard_tests, module, loader):
+ suite, _ = tests.permute_tests_for_extension(
+ standard_tests, loader, 'bzrlib._chunks_to_lines_py',
+ 'bzrlib._chunks_to_lines_pyx')
+ return suite
+
+# test_osutils depends on this feature being around. We can't just use the one
+# generated by load_tests, because if we only load osutils but not this module,
+# then that code never gets run
+compiled_chunkstolines_feature = features.ModuleAvailableFeature(
+ 'bzrlib._chunks_to_lines_pyx')
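+
+# As exercised by the tests below, chunks_to_lines() takes a list of string
+# chunks split at arbitrary boundaries and returns a list of lines, each
+# ending in '\n' except possibly the last; when the input is already a list
+# of lines the very same object is returned (the alreadly_lines=True checks).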
+
+
+class TestChunksToLines(tests.TestCase):
+
+ module = None # Filled in by test parameterization
+
+ def assertChunksToLines(self, lines, chunks, alreadly_lines=False):
+ result = self.module.chunks_to_lines(chunks)
+ self.assertEqual(lines, result)
+ if alreadly_lines:
+ self.assertIs(chunks, result)
+
+ def test_fulltext_chunk_to_lines(self):
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz\n'],
+ ['foo\nbar\r\nba\rz\n'])
+ self.assertChunksToLines(['foobarbaz\n'], ['foobarbaz\n'],
+ alreadly_lines=True)
+ self.assertChunksToLines(['foo\n', 'bar\n', '\n', 'baz\n', '\n', '\n'],
+ ['foo\nbar\n\nbaz\n\n\n'])
+ self.assertChunksToLines(['foobarbaz'], ['foobarbaz'],
+ alreadly_lines=True)
+ self.assertChunksToLines(['foobarbaz'], ['foo', 'bar', 'baz'])
+
+ def test_newlines(self):
+ self.assertChunksToLines(['\n'], ['\n'], alreadly_lines=True)
+ self.assertChunksToLines(['\n'], ['', '\n', ''])
+ self.assertChunksToLines(['\n'], ['\n', ''])
+ self.assertChunksToLines(['\n'], ['', '\n'])
+ self.assertChunksToLines(['\n', '\n', '\n'], ['\n\n\n'])
+ self.assertChunksToLines(['\n', '\n', '\n'], ['\n', '\n', '\n'],
+ alreadly_lines=True)
+
+ def test_lines_to_lines(self):
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz\n'],
+ ['foo\n', 'bar\r\n', 'ba\rz\n'],
+ alreadly_lines=True)
+
+ def test_no_final_newline(self):
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz'],
+ ['foo\nbar\r\nba\rz'])
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz'],
+ ['foo\n', 'bar\r\n', 'ba\rz'],
+ alreadly_lines=True)
+ self.assertChunksToLines(('foo\n', 'bar\r\n', 'ba\rz'),
+ ('foo\n', 'bar\r\n', 'ba\rz'),
+ alreadly_lines=True)
+ self.assertChunksToLines([], [], alreadly_lines=True)
+ self.assertChunksToLines(['foobarbaz'], ['foobarbaz'],
+ alreadly_lines=True)
+ self.assertChunksToLines([], [''])
+
+ def test_mixed(self):
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz'],
+ ['foo\n', 'bar\r\nba\r', 'z'])
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz'],
+ ['foo\nb', 'a', 'r\r\nba\r', 'z'])
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz'],
+ ['foo\nbar\r\nba', '\r', 'z'])
+
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz'],
+ ['foo\n', '', 'bar\r\nba', '\r', 'z'])
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz\n'],
+ ['foo\n', 'bar\r\n', 'ba\rz\n', ''])
+ self.assertChunksToLines(['foo\n', 'bar\r\n', 'ba\rz\n'],
+ ['foo\n', 'bar', '\r\n', 'ba\rz\n'])
+
+ def test_not_lines(self):
+ # We should raise a TypeError, not crash
+ self.assertRaises(TypeError, self.module.chunks_to_lines,
+ object())
+ self.assertRaises(TypeError, self.module.chunks_to_lines,
+ [object()])
+ self.assertRaises(TypeError, self.module.chunks_to_lines,
+ ['foo', object()])
diff --git a/bzrlib/tests/test__dirstate_helpers.py b/bzrlib/tests/test__dirstate_helpers.py
new file mode 100644
index 0000000..6c5080f
--- /dev/null
+++ b/bzrlib/tests/test__dirstate_helpers.py
@@ -0,0 +1,1397 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the compiled dirstate helpers."""
+
+import bisect
+import os
+import time
+
+from bzrlib import (
+ dirstate,
+ errors,
+ osutils,
+ tests,
+ _dirstate_helpers_py,
+ )
+from bzrlib.tests import (
+ test_dirstate,
+ )
+from bzrlib.tests.test_osutils import dir_reader_scenarios
+from bzrlib.tests.scenarios import (
+ load_tests_apply_scenarios,
+ multiply_scenarios,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+load_tests = load_tests_apply_scenarios
+
+
+compiled_dirstate_helpers_feature = features.ModuleAvailableFeature(
+ 'bzrlib._dirstate_helpers_pyx')
+
+
+# FIXME: we should also parametrize against SHA1Provider !
+
+ue_scenarios = [('dirstate_Python',
+ {'update_entry': dirstate.py_update_entry})]
+if compiled_dirstate_helpers_feature.available():
+ update_entry = compiled_dirstate_helpers_feature.module.update_entry
+ ue_scenarios.append(('dirstate_Pyrex', {'update_entry': update_entry}))
+
+pe_scenarios = [('dirstate_Python',
+ {'_process_entry': dirstate.ProcessEntryPython})]
+if compiled_dirstate_helpers_feature.available():
+ process_entry = compiled_dirstate_helpers_feature.module.ProcessEntryC
+ pe_scenarios.append(('dirstate_Pyrex', {'_process_entry': process_entry}))
+
+helper_scenarios = [('dirstate_Python', {'helpers': _dirstate_helpers_py})]
+if compiled_dirstate_helpers_feature.available():
+ helper_scenarios.append(('dirstate_Pyrex',
+ {'helpers': compiled_dirstate_helpers_feature.module}))
+
+
+class TestBisectPathMixin(object):
+ """Test that _bisect_path_*() returns the expected values.
+
+ _bisect_path_* is intended to work like bisect.bisect_*() except it
+ knows it is working on paths that are sorted by ('path', 'to', 'foo')
+ chunks rather than by raw 'path/to/foo'.
+
+ Test Cases should inherit from this and override ``get_bisect_path`` to
+ return their implementation, and ``get_bisect`` to return the matching
+ bisect.bisect_* function.
+ """
+
+ def get_bisect_path(self):
+ """Return an implementation of _bisect_path_*"""
+ raise NotImplementedError
+
+ def get_bisect(self):
+ """Return a version of bisect.bisect_*.
+
+ Also, for the 'exists' check, return the offset to the real values.
+ For example, bisect_left returns the index of an entry, while
+ bisect_right returns the index *after* an entry.
+
+ :return: (bisect_func, offset)
+ """
+ raise NotImplementedError
+
+ def assertBisect(self, paths, split_paths, path, exists=True):
+ """Assert that bisect_split works like bisect_left on the split paths.
+
+ :param paths: A list of path names
+ :param split_paths: A list of path names that are already split up by directory
+ ('path/to/foo' => ('path', 'to', 'foo'))
+ :param path: The path we are indexing.
+ :param exists: The path should be present, so make sure the
+ final location actually points to the right value.
+
+ All other arguments will be passed along.
+ """
+ bisect_path = self.get_bisect_path()
+ self.assertIsInstance(paths, list)
+ bisect_path_idx = bisect_path(paths, path)
+ split_path = self.split_for_dirblocks([path])[0]
+ bisect_func, offset = self.get_bisect()
+ bisect_split_idx = bisect_func(split_paths, split_path)
+ self.assertEqual(bisect_split_idx, bisect_path_idx,
+ '%s disagreed. %s != %s'
+ ' for key %r'
+ % (bisect_path.__name__,
+ bisect_split_idx, bisect_path_idx, path)
+ )
+ if exists:
+ self.assertEqual(path, paths[bisect_path_idx+offset])
+
+ def split_for_dirblocks(self, paths):
+ dir_split_paths = []
+ for path in paths:
+ dirname, basename = os.path.split(path)
+ dir_split_paths.append((dirname.split('/'), basename))
+ dir_split_paths.sort()
+ return dir_split_paths
+
+ def test_simple(self):
+ """In the simple case it works just like bisect_left"""
+ paths = ['', 'a', 'b', 'c', 'd']
+ split_paths = self.split_for_dirblocks(paths)
+ for path in paths:
+ self.assertBisect(paths, split_paths, path, exists=True)
+ self.assertBisect(paths, split_paths, '_', exists=False)
+ self.assertBisect(paths, split_paths, 'aa', exists=False)
+ self.assertBisect(paths, split_paths, 'bb', exists=False)
+ self.assertBisect(paths, split_paths, 'cc', exists=False)
+ self.assertBisect(paths, split_paths, 'dd', exists=False)
+ self.assertBisect(paths, split_paths, 'a/a', exists=False)
+ self.assertBisect(paths, split_paths, 'b/b', exists=False)
+ self.assertBisect(paths, split_paths, 'c/c', exists=False)
+ self.assertBisect(paths, split_paths, 'd/d', exists=False)
+
+ def test_involved(self):
+ """This is where bisect_path_* diverges slightly."""
+ # This is the list of paths and their contents
+ # a/
+ # a/
+ # a
+ # z
+ # a-a/
+ # a
+ # a-z/
+ # z
+ # a=a/
+ # a
+ # a=z/
+ # z
+ # z/
+ # a
+ # z
+ # z-a
+ # z-z
+ # z=a
+ # z=z
+ # a-a/
+ # a
+ # a-z/
+ # z
+ # a=a/
+ # a
+ # a=z/
+ # z
+ # This is the exact order that is stored by dirstate
+ # All children in a directory are mentioned before any children of
+ # children are mentioned.
+ # So all the root-directory paths come first, then everything in the
+ # first subdirectory, and so on.
+ paths = [# content of '/'
+ '', 'a', 'a-a', 'a-z', 'a=a', 'a=z',
+ # content of 'a/'
+ 'a/a', 'a/a-a', 'a/a-z',
+ 'a/a=a', 'a/a=z',
+ 'a/z', 'a/z-a', 'a/z-z',
+ 'a/z=a', 'a/z=z',
+ # content of 'a/a/'
+ 'a/a/a', 'a/a/z',
+ # content of 'a/a-a'
+ 'a/a-a/a',
+ # content of 'a/a-z'
+ 'a/a-z/z',
+ # content of 'a/a=a'
+ 'a/a=a/a',
+ # content of 'a/a=z'
+ 'a/a=z/z',
+ # content of 'a/z/'
+ 'a/z/a', 'a/z/z',
+ # content of 'a-a'
+ 'a-a/a',
+ # content of 'a-z'
+ 'a-z/z',
+ # content of 'a=a'
+ 'a=a/a',
+ # content of 'a=z'
+ 'a=z/z',
+ ]
+ split_paths = self.split_for_dirblocks(paths)
+ sorted_paths = []
+ for dir_parts, basename in split_paths:
+ if dir_parts == ['']:
+ sorted_paths.append(basename)
+ else:
+ sorted_paths.append('/'.join(dir_parts + [basename]))
+
+ self.assertEqual(sorted_paths, paths)
+
+ for path in paths:
+ self.assertBisect(paths, split_paths, path, exists=True)
+
+
+class TestBisectPathLeft(tests.TestCase, TestBisectPathMixin):
+ """Run all Bisect Path tests against _bisect_path_left."""
+
+ def get_bisect_path(self):
+ from bzrlib._dirstate_helpers_py import _bisect_path_left
+ return _bisect_path_left
+
+ def get_bisect(self):
+ return bisect.bisect_left, 0
+
+
+class TestCompiledBisectPathLeft(TestBisectPathLeft):
+ """Run all Bisect Path tests against _bisect_path_lect"""
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def get_bisect_path(self):
+ from bzrlib._dirstate_helpers_pyx import _bisect_path_left
+ return _bisect_path_left
+
+
+class TestBisectPathRight(tests.TestCase, TestBisectPathMixin):
+ """Run all Bisect Path tests against _bisect_path_right"""
+
+ def get_bisect_path(self):
+ from bzrlib._dirstate_helpers_py import _bisect_path_right
+ return _bisect_path_right
+
+ def get_bisect(self):
+ return bisect.bisect_right, -1
+
+
+class TestCompiledBisectPathRight(TestBisectPathRight):
+ """Run all Bisect Path tests against _bisect_path_right"""
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def get_bisect_path(self):
+ from bzrlib._dirstate_helpers_pyx import _bisect_path_right
+ return _bisect_path_right
+
+
+class TestBisectDirblock(tests.TestCase):
+ """Test that bisect_dirblock() returns the expected values.
+
+ bisect_dirblock is intended to work like bisect.bisect_left() except it
+ knows it is working on dirblocks and that dirblocks are sorted by ('path',
+ 'to', 'foo') chunks rather than by raw 'path/to/foo'.
+
+ This test is parameterized by calling get_bisect_dirblock(). Child test
+ cases can override this function to test against a different
+ implementation.
+ """
+
+ def get_bisect_dirblock(self):
+ """Return an implementation of bisect_dirblock"""
+ from bzrlib._dirstate_helpers_py import bisect_dirblock
+ return bisect_dirblock
+
+ def assertBisect(self, dirblocks, split_dirblocks, path, *args, **kwargs):
+ """Assert that bisect_split works like bisect_left on the split paths.
+
+ :param dirblocks: A list of (path, [info]) pairs.
+ :param split_dirblocks: A list of ((split, path), [info]) pairs.
+ :param path: The path we are indexing.
+
+ All other arguments will be passed along.
+ """
+ bisect_dirblock = self.get_bisect_dirblock()
+ self.assertIsInstance(dirblocks, list)
+ bisect_split_idx = bisect_dirblock(dirblocks, path, *args, **kwargs)
+ split_dirblock = (path.split('/'), [])
+ bisect_left_idx = bisect.bisect_left(split_dirblocks, split_dirblock,
+ *args)
+ self.assertEqual(bisect_left_idx, bisect_split_idx,
+ 'bisect_split disagreed. %s != %s'
+ ' for key %r'
+ % (bisect_left_idx, bisect_split_idx, path)
+ )
+
+ def paths_to_dirblocks(self, paths):
+ """Convert a list of paths into dirblock form.
+
+ Also, ensure that the paths are in proper sorted order.
+ """
+ dirblocks = [(path, []) for path in paths]
+ split_dirblocks = [(path.split('/'), []) for path in paths]
+ self.assertEqual(sorted(split_dirblocks), split_dirblocks)
+ return dirblocks, split_dirblocks
+
+ def test_simple(self):
+ """In the simple case it works just like bisect_left"""
+ paths = ['', 'a', 'b', 'c', 'd']
+ dirblocks, split_dirblocks = self.paths_to_dirblocks(paths)
+ for path in paths:
+ self.assertBisect(dirblocks, split_dirblocks, path)
+ self.assertBisect(dirblocks, split_dirblocks, '_')
+ self.assertBisect(dirblocks, split_dirblocks, 'aa')
+ self.assertBisect(dirblocks, split_dirblocks, 'bb')
+ self.assertBisect(dirblocks, split_dirblocks, 'cc')
+ self.assertBisect(dirblocks, split_dirblocks, 'dd')
+ self.assertBisect(dirblocks, split_dirblocks, 'a/a')
+ self.assertBisect(dirblocks, split_dirblocks, 'b/b')
+ self.assertBisect(dirblocks, split_dirblocks, 'c/c')
+ self.assertBisect(dirblocks, split_dirblocks, 'd/d')
+
+ def test_involved(self):
+ """This is where bisect_left diverges slightly."""
+ paths = ['', 'a',
+ 'a/a', 'a/a/a', 'a/a/z', 'a/a-a', 'a/a-z',
+ 'a/z', 'a/z/a', 'a/z/z', 'a/z-a', 'a/z-z',
+ 'a-a', 'a-z',
+ 'z', 'z/a/a', 'z/a/z', 'z/a-a', 'z/a-z',
+ 'z/z', 'z/z/a', 'z/z/z', 'z/z-a', 'z/z-z',
+ 'z-a', 'z-z',
+ ]
+ dirblocks, split_dirblocks = self.paths_to_dirblocks(paths)
+ for path in paths:
+ self.assertBisect(dirblocks, split_dirblocks, path)
+
+ def test_involved_cached(self):
+ """This is where bisect_left diverges slightly."""
+ paths = ['', 'a',
+ 'a/a', 'a/a/a', 'a/a/z', 'a/a-a', 'a/a-z',
+ 'a/z', 'a/z/a', 'a/z/z', 'a/z-a', 'a/z-z',
+ 'a-a', 'a-z',
+ 'z', 'z/a/a', 'z/a/z', 'z/a-a', 'z/a-z',
+ 'z/z', 'z/z/a', 'z/z/z', 'z/z-a', 'z/z-z',
+ 'z-a', 'z-z',
+ ]
+ cache = {}
+ dirblocks, split_dirblocks = self.paths_to_dirblocks(paths)
+ for path in paths:
+ self.assertBisect(dirblocks, split_dirblocks, path, cache=cache)
+
+
+class TestCompiledBisectDirblock(TestBisectDirblock):
+ """Test that bisect_dirblock() returns the expected values.
+
+ bisect_dirblock is intended to work like bisect.bisect_left() except it
+ knows it is working on dirblocks and that dirblocks are sorted by ('path',
+ 'to', 'foo') chunks rather than by raw 'path/to/foo'.
+
+ This runs all the normal tests that TestBisectDirblock did, but uses the
+ compiled version.
+ """
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def get_bisect_dirblock(self):
+ from bzrlib._dirstate_helpers_pyx import bisect_dirblock
+ return bisect_dirblock
+
+
+class TestCmpByDirs(tests.TestCase):
+ """Test an implementation of cmp_by_dirs()
+
+ cmp_by_dirs() compares 2 paths by their directory sections, rather than as
+ plain strings.
+
+ Child test cases can override ``get_cmp_by_dirs`` to test a specific
+ implementation.
+ """
+
+ def get_cmp_by_dirs(self):
+ """Get a specific implementation of cmp_by_dirs."""
+ from bzrlib._dirstate_helpers_py import cmp_by_dirs
+ return cmp_by_dirs
+
+ def assertCmpByDirs(self, expected, str1, str2):
+ """Compare the two strings, in both directions.
+
+ :param expected: The expected comparison value. -1 means str1 comes
+ first, 0 means they are equal, 1 means str2 comes first
+ :param str1: string to compare
+ :param str2: string to compare
+ """
+ cmp_by_dirs = self.get_cmp_by_dirs()
+ if expected == 0:
+ self.assertEqual(str1, str2)
+ self.assertEqual(0, cmp_by_dirs(str1, str2))
+ self.assertEqual(0, cmp_by_dirs(str2, str1))
+ elif expected > 0:
+ self.assertPositive(cmp_by_dirs(str1, str2))
+ self.assertNegative(cmp_by_dirs(str2, str1))
+ else:
+ self.assertNegative(cmp_by_dirs(str1, str2))
+ self.assertPositive(cmp_by_dirs(str2, str1))
+
+ def test_cmp_empty(self):
+ """Compare against the empty string."""
+ self.assertCmpByDirs(0, '', '')
+ self.assertCmpByDirs(1, 'a', '')
+ self.assertCmpByDirs(1, 'ab', '')
+ self.assertCmpByDirs(1, 'abc', '')
+ self.assertCmpByDirs(1, 'abcd', '')
+ self.assertCmpByDirs(1, 'abcde', '')
+ self.assertCmpByDirs(1, 'abcdef', '')
+ self.assertCmpByDirs(1, 'abcdefg', '')
+ self.assertCmpByDirs(1, 'abcdefgh', '')
+ self.assertCmpByDirs(1, 'abcdefghi', '')
+ self.assertCmpByDirs(1, 'test/ing/a/path/', '')
+
+ def test_cmp_same_str(self):
+ """Compare the same string"""
+ self.assertCmpByDirs(0, 'a', 'a')
+ self.assertCmpByDirs(0, 'ab', 'ab')
+ self.assertCmpByDirs(0, 'abc', 'abc')
+ self.assertCmpByDirs(0, 'abcd', 'abcd')
+ self.assertCmpByDirs(0, 'abcde', 'abcde')
+ self.assertCmpByDirs(0, 'abcdef', 'abcdef')
+ self.assertCmpByDirs(0, 'abcdefg', 'abcdefg')
+ self.assertCmpByDirs(0, 'abcdefgh', 'abcdefgh')
+ self.assertCmpByDirs(0, 'abcdefghi', 'abcdefghi')
+ self.assertCmpByDirs(0, 'testing a long string', 'testing a long string')
+ self.assertCmpByDirs(0, 'x'*10000, 'x'*10000)
+ self.assertCmpByDirs(0, 'a/b', 'a/b')
+ self.assertCmpByDirs(0, 'a/b/c', 'a/b/c')
+ self.assertCmpByDirs(0, 'a/b/c/d', 'a/b/c/d')
+ self.assertCmpByDirs(0, 'a/b/c/d/e', 'a/b/c/d/e')
+
+ def test_simple_paths(self):
+ """Compare strings that act like normal string comparison"""
+ self.assertCmpByDirs(-1, 'a', 'b')
+ self.assertCmpByDirs(-1, 'aa', 'ab')
+ self.assertCmpByDirs(-1, 'ab', 'bb')
+ self.assertCmpByDirs(-1, 'aaa', 'aab')
+ self.assertCmpByDirs(-1, 'aab', 'abb')
+ self.assertCmpByDirs(-1, 'abb', 'bbb')
+ self.assertCmpByDirs(-1, 'aaaa', 'aaab')
+ self.assertCmpByDirs(-1, 'aaab', 'aabb')
+ self.assertCmpByDirs(-1, 'aabb', 'abbb')
+ self.assertCmpByDirs(-1, 'abbb', 'bbbb')
+ self.assertCmpByDirs(-1, 'aaaaa', 'aaaab')
+ self.assertCmpByDirs(-1, 'a/a', 'a/b')
+ self.assertCmpByDirs(-1, 'a/b', 'b/b')
+ self.assertCmpByDirs(-1, 'a/a/a', 'a/a/b')
+ self.assertCmpByDirs(-1, 'a/a/b', 'a/b/b')
+ self.assertCmpByDirs(-1, 'a/b/b', 'b/b/b')
+ self.assertCmpByDirs(-1, 'a/a/a/a', 'a/a/a/b')
+ self.assertCmpByDirs(-1, 'a/a/a/b', 'a/a/b/b')
+ self.assertCmpByDirs(-1, 'a/a/b/b', 'a/b/b/b')
+ self.assertCmpByDirs(-1, 'a/b/b/b', 'b/b/b/b')
+ self.assertCmpByDirs(-1, 'a/a/a/a/a', 'a/a/a/a/b')
+
+ def test_tricky_paths(self):
+ self.assertCmpByDirs(1, 'ab/cd/ef', 'ab/cc/ef')
+ self.assertCmpByDirs(1, 'ab/cd/ef', 'ab/c/ef')
+ self.assertCmpByDirs(-1, 'ab/cd/ef', 'ab/cd-ef')
+ self.assertCmpByDirs(-1, 'ab/cd', 'ab/cd-')
+ self.assertCmpByDirs(-1, 'ab/cd', 'ab-cd')
+
+ def test_cmp_unicode_not_allowed(self):
+ cmp_by_dirs = self.get_cmp_by_dirs()
+ self.assertRaises(TypeError, cmp_by_dirs, u'Unicode', 'str')
+ self.assertRaises(TypeError, cmp_by_dirs, 'str', u'Unicode')
+ self.assertRaises(TypeError, cmp_by_dirs, u'Unicode', u'Unicode')
+
+ def test_cmp_non_ascii(self):
+ self.assertCmpByDirs(-1, '\xc2\xb5', '\xc3\xa5') # u'\xb5', u'\xe5'
+ self.assertCmpByDirs(-1, 'a', '\xc3\xa5') # u'a', u'\xe5'
+ self.assertCmpByDirs(-1, 'b', '\xc2\xb5') # u'b', u'\xb5'
+ self.assertCmpByDirs(-1, 'a/b', 'a/\xc3\xa5') # u'a/b', u'a/\xe5'
+ self.assertCmpByDirs(-1, 'b/a', 'b/\xc2\xb5') # u'b/a', u'b/\xb5'
+
+
+class TestCompiledCmpByDirs(TestCmpByDirs):
+ """Test the pyrex implementation of cmp_by_dirs"""
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def get_cmp_by_dirs(self):
+ from bzrlib._dirstate_helpers_pyx import cmp_by_dirs
+ return cmp_by_dirs
+
+
+class TestCmpPathByDirblock(tests.TestCase):
+ """Test an implementation of _cmp_path_by_dirblock()
+
+ _cmp_path_by_dirblock() compares two paths using the sort order used by
+ DirState. All paths in the same directory are sorted together.
+
+ Child test cases can override ``get_cmp_path_by_dirblock`` to test a specific
+ implementation.
+ """
+
+ def get_cmp_path_by_dirblock(self):
+ """Get a specific implementation of _cmp_path_by_dirblock."""
+ from bzrlib._dirstate_helpers_py import _cmp_path_by_dirblock
+ return _cmp_path_by_dirblock
+
+ def assertCmpPathByDirblock(self, paths):
+ """Compare all paths and make sure they evaluate to the correct order.
+
+ This does N^2 comparisons. It is assumed that ``paths`` is a properly
+ sorted list.
+
+ :param paths: a sorted list of paths to compare
+ """
+ # First, make sure the paths being passed in are correct
+ def _key(p):
+ dirname, basename = os.path.split(p)
+ return dirname.split('/'), basename
+ self.assertEqual(sorted(paths, key=_key), paths)
+
+ cmp_path_by_dirblock = self.get_cmp_path_by_dirblock()
+ for idx1, path1 in enumerate(paths):
+ for idx2, path2 in enumerate(paths):
+ cmp_val = cmp_path_by_dirblock(path1, path2)
+ if idx1 < idx2:
+ self.assertTrue(cmp_val < 0,
+ '%s did not state that %r came before %r, cmp=%s'
+ % (cmp_path_by_dirblock.__name__,
+ path1, path2, cmp_val))
+ elif idx1 > idx2:
+ self.assertTrue(cmp_val > 0,
+ '%s did not state that %r came after %r, cmp=%s'
+ % (cmp_path_by_dirblock.__name__,
+ path1, path2, cmp_val))
+ else: # idx1 == idx2
+ self.assertTrue(cmp_val == 0,
+ '%s did not state that %r == %r, cmp=%s'
+ % (cmp_path_by_dirblock.__name__,
+ path1, path2, cmp_val))
+
+ def test_cmp_simple_paths(self):
+ """Compare against the empty string."""
+ self.assertCmpPathByDirblock(['', 'a', 'ab', 'abc', 'a/b/c', 'b/d/e'])
+ self.assertCmpPathByDirblock(['kl', 'ab/cd', 'ab/ef', 'gh/ij'])
+
+ def test_tricky_paths(self):
+ self.assertCmpPathByDirblock([
+ # Contents of ''
+ '', 'a', 'a-a', 'a=a', 'b',
+ # Contents of 'a'
+ 'a/a', 'a/a-a', 'a/a=a', 'a/b',
+ # Contents of 'a/a'
+ 'a/a/a', 'a/a/a-a', 'a/a/a=a',
+ # Contents of 'a/a/a'
+ 'a/a/a/a', 'a/a/a/b',
+ # Contents of 'a/a/a-a',
+ 'a/a/a-a/a', 'a/a/a-a/b',
+ # Contents of 'a/a/a=a',
+ 'a/a/a=a/a', 'a/a/a=a/b',
+ # Contents of 'a/a-a'
+ 'a/a-a/a',
+ # Contents of 'a/a-a/a'
+ 'a/a-a/a/a', 'a/a-a/a/b',
+ # Contents of 'a/a=a'
+ 'a/a=a/a',
+ # Contents of 'a/b'
+ 'a/b/a', 'a/b/b',
+ # Contents of 'a-a',
+ 'a-a/a', 'a-a/b',
+ # Contents of 'a=a',
+ 'a=a/a', 'a=a/b',
+ # Contents of 'b',
+ 'b/a', 'b/b',
+ ])
+ self.assertCmpPathByDirblock([
+ # content of '/'
+ '', 'a', 'a-a', 'a-z', 'a=a', 'a=z',
+ # content of 'a/'
+ 'a/a', 'a/a-a', 'a/a-z',
+ 'a/a=a', 'a/a=z',
+ 'a/z', 'a/z-a', 'a/z-z',
+ 'a/z=a', 'a/z=z',
+ # content of 'a/a/'
+ 'a/a/a', 'a/a/z',
+ # content of 'a/a-a'
+ 'a/a-a/a',
+ # content of 'a/a-z'
+ 'a/a-z/z',
+ # content of 'a/a=a'
+ 'a/a=a/a',
+ # content of 'a/a=z'
+ 'a/a=z/z',
+ # content of 'a/z/'
+ 'a/z/a', 'a/z/z',
+ # content of 'a-a'
+ 'a-a/a',
+ # content of 'a-z'
+ 'a-z/z',
+ # content of 'a=a'
+ 'a=a/a',
+ # content of 'a=z'
+ 'a=z/z',
+ ])
+
+ def test_unicode_not_allowed(self):
+ cmp_path_by_dirblock = self.get_cmp_path_by_dirblock()
+ self.assertRaises(TypeError, cmp_path_by_dirblock, u'Uni', 'str')
+ self.assertRaises(TypeError, cmp_path_by_dirblock, 'str', u'Uni')
+ self.assertRaises(TypeError, cmp_path_by_dirblock, u'Uni', u'Uni')
+ self.assertRaises(TypeError, cmp_path_by_dirblock, u'x/Uni', 'x/str')
+ self.assertRaises(TypeError, cmp_path_by_dirblock, 'x/str', u'x/Uni')
+ self.assertRaises(TypeError, cmp_path_by_dirblock, u'x/Uni', u'x/Uni')
+
+ def test_nonascii(self):
+ self.assertCmpPathByDirblock([
+ # content of '/'
+ '', 'a', '\xc2\xb5', '\xc3\xa5',
+ # content of 'a'
+ 'a/a', 'a/\xc2\xb5', 'a/\xc3\xa5',
+ # content of 'a/a'
+ 'a/a/a', 'a/a/\xc2\xb5', 'a/a/\xc3\xa5',
+ # content of 'a/\xc2\xb5'
+ 'a/\xc2\xb5/a', 'a/\xc2\xb5/\xc2\xb5', 'a/\xc2\xb5/\xc3\xa5',
+ # content of 'a/\xc3\xa5'
+ 'a/\xc3\xa5/a', 'a/\xc3\xa5/\xc2\xb5', 'a/\xc3\xa5/\xc3\xa5',
+ # content of '\xc2\xb5'
+ '\xc2\xb5/a', '\xc2\xb5/\xc2\xb5', '\xc2\xb5/\xc3\xa5',
+ # content of '\xc2\xe5'
+ '\xc3\xa5/a', '\xc3\xa5/\xc2\xb5', '\xc3\xa5/\xc3\xa5',
+ ])
+
+
+class TestCompiledCmpPathByDirblock(TestCmpPathByDirblock):
+ """Test the pyrex implementation of _cmp_path_by_dirblock"""
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def get_cmp_path_by_dirblock(self):
+ from bzrlib._dirstate_helpers_pyx import _cmp_path_by_dirblock
+ return _cmp_path_by_dirblock
+
+
+class TestMemRChr(tests.TestCase):
+ """Test memrchr functionality"""
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def assertMemRChr(self, expected, s, c):
+ from bzrlib._dirstate_helpers_pyx import _py_memrchr
+ self.assertEqual(expected, _py_memrchr(s, c))
+
+ def test_missing(self):
+ self.assertMemRChr(None, '', 'a')
+ self.assertMemRChr(None, '', 'c')
+ self.assertMemRChr(None, 'abcdefghijklm', 'q')
+ self.assertMemRChr(None, 'aaaaaaaaaaaaaaaaaaaaaaa', 'b')
+
+ def test_single_entry(self):
+ self.assertMemRChr(0, 'abcdefghijklm', 'a')
+ self.assertMemRChr(1, 'abcdefghijklm', 'b')
+ self.assertMemRChr(2, 'abcdefghijklm', 'c')
+ self.assertMemRChr(10, 'abcdefghijklm', 'k')
+ self.assertMemRChr(11, 'abcdefghijklm', 'l')
+ self.assertMemRChr(12, 'abcdefghijklm', 'm')
+
+ def test_multiple(self):
+ self.assertMemRChr(10, 'abcdefjklmabcdefghijklm', 'a')
+ self.assertMemRChr(11, 'abcdefjklmabcdefghijklm', 'b')
+ self.assertMemRChr(12, 'abcdefjklmabcdefghijklm', 'c')
+ self.assertMemRChr(20, 'abcdefjklmabcdefghijklm', 'k')
+ self.assertMemRChr(21, 'abcdefjklmabcdefghijklm', 'l')
+ self.assertMemRChr(22, 'abcdefjklmabcdefghijklm', 'm')
+ self.assertMemRChr(22, 'aaaaaaaaaaaaaaaaaaaaaaa', 'a')
+
+ def test_with_nulls(self):
+ self.assertMemRChr(10, 'abc\0\0\0jklmabc\0\0\0ghijklm', 'a')
+ self.assertMemRChr(11, 'abc\0\0\0jklmabc\0\0\0ghijklm', 'b')
+ self.assertMemRChr(12, 'abc\0\0\0jklmabc\0\0\0ghijklm', 'c')
+ self.assertMemRChr(20, 'abc\0\0\0jklmabc\0\0\0ghijklm', 'k')
+ self.assertMemRChr(21, 'abc\0\0\0jklmabc\0\0\0ghijklm', 'l')
+ self.assertMemRChr(22, 'abc\0\0\0jklmabc\0\0\0ghijklm', 'm')
+ self.assertMemRChr(22, 'aaa\0\0\0aaaaaaa\0\0\0aaaaaaa', 'a')
+ self.assertMemRChr(9, '\0\0\0\0\0\0\0\0\0\0', '\0')
+
+
+class TestReadDirblocks(test_dirstate.TestCaseWithDirState):
+ """Test an implementation of _read_dirblocks()
+
+ _read_dirblocks() reads in all of the dirblock information from the disk
+ file.
+
+ Child test cases can override ``get_read_dirblocks`` to test a specific
+ implementation.
+ """
+
+ # inherits scenarios from test_dirstate
+
+ def get_read_dirblocks(self):
+ from bzrlib._dirstate_helpers_py import _read_dirblocks
+ return _read_dirblocks
+
+ def test_smoketest(self):
+ """Make sure that we can create and read back a simple file."""
+ tree, state, expected = self.create_basic_dirstate()
+ del tree
+ state._read_header_if_needed()
+ self.assertEqual(dirstate.DirState.NOT_IN_MEMORY,
+ state._dirblock_state)
+ read_dirblocks = self.get_read_dirblocks()
+ read_dirblocks(state)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ def test_trailing_garbage(self):
+ tree, state, expected = self.create_basic_dirstate()
+ # On Unix, we can write extra data as long as we haven't read yet, but
+ # on Win32, if you've opened the file with FILE_SHARE_READ, trying to
+ # open it in append mode will fail.
+ state.unlock()
+ f = open('dirstate', 'ab')
+ try:
+ # Add bogus trailing garbage
+ f.write('bogus\n')
+ finally:
+ f.close()
+ state.lock_read()
+ e = self.assertRaises(errors.DirstateCorrupt,
+ state._read_dirblocks_if_needed)
+ # Make sure we mention the bogus characters in the error
+ self.assertContainsRe(str(e), 'bogus')
+
+
+class TestCompiledReadDirblocks(TestReadDirblocks):
+ """Test the pyrex implementation of _read_dirblocks"""
+
+ _test_needs_features = [compiled_dirstate_helpers_feature]
+
+ def get_read_dirblocks(self):
+ from bzrlib._dirstate_helpers_pyx import _read_dirblocks
+ return _read_dirblocks
+
+
+class TestUsingCompiledIfAvailable(tests.TestCase):
+ """Check that any compiled functions that are available are the default.
+
+ It is possible to have typos, etc. in the import line, such that
+ _dirstate_helpers_pyx is actually available, but the compiled functions are
+ not being used.
+ """
+
+ def test_bisect_dirblock(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import bisect_dirblock
+ else:
+ from bzrlib._dirstate_helpers_py import bisect_dirblock
+ self.assertIs(bisect_dirblock, dirstate.bisect_dirblock)
+
+ def test__bisect_path_left(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import _bisect_path_left
+ else:
+ from bzrlib._dirstate_helpers_py import _bisect_path_left
+ self.assertIs(_bisect_path_left, dirstate._bisect_path_left)
+
+ def test__bisect_path_right(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import _bisect_path_right
+ else:
+ from bzrlib._dirstate_helpers_py import _bisect_path_right
+ self.assertIs(_bisect_path_right, dirstate._bisect_path_right)
+
+ def test_cmp_by_dirs(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import cmp_by_dirs
+ else:
+ from bzrlib._dirstate_helpers_py import cmp_by_dirs
+ self.assertIs(cmp_by_dirs, dirstate.cmp_by_dirs)
+
+ def test__read_dirblocks(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import _read_dirblocks
+ else:
+ from bzrlib._dirstate_helpers_py import _read_dirblocks
+ self.assertIs(_read_dirblocks, dirstate._read_dirblocks)
+
+ def test_update_entry(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import update_entry
+ else:
+ from bzrlib.dirstate import update_entry
+ self.assertIs(update_entry, dirstate.update_entry)
+
+ def test_process_entry(self):
+ if compiled_dirstate_helpers_feature.available():
+ from bzrlib._dirstate_helpers_pyx import ProcessEntryC
+ self.assertIs(ProcessEntryC, dirstate._process_entry)
+ else:
+ from bzrlib.dirstate import ProcessEntryPython
+ self.assertIs(ProcessEntryPython, dirstate._process_entry)
+
+
+class TestUpdateEntry(test_dirstate.TestCaseWithDirState):
+ """Test the DirState.update_entry functions"""
+
+ scenarios = multiply_scenarios(
+ dir_reader_scenarios(), ue_scenarios)
+
+ # Set by load_tests
+ update_entry = None
+
+ def setUp(self):
+ super(TestUpdateEntry, self).setUp()
+ self.overrideAttr(dirstate, 'update_entry', self.update_entry)
+
+ def get_state_with_a(self):
+ """Create a DirState tracking a single object named 'a'"""
+ state = test_dirstate.InstrumentedDirState.initialize('dirstate')
+ self.addCleanup(state.unlock)
+ state.add('a', 'a-id', 'file', None, '')
+ entry = state._get_entry(0, path_utf8='a')
+ return state, entry
+
+ def test_observed_sha1_cachable(self):
+ state, entry = self.get_state_with_a()
+ state.save()
+ atime = time.time() - 10
+ self.build_tree(['a'])
+ statvalue = test_dirstate._FakeStat.from_stat(os.lstat('a'))
+ statvalue.st_mtime = statvalue.st_ctime = atime
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ state._observed_sha1(entry, "foo", statvalue)
+ self.assertEqual('foo', entry[1][0][1])
+ packed_stat = dirstate.pack_stat(statvalue)
+ self.assertEqual(packed_stat, entry[1][0][4])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+
+ def test_observed_sha1_not_cachable(self):
+ state, entry = self.get_state_with_a()
+ state.save()
+ oldval = entry[1][0][1]
+ oldstat = entry[1][0][4]
+ self.build_tree(['a'])
+ statvalue = os.lstat('a')
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ state._observed_sha1(entry, "foo", statvalue)
+ self.assertEqual(oldval, entry[1][0][1])
+ self.assertEqual(oldstat, entry[1][0][4])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ def test_update_entry(self):
+ state, _ = self.get_state_with_a()
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ empty_revid = tree.commit('empty')
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a-id'])
+ with_a_id = tree.commit('with_a')
+ self.addCleanup(tree.unlock)
+ state.set_parent_trees(
+ [(empty_revid, tree.branch.repository.revision_tree(empty_revid))],
+ [])
+ entry = state._get_entry(0, path_utf8='a')
+ self.build_tree(['a'])
+ # Add one where we don't provide the stat or sha already
+ self.assertEqual(('', 'a', 'a-id'), entry[0])
+ self.assertEqual(('f', '', 0, False, dirstate.DirState.NULLSTAT),
+ entry[1][0])
+ # Flush the buffers to disk
+ state.save()
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ stat_value = os.lstat('a')
+ packed_stat = dirstate.pack_stat(stat_value)
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual(None, link_or_sha1)
+
+ # The dirblock entry should not have computed or cached the file's
+ # sha1, but it did update the file's st_size. However, this is not
+ # worth writing a dirstate file for, so we leave the state UNMODIFIED
+ self.assertEqual(('f', '', 14, False, dirstate.DirState.NULLSTAT),
+ entry[1][0])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ mode = stat_value.st_mode
+ self.assertEqual([('is_exec', mode, False)], state._log)
+
+ state.save()
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ # Roll the clock back so the file is guaranteed to look too new. We
+ # should still not compute the sha1.
+ state.adjust_time(-10)
+ del state._log[:]
+
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual([('is_exec', mode, False)], state._log)
+ self.assertEqual(None, link_or_sha1)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ self.assertEqual(('f', '', 14, False, dirstate.DirState.NULLSTAT),
+ entry[1][0])
+ state.save()
+
+ # If it is cachable (the clock has moved forward) but still new, it
+ # won't calculate the sha or cache it.
+ state.adjust_time(+20)
+ del state._log[:]
+ link_or_sha1 = dirstate.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual(None, link_or_sha1)
+ self.assertEqual([('is_exec', mode, False)], state._log)
+ self.assertEqual(('f', '', 14, False, dirstate.DirState.NULLSTAT),
+ entry[1][0])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ # If the file is no longer new, and the clock has been moved forward
+ # sufficiently, it will cache the sha.
+ del state._log[:]
+ state.set_parent_trees(
+ [(with_a_id, tree.branch.repository.revision_tree(with_a_id))],
+ [])
+ entry = state._get_entry(0, path_utf8='a')
+
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual('b50e5406bb5e153ebbeb20268fcf37c87e1ecfb6',
+ link_or_sha1)
+ self.assertEqual([('is_exec', mode, False), ('sha1', 'a')],
+ state._log)
+ self.assertEqual(('f', link_or_sha1, 14, False, packed_stat),
+ entry[1][0])
+
+ # Subsequent calls will just return the cached value
+ del state._log[:]
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual('b50e5406bb5e153ebbeb20268fcf37c87e1ecfb6',
+ link_or_sha1)
+ self.assertEqual([], state._log)
+ self.assertEqual(('f', link_or_sha1, 14, False, packed_stat),
+ entry[1][0])
+
+ def test_update_entry_symlink(self):
+ """Update entry should read symlinks."""
+ self.requireFeature(features.SymlinkFeature)
+ state, entry = self.get_state_with_a()
+ state.save()
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ os.symlink('target', 'a')
+
+ state.adjust_time(-10) # Make the symlink look new
+ stat_value = os.lstat('a')
+ packed_stat = dirstate.pack_stat(stat_value)
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual('target', link_or_sha1)
+ self.assertEqual([('read_link', 'a', '')], state._log)
+ # Dirblock is not updated (the link is too new)
+ self.assertEqual([('l', '', 6, False, dirstate.DirState.NULLSTAT)],
+ entry[1])
+ # The file entry turned into a symlink, which is considered worth
+ # recording as HASH modified.
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+
+ # Because the stat_value looks new, we should re-read the target
+ del state._log[:]
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual('target', link_or_sha1)
+ self.assertEqual([('read_link', 'a', '')], state._log)
+ self.assertEqual([('l', '', 6, False, dirstate.DirState.NULLSTAT)],
+ entry[1])
+ state.save()
+ state.adjust_time(+20) # Skip into the future, all files look old
+ del state._log[:]
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ # The symlink stayed a symlink. So while it is now old enough to cache,
+ # we don't bother setting the flag, because it is not really worth saving
+ # (when we stat the symlink, we'll have paged in the target.)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ self.assertEqual('target', link_or_sha1)
+ # We need to re-read the link because only now can we cache it
+ self.assertEqual([('read_link', 'a', '')], state._log)
+ self.assertEqual([('l', 'target', 6, False, packed_stat)],
+ entry[1])
+
+ del state._log[:]
+ # Another call won't re-read the link
+ self.assertEqual([], state._log)
+ link_or_sha1 = self.update_entry(state, entry, abspath='a',
+ stat_value=stat_value)
+ self.assertEqual('target', link_or_sha1)
+ self.assertEqual([('l', 'target', 6, False, packed_stat)],
+ entry[1])
+
+ def do_update_entry(self, state, entry, abspath):
+ stat_value = os.lstat(abspath)
+ return self.update_entry(state, entry, abspath, stat_value)
+
+ def test_update_entry_dir(self):
+ state, entry = self.get_state_with_a()
+ self.build_tree(['a/'])
+ self.assertIs(None, self.do_update_entry(state, entry, 'a'))
+
+ def test_update_entry_dir_unchanged(self):
+ state, entry = self.get_state_with_a()
+ self.build_tree(['a/'])
+ state.adjust_time(+20)
+ self.assertIs(None, self.do_update_entry(state, entry, 'a'))
+ # a/ used to be a file, but is now a directory, worth saving
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._dirblock_state)
+ state.save()
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ # No changes to a/ means not worth saving.
+ self.assertIs(None, self.do_update_entry(state, entry, 'a'))
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ # Change the last-modified time for the directory
+ t = time.time() - 100.0
+ try:
+ os.utime('a', (t, t))
+ except OSError:
+ # It looks like Win32 + FAT doesn't allow changing the times on a directory.
+ raise tests.TestSkipped("can't update mtime of a dir on FAT")
+ saved_packed_stat = entry[1][0][-1]
+ self.assertIs(None, self.do_update_entry(state, entry, 'a'))
+ # We *do* go ahead and update the information in the dirblocks, but we
+ # don't bother setting IN_MEMORY_MODIFIED because it is trivial to
+ # recompute.
+ self.assertNotEqual(saved_packed_stat, entry[1][0][-1])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ def test_update_entry_file_unchanged(self):
+ state, _ = self.get_state_with_a()
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a-id'])
+ with_a_id = tree.commit('witha')
+ self.addCleanup(tree.unlock)
+ state.set_parent_trees(
+ [(with_a_id, tree.branch.repository.revision_tree(with_a_id))],
+ [])
+ entry = state._get_entry(0, path_utf8='a')
+ self.build_tree(['a'])
+ sha1sum = 'b50e5406bb5e153ebbeb20268fcf37c87e1ecfb6'
+ state.adjust_time(+20)
+ self.assertEqual(sha1sum, self.do_update_entry(state, entry, 'a'))
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._dirblock_state)
+ state.save()
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ self.assertEqual(sha1sum, self.do_update_entry(state, entry, 'a'))
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+
+ def test_update_entry_tree_reference(self):
+ state = test_dirstate.InstrumentedDirState.initialize('dirstate')
+ self.addCleanup(state.unlock)
+ state.add('r', 'r-id', 'tree-reference', None, '')
+ self.build_tree(['r/'])
+ entry = state._get_entry(0, path_utf8='r')
+ self.do_update_entry(state, entry, 'r')
+ entry = state._get_entry(0, path_utf8='r')
+ self.assertEqual('t', entry[1][0][0])
+
+ def create_and_test_file(self, state, entry):
+ """Create a file at 'a' and verify the state finds it during update.
+
+ The state should already be versioning *something* at 'a'. This makes
+ sure that state.update_entry recognizes it as a file.
+ """
+ self.build_tree(['a'])
+ stat_value = os.lstat('a')
+ packed_stat = dirstate.pack_stat(stat_value)
+
+ link_or_sha1 = self.do_update_entry(state, entry, abspath='a')
+ self.assertEqual(None, link_or_sha1)
+ self.assertEqual([('f', '', 14, False, dirstate.DirState.NULLSTAT)],
+ entry[1])
+ return packed_stat
+
+ def create_and_test_dir(self, state, entry):
+ """Create a directory at 'a' and verify the state finds it.
+
+ The state should already be versioning *something* at 'a'. This makes
+ sure that state.update_entry recognizes it as a directory.
+ """
+ self.build_tree(['a/'])
+ stat_value = os.lstat('a')
+ packed_stat = dirstate.pack_stat(stat_value)
+
+ link_or_sha1 = self.do_update_entry(state, entry, abspath='a')
+ self.assertIs(None, link_or_sha1)
+ self.assertEqual([('d', '', 0, False, packed_stat)], entry[1])
+
+ return packed_stat
+
+ # FIXME: Add unicode version
+ def create_and_test_symlink(self, state, entry):
+ """Create a symlink at 'a' and verify the state finds it.
+
+ The state should already be versioning *something* at 'a'. This makes
+ sure that state.update_entry recognizes it as a symlink.
+
+ This should not be called if this platform does not have symlink
+ support.
+ """
+ # callers are responsible for skipping this test on platforms without symlink support
+ os.symlink('path/to/foo', 'a')
+
+ stat_value = os.lstat('a')
+ packed_stat = dirstate.pack_stat(stat_value)
+
+ link_or_sha1 = self.do_update_entry(state, entry, abspath='a')
+ self.assertEqual('path/to/foo', link_or_sha1)
+ self.assertEqual([('l', 'path/to/foo', 11, False, packed_stat)],
+ entry[1])
+ return packed_stat
+
+ def test_update_file_to_dir(self):
+ """If a file changes to a directory we return None for the sha.
+ We also update the inventory record.
+ """
+ state, entry = self.get_state_with_a()
+ # The file sha1 won't be cached unless the file is old
+ state.adjust_time(+10)
+ self.create_and_test_file(state, entry)
+ os.remove('a')
+ self.create_and_test_dir(state, entry)
+
+ def test_update_file_to_symlink(self):
+ """File becomes a symlink"""
+ self.requireFeature(features.SymlinkFeature)
+ state, entry = self.get_state_with_a()
+ # The file sha1 won't be cached unless the file is old
+ state.adjust_time(+10)
+ self.create_and_test_file(state, entry)
+ os.remove('a')
+ self.create_and_test_symlink(state, entry)
+
+ def test_update_dir_to_file(self):
+ """Directory becoming a file updates the entry."""
+ state, entry = self.get_state_with_a()
+ # The file sha1 won't be cached unless the file is old
+ state.adjust_time(+10)
+ self.create_and_test_dir(state, entry)
+ os.rmdir('a')
+ self.create_and_test_file(state, entry)
+
+ def test_update_dir_to_symlink(self):
+ """Directory becomes a symlink"""
+ self.requireFeature(features.SymlinkFeature)
+ state, entry = self.get_state_with_a()
+ # The symlink target won't be cached if it isn't old
+ state.adjust_time(+10)
+ self.create_and_test_dir(state, entry)
+ os.rmdir('a')
+ self.create_and_test_symlink(state, entry)
+
+ def test_update_symlink_to_file(self):
+ """Symlink becomes a file"""
+ self.requireFeature(features.SymlinkFeature)
+ state, entry = self.get_state_with_a()
+ # The symlink and file info won't be cached unless old
+ state.adjust_time(+10)
+ self.create_and_test_symlink(state, entry)
+ os.remove('a')
+ self.create_and_test_file(state, entry)
+
+ def test_update_symlink_to_dir(self):
+ """Symlink becomes a directory"""
+ self.requireFeature(features.SymlinkFeature)
+ state, entry = self.get_state_with_a()
+ # The symlink target won't be cached if it isn't old
+ state.adjust_time(+10)
+ self.create_and_test_symlink(state, entry)
+ os.remove('a')
+ self.create_and_test_dir(state, entry)
+
+ def test__is_executable_win32(self):
+ state, entry = self.get_state_with_a()
+ self.build_tree(['a'])
+
+ # Make sure we are using the win32 implementation of _is_executable
+ state._is_executable = state._is_executable_win32
+
+ # The file on disk is not executable, but we are marking it as though
+ # it is. With _is_executable_win32 we ignore what is on disk.
+ entry[1][0] = ('f', '', 0, True, dirstate.DirState.NULLSTAT)
+
+ stat_value = os.lstat('a')
+ packed_stat = dirstate.pack_stat(stat_value)
+
+ state.adjust_time(-10) # Make sure everything is new
+ self.update_entry(state, entry, abspath='a', stat_value=stat_value)
+
+ # The row is updated, but the executable bit stays set.
+ self.assertEqual([('f', '', 14, True, dirstate.DirState.NULLSTAT)],
+ entry[1])
+
+ # Make the disk object look old enough to cache (but it won't cache the
+ # sha as it is a new file).
+ state.adjust_time(+20)
+ digest = 'b50e5406bb5e153ebbeb20268fcf37c87e1ecfb6'
+ self.update_entry(state, entry, abspath='a', stat_value=stat_value)
+ self.assertEqual([('f', '', 14, True, dirstate.DirState.NULLSTAT)],
+ entry[1])
+
+ def _prepare_tree(self):
+ # Create a tree
+ text = 'Hello World\n'
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/a file', text)])
+ tree.add('a file', 'a-file-id')
+ # Note: dirstate does not sha prior to the first commit
+ # so commit now in order for the test to work
+ tree.commit('first')
+ return tree, text
+
+ def test_sha1provider_sha1_used(self):
+ tree, text = self._prepare_tree()
+ state = dirstate.DirState.from_tree(tree, 'dirstate',
+ UppercaseSHA1Provider())
+ self.addCleanup(state.unlock)
+ expected_sha = osutils.sha_string(text.upper() + "foo")
+ entry = state._get_entry(0, path_utf8='a file')
+ state._sha_cutoff_time()
+ state._cutoff_time += 10
+ sha1 = self.update_entry(state, entry, 'tree/a file',
+ os.lstat('tree/a file'))
+ self.assertEqual(expected_sha, sha1)
+
+ def test_sha1provider_stat_and_sha1_used(self):
+ tree, text = self._prepare_tree()
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ state = tree._current_dirstate()
+ state._sha1_provider = UppercaseSHA1Provider()
+ # If we used the standard provider, it would look like nothing has
+ # changed
+ file_ids_changed = [change[0] for change
+ in tree.iter_changes(tree.basis_tree())]
+ self.assertEqual(['a-file-id'], file_ids_changed)
+
+
+class UppercaseSHA1Provider(dirstate.SHA1Provider):
+ """A custom SHA1Provider."""
+
+ def sha1(self, abspath):
+ return self.stat_and_sha1(abspath)[1]
+
+ def stat_and_sha1(self, abspath):
+ file_obj = file(abspath, 'rb')
+ try:
+ statvalue = os.fstat(file_obj.fileno())
+ text = ''.join(file_obj.readlines())
+ sha1 = osutils.sha_string(text.upper() + "foo")
+ finally:
+ file_obj.close()
+ return statvalue, sha1
+
+
+class TestProcessEntry(test_dirstate.TestCaseWithDirState):
+
+ scenarios = multiply_scenarios(dir_reader_scenarios(), pe_scenarios)
+
+ # Set by load_tests
+ _process_entry = None
+
+ def setUp(self):
+ super(TestProcessEntry, self).setUp()
+ self.overrideAttr(dirstate, '_process_entry', self._process_entry)
+
+ def assertChangedFileIds(self, expected, tree):
+ tree.lock_read()
+ try:
+ file_ids = [info[0] for info
+ in tree.iter_changes(tree.basis_tree())]
+ finally:
+ tree.unlock()
+ self.assertEqual(sorted(expected), sorted(file_ids))
+
+ def test_exceptions_raised(self):
+ # This is a direct test of bug #495023; it relies on osutils.is_inside
+ # getting called in an inner function, which makes it a bit brittle,
+ # but at least it does reproduce the bug.
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file', 'tree/dir/', 'tree/dir/sub',
+ 'tree/dir2/', 'tree/dir2/sub2'])
+ tree.add(['file', 'dir', 'dir/sub', 'dir2', 'dir2/sub2'])
+ tree.commit('first commit')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ basis_tree = tree.basis_tree()
+ def is_inside_raises(*args, **kwargs):
+ raise RuntimeError('stop this')
+ self.overrideAttr(osutils, 'is_inside', is_inside_raises)
+ self.assertListRaises(RuntimeError, tree.iter_changes, basis_tree)
+
+ def test_simple_changes(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add(['file'], ['file-id'])
+ self.assertChangedFileIds([tree.get_root_id(), 'file-id'], tree)
+ tree.commit('one')
+ self.assertChangedFileIds([], tree)
+
+ def test_sha1provider_stat_and_sha1_used(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add(['file'], ['file-id'])
+ tree.commit('one')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ state = tree._current_dirstate()
+ state._sha1_provider = UppercaseSHA1Provider()
+ self.assertChangedFileIds(['file-id'], tree)
+
+
+class TestPackStat(tests.TestCase):
+ """Check packed representaton of stat values is robust on all inputs"""
+
+ scenarios = helper_scenarios
+
+ def pack(self, statlike_tuple):
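+ # os.stat_result takes the 10-tuple in the order: st_mode, st_ino, st_dev,
+ # st_nlink, st_uid, st_gid, st_size, st_atime, st_mtime, st_ctime.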
+ return self.helpers.pack_stat(os.stat_result(statlike_tuple))
+
+ @staticmethod
+ def unpack_field(packed_string, stat_field):
+ return _dirstate_helpers_py._unpack_stat(packed_string)[stat_field]
+
+ def test_result(self):
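+ # pack_stat stores each packed field as an unsigned 32-bit value and
+ # truncates fractional times; the edge-case tests below rely on that.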
+ self.assertEqual("AAAQAAAAABAAAAARAAAAAgAAAAEAAIHk",
+ self.pack((33252, 1, 2, 0, 0, 0, 4096, 15.5, 16.5, 17.5)))
+
+ def test_giant_inode(self):
+ packed = self.pack((33252, 0xF80000ABC, 0, 0, 0, 0, 0, 0, 0, 0))
+ self.assertEqual(0x80000ABC, self.unpack_field(packed, "st_ino"))
+
+ def test_giant_size(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, (1 << 33) + 4096, 0, 0, 0))
+ self.assertEqual(4096, self.unpack_field(packed, "st_size"))
+
+ def test_fractional_mtime(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, 0, 0, 16.9375, 0))
+ self.assertEqual(16, self.unpack_field(packed, "st_mtime"))
+
+ def test_ancient_mtime(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, 0, 0, -11644473600.0, 0))
+ self.assertEqual(1240428288, self.unpack_field(packed, "st_mtime"))
+
+ def test_distant_mtime(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, 0, 0, 64060588800.0, 0))
+ self.assertEqual(3931046656, self.unpack_field(packed, "st_mtime"))
+
+ def test_fractional_ctime(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, 0, 0, 0, 17.5625))
+ self.assertEqual(17, self.unpack_field(packed, "st_ctime"))
+
+ def test_ancient_ctime(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, 0, 0, 0, -11644473600.0))
+ self.assertEqual(1240428288, self.unpack_field(packed, "st_ctime"))
+
+ def test_distant_ctime(self):
+ packed = self.pack((33252, 0, 0, 0, 0, 0, 0, 0, 0, 64060588800.0))
+ self.assertEqual(3931046656, self.unpack_field(packed, "st_ctime"))
+
+ def test_negative_dev(self):
+ packed = self.pack((33252, 0, -0xFFFFFCDE, 0, 0, 0, 0, 0, 0, 0))
+ self.assertEqual(0x322, self.unpack_field(packed, "st_dev"))
diff --git a/bzrlib/tests/test__groupcompress.py b/bzrlib/tests/test__groupcompress.py
new file mode 100644
index 0000000..e37333f
--- /dev/null
+++ b/bzrlib/tests/test__groupcompress.py
@@ -0,0 +1,582 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the python and pyrex extensions of groupcompress"""
+
+from bzrlib import (
+ _groupcompress_py,
+ tests,
+ )
+from bzrlib.tests.scenarios import (
+ load_tests_apply_scenarios,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+def module_scenarios():
+ scenarios = [
+ ('python', {'_gc_module': _groupcompress_py}),
+ ]
+ if compiled_groupcompress_feature.available():
+ gc_module = compiled_groupcompress_feature.module
+ scenarios.append(('C',
+ {'_gc_module': gc_module}))
+ return scenarios
+
+
+def two_way_scenarios():
+ scenarios = [
+ ('PP', {'make_delta': _groupcompress_py.make_delta,
+ 'apply_delta': _groupcompress_py.apply_delta})
+ ]
+ if compiled_groupcompress_feature.available():
+ gc_module = compiled_groupcompress_feature.module
+ scenarios.extend([
+ ('CC', {'make_delta': gc_module.make_delta,
+ 'apply_delta': gc_module.apply_delta}),
+ ('PC', {'make_delta': _groupcompress_py.make_delta,
+ 'apply_delta': gc_module.apply_delta}),
+ ('CP', {'make_delta': gc_module.make_delta,
+ 'apply_delta': _groupcompress_py.apply_delta}),
+ ])
+ return scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+compiled_groupcompress_feature = features.ModuleAvailableFeature(
+ 'bzrlib._groupcompress_pyx')
+
+_text1 = """\
+This is a bit
+of source text
+which is meant to be matched
+against other text
+"""
+
+_text2 = """\
+This is a bit
+of source text
+which is meant to differ from
+against other text
+"""
+
+_text3 = """\
+This is a bit
+of source text
+which is meant to be matched
+against other text
+except it also
+has a lot more data
+at the end of the file
+"""
+
+_first_text = """\
+a bit of text, that
+does not have much in
+common with the next text
+"""
+
+_second_text = """\
+some more bit of text, that
+does not have much in
+common with the previous text
+and has some extra text
+"""
+
+
+_third_text = """\
+a bit of text, that
+has some in common with the previous text
+and has some extra text
+and not have much in
+common with the next text
+"""
+
+_fourth_text = """\
+123456789012345
+same rabin hash
+123456789012345
+same rabin hash
+123456789012345
+same rabin hash
+123456789012345
+same rabin hash
+"""
+
+class TestMakeAndApplyDelta(tests.TestCase):
+
+ scenarios = module_scenarios()
+ _gc_module = None # Set by load_tests
+
+ def setUp(self):
+ super(TestMakeAndApplyDelta, self).setUp()
+ self.make_delta = self._gc_module.make_delta
+ self.apply_delta = self._gc_module.apply_delta
+ self.apply_delta_to_source = self._gc_module.apply_delta_to_source
+
+ def test_make_delta_is_typesafe(self):
+ self.make_delta('a string', 'another string')
+
+ def _check_make_delta(string1, string2):
+ self.assertRaises(TypeError, self.make_delta, string1, string2)
+
+ _check_make_delta('a string', object())
+ _check_make_delta('a string', u'not a string')
+ _check_make_delta(object(), 'a string')
+ _check_make_delta(u'not a string', 'a string')
+
+ def test_make_noop_delta(self):
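+ # 'M' is chr(77) == len(_text1): a delta starts with the base-128 length
+ # of the target, followed here by a single copy instruction ('\x90M')
+ # covering all 77 source bytes from offset 0.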
+ ident_delta = self.make_delta(_text1, _text1)
+ self.assertEqual('M\x90M', ident_delta)
+ ident_delta = self.make_delta(_text2, _text2)
+ self.assertEqual('N\x90N', ident_delta)
+ ident_delta = self.make_delta(_text3, _text3)
+ self.assertEqual('\x87\x01\x90\x87', ident_delta)
+
+ def assertDeltaIn(self, delta1, delta2, delta):
+ """Make sure that the delta bytes match one of the expectations."""
+ # In general, the python delta matcher gives different results than the
+ # pyrex delta matcher. Both should be valid deltas, though.
+ if delta not in (delta1, delta2):
+ self.fail("Delta bytes:\n"
+ " %r\n"
+ "not in %r\n"
+ " or %r"
+ % (delta, delta1, delta2))
+
+ def test_make_delta(self):
+ delta = self.make_delta(_text1, _text2)
+ self.assertDeltaIn(
+ 'N\x90/\x1fdiffer from\nagainst other text\n',
+ 'N\x90\x1d\x1ewhich is meant to differ from\n\x91:\x13',
+ delta)
+ delta = self.make_delta(_text2, _text1)
+ self.assertDeltaIn(
+ 'M\x90/\x1ebe matched\nagainst other text\n',
+ 'M\x90\x1d\x1dwhich is meant to be matched\n\x91;\x13',
+ delta)
+ delta = self.make_delta(_text3, _text1)
+ self.assertEqual('M\x90M', delta)
+ delta = self.make_delta(_text3, _text2)
+ self.assertDeltaIn(
+ 'N\x90/\x1fdiffer from\nagainst other text\n',
+ 'N\x90\x1d\x1ewhich is meant to differ from\n\x91:\x13',
+ delta)
+
+ def test_make_delta_with_large_copies(self):
+ # We want to have a copy that is larger than 64kB, which forces us to
+ # issue multiple copy instructions.
+ big_text = _text3 * 1220
+ delta = self.make_delta(big_text, big_text)
+ self.assertDeltaIn(
+ '\xdc\x86\x0a' # Encoding the length of the uncompressed text
+ '\x80' # Copy 64kB, starting at byte 0
+ '\x84\x01' # and another 64kB starting at 64kB
+ '\xb4\x02\x5c\x83', # And the bit of tail.
+ None, # Both implementations should be identical
+ delta)
+
+ def test_apply_delta_is_typesafe(self):
+ self.apply_delta(_text1, 'M\x90M')
+ self.assertRaises(TypeError, self.apply_delta, object(), 'M\x90M')
+ self.assertRaises(TypeError, self.apply_delta,
+ unicode(_text1), 'M\x90M')
+ self.assertRaises(TypeError, self.apply_delta, _text1, u'M\x90M')
+ self.assertRaises(TypeError, self.apply_delta, _text1, object())
+
+ def test_apply_delta(self):
+ target = self.apply_delta(_text1,
+ 'N\x90/\x1fdiffer from\nagainst other text\n')
+ self.assertEqual(_text2, target)
+ target = self.apply_delta(_text2,
+ 'M\x90/\x1ebe matched\nagainst other text\n')
+ self.assertEqual(_text1, target)
+
+ def test_apply_delta_to_source_is_safe(self):
+ self.assertRaises(TypeError,
+ self.apply_delta_to_source, object(), 0, 1)
+ self.assertRaises(TypeError,
+ self.apply_delta_to_source, u'unicode str', 0, 1)
+ # end > length
+ self.assertRaises(ValueError,
+ self.apply_delta_to_source, 'foo', 1, 4)
+ # start > length
+ self.assertRaises(ValueError,
+ self.apply_delta_to_source, 'foo', 5, 3)
+ # start > end
+ self.assertRaises(ValueError,
+ self.apply_delta_to_source, 'foo', 3, 2)
+
+ def test_apply_delta_to_source(self):
+ source_and_delta = (_text1
+ + 'N\x90/\x1fdiffer from\nagainst other text\n')
+ self.assertEqual(_text2, self.apply_delta_to_source(source_and_delta,
+ len(_text1), len(source_and_delta)))
+
+
+class TestMakeAndApplyCompatible(tests.TestCase):
+
+ scenarios = two_way_scenarios()
+
+ make_delta = None # Set by load_tests
+ apply_delta = None # Set by load_tests
+
+ def assertMakeAndApply(self, source, target):
+ """Assert that generating a delta and applying gives success."""
+ delta = self.make_delta(source, target)
+ bytes = self.apply_delta(source, delta)
+ self.assertEqualDiff(target, bytes)
+
+ def test_direct(self):
+ self.assertMakeAndApply(_text1, _text2)
+ self.assertMakeAndApply(_text2, _text1)
+ self.assertMakeAndApply(_text1, _text3)
+ self.assertMakeAndApply(_text3, _text1)
+ self.assertMakeAndApply(_text2, _text3)
+ self.assertMakeAndApply(_text3, _text2)
+
+
+class TestDeltaIndex(tests.TestCase):
+
+ def setUp(self):
+ super(TestDeltaIndex, self).setUp()
+ # This test isn't multiplied, because we only have DeltaIndex for the
+ # compiled form.
+ # We call requireFeature here because _test_needs_features happens after setUp.
+ self.requireFeature(compiled_groupcompress_feature)
+ self._gc_module = compiled_groupcompress_feature.module
+
+ def test_repr(self):
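+ # 'test text\n' is a single 10-byte source, which appears to be what the
+ # two numbers in the repr report.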
+ di = self._gc_module.DeltaIndex('test text\n')
+ self.assertEqual('DeltaIndex(1, 10)', repr(di))
+
+ def test__dump_no_index(self):
+ di = self._gc_module.DeltaIndex()
+ self.assertEqual(None, di._dump_index())
+
+ def test__dump_index_simple(self):
+ di = self._gc_module.DeltaIndex()
+ di.add_source(_text1, 0)
+ self.assertFalse(di._has_index())
+ self.assertEqual(None, di._dump_index())
+ _ = di.make_delta(_text1)
+ self.assertTrue(di._has_index())
+ hash_list, entry_list = di._dump_index()
+ self.assertEqual(16, len(hash_list))
+ self.assertEqual(68, len(entry_list))
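+ # hash_list is the 16-bucket hash table (indexed by the low 4 bits of the
+ # rabin hash, as checked below); entry_list holds (text_offset, hash_val)
+ # pairs.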
+ just_entries = [(idx, text_offset, hash_val)
+ for idx, (text_offset, hash_val)
+ in enumerate(entry_list)
+ if text_offset != 0 or hash_val != 0]
+ rabin_hash = self._gc_module._rabin_hash
+ self.assertEqual([(8, 16, rabin_hash(_text1[1:17])),
+ (25, 48, rabin_hash(_text1[33:49])),
+ (34, 32, rabin_hash(_text1[17:33])),
+ (47, 64, rabin_hash(_text1[49:65])),
+ ], just_entries)
+ # This ensures that the hash map points to the location we expect it to
+ for entry_idx, text_offset, hash_val in just_entries:
+ self.assertEqual(entry_idx, hash_list[hash_val & 0xf])
+
+ def test__dump_index_two_sources(self):
+ di = self._gc_module.DeltaIndex()
+ di.add_source(_text1, 0)
+ di.add_source(_text2, 2)
+ start2 = len(_text1) + 2
+ self.assertTrue(di._has_index())
+ hash_list, entry_list = di._dump_index()
+ self.assertEqual(16, len(hash_list))
+ self.assertEqual(68, len(entry_list))
+ just_entries = [(idx, text_offset, hash_val)
+ for idx, (text_offset, hash_val)
+ in enumerate(entry_list)
+ if text_offset != 0 or hash_val != 0]
+ rabin_hash = self._gc_module._rabin_hash
+ self.assertEqual([(8, 16, rabin_hash(_text1[1:17])),
+ (9, start2+16, rabin_hash(_text2[1:17])),
+ (25, 48, rabin_hash(_text1[33:49])),
+ (30, start2+64, rabin_hash(_text2[49:65])),
+ (34, 32, rabin_hash(_text1[17:33])),
+ (35, start2+32, rabin_hash(_text2[17:33])),
+ (43, start2+48, rabin_hash(_text2[33:49])),
+ (47, 64, rabin_hash(_text1[49:65])),
+ ], just_entries)
+ # Each entry should be in the appropriate hash bucket.
+ for entry_idx, text_offset, hash_val in just_entries:
+ hash_idx = hash_val & 0xf
+ self.assertTrue(
+ hash_list[hash_idx] <= entry_idx < hash_list[hash_idx+1])
+
+ def test_first_add_source_doesnt_index_until_make_delta(self):
+ di = self._gc_module.DeltaIndex()
+ self.assertFalse(di._has_index())
+ di.add_source(_text1, 0)
+ self.assertFalse(di._has_index())
+ # However, asking to make a delta will trigger the index to be
+ # generated, and will generate a proper delta
+ delta = di.make_delta(_text2)
+ self.assertTrue(di._has_index())
+ self.assertEqual('N\x90/\x1fdiffer from\nagainst other text\n', delta)
+
+ def test_add_source_max_bytes_to_index(self):
+ di = self._gc_module.DeltaIndex()
+ di._max_bytes_to_index = 3*16
+ di.add_source(_text1, 0) # (77 bytes -1) // 3 = 25 byte stride
+ di.add_source(_text3, 3) # (135 bytes -1) // 3 = 44 byte stride
+ start2 = len(_text1) + 3
+ hash_list, entry_list = di._dump_index()
+ self.assertEqual(16, len(hash_list))
+ self.assertEqual(67, len(entry_list))
+ just_entries = sorted([(text_offset, hash_val)
+ for text_offset, hash_val in entry_list
+ if text_offset != 0 or hash_val != 0])
+ rabin_hash = self._gc_module._rabin_hash
+ self.assertEqual([(25, rabin_hash(_text1[10:26])),
+ (50, rabin_hash(_text1[35:51])),
+ (75, rabin_hash(_text1[60:76])),
+ (start2+44, rabin_hash(_text3[29:45])),
+ (start2+88, rabin_hash(_text3[73:89])),
+ (start2+132, rabin_hash(_text3[117:133])),
+ ], just_entries)
+
+ def test_second_add_source_triggers_make_index(self):
+ di = self._gc_module.DeltaIndex()
+ self.assertFalse(di._has_index())
+ di.add_source(_text1, 0)
+ self.assertFalse(di._has_index())
+ di.add_source(_text2, 0)
+ self.assertTrue(di._has_index())
+
+ def test_make_delta(self):
+ di = self._gc_module.DeltaIndex(_text1)
+ delta = di.make_delta(_text2)
+ self.assertEqual('N\x90/\x1fdiffer from\nagainst other text\n', delta)
+
+ def test_delta_against_multiple_sources(self):
+ di = self._gc_module.DeltaIndex()
+ di.add_source(_first_text, 0)
+ self.assertEqual(len(_first_text), di._source_offset)
+ di.add_source(_second_text, 0)
+ self.assertEqual(len(_first_text) + len(_second_text),
+ di._source_offset)
+ delta = di.make_delta(_third_text)
+ result = self._gc_module.apply_delta(_first_text + _second_text, delta)
+ self.assertEqualDiff(_third_text, result)
+ self.assertEqual('\x85\x01\x90\x14\x0chas some in '
+ '\x91v6\x03and\x91d"\x91:\n', delta)
+
+ def test_delta_with_offsets(self):
+ di = self._gc_module.DeltaIndex()
+ di.add_source(_first_text, 5)
+ self.assertEqual(len(_first_text) + 5, di._source_offset)
+ di.add_source(_second_text, 10)
+ self.assertEqual(len(_first_text) + len(_second_text) + 15,
+ di._source_offset)
+ delta = di.make_delta(_third_text)
+ self.assertIsNot(None, delta)
+ result = self._gc_module.apply_delta(
+ '12345' + _first_text + '1234567890' + _second_text, delta)
+ self.assertIsNot(None, result)
+ self.assertEqualDiff(_third_text, result)
+ self.assertEqual('\x85\x01\x91\x05\x14\x0chas some in '
+ '\x91\x856\x03and\x91s"\x91?\n', delta)
+
+ def test_delta_with_delta_bytes(self):
+ di = self._gc_module.DeltaIndex()
+ source = _first_text
+ di.add_source(_first_text, 0)
+ self.assertEqual(len(_first_text), di._source_offset)
+ delta = di.make_delta(_second_text)
+ self.assertEqual('h\tsome more\x91\x019'
+ '&previous text\nand has some extra text\n', delta)
+ di.add_delta_source(delta, 0)
+ source += delta
+ self.assertEqual(len(_first_text) + len(delta), di._source_offset)
+ second_delta = di.make_delta(_third_text)
+ result = self._gc_module.apply_delta(source, second_delta)
+ self.assertEqualDiff(_third_text, result)
+ # We should be able to match against the
+ # 'previous text\nand has some...' that was part of the delta bytes
+ # Note that we don't match the 'common with the', because it isn't long
+ # enough to match in the original text, and those bytes are not present
+ # in the delta for the second text.
+ self.assertEqual('\x85\x01\x90\x14\x1chas some in common with the '
+ '\x91S&\x03and\x91\x18,', second_delta)
+ # Add this delta, and create a new delta for the same text. We should
+ # find the remaining text, and only insert the short 'and' text.
+ di.add_delta_source(second_delta, 0)
+ source += second_delta
+ third_delta = di.make_delta(_third_text)
+ result = self._gc_module.apply_delta(source, third_delta)
+ self.assertEqualDiff(_third_text, result)
+ self.assertEqual('\x85\x01\x90\x14\x91\x7e\x1c'
+ '\x91S&\x03and\x91\x18,', third_delta)
+ # Now create a delta that we know won't fit into the
+ # existing index
+ fourth_delta = di.make_delta(_fourth_text)
+ self.assertEqual(_fourth_text,
+ self._gc_module.apply_delta(source, fourth_delta))
+ self.assertEqual('\x80\x01'
+ '\x7f123456789012345\nsame rabin hash\n'
+ '123456789012345\nsame rabin hash\n'
+ '123456789012345\nsame rabin hash\n'
+ '123456789012345\nsame rabin hash'
+ '\x01\n', fourth_delta)
+ di.add_delta_source(fourth_delta, 0)
+ source += fourth_delta
+ # With the next delta, everything should be found
+ fifth_delta = di.make_delta(_fourth_text)
+ self.assertEqual(_fourth_text,
+ self._gc_module.apply_delta(source, fifth_delta))
+ self.assertEqual('\x80\x01\x91\xa7\x7f\x01\n', fifth_delta)
+
+
+class TestCopyInstruction(tests.TestCase):
+
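+ # In the encodings exercised below, a copy command sets the high bit (0x80);
+ # bits 0x01..0x08 flag which little-endian offset bytes follow, bits
+ # 0x10/0x20 flag the length bytes, and a stored length of 0 means 64KiB.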
+ def assertEncode(self, expected, offset, length):
+ bytes = _groupcompress_py.encode_copy_instruction(offset, length)
+ if expected != bytes:
+ self.assertEqual([hex(ord(e)) for e in expected],
+ [hex(ord(b)) for b in bytes])
+
+ def assertDecode(self, exp_offset, exp_length, exp_newpos, bytes, pos):
+ cmd = ord(bytes[pos])
+ pos += 1
+ out = _groupcompress_py.decode_copy_instruction(bytes, cmd, pos)
+ self.assertEqual((exp_offset, exp_length, exp_newpos), out)
+
+ def test_encode_no_length(self):
+ self.assertEncode('\x80', 0, 64*1024)
+ self.assertEncode('\x81\x01', 1, 64*1024)
+ self.assertEncode('\x81\x0a', 10, 64*1024)
+ self.assertEncode('\x81\xff', 255, 64*1024)
+ self.assertEncode('\x82\x01', 256, 64*1024)
+ self.assertEncode('\x83\x01\x01', 257, 64*1024)
+ self.assertEncode('\x8F\xff\xff\xff\xff', 0xFFFFFFFF, 64*1024)
+ self.assertEncode('\x8E\xff\xff\xff', 0xFFFFFF00, 64*1024)
+ self.assertEncode('\x8D\xff\xff\xff', 0xFFFF00FF, 64*1024)
+ self.assertEncode('\x8B\xff\xff\xff', 0xFF00FFFF, 64*1024)
+ self.assertEncode('\x87\xff\xff\xff', 0x00FFFFFF, 64*1024)
+ self.assertEncode('\x8F\x04\x03\x02\x01', 0x01020304, 64*1024)
+
+ def test_encode_no_offset(self):
+ self.assertEncode('\x90\x01', 0, 1)
+ self.assertEncode('\x90\x0a', 0, 10)
+ self.assertEncode('\x90\xff', 0, 255)
+ self.assertEncode('\xA0\x01', 0, 256)
+ self.assertEncode('\xB0\x01\x01', 0, 257)
+ self.assertEncode('\xB0\xff\xff', 0, 0xFFFF)
+ # Special case, if copy == 64KiB, then we store exactly 0
+ # Note that this puns with a copy of exactly 0 bytes, but we don't care
+ # about that, as we would never actually copy 0 bytes
+ self.assertEncode('\x80', 0, 64*1024)
+
+ def test_encode(self):
+ self.assertEncode('\x91\x01\x01', 1, 1)
+ self.assertEncode('\x91\x09\x0a', 9, 10)
+ self.assertEncode('\x91\xfe\xff', 254, 255)
+ self.assertEncode('\xA2\x02\x01', 512, 256)
+ self.assertEncode('\xB3\x02\x01\x01\x01', 258, 257)
+ self.assertEncode('\xB0\x01\x01', 0, 257)
+ # Special case, if copy == 64KiB, then we store exactly 0
+ # Note that this puns with a copy of exactly 0 bytes, but we don't care
+ # about that, as we would never actually copy 0 bytes
+ self.assertEncode('\x81\x0a', 10, 64*1024)
+
+ def test_decode_no_length(self):
+ # If length is 0, it is interpreted as 64KiB
+ # The shortest possible instruction is a copy of 64KiB from offset 0
+ self.assertDecode(0, 65536, 1, '\x80', 0)
+ self.assertDecode(1, 65536, 2, '\x81\x01', 0)
+ self.assertDecode(10, 65536, 2, '\x81\x0a', 0)
+ self.assertDecode(255, 65536, 2, '\x81\xff', 0)
+ self.assertDecode(256, 65536, 2, '\x82\x01', 0)
+ self.assertDecode(257, 65536, 3, '\x83\x01\x01', 0)
+ self.assertDecode(0xFFFFFFFF, 65536, 5, '\x8F\xff\xff\xff\xff', 0)
+ self.assertDecode(0xFFFFFF00, 65536, 4, '\x8E\xff\xff\xff', 0)
+ self.assertDecode(0xFFFF00FF, 65536, 4, '\x8D\xff\xff\xff', 0)
+ self.assertDecode(0xFF00FFFF, 65536, 4, '\x8B\xff\xff\xff', 0)
+ self.assertDecode(0x00FFFFFF, 65536, 4, '\x87\xff\xff\xff', 0)
+ self.assertDecode(0x01020304, 65536, 5, '\x8F\x04\x03\x02\x01', 0)
+
+ def test_decode_no_offset(self):
+ self.assertDecode(0, 1, 2, '\x90\x01', 0)
+ self.assertDecode(0, 10, 2, '\x90\x0a', 0)
+ self.assertDecode(0, 255, 2, '\x90\xff', 0)
+ self.assertDecode(0, 256, 2, '\xA0\x01', 0)
+ self.assertDecode(0, 257, 3, '\xB0\x01\x01', 0)
+ self.assertDecode(0, 65535, 3, '\xB0\xff\xff', 0)
+ # Special case, if copy == 64KiB, then we store exactly 0
+ # Note that this puns with a copy of exactly 0 bytes, but we don't care
+ # about that, as we would never actually copy 0 bytes
+ self.assertDecode(0, 65536, 1, '\x80', 0)
+
+ def test_decode(self):
+ self.assertDecode(1, 1, 3, '\x91\x01\x01', 0)
+ self.assertDecode(9, 10, 3, '\x91\x09\x0a', 0)
+ self.assertDecode(254, 255, 3, '\x91\xfe\xff', 0)
+ self.assertDecode(512, 256, 3, '\xA2\x02\x01', 0)
+ self.assertDecode(258, 257, 5, '\xB3\x02\x01\x01\x01', 0)
+ self.assertDecode(0, 257, 3, '\xB0\x01\x01', 0)
+
+ def test_decode_not_start(self):
+ self.assertDecode(1, 1, 6, 'abc\x91\x01\x01def', 3)
+ self.assertDecode(9, 10, 5, 'ab\x91\x09\x0ade', 2)
+ self.assertDecode(254, 255, 6, 'not\x91\xfe\xffcopy', 3)
+
+
+class TestBase128Int(tests.TestCase):
+
+ scenarios = module_scenarios()
+
+ _gc_module = None # Set by load_tests
+
+ def assertEqualEncode(self, bytes, val):
+ self.assertEqual(bytes, self._gc_module.encode_base128_int(val))
+
+ def assertEqualDecode(self, val, num_decode, bytes):
+ self.assertEqual((val, num_decode),
+ self._gc_module.decode_base128_int(bytes))
+
+ def test_encode(self):
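+ # Base-128 varints store 7 bits per byte, least significant group first,
+ # with the high bit set on every byte except the last (so 128 encodes as
+ # '\x80\x01').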
+ self.assertEqualEncode('\x01', 1)
+ self.assertEqualEncode('\x02', 2)
+ self.assertEqualEncode('\x7f', 127)
+ self.assertEqualEncode('\x80\x01', 128)
+ self.assertEqualEncode('\xff\x01', 255)
+ self.assertEqualEncode('\x80\x02', 256)
+ self.assertEqualEncode('\xff\xff\xff\xff\x0f', 0xFFFFFFFF)
+
+ def test_decode(self):
+ self.assertEqualDecode(1, 1, '\x01')
+ self.assertEqualDecode(2, 1, '\x02')
+ self.assertEqualDecode(127, 1, '\x7f')
+ self.assertEqualDecode(128, 2, '\x80\x01')
+ self.assertEqualDecode(255, 2, '\xff\x01')
+ self.assertEqualDecode(256, 2, '\x80\x02')
+ self.assertEqualDecode(0xFFFFFFFF, 5, '\xff\xff\xff\xff\x0f')
+
+ def test_decode_with_trailing_bytes(self):
+ self.assertEqualDecode(1, 1, '\x01abcdef')
+ self.assertEqualDecode(127, 1, '\x7f\x01')
+ self.assertEqualDecode(128, 2, '\x80\x01abcdef')
+ self.assertEqualDecode(255, 2, '\xff\x01\xff')
+
+
diff --git a/bzrlib/tests/test__known_graph.py b/bzrlib/tests/test__known_graph.py
new file mode 100644
index 0000000..4f03ea8
--- /dev/null
+++ b/bzrlib/tests/test__known_graph.py
@@ -0,0 +1,915 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the python and pyrex extensions of KnownGraph"""
+
+import pprint
+
+from bzrlib import (
+ errors,
+ _known_graph_py,
+ tests,
+ )
+from bzrlib.tests import test_graph
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.tests import (
+ features,
+ )
+
+
+def caching_scenarios():
+ scenarios = [
+ ('python', {'module': _known_graph_py, 'do_cache': True}),
+ ]
+ if compiled_known_graph_feature.available():
+ scenarios.append(('C', {'module': compiled_known_graph_feature.module,
+ 'do_cache': True}))
+ return scenarios
+
+
+def non_caching_scenarios():
+ scenarios = [
+ ('python-nocache', {'module': _known_graph_py, 'do_cache': False}),
+ ]
+ if compiled_known_graph_feature.available():
+ scenarios.append(
+ ('C-nocache', {'module': compiled_known_graph_feature.module,
+ 'do_cache': False}))
+ return scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+compiled_known_graph_feature = features.ModuleAvailableFeature(
+ 'bzrlib._known_graph_pyx')
+
+
+# a
+# |\
+# b |
+# | |
+# c |
+# \|
+# d
+alt_merge = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['a', 'c']}
+
+
+class TestCaseWithKnownGraph(tests.TestCase):
+
+ scenarios = caching_scenarios()
+ module = None # Set by load_tests
+
+ def make_known_graph(self, ancestry):
+ return self.module.KnownGraph(ancestry, do_cache=self.do_cache)
+
+
+class TestKnownGraph(TestCaseWithKnownGraph):
+
+ def assertGDFO(self, graph, rev, gdfo):
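+ # gdfo ("greatest distance from origin") is 1 + the length of the longest
+ # parent chain below the node; roots and ghosts get 1.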
+ node = graph._nodes[rev]
+ self.assertEqual(gdfo, node.gdfo)
+
+ def test_children_ancestry1(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual(['rev1'], graph.get_child_keys(NULL_REVISION))
+ self.assertEqual(['rev2a', 'rev2b'],
+ sorted(graph.get_child_keys('rev1')))
+ self.assertEqual(['rev3'], graph.get_child_keys('rev2a'))
+ self.assertEqual(['rev4'], graph.get_child_keys('rev3'))
+ self.assertEqual(['rev4'], graph.get_child_keys('rev2b'))
+ self.assertRaises(KeyError, graph.get_child_keys, 'not_in_graph')
+
+ def test_parent_ancestry1(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual([NULL_REVISION], graph.get_parent_keys('rev1'))
+ self.assertEqual(['rev1'], graph.get_parent_keys('rev2a'))
+ self.assertEqual(['rev1'], graph.get_parent_keys('rev2b'))
+ self.assertEqual(['rev2a'], graph.get_parent_keys('rev3'))
+ self.assertEqual(['rev2b', 'rev3'],
+ sorted(graph.get_parent_keys('rev4')))
+ self.assertRaises(KeyError, graph.get_child_keys, 'not_in_graph')
+
+ def test_parent_with_ghost(self):
+ graph = self.make_known_graph(test_graph.with_ghost)
+ self.assertEqual(None, graph.get_parent_keys('g'))
+
+ def test_gdfo_ancestry_1(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertGDFO(graph, 'rev1', 2)
+ self.assertGDFO(graph, 'rev2b', 3)
+ self.assertGDFO(graph, 'rev2a', 3)
+ self.assertGDFO(graph, 'rev3', 4)
+ self.assertGDFO(graph, 'rev4', 5)
+
+ def test_gdfo_feature_branch(self):
+ graph = self.make_known_graph(test_graph.feature_branch)
+ self.assertGDFO(graph, 'rev1', 2)
+ self.assertGDFO(graph, 'rev2b', 3)
+ self.assertGDFO(graph, 'rev3b', 4)
+
+ def test_gdfo_extended_history_shortcut(self):
+ graph = self.make_known_graph(test_graph.extended_history_shortcut)
+ self.assertGDFO(graph, 'a', 2)
+ self.assertGDFO(graph, 'b', 3)
+ self.assertGDFO(graph, 'c', 4)
+ self.assertGDFO(graph, 'd', 5)
+ self.assertGDFO(graph, 'e', 6)
+ self.assertGDFO(graph, 'f', 6)
+
+ def test_gdfo_with_ghost(self):
+ graph = self.make_known_graph(test_graph.with_ghost)
+ self.assertGDFO(graph, 'f', 2)
+ self.assertGDFO(graph, 'e', 3)
+ self.assertGDFO(graph, 'g', 1)
+ self.assertGDFO(graph, 'b', 4)
+ self.assertGDFO(graph, 'd', 4)
+ self.assertGDFO(graph, 'a', 5)
+ self.assertGDFO(graph, 'c', 5)
+
+ def test_add_existing_node(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ # Add a node that already exists with identical content
+ # This is a 'no-op'
+ self.assertGDFO(graph, 'rev4', 5)
+ graph.add_node('rev4', ['rev3', 'rev2b'])
+ self.assertGDFO(graph, 'rev4', 5)
+ # This also works if we use a tuple rather than a list
+ graph.add_node('rev4', ('rev3', 'rev2b'))
+
+ def test_add_existing_node_mismatched_parents(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertRaises(ValueError, graph.add_node, 'rev4',
+ ['rev2b', 'rev3'])
+
+ def test_add_node_with_ghost_parent(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ graph.add_node('rev5', ['rev2b', 'revGhost'])
+ self.assertGDFO(graph, 'rev5', 4)
+ self.assertGDFO(graph, 'revGhost', 1)
+
+ def test_add_new_root(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ graph.add_node('rev5', [])
+ self.assertGDFO(graph, 'rev5', 1)
+
+ def test_add_with_all_ghost_parents(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ graph.add_node('rev5', ['ghost'])
+ self.assertGDFO(graph, 'rev5', 2)
+ self.assertGDFO(graph, 'ghost', 1)
+
+ def test_gdfo_after_add_node(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual([], graph.get_child_keys('rev4'))
+ graph.add_node('rev5', ['rev4'])
+ self.assertEqual(['rev4'], graph.get_parent_keys('rev5'))
+ self.assertEqual(['rev5'], graph.get_child_keys('rev4'))
+ self.assertEqual([], graph.get_child_keys('rev5'))
+ self.assertGDFO(graph, 'rev5', 6)
+ graph.add_node('rev6', ['rev2b'])
+ graph.add_node('rev7', ['rev6'])
+ graph.add_node('rev8', ['rev7', 'rev5'])
+ self.assertGDFO(graph, 'rev5', 6)
+ self.assertGDFO(graph, 'rev6', 4)
+ self.assertGDFO(graph, 'rev7', 5)
+ self.assertGDFO(graph, 'rev8', 7)
+
+ def test_fill_in_ghost(self):
+ graph = self.make_known_graph(test_graph.with_ghost)
+ # Add a couple of nodes and then fill in the 'ghost' so that it
+ # causes renumbering of the child nodes
+ graph.add_node('x', [])
+ graph.add_node('y', ['x'])
+ graph.add_node('z', ['y'])
+ graph.add_node('g', ['z'])
+ self.assertGDFO(graph, 'f', 2)
+ self.assertGDFO(graph, 'e', 3)
+ self.assertGDFO(graph, 'x', 1)
+ self.assertGDFO(graph, 'y', 2)
+ self.assertGDFO(graph, 'z', 3)
+ self.assertGDFO(graph, 'g', 4)
+ self.assertGDFO(graph, 'b', 4)
+ self.assertGDFO(graph, 'd', 5)
+ self.assertGDFO(graph, 'a', 5)
+ self.assertGDFO(graph, 'c', 6)
+
+
+class TestKnownGraphHeads(TestCaseWithKnownGraph):
+
+ scenarios = caching_scenarios() + non_caching_scenarios()
+ do_cache = None # Set by load_tests
+
+ def test_heads_null(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual(set(['null:']), graph.heads(['null:']))
+ self.assertEqual(set(['rev1']), graph.heads(['null:', 'rev1']))
+ self.assertEqual(set(['rev1']), graph.heads(['rev1', 'null:']))
+ self.assertEqual(set(['rev1']), graph.heads(set(['rev1', 'null:'])))
+ self.assertEqual(set(['rev1']), graph.heads(('rev1', 'null:')))
+
+ def test_heads_one(self):
+ # A single node will always be a head
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual(set(['null:']), graph.heads(['null:']))
+ self.assertEqual(set(['rev1']), graph.heads(['rev1']))
+ self.assertEqual(set(['rev2a']), graph.heads(['rev2a']))
+ self.assertEqual(set(['rev2b']), graph.heads(['rev2b']))
+ self.assertEqual(set(['rev3']), graph.heads(['rev3']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev4']))
+
+ def test_heads_single(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual(set(['rev4']), graph.heads(['null:', 'rev4']))
+ self.assertEqual(set(['rev2a']), graph.heads(['rev1', 'rev2a']))
+ self.assertEqual(set(['rev2b']), graph.heads(['rev1', 'rev2b']))
+ self.assertEqual(set(['rev3']), graph.heads(['rev1', 'rev3']))
+ self.assertEqual(set(['rev3']), graph.heads(['rev3', 'rev2a']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev1', 'rev4']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev2a', 'rev4']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev2b', 'rev4']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev3', 'rev4']))
+
+ def test_heads_two_heads(self):
+ graph = self.make_known_graph(test_graph.ancestry_1)
+ self.assertEqual(set(['rev2a', 'rev2b']),
+ graph.heads(['rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3', 'rev2b']),
+ graph.heads(['rev3', 'rev2b']))
+
+ def test_heads_criss_cross(self):
+ graph = self.make_known_graph(test_graph.criss_cross)
+ self.assertEqual(set(['rev2a']),
+ graph.heads(['rev2a', 'rev1']))
+ self.assertEqual(set(['rev2b']),
+ graph.heads(['rev2b', 'rev1']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev1']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev1']))
+ self.assertEqual(set(['rev2a', 'rev2b']),
+ graph.heads(['rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev2a']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev2b']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev2a']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev2b']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev3a', 'rev3b']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev3a', 'rev3b', 'rev2a', 'rev2b']))
+
+ def test_heads_shortcut(self):
+ graph = self.make_known_graph(test_graph.history_shortcut)
+ self.assertEqual(set(['rev2a', 'rev2b', 'rev2c']),
+ graph.heads(['rev2a', 'rev2b', 'rev2c']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev3a', 'rev3b']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev2a', 'rev3a', 'rev3b']))
+ self.assertEqual(set(['rev2a', 'rev3b']),
+ graph.heads(['rev2a', 'rev3b']))
+ self.assertEqual(set(['rev2c', 'rev3a']),
+ graph.heads(['rev2c', 'rev3a']))
+
+ def test_heads_linear(self):
+ graph = self.make_known_graph(test_graph.racing_shortcuts)
+ self.assertEqual(set(['w']), graph.heads(['w', 's']))
+ self.assertEqual(set(['z']), graph.heads(['w', 's', 'z']))
+ self.assertEqual(set(['w', 'q']), graph.heads(['w', 's', 'q']))
+ self.assertEqual(set(['z']), graph.heads(['s', 'z']))
+
+ def test_heads_alt_merge(self):
+ graph = self.make_known_graph(alt_merge)
+ self.assertEqual(set(['c']), graph.heads(['a', 'c']))
+
+ def test_heads_with_ghost(self):
+ graph = self.make_known_graph(test_graph.with_ghost)
+ self.assertEqual(set(['e', 'g']), graph.heads(['e', 'g']))
+ self.assertEqual(set(['a', 'c']), graph.heads(['a', 'c']))
+ self.assertEqual(set(['a', 'g']), graph.heads(['a', 'g']))
+ self.assertEqual(set(['f', 'g']), graph.heads(['f', 'g']))
+ self.assertEqual(set(['c']), graph.heads(['c', 'g']))
+ self.assertEqual(set(['c']), graph.heads(['c', 'b', 'd', 'g']))
+ self.assertEqual(set(['a', 'c']), graph.heads(['a', 'c', 'e', 'g']))
+ self.assertEqual(set(['a', 'c']), graph.heads(['a', 'c', 'f']))
+
+ def test_filling_in_ghosts_resets_head_cache(self):
+ graph = self.make_known_graph(test_graph.with_ghost)
+ self.assertEqual(set(['e', 'g']), graph.heads(['e', 'g']))
+ # 'g' is filled in, and descends from 'e', so the heads result is now
+ # different
+ graph.add_node('g', ['e'])
+ self.assertEqual(set(['g']), graph.heads(['e', 'g']))
+
+
+class TestKnownGraphTopoSort(TestCaseWithKnownGraph):
+
+ def assertTopoSortOrder(self, ancestry):
+ """Check topo_sort and iter_topo_order is genuinely topological order.
+
+ For every child in the graph, check if it comes after all of it's
+ parents.
+ """
+ graph = self.make_known_graph(ancestry)
+ sort_result = graph.topo_sort()
+ # We should have an entry in sort_result for every entry present in the
+ # graph.
+ self.assertEqual(len(ancestry), len(sort_result))
+ node_idx = dict((node, idx) for idx, node in enumerate(sort_result))
+ for node in sort_result:
+ parents = ancestry[node]
+ for parent in parents:
+ if parent not in ancestry:
+ # ghost
+ continue
+ if node_idx[node] <= node_idx[parent]:
+ self.fail("parent %s must come before child %s:\n%s"
+ % (parent, node, sort_result))
+
+ def test_topo_sort_empty(self):
+ """TopoSort empty list"""
+ self.assertTopoSortOrder({})
+
+ def test_topo_sort_easy(self):
+ """TopoSort list with one node"""
+ self.assertTopoSortOrder({0: []})
+
+ def test_topo_sort_cycle(self):
+ """TopoSort traps graph with cycles"""
+ g = self.make_known_graph({0: [1],
+ 1: [0]})
+ self.assertRaises(errors.GraphCycleError, g.topo_sort)
+
+ def test_topo_sort_cycle_2(self):
+ """TopoSort traps graph with longer cycle"""
+ g = self.make_known_graph({0: [1],
+ 1: [2],
+ 2: [0]})
+ self.assertRaises(errors.GraphCycleError, g.topo_sort)
+
+ def test_topo_sort_cycle_with_tail(self):
+ """TopoSort traps graph with longer cycle"""
+ g = self.make_known_graph({0: [1],
+ 1: [2],
+ 2: [3, 4],
+ 3: [0],
+ 4: []})
+ self.assertRaises(errors.GraphCycleError, g.topo_sort)
+
+ def test_topo_sort_1(self):
+ """TopoSort simple nontrivial graph"""
+ self.assertTopoSortOrder({0: [3],
+ 1: [4],
+ 2: [1, 4],
+ 3: [],
+ 4: [0, 3]})
+
+ def test_topo_sort_partial(self):
+ """Topological sort with partial ordering.
+
+ Multiple correct orderings are possible, so test for
+ correctness, not for exact match on the resulting list.
+ """
+ self.assertTopoSortOrder({0: [],
+ 1: [0],
+ 2: [0],
+ 3: [0],
+ 4: [1, 2, 3],
+ 5: [1, 2],
+ 6: [1, 2],
+ 7: [2, 3],
+ 8: [0, 1, 4, 5, 6]})
+
+ def test_topo_sort_ghost_parent(self):
+ """Sort nodes, but don't include some parents in the output"""
+ self.assertTopoSortOrder({0: [1],
+ 1: [2]})
+
+
+class TestKnownGraphMergeSort(TestCaseWithKnownGraph):
+
+ def assertSortAndIterate(self, ancestry, branch_tip, result_list):
+ """Check that merge based sorting and iter_topo_order on graph works."""
+ graph = self.make_known_graph(ancestry)
+ value = graph.merge_sort(branch_tip)
+ value = [(n.key, n.merge_depth, n.revno, n.end_of_merge)
+ for n in value]
+ if result_list != value:
+ self.assertEqualDiff(pprint.pformat(result_list),
+ pprint.pformat(value))
+
+ def test_merge_sort_empty(self):
+ # sorting of an empty graph does not error
+ self.assertSortAndIterate({}, None, [])
+ self.assertSortAndIterate({}, NULL_REVISION, [])
+ self.assertSortAndIterate({}, (NULL_REVISION,), [])
+
+ def test_merge_sort_not_empty_no_tip(self):
+ # merge sorting of a branch starting with None should result
+ # in an empty list: no revisions are dragged in.
+ self.assertSortAndIterate({0: []}, None, [])
+ self.assertSortAndIterate({0: []}, NULL_REVISION, [])
+ self.assertSortAndIterate({0: []}, (NULL_REVISION,), [])
+
+ def test_merge_sort_one_revision(self):
+ # sorting with one revision as the tip returns the correct fields:
+ # revision id, merge depth 0, revno (1,), end_of_merge True
+ self.assertSortAndIterate({'id': []},
+ 'id',
+ [('id', 0, (1,), True)])
+
+ def test_sequence_numbers_increase_no_merges(self):
+ # emit a few revisions with no merges to check the sequence
+ # numbering works in trivial cases
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['B']},
+ 'C',
+ [('C', 0, (3,), False),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ],
+ )
+
+ def test_sequence_numbers_increase_with_merges(self):
+ # test that sequence numbers increase across merges
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A', 'B']},
+ 'C',
+ [('C', 0, (2,), False),
+ ('B', 1, (1,1,1), True),
+ ('A', 0, (1,), True),
+ ],
+ )
+
+ def test_merge_sort_race(self):
+ # A
+ # |
+ # B-.
+ # |\ \
+ # | | C
+ # | |/
+ # | D
+ # |/
+ # F
+ graph = {'A': [],
+ 'B': ['A'],
+ 'C': ['B'],
+ 'D': ['B', 'C'],
+ 'F': ['B', 'D'],
+ }
+ self.assertSortAndIterate(graph, 'F',
+ [('F', 0, (3,), False),
+ ('D', 1, (2,2,1), False),
+ ('C', 2, (2,1,1), True),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ])
+ # A
+ # |
+ # B-.
+ # |\ \
+ # | X C
+ # | |/
+ # | D
+ # |/
+ # F
+ graph = {'A': [],
+ 'B': ['A'],
+ 'C': ['B'],
+ 'X': ['B'],
+ 'D': ['X', 'C'],
+ 'F': ['B', 'D'],
+ }
+ self.assertSortAndIterate(graph, 'F',
+ [('F', 0, (3,), False),
+ ('D', 1, (2,1,2), False),
+ ('C', 2, (2,2,1), True),
+ ('X', 1, (2,1,1), True),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ])
+
+ def test_merge_depth_with_nested_merges(self):
+ # the merge depth marker should reflect the depth of the revision
+ # in terms of merges out from the mainline
+ # revid, depth, parents:
+ # A 0 [D, B]
+ # B 1 [C, F]
+ # C 1 [H]
+ # D 0 [H, E]
+ # E 1 [G, F]
+ # F 2 [G]
+ # G 1 [H]
+ # H 0
+ self.assertSortAndIterate(
+ {'A': ['D', 'B'],
+ 'B': ['C', 'F'],
+ 'C': ['H'],
+ 'D': ['H', 'E'],
+ 'E': ['G', 'F'],
+ 'F': ['G'],
+ 'G': ['H'],
+ 'H': []
+ },
+ 'A',
+ [('A', 0, (3,), False),
+ ('B', 1, (1,3,2), False),
+ ('C', 1, (1,3,1), True),
+ ('D', 0, (2,), False),
+ ('E', 1, (1,1,2), False),
+ ('F', 2, (1,2,1), True),
+ ('G', 1, (1,1,1), True),
+ ('H', 0, (1,), True),
+ ],
+ )
+
+ def test_dotted_revnos_with_simple_merges(self):
+ # A 1
+ # |\
+ # B C 2, 1.1.1
+ # | |\
+ # D E F 3, 1.1.2, 1.2.1
+ # |/ /|
+ # G H I 4, 1.2.2, 1.3.1
+ # |/ /
+ # J K 5, 1.3.2
+ # |/
+ # L 6
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A'],
+ 'D': ['B'],
+ 'E': ['C'],
+ 'F': ['C'],
+ 'G': ['D', 'E'],
+ 'H': ['F'],
+ 'I': ['F'],
+ 'J': ['G', 'H'],
+ 'K': ['I'],
+ 'L': ['J', 'K'],
+ },
+ 'L',
+ [('L', 0, (6,), False),
+ ('K', 1, (1,3,2), False),
+ ('I', 1, (1,3,1), True),
+ ('J', 0, (5,), False),
+ ('H', 1, (1,2,2), False),
+ ('F', 1, (1,2,1), True),
+ ('G', 0, (4,), False),
+ ('E', 1, (1,1,2), False),
+ ('C', 1, (1,1,1), True),
+ ('D', 0, (3,), False),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ],
+ )
+ # Adding a shortcut from the first revision should not change any of
+ # the existing numbers
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A'],
+ 'D': ['B'],
+ 'E': ['C'],
+ 'F': ['C'],
+ 'G': ['D', 'E'],
+ 'H': ['F'],
+ 'I': ['F'],
+ 'J': ['G', 'H'],
+ 'K': ['I'],
+ 'L': ['J', 'K'],
+ 'M': ['A'],
+ 'N': ['L', 'M'],
+ },
+ 'N',
+ [('N', 0, (7,), False),
+ ('M', 1, (1,4,1), True),
+ ('L', 0, (6,), False),
+ ('K', 1, (1,3,2), False),
+ ('I', 1, (1,3,1), True),
+ ('J', 0, (5,), False),
+ ('H', 1, (1,2,2), False),
+ ('F', 1, (1,2,1), True),
+ ('G', 0, (4,), False),
+ ('E', 1, (1,1,2), False),
+ ('C', 1, (1,1,1), True),
+ ('D', 0, (3,), False),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ],
+ )
+
+ def test_end_of_merge_not_last_revision_in_branch(self):
+ # within a branch only the last revision gets an
+ # end of merge marker.
+ self.assertSortAndIterate(
+ {'A': ['B'],
+ 'B': [],
+ },
+ 'A',
+ [('A', 0, (2,), False),
+ ('B', 0, (1,), True)
+ ],
+ )
+
+ def test_end_of_merge_multiple_revisions_merged_at_once(self):
+ # when multiple branches are merged at once, both of their
+ # branch-endpoints should be listed as end-of-merge.
+ # Also, the multiple merges should be shown in left-to-right order,
+ # top to bottom.
+ # * means end of merge
+ # A 0 [H, B, E]
+ # B 1 [D, C]
+ # C 2 [D] *
+ # D 1 [H] *
+ # E 1 [G, F]
+ # F 2 [G] *
+ # G 1 [H] *
+ # H 0 [] *
+ self.assertSortAndIterate(
+ {'A': ['H', 'B', 'E'],
+ 'B': ['D', 'C'],
+ 'C': ['D'],
+ 'D': ['H'],
+ 'E': ['G', 'F'],
+ 'F': ['G'],
+ 'G': ['H'],
+ 'H': [],
+ },
+ 'A',
+ [('A', 0, (2,), False),
+ ('B', 1, (1,3,2), False),
+ ('C', 2, (1,4,1), True),
+ ('D', 1, (1,3,1), True),
+ ('E', 1, (1,1,2), False),
+ ('F', 2, (1,2,1), True),
+ ('G', 1, (1,1,1), True),
+ ('H', 0, (1,), True),
+ ],
+ )
+
+ def test_parallel_root_sequence_numbers_increase_with_merges(self):
+ """When there are parallel roots, check their revnos."""
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': [],
+ 'C': ['A', 'B']},
+ 'C',
+ [('C', 0, (2,), False),
+ ('B', 1, (0,1,1), True),
+ ('A', 0, (1,), True),
+ ],
+ )
+
+ def test_revnos_are_globally_assigned(self):
+ """revnos are assigned according to the revision they derive from."""
+        # in this test we set up a number of branches that all derive from
+        # the first revision, and then merge them one at a time. The merged
+        # revisions should get numbers that still derive from the revision
+        # they were based on.
+ # merge 3: J: ['G', 'I']
+ # branch 3:
+ # I: ['H']
+ # H: ['A']
+ # merge 2: G: ['D', 'F']
+ # branch 2:
+ # F: ['E']
+ # E: ['A']
+ # merge 1: D: ['A', 'C']
+ # branch 1:
+ # C: ['B']
+ # B: ['A']
+ # root: A: []
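+        # (Reading the expected values below, a dotted revno is
+        # <mainline revno of the base>.<branch counter>.<position in branch>:
+        # e.g. branch 2 forks from A (revno 1) and is the second branch
+        # merged, so E and F get 1.2.1 and 1.2.2.)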
+ self.assertSortAndIterate(
+ {'J': ['G', 'I'],
+ 'I': ['H',],
+ 'H': ['A'],
+ 'G': ['D', 'F'],
+ 'F': ['E'],
+ 'E': ['A'],
+ 'D': ['A', 'C'],
+ 'C': ['B'],
+ 'B': ['A'],
+ 'A': [],
+ },
+ 'J',
+ [('J', 0, (4,), False),
+ ('I', 1, (1,3,2), False),
+ ('H', 1, (1,3,1), True),
+ ('G', 0, (3,), False),
+ ('F', 1, (1,2,2), False),
+ ('E', 1, (1,2,1), True),
+ ('D', 0, (2,), False),
+ ('C', 1, (1,1,2), False),
+ ('B', 1, (1,1,1), True),
+ ('A', 0, (1,), True),
+ ],
+ )
+
+ def test_roots_and_sub_branches_versus_ghosts(self):
+ """Extra roots and their mini branches use the same numbering.
+
+        All of them get dotted revnos whose first element is 0.
+ """
+ # A D K
+ # | |\ |\
+ # B E F L M
+ # | |/ |/
+ # C G N
+ # |/ |\
+ # H I O P
+ # |/ |/
+ # J Q
+ # |.---'
+ # R
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['B'],
+ 'D': [],
+ 'E': ['D'],
+ 'F': ['D'],
+ 'G': ['E', 'F'],
+ 'H': ['C', 'G'],
+ 'I': [],
+ 'J': ['H', 'I'],
+ 'K': [],
+ 'L': ['K'],
+ 'M': ['K'],
+ 'N': ['L', 'M'],
+ 'O': ['N'],
+ 'P': ['N'],
+ 'Q': ['O', 'P'],
+ 'R': ['J', 'Q'],
+ },
+ 'R',
+ [('R', 0, (6,), False),
+ ('Q', 1, (0,4,5), False),
+ ('P', 2, (0,6,1), True),
+ ('O', 1, (0,4,4), False),
+ ('N', 1, (0,4,3), False),
+ ('M', 2, (0,5,1), True),
+ ('L', 1, (0,4,2), False),
+ ('K', 1, (0,4,1), True),
+ ('J', 0, (5,), False),
+ ('I', 1, (0,3,1), True),
+ ('H', 0, (4,), False),
+ ('G', 1, (0,1,3), False),
+ ('F', 2, (0,2,1), True),
+ ('E', 1, (0,1,2), False),
+ ('D', 1, (0,1,1), True),
+ ('C', 0, (3,), False),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ],
+ )
+
+ def test_ghost(self):
+ # merge_sort should be able to ignore ghosts
+ # A
+ # |
+ # B ghost
+ # |/
+ # C
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['B', 'ghost'],
+ },
+ 'C',
+ [('C', 0, (3,), False),
+ ('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ])
+
+ def test_lefthand_ghost(self):
+ # ghost
+ # |
+ # A
+ # |
+ # B
+ self.assertSortAndIterate(
+ {'A': ['ghost'],
+ 'B': ['A'],
+ }, 'B',
+ [('B', 0, (2,), False),
+ ('A', 0, (1,), True),
+ ])
+
+ def test_graph_cycle(self):
+ # merge_sort should fail with a simple error when a graph cycle is
+ # encountered.
+ #
+ # A
+ # |,-.
+ # B |
+ # | |
+ # C ^
+ # | |
+ # D |
+ # |'-'
+ # E
+ self.assertRaises(errors.GraphCycleError,
+ self.assertSortAndIterate,
+ {'A': [],
+ 'B': ['D'],
+ 'C': ['B'],
+ 'D': ['C'],
+ 'E': ['D'],
+ },
+ 'E',
+ [])
+
+
+class TestKnownGraphStableReverseTopoSort(TestCaseWithKnownGraph):
+ """Test the sort order returned by gc_sort."""
+
+ def assertSorted(self, expected, parent_map):
+ graph = self.make_known_graph(parent_map)
+ value = graph.gc_sort()
+ if expected != value:
+ self.assertEqualDiff(pprint.pformat(expected),
+ pprint.pformat(value))
+
+ def test_empty(self):
+ self.assertSorted([], {})
+
+ def test_single(self):
+ self.assertSorted(['a'], {'a':()})
+ self.assertSorted([('a',)], {('a',):()})
+ self.assertSorted([('F', 'a')], {('F', 'a'):()})
+
+ def test_linear(self):
+ self.assertSorted(['c', 'b', 'a'], {'a':(), 'b':('a',), 'c':('b',)})
+ self.assertSorted([('c',), ('b',), ('a',)],
+ {('a',):(), ('b',): (('a',),), ('c',): (('b',),)})
+ self.assertSorted([('F', 'c'), ('F', 'b'), ('F', 'a')],
+ {('F', 'a'):(), ('F', 'b'): (('F', 'a'),),
+ ('F', 'c'): (('F', 'b'),)})
+
+ def test_mixed_ancestries(self):
+ # Each prefix should be sorted separately
+ self.assertSorted([('F', 'c'), ('F', 'b'), ('F', 'a'),
+ ('G', 'c'), ('G', 'b'), ('G', 'a'),
+ ('Q', 'c'), ('Q', 'b'), ('Q', 'a'),
+ ],
+ {('F', 'a'):(), ('F', 'b'): (('F', 'a'),),
+ ('F', 'c'): (('F', 'b'),),
+ ('G', 'a'):(), ('G', 'b'): (('G', 'a'),),
+ ('G', 'c'): (('G', 'b'),),
+ ('Q', 'a'):(), ('Q', 'b'): (('Q', 'a'),),
+ ('Q', 'c'): (('Q', 'b'),),
+ })
+
+ def test_stable_sorting(self):
+ # the sort order should be stable even when extra nodes are added
+ self.assertSorted(['b', 'c', 'a'],
+ {'a':(), 'b':('a',), 'c':('a',)})
+ self.assertSorted(['b', 'c', 'd', 'a'],
+ {'a':(), 'b':('a',), 'c':('a',), 'd':('a',)})
+ self.assertSorted(['b', 'c', 'd', 'a'],
+ {'a':(), 'b':('a',), 'c':('a',), 'd':('a',)})
+ self.assertSorted(['Z', 'b', 'c', 'd', 'a'],
+ {'a':(), 'b':('a',), 'c':('a',), 'd':('a',),
+ 'Z':('a',)})
+ self.assertSorted(['e', 'b', 'c', 'f', 'Z', 'd', 'a'],
+ {'a':(), 'b':('a',), 'c':('a',), 'd':('a',),
+ 'Z':('a',),
+ 'e':('b', 'c', 'd'),
+ 'f':('d', 'Z'),
+ })
+
+ def test_skip_ghost(self):
+ self.assertSorted(['b', 'c', 'a'],
+ {'a':(), 'b':('a', 'ghost'), 'c':('a',)})
+
+ def test_skip_mainline_ghost(self):
+ self.assertSorted(['b', 'c', 'a'],
+ {'a':(), 'b':('ghost', 'a'), 'c':('a',)})
diff --git a/bzrlib/tests/test__rio.py b/bzrlib/tests/test__rio.py
new file mode 100644
index 0000000..ef03361
--- /dev/null
+++ b/bzrlib/tests/test__rio.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for _rio_*."""
+
+from bzrlib import (
+ rio,
+ tests,
+ )
+
+
+def load_tests(standard_tests, module, loader):
+ suite, _ = tests.permute_tests_for_extension(standard_tests, loader,
+ 'bzrlib._rio_py', 'bzrlib._rio_pyx')
+ return suite
+
+
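+# A rio stanza is a sequence of "tag: value\n" lines; multi-line values
+# continue on lines that start with a single tab, and tags must be plain
+# byte strings without spaces, colons or non-ASCII characters, as the
+# tests below exercise.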
+class TestValidTag(tests.TestCase):
+
+ module = None # Filled in by test parameterization
+
+ def test_ok(self):
+ self.assertTrue(self.module._valid_tag("foo"))
+
+ def test_no_spaces(self):
+ self.assertFalse(self.module._valid_tag("foo bla"))
+
+ def test_numeric(self):
+ self.assertTrue(self.module._valid_tag("3foo423"))
+
+ def test_no_colon(self):
+ self.assertFalse(self.module._valid_tag("foo:bla"))
+
+ def test_type_error(self):
+ self.assertRaises(TypeError, self.module._valid_tag, 423)
+
+ def test_empty(self):
+ self.assertFalse(self.module._valid_tag(""))
+
+ def test_unicode(self):
+ self.assertRaises(TypeError, self.module._valid_tag, u"foo")
+
+ def test_non_ascii_char(self):
+ self.assertFalse(self.module._valid_tag("\xb5"))
+
+
+class TestReadUTF8Stanza(tests.TestCase):
+
+ module = None # Filled in by test parameterization
+
+ def assertReadStanza(self, result, line_iter):
+ s = self.module._read_stanza_utf8(line_iter)
+ self.assertEquals(result, s)
+ if s is not None:
+ for tag, value in s.iter_pairs():
+ self.assertIsInstance(tag, str)
+ self.assertIsInstance(value, unicode)
+
+ def assertReadStanzaRaises(self, exception, line_iter):
+ self.assertRaises(exception, self.module._read_stanza_utf8, line_iter)
+
+ def test_no_string(self):
+ self.assertReadStanzaRaises(TypeError, [21323])
+
+ def test_empty(self):
+ self.assertReadStanza(None, [])
+
+ def test_none(self):
+ self.assertReadStanza(None, [""])
+
+ def test_simple(self):
+ self.assertReadStanza(rio.Stanza(foo="bar"), ["foo: bar\n", ""])
+
+ def test_multi_line(self):
+ self.assertReadStanza(rio.Stanza(foo="bar\nbla"),
+ ["foo: bar\n", "\tbla\n"])
+
+ def test_repeated(self):
+ s = rio.Stanza()
+ s.add("foo", "bar")
+ s.add("foo", "foo")
+ self.assertReadStanza(s, ["foo: bar\n", "foo: foo\n"])
+
+ def test_invalid_early_colon(self):
+ self.assertReadStanzaRaises(ValueError, ["f:oo: bar\n"])
+
+ def test_invalid_tag(self):
+ self.assertReadStanzaRaises(ValueError, ["f%oo: bar\n"])
+
+ def test_continuation_too_early(self):
+ self.assertReadStanzaRaises(ValueError, ["\tbar\n"])
+
+ def test_large(self):
+ value = "bla" * 9000
+ self.assertReadStanza(rio.Stanza(foo=value),
+ ["foo: %s\n" % value])
+
+ def test_non_ascii_char(self):
+ self.assertReadStanza(rio.Stanza(foo=u"n\xe5me"),
+ [u"foo: n\xe5me\n".encode("utf-8")])
+
+
+class TestReadUnicodeStanza(tests.TestCase):
+
+ module = None # Filled in by test parameterization
+
+ def assertReadStanza(self, result, line_iter):
+ s = self.module._read_stanza_unicode(line_iter)
+ self.assertEquals(result, s)
+ if s is not None:
+ for tag, value in s.iter_pairs():
+ self.assertIsInstance(tag, str)
+ self.assertIsInstance(value, unicode)
+
+ def assertReadStanzaRaises(self, exception, line_iter):
+ self.assertRaises(exception, self.module._read_stanza_unicode,
+ line_iter)
+
+ def test_no_string(self):
+ self.assertReadStanzaRaises(TypeError, [21323])
+
+ def test_empty(self):
+ self.assertReadStanza(None, [])
+
+ def test_none(self):
+ self.assertReadStanza(None, [u""])
+
+ def test_simple(self):
+ self.assertReadStanza(rio.Stanza(foo="bar"), [u"foo: bar\n", u""])
+
+ def test_multi_line(self):
+ self.assertReadStanza(rio.Stanza(foo="bar\nbla"),
+ [u"foo: bar\n", u"\tbla\n"])
+
+ def test_repeated(self):
+ s = rio.Stanza()
+ s.add("foo", "bar")
+ s.add("foo", "foo")
+ self.assertReadStanza(s, [u"foo: bar\n", u"foo: foo\n"])
+
+ def test_invalid_early_colon(self):
+ self.assertReadStanzaRaises(ValueError, [u"f:oo: bar\n"])
+
+ def test_invalid_tag(self):
+ self.assertReadStanzaRaises(ValueError, [u"f%oo: bar\n"])
+
+ def test_continuation_too_early(self):
+ self.assertReadStanzaRaises(ValueError, [u"\tbar\n"])
+
+ def test_large(self):
+ value = u"bla" * 9000
+ self.assertReadStanza(rio.Stanza(foo=value),
+ [u"foo: %s\n" % value])
+
+ def test_non_ascii_char(self):
+ self.assertReadStanza(rio.Stanza(foo=u"n\xe5me"), [u"foo: n\xe5me\n"])
diff --git a/bzrlib/tests/test__simple_set.py b/bzrlib/tests/test__simple_set.py
new file mode 100644
index 0000000..3e9b6c4
--- /dev/null
+++ b/bzrlib/tests/test__simple_set.py
@@ -0,0 +1,392 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the StaticTupleInterned type."""
+
+import sys
+
+from bzrlib import (
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+try:
+ from bzrlib import _simple_set_pyx
+except ImportError:
+ _simple_set_pyx = None
+
+
+class _Hashable(object):
+ """A simple object which has a fixed hash value.
+
+ We could have used an 'int', but it turns out that Int objects don't
+ implement tp_richcompare...
+ """
+
+ def __init__(self, the_hash):
+ self.hash = the_hash
+
+ def __hash__(self):
+ return self.hash
+
+ def __eq__(self, other):
+ if not isinstance(other, _Hashable):
+ return NotImplemented
+ return other.hash == self.hash
+
+
+class _BadSecondHash(_Hashable):
+
+ def __init__(self, the_hash):
+ _Hashable.__init__(self, the_hash)
+ self._first = True
+
+ def __hash__(self):
+ if self._first:
+ self._first = False
+ return self.hash
+ else:
+ raise ValueError('I can only be hashed once.')
+
+
+class _BadCompare(_Hashable):
+
+ def __eq__(self, other):
+ raise RuntimeError('I refuse to play nice')
+
+
+class _NoImplementCompare(_Hashable):
+
+ def __eq__(self, other):
+ return NotImplemented
+
+
+# Even though this is an extension, we don't permute the tests for a python
+# version, as the plain python version is just a dict or set.
+compiled_simpleset_feature = features.ModuleAvailableFeature(
+ 'bzrlib._simple_set_pyx')
+
+
+class TestSimpleSet(tests.TestCase):
+
+ _test_needs_features = [compiled_simpleset_feature]
+ module = _simple_set_pyx
+
+ def assertIn(self, obj, container):
+ self.assertTrue(obj in container,
+ '%s not found in %s' % (obj, container))
+
+ def assertNotIn(self, obj, container):
+ self.assertTrue(obj not in container,
+ 'We found %s in %s' % (obj, container))
+
+ def assertFillState(self, used, fill, mask, obj):
+ self.assertEqual((used, fill, mask), (obj.used, obj.fill, obj.mask))
+
+ def assertLookup(self, offset, value, obj, key):
+ self.assertEqual((offset, value), obj._test_lookup(key))
+
+ def assertRefcount(self, count, obj):
+ """Assert that the refcount for obj is what we expect.
+
+ Note that this automatically adjusts for the fact that calling
+ assertRefcount actually creates a new pointer, as does calling
+ sys.getrefcount. So pass the expected value *before* the call.
+ """
+        # I'm not sure why the offset is 3, but I've checked that in the
+        # caller an offset of 1 works, which is expected. Not sure why
+        # assertRefcount ends up incrementing/decrementing the count twice.
+ self.assertEqual(count, sys.getrefcount(obj)-3)
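+        # (Note that sys.getrefcount() itself reports one extra reference for
+        # the temporary created by passing obj as its argument, so a freshly
+        # created object bound to a single local name typically reports 2.)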
+
+ def test_initial(self):
+ obj = self.module.SimpleSet()
+ self.assertEqual(0, len(obj))
+ st = ('foo', 'bar')
+ self.assertFillState(0, 0, 0x3ff, obj)
+
+ def test__lookup(self):
+ # These are carefully chosen integers to force hash collisions in the
+ # algorithm, based on the initial set size of 1024
+ obj = self.module.SimpleSet()
+ self.assertLookup(643, '<null>', obj, _Hashable(643))
+ self.assertLookup(643, '<null>', obj, _Hashable(643 + 1024))
+ self.assertLookup(643, '<null>', obj, _Hashable(643 + 50*1024))
+
+ def test__lookup_collision(self):
+ obj = self.module.SimpleSet()
+ k1 = _Hashable(643)
+ k2 = _Hashable(643 + 1024)
+ self.assertLookup(643, '<null>', obj, k1)
+ self.assertLookup(643, '<null>', obj, k2)
+ obj.add(k1)
+ self.assertLookup(643, k1, obj, k1)
+ self.assertLookup(644, '<null>', obj, k2)
+
+ def test__lookup_after_resize(self):
+ obj = self.module.SimpleSet()
+ k1 = _Hashable(643)
+ k2 = _Hashable(643 + 1024)
+ obj.add(k1)
+ obj.add(k2)
+ self.assertLookup(643, k1, obj, k1)
+ self.assertLookup(644, k2, obj, k2)
+ obj._py_resize(2047) # resized to 2048
+ self.assertEqual(2048, obj.mask + 1)
+ self.assertLookup(643, k1, obj, k1)
+ self.assertLookup(643+1024, k2, obj, k2)
+ obj._py_resize(1023) # resized back to 1024
+ self.assertEqual(1024, obj.mask + 1)
+ self.assertLookup(643, k1, obj, k1)
+ self.assertLookup(644, k2, obj, k2)
+
+ def test_get_set_del_with_collisions(self):
+ obj = self.module.SimpleSet()
+
+ h1 = 643
+ h2 = 643 + 1024
+ h3 = 643 + 1024*50
+ h4 = 643 + 1024*25
+ h5 = 644
+ h6 = 644 + 1024
+
+ k1 = _Hashable(h1)
+ k2 = _Hashable(h2)
+ k3 = _Hashable(h3)
+ k4 = _Hashable(h4)
+ k5 = _Hashable(h5)
+ k6 = _Hashable(h6)
+ self.assertLookup(643, '<null>', obj, k1)
+ self.assertLookup(643, '<null>', obj, k2)
+ self.assertLookup(643, '<null>', obj, k3)
+ self.assertLookup(643, '<null>', obj, k4)
+ self.assertLookup(644, '<null>', obj, k5)
+ self.assertLookup(644, '<null>', obj, k6)
+ obj.add(k1)
+ self.assertIn(k1, obj)
+ self.assertNotIn(k2, obj)
+ self.assertNotIn(k3, obj)
+ self.assertNotIn(k4, obj)
+ self.assertLookup(643, k1, obj, k1)
+ self.assertLookup(644, '<null>', obj, k2)
+ self.assertLookup(644, '<null>', obj, k3)
+ self.assertLookup(644, '<null>', obj, k4)
+ self.assertLookup(644, '<null>', obj, k5)
+ self.assertLookup(644, '<null>', obj, k6)
+ self.assertIs(k1, obj[k1])
+ self.assertIs(k2, obj.add(k2))
+ self.assertIs(k2, obj[k2])
+ self.assertLookup(643, k1, obj, k1)
+ self.assertLookup(644, k2, obj, k2)
+ self.assertLookup(646, '<null>', obj, k3)
+ self.assertLookup(646, '<null>', obj, k4)
+ self.assertLookup(645, '<null>', obj, k5)
+ self.assertLookup(645, '<null>', obj, k6)
+ self.assertLookup(643, k1, obj, _Hashable(h1))
+ self.assertLookup(644, k2, obj, _Hashable(h2))
+ self.assertLookup(646, '<null>', obj, _Hashable(h3))
+ self.assertLookup(646, '<null>', obj, _Hashable(h4))
+ self.assertLookup(645, '<null>', obj, _Hashable(h5))
+ self.assertLookup(645, '<null>', obj, _Hashable(h6))
+ obj.add(k3)
+ self.assertIs(k3, obj[k3])
+ self.assertIn(k1, obj)
+ self.assertIn(k2, obj)
+ self.assertIn(k3, obj)
+ self.assertNotIn(k4, obj)
+
+ obj.discard(k1)
+ self.assertLookup(643, '<dummy>', obj, k1)
+ self.assertLookup(644, k2, obj, k2)
+ self.assertLookup(646, k3, obj, k3)
+ self.assertLookup(643, '<dummy>', obj, k4)
+ self.assertNotIn(k1, obj)
+ self.assertIn(k2, obj)
+ self.assertIn(k3, obj)
+ self.assertNotIn(k4, obj)
+
+ def test_add(self):
+ obj = self.module.SimpleSet()
+ self.assertFillState(0, 0, 0x3ff, obj)
+        # We use this clumsy notation because otherwise the refcounts are off.
+        # I'm guessing the python compiler treats a literal tuple as a
+        # constant and keeps it among the function's constants, or some such.
+ k1 = tuple(['foo'])
+ self.assertRefcount(1, k1)
+ self.assertIs(k1, obj.add(k1))
+ self.assertFillState(1, 1, 0x3ff, obj)
+ self.assertRefcount(2, k1)
+ ktest = obj[k1]
+ self.assertRefcount(3, k1)
+ self.assertIs(k1, ktest)
+ del ktest
+ self.assertRefcount(2, k1)
+ k2 = tuple(['foo'])
+ self.assertRefcount(1, k2)
+ self.assertIsNot(k1, k2)
+ # doesn't add anything, so the counters shouldn't be adjusted
+ self.assertIs(k1, obj.add(k2))
+ self.assertFillState(1, 1, 0x3ff, obj)
+ self.assertRefcount(2, k1) # not changed
+ self.assertRefcount(1, k2) # not incremented
+ self.assertIs(k1, obj[k1])
+ self.assertIs(k1, obj[k2])
+ self.assertRefcount(2, k1)
+ self.assertRefcount(1, k2)
+ # Deleting an entry should remove the fill, but not the used
+ obj.discard(k1)
+ self.assertFillState(0, 1, 0x3ff, obj)
+ self.assertRefcount(1, k1)
+ k3 = tuple(['bar'])
+ self.assertRefcount(1, k3)
+ self.assertIs(k3, obj.add(k3))
+ self.assertFillState(1, 2, 0x3ff, obj)
+ self.assertRefcount(2, k3)
+ self.assertIs(k2, obj.add(k2))
+ self.assertFillState(2, 2, 0x3ff, obj)
+ self.assertRefcount(1, k1)
+ self.assertRefcount(2, k2)
+ self.assertRefcount(2, k3)
+
+ def test_discard(self):
+ obj = self.module.SimpleSet()
+ k1 = tuple(['foo'])
+ k2 = tuple(['foo'])
+ k3 = tuple(['bar'])
+ self.assertRefcount(1, k1)
+ self.assertRefcount(1, k2)
+ self.assertRefcount(1, k3)
+ obj.add(k1)
+ self.assertRefcount(2, k1)
+ self.assertEqual(0, obj.discard(k3))
+ self.assertRefcount(1, k3)
+ obj.add(k3)
+ self.assertRefcount(2, k3)
+ self.assertEqual(1, obj.discard(k3))
+ self.assertRefcount(1, k3)
+
+ def test__resize(self):
+ obj = self.module.SimpleSet()
+ k1 = ('foo',)
+ k2 = ('bar',)
+ k3 = ('baz',)
+ obj.add(k1)
+ obj.add(k2)
+ obj.add(k3)
+ obj.discard(k2)
+ self.assertFillState(2, 3, 0x3ff, obj)
+ self.assertEqual(1024, obj._py_resize(500))
+ # Doesn't change the size, but does change the content
+ self.assertFillState(2, 2, 0x3ff, obj)
+ obj.add(k2)
+ obj.discard(k3)
+ self.assertFillState(2, 3, 0x3ff, obj)
+ self.assertEqual(4096, obj._py_resize(4095))
+ self.assertFillState(2, 2, 0xfff, obj)
+ self.assertIn(k1, obj)
+ self.assertIn(k2, obj)
+ self.assertNotIn(k3, obj)
+ obj.add(k2)
+ self.assertIn(k2, obj)
+ obj.discard(k2)
+ self.assertEqual((591, '<dummy>'), obj._test_lookup(k2))
+ self.assertFillState(1, 2, 0xfff, obj)
+ self.assertEqual(2048, obj._py_resize(1024))
+ self.assertFillState(1, 1, 0x7ff, obj)
+ self.assertEqual((591, '<null>'), obj._test_lookup(k2))
+
+ def test_second_hash_failure(self):
+ obj = self.module.SimpleSet()
+ k1 = _BadSecondHash(200)
+ k2 = _Hashable(200)
+ # Should only call hash() one time
+ obj.add(k1)
+ self.assertFalse(k1._first)
+ self.assertRaises(ValueError, obj.add, k2)
+
+ def test_richcompare_failure(self):
+ obj = self.module.SimpleSet()
+ k1 = _Hashable(200)
+ k2 = _BadCompare(200)
+ obj.add(k1)
+ # Tries to compare with k1, fails
+ self.assertRaises(RuntimeError, obj.add, k2)
+
+ def test_richcompare_not_implemented(self):
+ obj = self.module.SimpleSet()
+ # Even though their hashes are the same, tp_richcompare returns
+ # NotImplemented, which means we treat them as not equal
+ k1 = _NoImplementCompare(200)
+ k2 = _NoImplementCompare(200)
+ self.assertLookup(200, '<null>', obj, k1)
+ self.assertLookup(200, '<null>', obj, k2)
+ self.assertIs(k1, obj.add(k1))
+ self.assertLookup(200, k1, obj, k1)
+ self.assertLookup(201, '<null>', obj, k2)
+ self.assertIs(k2, obj.add(k2))
+ self.assertIs(k1, obj[k1])
+
+ def test_add_and_remove_lots_of_items(self):
+ obj = self.module.SimpleSet()
+ chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890'
+ for i in chars:
+ for j in chars:
+ k = (i, j)
+ obj.add(k)
+ num = len(chars)*len(chars)
+ self.assertFillState(num, num, 0x1fff, obj)
+ # Now delete all of the entries and it should shrink again
+ for i in chars:
+ for j in chars:
+ k = (i, j)
+ obj.discard(k)
+ # It should be back to 1024 wide mask, though there may still be some
+ # dummy values in there
+ self.assertFillState(0, obj.fill, 0x3ff, obj)
+ # but there should be fewer than 1/5th dummy entries
+ self.assertTrue(obj.fill < 1024 / 5)
+
+ def test__iter__(self):
+ obj = self.module.SimpleSet()
+ k1 = ('1',)
+ k2 = ('1', '2')
+ k3 = ('3', '4')
+ obj.add(k1)
+ obj.add(k2)
+ obj.add(k3)
+ all = set()
+ for key in obj:
+ all.add(key)
+ self.assertEqual(sorted([k1, k2, k3]), sorted(all))
+ iterator = iter(obj)
+ iterator.next()
+ obj.add(('foo',))
+ # Set changed size
+ self.assertRaises(RuntimeError, iterator.next)
+ # And even removing an item still causes it to fail
+ obj.discard(k2)
+ self.assertRaises(RuntimeError, iterator.next)
+
+ def test__sizeof__(self):
+ # SimpleSet needs a custom sizeof implementation, because it allocates
+ # memory that Python cannot directly see (_table).
+ # Too much variability in platform sizes for us to give a fixed size
+ # here. However without a custom implementation, __sizeof__ would give
+ # us only the size of the object, and not its table. We know the table
+        # is at least 4 bytes * 1024 entries in size.
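+        # (1024 entries * 4 bytes each is already 4096 bytes, hence the lower
+        # bound asserted below; on 64-bit builds a pointer-sized slot is
+        # 8 bytes, so the table is larger still.)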
+ obj = self.module.SimpleSet()
+ self.assertTrue(obj.__sizeof__() > 4096)
diff --git a/bzrlib/tests/test__static_tuple.py b/bzrlib/tests/test__static_tuple.py
new file mode 100644
index 0000000..8e63de6
--- /dev/null
+++ b/bzrlib/tests/test__static_tuple.py
@@ -0,0 +1,634 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the StaticTuple type."""
+
+import cPickle
+import sys
+
+from bzrlib import (
+ _static_tuple_py,
+ debug,
+ osutils,
+ static_tuple,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+def load_tests(standard_tests, module, loader):
+ """Parameterize tests for all versions of groupcompress."""
+ global compiled_static_tuple_feature
+ suite, compiled_static_tuple_feature = tests.permute_tests_for_extension(
+ standard_tests, loader, 'bzrlib._static_tuple_py',
+ 'bzrlib._static_tuple_c')
+ return suite
+
+
+class TestStaticTuple(tests.TestCase):
+
+ def assertRefcount(self, count, obj):
+ """Assert that the refcount for obj is what we expect.
+
+ Note that this automatically adjusts for the fact that calling
+ assertRefcount actually creates a new pointer, as does calling
+ sys.getrefcount. So pass the expected value *before* the call.
+ """
+ # I don't understand why it is getrefcount()-3 here, but it seems to be
+ # correct. If I check in the calling function, with:
+ # self.assertEqual(count, sys.getrefcount(obj)-1)
+ # Then it works fine. Something about passing it to assertRefcount is
+ # actually double-incrementing (and decrementing) the refcount
+ self.assertEqual(count, sys.getrefcount(obj)-3)
+
+ def test_create(self):
+ k = self.module.StaticTuple('foo')
+ k = self.module.StaticTuple('foo', 'bar')
+
+ def test_create_bad_args(self):
+ args_256 = ['a']*256
+ # too many args
+ self.assertRaises(TypeError, self.module.StaticTuple, *args_256)
+ args_300 = ['a']*300
+ self.assertRaises(TypeError, self.module.StaticTuple, *args_300)
+ # not a string
+ self.assertRaises(TypeError, self.module.StaticTuple, object())
+
+ def test_concat(self):
+ st1 = self.module.StaticTuple('foo')
+ st2 = self.module.StaticTuple('bar')
+ st3 = self.module.StaticTuple('foo', 'bar')
+ st4 = st1 + st2
+ self.assertEqual(st3, st4)
+ self.assertIsInstance(st4, self.module.StaticTuple)
+
+ def test_concat_with_tuple(self):
+ st1 = self.module.StaticTuple('foo')
+ t2 = ('bar',)
+ st3 = self.module.StaticTuple('foo', 'bar')
+ st4 = self.module.StaticTuple('bar', 'foo')
+ st5 = st1 + t2
+ st6 = t2 + st1
+ self.assertEqual(st3, st5)
+ self.assertIsInstance(st5, self.module.StaticTuple)
+ self.assertEqual(st4, st6)
+ if self.module is _static_tuple_py:
+ # _static_tuple_py has StaticTuple(tuple), so tuple thinks it
+ # already knows how to concatenate, as such we can't "inject" our
+ # own concatenation...
+ self.assertIsInstance(st6, tuple)
+ else:
+ self.assertIsInstance(st6, self.module.StaticTuple)
+
+ def test_concat_with_bad_tuple(self):
+ st1 = self.module.StaticTuple('foo')
+ t2 = (object(),)
+ # Using st1.__add__ doesn't give the same results as doing the '+' form
+ self.assertRaises(TypeError, lambda: st1 + t2)
+
+ def test_concat_with_non_tuple(self):
+ st1 = self.module.StaticTuple('foo')
+ self.assertRaises(TypeError, lambda: st1 + 10)
+
+ def test_as_tuple(self):
+ k = self.module.StaticTuple('foo')
+ t = k.as_tuple()
+ self.assertEqual(('foo',), t)
+ self.assertIsInstance(t, tuple)
+ self.assertFalse(isinstance(t, self.module.StaticTuple))
+ k = self.module.StaticTuple('foo', 'bar')
+ t = k.as_tuple()
+ self.assertEqual(('foo', 'bar'), t)
+ k2 = self.module.StaticTuple(1, k)
+ t = k2.as_tuple()
+ self.assertIsInstance(t, tuple)
+ # For pickling to work, we need to keep the sub-items as StaticTuple so
+ # that it knows that they also need to be converted.
+ self.assertIsInstance(t[1], self.module.StaticTuple)
+ self.assertEqual((1, ('foo', 'bar')), t)
+
+ def test_as_tuples(self):
+ k1 = self.module.StaticTuple('foo', 'bar')
+ t = static_tuple.as_tuples(k1)
+ self.assertIsInstance(t, tuple)
+ self.assertEqual(('foo', 'bar'), t)
+ k2 = self.module.StaticTuple(1, k1)
+ t = static_tuple.as_tuples(k2)
+ self.assertIsInstance(t, tuple)
+ self.assertIsInstance(t[1], tuple)
+ self.assertEqual((1, ('foo', 'bar')), t)
+ mixed = (1, k1)
+ t = static_tuple.as_tuples(mixed)
+ self.assertIsInstance(t, tuple)
+ self.assertIsInstance(t[1], tuple)
+ self.assertEqual((1, ('foo', 'bar')), t)
+
+ def test_len(self):
+ k = self.module.StaticTuple()
+ self.assertEqual(0, len(k))
+ k = self.module.StaticTuple('foo')
+ self.assertEqual(1, len(k))
+ k = self.module.StaticTuple('foo', 'bar')
+ self.assertEqual(2, len(k))
+ k = self.module.StaticTuple('foo', 'bar', 'b', 'b', 'b', 'b', 'b')
+ self.assertEqual(7, len(k))
+ args = ['foo']*255
+ k = self.module.StaticTuple(*args)
+ self.assertEqual(255, len(k))
+
+ def test_hold_other_static_tuples(self):
+ k = self.module.StaticTuple('foo', 'bar')
+ k2 = self.module.StaticTuple(k, k)
+ self.assertEqual(2, len(k2))
+ self.assertIs(k, k2[0])
+ self.assertIs(k, k2[1])
+
+ def test_getitem(self):
+ k = self.module.StaticTuple('foo', 'bar', 'b', 'b', 'b', 'b', 'z')
+ self.assertEqual('foo', k[0])
+ self.assertEqual('foo', k[0])
+ self.assertEqual('foo', k[0])
+ self.assertEqual('z', k[6])
+ self.assertEqual('z', k[-1])
+ self.assertRaises(IndexError, k.__getitem__, 7)
+ self.assertRaises(IndexError, k.__getitem__, 256+7)
+ self.assertRaises(IndexError, k.__getitem__, 12024)
+ # Python's [] resolver handles the negative arguments, so we can't
+ # really test StaticTuple_item() with negative values.
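+        # (CPython converts k[-1] into k[len(k)-1] before the type's item
+        # slot is called, which is why the negative lookup above succeeds.)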
+ self.assertRaises(TypeError, k.__getitem__, 'not-an-int')
+ self.assertRaises(TypeError, k.__getitem__, '5')
+
+ def test_refcount(self):
+ f = 'fo' + 'oo'
+ num_refs = sys.getrefcount(f) - 1 #sys.getrefcount() adds one
+ k = self.module.StaticTuple(f)
+ self.assertRefcount(num_refs + 1, f)
+ b = k[0]
+ self.assertRefcount(num_refs + 2, f)
+ b = k[0]
+ self.assertRefcount(num_refs + 2, f)
+ c = k[0]
+ self.assertRefcount(num_refs + 3, f)
+ del b, c
+ self.assertRefcount(num_refs + 1, f)
+ del k
+ self.assertRefcount(num_refs, f)
+
+ def test__repr__(self):
+ k = self.module.StaticTuple('foo', 'bar', 'baz', 'bing')
+ self.assertEqual("StaticTuple('foo', 'bar', 'baz', 'bing')", repr(k))
+
+ def assertCompareEqual(self, k1, k2):
+ self.assertTrue(k1 == k2)
+ self.assertTrue(k1 <= k2)
+ self.assertTrue(k1 >= k2)
+ self.assertFalse(k1 != k2)
+ self.assertFalse(k1 < k2)
+ self.assertFalse(k1 > k2)
+
+ def test_holds_None(self):
+ k1 = self.module.StaticTuple(None)
+ # You cannot subclass None anyway
+
+ def test_holds_int(self):
+ k1 = self.module.StaticTuple(1)
+ class subint(int):
+ pass
+ # But not a subclass, because subint could introduce refcycles
+ self.assertRaises(TypeError, self.module.StaticTuple, subint(2))
+
+ def test_holds_long(self):
+ k1 = self.module.StaticTuple(2L**65)
+ class sublong(long):
+ pass
+ # But not a subclass
+ self.assertRaises(TypeError, self.module.StaticTuple, sublong(1))
+
+ def test_holds_float(self):
+ k1 = self.module.StaticTuple(1.2)
+ class subfloat(float):
+ pass
+ self.assertRaises(TypeError, self.module.StaticTuple, subfloat(1.5))
+
+ def test_holds_str(self):
+ k1 = self.module.StaticTuple('astring')
+ class substr(str):
+ pass
+ self.assertRaises(TypeError, self.module.StaticTuple, substr('a'))
+
+ def test_holds_unicode(self):
+ k1 = self.module.StaticTuple(u'\xb5')
+ class subunicode(unicode):
+ pass
+ self.assertRaises(TypeError, self.module.StaticTuple,
+ subunicode(u'\xb5'))
+
+ def test_hold_bool(self):
+ k1 = self.module.StaticTuple(True)
+ k2 = self.module.StaticTuple(False)
+ # Cannot subclass bool
+
+ def test_compare_same_obj(self):
+ k1 = self.module.StaticTuple('foo', 'bar')
+ self.assertCompareEqual(k1, k1)
+ k2 = self.module.StaticTuple(k1, k1)
+ self.assertCompareEqual(k2, k2)
+ k3 = self.module.StaticTuple('foo', 1, None, u'\xb5', 1.2, 2**65, True,
+ k1)
+ self.assertCompareEqual(k3, k3)
+
+ def test_compare_equivalent_obj(self):
+ k1 = self.module.StaticTuple('foo', 'bar')
+ k2 = self.module.StaticTuple('foo', 'bar')
+ self.assertCompareEqual(k1, k2)
+ k3 = self.module.StaticTuple(k1, k2)
+ k4 = self.module.StaticTuple(k2, k1)
+ self.assertCompareEqual(k1, k2)
+ k5 = self.module.StaticTuple('foo', 1, None, u'\xb5', 1.2, 2**65, True,
+ k1)
+ k6 = self.module.StaticTuple('foo', 1, None, u'\xb5', 1.2, 2**65, True,
+ k1)
+ self.assertCompareEqual(k5, k6)
+ k7 = self.module.StaticTuple(None)
+ k8 = self.module.StaticTuple(None)
+ self.assertCompareEqual(k7, k8)
+
+ def test_compare_similar_obj(self):
+ k1 = self.module.StaticTuple('foo' + ' bar', 'bar' + ' baz')
+ k2 = self.module.StaticTuple('fo' + 'o bar', 'ba' + 'r baz')
+ self.assertCompareEqual(k1, k2)
+ k3 = self.module.StaticTuple('foo ' + 'bar', 'bar ' + 'baz')
+ k4 = self.module.StaticTuple('f' + 'oo bar', 'b' + 'ar baz')
+ k5 = self.module.StaticTuple(k1, k2)
+ k6 = self.module.StaticTuple(k3, k4)
+ self.assertCompareEqual(k5, k6)
+
+ def assertCompareDifferent(self, k_small, k_big):
+ self.assertFalse(k_small == k_big)
+ self.assertFalse(k_small >= k_big)
+ self.assertFalse(k_small > k_big)
+ self.assertTrue(k_small != k_big)
+ self.assertTrue(k_small <= k_big)
+ self.assertTrue(k_small < k_big)
+
+ def assertCompareNoRelation(self, k1, k2):
+ """Run the comparison operators, make sure they do something.
+
+ However, we don't actually care what comes first or second. This is
+ stuff like cross-class comparisons. We don't want to segfault/raise an
+ exception, but we don't care about the sort order.
+ """
+ self.assertFalse(k1 == k2)
+ self.assertTrue(k1 != k2)
+ # Do the comparison, but we don't care about the result
+ k1 >= k2
+ k1 > k2
+ k1 <= k2
+ k1 < k2
+
+ def test_compare_vs_none(self):
+ k1 = self.module.StaticTuple('baz', 'bing')
+ self.assertCompareDifferent(None, k1)
+
+ def test_compare_cross_class(self):
+ k1 = self.module.StaticTuple('baz', 'bing')
+ self.assertCompareNoRelation(10, k1)
+ self.assertCompareNoRelation('baz', k1)
+
+ def test_compare_all_different_same_width(self):
+ k1 = self.module.StaticTuple('baz', 'bing')
+ k2 = self.module.StaticTuple('foo', 'bar')
+ self.assertCompareDifferent(k1, k2)
+ k3 = self.module.StaticTuple(k1, k2)
+ k4 = self.module.StaticTuple(k2, k1)
+ self.assertCompareDifferent(k3, k4)
+ k5 = self.module.StaticTuple(1)
+ k6 = self.module.StaticTuple(2)
+ self.assertCompareDifferent(k5, k6)
+ k7 = self.module.StaticTuple(1.2)
+ k8 = self.module.StaticTuple(2.4)
+ self.assertCompareDifferent(k7, k8)
+ k9 = self.module.StaticTuple(u's\xb5')
+ k10 = self.module.StaticTuple(u's\xe5')
+ self.assertCompareDifferent(k9, k10)
+
+ def test_compare_some_different(self):
+ k1 = self.module.StaticTuple('foo', 'bar')
+ k2 = self.module.StaticTuple('foo', 'zzz')
+ self.assertCompareDifferent(k1, k2)
+ k3 = self.module.StaticTuple(k1, k1)
+ k4 = self.module.StaticTuple(k1, k2)
+ self.assertCompareDifferent(k3, k4)
+ k5 = self.module.StaticTuple('foo', None)
+ self.assertCompareDifferent(k5, k1)
+ self.assertCompareDifferent(k5, k2)
+
+ def test_compare_diff_width(self):
+ k1 = self.module.StaticTuple('foo')
+ k2 = self.module.StaticTuple('foo', 'bar')
+ self.assertCompareDifferent(k1, k2)
+ k3 = self.module.StaticTuple(k1)
+ k4 = self.module.StaticTuple(k1, k2)
+ self.assertCompareDifferent(k3, k4)
+
+ def test_compare_different_types(self):
+ k1 = self.module.StaticTuple('foo', 'bar')
+ k2 = self.module.StaticTuple('foo', 1, None, u'\xb5', 1.2, 2**65, True,
+ k1)
+ self.assertCompareNoRelation(k1, k2)
+ k3 = self.module.StaticTuple('foo')
+ self.assertCompareDifferent(k3, k1)
+ k4 = self.module.StaticTuple(None)
+ self.assertCompareDifferent(k4, k1)
+ k5 = self.module.StaticTuple(1)
+ self.assertCompareNoRelation(k1, k5)
+
+ def test_compare_to_tuples(self):
+ k1 = self.module.StaticTuple('foo')
+ self.assertCompareEqual(k1, ('foo',))
+ self.assertCompareEqual(('foo',), k1)
+ self.assertCompareDifferent(k1, ('foo', 'bar'))
+ self.assertCompareDifferent(k1, ('foo', 10))
+
+ k2 = self.module.StaticTuple('foo', 'bar')
+ self.assertCompareEqual(k2, ('foo', 'bar'))
+ self.assertCompareEqual(('foo', 'bar'), k2)
+ self.assertCompareDifferent(k2, ('foo', 'zzz'))
+ self.assertCompareDifferent(('foo',), k2)
+ self.assertCompareDifferent(('foo', 'aaa'), k2)
+ self.assertCompareDifferent(('baz', 'bing'), k2)
+ self.assertCompareDifferent(('foo', 10), k2)
+
+ k3 = self.module.StaticTuple(k1, k2)
+ self.assertCompareEqual(k3, (('foo',), ('foo', 'bar')))
+ self.assertCompareEqual((('foo',), ('foo', 'bar')), k3)
+ self.assertCompareEqual(k3, (k1, ('foo', 'bar')))
+ self.assertCompareEqual((k1, ('foo', 'bar')), k3)
+
+ def test_compare_mixed_depths(self):
+ stuple = self.module.StaticTuple
+ k1 = stuple(stuple('a',), stuple('b',))
+ k2 = stuple(stuple(stuple('c',), stuple('d',)),
+ stuple('b',))
+ # This requires comparing a StaticTuple to a 'string', and then
+ # interpreting that value in the next higher StaticTuple. This used to
+        # generate a PyErr_BadInternalCall. We now fall back to *something*.
+ self.assertCompareNoRelation(k1, k2)
+
+ def test_hash(self):
+ k = self.module.StaticTuple('foo')
+ self.assertEqual(hash(k), hash(('foo',)))
+ k = self.module.StaticTuple('foo', 'bar', 'baz', 'bing')
+ as_tuple = ('foo', 'bar', 'baz', 'bing')
+ self.assertEqual(hash(k), hash(as_tuple))
+ x = {k: 'foo'}
+        # Because k == as_tuple, it replaces the slot, rather than having both
+ # present in the dict.
+ self.assertEqual('foo', x[as_tuple])
+ x[as_tuple] = 'bar'
+ self.assertEqual({as_tuple: 'bar'}, x)
+
+ k2 = self.module.StaticTuple(k)
+ as_tuple2 = (('foo', 'bar', 'baz', 'bing'),)
+ self.assertEqual(hash(k2), hash(as_tuple2))
+
+ k3 = self.module.StaticTuple('foo', 1, None, u'\xb5', 1.2, 2**65, True,
+ k)
+ as_tuple3 = ('foo', 1, None, u'\xb5', 1.2, 2**65, True, k)
+ self.assertEqual(hash(as_tuple3), hash(k3))
+
+ def test_slice(self):
+ k = self.module.StaticTuple('foo', 'bar', 'baz', 'bing')
+ self.assertEqual(('foo', 'bar'), k[:2])
+ self.assertEqual(('baz',), k[2:-1])
+ try:
+ val = k[::2]
+ except TypeError:
+            # C implementation raises a TypeError; we don't need the
+            # implementation yet, so allow this to pass
+ pass
+ else:
+            # Python implementation uses a regular tuple, so make sure it gives
+ # the right result
+ self.assertEqual(('foo', 'baz'), val)
+
+ def test_referents(self):
+ # We implement tp_traverse so that things like 'meliae' can measure the
+ # amount of referenced memory. Unfortunately gc.get_referents() first
+ # checks the IS_GC flag before it traverses anything. We could write a
+ # helper func, but that won't work for the generic implementation...
+ self.requireFeature(features.meliae)
+ from meliae import scanner
+ strs = ['foo', 'bar', 'baz', 'bing']
+ k = self.module.StaticTuple(*strs)
+ if self.module is _static_tuple_py:
+ refs = strs + [self.module.StaticTuple]
+ else:
+ refs = strs
+ self.assertEqual(sorted(refs), sorted(scanner.get_referents(k)))
+
+ def test_nested_referents(self):
+ self.requireFeature(features.meliae)
+ from meliae import scanner
+ strs = ['foo', 'bar', 'baz', 'bing']
+ k1 = self.module.StaticTuple(*strs[:2])
+ k2 = self.module.StaticTuple(*strs[2:])
+ k3 = self.module.StaticTuple(k1, k2)
+ refs = [k1, k2]
+ if self.module is _static_tuple_py:
+ refs.append(self.module.StaticTuple)
+ self.assertEqual(sorted(refs),
+ sorted(scanner.get_referents(k3)))
+
+ def test_empty_is_singleton(self):
+ key = self.module.StaticTuple()
+ self.assertIs(key, self.module._empty_tuple)
+
+ def test_intern(self):
+ unique_str1 = 'unique str ' + osutils.rand_chars(20)
+ unique_str2 = 'unique str ' + osutils.rand_chars(20)
+ key = self.module.StaticTuple(unique_str1, unique_str2)
+ self.assertFalse(key in self.module._interned_tuples)
+ key2 = self.module.StaticTuple(unique_str1, unique_str2)
+ self.assertEqual(key, key2)
+ self.assertIsNot(key, key2)
+ key3 = key.intern()
+ self.assertIs(key, key3)
+ self.assertTrue(key in self.module._interned_tuples)
+ self.assertEqual(key, self.module._interned_tuples[key])
+ key2 = key2.intern()
+ self.assertIs(key, key2)
+
+ def test__c_intern_handles_refcount(self):
+ if self.module is _static_tuple_py:
+ return # Not applicable
+ unique_str1 = 'unique str ' + osutils.rand_chars(20)
+ unique_str2 = 'unique str ' + osutils.rand_chars(20)
+ key = self.module.StaticTuple(unique_str1, unique_str2)
+ self.assertRefcount(1, key)
+ self.assertFalse(key in self.module._interned_tuples)
+ self.assertFalse(key._is_interned())
+ key2 = self.module.StaticTuple(unique_str1, unique_str2)
+ self.assertRefcount(1, key)
+ self.assertRefcount(1, key2)
+ self.assertEqual(key, key2)
+ self.assertIsNot(key, key2)
+
+ key3 = key.intern()
+ self.assertIs(key, key3)
+ self.assertTrue(key in self.module._interned_tuples)
+ self.assertEqual(key, self.module._interned_tuples[key])
+        # key and key3 both reference it, but we 'hide' the reference
+        # held in _interned_tuples
+ self.assertRefcount(2, key)
+ del key3
+ self.assertRefcount(1, key)
+ self.assertTrue(key._is_interned())
+ self.assertRefcount(1, key2)
+ key3 = key2.intern()
+ # key3 now points to key as well, and *not* to key2
+ self.assertRefcount(2, key)
+ self.assertRefcount(1, key2)
+ self.assertIs(key, key3)
+ self.assertIsNot(key3, key2)
+ del key2
+ del key3
+ self.assertRefcount(1, key)
+
+ def test__c_keys_are_not_immortal(self):
+ if self.module is _static_tuple_py:
+ return # Not applicable
+ unique_str1 = 'unique str ' + osutils.rand_chars(20)
+ unique_str2 = 'unique str ' + osutils.rand_chars(20)
+ key = self.module.StaticTuple(unique_str1, unique_str2)
+ self.assertFalse(key in self.module._interned_tuples)
+ self.assertRefcount(1, key)
+ key = key.intern()
+ self.assertRefcount(1, key)
+ self.assertTrue(key in self.module._interned_tuples)
+ self.assertTrue(key._is_interned())
+ del key
+ # Create a new entry, which would point to the same location
+ key = self.module.StaticTuple(unique_str1, unique_str2)
+ self.assertRefcount(1, key)
+ # This old entry in _interned_tuples should be gone
+ self.assertFalse(key in self.module._interned_tuples)
+ self.assertFalse(key._is_interned())
+
+ def test__c_has_C_API(self):
+ if self.module is _static_tuple_py:
+ return
+ self.assertIsNot(None, self.module._C_API)
+
+ def test_from_sequence_tuple(self):
+ st = self.module.StaticTuple.from_sequence(('foo', 'bar'))
+ self.assertIsInstance(st, self.module.StaticTuple)
+ self.assertEqual(('foo', 'bar'), st)
+
+ def test_from_sequence_str(self):
+ st = self.module.StaticTuple.from_sequence('foo')
+ self.assertIsInstance(st, self.module.StaticTuple)
+ self.assertEqual(('f', 'o', 'o'), st)
+
+ def test_from_sequence_list(self):
+ st = self.module.StaticTuple.from_sequence(['foo', 'bar'])
+ self.assertIsInstance(st, self.module.StaticTuple)
+ self.assertEqual(('foo', 'bar'), st)
+
+ def test_from_sequence_static_tuple(self):
+ st = self.module.StaticTuple('foo', 'bar')
+ st2 = self.module.StaticTuple.from_sequence(st)
+ # If the source is a StaticTuple already, we return the exact object
+ self.assertIs(st, st2)
+
+ def test_from_sequence_not_sequence(self):
+ self.assertRaises(TypeError,
+ self.module.StaticTuple.from_sequence, object())
+ self.assertRaises(TypeError,
+ self.module.StaticTuple.from_sequence, 10)
+
+ def test_from_sequence_incorrect_args(self):
+ self.assertRaises(TypeError,
+ self.module.StaticTuple.from_sequence, object(), 'a')
+ self.assertRaises(TypeError,
+ self.module.StaticTuple.from_sequence, foo='a')
+
+ def test_from_sequence_iterable(self):
+ st = self.module.StaticTuple.from_sequence(iter(['foo', 'bar']))
+ self.assertIsInstance(st, self.module.StaticTuple)
+ self.assertEqual(('foo', 'bar'), st)
+
+ def test_from_sequence_generator(self):
+ def generate_tuple():
+ yield 'foo'
+ yield 'bar'
+ st = self.module.StaticTuple.from_sequence(generate_tuple())
+ self.assertIsInstance(st, self.module.StaticTuple)
+ self.assertEqual(('foo', 'bar'), st)
+
+ def test_pickle(self):
+ st = self.module.StaticTuple('foo', 'bar')
+ pickled = cPickle.dumps(st)
+ unpickled = cPickle.loads(pickled)
+ self.assertEqual(unpickled, st)
+
+ def test_pickle_empty(self):
+ st = self.module.StaticTuple()
+ pickled = cPickle.dumps(st)
+ unpickled = cPickle.loads(pickled)
+ self.assertIs(st, unpickled)
+
+ def test_pickle_nested(self):
+ st = self.module.StaticTuple('foo', self.module.StaticTuple('bar'))
+ pickled = cPickle.dumps(st)
+ unpickled = cPickle.loads(pickled)
+ self.assertEqual(unpickled, st)
+
+ def test_static_tuple_thunk(self):
+ # Make sure the right implementation is available from
+ # bzrlib.static_tuple.StaticTuple.
+ if self.module is _static_tuple_py:
+ if compiled_static_tuple_feature.available():
+ # We will be using the C version
+ return
+ self.assertIs(static_tuple.StaticTuple,
+ self.module.StaticTuple)
+
+
+class TestEnsureStaticTuple(tests.TestCase):
+
+ def test_is_static_tuple(self):
+ st = static_tuple.StaticTuple('foo')
+ st2 = static_tuple.expect_static_tuple(st)
+ self.assertIs(st, st2)
+
+ def test_is_tuple(self):
+ t = ('foo',)
+ st = static_tuple.expect_static_tuple(t)
+ self.assertIsInstance(st, static_tuple.StaticTuple)
+ self.assertEqual(t, st)
+
+ def test_flagged_is_static_tuple(self):
+ debug.debug_flags.add('static_tuple')
+ st = static_tuple.StaticTuple('foo')
+ st2 = static_tuple.expect_static_tuple(st)
+ self.assertIs(st, st2)
+
+ def test_flagged_is_tuple(self):
+ debug.debug_flags.add('static_tuple')
+ t = ('foo',)
+ self.assertRaises(TypeError, static_tuple.expect_static_tuple, t)
diff --git a/bzrlib/tests/test__walkdirs_win32.py b/bzrlib/tests/test__walkdirs_win32.py
new file mode 100644
index 0000000..665e881
--- /dev/null
+++ b/bzrlib/tests/test__walkdirs_win32.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the win32 walkdir extension."""
+
+import errno
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+win32_readdir_feature = features.ModuleAvailableFeature('bzrlib._walkdirs_win32')
+
+
+class TestWin32Finder(tests.TestCaseInTempDir):
+
+ _test_needs_features = [win32_readdir_feature]
+
+ def setUp(self):
+ super(TestWin32Finder, self).setUp()
+ from bzrlib._walkdirs_win32 import (
+ Win32ReadDir,
+ )
+ self.reader = Win32ReadDir()
+
+ def _remove_stat_from_dirblock(self, dirblock):
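+        # Each dirblock entry carries a stat result at index 3; drop it so
+        # the expected values stay deterministic across runs.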
+ return [info[:3] + info[4:] for info in dirblock]
+
+ def assertWalkdirs(self, expected, top, prefix=''):
+ old_selected_dir_reader = osutils._selected_dir_reader
+ try:
+ osutils._selected_dir_reader = self.reader
+ finder = osutils._walkdirs_utf8(top, prefix=prefix)
+ result = []
+ for dirname, dirblock in finder:
+ dirblock = self._remove_stat_from_dirblock(dirblock)
+ result.append((dirname, dirblock))
+ self.assertEqual(expected, result)
+ finally:
+ osutils._selected_dir_reader = old_selected_dir_reader
+
+ def assertReadDir(self, expected, prefix, top_unicode):
+ result = self._remove_stat_from_dirblock(
+ self.reader.read_dir(prefix, top_unicode))
+ self.assertEqual(expected, result)
+
+ def test_top_prefix_to_starting_dir(self):
+ # preparing an iteration should create a unicode native path.
+ self.assertEqual(('prefix', None, None, None, u'\x12'),
+ self.reader.top_prefix_to_starting_dir(u'\x12'.encode('utf8'),
+ 'prefix'))
+
+ def test_empty_directory(self):
+ self.assertReadDir([], 'prefix', u'.')
+ self.assertWalkdirs([(('', u'.'), [])], u'.')
+
+ def test_file(self):
+ self.build_tree(['foo'])
+ self.assertReadDir([('foo', 'foo', 'file', u'./foo')],
+ '', u'.')
+
+ def test_directory(self):
+ self.build_tree(['bar/'])
+ self.assertReadDir([('bar', 'bar', 'directory', u'./bar')],
+ '', u'.')
+
+ def test_prefix(self):
+ self.build_tree(['bar/', 'baf'])
+ self.assertReadDir([
+ ('xxx/baf', 'baf', 'file', u'./baf'),
+ ('xxx/bar', 'bar', 'directory', u'./bar'),
+ ],
+ 'xxx', u'.')
+
+ def test_missing_dir(self):
+ e = self.assertRaises(WindowsError,
+ self.reader.read_dir, 'prefix', u'no_such_dir')
+ self.assertEqual(errno.ENOENT, e.errno)
+ self.assertEqual(3, e.winerror)
+ self.assertEqual((3, u'no_such_dir/*'), e.args)
+
+
+class Test_Win32Stat(tests.TestCaseInTempDir):
+
+ _test_needs_features = [win32_readdir_feature]
+
+ def setUp(self):
+ super(Test_Win32Stat, self).setUp()
+ from bzrlib._walkdirs_win32 import lstat
+ self.win32_lstat = lstat
+
+ def test_zero_members_present(self):
+ self.build_tree(['foo'])
+ st = self.win32_lstat('foo')
+ # we only want to ensure that some members are present
+ self.assertEqual(0, st.st_dev)
+ self.assertEqual(0, st.st_ino)
+ self.assertEqual(0, st.st_uid)
+ self.assertEqual(0, st.st_gid)
diff --git a/bzrlib/tests/test_ancestry.py b/bzrlib/tests/test_ancestry.py
new file mode 100644
index 0000000..5747b81
--- /dev/null
+++ b/bzrlib/tests/test_ancestry.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2005-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib.branchbuilder import BranchBuilder
+from bzrlib.tests import TestCaseWithMemoryTransport
+from bzrlib.tests.matchers import MatchesAncestry
+
+
+class TestAncestry(TestCaseWithMemoryTransport):
+
+ def test_straightline_ancestry(self):
+ """Test ancestry file when just committing."""
+ builder = BranchBuilder(self.get_transport())
+ rev_id_one = builder.build_commit()
+ rev_id_two = builder.build_commit()
+ branch = builder.get_branch()
+ self.assertThat([rev_id_one, rev_id_two],
+ MatchesAncestry(branch.repository, rev_id_two))
+ self.assertThat([rev_id_one],
+ MatchesAncestry(branch.repository, rev_id_one))
+
+# TODO: check that ancestry is updated to include indirectly merged revisions
diff --git a/bzrlib/tests/test_annotate.py b/bzrlib/tests/test_annotate.py
new file mode 100644
index 0000000..ac8fa3e
--- /dev/null
+++ b/bzrlib/tests/test_annotate.py
@@ -0,0 +1,489 @@
+# Copyright (C) 2006-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Whitebox tests for annotate functionality."""
+
+import codecs
+from cStringIO import StringIO
+
+from bzrlib import (
+ annotate,
+ symbol_versioning,
+ tests,
+ )
+
+
+def annotation(text):
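+    # Turns e.g. "rev1 a\nrev2 b\n" into [('rev1', 'a\n'), ('rev2', 'b\n')].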
+ return [tuple(l.split(' ', 1)) for l in text.splitlines(True)]
+
+
+parent_1 = annotation("""\
+rev1 a
+rev2 b
+rev3 c
+rev4 d
+rev5 e
+""")
+
+
+parent_2 = annotation("""\
+rev1 a
+rev3 c
+rev4 d
+rev6 f
+rev7 e
+rev8 h
+""")
+
+
+expected_2_1 = annotation("""\
+rev1 a
+blahblah b
+rev3 c
+rev4 d
+rev7 e
+""")
+
+
+# a: in both, same value, kept
+# b: in 1, kept
+# c: in both, same value, kept
+# d: in both, same value, kept
+# e: 1 and 2 disagree, so it goes to blahblah
+# f: in 2, but not in new, so ignored
+# g: not in 1 or 2, so it goes to blahblah
+# h: only in parent 2, so 2 gets it
+expected_1_2_2 = annotation("""\
+rev1 a
+rev2 b
+rev3 c
+rev4 d
+blahblah e
+blahblah g
+rev8 h
+""")
+
+
+new_1 = """\
+a
+b
+c
+d
+e
+""".splitlines(True)
+
+expected_1 = annotation("""\
+blahblah a
+blahblah b
+blahblah c
+blahblah d
+blahblah e
+""")
+
+
+new_2 = """\
+a
+b
+c
+d
+e
+g
+h
+""".splitlines(True)
+
+
+# For the 'duplicate' series, both sides introduce the same change, which then
+# gets merged around. The last-modified should properly reflect this.
+# We always change the fourth line so that the file is properly tracked as
+# being modified in each revision. In reality, this probably would happen over
+# many revisions, and it would be a different line that changes.
+# BASE
+# |\
+# A B # line should be annotated as new for A and B
+# |\|
+# C D # line should 'converge' and say A
+# |/
+# E # D should supersede A and stay as D (not become E because C references
+# A)
+duplicate_base = annotation("""\
+rev-base first
+rev-base second
+rev-base third
+rev-base fourth-base
+""")
+
+duplicate_A = annotation("""\
+rev-base first
+rev-A alt-second
+rev-base third
+rev-A fourth-A
+""")
+
+duplicate_B = annotation("""\
+rev-base first
+rev-B alt-second
+rev-base third
+rev-B fourth-B
+""")
+
+duplicate_C = annotation("""\
+rev-base first
+rev-A alt-second
+rev-base third
+rev-C fourth-C
+""")
+
+duplicate_D = annotation("""\
+rev-base first
+rev-A alt-second
+rev-base third
+rev-D fourth-D
+""")
+
+duplicate_E = annotation("""\
+rev-base first
+rev-A alt-second
+rev-base third
+rev-E fourth-E
+""")
+
+
+class TestAnnotate(tests.TestCaseWithTransport):
+
+ def create_merged_trees(self):
+ """create 2 trees with merges between them.
+
+ rev-1 --+
+ | |
+ rev-2 rev-1_1_1
+ | |
+ +------+
+ |
+ rev-3
+ """
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('a', 'a-id', 'file', 'first\n')),
+ ], timestamp=1166046000.00, timezone=0, committer="joe@foo.com")
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('modify', ('a-id', 'first\nsecond\n')),
+ ], timestamp=1166046001.00, timezone=0, committer="joe@foo.com")
+ builder.build_snapshot('rev-1_1_1', ['rev-1'], [
+ ('modify', ('a-id', 'first\nthird\n')),
+ ], timestamp=1166046002.00, timezone=0, committer="barry@foo.com")
+ builder.build_snapshot('rev-3', ['rev-2', 'rev-1_1_1'], [
+ ('modify', ('a-id', 'first\nsecond\nthird\n')),
+ ], timestamp=1166046003.00, timezone=0, committer="sal@foo.com")
+ return builder
+
+ def create_deeply_merged_trees(self):
+ """Create some trees with a more complex merge history.
+
+ rev-1 --+
+ | |
+ rev-2 rev-1_1_1 --+
+ | | |
+ +------+ |
+ | | |
+ rev-3 rev-1_1_2 rev-1_2_1 ------+
+ | | | |
+ +------+ | |
+ | | |
+ rev-4 rev-1_2_2 rev-1_3_1
+ | | |
+ +-----------------+ |
+ | |
+ rev-5 |
+ | |
+ +--------------------------------+
+ |
+ rev-6
+ """
+ builder = self.create_merged_trees()
+ builder.build_snapshot('rev-1_1_2', ['rev-1_1_1'], [])
+ builder.build_snapshot('rev-4', ['rev-3', 'rev-1_1_2'], [])
+ builder.build_snapshot('rev-1_2_1', ['rev-1_1_1'], [
+ ('modify', ('a-id', 'first\nthird\nfourth\n')),
+ ], timestamp=1166046003.00, timezone=0, committer="jerry@foo.com")
+ builder.build_snapshot('rev-1_2_2', ['rev-1_2_1'], [],
+ timestamp=1166046004.00, timezone=0, committer="jerry@foo.com")
+ builder.build_snapshot('rev-5', ['rev-4', 'rev-1_2_2'], [
+ ('modify', ('a-id', 'first\nsecond\nthird\nfourth\n')),
+ ], timestamp=1166046004.00, timezone=0, committer="jerry@foo.com")
+ builder.build_snapshot('rev-1_3_1', ['rev-1_2_1'], [
+ ('modify', ('a-id', 'first\nthird\nfourth\nfifth\nsixth\n')),
+ ], timestamp=1166046005.00, timezone=0, committer="george@foo.com")
+ builder.build_snapshot('rev-6', ['rev-5', 'rev-1_3_1'], [
+ ('modify', ('a-id',
+ 'first\nsecond\nthird\nfourth\nfifth\nsixth\n')),
+ ])
+ return builder
+
+ def create_duplicate_lines_tree(self):
+ builder = self.make_branch_builder('branch')
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ base_text = ''.join(l for r, l in duplicate_base)
+ a_text = ''.join(l for r, l in duplicate_A)
+ b_text = ''.join(l for r, l in duplicate_B)
+ c_text = ''.join(l for r, l in duplicate_C)
+ d_text = ''.join(l for r, l in duplicate_D)
+ e_text = ''.join(l for r, l in duplicate_E)
+ builder.build_snapshot('rev-base', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', base_text)),
+ ])
+ builder.build_snapshot('rev-A', ['rev-base'], [
+ ('modify', ('file-id', a_text))])
+ builder.build_snapshot('rev-B', ['rev-base'], [
+ ('modify', ('file-id', b_text))])
+ builder.build_snapshot('rev-C', ['rev-A'], [
+ ('modify', ('file-id', c_text))])
+ builder.build_snapshot('rev-D', ['rev-B', 'rev-A'], [
+ ('modify', ('file-id', d_text))])
+ builder.build_snapshot('rev-E', ['rev-C', 'rev-D'], [
+ ('modify', ('file-id', e_text))])
+ return builder
+
+ def assertAnnotateEqualDiff(self, actual, expected):
+ if actual != expected:
+ # Create an easier to understand diff when the lines don't actually
+ # match
+ self.assertEqualDiff(''.join('\t'.join(l) for l in expected),
+ ''.join('\t'.join(l) for l in actual))
+
+ def assertBranchAnnotate(self, expected, branch, file_id, revision_id,
+ verbose=False, full=False, show_ids=False):
+ tree = branch.repository.revision_tree(revision_id)
+ to_file = StringIO()
+ annotate.annotate_file_tree(tree, file_id, to_file,
+ verbose=verbose, full=full, show_ids=show_ids, branch=branch)
+ self.assertAnnotateEqualDiff(to_file.getvalue(), expected)
+
+ def assertRepoAnnotate(self, expected, repo, file_id, revision_id):
+ """Assert that the revision is properly annotated."""
+ actual = list(repo.revision_tree(revision_id).annotate_iter(file_id))
+ self.assertAnnotateEqualDiff(actual, expected)
+
+ def test_annotate_duplicate_lines(self):
+ # XXX: Should this be a per_repository test?
+ builder = self.create_duplicate_lines_tree()
+ repo = builder.get_branch().repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertRepoAnnotate(duplicate_base, repo, 'file-id', 'rev-base')
+ self.assertRepoAnnotate(duplicate_A, repo, 'file-id', 'rev-A')
+ self.assertRepoAnnotate(duplicate_B, repo, 'file-id', 'rev-B')
+ self.assertRepoAnnotate(duplicate_C, repo, 'file-id', 'rev-C')
+ self.assertRepoAnnotate(duplicate_D, repo, 'file-id', 'rev-D')
+ self.assertRepoAnnotate(duplicate_E, repo, 'file-id', 'rev-E')
+
+ def test_annotate_shows_dotted_revnos(self):
+ builder = self.create_merged_trees()
+
+ self.assertBranchAnnotate('1 joe@foo | first\n'
+ '2 joe@foo | second\n'
+ '1.1.1 barry@f | third\n',
+ builder.get_branch(), 'a-id', 'rev-3')
+
+ def test_annotate_limits_dotted_revnos(self):
+ """Annotate should limit dotted revnos to a depth of 12"""
+ builder = self.create_deeply_merged_trees()
+
+ self.assertBranchAnnotate('1 joe@foo | first\n'
+ '2 joe@foo | second\n'
+ '1.1.1 barry@f | third\n'
+ '1.2.1 jerry@f | fourth\n'
+ '1.3.1 george@ | fifth\n'
+ ' | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-6',
+ verbose=False, full=False)
+
+ self.assertBranchAnnotate('1 joe@foo | first\n'
+ '2 joe@foo | second\n'
+ '1.1.1 barry@f | third\n'
+ '1.2.1 jerry@f | fourth\n'
+ '1.3.1 george@ | fifth\n'
+ '1.3.1 george@ | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-6',
+ verbose=False, full=True)
+
+ # verbose=True shows everything, the full revno, user id, and date
+ self.assertBranchAnnotate('1 joe@foo.com 20061213 | first\n'
+ '2 joe@foo.com 20061213 | second\n'
+ '1.1.1 barry@foo.com 20061213 | third\n'
+ '1.2.1 jerry@foo.com 20061213 | fourth\n'
+ '1.3.1 george@foo.com 20061213 | fifth\n'
+ ' | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-6',
+ verbose=True, full=False)
+
+ self.assertBranchAnnotate('1 joe@foo.com 20061213 | first\n'
+ '2 joe@foo.com 20061213 | second\n'
+ '1.1.1 barry@foo.com 20061213 | third\n'
+ '1.2.1 jerry@foo.com 20061213 | fourth\n'
+ '1.3.1 george@foo.com 20061213 | fifth\n'
+ '1.3.1 george@foo.com 20061213 | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-6',
+ verbose=True, full=True)
+
+ def test_annotate_uses_branch_context(self):
+ """Dotted revnos should use the Branch context.
+
+ When annotating a non-mainline revision, the annotation should still
+ use dotted revnos from the mainline.
+ """
+ builder = self.create_deeply_merged_trees()
+
+ self.assertBranchAnnotate('1 joe@foo | first\n'
+ '1.1.1 barry@f | third\n'
+ '1.2.1 jerry@f | fourth\n'
+ '1.3.1 george@ | fifth\n'
+ ' | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-1_3_1',
+ verbose=False, full=False)
+
+ def test_annotate_show_ids(self):
+ builder = self.create_deeply_merged_trees()
+
+ # It looks better with real revision ids :)
+ self.assertBranchAnnotate(' rev-1 | first\n'
+ ' rev-2 | second\n'
+ 'rev-1_1_1 | third\n'
+ 'rev-1_2_1 | fourth\n'
+ 'rev-1_3_1 | fifth\n'
+ ' | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-6',
+ show_ids=True, full=False)
+
+ self.assertBranchAnnotate(' rev-1 | first\n'
+ ' rev-2 | second\n'
+ 'rev-1_1_1 | third\n'
+ 'rev-1_2_1 | fourth\n'
+ 'rev-1_3_1 | fifth\n'
+ 'rev-1_3_1 | sixth\n',
+ builder.get_branch(), 'a-id', 'rev-6',
+ show_ids=True, full=True)
+
+ def test_annotate_unicode_author(self):
+ tree1 = self.make_branch_and_tree('tree1')
+
+ self.build_tree_contents([('tree1/a', 'adi\xc3\xb3s')])
+ tree1.add(['a'], ['a-id'])
+ tree1.commit('a', rev_id='rev-1',
+ committer=u'Pepe P\xe9rez <pperez@ejemplo.com>',
+ timestamp=1166046000.00, timezone=0)
+
+ self.build_tree_contents([('tree1/b', 'bye')])
+ tree1.add(['b'], ['b-id'])
+ tree1.commit('b', rev_id='rev-2',
+ committer=u'p\xe9rez',
+ timestamp=1166046000.00, timezone=0)
+
+ tree1.lock_read()
+ self.addCleanup(tree1.unlock)
+
+ revtree_1 = tree1.branch.repository.revision_tree('rev-1')
+ revtree_2 = tree1.branch.repository.revision_tree('rev-2')
+
+ # this passes if no exception is raised
+ to_file = StringIO()
+ annotate.annotate_file_tree(revtree_1, 'a-id',
+ to_file=to_file, branch=tree1.branch)
+
+ sio = StringIO()
+ to_file = codecs.getwriter('ascii')(sio)
+ to_file.encoding = 'ascii' # codecs does not set it
+ annotate.annotate_file_tree(revtree_2, 'b-id',
+ to_file=to_file, branch=tree1.branch)
+ self.assertEqualDiff('2 p?rez | bye\n', sio.getvalue())
+
+ # test now with to_file.encoding = None
+ to_file = tests.StringIOWrapper()
+ to_file.encoding = None
+ annotate.annotate_file_tree(revtree_2, 'b-id',
+ to_file=to_file, branch=tree1.branch)
+ self.assertContainsRe('2 p.rez | bye\n', to_file.getvalue())
+
+ # and when to_file has no encoding attribute at all
+ to_file = StringIO()
+ annotate.annotate_file_tree(revtree_2, 'b-id',
+ to_file=to_file, branch=tree1.branch)
+ self.assertContainsRe('2 p.rez | bye\n', to_file.getvalue())
+
+ def test_annotate_author_or_committer(self):
+ tree1 = self.make_branch_and_tree('tree1')
+
+ self.build_tree_contents([('tree1/a', 'hello')])
+ tree1.add(['a'], ['a-id'])
+ tree1.commit('a', rev_id='rev-1',
+ committer='Committer <committer@example.com>',
+ timestamp=1166046000.00, timezone=0)
+
+ self.build_tree_contents([('tree1/b', 'bye')])
+ tree1.add(['b'], ['b-id'])
+ tree1.commit('b', rev_id='rev-2',
+ committer='Committer <committer@example.com>',
+ authors=['Author <author@example.com>'],
+ timestamp=1166046000.00, timezone=0)
+
+ tree1.lock_read()
+ self.addCleanup(tree1.unlock)
+
+ self.assertBranchAnnotate('1 committ | hello\n', tree1.branch,
+ 'a-id', 'rev-1')
+
+ self.assertBranchAnnotate('2 author@ | bye\n', tree1.branch,
+ 'b-id', 'rev-2')
+
+
+class TestReannotate(tests.TestCase):
+
+ def annotateEqual(self, expected, parents, newlines, revision_id,
+ blocks=None):
+ annotate_list = list(annotate.reannotate(parents, newlines,
+ revision_id, blocks))
+ self.assertEqual(len(expected), len(annotate_list))
+ for e, a in zip(expected, annotate_list):
+ self.assertEqual(e, a)
+
+ def test_reannotate(self):
+ self.annotateEqual(parent_1, [parent_1], new_1, 'blahblah')
+ self.annotateEqual(expected_2_1, [parent_2], new_1, 'blahblah')
+ self.annotateEqual(expected_1_2_2, [parent_1, parent_2], new_2,
+ 'blahblah')
+
+ def test_reannotate_no_parents(self):
+ self.annotateEqual(expected_1, [], new_1, 'blahblah')
+
+ def test_reannotate_left_matching_blocks(self):
+ """Ensure that left_matching_blocks has an impact.
+
+ In this case, the annotation is ambiguous, so the hint isn't actually
+ lying.
+ """
+ parent = [('rev1', 'a\n')]
+ new_text = ['a\n', 'a\n']
+ blocks = [(0, 0, 1), (1, 2, 0)]
+ self.annotateEqual([('rev1', 'a\n'), ('rev2', 'a\n')], [parent],
+ new_text, 'rev2', blocks)
+ blocks = [(0, 1, 1), (1, 2, 0)]
+ self.annotateEqual([('rev2', 'a\n'), ('rev1', 'a\n')], [parent],
+ new_text, 'rev2', blocks)
diff --git a/bzrlib/tests/test_api.py b/bzrlib/tests/test_api.py
new file mode 100644
index 0000000..18dec13
--- /dev/null
+++ b/bzrlib/tests/test_api.py
@@ -0,0 +1,143 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for library API infrastructure
+
+This is specifically for things controlling the interface, such as versioning.
+Tests for particular parts of the library interface should be in specific
+relevant test modules.
+"""
+
+import bzrlib
+import bzrlib.api
+from bzrlib.errors import IncompatibleAPI
+from bzrlib.tests import TestCase
+
+class APITests(TestCase):
+
+ def test_library_version(self):
+ """Library API version is exposed"""
+ self.assert_(isinstance(bzrlib.__version__, str))
+ self.assert_(isinstance(bzrlib.version_string, str))
+ self.assert_(isinstance(bzrlib.version_info, tuple))
+ self.assertEqual(len(bzrlib.version_info), 5)
+
+
+class TrivialObject(object):
+ """This class allows assignment to any attribute."""
+
+
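+# The tests below pin down the fallback rules in bzrlib.api: an object's own
+# api_minimum_version / api_current_version attributes win, the current
+# version then falls back to the object's version_info, and finally both
+# fall back to bzrlib's module-level values.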
+class TestAPIVersioning(TestCase):
+
+ def test_get_minimum_api_version_reads_api_minimum_version(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (0, 1, 2)
+ self.assertEqual((0, 1, 2),
+ bzrlib.api.get_minimum_api_version(an_object))
+
+ def test_get_minimum_api_version_fallsback_to_bzr_minimum_version(self):
+ an_object = TrivialObject()
+ self.assertEqual(bzrlib.api_minimum_version,
+ bzrlib.api.get_minimum_api_version(an_object))
+
+ def test_get_current_api_version_reads_api_current_version(self):
+ an_object = TrivialObject()
+ an_object.api_current_version = (3, 2, 1)
+ an_object.version_info = (1, 2, 3, "final", 0)
+ self.assertEqual((3, 2, 1),
+ bzrlib.api.get_current_api_version(an_object))
+
+ def test_get_current_api_version_fallsback_to_version_info(self):
+ an_object = TrivialObject()
+ an_object.version_info = (1, 2, 3, "final", 0)
+ self.assertEqual((1, 2, 3),
+ bzrlib.api.get_current_api_version(an_object))
+
+ def test_get_current_api_version_fallsback_to_bzrlib_version_info(self):
+ an_object = TrivialObject()
+ self.assertEqual(bzrlib.version_info[0:3],
+ bzrlib.api.get_current_api_version(an_object))
+
+ def test_require_any_api_wanted_one(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_any_api(an_object, [(1, 2, 3)])
+
+ def test_require_any_api_wanted_first_compatible(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_any_api(an_object, [(1, 2, 3), (5, 6, 7)])
+
+ def test_require_any_api_wanted_second_compatible(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_any_api(an_object, [(5, 6, 7), (1, 2, 3)])
+
+ def test_require_any_api_wanted_none_compatible(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ self.assertRaises(IncompatibleAPI, bzrlib.api.require_any_api,
+ an_object, [(1, 2, 2), (5, 6, 7)])
+
+ def test_require_api_wanted_is_minimum_is_ok(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_api(an_object, (1, 2, 3))
+
+ def test_require_api_wanted_is_current_is_ok(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_api(an_object, (4, 5, 6))
+
+ def test_require_api_wanted_is_above_minimum_is_ok(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_api(an_object, (1, 2, 4))
+
+ def test_require_api_wanted_is_below_current_is_ok(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ bzrlib.api.require_api(an_object, (4, 5, 5))
+
+ def test_require_api_wanted_is_below_minimum_raises(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ err = self.assertRaises(IncompatibleAPI,
+ bzrlib.api.require_api, an_object, (1, 2, 2))
+ self.assertEqual(err.api, an_object)
+ self.assertEqual(err.wanted, (1, 2, 2))
+ self.assertEqual(err.minimum, (1, 2, 3))
+ self.assertEqual(err.current, (4, 5, 6))
+
+ def test_require_api_wanted_is_above_current_raises(self):
+ an_object = TrivialObject()
+ an_object.api_minimum_version = (1, 2, 3)
+ an_object.api_current_version = (4, 5, 6)
+ err = self.assertRaises(IncompatibleAPI,
+ bzrlib.api.require_api, an_object, (4, 5, 7))
+ self.assertEqual(err.api, an_object)
+ self.assertEqual(err.wanted, (4, 5, 7))
+ self.assertEqual(err.minimum, (1, 2, 3))
+ self.assertEqual(err.current, (4, 5, 6))
diff --git a/bzrlib/tests/test_atomicfile.py b/bzrlib/tests/test_atomicfile.py
new file mode 100644
index 0000000..7a9defc
--- /dev/null
+++ b/bzrlib/tests/test_atomicfile.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Basic tests for AtomicFile"""
+
+import os
+import stat
+import sys
+
+from bzrlib import (
+ atomicfile,
+ errors,
+ osutils,
+ symbol_versioning,
+ )
+from bzrlib.tests import TestCaseInTempDir, TestSkipped
+
+
+class TestAtomicFile(TestCaseInTempDir):
+
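+ # AtomicFile semantics exercised below: nothing is visible at the target
+ # path until commit(); abort() discards the data; close() on a still-open
+ # file behaves like abort(); afterwards commit() and abort() raise
+ # AtomicFileAlreadyClosed, while close() stays safe to call again.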
+ def test_commit(self):
+ f = atomicfile.AtomicFile('test')
+ self.assertPathDoesNotExist('test')
+ f.write('foo\n')
+ f.commit()
+
+ self.assertEqual(['test'], os.listdir('.'))
+ self.check_file_contents('test', 'foo\n')
+ self.assertRaises(errors.AtomicFileAlreadyClosed, f.commit)
+ self.assertRaises(errors.AtomicFileAlreadyClosed, f.abort)
+ # close is re-entrant safe
+ f.close()
+
+ def test_abort(self):
+ f = atomicfile.AtomicFile('test')
+ f.write('foo\n')
+ f.abort()
+ self.assertEqual([], os.listdir('.'))
+
+ self.assertRaises(errors.AtomicFileAlreadyClosed, f.abort)
+ self.assertRaises(errors.AtomicFileAlreadyClosed, f.commit)
+
+ # close is re-entrant safe
+ f.close()
+
+ def test_close(self):
+ f = atomicfile.AtomicFile('test')
+ f.write('foo\n')
+ # close on an open file is an abort
+ f.close()
+ self.assertEqual([], os.listdir('.'))
+
+ self.assertRaises(errors.AtomicFileAlreadyClosed, f.abort)
+ self.assertRaises(errors.AtomicFileAlreadyClosed, f.commit)
+
+ # close is re-entrant safe
+ f.close()
+
+ def test_text_mode(self):
+ f = atomicfile.AtomicFile('test', mode='wt')
+ f.write('foo\n')
+ f.commit()
+
+ contents = open('test', 'rb').read()
+ if sys.platform == 'win32':
+ self.assertEqual('foo\r\n', contents)
+ else:
+ self.assertEqual('foo\n', contents)
+
+ def can_sys_preserve_mode(self):
+ # PLATFORM DEFICIENCY/ TestSkipped
+ return sys.platform not in ('win32',)
+
+ def _test_mode(self, mode):
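+ # Write a file through AtomicFile with an explicit new_mode and check
+ # that the committed file ends up with exactly those permission bits.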
+ if not self.can_sys_preserve_mode():
+ raise TestSkipped("This test cannot be run on your platform")
+ f = atomicfile.AtomicFile('test', mode='wb', new_mode=mode)
+ f.write('foo\n')
+ f.commit()
+ st = os.lstat('test')
+ self.assertEqualMode(mode, stat.S_IMODE(st.st_mode))
+
+ def test_mode_0666(self):
+ self._test_mode(0666)
+
+ def test_mode_0664(self):
+ self._test_mode(0664)
+
+ def test_mode_0660(self):
+ self._test_mode(0660)
+
+ def test_mode_0640(self):
+ self._test_mode(0640)
+
+ def test_mode_0600(self):
+ self._test_mode(0600)
+
+ def test_mode_0400(self):
+ self._test_mode(0400)
+ # Make it read-write again so cleanup doesn't complain
+ os.chmod('test', 0600)
+
+ def test_no_mode(self):
+ # The default file permissions should be based on umask
+ umask = osutils.get_umask()
+ f = atomicfile.AtomicFile('test', mode='wb')
+ f.write('foo\n')
+ f.commit()
+ st = os.lstat('test')
+ self.assertEqualMode(0666 & ~umask, stat.S_IMODE(st.st_mode))
diff --git a/bzrlib/tests/test_bad_files.py b/bzrlib/tests/test_bad_files.py
new file mode 100644
index 0000000..1219ad1
--- /dev/null
+++ b/bzrlib/tests/test_bad_files.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests being able to ignore bad filetypes."""
+
+from cStringIO import StringIO
+import os
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.status import show_tree_status
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.features import (
+ OsFifoFeature,
+ )
+
+
+def verify_status(tester, tree, value):
+ """Verify the output of show_tree_status"""
+ tof = StringIO()
+ show_tree_status(tree, to_file=tof)
+ tof.seek(0)
+ tester.assertEqual(value, tof.readlines())
+
+
+class TestBadFiles(TestCaseWithTransport):
+
+ def test_bad_files(self):
+ """Test that bzr will ignore files it doesn't like"""
+ self.requireFeature(OsFifoFeature)
+
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+
+ files = ['one', 'two', 'three']
+ file_ids = ['one-id', 'two-id', 'three-id']
+ self.build_tree(files)
+ wt.add(files, file_ids)
+ wt.commit("Commit one", rev_id="a@u-0-0")
+
+ # We should now have a few files; let's try to
+ # put some bogus stuff in the tree
+
+ # status with nothing changed
+ verify_status(self, wt, [])
+
+ os.mkfifo('a-fifo')
+ self.build_tree(['six'])
+
+ verify_status(self, wt,
+ ['unknown:\n',
+ ' a-fifo\n',
+ ' six\n'
+ ])
+
+ # We should raise an error if we are adding a bogus file
+ self.assertRaises(errors.BadFileKindError, wt.smart_add, ['a-fifo'])
+
+ # And the list of files shouldn't have been modified
+ verify_status(self, wt,
+ ['unknown:\n',
+ ' a-fifo\n',
+ ' six\n'
+ ])
+
+ # Make sure smart_add can handle having a bogus
+ # file in the way
+ wt.smart_add([])
+ verify_status(self, wt,
+ ['added:\n',
+ ' six\n',
+ 'unknown:\n',
+ ' a-fifo\n',
+ ])
+ wt.commit("Commit four", rev_id="a@u-0-3")
+
+ verify_status(self, wt,
+ ['unknown:\n',
+ ' a-fifo\n',
+ ])
diff --git a/bzrlib/tests/test_bisect_multi.py b/bzrlib/tests/test_bisect_multi.py
new file mode 100644
index 0000000..9821edf
--- /dev/null
+++ b/bzrlib/tests/test_bisect_multi.py
@@ -0,0 +1,343 @@
+# Copyright (C) 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bisect_multi."""
+
+from bzrlib.bisect_multi import bisect_multi_bytes
+from bzrlib.tests import TestCase
+
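+# The content-lookup callables passed to bisect_multi_bytes below take a list
+# of (offset, key) tuples and return (offset_key, result) pairs: False means
+# the key is not present at that offset, -1 means keep searching at lower
+# offsets, +1 means keep searching at higher offsets, and any other value is
+# treated as the found result and yielded as (key, result).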
+
+class TestBisectMultiBytes(TestCase):
+
+ def test_lookup_no_keys_no_calls(self):
+ calls = []
+ def missing_content(location_keys):
+ calls.append(location_keys)
+ return ((location_key, False) for location_key in location_keys)
+ self.assertEqual([],
+ list(bisect_multi_bytes(missing_content, 100, [])))
+ self.assertEqual([], calls)
+
+ def test_lookup_missing_key_no_content(self):
+ """Doing a lookup in a zero-length file still does a single request.
+
+ This makes sense because the bisector cannot tell how long the content
+ is, and it's more flexible to only stop when the content object says
+ 'False' for a given (location, key) pair.
+ """
+ calls = []
+ def missing_content(location_keys):
+ calls.append(location_keys)
+ return ((location_key, False) for location_key in location_keys)
+ self.assertEqual([],
+ list(bisect_multi_bytes(missing_content, 0, ['foo', 'bar'])))
+ self.assertEqual([[(0, 'foo'), (0, 'bar')]], calls)
+
+ def test_lookup_missing_key_before_all_others(self):
+ calls = []
+ def missing_first_content(location_keys):
+ # returns -1 for all keys unless the byte offset is 0 when it
+ # returns False
+ calls.append(location_keys)
+ result = []
+ for location_key in location_keys:
+ if location_key[0] == 0:
+ result.append((location_key, False))
+ else:
+ result.append((location_key, -1))
+ return result
+ # given a 0 length file, this should terminate with one call.
+ self.assertEqual([],
+ list(bisect_multi_bytes(missing_first_content, 0, ['foo', 'bar'])))
+ self.assertEqual([[(0, 'foo'), (0, 'bar')]], calls)
+ del calls[:]
+ # given a 2 length file, this should make two calls - 1, 0.
+ self.assertEqual([],
+ list(bisect_multi_bytes(missing_first_content, 2, ['foo', 'bar'])))
+ self.assertEqual([
+ [(1, 'foo'), (1, 'bar')],
+ [(0, 'foo'), (0, 'bar')],
+ ], calls)
+ del calls[:]
+ # given a really long file - just under 256MB - this should make a
+ # series of calls with the gap between adjacent calls dropping by 50%
+ # each time. We choose a length which is just under a power of two to
+ # generate a corner case in bisection - naively using power-of-two
+ # reduction in size can lead to a very long tail in the bisection
+ # process. The current users of the bisect_multi_bytes api are not
+ # expected to be concerned by this, as the delta gets down to 4K (the
+ # minimum we expect to read and parse) within 16 steps even on a 256MB
+ # index (which at 4 keys/K is about a million keys, and log2 of that is
+ # 20 - so we're doing about log2 steps in the worst case there).
+ self.assertEqual([],
+ list(bisect_multi_bytes(
+ missing_first_content,268435456 - 1 , ['foo', 'bar'])))
+ self.assertEqual([
+ [(134217727, 'foo'), (134217727, 'bar')],
+ [(67108864, 'foo'), (67108864, 'bar')],
+ [(33554433, 'foo'), (33554433, 'bar')],
+ [(16777218, 'foo'), (16777218, 'bar')],
+ [(8388611, 'foo'), (8388611, 'bar')],
+ [(4194308, 'foo'), (4194308, 'bar')],
+ [(2097157, 'foo'), (2097157, 'bar')],
+ [(1048582, 'foo'), (1048582, 'bar')],
+ [(524295, 'foo'), (524295, 'bar')],
+ [(262152, 'foo'), (262152, 'bar')],
+ [(131081, 'foo'), (131081, 'bar')],
+ [(65546, 'foo'), (65546, 'bar')],
+ [(32779, 'foo'), (32779, 'bar')],
+ [(16396, 'foo'), (16396, 'bar')],
+ [(8205, 'foo'), (8205, 'bar')],
+ [(4110, 'foo'), (4110, 'bar')],
+ [(2063, 'foo'), (2063, 'bar')],
+ [(1040, 'foo'), (1040, 'bar')],
+ [(529, 'foo'), (529, 'bar')],
+ [(274, 'foo'), (274, 'bar')],
+ [(147, 'foo'), (147, 'bar')],
+ [(84, 'foo'), (84, 'bar')],
+ [(53, 'foo'), (53, 'bar')],
+ [(38, 'foo'), (38, 'bar')],
+ [(31, 'foo'), (31, 'bar')],
+ [(28, 'foo'), (28, 'bar')],
+ [(27, 'foo'), (27, 'bar')],
+ [(26, 'foo'), (26, 'bar')],
+ [(25, 'foo'), (25, 'bar')],
+ [(24, 'foo'), (24, 'bar')],
+ [(23, 'foo'), (23, 'bar')],
+ [(22, 'foo'), (22, 'bar')],
+ [(21, 'foo'), (21, 'bar')],
+ [(20, 'foo'), (20, 'bar')],
+ [(19, 'foo'), (19, 'bar')],
+ [(18, 'foo'), (18, 'bar')],
+ [(17, 'foo'), (17, 'bar')],
+ [(16, 'foo'), (16, 'bar')],
+ [(15, 'foo'), (15, 'bar')],
+ [(14, 'foo'), (14, 'bar')],
+ [(13, 'foo'), (13, 'bar')],
+ [(12, 'foo'), (12, 'bar')],
+ [(11, 'foo'), (11, 'bar')],
+ [(10, 'foo'), (10, 'bar')],
+ [(9, 'foo'), (9, 'bar')],
+ [(8, 'foo'), (8, 'bar')],
+ [(7, 'foo'), (7, 'bar')],
+ [(6, 'foo'), (6, 'bar')],
+ [(5, 'foo'), (5, 'bar')],
+ [(4, 'foo'), (4, 'bar')],
+ [(3, 'foo'), (3, 'bar')],
+ [(2, 'foo'), (2, 'bar')],
+ [(1, 'foo'), (1, 'bar')],
+ [(0, 'foo'), (0, 'bar')],
+ ], calls)
+
+ def test_lookup_missing_key_after_all_others(self):
+ calls = []
+ end = None
+ def missing_last_content(location_keys):
+ # returns +1 for all keys unless the byte offset is 'end' when it
+ # returns False
+ calls.append(location_keys)
+ result = []
+ for location_key in location_keys:
+ if location_key[0] == end:
+ result.append((location_key, False))
+ else:
+ result.append((location_key, +1))
+ return result
+ # given a 0 length file, this should terminate with one call.
+ end = 0
+ self.assertEqual([],
+ list(bisect_multi_bytes(missing_last_content, 0, ['foo', 'bar'])))
+ self.assertEqual([[(0, 'foo'), (0, 'bar')]], calls)
+ del calls[:]
+ end = 2
+ # given a 3 length file, this should make two calls - 1, 2.
+ self.assertEqual([],
+ list(bisect_multi_bytes(missing_last_content, 3, ['foo', 'bar'])))
+ self.assertEqual([
+ [(1, 'foo'), (1, 'bar')],
+ [(2, 'foo'), (2, 'bar')],
+ ], calls)
+ del calls[:]
+ end = 268435456 - 2
+ # see the really-big lookup series in
+ # test_lookup_missing_key_before_all_others for details about this
+ # assertion.
+ self.assertEqual([],
+ list(bisect_multi_bytes(
+ missing_last_content,268435456 - 1 , ['foo', 'bar'])))
+ self.assertEqual([
+ [(134217727, 'foo'), (134217727, 'bar')],
+ [(201326590, 'foo'), (201326590, 'bar')],
+ [(234881021, 'foo'), (234881021, 'bar')],
+ [(251658236, 'foo'), (251658236, 'bar')],
+ [(260046843, 'foo'), (260046843, 'bar')],
+ [(264241146, 'foo'), (264241146, 'bar')],
+ [(266338297, 'foo'), (266338297, 'bar')],
+ [(267386872, 'foo'), (267386872, 'bar')],
+ [(267911159, 'foo'), (267911159, 'bar')],
+ [(268173302, 'foo'), (268173302, 'bar')],
+ [(268304373, 'foo'), (268304373, 'bar')],
+ [(268369908, 'foo'), (268369908, 'bar')],
+ [(268402675, 'foo'), (268402675, 'bar')],
+ [(268419058, 'foo'), (268419058, 'bar')],
+ [(268427249, 'foo'), (268427249, 'bar')],
+ [(268431344, 'foo'), (268431344, 'bar')],
+ [(268433391, 'foo'), (268433391, 'bar')],
+ [(268434414, 'foo'), (268434414, 'bar')],
+ [(268434925, 'foo'), (268434925, 'bar')],
+ [(268435180, 'foo'), (268435180, 'bar')],
+ [(268435307, 'foo'), (268435307, 'bar')],
+ [(268435370, 'foo'), (268435370, 'bar')],
+ [(268435401, 'foo'), (268435401, 'bar')],
+ [(268435416, 'foo'), (268435416, 'bar')],
+ [(268435423, 'foo'), (268435423, 'bar')],
+ [(268435426, 'foo'), (268435426, 'bar')],
+ [(268435427, 'foo'), (268435427, 'bar')],
+ [(268435428, 'foo'), (268435428, 'bar')],
+ [(268435429, 'foo'), (268435429, 'bar')],
+ [(268435430, 'foo'), (268435430, 'bar')],
+ [(268435431, 'foo'), (268435431, 'bar')],
+ [(268435432, 'foo'), (268435432, 'bar')],
+ [(268435433, 'foo'), (268435433, 'bar')],
+ [(268435434, 'foo'), (268435434, 'bar')],
+ [(268435435, 'foo'), (268435435, 'bar')],
+ [(268435436, 'foo'), (268435436, 'bar')],
+ [(268435437, 'foo'), (268435437, 'bar')],
+ [(268435438, 'foo'), (268435438, 'bar')],
+ [(268435439, 'foo'), (268435439, 'bar')],
+ [(268435440, 'foo'), (268435440, 'bar')],
+ [(268435441, 'foo'), (268435441, 'bar')],
+ [(268435442, 'foo'), (268435442, 'bar')],
+ [(268435443, 'foo'), (268435443, 'bar')],
+ [(268435444, 'foo'), (268435444, 'bar')],
+ [(268435445, 'foo'), (268435445, 'bar')],
+ [(268435446, 'foo'), (268435446, 'bar')],
+ [(268435447, 'foo'), (268435447, 'bar')],
+ [(268435448, 'foo'), (268435448, 'bar')],
+ [(268435449, 'foo'), (268435449, 'bar')],
+ [(268435450, 'foo'), (268435450, 'bar')],
+ [(268435451, 'foo'), (268435451, 'bar')],
+ [(268435452, 'foo'), (268435452, 'bar')],
+ [(268435453, 'foo'), (268435453, 'bar')],
+ [(268435454, 'foo'), (268435454, 'bar')]
+ ], calls)
+
+ def test_lookup_when_a_key_is_missing_continues(self):
+ calls = []
+ def missing_foo_otherwise_missing_first_content(location_keys):
+ # returns -1 for all keys unless the byte offset is 0 when it
+ # returns False
+ calls.append(location_keys)
+ result = []
+ for location_key in location_keys:
+ if location_key[1] == 'foo' or location_key[0] == 0:
+ result.append((location_key, False))
+ else:
+ result.append((location_key, -1))
+ return result
+ # given a 2 length file, this should terminate with two calls, one for
+ # both keys, and one for bar only.
+ self.assertEqual([],
+ list(bisect_multi_bytes(
+ missing_foo_otherwise_missing_first_content, 2,
+ ['foo', 'bar'])))
+ self.assertEqual([
+ [(1, 'foo'), (1, 'bar')],
+ [(0, 'bar')],
+ ], calls)
+
+ def test_found_keys_returned_other_searches_continue(self):
+ calls = []
+ def find_bar_at_1_foo_missing_at_0(location_keys):
+ calls.append(location_keys)
+ result = []
+ for location_key in location_keys:
+ if location_key == (1, 'bar'):
+ result.append((location_key, 'bar-result'))
+ elif location_key[0] == 0:
+ result.append((location_key, False))
+ else:
+ result.append((location_key, -1))
+ return result
+ # given a 4 length file, this should terminate with three calls, two for
+ # both keys, and one for foo only.
+ self.assertEqual([('bar', 'bar-result')],
+ list(bisect_multi_bytes(
+ find_bar_at_1_foo_missing_at_0, 4,
+ ['foo', 'bar'])))
+ self.assertEqual([
+ [(2, 'foo'), (2, 'bar')],
+ [(1, 'foo'), (1, 'bar')],
+ [(0, 'foo')],
+ ], calls)
+
+ def test_searches_different_keys_in_different_directions(self):
+ calls = []
+ def missing_bar_at_1_foo_at_3(location_keys):
+ calls.append(location_keys)
+ result = []
+ for location_key in location_keys:
+ if location_key[1] == 'bar':
+ if location_key[0] == 1:
+ result.append((location_key, False))
+ else:
+ # search down
+ result.append((location_key, -1))
+ elif location_key[1] == 'foo':
+ if location_key[0] == 3:
+ result.append((location_key, False))
+ else:
+ # search up
+ result.append((location_key, +1))
+ return result
+ # given a 4 length file, this should terminate with two calls.
+ self.assertEqual([],
+ list(bisect_multi_bytes(
+ missing_bar_at_1_foo_at_3, 4,
+ ['foo', 'bar'])))
+ self.assertEqual([
+ [(2, 'foo'), (2, 'bar')],
+ [(3, 'foo'), (1, 'bar')],
+ ], calls)
+
+ def test_change_direction_in_single_key_search(self):
+ # check that we can search down, up, down again -
+ # so length 8, goes 4, 6, 5
+ calls = []
+ def missing_at_5(location_keys):
+ calls.append(location_keys)
+ result = []
+ for location_key in location_keys:
+ if location_key[0] == 5:
+ result.append((location_key, False))
+ elif location_key[0] > 5:
+ # search down
+ result.append((location_key, -1))
+ else:
+ # search up
+ result.append((location_key, +1))
+ return result
+ # given a 8 length file, this should terminate with three calls.
+ self.assertEqual([],
+ list(bisect_multi_bytes(
+ missing_at_5, 8,
+ ['foo', 'bar'])))
+ self.assertEqual([
+ [(4, 'foo'), (4, 'bar')],
+ [(6, 'foo'), (6, 'bar')],
+ [(5, 'foo'), (5, 'bar')],
+ ], calls)
+
diff --git a/bzrlib/tests/test_branch.py b/bzrlib/tests/test_branch.py
new file mode 100644
index 0000000..fe9862d
--- /dev/null
+++ b/bzrlib/tests/test_branch.py
@@ -0,0 +1,724 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the Branch facility that are not interface tests.
+
+For interface tests see `tests/per_branch/*.py`.
+
+For concrete class tests see this file, and for meta-branch tests
+also see this file.
+"""
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ branch as _mod_branch,
+ bzrdir,
+ config,
+ controldir,
+ errors,
+ tests,
+ trace,
+ urlutils,
+ )
+from bzrlib.branchfmt.fullhistory import (
+ BzrBranch5,
+ BzrBranchFormat5,
+ )
+
+
+class TestDefaultFormat(tests.TestCase):
+
+ def test_default_format(self):
+ # update this if you change the default branch format
+ self.assertIsInstance(_mod_branch.format_registry.get_default(),
+ _mod_branch.BzrBranchFormat7)
+
+ def test_default_format_is_same_as_bzrdir_default(self):
+ # XXX: it might be nice if there was only one place the default was
+ # set, but at the moment that's not true -- mbp 20070814 --
+ # https://bugs.launchpad.net/bzr/+bug/132376
+ self.assertEqual(
+ _mod_branch.format_registry.get_default(),
+ bzrdir.BzrDirFormat.get_default_format().get_branch_format())
+
+ def test_get_set_default_format(self):
+ # set the format and then set it back again
+ old_format = _mod_branch.format_registry.get_default()
+ _mod_branch.format_registry.set_default(SampleBranchFormat())
+ try:
+ # the default branch format is used by the meta dir format
+ # which is not the default bzrdir format at this point
+ dir = bzrdir.BzrDirMetaFormat1().initialize('memory:///')
+ result = dir.create_branch()
+ self.assertEqual(result, 'A branch')
+ finally:
+ _mod_branch.format_registry.set_default(old_format)
+ self.assertEqual(old_format,
+ _mod_branch.format_registry.get_default())
+
+
+class TestBranchFormat5(tests.TestCaseWithTransport):
+ """Tests specific to branch format 5"""
+
+ def test_branch_format_5_uses_lockdir(self):
+ url = self.get_url()
+ bdir = bzrdir.BzrDirMetaFormat1().initialize(url)
+ bdir.create_repository()
+ branch = BzrBranchFormat5().initialize(bdir)
+ t = self.get_transport()
+ self.log("branch instance is %r" % branch)
+ self.assert_(isinstance(branch, BzrBranch5))
+ self.assertIsDirectory('.', t)
+ self.assertIsDirectory('.bzr/branch', t)
+ self.assertIsDirectory('.bzr/branch/lock', t)
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ self.assertIsDirectory('.bzr/branch/lock/held', t)
+
+ def test_set_push_location(self):
+ conf = config.LocationConfig.from_string('# comment\n', '.', save=True)
+
+ branch = self.make_branch('.', format='knit')
+ branch.set_push_location('foo')
+ local_path = urlutils.local_path_from_url(branch.base[:-1])
+ self.assertFileEqual("# comment\n"
+ "[%s]\n"
+ "push_location = foo\n"
+ "push_location:policy = norecurse\n" % local_path,
+ config.locations_config_filename())
+
+ # TODO RBC 20051029 test getting a push location from a branch in a
+ # recursive section - that is, it appends the branch name.
+
+
+class SampleBranchFormat(_mod_branch.BranchFormatMetadir):
+ """A sample format
+
+ this format is initializable, unsupported to aid in testing the
+ open and open_downlevel routines.
+ """
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrBranchFormat.get_format_string()."""
+ return "Sample branch format."
+
+ def initialize(self, a_bzrdir, name=None, repository=None,
+ append_revisions_only=None):
+ """Format 4 branches cannot be created."""
+ t = a_bzrdir.get_branch_transport(self, name=name)
+ t.put_bytes('format', self.get_format_string())
+ return 'A branch'
+
+ def is_supported(self):
+ return False
+
+ def open(self, transport, name=None, _found=False, ignore_fallbacks=False,
+ possible_transports=None):
+ return "opened branch."
+
+
+# Demonstrating how lazy loading is often implemented:
+# A constant string is created.
+SampleSupportedBranchFormatString = "Sample supported branch format."
+
+# And the format class can then reference the constant to avoid skew.
+class SampleSupportedBranchFormat(_mod_branch.BranchFormatMetadir):
+ """A sample supported format."""
+
+ @classmethod
+ def get_format_string(cls):
+ """See BzrBranchFormat.get_format_string()."""
+ return SampleSupportedBranchFormatString
+
+ def initialize(self, a_bzrdir, name=None, append_revisions_only=None):
+ t = a_bzrdir.get_branch_transport(self, name=name)
+ t.put_bytes('format', self.get_format_string())
+ return 'A branch'
+
+ def open(self, transport, name=None, _found=False, ignore_fallbacks=False,
+ possible_transports=None):
+ return "opened supported branch."
+
+
+class SampleExtraBranchFormat(_mod_branch.BranchFormat):
+ """A sample format that is not usable in a metadir."""
+
+ def get_format_string(self):
+ # This format is not usable in a metadir.
+ return None
+
+ def network_name(self):
+ # Network name always has to be provided.
+ return "extra"
+
+ def initialize(self, a_bzrdir, name=None):
+ raise NotImplementedError(self.initialize)
+
+ def open(self, transport, name=None, _found=False, ignore_fallbacks=False,
+ possible_transports=None):
+ raise NotImplementedError(self.open)
+
+
+class TestBzrBranchFormat(tests.TestCaseWithTransport):
+ """Tests for the BzrBranchFormat facility."""
+
+ def test_find_format(self):
+ # is the right format object found for a branch?
+ # create a branch with a few known format objects.
+ # this is not quite the same as
+ self.build_tree(["foo/", "bar/"])
+ def check_format(format, url):
+ dir = format._matchingbzrdir.initialize(url)
+ dir.create_repository()
+ format.initialize(dir)
+ found_format = _mod_branch.BranchFormatMetadir.find_format(dir)
+ self.assertIsInstance(found_format, format.__class__)
+ check_format(BzrBranchFormat5(), "bar")
+
+ def test_find_format_factory(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ SampleSupportedBranchFormat().initialize(dir)
+ factory = _mod_branch.MetaDirBranchFormatFactory(
+ SampleSupportedBranchFormatString,
+ "bzrlib.tests.test_branch", "SampleSupportedBranchFormat")
+ _mod_branch.format_registry.register(factory)
+ self.addCleanup(_mod_branch.format_registry.remove, factory)
+ b = _mod_branch.Branch.open(self.get_url())
+ self.assertEqual(b, "opened supported branch.")
+
+ def test_from_string(self):
+ self.assertIsInstance(
+ SampleBranchFormat.from_string("Sample branch format."),
+ SampleBranchFormat)
+ self.assertRaises(AssertionError,
+ SampleBranchFormat.from_string, "Different branch format.")
+
+ def test_find_format_not_branch(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ self.assertRaises(errors.NotBranchError,
+ _mod_branch.BranchFormatMetadir.find_format,
+ dir)
+
+ def test_find_format_unknown_format(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ SampleBranchFormat().initialize(dir)
+ self.assertRaises(errors.UnknownFormatError,
+ _mod_branch.BranchFormatMetadir.find_format,
+ dir)
+
+ def test_find_format_with_features(self):
+ tree = self.make_branch_and_tree('.', format='2a')
+ tree.branch.update_feature_flags({"name": "optional"})
+ found_format = _mod_branch.BranchFormatMetadir.find_format(tree.bzrdir)
+ self.assertIsInstance(found_format, _mod_branch.BranchFormatMetadir)
+ self.assertEquals(found_format.features.get("name"), "optional")
+ tree.branch.update_feature_flags({"name": None})
+ branch = _mod_branch.Branch.open('.')
+ self.assertEquals(branch._format.features, {})
+
+
+class TestBranchFormatRegistry(tests.TestCase):
+
+ def setUp(self):
+ super(TestBranchFormatRegistry, self).setUp()
+ self.registry = _mod_branch.BranchFormatRegistry()
+
+ def test_default(self):
+ self.assertIs(None, self.registry.get_default())
+ format = SampleBranchFormat()
+ self.registry.set_default(format)
+ self.assertEquals(format, self.registry.get_default())
+
+ def test_register_unregister_format(self):
+ format = SampleBranchFormat()
+ self.registry.register(format)
+ self.assertEquals(format,
+ self.registry.get("Sample branch format."))
+ self.registry.remove(format)
+ self.assertRaises(KeyError, self.registry.get,
+ "Sample branch format.")
+
+ def test_get_all(self):
+ format = SampleBranchFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra(self):
+ format = SampleExtraBranchFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra_lazy(self):
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra_lazy("bzrlib.tests.test_branch",
+ "SampleExtraBranchFormat")
+ formats = self.registry._get_all()
+ self.assertEquals(1, len(formats))
+ self.assertIsInstance(formats[0], SampleExtraBranchFormat)
+
+
+# Used by TestMetaDirBranchFormatFactory
+FakeLazyFormat = None
+
+
+class TestMetaDirBranchFormatFactory(tests.TestCase):
+
+ def test_get_format_string_does_not_load(self):
+ """Formats have a static format string."""
+ factory = _mod_branch.MetaDirBranchFormatFactory("yo", None, None)
+ self.assertEqual("yo", factory.get_format_string())
+
+ def test_call_loads(self):
+ # __call__ is used by the network_format_registry interface to get a
+ # Format.
+ global FakeLazyFormat
+ del FakeLazyFormat
+ factory = _mod_branch.MetaDirBranchFormatFactory(None,
+ "bzrlib.tests.test_branch", "FakeLazyFormat")
+ self.assertRaises(AttributeError, factory)
+
+ def test_call_returns_call_of_referenced_object(self):
+ global FakeLazyFormat
+ FakeLazyFormat = lambda:'called'
+ factory = _mod_branch.MetaDirBranchFormatFactory(None,
+ "bzrlib.tests.test_branch", "FakeLazyFormat")
+ self.assertEqual('called', factory())
+
+
+class TestBranch67(object):
+ """Common tests for both branch 6 and 7 which are mostly the same."""
+
+ def get_format_name(self):
+ raise NotImplementedError(self.get_format_name)
+
+ def get_format_name_subtree(self):
+ raise NotImplementedError(self.get_format_name)
+
+ def get_class(self):
+ raise NotImplementedError(self.get_class)
+
+ def test_creation(self):
+ format = bzrdir.BzrDirMetaFormat1()
+ format.set_branch_format(_mod_branch.BzrBranchFormat6())
+ branch = self.make_branch('a', format=format)
+ self.assertIsInstance(branch, self.get_class())
+ branch = self.make_branch('b', format=self.get_format_name())
+ self.assertIsInstance(branch, self.get_class())
+ branch = _mod_branch.Branch.open('a')
+ self.assertIsInstance(branch, self.get_class())
+
+ def test_layout(self):
+ branch = self.make_branch('a', format=self.get_format_name())
+ self.assertPathExists('a/.bzr/branch/last-revision')
+ self.assertPathDoesNotExist('a/.bzr/branch/revision-history')
+ self.assertPathDoesNotExist('a/.bzr/branch/references')
+
+ def test_config(self):
+ """Ensure that all configuration data is stored in the branch"""
+ branch = self.make_branch('a', format=self.get_format_name())
+ branch.set_parent('http://example.com')
+ self.assertPathDoesNotExist('a/.bzr/branch/parent')
+ self.assertEqual('http://example.com', branch.get_parent())
+ branch.set_push_location('sftp://example.com')
+ conf = branch.get_config_stack()
+ self.assertEqual('sftp://example.com', conf.get('push_location'))
+ branch.set_bound_location('ftp://example.com')
+ self.assertPathDoesNotExist('a/.bzr/branch/bound')
+ self.assertEqual('ftp://example.com', branch.get_bound_location())
+
+ def do_checkout_test(self, lightweight=False):
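+ # Build a tree containing nested tree references, check it out, and
+ # verify that a lightweight checkout's nested branches still point back
+ # at the source while a heavyweight checkout gets its own copies.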
+ tree = self.make_branch_and_tree('source',
+ format=self.get_format_name_subtree())
+ subtree = self.make_branch_and_tree('source/subtree',
+ format=self.get_format_name_subtree())
+ subsubtree = self.make_branch_and_tree('source/subtree/subsubtree',
+ format=self.get_format_name_subtree())
+ self.build_tree(['source/subtree/file',
+ 'source/subtree/subsubtree/file'])
+ subsubtree.add('file')
+ subtree.add('file')
+ subtree.add_reference(subsubtree)
+ tree.add_reference(subtree)
+ tree.commit('a revision')
+ subtree.commit('a subtree file')
+ subsubtree.commit('a subsubtree file')
+ tree.branch.create_checkout('target', lightweight=lightweight)
+ self.assertPathExists('target')
+ self.assertPathExists('target/subtree')
+ self.assertPathExists('target/subtree/file')
+ self.assertPathExists('target/subtree/subsubtree/file')
+ subbranch = _mod_branch.Branch.open('target/subtree/subsubtree')
+ if lightweight:
+ self.assertEndsWith(subbranch.base, 'source/subtree/subsubtree/')
+ else:
+ self.assertEndsWith(subbranch.base, 'target/subtree/subsubtree/')
+
+ def test_checkout_with_references(self):
+ self.do_checkout_test()
+
+ def test_light_checkout_with_references(self):
+ self.do_checkout_test(lightweight=True)
+
+
+class TestBranch6(TestBranch67, tests.TestCaseWithTransport):
+
+ def get_class(self):
+ return _mod_branch.BzrBranch6
+
+ def get_format_name(self):
+ return "dirstate-tags"
+
+ def get_format_name_subtree(self):
+ return "dirstate-with-subtree"
+
+ def test_set_stacked_on_url_errors(self):
+ branch = self.make_branch('a', format=self.get_format_name())
+ self.assertRaises(errors.UnstackableBranchFormat,
+ branch.set_stacked_on_url, None)
+
+ def test_default_stacked_location(self):
+ branch = self.make_branch('a', format=self.get_format_name())
+ self.assertRaises(errors.UnstackableBranchFormat, branch.get_stacked_on_url)
+
+
+class TestBranch7(TestBranch67, tests.TestCaseWithTransport):
+
+ def get_class(self):
+ return _mod_branch.BzrBranch7
+
+ def get_format_name(self):
+ return "1.9"
+
+ def get_format_name_subtree(self):
+ return "development-subtree"
+
+ def test_set_stacked_on_url_unstackable_repo(self):
+ repo = self.make_repository('a', format='dirstate-tags')
+ control = repo.bzrdir
+ branch = _mod_branch.BzrBranchFormat7().initialize(control)
+ target = self.make_branch('b')
+ self.assertRaises(errors.UnstackableRepositoryFormat,
+ branch.set_stacked_on_url, target.base)
+
+ def test_clone_stacked_on_unstackable_repo(self):
+ repo = self.make_repository('a', format='dirstate-tags')
+ control = repo.bzrdir
+ branch = _mod_branch.BzrBranchFormat7().initialize(control)
+ # Calling clone should not raise UnstackableRepositoryFormat.
+ cloned_bzrdir = control.clone('cloned')
+
+ def _test_default_stacked_location(self):
+ branch = self.make_branch('a', format=self.get_format_name())
+ self.assertRaises(errors.NotStacked, branch.get_stacked_on_url)
+
+ def test_stack_and_unstack(self):
+ branch = self.make_branch('a', format=self.get_format_name())
+ target = self.make_branch_and_tree('b', format=self.get_format_name())
+ branch.set_stacked_on_url(target.branch.base)
+ self.assertEqual(target.branch.base, branch.get_stacked_on_url())
+ revid = target.commit('foo')
+ self.assertTrue(branch.repository.has_revision(revid))
+ branch.set_stacked_on_url(None)
+ self.assertRaises(errors.NotStacked, branch.get_stacked_on_url)
+ self.assertFalse(branch.repository.has_revision(revid))
+
+ def test_open_opens_stacked_reference(self):
+ branch = self.make_branch('a', format=self.get_format_name())
+ target = self.make_branch_and_tree('b', format=self.get_format_name())
+ branch.set_stacked_on_url(target.branch.base)
+ branch = branch.bzrdir.open_branch()
+ revid = target.commit('foo')
+ self.assertTrue(branch.repository.has_revision(revid))
+
+
+class BzrBranch8(tests.TestCaseWithTransport):
+
+ def make_branch(self, location, format=None):
+ if format is None:
+ format = controldir.format_registry.make_bzrdir('1.9')
+ format.set_branch_format(_mod_branch.BzrBranchFormat8())
+ return tests.TestCaseWithTransport.make_branch(
+ self, location, format=format)
+
+ def create_branch_with_reference(self):
+ branch = self.make_branch('branch')
+ branch._set_all_reference_info({'file-id': ('path', 'location')})
+ return branch
+
+ @staticmethod
+ def instrument_branch(branch, gets):
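+ # Wrap branch._transport.get with a recording stub so the tests can
+ # count how many times branch data is re-read from the transport.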
+ old_get = branch._transport.get
+ def get(*args, **kwargs):
+ gets.append((args, kwargs))
+ return old_get(*args, **kwargs)
+ branch._transport.get = get
+
+ def test_reference_info_caching_read_locked(self):
+ gets = []
+ branch = self.create_branch_with_reference()
+ branch.lock_read()
+ self.addCleanup(branch.unlock)
+ self.instrument_branch(branch, gets)
+ branch.get_reference_info('file-id')
+ branch.get_reference_info('file-id')
+ self.assertEqual(1, len(gets))
+
+ def test_reference_info_caching_read_unlocked(self):
+ gets = []
+ branch = self.create_branch_with_reference()
+ self.instrument_branch(branch, gets)
+ branch.get_reference_info('file-id')
+ branch.get_reference_info('file-id')
+ self.assertEqual(2, len(gets))
+
+ def test_reference_info_caching_write_locked(self):
+ gets = []
+ branch = self.make_branch('branch')
+ branch.lock_write()
+ self.instrument_branch(branch, gets)
+ self.addCleanup(branch.unlock)
+ branch._set_all_reference_info({'file-id': ('path2', 'location2')})
+ path, location = branch.get_reference_info('file-id')
+ self.assertEqual(0, len(gets))
+ self.assertEqual('path2', path)
+ self.assertEqual('location2', location)
+
+ def test_reference_info_caches_cleared(self):
+ branch = self.make_branch('branch')
+ branch.lock_write()
+ branch.set_reference_info('file-id', 'path2', 'location2')
+ branch.unlock()
+ doppelganger = _mod_branch.Branch.open('branch')
+ doppelganger.set_reference_info('file-id', 'path3', 'location3')
+ self.assertEqual(('path3', 'location3'),
+ branch.get_reference_info('file-id'))
+
+ def _recordParentMapCalls(self, repo):
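+ # Wrap repo.revisions.get_parent_map so tests can see which revision
+ # ids were looked up.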
+ self._parent_map_calls = []
+ orig_get_parent_map = repo.revisions.get_parent_map
+ def get_parent_map(q):
+ q = list(q)
+ self._parent_map_calls.extend([e[0] for e in q])
+ return orig_get_parent_map(q)
+ repo.revisions.get_parent_map = get_parent_map
+
+
+class TestBranchReference(tests.TestCaseWithTransport):
+ """Tests for the branch reference facility."""
+
+ def test_create_open_reference(self):
+ bzrdirformat = bzrdir.BzrDirMetaFormat1()
+ t = self.get_transport()
+ t.mkdir('repo')
+ dir = bzrdirformat.initialize(self.get_url('repo'))
+ dir.create_repository()
+ target_branch = dir.create_branch()
+ t.mkdir('branch')
+ branch_dir = bzrdirformat.initialize(self.get_url('branch'))
+ made_branch = _mod_branch.BranchReferenceFormat().initialize(
+ branch_dir, target_branch=target_branch)
+ self.assertEqual(made_branch.base, target_branch.base)
+ opened_branch = branch_dir.open_branch()
+ self.assertEqual(opened_branch.base, target_branch.base)
+
+ def test_get_reference(self):
+ """For a BranchReference, get_reference should return the location."""
+ branch = self.make_branch('target')
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ reference_url = branch.bzrdir.root_transport.abspath('') + '/'
+ # if the api for create_checkout changes to return different checkout types
+ # then this file read will fail.
+ self.assertFileEqual(reference_url, 'checkout/.bzr/branch/location')
+ self.assertEqual(reference_url,
+ _mod_branch.BranchReferenceFormat().get_reference(checkout.bzrdir))
+
+
+class TestHooks(tests.TestCaseWithTransport):
+
+ def test_constructor(self):
+ """Check that creating a BranchHooks instance has the right defaults."""
+ hooks = _mod_branch.BranchHooks()
+ self.assertTrue("post_push" in hooks, "post_push not in %s" % hooks)
+ self.assertTrue("post_commit" in hooks, "post_commit not in %s" % hooks)
+ self.assertTrue("pre_commit" in hooks, "pre_commit not in %s" % hooks)
+ self.assertTrue("post_pull" in hooks, "post_pull not in %s" % hooks)
+ self.assertTrue("post_uncommit" in hooks,
+ "post_uncommit not in %s" % hooks)
+ self.assertTrue("post_change_branch_tip" in hooks,
+ "post_change_branch_tip not in %s" % hooks)
+ self.assertTrue("post_branch_init" in hooks,
+ "post_branch_init not in %s" % hooks)
+ self.assertTrue("post_switch" in hooks,
+ "post_switch not in %s" % hooks)
+
+ def test_installed_hooks_are_BranchHooks(self):
+ """The installed hooks object should be a BranchHooks."""
+ # the installed hooks are saved in self._preserved_hooks.
+ self.assertIsInstance(self._preserved_hooks[_mod_branch.Branch][1],
+ _mod_branch.BranchHooks)
+
+ def test_post_branch_init_hook(self):
+ calls = []
+ _mod_branch.Branch.hooks.install_named_hook('post_branch_init',
+ calls.append, None)
+ self.assertLength(0, calls)
+ branch = self.make_branch('a')
+ self.assertLength(1, calls)
+ params = calls[0]
+ self.assertIsInstance(params, _mod_branch.BranchInitHookParams)
+ self.assertTrue(hasattr(params, 'bzrdir'))
+ self.assertTrue(hasattr(params, 'branch'))
+
+ def test_post_branch_init_hook_repr(self):
+ param_reprs = []
+ _mod_branch.Branch.hooks.install_named_hook('post_branch_init',
+ lambda params: param_reprs.append(repr(params)), None)
+ branch = self.make_branch('a')
+ self.assertLength(1, param_reprs)
+ param_repr = param_reprs[0]
+ self.assertStartsWith(param_repr, '<BranchInitHookParams of ')
+
+ def test_post_switch_hook(self):
+ from bzrlib import switch
+ calls = []
+ _mod_branch.Branch.hooks.install_named_hook('post_switch',
+ calls.append, None)
+ tree = self.make_branch_and_tree('branch-1')
+ self.build_tree(['branch-1/file-1'])
+ tree.add('file-1')
+ tree.commit('rev1')
+ to_branch = tree.bzrdir.sprout('branch-2').open_branch()
+ self.build_tree(['branch-1/file-2'])
+ tree.add('file-2')
+ tree.remove('file-1')
+ tree.commit('rev2')
+ checkout = tree.branch.create_checkout('checkout')
+ self.assertLength(0, calls)
+ switch.switch(checkout.bzrdir, to_branch)
+ self.assertLength(1, calls)
+ params = calls[0]
+ self.assertIsInstance(params, _mod_branch.SwitchHookParams)
+ self.assertTrue(hasattr(params, 'to_branch'))
+ self.assertTrue(hasattr(params, 'revision_id'))
+
+
+class TestBranchOptions(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestBranchOptions, self).setUp()
+ self.branch = self.make_branch('.')
+ self.config_stack = self.branch.get_config_stack()
+
+ def check_append_revisions_only(self, expected_value, value=None):
+ """Set append_revisions_only in config and check its interpretation."""
+ if value is not None:
+ self.config_stack.set('append_revisions_only', value)
+ self.assertEqual(expected_value,
+ self.branch.get_append_revisions_only())
+
+ def test_valid_append_revisions_only(self):
+ self.assertEquals(None,
+ self.config_stack.get('append_revisions_only'))
+ self.check_append_revisions_only(None)
+ self.check_append_revisions_only(False, 'False')
+ self.check_append_revisions_only(True, 'True')
+ # The following values will cause compatibility problems on projects
+ # using older bzr versions (<2.2) but are accepted
+ self.check_append_revisions_only(False, 'false')
+ self.check_append_revisions_only(True, 'true')
+
+ def test_invalid_append_revisions_only(self):
+ """Ensure warning is noted on invalid settings"""
+ self.warnings = []
+ def warning(*args):
+ self.warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ self.check_append_revisions_only(None, 'not-a-bool')
+ self.assertLength(1, self.warnings)
+ self.assertEqual(
+ 'Value "not-a-bool" is not valid for "append_revisions_only"',
+ self.warnings[0])
+
+ def test_use_fresh_values(self):
+ copy = _mod_branch.Branch.open(self.branch.base)
+ copy.lock_write()
+ try:
+ copy.get_config_stack().set('foo', 'bar')
+ finally:
+ copy.unlock()
+ self.assertFalse(self.branch.is_locked())
+ # Since the branch is locked, the option value won't be saved on disk,
+ # so trying to access the config of the locked branch via another,
+ # older, non-locked branch object pointing to the same branch is not
+ # supported.
+ self.assertEqual(None, self.branch.get_config_stack().get('foo'))
+ # Using a newly created branch object works as expected
+ fresh = _mod_branch.Branch.open(self.branch.base)
+ self.assertEqual('bar', fresh.get_config_stack().get('foo'))
+
+ def test_set_from_config_get_from_config_stack(self):
+ self.branch.lock_write()
+ self.addCleanup(self.branch.unlock)
+ self.branch.get_config().set_user_option('foo', 'bar')
+ result = self.branch.get_config_stack().get('foo')
+ # https://bugs.launchpad.net/bzr/+bug/948344
+ self.expectFailure('BranchStack uses cache after set_user_option',
+ self.assertEqual, 'bar', result)
+
+ def test_set_from_config_stack_get_from_config(self):
+ self.branch.lock_write()
+ self.addCleanup(self.branch.unlock)
+ self.branch.get_config_stack().set('foo', 'bar')
+ # Since the branch is locked, the option value won't be saved on disk
+ # so mixing get() and get_user_option() is broken by design.
+ self.assertEqual(None,
+ self.branch.get_config().get_user_option('foo'))
+
+ def test_set_delays_write_when_branch_is_locked(self):
+ self.branch.lock_write()
+ self.addCleanup(self.branch.unlock)
+ self.branch.get_config_stack().set('foo', 'bar')
+ copy = _mod_branch.Branch.open(self.branch.base)
+ result = copy.get_config_stack().get('foo')
+ # Accessing from a different branch object is like accessing from a
+ # different process: the option has not been saved yet and the new
+ # value cannot be seen.
+ self.assertIs(None, result)
+
+
+class TestPullResult(tests.TestCase):
+
+ def test_report_changed(self):
+ r = _mod_branch.PullResult()
+ r.old_revid = "old-revid"
+ r.old_revno = 10
+ r.new_revid = "new-revid"
+ r.new_revno = 20
+ f = StringIO()
+ r.report(f)
+ self.assertEqual("Now on revision 20.\n", f.getvalue())
+ self.assertEqual("Now on revision 20.\n", f.getvalue())
+
+ def test_report_unchanged(self):
+ r = _mod_branch.PullResult()
+ r.old_revid = "same-revid"
+ r.new_revid = "same-revid"
+ f = StringIO()
+ r.report(f)
+ self.assertEqual("No revisions or tags to pull.\n", f.getvalue())
diff --git a/bzrlib/tests/test_branchbuilder.py b/bzrlib/tests/test_branchbuilder.py
new file mode 100644
index 0000000..c31aaa5
--- /dev/null
+++ b/bzrlib/tests/test_branchbuilder.py
@@ -0,0 +1,450 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the BranchBuilder class."""
+
+from bzrlib import (
+ branch as _mod_branch,
+ revision as _mod_revision,
+ tests,
+ )
+from bzrlib.branchbuilder import BranchBuilder
+
+
+class TestBranchBuilder(tests.TestCaseWithMemoryTransport):
+
+ def test_create(self):
+ """Test the constructor api."""
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ # we don't care whether the branch has been built yet at this point.
+
+ def test_get_branch(self):
+ """get_branch returns the created branch."""
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ branch = builder.get_branch()
+ self.assertIsInstance(branch, _mod_branch.Branch)
+ self.assertEqual(self.get_transport().clone('foo').base,
+ branch.base)
+ self.assertEqual(
+ (0, _mod_revision.NULL_REVISION),
+ branch.last_revision_info())
+
+ def test_format(self):
+ """Making a BranchBuilder with a format option sets the branch type."""
+ builder = BranchBuilder(self.get_transport(), format='dirstate-tags')
+ branch = builder.get_branch()
+ self.assertIsInstance(branch, _mod_branch.BzrBranch6)
+
+ def test_build_one_commit(self):
+ """doing build_commit causes a commit to happen."""
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ rev_id = builder.build_commit()
+ branch = builder.get_branch()
+ self.assertEqual((1, rev_id), branch.last_revision_info())
+ self.assertEqual(
+ 'commit 1',
+ branch.repository.get_revision(branch.last_revision()).message)
+
+ def test_build_commit_timestamp(self):
+ """You can set a date when committing."""
+ builder = self.make_branch_builder('foo')
+ rev_id = builder.build_commit(timestamp=1236043340)
+ branch = builder.get_branch()
+ self.assertEqual((1, rev_id), branch.last_revision_info())
+ rev = branch.repository.get_revision(branch.last_revision())
+ self.assertEqual(
+ 'commit 1',
+ rev.message)
+ self.assertEqual(
+ 1236043340,
+ int(rev.timestamp))
+
+ def test_build_two_commits(self):
+ """The second commit has the right parents and message."""
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ rev_id1 = builder.build_commit()
+ rev_id2 = builder.build_commit()
+ branch = builder.get_branch()
+ self.assertEqual((2, rev_id2), branch.last_revision_info())
+ self.assertEqual(
+ 'commit 2',
+ branch.repository.get_revision(branch.last_revision()).message)
+ self.assertEqual(
+ [rev_id1],
+ branch.repository.get_revision(branch.last_revision()).parent_ids)
+
+ def test_build_commit_parent_ids(self):
+ """build_commit() takes a parent_ids argument."""
+ builder = BranchBuilder(self.get_transport().clone('foo'))
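+ # "ghost" is not present in the repository, so using it as the leftmost
+ # parent requires allow_leftmost_as_ghost=True.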
+ rev_id1 = builder.build_commit(
+ parent_ids=["ghost"], allow_leftmost_as_ghost=True)
+ rev_id2 = builder.build_commit(parent_ids=[])
+ branch = builder.get_branch()
+ self.assertEqual((1, rev_id2), branch.last_revision_info())
+ self.assertEqual(
+ ["ghost"],
+ branch.repository.get_revision(rev_id1).parent_ids)
+
+
+class TestBranchBuilderBuildSnapshot(tests.TestCaseWithMemoryTransport):
+
+ def assertTreeShape(self, expected_shape, tree):
+ """Check that the tree shape matches expectations."""
+ tree.lock_read()
+ try:
+ entries = [(path, ie.file_id, ie.kind)
+ for path, ie in tree.iter_entries_by_dir()]
+ finally:
+ tree.unlock()
+ self.assertEqual(expected_shape, entries)
+
+ def build_a_rev(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
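+ # Each 'add' action is ('add', (path, file-id, kind, content)); the
+ # content is None for directories.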
+ rev_id1 = builder.build_snapshot('A-id', None,
+ [('add', ('', 'a-root-id', 'directory', None)),
+ ('add', ('a', 'a-id', 'file', 'contents'))])
+ self.assertEqual('A-id', rev_id1)
+ return builder
+
+ def test_add_one_file(self):
+ builder = self.build_a_rev()
+ branch = builder.get_branch()
+ self.assertEqual((1, 'A-id'), branch.last_revision_info())
+ rev_tree = branch.repository.revision_tree('A-id')
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file')], rev_tree)
+ self.assertEqual('contents', rev_tree.get_file_text('a-id'))
+
+ def test_add_second_file(self):
+ builder = self.build_a_rev()
+ rev_id2 = builder.build_snapshot('B-id', None,
+ [('add', ('b', 'b-id', 'file', 'content_b'))])
+ self.assertEqual('B-id', rev_id2)
+ branch = builder.get_branch()
+ self.assertEqual((2, rev_id2), branch.last_revision_info())
+ rev_tree = branch.repository.revision_tree(rev_id2)
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ (u'b', 'b-id', 'file')], rev_tree)
+ self.assertEqual('content_b', rev_tree.get_file_text('b-id'))
+
+ def test_add_empty_dir(self):
+ builder = self.build_a_rev()
+ rev_id2 = builder.build_snapshot('B-id', None,
+ [('add', ('b', 'b-id', 'directory', None))])
+ rev_tree = builder.get_branch().repository.revision_tree('B-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ (u'b', 'b-id', 'directory'),
+ ], rev_tree)
+
+ def test_commit_timestamp(self):
+ builder = self.make_branch_builder('foo')
+ rev_id = builder.build_snapshot(None, None,
+ [('add', (u'', None, 'directory', None))],
+ timestamp=1234567890)
+ rev = builder.get_branch().repository.get_revision(rev_id)
+ self.assertEqual(
+ 1234567890,
+ int(rev.timestamp))
+
+ def test_commit_message_default(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ rev_id = builder.build_snapshot(None, None,
+ [('add', (u'', None, 'directory', None))])
+ branch = builder.get_branch()
+ rev = branch.repository.get_revision(rev_id)
+ self.assertEqual(u'commit 1', rev.message)
+
+ def test_commit_message_supplied(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ rev_id = builder.build_snapshot(None, None,
+ [('add', (u'', None, 'directory', None))],
+ message=u'Foo')
+ branch = builder.get_branch()
+ rev = branch.repository.get_revision(rev_id)
+ self.assertEqual(u'Foo', rev.message)
+
+ def test_commit_message_callback(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ rev_id = builder.build_snapshot(None, None,
+ [('add', (u'', None, 'directory', None))],
+ message_callback=lambda x:u'Foo')
+ branch = builder.get_branch()
+ rev = branch.repository.get_revision(rev_id)
+ self.assertEqual(u'Foo', rev.message)
+
+ def test_modify_file(self):
+ builder = self.build_a_rev()
+ rev_id2 = builder.build_snapshot('B-id', None,
+ [('modify', ('a-id', 'new\ncontent\n'))])
+ self.assertEqual('B-id', rev_id2)
+ branch = builder.get_branch()
+ rev_tree = branch.repository.revision_tree(rev_id2)
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ self.assertEqual('new\ncontent\n', rev_tree.get_file_text('a-id'))
+
+ def test_delete_file(self):
+ builder = self.build_a_rev()
+ rev_id2 = builder.build_snapshot('B-id', None,
+ [('unversion', 'a-id')])
+ self.assertEqual('B-id', rev_id2)
+ branch = builder.get_branch()
+ rev_tree = branch.repository.revision_tree(rev_id2)
+ rev_tree.lock_read()
+ self.addCleanup(rev_tree.unlock)
+ self.assertTreeShape([(u'', 'a-root-id', 'directory')], rev_tree)
+
+ def test_delete_directory(self):
+ builder = self.build_a_rev()
+ rev_id2 = builder.build_snapshot('B-id', None,
+ [('add', ('b', 'b-id', 'directory', None)),
+ ('add', ('b/c', 'c-id', 'file', 'foo\n')),
+ ('add', ('b/d', 'd-id', 'directory', None)),
+ ('add', ('b/d/e', 'e-id', 'file', 'eff\n')),
+ ])
+ rev_tree = builder.get_branch().repository.revision_tree('B-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ (u'b', 'b-id', 'directory'),
+ (u'b/c', 'c-id', 'file'),
+ (u'b/d', 'd-id', 'directory'),
+ (u'b/d/e', 'e-id', 'file')], rev_tree)
+ # Unversioning a directory also unversions all of its children
+ builder.build_snapshot('C-id', None, [('unversion', 'b-id')])
+ rev_tree = builder.get_branch().repository.revision_tree('C-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ ], rev_tree)
+
+ def test_unknown_action(self):
+ builder = self.build_a_rev()
+ e = self.assertRaises(ValueError,
+ builder.build_snapshot, 'B-id', None, [('weirdo', ('foo',))])
+ self.assertEqual('Unknown build action: "weirdo"', str(e))
+
+ def test_rename(self):
+ builder = self.build_a_rev()
+ builder.build_snapshot('B-id', None,
+ [('rename', ('a', 'b'))])
+ rev_tree = builder.get_branch().repository.revision_tree('B-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'b', 'a-id', 'file')], rev_tree)
+
+ def test_rename_into_subdir(self):
+ builder = self.build_a_rev()
+ builder.build_snapshot('B-id', None,
+ [('add', ('dir', 'dir-id', 'directory', None)),
+ ('rename', ('a', 'dir/a'))])
+ rev_tree = builder.get_branch().repository.revision_tree('B-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'dir', 'dir-id', 'directory'),
+ (u'dir/a', 'a-id', 'file')], rev_tree)
+
+ def test_rename_out_of_unversioned_subdir(self):
+ builder = self.build_a_rev()
+ builder.build_snapshot('B-id', None,
+ [('add', ('dir', 'dir-id', 'directory', None)),
+ ('rename', ('a', 'dir/a'))])
+ builder.build_snapshot('C-id', None,
+ [('rename', ('dir/a', 'a')),
+ ('unversion', 'dir-id')])
+ rev_tree = builder.get_branch().repository.revision_tree('C-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file')], rev_tree)
+
+ def test_set_parent(self):
+ builder = self.build_a_rev()
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
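+ # start_series keeps the builder's tree and branch locked across the
+ # build_snapshot calls below (see test_start_finish_series).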
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('a-id', 'new\ncontent\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('add', ('c', 'c-id', 'file', 'alt\ncontent\n'))])
+ # We should now have a graph:
+ # A
+ # |\
+ # C B
+ # And not A => B => C
+ repo = builder.get_branch().repository
+ self.assertEqual({'B-id': ('A-id',), 'C-id': ('A-id',)},
+ repo.get_parent_map(['B-id', 'C-id']))
+ b_tree = repo.revision_tree('B-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ ], b_tree)
+ self.assertEqual('new\ncontent\n', b_tree.get_file_text('a-id'))
+
+ # We should still be using the content from A in C, not from B
+ c_tree = repo.revision_tree('C-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ (u'c', 'c-id', 'file'),
+ ], c_tree)
+ self.assertEqual('contents', c_tree.get_file_text('a-id'))
+ self.assertEqual('alt\ncontent\n', c_tree.get_file_text('c-id'))
+
+ def test_set_merge_parent(self):
+ builder = self.build_a_rev()
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ builder.build_snapshot('B-id', ['A-id'],
+ [('add', ('b', 'b-id', 'file', 'b\ncontent\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('add', ('c', 'c-id', 'file', 'alt\ncontent\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ repo = builder.get_branch().repository
+ self.assertEqual({'B-id': ('A-id',), 'C-id': ('A-id',),
+ 'D-id': ('B-id', 'C-id')},
+ repo.get_parent_map(['B-id', 'C-id', 'D-id']))
+ d_tree = repo.revision_tree('D-id')
+ # Note: by default a merge node does *not* pull in the changes from the
+ # merged tree; you have to supply them yourself.
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ (u'b', 'b-id', 'file'),
+ ], d_tree)
+
+ def test_set_merge_parent_and_contents(self):
+ builder = self.build_a_rev()
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ builder.build_snapshot('B-id', ['A-id'],
+ [('add', ('b', 'b-id', 'file', 'b\ncontent\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('add', ('c', 'c-id', 'file', 'alt\ncontent\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('add', ('c', 'c-id', 'file', 'alt\ncontent\n'))])
+ repo = builder.get_branch().repository
+ self.assertEqual({'B-id': ('A-id',), 'C-id': ('A-id',),
+ 'D-id': ('B-id', 'C-id')},
+ repo.get_parent_map(['B-id', 'C-id', 'D-id']))
+ d_tree = repo.revision_tree('D-id')
+ self.assertTreeShape([(u'', 'a-root-id', 'directory'),
+ (u'a', 'a-id', 'file'),
+ (u'b', 'b-id', 'file'),
+ (u'c', 'c-id', 'file'),
+ ], d_tree)
+ # Because we copied the exact text into *this* tree, the 'c' file
+ # should look like it was not modified in the merge
+ self.assertEqual('C-id', d_tree.get_file_revision('c-id'))
+
+ def test_set_parent_to_null(self):
+ builder = self.build_a_rev()
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ builder.build_snapshot('B-id', [],
+ [('add', ('', None, 'directory', None))])
+ # We should now have a graph:
+ # A B
+ # And not A => B
+ repo = builder.get_branch().repository
+ self.assertEqual({'A-id': (_mod_revision.NULL_REVISION,),
+ 'B-id': (_mod_revision.NULL_REVISION,),},
+ repo.get_parent_map(['A-id', 'B-id']))
+
+ def test_start_finish_series(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ builder.start_series()
+ try:
+ self.assertIsNot(None, builder._tree)
+ self.assertEqual('w', builder._tree._lock_mode)
+ self.assertTrue(builder._branch.is_locked())
+ finally:
+ builder.finish_series()
+ self.assertIs(None, builder._tree)
+ self.assertFalse(builder._branch.is_locked())
+
+ def test_ghost_mainline_history(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ builder.start_series()
+ try:
+ builder.build_snapshot('tip', ['ghost'],
+ [('add', ('', 'ROOT_ID', 'directory', ''))],
+ allow_leftmost_as_ghost=True)
+ finally:
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ self.assertEqual(('ghost',),
+ b.repository.get_graph().get_parent_map(['tip'])['tip'])
+
+ def test_unversion_root_add_new_root(self):
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ builder.start_series()
+ builder.build_snapshot('rev-1', None,
+ [('add', ('', 'TREE_ROOT', 'directory', ''))])
+ builder.build_snapshot('rev-2', None,
+ [('unversion', 'TREE_ROOT'),
+ ('add', ('', 'my-root', 'directory', ''))])
+ builder.finish_series()
+ rev_tree = builder.get_branch().repository.revision_tree('rev-2')
+ self.assertTreeShape([(u'', 'my-root', 'directory')], rev_tree)
+
+ def test_empty_flush(self):
+ """A flush with no actions before it is a no-op."""
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ builder.start_series()
+ builder.build_snapshot('rev-1', None,
+ [('add', ('', 'TREE_ROOT', 'directory', ''))])
+ builder.build_snapshot('rev-2', None, [('flush', None)])
+ builder.finish_series()
+ rev_tree = builder.get_branch().repository.revision_tree('rev-2')
+ self.assertTreeShape([(u'', 'TREE_ROOT', 'directory')], rev_tree)
+
+ def test_kind_change(self):
+ """It's possible to change the kind of an entry in a single snapshot
+ with a bit of help from the 'flush' action.
+ """
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ builder.start_series()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', None,
+ [('unversion', 'a-id'),
+ ('flush', None),
+ ('add', (u'a', 'a-id', 'directory', None))])
+ builder.finish_series()
+ rev_tree = builder.get_branch().repository.revision_tree('B-id')
+ self.assertTreeShape(
+ [(u'', 'a-root-id', 'directory'), (u'a', 'a-id', 'directory')],
+ rev_tree)
+
+ def test_pivot_root(self):
+ """It's possible (albeit awkward) to move an existing dir to the root
+ in a single snapshot by using unversion then flush then add.
+ """
+ builder = BranchBuilder(self.get_transport().clone('foo'))
+ builder.start_series()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'orig-root', 'directory', None)),
+ ('add', (u'dir', 'dir-id', 'directory', None))])
+ builder.build_snapshot('B-id', None,
+ [('unversion', 'orig-root'), # implicitly unversions all children
+ ('flush', None),
+ ('add', (u'', 'dir-id', 'directory', None))])
+ builder.finish_series()
+ rev_tree = builder.get_branch().repository.revision_tree('B-id')
+ self.assertTreeShape([(u'', 'dir-id', 'directory')], rev_tree)
+
diff --git a/bzrlib/tests/test_btree_index.py b/bzrlib/tests/test_btree_index.py
new file mode 100644
index 0000000..0bc3386
--- /dev/null
+++ b/bzrlib/tests/test_btree_index.py
@@ -0,0 +1,1545 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests for btree indices."""
+
+import pprint
+import zlib
+
+from bzrlib import (
+ btree_index,
+ errors,
+ fifo_cache,
+ lru_cache,
+ osutils,
+ tests,
+ transport,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ scenarios,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+load_tests = scenarios.load_tests_apply_scenarios
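+# load_tests_apply_scenarios parametrises test classes that define a
+# 'scenarios' attribute, e.g. over the Python and C btree parsers produced
+# by btreeparser_scenarios below.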
+
+
+def btreeparser_scenarios():
+ import bzrlib._btree_serializer_py as py_module
+ scenarios = [('python', {'parse_btree': py_module})]
+ if compiled_btreeparser_feature.available():
+ scenarios.append(('C',
+ {'parse_btree': compiled_btreeparser_feature.module}))
+ return scenarios
+
+
+compiled_btreeparser_feature = features.ModuleAvailableFeature(
+ 'bzrlib._btree_serializer_pyx')
+
+
+class BTreeTestCase(TestCaseWithTransport):
+ # test names here are suffixed by the key length and reference list count
+ # that they test.
+
+ def setUp(self):
+ TestCaseWithTransport.setUp(self)
+ self.overrideAttr(btree_index, '_RESERVED_HEADER_BYTES', 100)
+
+ def make_nodes(self, count, key_elements, reference_lists):
+ """Generate count*key_elements sample nodes."""
+ keys = []
+ for prefix_pos in range(key_elements):
+ if key_elements - 1:
+ prefix = (str(prefix_pos) * 40,)
+ else:
+ prefix = ()
+ for pos in xrange(count):
+ # TODO: This creates odd keys. When count == 100,000, it
+ # creates a 240 byte key
+ key = prefix + (str(pos) * 40,)
+ value = "value:%s" % pos
+ if reference_lists:
+ # generate some references
+ refs = []
+ for list_pos in range(reference_lists):
+ # as many keys in each list as its index + the key depth
+ # mod 2 - this generates both 0 length lists and
+ # ones slightly longer than the number of lists.
+ # It also ensures we have non homogeneous lists.
+ refs.append([])
+ for ref_pos in range(list_pos + pos % 2):
+ if pos % 2:
+ # refer to a nearby key
+ refs[-1].append(prefix + ("ref" + str(pos - 1) * 40,))
+ else:
+ # serial of this ref in the ref list
+ refs[-1].append(prefix + ("ref" + str(ref_pos) * 40,))
+ refs[-1] = tuple(refs[-1])
+ refs = tuple(refs)
+ else:
+ refs = ()
+ keys.append((key, value, refs))
+ return keys
+
+ def shrink_page_size(self):
+ """Shrink the default page size so that less fits in a page."""
+ self.overrideAttr(btree_index, '_PAGE_SIZE')
+ btree_index._PAGE_SIZE = 2048
+
+ def assertEqualsApproxCompressed(self, expected, actual, slop=6):
+ """Check a count of compressed bytes is approximately as expected
+
+ Relying on compressed length being stable even with fixed inputs is
+ slightly bogus, but zlib is stable enough that this mostly works.
+ """
+ if not expected - slop < actual < expected + slop:
+ self.fail("Expected around %d bytes compressed but got %d" %
+ (expected, actual))
+
+
+class TestBTreeBuilder(BTreeTestCase):
+
+ def test_clear_cache(self):
+ builder = btree_index.BTreeBuilder(reference_lists=0, key_elements=1)
+ # This is a no-op, but we need the api to be consistent with other
+ # BTreeGraphIndex apis.
+ builder.clear_cache()
+
+ def test_empty_1_0(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=0\n"
+ "row_lengths=\n",
+ content)
+
+ def test_empty_2_1(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=1)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=1\nkey_elements=2\nlen=0\n"
+ "row_lengths=\n",
+ content)
+
+ def test_root_leaf_1_0(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ nodes = self.make_nodes(5, 1, 0)
+ for node in nodes:
+ builder.add_node(*node)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqual(131, len(content))
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=5\n"
+ "row_lengths=1\n",
+ content[:73])
+ node_content = content[73:]
+ node_bytes = zlib.decompress(node_content)
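+ # Each leaf line is serialised as "<key>\x00<reference lists>\x00<value>";
+ # with zero reference lists the middle field is empty.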
+ expected_node = ("type=leaf\n"
+ "0000000000000000000000000000000000000000\x00\x00value:0\n"
+ "1111111111111111111111111111111111111111\x00\x00value:1\n"
+ "2222222222222222222222222222222222222222\x00\x00value:2\n"
+ "3333333333333333333333333333333333333333\x00\x00value:3\n"
+ "4444444444444444444444444444444444444444\x00\x00value:4\n")
+ self.assertEqual(expected_node, node_bytes)
+
+ def test_root_leaf_2_2(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ nodes = self.make_nodes(5, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqual(238, len(content))
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=2\nkey_elements=2\nlen=10\n"
+ "row_lengths=1\n",
+ content[:74])
+ node_content = content[74:]
+ node_bytes = zlib.decompress(node_content)
+ expected_node = (
+ "type=leaf\n"
+ "0000000000000000000000000000000000000000\x000000000000000000000000000000000000000000\x00\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:0\n"
+ "0000000000000000000000000000000000000000\x001111111111111111111111111111111111111111\x000000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\r0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:1\n"
+ "0000000000000000000000000000000000000000\x002222222222222222222222222222222222222222\x00\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:2\n"
+ "0000000000000000000000000000000000000000\x003333333333333333333333333333333333333333\x000000000000000000000000000000000000000000\x00ref2222222222222222222222222222222222222222\t0000000000000000000000000000000000000000\x00ref2222222222222222222222222222222222222222\r0000000000000000000000000000000000000000\x00ref2222222222222222222222222222222222222222\x00value:3\n"
+ "0000000000000000000000000000000000000000\x004444444444444444444444444444444444444444\x00\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:4\n"
+ "1111111111111111111111111111111111111111\x000000000000000000000000000000000000000000\x00\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:0\n"
+ "1111111111111111111111111111111111111111\x001111111111111111111111111111111111111111\x001111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\r1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:1\n"
+ "1111111111111111111111111111111111111111\x002222222222222222222222222222222222222222\x00\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:2\n"
+ "1111111111111111111111111111111111111111\x003333333333333333333333333333333333333333\x001111111111111111111111111111111111111111\x00ref2222222222222222222222222222222222222222\t1111111111111111111111111111111111111111\x00ref2222222222222222222222222222222222222222\r1111111111111111111111111111111111111111\x00ref2222222222222222222222222222222222222222\x00value:3\n"
+ "1111111111111111111111111111111111111111\x004444444444444444444444444444444444444444\x00\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:4\n"
+ ""
+ )
+ self.assertEqual(expected_node, node_bytes)
+
+ def test_2_leaves_1_0(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ nodes = self.make_nodes(400, 1, 0)
+ for node in nodes:
+ builder.add_node(*node)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqualsApproxCompressed(9283, len(content))
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=400\n"
+ "row_lengths=1,2\n",
+ content[:77])
+ root = content[77:4096]
+ leaf1 = content[4096:8192]
+ leaf2 = content[8192:]
+ root_bytes = zlib.decompress(root)
+ expected_root = (
+ "type=internal\n"
+ "offset=0\n"
+ ) + ("307" * 40) + "\n"
+ self.assertEqual(expected_root, root_bytes)
+ # We already know serialisation works for leaves, check key selection:
+ leaf1_bytes = zlib.decompress(leaf1)
+ sorted_node_keys = sorted(node[0] for node in nodes)
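+ # _LeafNode parses a decompressed page given the key width (1) and the
+ # number of reference lists (0).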
+ node = btree_index._LeafNode(leaf1_bytes, 1, 0)
+ self.assertEqual(231, len(node))
+ self.assertEqual(sorted_node_keys[:231], node.all_keys())
+ leaf2_bytes = zlib.decompress(leaf2)
+ node = btree_index._LeafNode(leaf2_bytes, 1, 0)
+ self.assertEqual(400 - 231, len(node))
+ self.assertEqual(sorted_node_keys[231:], node.all_keys())
+
+ def test_last_page_rounded_1_layer(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ nodes = self.make_nodes(10, 1, 0)
+ for node in nodes:
+ builder.add_node(*node)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqualsApproxCompressed(155, len(content))
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=10\n"
+ "row_lengths=1\n",
+ content[:74])
+ # Check the last page is well formed
+ leaf2 = content[74:]
+ leaf2_bytes = zlib.decompress(leaf2)
+ node = btree_index._LeafNode(leaf2_bytes, 1, 0)
+ self.assertEqual(10, len(node))
+ sorted_node_keys = sorted(node[0] for node in nodes)
+ self.assertEqual(sorted_node_keys, node.all_keys())
+
+ def test_last_page_not_rounded_2_layer(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ nodes = self.make_nodes(400, 1, 0)
+ for node in nodes:
+ builder.add_node(*node)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqualsApproxCompressed(9283, len(content))
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=400\n"
+ "row_lengths=1,2\n",
+ content[:77])
+ # Check the last page is well formed
+ leaf2 = content[8192:]
+ leaf2_bytes = zlib.decompress(leaf2)
+ node = btree_index._LeafNode(leaf2_bytes, 1, 0)
+ self.assertEqual(400 - 231, len(node))
+ sorted_node_keys = sorted(node[0] for node in nodes)
+ self.assertEqual(sorted_node_keys[231:], node.all_keys())
+
+ def test_three_level_tree_details(self):
+ # The left-most pointer in the second internal node in a row should
+ # point to the second node that the internal node is for, _not_
+ # the first, otherwise the first node overlaps with the last node of
+ # the prior internal node on that row.
+ self.shrink_page_size()
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ # 40K nodes is enough to create two internal nodes on the second
+ # level, with a 2K page size
+ nodes = self.make_nodes(20000, 2, 2)
+
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', self.time(builder.finish))
+ del builder
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ # Seed the metadata, we're using internal calls now.
+ index.key_count()
+ self.assertEqual(3, len(index._row_lengths),
+ "Not enough rows: %r" % index._row_lengths)
+ self.assertEqual(4, len(index._row_offsets))
+ self.assertEqual(sum(index._row_lengths), index._row_offsets[-1])
+ internal_nodes = index._get_internal_nodes([0, 1, 2])
+ root_node = internal_nodes[0]
+ internal_node1 = internal_nodes[1]
+ internal_node2 = internal_nodes[2]
+ # The left-most node internal_node2 points at should be one after the
+ # right-most node pointed at by internal_node1.
+ self.assertEqual(internal_node2.offset, 1 + len(internal_node1.keys))
+ # internal_node2's first key should be the left-most key of the second
+ # node it points at; we can check this by looking for that key in the
+ # second node it points at.
+ pos = index._row_offsets[2] + internal_node2.offset + 1
+ leaf = index._get_leaf_nodes([pos])[pos]
+ self.assertTrue(internal_node2.keys[0] in leaf)
+
+ def test_2_leaves_2_2(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ nodes = self.make_nodes(100, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ self.assertEqualsApproxCompressed(12643, len(content))
+ self.assertEqual(
+ "B+Tree Graph Index 2\nnode_ref_lists=2\nkey_elements=2\nlen=200\n"
+ "row_lengths=1,3\n",
+ content[:77])
+ root = content[77:4096]
+ leaf1 = content[4096:8192]
+ leaf2 = content[8192:12288]
+ leaf3 = content[12288:]
+ root_bytes = zlib.decompress(root)
+ expected_root = (
+ "type=internal\n"
+ "offset=0\n"
+ + ("0" * 40) + "\x00" + ("91" * 40) + "\n"
+ + ("1" * 40) + "\x00" + ("81" * 40) + "\n"
+ )
+ self.assertEqual(expected_root, root_bytes)
+ # We assume the other leaf nodes have been written correctly - layering
+ # FTW.
+
+ def test_spill_index_stress_1_1(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, spill_at=2)
+ nodes = [node[0:2] for node in self.make_nodes(16, 1, 0)]
+ builder.add_node(*nodes[0])
+ # Test the parts of the index that take up memory are doing so
+ # predictably.
+ self.assertEqual(1, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ builder.add_node(*nodes[1])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(1, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ # now back to memory
+ builder.add_node(*nodes[2])
+ self.assertEqual(1, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ # And spills to a second backing index combining all
+ builder.add_node(*nodes[3])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(2, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ # The next spills to the 2-len slot
+ builder.add_node(*nodes[4])
+ builder.add_node(*nodes[5])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(2, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ # Next spill combines
+ builder.add_node(*nodes[6])
+ builder.add_node(*nodes[7])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(None, builder._backing_indices[1])
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ # And so forth - counting up in binary.
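+ # (with spill_at=2, the backing index sizes mirror the binary
+ # representation of the number of spilled keys: slot i holds 2**(i+1)
+ # keys when that "bit" is set)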
+ builder.add_node(*nodes[8])
+ builder.add_node(*nodes[9])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ self.assertEqual(None, builder._backing_indices[1])
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ builder.add_node(*nodes[10])
+ builder.add_node(*nodes[11])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ builder.add_node(*nodes[12])
+ # Test that memory and disk are both used for query methods; and that
+ # None is skipped over happily.
+ self.assertEqual([(builder,) + node for node in sorted(nodes[:13])],
+ list(builder.iter_all_entries()))
+ # Two nodes - one memory one disk
+ self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
+ set(builder.iter_entries([nodes[12][0], nodes[11][0]])))
+ self.assertEqual(13, builder.key_count())
+ self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
+ set(builder.iter_entries_prefix([nodes[12][0], nodes[11][0]])))
+ builder.add_node(*nodes[13])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ builder.add_node(*nodes[14])
+ builder.add_node(*nodes[15])
+ self.assertEqual(4, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(None, builder._backing_indices[1])
+ self.assertEqual(None, builder._backing_indices[2])
+ self.assertEqual(16, builder._backing_indices[3].key_count())
+ # Now finish, and check we got a correctly ordered tree
+ t = self.get_transport('')
+ size = t.put_file('index', builder.finish())
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ nodes = list(index.iter_all_entries())
+ self.assertEqual(sorted(nodes), nodes)
+ self.assertEqual(16, len(nodes))
+
+ def test_spill_index_stress_1_1_no_combine(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, spill_at=2)
+ builder.set_optimize(for_size=False, combine_backing_indices=False)
+ nodes = [node[0:2] for node in self.make_nodes(16, 1, 0)]
+ builder.add_node(*nodes[0])
+ # Test the parts of the index that take up memory are doing so
+ # predictably.
+ self.assertEqual(1, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ builder.add_node(*nodes[1])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(1, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ # now back to memory
+ builder.add_node(*nodes[2])
+ self.assertEqual(1, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ # And spills to a second backing index but doesn't combine
+ builder.add_node(*nodes[3])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(2, len(builder._backing_indices))
+ for backing_index in builder._backing_indices:
+ self.assertEqual(2, backing_index.key_count())
+ # The next spills to the 3rd slot
+ builder.add_node(*nodes[4])
+ builder.add_node(*nodes[5])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(3, len(builder._backing_indices))
+ for backing_index in builder._backing_indices:
+ self.assertEqual(2, backing_index.key_count())
+ # Now spill a few more, and check that we don't combine
+ builder.add_node(*nodes[6])
+ builder.add_node(*nodes[7])
+ builder.add_node(*nodes[8])
+ builder.add_node(*nodes[9])
+ builder.add_node(*nodes[10])
+ builder.add_node(*nodes[11])
+ builder.add_node(*nodes[12])
+ self.assertEqual(6, len(builder._backing_indices))
+ for backing_index in builder._backing_indices:
+ self.assertEqual(2, backing_index.key_count())
+ # Test that memory and disk are both used for query methods; and that
+ # None is skipped over happily.
+ self.assertEqual([(builder,) + node for node in sorted(nodes[:13])],
+ list(builder.iter_all_entries()))
+ # Two nodes - one memory one disk
+ self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
+ set(builder.iter_entries([nodes[12][0], nodes[11][0]])))
+ self.assertEqual(13, builder.key_count())
+ self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
+ set(builder.iter_entries_prefix([nodes[12][0], nodes[11][0]])))
+ builder.add_node(*nodes[13])
+ builder.add_node(*nodes[14])
+ builder.add_node(*nodes[15])
+ self.assertEqual(8, len(builder._backing_indices))
+ for backing_index in builder._backing_indices:
+ self.assertEqual(2, backing_index.key_count())
+ # Now finish, and check we got a correctly ordered tree
+ transport = self.get_transport('')
+ size = transport.put_file('index', builder.finish())
+ index = btree_index.BTreeGraphIndex(transport, 'index', size)
+ nodes = list(index.iter_all_entries())
+ self.assertEqual(sorted(nodes), nodes)
+ self.assertEqual(16, len(nodes))
+
+ def test_set_optimize(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ builder.set_optimize(for_size=True)
+ self.assertTrue(builder._optimize_for_size)
+ builder.set_optimize(for_size=False)
+ self.assertFalse(builder._optimize_for_size)
+ # test that we can set combine_backing_indices without affecting
+ # _optimize_for_size
+ obj = object()
+ builder._optimize_for_size = obj
+ builder.set_optimize(combine_backing_indices=False)
+ self.assertFalse(builder._combine_backing_indices)
+ self.assertIs(obj, builder._optimize_for_size)
+ builder.set_optimize(combine_backing_indices=True)
+ self.assertTrue(builder._combine_backing_indices)
+ self.assertIs(obj, builder._optimize_for_size)
+
+ def test_spill_index_stress_2_2(self):
+ # test that references and longer keys don't confuse things.
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2,
+ spill_at=2)
+ nodes = self.make_nodes(16, 2, 2)
+ builder.add_node(*nodes[0])
+ # Test the parts of the index that take up memory are doing so
+ # predictably.
+ self.assertEqual(1, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ builder.add_node(*nodes[1])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(1, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ # now back to memory
+ old = dict(builder._get_nodes_by_key()) # Build up the nodes-by-key dict
+ builder.add_node(*nodes[2])
+ self.assertEqual(1, len(builder._nodes))
+ self.assertIsNot(None, builder._nodes_by_key)
+ self.assertNotEqual({}, builder._nodes_by_key)
+ # We should have a new entry
+ self.assertNotEqual(old, builder._nodes_by_key)
+ # And spills to a second backing index combining all
+ builder.add_node(*nodes[3])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(2, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ # The next spills to the 2-len slot
+ builder.add_node(*nodes[4])
+ builder.add_node(*nodes[5])
+ self.assertEqual(0, len(builder._nodes))
+ self.assertIs(None, builder._nodes_by_key)
+ self.assertEqual(2, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ # Next spill combines
+ builder.add_node(*nodes[6])
+ builder.add_node(*nodes[7])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(None, builder._backing_indices[1])
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ # And so forth - counting up in binary.
+ builder.add_node(*nodes[8])
+ builder.add_node(*nodes[9])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ self.assertEqual(None, builder._backing_indices[1])
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ builder.add_node(*nodes[10])
+ builder.add_node(*nodes[11])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ builder.add_node(*nodes[12])
+ # Test that memory and disk are both used for query methods; and that
+ # None is skipped over happily.
+ self.assertEqual([(builder,) + node for node in sorted(nodes[:13])],
+ list(builder.iter_all_entries()))
+ # Two nodes - one memory one disk
+ self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
+ set(builder.iter_entries([nodes[12][0], nodes[11][0]])))
+ self.assertEqual(13, builder.key_count())
+ self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
+ set(builder.iter_entries_prefix([nodes[12][0], nodes[11][0]])))
+ builder.add_node(*nodes[13])
+ self.assertEqual(3, len(builder._backing_indices))
+ self.assertEqual(2, builder._backing_indices[0].key_count())
+ self.assertEqual(4, builder._backing_indices[1].key_count())
+ self.assertEqual(8, builder._backing_indices[2].key_count())
+ builder.add_node(*nodes[14])
+ builder.add_node(*nodes[15])
+ self.assertEqual(4, len(builder._backing_indices))
+ self.assertEqual(None, builder._backing_indices[0])
+ self.assertEqual(None, builder._backing_indices[1])
+ self.assertEqual(None, builder._backing_indices[2])
+ self.assertEqual(16, builder._backing_indices[3].key_count())
+ # Now finish, and check we got a correctly ordered tree
+ transport = self.get_transport('')
+ size = transport.put_file('index', builder.finish())
+ index = btree_index.BTreeGraphIndex(transport, 'index', size)
+ nodes = list(index.iter_all_entries())
+ self.assertEqual(sorted(nodes), nodes)
+ self.assertEqual(16, len(nodes))
+
+ def test_spill_index_duplicate_key_caught_on_finish(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, spill_at=2)
+ nodes = [node[0:2] for node in self.make_nodes(16, 1, 0)]
+ builder.add_node(*nodes[0])
+ builder.add_node(*nodes[1])
+ builder.add_node(*nodes[0])
+ self.assertRaises(errors.BadIndexDuplicateKey, builder.finish)
+
+
+class TestBTreeIndex(BTreeTestCase):
+
+ def make_index(self, ref_lists=0, key_elements=1, nodes=[]):
+ builder = btree_index.BTreeBuilder(reference_lists=ref_lists,
+ key_elements=key_elements)
+ for key, value, references in nodes:
+ builder.add_node(key, value, references)
+ stream = builder.finish()
+ trans = transport.get_transport_from_url('trace+' + self.get_url())
+ size = trans.put_file('index', stream)
+ return btree_index.BTreeGraphIndex(trans, 'index', size)
+
+ def make_index_with_offset(self, ref_lists=1, key_elements=1, nodes=[],
+ offset=0):
+ builder = btree_index.BTreeBuilder(key_elements=key_elements,
+ reference_lists=ref_lists)
+ builder.add_nodes(nodes)
+ transport = self.get_transport('')
+ # NamedTemporaryFile dies on builder.finish().read(). weird.
+ temp_file = builder.finish()
+ content = temp_file.read()
+ del temp_file
+ size = len(content)
+ transport.put_bytes('index', (' '*offset)+content)
+ return btree_index.BTreeGraphIndex(transport, 'index', size=size,
+ offset=offset)
+
+ def test_clear_cache(self):
+ nodes = self.make_nodes(160, 2, 2)
+ index = self.make_index(ref_lists=2, key_elements=2, nodes=nodes)
+ self.assertEqual(1, len(list(index.iter_entries([nodes[30][0]]))))
+ self.assertEqual([1, 4], index._row_lengths)
+ self.assertIsNot(None, index._root_node)
+ internal_node_pre_clear = index._internal_node_cache.keys()
+ self.assertTrue(len(index._leaf_node_cache) > 0)
+ index.clear_cache()
+ # We don't touch _root_node or _internal_node_cache, both should be
+ # small, and can save a round trip or two
+ self.assertIsNot(None, index._root_node)
+ # NOTE: We don't want to affect the _internal_node_cache, as we expect
+ # it will be small, and if we ever do touch this index again, it
+ # will save round-trips. This assertion isn't very strong,
+ # because without a 3-level index, we don't have any internal
+ # nodes cached.
+ self.assertEqual(internal_node_pre_clear,
+ index._internal_node_cache.keys())
+ self.assertEqual(0, len(index._leaf_node_cache))
+
+ def test_trivial_constructor(self):
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
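+ # 'trace+' wraps the transport in a tracing decorator that records each
+ # call in t._activity, so tests can assert on the exact I/O performed.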
+ index = btree_index.BTreeGraphIndex(t, 'index', None)
+ # Checks the page size at load, but that isn't logged yet.
+ self.assertEqual([], t._activity)
+
+ def test_with_size_constructor(self):
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ index = btree_index.BTreeGraphIndex(t, 'index', 1)
+ # Checks the page size at load, but that isn't logged yet.
+ self.assertEqual([], t._activity)
+
+ def test_empty_key_count_no_size(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ t.put_file('index', builder.finish())
+ index = btree_index.BTreeGraphIndex(t, 'index', None)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ self.assertEqual(0, index.key_count())
+ # The entire index should have been requested (as we generally have the
+ # size available, and doing many small readvs is inappropriate).
+ # We can't tell how much was actually read here, but - check the code.
+ self.assertEqual([('get', 'index')], t._activity)
+
+ def test_empty_key_count(self):
+ builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ self.assertEqual(72, size)
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ self.assertEqual(0, index.key_count())
+ # The entire index should have been read, as 4K > size
+ self.assertEqual([('readv', 'index', [(0, 72)], False, None)],
+ t._activity)
+
+ def test_non_empty_key_count_2_2(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ nodes = self.make_nodes(35, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ self.assertEqual(70, index.key_count())
+ # The entire index should have been read, as it is one page long.
+ self.assertEqual([('readv', 'index', [(0, size)], False, None)],
+ t._activity)
+ self.assertEqualsApproxCompressed(1173, size)
+
+ def test_with_offset_no_size(self):
+ index = self.make_index_with_offset(key_elements=1, ref_lists=1,
+ offset=1234,
+ nodes=self.make_nodes(200, 1, 1))
+ index._size = None # throw away the size info
+ self.assertEqual(200, index.key_count())
+
+ def test_with_small_offset(self):
+ index = self.make_index_with_offset(key_elements=1, ref_lists=1,
+ offset=1234,
+ nodes=self.make_nodes(200, 1, 1))
+ self.assertEqual(200, index.key_count())
+
+ def test_with_large_offset(self):
+ index = self.make_index_with_offset(key_elements=1, ref_lists=1,
+ offset=123456,
+ nodes=self.make_nodes(200, 1, 1))
+ self.assertEqual(200, index.key_count())
+
+ def test__read_nodes_no_size_one_page_reads_once(self):
+ self.make_index(nodes=[(('key',), 'value', ())])
+ trans = transport.get_transport_from_url('trace+' + self.get_url())
+ index = btree_index.BTreeGraphIndex(trans, 'index', None)
+ del trans._activity[:]
+ nodes = dict(index._read_nodes([0]))
+ self.assertEqual([0], nodes.keys())
+ node = nodes[0]
+ self.assertEqual([('key',)], node.all_keys())
+ self.assertEqual([('get', 'index')], trans._activity)
+
+ def test__read_nodes_no_size_multiple_pages(self):
+ index = self.make_index(2, 2, nodes=self.make_nodes(160, 2, 2))
+ index.key_count()
+ num_pages = index._row_offsets[-1]
+ # Reopen with a traced transport and no size
+ trans = transport.get_transport_from_url('trace+' + self.get_url())
+ index = btree_index.BTreeGraphIndex(trans, 'index', None)
+ del trans._activity[:]
+ nodes = dict(index._read_nodes([0]))
+ self.assertEqual(range(num_pages), nodes.keys())
+
+ def test_2_levels_key_count_2_2(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ nodes = self.make_nodes(160, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ self.assertEqualsApproxCompressed(17692, size)
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ self.assertEqual(320, index.key_count())
+ # The entire index should not have been read.
+ self.assertEqual([('readv', 'index', [(0, 4096)], False, None)],
+ t._activity)
+
+ def test_validate_one_page(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ nodes = self.make_nodes(45, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ index.validate()
+ # The entire index should have been read linearly.
+ self.assertEqual([('readv', 'index', [(0, size)], False, None)],
+ t._activity)
+ self.assertEqualsApproxCompressed(1488, size)
+
+ def test_validate_two_pages(self):
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ nodes = self.make_nodes(80, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ # Root page, 2 leaf pages
+ self.assertEqualsApproxCompressed(9339, size)
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ index.validate()
+ rem = size - 8192 # Number of remaining bytes after second block
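+ # validate() fetches the root page first to learn the layout, then the
+ # remaining pages in order, as the recorded activity shows.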
+ # The entire index should have been read linearly.
+ self.assertEqual(
+ [('readv', 'index', [(0, 4096)], False, None),
+ ('readv', 'index', [(4096, 4096), (8192, rem)], False, None)],
+ t._activity)
+ # XXX: TODO: write some badly-ordered nodes, and some pointers-to-wrong
+ # node and make validate find them.
+
+ def test_eq_ne(self):
+ # two indices are equal when constructed with the same parameters:
+ t1 = transport.get_transport_from_url('trace+' + self.get_url(''))
+ t2 = self.get_transport()
+ self.assertTrue(
+ btree_index.BTreeGraphIndex(t1, 'index', None) ==
+ btree_index.BTreeGraphIndex(t1, 'index', None))
+ self.assertTrue(
+ btree_index.BTreeGraphIndex(t1, 'index', 20) ==
+ btree_index.BTreeGraphIndex(t1, 'index', 20))
+ self.assertFalse(
+ btree_index.BTreeGraphIndex(t1, 'index', 20) ==
+ btree_index.BTreeGraphIndex(t2, 'index', 20))
+ self.assertFalse(
+ btree_index.BTreeGraphIndex(t1, 'inde1', 20) ==
+ btree_index.BTreeGraphIndex(t1, 'inde2', 20))
+ self.assertFalse(
+ btree_index.BTreeGraphIndex(t1, 'index', 10) ==
+ btree_index.BTreeGraphIndex(t1, 'index', 20))
+ self.assertFalse(
+ btree_index.BTreeGraphIndex(t1, 'index', None) !=
+ btree_index.BTreeGraphIndex(t1, 'index', None))
+ self.assertFalse(
+ btree_index.BTreeGraphIndex(t1, 'index', 20) !=
+ btree_index.BTreeGraphIndex(t1, 'index', 20))
+ self.assertTrue(
+ btree_index.BTreeGraphIndex(t1, 'index', 20) !=
+ btree_index.BTreeGraphIndex(t2, 'index', 20))
+ self.assertTrue(
+ btree_index.BTreeGraphIndex(t1, 'inde1', 20) !=
+ btree_index.BTreeGraphIndex(t1, 'inde2', 20))
+ self.assertTrue(
+ btree_index.BTreeGraphIndex(t1, 'index', 10) !=
+ btree_index.BTreeGraphIndex(t1, 'index', 20))
+
+ def test_key_too_big(self):
+ # the size that matters here is the _compressed_ size of the key, so we can't
+ # do a simple character repeat.
+ bigKey = ''.join(map(repr, xrange(btree_index._PAGE_SIZE)))
+ self.assertRaises(errors.BadIndexKey,
+ self.make_index,
+ nodes=[((bigKey,), 'value', ())])
+
+ def test_iter_all_only_root_no_size(self):
+ self.make_index(nodes=[(('key',), 'value', ())])
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ index = btree_index.BTreeGraphIndex(t, 'index', None)
+ del t._activity[:]
+ self.assertEqual([(('key',), 'value')],
+ [x[1:] for x in index.iter_all_entries()])
+ self.assertEqual([('get', 'index')], t._activity)
+
+ def test_iter_all_entries_reads(self):
+ # iterating all entries reads the header, then does a linear
+ # read.
+ self.shrink_page_size()
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ # 20k nodes is enough to create two internal nodes on the second
+ # level, with a 2K page size
+ nodes = self.make_nodes(10000, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ page_size = btree_index._PAGE_SIZE
+ del builder
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ found_nodes = self.time(list, index.iter_all_entries())
+ bare_nodes = []
+ for node in found_nodes:
+ self.assertTrue(node[0] is index)
+ bare_nodes.append(node[1:])
+ self.assertEqual(3, len(index._row_lengths),
+ "Not enough rows: %r" % index._row_lengths)
+ # Should be as long as the nodes we supplied
+ self.assertEqual(20000, len(found_nodes))
+ # Should have the same content
+ self.assertEqual(set(nodes), set(bare_nodes))
+ # Should have done linear scan IO up the index, ignoring
+ # the internal nodes:
+ # The entire index should have been read
+ total_pages = sum(index._row_lengths)
+ self.assertEqual(total_pages, index._row_offsets[-1])
+ self.assertEqualsApproxCompressed(1303220, size)
+ # The start of the leaves
+ first_byte = index._row_offsets[-2] * page_size
+ readv_request = []
+ for offset in range(first_byte, size, page_size):
+ readv_request.append((offset, page_size))
+ # The last page is truncated
+ readv_request[-1] = (readv_request[-1][0], size % page_size)
+ expected = [('readv', 'index', [(0, page_size)], False, None),
+ ('readv', 'index', readv_request, False, None)]
+ if expected != t._activity:
+ self.assertEqualDiff(pprint.pformat(expected),
+ pprint.pformat(t._activity))
+
+ def _test_iter_entries_references_resolved(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', ), ('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),('ref',)),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries([('name',), ('ref',)])))
+
+ def test_iter_entries_references_2_refs_resolved(self):
+ # iterating some entries reads just the pages needed. For now, to
+ # get it working and start measuring, only 4K pages are read.
+ builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
+ # 320 nodes is enough to create a two-level index.
+ nodes = self.make_nodes(160, 2, 2)
+ for node in nodes:
+ builder.add_node(*node)
+ t = transport.get_transport_from_url('trace+' + self.get_url(''))
+ size = t.put_file('index', builder.finish())
+ del builder
+ index = btree_index.BTreeGraphIndex(t, 'index', size)
+ del t._activity[:]
+ self.assertEqual([], t._activity)
+ # search for one key
+ found_nodes = list(index.iter_entries([nodes[30][0]]))
+ bare_nodes = []
+ for node in found_nodes:
+ self.assertTrue(node[0] is index)
+ bare_nodes.append(node[1:])
+ # Should be as long as the nodes we supplied
+ self.assertEqual(1, len(found_nodes))
+ # Should have the same content
+ self.assertEqual(nodes[30], bare_nodes[0])
+ # Should have read the root node, then one leaf page:
+ self.assertEqual([('readv', 'index', [(0, 4096)], False, None),
+ ('readv', 'index', [(8192, 4096), ], False, None)],
+ t._activity)
+
+ def test_iter_key_prefix_1_element_key_None(self):
+ index = self.make_index()
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([(None, )]))
+
+ def test_iter_key_prefix_wrong_length(self):
+ index = self.make_index()
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([('foo', None)]))
+ index = self.make_index(key_elements=2)
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([('foo', )]))
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([('foo', None, None)]))
+
+ def test_iter_key_prefix_1_key_element_no_refs(self):
+ index = self.make_index(nodes=[
+ (('name', ), 'data', ()),
+ (('ref', ), 'refdata', ())])
+ self.assertEqual(set([(index, ('name', ), 'data'),
+ (index, ('ref', ), 'refdata')]),
+ set(index.iter_entries_prefix([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_1_key_element_refs(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries_prefix([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_2_key_element_no_refs(self):
+ index = self.make_index(key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ()),
+ (('name', 'fin2'), 'beta', ()),
+ (('ref', 'erence'), 'refdata', ())])
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
+ (index, ('ref', 'erence'), 'refdata')]),
+ set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
+ (index, ('name', 'fin2'), 'beta')]),
+ set(index.iter_entries_prefix([('name', None)])))
+
+ def test_iter_key_prefix_2_key_element_refs(self):
+ index = self.make_index(1, key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ([('ref', 'erence')], )),
+ (('name', 'fin2'), 'beta', ([], )),
+ (('ref', 'erence'), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
+ (index, ('ref', 'erence'), 'refdata', ((), ))]),
+ set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
+ (index, ('name', 'fin2'), 'beta', ((), ))]),
+ set(index.iter_entries_prefix([('name', None)])))
+
+ # XXX: external_references tests are duplicated in test_index. We
+ # probably should have per_graph_index tests...
+ def test_external_references_no_refs(self):
+ index = self.make_index(ref_lists=0, nodes=[])
+ self.assertRaises(ValueError, index.external_references, 0)
+
+ def test_external_references_no_results(self):
+ index = self.make_index(ref_lists=1, nodes=[
+ (('key',), 'value', ([],))])
+ self.assertEqual(set(), index.external_references(0))
+
+ def test_external_references_missing_ref(self):
+ missing_key = ('missing',)
+ index = self.make_index(ref_lists=1, nodes=[
+ (('key',), 'value', ([missing_key],))])
+ self.assertEqual(set([missing_key]), index.external_references(0))
+
+ def test_external_references_multiple_ref_lists(self):
+ missing_key = ('missing',)
+ index = self.make_index(ref_lists=2, nodes=[
+ (('key',), 'value', ([], [missing_key]))])
+ self.assertEqual(set([]), index.external_references(0))
+ self.assertEqual(set([missing_key]), index.external_references(1))
+
+ def test_external_references_two_records(self):
+ index = self.make_index(ref_lists=1, nodes=[
+ (('key-1',), 'value', ([('key-2',)],)),
+ (('key-2',), 'value', ([],)),
+ ])
+ self.assertEqual(set([]), index.external_references(0))
+
+ def test__find_ancestors_one_page(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([],)),
+ ])
+ parent_map = {}
+ missing_keys = set()
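+ # _find_ancestors updates parent_map and missing_keys in place and
+ # returns the set of keys that still need to be searched for.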
+ search_keys = index._find_ancestors([key1], 0, parent_map, missing_keys)
+ self.assertEqual({key1: (key2,), key2: ()}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test__find_ancestors_one_page_w_missing(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([],)),
+ ])
+ parent_map = {}
+ missing_keys = set()
+ search_keys = index._find_ancestors([key2, key3], 0, parent_map,
+ missing_keys)
+ self.assertEqual({key2: ()}, parent_map)
+ # we know that key3 is missing because we read the page that it would
+ # otherwise be on
+ self.assertEqual(set([key3]), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test__find_ancestors_one_parent_missing(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([key3],)),
+ ])
+ parent_map = {}
+ missing_keys = set()
+ search_keys = index._find_ancestors([key1], 0, parent_map,
+ missing_keys)
+ self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ # all we know is that key3 wasn't present on the page we were reading
+ # but if you look, the last key is key2 which comes before key3, so we
+ # don't know whether key3 would land on this page or not.
+ self.assertEqual(set([key3]), search_keys)
+ search_keys = index._find_ancestors(search_keys, 0, parent_map,
+ missing_keys)
+ # passing it back in, we are sure it is 'missing'
+ self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
+ self.assertEqual(set([key3]), missing_keys)
+ self.assertEqual(set([]), search_keys)
+
+ def test__find_ancestors_dont_search_known(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([key3],)),
+ (key3, 'value', ([],)),
+ ])
+ # We already know about key2, so we won't try to search for key3
+ parent_map = {key2: (key3,)}
+ missing_keys = set()
+ search_keys = index._find_ancestors([key1], 0, parent_map,
+ missing_keys)
+ self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test__find_ancestors_multiple_pages(self):
+ # We need to use enough keys that we actually cause a split
+ start_time = 1249671539
+ email = "joebob@example.com"
+ nodes = []
+ ref_lists = ((),)
+ rev_keys = []
+ for i in xrange(400):
+ rev_id = '%s-%s-%s' % (email,
+ osutils.compact_date(start_time + i),
+ osutils.rand_chars(16))
+ rev_key = (rev_id,)
+ nodes.append((rev_key, 'value', ref_lists))
+ # We have a single ref list, containing one list of parents with
+ # a single parent key
+ ref_lists = ((rev_key,),)
+ rev_keys.append(rev_key)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=nodes)
+ self.assertEqual(400, index.key_count())
+ self.assertEqual(3, len(index._row_offsets))
+ nodes = dict(index._read_nodes([1, 2]))
+ l1 = nodes[1]
+ l2 = nodes[2]
+ min_l2_key = l2.min_key
+ max_l1_key = l1.max_key
+ self.assertTrue(max_l1_key < min_l2_key)
+ parents_min_l2_key = l2[min_l2_key][1][0]
+ self.assertEqual((l1.max_key,), parents_min_l2_key)
+ # Now, whatever key we select that would fall on the second page,
+ # should give us all the parents until the page break
+ key_idx = rev_keys.index(min_l2_key)
+ next_key = rev_keys[key_idx+1]
+ # So now when we get the parent map, we should get the key we are
+ # looking for, min_l2_key, and then a reference to go look for the
+ # parent of that key
+ parent_map = {}
+ missing_keys = set()
+ search_keys = index._find_ancestors([next_key], 0, parent_map,
+ missing_keys)
+ self.assertEqual([min_l2_key, next_key], sorted(parent_map))
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set([max_l1_key]), search_keys)
+ parent_map = {}
+ search_keys = index._find_ancestors([max_l1_key], 0, parent_map,
+ missing_keys)
+ self.assertEqual(l1.all_keys(), sorted(parent_map))
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test__find_ancestors_empty_index(self):
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[])
+ parent_map = {}
+ missing_keys = set()
+ search_keys = index._find_ancestors([('one',), ('two',)], 0, parent_map,
+ missing_keys)
+ self.assertEqual(set(), search_keys)
+ self.assertEqual({}, parent_map)
+ self.assertEqual(set([('one',), ('two',)]), missing_keys)
+
+ def test_supports_unlimited_cache(self):
+ builder = btree_index.BTreeBuilder(reference_lists=0, key_elements=1)
+ # We need enough nodes to cause a page split (so we have both an
+ # internal node and a couple of leaf nodes); 500 seems to be enough.
+ nodes = self.make_nodes(500, 1, 0)
+ for node in nodes:
+ builder.add_node(*node)
+ stream = builder.finish()
+ trans = self.get_transport()
+ size = trans.put_file('index', stream)
+ index = btree_index.BTreeGraphIndex(trans, 'index', size)
+ self.assertEqual(500, index.key_count())
+ # We have an internal node
+ self.assertEqual(2, len(index._row_lengths))
+ # We have at least 2 leaf nodes
+ self.assertTrue(index._row_lengths[-1] >= 2)
+ self.assertIsInstance(index._leaf_node_cache, lru_cache.LRUCache)
+ self.assertEqual(btree_index._NODE_CACHE_SIZE,
+ index._leaf_node_cache._max_cache)
+ self.assertIsInstance(index._internal_node_cache, fifo_cache.FIFOCache)
+ self.assertEqual(100, index._internal_node_cache._max_cache)
+ # No change if unlimited_cache=False is passed
+ index = btree_index.BTreeGraphIndex(trans, 'index', size,
+ unlimited_cache=False)
+ self.assertIsInstance(index._leaf_node_cache, lru_cache.LRUCache)
+ self.assertEqual(btree_index._NODE_CACHE_SIZE,
+ index._leaf_node_cache._max_cache)
+ self.assertIsInstance(index._internal_node_cache, fifo_cache.FIFOCache)
+ self.assertEqual(100, index._internal_node_cache._max_cache)
+ index = btree_index.BTreeGraphIndex(trans, 'index', size,
+ unlimited_cache=True)
+ self.assertIsInstance(index._leaf_node_cache, dict)
+ self.assertIs(type(index._internal_node_cache), dict)
+ # Exercise the lookup code
+ entries = set(index.iter_entries([n[0] for n in nodes]))
+ self.assertEqual(500, len(entries))
+
+
+class TestBTreeNodes(BTreeTestCase):
+
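+ # Parametrized by btreeparser_scenarios(); setUp installs the
+ # scenario's parser (self.parse_btree) as btree_index._btree_serializer.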
+ scenarios = btreeparser_scenarios()
+
+ def setUp(self):
+ BTreeTestCase.setUp(self)
+ self.overrideAttr(btree_index, '_btree_serializer', self.parse_btree)
+
+ def test_LeafNode_1_0(self):
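+ # A serialized leaf with 1-element keys and no reference lists: after
+ # the "type=leaf" header, each line is key\x00\x00value.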
+ node_bytes = ("type=leaf\n"
+ "0000000000000000000000000000000000000000\x00\x00value:0\n"
+ "1111111111111111111111111111111111111111\x00\x00value:1\n"
+ "2222222222222222222222222222222222222222\x00\x00value:2\n"
+ "3333333333333333333333333333333333333333\x00\x00value:3\n"
+ "4444444444444444444444444444444444444444\x00\x00value:4\n")
+ node = btree_index._LeafNode(node_bytes, 1, 0)
+ # We do direct access, or don't care about order, to leaf nodes most of
+ # the time, so a dict is useful:
+ self.assertEqual({
+ ("0000000000000000000000000000000000000000",): ("value:0", ()),
+ ("1111111111111111111111111111111111111111",): ("value:1", ()),
+ ("2222222222222222222222222222222222222222",): ("value:2", ()),
+ ("3333333333333333333333333333333333333333",): ("value:3", ()),
+ ("4444444444444444444444444444444444444444",): ("value:4", ()),
+ }, dict(node.all_items()))
+
+ def test_LeafNode_2_2(self):
+ node_bytes = ("type=leaf\n"
+ "00\x0000\x00\t00\x00ref00\x00value:0\n"
+ "00\x0011\x0000\x00ref00\t00\x00ref00\r01\x00ref01\x00value:1\n"
+ "11\x0033\x0011\x00ref22\t11\x00ref22\r11\x00ref22\x00value:3\n"
+ "11\x0044\x00\t11\x00ref00\x00value:4\n"
+ ""
+ )
+ node = btree_index._LeafNode(node_bytes, 2, 2)
+ # We do direct access, or don't care about order, to leaf nodes most of
+ # the time, so a dict is useful:
+ self.assertEqual({
+ ('00', '00'): ('value:0', ((), (('00', 'ref00'),))),
+ ('00', '11'): ('value:1',
+ ((('00', 'ref00'),), (('00', 'ref00'), ('01', 'ref01')))),
+ ('11', '33'): ('value:3',
+ ((('11', 'ref22'),), (('11', 'ref22'), ('11', 'ref22')))),
+ ('11', '44'): ('value:4', ((), (('11', 'ref00'),)))
+ }, dict(node.all_items()))
+
+ def test_InternalNode_1(self):
+ node_bytes = ("type=internal\n"
+ "offset=1\n"
+ "0000000000000000000000000000000000000000\n"
+ "1111111111111111111111111111111111111111\n"
+ "2222222222222222222222222222222222222222\n"
+ "3333333333333333333333333333333333333333\n"
+ "4444444444444444444444444444444444444444\n"
+ )
+ node = btree_index._InternalNode(node_bytes)
+ # We want to bisect to find the right children from this node, so a
+ # vector is most useful.
+ self.assertEqual([
+ ("0000000000000000000000000000000000000000",),
+ ("1111111111111111111111111111111111111111",),
+ ("2222222222222222222222222222222222222222",),
+ ("3333333333333333333333333333333333333333",),
+ ("4444444444444444444444444444444444444444",),
+ ], node.keys)
+ self.assertEqual(1, node.offset)
+
+ def assertFlattened(self, expected, key, value, refs):
+ flat_key, flat_line = self.parse_btree._flatten_node(
+ (None, key, value, refs), bool(refs))
+ self.assertEqual('\x00'.join(key), flat_key)
+ self.assertEqual(expected, flat_line)
+
+ def test__flatten_node(self):
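+ # _flatten_node emits one line per key: key elements joined by \x00,
+ # then \x00, the reference lists (lists separated by \t, references
+ # within a list by \r, reference elements by \x00), then \x00 and the
+ # value, ending with \n.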
+ self.assertFlattened('key\0\0value\n', ('key',), 'value', [])
+ self.assertFlattened('key\0tuple\0\0value str\n',
+ ('key', 'tuple'), 'value str', [])
+ self.assertFlattened('key\0tuple\0triple\0\0value str\n',
+ ('key', 'tuple', 'triple'), 'value str', [])
+ self.assertFlattened('k\0t\0s\0ref\0value str\n',
+ ('k', 't', 's'), 'value str', [[('ref',)]])
+ self.assertFlattened('key\0tuple\0ref\0key\0value str\n',
+ ('key', 'tuple'), 'value str', [[('ref', 'key')]])
+ self.assertFlattened("00\x0000\x00\t00\x00ref00\x00value:0\n",
+ ('00', '00'), 'value:0', ((), (('00', 'ref00'),)))
+ self.assertFlattened(
+ "00\x0011\x0000\x00ref00\t00\x00ref00\r01\x00ref01\x00value:1\n",
+ ('00', '11'), 'value:1',
+ ((('00', 'ref00'),), (('00', 'ref00'), ('01', 'ref01'))))
+ self.assertFlattened(
+ "11\x0033\x0011\x00ref22\t11\x00ref22\r11\x00ref22\x00value:3\n",
+ ('11', '33'), 'value:3',
+ ((('11', 'ref22'),), (('11', 'ref22'), ('11', 'ref22'))))
+ self.assertFlattened(
+ "11\x0044\x00\t11\x00ref00\x00value:4\n",
+ ('11', '44'), 'value:4', ((), (('11', 'ref00'),)))
+
+
+class TestCompiledBtree(tests.TestCase):
+
+ def test_exists(self):
+ # This is just to let the user know if they don't have the feature
+ # available
+ self.requireFeature(compiled_btreeparser_feature)
+
+
+class TestMultiBisectRight(tests.TestCase):
+
+ def assertMultiBisectRight(self, offsets, search_keys, fixed_keys):
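+ # offsets is the expected result: a list of (bisect_right offset into
+ # fixed_keys, search keys landing at that offset) pairs.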
+ self.assertEqual(offsets,
+ btree_index.BTreeGraphIndex._multi_bisect_right(
+ search_keys, fixed_keys))
+
+ def test_after(self):
+ self.assertMultiBisectRight([(1, ['b'])], ['b'], ['a'])
+ self.assertMultiBisectRight([(3, ['e', 'f', 'g'])],
+ ['e', 'f', 'g'], ['a', 'b', 'c'])
+
+ def test_before(self):
+ self.assertMultiBisectRight([(0, ['a'])], ['a'], ['b'])
+ self.assertMultiBisectRight([(0, ['a', 'b', 'c', 'd'])],
+ ['a', 'b', 'c', 'd'], ['e', 'f', 'g'])
+
+ def test_exact(self):
+ self.assertMultiBisectRight([(1, ['a'])], ['a'], ['a'])
+ self.assertMultiBisectRight([(1, ['a']), (2, ['b'])], ['a', 'b'], ['a', 'b'])
+ self.assertMultiBisectRight([(1, ['a']), (3, ['c'])],
+ ['a', 'c'], ['a', 'b', 'c'])
+
+ def test_inbetween(self):
+ self.assertMultiBisectRight([(1, ['b'])], ['b'], ['a', 'c'])
+ self.assertMultiBisectRight([(1, ['b', 'c', 'd']), (2, ['f', 'g'])],
+ ['b', 'c', 'd', 'f', 'g'], ['a', 'e', 'h'])
+
+ def test_mixed(self):
+ self.assertMultiBisectRight([(0, ['a', 'b']), (2, ['d', 'e']),
+ (4, ['g', 'h'])],
+ ['a', 'b', 'd', 'e', 'g', 'h'],
+ ['c', 'd', 'f', 'g'])
+
+
+class TestExpandOffsets(tests.TestCase):
+
+ def make_index(self, size, recommended_pages=None):
+ """Make an index with a generic size.
+
+ This doesn't actually create anything on disk; it just primes a
+ BTreeGraphIndex with the recommended information.
+ """
+ index = btree_index.BTreeGraphIndex(
+ transport.get_transport_from_url('memory:///'),
+ 'test-index', size=size)
+ if recommended_pages is not None:
+ index._recommended_pages = recommended_pages
+ return index
+
+ def set_cached_offsets(self, index, cached_offsets):
+ """Monkeypatch to give a canned answer for _get_offsets_for...()."""
+ def _get_offsets_to_cached_pages():
+ cached = set(cached_offsets)
+ return cached
+ index._get_offsets_to_cached_pages = _get_offsets_to_cached_pages
+
+ def prepare_index(self, index, node_ref_lists, key_length, key_count,
+ row_lengths, cached_offsets):
+ """Set up the BTreeGraphIndex with some pre-canned information."""
+ index.node_ref_lists = node_ref_lists
+ index._key_length = key_length
+ index._key_count = key_count
+ index._row_lengths = row_lengths
+ index._compute_row_offsets()
+ index._root_node = btree_index._InternalNode('internal\noffset=0\n')
+ self.set_cached_offsets(index, cached_offsets)
+
+ def make_100_node_index(self):
+ index = self.make_index(4096*100, 6)
+ # Assume we've already made a single request in the middle
+ self.prepare_index(index, node_ref_lists=0, key_length=1,
+ key_count=1000, row_lengths=[1, 99],
+ cached_offsets=[0, 50])
+ return index
+
+ def make_1000_node_index(self):
+ index = self.make_index(4096*1000, 6)
+ # Pretend we've already made a single request in the middle
+ self.prepare_index(index, node_ref_lists=0, key_length=1,
+ key_count=90000, row_lengths=[1, 9, 990],
+ cached_offsets=[0, 5, 500])
+ return index
+
+ def assertNumPages(self, expected_pages, index, size):
+ index._size = size
+ self.assertEqual(expected_pages, index._compute_total_pages_in_index())
+
+ def assertExpandOffsets(self, expected, index, offsets):
+ self.assertEqual(expected, index._expand_offsets(offsets),
+ 'We did not get the expected value after expanding'
+ ' %s' % (offsets,))
+
+ def test_default_recommended_pages(self):
+ index = self.make_index(None)
+ # local transport recommends 4096 byte reads, which is 1 page
+ self.assertEqual(1, index._recommended_pages)
+
+ def test__compute_total_pages_in_index(self):
+ index = self.make_index(None)
+ self.assertNumPages(1, index, 1024)
+ self.assertNumPages(1, index, 4095)
+ self.assertNumPages(1, index, 4096)
+ self.assertNumPages(2, index, 4097)
+ self.assertNumPages(2, index, 8192)
+ self.assertNumPages(76, index, 4096*75 + 10)
+
+ def test__find_layer_start_and_stop(self):
+ index = self.make_1000_node_index()
+ self.assertEqual((0, 1), index._find_layer_first_and_end(0))
+ self.assertEqual((1, 10), index._find_layer_first_and_end(1))
+ self.assertEqual((1, 10), index._find_layer_first_and_end(9))
+ self.assertEqual((10, 1000), index._find_layer_first_and_end(10))
+ self.assertEqual((10, 1000), index._find_layer_first_and_end(99))
+ self.assertEqual((10, 1000), index._find_layer_first_and_end(999))
+
+ def test_unknown_size(self):
+ # We should not expand if we don't know the file size
+ index = self.make_index(None, 10)
+ self.assertExpandOffsets([0], index, [0])
+ self.assertExpandOffsets([1, 4, 9], index, [1, 4, 9])
+
+ def test_more_than_recommended(self):
+ index = self.make_index(4096*100, 2)
+ self.assertExpandOffsets([1, 10], index, [1, 10])
+ self.assertExpandOffsets([1, 10, 20], index, [1, 10, 20])
+
+ def test_read_all_from_root(self):
+ index = self.make_index(4096*10, 20)
+ self.assertExpandOffsets(range(10), index, [0])
+
+ def test_read_all_when_cached(self):
+ # We've read enough that we can grab all the rest in a single request
+ index = self.make_index(4096*10, 5)
+ self.prepare_index(index, node_ref_lists=0, key_length=1,
+ key_count=1000, row_lengths=[1, 9],
+ cached_offsets=[0, 1, 2, 5, 6])
+ # It should fill the remaining nodes, regardless of the one requested
+ self.assertExpandOffsets([3, 4, 7, 8, 9], index, [3])
+ self.assertExpandOffsets([3, 4, 7, 8, 9], index, [8])
+ self.assertExpandOffsets([3, 4, 7, 8, 9], index, [9])
+
+ def test_no_root_node(self):
+ index = self.make_index(4096*10, 5)
+ self.assertExpandOffsets([0], index, [0])
+
+ def test_include_neighbors(self):
+ index = self.make_100_node_index()
+ # We expand in both directions, until we have at least 'recommended'
+ # pages
+ self.assertExpandOffsets([9, 10, 11, 12, 13, 14, 15], index, [12])
+ self.assertExpandOffsets([88, 89, 90, 91, 92, 93, 94], index, [91])
+ # If we hit an 'edge' we continue in the other direction
+ self.assertExpandOffsets([1, 2, 3, 4, 5, 6], index, [2])
+ self.assertExpandOffsets([94, 95, 96, 97, 98, 99], index, [98])
+
+ # Requesting many nodes will expand all locations equally
+ self.assertExpandOffsets([1, 2, 3, 80, 81, 82], index, [2, 81])
+ self.assertExpandOffsets([1, 2, 3, 9, 10, 11, 80, 81, 82], index,
+ [2, 10, 81])
+
+ def test_stop_at_cached(self):
+ index = self.make_100_node_index()
+ self.set_cached_offsets(index, [0, 10, 19])
+ self.assertExpandOffsets([11, 12, 13, 14, 15, 16], index, [11])
+ self.assertExpandOffsets([11, 12, 13, 14, 15, 16], index, [12])
+ self.assertExpandOffsets([12, 13, 14, 15, 16, 17, 18], index, [15])
+ self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [16])
+ self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [17])
+ self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [18])
+
+ def test_cannot_fully_expand(self):
+ index = self.make_100_node_index()
+ self.set_cached_offsets(index, [0, 10, 12])
+ # We don't go into an endless loop if we are bound by cached nodes
+ self.assertExpandOffsets([11], index, [11])
+
+ def test_overlap(self):
+ index = self.make_100_node_index()
+ self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [12, 13])
+ self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [11, 14])
+
+ def test_stay_within_layer(self):
+ index = self.make_1000_node_index()
+ # When expanding a request, we won't read nodes from the next layer
+ self.assertExpandOffsets([1, 2, 3, 4], index, [2])
+ self.assertExpandOffsets([6, 7, 8, 9], index, [6])
+ self.assertExpandOffsets([6, 7, 8, 9], index, [9])
+ self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [10])
+ self.assertExpandOffsets([10, 11, 12, 13, 14, 15, 16], index, [13])
+
+ self.set_cached_offsets(index, [0, 4, 12])
+ self.assertExpandOffsets([5, 6, 7, 8, 9], index, [7])
+ self.assertExpandOffsets([10, 11], index, [11])
+
+ def test_small_requests_unexpanded(self):
+ index = self.make_100_node_index()
+ self.set_cached_offsets(index, [0])
+ self.assertExpandOffsets([1], index, [1])
+ self.assertExpandOffsets([50], index, [50])
+ # If we request more than one node, then we'll expand
+ self.assertExpandOffsets([49, 50, 51, 59, 60, 61], index, [50, 60])
+
+ # The first pass does not expand
+ index = self.make_1000_node_index()
+ self.set_cached_offsets(index, [0])
+ self.assertExpandOffsets([1], index, [1])
+ self.set_cached_offsets(index, [0, 1])
+ self.assertExpandOffsets([100], index, [100])
+ self.set_cached_offsets(index, [0, 1, 100])
+ # But after the first depth, we will expand
+ self.assertExpandOffsets([2, 3, 4, 5, 6, 7], index, [2])
+ self.assertExpandOffsets([2, 3, 4, 5, 6, 7], index, [4])
+ self.set_cached_offsets(index, [0, 1, 2, 3, 4, 5, 6, 7, 100])
+ self.assertExpandOffsets([102, 103, 104, 105, 106, 107, 108], index,
+ [105])
diff --git a/bzrlib/tests/test_bugtracker.py b/bzrlib/tests/test_bugtracker.py
new file mode 100644
index 0000000..caa5bc4
--- /dev/null
+++ b/bzrlib/tests/test_bugtracker.py
@@ -0,0 +1,252 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import bugtracker, errors, urlutils
+from bzrlib.tests import TestCase, TestCaseWithMemoryTransport
+
+
+class TestGetBugURL(TestCaseWithMemoryTransport):
+ """Tests for bugtracker.get_bug_url"""
+
+ class TransientTracker(object):
+ """A transient tracker used for testing."""
+
+ @classmethod
+ def get(klass, abbreviation, branch):
+ klass.log.append(('get', abbreviation, branch))
+ if abbreviation != 'transient':
+ return None
+ return klass()
+
+ def get_bug_url(self, bug_id):
+ self.log.append(('get_bug_url', bug_id))
+ return "http://bugs.com/%s" % bug_id
+
+ def setUp(self):
+ TestCaseWithMemoryTransport.setUp(self)
+ self.tracker_type = TestGetBugURL.TransientTracker
+ self.tracker_type.log = []
+ bugtracker.tracker_registry.register('transient', self.tracker_type)
+ self.addCleanup(bugtracker.tracker_registry.remove, 'transient')
+
+ def test_get_bug_url_for_transient_tracker(self):
+ branch = self.make_branch('some_branch')
+ self.assertEqual('http://bugs.com/1234',
+ bugtracker.get_bug_url('transient', branch, '1234'))
+ self.assertEqual(
+ [('get', 'transient', branch), ('get_bug_url', '1234')],
+ self.tracker_type.log)
+
+ def test_unrecognized_abbreviation_raises_error(self):
+ """If the abbreviation is unrecognized, then raise an error."""
+ branch = self.make_branch('some_branch')
+ self.assertRaises(errors.UnknownBugTrackerAbbreviation,
+ bugtracker.get_bug_url, 'xxx', branch, '1234')
+ self.assertEqual([('get', 'xxx', branch)], self.tracker_type.log)
+
+
+class TestBuiltinTrackers(TestCaseWithMemoryTransport):
+ """Test that the builtin trackers are registered and return sane URLs."""
+
+ def test_launchpad_registered(self):
+ """The Launchpad bug tracker should be registered by default and
+ generate Launchpad bug page URLs.
+ """
+ branch = self.make_branch('some_branch')
+ tracker = bugtracker.tracker_registry.get_tracker('lp', branch)
+ self.assertEqual('https://launchpad.net/bugs/1234',
+ tracker.get_bug_url('1234'))
+
+ def test_debian_registered(self):
+ """The Debian bug tracker should be registered by default and generate
+ bugs.debian.org bug page URLs.
+ """
+ branch = self.make_branch('some_branch')
+ tracker = bugtracker.tracker_registry.get_tracker('deb', branch)
+ self.assertEqual('http://bugs.debian.org/1234',
+ tracker.get_bug_url('1234'))
+
+ def test_gnome_registered(self):
+ branch = self.make_branch('some_branch')
+ tracker = bugtracker.tracker_registry.get_tracker('gnome', branch)
+ self.assertEqual('http://bugzilla.gnome.org/show_bug.cgi?id=1234',
+ tracker.get_bug_url('1234'))
+
+ def test_trac_registered(self):
+ """The Trac bug tracker should be registered by default and generate
+ Trac bug page URLs when the appropriate configuration is present.
+ """
+ branch = self.make_branch('some_branch')
+ config = branch.get_config()
+ config.set_user_option('trac_foo_url', 'http://bugs.com/trac')
+ tracker = bugtracker.tracker_registry.get_tracker('foo', branch)
+ self.assertEqual('http://bugs.com/trac/ticket/1234',
+ tracker.get_bug_url('1234'))
+
+ def test_bugzilla_registered(self):
+ """The Bugzilla bug tracker should be registered by default and
+ generate Bugzilla bug page URLs when the appropriate configuration is
+ present.
+ """
+ branch = self.make_branch('some_branch')
+ config = branch.get_config()
+ config.set_user_option('bugzilla_foo_url', 'http://bugs.com')
+ tracker = bugtracker.tracker_registry.get_tracker('foo', branch)
+ self.assertEqual('http://bugs.com/show_bug.cgi?id=1234',
+ tracker.get_bug_url('1234'))
+
+ def test_generic_registered(self):
+ branch = self.make_branch('some_branch')
+ config = branch.get_config()
+ config.set_user_option('bugtracker_foo_url', 'http://bugs.com/{id}/view.html')
+ tracker = bugtracker.tracker_registry.get_tracker('foo', branch)
+ self.assertEqual('http://bugs.com/1234/view.html',
+ tracker.get_bug_url('1234'))
+
+ def test_generic_registered_non_integer(self):
+ branch = self.make_branch('some_branch')
+ config = branch.get_config()
+ config.set_user_option('bugtracker_foo_url', 'http://bugs.com/{id}/view.html')
+ tracker = bugtracker.tracker_registry.get_tracker('foo', branch)
+ self.assertEqual('http://bugs.com/ABC-1234/view.html',
+ tracker.get_bug_url('ABC-1234'))
+
+ def test_generic_incorrect_url(self):
+ branch = self.make_branch('some_branch')
+ config = branch.get_config()
+ config.set_user_option('bugtracker_foo_url', 'http://bugs.com/view.html')
+ tracker = bugtracker.tracker_registry.get_tracker('foo', branch)
+ self.assertRaises(errors.InvalidBugTrackerURL, tracker.get_bug_url, '1234')
+
+
+class TestUniqueIntegerBugTracker(TestCaseWithMemoryTransport):
+
+ def test_appends_id_to_base_url(self):
+ """The URL of a bug is the base URL joined to the identifier."""
+ tracker = bugtracker.UniqueIntegerBugTracker('xxx',
+ 'http://bugs.com/foo')
+ self.assertEqual('http://bugs.com/foo1234', tracker.get_bug_url('1234'))
+
+ def test_returns_tracker_if_abbreviation_matches(self):
+ """The get() method should return an instance of the tracker if the
+ given abbreviation matches the tracker's abbreviated name.
+ """
+ tracker = bugtracker.UniqueIntegerBugTracker('xxx',
+ 'http://bugs.com/')
+ branch = self.make_branch('some_branch')
+ self.assertIs(tracker, tracker.get('xxx', branch))
+
+ def test_returns_none_if_abbreviation_doesnt_match(self):
+ """The get() method should return None if the given abbreviated name
+ doesn't match the tracker's abbreviation.
+ """
+ tracker = bugtracker.UniqueIntegerBugTracker('xxx',
+ 'http://bugs.com/')
+ branch = self.make_branch('some_branch')
+ self.assertIs(None, tracker.get('yyy', branch))
+
+ def test_doesnt_consult_branch(self):
+ """A UniqueIntegerBugTracker shouldn't consult the branch for tracker
+ information.
+ """
+ tracker = bugtracker.UniqueIntegerBugTracker('xxx',
+ 'http://bugs.com/')
+ self.assertIs(tracker, tracker.get('xxx', None))
+ self.assertIs(None, tracker.get('yyy', None))
+
+ def test_check_bug_id_only_accepts_integers(self):
+ """A UniqueIntegerBugTracker accepts integers as bug IDs."""
+ tracker = bugtracker.UniqueIntegerBugTracker('xxx',
+ 'http://bugs.com/')
+ tracker.check_bug_id('1234')
+
+ def test_check_bug_id_doesnt_accept_non_integers(self):
+ """A UniqueIntegerBugTracker rejects non-integers as bug IDs."""
+ tracker = bugtracker.UniqueIntegerBugTracker('xxx',
+ 'http://bugs.com/')
+ self.assertRaises(
+ errors.MalformedBugIdentifier, tracker.check_bug_id, 'red')
+
+
+class TestURLParametrizedBugTracker(TestCaseWithMemoryTransport):
+ """Tests for URLParametrizedBugTracker."""
+
+ def setUp(self):
+ TestCaseWithMemoryTransport.setUp(self)
+ self.url = 'http://twistedmatrix.com/trac'
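+ # 'some' is the tracker's type name (the branch option looked up is
+ # '<type>_<tag>_url'); 'ticket/' is the bug area appended to that URL.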
+ self.tracker = bugtracker.URLParametrizedBugTracker('some', 'ticket/')
+
+ def test_get_with_unsupported_tag(self):
+ """If asked for an unrecognized or unconfigured tag, return None."""
+ branch = self.make_branch('some_branch')
+ self.assertEqual(None, self.tracker.get('lp', branch))
+ self.assertEqual(None, self.tracker.get('twisted', branch))
+
+ def test_get_with_supported_tag(self):
+ """If asked for a valid tag, return a tracker instance that can map bug
+ IDs to <base_url>/<bug_area> + <bug_id>."""
+ bugtracker.tracker_registry.register('some', self.tracker)
+ self.addCleanup(bugtracker.tracker_registry.remove, 'some')
+
+ branch = self.make_branch('some_branch')
+ config = branch.get_config()
+ config.set_user_option('some_twisted_url', self.url)
+ tracker = self.tracker.get('twisted', branch)
+ self.assertEqual(
+ urlutils.join(self.url, 'ticket/') + '1234',
+ tracker.get_bug_url('1234'))
+
+ def test_get_bug_url_for_integer_id(self):
+ self.tracker.check_bug_id('1234')
+
+ def test_get_bug_url_for_non_integer_id(self):
+ self.tracker.check_bug_id('ABC-1234')
+
+
+class TestURLParametrizedIntegerBugTracker(TestCaseWithMemoryTransport):
+ """Tests for URLParametrizedIntegerBugTracker."""
+
+ def setUp(self):
+ TestCaseWithMemoryTransport.setUp(self)
+ self.url = 'http://twistedmatrix.com/trac'
+ self.tracker = bugtracker.URLParametrizedIntegerBugTracker('some',
+ 'ticket/')
+
+ def test_get_bug_url_for_bad_bug(self):
+ """When given a bug identifier that is invalid for Trac, get_bug_url
+ should raise an error.
+ """
+ self.assertRaises(
+ errors.MalformedBugIdentifier, self.tracker.get_bug_url, 'bad')
+
+
+class TestPropertyEncoding(TestCase):
+ """Tests for how the bug URLs are encoded as revision properties."""
+
+ def test_encoding_one(self):
+ self.assertEqual(
+ 'http://example.com/bugs/1 fixed',
+ bugtracker.encode_fixes_bug_urls(['http://example.com/bugs/1']))
+
+ def test_encoding_zero(self):
+ self.assertEqual('', bugtracker.encode_fixes_bug_urls([]))
+
+ def test_encoding_two(self):
+ self.assertEqual(
+ 'http://example.com/bugs/1 fixed\n'
+ 'http://example.com/bugs/2 fixed',
+ bugtracker.encode_fixes_bug_urls(
+ ['http://example.com/bugs/1', 'http://example.com/bugs/2']))
diff --git a/bzrlib/tests/test_bundle.py b/bzrlib/tests/test_bundle.py
new file mode 100644
index 0000000..a4ea669
--- /dev/null
+++ b/bzrlib/tests/test_bundle.py
@@ -0,0 +1,1877 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+import os
+import SocketServer
+import sys
+
+from bzrlib import (
+ bzrdir,
+ diff,
+ errors,
+ inventory,
+ merge,
+ osutils,
+ revision as _mod_revision,
+ tests,
+ treebuilder,
+ )
+from bzrlib.bundle import read_mergeable_from_url
+from bzrlib.bundle.apply_bundle import install_bundle, merge_bundle
+from bzrlib.bundle.bundle_data import BundleTree
+from bzrlib.directory_service import directories
+from bzrlib.bundle.serializer import write_bundle, read_bundle, v09, v4
+from bzrlib.bundle.serializer.v08 import BundleSerializerV08
+from bzrlib.bundle.serializer.v09 import BundleSerializerV09
+from bzrlib.bundle.serializer.v4 import BundleSerializerV4
+from bzrlib.repofmt import knitrepo
+from bzrlib.tests import (
+ features,
+ test_commit,
+ test_read_bundle,
+ test_server,
+ )
+from bzrlib.transform import TreeTransform
+
+
+def get_text(vf, key):
+ """Get the fulltext for a given revision id that is present in the vf"""
+ stream = vf.get_record_stream([key], 'unordered', True)
+ record = stream.next()
+ return record.get_bytes_as('fulltext')
+
+
+def get_inventory_text(repo, revision_id):
+ """Get the fulltext for the inventory at revision id"""
+ repo.lock_read()
+ try:
+ return get_text(repo.inventories, (revision_id,))
+ finally:
+ repo.unlock()
+
+
+class MockTree(object):
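+ """A minimal in-memory tree stand-in used by the BundleTree tests."""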
+
+ def __init__(self):
+ from bzrlib.inventory import InventoryDirectory, ROOT_ID
+ object.__init__(self)
+ self.paths = {ROOT_ID: ""}
+ self.ids = {"": ROOT_ID}
+ self.contents = {}
+ self.root = InventoryDirectory(ROOT_ID, '', None)
+
+ inventory = property(lambda x:x)
+ root_inventory = property(lambda x:x)
+
+ def get_root_id(self):
+ return self.root.file_id
+
+ def all_file_ids(self):
+ return set(self.paths.keys())
+
+ def is_executable(self, file_id):
+ # Not all the files are executable.
+ return False
+
+ def __getitem__(self, file_id):
+ if file_id == self.root.file_id:
+ return self.root
+ else:
+ return self.make_entry(file_id, self.paths[file_id])
+
+ def parent_id(self, file_id):
+ parent_dir = os.path.dirname(self.paths[file_id])
+ if parent_dir == "":
+ return None
+ return self.ids[parent_dir]
+
+ def iter_entries(self):
+ for path, file_id in self.ids.iteritems():
+ yield path, self[file_id]
+
+ def kind(self, file_id):
+ if file_id in self.contents:
+ kind = 'file'
+ else:
+ kind = 'directory'
+ return kind
+
+ def make_entry(self, file_id, path):
+ from bzrlib.inventory import (InventoryFile, InventoryDirectory,
+ InventoryLink)
+ name = os.path.basename(path)
+ kind = self.kind(file_id)
+ parent_id = self.parent_id(file_id)
+ text_sha_1, text_size = self.contents_stats(file_id)
+ if kind == 'directory':
+ ie = InventoryDirectory(file_id, name, parent_id)
+ elif kind == 'file':
+ ie = InventoryFile(file_id, name, parent_id)
+ ie.text_sha1 = text_sha_1
+ ie.text_size = text_size
+ elif kind == 'symlink':
+ ie = InventoryLink(file_id, name, parent_id)
+ else:
+ raise errors.BzrError('unknown kind %r' % kind)
+ return ie
+
+ def add_dir(self, file_id, path):
+ self.paths[file_id] = path
+ self.ids[path] = file_id
+
+ def add_file(self, file_id, path, contents):
+ self.add_dir(file_id, path)
+ self.contents[file_id] = contents
+
+ def path2id(self, path):
+ return self.ids.get(path)
+
+ def id2path(self, file_id):
+ return self.paths.get(file_id)
+
+ def has_id(self, file_id):
+ return self.id2path(file_id) is not None
+
+ def get_file(self, file_id):
+ result = StringIO()
+ result.write(self.contents[file_id])
+ result.seek(0,0)
+ return result
+
+ def get_file_revision(self, file_id):
+ return self.inventory[file_id].revision
+
+ def get_file_size(self, file_id):
+ return self.inventory[file_id].text_size
+
+ def get_file_sha1(self, file_id):
+ return self.inventory[file_id].text_sha1
+
+ def contents_stats(self, file_id):
+ if file_id not in self.contents:
+ return None, None
+ text_sha1 = osutils.sha_file(self.get_file(file_id))
+ return text_sha1, len(self.contents[file_id])
+
+
+class BTreeTester(tests.TestCase):
+ """A simple unittest tester for the BundleTree class."""
+
+ def make_tree_1(self):
+ mtree = MockTree()
+ mtree.add_dir("a", "grandparent")
+ mtree.add_dir("b", "grandparent/parent")
+ mtree.add_file("c", "grandparent/parent/file", "Hello\n")
+ mtree.add_dir("d", "grandparent/alt_parent")
+ return BundleTree(mtree, ''), mtree
+
+ def test_renames(self):
+ """Ensure that file renames have the proper effect on children"""
+ btree = self.make_tree_1()[0]
+ self.assertEqual(btree.old_path("grandparent"), "grandparent")
+ self.assertEqual(btree.old_path("grandparent/parent"),
+ "grandparent/parent")
+ self.assertEqual(btree.old_path("grandparent/parent/file"),
+ "grandparent/parent/file")
+
+ self.assertEqual(btree.id2path("a"), "grandparent")
+ self.assertEqual(btree.id2path("b"), "grandparent/parent")
+ self.assertEqual(btree.id2path("c"), "grandparent/parent/file")
+
+ self.assertEqual(btree.path2id("grandparent"), "a")
+ self.assertEqual(btree.path2id("grandparent/parent"), "b")
+ self.assertEqual(btree.path2id("grandparent/parent/file"), "c")
+
+ self.assertTrue(btree.path2id("grandparent2") is None)
+ self.assertTrue(btree.path2id("grandparent2/parent") is None)
+ self.assertTrue(btree.path2id("grandparent2/parent/file") is None)
+
+ btree.note_rename("grandparent", "grandparent2")
+ self.assertTrue(btree.old_path("grandparent") is None)
+ self.assertTrue(btree.old_path("grandparent/parent") is None)
+ self.assertTrue(btree.old_path("grandparent/parent/file") is None)
+
+ self.assertEqual(btree.id2path("a"), "grandparent2")
+ self.assertEqual(btree.id2path("b"), "grandparent2/parent")
+ self.assertEqual(btree.id2path("c"), "grandparent2/parent/file")
+
+ self.assertEqual(btree.path2id("grandparent2"), "a")
+ self.assertEqual(btree.path2id("grandparent2/parent"), "b")
+ self.assertEqual(btree.path2id("grandparent2/parent/file"), "c")
+
+ self.assertTrue(btree.path2id("grandparent") is None)
+ self.assertTrue(btree.path2id("grandparent/parent") is None)
+ self.assertTrue(btree.path2id("grandparent/parent/file") is None)
+
+ btree.note_rename("grandparent/parent", "grandparent2/parent2")
+ self.assertEqual(btree.id2path("a"), "grandparent2")
+ self.assertEqual(btree.id2path("b"), "grandparent2/parent2")
+ self.assertEqual(btree.id2path("c"), "grandparent2/parent2/file")
+
+ self.assertEqual(btree.path2id("grandparent2"), "a")
+ self.assertEqual(btree.path2id("grandparent2/parent2"), "b")
+ self.assertEqual(btree.path2id("grandparent2/parent2/file"), "c")
+
+ self.assertTrue(btree.path2id("grandparent2/parent") is None)
+ self.assertTrue(btree.path2id("grandparent2/parent/file") is None)
+
+ btree.note_rename("grandparent/parent/file",
+ "grandparent2/parent2/file2")
+ self.assertEqual(btree.id2path("a"), "grandparent2")
+ self.assertEqual(btree.id2path("b"), "grandparent2/parent2")
+ self.assertEqual(btree.id2path("c"), "grandparent2/parent2/file2")
+
+ self.assertEqual(btree.path2id("grandparent2"), "a")
+ self.assertEqual(btree.path2id("grandparent2/parent2"), "b")
+ self.assertEqual(btree.path2id("grandparent2/parent2/file2"), "c")
+
+ self.assertTrue(btree.path2id("grandparent2/parent2/file") is None)
+
+ def test_moves(self):
+ """Ensure that file moves have the proper effect on children"""
+ btree = self.make_tree_1()[0]
+ btree.note_rename("grandparent/parent/file",
+ "grandparent/alt_parent/file")
+ self.assertEqual(btree.id2path("c"), "grandparent/alt_parent/file")
+ self.assertEqual(btree.path2id("grandparent/alt_parent/file"), "c")
+ self.assertTrue(btree.path2id("grandparent/parent/file") is None)
+
+ def unified_diff(self, old, new):
+ out = StringIO()
+ diff.internal_diff("old", old, "new", new, out)
+ out.seek(0,0)
+ return out.read()
+
+ def make_tree_2(self):
+ btree = self.make_tree_1()[0]
+ btree.note_rename("grandparent/parent/file",
+ "grandparent/alt_parent/file")
+ self.assertTrue(btree.id2path("e") is None)
+ self.assertTrue(btree.path2id("grandparent/parent/file") is None)
+ btree.note_id("e", "grandparent/parent/file")
+ return btree
+
+ def test_adds(self):
+ """File/inventory adds"""
+ btree = self.make_tree_2()
+ add_patch = self.unified_diff([], ["Extra cheese\n"])
+ btree.note_patch("grandparent/parent/file", add_patch)
+ btree.note_id('f', 'grandparent/parent/symlink', kind='symlink')
+ btree.note_target('grandparent/parent/symlink', 'venus')
+ self.adds_test(btree)
+
+ def adds_test(self, btree):
+ self.assertEqual(btree.id2path("e"), "grandparent/parent/file")
+ self.assertEqual(btree.path2id("grandparent/parent/file"), "e")
+ self.assertEqual(btree.get_file("e").read(), "Extra cheese\n")
+ self.assertEqual(btree.get_symlink_target('f'), 'venus')
+
+ def test_adds2(self):
+ """File/inventory adds, with patch-compatible renames"""
+ btree = self.make_tree_2()
+ btree.contents_by_id = False
+ add_patch = self.unified_diff(["Hello\n"], ["Extra cheese\n"])
+ btree.note_patch("grandparent/parent/file", add_patch)
+ btree.note_id('f', 'grandparent/parent/symlink', kind='symlink')
+ btree.note_target('grandparent/parent/symlink', 'venus')
+ self.adds_test(btree)
+
+ def make_tree_3(self):
+ btree, mtree = self.make_tree_1()
+ mtree.add_file("e", "grandparent/parent/topping", "Anchovies\n")
+ btree.note_rename("grandparent/parent/file",
+ "grandparent/alt_parent/file")
+ btree.note_rename("grandparent/parent/topping",
+ "grandparent/alt_parent/stopping")
+ return btree
+
+ def get_file_test(self, btree):
+ self.assertEqual(btree.get_file("e").read(), "Lemon\n")
+ self.assertEqual(btree.get_file("c").read(), "Hello\n")
+
+ def test_get_file(self):
+ """Get file contents"""
+ btree = self.make_tree_3()
+ mod_patch = self.unified_diff(["Anchovies\n"], ["Lemon\n"])
+ btree.note_patch("grandparent/alt_parent/stopping", mod_patch)
+ self.get_file_test(btree)
+
+ def test_get_file2(self):
+ """Get file contents, with patch-compatible renames"""
+ btree = self.make_tree_3()
+ btree.contents_by_id = False
+ mod_patch = self.unified_diff([], ["Lemon\n"])
+ btree.note_patch("grandparent/alt_parent/stopping", mod_patch)
+ mod_patch = self.unified_diff([], ["Hello\n"])
+ btree.note_patch("grandparent/alt_parent/file", mod_patch)
+ self.get_file_test(btree)
+
+ def test_delete(self):
+ "Deletion by bundle"
+ btree = self.make_tree_1()[0]
+ self.assertEqual(btree.get_file("c").read(), "Hello\n")
+ btree.note_deletion("grandparent/parent/file")
+ self.assertTrue(btree.id2path("c") is None)
+ self.assertTrue(btree.path2id("grandparent/parent/file") is None)
+
+ def sorted_ids(self, tree):
+ ids = list(tree.all_file_ids())
+ ids.sort()
+ return ids
+
+ def test_iteration(self):
+ """Ensure that iteration through ids works properly"""
+ btree = self.make_tree_1()[0]
+ self.assertEqual(self.sorted_ids(btree),
+ [inventory.ROOT_ID, 'a', 'b', 'c', 'd'])
+ btree.note_deletion("grandparent/parent/file")
+ btree.note_id("e", "grandparent/alt_parent/fool", kind="directory")
+ btree.note_last_changed("grandparent/alt_parent/fool",
+ "revisionidiguess")
+ self.assertEqual(self.sorted_ids(btree),
+ [inventory.ROOT_ID, 'a', 'b', 'd', 'e'])
+
+
+class BundleTester1(tests.TestCaseWithTransport):
+
+ def test_mismatched_bundle(self):
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ serializer = BundleSerializerV08('0.8')
+ b = self.make_branch('.', format=format)
+ self.assertRaises(errors.IncompatibleBundleFormat, serializer.write,
+ b.repository, [], {}, StringIO())
+
+ def test_matched_bundle(self):
+ """Don't raise IncompatibleBundleFormat for knit2 and bundle0.9"""
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ serializer = BundleSerializerV09('0.9')
+ b = self.make_branch('.', format=format)
+ serializer.write(b.repository, [], {}, StringIO())
+
+ def test_mismatched_model(self):
+ """Try copying a bundle from knit2 to knit1"""
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ source = self.make_branch_and_tree('source', format=format)
+ source.commit('one', rev_id='one-id')
+ source.commit('two', rev_id='two-id')
+ text = StringIO()
+ write_bundle(source.branch.repository, 'two-id', 'null:', text,
+ format='0.9')
+ text.seek(0)
+
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit1()
+ target = self.make_branch('target', format=format)
+ self.assertRaises(errors.IncompatibleRevision, install_bundle,
+ target.repository, read_bundle(text))
+
+
+class BundleTester(object):
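+ """Bundle round-trip tests, meant to be mixed into a
+ TestCaseWithTransport subclass that supplies self.format (the bundle
+ format string under test).
+ """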
+
+ def bzrdir_format(self):
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit1()
+ return format
+
+ def make_branch_and_tree(self, path, format=None):
+ if format is None:
+ format = self.bzrdir_format()
+ return tests.TestCaseWithTransport.make_branch_and_tree(
+ self, path, format)
+
+ def make_branch(self, path, format=None):
+ if format is None:
+ format = self.bzrdir_format()
+ return tests.TestCaseWithTransport.make_branch(self, path, format)
+
+ def create_bundle_text(self, base_rev_id, rev_id):
+ bundle_txt = StringIO()
+ rev_ids = write_bundle(self.b1.repository, rev_id, base_rev_id,
+ bundle_txt, format=self.format)
+ bundle_txt.seek(0)
+ self.assertEqual(bundle_txt.readline(),
+ '# Bazaar revision bundle v%s\n' % self.format)
+ self.assertEqual(bundle_txt.readline(), '#\n')
+
+ rev = self.b1.repository.get_revision(rev_id)
+ self.assertEqual(bundle_txt.readline().decode('utf-8'),
+ u'# message:\n')
+ bundle_txt.seek(0)
+ return bundle_txt, rev_ids
+
+ def get_valid_bundle(self, base_rev_id, rev_id, checkout_dir=None):
+ """Create a bundle from base_rev_id -> rev_id in built-in branch.
+ Make sure that the text generated is valid, and that it
+ can be applied against the base, and generate the same information.
+
+ :return: The in-memory bundle
+ """
+ bundle_txt, rev_ids = self.create_bundle_text(base_rev_id, rev_id)
+
+ # This should also validate the generated bundle
+ bundle = read_bundle(bundle_txt)
+ repository = self.b1.repository
+ for bundle_rev in bundle.real_revisions:
+ # These really should have already been checked when we read the
+ # bundle, since it computes the sha1 hash for the revision, which
+ # will only match if everything is okay, but let's be explicit
+ # about it
+ branch_rev = repository.get_revision(bundle_rev.revision_id)
+ for a in ('inventory_sha1', 'revision_id', 'parent_ids',
+ 'timestamp', 'timezone', 'message', 'committer',
+ 'parent_ids', 'properties'):
+ self.assertEqual(getattr(branch_rev, a),
+ getattr(bundle_rev, a))
+ self.assertEqual(len(branch_rev.parent_ids),
+ len(bundle_rev.parent_ids))
+ self.assertEqual(rev_ids,
+ [r.revision_id for r in bundle.real_revisions])
+ self.valid_apply_bundle(base_rev_id, bundle,
+ checkout_dir=checkout_dir)
+
+ return bundle
+
+ def get_invalid_bundle(self, base_rev_id, rev_id):
+ """Create a bundle from base_rev_id -> rev_id in built-in branch.
+ Munge the text so that it's invalid.
+
+ :return: The in-memory bundle
+ """
+ bundle_txt, rev_ids = self.create_bundle_text(base_rev_id, rev_id)
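+ # Flipping an executable flag corrupts the recorded content (the
+ # testament / text sha1 no longer match) while keeping the bundle
+ # parseable.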
+ new_text = bundle_txt.getvalue().replace('executable:no',
+ 'executable:yes')
+ bundle_txt = StringIO(new_text)
+ bundle = read_bundle(bundle_txt)
+ self.valid_apply_bundle(base_rev_id, bundle)
+ return bundle
+
+ def test_non_bundle(self):
+ self.assertRaises(errors.NotABundle,
+ read_bundle, StringIO('#!/bin/sh\n'))
+
+ def test_malformed(self):
+ self.assertRaises(errors.BadBundle, read_bundle,
+ StringIO('# Bazaar revision bundle v'))
+
+ def test_crlf_bundle(self):
+ try:
+ read_bundle(StringIO('# Bazaar revision bundle v0.8\r\n'))
+ except errors.BadBundle:
+ # It is currently permitted for bundles with crlf line endings to
+ # make read_bundle raise a BadBundle, but this should be fixed.
+ # Anything else, especially NotABundle, is an error.
+ pass
+
+ def get_checkout(self, rev_id, checkout_dir=None):
+ """Get a new tree, with the specified revision in it.
+ """
+
+ if checkout_dir is None:
+ checkout_dir = osutils.mkdtemp(prefix='test-branch-', dir='.')
+ else:
+ if not os.path.exists(checkout_dir):
+ os.mkdir(checkout_dir)
+ tree = self.make_branch_and_tree(checkout_dir)
+ s = StringIO()
+ ancestors = write_bundle(self.b1.repository, rev_id, 'null:', s,
+ format=self.format)
+ s.seek(0)
+ self.assertIsInstance(s.getvalue(), str)
+ install_bundle(tree.branch.repository, read_bundle(s))
+ for ancestor in ancestors:
+ old = self.b1.repository.revision_tree(ancestor)
+ new = tree.branch.repository.revision_tree(ancestor)
+ old.lock_read()
+ new.lock_read()
+ try:
+ # Check that there aren't any inventory level changes
+ delta = new.changes_from(old)
+ self.assertFalse(delta.has_changed(),
+ 'Revision %s not copied correctly.'
+ % (ancestor,))
+
+ # Now check that the file contents are all correct
+ for inventory_id in old.all_file_ids():
+ try:
+ old_file = old.get_file(inventory_id)
+ except errors.NoSuchFile:
+ continue
+ if old_file is None:
+ continue
+ self.assertEqual(old_file.read(),
+ new.get_file(inventory_id).read())
+ finally:
+ new.unlock()
+ old.unlock()
+ if not _mod_revision.is_null(rev_id):
+ tree.branch.generate_revision_history(rev_id)
+ tree.update()
+ delta = tree.changes_from(self.b1.repository.revision_tree(rev_id))
+ self.assertFalse(delta.has_changed(),
+ 'Working tree has modifications: %s' % delta)
+ return tree
+
+ def valid_apply_bundle(self, base_rev_id, info, checkout_dir=None):
+ """Get the base revision, apply the changes, and make
+ sure everything matches the builtin branch.
+ """
+ to_tree = self.get_checkout(base_rev_id, checkout_dir=checkout_dir)
+ to_tree.lock_write()
+ try:
+ self._valid_apply_bundle(base_rev_id, info, to_tree)
+ finally:
+ to_tree.unlock()
+
+ def _valid_apply_bundle(self, base_rev_id, info, to_tree):
+ original_parents = to_tree.get_parent_ids()
+ repository = to_tree.branch.repository
+ self.assertIs(repository.has_revision(base_rev_id), True)
+ for rev in info.real_revisions:
+ self.assert_(not repository.has_revision(rev.revision_id),
+ 'Revision {%s} present before applying bundle'
+ % rev.revision_id)
+ merge_bundle(info, to_tree, True, merge.Merge3Merger, False, False)
+
+ for rev in info.real_revisions:
+ self.assert_(repository.has_revision(rev.revision_id),
+ 'Missing revision {%s} after applying bundle'
+ % rev.revision_id)
+
+ self.assert_(to_tree.branch.repository.has_revision(info.target))
+ # Do we also want to verify that all the texts have been added?
+
+ self.assertEqual(original_parents + [info.target],
+ to_tree.get_parent_ids())
+
+ rev = info.real_revisions[-1]
+ base_tree = self.b1.repository.revision_tree(rev.revision_id)
+ to_tree = to_tree.branch.repository.revision_tree(rev.revision_id)
+
+ # TODO: make sure the target tree is identical to base tree
+ # we might also check the working tree.
+
+ base_files = list(base_tree.list_files())
+ to_files = list(to_tree.list_files())
+ self.assertEqual(len(base_files), len(to_files))
+ for base_file, to_file in zip(base_files, to_files):
+ self.assertEqual(base_file, to_file)
+
+ for path, status, kind, fileid, entry in base_files:
+ # Check that the meta information is the same
+ self.assertEqual(base_tree.get_file_size(fileid),
+ to_tree.get_file_size(fileid))
+ self.assertEqual(base_tree.get_file_sha1(fileid),
+ to_tree.get_file_sha1(fileid))
+ # Check that the contents are the same
+ # This is pretty expensive
+ # self.assertEqual(base_tree.get_file(fileid).read(),
+ # to_tree.get_file(fileid).read())
+
+ def test_bundle(self):
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+
+ self.build_tree_contents([('b1/one', 'one\n')])
+ self.tree1.add('one', 'one-id')
+ self.tree1.set_root_id('root-id')
+ self.tree1.commit('add one', rev_id='a@cset-0-1')
+
+ bundle = self.get_valid_bundle('null:', 'a@cset-0-1')
+
+ # Make sure we can handle files with spaces, tabs, other
+ # bogus characters
+ self.build_tree([
+ 'b1/with space.txt'
+ , 'b1/dir/'
+ , 'b1/dir/filein subdir.c'
+ , 'b1/dir/WithCaps.txt'
+ , 'b1/dir/ pre space'
+ , 'b1/sub/'
+ , 'b1/sub/sub/'
+ , 'b1/sub/sub/nonempty.txt'
+ ])
+ self.build_tree_contents([('b1/sub/sub/emptyfile.txt', ''),
+ ('b1/dir/nolastnewline.txt', 'bloop')])
+ tt = TreeTransform(self.tree1)
+ tt.new_file('executable', tt.root, '#!/bin/sh\n', 'exe-1', True)
+ tt.apply()
+ # have to fix length of file-id so that we can predictably rewrite
+ # a (length-prefixed) record containing it later.
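+ # (get_invalid_bundle later rewrites that record and must adjust its
+ # length prefix accordingly, e.g. B260 -> B275.)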
+ self.tree1.add('with space.txt', 'withspace-id')
+ self.tree1.add([
+ 'dir'
+ , 'dir/filein subdir.c'
+ , 'dir/WithCaps.txt'
+ , 'dir/ pre space'
+ , 'dir/nolastnewline.txt'
+ , 'sub'
+ , 'sub/sub'
+ , 'sub/sub/nonempty.txt'
+ , 'sub/sub/emptyfile.txt'
+ ])
+ self.tree1.commit('add whitespace', rev_id='a@cset-0-2')
+
+ bundle = self.get_valid_bundle('a@cset-0-1', 'a@cset-0-2')
+
+ # Check a rollup bundle
+ bundle = self.get_valid_bundle('null:', 'a@cset-0-2')
+
+ # Now delete entries
+ self.tree1.remove(
+ ['sub/sub/nonempty.txt'
+ , 'sub/sub/emptyfile.txt'
+ , 'sub/sub'
+ ])
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id('exe-1')
+ tt.set_executability(False, trans_id)
+ tt.apply()
+ self.tree1.commit('removed', rev_id='a@cset-0-3')
+
+ bundle = self.get_valid_bundle('a@cset-0-2', 'a@cset-0-3')
+ self.assertRaises((errors.TestamentMismatch,
+ errors.VersionedFileInvalidChecksum,
+ errors.BadBundle), self.get_invalid_bundle,
+ 'a@cset-0-2', 'a@cset-0-3')
+ # Check a rollup bundle
+ bundle = self.get_valid_bundle('null:', 'a@cset-0-3')
+
+ # Now move the directory
+ self.tree1.rename_one('dir', 'sub/dir')
+ self.tree1.commit('rename dir', rev_id='a@cset-0-4')
+
+ bundle = self.get_valid_bundle('a@cset-0-3', 'a@cset-0-4')
+ # Check a rollup bundle
+ bundle = self.get_valid_bundle('null:', 'a@cset-0-4')
+
+ # Modified files
+ with open('b1/sub/dir/WithCaps.txt', 'ab') as f: f.write('\nAdding some text\n')
+ with open('b1/sub/dir/ pre space', 'ab') as f: f.write(
+ '\r\nAdding some\r\nDOS format lines\r\n')
+ with open('b1/sub/dir/nolastnewline.txt', 'ab') as f: f.write('\n')
+ self.tree1.rename_one('sub/dir/ pre space',
+ 'sub/ start space')
+ self.tree1.commit('Modified files', rev_id='a@cset-0-5')
+ bundle = self.get_valid_bundle('a@cset-0-4', 'a@cset-0-5')
+
+ self.tree1.rename_one('sub/dir/WithCaps.txt', 'temp')
+ self.tree1.rename_one('with space.txt', 'WithCaps.txt')
+ self.tree1.rename_one('temp', 'with space.txt')
+ self.tree1.commit(u'swap filenames', rev_id='a@cset-0-6',
+ verbose=False)
+ bundle = self.get_valid_bundle('a@cset-0-5', 'a@cset-0-6')
+ other = self.get_checkout('a@cset-0-5')
+ tree1_inv = get_inventory_text(self.tree1.branch.repository,
+ 'a@cset-0-5')
+ tree2_inv = get_inventory_text(other.branch.repository,
+ 'a@cset-0-5')
+ self.assertEqualDiff(tree1_inv, tree2_inv)
+ other.rename_one('sub/dir/nolastnewline.txt', 'sub/nolastnewline.txt')
+ other.commit('rename file', rev_id='a@cset-0-6b')
+ self.tree1.merge_from_branch(other.branch)
+ self.tree1.commit(u'Merge', rev_id='a@cset-0-7',
+ verbose=False)
+ bundle = self.get_valid_bundle('a@cset-0-6', 'a@cset-0-7')
+
+ def _test_symlink_bundle(self, link_name, link_target, new_link_target):
+ link_id = 'link-1'
+
+ self.requireFeature(features.SymlinkFeature)
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+
+ tt = TreeTransform(self.tree1)
+ tt.new_symlink(link_name, tt.root, link_target, link_id)
+ tt.apply()
+ self.tree1.commit('add symlink', rev_id='l@cset-0-1')
+ bundle = self.get_valid_bundle('null:', 'l@cset-0-1')
+ if getattr(bundle, 'revision_tree', None) is not None:
+ # Not all bundle formats support revision_tree
+ bund_tree = bundle.revision_tree(self.b1.repository, 'l@cset-0-1')
+ self.assertEqual(link_target, bund_tree.get_symlink_target(link_id))
+
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id(link_id)
+ tt.adjust_path('link2', tt.root, trans_id)
+ tt.delete_contents(trans_id)
+ tt.create_symlink(new_link_target, trans_id)
+ tt.apply()
+ self.tree1.commit('rename and change symlink', rev_id='l@cset-0-2')
+ bundle = self.get_valid_bundle('l@cset-0-1', 'l@cset-0-2')
+ if getattr(bundle, 'revision_tree', None) is not None:
+ # Not all bundle formats support revision_tree
+ bund_tree = bundle.revision_tree(self.b1.repository, 'l@cset-0-2')
+ self.assertEqual(new_link_target,
+ bund_tree.get_symlink_target(link_id))
+
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id(link_id)
+ tt.delete_contents(trans_id)
+ tt.create_symlink('jupiter', trans_id)
+ tt.apply()
+ self.tree1.commit('just change symlink target', rev_id='l@cset-0-3')
+ bundle = self.get_valid_bundle('l@cset-0-2', 'l@cset-0-3')
+
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id(link_id)
+ tt.delete_contents(trans_id)
+ tt.apply()
+ self.tree1.commit('Delete symlink', rev_id='l@cset-0-4')
+ bundle = self.get_valid_bundle('l@cset-0-3', 'l@cset-0-4')
+
+ def test_symlink_bundle(self):
+ self._test_symlink_bundle('link', 'bar/foo', 'mars')
+
+ def test_unicode_symlink_bundle(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self._test_symlink_bundle(u'\N{Euro Sign}link',
+ u'bar/\N{Euro Sign}foo',
+ u'mars\N{Euro Sign}')
+
+ def test_binary_bundle(self):
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+ tt = TreeTransform(self.tree1)
+
+ # Add
+ tt.new_file('file', tt.root, '\x00\n\x00\r\x01\n\x02\r\xff', 'binary-1')
+ tt.new_file('file2', tt.root, '\x01\n\x02\r\x03\n\x04\r\xff',
+ 'binary-2')
+ tt.apply()
+ self.tree1.commit('add binary', rev_id='b@cset-0-1')
+ self.get_valid_bundle('null:', 'b@cset-0-1')
+
+ # Delete
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id('binary-1')
+ tt.delete_contents(trans_id)
+ tt.apply()
+ self.tree1.commit('delete binary', rev_id='b@cset-0-2')
+ self.get_valid_bundle('b@cset-0-1', 'b@cset-0-2')
+
+ # Rename & modify
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id('binary-2')
+ tt.adjust_path('file3', tt.root, trans_id)
+ tt.delete_contents(trans_id)
+ tt.create_file('file\rcontents\x00\n\x00', trans_id)
+ tt.apply()
+ self.tree1.commit('rename and modify binary', rev_id='b@cset-0-3')
+ self.get_valid_bundle('b@cset-0-2', 'b@cset-0-3')
+
+ # Modify
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id('binary-2')
+ tt.delete_contents(trans_id)
+ tt.create_file('\x00file\rcontents', trans_id)
+ tt.apply()
+ self.tree1.commit('just modify binary', rev_id='b@cset-0-4')
+ self.get_valid_bundle('b@cset-0-3', 'b@cset-0-4')
+
+ # Rollup
+ self.get_valid_bundle('null:', 'b@cset-0-4')
+
+ def test_last_modified(self):
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+ tt = TreeTransform(self.tree1)
+ tt.new_file('file', tt.root, 'file', 'file')
+ tt.apply()
+ self.tree1.commit('create file', rev_id='a@lmod-0-1')
+
+ tt = TreeTransform(self.tree1)
+ trans_id = tt.trans_id_tree_file_id('file')
+ tt.delete_contents(trans_id)
+ tt.create_file('file2', trans_id)
+ tt.apply()
+ self.tree1.commit('modify text', rev_id='a@lmod-0-2a')
+
+ other = self.get_checkout('a@lmod-0-1')
+ tt = TreeTransform(other)
+ trans_id = tt.trans_id_tree_file_id('file')
+ tt.delete_contents(trans_id)
+ tt.create_file('file2', trans_id)
+ tt.apply()
+ other.commit('modify text in another tree', rev_id='a@lmod-0-2b')
+ self.tree1.merge_from_branch(other.branch)
+ self.tree1.commit(u'Merge', rev_id='a@lmod-0-3',
+ verbose=False)
+ self.tree1.commit(u'Merge', rev_id='a@lmod-0-4')
+ bundle = self.get_valid_bundle('a@lmod-0-2a', 'a@lmod-0-4')
+
+ def test_hide_history(self):
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+
+ with open('b1/one', 'wb') as f: f.write('one\n')
+ self.tree1.add('one')
+ self.tree1.commit('add file', rev_id='a@cset-0-1')
+ with open('b1/one', 'wb') as f: f.write('two\n')
+ self.tree1.commit('modify', rev_id='a@cset-0-2')
+ with open('b1/one', 'wb') as f: f.write('three\n')
+ self.tree1.commit('modify', rev_id='a@cset-0-3')
+ bundle_file = StringIO()
+ rev_ids = write_bundle(self.tree1.branch.repository, 'a@cset-0-3',
+ 'a@cset-0-1', bundle_file, format=self.format)
+ self.assertNotContainsRe(bundle_file.getvalue(), '\btwo\b')
+ self.assertContainsRe(self.get_raw(bundle_file), 'one')
+ self.assertContainsRe(self.get_raw(bundle_file), 'three')
+
+ def test_bundle_same_basis(self):
+ """Ensure using the basis as the target doesn't cause an error"""
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.tree1.commit('add file', rev_id='a@cset-0-1')
+ bundle_file = StringIO()
+ rev_ids = write_bundle(self.tree1.branch.repository, 'a@cset-0-1',
+ 'a@cset-0-1', bundle_file)
+
+ @staticmethod
+ def get_raw(bundle_file):
+ return bundle_file.getvalue()
+
+ def test_unicode_bundle(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ # Handle international characters
+ os.mkdir('b1')
+ f = open(u'b1/with Dod\N{Euro Sign}', 'wb')
+
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+
+ f.write((u'A file\n'
+ u'With international man of mystery\n'
+ u'William Dod\xe9\n').encode('utf-8'))
+ f.close()
+
+ self.tree1.add([u'with Dod\N{Euro Sign}'], ['withdod-id'])
+ self.tree1.commit(u'i18n commit from William Dod\xe9',
+ rev_id='i18n-1', committer=u'William Dod\xe9')
+
+ # Add
+ bundle = self.get_valid_bundle('null:', 'i18n-1')
+
+ # Modified
+ f = open(u'b1/with Dod\N{Euro Sign}', 'wb')
+ f.write(u'Modified \xb5\n'.encode('utf8'))
+ f.close()
+ self.tree1.commit(u'modified', rev_id='i18n-2')
+
+ bundle = self.get_valid_bundle('i18n-1', 'i18n-2')
+
+ # Renamed
+ self.tree1.rename_one(u'with Dod\N{Euro Sign}', u'B\N{Euro Sign}gfors')
+ self.tree1.commit(u'renamed, the new i18n man', rev_id='i18n-3',
+ committer=u'Erik B\xe5gfors')
+
+ bundle = self.get_valid_bundle('i18n-2', 'i18n-3')
+
+ # Removed
+ self.tree1.remove([u'B\N{Euro Sign}gfors'])
+ self.tree1.commit(u'removed', rev_id='i18n-4')
+
+ bundle = self.get_valid_bundle('i18n-3', 'i18n-4')
+
+ # Rollup
+ bundle = self.get_valid_bundle('null:', 'i18n-4')
+
+
+ def test_whitespace_bundle(self):
+ if sys.platform in ('win32', 'cygwin'):
+ raise tests.TestSkipped('Windows doesn\'t support filenames'
+ ' with tabs or trailing spaces')
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+
+ self.build_tree(['b1/trailing space '])
+ self.tree1.add(['trailing space '])
+ # TODO: jam 20060701 Check for handling files with '\t' characters
+ # once we actually support them
+
+ # Added
+ self.tree1.commit('funky whitespace', rev_id='white-1')
+
+ bundle = self.get_valid_bundle('null:', 'white-1')
+
+ # Modified
+ with open('b1/trailing space ', 'ab') as f: f.write('add some text\n')
+ self.tree1.commit('add text', rev_id='white-2')
+
+ bundle = self.get_valid_bundle('white-1', 'white-2')
+
+ # Renamed
+ self.tree1.rename_one('trailing space ', ' start and end space ')
+ self.tree1.commit('rename', rev_id='white-3')
+
+ bundle = self.get_valid_bundle('white-2', 'white-3')
+
+ # Removed
+ self.tree1.remove([' start and end space '])
+ self.tree1.commit('removed', rev_id='white-4')
+
+ bundle = self.get_valid_bundle('white-3', 'white-4')
+
+ # Now test a complete roll-up
+ bundle = self.get_valid_bundle('null:', 'white-4')
+
+ def test_alt_timezone_bundle(self):
+ self.tree1 = self.make_branch_and_memory_tree('b1')
+ self.b1 = self.tree1.branch
+ builder = treebuilder.TreeBuilder()
+
+ self.tree1.lock_write()
+ builder.start_tree(self.tree1)
+ builder.build(['newfile'])
+ builder.finish_tree()
+
+ # Asia/Colombo offset = 5 hours 30 minutes
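+ # (19800 seconds == 5 * 3600 + 30 * 60, rendered as +0530 below)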
+ self.tree1.commit('non-hour offset timezone', rev_id='tz-1',
+ timezone=19800, timestamp=1152544886.0)
+
+ bundle = self.get_valid_bundle('null:', 'tz-1')
+
+ rev = bundle.revisions[0]
+ self.assertEqual('Mon 2006-07-10 20:51:26.000000000 +0530', rev.date)
+ self.assertEqual(19800, rev.timezone)
+ self.assertEqual(1152544886.0, rev.timestamp)
+ self.tree1.unlock()
+
+ def test_bundle_root_id(self):
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+ self.tree1.commit('message', rev_id='revid1')
+ bundle = self.get_valid_bundle('null:', 'revid1')
+ tree = self.get_bundle_tree(bundle, 'revid1')
+ root_revision = tree.get_file_revision(tree.get_root_id())
+ self.assertEqual('revid1', root_revision)
+
+ def test_install_revisions(self):
+ self.tree1 = self.make_branch_and_tree('b1')
+ self.b1 = self.tree1.branch
+ self.tree1.commit('message', rev_id='rev2a')
+ bundle = self.get_valid_bundle('null:', 'rev2a')
+ branch2 = self.make_branch('b2')
+ self.assertFalse(branch2.repository.has_revision('rev2a'))
+ target_revision = bundle.install_revisions(branch2.repository)
+ self.assertTrue(branch2.repository.has_revision('rev2a'))
+ self.assertEqual('rev2a', target_revision)
+
+ def test_bundle_empty_property(self):
+ """Test serializing revision properties with an empty value."""
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['TREE_ROOT'])
+ tree.commit('One', revprops={'one':'two', 'empty':''}, rev_id='rev1')
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'empty':'', 'one':'two'},
+ rev.properties)
+
+ def test_bundle_sorted_properties(self):
+ """For stability the writer should write properties in sorted order."""
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+
+ tree.add([''], ['TREE_ROOT'])
+ tree.commit('One', rev_id='rev1',
+ revprops={'a':'4', 'b':'3', 'c':'2', 'd':'1'})
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'a':'4', 'b':'3', 'c':'2',
+ 'd':'1'}, rev.properties)
+
+ def test_bundle_unicode_properties(self):
+ """We should be able to round trip a non-ascii property."""
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+
+ tree.add([''], ['TREE_ROOT'])
+ # Revisions themselves do not require anything about revision property
+ # keys, other than that they are a basestring, and do not contain
+ # whitespace.
+ # However, Testaments assert that they are str(), and thus should not
+ # be Unicode.
+ tree.commit('One', rev_id='rev1',
+ revprops={'omega':u'\u03a9', 'alpha':u'\u03b1'})
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'omega':u'\u03a9',
+ 'alpha':u'\u03b1'}, rev.properties)
+
+ def test_bundle_with_ghosts(self):
+ tree = self.make_branch_and_tree('tree')
+ self.b1 = tree.branch
+ self.build_tree_contents([('tree/file', 'content1')])
+ tree.add(['file'])
+ tree.commit('rev1')
+ self.build_tree_contents([('tree/file', 'content2')])
+ tree.add_parent_tree_id('ghost')
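+ # 'ghost' is never actually committed anywhere, so rev2 gets a ghost
+ # ancestor; the bundle should still be generated and validate.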
+ tree.commit('rev2', rev_id='rev2')
+ bundle = self.get_valid_bundle('null:', 'rev2')
+
+ def make_simple_tree(self, format=None):
+ tree = self.make_branch_and_tree('b1', format=format)
+ self.b1 = tree.branch
+ self.build_tree(['b1/file'])
+ tree.add('file')
+ return tree
+
+ def test_across_serializers(self):
+ tree = self.make_simple_tree('knit')
+ tree.commit('hello', rev_id='rev1')
+ tree.commit('hello', rev_id='rev2')
+ bundle = read_bundle(self.create_bundle_text('null:', 'rev2')[0])
+ repo = self.make_repository('repo', format='dirstate-with-subtree')
+ bundle.install_revisions(repo)
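+ # After installation the inventory must use the target repository's
+ # serialization (format 7), not the source 'knit' repository's (format 5).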
+ inv_text = repo._get_inventory_xml('rev2')
+ self.assertNotContainsRe(inv_text, 'format="5"')
+ self.assertContainsRe(inv_text, 'format="7"')
+
+ def make_repo_with_installed_revisions(self):
+ tree = self.make_simple_tree('knit')
+ tree.commit('hello', rev_id='rev1')
+ tree.commit('hello', rev_id='rev2')
+ bundle = read_bundle(self.create_bundle_text('null:', 'rev2')[0])
+ repo = self.make_repository('repo', format='dirstate-with-subtree')
+ bundle.install_revisions(repo)
+ return repo
+
+ def test_across_models(self):
+ repo = self.make_repo_with_installed_revisions()
+ inv = repo.get_inventory('rev2')
+ self.assertEqual('rev2', inv.root.revision)
+ root_id = inv.root.file_id
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.assertEqual({(root_id, 'rev1'):(),
+ (root_id, 'rev2'):((root_id, 'rev1'),)},
+ repo.texts.get_parent_map([(root_id, 'rev1'), (root_id, 'rev2')]))
+
+ def test_inv_hash_across_serializers(self):
+ repo = self.make_repo_with_installed_revisions()
+ recorded_inv_sha1 = repo.get_revision('rev2').inventory_sha1
+ xml = repo._get_inventory_xml('rev2')
+ self.assertEqual(osutils.sha_string(xml), recorded_inv_sha1)
+
+ def test_across_models_incompatible(self):
+ tree = self.make_simple_tree('dirstate-with-subtree')
+ tree.commit('hello', rev_id='rev1')
+ tree.commit('hello', rev_id='rev2')
+ try:
+ bundle = read_bundle(self.create_bundle_text('null:', 'rev1')[0])
+ except errors.IncompatibleBundleFormat:
+ raise tests.TestSkipped("Format 0.8 doesn't work with knit3")
+ repo = self.make_repository('repo', format='knit')
+ bundle.install_revisions(repo)
+
+ bundle = read_bundle(self.create_bundle_text('null:', 'rev2')[0])
+ self.assertRaises(errors.IncompatibleRevision,
+ bundle.install_revisions, repo)
+
+ def test_get_merge_request(self):
+ tree = self.make_simple_tree()
+ tree.commit('hello', rev_id='rev1')
+ tree.commit('hello', rev_id='rev2')
+ bundle = read_bundle(self.create_bundle_text('null:', 'rev1')[0])
+ result = bundle.get_merge_request(tree.branch.repository)
+ self.assertEqual((None, 'rev1', 'inapplicable'), result)
+
+ def test_with_subtree(self):
+ tree = self.make_branch_and_tree('tree',
+ format='dirstate-with-subtree')
+ self.b1 = tree.branch
+ subtree = self.make_branch_and_tree('tree/subtree',
+ format='dirstate-with-subtree')
+ tree.add('subtree')
+ tree.commit('hello', rev_id='rev1')
+ try:
+ bundle = read_bundle(self.create_bundle_text('null:', 'rev1')[0])
+ except errors.IncompatibleBundleFormat:
+ raise tests.TestSkipped("Format 0.8 doesn't work with knit3")
+ if isinstance(bundle, v09.BundleInfo09):
+ raise tests.TestSkipped("Format 0.9 doesn't work with subtrees")
+ repo = self.make_repository('repo', format='knit')
+ self.assertRaises(errors.IncompatibleRevision,
+ bundle.install_revisions, repo)
+ repo2 = self.make_repository('repo2', format='dirstate-with-subtree')
+ bundle.install_revisions(repo2)
+
+ def test_revision_id_with_slash(self):
+ self.tree1 = self.make_branch_and_tree('tree')
+ self.b1 = self.tree1.branch
+ try:
+ self.tree1.commit('Revision/id/with/slashes', rev_id='rev/id')
+ except ValueError:
+ raise tests.TestSkipped(
+ "Repository doesn't support revision ids with slashes")
+ bundle = self.get_valid_bundle('null:', 'rev/id')
+
+ def test_skip_file(self):
+ """Make sure we don't accidentally write to the wrong versionedfile"""
+ self.tree1 = self.make_branch_and_tree('tree')
+ self.b1 = self.tree1.branch
+ # rev1 is not present in bundle, done by fetch
+ self.build_tree_contents([('tree/file2', 'contents1')])
+ self.tree1.add('file2', 'file2-id')
+ self.tree1.commit('rev1', rev_id='reva')
+ self.build_tree_contents([('tree/file3', 'contents2')])
+ # rev2 is present in bundle, and done by fetch
+ # having file1 in the bundle causes file1's versionedfile to be opened.
+ self.tree1.add('file3', 'file3-id')
+ self.tree1.commit('rev2')
+ # Updating file2 should not cause an attempt to add to file1's vf
+ target = self.tree1.bzrdir.sprout('target').open_workingtree()
+ self.build_tree_contents([('tree/file2', 'contents3')])
+ self.tree1.commit('rev3', rev_id='rev3')
+ bundle = self.get_valid_bundle('reva', 'rev3')
+ if getattr(bundle, 'get_bundle_reader', None) is None:
+ raise tests.TestSkipped('Bundle format cannot provide reader')
+ # be sure that file1 comes before file2
+ for b, m, k, r, f in bundle.get_bundle_reader().iter_records():
+ if f == 'file3-id':
+ break
+ self.assertNotEqual(f, 'file2-id')
+ bundle.install_revisions(target.branch.repository)
+
+
+class V08BundleTester(BundleTester, tests.TestCaseWithTransport):
+
+ format = '0.8'
+
+ def test_bundle_empty_property(self):
+ """Test serializing revision properties with an empty value."""
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['TREE_ROOT'])
+ tree.commit('One', revprops={'one':'two', 'empty':''}, rev_id='rev1')
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ self.assertContainsRe(bundle_sio.getvalue(),
+ '# properties:\n'
+ '# branch-nick: tree\n'
+ '# empty: \n'
+ '# one: two\n'
+ )
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'empty':'', 'one':'two'},
+ rev.properties)
+
+ def get_bundle_tree(self, bundle, revision_id):
+ repository = self.make_repository('repo')
+ return bundle.revision_tree(repository, revision_id)
+
+ def test_bundle_empty_property_alt(self):
+ """Test serializing revision properties with an empty value.
+
+ Older readers had a bug when reading an empty property.
+ They assumed that all keys ended in ': \n'. However, they would write an
+ empty value as ':\n'. This test makes sure that all newer bzr versions
+ can handle the second form.
+ """
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['TREE_ROOT'])
+ tree.commit('One', revprops={'one':'two', 'empty':''}, rev_id='rev1')
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ txt = bundle_sio.getvalue()
+ loc = txt.find('# empty: ') + len('# empty:')
+ # Create a new bundle, which strips the trailing space after empty
+ bundle_sio = StringIO(txt[:loc] + txt[loc+1:])
+
+ self.assertContainsRe(bundle_sio.getvalue(),
+ '# properties:\n'
+ '# branch-nick: tree\n'
+ '# empty:\n'
+ '# one: two\n'
+ )
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'empty':'', 'one':'two'},
+ rev.properties)
+
+ def test_bundle_sorted_properties(self):
+ """For stability the writer should write properties in sorted order."""
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+
+ tree.add([''], ['TREE_ROOT'])
+ tree.commit('One', rev_id='rev1',
+ revprops={'a':'4', 'b':'3', 'c':'2', 'd':'1'})
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ self.assertContainsRe(bundle_sio.getvalue(),
+ '# properties:\n'
+ '# a: 4\n'
+ '# b: 3\n'
+ '# branch-nick: tree\n'
+ '# c: 2\n'
+ '# d: 1\n'
+ )
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'a':'4', 'b':'3', 'c':'2',
+ 'd':'1'}, rev.properties)
+
+ def test_bundle_unicode_properties(self):
+ """We should be able to round trip a non-ascii property."""
+ tree = self.make_branch_and_memory_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+
+ tree.add([''], ['TREE_ROOT'])
+ # Revisions themselves do not require anything about revision property
+ # keys, other than that they are a basestring, and do not contain
+ # whitespace.
+ # However, Testaments assert that they are str(), and thus should not
+ # be Unicode.
+ tree.commit('One', rev_id='rev1',
+ revprops={'omega':u'\u03a9', 'alpha':u'\u03b1'})
+ self.b1 = tree.branch
+ bundle_sio, revision_ids = self.create_bundle_text('null:', 'rev1')
+ self.assertContainsRe(bundle_sio.getvalue(),
+ '# properties:\n'
+ '# alpha: \xce\xb1\n'
+ '# branch-nick: tree\n'
+ '# omega: \xce\xa9\n'
+ )
+ bundle = read_bundle(bundle_sio)
+ revision_info = bundle.revisions[0]
+ self.assertEqual('rev1', revision_info.revision_id)
+ rev = revision_info.as_revision()
+ self.assertEqual({'branch-nick':'tree', 'omega':u'\u03a9',
+ 'alpha':u'\u03b1'}, rev.properties)
+
+
+class V09BundleKnit2Tester(V08BundleTester):
+
+ format = '0.9'
+
+ def bzrdir_format(self):
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ return format
+
+
+class V09BundleKnit1Tester(V08BundleTester):
+
+ format = '0.9'
+
+ def bzrdir_format(self):
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit1()
+ return format
+
+
+class V4BundleTester(BundleTester, tests.TestCaseWithTransport):
+
+ format = '4'
+
+ def get_valid_bundle(self, base_rev_id, rev_id, checkout_dir=None):
+ """Create a bundle from base_rev_id -> rev_id in built-in branch.
+ Make sure that the text generated is valid, and that it
+ can be applied against the base, and generate the same information.
+
+ :return: The in-memory bundle
+ """
+ bundle_txt, rev_ids = self.create_bundle_text(base_rev_id, rev_id)
+
+ # This should also validate the generated bundle
+ bundle = read_bundle(bundle_txt)
+ repository = self.b1.repository
+ for bundle_rev in bundle.real_revisions:
+ # These really should have already been checked when we read the
+ # bundle, since it computes the sha1 hash for the revision, which
+ # will only match if everything is okay, but let's be explicit about
+ # it
+ branch_rev = repository.get_revision(bundle_rev.revision_id)
+ for a in ('inventory_sha1', 'revision_id', 'parent_ids',
+ 'timestamp', 'timezone', 'message', 'committer',
+ 'properties'):
+ self.assertEqual(getattr(branch_rev, a),
+ getattr(bundle_rev, a))
+ self.assertEqual(len(branch_rev.parent_ids),
+ len(bundle_rev.parent_ids))
+ self.assertEqual(set(rev_ids),
+ set([r.revision_id for r in bundle.real_revisions]))
+ self.valid_apply_bundle(base_rev_id, bundle,
+ checkout_dir=checkout_dir)
+
+ return bundle
+
+ def get_invalid_bundle(self, base_rev_id, rev_id):
+ """Create a bundle from base_rev_id -> rev_id in built-in branch.
+ Munge the text so that it's invalid.
+
+ :return: The in-memory bundle
+ """
+ from bzrlib.bundle import serializer
+ bundle_txt, rev_ids = self.create_bundle_text(base_rev_id, rev_id)
+ new_text = self.get_raw(StringIO(''.join(bundle_txt)))
+ new_text = new_text.replace('<file file_id="exe-1"',
+ '<file executable="y" file_id="exe-1"')
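+ # The inserted 'executable="y" ' attribute is 15 bytes long, so the
+ # length-prefixed record header grows by the same amount: B260 -> B275.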
+ new_text = new_text.replace('B260', 'B275')
+ bundle_txt = StringIO()
+ bundle_txt.write(serializer._get_bundle_header('4'))
+ bundle_txt.write('\n')
+ bundle_txt.write(new_text.encode('bz2'))
+ bundle_txt.seek(0)
+ bundle = read_bundle(bundle_txt)
+ self.valid_apply_bundle(base_rev_id, bundle)
+ return bundle
+
+ def create_bundle_text(self, base_rev_id, rev_id):
+ bundle_txt = StringIO()
+ rev_ids = write_bundle(self.b1.repository, rev_id, base_rev_id,
+ bundle_txt, format=self.format)
+ bundle_txt.seek(0)
+ self.assertEqual(bundle_txt.readline(),
+ '# Bazaar revision bundle v%s\n' % self.format)
+ self.assertEqual(bundle_txt.readline(), '#\n')
+ rev = self.b1.repository.get_revision(rev_id)
+ bundle_txt.seek(0)
+ return bundle_txt, rev_ids
+
+ def get_bundle_tree(self, bundle, revision_id):
+ repository = self.make_repository('repo')
+ bundle.install_revisions(repository)
+ return repository.revision_tree(revision_id)
+
+ def test_creation(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'contents1\nstatic\n')])
+ tree.add('file', 'fileid-2')
+ tree.commit('added file', rev_id='rev1')
+ self.build_tree_contents([('tree/file', 'contents2\nstatic\n')])
+ tree.commit('changed file', rev_id='rev2')
+ s = StringIO()
+ serializer = BundleSerializerV4('1.0')
+ serializer.write(tree.branch.repository, ['rev1', 'rev2'], {}, s)
+ s.seek(0)
+ tree2 = self.make_branch_and_tree('target')
+ target_repo = tree2.branch.repository
+ install_bundle(target_repo, serializer.read(s))
+ target_repo.lock_read()
+ self.addCleanup(target_repo.unlock)
+ # Turn the 'iterators_of_bytes' back into simple strings for comparison
+ repo_texts = dict((i, ''.join(content)) for i, content
+ in target_repo.iter_files_bytes(
+ [('fileid-2', 'rev1', '1'),
+ ('fileid-2', 'rev2', '2')]))
+ self.assertEqual({'1':'contents1\nstatic\n',
+ '2':'contents2\nstatic\n'},
+ repo_texts)
+ rtree = target_repo.revision_tree('rev2')
+ inventory_vf = target_repo.inventories
+ # If the inventory store has a graph, it must match the revision graph.
+ self.assertSubset(
+ [inventory_vf.get_parent_map([('rev2',)])[('rev2',)]],
+ [None, (('rev1',),)])
+ self.assertEqual('changed file',
+ target_repo.get_revision('rev2').message)
+
+ @staticmethod
+ def get_raw(bundle_file):
+ bundle_file.seek(0)
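+ # Skip the '# Bazaar revision bundle v4' header line and the '#' line
+ # that follows it; the rest of the file is the bz2-compressed payload.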
+ line = bundle_file.readline()
+ line = bundle_file.readline()
+ lines = bundle_file.readlines()
+ return ''.join(lines).decode('bz2')
+
+ def test_copy_signatures(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ import bzrlib.gpg
+ import bzrlib.commit as commit
+ oldstrategy = bzrlib.gpg.GPGStrategy
+ branch = tree_a.branch
+ repo_a = branch.repository
+ tree_a.commit("base", allow_pointless=True, rev_id='A')
+ self.assertFalse(branch.repository.has_signature_for_revision_id('A'))
+ try:
+ from bzrlib.testament import Testament
+ # monkey patch gpg signing mechanism
+ bzrlib.gpg.GPGStrategy = bzrlib.gpg.LoopbackGPGStrategy
+ new_config = test_commit.MustSignConfig()
+ commit.Commit(config_stack=new_config).commit(message="base",
+ allow_pointless=True,
+ rev_id='B',
+ working_tree=tree_a)
+ def sign(text):
+ return bzrlib.gpg.LoopbackGPGStrategy(None).sign(text)
+ self.assertTrue(repo_a.has_signature_for_revision_id('B'))
+ finally:
+ bzrlib.gpg.GPGStrategy = oldstrategy
+ tree_b = self.make_branch_and_tree('tree_b')
+ repo_b = tree_b.branch.repository
+ s = StringIO()
+ serializer = BundleSerializerV4('4')
+ serializer.write(tree_a.branch.repository, ['A', 'B'], {}, s)
+ s.seek(0)
+ install_bundle(repo_b, serializer.read(s))
+ self.assertTrue(repo_b.has_signature_for_revision_id('B'))
+ self.assertEqual(repo_b.get_signature_text('B'),
+ repo_a.get_signature_text('B'))
+ s.seek(0)
+ # ensure repeat installs are harmless
+ install_bundle(repo_b, serializer.read(s))
+
+
+class V4_2aBundleTester(V4BundleTester):
+
+ def bzrdir_format(self):
+ return '2a'
+
+ def get_invalid_bundle(self, base_rev_id, rev_id):
+ """Create a bundle from base_rev_id -> rev_id in built-in branch.
+ Munge the text so that it's invalid.
+
+ :return: The in-memory bundle
+ """
+ from bzrlib.bundle import serializer
+ bundle_txt, rev_ids = self.create_bundle_text(base_rev_id, rev_id)
+ new_text = self.get_raw(StringIO(''.join(bundle_txt)))
+ # We are going to be replacing some text to set the executable bit on a
+ # file. Make sure the text replacement actually works correctly.
+ self.assertContainsRe(new_text, '(?m)B244\n\ni 1\n<inventory')
+ new_text = new_text.replace('<file file_id="exe-1"',
+ '<file executable="y" file_id="exe-1"')
+ new_text = new_text.replace('B244', 'B259')
+ bundle_txt = StringIO()
+ bundle_txt.write(serializer._get_bundle_header('4'))
+ bundle_txt.write('\n')
+ bundle_txt.write(new_text.encode('bz2'))
+ bundle_txt.seek(0)
+ bundle = read_bundle(bundle_txt)
+ self.valid_apply_bundle(base_rev_id, bundle)
+ return bundle
+
+ def make_merged_branch(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
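+ # Build a small merge graph: 2a and 2b both branch from 1, and 3 merges them.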
+ builder.build_snapshot('a@cset-0-1', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'original content\n')),
+ ])
+ builder.build_snapshot('a@cset-0-2a', ['a@cset-0-1'], [
+ ('modify', ('file-id', 'new-content\n')),
+ ])
+ builder.build_snapshot('a@cset-0-2b', ['a@cset-0-1'], [
+ ('add', ('other-file', 'file2-id', 'file', 'file2-content\n')),
+ ])
+ builder.build_snapshot('a@cset-0-3', ['a@cset-0-2a', 'a@cset-0-2b'], [
+ ('add', ('other-file', 'file2-id', 'file', 'file2-content\n')),
+ ])
+ builder.finish_series()
+ self.b1 = builder.get_branch()
+ self.b1.lock_read()
+ self.addCleanup(self.b1.unlock)
+
+ def make_bundle_just_inventories(self, base_revision_id,
+ target_revision_id,
+ revision_ids):
+ sio = StringIO()
+ writer = v4.BundleWriteOperation(base_revision_id, target_revision_id,
+ self.b1.repository, sio)
+ writer.bundle.begin()
+ writer._add_inventory_mpdiffs_from_serializer(revision_ids)
+ writer.bundle.end()
+ sio.seek(0)
+ return sio
+
+ def test_single_inventory_multiple_parents_as_xml(self):
+ self.make_merged_branch()
+ sio = self.make_bundle_just_inventories('a@cset-0-1', 'a@cset-0-3',
+ ['a@cset-0-3'])
+ reader = v4.BundleReader(sio, stream_input=False)
+ records = list(reader.iter_records())
+ self.assertEqual(1, len(records))
+ (bytes, metadata, repo_kind, revision_id,
+ file_id) = records[0]
+ self.assertIs(None, file_id)
+ self.assertEqual('a@cset-0-3', revision_id)
+ self.assertEqual('inventory', repo_kind)
+ self.assertEqual({'parents': ['a@cset-0-2a', 'a@cset-0-2b'],
+ 'sha1': '09c53b0c4de0895e11a2aacc34fef60a6e70865c',
+ 'storage_kind': 'mpdiff',
+ }, metadata)
+ # We should have an mpdiff that takes some lines from both parents.
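+ # (In the mpdiff format, 'i N' inserts the next N lines, and
+ # 'c parent parent_pos child_pos num_lines' copies lines from that parent.)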
+ self.assertEqualDiff(
+ 'i 1\n'
+ '<inventory format="10" revision_id="a@cset-0-3">\n'
+ '\n'
+ 'c 0 1 1 2\n'
+ 'c 1 3 3 2\n', bytes)
+
+ def test_single_inv_no_parents_as_xml(self):
+ self.make_merged_branch()
+ sio = self.make_bundle_just_inventories('null:', 'a@cset-0-1',
+ ['a@cset-0-1'])
+ reader = v4.BundleReader(sio, stream_input=False)
+ records = list(reader.iter_records())
+ self.assertEqual(1, len(records))
+ (bytes, metadata, repo_kind, revision_id,
+ file_id) = records[0]
+ self.assertIs(None, file_id)
+ self.assertEqual('a@cset-0-1', revision_id)
+ self.assertEqual('inventory', repo_kind)
+ self.assertEqual({'parents': [],
+ 'sha1': 'a13f42b142d544aac9b085c42595d304150e31a2',
+ 'storage_kind': 'mpdiff',
+ }, metadata)
+ # We should have an mpdiff that takes some lines from both parents.
+ self.assertEqualDiff(
+ 'i 4\n'
+ '<inventory format="10" revision_id="a@cset-0-1">\n'
+ '<directory file_id="root-id" name=""'
+ ' revision="a@cset-0-1" />\n'
+ '<file file_id="file-id" name="file" parent_id="root-id"'
+ ' revision="a@cset-0-1"'
+ ' text_sha1="09c2f8647e14e49e922b955c194102070597c2d1"'
+ ' text_size="17" />\n'
+ '</inventory>\n'
+ '\n', bytes)
+
+ def test_multiple_inventories_as_xml(self):
+ self.make_merged_branch()
+ sio = self.make_bundle_just_inventories('a@cset-0-1', 'a@cset-0-3',
+ ['a@cset-0-2a', 'a@cset-0-2b', 'a@cset-0-3'])
+ reader = v4.BundleReader(sio, stream_input=False)
+ records = list(reader.iter_records())
+ self.assertEqual(3, len(records))
+ revision_ids = [rev_id for b, m, k, rev_id, f in records]
+ self.assertEqual(['a@cset-0-2a', 'a@cset-0-2b', 'a@cset-0-3'],
+ revision_ids)
+ metadata_2a = records[0][1]
+ self.assertEqual({'parents': ['a@cset-0-1'],
+ 'sha1': '1e105886d62d510763e22885eec733b66f5f09bf',
+ 'storage_kind': 'mpdiff',
+ }, metadata_2a)
+ metadata_2b = records[1][1]
+ self.assertEqual({'parents': ['a@cset-0-1'],
+ 'sha1': 'f03f12574bdb5ed2204c28636c98a8547544ccd8',
+ 'storage_kind': 'mpdiff',
+ }, metadata_2b)
+ metadata_3 = records[2][1]
+ self.assertEqual({'parents': ['a@cset-0-2a', 'a@cset-0-2b'],
+ 'sha1': '09c53b0c4de0895e11a2aacc34fef60a6e70865c',
+ 'storage_kind': 'mpdiff',
+ }, metadata_3)
+ bytes_2a = records[0][0]
+ self.assertEqualDiff(
+ 'i 1\n'
+ '<inventory format="10" revision_id="a@cset-0-2a">\n'
+ '\n'
+ 'c 0 1 1 1\n'
+ 'i 1\n'
+ '<file file_id="file-id" name="file" parent_id="root-id"'
+ ' revision="a@cset-0-2a"'
+ ' text_sha1="50f545ff40e57b6924b1f3174b267ffc4576e9a9"'
+ ' text_size="12" />\n'
+ '\n'
+ 'c 0 3 3 1\n', bytes_2a)
+ bytes_2b = records[1][0]
+ self.assertEqualDiff(
+ 'i 1\n'
+ '<inventory format="10" revision_id="a@cset-0-2b">\n'
+ '\n'
+ 'c 0 1 1 2\n'
+ 'i 1\n'
+ '<file file_id="file2-id" name="other-file" parent_id="root-id"'
+ ' revision="a@cset-0-2b"'
+ ' text_sha1="b46c0c8ea1e5ef8e46fc8894bfd4752a88ec939e"'
+ ' text_size="14" />\n'
+ '\n'
+ 'c 0 3 4 1\n', bytes_2b)
+ bytes_3 = records[2][0]
+ self.assertEqualDiff(
+ 'i 1\n'
+ '<inventory format="10" revision_id="a@cset-0-3">\n'
+ '\n'
+ 'c 0 1 1 2\n'
+ 'c 1 3 3 2\n', bytes_3)
+
+ def test_creating_bundle_preserves_chk_pages(self):
+ self.make_merged_branch()
+ target = self.b1.bzrdir.sprout('target',
+ revision_id='a@cset-0-2a').open_branch()
+ bundle_txt, rev_ids = self.create_bundle_text('a@cset-0-2a',
+ 'a@cset-0-3')
+ self.assertEqual(['a@cset-0-2b', 'a@cset-0-3'], rev_ids)
+ bundle = read_bundle(bundle_txt)
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ install_bundle(target.repository, bundle)
+ inv1 = self.b1.repository.inventories.get_record_stream([
+ ('a@cset-0-3',)], 'unordered',
+ True).next().get_bytes_as('fulltext')
+ inv2 = target.repository.inventories.get_record_stream([
+ ('a@cset-0-3',)], 'unordered',
+ True).next().get_bytes_as('fulltext')
+ self.assertEqualDiff(inv1, inv2)
+
+
+class MungedBundleTester(object):
+
+ def build_test_bundle(self):
+ wt = self.make_branch_and_tree('b1')
+
+ self.build_tree(['b1/one'])
+ wt.add('one')
+ wt.commit('add one', rev_id='a@cset-0-1')
+ self.build_tree(['b1/two'])
+ wt.add('two')
+ wt.commit('add two', rev_id='a@cset-0-2',
+ revprops={'branch-nick':'test'})
+
+ bundle_txt = StringIO()
+ rev_ids = write_bundle(wt.branch.repository, 'a@cset-0-2',
+ 'a@cset-0-1', bundle_txt, self.format)
+ self.assertEqual(set(['a@cset-0-2']), set(rev_ids))
+ bundle_txt.seek(0, 0)
+ return bundle_txt
+
+ def check_valid(self, bundle):
+ """Check that after whatever munging, the final object is valid."""
+ self.assertEqual(['a@cset-0-2'],
+ [r.revision_id for r in bundle.real_revisions])
+
+ def test_extra_whitespace(self):
+ bundle_txt = self.build_test_bundle()
+
+ # Seek to the end of the file
+ # Adding one extra newline used to give us
+ # TypeError: float() argument must be a string or a number
+ bundle_txt.seek(0, 2)
+ bundle_txt.write('\n')
+ bundle_txt.seek(0)
+
+ bundle = read_bundle(bundle_txt)
+ self.check_valid(bundle)
+
+ def test_extra_whitespace_2(self):
+ bundle_txt = self.build_test_bundle()
+
+ # Seek to the end of the file
+ # Adding two extra newlines used to give us
+ # MalformedPatches: The first line of all patches should be ...
+ bundle_txt.seek(0, 2)
+ bundle_txt.write('\n\n')
+ bundle_txt.seek(0)
+
+ bundle = read_bundle(bundle_txt)
+ self.check_valid(bundle)
+
+
+class MungedBundleTesterV09(tests.TestCaseWithTransport, MungedBundleTester):
+
+ format = '0.9'
+
+ def test_missing_trailing_whitespace(self):
+ bundle_txt = self.build_test_bundle()
+
+ # Remove a trailing newline, it shouldn't kill the parser
+ raw = bundle_txt.getvalue()
+ # The contents of the bundle don't have to be this, but this
+ # test is concerned with the exact case where the serializer
+ # creates a blank line at the end, and fails if that
+ # line is stripped
+ self.assertEqual('\n\n', raw[-2:])
+ bundle_txt = StringIO(raw[:-1])
+
+ bundle = read_bundle(bundle_txt)
+ self.check_valid(bundle)
+
+ def test_opening_text(self):
+ bundle_txt = self.build_test_bundle()
+
+ bundle_txt = StringIO("Some random\nemail comments\n"
+ + bundle_txt.getvalue())
+
+ bundle = read_bundle(bundle_txt)
+ self.check_valid(bundle)
+
+ def test_trailing_text(self):
+ bundle_txt = self.build_test_bundle()
+
+ bundle_txt = StringIO(bundle_txt.getvalue() +
+ "Some trailing\nrandom\ntext\n")
+
+ bundle = read_bundle(bundle_txt)
+ self.check_valid(bundle)
+
+
+class MungedBundleTesterV4(tests.TestCaseWithTransport, MungedBundleTester):
+
+ format = '4'
+
+
+class TestBundleWriterReader(tests.TestCase):
+
+ def test_roundtrip_record(self):
+ fileobj = StringIO()
+ writer = v4.BundleWriter(fileobj)
+ writer.begin()
+ writer.add_info_record(foo='bar')
+ writer._add_record("Record body", {'parents': ['1', '3'],
+ 'storage_kind':'fulltext'}, 'file', 'revid', 'fileid')
+ writer.end()
+ fileobj.seek(0)
+ reader = v4.BundleReader(fileobj, stream_input=True)
+ record_iter = reader.iter_records()
+ record = record_iter.next()
+ self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'},
+ 'info', None, None), record)
+ record = record_iter.next()
+ self.assertEqual(("Record body", {'storage_kind': 'fulltext',
+ 'parents': ['1', '3']}, 'file', 'revid', 'fileid'),
+ record)
+
+ def test_roundtrip_record_memory_hungry(self):
+ fileobj = StringIO()
+ writer = v4.BundleWriter(fileobj)
+ writer.begin()
+ writer.add_info_record(foo='bar')
+ writer._add_record("Record body", {'parents': ['1', '3'],
+ 'storage_kind':'fulltext'}, 'file', 'revid', 'fileid')
+ writer.end()
+ fileobj.seek(0)
+ reader = v4.BundleReader(fileobj, stream_input=False)
+ record_iter = reader.iter_records()
+ record = record_iter.next()
+ self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'},
+ 'info', None, None), record)
+ record = record_iter.next()
+ self.assertEqual(("Record body", {'storage_kind': 'fulltext',
+ 'parents': ['1', '3']}, 'file', 'revid', 'fileid'),
+ record)
+
+ def test_encode_name(self):
+ self.assertEqual('revision/rev1',
+ v4.BundleWriter.encode_name('revision', 'rev1'))
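+ # A '/' inside a name is escaped by doubling it, hence 'rev/1' -> 'rev//1'.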
+ self.assertEqual('file/rev//1/file-id-1',
+ v4.BundleWriter.encode_name('file', 'rev/1', 'file-id-1'))
+ self.assertEqual('info',
+ v4.BundleWriter.encode_name('info', None, None))
+
+ def test_decode_name(self):
+ self.assertEqual(('revision', 'rev1', None),
+ v4.BundleReader.decode_name('revision/rev1'))
+ self.assertEqual(('file', 'rev/1', 'file-id-1'),
+ v4.BundleReader.decode_name('file/rev//1/file-id-1'))
+ self.assertEqual(('info', None, None),
+ v4.BundleReader.decode_name('info'))
+
+ def test_too_many_names(self):
+ fileobj = StringIO()
+ writer = v4.BundleWriter(fileobj)
+ writer.begin()
+ writer.add_info_record(foo='bar')
+ writer._container.add_bytes_record('blah', ['two', 'names'])
+ writer.end()
+ fileobj.seek(0)
+ record_iter = v4.BundleReader(fileobj).iter_records()
+ record = record_iter.next()
+ self.assertEqual((None, {'foo': 'bar', 'storage_kind': 'header'},
+ 'info', None, None), record)
+ self.assertRaises(errors.BadBundle, record_iter.next)
+
+
+class TestReadMergeableFromUrl(tests.TestCaseWithTransport):
+
+ def test_read_mergeable_skips_local(self):
+ """A local bundle named like the URL should not be read.
+ """
+ out, wt = test_read_bundle.create_bundle_file(self)
+ class FooService(object):
+ """A directory service that always returns source"""
+
+ def look_up(self, name, url):
+ return 'source'
+ directories.register('foo:', FooService, 'Testing directory service')
+ self.addCleanup(directories.remove, 'foo:')
+ self.build_tree_contents([('./foo:bar', out.getvalue())])
+ self.assertRaises(errors.NotABundle, read_mergeable_from_url,
+ 'foo:bar')
+
+ def test_infinite_redirects_are_not_a_bundle(self):
+ """If a URL causes TooManyRedirections then NotABundle is raised.
+ """
+ from bzrlib.tests.blackbox.test_push import RedirectingMemoryServer
+ server = RedirectingMemoryServer()
+ self.start_server(server)
+ url = server.get_url() + 'infinite-loop'
+ self.assertRaises(errors.NotABundle, read_mergeable_from_url, url)
+
+ def test_smart_server_connection_reset(self):
+ """If a smart server connection fails during the attempt to read a
+ bundle, then the ConnectionReset error should be propagated.
+ """
+ # Instantiate a server that will provoke a ConnectionReset
+ sock_server = DisconnectingServer()
+ self.start_server(sock_server)
+ # We don't really care what the url is since the server will close the
+ # connection without interpreting it
+ url = sock_server.get_url()
+ self.assertRaises(errors.ConnectionReset, read_mergeable_from_url, url)
+
+
+class DisconnectingHandler(SocketServer.BaseRequestHandler):
+ """A request handler that immediately closes any connection made to it."""
+
+ def handle(self):
+ self.request.close()
+
+
+class DisconnectingServer(test_server.TestingTCPServerInAThread):
+
+ def __init__(self):
+ super(DisconnectingServer, self).__init__(
+ ('127.0.0.1', 0),
+ test_server.TestingTCPServer,
+ DisconnectingHandler)
+
+ def get_url(self):
+ """Return the url of the server"""
+ return "bzr://%s:%d/" % self.server.server_address
diff --git a/bzrlib/tests/test_bzrdir.py b/bzrlib/tests/test_bzrdir.py
new file mode 100644
index 0000000..3e170b0
--- /dev/null
+++ b/bzrlib/tests/test_bzrdir.py
@@ -0,0 +1,1572 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the BzrDir facility and any format specific tests.
+
+For interface contract tests, see tests/per_bzr_dir.
+"""
+
+import os
+import subprocess
+import sys
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ config,
+ controldir,
+ errors,
+ help_topics,
+ lock,
+ repository,
+ revision as _mod_revision,
+ osutils,
+ remote,
+ transport as _mod_transport,
+ urlutils,
+ win32utils,
+ workingtree_3,
+ workingtree_4,
+ )
+import bzrlib.branch
+from bzrlib.branchfmt.fullhistory import BzrBranchFormat5
+from bzrlib.errors import (
+ NotBranchError,
+ NoColocatedBranchSupport,
+ UnknownFormatError,
+ UnsupportedFormatError,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithMemoryTransport,
+ TestCaseWithTransport,
+ TestSkipped,
+ )
+from bzrlib.tests import (
+ http_server,
+ http_utils,
+ )
+from bzrlib.tests.test_http import TestWithTransport_pycurl
+from bzrlib.transport import (
+ memory,
+ pathfilter,
+ )
+from bzrlib.transport.http._urllib import HttpTransport_urllib
+from bzrlib.transport.nosmart import NoSmartTransportDecorator
+from bzrlib.transport.readonly import ReadonlyTransportDecorator
+from bzrlib.repofmt import knitrepo, knitpack_repo
+
+
+class TestDefaultFormat(TestCase):
+
+ def test_get_set_default_format(self):
+ old_format = bzrdir.BzrDirFormat.get_default_format()
+ # default is BzrDirMetaFormat1
+ self.assertIsInstance(old_format, bzrdir.BzrDirMetaFormat1)
+ controldir.ControlDirFormat._set_default_format(SampleBzrDirFormat())
+ # creating a bzr dir should now create an instrumented dir.
+ try:
+ result = bzrdir.BzrDir.create('memory:///')
+ self.assertIsInstance(result, SampleBzrDir)
+ finally:
+ controldir.ControlDirFormat._set_default_format(old_format)
+ self.assertEqual(old_format, bzrdir.BzrDirFormat.get_default_format())
+
+
+class DeprecatedBzrDirFormat(bzrdir.BzrDirFormat):
+ """A deprecated bzr dir format."""
+
+
+class TestFormatRegistry(TestCase):
+
+ def make_format_registry(self):
+ my_format_registry = controldir.ControlDirFormatRegistry()
+ my_format_registry.register('deprecated', DeprecatedBzrDirFormat,
+ 'Some format. Slower and unawesome and deprecated.',
+ deprecated=True)
+ my_format_registry.register_lazy('lazy', 'bzrlib.tests.test_bzrdir',
+ 'DeprecatedBzrDirFormat', 'Format registered lazily',
+ deprecated=True)
+ bzrdir.register_metadir(my_format_registry, 'knit',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
+ 'Format using knits',
+ )
+ my_format_registry.set_default('knit')
+ bzrdir.register_metadir(my_format_registry,
+ 'branch6',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit3',
+ 'Experimental successor to knit. Use at your own risk.',
+ branch_format='bzrlib.branch.BzrBranchFormat6',
+ experimental=True)
+ bzrdir.register_metadir(my_format_registry,
+ 'hidden format',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit3',
+ 'Experimental successor to knit. Use at your own risk.',
+ branch_format='bzrlib.branch.BzrBranchFormat6', hidden=True)
+ my_format_registry.register('hiddendeprecated', DeprecatedBzrDirFormat,
+ 'Old format. Slower and does not support things. ', hidden=True)
+ my_format_registry.register_lazy('hiddenlazy', 'bzrlib.tests.test_bzrdir',
+ 'DeprecatedBzrDirFormat', 'Format registered lazily',
+ deprecated=True, hidden=True)
+ return my_format_registry
+
+ def test_format_registry(self):
+ my_format_registry = self.make_format_registry()
+ my_bzrdir = my_format_registry.make_bzrdir('lazy')
+ self.assertIsInstance(my_bzrdir, DeprecatedBzrDirFormat)
+ my_bzrdir = my_format_registry.make_bzrdir('deprecated')
+ self.assertIsInstance(my_bzrdir, DeprecatedBzrDirFormat)
+ my_bzrdir = my_format_registry.make_bzrdir('default')
+ self.assertIsInstance(my_bzrdir.repository_format,
+ knitrepo.RepositoryFormatKnit1)
+ my_bzrdir = my_format_registry.make_bzrdir('knit')
+ self.assertIsInstance(my_bzrdir.repository_format,
+ knitrepo.RepositoryFormatKnit1)
+ my_bzrdir = my_format_registry.make_bzrdir('branch6')
+ self.assertIsInstance(my_bzrdir.get_branch_format(),
+ bzrlib.branch.BzrBranchFormat6)
+
+ def test_get_help(self):
+ my_format_registry = self.make_format_registry()
+ self.assertEqual('Format registered lazily',
+ my_format_registry.get_help('lazy'))
+ self.assertEqual('Format using knits',
+ my_format_registry.get_help('knit'))
+ self.assertEqual('Format using knits',
+ my_format_registry.get_help('default'))
+ self.assertEqual('Some format. Slower and unawesome and deprecated.',
+ my_format_registry.get_help('deprecated'))
+
+ def test_help_topic(self):
+ topics = help_topics.HelpTopicRegistry()
+ registry = self.make_format_registry()
+ topics.register('current-formats', registry.help_topic,
+ 'Current formats')
+ topics.register('other-formats', registry.help_topic,
+ 'Other formats')
+ new = topics.get_detail('current-formats')
+ rest = topics.get_detail('other-formats')
+ experimental, deprecated = rest.split('Deprecated formats')
+ self.assertContainsRe(new, 'formats-help')
+ self.assertContainsRe(new,
+ ':knit:\n \(native\) \(default\) Format using knits\n')
+ self.assertContainsRe(experimental,
+ ':branch6:\n \(native\) Experimental successor to knit')
+ self.assertContainsRe(deprecated,
+ ':lazy:\n \(native\) Format registered lazily\n')
+ self.assertNotContainsRe(new, 'hidden')
+
+ def test_set_default_repository(self):
+ default_factory = controldir.format_registry.get('default')
+ old_default = [k for k, v in controldir.format_registry.iteritems()
+ if v == default_factory and k != 'default'][0]
+ controldir.format_registry.set_default_repository('dirstate-with-subtree')
+ try:
+ self.assertIs(controldir.format_registry.get('dirstate-with-subtree'),
+ controldir.format_registry.get('default'))
+ self.assertIs(
+ repository.format_registry.get_default().__class__,
+ knitrepo.RepositoryFormatKnit3)
+ finally:
+ controldir.format_registry.set_default_repository(old_default)
+
+ def test_aliases(self):
+ a_registry = controldir.ControlDirFormatRegistry()
+ a_registry.register('deprecated', DeprecatedBzrDirFormat,
+ 'Old format. Slower and does not support stuff',
+ deprecated=True)
+ a_registry.register('deprecatedalias', DeprecatedBzrDirFormat,
+ 'Old format. Slower and does not support stuff',
+ deprecated=True, alias=True)
+ self.assertEqual(frozenset(['deprecatedalias']), a_registry.aliases())
+
+
+class SampleBranch(bzrlib.branch.Branch):
+ """A dummy branch for guess what, dummy use."""
+
+ def __init__(self, dir):
+ self.bzrdir = dir
+
+
+class SampleRepository(bzrlib.repository.Repository):
+ """A dummy repo."""
+
+ def __init__(self, dir):
+ self.bzrdir = dir
+
+
+class SampleBzrDir(bzrdir.BzrDir):
+ """A sample BzrDir implementation to allow testing static methods."""
+
+ def create_repository(self, shared=False):
+ """See ControlDir.create_repository."""
+ return "A repository"
+
+ def open_repository(self):
+ """See ControlDir.open_repository."""
+ return SampleRepository(self)
+
+ def create_branch(self, name=None):
+ """See ControlDir.create_branch."""
+ if name is not None:
+ raise NoColocatedBranchSupport(self)
+ return SampleBranch(self)
+
+ def create_workingtree(self):
+ """See ControlDir.create_workingtree."""
+ return "A tree"
+
+
+class SampleBzrDirFormat(bzrdir.BzrDirFormat):
+ """A sample format
+
+ This format is initializable but unsupported, to aid in testing the
+ open and open_downlevel routines.
+ """
+
+ def get_format_string(self):
+ """See BzrDirFormat.get_format_string()."""
+ return "Sample .bzr dir format."
+
+ def initialize_on_transport(self, t):
+ """Create a bzr dir."""
+ t.mkdir('.bzr')
+ t.put_bytes('.bzr/branch-format', self.get_format_string())
+ return SampleBzrDir(t, self)
+
+ def is_supported(self):
+ return False
+
+ def open(self, transport, _found=None):
+ return "opened branch."
+
+ @classmethod
+ def from_string(cls, format_string):
+ return cls()
+
+
+class BzrDirFormatTest1(bzrdir.BzrDirMetaFormat1):
+
+ @staticmethod
+ def get_format_string():
+ return "Test format 1"
+
+
+class BzrDirFormatTest2(bzrdir.BzrDirMetaFormat1):
+
+ @staticmethod
+ def get_format_string():
+ return "Test format 2"
+
+
+class TestBzrDirFormat(TestCaseWithTransport):
+ """Tests for the BzrDirFormat facility."""
+
+ def test_find_format(self):
+ # is the right format object found for a branch?
+ # create a branch with a few known format objects.
+ bzrdir.BzrProber.formats.register(BzrDirFormatTest1.get_format_string(),
+ BzrDirFormatTest1())
+ self.addCleanup(bzrdir.BzrProber.formats.remove,
+ BzrDirFormatTest1.get_format_string())
+ bzrdir.BzrProber.formats.register(BzrDirFormatTest2.get_format_string(),
+ BzrDirFormatTest2())
+ self.addCleanup(bzrdir.BzrProber.formats.remove,
+ BzrDirFormatTest2.get_format_string())
+ t = self.get_transport()
+ self.build_tree(["foo/", "bar/"], transport=t)
+ def check_format(format, url):
+ format.initialize(url)
+ t = _mod_transport.get_transport_from_path(url)
+ found_format = bzrdir.BzrDirFormat.find_format(t)
+ self.assertIsInstance(found_format, format.__class__)
+ check_format(BzrDirFormatTest1(), "foo")
+ check_format(BzrDirFormatTest2(), "bar")
+
+ def test_find_format_nothing_there(self):
+ self.assertRaises(NotBranchError,
+ bzrdir.BzrDirFormat.find_format,
+ _mod_transport.get_transport_from_path('.'))
+
+ def test_find_format_unknown_format(self):
+ t = self.get_transport()
+ t.mkdir('.bzr')
+ t.put_bytes('.bzr/branch-format', '')
+ self.assertRaises(UnknownFormatError,
+ bzrdir.BzrDirFormat.find_format,
+ _mod_transport.get_transport_from_path('.'))
+
+ def test_register_unregister_format(self):
+ format = SampleBzrDirFormat()
+ url = self.get_url()
+ # make a bzrdir
+ format.initialize(url)
+ # register a format for it.
+ bzrdir.BzrProber.formats.register(format.get_format_string(), format)
+ # which bzrdir.Open will refuse (not supported)
+ self.assertRaises(UnsupportedFormatError, bzrdir.BzrDir.open, url)
+ # which bzrdir.open_containing will refuse (not supported)
+ self.assertRaises(UnsupportedFormatError, bzrdir.BzrDir.open_containing, url)
+ # but open_downlevel will work
+ t = _mod_transport.get_transport_from_url(url)
+ self.assertEqual(format.open(t), bzrdir.BzrDir.open_unsupported(url))
+ # unregister the format
+ bzrdir.BzrProber.formats.remove(format.get_format_string())
+ # now open_downlevel should fail too.
+ self.assertRaises(UnknownFormatError, bzrdir.BzrDir.open_unsupported, url)
+
+ def test_create_branch_and_repo_uses_default(self):
+ format = SampleBzrDirFormat()
+ branch = bzrdir.BzrDir.create_branch_and_repo(self.get_url(),
+ format=format)
+ self.assertTrue(isinstance(branch, SampleBranch))
+
+ def test_create_branch_and_repo_under_shared(self):
+ # creating a branch and repo in a shared repo uses the
+ # shared repository
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.make_repository('.', shared=True, format=format)
+ branch = bzrdir.BzrDir.create_branch_and_repo(
+ self.get_url('child'), format=format)
+ self.assertRaises(errors.NoRepositoryPresent,
+ branch.bzrdir.open_repository)
+
+ def test_create_branch_and_repo_under_shared_force_new(self):
+ # creating a branch and repo in a shared repo can be forced to
+ # make a new repo
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.make_repository('.', shared=True, format=format)
+ branch = bzrdir.BzrDir.create_branch_and_repo(self.get_url('child'),
+ force_new_repo=True,
+ format=format)
+ branch.bzrdir.open_repository()
+
+ def test_create_standalone_working_tree(self):
+ format = SampleBzrDirFormat()
+ # note this is deliberately readonly, as this failure should
+ # occur before any writes.
+ self.assertRaises(errors.NotLocalUrl,
+ bzrdir.BzrDir.create_standalone_workingtree,
+ self.get_readonly_url(), format=format)
+ tree = bzrdir.BzrDir.create_standalone_workingtree('.',
+ format=format)
+ self.assertEqual('A tree', tree)
+
+ def test_create_standalone_working_tree_under_shared_repo(self):
+ # create_standalone_workingtree always makes a repo.
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.make_repository('.', shared=True, format=format)
+ # note this is deliberately readonly, as this failure should
+ # occur before any writes.
+ self.assertRaises(errors.NotLocalUrl,
+ bzrdir.BzrDir.create_standalone_workingtree,
+ self.get_readonly_url('child'), format=format)
+ tree = bzrdir.BzrDir.create_standalone_workingtree('child',
+ format=format)
+ tree.bzrdir.open_repository()
+
+ def test_create_branch_convenience(self):
+ # outside a repo the default convenience output is a repo+branch+tree
+ format = controldir.format_registry.make_bzrdir('knit')
+ branch = bzrdir.BzrDir.create_branch_convenience('.', format=format)
+ branch.bzrdir.open_workingtree()
+ branch.bzrdir.open_repository()
+
+ def test_create_branch_convenience_possible_transports(self):
+ """Check that the optional 'possible_transports' is recognized"""
+ format = controldir.format_registry.make_bzrdir('knit')
+ t = self.get_transport()
+ branch = bzrdir.BzrDir.create_branch_convenience(
+ '.', format=format, possible_transports=[t])
+ branch.bzrdir.open_workingtree()
+ branch.bzrdir.open_repository()
+
+ def test_create_branch_convenience_root(self):
+ """Creating a branch at the root of a fs should work."""
+ self.vfs_transport_factory = memory.MemoryServer
+ # outside a repo the default convenience output is a repo+branch+tree
+ format = controldir.format_registry.make_bzrdir('knit')
+ branch = bzrdir.BzrDir.create_branch_convenience(self.get_url(),
+ format=format)
+ self.assertRaises(errors.NoWorkingTree,
+ branch.bzrdir.open_workingtree)
+ branch.bzrdir.open_repository()
+
+ def test_create_branch_convenience_under_shared_repo(self):
+ # inside a repo the default convenience output is a branch whose working
+ # tree follows the repo tree policy
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.make_repository('.', shared=True, format=format)
+ branch = bzrdir.BzrDir.create_branch_convenience('child',
+ format=format)
+ branch.bzrdir.open_workingtree()
+ self.assertRaises(errors.NoRepositoryPresent,
+ branch.bzrdir.open_repository)
+
+ def test_create_branch_convenience_under_shared_repo_force_no_tree(self):
+ # inside a repo the default convenience output is a branch whose working
+ # tree follows the repo tree policy, but we can override that
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.make_repository('.', shared=True, format=format)
+ branch = bzrdir.BzrDir.create_branch_convenience('child',
+ force_new_tree=False, format=format)
+ self.assertRaises(errors.NoWorkingTree,
+ branch.bzrdir.open_workingtree)
+ self.assertRaises(errors.NoRepositoryPresent,
+ branch.bzrdir.open_repository)
+
+ def test_create_branch_convenience_under_shared_repo_no_tree_policy(self):
+ # inside a repo the default convenience output is a branch whose working
+ # tree follows the repo tree policy
+ format = controldir.format_registry.make_bzrdir('knit')
+ repo = self.make_repository('.', shared=True, format=format)
+ repo.set_make_working_trees(False)
+ branch = bzrdir.BzrDir.create_branch_convenience('child',
+ format=format)
+ self.assertRaises(errors.NoWorkingTree,
+ branch.bzrdir.open_workingtree)
+ self.assertRaises(errors.NoRepositoryPresent,
+ branch.bzrdir.open_repository)
+
+ def test_create_branch_convenience_under_shared_repo_no_tree_policy_force_tree(self):
+ # inside a repo the default convenience output is a branch whose working
+ # tree follows the repo tree policy, but we can override that
+ format = controldir.format_registry.make_bzrdir('knit')
+ repo = self.make_repository('.', shared=True, format=format)
+ repo.set_make_working_trees(False)
+ branch = bzrdir.BzrDir.create_branch_convenience('child',
+ force_new_tree=True, format=format)
+ branch.bzrdir.open_workingtree()
+ self.assertRaises(errors.NoRepositoryPresent,
+ branch.bzrdir.open_repository)
+
+ def test_create_branch_convenience_under_shared_repo_force_new_repo(self):
+ # inside a repo the default convenience output is overridable to give
+ # repo+branch+tree
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.make_repository('.', shared=True, format=format)
+ branch = bzrdir.BzrDir.create_branch_convenience('child',
+ force_new_repo=True, format=format)
+ branch.bzrdir.open_repository()
+ branch.bzrdir.open_workingtree()
+
+
+class TestRepositoryAcquisitionPolicy(TestCaseWithTransport):
+
+ def test_acquire_repository_standalone(self):
+ """The default acquisition policy should create a standalone branch."""
+ my_bzrdir = self.make_bzrdir('.')
+ repo_policy = my_bzrdir.determine_repository_policy()
+ repo, is_new = repo_policy.acquire_repository()
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ my_bzrdir.root_transport.base)
+ self.assertFalse(repo.is_shared())
+
+ def test_determine_stacking_policy(self):
+ parent_bzrdir = self.make_bzrdir('.')
+ child_bzrdir = self.make_bzrdir('child')
+ parent_bzrdir.get_config().set_default_stack_on('http://example.org')
+ repo_policy = child_bzrdir.determine_repository_policy()
+ self.assertEqual('http://example.org', repo_policy._stack_on)
+
+ def test_determine_stacking_policy_relative(self):
+ parent_bzrdir = self.make_bzrdir('.')
+ child_bzrdir = self.make_bzrdir('child')
+ parent_bzrdir.get_config().set_default_stack_on('child2')
+ repo_policy = child_bzrdir.determine_repository_policy()
+ self.assertEqual('child2', repo_policy._stack_on)
+ self.assertEqual(parent_bzrdir.root_transport.base,
+ repo_policy._stack_on_pwd)
+
+ def prepare_default_stacking(self, child_format='1.6'):
+ parent_bzrdir = self.make_bzrdir('.')
+ child_branch = self.make_branch('child', format=child_format)
+ parent_bzrdir.get_config().set_default_stack_on(child_branch.base)
+ new_child_transport = parent_bzrdir.transport.clone('child2')
+ return child_branch, new_child_transport
+
+ def test_clone_on_transport_obeys_stacking_policy(self):
+ child_branch, new_child_transport = self.prepare_default_stacking()
+ new_child = child_branch.bzrdir.clone_on_transport(new_child_transport)
+ self.assertEqual(child_branch.base,
+ new_child.open_branch().get_stacked_on_url())
+
+ def test_default_stacking_with_stackable_branch_unstackable_repo(self):
+ # Make stackable source branch with an unstackable repo format.
+ source_bzrdir = self.make_bzrdir('source')
+ knitpack_repo.RepositoryFormatKnitPack1().initialize(source_bzrdir)
+ source_branch = bzrlib.branch.BzrBranchFormat7().initialize(
+ source_bzrdir)
+ # Make a directory with a default stacking policy
+ parent_bzrdir = self.make_bzrdir('parent')
+ stacked_on = self.make_branch('parent/stacked-on', format='pack-0.92')
+ parent_bzrdir.get_config().set_default_stack_on(stacked_on.base)
+ # Clone source into directory
+ target = source_bzrdir.clone(self.get_url('parent/target'))
+
+ def test_format_initialize_on_transport_ex_stacked_on(self):
+ # trunk is a stackable format. Note that it's in the same server area,
+ # which is what Launchpad does, but not sufficient to exercise the
+ # general case.
+ trunk = self.make_branch('trunk', format='1.9')
+ t = self.get_transport('stacked')
+ old_fmt = controldir.format_registry.make_bzrdir('pack-0.92')
+ repo_name = old_fmt.repository_format.network_name()
+ # Should end up with a 1.9 format (stackable)
+ repo, control, require_stacking, repo_policy = \
+ old_fmt.initialize_on_transport_ex(t,
+ repo_format_name=repo_name, stacked_on='../trunk',
+ stack_on_pwd=t.base)
+ if repo is not None:
+ # Repositories are open write-locked
+ self.assertTrue(repo.is_write_locked())
+ self.addCleanup(repo.unlock)
+ else:
+ repo = control.open_repository()
+ self.assertIsInstance(control, bzrdir.BzrDir)
+ opened = bzrdir.BzrDir.open(t.base)
+ if not isinstance(old_fmt, remote.RemoteBzrDirFormat):
+ self.assertEqual(control._format.network_name(),
+ old_fmt.network_name())
+ self.assertEqual(control._format.network_name(),
+ opened._format.network_name())
+ self.assertEqual(control.__class__, opened.__class__)
+ self.assertLength(1, repo._fallback_repositories)
+
+ def test_sprout_obeys_stacking_policy(self):
+ child_branch, new_child_transport = self.prepare_default_stacking()
+ new_child = child_branch.bzrdir.sprout(new_child_transport.base)
+ self.assertEqual(child_branch.base,
+ new_child.open_branch().get_stacked_on_url())
+
+ def test_clone_ignores_policy_for_unsupported_formats(self):
+ child_branch, new_child_transport = self.prepare_default_stacking(
+ child_format='pack-0.92')
+ new_child = child_branch.bzrdir.clone_on_transport(new_child_transport)
+ self.assertRaises(errors.UnstackableBranchFormat,
+ new_child.open_branch().get_stacked_on_url)
+
+ def test_sprout_ignores_policy_for_unsupported_formats(self):
+ child_branch, new_child_transport = self.prepare_default_stacking(
+ child_format='pack-0.92')
+ new_child = child_branch.bzrdir.sprout(new_child_transport.base)
+ self.assertRaises(errors.UnstackableBranchFormat,
+ new_child.open_branch().get_stacked_on_url)
+
+ def test_sprout_upgrades_format_if_stacked_specified(self):
+ child_branch, new_child_transport = self.prepare_default_stacking(
+ child_format='pack-0.92')
+ new_child = child_branch.bzrdir.sprout(new_child_transport.base,
+ stacked=True)
+ self.assertEqual(child_branch.bzrdir.root_transport.base,
+ new_child.open_branch().get_stacked_on_url())
+ repo = new_child.open_repository()
+ self.assertTrue(repo._format.supports_external_lookups)
+ self.assertFalse(repo.supports_rich_root())
+
+ def test_clone_on_transport_upgrades_format_if_stacked_on_specified(self):
+ child_branch, new_child_transport = self.prepare_default_stacking(
+ child_format='pack-0.92')
+ new_child = child_branch.bzrdir.clone_on_transport(new_child_transport,
+ stacked_on=child_branch.bzrdir.root_transport.base)
+ self.assertEqual(child_branch.bzrdir.root_transport.base,
+ new_child.open_branch().get_stacked_on_url())
+ repo = new_child.open_repository()
+ self.assertTrue(repo._format.supports_external_lookups)
+ self.assertFalse(repo.supports_rich_root())
+
+ def test_sprout_upgrades_to_rich_root_format_if_needed(self):
+ child_branch, new_child_transport = self.prepare_default_stacking(
+ child_format='rich-root-pack')
+ new_child = child_branch.bzrdir.sprout(new_child_transport.base,
+ stacked=True)
+ repo = new_child.open_repository()
+ self.assertTrue(repo._format.supports_external_lookups)
+ self.assertTrue(repo.supports_rich_root())
+
+ def test_add_fallback_repo_handles_absolute_urls(self):
+ stack_on = self.make_branch('stack_on', format='1.6')
+ repo = self.make_repository('repo', format='1.6')
+ policy = bzrdir.UseExistingRepository(repo, stack_on.base)
+ policy._add_fallback(repo)
+
+ def test_add_fallback_repo_handles_relative_urls(self):
+ stack_on = self.make_branch('stack_on', format='1.6')
+ repo = self.make_repository('repo', format='1.6')
+ policy = bzrdir.UseExistingRepository(repo, '.', stack_on.base)
+ policy._add_fallback(repo)
+
+ def test_configure_relative_branch_stacking_url(self):
+ stack_on = self.make_branch('stack_on', format='1.6')
+ stacked = self.make_branch('stack_on/stacked', format='1.6')
+ policy = bzrdir.UseExistingRepository(stacked.repository,
+ '.', stack_on.base)
+ policy.configure_branch(stacked)
+ self.assertEqual('..', stacked.get_stacked_on_url())
+
+ def test_relative_branch_stacking_to_absolute(self):
+ stack_on = self.make_branch('stack_on', format='1.6')
+ stacked = self.make_branch('stack_on/stacked', format='1.6')
+ policy = bzrdir.UseExistingRepository(stacked.repository,
+ '.', self.get_readonly_url('stack_on'))
+ policy.configure_branch(stacked)
+ self.assertEqual(self.get_readonly_url('stack_on'),
+ stacked.get_stacked_on_url())
+
+
+class ChrootedTests(TestCaseWithTransport):
+ """A support class that provides readonly urls outside the local namespace.
+
+ This is done by checking if self.transport_server is a MemoryServer. If it
+ is, then we are chrooted already; if it is not, then an HttpServer is used
+ for readonly urls.
+ """
+
+ def setUp(self):
+ super(ChrootedTests, self).setUp()
+ if not self.vfs_transport_factory == memory.MemoryServer:
+ self.transport_readonly_server = http_server.HttpServer
+
+ def local_branch_path(self, branch):
+ return os.path.realpath(urlutils.local_path_from_url(branch.base))
+
+ def test_open_containing(self):
+ self.assertRaises(NotBranchError, bzrdir.BzrDir.open_containing,
+ self.get_readonly_url(''))
+ self.assertRaises(NotBranchError, bzrdir.BzrDir.open_containing,
+ self.get_readonly_url('g/p/q'))
+ control = bzrdir.BzrDir.create(self.get_url())
+ branch, relpath = bzrdir.BzrDir.open_containing(self.get_readonly_url(''))
+ self.assertEqual('', relpath)
+ branch, relpath = bzrdir.BzrDir.open_containing(self.get_readonly_url('g/p/q'))
+ self.assertEqual('g/p/q', relpath)
+
+ def test_open_containing_tree_branch_or_repository_empty(self):
+ self.assertRaises(errors.NotBranchError,
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository,
+ self.get_readonly_url(''))
+
+ def test_open_containing_tree_branch_or_repository_all(self):
+ self.make_branch_and_tree('topdir')
+ tree, branch, repo, relpath = \
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository(
+ 'topdir/foo')
+ self.assertEqual(os.path.realpath('topdir'),
+ os.path.realpath(tree.basedir))
+ self.assertEqual(os.path.realpath('topdir'),
+ self.local_branch_path(branch))
+ self.assertEqual(
+ osutils.realpath(os.path.join('topdir', '.bzr', 'repository')),
+ repo.bzrdir.transport.local_abspath('repository'))
+ self.assertEqual(relpath, 'foo')
+
+ def test_open_containing_tree_branch_or_repository_no_tree(self):
+ self.make_branch('branch')
+ tree, branch, repo, relpath = \
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository(
+ 'branch/foo')
+ self.assertEqual(tree, None)
+ self.assertEqual(os.path.realpath('branch'),
+ self.local_branch_path(branch))
+ self.assertEqual(
+ osutils.realpath(os.path.join('branch', '.bzr', 'repository')),
+ repo.bzrdir.transport.local_abspath('repository'))
+ self.assertEqual(relpath, 'foo')
+
+ def test_open_containing_tree_branch_or_repository_repo(self):
+ self.make_repository('repo')
+ tree, branch, repo, relpath = \
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository(
+ 'repo')
+ self.assertEqual(tree, None)
+ self.assertEqual(branch, None)
+ self.assertEqual(
+ osutils.realpath(os.path.join('repo', '.bzr', 'repository')),
+ repo.bzrdir.transport.local_abspath('repository'))
+ self.assertEqual(relpath, '')
+
+ def test_open_containing_tree_branch_or_repository_shared_repo(self):
+ self.make_repository('shared', shared=True)
+ bzrdir.BzrDir.create_branch_convenience('shared/branch',
+ force_new_tree=False)
+ tree, branch, repo, relpath = \
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository(
+ 'shared/branch')
+ self.assertEqual(tree, None)
+ self.assertEqual(os.path.realpath('shared/branch'),
+ self.local_branch_path(branch))
+ self.assertEqual(
+ osutils.realpath(os.path.join('shared', '.bzr', 'repository')),
+ repo.bzrdir.transport.local_abspath('repository'))
+ self.assertEqual(relpath, '')
+
+ def test_open_containing_tree_branch_or_repository_branch_subdir(self):
+ self.make_branch_and_tree('foo')
+ self.build_tree(['foo/bar/'])
+ tree, branch, repo, relpath = \
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository(
+ 'foo/bar')
+ self.assertEqual(os.path.realpath('foo'),
+ os.path.realpath(tree.basedir))
+ self.assertEqual(os.path.realpath('foo'),
+ self.local_branch_path(branch))
+ self.assertEqual(
+ osutils.realpath(os.path.join('foo', '.bzr', 'repository')),
+ repo.bzrdir.transport.local_abspath('repository'))
+ self.assertEqual(relpath, 'bar')
+
+ def test_open_containing_tree_branch_or_repository_repo_subdir(self):
+ self.make_repository('bar')
+ self.build_tree(['bar/baz/'])
+ tree, branch, repo, relpath = \
+ bzrdir.BzrDir.open_containing_tree_branch_or_repository(
+ 'bar/baz')
+ self.assertEqual(tree, None)
+ self.assertEqual(branch, None)
+ self.assertEqual(
+ osutils.realpath(os.path.join('bar', '.bzr', 'repository')),
+ repo.bzrdir.transport.local_abspath('repository'))
+ self.assertEqual(relpath, 'baz')
+
+ def test_open_containing_from_transport(self):
+ self.assertRaises(NotBranchError,
+ bzrdir.BzrDir.open_containing_from_transport,
+ _mod_transport.get_transport_from_url(self.get_readonly_url('')))
+ self.assertRaises(NotBranchError,
+ bzrdir.BzrDir.open_containing_from_transport,
+ _mod_transport.get_transport_from_url(
+ self.get_readonly_url('g/p/q')))
+ control = bzrdir.BzrDir.create(self.get_url())
+ branch, relpath = bzrdir.BzrDir.open_containing_from_transport(
+ _mod_transport.get_transport_from_url(
+ self.get_readonly_url('')))
+ self.assertEqual('', relpath)
+ branch, relpath = bzrdir.BzrDir.open_containing_from_transport(
+ _mod_transport.get_transport_from_url(
+ self.get_readonly_url('g/p/q')))
+ self.assertEqual('g/p/q', relpath)
+
+ def test_open_containing_tree_or_branch(self):
+ self.make_branch_and_tree('topdir')
+ tree, branch, relpath = bzrdir.BzrDir.open_containing_tree_or_branch(
+ 'topdir/foo')
+ self.assertEqual(os.path.realpath('topdir'),
+ os.path.realpath(tree.basedir))
+ self.assertEqual(os.path.realpath('topdir'),
+ self.local_branch_path(branch))
+ self.assertIs(tree.bzrdir, branch.bzrdir)
+ self.assertEqual('foo', relpath)
+ # opening from non-local should not return the tree
+ tree, branch, relpath = bzrdir.BzrDir.open_containing_tree_or_branch(
+ self.get_readonly_url('topdir/foo'))
+ self.assertEqual(None, tree)
+ self.assertEqual('foo', relpath)
+ # without a tree:
+ self.make_branch('topdir/foo')
+ tree, branch, relpath = bzrdir.BzrDir.open_containing_tree_or_branch(
+ 'topdir/foo')
+ self.assertIs(tree, None)
+ self.assertEqual(os.path.realpath('topdir/foo'),
+ self.local_branch_path(branch))
+ self.assertEqual('', relpath)
+
+ def test_open_tree_or_branch(self):
+ self.make_branch_and_tree('topdir')
+ tree, branch = bzrdir.BzrDir.open_tree_or_branch('topdir')
+ self.assertEqual(os.path.realpath('topdir'),
+ os.path.realpath(tree.basedir))
+ self.assertEqual(os.path.realpath('topdir'),
+ self.local_branch_path(branch))
+ self.assertIs(tree.bzrdir, branch.bzrdir)
+ # opening from non-local should not return the tree
+ tree, branch = bzrdir.BzrDir.open_tree_or_branch(
+ self.get_readonly_url('topdir'))
+ self.assertEqual(None, tree)
+ # without a tree:
+ self.make_branch('topdir/foo')
+ tree, branch = bzrdir.BzrDir.open_tree_or_branch('topdir/foo')
+ self.assertIs(tree, None)
+ self.assertEqual(os.path.realpath('topdir/foo'),
+ self.local_branch_path(branch))
+
+ def test_open_from_transport(self):
+ # transport pointing at bzrdir should give a bzrdir with root transport
+ # set to the given transport
+ control = bzrdir.BzrDir.create(self.get_url())
+ t = self.get_transport()
+ opened_bzrdir = bzrdir.BzrDir.open_from_transport(t)
+ self.assertEqual(t.base, opened_bzrdir.root_transport.base)
+ self.assertIsInstance(opened_bzrdir, bzrdir.BzrDir)
+
+ def test_open_from_transport_no_bzrdir(self):
+ t = self.get_transport()
+ self.assertRaises(NotBranchError, bzrdir.BzrDir.open_from_transport, t)
+
+ def test_open_from_transport_bzrdir_in_parent(self):
+ control = bzrdir.BzrDir.create(self.get_url())
+ t = self.get_transport()
+ t.mkdir('subdir')
+ t = t.clone('subdir')
+ self.assertRaises(NotBranchError, bzrdir.BzrDir.open_from_transport, t)
+
+ def test_sprout_recursive(self):
+ tree = self.make_branch_and_tree('tree1',
+ format='development-subtree')
+ sub_tree = self.make_branch_and_tree('tree1/subtree',
+ format='development-subtree')
+ sub_tree.set_root_id('subtree-root')
+ tree.add_reference(sub_tree)
+ self.build_tree(['tree1/subtree/file'])
+ sub_tree.add('file')
+ tree.commit('Initial commit')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ tree2.lock_read()
+ self.addCleanup(tree2.unlock)
+ self.assertPathExists('tree2/subtree/file')
+ self.assertEqual('tree-reference', tree2.kind('subtree-root'))
+
+ def test_cloning_metadir(self):
+ """Ensure that cloning metadir is suitable"""
+ bzrdir = self.make_bzrdir('bzrdir')
+ bzrdir.cloning_metadir()
+ branch = self.make_branch('branch', format='knit')
+ format = branch.bzrdir.cloning_metadir()
+ self.assertIsInstance(format.workingtree_format,
+ workingtree_4.WorkingTreeFormat6)
+
+ def test_sprout_recursive_treeless(self):
+ tree = self.make_branch_and_tree('tree1',
+ format='development-subtree')
+ sub_tree = self.make_branch_and_tree('tree1/subtree',
+ format='development-subtree')
+ tree.add_reference(sub_tree)
+ self.build_tree(['tree1/subtree/file'])
+ sub_tree.add('file')
+ tree.commit('Initial commit')
+ # The following line forces the orphaning to reveal bug #634470
+ tree.branch.get_config_stack().set(
+ 'bzr.transform.orphan_policy', 'move')
+ tree.bzrdir.destroy_workingtree()
+ # FIXME: subtree/.bzr is left here which allows the test to pass (or
+ # fail :-( ) -- vila 20100909
+ repo = self.make_repository('repo', shared=True,
+ format='development-subtree')
+ repo.set_make_working_trees(False)
+ # FIXME: we just deleted the workingtree and now we want to use it ????
+ # At a minimum, we should use tree.branch below (but this fails too
+ # currently) or stop calling this test 'treeless'. Specifically, I've
+ # turned the line below into an assertRaises when 'subtree/.bzr' is
+ # orphaned and sprout tries to access the branch there (which is left
+ # by bzrdir.BzrDirMeta1.destroy_workingtree when it ignores the
+ # [DeletingParent('Not deleting', u'subtree', None)] conflict). See bug
+ # #634470. -- vila 20100909
+ self.assertRaises(errors.NotBranchError,
+ tree.bzrdir.sprout, 'repo/tree2')
+# self.assertPathExists('repo/tree2/subtree')
+# self.assertPathDoesNotExist('repo/tree2/subtree/file')
+
+ def make_foo_bar_baz(self):
+ foo = bzrdir.BzrDir.create_branch_convenience('foo').bzrdir
+ bar = self.make_branch('foo/bar').bzrdir
+ baz = self.make_branch('baz').bzrdir
+ return foo, bar, baz
+
+ def test_find_bzrdirs(self):
+ foo, bar, baz = self.make_foo_bar_baz()
+ t = self.get_transport()
+ self.assertEqualBzrdirs([baz, foo, bar], bzrdir.BzrDir.find_bzrdirs(t))
+
+ def make_fake_permission_denied_transport(self, transport, paths):
+ """Create a transport that raises PermissionDenied for some paths."""
+ def filter(path):
+ if path in paths:
+ raise errors.PermissionDenied(path)
+ return path
+ path_filter_server = pathfilter.PathFilteringServer(transport, filter)
+ path_filter_server.start_server()
+ self.addCleanup(path_filter_server.stop_server)
+ path_filter_transport = pathfilter.PathFilteringTransport(
+ path_filter_server, '.')
+ return (path_filter_server, path_filter_transport)
+
+ def assertBranchUrlsEndWith(self, expect_url_suffix, actual_bzrdirs):
+ """Check that each branch url ends with the given suffix."""
+ for actual_bzrdir in actual_bzrdirs:
+ self.assertEndsWith(actual_bzrdir.user_url, expect_url_suffix)
+
+ def test_find_bzrdirs_permission_denied(self):
+ foo, bar, baz = self.make_foo_bar_baz()
+ t = self.get_transport()
+ path_filter_server, path_filter_transport = \
+ self.make_fake_permission_denied_transport(t, ['foo'])
+ # local transport
+ self.assertBranchUrlsEndWith('/baz/',
+ bzrdir.BzrDir.find_bzrdirs(path_filter_transport))
+ # smart server
+ smart_transport = self.make_smart_server('.',
+ backing_server=path_filter_server)
+ self.assertBranchUrlsEndWith('/baz/',
+ bzrdir.BzrDir.find_bzrdirs(smart_transport))
+
+ def test_find_bzrdirs_list_current(self):
+ def list_current(transport):
+ return [s for s in transport.list_dir('') if s != 'baz']
+
+ foo, bar, baz = self.make_foo_bar_baz()
+ t = self.get_transport()
+ self.assertEqualBzrdirs(
+ [foo, bar],
+ bzrdir.BzrDir.find_bzrdirs(t, list_current=list_current))
+
+ def test_find_bzrdirs_evaluate(self):
+ def evaluate(bzrdir):
+ try:
+ repo = bzrdir.open_repository()
+ except errors.NoRepositoryPresent:
+ return True, bzrdir.root_transport.base
+ else:
+ return False, bzrdir.root_transport.base
+
+ foo, bar, baz = self.make_foo_bar_baz()
+ t = self.get_transport()
+ self.assertEqual([baz.root_transport.base, foo.root_transport.base],
+ list(bzrdir.BzrDir.find_bzrdirs(t, evaluate=evaluate)))
+
+ def assertEqualBzrdirs(self, first, second):
+ first = list(first)
+ second = list(second)
+ self.assertEqual(len(first), len(second))
+ for x, y in zip(first, second):
+ self.assertEqual(x.root_transport.base, y.root_transport.base)
+
+ def test_find_branches(self):
+ root = self.make_repository('', shared=True)
+ foo, bar, baz = self.make_foo_bar_baz()
+ qux = self.make_bzrdir('foo/qux')
+ t = self.get_transport()
+ branches = bzrdir.BzrDir.find_branches(t)
+ self.assertEqual(baz.root_transport.base, branches[0].base)
+ self.assertEqual(foo.root_transport.base, branches[1].base)
+ self.assertEqual(bar.root_transport.base, branches[2].base)
+
+ # ensure this works without a top-level repo
+ branches = bzrdir.BzrDir.find_branches(t.clone('foo'))
+ self.assertEqual(foo.root_transport.base, branches[0].base)
+ self.assertEqual(bar.root_transport.base, branches[1].base)
+
+
+class TestMissingRepoBranchesSkipped(TestCaseWithMemoryTransport):
+
+ def test_find_bzrdirs_missing_repo(self):
+ t = self.get_transport()
+ arepo = self.make_repository('arepo', shared=True)
+ abranch_url = arepo.user_url + '/abranch'
+ abranch = bzrdir.BzrDir.create(abranch_url).create_branch()
+ t.delete_tree('arepo/.bzr')
+ self.assertRaises(errors.NoRepositoryPresent,
+ branch.Branch.open, abranch_url)
+ self.make_branch('baz')
+ for actual_bzrdir in bzrdir.BzrDir.find_branches(t):
+ self.assertEndsWith(actual_bzrdir.user_url, '/baz/')
+
+
+class TestMeta1DirFormat(TestCaseWithTransport):
+ """Tests specific to the meta1 dir format."""
+
+ def test_right_base_dirs(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ t = dir.transport
+ branch_base = t.clone('branch').base
+ self.assertEqual(branch_base, dir.get_branch_transport(None).base)
+ self.assertEqual(branch_base,
+ dir.get_branch_transport(BzrBranchFormat5()).base)
+ repository_base = t.clone('repository').base
+ self.assertEqual(repository_base, dir.get_repository_transport(None).base)
+ repository_format = repository.format_registry.get_default()
+ self.assertEqual(repository_base,
+ dir.get_repository_transport(repository_format).base)
+ checkout_base = t.clone('checkout').base
+ self.assertEqual(checkout_base, dir.get_workingtree_transport(None).base)
+ self.assertEqual(checkout_base,
+ dir.get_workingtree_transport(workingtree_3.WorkingTreeFormat3()).base)
+
+ def test_meta1dir_uses_lockdir(self):
+ """Meta1 format uses a LockDir to guard the whole directory, not a file."""
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ t = dir.transport
+ self.assertIsDirectory('branch-lock', t)
+
+ def test_comparison(self):
+ """Equality and inequality behave properly.
+
+ Metadirs should compare equal iff they have the same repo, branch and
+ tree formats.
+ """
+ mydir = controldir.format_registry.make_bzrdir('knit')
+ self.assertEqual(mydir, mydir)
+ self.assertFalse(mydir != mydir)
+ otherdir = controldir.format_registry.make_bzrdir('knit')
+ self.assertEqual(otherdir, mydir)
+ self.assertFalse(otherdir != mydir)
+ otherdir2 = controldir.format_registry.make_bzrdir('development-subtree')
+ self.assertNotEqual(otherdir2, mydir)
+ self.assertFalse(otherdir2 == mydir)
+
+ def test_with_features(self):
+ tree = self.make_branch_and_tree('tree', format='2a')
+ tree.bzrdir.update_feature_flags({"bar": "required"})
+ self.assertRaises(errors.MissingFeature, bzrdir.BzrDir.open, 'tree')
+ bzrdir.BzrDirMetaFormat1.register_feature('bar')
+ self.addCleanup(bzrdir.BzrDirMetaFormat1.unregister_feature, 'bar')
+ dir = bzrdir.BzrDir.open('tree')
+ self.assertEquals("required", dir._format.features.get("bar"))
+ tree.bzrdir.update_feature_flags({"bar": None, "nonexistant": None})
+ dir = bzrdir.BzrDir.open('tree')
+ self.assertEquals({}, dir._format.features)
+
+ def test_needs_conversion_different_working_tree(self):
+ # meta1dirs need a conversion if any element is not the default.
+ new_format = controldir.format_registry.make_bzrdir('dirstate')
+ tree = self.make_branch_and_tree('tree', format='knit')
+ self.assertTrue(tree.bzrdir.needs_format_conversion(
+ new_format))
+
+ def test_initialize_on_format_uses_smart_transport(self):
+ self.setup_smart_server_with_call_log()
+ new_format = controldir.format_registry.make_bzrdir('dirstate')
+ transport = self.get_transport('target')
+ transport.ensure_base()
+ self.reset_smart_call_log()
+ instance = new_format.initialize_on_transport(transport)
+ self.assertIsInstance(instance, remote.RemoteBzrDir)
+ rpc_count = len(self.hpss_calls)
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertEqual(2, rpc_count)
+
+
+class NonLocalTests(TestCaseWithTransport):
+ """Tests for bzrdir static behaviour on non local paths."""
+
+ def setUp(self):
+ super(NonLocalTests, self).setUp()
+ self.vfs_transport_factory = memory.MemoryServer
+
+ def test_create_branch_convenience(self):
+ # outside a repo the default convenience output is a repo+branch+tree
+ format = controldir.format_registry.make_bzrdir('knit')
+ branch = bzrdir.BzrDir.create_branch_convenience(
+ self.get_url('foo'), format=format)
+ self.assertRaises(errors.NoWorkingTree,
+ branch.bzrdir.open_workingtree)
+ branch.bzrdir.open_repository()
+
+ def test_create_branch_convenience_force_tree_not_local_fails(self):
+ # outside a repo the default convenience output is a repo+branch+tree
+ format = controldir.format_registry.make_bzrdir('knit')
+ self.assertRaises(errors.NotLocalUrl,
+ bzrdir.BzrDir.create_branch_convenience,
+ self.get_url('foo'),
+ force_new_tree=True,
+ format=format)
+ t = self.get_transport()
+ self.assertFalse(t.has('foo'))
+
+ def test_clone(self):
+ # clone into a nonlocal path works
+ format = controldir.format_registry.make_bzrdir('knit')
+ branch = bzrdir.BzrDir.create_branch_convenience('local',
+ format=format)
+ branch.bzrdir.open_workingtree()
+ result = branch.bzrdir.clone(self.get_url('remote'))
+ self.assertRaises(errors.NoWorkingTree,
+ result.open_workingtree)
+ result.open_branch()
+ result.open_repository()
+
+ def test_checkout_metadir(self):
+ # checkout_metadir has a reasonable working tree format even when no
+ # working tree is present
+ self.make_branch('branch-knit2', format='dirstate-with-subtree')
+ my_bzrdir = bzrdir.BzrDir.open(self.get_url('branch-knit2'))
+ checkout_format = my_bzrdir.checkout_metadir()
+ self.assertIsInstance(checkout_format.workingtree_format,
+ workingtree_4.WorkingTreeFormat4)
+
+
+class TestHTTPRedirections(object):
+ """Test redirection between two http servers.
+
+ This MUST be used by daughter classes that also inherit from
+ TestCaseWithTwoWebservers.
+
+ We can't inherit directly from TestCaseWithTwoWebservers or the
+ test framework will try to create an instance which cannot
+ run, its implementation being incomplete.
+ """
+
+ def create_transport_readonly_server(self):
+ # We don't set the http protocol version, relying on the default
+ return http_utils.HTTPServerRedirecting()
+
+ def create_transport_secondary_server(self):
+ # We don't set the http protocol version, relying on the default
+ return http_utils.HTTPServerRedirecting()
+
+ def setUp(self):
+ super(TestHTTPRedirections, self).setUp()
+ # The redirections will point to the new server
+ self.new_server = self.get_readonly_server()
+ # The requests to the old server will be redirected
+ self.old_server = self.get_secondary_server()
+ # Configure the redirections
+ self.old_server.redirect_to(self.new_server.host, self.new_server.port)
+
+ def test_loop(self):
+ # Both servers redirect to each other creating a loop
+ self.new_server.redirect_to(self.old_server.host, self.old_server.port)
+ # Starting from either server should loop
+ old_url = self._qualified_url(self.old_server.host,
+ self.old_server.port)
+ oldt = self._transport(old_url)
+ self.assertRaises(errors.NotBranchError,
+ bzrdir.BzrDir.open_from_transport, oldt)
+ new_url = self._qualified_url(self.new_server.host,
+ self.new_server.port)
+ newt = self._transport(new_url)
+ self.assertRaises(errors.NotBranchError,
+ bzrdir.BzrDir.open_from_transport, newt)
+
+ def test_qualifier_preserved(self):
+ wt = self.make_branch_and_tree('branch')
+ old_url = self._qualified_url(self.old_server.host,
+ self.old_server.port)
+ start = self._transport(old_url).clone('branch')
+ bdir = bzrdir.BzrDir.open_from_transport(start)
+ # Redirection should preserve the qualifier, hence the transport class
+ # itself.
+ self.assertIsInstance(bdir.root_transport, type(start))
+
+
+class TestHTTPRedirections_urllib(TestHTTPRedirections,
+ http_utils.TestCaseWithTwoWebservers):
+ """Tests redirections for urllib implementation"""
+
+ _transport = HttpTransport_urllib
+
+ def _qualified_url(self, host, port):
+ result = 'http+urllib://%s:%s' % (host, port)
+ self.permit_url(result)
+ return result
+
+
+class TestHTTPRedirections_pycurl(TestWithTransport_pycurl,
+ TestHTTPRedirections,
+ http_utils.TestCaseWithTwoWebservers):
+ """Tests redirections for pycurl implementation"""
+
+ def _qualified_url(self, host, port):
+ result = 'http+pycurl://%s:%s' % (host, port)
+ self.permit_url(result)
+ return result
+
+
+class TestHTTPRedirections_nosmart(TestHTTPRedirections,
+ http_utils.TestCaseWithTwoWebservers):
+ """Tests redirections for the nosmart decorator"""
+
+ _transport = NoSmartTransportDecorator
+
+ def _qualified_url(self, host, port):
+ result = 'nosmart+http://%s:%s' % (host, port)
+ self.permit_url(result)
+ return result
+
+
+class TestHTTPRedirections_readonly(TestHTTPRedirections,
+ http_utils.TestCaseWithTwoWebservers):
+ """Tests redirections for readonly decoratror"""
+
+ _transport = ReadonlyTransportDecorator
+
+ def _qualified_url(self, host, port):
+ result = 'readonly+http://%s:%s' % (host, port)
+ self.permit_url(result)
+ return result
+
+
+class TestDotBzrHidden(TestCaseWithTransport):
+
+ ls = ['ls']
+ if sys.platform == 'win32':
+ ls = [os.environ['COMSPEC'], '/C', 'dir', '/B']
+
+ def get_ls(self):
+ f = subprocess.Popen(self.ls, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = f.communicate()
+ self.assertEqual(0, f.returncode, 'Calling %s failed: %s'
+ % (self.ls, err))
+ return out.splitlines()
+
+ def test_dot_bzr_hidden(self):
+ if sys.platform == 'win32' and not win32utils.has_win32file:
+ raise TestSkipped('unable to make file hidden without pywin32 library')
+ b = bzrdir.BzrDir.create('.')
+ self.build_tree(['a'])
+ self.assertEquals(['a'], self.get_ls())
+
+ def test_dot_bzr_hidden_with_url(self):
+ if sys.platform == 'win32' and not win32utils.has_win32file:
+ raise TestSkipped('unable to make file hidden without pywin32 library')
+ b = bzrdir.BzrDir.create(urlutils.local_path_to_url('.'))
+ self.build_tree(['a'])
+ self.assertEquals(['a'], self.get_ls())
+
+
+class _TestBzrDirFormat(bzrdir.BzrDirMetaFormat1):
+ """Test BzrDirFormat implementation for TestBzrDirSprout."""
+
+ def _open(self, transport):
+ return _TestBzrDir(transport, self)
+
+
+class _TestBzrDir(bzrdir.BzrDirMeta1):
+ """Test BzrDir implementation for TestBzrDirSprout.
+
+ When created, a _TestBzrDir already has a repository and a branch. The branch
+ is a test double as well.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(_TestBzrDir, self).__init__(*args, **kwargs)
+ self.test_branch = _TestBranch(self.transport)
+ self.test_branch.repository = self.create_repository()
+
+ def open_branch(self, unsupported=False, possible_transports=None):
+ return self.test_branch
+
+ def cloning_metadir(self, require_stacking=False):
+ return _TestBzrDirFormat()
+
+
+class _TestBranchFormat(bzrlib.branch.BranchFormat):
+ """Test Branch format for TestBzrDirSprout."""
+
+
+class _TestBranch(bzrlib.branch.Branch):
+ """Test Branch implementation for TestBzrDirSprout."""
+
+ def __init__(self, transport, *args, **kwargs):
+ self._format = _TestBranchFormat()
+ self._transport = transport
+ self.base = transport.base
+ super(_TestBranch, self).__init__(*args, **kwargs)
+ self.calls = []
+ self._parent = None
+
+ def sprout(self, *args, **kwargs):
+ self.calls.append('sprout')
+ return _TestBranch(self._transport)
+
+ def copy_content_into(self, destination, revision_id=None):
+ self.calls.append('copy_content_into')
+
+ def last_revision(self):
+ return _mod_revision.NULL_REVISION
+
+ def get_parent(self):
+ return self._parent
+
+ def _get_config(self):
+ return config.TransportConfig(self._transport, 'branch.conf')
+
+ def _get_config_store(self):
+ return config.BranchStore(self)
+
+ def set_parent(self, parent):
+ self._parent = parent
+
+ def lock_read(self):
+ return lock.LogicalLockResult(self.unlock)
+
+ def unlock(self):
+ return
+
+
+class TestBzrDirSprout(TestCaseWithMemoryTransport):
+
+ def test_sprout_uses_branch_sprout(self):
+ """BzrDir.sprout calls Branch.sprout.
+
+ Usually, BzrDir.sprout should delegate to the branch's sprout method
+ for part of the work. This allows the source branch to control the
+ choice of format for the new branch.
+
+ There are exceptions, but this test avoids them:
+ - if there's no branch in the source bzrdir,
+ - or if the stacking has been requested and the format needs to be
+ overridden to satisfy that.
+ """
+ # Make an instrumented bzrdir.
+ t = self.get_transport('source')
+ t.ensure_base()
+ source_bzrdir = _TestBzrDirFormat().initialize_on_transport(t)
+ # The instrumented bzrdir has a test_branch attribute that logs calls
+ # made to the branch contained in that bzrdir. Initially the test
+ # branch exists but no calls have been made to it.
+ self.assertEqual([], source_bzrdir.test_branch.calls)
+
+ # Sprout the bzrdir
+ target_url = self.get_url('target')
+ result = source_bzrdir.sprout(target_url, recurse='no')
+
+ # The bzrdir called the branch's sprout method.
+ self.assertSubset(['sprout'], source_bzrdir.test_branch.calls)
+
+ def test_sprout_parent(self):
+ grandparent_tree = self.make_branch('grandparent')
+ parent = grandparent_tree.bzrdir.sprout('parent').open_branch()
+ branch_tree = parent.bzrdir.sprout('branch').open_branch()
+ self.assertContainsRe(branch_tree.get_parent(), '/parent/$')
+
+
+class TestBzrDirHooks(TestCaseWithMemoryTransport):
+
+ def test_pre_open_called(self):
+ calls = []
+ bzrdir.BzrDir.hooks.install_named_hook('pre_open', calls.append, None)
+ transport = self.get_transport('foo')
+ url = transport.base
+ self.assertRaises(errors.NotBranchError, bzrdir.BzrDir.open, url)
+ self.assertEqual([transport.base], [t.base for t in calls])
+
+ def test_pre_open_actual_exceptions_raised(self):
+ count = [0]
+ def fail_once(transport):
+ count[0] += 1
+ if count[0] == 1:
+ raise errors.BzrError("fail")
+ bzrdir.BzrDir.hooks.install_named_hook('pre_open', fail_once, None)
+ transport = self.get_transport('foo')
+ url = transport.base
+ err = self.assertRaises(errors.BzrError, bzrdir.BzrDir.open, url)
+ self.assertEqual('fail', err._preformatted_string)
+
+ def test_post_repo_init(self):
+ from bzrlib.controldir import RepoInitHookParams
+ calls = []
+ bzrdir.BzrDir.hooks.install_named_hook('post_repo_init',
+ calls.append, None)
+ self.make_repository('foo')
+ self.assertLength(1, calls)
+ params = calls[0]
+ self.assertIsInstance(params, RepoInitHookParams)
+ self.assertTrue(hasattr(params, 'bzrdir'))
+ self.assertTrue(hasattr(params, 'repository'))
+
+ def test_post_repo_init_hook_repr(self):
+ param_reprs = []
+ bzrdir.BzrDir.hooks.install_named_hook('post_repo_init',
+ lambda params: param_reprs.append(repr(params)), None)
+ self.make_repository('foo')
+ self.assertLength(1, param_reprs)
+ param_repr = param_reprs[0]
+ self.assertStartsWith(param_repr, '<RepoInitHookParams for ')
+
+
+class TestGenerateBackupName(TestCaseWithMemoryTransport):
+ # FIXME: This may need to be unified with test_osutils.TestBackupNames or
+ # moved to per_bzrdir or per_transport for better coverage?
+ # -- vila 20100909
+
+ def setUp(self):
+ super(TestGenerateBackupName, self).setUp()
+ self._transport = self.get_transport()
+ bzrdir.BzrDir.create(self.get_url(),
+ possible_transports=[self._transport])
+ self._bzrdir = bzrdir.BzrDir.open_from_transport(self._transport)
+
+ def test_new(self):
+ self.assertEqual("a.~1~", self._bzrdir._available_backup_name("a"))
+
+ def test_existing(self):
+ self._transport.put_bytes("a.~1~", "some content")
+ self.assertEqual("a.~2~", self._bzrdir._available_backup_name("a"))
+
+
+class TestMeta1DirColoFormat(TestCaseWithTransport):
+ """Tests specific to the meta1 dir with colocated branches format."""
+
+ def test_supports_colo(self):
+ format = bzrdir.BzrDirMetaFormat1Colo()
+ self.assertTrue(format.colocated_branches)
+
+ def test_upgrade_from_2a(self):
+ tree = self.make_branch_and_tree('.', format='2a')
+ format = bzrdir.BzrDirMetaFormat1Colo()
+ self.assertTrue(tree.bzrdir.needs_format_conversion(format))
+ converter = tree.bzrdir._format.get_converter(format)
+ result = converter.convert(tree.bzrdir, None)
+ self.assertIsInstance(result._format, bzrdir.BzrDirMetaFormat1Colo)
+ self.assertFalse(result.needs_format_conversion(format))
+
+ def test_downgrade_to_2a(self):
+ tree = self.make_branch_and_tree('.', format='development-colo')
+ format = bzrdir.BzrDirMetaFormat1()
+ self.assertTrue(tree.bzrdir.needs_format_conversion(format))
+ converter = tree.bzrdir._format.get_converter(format)
+ result = converter.convert(tree.bzrdir, None)
+ self.assertIsInstance(result._format, bzrdir.BzrDirMetaFormat1)
+ self.assertFalse(result.needs_format_conversion(format))
+
+ def test_downgrade_to_2a_too_many_branches(self):
+ tree = self.make_branch_and_tree('.', format='development-colo')
+ tree.bzrdir.create_branch(name="another-colocated-branch")
+ converter = tree.bzrdir._format.get_converter(
+ bzrdir.BzrDirMetaFormat1())
+ result = converter.convert(tree.bzrdir, bzrdir.BzrDirMetaFormat1())
+ self.assertIsInstance(result._format, bzrdir.BzrDirMetaFormat1)
+
+ def test_nested(self):
+ tree = self.make_branch_and_tree('.', format='development-colo')
+ tree.bzrdir.create_branch(name='foo')
+ tree.bzrdir.create_branch(name='fool/bla')
+ self.assertRaises(
+ errors.ParentBranchExists, tree.bzrdir.create_branch,
+ name='foo/bar')
+
+ def test_parent(self):
+ tree = self.make_branch_and_tree('.', format='development-colo')
+ tree.bzrdir.create_branch(name='fool/bla')
+ tree.bzrdir.create_branch(name='foo/bar')
+ self.assertRaises(
+ errors.AlreadyBranchError, tree.bzrdir.create_branch,
+ name='foo')
+
+
+class SampleBzrFormat(bzrdir.BzrFormat):
+
+ @classmethod
+ def get_format_string(cls):
+ return "First line\n"
+
+
+class TestBzrFormat(TestCase):
+ """Tests for BzrFormat."""
+
+ def test_as_string(self):
+ format = SampleBzrFormat()
+ format.features = {"foo": "required"}
+ self.assertEquals(format.as_string(),
+ "First line\n"
+ "required foo\n")
+ format.features["another"] = "optional"
+ self.assertEquals(format.as_string(),
+ "First line\n"
+ "required foo\n"
+ "optional another\n")
+
+ def test_network_name(self):
+ # The network string should include the feature info
+ format = SampleBzrFormat()
+ format.features = {"foo": "required"}
+ self.assertEquals(
+ "First line\nrequired foo\n",
+ format.network_name())
+
+ def test_from_string_no_features(self):
+ # No features
+ format = SampleBzrFormat.from_string(
+ "First line\n")
+ self.assertEquals({}, format.features)
+
+ def test_from_string_with_feature(self):
+ # Proper feature
+ format = SampleBzrFormat.from_string(
+ "First line\nrequired foo\n")
+ self.assertEquals("required", format.features.get("foo"))
+
+ def test_from_string_format_string_mismatch(self):
+ # The first line has to match the format string
+ self.assertRaises(AssertionError, SampleBzrFormat.from_string,
+ "Second line\nrequired foo\n")
+
+ def test_from_string_missing_space(self):
+ # At least one space is required in the feature lines
+ self.assertRaises(errors.ParseFormatError, SampleBzrFormat.from_string,
+ "First line\nfoo\n")
+
+ def test_from_string_with_spaces(self):
+ # Feature with spaces (in case we add stuff like this in the future)
+ format = SampleBzrFormat.from_string(
+ "First line\nrequired foo with spaces\n")
+ self.assertEquals("required", format.features.get("foo with spaces"))
+
+ def test_eq(self):
+ format1 = SampleBzrFormat()
+ format1.features = {"nested-trees": "optional"}
+ format2 = SampleBzrFormat()
+ format2.features = {"nested-trees": "optional"}
+ self.assertEquals(format1, format1)
+ self.assertEquals(format1, format2)
+ format3 = SampleBzrFormat()
+ self.assertNotEquals(format1, format3)
+
+ def test_check_support_status_optional(self):
+ # Optional, so silently ignore
+ format = SampleBzrFormat()
+ format.features = {"nested-trees": "optional"}
+ format.check_support_status(True)
+ self.addCleanup(SampleBzrFormat.unregister_feature, "nested-trees")
+ SampleBzrFormat.register_feature("nested-trees")
+ format.check_support_status(True)
+
+ def test_check_support_status_required(self):
+ # Required, so trigger an exception
+ format = SampleBzrFormat()
+ format.features = {"nested-trees": "required"}
+ self.assertRaises(errors.MissingFeature, format.check_support_status,
+ True)
+ self.addCleanup(SampleBzrFormat.unregister_feature, "nested-trees")
+ SampleBzrFormat.register_feature("nested-trees")
+ format.check_support_status(True)
+
+ def test_check_support_status_unknown(self):
+ # treat unknown necessity as required
+ format = SampleBzrFormat()
+ format.features = {"nested-trees": "unknown"}
+ self.assertRaises(errors.MissingFeature, format.check_support_status,
+ True)
+ self.addCleanup(SampleBzrFormat.unregister_feature, "nested-trees")
+ SampleBzrFormat.register_feature("nested-trees")
+ format.check_support_status(True)
+
+ def test_feature_already_registered(self):
+ # a feature can only be registered once
+ self.addCleanup(SampleBzrFormat.unregister_feature, "nested-trees")
+ SampleBzrFormat.register_feature("nested-trees")
+ self.assertRaises(errors.FeatureAlreadyRegistered,
+ SampleBzrFormat.register_feature, "nested-trees")
+
+ def test_feature_with_space(self):
+ # spaces are not allowed in feature names
+ self.assertRaises(ValueError, SampleBzrFormat.register_feature,
+ "nested trees")
diff --git a/bzrlib/tests/test_cache_utf8.py b/bzrlib/tests/test_cache_utf8.py
new file mode 100644
index 0000000..7c74660
--- /dev/null
+++ b/bzrlib/tests/test_cache_utf8.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for utf8 caching."""
+
+from bzrlib import (
+ cache_utf8,
+ )
+from bzrlib.tests import TestCase
+
+
+class TestEncodeCache(TestCase):
+
+ def setUp(self):
+ super(TestEncodeCache, self).setUp()
+ cache_utf8.clear_encoding_cache()
+ self.addCleanup(cache_utf8.clear_encoding_cache)
+
+ def check_encode(self, rev_id):
+ rev_id_utf8 = rev_id.encode('utf-8')
+ self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
+ self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
+
+ # After a single encode, the mapping should exist for
+ # both directions
+ self.assertEqual(rev_id_utf8, cache_utf8.encode(rev_id))
+ self.assertTrue(rev_id in cache_utf8._unicode_to_utf8_map)
+ self.assertTrue(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
+
+ self.assertEqual(rev_id, cache_utf8.decode(rev_id_utf8))
+
+ cache_utf8.clear_encoding_cache()
+ self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
+ self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
+
+ def check_decode(self, rev_id):
+ rev_id_utf8 = rev_id.encode('utf-8')
+ self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
+ self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
+
+ # After a single decode, the mapping should exist for
+ # both directions
+ self.assertEqual(rev_id, cache_utf8.decode(rev_id_utf8))
+ self.assertTrue(rev_id in cache_utf8._unicode_to_utf8_map)
+ self.assertTrue(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
+
+ self.assertEqual(rev_id_utf8, cache_utf8.encode(rev_id))
+ cache_utf8.clear_encoding_cache()
+
+ self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
+ self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
+
+ def test_ascii(self):
+ self.check_decode(u'all_ascii_characters123123123')
+ self.check_encode(u'all_ascii_characters123123123')
+
+ def test_unicode(self):
+ self.check_encode(u'some_\xb5_unicode_\xe5_chars')
+ self.check_decode(u'some_\xb5_unicode_\xe5_chars')
+
+ def test_cached_unicode(self):
+ x = u'\xb5yy' + u'\xe5zz'
+ y = u'\xb5yy' + u'\xe5zz'
+ self.assertFalse(x is y)
+ xp = cache_utf8.get_cached_unicode(x)
+ yp = cache_utf8.get_cached_unicode(y)
+
+ self.assertIs(xp, x)
+ self.assertIs(xp, yp)
+
+ def test_cached_utf8(self):
+ x = u'\xb5yy\xe5zz'.encode('utf8')
+ y = u'\xb5yy\xe5zz'.encode('utf8')
+ self.assertFalse(x is y)
+ xp = cache_utf8.get_cached_utf8(x)
+ yp = cache_utf8.get_cached_utf8(y)
+
+ self.assertIs(xp, x)
+ self.assertIs(xp, yp)
+
+ def test_cached_ascii(self):
+ x = '%s %s' % ('simple', 'text')
+ y = '%s %s' % ('simple', 'text')
+ self.assertFalse(x is y)
+ xp = cache_utf8.get_cached_ascii(x)
+ yp = cache_utf8.get_cached_ascii(y)
+
+ self.assertIs(xp, x)
+ self.assertIs(xp, yp)
+
+ # after caching, encode and decode should also return the right
+ # objects.
+ uni_x = cache_utf8.decode(x)
+ self.assertEqual(u'simple text', uni_x)
+ self.assertIsInstance(uni_x, unicode)
+
+ utf8_x = cache_utf8.encode(uni_x)
+ self.assertIs(utf8_x, x)
+
+ def test_decode_with_None(self):
+ self.assertEqual(None, cache_utf8._utf8_decode_with_None(None))
+ self.assertEqual(u'foo', cache_utf8._utf8_decode_with_None('foo'))
+ self.assertEqual(u'f\xb5',
+ cache_utf8._utf8_decode_with_None('f\xc2\xb5'))
diff --git a/bzrlib/tests/test_cethread.py b/bzrlib/tests/test_cethread.py
new file mode 100644
index 0000000..36a845e
--- /dev/null
+++ b/bzrlib/tests/test_cethread.py
@@ -0,0 +1,161 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import threading
+
+from bzrlib import (
+ cethread,
+ tests,
+ )
+
+
+class TestCatchingExceptionThread(tests.TestCase):
+
+ def test_start_and_join_smoke_test(self):
+ def do_nothing():
+ pass
+
+ tt = cethread.CatchingExceptionThread(target=do_nothing)
+ tt.start()
+ tt.join()
+
+ def test_exception_is_re_raised(self):
+ class MyException(Exception):
+ pass
+
+ def raise_my_exception():
+ raise MyException()
+
+ tt = cethread.CatchingExceptionThread(target=raise_my_exception)
+ tt.start()
+ self.assertRaises(MyException, tt.join)
+
+ def test_join_around_exception(self):
+ resume = threading.Event()
+ class MyException(Exception):
+ pass
+
+ def raise_my_exception():
+ # Wait for the test to tell us to resume
+ resume.wait()
+ # Now we can raise
+ raise MyException()
+
+ tt = cethread.CatchingExceptionThread(target=raise_my_exception)
+ tt.start()
+ tt.join(timeout=0)
+ self.assertIs(None, tt.exception)
+ resume.set()
+ self.assertRaises(MyException, tt.join)
+
+ def test_sync_event(self):
+ control = threading.Event()
+ in_thread = threading.Event()
+ class MyException(Exception):
+ pass
+
+ def raise_my_exception():
+ # Wait for the test to tell us to resume
+ control.wait()
+ # Now we can raise
+ raise MyException()
+
+ tt = cethread.CatchingExceptionThread(target=raise_my_exception,
+ sync_event=in_thread)
+ tt.start()
+ tt.join(timeout=0)
+ self.assertIs(None, tt.exception)
+ self.assertIs(in_thread, tt.sync_event)
+ control.set()
+ self.assertRaises(MyException, tt.join)
+ self.assertEquals(True, tt.sync_event.isSet())
+
+ def test_switch_and_set(self):
+ """Caller can precisely control a thread."""
+ control1 = threading.Event()
+ control2 = threading.Event()
+ control3 = threading.Event()
+
+ class TestThread(cethread.CatchingExceptionThread):
+
+ def __init__(self):
+ super(TestThread, self).__init__(target=self.step_by_step)
+ self.current_step = 'starting'
+ self.step1 = threading.Event()
+ self.set_sync_event(self.step1)
+ self.step2 = threading.Event()
+ self.final = threading.Event()
+
+ def step_by_step(self):
+ control1.wait()
+ self.current_step = 'step1'
+ self.switch_and_set(self.step2)
+ control2.wait()
+ self.current_step = 'step2'
+ self.switch_and_set(self.final)
+ control3.wait()
+ self.current_step = 'done'
+
+ tt = TestThread()
+ tt.start()
+ self.assertEquals('starting', tt.current_step)
+ control1.set()
+ tt.step1.wait()
+ self.assertEquals('step1', tt.current_step)
+ control2.set()
+ tt.step2.wait()
+ self.assertEquals('step2', tt.current_step)
+ control3.set()
+ # We don't wait on tt.final
+ tt.join()
+ self.assertEquals('done', tt.current_step)
+
+ def test_exception_while_switch_and_set(self):
+ control1 = threading.Event()
+
+ class MyException(Exception):
+ pass
+
+ class TestThread(cethread.CatchingExceptionThread):
+
+ def __init__(self, *args, **kwargs):
+ self.step1 = threading.Event()
+ self.step2 = threading.Event()
+ super(TestThread, self).__init__(target=self.step_by_step,
+ sync_event=self.step1)
+ self.current_step = 'starting'
+ self.set_sync_event(self.step1)
+
+ def step_by_step(self):
+ control1.wait()
+ self.current_step = 'step1'
+ self.switch_and_set(self.step2)
+
+ def set_sync_event(self, event):
+ # We force an exception while trying to set step2
+ if event is self.step2:
+ raise MyException()
+ super(TestThread, self).set_sync_event(event)
+
+ tt = TestThread()
+ tt.start()
+ self.assertEquals('starting', tt.current_step)
+ control1.set()
+ # We now wait on step1 which will be set when catching the exception
+ tt.step1.wait()
+ self.assertRaises(MyException, tt.pending_exception)
+ self.assertIs(tt.step1, tt.sync_event)
+ self.assertTrue(tt.step1.isSet())
diff --git a/bzrlib/tests/test_chk_map.py b/bzrlib/tests/test_chk_map.py
new file mode 100644
index 0000000..15fd600
--- /dev/null
+++ b/bzrlib/tests/test_chk_map.py
@@ -0,0 +1,2828 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for maps built on a CHK versionedfiles facility."""
+
+from bzrlib import (
+ chk_map,
+ errors,
+ groupcompress,
+ osutils,
+ tests,
+ )
+from bzrlib.chk_map import (
+ CHKMap,
+ InternalNode,
+ LeafNode,
+ Node,
+ )
+from bzrlib.static_tuple import StaticTuple
+
+
+class TestNode(tests.TestCase):
+
+ def assertCommonPrefix(self, expected_common, prefix, key):
+ common = Node.common_prefix(prefix, key)
+ self.assertTrue(len(common) <= len(prefix))
+ self.assertTrue(len(common) <= len(key))
+ self.assertStartsWith(prefix, common)
+ self.assertStartsWith(key, common)
+ self.assertEquals(expected_common, common)
+
+ def test_common_prefix(self):
+ self.assertCommonPrefix('beg', 'beg', 'begin')
+
+ def test_no_common_prefix(self):
+ self.assertCommonPrefix('', 'begin', 'end')
+
+ def test_equal(self):
+ self.assertCommonPrefix('begin', 'begin', 'begin')
+
+ def test_not_a_prefix(self):
+ self.assertCommonPrefix('b', 'begin', 'b')
+
+ def test_empty(self):
+ self.assertCommonPrefix('', '', 'end')
+ self.assertCommonPrefix('', 'begin', '')
+ self.assertCommonPrefix('', '', '')
+
+
+class TestCaseWithStore(tests.TestCaseWithMemoryTransport):
+
+ def get_chk_bytes(self):
+ # This creates a standalone CHK store.
+ factory = groupcompress.make_pack_factory(False, False, 1)
+ self.chk_bytes = factory(self.get_transport())
+ return self.chk_bytes
+
+ def _get_map(self, a_dict, maximum_size=0, chk_bytes=None, key_width=1,
+ search_key_func=None):
+ if chk_bytes is None:
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, a_dict,
+ maximum_size=maximum_size, key_width=key_width,
+ search_key_func=search_key_func)
+ root_key2 = CHKMap._create_via_map(chk_bytes, a_dict,
+ maximum_size=maximum_size, key_width=key_width,
+ search_key_func=search_key_func)
+ self.assertEqual(root_key, root_key2, "CHKMap.from_dict() did not"
+ " match CHKMap._create_via_map")
+ chkmap = CHKMap(chk_bytes, root_key, search_key_func=search_key_func)
+ return chkmap
+
+ def read_bytes(self, chk_bytes, key):
+ stream = chk_bytes.get_record_stream([key], 'unordered', True)
+ record = stream.next()
+ if record.storage_kind == 'absent':
+ self.fail('Store does not contain the key %s' % (key,))
+ return record.get_bytes_as("fulltext")
+
+ def to_dict(self, node, *args):
+ return dict(node.iteritems(*args))
+
+
+class TestCaseWithExampleMaps(TestCaseWithStore):
+
+ def get_chk_bytes(self):
+ if getattr(self, '_chk_bytes', None) is None:
+ self._chk_bytes = super(TestCaseWithExampleMaps,
+ self).get_chk_bytes()
+ return self._chk_bytes
+
+ def get_map(self, a_dict, maximum_size=100, search_key_func=None):
+ c_map = self._get_map(a_dict, maximum_size=maximum_size,
+ chk_bytes=self.get_chk_bytes(),
+ search_key_func=search_key_func)
+ return c_map
+
+ def make_root_only_map(self, search_key_func=None):
+ return self.get_map({
+ ('aaa',): 'initial aaa content',
+ ('abb',): 'initial abb content',
+ }, search_key_func=search_key_func)
+
+ def make_root_only_aaa_ddd_map(self, search_key_func=None):
+ return self.get_map({
+ ('aaa',): 'initial aaa content',
+ ('ddd',): 'initial ddd content',
+ }, search_key_func=search_key_func)
+
+ def make_one_deep_map(self, search_key_func=None):
+ # Same as root_only_map, except it forces an InternalNode at the root
+ return self.get_map({
+ ('aaa',): 'initial aaa content',
+ ('abb',): 'initial abb content',
+ ('ccc',): 'initial ccc content',
+ ('ddd',): 'initial ddd content',
+ }, search_key_func=search_key_func)
+
+ def make_two_deep_map(self, search_key_func=None):
+ # Carefully chosen so that it creates a 2-deep map for both
+ # _search_key_plain and _search_key_16.
+ # Also so that things line up with make_one_deep_two_prefix_map
+ return self.get_map({
+ ('aaa',): 'initial aaa content',
+ ('abb',): 'initial abb content',
+ ('acc',): 'initial acc content',
+ ('ace',): 'initial ace content',
+ ('add',): 'initial add content',
+ ('adh',): 'initial adh content',
+ ('adl',): 'initial adl content',
+ ('ccc',): 'initial ccc content',
+ ('ddd',): 'initial ddd content',
+ }, search_key_func=search_key_func)
+
+ def make_one_deep_two_prefix_map(self, search_key_func=None):
+ """Create a map with one internal node, but references are extra long.
+
+ Otherwise has similar content to make_two_deep_map.
+ """
+ return self.get_map({
+ ('aaa',): 'initial aaa content',
+ ('add',): 'initial add content',
+ ('adh',): 'initial adh content',
+ ('adl',): 'initial adl content',
+ }, search_key_func=search_key_func)
+
+ def make_one_deep_one_prefix_map(self, search_key_func=None):
+ """Create a map with one internal node, but references are extra long.
+
+ Similar to make_one_deep_two_prefix_map, except the split is at the
+ first char, rather than the second.
+ """
+ return self.get_map({
+ ('add',): 'initial add content',
+ ('adh',): 'initial adh content',
+ ('adl',): 'initial adl content',
+ ('bbb',): 'initial bbb content',
+ }, search_key_func=search_key_func)
+
+
+class TestTestCaseWithExampleMaps(TestCaseWithExampleMaps):
+ """Actual tests for the provided examples."""
+
+ def test_root_only_map_plain(self):
+ c_map = self.make_root_only_map()
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " ('abb',) 'initial abb content'\n",
+ c_map._dump_tree())
+
+ def test_root_only_map_16(self):
+ c_map = self.make_root_only_map(search_key_func=chk_map._search_key_16)
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " ('abb',) 'initial abb content'\n",
+ c_map._dump_tree())
+
+ def test_one_deep_map_plain(self):
+ c_map = self.make_one_deep_map()
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " ('abb',) 'initial abb content'\n"
+ " 'c' LeafNode\n"
+ " ('ccc',) 'initial ccc content'\n"
+ " 'd' LeafNode\n"
+ " ('ddd',) 'initial ddd content'\n",
+ c_map._dump_tree())
+
+ def test_one_deep_map_16(self):
+ c_map = self.make_one_deep_map(search_key_func=chk_map._search_key_16)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " '2' LeafNode\n"
+ " ('ccc',) 'initial ccc content'\n"
+ " '4' LeafNode\n"
+ " ('abb',) 'initial abb content'\n"
+ " 'F' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " ('ddd',) 'initial ddd content'\n",
+ c_map._dump_tree())
+
+ def test_root_only_aaa_ddd_plain(self):
+ c_map = self.make_root_only_aaa_ddd_map()
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " ('ddd',) 'initial ddd content'\n",
+ c_map._dump_tree())
+
+ def test_root_only_aaa_ddd_16(self):
+ c_map = self.make_root_only_aaa_ddd_map(
+ search_key_func=chk_map._search_key_16)
+ # We use 'aaa' and 'ddd' because they happen to map to 'F' when using
+ # _search_key_16
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " ('ddd',) 'initial ddd content'\n",
+ c_map._dump_tree())
+
+ def test_two_deep_map_plain(self):
+ c_map = self.make_two_deep_map()
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aa' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " 'ab' LeafNode\n"
+ " ('abb',) 'initial abb content'\n"
+ " 'ac' LeafNode\n"
+ " ('acc',) 'initial acc content'\n"
+ " ('ace',) 'initial ace content'\n"
+ " 'ad' LeafNode\n"
+ " ('add',) 'initial add content'\n"
+ " ('adh',) 'initial adh content'\n"
+ " ('adl',) 'initial adl content'\n"
+ " 'c' LeafNode\n"
+ " ('ccc',) 'initial ccc content'\n"
+ " 'd' LeafNode\n"
+ " ('ddd',) 'initial ddd content'\n",
+ c_map._dump_tree())
+
+ def test_two_deep_map_16(self):
+ c_map = self.make_two_deep_map(search_key_func=chk_map._search_key_16)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " '2' LeafNode\n"
+ " ('acc',) 'initial acc content'\n"
+ " ('ccc',) 'initial ccc content'\n"
+ " '4' LeafNode\n"
+ " ('abb',) 'initial abb content'\n"
+ " 'C' LeafNode\n"
+ " ('ace',) 'initial ace content'\n"
+ " 'F' InternalNode\n"
+ " 'F0' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " 'F3' LeafNode\n"
+ " ('adl',) 'initial adl content'\n"
+ " 'F4' LeafNode\n"
+ " ('adh',) 'initial adh content'\n"
+ " 'FB' LeafNode\n"
+ " ('ddd',) 'initial ddd content'\n"
+ " 'FD' LeafNode\n"
+ " ('add',) 'initial add content'\n",
+ c_map._dump_tree())
+
+ def test_one_deep_two_prefix_map_plain(self):
+ c_map = self.make_one_deep_two_prefix_map()
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'aa' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " 'ad' LeafNode\n"
+ " ('add',) 'initial add content'\n"
+ " ('adh',) 'initial adh content'\n"
+ " ('adl',) 'initial adl content'\n",
+ c_map._dump_tree())
+
+ def test_one_deep_two_prefix_map_16(self):
+ c_map = self.make_one_deep_two_prefix_map(
+ search_key_func=chk_map._search_key_16)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'F0' LeafNode\n"
+ " ('aaa',) 'initial aaa content'\n"
+ " 'F3' LeafNode\n"
+ " ('adl',) 'initial adl content'\n"
+ " 'F4' LeafNode\n"
+ " ('adh',) 'initial adh content'\n"
+ " 'FD' LeafNode\n"
+ " ('add',) 'initial add content'\n",
+ c_map._dump_tree())
+
+ def test_one_deep_one_prefix_map_plain(self):
+ c_map = self.make_one_deep_one_prefix_map()
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('add',) 'initial add content'\n"
+ " ('adh',) 'initial adh content'\n"
+ " ('adl',) 'initial adl content'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'initial bbb content'\n",
+ c_map._dump_tree())
+
+ def test_one_deep_one_prefix_map_16(self):
+ c_map = self.make_one_deep_one_prefix_map(
+ search_key_func=chk_map._search_key_16)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " '4' LeafNode\n"
+ " ('bbb',) 'initial bbb content'\n"
+ " 'F' LeafNode\n"
+ " ('add',) 'initial add content'\n"
+ " ('adh',) 'initial adh content'\n"
+ " ('adl',) 'initial adl content'\n",
+ c_map._dump_tree())
+
+
+class TestMap(TestCaseWithStore):
+
+ def assertHasABMap(self, chk_bytes):
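+ # A sketch of the serialised leaf layout (inferred from the
+ # LeafNode.deserialise tests later in this file): a 'chkleaf:' header,
+ # then maximum_size, key_width, item count, the common serialised
+ # prefix, and one '<key tail>\x00<value line count>\n<value>' entry
+ # per item.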
+ ab_leaf_bytes = 'chkleaf:\n0\n1\n1\na\n\x001\nb\n'
+ ab_sha1 = osutils.sha_string(ab_leaf_bytes)
+ self.assertEqual('90986195696b177c8895d48fdb4b7f2366f798a0', ab_sha1)
+ root_key = ('sha1:' + ab_sha1,)
+ self.assertEqual(ab_leaf_bytes, self.read_bytes(chk_bytes, root_key))
+ return root_key
+
+ def assertHasEmptyMap(self, chk_bytes):
+ empty_leaf_bytes = 'chkleaf:\n0\n1\n0\n\n'
+ empty_sha1 = osutils.sha_string(empty_leaf_bytes)
+ self.assertEqual('8571e09bf1bcc5b9621ce31b3d4c93d6e9a1ed26', empty_sha1)
+ root_key = ('sha1:' + empty_sha1,)
+ self.assertEqual(empty_leaf_bytes, self.read_bytes(chk_bytes, root_key))
+ return root_key
+
+ def assertMapLayoutEqual(self, map_one, map_two):
+ """Assert that the internal structure is identical between the maps."""
+ map_one._ensure_root()
+ node_one_stack = [map_one._root_node]
+ map_two._ensure_root()
+ node_two_stack = [map_two._root_node]
+ while node_one_stack:
+ node_one = node_one_stack.pop()
+ node_two = node_two_stack.pop()
+ if node_one.__class__ != node_two.__class__:
+ self.assertEqualDiff(map_one._dump_tree(include_keys=True),
+ map_two._dump_tree(include_keys=True))
+ self.assertEqual(node_one._search_prefix,
+ node_two._search_prefix)
+ if isinstance(node_one, InternalNode):
+ # Internal nodes must have identical references
+ self.assertEqual(sorted(node_one._items.keys()),
+ sorted(node_two._items.keys()))
+ node_one_stack.extend([n for n, _ in
+ node_one._iter_nodes(map_one._store)])
+ node_two_stack.extend([n for n, _ in
+ node_two._iter_nodes(map_two._store)])
+ else:
+ # Leaf nodes must have identical contents
+ self.assertEqual(node_one._items, node_two._items)
+ self.assertEquals([], node_two_stack)
+
+ def assertCanonicalForm(self, chkmap):
+ """Assert that the chkmap is in 'canonical' form.
+
+ We do this by adding all of the key value pairs from scratch, both in
+ forward order and reverse order, and assert that the final tree layout
+ is identical.
+ """
+ items = list(chkmap.iteritems())
+ map_forward = chk_map.CHKMap(None, None)
+ map_forward._root_node.set_maximum_size(chkmap._root_node.maximum_size)
+ for key, value in items:
+ map_forward.map(key, value)
+ self.assertMapLayoutEqual(map_forward, chkmap)
+ map_reverse = chk_map.CHKMap(None, None)
+ map_reverse._root_node.set_maximum_size(chkmap._root_node.maximum_size)
+ for key, value in reversed(items):
+ map_reverse.map(key, value)
+ self.assertMapLayoutEqual(map_reverse, chkmap)
+
+ def test_assert_map_layout_equal(self):
+ store = self.get_chk_bytes()
+ map_one = CHKMap(store, None)
+ map_one._root_node.set_maximum_size(20)
+ map_two = CHKMap(store, None)
+ map_two._root_node.set_maximum_size(20)
+ self.assertMapLayoutEqual(map_one, map_two)
+ map_one.map('aaa', 'value')
+ self.assertRaises(AssertionError,
+ self.assertMapLayoutEqual, map_one, map_two)
+ map_two.map('aaa', 'value')
+ self.assertMapLayoutEqual(map_one, map_two)
+ # Split the tree, so we ensure that internal nodes and leaf nodes are
+ # properly checked
+ map_one.map('aab', 'value')
+ self.assertIsInstance(map_one._root_node, InternalNode)
+ self.assertRaises(AssertionError,
+ self.assertMapLayoutEqual, map_one, map_two)
+ map_two.map('aab', 'value')
+ self.assertMapLayoutEqual(map_one, map_two)
+ map_one.map('aac', 'value')
+ self.assertRaises(AssertionError,
+ self.assertMapLayoutEqual, map_one, map_two)
+ self.assertCanonicalForm(map_one)
+
+ def test_from_dict_empty(self):
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, {})
+ # Check the data was saved and inserted correctly.
+ expected_root_key = self.assertHasEmptyMap(chk_bytes)
+ self.assertEqual(expected_root_key, root_key)
+
+ def test_from_dict_ab(self):
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, {"a": "b"})
+ # Check the data was saved and inserted correctly.
+ expected_root_key = self.assertHasABMap(chk_bytes)
+ self.assertEqual(expected_root_key, root_key)
+
+ def test_apply_empty_ab(self):
+ # applying a delta (None, "a", "b") to an empty chkmap generates the
+ # same map as from_dict_ab.
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, {})
+ chkmap = CHKMap(chk_bytes, root_key)
+ new_root = chkmap.apply_delta([(None, "a", "b")])
+ # Check the data was saved and inserted correctly.
+ expected_root_key = self.assertHasABMap(chk_bytes)
+ self.assertEqual(expected_root_key, new_root)
+ # The update should have left us with an in memory root node, with an
+ # updated key.
+ self.assertEqual(new_root, chkmap._root_node._key)
+
+ def test_apply_ab_empty(self):
+ # applying a delta ("a", None, None) to a map with 'a' in it generates
+ # an empty map.
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, {("a",):"b"})
+ chkmap = CHKMap(chk_bytes, root_key)
+ new_root = chkmap.apply_delta([(("a",), None, None)])
+ # Check the data was saved and inserted correctly.
+ expected_root_key = self.assertHasEmptyMap(chk_bytes)
+ self.assertEqual(expected_root_key, new_root)
+ # The update should have left us with an in memory root node, with an
+ # updated key.
+ self.assertEqual(new_root, chkmap._root_node._key)
+
+ def test_apply_delete_to_internal_node(self):
+ # applying a delta should convert an internal root node to a leaf
+ # node if the delta shrinks the map enough.
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Add three items: 2 small enough to fit in one node, and one huge to
+ # force multiple nodes.
+ chkmap._root_node.set_maximum_size(100)
+ chkmap.map(('small',), 'value')
+ chkmap.map(('little',), 'value')
+ chkmap.map(('very-big',), 'x' * 100)
+ # (Check that we have constructed the scenario we want to test)
+ self.assertIsInstance(chkmap._root_node, InternalNode)
+ # Delete the huge item so that the map fits in one node again.
+ delta = [(('very-big',), None, None)]
+ chkmap.apply_delta(delta)
+ self.assertCanonicalForm(chkmap)
+ self.assertIsInstance(chkmap._root_node, LeafNode)
+
+ def test_apply_new_keys_must_be_new(self):
+ # applying a delta (None, "a", "b") to a map with 'a' in it generates
+ # an error.
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, {("a",):"b"})
+ chkmap = CHKMap(chk_bytes, root_key)
+ self.assertRaises(errors.InconsistentDelta, chkmap.apply_delta,
+ [(None, ("a",), "b")])
+ # As an error occurred, the update should have left us without changing
+ # anything (the root should be unchanged).
+ self.assertEqual(root_key, chkmap._root_node._key)
+
+ def test_apply_delta_is_deterministic(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap1 = CHKMap(chk_bytes, None)
+ chkmap1._root_node.set_maximum_size(10)
+ chkmap1.apply_delta([(None, ('aaa',), 'common'),
+ (None, ('bba',), 'target2'),
+ (None, ('bbb',), 'common')])
+ root_key1 = chkmap1._save()
+ self.assertCanonicalForm(chkmap1)
+
+ chkmap2 = CHKMap(chk_bytes, None)
+ chkmap2._root_node.set_maximum_size(10)
+ chkmap2.apply_delta([(None, ('bbb',), 'common'),
+ (None, ('bba',), 'target2'),
+ (None, ('aaa',), 'common')])
+ root_key2 = chkmap2._save()
+ self.assertEqualDiff(chkmap1._dump_tree(include_keys=True),
+ chkmap2._dump_tree(include_keys=True))
+ self.assertEqual(root_key1, root_key2)
+ self.assertCanonicalForm(chkmap2)
+
+ def test_stable_splitting(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(35)
+ chkmap.map(('aaa',), 'v')
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n",
+ chkmap._dump_tree())
+ chkmap.map(('aab',), 'v')
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aab',) 'v'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+
+ # Creates a new internal node, and splits the others into leaves
+ chkmap.map(('aac',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'v'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'v'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+
+ # Splits again, because it can't fit in the current structure
+ chkmap.map(('bbb',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'v'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'v'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'v'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+
+ def test_map_splits_with_longer_key(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 1 key per LeafNode
+ chkmap._root_node.set_maximum_size(10)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aaaa',), 'v')
+ self.assertCanonicalForm(chkmap)
+ self.assertIsInstance(chkmap._root_node, InternalNode)
+
+ def test_with_linefeed_in_key(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 1 key per LeafNode
+ chkmap._root_node.set_maximum_size(10)
+ chkmap.map(('a\ra',), 'val1')
+ chkmap.map(('a\rb',), 'val2')
+ chkmap.map(('ac',), 'val3')
+ self.assertCanonicalForm(chkmap)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'a\\r' InternalNode\n"
+ " 'a\\ra' LeafNode\n"
+ " ('a\\ra',) 'val1'\n"
+ " 'a\\rb' LeafNode\n"
+ " ('a\\rb',) 'val2'\n"
+ " 'ac' LeafNode\n"
+ " ('ac',) 'val3'\n",
+ chkmap._dump_tree())
+ # We should also successfully serialise and deserialise these items
+ root_key = chkmap._save()
+ chkmap = CHKMap(store, root_key)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'a\\r' InternalNode\n"
+ " 'a\\ra' LeafNode\n"
+ " ('a\\ra',) 'val1'\n"
+ " 'a\\rb' LeafNode\n"
+ " ('a\\rb',) 'val2'\n"
+ " 'ac' LeafNode\n"
+ " ('ac',) 'val3'\n",
+ chkmap._dump_tree())
+
+ def test_deep_splitting(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(40)
+ chkmap.map(('aaaaaaaa',), 'v')
+ chkmap.map(('aaaaabaa',), 'v')
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaaaaaaa',) 'v'\n"
+ " ('aaaaabaa',) 'v'\n",
+ chkmap._dump_tree())
+ chkmap.map(('aaabaaaa',), 'v')
+ chkmap.map(('aaababaa',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaaa' LeafNode\n"
+ " ('aaaaaaaa',) 'v'\n"
+ " ('aaaaabaa',) 'v'\n"
+ " 'aaab' LeafNode\n"
+ " ('aaabaaaa',) 'v'\n"
+ " ('aaababaa',) 'v'\n",
+ chkmap._dump_tree())
+ chkmap.map(('aaabacaa',), 'v')
+ chkmap.map(('aaabadaa',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaaa' LeafNode\n"
+ " ('aaaaaaaa',) 'v'\n"
+ " ('aaaaabaa',) 'v'\n"
+ " 'aaab' InternalNode\n"
+ " 'aaabaa' LeafNode\n"
+ " ('aaabaaaa',) 'v'\n"
+ " 'aaabab' LeafNode\n"
+ " ('aaababaa',) 'v'\n"
+ " 'aaabac' LeafNode\n"
+ " ('aaabacaa',) 'v'\n"
+ " 'aaabad' LeafNode\n"
+ " ('aaabadaa',) 'v'\n",
+ chkmap._dump_tree())
+ chkmap.map(('aaababba',), 'val')
+ chkmap.map(('aaababca',), 'val')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaaa' LeafNode\n"
+ " ('aaaaaaaa',) 'v'\n"
+ " ('aaaaabaa',) 'v'\n"
+ " 'aaab' InternalNode\n"
+ " 'aaabaa' LeafNode\n"
+ " ('aaabaaaa',) 'v'\n"
+ " 'aaabab' InternalNode\n"
+ " 'aaababa' LeafNode\n"
+ " ('aaababaa',) 'v'\n"
+ " 'aaababb' LeafNode\n"
+ " ('aaababba',) 'val'\n"
+ " 'aaababc' LeafNode\n"
+ " ('aaababca',) 'val'\n"
+ " 'aaabac' LeafNode\n"
+ " ('aaabacaa',) 'v'\n"
+ " 'aaabad' LeafNode\n"
+ " ('aaabadaa',) 'v'\n",
+ chkmap._dump_tree())
+ # Now we add a node that should fit around an existing InternalNode,
+ # but has a slightly different key prefix, which causes a new
+ # InternalNode split
+ chkmap.map(('aaabDaaa',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaaa' LeafNode\n"
+ " ('aaaaaaaa',) 'v'\n"
+ " ('aaaaabaa',) 'v'\n"
+ " 'aaab' InternalNode\n"
+ " 'aaabD' LeafNode\n"
+ " ('aaabDaaa',) 'v'\n"
+ " 'aaaba' InternalNode\n"
+ " 'aaabaa' LeafNode\n"
+ " ('aaabaaaa',) 'v'\n"
+ " 'aaabab' InternalNode\n"
+ " 'aaababa' LeafNode\n"
+ " ('aaababaa',) 'v'\n"
+ " 'aaababb' LeafNode\n"
+ " ('aaababba',) 'val'\n"
+ " 'aaababc' LeafNode\n"
+ " ('aaababca',) 'val'\n"
+ " 'aaabac' LeafNode\n"
+ " ('aaabacaa',) 'v'\n"
+ " 'aaabad' LeafNode\n"
+ " ('aaabadaa',) 'v'\n",
+ chkmap._dump_tree())
+
+ def test_map_collapses_if_size_changes(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(35)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aab',), 'very long value that splits')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'very long value that splits'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+ # Now changing the value to something small should cause a rebuild
+ chkmap.map(('aab',), 'v')
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aab',) 'v'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+
+ def test_map_double_deep_collapses(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 3 small keys per LeafNode
+ chkmap._root_node.set_maximum_size(40)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aab',), 'very long value that splits')
+ chkmap.map(('abc',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aa' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'very long value that splits'\n"
+ " 'ab' LeafNode\n"
+ " ('abc',) 'v'\n",
+ chkmap._dump_tree())
+ chkmap.map(('aab',), 'v')
+ self.assertCanonicalForm(chkmap)
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aab',) 'v'\n"
+ " ('abc',) 'v'\n",
+ chkmap._dump_tree())
+
+ def test_stable_unmap(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(35)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aab',), 'v')
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aab',) 'v'\n",
+ chkmap._dump_tree())
+ # Creates a new internal node, and splits the others into leaves
+ chkmap.map(('aac',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'v'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'v'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+ # Now let's unmap one of the keys, and assert that we collapse the
+ # structures.
+ chkmap.unmap(('aac',))
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aab',) 'v'\n",
+ chkmap._dump_tree())
+ self.assertCanonicalForm(chkmap)
+
+ def test_unmap_double_deep(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 3 keys per LeafNode
+ chkmap._root_node.set_maximum_size(40)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aaab',), 'v')
+ chkmap.map(('aab',), 'very long value')
+ chkmap.map(('abc',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aa' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aaab',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'very long value'\n"
+ " 'ab' LeafNode\n"
+ " ('abc',) 'v'\n",
+ chkmap._dump_tree())
+ # Removing the 'aab' key should cause everything to collapse back to a
+ # single node
+ chkmap.unmap(('aab',))
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aaab',) 'v'\n"
+ " ('abc',) 'v'\n",
+ chkmap._dump_tree())
+
+ def test_unmap_double_deep_non_empty_leaf(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 3 keys per LeafNode
+ chkmap._root_node.set_maximum_size(40)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aab',), 'long value')
+ chkmap.map(('aabb',), 'v')
+ chkmap.map(('abc',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aa' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'long value'\n"
+ " ('aabb',) 'v'\n"
+ " 'ab' LeafNode\n"
+ " ('abc',) 'v'\n",
+ chkmap._dump_tree())
+ # Removing the 'aab' key should cause everything to collapse back to a
+ # single node
+ chkmap.unmap(('aab',))
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " ('aabb',) 'v'\n"
+ " ('abc',) 'v'\n",
+ chkmap._dump_tree())
+
+ def test_unmap_with_known_internal_node_doesnt_page(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 3 keys per LeafNode
+ chkmap._root_node.set_maximum_size(30)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aab',), 'v')
+ chkmap.map(('aac',), 'v')
+ chkmap.map(('abc',), 'v')
+ chkmap.map(('acd',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aa' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'v'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'v'\n"
+ " 'ab' LeafNode\n"
+ " ('abc',) 'v'\n"
+ " 'ac' LeafNode\n"
+ " ('acd',) 'v'\n",
+ chkmap._dump_tree())
+ # Save everything to the map, and start over
+ chkmap = CHKMap(store, chkmap._save())
+ # Mapping an 'aa' key loads the internal node, but should not page in
+ # the 'ab' and 'ac' nodes
+ chkmap.map(('aad',), 'v')
+ self.assertIsInstance(chkmap._root_node._items['aa'], InternalNode)
+ self.assertIsInstance(chkmap._root_node._items['ab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['ac'], StaticTuple)
+ # Unmapping 'acd' can notice that 'aa' is an InternalNode and not have
+ # to page in 'ab'
+ chkmap.unmap(('acd',))
+ self.assertIsInstance(chkmap._root_node._items['aa'], InternalNode)
+ self.assertIsInstance(chkmap._root_node._items['ab'], StaticTuple)
+
+ def test_unmap_without_fitting_doesnt_page_in(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(20)
+ chkmap.map(('aaa',), 'v')
+ chkmap.map(('aab',), 'v')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'v'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'v'\n",
+ chkmap._dump_tree())
+ # Save everything to the map, and start over
+ chkmap = CHKMap(store, chkmap._save())
+ chkmap.map(('aac',), 'v')
+ chkmap.map(('aad',), 'v')
+ chkmap.map(('aae',), 'v')
+ chkmap.map(('aaf',), 'v')
+ # At this point, the previous nodes should not be paged in, but the
+ # newly added nodes would be
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aac'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aad'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aae'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aaf'], LeafNode)
+ # Now unmapping one of the new nodes will use only the already-paged-in
+ # nodes to determine that we don't need to do more.
+ chkmap.unmap(('aaf',))
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aac'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aad'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aae'], LeafNode)
+
+ def test_unmap_pages_in_if_necessary(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(30)
+ chkmap.map(('aaa',), 'val')
+ chkmap.map(('aab',), 'val')
+ chkmap.map(('aac',), 'val')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'val'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'val'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'val'\n",
+ chkmap._dump_tree())
+ root_key = chkmap._save()
+ # Save everything to the map, and start over
+ chkmap = CHKMap(store, root_key)
+ chkmap.map(('aad',), 'v')
+ # At this point, the previous nodes should not be paged in, but the
+ # newly added node would be
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aac'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aad'], LeafNode)
+ # Unmapping the new node will check the existing nodes to see if they
+ # would fit.
+ # Clear the page cache so we ensure we have to read all the children
+ chk_map.clear_cache()
+ chkmap.unmap(('aad',))
+ self.assertIsInstance(chkmap._root_node._items['aaa'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aab'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aac'], LeafNode)
+
+ def test_unmap_pages_in_from_page_cache(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(30)
+ chkmap.map(('aaa',), 'val')
+ chkmap.map(('aab',), 'val')
+ chkmap.map(('aac',), 'val')
+ root_key = chkmap._save()
+ # Save everything to the map, and start over
+ chkmap = CHKMap(store, root_key)
+ chkmap.map(('aad',), 'val')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'val'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'val'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'val'\n"
+ " 'aad' LeafNode\n"
+ " ('aad',) 'val'\n",
+ chkmap._dump_tree())
+ # Save everything to the map, start over after _dump_tree
+ chkmap = CHKMap(store, root_key)
+ chkmap.map(('aad',), 'v')
+ # At this point, the previous nodes should not be paged in, but the
+ # newly added node would be
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aac'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aad'], LeafNode)
+ # Now clear the page cache, and only include 2 of the children in the
+ # cache
+ aab_key = chkmap._root_node._items['aab']
+ aab_bytes = chk_map._get_cache()[aab_key]
+ aac_key = chkmap._root_node._items['aac']
+ aac_bytes = chk_map._get_cache()[aac_key]
+ chk_map.clear_cache()
+ chk_map._get_cache()[aab_key] = aab_bytes
+ chk_map._get_cache()[aac_key] = aac_bytes
+
+ # Unmapping the new node will check the nodes from the page cache
+ # first, and not have to read in 'aaa'
+ chkmap.unmap(('aad',))
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aac'], LeafNode)
+
+ def test_unmap_uses_existing_items(self):
+ store = self.get_chk_bytes()
+ chkmap = CHKMap(store, None)
+ # Should fit 2 keys per LeafNode
+ chkmap._root_node.set_maximum_size(30)
+ chkmap.map(('aaa',), 'val')
+ chkmap.map(('aab',), 'val')
+ chkmap.map(('aac',), 'val')
+ root_key = chkmap._save()
+ # Save everything to the map, and start over
+ chkmap = CHKMap(store, root_key)
+ chkmap.map(('aad',), 'val')
+ chkmap.map(('aae',), 'val')
+ chkmap.map(('aaf',), 'val')
+ # At this point, the previous nodes should not be paged in, but the
+ # newly added node would be
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aac'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aad'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aae'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aaf'], LeafNode)
+
+ # Unmapping a new node will see the other nodes that are already in
+ # memory, and not need to page in anything else
+ chkmap.unmap(('aad',))
+ self.assertIsInstance(chkmap._root_node._items['aaa'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aab'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aac'], StaticTuple)
+ self.assertIsInstance(chkmap._root_node._items['aae'], LeafNode)
+ self.assertIsInstance(chkmap._root_node._items['aaf'], LeafNode)
+
+ def test_iter_changes_empty_ab(self):
+ # Asking for changes from an empty dict to a dict with keys returns
+ # all the keys.
+ basis = self._get_map({}, maximum_size=10)
+ target = self._get_map(
+ {('a',): 'content here', ('b',): 'more content'},
+ chk_bytes=basis._store, maximum_size=10)
+ self.assertEqual([(('a',), None, 'content here'),
+ (('b',), None, 'more content')],
+ sorted(list(target.iter_changes(basis))))
+
+ def test_iter_changes_ab_empty(self):
+ # Asking for changes from a dict with keys to an empty dict returns
+ # all the keys.
+ basis = self._get_map({('a',): 'content here', ('b',): 'more content'},
+ maximum_size=10)
+ target = self._get_map({}, chk_bytes=basis._store, maximum_size=10)
+ self.assertEqual([(('a',), 'content here', None),
+ (('b',), 'more content', None)],
+ sorted(list(target.iter_changes(basis))))
+
+ def test_iter_changes_empty_empty_is_empty(self):
+ basis = self._get_map({}, maximum_size=10)
+ target = self._get_map({}, chk_bytes=basis._store, maximum_size=10)
+ self.assertEqual([], sorted(list(target.iter_changes(basis))))
+
+ def test_iter_changes_ab_ab_is_empty(self):
+ basis = self._get_map({('a',): 'content here', ('b',): 'more content'},
+ maximum_size=10)
+ target = self._get_map(
+ {('a',): 'content here', ('b',): 'more content'},
+ chk_bytes=basis._store, maximum_size=10)
+ self.assertEqual([], sorted(list(target.iter_changes(basis))))
+
+ def test_iter_changes_ab_ab_nodes_not_loaded(self):
+ basis = self._get_map({('a',): 'content here', ('b',): 'more content'},
+ maximum_size=10)
+ target = self._get_map(
+ {('a',): 'content here', ('b',): 'more content'},
+ chk_bytes=basis._store, maximum_size=10)
+ list(target.iter_changes(basis))
+ self.assertIsInstance(target._root_node, StaticTuple)
+ self.assertIsInstance(basis._root_node, StaticTuple)
+
+ def test_iter_changes_ab_ab_changed_values_shown(self):
+ basis = self._get_map({('a',): 'content here', ('b',): 'more content'},
+ maximum_size=10)
+ target = self._get_map(
+ {('a',): 'content here', ('b',): 'different content'},
+ chk_bytes=basis._store, maximum_size=10)
+ result = sorted(list(target.iter_changes(basis)))
+ self.assertEqual([(('b',), 'more content', 'different content')],
+ result)
+
+ def test_iter_changes_mixed_node_length(self):
+ # When one side has different node lengths than the other, common
+ # but different keys still need to be shown, and new-and-old included
+ # appropriately.
+ # aaa - common unaltered
+ # aab - common altered
+ # b - basis only
+ # at - target only
+ # we expect:
+ # aaa to be not loaded (later test)
+ # aab, b, at to be returned.
+ # basis splits at byte 0,1,2; aaa is common, b is basis only
+ basis_dict = {('aaa',): 'foo bar',
+ ('aab',): 'common altered a', ('b',): 'foo bar b'}
+ # target splits at byte 1,2; at is target only
+ target_dict = {('aaa',): 'foo bar',
+ ('aab',): 'common altered b', ('at',): 'foo bar t'}
+ changes = [
+ (('aab',), 'common altered a', 'common altered b'),
+ (('at',), None, 'foo bar t'),
+ (('b',), 'foo bar b', None),
+ ]
+ basis = self._get_map(basis_dict, maximum_size=10)
+ target = self._get_map(target_dict, maximum_size=10,
+ chk_bytes=basis._store)
+ self.assertEqual(changes, sorted(list(target.iter_changes(basis))))
+
+ def test_iter_changes_common_pages_not_loaded(self):
+ # aaa - common unaltered
+ # aab - common altered
+ # b - basis only
+ # at - target only
+ # we expect:
+ # aaa to be not loaded
+ # aaa not to be in result.
+ basis_dict = {('aaa',): 'foo bar',
+ ('aab',): 'common altered a', ('b',): 'foo bar b'}
+ # target splits at byte 1; at is target only
+ target_dict = {('aaa',): 'foo bar',
+ ('aab',): 'common altered b', ('at',): 'foo bar t'}
+ basis = self._get_map(basis_dict, maximum_size=10)
+ target = self._get_map(target_dict, maximum_size=10,
+ chk_bytes=basis._store)
+ basis_get = basis._store.get_record_stream
+ def get_record_stream(keys, order, fulltext):
+ if ('sha1:1adf7c0d1b9140ab5f33bb64c6275fa78b1580b7',) in keys:
+ raise AssertionError("'aaa' pointer was followed %r" % keys)
+ return basis_get(keys, order, fulltext)
+ basis._store.get_record_stream = get_record_stream
+ result = sorted(list(target.iter_changes(basis)))
+ for change in result:
+ if change[0] == ('aaa',):
+ self.fail("Found unexpected change: %s" % change)
+
+ def test_iter_changes_unchanged_keys_in_multi_key_leafs_ignored(self):
+ # Within a leaf there are no hashes to exclude keys; make sure multi-
+ # value leaf nodes are handled well.
+ basis_dict = {('aaa',): 'foo bar',
+ ('aab',): 'common altered a', ('b',): 'foo bar b'}
+ target_dict = {('aaa',): 'foo bar',
+ ('aab',): 'common altered b', ('at',): 'foo bar t'}
+ changes = [
+ (('aab',), 'common altered a', 'common altered b'),
+ (('at',), None, 'foo bar t'),
+ (('b',), 'foo bar b', None),
+ ]
+ basis = self._get_map(basis_dict)
+ target = self._get_map(target_dict, chk_bytes=basis._store)
+ self.assertEqual(changes, sorted(list(target.iter_changes(basis))))
+
+ def test_iteritems_empty(self):
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes, {})
+ chkmap = CHKMap(chk_bytes, root_key)
+ self.assertEqual([], list(chkmap.iteritems()))
+
+ def test_iteritems_two_items(self):
+ chk_bytes = self.get_chk_bytes()
+ root_key = CHKMap.from_dict(chk_bytes,
+ {"a":"content here", "b":"more content"})
+ chkmap = CHKMap(chk_bytes, root_key)
+ self.assertEqual([(("a",), "content here"), (("b",), "more content")],
+ sorted(list(chkmap.iteritems())))
+
+ def test_iteritems_selected_one_of_two_items(self):
+ chkmap = self._get_map( {("a",):"content here", ("b",):"more content"})
+ self.assertEqual({("a",): "content here"},
+ self.to_dict(chkmap, [("a",)]))
+
+ def test_iteritems_keys_prefixed_by_2_width_nodes(self):
+ chkmap = self._get_map(
+ {("a","a"):"content here", ("a", "b",):"more content",
+ ("b", ""): 'boring content'},
+ maximum_size=10, key_width=2)
+ self.assertEqual(
+ {("a", "a"): "content here", ("a", "b"): 'more content'},
+ self.to_dict(chkmap, [("a",)]))
+
+ def test_iteritems_keys_prefixed_by_2_width_nodes_hashed(self):
+ search_key_func = chk_map.search_key_registry.get('hash-16-way')
+ self.assertEqual('E8B7BE43\x00E8B7BE43',
+ search_key_func(StaticTuple('a', 'a')))
+ self.assertEqual('E8B7BE43\x0071BEEFF9',
+ search_key_func(StaticTuple('a', 'b')))
+ self.assertEqual('71BEEFF9\x0000000000',
+ search_key_func(StaticTuple('b', '')))
+ chkmap = self._get_map(
+ {("a","a"):"content here", ("a", "b",):"more content",
+ ("b", ""): 'boring content'},
+ maximum_size=10, key_width=2, search_key_func=search_key_func)
+ self.assertEqual(
+ {("a", "a"): "content here", ("a", "b"): 'more content'},
+ self.to_dict(chkmap, [("a",)]))
+
+ def test_iteritems_keys_prefixed_by_2_width_one_leaf(self):
+ chkmap = self._get_map(
+ {("a","a"):"content here", ("a", "b",):"more content",
+ ("b", ""): 'boring content'}, key_width=2)
+ self.assertEqual(
+ {("a", "a"): "content here", ("a", "b"): 'more content'},
+ self.to_dict(chkmap, [("a",)]))
+
+ def test___len__empty(self):
+ chkmap = self._get_map({})
+ self.assertEqual(0, len(chkmap))
+
+ def test___len__2(self):
+ chkmap = self._get_map({("foo",):"bar", ("gam",):"quux"})
+ self.assertEqual(2, len(chkmap))
+
+ def test_max_size_100_bytes_new(self):
+ # When there is a 100 byte upper node limit, a tree is formed.
+ chkmap = self._get_map({("k1"*50,):"v1", ("k2"*50,):"v2"}, maximum_size=100)
+ # We expect three nodes:
+ # A root with two children, keyed by the prefixes 'k1' and 'k2', as our
+ # node splitting is only just being developed.
+ # The maximum size should be embedded
+ chkmap._ensure_root()
+ self.assertEqual(100, chkmap._root_node.maximum_size)
+ self.assertEqual(1, chkmap._root_node._key_width)
+ # There should be two child nodes, keyed by 2-byte prefixes:
+ self.assertEqual(2, len(chkmap._root_node._items))
+ self.assertEqual("k", chkmap._root_node._compute_search_prefix())
+ # The actual nodes pointed at will change as serialisers change; so
+ # here we test that the key prefix is correct; then load the nodes and
+ # check they have the right pointed at key; whether they have the
+ # pointed at value inline or not is also unrelated to this test so we
+ # don't check that in detail - rather we just check the aggregate
+ # value.
+ nodes = sorted(chkmap._root_node._items.items())
+ ptr1 = nodes[0]
+ ptr2 = nodes[1]
+ self.assertEqual('k1', ptr1[0])
+ self.assertEqual('k2', ptr2[0])
+ node1 = chk_map._deserialise(chkmap._read_bytes(ptr1[1]), ptr1[1], None)
+ self.assertIsInstance(node1, LeafNode)
+ self.assertEqual(1, len(node1))
+ self.assertEqual({('k1'*50,): 'v1'}, self.to_dict(node1, chkmap._store))
+ node2 = chk_map._deserialise(chkmap._read_bytes(ptr2[1]), ptr2[1], None)
+ self.assertIsInstance(node2, LeafNode)
+ self.assertEqual(1, len(node2))
+ self.assertEqual({('k2'*50,): 'v2'}, self.to_dict(node2, chkmap._store))
+ # Having checked we have a good structure, check that the content is
+ # still accessible.
+ self.assertEqual(2, len(chkmap))
+ self.assertEqual({("k1"*50,): "v1", ("k2"*50,): "v2"},
+ self.to_dict(chkmap))
+
+ def test_init_root_is_LeafNode_new(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = CHKMap(chk_bytes, None)
+ self.assertIsInstance(chkmap._root_node, LeafNode)
+ self.assertEqual({}, self.to_dict(chkmap))
+ self.assertEqual(0, len(chkmap))
+
+ def test_init_and_save_new(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = CHKMap(chk_bytes, None)
+ key = chkmap._save()
+ leaf_node = LeafNode()
+ self.assertEqual([key], leaf_node.serialise(chk_bytes))
+
+ def test_map_first_item_new(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = CHKMap(chk_bytes, None)
+ chkmap.map(("foo,",), "bar")
+ self.assertEqual({('foo,',): 'bar'}, self.to_dict(chkmap))
+ self.assertEqual(1, len(chkmap))
+ key = chkmap._save()
+ leaf_node = LeafNode()
+ leaf_node.map(chk_bytes, ("foo,",), "bar")
+ self.assertEqual([key], leaf_node.serialise(chk_bytes))
+
+ def test_unmap_last_item_root_is_leaf_new(self):
+ chkmap = self._get_map({("k1"*50,): "v1", ("k2"*50,): "v2"})
+ chkmap.unmap(("k1"*50,))
+ chkmap.unmap(("k2"*50,))
+ self.assertEqual(0, len(chkmap))
+ self.assertEqual({}, self.to_dict(chkmap))
+ key = chkmap._save()
+ leaf_node = LeafNode()
+ self.assertEqual([key], leaf_node.serialise(chkmap._store))
+
+ def test__dump_tree(self):
+ chkmap = self._get_map({("aaa",): "value1", ("aab",): "value2",
+ ("bbb",): "value3",},
+ maximum_size=15)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'value1'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'value2'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'value3'\n",
+ chkmap._dump_tree())
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'value1'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'value2'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'value3'\n",
+ chkmap._dump_tree())
+ self.assertEqualDiff(
+ "'' InternalNode sha1:0690d471eb0a624f359797d0ee4672bd68f4e236\n"
+ " 'a' InternalNode sha1:1514c35503da9418d8fd90c1bed553077cb53673\n"
+ " 'aaa' LeafNode sha1:4cc5970454d40b4ce297a7f13ddb76f63b88fefb\n"
+ " ('aaa',) 'value1'\n"
+ " 'aab' LeafNode sha1:1d68bc90914ef8a3edbcc8bb28b00cb4fea4b5e2\n"
+ " ('aab',) 'value2'\n"
+ " 'b' LeafNode sha1:3686831435b5596515353364eab0399dc45d49e7\n"
+ " ('bbb',) 'value3'\n",
+ chkmap._dump_tree(include_keys=True))
+
+ def test__dump_tree_in_progress(self):
+ chkmap = self._get_map({("aaa",): "value1", ("aab",): "value2"},
+ maximum_size=10)
+ chkmap.map(('bbb',), 'value3')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'value1'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'value2'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'value3'\n",
+ chkmap._dump_tree())
+ # For things that are updated by adding 'bbb', we don't have a sha key
+ # for them yet, so they are listed as None
+ self.assertEqualDiff(
+ "'' InternalNode None\n"
+ " 'a' InternalNode sha1:6b0d881dd739a66f733c178b24da64395edfaafd\n"
+ " 'aaa' LeafNode sha1:40b39a08d895babce17b20ae5f62d187eaa4f63a\n"
+ " ('aaa',) 'value1'\n"
+ " 'aab' LeafNode sha1:ad1dc7c4e801302c95bf1ba7b20bc45e548cd51a\n"
+ " ('aab',) 'value2'\n"
+ " 'b' LeafNode None\n"
+ " ('bbb',) 'value3'\n",
+ chkmap._dump_tree(include_keys=True))
+
+
+def _search_key_single(key):
+ """A search key function that maps all nodes to the same value"""
+ return 'value'
+
+def _test_search_key(key):
+ return 'test:' + '\x00'.join(key)
+
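+# Illustrative only, not part of the original assertions: _search_key_single
+# always returns 'value' regardless of the key, while
+# _test_search_key(('1', '2', '3')) yields 'test:1\x002\x003', as checked in
+# TestMapSearchKeys below.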
+
+class TestMapSearchKeys(TestCaseWithStore):
+
+ def test_default_chk_map_uses_flat_search_key(self):
+ chkmap = chk_map.CHKMap(self.get_chk_bytes(), None)
+ self.assertEqual('1',
+ chkmap._search_key_func(('1',)))
+ self.assertEqual('1\x002',
+ chkmap._search_key_func(('1', '2')))
+ self.assertEqual('1\x002\x003',
+ chkmap._search_key_func(('1', '2', '3')))
+
+ def test_search_key_is_passed_to_root_node(self):
+ chkmap = chk_map.CHKMap(self.get_chk_bytes(), None,
+ search_key_func=_test_search_key)
+ self.assertIs(_test_search_key, chkmap._search_key_func)
+ self.assertEqual('test:1\x002\x003',
+ chkmap._search_key_func(('1', '2', '3')))
+ self.assertEqual('test:1\x002\x003',
+ chkmap._root_node._search_key(('1', '2', '3')))
+
+ def test_search_key_passed_via__ensure_root(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = chk_map.CHKMap(chk_bytes, None,
+ search_key_func=_test_search_key)
+ root_key = chkmap._save()
+ chkmap = chk_map.CHKMap(chk_bytes, root_key,
+ search_key_func=_test_search_key)
+ chkmap._ensure_root()
+ self.assertEqual('test:1\x002\x003',
+ chkmap._root_node._search_key(('1', '2', '3')))
+
+ def test_search_key_with_internal_node(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = chk_map.CHKMap(chk_bytes, None,
+ search_key_func=_test_search_key)
+ chkmap._root_node.set_maximum_size(10)
+ chkmap.map(('1',), 'foo')
+ chkmap.map(('2',), 'bar')
+ chkmap.map(('3',), 'baz')
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'test:1' LeafNode\n"
+ " ('1',) 'foo'\n"
+ " 'test:2' LeafNode\n"
+ " ('2',) 'bar'\n"
+ " 'test:3' LeafNode\n"
+ " ('3',) 'baz'\n"
+ , chkmap._dump_tree())
+ root_key = chkmap._save()
+ chkmap = chk_map.CHKMap(chk_bytes, root_key,
+ search_key_func=_test_search_key)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'test:1' LeafNode\n"
+ " ('1',) 'foo'\n"
+ " 'test:2' LeafNode\n"
+ " ('2',) 'bar'\n"
+ " 'test:3' LeafNode\n"
+ " ('3',) 'baz'\n"
+ , chkmap._dump_tree())
+
+ def test_search_key_16(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = chk_map.CHKMap(chk_bytes, None,
+ search_key_func=chk_map._search_key_16)
+ chkmap._root_node.set_maximum_size(10)
+ chkmap.map(('1',), 'foo')
+ chkmap.map(('2',), 'bar')
+ chkmap.map(('3',), 'baz')
+ self.assertEqualDiff("'' InternalNode\n"
+ " '1' LeafNode\n"
+ " ('2',) 'bar'\n"
+ " '6' LeafNode\n"
+ " ('3',) 'baz'\n"
+ " '8' LeafNode\n"
+ " ('1',) 'foo'\n"
+ , chkmap._dump_tree())
+ root_key = chkmap._save()
+ chkmap = chk_map.CHKMap(chk_bytes, root_key,
+ search_key_func=chk_map._search_key_16)
+ # We can get the values back correctly
+ self.assertEqual([(('1',), 'foo')],
+ list(chkmap.iteritems([('1',)])))
+ self.assertEqualDiff("'' InternalNode\n"
+ " '1' LeafNode\n"
+ " ('2',) 'bar'\n"
+ " '6' LeafNode\n"
+ " ('3',) 'baz'\n"
+ " '8' LeafNode\n"
+ " ('1',) 'foo'\n"
+ , chkmap._dump_tree())
+
+ def test_search_key_255(self):
+ chk_bytes = self.get_chk_bytes()
+ chkmap = chk_map.CHKMap(chk_bytes, None,
+ search_key_func=chk_map._search_key_255)
+ chkmap._root_node.set_maximum_size(10)
+ chkmap.map(('1',), 'foo')
+ chkmap.map(('2',), 'bar')
+ chkmap.map(('3',), 'baz')
+ self.assertEqualDiff("'' InternalNode\n"
+ " '\\x1a' LeafNode\n"
+ " ('2',) 'bar'\n"
+ " 'm' LeafNode\n"
+ " ('3',) 'baz'\n"
+ " '\\x83' LeafNode\n"
+ " ('1',) 'foo'\n"
+ , chkmap._dump_tree())
+ root_key = chkmap._save()
+ chkmap = chk_map.CHKMap(chk_bytes, root_key,
+ search_key_func=chk_map._search_key_255)
+ # We can get the values back correctly
+ self.assertEqual([(('1',), 'foo')],
+ list(chkmap.iteritems([('1',)])))
+ self.assertEqualDiff("'' InternalNode\n"
+ " '\\x1a' LeafNode\n"
+ " ('2',) 'bar'\n"
+ " 'm' LeafNode\n"
+ " ('3',) 'baz'\n"
+ " '\\x83' LeafNode\n"
+ " ('1',) 'foo'\n"
+ , chkmap._dump_tree())
+
+ def test_search_key_collisions(self):
+ chkmap = chk_map.CHKMap(self.get_chk_bytes(), None,
+ search_key_func=_search_key_single)
+ # The node will want to expand, but it cannot, because it knows that
+ # all the keys must map to this node
+ chkmap._root_node.set_maximum_size(20)
+ chkmap.map(('1',), 'foo')
+ chkmap.map(('2',), 'bar')
+ chkmap.map(('3',), 'baz')
+ self.assertEqualDiff("'' LeafNode\n"
+ " ('1',) 'foo'\n"
+ " ('2',) 'bar'\n"
+ " ('3',) 'baz'\n"
+ , chkmap._dump_tree())
+
+
+class TestLeafNode(TestCaseWithStore):
+
+ def test_current_size_empty(self):
+ node = LeafNode()
+ self.assertEqual(16, node._current_size())
+
+ def test_current_size_size_changed(self):
+ node = LeafNode()
+ node.set_maximum_size(10)
+ self.assertEqual(17, node._current_size())
+
+ def test_current_size_width_changed(self):
+ node = LeafNode()
+ node._key_width = 10
+ self.assertEqual(17, node._current_size())
+
+ def test_current_size_items(self):
+ node = LeafNode()
+ base_size = node._current_size()
+ node.map(None, ("foo bar",), "baz")
+ self.assertEqual(base_size + 14, node._current_size())
+
+ def test_deserialise_empty(self):
+ node = LeafNode.deserialise("chkleaf:\n10\n1\n0\n\n", ("sha1:1234",))
+ self.assertEqual(0, len(node))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(("sha1:1234",), node.key())
+ self.assertIs(None, node._search_prefix)
+ self.assertIs(None, node._common_serialised_prefix)
+
+ def test_deserialise_items(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo bar",), "baz"), (("quux",), "blarh")],
+ sorted(node.iteritems(None)))
+
+ def test_deserialise_item_with_null_width_1(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n1\n2\n\nfoo\x001\nbar\x00baz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo",), "bar\x00baz"), (("quux",), "blarh")],
+ sorted(node.iteritems(None)))
+
+ def test_deserialise_item_with_null_width_2(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n2\n2\n\nfoo\x001\x001\nbar\x00baz\n"
+ "quux\x00\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo", "1"), "bar\x00baz"), (("quux", ""), "blarh")],
+ sorted(node.iteritems(None)))
+
+ def test_iteritems_selected_one_of_two_items(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("quux",), "blarh")],
+ sorted(node.iteritems(None, [("quux",), ("qaz",)])))
+
+ def test_deserialise_item_with_common_prefix(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n2\n2\nfoo\x00\n1\x001\nbar\x00baz\n2\x001\nblarh\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo", "1"), "bar\x00baz"), (("foo", "2"), "blarh")],
+ sorted(node.iteritems(None)))
+ self.assertIs(chk_map._unknown, node._search_prefix)
+ self.assertEqual('foo\x00', node._common_serialised_prefix)
+
+ def test_deserialise_multi_line(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n2\n2\nfoo\x00\n1\x002\nbar\nbaz\n2\x002\nblarh\n\n",
+ ("sha1:1234",))
+ self.assertEqual(2, len(node))
+ self.assertEqual([(("foo", "1"), "bar\nbaz"),
+ (("foo", "2"), "blarh\n"),
+ ], sorted(node.iteritems(None)))
+ self.assertIs(chk_map._unknown, node._search_prefix)
+ self.assertEqual('foo\x00', node._common_serialised_prefix)
+
+ def test_key_new(self):
+ node = LeafNode()
+ self.assertEqual(None, node.key())
+
+ def test_key_after_map(self):
+ node = LeafNode.deserialise("chkleaf:\n10\n1\n0\n\n", ("sha1:1234",))
+ node.map(None, ("foo bar",), "baz quux")
+ self.assertEqual(None, node.key())
+
+ def test_key_after_unmap(self):
+ node = LeafNode.deserialise(
+ "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
+ ("sha1:1234",))
+ node.unmap(None, ("foo bar",))
+ self.assertEqual(None, node.key())
+
+ def test_map_exceeding_max_size_only_entry_new(self):
+ node = LeafNode()
+ node.set_maximum_size(10)
+ result = node.map(None, ("foo bar",), "baz quux")
+ self.assertEqual(("foo bar", [("", node)]), result)
+ self.assertTrue(10 < node._current_size())
+
+ def test_map_exceeding_max_size_second_entry_early_difference_new(self):
+ node = LeafNode()
+ node.set_maximum_size(10)
+ node.map(None, ("foo bar",), "baz quux")
+ prefix, result = list(node.map(None, ("blue",), "red"))
+ self.assertEqual("", prefix)
+ self.assertEqual(2, len(result))
+ split_chars = set([result[0][0], result[1][0]])
+ self.assertEqual(set(["f", "b"]), split_chars)
+ nodes = dict(result)
+ node = nodes["f"]
+ self.assertEqual({("foo bar",): "baz quux"}, self.to_dict(node, None))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(1, node._key_width)
+ node = nodes["b"]
+ self.assertEqual({("blue",): "red"}, self.to_dict(node, None))
+ self.assertEqual(10, node.maximum_size)
+ self.assertEqual(1, node._key_width)
+
+ def test_map_first(self):
+ node = LeafNode()
+ result = node.map(None, ("foo bar",), "baz quux")
+ self.assertEqual(("foo bar", [("", node)]), result)
+ self.assertEqual({("foo bar",):"baz quux"}, self.to_dict(node, None))
+ self.assertEqual(1, len(node))
+
+ def test_map_second(self):
+ node = LeafNode()
+ node.map(None, ("foo bar",), "baz quux")
+ result = node.map(None, ("bingo",), "bango")
+ self.assertEqual(("", [("", node)]), result)
+ self.assertEqual({("foo bar",):"baz quux", ("bingo",):"bango"},
+ self.to_dict(node, None))
+ self.assertEqual(2, len(node))
+
+ def test_map_replacement(self):
+ node = LeafNode()
+ node.map(None, ("foo bar",), "baz quux")
+ result = node.map(None, ("foo bar",), "bango")
+ self.assertEqual(("foo bar", [("", node)]), result)
+ self.assertEqual({("foo bar",): "bango"},
+ self.to_dict(node, None))
+ self.assertEqual(1, len(node))
+
+ def test_serialise_empty(self):
+ store = self.get_chk_bytes()
+ node = LeafNode()
+ node.set_maximum_size(10)
+ expected_key = ("sha1:f34c3f0634ea3f85953dffa887620c0a5b1f4a51",)
+ self.assertEqual([expected_key],
+ list(node.serialise(store)))
+ self.assertEqual("chkleaf:\n10\n1\n0\n\n", self.read_bytes(store, expected_key))
+ self.assertEqual(expected_key, node.key())
+
+ def test_serialise_items(self):
+ store = self.get_chk_bytes()
+ node = LeafNode()
+ node.set_maximum_size(10)
+ node.map(None, ("foo bar",), "baz quux")
+ expected_key = ("sha1:f89fac7edfc6bdb1b1b54a556012ff0c646ef5e0",)
+ self.assertEqual('foo bar', node._common_serialised_prefix)
+ self.assertEqual([expected_key],
+ list(node.serialise(store)))
+ self.assertEqual("chkleaf:\n10\n1\n1\nfoo bar\n\x001\nbaz quux\n",
+ self.read_bytes(store, expected_key))
+ self.assertEqual(expected_key, node.key())
+
+ def test_unique_serialised_prefix_empty_new(self):
+ node = LeafNode()
+ self.assertIs(None, node._compute_search_prefix())
+
+ def test_unique_serialised_prefix_one_item_new(self):
+ node = LeafNode()
+ node.map(None, ("foo bar", "baz"), "baz quux")
+ self.assertEqual("foo bar\x00baz", node._compute_search_prefix())
+
+ def test_unmap_missing(self):
+ node = LeafNode()
+ self.assertRaises(KeyError, node.unmap, None, ("foo bar",))
+
+ def test_unmap_present(self):
+ node = LeafNode()
+ node.map(None, ("foo bar",), "baz quux")
+ result = node.unmap(None, ("foo bar",))
+ self.assertEqual(node, result)
+ self.assertEqual({}, self.to_dict(node, None))
+ self.assertEqual(0, len(node))
+
+ def test_map_maintains_common_prefixes(self):
+ node = LeafNode()
+ node._key_width = 2
+ node.map(None, ("foo bar", "baz"), "baz quux")
+ self.assertEqual('foo bar\x00baz', node._search_prefix)
+ self.assertEqual('foo bar\x00baz', node._common_serialised_prefix)
+ node.map(None, ("foo bar", "bing"), "baz quux")
+ self.assertEqual('foo bar\x00b', node._search_prefix)
+ self.assertEqual('foo bar\x00b', node._common_serialised_prefix)
+ node.map(None, ("fool", "baby"), "baz quux")
+ self.assertEqual('foo', node._search_prefix)
+ self.assertEqual('foo', node._common_serialised_prefix)
+ node.map(None, ("foo bar", "baz"), "replaced")
+ self.assertEqual('foo', node._search_prefix)
+ self.assertEqual('foo', node._common_serialised_prefix)
+ node.map(None, ("very", "different"), "value")
+ self.assertEqual('', node._search_prefix)
+ self.assertEqual('', node._common_serialised_prefix)
+
+ def test_unmap_maintains_common_prefixes(self):
+ node = LeafNode()
+ node._key_width = 2
+ node.map(None, ("foo bar", "baz"), "baz quux")
+ node.map(None, ("foo bar", "bing"), "baz quux")
+ node.map(None, ("fool", "baby"), "baz quux")
+ node.map(None, ("very", "different"), "value")
+ self.assertEqual('', node._search_prefix)
+ self.assertEqual('', node._common_serialised_prefix)
+ node.unmap(None, ("very", "different"))
+ self.assertEqual("foo", node._search_prefix)
+ self.assertEqual("foo", node._common_serialised_prefix)
+ node.unmap(None, ("fool", "baby"))
+ self.assertEqual('foo bar\x00b', node._search_prefix)
+ self.assertEqual('foo bar\x00b', node._common_serialised_prefix)
+ node.unmap(None, ("foo bar", "baz"))
+ self.assertEqual('foo bar\x00bing', node._search_prefix)
+ self.assertEqual('foo bar\x00bing', node._common_serialised_prefix)
+ node.unmap(None, ("foo bar", "bing"))
+ self.assertEqual(None, node._search_prefix)
+ self.assertEqual(None, node._common_serialised_prefix)
+
+
+class TestInternalNode(TestCaseWithStore):
+
+ def test_add_node_empty_new(self):
+ node = InternalNode('fo')
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("foo",), "bar")
+ node.add_node("foo", child)
+ # Note that node isn't strictly valid now as a tree (only one child),
+ # but that's ok for this test.
+ # The first child defines the node's width:
+ self.assertEqual(3, node._node_width)
+ # We should be able to iterate over the contents without doing IO.
+ self.assertEqual({('foo',): 'bar'}, self.to_dict(node, None))
+ # The length should be known:
+ self.assertEqual(1, len(node))
+ # serialising the node should serialise the child and the node.
+ chk_bytes = self.get_chk_bytes()
+ keys = list(node.serialise(chk_bytes))
+ child_key = child.serialise(chk_bytes)[0]
+ self.assertEqual(
+ [child_key, ('sha1:cf67e9997d8228a907c1f5bfb25a8bd9cd916fac',)],
+ keys)
+ # We should be able to access deserialised content.
+ bytes = self.read_bytes(chk_bytes, keys[1])
+ node = chk_map._deserialise(bytes, keys[1], None)
+ self.assertEqual(1, len(node))
+ self.assertEqual({('foo',): 'bar'}, self.to_dict(node, chk_bytes))
+ self.assertEqual(3, node._node_width)
+
+ def test_add_node_resets_key_new(self):
+ node = InternalNode('fo')
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("foo",), "bar")
+ node.add_node("foo", child)
+ chk_bytes = self.get_chk_bytes()
+ keys = list(node.serialise(chk_bytes))
+ self.assertEqual(keys[1], node._key)
+ node.add_node("fos", child)
+ self.assertEqual(None, node._key)
+
+# def test_add_node_empty_oversized_one_ok_new(self):
+# def test_add_node_one_oversized_second_kept_minimum_fan(self):
+# def test_add_node_two_oversized_third_kept_minimum_fan(self):
+# def test_add_node_one_oversized_second_splits_errors(self):
+
+ def test__iter_nodes_no_key_filter(self):
+ node = InternalNode('')
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("foo",), "bar")
+ node.add_node("f", child)
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("bar",), "baz")
+ node.add_node("b", child)
+
+ for child, node_key_filter in node._iter_nodes(None, key_filter=None):
+ self.assertEqual(None, node_key_filter)
+
+ def test__iter_nodes_splits_key_filter(self):
+ node = InternalNode('')
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("foo",), "bar")
+ node.add_node("f", child)
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("bar",), "baz")
+ node.add_node("b", child)
+
+ # foo and bar both match exactly one leaf node, but 'cat' should not
+ # match any, and should not be placed in one.
+ key_filter = (('foo',), ('bar',), ('cat',))
+ for child, node_key_filter in node._iter_nodes(None,
+ key_filter=key_filter):
+ # each child could only match one key filter, so make sure it was
+ # properly filtered
+ self.assertEqual(1, len(node_key_filter))
+
+ def test__iter_nodes_with_multiple_matches(self):
+ node = InternalNode('')
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("foo",), "val")
+ child.map(None, ("fob",), "val")
+ node.add_node("f", child)
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("bar",), "val")
+ child.map(None, ("baz",), "val")
+ node.add_node("b", child)
+
+ # Note that 'ram' doesn't match anything, so it should be freely
+ # ignored
+ key_filter = (('foo',), ('fob',), ('bar',), ('baz',), ('ram',))
+ for child, node_key_filter in node._iter_nodes(None,
+ key_filter=key_filter):
+ # each child could match two key filters, so make sure they were
+ # both included.
+ self.assertEqual(2, len(node_key_filter))
+
+ def make_fo_fa_node(self):
+ node = InternalNode('f')
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("foo",), "val")
+ child.map(None, ("fob",), "val")
+ node.add_node('fo', child)
+ child = LeafNode()
+ child.set_maximum_size(100)
+ child.map(None, ("far",), "val")
+ child.map(None, ("faz",), "val")
+ node.add_node("fa", child)
+ return node
+
+ def test__iter_nodes_single_entry(self):
+ node = self.make_fo_fa_node()
+ key_filter = [('foo',)]
+ nodes = list(node._iter_nodes(None, key_filter=key_filter))
+ self.assertEqual(1, len(nodes))
+ self.assertEqual(key_filter, nodes[0][1])
+
+ def test__iter_nodes_single_entry_misses(self):
+ node = self.make_fo_fa_node()
+ key_filter = [('bar',)]
+ nodes = list(node._iter_nodes(None, key_filter=key_filter))
+ self.assertEqual(0, len(nodes))
+
+ def test__iter_nodes_mixed_key_width(self):
+ node = self.make_fo_fa_node()
+ key_filter = [('foo', 'bar'), ('foo',), ('fo',), ('b',)]
+ nodes = list(node._iter_nodes(None, key_filter=key_filter))
+ self.assertEqual(1, len(nodes))
+ matches = key_filter[:]
+ matches.remove(('b',))
+ self.assertEqual(sorted(matches), sorted(nodes[0][1]))
+
+ def test__iter_nodes_match_all(self):
+ node = self.make_fo_fa_node()
+ key_filter = [('foo', 'bar'), ('foo',), ('fo',), ('f',)]
+ nodes = list(node._iter_nodes(None, key_filter=key_filter))
+ self.assertEqual(2, len(nodes))
+
+ def test__iter_nodes_fixed_widths_and_misses(self):
+ node = self.make_fo_fa_node()
+ # foo and faa should both match one child, baz should miss
+ key_filter = [('foo',), ('faa',), ('baz',)]
+ nodes = list(node._iter_nodes(None, key_filter=key_filter))
+ self.assertEqual(2, len(nodes))
+ for node, matches in nodes:
+ self.assertEqual(1, len(matches))
+
+ def test_iteritems_empty_new(self):
+ node = InternalNode()
+ self.assertEqual([], sorted(node.iteritems(None)))
+
+ def test_iteritems_two_children(self):
+ node = InternalNode()
+ leaf1 = LeafNode()
+ leaf1.map(None, ('foo bar',), 'quux')
+ leaf2 = LeafNode()
+ leaf2.map(None, ('strange',), 'beast')
+ node.add_node("f", leaf1)
+ node.add_node("s", leaf2)
+ self.assertEqual([(('foo bar',), 'quux'), (('strange',), 'beast')],
+ sorted(node.iteritems(None)))
+
+ def test_iteritems_two_children_partial(self):
+ node = InternalNode()
+ leaf1 = LeafNode()
+ leaf1.map(None, ('foo bar',), 'quux')
+ leaf2 = LeafNode()
+ leaf2.map(None, ('strange',), 'beast')
+ node.add_node("f", leaf1)
+ # This sets up a path that should not be followed - it will error if
+ # the code tries to.
+ node._items['f'] = None
+ node.add_node("s", leaf2)
+ self.assertEqual([(('strange',), 'beast')],
+ sorted(node.iteritems(None, [('strange',), ('weird',)])))
+
+ def test_iteritems_two_children_with_hash(self):
+ search_key_func = chk_map.search_key_registry.get('hash-255-way')
+ node = InternalNode(search_key_func=search_key_func)
+ leaf1 = LeafNode(search_key_func=search_key_func)
+ leaf1.map(None, StaticTuple('foo bar',), 'quux')
+ leaf2 = LeafNode(search_key_func=search_key_func)
+ leaf2.map(None, StaticTuple('strange',), 'beast')
+ self.assertEqual('\xbeF\x014', search_key_func(StaticTuple('foo bar',)))
+ self.assertEqual('\x85\xfa\xf7K', search_key_func(StaticTuple('strange',)))
+ node.add_node("\xbe", leaf1)
+ # This sets up a path that should not be followed - it will error if
+ # the code tries to.
+ node._items['\xbe'] = None
+ node.add_node("\x85", leaf2)
+ self.assertEqual([(('strange',), 'beast')],
+ sorted(node.iteritems(None, [StaticTuple('strange',),
+ StaticTuple('weird',)])))
+
+ def test_iteritems_partial_empty(self):
+ node = InternalNode()
+ self.assertEqual([], sorted(node.iteritems([('missing',)])))
+
+ def test_map_to_new_child_new(self):
+ chkmap = self._get_map({('k1',):'foo', ('k2',):'bar'}, maximum_size=10)
+ chkmap._ensure_root()
+ node = chkmap._root_node
+ # Ensure test validity: nothing paged in below the root.
+ self.assertEqual(2,
+ len([value for value in node._items.values()
+ if type(value) is StaticTuple]))
+ # now, mapping to k3 should add a k3 leaf
+ prefix, nodes = node.map(None, ('k3',), 'quux')
+ self.assertEqual("k", prefix)
+ self.assertEqual([("", node)], nodes)
+ # check new child details
+ child = node._items['k3']
+ self.assertIsInstance(child, LeafNode)
+ self.assertEqual(1, len(child))
+ self.assertEqual({('k3',): 'quux'}, self.to_dict(child, None))
+ self.assertEqual(None, child._key)
+ self.assertEqual(10, child.maximum_size)
+ self.assertEqual(1, child._key_width)
+ # Check overall structure:
+ self.assertEqual(3, len(chkmap))
+ self.assertEqual({('k1',): 'foo', ('k2',): 'bar', ('k3',): 'quux'},
+ self.to_dict(chkmap))
+ # serialising should only serialise the new data - k3 and the internal
+ # node.
+ keys = list(node.serialise(chkmap._store))
+ child_key = child.serialise(chkmap._store)[0]
+ self.assertEqual([child_key, keys[1]], keys)
+
+ def test_map_to_child_child_splits_new(self):
+ chkmap = self._get_map({('k1',):'foo', ('k22',):'bar'}, maximum_size=10)
+ # Check for the canonical root value for this tree:
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'k1' LeafNode\n"
+ " ('k1',) 'foo'\n"
+ " 'k2' LeafNode\n"
+ " ('k22',) 'bar'\n"
+ , chkmap._dump_tree())
+ # _dump_tree pages everything in, so reload using just the root
+ chkmap = CHKMap(chkmap._store, chkmap._root_node)
+ chkmap._ensure_root()
+ node = chkmap._root_node
+ # Ensure test validity: nothing paged in below the root.
+ self.assertEqual(2,
+ len([value for value in node._items.values()
+ if type(value) is StaticTuple]))
+ # now, mapping to k23 causes k22 ('k2' in node) to split into k22 and
+ # k23, which for simplicity in the current implementation generates
+ # a new internal node between node and k22/k23.
+ prefix, nodes = node.map(chkmap._store, ('k23',), 'quux')
+ self.assertEqual("k", prefix)
+ self.assertEqual([("", node)], nodes)
+ # check new child details
+ child = node._items['k2']
+ self.assertIsInstance(child, InternalNode)
+ self.assertEqual(2, len(child))
+ self.assertEqual({('k22',): 'bar', ('k23',): 'quux'},
+ self.to_dict(child, None))
+ self.assertEqual(None, child._key)
+ self.assertEqual(10, child.maximum_size)
+ self.assertEqual(1, child._key_width)
+ self.assertEqual(3, child._node_width)
+ # Check overall structure:
+ self.assertEqual(3, len(chkmap))
+ self.assertEqual({('k1',): 'foo', ('k22',): 'bar', ('k23',): 'quux'},
+ self.to_dict(chkmap))
+ # serialising should only serialise the new data - although k22 hasn't
+ # changed because it's a special corner case (splitting with only one
+ # key leaves one node unaltered), in general k22 is serialised, so we
+ # expect k22, k23, the new internal node, and node, to be serialised.
+ keys = list(node.serialise(chkmap._store))
+ child_key = child._key
+ k22_key = child._items['k22']._key
+ k23_key = child._items['k23']._key
+ self.assertEqual([k22_key, k23_key, child_key, node.key()], keys)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'k1' LeafNode\n"
+ " ('k1',) 'foo'\n"
+ " 'k2' InternalNode\n"
+ " 'k22' LeafNode\n"
+ " ('k22',) 'bar'\n"
+ " 'k23' LeafNode\n"
+ " ('k23',) 'quux'\n"
+ , chkmap._dump_tree())
+
+ def test__search_prefix_filter_with_hash(self):
+ search_key_func = chk_map.search_key_registry.get('hash-16-way')
+ node = InternalNode(search_key_func=search_key_func)
+ node._key_width = 2
+ node._node_width = 4
+ self.assertEqual('E8B7BE43\x0071BEEFF9', search_key_func(
+ StaticTuple('a', 'b')))
+ self.assertEqual('E8B7', node._search_prefix_filter(
+ StaticTuple('a', 'b')))
+ self.assertEqual('E8B7', node._search_prefix_filter(
+ StaticTuple('a',)))
+
+ def test_unmap_k23_from_k1_k22_k23_gives_k1_k22_tree_new(self):
+ chkmap = self._get_map(
+ {('k1',):'foo', ('k22',):'bar', ('k23',): 'quux'}, maximum_size=10)
+ # Check we have the expected tree.
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'k1' LeafNode\n"
+ " ('k1',) 'foo'\n"
+ " 'k2' InternalNode\n"
+ " 'k22' LeafNode\n"
+ " ('k22',) 'bar'\n"
+ " 'k23' LeafNode\n"
+ " ('k23',) 'quux'\n"
+ , chkmap._dump_tree())
+ chkmap = CHKMap(chkmap._store, chkmap._root_node)
+ chkmap._ensure_root()
+ node = chkmap._root_node
+ # unmapping k23 should give us a root, with k1 and k22 as direct
+ # children.
+ result = node.unmap(chkmap._store, ('k23',))
+ # check the pointed-at object within node - k2 should now point at the
+ # k22 leaf (which has been paged in to see if we can collapse the tree)
+ child = node._items['k2']
+ self.assertIsInstance(child, LeafNode)
+ self.assertEqual(1, len(child))
+ self.assertEqual({('k22',): 'bar'},
+ self.to_dict(child, None))
+ # Check overall structure is intact:
+ self.assertEqual(2, len(chkmap))
+ self.assertEqual({('k1',): 'foo', ('k22',): 'bar'},
+ self.to_dict(chkmap))
+ # serialising should only serialise the new data - the root node.
+ keys = list(node.serialise(chkmap._store))
+ self.assertEqual([keys[-1]], keys)
+ chkmap = CHKMap(chkmap._store, keys[-1])
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'k1' LeafNode\n"
+ " ('k1',) 'foo'\n"
+ " 'k2' LeafNode\n"
+ " ('k22',) 'bar'\n"
+ , chkmap._dump_tree())
+
+ def test_unmap_k1_from_k1_k22_k23_gives_k22_k23_tree_new(self):
+ chkmap = self._get_map(
+ {('k1',):'foo', ('k22',):'bar', ('k23',): 'quux'}, maximum_size=10)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'k1' LeafNode\n"
+ " ('k1',) 'foo'\n"
+ " 'k2' InternalNode\n"
+ " 'k22' LeafNode\n"
+ " ('k22',) 'bar'\n"
+ " 'k23' LeafNode\n"
+ " ('k23',) 'quux'\n"
+ , chkmap._dump_tree())
+ orig_root = chkmap._root_node
+ chkmap = CHKMap(chkmap._store, orig_root)
+ chkmap._ensure_root()
+ node = chkmap._root_node
+ k2_ptr = node._items['k2']
+ # unmapping k1 should give us a root, with k22 and k23 as direct
+ # children, and should not have needed to page in the subtree.
+ result = node.unmap(chkmap._store, ('k1',))
+ self.assertEqual(k2_ptr, result)
+ chkmap = CHKMap(chkmap._store, orig_root)
+ # Unmapping at the CHKMap level should switch to the new root
+ chkmap.unmap(('k1',))
+ self.assertEqual(k2_ptr, chkmap._root_node)
+ self.assertEqualDiff("'' InternalNode\n"
+ " 'k22' LeafNode\n"
+ " ('k22',) 'bar'\n"
+ " 'k23' LeafNode\n"
+ " ('k23',) 'quux'\n"
+ , chkmap._dump_tree())
+
+
+# leaf:
+# map -> fits - done
+# map -> doesn't fit - shrink from left till fits
+# key data to return: the common prefix, new nodes.
+
+# unmap -> how to tell if siblings can be combined.
+# combining leaf nodes means expanding the prefix to the left; so gather the size of
+# all the leaf nodes addressed by expanding the prefix by 1; if any adjacent node
+# is an internal node, we know that that is a dense subtree - can't combine.
+# otherwise as soon as the sum of serialised values exceeds the split threshold
+# we know we can't combine - stop.
+# unmap -> key return data - space in node, common prefix length? and key count
+# internal:
+# variable length prefixes? -> later start with fixed width to get something going
+# map -> fits - update pointer to leaf
+# return [prefix and node] - seems sound.
+# map -> doesn't fit - find unique prefix and shift right
+# create internal nodes for all the partitions, return list of unique
+# prefixes and nodes.
+# map -> new prefix - create a leaf
+# unmap -> if child key count 0, remove
+# unmap -> return space in node, common prefix length? (why?), key count
+# map:
+# map, if 1 node returned, use it, otherwise make an internal and populate.
+# map - unmap - if empty, use empty leafnode (avoids special cases in driver
+# code)
+# map inits as empty leafnode.
+# tools:
+# visualiser
+
+
+# how to handle:
+# AA, AB, AC, AD, BA
+# packed internal node - ideal:
+# AA, AB, AC, AD, BA
+# single byte fanout - A,B, AA,AB,AC,AD, BA
+# build orders:
+# BA
+# AB - split, but we want to end up with AB, BA, in one node, with
+# 1-4K get0
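+#
+# An illustrative sketch (not part of the original notes) of the leaf split
+# behaviour described above, mirroring the LeafNode API exercised by the
+# tests earlier in this file:
+#
+#   node = LeafNode()
+#   node.set_maximum_size(10)
+#   node.map(None, ("foo bar",), "baz quux")
+#   prefix, new_nodes = node.map(None, ("blue",), "red")
+#   # the node no longer fits, so map() returns the common search prefix
+#   # ('' here) and a list of (prefix, child) pairs - one new leaf per
+#   # split character ('f' and 'b').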
+
+
+class TestCHKMapDifference(TestCaseWithExampleMaps):
+
+ def get_difference(self, new_roots, old_roots,
+ search_key_func=None):
+ if search_key_func is None:
+ search_key_func = chk_map._search_key_plain
+ return chk_map.CHKMapDifference(self.get_chk_bytes(),
+ new_roots, old_roots, search_key_func)
+
+ def test__init__(self):
+ c_map = self.make_root_only_map()
+ key1 = c_map.key()
+ c_map.map(('aaa',), 'new aaa content')
+ key2 = c_map._save()
+ diff = self.get_difference([key2], [key1])
+ self.assertEqual(set([key1]), diff._all_old_chks)
+ self.assertEqual([], diff._old_queue)
+ self.assertEqual([], diff._new_queue)
+
+ def help__read_all_roots(self, search_key_func):
+ c_map = self.make_root_only_map(search_key_func=search_key_func)
+ key1 = c_map.key()
+ c_map.map(('aaa',), 'new aaa content')
+ key2 = c_map._save()
+ diff = self.get_difference([key2], [key1], search_key_func)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key2], root_results)
+ # We should have queued up only items that aren't in the old
+ # set
+ self.assertEqual([(('aaa',), 'new aaa content')],
+ diff._new_item_queue)
+ self.assertEqual([], diff._new_queue)
+ # And there are no old references, so that queue should be
+ # empty
+ self.assertEqual([], diff._old_queue)
+
+ def test__read_all_roots_plain(self):
+ self.help__read_all_roots(search_key_func=chk_map._search_key_plain)
+
+ def test__read_all_roots_16(self):
+ self.help__read_all_roots(search_key_func=chk_map._search_key_16)
+
+ def test__read_all_roots_skips_known_old(self):
+ c_map = self.make_one_deep_map(chk_map._search_key_plain)
+ key1 = c_map.key()
+ c_map2 = self.make_root_only_map(chk_map._search_key_plain)
+ key2 = c_map2.key()
+ diff = self.get_difference([key2], [key1], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ # We should have no results. key2 is completely contained within key1,
+ # and we should have seen that in the first pass
+ self.assertEqual([], root_results)
+
+ def test__read_all_roots_prepares_queues(self):
+ c_map = self.make_one_deep_map(chk_map._search_key_plain)
+ key1 = c_map.key()
+ c_map._dump_tree() # load everything
+ key1_a = c_map._root_node._items['a'].key()
+ c_map.map(('abb',), 'new abb content')
+ key2 = c_map._save()
+ key2_a = c_map._root_node._items['a'].key()
+ diff = self.get_difference([key2], [key1], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key2], root_results)
+ # At this point, we should have queued up only the 'a' Leaf on both
+ # sides, both 'c' and 'd' are known to not have changed on both sides
+ self.assertEqual([key2_a], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+ self.assertEqual([key1_a], diff._old_queue)
+
+ def test__read_all_roots_multi_new_prepares_queues(self):
+ c_map = self.make_one_deep_map(chk_map._search_key_plain)
+ key1 = c_map.key()
+ c_map._dump_tree() # load everything
+ key1_a = c_map._root_node._items['a'].key()
+ key1_c = c_map._root_node._items['c'].key()
+ c_map.map(('abb',), 'new abb content')
+ key2 = c_map._save()
+ key2_a = c_map._root_node._items['a'].key()
+ key2_c = c_map._root_node._items['c'].key()
+ c_map = chk_map.CHKMap(self.get_chk_bytes(), key1,
+ chk_map._search_key_plain)
+ c_map.map(('ccc',), 'new ccc content')
+ key3 = c_map._save()
+ key3_a = c_map._root_node._items['a'].key()
+ key3_c = c_map._root_node._items['c'].key()
+ diff = self.get_difference([key2, key3], [key1],
+ chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual(sorted([key2, key3]), sorted(root_results))
+ # We should have queued up key2_a and key3_c, but not key2_c or key3_a
+ self.assertEqual([key2_a, key3_c], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+ # And we should have queued up both a and c for the old set
+ self.assertEqual([key1_a, key1_c], diff._old_queue)
+
+ def test__read_all_roots_different_depths(self):
+ c_map = self.make_two_deep_map(chk_map._search_key_plain)
+ c_map._dump_tree() # load everything
+ key1 = c_map.key()
+ key1_a = c_map._root_node._items['a'].key()
+ key1_c = c_map._root_node._items['c'].key()
+ key1_d = c_map._root_node._items['d'].key()
+
+ c_map2 = self.make_one_deep_two_prefix_map(chk_map._search_key_plain)
+ c_map2._dump_tree()
+ key2 = c_map2.key()
+ key2_aa = c_map2._root_node._items['aa'].key()
+ key2_ad = c_map2._root_node._items['ad'].key()
+
+ diff = self.get_difference([key2], [key1], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key2], root_results)
+ # Only the 'a' subset should be queued up, since 'c' and 'd' cannot be
+ # present
+ self.assertEqual([key1_a], diff._old_queue)
+ self.assertEqual([key2_aa, key2_ad], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ diff = self.get_difference([key1], [key2], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key1], root_results)
+
+ self.assertEqual([key2_aa, key2_ad], diff._old_queue)
+ self.assertEqual([key1_a, key1_c, key1_d], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__read_all_roots_different_depths_16(self):
+ c_map = self.make_two_deep_map(chk_map._search_key_16)
+ c_map._dump_tree() # load everything
+ key1 = c_map.key()
+ key1_2 = c_map._root_node._items['2'].key()
+ key1_4 = c_map._root_node._items['4'].key()
+ key1_C = c_map._root_node._items['C'].key()
+ key1_F = c_map._root_node._items['F'].key()
+
+ c_map2 = self.make_one_deep_two_prefix_map(chk_map._search_key_16)
+ c_map2._dump_tree()
+ key2 = c_map2.key()
+ key2_F0 = c_map2._root_node._items['F0'].key()
+ key2_F3 = c_map2._root_node._items['F3'].key()
+ key2_F4 = c_map2._root_node._items['F4'].key()
+ key2_FD = c_map2._root_node._items['FD'].key()
+
+ diff = self.get_difference([key2], [key1], chk_map._search_key_16)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key2], root_results)
+ # Only the subset of keys that may be present should be queued up.
+ self.assertEqual([key1_F], diff._old_queue)
+ self.assertEqual(sorted([key2_F0, key2_F3, key2_F4, key2_FD]),
+ sorted(diff._new_queue))
+ self.assertEqual([], diff._new_item_queue)
+
+ diff = self.get_difference([key1], [key2], chk_map._search_key_16)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key1], root_results)
+
+ self.assertEqual(sorted([key2_F0, key2_F3, key2_F4, key2_FD]),
+ sorted(diff._old_queue))
+ self.assertEqual(sorted([key1_2, key1_4, key1_C, key1_F]),
+ sorted(diff._new_queue))
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__read_all_roots_mixed_depth(self):
+ c_map = self.make_one_deep_two_prefix_map(chk_map._search_key_plain)
+ c_map._dump_tree() # load everything
+ key1 = c_map.key()
+ key1_aa = c_map._root_node._items['aa'].key()
+ key1_ad = c_map._root_node._items['ad'].key()
+
+ c_map2 = self.make_one_deep_one_prefix_map(chk_map._search_key_plain)
+ c_map2._dump_tree()
+ key2 = c_map2.key()
+ key2_a = c_map2._root_node._items['a'].key()
+ key2_b = c_map2._root_node._items['b'].key()
+
+ diff = self.get_difference([key2], [key1], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key2], root_results)
+ # 'ad' matches exactly 'a' on the other side, so it should be removed,
+ # and neither side should have it queued for walking
+ self.assertEqual([], diff._old_queue)
+ self.assertEqual([key2_b], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ diff = self.get_difference([key1], [key2], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key1], root_results)
+ # Note: This is technically not the 'true minimal' set that we could
+ # use. The reason is that 'a' was matched exactly to 'ad' (by sha
+ # sum). However, the code gets complicated in the case of more
+ # than one interesting key, so for now, we live with this.
+ # Consider revising, though benchmarking should first show it to be
+ # a real-world issue.
+ self.assertEqual([key2_a], diff._old_queue)
+ # self.assertEqual([], diff._old_queue)
+ self.assertEqual([key1_aa], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__read_all_roots_yields_extra_deep_records(self):
+ # This is slightly controversial, as we will yield a chk page that we
+ # might later on find out could be filtered out. (If a root node is
+ # referenced deeper in the old set.)
+ # However, even with stacking, we always have all chk pages that we
+ # will need. So as long as we filter out the referenced keys, we'll
+ # never run into problems.
+ # This allows us to yield a root node record immediately, without any
+ # buffering.
+ c_map = self.make_two_deep_map(chk_map._search_key_plain)
+ c_map._dump_tree() # load all keys
+ key1 = c_map.key()
+ key1_a = c_map._root_node._items['a'].key()
+ c_map2 = self.get_map({
+ ('acc',): 'initial acc content',
+ ('ace',): 'initial ace content',
+ }, maximum_size=100)
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('acc',) 'initial acc content'\n"
+ " ('ace',) 'initial ace content'\n",
+ c_map2._dump_tree())
+ key2 = c_map2.key()
+ diff = self.get_difference([key2], [key1], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key2], root_results)
+ # However, even though we have yielded the root node to be fetched,
+ # we should have enqueued all of the chk pages to be walked, so that we
+ # can find the keys if they are present
+ self.assertEqual([key1_a], diff._old_queue)
+ self.assertEqual([(('acc',), 'initial acc content'),
+ (('ace',), 'initial ace content'),
+ ], diff._new_item_queue)
+
+ def test__read_all_roots_multiple_targets(self):
+ c_map = self.make_root_only_map()
+ key1 = c_map.key()
+ c_map = self.make_one_deep_map()
+ key2 = c_map.key()
+ c_map._dump_tree()
+ key2_c = c_map._root_node._items['c'].key()
+ key2_d = c_map._root_node._items['d'].key()
+ c_map.map(('ccc',), 'new ccc value')
+ key3 = c_map._save()
+ key3_c = c_map._root_node._items['c'].key()
+ diff = self.get_difference([key2, key3], [key1],
+ chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual(sorted([key2, key3]), sorted(root_results))
+ self.assertEqual([], diff._old_queue)
+ # the key 'd' is interesting from key2 and key3, but should only be
+ # entered into the queue 1 time
+ self.assertEqual(sorted([key2_c, key3_c, key2_d]),
+ sorted(diff._new_queue))
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__read_all_roots_no_old(self):
+ # This is the 'initial branch' case. With nothing in the old
+ # set, we can just queue up all root nodes into the interesting queue, and
+ # then have them fast-path flushed via _flush_new_queue
+ c_map = self.make_two_deep_map()
+ key1 = c_map.key()
+ diff = self.get_difference([key1], [], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([], root_results)
+ self.assertEqual([], diff._old_queue)
+ self.assertEqual([key1], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ c_map2 = self.make_one_deep_map()
+ key2 = c_map2.key()
+ diff = self.get_difference([key1, key2], [], chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([], root_results)
+ self.assertEqual([], diff._old_queue)
+ self.assertEqual(sorted([key1, key2]), sorted(diff._new_queue))
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__read_all_roots_no_old_16(self):
+ c_map = self.make_two_deep_map(chk_map._search_key_16)
+ key1 = c_map.key()
+ diff = self.get_difference([key1], [], chk_map._search_key_16)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([], root_results)
+ self.assertEqual([], diff._old_queue)
+ self.assertEqual([key1], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ c_map2 = self.make_one_deep_map(chk_map._search_key_16)
+ key2 = c_map2.key()
+ diff = self.get_difference([key1, key2], [],
+ chk_map._search_key_16)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([], root_results)
+ self.assertEqual([], diff._old_queue)
+ self.assertEqual(sorted([key1, key2]),
+ sorted(diff._new_queue))
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__read_all_roots_multiple_old(self):
+ c_map = self.make_two_deep_map()
+ key1 = c_map.key()
+ c_map._dump_tree() # load everything
+ key1_a = c_map._root_node._items['a'].key()
+ c_map.map(('ccc',), 'new ccc value')
+ key2 = c_map._save()
+ key2_a = c_map._root_node._items['a'].key()
+ c_map.map(('add',), 'new add value')
+ key3 = c_map._save()
+ key3_a = c_map._root_node._items['a'].key()
+ diff = self.get_difference([key3], [key1, key2],
+ chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key3], root_results)
+ # the 'a' keys should not be queued up 2 times, since they are
+ # identical
+ self.assertEqual([key1_a], diff._old_queue)
+ self.assertEqual([key3_a], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+
+ def test__process_next_old_batched_no_dupes(self):
+ c_map = self.make_two_deep_map()
+ key1 = c_map.key()
+ c_map._dump_tree() # load everything
+ key1_a = c_map._root_node._items['a'].key()
+ key1_aa = c_map._root_node._items['a']._items['aa'].key()
+ key1_ab = c_map._root_node._items['a']._items['ab'].key()
+ key1_ac = c_map._root_node._items['a']._items['ac'].key()
+ key1_ad = c_map._root_node._items['a']._items['ad'].key()
+ c_map.map(('aaa',), 'new aaa value')
+ key2 = c_map._save()
+ key2_a = c_map._root_node._items['a'].key()
+ key2_aa = c_map._root_node._items['a']._items['aa'].key()
+ c_map.map(('acc',), 'new acc content')
+ key3 = c_map._save()
+ key3_a = c_map._root_node._items['a'].key()
+ key3_ac = c_map._root_node._items['a']._items['ac'].key()
+ diff = self.get_difference([key3], [key1, key2],
+ chk_map._search_key_plain)
+ root_results = [record.key for record in diff._read_all_roots()]
+ self.assertEqual([key3], root_results)
+ self.assertEqual(sorted([key1_a, key2_a]),
+ sorted(diff._old_queue))
+ self.assertEqual([key3_a], diff._new_queue)
+ self.assertEqual([], diff._new_item_queue)
+ diff._process_next_old()
+ # All of the old records should be brought in and queued up,
+ # but we should not have any duplicates
+ self.assertEqual(sorted([key1_aa, key1_ab, key1_ac, key1_ad, key2_aa]),
+ sorted(diff._old_queue))
+
+
+class TestIterInterestingNodes(TestCaseWithExampleMaps):
+
+ def get_map_key(self, a_dict, maximum_size=10):
+ c_map = self.get_map(a_dict, maximum_size=maximum_size)
+ return c_map.key()
+
+ def assertIterInteresting(self, records, items, interesting_keys,
+ old_keys):
+ """Check the result of iter_interesting_nodes.
+
+ Note that we no longer care how many steps are taken, etc, just that
+ the right contents are returned.
+
+ :param records: A list of record keys that should be yielded
+ :param items: A list of items (key,value) that should be yielded.
+ """
+ store = self.get_chk_bytes()
+ store._search_key_func = chk_map._search_key_plain
+ iter_nodes = chk_map.iter_interesting_nodes(store, interesting_keys,
+ old_keys)
+ record_keys = []
+ all_items = []
+ for record, new_items in iter_nodes:
+ if record is not None:
+ record_keys.append(record.key)
+ if new_items:
+ all_items.extend(new_items)
+ self.assertEqual(sorted(records), sorted(record_keys))
+ self.assertEqual(sorted(items), sorted(all_items))
+
+ def test_empty_to_one_keys(self):
+ target = self.get_map_key({('a',): 'content'})
+ self.assertIterInteresting([target],
+ [(('a',), 'content')],
+ [target], [])
+
+ def test_none_to_one_key(self):
+ basis = self.get_map_key({})
+ target = self.get_map_key({('a',): 'content'})
+ self.assertIterInteresting([target],
+ [(('a',), 'content')],
+ [target], [basis])
+
+ def test_one_to_none_key(self):
+ basis = self.get_map_key({('a',): 'content'})
+ target = self.get_map_key({})
+ self.assertIterInteresting([target],
+ [],
+ [target], [basis])
+
+ def test_common_pages(self):
+ basis = self.get_map_key({('a',): 'content',
+ ('b',): 'content',
+ ('c',): 'content',
+ })
+ target = self.get_map_key({('a',): 'content',
+ ('b',): 'other content',
+ ('c',): 'content',
+ })
+ target_map = CHKMap(self.get_chk_bytes(), target)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('a',) 'content'\n"
+ " 'b' LeafNode\n"
+ " ('b',) 'other content'\n"
+ " 'c' LeafNode\n"
+ " ('c',) 'content'\n",
+ target_map._dump_tree())
+ b_key = target_map._root_node._items['b'].key()
+ # This should return the root node, and the node for the 'b' key
+ self.assertIterInteresting([target, b_key],
+ [(('b',), 'other content')],
+ [target], [basis])
+
+ def test_common_sub_page(self):
+ basis = self.get_map_key({('aaa',): 'common',
+ ('c',): 'common',
+ })
+ target = self.get_map_key({('aaa',): 'common',
+ ('aab',): 'new',
+ ('c',): 'common',
+ })
+ target_map = CHKMap(self.get_chk_bytes(), target)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'common'\n"
+ " 'aab' LeafNode\n"
+ " ('aab',) 'new'\n"
+ " 'c' LeafNode\n"
+ " ('c',) 'common'\n",
+ target_map._dump_tree())
+ # The key for the internal aa node
+ a_key = target_map._root_node._items['a'].key()
+ # The key for the leaf aab node
+ # aaa_key = target_map._root_node._items['a']._items['aaa'].key()
+ aab_key = target_map._root_node._items['a']._items['aab'].key()
+ self.assertIterInteresting([target, a_key, aab_key],
+ [(('aab',), 'new')],
+ [target], [basis])
+
+ def test_common_leaf(self):
+ basis = self.get_map_key({})
+ target1 = self.get_map_key({('aaa',): 'common'})
+ target2 = self.get_map_key({('aaa',): 'common',
+ ('bbb',): 'new',
+ })
+ target3 = self.get_map_key({('aaa',): 'common',
+ ('aac',): 'other',
+ ('bbb',): 'new',
+ })
+ # The LeafNode containing 'aaa': 'common' occurs at 3 different levels.
+ # Once as a root node, once as a second layer, and once as a third
+ # layer. It should only be returned one time regardless
+ target1_map = CHKMap(self.get_chk_bytes(), target1)
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('aaa',) 'common'\n",
+ target1_map._dump_tree())
+ target2_map = CHKMap(self.get_chk_bytes(), target2)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('aaa',) 'common'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'new'\n",
+ target2_map._dump_tree())
+ target3_map = CHKMap(self.get_chk_bytes(), target3)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'common'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'other'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'new'\n",
+ target3_map._dump_tree())
+ aaa_key = target1_map._root_node.key()
+ b_key = target2_map._root_node._items['b'].key()
+ a_key = target3_map._root_node._items['a'].key()
+ aac_key = target3_map._root_node._items['a']._items['aac'].key()
+ self.assertIterInteresting(
+ [target1, target2, target3, a_key, aac_key, b_key],
+ [(('aaa',), 'common'), (('bbb',), 'new'), (('aac',), 'other')],
+ [target1, target2, target3], [basis])
+
+ self.assertIterInteresting(
+ [target2, target3, a_key, aac_key, b_key],
+ [(('bbb',), 'new'), (('aac',), 'other')],
+ [target2, target3], [target1])
+
+ # Technically, target1 could be filtered out, but since it is a root
+ # node, we yield it immediately, rather than waiting to find out much
+ # later on.
+ self.assertIterInteresting(
+ [target1],
+ [],
+ [target1], [target3])
+
+ def test_multiple_maps(self):
+ basis1 = self.get_map_key({('aaa',): 'common',
+ ('aab',): 'basis1',
+ })
+ basis2 = self.get_map_key({('bbb',): 'common',
+ ('bbc',): 'basis2',
+ })
+ target1 = self.get_map_key({('aaa',): 'common',
+ ('aac',): 'target1',
+ ('bbb',): 'common',
+ })
+ target2 = self.get_map_key({('aaa',): 'common',
+ ('bba',): 'target2',
+ ('bbb',): 'common',
+ })
+ target1_map = CHKMap(self.get_chk_bytes(), target1)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aaa' LeafNode\n"
+ " ('aaa',) 'common'\n"
+ " 'aac' LeafNode\n"
+ " ('aac',) 'target1'\n"
+ " 'b' LeafNode\n"
+ " ('bbb',) 'common'\n",
+ target1_map._dump_tree())
+ # The key for the target1 internal a node
+ a_key = target1_map._root_node._items['a'].key()
+ # The key for the leaf aac node
+ aac_key = target1_map._root_node._items['a']._items['aac'].key()
+
+ target2_map = CHKMap(self.get_chk_bytes(), target2)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('aaa',) 'common'\n"
+ " 'b' InternalNode\n"
+ " 'bba' LeafNode\n"
+ " ('bba',) 'target2'\n"
+ " 'bbb' LeafNode\n"
+ " ('bbb',) 'common'\n",
+ target2_map._dump_tree())
+ # The key for the target2 internal bb node
+ b_key = target2_map._root_node._items['b'].key()
+ # The key for the leaf bba node
+ bba_key = target2_map._root_node._items['b']._items['bba'].key()
+ self.assertIterInteresting(
+ [target1, target2, a_key, aac_key, b_key, bba_key],
+ [(('aac',), 'target1'), (('bba',), 'target2')],
+ [target1, target2], [basis1, basis2])
+
+ def test_multiple_maps_overlapping_common_new(self):
+ # Test that when a node is found both through the interesting_keys
+ # iteration for *some roots* and via the old keys iteration,
+ # it is still scanned for old refs and items, because it's
+ # not truly new. This requires 2 levels of InternalNodes to expose,
+ # because of the way the bootstrap in _find_children_info works.
+ # This suggests that the code is probably amenable to, and would
+ # benefit from, consolidation.
+ # How does this test work?
+ # 1) We need a second level InternalNode present in a basis tree.
+ # 2) We need a left side new tree that uses that InternalNode
+ # 3) We need a right side new tree that does not use that InternalNode
+ # at all but that has an unchanged *value* that was reachable inside
+ # that InternalNode
+ basis = self.get_map_key({
+ # InternalNode, unchanged in left:
+ ('aaa',): 'left',
+ ('abb',): 'right',
+ # Forces an InternalNode at 'a'
+ ('ccc',): 'common',
+ })
+ left = self.get_map_key({
+ # All of basis unchanged
+ ('aaa',): 'left',
+ ('abb',): 'right',
+ ('ccc',): 'common',
+ # And a new top level node so the root key is different
+ ('ddd',): 'change',
+ })
+ right = self.get_map_key({
+ # A value that is unchanged from basis and thus should be filtered
+ # out.
+ ('abb',): 'right'
+ })
+ basis_map = CHKMap(self.get_chk_bytes(), basis)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aa' LeafNode\n"
+ " ('aaa',) 'left'\n"
+ " 'ab' LeafNode\n"
+ " ('abb',) 'right'\n"
+ " 'c' LeafNode\n"
+ " ('ccc',) 'common'\n",
+ basis_map._dump_tree())
+ # Get left expected data
+ left_map = CHKMap(self.get_chk_bytes(), left)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' InternalNode\n"
+ " 'aa' LeafNode\n"
+ " ('aaa',) 'left'\n"
+ " 'ab' LeafNode\n"
+ " ('abb',) 'right'\n"
+ " 'c' LeafNode\n"
+ " ('ccc',) 'common'\n"
+ " 'd' LeafNode\n"
+ " ('ddd',) 'change'\n",
+ left_map._dump_tree())
+ # Keys from left side target
+ l_d_key = left_map._root_node._items['d'].key()
+ # Get right expected data
+ right_map = CHKMap(self.get_chk_bytes(), right)
+ self.assertEqualDiff(
+ "'' LeafNode\n"
+ " ('abb',) 'right'\n",
+ right_map._dump_tree())
+ # Keys from the right side target - none, the root is enough.
+ # Test behaviour
+ self.assertIterInteresting(
+ [right, left, l_d_key],
+ [(('ddd',), 'change')],
+ [left, right], [basis])
+
+ def test_multiple_maps_similar(self):
+ # We want to have a depth=2 tree, with multiple entries in each leaf
+ # node
+ basis = self.get_map_key({
+ ('aaa',): 'unchanged',
+ ('abb',): 'will change left',
+ ('caa',): 'unchanged',
+ ('cbb',): 'will change right',
+ }, maximum_size=60)
+ left = self.get_map_key({
+ ('aaa',): 'unchanged',
+ ('abb',): 'changed left',
+ ('caa',): 'unchanged',
+ ('cbb',): 'will change right',
+ }, maximum_size=60)
+ right = self.get_map_key({
+ ('aaa',): 'unchanged',
+ ('abb',): 'will change left',
+ ('caa',): 'unchanged',
+ ('cbb',): 'changed right',
+ }, maximum_size=60)
+ basis_map = CHKMap(self.get_chk_bytes(), basis)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('aaa',) 'unchanged'\n"
+ " ('abb',) 'will change left'\n"
+ " 'c' LeafNode\n"
+ " ('caa',) 'unchanged'\n"
+ " ('cbb',) 'will change right'\n",
+ basis_map._dump_tree())
+ # Get left expected data
+ left_map = CHKMap(self.get_chk_bytes(), left)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('aaa',) 'unchanged'\n"
+ " ('abb',) 'changed left'\n"
+ " 'c' LeafNode\n"
+ " ('caa',) 'unchanged'\n"
+ " ('cbb',) 'will change right'\n",
+ left_map._dump_tree())
+ # Keys from left side target
+ l_a_key = left_map._root_node._items['a'].key()
+ l_c_key = left_map._root_node._items['c'].key()
+ # Get right expected data
+ right_map = CHKMap(self.get_chk_bytes(), right)
+ self.assertEqualDiff(
+ "'' InternalNode\n"
+ " 'a' LeafNode\n"
+ " ('aaa',) 'unchanged'\n"
+ " ('abb',) 'will change left'\n"
+ " 'c' LeafNode\n"
+ " ('caa',) 'unchanged'\n"
+ " ('cbb',) 'changed right'\n",
+ right_map._dump_tree())
+ r_a_key = right_map._root_node._items['a'].key()
+ r_c_key = right_map._root_node._items['c'].key()
+ self.assertIterInteresting(
+ [right, left, l_a_key, r_c_key],
+ [(('abb',), 'changed left'), (('cbb',), 'changed right')],
+ [left, right], [basis])
diff --git a/bzrlib/tests/test_chk_serializer.py b/bzrlib/tests/test_chk_serializer.py
new file mode 100644
index 0000000..80ee0f5
--- /dev/null
+++ b/bzrlib/tests/test_chk_serializer.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.chk_serializer import (
+ chk_bencode_serializer,
+ )
+from bzrlib.revision import (
+ Revision,
+ )
+from bzrlib.tests import TestCase
+
+_working_revision_bencode1 = ('l'
+ 'l6:formati10ee'
+ 'l9:committer54:Canonical.com Patch Queue Manager <pqm@pqm.ubuntu.com>e'
+ 'l8:timezonei3600ee'
+ 'l10:propertiesd11:branch-nick6:+trunkee'
+ 'l9:timestamp14:1242300770.844e'
+ 'l11:revision-id50:pqm@pqm.ubuntu.com-20090514113250-jntkkpminfn3e0tze'
+ 'l10:parent-ids'
+ 'l'
+ '50:pqm@pqm.ubuntu.com-20090514104039-kggemn7lrretzpvc'
+ '48:jelmer@samba.org-20090510012654-jp9ufxquekaokbeo'
+ 'ee'
+ 'l14:inventory-sha140:4a2c7fb50e077699242cf6eb16a61779c7b680a7e'
+ 'l7:message35:(Jelmer) Move dpush to InterBranch.e'
+ 'e')
+
+_working_revision_bencode1_no_timezone = ('l'
+ 'l6:formati10ee'
+ 'l9:committer54:Canonical.com Patch Queue Manager <pqm@pqm.ubuntu.com>e'
+ 'l9:timestamp14:1242300770.844e'
+ 'l10:propertiesd11:branch-nick6:+trunkee'
+ 'l11:revision-id50:pqm@pqm.ubuntu.com-20090514113250-jntkkpminfn3e0tze'
+ 'l10:parent-ids'
+ 'l'
+ '50:pqm@pqm.ubuntu.com-20090514104039-kggemn7lrretzpvc'
+ '48:jelmer@samba.org-20090510012654-jp9ufxquekaokbeo'
+ 'ee'
+ 'l14:inventory-sha140:4a2c7fb50e077699242cf6eb16a61779c7b680a7e'
+ 'l7:message35:(Jelmer) Move dpush to InterBranch.e'
+ 'e')
+
+
+class TestBEncodeSerializer1(TestCase):
+ """Test BEncode serialization"""
+
+ def test_unpack_revision(self):
+ """Test unpacking a revision"""
+ rev = chk_bencode_serializer.read_revision_from_string(
+ _working_revision_bencode1)
+ self.assertEquals(rev.committer,
+ "Canonical.com Patch Queue Manager <pqm@pqm.ubuntu.com>")
+ self.assertEquals(rev.inventory_sha1,
+ "4a2c7fb50e077699242cf6eb16a61779c7b680a7")
+ self.assertEquals(["pqm@pqm.ubuntu.com-20090514104039-kggemn7lrretzpvc",
+ "jelmer@samba.org-20090510012654-jp9ufxquekaokbeo"],
+ rev.parent_ids)
+ self.assertEquals("(Jelmer) Move dpush to InterBranch.", rev.message)
+ self.assertEquals("pqm@pqm.ubuntu.com-20090514113250-jntkkpminfn3e0tz",
+ rev.revision_id)
+ self.assertEquals({"branch-nick": u"+trunk"}, rev.properties)
+ self.assertEquals(3600, rev.timezone)
+
+ def test_written_form_matches(self):
+ rev = chk_bencode_serializer.read_revision_from_string(
+ _working_revision_bencode1)
+ as_str = chk_bencode_serializer.write_revision_to_string(rev)
+ self.assertEqualDiff(_working_revision_bencode1, as_str)
+
+ def test_unpack_revision_no_timezone(self):
+ rev = chk_bencode_serializer.read_revision_from_string(
+ _working_revision_bencode1_no_timezone)
+ self.assertEquals(None, rev.timezone)
+
+ def assertRoundTrips(self, serializer, orig_rev):
+ text = serializer.write_revision_to_string(orig_rev)
+ new_rev = serializer.read_revision_from_string(text)
+ self.assertEquals(orig_rev, new_rev)
+
+ def test_roundtrips_non_ascii(self):
+ rev = Revision("revid1")
+ rev.message = u"\n\xe5me"
+ rev.committer = u'Erik B\xe5gfors'
+ rev.timestamp = 1242385452
+ rev.inventory_sha1 = "4a2c7fb50e077699242cf6eb16a61779c7b680a7"
+ rev.timezone = 3600
+ self.assertRoundTrips(chk_bencode_serializer, rev)
+
+ def test_roundtrips_xml_invalid_chars(self):
+ rev = Revision("revid1")
+ rev.message = "\t\ue000"
+ rev.committer = u'Erik B\xe5gfors'
+ rev.timestamp = 1242385452
+ rev.timezone = 3600
+ rev.inventory_sha1 = "4a2c7fb50e077699242cf6eb16a61779c7b680a7"
+ self.assertRoundTrips(chk_bencode_serializer, rev)
diff --git a/bzrlib/tests/test_chunk_writer.py b/bzrlib/tests/test_chunk_writer.py
new file mode 100644
index 0000000..6f1f8b9
--- /dev/null
+++ b/bzrlib/tests/test_chunk_writer.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests for writing fixed size chunks with compression."""
+
+import zlib
+
+from bzrlib import chunk_writer
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestWriter(TestCaseWithTransport):
+
+ def check_chunk(self, bytes_list, size):
+ bytes = ''.join(bytes_list)
+ self.assertEqual(size, len(bytes))
+ return zlib.decompress(bytes)
+
+ def test_chunk_writer_empty(self):
+ writer = chunk_writer.ChunkWriter(4096)
+ bytes_list, unused, padding = writer.finish()
+ node_bytes = self.check_chunk(bytes_list, 4096)
+ self.assertEqual("", node_bytes)
+ self.assertEqual(None, unused)
+ # Only a zlib header.
+ self.assertEqual(4088, padding)
+
+ def test_optimize_for_speed(self):
+ writer = chunk_writer.ChunkWriter(4096)
+ writer.set_optimize(for_size=False)
+ self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_speed,
+ (writer._max_repack, writer._max_zsync))
+ writer = chunk_writer.ChunkWriter(4096, optimize_for_size=False)
+ self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_speed,
+ (writer._max_repack, writer._max_zsync))
+
+ def test_optimize_for_size(self):
+ writer = chunk_writer.ChunkWriter(4096)
+ writer.set_optimize(for_size=True)
+ self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_size,
+ (writer._max_repack, writer._max_zsync))
+ writer = chunk_writer.ChunkWriter(4096, optimize_for_size=True)
+ self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_size,
+ (writer._max_repack, writer._max_zsync))
+
+ def test_some_data(self):
+ writer = chunk_writer.ChunkWriter(4096)
+ writer.write("foo bar baz quux\n")
+ bytes_list, unused, padding = writer.finish()
+ node_bytes = self.check_chunk(bytes_list, 4096)
+ self.assertEqual("foo bar baz quux\n", node_bytes)
+ self.assertEqual(None, unused)
+ # More than just the header..
+ self.assertEqual(4073, padding)
+
+ def test_too_much_data_does_not_exceed_size(self):
+ # Generate enough data to exceed 4K
+ lines = []
+ for group in range(48):
+ offset = group * 50
+ numbers = range(offset, offset + 50)
+ # Create a line with this group
+ lines.append(''.join(map(str, numbers)) + '\n')
+ writer = chunk_writer.ChunkWriter(4096)
+ for idx, line in enumerate(lines):
+ if writer.write(line):
+ self.assertEqual(46, idx)
+ break
+ bytes_list, unused, _ = writer.finish()
+ node_bytes = self.check_chunk(bytes_list, 4096)
+ # the first 46 lines should have been added
+ expected_bytes = ''.join(lines[:46])
+ self.assertEqualDiff(expected_bytes, node_bytes)
+ # And the line that failed should have been saved for us
+ self.assertEqual(lines[46], unused)
+
+ def test_too_much_data_preserves_reserve_space(self):
+ # Generate enough data to exceed 4K
+ lines = []
+ for group in range(48):
+ offset = group * 50
+ numbers = range(offset, offset + 50)
+ # Create a line with this group
+ lines.append(''.join(map(str, numbers)) + '\n')
+ writer = chunk_writer.ChunkWriter(4096, 256)
+ for idx, line in enumerate(lines):
+ if writer.write(line):
+ self.assertEqual(44, idx)
+ break
+ else:
+ self.fail('We were able to write all lines')
+ self.assertFalse(writer.write("A"*256, reserved=True))
+ bytes_list, unused, _ = writer.finish()
+ node_bytes = self.check_chunk(bytes_list, 4096)
+ # the first 44 lines should have been added
+ expected_bytes = ''.join(lines[:44]) + "A"*256
+ self.assertEqualDiff(expected_bytes, node_bytes)
+ # And the line that failed should have been saved for us
+ self.assertEqual(lines[44], unused)
diff --git a/bzrlib/tests/test_clean_tree.py b/bzrlib/tests/test_clean_tree.py
new file mode 100644
index 0000000..d4c5802
--- /dev/null
+++ b/bzrlib/tests/test_clean_tree.py
@@ -0,0 +1,137 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import errno
+import os
+import shutil
+import sys
+
+from bzrlib import tests, ui
+from bzrlib.controldir import (
+ ControlDir,
+ )
+from bzrlib.clean_tree import (
+ clean_tree,
+ iter_deletables,
+ )
+from bzrlib.osutils import (
+ has_symlinks,
+ )
+from bzrlib.tests import (
+ TestCaseInTempDir,
+ )
+
+
+class TestCleanTree(TestCaseInTempDir):
+
+ def test_symlinks(self):
+ if has_symlinks() is False:
+ return
+ os.mkdir('branch')
+ ControlDir.create_standalone_workingtree('branch')
+ os.symlink(os.path.realpath('no-die-please'), 'branch/die-please')
+ os.mkdir('no-die-please')
+ self.assertPathExists('branch/die-please')
+ os.mkdir('no-die-please/child')
+
+ clean_tree('branch', unknown=True, no_prompt=True)
+ self.assertPathExists('no-die-please')
+ self.assertPathExists('no-die-please/child')
+
+ def test_iter_deletable(self):
+ """Files are selected for deletion appropriately"""
+ os.mkdir('branch')
+ tree = ControlDir.create_standalone_workingtree('branch')
+ transport = tree.bzrdir.root_transport
+ transport.put_bytes('.bzrignore', '*~\n*.pyc\n.bzrignore\n')
+ transport.put_bytes('file.BASE', 'contents')
+ tree.lock_write()
+ try:
+ self.assertEqual(len(list(iter_deletables(tree, unknown=True))), 1)
+ transport.put_bytes('file', 'contents')
+ transport.put_bytes('file~', 'contents')
+ transport.put_bytes('file.pyc', 'contents')
+ dels = sorted([r for a,r in iter_deletables(tree, unknown=True)])
+ self.assertEqual(['file', 'file.BASE'], dels)
+
+ dels = [r for a,r in iter_deletables(tree, detritus=True)]
+ self.assertEqual(sorted(['file~', 'file.BASE']), dels)
+
+ dels = [r for a,r in iter_deletables(tree, ignored=True)]
+ self.assertEqual(sorted(['file~', 'file.pyc', '.bzrignore']),
+ dels)
+
+ dels = [r for a,r in iter_deletables(tree, unknown=False)]
+ self.assertEqual([], dels)
+ finally:
+ tree.unlock()
+
+ def test_delete_items_warnings(self):
+ """Ensure delete_items issues warnings on EACCES. (bug #430785)
+ """
+ def _dummy_unlink(path):
+ """unlink() files other than files named '0foo'.
+ """
+ if path.endswith('0foo'):
+ # Simulate 'permission denied' error.
+ # This should show up as a warning for the
+ # user.
+ e = OSError()
+ e.errno = errno.EACCES
+ raise e
+
+ def _dummy_rmtree(path, ignore_errors=False, onerror=None):
+ """Call user supplied error handler onerror.
+ """
+ # Indicate failure in removing 'path' if path is subdir0
+ # We later check to ensure that this is indicated
+ # to the user as a warning. We raise OSError to construct
+ # proper excinfo that needs to be passed to onerror
+ try:
+ raise OSError
+ except OSError, e:
+ e.errno = errno.EACCES
+ excinfo = sys.exc_info()
+ function = os.remove
+ if 'subdir0' not in path:
+ # onerror should show warning only for os.remove
+ # error. For any other failures the error should
+ # be shown to the user.
+ function = os.listdir
+ onerror(function=function,
+ path=path, excinfo=excinfo)
+
+ self.overrideAttr(os, 'unlink', _dummy_unlink)
+ self.overrideAttr(shutil, 'rmtree', _dummy_rmtree)
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdout=stdout, stderr=stderr)
+
+ ControlDir.create_standalone_workingtree('.')
+ self.build_tree(['0foo', '1bar', '2baz', 'subdir0/'])
+ clean_tree('.', unknown=True, no_prompt=True)
+ self.assertContainsRe(stderr.getvalue(),
+ 'bzr: warning: unable to remove.*0foo')
+ self.assertContainsRe(stderr.getvalue(),
+ 'bzr: warning: unable to remove.*subdir0')
+
+ # Ensure that errors other than EACCES during os.remove are
+ # not turned into warnings.
+ self.build_tree(['subdir1/'])
+ self.assertRaises(OSError, clean_tree, '.',
+ unknown=True, no_prompt=True)
+
diff --git a/bzrlib/tests/test_cleanup.py b/bzrlib/tests/test_cleanup.py
new file mode 100644
index 0000000..c34ce9c
--- /dev/null
+++ b/bzrlib/tests/test_cleanup.py
@@ -0,0 +1,293 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+import re
+
+from bzrlib.cleanup import (
+ _do_with_cleanups,
+ _run_cleanup,
+ ObjectWithCleanups,
+ OperationWithCleanups,
+ )
+from bzrlib.tests import TestCase
+from bzrlib import (
+ debug,
+ trace,
+ )
+
+
+class CleanupsTestCase(TestCase):
+
+ def setUp(self):
+ super(CleanupsTestCase, self).setUp()
+ self.call_log = []
+
+ def no_op_cleanup(self):
+ self.call_log.append('no_op_cleanup')
+
+ def assertLogContains(self, regex):
+ self.assertContainsRe(self.get_log(), regex, re.DOTALL)
+
+ def failing_cleanup(self):
+ self.call_log.append('failing_cleanup')
+ raise Exception("failing_cleanup goes boom!")
+
+
+class TestRunCleanup(CleanupsTestCase):
+
+ def test_no_errors(self):
+ """The function passed to _run_cleanup is run."""
+ self.assertTrue(_run_cleanup(self.no_op_cleanup))
+ self.assertEqual(['no_op_cleanup'], self.call_log)
+
+ def test_cleanup_with_args_kwargs(self):
+ def func_taking_args_kwargs(*args, **kwargs):
+ self.call_log.append(('func', args, kwargs))
+ _run_cleanup(func_taking_args_kwargs, 'an arg', kwarg='foo')
+ self.assertEqual(
+ [('func', ('an arg',), {'kwarg': 'foo'})], self.call_log)
+
+ def test_cleanup_error(self):
+ """An error from the cleanup function is logged by _run_cleanup, but not
+ propagated.
+
+        This is because there's no way for _run_cleanup to know whether there
+        is an existing exception in this situation::
+ try:
+ some_func()
+ finally:
+ _run_cleanup(cleanup_func)
+ So, the best _run_cleanup can do is always log errors but never raise
+ them.
+ """
+ self.assertFalse(_run_cleanup(self.failing_cleanup))
+ self.assertLogContains('Cleanup failed:.*failing_cleanup goes boom')
+
+ def test_cleanup_error_debug_flag(self):
+ """The -Dcleanup debug flag causes cleanup errors to be reported to the
+ user.
+ """
+ log = StringIO()
+ trace.push_log_file(log)
+ debug.debug_flags.add('cleanup')
+ self.assertFalse(_run_cleanup(self.failing_cleanup))
+ self.assertContainsRe(
+ log.getvalue(),
+ "bzr: warning: Cleanup failed:.*failing_cleanup goes boom")
+
+ def test_prior_error_cleanup_succeeds(self):
+ """Calling _run_cleanup from a finally block will not interfere with an
+ exception from the try block.
+ """
+ def failing_operation():
+ try:
+ 1/0
+ finally:
+ _run_cleanup(self.no_op_cleanup)
+ self.assertRaises(ZeroDivisionError, failing_operation)
+ self.assertEqual(['no_op_cleanup'], self.call_log)
+
+ def test_prior_error_cleanup_fails(self):
+ """Calling _run_cleanup from a finally block will not interfere with an
+ exception from the try block even when the cleanup itself raises an
+ exception.
+
+ The cleanup exception will be logged.
+ """
+ def failing_operation():
+ try:
+ 1/0
+ finally:
+ _run_cleanup(self.failing_cleanup)
+ self.assertRaises(ZeroDivisionError, failing_operation)
+ self.assertLogContains('Cleanup failed:.*failing_cleanup goes boom')
+
+
+class TestDoWithCleanups(CleanupsTestCase):
+
+ def trivial_func(self):
+ self.call_log.append('trivial_func')
+ return 'trivial result'
+
+ def test_runs_func(self):
+ """_do_with_cleanups runs the function it is given, and returns the
+ result.
+ """
+ result = _do_with_cleanups([], self.trivial_func)
+ self.assertEqual('trivial result', result)
+
+ def test_runs_cleanups(self):
+ """Cleanup functions are run (in the given order)."""
+ cleanup_func_1 = (self.call_log.append, ('cleanup 1',), {})
+ cleanup_func_2 = (self.call_log.append, ('cleanup 2',), {})
+ _do_with_cleanups([cleanup_func_1, cleanup_func_2], self.trivial_func)
+ self.assertEqual(
+ ['trivial_func', 'cleanup 1', 'cleanup 2'], self.call_log)
+
+ def failing_func(self):
+ self.call_log.append('failing_func')
+ 1/0
+
+ def test_func_error_propagates(self):
+ """Errors from the main function are propagated (after running
+ cleanups).
+ """
+ self.assertRaises(
+ ZeroDivisionError, _do_with_cleanups,
+ [(self.no_op_cleanup, (), {})], self.failing_func)
+ self.assertEqual(['failing_func', 'no_op_cleanup'], self.call_log)
+
+ def test_func_error_trumps_cleanup_error(self):
+        """Errors from the main function are propagated even if a cleanup
+        raises an error.
+
+        The cleanup error is logged.
+ """
+ self.assertRaises(
+ ZeroDivisionError, _do_with_cleanups,
+ [(self.failing_cleanup, (), {})], self.failing_func)
+ self.assertLogContains('Cleanup failed:.*failing_cleanup goes boom')
+
+ def test_func_passes_and_error_from_cleanup(self):
+ """An error from a cleanup is propagated when the main function doesn't
+ raise an error. Later cleanups are still executed.
+ """
+ exc = self.assertRaises(
+ Exception, _do_with_cleanups,
+ [(self.failing_cleanup, (), {}), (self.no_op_cleanup, (), {})],
+ self.trivial_func)
+ self.assertEqual('failing_cleanup goes boom!', exc.args[0])
+ self.assertEqual(
+ ['trivial_func', 'failing_cleanup', 'no_op_cleanup'],
+ self.call_log)
+
+ def test_multiple_cleanup_failures(self):
+ """When multiple cleanups fail (as tends to happen when something has
+ gone wrong), the first error is propagated, and subsequent errors are
+ logged.
+ """
+ cleanups = self.make_two_failing_cleanup_funcs()
+ self.assertRaises(ErrorA, _do_with_cleanups, cleanups,
+ self.trivial_func)
+ self.assertLogContains('Cleanup failed:.*ErrorB')
+ self.assertFalse('ErrorA' in self.get_log())
+
+ def make_two_failing_cleanup_funcs(self):
+ def raise_a():
+ raise ErrorA('Error A')
+ def raise_b():
+ raise ErrorB('Error B')
+ return [(raise_a, (), {}), (raise_b, (), {})]
+
+ def test_multiple_cleanup_failures_debug_flag(self):
+ log = StringIO()
+ trace.push_log_file(log)
+ debug.debug_flags.add('cleanup')
+ cleanups = self.make_two_failing_cleanup_funcs()
+ self.assertRaises(ErrorA, _do_with_cleanups, cleanups,
+ self.trivial_func)
+ self.assertContainsRe(
+ log.getvalue(), "bzr: warning: Cleanup failed:.*Error B\n")
+ self.assertEqual(1, log.getvalue().count('bzr: warning:'),
+ log.getvalue())
+
+ def test_func_and_cleanup_errors_debug_flag(self):
+ log = StringIO()
+ trace.push_log_file(log)
+ debug.debug_flags.add('cleanup')
+ cleanups = self.make_two_failing_cleanup_funcs()
+ self.assertRaises(ZeroDivisionError, _do_with_cleanups, cleanups,
+ self.failing_func)
+ self.assertContainsRe(
+ log.getvalue(), "bzr: warning: Cleanup failed:.*Error A\n")
+ self.assertContainsRe(
+ log.getvalue(), "bzr: warning: Cleanup failed:.*Error B\n")
+ self.assertEqual(2, log.getvalue().count('bzr: warning:'))
+
+ def test_func_may_mutate_cleanups(self):
+ """The main func may mutate the cleanups before it returns.
+
+ This allows a function to gradually add cleanups as it acquires
+ resources, rather than planning all the cleanups up-front. The
+ OperationWithCleanups helper relies on this working.
+ """
+ cleanups_list = []
+ def func_that_adds_cleanups():
+ self.call_log.append('func_that_adds_cleanups')
+ cleanups_list.append((self.no_op_cleanup, (), {}))
+ return 'result'
+ result = _do_with_cleanups(cleanups_list, func_that_adds_cleanups)
+ self.assertEqual('result', result)
+ self.assertEqual(
+ ['func_that_adds_cleanups', 'no_op_cleanup'], self.call_log)
+
+ def test_cleanup_error_debug_flag(self):
+ """The -Dcleanup debug flag causes cleanup errors to be reported to the
+ user.
+ """
+ log = StringIO()
+ trace.push_log_file(log)
+ debug.debug_flags.add('cleanup')
+ self.assertRaises(ZeroDivisionError, _do_with_cleanups,
+ [(self.failing_cleanup, (), {})], self.failing_func)
+ self.assertContainsRe(
+ log.getvalue(),
+ "bzr: warning: Cleanup failed:.*failing_cleanup goes boom")
+ self.assertEqual(1, log.getvalue().count('bzr: warning:'))
+
+
+class ErrorA(Exception): pass
+class ErrorB(Exception): pass
+
+
+class TestOperationWithCleanups(CleanupsTestCase):
+
+ def test_cleanup_ordering(self):
+ """Cleanups are added in LIFO order.
+
+ So cleanups added before run is called are run last, and the last
+ cleanup added during the func is run first.
+ """
+ call_log = []
+ def func(op, foo):
+ call_log.append(('func called', foo))
+ op.add_cleanup(call_log.append, 'cleanup 2')
+ op.add_cleanup(call_log.append, 'cleanup 1')
+ return 'result'
+ owc = OperationWithCleanups(func)
+ owc.add_cleanup(call_log.append, 'cleanup 4')
+ owc.add_cleanup(call_log.append, 'cleanup 3')
+ result = owc.run('foo')
+ self.assertEqual('result', result)
+ self.assertEqual(
+ [('func called', 'foo'), 'cleanup 1', 'cleanup 2', 'cleanup 3',
+ 'cleanup 4'], call_log)
+
+
+class SampleWithCleanups(ObjectWithCleanups):
+
+ pass
+
+
+class TestObjectWithCleanups(TestCase):
+
+ def test_object_with_cleanups(self):
+ a = []
+ s = SampleWithCleanups()
+ s.add_cleanup(a.append, 42)
+ s.cleanup_now()
+ self.assertEqual(a, [42])
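For context, a minimal sketch of how the OperationWithCleanups API exercised
above might be used outside the tests; the file handle and helper names are
made up for illustration:

    from bzrlib.cleanup import OperationWithCleanups

    def _read_first_line(op, path):
        f = open(path)
        # Register the cleanup as soon as the resource exists; it runs after
        # this function returns, even if the function raises.
        op.add_cleanup(f.close)
        return f.readline()

    def first_line(path):
        # run() calls _read_first_line(op, path) and then runs the cleanups
        # in LIFO order.
        return OperationWithCleanups(_read_first_line).run(path)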
diff --git a/bzrlib/tests/test_cmdline.py b/bzrlib/tests/test_cmdline.py
new file mode 100644
index 0000000..38a36d4
--- /dev/null
+++ b/bzrlib/tests/test_cmdline.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ cmdline,
+ tests,
+ )
+from bzrlib.tests.features import backslashdir_feature
+
+class TestSplitter(tests.TestCase):
+
+ def assertAsTokens(self, expected, line, single_quotes_allowed=False):
+ s = cmdline.Splitter(line, single_quotes_allowed=single_quotes_allowed)
+ self.assertEqual(expected, list(s))
+
+ def test_simple(self):
+ self.assertAsTokens([(False, u'foo'), (False, u'bar'), (False, u'baz')],
+ u'foo bar baz')
+
+ def test_ignore_multiple_spaces(self):
+ self.assertAsTokens([(False, u'foo'), (False, u'bar')], u'foo bar')
+
+ def test_ignore_leading_space(self):
+ self.assertAsTokens([(False, u'foo'), (False, u'bar')], u' foo bar')
+
+ def test_ignore_trailing_space(self):
+ self.assertAsTokens([(False, u'foo'), (False, u'bar')], u'foo bar ')
+
+ def test_posix_quotations(self):
+ self.assertAsTokens([(True, u'foo bar')], u"'foo bar'",
+ single_quotes_allowed=True)
+ self.assertAsTokens([(True, u'foo bar')], u"'fo''o b''ar'",
+ single_quotes_allowed=True)
+ self.assertAsTokens([(True, u'foo bar')], u'"fo""o b""ar"',
+ single_quotes_allowed=True)
+ self.assertAsTokens([(True, u'foo bar')], u'"fo"\'o b\'"ar"',
+ single_quotes_allowed=True)
+
+ def test_nested_quotations(self):
+ self.assertAsTokens([(True, u'foo"" bar')], u"\"foo\\\"\\\" bar\"")
+ self.assertAsTokens([(True, u'foo\'\' bar')], u"\"foo'' bar\"")
+ self.assertAsTokens([(True, u'foo\'\' bar')], u"\"foo'' bar\"",
+ single_quotes_allowed=True)
+ self.assertAsTokens([(True, u'foo"" bar')], u"'foo\"\" bar'",
+ single_quotes_allowed=True)
+
+ def test_empty_result(self):
+ self.assertAsTokens([], u'')
+ self.assertAsTokens([], u' ')
+
+ def test_quoted_empty(self):
+ self.assertAsTokens([(True, '')], u'""')
+ self.assertAsTokens([(False, u"''")], u"''")
+ self.assertAsTokens([(True, '')], u"''", single_quotes_allowed=True)
+
+ def test_unicode_chars(self):
+ self.assertAsTokens([(False, u'f\xb5\xee'), (False, u'\u1234\u3456')],
+ u'f\xb5\xee \u1234\u3456')
+
+ def test_newline_in_quoted_section(self):
+ self.assertAsTokens([(True, u'foo\nbar\nbaz\n')], u'"foo\nbar\nbaz\n"')
+ self.assertAsTokens([(True, u'foo\nbar\nbaz\n')], u"'foo\nbar\nbaz\n'",
+ single_quotes_allowed=True)
+
+ def test_escape_chars(self):
+ self.assertAsTokens([(False, u'foo\\bar')], u'foo\\bar')
+
+ def test_escape_quote(self):
+ self.assertAsTokens([(True, u'foo"bar')], u'"foo\\"bar"')
+ self.assertAsTokens([(True, u'foo\\"bar')], u'"foo\\\\\\"bar"')
+ self.assertAsTokens([(True, u'foo\\bar')], u'"foo\\\\"bar"')
+
+ def test_double_escape(self):
+ self.assertAsTokens([(True, u'foo\\\\bar')], u'"foo\\\\bar"')
+ self.assertAsTokens([(False, u'foo\\\\bar')], u"foo\\\\bar")
+
+ def test_multiple_quoted_args(self):
+ self.assertAsTokens([(True, u'x x'), (True, u'y y')],
+ u'"x x" "y y"')
+ self.assertAsTokens([(True, u'x x'), (True, u'y y')],
+ u'"x x" \'y y\'', single_quotes_allowed=True)
+
+ def test_n_backslashes_handling(self):
+ # https://bugs.launchpad.net/bzr/+bug/528944
+        # What we really care about is doubled backslashes when they
+        # represent UNC paths.
+        # But there are too many weird corner cases
+        # (see https://bugs.launchpad.net/tortoisebzr/+bug/569050)
+        # for it to be worth reproducing every bit of Windows
+        # command-line handling here.
+ self.requireFeature(backslashdir_feature)
+ self.assertAsTokens([(True, r'\\host\path')], r'"\\host\path"')
+ self.assertAsTokens([(False, r'\\host\path')], r'\\host\path')
+        # handling of '"' after 2n and 2n+1 backslashes,
+        # inside and outside a quoted string
+ self.assertAsTokens([(True, r'\\'), (False, r'*.py')], r'"\\\\" *.py')
+ self.assertAsTokens([(True, r'\\" *.py')], r'"\\\\\" *.py"')
+ self.assertAsTokens([(True, r'\\ *.py')], r'\\\\" *.py"')
+ self.assertAsTokens([(False, r'\\"'), (False, r'*.py')],
+ r'\\\\\" *.py')
+ self.assertAsTokens([(True, u'\\\\')], u'"\\\\')
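For reference, a small sketch of consuming the Splitter tested above; it only
relies on behaviour the tests pin down, namely that iterating a Splitter
yields (was_quoted, text) pairs:

    from bzrlib import cmdline

    def split_command_line(line):
        # Keep just the token text; the quoting flag is dropped here.
        return [text for was_quoted, text
                in cmdline.Splitter(line, single_quotes_allowed=False)]

    # split_command_line(u'add "my file.txt" other.txt')
    # -> [u'add', u'my file.txt', u'other.txt']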
diff --git a/bzrlib/tests/test_commands.py b/bzrlib/tests/test_commands.py
new file mode 100644
index 0000000..97ac313
--- /dev/null
+++ b/bzrlib/tests/test_commands.py
@@ -0,0 +1,451 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import errno
+import inspect
+import sys
+
+from bzrlib import (
+ builtins,
+ commands,
+ config,
+ errors,
+ option,
+ tests,
+ trace,
+ )
+from bzrlib.commands import display_command
+from bzrlib.tests import TestSkipped
+
+
+class TestCommands(tests.TestCase):
+
+ def test_all_commands_have_help(self):
+ commands._register_builtin_commands()
+ commands_without_help = set()
+ base_doc = inspect.getdoc(commands.Command)
+ for cmd_name in commands.all_command_names():
+ cmd = commands.get_cmd_object(cmd_name)
+ cmd_help = cmd.help()
+ if not cmd_help or cmd_help == base_doc:
+                commands_without_help.add(cmd_name)
+ self.assertLength(0, commands_without_help)
+
+ def test_display_command(self):
+ """EPIPE message is selectively suppressed"""
+ def pipe_thrower():
+ raise IOError(errno.EPIPE, "Bogus pipe error")
+ self.assertRaises(IOError, pipe_thrower)
+ @display_command
+ def non_thrower():
+ pipe_thrower()
+ non_thrower()
+ @display_command
+ def other_thrower():
+ raise IOError(errno.ESPIPE, "Bogus pipe error")
+ self.assertRaises(IOError, other_thrower)
+
+ def test_unicode_command(self):
+ # This error is thrown when we can't find the command in the
+ # list of available commands
+ self.assertRaises(errors.BzrCommandError,
+ commands.run_bzr, [u'cmd\xb5'])
+
+ def test_unicode_option(self):
+ # This error is actually thrown by optparse, when it
+ # can't find the given option
+ import optparse
+ if optparse.__version__ == "1.5.3":
+ raise TestSkipped("optparse 1.5.3 can't handle unicode options")
+ self.assertRaises(errors.BzrCommandError,
+ commands.run_bzr, ['log', u'--option\xb5'])
+
+ @staticmethod
+ def get_command(options):
+ class cmd_foo(commands.Command):
+ __doc__ = 'Bar'
+
+ takes_options = options
+
+ return cmd_foo()
+
+ def test_help_hidden(self):
+ c = self.get_command([option.Option('foo', hidden=True)])
+ self.assertNotContainsRe(c.get_help_text(), '--foo')
+
+ def test_help_not_hidden(self):
+ c = self.get_command([option.Option('foo', hidden=False)])
+ self.assertContainsRe(c.get_help_text(), '--foo')
+
+
+class TestInsideCommand(tests.TestCaseInTempDir):
+
+ def test_command_see_config_overrides(self):
+ def run(cmd):
+ # We override the run() command method so we can observe the
+ # overrides from inside.
+ c = config.GlobalStack()
+ self.assertEquals('12', c.get('xx'))
+ self.assertEquals('foo', c.get('yy'))
+ self.overrideAttr(builtins.cmd_rocks, 'run', run)
+ self.run_bzr(['rocks', '-Oxx=12', '-Oyy=foo'])
+ c = config.GlobalStack()
+ # Ensure that we don't leak outside of the command
+ self.assertEquals(None, c.get('xx'))
+ self.assertEquals(None, c.get('yy'))
+
+
+class TestInvokedAs(tests.TestCase):
+
+ def test_invoked_as(self):
+ """The command object knows the actual name used to invoke it."""
+ commands.install_bzr_command_hooks()
+ commands._register_builtin_commands()
+ # get one from the real get_cmd_object.
+ c = commands.get_cmd_object('ci')
+ self.assertIsInstance(c, builtins.cmd_commit)
+ self.assertEquals(c.invoked_as, 'ci')
+
+
+class TestGetAlias(tests.TestCase):
+
+ def _get_config(self, config_text):
+ my_config = config.GlobalConfig.from_string(config_text)
+ return my_config
+
+ def test_simple(self):
+ my_config = self._get_config("[ALIASES]\n"
+ "diff=diff -r -2..-1\n")
+ self.assertEqual([u'diff', u'-r', u'-2..-1'],
+ commands.get_alias("diff", config=my_config))
+
+ def test_single_quotes(self):
+ my_config = self._get_config("[ALIASES]\n"
+ "diff=diff -r -2..-1 --diff-options "
+ "'--strip-trailing-cr -wp'\n")
+ self.assertEqual([u'diff', u'-r', u'-2..-1', u'--diff-options',
+ u'--strip-trailing-cr -wp'],
+ commands.get_alias("diff", config=my_config))
+
+ def test_double_quotes(self):
+ my_config = self._get_config("[ALIASES]\n"
+ "diff=diff -r -2..-1 --diff-options "
+ "\"--strip-trailing-cr -wp\"\n")
+ self.assertEqual([u'diff', u'-r', u'-2..-1', u'--diff-options',
+ u'--strip-trailing-cr -wp'],
+ commands.get_alias("diff", config=my_config))
+
+ def test_unicode(self):
+ my_config = self._get_config("[ALIASES]\n"
+ u'iam=whoami "Erik B\u00e5gfors <erik@bagfors.nu>"\n')
+ self.assertEqual([u'whoami', u'Erik B\u00e5gfors <erik@bagfors.nu>'],
+ commands.get_alias("iam", config=my_config))
+
+
+class TestSeeAlso(tests.TestCase):
+    """Tests for the 'see also' functionality of Command."""
+
+ @staticmethod
+ def _get_command_with_see_also(see_also):
+ class ACommand(commands.Command):
+ __doc__ = """A sample command."""
+ _see_also = see_also
+ return ACommand()
+
+ def test_default_subclass_no_see_also(self):
+ command = self._get_command_with_see_also([])
+ self.assertEqual([], command.get_see_also())
+
+ def test__see_also(self):
+ """When _see_also is defined, it sets the result of get_see_also()."""
+ command = self._get_command_with_see_also(['bar', 'foo'])
+ self.assertEqual(['bar', 'foo'], command.get_see_also())
+
+ def test_deduplication(self):
+ """Duplicates in _see_also are stripped out."""
+ command = self._get_command_with_see_also(['foo', 'foo'])
+ self.assertEqual(['foo'], command.get_see_also())
+
+ def test_sorted(self):
+ """_see_also is sorted by get_see_also."""
+ command = self._get_command_with_see_also(['foo', 'bar'])
+ self.assertEqual(['bar', 'foo'], command.get_see_also())
+
+ def test_additional_terms(self):
+ """Additional terms can be supplied and are deduped and sorted."""
+ command = self._get_command_with_see_also(['foo', 'bar'])
+ self.assertEqual(['bar', 'foo', 'gam'],
+ command.get_see_also(['gam', 'bar', 'gam']))
+
+
+class TestRegisterLazy(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ import bzrlib.tests.fake_command
+ del sys.modules['bzrlib.tests.fake_command']
+ global lazy_command_imported
+ lazy_command_imported = False
+ commands.install_bzr_command_hooks()
+
+ @staticmethod
+ def remove_fake():
+ commands.plugin_cmds.remove('fake')
+
+ def assertIsFakeCommand(self, cmd_obj):
+ from bzrlib.tests.fake_command import cmd_fake
+ self.assertIsInstance(cmd_obj, cmd_fake)
+
+ def test_register_lazy(self):
+ """Ensure lazy registration works"""
+ commands.plugin_cmds.register_lazy('cmd_fake', [],
+ 'bzrlib.tests.fake_command')
+ self.addCleanup(self.remove_fake)
+ self.assertFalse(lazy_command_imported)
+ fake_instance = commands.get_cmd_object('fake')
+ self.assertTrue(lazy_command_imported)
+ self.assertIsFakeCommand(fake_instance)
+
+ def test_get_unrelated_does_not_import(self):
+ commands.plugin_cmds.register_lazy('cmd_fake', [],
+ 'bzrlib.tests.fake_command')
+ self.addCleanup(self.remove_fake)
+ commands.get_cmd_object('status')
+ self.assertFalse(lazy_command_imported)
+
+ def test_aliases(self):
+ commands.plugin_cmds.register_lazy('cmd_fake', ['fake_alias'],
+ 'bzrlib.tests.fake_command')
+ self.addCleanup(self.remove_fake)
+ fake_instance = commands.get_cmd_object('fake_alias')
+ self.assertIsFakeCommand(fake_instance)
+
+
+class TestExtendCommandHook(tests.TestCase):
+
+ def test_fires_on_get_cmd_object(self):
+        # The extend_command(cmd) hook fires when commands are delivered
+        # to the ui, not when they are registered (lazily registered
+        # plugin commands are not even created at that point) nor when
+        # they are merely instantiated.
+ hook_calls = []
+ commands.install_bzr_command_hooks()
+ commands.Command.hooks.install_named_hook(
+ "extend_command", hook_calls.append, None)
+ # create a command, should not fire
+ class cmd_test_extend_command_hook(commands.Command):
+ __doc__ = """A sample command."""
+ self.assertEqual([], hook_calls)
+ # -- as a builtin
+ # register the command class, should not fire
+ try:
+ commands.builtin_command_registry.register(cmd_test_extend_command_hook)
+ self.assertEqual([], hook_calls)
+ # and ask for the object, should fire
+ cmd = commands.get_cmd_object('test-extend-command-hook')
+ # For resilience - to ensure all code paths hit it - we
+ # fire on everything returned in the 'cmd_dict', which is currently
+ # all known commands, so assert that cmd is in hook_calls
+ self.assertSubset([cmd], hook_calls)
+ del hook_calls[:]
+ finally:
+ commands.builtin_command_registry.remove('test-extend-command-hook')
+ # -- as a plugin lazy registration
+ try:
+ # register the command class, should not fire
+ commands.plugin_cmds.register_lazy('cmd_fake', [],
+ 'bzrlib.tests.fake_command')
+ self.assertEqual([], hook_calls)
+ # and ask for the object, should fire
+ cmd = commands.get_cmd_object('fake')
+ self.assertEqual([cmd], hook_calls)
+ finally:
+ commands.plugin_cmds.remove('fake')
+
+
+class TestGetCommandHook(tests.TestCase):
+
+ def test_fires_on_get_cmd_object(self):
+ # The get_command(cmd) hook fires when commands are delivered to the
+ # ui.
+ commands.install_bzr_command_hooks()
+ hook_calls = []
+ class ACommand(commands.Command):
+ __doc__ = """A sample command."""
+ def get_cmd(cmd_or_None, cmd_name):
+ hook_calls.append(('called', cmd_or_None, cmd_name))
+ if cmd_name in ('foo', 'info'):
+ return ACommand()
+ commands.Command.hooks.install_named_hook(
+ "get_command", get_cmd, None)
+ # create a command directly, should not fire
+ cmd = ACommand()
+ self.assertEqual([], hook_calls)
+ # ask by name, should fire and give us our command
+ cmd = commands.get_cmd_object('foo')
+ self.assertEqual([('called', None, 'foo')], hook_calls)
+ self.assertIsInstance(cmd, ACommand)
+ del hook_calls[:]
+ # ask by a name that is supplied by a builtin - the hook should still
+ # fire and we still get our object, but we should see the builtin
+ # passed to the hook.
+ cmd = commands.get_cmd_object('info')
+ self.assertIsInstance(cmd, ACommand)
+ self.assertEqual(1, len(hook_calls))
+ self.assertEqual('info', hook_calls[0][2])
+ self.assertIsInstance(hook_calls[0][1], builtins.cmd_info)
+
+
+class TestGetMissingCommandHook(tests.TestCase):
+
+ def hook_missing(self):
+ """Hook get_missing_command for testing."""
+ self.hook_calls = []
+ class ACommand(commands.Command):
+ __doc__ = """A sample command."""
+ def get_missing_cmd(cmd_name):
+ self.hook_calls.append(('called', cmd_name))
+ if cmd_name in ('foo', 'info'):
+ return ACommand()
+ commands.Command.hooks.install_named_hook(
+ "get_missing_command", get_missing_cmd, None)
+ self.ACommand = ACommand
+
+ def test_fires_on_get_cmd_object(self):
+ # The get_missing_command(cmd) hook fires when commands are delivered to the
+ # ui.
+ self.hook_missing()
+ # create a command directly, should not fire
+ self.cmd = self.ACommand()
+ self.assertEqual([], self.hook_calls)
+ # ask by name, should fire and give us our command
+ cmd = commands.get_cmd_object('foo')
+ self.assertEqual([('called', 'foo')], self.hook_calls)
+ self.assertIsInstance(cmd, self.ACommand)
+ del self.hook_calls[:]
+ # ask by a name that is supplied by a builtin - the hook should not
+ # fire and we still get our object.
+ commands.install_bzr_command_hooks()
+ cmd = commands.get_cmd_object('info')
+ self.assertNotEqual(None, cmd)
+ self.assertEqual(0, len(self.hook_calls))
+
+ def test_skipped_on_HelpCommandIndex_get_topics(self):
+ # The get_missing_command(cmd_name) hook is not fired when
+ # looking up help topics.
+ self.hook_missing()
+ topic = commands.HelpCommandIndex()
+ topics = topic.get_topics('foo')
+ self.assertEqual([], self.hook_calls)
+
+
+class TestListCommandHook(tests.TestCase):
+
+ def test_fires_on_all_command_names(self):
+ # The list_commands() hook fires when all_command_names() is invoked.
+ hook_calls = []
+ commands.install_bzr_command_hooks()
+ def list_my_commands(cmd_names):
+ hook_calls.append('called')
+ cmd_names.update(['foo', 'bar'])
+ return cmd_names
+ commands.Command.hooks.install_named_hook(
+ "list_commands", list_my_commands, None)
+ # Get a command, which should not trigger the hook.
+ cmd = commands.get_cmd_object('info')
+ self.assertEqual([], hook_calls)
+ # Get all command classes (for docs and shell completion).
+ cmds = list(commands.all_command_names())
+ self.assertEqual(['called'], hook_calls)
+ self.assertSubset(['foo', 'bar'], cmds)
+
+class TestPreAndPostCommandHooks(tests.TestCase):
+ class TestError(StandardError):
+ __doc__ = """A test exception."""
+
+ def test_pre_and_post_hooks(self):
+ hook_calls = []
+
+ def pre_command(cmd):
+ self.assertEqual([], hook_calls)
+ hook_calls.append('pre')
+
+ def post_command(cmd):
+ self.assertEqual(['pre', 'run'], hook_calls)
+ hook_calls.append('post')
+
+ def run(cmd):
+ self.assertEqual(['pre'], hook_calls)
+ hook_calls.append('run')
+
+ self.overrideAttr(builtins.cmd_rocks, 'run', run)
+ commands.install_bzr_command_hooks()
+ commands.Command.hooks.install_named_hook(
+ "pre_command", pre_command, None)
+ commands.Command.hooks.install_named_hook(
+ "post_command", post_command, None)
+
+ self.assertEqual([], hook_calls)
+ self.run_bzr(['rocks', '-Oxx=12', '-Oyy=foo'])
+ self.assertEqual(['pre', 'run', 'post'], hook_calls)
+
+ def test_post_hook_provided_exception(self):
+ hook_calls = []
+
+ def post_command(cmd):
+ hook_calls.append('post')
+
+ def run(cmd):
+ hook_calls.append('run')
+ raise self.TestError()
+
+ self.overrideAttr(builtins.cmd_rocks, 'run', run)
+ commands.install_bzr_command_hooks()
+ commands.Command.hooks.install_named_hook(
+ "post_command", post_command, None)
+
+ self.assertEqual([], hook_calls)
+ self.assertRaises(self.TestError, commands.run_bzr, [u'rocks'])
+ self.assertEqual(['run', 'post'], hook_calls)
+
+ def test_pre_command_error(self):
+        """Ensure a BzrCommandError in pre_command aborts the command"""
+
+ hook_calls = []
+
+ def pre_command(cmd):
+ hook_calls.append('pre')
+            # verify that all subclasses of BzrCommandError are caught too
+ raise errors.BzrOptionError()
+
+ def post_command(cmd, e):
+ self.fail('post_command should not be called')
+
+ def run(cmd):
+ self.fail('command should not be called')
+
+ self.overrideAttr(builtins.cmd_rocks, 'run', run)
+ commands.install_bzr_command_hooks()
+ commands.Command.hooks.install_named_hook(
+ "pre_command", pre_command, None)
+ commands.Command.hooks.install_named_hook(
+ "post_command", post_command, None)
+
+ self.assertEqual([], hook_calls)
+ self.assertRaises(errors.BzrCommandError,
+ commands.run_bzr, [u'rocks'])
+ self.assertEqual(['pre'], hook_calls)
+
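A minimal sketch of installing a pre_command hook in the same way these tests
do; the logging callback is illustrative only:

    from bzrlib import commands, trace

    def log_command(cmd):
        # Receives the command object before its run() method executes.
        trace.mutter('pre_command fired for %r' % cmd)

    commands.install_bzr_command_hooks()
    commands.Command.hooks.install_named_hook(
        'pre_command', log_command, None)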
diff --git a/bzrlib/tests/test_commit.py b/bzrlib/tests/test_commit.py
new file mode 100644
index 0000000..1906a38
--- /dev/null
+++ b/bzrlib/tests/test_commit.py
@@ -0,0 +1,841 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+import bzrlib
+from bzrlib import (
+ config,
+ controldir,
+ errors,
+ )
+from bzrlib.branch import Branch
+from bzrlib.bzrdir import BzrDirMetaFormat1
+from bzrlib.commit import Commit, NullCommitReporter
+from bzrlib.errors import (
+ PointlessCommit,
+ BzrError,
+ SigningFailed,
+ LockContention,
+ )
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ test_foreign,
+ )
+from bzrlib.tests.features import (
+ SymlinkFeature,
+ )
+from bzrlib.tests.matchers import MatchesAncestry
+
+
+# TODO: Test commit with some added, and added-but-missing files
+
+class MustSignConfig(config.MemoryStack):
+
+ def __init__(self):
+ super(MustSignConfig, self).__init__('''
+gpg_signing_command=cat -
+create_signatures=always
+''')
+
+
+class CapturingReporter(NullCommitReporter):
+ """This reporter captures the calls made to it for evaluation later."""
+
+ def __init__(self):
+ # a list of the calls this received
+ self.calls = []
+
+ def snapshot_change(self, change, path):
+ self.calls.append(('change', change, path))
+
+ def deleted(self, file_id):
+ self.calls.append(('deleted', file_id))
+
+ def missing(self, path):
+ self.calls.append(('missing', path))
+
+ def renamed(self, change, old_path, new_path):
+ self.calls.append(('renamed', change, old_path, new_path))
+
+ def is_verbose(self):
+ return True
+
+
+class TestCommit(TestCaseWithTransport):
+
+ def test_simple_commit(self):
+ """Commit and check two versions of a single file."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ rev1 = wt.commit(message='add hello')
+ file_id = wt.path2id('hello')
+
+ with file('hello', 'w') as f: f.write('version 2')
+ rev2 = wt.commit(message='commit 2')
+
+ eq = self.assertEquals
+ eq(b.revno(), 2)
+ rev = b.repository.get_revision(rev1)
+ eq(rev.message, 'add hello')
+
+ tree1 = b.repository.revision_tree(rev1)
+ tree1.lock_read()
+ text = tree1.get_file_text(file_id)
+ tree1.unlock()
+ self.assertEqual('hello world', text)
+
+ tree2 = b.repository.revision_tree(rev2)
+ tree2.lock_read()
+ text = tree2.get_file_text(file_id)
+ tree2.unlock()
+ self.assertEqual('version 2', text)
+
+ def test_commit_lossy_native(self):
+ """Attempt a lossy commit to a native branch."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ revid = wt.commit(message='add hello', rev_id='revid', lossy=True)
+ self.assertEquals('revid', revid)
+
+ def test_commit_lossy_foreign(self):
+ """Attempt a lossy commit to a foreign branch."""
+ test_foreign.register_dummy_foreign_for_test(self)
+ wt = self.make_branch_and_tree('.',
+ format=test_foreign.DummyForeignVcsDirFormat())
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ revid = wt.commit(message='add hello', lossy=True,
+ timestamp=1302659388, timezone=0)
+ self.assertEquals('dummy-v1:1302659388.0-0-UNKNOWN', revid)
+
+ def test_commit_bound_lossy_foreign(self):
+ """Attempt a lossy commit to a bzr branch bound to a foreign branch."""
+ test_foreign.register_dummy_foreign_for_test(self)
+ foreign_branch = self.make_branch('foreign',
+ format=test_foreign.DummyForeignVcsDirFormat())
+ wt = foreign_branch.create_checkout("local")
+ b = wt.branch
+ with file('local/hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ revid = wt.commit(message='add hello', lossy=True,
+ timestamp=1302659388, timezone=0)
+ self.assertEquals('dummy-v1:1302659388.0-0-0', revid)
+ self.assertEquals('dummy-v1:1302659388.0-0-0',
+ foreign_branch.last_revision())
+ self.assertEquals('dummy-v1:1302659388.0-0-0',
+ wt.branch.last_revision())
+
+ def test_missing_commit(self):
+ """Test a commit with a missing file"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add(['hello'], ['hello-id'])
+ wt.commit(message='add hello')
+
+ os.remove('hello')
+ reporter = CapturingReporter()
+ wt.commit('removed hello', rev_id='rev2', reporter=reporter)
+ self.assertEquals(
+ [('missing', u'hello'), ('deleted', u'hello')],
+ reporter.calls)
+
+ tree = b.repository.revision_tree('rev2')
+ self.assertFalse(tree.has_id('hello-id'))
+
+ def test_partial_commit_move(self):
+ """Test a partial commit where a file was renamed but not committed.
+
+ https://bugs.launchpad.net/bzr/+bug/83039
+
+ If not handled properly, commit will try to snapshot
+ dialog.py with olive/ as a parent, while
+ olive/ has not been snapshotted yet.
+ """
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ self.build_tree(['annotate/', 'annotate/foo.py',
+ 'olive/', 'olive/dialog.py'
+ ])
+ wt.add(['annotate', 'olive', 'annotate/foo.py', 'olive/dialog.py'])
+ wt.commit(message='add files')
+ wt.rename_one("olive/dialog.py", "aaa")
+ self.build_tree_contents([('annotate/foo.py', 'modified\n')])
+ wt.commit('renamed hello', specific_files=["annotate"])
+
+ def test_pointless_commit(self):
+ """Commit refuses unless there are changes or it's forced."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello')
+ wt.add(['hello'])
+ wt.commit(message='add hello')
+ self.assertEquals(b.revno(), 1)
+ self.assertRaises(PointlessCommit,
+ wt.commit,
+ message='fails',
+ allow_pointless=False)
+ self.assertEquals(b.revno(), 1)
+
+ def test_commit_empty(self):
+        """Committing an empty tree works."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ wt.commit(message='empty tree', allow_pointless=True)
+ self.assertRaises(PointlessCommit,
+ wt.commit,
+ message='empty tree',
+ allow_pointless=False)
+ wt.commit(message='empty tree', allow_pointless=True)
+ self.assertEquals(b.revno(), 2)
+
+ def test_selective_delete(self):
+ """Selective commit in tree with deletions"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello')
+ with file('buongia', 'w') as f: f.write('buongia')
+ wt.add(['hello', 'buongia'],
+ ['hello-id', 'buongia-id'])
+ wt.commit(message='add files',
+ rev_id='test@rev-1')
+
+ os.remove('hello')
+ with file('buongia', 'w') as f: f.write('new text')
+ wt.commit(message='update text',
+ specific_files=['buongia'],
+ allow_pointless=False,
+ rev_id='test@rev-2')
+
+ wt.commit(message='remove hello',
+ specific_files=['hello'],
+ allow_pointless=False,
+ rev_id='test@rev-3')
+
+ eq = self.assertEquals
+ eq(b.revno(), 3)
+
+ tree2 = b.repository.revision_tree('test@rev-2')
+ tree2.lock_read()
+ self.addCleanup(tree2.unlock)
+ self.assertTrue(tree2.has_filename('hello'))
+ self.assertEquals(tree2.get_file_text('hello-id'), 'hello')
+ self.assertEquals(tree2.get_file_text('buongia-id'), 'new text')
+
+ tree3 = b.repository.revision_tree('test@rev-3')
+ tree3.lock_read()
+ self.addCleanup(tree3.unlock)
+ self.assertFalse(tree3.has_filename('hello'))
+ self.assertEquals(tree3.get_file_text('buongia-id'), 'new text')
+
+ def test_commit_rename(self):
+ """Test commit of a revision where a file is renamed."""
+ tree = self.make_branch_and_tree('.')
+ b = tree.branch
+ self.build_tree(['hello'], line_endings='binary')
+ tree.add(['hello'], ['hello-id'])
+ tree.commit(message='one', rev_id='test@rev-1', allow_pointless=False)
+
+ tree.rename_one('hello', 'fruity')
+ tree.commit(message='renamed', rev_id='test@rev-2', allow_pointless=False)
+
+ eq = self.assertEquals
+ tree1 = b.repository.revision_tree('test@rev-1')
+ tree1.lock_read()
+ self.addCleanup(tree1.unlock)
+ eq(tree1.id2path('hello-id'), 'hello')
+ eq(tree1.get_file_text('hello-id'), 'contents of hello\n')
+ self.assertFalse(tree1.has_filename('fruity'))
+ self.check_tree_shape(tree1, ['hello'])
+ eq(tree1.get_file_revision('hello-id'), 'test@rev-1')
+
+ tree2 = b.repository.revision_tree('test@rev-2')
+ tree2.lock_read()
+ self.addCleanup(tree2.unlock)
+ eq(tree2.id2path('hello-id'), 'fruity')
+ eq(tree2.get_file_text('hello-id'), 'contents of hello\n')
+ self.check_tree_shape(tree2, ['fruity'])
+ eq(tree2.get_file_revision('hello-id'), 'test@rev-2')
+
+ def test_reused_rev_id(self):
+ """Test that a revision id cannot be reused in a branch"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ wt.commit('initial', rev_id='test@rev-1', allow_pointless=True)
+ self.assertRaises(Exception,
+ wt.commit,
+ message='reused id',
+ rev_id='test@rev-1',
+ allow_pointless=True)
+
+ def test_commit_move(self):
+ """Test commit of revisions with moved files and directories"""
+ eq = self.assertEquals
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ r1 = 'test@rev-1'
+ self.build_tree(['hello', 'a/', 'b/'])
+ wt.add(['hello', 'a', 'b'], ['hello-id', 'a-id', 'b-id'])
+ wt.commit('initial', rev_id=r1, allow_pointless=False)
+ wt.move(['hello'], 'a')
+ r2 = 'test@rev-2'
+ wt.commit('two', rev_id=r2, allow_pointless=False)
+ wt.lock_read()
+ try:
+ self.check_tree_shape(wt, ['a/', 'a/hello', 'b/'])
+ finally:
+ wt.unlock()
+
+ wt.move(['b'], 'a')
+ r3 = 'test@rev-3'
+ wt.commit('three', rev_id=r3, allow_pointless=False)
+ wt.lock_read()
+ try:
+ self.check_tree_shape(wt,
+ ['a/', 'a/hello', 'a/b/'])
+ self.check_tree_shape(b.repository.revision_tree(r3),
+ ['a/', 'a/hello', 'a/b/'])
+ finally:
+ wt.unlock()
+
+ wt.move(['a/hello'], 'a/b')
+ r4 = 'test@rev-4'
+ wt.commit('four', rev_id=r4, allow_pointless=False)
+ wt.lock_read()
+ try:
+ self.check_tree_shape(wt, ['a/', 'a/b/hello', 'a/b/'])
+ finally:
+ wt.unlock()
+
+ inv = b.repository.get_inventory(r4)
+ eq(inv['hello-id'].revision, r4)
+ eq(inv['a-id'].revision, r1)
+ eq(inv['b-id'].revision, r3)
+
+ def test_removed_commit(self):
+ """Commit with a removed file"""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add(['hello'], ['hello-id'])
+ wt.commit(message='add hello')
+ wt.remove('hello')
+ wt.commit('removed hello', rev_id='rev2')
+
+ tree = b.repository.revision_tree('rev2')
+ self.assertFalse(tree.has_id('hello-id'))
+
+ def test_committed_ancestry(self):
+ """Test commit appends revisions to ancestry."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ rev_ids = []
+ for i in range(4):
+ with file('hello', 'w') as f: f.write((str(i) * 4) + '\n')
+ if i == 0:
+ wt.add(['hello'], ['hello-id'])
+ rev_id = 'test@rev-%d' % (i+1)
+ rev_ids.append(rev_id)
+ wt.commit(message='rev %d' % (i+1),
+ rev_id=rev_id)
+ for i in range(4):
+ self.assertThat(rev_ids[:i+1],
+ MatchesAncestry(b.repository, rev_ids[i]))
+
+ def test_commit_new_subdir_child_selective(self):
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ self.build_tree(['dir/', 'dir/file1', 'dir/file2'])
+ wt.add(['dir', 'dir/file1', 'dir/file2'],
+ ['dirid', 'file1id', 'file2id'])
+ wt.commit('dir/file1', specific_files=['dir/file1'], rev_id='1')
+ inv = b.repository.get_inventory('1')
+ self.assertEqual('1', inv['dirid'].revision)
+ self.assertEqual('1', inv['file1id'].revision)
+ # FIXME: This should raise a KeyError I think, rbc20051006
+ self.assertRaises(BzrError, inv.__getitem__, 'file2id')
+
+ def test_strict_commit(self):
+ """Try and commit with unknown files and strict = True, should fail."""
+ from bzrlib.errors import StrictCommitFailed
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ with file('goodbye', 'w') as f: f.write('goodbye cruel world!')
+ self.assertRaises(StrictCommitFailed, wt.commit,
+ message='add hello but not goodbye', strict=True)
+
+ def test_strict_commit_without_unknowns(self):
+ """Try and commit with no unknown files and strict = True,
+ should work."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ wt.commit(message='add hello', strict=True)
+
+ def test_nonstrict_commit(self):
+ """Try and commit with unknown files and strict = False, should work."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ with file('goodbye', 'w') as f: f.write('goodbye cruel world!')
+ wt.commit(message='add hello but not goodbye', strict=False)
+
+ def test_nonstrict_commit_without_unknowns(self):
+ """Try and commit with no unknown files and strict = False,
+ should work."""
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+ with file('hello', 'w') as f: f.write('hello world')
+ wt.add('hello')
+ wt.commit(message='add hello', strict=False)
+
+ def test_signed_commit(self):
+ import bzrlib.gpg
+ import bzrlib.commit as commit
+ oldstrategy = bzrlib.gpg.GPGStrategy
+ wt = self.make_branch_and_tree('.')
+ branch = wt.branch
+ wt.commit("base", allow_pointless=True, rev_id='A')
+ self.assertFalse(branch.repository.has_signature_for_revision_id('A'))
+ try:
+ from bzrlib.testament import Testament
+ # monkey patch gpg signing mechanism
+ bzrlib.gpg.GPGStrategy = bzrlib.gpg.LoopbackGPGStrategy
+ conf = config.MemoryStack('''
+gpg_signing_command=cat -
+create_signatures=always
+''')
+ commit.Commit(config_stack=conf).commit(
+ message="base", allow_pointless=True, rev_id='B',
+ working_tree=wt)
+ def sign(text):
+ return bzrlib.gpg.LoopbackGPGStrategy(None).sign(text)
+ self.assertEqual(sign(Testament.from_revision(branch.repository,
+ 'B').as_short_text()),
+ branch.repository.get_signature_text('B'))
+ finally:
+ bzrlib.gpg.GPGStrategy = oldstrategy
+
+ def test_commit_failed_signature(self):
+ import bzrlib.gpg
+ import bzrlib.commit as commit
+ oldstrategy = bzrlib.gpg.GPGStrategy
+ wt = self.make_branch_and_tree('.')
+ branch = wt.branch
+ wt.commit("base", allow_pointless=True, rev_id='A')
+ self.assertFalse(branch.repository.has_signature_for_revision_id('A'))
+ try:
+ # monkey patch gpg signing mechanism
+ bzrlib.gpg.GPGStrategy = bzrlib.gpg.DisabledGPGStrategy
+ conf = config.MemoryStack('''
+gpg_signing_command=cat -
+create_signatures=always
+''')
+ self.assertRaises(SigningFailed,
+ commit.Commit(config_stack=conf).commit,
+ message="base",
+ allow_pointless=True,
+ rev_id='B',
+ working_tree=wt)
+ branch = Branch.open(self.get_url('.'))
+ self.assertEqual(branch.last_revision(), 'A')
+ self.assertFalse(branch.repository.has_revision('B'))
+ finally:
+ bzrlib.gpg.GPGStrategy = oldstrategy
+
+ def test_commit_invokes_hooks(self):
+ import bzrlib.commit as commit
+ wt = self.make_branch_and_tree('.')
+ branch = wt.branch
+ calls = []
+ def called(branch, rev_id):
+ calls.append('called')
+ bzrlib.ahook = called
+ try:
+ conf = config.MemoryStack('post_commit=bzrlib.ahook bzrlib.ahook')
+ commit.Commit(config_stack=conf).commit(
+ message = "base", allow_pointless=True, rev_id='A',
+ working_tree = wt)
+ self.assertEqual(['called', 'called'], calls)
+ finally:
+ del bzrlib.ahook
+
+ def test_commit_object_doesnt_set_nick(self):
+ # using the Commit object directly does not set the branch nick.
+ wt = self.make_branch_and_tree('.')
+ c = Commit()
+ c.commit(working_tree=wt, message='empty tree', allow_pointless=True)
+ self.assertEquals(wt.branch.revno(), 1)
+ self.assertEqual({},
+ wt.branch.repository.get_revision(
+ wt.branch.last_revision()).properties)
+
+ def test_safe_master_lock(self):
+ os.mkdir('master')
+ master = BzrDirMetaFormat1().initialize('master')
+ master.create_repository()
+ master_branch = master.create_branch()
+ master.create_workingtree()
+ bound = master.sprout('bound')
+ wt = bound.open_workingtree()
+ wt.branch.set_bound_location(os.path.realpath('master'))
+ master_branch.lock_write()
+ try:
+ self.assertRaises(LockContention, wt.commit, 'silly')
+ finally:
+ master_branch.unlock()
+
+ def test_commit_bound_merge(self):
+ # see bug #43959; commit of a merge in a bound branch fails to push
+ # the new commit into the master
+ master_branch = self.make_branch('master')
+ bound_tree = self.make_branch_and_tree('bound')
+ bound_tree.branch.bind(master_branch)
+
+ self.build_tree_contents([('bound/content_file', 'initial contents\n')])
+ bound_tree.add(['content_file'])
+ bound_tree.commit(message='woo!')
+
+ other_bzrdir = master_branch.bzrdir.sprout('other')
+ other_tree = other_bzrdir.open_workingtree()
+
+ # do a commit to the other branch changing the content file so
+ # that our commit after merging will have a merged revision in the
+ # content file history.
+ self.build_tree_contents([('other/content_file', 'change in other\n')])
+ other_tree.commit('change in other')
+
+ # do a merge into the bound branch from other, and then change the
+ # content file locally to force a new revision (rather than using the
+ # revision from other). This forces extra processing in commit.
+ bound_tree.merge_from_branch(other_tree.branch)
+ self.build_tree_contents([('bound/content_file', 'change in bound\n')])
+
+        # before bug #43959 was fixed, this failed with 'revision not present
+        # in weave' when implicitly pushing from the bound branch to the master
+ bound_tree.commit(message='commit of merge in bound tree')
+
+ def test_commit_reporting_after_merge(self):
+ # when doing a commit of a merge, the reporter needs to still
+ # be called for each item that is added/removed/deleted.
+ this_tree = self.make_branch_and_tree('this')
+ # we need a bunch of files and dirs, to perform one action on each.
+ self.build_tree([
+ 'this/dirtorename/',
+ 'this/dirtoreparent/',
+ 'this/dirtoleave/',
+ 'this/dirtoremove/',
+ 'this/filetoreparent',
+ 'this/filetorename',
+ 'this/filetomodify',
+ 'this/filetoremove',
+ 'this/filetoleave']
+ )
+ this_tree.add([
+ 'dirtorename',
+ 'dirtoreparent',
+ 'dirtoleave',
+ 'dirtoremove',
+ 'filetoreparent',
+ 'filetorename',
+ 'filetomodify',
+ 'filetoremove',
+ 'filetoleave']
+ )
+ this_tree.commit('create_files')
+ other_dir = this_tree.bzrdir.sprout('other')
+ other_tree = other_dir.open_workingtree()
+ other_tree.lock_write()
+ # perform the needed actions on the files and dirs.
+ try:
+ other_tree.rename_one('dirtorename', 'renameddir')
+ other_tree.rename_one('dirtoreparent', 'renameddir/reparenteddir')
+ other_tree.rename_one('filetorename', 'renamedfile')
+ other_tree.rename_one('filetoreparent', 'renameddir/reparentedfile')
+ other_tree.remove(['dirtoremove', 'filetoremove'])
+ self.build_tree_contents([
+ ('other/newdir/', ),
+ ('other/filetomodify', 'new content'),
+ ('other/newfile', 'new file content')])
+ other_tree.add('newfile')
+ other_tree.add('newdir/')
+ other_tree.commit('modify all sample files and dirs.')
+ finally:
+ other_tree.unlock()
+ this_tree.merge_from_branch(other_tree.branch)
+ reporter = CapturingReporter()
+ this_tree.commit('do the commit', reporter=reporter)
+ expected = set([
+ ('change', 'modified', 'filetomodify'),
+ ('change', 'added', 'newdir'),
+ ('change', 'added', 'newfile'),
+ ('renamed', 'renamed', 'dirtorename', 'renameddir'),
+ ('renamed', 'renamed', 'filetorename', 'renamedfile'),
+ ('renamed', 'renamed', 'dirtoreparent', 'renameddir/reparenteddir'),
+ ('renamed', 'renamed', 'filetoreparent', 'renameddir/reparentedfile'),
+ ('deleted', 'dirtoremove'),
+ ('deleted', 'filetoremove'),
+ ])
+ result = set(reporter.calls)
+ missing = expected - result
+ new = result - expected
+ self.assertEqual((set(), set()), (missing, new))
+
+ def test_commit_removals_respects_filespec(self):
+ """Commit respects the specified_files for removals."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b'])
+ tree.add(['a', 'b'])
+ tree.commit('added a, b')
+ tree.remove(['a', 'b'])
+ tree.commit('removed a', specific_files='a')
+ basis = tree.basis_tree()
+ tree.lock_read()
+ try:
+ self.assertIs(None, basis.path2id('a'))
+ self.assertFalse(basis.path2id('b') is None)
+ finally:
+ tree.unlock()
+
+ def test_commit_saves_1ms_timestamp(self):
+        """A timestamp passed to commit is saved with 1ms resolution"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add('a')
+ tree.commit('added a', timestamp=1153248633.4186721, timezone=0,
+ rev_id='a1')
+
+ rev = tree.branch.repository.get_revision('a1')
+ self.assertEqual(1153248633.419, rev.timestamp)
+
+ def test_commit_has_1ms_resolution(self):
+ """Allowing commit to generate the timestamp also has 1ms resolution"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ tree.add('a')
+ tree.commit('added a', rev_id='a1')
+
+ rev = tree.branch.repository.get_revision('a1')
+ timestamp = rev.timestamp
+ timestamp_1ms = round(timestamp, 3)
+ self.assertEqual(timestamp_1ms, timestamp)
+
+ def assertBasisTreeKind(self, kind, tree, file_id):
+ basis = tree.basis_tree()
+ basis.lock_read()
+ try:
+ self.assertEqual(kind, basis.kind(file_id))
+ finally:
+ basis.unlock()
+
+ def test_commit_kind_changes(self):
+ self.requireFeature(SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ os.symlink('target', 'name')
+ tree.add('name', 'a-file-id')
+ tree.commit('Added a symlink')
+ self.assertBasisTreeKind('symlink', tree, 'a-file-id')
+
+ os.unlink('name')
+ self.build_tree(['name'])
+ tree.commit('Changed symlink to file')
+ self.assertBasisTreeKind('file', tree, 'a-file-id')
+
+ os.unlink('name')
+ os.symlink('target', 'name')
+ tree.commit('file to symlink')
+ self.assertBasisTreeKind('symlink', tree, 'a-file-id')
+
+ os.unlink('name')
+ os.mkdir('name')
+ tree.commit('symlink to directory')
+ self.assertBasisTreeKind('directory', tree, 'a-file-id')
+
+ os.rmdir('name')
+ os.symlink('target', 'name')
+ tree.commit('directory to symlink')
+ self.assertBasisTreeKind('symlink', tree, 'a-file-id')
+
+ # prepare for directory <-> file tests
+ os.unlink('name')
+ os.mkdir('name')
+ tree.commit('symlink to directory')
+ self.assertBasisTreeKind('directory', tree, 'a-file-id')
+
+ os.rmdir('name')
+ self.build_tree(['name'])
+ tree.commit('Changed directory to file')
+ self.assertBasisTreeKind('file', tree, 'a-file-id')
+
+ os.unlink('name')
+ os.mkdir('name')
+ tree.commit('file to directory')
+ self.assertBasisTreeKind('directory', tree, 'a-file-id')
+
+ def test_commit_unversioned_specified(self):
+        """Commit should raise if a specified file isn't in the basis or worktree"""
+ tree = self.make_branch_and_tree('.')
+ self.assertRaises(errors.PathsNotVersionedError, tree.commit,
+ 'message', specific_files=['bogus'])
+
+ class Callback(object):
+
+ def __init__(self, message, testcase):
+ self.called = False
+ self.message = message
+ self.testcase = testcase
+
+ def __call__(self, commit_obj):
+ self.called = True
+ self.testcase.assertTrue(isinstance(commit_obj, Commit))
+ return self.message
+
+ def test_commit_callback(self):
+ """Commit should invoke a callback to get the message"""
+
+ tree = self.make_branch_and_tree('.')
+ try:
+ tree.commit()
+ except Exception, e:
+ self.assertTrue(isinstance(e, BzrError))
+ self.assertEqual('The message or message_callback keyword'
+ ' parameter is required for commit().', str(e))
+ else:
+ self.fail('exception not raised')
+ cb = self.Callback(u'commit 1', self)
+ tree.commit(message_callback=cb)
+ self.assertTrue(cb.called)
+ repository = tree.branch.repository
+ message = repository.get_revision(tree.last_revision()).message
+ self.assertEqual('commit 1', message)
+
+ def test_no_callback_pointless(self):
+ """Callback should not be invoked for pointless commit"""
+ tree = self.make_branch_and_tree('.')
+ cb = self.Callback(u'commit 2', self)
+ self.assertRaises(PointlessCommit, tree.commit, message_callback=cb,
+ allow_pointless=False)
+ self.assertFalse(cb.called)
+
+ def test_no_callback_netfailure(self):
+ """Callback should not be invoked if connectivity fails"""
+ tree = self.make_branch_and_tree('.')
+ cb = self.Callback(u'commit 2', self)
+ repository = tree.branch.repository
+ # simulate network failure
+ def raise_(self, arg, arg2, arg3=None, arg4=None):
+ raise errors.NoSuchFile('foo')
+ repository.add_inventory = raise_
+ repository.add_inventory_by_delta = raise_
+ self.assertRaises(errors.NoSuchFile, tree.commit, message_callback=cb)
+ self.assertFalse(cb.called)
+
+ def test_selected_file_merge_commit(self):
+ """Ensure the correct error is raised"""
+ tree = self.make_branch_and_tree('foo')
+ # pending merge would turn into a left parent
+ tree.commit('commit 1')
+ tree.add_parent_tree_id('example')
+ self.build_tree(['foo/bar', 'foo/baz'])
+ tree.add(['bar', 'baz'])
+ err = self.assertRaises(errors.CannotCommitSelectedFileMerge,
+ tree.commit, 'commit 2', specific_files=['bar', 'baz'])
+ self.assertEqual(['bar', 'baz'], err.files)
+ self.assertEqual('Selected-file commit of merges is not supported'
+ ' yet: files bar, baz', str(err))
+
+ def test_commit_ordering(self):
+ """Test of corner-case commit ordering error"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/z/', 'a/c/', 'a/z/x', 'a/z/y'])
+ tree.add(['a/', 'a/z/', 'a/c/', 'a/z/x', 'a/z/y'])
+ tree.commit('setup')
+ self.build_tree(['a/c/d/'])
+ tree.add('a/c/d')
+ tree.rename_one('a/z/x', 'a/c/d/x')
+ tree.commit('test', specific_files=['a/z/y'])
+
+ def test_commit_no_author(self):
+ """The default kwarg author in MutableTree.commit should not add
+ the 'author' revision property.
+ """
+ tree = self.make_branch_and_tree('foo')
+ rev_id = tree.commit('commit 1')
+ rev = tree.branch.repository.get_revision(rev_id)
+ self.assertFalse('author' in rev.properties)
+ self.assertFalse('authors' in rev.properties)
+
+ def test_commit_author(self):
+ """Passing a non-empty author kwarg to MutableTree.commit should add
+ the 'author' revision property.
+ """
+ tree = self.make_branch_and_tree('foo')
+ rev_id = self.callDeprecated(['The parameter author was '
+ 'deprecated in version 1.13. Use authors instead'],
+ tree.commit, 'commit 1', author='John Doe <jdoe@example.com>')
+ rev = tree.branch.repository.get_revision(rev_id)
+ self.assertEqual('John Doe <jdoe@example.com>',
+ rev.properties['authors'])
+ self.assertFalse('author' in rev.properties)
+
+ def test_commit_empty_authors_list(self):
+ """Passing an empty list to authors shouldn't add the property."""
+ tree = self.make_branch_and_tree('foo')
+ rev_id = tree.commit('commit 1', authors=[])
+ rev = tree.branch.repository.get_revision(rev_id)
+ self.assertFalse('author' in rev.properties)
+ self.assertFalse('authors' in rev.properties)
+
+ def test_multiple_authors(self):
+ tree = self.make_branch_and_tree('foo')
+ rev_id = tree.commit('commit 1',
+ authors=['John Doe <jdoe@example.com>',
+ 'Jane Rey <jrey@example.com>'])
+ rev = tree.branch.repository.get_revision(rev_id)
+ self.assertEqual('John Doe <jdoe@example.com>\n'
+ 'Jane Rey <jrey@example.com>', rev.properties['authors'])
+ self.assertFalse('author' in rev.properties)
+
+ def test_author_and_authors_incompatible(self):
+ tree = self.make_branch_and_tree('foo')
+ self.assertRaises(AssertionError, tree.commit, 'commit 1',
+ authors=['John Doe <jdoe@example.com>',
+ 'Jane Rey <jrey@example.com>'],
+ author="Jack Me <jme@example.com>")
+
+ def test_author_with_newline_rejected(self):
+ tree = self.make_branch_and_tree('foo')
+ self.assertRaises(AssertionError, tree.commit, 'commit 1',
+ authors=['John\nDoe <jdoe@example.com>'])
+
+ def test_commit_with_checkout_and_branch_sharing_repo(self):
+ repo = self.make_repository('repo', shared=True)
+ # make_branch_and_tree ignores shared repos
+ branch = controldir.ControlDir.create_branch_convenience('repo/branch')
+ tree2 = branch.create_checkout('repo/tree2')
+ tree2.commit('message', rev_id='rev1')
+ self.assertTrue(tree2.branch.repository.has_revision('rev1'))
diff --git a/bzrlib/tests/test_commit_merge.py b/bzrlib/tests/test_commit_merge.py
new file mode 100644
index 0000000..4c85741
--- /dev/null
+++ b/bzrlib/tests/test_commit_merge.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+from bzrlib import (
+ check,
+ osutils,
+ )
+from bzrlib.errors import PointlessCommit
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.features import (
+ SymlinkFeature,
+ )
+from bzrlib.tests.matchers import RevisionHistoryMatches
+
+
+class TestCommitMerge(TestCaseWithTransport):
+ """Tests for committing the results of a merge.
+
+ These don't currently test the merge code, which is intentional to
+ reduce the scope of testing. We just mark the revision as merged
+ without bothering about the contents much."""
+
+ def test_merge_commit_empty(self):
+ """Simple commit of two-way merge of empty trees."""
+ wtx = self.make_branch_and_tree('x')
+ base_rev = wtx.commit('common parent')
+ bx = wtx.branch
+ wty = wtx.bzrdir.sprout('y').open_workingtree()
+ by = wty.branch
+
+ wtx.commit('commit one', rev_id='x@u-0-1', allow_pointless=True)
+ wty.commit('commit two', rev_id='y@u-0-1', allow_pointless=True)
+
+ by.fetch(bx)
+ # just having the history there does nothing
+ self.assertRaises(PointlessCommit,
+ wty.commit,
+ 'no changes yet', rev_id='y@u-0-2',
+ allow_pointless=False)
+ wty.merge_from_branch(bx)
+ wty.commit('merge from x', rev_id='y@u-0-2', allow_pointless=False)
+
+ self.assertEquals(by.revno(), 3)
+ graph = wty.branch.repository.get_graph()
+ self.addCleanup(wty.lock_read().unlock)
+ self.assertThat(by,
+ RevisionHistoryMatches([base_rev, 'y@u-0-1', 'y@u-0-2'])
+ )
+ rev = by.repository.get_revision('y@u-0-2')
+ self.assertEquals(rev.parent_ids,
+ ['y@u-0-1', 'x@u-0-1'])
+
+ def test_merge_new_file(self):
+ """Commit merge of two trees with no overlapping files."""
+ wtx = self.make_branch_and_tree('x')
+ base_rev = wtx.commit('common parent')
+ bx = wtx.branch
+ wtx.commit('establish root id')
+ wty = wtx.bzrdir.sprout('y').open_workingtree()
+ self.assertEqual(wtx.get_root_id(), wty.get_root_id())
+ by = wty.branch
+
+ self.build_tree(['x/ecks', 'y/why'])
+
+ wtx.add(['ecks'], ['ecks-id'])
+ wty.add(['why'], ['why-id'])
+
+ wtx.commit('commit one', rev_id='x@u-0-1', allow_pointless=True)
+ wty.commit('commit two', rev_id='y@u-0-1', allow_pointless=True)
+
+ wty.merge_from_branch(bx)
+
+ # Partial commit of merges is currently not allowed, because it would
+ # give different merge graphs for each file, which might be complex.
+ # It may be allowed in the future.
+ self.assertRaises(Exception,
+ wty.commit,
+ 'partial commit', allow_pointless=False,
+ specific_files=['ecks'])
+
+ wty.commit('merge from x', rev_id='y@u-0-2', allow_pointless=False)
+ tree = by.repository.revision_tree('y@u-0-2')
+ self.assertEquals(tree.get_file_revision('ecks-id'), 'x@u-0-1')
+ self.assertEquals(tree.get_file_revision('why-id'), 'y@u-0-1')
+
+ check.check_dwim(bx.base, False, True, True)
+ check.check_dwim(by.base, False, True, True)
+
+ def test_merge_with_symlink(self):
+ self.requireFeature(SymlinkFeature)
+ tree_a = self.make_branch_and_tree('tree_a')
+ os.symlink('target', osutils.pathjoin('tree_a', 'link'))
+ tree_a.add('link')
+ tree_a.commit('added link')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ self.build_tree(['tree_a/file'])
+ tree_a.add('file')
+ tree_a.commit('added file')
+ self.build_tree(['tree_b/another_file'])
+ tree_b.add('another_file')
+ tree_b.commit('add another file')
+ tree_b.merge_from_branch(tree_a.branch)
+ tree_b.commit('merge')
diff --git a/bzrlib/tests/test_config.py b/bzrlib/tests/test_config.py
new file mode 100644
index 0000000..870a04d
--- /dev/null
+++ b/bzrlib/tests/test_config.py
@@ -0,0 +1,4953 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for finding and reading the bzr config file[s]."""
+# import system imports here
+from cStringIO import StringIO
+from textwrap import dedent
+import os
+import sys
+import threading
+
+
+from testtools import matchers
+
+# import bzrlib specific imports here
+from bzrlib import (
+ branch,
+ config,
+ controldir,
+ diff,
+ errors,
+ osutils,
+ mail_client,
+ ui,
+ urlutils,
+ registry as _mod_registry,
+ remote,
+ tests,
+ trace,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ )
+from bzrlib.transport import remote as transport_remote
+from bzrlib.tests import (
+ features,
+ scenarios,
+ test_server,
+ )
+from bzrlib.util.configobj import configobj
+
+
+def lockable_config_scenarios():
+ return [
+ ('global',
+ {'config_class': config.GlobalConfig,
+ 'config_args': [],
+ 'config_section': 'DEFAULT'}),
+ ('locations',
+ {'config_class': config.LocationConfig,
+ 'config_args': ['.'],
+ 'config_section': '.'}),]
+
+
+load_tests = scenarios.load_tests_apply_scenarios
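+# ``load_tests_apply_scenarios`` multiplies any test class that defines a
+# ``scenarios`` attribute (e.g. TestLockableConfig below) across the
+# scenario parameters returned by lockable_config_scenarios().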
+
+# Register helpers to build stores
+config.test_store_builder_registry.register(
+ 'configobj', lambda test: config.TransportIniFileStore(
+ test.get_transport(), 'configobj.conf'))
+config.test_store_builder_registry.register(
+ 'bazaar', lambda test: config.GlobalStore())
+config.test_store_builder_registry.register(
+ 'location', lambda test: config.LocationStore())
+
+
+def build_backing_branch(test, relpath,
+ transport_class=None, server_class=None):
+ """Test helper to create a backing branch only once.
+
+ Some tests need multiple stores/stacks to check concurrent update
+ behaviours. As such, they need to build different branch *objects* even if
+ they share the branch on disk.
+
+ :param relpath: The relative path to the branch. (Note that the helper
+ should always be called with the same relpath).
+
+ :param transport_class: The Transport class the test needs to use.
+
+ :param server_class: The server associated with the ``transport_class``
+ above.
+
+ Either both or neither of ``transport_class`` and ``server_class`` should
+ be specified.
+ """
+ if transport_class is not None and server_class is not None:
+ test.transport_class = transport_class
+ test.transport_server = server_class
+ elif not (transport_class is None and server_class is None):
+ raise AssertionError('Specify both ``transport_class`` and '
+ '``server_class`` or neither of them')
+ if getattr(test, 'backing_branch', None) is None:
+ # First call, let's build the branch on disk
+ test.backing_branch = test.make_branch(relpath)
+
+
+def build_branch_store(test):
+ build_backing_branch(test, 'branch')
+ b = branch.Branch.open('branch')
+ return config.BranchStore(b)
+config.test_store_builder_registry.register('branch', build_branch_store)
+
+
+def build_control_store(test):
+ build_backing_branch(test, 'branch')
+ b = controldir.ControlDir.open('branch')
+ return config.ControlStore(b)
+config.test_store_builder_registry.register('control', build_control_store)
+
+
+def build_remote_branch_store(test):
+ # There is only one permutation (but we won't be able to handle more with
+ # this design anyway)
+ (transport_class,
+ server_class) = transport_remote.get_test_permutations()[0]
+ build_backing_branch(test, 'branch', transport_class, server_class)
+ b = branch.Branch.open(test.get_url('branch'))
+ return config.BranchStore(b)
+config.test_store_builder_registry.register('remote_branch',
+ build_remote_branch_store)
+
+
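+# Register helpers to build stacks (a Stack resolves option lookups across
+# the sections provided by one or more Stores)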
+config.test_stack_builder_registry.register(
+ 'bazaar', lambda test: config.GlobalStack())
+config.test_stack_builder_registry.register(
+ 'location', lambda test: config.LocationStack('.'))
+
+
+def build_branch_stack(test):
+ build_backing_branch(test, 'branch')
+ b = branch.Branch.open('branch')
+ return config.BranchStack(b)
+config.test_stack_builder_registry.register('branch', build_branch_stack)
+
+
+def build_branch_only_stack(test):
+ # There is only one permutation (but we won't be able to handle more with
+ # this design anyway)
+ (transport_class,
+ server_class) = transport_remote.get_test_permutations()[0]
+ build_backing_branch(test, 'branch', transport_class, server_class)
+ b = branch.Branch.open(test.get_url('branch'))
+ return config.BranchOnlyStack(b)
+config.test_stack_builder_registry.register('branch_only',
+ build_branch_only_stack)
+
+def build_remote_control_stack(test):
+ # There is only one permutation (but we won't be able to handle more with
+ # this design anyway)
+ (transport_class,
+ server_class) = transport_remote.get_test_permutations()[0]
+ # We need only a bzrdir for this, not a full branch, but it's not worth
+ # creating a dedicated helper to create only the bzrdir
+ build_backing_branch(test, 'branch', transport_class, server_class)
+ b = branch.Branch.open(test.get_url('branch'))
+ return config.RemoteControlStack(b.bzrdir)
+config.test_stack_builder_registry.register('remote_control',
+ build_remote_control_stack)
+
+
+sample_long_alias="log -r-15..-1 --line"
+sample_config_text = u"""
+[DEFAULT]
+email=Erik B\u00e5gfors <erik@bagfors.nu>
+editor=vim
+change_editor=vimdiff -of @new_path @old_path
+gpg_signing_command=gnome-gpg
+gpg_signing_key=DD4D5088
+log_format=short
+validate_signatures_in_log=true
+acceptable_keys=amy
+user_global_option=something
+bzr.mergetool.sometool=sometool {base} {this} {other} -o {result}
+bzr.mergetool.funkytool=funkytool "arg with spaces" {this_temp}
+bzr.mergetool.newtool='"newtool with spaces" {this_temp}'
+bzr.default_mergetool=sometool
+[ALIASES]
+h=help
+ll=""" + sample_long_alias + "\n"
+
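+# In the sample above, ``bzr.mergetool.<name>`` options define merge tool
+# command templates and ``bzr.default_mergetool`` names the default tool;
+# they are exercised by the merge tool tests further down.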
+
+sample_always_signatures = """
+[DEFAULT]
+check_signatures=ignore
+create_signatures=always
+"""
+
+sample_ignore_signatures = """
+[DEFAULT]
+check_signatures=require
+create_signatures=never
+"""
+
+sample_maybe_signatures = """
+[DEFAULT]
+check_signatures=ignore
+create_signatures=when-required
+"""
+
+sample_branches_text = """
+[http://www.example.com]
+# Top level policy
+email=Robert Collins <robertc@example.org>
+normal_option = normal
+appendpath_option = append
+appendpath_option:policy = appendpath
+norecurse_option = norecurse
+norecurse_option:policy = norecurse
+[http://www.example.com/ignoreparent]
+# different project: ignore parent dir config
+ignore_parents=true
+[http://www.example.com/norecurse]
+# configuration items that only apply to this dir
+recurse=false
+normal_option = norecurse
+[http://www.example.com/dir]
+appendpath_option = normal
+[/b/]
+check_signatures=require
+# test trailing / matching with no children
+[/a/]
+check_signatures=check-available
+gpg_signing_command=false
+gpg_signing_key=default
+user_local_option=local
+# test trailing / matching
+[/a/*]
+#subdirs will match but not the parent
+[/a/c]
+check_signatures=ignore
+post_commit=bzrlib.tests.test_config.post_commit
+#testing explicit beats globs
+"""
+
+
+def create_configs(test):
+ """Create configuration files for a given test.
+
+ This creates a tree (populating the ``test.tree`` attribute) and its
+ associated branch, and populates the following attributes:
+
+ - branch_config: A BranchConfig for the associated branch.
+
+ - locations_config: A LocationConfig for the associated branch.
+
+ - bazaar_config: A GlobalConfig.
+
+ The tree and branch are created in a 'tree' subdirectory so the tests can
+ still use the test directory to stay outside of the branch.
+ """
+ tree = test.make_branch_and_tree('tree')
+ test.tree = tree
+ test.branch_config = config.BranchConfig(tree.branch)
+ test.locations_config = config.LocationConfig(tree.basedir)
+ test.bazaar_config = config.GlobalConfig()
+
+
+def create_configs_with_file_option(test):
+ """Create configuration files with a ``file`` option set in each.
+
+ This builds on ``create_configs`` and adds one ``file`` option in each
+ configuration with a value that identifies the configuration file.
+ """
+ create_configs(test)
+ test.bazaar_config.set_user_option('file', 'bazaar')
+ test.locations_config.set_user_option('file', 'locations')
+ test.branch_config.set_user_option('file', 'branch')
+
+
+class TestOptionsMixin:
+
+ def assertOptions(self, expected, conf):
+ # We don't care about the parser (as it will make tests hard to write
+ # and error-prone anyway)
+ self.assertThat([opt[:4] for opt in conf._get_options()],
+ matchers.Equals(expected))
+
+
+class InstrumentedConfigObj(object):
+ """A config obj look-enough-alike to record calls made to it."""
+
+ def __contains__(self, thing):
+ self._calls.append(('__contains__', thing))
+ return False
+
+ def __getitem__(self, key):
+ self._calls.append(('__getitem__', key))
+ return self
+
+ def __init__(self, input, encoding=None):
+ self._calls = [('__init__', input, encoding)]
+
+ def __setitem__(self, key, value):
+ self._calls.append(('__setitem__', key, value))
+
+ def __delitem__(self, key):
+ self._calls.append(('__delitem__', key))
+
+ def keys(self):
+ self._calls.append(('keys',))
+ return []
+
+ def reload(self):
+ self._calls.append(('reload',))
+
+ def write(self, arg):
+ self._calls.append(('write',))
+
+ def as_bool(self, value):
+ self._calls.append(('as_bool', value))
+ return False
+
+ def get_value(self, section, name):
+ self._calls.append(('get_value', section, name))
+ return None
+
+
+class FakeBranch(object):
+
+ def __init__(self, base=None):
+ if base is None:
+ self.base = "http://example.com/branches/demo"
+ else:
+ self.base = base
+ self._transport = self.control_files = \
+ FakeControlFilesAndTransport()
+
+ def _get_config(self):
+ return config.TransportConfig(self._transport, 'branch.conf')
+
+ def lock_write(self):
+ pass
+
+ def unlock(self):
+ pass
+
+
+class FakeControlFilesAndTransport(object):
+
+ def __init__(self):
+ self.files = {}
+ self._transport = self
+
+ def get(self, filename):
+ # from Transport
+ try:
+ return StringIO(self.files[filename])
+ except KeyError:
+ raise errors.NoSuchFile(filename)
+
+ def get_bytes(self, filename):
+ # from Transport
+ try:
+ return self.files[filename]
+ except KeyError:
+ raise errors.NoSuchFile(filename)
+
+ def put(self, filename, fileobj):
+ self.files[filename] = fileobj.read()
+
+ def put_file(self, filename, fileobj):
+ return self.put(filename, fileobj)
+
+
+class InstrumentedConfig(config.Config):
+ """An instrumented config that supplies stubs for template methods."""
+
+ def __init__(self):
+ super(InstrumentedConfig, self).__init__()
+ self._calls = []
+ self._signatures = config.CHECK_NEVER
+
+ def _get_user_id(self):
+ self._calls.append('_get_user_id')
+ return "Robert Collins <robert.collins@example.org>"
+
+ def _get_signature_checking(self):
+ self._calls.append('_get_signature_checking')
+ return self._signatures
+
+ def _get_change_editor(self):
+ self._calls.append('_get_change_editor')
+ return 'vimdiff -fo @new_path @old_path'
+
+
+bool_config = """[DEFAULT]
+active = true
+inactive = false
+[UPPERCASE]
+active = True
+nonactive = False
+"""
+
+
+class TestConfigObj(tests.TestCase):
+
+ def test_get_bool(self):
+ co = config.ConfigObj(StringIO(bool_config))
+ self.assertIs(co.get_bool('DEFAULT', 'active'), True)
+ self.assertIs(co.get_bool('DEFAULT', 'inactive'), False)
+ self.assertIs(co.get_bool('UPPERCASE', 'active'), True)
+ self.assertIs(co.get_bool('UPPERCASE', 'nonactive'), False)
+
+ def test_hash_sign_in_value(self):
+ """
+ Before 4.5.0, ConfigObj did not quote # signs in values, so they'd be
+ treated as comments when read in again. (#86838)
+ """
+ co = config.ConfigObj()
+ co['test'] = 'foo#bar'
+ outfile = StringIO()
+ co.write(outfile=outfile)
+ lines = outfile.getvalue().splitlines()
+ self.assertEqual(lines, ['test = "foo#bar"'])
+ co2 = config.ConfigObj(lines)
+ self.assertEqual(co2['test'], 'foo#bar')
+
+ def test_triple_quotes(self):
+ # Bug #710410: if the value string has triple quotes
+ # then ConfigObj versions up to 4.7.2 will quote them wrong
+ # and won't be able to read them back
+ triple_quotes_value = '''spam
+""" that's my spam """
+eggs'''
+ co = config.ConfigObj()
+ co['test'] = triple_quotes_value
+ # While writing this test another bug in ConfigObj was found: calling
+ # co.write() without arguments produces a list of lines, one option per
+ # line, with multiline values not split across multiple lines, and that
+ # breaks parsing those lines back with ConfigObj. This issue only
+ # affects the test, but it's better to avoid the `co.write()` construct
+ # entirely.
+ # [bialix 20110222] bug report sent to ConfigObj's author
+ outfile = StringIO()
+ co.write(outfile=outfile)
+ output = outfile.getvalue()
+ # now we're trying to read it back
+ co2 = config.ConfigObj(StringIO(output))
+ self.assertEquals(triple_quotes_value, co2['test'])
+
+
+erroneous_config = """[section] # line 1
+good=good # line 2
+[section] # line 3
+whocares=notme # line 4
+"""
+
+
+class TestConfigObjErrors(tests.TestCase):
+
+ def test_duplicate_section_name_error_line(self):
+ try:
+ co = configobj.ConfigObj(StringIO(erroneous_config),
+ raise_errors=True)
+ except config.configobj.DuplicateError, e:
+ self.assertEqual(3, e.line_number)
+ else:
+ self.fail('Error in config file not detected')
+
+
+class TestConfig(tests.TestCase):
+
+ def test_constructs(self):
+ config.Config()
+
+ def test_user_email(self):
+ my_config = InstrumentedConfig()
+ self.assertEqual('robert.collins@example.org', my_config.user_email())
+ self.assertEqual(['_get_user_id'], my_config._calls)
+
+ def test_username(self):
+ my_config = InstrumentedConfig()
+ self.assertEqual('Robert Collins <robert.collins@example.org>',
+ my_config.username())
+ self.assertEqual(['_get_user_id'], my_config._calls)
+
+ def test_signatures_default(self):
+ my_config = config.Config()
+ self.assertFalse(
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+ self.assertEqual(config.CHECK_IF_POSSIBLE,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(config.SIGN_WHEN_REQUIRED,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signing_policy))
+
+ def test_signatures_template_method(self):
+ my_config = InstrumentedConfig()
+ self.assertEqual(config.CHECK_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(['_get_signature_checking'], my_config._calls)
+
+ def test_signatures_template_method_none(self):
+ my_config = InstrumentedConfig()
+ my_config._signatures = None
+ self.assertEqual(config.CHECK_IF_POSSIBLE,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(['_get_signature_checking'], my_config._calls)
+
+ def test_gpg_signing_command_default(self):
+ my_config = config.Config()
+ self.assertEqual('gpg',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.gpg_signing_command))
+
+ def test_get_user_option_default(self):
+ my_config = config.Config()
+ self.assertEqual(None, my_config.get_user_option('no_option'))
+
+ def test_post_commit_default(self):
+ my_config = config.Config()
+ self.assertEqual(None, self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.post_commit))
+
+
+ def test_log_format_default(self):
+ my_config = config.Config()
+ self.assertEqual('long',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.log_format))
+
+ def test_acceptable_keys_default(self):
+ my_config = config.Config()
+ self.assertEqual(None, self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.acceptable_keys))
+
+ def test_validate_signatures_in_log_default(self):
+ my_config = config.Config()
+ self.assertEqual(False, my_config.validate_signatures_in_log())
+
+ def test_get_change_editor(self):
+ my_config = InstrumentedConfig()
+ change_editor = my_config.get_change_editor('old_tree', 'new_tree')
+ self.assertEqual(['_get_change_editor'], my_config._calls)
+ self.assertIs(diff.DiffFromTool, change_editor.__class__)
+ self.assertEqual(['vimdiff', '-fo', '@new_path', '@old_path'],
+ change_editor.command_template)
+
+
+class TestConfigPath(tests.TestCase):
+
+ def setUp(self):
+ super(TestConfigPath, self).setUp()
+ self.overrideEnv('HOME', '/home/bogus')
+ self.overrideEnv('XDG_CACHE_DIR', '')
+ if sys.platform == 'win32':
+ self.overrideEnv(
+ 'BZR_HOME', r'C:\Documents and Settings\bogus\Application Data')
+ self.bzr_home = \
+ 'C:/Documents and Settings/bogus/Application Data/bazaar/2.0'
+ else:
+ self.bzr_home = '/home/bogus/.bazaar'
+
+ def test_config_dir(self):
+ self.assertEqual(config.config_dir(), self.bzr_home)
+
+ def test_config_dir_is_unicode(self):
+ self.assertIsInstance(config.config_dir(), unicode)
+
+ def test_config_filename(self):
+ self.assertEqual(config.config_filename(),
+ self.bzr_home + '/bazaar.conf')
+
+ def test_locations_config_filename(self):
+ self.assertEqual(config.locations_config_filename(),
+ self.bzr_home + '/locations.conf')
+
+ def test_authentication_config_filename(self):
+ self.assertEqual(config.authentication_config_filename(),
+ self.bzr_home + '/authentication.conf')
+
+ def test_xdg_cache_dir(self):
+ self.assertEqual(config.xdg_cache_dir(),
+ '/home/bogus/.cache')
+
+
+class TestXDGConfigDir(tests.TestCaseInTempDir):
+ # Must be in a temp dir because the config code tests for the existence
+ # of the bazaar subdirectory of $XDG_CONFIG_HOME
+
+ def setUp(self):
+ if sys.platform in ('darwin', 'win32'):
+ raise tests.TestNotApplicable(
+ 'XDG config dir not used on this platform')
+ super(TestXDGConfigDir, self).setUp()
+ self.overrideEnv('HOME', self.test_home_dir)
+ # BZR_HOME overrides everything we want to test so unset it.
+ self.overrideEnv('BZR_HOME', None)
+
+ def test_xdg_config_dir_exists(self):
+ """When ~/.config/bazaar exists, use it as the config dir."""
+ newdir = osutils.pathjoin(self.test_home_dir, '.config', 'bazaar')
+ os.makedirs(newdir)
+ self.assertEqual(config.config_dir(), newdir)
+
+ def test_xdg_config_home(self):
+ """When XDG_CONFIG_HOME is set, use it."""
+ xdgconfigdir = osutils.pathjoin(self.test_home_dir, 'xdgconfig')
+ self.overrideEnv('XDG_CONFIG_HOME', xdgconfigdir)
+ newdir = osutils.pathjoin(xdgconfigdir, 'bazaar')
+ os.makedirs(newdir)
+ self.assertEqual(config.config_dir(), newdir)
+
+
+class TestIniConfig(tests.TestCaseInTempDir):
+
+ def make_config_parser(self, s):
+ conf = config.IniBasedConfig.from_string(s)
+ return conf, conf._get_parser()
+
+
+class TestIniConfigBuilding(TestIniConfig):
+
+ def test_constructs(self):
+ my_config = config.IniBasedConfig()
+
+ def test_from_fp(self):
+ my_config = config.IniBasedConfig.from_string(sample_config_text)
+ self.assertIsInstance(my_config._get_parser(), configobj.ConfigObj)
+
+ def test_cached(self):
+ my_config = config.IniBasedConfig.from_string(sample_config_text)
+ parser = my_config._get_parser()
+ self.assertTrue(my_config._get_parser() is parser)
+
+ def _dummy_chown(self, path, uid, gid):
+ self.path, self.uid, self.gid = path, uid, gid
+
+ def test_ini_config_ownership(self):
+ """Ensure that chown is happening during _write_config_file"""
+ self.requireFeature(features.chown_feature)
+ self.overrideAttr(os, 'chown', self._dummy_chown)
+ self.path = self.uid = self.gid = None
+ conf = config.IniBasedConfig(file_name='./foo.conf')
+ conf._write_config_file()
+ self.assertEquals(self.path, './foo.conf')
+ self.assertTrue(isinstance(self.uid, int))
+ self.assertTrue(isinstance(self.gid, int))
+
+ def test_get_filename_parameter_is_deprecated_(self):
+ conf = self.callDeprecated([
+ 'IniBasedConfig.__init__(get_filename) was deprecated in 2.3.'
+ ' Use file_name instead.'],
+ config.IniBasedConfig, lambda: 'ini.conf')
+ self.assertEqual('ini.conf', conf.file_name)
+
+ def test_get_parser_file_parameter_is_deprecated_(self):
+ config_file = StringIO(sample_config_text.encode('utf-8'))
+ conf = config.IniBasedConfig.from_string(sample_config_text)
+ conf = self.callDeprecated([
+ 'IniBasedConfig._get_parser(file=xxx) was deprecated in 2.3.'
+ ' Use IniBasedConfig(_content=xxx) instead.'],
+ conf._get_parser, file=config_file)
+
+
+class TestIniConfigSaving(tests.TestCaseInTempDir):
+
+ def test_cant_save_without_a_file_name(self):
+ conf = config.IniBasedConfig()
+ self.assertRaises(AssertionError, conf._write_config_file)
+
+ def test_saved_with_content(self):
+ content = 'foo = bar\n'
+ conf = config.IniBasedConfig.from_string(
+ content, file_name='./test.conf', save=True)
+ self.assertFileEqual(content, 'test.conf')
+
+
+class TestIniConfigOptionExpansion(tests.TestCase):
+ """Test option expansion from the IniConfig level.
+
+ What we really want here is to test the Config level, but since the
+ class is abstract as far as storing values is concerned, this can't be
+ done properly (yet).
+ """
+ # FIXME: This should be rewritten when all configs share a storage
+ # implementation -- vila 2011-02-18
+
+ def get_config(self, string=None):
+ if string is None:
+ string = ''
+ c = config.IniBasedConfig.from_string(string)
+ return c
+
+ def assertExpansion(self, expected, conf, string, env=None):
+ self.assertEquals(expected, conf.expand_options(string, env))
+
+ def test_no_expansion(self):
+ c = self.get_config('')
+ self.assertExpansion('foo', c, 'foo')
+
+ def test_env_adding_options(self):
+ c = self.get_config('')
+ self.assertExpansion('bar', c, '{foo}', {'foo': 'bar'})
+
+ def test_env_overriding_options(self):
+ c = self.get_config('foo=baz')
+ self.assertExpansion('bar', c, '{foo}', {'foo': 'bar'})
+
+ def test_simple_ref(self):
+ c = self.get_config('foo=xxx')
+ self.assertExpansion('xxx', c, '{foo}')
+
+ def test_unknown_ref(self):
+ c = self.get_config('')
+ self.assertRaises(errors.ExpandingUnknownOption,
+ c.expand_options, '{foo}')
+
+ def test_indirect_ref(self):
+ c = self.get_config('''
+foo=xxx
+bar={foo}
+''')
+ self.assertExpansion('xxx', c, '{bar}')
+
+ def test_embedded_ref(self):
+ c = self.get_config('''
+foo=xxx
+bar=foo
+''')
+ self.assertExpansion('xxx', c, '{{bar}}')
+
+ def test_simple_loop(self):
+ c = self.get_config('foo={foo}')
+ self.assertRaises(errors.OptionExpansionLoop, c.expand_options, '{foo}')
+
+ def test_indirect_loop(self):
+ c = self.get_config('''
+foo={bar}
+bar={baz}
+baz={foo}''')
+ e = self.assertRaises(errors.OptionExpansionLoop,
+ c.expand_options, '{foo}')
+ self.assertEquals('foo->bar->baz', e.refs)
+ self.assertEquals('{foo}', e.string)
+
+ def test_list(self):
+ conf = self.get_config('''
+foo=start
+bar=middle
+baz=end
+list={foo},{bar},{baz}
+''')
+ self.assertEquals(['start', 'middle', 'end'],
+ conf.get_user_option('list', expand=True))
+
+ def test_cascading_list(self):
+ conf = self.get_config('''
+foo=start,{bar}
+bar=middle,{baz}
+baz=end
+list={foo}
+''')
+ self.assertEquals(['start', 'middle', 'end'],
+ conf.get_user_option('list', expand=True))
+
+ def test_pathological_hidden_list(self):
+ conf = self.get_config('''
+foo=bin
+bar=go
+start={foo
+middle=},{
+end=bar}
+hidden={start}{middle}{end}
+''')
+ # Nope, it's either a string or a list, and the list wins as soon as a
+ # ',' appears, so the string concatenation never occurs.
+ self.assertEquals(['{foo', '}', '{', 'bar}'],
+ conf.get_user_option('hidden', expand=True))
+
+
+class TestLocationConfigOptionExpansion(tests.TestCaseInTempDir):
+
+ def get_config(self, location, string=None):
+ if string is None:
+ string = ''
+ # Since we don't save the config we don't strictly need to inherit
+ # from TestCaseInTempDir, but an error occurs so quickly...
+ c = config.LocationConfig.from_string(string, location)
+ return c
+
+ def test_dont_cross_unrelated_section(self):
+ c = self.get_config('/another/branch/path','''
+[/one/branch/path]
+foo = hello
+bar = {foo}/2
+
+[/another/branch/path]
+bar = {foo}/2
+''')
+ self.assertRaises(errors.ExpandingUnknownOption,
+ c.get_user_option, 'bar', expand=True)
+
+ def test_cross_related_sections(self):
+ c = self.get_config('/project/branch/path','''
+[/project]
+foo = qu
+
+[/project/branch/path]
+bar = {foo}ux
+''')
+ self.assertEquals('quux', c.get_user_option('bar', expand=True))
+
+
+class TestIniBaseConfigOnDisk(tests.TestCaseInTempDir):
+
+ def test_cannot_reload_without_name(self):
+ conf = config.IniBasedConfig.from_string(sample_config_text)
+ self.assertRaises(AssertionError, conf.reload)
+
+ def test_reload_see_new_value(self):
+ c1 = config.IniBasedConfig.from_string('editor=vim\n',
+ file_name='./test/conf')
+ c1._write_config_file()
+ c2 = config.IniBasedConfig.from_string('editor=emacs\n',
+ file_name='./test/conf')
+ c2._write_config_file()
+ self.assertEqual('vim', c1.get_user_option('editor'))
+ self.assertEqual('emacs', c2.get_user_option('editor'))
+ # Make sure we get the Right value
+ c1.reload()
+ self.assertEqual('emacs', c1.get_user_option('editor'))
+
+
+class TestLockableConfig(tests.TestCaseInTempDir):
+
+ scenarios = lockable_config_scenarios()
+
+ # Set by load_tests
+ config_class = None
+ config_args = None
+ config_section = None
+
+ def setUp(self):
+ super(TestLockableConfig, self).setUp()
+ self._content = '[%s]\none=1\ntwo=2\n' % (self.config_section,)
+ self.config = self.create_config(self._content)
+
+ def get_existing_config(self):
+ return self.config_class(*self.config_args)
+
+ def create_config(self, content):
+ kwargs = dict(save=True)
+ c = self.config_class.from_string(content, *self.config_args, **kwargs)
+ return c
+
+ def test_simple_read_access(self):
+ self.assertEquals('1', self.config.get_user_option('one'))
+
+ def test_simple_write_access(self):
+ self.config.set_user_option('one', 'one')
+ self.assertEquals('one', self.config.get_user_option('one'))
+
+ def test_listen_to_the_last_speaker(self):
+ c1 = self.config
+ c2 = self.get_existing_config()
+ c1.set_user_option('one', 'ONE')
+ c2.set_user_option('two', 'TWO')
+ self.assertEquals('ONE', c1.get_user_option('one'))
+ self.assertEquals('TWO', c2.get_user_option('two'))
+ # The second update respects the first one
+ self.assertEquals('ONE', c2.get_user_option('one'))
+
+ def test_last_speaker_wins(self):
+ # If the same config is not shared, the same variable modified twice
+ # can only see a single result.
+ c1 = self.config
+ c2 = self.get_existing_config()
+ c1.set_user_option('one', 'c1')
+ c2.set_user_option('one', 'c2')
+ self.assertEquals('c2', c2._get_user_option('one'))
+ # The first modification is still available until another refresh
+ # occurs
+ self.assertEquals('c1', c1._get_user_option('one'))
+ c1.set_user_option('two', 'done')
+ self.assertEquals('c2', c1._get_user_option('one'))
+
+ def test_writes_are_serialized(self):
+ c1 = self.config
+ c2 = self.get_existing_config()
+
+ # We spawn a thread that will pause *during* the write
+ before_writing = threading.Event()
+ after_writing = threading.Event()
+ writing_done = threading.Event()
+ c1_orig = c1._write_config_file
+ def c1_write_config_file():
+ before_writing.set()
+ c1_orig()
+ # The lock is held. We wait for the main thread to decide when to
+ # continue
+ after_writing.wait()
+ c1._write_config_file = c1_write_config_file
+ def c1_set_option():
+ c1.set_user_option('one', 'c1')
+ writing_done.set()
+ t1 = threading.Thread(target=c1_set_option)
+ # Collect the thread after the test
+ self.addCleanup(t1.join)
+ # Be ready to unblock the thread if the test goes wrong
+ self.addCleanup(after_writing.set)
+ t1.start()
+ before_writing.wait()
+ self.assertTrue(c1._lock.is_held)
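+ # While c1 still holds the lock, a concurrent writer must fail with
+ # LockContention instead of silently overwriting.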
+ self.assertRaises(errors.LockContention,
+ c2.set_user_option, 'one', 'c2')
+ self.assertEquals('c1', c1.get_user_option('one'))
+ # Let the lock be released
+ after_writing.set()
+ writing_done.wait()
+ c2.set_user_option('one', 'c2')
+ self.assertEquals('c2', c2.get_user_option('one'))
+
+ def test_read_while_writing(self):
+ c1 = self.config
+ # We spawn a thread that will pause *during* the write
+ ready_to_write = threading.Event()
+ do_writing = threading.Event()
+ writing_done = threading.Event()
+ c1_orig = c1._write_config_file
+ def c1_write_config_file():
+ ready_to_write.set()
+ # The lock is held. We wait for the main thread to decide when to
+ # continue
+ do_writing.wait()
+ c1_orig()
+ writing_done.set()
+ c1._write_config_file = c1_write_config_file
+ def c1_set_option():
+ c1.set_user_option('one', 'c1')
+ t1 = threading.Thread(target=c1_set_option)
+ # Collect the thread after the test
+ self.addCleanup(t1.join)
+ # Be ready to unblock the thread if the test goes wrong
+ self.addCleanup(do_writing.set)
+ t1.start()
+ # Ensure the thread is ready to write
+ ready_to_write.wait()
+ self.assertTrue(c1._lock.is_held)
+ self.assertEquals('c1', c1.get_user_option('one'))
+ # If we read during the write, we get the old value
+ c2 = self.get_existing_config()
+ self.assertEquals('1', c2.get_user_option('one'))
+ # Let the writing occur and ensure it occurred
+ do_writing.set()
+ writing_done.wait()
+ # Now we get the updated value
+ c3 = self.get_existing_config()
+ self.assertEquals('c1', c3.get_user_option('one'))
+
+
+class TestGetUserOptionAs(TestIniConfig):
+
+ def test_get_user_option_as_bool(self):
+ conf, parser = self.make_config_parser("""
+a_true_bool = true
+a_false_bool = 0
+an_invalid_bool = maybe
+a_list = hmm, who knows ? # This is interpreted as a list !
+""")
+ get_bool = conf.get_user_option_as_bool
+ self.assertEqual(True, get_bool('a_true_bool'))
+ self.assertEqual(False, get_bool('a_false_bool'))
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ msg = 'Value "%s" is not a boolean for "%s"'
+ self.assertIs(None, get_bool('an_invalid_bool'))
+ self.assertEquals(msg % ('maybe', 'an_invalid_bool'), warnings[0])
+ warnings = []
+ self.assertIs(None, get_bool('not_defined_in_this_config'))
+ self.assertEquals([], warnings)
+
+ def test_get_user_option_as_list(self):
+ conf, parser = self.make_config_parser("""
+a_list = a,b,c
+length_1 = 1,
+one_item = x
+""")
+ get_list = conf.get_user_option_as_list
+ self.assertEqual(['a', 'b', 'c'], get_list('a_list'))
+ self.assertEqual(['1'], get_list('length_1'))
+ self.assertEqual('x', conf.get_user_option('one_item'))
+ # automatically cast to list
+ self.assertEqual(['x'], get_list('one_item'))
+
+ def test_get_user_option_as_int_from_SI(self):
+ conf, parser = self.make_config_parser("""
+plain = 100
+si_k = 5k,
+si_kb = 5kb,
+si_m = 5M,
+si_mb = 5MB,
+si_g = 5g,
+si_gb = 5gB,
+""")
+ def get_si(s, default=None):
+ return self.applyDeprecated(
+ deprecated_in((2, 5, 0)),
+ conf.get_user_option_as_int_from_SI, s, default)
+ self.assertEqual(100, get_si('plain'))
+ self.assertEqual(5000, get_si('si_k'))
+ self.assertEqual(5000, get_si('si_kb'))
+ self.assertEqual(5000000, get_si('si_m'))
+ self.assertEqual(5000000, get_si('si_mb'))
+ self.assertEqual(5000000000, get_si('si_g'))
+ self.assertEqual(5000000000, get_si('si_gb'))
+ self.assertEqual(None, get_si('non-exist'))
+ self.assertEqual(42, get_si('non-exist-with-default', 42))
+
+
+class TestSuppressWarning(TestIniConfig):
+
+ def make_warnings_config(self, s):
+ conf, parser = self.make_config_parser(s)
+ return conf.suppress_warning
+
+ def test_suppress_warning_unknown(self):
+ suppress_warning = self.make_warnings_config('')
+ self.assertEqual(False, suppress_warning('unknown_warning'))
+
+ def test_suppress_warning_known(self):
+ suppress_warning = self.make_warnings_config('suppress_warnings=a,b')
+ self.assertEqual(False, suppress_warning('c'))
+ self.assertEqual(True, suppress_warning('a'))
+ self.assertEqual(True, suppress_warning('b'))
+
+
+class TestGetConfig(tests.TestCase):
+
+ def test_constructs(self):
+ my_config = config.GlobalConfig()
+
+ def test_calls_read_filenames(self):
+ # replace the class that is constructed, to check its parameters
+ oldparserclass = config.ConfigObj
+ config.ConfigObj = InstrumentedConfigObj
+ my_config = config.GlobalConfig()
+ try:
+ parser = my_config._get_parser()
+ finally:
+ config.ConfigObj = oldparserclass
+ self.assertIsInstance(parser, InstrumentedConfigObj)
+ self.assertEqual(parser._calls, [('__init__', config.config_filename(),
+ 'utf-8')])
+
+
+class TestBranchConfig(tests.TestCaseWithTransport):
+
+ def test_constructs(self):
+ branch = FakeBranch()
+ my_config = config.BranchConfig(branch)
+ self.assertRaises(TypeError, config.BranchConfig)
+
+ def test_get_location_config(self):
+ branch = FakeBranch()
+ my_config = config.BranchConfig(branch)
+ location_config = my_config._get_location_config()
+ self.assertEqual(branch.base, location_config.location)
+ self.assertIs(location_config, my_config._get_location_config())
+
+ def test_get_config(self):
+ """The Branch.get_config method works properly"""
+ b = controldir.ControlDir.create_standalone_workingtree('.').branch
+ my_config = b.get_config()
+ self.assertIs(my_config.get_user_option('wacky'), None)
+ my_config.set_user_option('wacky', 'unlikely')
+ self.assertEqual(my_config.get_user_option('wacky'), 'unlikely')
+
+ # Ensure we get the same thing if we start again
+ b2 = branch.Branch.open('.')
+ my_config2 = b2.get_config()
+ self.assertEqual(my_config2.get_user_option('wacky'), 'unlikely')
+
+ def test_has_explicit_nickname(self):
+ b = self.make_branch('.')
+ self.assertFalse(b.get_config().has_explicit_nickname())
+ b.nick = 'foo'
+ self.assertTrue(b.get_config().has_explicit_nickname())
+
+ def test_config_url(self):
+ """The Branch.get_config will use section that uses a local url"""
+ branch = self.make_branch('branch')
+ self.assertEqual('branch', branch.nick)
+
+ local_url = urlutils.local_path_to_url('branch')
+ conf = config.LocationConfig.from_string(
+ '[%s]\nnickname = foobar' % (local_url,),
+ local_url, save=True)
+ self.assertEqual('foobar', branch.nick)
+
+ def test_config_local_path(self):
+ """The Branch.get_config will use a local system path"""
+ branch = self.make_branch('branch')
+ self.assertEqual('branch', branch.nick)
+
+ local_path = osutils.getcwd().encode('utf8')
+ conf = config.LocationConfig.from_string(
+ '[%s/branch]\nnickname = barry' % (local_path,),
+ 'branch', save=True)
+ self.assertEqual('barry', branch.nick)
+
+ def test_config_creates_local(self):
+ """Creating a new entry in config uses a local path."""
+ branch = self.make_branch('branch', format='knit')
+ branch.set_push_location('http://foobar')
+ local_path = osutils.getcwd().encode('utf8')
+ # Surprisingly ConfigObj doesn't create a trailing newline
+ self.check_file_contents(config.locations_config_filename(),
+ '[%s/branch]\n'
+ 'push_location = http://foobar\n'
+ 'push_location:policy = norecurse\n'
+ % (local_path,))
+
+ def test_autonick_urlencoded(self):
+ b = self.make_branch('!repo')
+ self.assertEqual('!repo', b.get_config().get_nickname())
+
+ def test_autonick_uses_branch_name(self):
+ b = self.make_branch('foo', name='bar')
+ self.assertEqual('bar', b.get_config().get_nickname())
+
+ def test_warn_if_masked(self):
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+
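+ # Precedence is locations.conf > branch.conf > bazaar.conf, so writing
+ # to a lower-precedence store warns when a higher-precedence one
+ # already defines the option.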
+ def set_option(store, warn_masked=True):
+ warnings[:] = []
+ conf.set_user_option('example_option', repr(store), store=store,
+ warn_masked=warn_masked)
+ def assertWarning(warning):
+ if warning is None:
+ self.assertEqual(0, len(warnings))
+ else:
+ self.assertEqual(1, len(warnings))
+ self.assertEqual(warning, warnings[0])
+ branch = self.make_branch('.')
+ conf = branch.get_config()
+ set_option(config.STORE_GLOBAL)
+ assertWarning(None)
+ set_option(config.STORE_BRANCH)
+ assertWarning(None)
+ set_option(config.STORE_GLOBAL)
+ assertWarning('Value "4" is masked by "3" from branch.conf')
+ set_option(config.STORE_GLOBAL, warn_masked=False)
+ assertWarning(None)
+ set_option(config.STORE_LOCATION)
+ assertWarning(None)
+ set_option(config.STORE_BRANCH)
+ assertWarning('Value "3" is masked by "0" from locations.conf')
+ set_option(config.STORE_BRANCH, warn_masked=False)
+ assertWarning(None)
+
+
+class TestGlobalConfigItems(tests.TestCaseInTempDir):
+
+ def test_user_id(self):
+ my_config = config.GlobalConfig.from_string(sample_config_text)
+ self.assertEqual(u"Erik B\u00e5gfors <erik@bagfors.nu>",
+ my_config._get_user_id())
+
+ def test_absent_user_id(self):
+ my_config = config.GlobalConfig()
+ self.assertEqual(None, my_config._get_user_id())
+
+ def test_signatures_always(self):
+ my_config = config.GlobalConfig.from_string(sample_always_signatures)
+ self.assertEqual(config.CHECK_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(config.SIGN_ALWAYS,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signing_policy))
+ self.assertEqual(True,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+
+ def test_signatures_if_possible(self):
+ my_config = config.GlobalConfig.from_string(sample_maybe_signatures)
+ self.assertEqual(config.CHECK_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(config.SIGN_WHEN_REQUIRED,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signing_policy))
+ self.assertEqual(False, self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+
+ def test_signatures_ignore(self):
+ my_config = config.GlobalConfig.from_string(sample_ignore_signatures)
+ self.assertEqual(config.CHECK_ALWAYS,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(config.SIGN_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signing_policy))
+ self.assertEqual(False, self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+
+ def _get_sample_config(self):
+ my_config = config.GlobalConfig.from_string(sample_config_text)
+ return my_config
+
+ def test_gpg_signing_command(self):
+ my_config = self._get_sample_config()
+ self.assertEqual("gnome-gpg",
+ self.applyDeprecated(
+ deprecated_in((2, 5, 0)), my_config.gpg_signing_command))
+ self.assertEqual(False, self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+
+ def test_gpg_signing_key(self):
+ my_config = self._get_sample_config()
+ self.assertEqual("DD4D5088",
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.gpg_signing_key))
+
+ def _get_empty_config(self):
+ my_config = config.GlobalConfig()
+ return my_config
+
+ def test_gpg_signing_command_unset(self):
+ my_config = self._get_empty_config()
+ self.assertEqual("gpg",
+ self.applyDeprecated(
+ deprecated_in((2, 5, 0)), my_config.gpg_signing_command))
+
+ def test_get_user_option_default(self):
+ my_config = self._get_empty_config()
+ self.assertEqual(None, my_config.get_user_option('no_option'))
+
+ def test_get_user_option_global(self):
+ my_config = self._get_sample_config()
+ self.assertEqual("something",
+ my_config.get_user_option('user_global_option'))
+
+ def test_post_commit_default(self):
+ my_config = self._get_sample_config()
+ self.assertEqual(None,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.post_commit))
+
+ def test_configured_logformat(self):
+ my_config = self._get_sample_config()
+ self.assertEqual("short",
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.log_format))
+
+ def test_configured_acceptable_keys(self):
+ my_config = self._get_sample_config()
+ self.assertEqual("amy",
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.acceptable_keys))
+
+ def test_configured_validate_signatures_in_log(self):
+ my_config = self._get_sample_config()
+ self.assertEqual(True, my_config.validate_signatures_in_log())
+
+ def test_get_alias(self):
+ my_config = self._get_sample_config()
+ self.assertEqual('help', my_config.get_alias('h'))
+
+ def test_get_aliases(self):
+ my_config = self._get_sample_config()
+ aliases = my_config.get_aliases()
+ self.assertEqual(2, len(aliases))
+ sorted_keys = sorted(aliases)
+ self.assertEqual('help', aliases[sorted_keys[0]])
+ self.assertEqual(sample_long_alias, aliases[sorted_keys[1]])
+
+ def test_get_no_alias(self):
+ my_config = self._get_sample_config()
+ self.assertEqual(None, my_config.get_alias('foo'))
+
+ def test_get_long_alias(self):
+ my_config = self._get_sample_config()
+ self.assertEqual(sample_long_alias, my_config.get_alias('ll'))
+
+ def test_get_change_editor(self):
+ my_config = self._get_sample_config()
+ change_editor = my_config.get_change_editor('old', 'new')
+ self.assertIs(diff.DiffFromTool, change_editor.__class__)
+ self.assertEqual('vimdiff -of @new_path @old_path',
+ ' '.join(change_editor.command_template))
+
+ def test_get_no_change_editor(self):
+ my_config = self._get_empty_config()
+ change_editor = my_config.get_change_editor('old', 'new')
+ self.assertIs(None, change_editor)
+
+ def test_get_merge_tools(self):
+ conf = self._get_sample_config()
+ tools = conf.get_merge_tools()
+ self.log(repr(tools))
+ self.assertEqual(
+ {u'funkytool' : u'funkytool "arg with spaces" {this_temp}',
+ u'sometool' : u'sometool {base} {this} {other} -o {result}',
+ u'newtool' : u'"newtool with spaces" {this_temp}'},
+ tools)
+
+ def test_get_merge_tools_empty(self):
+ conf = self._get_empty_config()
+ tools = conf.get_merge_tools()
+ self.assertEqual({}, tools)
+
+ def test_find_merge_tool(self):
+ conf = self._get_sample_config()
+ cmdline = conf.find_merge_tool('sometool')
+ self.assertEqual('sometool {base} {this} {other} -o {result}', cmdline)
+
+ def test_find_merge_tool_not_found(self):
+ conf = self._get_sample_config()
+ cmdline = conf.find_merge_tool('DOES NOT EXIST')
+ self.assertIs(cmdline, None)
+
+ def test_find_merge_tool_known(self):
+ conf = self._get_empty_config()
+ cmdline = conf.find_merge_tool('kdiff3')
+ self.assertEquals('kdiff3 {base} {this} {other} -o {result}', cmdline)
+
+ def test_find_merge_tool_override_known(self):
+ conf = self._get_empty_config()
+ conf.set_user_option('bzr.mergetool.kdiff3', 'kdiff3 blah')
+ cmdline = conf.find_merge_tool('kdiff3')
+ self.assertEqual('kdiff3 blah', cmdline)
+
+
+class TestGlobalConfigSavingOptions(tests.TestCaseInTempDir):
+
+ def test_empty(self):
+ my_config = config.GlobalConfig()
+ self.assertEqual(0, len(my_config.get_aliases()))
+
+ def test_set_alias(self):
+ my_config = config.GlobalConfig()
+ alias_value = 'commit --strict'
+ my_config.set_alias('commit', alias_value)
+ new_config = config.GlobalConfig()
+ self.assertEqual(alias_value, new_config.get_alias('commit'))
+
+ def test_remove_alias(self):
+ my_config = config.GlobalConfig()
+ my_config.set_alias('commit', 'commit --strict')
+ # Now remove the alias again.
+ my_config.unset_alias('commit')
+ new_config = config.GlobalConfig()
+ self.assertIs(None, new_config.get_alias('commit'))
+
+
+class TestLocationConfig(tests.TestCaseInTempDir, TestOptionsMixin):
+
+ def test_constructs(self):
+ my_config = config.LocationConfig('http://example.com')
+ self.assertRaises(TypeError, config.LocationConfig)
+
+ def test_branch_calls_read_filenames(self):
+ # This is testing the correct file names are provided.
+ # TODO: consolidate with the test for GlobalConfigs filename checks.
+ #
+ # replace the class that is constructed, to check its parameters
+ oldparserclass = config.ConfigObj
+ config.ConfigObj = InstrumentedConfigObj
+ try:
+ my_config = config.LocationConfig('http://www.example.com')
+ parser = my_config._get_parser()
+ finally:
+ config.ConfigObj = oldparserclass
+ self.assertIsInstance(parser, InstrumentedConfigObj)
+ self.assertEqual(parser._calls,
+ [('__init__', config.locations_config_filename(),
+ 'utf-8')])
+
+ def test_get_global_config(self):
+ my_config = config.BranchConfig(FakeBranch('http://example.com'))
+ global_config = my_config._get_global_config()
+ self.assertIsInstance(global_config, config.GlobalConfig)
+ self.assertIs(global_config, my_config._get_global_config())
+
+ def assertLocationMatching(self, expected):
+ self.assertEqual(expected,
+ list(self.my_location_config._get_matching_sections()))
+
+ def test__get_matching_sections_no_match(self):
+ self.get_branch_config('/')
+ self.assertLocationMatching([])
+
+ def test__get_matching_sections_exact(self):
+ self.get_branch_config('http://www.example.com')
+ self.assertLocationMatching([('http://www.example.com', '')])
+
+ def test__get_matching_sections_suffix_does_not(self):
+ self.get_branch_config('http://www.example.com-com')
+ self.assertLocationMatching([])
+
+ def test__get_matching_sections_subdir_recursive(self):
+ self.get_branch_config('http://www.example.com/com')
+ self.assertLocationMatching([('http://www.example.com', 'com')])
+
+ def test__get_matching_sections_ignoreparent(self):
+ self.get_branch_config('http://www.example.com/ignoreparent')
+ self.assertLocationMatching([('http://www.example.com/ignoreparent',
+ '')])
+
+ def test__get_matching_sections_ignoreparent_subdir(self):
+ self.get_branch_config(
+ 'http://www.example.com/ignoreparent/childbranch')
+ self.assertLocationMatching([('http://www.example.com/ignoreparent',
+ 'childbranch')])
+
+ def test__get_matching_sections_subdir_trailing_slash(self):
+ self.get_branch_config('/b')
+ self.assertLocationMatching([('/b/', '')])
+
+ def test__get_matching_sections_subdir_child(self):
+ self.get_branch_config('/a/foo')
+ self.assertLocationMatching([('/a/*', ''), ('/a/', 'foo')])
+
+ def test__get_matching_sections_subdir_child_child(self):
+ self.get_branch_config('/a/foo/bar')
+ self.assertLocationMatching([('/a/*', 'bar'), ('/a/', 'foo/bar')])
+
+ def test__get_matching_sections_trailing_slash_with_children(self):
+ self.get_branch_config('/a/')
+ self.assertLocationMatching([('/a/', '')])
+
+ def test__get_matching_sections_explicit_over_glob(self):
+ # XXX: 2006-09-08 jamesh
+ # This test only passes because ord('c') > ord('*'). If there
+ # was a config section for '/a/?', it would get precedence
+ # over '/a/c'.
+ self.get_branch_config('/a/c')
+ self.assertLocationMatching([('/a/c', ''), ('/a/*', ''), ('/a/', 'c')])
+
+ def test__get_option_policy_normal(self):
+ self.get_branch_config('http://www.example.com')
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com', 'normal_option'),
+ config.POLICY_NONE)
+
+ def test__get_option_policy_norecurse(self):
+ self.get_branch_config('http://www.example.com')
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com', 'norecurse_option'),
+ config.POLICY_NORECURSE)
+ # Test old recurse=False setting:
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com/norecurse', 'normal_option'),
+ config.POLICY_NORECURSE)
+
+ def test__get_option_policy_appendpath(self):
+ self.get_branch_config('http://www.example.com')
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com', 'appendpath_option'),
+ config.POLICY_APPENDPATH)
+
+ def test__get_options_with_policy(self):
+ self.get_branch_config('/dir/subdir',
+ location_config="""\
+[/dir]
+other_url = /other-dir
+other_url:policy = appendpath
+[/dir/subdir]
+other_url = /other-subdir
+""")
+ self.assertOptions(
+ [(u'other_url', u'/other-subdir', u'/dir/subdir', 'locations'),
+ (u'other_url', u'/other-dir', u'/dir', 'locations'),
+ (u'other_url:policy', u'appendpath', u'/dir', 'locations')],
+ self.my_location_config)
+
+ def test_location_without_username(self):
+ self.get_branch_config('http://www.example.com/ignoreparent')
+ self.assertEqual(u'Erik B\u00e5gfors <erik@bagfors.nu>',
+ self.my_config.username())
+
+ def test_location_not_listed(self):
+ """Test that the global username is used when no location matches"""
+ self.get_branch_config('/home/robertc/sources')
+ self.assertEqual(u'Erik B\u00e5gfors <erik@bagfors.nu>',
+ self.my_config.username())
+
+ def test_overriding_location(self):
+ self.get_branch_config('http://www.example.com/foo')
+ self.assertEqual('Robert Collins <robertc@example.org>',
+ self.my_config.username())
+
+ def test_signatures_not_set(self):
+ self.get_branch_config('http://www.example.com',
+ global_config=sample_ignore_signatures)
+ self.assertEqual(config.CHECK_ALWAYS,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.signature_checking))
+ self.assertEqual(config.SIGN_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.signing_policy))
+
+ def test_signatures_never(self):
+ self.get_branch_config('/a/c')
+ self.assertEqual(config.CHECK_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.signature_checking))
+
+ def test_signatures_when_available(self):
+ self.get_branch_config('/a/', global_config=sample_ignore_signatures)
+ self.assertEqual(config.CHECK_IF_POSSIBLE,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.signature_checking))
+
+ def test_signatures_always(self):
+ self.get_branch_config('/b')
+ self.assertEqual(config.CHECK_ALWAYS,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.signature_checking))
+
+ def test_gpg_signing_command(self):
+ self.get_branch_config('/b')
+ self.assertEqual("gnome-gpg",
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.gpg_signing_command))
+
+ def test_gpg_signing_command_missing(self):
+ self.get_branch_config('/a')
+ self.assertEqual("false",
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.gpg_signing_command))
+
+ def test_gpg_signing_key(self):
+ self.get_branch_config('/b')
+ self.assertEqual("DD4D5088", self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.gpg_signing_key))
+
+ def test_gpg_signing_key_default(self):
+ self.get_branch_config('/a')
+ self.assertEqual("erik@bagfors.nu",
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.gpg_signing_key))
+
+ def test_get_user_option_global(self):
+ self.get_branch_config('/a')
+ self.assertEqual('something',
+ self.my_config.get_user_option('user_global_option'))
+
+ def test_get_user_option_local(self):
+ self.get_branch_config('/a')
+ self.assertEqual('local',
+ self.my_config.get_user_option('user_local_option'))
+
+ def test_get_user_option_appendpath(self):
+ # returned as is for the base path:
+ self.get_branch_config('http://www.example.com')
+ self.assertEqual('append',
+ self.my_config.get_user_option('appendpath_option'))
+ # Extra path components get appended:
+ self.get_branch_config('http://www.example.com/a/b/c')
+ self.assertEqual('append/a/b/c',
+ self.my_config.get_user_option('appendpath_option'))
+ # Overridden for http://www.example.com/dir, where it is a
+ # normal option:
+ self.get_branch_config('http://www.example.com/dir/a/b/c')
+ self.assertEqual('normal',
+ self.my_config.get_user_option('appendpath_option'))
+
+ def test_get_user_option_norecurse(self):
+ self.get_branch_config('http://www.example.com')
+ self.assertEqual('norecurse',
+ self.my_config.get_user_option('norecurse_option'))
+ self.get_branch_config('http://www.example.com/dir')
+ self.assertEqual(None,
+ self.my_config.get_user_option('norecurse_option'))
+ # http://www.example.com/norecurse is a recurse=False section
+ # that redefines normal_option. Subdirectories do not pick up
+ # this redefinition.
+ self.get_branch_config('http://www.example.com/norecurse')
+ self.assertEqual('norecurse',
+ self.my_config.get_user_option('normal_option'))
+ self.get_branch_config('http://www.example.com/norecurse/subdir')
+ self.assertEqual('normal',
+ self.my_config.get_user_option('normal_option'))
+
+ def test_set_user_option_norecurse(self):
+ self.get_branch_config('http://www.example.com')
+ self.my_config.set_user_option('foo', 'bar',
+ store=config.STORE_LOCATION_NORECURSE)
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com', 'foo'),
+ config.POLICY_NORECURSE)
+
+ def test_set_user_option_appendpath(self):
+ self.get_branch_config('http://www.example.com')
+ self.my_config.set_user_option('foo', 'bar',
+ store=config.STORE_LOCATION_APPENDPATH)
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com', 'foo'),
+ config.POLICY_APPENDPATH)
+
+ def test_set_user_option_change_policy(self):
+ self.get_branch_config('http://www.example.com')
+ self.my_config.set_user_option('norecurse_option', 'normal',
+ store=config.STORE_LOCATION)
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com', 'norecurse_option'),
+ config.POLICY_NONE)
+
+ def test_set_user_option_recurse_false_section(self):
+ # The following section has recurse=False set. The test is to
+ # make sure that a normal option can be added to the section,
+ # converting recurse=False to the norecurse policy.
+ self.get_branch_config('http://www.example.com/norecurse')
+ self.callDeprecated(['The recurse option is deprecated as of 0.14. '
+ 'The section "http://www.example.com/norecurse" '
+ 'has been converted to use policies.'],
+ self.my_config.set_user_option,
+ 'foo', 'bar', store=config.STORE_LOCATION)
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com/norecurse', 'foo'),
+ config.POLICY_NONE)
+ # The previously existing option is still norecurse:
+ self.assertEqual(
+ self.my_location_config._get_option_policy(
+ 'http://www.example.com/norecurse', 'normal_option'),
+ config.POLICY_NORECURSE)
+
+ def test_post_commit_default(self):
+ self.get_branch_config('/a/c')
+ self.assertEqual('bzrlib.tests.test_config.post_commit',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ self.my_config.post_commit))
+
+ def get_branch_config(self, location, global_config=None,
+ location_config=None):
+ my_branch = FakeBranch(location)
+ if global_config is None:
+ global_config = sample_config_text
+ if location_config is None:
+ location_config = sample_branches_text
+
+ my_global_config = config.GlobalConfig.from_string(global_config,
+ save=True)
+ my_location_config = config.LocationConfig.from_string(
+ location_config, my_branch.base, save=True)
+ my_config = config.BranchConfig(my_branch)
+ self.my_config = my_config
+ self.my_location_config = my_config._get_location_config()
+
+ def test_set_user_setting_sets_and_saves(self):
+ self.get_branch_config('/a/c')
+ record = InstrumentedConfigObj("foo")
+ self.my_location_config._parser = record
+
+ self.callDeprecated(['The recurse option is deprecated as of '
+ '0.14. The section "/a/c" has been '
+ 'converted to use policies.'],
+ self.my_config.set_user_option,
+ 'foo', 'bar', store=config.STORE_LOCATION)
+ self.assertEqual([('reload',),
+ ('__contains__', '/a/c'),
+ ('__contains__', '/a/c/'),
+ ('__setitem__', '/a/c', {}),
+ ('__getitem__', '/a/c'),
+ ('__setitem__', 'foo', 'bar'),
+ ('__getitem__', '/a/c'),
+ ('as_bool', 'recurse'),
+ ('__getitem__', '/a/c'),
+ ('__delitem__', 'recurse'),
+ ('__getitem__', '/a/c'),
+ ('keys',),
+ ('__getitem__', '/a/c'),
+ ('__contains__', 'foo:policy'),
+ ('write',)],
+ record._calls[1:])
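+ # In other words, set_user_option(store=STORE_LOCATION) is expected to
+ # reload the parser, create the '/a/c' section if missing, set the
+ # value, convert the legacy 'recurse' key into policies and write the
+ # file back (a reading of the recorded calls above, not a spec).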
+
+ def test_set_user_setting_sets_and_saves2(self):
+ self.get_branch_config('/a/c')
+ self.assertIs(self.my_config.get_user_option('foo'), None)
+ self.my_config.set_user_option('foo', 'bar')
+ self.assertEqual(
+ self.my_config.branch.control_files.files['branch.conf'].strip(),
+ 'foo = bar')
+ self.assertEqual(self.my_config.get_user_option('foo'), 'bar')
+ self.my_config.set_user_option('foo', 'baz',
+ store=config.STORE_LOCATION)
+ self.assertEqual(self.my_config.get_user_option('foo'), 'baz')
+ self.my_config.set_user_option('foo', 'qux')
+ self.assertEqual(self.my_config.get_user_option('foo'), 'baz')
+
+ def test_get_bzr_remote_path(self):
+ my_config = config.LocationConfig('/a/c')
+ self.assertEqual('bzr', my_config.get_bzr_remote_path())
+ my_config.set_user_option('bzr_remote_path', '/path-bzr')
+ self.assertEqual('/path-bzr', my_config.get_bzr_remote_path())
+ self.overrideEnv('BZR_REMOTE_PATH', '/environ-bzr')
+ self.assertEqual('/environ-bzr', my_config.get_bzr_remote_path())
+
+
+precedence_global = 'option = global'
+precedence_branch = 'option = branch'
+precedence_location = """
+[http://]
+recurse = true
+option = recurse
+[http://example.com/specific]
+option = exact
+"""
+
+
+class TestBranchConfigItems(tests.TestCaseInTempDir):
+
+ def get_branch_config(self, global_config=None, location=None,
+ location_config=None, branch_data_config=None):
+ my_branch = FakeBranch(location)
+ if global_config is not None:
+ my_global_config = config.GlobalConfig.from_string(global_config,
+ save=True)
+ if location_config is not None:
+ my_location_config = config.LocationConfig.from_string(
+ location_config, my_branch.base, save=True)
+ my_config = config.BranchConfig(my_branch)
+ if branch_data_config is not None:
+ my_config.branch.control_files.files['branch.conf'] = \
+ branch_data_config
+ return my_config
+
+ def test_user_id(self):
+ branch = FakeBranch()
+ my_config = config.BranchConfig(branch)
+ self.assertIsNot(None, my_config.username())
+ my_config.branch.control_files.files['email'] = "John"
+ my_config.set_user_option('email',
+ "Robert Collins <robertc@example.org>")
+ self.assertEqual("Robert Collins <robertc@example.org>",
+ my_config.username())
+
+ def test_BZR_EMAIL_OVERRIDES(self):
+ self.overrideEnv('BZR_EMAIL', "Robert Collins <robertc@example.org>")
+ branch = FakeBranch()
+ my_config = config.BranchConfig(branch)
+ self.assertEqual("Robert Collins <robertc@example.org>",
+ my_config.username())
+
+ def test_signatures_forced(self):
+ my_config = self.get_branch_config(
+ global_config=sample_always_signatures)
+ self.assertEqual(config.CHECK_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(config.SIGN_ALWAYS,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signing_policy))
+ self.assertTrue(self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+
+ def test_signatures_forced_branch(self):
+ my_config = self.get_branch_config(
+ global_config=sample_ignore_signatures,
+ branch_data_config=sample_always_signatures)
+ self.assertEqual(config.CHECK_NEVER,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_checking))
+ self.assertEqual(config.SIGN_ALWAYS,
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signing_policy))
+ self.assertTrue(self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.signature_needed))
+
+ def test_gpg_signing_command(self):
+ my_config = self.get_branch_config(
+ global_config=sample_config_text,
+ # branch data cannot set gpg_signing_command
+ branch_data_config="gpg_signing_command=pgp")
+ self.assertEqual('gnome-gpg',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.gpg_signing_command))
+
+ def test_get_user_option_global(self):
+ my_config = self.get_branch_config(global_config=sample_config_text)
+ self.assertEqual('something',
+ my_config.get_user_option('user_global_option'))
+
+ def test_post_commit_default(self):
+ my_config = self.get_branch_config(global_config=sample_config_text,
+ location='/a/c',
+ location_config=sample_branches_text)
+ self.assertEqual(my_config.branch.base, '/a/c')
+ self.assertEqual('bzrlib.tests.test_config.post_commit',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.post_commit))
+ my_config.set_user_option('post_commit', 'rmtree_root')
+ # post-commit is ignored when present in branch data
+ self.assertEqual('bzrlib.tests.test_config.post_commit',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.post_commit))
+ my_config.set_user_option('post_commit', 'rmtree_root',
+ store=config.STORE_LOCATION)
+ self.assertEqual('rmtree_root',
+ self.applyDeprecated(deprecated_in((2, 5, 0)),
+ my_config.post_commit))
+
+ def test_config_precedence(self):
+ # FIXME: eager test, luckily no persistent config file makes it fail
+ # -- vila 20100716
+ my_config = self.get_branch_config(global_config=precedence_global)
+ self.assertEqual(my_config.get_user_option('option'), 'global')
+ my_config = self.get_branch_config(global_config=precedence_global,
+ branch_data_config=precedence_branch)
+ self.assertEqual(my_config.get_user_option('option'), 'branch')
+ my_config = self.get_branch_config(
+ global_config=precedence_global,
+ branch_data_config=precedence_branch,
+ location_config=precedence_location)
+ self.assertEqual(my_config.get_user_option('option'), 'recurse')
+ my_config = self.get_branch_config(
+ global_config=precedence_global,
+ branch_data_config=precedence_branch,
+ location_config=precedence_location,
+ location='http://example.com/specific')
+ self.assertEqual(my_config.get_user_option('option'), 'exact')
+
+
+class TestMailAddressExtraction(tests.TestCase):
+
+ def test_extract_email_address(self):
+ self.assertEqual('jane@test.com',
+ config.extract_email_address('Jane <jane@test.com>'))
+ self.assertRaises(errors.NoEmailInUsername,
+ config.extract_email_address, 'Jane Tester')
+
+ def test_parse_username(self):
+ self.assertEqual(('', 'jdoe@example.com'),
+ config.parse_username('jdoe@example.com'))
+ self.assertEqual(('', 'jdoe@example.com'),
+ config.parse_username('<jdoe@example.com>'))
+ self.assertEqual(('John Doe', 'jdoe@example.com'),
+ config.parse_username('John Doe <jdoe@example.com>'))
+ self.assertEqual(('John Doe', ''),
+ config.parse_username('John Doe'))
+ self.assertEqual(('John Doe', 'jdoe@example.com'),
+ config.parse_username('John Doe jdoe@example.com'))
+
+
+class TestTreeConfig(tests.TestCaseWithTransport):
+
+ def test_get_value(self):
+ """Test that retreiving a value from a section is possible"""
+ branch = self.make_branch('.')
+ tree_config = config.TreeConfig(branch)
+ tree_config.set_option('value', 'key', 'SECTION')
+ tree_config.set_option('value2', 'key2')
+ tree_config.set_option('value3-top', 'key3')
+ tree_config.set_option('value3-section', 'key3', 'SECTION')
+ value = tree_config.get_option('key', 'SECTION')
+ self.assertEqual(value, 'value')
+ value = tree_config.get_option('key2')
+ self.assertEqual(value, 'value2')
+ self.assertEqual(tree_config.get_option('non-existant'), None)
+ value = tree_config.get_option('non-existant', 'SECTION')
+ self.assertEqual(value, None)
+ value = tree_config.get_option('non-existant', default='default')
+ self.assertEqual(value, 'default')
+ self.assertEqual(tree_config.get_option('key2', 'NOSECTION'), None)
+ value = tree_config.get_option('key2', 'NOSECTION', default='default')
+ self.assertEqual(value, 'default')
+ value = tree_config.get_option('key3')
+ self.assertEqual(value, 'value3-top')
+ value = tree_config.get_option('key3', 'SECTION')
+ self.assertEqual(value, 'value3-section')
+
+
+class TestTransportConfig(tests.TestCaseWithTransport):
+
+ def test_load_utf8(self):
+ """Ensure we can load an utf8-encoded file."""
+ t = self.get_transport()
+ unicode_user = u'b\N{Euro Sign}ar'
+ unicode_content = u'user=%s' % (unicode_user,)
+ utf8_content = unicode_content.encode('utf8')
+ # Store the raw content in the config file
+ t.put_bytes('foo.conf', utf8_content)
+ conf = config.TransportConfig(t, 'foo.conf')
+ self.assertEquals(unicode_user, conf.get_option('user'))
+
+ def test_load_non_ascii(self):
+ """Ensure we display a proper error on non-ascii, non utf-8 content."""
+ t = self.get_transport()
+ t.put_bytes('foo.conf', 'user=foo\n#\xff\n')
+ conf = config.TransportConfig(t, 'foo.conf')
+ self.assertRaises(errors.ConfigContentError, conf._get_configobj)
+
+ def test_load_erroneous_content(self):
+ """Ensure we display a proper error on content that can't be parsed."""
+ t = self.get_transport()
+ t.put_bytes('foo.conf', '[open_section\n')
+ conf = config.TransportConfig(t, 'foo.conf')
+ self.assertRaises(errors.ParseConfigError, conf._get_configobj)
+
+ def test_load_permission_denied(self):
+ """Ensure we get an empty config file if the file is inaccessible."""
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+
+ class DenyingTransport(object):
+
+ def __init__(self, base):
+ self.base = base
+
+ def get_bytes(self, relpath):
+ raise errors.PermissionDenied(relpath, "")
+
+ cfg = config.TransportConfig(
+ DenyingTransport("nonexisting://"), 'control.conf')
+ self.assertIs(None, cfg.get_option('non-existant', 'SECTION'))
+ self.assertEquals(
+ warnings,
+ [u'Permission denied while trying to open configuration file '
+ u'nonexisting:///control.conf.'])
+
+ def test_get_value(self):
+ """Test that retreiving a value from a section is possible"""
+ bzrdir_config = config.TransportConfig(self.get_transport('.'),
+ 'control.conf')
+ bzrdir_config.set_option('value', 'key', 'SECTION')
+ bzrdir_config.set_option('value2', 'key2')
+ bzrdir_config.set_option('value3-top', 'key3')
+ bzrdir_config.set_option('value3-section', 'key3', 'SECTION')
+ value = bzrdir_config.get_option('key', 'SECTION')
+ self.assertEqual(value, 'value')
+ value = bzrdir_config.get_option('key2')
+ self.assertEqual(value, 'value2')
+ self.assertEqual(bzrdir_config.get_option('non-existant'), None)
+ value = bzrdir_config.get_option('non-existant', 'SECTION')
+ self.assertEqual(value, None)
+ value = bzrdir_config.get_option('non-existant', default='default')
+ self.assertEqual(value, 'default')
+ self.assertEqual(bzrdir_config.get_option('key2', 'NOSECTION'), None)
+ value = bzrdir_config.get_option('key2', 'NOSECTION',
+ default='default')
+ self.assertEqual(value, 'default')
+ value = bzrdir_config.get_option('key3')
+ self.assertEqual(value, 'value3-top')
+ value = bzrdir_config.get_option('key3', 'SECTION')
+ self.assertEqual(value, 'value3-section')
+
+ def test_set_unset_default_stack_on(self):
+ my_dir = self.make_bzrdir('.')
+ bzrdir_config = config.BzrDirConfig(my_dir)
+ self.assertIs(None, bzrdir_config.get_default_stack_on())
+ bzrdir_config.set_default_stack_on('Foo')
+ self.assertEqual('Foo', bzrdir_config._config.get_option(
+ 'default_stack_on'))
+ self.assertEqual('Foo', bzrdir_config.get_default_stack_on())
+ bzrdir_config.set_default_stack_on(None)
+ self.assertIs(None, bzrdir_config.get_default_stack_on())
+
+
+class TestOldConfigHooks(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestOldConfigHooks, self).setUp()
+ create_configs_with_file_option(self)
+
+ def assertGetHook(self, conf, name, value):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('get', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'get', None)
+ self.assertLength(0, calls)
+ actual_value = conf.get_user_option(name)
+ self.assertEquals(value, actual_value)
+ self.assertLength(1, calls)
+ self.assertEquals((conf, name, value), calls[0])
+
+ def test_get_hook_bazaar(self):
+ self.assertGetHook(self.bazaar_config, 'file', 'bazaar')
+
+ def test_get_hook_locations(self):
+ self.assertGetHook(self.locations_config, 'file', 'locations')
+
+ def test_get_hook_branch(self):
+ # Since locations masks branch, we define a different option
+ self.branch_config.set_user_option('file2', 'branch')
+ self.assertGetHook(self.branch_config, 'file2', 'branch')
+
+ def assertSetHook(self, conf, name, value):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('set', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'set', None)
+ self.assertLength(0, calls)
+ conf.set_user_option(name, value)
+ self.assertLength(1, calls)
+ # We can't assert the conf object below as different configs use
+ # different means to implement set_user_option and we care only about
+ # coverage here.
+ self.assertEquals((name, value), calls[0][1:])
+
+ def test_set_hook_bazaar(self):
+ self.assertSetHook(self.bazaar_config, 'foo', 'bazaar')
+
+ def test_set_hook_locations(self):
+ self.assertSetHook(self.locations_config, 'foo', 'locations')
+
+ def test_set_hook_branch(self):
+ self.assertSetHook(self.branch_config, 'foo', 'branch')
+
+ def assertRemoveHook(self, conf, name, section_name=None):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('remove', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'remove', None)
+ self.assertLength(0, calls)
+ conf.remove_user_option(name, section_name)
+ self.assertLength(1, calls)
+ # We can't assert the conf object below as different configs use
+ # different means to implement remove_user_option and we care only about
+ # coverage here.
+ self.assertEquals((name,), calls[0][1:])
+
+ def test_remove_hook_bazaar(self):
+ self.assertRemoveHook(self.bazaar_config, 'file')
+
+ def test_remove_hook_locations(self):
+ self.assertRemoveHook(self.locations_config, 'file',
+ self.locations_config.location)
+
+ def test_remove_hook_branch(self):
+ self.assertRemoveHook(self.branch_config, 'file')
+
+ def assertLoadHook(self, name, conf_class, *conf_args):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('load', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'load', None)
+ self.assertLength(0, calls)
+ # Build a config
+ conf = conf_class(*conf_args)
+ # Access an option to trigger a load
+ conf.get_user_option(name)
+ self.assertLength(1, calls)
+ # Since we can't assert about conf, we just use the number of calls ;-/
+
+ def test_load_hook_bazaar(self):
+ self.assertLoadHook('file', config.GlobalConfig)
+
+ def test_load_hook_locations(self):
+ self.assertLoadHook('file', config.LocationConfig, self.tree.basedir)
+
+ def test_load_hook_branch(self):
+ self.assertLoadHook('file', config.BranchConfig, self.tree.branch)
+
+ def assertSaveHook(self, conf):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('save', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'save', None)
+ self.assertLength(0, calls)
+ # Setting an option triggers a save
+ conf.set_user_option('foo', 'bar')
+ self.assertLength(1, calls)
+ # Since we can't assert about conf, we just use the number of calls ;-/
+
+ def test_save_hook_bazaar(self):
+ self.assertSaveHook(self.bazaar_config)
+
+ def test_save_hook_locations(self):
+ self.assertSaveHook(self.locations_config)
+
+ def test_save_hook_branch(self):
+ self.assertSaveHook(self.branch_config)
+
+
+class TestOldConfigHooksForRemote(tests.TestCaseWithTransport):
+ """Tests config hooks for remote configs.
+
+ No tests for the remove hook as this is not implemented there.
+ """
+
+ def setUp(self):
+ super(TestOldConfigHooksForRemote, self).setUp()
+ self.transport_server = test_server.SmartTCPServer_for_testing
+ create_configs_with_file_option(self)
+
+ def assertGetHook(self, conf, name, value):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('get', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'get', None)
+ self.assertLength(0, calls)
+ actual_value = conf.get_option(name)
+ self.assertEquals(value, actual_value)
+ self.assertLength(1, calls)
+ self.assertEquals((conf, name, value), calls[0])
+
+ def test_get_hook_remote_branch(self):
+ remote_branch = branch.Branch.open(self.get_url('tree'))
+ self.assertGetHook(remote_branch._get_config(), 'file', 'branch')
+
+ def test_get_hook_remote_bzrdir(self):
+ remote_bzrdir = controldir.ControlDir.open(self.get_url('tree'))
+ conf = remote_bzrdir._get_config()
+ conf.set_option('remotedir', 'file')
+ self.assertGetHook(conf, 'file', 'remotedir')
+
+ def assertSetHook(self, conf, name, value):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('set', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'set', None)
+ self.assertLength(0, calls)
+ conf.set_option(value, name)
+ self.assertLength(1, calls)
+ # We can't assert the conf object below as different configs use
+ # different means to implement set_user_option and we care only about
+ # coverage here.
+ self.assertEquals((name, value), calls[0][1:])
+
+ def test_set_hook_remote_branch(self):
+ remote_branch = branch.Branch.open(self.get_url('tree'))
+ self.addCleanup(remote_branch.lock_write().unlock)
+ self.assertSetHook(remote_branch._get_config(), 'file', 'remote')
+
+ def test_set_hook_remote_bzrdir(self):
+ remote_branch = branch.Branch.open(self.get_url('tree'))
+ self.addCleanup(remote_branch.lock_write().unlock)
+ remote_bzrdir = controldir.ControlDir.open(self.get_url('tree'))
+ self.assertSetHook(remote_bzrdir._get_config(), 'file', 'remotedir')
+
+ def assertLoadHook(self, expected_nb_calls, name, conf_class, *conf_args):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('load', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'load', None)
+ self.assertLength(0, calls)
+ # Build a config
+ conf = conf_class(*conf_args)
+ # Access an option to trigger a load
+ conf.get_option(name)
+ self.assertLength(expected_nb_calls, calls)
+ # Since we can't assert about conf, we just use the number of calls ;-/
+
+ def test_load_hook_remote_branch(self):
+ remote_branch = branch.Branch.open(self.get_url('tree'))
+ self.assertLoadHook(1, 'file', remote.RemoteBranchConfig, remote_branch)
+
+ def test_load_hook_remote_bzrdir(self):
+ remote_bzrdir = controldir.ControlDir.open(self.get_url('tree'))
+ # The config file doesn't exist, set an option to force its creation
+ conf = remote_bzrdir._get_config()
+ conf.set_option('remotedir', 'file')
+ # We get one call for the server and one call for the client, this is
+ # caused by the differences in implementations between
+ # SmartServerBzrDirRequestConfigFile (in smart/bzrdir.py) and
+ # SmartServerBranchGetConfigFile (in smart/branch.py)
+ self.assertLoadHook(2, 'file', remote.RemoteBzrDirConfig, remote_bzrdir)
+
+ def assertSaveHook(self, conf):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.OldConfigHooks.install_named_hook('save', hook, None)
+ self.addCleanup(
+ config.OldConfigHooks.uninstall_named_hook, 'save', None)
+ self.assertLength(0, calls)
+ # Setting an option triggers a save
+ conf.set_option('foo', 'bar')
+ self.assertLength(1, calls)
+ # Since we can't assert about conf, we just use the number of calls ;-/
+
+ def test_save_hook_remote_branch(self):
+ remote_branch = branch.Branch.open(self.get_url('tree'))
+ self.addCleanup(remote_branch.lock_write().unlock)
+ self.assertSaveHook(remote_branch._get_config())
+
+ def test_save_hook_remote_bzrdir(self):
+ remote_branch = branch.Branch.open(self.get_url('tree'))
+ self.addCleanup(remote_branch.lock_write().unlock)
+ remote_bzrdir = controldir.ControlDir.open(self.get_url('tree'))
+ self.assertSaveHook(remote_bzrdir._get_config())
+
+
+class TestOption(tests.TestCase):
+
+ def test_default_value(self):
+ opt = config.Option('foo', default='bar')
+ self.assertEquals('bar', opt.get_default())
+
+ def test_callable_default_value(self):
+ def bar_as_unicode():
+ return u'bar'
+ opt = config.Option('foo', default=bar_as_unicode)
+ self.assertEquals('bar', opt.get_default())
+
+ def test_default_value_from_env(self):
+ opt = config.Option('foo', default='bar', default_from_env=['FOO'])
+ self.overrideEnv('FOO', 'quux')
+ # Env variable provides a default taking over the option one
+ self.assertEquals('quux', opt.get_default())
+
+ def test_first_default_value_from_env_wins(self):
+ opt = config.Option('foo', default='bar',
+ default_from_env=['NO_VALUE', 'FOO', 'BAZ'])
+ self.overrideEnv('FOO', 'foo')
+ self.overrideEnv('BAZ', 'baz')
+ # The first env var set wins
+ self.assertEquals('foo', opt.get_default())
+
+ def test_not_supported_list_default_value(self):
+ self.assertRaises(AssertionError, config.Option, 'foo', default=[1])
+
+ def test_not_supported_object_default_value(self):
+ self.assertRaises(AssertionError, config.Option, 'foo',
+ default=object())
+
+ def test_not_supported_callable_default_value_not_unicode(self):
+ def bar_not_unicode():
+ return 'bar'
+ opt = config.Option('foo', default=bar_not_unicode)
+ self.assertRaises(AssertionError, opt.get_default)
+
+ def test_get_help_topic(self):
+ opt = config.Option('foo')
+ self.assertEquals('foo', opt.get_help_topic())
+
+
+class TestOptionConverterMixin(object):
+
+ def assertConverted(self, expected, opt, value):
+ self.assertEquals(expected, opt.convert_from_unicode(None, value))
+
+ def assertWarns(self, opt, value):
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ self.assertEquals(None, opt.convert_from_unicode(None, value))
+ self.assertLength(1, warnings)
+ self.assertEquals(
+ 'Value "%s" is not valid for "%s"' % (value, opt.name),
+ warnings[0])
+
+ def assertErrors(self, opt, value):
+ self.assertRaises(errors.ConfigOptionValueError,
+ opt.convert_from_unicode, None, value)
+
+ def assertConvertInvalid(self, opt, invalid_value):
+ opt.invalid = None
+ self.assertEquals(None, opt.convert_from_unicode(None, invalid_value))
+ opt.invalid = 'warning'
+ self.assertWarns(opt, invalid_value)
+ opt.invalid = 'error'
+ self.assertErrors(opt, invalid_value)
+
+
+class TestOptionWithBooleanConverter(tests.TestCase, TestOptionConverterMixin):
+
+ def get_option(self):
+ return config.Option('foo', help='A boolean.',
+ from_unicode=config.bool_from_store)
+
+ def test_convert_invalid(self):
+ opt = self.get_option()
+ # A string that is not recognized as a boolean
+ self.assertConvertInvalid(opt, u'invalid-boolean')
+ # A list of strings is never recognized as a boolean
+ self.assertConvertInvalid(opt, [u'not', u'a', u'boolean'])
+
+ def test_convert_valid(self):
+ opt = self.get_option()
+ self.assertConverted(True, opt, u'True')
+ self.assertConverted(True, opt, u'1')
+ self.assertConverted(False, opt, u'False')
+
+
+class TestOptionWithIntegerConverter(tests.TestCase, TestOptionConverterMixin):
+
+ def get_option(self):
+ return config.Option('foo', help='An integer.',
+ from_unicode=config.int_from_store)
+
+ def test_convert_invalid(self):
+ opt = self.get_option()
+ # A string that is not recognized as an integer
+ self.assertConvertInvalid(opt, u'forty-two')
+ # A list of strings is never recognized as an integer
+ self.assertConvertInvalid(opt, [u'a', u'list'])
+
+ def test_convert_valid(self):
+ opt = self.get_option()
+ self.assertConverted(16, opt, u'16')
+
+
+class TestOptionWithSIUnitConverter(tests.TestCase, TestOptionConverterMixin):
+
+ def get_option(self):
+ return config.Option('foo', help='An integer in SI units.',
+ from_unicode=config.int_SI_from_store)
+
+ def test_convert_invalid(self):
+ opt = self.get_option()
+ self.assertConvertInvalid(opt, u'not-a-unit')
+ self.assertConvertInvalid(opt, u'Gb') # Forgot the int
+ self.assertConvertInvalid(opt, u'1b') # Forgot the unit
+ self.assertConvertInvalid(opt, u'1GG')
+ self.assertConvertInvalid(opt, u'1Mbb')
+ self.assertConvertInvalid(opt, u'1MM')
+
+ def test_convert_valid(self):
+ opt = self.get_option()
+ self.assertConverted(int(5e3), opt, u'5kb')
+ self.assertConverted(int(5e6), opt, u'5M')
+ self.assertConverted(int(5e6), opt, u'5MB')
+ self.assertConverted(int(5e9), opt, u'5g')
+ self.assertConverted(int(5e9), opt, u'5gB')
+ self.assertConverted(100, opt, u'100')
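+ # As the assertions above suggest, int_SI_from_store accepts an
+ # optional, case-insensitive SI suffix (k=10**3, M=10**6, g=10**9),
+ # optionally followed by 'b'/'B', and returns a plain int (an
+ # observation drawn from these tests, not from the converter's docs).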
+
+
+class TestListOption(tests.TestCase, TestOptionConverterMixin):
+
+ def get_option(self):
+ return config.ListOption('foo', help='A list.')
+
+ def test_convert_invalid(self):
+ opt = self.get_option()
+ # We don't even try to convert a list into a list, we only expect
+ # strings
+ self.assertConvertInvalid(opt, [1])
+ # No string is invalid as all forms can be converted to a list
+
+ def test_convert_valid(self):
+ opt = self.get_option()
+ # An empty string is an empty list
+ self.assertConverted([], opt, '') # Using a bare str() just in case
+ self.assertConverted([], opt, u'')
+ # A boolean
+ self.assertConverted([u'True'], opt, u'True')
+ # An integer
+ self.assertConverted([u'42'], opt, u'42')
+ # A single string
+ self.assertConverted([u'bar'], opt, u'bar')
+
+
+class TestRegistryOption(tests.TestCase, TestOptionConverterMixin):
+
+ def get_option(self, registry):
+ return config.RegistryOption('foo', registry,
+ help='A registry option.')
+
+ def test_convert_invalid(self):
+ registry = _mod_registry.Registry()
+ opt = self.get_option(registry)
+ self.assertConvertInvalid(opt, [1])
+ self.assertConvertInvalid(opt, u"notregistered")
+
+ def test_convert_valid(self):
+ registry = _mod_registry.Registry()
+ registry.register("someval", 1234)
+ opt = self.get_option(registry)
+ # Using a bare str() just in case
+ self.assertConverted(1234, opt, "someval")
+ self.assertConverted(1234, opt, u'someval')
+ self.assertConverted(None, opt, None)
+
+ def test_help(self):
+ registry = _mod_registry.Registry()
+ registry.register("someval", 1234, help="some option")
+ registry.register("dunno", 1234, help="some other option")
+ opt = self.get_option(registry)
+ self.assertEquals(
+ 'A registry option.\n'
+ '\n'
+ 'The following values are supported:\n'
+ ' dunno - some other option\n'
+ ' someval - some option\n',
+ opt.help)
+
+ def test_get_help_text(self):
+ registry = _mod_registry.Registry()
+ registry.register("someval", 1234, help="some option")
+ registry.register("dunno", 1234, help="some other option")
+ opt = self.get_option(registry)
+ self.assertEquals(
+ 'A registry option.\n'
+ '\n'
+ 'The following values are supported:\n'
+ ' dunno - some other option\n'
+ ' someval - some option\n',
+ opt.get_help_text())
+
+
+class TestOptionRegistry(tests.TestCase):
+
+ def setUp(self):
+ super(TestOptionRegistry, self).setUp()
+ # Always start with an empty registry
+ self.overrideAttr(config, 'option_registry', config.OptionRegistry())
+ self.registry = config.option_registry
+
+ def test_register(self):
+ opt = config.Option('foo')
+ self.registry.register(opt)
+ self.assertIs(opt, self.registry.get('foo'))
+
+ def test_registered_help(self):
+ opt = config.Option('foo', help='A simple option')
+ self.registry.register(opt)
+ self.assertEquals('A simple option', self.registry.get_help('foo'))
+
+ lazy_option = config.Option('lazy_foo', help='Lazy help')
+
+ def test_register_lazy(self):
+ self.registry.register_lazy('lazy_foo', self.__module__,
+ 'TestOptionRegistry.lazy_option')
+ self.assertIs(self.lazy_option, self.registry.get('lazy_foo'))
+
+ def test_registered_lazy_help(self):
+ self.registry.register_lazy('lazy_foo', self.__module__,
+ 'TestOptionRegistry.lazy_option')
+ self.assertEquals('Lazy help', self.registry.get_help('lazy_foo'))
+
+
+class TestRegisteredOptions(tests.TestCase):
+ """All registered options should verify some constraints."""
+
+ scenarios = [(key, {'option_name': key, 'option': option}) for key, option
+ in config.option_registry.iteritems()]
+
+ def setUp(self):
+ super(TestRegisteredOptions, self).setUp()
+ self.registry = config.option_registry
+
+ def test_proper_name(self):
+ # An option should be registered under its own name, this can't be
+ # checked at registration time for the lazy ones.
+ self.assertEquals(self.option_name, self.option.name)
+
+ def test_help_is_set(self):
+ option_help = self.registry.get_help(self.option_name)
+ # Come on, think about the user, he really wants to know what the
+ # option is about
+ self.assertIsNot(None, option_help)
+ self.assertNotEquals('', option_help)
+
+
+class TestSection(tests.TestCase):
+
+ # FIXME: Parametrize so that all sections produced by Stores run these
+ # tests -- vila 2011-04-01
+
+ def test_get_a_value(self):
+ a_dict = dict(foo='bar')
+ section = config.Section('myID', a_dict)
+ self.assertEquals('bar', section.get('foo'))
+
+ def test_get_unknown_option(self):
+ a_dict = dict()
+ section = config.Section(None, a_dict)
+ self.assertEquals('out of thin air',
+ section.get('foo', 'out of thin air'))
+
+ def test_options_is_shared(self):
+ a_dict = dict()
+ section = config.Section(None, a_dict)
+ self.assertIs(a_dict, section.options)
+
+
+class TestMutableSection(tests.TestCase):
+
+ scenarios = [('mutable',
+ {'get_section':
+ lambda opts: config.MutableSection('myID', opts)},),
+ ]
+
+ def test_set(self):
+ a_dict = dict(foo='bar')
+ section = self.get_section(a_dict)
+ section.set('foo', 'new_value')
+ self.assertEquals('new_value', section.get('foo'))
+ # The change appears in the shared section
+ self.assertEquals('new_value', a_dict.get('foo'))
+ # We keep track of the change
+ self.assertTrue('foo' in section.orig)
+ self.assertEquals('bar', section.orig.get('foo'))
+
+ def test_set_preserve_original_once(self):
+ a_dict = dict(foo='bar')
+ section = self.get_section(a_dict)
+ section.set('foo', 'first_value')
+ section.set('foo', 'second_value')
+ # We keep track of the original value
+ self.assertTrue('foo' in section.orig)
+ self.assertEquals('bar', section.orig.get('foo'))
+
+ def test_remove(self):
+ a_dict = dict(foo='bar')
+ section = self.get_section(a_dict)
+ section.remove('foo')
+ # We get None for unknown options via the default value
+ self.assertEquals(None, section.get('foo'))
+ # Or we just get the default value
+ self.assertEquals('unknown', section.get('foo', 'unknown'))
+ self.assertFalse('foo' in section.options)
+ # We keep track of the deletion
+ self.assertTrue('foo' in section.orig)
+ self.assertEquals('bar', section.orig.get('foo'))
+
+ def test_remove_new_option(self):
+ a_dict = dict()
+ section = self.get_section(a_dict)
+ section.set('foo', 'bar')
+ section.remove('foo')
+ self.assertFalse('foo' in section.options)
+ # The option didn't exist initially so we need to keep track of it
+ # with a special value
+ self.assertTrue('foo' in section.orig)
+ self.assertEquals(config._NewlyCreatedOption, section.orig['foo'])
+
+
+class TestCommandLineStore(tests.TestCase):
+
+ def setUp(self):
+ super(TestCommandLineStore, self).setUp()
+ self.store = config.CommandLineStore()
+ self.overrideAttr(config, 'option_registry', config.OptionRegistry())
+
+ def get_section(self):
+ """Get the unique section for the command line overrides."""
+ sections = list(self.store.get_sections())
+ self.assertLength(1, sections)
+ store, section = sections[0]
+ self.assertEquals(self.store, store)
+ return section
+
+ def test_no_override(self):
+ self.store._from_cmdline([])
+ section = self.get_section()
+ self.assertLength(0, list(section.iter_option_names()))
+
+ def test_simple_override(self):
+ self.store._from_cmdline(['a=b'])
+ section = self.get_section()
+ self.assertEqual('b', section.get('a'))
+
+ def test_list_override(self):
+ opt = config.ListOption('l')
+ config.option_registry.register(opt)
+ self.store._from_cmdline(['l=1,2,3'])
+ val = self.get_section().get('l')
+ self.assertEqual('1,2,3', val)
+ # Reminder: lists should be registered as such explicitly, otherwise
+ # the conversion needs to be done afterwards.
+ self.assertEqual(['1', '2', '3'],
+ opt.convert_from_unicode(self.store, val))
+
+ def test_multiple_overrides(self):
+ self.store._from_cmdline(['a=b', 'x=y'])
+ section = self.get_section()
+ self.assertEquals('b', section.get('a'))
+ self.assertEquals('y', section.get('x'))
+
+ def test_wrong_syntax(self):
+ self.assertRaises(errors.BzrCommandError,
+ self.store._from_cmdline, ['a=b', 'c'])
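+ # Note: the override strings handed to _from_cmdline above are normally
+ # collected from the command line (presumably bzr's -Oname=value global
+ # option); these tests call the parser directly to keep things isolated.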
+
+
+class TestStoreMinimalAPI(tests.TestCaseWithTransport):
+
+ scenarios = [(key, {'get_store': builder}) for key, builder
+ in config.test_store_builder_registry.iteritems()] + [
+ ('cmdline', {'get_store': lambda test: config.CommandLineStore()})]
+
+ def test_id(self):
+ store = self.get_store(self)
+ if type(store) == config.TransportIniFileStore:
+ raise tests.TestNotApplicable(
+ "%s is not a concrete Store implementation"
+ " so it doesn't need an id" % (store.__class__.__name__,))
+ self.assertIsNot(None, store.id)
+
+
+class TestStore(tests.TestCaseWithTransport):
+
+ def assertSectionContent(self, expected, (store, section)):
+ """Assert that some options have the proper values in a section."""
+ expected_name, expected_options = expected
+ self.assertEquals(expected_name, section.id)
+ self.assertEquals(
+ expected_options,
+ dict([(k, section.get(k)) for k in expected_options.keys()]))
+
+
+class TestReadonlyStore(TestStore):
+
+ scenarios = [(key, {'get_store': builder}) for key, builder
+ in config.test_store_builder_registry.iteritems()]
+
+ def test_building_delays_load(self):
+ store = self.get_store(self)
+ self.assertEquals(False, store.is_loaded())
+ store._load_from_string('')
+ self.assertEquals(True, store.is_loaded())
+
+ def test_get_no_sections_for_empty(self):
+ store = self.get_store(self)
+ store._load_from_string('')
+ self.assertEquals([], list(store.get_sections()))
+
+ def test_get_default_section(self):
+ store = self.get_store(self)
+ store._load_from_string('foo=bar')
+ sections = list(store.get_sections())
+ self.assertLength(1, sections)
+ self.assertSectionContent((None, {'foo': 'bar'}), sections[0])
+
+ def test_get_named_section(self):
+ store = self.get_store(self)
+ store._load_from_string('[baz]\nfoo=bar')
+ sections = list(store.get_sections())
+ self.assertLength(1, sections)
+ self.assertSectionContent(('baz', {'foo': 'bar'}), sections[0])
+
+ def test_load_from_string_fails_for_non_empty_store(self):
+ store = self.get_store(self)
+ store._load_from_string('foo=bar')
+ self.assertRaises(AssertionError, store._load_from_string, 'bar=baz')
+
+
+class TestStoreQuoting(TestStore):
+
+ scenarios = [(key, {'get_store': builder}) for key, builder
+ in config.test_store_builder_registry.iteritems()]
+
+ def setUp(self):
+ super(TestStoreQuoting, self).setUp()
+ self.store = self.get_store(self)
+ # We need a loaded store but any content will do
+ self.store._load_from_string('')
+
+ def assertIdempotent(self, s):
+ """Assert that quoting an unquoted string is a no-op and vice-versa.
+
+ What matters here is that option values, as they appear in a store, can
+ be safely round-tripped out of the store and back.
+
+ :param s: A string, quoted if required.
+ """
+ self.assertEquals(s, self.store.quote(self.store.unquote(s)))
+ self.assertEquals(s, self.store.unquote(self.store.quote(s)))
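+ # For example (values borrowed from the tests below): the stored form
+ # '" a b c "' unquotes to the value ' a b c ' and quotes back to
+ # '" a b c "', so both directions are stable.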
+
+ def test_empty_string(self):
+ if isinstance(self.store, config.IniFileStore):
+ # configobj._quote doesn't handle empty values
+ self.assertRaises(AssertionError,
+ self.assertIdempotent, '')
+ else:
+ self.assertIdempotent('')
+ # But quoted empty strings are ok
+ self.assertIdempotent('""')
+
+ def test_embedded_spaces(self):
+ self.assertIdempotent('" a b c "')
+
+ def test_embedded_commas(self):
+ self.assertIdempotent('" a , b c "')
+
+ def test_simple_comma(self):
+ if isinstance(self.store, config.IniFileStore):
+ # configobj requires that lists are special-cased
+ self.assertRaises(AssertionError,
+ self.assertIdempotent, ',')
+ else:
+ self.assertIdempotent(',')
+ # When a single comma is required, quoting is also required
+ self.assertIdempotent('","')
+
+ def test_list(self):
+ if isinstance(self.store, config.IniFileStore):
+ # configobj requires that lists are special-cased
+ self.assertRaises(AssertionError,
+ self.assertIdempotent, 'a,b')
+ else:
+ self.assertIdempotent('a,b')
+
+
+class TestDictFromStore(tests.TestCase):
+
+ def test_unquote_not_string(self):
+ conf = config.MemoryStack('x=2\n[a_section]\na=1\n')
+ value = conf.get('a_section')
+ # Urgh, despite 'conf' asking for the no-name section, we get the
+ # content of another section as a dict o_O
+ self.assertEquals({'a': '1'}, value)
+ unquoted = conf.store.unquote(value)
+ # Which cannot be unquoted but shouldn't crash either (the use cases
+ # are getting the value or displaying it. In the latter case, '%s' will
+ # do).
+ self.assertEquals({'a': '1'}, unquoted)
+ self.assertEquals("{u'a': u'1'}", '%s' % (unquoted,))
+
+
+class TestIniFileStoreContent(tests.TestCaseWithTransport):
+ """Simulate loading a config store with content of various encodings.
+
+ All files produced by bzr are utf8-encoded.
+
+ Users may modify them manually and end up with a file that can't be
+ loaded. We need to issue proper error messages in this case.
+ """
+
+ invalid_utf8_char = '\xff'
+
+ def test_load_utf8(self):
+ """Ensure we can load an utf8-encoded file."""
+ t = self.get_transport()
+ # From http://pad.lv/799212
+ unicode_user = u'b\N{Euro Sign}ar'
+ unicode_content = u'user=%s' % (unicode_user,)
+ utf8_content = unicode_content.encode('utf8')
+ # Store the raw content in the config file
+ t.put_bytes('foo.conf', utf8_content)
+ store = config.TransportIniFileStore(t, 'foo.conf')
+ store.load()
+ stack = config.Stack([store.get_sections], store)
+ self.assertEquals(unicode_user, stack.get('user'))
+
+ def test_load_non_ascii(self):
+ """Ensure we display a proper error on non-ascii, non utf-8 content."""
+ t = self.get_transport()
+ t.put_bytes('foo.conf', 'user=foo\n#%s\n' % (self.invalid_utf8_char,))
+ store = config.TransportIniFileStore(t, 'foo.conf')
+ self.assertRaises(errors.ConfigContentError, store.load)
+
+ def test_load_erroneous_content(self):
+ """Ensure we display a proper error on content that can't be parsed."""
+ t = self.get_transport()
+ t.put_bytes('foo.conf', '[open_section\n')
+ store = config.TransportIniFileStore(t, 'foo.conf')
+ self.assertRaises(errors.ParseConfigError, store.load)
+
+ def test_load_permission_denied(self):
+ """Ensure we get warned when trying to load an inaccessible file."""
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+
+ t = self.get_transport()
+
+ def get_bytes(relpath):
+ raise errors.PermissionDenied(relpath, "")
+ t.get_bytes = get_bytes
+ store = config.TransportIniFileStore(t, 'foo.conf')
+ self.assertRaises(errors.PermissionDenied, store.load)
+ self.assertEquals(
+ warnings,
+ [u'Permission denied while trying to load configuration store %s.'
+ % store.external_url()])
+
+
+class TestIniConfigContent(tests.TestCaseWithTransport):
+ """Simulate loading a IniBasedConfig with content of various encodings.
+
+ All files produced by bzr are utf8-encoded.
+
+ Users may modify them manually and end up with a file that can't be
+ loaded. We need to issue proper error messages in this case.
+ """
+
+ invalid_utf8_char = '\xff'
+
+ def test_load_utf8(self):
+ """Ensure we can load an utf8-encoded file."""
+ # From http://pad.lv/799212
+ unicode_user = u'b\N{Euro Sign}ar'
+ unicode_content = u'user=%s' % (unicode_user,)
+ utf8_content = unicode_content.encode('utf8')
+ # Store the raw content in the config file
+ with open('foo.conf', 'wb') as f:
+ f.write(utf8_content)
+ conf = config.IniBasedConfig(file_name='foo.conf')
+ self.assertEquals(unicode_user, conf.get_user_option('user'))
+
+ def test_load_badly_encoded_content(self):
+ """Ensure we display a proper error on non-ascii, non utf-8 content."""
+ with open('foo.conf', 'wb') as f:
+ f.write('user=foo\n#%s\n' % (self.invalid_utf8_char,))
+ conf = config.IniBasedConfig(file_name='foo.conf')
+ self.assertRaises(errors.ConfigContentError, conf._get_parser)
+
+ def test_load_erroneous_content(self):
+ """Ensure we display a proper error on content that can't be parsed."""
+ with open('foo.conf', 'wb') as f:
+ f.write('[open_section\n')
+ conf = config.IniBasedConfig(file_name='foo.conf')
+ self.assertRaises(errors.ParseConfigError, conf._get_parser)
+
+
+class TestMutableStore(TestStore):
+
+ scenarios = [(key, {'store_id': key, 'get_store': builder}) for key, builder
+ in config.test_store_builder_registry.iteritems()]
+
+ def setUp(self):
+ super(TestMutableStore, self).setUp()
+ self.transport = self.get_transport()
+
+ def has_store(self, store):
+ store_basename = urlutils.relative_url(self.transport.external_url(),
+ store.external_url())
+ return self.transport.has(store_basename)
+
+ def test_save_empty_creates_no_file(self):
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ raise tests.TestNotApplicable(
+ 'branch.conf is *always* created when a branch is initialized')
+ store = self.get_store(self)
+ store.save()
+ self.assertEquals(False, self.has_store(store))
+
+ def test_mutable_section_shared(self):
+ store = self.get_store(self)
+ store._load_from_string('foo=bar\n')
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section1 = store.get_mutable_section(None)
+ section2 = store.get_mutable_section(None)
+ # If we get different sections, different callers won't share the
+ # modification
+ self.assertIs(section1, section2)
+
+ def test_save_emptied_succeeds(self):
+ store = self.get_store(self)
+ store._load_from_string('foo=bar\n')
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section = store.get_mutable_section(None)
+ section.remove('foo')
+ store.save()
+ self.assertEquals(True, self.has_store(store))
+ modified_store = self.get_store(self)
+ sections = list(modified_store.get_sections())
+ self.assertLength(0, sections)
+
+ def test_save_with_content_succeeds(self):
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ raise tests.TestNotApplicable(
+ 'branch.conf is *always* created when a branch is initialized')
+ store = self.get_store(self)
+ store._load_from_string('foo=bar\n')
+ self.assertEquals(False, self.has_store(store))
+ store.save()
+ self.assertEquals(True, self.has_store(store))
+ modified_store = self.get_store(self)
+ sections = list(modified_store.get_sections())
+ self.assertLength(1, sections)
+ self.assertSectionContent((None, {'foo': 'bar'}), sections[0])
+
+ def test_set_option_in_empty_store(self):
+ store = self.get_store(self)
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section = store.get_mutable_section(None)
+ section.set('foo', 'bar')
+ store.save()
+ modified_store = self.get_store(self)
+ sections = list(modified_store.get_sections())
+ self.assertLength(1, sections)
+ self.assertSectionContent((None, {'foo': 'bar'}), sections[0])
+
+ def test_set_option_in_default_section(self):
+ store = self.get_store(self)
+ store._load_from_string('')
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section = store.get_mutable_section(None)
+ section.set('foo', 'bar')
+ store.save()
+ modified_store = self.get_store(self)
+ sections = list(modified_store.get_sections())
+ self.assertLength(1, sections)
+ self.assertSectionContent((None, {'foo': 'bar'}), sections[0])
+
+ def test_set_option_in_named_section(self):
+ store = self.get_store(self)
+ store._load_from_string('')
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section = store.get_mutable_section('baz')
+ section.set('foo', 'bar')
+ store.save()
+ modified_store = self.get_store(self)
+ sections = list(modified_store.get_sections())
+ self.assertLength(1, sections)
+ self.assertSectionContent(('baz', {'foo': 'bar'}), sections[0])
+
+ def test_load_hook(self):
+ # First, we need to ensure that the store exists
+ store = self.get_store(self)
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section = store.get_mutable_section('baz')
+ section.set('foo', 'bar')
+ store.save()
+ # Now we can try to load it
+ store = self.get_store(self)
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.ConfigHooks.install_named_hook('load', hook, None)
+ self.assertLength(0, calls)
+ store.load()
+ self.assertLength(1, calls)
+ self.assertEquals((store,), calls[0])
+
+ def test_save_hook(self):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.ConfigHooks.install_named_hook('save', hook, None)
+ self.assertLength(0, calls)
+ store = self.get_store(self)
+ # FIXME: There should be a better way than relying on the test
+ # parametrization to identify branch.conf -- vila 2011-0526
+ if self.store_id in ('branch', 'remote_branch'):
+ # branch stores require write-locked branches
+ self.addCleanup(store.branch.lock_write().unlock)
+ section = store.get_mutable_section('baz')
+ section.set('foo', 'bar')
+ store.save()
+ self.assertLength(1, calls)
+ self.assertEquals((store,), calls[0])
+
+ def test_set_mark_dirty(self):
+ stack = config.MemoryStack('')
+ self.assertLength(0, stack.store.dirty_sections)
+ stack.set('foo', 'baz')
+ self.assertLength(1, stack.store.dirty_sections)
+ self.assertTrue(stack.store._need_saving())
+
+ def test_remove_mark_dirty(self):
+ stack = config.MemoryStack('foo=bar')
+ self.assertLength(0, stack.store.dirty_sections)
+ stack.remove('foo')
+ self.assertLength(1, stack.store.dirty_sections)
+ self.assertTrue(stack.store._need_saving())
+
+
+class TestStoreSaveChanges(tests.TestCaseWithTransport):
+ """Tests that config changes are kept in memory and saved on-demand."""
+
+ def setUp(self):
+ super(TestStoreSaveChanges, self).setUp()
+ self.transport = self.get_transport()
+ # Most of the tests involve two stores pointing to the same persistent
+ # storage to observe the effects of concurrent changes
+ self.st1 = config.TransportIniFileStore(self.transport, 'foo.conf')
+ self.st2 = config.TransportIniFileStore(self.transport, 'foo.conf')
+ self.warnings = []
+ def warning(*args):
+ self.warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+
+ def has_store(self, store):
+ store_basename = urlutils.relative_url(self.transport.external_url(),
+ store.external_url())
+ return self.transport.has(store_basename)
+
+ def get_stack(self, store):
+ # Any stack will do as long as it uses the right store, just a single
+ # no-name section is enough
+ return config.Stack([store.get_sections], store)
+
+ def test_no_changes_no_save(self):
+ s = self.get_stack(self.st1)
+ s.store.save_changes()
+ self.assertEquals(False, self.has_store(self.st1))
+
+ def test_unrelated_concurrent_update(self):
+ s1 = self.get_stack(self.st1)
+ s2 = self.get_stack(self.st2)
+ s1.set('foo', 'bar')
+ s2.set('baz', 'quux')
+ s1.store.save()
+ # Changes don't propagate magically
+ self.assertEquals(None, s1.get('baz'))
+ s2.store.save_changes()
+ self.assertEquals('quux', s2.get('baz'))
+ # Changes are acquired when saving
+ self.assertEquals('bar', s2.get('foo'))
+ # Since there is no overlap, no warnings are emitted
+ self.assertLength(0, self.warnings)
+
+ def test_concurrent_update_modified(self):
+ s1 = self.get_stack(self.st1)
+ s2 = self.get_stack(self.st2)
+ s1.set('foo', 'bar')
+ s2.set('foo', 'baz')
+ s1.store.save()
+ # Last speaker wins
+ s2.store.save_changes()
+ self.assertEquals('baz', s2.get('foo'))
+ # But the user gets a warning
+ self.assertLength(1, self.warnings)
+ warning = self.warnings[0]
+ self.assertStartsWith(warning, 'Option foo in section None')
+ self.assertEndsWith(warning, 'was changed from <CREATED> to bar.'
+ ' The baz value will be saved.')
+
+ def test_concurrent_deletion(self):
+ self.st1._load_from_string('foo=bar')
+ self.st1.save()
+ s1 = self.get_stack(self.st1)
+ s2 = self.get_stack(self.st2)
+ s1.remove('foo')
+ s2.remove('foo')
+ s1.store.save_changes()
+ # No warning yet
+ self.assertLength(0, self.warnings)
+ s2.store.save_changes()
+ # Now we get one
+ self.assertLength(1, self.warnings)
+ warning = self.warnings[0]
+ self.assertStartsWith(warning, 'Option foo in section None')
+ self.assertEndsWith(warning, 'was changed from bar to <CREATED>.'
+ ' The <DELETED> value will be saved.')
+
+
+class TestQuotingIniFileStore(tests.TestCaseWithTransport):
+
+ def get_store(self):
+ return config.TransportIniFileStore(self.get_transport(), 'foo.conf')
+
+ def test_get_quoted_string(self):
+ store = self.get_store()
+ store._load_from_string('foo= " abc "')
+ stack = config.Stack([store.get_sections])
+ self.assertEquals(' abc ', stack.get('foo'))
+
+ def test_set_quoted_string(self):
+ store = self.get_store()
+ stack = config.Stack([store.get_sections], store)
+ stack.set('foo', ' a b c ')
+ store.save()
+ self.assertFileEqual('foo = " a b c "' + os.linesep, 'foo.conf')
+
+
+class TestTransportIniFileStore(TestStore):
+
+ def test_loading_unknown_file_fails(self):
+ store = config.TransportIniFileStore(self.get_transport(),
+ 'I-do-not-exist')
+ self.assertRaises(errors.NoSuchFile, store.load)
+
+ def test_invalid_content(self):
+ store = config.TransportIniFileStore(self.get_transport(), 'foo.conf')
+ self.assertEquals(False, store.is_loaded())
+ exc = self.assertRaises(
+ errors.ParseConfigError, store._load_from_string,
+ 'this is invalid !')
+ self.assertEndsWith(exc.filename, 'foo.conf')
+ # And the load failed
+ self.assertEquals(False, store.is_loaded())
+
+ def test_get_embedded_sections(self):
+ # A more complicated example (which also shows that section names and
+ # option names share the same name space...)
+ # FIXME: This should be fixed by forbidding dicts as values?
+ # -- vila 2011-04-05
+ store = config.TransportIniFileStore(self.get_transport(), 'foo.conf')
+ store._load_from_string('''
+foo=bar
+l=1,2
+[DEFAULT]
+foo_in_DEFAULT=foo_DEFAULT
+[bar]
+foo_in_bar=barbar
+[baz]
+foo_in_baz=barbaz
+[[qux]]
+foo_in_qux=quux
+''')
+ sections = list(store.get_sections())
+ self.assertLength(4, sections)
+ # The default section has no name.
+ # List values are provided as strings and need to be explicitly
+ # converted by specifying from_unicode=list_from_store at option
+ # registration
+ self.assertSectionContent((None, {'foo': 'bar', 'l': u'1,2'}),
+ sections[0])
+ self.assertSectionContent(
+ ('DEFAULT', {'foo_in_DEFAULT': 'foo_DEFAULT'}), sections[1])
+ self.assertSectionContent(
+ ('bar', {'foo_in_bar': 'barbar'}), sections[2])
+ # sub sections are provided as embedded dicts.
+ self.assertSectionContent(
+ ('baz', {'foo_in_baz': 'barbaz', 'qux': {'foo_in_qux': 'quux'}}),
+ sections[3])
+
+
+class TestLockableIniFileStore(TestStore):
+
+ def test_create_store_in_created_dir(self):
+ self.assertPathDoesNotExist('dir')
+ t = self.get_transport('dir/subdir')
+ store = config.LockableIniFileStore(t, 'foo.conf')
+ store.get_mutable_section(None).set('foo', 'bar')
+ store.save()
+ self.assertPathExists('dir/subdir')
+
+
+class TestConcurrentStoreUpdates(TestStore):
+ """Test that Stores properly handle conccurent updates.
+
+ New Store implementation may fail some of these tests but until such
+ implementations exist it's hard to properly filter them from the scenarios
+ applied here. If you encounter such a case, contact the bzr devs.
+ """
+
+ scenarios = [(key, {'get_stack': builder}) for key, builder
+ in config.test_stack_builder_registry.iteritems()]
+
+ def setUp(self):
+ super(TestConcurrentStoreUpdates, self).setUp()
+ self.stack = self.get_stack(self)
+ if not isinstance(self.stack, config._CompatibleStack):
+ raise tests.TestNotApplicable(
+ '%s is not meant to be compatible with the old config design'
+ % (self.stack,))
+ self.stack.set('one', '1')
+ self.stack.set('two', '2')
+ # Flush the store
+ self.stack.store.save()
+
+ def test_simple_read_access(self):
+ self.assertEquals('1', self.stack.get('one'))
+
+ def test_simple_write_access(self):
+ self.stack.set('one', 'one')
+ self.assertEquals('one', self.stack.get('one'))
+
+ def test_listen_to_the_last_speaker(self):
+ c1 = self.stack
+ c2 = self.get_stack(self)
+ c1.set('one', 'ONE')
+ c2.set('two', 'TWO')
+ self.assertEquals('ONE', c1.get('one'))
+ self.assertEquals('TWO', c2.get('two'))
+        # The second update respects the first one
+ self.assertEquals('ONE', c2.get('one'))
+
+ def test_last_speaker_wins(self):
+ # If the same config is not shared, the same variable modified twice
+ # can only see a single result.
+ c1 = self.stack
+ c2 = self.get_stack(self)
+ c1.set('one', 'c1')
+ c2.set('one', 'c2')
+ self.assertEquals('c2', c2.get('one'))
+ # The first modification is still available until another refresh
+        # occurs
+ self.assertEquals('c1', c1.get('one'))
+ c1.set('two', 'done')
+ self.assertEquals('c2', c1.get('one'))
+
+ def test_writes_are_serialized(self):
+ c1 = self.stack
+ c2 = self.get_stack(self)
+
+ # We spawn a thread that will pause *during* the config saving.
+ before_writing = threading.Event()
+ after_writing = threading.Event()
+ writing_done = threading.Event()
+ c1_save_without_locking_orig = c1.store.save_without_locking
+ def c1_save_without_locking():
+ before_writing.set()
+ c1_save_without_locking_orig()
+ # The lock is held. We wait for the main thread to decide when to
+ # continue
+ after_writing.wait()
+ c1.store.save_without_locking = c1_save_without_locking
+ def c1_set():
+ c1.set('one', 'c1')
+ writing_done.set()
+ t1 = threading.Thread(target=c1_set)
+ # Collect the thread after the test
+ self.addCleanup(t1.join)
+ # Be ready to unblock the thread if the test goes wrong
+ self.addCleanup(after_writing.set)
+ t1.start()
+ before_writing.wait()
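+        # The thread is now inside save_without_locking and holds the store
+        # lock, so a concurrent set() must fail with LockContention.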
+ self.assertRaises(errors.LockContention,
+ c2.set, 'one', 'c2')
+ self.assertEquals('c1', c1.get('one'))
+ # Let the lock be released
+ after_writing.set()
+ writing_done.wait()
+ c2.set('one', 'c2')
+ self.assertEquals('c2', c2.get('one'))
+
+ def test_read_while_writing(self):
+ c1 = self.stack
+ # We spawn a thread that will pause *during* the write
+ ready_to_write = threading.Event()
+ do_writing = threading.Event()
+ writing_done = threading.Event()
+ # We override the _save implementation so we know the store is locked
+ c1_save_without_locking_orig = c1.store.save_without_locking
+ def c1_save_without_locking():
+ ready_to_write.set()
+ # The lock is held. We wait for the main thread to decide when to
+ # continue
+ do_writing.wait()
+ c1_save_without_locking_orig()
+ writing_done.set()
+ c1.store.save_without_locking = c1_save_without_locking
+ def c1_set():
+ c1.set('one', 'c1')
+ t1 = threading.Thread(target=c1_set)
+ # Collect the thread after the test
+ self.addCleanup(t1.join)
+ # Be ready to unblock the thread if the test goes wrong
+ self.addCleanup(do_writing.set)
+ t1.start()
+ # Ensure the thread is ready to write
+ ready_to_write.wait()
+ self.assertEquals('c1', c1.get('one'))
+ # If we read during the write, we get the old value
+ c2 = self.get_stack(self)
+ self.assertEquals('1', c2.get('one'))
+ # Let the writing occur and ensure it occurred
+ do_writing.set()
+ writing_done.wait()
+ # Now we get the updated value
+ c3 = self.get_stack(self)
+ self.assertEquals('c1', c3.get('one'))
+
+    # FIXME: It may be worth looking into removing the lock dir when it's not
+    # needed anymore and looking at the possible fallout for concurrent
+    # lockers. This will matter if/when we use config files outside of bazaar
+    # directories (.bazaar or .bzr) -- vila 2011-04-11
+
+
+class TestSectionMatcher(TestStore):
+
+ scenarios = [('location', {'matcher': config.LocationMatcher}),
+ ('id', {'matcher': config.NameMatcher}),]
+
+ def setUp(self):
+ super(TestSectionMatcher, self).setUp()
+ # Any simple store is good enough
+ self.get_store = config.test_store_builder_registry.get('configobj')
+
+ def test_no_matches_for_empty_stores(self):
+ store = self.get_store(self)
+ store._load_from_string('')
+ matcher = self.matcher(store, '/bar')
+ self.assertEquals([], list(matcher.get_sections()))
+
+ def test_build_doesnt_load_store(self):
+ store = self.get_store(self)
+ matcher = self.matcher(store, '/bar')
+ self.assertFalse(store.is_loaded())
+
+
+class TestLocationSection(tests.TestCase):
+
+ def get_section(self, options, extra_path):
+ section = config.Section('foo', options)
+ return config.LocationSection(section, extra_path)
+
+ def test_simple_option(self):
+ section = self.get_section({'foo': 'bar'}, '')
+ self.assertEquals('bar', section.get('foo'))
+
+ def test_option_with_extra_path(self):
+ section = self.get_section({'foo': 'bar', 'foo:policy': 'appendpath'},
+ 'baz')
+ self.assertEquals('bar/baz', section.get('foo'))
+
+ def test_invalid_policy(self):
+ section = self.get_section({'foo': 'bar', 'foo:policy': 'die'},
+ 'baz')
+ # invalid policies are ignored
+ self.assertEquals('bar', section.get('foo'))
+
+
+class TestLocationMatcher(TestStore):
+
+ def setUp(self):
+ super(TestLocationMatcher, self).setUp()
+ # Any simple store is good enough
+ self.get_store = config.test_store_builder_registry.get('configobj')
+
+ def test_unrelated_section_excluded(self):
+ store = self.get_store(self)
+ store._load_from_string('''
+[/foo]
+section=/foo
+[/foo/baz]
+section=/foo/baz
+[/foo/bar]
+section=/foo/bar
+[/foo/bar/baz]
+section=/foo/bar/baz
+[/quux/quux]
+section=/quux/quux
+''')
+ self.assertEquals(['/foo', '/foo/baz', '/foo/bar', '/foo/bar/baz',
+ '/quux/quux'],
+ [section.id for _, section in store.get_sections()])
+ matcher = config.LocationMatcher(store, '/foo/bar/quux')
+ sections = [section for _, section in matcher.get_sections()]
+ self.assertEquals(['/foo/bar', '/foo'],
+ [section.id for section in sections])
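+        # extra_path is the part of the searched location below each matching
+        # section: '/foo/bar' leaves 'quux' and '/foo' leaves 'bar/quux'.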
+ self.assertEquals(['quux', 'bar/quux'],
+ [section.extra_path for section in sections])
+
+ def test_more_specific_sections_first(self):
+ store = self.get_store(self)
+ store._load_from_string('''
+[/foo]
+section=/foo
+[/foo/bar]
+section=/foo/bar
+''')
+ self.assertEquals(['/foo', '/foo/bar'],
+ [section.id for _, section in store.get_sections()])
+ matcher = config.LocationMatcher(store, '/foo/bar/baz')
+ sections = [section for _, section in matcher.get_sections()]
+ self.assertEquals(['/foo/bar', '/foo'],
+ [section.id for section in sections])
+ self.assertEquals(['baz', 'bar/baz'],
+ [section.extra_path for section in sections])
+
+ def test_appendpath_in_no_name_section(self):
+ # It's a bit weird to allow appendpath in a no-name section, but
+        # someone may find a use for it
+ store = self.get_store(self)
+ store._load_from_string('''
+foo=bar
+foo:policy = appendpath
+''')
+ matcher = config.LocationMatcher(store, 'dir/subdir')
+ sections = list(matcher.get_sections())
+ self.assertLength(1, sections)
+ self.assertEquals('bar/dir/subdir', sections[0][1].get('foo'))
+
+ def test_file_urls_are_normalized(self):
+ store = self.get_store(self)
+ if sys.platform == 'win32':
+ expected_url = 'file:///C:/dir/subdir'
+ expected_location = 'C:/dir/subdir'
+ else:
+ expected_url = 'file:///dir/subdir'
+ expected_location = '/dir/subdir'
+ matcher = config.LocationMatcher(store, expected_url)
+ self.assertEquals(expected_location, matcher.location)
+
+ def test_branch_name_colo(self):
+ store = self.get_store(self)
+ store._load_from_string(dedent("""\
+ [/]
+ push_location=my{branchname}
+ """))
+ matcher = config.LocationMatcher(store, 'file:///,branch=example%3c')
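+        # The colocated branch name is given with the ',branch=' suffix in the
+        # URL and is url-decoded ('%3c' -> '<').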
+ self.assertEqual('example<', matcher.branch_name)
+ ((_, section),) = matcher.get_sections()
+ self.assertEqual('example<', section.locals['branchname'])
+
+ def test_branch_name_basename(self):
+ store = self.get_store(self)
+ store._load_from_string(dedent("""\
+ [/]
+ push_location=my{branchname}
+ """))
+ matcher = config.LocationMatcher(store, 'file:///parent/example%3c')
+ self.assertEqual('example<', matcher.branch_name)
+ ((_, section),) = matcher.get_sections()
+ self.assertEqual('example<', section.locals['branchname'])
+
+
+class TestStartingPathMatcher(TestStore):
+
+ def setUp(self):
+ super(TestStartingPathMatcher, self).setUp()
+ # Any simple store is good enough
+ self.store = config.IniFileStore()
+
+ def assertSectionIDs(self, expected, location, content):
+ self.store._load_from_string(content)
+ matcher = config.StartingPathMatcher(self.store, location)
+ sections = list(matcher.get_sections())
+ self.assertLength(len(expected), sections)
+ self.assertEqual(expected, [section.id for _, section in sections])
+ return sections
+
+ def test_empty(self):
+ self.assertSectionIDs([], self.get_url(), '')
+
+ def test_url_vs_local_paths(self):
+ # The matcher location is an url and the section names are local paths
+ sections = self.assertSectionIDs(['/foo/bar', '/foo'],
+ 'file:///foo/bar/baz', '''\
+[/foo]
+[/foo/bar]
+''')
+
+ def test_local_path_vs_url(self):
+ # The matcher location is a local path and the section names are urls
+ sections = self.assertSectionIDs(['file:///foo/bar', 'file:///foo'],
+ '/foo/bar/baz', '''\
+[file:///foo]
+[file:///foo/bar]
+''')
+
+ def test_no_name_section_included_when_present(self):
+ # Note that other tests will cover the case where the no-name section
+ # is empty and as such, not included.
+ sections = self.assertSectionIDs(['/foo/bar', '/foo', None],
+ '/foo/bar/baz', '''\
+option = defined so the no-name section exists
+[/foo]
+[/foo/bar]
+''')
+ self.assertEquals(['baz', 'bar/baz', '/foo/bar/baz'],
+ [s.locals['relpath'] for _, s in sections])
+
+ def test_order_reversed(self):
+ self.assertSectionIDs(['/foo/bar', '/foo'], '/foo/bar/baz', '''\
+[/foo]
+[/foo/bar]
+''')
+
+ def test_unrelated_section_excluded(self):
+ self.assertSectionIDs(['/foo/bar', '/foo'], '/foo/bar/baz', '''\
+[/foo]
+[/foo/qux]
+[/foo/bar]
+''')
+
+ def test_glob_included(self):
+ sections = self.assertSectionIDs(['/foo/*/baz', '/foo/b*', '/foo'],
+ '/foo/bar/baz', '''\
+[/foo]
+[/foo/qux]
+[/foo/b*]
+[/foo/*/baz]
+''')
+ # Note that 'baz' as a relpath for /foo/b* is not fully correct, but
+        # nothing really is... as far as using {relpath} to append it to
+        # something else goes, this seems good enough though.
+ self.assertEquals(['', 'baz', 'bar/baz'],
+ [s.locals['relpath'] for _, s in sections])
+
+ def test_respect_order(self):
+ self.assertSectionIDs(['/foo', '/foo/b*', '/foo/*/baz'],
+ '/foo/bar/baz', '''\
+[/foo/*/baz]
+[/foo/qux]
+[/foo/b*]
+[/foo]
+''')
+
+
+class TestNameMatcher(TestStore):
+
+ def setUp(self):
+ super(TestNameMatcher, self).setUp()
+ self.matcher = config.NameMatcher
+ # Any simple store is good enough
+ self.get_store = config.test_store_builder_registry.get('configobj')
+
+ def get_matching_sections(self, name):
+ store = self.get_store(self)
+ store._load_from_string('''
+[foo]
+option=foo
+[foo/baz]
+option=foo/baz
+[bar]
+option=bar
+''')
+ matcher = self.matcher(store, name)
+ return list(matcher.get_sections())
+
+ def test_matching(self):
+ sections = self.get_matching_sections('foo')
+ self.assertLength(1, sections)
+ self.assertSectionContent(('foo', {'option': 'foo'}), sections[0])
+
+ def test_not_matching(self):
+ sections = self.get_matching_sections('baz')
+ self.assertLength(0, sections)
+
+
+class TestBaseStackGet(tests.TestCase):
+
+ def setUp(self):
+ super(TestBaseStackGet, self).setUp()
+ self.overrideAttr(config, 'option_registry', config.OptionRegistry())
+
+ def test_get_first_definition(self):
+ store1 = config.IniFileStore()
+ store1._load_from_string('foo=bar')
+ store2 = config.IniFileStore()
+ store2._load_from_string('foo=baz')
+ conf = config.Stack([store1.get_sections, store2.get_sections])
+ self.assertEquals('bar', conf.get('foo'))
+
+ def test_get_with_registered_default_value(self):
+ config.option_registry.register(config.Option('foo', default='bar'))
+ conf_stack = config.Stack([])
+ self.assertEquals('bar', conf_stack.get('foo'))
+
+ def test_get_without_registered_default_value(self):
+ config.option_registry.register(config.Option('foo'))
+ conf_stack = config.Stack([])
+ self.assertEquals(None, conf_stack.get('foo'))
+
+ def test_get_without_default_value_for_not_registered(self):
+ conf_stack = config.Stack([])
+ self.assertEquals(None, conf_stack.get('foo'))
+
+ def test_get_for_empty_section_callable(self):
+ conf_stack = config.Stack([lambda : []])
+ self.assertEquals(None, conf_stack.get('foo'))
+
+ def test_get_for_broken_callable(self):
+        # Trying to use an invalid callable raises an exception on first use
+ conf_stack = config.Stack([object])
+ self.assertRaises(TypeError, conf_stack.get, 'foo')
+
+
+class TestStackWithSimpleStore(tests.TestCase):
+
+ def setUp(self):
+ super(TestStackWithSimpleStore, self).setUp()
+ self.overrideAttr(config, 'option_registry', config.OptionRegistry())
+ self.registry = config.option_registry
+
+ def get_conf(self, content=None):
+ return config.MemoryStack(content)
+
+ def test_override_value_from_env(self):
+ self.overrideEnv('FOO', None)
+ self.registry.register(
+ config.Option('foo', default='bar', override_from_env=['FOO']))
+ self.overrideEnv('FOO', 'quux')
+ # Env variable provides a default taking over the option one
+ conf = self.get_conf('foo=store')
+ self.assertEquals('quux', conf.get('foo'))
+
+ def test_first_override_value_from_env_wins(self):
+ self.overrideEnv('NO_VALUE', None)
+ self.overrideEnv('FOO', None)
+ self.overrideEnv('BAZ', None)
+ self.registry.register(
+ config.Option('foo', default='bar',
+ override_from_env=['NO_VALUE', 'FOO', 'BAZ']))
+ self.overrideEnv('FOO', 'foo')
+ self.overrideEnv('BAZ', 'baz')
+ # The first env var set wins
+ conf = self.get_conf('foo=store')
+ self.assertEquals('foo', conf.get('foo'))
+
+
+class TestMemoryStack(tests.TestCase):
+
+ def test_get(self):
+ conf = config.MemoryStack('foo=bar')
+ self.assertEquals('bar', conf.get('foo'))
+
+ def test_set(self):
+ conf = config.MemoryStack('foo=bar')
+ conf.set('foo', 'baz')
+ self.assertEquals('baz', conf.get('foo'))
+
+ def test_no_content(self):
+ conf = config.MemoryStack()
+ # No content means no loading
+ self.assertFalse(conf.store.is_loaded())
+ self.assertRaises(NotImplementedError, conf.get, 'foo')
+ # But a content can still be provided
+ conf.store._load_from_string('foo=bar')
+ self.assertEquals('bar', conf.get('foo'))
+
+
+class TestStackIterSections(tests.TestCase):
+
+ def test_empty_stack(self):
+ conf = config.Stack([])
+ sections = list(conf.iter_sections())
+ self.assertLength(0, sections)
+
+ def test_empty_store(self):
+ store = config.IniFileStore()
+ store._load_from_string('')
+ conf = config.Stack([store.get_sections])
+ sections = list(conf.iter_sections())
+ self.assertLength(0, sections)
+
+ def test_simple_store(self):
+ store = config.IniFileStore()
+ store._load_from_string('foo=bar')
+ conf = config.Stack([store.get_sections])
+ tuples = list(conf.iter_sections())
+ self.assertLength(1, tuples)
+ (found_store, found_section) = tuples[0]
+ self.assertIs(store, found_store)
+
+ def test_two_stores(self):
+ store1 = config.IniFileStore()
+ store1._load_from_string('foo=bar')
+ store2 = config.IniFileStore()
+ store2._load_from_string('bar=qux')
+ conf = config.Stack([store1.get_sections, store2.get_sections])
+ tuples = list(conf.iter_sections())
+ self.assertLength(2, tuples)
+ self.assertIs(store1, tuples[0][0])
+ self.assertIs(store2, tuples[1][0])
+
+
+class TestStackWithTransport(tests.TestCaseWithTransport):
+
+ scenarios = [(key, {'get_stack': builder}) for key, builder
+ in config.test_stack_builder_registry.iteritems()]
+
+
+class TestConcreteStacks(TestStackWithTransport):
+
+ def test_build_stack(self):
+ # Just a smoke test to help debug builders
+ stack = self.get_stack(self)
+
+
+class TestStackGet(TestStackWithTransport):
+
+ def setUp(self):
+ super(TestStackGet, self).setUp()
+ self.conf = self.get_stack(self)
+
+ def test_get_for_empty_stack(self):
+ self.assertEquals(None, self.conf.get('foo'))
+
+ def test_get_hook(self):
+ self.conf.set('foo', 'bar')
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.ConfigHooks.install_named_hook('get', hook, None)
+ self.assertLength(0, calls)
+ value = self.conf.get('foo')
+ self.assertEquals('bar', value)
+ self.assertLength(1, calls)
+ self.assertEquals((self.conf, 'foo', 'bar'), calls[0])
+
+
+class TestStackGetWithConverter(tests.TestCase):
+
+ def setUp(self):
+ super(TestStackGetWithConverter, self).setUp()
+ self.overrideAttr(config, 'option_registry', config.OptionRegistry())
+ self.registry = config.option_registry
+
+ def get_conf(self, content=None):
+ return config.MemoryStack(content)
+
+ def register_bool_option(self, name, default=None, default_from_env=None):
+ b = config.Option(name, help='A boolean.',
+ default=default, default_from_env=default_from_env,
+ from_unicode=config.bool_from_store)
+ self.registry.register(b)
+
+ def test_get_default_bool_None(self):
+ self.register_bool_option('foo')
+ conf = self.get_conf('')
+ self.assertEquals(None, conf.get('foo'))
+
+ def test_get_default_bool_True(self):
+ self.register_bool_option('foo', u'True')
+ conf = self.get_conf('')
+ self.assertEquals(True, conf.get('foo'))
+
+ def test_get_default_bool_False(self):
+ self.register_bool_option('foo', False)
+ conf = self.get_conf('')
+ self.assertEquals(False, conf.get('foo'))
+
+ def test_get_default_bool_False_as_string(self):
+ self.register_bool_option('foo', u'False')
+ conf = self.get_conf('')
+ self.assertEquals(False, conf.get('foo'))
+
+ def test_get_default_bool_from_env_converted(self):
+ self.register_bool_option('foo', u'True', default_from_env=['FOO'])
+ self.overrideEnv('FOO', 'False')
+ conf = self.get_conf('')
+ self.assertEquals(False, conf.get('foo'))
+
+ def test_get_default_bool_when_conversion_fails(self):
+ self.register_bool_option('foo', default='True')
+ conf = self.get_conf('foo=invalid boolean')
+ self.assertEquals(True, conf.get('foo'))
+
+ def register_integer_option(self, name,
+ default=None, default_from_env=None):
+ i = config.Option(name, help='An integer.',
+ default=default, default_from_env=default_from_env,
+ from_unicode=config.int_from_store)
+ self.registry.register(i)
+
+ def test_get_default_integer_None(self):
+ self.register_integer_option('foo')
+ conf = self.get_conf('')
+ self.assertEquals(None, conf.get('foo'))
+
+ def test_get_default_integer(self):
+ self.register_integer_option('foo', 42)
+ conf = self.get_conf('')
+ self.assertEquals(42, conf.get('foo'))
+
+ def test_get_default_integer_as_string(self):
+ self.register_integer_option('foo', u'42')
+ conf = self.get_conf('')
+ self.assertEquals(42, conf.get('foo'))
+
+ def test_get_default_integer_from_env(self):
+ self.register_integer_option('foo', default_from_env=['FOO'])
+ self.overrideEnv('FOO', '18')
+ conf = self.get_conf('')
+ self.assertEquals(18, conf.get('foo'))
+
+ def test_get_default_integer_when_conversion_fails(self):
+ self.register_integer_option('foo', default='12')
+ conf = self.get_conf('foo=invalid integer')
+ self.assertEquals(12, conf.get('foo'))
+
+ def register_list_option(self, name, default=None, default_from_env=None):
+ l = config.ListOption(name, help='A list.', default=default,
+ default_from_env=default_from_env)
+ self.registry.register(l)
+
+ def test_get_default_list_None(self):
+ self.register_list_option('foo')
+ conf = self.get_conf('')
+ self.assertEquals(None, conf.get('foo'))
+
+ def test_get_default_list_empty(self):
+ self.register_list_option('foo', '')
+ conf = self.get_conf('')
+ self.assertEquals([], conf.get('foo'))
+
+ def test_get_default_list_from_env(self):
+ self.register_list_option('foo', default_from_env=['FOO'])
+ self.overrideEnv('FOO', '')
+ conf = self.get_conf('')
+ self.assertEquals([], conf.get('foo'))
+
+ def test_get_with_list_converter_no_item(self):
+ self.register_list_option('foo', None)
+ conf = self.get_conf('foo=,')
+ self.assertEquals([], conf.get('foo'))
+
+ def test_get_with_list_converter_many_items(self):
+ self.register_list_option('foo', None)
+ conf = self.get_conf('foo=m,o,r,e')
+ self.assertEquals(['m', 'o', 'r', 'e'], conf.get('foo'))
+
+ def test_get_with_list_converter_embedded_spaces_many_items(self):
+ self.register_list_option('foo', None)
+ conf = self.get_conf('foo=" bar", "baz "')
+ self.assertEquals([' bar', 'baz '], conf.get('foo'))
+
+ def test_get_with_list_converter_stripped_spaces_many_items(self):
+ self.register_list_option('foo', None)
+ conf = self.get_conf('foo= bar , baz ')
+ self.assertEquals(['bar', 'baz'], conf.get('foo'))
+
+
+class TestIterOptionRefs(tests.TestCase):
+    """iter_option_refs is a bit unusual; document some cases."""
+
+ def assertRefs(self, expected, string):
+ self.assertEquals(expected, list(config.iter_option_refs(string)))
+
+ def test_empty(self):
+ self.assertRefs([(False, '')], '')
+
+ def test_no_refs(self):
+ self.assertRefs([(False, 'foo bar')], 'foo bar')
+
+ def test_single_ref(self):
+ self.assertRefs([(False, ''), (True, '{foo}'), (False, '')], '{foo}')
+
+ def test_broken_ref(self):
+ self.assertRefs([(False, '{foo')], '{foo')
+
+ def test_embedded_ref(self):
+ self.assertRefs([(False, '{'), (True, '{foo}'), (False, '}')],
+ '{{foo}}')
+
+ def test_two_refs(self):
+ self.assertRefs([(False, ''), (True, '{foo}'),
+ (False, ''), (True, '{bar}'),
+ (False, ''),],
+ '{foo}{bar}')
+
+ def test_newline_in_refs_are_not_matched(self):
+ self.assertRefs([(False, '{\nxx}{xx\n}{{\n}}')], '{\nxx}{xx\n}{{\n}}')
+
+
+class TestStackExpandOptions(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestStackExpandOptions, self).setUp()
+ self.overrideAttr(config, 'option_registry', config.OptionRegistry())
+ self.registry = config.option_registry
+ store = config.TransportIniFileStore(self.get_transport(), 'foo.conf')
+ self.conf = config.Stack([store.get_sections], store)
+
+ def assertExpansion(self, expected, string, env=None):
+ self.assertEquals(expected, self.conf.expand_options(string, env))
+
+ def test_no_expansion(self):
+ self.assertExpansion('foo', 'foo')
+
+ def test_expand_default_value(self):
+ self.conf.store._load_from_string('bar=baz')
+ self.registry.register(config.Option('foo', default=u'{bar}'))
+ self.assertEquals('baz', self.conf.get('foo', expand=True))
+
+ def test_expand_default_from_env(self):
+ self.conf.store._load_from_string('bar=baz')
+ self.registry.register(config.Option('foo', default_from_env=['FOO']))
+ self.overrideEnv('FOO', '{bar}')
+ self.assertEquals('baz', self.conf.get('foo', expand=True))
+
+ def test_expand_default_on_failed_conversion(self):
+ self.conf.store._load_from_string('baz=bogus\nbar=42\nfoo={baz}')
+ self.registry.register(
+ config.Option('foo', default=u'{bar}',
+ from_unicode=config.int_from_store))
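+        # foo expands to 'bogus', which fails the int conversion, so the
+        # registered default '{bar}' is expanded and converted instead.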
+ self.assertEquals(42, self.conf.get('foo', expand=True))
+
+ def test_env_adding_options(self):
+ self.assertExpansion('bar', '{foo}', {'foo': 'bar'})
+
+ def test_env_overriding_options(self):
+ self.conf.store._load_from_string('foo=baz')
+ self.assertExpansion('bar', '{foo}', {'foo': 'bar'})
+
+ def test_simple_ref(self):
+ self.conf.store._load_from_string('foo=xxx')
+ self.assertExpansion('xxx', '{foo}')
+
+ def test_unknown_ref(self):
+ self.assertRaises(errors.ExpandingUnknownOption,
+ self.conf.expand_options, '{foo}')
+
+ def test_indirect_ref(self):
+ self.conf.store._load_from_string('''
+foo=xxx
+bar={foo}
+''')
+ self.assertExpansion('xxx', '{bar}')
+
+ def test_embedded_ref(self):
+ self.conf.store._load_from_string('''
+foo=xxx
+bar=foo
+''')
+ self.assertExpansion('xxx', '{{bar}}')
+
+ def test_simple_loop(self):
+ self.conf.store._load_from_string('foo={foo}')
+ self.assertRaises(errors.OptionExpansionLoop,
+ self.conf.expand_options, '{foo}')
+
+ def test_indirect_loop(self):
+ self.conf.store._load_from_string('''
+foo={bar}
+bar={baz}
+baz={foo}''')
+ e = self.assertRaises(errors.OptionExpansionLoop,
+ self.conf.expand_options, '{foo}')
+ self.assertEquals('foo->bar->baz', e.refs)
+ self.assertEquals('{foo}', e.string)
+
+ def test_list(self):
+ self.conf.store._load_from_string('''
+foo=start
+bar=middle
+baz=end
+list={foo},{bar},{baz}
+''')
+ self.registry.register(
+ config.ListOption('list'))
+ self.assertEquals(['start', 'middle', 'end'],
+ self.conf.get('list', expand=True))
+
+ def test_cascading_list(self):
+ self.conf.store._load_from_string('''
+foo=start,{bar}
+bar=middle,{baz}
+baz=end
+list={foo}
+''')
+ self.registry.register(config.ListOption('list'))
+ # Register an intermediate option as a list to ensure no conversion
+        # happens while expanding. Conversion should only occur for the
+        # original option ('list' here).
+ self.registry.register(config.ListOption('baz'))
+ self.assertEquals(['start', 'middle', 'end'],
+ self.conf.get('list', expand=True))
+
+ def test_pathologically_hidden_list(self):
+ self.conf.store._load_from_string('''
+foo=bin
+bar=go
+start={foo
+middle=},{
+end=bar}
+hidden={start}{middle}{end}
+''')
+        # What matters is what the registration says; the conversion happens
+        # only after all expansions have been performed.
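+        # Here '{start}{middle}{end}' first expands to '{foo},{bar}', then to
+        # 'bin,go', and only then is the list conversion applied.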
+ self.registry.register(config.ListOption('hidden'))
+ self.assertEquals(['bin', 'go'],
+ self.conf.get('hidden', expand=True))
+
+
+class TestStackCrossSectionsExpand(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestStackCrossSectionsExpand, self).setUp()
+
+ def get_config(self, location, string):
+ if string is None:
+ string = ''
+        # Since we don't save the config we don't strictly need to inherit
+ # from TestCaseInTempDir, but an error occurs so quickly...
+ c = config.LocationStack(location)
+ c.store._load_from_string(string)
+ return c
+
+ def test_dont_cross_unrelated_section(self):
+ c = self.get_config('/another/branch/path','''
+[/one/branch/path]
+foo = hello
+bar = {foo}/2
+
+[/another/branch/path]
+bar = {foo}/2
+''')
+ self.assertRaises(errors.ExpandingUnknownOption,
+ c.get, 'bar', expand=True)
+
+ def test_cross_related_sections(self):
+ c = self.get_config('/project/branch/path','''
+[/project]
+foo = qu
+
+[/project/branch/path]
+bar = {foo}ux
+''')
+ self.assertEquals('quux', c.get('bar', expand=True))
+
+
+class TestStackCrossStoresExpand(tests.TestCaseWithTransport):
+
+ def test_cross_global_locations(self):
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/branch]
+lfoo = loc-foo
+lbar = {gbar}
+''')
+ l_store.save()
+ g_store = config.GlobalStore()
+ g_store._load_from_string('''
+[DEFAULT]
+gfoo = {lfoo}
+gbar = glob-bar
+''')
+ g_store.save()
+ stack = config.LocationStack('/branch')
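+        # Expansion is resolved against the whole stack, so options can refer
+        # to values defined in the other store in either direction.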
+ self.assertEquals('glob-bar', stack.get('lbar', expand=True))
+ self.assertEquals('loc-foo', stack.get('gfoo', expand=True))
+
+
+class TestStackExpandSectionLocals(tests.TestCaseWithTransport):
+
+ def test_expand_locals_empty(self):
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/home/user/project]
+base = {basename}
+rel = {relpath}
+''')
+ l_store.save()
+ stack = config.LocationStack('/home/user/project/')
+ self.assertEquals('', stack.get('base', expand=True))
+ self.assertEquals('', stack.get('rel', expand=True))
+
+ def test_expand_basename_locally(self):
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/home/user/project]
+bfoo = {basename}
+''')
+ l_store.save()
+ stack = config.LocationStack('/home/user/project/branch')
+ self.assertEquals('branch', stack.get('bfoo', expand=True))
+
+ def test_expand_basename_locally_longer_path(self):
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/home/user]
+bfoo = {basename}
+''')
+ l_store.save()
+ stack = config.LocationStack('/home/user/project/dir/branch')
+ self.assertEquals('branch', stack.get('bfoo', expand=True))
+
+ def test_expand_relpath_locally(self):
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/home/user/project]
+lfoo = loc-foo/{relpath}
+''')
+ l_store.save()
+ stack = config.LocationStack('/home/user/project/branch')
+ self.assertEquals('loc-foo/branch', stack.get('lfoo', expand=True))
+
+    def test_expand_relpath_unknown_in_global(self):
+ g_store = config.GlobalStore()
+ g_store._load_from_string('''
+[DEFAULT]
+gfoo = {relpath}
+''')
+ g_store.save()
+ stack = config.LocationStack('/home/user/project/branch')
+ self.assertRaises(errors.ExpandingUnknownOption,
+ stack.get, 'gfoo', expand=True)
+
+ def test_expand_local_option_locally(self):
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/home/user/project]
+lfoo = loc-foo/{relpath}
+lbar = {gbar}
+''')
+ l_store.save()
+ g_store = config.GlobalStore()
+ g_store._load_from_string('''
+[DEFAULT]
+gfoo = {lfoo}
+gbar = glob-bar
+''')
+ g_store.save()
+ stack = config.LocationStack('/home/user/project/branch')
+ self.assertEquals('glob-bar', stack.get('lbar', expand=True))
+ self.assertEquals('loc-foo/branch', stack.get('gfoo', expand=True))
+
+ def test_locals_dont_leak(self):
+        """Make sure we choose the right local in the presence of several
+        sections.
+        """
+ l_store = config.LocationStore()
+ l_store._load_from_string('''
+[/home/user]
+lfoo = loc-foo/{relpath}
+[/home/user/project]
+lfoo = loc-foo/{relpath}
+''')
+ l_store.save()
+ stack = config.LocationStack('/home/user/project/branch')
+ self.assertEquals('loc-foo/branch', stack.get('lfoo', expand=True))
+ stack = config.LocationStack('/home/user/bar/baz')
+ self.assertEquals('loc-foo/bar/baz', stack.get('lfoo', expand=True))
+
+
+class TestStackSet(TestStackWithTransport):
+
+ def test_simple_set(self):
+ conf = self.get_stack(self)
+ self.assertEquals(None, conf.get('foo'))
+ conf.set('foo', 'baz')
+ # Did we get it back ?
+ self.assertEquals('baz', conf.get('foo'))
+
+ def test_set_creates_a_new_section(self):
+ conf = self.get_stack(self)
+ conf.set('foo', 'baz')
+        self.assertEquals('baz', conf.get('foo'))
+
+ def test_set_hook(self):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.ConfigHooks.install_named_hook('set', hook, None)
+ self.assertLength(0, calls)
+ conf = self.get_stack(self)
+ conf.set('foo', 'bar')
+ self.assertLength(1, calls)
+ self.assertEquals((conf, 'foo', 'bar'), calls[0])
+
+
+class TestStackRemove(TestStackWithTransport):
+
+ def test_remove_existing(self):
+ conf = self.get_stack(self)
+ conf.set('foo', 'bar')
+ self.assertEquals('bar', conf.get('foo'))
+ conf.remove('foo')
+        # Is it really gone ?
+ self.assertEquals(None, conf.get('foo'))
+
+ def test_remove_unknown(self):
+ conf = self.get_stack(self)
+ self.assertRaises(KeyError, conf.remove, 'I_do_not_exist')
+
+ def test_remove_hook(self):
+ calls = []
+ def hook(*args):
+ calls.append(args)
+ config.ConfigHooks.install_named_hook('remove', hook, None)
+ self.assertLength(0, calls)
+ conf = self.get_stack(self)
+ conf.set('foo', 'bar')
+ conf.remove('foo')
+ self.assertLength(1, calls)
+ self.assertEquals((conf, 'foo'), calls[0])
+
+
+class TestConfigGetOptions(tests.TestCaseWithTransport, TestOptionsMixin):
+
+ def setUp(self):
+ super(TestConfigGetOptions, self).setUp()
+ create_configs(self)
+
+ def test_no_variable(self):
+ # Using branch should query branch, locations and bazaar
+ self.assertOptions([], self.branch_config)
+
+ def test_option_in_bazaar(self):
+ self.bazaar_config.set_user_option('file', 'bazaar')
+ self.assertOptions([('file', 'bazaar', 'DEFAULT', 'bazaar')],
+ self.bazaar_config)
+
+ def test_option_in_locations(self):
+ self.locations_config.set_user_option('file', 'locations')
+ self.assertOptions(
+ [('file', 'locations', self.tree.basedir, 'locations')],
+ self.locations_config)
+
+ def test_option_in_branch(self):
+ self.branch_config.set_user_option('file', 'branch')
+ self.assertOptions([('file', 'branch', 'DEFAULT', 'branch')],
+ self.branch_config)
+
+ def test_option_in_bazaar_and_branch(self):
+ self.bazaar_config.set_user_option('file', 'bazaar')
+ self.branch_config.set_user_option('file', 'branch')
+ self.assertOptions([('file', 'branch', 'DEFAULT', 'branch'),
+ ('file', 'bazaar', 'DEFAULT', 'bazaar'),],
+ self.branch_config)
+
+ def test_option_in_branch_and_locations(self):
+ # Hmm, locations override branch :-/
+ self.locations_config.set_user_option('file', 'locations')
+ self.branch_config.set_user_option('file', 'branch')
+ self.assertOptions(
+ [('file', 'locations', self.tree.basedir, 'locations'),
+ ('file', 'branch', 'DEFAULT', 'branch'),],
+ self.branch_config)
+
+ def test_option_in_bazaar_locations_and_branch(self):
+ self.bazaar_config.set_user_option('file', 'bazaar')
+ self.locations_config.set_user_option('file', 'locations')
+ self.branch_config.set_user_option('file', 'branch')
+ self.assertOptions(
+ [('file', 'locations', self.tree.basedir, 'locations'),
+ ('file', 'branch', 'DEFAULT', 'branch'),
+ ('file', 'bazaar', 'DEFAULT', 'bazaar'),],
+ self.branch_config)
+
+
+class TestConfigRemoveOption(tests.TestCaseWithTransport, TestOptionsMixin):
+
+ def setUp(self):
+ super(TestConfigRemoveOption, self).setUp()
+ create_configs_with_file_option(self)
+
+ def test_remove_in_locations(self):
+ self.locations_config.remove_user_option('file', self.tree.basedir)
+ self.assertOptions(
+ [('file', 'branch', 'DEFAULT', 'branch'),
+ ('file', 'bazaar', 'DEFAULT', 'bazaar'),],
+ self.branch_config)
+
+ def test_remove_in_branch(self):
+ self.branch_config.remove_user_option('file')
+ self.assertOptions(
+ [('file', 'locations', self.tree.basedir, 'locations'),
+ ('file', 'bazaar', 'DEFAULT', 'bazaar'),],
+ self.branch_config)
+
+ def test_remove_in_bazaar(self):
+ self.bazaar_config.remove_user_option('file')
+ self.assertOptions(
+ [('file', 'locations', self.tree.basedir, 'locations'),
+ ('file', 'branch', 'DEFAULT', 'branch'),],
+ self.branch_config)
+
+
+class TestConfigGetSections(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigGetSections, self).setUp()
+ create_configs(self)
+
+ def assertSectionNames(self, expected, conf, name=None):
+ """Check which sections are returned for a given config.
+
+        If fallback configurations exist, their sections can be included.
+
+ :param expected: A list of section names.
+
+ :param conf: The configuration that will be queried.
+
+ :param name: An optional section name that will be passed to
+ get_sections().
+ """
+ sections = list(conf._get_sections(name))
+ self.assertLength(len(expected), sections)
+ self.assertEqual(expected, [name for name, _, _ in sections])
+
+ def test_bazaar_default_section(self):
+ self.assertSectionNames(['DEFAULT'], self.bazaar_config)
+
+ def test_locations_default_section(self):
+ # No sections are defined in an empty file
+ self.assertSectionNames([], self.locations_config)
+
+ def test_locations_named_section(self):
+ self.locations_config.set_user_option('file', 'locations')
+ self.assertSectionNames([self.tree.basedir], self.locations_config)
+
+ def test_locations_matching_sections(self):
+ loc_config = self.locations_config
+ loc_config.set_user_option('file', 'locations')
+ # We need to cheat a bit here to create an option in sections above and
+ # below the 'location' one.
+ parser = loc_config._get_parser()
+        # locations.conf deals with '/' ignoring native os.sep
+ location_names = self.tree.basedir.split('/')
+ parent = '/'.join(location_names[:-1])
+ child = '/'.join(location_names + ['child'])
+ parser[parent] = {}
+ parser[parent]['file'] = 'parent'
+ parser[child] = {}
+ parser[child]['file'] = 'child'
+ self.assertSectionNames([self.tree.basedir, parent], loc_config)
+
+ def test_branch_data_default_section(self):
+ self.assertSectionNames([None],
+ self.branch_config._get_branch_data_config())
+
+ def test_branch_default_sections(self):
+ # No sections are defined in an empty locations file
+ self.assertSectionNames([None, 'DEFAULT'],
+ self.branch_config)
+ # Unless we define an option
+ self.branch_config._get_location_config().set_user_option(
+ 'file', 'locations')
+ self.assertSectionNames([self.tree.basedir, None, 'DEFAULT'],
+ self.branch_config)
+
+ def test_bazaar_named_section(self):
+ # We need to cheat as the API doesn't give direct access to sections
+ # other than DEFAULT.
+ self.bazaar_config.set_alias('bazaar', 'bzr')
+ self.assertSectionNames(['ALIASES'], self.bazaar_config, 'ALIASES')
+
+
+class TestAuthenticationConfigFile(tests.TestCase):
+ """Test the authentication.conf file matching"""
+
+ def _got_user_passwd(self, expected_user, expected_password,
+ config, *args, **kwargs):
+ credentials = config.get_credentials(*args, **kwargs)
+ if credentials is None:
+ user = None
+ password = None
+ else:
+ user = credentials['user']
+ password = credentials['password']
+ self.assertEquals(expected_user, user)
+ self.assertEquals(expected_password, password)
+
+ def test_empty_config(self):
+ conf = config.AuthenticationConfig(_file=StringIO())
+ self.assertEquals({}, conf._get_config())
+ self._got_user_passwd(None, None, conf, 'http', 'foo.net')
+
+ def test_non_utf8_config(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ 'foo = bar\xff'))
+ self.assertRaises(errors.ConfigContentError, conf._get_config)
+
+ def test_missing_auth_section_header(self):
+ conf = config.AuthenticationConfig(_file=StringIO('foo = bar'))
+ self.assertRaises(ValueError, conf.get_credentials, 'ftp', 'foo.net')
+
+ def test_auth_section_header_not_closed(self):
+ conf = config.AuthenticationConfig(_file=StringIO('[DEF'))
+ self.assertRaises(errors.ParseConfigError, conf._get_config)
+
+ def test_auth_value_not_boolean(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """[broken]
+scheme=ftp
+user=joe
+verify_certificates=askme # Error: Not a boolean
+"""))
+ self.assertRaises(ValueError, conf.get_credentials, 'ftp', 'foo.net')
+
+ def test_auth_value_not_int(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """[broken]
+scheme=ftp
+user=joe
+port=port # Error: Not an int
+"""))
+ self.assertRaises(ValueError, conf.get_credentials, 'ftp', 'foo.net')
+
+ def test_unknown_password_encoding(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """[broken]
+scheme=ftp
+user=joe
+password_encoding=unknown
+"""))
+ self.assertRaises(ValueError, conf.get_password,
+ 'ftp', 'foo.net', 'joe')
+
+ def test_credentials_for_scheme_host(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """# Identity on foo.net
+[ftp definition]
+scheme=ftp
+host=foo.net
+user=joe
+password=secret-pass
+"""))
+ # Basic matching
+ self._got_user_passwd('joe', 'secret-pass', conf, 'ftp', 'foo.net')
+ # different scheme
+ self._got_user_passwd(None, None, conf, 'http', 'foo.net')
+ # different host
+ self._got_user_passwd(None, None, conf, 'ftp', 'bar.net')
+
+ def test_credentials_for_host_port(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """# Identity on foo.net
+[ftp definition]
+scheme=ftp
+port=10021
+host=foo.net
+user=joe
+password=secret-pass
+"""))
+        # Matching port
+ self._got_user_passwd('joe', 'secret-pass',
+ conf, 'ftp', 'foo.net', port=10021)
+        # No port given, so the port-qualified section does not match
+ self._got_user_passwd(None, None, conf, 'ftp', 'foo.net')
+
+ def test_for_matching_host(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """# Identity on foo.net
+[sourceforge]
+scheme=bzr
+host=bzr.sf.net
+user=joe
+password=joepass
+[sourceforge domain]
+scheme=bzr
+host=.bzr.sf.net
+user=georges
+password=bendover
+"""))
+ # matching domain
+ self._got_user_passwd('georges', 'bendover',
+ conf, 'bzr', 'foo.bzr.sf.net')
+ # phishing attempt
+ self._got_user_passwd(None, None,
+ conf, 'bzr', 'bbzr.sf.net')
+
+ def test_for_matching_host_None(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """# Identity on foo.net
+[catchup bzr]
+scheme=bzr
+user=joe
+password=joepass
+[DEFAULT]
+user=georges
+password=bendover
+"""))
+ # match no host
+ self._got_user_passwd('joe', 'joepass',
+ conf, 'bzr', 'quux.net')
+ # no host but different scheme
+ self._got_user_passwd('georges', 'bendover',
+ conf, 'ftp', 'quux.net')
+
+ def test_credentials_for_path(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """
+[http dir1]
+scheme=http
+host=bar.org
+path=/dir1
+user=jim
+password=jimpass
+[http dir2]
+scheme=http
+host=bar.org
+path=/dir2
+user=georges
+password=bendover
+"""))
+ # no path no dice
+ self._got_user_passwd(None, None,
+ conf, 'http', host='bar.org', path='/dir3')
+ # matching path
+ self._got_user_passwd('georges', 'bendover',
+ conf, 'http', host='bar.org', path='/dir2')
+ # matching subdir
+ self._got_user_passwd('jim', 'jimpass',
+                              conf, 'http', host='bar.org', path='/dir1/subdir')
+
+ def test_credentials_for_user(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """
+[with user]
+scheme=http
+host=bar.org
+user=jim
+password=jimpass
+"""))
+ # Get user
+ self._got_user_passwd('jim', 'jimpass',
+ conf, 'http', 'bar.org')
+ # Get same user
+ self._got_user_passwd('jim', 'jimpass',
+ conf, 'http', 'bar.org', user='jim')
+ # Don't get a different user if one is specified
+ self._got_user_passwd(None, None,
+ conf, 'http', 'bar.org', user='georges')
+
+ def test_credentials_for_user_without_password(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """
+[without password]
+scheme=http
+host=bar.org
+user=jim
+"""))
+ # Get user but no password
+ self._got_user_passwd('jim', None,
+ conf, 'http', 'bar.org')
+
+ def test_verify_certificates(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """
+[self-signed]
+scheme=https
+host=bar.org
+user=jim
+password=jimpass
+verify_certificates=False
+[normal]
+scheme=https
+host=foo.net
+user=georges
+password=bendover
+"""))
+ credentials = conf.get_credentials('https', 'bar.org')
+ self.assertEquals(False, credentials.get('verify_certificates'))
+ credentials = conf.get_credentials('https', 'foo.net')
+ self.assertEquals(True, credentials.get('verify_certificates'))
+
+
+class TestAuthenticationStorage(tests.TestCaseInTempDir):
+
+ def test_set_credentials(self):
+ conf = config.AuthenticationConfig()
+ conf.set_credentials('name', 'host', 'user', 'scheme', 'password',
+ 99, path='/foo', verify_certificates=False, realm='realm')
+ credentials = conf.get_credentials(host='host', scheme='scheme',
+ port=99, path='/foo',
+ realm='realm')
+ CREDENTIALS = {'name': 'name', 'user': 'user', 'password': 'password',
+ 'verify_certificates': False, 'scheme': 'scheme',
+ 'host': 'host', 'port': 99, 'path': '/foo',
+ 'realm': 'realm'}
+ self.assertEqual(CREDENTIALS, credentials)
+ credentials_from_disk = config.AuthenticationConfig().get_credentials(
+ host='host', scheme='scheme', port=99, path='/foo', realm='realm')
+ self.assertEqual(CREDENTIALS, credentials_from_disk)
+
+ def test_reset_credentials_different_name(self):
+ conf = config.AuthenticationConfig()
+        conf.set_credentials('name', 'host', 'user', 'scheme', 'password')
+        conf.set_credentials('name2', 'host', 'user2', 'scheme', 'password')
+ self.assertIs(None, conf._get_config().get('name'))
+ credentials = conf.get_credentials(host='host', scheme='scheme')
+ CREDENTIALS = {'name': 'name2', 'user': 'user2', 'password':
+ 'password', 'verify_certificates': True,
+ 'scheme': 'scheme', 'host': 'host', 'port': None,
+ 'path': None, 'realm': None}
+ self.assertEqual(CREDENTIALS, credentials)
+
+
+class TestAuthenticationConfig(tests.TestCase):
+ """Test AuthenticationConfig behaviour"""
+
+ def _check_default_password_prompt(self, expected_prompt_format, scheme,
+ host=None, port=None, realm=None,
+ path=None):
+ if host is None:
+ host = 'bar.org'
+ user, password = 'jim', 'precious'
+ expected_prompt = expected_prompt_format % {
+ 'scheme': scheme, 'host': host, 'port': port,
+ 'user': user, 'realm': realm}
+
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin=password + '\n',
+ stdout=stdout, stderr=stderr)
+ # We use an empty conf so that the user is always prompted
+ conf = config.AuthenticationConfig()
+ self.assertEquals(password,
+ conf.get_password(scheme, host, user, port=port,
+ realm=realm, path=path))
+ self.assertEquals(expected_prompt, stderr.getvalue())
+ self.assertEquals('', stdout.getvalue())
+
+ def _check_default_username_prompt(self, expected_prompt_format, scheme,
+ host=None, port=None, realm=None,
+ path=None):
+ if host is None:
+ host = 'bar.org'
+ username = 'jim'
+ expected_prompt = expected_prompt_format % {
+ 'scheme': scheme, 'host': host, 'port': port,
+ 'realm': realm}
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+        ui.ui_factory = tests.TestUIFactory(stdin=username + '\n',
+ stdout=stdout, stderr=stderr)
+ # We use an empty conf so that the user is always prompted
+ conf = config.AuthenticationConfig()
+ self.assertEquals(username, conf.get_user(scheme, host, port=port,
+ realm=realm, path=path, ask=True))
+ self.assertEquals(expected_prompt, stderr.getvalue())
+ self.assertEquals('', stdout.getvalue())
+
+ def test_username_defaults_prompts(self):
+ # HTTP prompts can't be tested here, see test_http.py
+ self._check_default_username_prompt(u'FTP %(host)s username: ', 'ftp')
+ self._check_default_username_prompt(
+ u'FTP %(host)s:%(port)d username: ', 'ftp', port=10020)
+ self._check_default_username_prompt(
+ u'SSH %(host)s:%(port)d username: ', 'ssh', port=12345)
+
+ def test_username_default_no_prompt(self):
+ conf = config.AuthenticationConfig()
+ self.assertEquals(None,
+ conf.get_user('ftp', 'example.com'))
+ self.assertEquals("explicitdefault",
+ conf.get_user('ftp', 'example.com', default="explicitdefault"))
+
+ def test_password_default_prompts(self):
+ # HTTP prompts can't be tested here, see test_http.py
+ self._check_default_password_prompt(
+ u'FTP %(user)s@%(host)s password: ', 'ftp')
+ self._check_default_password_prompt(
+ u'FTP %(user)s@%(host)s:%(port)d password: ', 'ftp', port=10020)
+ self._check_default_password_prompt(
+ u'SSH %(user)s@%(host)s:%(port)d password: ', 'ssh', port=12345)
+ # SMTP port handling is a bit special (it's handled if embedded in the
+ # host too)
+        # FIXME: should we forbid that, extend it to other schemes, or leave
+        # things as they are ("that's fine, thank you")?
+ self._check_default_password_prompt(
+ u'SMTP %(user)s@%(host)s password: ', 'smtp')
+ self._check_default_password_prompt(
+ u'SMTP %(user)s@%(host)s password: ', 'smtp', host='bar.org:10025')
+ self._check_default_password_prompt(
+ u'SMTP %(user)s@%(host)s:%(port)d password: ', 'smtp', port=10025)
+
+ def test_ssh_password_emits_warning(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """
+[ssh with password]
+scheme=ssh
+host=bar.org
+user=jim
+password=jimpass
+"""))
+ entered_password = 'typed-by-hand'
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin=entered_password + '\n',
+ stdout=stdout, stderr=stderr)
+
+ # Since the password defined in the authentication config is ignored,
+ # the user is prompted
+ self.assertEquals(entered_password,
+ conf.get_password('ssh', 'bar.org', user='jim'))
+ self.assertContainsRe(
+ self.get_log(),
+ 'password ignored in section \[ssh with password\]')
+
+ def test_ssh_without_password_doesnt_emit_warning(self):
+ conf = config.AuthenticationConfig(_file=StringIO(
+ """
+[ssh with password]
+scheme=ssh
+host=bar.org
+user=jim
+"""))
+ entered_password = 'typed-by-hand'
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin=entered_password + '\n',
+ stdout=stdout,
+ stderr=stderr)
+
+        # Since no password is defined in the authentication config,
+ # the user is prompted
+ self.assertEquals(entered_password,
+ conf.get_password('ssh', 'bar.org', user='jim'))
+        # No warning should be emitted since there is no password. We are only
+ # providing "user".
+ self.assertNotContainsRe(
+ self.get_log(),
+ 'password ignored in section \[ssh with password\]')
+
+ def test_uses_fallback_stores(self):
+ self.overrideAttr(config, 'credential_store_registry',
+ config.CredentialStoreRegistry())
+ store = StubCredentialStore()
+ store.add_credentials("http", "example.com", "joe", "secret")
+ config.credential_store_registry.register("stub", store, fallback=True)
+ conf = config.AuthenticationConfig(_file=StringIO())
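+        # The authentication config itself is empty, so the credentials must
+        # come from the registered fallback store.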
+ creds = conf.get_credentials("http", "example.com")
+ self.assertEquals("joe", creds["user"])
+ self.assertEquals("secret", creds["password"])
+
+
+class StubCredentialStore(config.CredentialStore):
+
+ def __init__(self):
+ self._username = {}
+ self._password = {}
+
+ def add_credentials(self, scheme, host, user, password=None):
+ self._username[(scheme, host)] = user
+ self._password[(scheme, host)] = password
+
+ def get_credentials(self, scheme, host, port=None, user=None,
+ path=None, realm=None):
+ key = (scheme, host)
+        if key not in self._username:
+ return None
+ return { "scheme": scheme, "host": host, "port": port,
+ "user": self._username[key], "password": self._password[key]}
+
+
+class CountingCredentialStore(config.CredentialStore):
+
+ def __init__(self):
+ self._calls = 0
+
+ def get_credentials(self, scheme, host, port=None, user=None,
+ path=None, realm=None):
+ self._calls += 1
+ return None
+
+
+class TestCredentialStoreRegistry(tests.TestCase):
+
+ def _get_cs_registry(self):
+ return config.credential_store_registry
+
+ def test_default_credential_store(self):
+ r = self._get_cs_registry()
+ default = r.get_credential_store(None)
+ self.assertIsInstance(default, config.PlainTextCredentialStore)
+
+ def test_unknown_credential_store(self):
+ r = self._get_cs_registry()
+ # It's hard to imagine someone creating a credential store named
+        # 'unknown' so we use that as a never-registered key.
+ self.assertRaises(KeyError, r.get_credential_store, 'unknown')
+
+ def test_fallback_none_registered(self):
+ r = config.CredentialStoreRegistry()
+ self.assertEquals(None,
+ r.get_fallback_credentials("http", "example.com"))
+
+ def test_register(self):
+ r = config.CredentialStoreRegistry()
+ r.register("stub", StubCredentialStore(), fallback=False)
+ r.register("another", StubCredentialStore(), fallback=True)
+ self.assertEquals(["another", "stub"], r.keys())
+
+ def test_register_lazy(self):
+ r = config.CredentialStoreRegistry()
+ r.register_lazy("stub", "bzrlib.tests.test_config",
+ "StubCredentialStore", fallback=False)
+ self.assertEquals(["stub"], r.keys())
+ self.assertIsInstance(r.get_credential_store("stub"),
+ StubCredentialStore)
+
+ def test_is_fallback(self):
+ r = config.CredentialStoreRegistry()
+ r.register("stub1", None, fallback=False)
+ r.register("stub2", None, fallback=True)
+ self.assertEquals(False, r.is_fallback("stub1"))
+ self.assertEquals(True, r.is_fallback("stub2"))
+
+ def test_no_fallback(self):
+ r = config.CredentialStoreRegistry()
+ store = CountingCredentialStore()
+ r.register("count", store, fallback=False)
+ self.assertEquals(None,
+ r.get_fallback_credentials("http", "example.com"))
+ self.assertEquals(0, store._calls)
+
+ def test_fallback_credentials(self):
+ r = config.CredentialStoreRegistry()
+ store = StubCredentialStore()
+ store.add_credentials("http", "example.com",
+ "somebody", "geheim")
+ r.register("stub", store, fallback=True)
+ creds = r.get_fallback_credentials("http", "example.com")
+ self.assertEquals("somebody", creds["user"])
+ self.assertEquals("geheim", creds["password"])
+
+ def test_fallback_first_wins(self):
+ r = config.CredentialStoreRegistry()
+ stub1 = StubCredentialStore()
+ stub1.add_credentials("http", "example.com",
+ "somebody", "stub1")
+ r.register("stub1", stub1, fallback=True)
+ stub2 = StubCredentialStore()
+ stub2.add_credentials("http", "example.com",
+ "somebody", "stub2")
+ r.register("stub2", stub1, fallback=True)
+ creds = r.get_fallback_credentials("http", "example.com")
+ self.assertEquals("somebody", creds["user"])
+ self.assertEquals("stub1", creds["password"])
+
+
+class TestPlainTextCredentialStore(tests.TestCase):
+
+ def test_decode_password(self):
+ r = config.credential_store_registry
+ plain_text = r.get_credential_store()
+ decoded = plain_text.decode_password(dict(password='secret'))
+ self.assertEquals('secret', decoded)
+
+
+# FIXME: Once we have a way to declare authentication to all test servers, we
+# can implement generic tests.
+# test_user_password_in_url
+# test_user_in_url_password_from_config
+# test_user_in_url_password_prompted
+# test_user_in_config
+# test_user_getpass.getuser
+# test_user_prompted ?
+class TestAuthenticationRing(tests.TestCaseWithTransport):
+ pass
+
+
+class TestAutoUserId(tests.TestCase):
+ """Test inferring an automatic user name."""
+
+ def test_auto_user_id(self):
+ """Automatic inference of user name.
+
+ This is a bit hard to test in an isolated way, because it depends on
+        system functions that go directly to /etc or perhaps somewhere else.
+        But it's reasonable to say that on Unix, with /etc/mailname present,
+        we ought to be able to choose a user name with no configuration.
+ """
+ if sys.platform == 'win32':
+ raise tests.TestSkipped(
+ "User name inference not implemented on win32")
+ realname, address = config._auto_user_id()
+ if os.path.exists('/etc/mailname'):
+ self.assertIsNot(None, realname)
+ self.assertIsNot(None, address)
+ else:
+ self.assertEquals((None, None), (realname, address))
+
+
+class EmailOptionTests(tests.TestCase):
+
+ def test_default_email_uses_BZR_EMAIL(self):
+ conf = config.MemoryStack('email=jelmer@debian.org')
+ # BZR_EMAIL takes precedence over EMAIL
+ self.overrideEnv('BZR_EMAIL', 'jelmer@samba.org')
+ self.overrideEnv('EMAIL', 'jelmer@apache.org')
+ self.assertEquals('jelmer@samba.org', conf.get('email'))
+
+ def test_default_email_uses_EMAIL(self):
+ conf = config.MemoryStack('')
+ self.overrideEnv('BZR_EMAIL', None)
+ self.overrideEnv('EMAIL', 'jelmer@apache.org')
+ self.assertEquals('jelmer@apache.org', conf.get('email'))
+
+ def test_BZR_EMAIL_overrides(self):
+ conf = config.MemoryStack('email=jelmer@debian.org')
+ self.overrideEnv('BZR_EMAIL', 'jelmer@apache.org')
+ self.assertEquals('jelmer@apache.org', conf.get('email'))
+ self.overrideEnv('BZR_EMAIL', None)
+ self.overrideEnv('EMAIL', 'jelmer@samba.org')
+ self.assertEquals('jelmer@debian.org', conf.get('email'))
+
+
+class MailClientOptionTests(tests.TestCase):
+
+ def test_default(self):
+ conf = config.MemoryStack('')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.DefaultMail)
+
+ def test_evolution(self):
+ conf = config.MemoryStack('mail_client=evolution')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.Evolution)
+
+ def test_kmail(self):
+ conf = config.MemoryStack('mail_client=kmail')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.KMail)
+
+ def test_mutt(self):
+ conf = config.MemoryStack('mail_client=mutt')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.Mutt)
+
+ def test_thunderbird(self):
+ conf = config.MemoryStack('mail_client=thunderbird')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.Thunderbird)
+
+ def test_explicit_default(self):
+ conf = config.MemoryStack('mail_client=default')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.DefaultMail)
+
+ def test_editor(self):
+ conf = config.MemoryStack('mail_client=editor')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.Editor)
+
+ def test_mapi(self):
+ conf = config.MemoryStack('mail_client=mapi')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.MAPIClient)
+
+ def test_xdg_email(self):
+ conf = config.MemoryStack('mail_client=xdg-email')
+ client = conf.get('mail_client')
+ self.assertIs(client, mail_client.XDGEmail)
+
+ def test_unknown(self):
+ conf = config.MemoryStack('mail_client=firebird')
+ self.assertRaises(errors.ConfigOptionValueError, conf.get,
+ 'mail_client')
diff --git a/bzrlib/tests/test_conflicts.py b/bzrlib/tests/test_conflicts.py
new file mode 100644
index 0000000..4b1fc2a
--- /dev/null
+++ b/bzrlib/tests/test_conflicts.py
@@ -0,0 +1,1198 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+from bzrlib import (
+ conflicts,
+ errors,
+ option,
+ osutils,
+ tests,
+ )
+from bzrlib.tests import (
+ script,
+ scenarios,
+ )
+
+
+load_tests = scenarios.load_tests_apply_scenarios
+
+
+# TODO: Test commit with some added, and added-but-missing files
+# RBC 20060124 is that not tested in test_commit.py ?
+
+# The order of 'path' here is important - do not let it
+# be a sorted list.
+# u'\xe5' == a with circle
+# '\xc3\xae' == u'\xee' == i with hat
+# So these are u'path' and 'id' only with a circle and a hat. (shappo?)
+example_conflicts = conflicts.ConflictList(
+ [conflicts.MissingParent('Not deleting', u'p\xe5thg', '\xc3\xaedg'),
+ conflicts.ContentsConflict(u'p\xe5tha', None, '\xc3\xaeda'),
+ conflicts.TextConflict(u'p\xe5tha'),
+ conflicts.PathConflict(u'p\xe5thb', u'p\xe5thc', '\xc3\xaedb'),
+ conflicts.DuplicateID('Unversioned existing file',
+ u'p\xe5thc', u'p\xe5thc2',
+ '\xc3\xaedc', '\xc3\xaedc'),
+ conflicts.DuplicateEntry('Moved existing file to',
+ u'p\xe5thdd.moved', u'p\xe5thd',
+ '\xc3\xaedd', None),
+ conflicts.ParentLoop('Cancelled move', u'p\xe5the', u'p\xe5th2e',
+ None, '\xc3\xaed2e'),
+ conflicts.UnversionedParent('Versioned directory',
+ u'p\xe5thf', '\xc3\xaedf'),
+ conflicts.NonDirectoryParent('Created directory',
+ u'p\xe5thg', '\xc3\xaedg'),
+])
+
+
+def vary_by_conflicts():
+ for conflict in example_conflicts:
+ yield (conflict.__class__.__name__, {"conflict": conflict})
+
+
+class TestConflicts(tests.TestCaseWithTransport):
+
+ def test_resolve_conflict_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('hello', 'hello world4'),
+ ('hello.THIS', 'hello world2'),
+ ('hello.BASE', 'hello world1'),
+ ])
+ os.mkdir('hello.OTHER')
+ tree.add('hello', 'q')
+ l = conflicts.ConflictList([conflicts.TextConflict('hello')])
+ l.remove_files(tree)
+
+ def test_select_conflicts(self):
+ tree = self.make_branch_and_tree('.')
+ clist = conflicts.ConflictList
+
+ def check_select(not_selected, selected, paths, **kwargs):
+ self.assertEqual(
+ (not_selected, selected),
+ tree_conflicts.select_conflicts(tree, paths, **kwargs))
+
+ foo = conflicts.ContentsConflict('foo')
+ bar = conflicts.ContentsConflict('bar')
+ tree_conflicts = clist([foo, bar])
+
+ check_select(clist([bar]), clist([foo]), ['foo'])
+ check_select(clist(), tree_conflicts,
+ [''], ignore_misses=True, recurse=True)
+
+ foobaz = conflicts.ContentsConflict('foo/baz')
+ tree_conflicts = clist([foobaz, bar])
+
+ check_select(clist([bar]), clist([foobaz]),
+ ['foo'], ignore_misses=True, recurse=True)
+
+ qux = conflicts.PathConflict('qux', 'foo/baz')
+ tree_conflicts = clist([qux])
+
+ check_select(clist(), tree_conflicts,
+ ['foo'], ignore_misses=True, recurse=True)
+ check_select(tree_conflicts, clist(), ['foo'], ignore_misses=True)
+
+ def test_resolve_conflicts_recursive(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/hello'])
+ tree.add(['dir', 'dir/hello'])
+
+ dirhello = conflicts.ConflictList([conflicts.TextConflict('dir/hello')])
+ tree.set_conflicts(dirhello)
+
+ conflicts.resolve(tree, ['dir'], recursive=False, ignore_misses=True)
+ self.assertEqual(dirhello, tree.conflicts())
+
+ conflicts.resolve(tree, ['dir'], recursive=True, ignore_misses=True)
+ self.assertEqual(conflicts.ConflictList([]), tree.conflicts())
+
+
+class TestPerConflict(tests.TestCase):
+
+ scenarios = scenarios.multiply_scenarios(vary_by_conflicts())
+
+ def test_stringification(self):
+ text = unicode(self.conflict)
+ self.assertContainsString(text, self.conflict.path)
+ self.assertContainsString(text.lower(), "conflict")
+ self.assertContainsString(repr(self.conflict),
+ self.conflict.__class__.__name__)
+
+ def test_stanza_roundtrip(self):
+ p = self.conflict
+ o = conflicts.Conflict.factory(**p.as_stanza().as_dict())
+ self.assertEqual(o, p)
+
+ self.assertIsInstance(o.path, unicode)
+
+ if o.file_id is not None:
+ self.assertIsInstance(o.file_id, str)
+
+ conflict_path = getattr(o, 'conflict_path', None)
+ if conflict_path is not None:
+ self.assertIsInstance(conflict_path, unicode)
+
+ conflict_file_id = getattr(o, 'conflict_file_id', None)
+ if conflict_file_id is not None:
+ self.assertIsInstance(conflict_file_id, str)
+
+ def test_stanzification(self):
+ stanza = self.conflict.as_stanza()
+ if 'file_id' in stanza:
+ # In Stanza form, the file_id has to be unicode.
+ self.assertStartsWith(stanza['file_id'], u'\xeed')
+ self.assertStartsWith(stanza['path'], u'p\xe5th')
+ if 'conflict_path' in stanza:
+ self.assertStartsWith(stanza['conflict_path'], u'p\xe5th')
+ if 'conflict_file_id' in stanza:
+ self.assertStartsWith(stanza['conflict_file_id'], u'\xeed')
+
+
+class TestConflictList(tests.TestCase):
+
+ def test_stanzas_roundtrip(self):
+ stanzas_iter = example_conflicts.to_stanzas()
+ processed = conflicts.ConflictList.from_stanzas(stanzas_iter)
+ self.assertEqual(example_conflicts, processed)
+
+ def test_stringification(self):
+ for text, o in zip(example_conflicts.to_strings(), example_conflicts):
+ self.assertEqual(text, unicode(o))
+
+
+# FIXME: The shell-like tests should be converted to real whitebox tests... or
+# moved to a blackbox module -- vila 20100205
+
+# FIXME: test missing for multiple conflicts
+
+# FIXME: Tests missing for DuplicateID conflict type
+class TestResolveConflicts(script.TestCaseWithTransportAndScript):
+
+ preamble = None # The setup script set by daughter classes
+
+ def setUp(self):
+ super(TestResolveConflicts, self).setUp()
+ self.run_script(self.preamble)
+
+
+def mirror_scenarios(base_scenarios):
+ """Return a list of mirrored scenarios.
+
+ Each scenario in base_scenarios is duplicated, switching the roles of 'this'
+ and 'other'.
+ """
+ scenarios = []
+ for common, (lname, ldict), (rname, rdict) in base_scenarios:
+ a = tests.multiply_scenarios([(lname, dict(_this=ldict))],
+ [(rname, dict(_other=rdict))])
+ b = tests.multiply_scenarios([(rname, dict(_this=rdict))],
+ [(lname, dict(_other=ldict))])
+ # Inject the common parameters in all scenarios
+ for name, d in a + b:
+ d.update(common)
+ scenarios.extend(a + b)
+ return scenarios
+
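+# A rough sketch of what mirror_scenarios produces (hypothetical scenario
+# names, for illustration only): given a single base scenario
+#
+#   (dict(_base_actions='create_file'),
+#    ('renamed', dict(actions='rename_file', check='file_renamed')),
+#    ('deleted', dict(actions='delete_file', check='file_doesnt_exist')))
+#
+# the result holds two concrete scenarios, one per role assignment, with the
+# common dict folded into each (the names are combined by
+# tests.multiply_scenarios, typically 'renamed,deleted' and 'deleted,renamed'):
+#
+#   {'_base_actions': 'create_file',
+#    '_this': {'actions': 'rename_file', 'check': 'file_renamed'},
+#    '_other': {'actions': 'delete_file', 'check': 'file_doesnt_exist'}}
+#
+#   {'_base_actions': 'create_file',
+#    '_this': {'actions': 'delete_file', 'check': 'file_doesnt_exist'},
+#    '_other': {'actions': 'rename_file', 'check': 'file_renamed'}}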
+
+# FIXME: Get rid of parametrized (in the class name) once we delete
+# TestResolveConflicts -- vila 20100308
+class TestParametrizedResolveConflicts(tests.TestCaseWithTransport):
+ """This class provides a base to test single conflict resolution.
+
+ Since all conflict objects are created with specific semantics for their
+ attributes, each class should implement the necessary functions and
+ attributes described below.
+
+ Each class should define the scenarios that create the expected (single)
+ conflict.
+
+ Each scenario describes:
+ * how to create 'base' tree (and revision)
+ * how to create 'left' tree (and revision, parent rev 'base')
+ * how to create 'right' tree (and revision, parent rev 'base')
+ * how to check that changes in 'base'->'left' have been taken
+ * how to check that changes in 'base'->'right' have been taken
+
+ From each base scenario, we generate two concrete scenarios where:
+ * this=left, other=right
+ * this=right, other=left
+
+ Then the test case verifies each concrete scenario by:
+ * creating a branch containing the 'base', 'this' and 'other' revisions
+ * creating a working tree for the 'this' revision
+ * performing the merge of 'other' into 'this'
+ * verifying the expected conflict was generated
+ * resolving with --take-this or --take-other, and running the corresponding
+ checks (for either 'base'->'this', or 'base'->'other')
+
+ :cvar _conflict_type: The expected class of the generated conflict.
+
+ :cvar _assert_conflict: A method receiving the working tree and the
+ conflict object and checking its attributes.
+
+ :cvar _base_actions: The branchbuilder actions to create the 'base'
+ revision.
+
+ :cvar _this: The dict related to 'base' -> 'this'. It contains at least:
+ * 'actions': The branchbuilder actions to create the 'this'
+ revision.
+ * 'check': how to check the changes after resolution with --take-this.
+
+ :cvar _other: The dict related to 'base' -> 'other'. It contains at least:
+ * 'actions': The branchbuilder actions to create the 'other'
+ revision.
+ * 'check': how to check the changes after resolution with --take-other.
+ """
+
+ # Set by daughter classes
+ _conflict_type = None
+ _assert_conflict = None
+
+ # Set by load_tests
+ _base_actions = None
+ _this = None
+ _other = None
+
+ scenarios = []
+ """The scenario list for the conflict type defined by the class.
+
+ Each scenario is of the form:
+ (common, (left_name, left_dict), (right_name, right_dict))
+
+ * common is a dict
+
+ * left_name and right_name are the scenario names that will be combined
+
+ * left_dict and right_dict are the attributes specific to each half of
+ the scenario. They should include at least 'actions' and 'check' and
+ will be available as '_this' and '_other' test instance attributes.
+
+ Daughter classes are free to add their specific attributes as they see
+ fit in any of the three dicts.
+
+ This is a class attribute so that load_tests can find it.
+
+ '_base_actions' in the common dict, 'actions' and 'check' in the left
+ and right dicts use names that map to methods in the test classes. Some
+ prefixes are added to these names to get the corresponding methods (see
+ _get_actions() and _get_check()). The motivation here is to avoid
+ collisions in the class namespace.
+ """
+
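+ # Illustration of the name-to-method mapping (not itself part of the
+ # scenarios): with _base_actions='create_file' and
+ # _this={'actions': 'modify_file_A', 'check': 'file_has_content_A'},
+ # setUp() builds the 'base' revision from self.do_create_file() and the
+ # 'this' revision from self.do_modify_file_A() via _get_actions(), while
+ # test_resolve_taking_this() verifies the result through
+ # self.check_file_has_content_A() via _get_check().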
+ def setUp(self):
+ super(TestParametrizedResolveConflicts, self).setUp()
+ builder = self.make_branch_builder('trunk')
+ builder.start_series()
+
+ # Create an empty trunk
+ builder.build_snapshot('start', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ # Add a minimal base content
+ base_actions = self._get_actions(self._base_actions)()
+ builder.build_snapshot('base', ['start'], base_actions)
+ # Modify the base content in branch
+ actions_other = self._get_actions(self._other['actions'])()
+ builder.build_snapshot('other', ['base'], actions_other)
+ # Modify the base content in trunk
+ actions_this = self._get_actions(self._this['actions'])()
+ builder.build_snapshot('this', ['base'], actions_this)
+ # builder.get_branch() tip is now 'this'
+
+ builder.finish_series()
+ self.builder = builder
+
+ def _get_actions(self, name):
+ return getattr(self, 'do_%s' % name)
+
+ def _get_check(self, name):
+ return getattr(self, 'check_%s' % name)
+
+ def _merge_other_into_this(self):
+ b = self.builder.get_branch()
+ wt = b.bzrdir.sprout('branch').open_workingtree()
+ wt.merge_from_branch(b, 'other')
+ return wt
+
+ def assertConflict(self, wt):
+ confs = wt.conflicts()
+ self.assertLength(1, confs)
+ c = confs[0]
+ self.assertIsInstance(c, self._conflict_type)
+ self._assert_conflict(wt, c)
+
+ def _get_resolve_path_arg(self, wt, action):
+ raise NotImplementedError(self._get_resolve_path_arg)
+
+ def check_resolved(self, wt, action):
+ path = self._get_resolve_path_arg(wt, action)
+ conflicts.resolve(wt, [path], action=action)
+ # Check that we don't have any conflicts nor unknowns left
+ self.assertLength(0, wt.conflicts())
+ self.assertLength(0, list(wt.unknowns()))
+
+ def test_resolve_taking_this(self):
+ wt = self._merge_other_into_this()
+ self.assertConflict(wt)
+ self.check_resolved(wt, 'take_this')
+ check_this = self._get_check(self._this['check'])
+ check_this()
+
+ def test_resolve_taking_other(self):
+ wt = self._merge_other_into_this()
+ self.assertConflict(wt)
+ self.check_resolved(wt, 'take_other')
+ check_other = self._get_check(self._other['check'])
+ check_other()
+
+
+class TestResolveTextConflicts(TestParametrizedResolveConflicts):
+
+ _conflict_type = conflicts.TextConflict
+
+ # Set by the scenarios
+ # path and file-id for the file involved in the conflict
+ _path = None
+ _file_id = None
+
+ scenarios = mirror_scenarios(
+ [
+ # File modified on both sides
+ (dict(_base_actions='create_file',
+ _path='file', _file_id='file-id'),
+ ('file_modified_A',
+ dict(actions='modify_file_A', check='file_has_content_A')),
+ ('file_modified_B',
+ dict(actions='modify_file_B', check='file_has_content_B')),),
+ # File modified on both sides in dir
+ (dict(_base_actions='create_file_in_dir',
+ _path='dir/file', _file_id='file-id'),
+ ('file_modified_A_in_dir',
+ dict(actions='modify_file_A',
+ check='file_in_dir_has_content_A')),
+ ('file_modified_B',
+ dict(actions='modify_file_B',
+ check='file_in_dir_has_content_B')),),
+ ])
+
+ def do_create_file(self, path='file'):
+ return [('add', (path, 'file-id', 'file', 'trunk content\n'))]
+
+ def do_modify_file_A(self):
+ return [('modify', ('file-id', 'trunk content\nfeature A\n'))]
+
+ def do_modify_file_B(self):
+ return [('modify', ('file-id', 'trunk content\nfeature B\n'))]
+
+ def check_file_has_content_A(self, path='file'):
+ self.assertFileEqual('trunk content\nfeature A\n',
+ osutils.pathjoin('branch', path))
+
+ def check_file_has_content_B(self, path='file'):
+ self.assertFileEqual('trunk content\nfeature B\n',
+ osutils.pathjoin('branch', path))
+
+ def do_create_file_in_dir(self):
+ return [('add', ('dir', 'dir-id', 'directory', '')),
+ ] + self.do_create_file('dir/file')
+
+ def check_file_in_dir_has_content_A(self):
+ self.check_file_has_content_A('dir/file')
+
+ def check_file_in_dir_has_content_B(self):
+ self.check_file_has_content_B('dir/file')
+
+ def _get_resolve_path_arg(self, wt, action):
+ return self._path
+
+ def assertTextConflict(self, wt, c):
+ self.assertEqual(self._file_id, c.file_id)
+ self.assertEqual(self._path, c.path)
+ _assert_conflict = assertTextConflict
+
+
+class TestResolveContentsConflict(TestParametrizedResolveConflicts):
+
+ _conflict_type = conflicts.ContentsConflict
+
+ # Set by the scenarios
+ # path and file-id for the file involved in the conflict
+ _path = None
+ _file_id = None
+
+ scenarios = mirror_scenarios(
+ [
+ # File modified/deleted
+ (dict(_base_actions='create_file',
+ _path='file', _file_id='file-id'),
+ ('file_modified',
+ dict(actions='modify_file', check='file_has_more_content')),
+ ('file_deleted',
+ dict(actions='delete_file', check='file_doesnt_exist')),),
+ # File renamed-modified/deleted
+ (dict(_base_actions='create_file',
+ _path='new-file', _file_id='file-id'),
+ ('file_renamed_and_modified',
+ dict(actions='modify_and_rename_file',
+ check='file_renamed_and_more_content')),
+ ('file_deleted',
+ dict(actions='delete_file', check='file_doesnt_exist')),),
+ # File modified/deleted in dir
+ (dict(_base_actions='create_file_in_dir',
+ _path='dir/file', _file_id='file-id'),
+ ('file_modified_in_dir',
+ dict(actions='modify_file_in_dir',
+ check='file_in_dir_has_more_content')),
+ ('file_deleted_in_dir',
+ dict(actions='delete_file',
+ check='file_in_dir_doesnt_exist')),),
+ ])
+
+ def do_create_file(self):
+ return [('add', ('file', 'file-id', 'file', 'trunk content\n'))]
+
+ def do_modify_file(self):
+ return [('modify', ('file-id', 'trunk content\nmore content\n'))]
+
+ def do_modify_and_rename_file(self):
+ return [('modify', ('file-id', 'trunk content\nmore content\n')),
+ ('rename', ('file', 'new-file'))]
+
+ def check_file_has_more_content(self):
+ self.assertFileEqual('trunk content\nmore content\n', 'branch/file')
+
+ def check_file_renamed_and_more_content(self):
+ self.assertFileEqual('trunk content\nmore content\n', 'branch/new-file')
+
+ def do_delete_file(self):
+ return [('unversion', 'file-id')]
+
+ def check_file_doesnt_exist(self):
+ self.assertPathDoesNotExist('branch/file')
+
+ def do_create_file_in_dir(self):
+ return [('add', ('dir', 'dir-id', 'directory', '')),
+ ('add', ('dir/file', 'file-id', 'file', 'trunk content\n'))]
+
+ def do_modify_file_in_dir(self):
+ return [('modify', ('file-id', 'trunk content\nmore content\n'))]
+
+ def check_file_in_dir_has_more_content(self):
+ self.assertFileEqual('trunk content\nmore content\n', 'branch/dir/file')
+
+ def check_file_in_dir_doesnt_exist(self):
+ self.assertPathDoesNotExist('branch/dir/file')
+
+ def _get_resolve_path_arg(self, wt, action):
+ return self._path
+
+ def assertContentsConflict(self, wt, c):
+ self.assertEqual(self._file_id, c.file_id)
+ self.assertEqual(self._path, c.path)
+ _assert_conflict = assertContentsConflict
+
+
+class TestResolvePathConflict(TestParametrizedResolveConflicts):
+
+ _conflict_type = conflicts.PathConflict
+
+ def do_nothing(self):
+ return []
+
+ # Each side dict additionally defines:
+ # - path: the path involved (can be '<deleted>')
+ # - file_id: the file id involved
+ scenarios = mirror_scenarios(
+ [
+ # File renamed/deleted
+ (dict(_base_actions='create_file'),
+ ('file_renamed',
+ dict(actions='rename_file', check='file_renamed',
+ path='new-file', file_id='file-id')),
+ ('file_deleted',
+ dict(actions='delete_file', check='file_doesnt_exist',
+ # PathConflicts deletion handling requires a special
+ # hard-coded value
+ path='<deleted>', file_id='file-id')),),
+ # File renamed/deleted in dir
+ (dict(_base_actions='create_file_in_dir'),
+ ('file_renamed_in_dir',
+ dict(actions='rename_file_in_dir', check='file_in_dir_renamed',
+ path='dir/new-file', file_id='file-id')),
+ ('file_deleted',
+ dict(actions='delete_file', check='file_in_dir_doesnt_exist',
+ # PathConflicts deletion handling requires a special
+ # hard-coded value
+ path='<deleted>', file_id='file-id')),),
+ # File renamed/renamed differently
+ (dict(_base_actions='create_file'),
+ ('file_renamed',
+ dict(actions='rename_file', check='file_renamed',
+ path='new-file', file_id='file-id')),
+ ('file_renamed2',
+ dict(actions='rename_file2', check='file_renamed2',
+ path='new-file2', file_id='file-id')),),
+ # Dir renamed/deleted
+ (dict(_base_actions='create_dir'),
+ ('dir_renamed',
+ dict(actions='rename_dir', check='dir_renamed',
+ path='new-dir', file_id='dir-id')),
+ ('dir_deleted',
+ dict(actions='delete_dir', check='dir_doesnt_exist',
+ # PathConflicts deletion handling requires a special
+ # hard-coded value
+ path='<deleted>', file_id='dir-id')),),
+ # Dir renamed/renamed differently
+ (dict(_base_actions='create_dir'),
+ ('dir_renamed',
+ dict(actions='rename_dir', check='dir_renamed',
+ path='new-dir', file_id='dir-id')),
+ ('dir_renamed2',
+ dict(actions='rename_dir2', check='dir_renamed2',
+ path='new-dir2', file_id='dir-id')),),
+ ])
+
+ def do_create_file(self):
+ return [('add', ('file', 'file-id', 'file', 'trunk content\n'))]
+
+ def do_create_dir(self):
+ return [('add', ('dir', 'dir-id', 'directory', ''))]
+
+ def do_rename_file(self):
+ return [('rename', ('file', 'new-file'))]
+
+ def check_file_renamed(self):
+ self.assertPathDoesNotExist('branch/file')
+ self.assertPathExists('branch/new-file')
+
+ def do_rename_file2(self):
+ return [('rename', ('file', 'new-file2'))]
+
+ def check_file_renamed2(self):
+ self.assertPathDoesNotExist('branch/file')
+ self.assertPathExists('branch/new-file2')
+
+ def do_rename_dir(self):
+ return [('rename', ('dir', 'new-dir'))]
+
+ def check_dir_renamed(self):
+ self.assertPathDoesNotExist('branch/dir')
+ self.assertPathExists('branch/new-dir')
+
+ def do_rename_dir2(self):
+ return [('rename', ('dir', 'new-dir2'))]
+
+ def check_dir_renamed2(self):
+ self.assertPathDoesNotExist('branch/dir')
+ self.assertPathExists('branch/new-dir2')
+
+ def do_delete_file(self):
+ return [('unversion', 'file-id')]
+
+ def check_file_doesnt_exist(self):
+ self.assertPathDoesNotExist('branch/file')
+
+ def do_delete_dir(self):
+ return [('unversion', 'dir-id')]
+
+ def check_dir_doesnt_exist(self):
+ self.assertPathDoesNotExist('branch/dir')
+
+ def do_create_file_in_dir(self):
+ return [('add', ('dir', 'dir-id', 'directory', '')),
+ ('add', ('dir/file', 'file-id', 'file', 'trunk content\n'))]
+
+ def do_rename_file_in_dir(self):
+ return [('rename', ('dir/file', 'dir/new-file'))]
+
+ def check_file_in_dir_renamed(self):
+ self.assertPathDoesNotExist('branch/dir/file')
+ self.assertPathExists('branch/dir/new-file')
+
+ def check_file_in_dir_doesnt_exist(self):
+ self.assertPathDoesNotExist('branch/dir/file')
+
+ def _get_resolve_path_arg(self, wt, action):
+ tpath = self._this['path']
+ opath = self._other['path']
+ if tpath == '<deleted>':
+ path = opath
+ else:
+ path = tpath
+ return path
+
+ def assertPathConflict(self, wt, c):
+ tpath = self._this['path']
+ tfile_id = self._this['file_id']
+ opath = self._other['path']
+ ofile_id = self._other['file_id']
+ self.assertEqual(tfile_id, ofile_id) # Sanity check
+ self.assertEqual(tfile_id, c.file_id)
+ self.assertEqual(tpath, c.path)
+ self.assertEqual(opath, c.conflict_path)
+ _assert_conflict = assertPathConflict
+
+
+class TestResolvePathConflictBefore531967(TestResolvePathConflict):
+ """Same as TestResolvePathConflict but a specific conflict object.
+ """
+
+ def assertPathConflict(self, wt, c):
+ # We create a conflict object as it was created before the fix and
+ # inject it into the working tree; the test will exercise the
+ # compatibility code.
+ old_c = conflicts.PathConflict('<deleted>', self._item_path,
+ file_id=None)
+ wt.set_conflicts(conflicts.ConflictList([old_c]))
+
+
+class TestResolveDuplicateEntry(TestParametrizedResolveConflicts):
+
+ _conflict_type = conflicts.DuplicateEntry
+
+ scenarios = mirror_scenarios(
+ [
+ # File created with different file-ids
+ (dict(_base_actions='nothing'),
+ ('filea_created',
+ dict(actions='create_file_a', check='file_content_a',
+ path='file', file_id='file-a-id')),
+ ('fileb_created',
+ dict(actions='create_file_b', check='file_content_b',
+ path='file', file_id='file-b-id')),),
+ # File created with different file-ids but deleted on one side
+ (dict(_base_actions='create_file_a'),
+ ('filea_replaced',
+ dict(actions='replace_file_a_by_b', check='file_content_b',
+ path='file', file_id='file-b-id')),
+ ('filea_modified',
+ dict(actions='modify_file_a', check='file_new_content',
+ path='file', file_id='file-a-id')),),
+ ])
+
+ def do_nothing(self):
+ return []
+
+ def do_create_file_a(self):
+ return [('add', ('file', 'file-a-id', 'file', 'file a content\n'))]
+
+ def check_file_content_a(self):
+ self.assertFileEqual('file a content\n', 'branch/file')
+
+ def do_create_file_b(self):
+ return [('add', ('file', 'file-b-id', 'file', 'file b content\n'))]
+
+ def check_file_content_b(self):
+ self.assertFileEqual('file b content\n', 'branch/file')
+
+ def do_replace_file_a_by_b(self):
+ return [('unversion', 'file-a-id'),
+ ('add', ('file', 'file-b-id', 'file', 'file b content\n'))]
+
+ def do_modify_file_a(self):
+ return [('modify', ('file-a-id', 'new content\n'))]
+
+ def check_file_new_content(self):
+ self.assertFileEqual('new content\n', 'branch/file')
+
+ def _get_resolve_path_arg(self, wt, action):
+ return self._this['path']
+
+ def assertDuplicateEntry(self, wt, c):
+ tpath = self._this['path']
+ tfile_id = self._this['file_id']
+ opath = self._other['path']
+ ofile_id = self._other['file_id']
+ self.assertEqual(tpath, opath) # Sanity check
+ self.assertEqual(tfile_id, c.file_id)
+ self.assertEqual(tpath + '.moved', c.path)
+ self.assertEqual(tpath, c.conflict_path)
+ _assert_conflict = assertDuplicateEntry
+
+
+class TestResolveUnversionedParent(TestResolveConflicts):
+
+ # FIXME: Add the reverse tests: dir deleted in trunk, file added in branch
+
+ # FIXME: While this *creates* UnversionedParent conflicts, this really only
+ # tests MissingParent resolution :-/
+ preamble = """
+$ bzr init trunk
+...
+$ cd trunk
+$ mkdir dir
+$ bzr add -q dir
+$ bzr commit -m 'Create trunk' -q
+$ echo 'trunk content' >dir/file
+$ bzr add -q dir/file
+$ bzr commit -q -m 'Add dir/file in trunk'
+$ bzr branch -q . -r 1 ../branch
+$ cd ../branch
+$ bzr rm dir -q
+$ bzr commit -q -m 'Remove dir in branch'
+$ bzr merge ../trunk
+2>+N dir/
+2>+N dir/file
+2>Conflict adding files to dir. Created directory.
+2>Conflict because dir is not versioned, but has versioned children. Versioned directory.
+2>2 conflicts encountered.
+"""
+
+ def test_take_this(self):
+ self.run_script("""
+$ bzr rm -q dir --force
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_take_other(self):
+ self.run_script("""
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+
+class TestResolveMissingParent(TestResolveConflicts):
+
+ preamble = """
+$ bzr init trunk
+...
+$ cd trunk
+$ mkdir dir
+$ echo 'trunk content' >dir/file
+$ bzr add -q
+$ bzr commit -m 'Create trunk' -q
+$ echo 'trunk content' >dir/file2
+$ bzr add -q dir/file2
+$ bzr commit -q -m 'Add dir/file2 in branch'
+$ bzr branch -q . -r 1 ../branch
+$ cd ../branch
+$ bzr rm -q dir/file --force
+$ bzr rm -q dir
+$ bzr commit -q -m 'Remove dir/file'
+$ bzr merge ../trunk
+2>+N dir/
+2>+N dir/file2
+2>Conflict adding files to dir. Created directory.
+2>Conflict because dir is not versioned, but has versioned children. Versioned directory.
+2>2 conflicts encountered.
+"""
+
+ def test_keep_them_all(self):
+ self.run_script("""
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_adopt_child(self):
+ self.run_script("""
+$ bzr mv -q dir/file2 file2
+$ bzr rm -q dir --force
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_kill_them_all(self):
+ self.run_script("""
+$ bzr rm -q dir --force
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_resolve_taking_this(self):
+ self.run_script("""
+$ bzr resolve --take-this dir
+2>...
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_resolve_taking_other(self):
+ self.run_script("""
+$ bzr resolve --take-other dir
+2>...
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+
+class TestResolveDeletingParent(TestResolveConflicts):
+
+ preamble = """
+$ bzr init trunk
+...
+$ cd trunk
+$ mkdir dir
+$ echo 'trunk content' >dir/file
+$ bzr add -q
+$ bzr commit -m 'Create trunk' -q
+$ bzr rm -q dir/file --force
+$ bzr rm -q dir --force
+$ bzr commit -q -m 'Remove dir/file'
+$ bzr branch -q . -r 1 ../branch
+$ cd ../branch
+$ echo 'branch content' >dir/file2
+$ bzr add -q dir/file2
+$ bzr commit -q -m 'Add dir/file2 in branch'
+$ bzr merge ../trunk
+2>-D dir/file
+2>Conflict: can't delete dir because it is not empty. Not deleting.
+2>Conflict because dir is not versioned, but has versioned children. Versioned directory.
+2>2 conflicts encountered.
+"""
+
+ def test_keep_them_all(self):
+ self.run_script("""
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_adopt_child(self):
+ self.run_script("""
+$ bzr mv -q dir/file2 file2
+$ bzr rm -q dir --force
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_kill_them_all(self):
+ self.run_script("""
+$ bzr rm -q dir --force
+$ bzr resolve dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_resolve_taking_this(self):
+ self.run_script("""
+$ bzr resolve --take-this dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_resolve_taking_other(self):
+ self.run_script("""
+$ bzr resolve --take-other dir
+2>deleted dir/file2
+2>deleted dir
+2>2 conflicts resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+
+class TestResolveParentLoop(TestParametrizedResolveConflicts):
+
+ _conflict_type = conflicts.ParentLoop
+
+ _this_args = None
+ _other_args = None
+
+ # Each side dict additionally defines:
+ # - dir_id: the directory being moved
+ # - target_id: The target directory
+ # - xfail: whether the test is expected to fail if the action is
+ # involved as 'other'
+ scenarios = mirror_scenarios(
+ [
+ # Dirs moved into each other
+ (dict(_base_actions='create_dir1_dir2'),
+ ('dir1_into_dir2',
+ dict(actions='move_dir1_into_dir2', check='dir1_moved',
+ dir_id='dir1-id', target_id='dir2-id', xfail=False)),
+ ('dir2_into_dir1',
+ dict(actions='move_dir2_into_dir1', check='dir2_moved',
+ dir_id='dir2-id', target_id='dir1-id', xfail=False))),
+ # Subdirs moved into each other
+ (dict(_base_actions='create_dir1_4'),
+ ('dir1_into_dir4',
+ dict(actions='move_dir1_into_dir4', check='dir1_2_moved',
+ dir_id='dir1-id', target_id='dir4-id', xfail=True)),
+ ('dir3_into_dir2',
+ dict(actions='move_dir3_into_dir2', check='dir3_4_moved',
+ dir_id='dir3-id', target_id='dir2-id', xfail=True))),
+ ])
+
+ def do_create_dir1_dir2(self):
+ return [('add', ('dir1', 'dir1-id', 'directory', '')),
+ ('add', ('dir2', 'dir2-id', 'directory', '')),]
+
+ def do_move_dir1_into_dir2(self):
+ return [('rename', ('dir1', 'dir2/dir1'))]
+
+ def check_dir1_moved(self):
+ self.assertPathDoesNotExist('branch/dir1')
+ self.assertPathExists('branch/dir2/dir1')
+
+ def do_move_dir2_into_dir1(self):
+ return [('rename', ('dir2', 'dir1/dir2'))]
+
+ def check_dir2_moved(self):
+ self.assertPathDoesNotExist('branch/dir2')
+ self.assertPathExists('branch/dir1/dir2')
+
+ def do_create_dir1_4(self):
+ return [('add', ('dir1', 'dir1-id', 'directory', '')),
+ ('add', ('dir1/dir2', 'dir2-id', 'directory', '')),
+ ('add', ('dir3', 'dir3-id', 'directory', '')),
+ ('add', ('dir3/dir4', 'dir4-id', 'directory', '')),]
+
+ def do_move_dir1_into_dir4(self):
+ return [('rename', ('dir1', 'dir3/dir4/dir1'))]
+
+ def check_dir1_2_moved(self):
+ self.assertPathDoesNotExist('branch/dir1')
+ self.assertPathExists('branch/dir3/dir4/dir1')
+ self.assertPathExists('branch/dir3/dir4/dir1/dir2')
+
+ def do_move_dir3_into_dir2(self):
+ return [('rename', ('dir3', 'dir1/dir2/dir3'))]
+
+ def check_dir3_4_moved(self):
+ self.assertPathDoesNotExist('branch/dir3')
+ self.assertPathExists('branch/dir1/dir2/dir3')
+ self.assertPathExists('branch/dir1/dir2/dir3/dir4')
+
+ def _get_resolve_path_arg(self, wt, action):
+ # ParentLoop says: moving <conflict_path> into <path>. Cancelled move.
+ # But since <path> doesn't exist in the working tree, we need to use
+ # <conflict_path> instead, and that, in turn, is given by dir_id. Pfew.
+ return wt.id2path(self._other['dir_id'])
+
+ def assertParentLoop(self, wt, c):
+ self.assertEqual(self._other['dir_id'], c.file_id)
+ self.assertEqual(self._other['target_id'], c.conflict_file_id)
+ # The conflict paths are irrelevant (they are deterministic but not
+ # worth checking since they don't provide the needed information
+ # anyway)
+ if self._other['xfail']:
+ # It's a bit hackish to raise from here, relying on being called for
+ # both tests, but this avoids overriding test_resolve_taking_other
+ self.knownFailure(
+ "ParentLoop doesn't carry enough info to resolve --take-other")
+ _assert_conflict = assertParentLoop
+
+
+class TestResolveNonDirectoryParent(TestResolveConflicts):
+
+ preamble = """
+$ bzr init trunk
+...
+$ cd trunk
+$ bzr mkdir foo
+...
+$ bzr commit -m 'Create trunk' -q
+$ echo "Boing" >foo/bar
+$ bzr add -q foo/bar
+$ bzr commit -q -m 'Add foo/bar'
+$ bzr branch -q . -r 1 ../branch
+$ cd ../branch
+$ rm -r foo
+$ echo "Boo!" >foo
+$ bzr commit -q -m 'foo is now a file'
+$ bzr merge ../trunk
+2>+N foo.new/bar
+2>RK foo => foo.new/
+# FIXME: The message is misleading, foo.new *is* a directory when the message
+# is displayed -- vila 090916
+2>Conflict: foo.new is not a directory, but has files in it. Created directory.
+2>1 conflicts encountered.
+"""
+
+ def test_take_this(self):
+ self.run_script("""
+$ bzr rm -q foo.new --force
+# FIXME: Isn't it weird that foo is now unknown even if foo.new has been put
+# aside? -- vila 090916
+$ bzr add -q foo
+$ bzr resolve foo.new
+2>1 conflict resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_take_other(self):
+ self.run_script("""
+$ bzr rm -q foo --force
+$ bzr mv -q foo.new foo
+$ bzr resolve foo
+2>1 conflict resolved, 0 remaining
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_resolve_taking_this(self):
+ self.run_script("""
+$ bzr resolve --take-this foo.new
+2>...
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+ def test_resolve_taking_other(self):
+ self.run_script("""
+$ bzr resolve --take-other foo.new
+2>...
+$ bzr commit -q --strict -m 'No more conflicts nor unknown files'
+""")
+
+
+class TestMalformedTransform(script.TestCaseWithTransportAndScript):
+
+ def test_bug_430129(self):
+ # This is nearly like TestResolveNonDirectoryParent but with branch and
+ # trunk switched. As such it should certainly produce the same
+ # conflict.
+ self.assertRaises(errors.MalformedTransform,
+ self.run_script,"""
+$ bzr init trunk
+...
+$ cd trunk
+$ bzr mkdir foo
+...
+$ bzr commit -m 'Create trunk' -q
+$ rm -r foo
+$ echo "Boo!" >foo
+$ bzr commit -m 'foo is now a file' -q
+$ bzr branch -q . -r 1 ../branch -q
+$ cd ../branch
+$ echo "Boing" >foo/bar
+$ bzr add -q foo/bar -q
+$ bzr commit -m 'Add foo/bar' -q
+$ bzr merge ../trunk
+2>bzr: ERROR: Tree transform is malformed [('unversioned executability', 'new-1')]
+""")
+
+
+class TestNoFinalPath(script.TestCaseWithTransportAndScript):
+
+ def test_bug_805809(self):
+ self.run_script("""
+$ bzr init trunk
+Created a standalone tree (format: 2a)
+$ cd trunk
+$ echo trunk >file
+$ bzr add
+adding file
+$ bzr commit -m 'create file on trunk'
+2>Committing to: .../trunk/
+2>added file
+2>Committed revision 1.
+# Create a debian branch based on trunk
+$ cd ..
+$ bzr branch trunk -r 1 debian
+2>Branched 1 revision.
+$ cd debian
+$ mkdir dir
+$ bzr add
+adding dir
+$ bzr mv file dir
+file => dir/file
+$ bzr commit -m 'rename file to dir/file for debian'
+2>Committing to: .../debian/
+2>added dir
+2>renamed file => dir/file
+2>Committed revision 2.
+# Create an experimental branch with a new root-id
+$ cd ..
+$ bzr init experimental
+Created a standalone tree (format: 2a)
+$ cd experimental
+# Work around merging into empty branch not being supported
+# (http://pad.lv/308562)
+$ echo something >not-empty
+$ bzr add
+adding not-empty
+$ bzr commit -m 'Add some content in experimental'
+2>Committing to: .../experimental/
+2>added not-empty
+2>Committed revision 1.
+# merge debian even without a common ancestor
+$ bzr merge ../debian -r0..2
+2>+N dir/
+2>+N dir/file
+2>All changes applied successfully.
+$ bzr commit -m 'merging debian into experimental'
+2>Committing to: .../experimental/
+2>added dir
+2>added dir/file
+2>Committed revision 2.
+# Create an ubuntu branch with yet another root-id
+$ cd ..
+$ bzr init ubuntu
+Created a standalone tree (format: 2a)
+$ cd ubuntu
+# Work around merging into empty branch not being supported
+# (http://pad.lv/308562)
+$ echo something >not-empty-ubuntu
+$ bzr add
+adding not-empty-ubuntu
+$ bzr commit -m 'Add some content in experimental'
+2>Committing to: .../ubuntu/
+2>added not-empty-ubuntu
+2>Committed revision 1.
+# Also merge debian
+$ bzr merge ../debian -r0..2
+2>+N dir/
+2>+N dir/file
+2>All changes applied successfully.
+$ bzr commit -m 'merging debian'
+2>Committing to: .../ubuntu/
+2>added dir
+2>added dir/file
+2>Committed revision 2.
+# Now try to merge experimental
+$ bzr merge ../experimental
+2>+N not-empty
+2>Path conflict: dir / dir
+2>1 conflicts encountered.
+""")
+
+
+class TestResolveActionOption(tests.TestCase):
+
+ def setUp(self):
+ super(TestResolveActionOption, self).setUp()
+ self.options = [conflicts.ResolveActionOption()]
+ self.parser = option.get_optparser(dict((o.name, o)
+ for o in self.options))
+
+ def parse(self, args):
+ return self.parser.parse_args(args)
+
+ def test_unknown_action(self):
+ self.assertRaises(errors.BadOptionValue,
+ self.parse, ['--action', 'take-me-to-the-moon'])
+
+ def test_done(self):
+ opts, args = self.parse(['--action', 'done'])
+ self.assertEqual({'action':'done'}, opts)
+
+ def test_take_this(self):
+ opts, args = self.parse(['--action', 'take-this'])
+ self.assertEqual({'action': 'take_this'}, opts)
+ opts, args = self.parse(['--take-this'])
+ self.assertEqual({'action': 'take_this'}, opts)
+
+ def test_take_other(self):
+ opts, args = self.parse(['--action', 'take-other'])
+ self.assertEqual({'action': 'take_other'}, opts)
+ opts, args = self.parse(['--take-other'])
+ self.assertEqual({'action': 'take_other'}, opts)
diff --git a/bzrlib/tests/test_controldir.py b/bzrlib/tests/test_controldir.py
new file mode 100644
index 0000000..88b1be6
--- /dev/null
+++ b/bzrlib/tests/test_controldir.py
@@ -0,0 +1,237 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the ControlDir facility.
+
+For interface contract tests, see tests/per_control_dir.
+"""
+
+from bzrlib import (
+ controldir,
+ errors,
+ tests,
+ ui,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class SampleComponentFormat(controldir.ControlComponentFormat):
+
+ def get_format_string(self):
+ return "Example component format."
+
+
+class SampleExtraComponentFormat(controldir.ControlComponentFormat):
+ """Extra format, no format string."""
+
+
+class TestMetaComponentFormatRegistry(tests.TestCase):
+
+ def setUp(self):
+ super(TestMetaComponentFormatRegistry, self).setUp()
+ self.registry = controldir.ControlComponentFormatRegistry()
+
+ def test_register_unregister_format(self):
+ format = SampleComponentFormat()
+ self.registry.register(format)
+ self.assertEquals(format,
+ self.registry.get("Example component format."))
+ self.registry.remove(format)
+ self.assertRaises(KeyError, self.registry.get,
+ "Example component format.")
+
+ def test_get_all(self):
+ format = SampleComponentFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_get_all_modules(self):
+ format = SampleComponentFormat()
+ self.assertEquals(set(), self.registry._get_all_modules())
+ self.registry.register(format)
+ self.assertEquals(
+ set(["bzrlib.tests.test_controldir"]),
+ self.registry._get_all_modules())
+
+ def test_register_extra(self):
+ format = SampleExtraComponentFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra_lazy(self):
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra_lazy("bzrlib.tests.test_controldir",
+ "SampleExtraComponentFormat")
+ formats = self.registry._get_all()
+ self.assertEquals(1, len(formats))
+ self.assertIsInstance(formats[0], SampleExtraComponentFormat)
+
+
+class TestControlDirFormatDeprecated(tests.TestCaseWithTransport):
+ """Tests for removed registration method in the ControlDirFormat facility."""
+
+ def test_register_format(self):
+ self.assertRaises(errors.BzrError,
+ controldir.ControlDirFormat.register_format, object())
+
+
+class TestProber(tests.TestCaseWithTransport):
+ """Per-prober tests."""
+
+ scenarios = [
+ (prober_cls.__name__, {'prober_cls': prober_cls})
+ for prober_cls in controldir.ControlDirFormat._probers]
+
+ def setUp(self):
+ super(TestProber, self).setUp()
+ self.prober = self.prober_cls()
+
+ def test_probe_transport_empty(self):
+ transport = self.get_transport(".")
+ self.assertRaises(errors.NotBranchError,
+ self.prober.probe_transport, transport)
+
+ def test_known_formats(self):
+ known_formats = self.prober_cls.known_formats()
+ self.assertIsInstance(known_formats, set)
+ for format in known_formats:
+ self.assertIsInstance(format, controldir.ControlDirFormat,
+ repr(format))
+
+
+class NotBzrDir(controldir.ControlDir):
+ """A non .bzr based control directory."""
+
+ def __init__(self, transport, format):
+ self._format = format
+ self.root_transport = transport
+ self.transport = transport.clone('.not')
+
+
+class NotBzrDirFormat(controldir.ControlDirFormat):
+ """A test class representing any non-.bzr based disk format."""
+
+ def initialize_on_transport(self, transport):
+ """Initialize a new .not dir in the base directory of a Transport."""
+ transport.mkdir('.not')
+ return self.open(transport)
+
+ def open(self, transport):
+ """Open this directory."""
+ return NotBzrDir(transport, self)
+
+
+class NotBzrDirProber(controldir.Prober):
+
+ def probe_transport(self, transport):
+ """Our format is present if the transport ends in '.not/'."""
+ if transport.has('.not'):
+ return NotBzrDirFormat()
+
+ @classmethod
+ def known_formats(cls):
+ return set([NotBzrDirFormat()])
+
+
+class TestNotBzrDir(tests.TestCaseWithTransport):
+ """Tests for using the controldir api with a non .bzr based disk format.
+
+ If/when one of these is in the core, we can let the implementation tests
+ verify this works.
+ """
+
+ def test_create_and_find_format(self):
+ # create a .not dir
+ format = NotBzrDirFormat()
+ dir = format.initialize(self.get_url())
+ self.assertIsInstance(dir, NotBzrDir)
+ # now probe for it.
+ controldir.ControlDirFormat.register_prober(NotBzrDirProber)
+ try:
+ found = controldir.ControlDirFormat.find_format(self.get_transport())
+ self.assertIsInstance(found, NotBzrDirFormat)
+ finally:
+ controldir.ControlDirFormat.unregister_prober(NotBzrDirProber)
+
+ def test_included_in_known_formats(self):
+ controldir.ControlDirFormat.register_prober(NotBzrDirProber)
+ self.addCleanup(controldir.ControlDirFormat.unregister_prober, NotBzrDirProber)
+ formats = controldir.ControlDirFormat.known_formats()
+ self.assertIsInstance(formats, set)
+ for format in formats:
+ if isinstance(format, NotBzrDirFormat):
+ break
+ else:
+ self.fail("No NotBzrDirFormat in %s" % formats)
+
+
+class UnsupportedControlComponentFormat(controldir.ControlComponentFormat):
+
+ def is_supported(self):
+ return False
+
+
+class OldControlComponentFormat(controldir.ControlComponentFormat):
+
+ def get_format_description(self):
+ return "An old format that is slow"
+
+ upgrade_recommended = True
+
+
+class DefaultControlComponentFormatTests(tests.TestCase):
+ """Tests for default ControlComponentFormat implementation."""
+
+ def test_check_support_status_unsupported(self):
+ self.assertRaises(errors.UnsupportedFormatError,
+ UnsupportedControlComponentFormat().check_support_status,
+ allow_unsupported=False)
+ UnsupportedControlComponentFormat().check_support_status(
+ allow_unsupported=True)
+
+ def test_check_support_status_supported(self):
+ controldir.ControlComponentFormat().check_support_status(
+ allow_unsupported=False)
+ controldir.ControlComponentFormat().check_support_status(
+ allow_unsupported=True)
+
+ def test_recommend_upgrade_current_format(self):
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stderr=stderr)
+ format = controldir.ControlComponentFormat()
+ format.check_support_status(allow_unsupported=False,
+ recommend_upgrade=True)
+ self.assertEquals("", stderr.getvalue())
+
+ def test_recommend_upgrade_old_format(self):
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stderr=stderr)
+ format = OldControlComponentFormat()
+ format.check_support_status(allow_unsupported=False,
+ recommend_upgrade=False)
+ self.assertEquals("", stderr.getvalue())
+ format.check_support_status(allow_unsupported=False,
+ recommend_upgrade=True, basedir='apath')
+ self.assertEquals(
+ 'An old format that is slow is deprecated and a better format '
+ 'is available.\nIt is recommended that you upgrade by running '
+ 'the command\n bzr upgrade apath\n',
+ stderr.getvalue())
diff --git a/bzrlib/tests/test_counted_lock.py b/bzrlib/tests/test_counted_lock.py
new file mode 100644
index 0000000..e8a720c
--- /dev/null
+++ b/bzrlib/tests/test_counted_lock.py
@@ -0,0 +1,219 @@
+# Copyright (C) 2007, 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.counted_lock"""
+
+from bzrlib.counted_lock import CountedLock
+from bzrlib.errors import (
+ LockError,
+ LockNotHeld,
+ ReadOnlyError,
+ TokenMismatch,
+ )
+from bzrlib.tests import TestCase
+
+
+class DummyLock(object):
+ """Lock that just records what's been done to it."""
+
+ def __init__(self):
+ self._calls = []
+ self._lock_mode = None
+
+ def is_locked(self):
+ return self._lock_mode is not None
+
+ def lock_read(self):
+ self._assert_not_locked()
+ self._lock_mode = 'r'
+ self._calls.append('lock_read')
+
+ def lock_write(self, token=None):
+ if token is not None:
+ if token == 'token':
+ # already held by this caller
+ return 'token'
+ else:
+ raise TokenMismatch()
+ self._assert_not_locked()
+ self._lock_mode = 'w'
+ self._calls.append('lock_write')
+ return 'token'
+
+ def unlock(self):
+ self._assert_locked()
+ self._lock_mode = None
+ self._calls.append('unlock')
+
+ def break_lock(self):
+ self._lock_mode = None
+ self._calls.append('break')
+
+ def _assert_locked(self):
+ if not self._lock_mode:
+ raise LockError("%s is not locked" % (self,))
+
+ def _assert_not_locked(self):
+ if self._lock_mode:
+ raise LockError("%s is already locked in mode %r" %
+ (self, self._lock_mode))
+
+ def validate_token(self, token):
+ if token == 'token':
+ # already held by this caller
+ return 'token'
+ elif token is None:
+ return
+ else:
+ raise TokenMismatch(token, 'token')
+
+
+class TestDummyLock(TestCase):
+
+ def test_lock_initially_not_held(self):
+ l = DummyLock()
+ self.assertFalse(l.is_locked())
+
+ def test_lock_not_reentrant(self):
+ # can't take the underlying lock twice
+ l = DummyLock()
+ l.lock_read()
+ self.assertRaises(LockError, l.lock_read)
+
+ def test_detect_underlock(self):
+ l = DummyLock()
+ self.assertRaises(LockError, l.unlock)
+
+ def test_basic_locking(self):
+ # dummy lock works like a basic non reentrant lock
+ real_lock = DummyLock()
+ self.assertFalse(real_lock.is_locked())
+ # lock read and unlock
+ real_lock.lock_read()
+ self.assertTrue(real_lock.is_locked())
+ real_lock.unlock()
+ self.assertFalse(real_lock.is_locked())
+ # lock write and unlock
+ result = real_lock.lock_write()
+ self.assertEqual('token', result)
+ self.assertTrue(real_lock.is_locked())
+ real_lock.unlock()
+ self.assertFalse(real_lock.is_locked())
+ # check calls
+ self.assertEqual(
+ ['lock_read', 'unlock', 'lock_write', 'unlock'],
+ real_lock._calls)
+
+ def test_break_lock(self):
+ l = DummyLock()
+ l.lock_write()
+ l.break_lock()
+ self.assertFalse(l.is_locked())
+ self.assertEqual(
+ ['lock_write', 'break'],
+ l._calls)
+
+
+class TestCountedLock(TestCase):
+
+ def test_read_lock(self):
+ # Lock and unlock a counted lock
+ real_lock = DummyLock()
+ l = CountedLock(real_lock)
+ self.assertFalse(l.is_locked())
+ # can lock twice, although this isn't allowed on the underlying lock
+ l.lock_read()
+ l.lock_read()
+ self.assertTrue(l.is_locked())
+ # and release
+ l.unlock()
+ self.assertTrue(l.is_locked())
+ l.unlock()
+ self.assertFalse(l.is_locked())
+ self.assertEquals(
+ ['lock_read', 'unlock'],
+ real_lock._calls)
+
+ def test_unlock_not_locked(self):
+ real_lock = DummyLock()
+ l = CountedLock(real_lock)
+ self.assertRaises(LockNotHeld, l.unlock)
+
+ def test_read_lock_while_write_locked(self):
+ real_lock = DummyLock()
+ l = CountedLock(real_lock)
+ l.lock_write()
+ l.lock_read()
+ self.assertEquals('token', l.lock_write())
+ l.unlock()
+ l.unlock()
+ l.unlock()
+ self.assertFalse(l.is_locked())
+ self.assertEquals(
+ ['lock_write', 'unlock'],
+ real_lock._calls)
+
+ def test_write_lock_while_read_locked(self):
+ real_lock = DummyLock()
+ l = CountedLock(real_lock)
+ l.lock_read()
+ self.assertRaises(ReadOnlyError, l.lock_write)
+ self.assertRaises(ReadOnlyError, l.lock_write)
+ l.unlock()
+ self.assertFalse(l.is_locked())
+ self.assertEquals(
+ ['lock_read', 'unlock'],
+ real_lock._calls)
+
+ def test_write_lock_reentrant(self):
+ real_lock = DummyLock()
+ l = CountedLock(real_lock)
+ self.assertEqual('token', l.lock_write())
+ self.assertEqual('token', l.lock_write())
+ l.unlock()
+ l.unlock()
+
+ def test_reenter_with_token(self):
+ real_lock = DummyLock()
+ l1 = CountedLock(real_lock)
+ l2 = CountedLock(real_lock)
+ token = l1.lock_write()
+ self.assertEqual('token', token)
+ # now imagine that we lost that connection, but we still have the
+ # token...
+ del l1
+ # because we can supply the token, we can acquire the lock through
+ # another instance
+ self.assertTrue(real_lock.is_locked())
+ self.assertFalse(l2.is_locked())
+ self.assertEqual(token, l2.lock_write(token=token))
+ self.assertTrue(l2.is_locked())
+ self.assertTrue(real_lock.is_locked())
+ l2.unlock()
+ self.assertFalse(l2.is_locked())
+ self.assertFalse(real_lock.is_locked())
+
+ def test_break_lock(self):
+ real_lock = DummyLock()
+ l = CountedLock(real_lock)
+ l.lock_write()
+ l.lock_write()
+ self.assertTrue(real_lock.is_locked())
+ l.break_lock()
+ self.assertFalse(l.is_locked())
+ self.assertFalse(real_lock.is_locked())
+
+ # TODO: test get_physical_lock_status
diff --git a/bzrlib/tests/test_crash.py b/bzrlib/tests/test_crash.py
new file mode 100644
index 0000000..f7647b7
--- /dev/null
+++ b/bzrlib/tests/test_crash.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import doctest
+import os
+from StringIO import StringIO
+import sys
+
+from bzrlib import (
+ config,
+ crash,
+ osutils,
+ plugin,
+ tests,
+ )
+
+from bzrlib.tests import features
+
+
+class TestApportReporting(tests.TestCaseInTempDir):
+
+ _test_needs_features = [features.apport]
+
+ def test_apport_report(self):
+ crash_dir = osutils.joinpath((self.test_base_dir, 'crash'))
+ os.mkdir(crash_dir)
+ self.overrideEnv('APPORT_CRASH_DIR', crash_dir)
+ self.assertEquals(crash_dir, config.crash_dir())
+
+ self.overrideAttr(
+ plugin,
+ 'plugin_warnings',
+ {'example': ['Failed to load plugin foo']})
+
+ stderr = StringIO()
+
+ try:
+ raise AssertionError("my error")
+ except AssertionError, e:
+ pass
+
+ crash_filename = crash.report_bug_to_apport(sys.exc_info(),
+ stderr)
+
+ # message explaining the crash
+ self.assertContainsRe(stderr.getvalue(),
+ " apport-bug %s" % crash_filename)
+
+ crash_file = open(crash_filename)
+ try:
+ report = crash_file.read()
+ finally:
+ crash_file.close()
+
+ self.assertContainsRe(report,
+ '(?m)^BzrVersion:') # should be in the traceback
+ self.assertContainsRe(report, 'my error')
+ self.assertContainsRe(report, 'AssertionError')
+ # see https://bugs.launchpad.net/bzr/+bug/528114
+ self.assertContainsRe(report, 'ExecutablePath')
+ self.assertContainsRe(report, 'test_apport_report')
+ # should also be in there
+ self.assertContainsRe(report, '(?m)^CommandLine:')
+ self.assertContainsRe(
+ report,
+ 'Failed to load plugin foo')
+
+
+class TestNonApportReporting(tests.TestCase):
+ """Reporting of crash-type bugs without apport.
+
+ This should work in all environments.
+ """
+
+ def setup_fake_plugins(self):
+ def fake_plugins():
+ fake = plugin.PlugIn('fake_plugin', plugin)
+ fake.version_info = lambda: (1, 2, 3)
+ return {"fake_plugin": fake}
+ self.overrideAttr(plugin, 'plugins', fake_plugins)
+
+ def test_report_bug_legacy(self):
+ self.setup_fake_plugins()
+ err_file = StringIO()
+ try:
+ raise AssertionError("my error")
+ except AssertionError, e:
+ pass
+ crash.report_bug_legacy(sys.exc_info(), err_file)
+ report = err_file.getvalue()
+ for needle in [
+ "bzr: ERROR: exceptions.AssertionError: my error",
+ r"Traceback \(most recent call last\):",
+ r"plugins: fake_plugin\[1\.2\.3\]",
+ ]:
+ self.assertContainsRe(
+ report,
+ needle)
diff --git a/bzrlib/tests/test_debug.py b/bzrlib/tests/test_debug.py
new file mode 100644
index 0000000..fda3ceb
--- /dev/null
+++ b/bzrlib/tests/test_debug.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.debug"""
+
+
+from bzrlib import (
+ config,
+ debug,
+ tests,
+ )
+
+
+class TestDebugFlags(tests.TestCaseInTempDir):
+
+ def test_set_debug_flags_from_config(self):
+ # test both combinations because configobj automatically splits up
+ # comma-separated lists
+ self.assertDebugFlags(['hpss', 'error'], 'debug_flags = hpss, error\n')
+ self.assertDebugFlags(['hpss'], 'debug_flags = hpss\n')
+
+ def assertDebugFlags(self, expected_flags, conf_bytes):
+ conf = config.GlobalStack()
+ conf.store._load_from_string('[DEFAULT]\n' + conf_bytes)
+ conf.store.save()
+ self.overrideAttr(debug, 'debug_flags', set())
+ debug.set_debug_flags_from_config()
+ self.assertEqual(set(expected_flags), debug.debug_flags)
diff --git a/bzrlib/tests/test_decorators.py b/bzrlib/tests/test_decorators.py
new file mode 100644
index 0000000..be25bb1
--- /dev/null
+++ b/bzrlib/tests/test_decorators.py
@@ -0,0 +1,321 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for decorator functions"""
+
+import inspect
+
+from bzrlib import decorators
+from bzrlib.tests import TestCase
+
+
+class SampleUnlockError(Exception):
+ pass
+
+
+def create_decorator_sample(style, unlock_error=None, meth=None):
+ """Create a DecoratorSample object, using specific lock operators.
+
+ :param style: The type of lock decorators to use (fast/pretty/None)
+ :param unlock_error: If specified, an error to raise from unlock.
+    :param meth: a function to be decorated and added as 'meth_read' and
+        'meth_write' methods on the object.
+ :return: An instantiated DecoratorSample object.
+ """
+
+ if style is None:
+ # Default
+ needs_read_lock = decorators.needs_read_lock
+ needs_write_lock = decorators.needs_write_lock
+ elif style == 'pretty':
+ needs_read_lock = decorators._pretty_needs_read_lock
+ needs_write_lock = decorators._pretty_needs_write_lock
+ else:
+ needs_read_lock = decorators._fast_needs_read_lock
+ needs_write_lock = decorators._fast_needs_write_lock
+
+ class DecoratorSample(object):
+ """Sample class that uses decorators.
+
+        Log when requests go through lock_read()/unlock() or
+        lock_write()/unlock().
+ """
+
+ def __init__(self):
+ self.actions = []
+
+ def lock_read(self):
+ self.actions.append('lock_read')
+
+ def lock_write(self):
+ self.actions.append('lock_write')
+
+ @decorators.only_raises(SampleUnlockError)
+ def unlock(self):
+ if unlock_error:
+ self.actions.append('unlock_fail')
+ raise unlock_error
+ else:
+ self.actions.append('unlock')
+
+ @needs_read_lock
+ def frob(self):
+ """Frob the sample object"""
+ self.actions.append('frob')
+ return 'newbie'
+
+ @needs_write_lock
+ def bank(self, bar, biz=None):
+ """Bank the sample, but using bar and biz."""
+ self.actions.append(('bank', bar, biz))
+ return (bar, biz)
+
+ @needs_read_lock
+ def fail_during_read(self):
+ self.actions.append('fail_during_read')
+ raise TypeError('during read')
+
+ @needs_write_lock
+ def fail_during_write(self):
+ self.actions.append('fail_during_write')
+ raise TypeError('during write')
+
+ if meth is not None:
+ meth_read = needs_read_lock(meth)
+ meth_write = needs_write_lock(meth)
+
+ return DecoratorSample()
+
+
+class TestDecoratorActions(TestCase):
+
+ _decorator_style = None # default
+
+ def test_read_lock_locks_and_unlocks(self):
+ sam = create_decorator_sample(self._decorator_style)
+ self.assertEqual('newbie', sam.frob())
+ self.assertEqual(['lock_read', 'frob', 'unlock'], sam.actions)
+
+ def test_write_lock_locks_and_unlocks(self):
+ sam = create_decorator_sample(self._decorator_style)
+ self.assertEqual(('bar', 'bing'), sam.bank('bar', biz='bing'))
+ self.assertEqual(['lock_write', ('bank', 'bar', 'bing'), 'unlock'],
+ sam.actions)
+
+ def test_read_lock_unlocks_during_failure(self):
+ sam = create_decorator_sample(self._decorator_style)
+ self.assertRaises(TypeError, sam.fail_during_read)
+ self.assertEqual(['lock_read', 'fail_during_read', 'unlock'],
+ sam.actions)
+
+ def test_write_lock_unlocks_during_failure(self):
+ sam = create_decorator_sample(self._decorator_style)
+ self.assertRaises(TypeError, sam.fail_during_write)
+ self.assertEqual(['lock_write', 'fail_during_write', 'unlock'],
+ sam.actions)
+
+ def test_read_lock_raises_original_error(self):
+ sam = create_decorator_sample(self._decorator_style,
+ unlock_error=SampleUnlockError())
+ self.assertRaises(TypeError, sam.fail_during_read)
+ self.assertEqual(['lock_read', 'fail_during_read', 'unlock_fail'],
+ sam.actions)
+
+ def test_write_lock_raises_original_error(self):
+ sam = create_decorator_sample(self._decorator_style,
+ unlock_error=SampleUnlockError())
+ self.assertRaises(TypeError, sam.fail_during_write)
+ self.assertEqual(['lock_write', 'fail_during_write', 'unlock_fail'],
+ sam.actions)
+
+ def test_read_lock_raises_unlock_error(self):
+ sam = create_decorator_sample(self._decorator_style,
+ unlock_error=SampleUnlockError())
+ self.assertRaises(SampleUnlockError, sam.frob)
+ self.assertEqual(['lock_read', 'frob', 'unlock_fail'], sam.actions)
+
+ def test_write_lock_raises_unlock_error(self):
+ sam = create_decorator_sample(self._decorator_style,
+ unlock_error=SampleUnlockError())
+ self.assertRaises(SampleUnlockError, sam.bank, 'bar', biz='bing')
+ self.assertEqual(['lock_write', ('bank', 'bar', 'bing'),
+ 'unlock_fail'], sam.actions)
+
+ def test_read_lock_preserves_default_str_kwarg_identity(self):
+ a_constant = 'A str used as a constant'
+ def meth(self, param=a_constant):
+ return param
+ sam = create_decorator_sample(self._decorator_style, meth=meth)
+ self.assertIs(a_constant, sam.meth_read())
+
+ def test_write_lock_preserves_default_str_kwarg_identity(self):
+ a_constant = 'A str used as a constant'
+ def meth(self, param=a_constant):
+ return param
+ sam = create_decorator_sample(self._decorator_style, meth=meth)
+ self.assertIs(a_constant, sam.meth_write())
+
+
+class TestFastDecoratorActions(TestDecoratorActions):
+
+ _decorator_style = 'fast'
+
+
+class TestPrettyDecoratorActions(TestDecoratorActions):
+
+ _decorator_style = 'pretty'
+
+
+class TestDecoratorDocs(TestCase):
+ """Test method decorators"""
+
+ def test_read_lock_passthrough(self):
+ """@needs_read_lock exposes underlying name and doc."""
+ sam = create_decorator_sample(None)
+ self.assertEqual('frob', sam.frob.__name__)
+ self.assertDocstring('Frob the sample object', sam.frob)
+
+ def test_write_lock_passthrough(self):
+ """@needs_write_lock exposes underlying name and doc."""
+ sam = create_decorator_sample(None)
+ self.assertEqual('bank', sam.bank.__name__)
+ self.assertDocstring('Bank the sample, but using bar and biz.',
+ sam.bank)
+
+ def test_argument_passthrough(self):
+ """Test that arguments get passed around properly."""
+ sam = create_decorator_sample(None)
+ sam.bank('1', biz='2')
+ self.assertEqual(['lock_write',
+ ('bank', '1', '2'),
+ 'unlock',
+ ], sam.actions)
+
+
+class TestPrettyDecorators(TestCase):
+ """Test that pretty decorators generate nice looking wrappers."""
+
+ def get_formatted_args(self, func):
+ """Return a nicely formatted string for the arguments to a function.
+
+ This generates something like "(foo, bar=None)".
+ """
+ return inspect.formatargspec(*inspect.getargspec(func))
+
+ def test__pretty_needs_read_lock(self):
+ """Test that _pretty_needs_read_lock generates a nice wrapper."""
+
+ @decorators._pretty_needs_read_lock
+ def my_function(foo, bar, baz=None, biz=1):
+ """Just a function that supplies several arguments."""
+
+ self.assertEqual('my_function', my_function.__name__)
+ self.assertEqual('my_function_read_locked',
+ my_function.func_code.co_name)
+ self.assertEqual('(foo, bar, baz=None, biz=1)',
+ self.get_formatted_args(my_function))
+ self.assertDocstring(
+ 'Just a function that supplies several arguments.', my_function)
+
+ def test__fast_needs_read_lock(self):
+ """Test the output of _fast_needs_read_lock."""
+
+ @decorators._fast_needs_read_lock
+ def my_function(foo, bar, baz=None, biz=1):
+ """Just a function that supplies several arguments."""
+
+ self.assertEqual('my_function', my_function.__name__)
+ self.assertEqual('read_locked', my_function.func_code.co_name)
+ self.assertEqual('(self, *args, **kwargs)',
+ self.get_formatted_args(my_function))
+ self.assertDocstring(
+ 'Just a function that supplies several arguments.', my_function)
+
+ def test__pretty_needs_write_lock(self):
+ """Test that _pretty_needs_write_lock generates a nice wrapper."""
+
+ @decorators._pretty_needs_write_lock
+ def my_function(foo, bar, baz=None, biz=1):
+ """Just a function that supplies several arguments."""
+
+ self.assertEqual('my_function', my_function.__name__)
+ self.assertEqual('my_function_write_locked',
+ my_function.func_code.co_name)
+ self.assertEqual('(foo, bar, baz=None, biz=1)',
+ self.get_formatted_args(my_function))
+ self.assertDocstring(
+ 'Just a function that supplies several arguments.', my_function)
+
+ def test__fast_needs_write_lock(self):
+ """Test the output of _fast_needs_write_lock."""
+
+ @decorators._fast_needs_write_lock
+ def my_function(foo, bar, baz=None, biz=1):
+ """Just a function that supplies several arguments."""
+
+ self.assertEqual('my_function', my_function.__name__)
+ self.assertEqual('write_locked', my_function.func_code.co_name)
+ self.assertEqual('(self, *args, **kwargs)',
+ self.get_formatted_args(my_function))
+ self.assertDocstring(
+ 'Just a function that supplies several arguments.', my_function)
+
+ def test_use_decorators(self):
+ """Test that you can switch the type of the decorators."""
+ cur_read = decorators.needs_read_lock
+ cur_write = decorators.needs_write_lock
+ try:
+ decorators.use_fast_decorators()
+ self.assertIs(decorators._fast_needs_read_lock,
+ decorators.needs_read_lock)
+ self.assertIs(decorators._fast_needs_write_lock,
+ decorators.needs_write_lock)
+
+ decorators.use_pretty_decorators()
+ self.assertIs(decorators._pretty_needs_read_lock,
+ decorators.needs_read_lock)
+ self.assertIs(decorators._pretty_needs_write_lock,
+ decorators.needs_write_lock)
+
+ # One more switch to make sure it wasn't just good luck that the
+ # functions pointed to the correct version
+ decorators.use_fast_decorators()
+ self.assertIs(decorators._fast_needs_read_lock,
+ decorators.needs_read_lock)
+ self.assertIs(decorators._fast_needs_write_lock,
+ decorators.needs_write_lock)
+ finally:
+ decorators.needs_read_lock = cur_read
+ decorators.needs_write_lock = cur_write
+
+
+class TestOnlyRaisesDecorator(TestCase):
+
+ def raise_ZeroDivisionError(self):
+ 1/0
+
+ def test_raises_approved_error(self):
+ decorator = decorators.only_raises(ZeroDivisionError)
+ decorated_meth = decorator(self.raise_ZeroDivisionError)
+ self.assertRaises(ZeroDivisionError, decorated_meth)
+
+ def test_quietly_logs_unapproved_errors(self):
+ decorator = decorators.only_raises(IOError)
+ decorated_meth = decorator(self.raise_ZeroDivisionError)
+ self.assertLogsError(ZeroDivisionError, decorated_meth)
+
+
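
The tests above fix the contract these decorators must honour: the wrapped method runs between lock_read()/lock_write() and unlock(), unlock() is called even when the method raises, and an error from unlock() must not mask an error raised by the method itself. A minimal sketch of a read-lock decorator with that behaviour (illustrative only; the real decorators also come in "fast" and "pretty" variants that differ in how the wrapper is generated):

    import sys

    def needs_read_lock_sketch(unbound):
        """Wrap unbound so it runs under self's read lock."""
        def read_locked(self, *args, **kwargs):
            self.lock_read()
            try:
                result = unbound(self, *args, **kwargs)
            except:
                exc_info = sys.exc_info()
                try:
                    self.unlock()
                except:
                    pass  # the method's error wins over an unlock failure
                raise exc_info[0], exc_info[1], exc_info[2]
            self.unlock()
            return result
        read_locked.__name__ = unbound.__name__
        read_locked.__doc__ = unbound.__doc__
        return read_locked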
diff --git a/bzrlib/tests/test_delta.py b/bzrlib/tests/test_delta.py
new file mode 100644
index 0000000..779582c
--- /dev/null
+++ b/bzrlib/tests/test_delta.py
@@ -0,0 +1,363 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+from cStringIO import StringIO
+
+from bzrlib import (
+ delta as _mod_delta,
+ revision as _mod_revision,
+ tests,
+ )
+
+
+class InstrumentedReporter(object):
+ def __init__(self):
+ self.calls = []
+
+ def report(self, file_id, path, versioned, renamed, modified, exe_change,
+ kind):
+ self.calls.append((file_id, path, versioned, renamed, modified,
+ exe_change, kind))
+
+
+class TestReportChanges(tests.TestCase):
+ """Test the new change reporting infrastructure"""
+
+ def assertReport(self, expected, file_id='fid', path='path',
+ versioned_change='unchanged', renamed=False,
+ modified='unchanged', exe_change=False,
+ kind=('file', 'file'), old_path=None,
+ unversioned_filter=None, view_info=None):
+ if expected is None:
+ expected_lines = None
+ else:
+ expected_lines = [expected]
+ self.assertReportLines(expected_lines, file_id, path,
+ versioned_change, renamed,
+ modified, exe_change,
+ kind, old_path,
+ unversioned_filter, view_info)
+
+ def assertReportLines(self, expected_lines, file_id='fid', path='path',
+ versioned_change='unchanged', renamed=False,
+ modified='unchanged', exe_change=False,
+ kind=('file', 'file'), old_path=None,
+ unversioned_filter=None, view_info=None):
+ result = []
+ def result_line(format, *args):
+ result.append(format % args)
+ reporter = _mod_delta._ChangeReporter(result_line,
+ unversioned_filter=unversioned_filter, view_info=view_info)
+ reporter.report(file_id, (old_path, path), versioned_change, renamed,
+ modified, exe_change, kind)
+ if expected_lines is not None:
+ self.assertEqualDiff('\n'.join(expected_lines), '\n'.join(result))
+ else:
+ self.assertEqual([], result)
+
+ def test_rename(self):
+ self.assertReport('R old => path', renamed=True, old_path='old')
+ self.assertReport(' path')
+ self.assertReport('RN old => path', renamed=True, old_path='old',
+ modified='created', kind=(None, 'file'))
+
+ def test_kind(self):
+ self.assertReport(' K path => path/', modified='kind changed',
+ kind=('file', 'directory'), old_path='path')
+ self.assertReport(' K path/ => path', modified='kind changed',
+ kind=('directory', 'file'), old_path='old')
+ self.assertReport('RK old => path/', renamed=True,
+ modified='kind changed',
+ kind=('file', 'directory'), old_path='old')
+
+    def test_new(self):
+ self.assertReport(' N path/', modified='created',
+ kind=(None, 'directory'))
+ self.assertReport('+ path/', versioned_change='added',
+ modified='unchanged', kind=(None, 'directory'))
+ self.assertReport('+ path', versioned_change='added',
+ modified='unchanged', kind=(None, None))
+ self.assertReport('+N path/', versioned_change='added',
+ modified='created', kind=(None, 'directory'))
+ self.assertReport('+M path/', versioned_change='added',
+ modified='modified', kind=(None, 'directory'))
+
+ def test_removal(self):
+ self.assertReport(' D path/', modified='deleted',
+ kind=('directory', None), old_path='old')
+ self.assertReport('- path/', versioned_change='removed',
+ old_path='path',
+ kind=(None, 'directory'))
+ self.assertReport('-D path', versioned_change='removed',
+ old_path='path',
+ modified='deleted', kind=('file', 'directory'))
+
+ def test_modification(self):
+ self.assertReport(' M path', modified='modified')
+ self.assertReport(' M* path', modified='modified', exe_change=True)
+
+ def test_unversioned(self):
+ # by default any unversioned file is output
+ self.assertReport('? subdir/foo~', file_id=None, path='subdir/foo~',
+ old_path=None, versioned_change='unversioned',
+ renamed=False, modified='created', exe_change=False,
+ kind=(None, 'file'))
+ # but we can choose to filter these. Probably that should be done
+ # close to the tree, but this is a reasonable starting point.
+ self.assertReport(None, file_id=None, path='subdir/foo~',
+ old_path=None, versioned_change='unversioned',
+ renamed=False, modified='created', exe_change=False,
+ kind=(None, 'file'), unversioned_filter=lambda x:True)
+
+ def test_missing(self):
+ self.assertReport('+! missing.c', file_id=None, path='missing.c',
+ old_path=None, versioned_change='added',
+ renamed=False, modified='missing', exe_change=False,
+ kind=(None, None))
+
+ def test_view_filtering(self):
+        # If a file is within the view, it should appear in the output
+ expected_lines = [
+ "Operating on whole tree but only reporting on 'my' view.",
+ " M path"]
+ self.assertReportLines(expected_lines, modified='modified',
+ view_info=('my',['path']))
+        # If a file is outside the view, it should not appear in the output
+ expected_lines = [
+ "Operating on whole tree but only reporting on 'my' view."]
+ self.assertReportLines(expected_lines, modified='modified',
+ path="foo", view_info=('my',['path']))
+
+ def assertChangesEqual(self,
+ file_id='fid',
+ paths=('path', 'path'),
+ content_change=False,
+ versioned=(True, True),
+ parent_id=('pid', 'pid'),
+ name=('name', 'name'),
+ kind=('file', 'file'),
+ executable=(False, False),
+ versioned_change='unchanged',
+ renamed=False,
+ modified='unchanged',
+ exe_change=False):
+ reporter = InstrumentedReporter()
+ _mod_delta.report_changes([(file_id, paths, content_change, versioned,
+ parent_id, name, kind, executable)], reporter)
+ output = reporter.calls[0]
+ self.assertEqual(file_id, output[0])
+ self.assertEqual(paths, output[1])
+ self.assertEqual(versioned_change, output[2])
+ self.assertEqual(renamed, output[3])
+ self.assertEqual(modified, output[4])
+ self.assertEqual(exe_change, output[5])
+ self.assertEqual(kind, output[6])
+
+ def test_report_changes(self):
+ """Test change detection of report_changes"""
+        # Ensure no changes are detected by default
+ self.assertChangesEqual(modified='unchanged', renamed=False,
+ versioned_change='unchanged',
+ exe_change=False)
+ self.assertChangesEqual(modified='kind changed',
+ kind=('file', 'directory'))
+ self.assertChangesEqual(modified='created', kind=(None, 'directory'))
+ self.assertChangesEqual(modified='deleted', kind=('directory', None))
+ self.assertChangesEqual(content_change=True, modified='modified')
+ self.assertChangesEqual(renamed=True, name=('old', 'new'))
+ self.assertChangesEqual(renamed=True,
+ parent_id=('old-parent', 'new-parent'))
+ self.assertChangesEqual(versioned_change='added',
+ versioned=(False, True))
+ self.assertChangesEqual(versioned_change='removed',
+ versioned=(True, False))
+ # execute bit is only detected as "changed" if the file is and was
+ # a regular file.
+ self.assertChangesEqual(exe_change=True, executable=(True, False))
+ self.assertChangesEqual(exe_change=False, executable=(True, False),
+ kind=('directory', 'directory'))
+ self.assertChangesEqual(exe_change=False, modified='kind changed',
+ executable=(False, True),
+ kind=('directory', 'file'))
+ self.assertChangesEqual(parent_id=('pid', None))
+
+ # Now make sure they all work together
+ self.assertChangesEqual(versioned_change='removed',
+ modified='deleted', versioned=(True, False),
+ kind=('directory', None))
+ self.assertChangesEqual(versioned_change='removed',
+ modified='created', versioned=(True, False),
+ kind=(None, 'file'))
+ self.assertChangesEqual(versioned_change='removed',
+ modified='modified', renamed=True,
+ exe_change=True, versioned=(True, False),
+ content_change=True, name=('old', 'new'),
+ executable=(False, True))
+
+ def test_report_unversioned(self):
+ """Unversioned entries are reported well."""
+ self.assertChangesEqual(file_id=None, paths=(None, 'full/path'),
+ content_change=True,
+ versioned=(False, False),
+ parent_id=(None, None),
+ name=(None, 'path'),
+ kind=(None, 'file'),
+ executable=(None, False),
+ versioned_change='unversioned',
+ renamed=False,
+ modified='created',
+ exe_change=False)
+
+
+class TestChangesFrom(tests.TestCaseWithTransport):
+
+ def show_string(self, delta, *args, **kwargs):
+ to_file = StringIO()
+ _mod_delta.report_delta(to_file, delta, *args, **kwargs)
+ return to_file.getvalue()
+
+ def test_kind_change(self):
+ """Doing a status when a file has changed kind should work"""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['filename'])
+ tree.add('filename', 'file-id')
+ tree.commit('added filename')
+ os.unlink('filename')
+ self.build_tree(['filename/'])
+ delta = tree.changes_from(tree.basis_tree())
+ self.assertEqual([('filename', 'file-id', 'file', 'directory')],
+ delta.kind_changed)
+ self.assertEqual([], delta.added)
+ self.assertEqual([], delta.removed)
+ self.assertEqual([], delta.renamed)
+ self.assertEqual([], delta.modified)
+ self.assertEqual([], delta.unchanged)
+ self.assertTrue(delta.has_changed())
+ self.assertTrue(delta.touches_file_id('file-id'))
+ self.assertEqual('kind changed:\n filename (file => directory)\n',
+ self.show_string(delta))
+ other_delta = _mod_delta.TreeDelta()
+ self.assertNotEqual(other_delta, delta)
+ other_delta.kind_changed = [('filename', 'file-id', 'file',
+ 'symlink')]
+ self.assertNotEqual(other_delta, delta)
+ other_delta.kind_changed = [('filename', 'file-id', 'file',
+ 'directory')]
+ self.assertEqual(other_delta, delta)
+ self.assertEqualDiff("TreeDelta(added=[], removed=[], renamed=[],"
+ " kind_changed=[(u'filename', 'file-id', 'file', 'directory')],"
+ " modified=[], unchanged=[], unversioned=[])", repr(delta))
+ self.assertEqual('K filename (file => directory) file-id\n',
+ self.show_string(delta, show_ids=True,
+ short_status=True))
+
+ tree.rename_one('filename', 'dirname')
+ delta = tree.changes_from(tree.basis_tree())
+ self.assertEqual([], delta.kind_changed)
+ # This loses the fact that kind changed, remembering it as a
+ # modification
+ self.assertEqual([('filename', 'dirname', 'file-id', 'directory',
+ True, False)], delta.renamed)
+ self.assertTrue(delta.has_changed())
+ self.assertTrue(delta.touches_file_id('file-id'))
+
+
+class TestDeltaShow(tests.TestCaseWithTransport):
+
+ def _get_delta(self):
+ # We build the delta from a real tree to avoid depending on internal
+ # implementation details.
+ wt = self.make_branch_and_tree('branch')
+ self.build_tree_contents([('branch/f1', '1\n'),
+ ('branch/f2', '2\n'),
+ ('branch/f3', '3\n'),
+ ('branch/f4', '4\n'),
+ ('branch/f5', '5\n'),
+ ('branch/dir/',),
+ ])
+ wt.add(['f1', 'f2', 'f3', 'f4', 'dir'],
+ ['f1-id', 'f2-id', 'f3-id', 'f4-id', 'dir-id'])
+ wt.commit('commit one', rev_id='1')
+
+        # TODO: add rename, removed, etc. here?
+ wt.add('f5')
+ os.unlink('branch/f5')
+
+ long_status = """added:
+ dir/
+ f1
+ f2
+ f3
+ f4
+missing:
+ f5
+"""
+ short_status = """A dir/
+A f1
+A f2
+A f3
+A f4
+! f5
+"""
+
+ repo = wt.branch.repository
+ d = wt.changes_from(repo.revision_tree(_mod_revision.NULL_REVISION))
+ return d, long_status, short_status
+
+ def test_delta_show_short_status_no_filter(self):
+ d, long_status, short_status = self._get_delta()
+ out = StringIO()
+ _mod_delta.report_delta(out, d, short_status=True)
+ self.assertEquals(short_status, out.getvalue())
+
+ def test_delta_show_long_status_no_filter(self):
+ d, long_status, short_status = self._get_delta()
+ out = StringIO()
+ _mod_delta.report_delta(out, d, short_status=False)
+ self.assertEquals(long_status, out.getvalue())
+
+ def test_delta_show_no_filter(self):
+ d, long_status, short_status = self._get_delta()
+ out = StringIO()
+ def not_a_filter(path, file_id):
+ return True
+ _mod_delta.report_delta(out, d, short_status=True, filter=not_a_filter)
+ self.assertEquals(short_status, out.getvalue())
+
+ def test_delta_show_short_status_single_file_filter(self):
+ d, long_status, short_status = self._get_delta()
+ out = StringIO()
+ def only_f2(path, file_id):
+ return path == 'f2'
+ _mod_delta.report_delta(out, d, short_status=True, filter=only_f2)
+ self.assertEquals("A f2\n", out.getvalue())
+
+ def test_delta_show_long_status_single_file_filter(self):
+ d, long_status, short_status = self._get_delta()
+ out = StringIO()
+ def only_f2(path, file_id):
+ return path == 'f2'
+ _mod_delta.report_delta(out, d, short_status=False, filter=only_f2)
+ self.assertEquals("added:\n f2\n", out.getvalue())
+
+ def test_delta_show_short_status_single_file_id_filter(self):
+ d, long_status, short_status = self._get_delta()
+ out = StringIO()
+ def only_f2_id(path, file_id):
+ return file_id == 'f2-id'
+ _mod_delta.report_delta(out, d, short_status=True, filter=only_f2_id)
+ self.assertEquals("A f2\n", out.getvalue())
+
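
The filter tests above show the shape of the filter callback: report_delta calls it with (path, file_id) and keeps only the entries for which it returns True. A small hypothetical usage sketch built from the same calls the tests use (the helper name and the .py filter are illustrative, not part of this module):

    from cStringIO import StringIO

    from bzrlib import delta as _mod_delta

    def show_python_changes(tree):
        """Return short-status output for changed .py files only."""
        out = StringIO()
        d = tree.changes_from(tree.basis_tree())
        def only_python(path, file_id):
            return path.endswith('.py')
        _mod_delta.report_delta(out, d, short_status=True,
                                filter=only_python)
        return out.getvalue()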
diff --git a/bzrlib/tests/test_diff.py b/bzrlib/tests/test_diff.py
new file mode 100644
index 0000000..c6a9d63
--- /dev/null
+++ b/bzrlib/tests/test_diff.py
@@ -0,0 +1,1497 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+from cStringIO import StringIO
+import subprocess
+import sys
+import tempfile
+
+from bzrlib import (
+ diff,
+ errors,
+ osutils,
+ patiencediff,
+ _patiencediff_py,
+ revision as _mod_revision,
+ revisionspec,
+ revisiontree,
+ tests,
+ transform,
+ )
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests import features, EncodingAdapter
+from bzrlib.tests.blackbox.test_diff import subst_dates
+from bzrlib.tests import (
+ features,
+ )
+
+
+def udiff_lines(old, new, allow_binary=False):
+ output = StringIO()
+ diff.internal_diff('old', old, 'new', new, output, allow_binary)
+ output.seek(0, 0)
+ return output.readlines()
+
+
+def external_udiff_lines(old, new, use_stringio=False):
+ if use_stringio:
+ # StringIO has no fileno, so it tests a different codepath
+ output = StringIO()
+ else:
+ output = tempfile.TemporaryFile()
+ try:
+ diff.external_diff('old', old, 'new', new, output, diff_opts=['-u'])
+ except errors.NoDiff:
+ raise tests.TestSkipped('external "diff" not present to test')
+ output.seek(0, 0)
+ lines = output.readlines()
+ output.close()
+ return lines
+
+
+class TestDiff(tests.TestCase):
+
+ def test_add_nl(self):
+ """diff generates a valid diff for patches that add a newline"""
+ lines = udiff_lines(['boo'], ['boo\n'])
+ self.check_patch(lines)
+ self.assertEquals(lines[4], '\\ No newline at end of file\n')
+ ## "expected no-nl, got %r" % lines[4]
+
+ def test_add_nl_2(self):
+ """diff generates a valid diff for patches that change last line and
+ add a newline.
+ """
+ lines = udiff_lines(['boo'], ['goo\n'])
+ self.check_patch(lines)
+ self.assertEquals(lines[4], '\\ No newline at end of file\n')
+ ## "expected no-nl, got %r" % lines[4]
+
+ def test_remove_nl(self):
+ """diff generates a valid diff for patches that change last line and
+ add a newline.
+ """
+ lines = udiff_lines(['boo\n'], ['boo'])
+ self.check_patch(lines)
+ self.assertEquals(lines[5], '\\ No newline at end of file\n')
+ ## "expected no-nl, got %r" % lines[5]
+
+ def check_patch(self, lines):
+ self.assert_(len(lines) > 1)
+ ## "Not enough lines for a file header for patch:\n%s" % "".join(lines)
+        self.assert_(lines[0].startswith('---'))
+        ## 'No orig line for patch:\n%s' % "".join(lines)
+        self.assert_(lines[1].startswith('+++'))
+ ## 'No mod line for patch:\n%s' % "".join(lines)
+ self.assert_(len(lines) > 2)
+ ## "No hunks for patch:\n%s" % "".join(lines)
+ self.assert_(lines[2].startswith('@@'))
+ ## "No hunk header for patch:\n%s" % "".join(lines)
+ self.assert_('@@' in lines[2][2:])
+ ## "Unterminated hunk header for patch:\n%s" % "".join(lines)
+
+ def test_binary_lines(self):
+ empty = []
+ uni_lines = [1023 * 'a' + '\x00']
+        self.assertRaises(errors.BinaryFile, udiff_lines, uni_lines, empty)
+        self.assertRaises(errors.BinaryFile, udiff_lines, empty, uni_lines)
+        udiff_lines(uni_lines, empty, allow_binary=True)
+ udiff_lines(empty, uni_lines, allow_binary=True)
+
+ def test_external_diff(self):
+ lines = external_udiff_lines(['boo\n'], ['goo\n'])
+ self.check_patch(lines)
+ self.assertEqual('\n', lines[-1])
+
+ def test_external_diff_no_fileno(self):
+ # Make sure that we can handle not having a fileno, even
+ # if the diff is large
+ lines = external_udiff_lines(['boo\n']*10000,
+ ['goo\n']*10000,
+ use_stringio=True)
+ self.check_patch(lines)
+
+ def test_external_diff_binary_lang_c(self):
+ for lang in ('LANG', 'LC_ALL', 'LANGUAGE'):
+ self.overrideEnv(lang, 'C')
+ lines = external_udiff_lines(['\x00foobar\n'], ['foo\x00bar\n'])
+ # Older versions of diffutils say "Binary files", newer
+ # versions just say "Files".
+ self.assertContainsRe(lines[0], '(Binary f|F)iles old and new differ\n')
+ self.assertEquals(lines[1:], ['\n'])
+
+ def test_no_external_diff(self):
+ """Check that NoDiff is raised when diff is not available"""
+ # Make sure no 'diff' command is available
+ # XXX: Weird, using None instead of '' breaks the test -- vila 20101216
+ self.overrideEnv('PATH', '')
+ self.assertRaises(errors.NoDiff, diff.external_diff,
+ 'old', ['boo\n'], 'new', ['goo\n'],
+ StringIO(), diff_opts=['-u'])
+
+ def test_internal_diff_default(self):
+ # Default internal diff encoding is utf8
+ output = StringIO()
+ diff.internal_diff(u'old_\xb5', ['old_text\n'],
+ u'new_\xe5', ['new_text\n'], output)
+ lines = output.getvalue().splitlines(True)
+ self.check_patch(lines)
+ self.assertEquals(['--- old_\xc2\xb5\n',
+ '+++ new_\xc3\xa5\n',
+ '@@ -1,1 +1,1 @@\n',
+ '-old_text\n',
+ '+new_text\n',
+ '\n',
+ ]
+ , lines)
+
+ def test_internal_diff_utf8(self):
+ output = StringIO()
+ diff.internal_diff(u'old_\xb5', ['old_text\n'],
+ u'new_\xe5', ['new_text\n'], output,
+ path_encoding='utf8')
+ lines = output.getvalue().splitlines(True)
+ self.check_patch(lines)
+ self.assertEquals(['--- old_\xc2\xb5\n',
+ '+++ new_\xc3\xa5\n',
+ '@@ -1,1 +1,1 @@\n',
+ '-old_text\n',
+ '+new_text\n',
+ '\n',
+ ]
+ , lines)
+
+ def test_internal_diff_iso_8859_1(self):
+ output = StringIO()
+ diff.internal_diff(u'old_\xb5', ['old_text\n'],
+ u'new_\xe5', ['new_text\n'], output,
+ path_encoding='iso-8859-1')
+ lines = output.getvalue().splitlines(True)
+ self.check_patch(lines)
+ self.assertEquals(['--- old_\xb5\n',
+ '+++ new_\xe5\n',
+ '@@ -1,1 +1,1 @@\n',
+ '-old_text\n',
+ '+new_text\n',
+ '\n',
+ ]
+ , lines)
+
+ def test_internal_diff_no_content(self):
+ output = StringIO()
+ diff.internal_diff(u'old', [], u'new', [], output)
+ self.assertEqual('', output.getvalue())
+
+ def test_internal_diff_no_changes(self):
+ output = StringIO()
+ diff.internal_diff(u'old', ['text\n', 'contents\n'],
+ u'new', ['text\n', 'contents\n'],
+ output)
+ self.assertEqual('', output.getvalue())
+
+ def test_internal_diff_returns_bytes(self):
+ import StringIO
+ output = StringIO.StringIO()
+ diff.internal_diff(u'old_\xb5', ['old_text\n'],
+ u'new_\xe5', ['new_text\n'], output)
+ self.assertIsInstance(output.getvalue(), str,
+ 'internal_diff should return bytestrings')
+
+
+class TestDiffFiles(tests.TestCaseInTempDir):
+
+ def test_external_diff_binary(self):
+ """The output when using external diff should use diff's i18n error"""
+ # Make sure external_diff doesn't fail in the current LANG
+ lines = external_udiff_lines(['\x00foobar\n'], ['foo\x00bar\n'])
+
+ cmd = ['diff', '-u', '--binary', 'old', 'new']
+ with open('old', 'wb') as f: f.write('\x00foobar\n')
+ with open('new', 'wb') as f: f.write('foo\x00bar\n')
+ pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ out, err = pipe.communicate()
+ # Diff returns '2' on Binary files.
+ self.assertEqual(2, pipe.returncode)
+ # We should output whatever diff tells us, plus a trailing newline
+ self.assertEqual(out.splitlines(True) + ['\n'], lines)
+
+
+def get_diff_as_string(tree1, tree2, specific_files=None, working_tree=None):
+ output = StringIO()
+ if working_tree is not None:
+ extra_trees = (working_tree,)
+ else:
+ extra_trees = ()
+ diff.show_diff_trees(tree1, tree2, output,
+ specific_files=specific_files,
+ extra_trees=extra_trees, old_label='old/',
+ new_label='new/')
+ return output.getvalue()
+
+
+class TestDiffDates(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestDiffDates, self).setUp()
+ self.wt = self.make_branch_and_tree('.')
+ self.b = self.wt.branch
+ self.build_tree_contents([
+ ('file1', 'file1 contents at rev 1\n'),
+ ('file2', 'file2 contents at rev 1\n')
+ ])
+ self.wt.add(['file1', 'file2'])
+ self.wt.commit(
+ message='Revision 1',
+ timestamp=1143849600, # 2006-04-01 00:00:00 UTC
+ timezone=0,
+ rev_id='rev-1')
+ self.build_tree_contents([('file1', 'file1 contents at rev 2\n')])
+ self.wt.commit(
+ message='Revision 2',
+ timestamp=1143936000, # 2006-04-02 00:00:00 UTC
+ timezone=28800,
+ rev_id='rev-2')
+ self.build_tree_contents([('file2', 'file2 contents at rev 3\n')])
+ self.wt.commit(
+ message='Revision 3',
+ timestamp=1144022400, # 2006-04-03 00:00:00 UTC
+ timezone=-3600,
+ rev_id='rev-3')
+ self.wt.remove(['file2'])
+ self.wt.commit(
+ message='Revision 4',
+ timestamp=1144108800, # 2006-04-04 00:00:00 UTC
+ timezone=0,
+ rev_id='rev-4')
+ self.build_tree_contents([
+ ('file1', 'file1 contents in working tree\n')
+ ])
+ # set the date stamps for files in the working tree to known values
+ os.utime('file1', (1144195200, 1144195200)) # 2006-04-05 00:00:00 UTC
+
+ def test_diff_rev_tree_working_tree(self):
+ output = get_diff_as_string(self.wt.basis_tree(), self.wt)
+ # note that the date for old/file1 is from rev 2 rather than from
+ # the basis revision (rev 4)
+ self.assertEqualDiff(output, '''\
+=== modified file 'file1'
+--- old/file1\t2006-04-02 00:00:00 +0000
++++ new/file1\t2006-04-05 00:00:00 +0000
+@@ -1,1 +1,1 @@
+-file1 contents at rev 2
++file1 contents in working tree
+
+''')
+
+ def test_diff_rev_tree_rev_tree(self):
+ tree1 = self.b.repository.revision_tree('rev-2')
+ tree2 = self.b.repository.revision_tree('rev-3')
+ output = get_diff_as_string(tree1, tree2)
+ self.assertEqualDiff(output, '''\
+=== modified file 'file2'
+--- old/file2\t2006-04-01 00:00:00 +0000
++++ new/file2\t2006-04-03 00:00:00 +0000
+@@ -1,1 +1,1 @@
+-file2 contents at rev 1
++file2 contents at rev 3
+
+''')
+
+ def test_diff_add_files(self):
+ tree1 = self.b.repository.revision_tree(_mod_revision.NULL_REVISION)
+ tree2 = self.b.repository.revision_tree('rev-1')
+ output = get_diff_as_string(tree1, tree2)
+ # the files have the epoch time stamp for the tree in which
+ # they don't exist.
+ self.assertEqualDiff(output, '''\
+=== added file 'file1'
+--- old/file1\t1970-01-01 00:00:00 +0000
++++ new/file1\t2006-04-01 00:00:00 +0000
+@@ -0,0 +1,1 @@
++file1 contents at rev 1
+
+=== added file 'file2'
+--- old/file2\t1970-01-01 00:00:00 +0000
++++ new/file2\t2006-04-01 00:00:00 +0000
+@@ -0,0 +1,1 @@
++file2 contents at rev 1
+
+''')
+
+ def test_diff_remove_files(self):
+ tree1 = self.b.repository.revision_tree('rev-3')
+ tree2 = self.b.repository.revision_tree('rev-4')
+ output = get_diff_as_string(tree1, tree2)
+ # the file has the epoch time stamp for the tree in which
+ # it doesn't exist.
+ self.assertEqualDiff(output, '''\
+=== removed file 'file2'
+--- old/file2\t2006-04-03 00:00:00 +0000
++++ new/file2\t1970-01-01 00:00:00 +0000
+@@ -1,1 +0,0 @@
+-file2 contents at rev 3
+
+''')
+
+ def test_show_diff_specified(self):
+ """A working tree filename can be used to identify a file"""
+ self.wt.rename_one('file1', 'file1b')
+ old_tree = self.b.repository.revision_tree('rev-1')
+ new_tree = self.b.repository.revision_tree('rev-4')
+ out = get_diff_as_string(old_tree, new_tree, specific_files=['file1b'],
+ working_tree=self.wt)
+ self.assertContainsRe(out, 'file1\t')
+
+ def test_recursive_diff(self):
+ """Children of directories are matched"""
+ os.mkdir('dir1')
+ os.mkdir('dir2')
+ self.wt.add(['dir1', 'dir2'])
+ self.wt.rename_one('file1', 'dir1/file1')
+ old_tree = self.b.repository.revision_tree('rev-1')
+ new_tree = self.b.repository.revision_tree('rev-4')
+ out = get_diff_as_string(old_tree, new_tree, specific_files=['dir1'],
+ working_tree=self.wt)
+ self.assertContainsRe(out, 'file1\t')
+ out = get_diff_as_string(old_tree, new_tree, specific_files=['dir2'],
+ working_tree=self.wt)
+ self.assertNotContainsRe(out, 'file1\t')
+
+
+class TestShowDiffTrees(tests.TestCaseWithTransport):
+ """Direct tests for show_diff_trees"""
+
+ def test_modified_file(self):
+ """Test when a file is modified."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'contents\n')])
+ tree.add(['file'], ['file-id'])
+ tree.commit('one', rev_id='rev-1')
+
+ self.build_tree_contents([('tree/file', 'new contents\n')])
+ d = get_diff_as_string(tree.basis_tree(), tree)
+ self.assertContainsRe(d, "=== modified file 'file'\n")
+ self.assertContainsRe(d, '--- old/file\t')
+ self.assertContainsRe(d, '\\+\\+\\+ new/file\t')
+ self.assertContainsRe(d, '-contents\n'
+ '\\+new contents\n')
+
+ def test_modified_file_in_renamed_dir(self):
+ """Test when a file is modified in a renamed directory."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/dir/'])
+ self.build_tree_contents([('tree/dir/file', 'contents\n')])
+ tree.add(['dir', 'dir/file'], ['dir-id', 'file-id'])
+ tree.commit('one', rev_id='rev-1')
+
+ tree.rename_one('dir', 'other')
+ self.build_tree_contents([('tree/other/file', 'new contents\n')])
+ d = get_diff_as_string(tree.basis_tree(), tree)
+ self.assertContainsRe(d, "=== renamed directory 'dir' => 'other'\n")
+ self.assertContainsRe(d, "=== modified file 'other/file'\n")
+ # XXX: This is technically incorrect, because it used to be at another
+ # location. What to do?
+ self.assertContainsRe(d, '--- old/dir/file\t')
+ self.assertContainsRe(d, '\\+\\+\\+ new/other/file\t')
+ self.assertContainsRe(d, '-contents\n'
+ '\\+new contents\n')
+
+ def test_renamed_directory(self):
+ """Test when only a directory is only renamed."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/dir/'])
+ self.build_tree_contents([('tree/dir/file', 'contents\n')])
+ tree.add(['dir', 'dir/file'], ['dir-id', 'file-id'])
+ tree.commit('one', rev_id='rev-1')
+
+ tree.rename_one('dir', 'newdir')
+ d = get_diff_as_string(tree.basis_tree(), tree)
+ # Renaming a directory should be a single "you renamed this dir" even
+ # when there are files inside.
+ self.assertEqual(d, "=== renamed directory 'dir' => 'newdir'\n")
+
+ def test_renamed_file(self):
+ """Test when a file is only renamed."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'contents\n')])
+ tree.add(['file'], ['file-id'])
+ tree.commit('one', rev_id='rev-1')
+
+ tree.rename_one('file', 'newname')
+ d = get_diff_as_string(tree.basis_tree(), tree)
+ self.assertContainsRe(d, "=== renamed file 'file' => 'newname'\n")
+ # We shouldn't have a --- or +++ line, because there is no content
+ # change
+ self.assertNotContainsRe(d, '---')
+
+ def test_renamed_and_modified_file(self):
+ """Test when a file is only renamed."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'contents\n')])
+ tree.add(['file'], ['file-id'])
+ tree.commit('one', rev_id='rev-1')
+
+ tree.rename_one('file', 'newname')
+ self.build_tree_contents([('tree/newname', 'new contents\n')])
+ d = get_diff_as_string(tree.basis_tree(), tree)
+ self.assertContainsRe(d, "=== renamed file 'file' => 'newname'\n")
+ self.assertContainsRe(d, '--- old/file\t')
+ self.assertContainsRe(d, '\\+\\+\\+ new/newname\t')
+ self.assertContainsRe(d, '-contents\n'
+ '\\+new contents\n')
+
+
+ def test_internal_diff_exec_property(self):
+ tree = self.make_branch_and_tree('tree')
+
+ tt = transform.TreeTransform(tree)
+ tt.new_file('a', tt.root, 'contents\n', 'a-id', True)
+ tt.new_file('b', tt.root, 'contents\n', 'b-id', False)
+ tt.new_file('c', tt.root, 'contents\n', 'c-id', True)
+ tt.new_file('d', tt.root, 'contents\n', 'd-id', False)
+ tt.new_file('e', tt.root, 'contents\n', 'control-e-id', True)
+ tt.new_file('f', tt.root, 'contents\n', 'control-f-id', False)
+ tt.apply()
+ tree.commit('one', rev_id='rev-1')
+
+ tt = transform.TreeTransform(tree)
+ tt.set_executability(False, tt.trans_id_file_id('a-id'))
+ tt.set_executability(True, tt.trans_id_file_id('b-id'))
+ tt.set_executability(False, tt.trans_id_file_id('c-id'))
+ tt.set_executability(True, tt.trans_id_file_id('d-id'))
+ tt.apply()
+ tree.rename_one('c', 'new-c')
+ tree.rename_one('d', 'new-d')
+
+ d = get_diff_as_string(tree.basis_tree(), tree)
+
+ self.assertContainsRe(d, r"file 'a'.*\(properties changed:"
+ ".*\+x to -x.*\)")
+ self.assertContainsRe(d, r"file 'b'.*\(properties changed:"
+ ".*-x to \+x.*\)")
+ self.assertContainsRe(d, r"file 'c'.*\(properties changed:"
+ ".*\+x to -x.*\)")
+ self.assertContainsRe(d, r"file 'd'.*\(properties changed:"
+ ".*-x to \+x.*\)")
+ self.assertNotContainsRe(d, r"file 'e'")
+ self.assertNotContainsRe(d, r"file 'f'")
+
+ def test_binary_unicode_filenames(self):
+ """Test that contents of files are *not* encoded in UTF-8 when there
+ is a binary file in the diff.
+ """
+ # See https://bugs.launchpad.net/bugs/110092.
+ self.requireFeature(features.UnicodeFilenameFeature)
+
+ # This bug isn't triggered with cStringIO.
+ from StringIO import StringIO
+ tree = self.make_branch_and_tree('tree')
+ alpha, omega = u'\u03b1', u'\u03c9'
+ alpha_utf8, omega_utf8 = alpha.encode('utf8'), omega.encode('utf8')
+ self.build_tree_contents(
+ [('tree/' + alpha, chr(0)),
+ ('tree/' + omega,
+ ('The %s and the %s\n' % (alpha_utf8, omega_utf8)))])
+ tree.add([alpha], ['file-id'])
+ tree.add([omega], ['file-id-2'])
+ diff_content = StringIO()
+ diff.show_diff_trees(tree.basis_tree(), tree, diff_content)
+ d = diff_content.getvalue()
+ self.assertContainsRe(d, r"=== added file '%s'" % alpha_utf8)
+ self.assertContainsRe(d, "Binary files a/%s.*and b/%s.* differ\n"
+ % (alpha_utf8, alpha_utf8))
+ self.assertContainsRe(d, r"=== added file '%s'" % omega_utf8)
+ self.assertContainsRe(d, r"--- a/%s" % (omega_utf8,))
+ self.assertContainsRe(d, r"\+\+\+ b/%s" % (omega_utf8,))
+
+ def test_unicode_filename(self):
+ """Test when the filename are unicode."""
+ self.requireFeature(features.UnicodeFilenameFeature)
+
+ alpha, omega = u'\u03b1', u'\u03c9'
+ autf8, outf8 = alpha.encode('utf8'), omega.encode('utf8')
+
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/ren_'+alpha, 'contents\n')])
+ tree.add(['ren_'+alpha], ['file-id-2'])
+ self.build_tree_contents([('tree/del_'+alpha, 'contents\n')])
+ tree.add(['del_'+alpha], ['file-id-3'])
+ self.build_tree_contents([('tree/mod_'+alpha, 'contents\n')])
+ tree.add(['mod_'+alpha], ['file-id-4'])
+
+ tree.commit('one', rev_id='rev-1')
+
+ tree.rename_one('ren_'+alpha, 'ren_'+omega)
+ tree.remove('del_'+alpha)
+ self.build_tree_contents([('tree/add_'+alpha, 'contents\n')])
+ tree.add(['add_'+alpha], ['file-id'])
+ self.build_tree_contents([('tree/mod_'+alpha, 'contents_mod\n')])
+
+ d = get_diff_as_string(tree.basis_tree(), tree)
+ self.assertContainsRe(d,
+ "=== renamed file 'ren_%s' => 'ren_%s'\n"%(autf8, outf8))
+ self.assertContainsRe(d, "=== added file 'add_%s'"%autf8)
+ self.assertContainsRe(d, "=== modified file 'mod_%s'"%autf8)
+ self.assertContainsRe(d, "=== removed file 'del_%s'"%autf8)
+
+ def test_unicode_filename_path_encoding(self):
+ """Test for bug #382699: unicode filenames on Windows should be shown
+ in user encoding.
+ """
+ self.requireFeature(features.UnicodeFilenameFeature)
+ # The word 'test' in Russian
+ _russian_test = u'\u0422\u0435\u0441\u0442'
+ directory = _russian_test + u'/'
+ test_txt = _russian_test + u'.txt'
+ u1234 = u'\u1234.txt'
+
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([
+ (test_txt, 'foo\n'),
+ (u1234, 'foo\n'),
+ (directory, None),
+ ])
+ tree.add([test_txt, u1234, directory])
+
+ sio = StringIO()
+ diff.show_diff_trees(tree.basis_tree(), tree, sio,
+ path_encoding='cp1251')
+
+ output = subst_dates(sio.getvalue())
+ shouldbe = ('''\
+=== added directory '%(directory)s'
+=== added file '%(test_txt)s'
+--- a/%(test_txt)s\tYYYY-MM-DD HH:MM:SS +ZZZZ
++++ b/%(test_txt)s\tYYYY-MM-DD HH:MM:SS +ZZZZ
+@@ -0,0 +1,1 @@
++foo
+
+=== added file '?.txt'
+--- a/?.txt\tYYYY-MM-DD HH:MM:SS +ZZZZ
++++ b/?.txt\tYYYY-MM-DD HH:MM:SS +ZZZZ
+@@ -0,0 +1,1 @@
++foo
+
+''' % {'directory': _russian_test.encode('cp1251'),
+ 'test_txt': test_txt.encode('cp1251'),
+ })
+ self.assertEqualDiff(output, shouldbe)
+
+
+class DiffWasIs(diff.DiffPath):
+
+ def diff(self, file_id, old_path, new_path, old_kind, new_kind):
+ self.to_file.write('was: ')
+ self.to_file.write(self.old_tree.get_file(file_id).read())
+ self.to_file.write('is: ')
+ self.to_file.write(self.new_tree.get_file(file_id).read())
+ pass
+
+
+class TestDiffTree(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestDiffTree, self).setUp()
+ self.old_tree = self.make_branch_and_tree('old-tree')
+ self.old_tree.lock_write()
+ self.addCleanup(self.old_tree.unlock)
+ self.new_tree = self.make_branch_and_tree('new-tree')
+ self.new_tree.lock_write()
+ self.addCleanup(self.new_tree.unlock)
+ self.differ = diff.DiffTree(self.old_tree, self.new_tree, StringIO())
+
+ def test_diff_text(self):
+ self.build_tree_contents([('old-tree/olddir/',),
+ ('old-tree/olddir/oldfile', 'old\n')])
+ self.old_tree.add('olddir')
+ self.old_tree.add('olddir/oldfile', 'file-id')
+ self.build_tree_contents([('new-tree/newdir/',),
+ ('new-tree/newdir/newfile', 'new\n')])
+ self.new_tree.add('newdir')
+ self.new_tree.add('newdir/newfile', 'file-id')
+ differ = diff.DiffText(self.old_tree, self.new_tree, StringIO())
+ differ.diff_text('file-id', None, 'old label', 'new label')
+ self.assertEqual(
+ '--- old label\n+++ new label\n@@ -1,1 +0,0 @@\n-old\n\n',
+ differ.to_file.getvalue())
+ differ.to_file.seek(0)
+ differ.diff_text(None, 'file-id', 'old label', 'new label')
+ self.assertEqual(
+ '--- old label\n+++ new label\n@@ -0,0 +1,1 @@\n+new\n\n',
+ differ.to_file.getvalue())
+ differ.to_file.seek(0)
+ differ.diff_text('file-id', 'file-id', 'old label', 'new label')
+ self.assertEqual(
+ '--- old label\n+++ new label\n@@ -1,1 +1,1 @@\n-old\n+new\n\n',
+ differ.to_file.getvalue())
+
+ def test_diff_deletion(self):
+ self.build_tree_contents([('old-tree/file', 'contents'),
+ ('new-tree/file', 'contents')])
+ self.old_tree.add('file', 'file-id')
+ self.new_tree.add('file', 'file-id')
+ os.unlink('new-tree/file')
+ self.differ.show_diff(None)
+ self.assertContainsRe(self.differ.to_file.getvalue(), '-contents')
+
+ def test_diff_creation(self):
+ self.build_tree_contents([('old-tree/file', 'contents'),
+ ('new-tree/file', 'contents')])
+ self.old_tree.add('file', 'file-id')
+ self.new_tree.add('file', 'file-id')
+ os.unlink('old-tree/file')
+ self.differ.show_diff(None)
+ self.assertContainsRe(self.differ.to_file.getvalue(), '\+contents')
+
+ def test_diff_symlink(self):
+ differ = diff.DiffSymlink(self.old_tree, self.new_tree, StringIO())
+ differ.diff_symlink('old target', None)
+ self.assertEqual("=== target was 'old target'\n",
+ differ.to_file.getvalue())
+
+ differ = diff.DiffSymlink(self.old_tree, self.new_tree, StringIO())
+ differ.diff_symlink(None, 'new target')
+ self.assertEqual("=== target is 'new target'\n",
+ differ.to_file.getvalue())
+
+ differ = diff.DiffSymlink(self.old_tree, self.new_tree, StringIO())
+ differ.diff_symlink('old target', 'new target')
+ self.assertEqual("=== target changed 'old target' => 'new target'\n",
+ differ.to_file.getvalue())
+
+ def test_diff(self):
+ self.build_tree_contents([('old-tree/olddir/',),
+ ('old-tree/olddir/oldfile', 'old\n')])
+ self.old_tree.add('olddir')
+ self.old_tree.add('olddir/oldfile', 'file-id')
+ self.build_tree_contents([('new-tree/newdir/',),
+ ('new-tree/newdir/newfile', 'new\n')])
+ self.new_tree.add('newdir')
+ self.new_tree.add('newdir/newfile', 'file-id')
+ self.differ.diff('file-id', 'olddir/oldfile', 'newdir/newfile')
+ self.assertContainsRe(
+ self.differ.to_file.getvalue(),
+ r'--- olddir/oldfile.*\n\+\+\+ newdir/newfile.*\n\@\@ -1,1 \+1,1'
+ ' \@\@\n-old\n\+new\n\n')
+
+ def test_diff_kind_change(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.build_tree_contents([('old-tree/olddir/',),
+ ('old-tree/olddir/oldfile', 'old\n')])
+ self.old_tree.add('olddir')
+ self.old_tree.add('olddir/oldfile', 'file-id')
+ self.build_tree(['new-tree/newdir/'])
+ os.symlink('new', 'new-tree/newdir/newfile')
+ self.new_tree.add('newdir')
+ self.new_tree.add('newdir/newfile', 'file-id')
+ self.differ.diff('file-id', 'olddir/oldfile', 'newdir/newfile')
+ self.assertContainsRe(
+ self.differ.to_file.getvalue(),
+ r'--- olddir/oldfile.*\n\+\+\+ newdir/newfile.*\n\@\@ -1,1 \+0,0'
+ ' \@\@\n-old\n\n')
+ self.assertContainsRe(self.differ.to_file.getvalue(),
+ "=== target is u'new'\n")
+
+ def test_diff_directory(self):
+ self.build_tree(['new-tree/new-dir/'])
+ self.new_tree.add('new-dir', 'new-dir-id')
+ self.differ.diff('new-dir-id', None, 'new-dir')
+ self.assertEqual(self.differ.to_file.getvalue(), '')
+
+ def create_old_new(self):
+ self.build_tree_contents([('old-tree/olddir/',),
+ ('old-tree/olddir/oldfile', 'old\n')])
+ self.old_tree.add('olddir')
+ self.old_tree.add('olddir/oldfile', 'file-id')
+ self.build_tree_contents([('new-tree/newdir/',),
+ ('new-tree/newdir/newfile', 'new\n')])
+ self.new_tree.add('newdir')
+ self.new_tree.add('newdir/newfile', 'file-id')
+
+ def test_register_diff(self):
+ self.create_old_new()
+ old_diff_factories = diff.DiffTree.diff_factories
+ diff.DiffTree.diff_factories=old_diff_factories[:]
+ diff.DiffTree.diff_factories.insert(0, DiffWasIs.from_diff_tree)
+ try:
+ differ = diff.DiffTree(self.old_tree, self.new_tree, StringIO())
+ finally:
+ diff.DiffTree.diff_factories = old_diff_factories
+ differ.diff('file-id', 'olddir/oldfile', 'newdir/newfile')
+ self.assertNotContainsRe(
+ differ.to_file.getvalue(),
+ r'--- olddir/oldfile.*\n\+\+\+ newdir/newfile.*\n\@\@ -1,1 \+1,1'
+ ' \@\@\n-old\n\+new\n\n')
+ self.assertContainsRe(differ.to_file.getvalue(),
+ 'was: old\nis: new\n')
+
+ def test_extra_factories(self):
+ self.create_old_new()
+ differ = diff.DiffTree(self.old_tree, self.new_tree, StringIO(),
+ extra_factories=[DiffWasIs.from_diff_tree])
+ differ.diff('file-id', 'olddir/oldfile', 'newdir/newfile')
+ self.assertNotContainsRe(
+ differ.to_file.getvalue(),
+ r'--- olddir/oldfile.*\n\+\+\+ newdir/newfile.*\n\@\@ -1,1 \+1,1'
+ ' \@\@\n-old\n\+new\n\n')
+ self.assertContainsRe(differ.to_file.getvalue(),
+ 'was: old\nis: new\n')
+
+ def test_alphabetical_order(self):
+ self.build_tree(['new-tree/a-file'])
+ self.new_tree.add('a-file')
+ self.build_tree(['old-tree/b-file'])
+ self.old_tree.add('b-file')
+ self.differ.show_diff(None)
+ self.assertContainsRe(self.differ.to_file.getvalue(),
+ '.*a-file(.|\n)*b-file')
+
+
+class TestPatienceDiffLib(tests.TestCase):
+
+ def setUp(self):
+ super(TestPatienceDiffLib, self).setUp()
+ self._unique_lcs = _patiencediff_py.unique_lcs_py
+ self._recurse_matches = _patiencediff_py.recurse_matches_py
+ self._PatienceSequenceMatcher = \
+ _patiencediff_py.PatienceSequenceMatcher_py
+
+ def test_diff_unicode_string(self):
+ a = ''.join([unichr(i) for i in range(4000, 4500, 3)])
+ b = ''.join([unichr(i) for i in range(4300, 4800, 2)])
+ sm = self._PatienceSequenceMatcher(None, a, b)
+ mb = sm.get_matching_blocks()
+ self.assertEquals(35, len(mb))
+
+ def test_unique_lcs(self):
+ unique_lcs = self._unique_lcs
+ self.assertEquals(unique_lcs('', ''), [])
+ self.assertEquals(unique_lcs('', 'a'), [])
+ self.assertEquals(unique_lcs('a', ''), [])
+ self.assertEquals(unique_lcs('a', 'a'), [(0,0)])
+ self.assertEquals(unique_lcs('a', 'b'), [])
+ self.assertEquals(unique_lcs('ab', 'ab'), [(0,0), (1,1)])
+ self.assertEquals(unique_lcs('abcde', 'cdeab'), [(2,0), (3,1), (4,2)])
+ self.assertEquals(unique_lcs('cdeab', 'abcde'), [(0,2), (1,3), (2,4)])
+ self.assertEquals(unique_lcs('abXde', 'abYde'), [(0,0), (1,1),
+ (3,3), (4,4)])
+ self.assertEquals(unique_lcs('acbac', 'abc'), [(2,1)])
+
+ def test_recurse_matches(self):
+ def test_one(a, b, matches):
+ test_matches = []
+ self._recurse_matches(
+ a, b, 0, 0, len(a), len(b), test_matches, 10)
+ self.assertEquals(test_matches, matches)
+
+ test_one(['a', '', 'b', '', 'c'], ['a', 'a', 'b', 'c', 'c'],
+ [(0, 0), (2, 2), (4, 4)])
+ test_one(['a', 'c', 'b', 'a', 'c'], ['a', 'b', 'c'],
+ [(0, 0), (2, 1), (4, 2)])
+ # Even though 'bc' is not unique globally, and is surrounded by
+ # non-matching lines, we should still match, because they are locally
+ # unique
+ test_one('abcdbce', 'afbcgdbce', [(0,0), (1, 2), (2, 3), (3, 5),
+ (4, 6), (5, 7), (6, 8)])
+
+ # recurse_matches doesn't match non-unique
+ # lines surrounded by bogus text.
+ # The update has been done in patiencediff.SequenceMatcher instead
+
+ # This is what it could be
+ #test_one('aBccDe', 'abccde', [(0,0), (2,2), (3,3), (5,5)])
+
+ # This is what it currently gives:
+ test_one('aBccDe', 'abccde', [(0,0), (5,5)])
+
+ def assertDiffBlocks(self, a, b, expected_blocks):
+ """Check that the sequence matcher returns the correct blocks.
+
+ :param a: A sequence to match
+ :param b: Another sequence to match
+ :param expected_blocks: The expected output, not including the final
+ matching block (len(a), len(b), 0)
+ """
+ matcher = self._PatienceSequenceMatcher(None, a, b)
+ blocks = matcher.get_matching_blocks()
+ last = blocks.pop()
+ self.assertEqual((len(a), len(b), 0), last)
+ self.assertEqual(expected_blocks, blocks)
+
+ def test_matching_blocks(self):
+ # Some basic matching tests
+ self.assertDiffBlocks('', '', [])
+ self.assertDiffBlocks([], [], [])
+ self.assertDiffBlocks('abc', '', [])
+ self.assertDiffBlocks('', 'abc', [])
+ self.assertDiffBlocks('abcd', 'abcd', [(0, 0, 4)])
+ self.assertDiffBlocks('abcd', 'abce', [(0, 0, 3)])
+ self.assertDiffBlocks('eabc', 'abce', [(1, 0, 3)])
+ self.assertDiffBlocks('eabce', 'abce', [(1, 0, 4)])
+ self.assertDiffBlocks('abcde', 'abXde', [(0, 0, 2), (3, 3, 2)])
+ self.assertDiffBlocks('abcde', 'abXYZde', [(0, 0, 2), (3, 5, 2)])
+ self.assertDiffBlocks('abde', 'abXYZde', [(0, 0, 2), (2, 5, 2)])
+ # This may check too much, but it checks to see that
+ # a copied block stays attached to the previous section,
+ # not the later one.
+ # difflib would tend to grab the trailing longest match
+ # which would make the diff not look right
+ self.assertDiffBlocks('abcdefghijklmnop', 'abcdefxydefghijklmnop',
+ [(0, 0, 6), (6, 11, 10)])
+
+ # make sure it supports passing in lists
+ self.assertDiffBlocks(
+ ['hello there\n',
+ 'world\n',
+ 'how are you today?\n'],
+ ['hello there\n',
+ 'how are you today?\n'],
+ [(0, 0, 1), (2, 1, 1)])
+
+ # non unique lines surrounded by non-matching lines
+ # won't be found
+ self.assertDiffBlocks('aBccDe', 'abccde', [(0,0,1), (5,5,1)])
+
+ # But they only need to be locally unique
+ self.assertDiffBlocks('aBcDec', 'abcdec', [(0,0,1), (2,2,1), (4,4,2)])
+
+ # non unique blocks won't be matched
+ self.assertDiffBlocks('aBcdEcdFg', 'abcdecdfg', [(0,0,1), (8,8,1)])
+
+ # but locally unique ones will
+ self.assertDiffBlocks('aBcdEeXcdFg', 'abcdecdfg', [(0,0,1), (2,2,2),
+ (5,4,1), (7,5,2), (10,8,1)])
+
+ self.assertDiffBlocks('abbabbXd', 'cabbabxd', [(7,7,1)])
+ self.assertDiffBlocks('abbabbbb', 'cabbabbc', [])
+ self.assertDiffBlocks('bbbbbbbb', 'cbbbbbbc', [])
+
+ def test_matching_blocks_tuples(self):
+ # Some basic matching tests
+ self.assertDiffBlocks([], [], [])
+ self.assertDiffBlocks([('a',), ('b',), ('c,')], [], [])
+ self.assertDiffBlocks([], [('a',), ('b',), ('c,')], [])
+ self.assertDiffBlocks([('a',), ('b',), ('c,')],
+ [('a',), ('b',), ('c,')],
+ [(0, 0, 3)])
+ self.assertDiffBlocks([('a',), ('b',), ('c,')],
+ [('a',), ('b',), ('d,')],
+ [(0, 0, 2)])
+ self.assertDiffBlocks([('d',), ('b',), ('c,')],
+ [('a',), ('b',), ('c,')],
+ [(1, 1, 2)])
+ self.assertDiffBlocks([('d',), ('a',), ('b',), ('c,')],
+ [('a',), ('b',), ('c,')],
+ [(1, 0, 3)])
+ self.assertDiffBlocks([('a', 'b'), ('c', 'd'), ('e', 'f')],
+ [('a', 'b'), ('c', 'X'), ('e', 'f')],
+ [(0, 0, 1), (2, 2, 1)])
+ self.assertDiffBlocks([('a', 'b'), ('c', 'd'), ('e', 'f')],
+ [('a', 'b'), ('c', 'dX'), ('e', 'f')],
+ [(0, 0, 1), (2, 2, 1)])
+
+ def test_opcodes(self):
+ def chk_ops(a, b, expected_codes):
+ s = self._PatienceSequenceMatcher(None, a, b)
+ self.assertEquals(expected_codes, s.get_opcodes())
+
+ chk_ops('', '', [])
+ chk_ops([], [], [])
+ chk_ops('abc', '', [('delete', 0,3, 0,0)])
+ chk_ops('', 'abc', [('insert', 0,0, 0,3)])
+ chk_ops('abcd', 'abcd', [('equal', 0,4, 0,4)])
+ chk_ops('abcd', 'abce', [('equal', 0,3, 0,3),
+ ('replace', 3,4, 3,4)
+ ])
+ chk_ops('eabc', 'abce', [('delete', 0,1, 0,0),
+ ('equal', 1,4, 0,3),
+ ('insert', 4,4, 3,4)
+ ])
+ chk_ops('eabce', 'abce', [('delete', 0,1, 0,0),
+ ('equal', 1,5, 0,4)
+ ])
+ chk_ops('abcde', 'abXde', [('equal', 0,2, 0,2),
+ ('replace', 2,3, 2,3),
+ ('equal', 3,5, 3,5)
+ ])
+ chk_ops('abcde', 'abXYZde', [('equal', 0,2, 0,2),
+ ('replace', 2,3, 2,5),
+ ('equal', 3,5, 5,7)
+ ])
+ chk_ops('abde', 'abXYZde', [('equal', 0,2, 0,2),
+ ('insert', 2,2, 2,5),
+ ('equal', 2,4, 5,7)
+ ])
+ chk_ops('abcdefghijklmnop', 'abcdefxydefghijklmnop',
+ [('equal', 0,6, 0,6),
+ ('insert', 6,6, 6,11),
+ ('equal', 6,16, 11,21)
+ ])
+ chk_ops(
+ [ 'hello there\n'
+ , 'world\n'
+ , 'how are you today?\n'],
+ [ 'hello there\n'
+ , 'how are you today?\n'],
+ [('equal', 0,1, 0,1),
+ ('delete', 1,2, 1,1),
+ ('equal', 2,3, 1,2),
+ ])
+ chk_ops('aBccDe', 'abccde',
+ [('equal', 0,1, 0,1),
+ ('replace', 1,5, 1,5),
+ ('equal', 5,6, 5,6),
+ ])
+ chk_ops('aBcDec', 'abcdec',
+ [('equal', 0,1, 0,1),
+ ('replace', 1,2, 1,2),
+ ('equal', 2,3, 2,3),
+ ('replace', 3,4, 3,4),
+ ('equal', 4,6, 4,6),
+ ])
+ chk_ops('aBcdEcdFg', 'abcdecdfg',
+ [('equal', 0,1, 0,1),
+ ('replace', 1,8, 1,8),
+ ('equal', 8,9, 8,9)
+ ])
+ chk_ops('aBcdEeXcdFg', 'abcdecdfg',
+ [('equal', 0,1, 0,1),
+ ('replace', 1,2, 1,2),
+ ('equal', 2,4, 2,4),
+ ('delete', 4,5, 4,4),
+ ('equal', 5,6, 4,5),
+ ('delete', 6,7, 5,5),
+ ('equal', 7,9, 5,7),
+ ('replace', 9,10, 7,8),
+ ('equal', 10,11, 8,9)
+ ])
+
+ def test_grouped_opcodes(self):
+ def chk_ops(a, b, expected_codes, n=3):
+ s = self._PatienceSequenceMatcher(None, a, b)
+ self.assertEquals(expected_codes, list(s.get_grouped_opcodes(n)))
+
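+        # get_grouped_opcodes(n) follows difflib's convention: opcodes are
+        # grouped into hunks with at most n lines of 'equal' context on either
+        # side, and inputs with no differences produce no hunks at all.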
+ chk_ops('', '', [])
+ chk_ops([], [], [])
+ chk_ops('abc', '', [[('delete', 0,3, 0,0)]])
+ chk_ops('', 'abc', [[('insert', 0,0, 0,3)]])
+ chk_ops('abcd', 'abcd', [])
+ chk_ops('abcd', 'abce', [[('equal', 0,3, 0,3),
+ ('replace', 3,4, 3,4)
+ ]])
+ chk_ops('eabc', 'abce', [[('delete', 0,1, 0,0),
+ ('equal', 1,4, 0,3),
+ ('insert', 4,4, 3,4)
+ ]])
+ chk_ops('abcdefghijklmnop', 'abcdefxydefghijklmnop',
+ [[('equal', 3,6, 3,6),
+ ('insert', 6,6, 6,11),
+ ('equal', 6,9, 11,14)
+ ]])
+ chk_ops('abcdefghijklmnop', 'abcdefxydefghijklmnop',
+ [[('equal', 2,6, 2,6),
+ ('insert', 6,6, 6,11),
+ ('equal', 6,10, 11,15)
+ ]], 4)
+ chk_ops('Xabcdef', 'abcdef',
+ [[('delete', 0,1, 0,0),
+ ('equal', 1,4, 0,3)
+ ]])
+ chk_ops('abcdef', 'abcdefX',
+ [[('equal', 3,6, 3,6),
+ ('insert', 6,6, 6,7)
+ ]])
+
+
+ def test_multiple_ranges(self):
+ # There was an earlier bug where we used a bad set of ranges,
+ # this triggers that specific bug, to make sure it doesn't regress
+ self.assertDiffBlocks('abcdefghijklmnop',
+ 'abcXghiYZQRSTUVWXYZijklmnop',
+ [(0, 0, 3), (6, 4, 3), (9, 20, 7)])
+
+ self.assertDiffBlocks('ABCd efghIjk L',
+ 'AxyzBCn mo pqrstuvwI1 2 L',
+ [(0,0,1), (1, 4, 2), (9, 19, 1), (12, 23, 3)])
+
+ # These are rot13 code snippets.
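+        # (rot13 merely obfuscates the text so the matcher works on realistic,
+        # code-like lines; e.g. 'trg nqqrq' decodes to 'get added'.)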
+ self.assertDiffBlocks('''\
+ trg nqqrq jura lbh nqq n svyr va gur qverpgbel.
+ """
+ gnxrf_netf = ['svyr*']
+ gnxrf_bcgvbaf = ['ab-erphefr']
+
+ qrs eha(frys, svyr_yvfg, ab_erphefr=Snyfr):
+ sebz omeyvo.nqq vzcbeg fzneg_nqq, nqq_ercbegre_cevag, nqq_ercbegre_ahyy
+ vs vf_dhvrg():
+ ercbegre = nqq_ercbegre_ahyy
+ ryfr:
+ ercbegre = nqq_ercbegre_cevag
+ fzneg_nqq(svyr_yvfg, abg ab_erphefr, ercbegre)
+
+
+pynff pzq_zxqve(Pbzznaq):
+'''.splitlines(True), '''\
+ trg nqqrq jura lbh nqq n svyr va gur qverpgbel.
+
+ --qel-eha jvyy fubj juvpu svyrf jbhyq or nqqrq, ohg abg npghnyyl
+ nqq gurz.
+ """
+ gnxrf_netf = ['svyr*']
+ gnxrf_bcgvbaf = ['ab-erphefr', 'qel-eha']
+
+ qrs eha(frys, svyr_yvfg, ab_erphefr=Snyfr, qel_eha=Snyfr):
+ vzcbeg omeyvo.nqq
+
+ vs qel_eha:
+ vs vf_dhvrg():
+ # Guvf vf cbvagyrff, ohg V'q engure abg envfr na reebe
+ npgvba = omeyvo.nqq.nqq_npgvba_ahyy
+ ryfr:
+ npgvba = omeyvo.nqq.nqq_npgvba_cevag
+ ryvs vf_dhvrg():
+ npgvba = omeyvo.nqq.nqq_npgvba_nqq
+ ryfr:
+ npgvba = omeyvo.nqq.nqq_npgvba_nqq_naq_cevag
+
+ omeyvo.nqq.fzneg_nqq(svyr_yvfg, abg ab_erphefr, npgvba)
+
+
+pynff pzq_zxqve(Pbzznaq):
+'''.splitlines(True)
+, [(0,0,1), (1, 4, 2), (9, 19, 1), (12, 23, 3)])
+
+ def test_patience_unified_diff(self):
+ txt_a = ['hello there\n',
+ 'world\n',
+ 'how are you today?\n']
+ txt_b = ['hello there\n',
+ 'how are you today?\n']
+ unified_diff = patiencediff.unified_diff
+ psm = self._PatienceSequenceMatcher
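+        # The '@@ -1,3 +1,2 @@' hunk header means: 3 lines starting at line 1
+        # of txt_a correspond to 2 lines starting at line 1 of txt_b.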
+ self.assertEquals(['--- \n',
+ '+++ \n',
+ '@@ -1,3 +1,2 @@\n',
+ ' hello there\n',
+ '-world\n',
+ ' how are you today?\n'
+ ]
+ , list(unified_diff(txt_a, txt_b,
+ sequencematcher=psm)))
+ txt_a = map(lambda x: x+'\n', 'abcdefghijklmnop')
+ txt_b = map(lambda x: x+'\n', 'abcdefxydefghijklmnop')
+ # This is the result with LongestCommonSubstring matching
+ self.assertEquals(['--- \n',
+ '+++ \n',
+ '@@ -1,6 +1,11 @@\n',
+ ' a\n',
+ ' b\n',
+ ' c\n',
+ '+d\n',
+ '+e\n',
+ '+f\n',
+ '+x\n',
+ '+y\n',
+ ' d\n',
+ ' e\n',
+ ' f\n']
+ , list(unified_diff(txt_a, txt_b)))
+ # And the patience diff
+ self.assertEquals(['--- \n',
+ '+++ \n',
+ '@@ -4,6 +4,11 @@\n',
+ ' d\n',
+ ' e\n',
+ ' f\n',
+ '+x\n',
+ '+y\n',
+ '+d\n',
+ '+e\n',
+ '+f\n',
+ ' g\n',
+ ' h\n',
+ ' i\n',
+ ]
+ , list(unified_diff(txt_a, txt_b,
+ sequencematcher=psm)))
+
+ def test_patience_unified_diff_with_dates(self):
+ txt_a = ['hello there\n',
+ 'world\n',
+ 'how are you today?\n']
+ txt_b = ['hello there\n',
+ 'how are you today?\n']
+ unified_diff = patiencediff.unified_diff
+ psm = self._PatienceSequenceMatcher
+ self.assertEquals(['--- a\t2008-08-08\n',
+ '+++ b\t2008-09-09\n',
+ '@@ -1,3 +1,2 @@\n',
+ ' hello there\n',
+ '-world\n',
+ ' how are you today?\n'
+ ]
+ , list(unified_diff(txt_a, txt_b,
+ fromfile='a', tofile='b',
+ fromfiledate='2008-08-08',
+ tofiledate='2008-09-09',
+ sequencematcher=psm)))
+
+
+class TestPatienceDiffLib_c(TestPatienceDiffLib):
+
+ _test_needs_features = [features.compiled_patiencediff_feature]
+
+ def setUp(self):
+ super(TestPatienceDiffLib_c, self).setUp()
+ from bzrlib import _patiencediff_c
+ self._unique_lcs = _patiencediff_c.unique_lcs_c
+ self._recurse_matches = _patiencediff_c.recurse_matches_c
+ self._PatienceSequenceMatcher = \
+ _patiencediff_c.PatienceSequenceMatcher_c
+
+ def test_unhashable(self):
+ """We should get a proper exception here."""
+ # We need to be able to hash items in the sequence, lists are
+ # unhashable, and thus cannot be diffed
+ e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
+ None, [[]], [])
+ e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
+ None, ['valid', []], [])
+ e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
+ None, ['valid'], [[]])
+ e = self.assertRaises(TypeError, self._PatienceSequenceMatcher,
+ None, ['valid'], ['valid', []])
+
+
+class TestPatienceDiffLibFiles(tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super(TestPatienceDiffLibFiles, self).setUp()
+ self._PatienceSequenceMatcher = \
+ _patiencediff_py.PatienceSequenceMatcher_py
+
+ def test_patience_unified_diff_files(self):
+ txt_a = ['hello there\n',
+ 'world\n',
+ 'how are you today?\n']
+ txt_b = ['hello there\n',
+ 'how are you today?\n']
+ with open('a1', 'wb') as f: f.writelines(txt_a)
+ with open('b1', 'wb') as f: f.writelines(txt_b)
+
+ unified_diff_files = patiencediff.unified_diff_files
+ psm = self._PatienceSequenceMatcher
+ self.assertEquals(['--- a1\n',
+ '+++ b1\n',
+ '@@ -1,3 +1,2 @@\n',
+ ' hello there\n',
+ '-world\n',
+ ' how are you today?\n',
+ ]
+ , list(unified_diff_files('a1', 'b1',
+ sequencematcher=psm)))
+
+ txt_a = map(lambda x: x+'\n', 'abcdefghijklmnop')
+ txt_b = map(lambda x: x+'\n', 'abcdefxydefghijklmnop')
+ with open('a2', 'wb') as f: f.writelines(txt_a)
+ with open('b2', 'wb') as f: f.writelines(txt_b)
+
+ # This is the result with LongestCommonSubstring matching
+ self.assertEquals(['--- a2\n',
+ '+++ b2\n',
+ '@@ -1,6 +1,11 @@\n',
+ ' a\n',
+ ' b\n',
+ ' c\n',
+ '+d\n',
+ '+e\n',
+ '+f\n',
+ '+x\n',
+ '+y\n',
+ ' d\n',
+ ' e\n',
+ ' f\n']
+ , list(unified_diff_files('a2', 'b2')))
+
+ # And the patience diff
+ self.assertEquals(['--- a2\n',
+ '+++ b2\n',
+ '@@ -4,6 +4,11 @@\n',
+ ' d\n',
+ ' e\n',
+ ' f\n',
+ '+x\n',
+ '+y\n',
+ '+d\n',
+ '+e\n',
+ '+f\n',
+ ' g\n',
+ ' h\n',
+ ' i\n',
+ ]
+ , list(unified_diff_files('a2', 'b2',
+ sequencematcher=psm)))
+
+
+class TestPatienceDiffLibFiles_c(TestPatienceDiffLibFiles):
+
+ _test_needs_features = [features.compiled_patiencediff_feature]
+
+ def setUp(self):
+ super(TestPatienceDiffLibFiles_c, self).setUp()
+ from bzrlib import _patiencediff_c
+ self._PatienceSequenceMatcher = \
+ _patiencediff_c.PatienceSequenceMatcher_c
+
+
+class TestUsingCompiledIfAvailable(tests.TestCase):
+
+ def test_PatienceSequenceMatcher(self):
+ if features.compiled_patiencediff_feature.available():
+ from bzrlib._patiencediff_c import PatienceSequenceMatcher_c
+ self.assertIs(PatienceSequenceMatcher_c,
+ patiencediff.PatienceSequenceMatcher)
+ else:
+ from bzrlib._patiencediff_py import PatienceSequenceMatcher_py
+ self.assertIs(PatienceSequenceMatcher_py,
+ patiencediff.PatienceSequenceMatcher)
+
+ def test_unique_lcs(self):
+ if features.compiled_patiencediff_feature.available():
+ from bzrlib._patiencediff_c import unique_lcs_c
+ self.assertIs(unique_lcs_c,
+ patiencediff.unique_lcs)
+ else:
+ from bzrlib._patiencediff_py import unique_lcs_py
+ self.assertIs(unique_lcs_py,
+ patiencediff.unique_lcs)
+
+ def test_recurse_matches(self):
+ if features.compiled_patiencediff_feature.available():
+ from bzrlib._patiencediff_c import recurse_matches_c
+ self.assertIs(recurse_matches_c,
+ patiencediff.recurse_matches)
+ else:
+ from bzrlib._patiencediff_py import recurse_matches_py
+ self.assertIs(recurse_matches_py,
+ patiencediff.recurse_matches)
+
+
+class TestDiffFromTool(tests.TestCaseWithTransport):
+
+ def test_from_string(self):
+ diff_obj = diff.DiffFromTool.from_string('diff', None, None, None)
+ self.addCleanup(diff_obj.finish)
+ self.assertEqual(['diff', '@old_path', '@new_path'],
+ diff_obj.command_template)
+
+ def test_from_string_u5(self):
+ diff_obj = diff.DiffFromTool.from_string('diff "-u 5"',
+ None, None, None)
+ self.addCleanup(diff_obj.finish)
+ self.assertEqual(['diff', '-u 5', '@old_path', '@new_path'],
+ diff_obj.command_template)
+ self.assertEqual(['diff', '-u 5', 'old-path', 'new-path'],
+ diff_obj._get_command('old-path', 'new-path'))
+
+ def test_from_string_path_with_backslashes(self):
+ self.requireFeature(features.backslashdir_feature)
+ tool = 'C:\\Tools\\Diff.exe'
+ diff_obj = diff.DiffFromTool.from_string(tool, None, None, None)
+ self.addCleanup(diff_obj.finish)
+ self.assertEqual(['C:\\Tools\\Diff.exe', '@old_path', '@new_path'],
+ diff_obj.command_template)
+ self.assertEqual(['C:\\Tools\\Diff.exe', 'old-path', 'new-path'],
+ diff_obj._get_command('old-path', 'new-path'))
+
+ def test_execute(self):
+ output = StringIO()
+ diff_obj = diff.DiffFromTool(['python', '-c',
+ 'print "@old_path @new_path"'],
+ None, None, output)
+ self.addCleanup(diff_obj.finish)
+ diff_obj._execute('old', 'new')
+ self.assertEqual(output.getvalue().rstrip(), 'old new')
+
+    def test_execute_missing(self):
+ diff_obj = diff.DiffFromTool(['a-tool-which-is-unlikely-to-exist'],
+ None, None, None)
+ self.addCleanup(diff_obj.finish)
+ e = self.assertRaises(errors.ExecutableMissing, diff_obj._execute,
+ 'old', 'new')
+ self.assertEqual('a-tool-which-is-unlikely-to-exist could not be found'
+ ' on this machine', str(e))
+
+ def test_prepare_files_creates_paths_readable_by_windows_tool(self):
+ self.requireFeature(features.AttribFeature)
+ output = StringIO()
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'content')])
+ tree.add('file', 'file-id')
+ tree.commit('old tree')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ self.addCleanup(basis_tree.unlock)
+ diff_obj = diff.DiffFromTool(['python', '-c',
+ 'print "@old_path @new_path"'],
+ basis_tree, tree, output)
+ diff_obj._prepare_files('file-id', 'file', 'file')
+ # The old content should be readonly
+ self.assertReadableByAttrib(diff_obj._root, 'old\\file',
+ r'R.*old\\file$')
+ # The new content should use the tree object, not a 'new' file anymore
+ self.assertEndsWith(tree.basedir, 'work/tree')
+ self.assertReadableByAttrib(tree.basedir, 'file', r'work\\tree\\file$')
+
+ def assertReadableByAttrib(self, cwd, relpath, regex):
+ proc = subprocess.Popen(['attrib', relpath],
+ stdout=subprocess.PIPE,
+ cwd=cwd)
+ (result, err) = proc.communicate()
+ self.assertContainsRe(result.replace('\r\n', '\n'), regex)
+
+ def test_prepare_files(self):
+ output = StringIO()
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/oldname', 'oldcontent')])
+ self.build_tree_contents([('tree/oldname2', 'oldcontent2')])
+ tree.add('oldname', 'file-id')
+ tree.add('oldname2', 'file2-id')
+ # Earliest allowable date on FAT32 filesystems is 1980-01-01
+ tree.commit('old tree', timestamp=315532800)
+ tree.rename_one('oldname', 'newname')
+ tree.rename_one('oldname2', 'newname2')
+ self.build_tree_contents([('tree/newname', 'newcontent')])
+ self.build_tree_contents([('tree/newname2', 'newcontent2')])
+ old_tree = tree.basis_tree()
+ old_tree.lock_read()
+ self.addCleanup(old_tree.unlock)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ diff_obj = diff.DiffFromTool(['python', '-c',
+ 'print "@old_path @new_path"'],
+ old_tree, tree, output)
+ self.addCleanup(diff_obj.finish)
+ self.assertContainsRe(diff_obj._root, 'bzr-diff-[^/]*')
+ old_path, new_path = diff_obj._prepare_files('file-id', 'oldname',
+ 'newname')
+ self.assertContainsRe(old_path, 'old/oldname$')
+ self.assertEqual(315532800, os.stat(old_path).st_mtime)
+ self.assertContainsRe(new_path, 'tree/newname$')
+ self.assertFileEqual('oldcontent', old_path)
+ self.assertFileEqual('newcontent', new_path)
+ if osutils.host_os_dereferences_symlinks():
+ self.assertTrue(os.path.samefile('tree/newname', new_path))
+ # make sure we can create files with the same parent directories
+ diff_obj._prepare_files('file2-id', 'oldname2', 'newname2')
+
+
+class TestDiffFromToolEncodedFilename(tests.TestCaseWithTransport):
+
+ def test_encodable_filename(self):
+ # Just checks file path for external diff tool.
+ # We cannot change CPython's internal encoding used by os.exec*.
+ import sys
+ diffobj = diff.DiffFromTool(['dummy', '@old_path', '@new_path'],
+ None, None, None)
+ for _, scenario in EncodingAdapter.encoding_scenarios:
+ encoding = scenario['encoding']
+ dirname = scenario['info']['directory']
+ filename = scenario['info']['filename']
+
+ self.overrideAttr(diffobj, '_fenc', lambda: encoding)
+ relpath = dirname + u'/' + filename
+ fullpath = diffobj._safe_filename('safe', relpath)
+ self.assertEqual(
+ fullpath,
+ fullpath.encode(encoding).decode(encoding)
+ )
+ self.assert_(fullpath.startswith(diffobj._root + '/safe'))
+
+ def test_unencodable_filename(self):
+ import sys
+ diffobj = diff.DiffFromTool(['dummy', '@old_path', '@new_path'],
+ None, None, None)
+ for _, scenario in EncodingAdapter.encoding_scenarios:
+ encoding = scenario['encoding']
+ dirname = scenario['info']['directory']
+ filename = scenario['info']['filename']
+
+ if encoding == 'iso-8859-1':
+ encoding = 'iso-8859-2'
+ else:
+ encoding = 'iso-8859-1'
+
+ self.overrideAttr(diffobj, '_fenc', lambda: encoding)
+ relpath = dirname + u'/' + filename
+ fullpath = diffobj._safe_filename('safe', relpath)
+ self.assertEqual(
+ fullpath,
+ fullpath.encode(encoding).decode(encoding)
+ )
+ self.assert_(fullpath.startswith(diffobj._root + '/safe'))
+
+
+class TestGetTreesAndBranchesToDiffLocked(tests.TestCaseWithTransport):
+
+ def call_gtabtd(self, path_list, revision_specs, old_url, new_url):
+ """Call get_trees_and_branches_to_diff_locked."""
+ return diff.get_trees_and_branches_to_diff_locked(
+ path_list, revision_specs, old_url, new_url, self.addCleanup)
+
+ def test_basic(self):
+ tree = self.make_branch_and_tree('tree')
+ (old_tree, new_tree,
+ old_branch, new_branch,
+ specific_files, extra_trees) = self.call_gtabtd(
+ ['tree'], None, None, None)
+
+ self.assertIsInstance(old_tree, revisiontree.RevisionTree)
+ self.assertEqual(_mod_revision.NULL_REVISION,
+ old_tree.get_revision_id())
+ self.assertEqual(tree.basedir, new_tree.basedir)
+ self.assertEqual(tree.branch.base, old_branch.base)
+ self.assertEqual(tree.branch.base, new_branch.base)
+ self.assertIs(None, specific_files)
+ self.assertIs(None, extra_trees)
+
+ def test_with_rev_specs(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'oldcontent')])
+ tree.add('file', 'file-id')
+ tree.commit('old tree', timestamp=0, rev_id="old-id")
+ self.build_tree_contents([('tree/file', 'newcontent')])
+ tree.commit('new tree', timestamp=0, rev_id="new-id")
+
+ revisions = [revisionspec.RevisionSpec.from_string('1'),
+ revisionspec.RevisionSpec.from_string('2')]
+ (old_tree, new_tree,
+ old_branch, new_branch,
+ specific_files, extra_trees) = self.call_gtabtd(
+ ['tree'], revisions, None, None)
+
+ self.assertIsInstance(old_tree, revisiontree.RevisionTree)
+ self.assertEqual("old-id", old_tree.get_revision_id())
+ self.assertIsInstance(new_tree, revisiontree.RevisionTree)
+ self.assertEqual("new-id", new_tree.get_revision_id())
+ self.assertEqual(tree.branch.base, old_branch.base)
+ self.assertEqual(tree.branch.base, new_branch.base)
+ self.assertIs(None, specific_files)
+ self.assertEqual(tree.basedir, extra_trees[0].basedir)
diff --git a/bzrlib/tests/test_directory_service.py b/bzrlib/tests/test_directory_service.py
new file mode 100644
index 0000000..f3a2653
--- /dev/null
+++ b/bzrlib/tests/test_directory_service.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2008-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test directory service implementation"""
+
+from bzrlib import (
+ errors,
+ transport,
+ urlutils,
+ )
+from bzrlib.directory_service import (
+ AliasDirectory,
+ DirectoryServiceRegistry,
+ directories,
+ )
+from bzrlib.tests import TestCase, TestCaseWithTransport
+
+
+class FooService(object):
+ """A directory service that maps the name to a FILE url"""
+
+ # eg 'file:///foo' on Unix, or 'file:///C:/foo' on Windows
+ base = urlutils.local_path_to_url('/foo')
+
+ def look_up(self, name, url):
+ return self.base + name
+
+
+class TestDirectoryLookup(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.registry = DirectoryServiceRegistry()
+        self.registry.register('foo:', FooService, 'Map foo URLs to file URLs')
+
+ def test_get_directory_service(self):
+ directory, suffix = self.registry.get_prefix('foo:bar')
+ self.assertIs(FooService, directory)
+ self.assertEqual('bar', suffix)
+
+ def test_dereference(self):
+ self.assertEqual(FooService.base + 'bar',
+ self.registry.dereference('foo:bar'))
+ self.assertEqual('baz:qux', self.registry.dereference('baz:qux'))
+
+ def test_get_transport(self):
+        directories.register('foo:', FooService, 'Map foo URLs to file URLs')
+ self.addCleanup(directories.remove, 'foo:')
+ self.assertEqual(FooService.base + 'bar/',
+ transport.get_transport('foo:bar').base)
+
+
+class TestAliasDirectory(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestAliasDirectory, self).setUp()
+ self.branch = self.make_branch('.')
+
+ def assertAliasFromBranch(self, setter, value, alias):
+ setter(value)
+ self.assertEquals(value, directories.dereference(alias))
+
+ def test_lookup_parent(self):
+ self.assertAliasFromBranch(self.branch.set_parent, 'http://a',
+ ':parent')
+
+ def test_lookup_submit(self):
+ self.assertAliasFromBranch(self.branch.set_submit_branch, 'http://b',
+ ':submit')
+
+ def test_lookup_public(self):
+ self.assertAliasFromBranch(self.branch.set_public_branch, 'http://c',
+ ':public')
+
+ def test_lookup_bound(self):
+ self.assertAliasFromBranch(self.branch.set_bound_location, 'http://d',
+ ':bound')
+
+ def test_lookup_push(self):
+ self.assertAliasFromBranch(self.branch.set_push_location, 'http://e',
+ ':push')
+
+ def test_lookup_this(self):
+ self.assertEqual(self.branch.base, directories.dereference(':this'))
+
+ def test_extra_path(self):
+ self.assertEqual(urlutils.join(self.branch.base, 'arg'),
+ directories.dereference(':this/arg'))
+
+ def test_lookup_badname(self):
+ e = self.assertRaises(errors.InvalidLocationAlias,
+ directories.dereference, ':booga')
+ self.assertEqual('":booga" is not a valid location alias.',
+ str(e))
+
+ def test_lookup_badvalue(self):
+ e = self.assertRaises(errors.UnsetLocationAlias,
+ directories.dereference, ':parent')
+ self.assertEqual('No parent location assigned.', str(e))
+
+ def test_register_location_alias(self):
+ self.addCleanup(AliasDirectory.branch_aliases.remove, "booga")
+ AliasDirectory.branch_aliases.register("booga",
+ lambda b: "UHH?", help="Nobody knows")
+ self.assertEquals("UHH?", directories.dereference(":booga"))
+
+
+class TestColocatedDirectory(TestCaseWithTransport):
+
+ def test_lookup_non_default(self):
+ default = self.make_branch('.')
+ non_default = default.bzrdir.create_branch(name='nondefault')
+ self.assertEquals(non_default.base, directories.dereference('co:nondefault'))
+
+ def test_lookup_default(self):
+ default = self.make_branch('.')
+ non_default = default.bzrdir.create_branch(name='nondefault')
+ self.assertEquals(urlutils.join_segment_parameters(default.bzrdir.user_url,
+ {"branch": ""}), directories.dereference('co:'))
+
+ def test_no_such_branch(self):
+        # No error is raised in this case; that is left to the code that
+        # actually opens the branch.
+ default = self.make_branch('.')
+ self.assertEquals(urlutils.join_segment_parameters(default.bzrdir.user_url,
+ {"branch": "foo"}), directories.dereference('co:foo'))
diff --git a/bzrlib/tests/test_dirstate.py b/bzrlib/tests/test_dirstate.py
new file mode 100644
index 0000000..8ced0c5
--- /dev/null
+++ b/bzrlib/tests/test_dirstate.py
@@ -0,0 +1,2881 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests of the dirstate functionality being built for WorkingTreeFormat4."""
+
+import os
+import tempfile
+
+from bzrlib import (
+ controldir,
+ dirstate,
+ errors,
+ inventory,
+ memorytree,
+ osutils,
+ revision as _mod_revision,
+ revisiontree,
+ tests,
+ workingtree_4,
+ )
+from bzrlib.tests import (
+ features,
+ test_osutils,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+# TODO:
+# TESTS to write:
+# general checks for NOT_IN_MEMORY error conditions.
+# set_path_id on a NOT_IN_MEMORY dirstate
+# set_path_id unicode support
+# set_path_id setting id of a path not root
+# set_path_id setting id when there are parents without the id in the parents
+# set_path_id setting id when there are parents with the id in the parents
+# set_path_id setting id when state is not in memory
+# set_path_id setting id when state is in memory unmodified
+# set_path_id setting id when state is in memory modified
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestCaseWithDirState(tests.TestCaseWithTransport):
+ """Helper functions for creating DirState objects with various content."""
+
+ scenarios = test_osutils.dir_reader_scenarios()
+
+ # Set by load_tests
+ _dir_reader_class = None
+ _native_to_unicode = None # Not used yet
+
+ def setUp(self):
+ tests.TestCaseWithTransport.setUp(self)
+
+ self.overrideAttr(osutils,
+ '_selected_dir_reader', self._dir_reader_class())
+
+ def create_empty_dirstate(self):
+ """Return a locked but empty dirstate"""
+ state = dirstate.DirState.initialize('dirstate')
+ return state
+
+ def create_dirstate_with_root(self):
+ """Return a write-locked state with a single root entry."""
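+        # A dirstate entry is ((dirname, basename, file-id), [tree-details]),
+        # where each tree-details tuple is (minikind, fingerprint, size,
+        # executable, packed_stat-or-revision); minikind 'd' is a directory.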
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ root_entry_direntry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat),
+ ]
+ dirblocks = []
+ dirblocks.append(('', [root_entry_direntry]))
+ dirblocks.append(('', []))
+ state = self.create_empty_dirstate()
+ try:
+ state._set_data([], dirblocks)
+ state._validate()
+ except:
+ state.unlock()
+ raise
+ return state
+
+ def create_dirstate_with_root_and_subdir(self):
+ """Return a locked DirState with a root and a subdir"""
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ subdir_entry = ('', 'subdir', 'subdir-id'), [
+ ('d', '', 0, False, packed_stat),
+ ]
+ state = self.create_dirstate_with_root()
+ try:
+ dirblocks = list(state._dirblocks)
+ dirblocks[1][1].append(subdir_entry)
+ state._set_data([], dirblocks)
+ except:
+ state.unlock()
+ raise
+ return state
+
+ def create_complex_dirstate(self):
+ """This dirstate contains multiple files and directories.
+
+ / a-root-value
+ a/ a-dir
+ b/ b-dir
+ c c-file
+ d d-file
+ a/e/ e-dir
+ a/f f-file
+ b/g g-file
+ b/h\xc3\xa5 h-\xc3\xa5-file #This is u'\xe5' encoded into utf-8
+
+ Notice that a/e is an empty directory.
+
+ :return: The dirstate, still write-locked.
+ """
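+        # packed_stat is the base64 form produced by dirstate.pack_stat() from
+        # an os.stat() result; a fixed placeholder value is used here since
+        # none of these paths actually exist on disk.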
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ null_sha = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+ root_entry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat),
+ ]
+ a_entry = ('', 'a', 'a-dir'), [
+ ('d', '', 0, False, packed_stat),
+ ]
+ b_entry = ('', 'b', 'b-dir'), [
+ ('d', '', 0, False, packed_stat),
+ ]
+ c_entry = ('', 'c', 'c-file'), [
+ ('f', null_sha, 10, False, packed_stat),
+ ]
+ d_entry = ('', 'd', 'd-file'), [
+ ('f', null_sha, 20, False, packed_stat),
+ ]
+ e_entry = ('a', 'e', 'e-dir'), [
+ ('d', '', 0, False, packed_stat),
+ ]
+ f_entry = ('a', 'f', 'f-file'), [
+ ('f', null_sha, 30, False, packed_stat),
+ ]
+ g_entry = ('b', 'g', 'g-file'), [
+ ('f', null_sha, 30, False, packed_stat),
+ ]
+ h_entry = ('b', 'h\xc3\xa5', 'h-\xc3\xa5-file'), [
+ ('f', null_sha, 40, False, packed_stat),
+ ]
+ dirblocks = []
+ dirblocks.append(('', [root_entry]))
+ dirblocks.append(('', [a_entry, b_entry, c_entry, d_entry]))
+ dirblocks.append(('a', [e_entry, f_entry]))
+ dirblocks.append(('b', [g_entry, h_entry]))
+ state = dirstate.DirState.initialize('dirstate')
+ state._validate()
+ try:
+ state._set_data([], dirblocks)
+ except:
+ state.unlock()
+ raise
+ return state
+
+ def check_state_with_reopen(self, expected_result, state):
+ """Check that state has current state expected_result.
+
+ This will check the current state, open the file anew and check it
+ again.
+ This function expects the current state to be locked for writing, and
+ will unlock it before re-opening.
+ This is required because we can't open a lock_read() while something
+ else has a lock_write().
+ write => mutually exclusive lock
+ read => shared lock
+ """
+ # The state should already be write locked, since we just had to do
+ # some operation to get here.
+ self.assertTrue(state._lock_token is not None)
+ try:
+ self.assertEqual(expected_result[0], state.get_parent_ids())
+ # there should be no ghosts in this tree.
+ self.assertEqual([], state.get_ghosts())
+ # there should be one fileid in this tree - the root of the tree.
+ self.assertEqual(expected_result[1], list(state._iter_entries()))
+ state.save()
+ finally:
+ state.unlock()
+ del state
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ self.assertEqual(expected_result[1], list(state._iter_entries()))
+ finally:
+ state.unlock()
+
+ def create_basic_dirstate(self):
+ """Create a dirstate with a few files and directories.
+
+ a
+ b/
+ c
+ d/
+ e
+ b-c
+ f
+ """
+ tree = self.make_branch_and_tree('tree')
+ paths = ['a', 'b/', 'b/c', 'b/d/', 'b/d/e', 'b-c', 'f']
+ file_ids = ['a-id', 'b-id', 'c-id', 'd-id', 'e-id', 'b-c-id', 'f-id']
+ self.build_tree(['tree/' + p for p in paths])
+ tree.set_root_id('TREE_ROOT')
+ tree.add([p.rstrip('/') for p in paths], file_ids)
+ tree.commit('initial', rev_id='rev-1')
+ revision_id = 'rev-1'
+ # a_packed_stat = dirstate.pack_stat(os.stat('tree/a'))
+ t = self.get_transport('tree')
+ a_text = t.get_bytes('a')
+ a_sha = osutils.sha_string(a_text)
+ a_len = len(a_text)
+ # b_packed_stat = dirstate.pack_stat(os.stat('tree/b'))
+ # c_packed_stat = dirstate.pack_stat(os.stat('tree/b/c'))
+ c_text = t.get_bytes('b/c')
+ c_sha = osutils.sha_string(c_text)
+ c_len = len(c_text)
+ # d_packed_stat = dirstate.pack_stat(os.stat('tree/b/d'))
+ # e_packed_stat = dirstate.pack_stat(os.stat('tree/b/d/e'))
+ e_text = t.get_bytes('b/d/e')
+ e_sha = osutils.sha_string(e_text)
+ e_len = len(e_text)
+ b_c_text = t.get_bytes('b-c')
+ b_c_sha = osutils.sha_string(b_c_text)
+ b_c_len = len(b_c_text)
+ # f_packed_stat = dirstate.pack_stat(os.stat('tree/f'))
+ f_text = t.get_bytes('f')
+ f_sha = osutils.sha_string(f_text)
+ f_len = len(f_text)
+ null_stat = dirstate.DirState.NULLSTAT
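+        # Each expected entry carries two detail tuples: tree 0 (the working
+        # tree, not yet statted, hence the empty sha and NULLSTAT) followed by
+        # tree 1 (the 'rev-1' basis tree, with the real sha1 and length).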
+ expected = {
+ '':(('', '', 'TREE_ROOT'), [
+ ('d', '', 0, False, null_stat),
+ ('d', '', 0, False, revision_id),
+ ]),
+ 'a':(('', 'a', 'a-id'), [
+ ('f', '', 0, False, null_stat),
+ ('f', a_sha, a_len, False, revision_id),
+ ]),
+ 'b':(('', 'b', 'b-id'), [
+ ('d', '', 0, False, null_stat),
+ ('d', '', 0, False, revision_id),
+ ]),
+ 'b/c':(('b', 'c', 'c-id'), [
+ ('f', '', 0, False, null_stat),
+ ('f', c_sha, c_len, False, revision_id),
+ ]),
+ 'b/d':(('b', 'd', 'd-id'), [
+ ('d', '', 0, False, null_stat),
+ ('d', '', 0, False, revision_id),
+ ]),
+ 'b/d/e':(('b/d', 'e', 'e-id'), [
+ ('f', '', 0, False, null_stat),
+ ('f', e_sha, e_len, False, revision_id),
+ ]),
+ 'b-c':(('', 'b-c', 'b-c-id'), [
+ ('f', '', 0, False, null_stat),
+ ('f', b_c_sha, b_c_len, False, revision_id),
+ ]),
+ 'f':(('', 'f', 'f-id'), [
+ ('f', '', 0, False, null_stat),
+ ('f', f_sha, f_len, False, revision_id),
+ ]),
+ }
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ try:
+ state.save()
+ finally:
+ state.unlock()
+ # Use a different object, to make sure nothing is pre-cached in memory.
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ self.addCleanup(state.unlock)
+ self.assertEqual(dirstate.DirState.NOT_IN_MEMORY,
+ state._dirblock_state)
+        # This code is only really tested if we actually have to make more
+ # than one read, so set the page size to something smaller.
+ # We want it to contain about 2.2 records, so that we have a couple
+ # records that we can read per attempt
+ state._bisect_page_size = 200
+ return tree, state, expected
+
+ def create_duplicated_dirstate(self):
+        """Create a dirstate with deleted and added entries.
+
+        This grabs a basic_dirstate, and then removes and re-adds every entry
+        with a new file id.
+ """
+ tree, state, expected = self.create_basic_dirstate()
+ # Now we will just remove and add every file so we get an extra entry
+ # per entry. Unversion in reverse order so we handle subdirs
+ tree.unversion(['f-id', 'b-c-id', 'e-id', 'd-id', 'c-id', 'b-id', 'a-id'])
+ tree.add(['a', 'b', 'b/c', 'b/d', 'b/d/e', 'b-c', 'f'],
+ ['a-id2', 'b-id2', 'c-id2', 'd-id2', 'e-id2', 'b-c-id2', 'f-id2'])
+
+ # Update the expected dictionary.
+ for path in ['a', 'b', 'b/c', 'b/d', 'b/d/e', 'b-c', 'f']:
+ orig = expected[path]
+ path2 = path + '2'
+ # This record was deleted in the current tree
+ expected[path] = (orig[0], [dirstate.DirState.NULL_PARENT_DETAILS,
+ orig[1][1]])
+ new_key = (orig[0][0], orig[0][1], orig[0][2]+'2')
+ # And didn't exist in the basis tree
+ expected[path2] = (new_key, [orig[1][0],
+ dirstate.DirState.NULL_PARENT_DETAILS])
+
+        # We will replace the 'dirstate' file underneath 'state', but that is
+        # okay as long as we unlock 'state' first.
+ state.unlock()
+ try:
+ new_state = dirstate.DirState.from_tree(tree, 'dirstate')
+ try:
+ new_state.save()
+ finally:
+ new_state.unlock()
+ finally:
+ # But we need to leave state in a read-lock because we already have
+ # a cleanup scheduled
+ state.lock_read()
+ return tree, state, expected
+
+ def create_renamed_dirstate(self):
+ """Create a dirstate with a few internal renames.
+
+ This takes the basic dirstate, and moves the paths around.
+ """
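+        # In the expected entries below, minikind 'r' marks a relocated entry:
+        # its fingerprint is the path the content now lives at in that tree,
+        # e.g. 'a' in the working tree points at 'b/g' and vice versa.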
+ tree, state, expected = self.create_basic_dirstate()
+ # Rename a file
+ tree.rename_one('a', 'b/g')
+ # And a directory
+ tree.rename_one('b/d', 'h')
+
+ old_a = expected['a']
+ expected['a'] = (old_a[0], [('r', 'b/g', 0, False, ''), old_a[1][1]])
+ expected['b/g'] = (('b', 'g', 'a-id'), [old_a[1][0],
+ ('r', 'a', 0, False, '')])
+ old_d = expected['b/d']
+ expected['b/d'] = (old_d[0], [('r', 'h', 0, False, ''), old_d[1][1]])
+ expected['h'] = (('', 'h', 'd-id'), [old_d[1][0],
+ ('r', 'b/d', 0, False, '')])
+
+ old_e = expected['b/d/e']
+ expected['b/d/e'] = (old_e[0], [('r', 'h/e', 0, False, ''),
+ old_e[1][1]])
+ expected['h/e'] = (('h', 'e', 'e-id'), [old_e[1][0],
+ ('r', 'b/d/e', 0, False, '')])
+
+ state.unlock()
+ try:
+ new_state = dirstate.DirState.from_tree(tree, 'dirstate')
+ try:
+ new_state.save()
+ finally:
+ new_state.unlock()
+ finally:
+ state.lock_read()
+ return tree, state, expected
+
+
+class TestTreeToDirState(TestCaseWithDirState):
+
+ def test_empty_to_dirstate(self):
+ """We should be able to create a dirstate for an empty tree."""
+ # There are no files on disk and no parents
+ tree = self.make_branch_and_tree('tree')
+ expected_result = ([], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ])])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ state._validate()
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_1_parents_empty_to_dirstate(self):
+ # create a parent by doing a commit
+ tree = self.make_branch_and_tree('tree')
+ rev_id = tree.commit('first post').encode('utf8')
+ root_stat_pack = dirstate.pack_stat(os.stat(tree.basedir))
+ expected_result = ([rev_id], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ('d', '', 0, False, rev_id), # first parent details
+ ])])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ self.check_state_with_reopen(expected_result, state)
+ state.lock_read()
+ try:
+ state._validate()
+ finally:
+ state.unlock()
+
+ def test_2_parents_empty_to_dirstate(self):
+ # create a parent by doing a commit
+ tree = self.make_branch_and_tree('tree')
+ rev_id = tree.commit('first post')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ rev_id2 = tree2.commit('second post', allow_pointless=True)
+ tree.merge_from_branch(tree2.branch)
+ expected_result = ([rev_id, rev_id2], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ('d', '', 0, False, rev_id), # first parent details
+ ('d', '', 0, False, rev_id), # second parent details
+ ])])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ self.check_state_with_reopen(expected_result, state)
+ state.lock_read()
+ try:
+ state._validate()
+ finally:
+ state.unlock()
+
+ def test_empty_unknowns_are_ignored_to_dirstate(self):
+ """We should be able to create a dirstate for an empty tree."""
+ # There are no files on disk and no parents
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/unknown'])
+ expected_result = ([], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ])])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ self.check_state_with_reopen(expected_result, state)
+
+ def get_tree_with_a_file(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a file'])
+ tree.add('a file', 'a-file-id')
+ return tree
+
+ def test_non_empty_no_parents_to_dirstate(self):
+ """We should be able to create a dirstate for an empty tree."""
+ # There are files on disk and no parents
+ tree = self.get_tree_with_a_file()
+ expected_result = ([], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ]),
+ (('', 'a file', 'a-file-id'), # common
+ [('f', '', 0, False, dirstate.DirState.NULLSTAT), # current
+ ]),
+ ])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_1_parents_not_empty_to_dirstate(self):
+ # create a parent by doing a commit
+ tree = self.get_tree_with_a_file()
+ rev_id = tree.commit('first post').encode('utf8')
+        # change the current content to be different; this will alter stat,
+        # sha and length:
+ self.build_tree_contents([('tree/a file', 'new content\n')])
+ expected_result = ([rev_id], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ('d', '', 0, False, rev_id), # first parent details
+ ]),
+ (('', 'a file', 'a-file-id'), # common
+ [('f', '', 0, False, dirstate.DirState.NULLSTAT), # current
+ ('f', 'c3ed76e4bfd45ff1763ca206055bca8e9fc28aa8', 24, False,
+ rev_id), # first parent
+ ]),
+ ])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_2_parents_not_empty_to_dirstate(self):
+ # create a parent by doing a commit
+ tree = self.get_tree_with_a_file()
+ rev_id = tree.commit('first post').encode('utf8')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+        # change the current content to be different; this will alter stat,
+        # sha and length:
+ self.build_tree_contents([('tree2/a file', 'merge content\n')])
+ rev_id2 = tree2.commit('second post').encode('utf8')
+ tree.merge_from_branch(tree2.branch)
+        # change the current content to be different; this will alter stat,
+        # sha and length again, giving us three distinct values:
+ self.build_tree_contents([('tree/a file', 'new content\n')])
+ expected_result = ([rev_id, rev_id2], [
+ (('', '', tree.get_root_id()), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ('d', '', 0, False, rev_id), # first parent details
+ ('d', '', 0, False, rev_id), # second parent details
+ ]),
+ (('', 'a file', 'a-file-id'), # common
+ [('f', '', 0, False, dirstate.DirState.NULLSTAT), # current
+ ('f', 'c3ed76e4bfd45ff1763ca206055bca8e9fc28aa8', 24, False,
+ rev_id), # first parent
+ ('f', '314d796174c9412647c3ce07dfb5d36a94e72958', 14, False,
+ rev_id2), # second parent
+ ]),
+ ])
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_colliding_fileids(self):
+ # test insertion of parents creating several entries at the same path.
+ # we used to have a bug where they could cause the dirstate to break
+ # its ordering invariants.
+ # create some trees to test from
+ parents = []
+ for i in range(7):
+ tree = self.make_branch_and_tree('tree%d' % i)
+ self.build_tree(['tree%d/name' % i,])
+ tree.add(['name'], ['file-id%d' % i])
+ revision_id = 'revid-%d' % i
+ tree.commit('message', rev_id=revision_id)
+ parents.append((revision_id,
+ tree.branch.repository.revision_tree(revision_id)))
+ # now fold these trees into a dirstate
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_parent_trees(parents, [])
+ state._validate()
+ finally:
+ state.unlock()
+
+
+class TestDirStateOnFile(TestCaseWithDirState):
+
+ def create_updated_dirstate(self):
+ self.build_tree(['a-file'])
+ tree = self.make_branch_and_tree('.')
+ tree.add(['a-file'], ['a-id'])
+ tree.commit('add a-file')
+ # Save and unlock the state, re-open it in readonly mode
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ state.save()
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ return state
+
+ def test_construct_with_path(self):
+ tree = self.make_branch_and_tree('tree')
+ state = dirstate.DirState.from_tree(tree, 'dirstate.from_tree')
+ # we want to be able to get the lines of the dirstate that we will
+ # write to disk.
+ lines = state.get_lines()
+ state.unlock()
+ self.build_tree_contents([('dirstate', ''.join(lines))])
+ # get a state object
+ # no parents, default tree content
+ expected_result = ([], [
+ (('', '', tree.get_root_id()), # common details
+             # current tree details; the new from_tree skips statting because
+             # it uses set_state_from_inventory, and thus depends on the
+             # inventory state.
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT),
+ ])
+ ])
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_write() # check_state_with_reopen will save() and unlock it
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_can_save_clean_on_file(self):
+ tree = self.make_branch_and_tree('tree')
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ try:
+ # doing a save should work here as there have been no changes.
+ state.save()
+ # TODO: stat it and check it hasn't changed; may require waiting
+ # for the state accuracy window.
+ finally:
+ state.unlock()
+
+ def test_can_save_in_read_lock(self):
+ state = self.create_updated_dirstate()
+ try:
+ entry = state._get_entry(0, path_utf8='a-file')
+ # The current size should be 0 (default)
+ self.assertEqual(0, entry[1][0][2])
+ # We should have a real entry.
+ self.assertNotEqual((None, None), entry)
+ # Set the cutoff-time into the future, so things look cacheable
+ state._sha_cutoff_time()
+ state._cutoff_time += 10.0
+ st = os.lstat('a-file')
+ sha1sum = dirstate.update_entry(state, entry, 'a-file', st)
+ # We updated the current sha1sum because the file is cacheable
+ self.assertEqual('ecc5374e9ed82ad3ea3b4d452ea995a5fd3e70e3',
+ sha1sum)
+
+ # The dirblock has been updated
+ self.assertEqual(st.st_size, entry[1][0][2])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+
+ del entry
+ # Now, since we are the only one holding a lock, we should be able
+ # to save and have it written to disk
+ state.save()
+ finally:
+ state.unlock()
+
+ # Re-open the file, and ensure that the state has been updated.
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ entry = state._get_entry(0, path_utf8='a-file')
+ self.assertEqual(st.st_size, entry[1][0][2])
+ finally:
+ state.unlock()
+
+ def test_save_fails_quietly_if_locked(self):
+ """If dirstate is locked, save will fail without complaining."""
+ state = self.create_updated_dirstate()
+ try:
+ entry = state._get_entry(0, path_utf8='a-file')
+ # No cached sha1 yet.
+ self.assertEqual('', entry[1][0][1])
+ # Set the cutoff-time into the future, so things look cacheable
+ state._sha_cutoff_time()
+ state._cutoff_time += 10.0
+ st = os.lstat('a-file')
+ sha1sum = dirstate.update_entry(state, entry, 'a-file', st)
+ self.assertEqual('ecc5374e9ed82ad3ea3b4d452ea995a5fd3e70e3',
+ sha1sum)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+
+ # Now, before we try to save, grab another dirstate, and take out a
+ # read lock.
+ # TODO: jam 20070315 Ideally this would be locked by another
+ # process. To make sure the file is really OS locked.
+ state2 = dirstate.DirState.on_file('dirstate')
+ state2.lock_read()
+ try:
+ # This won't actually write anything, because it couldn't grab
+ # a write lock. But it shouldn't raise an error, either.
+ # TODO: jam 20070315 We should probably distinguish between
+ # being dirty because of 'update_entry'. And dirty
+ # because of real modification. So that save() *does*
+ # raise a real error if it fails when we have real
+ # modifications.
+ state.save()
+ finally:
+ state2.unlock()
+ finally:
+ state.unlock()
+
+ # The file on disk should not be modified.
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ entry = state._get_entry(0, path_utf8='a-file')
+ self.assertEqual('', entry[1][0][1])
+ finally:
+ state.unlock()
+
+ def test_save_refuses_if_changes_aborted(self):
+ self.build_tree(['a-file', 'a-dir/'])
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ # No stat and no sha1 sum.
+ state.add('a-file', 'a-file-id', 'file', None, '')
+ state.save()
+ finally:
+ state.unlock()
+
+ # The dirstate should include TREE_ROOT and 'a-file' and nothing else
+ expected_blocks = [
+ ('', [(('', '', 'TREE_ROOT'),
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT)])]),
+ ('', [(('', 'a-file', 'a-file-id'),
+ [('f', '', 0, False, dirstate.DirState.NULLSTAT)])]),
+ ]
+
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_write()
+ try:
+ state._read_dirblocks_if_needed()
+ self.assertEqual(expected_blocks, state._dirblocks)
+
+ # Now modify the state, but mark it as inconsistent
+ state.add('a-dir', 'a-dir-id', 'directory', None, '')
+ state._changes_aborted = True
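+            # With _changes_aborted set, save() should quietly skip writing,
+            # so the file on disk keeps the original expected_blocks checked
+            # below.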
+ state.save()
+ finally:
+ state.unlock()
+
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ state._read_dirblocks_if_needed()
+ self.assertEqual(expected_blocks, state._dirblocks)
+ finally:
+ state.unlock()
+
+
+class TestDirStateInitialize(TestCaseWithDirState):
+
+ def test_initialize(self):
+ expected_result = ([], [
+ (('', '', 'TREE_ROOT'), # common details
+ [('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ])
+ ])
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ self.assertIsInstance(state, dirstate.DirState)
+ lines = state.get_lines()
+ finally:
+ state.unlock()
+ # On win32 you can't read from a locked file, even within the same
+ # process. So we have to unlock and release before we check the file
+ # contents.
+ self.assertFileEqual(''.join(lines), 'dirstate')
+ state.lock_read() # check_state_with_reopen will unlock
+ self.check_state_with_reopen(expected_result, state)
+
+
+class TestDirStateManipulations(TestCaseWithDirState):
+
+ def make_minimal_tree(self):
+ tree1 = self.make_branch_and_memory_tree('tree1')
+ tree1.lock_write()
+ self.addCleanup(tree1.unlock)
+ tree1.add('')
+ revid1 = tree1.commit('foo')
+ return tree1, revid1
+
+ def test_update_minimal_updates_id_index(self):
+ state = self.create_dirstate_with_root_and_subdir()
+ self.addCleanup(state.unlock)
+ id_index = state._get_id_index()
+ self.assertEqual(['a-root-value', 'subdir-id'], sorted(id_index))
+ state.add('file-name', 'file-id', 'file', None, '')
+ self.assertEqual(['a-root-value', 'file-id', 'subdir-id'],
+ sorted(id_index))
+ state.update_minimal(('', 'new-name', 'file-id'), 'f',
+ path_utf8='new-name')
+ self.assertEqual(['a-root-value', 'file-id', 'subdir-id'],
+ sorted(id_index))
+ self.assertEqual([('', 'new-name', 'file-id')],
+ sorted(id_index['file-id']))
+ state._validate()
+
+ def test_set_state_from_inventory_no_content_no_parents(self):
+ # setting the current inventory is a slow but important api to support.
+ tree1, revid1 = self.make_minimal_tree()
+ inv = tree1.root_inventory
+ root_id = inv.path2id('')
+ expected_result = [], [
+ (('', '', root_id), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT)])]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_state_from_inventory(inv)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._header_state)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._dirblock_state)
+ except:
+ state.unlock()
+ raise
+ else:
+ # This will unlock it
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_set_state_from_scratch_no_parents(self):
+ tree1, revid1 = self.make_minimal_tree()
+ inv = tree1.root_inventory
+ root_id = inv.path2id('')
+ expected_result = [], [
+ (('', '', root_id), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT)])]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_state_from_scratch(inv, [], [])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._header_state)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._dirblock_state)
+ except:
+ state.unlock()
+ raise
+ else:
+ # This will unlock it
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_set_state_from_scratch_identical_parent(self):
+ tree1, revid1 = self.make_minimal_tree()
+ inv = tree1.root_inventory
+ root_id = inv.path2id('')
+ rev_tree1 = tree1.branch.repository.revision_tree(revid1)
+ d_entry = ('d', '', 0, False, dirstate.DirState.NULLSTAT)
+ parent_entry = ('d', '', 0, False, revid1)
+ expected_result = [revid1], [
+ (('', '', root_id), [d_entry, parent_entry])]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_state_from_scratch(inv, [(revid1, rev_tree1)], [])
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._header_state)
+ self.assertEqual(dirstate.DirState.IN_MEMORY_MODIFIED,
+ state._dirblock_state)
+ except:
+ state.unlock()
+ raise
+ else:
+ # This will unlock it
+ self.check_state_with_reopen(expected_result, state)
+
+ def test_set_state_from_inventory_preserves_hashcache(self):
+ # https://bugs.launchpad.net/bzr/+bug/146176
+ # set_state_from_inventory should preserve the stat and hash value for
+ # workingtree files that are not changed by the inventory.
+
+ tree = self.make_branch_and_tree('.')
+ # depends on the default format using dirstate...
+ tree.lock_write()
+ try:
+ # make a dirstate with some valid hashcache data
+ # file on disk, but that's not needed for this test
+ foo_contents = 'contents of foo'
+ self.build_tree_contents([('foo', foo_contents)])
+ tree.add('foo', 'foo-id')
+
+ foo_stat = os.stat('foo')
+ foo_packed = dirstate.pack_stat(foo_stat)
+ foo_sha = osutils.sha_string(foo_contents)
+ foo_size = len(foo_contents)
+
+ # should not be cached yet, because the file's too fresh
+ self.assertEqual(
+ (('', 'foo', 'foo-id',),
+ [('f', '', 0, False, dirstate.DirState.NULLSTAT)]),
+ tree._dirstate._get_entry(0, 'foo-id'))
+ # poke in some hashcache information - it wouldn't normally be
+ # stored because it's too fresh
+ tree._dirstate.update_minimal(
+ ('', 'foo', 'foo-id'),
+ 'f', False, foo_sha, foo_packed, foo_size, 'foo')
+ # now should be cached
+ self.assertEqual(
+ (('', 'foo', 'foo-id',),
+ [('f', foo_sha, foo_size, False, foo_packed)]),
+ tree._dirstate._get_entry(0, 'foo-id'))
+
+ # extract the inventory, and add something to it
+ inv = tree._get_root_inventory()
+ # should see the file we poked in...
+ self.assertTrue(inv.has_id('foo-id'))
+ self.assertTrue(inv.has_filename('foo'))
+ inv.add_path('bar', 'file', 'bar-id')
+ tree._dirstate._validate()
+ # this used to cause it to lose its hashcache
+ tree._dirstate.set_state_from_inventory(inv)
+ tree._dirstate._validate()
+ finally:
+ tree.unlock()
+
+ tree.lock_read()
+ try:
+ # now check that the state still has the original hashcache value
+ state = tree._dirstate
+ state._validate()
+ foo_tuple = state._get_entry(0, path_utf8='foo')
+ self.assertEqual(
+ (('', 'foo', 'foo-id',),
+ [('f', foo_sha, len(foo_contents), False,
+ dirstate.pack_stat(foo_stat))]),
+ foo_tuple)
+ finally:
+ tree.unlock()
+
+ def test_set_state_from_inventory_mixed_paths(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ self.build_tree(['tree1/a/', 'tree1/a/b/', 'tree1/a-b/',
+ 'tree1/a/b/foo', 'tree1/a-b/bar'])
+ tree1.lock_write()
+ try:
+ tree1.add(['a', 'a/b', 'a-b', 'a/b/foo', 'a-b/bar'],
+ ['a-id', 'b-id', 'a-b-id', 'foo-id', 'bar-id'])
+ tree1.commit('rev1', rev_id='rev1')
+ root_id = tree1.get_root_id()
+ inv = tree1.root_inventory
+ finally:
+ tree1.unlock()
+ expected_result1 = [('', '', root_id, 'd'),
+ ('', 'a', 'a-id', 'd'),
+ ('', 'a-b', 'a-b-id', 'd'),
+ ('a', 'b', 'b-id', 'd'),
+ ('a/b', 'foo', 'foo-id', 'f'),
+ ('a-b', 'bar', 'bar-id', 'f'),
+ ]
+ expected_result2 = [('', '', root_id, 'd'),
+ ('', 'a', 'a-id', 'd'),
+ ('', 'a-b', 'a-b-id', 'd'),
+ ('a-b', 'bar', 'bar-id', 'f'),
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_state_from_inventory(inv)
+ values = []
+ for entry in state._iter_entries():
+ values.append(entry[0] + entry[1][0][:1])
+ self.assertEqual(expected_result1, values)
+ del inv['b-id']
+ state.set_state_from_inventory(inv)
+ values = []
+ for entry in state._iter_entries():
+ values.append(entry[0] + entry[1][0][:1])
+ self.assertEqual(expected_result2, values)
+ finally:
+ state.unlock()
+
+ def test_set_path_id_no_parents(self):
+        """The id of a path can be changed trivially with no parents."""
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ # check precondition to be sure the state does change appropriately.
+ root_entry = (('', '', 'TREE_ROOT'), [('d', '', 0, False, 'x'*32)])
+ self.assertEqual([root_entry], list(state._iter_entries()))
+ self.assertEqual(root_entry, state._get_entry(0, path_utf8=''))
+ self.assertEqual(root_entry,
+ state._get_entry(0, fileid_utf8='TREE_ROOT'))
+ self.assertEqual((None, None),
+ state._get_entry(0, fileid_utf8='second-root-id'))
+ state.set_path_id('', 'second-root-id')
+ new_root_entry = (('', '', 'second-root-id'),
+ [('d', '', 0, False, 'x'*32)])
+ expected_rows = [new_root_entry]
+ self.assertEqual(expected_rows, list(state._iter_entries()))
+ self.assertEqual(new_root_entry, state._get_entry(0, path_utf8=''))
+ self.assertEqual(new_root_entry,
+ state._get_entry(0, fileid_utf8='second-root-id'))
+ self.assertEqual((None, None),
+ state._get_entry(0, fileid_utf8='TREE_ROOT'))
+ # should work across save too
+ state.save()
+ finally:
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ state._validate()
+ self.assertEqual(expected_rows, list(state._iter_entries()))
+ finally:
+ state.unlock()
+
+ def test_set_path_id_with_parents(self):
+ """Set the root file id in a dirstate with parents"""
+ mt = self.make_branch_and_tree('mt')
+ # in case the default tree format uses a different root id
+ mt.set_root_id('TREE_ROOT')
+ mt.commit('foo', rev_id='parent-revid')
+ rt = mt.branch.repository.revision_tree('parent-revid')
+ state = dirstate.DirState.initialize('dirstate')
+ state._validate()
+ try:
+ state.set_parent_trees([('parent-revid', rt)], ghosts=[])
+ root_entry = (('', '', 'TREE_ROOT'),
+ [('d', '', 0, False, 'x'*32),
+ ('d', '', 0, False, 'parent-revid')])
+ self.assertEqual(root_entry, state._get_entry(0, path_utf8=''))
+ self.assertEqual(root_entry,
+ state._get_entry(0, fileid_utf8='TREE_ROOT'))
+ self.assertEqual((None, None),
+ state._get_entry(0, fileid_utf8='Asecond-root-id'))
+ state.set_path_id('', 'Asecond-root-id')
+ state._validate()
+ # now see that it is what we expected
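+            # minikind 'a' means absent in that tree: TREE_ROOT is gone from
+            # the working tree, and the new root id has no parent-tree entry.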
+ old_root_entry = (('', '', 'TREE_ROOT'),
+ [('a', '', 0, False, ''),
+ ('d', '', 0, False, 'parent-revid')])
+ new_root_entry = (('', '', 'Asecond-root-id'),
+ [('d', '', 0, False, ''),
+ ('a', '', 0, False, '')])
+ expected_rows = [new_root_entry, old_root_entry]
+ state._validate()
+ self.assertEqual(expected_rows, list(state._iter_entries()))
+ self.assertEqual(new_root_entry, state._get_entry(0, path_utf8=''))
+ self.assertEqual(old_root_entry, state._get_entry(1, path_utf8=''))
+ self.assertEqual((None, None),
+ state._get_entry(0, fileid_utf8='TREE_ROOT'))
+ self.assertEqual(old_root_entry,
+ state._get_entry(1, fileid_utf8='TREE_ROOT'))
+ self.assertEqual(new_root_entry,
+ state._get_entry(0, fileid_utf8='Asecond-root-id'))
+ self.assertEqual((None, None),
+ state._get_entry(1, fileid_utf8='Asecond-root-id'))
+ # should work across save too
+ state.save()
+ finally:
+ state.unlock()
+ # now flush & check we get the same
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ state._validate()
+ self.assertEqual(expected_rows, list(state._iter_entries()))
+ finally:
+ state.unlock()
+ # now change within an existing file-backed state
+ state.lock_write()
+ try:
+ state._validate()
+ state.set_path_id('', 'tree-root-2')
+ state._validate()
+ finally:
+ state.unlock()
+
+ def test_set_parent_trees_no_content(self):
+ # set_parent_trees is a slow but important api to support.
+ tree1 = self.make_branch_and_memory_tree('tree1')
+ tree1.lock_write()
+ try:
+ tree1.add('')
+ revid1 = tree1.commit('foo')
+ finally:
+ tree1.unlock()
+ branch2 = tree1.branch.bzrdir.clone('tree2').open_branch()
+ tree2 = memorytree.MemoryTree.create_on_branch(branch2)
+ tree2.lock_write()
+ try:
+ revid2 = tree2.commit('foo')
+ root_id = tree2.get_root_id()
+ finally:
+ tree2.unlock()
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_path_id('', root_id)
+ state.set_parent_trees(
+ ((revid1, tree1.branch.repository.revision_tree(revid1)),
+ (revid2, tree2.branch.repository.revision_tree(revid2)),
+ ('ghost-rev', None)),
+ ['ghost-rev'])
+ # check we can reopen and use the dirstate after setting parent
+ # trees.
+ state._validate()
+ state.save()
+ state._validate()
+ finally:
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_write()
+ try:
+ self.assertEqual([revid1, revid2, 'ghost-rev'],
+ state.get_parent_ids())
+ # iterating the entire state ensures that the state is parsable.
+ list(state._iter_entries())
+            # be sure that it sets, not appends - change it
+ state.set_parent_trees(
+ ((revid1, tree1.branch.repository.revision_tree(revid1)),
+ ('ghost-rev', None)),
+ ['ghost-rev'])
+ # and now put it back.
+ state.set_parent_trees(
+ ((revid1, tree1.branch.repository.revision_tree(revid1)),
+ (revid2, tree2.branch.repository.revision_tree(revid2)),
+ ('ghost-rev', tree2.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION))),
+ ['ghost-rev'])
+ self.assertEqual([revid1, revid2, 'ghost-rev'],
+ state.get_parent_ids())
+ # the ghost should be recorded as such by set_parent_trees.
+ self.assertEqual(['ghost-rev'], state.get_ghosts())
+ self.assertEqual(
+ [(('', '', root_id), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT),
+ ('d', '', 0, False, revid1),
+ ('d', '', 0, False, revid1)
+ ])],
+ list(state._iter_entries()))
+ finally:
+ state.unlock()
+
+ def test_set_parent_trees_file_missing_from_tree(self):
+ # Adding a parent tree may reference files not in the current state.
+ # They should get listed just once by id, even if they are in two
+ # separate trees.
+ # set_parent_trees is a slow but important api to support.
+ tree1 = self.make_branch_and_memory_tree('tree1')
+ tree1.lock_write()
+ try:
+ tree1.add('')
+ tree1.add(['a file'], ['file-id'], ['file'])
+ tree1.put_file_bytes_non_atomic('file-id', 'file-content')
+ revid1 = tree1.commit('foo')
+ finally:
+ tree1.unlock()
+ branch2 = tree1.branch.bzrdir.clone('tree2').open_branch()
+ tree2 = memorytree.MemoryTree.create_on_branch(branch2)
+ tree2.lock_write()
+ try:
+ tree2.put_file_bytes_non_atomic('file-id', 'new file-content')
+ revid2 = tree2.commit('foo')
+ root_id = tree2.get_root_id()
+ finally:
+ tree2.unlock()
+ # check the layout in memory
+ expected_result = [revid1.encode('utf8'), revid2.encode('utf8')], [
+ (('', '', root_id), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT),
+ ('d', '', 0, False, revid1.encode('utf8')),
+ ('d', '', 0, False, revid1.encode('utf8'))
+ ]),
+ (('', 'a file', 'file-id'), [
+ ('a', '', 0, False, ''),
+ ('f', '2439573625385400f2a669657a7db6ae7515d371', 12, False,
+ revid1.encode('utf8')),
+ ('f', '542e57dc1cda4af37cb8e55ec07ce60364bb3c7d', 16, False,
+ revid2.encode('utf8'))
+ ])
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.set_path_id('', root_id)
+ state.set_parent_trees(
+ ((revid1, tree1.branch.repository.revision_tree(revid1)),
+ (revid2, tree2.branch.repository.revision_tree(revid2)),
+ ), [])
+ except:
+ state.unlock()
+ raise
+ else:
+ # check_state_with_reopen will unlock
+ self.check_state_with_reopen(expected_result, state)
+
+ ### add a path via _set_data - so we don't need delta work, just
+ # raw data in, and ensure that it comes out via get_lines happily.
+
+ def test_add_path_to_root_no_parents_all_data(self):
+ # The most trivial addition of a path is when there are no parents and
+ # it's in the root and all data about the file is supplied
+ self.build_tree(['a file'])
+ stat = os.lstat('a file')
+ # the 1*20 is the sha1 pretend value.
+ state = dirstate.DirState.initialize('dirstate')
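+ # Each expected entry is (key, details): the key is a
+ # (dirname, basename, file-id) tuple and each per-tree detail is
+ # (minikind, fingerprint, size, executable, packed-stat-or-tree-data).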
+ expected_entries = [
+ (('', '', 'TREE_ROOT'), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ]),
+ (('', 'a file', 'a-file-id'), [
+ ('f', '1'*20, 19, False, dirstate.pack_stat(stat)), # current tree
+ ]),
+ ]
+ try:
+ state.add('a file', 'a-file-id', 'file', stat, '1'*20)
+ # having added it, it should be in the output of iter_entries.
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+ # saving and reloading should not affect this.
+ state.save()
+ finally:
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ self.addCleanup(state.unlock)
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+
+ def test_add_path_to_unversioned_directory(self):
+ """Adding a path to an unversioned directory should error.
+
+ This is a duplicate of TestWorkingTree.test_add_in_unversioned;
+ once dirstate is stable and merged with WorkingTree3, consider
+ removing this copy of the test.
+ """
+ self.build_tree(['unversioned/', 'unversioned/a file'])
+ state = dirstate.DirState.initialize('dirstate')
+ self.addCleanup(state.unlock)
+ self.assertRaises(errors.NotVersionedError, state.add,
+ 'unversioned/a file', 'a-file-id', 'file', None, None)
+
+ def test_add_directory_to_root_no_parents_all_data(self):
+ # The most trivial addition of a dir is when there are no parents and
+ # it's in the root and all data about the file is supplied
+ self.build_tree(['a dir/'])
+ stat = os.lstat('a dir')
+ expected_entries = [
+ (('', '', 'TREE_ROOT'), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ]),
+ (('', 'a dir', 'a dir id'), [
+ ('d', '', 0, False, dirstate.pack_stat(stat)), # current tree
+ ]),
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.add('a dir', 'a dir id', 'directory', stat, None)
+ # having added it, it should be in the output of iter_entries.
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+ # saving and reloading should not affect this.
+ state.save()
+ finally:
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ self.addCleanup(state.unlock)
+ state._validate()
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+
+ def _test_add_symlink_to_root_no_parents_all_data(self, link_name, target):
+ # The most trivial addition of a symlink is when there are no parents and
+ # it's in the root and all data about the file is supplied
+ # bzr doesn't support fake symlinks on Windows, yet.
+ self.requireFeature(features.SymlinkFeature)
+ os.symlink(target, link_name)
+ stat = os.lstat(link_name)
+ expected_entries = [
+ (('', '', 'TREE_ROOT'), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ]),
+ (('', link_name.encode('UTF-8'), 'a link id'), [
+ ('l', target.encode('UTF-8'), stat[6],
+ False, dirstate.pack_stat(stat)), # current tree
+ ]),
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.add(link_name, 'a link id', 'symlink', stat,
+ target.encode('UTF-8'))
+ # having added it, it should be in the output of iter_entries.
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+ # saving and reloading should not affect this.
+ state.save()
+ finally:
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ self.addCleanup(state.unlock)
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+
+ def test_add_symlink_to_root_no_parents_all_data(self):
+ self._test_add_symlink_to_root_no_parents_all_data('a link', 'target')
+
+ def test_add_symlink_unicode_to_root_no_parents_all_data(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self._test_add_symlink_to_root_no_parents_all_data(
+ u'\N{Euro Sign}link', u'targ\N{Euro Sign}et')
+
+ def test_add_directory_and_child_no_parents_all_data(self):
+ # after adding a directory, we should be able to add children to it.
+ self.build_tree(['a dir/', 'a dir/a file'])
+ dirstat = os.lstat('a dir')
+ filestat = os.lstat('a dir/a file')
+ expected_entries = [
+ (('', '', 'TREE_ROOT'), [
+ ('d', '', 0, False, dirstate.DirState.NULLSTAT), # current tree
+ ]),
+ (('', 'a dir', 'a dir id'), [
+ ('d', '', 0, False, dirstate.pack_stat(dirstat)), # current tree
+ ]),
+ (('a dir', 'a file', 'a-file-id'), [
+ ('f', '1'*20, 25, False,
+ dirstate.pack_stat(filestat)), # current tree details
+ ]),
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state.add('a dir', 'a dir id', 'directory', dirstat, None)
+ state.add('a dir/a file', 'a-file-id', 'file', filestat, '1'*20)
+ # having added it, it should be in the output of iter_entries.
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+ # saving and reloading should not affect this.
+ state.save()
+ finally:
+ state.unlock()
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ self.addCleanup(state.unlock)
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+
+ def test_add_tree_reference(self):
+ # make a dirstate and add a tree reference
+ state = dirstate.DirState.initialize('dirstate')
+ expected_entry = (
+ ('', 'subdir', 'subdir-id'),
+ [('t', 'subtree-123123', 0, False,
+ 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')],
+ )
+ try:
+ state.add('subdir', 'subdir-id', 'tree-reference', None, 'subtree-123123')
+ entry = state._get_entry(0, 'subdir-id', 'subdir')
+ self.assertEqual(entry, expected_entry)
+ state._validate()
+ state.save()
+ finally:
+ state.unlock()
+ # now check we can read it back
+ state.lock_read()
+ self.addCleanup(state.unlock)
+ state._validate()
+ entry2 = state._get_entry(0, 'subdir-id', 'subdir')
+ self.assertEqual(entry, entry2)
+ self.assertEqual(entry, expected_entry)
+ # and lookup by id should work too
+ entry2 = state._get_entry(0, fileid_utf8='subdir-id')
+ self.assertEqual(expected_entry, entry2)
+
+ def test_add_forbidden_names(self):
+ state = dirstate.DirState.initialize('dirstate')
+ self.addCleanup(state.unlock)
+ self.assertRaises(errors.BzrError,
+ state.add, '.', 'ass-id', 'directory', None, None)
+ self.assertRaises(errors.BzrError,
+ state.add, '..', 'ass-id', 'directory', None, None)
+
+ def test_set_state_with_rename_b_a_bug_395556(self):
+ # bug 395556 uncovered a bug where the dirstate ends up with a false
+ # relocation record - in a tree with no parents there should be no
+ # absent or relocated records. This then leads to further corruption
+ # when a commit occurs, as the incorrect relocation gathers an
+ # incorrect absent in tree 1, and future changes go to pot.
+ tree1 = self.make_branch_and_tree('tree1')
+ self.build_tree(['tree1/b'])
+ tree1.lock_write()
+ try:
+ tree1.add(['b'], ['b-id'])
+ root_id = tree1.get_root_id()
+ inv = tree1.root_inventory
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ # Set the initial state with 'b'
+ state.set_state_from_inventory(inv)
+ inv.rename('b-id', root_id, 'a')
+ # Set the new state with 'a', which currently corrupts.
+ state.set_state_from_inventory(inv)
+ expected_result1 = [('', '', root_id, 'd'),
+ ('', 'a', 'b-id', 'f'),
+ ]
+ values = []
+ for entry in state._iter_entries():
+ values.append(entry[0] + entry[1][0][:1])
+ self.assertEqual(expected_result1, values)
+ finally:
+ state.unlock()
+ finally:
+ tree1.unlock()
+
+
+class TestDirStateHashUpdates(TestCaseWithDirState):
+
+ def do_update_entry(self, state, path):
+ entry = state._get_entry(0, path_utf8=path)
+ stat = os.lstat(path)
+ return dirstate.update_entry(state, entry, os.path.abspath(path), stat)
+
+ def _read_state_content(self, state):
+ """Read the content of the dirstate file.
+
+ On Windows when one process locks a file, you can't even open() the
+ file in another process (to read it). So we go directly to
+ state._state_file. This should always be the exact disk representation,
+ so it is reasonable to do so.
+ DirState also always seeks before reading, so it doesn't matter if we
+ bump the file pointer.
+ """
+ state._state_file.seek(0)
+ return state._state_file.read()
+
+ def test_worth_saving_limit_avoids_writing(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['c', 'd'])
+ tree.lock_write()
+ tree.add(['c', 'd'], ['c-id', 'd-id'])
+ tree.commit('add c and d')
+ state = InstrumentedDirState.on_file(tree.current_dirstate()._filename,
+ worth_saving_limit=2)
+ tree.unlock()
+ state.lock_write()
+ self.addCleanup(state.unlock)
+ state._read_dirblocks_if_needed()
+ state.adjust_time(+20) # Allow things to be cached
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ content = self._read_state_content(state)
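+ # With worth_saving_limit=2, a single hash update should not be
+ # considered worth flushing to disk, so save() leaves the file contents
+ # untouched until a second change accumulates.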
+ self.do_update_entry(state, 'c')
+ self.assertEqual(1, len(state._known_hash_changes))
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+ state.save()
+ # It should not have set the state to IN_MEMORY_UNMODIFIED because the
+ # hash values haven't been written out.
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+ self.assertEqual(content, self._read_state_content(state))
+ self.assertEqual(dirstate.DirState.IN_MEMORY_HASH_MODIFIED,
+ state._dirblock_state)
+ self.do_update_entry(state, 'd')
+ self.assertEqual(2, len(state._known_hash_changes))
+ state.save()
+ self.assertEqual(dirstate.DirState.IN_MEMORY_UNMODIFIED,
+ state._dirblock_state)
+ self.assertEqual(0, len(state._known_hash_changes))
+
+
+class TestGetLines(TestCaseWithDirState):
+
+ def test_get_line_with_2_rows(self):
+ state = self.create_dirstate_with_root_and_subdir()
+ try:
+ self.assertEqual(['#bazaar dirstate flat format 3\n',
+ 'crc32: 41262208\n',
+ 'num_entries: 2\n',
+ '0\x00\n\x00'
+ '0\x00\n\x00'
+ '\x00\x00a-root-value\x00'
+ 'd\x00\x000\x00n\x00AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk\x00\n\x00'
+ '\x00subdir\x00subdir-id\x00'
+ 'd\x00\x000\x00n\x00AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk\x00\n\x00'
+ ], state.get_lines())
+ finally:
+ state.unlock()
+
+ def test_entry_to_line(self):
+ state = self.create_dirstate_with_root()
+ try:
+ self.assertEqual(
+ '\x00\x00a-root-value\x00d\x00\x000\x00n'
+ '\x00AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk',
+ state._entry_to_line(state._dirblocks[0][1][0]))
+ finally:
+ state.unlock()
+
+ def test_entry_to_line_with_parent(self):
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ root_entry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat), # current tree details
+ # first: a pointer to the current location
+ ('a', 'dirname/basename', 0, False, ''),
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ self.assertEqual(
+ '\x00\x00a-root-value\x00'
+ 'd\x00\x000\x00n\x00AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk\x00'
+ 'a\x00dirname/basename\x000\x00n\x00',
+ state._entry_to_line(root_entry))
+ finally:
+ state.unlock()
+
+ def test_entry_to_line_with_two_parents_at_different_paths(self):
+ # / in the tree, at / in one parent and /dirname/basename in the other.
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ root_entry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat), # current tree details
+ ('d', '', 0, False, 'rev_id'), # first parent details
+ # second: a pointer to the current location
+ ('a', 'dirname/basename', 0, False, ''),
+ ]
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ self.assertEqual(
+ '\x00\x00a-root-value\x00'
+ 'd\x00\x000\x00n\x00AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk\x00'
+ 'd\x00\x000\x00n\x00rev_id\x00'
+ 'a\x00dirname/basename\x000\x00n\x00',
+ state._entry_to_line(root_entry))
+ finally:
+ state.unlock()
+
+ def test_iter_entries(self):
+ # we should be able to iterate the dirstate entries from end to end
+ # this is for get_lines to be easy to read.
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ dirblocks = []
+ root_entries = [(('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat), # current tree details
+ ])]
+ dirblocks.append(('', root_entries))
+ # add two entries in the root: a subdir and a file
+ subdir_entry = ('', 'subdir', 'subdir-id'), [
+ ('d', '', 0, False, packed_stat), # current tree details
+ ]
+ afile_entry = ('', 'afile', 'afile-id'), [
+ ('f', 'sha1value', 34, False, packed_stat), # current tree details
+ ]
+ dirblocks.append(('', [subdir_entry, afile_entry]))
+ # and one in subdir
+ file_entry2 = ('subdir', '2file', '2file-id'), [
+ ('f', 'sha1value', 23, False, packed_stat), # current tree details
+ ]
+ dirblocks.append(('subdir', [file_entry2]))
+ state = dirstate.DirState.initialize('dirstate')
+ try:
+ state._set_data([], dirblocks)
+ expected_entries = [root_entries[0], subdir_entry, afile_entry,
+ file_entry2]
+ self.assertEqual(expected_entries, list(state._iter_entries()))
+ finally:
+ state.unlock()
+
+
+class TestGetBlockRowIndex(TestCaseWithDirState):
+
+ def assertBlockRowIndexEqual(self, block_index, row_index, dir_present,
+ file_present, state, dirname, basename, tree_index):
+ self.assertEqual((block_index, row_index, dir_present, file_present),
+ state._get_block_entry_index(dirname, basename, tree_index))
+ if dir_present:
+ block = state._dirblocks[block_index]
+ self.assertEqual(dirname, block[0])
+ if dir_present and file_present:
+ row = state._dirblocks[block_index][1][row_index]
+ self.assertEqual(dirname, row[0][0])
+ self.assertEqual(basename, row[0][1])
+
+ def test_simple_structure(self):
+ state = self.create_dirstate_with_root_and_subdir()
+ self.addCleanup(state.unlock)
+ self.assertBlockRowIndexEqual(1, 0, True, True, state, '', 'subdir', 0)
+ self.assertBlockRowIndexEqual(1, 0, True, False, state, '', 'bdir', 0)
+ self.assertBlockRowIndexEqual(1, 1, True, False, state, '', 'zdir', 0)
+ self.assertBlockRowIndexEqual(2, 0, False, False, state, 'a', 'foo', 0)
+ self.assertBlockRowIndexEqual(2, 0, False, False, state,
+ 'subdir', 'foo', 0)
+
+ def test_complex_structure_exists(self):
+ state = self.create_complex_dirstate()
+ self.addCleanup(state.unlock)
+ # Make sure we can find everything that exists
+ self.assertBlockRowIndexEqual(0, 0, True, True, state, '', '', 0)
+ self.assertBlockRowIndexEqual(1, 0, True, True, state, '', 'a', 0)
+ self.assertBlockRowIndexEqual(1, 1, True, True, state, '', 'b', 0)
+ self.assertBlockRowIndexEqual(1, 2, True, True, state, '', 'c', 0)
+ self.assertBlockRowIndexEqual(1, 3, True, True, state, '', 'd', 0)
+ self.assertBlockRowIndexEqual(2, 0, True, True, state, 'a', 'e', 0)
+ self.assertBlockRowIndexEqual(2, 1, True, True, state, 'a', 'f', 0)
+ self.assertBlockRowIndexEqual(3, 0, True, True, state, 'b', 'g', 0)
+ self.assertBlockRowIndexEqual(3, 1, True, True, state,
+ 'b', 'h\xc3\xa5', 0)
+
+ def test_complex_structure_missing(self):
+ state = self.create_complex_dirstate()
+ self.addCleanup(state.unlock)
+ # Make sure things would be inserted in the right locations
+ # '_' comes before 'a'
+ self.assertBlockRowIndexEqual(0, 0, True, True, state, '', '', 0)
+ self.assertBlockRowIndexEqual(1, 0, True, False, state, '', '_', 0)
+ self.assertBlockRowIndexEqual(1, 1, True, False, state, '', 'aa', 0)
+ self.assertBlockRowIndexEqual(1, 4, True, False, state,
+ '', 'h\xc3\xa5', 0)
+ self.assertBlockRowIndexEqual(2, 0, False, False, state, '_', 'a', 0)
+ self.assertBlockRowIndexEqual(3, 0, False, False, state, 'aa', 'a', 0)
+ self.assertBlockRowIndexEqual(4, 0, False, False, state, 'bb', 'a', 0)
+ # This would be inserted between a/ and b/
+ self.assertBlockRowIndexEqual(3, 0, False, False, state, 'a/e', 'a', 0)
+ # Put at the end
+ self.assertBlockRowIndexEqual(4, 0, False, False, state, 'e', 'a', 0)
+
+
+class TestGetEntry(TestCaseWithDirState):
+
+ def assertEntryEqual(self, dirname, basename, file_id, state, path, index):
+ """Check that the right entry is returned for a request to getEntry."""
+ entry = state._get_entry(index, path_utf8=path)
+ if file_id is None:
+ self.assertEqual((None, None), entry)
+ else:
+ cur = entry[0]
+ self.assertEqual((dirname, basename, file_id), cur[:3])
+
+ def test_simple_structure(self):
+ state = self.create_dirstate_with_root_and_subdir()
+ self.addCleanup(state.unlock)
+ self.assertEntryEqual('', '', 'a-root-value', state, '', 0)
+ self.assertEntryEqual('', 'subdir', 'subdir-id', state, 'subdir', 0)
+ self.assertEntryEqual(None, None, None, state, 'missing', 0)
+ self.assertEntryEqual(None, None, None, state, 'missing/foo', 0)
+ self.assertEntryEqual(None, None, None, state, 'subdir/foo', 0)
+
+ def test_complex_structure_exists(self):
+ state = self.create_complex_dirstate()
+ self.addCleanup(state.unlock)
+ self.assertEntryEqual('', '', 'a-root-value', state, '', 0)
+ self.assertEntryEqual('', 'a', 'a-dir', state, 'a', 0)
+ self.assertEntryEqual('', 'b', 'b-dir', state, 'b', 0)
+ self.assertEntryEqual('', 'c', 'c-file', state, 'c', 0)
+ self.assertEntryEqual('', 'd', 'd-file', state, 'd', 0)
+ self.assertEntryEqual('a', 'e', 'e-dir', state, 'a/e', 0)
+ self.assertEntryEqual('a', 'f', 'f-file', state, 'a/f', 0)
+ self.assertEntryEqual('b', 'g', 'g-file', state, 'b/g', 0)
+ self.assertEntryEqual('b', 'h\xc3\xa5', 'h-\xc3\xa5-file', state,
+ 'b/h\xc3\xa5', 0)
+
+ def test_complex_structure_missing(self):
+ state = self.create_complex_dirstate()
+ self.addCleanup(state.unlock)
+ self.assertEntryEqual(None, None, None, state, '_', 0)
+ self.assertEntryEqual(None, None, None, state, '_\xc3\xa5', 0)
+ self.assertEntryEqual(None, None, None, state, 'a/b', 0)
+ self.assertEntryEqual(None, None, None, state, 'c/d', 0)
+
+ def test_get_entry_uninitialized(self):
+ """Calling get_entry will load data if it needs to"""
+ state = self.create_dirstate_with_root()
+ try:
+ state.save()
+ finally:
+ state.unlock()
+ del state
+ state = dirstate.DirState.on_file('dirstate')
+ state.lock_read()
+ try:
+ self.assertEqual(dirstate.DirState.NOT_IN_MEMORY,
+ state._header_state)
+ self.assertEqual(dirstate.DirState.NOT_IN_MEMORY,
+ state._dirblock_state)
+ self.assertEntryEqual('', '', 'a-root-value', state, '', 0)
+ finally:
+ state.unlock()
+
+
+class TestIterChildEntries(TestCaseWithDirState):
+
+ def create_dirstate_with_two_trees(self):
+ """This dirstate contains multiple files and directories.
+
+ / a-root-value
+ a/ a-dir
+ b/ b-dir
+ c c-file
+ d d-file
+ a/e/ e-dir
+ a/f f-file
+ b/g g-file
+ b/h\xc3\xa5 h-\xc3\xa5-file #This is u'\xe5' encoded into utf-8
+
+ Notice that a/e is an empty directory.
+
+ There is one parent tree, which has the same shape but with the following variations:
+ b/g in the parent is gone.
+ b/h in the parent has a different id
+ b/i is new in the parent
+ c is renamed to b/j in the parent
+
+ :return: The dirstate, still write-locked.
+ """
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ null_sha = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+ NULL_PARENT_DETAILS = dirstate.DirState.NULL_PARENT_DETAILS
+ root_entry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, 'parent-revid'),
+ ]
+ a_entry = ('', 'a', 'a-dir'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, 'parent-revid'),
+ ]
+ b_entry = ('', 'b', 'b-dir'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, 'parent-revid'),
+ ]
+ c_entry = ('', 'c', 'c-file'), [
+ ('f', null_sha, 10, False, packed_stat),
+ ('r', 'b/j', 0, False, ''),
+ ]
+ d_entry = ('', 'd', 'd-file'), [
+ ('f', null_sha, 20, False, packed_stat),
+ ('f', 'd', 20, False, 'parent-revid'),
+ ]
+ e_entry = ('a', 'e', 'e-dir'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, 'parent-revid'),
+ ]
+ f_entry = ('a', 'f', 'f-file'), [
+ ('f', null_sha, 30, False, packed_stat),
+ ('f', 'f', 20, False, 'parent-revid'),
+ ]
+ g_entry = ('b', 'g', 'g-file'), [
+ ('f', null_sha, 30, False, packed_stat),
+ NULL_PARENT_DETAILS,
+ ]
+ h_entry1 = ('b', 'h\xc3\xa5', 'h-\xc3\xa5-file1'), [
+ ('f', null_sha, 40, False, packed_stat),
+ NULL_PARENT_DETAILS,
+ ]
+ h_entry2 = ('b', 'h\xc3\xa5', 'h-\xc3\xa5-file2'), [
+ NULL_PARENT_DETAILS,
+ ('f', 'h', 20, False, 'parent-revid'),
+ ]
+ i_entry = ('b', 'i', 'i-file'), [
+ NULL_PARENT_DETAILS,
+ ('f', 'h', 20, False, 'parent-revid'),
+ ]
+ j_entry = ('b', 'j', 'c-file'), [
+ ('r', 'c', 0, False, ''),
+ ('f', 'j', 20, False, 'parent-revid'),
+ ]
+ dirblocks = []
+ dirblocks.append(('', [root_entry]))
+ dirblocks.append(('', [a_entry, b_entry, c_entry, d_entry]))
+ dirblocks.append(('a', [e_entry, f_entry]))
+ dirblocks.append(('b', [g_entry, h_entry1, h_entry2, i_entry, j_entry]))
+ state = dirstate.DirState.initialize('dirstate')
+ state._validate()
+ try:
+ state._set_data(['parent'], dirblocks)
+ except:
+ state.unlock()
+ raise
+ return state, dirblocks
+
+ def test_iter_children_b(self):
+ state, dirblocks = self.create_dirstate_with_two_trees()
+ self.addCleanup(state.unlock)
+ expected_result = []
+ expected_result.append(dirblocks[3][1][2]) # h2
+ expected_result.append(dirblocks[3][1][3]) # i
+ expected_result.append(dirblocks[3][1][4]) # j
+ self.assertEqual(expected_result,
+ list(state._iter_child_entries(1, 'b')))
+
+ def test_iter_child_root(self):
+ state, dirblocks = self.create_dirstate_with_two_trees()
+ self.addCleanup(state.unlock)
+ expected_result = []
+ expected_result.append(dirblocks[1][1][0]) # a
+ expected_result.append(dirblocks[1][1][1]) # b
+ expected_result.append(dirblocks[1][1][3]) # d
+ expected_result.append(dirblocks[2][1][0]) # e
+ expected_result.append(dirblocks[2][1][1]) # f
+ expected_result.append(dirblocks[3][1][2]) # h2
+ expected_result.append(dirblocks[3][1][3]) # i
+ expected_result.append(dirblocks[3][1][4]) # j
+ self.assertEqual(expected_result,
+ list(state._iter_child_entries(1, '')))
+
+
+class TestDirstateSortOrder(tests.TestCaseWithTransport):
+ """Test that DirState adds entries in the right order."""
+
+ def test_add_sorting(self):
+ """Add entries in lexicographical order, we get path sorted order.
+
+ This tests it to a depth of 4, to make sure we don't just get it right
+ at a single depth. 'a/a' should come before 'a-a', even though it
+ doesn't lexicographically.
+ """
+ dirs = ['a', 'a/a', 'a/a/a', 'a/a/a/a',
+ 'a-a', 'a/a-a', 'a/a/a-a', 'a/a/a/a-a',
+ ]
+ null_sha = ''
+ state = dirstate.DirState.initialize('dirstate')
+ self.addCleanup(state.unlock)
+
+ fake_stat = os.stat('dirstate')
+ for d in dirs:
+ d_id = d.replace('/', '_')+'-id'
+ file_path = d + '/f'
+ file_id = file_path.replace('/', '_')+'-id'
+ state.add(d, d_id, 'directory', fake_stat, null_sha)
+ state.add(file_path, file_id, 'file', fake_stat, null_sha)
+
+ expected = ['', '', 'a',
+ 'a/a', 'a/a/a', 'a/a/a/a',
+ 'a/a/a/a-a', 'a/a/a-a', 'a/a-a', 'a-a',
+ ]
+ split = lambda p:p.split('/')
+ self.assertEqual(sorted(expected, key=split), expected)
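+ # DirState should keep the dirblocks in this same path-component order
+ # ('a/a' before 'a-a'), which the next assertion verifies.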
+ dirblock_names = [d[0] for d in state._dirblocks]
+ self.assertEqual(expected, dirblock_names)
+
+ def test_set_parent_trees_correct_order(self):
+ """After calling set_parent_trees() we should maintain the order."""
+ dirs = ['a', 'a-a', 'a/a']
+ null_sha = ''
+ state = dirstate.DirState.initialize('dirstate')
+ self.addCleanup(state.unlock)
+
+ fake_stat = os.stat('dirstate')
+ for d in dirs:
+ d_id = d.replace('/', '_')+'-id'
+ file_path = d + '/f'
+ file_id = file_path.replace('/', '_')+'-id'
+ state.add(d, d_id, 'directory', fake_stat, null_sha)
+ state.add(file_path, file_id, 'file', fake_stat, null_sha)
+
+ expected = ['', '', 'a', 'a/a', 'a-a']
+ dirblock_names = [d[0] for d in state._dirblocks]
+ self.assertEqual(expected, dirblock_names)
+
+ # *really* cheesy way to just get an empty tree
+ repo = self.make_repository('repo')
+ empty_tree = repo.revision_tree(_mod_revision.NULL_REVISION)
+ state.set_parent_trees([('null:', empty_tree)], [])
+
+ dirblock_names = [d[0] for d in state._dirblocks]
+ self.assertEqual(expected, dirblock_names)
+
+
+class InstrumentedDirState(dirstate.DirState):
+ """An DirState with instrumented sha1 functionality."""
+
+ def __init__(self, path, sha1_provider, worth_saving_limit=0):
+ super(InstrumentedDirState, self).__init__(path, sha1_provider,
+ worth_saving_limit=worth_saving_limit)
+ self._time_offset = 0
+ self._log = []
+ # member is dynamically set in DirState.__init__ to turn on trace
+ self._sha1_provider = sha1_provider
+ self._sha1_file = self._sha1_file_and_log
+
+ def _sha_cutoff_time(self):
+ timestamp = super(InstrumentedDirState, self)._sha_cutoff_time()
+ self._cutoff_time = timestamp + self._time_offset
+
+ def _sha1_file_and_log(self, abspath):
+ self._log.append(('sha1', abspath))
+ return self._sha1_provider.sha1(abspath)
+
+ def _read_link(self, abspath, old_link):
+ self._log.append(('read_link', abspath, old_link))
+ return super(InstrumentedDirState, self)._read_link(abspath, old_link)
+
+ def _lstat(self, abspath, entry):
+ self._log.append(('lstat', abspath))
+ return super(InstrumentedDirState, self)._lstat(abspath, entry)
+
+ def _is_executable(self, mode, old_executable):
+ self._log.append(('is_exec', mode, old_executable))
+ return super(InstrumentedDirState, self)._is_executable(mode,
+ old_executable)
+
+ def adjust_time(self, secs):
+ """Move the clock forward or back.
+
+ :param secs: The amount to adjust the clock by. Positive values make it
+ seem as if we are in the future; negative values make it seem like we
+ are in the past.
+ """
+ self._time_offset += secs
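+ # clear the cached cutoff so it is recomputed with the new offset the
+ # next time it is needed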
+ self._cutoff_time = None
+
+
+class _FakeStat(object):
+ """A class with the same attributes as a real stat result."""
+
+ def __init__(self, size, mtime, ctime, dev, ino, mode):
+ self.st_size = size
+ self.st_mtime = mtime
+ self.st_ctime = ctime
+ self.st_dev = dev
+ self.st_ino = ino
+ self.st_mode = mode
+
+ @staticmethod
+ def from_stat(st):
+ return _FakeStat(st.st_size, st.st_mtime, st.st_ctime, st.st_dev,
+ st.st_ino, st.st_mode)
+
+
+class TestPackStat(tests.TestCaseWithTransport):
+
+ def assertPackStat(self, expected, stat_value):
+ """Check the packed and serialized form of a stat value."""
+ self.assertEqual(expected, dirstate.pack_stat(stat_value))
+
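+ # pack_stat() condenses (size, mtime, ctime, dev, ino, mode) into a
+ # short packed string; the per-field comments below show which slice of
+ # the packed value each stat field maps to.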
+ def test_pack_stat_int(self):
+ st = _FakeStat(6859L, 1172758614, 1172758617, 777L, 6499538L, 0100644)
+ # Make sure that all parameters have an impact on the packed stat.
+ self.assertPackStat('AAAay0Xm4FZF5uBZAAADCQBjLNIAAIGk', st)
+ st.st_size = 7000L
+ # ay0 => bWE
+ self.assertPackStat('AAAbWEXm4FZF5uBZAAADCQBjLNIAAIGk', st)
+ st.st_mtime = 1172758620
+ # 4FZ => 4Fx
+ self.assertPackStat('AAAbWEXm4FxF5uBZAAADCQBjLNIAAIGk', st)
+ st.st_ctime = 1172758630
+ # uBZ => uBm
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADCQBjLNIAAIGk', st)
+ st.st_dev = 888L
+ # DCQ => DeA
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADeABjLNIAAIGk', st)
+ st.st_ino = 6499540L
+ # LNI => LNQ
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADeABjLNQAAIGk', st)
+ st.st_mode = 0100744
+ # IGk => IHk
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADeABjLNQAAIHk', st)
+
+ def test_pack_stat_float(self):
+ """On some platforms mtime and ctime are floats.
+
+ Make sure we don't get warnings or errors, and that we ignore changes
+ of less than 1s.
+ """
+ st = _FakeStat(7000L, 1172758614.0, 1172758617.0,
+ 777L, 6499538L, 0100644)
+ # These should all be the same as the integer counterparts
+ self.assertPackStat('AAAbWEXm4FZF5uBZAAADCQBjLNIAAIGk', st)
+ st.st_mtime = 1172758620.0
+ # FZF5 => FxF5
+ self.assertPackStat('AAAbWEXm4FxF5uBZAAADCQBjLNIAAIGk', st)
+ st.st_ctime = 1172758630.0
+ # uBZ => uBm
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADCQBjLNIAAIGk', st)
+ # fractional seconds are discarded, so no change from above
+ st.st_mtime = 1172758620.453
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADCQBjLNIAAIGk', st)
+ st.st_ctime = 1172758630.228
+ self.assertPackStat('AAAbWEXm4FxF5uBmAAADCQBjLNIAAIGk', st)
+
+
+class TestBisect(TestCaseWithDirState):
+ """Test the ability to bisect into the disk format."""
+
+ def assertBisect(self, expected_map, map_keys, state, paths):
+ """Assert that bisecting for paths returns the right result.
+
+ :param expected_map: A map from key => entry value
+ :param map_keys: The keys to expect for each path
+ :param state: The DirState object.
+ :param paths: A list of paths, these will automatically be split into
+ (dir, name) tuples, and sorted according to how _bisect
+ requires.
+ """
+ result = state._bisect(paths)
+ # For now, results are just returned in whatever order we read them.
+ # We could sort by (dir, name, file_id) or something like that, but in
+ # the end it would still be fairly arbitrary, and we don't want the
+ # extra overhead if we can avoid it. So sort everything to make sure
+ # equality is true
+ self.assertEqual(len(map_keys), len(paths))
+ expected = {}
+ for path, keys in zip(paths, map_keys):
+ if keys is None:
+ # This should not be present in the output
+ continue
+ expected[path] = sorted(expected_map[k] for k in keys)
+
+ # The returned values are just arranged in whatever order they were
+ # read; for testing, make sure everything is properly sorted.
+ for path in result:
+ result[path].sort()
+
+ self.assertEqual(expected, result)
+
+ def assertBisectDirBlocks(self, expected_map, map_keys, state, paths):
+ """Assert that bisecting for dirbblocks returns the right result.
+
+ :param expected_map: A map from key => expected values
+ :param map_keys: A nested list of paths we expect to be returned.
+ Something like [['a', 'b', 'f'], ['b/c', 'b/d']]
+ :param state: The DirState object.
+ :param paths: A list of directories
+ """
+ result = state._bisect_dirblocks(paths)
+ self.assertEqual(len(map_keys), len(paths))
+ expected = {}
+ for path, keys in zip(paths, map_keys):
+ if keys is None:
+ # This should not be present in the output
+ continue
+ expected[path] = sorted(expected_map[k] for k in keys)
+ for path in result:
+ result[path].sort()
+
+ self.assertEqual(expected, result)
+
+ def assertBisectRecursive(self, expected_map, map_keys, state, paths):
+ """Assert the return value of a recursive bisection.
+
+ :param expected_map: A map from key => entry value
+ :param map_keys: A list of paths we expect to be returned.
+ Something like ['a', 'b', 'f', 'b/d', 'b/d2']
+ :param state: The DirState object.
+ :param paths: A list of files and directories. It will be broken up
+ into (dir, name) pairs and sorted before calling _bisect_recursive.
+ """
+ expected = {}
+ for key in map_keys:
+ entry = expected_map[key]
+ dir_name_id, trees_info = entry
+ expected[dir_name_id] = trees_info
+
+ result = state._bisect_recursive(paths)
+
+ self.assertEqual(expected, result)
+
+ def test_bisect_each(self):
+ """Find a single record using bisect."""
+ tree, state, expected = self.create_basic_dirstate()
+
+ # Bisect should return the rows for the specified files.
+ self.assertBisect(expected, [['']], state, [''])
+ self.assertBisect(expected, [['a']], state, ['a'])
+ self.assertBisect(expected, [['b']], state, ['b'])
+ self.assertBisect(expected, [['b/c']], state, ['b/c'])
+ self.assertBisect(expected, [['b/d']], state, ['b/d'])
+ self.assertBisect(expected, [['b/d/e']], state, ['b/d/e'])
+ self.assertBisect(expected, [['b-c']], state, ['b-c'])
+ self.assertBisect(expected, [['f']], state, ['f'])
+
+ def test_bisect_multi(self):
+ """Bisect can be used to find multiple records at the same time."""
+ tree, state, expected = self.create_basic_dirstate()
+ # Bisect should be capable of finding multiple entries at the same time
+ self.assertBisect(expected, [['a'], ['b'], ['f']],
+ state, ['a', 'b', 'f'])
+ self.assertBisect(expected, [['f'], ['b/d'], ['b/d/e']],
+ state, ['f', 'b/d', 'b/d/e'])
+ self.assertBisect(expected, [['b'], ['b-c'], ['b/c']],
+ state, ['b', 'b-c', 'b/c'])
+
+ def test_bisect_one_page(self):
+ """Test bisect when there is only 1 page to read"""
+ tree, state, expected = self.create_basic_dirstate()
+ state._bisect_page_size = 5000
+ self.assertBisect(expected, [['']], state, [''])
+ self.assertBisect(expected, [['a']], state, ['a'])
+ self.assertBisect(expected, [['b']], state, ['b'])
+ self.assertBisect(expected, [['b/c']], state, ['b/c'])
+ self.assertBisect(expected, [['b/d']], state, ['b/d'])
+ self.assertBisect(expected, [['b/d/e']], state, ['b/d/e'])
+ self.assertBisect(expected, [['b-c']], state, ['b-c'])
+ self.assertBisect(expected, [['f']], state, ['f'])
+ self.assertBisect(expected, [['a'], ['b'], ['f']],
+ state, ['a', 'b', 'f'])
+ self.assertBisect(expected, [['b/d'], ['b/d/e'], ['f']],
+ state, ['b/d', 'b/d/e', 'f'])
+ self.assertBisect(expected, [['b'], ['b/c'], ['b-c']],
+ state, ['b', 'b/c', 'b-c'])
+
+ def test_bisect_duplicate_paths(self):
+ """When bisecting for a path, handle multiple entries."""
+ tree, state, expected = self.create_duplicated_dirstate()
+
+ # Now make sure that both records are properly returned.
+ self.assertBisect(expected, [['']], state, [''])
+ self.assertBisect(expected, [['a', 'a2']], state, ['a'])
+ self.assertBisect(expected, [['b', 'b2']], state, ['b'])
+ self.assertBisect(expected, [['b/c', 'b/c2']], state, ['b/c'])
+ self.assertBisect(expected, [['b/d', 'b/d2']], state, ['b/d'])
+ self.assertBisect(expected, [['b/d/e', 'b/d/e2']],
+ state, ['b/d/e'])
+ self.assertBisect(expected, [['b-c', 'b-c2']], state, ['b-c'])
+ self.assertBisect(expected, [['f', 'f2']], state, ['f'])
+
+ def test_bisect_page_size_too_small(self):
+ """If the page size is too small, we will auto increase it."""
+ tree, state, expected = self.create_basic_dirstate()
+ state._bisect_page_size = 50
+ self.assertBisect(expected, [None], state, ['b/e'])
+ self.assertBisect(expected, [['a']], state, ['a'])
+ self.assertBisect(expected, [['b']], state, ['b'])
+ self.assertBisect(expected, [['b/c']], state, ['b/c'])
+ self.assertBisect(expected, [['b/d']], state, ['b/d'])
+ self.assertBisect(expected, [['b/d/e']], state, ['b/d/e'])
+ self.assertBisect(expected, [['b-c']], state, ['b-c'])
+ self.assertBisect(expected, [['f']], state, ['f'])
+
+ def test_bisect_missing(self):
+ """Test that bisect return None if it cannot find a path."""
+ tree, state, expected = self.create_basic_dirstate()
+ self.assertBisect(expected, [None], state, ['foo'])
+ self.assertBisect(expected, [None], state, ['b/foo'])
+ self.assertBisect(expected, [None], state, ['bar/foo'])
+ self.assertBisect(expected, [None], state, ['b-c/foo'])
+
+ self.assertBisect(expected, [['a'], None, ['b/d']],
+ state, ['a', 'foo', 'b/d'])
+
+ def test_bisect_rename(self):
+ """Check that we find a renamed row."""
+ tree, state, expected = self.create_renamed_dirstate()
+
+ # Search for the pre and post renamed entries
+ self.assertBisect(expected, [['a']], state, ['a'])
+ self.assertBisect(expected, [['b/g']], state, ['b/g'])
+ self.assertBisect(expected, [['b/d']], state, ['b/d'])
+ self.assertBisect(expected, [['h']], state, ['h'])
+
+ # What about b/d/e? shouldn't that also get 2 directory entries?
+ self.assertBisect(expected, [['b/d/e']], state, ['b/d/e'])
+ self.assertBisect(expected, [['h/e']], state, ['h/e'])
+
+ def test_bisect_dirblocks(self):
+ tree, state, expected = self.create_duplicated_dirstate()
+ self.assertBisectDirBlocks(expected,
+ [['', 'a', 'a2', 'b', 'b2', 'b-c', 'b-c2', 'f', 'f2']],
+ state, [''])
+ self.assertBisectDirBlocks(expected,
+ [['b/c', 'b/c2', 'b/d', 'b/d2']], state, ['b'])
+ self.assertBisectDirBlocks(expected,
+ [['b/d/e', 'b/d/e2']], state, ['b/d'])
+ self.assertBisectDirBlocks(expected,
+ [['', 'a', 'a2', 'b', 'b2', 'b-c', 'b-c2', 'f', 'f2'],
+ ['b/c', 'b/c2', 'b/d', 'b/d2'],
+ ['b/d/e', 'b/d/e2'],
+ ], state, ['', 'b', 'b/d'])
+
+ def test_bisect_dirblocks_missing(self):
+ tree, state, expected = self.create_basic_dirstate()
+ self.assertBisectDirBlocks(expected, [['b/d/e'], None],
+ state, ['b/d', 'b/e'])
+ # Files don't show up in this search
+ self.assertBisectDirBlocks(expected, [None], state, ['a'])
+ self.assertBisectDirBlocks(expected, [None], state, ['b/c'])
+ self.assertBisectDirBlocks(expected, [None], state, ['c'])
+ self.assertBisectDirBlocks(expected, [None], state, ['b/d/e'])
+ self.assertBisectDirBlocks(expected, [None], state, ['f'])
+
+ def test_bisect_recursive_each(self):
+ tree, state, expected = self.create_basic_dirstate()
+ self.assertBisectRecursive(expected, ['a'], state, ['a'])
+ self.assertBisectRecursive(expected, ['b/c'], state, ['b/c'])
+ self.assertBisectRecursive(expected, ['b/d/e'], state, ['b/d/e'])
+ self.assertBisectRecursive(expected, ['b-c'], state, ['b-c'])
+ self.assertBisectRecursive(expected, ['b/d', 'b/d/e'],
+ state, ['b/d'])
+ self.assertBisectRecursive(expected, ['b', 'b/c', 'b/d', 'b/d/e'],
+ state, ['b'])
+ self.assertBisectRecursive(expected, ['', 'a', 'b', 'b-c', 'f', 'b/c',
+ 'b/d', 'b/d/e'],
+ state, [''])
+
+ def test_bisect_recursive_multiple(self):
+ tree, state, expected = self.create_basic_dirstate()
+ self.assertBisectRecursive(expected, ['a', 'b/c'], state, ['a', 'b/c'])
+ self.assertBisectRecursive(expected, ['b/d', 'b/d/e'],
+ state, ['b/d', 'b/d/e'])
+
+ def test_bisect_recursive_missing(self):
+ tree, state, expected = self.create_basic_dirstate()
+ self.assertBisectRecursive(expected, [], state, ['d'])
+ self.assertBisectRecursive(expected, [], state, ['b/e'])
+ self.assertBisectRecursive(expected, [], state, ['g'])
+ self.assertBisectRecursive(expected, ['a'], state, ['a', 'g'])
+
+ def test_bisect_recursive_renamed(self):
+ tree, state, expected = self.create_renamed_dirstate()
+
+ # Looking for either renamed item should find the other
+ self.assertBisectRecursive(expected, ['a', 'b/g'], state, ['a'])
+ self.assertBisectRecursive(expected, ['a', 'b/g'], state, ['b/g'])
+ # Looking in the containing directory should find the rename target,
+ # and anything in a subdir of the renamed target.
+ self.assertBisectRecursive(expected, ['a', 'b', 'b/c', 'b/d',
+ 'b/d/e', 'b/g', 'h', 'h/e'],
+ state, ['b'])
+
+
+class TestDirstateValidation(TestCaseWithDirState):
+
+ def test_validate_correct_dirstate(self):
+ state = self.create_complex_dirstate()
+ state._validate()
+ state.unlock()
+ # and make sure we can also validate with a read lock
+ state.lock_read()
+ try:
+ state._validate()
+ finally:
+ state.unlock()
+
+ def test_dirblock_not_sorted(self):
+ tree, state, expected = self.create_renamed_dirstate()
+ state._read_dirblocks_if_needed()
+ last_dirblock = state._dirblocks[-1]
+ # we're appending to the dirblock, but this name comes before some of
+ # the existing names; that's wrong
+ last_dirblock[1].append(
+ (('h', 'aaaa', 'a-id'),
+ [('a', '', 0, False, ''),
+ ('a', '', 0, False, '')]))
+ e = self.assertRaises(AssertionError,
+ state._validate)
+ self.assertContainsRe(str(e), 'not sorted')
+
+ def test_dirblock_name_mismatch(self):
+ tree, state, expected = self.create_renamed_dirstate()
+ state._read_dirblocks_if_needed()
+ last_dirblock = state._dirblocks[-1]
+ # add an entry with the wrong directory name
+ last_dirblock[1].append(
+ (('', 'z', 'a-id'),
+ [('a', '', 0, False, ''),
+ ('a', '', 0, False, '')]))
+ e = self.assertRaises(AssertionError,
+ state._validate)
+ self.assertContainsRe(str(e),
+ "doesn't match directory name")
+
+ def test_dirblock_missing_rename(self):
+ tree, state, expected = self.create_renamed_dirstate()
+ state._read_dirblocks_if_needed()
+ last_dirblock = state._dirblocks[-1]
+ # make another entry for a-id, without a correct 'r' pointer to
+ # the real occurrence in the working tree
+ last_dirblock[1].append(
+ (('h', 'z', 'a-id'),
+ [('a', '', 0, False, ''),
+ ('a', '', 0, False, '')]))
+ e = self.assertRaises(AssertionError,
+ state._validate)
+ self.assertContainsRe(str(e),
+ 'file a-id is absent in row')
+
+
+class TestDirstateTreeReference(TestCaseWithDirState):
+
+ def test_reference_revision_is_none(self):
+ tree = self.make_branch_and_tree('tree', format='development-subtree')
+ subtree = self.make_branch_and_tree('tree/subtree',
+ format='development-subtree')
+ subtree.set_root_id('subtree')
+ tree.add_reference(subtree)
+ tree.add('subtree')
+ state = dirstate.DirState.from_tree(tree, 'dirstate')
+ key = ('', 'subtree', 'subtree')
+ expected = ('', [(key,
+ [('t', '', 0, False, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')])])
+
+ try:
+ self.assertEqual(expected, state._find_block(key))
+ finally:
+ state.unlock()
+
+
+class TestDiscardMergeParents(TestCaseWithDirState):
+
+ def test_discard_no_parents(self):
+ # This should be a no-op
+ state = self.create_empty_dirstate()
+ self.addCleanup(state.unlock)
+ state._discard_merge_parents()
+ state._validate()
+
+ def test_discard_one_parent(self):
+ # No-op
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ root_entry_direntry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, packed_stat),
+ ]
+ dirblocks = []
+ dirblocks.append(('', [root_entry_direntry]))
+ dirblocks.append(('', []))
+
+ state = self.create_empty_dirstate()
+ self.addCleanup(state.unlock)
+ state._set_data(['parent-id'], dirblocks[:])
+ state._validate()
+
+ state._discard_merge_parents()
+ state._validate()
+ self.assertEqual(dirblocks, state._dirblocks)
+
+ def test_discard_simple(self):
+ # Not a no-op: the merged parent's column should be removed
+ packed_stat = 'AAAAREUHaIpFB2iKAAADAQAtkqUAAIGk'
+ root_entry_direntry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, packed_stat),
+ ]
+ expected_root_entry_direntry = ('', '', 'a-root-value'), [
+ ('d', '', 0, False, packed_stat),
+ ('d', '', 0, False, packed_stat),
+ ]
+ dirblocks = []
+ dirblocks.append(('', [root_entry_direntry]))
+ dirblocks.append(('', []))
+
+ state = self.create_empty_dirstate()
+ self.addCleanup(state.unlock)
+ state._set_data(['parent-id', 'merged-id'], dirblocks[:])
+ state._validate()
+
+ # This should strip off the extra column
+ state._discard_merge_parents()
+ state._validate()
+ expected_dirblocks = [('', [expected_root_entry_direntry]), ('', [])]
+ self.assertEqual(expected_dirblocks, state._dirblocks)
+
+ def test_discard_absent(self):
+ """If entries are only in a merge, discard should remove the entries"""
+ null_stat = dirstate.DirState.NULLSTAT
+ present_dir = ('d', '', 0, False, null_stat)
+ present_file = ('f', '', 0, False, null_stat)
+ absent = dirstate.DirState.NULL_PARENT_DETAILS
+ root_key = ('', '', 'a-root-value')
+ file_in_root_key = ('', 'file-in-root', 'a-file-id')
+ file_in_merged_key = ('', 'file-in-merged', 'b-file-id')
+ dirblocks = [('', [(root_key, [present_dir, present_dir, present_dir])]),
+ ('', [(file_in_merged_key,
+ [absent, absent, present_file]),
+ (file_in_root_key,
+ [present_file, present_file, present_file]),
+ ]),
+ ]
+
+ state = self.create_empty_dirstate()
+ self.addCleanup(state.unlock)
+ state._set_data(['parent-id', 'merged-id'], dirblocks[:])
+ state._validate()
+
+ exp_dirblocks = [('', [(root_key, [present_dir, present_dir])]),
+ ('', [(file_in_root_key,
+ [present_file, present_file]),
+ ]),
+ ]
+ state._discard_merge_parents()
+ state._validate()
+ self.assertEqual(exp_dirblocks, state._dirblocks)
+
+ def test_discard_renamed(self):
+ null_stat = dirstate.DirState.NULLSTAT
+ present_dir = ('d', '', 0, False, null_stat)
+ present_file = ('f', '', 0, False, null_stat)
+ absent = dirstate.DirState.NULL_PARENT_DETAILS
+ root_key = ('', '', 'a-root-value')
+ file_in_root_key = ('', 'file-in-root', 'a-file-id')
+ # Renamed relative to parent
+ file_rename_s_key = ('', 'file-s', 'b-file-id')
+ file_rename_t_key = ('', 'file-t', 'b-file-id')
+ # And one that is renamed between the parents, but absent in this tree
+ key_in_1 = ('', 'file-in-1', 'c-file-id')
+ key_in_2 = ('', 'file-in-2', 'c-file-id')
+
+ dirblocks = [
+ ('', [(root_key, [present_dir, present_dir, present_dir])]),
+ ('', [(key_in_1,
+ [absent, present_file, ('r', 'file-in-2', 'c-file-id')]),
+ (key_in_2,
+ [absent, ('r', 'file-in-1', 'c-file-id'), present_file]),
+ (file_in_root_key,
+ [present_file, present_file, present_file]),
+ (file_rename_s_key,
+ [('r', 'file-t', 'b-file-id'), absent, present_file]),
+ (file_rename_t_key,
+ [present_file, absent, ('r', 'file-s', 'b-file-id')]),
+ ]),
+ ]
+ exp_dirblocks = [
+ ('', [(root_key, [present_dir, present_dir])]),
+ ('', [(key_in_1, [absent, present_file]),
+ (file_in_root_key, [present_file, present_file]),
+ (file_rename_t_key, [present_file, absent]),
+ ]),
+ ]
+ state = self.create_empty_dirstate()
+ self.addCleanup(state.unlock)
+ state._set_data(['parent-id', 'merged-id'], dirblocks[:])
+ state._validate()
+
+ state._discard_merge_parents()
+ state._validate()
+ self.assertEqual(exp_dirblocks, state._dirblocks)
+
+ def test_discard_all_subdir(self):
+ null_stat = dirstate.DirState.NULLSTAT
+ present_dir = ('d', '', 0, False, null_stat)
+ present_file = ('f', '', 0, False, null_stat)
+ absent = dirstate.DirState.NULL_PARENT_DETAILS
+ root_key = ('', '', 'a-root-value')
+ subdir_key = ('', 'sub', 'dir-id')
+ child1_key = ('sub', 'child1', 'child1-id')
+ child2_key = ('sub', 'child2', 'child2-id')
+ child3_key = ('sub', 'child3', 'child3-id')
+
+ dirblocks = [
+ ('', [(root_key, [present_dir, present_dir, present_dir])]),
+ ('', [(subdir_key, [present_dir, present_dir, present_dir])]),
+ ('sub', [(child1_key, [absent, absent, present_file]),
+ (child2_key, [absent, absent, present_file]),
+ (child3_key, [absent, absent, present_file]),
+ ]),
+ ]
+ exp_dirblocks = [
+ ('', [(root_key, [present_dir, present_dir])]),
+ ('', [(subdir_key, [present_dir, present_dir])]),
+ ('sub', []),
+ ]
+ state = self.create_empty_dirstate()
+ self.addCleanup(state.unlock)
+ state._set_data(['parent-id', 'merged-id'], dirblocks[:])
+ state._validate()
+
+ state._discard_merge_parents()
+ state._validate()
+ self.assertEqual(exp_dirblocks, state._dirblocks)
+
+
+class Test_InvEntryToDetails(tests.TestCase):
+
+ def assertDetails(self, expected, inv_entry):
+ details = dirstate.DirState._inv_entry_to_details(inv_entry)
+ self.assertEqual(expected, details)
+ # details should always allow join() and always be a plain str when
+ # finished
+ (minikind, fingerprint, size, executable, tree_data) = details
+ self.assertIsInstance(minikind, str)
+ self.assertIsInstance(fingerprint, str)
+ self.assertIsInstance(tree_data, str)
+
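+ # symlink targets are stored UTF-8 encoded, so the fingerprint stays a
+ # plain str even for a unicode target.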
+ def test_unicode_symlink(self):
+ inv_entry = inventory.InventoryLink('link-file-id',
+ u'nam\N{Euro Sign}e',
+ 'link-parent-id')
+ inv_entry.revision = 'link-revision-id'
+ target = u'link-targ\N{Euro Sign}t'
+ inv_entry.symlink_target = target
+ self.assertDetails(('l', target.encode('UTF-8'), 0, False,
+ 'link-revision-id'), inv_entry)
+
+
+class TestSHA1Provider(tests.TestCaseInTempDir):
+
+ def test_sha1provider_is_an_interface(self):
+ p = dirstate.SHA1Provider()
+ self.assertRaises(NotImplementedError, p.sha1, "foo")
+ self.assertRaises(NotImplementedError, p.stat_and_sha1, "foo")
+
+ def test_defaultsha1provider_sha1(self):
+ text = 'test\r\nwith\nall\rpossible line endings\r\n'
+ self.build_tree_contents([('foo', text)])
+ expected_sha = osutils.sha_string(text)
+ p = dirstate.DefaultSHA1Provider()
+ self.assertEqual(expected_sha, p.sha1('foo'))
+
+ def test_defaultsha1provider_stat_and_sha1(self):
+ text = 'test\r\nwith\nall\rpossible line endings\r\n'
+ self.build_tree_contents([('foo', text)])
+ expected_sha = osutils.sha_string(text)
+ p = dirstate.DefaultSHA1Provider()
+ statvalue, sha1 = p.stat_and_sha1('foo')
+ self.assertTrue(len(statvalue) >= 10)
+ self.assertEqual(len(text), statvalue.st_size)
+ self.assertEqual(expected_sha, sha1)
+
+
+class _Repo(object):
+ """A minimal api to get InventoryRevisionTree to work."""
+
+ def __init__(self):
+ default_format = controldir.format_registry.make_bzrdir('default')
+ self._format = default_format.repository_format
+
+ def lock_read(self):
+ pass
+
+ def unlock(self):
+ pass
+
+
+class TestUpdateBasisByDelta(tests.TestCase):
+
+ def path_to_ie(self, path, file_id, rev_id, dir_ids):
+ if path.endswith('/'):
+ is_dir = True
+ path = path[:-1]
+ else:
+ is_dir = False
+ dirname, basename = osutils.split(path)
+ try:
+ dir_id = dir_ids[dirname]
+ except KeyError:
+ dir_id = osutils.basename(dirname) + '-id'
+ if is_dir:
+ ie = inventory.InventoryDirectory(file_id, basename, dir_id)
+ dir_ids[path] = file_id
+ else:
+ ie = inventory.InventoryFile(file_id, basename, dir_id)
+ ie.text_size = 0
+ ie.text_sha1 = ''
+ ie.revision = rev_id
+ return ie
+
+ def create_tree_from_shape(self, rev_id, shape):
+ dir_ids = {'': 'root-id'}
+ inv = inventory.Inventory('root-id', rev_id)
+ for path, file_id in shape:
+ if path == '':
+ # Replace the root entry
+ del inv._byid[inv.root.file_id]
+ inv.root.file_id = file_id
+ inv._byid[file_id] = inv.root
+ dir_ids[''] = file_id
+ continue
+ inv.add(self.path_to_ie(path, file_id, rev_id, dir_ids))
+ return revisiontree.InventoryRevisionTree(_Repo(), inv, rev_id)
+
+ def create_empty_dirstate(self):
+ fd, path = tempfile.mkstemp(prefix='bzr-dirstate')
+ self.addCleanup(os.remove, path)
+ os.close(fd)
+ state = dirstate.DirState.initialize(path)
+ self.addCleanup(state.unlock)
+ return state
+
+ def create_inv_delta(self, delta, rev_id):
+ """Translate a 'delta shape' into an actual InventoryDelta"""
+ dir_ids = {'': 'root-id'}
+ inv_delta = []
+ for old_path, new_path, file_id in delta:
+ if old_path is not None and old_path.endswith('/'):
+ # Don't have to actually do anything for this, because only
+ # new_path creates InventoryEntries
+ old_path = old_path[:-1]
+ if new_path is None: # Delete
+ inv_delta.append((old_path, None, file_id, None))
+ continue
+ ie = self.path_to_ie(new_path, file_id, rev_id, dir_ids)
+ inv_delta.append((old_path, new_path, file_id, ie))
+ return inv_delta
+
+ def assertUpdate(self, active, basis, target):
+ """Assert that update_basis_by_delta works how we want.
+
+ Set up a DirState object with active_shape for tree 0, basis_shape for
+ tree 1. Then apply the delta from basis_shape to target_shape,
+ and assert that the DirState is still valid, and that its stored
+ content matches the target_shape.
+ """
+ active_tree = self.create_tree_from_shape('active', active)
+ basis_tree = self.create_tree_from_shape('basis', basis)
+ target_tree = self.create_tree_from_shape('target', target)
+ state = self.create_empty_dirstate()
+ state.set_state_from_scratch(active_tree.root_inventory,
+ [('basis', basis_tree)], [])
+ delta = target_tree.root_inventory._make_delta(
+ basis_tree.root_inventory)
+ state.update_basis_by_delta(delta, 'target')
+ state._validate()
+ dirstate_tree = workingtree_4.DirStateRevisionTree(state,
+ 'target', _Repo())
+ # The target, now that the delta has been applied, should match the
+ # RevisionTree.
+ self.assertEqual([], list(dirstate_tree.iter_changes(target_tree)))
+ # And the dirblock state should be identical to the state if we created
+ # it from scratch.
+ state2 = self.create_empty_dirstate()
+ state2.set_state_from_scratch(active_tree.root_inventory,
+ [('target', target_tree)], [])
+ self.assertEqual(state2._dirblocks, state._dirblocks)
+ return state
+
+ def assertBadDelta(self, active, basis, delta):
+ """Test that we raise InconsistentDelta when appropriate.
+
+ :param active: The active tree shape
+ :param basis: The basis tree shape
+ :param delta: A description of the delta to apply. Similar to the form
+ for regular inventory deltas, but omitting the InventoryEntry.
+ So adding a file is: (None, 'path', 'file-id')
+ Adding a directory is: (None, 'path/', 'dir-id')
+ Renaming a dir is: ('old/', 'new/', 'dir-id')
+ etc.
+ """
+ active_tree = self.create_tree_from_shape('active', active)
+ basis_tree = self.create_tree_from_shape('basis', basis)
+ inv_delta = self.create_inv_delta(delta, 'target')
+ state = self.create_empty_dirstate()
+ state.set_state_from_scratch(active_tree.root_inventory,
+ [('basis', basis_tree)], [])
+ self.assertRaises(errors.InconsistentDelta,
+ state.update_basis_by_delta, inv_delta, 'target')
+ self.assertTrue(state._changes_aborted)
+
+ def test_remove_file_matching_active_state(self):
+ state = self.assertUpdate(
+ active=[],
+ basis =[('file', 'file-id')],
+ target=[],
+ )
+
+ def test_remove_file_present_in_active_state(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id')],
+ basis =[('file', 'file-id')],
+ target=[],
+ )
+
+ def test_remove_file_present_elsewhere_in_active_state(self):
+ state = self.assertUpdate(
+ active=[('other-file', 'file-id')],
+ basis =[('file', 'file-id')],
+ target=[],
+ )
+
+ def test_remove_file_active_state_has_diff_file(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id-2')],
+ basis =[('file', 'file-id')],
+ target=[],
+ )
+
+ def test_remove_file_active_state_has_diff_file_and_file_elsewhere(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id-2'),
+ ('other-file', 'file-id')],
+ basis =[('file', 'file-id')],
+ target=[],
+ )
+
+ def test_add_file_matching_active_state(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id')],
+ basis =[],
+ target=[('file', 'file-id')],
+ )
+
+ def test_add_file_missing_in_active_state(self):
+ state = self.assertUpdate(
+ active=[],
+ basis =[],
+ target=[('file', 'file-id')],
+ )
+
+ def test_add_file_elsewhere_in_active_state(self):
+ state = self.assertUpdate(
+ active=[('other-file', 'file-id')],
+ basis =[],
+ target=[('file', 'file-id')],
+ )
+
+ def test_add_file_active_state_has_diff_file_and_file_elsewhere(self):
+ state = self.assertUpdate(
+ active=[('other-file', 'file-id'),
+ ('file', 'file-id-2')],
+ basis =[],
+ target=[('file', 'file-id')],
+ )
+
+ def test_rename_file_matching_active_state(self):
+ state = self.assertUpdate(
+ active=[('other-file', 'file-id')],
+ basis =[('file', 'file-id')],
+ target=[('other-file', 'file-id')],
+ )
+
+ def test_rename_file_missing_in_active_state(self):
+ state = self.assertUpdate(
+ active=[],
+ basis =[('file', 'file-id')],
+ target=[('other-file', 'file-id')],
+ )
+
+ def test_rename_file_present_elsewhere_in_active_state(self):
+ state = self.assertUpdate(
+ active=[('third', 'file-id')],
+ basis =[('file', 'file-id')],
+ target=[('other-file', 'file-id')],
+ )
+
+ def test_rename_file_active_state_has_diff_source_file(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id-2')],
+ basis =[('file', 'file-id')],
+ target=[('other-file', 'file-id')],
+ )
+
+ def test_rename_file_active_state_has_diff_target_file(self):
+ state = self.assertUpdate(
+ active=[('other-file', 'file-id-2')],
+ basis =[('file', 'file-id')],
+ target=[('other-file', 'file-id')],
+ )
+
+ def test_rename_file_active_has_swapped_files(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id'),
+ ('other-file', 'file-id-2')],
+ basis= [('file', 'file-id'),
+ ('other-file', 'file-id-2')],
+ target=[('file', 'file-id-2'),
+ ('other-file', 'file-id')])
+
+ def test_rename_file_basis_has_swapped_files(self):
+ state = self.assertUpdate(
+ active=[('file', 'file-id'),
+ ('other-file', 'file-id-2')],
+ basis= [('file', 'file-id-2'),
+ ('other-file', 'file-id')],
+ target=[('file', 'file-id'),
+ ('other-file', 'file-id-2')])
+
+ def test_rename_directory_with_contents(self):
+ state = self.assertUpdate( # active matches basis
+ active=[('dir1/', 'dir-id'),
+ ('dir1/file', 'file-id')],
+ basis= [('dir1/', 'dir-id'),
+ ('dir1/file', 'file-id')],
+ target=[('dir2/', 'dir-id'),
+ ('dir2/file', 'file-id')])
+ state = self.assertUpdate( # active matches target
+ active=[('dir2/', 'dir-id'),
+ ('dir2/file', 'file-id')],
+ basis= [('dir1/', 'dir-id'),
+ ('dir1/file', 'file-id')],
+ target=[('dir2/', 'dir-id'),
+ ('dir2/file', 'file-id')])
+ state = self.assertUpdate( # active empty
+ active=[],
+ basis= [('dir1/', 'dir-id'),
+ ('dir1/file', 'file-id')],
+ target=[('dir2/', 'dir-id'),
+ ('dir2/file', 'file-id')])
+ state = self.assertUpdate( # active present at other location
+ active=[('dir3/', 'dir-id'),
+ ('dir3/file', 'file-id')],
+ basis= [('dir1/', 'dir-id'),
+ ('dir1/file', 'file-id')],
+ target=[('dir2/', 'dir-id'),
+ ('dir2/file', 'file-id')])
+ state = self.assertUpdate( # active has different ids
+ active=[('dir1/', 'dir1-id'),
+ ('dir1/file', 'file1-id'),
+ ('dir2/', 'dir2-id'),
+ ('dir2/file', 'file2-id')],
+ basis= [('dir1/', 'dir-id'),
+ ('dir1/file', 'file-id')],
+ target=[('dir2/', 'dir-id'),
+ ('dir2/file', 'file-id')])
+
+ def test_invalid_file_not_present(self):
+ state = self.assertBadDelta(
+ active=[('file', 'file-id')],
+ basis= [('file', 'file-id')],
+ delta=[('other-file', 'file', 'file-id')])
+
+ def test_invalid_new_id_same_path(self):
+ # The bad entry comes after
+ state = self.assertBadDelta(
+ active=[('file', 'file-id')],
+ basis= [('file', 'file-id')],
+ delta=[(None, 'file', 'file-id-2')])
+ # The bad entry comes first
+ state = self.assertBadDelta(
+ active=[('file', 'file-id-2')],
+ basis=[('file', 'file-id-2')],
+ delta=[(None, 'file', 'file-id')])
+
+ def test_invalid_existing_id(self):
+ state = self.assertBadDelta(
+ active=[('file', 'file-id')],
+ basis= [('file', 'file-id')],
+ delta=[(None, 'file', 'file-id')])
+
+ def test_invalid_parent_missing(self):
+ state = self.assertBadDelta(
+ active=[],
+ basis= [],
+ delta=[(None, 'path/path2', 'file-id')])
+        # Note: we force the active tree to have the directory by relying on
+        # how path_to_ie handles entries with missing parents.
+ state = self.assertBadDelta(
+ active=[('path/', 'path-id')],
+ basis= [],
+ delta=[(None, 'path/path2', 'file-id')])
+ state = self.assertBadDelta(
+ active=[('path/', 'path-id'),
+ ('path/path2', 'file-id')],
+ basis= [],
+ delta=[(None, 'path/path2', 'file-id')])
+
+ def test_renamed_dir_same_path(self):
+        # We replace the parent directory with another parent dir, but the
+        # child file (B-id) doesn't look like it has been moved.
+ state = self.assertUpdate(# Same as basis
+ active=[('dir/', 'A-id'),
+ ('dir/B', 'B-id')],
+ basis= [('dir/', 'A-id'),
+ ('dir/B', 'B-id')],
+ target=[('dir/', 'C-id'),
+ ('dir/B', 'B-id')])
+ state = self.assertUpdate(# Same as target
+ active=[('dir/', 'C-id'),
+ ('dir/B', 'B-id')],
+ basis= [('dir/', 'A-id'),
+ ('dir/B', 'B-id')],
+ target=[('dir/', 'C-id'),
+ ('dir/B', 'B-id')])
+ state = self.assertUpdate(# empty active
+ active=[],
+ basis= [('dir/', 'A-id'),
+ ('dir/B', 'B-id')],
+ target=[('dir/', 'C-id'),
+ ('dir/B', 'B-id')])
+ state = self.assertUpdate(# different active
+ active=[('dir/', 'D-id'),
+ ('dir/B', 'B-id')],
+ basis= [('dir/', 'A-id'),
+ ('dir/B', 'B-id')],
+ target=[('dir/', 'C-id'),
+ ('dir/B', 'B-id')])
+
+ def test_parent_child_swap(self):
+ state = self.assertUpdate(# Same as basis
+ active=[('A/', 'A-id'),
+ ('A/B/', 'B-id'),
+ ('A/B/C', 'C-id')],
+ basis= [('A/', 'A-id'),
+ ('A/B/', 'B-id'),
+ ('A/B/C', 'C-id')],
+ target=[('A/', 'B-id'),
+ ('A/B/', 'A-id'),
+ ('A/B/C', 'C-id')])
+ state = self.assertUpdate(# Same as target
+ active=[('A/', 'B-id'),
+ ('A/B/', 'A-id'),
+ ('A/B/C', 'C-id')],
+ basis= [('A/', 'A-id'),
+ ('A/B/', 'B-id'),
+ ('A/B/C', 'C-id')],
+ target=[('A/', 'B-id'),
+ ('A/B/', 'A-id'),
+ ('A/B/C', 'C-id')])
+ state = self.assertUpdate(# empty active
+ active=[],
+ basis= [('A/', 'A-id'),
+ ('A/B/', 'B-id'),
+ ('A/B/C', 'C-id')],
+ target=[('A/', 'B-id'),
+ ('A/B/', 'A-id'),
+ ('A/B/C', 'C-id')])
+ state = self.assertUpdate(# different active
+ active=[('D/', 'A-id'),
+ ('D/E/', 'B-id'),
+ ('F', 'C-id')],
+ basis= [('A/', 'A-id'),
+ ('A/B/', 'B-id'),
+ ('A/B/C', 'C-id')],
+ target=[('A/', 'B-id'),
+ ('A/B/', 'A-id'),
+ ('A/B/C', 'C-id')])
+
+ def test_change_root_id(self):
+ state = self.assertUpdate( # same as basis
+ active=[('', 'root-id'),
+ ('file', 'file-id')],
+ basis= [('', 'root-id'),
+ ('file', 'file-id')],
+ target=[('', 'target-root-id'),
+ ('file', 'file-id')])
+ state = self.assertUpdate( # same as target
+ active=[('', 'target-root-id'),
+ ('file', 'file-id')],
+ basis= [('', 'root-id'),
+ ('file', 'file-id')],
+ target=[('', 'target-root-id'),
+ ('file', 'root-id')])
+ state = self.assertUpdate( # all different
+ active=[('', 'active-root-id'),
+ ('file', 'file-id')],
+ basis= [('', 'root-id'),
+ ('file', 'file-id')],
+ target=[('', 'target-root-id'),
+ ('file', 'root-id')])
+
+ def test_change_file_absent_in_active(self):
+ state = self.assertUpdate(
+ active=[],
+ basis= [('file', 'file-id')],
+ target=[('file', 'file-id')])
+
+ def test_invalid_changed_file(self):
+ state = self.assertBadDelta( # Not present in basis
+ active=[('file', 'file-id')],
+ basis= [],
+ delta=[('file', 'file', 'file-id')])
+ state = self.assertBadDelta( # present at another location in basis
+ active=[('file', 'file-id')],
+ basis= [('other-file', 'file-id')],
+ delta=[('file', 'file', 'file-id')])
diff --git a/bzrlib/tests/test_email_message.py b/bzrlib/tests/test_email_message.py
new file mode 100644
index 0000000..f556e82
--- /dev/null
+++ b/bzrlib/tests/test_email_message.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from email.Header import decode_header
+
+from bzrlib import __version__ as _bzrlib_version
+from bzrlib.email_message import EmailMessage
+from bzrlib.errors import BzrBadParameterNotUnicode
+from bzrlib.smtp_connection import SMTPConnection
+from bzrlib.tests import TestCase
+
+EMPTY_MESSAGE = '''\
+From: from@from.com
+Subject: subject
+To: to@to.com
+User-Agent: Bazaar (%s)
+
+''' % _bzrlib_version
+
+_SIMPLE_MESSAGE = '''\
+MIME-Version: 1.0
+Content-Type: text/plain; charset="%%s"
+Content-Transfer-Encoding: %%s
+From: from@from.com
+Subject: subject
+To: to@to.com
+User-Agent: Bazaar (%s)
+
+%%s''' % _bzrlib_version
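+
+# The doubled %% placeholders above survive the interpolation of the bzr
+# version and are filled in later with (charset, transfer-encoding, body);
+# see the SIMPLE_MESSAGE_* constants below.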
+
+SIMPLE_MESSAGE_ASCII = _SIMPLE_MESSAGE % ('us-ascii', '7bit', 'body')
+SIMPLE_MESSAGE_UTF8 = _SIMPLE_MESSAGE % ('utf-8', 'base64', 'YsOzZHk=\n')
+SIMPLE_MESSAGE_8BIT = _SIMPLE_MESSAGE % ('8-bit', 'base64', 'YvRkeQ==\n')
+
+
+BOUNDARY = '=====123456=='
+
+_MULTIPART_HEAD = '''\
+Content-Type: multipart/mixed; boundary="%(boundary)s"
+MIME-Version: 1.0
+From: from@from.com
+Subject: subject
+To: to@to.com
+User-Agent: Bazaar (%(version)s)
+
+--%(boundary)s
+MIME-Version: 1.0
+Content-Type: text/plain; charset="us-ascii"
+Content-Transfer-Encoding: 7bit
+Content-Disposition: inline
+
+body
+''' % { 'version': _bzrlib_version, 'boundary': BOUNDARY }
+
+SIMPLE_MULTIPART_MESSAGE = _MULTIPART_HEAD + '--%s--' % BOUNDARY
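+# (A MIME multipart body is terminated by the boundary wrapped in '--', which
+# is why the closing marker is appended here and at the end of the complex
+# message below.)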
+
+COMPLEX_MULTIPART_MESSAGE = _MULTIPART_HEAD + '''\
+--%(boundary)s
+MIME-Version: 1.0
+Content-Type: text/%%s; charset="us-ascii"; name="lines.txt"
+Content-Transfer-Encoding: 7bit
+Content-Disposition: inline
+
+a
+b
+c
+d
+e
+
+--%(boundary)s--''' % { 'boundary': BOUNDARY }
+
+
+class TestEmailMessage(TestCase):
+
+ def test_empty_message(self):
+ msg = EmailMessage('from@from.com', 'to@to.com', 'subject')
+        self.assertEqualDiff(EMPTY_MESSAGE, msg.as_string())
+
+ def test_simple_message(self):
+ pairs = {
+ 'body': SIMPLE_MESSAGE_ASCII,
+ u'b\xf3dy': SIMPLE_MESSAGE_UTF8,
+ 'b\xc3\xb3dy': SIMPLE_MESSAGE_UTF8,
+ 'b\xf4dy': SIMPLE_MESSAGE_8BIT,
+ }
+ for body, expected in pairs.items():
+ msg = EmailMessage('from@from.com', 'to@to.com', 'subject', body)
+ self.assertEqualDiff(expected, msg.as_string())
+
+ def test_multipart_message(self):
+ msg = EmailMessage('from@from.com', 'to@to.com', 'subject')
+ msg.add_inline_attachment('body')
+ self.assertEqualDiff(SIMPLE_MULTIPART_MESSAGE, msg.as_string(BOUNDARY))
+
+ msg = EmailMessage('from@from.com', 'to@to.com', 'subject', 'body')
+ msg.add_inline_attachment(u'a\nb\nc\nd\ne\n', 'lines.txt', 'x-subtype')
+ self.assertEqualDiff(COMPLEX_MULTIPART_MESSAGE % 'x-subtype',
+ msg.as_string(BOUNDARY))
+
+ def test_headers_accept_unicode_and_utf8(self):
+ for user in [ u'Pepe P\xe9rez <pperez@ejemplo.com>',
+ 'Pepe P\xc3\xa9red <pperez@ejemplo.com>' ]:
+ msg = EmailMessage(user, user, user) # no exception raised
+
+ for header in ['From', 'To', 'Subject']:
+ value = msg[header]
+ str(value).decode('ascii') # no UnicodeDecodeError
+
+ def test_headers_reject_8bit(self):
+ for i in range(3): # from_address, to_address, subject
+ x = [ '"J. Random Developer" <jrandom@example.com>' ] * 3
+ x[i] = 'Pepe P\xe9rez <pperez@ejemplo.com>'
+ self.assertRaises(BzrBadParameterNotUnicode, EmailMessage, *x)
+
+ def test_multiple_destinations(self):
+ to_addresses = [ 'to1@to.com', 'to2@to.com', 'to3@to.com' ]
+ msg = EmailMessage('from@from.com', to_addresses, 'subject')
+ self.assertContainsRe(msg.as_string(), 'To: ' +
+ ', '.join(to_addresses)) # re.M can't be passed, so no ^$
+
+ def test_retrieving_headers(self):
+ msg = EmailMessage('from@from.com', 'to@to.com', 'subject')
+ for header, value in [('From', 'from@from.com'), ('To', 'to@to.com'),
+ ('Subject', 'subject')]:
+ self.assertEqual(value, msg.get(header))
+ self.assertEqual(value, msg[header])
+ self.assertEqual(None, msg.get('Does-Not-Exist'))
+ self.assertEqual(None, msg['Does-Not-Exist'])
+ self.assertEqual('None', msg.get('Does-Not-Exist', 'None'))
+
+ def test_setting_headers(self):
+ msg = EmailMessage('from@from.com', 'to@to.com', 'subject')
+ msg['To'] = 'to2@to.com'
+ msg['Cc'] = 'cc@cc.com'
+ self.assertEqual('to2@to.com', msg['To'])
+ self.assertEqual('cc@cc.com', msg['Cc'])
+
+ def test_send(self):
+ class FakeConfig:
+ def get(self, option):
+ return None
+
+ messages = []
+
+ def send_as_append(_self, msg):
+ messages.append(msg.as_string(BOUNDARY))
+
+ old_send_email = SMTPConnection.send_email
+ try:
+ SMTPConnection.send_email = send_as_append
+
+ EmailMessage.send(FakeConfig(), 'from@from.com', 'to@to.com',
+ 'subject', 'body', u'a\nb\nc\nd\ne\n', 'lines.txt')
+ self.assertEqualDiff(COMPLEX_MULTIPART_MESSAGE % 'plain',
+ messages[0])
+ messages[:] = []
+
+ EmailMessage.send(FakeConfig(), 'from@from.com', 'to@to.com',
+ 'subject', 'body', u'a\nb\nc\nd\ne\n', 'lines.txt',
+ 'x-patch')
+ self.assertEqualDiff(COMPLEX_MULTIPART_MESSAGE % 'x-patch',
+ messages[0])
+ messages[:] = []
+
+ EmailMessage.send(FakeConfig(), 'from@from.com', 'to@to.com',
+ 'subject', 'body')
+ self.assertEqualDiff(SIMPLE_MESSAGE_ASCII , messages[0])
+ messages[:] = []
+ finally:
+ SMTPConnection.send_email = old_send_email
+
+ def test_address_to_encoded_header(self):
+ def decode(s):
+            """Convert an RFC2047-encoded string to a unicode string."""
+ return ' '.join([chunk.decode(encoding or 'ascii')
+ for chunk, encoding in decode_header(s)])
+
+ address = 'jrandom@example.com'
+ encoded = EmailMessage.address_to_encoded_header(address)
+ self.assertEqual(address, encoded)
+
+ address = 'J Random Developer <jrandom@example.com>'
+ encoded = EmailMessage.address_to_encoded_header(address)
+ self.assertEqual(address, encoded)
+
+ address = '"J. Random Developer" <jrandom@example.com>'
+ encoded = EmailMessage.address_to_encoded_header(address)
+ self.assertEqual(address, encoded)
+
+ address = u'Pepe P\xe9rez <pperez@ejemplo.com>' # unicode ok
+ encoded = EmailMessage.address_to_encoded_header(address)
+ self.assert_('pperez@ejemplo.com' in encoded) # addr must be unencoded
+ self.assertEquals(address, decode(encoded))
+
+ address = 'Pepe P\xc3\xa9red <pperez@ejemplo.com>' # UTF-8 ok
+ encoded = EmailMessage.address_to_encoded_header(address)
+ self.assert_('pperez@ejemplo.com' in encoded)
+ self.assertEquals(address, decode(encoded).encode('utf-8'))
+
+ address = 'Pepe P\xe9rez <pperez@ejemplo.com>' # ISO-8859-1 not ok
+ self.assertRaises(BzrBadParameterNotUnicode,
+ EmailMessage.address_to_encoded_header, address)
+
+ def test_string_with_encoding(self):
+ pairs = {
+ u'Pepe': ('Pepe', 'ascii'),
+ u'P\xe9rez': ('P\xc3\xa9rez', 'utf-8'),
+ 'Perez': ('Perez', 'ascii'), # u'Pepe' == 'Pepe'
+ 'P\xc3\xa9rez': ('P\xc3\xa9rez', 'utf-8'),
+ 'P\xe8rez': ('P\xe8rez', '8-bit'),
+ }
+ for string_, pair in pairs.items():
+ self.assertEqual(pair, EmailMessage.string_with_encoding(string_))
diff --git a/bzrlib/tests/test_eol_filters.py b/bzrlib/tests/test_eol_filters.py
new file mode 100644
index 0000000..a5ba865
--- /dev/null
+++ b/bzrlib/tests/test_eol_filters.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for eol conversion."""
+
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.filters import _get_filter_stack_for
+from bzrlib.filters.eol import (
+ _to_crlf_converter,
+ _to_lf_converter,
+ )
+from bzrlib.tests import TestCase
+
+
+# Sample files
+_sample_file1 = """hello\nworld\r\n"""
+
+
+class TestEolFilters(TestCase):
+
+ def test_to_lf(self):
+ result = _to_lf_converter([_sample_file1])
+ self.assertEqual(["hello\nworld\n"], result)
+
+ def test_to_crlf(self):
+ result = _to_crlf_converter([_sample_file1])
+ self.assertEqual(["hello\r\nworld\r\n"], result)
+
+
+class TestEolRulesSpecifications(TestCase):
+
+ def test_exact_value(self):
+ """'eol = exact' should have no content filters"""
+ prefs = (('eol','exact'),)
+ self.assertEqual([], _get_filter_stack_for(prefs))
+
+ def test_other_known_values(self):
+ """These known eol values have corresponding filters."""
+ known_values = ('lf', 'crlf', 'native',
+ 'native-with-crlf-in-repo', 'lf-with-crlf-in-repo',
+ 'crlf-with-crlf-in-repo')
+ for value in known_values:
+ prefs = (('eol',value),)
+ self.assertNotEqual([], _get_filter_stack_for(prefs))
+
+ def test_unknown_value(self):
+        """Unknown eol values should raise an error."""
+ prefs = (('eol','unknown-value'),)
+ self.assertRaises(errors.BzrError, _get_filter_stack_for, prefs)
+
+ def test_eol_missing_altogether_is_ok(self):
+        """Not having eol in the set of preferences should be ok."""
+ # In this case, 'eol' is looked up with a value of None.
+ prefs = (('eol', None),)
+ self.assertEqual([], _get_filter_stack_for(prefs))
diff --git a/bzrlib/tests/test_errors.py b/bzrlib/tests/test_errors.py
new file mode 100644
index 0000000..84b1387
--- /dev/null
+++ b/bzrlib/tests/test_errors.py
@@ -0,0 +1,765 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the formatting and construction of errors."""
+
+import inspect
+import re
+import socket
+import sys
+
+from bzrlib import (
+ controldir,
+ errors,
+ osutils,
+ urlutils,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ TestSkipped,
+ )
+
+
+class TestErrors(TestCaseWithTransport):
+
+ def test_no_arg_named_message(self):
+ """Ensure the __init__ and _fmt in errors do not have "message" arg.
+
+ This test fails if __init__ or _fmt in errors has an argument
+ named "message" as this can cause errors in some Python versions.
+ Python 2.5 uses a slot for StandardError.message.
+ See bug #603461
+ """
+        fmt_pattern = re.compile(r"%\(message\)[sir]")
+ subclasses_present = getattr(errors.BzrError, '__subclasses__', None)
+ if not subclasses_present:
+ raise TestSkipped('__subclasses__ attribute required for classes. '
+ 'Requires Python 2.5 or later.')
+ for c in errors.BzrError.__subclasses__():
+ init = getattr(c, '__init__', None)
+ fmt = getattr(c, '_fmt', None)
+ if init:
+ args = inspect.getargspec(init)[0]
+ self.assertFalse('message' in args,
+ ('Argument name "message" not allowed for '
+ '"errors.%s.__init__"' % c.__name__))
+ if fmt and fmt_pattern.search(fmt):
+ self.assertFalse(True, ('"message" not allowed in '
+ '"errors.%s._fmt"' % c.__name__))
+
+ def test_bad_filename_encoding(self):
+ error = errors.BadFilenameEncoding('bad/filen\xe5me', 'UTF-8')
+ self.assertEqualDiff(
+ "Filename 'bad/filen\\xe5me' is not valid in your current"
+ " filesystem encoding UTF-8",
+ str(error))
+
+ def test_corrupt_dirstate(self):
+ error = errors.CorruptDirstate('path/to/dirstate', 'the reason why')
+ self.assertEqualDiff(
+ "Inconsistency in dirstate file path/to/dirstate.\n"
+ "Error: the reason why",
+ str(error))
+
+ def test_dirstate_corrupt(self):
+ error = errors.DirstateCorrupt('.bzr/checkout/dirstate',
+ 'trailing garbage: "x"')
+ self.assertEqualDiff("The dirstate file (.bzr/checkout/dirstate)"
+ " appears to be corrupt: trailing garbage: \"x\"",
+ str(error))
+
+ def test_disabled_method(self):
+ error = errors.DisabledMethod("class name")
+ self.assertEqualDiff(
+ "The smart server method 'class name' is disabled.", str(error))
+
+ def test_duplicate_file_id(self):
+ error = errors.DuplicateFileId('a_file_id', 'foo')
+ self.assertEqualDiff('File id {a_file_id} already exists in inventory'
+ ' as foo', str(error))
+
+ def test_duplicate_help_prefix(self):
+ error = errors.DuplicateHelpPrefix('foo')
+ self.assertEqualDiff('The prefix foo is in the help search path twice.',
+ str(error))
+
+ def test_ghost_revisions_have_no_revno(self):
+ error = errors.GhostRevisionsHaveNoRevno('target', 'ghost_rev')
+ self.assertEqualDiff("Could not determine revno for {target} because"
+ " its ancestry shows a ghost at {ghost_rev}",
+ str(error))
+
+ def test_incompatibleAPI(self):
+ error = errors.IncompatibleAPI("module", (1, 2, 3), (4, 5, 6), (7, 8, 9))
+ self.assertEqualDiff(
+ 'The API for "module" is not compatible with "(1, 2, 3)". '
+ 'It supports versions "(4, 5, 6)" to "(7, 8, 9)".',
+ str(error))
+
+ def test_inconsistent_delta(self):
+ error = errors.InconsistentDelta('path', 'file-id', 'reason for foo')
+ self.assertEqualDiff(
+ "An inconsistent delta was supplied involving 'path', 'file-id'\n"
+ "reason: reason for foo",
+ str(error))
+
+ def test_inconsistent_delta_delta(self):
+ error = errors.InconsistentDeltaDelta([], 'reason')
+ self.assertEqualDiff(
+ "An inconsistent delta was supplied: []\nreason: reason",
+ str(error))
+
+ def test_in_process_transport(self):
+ error = errors.InProcessTransport('fpp')
+ self.assertEqualDiff(
+ "The transport 'fpp' is only accessible within this process.",
+ str(error))
+
+ def test_invalid_http_range(self):
+ error = errors.InvalidHttpRange('path',
+ 'Content-Range: potatoes 0-00/o0oo0',
+ 'bad range')
+ self.assertEquals("Invalid http range"
+ " 'Content-Range: potatoes 0-00/o0oo0'"
+ " for path: bad range",
+ str(error))
+
+ def test_invalid_range(self):
+ error = errors.InvalidRange('path', 12, 'bad range')
+ self.assertEquals("Invalid range access in path at 12: bad range",
+ str(error))
+
+ def test_inventory_modified(self):
+ error = errors.InventoryModified("a tree to be repred")
+ self.assertEqualDiff("The current inventory for the tree 'a tree to "
+ "be repred' has been modified, so a clean inventory cannot be "
+ "read without data loss.",
+ str(error))
+
+ def test_jail_break(self):
+ error = errors.JailBreak("some url")
+ self.assertEqualDiff("An attempt to access a url outside the server"
+ " jail was made: 'some url'.",
+ str(error))
+
+ def test_lock_active(self):
+ error = errors.LockActive("lock description")
+ self.assertEqualDiff("The lock for 'lock description' is in use and "
+ "cannot be broken.",
+ str(error))
+
+ def test_lock_corrupt(self):
+ error = errors.LockCorrupt("corruption info")
+ self.assertEqualDiff("Lock is apparently held, but corrupted: "
+ "corruption info\n"
+ "Use 'bzr break-lock' to clear it",
+ str(error))
+
+ def test_knit_data_stream_incompatible(self):
+ error = errors.KnitDataStreamIncompatible(
+ 'stream format', 'target format')
+ self.assertEqual('Cannot insert knit data stream of format '
+ '"stream format" into knit of format '
+ '"target format".', str(error))
+
+ def test_knit_data_stream_unknown(self):
+ error = errors.KnitDataStreamUnknown(
+ 'stream format')
+ self.assertEqual('Cannot parse knit data stream of format '
+ '"stream format".', str(error))
+
+ def test_knit_header_error(self):
+ error = errors.KnitHeaderError('line foo\n', 'path/to/file')
+ self.assertEqual("Knit header error: 'line foo\\n' unexpected"
+ " for file \"path/to/file\".", str(error))
+
+ def test_knit_index_unknown_method(self):
+ error = errors.KnitIndexUnknownMethod('http://host/foo.kndx',
+ ['bad', 'no-eol'])
+ self.assertEqual("Knit index http://host/foo.kndx does not have a"
+ " known method in options: ['bad', 'no-eol']",
+ str(error))
+
+ def test_medium_not_connected(self):
+ error = errors.MediumNotConnected("a medium")
+ self.assertEqualDiff(
+ "The medium 'a medium' is not connected.", str(error))
+
+ def test_no_public_branch(self):
+ b = self.make_branch('.')
+ error = errors.NoPublicBranch(b)
+ url = urlutils.unescape_for_display(b.base, 'ascii')
+ self.assertEqualDiff(
+ 'There is no public branch set for "%s".' % url, str(error))
+
+ def test_no_repo(self):
+ dir = controldir.ControlDir.create(self.get_url())
+ error = errors.NoRepositoryPresent(dir)
+ self.assertNotEqual(-1, str(error).find((dir.transport.clone('..').base)))
+ self.assertEqual(-1, str(error).find((dir.transport.base)))
+
+ def test_no_smart_medium(self):
+ error = errors.NoSmartMedium("a transport")
+ self.assertEqualDiff("The transport 'a transport' cannot tunnel the "
+ "smart protocol.",
+ str(error))
+
+ def test_no_help_topic(self):
+ error = errors.NoHelpTopic("topic")
+ self.assertEqualDiff("No help could be found for 'topic'. "
+ "Please use 'bzr help topics' to obtain a list of topics.",
+ str(error))
+
+ def test_no_such_id(self):
+ error = errors.NoSuchId("atree", "anid")
+ self.assertEqualDiff("The file id \"anid\" is not present in the tree "
+ "atree.",
+ str(error))
+
+ def test_no_such_revision_in_tree(self):
+ error = errors.NoSuchRevisionInTree("atree", "anid")
+ self.assertEqualDiff("The revision id {anid} is not present in the"
+ " tree atree.", str(error))
+ self.assertIsInstance(error, errors.NoSuchRevision)
+
+ def test_not_stacked(self):
+ error = errors.NotStacked('a branch')
+ self.assertEqualDiff("The branch 'a branch' is not stacked.",
+ str(error))
+
+ def test_not_write_locked(self):
+ error = errors.NotWriteLocked('a thing to repr')
+ self.assertEqualDiff("'a thing to repr' is not write locked but needs "
+ "to be.",
+ str(error))
+
+ def test_lock_failed(self):
+ error = errors.LockFailed('http://canonical.com/', 'readonly transport')
+ self.assertEqualDiff("Cannot lock http://canonical.com/: readonly transport",
+ str(error))
+ self.assertFalse(error.internal_error)
+
+ def test_too_many_concurrent_requests(self):
+ error = errors.TooManyConcurrentRequests("a medium")
+ self.assertEqualDiff("The medium 'a medium' has reached its concurrent "
+ "request limit. Be sure to finish_writing and finish_reading on "
+ "the currently open request.",
+ str(error))
+
+ def test_unavailable_representation(self):
+ error = errors.UnavailableRepresentation(('key',), "mpdiff", "fulltext")
+ self.assertEqualDiff("The encoding 'mpdiff' is not available for key "
+ "('key',) which is encoded as 'fulltext'.",
+ str(error))
+
+ def test_unknown_hook(self):
+ error = errors.UnknownHook("branch", "foo")
+ self.assertEqualDiff("The branch hook 'foo' is unknown in this version"
+ " of bzrlib.",
+ str(error))
+ error = errors.UnknownHook("tree", "bar")
+ self.assertEqualDiff("The tree hook 'bar' is unknown in this version"
+ " of bzrlib.",
+ str(error))
+
+ def test_unstackable_branch_format(self):
+ format = u'foo'
+ url = "/foo"
+ error = errors.UnstackableBranchFormat(format, url)
+ self.assertEqualDiff(
+ "The branch '/foo'(foo) is not a stackable format. "
+ "You will need to upgrade the branch to permit branch stacking.",
+ str(error))
+
+ def test_unstackable_location(self):
+ error = errors.UnstackableLocationError('foo', 'bar')
+ self.assertEqualDiff("The branch 'foo' cannot be stacked on 'bar'.",
+ str(error))
+
+ def test_unstackable_repository_format(self):
+ format = u'foo'
+ url = "/foo"
+ error = errors.UnstackableRepositoryFormat(format, url)
+ self.assertEqualDiff(
+ "The repository '/foo'(foo) is not a stackable format. "
+ "You will need to upgrade the repository to permit branch stacking.",
+ str(error))
+
+ def test_up_to_date(self):
+ error = errors.UpToDateFormat("someformat")
+ self.assertEqualDiff(
+ "The branch format someformat is already at the most "
+ "recent format.", str(error))
+
+ def test_corrupt_repository(self):
+ repo = self.make_repository('.')
+ error = errors.CorruptRepository(repo)
+ self.assertEqualDiff("An error has been detected in the repository %s.\n"
+ "Please run bzr reconcile on this repository." %
+ repo.bzrdir.root_transport.base,
+ str(error))
+
+ def test_read_error(self):
+ # a unicode path to check that %r is being used.
+ path = u'a path'
+ error = errors.ReadError(path)
+ self.assertEqualDiff("Error reading from u'a path'.", str(error))
+
+ def test_bad_index_format_signature(self):
+ error = errors.BadIndexFormatSignature("foo", "bar")
+ self.assertEqual("foo is not an index of type bar.",
+ str(error))
+
+ def test_bad_index_data(self):
+ error = errors.BadIndexData("foo")
+ self.assertEqual("Error in data for index foo.",
+ str(error))
+
+ def test_bad_index_duplicate_key(self):
+ error = errors.BadIndexDuplicateKey("foo", "bar")
+ self.assertEqual("The key 'foo' is already in index 'bar'.",
+ str(error))
+
+ def test_bad_index_key(self):
+ error = errors.BadIndexKey("foo")
+ self.assertEqual("The key 'foo' is not a valid key.",
+ str(error))
+
+ def test_bad_index_options(self):
+ error = errors.BadIndexOptions("foo")
+ self.assertEqual("Could not parse options for index foo.",
+ str(error))
+
+ def test_bad_index_value(self):
+ error = errors.BadIndexValue("foo")
+ self.assertEqual("The value 'foo' is not a valid value.",
+ str(error))
+
+ def test_bzrerror_from_literal_string(self):
+ # Some code constructs BzrError from a literal string, in which case
+ # no further formatting is done. (I'm not sure raising the base class
+ # is a great idea, but if the exception is not intended to be caught
+ # perhaps no more is needed.)
+ try:
+ raise errors.BzrError('this is my errors; %d is not expanded')
+ except errors.BzrError, e:
+ self.assertEqual('this is my errors; %d is not expanded', str(e))
+
+ def test_reading_completed(self):
+ error = errors.ReadingCompleted("a request")
+ self.assertEqualDiff("The MediumRequest 'a request' has already had "
+ "finish_reading called upon it - the request has been completed and"
+ " no more data may be read.",
+ str(error))
+
+ def test_writing_completed(self):
+ error = errors.WritingCompleted("a request")
+ self.assertEqualDiff("The MediumRequest 'a request' has already had "
+ "finish_writing called upon it - accept bytes may not be called "
+ "anymore.",
+ str(error))
+
+ def test_writing_not_completed(self):
+ error = errors.WritingNotComplete("a request")
+ self.assertEqualDiff("The MediumRequest 'a request' has not has "
+ "finish_writing called upon it - until the write phase is complete"
+ " no data may be read.",
+ str(error))
+
+ def test_transport_not_possible(self):
+ error = errors.TransportNotPossible('readonly', 'original error')
+ self.assertEqualDiff('Transport operation not possible:'
+ ' readonly original error', str(error))
+
+ def assertSocketConnectionError(self, expected, *args, **kwargs):
+ """Check the formatting of a SocketConnectionError exception"""
+ e = errors.SocketConnectionError(*args, **kwargs)
+ self.assertEqual(expected, str(e))
+
+ def test_socket_connection_error(self):
+ """Test the formatting of SocketConnectionError"""
+
+ # There should be a default msg about failing to connect
+ # we only require a host name.
+ self.assertSocketConnectionError(
+ 'Failed to connect to ahost',
+ 'ahost')
+
+ # If port is None, we don't put :None
+ self.assertSocketConnectionError(
+ 'Failed to connect to ahost',
+ 'ahost', port=None)
+ # But if port is supplied we include it
+ self.assertSocketConnectionError(
+ 'Failed to connect to ahost:22',
+ 'ahost', port=22)
+
+ # We can also supply extra information about the error
+ # with or without a port
+ self.assertSocketConnectionError(
+ 'Failed to connect to ahost:22; bogus error',
+ 'ahost', port=22, orig_error='bogus error')
+ self.assertSocketConnectionError(
+ 'Failed to connect to ahost; bogus error',
+ 'ahost', orig_error='bogus error')
+ # An exception object can be passed rather than a string
+ orig_error = ValueError('bad value')
+ self.assertSocketConnectionError(
+ 'Failed to connect to ahost; %s' % (str(orig_error),),
+ host='ahost', orig_error=orig_error)
+
+ # And we can supply a custom failure message
+ self.assertSocketConnectionError(
+ 'Unable to connect to ssh host ahost:444; my_error',
+ host='ahost', port=444, msg='Unable to connect to ssh host',
+ orig_error='my_error')
+
+ def test_target_not_branch(self):
+ """Test the formatting of TargetNotBranch."""
+ error = errors.TargetNotBranch('foo')
+ self.assertEqual(
+ "Your branch does not have all of the revisions required in "
+ "order to merge this merge directive and the target "
+ "location specified in the merge directive is not a branch: "
+ "foo.", str(error))
+
+ def test_malformed_bug_identifier(self):
+ """Test the formatting of MalformedBugIdentifier."""
+ error = errors.MalformedBugIdentifier('bogus', 'reason for bogosity')
+ self.assertEqual(
+ 'Did not understand bug identifier bogus: reason for bogosity. '
+ 'See "bzr help bugs" for more information on this feature.',
+ str(error))
+
+ def test_unknown_bug_tracker_abbreviation(self):
+ """Test the formatting of UnknownBugTrackerAbbreviation."""
+ branch = self.make_branch('some_branch')
+ error = errors.UnknownBugTrackerAbbreviation('xxx', branch)
+ self.assertEqual(
+ "Cannot find registered bug tracker called xxx on %s" % branch,
+ str(error))
+
+ def test_unexpected_smart_server_response(self):
+ e = errors.UnexpectedSmartServerResponse(('not yes',))
+ self.assertEqual(
+ "Could not understand response from smart server: ('not yes',)",
+ str(e))
+
+ def test_unknown_container_format(self):
+ """Test the formatting of UnknownContainerFormatError."""
+ e = errors.UnknownContainerFormatError('bad format string')
+ self.assertEqual(
+ "Unrecognised container format: 'bad format string'",
+ str(e))
+
+ def test_unexpected_end_of_container(self):
+ """Test the formatting of UnexpectedEndOfContainerError."""
+ e = errors.UnexpectedEndOfContainerError()
+ self.assertEqual(
+ "Unexpected end of container stream", str(e))
+
+ def test_unknown_record_type(self):
+ """Test the formatting of UnknownRecordTypeError."""
+ e = errors.UnknownRecordTypeError("X")
+ self.assertEqual(
+ "Unknown record type: 'X'",
+ str(e))
+
+ def test_invalid_record(self):
+ """Test the formatting of InvalidRecordError."""
+ e = errors.InvalidRecordError("xxx")
+ self.assertEqual(
+ "Invalid record: xxx",
+ str(e))
+
+ def test_container_has_excess_data(self):
+ """Test the formatting of ContainerHasExcessDataError."""
+ e = errors.ContainerHasExcessDataError("excess bytes")
+ self.assertEqual(
+ "Container has data after end marker: 'excess bytes'",
+ str(e))
+
+ def test_duplicate_record_name_error(self):
+ """Test the formatting of DuplicateRecordNameError."""
+ e = errors.DuplicateRecordNameError(u"n\xe5me".encode('utf-8'))
+ self.assertEqual(
+ "Container has multiple records with the same name: n\xc3\xa5me",
+ str(e))
+
+ def test_check_error(self):
+ # This has a member called 'message', which is problematic in
+ # python2.5 because that is a slot on the base Exception class
+ e = errors.BzrCheckError('example check failure')
+ self.assertEqual(
+ "Internal check failed: example check failure",
+ str(e))
+ self.assertTrue(e.internal_error)
+
+ def test_repository_data_stream_error(self):
+ """Test the formatting of RepositoryDataStreamError."""
+ e = errors.RepositoryDataStreamError(u"my reason")
+ self.assertEqual(
+ "Corrupt or incompatible data stream: my reason", str(e))
+
+ def test_immortal_pending_deletion_message(self):
+ err = errors.ImmortalPendingDeletion('foo')
+ self.assertEquals(
+ "Unable to delete transform temporary directory foo. "
+ "Please examine foo to see if it contains any files "
+ "you wish to keep, and delete it when you are done.",
+ str(err))
+
+ def test_unable_create_symlink(self):
+ err = errors.UnableCreateSymlink()
+ self.assertEquals(
+ "Unable to create symlink on this platform",
+ str(err))
+ err = errors.UnableCreateSymlink(path=u'foo')
+ self.assertEquals(
+ "Unable to create symlink 'foo' on this platform",
+ str(err))
+ err = errors.UnableCreateSymlink(path=u'\xb5')
+ self.assertEquals(
+ "Unable to create symlink u'\\xb5' on this platform",
+ str(err))
+
+ def test_invalid_url_join(self):
+ """Test the formatting of InvalidURLJoin."""
+ e = errors.InvalidURLJoin('Reason', 'base path', ('args',))
+ self.assertEqual(
+ "Invalid URL join request: Reason: 'base path' + ('args',)",
+ str(e))
+
+ def test_incorrect_url(self):
+ err = errors.InvalidBugTrackerURL('foo', 'http://bug.com/')
+ self.assertEquals(
+ ("The URL for bug tracker \"foo\" doesn't contain {id}: "
+ "http://bug.com/"),
+ str(err))
+
+ def test_unable_encode_path(self):
+ err = errors.UnableEncodePath('foo', 'executable')
+ self.assertEquals("Unable to encode executable path 'foo' in "
+ "user encoding " + osutils.get_user_encoding(),
+ str(err))
+
+ def test_unknown_format(self):
+ err = errors.UnknownFormatError('bar', kind='foo')
+ self.assertEquals("Unknown foo format: 'bar'", str(err))
+
+ def test_unknown_rules(self):
+ err = errors.UnknownRules(['foo', 'bar'])
+ self.assertEquals("Unknown rules detected: foo, bar.", str(err))
+
+ def test_tip_change_rejected(self):
+ err = errors.TipChangeRejected(u'Unicode message\N{INTERROBANG}')
+ self.assertEquals(
+ u'Tip change rejected: Unicode message\N{INTERROBANG}',
+ unicode(err))
+ self.assertEquals(
+ 'Tip change rejected: Unicode message\xe2\x80\xbd',
+ str(err))
+
+ def test_error_from_smart_server(self):
+ error_tuple = ('error', 'tuple')
+ err = errors.ErrorFromSmartServer(error_tuple)
+ self.assertEquals(
+ "Error received from smart server: ('error', 'tuple')", str(err))
+
+ def test_untranslateable_error_from_smart_server(self):
+ error_tuple = ('error', 'tuple')
+ orig_err = errors.ErrorFromSmartServer(error_tuple)
+ err = errors.UnknownErrorFromSmartServer(orig_err)
+ self.assertEquals(
+ "Server sent an unexpected error: ('error', 'tuple')", str(err))
+
+ def test_smart_message_handler_error(self):
+ # Make an exc_info tuple.
+ try:
+ raise Exception("example error")
+ except Exception:
+ err = errors.SmartMessageHandlerError(sys.exc_info())
+ # GZ 2010-11-08: Should not store exc_info in exception instances.
+ try:
+ self.assertStartsWith(
+ str(err), "The message handler raised an exception:\n")
+ self.assertEndsWith(str(err), "Exception: example error\n")
+ finally:
+ del err
+
+ def test_must_have_working_tree(self):
+ err = errors.MustHaveWorkingTree('foo', 'bar')
+ self.assertEqual(str(err), "Branching 'bar'(foo) must create a"
+ " working tree.")
+
+ def test_no_such_view(self):
+ err = errors.NoSuchView('foo')
+ self.assertEquals("No such view: foo.", str(err))
+
+ def test_views_not_supported(self):
+ err = errors.ViewsNotSupported('atree')
+ err_str = str(err)
+ self.assertStartsWith(err_str, "Views are not supported by ")
+ self.assertEndsWith(err_str, "; use 'bzr upgrade' to change your "
+ "tree to a later format.")
+
+ def test_file_outside_view(self):
+ err = errors.FileOutsideView('baz', ['foo', 'bar'])
+ self.assertEquals('Specified file "baz" is outside the current view: '
+ 'foo, bar', str(err))
+
+ def test_invalid_shelf_id(self):
+ invalid_id = "foo"
+ err = errors.InvalidShelfId(invalid_id)
+ self.assertEqual('"foo" is not a valid shelf id, '
+ 'try a number instead.', str(err))
+
+ def test_unresumable_write_group(self):
+ repo = "dummy repo"
+ wg_tokens = ['token']
+ reason = "a reason"
+ err = errors.UnresumableWriteGroup(repo, wg_tokens, reason)
+ self.assertEqual(
+ "Repository dummy repo cannot resume write group "
+ "['token']: a reason", str(err))
+
+ def test_unsuspendable_write_group(self):
+ repo = "dummy repo"
+ err = errors.UnsuspendableWriteGroup(repo)
+ self.assertEqual(
+ 'Repository dummy repo cannot suspend a write group.', str(err))
+
+ def test_not_branch_no_args(self):
+ err = errors.NotBranchError('path')
+ self.assertEqual('Not a branch: "path".', str(err))
+
+ def test_not_branch_bzrdir_with_repo(self):
+ bzrdir = self.make_repository('repo').bzrdir
+ err = errors.NotBranchError('path', bzrdir=bzrdir)
+ self.assertEqual(
+ 'Not a branch: "path": location is a repository.', str(err))
+
+ def test_not_branch_bzrdir_without_repo(self):
+ bzrdir = self.make_bzrdir('bzrdir')
+ err = errors.NotBranchError('path', bzrdir=bzrdir)
+ self.assertEqual('Not a branch: "path".', str(err))
+
+ def test_not_branch_bzrdir_with_recursive_not_branch_error(self):
+ class FakeBzrDir(object):
+ def open_repository(self):
+ # str() on the NotBranchError will trigger a call to this,
+                # which in turn will raise another, identical NotBranchError.
+ raise errors.NotBranchError('path', bzrdir=FakeBzrDir())
+ err = errors.NotBranchError('path', bzrdir=FakeBzrDir())
+ self.assertEqual('Not a branch: "path".', str(err))
+
+ def test_not_branch_laziness(self):
+ real_bzrdir = self.make_bzrdir('path')
+ class FakeBzrDir(object):
+ def __init__(self):
+ self.calls = []
+ def open_repository(self):
+ self.calls.append('open_repository')
+ raise errors.NoRepositoryPresent(real_bzrdir)
+ fake_bzrdir = FakeBzrDir()
+ err = errors.NotBranchError('path', bzrdir=fake_bzrdir)
+ self.assertEqual([], fake_bzrdir.calls)
+ str(err)
+ self.assertEqual(['open_repository'], fake_bzrdir.calls)
+ # Stringifying twice doesn't try to open a repository twice.
+ str(err)
+ self.assertEqual(['open_repository'], fake_bzrdir.calls)
+
+ def test_invalid_pattern(self):
+ error = errors.InvalidPattern('Bad pattern msg.')
+ self.assertEqualDiff("Invalid pattern(s) found. Bad pattern msg.",
+ str(error))
+
+ def test_recursive_bind(self):
+ error = errors.RecursiveBind('foo_bar_branch')
+ msg = ('Branch "foo_bar_branch" appears to be bound to itself. '
+ 'Please use `bzr unbind` to fix.')
+ self.assertEqualDiff(msg, str(error))
+
+ def test_retry_with_new_packs(self):
+ fake_exc_info = ('{exc type}', '{exc value}', '{exc traceback}')
+ error = errors.RetryWithNewPacks(
+ '{context}', reload_occurred=False, exc_info=fake_exc_info)
+ self.assertEqual(
+ 'Pack files have changed, reload and retry. context: '
+ '{context} {exc value}', str(error))
+
+
+class PassThroughError(errors.BzrError):
+
+ _fmt = """Pass through %(foo)s and %(bar)s"""
+
+ def __init__(self, foo, bar):
+ errors.BzrError.__init__(self, foo=foo, bar=bar)
+
+
+class ErrorWithBadFormat(errors.BzrError):
+
+ _fmt = """One format specifier: %(thing)s"""
+
+
+class ErrorWithNoFormat(errors.BzrError):
+ __doc__ = """This class has a docstring but no format string."""
+
+
+class TestErrorFormatting(TestCase):
+
+ def test_always_str(self):
+ e = PassThroughError(u'\xb5', 'bar')
+ self.assertIsInstance(e.__str__(), str)
+ # In Python str(foo) *must* return a real byte string
+ # not a Unicode string. The following line would raise a
+ # Unicode error, because it tries to call str() on the string
+ # returned from e.__str__(), and it has non ascii characters
+ s = str(e)
+ self.assertEqual('Pass through \xc2\xb5 and bar', s)
+
+ def test_missing_format_string(self):
+ e = ErrorWithNoFormat(param='randomvalue')
+ self.assertStartsWith(str(e),
+ "Unprintable exception ErrorWithNoFormat")
+
+ def test_mismatched_format_args(self):
+ # Even though ErrorWithBadFormat's format string does not match the
+        # arguments we construct it with, we can still stringify an instance
+        # of this exception. The resulting string will say it is unprintable.
+ e = ErrorWithBadFormat(not_thing='x')
+ self.assertStartsWith(
+ str(e), 'Unprintable exception ErrorWithBadFormat')
+
+ def test_cannot_bind_address(self):
+ # see <https://bugs.launchpad.net/bzr/+bug/286871>
+ e = errors.CannotBindAddress('example.com', 22,
+ socket.error(13, 'Permission denied'))
+ self.assertContainsRe(str(e),
+ r'Cannot bind address "example\.com:22":.*Permission denied')
+
+ def test_file_timestamp_unavailable(self):
+ e = errors.FileTimestampUnavailable("/path/foo")
+ self.assertEquals("The filestamp for /path/foo is not available.",
+ str(e))
+
+ def test_transform_rename_failed(self):
+ e = errors.TransformRenameFailed(u"from", u"to", "readonly file", 2)
+ self.assertEquals(
+ u"Failed to rename from to to: readonly file",
+ str(e))
diff --git a/bzrlib/tests/test_estimate_compressed_size.py b/bzrlib/tests/test_estimate_compressed_size.py
new file mode 100644
index 0000000..b26ca73
--- /dev/null
+++ b/bzrlib/tests/test_estimate_compressed_size.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for our estimation of compressed content."""
+
+import zlib
+import hashlib
+
+from bzrlib import (
+ estimate_compressed_size,
+ tests,
+ )
+
+
+class TestZLibEstimator(tests.TestCase):
+
+ def get_slightly_random_content(self, length, seed=''):
+ """We generate some hex-data that can be seeded.
+
+ The output should be deterministic, but the data stream is effectively
+ random.
+ """
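+        # Aside: each md5 hexdigest adds 32 hex characters, so the loop below
+        # runs roughly length/32 times before the result is truncated to the
+        # requested length.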
+ h = hashlib.md5(seed)
+ hex_content = []
+ count = 0
+ while count < length:
+ b = h.hexdigest()
+ hex_content.append(b)
+ h.update(b)
+ count += len(b)
+ return ''.join(hex_content)[:length]
+
+ def test_adding_content(self):
+ ze = estimate_compressed_size.ZLibEstimator(32000)
+ raw_data = self.get_slightly_random_content(60000)
+ block_size = 1000
+ for start in xrange(0, len(raw_data), block_size):
+ ze.add_content(raw_data[start:start+block_size])
+ if ze.full():
+ break
+        # Practice showed that 'start' was 56000. However, zlib is a bit
+ # platform dependent, so give it +/- 5%.
+ self.assertTrue(54000 <= start <= 58000,
+ "Unexpected amount of raw data added: %d bytes" % (start,))
+        # The real compression should be 'close' to 32000; a real measurement
+        # was 32401.
+ raw_comp = zlib.compress(raw_data[:start])
+ self.assertTrue(31000 < len(raw_comp) < 33000,
+ "Unexpected compressed size: %d bytes" % (len(raw_comp),))
+
+ def test_adding_more_content(self):
+ ze = estimate_compressed_size.ZLibEstimator(64000)
+ raw_data = self.get_slightly_random_content(150000)
+ block_size = 1000
+ for start in xrange(0, len(raw_data), block_size):
+ ze.add_content(raw_data[start:start+block_size])
+ if ze.full():
+ break
+        # Practice showed that 'start' was 112000
+ self.assertTrue(110000 <= start <= 114000,
+ "Unexpected amount of raw data added: %d bytes" % (start,))
+        # The real compression should be 'close' to 64000, the limit given to
+        # the estimator in this test.
+ raw_comp = zlib.compress(raw_data[:start])
+ self.assertTrue(63000 < len(raw_comp) < 65000,
+ "Unexpected compressed size: %d bytes" % (len(raw_comp),))
diff --git a/bzrlib/tests/test_export.py b/bzrlib/tests/test_export.py
new file mode 100644
index 0000000..d02c82e
--- /dev/null
+++ b/bzrlib/tests/test_export.py
@@ -0,0 +1,294 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+            self.assertEqualDiff(SIMPLE_MESSAGE_ASCII, messages[0])
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.export."""
+
+from cStringIO import StringIO
+import os
+import tarfile
+import time
+import zipfile
+
+from bzrlib import (
+ errors,
+ export,
+ tests,
+ )
+from bzrlib.export import get_root_name
+from bzrlib.export.tar_exporter import export_tarball_generator
+from bzrlib.tests import features
+
+
+class TestDirExport(tests.TestCaseWithTransport):
+
+ def test_missing_file(self):
+ self.build_tree(['a/', 'a/b', 'a/c'])
+ wt = self.make_branch_and_tree('.')
+ wt.add(['a', 'a/b', 'a/c'])
+ os.unlink('a/c')
+ export.export(wt, 'target', format="dir")
+ self.assertPathExists('target/a/b')
+ self.assertPathDoesNotExist('target/a/c')
+
+ def test_empty(self):
+ wt = self.make_branch_and_tree('.')
+ export.export(wt, 'target', format="dir")
+ self.assertEquals([], os.listdir("target"))
+
+ def test_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ wt = self.make_branch_and_tree('.')
+ os.symlink('source', 'link')
+ wt.add(['link'])
+ export.export(wt, 'target', format="dir")
+ self.assertPathExists('target/link')
+
+ def test_to_existing_empty_dir_success(self):
+ self.build_tree(['source/', 'source/a', 'source/b/', 'source/b/c'])
+ wt = self.make_branch_and_tree('source')
+ wt.add(['a', 'b', 'b/c'])
+ wt.commit('1')
+ self.build_tree(['target/'])
+ export.export(wt, 'target', format="dir")
+ self.assertPathExists('target/a')
+ self.assertPathExists('target/b')
+ self.assertPathExists('target/b/c')
+
+ def test_empty_subdir(self):
+ self.build_tree(['source/', 'source/a', 'source/b/', 'source/b/c'])
+ wt = self.make_branch_and_tree('source')
+ wt.add(['a', 'b', 'b/c'])
+ wt.commit('1')
+ self.build_tree(['target/'])
+ export.export(wt, 'target', format="dir", subdir='')
+ self.assertPathExists('target/a')
+ self.assertPathExists('target/b')
+ self.assertPathExists('target/b/c')
+
+ def test_to_existing_nonempty_dir_fail(self):
+ self.build_tree(['source/', 'source/a', 'source/b/', 'source/b/c'])
+ wt = self.make_branch_and_tree('source')
+ wt.add(['a', 'b', 'b/c'])
+ wt.commit('1')
+ self.build_tree(['target/', 'target/foo'])
+ self.assertRaises(errors.BzrError,
+ export.export, wt, 'target', format="dir")
+
+ def test_existing_single_file(self):
+ self.build_tree([
+ 'dir1/', 'dir1/dir2/', 'dir1/first', 'dir1/dir2/second'])
+ wtree = self.make_branch_and_tree('dir1')
+ wtree.add(['dir2', 'first', 'dir2/second'])
+ wtree.commit('1')
+ export.export(wtree, 'target1', format='dir', subdir='first')
+ self.assertPathExists('target1/first')
+ export.export(wtree, 'target2', format='dir', subdir='dir2/second')
+ self.assertPathExists('target2/second')
+
+ def test_files_same_timestamp(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ builder.build_snapshot(None, None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot(None, None, [
+ ('add', ('b', 'b-id', 'file', 'content\n'))])
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ tree = b.basis_tree()
+ orig_iter_files_bytes = tree.iter_files_bytes
+
+ # Make iter_files_bytes slower, so we provoke mtime skew
+ def iter_files_bytes(to_fetch):
+ for thing in orig_iter_files_bytes(to_fetch):
+ yield thing
+ time.sleep(1)
+ tree.iter_files_bytes = iter_files_bytes
+ export.export(tree, 'target', format='dir')
+ t = self.get_transport('target')
+ st_a = t.stat('a')
+ st_b = t.stat('b')
+ # All files must be given the same mtime.
+ self.assertEqual(st_a.st_mtime, st_b.st_mtime)
+
+ def test_files_per_file_timestamps(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ # Earliest allowable date on FAT32 filesystems is 1980-01-01
+ a_time = time.mktime((1999, 12, 12, 0, 0, 0, 0, 0, 0))
+        b_time = time.mktime((1980, 1, 1, 0, 0, 0, 0, 0, 0))
+ builder.build_snapshot(None, None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('a', 'a-id', 'file', 'content\n'))],
+ timestamp=a_time)
+ builder.build_snapshot(None, None, [
+ ('add', ('b', 'b-id', 'file', 'content\n'))],
+ timestamp=b_time)
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ tree = b.basis_tree()
+ export.export(tree, 'target', format='dir', per_file_timestamps=True)
+ t = self.get_transport('target')
+ self.assertEqual(a_time, t.stat('a').st_mtime)
+ self.assertEqual(b_time, t.stat('b').st_mtime)
+
+ def test_subdir_files_per_timestamps(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ foo_time = time.mktime((1999, 12, 12, 0, 0, 0, 0, 0, 0))
+ builder.build_snapshot(None, None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('subdir', 'subdir-id', 'directory', '')),
+ ('add', ('subdir/foo.txt', 'foo-id', 'file', 'content\n'))],
+ timestamp=foo_time)
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ tree = b.basis_tree()
+ export.export(tree, 'target', format='dir', subdir='subdir',
+ per_file_timestamps=True)
+ t = self.get_transport('target')
+ self.assertEquals(foo_time, t.stat('foo.txt').st_mtime)
+
+
+class TarExporterTests(tests.TestCaseWithTransport):
+
+ def test_xz(self):
+ self.requireFeature(features.lzma)
+ import lzma
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(["a"])
+ wt.commit("1")
+ export.export(wt, 'target.tar.xz', format="txz")
+ tf = tarfile.open(fileobj=lzma.LZMAFile('target.tar.xz'))
+ self.assertEquals(["target/a"], tf.getnames())
+
+ def test_lzma(self):
+ self.requireFeature(features.lzma)
+ import lzma
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(["a"])
+ wt.commit("1")
+ export.export(wt, 'target.tar.lzma', format="tlzma")
+ tf = tarfile.open(fileobj=lzma.LZMAFile('target.tar.lzma'))
+ self.assertEquals(["target/a"], tf.getnames())
+
+ def test_tgz(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(["a"])
+ wt.commit("1")
+ export.export(wt, 'target.tar.gz', format="tgz")
+ tf = tarfile.open('target.tar.gz')
+ self.assertEquals(["target/a"], tf.getnames())
+
+ def test_tgz_ignores_dest_path(self):
+ # The target path should not be a part of the target file.
+ # (bug #102234)
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(["a"])
+ wt.commit("1")
+ os.mkdir("testdir1")
+ os.mkdir("testdir2")
+ export.export(wt, 'testdir1/target.tar.gz', format="tgz",
+ per_file_timestamps=True)
+ export.export(wt, 'testdir2/target.tar.gz', format="tgz",
+ per_file_timestamps=True)
+ file1 = open('testdir1/target.tar.gz', 'r')
+ self.addCleanup(file1.close)
+        file2 = open('testdir2/target.tar.gz', 'r')
+ self.addCleanup(file2.close)
+ content1 = file1.read()
+ content2 = file2.read()
+ self.assertEqualDiff(content1, content2)
+        # the gzip module doesn't have a way to read back the original
+        # filename, but it's stored as-is in the gzip header.
+ self.assertFalse("testdir1" in content1)
+ self.assertFalse("target.tar.gz" in content1)
+ self.assertTrue("target.tar" in content1)
+
+ def test_tbz2(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(["a"])
+ wt.commit("1")
+ export.export(wt, 'target.tar.bz2', format="tbz2")
+ tf = tarfile.open('target.tar.bz2')
+ self.assertEquals(["target/a"], tf.getnames())
+
+ def test_xz_stdout(self):
+ wt = self.make_branch_and_tree('.')
+ self.assertRaises(errors.BzrError, export.export, wt, '-',
+ format="txz")
+
+ def test_export_tarball_generator(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(["a"])
+ wt.commit("1", timestamp=42)
+ target = StringIO()
+ ball = tarfile.open(None, "w|", target)
+ wt.lock_read()
+ try:
+ for _ in export_tarball_generator(wt, ball, "bar"):
+ pass
+ finally:
+ wt.unlock()
+ # Ball should now be closed.
+ target.seek(0)
+ ball2 = tarfile.open(None, "r", target)
+ self.addCleanup(ball2.close)
+ self.assertEquals(["bar/a"], ball2.getnames())
+
+
+class ZipExporterTests(tests.TestCaseWithTransport):
+
+ def test_per_file_timestamps(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('har', 'foo')])
+ tree.add('har')
+ # Earliest allowable date on FAT32 filesystems is 1980-01-01
+ timestamp = 347151600
+ tree.commit('setup', timestamp=timestamp)
+ export.export(tree.basis_tree(), 'test.zip', format='zip',
+ per_file_timestamps=True)
+ zfile = zipfile.ZipFile('test.zip')
+ info = zfile.getinfo("test/har")
+ self.assertEquals(time.localtime(timestamp)[:6], info.date_time)
+
+
+class RootNameTests(tests.TestCase):
+
+ def test_root_name(self):
+ self.assertEquals('mytest', get_root_name('../mytest.tar'))
+ self.assertEquals('mytar', get_root_name('mytar.tar'))
+ self.assertEquals('mytar', get_root_name('mytar.tar.bz2'))
+ self.assertEquals('tar.tar.tar', get_root_name('tar.tar.tar.tgz'))
+ self.assertEquals('bzr-0.0.5', get_root_name('bzr-0.0.5.tar.gz'))
+ self.assertEquals('bzr-0.0.5', get_root_name('bzr-0.0.5.zip'))
+ self.assertEquals('bzr-0.0.5', get_root_name('bzr-0.0.5'))
+ self.assertEquals('mytar', get_root_name('a/long/path/mytar.tgz'))
+ self.assertEquals('other',
+ get_root_name('../parent/../dir/other.tbz2'))
+ self.assertEquals('', get_root_name('-'))
diff --git a/bzrlib/tests/test_export_pot.py b/bzrlib/tests/test_export_pot.py
new file mode 100644
index 0000000..58e34b7
--- /dev/null
+++ b/bzrlib/tests/test_export_pot.py
@@ -0,0 +1,467 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+import textwrap
+
+from bzrlib import (
+ commands,
+ export_pot,
+ option,
+ registry,
+ tests,
+ )
+
+import re
+
+
+class TestEscape(tests.TestCase):
+
+ def test_simple_escape(self):
+ self.assertEqual(
+ export_pot._escape('foobar'),
+ 'foobar')
+
+ s = '''foo\nbar\r\tbaz\\"spam"'''
+ e = '''foo\\nbar\\r\\tbaz\\\\\\"spam\\"'''
+ self.assertEqual(export_pot._escape(s), e)
+
+ def test_complex_escape(self):
+ s = '''\\r \\\n'''
+ e = '''\\\\r \\\\\\n'''
+ self.assertEqual(export_pot._escape(s), e)
+
+
+class TestNormalize(tests.TestCase):
+
+ def test_single_line(self):
+ s = 'foobar'
+ e = '"foobar"'
+ self.assertEqual(export_pot._normalize(s), e)
+
+ s = 'foo"bar'
+ e = '"foo\\"bar"'
+ self.assertEqual(export_pot._normalize(s), e)
+
+ def test_multi_lines(self):
+ s = 'foo\nbar\n'
+ e = '""\n"foo\\n"\n"bar\\n"'
+ self.assertEqual(export_pot._normalize(s), e)
+
+ s = '\nfoo\nbar\n'
+ e = ('""\n'
+ '"\\n"\n'
+ '"foo\\n"\n'
+ '"bar\\n"')
+ self.assertEqual(export_pot._normalize(s), e)
+
+
+class TestParseSource(tests.TestCase):
+ """Check mappings to line numbers generated from python source"""
+
+ def test_classes(self):
+ src = '''
+class Ancient:
+ """Old style class"""
+
+class Modern(object):
+ """New style class"""
+'''
+ cls_lines, _ = export_pot._parse_source(src)
+ self.assertEqual(cls_lines,
+ {"Ancient": 2, "Modern": 5})
+
+ def test_classes_nested(self):
+ src = '''
+class Matroska(object):
+ class Smaller(object):
+ class Smallest(object):
+ pass
+'''
+ cls_lines, _ = export_pot._parse_source(src)
+ self.assertEqual(cls_lines,
+ {"Matroska": 2, "Smaller": 3, "Smallest":4})
+
+ def test_strings_docstrings(self):
+ src = '''\
+"""Module"""
+
+def function():
+ """Function"""
+
+class Class(object):
+ """Class"""
+
+ def method(self):
+ """Method"""
+'''
+ _, str_lines = export_pot._parse_source(src)
+ self.assertEqual(str_lines,
+ {"Module": 1, "Function": 4, "Class": 7, "Method": 10})
+
+ def test_strings_literals(self):
+ src = '''\
+s = "One"
+t = (2, "Two")
+f = dict(key="Three")
+'''
+ _, str_lines = export_pot._parse_source(src)
+ self.assertEqual(str_lines,
+ {"One": 1, "Two": 2, "Three": 3})
+
+ def test_strings_multiline(self):
+ src = '''\
+"""Start
+
+End
+"""
+t = (
+ "A"
+ "B"
+ "C"
+ )
+'''
+ _, str_lines = export_pot._parse_source(src)
+ self.assertEqual(str_lines,
+ {"Start\n\nEnd\n": 1, "ABC": 6})
+
+ def test_strings_multiline_escapes(self):
+ src = '''\
+s = "Escaped\\n"
+r = r"Raw\\n"
+t = (
+ "A\\n\\n"
+ "B\\n\\n"
+ "C\\n\\n"
+ )
+'''
+ _, str_lines = export_pot._parse_source(src)
+        self.expectFailure("Escaped newlines confuse the multiline handling",
+ self.assertNotEqual, str_lines,
+ {"Escaped\n": 0, "Raw\\n": 2, "A\n\nB\n\nC\n\n": -2})
+ self.assertEqual(str_lines,
+ {"Escaped\n": 1, "Raw\\n": 2, "A\n\nB\n\nC\n\n": 4})
+
+
+class TestModuleContext(tests.TestCase):
+ """Checks for source context tracking objects"""
+
+ def check_context(self, context, path, lineno):
+ self.assertEquals((context.path, context.lineno), (path, lineno))
+
+ def test___init__(self):
+ context = export_pot._ModuleContext("one.py")
+ self.check_context(context, "one.py", 1)
+ context = export_pot._ModuleContext("two.py", 5)
+ self.check_context(context, "two.py", 5)
+
+ def test_from_class(self):
+ """New context returned with lineno updated from class"""
+ path = "cls.py"
+ class A(object): pass
+ class B(object): pass
+ cls_lines = {"A": 5, "B": 7}
+ context = export_pot._ModuleContext(path, _source_info=(cls_lines, {}))
+ contextA = context.from_class(A)
+ self.check_context(contextA, path, 5)
+ contextB1 = context.from_class(B)
+ self.check_context(contextB1, path, 7)
+ contextB2 = contextA.from_class(B)
+ self.check_context(contextB2, path, 7)
+ self.check_context(context, path, 1)
+ self.assertEquals("", self.get_log())
+
+ def test_from_class_missing(self):
+ """When class has no lineno the old context details are returned"""
+ path = "cls_missing.py"
+ class A(object): pass
+ class M(object): pass
+ context = export_pot._ModuleContext(path, 3, ({"A": 15}, {}))
+ contextA = context.from_class(A)
+ contextM1 = context.from_class(M)
+ self.check_context(contextM1, path, 3)
+ contextM2 = contextA.from_class(M)
+ self.check_context(contextM2, path, 15)
+ self.assertContainsRe(self.get_log(), "Definition of <.*M'> not found")
+
+ def test_from_string(self):
+ """New context returned with lineno updated from string"""
+ path = "str.py"
+ str_lines = {"one": 14, "two": 42}
+ context = export_pot._ModuleContext(path, _source_info=({}, str_lines))
+ context1 = context.from_string("one")
+ self.check_context(context1, path, 14)
+ context2A = context.from_string("two")
+ self.check_context(context2A, path, 42)
+ context2B = context1.from_string("two")
+ self.check_context(context2B, path, 42)
+ self.check_context(context, path, 1)
+ self.assertEquals("", self.get_log())
+
+ def test_from_string_missing(self):
+ """When string has no lineno the old context details are returned"""
+ path = "str_missing.py"
+ context = export_pot._ModuleContext(path, 4, ({}, {"line\n": 21}))
+ context1 = context.from_string("line\n")
+ context2A = context.from_string("not there")
+ self.check_context(context2A, path, 4)
+ context2B = context1.from_string("not there")
+ self.check_context(context2B, path, 21)
+ self.assertContainsRe(self.get_log(), "String 'not there' not found")
+
+
+class TestWriteOption(tests.TestCase):
+ """Tests for writing texts extracted from options in pot format"""
+
+ def pot_from_option(self, opt, context=None, note="test"):
+ sio = StringIO()
+ exporter = export_pot._PotExporter(sio)
+ if context is None:
+ context = export_pot._ModuleContext("nowhere", 0)
+ export_pot._write_option(exporter, context, opt, note)
+ return sio.getvalue()
+
+ def test_option_without_help(self):
+ opt = option.Option("helpless")
+ self.assertEqual("", self.pot_from_option(opt))
+
+ def test_option_with_help(self):
+ opt = option.Option("helpful", help="Info.")
+ self.assertContainsString(self.pot_from_option(opt), "\n"
+ "# help of 'helpful' test\n"
+ "msgid \"Info.\"\n")
+
+ def test_option_hidden(self):
+ opt = option.Option("hidden", help="Unseen.", hidden=True)
+ self.assertEqual("", self.pot_from_option(opt))
+
+ def test_option_context_missing(self):
+ context = export_pot._ModuleContext("remote.py", 3)
+ opt = option.Option("metaphor", help="Not a literal in the source.")
+ self.assertContainsString(self.pot_from_option(opt, context),
+ "#: remote.py:3\n"
+ "# help of 'metaphor' test\n")
+
+ def test_option_context_string(self):
+ s = "Literally."
+ context = export_pot._ModuleContext("local.py", 3, ({}, {s: 17}))
+ opt = option.Option("example", help=s)
+ self.assertContainsString(self.pot_from_option(opt, context),
+ "#: local.py:17\n"
+ "# help of 'example' test\n")
+
+ def test_registry_option_title(self):
+ opt = option.RegistryOption.from_kwargs("group", help="Pick one.",
+ title="Choose!")
+ pot = self.pot_from_option(opt)
+ self.assertContainsString(pot, "\n"
+ "# title of 'group' test\n"
+ "msgid \"Choose!\"\n")
+ self.assertContainsString(pot, "\n"
+ "# help of 'group' test\n"
+ "msgid \"Pick one.\"\n")
+
+ def test_registry_option_title_context_missing(self):
+ context = export_pot._ModuleContext("theory.py", 3)
+ opt = option.RegistryOption.from_kwargs("abstract", title="Unfounded!")
+ self.assertContainsString(self.pot_from_option(opt, context),
+ "#: theory.py:3\n"
+ "# title of 'abstract' test\n")
+
+ def test_registry_option_title_context_string(self):
+ s = "Grounded!"
+ context = export_pot._ModuleContext("practice.py", 3, ({}, {s: 144}))
+ opt = option.RegistryOption.from_kwargs("concrete", title=s)
+ self.assertContainsString(self.pot_from_option(opt, context),
+ "#: practice.py:144\n"
+ "# title of 'concrete' test\n")
+
+ def test_registry_option_value_switches(self):
+ opt = option.RegistryOption.from_kwargs("switch", help="Flip one.",
+ value_switches=True, enum_switch=False,
+ red="Big.", green="Small.")
+ pot = self.pot_from_option(opt)
+ self.assertContainsString(pot, "\n"
+ "# help of 'switch' test\n"
+ "msgid \"Flip one.\"\n")
+ self.assertContainsString(pot, "\n"
+ "# help of 'switch=red' test\n"
+ "msgid \"Big.\"\n")
+ self.assertContainsString(pot, "\n"
+ "# help of 'switch=green' test\n"
+ "msgid \"Small.\"\n")
+
+ def test_registry_option_value_switches_hidden(self):
+ reg = registry.Registry()
+ class Hider(object):
+ hidden = True
+ reg.register("new", 1, "Current.")
+ reg.register("old", 0, "Legacy.", info=Hider())
+ opt = option.RegistryOption("protocol", "Talking.", reg,
+ value_switches=True, enum_switch=False)
+ pot = self.pot_from_option(opt)
+ self.assertContainsString(pot, "\n"
+ "# help of 'protocol' test\n"
+ "msgid \"Talking.\"\n")
+ self.assertContainsString(pot, "\n"
+ "# help of 'protocol=new' test\n"
+ "msgid \"Current.\"\n")
+ self.assertNotContainsString(pot, "'protocol=old'")
+
+
+class TestPotExporter(tests.TestCase):
+ """Test for logic specific to the _PotExporter class"""
+
+    # This overlaps with TestPoEntry.test_duplicate below
+ def test_duplicates(self):
+ exporter = export_pot._PotExporter(StringIO())
+ context = export_pot._ModuleContext("mod.py", 1)
+ exporter.poentry_in_context(context, "Common line.")
+ context.lineno = 3
+ exporter.poentry_in_context(context, "Common line.")
+ self.assertEqual(1, exporter.outf.getvalue().count("Common line."))
+
+ def test_duplicates_included(self):
+ exporter = export_pot._PotExporter(StringIO(), True)
+ context = export_pot._ModuleContext("mod.py", 1)
+ exporter.poentry_in_context(context, "Common line.")
+ context.lineno = 3
+ exporter.poentry_in_context(context, "Common line.")
+ self.assertEqual(2, exporter.outf.getvalue().count("Common line."))
+
+
+class PoEntryTestCase(tests.TestCase):
+
+ def setUp(self):
+ super(PoEntryTestCase, self).setUp()
+ self.exporter = export_pot._PotExporter(StringIO())
+
+ def check_output(self, expected):
+ self.assertEqual(
+ self.exporter.outf.getvalue(),
+ textwrap.dedent(expected)
+ )
+
+
+class TestPoEntry(PoEntryTestCase):
+
+ def test_simple(self):
+ self.exporter.poentry('dummy', 1, "spam")
+ self.exporter.poentry('dummy', 2, "ham", 'EGG')
+ self.check_output('''\
+ #: dummy:1
+ msgid "spam"
+ msgstr ""
+
+ #: dummy:2
+ # EGG
+ msgid "ham"
+ msgstr ""
+
+ ''')
+
+ def test_duplicate(self):
+ self.exporter.poentry('dummy', 1, "spam")
+ # This should be ignored.
+ self.exporter.poentry('dummy', 2, "spam", 'EGG')
+
+ self.check_output('''\
+ #: dummy:1
+ msgid "spam"
+ msgstr ""\n
+ ''')
+
+
+class TestPoentryPerParagraph(PoEntryTestCase):
+
+ def test_single(self):
+ self.exporter.poentry_per_paragraph(
+ 'dummy',
+ 10,
+ '''foo\nbar\nbaz\n'''
+ )
+ self.check_output('''\
+ #: dummy:10
+ msgid ""
+ "foo\\n"
+ "bar\\n"
+ "baz\\n"
+ msgstr ""\n
+ ''')
+
+ def test_multi(self):
+ self.exporter.poentry_per_paragraph(
+ 'dummy',
+ 10,
+ '''spam\nham\negg\n\nSPAM\nHAM\nEGG\n'''
+ )
+ self.check_output('''\
+ #: dummy:10
+ msgid ""
+ "spam\\n"
+ "ham\\n"
+ "egg"
+ msgstr ""
+
+ #: dummy:14
+ msgid ""
+ "SPAM\\n"
+ "HAM\\n"
+ "EGG\\n"
+ msgstr ""\n
+ ''')
+
+
+class TestExportCommandHelp(PoEntryTestCase):
+
+ def test_command_help(self):
+
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command.
+
+ :Usage:
+ bzr demo
+
+ :Examples:
+ Example 1::
+
+ cmd arg1
+
+ Blah Blah Blah
+ """
+
+ export_pot._write_command_help(self.exporter, cmd_Demo())
+ result = self.exporter.outf.getvalue()
+ # We don't care about filename and lineno here.
+ result = re.sub(r'(?m)^#: [^\n]+\n', '', result)
+
+ self.assertEqualDiff(
+ 'msgid "A sample command."\n'
+ 'msgstr ""\n'
+ '\n' # :Usage: should not be translated.
+ 'msgid ""\n'
+ '":Examples:\\n"\n'
+ '" Example 1::"\n'
+ 'msgstr ""\n'
+ '\n'
+ 'msgid " cmd arg1"\n'
+ 'msgstr ""\n'
+ '\n'
+ 'msgid "Blah Blah Blah"\n'
+ 'msgstr ""\n'
+ '\n',
+ result
+ )
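
The TestNormalize cases above encode gettext's msgid wrapping rules. As a
rough sketch of that convention (an illustration, not the actual
export_pot._normalize implementation, and it only handles quote escaping):

    def normalize_sketch(s):
        # Single-line strings become one quoted chunk; multi-line strings
        # start with an empty "" and continue one quoted line per source line.
        lines = s.split('\n')
        if len(lines) == 1:
            return '"%s"' % lines[0].replace('"', '\\"')
        chunks = ['""']
        for line in lines[:-1]:
            chunks.append('"%s\\n"' % line.replace('"', '\\"'))
        if lines[-1]:
            chunks.append('"%s"' % lines[-1].replace('"', '\\"'))
        return '\n'.join(chunks)

    assert normalize_sketch('foo"bar') == '"foo\\"bar"'
    assert normalize_sketch('foo\nbar\n') == '""\n"foo\\n"\n"bar\\n"'
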
diff --git a/bzrlib/tests/test_extract.py b/bzrlib/tests/test_extract.py
new file mode 100644
index 0000000..9867e2c
--- /dev/null
+++ b/bzrlib/tests/test_extract.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2006, 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ branch,
+ errors,
+ )
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestExtract(TestCaseWithTransport):
+
+ def test_extract(self):
+ self.build_tree(['a/', 'a/b/', 'a/b/c', 'a/d'])
+ wt = self.make_branch_and_tree('a', format='rich-root-pack')
+ wt.add(['b', 'b/c', 'd'], ['b-id', 'c-id', 'd-id'])
+ wt.commit('added files')
+ b_wt = wt.extract('b-id')
+ self.assertEqual('b-id', b_wt.get_root_id())
+ self.assertEqual('c-id', b_wt.path2id('c'))
+ self.assertEqual('c', b_wt.id2path('c-id'))
+ self.assertRaises(errors.BzrError, wt.id2path, 'b-id')
+ self.assertEqual(b_wt.basedir, wt.abspath('b'))
+ self.assertEqual(wt.get_parent_ids(), b_wt.get_parent_ids())
+ self.assertEqual(wt.branch.last_revision(),
+ b_wt.branch.last_revision())
+
+ def extract_in_checkout(self, a_branch):
+ self.build_tree(['a/', 'a/b/', 'a/b/c/', 'a/b/c/d'])
+ wt = a_branch.create_checkout('a', lightweight=True)
+ wt.add(['b', 'b/c', 'b/c/d'], ['b-id', 'c-id', 'd-id'])
+ wt.commit('added files')
+ return wt.extract('b-id')
+
+ def test_extract_in_checkout(self):
+ a_branch = self.make_branch('branch', format='rich-root-pack')
+ self.extract_in_checkout(a_branch)
+ b_branch = branch.Branch.open('branch/b')
+ b_branch_ref = branch.Branch.open('a/b')
+ self.assertEqual(b_branch.base, b_branch_ref.base)
+
+ def test_extract_in_deep_checkout(self):
+ a_branch = self.make_branch('branch', format='rich-root-pack')
+ self.build_tree(['a/', 'a/b/', 'a/b/c/', 'a/b/c/d/', 'a/b/c/d/e'])
+ wt = a_branch.create_checkout('a', lightweight=True)
+ wt.add(['b', 'b/c', 'b/c/d', 'b/c/d/e/'], ['b-id', 'c-id', 'd-id',
+ 'e-id'])
+ wt.commit('added files')
+ b_wt = wt.extract('d-id')
+ b_branch = branch.Branch.open('branch/b/c/d')
+ b_branch_ref = branch.Branch.open('a/b/c/d')
+ self.assertEqual(b_branch.base, b_branch_ref.base)
+
+ def test_bad_repo_format(self):
+ repo = self.make_repository('branch', shared=True,
+ format='knit')
+ a_branch = repo.bzrdir.create_branch()
+ self.assertRaises(errors.RootNotRich, self.extract_in_checkout,
+ a_branch)
+
+ def test_good_repo_format(self):
+ repo = self.make_repository('branch', shared=True,
+ format='dirstate-with-subtree')
+ a_branch = repo.bzrdir.create_branch()
+ wt_b = self.extract_in_checkout(a_branch)
+ self.assertEqual(wt_b.branch.repository.bzrdir.transport.base,
+ repo.bzrdir.transport.base)
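
For orientation, the API exercised above: WorkingTree.extract() splits a
versioned directory out into its own branch nested inside the parent tree,
and needs a rich-root repository format. A rough usage sketch outside the
test framework (directory names are invented; it assumes the default 2a
format, which is rich-root):

    import os
    from bzrlib import bzrdir

    wt = bzrdir.BzrDir.create_standalone_workingtree('outer')
    os.mkdir('outer/sub')
    wt.add(['sub'], ['sub-id'])
    wt.commit('add sub')
    sub_wt = wt.extract('sub-id')    # 'sub' is now its own branch
    assert sub_wt.basedir == wt.abspath('sub')
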
diff --git a/bzrlib/tests/test_features.py b/bzrlib/tests/test_features.py
new file mode 100644
index 0000000..ce72590
--- /dev/null
+++ b/bzrlib/tests/test_features.py
@@ -0,0 +1,137 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for test feature dependencies."""
+
+import sys
+
+from bzrlib import (
+ symbol_versioning,
+ tests,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestFeature(tests.TestCase):
+
+ def test_caching(self):
+ """Feature._probe is called by the feature at most once."""
+ class InstrumentedFeature(features.Feature):
+ def __init__(self):
+ super(InstrumentedFeature, self).__init__()
+ self.calls = []
+
+ def _probe(self):
+ self.calls.append('_probe')
+ return False
+ feature = InstrumentedFeature()
+ feature.available()
+ self.assertEqual(['_probe'], feature.calls)
+ feature.available()
+ self.assertEqual(['_probe'], feature.calls)
+
+ def test_named_str(self):
+ """Feature.__str__ should thunk to feature_name()."""
+ class NamedFeature(features.Feature):
+ def feature_name(self):
+ return 'symlinks'
+ feature = NamedFeature()
+ self.assertEqual('symlinks', str(feature))
+
+ def test_default_str(self):
+ """Feature.__str__ should default to __class__.__name__."""
+ class NamedFeature(features.Feature):
+ pass
+ feature = NamedFeature()
+ self.assertEqual('NamedFeature', str(feature))
+
+
+class TestUnavailableFeature(tests.TestCase):
+
+ def test_access_feature(self):
+ feature = features.Feature()
+ exception = tests.UnavailableFeature(feature)
+ self.assertIs(feature, exception.args[0])
+
+
+# Although this was deprecated a long time ago, please keep it here because
+# it's really just a test fixture for test-feature deprecation.
+simple_thunk_feature = features._CompatabilityThunkFeature(
+ symbol_versioning.deprecated_in((2, 1, 0)),
+ 'bzrlib.tests.test_features',
+ 'simple_thunk_feature',
+ 'UnicodeFilenameFeature',
+ replacement_module='bzrlib.tests.features')
+
+
+class Test_CompatibilityFeature(tests.TestCase):
+
+ def test_does_thunk(self):
+ res = self.callDeprecated(
+ ['bzrlib.tests.test_features.simple_thunk_feature '
+ 'was deprecated in version 2.1.0. '
+ 'Use bzrlib.tests.features.UnicodeFilenameFeature instead.'],
+ simple_thunk_feature.available)
+ self.assertEqual(features.UnicodeFilenameFeature.available(), res)
+
+ def test_reports_correct_location(self):
+ a_feature = features._CompatabilityThunkFeature(
+ symbol_versioning.deprecated_in((2, 1, 0)),
+ 'bzrlib.tests.test_features',
+ 'a_feature',
+ 'UnicodeFilenameFeature',
+ replacement_module='bzrlib.tests.features')
+ def test_caller(message, category=None, stacklevel=1):
+ # Find ourselves back from the right frame
+ caller = sys._getframe(stacklevel)
+ reported_file = caller.f_globals['__file__']
+ reported_lineno = caller.f_lineno
+ self.assertEquals(__file__, reported_file)
+ # The call we're tracking occurred the line after we grabbed the
+ # lineno.
+ self.assertEquals(self.lineno + 1, reported_lineno)
+ self.overrideAttr(symbol_versioning, 'warn', test_caller)
+ # Grab the current lineno
+ self.lineno = sys._getframe().f_lineno
+ self.requireFeature(a_feature)
+
+
+class TestModuleAvailableFeature(tests.TestCase):
+
+ def test_available_module(self):
+ feature = features.ModuleAvailableFeature('bzrlib.tests')
+ self.assertEqual('bzrlib.tests', feature.module_name)
+ self.assertEqual('bzrlib.tests', str(feature))
+ self.assertTrue(feature.available())
+ self.assertIs(tests, feature.module)
+
+ def test_unavailable_module(self):
+ feature = features.ModuleAvailableFeature(
+ 'bzrlib.no_such_module_exists')
+ self.assertEqual('bzrlib.no_such_module_exists', str(feature))
+ self.assertFalse(feature.available())
+ self.assertIs(None, feature.module)
+
+
+class TestUnicodeFilenameFeature(tests.TestCase):
+
+ def test_probe_passes(self):
+ """UnicodeFilenameFeature._probe passes."""
+ # We can't test much more than that because the behaviour depends
+ # on the platform.
+ features.UnicodeFilenameFeature._probe()
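
The pattern these feature tests protect is how optional dependencies are
declared: subclass Feature, probe once (the result is cached), and let
requireFeature() skip tests when the probe fails. A hedged sketch; the
'pycurl' module name is only an example:

    from bzrlib.tests import features

    class PycurlFeature(features.Feature):

        def _probe(self):
            # Called at most once; available() caches the result.
            try:
                import pycurl
            except ImportError:
                return False
            return True

        def feature_name(self):
            return 'pycurl'

    # Inside a TestCase:  self.requireFeature(PycurlFeature())
    # For plain module checks there is also the ready-made helper:
    #   features.ModuleAvailableFeature('paramiko')
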
diff --git a/bzrlib/tests/test_fetch.py b/bzrlib/tests/test_fetch.py
new file mode 100644
index 0000000..5124785
--- /dev/null
+++ b/bzrlib/tests/test_fetch.py
@@ -0,0 +1,523 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ bzrdir,
+ errors,
+ osutils,
+ revision as _mod_revision,
+ versionedfile,
+ )
+from bzrlib.branch import Branch
+from bzrlib.repofmt import knitrepo
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tests.test_revision import make_branches
+from bzrlib.upgrade import Convert
+from bzrlib.workingtree import WorkingTree
+
+# These tests are a bit old; please instead add new tests into
+# per_interrepository/ so they'll run on all relevant
+# combinations.
+
+
+def has_revision(branch, revision_id):
+ return branch.repository.has_revision(revision_id)
+
+
+def revision_history(branch):
+ branch.lock_read()
+ try:
+ graph = branch.repository.get_graph()
+ history = list(graph.iter_lefthand_ancestry(branch.last_revision(),
+ [_mod_revision.NULL_REVISION]))
+ finally:
+ branch.unlock()
+ history.reverse()
+ return history
+
+
+def fetch_steps(self, br_a, br_b, writable_a):
+ """A foreign test method for testing fetch locally and remotely."""
+
+ # TODO RBC 20060201 make this a repository test.
+ repo_b = br_b.repository
+ self.assertFalse(repo_b.has_revision(revision_history(br_a)[3]))
+ self.assertTrue(repo_b.has_revision(revision_history(br_a)[2]))
+ self.assertEquals(len(revision_history(br_b)), 7)
+ br_b.fetch(br_a, revision_history(br_a)[2])
+ # branch.fetch is not supposed to alter the revision history
+ self.assertEquals(len(revision_history(br_b)), 7)
+ self.assertFalse(repo_b.has_revision(revision_history(br_a)[3]))
+
+ # fetching the next revision up in sample data copies one revision
+ br_b.fetch(br_a, revision_history(br_a)[3])
+ self.assertTrue(repo_b.has_revision(revision_history(br_a)[3]))
+ self.assertFalse(has_revision(br_a, revision_history(br_b)[6]))
+ self.assertTrue(br_a.repository.has_revision(revision_history(br_b)[5]))
+
+    # When a non-branch ancestor is missing, it should be unlisted,
+    # as it's not referenced from the inventory weave.
+ br_b4 = self.make_branch('br_4')
+ br_b4.fetch(br_b)
+
+ writable_a.fetch(br_b)
+ self.assertTrue(has_revision(br_a, revision_history(br_b)[3]))
+ self.assertTrue(has_revision(br_a, revision_history(br_b)[4]))
+
+ br_b2 = self.make_branch('br_b2')
+ br_b2.fetch(br_b)
+ self.assertTrue(has_revision(br_b2, revision_history(br_b)[4]))
+ self.assertTrue(has_revision(br_b2, revision_history(br_a)[2]))
+ self.assertFalse(has_revision(br_b2, revision_history(br_a)[3]))
+
+ br_a2 = self.make_branch('br_a2')
+ br_a2.fetch(br_a)
+ self.assertTrue(has_revision(br_a2, revision_history(br_b)[4]))
+ self.assertTrue(has_revision(br_a2, revision_history(br_a)[3]))
+ self.assertTrue(has_revision(br_a2, revision_history(br_a)[2]))
+
+ br_a3 = self.make_branch('br_a3')
+ # pulling a branch with no revisions grabs nothing, regardless of
+    # what's in the inventory.
+ br_a3.fetch(br_a2)
+ for revno in range(4):
+ self.assertFalse(
+ br_a3.repository.has_revision(revision_history(br_a)[revno]))
+ br_a3.fetch(br_a2, revision_history(br_a)[2])
+ # pull the 3 revisions introduced by a@u-0-3
+ br_a3.fetch(br_a2, revision_history(br_a)[3])
+ # NoSuchRevision should be raised if the branch is missing the revision
+ # that was requested.
+ self.assertRaises(errors.NoSuchRevision, br_a3.fetch, br_a2, 'pizza')
+
+ # TODO: Test trying to fetch from a branch that points to a revision not
+ # actually present in its repository. Not every branch format allows you
+ # to directly point to such revisions, so it's a bit complicated to
+ # construct. One way would be to uncommit and gc the revision, but not
+ # every branch supports that. -- mbp 20070814
+
+ #TODO: test that fetch correctly does reweaving when needed. RBC 20051008
+ # Note that this means - updating the weave when ghosts are filled in to
+ # add the right parents.
+
+
+class TestFetch(TestCaseWithTransport):
+
+ def test_fetch(self):
+ #highest indices a: 5, b: 7
+ br_a, br_b = make_branches(self, format='dirstate-tags')
+ fetch_steps(self, br_a, br_b, br_a)
+
+ def test_fetch_self(self):
+ wt = self.make_branch_and_tree('br')
+ wt.branch.fetch(wt.branch)
+
+ def test_fetch_root_knit(self):
+ """Ensure that knit2.fetch() updates the root knit
+
+ This tests the case where the root has a new revision, but there are no
+ corresponding filename, parent, contents or other changes.
+ """
+ knit1_format = bzrdir.BzrDirMetaFormat1()
+ knit1_format.repository_format = knitrepo.RepositoryFormatKnit1()
+ knit2_format = bzrdir.BzrDirMetaFormat1()
+ knit2_format.repository_format = knitrepo.RepositoryFormatKnit3()
+ # we start with a knit1 repository because that causes the
+ # root revision to change for each commit, even though the content,
+ # parent, name, and other attributes are unchanged.
+ tree = self.make_branch_and_tree('tree', knit1_format)
+ tree.set_root_id('tree-root')
+ tree.commit('rev1', rev_id='rev1')
+ tree.commit('rev2', rev_id='rev2')
+
+ # Now we convert it to a knit2 repository so that it has a root knit
+ Convert(tree.basedir, knit2_format)
+ tree = WorkingTree.open(tree.basedir)
+ branch = self.make_branch('branch', format=knit2_format)
+ branch.pull(tree.branch, stop_revision='rev1')
+ repo = branch.repository
+ repo.lock_read()
+ try:
+ # Make sure fetch retrieved only what we requested
+ self.assertEqual({('tree-root', 'rev1'):()},
+ repo.texts.get_parent_map(
+ [('tree-root', 'rev1'), ('tree-root', 'rev2')]))
+ finally:
+ repo.unlock()
+ branch.pull(tree.branch)
+ # Make sure that the next revision in the root knit was retrieved,
+ # even though the text, name, parent_id, etc., were unchanged.
+ repo.lock_read()
+ try:
+ # Make sure fetch retrieved only what we requested
+ self.assertEqual({('tree-root', 'rev2'):(('tree-root', 'rev1'),)},
+ repo.texts.get_parent_map([('tree-root', 'rev2')]))
+ finally:
+ repo.unlock()
+
+ def test_fetch_incompatible(self):
+ knit_tree = self.make_branch_and_tree('knit', format='knit')
+ knit3_tree = self.make_branch_and_tree('knit3',
+ format='dirstate-with-subtree')
+ knit3_tree.commit('blah')
+ e = self.assertRaises(errors.IncompatibleRepositories,
+ knit_tree.branch.fetch, knit3_tree.branch)
+ self.assertContainsRe(str(e),
+ r"(?m).*/knit.*\nis not compatible with\n.*/knit3/.*\n"
+ r"different rich-root support")
+
+
+class TestMergeFetch(TestCaseWithTransport):
+
+ def test_merge_fetches_unrelated(self):
+ """Merge brings across history from unrelated source"""
+ wt1 = self.make_branch_and_tree('br1')
+ br1 = wt1.branch
+ wt1.commit(message='rev 1-1', rev_id='1-1')
+ wt1.commit(message='rev 1-2', rev_id='1-2')
+ wt2 = self.make_branch_and_tree('br2')
+ br2 = wt2.branch
+ wt2.commit(message='rev 2-1', rev_id='2-1')
+ wt2.merge_from_branch(br1, from_revision='null:')
+ self._check_revs_present(br2)
+
+ def test_merge_fetches(self):
+ """Merge brings across history from source"""
+ wt1 = self.make_branch_and_tree('br1')
+ br1 = wt1.branch
+ wt1.commit(message='rev 1-1', rev_id='1-1')
+ dir_2 = br1.bzrdir.sprout('br2')
+ br2 = dir_2.open_branch()
+ wt1.commit(message='rev 1-2', rev_id='1-2')
+ wt2 = dir_2.open_workingtree()
+ wt2.commit(message='rev 2-1', rev_id='2-1')
+ wt2.merge_from_branch(br1)
+ self._check_revs_present(br2)
+
+ def _check_revs_present(self, br2):
+ for rev_id in '1-1', '1-2', '2-1':
+ self.assertTrue(br2.repository.has_revision(rev_id))
+ rev = br2.repository.get_revision(rev_id)
+ self.assertEqual(rev.revision_id, rev_id)
+ self.assertTrue(br2.repository.get_inventory(rev_id))
+
+
+class TestMergeFileHistory(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestMergeFileHistory, self).setUp()
+ wt1 = self.make_branch_and_tree('br1')
+ br1 = wt1.branch
+ self.build_tree_contents([('br1/file', 'original contents\n')])
+ wt1.add('file', 'this-file-id')
+ wt1.commit(message='rev 1-1', rev_id='1-1')
+ dir_2 = br1.bzrdir.sprout('br2')
+ br2 = dir_2.open_branch()
+ wt2 = dir_2.open_workingtree()
+ self.build_tree_contents([('br1/file', 'original from 1\n')])
+ wt1.commit(message='rev 1-2', rev_id='1-2')
+ self.build_tree_contents([('br1/file', 'agreement\n')])
+ wt1.commit(message='rev 1-3', rev_id='1-3')
+ self.build_tree_contents([('br2/file', 'contents in 2\n')])
+ wt2.commit(message='rev 2-1', rev_id='2-1')
+ self.build_tree_contents([('br2/file', 'agreement\n')])
+ wt2.commit(message='rev 2-2', rev_id='2-2')
+
+ def test_merge_fetches_file_history(self):
+ """Merge brings across file histories"""
+ br2 = Branch.open('br2')
+ br1 = Branch.open('br1')
+ wt2 = WorkingTree.open('br2').merge_from_branch(br1)
+ br2.lock_read()
+ self.addCleanup(br2.unlock)
+ for rev_id, text in [('1-2', 'original from 1\n'),
+ ('1-3', 'agreement\n'),
+ ('2-1', 'contents in 2\n'),
+ ('2-2', 'agreement\n')]:
+ self.assertEqualDiff(
+ br2.repository.revision_tree(
+ rev_id).get_file_text('this-file-id'), text)
+
+
+class TestKnitToPackFetch(TestCaseWithTransport):
+
+ def find_get_record_stream(self, calls, expected_count=1):
+ """In a list of calls, find the last 'get_record_stream'.
+
+        :param expected_count: The number of calls we should expect to find.
+ If a different number is found, an assertion is raised.
+ """
+ get_record_call = None
+ call_count = 0
+ for call in calls:
+ if call[0] == 'get_record_stream':
+ call_count += 1
+ get_record_call = call
+ self.assertEqual(expected_count, call_count)
+ return get_record_call
+
+ def test_fetch_with_deltas_no_delta_closure(self):
+ tree = self.make_branch_and_tree('source', format='dirstate')
+ target = self.make_repository('target', format='pack-0.92')
+ self.build_tree(['source/file'])
+ tree.set_root_id('root-id')
+ tree.add('file', 'file-id')
+ tree.commit('one', rev_id='rev-one')
+ source = tree.branch.repository
+ source.texts = versionedfile.RecordingVersionedFilesDecorator(
+ source.texts)
+ source.signatures = versionedfile.RecordingVersionedFilesDecorator(
+ source.signatures)
+ source.revisions = versionedfile.RecordingVersionedFilesDecorator(
+ source.revisions)
+ source.inventories = versionedfile.RecordingVersionedFilesDecorator(
+ source.inventories)
+ # precondition
+ self.assertTrue(target._format._fetch_uses_deltas)
+ target.fetch(source, revision_id='rev-one')
+ self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
+ target._format._fetch_order, False),
+ self.find_get_record_stream(source.texts.calls))
+ self.assertEqual(('get_record_stream', [('rev-one',)],
+ target._format._fetch_order, False),
+ self.find_get_record_stream(source.inventories.calls, 2))
+ self.assertEqual(('get_record_stream', [('rev-one',)],
+ target._format._fetch_order, False),
+ self.find_get_record_stream(source.revisions.calls))
+ # XXX: Signatures is special, and slightly broken. The
+ # standard item_keys_introduced_by actually does a lookup for every
+ # signature to see if it exists, rather than waiting to do them all at
+ # once at the end. The fetch code then does an all-at-once and just
+ # allows for some of them to be missing.
+ # So we know there will be extra calls, but the *last* one is the one
+ # we care about.
+ signature_calls = source.signatures.calls[-1:]
+ self.assertEqual(('get_record_stream', [('rev-one',)],
+ target._format._fetch_order, False),
+ self.find_get_record_stream(signature_calls))
+
+ def test_fetch_no_deltas_with_delta_closure(self):
+ tree = self.make_branch_and_tree('source', format='dirstate')
+ target = self.make_repository('target', format='pack-0.92')
+ self.build_tree(['source/file'])
+ tree.set_root_id('root-id')
+ tree.add('file', 'file-id')
+ tree.commit('one', rev_id='rev-one')
+ source = tree.branch.repository
+ source.texts = versionedfile.RecordingVersionedFilesDecorator(
+ source.texts)
+ source.signatures = versionedfile.RecordingVersionedFilesDecorator(
+ source.signatures)
+ source.revisions = versionedfile.RecordingVersionedFilesDecorator(
+ source.revisions)
+ source.inventories = versionedfile.RecordingVersionedFilesDecorator(
+ source.inventories)
+ # XXX: This won't work in general, but for the dirstate format it does.
+ self.overrideAttr(target._format, '_fetch_uses_deltas', False)
+ target.fetch(source, revision_id='rev-one')
+ self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
+ target._format._fetch_order, True),
+ self.find_get_record_stream(source.texts.calls))
+ self.assertEqual(('get_record_stream', [('rev-one',)],
+ target._format._fetch_order, True),
+ self.find_get_record_stream(source.inventories.calls, 2))
+ self.assertEqual(('get_record_stream', [('rev-one',)],
+ target._format._fetch_order, True),
+ self.find_get_record_stream(source.revisions.calls))
+ # XXX: Signatures is special, and slightly broken. The
+ # standard item_keys_introduced_by actually does a lookup for every
+ # signature to see if it exists, rather than waiting to do them all at
+ # once at the end. The fetch code then does an all-at-once and just
+ # allows for some of them to be missing.
+ # So we know there will be extra calls, but the *last* one is the one
+ # we care about.
+ signature_calls = source.signatures.calls[-1:]
+ self.assertEqual(('get_record_stream', [('rev-one',)],
+ target._format._fetch_order, True),
+ self.find_get_record_stream(signature_calls))
+
+ def test_fetch_revisions_with_deltas_into_pack(self):
+ # See BUG #261339, dev versions of bzr could accidentally create deltas
+ # in revision texts in knit branches (when fetching from packs). So we
+ # ensure that *if* a knit repository has a delta in revisions, that it
+ # gets properly expanded back into a fulltext when stored in the pack
+ # file.
+ tree = self.make_branch_and_tree('source', format='dirstate')
+ target = self.make_repository('target', format='pack-0.92')
+ self.build_tree(['source/file'])
+ tree.set_root_id('root-id')
+ tree.add('file', 'file-id')
+ tree.commit('one', rev_id='rev-one')
+ # Hack the KVF for revisions so that it "accidentally" allows a delta
+ tree.branch.repository.revisions._max_delta_chain = 200
+ tree.commit('two', rev_id='rev-two')
+ source = tree.branch.repository
+ # Ensure that we stored a delta
+ source.lock_read()
+ self.addCleanup(source.unlock)
+ record = source.revisions.get_record_stream([('rev-two',)],
+ 'unordered', False).next()
+ self.assertEqual('knit-delta-gz', record.storage_kind)
+ target.fetch(tree.branch.repository, revision_id='rev-two')
+ # The record should get expanded back to a fulltext
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ record = target.revisions.get_record_stream([('rev-two',)],
+ 'unordered', False).next()
+ self.assertEqual('knit-ft-gz', record.storage_kind)
+
+ def test_fetch_with_fallback_and_merge(self):
+ builder = self.make_branch_builder('source', format='pack-0.92')
+ builder.start_series()
+ # graph
+ # A
+ # |\
+ # B C
+ # | |
+ # | D
+ # | |
+ # | E
+ # \|
+ # F
+ # A & B are present in the base (stacked-on) repository, A-E are
+ # present in the source.
+ # This reproduces bug #304841
+ # We need a large enough inventory that total size of compressed deltas
+ # is shorter than the size of a compressed fulltext. We have to use
+ # random ids because otherwise the inventory fulltext compresses too
+ # well and the deltas get bigger.
+ to_add = [
+ ('add', ('', 'TREE_ROOT', 'directory', None))]
+ for i in xrange(10):
+ fname = 'file%03d' % (i,)
+ fileid = '%s-%s' % (fname, osutils.rand_chars(64))
+ to_add.append(('add', (fname, fileid, 'file', 'content\n')))
+ builder.build_snapshot('A', None, to_add)
+ builder.build_snapshot('B', ['A'], [])
+ builder.build_snapshot('C', ['A'], [])
+ builder.build_snapshot('D', ['C'], [])
+ builder.build_snapshot('E', ['D'], [])
+ builder.build_snapshot('F', ['E', 'B'], [])
+ builder.finish_series()
+ source_branch = builder.get_branch()
+ source_branch.bzrdir.sprout('base', revision_id='B')
+ target_branch = self.make_branch('target', format='1.6')
+ target_branch.set_stacked_on_url('../base')
+ source = source_branch.repository
+ source.lock_read()
+ self.addCleanup(source.unlock)
+ source.inventories = versionedfile.OrderingVersionedFilesDecorator(
+ source.inventories,
+ key_priority={('E',): 1, ('D',): 2, ('C',): 4,
+ ('F',): 3})
+ # Ensure that the content is yielded in the proper order, and given as
+ # the expected kinds
+ records = [(record.key, record.storage_kind)
+ for record in source.inventories.get_record_stream(
+ [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
+ self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
+ (('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
+ records)
+
+ target_branch.lock_write()
+ self.addCleanup(target_branch.unlock)
+ target = target_branch.repository
+ target.fetch(source, revision_id='F')
+ # 'C' should be expanded to a fulltext, but D and E should still be
+ # deltas
+ stream = target.inventories.get_record_stream(
+ [('C',), ('D',), ('E',), ('F',)],
+ 'unordered', False)
+ kinds = dict((record.key, record.storage_kind) for record in stream)
+ self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
+ ('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
+ kinds)
+
+
+class Test1To2Fetch(TestCaseWithTransport):
+ """Tests for Model1To2 failure modes"""
+
+ def make_tree_and_repo(self):
+ self.tree = self.make_branch_and_tree('tree', format='pack-0.92')
+ self.repo = self.make_repository('rich-repo', format='rich-root-pack')
+ self.repo.lock_write()
+ self.addCleanup(self.repo.unlock)
+
+ def do_fetch_order_test(self, first, second):
+ """Test that fetch works no matter what the set order of revision is.
+
+ This test depends on the order of items in a set, which is
+        implementation-dependent, so we test A, B and then B, A.
+ """
+ self.make_tree_and_repo()
+ self.tree.commit('Commit 1', rev_id=first)
+ self.tree.commit('Commit 2', rev_id=second)
+ self.repo.fetch(self.tree.branch.repository, second)
+
+ def test_fetch_order_AB(self):
+ """See do_fetch_order_test"""
+ self.do_fetch_order_test('A', 'B')
+
+ def test_fetch_order_BA(self):
+ """See do_fetch_order_test"""
+ self.do_fetch_order_test('B', 'A')
+
+ def get_parents(self, file_id, revision_id):
+ self.repo.lock_read()
+ try:
+ parent_map = self.repo.texts.get_parent_map([(file_id, revision_id)])
+ return parent_map[(file_id, revision_id)]
+ finally:
+ self.repo.unlock()
+
+ def test_fetch_ghosts(self):
+ self.make_tree_and_repo()
+ self.tree.commit('first commit', rev_id='left-parent')
+ self.tree.add_parent_tree_id('ghost-parent')
+ fork = self.tree.bzrdir.sprout('fork', 'null:').open_workingtree()
+ fork.commit('not a ghost', rev_id='not-ghost-parent')
+ self.tree.branch.repository.fetch(fork.branch.repository,
+ 'not-ghost-parent')
+ self.tree.add_parent_tree_id('not-ghost-parent')
+ self.tree.commit('second commit', rev_id='second-id')
+ self.repo.fetch(self.tree.branch.repository, 'second-id')
+ root_id = self.tree.get_root_id()
+ self.assertEqual(
+ ((root_id, 'left-parent'), (root_id, 'not-ghost-parent')),
+ self.get_parents(root_id, 'second-id'))
+
+ def make_two_commits(self, change_root, fetch_twice):
+ self.make_tree_and_repo()
+ self.tree.commit('first commit', rev_id='first-id')
+ if change_root:
+ self.tree.set_root_id('unique-id')
+ self.tree.commit('second commit', rev_id='second-id')
+ if fetch_twice:
+ self.repo.fetch(self.tree.branch.repository, 'first-id')
+ self.repo.fetch(self.tree.branch.repository, 'second-id')
+
+ def test_fetch_changed_root(self):
+ self.make_two_commits(change_root=True, fetch_twice=False)
+ self.assertEqual((), self.get_parents('unique-id', 'second-id'))
+
+ def test_two_fetch_changed_root(self):
+ self.make_two_commits(change_root=True, fetch_twice=True)
+ self.assertEqual((), self.get_parents('unique-id', 'second-id'))
+
+ def test_two_fetches(self):
+ self.make_two_commits(change_root=False, fetch_twice=True)
+ self.assertEqual((('TREE_ROOT', 'first-id'),),
+ self.get_parents('TREE_ROOT', 'second-id'))
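
The core contract exercised throughout this file is that fetch copies
revisions into the target repository without moving the target branch tip.
A rough standalone sketch of that, with invented paths and revision ids
(run outside the test framework it creates directories in the current
working directory):

    from bzrlib import bzrdir, revision as _mod_revision

    wt_a = bzrdir.BzrDir.create_standalone_workingtree('a')
    wt_a.commit('one', rev_id='rev-1')
    wt_b = bzrdir.BzrDir.create_standalone_workingtree('b')
    wt_b.branch.fetch(wt_a.branch)
    # The revision is now present in b's repository...
    assert wt_b.branch.repository.has_revision('rev-1')
    # ...but b's branch tip is untouched.
    assert wt_b.branch.last_revision() == _mod_revision.NULL_REVISION
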
diff --git a/bzrlib/tests/test_fifo_cache.py b/bzrlib/tests/test_fifo_cache.py
new file mode 100644
index 0000000..b3ed22a
--- /dev/null
+++ b/bzrlib/tests/test_fifo_cache.py
@@ -0,0 +1,325 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the fifo_cache module."""
+
+from bzrlib import (
+ fifo_cache,
+ tests,
+ )
+
+
+class TestFIFOCache(tests.TestCase):
+ """Test that FIFO cache properly keeps track of entries."""
+
+ def test_add_is_present(self):
+ c = fifo_cache.FIFOCache()
+ c[1] = 2
+ self.assertTrue(1 in c)
+ self.assertEqual(1, len(c))
+ self.assertEqual(2, c[1])
+ self.assertEqual(2, c.get(1))
+ self.assertEqual(2, c.get(1, None))
+ self.assertEqual([1], c.keys())
+ self.assertEqual([1], list(c.iterkeys()))
+ self.assertEqual([(1, 2)], c.items())
+ self.assertEqual([(1, 2)], list(c.iteritems()))
+ self.assertEqual([2], c.values())
+ self.assertEqual([2], list(c.itervalues()))
+ self.assertEqual({1: 2}, c)
+
+ def test_cache_size(self):
+ c = fifo_cache.FIFOCache()
+ self.assertEqual(100, c.cache_size())
+ c.resize(20, 5)
+ self.assertEqual(20, c.cache_size())
+
+ def test_missing(self):
+ c = fifo_cache.FIFOCache()
+ self.assertRaises(KeyError, c.__getitem__, 1)
+ self.assertFalse(1 in c)
+ self.assertEqual(0, len(c))
+ self.assertEqual(None, c.get(1))
+ self.assertEqual(None, c.get(1, None))
+ self.assertEqual([], c.keys())
+ self.assertEqual([], list(c.iterkeys()))
+ self.assertEqual([], c.items())
+ self.assertEqual([], list(c.iteritems()))
+ self.assertEqual([], c.values())
+ self.assertEqual([], list(c.itervalues()))
+ self.assertEqual({}, c)
+
+ def test_add_maintains_fifo(self):
+ c = fifo_cache.FIFOCache(4, 4)
+ c[1] = 2
+ c[2] = 3
+ c[3] = 4
+ c[4] = 5
+ self.assertEqual([1, 2, 3, 4], sorted(c.keys()))
+ c[5] = 6
+ # This should pop out the oldest entry
+ self.assertEqual([2, 3, 4, 5], sorted(c.keys()))
+ # Replacing an item doesn't change the stored keys
+ c[2] = 7
+ self.assertEqual([2, 3, 4, 5], sorted(c.keys()))
+ # But it does change the position in the FIFO
+ c[6] = 7
+ self.assertEqual([2, 4, 5, 6], sorted(c.keys()))
+ self.assertEqual([4, 5, 2, 6], list(c._queue))
+
+ def test_default_after_cleanup_count(self):
+ c = fifo_cache.FIFOCache(5)
+ self.assertEqual(4, c._after_cleanup_count)
+ c[1] = 2
+ c[2] = 3
+ c[3] = 4
+ c[4] = 5
+ c[5] = 6
+ # So far, everything fits
+ self.assertEqual([1, 2, 3, 4, 5], sorted(c.keys()))
+ c[6] = 7
+ # But adding one more should shrink down to after_cleanup_count
+ self.assertEqual([3, 4, 5, 6], sorted(c.keys()))
+
+ def test_clear(self):
+ c = fifo_cache.FIFOCache(5)
+ c[1] = 2
+ c[2] = 3
+ c[3] = 4
+ c[4] = 5
+ c[5] = 6
+ c.cleanup()
+ self.assertEqual([2, 3, 4, 5], sorted(c.keys()))
+ c.clear()
+ self.assertEqual([], c.keys())
+ self.assertEqual([], list(c._queue))
+ self.assertEqual({}, c)
+
+ def test_copy_not_implemented(self):
+ c = fifo_cache.FIFOCache()
+ self.assertRaises(NotImplementedError, c.copy)
+
+    def test_pop_not_implemented(self):
+ c = fifo_cache.FIFOCache()
+ self.assertRaises(NotImplementedError, c.pop, 'key')
+
+    def test_popitem_not_implemented(self):
+ c = fifo_cache.FIFOCache()
+ self.assertRaises(NotImplementedError, c.popitem)
+
+ def test_resize_smaller(self):
+ c = fifo_cache.FIFOCache()
+ c[1] = 2
+ c[2] = 3
+ c[3] = 4
+ c[4] = 5
+ c[5] = 6
+ # No cleanup, because it is the exact size
+ c.resize(5)
+ self.assertEqual({1: 2, 2: 3, 3: 4, 4: 5, 5: 6}, c)
+ self.assertEqual(5, c.cache_size())
+ # Adding one more will trigger a cleanup, though
+ c[6] = 7
+ self.assertEqual({3: 4, 4: 5, 5: 6, 6: 7}, c)
+ c.resize(3, 2)
+ self.assertEqual({5: 6, 6: 7}, c)
+
+ def test_resize_larger(self):
+ c = fifo_cache.FIFOCache(5, 4)
+ c[1] = 2
+ c[2] = 3
+ c[3] = 4
+ c[4] = 5
+ c[5] = 6
+ # No cleanup, because it is the exact size
+ c.resize(10)
+ self.assertEqual({1: 2, 2: 3, 3: 4, 4: 5, 5: 6}, c)
+ self.assertEqual(10, c.cache_size())
+ c[6] = 7
+ c[7] = 8
+ c[8] = 9
+ c[9] = 10
+ c[10] = 11
+ self.assertEqual({1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9,
+ 9: 10, 10: 11}, c)
+ c[11] = 12
+ self.assertEqual({4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11,
+ 11: 12}, c)
+
+ def test_setdefault(self):
+ c = fifo_cache.FIFOCache(5, 4)
+ c['one'] = 1
+ c['two'] = 2
+ c['three'] = 3
+ myobj = object()
+ self.assertIs(myobj, c.setdefault('four', myobj))
+ self.assertEqual({'one': 1, 'two': 2, 'three': 3, 'four': myobj}, c)
+ self.assertEqual(3, c.setdefault('three', myobj))
+ c.setdefault('five', myobj)
+ c.setdefault('six', myobj)
+ self.assertEqual({'three': 3, 'four': myobj, 'five': myobj,
+ 'six': myobj}, c)
+
+ def test_update(self):
+ c = fifo_cache.FIFOCache(5, 4)
+ # We allow an iterable
+ c.update([(1, 2), (3, 4)])
+ self.assertEqual({1: 2, 3: 4}, c)
+ # Or kwarg form
+ c.update(foo=3, bar=4)
+ self.assertEqual({1: 2, 3: 4, 'foo': 3, 'bar': 4}, c)
+ # Even a dict (This triggers a cleanup)
+ c.update({'baz': 'biz', 'bing': 'bang'})
+ self.assertEqual({'foo': 3, 'bar': 4, 'baz': 'biz', 'bing': 'bang'}, c)
+ # We only allow 1 iterable, just like dict
+ self.assertRaises(TypeError, c.update, [(1, 2)], [(3, 4)])
+ # But you can mix and match. kwargs take precedence over iterable
+ c.update([('a', 'b'), ('d', 'e')], a='c', q='r')
+ self.assertEqual({'baz': 'biz', 'bing': 'bang',
+ 'a': 'c', 'd': 'e', 'q': 'r'}, c)
+
+ def test_cleanup_funcs(self):
+ log = []
+ def logging_cleanup(key, value):
+ log.append((key, value))
+ c = fifo_cache.FIFOCache(5, 4)
+ c.add(1, 2, cleanup=logging_cleanup)
+ c.add(2, 3, cleanup=logging_cleanup)
+ c.add(3, 4, cleanup=logging_cleanup)
+ c.add(4, 5, cleanup=None) # no cleanup for 4
+ c[5] = 6 # no cleanup for 5
+ self.assertEqual([], log)
+ # Adding another key should cleanup 1 & 2
+ c.add(6, 7, cleanup=logging_cleanup)
+ self.assertEqual([(1, 2), (2, 3)], log)
+ del log[:]
+ # replacing 3 should trigger a cleanup
+ c.add(3, 8, cleanup=logging_cleanup)
+ self.assertEqual([(3, 4)], log)
+ del log[:]
+ c[3] = 9
+ self.assertEqual([(3, 8)], log)
+ del log[:]
+ # Clearing everything should call all remaining cleanups
+ c.clear()
+ self.assertEqual([(6, 7)], log)
+ del log[:]
+ c.add(8, 9, cleanup=logging_cleanup)
+ # __delitem__ should also trigger a cleanup
+ del c[8]
+ self.assertEqual([(8, 9)], log)
+
+ def test_cleanup_at_deconstruct(self):
+ log = []
+ def logging_cleanup(key, value):
+ log.append((key, value))
+ c = fifo_cache.FIFOCache()
+ c.add(1, 2, cleanup=logging_cleanup)
+ del c
+ # As a matter of design, bzr does not (can not) count on anything
+ # being run from Python __del__ methods, because they may not run for
+ # a long time, and because in cPython merely having them defined
+ # interferes with garbage collection.
+ self.assertEqual([], log)
+
+
+class TestFIFOSizeCache(tests.TestCase):
+
+ def test_add_is_present(self):
+ c = fifo_cache.FIFOSizeCache()
+ c[1] = '2'
+ self.assertTrue(1 in c)
+ self.assertEqual(1, len(c))
+ self.assertEqual('2', c[1])
+ self.assertEqual('2', c.get(1))
+ self.assertEqual('2', c.get(1, None))
+ self.assertEqual([1], c.keys())
+ self.assertEqual([1], list(c.iterkeys()))
+ self.assertEqual([(1, '2')], c.items())
+ self.assertEqual([(1, '2')], list(c.iteritems()))
+ self.assertEqual(['2'], c.values())
+ self.assertEqual(['2'], list(c.itervalues()))
+ self.assertEqual({1: '2'}, c)
+ self.assertEqual(1024*1024, c.cache_size())
+
+ def test_missing(self):
+ c = fifo_cache.FIFOSizeCache()
+ self.assertRaises(KeyError, c.__getitem__, 1)
+ self.assertFalse(1 in c)
+ self.assertEqual(0, len(c))
+ self.assertEqual(None, c.get(1))
+ self.assertEqual(None, c.get(1, None))
+ self.assertEqual([], c.keys())
+ self.assertEqual([], list(c.iterkeys()))
+ self.assertEqual([], c.items())
+ self.assertEqual([], list(c.iteritems()))
+ self.assertEqual([], c.values())
+ self.assertEqual([], list(c.itervalues()))
+ self.assertEqual({}, c)
+
+ def test_add_maintains_fifo(self):
+ c = fifo_cache.FIFOSizeCache(10, 8)
+ c[1] = 'ab'
+ c[2] = 'cde'
+ c[3] = 'fghi'
+ self.assertEqual({1: 'ab', 2: 'cde', 3: 'fghi'}, c)
+ c[4] = 'jkl' # Collapse
+ self.assertEqual({3: 'fghi', 4: 'jkl'}, c)
+ # Replacing an item will bump it to the end of the queue
+ c[3] = 'mnop'
+ self.assertEqual({3: 'mnop', 4: 'jkl'}, c)
+ c[5] = 'qrst'
+ self.assertEqual({3: 'mnop', 5: 'qrst'}, c)
+
+ def test_adding_large_key(self):
+ c = fifo_cache.FIFOSizeCache(10, 8)
+        c[1] = 'abcdefgh' # Adding a value this large won't get cached at all
+ self.assertEqual({}, c)
+ c[1] = 'abcdefg'
+ self.assertEqual({1: 'abcdefg'}, c)
+        # Replacing with a too-large value will remove it
+ c[1] = 'abcdefgh'
+ self.assertEqual({}, c)
+ self.assertEqual(0, c._value_size)
+
+ def test_resize_smaller(self):
+ c = fifo_cache.FIFOSizeCache(20, 16)
+ c[1] = 'a'
+ c[2] = 'bc'
+ c[3] = 'def'
+ c[4] = 'ghij'
+ # No cleanup, because it is the exact size
+ c.resize(10, 8)
+ self.assertEqual({1: 'a', 2: 'bc', 3: 'def', 4: 'ghij'}, c)
+ self.assertEqual(10, c.cache_size())
+ # Adding one more will trigger a cleanup, though
+ c[5] = 'k'
+ self.assertEqual({3: 'def', 4: 'ghij', 5: 'k'}, c)
+ c.resize(5, 4)
+ self.assertEqual({5: 'k'}, c)
+
+ def test_resize_larger(self):
+ c = fifo_cache.FIFOSizeCache(10, 8)
+ c[1] = 'a'
+ c[2] = 'bc'
+ c[3] = 'def'
+ c[4] = 'ghij'
+ c.resize(12, 10)
+ self.assertEqual({1: 'a', 2: 'bc', 3: 'def', 4: 'ghij'}, c)
+ c[5] = 'kl'
+ self.assertEqual({1: 'a', 2: 'bc', 3: 'def', 4: 'ghij', 5: 'kl'}, c)
+ c[6] = 'mn'
+ self.assertEqual({4: 'ghij', 5: 'kl', 6: 'mn'}, c)
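
To summarise the eviction rule the FIFOCache tests pin down: once the cache
grows past max_cache entries it trims back to after_cleanup_count, dropping
the oldest insertions first. A small usage sketch with illustrative values:

    from bzrlib import fifo_cache

    c = fifo_cache.FIFOCache(4, 2)     # at most 4 entries, trim back to 2
    for i in range(4):
        c[i] = str(i)
    assert sorted(c.keys()) == [0, 1, 2, 3]
    c[4] = '4'                         # one too many: cleanup runs
    assert sorted(c.keys()) == [3, 4]  # only the newest two survive
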
diff --git a/bzrlib/tests/test_filter_tree.py b/bzrlib/tests/test_filter_tree.py
new file mode 100644
index 0000000..ae7d3a6
--- /dev/null
+++ b/bzrlib/tests/test_filter_tree.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for ContentFilterTree"""
+
+import tarfile
+import zipfile
+
+from bzrlib import (
+ export,
+ filter_tree,
+ tests,
+ )
+from bzrlib.tests import (
+ fixtures,
+ )
+from bzrlib.tests.test_filters import _stack_1
+
+
+class TestFilterTree(tests.TestCaseWithTransport):
+
+ def make_tree(self):
+ self.underlying_tree = fixtures.make_branch_and_populated_tree(
+ self)
+ def stack_callback(path):
+ return _stack_1
+ self.filter_tree = filter_tree.ContentFilterTree(
+ self.underlying_tree, stack_callback)
+ return self.filter_tree
+
+ def test_get_file_text(self):
+ self.make_tree()
+ self.assertEquals(
+ self.underlying_tree.get_file_text('hello-id'),
+ 'hello world')
+ self.assertEquals(
+ self.filter_tree.get_file_text('hello-id'),
+ 'HELLO WORLD')
+
+ def test_tar_export_content_filter_tree(self):
+ # TODO: this could usefully be run generically across all exporters.
+ self.make_tree()
+ export.export(self.filter_tree, "out.tgz")
+ ball = tarfile.open("out.tgz", "r:gz")
+ self.assertEquals(
+ 'HELLO WORLD',
+ ball.extractfile('out/hello').read())
+
+ def test_zip_export_content_filter_tree(self):
+ self.make_tree()
+ export.export(self.filter_tree, 'out.zip')
+ zipf = zipfile.ZipFile('out.zip', 'r')
+ self.assertEquals(
+ 'HELLO WORLD',
+ zipf.read('out/hello'))
diff --git a/bzrlib/tests/test_filters.py b/bzrlib/tests/test_filters.py
new file mode 100644
index 0000000..f055e52
--- /dev/null
+++ b/bzrlib/tests/test_filters.py
@@ -0,0 +1,157 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import StringIO
+from bzrlib import errors, filters
+from bzrlib.filters import (
+ ContentFilter,
+ ContentFilterContext,
+ filtered_input_file,
+ filtered_output_bytes,
+ _get_filter_stack_for,
+ _get_registered_names,
+ internal_size_sha_file_byname,
+ )
+from bzrlib.osutils import sha_string
+from bzrlib.tests import TestCase, TestCaseInTempDir
+
+
+# sample filter stacks
+def _swapcase(chunks, context=None):
+ return [s.swapcase() for s in chunks]
+def _addjunk(chunks):
+ return ['junk\n'] + [s for s in chunks]
+def _deljunk(chunks, context):
+ return [s for s in chunks[1:]]
+_stack_1 = [
+ ContentFilter(_swapcase, _swapcase),
+ ]
+_stack_2 = [
+ ContentFilter(_swapcase, _swapcase),
+ ContentFilter(_addjunk, _deljunk),
+ ]
+
+# sample data
+_sample_external = ['Hello\n', 'World\n']
+_internal_1 = ['hELLO\n', 'wORLD\n']
+_internal_2 = ['junk\n', 'hELLO\n', 'wORLD\n']
+
+
+class TestContentFilterContext(TestCase):
+
+ def test_empty_filter_context(self):
+ ctx = ContentFilterContext()
+ self.assertEqual(None, ctx.relpath())
+
+ def test_filter_context_with_path(self):
+ ctx = ContentFilterContext('foo/bar')
+ self.assertEquals('foo/bar', ctx.relpath())
+
+
+class TestFilteredInput(TestCase):
+
+ def test_filtered_input_file(self):
+ # test an empty stack returns the same result
+ external = ''.join(_sample_external)
+ f = StringIO.StringIO(external)
+ self.assertEqual(external, filtered_input_file(f, None).read())
+ # test a single item filter stack
+ f = StringIO.StringIO(external)
+ expected = ''.join(_internal_1)
+ self.assertEqual(expected, filtered_input_file(f, _stack_1).read())
+ # test a multi item filter stack
+ f = StringIO.StringIO(external)
+ expected = ''.join(_internal_2)
+ self.assertEqual(expected, filtered_input_file(f, _stack_2).read())
+
+
+class TestFilteredOutput(TestCase):
+
+ def test_filtered_output_bytes(self):
+ # test an empty stack returns the same result
+ self.assertEqual(_sample_external, list(filtered_output_bytes(
+ _sample_external, None)))
+ # test a single item filter stack
+ self.assertEqual(_sample_external, list(filtered_output_bytes(
+ _internal_1, _stack_1)))
+ # test a multi item filter stack
+ self.assertEqual(_sample_external, list(filtered_output_bytes(
+ _internal_2, _stack_2)))
+
+
+class TestFilteredSha(TestCaseInTempDir):
+
+ def test_filtered_size_sha(self):
+ # check that the size and sha matches what's expected
+ text = 'Foo Bar Baz\n'
+ a = open('a', 'wb')
+ a.write(text)
+ a.close()
+ post_filtered_content = ''.join(_swapcase([text], None))
+ expected_len = len(post_filtered_content)
+ expected_sha = sha_string(post_filtered_content)
+ self.assertEqual((expected_len,expected_sha),
+ internal_size_sha_file_byname('a',
+ [ContentFilter(_swapcase, _swapcase)]))
+
+
+class TestFilterStackMaps(TestCase):
+
+ def _register_map(self, pref, stk1, stk2):
+ def stk_lookup(key):
+ return {'v1': stk1, 'v2': stk2}.get(key)
+ filters.filter_stacks_registry.register(pref, stk_lookup)
+
+ def test_filter_stack_maps(self):
+ # Save the current registry
+ original_registry = filters._reset_registry()
+ self.addCleanup(filters._reset_registry, original_registry)
+ # Test registration
+ a_stack = [ContentFilter('b', 'c')]
+ z_stack = [ContentFilter('y', 'x'), ContentFilter('w', 'v')]
+ self._register_map('foo', a_stack, z_stack)
+ self.assertEqual(['foo'], _get_registered_names())
+ self._register_map('bar', z_stack, a_stack)
+ self.assertEqual(['bar', 'foo'], _get_registered_names())
+ # Test re-registration raises an error
+ self.assertRaises(KeyError, self._register_map,
+ 'foo', [], [])
+
+ def test_get_filter_stack_for(self):
+ # Save the current registry
+ original_registry = filters._reset_registry()
+ self.addCleanup(filters._reset_registry, original_registry)
+ # Test filter stack lookup
+ a_stack = [ContentFilter('b', 'c')]
+ d_stack = [ContentFilter('d', 'D')]
+ z_stack = [ContentFilter('y', 'x'), ContentFilter('w', 'v')]
+ self._register_map('foo', a_stack, z_stack)
+ self._register_map('bar', d_stack, z_stack)
+ prefs = (('foo','v1'),)
+ self.assertEqual(a_stack, _get_filter_stack_for(prefs))
+ prefs = (('foo','v2'),)
+ self.assertEqual(z_stack, _get_filter_stack_for(prefs))
+ prefs = (('foo','v1'), ('bar','v1'))
+ self.assertEqual(a_stack + d_stack, _get_filter_stack_for(prefs))
+ # Test an unknown preference
+ prefs = (('baz','v1'),)
+ self.assertEqual([], _get_filter_stack_for(prefs))
+ # Test an unknown value
+ prefs = (('foo','v3'),)
+ self.assertEqual([], _get_filter_stack_for(prefs))
+ # Test a value of None is skipped
+ prefs = (('foo',None), ('bar', 'v1'))
+ self.assertEqual(d_stack, _get_filter_stack_for(prefs))
diff --git a/bzrlib/tests/test_fixtures.py b/bzrlib/tests/test_fixtures.py
new file mode 100644
index 0000000..778deaa
--- /dev/null
+++ b/bzrlib/tests/test_fixtures.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for test fixtures"""
+
+import codecs
+
+from bzrlib import (
+ tests,
+ )
+from bzrlib.tests import (
+ fixtures,
+ )
+
+
diff --git a/bzrlib/tests/test_foreign.py b/bzrlib/tests/test_foreign.py
new file mode 100644
index 0000000..e5ae348
--- /dev/null
+++ b/bzrlib/tests/test_foreign.py
@@ -0,0 +1,495 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for foreign VCS utility code."""
+
+
+from bzrlib import (
+ branch,
+ bzrdir,
+ controldir,
+ errors,
+ foreign,
+ lockable_files,
+ lockdir,
+ repository,
+ revision,
+ tests,
+ trace,
+ vf_repository,
+ )
+
+from bzrlib.repofmt import groupcompress_repo
+
+# This is the dummy foreign revision control system, used
+# mainly here in the testsuite to test the foreign VCS infrastructure.
+# It is basically standard Bazaar with some minor modifications to
+# make it "foreign".
+#
+# It has the following differences from "regular" Bazaar:
+# - The control directory is named ".dummy", not ".bzr".
+# - The revision ids are tuples, not strings.
+# - It doesn't natively support more than one parent.
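+#
+# For illustration, a foreign revid tuple round-trips through the "v1"
+# mapping defined below roughly as follows (a sketch, not part of the
+# tests themselves):
+#
+#   mapping = DummyForeignVcsMapping(DummyForeignVcs())
+#   mapping.revision_id_foreign_to_bzr(("1", "2", "3"))
+#       # -> "dummy-v1:1-2-3"
+#   mapping.revision_id_bzr_to_foreign("dummy-v1:1-2-3")
+#       # -> (("1", "2", "3"), mapping)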
+
+
+class DummyForeignVcsMapping(foreign.VcsMapping):
+ """A simple mapping for the dummy Foreign VCS, for use with testing."""
+
+ def __eq__(self, other):
+ return type(self) == type(other)
+
+ def revision_id_bzr_to_foreign(self, bzr_revid):
+ return tuple(bzr_revid[len("dummy-v1:"):].split("-")), self
+
+ def revision_id_foreign_to_bzr(self, foreign_revid):
+ return "dummy-v1:%s-%s-%s" % foreign_revid
+
+
+class DummyForeignVcsMappingRegistry(foreign.VcsMappingRegistry):
+
+ def revision_id_bzr_to_foreign(self, revid):
+ if not revid.startswith("dummy-"):
+ raise errors.InvalidRevisionId(revid, None)
+ mapping_version = revid[len("dummy-"):len("dummy-vx")]
+ mapping = self.get(mapping_version)
+ return mapping.revision_id_bzr_to_foreign(revid)
+
+
+class DummyForeignVcs(foreign.ForeignVcs):
+ """A dummy Foreign VCS, for use with testing.
+
+ Its revision ids are tuples of three strings.
+ """
+
+ def __init__(self):
+ self.mapping_registry = DummyForeignVcsMappingRegistry()
+ self.mapping_registry.register("v1", DummyForeignVcsMapping(self),
+ "Version 1")
+ self.abbreviation = "dummy"
+
+ def show_foreign_revid(self, foreign_revid):
+ return { "dummy ding": "%s/%s\\%s" % foreign_revid }
+
+ def serialize_foreign_revid(self, foreign_revid):
+ return "%s|%s|%s" % foreign_revid
+
+
+class DummyForeignVcsBranch(branch.BzrBranch6, foreign.ForeignBranch):
+ """A Dummy VCS Branch."""
+
+ @property
+ def user_transport(self):
+ return self.bzrdir.user_transport
+
+ def __init__(self, _format, _control_files, a_bzrdir, *args, **kwargs):
+ self._format = _format
+ self._base = a_bzrdir.transport.base
+ self._ignore_fallbacks = False
+ self.bzrdir = a_bzrdir
+ foreign.ForeignBranch.__init__(self,
+ DummyForeignVcsMapping(DummyForeignVcs()))
+ branch.BzrBranch6.__init__(self, _format, _control_files, a_bzrdir,
+ *args, **kwargs)
+
+ def _get_checkout_format(self, lightweight=False):
+ """Return the most suitable metadir for a checkout of this branch.
+ Weaves are used if this branch's repository uses weaves.
+ """
+ return self.bzrdir.checkout_metadir()
+
+ def import_last_revision_info_and_tags(self, source, revno, revid,
+ lossy=False):
+ interbranch = InterToDummyVcsBranch(source, self)
+ result = interbranch.push(stop_revision=revid, lossy=True)
+ if lossy:
+ revid = result.revidmap[revid]
+ return (revno, revid)
+
+
+class DummyForeignCommitBuilder(vf_repository.VersionedFileRootCommitBuilder):
+
+ def _generate_revision_if_needed(self):
+ mapping = DummyForeignVcsMapping(DummyForeignVcs())
+ if self._lossy:
+ self._new_revision_id = mapping.revision_id_foreign_to_bzr(
+ (str(self._timestamp), str(self._timezone), "UNKNOWN"))
+ self.random_revid = False
+ elif self._new_revision_id is not None:
+ self.random_revid = False
+ else:
+ self._new_revision_id = self._gen_revision_id()
+ self.random_revid = True
+
+
+class DummyForeignVcsRepository(groupcompress_repo.CHKInventoryRepository,
+ foreign.ForeignRepository):
+ """Dummy foreign vcs repository."""
+
+
+class DummyForeignVcsRepositoryFormat(groupcompress_repo.RepositoryFormat2a):
+
+ repository_class = DummyForeignVcsRepository
+ _commit_builder_class = DummyForeignCommitBuilder
+
+ @classmethod
+ def get_format_string(cls):
+ return "Dummy Foreign Vcs Repository"
+
+ def get_format_description(self):
+ return "Dummy Foreign Vcs Repository"
+
+
+def branch_history(graph, revid):
+ ret = list(graph.iter_lefthand_ancestry(revid,
+ (revision.NULL_REVISION,)))
+ ret.reverse()
+ return ret
+
+
+class InterToDummyVcsBranch(branch.GenericInterBranch):
+
+ @staticmethod
+ def is_compatible(source, target):
+ return isinstance(target, DummyForeignVcsBranch)
+
+ def push(self, overwrite=False, stop_revision=None, lossy=False):
+ if not lossy:
+ raise errors.NoRoundtrippingSupport(self.source, self.target)
+ result = branch.BranchPushResult()
+ result.source_branch = self.source
+ result.target_branch = self.target
+ result.old_revno, result.old_revid = self.target.last_revision_info()
+ self.source.lock_read()
+ try:
+ graph = self.source.repository.get_graph()
+ # This just handles simple cases, but that's good enough for tests
+ my_history = branch_history(self.target.repository.get_graph(),
+ result.old_revid)
+ if stop_revision is None:
+ stop_revision = self.source.last_revision()
+ their_history = branch_history(graph, stop_revision)
+ if their_history[:min(len(my_history), len(their_history))] != my_history:
+ raise errors.DivergedBranches(self.target, self.source)
+ todo = their_history[len(my_history):]
+ revidmap = {}
+ for revid in todo:
+ rev = self.source.repository.get_revision(revid)
+ tree = self.source.repository.revision_tree(revid)
+ def get_file_with_stat(file_id, path=None):
+ return (tree.get_file(file_id), None)
+ tree.get_file_with_stat = get_file_with_stat
+ new_revid = self.target.mapping.revision_id_foreign_to_bzr(
+ (str(rev.timestamp), str(rev.timezone),
+ str(self.target.revno())))
+ parent_revno, parent_revid = self.target.last_revision_info()
+ if parent_revid == revision.NULL_REVISION:
+ parent_revids = []
+ else:
+ parent_revids = [parent_revid]
+ builder = self.target.get_commit_builder(parent_revids,
+ self.target.get_config_stack(), rev.timestamp,
+ rev.timezone, rev.committer, rev.properties,
+ new_revid)
+ try:
+ parent_tree = self.target.repository.revision_tree(
+ parent_revid)
+ for path, ie in tree.iter_entries_by_dir():
+ new_ie = ie.copy()
+ new_ie.revision = None
+ builder.record_entry_contents(new_ie,
+ [parent_tree.root_inventory],
+ path, tree,
+ (ie.kind, ie.text_size, ie.executable, ie.text_sha1))
+ builder.finish_inventory()
+ except:
+ builder.abort()
+ raise
+ revidmap[revid] = builder.commit(rev.message)
+ self.target.set_last_revision_info(parent_revno+1,
+ revidmap[revid])
+ trace.mutter('lossily pushed revision %s -> %s',
+ revid, revidmap[revid])
+ finally:
+ self.source.unlock()
+ result.new_revno, result.new_revid = self.target.last_revision_info()
+ result.revidmap = revidmap
+ return result
+
+
+class DummyForeignVcsBranchFormat(branch.BzrBranchFormat6):
+
+ @classmethod
+ def get_format_string(cls):
+ return "Branch for Testing"
+
+ @property
+ def _matchingbzrdir(self):
+ return DummyForeignVcsDirFormat()
+
+ def open(self, a_bzrdir, name=None, _found=False, ignore_fallbacks=False,
+ found_repository=None):
+ if name is None:
+ name = a_bzrdir._get_selected_branch()
+ if not _found:
+ raise NotImplementedError
+ try:
+ transport = a_bzrdir.get_branch_transport(None, name=name)
+ control_files = lockable_files.LockableFiles(transport, 'lock',
+ lockdir.LockDir)
+ if found_repository is None:
+ found_repository = a_bzrdir.find_repository()
+ return DummyForeignVcsBranch(_format=self,
+ _control_files=control_files,
+ a_bzrdir=a_bzrdir,
+ _repository=found_repository,
+ name=name)
+ except errors.NoSuchFile:
+ raise errors.NotBranchError(path=transport.base)
+
+
+class DummyForeignVcsDirFormat(bzrdir.BzrDirMetaFormat1):
+ """BzrDirFormat for the dummy foreign VCS."""
+
+ @classmethod
+ def get_format_string(cls):
+ return "A Dummy VCS Dir"
+
+ @classmethod
+ def get_format_description(cls):
+ return "A Dummy VCS Dir"
+
+ @classmethod
+ def is_supported(cls):
+ return True
+
+ def get_branch_format(self):
+ return DummyForeignVcsBranchFormat()
+
+ @property
+ def repository_format(self):
+ return DummyForeignVcsRepositoryFormat()
+
+ def initialize_on_transport(self, transport):
+ """Initialize a new bzrdir in the base directory of a Transport."""
+ # Since we don't have a .bzr directory, inherit the
+ # mode from the root directory
+ temp_control = lockable_files.LockableFiles(transport,
+ '', lockable_files.TransportLock)
+ temp_control._transport.mkdir('.dummy',
+ # FIXME: RBC 20060121 don't peek under
+ # the covers
+ mode=temp_control._dir_mode)
+ del temp_control
+ bzrdir_transport = transport.clone('.dummy')
+ # NB: no need to escape relative paths that are url safe.
+ control_files = lockable_files.LockableFiles(bzrdir_transport,
+ self._lock_file_name, self._lock_class)
+ control_files.create_lock()
+ return self.open(transport, _found=True)
+
+ def _open(self, transport):
+ return DummyForeignVcsDir(transport, self)
+
+
+class DummyForeignVcsDir(bzrdir.BzrDirMeta1):
+
+ def __init__(self, _transport, _format):
+ self._format = _format
+ self.transport = _transport.clone('.dummy')
+ self.root_transport = _transport
+ self._mode_check_done = False
+ self._control_files = lockable_files.LockableFiles(self.transport,
+ "lock", lockable_files.TransportLock)
+
+ def create_workingtree(self):
+ # dirstate requires a ".bzr" entry to exist
+ self.root_transport.put_bytes(".bzr", "foo")
+ return super(DummyForeignVcsDir, self).create_workingtree()
+
+ def open_branch(self, name=None, unsupported=False, ignore_fallbacks=True,
+ possible_transports=None):
+ if name is None:
+ name = self._get_selected_branch()
+ if name != "":
+ raise errors.NoColocatedBranchSupport(self)
+ return self._format.get_branch_format().open(self, _found=True)
+
+ def cloning_metadir(self, stacked=False):
+ """Produce a metadir suitable for cloning with."""
+ return controldir.format_registry.make_bzrdir("default")
+
+ def checkout_metadir(self):
+ return self.cloning_metadir()
+
+ def sprout(self, url, revision_id=None, force_new_repo=False,
+ recurse='down', possible_transports=None,
+ accelerator_tree=None, hardlink=False, stacked=False,
+ source_branch=None):
+ # dirstate doesn't cope well with accelerator_trees
+ # that have a different control dir
+ return super(DummyForeignVcsDir, self).sprout(url=url,
+ revision_id=revision_id, force_new_repo=force_new_repo,
+ recurse=recurse, possible_transports=possible_transports,
+ hardlink=hardlink, stacked=stacked, source_branch=source_branch)
+
+
+def register_dummy_foreign_for_test(testcase):
+ controldir.ControlDirFormat.register_prober(DummyForeignProber)
+ testcase.addCleanup(controldir.ControlDirFormat.unregister_prober,
+ DummyForeignProber)
+ repository.format_registry.register(DummyForeignVcsRepositoryFormat())
+ testcase.addCleanup(repository.format_registry.remove,
+ DummyForeignVcsRepositoryFormat())
+ branch.format_registry.register(DummyForeignVcsBranchFormat())
+ testcase.addCleanup(branch.format_registry.remove,
+ DummyForeignVcsBranchFormat())
+ # We need to register the optimiser to make the dummy appear really
+ # different from a regular bzr repository.
+ branch.InterBranch.register_optimiser(InterToDummyVcsBranch)
+ testcase.addCleanup(branch.InterBranch.unregister_optimiser,
+ InterToDummyVcsBranch)
+
+
+class DummyForeignProber(controldir.Prober):
+
+ @classmethod
+ def probe_transport(klass, transport):
+ """Return the .bzrdir style format present in a directory."""
+ if not transport.has('.dummy'):
+ raise errors.NotBranchError(path=transport.base)
+ return DummyForeignVcsDirFormat()
+
+ @classmethod
+ def known_formats(cls):
+ return set([DummyForeignVcsDirFormat()])
+
+
+class ForeignVcsRegistryTests(tests.TestCase):
+ """Tests for the ForeignVcsRegistry class."""
+
+ def test_parse_revision_id_no_dash(self):
+ reg = foreign.ForeignVcsRegistry()
+ self.assertRaises(errors.InvalidRevisionId,
+ reg.parse_revision_id, "invalid")
+
+ def test_parse_revision_id_unknown_mapping(self):
+ reg = foreign.ForeignVcsRegistry()
+ self.assertRaises(errors.InvalidRevisionId,
+ reg.parse_revision_id, "unknown-foreignrevid")
+
+ def test_parse_revision_id(self):
+ reg = foreign.ForeignVcsRegistry()
+ vcs = DummyForeignVcs()
+ reg.register("dummy", vcs, "Dummy VCS")
+ self.assertEquals((
+ ("some", "foreign", "revid"), DummyForeignVcsMapping(vcs)),
+ reg.parse_revision_id("dummy-v1:some-foreign-revid"))
+
+
+class ForeignRevisionTests(tests.TestCase):
+ """Tests for the ForeignRevision class."""
+
+ def test_create(self):
+ mapp = DummyForeignVcsMapping(DummyForeignVcs())
+ rev = foreign.ForeignRevision(("a", "foreign", "revid"),
+ mapp, "roundtripped-revid")
+ self.assertEquals("", rev.inventory_sha1)
+ self.assertEquals(("a", "foreign", "revid"), rev.foreign_revid)
+ self.assertEquals(mapp, rev.mapping)
+
+
+class WorkingTreeFileUpdateTests(tests.TestCaseWithTransport):
+ """Tests for update_workingtree_fileids()."""
+
+ def test_update_workingtree(self):
+ wt = self.make_branch_and_tree('br1')
+ self.build_tree_contents([('br1/bla', 'original contents\n')])
+ wt.add('bla', 'bla-a')
+ wt.commit('bla-a')
+ root_id = wt.get_root_id()
+ target = wt.bzrdir.sprout('br2').open_workingtree()
+ target.unversion(['bla-a'])
+ target.add('bla', 'bla-b')
+ target.commit('bla-b')
+ target_basis = target.basis_tree()
+ target_basis.lock_read()
+ self.addCleanup(target_basis.unlock)
+ foreign.update_workingtree_fileids(wt, target_basis)
+ wt.lock_read()
+ try:
+ self.assertEquals(set([root_id, "bla-b"]), set(wt.all_file_ids()))
+ finally:
+ wt.unlock()
+
+
+class DummyForeignVcsTests(tests.TestCaseWithTransport):
+ """Very basic test for DummyForeignVcs."""
+
+ def setUp(self):
+ super(DummyForeignVcsTests, self).setUp()
+ register_dummy_foreign_for_test(self)
+
+ def test_create(self):
+ """Test we can create dummies."""
+ self.make_branch_and_tree("d", format=DummyForeignVcsDirFormat())
+ dir = controldir.ControlDir.open("d")
+ self.assertEquals("A Dummy VCS Dir", dir._format.get_format_string())
+ dir.open_repository()
+ dir.open_branch()
+ dir.open_workingtree()
+
+ def test_sprout(self):
+ """Test we can clone dummies and that the format is not preserved."""
+ self.make_branch_and_tree("d", format=DummyForeignVcsDirFormat())
+ dir = controldir.ControlDir.open("d")
+ newdir = dir.sprout("e")
+ self.assertNotEquals("A Dummy VCS Dir",
+ newdir._format.get_format_string())
+
+ def test_push_not_supported(self):
+ source_tree = self.make_branch_and_tree("source")
+ target_tree = self.make_branch_and_tree("target",
+ format=DummyForeignVcsDirFormat())
+ self.assertRaises(errors.NoRoundtrippingSupport,
+ source_tree.branch.push, target_tree.branch)
+
+ def test_lossy_push_empty(self):
+ source_tree = self.make_branch_and_tree("source")
+ target_tree = self.make_branch_and_tree("target",
+ format=DummyForeignVcsDirFormat())
+ pushresult = source_tree.branch.push(target_tree.branch, lossy=True)
+ self.assertEquals(revision.NULL_REVISION, pushresult.old_revid)
+ self.assertEquals(revision.NULL_REVISION, pushresult.new_revid)
+ self.assertEquals({}, pushresult.revidmap)
+
+ def test_lossy_push_simple(self):
+ source_tree = self.make_branch_and_tree("source")
+ self.build_tree(['source/a', 'source/b'])
+ source_tree.add(['a', 'b'])
+ revid1 = source_tree.commit("msg")
+ target_tree = self.make_branch_and_tree("target",
+ format=DummyForeignVcsDirFormat())
+ target_tree.branch.lock_write()
+ try:
+ pushresult = source_tree.branch.push(
+ target_tree.branch, lossy=True)
+ finally:
+ target_tree.branch.unlock()
+ self.assertEquals(revision.NULL_REVISION, pushresult.old_revid)
+ self.assertEquals({revid1:target_tree.branch.last_revision()},
+ pushresult.revidmap)
+ self.assertEquals(pushresult.revidmap[revid1], pushresult.new_revid)
diff --git a/bzrlib/tests/test_ftp_transport.py b/bzrlib/tests/test_ftp_transport.py
new file mode 100644
index 0000000..d1ff64d
--- /dev/null
+++ b/bzrlib/tests/test_ftp_transport.py
@@ -0,0 +1,151 @@
+# Copyright (C) 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import ftplib
+import getpass
+
+from bzrlib import (
+ config,
+ errors,
+ tests,
+ transport,
+ ui,
+ urlutils,
+ )
+
+from bzrlib.transport import ftp
+
+from bzrlib.tests import ftp_server
+
+
+class TestCaseWithFTPServer(tests.TestCaseWithTransport):
+
+ _test_needs_features = [ftp_server.FTPServerFeature]
+
+ def setUp(self):
+ self.transport_server = ftp_server.FTPTestServer
+ super(TestCaseWithFTPServer, self).setUp()
+
+
+class TestCaseAFTP(tests.TestCaseWithTransport):
+ """Test aftp transport."""
+
+ def test_aftp_degrade(self):
+ t = transport.get_transport_from_url('aftp://host/path')
+ self.assertTrue(t.is_active)
+ parent = t.clone('..')
+ self.assertTrue(parent.is_active)
+
+ self.assertEqual('aftp://host/path', t.abspath(''))
+
+
+class TestFTPTestServer(TestCaseWithFTPServer):
+
+ def test_basic_exists(self):
+ url = self.get_url()
+ self.assertStartsWith(url, 'ftp://')
+
+ t = self.get_transport()
+ t.put_bytes('foo', 'test bytes\n')
+ self.assertEqual('test bytes\n', t.get_bytes('foo'))
+
+ def test__reconnect(self):
+ t = self.get_transport()
+ t.put_bytes('foo', 'test bytes\n')
+ self.assertEqual('test bytes\n', t.get_bytes('foo'))
+ t._reconnect()
+ t.put_bytes('foo', 'test more bytes\n')
+ self.assertEqual('test more bytes\n', t.get_bytes('foo'))
+
+
+class TestFTPTestServerUI(TestCaseWithFTPServer):
+
+ def setUp(self):
+ super(TestFTPTestServerUI, self).setUp()
+ self.user = 'joe'
+ self.password = 'secret'
+ self.get_server().add_user(self.user, self.password)
+
+ def get_url(self, relpath=None):
+ """Overrides get_url to inject our user."""
+ base = super(TestFTPTestServerUI, self).get_url(relpath)
+ parsed_url = transport.ConnectedTransport._split_url(base)
+ new_url = parsed_url.clone()
+ new_url.user = self.user
+ new_url.quoted_user = urlutils.quote(self.user)
+ new_url.password = self.password
+ new_url.quoted_password = urlutils.quote(self.password)
+ return str(new_url)
+
+ def test_no_prompt_for_username(self):
+ """ensure getpass.getuser() is used if there's no username in the
+ configuration."""
+ self.get_server().add_user(getpass.getuser(), self.password)
+ t = self.get_transport()
+ ui.ui_factory = ui.CannedInputUIFactory([self.password])
+ # Issue a request to the server to connect
+ t.put_bytes('foo', 'test bytes\n')
+ self.assertEqual('test bytes\n', t.get_bytes('foo'))
+ # Only the password should've been read
+ ui.ui_factory.assert_all_input_consumed()
+
+ def test_prompt_for_password(self):
+ t = self.get_transport()
+ ui.ui_factory = ui.CannedInputUIFactory([self.password])
+ # Issue a request to the server to connect
+ t.has('whatever/not/existing')
+ # stdin should be empty (the provided password has been consumed)
+ ui.ui_factory.assert_all_input_consumed()
+
+ def test_no_prompt_for_password_when_using_auth_config(self):
+ t = self.get_transport()
+ ui.ui_factory = ui.CannedInputUIFactory([])
+ # Create a config file with the right password
+ conf = config.AuthenticationConfig()
+ conf._get_config().update({'ftptest': {'scheme': 'ftp',
+ 'user': self.user,
+ 'password': self.password}})
+ conf._save()
+ # Issue a request to the server to connect
+ t.put_bytes('foo', 'test bytes\n')
+ self.assertEqual('test bytes\n', t.get_bytes('foo'))
+
+ def test_empty_password(self):
+ # Override the default user/password from setUp
+ self.user = 'jim'
+ self.password = ''
+ self.get_server().add_user(self.user, self.password)
+ t = self.get_transport()
+ ui.ui_factory = ui.CannedInputUIFactory([self.password])
+ # Issue a request to the server to connect
+ t.has('whatever/not/existing')
+ # stdin should be empty (the provided password has been consumed);
+ # even if the password is empty, it's followed by a newline.
+ ui.ui_factory.assert_all_input_consumed()
+
+
+class TestFTPErrorTranslation(tests.TestCase):
+
+ def test_translate_directory_not_empty(self):
+ # https://bugs.launchpad.net/bugs/528722
+
+ t = ftp.FtpTransport("ftp://none/")
+
+ try:
+ raise ftplib.error_temp("Rename/move failure: Directory not empty")
+ except Exception, e:
+ e = self.assertRaises(errors.DirectoryNotEmpty,
+ t._translate_ftp_error, e, "/path")
diff --git a/bzrlib/tests/test_generate_docs.py b/bzrlib/tests/test_generate_docs.py
new file mode 100644
index 0000000..e61ad9c
--- /dev/null
+++ b/bzrlib/tests/test_generate_docs.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for generating docs (man pages).
+
+This test checks that generation will be successful
+and produce non-empty output.
+"""
+
+from cStringIO import StringIO
+
+import bzrlib.commands
+from bzrlib.tests import TestCase
+
+
+class Options:
+ """Simply container"""
+ pass
+
+
+class TestGenerateDocs(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.sio = StringIO()
+ self.options = Options()
+ self.options.bzr_name = 'bzr'
+ bzrlib.commands.install_bzr_command_hooks()
+
+ def test_man_page(self):
+ from bzrlib.doc_generate import autodoc_man
+
+ autodoc_man.infogen(self.options, self.sio)
+ self.assertNotEqual('', self.sio.getvalue())
+
+ def test_rstx_man(self):
+ from bzrlib.doc_generate import autodoc_rstx
+
+ autodoc_rstx.infogen(self.options, self.sio)
+ self.assertNotEqual('', self.sio.getvalue())
diff --git a/bzrlib/tests/test_generate_ids.py b/bzrlib/tests/test_generate_ids.py
new file mode 100644
index 0000000..919cf44
--- /dev/null
+++ b/bzrlib/tests/test_generate_ids.py
@@ -0,0 +1,157 @@
+# Copyright (C) 2006, 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib/generate_ids.py"""
+
+from bzrlib import (
+ generate_ids,
+ tests,
+ )
+
+
+class TestFileIds(tests.TestCase):
+ """Test functions which generate file ids"""
+
+ def assertGenFileId(self, regex, filename):
+ """gen_file_id should create a file id matching the regex.
+
+ The file id should be ascii, and should be an 8-bit string
+ """
+ file_id = generate_ids.gen_file_id(filename)
+ self.assertContainsRe(file_id, '^'+regex+'$')
+ # It should be a utf8 file_id, not a unicode one
+ self.assertIsInstance(file_id, str)
+ # gen_file_id should always return ascii file ids.
+ file_id.decode('ascii')
+
+ def test_gen_file_id(self):
+ gen_file_id = generate_ids.gen_file_id
+
+ # We try to use the filename if possible
+ self.assertStartsWith(gen_file_id('bar'), 'bar-')
+
+ # but we squash capitalization, and remove non word characters
+ self.assertStartsWith(gen_file_id('Mwoo oof\t m'), 'mwoooofm-')
+
+ # We also remove leading '.' characters to prevent hidden file-ids
+ self.assertStartsWith(gen_file_id('..gam.py'), 'gam.py-')
+ self.assertStartsWith(gen_file_id('..Mwoo oof\t m'), 'mwoooofm-')
+
+ # we remove unicode characters, and still don't end up with a
+ # hidden file id
+ self.assertStartsWith(gen_file_id(u'\xe5\xb5.txt'), 'txt-')
+
+ # Our current method of generating unique ids adds 33 characters
+ # plus a serial number (log10(N) characters)
+ # to the end of the filename. We now restrict the filename portion to
+ # be <= 20 characters, so the maximum length should now be approx < 60
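+ # For example, with the tail asserted in test_file_ids_are_ascii below
+ # (r'-\d{14}-[a-z0-9]{16}-\d+'): 20 (name) + 1 + 14 (timestamp) + 1
+ # + 16 (random) + 1 + the serial digits, i.e. 53 plus a few digits.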
+
+ # Test both case squashing and length restriction
+ fid = gen_file_id('A'*50 + '.txt')
+ self.assertStartsWith(fid, 'a'*20 + '-')
+ self.assertTrue(len(fid) < 60)
+
+ # restricting length happens after the other actions, so
+ # we preserve as much as possible
+ fid = gen_file_id('\xe5\xb5..aBcd\tefGhijKLMnop\tqrstuvwxyz')
+ self.assertStartsWith(fid, 'abcdefghijklmnopqrst-')
+ self.assertTrue(len(fid) < 60)
+
+ def test_file_ids_are_ascii(self):
+ tail = r'-\d{14}-[a-z0-9]{16}-\d+'
+ self.assertGenFileId('foo' + tail, 'foo')
+ self.assertGenFileId('foo' + tail, u'foo')
+ self.assertGenFileId('bar' + tail, u'bar')
+ self.assertGenFileId('br' + tail, u'b\xe5r')
+
+ def test__next_id_suffix_sets_suffix(self):
+ generate_ids._gen_file_id_suffix = None
+ generate_ids._next_id_suffix()
+ self.assertNotEqual(None, generate_ids._gen_file_id_suffix)
+
+ def test__next_id_suffix_increments(self):
+ generate_ids._gen_file_id_suffix = "foo-"
+ generate_ids._gen_file_id_serial = 1
+ try:
+ self.assertEqual("foo-2", generate_ids._next_id_suffix())
+ self.assertEqual("foo-3", generate_ids._next_id_suffix())
+ self.assertEqual("foo-4", generate_ids._next_id_suffix())
+ self.assertEqual("foo-5", generate_ids._next_id_suffix())
+ self.assertEqual("foo-6", generate_ids._next_id_suffix())
+ self.assertEqual("foo-7", generate_ids._next_id_suffix())
+ self.assertEqual("foo-8", generate_ids._next_id_suffix())
+ self.assertEqual("foo-9", generate_ids._next_id_suffix())
+ self.assertEqual("foo-10", generate_ids._next_id_suffix())
+ finally:
+ # Reset so that all future ids generated in the test suite
+ # don't end in 'foo-XXX'
+ generate_ids._gen_file_id_suffix = None
+ generate_ids._gen_file_id_serial = 0
+
+ def test_gen_root_id(self):
+ # Mostly just make sure gen_root_id() exists
+ root_id = generate_ids.gen_root_id()
+ self.assertStartsWith(root_id, 'tree_root-')
+
+
+class TestGenRevisionId(tests.TestCase):
+ """Test generating revision ids"""
+
+ def assertGenRevisionId(self, regex, username, timestamp=None):
+ """gen_revision_id should create a revision id matching the regex"""
+ revision_id = generate_ids.gen_revision_id(username, timestamp)
+ self.assertContainsRe(revision_id, '^'+regex+'$')
+ # It should be a utf8 revision_id, not a unicode one
+ self.assertIsInstance(revision_id, str)
+ # gen_revision_id should always return ascii revision ids.
+ revision_id.decode('ascii')
+
+ def test_timestamp(self):
+ """passing a timestamp should cause it to be used"""
+ self.assertGenRevisionId(r'user@host-\d{14}-[a-z0-9]{16}', 'user@host')
+ self.assertGenRevisionId('user@host-20061102205056-[a-z0-9]{16}',
+ 'user@host', 1162500656.688)
+ self.assertGenRevisionId(r'user@host-20061102205024-[a-z0-9]{16}',
+ 'user@host', 1162500624.000)
+
+ def test_gen_revision_id_email(self):
+ """gen_revision_id uses email address if present"""
+ regex = r'user\+joe_bar@foo-bar\.com-\d{14}-[a-z0-9]{16}'
+ self.assertGenRevisionId(regex,'user+joe_bar@foo-bar.com')
+ self.assertGenRevisionId(regex, '<user+joe_bar@foo-bar.com>')
+ self.assertGenRevisionId(regex, 'Joe Bar <user+joe_bar@foo-bar.com>')
+ self.assertGenRevisionId(regex, 'Joe Bar <user+Joe_Bar@Foo-Bar.com>')
+ self.assertGenRevisionId(regex, u'Joe B\xe5r <user+Joe_Bar@Foo-Bar.com>')
+
+ def test_gen_revision_id_user(self):
+ """If there is no email, fall back to the whole username"""
+ tail = r'-\d{14}-[a-z0-9]{16}'
+ self.assertGenRevisionId('joe_bar' + tail, 'Joe Bar')
+ self.assertGenRevisionId('joebar' + tail, 'joebar')
+ self.assertGenRevisionId('joe_br' + tail, u'Joe B\xe5r')
+ self.assertGenRevisionId(r'joe_br_user\+joe_bar_foo-bar.com' + tail,
+ u'Joe B\xe5r <user+Joe_Bar_Foo-Bar.com>')
+
+ def test_revision_ids_are_ascii(self):
+ """gen_revision_id should always return an ascii revision id."""
+ tail = r'-\d{14}-[a-z0-9]{16}'
+ self.assertGenRevisionId('joe_bar' + tail, 'Joe Bar')
+ self.assertGenRevisionId('joe_bar' + tail, u'Joe Bar')
+ self.assertGenRevisionId('joe@foo' + tail, u'Joe Bar <joe@foo>')
+ # We cheat a little with this one, because email-addresses shouldn't
+ # contain non-ascii characters, but generate_ids should strip them
+ # anyway.
+ self.assertGenRevisionId('joe@f' + tail, u'Joe Bar <joe@f\xb6>')
diff --git a/bzrlib/tests/test_globbing.py b/bzrlib/tests/test_globbing.py
new file mode 100644
index 0000000..ea6e2a2
--- /dev/null
+++ b/bzrlib/tests/test_globbing.py
@@ -0,0 +1,386 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import re
+
+from bzrlib import errors
+from bzrlib.globbing import (
+ Globster,
+ ExceptionGlobster,
+ _OrderedGlobster,
+ normalize_pattern
+ )
+from bzrlib.tests import (
+ TestCase,
+ )
+
+
+class TestGlobster(TestCase):
+
+ def assertMatch(self, matchset, glob_prefix=None):
+ for glob, positive, negative in matchset:
+ if glob_prefix:
+ glob = glob_prefix + glob
+ globster = Globster([glob])
+ for name in positive:
+ self.assertTrue(globster.match(name), repr(
+ u'name "%s" does not match glob "%s" (re=%s)' %
+ (name, glob, globster._regex_patterns[0][0].pattern)))
+ for name in negative:
+ self.assertFalse(globster.match(name), repr(
+ u'name "%s" does match glob "%s" (re=%s)' %
+ (name, glob, globster._regex_patterns[0][0].pattern)))
+
+ def assertMatchBasenameAndFullpath(self, matchset):
+ # test basename matcher
+ self.assertMatch(matchset)
+ # test fullpath matcher
+ self.assertMatch(matchset, glob_prefix='./')
+
+ def test_char_group_digit(self):
+ self.assertMatchBasenameAndFullpath([
+ # The definition of digit this uses includes arabic digits from
+ # non-latin scripts (arabic, indic, etc.) but neither roman
+ # numerals nor vulgar fractions. Some characters such as
+ # subscript/superscript digits may or may not match depending on
+ # the Python version used, see: <http://bugs.python.org/issue6561>
+ (u'[[:digit:]]',
+ [u'0', u'5', u'\u0663', u'\u06f9', u'\u0f21'],
+ [u'T', u'q', u' ', u'\u8336', u'.']),
+ (u'[^[:digit:]]',
+ [u'T', u'q', u' ', u'\u8336', u'.'],
+ [u'0', u'5', u'\u0663', u'\u06f9', u'\u0f21']),
+ ])
+
+ def test_char_group_space(self):
+ self.assertMatchBasenameAndFullpath([
+ (u'[[:space:]]',
+ [u' ', u'\t', u'\n', u'\xa0', u'\u2000', u'\u2002'],
+ [u'a', u'-', u'\u8336', u'.']),
+ (u'[^[:space:]]',
+ [u'a', u'-', u'\u8336', u'.'],
+ [u' ', u'\t', u'\n', u'\xa0', u'\u2000', u'\u2002']),
+ ])
+
+ def test_char_group_alnum(self):
+ self.assertMatchBasenameAndFullpath([
+ (u'[[:alnum:]]',
+ [u'a', u'Z', u'\u017e', u'\u8336'],
+ [u':', u'-', u'\u25cf', u'.']),
+ (u'[^[:alnum:]]',
+ [u':', u'-', u'\u25cf', u'.'],
+ [u'a']),
+ ])
+
+ def test_char_group_ascii(self):
+ self.assertMatchBasenameAndFullpath([
+ (u'[[:ascii:]]',
+ [u'a', u'Q', u'^', u'.'],
+ [u'\xcc', u'\u8336']),
+ (u'[^[:ascii:]]',
+ [u'\xcc', u'\u8336'],
+ [u'a', u'Q', u'^', u'.']),
+ ])
+
+ def test_char_group_blank(self):
+ self.assertMatchBasenameAndFullpath([
+ (u'[[:blank:]]',
+ [u'\t'],
+ [u'x', u'y', u'z', u'.']),
+ (u'[^[:blank:]]',
+ [u'x', u'y', u'z', u'.'],
+ [u'\t']),
+ ])
+
+ def test_char_group_cntrl(self):
+ self.assertMatchBasenameAndFullpath([
+ (u'[[:cntrl:]]',
+ [u'\b', u'\t', '\x7f'],
+ [u'a', u'Q', u'\u8336', u'.']),
+ (u'[^[:cntrl:]]',
+ [u'a', u'Q', u'\u8336', u'.'],
+ [u'\b', u'\t', '\x7f']),
+ ])
+
+ def test_char_group_range(self):
+ self.assertMatchBasenameAndFullpath([
+ (u'[a-z]',
+ [u'a', u'q', u'f'],
+ [u'A', u'Q', u'F']),
+ (u'[^a-z]',
+ [u'A', u'Q', u'F'],
+ [u'a', u'q', u'f']),
+ (u'[!a-z]foo',
+ [u'Afoo', u'.foo'],
+ [u'afoo', u'ABfoo']),
+ (u'foo[!a-z]bar',
+ [u'fooAbar', u'foo.bar'],
+ [u'foojbar']),
+ (u'[\x20-\x30\u8336]',
+ [u'\040', u'\044', u'\u8336'],
+ [u'\x1f']),
+ (u'[^\x20-\x30\u8336]',
+ [u'\x1f'],
+ [u'\040', u'\044', u'\u8336']),
+ ])
+
+ def test_regex(self):
+ self.assertMatch([
+ (u'RE:(a|b|c+)',
+ [u'a', u'b', u'ccc'],
+ [u'd', u'aa', u'c+', u'-a']),
+ (u'RE:(?:a|b|c+)',
+ [u'a', u'b', u'ccc'],
+ [u'd', u'aa', u'c+', u'-a']),
+ (u'RE:(?P<a>.)(?P=a)',
+ [u'a'],
+ [u'ab', u'aa', u'aaa']),
+ # test we can handle odd numbers of trailing backslashes
+ (u'RE:a\\\\\\',
+ [u'a\\'],
+ [u'a', u'ab', u'aa', u'aaa']),
+ ])
+
+ def test_question_mark(self):
+ self.assertMatch([
+ (u'?foo',
+ [u'xfoo', u'bar/xfoo', u'bar/\u8336foo', u'.foo', u'bar/.foo'],
+ [u'bar/foo', u'foo']),
+ (u'foo?bar',
+ [u'fooxbar', u'foo.bar', u'foo\u8336bar', u'qyzzy/foo.bar'],
+ [u'foo/bar']),
+ (u'foo/?bar',
+ [u'foo/xbar', u'foo/\u8336bar', u'foo/.bar'],
+ [u'foo/bar', u'bar/foo/xbar']),
+ ])
+
+ def test_asterisk(self):
+ self.assertMatch([
+ (u'x*x',
+ [u'xx', u'x.x', u'x\u8336..x', u'\u8336/x.x', u'x.y.x'],
+ [u'x/x', u'bar/x/bar/x', u'bax/abaxab']),
+ (u'foo/*x',
+ [u'foo/x', u'foo/bax', u'foo/a.x', u'foo/.x', u'foo/.q.x'],
+ [u'foo/bar/bax']),
+ (u'*/*x',
+ [u'\u8336/x', u'foo/x', u'foo/bax', u'x/a.x', u'.foo/x',
+ u'\u8336/.x', u'foo/.q.x'],
+ [u'foo/bar/bax']),
+ (u'f*',
+ [u'foo', u'foo.bar'],
+ [u'.foo', u'foo/bar', u'foo/.bar']),
+ (u'*bar',
+ [u'bar', u'foobar', ur'foo\nbar', u'foo.bar', u'foo/bar',
+ u'foo/foobar', u'foo/f.bar', u'.bar', u'foo/.bar'],
+ []),
+ ])
+
+ def test_double_asterisk(self):
+ self.assertMatch([
+ # expected uses of double asterisk
+ (u'foo/**/x',
+ [u'foo/x', u'foo/bar/x'],
+ [u'foox', u'foo/bax', u'foo/.x', u'foo/bar/bax']),
+ (u'**/bar',
+ [u'bar', u'foo/bar'],
+ [u'foobar', u'foo.bar', u'foo/foobar', u'foo/f.bar',
+ u'.bar', u'foo/.bar']),
+ # check that we ignore extra *s, so *** is treated like ** not *.
+ (u'foo/***/x',
+ [u'foo/x', u'foo/bar/x'],
+ [u'foox', u'foo/bax', u'foo/.x', u'foo/bar/bax']),
+ (u'***/bar',
+ [u'bar', u'foo/bar'],
+ [u'foobar', u'foo.bar', u'foo/foobar', u'foo/f.bar',
+ u'.bar', u'foo/.bar']),
+ # the remaining tests check that ** is interpreted as *
+ # unless it is a whole path component
+ (u'x**/x',
+ [u'x\u8336/x', u'x/x'],
+ [u'xx', u'x.x', u'bar/x/bar/x', u'x.y.x', u'x/y/x']),
+ (u'x**x',
+ [u'xx', u'x.x', u'x\u8336..x', u'foo/x.x', u'x.y.x'],
+ [u'bar/x/bar/x', u'xfoo/bar/x', u'x/x', u'bax/abaxab']),
+ (u'foo/**x',
+ [u'foo/x', u'foo/bax', u'foo/a.x', u'foo/.x', u'foo/.q.x'],
+ [u'foo/bar/bax']),
+ (u'f**',
+ [u'foo', u'foo.bar'],
+ [u'.foo', u'foo/bar', u'foo/.bar']),
+ (u'**bar',
+ [u'bar', u'foobar', ur'foo\nbar', u'foo.bar', u'foo/bar',
+ u'foo/foobar', u'foo/f.bar', u'.bar', u'foo/.bar'],
+ []),
+ ])
+
+ def test_leading_dot_slash(self):
+ self.assertMatch([
+ (u'./foo',
+ [u'foo'],
+ [u'\u8336/foo', u'barfoo', u'x/y/foo']),
+ (u'./f*',
+ [u'foo'],
+ [u'foo/bar', u'foo/.bar', u'x/foo/y']),
+ ])
+
+ def test_backslash(self):
+ self.assertMatch([
+ (u'.\\foo',
+ [u'foo'],
+ [u'\u8336/foo', u'barfoo', u'x/y/foo']),
+ (u'.\\f*',
+ [u'foo'],
+ [u'foo/bar', u'foo/.bar', u'x/foo/y']),
+ (u'foo\\**\\x',
+ [u'foo/x', u'foo/bar/x'],
+ [u'foox', u'foo/bax', u'foo/.x', u'foo/bar/bax']),
+ ])
+
+ def test_trailing_slash(self):
+ self.assertMatch([
+ (u'./foo/',
+ [u'foo'],
+ [u'\u8336/foo', u'barfoo', u'x/y/foo']),
+ (u'.\\foo\\',
+ [u'foo'],
+ [u'foo/', u'\u8336/foo', u'barfoo', u'x/y/foo']),
+ ])
+
+ def test_leading_asterisk_dot(self):
+ self.assertMatch([
+ (u'*.x',
+ [u'foo/bar/baz.x', u'\u8336/Q.x', u'foo.y.x', u'.foo.x',
+ u'bar/.foo.x', u'.x',],
+ [u'foo.x.y']),
+ (u'foo/*.bar',
+ [u'foo/b.bar', u'foo/a.b.bar', u'foo/.bar'],
+ [u'foo/bar']),
+ (u'*.~*',
+ [u'foo.py.~1~', u'.foo.py.~1~'],
+ []),
+ ])
+
+ def test_end_anchor(self):
+ self.assertMatch([
+ (u'*.333',
+ [u'foo.333'],
+ [u'foo.3']),
+ (u'*.3',
+ [u'foo.3'],
+ [u'foo.333']),
+ ])
+
+ def test_mixed_globs(self):
+ """tests handling of combinations of path type matches.
+
+ The types are extension, basename and full path.
+ """
+ patterns = [ u'*.foo', u'.*.swp', u'./*.png']
+ globster = Globster(patterns)
+ self.assertEqual(u'*.foo', globster.match('bar.foo'))
+ self.assertEqual(u'./*.png', globster.match('foo.png'))
+ self.assertEqual(None, globster.match('foo/bar.png'))
+ self.assertEqual(u'.*.swp', globster.match('foo/.bar.py.swp'))
+
+ def test_large_globset(self):
+ """tests that the globster can handle a large set of patterns.
+
+ Large is defined as more than the 99 groups supported by a single
+ Python regex.
+ This test assumes the globs are broken into regexes containing 99
+ groups each.
+ """
+ patterns = [ u'*.%03d' % i for i in xrange(0,300) ]
+ globster = Globster(patterns)
+ # test the fence posts
+ for x in (0,98,99,197,198,296,297,299):
+ filename = u'foo.%03d' % x
+ self.assertEqual(patterns[x],globster.match(filename))
+ self.assertEqual(None,globster.match('foobar.300'))
+
+ def test_bad_pattern(self):
+ """Ensure that globster handles bad patterns cleanly."""
+ patterns = [u'RE:[', u'/home/foo', u'RE:*.cpp']
+ g = Globster(patterns)
+ e = self.assertRaises(errors.InvalidPattern, g.match, 'filename')
+ self.assertContainsRe(e.msg,
+ "File.*ignore.*contains error.*RE:\[.*RE:\*\.cpp", flags=re.DOTALL)
+
+
+class TestExceptionGlobster(TestCase):
+
+ def test_exclusion_patterns(self):
+ """test that exception patterns are not matched"""
+ patterns = [ u'*', u'!./local', u'!./local/**/*', u'!RE:\.z.*',u'!!./.zcompdump' ]
+ globster = ExceptionGlobster(patterns)
+ self.assertEqual(u'*', globster.match('tmp/foo.txt'))
+ self.assertEqual(None, globster.match('local'))
+ self.assertEqual(None, globster.match('local/bin/wombat'))
+ self.assertEqual(None, globster.match('.zshrc'))
+ self.assertEqual(None, globster.match('.zfunctions/fiddle/flam'))
+ self.assertEqual(u'!!./.zcompdump', globster.match('.zcompdump'))
+
+ def test_exclusion_order(self):
+ """test that ordering of exclusion patterns does not matter"""
+ patterns = [ u'static/**/*.html', u'!static/**/versionable.html']
+ globster = ExceptionGlobster(patterns)
+ self.assertEqual(u'static/**/*.html', globster.match('static/foo.html'))
+ self.assertEqual(None, globster.match('static/versionable.html'))
+ self.assertEqual(None, globster.match('static/bar/versionable.html'))
+ globster = ExceptionGlobster(reversed(patterns))
+ self.assertEqual(u'static/**/*.html', globster.match('static/foo.html'))
+ self.assertEqual(None, globster.match('static/versionable.html'))
+ self.assertEqual(None, globster.match('static/bar/versionable.html'))
+
+
+class TestOrderedGlobster(TestCase):
+
+ def test_ordered_globs(self):
+ """test that the first match in a list is the one found"""
+ patterns = [ u'*.foo', u'bar.*']
+ globster = _OrderedGlobster(patterns)
+ self.assertEqual(u'*.foo', globster.match('bar.foo'))
+ self.assertEqual(None, globster.match('foo.bar'))
+ globster = _OrderedGlobster(reversed(patterns))
+ self.assertEqual(u'bar.*', globster.match('bar.foo'))
+ self.assertEqual(None, globster.match('foo.bar'))
+
+
+class TestNormalizePattern(TestCase):
+
+ def test_backslashes(self):
+ """tests that backslashes are converted to forward slashes, multiple
+ backslashes are collapsed to single forward slashes and trailing
+ backslashes are removed"""
+ self.assertEqual(u'/', normalize_pattern(u'\\'))
+ self.assertEqual(u'/', normalize_pattern(u'\\\\'))
+ self.assertEqual(u'/foo/bar', normalize_pattern(u'\\foo\\bar'))
+ self.assertEqual(u'foo/bar', normalize_pattern(u'foo\\bar\\'))
+ self.assertEqual(u'/foo/bar', normalize_pattern(u'\\\\foo\\\\bar\\\\'))
+
+ def test_forward_slashes(self):
+ """tests that multiple foward slashes are collapsed to single forward
+ slashes and trailing forward slashes are removed"""
+ self.assertEqual(u'/', normalize_pattern(u'/'))
+ self.assertEqual(u'/', normalize_pattern(u'//'))
+ self.assertEqual(u'/foo/bar', normalize_pattern(u'/foo/bar'))
+ self.assertEqual(u'foo/bar', normalize_pattern(u'foo/bar/'))
+ self.assertEqual(u'/foo/bar', normalize_pattern(u'//foo//bar//'))
+
+ def test_mixed_slashes(self):
+ """tests that multiple mixed slashes are collapsed to single forward
+ slashes and trailing mixed slashes are removed"""
+ self.assertEqual(u'/foo/bar', normalize_pattern(u'\\/\\foo//\\///bar/\\\\/'))
diff --git a/bzrlib/tests/test_gpg.py b/bzrlib/tests/test_gpg.py
new file mode 100644
index 0000000..85dbb0f
--- /dev/null
+++ b/bzrlib/tests/test_gpg.py
@@ -0,0 +1,524 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for signing and verifying blobs of data via gpg."""
+
+# import system imports here
+import sys
+
+from bzrlib import (
+ config,
+ errors,
+ gpg,
+ tests,
+ trace,
+ ui,
+ )
+from bzrlib.tests import (
+ TestCase,
+ features,
+ )
+
+
+class FakeConfig(config.MemoryStack):
+
+ def __init__(self, content=None):
+ if content is None:
+ content = '''
+gpg_signing_key=amy@example.com
+gpg_signing_command=false'''
+ super(FakeConfig, self).__init__(content)
+
+
+class TestCommandLine(tests.TestCase):
+
+ def setUp(self):
+ super(TestCommandLine, self).setUp()
+ self.my_gpg = gpg.GPGStrategy(FakeConfig())
+
+ def test_signing_command_line(self):
+ self.assertEqual(['false', '--clearsign', '-u', 'amy@example.com'],
+ self.my_gpg._command_line())
+
+ def test_signing_command_line_from_default(self):
+ # Using 'default' for gpg_signing_key will use the mail part of 'email'
+ my_gpg = gpg.GPGStrategy(FakeConfig('''
+email=Amy <amy@example.com>
+gpg_signing_key=default
+gpg_signing_command=false'''))
+ self.assertEqual(['false', '--clearsign', '-u', 'amy@example.com'],
+ my_gpg._command_line())
+
+ def test_signing_command_line_from_email(self):
+ # Not setting gpg_signing_key will use the mail part of 'email'
+ my_gpg = gpg.GPGStrategy(FakeConfig('''
+email=Amy <amy@example.com>
+gpg_signing_command=false'''))
+ self.assertEqual(['false', '--clearsign', '-u', 'amy@example.com'],
+ my_gpg._command_line())
+
+ def test_checks_return_code(self):
+ # This test needs a unix-like platform - one with 'false' to run.
+ # If you have one, please make this work :)
+ self.assertRaises(errors.SigningFailed, self.my_gpg.sign, 'content')
+
+ def assertProduces(self, content):
+ # This needs a 'cat' command or similar to work.
+ if sys.platform == 'win32':
+ # Windows doesn't come with cat, and we don't require it
+ # so let's try using python instead.
+ # But stupid windows and line-ending conversions.
+ # It is too much work to make sys.stdout be in binary mode.
+ # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65443
+ self.my_gpg._command_line = lambda:[sys.executable, '-c',
+ 'import sys; sys.stdout.write(sys.stdin.read())']
+ new_content = content.replace('\n', '\r\n')
+
+ self.assertEqual(new_content, self.my_gpg.sign(content))
+ else:
+ self.my_gpg._command_line = lambda:['cat', '-']
+ self.assertEqual(content, self.my_gpg.sign(content))
+
+ def test_returns_output(self):
+ content = "some content\nwith newlines\n"
+ self.assertProduces(content)
+
+ def test_clears_progress(self):
+ content = "some content\nwith newlines\n"
+ old_clear_term = ui.ui_factory.clear_term
+ clear_term_called = []
+ def clear_term():
+ old_clear_term()
+ clear_term_called.append(True)
+ ui.ui_factory.clear_term = clear_term
+ try:
+ self.assertProduces(content)
+ finally:
+ ui.ui_factory.clear_term = old_clear_term
+ self.assertEqual([True], clear_term_called)
+
+ def test_aborts_on_unicode(self):
+ """You can't sign Unicode text; it must be encoded first."""
+ self.assertRaises(errors.BzrBadParameterUnicode,
+ self.assertProduces, u'foo')
+
+
+class TestVerify(TestCase):
+
+ def import_keys(self):
+ from StringIO import StringIO
+ import gpgme
+ context = gpgme.Context()
+
+ key = StringIO("""-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+mQENBE343IgBCADwzPW7kmKb2bjB+UU+1ER/ABMZspvtoZMPusUw7bk6coXHF/0W
+u1K/hSYeX9xaGOfOQw41r/g13MoR9dsL6L84RLiisf38rRoBZt+d5bCbZA5Xo801
+2PeoBoGo6u5oOYKAFLMvrUitPiiE0IT/oQTfC4YUrLN4A+9W0QZruPGIpIXwmZXr
+L0zsqYfNqIN0ompeJenVpKpvm3loJ/zfK7R3EJ3hsv6nkUmWCFsP1Pw3UV1YuCmw
+Mkdn1U7DaOql1WjXgj9ABQDJrun2TGsqrSRzBODtHKA/uOX0K3VfKBU8VZo3dXUm
+1Q4ZeZC39L9qJGTH8TQYlwBLe1yAOp+vx7QJABEBAAG0JEJhemFhciBUZXN0IEtl
+eSA8YmF6YWFyQGV4YW1wbGUuY29tPokBOAQTAQIAIgUCTfjciAIbAwYLCQgHAwIG
+FQgCCQoLBBYCAwECHgECF4AACgkQh2gbHuMIDkWJUggAwj537fH6WW+GGLA5onys
+2hZmXUq/tU+L92bjQoRY4fmsQpk/FUVPUf+NQ0v1gkxx4BTfyYewaj5G6L8cvqW2
+jj7UiJd8z9gTRxWTnYwfR/w5PGmxfJsBfEUKWsccrPQdOXAhwu0fjYIVk4nqgswa
+IOAZIwe5Vsfs36uSS7p8RQHAZXLXtTOn3KcXHaxu83w6nc4zkWRovGJ9isBN3haO
+2qEa0mYiAfDpz40CGtb8N/TQHF3Xcw8rJcxpg6RF3jMtWQnzbVJFp13it00R3LqW
+o/r3RII3Ii3z2yARlg6D+5hVOrFBV8jFLkff1R2ZnVu+7WOrnbpmt3OiMkSeZrtB
+OrkBDQRN+NyIAQgArRZ2YGzUj5dXOVIWgZ1/QFpyfx/cG/293WjRE4Wt2e4SxMf2
+V0dcVCqWwT0+a79Wbausv4bStD4SkwDmu0Jf3z5ERzrr7oZwP0PMsIlM5zT6XSsr
+6UUneB3UXX7MrEqVogVhRM0ORIaK/oRwMXr7K6xVT+bCBP3/p66kHtY1ZpfEzTEX
+imBsN3GqoewBHYIneJKBtHE7uzdzw3O5p5dXqoj5foxGi9R1J15vAmt5pI68HJeX
+P6ktvXbX2Iu7VDNoCvRXM9+ntyJtsXCjNXg4pTGHS/XO4nm2db4FUZOBcVMb1vCc
+VtFjLTcbCqJqpoJWUtsLcNqDqMHOQDpe6KTNTQARAQABiQEfBBgBAgAJBQJN+NyI
+AhsMAAoJEIdoGx7jCA5FrR8IANnOF3PUj1TbRcwV6RoWmHsFQHrPmM8ogXia1Lsv
+jE1iEWoC+muvKh6Oydf90k6ZslS7rdDnp2qzYY8W/TiDkxP+fvsZ4mMi1Y0F+3ty
+1jzWhcsnB2VrJSiavxEXk0tKPrNv4EUGWG6wHsC9TBj37If+nrMyim94VHvI0eHm
+X8yMlN4O3HfmgD9CbJdUxueP3e31OIYuwh/6F7GII8TNEVHU/8vh/mQcCxppNbc+
+boff+kIsoa/TAMLwtJoSrX1nXm0K3vZePRLnIgmwVzdkOIkaRJUG2tSQFvkfhvtE
+LhnkL5l4MO0wrUds0UWRwa3d7j/P2ExrqXdlLmEzrifWyEQ=
+=hUJn
+-----END PGP PUBLIC KEY BLOCK-----
+""")
+
+ secret_key = StringIO("""-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+lQOYBE343IgBCADwzPW7kmKb2bjB+UU+1ER/ABMZspvtoZMPusUw7bk6coXHF/0W
+u1K/hSYeX9xaGOfOQw41r/g13MoR9dsL6L84RLiisf38rRoBZt+d5bCbZA5Xo801
+2PeoBoGo6u5oOYKAFLMvrUitPiiE0IT/oQTfC4YUrLN4A+9W0QZruPGIpIXwmZXr
+L0zsqYfNqIN0ompeJenVpKpvm3loJ/zfK7R3EJ3hsv6nkUmWCFsP1Pw3UV1YuCmw
+Mkdn1U7DaOql1WjXgj9ABQDJrun2TGsqrSRzBODtHKA/uOX0K3VfKBU8VZo3dXUm
+1Q4ZeZC39L9qJGTH8TQYlwBLe1yAOp+vx7QJABEBAAEAB/0RJTbV991SOtVfPQVu
+LM+tD0SiOXJwIBIINlngsFHWVIiBSDb6uF8dneMR70IRnuEFHFyAUXA7PZDxvcSu
+phAqIdKCWxQPkAULAS0o4U2K3ZFGh4uOqvfZ8eSnh1rETFv7Yf3u23K89cZiy99n
+EtWgSqzC/2z5PaZ7/alsYCBqhHuyd4Phaud7qv7FTz8mFrCf+CCY+D08wbnZBu4g
+N9tBwoxT/UKRfv3nghIh9v+3qWfBEFGhrYbt92XKFbHOQeATZz8AGIv1eqN/+ZQY
+oYmvVfO3GkrWaRoPeJNLqSDEn/45O1Uh9MJ4mQclXqB0QzMShle8uusHxIeJSQsR
+z//VBAD11WS7qSgCeiHR+4jDzrrlb2snnA2bfDToEomDxd/n8xm7nJWdkNfJ2BCw
+KvnxYVxjFNAwkKJGRajzALBLzRVO+K9NtSLiddv5zv+UNdgsKuE8tD7Jqxd/IbWw
+AimCtL8osnJ+r9dvL+NyjkAT6l/NdEbLXGrBaMeTfSgl2cBOOwQA+sJIh1R5PiCK
+nLIs9pm3PSy3w92Peelq/x/+0aebTZaJUk2ou3oCvB3druDqrUeaopuuCc0drV7C
+Ldoey8x/T2ZGzmT2af9qNaD6ScTimDodXcJdwlpobhZTKpsE4EyywpLXtlWte1x0
+1Mq3llQsIdRdf3GLS+L207hWgKDiDosD/0SyOBO/IBDteeEzeN2hNE3A8oeVbvRS
+XrS/3uj6oKmlWUBORYP8ptUrXPoVPmNz2y4GO+OysFtfct3Yqb+Sb/52SXMOHTox
+2oLW08tkzfkDArU5aauMEPmyutGyJ+hGo7fsuLXzXR8OPw4yZJdzG1tRlP2TTKmq
+Fx8G/Ik6bN4zTYK0JEJhemFhciBUZXN0IEtleSA8YmF6YWFyQGV4YW1wbGUuY29t
+PokBOAQTAQIAIgUCTfjciAIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ
+h2gbHuMIDkWJUggAwj537fH6WW+GGLA5onys2hZmXUq/tU+L92bjQoRY4fmsQpk/
+FUVPUf+NQ0v1gkxx4BTfyYewaj5G6L8cvqW2jj7UiJd8z9gTRxWTnYwfR/w5PGmx
+fJsBfEUKWsccrPQdOXAhwu0fjYIVk4nqgswaIOAZIwe5Vsfs36uSS7p8RQHAZXLX
+tTOn3KcXHaxu83w6nc4zkWRovGJ9isBN3haO2qEa0mYiAfDpz40CGtb8N/TQHF3X
+cw8rJcxpg6RF3jMtWQnzbVJFp13it00R3LqWo/r3RII3Ii3z2yARlg6D+5hVOrFB
+V8jFLkff1R2ZnVu+7WOrnbpmt3OiMkSeZrtBOp0DlwRN+NyIAQgArRZ2YGzUj5dX
+OVIWgZ1/QFpyfx/cG/293WjRE4Wt2e4SxMf2V0dcVCqWwT0+a79Wbausv4bStD4S
+kwDmu0Jf3z5ERzrr7oZwP0PMsIlM5zT6XSsr6UUneB3UXX7MrEqVogVhRM0ORIaK
+/oRwMXr7K6xVT+bCBP3/p66kHtY1ZpfEzTEXimBsN3GqoewBHYIneJKBtHE7uzdz
+w3O5p5dXqoj5foxGi9R1J15vAmt5pI68HJeXP6ktvXbX2Iu7VDNoCvRXM9+ntyJt
+sXCjNXg4pTGHS/XO4nm2db4FUZOBcVMb1vCcVtFjLTcbCqJqpoJWUtsLcNqDqMHO
+QDpe6KTNTQARAQABAAf1EfceUlGLvoA/+yDTNTMjuPfzfKwbB/FOVfX44g3Za1eT
+v7RvSuj4rFYIdE9UvZEei/pqPOSc+hhSsKZCulGXD5TUpf3AyG7ipWU/kID46Csp
+0V08DPpFHnuw/N6+qNo5iSnhN9U1XMLjYT5d1HvKur26r2vWbmUTSJ1qIluHL2fT
+R1pKYYLuoff4MIjZ01Hawq72jjor+dLBmMWveHpq4XNp+vQ4x8aFnY9ozufon0nM
+uRSJRlQjDNB274tvUbmDFP+nzNbqF1nBTZ6FTdH/iKVNbytiYF7Hbat8GWVZqY1u
+CZr7BklpIVWlk62ll0psMIPVyANi7YT332LLqYmBBADJKTx2dariG/kWU2W/9VEO
+2VZpqsqazAxOoFEIOpcOlByhhyw5g0IKu0UyzHkhoCje0cWxpdSBFG432b8zL0AT
+Z0RycfUG7Sgp9CpY1h8Cc/HbBa8xo1fSM7zplPQrHBqUzlVVBq6HOkUq+7qsPFWc
+RRie95VsDmIMKQKPJHeYHQQA3EYGit+QHV0dccAInghEsf/mq8Gfnvo6HPYhWcDC
+DTM39NhNlnl1WkTFCd2TWc+TWQ4KlRsh6bMjUpNa2qjrUl90fLekbogcxxMhcwa6
+xgzEANZfwqdY0u3aB/CyZ6odfThwcAoeqoMpw34CfeKEroubpi2n8wKByrN2MQXJ
+4vEEAJbXZOqgAcFAFBUVb5mVT0s2lJMagZFPdhRJz2bttz01s/B8aca6CrDpFRjT
+03zRFUZjwDYqZDWBC181dCE9yla4OkWd5QyRKSS2EE02KEYqRzT0RngQn7s4AW2r
+326up3Jhleln3hgD4Kk3V3KHmyK8zqZA0qWzry4Vl2jjkbnAPB2JAR8EGAECAAkF
+Ak343IgCGwwACgkQh2gbHuMIDkWtHwgA2c4Xc9SPVNtFzBXpGhaYewVAes+YzyiB
+eJrUuy+MTWIRagL6a68qHo7J1/3STpmyVLut0OenarNhjxb9OIOTE/5++xniYyLV
+jQX7e3LWPNaFyycHZWslKJq/EReTS0o+s2/gRQZYbrAewL1MGPfsh/6eszKKb3hU
+e8jR4eZfzIyU3g7cd+aAP0Jsl1TG54/d7fU4hi7CH/oXsYgjxM0RUdT/y+H+ZBwL
+Gmk1tz5uh9/6Qiyhr9MAwvC0mhKtfWdebQre9l49EuciCbBXN2Q4iRpElQba1JAW
++R+G+0QuGeQvmXgw7TCtR2zRRZHBrd3uP8/YTGupd2UuYTOuJ9bIRA==
+=LXn0
+-----END PGP PRIVATE KEY BLOCK-----
+""")
+
+ revoked_key = StringIO("""-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+mI0ETjlW5gEEAOb/6P+TVM59E897wRtatxys2BhsHCXM4T7xjIiANfDwejDdifqh
+tluTfSJLLxPembtrrEjux1C0AJgc+f0MIfsc3Pr3eFJzKB2ot/1IVG1/1KnA0zt3
+W2xPT3lRib27WJ9Fag+dMtQaIzgJ7/n2DFxsFZ33FD2kxrEXB2exGg6FABEBAAGI
+pgQgAQIAEAUCTjlXkAkdAHJldm9rZWQACgkQjs6dvEpb0cQPHAP/Wi9rbx0e+1Sf
+ziGgyVdr3m3A6uvze5oXKVgFRbGRUYSH4/I8GW0W9x4TcRg9h+YaQ8NUdADr9kNE
+tKAljLqYA5qdqSfYuaij1M++Xj+KUZ359R74sHuQqwnRy1XXQNfRs/QpXA7vLdds
+rjg+pbWuXO92TZJUdnqtWW+VEyZBsPy0G3Rlc3Qga2V5IDx0ZXN0QGV4YW1wbGUu
+Y29tPoi4BBMBAgAiBQJOOVbmAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAK
+CRCOzp28SlvRxNWzA/42WVmI0b+6mF/imEOlY1TiyvrcpK250rkSDsCtL4lOwy7G
+antZhpgNfnXRd/ySfsS3EB6dpOWgOSxGRvWQhA+vxBT9BYNk49qd3JIrSaSWpR12
+rET8qO1rEQQFWsw03CxTGujxGlmEO+a1yguRXp2UWaY7FngcQmD+8q7BUIVm7riN
+BE45VuYBBADTEH2jHTjNCc5CMOhea6EJTrkx3upcEqB2oyhWeSWJiBGOxlcddsjo
+3J3/EmBB8kK1hM9TidD3SG64x1N287lg8ELJBlKv+pQVyxohGJ1u/THgpTDMMQcL
+luG5rAHQGSfyzKTiOnaTyBYg3M/nzgUOU9dKEFB0EA3tjUXFOT+r3wARAQABiJ8E
+GAECAAkFAk45VuYCGwwACgkQjs6dvEpb0cRSLQP/fzCWX2lXwlwWiVF8BOPF7o9z
+icHErc7/X17RGb4qj1kVf+UkRdUWJrbEVh4h6MncBIuA70WsYogiw+Kz/0LCtQAR
+YUJsPy/EL++OKPH1aFasOdTxwkTka85+RdYqhP1+z/aYLFMWq6mRFI+o6x2k5mGi
+7dMv2kKTJPoXUpiXJbg=
+=hLYO
+-----END PGP PUBLIC KEY BLOCK-----
+""")
+
+ expired_key = StringIO("""-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+mI0ETjZ6PAEEALkR4GcFQidCCxV7pgQwQd5MZua0YO2l92fVqHX+PhnZ6egCLKdD
+2bWlMUd6MLPF3FlRL7BBAxvW/DazkBOp7ljsnpMpptEzY49Uem1irYLYiVb9zK96
+0sQZzFxFkfEYetQEXC68mIck8tbySOX5NAOw++3jFm3J7dsU1R3XtYzRABEBAAG0
+G3Rlc3Qga2V5IDx0ZXN0QGV4YW1wbGUuY29tPoi+BBMBAgAoBQJONno8AhsDBQkA
+AVGABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAc4m97T40VEz+DA/9PBphG
+Yp9cHVaHSfTUKGTGgIbvRe60sFNpDCYZeAGDrygOMuI8MNzbVpwefRBFHVPx7jWd
+rrYMsLkcsNUS9D0baU+0D/qp7JVg7ZSQtG0O6IG4eTZhibteY1fu0+unlXmg9NHx
+5VvhwzBiJDYji00M2p/CZEMiYFUuy76CsxUpN7iNBE42ejwBBACkv2/mX7IPQg0C
+A3KSrJsJv+sdvKm4b4xuI4OwagwTIVz4KlTqV4IBrVjSBfwyMXucXz0bTW85qjgA
++n67td8vyjYYZUEz1uY9lSquQQDnAN0txL3cLHZXWiWOkmzZVddQtlflK2a/J9o0
+QkHPVUm+hc4l64dIzStrNl2S66fAvQARAQABiKUEGAECAA8FAk42ejwCGwwFCQAB
+UYAACgkQHOJve0+NFROEYQP/epg+o8iBs31hkSERyZjrRR66LpywezWj30Rn/3mX
+Fzi9HkF4xLemWOzdNt9C5PYrOep85PQg8haEjknxVjZFS0ikT1h3OWk/TF1ZrLVm
+WzyX8DaHQEjKpLJJjXcAbTiZBNMk0QaVC9RvIeHpCf3n3DC49DdjsPJRMKOn8KDi
+kRk=
+=p0gt
+-----END PGP PUBLIC KEY BLOCK-----
+""")
+ context.import_(key)
+ context.import_(secret_key)
+ context.import_(revoked_key)
+ context.import_(expired_key)
+
+ def test_verify_untrusted_but_accepted(self):
+        # untrusted by gpg but listed as acceptable_keys by user
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iQEcBAEBAgAGBQJN+ekFAAoJEIdoGx7jCA5FGtEH/i+XxJRvqU6wdBtLVrGBMAGk
+FZ5VP+KyXYtymSbgSstj/vM12NeMIeFs3xGnNnYuX1MIcY6We5TKtCH0epY6ym5+
+6g2Q2QpQ5/sT2d0mWzR0K4uVngmxVQaXTdk5PdZ40O7ULeDLW6CxzxMHyUL1rsIx
+7UBUTBh1O/1n3ZfD99hUkm3hVcnsN90uTKH59zV9NWwArU0cug60+5eDKJhSJDbG
+rIwlqbFAjDZ7L/48e+IaYIJwBZFzMBpJKdCxzALLtauMf+KK8hGiL2hrRbWm7ty6
+NgxfkMYOB4rDPdSstT35N+5uBG3n/UzjxHssi0svMfVETYYX40y57dm2eZQXFp8=
+=iwsn
+-----END PGP SIGNATURE-----
+"""
+ plain = """bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+"""
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ my_gpg.set_acceptable_keys("bazaar@example.com")
+ self.assertEqual((gpg.SIGNATURE_VALID, None), my_gpg.verify(content,
+ plain))
+
+ def test_verify_unacceptable_key(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iQEcBAEBAgAGBQJN+ekFAAoJEIdoGx7jCA5FGtEH/i+XxJRvqU6wdBtLVrGBMAGk
+FZ5VP+KyXYtymSbgSstj/vM12NeMIeFs3xGnNnYuX1MIcY6We5TKtCH0epY6ym5+
+6g2Q2QpQ5/sT2d0mWzR0K4uVngmxVQaXTdk5PdZ40O7ULeDLW6CxzxMHyUL1rsIx
+7UBUTBh1O/1n3ZfD99hUkm3hVcnsN90uTKH59zV9NWwArU0cug60+5eDKJhSJDbG
+rIwlqbFAjDZ7L/48e+IaYIJwBZFzMBpJKdCxzALLtauMf+KK8hGiL2hrRbWm7ty6
+NgxfkMYOB4rDPdSstT35N+5uBG3n/UzjxHssi0svMfVETYYX40y57dm2eZQXFp8=
+=iwsn
+-----END PGP SIGNATURE-----
+"""
+ plain = """bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+"""
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ my_gpg.set_acceptable_keys("foo@example.com")
+ self.assertEqual((gpg.SIGNATURE_KEY_MISSING, u'E3080E45'),
+ my_gpg.verify(content, plain))
+
+ def test_verify_valid_but_untrusted(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iQEcBAEBAgAGBQJN+ekFAAoJEIdoGx7jCA5FGtEH/i+XxJRvqU6wdBtLVrGBMAGk
+FZ5VP+KyXYtymSbgSstj/vM12NeMIeFs3xGnNnYuX1MIcY6We5TKtCH0epY6ym5+
+6g2Q2QpQ5/sT2d0mWzR0K4uVngmxVQaXTdk5PdZ40O7ULeDLW6CxzxMHyUL1rsIx
+7UBUTBh1O/1n3ZfD99hUkm3hVcnsN90uTKH59zV9NWwArU0cug60+5eDKJhSJDbG
+rIwlqbFAjDZ7L/48e+IaYIJwBZFzMBpJKdCxzALLtauMf+KK8hGiL2hrRbWm7ty6
+NgxfkMYOB4rDPdSstT35N+5uBG3n/UzjxHssi0svMfVETYYX40y57dm2eZQXFp8=
+=iwsn
+-----END PGP SIGNATURE-----
+"""
+ plain = """bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+"""
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ self.assertEqual((gpg.SIGNATURE_NOT_VALID, None), my_gpg.verify(content,
+ plain))
+
+ def test_verify_bad_testament(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iQEcBAEBAgAGBQJN+ekFAAoJEIdoGx7jCA5FGtEH/i+XxJRvqU6wdBtLVrGBMAGk
+FZ5VP+KyXYtymSbgSstj/vM12NeMIeFs3xGnNnYuX1MIcY6We5TKtCH0epY6ym5+
+6g2Q2QpQ5/sT2d0mWzR0K4uVngmxVQaXTdk5PdZ40O7ULeDLW6CxzxMHyUL1rsIx
+7UBUTBh1O/1n3ZfD99hUkm3hVcnsN90uTKH59zV9NWwArU0cug60+5eDKJhSJDbG
+rIwlqbFAjDZ7L/48e+IaYIJwBZFzMBpJKdCxzALLtauMf+KK8hGiL2hrRbWm7ty6
+NgxfkMYOB4rDPdSstT35N+5uBG3n/UzjxHssi0svMfVETYYX40y57dm2eZQXFp8=
+=iwsn
+-----END PGP SIGNATURE-----
+"""
+ plain = """bazaar-ng testament short form 1
+revision-id: doctor@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+"""
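+        # The plain testament above names doctor@example.com while the signed
+        # content names amy@example.com, so verification must report the
+        # signature as not valid for this testament.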
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ my_gpg.set_acceptable_keys("bazaar@example.com")
+ self.assertEqual((gpg.SIGNATURE_NOT_VALID, None), my_gpg.verify(content,
+ plain))
+
+ def test_verify_revoked_signature(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+asdf
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iJwEAQECAAYFAk45V18ACgkQjs6dvEpb0cSIZQP/eOGTXGPlrNwvDkcX2d8O///I
+ecB4sUIUEpv1XAk1MkNu58lsjjK72lRaLusEGqd7HwrFmpxVeVs0oWLg23PNPCFs
+yJBID9ma+VxFVPtkEFnrc1R72sBJLfBcTxMkwVTC8eeznjdtn+cg+aLkxbPdrGnr
+JFA6kUIJU2w9LU/b88Y=
+=UuRX
+-----END PGP SIGNATURE-----
+"""
+ plain = """asdf\n"""
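+        # The signing key has been revoked, so verification must not succeed
+        # even though test@example.com is listed as an acceptable key.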
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ my_gpg.set_acceptable_keys("test@example.com")
+ self.assertEqual((gpg.SIGNATURE_NOT_VALID, None), my_gpg.verify(content,
+ plain))
+
+ def test_verify_invalid(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iEYEARECAAYFAk33gYsACgkQpQbm1N1NUIhiDACglOuQDlnSF4NxfHSkN/zrmFy8
+nswAoNGXAVuR9ONasAKIGBNUE0b+lols
+=SOuC
+-----END PGP SIGNATURE-----
+"""
+ plain = """bazaar-ng testament short form 1
+revision-id: amy@example.com-20110527185938-hluafawphszb8dl1
+sha1: 6411f9bdf6571200357140c9ce7c0f50106ac9a4
+"""
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ self.assertEqual((gpg.SIGNATURE_NOT_VALID, None),
+ my_gpg.verify(content, plain))
+
+ def test_verify_expired_but_valid(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+bazaar-ng testament short form 1
+revision-id: test@example.com-20110801100657-f1dr1nompeex723z
+sha1: 59ab434be4c2d5d646dee84f514aa09e1b72feeb
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+iJwEAQECAAYFAk42esUACgkQHOJve0+NFRPc5wP7BoZkzBU8JaHMLv/LmqLr0sUz
+zuE51ofZZ19L7KVtQWsOi4jFy0fi4A5TFwO8u9SOfoREGvkw292Uty9subSouK5/
+mFmDOYPQ+O83zWgYZsBmMJWYDZ+X9I6XXZSbPtV/7XyTjaxtl5uRnDVJjg+AzKvD
+dTp8VatVVrwuvzOPDVc=
+=uHen
+-----END PGP SIGNATURE-----
+"""
+ plain = """bazaar-ng testament short form 1
+revision-id: test@example.com-20110801100657-f1dr1nompeex723z
+sha1: 59ab434be4c2d5d646dee84f514aa09e1b72feeb
+"""
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ self.assertEqual((gpg.SIGNATURE_EXPIRED, u'4F8D1513'),
+ my_gpg.verify(content, plain))
+
+ def test_verify_unknown_key(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+ content = """-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+asdf
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iQEcBAEBAgAGBQJOORKwAAoJENf6AkFdUeVvJDYH/1Cz+AJn1Jvy5n64o+0fZ5Ow
+Y7UQb4QQTIOV7jI7n4hv/yBzuHrtImFzYvQl/o2Ezzi8B8L5gZtQy+xCUF+Q8iWs
+gytZ5JUtSze7hDZo1NUl4etjoRGYqRfrUcvE2LkVH2dFbDGyyQfVmoeSHa5akuuP
+QZmyg2F983rACVIpGvsqTH6RcBdvE9vx68lugeKQA8ArDn39/74FBFipFzrXSPij
+eKFpl+yZmIb3g6HkPIC8o4j/tMvc37xF1OG5sBu8FT0+FC+VgY7vAblneDftAbyP
+sIODx4WcfJtjLG/qkRYqJ4gDHo0eMpTJSk2CWebajdm4b+JBrM1F9mgKuZFLruE=
+=RNR5
+-----END PGP SIGNATURE-----
+"""
+ plain = "asdf\n"
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ self.assertEqual((gpg.SIGNATURE_KEY_MISSING, u'5D51E56F'),
+ my_gpg.verify(content, plain))
+
+ def test_set_acceptable_keys(self):
+ self.requireFeature(features.gpgme)
+ self.import_keys()
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ my_gpg.set_acceptable_keys("bazaar@example.com")
+ self.assertEqual(my_gpg.acceptable_keys,
+ [u'B5DEED5FCB15DAE6ECEF919587681B1EE3080E45'])
+
+ def test_set_acceptable_keys_unknown(self):
+ self.requireFeature(features.gpgme)
+ my_gpg = gpg.GPGStrategy(FakeConfig())
+ self.notes = []
+ def note(*args):
+ self.notes.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'note', note)
+ my_gpg.set_acceptable_keys("unknown")
+ self.assertEqual(my_gpg.acceptable_keys, [])
+ self.assertEqual(self.notes,
+ ['No GnuPG key results for pattern: unknown'])
+
+
+class TestDisabled(TestCase):
+
+ def test_sign(self):
+ self.assertRaises(errors.SigningFailed,
+ gpg.DisabledGPGStrategy(None).sign, 'content')
+
+ def test_verify(self):
+ self.assertRaises(errors.SignatureVerificationFailed,
+ gpg.DisabledGPGStrategy(None).verify, 'content',
+ 'testament')
diff --git a/bzrlib/tests/test_graph.py b/bzrlib/tests/test_graph.py
new file mode 100644
index 0000000..312d60d
--- /dev/null
+++ b/bzrlib/tests/test_graph.py
@@ -0,0 +1,1743 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ errors,
+ graph as _mod_graph,
+ tests,
+ )
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import TestCaseWithMemoryTransport
+
+
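+# Note: each sample ancestry below is a plain dict mapping a revision id to
+# the list of its parent revision ids; the tests wrap these dicts in
+# _mod_graph.DictParentsProvider (via make_graph) to build Graph objects.
+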
+# Ancestry 1:
+#
+# NULL_REVISION
+# |
+# rev1
+# /\
+# rev2a rev2b
+# | |
+# rev3 /
+# | /
+# rev4
+ancestry_1 = {'rev1': [NULL_REVISION], 'rev2a': ['rev1'], 'rev2b': ['rev1'],
+ 'rev3': ['rev2a'], 'rev4': ['rev3', 'rev2b']}
+
+
+# Ancestry 2:
+#
+# NULL_REVISION
+# / \
+# rev1a rev1b
+# |
+# rev2a
+# |
+# rev3a
+# |
+# rev4a
+ancestry_2 = {'rev1a': [NULL_REVISION], 'rev2a': ['rev1a'],
+ 'rev1b': [NULL_REVISION], 'rev3a': ['rev2a'], 'rev4a': ['rev3a']}
+
+
+# Criss cross ancestry
+#
+# NULL_REVISION
+# |
+# rev1
+# / \
+# rev2a rev2b
+# |\ /|
+# | X |
+# |/ \|
+# rev3a rev3b
+criss_cross = {'rev1': [NULL_REVISION], 'rev2a': ['rev1'], 'rev2b': ['rev1'],
+ 'rev3a': ['rev2a', 'rev2b'], 'rev3b': ['rev2b', 'rev2a']}
+
+
+# Criss-cross 2
+#
+# NULL_REVISION
+# / \
+# rev1a rev1b
+# |\ /|
+# | \ / |
+# | X |
+# | / \ |
+# |/ \|
+# rev2a rev2b
+criss_cross2 = {'rev1a': [NULL_REVISION], 'rev1b': [NULL_REVISION],
+ 'rev2a': ['rev1a', 'rev1b'], 'rev2b': ['rev1b', 'rev1a']}
+
+
+# Mainline:
+#
+# NULL_REVISION
+# |
+# rev1
+# / \
+# | rev2b
+# | /
+# rev2a
+mainline = {'rev1': [NULL_REVISION], 'rev2a': ['rev1', 'rev2b'],
+ 'rev2b': ['rev1']}
+
+
+# feature branch:
+#
+# NULL_REVISION
+# |
+# rev1
+# |
+# rev2b
+# |
+# rev3b
+feature_branch = {'rev1': [NULL_REVISION],
+ 'rev2b': ['rev1'], 'rev3b': ['rev2b']}
+
+
+# History shortcut
+# NULL_REVISION
+# |
+# rev1------
+# / \ \
+# rev2a rev2b rev2c
+# | / \ /
+# rev3a rev3b
+history_shortcut = {'rev1': [NULL_REVISION], 'rev2a': ['rev1'],
+ 'rev2b': ['rev1'], 'rev2c': ['rev1'],
+ 'rev3a': ['rev2a', 'rev2b'], 'rev3b': ['rev2b', 'rev2c']}
+
+# Extended history shortcut
+# NULL_REVISION
+# |
+# a
+# |\
+# b |
+# | |
+# c |
+# | |
+# d |
+# |\|
+# e f
+extended_history_shortcut = {'a': [NULL_REVISION],
+ 'b': ['a'],
+ 'c': ['b'],
+ 'd': ['c'],
+ 'e': ['d'],
+ 'f': ['a', 'd'],
+ }
+
+# Double shortcut
+# Both sides will see 'a' first, even though it is actually a descendant of a
+# different common revision.
+#
+# NULL_REVISION
+# |
+# a
+# /|\
+# / b \
+# / | \
+# | c |
+# | / \ |
+# | d e |
+# |/ \|
+# f g
+
+double_shortcut = {'a':[NULL_REVISION], 'b':['a'], 'c':['b'],
+ 'd':['c'], 'e':['c'], 'f':['a', 'd'],
+ 'g':['a', 'e']}
+
+# Complex shortcut
+# This has a failure mode in that a shortcut will find some nodes in common,
+# but the common searcher won't have time to find that one branch is actually
+# in common. The extra nodes at the beginning are because we want to avoid
+# walking off the graph. Specifically, node g should be considered common, but
+# is likely to be seen by m long before the common searcher finds it.
+#
+# NULL_REVISION
+# |
+# a
+# |
+# b
+# |
+# c
+# |
+# d
+# |\
+# e f
+# | |\
+# | g h
+# |/| |
+# i j |
+# | | |
+# | k |
+# | | |
+# | l |
+# |/|/
+# m n
+complex_shortcut = {'a':[NULL_REVISION], 'b':['a'], 'c':['b'], 'd':['c'],
+ 'e':['d'], 'f':['d'], 'g':['f'], 'h':['f'],
+ 'i':['e', 'g'], 'j':['g'], 'k':['j'],
+ 'l':['k'], 'm':['i', 'l'], 'n':['l', 'h']}
+
+# NULL_REVISION
+# |
+# a
+# |
+# b
+# |
+# c
+# |
+# d
+# |\
+# e |
+# | |
+# f |
+# | |
+# g h
+# | |\
+# i | j
+# |\| |
+# | k |
+# | | |
+# | l |
+# | | |
+# | m |
+# | | |
+# | n |
+# | | |
+# | o |
+# | | |
+# | p |
+# | | |
+# | q |
+# | | |
+# | r |
+# | | |
+# | s |
+# | | |
+# |/|/
+# t u
+complex_shortcut2 = {'a':[NULL_REVISION], 'b':['a'], 'c':['b'], 'd':['c'],
+ 'e':['d'], 'f':['e'], 'g':['f'], 'h':['d'], 'i':['g'],
+ 'j':['h'], 'k':['h', 'i'], 'l':['k'], 'm':['l'], 'n':['m'],
+ 'o':['n'], 'p':['o'], 'q':['p'], 'r':['q'], 's':['r'],
+ 't':['i', 's'], 'u':['s', 'j'],
+ }
+
+# Graph where different walkers will race to find the common and uncommon
+# nodes.
+#
+# NULL_REVISION
+# |
+# a
+# |
+# b
+# |
+# c
+# |
+# d
+# |\
+# e k
+# | |
+# f-+-p
+# | | |
+# | l |
+# | | |
+# | m |
+# | |\|
+# g n q
+# |\| |
+# h o |
+# |/| |
+# i r |
+# | | |
+# | s |
+# | | |
+# | t |
+# | | |
+# | u |
+# | | |
+# | v |
+# | | |
+# | w |
+# | | |
+# | x |
+# | |\|
+# | y z
+# |/
+# j
+#
+# x is found to be common right away, but is the start of a long series of
+# common commits.
+# o is actually common, but the i-j shortcut makes it look like it is unique
+# to j at first; you have to traverse all of x->o to find it.
+# q,m gives the walker from j a common point to stop searching, as does p,f.
+# k-n exists so that the second pass still has nodes that are worth searching,
+# rather than instantly cancelling the extra walker.
+
+racing_shortcuts = {'a':[NULL_REVISION], 'b':['a'], 'c':['b'], 'd':['c'],
+ 'e':['d'], 'f':['e'], 'g':['f'], 'h':['g'], 'i':['h', 'o'], 'j':['i', 'y'],
+ 'k':['d'], 'l':['k'], 'm':['l'], 'n':['m'], 'o':['n', 'g'], 'p':['f'],
+ 'q':['p', 'm'], 'r':['o'], 's':['r'], 't':['s'], 'u':['t'], 'v':['u'],
+ 'w':['v'], 'x':['w'], 'y':['x'], 'z':['x', 'q']}
+
+
+# A graph with multiple nodes unique to one side.
+#
+# NULL_REVISION
+# |
+# a
+# |
+# b
+# |
+# c
+# |
+# d
+# |\
+# e f
+# |\ \
+# g h i
+# |\ \ \
+# j k l m
+# | |/ x|
+# | n o p
+# | |/ |
+# | q |
+# | | |
+# | r |
+# | | |
+# | s |
+# | | |
+# | t |
+# | | |
+# | u |
+# | | |
+# | v |
+# | | |
+# | w |
+# | | |
+# | x |
+# |/ \ /
+# y z
+#
+
+multiple_interesting_unique = {'a':[NULL_REVISION], 'b':['a'], 'c':['b'],
+ 'd':['c'], 'e':['d'], 'f':['d'], 'g':['e'], 'h':['e'], 'i':['f'],
+ 'j':['g'], 'k':['g'], 'l':['h'], 'm':['i'], 'n':['k', 'l'],
+ 'o':['m'], 'p':['m', 'l'], 'q':['n', 'o'], 'r':['q'], 's':['r'],
+ 't':['s'], 'u':['t'], 'v':['u'], 'w':['v'], 'x':['w'],
+ 'y':['j', 'x'], 'z':['x', 'p']}
+
+
+# Shortcut with extra root
+# We have a long history shortcut, and an extra root, which is why we can't
+# stop searchers based on seeing NULL_REVISION
+# NULL_REVISION
+# | |
+# a |
+# |\ |
+# b | |
+# | | |
+# c | |
+# | | |
+# d | g
+# |\|/
+# e f
+shortcut_extra_root = {'a': [NULL_REVISION],
+ 'b': ['a'],
+ 'c': ['b'],
+ 'd': ['c'],
+ 'e': ['d'],
+ 'f': ['a', 'd', 'g'],
+ 'g': [NULL_REVISION],
+ }
+
+# NULL_REVISION
+# |
+# f
+# |
+# e
+# / \
+# b d
+# | \ |
+# a c
+
+boundary = {'a': ['b'], 'c': ['b', 'd'], 'b':['e'], 'd':['e'], 'e': ['f'],
+ 'f':[NULL_REVISION]}
+
+
+# A graph that contains a ghost
+# NULL_REVISION
+# |
+# f
+# |
+# e g
+# / \ /
+# b d
+# | \ |
+# a c
+
+with_ghost = {'a': ['b'], 'c': ['b', 'd'], 'b':['e'], 'd':['e', 'g'],
+ 'e': ['f'], 'f':[NULL_REVISION], NULL_REVISION:()}
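+# Note: 'g' appears only as a parent of 'd' and has no entry of its own, so
+# it is treated as a ghost; NULL_REVISION is given an empty parent tuple.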
+
+# A graph that shows we can shortcut finding revnos when reaching them from the
+# side.
+# NULL_REVISION
+# |
+# a
+# |
+# b
+# |
+# c
+# |
+# d
+# |
+# e
+# / \
+# f g
+# |
+# h
+# |
+# i
+
+with_tail = {'a':[NULL_REVISION], 'b':['a'], 'c':['b'], 'd':['c'], 'e':['d'],
+ 'f':['e'], 'g':['e'], 'h':['f'], 'i':['h']}
+
+
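+# InstrumentedParentsProvider wraps another parents provider and records
+# every revision-id requested from it (and any cached lookups) in self.calls,
+# so tests can assert which parts of the graph a search actually touched.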
+class InstrumentedParentsProvider(object):
+
+ def __init__(self, parents_provider):
+ self.calls = []
+ self._real_parents_provider = parents_provider
+ get_cached = getattr(parents_provider, 'get_cached_parent_map', None)
+ if get_cached is not None:
+ # Only expose the underlying 'get_cached_parent_map' function if
+ # the wrapped provider has it.
+ self.get_cached_parent_map = self._get_cached_parent_map
+
+ def get_parent_map(self, nodes):
+ self.calls.extend(nodes)
+ return self._real_parents_provider.get_parent_map(nodes)
+
+ def _get_cached_parent_map(self, nodes):
+ self.calls.append(('cached', sorted(nodes)))
+ return self._real_parents_provider.get_cached_parent_map(nodes)
+
+
+class SharedInstrumentedParentsProvider(object):
+
+ def __init__(self, parents_provider, calls, info):
+ self.calls = calls
+ self.info = info
+ self._real_parents_provider = parents_provider
+ get_cached = getattr(parents_provider, 'get_cached_parent_map', None)
+ if get_cached is not None:
+ # Only expose the underlying 'get_cached_parent_map' function if
+ # the wrapped provider has it.
+ self.get_cached_parent_map = self._get_cached_parent_map
+
+ def get_parent_map(self, nodes):
+ self.calls.append((self.info, sorted(nodes)))
+ return self._real_parents_provider.get_parent_map(nodes)
+
+ def _get_cached_parent_map(self, nodes):
+ self.calls.append((self.info, 'cached', sorted(nodes)))
+ return self._real_parents_provider.get_cached_parent_map(nodes)
+
+
+class TestGraphBase(tests.TestCase):
+
+ def make_graph(self, ancestors):
+ return _mod_graph.Graph(_mod_graph.DictParentsProvider(ancestors))
+
+ def make_breaking_graph(self, ancestors, break_on):
+        """Make a Graph that fails the test if we hit a node in break_on."""
+ g = self.make_graph(ancestors)
+ orig_parent_map = g.get_parent_map
+ def get_parent_map(keys):
+ bad_keys = set(keys).intersection(break_on)
+ if bad_keys:
+ self.fail('key(s) %s was accessed' % (sorted(bad_keys),))
+ return orig_parent_map(keys)
+ g.get_parent_map = get_parent_map
+ return g
+
+
+class TestGraph(TestCaseWithMemoryTransport):
+
+ def make_graph(self, ancestors):
+ return _mod_graph.Graph(_mod_graph.DictParentsProvider(ancestors))
+
+ def prepare_memory_tree(self, location):
+ tree = self.make_branch_and_memory_tree(location)
+ tree.lock_write()
+ tree.add('.')
+ return tree
+
+ def build_ancestry(self, tree, ancestors):
+ """Create an ancestry as specified by a graph dict
+
+ :param tree: A tree to use
+ :param ancestors: a dict of {node: [node_parent, ...]}
+ """
+ pending = [NULL_REVISION]
+ descendants = {}
+ for descendant, parents in ancestors.iteritems():
+ for parent in parents:
+ descendants.setdefault(parent, []).append(descendant)
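+        # Walk outward from NULL_REVISION, committing each descendant only
+        # once all of its parents exist in the repository, so commits are
+        # made in topological order.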
+ while len(pending) > 0:
+ cur_node = pending.pop()
+ for descendant in descendants.get(cur_node, []):
+ if tree.branch.repository.has_revision(descendant):
+ continue
+ parents = [p for p in ancestors[descendant] if p is not
+ NULL_REVISION]
+ if len([p for p in parents if not
+ tree.branch.repository.has_revision(p)]) > 0:
+ continue
+ tree.set_parent_ids(parents)
+ if len(parents) > 0:
+ left_parent = parents[0]
+ else:
+ left_parent = NULL_REVISION
+ tree.branch.set_last_revision_info(
+ len(tree.branch._lefthand_history(left_parent)),
+ left_parent)
+ tree.commit(descendant, rev_id=descendant)
+ pending.append(descendant)
+
+ def test_lca(self):
+ """Test finding least common ancestor.
+
+ ancestry_1 should always have a single common ancestor
+ """
+ graph = self.make_graph(ancestry_1)
+ self.assertRaises(errors.InvalidRevisionId, graph.find_lca, None)
+ self.assertEqual(set([NULL_REVISION]),
+ graph.find_lca(NULL_REVISION, NULL_REVISION))
+ self.assertEqual(set([NULL_REVISION]),
+ graph.find_lca(NULL_REVISION, 'rev1'))
+ self.assertEqual(set(['rev1']), graph.find_lca('rev1', 'rev1'))
+ self.assertEqual(set(['rev1']), graph.find_lca('rev2a', 'rev2b'))
+
+ def test_no_unique_lca(self):
+ """Test error when one revision is not in the graph"""
+ graph = self.make_graph(ancestry_1)
+ self.assertRaises(errors.NoCommonAncestor, graph.find_unique_lca,
+ 'rev1', '1rev')
+
+ def test_lca_criss_cross(self):
+ """Test least-common-ancestor after a criss-cross merge."""
+ graph = self.make_graph(criss_cross)
+ self.assertEqual(set(['rev2a', 'rev2b']),
+ graph.find_lca('rev3a', 'rev3b'))
+ self.assertEqual(set(['rev2b']),
+ graph.find_lca('rev3a', 'rev3b', 'rev2b'))
+
+ def test_lca_shortcut(self):
+ """Test least-common ancestor on this history shortcut"""
+ graph = self.make_graph(history_shortcut)
+ self.assertEqual(set(['rev2b']), graph.find_lca('rev3a', 'rev3b'))
+
+ def test_lefthand_distance_smoke(self):
+        """A simple smoke test for graph.find_lefthand_distances(keys)."""
+ graph = self.make_graph(history_shortcut)
+ distance_graph = graph.find_lefthand_distances(['rev3b', 'rev2a'])
+ self.assertEqual({'rev2a': 2, 'rev3b': 3}, distance_graph)
+
+ def test_lefthand_distance_ghosts(self):
+        """find_lefthand_distances() gives -1 when hitting a ghost."""
+ nodes = {'nonghost':[NULL_REVISION], 'toghost':['ghost']}
+ graph = self.make_graph(nodes)
+ distance_graph = graph.find_lefthand_distances(['nonghost', 'toghost'])
+ self.assertEqual({'nonghost': 1, 'toghost': -1}, distance_graph)
+
+ def test_recursive_unique_lca(self):
+ """Test finding a unique least common ancestor.
+
+ ancestry_1 should always have a single common ancestor
+ """
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(NULL_REVISION,
+ graph.find_unique_lca(NULL_REVISION, NULL_REVISION))
+ self.assertEqual(NULL_REVISION,
+ graph.find_unique_lca(NULL_REVISION, 'rev1'))
+ self.assertEqual('rev1', graph.find_unique_lca('rev1', 'rev1'))
+ self.assertEqual('rev1', graph.find_unique_lca('rev2a', 'rev2b'))
+ self.assertEqual(('rev1', 1,),
+ graph.find_unique_lca('rev2a', 'rev2b',
+ count_steps=True))
+
+ def assertRemoveDescendants(self, expected, graph, revisions):
+ parents = graph.get_parent_map(revisions)
+ self.assertEqual(expected,
+ graph._remove_simple_descendants(revisions, parents))
+
+ def test__remove_simple_descendants(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertRemoveDescendants(set(['rev1']), graph,
+ set(['rev1', 'rev2a', 'rev2b', 'rev3', 'rev4']))
+
+ def test__remove_simple_descendants_disjoint(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertRemoveDescendants(set(['rev1', 'rev3']), graph,
+ set(['rev1', 'rev3']))
+
+ def test__remove_simple_descendants_chain(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertRemoveDescendants(set(['rev1']), graph,
+ set(['rev1', 'rev2a', 'rev3']))
+
+ def test__remove_simple_descendants_siblings(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertRemoveDescendants(set(['rev2a', 'rev2b']), graph,
+ set(['rev2a', 'rev2b', 'rev3']))
+
+ def test_unique_lca_criss_cross(self):
+ """Ensure we don't pick non-unique lcas in a criss-cross"""
+ graph = self.make_graph(criss_cross)
+ self.assertEqual('rev1', graph.find_unique_lca('rev3a', 'rev3b'))
+ lca, steps = graph.find_unique_lca('rev3a', 'rev3b', count_steps=True)
+ self.assertEqual('rev1', lca)
+ self.assertEqual(2, steps)
+
+ def test_unique_lca_null_revision(self):
+ """Ensure we pick NULL_REVISION when necessary"""
+ graph = self.make_graph(criss_cross2)
+ self.assertEqual('rev1b', graph.find_unique_lca('rev2a', 'rev1b'))
+ self.assertEqual(NULL_REVISION,
+ graph.find_unique_lca('rev2a', 'rev2b'))
+
+ def test_unique_lca_null_revision2(self):
+ """Ensure we pick NULL_REVISION when necessary"""
+ graph = self.make_graph(ancestry_2)
+ self.assertEqual(NULL_REVISION,
+ graph.find_unique_lca('rev4a', 'rev1b'))
+
+ def test_lca_double_shortcut(self):
+ graph = self.make_graph(double_shortcut)
+ self.assertEqual('c', graph.find_unique_lca('f', 'g'))
+
+ def test_common_ancestor_two_repos(self):
+ """Ensure we do unique_lca using data from two repos"""
+ mainline_tree = self.prepare_memory_tree('mainline')
+ self.build_ancestry(mainline_tree, mainline)
+ self.addCleanup(mainline_tree.unlock)
+
+ # This is cheating, because the revisions in the graph are actually
+ # different revisions, despite having the same revision-id.
+ feature_tree = self.prepare_memory_tree('feature')
+ self.build_ancestry(feature_tree, feature_branch)
+ self.addCleanup(feature_tree.unlock)
+
+ graph = mainline_tree.branch.repository.get_graph(
+ feature_tree.branch.repository)
+ self.assertEqual('rev2b', graph.find_unique_lca('rev2a', 'rev3b'))
+
+ def test_graph_difference(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual((set(), set()), graph.find_difference('rev1', 'rev1'))
+ self.assertEqual((set(), set(['rev1'])),
+ graph.find_difference(NULL_REVISION, 'rev1'))
+ self.assertEqual((set(['rev1']), set()),
+ graph.find_difference('rev1', NULL_REVISION))
+ self.assertEqual((set(['rev2a', 'rev3']), set(['rev2b'])),
+ graph.find_difference('rev3', 'rev2b'))
+ self.assertEqual((set(['rev4', 'rev3', 'rev2a']), set()),
+ graph.find_difference('rev4', 'rev2b'))
+
+ def test_graph_difference_separate_ancestry(self):
+ graph = self.make_graph(ancestry_2)
+ self.assertEqual((set(['rev1a']), set(['rev1b'])),
+ graph.find_difference('rev1a', 'rev1b'))
+ self.assertEqual((set(['rev1a', 'rev2a', 'rev3a', 'rev4a']),
+ set(['rev1b'])),
+ graph.find_difference('rev4a', 'rev1b'))
+
+ def test_graph_difference_criss_cross(self):
+ graph = self.make_graph(criss_cross)
+ self.assertEqual((set(['rev3a']), set(['rev3b'])),
+ graph.find_difference('rev3a', 'rev3b'))
+ self.assertEqual((set([]), set(['rev3b', 'rev2b'])),
+ graph.find_difference('rev2a', 'rev3b'))
+
+ def test_graph_difference_extended_history(self):
+ graph = self.make_graph(extended_history_shortcut)
+ self.assertEqual((set(['e']), set(['f'])),
+ graph.find_difference('e', 'f'))
+ self.assertEqual((set(['f']), set(['e'])),
+ graph.find_difference('f', 'e'))
+
+ def test_graph_difference_double_shortcut(self):
+ graph = self.make_graph(double_shortcut)
+ self.assertEqual((set(['d', 'f']), set(['e', 'g'])),
+ graph.find_difference('f', 'g'))
+
+ def test_graph_difference_complex_shortcut(self):
+ graph = self.make_graph(complex_shortcut)
+ self.assertEqual((set(['m', 'i', 'e']), set(['n', 'h'])),
+ graph.find_difference('m', 'n'))
+
+ def test_graph_difference_complex_shortcut2(self):
+ graph = self.make_graph(complex_shortcut2)
+ self.assertEqual((set(['t']), set(['j', 'u'])),
+ graph.find_difference('t', 'u'))
+
+ def test_graph_difference_shortcut_extra_root(self):
+ graph = self.make_graph(shortcut_extra_root)
+ self.assertEqual((set(['e']), set(['f', 'g'])),
+ graph.find_difference('e', 'f'))
+
+ def test_iter_topo_order(self):
+ graph = self.make_graph(ancestry_1)
+ args = ['rev2a', 'rev3', 'rev1']
+ topo_args = list(graph.iter_topo_order(args))
+ self.assertEqual(set(args), set(topo_args))
+ self.assertTrue(topo_args.index('rev2a') > topo_args.index('rev1'))
+ self.assertTrue(topo_args.index('rev2a') < topo_args.index('rev3'))
+
+ def test_is_ancestor(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(True, graph.is_ancestor('null:', 'null:'))
+ self.assertEqual(True, graph.is_ancestor('null:', 'rev1'))
+ self.assertEqual(False, graph.is_ancestor('rev1', 'null:'))
+ self.assertEqual(True, graph.is_ancestor('null:', 'rev4'))
+ self.assertEqual(False, graph.is_ancestor('rev4', 'null:'))
+ self.assertEqual(False, graph.is_ancestor('rev4', 'rev2b'))
+ self.assertEqual(True, graph.is_ancestor('rev2b', 'rev4'))
+ self.assertEqual(False, graph.is_ancestor('rev2b', 'rev3'))
+ self.assertEqual(False, graph.is_ancestor('rev3', 'rev2b'))
+ instrumented_provider = InstrumentedParentsProvider(graph)
+ instrumented_graph = _mod_graph.Graph(instrumented_provider)
+ instrumented_graph.is_ancestor('rev2a', 'rev2b')
+ self.assertTrue('null:' not in instrumented_provider.calls)
+
+ def test_is_between(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(True, graph.is_between('null:', 'null:', 'null:'))
+ self.assertEqual(True, graph.is_between('rev1', 'null:', 'rev1'))
+ self.assertEqual(True, graph.is_between('rev1', 'rev1', 'rev4'))
+ self.assertEqual(True, graph.is_between('rev4', 'rev1', 'rev4'))
+ self.assertEqual(True, graph.is_between('rev3', 'rev1', 'rev4'))
+ self.assertEqual(False, graph.is_between('rev4', 'rev1', 'rev3'))
+ self.assertEqual(False, graph.is_between('rev1', 'rev2a', 'rev4'))
+ self.assertEqual(False, graph.is_between('null:', 'rev1', 'rev4'))
+
+ def test_is_ancestor_boundary(self):
+ """Ensure that we avoid searching the whole graph.
+
+ This requires searching through b as a common ancestor, so we
+ can identify that e is common.
+ """
+ graph = self.make_graph(boundary)
+ instrumented_provider = InstrumentedParentsProvider(graph)
+ graph = _mod_graph.Graph(instrumented_provider)
+ self.assertFalse(graph.is_ancestor('a', 'c'))
+ self.assertTrue('null:' not in instrumented_provider.calls)
+
+ def test_iter_ancestry(self):
+ nodes = boundary.copy()
+ nodes[NULL_REVISION] = ()
+ graph = self.make_graph(nodes)
+ expected = nodes.copy()
+ expected.pop('a') # 'a' is not in the ancestry of 'c', all the
+ # other nodes are
+ self.assertEqual(expected, dict(graph.iter_ancestry(['c'])))
+ self.assertEqual(nodes, dict(graph.iter_ancestry(['a', 'c'])))
+
+ def test_iter_ancestry_with_ghost(self):
+ graph = self.make_graph(with_ghost)
+ expected = with_ghost.copy()
+ # 'a' is not in the ancestry of 'c', and 'g' is a ghost
+ expected['g'] = None
+ self.assertEqual(expected, dict(graph.iter_ancestry(['a', 'c'])))
+ expected.pop('a')
+ self.assertEqual(expected, dict(graph.iter_ancestry(['c'])))
+
+ def test_filter_candidate_lca(self):
+ """Test filter_candidate_lca for a corner case
+
+ This tests the case where we encounter the end of iteration for 'e'
+ in the same pass as we discover that 'd' is an ancestor of 'e', and
+ therefore 'e' can't be an lca.
+
+ To compensate for different dict orderings on other Python
+ implementations, we mirror 'd' and 'e' with 'b' and 'a'.
+ """
+ # This test is sensitive to the iteration order of dicts. It will
+ # pass incorrectly if 'e' and 'a' sort before 'c'
+ #
+ # NULL_REVISION
+ # / \
+ # a e
+ # | |
+ # b d
+ # \ /
+ # c
+ graph = self.make_graph({'c': ['b', 'd'], 'd': ['e'], 'b': ['a'],
+ 'a': [NULL_REVISION], 'e': [NULL_REVISION]})
+ self.assertEqual(set(['c']), graph.heads(['a', 'c', 'e']))
+
+ def test_heads_null(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(set(['null:']), graph.heads(['null:']))
+ self.assertEqual(set(['rev1']), graph.heads(['null:', 'rev1']))
+ self.assertEqual(set(['rev1']), graph.heads(['rev1', 'null:']))
+ self.assertEqual(set(['rev1']), graph.heads(set(['rev1', 'null:'])))
+ self.assertEqual(set(['rev1']), graph.heads(('rev1', 'null:')))
+
+ def test_heads_one(self):
+ # A single node will always be a head
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(set(['null:']), graph.heads(['null:']))
+ self.assertEqual(set(['rev1']), graph.heads(['rev1']))
+ self.assertEqual(set(['rev2a']), graph.heads(['rev2a']))
+ self.assertEqual(set(['rev2b']), graph.heads(['rev2b']))
+ self.assertEqual(set(['rev3']), graph.heads(['rev3']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev4']))
+
+ def test_heads_single(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(set(['rev4']), graph.heads(['null:', 'rev4']))
+ self.assertEqual(set(['rev2a']), graph.heads(['rev1', 'rev2a']))
+ self.assertEqual(set(['rev2b']), graph.heads(['rev1', 'rev2b']))
+ self.assertEqual(set(['rev3']), graph.heads(['rev1', 'rev3']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev1', 'rev4']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev2a', 'rev4']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev2b', 'rev4']))
+ self.assertEqual(set(['rev4']), graph.heads(['rev3', 'rev4']))
+
+ def test_heads_two_heads(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertEqual(set(['rev2a', 'rev2b']),
+ graph.heads(['rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3', 'rev2b']),
+ graph.heads(['rev3', 'rev2b']))
+
+ def test_heads_criss_cross(self):
+ graph = self.make_graph(criss_cross)
+ self.assertEqual(set(['rev2a']),
+ graph.heads(['rev2a', 'rev1']))
+ self.assertEqual(set(['rev2b']),
+ graph.heads(['rev2b', 'rev1']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev1']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev1']))
+ self.assertEqual(set(['rev2a', 'rev2b']),
+ graph.heads(['rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev2a']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev2b']))
+ self.assertEqual(set(['rev3a']),
+ graph.heads(['rev3a', 'rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev2a']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev2b']))
+ self.assertEqual(set(['rev3b']),
+ graph.heads(['rev3b', 'rev2a', 'rev2b']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev3a', 'rev3b']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev3a', 'rev3b', 'rev2a', 'rev2b']))
+
+ def test_heads_shortcut(self):
+ graph = self.make_graph(history_shortcut)
+
+ self.assertEqual(set(['rev2a', 'rev2b', 'rev2c']),
+ graph.heads(['rev2a', 'rev2b', 'rev2c']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev3a', 'rev3b']))
+ self.assertEqual(set(['rev3a', 'rev3b']),
+ graph.heads(['rev2a', 'rev3a', 'rev3b']))
+ self.assertEqual(set(['rev2a', 'rev3b']),
+ graph.heads(['rev2a', 'rev3b']))
+ self.assertEqual(set(['rev2c', 'rev3a']),
+ graph.heads(['rev2c', 'rev3a']))
+
+ def _run_heads_break_deeper(self, graph_dict, search):
+ """Run heads on a graph-as-a-dict.
+
+ If the search asks for the parents of 'deeper' the test will fail.
+ """
+ class stub(object):
+ pass
+ def get_parent_map(keys):
+ result = {}
+ for key in keys:
+ if key == 'deeper':
+ self.fail('key deeper was accessed')
+ result[key] = graph_dict[key]
+ return result
+ an_obj = stub()
+ an_obj.get_parent_map = get_parent_map
+ graph = _mod_graph.Graph(an_obj)
+ return graph.heads(search)
+
+ def test_heads_limits_search(self):
+ # test that a heads query does not search all of history
+ graph_dict = {
+ 'left':['common'],
+ 'right':['common'],
+ 'common':['deeper'],
+ }
+ self.assertEqual(set(['left', 'right']),
+ self._run_heads_break_deeper(graph_dict, ['left', 'right']))
+
+ def test_heads_limits_search_assymetric(self):
+ # test that a heads query does not search all of history
+ graph_dict = {
+ 'left':['midleft'],
+ 'midleft':['common'],
+ 'right':['common'],
+ 'common':['aftercommon'],
+ 'aftercommon':['deeper'],
+ }
+ self.assertEqual(set(['left', 'right']),
+ self._run_heads_break_deeper(graph_dict, ['left', 'right']))
+
+ def test_heads_limits_search_common_search_must_continue(self):
+ # test that common nodes are still queried, preventing
+ # all-the-way-to-origin behaviour in the following graph:
+ graph_dict = {
+ 'h1':['shortcut', 'common1'],
+ 'h2':['common1'],
+ 'shortcut':['common2'],
+ 'common1':['common2'],
+ 'common2':['deeper'],
+ }
+ self.assertEqual(set(['h1', 'h2']),
+ self._run_heads_break_deeper(graph_dict, ['h1', 'h2']))
+
+ def test_breadth_first_search_start_ghosts(self):
+ graph = self.make_graph({})
+ # with_ghosts reports the ghosts
+ search = graph._make_breadth_first_searcher(['a-ghost'])
+ self.assertEqual((set(), set(['a-ghost'])), search.next_with_ghosts())
+ self.assertRaises(StopIteration, search.next_with_ghosts)
+ # next includes them
+ search = graph._make_breadth_first_searcher(['a-ghost'])
+ self.assertEqual(set(['a-ghost']), search.next())
+ self.assertRaises(StopIteration, search.next)
+
+ def test_breadth_first_search_deep_ghosts(self):
+ graph = self.make_graph({
+ 'head':['present'],
+ 'present':['child', 'ghost'],
+ 'child':[],
+ })
+ # with_ghosts reports the ghosts
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertEqual((set(['head']), set()), search.next_with_ghosts())
+ self.assertEqual((set(['present']), set()), search.next_with_ghosts())
+ self.assertEqual((set(['child']), set(['ghost'])),
+ search.next_with_ghosts())
+ self.assertRaises(StopIteration, search.next_with_ghosts)
+ # next includes them
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertEqual(set(['head']), search.next())
+ self.assertEqual(set(['present']), search.next())
+ self.assertEqual(set(['child', 'ghost']),
+ search.next())
+ self.assertRaises(StopIteration, search.next)
+
+ def test_breadth_first_search_change_next_to_next_with_ghosts(self):
+ # To make the API robust, we allow calling both next() and
+ # next_with_ghosts() on the same searcher.
+ graph = self.make_graph({
+ 'head':['present'],
+ 'present':['child', 'ghost'],
+ 'child':[],
+ })
+ # start with next_with_ghosts
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertEqual((set(['head']), set()), search.next_with_ghosts())
+ self.assertEqual(set(['present']), search.next())
+ self.assertEqual((set(['child']), set(['ghost'])),
+ search.next_with_ghosts())
+ self.assertRaises(StopIteration, search.next)
+ # start with next
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertEqual(set(['head']), search.next())
+ self.assertEqual((set(['present']), set()), search.next_with_ghosts())
+ self.assertEqual(set(['child', 'ghost']),
+ search.next())
+ self.assertRaises(StopIteration, search.next_with_ghosts)
+
+ def test_breadth_first_change_search(self):
+ # Changing the search should work with both next and next_with_ghosts.
+ graph = self.make_graph({
+ 'head':['present'],
+ 'present':['stopped'],
+ 'other':['other_2'],
+ 'other_2':[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertEqual((set(['head']), set()), search.next_with_ghosts())
+ self.assertEqual((set(['present']), set()), search.next_with_ghosts())
+ self.assertEqual(set(['present']),
+ search.stop_searching_any(['present']))
+ self.assertEqual((set(['other']), set(['other_ghost'])),
+ search.start_searching(['other', 'other_ghost']))
+ self.assertEqual((set(['other_2']), set()), search.next_with_ghosts())
+ self.assertRaises(StopIteration, search.next_with_ghosts)
+ # next includes them
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertEqual(set(['head']), search.next())
+ self.assertEqual(set(['present']), search.next())
+ self.assertEqual(set(['present']),
+ search.stop_searching_any(['present']))
+ search.start_searching(['other', 'other_ghost'])
+ self.assertEqual(set(['other_2']), search.next())
+ self.assertRaises(StopIteration, search.next)
+
+ def assertSeenAndResult(self, instructions, search, next):
+        """Check the results of .seen and get_result() for a search.
+
+ :param instructions: A list of tuples:
+ (seen, recipe, included_keys, starts, stops).
+ seen, recipe and included_keys are results to check on the search
+            and the search's get_result(). starts and stops are parameters to
+ pass to start_searching and stop_searching_any during each
+ iteration, if they are not None.
+ :param search: The search to use.
+ :param next: A callable to advance the search.
+ """
+ for seen, recipe, included_keys, starts, stops in instructions:
+ # Adjust for recipe contract changes that don't vary for all the
+ # current tests.
+ recipe = ('search',) + recipe
+ next()
+ if starts is not None:
+ search.start_searching(starts)
+ if stops is not None:
+ search.stop_searching_any(stops)
+ state = search.get_state()
+ self.assertEqual(set(included_keys), state[2])
+ self.assertEqual(seen, search.seen)
+
+ def test_breadth_first_get_result_excludes_current_pending(self):
+ graph = self.make_graph({
+ 'head':['child'],
+ 'child':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+        # At the start, nothing has been seen, so it's all excluded:
+ state = search.get_state()
+ self.assertEqual((set(['head']), set(['head']), set()),
+ state)
+ self.assertEqual(set(), search.seen)
+ # using next:
+ expected = [
+ (set(['head']), (set(['head']), set(['child']), 1),
+ ['head'], None, None),
+ (set(['head', 'child']), (set(['head']), set([NULL_REVISION]), 2),
+ ['head', 'child'], None, None),
+ (set(['head', 'child', NULL_REVISION]), (set(['head']), set(), 3),
+ ['head', 'child', NULL_REVISION], None, None),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_get_result_starts_stops(self):
+ graph = self.make_graph({
+ 'head':['child'],
+ 'child':[NULL_REVISION],
+ 'otherhead':['otherchild'],
+ 'otherchild':['excluded'],
+ 'excluded':[NULL_REVISION],
+ NULL_REVISION:[]
+ })
+ search = graph._make_breadth_first_searcher([])
+ # Starting with nothing and adding a search works:
+ search.start_searching(['head'])
+ # head has been seen:
+ state = search.get_state()
+ self.assertEqual((set(['head']), set(['child']), set(['head'])),
+ state)
+ self.assertEqual(set(['head']), search.seen)
+ # using next:
+ expected = [
+ # stop at child, and start a new search at otherhead:
+ # - otherhead counts as seen immediately when start_searching is
+ # called.
+ (set(['head', 'child', 'otherhead']),
+ (set(['head', 'otherhead']), set(['child', 'otherchild']), 2),
+ ['head', 'otherhead'], ['otherhead'], ['child']),
+ (set(['head', 'child', 'otherhead', 'otherchild']),
+ (set(['head', 'otherhead']), set(['child', 'excluded']), 3),
+ ['head', 'otherhead', 'otherchild'], None, None),
+ # stop searching excluded now
+ (set(['head', 'child', 'otherhead', 'otherchild', 'excluded']),
+ (set(['head', 'otherhead']), set(['child', 'excluded']), 3),
+ ['head', 'otherhead', 'otherchild'], None, ['excluded']),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher([])
+ search.start_searching(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_stop_searching_not_queried(self):
+ # A client should be able to say 'stop node X' even if X has not been
+ # returned to the client.
+ graph = self.make_graph({
+ 'head':['child', 'ghost1'],
+ 'child':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ expected = [
+ # NULL_REVISION and ghost1 have not been returned
+ (set(['head']),
+ (set(['head']), set(['child', NULL_REVISION, 'ghost1']), 1),
+ ['head'], None, [NULL_REVISION, 'ghost1']),
+ # ghost1 has been returned, NULL_REVISION is to be returned in the
+ # next iteration.
+ (set(['head', 'child', 'ghost1']),
+ (set(['head']), set(['ghost1', NULL_REVISION]), 2),
+ ['head', 'child'], None, [NULL_REVISION, 'ghost1']),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_stop_searching_late(self):
+ # A client should be able to say 'stop node X' and have it excluded
+ # from the result even if X was seen in an older iteration of the
+ # search.
+ graph = self.make_graph({
+ 'head':['middle'],
+ 'middle':['child'],
+ 'child':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ expected = [
+ (set(['head']), (set(['head']), set(['middle']), 1),
+ ['head'], None, None),
+ (set(['head', 'middle']), (set(['head']), set(['child']), 2),
+ ['head', 'middle'], None, None),
+ # 'middle' came from the previous iteration, but we don't stop
+ # searching it until *after* advancing the searcher.
+ (set(['head', 'middle', 'child']),
+ (set(['head']), set(['middle', 'child']), 1),
+ ['head'], None, ['middle', 'child']),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_get_result_ghosts_are_excluded(self):
+ graph = self.make_graph({
+ 'head':['child', 'ghost'],
+ 'child':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ # using next:
+ expected = [
+ (set(['head']),
+ (set(['head']), set(['ghost', 'child']), 1),
+ ['head'], None, None),
+ (set(['head', 'child', 'ghost']),
+ (set(['head']), set([NULL_REVISION, 'ghost']), 2),
+ ['head', 'child'], None, None),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_get_result_starting_a_ghost_ghost_is_excluded(self):
+ graph = self.make_graph({
+ 'head':['child'],
+ 'child':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ # using next:
+ expected = [
+ (set(['head', 'ghost']),
+ (set(['head', 'ghost']), set(['child', 'ghost']), 1),
+ ['head'], ['ghost'], None),
+ (set(['head', 'child', 'ghost']),
+ (set(['head', 'ghost']), set([NULL_REVISION, 'ghost']), 2),
+ ['head', 'child'], None, None),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_revision_count_includes_NULL_REVISION(self):
+ graph = self.make_graph({
+ 'head':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ # using next:
+ expected = [
+ (set(['head']),
+ (set(['head']), set([NULL_REVISION]), 1),
+ ['head'], None, None),
+ (set(['head', NULL_REVISION]),
+ (set(['head']), set([]), 2),
+ ['head', NULL_REVISION], None, None),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+
+ def test_breadth_first_search_get_result_after_StopIteration(self):
+        # StopIteration should not invalidate anything.
+ graph = self.make_graph({
+ 'head':[NULL_REVISION],
+ NULL_REVISION:[],
+ })
+ search = graph._make_breadth_first_searcher(['head'])
+ # using next:
+ expected = [
+ (set(['head']),
+ (set(['head']), set([NULL_REVISION]), 1),
+ ['head'], None, None),
+ (set(['head', 'ghost', NULL_REVISION]),
+ (set(['head', 'ghost']), set(['ghost']), 2),
+ ['head', NULL_REVISION], ['ghost'], None),
+ ]
+ self.assertSeenAndResult(expected, search, search.next)
+ self.assertRaises(StopIteration, search.next)
+ self.assertEqual(set(['head', 'ghost', NULL_REVISION]), search.seen)
+ state = search.get_state()
+ self.assertEqual(
+ (set(['ghost', 'head']), set(['ghost']),
+ set(['head', NULL_REVISION])),
+ state)
+ # using next_with_ghosts:
+ search = graph._make_breadth_first_searcher(['head'])
+ self.assertSeenAndResult(expected, search, search.next_with_ghosts)
+ self.assertRaises(StopIteration, search.next)
+ self.assertEqual(set(['head', 'ghost', NULL_REVISION]), search.seen)
+ state = search.get_state()
+ self.assertEqual(
+ (set(['ghost', 'head']), set(['ghost']),
+ set(['head', NULL_REVISION])),
+ state)
+
+
+class TestFindUniqueAncestors(TestGraphBase):
+
+ def assertFindUniqueAncestors(self, graph, expected, node, common):
+ actual = graph.find_unique_ancestors(node, common)
+ self.assertEqual(expected, sorted(actual))
+
+ def test_empty_set(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindUniqueAncestors(graph, [], 'rev1', ['rev1'])
+ self.assertFindUniqueAncestors(graph, [], 'rev2b', ['rev2b'])
+ self.assertFindUniqueAncestors(graph, [], 'rev3', ['rev1', 'rev3'])
+
+ def test_single_node(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindUniqueAncestors(graph, ['rev2a'], 'rev2a', ['rev1'])
+ self.assertFindUniqueAncestors(graph, ['rev2b'], 'rev2b', ['rev1'])
+ self.assertFindUniqueAncestors(graph, ['rev3'], 'rev3', ['rev2a'])
+
+ def test_minimal_ancestry(self):
+ graph = self.make_breaking_graph(extended_history_shortcut,
+ [NULL_REVISION, 'a', 'b'])
+ self.assertFindUniqueAncestors(graph, ['e'], 'e', ['d'])
+
+ graph = self.make_breaking_graph(extended_history_shortcut,
+ ['b'])
+ self.assertFindUniqueAncestors(graph, ['f'], 'f', ['a', 'd'])
+
+ graph = self.make_breaking_graph(complex_shortcut,
+ ['a', 'b'])
+ self.assertFindUniqueAncestors(graph, ['h'], 'h', ['i'])
+ self.assertFindUniqueAncestors(graph, ['e', 'g', 'i'], 'i', ['h'])
+ self.assertFindUniqueAncestors(graph, ['h'], 'h', ['g'])
+ self.assertFindUniqueAncestors(graph, ['h'], 'h', ['j'])
+
+ def test_in_ancestry(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindUniqueAncestors(graph, [], 'rev1', ['rev3'])
+ self.assertFindUniqueAncestors(graph, [], 'rev2b', ['rev4'])
+
+ def test_multiple_revisions(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindUniqueAncestors(graph,
+ ['rev4'], 'rev4', ['rev3', 'rev2b'])
+ self.assertFindUniqueAncestors(graph,
+ ['rev2a', 'rev3', 'rev4'], 'rev4', ['rev2b'])
+
+ def test_complex_shortcut(self):
+ graph = self.make_graph(complex_shortcut)
+ self.assertFindUniqueAncestors(graph,
+ ['h', 'n'], 'n', ['m'])
+ self.assertFindUniqueAncestors(graph,
+ ['e', 'i', 'm'], 'm', ['n'])
+
+ def test_complex_shortcut2(self):
+ graph = self.make_graph(complex_shortcut2)
+ self.assertFindUniqueAncestors(graph,
+ ['j', 'u'], 'u', ['t'])
+ self.assertFindUniqueAncestors(graph,
+ ['t'], 't', ['u'])
+
+ def test_multiple_interesting_unique(self):
+ graph = self.make_graph(multiple_interesting_unique)
+ self.assertFindUniqueAncestors(graph,
+ ['j', 'y'], 'y', ['z'])
+ self.assertFindUniqueAncestors(graph,
+ ['p', 'z'], 'z', ['y'])
+
+ def test_racing_shortcuts(self):
+ graph = self.make_graph(racing_shortcuts)
+ self.assertFindUniqueAncestors(graph,
+ ['p', 'q', 'z'], 'z', ['y'])
+ self.assertFindUniqueAncestors(graph,
+ ['h', 'i', 'j', 'y'], 'j', ['z'])
+
+
+class TestGraphFindDistanceToNull(TestGraphBase):
+    """Test an API that should be able to compute a revno."""
+
+ def assertFindDistance(self, revno, graph, target_id, known_ids):
+ """Assert the output of Graph.find_distance_to_null()"""
+ actual = graph.find_distance_to_null(target_id, known_ids)
+ self.assertEqual(revno, actual)
+
+ def test_nothing_known(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindDistance(0, graph, NULL_REVISION, [])
+ self.assertFindDistance(1, graph, 'rev1', [])
+ self.assertFindDistance(2, graph, 'rev2a', [])
+ self.assertFindDistance(2, graph, 'rev2b', [])
+ self.assertFindDistance(3, graph, 'rev3', [])
+ self.assertFindDistance(4, graph, 'rev4', [])
+
+ def test_rev_is_ghost(self):
+ graph = self.make_graph(ancestry_1)
+ e = self.assertRaises(errors.GhostRevisionsHaveNoRevno,
+ graph.find_distance_to_null, 'rev_missing', [])
+ self.assertEqual('rev_missing', e.revision_id)
+ self.assertEqual('rev_missing', e.ghost_revision_id)
+
+ def test_ancestor_is_ghost(self):
+ graph = self.make_graph({'rev':['parent']})
+ e = self.assertRaises(errors.GhostRevisionsHaveNoRevno,
+ graph.find_distance_to_null, 'rev', [])
+ self.assertEqual('rev', e.revision_id)
+ self.assertEqual('parent', e.ghost_revision_id)
+
+ def test_known_in_ancestry(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindDistance(2, graph, 'rev2a', [('rev1', 1)])
+ self.assertFindDistance(3, graph, 'rev3', [('rev2a', 2)])
+
+ def test_known_in_ancestry_limits(self):
+ graph = self.make_breaking_graph(ancestry_1, ['rev1'])
+ self.assertFindDistance(4, graph, 'rev4', [('rev3', 3)])
+
+ def test_target_is_ancestor(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertFindDistance(2, graph, 'rev2a', [('rev3', 3)])
+
+ def test_target_is_ancestor_limits(self):
+ """We shouldn't search all history if we run into ourselves"""
+ graph = self.make_breaking_graph(ancestry_1, ['rev1'])
+ self.assertFindDistance(3, graph, 'rev3', [('rev4', 4)])
+
+ def test_target_parallel_to_known_limits(self):
+ # Even though the known revision isn't part of the other ancestry, they
+ # eventually converge
+ graph = self.make_breaking_graph(with_tail, ['a'])
+ self.assertFindDistance(6, graph, 'f', [('g', 6)])
+ self.assertFindDistance(7, graph, 'h', [('g', 6)])
+ self.assertFindDistance(8, graph, 'i', [('g', 6)])
+ self.assertFindDistance(6, graph, 'g', [('i', 8)])
+
+
+class TestFindMergeOrder(TestGraphBase):
+
+ def assertMergeOrder(self, expected, graph, tip, base_revisions):
+ self.assertEqual(expected, graph.find_merge_order(tip, base_revisions))
+
+ def test_parents(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertMergeOrder(['rev3', 'rev2b'], graph, 'rev4',
+ ['rev3', 'rev2b'])
+ self.assertMergeOrder(['rev3', 'rev2b'], graph, 'rev4',
+ ['rev2b', 'rev3'])
+
+ def test_ancestors(self):
+ graph = self.make_graph(ancestry_1)
+ self.assertMergeOrder(['rev1', 'rev2b'], graph, 'rev4',
+ ['rev1', 'rev2b'])
+ self.assertMergeOrder(['rev1', 'rev2b'], graph, 'rev4',
+ ['rev2b', 'rev1'])
+
+ def test_shortcut_one_ancestor(self):
+ # When we have enough info, we can stop searching
+ graph = self.make_breaking_graph(ancestry_1, ['rev3', 'rev2b', 'rev4'])
+ # Single ancestors shortcut right away
+ self.assertMergeOrder(['rev3'], graph, 'rev4', ['rev3'])
+
+ def test_shortcut_after_one_ancestor(self):
+ graph = self.make_breaking_graph(ancestry_1, ['rev2a', 'rev2b'])
+ self.assertMergeOrder(['rev3', 'rev1'], graph, 'rev4', ['rev1', 'rev3'])
+
+
+class TestFindDescendants(TestGraphBase):
+
+ def test_find_descendants_rev1_rev3(self):
+ graph = self.make_graph(ancestry_1)
+ descendants = graph.find_descendants('rev1', 'rev3')
+ self.assertEqual(set(['rev1', 'rev2a', 'rev3']), descendants)
+
+ def test_find_descendants_rev1_rev4(self):
+ graph = self.make_graph(ancestry_1)
+ descendants = graph.find_descendants('rev1', 'rev4')
+ self.assertEqual(set(['rev1', 'rev2a', 'rev2b', 'rev3', 'rev4']),
+ descendants)
+
+ def test_find_descendants_rev2a_rev4(self):
+ graph = self.make_graph(ancestry_1)
+ descendants = graph.find_descendants('rev2a', 'rev4')
+ self.assertEqual(set(['rev2a', 'rev3', 'rev4']), descendants)
+
+
+class TestFindLefthandMerger(TestGraphBase):
+
+ def check_merger(self, result, ancestry, merged, tip):
+ graph = self.make_graph(ancestry)
+ self.assertEqual(result, graph.find_lefthand_merger(merged, tip))
+
+ def test_find_lefthand_merger_rev2b(self):
+ self.check_merger('rev4', ancestry_1, 'rev2b', 'rev4')
+
+ def test_find_lefthand_merger_rev2a(self):
+ self.check_merger('rev2a', ancestry_1, 'rev2a', 'rev4')
+
+ def test_find_lefthand_merger_rev4(self):
+ self.check_merger(None, ancestry_1, 'rev4', 'rev2a')
+
+ def test_find_lefthand_merger_f(self):
+ self.check_merger('i', complex_shortcut, 'f', 'm')
+
+ def test_find_lefthand_merger_g(self):
+ self.check_merger('i', complex_shortcut, 'g', 'm')
+
+ def test_find_lefthand_merger_h(self):
+ self.check_merger('n', complex_shortcut, 'h', 'n')
+
+
+class TestGetChildMap(TestGraphBase):
+
+ def test_get_child_map(self):
+ graph = self.make_graph(ancestry_1)
+ child_map = graph.get_child_map(['rev4', 'rev3', 'rev2a', 'rev2b'])
+ self.assertEqual({'rev1': ['rev2a', 'rev2b'],
+ 'rev2a': ['rev3'],
+ 'rev2b': ['rev4'],
+ 'rev3': ['rev4']},
+ child_map)
+
+
+class TestCachingParentsProvider(tests.TestCase):
+ """These tests run with:
+
+ self.inst_pp, a recording parents provider with a graph of a->b, and b is a
+ ghost.
+ self.caching_pp, a CachingParentsProvider layered on inst_pp.
+ """
+
+ def setUp(self):
+ super(TestCachingParentsProvider, self).setUp()
+ dict_pp = _mod_graph.DictParentsProvider({'a': ('b',)})
+ self.inst_pp = InstrumentedParentsProvider(dict_pp)
+ self.caching_pp = _mod_graph.CachingParentsProvider(self.inst_pp)
+
+ def test_get_parent_map(self):
+ """Requesting the same revision should be returned from cache"""
+ self.assertEqual({}, self.caching_pp._cache)
+ self.assertEqual({'a':('b',)}, self.caching_pp.get_parent_map(['a']))
+ self.assertEqual(['a'], self.inst_pp.calls)
+ self.assertEqual({'a':('b',)}, self.caching_pp.get_parent_map(['a']))
+ # No new call, as it should have been returned from the cache
+ self.assertEqual(['a'], self.inst_pp.calls)
+ self.assertEqual({'a':('b',)}, self.caching_pp._cache)
+
+ def test_get_parent_map_not_present(self):
+ """The cache should also track when a revision doesn't exist"""
+ self.assertEqual({}, self.caching_pp.get_parent_map(['b']))
+ self.assertEqual(['b'], self.inst_pp.calls)
+ self.assertEqual({}, self.caching_pp.get_parent_map(['b']))
+ # No new calls
+ self.assertEqual(['b'], self.inst_pp.calls)
+
+ def test_get_parent_map_mixed(self):
+ """Anything that can be returned from cache, should be"""
+ self.assertEqual({}, self.caching_pp.get_parent_map(['b']))
+ self.assertEqual(['b'], self.inst_pp.calls)
+ self.assertEqual({'a':('b',)},
+ self.caching_pp.get_parent_map(['a', 'b']))
+ self.assertEqual(['b', 'a'], self.inst_pp.calls)
+
+ def test_get_parent_map_repeated(self):
+ """Asking for the same parent 2x will only forward 1 request."""
+ self.assertEqual({'a':('b',)},
+ self.caching_pp.get_parent_map(['b', 'a', 'b']))
+ # Use sorted because we don't care about the order, just that each is
+ # only present 1 time.
+ self.assertEqual(['a', 'b'], sorted(self.inst_pp.calls))
+
+ def test_note_missing_key(self):
+ """After noting that a key is missing it is cached."""
+ self.caching_pp.note_missing_key('b')
+ self.assertEqual({}, self.caching_pp.get_parent_map(['b']))
+ self.assertEqual([], self.inst_pp.calls)
+ self.assertEqual(set(['b']), self.caching_pp.missing_keys)
+
+ def test_get_cached_parent_map(self):
+ self.assertEqual({}, self.caching_pp.get_cached_parent_map(['a']))
+ self.assertEqual([], self.inst_pp.calls)
+ self.assertEqual({'a': ('b',)}, self.caching_pp.get_parent_map(['a']))
+ self.assertEqual(['a'], self.inst_pp.calls)
+ self.assertEqual({'a': ('b',)},
+ self.caching_pp.get_cached_parent_map(['a']))
+
+
+class TestCachingParentsProviderExtras(tests.TestCaseWithTransport):
+ """Test the behaviour when parents are provided that were not requested."""
+
+ def setUp(self):
+ super(TestCachingParentsProviderExtras, self).setUp()
+ class ExtraParentsProvider(object):
+
+ def get_parent_map(self, keys):
+ return {'rev1': [], 'rev2': ['rev1',]}
+
+ self.inst_pp = InstrumentedParentsProvider(ExtraParentsProvider())
+ self.caching_pp = _mod_graph.CachingParentsProvider(
+ get_parent_map=self.inst_pp.get_parent_map)
+
+ def test_uncached(self):
+ self.caching_pp.disable_cache()
+ self.assertEqual({'rev1': []},
+ self.caching_pp.get_parent_map(['rev1']))
+ self.assertEqual(['rev1'], self.inst_pp.calls)
+ self.assertIs(None, self.caching_pp._cache)
+
+ def test_cache_initially_empty(self):
+ self.assertEqual({}, self.caching_pp._cache)
+
+ def test_cached(self):
+ self.assertEqual({'rev1': []},
+ self.caching_pp.get_parent_map(['rev1']))
+ self.assertEqual(['rev1'], self.inst_pp.calls)
+ self.assertEqual({'rev1': [], 'rev2': ['rev1']},
+ self.caching_pp._cache)
+ self.assertEqual({'rev1': []},
+ self.caching_pp.get_parent_map(['rev1']))
+ self.assertEqual(['rev1'], self.inst_pp.calls)
+
+ def test_disable_cache_clears_cache(self):
+ # Put something in the cache
+ self.caching_pp.get_parent_map(['rev1'])
+ self.assertEqual(2, len(self.caching_pp._cache))
+ self.caching_pp.disable_cache()
+ self.assertIs(None, self.caching_pp._cache)
+
+ def test_enable_cache_raises(self):
+ e = self.assertRaises(AssertionError, self.caching_pp.enable_cache)
+ self.assertEqual('Cache enabled when already enabled.', str(e))
+
+ def test_cache_misses(self):
+ self.caching_pp.get_parent_map(['rev3'])
+ self.caching_pp.get_parent_map(['rev3'])
+ self.assertEqual(['rev3'], self.inst_pp.calls)
+
+ def test_no_cache_misses(self):
+ self.caching_pp.disable_cache()
+ self.caching_pp.enable_cache(cache_misses=False)
+ self.caching_pp.get_parent_map(['rev3'])
+ self.caching_pp.get_parent_map(['rev3'])
+ self.assertEqual(['rev3', 'rev3'], self.inst_pp.calls)
+
+ def test_cache_extras(self):
+ self.assertEqual({}, self.caching_pp.get_parent_map(['rev3']))
+ self.assertEqual({'rev2': ['rev1']},
+ self.caching_pp.get_parent_map(['rev2']))
+ self.assertEqual(['rev3'], self.inst_pp.calls)
+
+ def test_extras_using_cached(self):
+ self.assertEqual({}, self.caching_pp.get_cached_parent_map(['rev3']))
+ self.assertEqual({}, self.caching_pp.get_parent_map(['rev3']))
+ self.assertEqual({'rev2': ['rev1']},
+ self.caching_pp.get_cached_parent_map(['rev2']))
+ self.assertEqual(['rev3'], self.inst_pp.calls)
+
+
+class TestCollapseLinearRegions(tests.TestCase):
+
+ def assertCollapsed(self, collapsed, original):
+ self.assertEqual(collapsed,
+ _mod_graph.collapse_linear_regions(original))
+
+ def test_collapse_nothing(self):
+ d = {1:[2, 3], 2:[], 3:[]}
+ self.assertCollapsed(d, d)
+ d = {1:[2], 2:[3, 4], 3:[5], 4:[5], 5:[]}
+ self.assertCollapsed(d, d)
+
+ def test_collapse_chain(self):
+ # Any time we have a linear chain, we should be able to collapse
+ d = {1:[2], 2:[3], 3:[4], 4:[5], 5:[]}
+ self.assertCollapsed({1:[5], 5:[]}, d)
+ d = {5:[4], 4:[3], 3:[2], 2:[1], 1:[]}
+ self.assertCollapsed({5:[1], 1:[]}, d)
+ d = {5:[3], 3:[4], 4:[1], 1:[2], 2:[]}
+ self.assertCollapsed({5:[2], 2:[]}, d)
+
+ def test_collapse_with_multiple_children(self):
+ # 7
+ # |
+ # 6
+ # / \
+ # 4 5
+ # | |
+ # 2 3
+ # \ /
+ # 1
+ #
+ # 4 and 5 cannot be removed because 6 has 2 children
+ # 2 and 3 cannot be removed because 1 has 2 parents
+ d = {1:[2, 3], 2:[4], 4:[6], 3:[5], 5:[6], 6:[7], 7:[]}
+ self.assertCollapsed(d, d)
+
+
+class TestGraphThunkIdsToKeys(tests.TestCase):
+
+ def test_heads(self):
+ # A
+ # |\
+ # B C
+ # |/
+ # D
+ d = {('D',): [('B',), ('C',)], ('C',):[('A',)],
+ ('B',): [('A',)], ('A',): []}
+ g = _mod_graph.Graph(_mod_graph.DictParentsProvider(d))
+ graph_thunk = _mod_graph.GraphThunkIdsToKeys(g)
+ self.assertEqual(['D'], sorted(graph_thunk.heads(['D', 'A'])))
+ self.assertEqual(['D'], sorted(graph_thunk.heads(['D', 'B'])))
+ self.assertEqual(['D'], sorted(graph_thunk.heads(['D', 'C'])))
+ self.assertEqual(['B', 'C'], sorted(graph_thunk.heads(['B', 'C'])))
+
+ def test_add_node(self):
+ d = {('C',):[('A',)], ('B',): [('A',)], ('A',): []}
+ g = _mod_graph.KnownGraph(d)
+ graph_thunk = _mod_graph.GraphThunkIdsToKeys(g)
+ graph_thunk.add_node("D", ["A", "C"])
+ self.assertEqual(['B', 'D'],
+ sorted(graph_thunk.heads(['D', 'B', 'A'])))
+
+ def test_merge_sort(self):
+ d = {('C',):[('A',)], ('B',): [('A',)], ('A',): []}
+ g = _mod_graph.KnownGraph(d)
+ graph_thunk = _mod_graph.GraphThunkIdsToKeys(g)
+ graph_thunk.add_node("D", ["A", "C"])
+ self.assertEqual([('C', 0, (2,), False), ('A', 0, (1,), True)],
+ [(n.key, n.merge_depth, n.revno, n.end_of_merge)
+ for n in graph_thunk.merge_sort('C')])
+
+
+class TestStackedParentsProvider(tests.TestCase):
+
+ def setUp(self):
+ super(TestStackedParentsProvider, self).setUp()
+ self.calls = []
+
+ def get_shared_provider(self, info, ancestry, has_cached):
+ pp = _mod_graph.DictParentsProvider(ancestry)
+ if has_cached:
+ pp.get_cached_parent_map = pp.get_parent_map
+ return SharedInstrumentedParentsProvider(pp, self.calls, info)
+
+ def test_stacked_parents_provider(self):
+ parents1 = _mod_graph.DictParentsProvider({'rev2': ['rev3']})
+ parents2 = _mod_graph.DictParentsProvider({'rev1': ['rev4']})
+ stacked = _mod_graph.StackedParentsProvider([parents1, parents2])
+ self.assertEqual({'rev1':['rev4'], 'rev2':['rev3']},
+ stacked.get_parent_map(['rev1', 'rev2']))
+ self.assertEqual({'rev2':['rev3'], 'rev1':['rev4']},
+ stacked.get_parent_map(['rev2', 'rev1']))
+ self.assertEqual({'rev2':['rev3']},
+ stacked.get_parent_map(['rev2', 'rev2']))
+ self.assertEqual({'rev1':['rev4']},
+ stacked.get_parent_map(['rev1', 'rev1']))
+
+ def test_stacked_parents_provider_overlapping(self):
+        # rev2 is available in both providers.
+ # 1
+ # |
+ # 2
+ parents1 = _mod_graph.DictParentsProvider({'rev2': ['rev1']})
+ parents2 = _mod_graph.DictParentsProvider({'rev2': ['rev1']})
+ stacked = _mod_graph.StackedParentsProvider([parents1, parents2])
+ self.assertEqual({'rev2': ['rev1']},
+ stacked.get_parent_map(['rev2']))
+
+ def test_handles_no_get_cached_parent_map(self):
+        # this shows that we handle providers that don't implement
+        # get_cached_parent_map
+ pp1 = self.get_shared_provider('pp1', {'rev2': ('rev1',)},
+ has_cached=False)
+ pp2 = self.get_shared_provider('pp2', {'rev2': ('rev1',)},
+ has_cached=True)
+ stacked = _mod_graph.StackedParentsProvider([pp1, pp2])
+ self.assertEqual({'rev2': ('rev1',)}, stacked.get_parent_map(['rev2']))
+ # No call on 'pp1' because it doesn't provide get_cached_parent_map
+ self.assertEqual([('pp2', 'cached', ['rev2'])], self.calls)
+
+ def test_query_order(self):
+ # We should call get_cached_parent_map on all providers before we call
+ # get_parent_map. Further, we should track what entries we have found,
+ # and not re-try them.
+ pp1 = self.get_shared_provider('pp1', {'a': ()}, has_cached=True)
+ pp2 = self.get_shared_provider('pp2', {'c': ('b',)}, has_cached=False)
+ pp3 = self.get_shared_provider('pp3', {'b': ('a',)}, has_cached=True)
+ stacked = _mod_graph.StackedParentsProvider([pp1, pp2, pp3])
+ self.assertEqual({'a': (), 'b': ('a',), 'c': ('b',)},
+ stacked.get_parent_map(['a', 'b', 'c', 'd']))
+ self.assertEqual([('pp1', 'cached', ['a', 'b', 'c', 'd']),
+ # No call to pp2, because it doesn't have cached
+ ('pp3', 'cached', ['b', 'c', 'd']),
+ ('pp1', ['c', 'd']),
+ ('pp2', ['c', 'd']),
+ ('pp3', ['d']),
+ ], self.calls)
diff --git a/bzrlib/tests/test_groupcompress.py b/bzrlib/tests/test_groupcompress.py
new file mode 100644
index 0000000..f463c60
--- /dev/null
+++ b/bzrlib/tests/test_groupcompress.py
@@ -0,0 +1,1224 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for group compression."""
+
+import zlib
+
+from bzrlib import (
+ btree_index,
+ config,
+ groupcompress,
+ errors,
+ index as _mod_index,
+ osutils,
+ tests,
+ trace,
+ versionedfile,
+ )
+from bzrlib.osutils import sha_string
+from bzrlib.tests.test__groupcompress import compiled_groupcompress_feature
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+def group_compress_implementation_scenarios():
+ scenarios = [
+ ('python', {'compressor': groupcompress.PythonGroupCompressor}),
+ ]
+ if compiled_groupcompress_feature.available():
+ scenarios.append(('C',
+ {'compressor': groupcompress.PyrexGroupCompressor}))
+ return scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestGroupCompressor(tests.TestCase):
+
+ def _chunks_to_repr_lines(self, chunks):
+ return '\n'.join(map(repr, ''.join(chunks).split('\n')))
+
+ def assertEqualDiffEncoded(self, expected, actual):
+ """Compare the actual content to the expected content.
+
+ :param expected: A group of chunks that we expect to see
+ :param actual: The measured 'chunks'
+
+ We will transform the chunks back into lines, and then run 'repr()'
+ over them to handle non-ascii characters.
+ """
+ self.assertEqualDiff(self._chunks_to_repr_lines(expected),
+ self._chunks_to_repr_lines(actual))
+
+
+class TestAllGroupCompressors(TestGroupCompressor):
+ """Tests for GroupCompressor"""
+
+ scenarios = group_compress_implementation_scenarios()
+ compressor = None # Set by scenario
+
+ def test_empty_delta(self):
+ compressor = self.compressor()
+ self.assertEqual([], compressor.chunks)
+
+ def test_one_nosha_delta(self):
+        # diff against NULL
+ compressor = self.compressor()
+ sha1, start_point, end_point, _ = compressor.compress(('label',),
+ 'strange\ncommon\n', None)
+ self.assertEqual(sha_string('strange\ncommon\n'), sha1)
+ expected_lines = 'f' '\x0f' 'strange\ncommon\n'
+ self.assertEqual(expected_lines, ''.join(compressor.chunks))
+ self.assertEqual(0, start_point)
+ self.assertEqual(sum(map(len, expected_lines)), end_point)
+
+ def test_empty_content(self):
+ compressor = self.compressor()
+ # Adding empty bytes should return the 'null' record
+ sha1, start_point, end_point, kind = compressor.compress(('empty',),
+ '', None)
+ self.assertEqual(0, start_point)
+ self.assertEqual(0, end_point)
+ self.assertEqual('fulltext', kind)
+ self.assertEqual(groupcompress._null_sha1, sha1)
+ self.assertEqual(0, compressor.endpoint)
+ self.assertEqual([], compressor.chunks)
+ # Even after adding some content
+ compressor.compress(('content',), 'some\nbytes\n', None)
+ self.assertTrue(compressor.endpoint > 0)
+ sha1, start_point, end_point, kind = compressor.compress(('empty2',),
+ '', None)
+ self.assertEqual(0, start_point)
+ self.assertEqual(0, end_point)
+ self.assertEqual('fulltext', kind)
+ self.assertEqual(groupcompress._null_sha1, sha1)
+
+ def test_extract_from_compressor(self):
+ # Knit fetching will try to reconstruct texts locally which results in
+ # reading something that is in the compressor stream already.
+ compressor = self.compressor()
+ sha1_1, _, _, _ = compressor.compress(('label',),
+ 'strange\ncommon long line\nthat needs a 16 byte match\n', None)
+ expected_lines = list(compressor.chunks)
+ sha1_2, _, end_point, _ = compressor.compress(('newlabel',),
+ 'common long line\nthat needs a 16 byte match\ndifferent\n', None)
+ # get the first out
+ self.assertEqual(('strange\ncommon long line\n'
+ 'that needs a 16 byte match\n', sha1_1),
+ compressor.extract(('label',)))
+ # and the second
+ self.assertEqual(('common long line\nthat needs a 16 byte match\n'
+ 'different\n', sha1_2),
+ compressor.extract(('newlabel',)))
+
+ def test_pop_last(self):
+ compressor = self.compressor()
+ _, _, _, _ = compressor.compress(('key1',),
+ 'some text\nfor the first entry\n', None)
+ expected_lines = list(compressor.chunks)
+ _, _, _, _ = compressor.compress(('key2',),
+ 'some text\nfor the second entry\n', None)
+ compressor.pop_last()
+ self.assertEqual(expected_lines, compressor.chunks)
+
+
+class TestPyrexGroupCompressor(TestGroupCompressor):
+
+ _test_needs_features = [compiled_groupcompress_feature]
+ compressor = groupcompress.PyrexGroupCompressor
+
+ def test_stats(self):
+ compressor = self.compressor()
+ compressor.compress(('label',),
+ 'strange\n'
+ 'common very very long line\n'
+ 'plus more text\n', None)
+ compressor.compress(('newlabel',),
+ 'common very very long line\n'
+ 'plus more text\n'
+ 'different\n'
+ 'moredifferent\n', None)
+ compressor.compress(('label3',),
+ 'new\n'
+ 'common very very long line\n'
+ 'plus more text\n'
+ 'different\n'
+ 'moredifferent\n', None)
+ self.assertAlmostEqual(1.9, compressor.ratio(), 1)
+
+ def test_two_nosha_delta(self):
+ compressor = self.compressor()
+ sha1_1, _, _, _ = compressor.compress(('label',),
+ 'strange\ncommon long line\nthat needs a 16 byte match\n', None)
+ expected_lines = list(compressor.chunks)
+ sha1_2, start_point, end_point, _ = compressor.compress(('newlabel',),
+ 'common long line\nthat needs a 16 byte match\ndifferent\n', None)
+ self.assertEqual(sha_string('common long line\n'
+ 'that needs a 16 byte match\n'
+ 'different\n'), sha1_2)
+ expected_lines.extend([
+ # 'delta', delta length
+ 'd\x0f',
+            # target length
+ '\x36',
+ # copy the line common
+ '\x91\x0a\x2c', #copy, offset 0x0a, len 0x2c
+ # add the line different, and the trailing newline
+ '\x0adifferent\n', # insert 10 bytes
+ ])
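+        # A decoding sketch of the delta above, derived only from the values
+        # used in this test (not an authoritative description of the format):
+        #   'd'        -> this record is a delta
+        #   '\x0f'     -> the delta body is 15 bytes long
+        #   '\x36'     -> the reconstructed (target) text is 54 bytes
+        #   '\x91\x0a\x2c' -> copy 44 bytes starting at group offset 10, i.e.
+        #                     'common long line\n...16 byte match\n' (offset
+        #                     10 skips the 'f' marker, the length byte and
+        #                     'strange\n' of the first record)
+        #   '\x0adifferent\n' -> insert the 10 literal bytes 'different\n'
+        # 44 copied + 10 inserted bytes == the 0x36 target length above.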
+ self.assertEqualDiffEncoded(expected_lines, compressor.chunks)
+ self.assertEqual(sum(map(len, expected_lines)), end_point)
+
+ def test_three_nosha_delta(self):
+ # The first interesting test: make a change that should use lines from
+ # both parents.
+ compressor = self.compressor()
+ sha1_1, _, _, _ = compressor.compress(('label',),
+ 'strange\ncommon very very long line\nwith some extra text\n', None)
+ sha1_2, _, _, _ = compressor.compress(('newlabel',),
+ 'different\nmoredifferent\nand then some more\n', None)
+ expected_lines = list(compressor.chunks)
+ sha1_3, start_point, end_point, _ = compressor.compress(('label3',),
+ 'new\ncommon very very long line\nwith some extra text\n'
+ 'different\nmoredifferent\nand then some more\n',
+ None)
+ self.assertEqual(
+ sha_string('new\ncommon very very long line\nwith some extra text\n'
+ 'different\nmoredifferent\nand then some more\n'),
+ sha1_3)
+ expected_lines.extend([
+ # 'delta', delta length
+ 'd\x0b',
+            # target length
+ '\x5f'
+ # insert new
+ '\x03new',
+ # Copy of first parent 'common' range
+ '\x91\x09\x31' # copy, offset 0x09, 0x31 bytes
+ # Copy of second parent 'different' range
+ '\x91\x3c\x2b' # copy, offset 0x3c, 0x2b bytes
+ ])
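+        # Decoding sketch (derived from this test's data, not a format spec):
+        # the delta body is 11 bytes: the target length (0x5f == 95 bytes),
+        # an insert of the 3 literal bytes 'new', a copy of 49 bytes starting
+        # at offset 9 (the newline that ends 'strange\n' plus the whole
+        # 'common ... extra text\n' span of the first record), and a copy of
+        # 43 bytes at offset 0x3c, which is the complete text of the second
+        # record.  3 + 49 + 43 == 95, matching the target length.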
+ self.assertEqualDiffEncoded(expected_lines, compressor.chunks)
+ self.assertEqual(sum(map(len, expected_lines)), end_point)
+
+
+class TestPythonGroupCompressor(TestGroupCompressor):
+
+ compressor = groupcompress.PythonGroupCompressor
+
+ def test_stats(self):
+ compressor = self.compressor()
+ compressor.compress(('label',),
+ 'strange\n'
+ 'common very very long line\n'
+ 'plus more text\n', None)
+ compressor.compress(('newlabel',),
+ 'common very very long line\n'
+ 'plus more text\n'
+ 'different\n'
+ 'moredifferent\n', None)
+ compressor.compress(('label3',),
+ 'new\n'
+ 'common very very long line\n'
+ 'plus more text\n'
+ 'different\n'
+ 'moredifferent\n', None)
+ self.assertAlmostEqual(1.9, compressor.ratio(), 1)
+
+ def test_two_nosha_delta(self):
+ compressor = self.compressor()
+ sha1_1, _, _, _ = compressor.compress(('label',),
+ 'strange\ncommon long line\nthat needs a 16 byte match\n', None)
+ expected_lines = list(compressor.chunks)
+ sha1_2, start_point, end_point, _ = compressor.compress(('newlabel',),
+ 'common long line\nthat needs a 16 byte match\ndifferent\n', None)
+ self.assertEqual(sha_string('common long line\n'
+ 'that needs a 16 byte match\n'
+ 'different\n'), sha1_2)
+ expected_lines.extend([
+ # 'delta', delta length
+ 'd\x0f',
+ # target length
+ '\x36',
+ # copy the line common
+ '\x91\x0a\x2c', #copy, offset 0x0a, len 0x2c
+ # add the line different, and the trailing newline
+ '\x0adifferent\n', # insert 10 bytes
+ ])
+ self.assertEqualDiffEncoded(expected_lines, compressor.chunks)
+ self.assertEqual(sum(map(len, expected_lines)), end_point)
+
+ def test_three_nosha_delta(self):
+ # The first interesting test: make a change that should use lines from
+ # both parents.
+ compressor = self.compressor()
+ sha1_1, _, _, _ = compressor.compress(('label',),
+ 'strange\ncommon very very long line\nwith some extra text\n', None)
+ sha1_2, _, _, _ = compressor.compress(('newlabel',),
+ 'different\nmoredifferent\nand then some more\n', None)
+ expected_lines = list(compressor.chunks)
+ sha1_3, start_point, end_point, _ = compressor.compress(('label3',),
+ 'new\ncommon very very long line\nwith some extra text\n'
+ 'different\nmoredifferent\nand then some more\n',
+ None)
+ self.assertEqual(
+ sha_string('new\ncommon very very long line\nwith some extra text\n'
+ 'different\nmoredifferent\nand then some more\n'),
+ sha1_3)
+ expected_lines.extend([
+ # 'delta', delta length
+ 'd\x0c',
+ # target length
+ '\x5f'
+ # insert new
+ '\x04new\n',
+ # Copy of first parent 'common' range
+ '\x91\x0a\x30' # copy, offset 0x0a, 0x30 bytes
+ # Copy of second parent 'different' range
+ '\x91\x3c\x2b' # copy, offset 0x3c, 0x2b bytes
+ ])
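+        # Note (a sketch based on the bytes in this test): the pure-Python
+        # compressor encodes the same target text slightly differently from
+        # the C version above.  It inserts the four bytes 'new\n' and then
+        # copies 0x30 (48) bytes from offset 0x0a (the 'common ... extra
+        # text\n' span), whereas the C compressor inserts only 'new' and
+        # borrows the preceding newline by copying from offset 0x09.  Both
+        # reconstruct the same 0x5f (95) byte target.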
+ self.assertEqualDiffEncoded(expected_lines, compressor.chunks)
+ self.assertEqual(sum(map(len, expected_lines)), end_point)
+
+
+class TestGroupCompressBlock(tests.TestCase):
+
+ def make_block(self, key_to_text):
+ """Create a GroupCompressBlock, filling it with the given texts."""
+ compressor = groupcompress.GroupCompressor()
+ start = 0
+ for key in sorted(key_to_text):
+ compressor.compress(key, key_to_text[key], None)
+ locs = dict((key, (start, end)) for key, (start, _, end, _)
+ in compressor.labels_deltas.iteritems())
+ block = compressor.flush()
+ raw_bytes = block.to_bytes()
+ # Go through from_bytes(to_bytes()) so that we start with a compressed
+ # content object
+ return locs, groupcompress.GroupCompressBlock.from_bytes(raw_bytes)
+
+ def test_from_empty_bytes(self):
+ self.assertRaises(ValueError,
+ groupcompress.GroupCompressBlock.from_bytes, '')
+
+ def test_from_minimal_bytes(self):
+ block = groupcompress.GroupCompressBlock.from_bytes(
+ 'gcb1z\n0\n0\n')
+ self.assertIsInstance(block, groupcompress.GroupCompressBlock)
+ self.assertIs(None, block._content)
+ self.assertEqual('', block._z_content)
+ block._ensure_content()
+ self.assertEqual('', block._content)
+ self.assertEqual('', block._z_content)
+ block._ensure_content() # Ensure content is safe to call 2x
+
+ def test_from_invalid(self):
+ self.assertRaises(ValueError,
+ groupcompress.GroupCompressBlock.from_bytes,
+ 'this is not a valid header')
+
+ def test_from_bytes(self):
+ content = ('a tiny bit of content\n')
+ z_content = zlib.compress(content)
+ z_bytes = (
+            'gcb1z\n' # group compress block v1 zlib
+ '%d\n' # Length of compressed content
+ '%d\n' # Length of uncompressed content
+ '%s' # Compressed content
+ ) % (len(z_content), len(content), z_content)
+ block = groupcompress.GroupCompressBlock.from_bytes(
+ z_bytes)
+ self.assertEqual(z_content, block._z_content)
+ self.assertIs(None, block._content)
+ self.assertEqual(len(z_content), block._z_content_length)
+ self.assertEqual(len(content), block._content_length)
+ block._ensure_content()
+ self.assertEqual(z_content, block._z_content)
+ self.assertEqual(content, block._content)
+
+ def test_to_chunks(self):
+ content_chunks = ['this is some content\n',
+ 'this content will be compressed\n']
+ content_len = sum(map(len, content_chunks))
+ content = ''.join(content_chunks)
+ gcb = groupcompress.GroupCompressBlock()
+ gcb.set_chunked_content(content_chunks, content_len)
+ total_len, block_chunks = gcb.to_chunks()
+ block_bytes = ''.join(block_chunks)
+ self.assertEqual(gcb._z_content_length, len(gcb._z_content))
+ self.assertEqual(total_len, len(block_bytes))
+ self.assertEqual(gcb._content_length, content_len)
+        expected_header = ('gcb1z\n' # group compress block v1 zlib
+ '%d\n' # Length of compressed content
+ '%d\n' # Length of uncompressed content
+ ) % (gcb._z_content_length, gcb._content_length)
+ # The first chunk should be the header chunk. It is small, fixed size,
+ # and there is no compelling reason to split it up
+ self.assertEqual(expected_header, block_chunks[0])
+ self.assertStartsWith(block_bytes, expected_header)
+ remaining_bytes = block_bytes[len(expected_header):]
+ raw_bytes = zlib.decompress(remaining_bytes)
+ self.assertEqual(content, raw_bytes)
+
+ def test_to_bytes(self):
+ content = ('this is some content\n'
+ 'this content will be compressed\n')
+ gcb = groupcompress.GroupCompressBlock()
+ gcb.set_content(content)
+ bytes = gcb.to_bytes()
+ self.assertEqual(gcb._z_content_length, len(gcb._z_content))
+ self.assertEqual(gcb._content_length, len(content))
+        expected_header = ('gcb1z\n' # group compress block v1 zlib
+ '%d\n' # Length of compressed content
+ '%d\n' # Length of uncompressed content
+ ) % (gcb._z_content_length, gcb._content_length)
+ self.assertStartsWith(bytes, expected_header)
+ remaining_bytes = bytes[len(expected_header):]
+ raw_bytes = zlib.decompress(remaining_bytes)
+ self.assertEqual(content, raw_bytes)
+
+ # we should get the same results if using the chunked version
+ gcb = groupcompress.GroupCompressBlock()
+ gcb.set_chunked_content(['this is some content\n'
+ 'this content will be compressed\n'],
+ len(content))
+ old_bytes = bytes
+ bytes = gcb.to_bytes()
+ self.assertEqual(old_bytes, bytes)
+
+ def test_partial_decomp(self):
+ content_chunks = []
+ # We need a sufficient amount of data so that zlib.decompress has
+ # partial decompression to work with. Most auto-generated data
+        # compresses a bit too well; we want a combination, so we combine a sha
+ # hash with compressible data.
+ for i in xrange(2048):
+ next_content = '%d\nThis is a bit of duplicate text\n' % (i,)
+ content_chunks.append(next_content)
+ next_sha1 = osutils.sha_string(next_content)
+ content_chunks.append(next_sha1 + '\n')
+ content = ''.join(content_chunks)
+ self.assertEqual(158634, len(content))
+ z_content = zlib.compress(content)
+ self.assertEqual(57182, len(z_content))
+ block = groupcompress.GroupCompressBlock()
+ block._z_content_chunks = (z_content,)
+ block._z_content_length = len(z_content)
+ block._compressor_name = 'zlib'
+ block._content_length = 158634
+ self.assertIs(None, block._content)
+ block._ensure_content(100)
+ self.assertIsNot(None, block._content)
+ # We have decompressed at least 100 bytes
+ self.assertTrue(len(block._content) >= 100)
+ # We have not decompressed the whole content
+ self.assertTrue(len(block._content) < 158634)
+ self.assertEqualDiff(content[:len(block._content)], block._content)
+ # ensuring content that we already have shouldn't cause any more data
+ # to be extracted
+ cur_len = len(block._content)
+ block._ensure_content(cur_len - 10)
+ self.assertEqual(cur_len, len(block._content))
+ # Now we want a bit more content
+ cur_len += 10
+ block._ensure_content(cur_len)
+ self.assertTrue(len(block._content) >= cur_len)
+ self.assertTrue(len(block._content) < 158634)
+ self.assertEqualDiff(content[:len(block._content)], block._content)
+        # And now let's finish
+ block._ensure_content(158634)
+ self.assertEqualDiff(content, block._content)
+ # And the decompressor is finalized
+ self.assertIs(None, block._z_content_decompressor)
+
+ def test__ensure_all_content(self):
+ content_chunks = []
+ # We need a sufficient amount of data so that zlib.decompress has
+ # partial decompression to work with. Most auto-generated data
+        # compresses a bit too well; we want a combination, so we combine a sha
+ # hash with compressible data.
+ for i in xrange(2048):
+ next_content = '%d\nThis is a bit of duplicate text\n' % (i,)
+ content_chunks.append(next_content)
+ next_sha1 = osutils.sha_string(next_content)
+ content_chunks.append(next_sha1 + '\n')
+ content = ''.join(content_chunks)
+ self.assertEqual(158634, len(content))
+ z_content = zlib.compress(content)
+ self.assertEqual(57182, len(z_content))
+ block = groupcompress.GroupCompressBlock()
+ block._z_content_chunks = (z_content,)
+ block._z_content_length = len(z_content)
+ block._compressor_name = 'zlib'
+ block._content_length = 158634
+ self.assertIs(None, block._content)
+ # The first _ensure_content got all of the required data
+ block._ensure_content(158634)
+ self.assertEqualDiff(content, block._content)
+ # And we should have released the _z_content_decompressor since it was
+ # fully consumed
+ self.assertIs(None, block._z_content_decompressor)
+
+ def test__dump(self):
+ dup_content = 'some duplicate content\nwhich is sufficiently long\n'
+ key_to_text = {('1',): dup_content + '1 unique\n',
+ ('2',): dup_content + '2 extra special\n'}
+ locs, block = self.make_block(key_to_text)
+ self.assertEqual([('f', len(key_to_text[('1',)])),
+ ('d', 21, len(key_to_text[('2',)]),
+ [('c', 2, len(dup_content)),
+ ('i', len('2 extra special\n'), '')
+ ]),
+ ], block._dump())
+
+
+class TestCaseWithGroupCompressVersionedFiles(
+ tests.TestCaseWithMemoryTransport):
+
+ def make_test_vf(self, create_graph, keylength=1, do_cleanup=True,
+ dir='.', inconsistency_fatal=True):
+ t = self.get_transport(dir)
+ t.ensure_base()
+ vf = groupcompress.make_pack_factory(graph=create_graph,
+ delta=False, keylength=keylength,
+ inconsistency_fatal=inconsistency_fatal)(t)
+ if do_cleanup:
+ self.addCleanup(groupcompress.cleanup_pack_group, vf)
+ return vf
+
+
+class TestGroupCompressVersionedFiles(TestCaseWithGroupCompressVersionedFiles):
+
+ def make_g_index(self, name, ref_lists=0, nodes=[]):
+ builder = btree_index.BTreeBuilder(ref_lists)
+ for node, references, value in nodes:
+ builder.add_node(node, references, value)
+ stream = builder.finish()
+ trans = self.get_transport()
+ size = trans.put_file(name, stream)
+ return btree_index.BTreeGraphIndex(trans, name, size)
+
+ def make_g_index_missing_parent(self):
+ graph_index = self.make_g_index('missing_parent', 1,
+ [(('parent', ), '2 78 2 10', ([],)),
+ (('tip', ), '2 78 2 10',
+ ([('parent', ), ('missing-parent', )],)),
+ ])
+ return graph_index
+
+ def test_get_record_stream_as_requested(self):
+ # Consider promoting 'as-requested' to general availability, and
+ # make this a VF interface test
+ vf = self.make_test_vf(False, dir='source')
+ vf.add_lines(('a',), (), ['lines\n'])
+ vf.add_lines(('b',), (), ['lines\n'])
+ vf.add_lines(('c',), (), ['lines\n'])
+ vf.add_lines(('d',), (), ['lines\n'])
+ vf.writer.end()
+ keys = [record.key for record in vf.get_record_stream(
+ [('a',), ('b',), ('c',), ('d',)],
+ 'as-requested', False)]
+ self.assertEqual([('a',), ('b',), ('c',), ('d',)], keys)
+ keys = [record.key for record in vf.get_record_stream(
+ [('b',), ('a',), ('d',), ('c',)],
+ 'as-requested', False)]
+ self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)
+
+ # It should work even after being repacked into another VF
+ vf2 = self.make_test_vf(False, dir='target')
+ vf2.insert_record_stream(vf.get_record_stream(
+ [('b',), ('a',), ('d',), ('c',)], 'as-requested', False))
+ vf2.writer.end()
+
+ keys = [record.key for record in vf2.get_record_stream(
+ [('a',), ('b',), ('c',), ('d',)],
+ 'as-requested', False)]
+ self.assertEqual([('a',), ('b',), ('c',), ('d',)], keys)
+ keys = [record.key for record in vf2.get_record_stream(
+ [('b',), ('a',), ('d',), ('c',)],
+ 'as-requested', False)]
+ self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)
+
+ def test_get_record_stream_max_bytes_to_index_default(self):
+ vf = self.make_test_vf(True, dir='source')
+ vf.add_lines(('a',), (), ['lines\n'])
+ vf.writer.end()
+ record = vf.get_record_stream([('a',)], 'unordered', True).next()
+ self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS,
+ record._manager._get_compressor_settings())
+
+ def test_get_record_stream_accesses_compressor_settings(self):
+ vf = self.make_test_vf(True, dir='source')
+ vf.add_lines(('a',), (), ['lines\n'])
+ vf.writer.end()
+ vf._max_bytes_to_index = 1234
+ record = vf.get_record_stream([('a',)], 'unordered', True).next()
+ self.assertEqual(dict(max_bytes_to_index=1234),
+ record._manager._get_compressor_settings())
+
+ def test_insert_record_stream_reuses_blocks(self):
+ vf = self.make_test_vf(True, dir='source')
+ def grouped_stream(revision_ids, first_parents=()):
+ parents = first_parents
+ for revision_id in revision_ids:
+ key = (revision_id,)
+ record = versionedfile.FulltextContentFactory(
+ key, parents, None,
+ 'some content that is\n'
+ 'identical except for\n'
+ 'revision_id:%s\n' % (revision_id,))
+ yield record
+ parents = (key,)
+ # One group, a-d
+ vf.insert_record_stream(grouped_stream(['a', 'b', 'c', 'd']))
+ # Second group, e-h
+ vf.insert_record_stream(grouped_stream(['e', 'f', 'g', 'h'],
+ first_parents=(('d',),)))
+ block_bytes = {}
+ stream = vf.get_record_stream([(r,) for r in 'abcdefgh'],
+ 'unordered', False)
+ num_records = 0
+ for record in stream:
+ if record.key in [('a',), ('e',)]:
+ self.assertEqual('groupcompress-block', record.storage_kind)
+ else:
+ self.assertEqual('groupcompress-block-ref',
+ record.storage_kind)
+ block_bytes[record.key] = record._manager._block._z_content
+ num_records += 1
+ self.assertEqual(8, num_records)
+ for r in 'abcd':
+ key = (r,)
+ self.assertIs(block_bytes[key], block_bytes[('a',)])
+ self.assertNotEqual(block_bytes[key], block_bytes[('e',)])
+ for r in 'efgh':
+ key = (r,)
+ self.assertIs(block_bytes[key], block_bytes[('e',)])
+ self.assertNotEqual(block_bytes[key], block_bytes[('a',)])
+ # Now copy the blocks into another vf, and ensure that the blocks are
+ # preserved without creating new entries
+ vf2 = self.make_test_vf(True, dir='target')
+        # Ordering in 'groupcompress' order should actually swap the groups in
+ # the target vf, but the groups themselves should not be disturbed.
+ def small_size_stream():
+ for record in vf.get_record_stream([(r,) for r in 'abcdefgh'],
+ 'groupcompress', False):
+ record._manager._full_enough_block_size = \
+ record._manager._block._content_length
+ yield record
+
+ vf2.insert_record_stream(small_size_stream())
+ stream = vf2.get_record_stream([(r,) for r in 'abcdefgh'],
+ 'groupcompress', False)
+ vf2.writer.end()
+ num_records = 0
+ for record in stream:
+ num_records += 1
+ self.assertEqual(block_bytes[record.key],
+ record._manager._block._z_content)
+ self.assertEqual(8, num_records)
+
+ def test_insert_record_stream_packs_on_the_fly(self):
+ vf = self.make_test_vf(True, dir='source')
+ def grouped_stream(revision_ids, first_parents=()):
+ parents = first_parents
+ for revision_id in revision_ids:
+ key = (revision_id,)
+ record = versionedfile.FulltextContentFactory(
+ key, parents, None,
+ 'some content that is\n'
+ 'identical except for\n'
+ 'revision_id:%s\n' % (revision_id,))
+ yield record
+ parents = (key,)
+ # One group, a-d
+ vf.insert_record_stream(grouped_stream(['a', 'b', 'c', 'd']))
+ # Second group, e-h
+ vf.insert_record_stream(grouped_stream(['e', 'f', 'g', 'h'],
+ first_parents=(('d',),)))
+ # Now copy the blocks into another vf, and see that the
+ # insert_record_stream rebuilt a new block on-the-fly because of
+ # under-utilization
+ vf2 = self.make_test_vf(True, dir='target')
+ vf2.insert_record_stream(vf.get_record_stream(
+ [(r,) for r in 'abcdefgh'], 'groupcompress', False))
+ stream = vf2.get_record_stream([(r,) for r in 'abcdefgh'],
+ 'groupcompress', False)
+ vf2.writer.end()
+ num_records = 0
+ # All of the records should be recombined into a single block
+ block = None
+ for record in stream:
+ num_records += 1
+ if block is None:
+ block = record._manager._block
+ else:
+ self.assertIs(block, record._manager._block)
+ self.assertEqual(8, num_records)
+
+ def test__insert_record_stream_no_reuse_block(self):
+ vf = self.make_test_vf(True, dir='source')
+ def grouped_stream(revision_ids, first_parents=()):
+ parents = first_parents
+ for revision_id in revision_ids:
+ key = (revision_id,)
+ record = versionedfile.FulltextContentFactory(
+ key, parents, None,
+ 'some content that is\n'
+ 'identical except for\n'
+ 'revision_id:%s\n' % (revision_id,))
+ yield record
+ parents = (key,)
+ # One group, a-d
+ vf.insert_record_stream(grouped_stream(['a', 'b', 'c', 'd']))
+ # Second group, e-h
+ vf.insert_record_stream(grouped_stream(['e', 'f', 'g', 'h'],
+ first_parents=(('d',),)))
+ vf.writer.end()
+ self.assertEqual(8, len(list(vf.get_record_stream(
+ [(r,) for r in 'abcdefgh'],
+ 'unordered', False))))
+ # Now copy the blocks into another vf, and ensure that the blocks are
+ # preserved without creating new entries
+ vf2 = self.make_test_vf(True, dir='target')
+        # Ordering in 'groupcompress' order should actually swap the groups in
+ # the target vf, but the groups themselves should not be disturbed.
+ list(vf2._insert_record_stream(vf.get_record_stream(
+ [(r,) for r in 'abcdefgh'], 'groupcompress', False),
+ reuse_blocks=False))
+ vf2.writer.end()
+ # After inserting with reuse_blocks=False, we should have everything in
+ # a single new block.
+ stream = vf2.get_record_stream([(r,) for r in 'abcdefgh'],
+ 'groupcompress', False)
+ block = None
+ for record in stream:
+ if block is None:
+ block = record._manager._block
+ else:
+ self.assertIs(block, record._manager._block)
+
+ def test_add_missing_noncompression_parent_unvalidated_index(self):
+ unvalidated = self.make_g_index_missing_parent()
+ combined = _mod_index.CombinedGraphIndex([unvalidated])
+ index = groupcompress._GCGraphIndex(combined,
+ is_locked=lambda: True, parents=True,
+ track_external_parent_refs=True)
+ index.scan_unvalidated_index(unvalidated)
+ self.assertEqual(
+ frozenset([('missing-parent',)]), index.get_missing_parents())
+
+ def test_track_external_parent_refs(self):
+ g_index = self.make_g_index('empty', 1, [])
+ mod_index = btree_index.BTreeBuilder(1, 1)
+ combined = _mod_index.CombinedGraphIndex([g_index, mod_index])
+ index = groupcompress._GCGraphIndex(combined,
+ is_locked=lambda: True, parents=True,
+ add_callback=mod_index.add_nodes,
+ track_external_parent_refs=True)
+ index.add_records([
+ (('new-key',), '2 10 2 10', [(('parent-1',), ('parent-2',))])])
+ self.assertEqual(
+ frozenset([('parent-1',), ('parent-2',)]),
+ index.get_missing_parents())
+
+ def make_source_with_b(self, a_parent, path):
+ source = self.make_test_vf(True, dir=path)
+ source.add_lines(('a',), (), ['lines\n'])
+ if a_parent:
+ b_parents = (('a',),)
+ else:
+ b_parents = ()
+ source.add_lines(('b',), b_parents, ['lines\n'])
+ return source
+
+ def do_inconsistent_inserts(self, inconsistency_fatal):
+ target = self.make_test_vf(True, dir='target',
+ inconsistency_fatal=inconsistency_fatal)
+ for x in range(2):
+ source = self.make_source_with_b(x==1, 'source%s' % x)
+ target.insert_record_stream(source.get_record_stream(
+ [('b',)], 'unordered', False))
+
+ def test_inconsistent_redundant_inserts_warn(self):
+ """Should not insert a record that is already present."""
+ warnings = []
+ def warning(template, args):
+ warnings.append(template % args)
+ _trace_warning = trace.warning
+ trace.warning = warning
+ try:
+ self.do_inconsistent_inserts(inconsistency_fatal=False)
+ finally:
+ trace.warning = _trace_warning
+ self.assertEqual(["inconsistent details in skipped record: ('b',)"
+ " ('42 32 0 8', ((),)) ('74 32 0 8', ((('a',),),))"],
+ warnings)
+
+ def test_inconsistent_redundant_inserts_raises(self):
+ e = self.assertRaises(errors.KnitCorrupt, self.do_inconsistent_inserts,
+ inconsistency_fatal=True)
+ self.assertContainsRe(str(e), "Knit.* corrupt: inconsistent details"
+ " in add_records:"
+ " \('b',\) \('42 32 0 8', \(\(\),\)\) \('74 32"
+ " 0 8', \(\(\('a',\),\),\)\)")
+
+ def test_clear_cache(self):
+ vf = self.make_source_with_b(True, 'source')
+ vf.writer.end()
+ for record in vf.get_record_stream([('a',), ('b',)], 'unordered',
+ True):
+ pass
+ self.assertTrue(len(vf._group_cache) > 0)
+ vf.clear_cache()
+ self.assertEqual(0, len(vf._group_cache))
+
+
+class TestGroupCompressConfig(tests.TestCaseWithTransport):
+
+ def make_test_vf(self):
+ t = self.get_transport('.')
+ t.ensure_base()
+ factory = groupcompress.make_pack_factory(graph=True,
+ delta=False, keylength=1, inconsistency_fatal=True)
+ vf = factory(t)
+ self.addCleanup(groupcompress.cleanup_pack_group, vf)
+ return vf
+
+ def test_max_bytes_to_index_default(self):
+ vf = self.make_test_vf()
+ gc = vf._make_group_compressor()
+ self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+ vf._max_bytes_to_index)
+ if isinstance(gc, groupcompress.PyrexGroupCompressor):
+ self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+ gc._delta_index._max_bytes_to_index)
+
+ def test_max_bytes_to_index_in_config(self):
+ c = config.GlobalConfig()
+ c.set_user_option('bzr.groupcompress.max_bytes_to_index', '10000')
+ vf = self.make_test_vf()
+ gc = vf._make_group_compressor()
+ self.assertEqual(10000, vf._max_bytes_to_index)
+ if isinstance(gc, groupcompress.PyrexGroupCompressor):
+ self.assertEqual(10000, gc._delta_index._max_bytes_to_index)
+
+ def test_max_bytes_to_index_bad_config(self):
+ c = config.GlobalConfig()
+ c.set_user_option('bzr.groupcompress.max_bytes_to_index', 'boogah')
+ vf = self.make_test_vf()
+        # TODO: This is triggering a warning; we might want to trap and make
+ # sure it is readable.
+ gc = vf._make_group_compressor()
+ self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+ vf._max_bytes_to_index)
+ if isinstance(gc, groupcompress.PyrexGroupCompressor):
+ self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+ gc._delta_index._max_bytes_to_index)
+
+
+class StubGCVF(object):
+ def __init__(self, canned_get_blocks=None):
+ self._group_cache = {}
+ self._canned_get_blocks = canned_get_blocks or []
+ def _get_blocks(self, read_memos):
+ return iter(self._canned_get_blocks)
+
+
+class Test_BatchingBlockFetcher(TestCaseWithGroupCompressVersionedFiles):
+ """Simple whitebox unit tests for _BatchingBlockFetcher."""
+
+ def test_add_key_new_read_memo(self):
+ """Adding a key with an uncached read_memo new to this batch adds that
+ read_memo to the list of memos to fetch.
+ """
+ # locations are: index_memo, ignored, parents, ignored
+ # where index_memo is: (idx, offset, len, factory_start, factory_end)
+ # and (idx, offset, size) is known as the 'read_memo', identifying the
+ # raw bytes needed.
+ read_memo = ('fake index', 100, 50)
+ locations = {
+ ('key',): (read_memo + (None, None), None, None, None)}
+ batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locations)
+ total_size = batcher.add_key(('key',))
+ self.assertEqual(50, total_size)
+ self.assertEqual([('key',)], batcher.keys)
+ self.assertEqual([read_memo], batcher.memos_to_get)
+
+ def test_add_key_duplicate_read_memo(self):
+ """read_memos that occur multiple times in a batch will only be fetched
+ once.
+ """
+ read_memo = ('fake index', 100, 50)
+ # Two keys, both sharing the same read memo (but different overall
+ # index_memos).
+ locations = {
+ ('key1',): (read_memo + (0, 1), None, None, None),
+ ('key2',): (read_memo + (1, 2), None, None, None)}
+ batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locations)
+ total_size = batcher.add_key(('key1',))
+ total_size = batcher.add_key(('key2',))
+ self.assertEqual(50, total_size)
+ self.assertEqual([('key1',), ('key2',)], batcher.keys)
+ self.assertEqual([read_memo], batcher.memos_to_get)
+
+ def test_add_key_cached_read_memo(self):
+ """Adding a key with a cached read_memo will not cause that read_memo
+ to be added to the list to fetch.
+ """
+ read_memo = ('fake index', 100, 50)
+ gcvf = StubGCVF()
+ gcvf._group_cache[read_memo] = 'fake block'
+ locations = {
+ ('key',): (read_memo + (None, None), None, None, None)}
+ batcher = groupcompress._BatchingBlockFetcher(gcvf, locations)
+ total_size = batcher.add_key(('key',))
+ self.assertEqual(0, total_size)
+ self.assertEqual([('key',)], batcher.keys)
+ self.assertEqual([], batcher.memos_to_get)
+
+ def test_yield_factories_empty(self):
+ """An empty batch yields no factories."""
+ batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), {})
+ self.assertEqual([], list(batcher.yield_factories()))
+
+ def test_yield_factories_calls_get_blocks(self):
+ """Uncached memos are retrieved via get_blocks."""
+ read_memo1 = ('fake index', 100, 50)
+ read_memo2 = ('fake index', 150, 40)
+ gcvf = StubGCVF(
+ canned_get_blocks=[
+ (read_memo1, groupcompress.GroupCompressBlock()),
+ (read_memo2, groupcompress.GroupCompressBlock())])
+ locations = {
+ ('key1',): (read_memo1 + (None, None), None, None, None),
+ ('key2',): (read_memo2 + (None, None), None, None, None)}
+ batcher = groupcompress._BatchingBlockFetcher(gcvf, locations)
+ batcher.add_key(('key1',))
+ batcher.add_key(('key2',))
+ factories = list(batcher.yield_factories(full_flush=True))
+ self.assertLength(2, factories)
+ keys = [f.key for f in factories]
+ kinds = [f.storage_kind for f in factories]
+ self.assertEqual([('key1',), ('key2',)], keys)
+ self.assertEqual(['groupcompress-block', 'groupcompress-block'], kinds)
+
+ def test_yield_factories_flushing(self):
+ """yield_factories holds back on yielding results from the final block
+ unless passed full_flush=True.
+ """
+ fake_block = groupcompress.GroupCompressBlock()
+ read_memo = ('fake index', 100, 50)
+ gcvf = StubGCVF()
+ gcvf._group_cache[read_memo] = fake_block
+ locations = {
+ ('key',): (read_memo + (None, None), None, None, None)}
+ batcher = groupcompress._BatchingBlockFetcher(gcvf, locations)
+ batcher.add_key(('key',))
+ self.assertEqual([], list(batcher.yield_factories()))
+ factories = list(batcher.yield_factories(full_flush=True))
+ self.assertLength(1, factories)
+ self.assertEqual(('key',), factories[0].key)
+ self.assertEqual('groupcompress-block', factories[0].storage_kind)
+
+
+class TestLazyGroupCompress(tests.TestCaseWithTransport):
+
+ _texts = {
+ ('key1',): "this is a text\n"
+ "with a reasonable amount of compressible bytes\n"
+ "which can be shared between various other texts\n",
+ ('key2',): "another text\n"
+ "with a reasonable amount of compressible bytes\n"
+ "which can be shared between various other texts\n",
+ ('key3',): "yet another text which won't be extracted\n"
+ "with a reasonable amount of compressible bytes\n"
+ "which can be shared between various other texts\n",
+ ('key4',): "this will be extracted\n"
+ "but references most of its bytes from\n"
+ "yet another text which won't be extracted\n"
+ "with a reasonable amount of compressible bytes\n"
+ "which can be shared between various other texts\n",
+ }
+ def make_block(self, key_to_text):
+ """Create a GroupCompressBlock, filling it with the given texts."""
+ compressor = groupcompress.GroupCompressor()
+ start = 0
+ for key in sorted(key_to_text):
+ compressor.compress(key, key_to_text[key], None)
+ locs = dict((key, (start, end)) for key, (start, _, end, _)
+ in compressor.labels_deltas.iteritems())
+ block = compressor.flush()
+ raw_bytes = block.to_bytes()
+ return locs, groupcompress.GroupCompressBlock.from_bytes(raw_bytes)
+
+ def add_key_to_manager(self, key, locations, block, manager):
+ start, end = locations[key]
+ manager.add_factory(key, (), start, end)
+
+ def make_block_and_full_manager(self, texts):
+ locations, block = self.make_block(texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ for key in sorted(texts):
+ self.add_key_to_manager(key, locations, block, manager)
+ return block, manager
+
+ def test_get_fulltexts(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ self.add_key_to_manager(('key1',), locations, block, manager)
+ self.add_key_to_manager(('key2',), locations, block, manager)
+ result_order = []
+ for record in manager.get_record_stream():
+ result_order.append(record.key)
+ text = self._texts[record.key]
+ self.assertEqual(text, record.get_bytes_as('fulltext'))
+ self.assertEqual([('key1',), ('key2',)], result_order)
+
+ # If we build the manager in the opposite order, we should get them
+ # back in the opposite order
+ manager = groupcompress._LazyGroupContentManager(block)
+ self.add_key_to_manager(('key2',), locations, block, manager)
+ self.add_key_to_manager(('key1',), locations, block, manager)
+ result_order = []
+ for record in manager.get_record_stream():
+ result_order.append(record.key)
+ text = self._texts[record.key]
+ self.assertEqual(text, record.get_bytes_as('fulltext'))
+ self.assertEqual([('key2',), ('key1',)], result_order)
+
+ def test__wire_bytes_no_keys(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ wire_bytes = manager._wire_bytes()
+ block_length = len(block.to_bytes())
+ # We should have triggered a strip, since we aren't using any content
+ stripped_block = manager._block.to_bytes()
+ self.assertTrue(block_length > len(stripped_block))
+ empty_z_header = zlib.compress('')
+ self.assertEqual('groupcompress-block\n'
+ '8\n' # len(compress(''))
+ '0\n' # len('')
+ '%d\n'# compressed block len
+ '%s' # zheader
+ '%s' # block
+ % (len(stripped_block), empty_z_header,
+ stripped_block),
+ wire_bytes)
+
+ def test__wire_bytes(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ self.add_key_to_manager(('key1',), locations, block, manager)
+ self.add_key_to_manager(('key4',), locations, block, manager)
+ block_bytes = block.to_bytes()
+ wire_bytes = manager._wire_bytes()
+ (storage_kind, z_header_len, header_len,
+ block_len, rest) = wire_bytes.split('\n', 4)
+ z_header_len = int(z_header_len)
+ header_len = int(header_len)
+ block_len = int(block_len)
+ self.assertEqual('groupcompress-block', storage_kind)
+ self.assertEqual(34, z_header_len)
+ self.assertEqual(26, header_len)
+ self.assertEqual(len(block_bytes), block_len)
+ z_header = rest[:z_header_len]
+ header = zlib.decompress(z_header)
+ self.assertEqual(header_len, len(header))
+ entry1 = locations[('key1',)]
+ entry4 = locations[('key4',)]
+ self.assertEqualDiff('key1\n'
+ '\n' # no parents
+ '%d\n' # start offset
+ '%d\n' # end offset
+ 'key4\n'
+ '\n'
+ '%d\n'
+ '%d\n'
+ % (entry1[0], entry1[1],
+ entry4[0], entry4[1]),
+ header)
+ z_block = rest[z_header_len:]
+ self.assertEqual(block_bytes, z_block)
+
+ def test_from_bytes(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ self.add_key_to_manager(('key1',), locations, block, manager)
+ self.add_key_to_manager(('key4',), locations, block, manager)
+ wire_bytes = manager._wire_bytes()
+ self.assertStartsWith(wire_bytes, 'groupcompress-block\n')
+ manager = groupcompress._LazyGroupContentManager.from_bytes(wire_bytes)
+ self.assertIsInstance(manager, groupcompress._LazyGroupContentManager)
+ self.assertEqual(2, len(manager._factories))
+ self.assertEqual(block._z_content, manager._block._z_content)
+ result_order = []
+ for record in manager.get_record_stream():
+ result_order.append(record.key)
+ text = self._texts[record.key]
+ self.assertEqual(text, record.get_bytes_as('fulltext'))
+ self.assertEqual([('key1',), ('key4',)], result_order)
+
+ def test__check_rebuild_no_changes(self):
+ block, manager = self.make_block_and_full_manager(self._texts)
+ manager._check_rebuild_block()
+ self.assertIs(block, manager._block)
+
+ def test__check_rebuild_only_one(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ # Request just the first key, which should trigger a 'strip' action
+ self.add_key_to_manager(('key1',), locations, block, manager)
+ manager._check_rebuild_block()
+ self.assertIsNot(block, manager._block)
+ self.assertTrue(block._content_length > manager._block._content_length)
+ # We should be able to still get the content out of this block, though
+ # it should only have 1 entry
+ for record in manager.get_record_stream():
+ self.assertEqual(('key1',), record.key)
+ self.assertEqual(self._texts[record.key],
+ record.get_bytes_as('fulltext'))
+
+ def test__check_rebuild_middle(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+        # Requesting a small key in the middle should trigger a 'rebuild'
+ self.add_key_to_manager(('key4',), locations, block, manager)
+ manager._check_rebuild_block()
+ self.assertIsNot(block, manager._block)
+ self.assertTrue(block._content_length > manager._block._content_length)
+ for record in manager.get_record_stream():
+ self.assertEqual(('key4',), record.key)
+ self.assertEqual(self._texts[record.key],
+ record.get_bytes_as('fulltext'))
+
+ def test_manager_default_compressor_settings(self):
+ locations, old_block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(old_block)
+ gcvf = groupcompress.GroupCompressVersionedFiles
+ # It doesn't greedily evaluate _max_bytes_to_index
+ self.assertIs(None, manager._compressor_settings)
+ self.assertEqual(gcvf._DEFAULT_COMPRESSOR_SETTINGS,
+ manager._get_compressor_settings())
+
+ def test_manager_custom_compressor_settings(self):
+ locations, old_block = self.make_block(self._texts)
+ called = []
+ def compressor_settings():
+ called.append('called')
+ return (10,)
+ manager = groupcompress._LazyGroupContentManager(old_block,
+ get_compressor_settings=compressor_settings)
+ gcvf = groupcompress.GroupCompressVersionedFiles
+ # It doesn't greedily evaluate compressor_settings
+ self.assertIs(None, manager._compressor_settings)
+ self.assertEqual((10,), manager._get_compressor_settings())
+ self.assertEqual((10,), manager._get_compressor_settings())
+ self.assertEqual((10,), manager._compressor_settings)
+ # Only called 1 time
+ self.assertEqual(['called'], called)
+
+ def test__rebuild_handles_compressor_settings(self):
+        if not issubclass(groupcompress.GroupCompressor,
+ groupcompress.PyrexGroupCompressor):
+ raise tests.TestNotApplicable('pure-python compressor'
+ ' does not handle compressor_settings')
+ locations, old_block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(old_block,
+ get_compressor_settings=lambda: dict(max_bytes_to_index=32))
+ gc = manager._make_group_compressor()
+ self.assertEqual(32, gc._delta_index._max_bytes_to_index)
+ self.add_key_to_manager(('key3',), locations, old_block, manager)
+ self.add_key_to_manager(('key4',), locations, old_block, manager)
+ action, last_byte, total_bytes = manager._check_rebuild_action()
+ self.assertEqual('rebuild', action)
+ manager._rebuild_block()
+ new_block = manager._block
+ self.assertIsNot(old_block, new_block)
+ # Because of the new max_bytes_to_index, we do a poor job of
+ # rebuilding. This is a side-effect of the change, but at least it does
+ # show the setting had an effect.
+ self.assertTrue(old_block._content_length < new_block._content_length)
+
+ def test_check_is_well_utilized_all_keys(self):
+ block, manager = self.make_block_and_full_manager(self._texts)
+ self.assertFalse(manager.check_is_well_utilized())
+ # Though we can fake it by changing the recommended minimum size
+ manager._full_enough_block_size = block._content_length
+ self.assertTrue(manager.check_is_well_utilized())
+ # Setting it just above causes it to fail
+ manager._full_enough_block_size = block._content_length + 1
+ self.assertFalse(manager.check_is_well_utilized())
+ # Setting the mixed-block size doesn't do anything, because the content
+ # is considered to not be 'mixed'
+ manager._full_enough_mixed_block_size = block._content_length
+ self.assertFalse(manager.check_is_well_utilized())
+
+ def test_check_is_well_utilized_mixed_keys(self):
+ texts = {}
+ f1k1 = ('f1', 'k1')
+ f1k2 = ('f1', 'k2')
+ f2k1 = ('f2', 'k1')
+ f2k2 = ('f2', 'k2')
+ texts[f1k1] = self._texts[('key1',)]
+ texts[f1k2] = self._texts[('key2',)]
+ texts[f2k1] = self._texts[('key3',)]
+ texts[f2k2] = self._texts[('key4',)]
+ block, manager = self.make_block_and_full_manager(texts)
+ self.assertFalse(manager.check_is_well_utilized())
+ manager._full_enough_block_size = block._content_length
+ self.assertTrue(manager.check_is_well_utilized())
+ manager._full_enough_block_size = block._content_length + 1
+ self.assertFalse(manager.check_is_well_utilized())
+ manager._full_enough_mixed_block_size = block._content_length
+ self.assertTrue(manager.check_is_well_utilized())
+
+ def test_check_is_well_utilized_partial_use(self):
+ locations, block = self.make_block(self._texts)
+ manager = groupcompress._LazyGroupContentManager(block)
+ manager._full_enough_block_size = block._content_length
+ self.add_key_to_manager(('key1',), locations, block, manager)
+ self.add_key_to_manager(('key2',), locations, block, manager)
+ # Just using the content from key1 and 2 is not enough to be considered
+ # 'complete'
+ self.assertFalse(manager.check_is_well_utilized())
+        # However if we add key4, then we have enough, as we only require 75%
+ # consumption
+ self.add_key_to_manager(('key4',), locations, block, manager)
+ self.assertTrue(manager.check_is_well_utilized())
+
+
+class Test_GCBuildDetails(tests.TestCase):
+
+ def test_acts_like_tuple(self):
+ # _GCBuildDetails inlines some of the data that used to be spread out
+ # across a bunch of tuples
+ bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
+ ('INDEX', 10, 20, 0, 5))
+ self.assertEqual(4, len(bd))
+ self.assertEqual(('INDEX', 10, 20, 0, 5), bd[0])
+ self.assertEqual(None, bd[1]) # Compression Parent is always None
+ self.assertEqual((('parent1',), ('parent2',)), bd[2])
+ self.assertEqual(('group', None), bd[3]) # Record details
+
+ def test__repr__(self):
+ bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
+ ('INDEX', 10, 20, 0, 5))
+ self.assertEqual("_GCBuildDetails(('INDEX', 10, 20, 0, 5),"
+ " (('parent1',), ('parent2',)))",
+ repr(bd))
+
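
Aside: the _GCBuildDetails tests above exercise a memory-saving pattern, namely a __slots__ object that emulates the four-element tuple it replaces so existing callers can keep indexing into it. A minimal sketch of that pattern, with invented names rather than the actual bzrlib implementation:

    class BuildDetails(object):
        """Tuple-like record; __slots__ avoids a per-instance __dict__."""

        __slots__ = ('parents', 'index_memo')

        def __init__(self, parents, index_memo):
            self.parents = parents
            self.index_memo = index_memo

        def __len__(self):
            # Mirror the four-element tuple this object stands in for.
            return 4

        def __getitem__(self, offset):
            # (index_memo, compression_parent, parents, record_details)
            return (self.index_memo, None, self.parents,
                    ('group', None))[offset]

Only the two varying fields are stored; the constant slots (compression parent, record details) are synthesized on access, which is where the memory saving comes from.
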
diff --git a/bzrlib/tests/test_hashcache.py b/bzrlib/tests/test_hashcache.py
new file mode 100644
index 0000000..9719ed9
--- /dev/null
+++ b/bzrlib/tests/test_hashcache.py
@@ -0,0 +1,211 @@
+# Copyright (C) 2005-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import stat
+import time
+
+from bzrlib import osutils
+from bzrlib.errors import BzrError
+from bzrlib.hashcache import HashCache
+from bzrlib.tests import (
+ TestCaseInTempDir,
+ )
+from bzrlib.tests.features import (
+ OsFifoFeature,
+ )
+
+
+sha1 = osutils.sha_string
+
+
+def pause():
+ time.sleep(5.0)
+
+
+class TestHashCache(TestCaseInTempDir):
+ """Test the hashcache against a real directory"""
+
+ def make_hashcache(self):
+ # make a dummy bzr directory just to hold the cache
+ os.mkdir('.bzr')
+ hc = HashCache('.', '.bzr/stat-cache')
+ return hc
+
+ def reopen_hashcache(self):
+ hc = HashCache('.', '.bzr/stat-cache')
+ hc.read()
+ return hc
+
+ def test_hashcache_initial_miss(self):
+ """Get correct hash from an empty hashcache"""
+ hc = self.make_hashcache()
+ self.build_tree_contents([('foo', 'hello')])
+ self.assertEquals(hc.get_sha1('foo'),
+ 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d')
+ self.assertEquals(hc.miss_count, 1)
+ self.assertEquals(hc.hit_count, 0)
+
+ def test_hashcache_new_file(self):
+ hc = self.make_hashcache()
+ self.build_tree_contents([('foo', 'goodbye')])
+ # now read without pausing; it may not be possible to cache it as it's
+ # so new
+ self.assertEquals(hc.get_sha1('foo'), sha1('goodbye'))
+
+ def test_hashcache_nonexistent_file(self):
+ hc = self.make_hashcache()
+ self.assertEquals(hc.get_sha1('no-name-yet'), None)
+
+ def test_hashcache_replaced_file(self):
+ hc = self.make_hashcache()
+ self.build_tree_contents([('foo', 'goodbye')])
+ self.assertEquals(hc.get_sha1('foo'), sha1('goodbye'))
+ os.remove('foo')
+ self.assertEquals(hc.get_sha1('foo'), None)
+ self.build_tree_contents([('foo', 'new content')])
+ self.assertEquals(hc.get_sha1('foo'), sha1('new content'))
+
+ def test_hashcache_not_file(self):
+ hc = self.make_hashcache()
+ self.build_tree(['subdir/'])
+ self.assertEquals(hc.get_sha1('subdir'), None)
+
+ def test_hashcache_load(self):
+ hc = self.make_hashcache()
+ self.build_tree_contents([('foo', 'contents')])
+ pause()
+ self.assertEquals(hc.get_sha1('foo'), sha1('contents'))
+ hc.write()
+ hc = self.reopen_hashcache()
+ self.assertEquals(hc.get_sha1('foo'), sha1('contents'))
+ self.assertEquals(hc.hit_count, 1)
+
+ def test_hammer_hashcache(self):
+ hc = self.make_hashcache()
+ for i in xrange(10000):
+ self.log('start writing at %s', time.time())
+ f = file('foo', 'w')
+ try:
+ last_content = '%08x' % i
+ f.write(last_content)
+ finally:
+ f.close()
+ last_sha1 = sha1(last_content)
+ self.log("iteration %d: %r -> %r",
+ i, last_content, last_sha1)
+ got_sha1 = hc.get_sha1('foo')
+ self.assertEquals(got_sha1, last_sha1)
+ hc.write()
+ hc = self.reopen_hashcache()
+
+ def test_hashcache_raise(self):
+ """check that hashcache can raise BzrError"""
+ self.requireFeature(OsFifoFeature)
+ hc = self.make_hashcache()
+ os.mkfifo('a')
+ # It's possible that the system supports fifos but the filesystem
+ # can't. In that case we should skip at this point. But in fact
+ # such combinations don't usually occur for the filesystem where
+ # people test bzr.
+ self.assertRaises(BzrError, hc.get_sha1, 'a')
+
+
+class FakeHashCache(HashCache):
+ """Hashcache that consults a fake clock rather than the real one.
+
+ This lets us examine how old or new files would be handled, without
+ actually having to wait for time to pass.
+ """
+ def __init__(self):
+ # use a dummy root and cache file name so that we never touch the
+ # real filesystem
+ HashCache.__init__(self, '.', 'hashcache')
+ self._files = {}
+ # simulated clock running forward as operations happen
+ self._clock = 0
+
+ def put_file(self, filename, file_contents):
+ abspath = './' + filename
+ self._files[abspath] = (file_contents, self._clock)
+
+ def _fingerprint(self, abspath, fs=None):
+ entry = self._files[abspath]
+ return (len(entry[0]),
+ entry[1], entry[1],
+ 10, 20,
+ stat.S_IFREG | 0600)
+
+ def _really_sha1_file(self, abspath, filters):
+ if abspath in self._files:
+ return sha1(self._files[abspath][0])
+ else:
+ return None
+
+ def _cutoff_time(self):
+ return self._clock - 2
+
+ def pretend_to_sleep(self, secs):
+ self._clock += secs
+
+
+class TestHashCacheFakeFilesystem(TestCaseInTempDir):
+ """Tests the hashcache using a simulated OS.
+ """
+
+ def make_hashcache(self):
+ return FakeHashCache()
+
+ def test_hashcache_miss_new_file(self):
+ """A new file gives the right sha1 but misses"""
+ hc = self.make_hashcache()
+ hc.put_file('foo', 'hello')
+ self.assertEquals(hc.get_sha1('foo'), sha1('hello'))
+ self.assertEquals(hc.miss_count, 1)
+ self.assertEquals(hc.hit_count, 0)
+ # if we try again it's still too new;
+ self.assertEquals(hc.get_sha1('foo'), sha1('hello'))
+ self.assertEquals(hc.miss_count, 2)
+ self.assertEquals(hc.hit_count, 0)
+
+ def test_hashcache_old_file(self):
+ """An old file gives the right sha1 and hits"""
+ hc = self.make_hashcache()
+ hc.put_file('foo', 'hello')
+ hc.pretend_to_sleep(20)
+ # file is now old enough to cache; should get the correct hash but miss
+ self.assertEquals(hc.get_sha1('foo'), sha1('hello'))
+ self.assertEquals(hc.miss_count, 1)
+ self.assertEquals(hc.hit_count, 0)
+ # and can now be hit
+ self.assertEquals(hc.get_sha1('foo'), sha1('hello'))
+ self.assertEquals(hc.miss_count, 1)
+ self.assertEquals(hc.hit_count, 1)
+ hc.pretend_to_sleep(3)
+ # and again
+ self.assertEquals(hc.get_sha1('foo'), sha1('hello'))
+ self.assertEquals(hc.miss_count, 1)
+ self.assertEquals(hc.hit_count, 2)
+
+ def test_hashcache_invalidates(self):
+ hc = self.make_hashcache()
+ hc.put_file('foo', 'hello')
+ hc.pretend_to_sleep(20)
+ hc.get_sha1('foo')
+ hc.put_file('foo', 'h1llo')
+ self.assertEquals(hc.get_sha1('foo'), sha1('h1llo'))
+ self.assertEquals(hc.miss_count, 2)
+ self.assertEquals(hc.hit_count, 0)
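
Aside: FakeHashCache above illustrates how to test an mtime-based cache without real sleeps, by faking both the clock and the file fingerprints. A self-contained sketch of the same idea, with invented names and an assumed two-second "too new to cache" cutoff:

    import hashlib

    class FakeClockCache(object):
        """Caches SHA1s keyed on a simulated mtime instead of real time."""

        def __init__(self):
            self._clock = 0
            self._files = {}   # name -> (content, mtime)
            self._cache = {}   # name -> (sha1, mtime)

        def pretend_to_sleep(self, secs):
            self._clock += secs

        def put_file(self, name, content):
            self._files[name] = (content, self._clock)

        def get_sha1(self, name):
            content, mtime = self._files[name]
            cached = self._cache.get(name)
            if cached is not None and cached[1] == mtime:
                return cached[0]          # hit: fingerprint unchanged
            digest = hashlib.sha1(content.encode('utf-8')).hexdigest()
            if mtime < self._clock - 2:   # only cache files older than the cutoff
                self._cache[name] = (digest, mtime)
            return digest

Because "time" is just an integer, the slow pause() calls in the real-directory tests become pretend_to_sleep() and the suite stays fast.
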
diff --git a/bzrlib/tests/test_help.py b/bzrlib/tests/test_help.py
new file mode 100644
index 0000000..f4f747b
--- /dev/null
+++ b/bzrlib/tests/test_help.py
@@ -0,0 +1,713 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Unit tests for the bzrlib.help module."""
+
+import textwrap
+
+from bzrlib import (
+ builtins,
+ commands,
+ config,
+ errors,
+ help,
+ help_topics,
+ i18n,
+ plugin,
+ tests,
+ )
+
+from bzrlib.tests.test_i18n import ZzzTranslations
+import re
+
+
+class TestCommandHelp(tests.TestCase):
+ """Tests for help on commands."""
+
+ def assertCmdHelp(self, expected, cmd):
+ self.assertEqualDiff(textwrap.dedent(expected), cmd.get_help_text())
+
+ def test_command_help_includes_see_also(self):
+ class cmd_WithSeeAlso(commands.Command):
+ __doc__ = """A sample command."""
+ _see_also = ['foo', 'bar']
+ self.assertCmdHelp('''\
+Purpose: A sample command.
+Usage: bzr WithSeeAlso
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+See also: bar, foo
+''',
+ cmd_WithSeeAlso())
+
+ def test_get_help_text(self):
+ """Commands have a get_help_text method which returns their help."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command."""
+ self.assertCmdHelp('''\
+Purpose: A sample command.
+Usage: bzr Demo
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+''',
+ cmd_Demo())
+ cmd = cmd_Demo()
+ helptext = cmd.get_help_text()
+ self.assertStartsWith(helptext,
+ 'Purpose: A sample command.\n'
+ 'Usage: bzr Demo')
+ self.assertEndsWith(helptext,
+ ' -h, --help Show help message.\n\n')
+
+ def test_command_with_additional_see_also(self):
+ class cmd_WithSeeAlso(commands.Command):
+ __doc__ = """A sample command."""
+ _see_also = ['foo', 'bar']
+ cmd = cmd_WithSeeAlso()
+ helptext = cmd.get_help_text(['gam'])
+ self.assertEndsWith(
+ helptext,
+ ' -q, --quiet Only display errors and warnings.\n'
+ ' -v, --verbose Display more information.\n'
+ ' -h, --help Show help message.\n'
+ '\n'
+ 'See also: bar, foo, gam\n')
+
+ def test_command_only_additional_see_also(self):
+ class cmd_WithSeeAlso(commands.Command):
+ __doc__ = """A sample command."""
+ cmd = cmd_WithSeeAlso()
+ helptext = cmd.get_help_text(['gam'])
+ self.assertEndsWith(
+ helptext,
+ ' -q, --quiet Only display errors and warnings.\n'
+ ' -v, --verbose Display more information.\n'
+ ' -h, --help Show help message.\n'
+ '\n'
+ 'See also: gam\n')
+
+ def test_get_help_topic(self):
+ """The help topic for a Command is its name()."""
+ class cmd_foo_bar(commands.Command):
+ __doc__ = """A sample command."""
+ cmd = cmd_foo_bar()
+ self.assertEqual(cmd.name(), cmd.get_help_topic())
+
+ def test_formatted_help_text(self):
+ """Help text should be plain text by default."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command.
+
+ :Examples:
+ Example 1::
+
+ cmd arg1
+
+ Example 2::
+
+ cmd arg2
+
+ A code block follows.
+
+ ::
+
+ bzr Demo something
+ """
+ cmd = cmd_Demo()
+ helptext = cmd.get_help_text()
+ self.assertEqualDiff('''\
+Purpose: A sample command.
+Usage: bzr Demo
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+Examples:
+ Example 1:
+
+ cmd arg1
+
+ Example 2:
+
+ cmd arg2
+
+ A code block follows.
+
+ bzr Demo something
+
+''',
+ helptext)
+ helptext = cmd.get_help_text(plain=False)
+ self.assertEqualDiff('''\
+:Purpose: A sample command.
+:Usage: bzr Demo
+
+:Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+:Examples:
+ Example 1::
+
+ cmd arg1
+
+ Example 2::
+
+ cmd arg2
+
+ A code block follows.
+
+ ::
+
+ bzr Demo something
+
+''',
+ helptext)
+
+ def test_concise_help_text(self):
+ """Concise help text excludes the descriptive sections."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command.
+
+ Blah blah blah.
+
+ :Examples:
+ Example 1::
+
+ cmd arg1
+ """
+ cmd = cmd_Demo()
+ helptext = cmd.get_help_text()
+ self.assertEqualDiff('''\
+Purpose: A sample command.
+Usage: bzr Demo
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+Description:
+ Blah blah blah.
+
+Examples:
+ Example 1:
+
+ cmd arg1
+
+''',
+ helptext)
+ helptext = cmd.get_help_text(verbose=False)
+ self.assertEqualDiff('''\
+Purpose: A sample command.
+Usage: bzr Demo
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+See bzr help Demo for more details and examples.
+
+''',
+ helptext)
+
+ def test_help_custom_section_ordering(self):
+ """Custom descriptive sections should remain in the order given."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """\
+A sample command.
+
+Blah blah blah.
+
+:Formats:
+ Interesting stuff about formats.
+
+:Examples:
+ Example 1::
+
+ cmd arg1
+
+:Tips:
+ Clever things to keep in mind.
+"""
+ cmd = cmd_Demo()
+ helptext = cmd.get_help_text()
+ self.assertEqualDiff('''\
+Purpose: A sample command.
+Usage: bzr Demo
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+Description:
+ Blah blah blah.
+
+Formats:
+ Interesting stuff about formats.
+
+Examples:
+ Example 1:
+
+ cmd arg1
+
+Tips:
+ Clever things to keep in mind.
+
+''',
+ helptext)
+
+ def test_help_text_custom_usage(self):
+ """Help text may contain a custom usage section."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command.
+
+ :Usage:
+ cmd Demo [opts] args
+
+ cmd Demo -h
+
+ Blah blah blah.
+ """
+ cmd = cmd_Demo()
+ helptext = cmd.get_help_text()
+ self.assertEqualDiff('''\
+Purpose: A sample command.
+Usage:
+ cmd Demo [opts] args
+
+ cmd Demo -h
+
+
+Options:
+ --usage Show usage message and options.
+ -q, --quiet Only display errors and warnings.
+ -v, --verbose Display more information.
+ -h, --help Show help message.
+
+Description:
+ Blah blah blah.
+
+''',
+ helptext)
+
+
+class ZzzTranslationsForDoc(ZzzTranslations):
+
+ _section_pat = re.compile(r':\w+:\n\s+')
+ _indent_pat = re.compile(r'\s+')
+
+ def zzz(self, s):
+ m = self._section_pat.match(s)
+ if m is None:
+ m = self._indent_pat.match(s)
+ if m:
+ return u'%szz{{%s}}' % (m.group(0), s[m.end():])
+ return u'zz{{%s}}' % s
+
+
+class TestCommandHelpI18n(tests.TestCase):
+ """Tests for help on translated commands."""
+
+ def setUp(self):
+ super(TestCommandHelpI18n, self).setUp()
+ self.overrideAttr(i18n, '_translations', ZzzTranslationsForDoc())
+
+ def assertCmdHelp(self, expected, cmd):
+ self.assertEqualDiff(textwrap.dedent(expected), cmd.get_help_text())
+
+ def test_command_help_includes_see_also(self):
+ class cmd_WithSeeAlso(commands.Command):
+ __doc__ = """A sample command."""
+ _see_also = ['foo', 'bar']
+ self.assertCmdHelp('''\
+zz{{:Purpose: zz{{A sample command.}}
+}}zz{{:Usage: bzr WithSeeAlso
+}}
+zz{{:Options:
+ --usage zz{{Show usage message and options.}}
+ -q, --quiet zz{{Only display errors and warnings.}}
+ -v, --verbose zz{{Display more information.}}
+ -h, --help zz{{Show help message.}}
+}}
+zz{{:See also: bar, foo}}
+''',
+ cmd_WithSeeAlso())
+
+ def test_get_help_text(self):
+ """Commands have a get_help_text method which returns their help."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command."""
+ self.assertCmdHelp('''\
+zz{{:Purpose: zz{{A sample command.}}
+}}zz{{:Usage: bzr Demo
+}}
+zz{{:Options:
+ --usage zz{{Show usage message and options.}}
+ -q, --quiet zz{{Only display errors and warnings.}}
+ -v, --verbose zz{{Display more information.}}
+ -h, --help zz{{Show help message.}}
+}}
+''',
+ cmd_Demo())
+
+ def test_command_with_additional_see_also(self):
+ class cmd_WithSeeAlso(commands.Command):
+ __doc__ = """A sample command."""
+ _see_also = ['foo', 'bar']
+ cmd = cmd_WithSeeAlso()
+ helptext = cmd.get_help_text(['gam'])
+ self.assertEndsWith(
+ helptext,'''\
+ -q, --quiet zz{{Only display errors and warnings.}}
+ -v, --verbose zz{{Display more information.}}
+ -h, --help zz{{Show help message.}}
+}}
+zz{{:See also: bar, foo, gam}}
+''')
+
+ def test_command_only_additional_see_also(self):
+ class cmd_WithSeeAlso(commands.Command):
+ __doc__ = """A sample command."""
+ cmd = cmd_WithSeeAlso()
+ helptext = cmd.get_help_text(['gam'])
+ self.assertEndsWith(
+ helptext, '''\
+zz{{:Options:
+ --usage zz{{Show usage message and options.}}
+ -q, --quiet zz{{Only display errors and warnings.}}
+ -v, --verbose zz{{Display more information.}}
+ -h, --help zz{{Show help message.}}
+}}
+zz{{:See also: gam}}
+''')
+
+
+ def test_help_custom_section_ordering(self):
+ """Custom descriptive sections should remain in the order given."""
+ # The help formatter expects the class name to start with 'cmd_'
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command.
+
+ Blah blah blah.
+
+ :Formats:
+ Interesting stuff about formats.
+
+ :Examples:
+ Example 1::
+
+ cmd arg1
+
+ :Tips:
+ Clever things to keep in mind.
+ """
+ self.assertCmdHelp('''\
+zz{{:Purpose: zz{{A sample command.}}
+}}zz{{:Usage: bzr Demo
+}}
+zz{{:Options:
+ --usage zz{{Show usage message and options.}}
+ -q, --quiet zz{{Only display errors and warnings.}}
+ -v, --verbose zz{{Display more information.}}
+ -h, --help zz{{Show help message.}}
+}}
+Description:
+ zz{{zz{{Blah blah blah.}}
+
+}}:Formats:
+ zz{{Interesting stuff about formats.}}
+
+Examples:
+ zz{{Example 1::}}
+
+ zz{{cmd arg1}}
+
+Tips:
+ zz{{Clever things to keep in mind.}}
+
+''',
+ cmd_Demo())
+
+ def test_help_text_custom_usage(self):
+ """Help text may contain a custom usage section."""
+ class cmd_Demo(commands.Command):
+ __doc__ = """A sample command.
+
+ :Usage:
+ cmd Demo [opts] args
+
+ cmd Demo -h
+
+ Blah blah blah.
+ """
+ self.assertCmdHelp('''\
+zz{{:Purpose: zz{{A sample command.}}
+}}zz{{:Usage:
+ zz{{cmd Demo [opts] args}}
+
+ zz{{cmd Demo -h}}
+
+}}
+zz{{:Options:
+ --usage zz{{Show usage message and options.}}
+ -q, --quiet zz{{Only display errors and warnings.}}
+ -v, --verbose zz{{Display more information.}}
+ -h, --help zz{{Show help message.}}
+}}
+Description:
+ zz{{zz{{Blah blah blah.}}
+
+}}
+''',
+ cmd_Demo())
+
+
+class TestHelp(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ commands.install_bzr_command_hooks()
+
+
+class TestRegisteredTopic(TestHelp):
+ """Tests for the RegisteredTopic class."""
+
+ def test_contruct(self):
+ """Construction takes the help topic name for the registered item."""
+ # validate our test
+ self.assertTrue('basic' in help_topics.topic_registry)
+ topic = help_topics.RegisteredTopic('basic')
+ self.assertEqual('basic', topic.topic)
+
+ def test_get_help_text(self):
+ """RegisteredTopic returns the get_detail results for get_help_text."""
+ topic = help_topics.RegisteredTopic('commands')
+ self.assertEqual(help_topics.topic_registry.get_detail('commands'),
+ topic.get_help_text())
+
+ def test_get_help_text_with_additional_see_also(self):
+ topic = help_topics.RegisteredTopic('commands')
+ self.assertEndsWith(
+ topic.get_help_text(['foo', 'bar']),
+ '\n'
+ 'See also: bar, foo\n')
+
+ def test_get_help_text_loaded_from_file(self):
+ # Pick a known topic stored in an external file
+ topic = help_topics.RegisteredTopic('authentication')
+ self.assertStartsWith(topic.get_help_text(),
+ 'Authentication Settings\n'
+ '=======================\n'
+ '\n')
+
+ def test_get_help_topic(self):
+ """The help topic for RegisteredTopic is its topic from construction."""
+ topic = help_topics.RegisteredTopic('foobar')
+ self.assertEqual('foobar', topic.get_help_topic())
+ topic = help_topics.RegisteredTopic('baz')
+ self.assertEqual('baz', topic.get_help_topic())
+
+
+class TestTopicIndex(TestHelp):
+ """Tests for the HelpTopicIndex class."""
+
+ def test_default_constructable(self):
+ index = help_topics.HelpTopicIndex()
+
+ def test_get_topics_None(self):
+ """Searching for None returns the basic help topic."""
+ index = help_topics.HelpTopicIndex()
+ topics = index.get_topics(None)
+ self.assertEqual(1, len(topics))
+ self.assertIsInstance(topics[0], help_topics.RegisteredTopic)
+ self.assertEqual('basic', topics[0].topic)
+
+ def test_get_topics_topics(self):
+ """Searching for a string returns the matching string."""
+ index = help_topics.HelpTopicIndex()
+ topics = index.get_topics('topics')
+ self.assertEqual(1, len(topics))
+ self.assertIsInstance(topics[0], help_topics.RegisteredTopic)
+ self.assertEqual('topics', topics[0].topic)
+
+ def test_get_topics_no_topic(self):
+ """Searching for something not registered returns []."""
+ index = help_topics.HelpTopicIndex()
+ self.assertEqual([], index.get_topics('nothing by this name'))
+
+ def test_prefix(self):
+ """TopicIndex has a prefix of ''."""
+ index = help_topics.HelpTopicIndex()
+ self.assertEqual('', index.prefix)
+
+
+class TestConfigOptionIndex(TestHelp):
+ """Tests for the HelpCommandIndex class."""
+
+ def setUp(self):
+ super(TestConfigOptionIndex, self).setUp()
+ self.index = help_topics.ConfigOptionHelpIndex()
+
+ def test_get_topics_None(self):
+ """Searching for None returns an empty list."""
+ self.assertEqual([], self.index.get_topics(None))
+
+ def test_get_topics_no_topic(self):
+ self.assertEqual([], self.index.get_topics('nothing by this name'))
+
+ def test_prefix(self):
+ self.assertEqual('configuration/', self.index.prefix)
+
+ def test_get_topic_with_prefix(self):
+ topics = self.index.get_topics('configuration/default_format')
+ self.assertLength(1, topics)
+ opt = topics[0]
+ self.assertIsInstance(opt, config.Option)
+ self.assertEquals('default_format', opt.name)
+
+
+class TestCommandIndex(TestHelp):
+ """Tests for the HelpCommandIndex class."""
+
+ def test_default_constructable(self):
+ index = commands.HelpCommandIndex()
+
+ def test_get_topics_None(self):
+ """Searching for None returns an empty list."""
+ index = commands.HelpCommandIndex()
+ self.assertEqual([], index.get_topics(None))
+
+ def test_get_topics_rocks(self):
+ """Searching for 'rocks' returns the cmd_rocks command instance."""
+ index = commands.HelpCommandIndex()
+ topics = index.get_topics('rocks')
+ self.assertEqual(1, len(topics))
+ self.assertIsInstance(topics[0], builtins.cmd_rocks)
+
+ def test_get_topics_no_topic(self):
+ """Searching for something that is not a command returns []."""
+ index = commands.HelpCommandIndex()
+ self.assertEqual([], index.get_topics('nothing by this name'))
+
+ def test_prefix(self):
+ """CommandIndex has a prefix of 'commands/'."""
+ index = commands.HelpCommandIndex()
+ self.assertEqual('commands/', index.prefix)
+
+ def test_get_topic_with_prefix(self):
+ """Searching for commands/rocks returns the rocks command object."""
+ index = commands.HelpCommandIndex()
+ topics = index.get_topics('commands/rocks')
+ self.assertEqual(1, len(topics))
+ self.assertIsInstance(topics[0], builtins.cmd_rocks)
+
+
+class TestHelpIndices(tests.TestCase):
+ """Tests for the HelpIndices class."""
+
+ def test_default_search_path(self):
+ """The default search path should include internal indices."""
+ indices = help.HelpIndices()
+ self.assertEqual(4, len(indices.search_path))
+ # help topics should be searched first.
+ self.assertIsInstance(indices.search_path[0],
+ help_topics.HelpTopicIndex)
+ # with commands being searched second.
+ self.assertIsInstance(indices.search_path[1],
+ commands.HelpCommandIndex)
+ # plugins are a third index.
+ self.assertIsInstance(indices.search_path[2],
+ plugin.PluginsHelpIndex)
+ # config options are a fourth index
+ self.assertIsInstance(indices.search_path[3],
+ help_topics.ConfigOptionHelpIndex)
+
+ def test_search_for_unknown_topic_raises(self):
+ """Searching for an unknown topic should raise NoHelpTopic."""
+ indices = help.HelpIndices()
+ indices.search_path = []
+ error = self.assertRaises(errors.NoHelpTopic, indices.search, 'foo')
+ self.assertEqual('foo', error.topic)
+
+ def test_search_calls_get_topic(self):
+ """Searching should call get_topics in all indexes in order."""
+ calls = []
+ class RecordingIndex(object):
+ def __init__(self, name):
+ self.prefix = name
+ def get_topics(self, topic):
+ calls.append(('get_topics', self.prefix, topic))
+ return ['something']
+ index = help.HelpIndices()
+ index.search_path = [RecordingIndex('1'), RecordingIndex('2')]
+ # try with None
+ index.search(None)
+ self.assertEqual([
+ ('get_topics', '1', None),
+ ('get_topics', '2', None),
+ ],
+ calls)
+ # and with a string
+ del calls[:]
+ index.search('bar')
+ self.assertEqual([
+ ('get_topics', '1', 'bar'),
+ ('get_topics', '2', 'bar'),
+ ],
+ calls)
+
+ def test_search_returns_index_and_results(self):
+ """Searching should return help topics with their index"""
+ class CannedIndex(object):
+ def __init__(self, prefix, search_result):
+ self.prefix = prefix
+ self.result = search_result
+ def get_topics(self, topic):
+ return self.result
+ index = help.HelpIndices()
+ index_one = CannedIndex('1', ['a'])
+ index_two = CannedIndex('2', ['b', 'c'])
+ index.search_path = [index_one, index_two]
+ self.assertEqual([(index_one, 'a'), (index_two, 'b'), (index_two, 'c')],
+ index.search(None))
+
+ def test_search_checks_for_duplicate_prefixes(self):
+ """It's an error when there are multiple indices with the same prefix."""
+ indices = help.HelpIndices()
+ indices.search_path = [help_topics.HelpTopicIndex(),
+ help_topics.HelpTopicIndex()]
+ self.assertRaises(errors.DuplicateHelpPrefix, indices.search, None)
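
Aside: ZzzTranslationsForDoc above builds on a fake translation object that wraps every string in zz{{...}} markers, letting tests assert that help text really passed through the i18n layer without shipping real translations. A minimal sketch of that style of fake, with hypothetical names (not bzrlib's API):

    class MarkerTranslations(object):
        """Fake gettext-style object that tags everything it 'translates'."""

        def gettext(self, s):
            return u'zz{{%s}}' % s

        def ngettext(self, singular, plural, n):
            # Pick the usual form, then tag it the same way gettext() does.
            return self.gettext(singular if n == 1 else plural)

Any output that lacks the marker is, by construction, text that bypassed translation, which is exactly what the expected-output assertions above are checking for.
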
diff --git a/bzrlib/tests/test_hooks.py b/bzrlib/tests/test_hooks.py
new file mode 100644
index 0000000..16bbdfc
--- /dev/null
+++ b/bzrlib/tests/test_hooks.py
@@ -0,0 +1,275 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the core Hooks logic."""
+
+from bzrlib import (
+ branch,
+ errors,
+ hooks as _mod_hooks,
+ pyutils,
+ tests,
+ )
+from bzrlib.hooks import (
+ HookPoint,
+ Hooks,
+ install_lazy_named_hook,
+ known_hooks,
+ known_hooks_key_to_object,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ )
+
+
+class TestHooks(tests.TestCase):
+
+ def test_docs(self):
+ """docs() should return something reasonable about the Hooks."""
+ class MyHooks(Hooks):
+ pass
+ hooks = MyHooks("bzrlib.tests.test_hooks", "some_hooks")
+ hooks['legacy'] = []
+ hooks.add_hook('post_tip_change',
+ "Invoked after the tip of a branch changes. Called with "
+ "a ChangeBranchTipParams object.", (1, 4))
+ hooks.add_hook('pre_tip_change',
+ "Invoked before the tip of a branch changes. Called with "
+ "a ChangeBranchTipParams object. Hooks should raise "
+ "TipChangeRejected to signal that a tip change is not permitted.",
+ (1, 6), None)
+ self.assertEqualDiff(
+ "MyHooks\n"
+ "-------\n"
+ "\n"
+ "legacy\n"
+ "~~~~~~\n"
+ "\n"
+ "An old-style hook. For documentation see the __init__ method of 'MyHooks'\n"
+ "\n"
+ "post_tip_change\n"
+ "~~~~~~~~~~~~~~~\n"
+ "\n"
+ "Introduced in: 1.4\n"
+ "\n"
+ "Invoked after the tip of a branch changes. Called with a\n"
+ "ChangeBranchTipParams object.\n"
+ "\n"
+ "pre_tip_change\n"
+ "~~~~~~~~~~~~~~\n"
+ "\n"
+ "Introduced in: 1.6\n"
+ "\n"
+ "Invoked before the tip of a branch changes. Called with a\n"
+ "ChangeBranchTipParams object. Hooks should raise TipChangeRejected to\n"
+ "signal that a tip change is not permitted.\n", hooks.docs())
+
+ def test_install_named_hook_raises_unknown_hook(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ self.assertRaises(errors.UnknownHook, hooks.install_named_hook, 'silly',
+ None, "")
+
+ def test_install_named_hook_appends_known_hook(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ hooks['set_rh'] = []
+ hooks.install_named_hook('set_rh', None, "demo")
+ self.assertEqual(hooks['set_rh'], [None])
+
+ def test_install_named_hook_and_retrieve_name(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "somehooks")
+ hooks['set_rh'] = []
+ hooks.install_named_hook('set_rh', None, "demo")
+ self.assertEqual("demo", hooks.get_hook_name(None))
+
+ def test_uninstall_named_hook(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ hooks.add_hook('set_rh', "Set revision history", (2, 0))
+ hooks.install_named_hook('set_rh', None, "demo")
+ self.assertEqual(1, len(hooks["set_rh"]))
+ hooks.uninstall_named_hook("set_rh", "demo")
+ self.assertEqual(0, len(hooks["set_rh"]))
+
+ def test_uninstall_multiple_named_hooks(self):
+ # Multiple callbacks with the same label all get removed
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ hooks.add_hook('set_rh', "Set revision history", (2, 0))
+ hooks.install_named_hook('set_rh', 1, "demo")
+ hooks.install_named_hook('set_rh', 2, "demo")
+ hooks.install_named_hook('set_rh', 3, "othername")
+ self.assertEqual(3, len(hooks["set_rh"]))
+ hooks.uninstall_named_hook("set_rh", "demo")
+ self.assertEqual(1, len(hooks["set_rh"]))
+
+ def test_uninstall_named_hook_unknown_callable(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ hooks.add_hook('set_rh', "Set revision history", (2, 0))
+ self.assertRaises(KeyError, hooks.uninstall_named_hook, "set_rh",
+ "demo")
+
+ def test_uninstall_named_hook_raises_unknown_hook(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ self.assertRaises(errors.UnknownHook, hooks.uninstall_named_hook,
+ 'silly', "")
+
+ def test_uninstall_named_hook_old_style(self):
+ hooks = Hooks("bzrlib.tests.test_hooks", "some_hooks")
+ hooks["set_rh"] = []
+ hooks.install_named_hook('set_rh', None, "demo")
+ self.assertRaises(errors.UnsupportedOperation,
+ hooks.uninstall_named_hook, "set_rh", "demo")
+
+ hooks = Hooks("bzrlib.tests.test_hooks", "TestHooks.hooks")
+
+ def test_install_lazy_named_hook(self):
+ # When the hook points are not yet registered the hook is
+ # added to the _lazy_hooks dictionary in bzrlib.hooks.
+ self.hooks.add_hook('set_rh', "doc", (0, 15))
+ set_rh = lambda: None
+ install_lazy_named_hook('bzrlib.tests.test_hooks',
+ 'TestHooks.hooks', 'set_rh', set_rh, "demo")
+ set_rh_lazy_hooks = _mod_hooks._lazy_hooks[
+ ('bzrlib.tests.test_hooks', 'TestHooks.hooks', 'set_rh')]
+ self.assertEquals(1, len(set_rh_lazy_hooks))
+ self.assertEquals(set_rh, set_rh_lazy_hooks[0][0].get_obj())
+ self.assertEquals("demo", set_rh_lazy_hooks[0][1])
+ self.assertEqual(list(TestHooks.hooks['set_rh']), [set_rh])
+
+ set_rh = lambda: None
+
+ def test_install_named_hook_lazy(self):
+ hooks = Hooks("bzrlib.tests.hooks", "some_hooks")
+ hooks['set_rh'] = HookPoint("set_rh", "doc", (0, 15), None)
+ hooks.install_named_hook_lazy('set_rh', 'bzrlib.tests.test_hooks',
+ 'TestHooks.set_rh', "demo")
+ self.assertEqual(list(hooks['set_rh']), [TestHooks.set_rh])
+
+ def test_install_named_hook_lazy_old(self):
+ # An exception is raised if a lazy hook is installed for
+ # an old style hook point.
+ hooks = Hooks("bzrlib.tests.hooks", "some_hooks")
+ hooks['set_rh'] = []
+ self.assertRaises(errors.UnsupportedOperation,
+ hooks.install_named_hook_lazy,
+ 'set_rh', 'bzrlib.tests.test_hooks', 'TestHooks.set_rh',
+ "demo")
+
+ def test_valid_lazy_hooks(self):
+ # Make sure that all the registered lazy hooks are referring to existing
+ # hook points which allow lazy registration.
+ for key, callbacks in _mod_hooks._lazy_hooks.iteritems():
+ (module_name, member_name, hook_name) = key
+ obj = pyutils.get_named_object(module_name, member_name)
+ self.assertEquals(obj._module, module_name)
+ self.assertEquals(obj._member_name, member_name)
+ self.assertTrue(hook_name in obj)
+ self.assertIs(callbacks, obj[hook_name]._callbacks)
+
+
+class TestHook(tests.TestCase):
+
+ def test___init__(self):
+ doc = ("Invoked after changing the tip of a branch object. Called with"
+ " a bzrlib.branch.PostChangeBranchTipParams object")
+ hook = HookPoint("post_tip_change", doc, (0, 15), None)
+ self.assertEqual(doc, hook.__doc__)
+ self.assertEqual("post_tip_change", hook.name)
+ self.assertEqual((0, 15), hook.introduced)
+ self.assertEqual(None, hook.deprecated)
+ self.assertEqual([], list(hook))
+
+ def test_docs(self):
+ doc = ("Invoked after changing the tip of a branch object. Called with"
+ " a bzrlib.branch.PostChangeBranchTipParams object")
+ hook = HookPoint("post_tip_change", doc, (0, 15), None)
+ self.assertEqual("post_tip_change\n"
+ "~~~~~~~~~~~~~~~\n"
+ "\n"
+ "Introduced in: 0.15\n"
+ "\n"
+ "Invoked after changing the tip of a branch object. Called with a\n"
+ "bzrlib.branch.PostChangeBranchTipParams object\n", hook.docs())
+
+ def test_hook(self):
+ hook = HookPoint("foo", "no docs", None, None)
+ def callback():
+ pass
+ hook.hook(callback, "my callback")
+ self.assertEqual([callback], list(hook))
+
+ def lazy_callback():
+ pass
+
+ def test_lazy_hook(self):
+ hook = HookPoint("foo", "no docs", None, None)
+ hook.hook_lazy(
+ "bzrlib.tests.test_hooks", "TestHook.lazy_callback",
+ "my callback")
+ self.assertEqual([TestHook.lazy_callback], list(hook))
+
+ def test_uninstall(self):
+ hook = HookPoint("foo", "no docs", None, None)
+ hook.hook_lazy(
+ "bzrlib.tests.test_hooks", "TestHook.lazy_callback",
+ "my callback")
+ self.assertEqual([TestHook.lazy_callback], list(hook))
+ hook.uninstall("my callback")
+ self.assertEqual([], list(hook))
+
+ def test_uninstall_unknown(self):
+ hook = HookPoint("foo", "no docs", None, None)
+ self.assertRaises(KeyError, hook.uninstall, "my callback")
+
+ def test___repr(self):
+ # The repr should list all the callbacks, with names.
+ hook = HookPoint("foo", "no docs", None, None)
+ def callback():
+ pass
+ hook.hook(callback, "my callback")
+ callback_repr = repr(callback)
+ self.assertEqual(
+ '<HookPoint(foo), callbacks=[%s(my callback)]>' %
+ callback_repr, repr(hook))
+
+
+class TestHookRegistry(tests.TestCase):
+
+ def test_items_are_reasonable_keys(self):
+ # All the items in the known_hooks registry need to map from
+ # (module_name, member_name) tuples to the callable used to get an
+ # empty Hooks for that attribute. This is used to support the test
+ # suite which needs to generate empty hooks (and HookPoints) to ensure
+ # isolation and prevent tests failing spuriously.
+ for key, factory in known_hooks.items():
+ self.assertTrue(callable(factory),
+ "The factory(%r) for %r is not callable" % (factory, key))
+ obj = known_hooks_key_to_object(key)
+ self.assertIsInstance(obj, Hooks)
+ new_hooks = factory()
+ self.assertIsInstance(obj, Hooks)
+ self.assertEqual(type(obj), type(new_hooks))
+ self.assertEqual("No hook name", new_hooks.get_hook_name(None))
+
+ def test_known_hooks_key_to_object(self):
+ self.assertIs(branch.Branch.hooks,
+ known_hooks_key_to_object(('bzrlib.branch', 'Branch.hooks')))
+
+ def test_known_hooks_key_to_parent_and_attribute(self):
+ self.assertEqual((branch.Branch, 'hooks'),
+ known_hooks.key_to_parent_and_attribute(
+ ('bzrlib.branch', 'Branch.hooks')))
+ self.assertEqual((branch, 'Branch'),
+ known_hooks.key_to_parent_and_attribute(
+ ('bzrlib.branch', 'Branch')))
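
Aside: the HookPoint tests above hinge on callbacks being registered together with a human-readable label, which is what uninstall() and the repr report. A rough sketch of such a hook point; the names are invented and the real bzrlib.hooks.HookPoint does more (lazy references, docs, deprecation metadata):

    class MiniHookPoint(object):
        """Named hook point holding (callback, label) pairs."""

        def __init__(self, name):
            self.name = name
            self._callbacks = []

        def hook(self, callback, label):
            self._callbacks.append((callback, label))

        def uninstall(self, label):
            kept = [(cb, lb) for cb, lb in self._callbacks if lb != label]
            if len(kept) == len(self._callbacks):
                raise KeyError(label)   # nothing registered under that label
            self._callbacks = kept

        def __iter__(self):
            return iter([cb for cb, _ in self._callbacks])

Iterating yields only the callables, so calling code stays oblivious to the labels, while tests and diagnostics can still remove or display hooks by name.
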
diff --git a/bzrlib/tests/test_http.py b/bzrlib/tests/test_http.py
new file mode 100644
index 0000000..7e29b4b
--- /dev/null
+++ b/bzrlib/tests/test_http.py
@@ -0,0 +1,2363 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for HTTP implementations.
+
+This module defines a load_tests() method that parametrizes test classes for
+transport implementations, http protocol versions and authentication schemes.
+"""
+
+# TODO: Should be renamed to bzrlib.transport.http.tests?
+# TODO: What about renaming to bzrlib.tests.transport.http ?
+
+import httplib
+import SimpleHTTPServer
+import socket
+import sys
+import threading
+
+import bzrlib
+from bzrlib import (
+ config,
+ controldir,
+ debug,
+ errors,
+ osutils,
+ remote as _mod_remote,
+ tests,
+ trace,
+ transport,
+ ui,
+ )
+from bzrlib.tests import (
+ features,
+ http_server,
+ http_utils,
+ test_server,
+ )
+from bzrlib.tests.scenarios import (
+ load_tests_apply_scenarios,
+ multiply_scenarios,
+ )
+from bzrlib.transport import (
+ http,
+ remote,
+ )
+from bzrlib.transport.http import (
+ _urllib,
+ _urllib2_wrappers,
+ )
+
+
+if features.pycurl.available():
+ from bzrlib.transport.http._pycurl import PyCurlTransport
+
+
+load_tests = load_tests_apply_scenarios
+
+
+def vary_by_http_client_implementation():
+ """Test the two libraries we can use, pycurl and urllib."""
+ transport_scenarios = [
+ ('urllib', dict(_transport=_urllib.HttpTransport_urllib,
+ _server=http_server.HttpServer_urllib,
+ _url_protocol='http+urllib',)),
+ ]
+ if features.pycurl.available():
+ transport_scenarios.append(
+ ('pycurl', dict(_transport=PyCurlTransport,
+ _server=http_server.HttpServer_PyCurl,
+ _url_protocol='http+pycurl',)))
+ return transport_scenarios
+
+
+def vary_by_http_protocol_version():
+ """Test on http/1.0 and 1.1"""
+ return [
+ ('HTTP/1.0', dict(_protocol_version='HTTP/1.0')),
+ ('HTTP/1.1', dict(_protocol_version='HTTP/1.1')),
+ ]
+
+
+def vary_by_http_auth_scheme():
+ scenarios = [
+ ('basic', dict(_auth_server=http_utils.HTTPBasicAuthServer)),
+ ('digest', dict(_auth_server=http_utils.HTTPDigestAuthServer)),
+ ('basicdigest',
+ dict(_auth_server=http_utils.HTTPBasicAndDigestAuthServer)),
+ ]
+ # Add some attributes common to all scenarios
+ for scenario_id, scenario_dict in scenarios:
+ scenario_dict.update(_auth_header='Authorization',
+ _username_prompt_prefix='',
+ _password_prompt_prefix='')
+ return scenarios
+
+
+def vary_by_http_proxy_auth_scheme():
+ scenarios = [
+ ('proxy-basic', dict(_auth_server=http_utils.ProxyBasicAuthServer)),
+ ('proxy-digest', dict(_auth_server=http_utils.ProxyDigestAuthServer)),
+ ('proxy-basicdigest',
+ dict(_auth_server=http_utils.ProxyBasicAndDigestAuthServer)),
+ ]
+ # Add some attributes common to all scenarios
+ for scenario_id, scenario_dict in scenarios:
+ scenario_dict.update(_auth_header='Proxy-Authorization',
+ _username_prompt_prefix='Proxy ',
+ _password_prompt_prefix='Proxy ')
+ return scenarios
+
+
+def vary_by_http_activity():
+ activity_scenarios = [
+ ('urllib,http', dict(_activity_server=ActivityHTTPServer,
+ _transport=_urllib.HttpTransport_urllib,)),
+ ]
+ if features.pycurl.available():
+ activity_scenarios.append(
+ ('pycurl,http', dict(_activity_server=ActivityHTTPServer,
+ _transport=PyCurlTransport,)),)
+ if features.HTTPSServerFeature.available():
+ # FIXME: Until we have a better way to handle self-signed certificates
+ # (like allowing them in a test specific authentication.conf for
+ # example), we need some specialized pycurl/urllib transport for tests.
+ # -- vila 2012-01-20
+ from bzrlib.tests import (
+ ssl_certs,
+ )
+ class HTTPS_urllib_transport(_urllib.HttpTransport_urllib):
+
+ def __init__(self, base, _from_transport=None):
+ super(HTTPS_urllib_transport, self).__init__(
+ base, _from_transport=_from_transport,
+ ca_certs=ssl_certs.build_path('ca.crt'))
+
+ activity_scenarios.append(
+ ('urllib,https', dict(_activity_server=ActivityHTTPSServer,
+ _transport=HTTPS_urllib_transport,)),)
+ if features.pycurl.available():
+ class HTTPS_pycurl_transport(PyCurlTransport):
+
+ def __init__(self, base, _from_transport=None):
+ super(HTTPS_pycurl_transport, self).__init__(
+ base, _from_transport)
+ self.cabundle = str(ssl_certs.build_path('ca.crt'))
+
+ activity_scenarios.append(
+ ('pycurl,https', dict(_activity_server=ActivityHTTPSServer,
+ _transport=HTTPS_pycurl_transport,)),)
+ return activity_scenarios
+
+
+class FakeManager(object):
+
+ def __init__(self):
+ self.credentials = []
+
+ def add_password(self, realm, host, username, password):
+ self.credentials.append([realm, host, username, password])
+
+
+class RecordingServer(object):
+ """A fake HTTP server.
+
+ It records the bytes sent to it, and replies with a 200.
+ """
+
+ def __init__(self, expect_body_tail=None, scheme=''):
+ """Constructor.
+
+ :type expect_body_tail: str
+ :param expect_body_tail: a reply won't be sent until this string is
+ received.
+ """
+ self._expect_body_tail = expect_body_tail
+ self.host = None
+ self.port = None
+ self.received_bytes = ''
+ self.scheme = scheme
+
+ def get_url(self):
+ return '%s://%s:%s/' % (self.scheme, self.host, self.port)
+
+ def start_server(self):
+ self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._sock.bind(('127.0.0.1', 0))
+ self.host, self.port = self._sock.getsockname()
+ self._ready = threading.Event()
+ self._thread = test_server.TestThread(
+ sync_event=self._ready, target=self._accept_read_and_reply)
+ self._thread.start()
+ if 'threads' in tests.selftest_debug_flags:
+ sys.stderr.write('Thread started: %s\n' % (self._thread.ident,))
+ self._ready.wait()
+
+ def _accept_read_and_reply(self):
+ self._sock.listen(1)
+ self._ready.set()
+ conn, address = self._sock.accept()
+ if self._expect_body_tail is not None:
+ while not self.received_bytes.endswith(self._expect_body_tail):
+ self.received_bytes += conn.recv(4096)
+ conn.sendall('HTTP/1.1 200 OK\r\n')
+ try:
+ self._sock.close()
+ except socket.error:
+ # The client may have already closed the socket.
+ pass
+
+ def stop_server(self):
+ try:
+ # Issue a fake connection to wake up the server and allow it to
+ # finish quickly
+ fake_conn = osutils.connect_socket((self.host, self.port))
+ fake_conn.close()
+ except socket.error:
+ # We might have already closed it. We don't care.
+ pass
+ self.host = None
+ self.port = None
+ self._thread.join()
+ if 'threads' in tests.selftest_debug_flags:
+ sys.stderr.write('Thread joined: %s\n' % (self._thread.ident,))
+
+
+class TestAuthHeader(tests.TestCase):
+
+ def parse_header(self, header, auth_handler_class=None):
+ if auth_handler_class is None:
+ auth_handler_class = _urllib2_wrappers.AbstractAuthHandler
+ self.auth_handler = auth_handler_class()
+ return self.auth_handler._parse_auth_header(header)
+
+ def test_empty_header(self):
+ scheme, remainder = self.parse_header('')
+ self.assertEqual('', scheme)
+ self.assertIs(None, remainder)
+
+ def test_negotiate_header(self):
+ scheme, remainder = self.parse_header('Negotiate')
+ self.assertEqual('negotiate', scheme)
+ self.assertIs(None, remainder)
+
+ def test_basic_header(self):
+ scheme, remainder = self.parse_header(
+ 'Basic realm="Thou should not pass"')
+ self.assertEqual('basic', scheme)
+ self.assertEqual('realm="Thou should not pass"', remainder)
+
+ def test_basic_extract_realm(self):
+ scheme, remainder = self.parse_header(
+ 'Basic realm="Thou should not pass"',
+ _urllib2_wrappers.BasicAuthHandler)
+ match, realm = self.auth_handler.extract_realm(remainder)
+ self.assertTrue(match is not None)
+ self.assertEqual('Thou should not pass', realm)
+
+ def test_digest_header(self):
+ scheme, remainder = self.parse_header(
+ 'Digest realm="Thou should not pass"')
+ self.assertEqual('digest', scheme)
+ self.assertEqual('realm="Thou should not pass"', remainder)
+
+
+class TestHTTPRangeParsing(tests.TestCase):
+
+ def setUp(self):
+ super(TestHTTPRangeParsing, self).setUp()
+ # We focus on range parsing here and ignore everything else
+ class RequestHandler(http_server.TestingHTTPRequestHandler):
+ def setup(self): pass
+ def handle(self): pass
+ def finish(self): pass
+
+ self.req_handler = RequestHandler(None, None, None)
+
+ def assertRanges(self, ranges, header, file_size):
+ self.assertEquals(ranges,
+ self.req_handler._parse_ranges(header, file_size))
+
+ def test_simple_range(self):
+ self.assertRanges([(0,2)], 'bytes=0-2', 12)
+
+ def test_tail(self):
+ self.assertRanges([(8, 11)], 'bytes=-4', 12)
+
+ def test_tail_bigger_than_file(self):
+ self.assertRanges([(0, 11)], 'bytes=-99', 12)
+
+ def test_range_without_end(self):
+ self.assertRanges([(4, 11)], 'bytes=4-', 12)
+
+ def test_invalid_ranges(self):
+ self.assertRanges(None, 'bytes=12-22', 12)
+ self.assertRanges(None, 'bytes=1-3,12-22', 12)
+ self.assertRanges(None, 'bytes=-', 12)
+
+
+class TestHTTPServer(tests.TestCase):
+ """Test the HTTP servers implementations."""
+
+ def test_invalid_protocol(self):
+ class BogusRequestHandler(http_server.TestingHTTPRequestHandler):
+
+ protocol_version = 'HTTP/0.1'
+
+ self.assertRaises(httplib.UnknownProtocol,
+ http_server.HttpServer, BogusRequestHandler)
+
+ def test_force_invalid_protocol(self):
+ self.assertRaises(httplib.UnknownProtocol,
+ http_server.HttpServer, protocol_version='HTTP/0.1')
+
+ def test_server_start_and_stop(self):
+ server = http_server.HttpServer()
+ self.addCleanup(server.stop_server)
+ server.start_server()
+ self.assertTrue(server.server is not None)
+ self.assertTrue(server.server.serving is not None)
+ self.assertTrue(server.server.serving)
+
+ def test_create_http_server_one_zero(self):
+ class RequestHandlerOneZero(http_server.TestingHTTPRequestHandler):
+
+ protocol_version = 'HTTP/1.0'
+
+ server = http_server.HttpServer(RequestHandlerOneZero)
+ self.start_server(server)
+ self.assertIsInstance(server.server, http_server.TestingHTTPServer)
+
+ def test_create_http_server_one_one(self):
+ class RequestHandlerOneOne(http_server.TestingHTTPRequestHandler):
+
+ protocol_version = 'HTTP/1.1'
+
+ server = http_server.HttpServer(RequestHandlerOneOne)
+ self.start_server(server)
+ self.assertIsInstance(server.server,
+ http_server.TestingThreadingHTTPServer)
+
+ def test_create_http_server_force_one_one(self):
+ class RequestHandlerOneZero(http_server.TestingHTTPRequestHandler):
+
+ protocol_version = 'HTTP/1.0'
+
+ server = http_server.HttpServer(RequestHandlerOneZero,
+ protocol_version='HTTP/1.1')
+ self.start_server(server)
+ self.assertIsInstance(server.server,
+ http_server.TestingThreadingHTTPServer)
+
+ def test_create_http_server_force_one_zero(self):
+ class RequestHandlerOneOne(http_server.TestingHTTPRequestHandler):
+
+ protocol_version = 'HTTP/1.1'
+
+ server = http_server.HttpServer(RequestHandlerOneOne,
+ protocol_version='HTTP/1.0')
+ self.start_server(server)
+ self.assertIsInstance(server.server,
+ http_server.TestingHTTPServer)
+
+
+class TestWithTransport_pycurl(object):
+ """Test case to inherit from if pycurl is present"""
+
+ def _get_pycurl_maybe(self):
+ self.requireFeature(features.pycurl)
+ return PyCurlTransport
+
+ _transport = property(_get_pycurl_maybe)
+
+
+class TestHttpUrls(tests.TestCase):
+
+ # TODO: This should be moved to authorization tests once they
+ # are written.
+
+ def test_url_parsing(self):
+ f = FakeManager()
+ url = http.extract_auth('http://example.com', f)
+ self.assertEqual('http://example.com', url)
+ self.assertEqual(0, len(f.credentials))
+ url = http.extract_auth(
+ 'http://user:pass@example.com/bzr/bzr.dev', f)
+ self.assertEqual('http://example.com/bzr/bzr.dev', url)
+ self.assertEqual(1, len(f.credentials))
+ self.assertEqual([None, 'example.com', 'user', 'pass'],
+ f.credentials[0])
+
+
+class TestHttpTransportUrls(tests.TestCase):
+ """Test the http urls."""
+
+ scenarios = vary_by_http_client_implementation()
+
+ def test_abs_url(self):
+ """Construction of absolute http URLs"""
+ t = self._transport('http://example.com/bzr/bzr.dev/')
+ eq = self.assertEqualDiff
+ eq(t.abspath('.'), 'http://example.com/bzr/bzr.dev')
+ eq(t.abspath('foo/bar'), 'http://example.com/bzr/bzr.dev/foo/bar')
+ eq(t.abspath('.bzr'), 'http://example.com/bzr/bzr.dev/.bzr')
+ eq(t.abspath('.bzr/1//2/./3'),
+ 'http://example.com/bzr/bzr.dev/.bzr/1/2/3')
+
+ def test_invalid_http_urls(self):
+ """Trap invalid construction of urls"""
+ self._transport('http://example.com/bzr/bzr.dev/')
+ self.assertRaises(errors.InvalidURL,
+ self._transport,
+ 'http://http://example.com/bzr/bzr.dev/')
+
+ def test_http_root_urls(self):
+ """Construction of URLs from server root"""
+ t = self._transport('http://example.com/')
+ eq = self.assertEqualDiff
+ eq(t.abspath('.bzr/tree-version'),
+ 'http://example.com/.bzr/tree-version')
+
+ def test_http_impl_urls(self):
+ """There are servers which ask for particular clients to connect"""
+ server = self._server()
+ server.start_server()
+ try:
+ url = server.get_url()
+ self.assertTrue(url.startswith('%s://' % self._url_protocol))
+ finally:
+ server.stop_server()
+
+
+class TestHttps_pycurl(TestWithTransport_pycurl, tests.TestCase):
+
+ # TODO: This should really be moved into another pycurl-specific
+ # test. When https tests are implemented, take this one into
+ # account.
+ def test_pycurl_without_https_support(self):
+ """Test that pycurl without SSL does not fail with a traceback.
+
+ For the purpose of the test, we force pycurl to ignore
+ https by supplying a fake version_info that does not
+ support it.
+ """
+ self.requireFeature(features.pycurl)
+ # Import the module locally now that we know it's available.
+ pycurl = features.pycurl.module
+
+ self.overrideAttr(pycurl, 'version_info',
+ # Fake the pycurl version_info This was taken from
+ # a windows pycurl without SSL (thanks to bialix)
+ lambda : (2,
+ '7.13.2',
+ 462082,
+ 'i386-pc-win32',
+ 2576,
+ None,
+ 0,
+ None,
+ ('ftp', 'gopher', 'telnet',
+ 'dict', 'ldap', 'http', 'file'),
+ None,
+ 0,
+ None))
+ self.assertRaises(errors.DependencyNotPresent, self._transport,
+ 'https://launchpad.net')
+
+
+class TestHTTPConnections(http_utils.TestCaseWithWebserver):
+ """Test the http connections."""
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ http_utils.TestCaseWithWebserver.setUp(self)
+ self.build_tree(['foo/', 'foo/bar'], line_endings='binary',
+ transport=self.get_transport())
+
+ def test_http_has(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ self.assertEqual(t.has('foo/bar'), True)
+ self.assertEqual(len(server.logs), 1)
+ self.assertContainsRe(server.logs[0],
+ r'"HEAD /foo/bar HTTP/1.." (200|302) - "-" "bzr/')
+
+ def test_http_has_not_found(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ self.assertEqual(t.has('not-found'), False)
+ self.assertContainsRe(server.logs[1],
+ r'"HEAD /not-found HTTP/1.." 404 - "-" "bzr/')
+
+ def test_http_get(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ fp = t.get('foo/bar')
+ self.assertEqualDiff(
+ fp.read(),
+ 'contents of foo/bar\n')
+ self.assertEqual(len(server.logs), 1)
+ self.assertTrue(server.logs[0].find(
+ '"GET /foo/bar HTTP/1.1" 200 - "-" "bzr/%s'
+ % bzrlib.__version__) > -1)
+
+ def test_has_on_bogus_host(self):
+ # Get a free address and don't 'accept' on it, so that we
+ # can be sure there is no http handler there, but set a
+ # reasonable timeout to not slow down tests too much.
+ default_timeout = socket.getdefaulttimeout()
+ try:
+ socket.setdefaulttimeout(2)
+ s = socket.socket()
+ s.bind(('localhost', 0))
+ t = self._transport('http://%s:%s/' % s.getsockname())
+ self.assertRaises(errors.ConnectionError, t.has, 'foo/bar')
+ finally:
+ socket.setdefaulttimeout(default_timeout)
+
+
+class TestHttpTransportRegistration(tests.TestCase):
+ """Test registrations of various http implementations"""
+
+ scenarios = vary_by_http_client_implementation()
+
+ def test_http_registered(self):
+ t = transport.get_transport_from_url(
+ '%s://foo.com/' % self._url_protocol)
+ self.assertIsInstance(t, transport.Transport)
+ self.assertIsInstance(t, self._transport)
+
+
+class TestPost(tests.TestCase):
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def test_post_body_is_received(self):
+ server = RecordingServer(expect_body_tail='end-of-body',
+ scheme=self._url_protocol)
+ self.start_server(server)
+ url = server.get_url()
+ # FIXME: needs a cleanup -- vila 20100611
+ http_transport = transport.get_transport_from_url(url)
+ code, response = http_transport._post('abc def end-of-body')
+ self.assertTrue(
+ server.received_bytes.startswith('POST /.bzr/smart HTTP/1.'))
+ self.assertTrue('content-length: 19\r' in server.received_bytes.lower())
+ self.assertTrue('content-type: application/octet-stream\r'
+ in server.received_bytes.lower())
+ # The transport should not be assuming that the server can accept
+ # chunked encoding on the first connection (the server may only
+ # speak HTTP/1.0), so we check for the literal string.
+ self.assertTrue(
+ server.received_bytes.endswith('\r\n\r\nabc def end-of-body'))
+
+
+class TestRangeHeader(tests.TestCase):
+ """Test range_header method"""
+
+ def check_header(self, value, ranges=[], tail=0):
+ offsets = [ (start, end - start + 1) for start, end in ranges]
+ coalesce = transport.Transport._coalesce_offsets
+ coalesced = list(coalesce(offsets, limit=0, fudge_factor=0))
+ range_header = http.HttpTransportBase._range_header
+ self.assertEqual(value, range_header(coalesced, tail))
+
+ def test_range_header_single(self):
+ self.check_header('0-9', ranges=[(0,9)])
+ self.check_header('100-109', ranges=[(100,109)])
+
+ def test_range_header_tail(self):
+ self.check_header('-10', tail=10)
+ self.check_header('-50', tail=50)
+
+ def test_range_header_multi(self):
+ self.check_header('0-9,100-200,300-5000',
+ ranges=[(0,9), (100, 200), (300,5000)])
+
+ def test_range_header_mixed(self):
+ self.check_header('0-9,300-5000,-50',
+ ranges=[(0,9), (300,5000)],
+ tail=50)
+
+
+class TestSpecificRequestHandler(http_utils.TestCaseWithWebserver):
+ """Tests a specific request handler.
+
+ Subclasses are expected to override _req_handler_class
+ """
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ # Provide a useful default
+ _req_handler_class = http_server.TestingHTTPRequestHandler
+
+ def create_transport_readonly_server(self):
+ server = http_server.HttpServer(self._req_handler_class,
+ protocol_version=self._protocol_version)
+ server._url_protocol = self._url_protocol
+ return server
+
+ def _testing_pycurl(self):
+ # TODO: This is duplicated for lots of the classes in this file
+ return (features.pycurl.available()
+ and self._transport == PyCurlTransport)
+
+
+class WallRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Whatever request comes in, close the connection"""
+
+ def _handle_one_request(self):
+ """Handle a single HTTP request, by abruptly closing the connection"""
+ self.close_connection = 1
+
+
+class TestWallServer(TestSpecificRequestHandler):
+ """Tests exceptions during the connection phase"""
+
+ _req_handler_class = WallRequestHandler
+
+ def test_http_has(self):
+ t = self.get_readonly_transport()
+ # Unfortunately httplib (see HTTPResponse._read_status
+ # for details) makes no distinction between a closed
+ # socket and a badly formatted status line, so we can't
+ # just test for ConnectionError, we have to test
+ # InvalidHttpResponse too. And pycurl may raise ConnectionReset
+ # instead of ConnectionError too.
+ self.assertRaises((errors.ConnectionError, errors.ConnectionReset,
+ errors.InvalidHttpResponse),
+ t.has, 'foo/bar')
+
+ def test_http_get(self):
+ t = self.get_readonly_transport()
+ self.assertRaises((errors.ConnectionError, errors.ConnectionReset,
+ errors.InvalidHttpResponse),
+ t.get, 'foo/bar')
+
+
+class BadStatusRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Whatever request comes in, returns a bad status"""
+
+ def parse_request(self):
+ """Fakes handling a single HTTP request, returns a bad status"""
+ ignored = http_server.TestingHTTPRequestHandler.parse_request(self)
+ self.send_response(0, "Bad status")
+ self.close_connection = 1
+ return False
+
+
+class TestBadStatusServer(TestSpecificRequestHandler):
+ """Tests bad status from server."""
+
+ _req_handler_class = BadStatusRequestHandler
+
+ def test_http_has(self):
+ t = self.get_readonly_transport()
+ self.assertRaises(errors.InvalidHttpResponse, t.has, 'foo/bar')
+
+ def test_http_get(self):
+ t = self.get_readonly_transport()
+ self.assertRaises(errors.InvalidHttpResponse, t.get, 'foo/bar')
+
+
+class InvalidStatusRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Whatever request comes in, returns an invalid status"""
+
+ def parse_request(self):
+ """Fakes handling a single HTTP request, returns an invalid status"""
+ ignored = http_server.TestingHTTPRequestHandler.parse_request(self)
+ self.wfile.write("Invalid status line\r\n")
+ # If we don't close the connection pycurl will hang. Since this is a
+ # stress test we don't *have* to respect the protocol, but we don't
+ # have to sabotage it too much either.
+ self.close_connection = True
+ return False
+
+
+class TestInvalidStatusServer(TestBadStatusServer):
+ """Tests invalid status from server.
+
+ Both implementations raise the same error as for a bad status.
+ """
+
+ _req_handler_class = InvalidStatusRequestHandler
+
+
+class BadProtocolRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Whatever request comes in, returns a bad protocol version"""
+
+ def parse_request(self):
+ """Fakes handling a single HTTP request, returns a bad protocol version"""
+ ignored = http_server.TestingHTTPRequestHandler.parse_request(self)
+ # Return an invalid protocol version; curl just ignores it, so this
+ # case cannot be tested with pycurl.
+ self.wfile.write("%s %d %s\r\n" % ('HTTP/0.0',
+ 404,
+ 'Look at my protocol version'))
+ return False
+
+
+class TestBadProtocolServer(TestSpecificRequestHandler):
+ """Tests bad protocol from server."""
+
+ _req_handler_class = BadProtocolRequestHandler
+
+ def setUp(self):
+ if self._testing_pycurl():
+ raise tests.TestNotApplicable(
+ "pycurl doesn't check the protocol version")
+ super(TestBadProtocolServer, self).setUp()
+
+ def test_http_has(self):
+ t = self.get_readonly_transport()
+ self.assertRaises(errors.InvalidHttpResponse, t.has, 'foo/bar')
+
+ def test_http_get(self):
+ t = self.get_readonly_transport()
+ self.assertRaises(errors.InvalidHttpResponse, t.get, 'foo/bar')
+
+
+class ForbiddenRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Whatever request comes in, returns a 403 code"""
+
+ def parse_request(self):
+ """Handle a single HTTP request, by replying that we cannot handle it"""
+ ignored = http_server.TestingHTTPRequestHandler.parse_request(self)
+ self.send_error(403)
+ return False
+
+
+class TestForbiddenServer(TestSpecificRequestHandler):
+ """Tests forbidden server"""
+
+ _req_handler_class = ForbiddenRequestHandler
+
+ def test_http_has(self):
+ t = self.get_readonly_transport()
+ self.assertRaises(errors.TransportError, t.has, 'foo/bar')
+
+ def test_http_get(self):
+ t = self.get_readonly_transport()
+ self.assertRaises(errors.TransportError, t.get, 'foo/bar')
+
+
+class TestRecordingServer(tests.TestCase):
+
+ def test_create(self):
+ server = RecordingServer(expect_body_tail=None)
+ self.assertEqual('', server.received_bytes)
+ self.assertEqual(None, server.host)
+ self.assertEqual(None, server.port)
+
+ def test_setUp_and_stop(self):
+ server = RecordingServer(expect_body_tail=None)
+ server.start_server()
+ try:
+ self.assertNotEqual(None, server.host)
+ self.assertNotEqual(None, server.port)
+ finally:
+ server.stop_server()
+ self.assertEqual(None, server.host)
+ self.assertEqual(None, server.port)
+
+ def test_send_receive_bytes(self):
+ server = RecordingServer(expect_body_tail='c', scheme='http')
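+ # RecordingServer records whatever it receives and sends back a canned
+ # '200 OK' status line once the received bytes end with
+ # expect_body_tail ('c' here), which is what the assertions below check.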
+ self.start_server(server)
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((server.host, server.port))
+ sock.sendall('abc')
+ self.assertEqual('HTTP/1.1 200 OK\r\n',
+ osutils.recv_all(sock, 4096))
+ self.assertEqual('abc', server.received_bytes)
+
+
+class TestRangeRequestServer(TestSpecificRequestHandler):
+ """Tests readv requests against a server.
+
+ We test against the default "normal" server.
+ """
+
+ def setUp(self):
+ super(TestRangeRequestServer, self).setUp()
+ self.build_tree_contents([('a', '0123456789')],)
+
+ def test_readv(self):
+ t = self.get_readonly_transport()
+ l = list(t.readv('a', ((0, 1), (1, 1), (3, 2), (9, 1))))
+ self.assertEqual(l[0], (0, '0'))
+ self.assertEqual(l[1], (1, '1'))
+ self.assertEqual(l[2], (3, '34'))
+ self.assertEqual(l[3], (9, '9'))
+
+ def test_readv_out_of_order(self):
+ t = self.get_readonly_transport()
+ l = list(t.readv('a', ((1, 1), (9, 1), (0, 1), (3, 2))))
+ self.assertEqual(l[0], (1, '1'))
+ self.assertEqual(l[1], (9, '9'))
+ self.assertEqual(l[2], (0, '0'))
+ self.assertEqual(l[3], (3, '34'))
+
+ def test_readv_invalid_ranges(self):
+ t = self.get_readonly_transport()
+
+ # This is intentionally reading off the end of the file
+ # since we are sure that it cannot get there
+ self.assertListRaises((errors.InvalidRange, errors.ShortReadvError,),
+ t.readv, 'a', [(1,1), (8,10)])
+
+ # This is trying to seek past the end of the file, it should
+ # also raise a special error
+ self.assertListRaises((errors.InvalidRange, errors.ShortReadvError,),
+ t.readv, 'a', [(12,2)])
+
+ def test_readv_multiple_get_requests(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ # force transport to issue multiple requests
+ t._max_readv_combine = 1
+ t._max_get_ranges = 1
+ l = list(t.readv('a', ((0, 1), (1, 1), (3, 2), (9, 1))))
+ self.assertEqual(l[0], (0, '0'))
+ self.assertEqual(l[1], (1, '1'))
+ self.assertEqual(l[2], (3, '34'))
+ self.assertEqual(l[3], (9, '9'))
+ # The server should have issued 4 requests
+ self.assertEqual(4, server.GET_request_nb)
+
+ def test_readv_get_max_size(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ # force transport to issue multiple requests by limiting the number of
+ # bytes per request. Note that this applies to coalesced offsets only; a
+ # single range keeps its size even if bigger than the limit.
+ t._get_max_size = 2
+ l = list(t.readv('a', ((0, 1), (1, 1), (2, 4), (6, 4))))
+ self.assertEqual(l[0], (0, '0'))
+ self.assertEqual(l[1], (1, '1'))
+ self.assertEqual(l[2], (2, '2345'))
+ self.assertEqual(l[3], (6, '6789'))
+ # The server should have issued 3 requests
+ self.assertEqual(3, server.GET_request_nb)
+
+ def test_complete_readv_leave_pipe_clean(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ # force transport to issue multiple requests
+ t._get_max_size = 2
+ list(t.readv('a', ((0, 1), (1, 1), (2, 4), (6, 4))))
+ # The server should have issued 3 requests
+ self.assertEqual(3, server.GET_request_nb)
+ self.assertEqual('0123456789', t.get_bytes('a'))
+ self.assertEqual(4, server.GET_request_nb)
+
+ def test_incomplete_readv_leave_pipe_clean(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ # force transport to issue multiple requests
+ t._get_max_size = 2
+ # Don't collapse readv results into a list so that we leave unread
+ # bytes on the socket
+ ireadv = iter(t.readv('a', ((0, 1), (1, 1), (2, 4), (6, 4))))
+ self.assertEqual((0, '0'), ireadv.next())
+ # The server should have issued one request so far
+ self.assertEqual(1, server.GET_request_nb)
+ self.assertEqual('0123456789', t.get_bytes('a'))
+ # get_bytes issued an additional request, the readv pending ones are
+ # lost
+ self.assertEqual(2, server.GET_request_nb)
+
+
+class SingleRangeRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Always reply to range requests as if they were single.
+
+ Don't be explicit about it, just to annoy the clients.
+ """
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ """Answer as if it was a single range request and ignores the rest"""
+ (start, end) = ranges[0]
+ return self.get_single_range(file, file_size, start, end)
+
+
+class TestSingleRangeRequestServer(TestRangeRequestServer):
+ """Test readv against a server which accepts only single range requests"""
+
+ _req_handler_class = SingleRangeRequestHandler
+
+
+class SingleOnlyRangeRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Only replies to single range requests, errors out on multiple"""
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ """Refuses the multiple ranges request"""
+ if len(ranges) > 1:
+ file.close()
+ self.send_error(416, "Requested range not satisfiable")
+ return
+ (start, end) = ranges[0]
+ return self.get_single_range(file, file_size, start, end)
+
+
+class TestSingleOnlyRangeRequestServer(TestRangeRequestServer):
+ """Test readv against a server which only accepts single range requests"""
+
+ _req_handler_class = SingleOnlyRangeRequestHandler
+
+
+class NoRangeRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Ignore range requests without notice"""
+
+ def do_GET(self):
+ # Update the statistics
+ self.server.test_case_server.GET_request_nb += 1
+ # Just bypass the range handling done by TestingHTTPRequestHandler
+ return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
+
+
+class TestNoRangeRequestServer(TestRangeRequestServer):
+ """Test readv against a server which does not accept range requests"""
+
+ _req_handler_class = NoRangeRequestHandler
+
+
+class MultipleRangeWithoutContentLengthRequestHandler(
+ http_server.TestingHTTPRequestHandler):
+ """Reply to multiple range requests without content length header."""
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ self.send_response(206)
+ self.send_header('Accept-Ranges', 'bytes')
+ # XXX: this is strange; the 'random' name below seems undefined and
+ # yet the tests pass -- mbp 2010-10-11 bug 658773
+ boundary = "%d" % random.randint(0,0x7FFFFFFF)
+ self.send_header("Content-Type",
+ "multipart/byteranges; boundary=%s" % boundary)
+ self.end_headers()
+ for (start, end) in ranges:
+ self.wfile.write("--%s\r\n" % boundary)
+ self.send_header("Content-type", 'application/octet-stream')
+ self.send_header("Content-Range", "bytes %d-%d/%d" % (start,
+ end,
+ file_size))
+ self.end_headers()
+ self.send_range_content(file, start, end - start + 1)
+ # Final boundary
+ self.wfile.write("--%s\r\n" % boundary)
+
+
+class TestMultipleRangeWithoutContentLengthServer(TestRangeRequestServer):
+
+ _req_handler_class = MultipleRangeWithoutContentLengthRequestHandler
+
+
+class TruncatedMultipleRangeRequestHandler(
+ http_server.TestingHTTPRequestHandler):
+ """Reply to multiple range requests truncating the last ones.
+
+ This server generates responses whose Content-Length describes all the
+ ranges, but fails to include the last ones, leading to client short reads.
+ This has been observed randomly with lighttpd (bug #179368).
+ """
+
+ _truncated_ranges = 2
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ self.send_response(206)
+ self.send_header('Accept-Ranges', 'bytes')
+ boundary = 'tagada'
+ self.send_header('Content-Type',
+ 'multipart/byteranges; boundary=%s' % boundary)
+ boundary_line = '--%s\r\n' % boundary
+ # Calculate the Content-Length
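+ # Note: the advertised Content-Length covers *all* the ranges even
+ # though the last _truncated_ranges ones are never sent; that mismatch
+ # is what provokes the client short read.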
+ content_length = 0
+ for (start, end) in ranges:
+ content_length += len(boundary_line)
+ content_length += self._header_line_length(
+ 'Content-type', 'application/octet-stream')
+ content_length += self._header_line_length(
+ 'Content-Range', 'bytes %d-%d/%d' % (start, end, file_size))
+ content_length += len('\r\n') # end headers
+ content_length += end - start # + 1
+ content_length += len(boundary_line)
+ self.send_header('Content-length', content_length)
+ self.end_headers()
+
+ # Send the multipart body
+ cur = 0
+ for (start, end) in ranges:
+ self.wfile.write(boundary_line)
+ self.send_header('Content-type', 'application/octet-stream')
+ self.send_header('Content-Range', 'bytes %d-%d/%d'
+ % (start, end, file_size))
+ self.end_headers()
+ if cur + self._truncated_ranges >= len(ranges):
+ # Abruptly ends the response and close the connection
+ self.close_connection = 1
+ return
+ self.send_range_content(file, start, end - start + 1)
+ cur += 1
+ # Final boundary
+ self.wfile.write(boundary_line)
+
+
+class TestTruncatedMultipleRangeServer(TestSpecificRequestHandler):
+
+ _req_handler_class = TruncatedMultipleRangeRequestHandler
+
+ def setUp(self):
+ super(TestTruncatedMultipleRangeServer, self).setUp()
+ self.build_tree_contents([('a', '0123456789')],)
+
+ def test_readv_with_short_reads(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ # Force separate ranges for each offset
+ t._bytes_to_read_before_seek = 0
+ ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1))))
+ self.assertEqual((0, '0'), ireadv.next())
+ self.assertEqual((2, '2'), ireadv.next())
+ if not self._testing_pycurl():
+ # Only one request has been issued so far (except for pycurl, which
+ # tries to read the whole response at once)
+ self.assertEqual(1, server.GET_request_nb)
+ self.assertEqual((4, '45'), ireadv.next())
+ self.assertEqual((9, '9'), ireadv.next())
+ # Both implementations issue 3 requests but:
+ # - urllib does two multiple (4 ranges, then 2 ranges) then a single
+ # range,
+ # - pycurl does two multiple (4 ranges, 4 ranges) then a single range
+ self.assertEqual(3, server.GET_request_nb)
+ # Finally the client has tried a single range request and stays in
+ # that mode
+ self.assertEqual('single', t._range_hint)
+
+
+class TruncatedBeforeBoundaryRequestHandler(
+ http_server.TestingHTTPRequestHandler):
+ """Truncation before a boundary, like in bug 198646"""
+
+ _truncated_ranges = 1
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ self.send_response(206)
+ self.send_header('Accept-Ranges', 'bytes')
+ boundary = 'tagada'
+ self.send_header('Content-Type',
+ 'multipart/byteranges; boundary=%s' % boundary)
+ boundary_line = '--%s\r\n' % boundary
+ # Calculate the Content-Length
+ content_length = 0
+ for (start, end) in ranges:
+ content_length += len(boundary_line)
+ content_length += self._header_line_length(
+ 'Content-type', 'application/octet-stream')
+ content_length += self._header_line_length(
+ 'Content-Range', 'bytes %d-%d/%d' % (start, end, file_size))
+ content_length += len('\r\n') # end headers
+ content_length += end - start # + 1
+ content_length += len(boundary_line)
+ self.send_header('Content-length', content_length)
+ self.end_headers()
+
+ # Send the multipart body
+ cur = 0
+ for (start, end) in ranges:
+ if cur + self._truncated_ranges >= len(ranges):
+ # Abruptly ends the response and close the connection
+ self.close_connection = 1
+ return
+ self.wfile.write(boundary_line)
+ self.send_header('Content-type', 'application/octet-stream')
+ self.send_header('Content-Range', 'bytes %d-%d/%d'
+ % (start, end, file_size))
+ self.end_headers()
+ self.send_range_content(file, start, end - start + 1)
+ cur += 1
+ # Final boundary
+ self.wfile.write(boundary_line)
+
+
+class TestTruncatedBeforeBoundary(TestSpecificRequestHandler):
+ """Tests the case of bug 198646, disconnecting before a boundary."""
+
+ _req_handler_class = TruncatedBeforeBoundaryRequestHandler
+
+ def setUp(self):
+ super(TestTruncatedBeforeBoundary, self).setUp()
+ self.build_tree_contents([('a', '0123456789')],)
+
+ def test_readv_with_short_reads(self):
+ server = self.get_readonly_server()
+ t = self.get_readonly_transport()
+ # Force separate ranges for each offset
+ t._bytes_to_read_before_seek = 0
+ ireadv = iter(t.readv('a', ((0, 1), (2, 1), (4, 2), (9, 1))))
+ self.assertEqual((0, '0'), ireadv.next())
+ self.assertEqual((2, '2'), ireadv.next())
+ self.assertEqual((4, '45'), ireadv.next())
+ self.assertEqual((9, '9'), ireadv.next())
+
+
+class LimitedRangeRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Errors out when range specifiers exceed the limit"""
+
+ def get_multiple_ranges(self, file, file_size, ranges):
+ """Refuses the multiple ranges request"""
+ tcs = self.server.test_case_server
+ if tcs.range_limit is not None and len(ranges) > tcs.range_limit:
+ file.close()
+ # Emulate apache behavior
+ self.send_error(400, "Bad Request")
+ return
+ return http_server.TestingHTTPRequestHandler.get_multiple_ranges(
+ self, file, file_size, ranges)
+
+
+class LimitedRangeHTTPServer(http_server.HttpServer):
+ """An HttpServer erroring out on requests with too many range specifiers"""
+
+ def __init__(self, request_handler=LimitedRangeRequestHandler,
+ protocol_version=None,
+ range_limit=None):
+ http_server.HttpServer.__init__(self, request_handler,
+ protocol_version=protocol_version)
+ self.range_limit = range_limit
+
+
+class TestLimitedRangeRequestServer(http_utils.TestCaseWithWebserver):
+ """Tests readv requests against a server erroring out on too many ranges."""
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ # Requests with more range specifiers will error out
+ range_limit = 3
+
+ def create_transport_readonly_server(self):
+ return LimitedRangeHTTPServer(range_limit=self.range_limit,
+ protocol_version=self._protocol_version)
+
+ def setUp(self):
+ http_utils.TestCaseWithWebserver.setUp(self)
+ # We need to manipulate ranges that correspond to real chunks in the
+ # response, so we build the content appropriately.
+ filler = ''.join(['abcdefghij' for x in range(102)])
+ content = ''.join(['%04d' % v + filler for v in range(16)])
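+ # Each '%04d' marker plus filler is exactly 1024 bytes (4 + 102 * 10),
+ # so the content at offset 1024 * n starts with '%04d' % n.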
+ self.build_tree_contents([('a', content)],)
+
+ def test_few_ranges(self):
+ t = self.get_readonly_transport()
+ l = list(t.readv('a', ((0, 4), (1024, 4), )))
+ self.assertEqual(l[0], (0, '0000'))
+ self.assertEqual(l[1], (1024, '0001'))
+ self.assertEqual(1, self.get_readonly_server().GET_request_nb)
+
+ def test_more_ranges(self):
+ t = self.get_readonly_transport()
+ l = list(t.readv('a', ((0, 4), (1024, 4), (4096, 4), (8192, 4))))
+ self.assertEqual(l[0], (0, '0000'))
+ self.assertEqual(l[1], (1024, '0001'))
+ self.assertEqual(l[2], (4096, '0004'))
+ self.assertEqual(l[3], (8192, '0008'))
+ # The server will refuse to serve the first request (too many ranges);
+ # a second request will succeed.
+ self.assertEqual(2, self.get_readonly_server().GET_request_nb)
+
+
+class TestHttpProxyWhiteBox(tests.TestCase):
+ """Whitebox tests for proxy http authorization.
+
+ Only the urllib implementation is tested here.
+ """
+
+ def _proxied_request(self):
+ handler = _urllib2_wrappers.ProxyHandler()
+ request = _urllib2_wrappers.Request('GET', 'http://baz/buzzle')
+ handler.set_proxy(request, 'http')
+ return request
+
+ def assertEvaluateProxyBypass(self, expected, host, no_proxy):
+ handler = _urllib2_wrappers.ProxyHandler()
+ self.assertEquals(expected,
+ handler.evaluate_proxy_bypass(host, no_proxy))
+
+ def test_empty_user(self):
+ self.overrideEnv('http_proxy', 'http://bar.com')
+ request = self._proxied_request()
+ self.assertFalse(request.headers.has_key('Proxy-authorization'))
+
+ def test_user_with_at(self):
+ self.overrideEnv('http_proxy',
+ 'http://username@domain:password@proxy_host:1234')
+ request = self._proxied_request()
+ self.assertFalse(request.headers.has_key('Proxy-authorization'))
+
+ def test_invalid_proxy(self):
+ """A proxy env variable without scheme"""
+ self.overrideEnv('http_proxy', 'host:1234')
+ self.assertRaises(errors.InvalidURL, self._proxied_request)
+
+ def test_evaluate_proxy_bypass_true(self):
+ """The host is not proxied"""
+ self.assertEvaluateProxyBypass(True, 'example.com', 'example.com')
+ self.assertEvaluateProxyBypass(True, 'bzr.example.com', '*example.com')
+
+ def test_evaluate_proxy_bypass_false(self):
+ """The host is proxied"""
+ self.assertEvaluateProxyBypass(False, 'bzr.example.com', None)
+
+ def test_evaluate_proxy_bypass_unknown(self):
+ """The host is not explicitly proxied"""
+ self.assertEvaluateProxyBypass(None, 'example.com', 'not.example.com')
+ self.assertEvaluateProxyBypass(None, 'bzr.example.com', 'example.com')
+
+ def test_evaluate_proxy_bypass_empty_entries(self):
+ """Ignore empty entries"""
+ self.assertEvaluateProxyBypass(None, 'example.com', '')
+ self.assertEvaluateProxyBypass(None, 'example.com', ',')
+ self.assertEvaluateProxyBypass(None, 'example.com', 'foo,,bar')
+
+
+class TestProxyHttpServer(http_utils.TestCaseWithTwoWebservers):
+ """Tests proxy server.
+
+ Be aware that we do not set up a real proxy here. Instead, we
+ check that the *connection* goes through the proxy by serving
+ different content (the faked proxy server appends '-proxied'
+ to the file names).
+ """
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ # FIXME: We don't have an https server available, so we don't
+ # test https connections. --vila toolongago
+
+ def setUp(self):
+ super(TestProxyHttpServer, self).setUp()
+ self.transport_secondary_server = http_utils.ProxyServer
+ self.build_tree_contents([('foo', 'contents of foo\n'),
+ ('foo-proxied', 'proxied contents of foo\n')])
+ # Let's setup some attributes for tests
+ server = self.get_readonly_server()
+ self.server_host_port = '%s:%d' % (server.host, server.port)
+ if self._testing_pycurl():
+ # Oh my ! pycurl does not check for the port as part of
+ # no_proxy :-( So we just test the host part
+ self.no_proxy_host = server.host
+ else:
+ self.no_proxy_host = self.server_host_port
+ # The secondary server is the proxy
+ self.proxy_url = self.get_secondary_url()
+
+ def _testing_pycurl(self):
+ # TODO: This is duplicated for lots of the classes in this file
+ return (features.pycurl.available()
+ and self._transport == PyCurlTransport)
+
+ def assertProxied(self):
+ t = self.get_readonly_transport()
+ self.assertEqual('proxied contents of foo\n', t.get('foo').read())
+
+ def assertNotProxied(self):
+ t = self.get_readonly_transport()
+ self.assertEqual('contents of foo\n', t.get('foo').read())
+
+ def test_http_proxy(self):
+ self.overrideEnv('http_proxy', self.proxy_url)
+ self.assertProxied()
+
+ def test_HTTP_PROXY(self):
+ if self._testing_pycurl():
+ # pycurl does not check HTTP_PROXY for security reasons
+ # (for use in a CGI context that we do not care
+ # about. Should we ?)
+ raise tests.TestNotApplicable(
+ 'pycurl does not check HTTP_PROXY for security reasons')
+ self.overrideEnv('HTTP_PROXY', self.proxy_url)
+ self.assertProxied()
+
+ def test_all_proxy(self):
+ self.overrideEnv('all_proxy', self.proxy_url)
+ self.assertProxied()
+
+ def test_ALL_PROXY(self):
+ self.overrideEnv('ALL_PROXY', self.proxy_url)
+ self.assertProxied()
+
+ def test_http_proxy_with_no_proxy(self):
+ self.overrideEnv('no_proxy', self.no_proxy_host)
+ self.overrideEnv('http_proxy', self.proxy_url)
+ self.assertNotProxied()
+
+ def test_HTTP_PROXY_with_NO_PROXY(self):
+ if self._testing_pycurl():
+ raise tests.TestNotApplicable(
+ 'pycurl does not check HTTP_PROXY for security reasons')
+ self.overrideEnv('NO_PROXY', self.no_proxy_host)
+ self.overrideEnv('HTTP_PROXY', self.proxy_url)
+ self.assertNotProxied()
+
+ def test_all_proxy_with_no_proxy(self):
+ self.overrideEnv('no_proxy', self.no_proxy_host)
+ self.overrideEnv('all_proxy', self.proxy_url)
+ self.assertNotProxied()
+
+ def test_ALL_PROXY_with_NO_PROXY(self):
+ self.overrideEnv('NO_PROXY', self.no_proxy_host)
+ self.overrideEnv('ALL_PROXY', self.proxy_url)
+ self.assertNotProxied()
+
+ def test_http_proxy_without_scheme(self):
+ self.overrideEnv('http_proxy', self.server_host_port)
+ if self._testing_pycurl():
+ # pycurl *ignores* invalid proxy env variables. If that ever changes
+ # in the future, this test will fail, indicating that pycurl no
+ # longer ignores such variables.
+ self.assertNotProxied()
+ else:
+ self.assertRaises(errors.InvalidURL, self.assertProxied)
+
+
+class TestRanges(http_utils.TestCaseWithWebserver):
+ """Test the Range header in GET methods."""
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ http_utils.TestCaseWithWebserver.setUp(self)
+ self.build_tree_contents([('a', '0123456789')],)
+
+ def create_transport_readonly_server(self):
+ return http_server.HttpServer(protocol_version=self._protocol_version)
+
+ def _file_contents(self, relpath, ranges):
+ t = self.get_readonly_transport()
+ offsets = [ (start, end - start + 1) for start, end in ranges]
+ coalesce = t._coalesce_offsets
+ coalesced = list(coalesce(offsets, limit=0, fudge_factor=0))
+ code, data = t._get(relpath, coalesced)
+ self.assertTrue(code in (200, 206),'_get returns: %d' % code)
+ for start, end in ranges:
+ data.seek(start)
+ yield data.read(end - start + 1)
+
+ def _file_tail(self, relpath, tail_amount):
+ t = self.get_readonly_transport()
+ code, data = t._get(relpath, [], tail_amount)
+ self.assertTrue(code in (200, 206),'_get returns: %d' % code)
+ data.seek(-tail_amount, 2)
+ return data.read(tail_amount)
+
+ def test_range_header(self):
+ # Valid ranges
+ map(self.assertEqual,['0', '234'],
+ list(self._file_contents('a', [(0,0), (2,4)])),)
+
+ def test_range_header_tail(self):
+ self.assertEqual('789', self._file_tail('a', 3))
+
+ def test_syntactically_invalid_range_header(self):
+ self.assertListRaises(errors.InvalidHttpRange,
+ self._file_contents, 'a', [(4, 3)])
+
+ def test_semantically_invalid_range_header(self):
+ self.assertListRaises(errors.InvalidHttpRange,
+ self._file_contents, 'a', [(42, 128)])
+
+
+class TestHTTPRedirections(http_utils.TestCaseWithRedirectedWebserver):
+ """Test redirection between http servers."""
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ super(TestHTTPRedirections, self).setUp()
+ self.build_tree_contents([('a', '0123456789'),
+ ('bundle',
+ '# Bazaar revision bundle v0.9\n#\n')
+ ],)
+
+ def test_redirected(self):
+ self.assertRaises(errors.RedirectRequested,
+ self.get_old_transport().get, 'a')
+ self.assertEqual('0123456789', self.get_new_transport().get('a').read())
+
+
+class RedirectedRequest(_urllib2_wrappers.Request):
+ """Request following redirections. """
+
+ init_orig = _urllib2_wrappers.Request.__init__
+
+ def __init__(self, method, url, *args, **kwargs):
+ """Constructor.
+
+ """
+ # Since the tests using this class will replace
+ # _urllib2_wrappers.Request, we can't just call the base class __init__
+ # or we'll loop.
+ RedirectedRequest.init_orig(self, method, url, *args, **kwargs)
+ self.follow_redirections = True
+
+
+def install_redirected_request(test):
+ test.overrideAttr(_urllib2_wrappers, 'Request', RedirectedRequest)
+
+
+def cleanup_http_redirection_connections(test):
+ # Some sockets are opened but never seen by _urllib, so we trap them at
+ # the _urllib2_wrappers level to be able to clean them up.
+ def socket_disconnect(sock):
+ try:
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
+ except socket.error:
+ pass
+ def connect(connection):
+ test.http_connect_orig(connection)
+ test.addCleanup(socket_disconnect, connection.sock)
+ test.http_connect_orig = test.overrideAttr(
+ _urllib2_wrappers.HTTPConnection, 'connect', connect)
+ def connect(connection):
+ test.https_connect_orig(connection)
+ test.addCleanup(socket_disconnect, connection.sock)
+ test.https_connect_orig = test.overrideAttr(
+ _urllib2_wrappers.HTTPSConnection, 'connect', connect)
+
+
+class TestHTTPSilentRedirections(http_utils.TestCaseWithRedirectedWebserver):
+ """Test redirections.
+
+ http implementations do not redirect silently anymore (they
+ do not redirect at all in fact). The mechanism is still in
+ place at the _urllib2_wrappers.Request level and these tests
+ exercise it.
+
+ For the pycurl implementation
+ the redirections have been deleted as we may deprecate pycurl
+ and I have no place to keep a working implementation.
+ -- vila 20070212
+ """
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ if (features.pycurl.available()
+ and self._transport == PyCurlTransport):
+ raise tests.TestNotApplicable(
+ "pycurl doesn't redirect silently anymore")
+ super(TestHTTPSilentRedirections, self).setUp()
+ install_redirected_request(self)
+ cleanup_http_redirection_connections(self)
+ self.build_tree_contents([('a','a'),
+ ('1/',),
+ ('1/a', 'redirected once'),
+ ('2/',),
+ ('2/a', 'redirected twice'),
+ ('3/',),
+ ('3/a', 'redirected thrice'),
+ ('4/',),
+ ('4/a', 'redirected 4 times'),
+ ('5/',),
+ ('5/a', 'redirected 5 times'),
+ ],)
+
+ def test_one_redirection(self):
+ t = self.get_old_transport()
+ req = RedirectedRequest('GET', t._remote_path('a'))
+ new_prefix = 'http://%s:%s' % (self.new_server.host,
+ self.new_server.port)
+ self.old_server.redirections = \
+ [('(.*)', r'%s/1\1' % (new_prefix), 301),]
+ self.assertEqual('redirected once', t._perform(req).read())
+
+ def test_five_redirections(self):
+ t = self.get_old_transport()
+ req = RedirectedRequest('GET', t._remote_path('a'))
+ old_prefix = 'http://%s:%s' % (self.old_server.host,
+ self.old_server.port)
+ new_prefix = 'http://%s:%s' % (self.new_server.host,
+ self.new_server.port)
+ self.old_server.redirections = [
+ ('/1(.*)', r'%s/2\1' % (old_prefix), 302),
+ ('/2(.*)', r'%s/3\1' % (old_prefix), 303),
+ ('/3(.*)', r'%s/4\1' % (old_prefix), 307),
+ ('/4(.*)', r'%s/5\1' % (new_prefix), 301),
+ ('(/[^/]+)', r'%s/1\1' % (old_prefix), 301),
+ ]
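+ # The bare '/a' only matches the last rule and becomes '/1/a'; each
+ # earlier rule then pushes it one level deeper until '/4/a' redirects
+ # to '/5/a' on the new server, for five redirections in total.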
+ self.assertEqual('redirected 5 times', t._perform(req).read())
+
+
+class TestDoCatchRedirections(http_utils.TestCaseWithRedirectedWebserver):
+ """Test transport.do_catching_redirections."""
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ super(TestDoCatchRedirections, self).setUp()
+ self.build_tree_contents([('a', '0123456789'),],)
+ cleanup_http_redirection_connections(self)
+
+ self.old_transport = self.get_old_transport()
+
+ def get_a(self, t):
+ return t.get('a')
+
+ def test_no_redirection(self):
+ t = self.get_new_transport()
+
+ # We use None for redirected so that we fail if redirected
+ self.assertEqual('0123456789',
+ transport.do_catching_redirections(
+ self.get_a, t, None).read())
+
+ def test_one_redirection(self):
+ self.redirections = 0
+
+ def redirected(t, exception, redirection_notice):
+ self.redirections += 1
+ redirected_t = t._redirected_to(exception.source, exception.target)
+ return redirected_t
+
+ self.assertEqual('0123456789',
+ transport.do_catching_redirections(
+ self.get_a, self.old_transport, redirected).read())
+ self.assertEqual(1, self.redirections)
+
+ def test_redirection_loop(self):
+
+ def redirected(transport, exception, redirection_notice):
+ # By using the redirected url as a base dir for the
+ # *old* transport, we create a loop: a => a/a =>
+ # a/a/a
+ return self.old_transport.clone(exception.target)
+
+ self.assertRaises(errors.TooManyRedirections,
+ transport.do_catching_redirections,
+ self.get_a, self.old_transport, redirected)
+
+
+def _setup_authentication_config(**kwargs):
+ conf = config.AuthenticationConfig()
+ conf._get_config().update({'httptest': kwargs})
+ conf._save()
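+
+# For reference, a call like
+#   _setup_authentication_config(scheme='http', host='localhost',
+#                                user='joe', password='foo')
+# roughly corresponds to an authentication.conf section such as:
+#   [httptest]
+#   scheme=http
+#   host=localhost
+#   user=joe
+#   password=foo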
+
+
+class TestUrllib2AuthHandler(tests.TestCaseWithTransport):
+ """Unit tests for glue by which urllib2 asks us for authentication"""
+
+ def test_get_user_password_without_port(self):
+ """We cope if urllib2 doesn't tell us the port.
+
+ See https://bugs.launchpad.net/bzr/+bug/654684
+ """
+ user = 'joe'
+ password = 'foo'
+ _setup_authentication_config(scheme='http', host='localhost',
+ user=user, password=password)
+ handler = _urllib2_wrappers.HTTPAuthHandler()
+ got_pass = handler.get_user_password(dict(
+ user='joe',
+ protocol='http',
+ host='localhost',
+ path='/',
+ realm='Realm',
+ ))
+ self.assertEquals((user, password), got_pass)
+
+
+class TestAuth(http_utils.TestCaseWithWebserver):
+ """Test authentication scheme"""
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ vary_by_http_auth_scheme(),
+ )
+
+ def setUp(self):
+ super(TestAuth, self).setUp()
+ self.server = self.get_readonly_server()
+ self.build_tree_contents([('a', 'contents of a\n'),
+ ('b', 'contents of b\n'),])
+
+ def create_transport_readonly_server(self):
+ server = self._auth_server(protocol_version=self._protocol_version)
+ server._url_protocol = self._url_protocol
+ return server
+
+ def _testing_pycurl(self):
+ # TODO: This is duplicated for lots of the classes in this file
+ return (features.pycurl.available()
+ and self._transport == PyCurlTransport)
+
+ def get_user_url(self, user, password):
+ """Build a URL embedding user and password"""
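+ # e.g. get_user_url('joe', 'foo') -> 'http://joe:foo@<host>:<port>/'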
+ url = '%s://' % self.server._url_protocol
+ if user is not None:
+ url += user
+ if password is not None:
+ url += ':' + password
+ url += '@'
+ url += '%s:%s/' % (self.server.host, self.server.port)
+ return url
+
+ def get_user_transport(self, user, password):
+ t = transport.get_transport_from_url(
+ self.get_user_url(user, password))
+ return t
+
+ def test_no_user(self):
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport(None, None)
+ self.assertRaises(errors.InvalidHttpResponse, t.get, 'a')
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+
+ def test_empty_pass(self):
+ self.server.add_user('joe', '')
+ t = self.get_user_transport('joe', '')
+ self.assertEqual('contents of a\n', t.get('a').read())
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+
+ def test_user_pass(self):
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport('joe', 'foo')
+ self.assertEqual('contents of a\n', t.get('a').read())
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+
+ def test_unknown_user(self):
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport('bill', 'foo')
+ self.assertRaises(errors.InvalidHttpResponse, t.get, 'a')
+ # Two 'Authentication Required' errors should occur (the
+ # initial 'who are you' and 'I don't know you, who are
+ # you').
+ self.assertEqual(2, self.server.auth_required_errors)
+
+ def test_wrong_pass(self):
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport('joe', 'bar')
+ self.assertRaises(errors.InvalidHttpResponse, t.get, 'a')
+ # Two 'Authentication Required' errors should occur (the
+ # initial 'who are you' and 'this is not you, who are you')
+ self.assertEqual(2, self.server.auth_required_errors)
+
+ def test_prompt_for_username(self):
+ if self._testing_pycurl():
+ raise tests.TestNotApplicable(
+ 'pycurl cannot prompt, it handles auth by embedding'
+ ' user:pass in urls only')
+
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport(None, None)
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin='joe\nfoo\n',
+ stdout=stdout, stderr=stderr)
+ self.assertEqual('contents of a\n',t.get('a').read())
+ # stdin should be empty
+ self.assertEqual('', ui.ui_factory.stdin.readline())
+ stderr.seek(0)
+ expected_prompt = self._expected_username_prompt(t._unqualified_scheme)
+ self.assertEqual(expected_prompt, stderr.read(len(expected_prompt)))
+ self.assertEqual('', stdout.getvalue())
+ self._check_password_prompt(t._unqualified_scheme, 'joe',
+ stderr.readline())
+
+ def test_prompt_for_password(self):
+ if self._testing_pycurl():
+ raise tests.TestNotApplicable(
+ 'pycurl cannot prompt, it handles auth by embedding'
+ ' user:pass in urls only')
+
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport('joe', None)
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin='foo\n',
+ stdout=stdout, stderr=stderr)
+ self.assertEqual('contents of a\n', t.get('a').read())
+ # stdin should be empty
+ self.assertEqual('', ui.ui_factory.stdin.readline())
+ self._check_password_prompt(t._unqualified_scheme, 'joe',
+ stderr.getvalue())
+ self.assertEqual('', stdout.getvalue())
+ # And we shouldn't prompt again for a different request
+ # against the same transport.
+ self.assertEqual('contents of b\n',t.get('b').read())
+ t2 = t.clone()
+ # And neither against a clone
+ self.assertEqual('contents of b\n',t2.get('b').read())
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+
+ def _check_password_prompt(self, scheme, user, actual_prompt):
+ expected_prompt = (self._password_prompt_prefix
+ + ("%s %s@%s:%d, Realm: '%s' password: "
+ % (scheme.upper(),
+ user, self.server.host, self.server.port,
+ self.server.auth_realm)))
+ self.assertEqual(expected_prompt, actual_prompt)
+
+ def _expected_username_prompt(self, scheme):
+ return (self._username_prompt_prefix
+ + "%s %s:%d, Realm: '%s' username: " % (scheme.upper(),
+ self.server.host, self.server.port,
+ self.server.auth_realm))
+
+ def test_no_prompt_for_password_when_using_auth_config(self):
+ if self._testing_pycurl():
+ raise tests.TestNotApplicable(
+ 'pycurl does not support authentication.conf'
+ ' since it cannot prompt')
+
+ user = 'joe'
+ password = 'foo'
+ stdin_content = 'bar\n' # Not the right password
+ self.server.add_user(user, password)
+ t = self.get_user_transport(user, None)
+ ui.ui_factory = tests.TestUIFactory(stdin=stdin_content,
+ stderr=tests.StringIOWrapper())
+ # Create a minimal config file with the right password
+ _setup_authentication_config(scheme='http', port=self.server.port,
+ user=user, password=password)
+ # Issue a request to the server to connect
+ self.assertEqual('contents of a\n',t.get('a').read())
+ # stdin should have been left untouched
+ self.assertEqual(stdin_content, ui.ui_factory.stdin.readline())
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+
+ def test_changing_nonce(self):
+ if self._auth_server not in (http_utils.HTTPDigestAuthServer,
+ http_utils.ProxyDigestAuthServer):
+ raise tests.TestNotApplicable('HTTP/proxy auth digest only test')
+ if self._testing_pycurl():
+ self.knownFailure(
+ 'pycurl does not handle a nonce change')
+ self.server.add_user('joe', 'foo')
+ t = self.get_user_transport('joe', 'foo')
+ self.assertEqual('contents of a\n', t.get('a').read())
+ self.assertEqual('contents of b\n', t.get('b').read())
+ # Only one 'Authentication Required' error should have
+ # occurred so far
+ self.assertEqual(1, self.server.auth_required_errors)
+ # The server invalidates the current nonce
+ self.server.auth_nonce = self.server.auth_nonce + '. No, now!'
+ self.assertEqual('contents of a\n', t.get('a').read())
+ # Two 'Authentication Required' errors should occur (the
+ # initial 'who are you' and a second 'who are you' with the new nonce)
+ self.assertEqual(2, self.server.auth_required_errors)
+
+ def test_user_from_auth_conf(self):
+ if self._testing_pycurl():
+ raise tests.TestNotApplicable(
+ 'pycurl does not support authentication.conf')
+ user = 'joe'
+ password = 'foo'
+ self.server.add_user(user, password)
+ _setup_authentication_config(scheme='http', port=self.server.port,
+ user=user, password=password)
+ t = self.get_user_transport(None, None)
+ # Issue a request to the server to connect
+ self.assertEqual('contents of a\n', t.get('a').read())
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+
+ def test_no_credential_leaks_in_log(self):
+ self.overrideAttr(debug, 'debug_flags', set(['http']))
+ user = 'joe'
+ password = 'very-sensitive-password'
+ self.server.add_user(user, password)
+ t = self.get_user_transport(user, password)
+ # Capture the debug calls to mutter
+ self.mutters = []
+ def mutter(*args):
+ lines = args[0] % args[1:]
+ # Some calls output multiple lines, just split them now since we
+ # care about a single one later.
+ self.mutters.extend(lines.splitlines())
+ self.overrideAttr(trace, 'mutter', mutter)
+ # Issue a request to the server to connect
+ self.assertEqual(True, t.has('a'))
+ # Only one 'Authentication Required' error should occur
+ self.assertEqual(1, self.server.auth_required_errors)
+ # Since the authentication succeeded, there should be a corresponding
+ # debug line
+ sent_auth_headers = [line for line in self.mutters
+ if line.startswith('> %s' % (self._auth_header,))]
+ self.assertLength(1, sent_auth_headers)
+ self.assertStartsWith(sent_auth_headers[0],
+ '> %s: <masked>' % (self._auth_header,))
+
+
+class TestProxyAuth(TestAuth):
+ """Test proxy authentication schemes.
+
+ This inherits from TestAuth to tweak the setUp and filter some failing
+ tests.
+ """
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ vary_by_http_proxy_auth_scheme(),
+ )
+
+ def setUp(self):
+ super(TestProxyAuth, self).setUp()
+ # Override the contents to avoid false positives
+ self.build_tree_contents([('a', 'not proxied contents of a\n'),
+ ('b', 'not proxied contents of b\n'),
+ ('a-proxied', 'contents of a\n'),
+ ('b-proxied', 'contents of b\n'),
+ ])
+
+ def get_user_transport(self, user, password):
+ self.overrideEnv('all_proxy', self.get_user_url(user, password))
+ return TestAuth.get_user_transport(self, user, password)
+
+ def test_empty_pass(self):
+ if self._testing_pycurl():
+ import pycurl
+ if pycurl.version_info()[1] < '7.16.0':
+ self.knownFailure(
+ 'pycurl < 7.16.0 does not handle empty proxy passwords')
+ super(TestProxyAuth, self).test_empty_pass()
+
+
+class SampleSocket(object):
+ """A socket-like object for use in testing the HTTP request handler."""
+
+ def __init__(self, socket_read_content):
+ """Constructs a sample socket.
+
+ :param socket_read_content: a byte sequence
+ """
+ # Use plain python StringIO so we can monkey-patch the close method to
+ # not discard the contents.
+ from StringIO import StringIO
+ self.readfile = StringIO(socket_read_content)
+ self.writefile = StringIO()
+ self.writefile.close = lambda: None
+ self.close = lambda: None
+
+ def makefile(self, mode='r', bufsize=None):
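+ # 'r' modes hand out the canned request bytes; any other mode gets the
+ # in-memory write buffer so the test can inspect the response later.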
+ if 'r' in mode:
+ return self.readfile
+ else:
+ return self.writefile
+
+
+class SmartHTTPTunnellingTest(tests.TestCaseWithTransport):
+
+ scenarios = multiply_scenarios(
+ vary_by_http_client_implementation(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ super(SmartHTTPTunnellingTest, self).setUp()
+ # We use the VFS layer as part of HTTP tunnelling tests.
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ self.transport_readonly_server = http_utils.HTTPServerWithSmarts
+ self.http_server = self.get_readonly_server()
+
+ def create_transport_readonly_server(self):
+ server = http_utils.HTTPServerWithSmarts(
+ protocol_version=self._protocol_version)
+ server._url_protocol = self._url_protocol
+ return server
+
+ def test_open_controldir(self):
+ branch = self.make_branch('relpath')
+ url = self.http_server.get_url() + 'relpath'
+ bd = controldir.ControlDir.open(url)
+ self.addCleanup(bd.transport.disconnect)
+ self.assertIsInstance(bd, _mod_remote.RemoteBzrDir)
+
+ def test_bulk_data(self):
+ # We should be able to send and receive bulk data in a single message.
+ # The 'readv' command in the smart protocol both sends and receives
+ # bulk data, so we use that.
+ self.build_tree(['data-file'])
+ http_transport = transport.get_transport_from_url(
+ self.http_server.get_url())
+ medium = http_transport.get_smart_medium()
+ # Since we provide the medium, the url below will be mostly ignored
+ # during the test, as long as the path is '/'.
+ remote_transport = remote.RemoteTransport('bzr://fake_host/',
+ medium=medium)
+ self.assertEqual(
+ [(0, "c")], list(remote_transport.readv("data-file", [(0,1)])))
+
+ def test_http_send_smart_request(self):
+
+ post_body = 'hello\n'
+ expected_reply_body = 'ok\x012\n'
+
+ http_transport = transport.get_transport_from_url(
+ self.http_server.get_url())
+ medium = http_transport.get_smart_medium()
+ response = medium.send_http_smart_request(post_body)
+ reply_body = response.read()
+ self.assertEqual(expected_reply_body, reply_body)
+
+ def test_smart_http_server_post_request_handler(self):
+ httpd = self.http_server.server
+
+ socket = SampleSocket(
+ 'POST /.bzr/smart %s \r\n' % self._protocol_version
+ # HTTP/1.1 posts must have a Content-Length (but it doesn't hurt
+ # for 1.0)
+ + 'Content-Length: 6\r\n'
+ '\r\n'
+ 'hello\n')
+ # Beware: the ('localhost', 80) below is the
+ # client_address parameter, but we don't have one because
+ # we have defined a socket which is not bound to an
+ # address. The test framework never uses this client
+ # address, so far...
+ request_handler = http_utils.SmartRequestHandler(socket,
+ ('localhost', 80),
+ httpd)
+ response = socket.writefile.getvalue()
+ self.assertStartsWith(response, '%s 200 ' % self._protocol_version)
+ # This includes the end of the HTTP headers, and all the body.
+ expected_end_of_response = '\r\n\r\nok\x012\n'
+ self.assertEndsWith(response, expected_end_of_response)
+
+
+class ForbiddenRequestHandler(http_server.TestingHTTPRequestHandler):
+ """No smart server here request handler."""
+
+ def do_POST(self):
+ self.send_error(403, "Forbidden")
+
+
+class SmartClientAgainstNotSmartServer(TestSpecificRequestHandler):
+ """Test smart client behaviour against an http server without smarts."""
+
+ _req_handler_class = ForbiddenRequestHandler
+
+ def test_probe_smart_server(self):
+ """Test error handling against server refusing smart requests."""
+ t = self.get_readonly_transport()
+ # No need to build a valid smart request here, the server will not even
+ # try to interpret it.
+ self.assertRaises(errors.SmartProtocolError,
+ t.get_smart_medium().send_http_smart_request,
+ 'whatever')
+
+
+class Test_redirected_to(tests.TestCase):
+
+ scenarios = vary_by_http_client_implementation()
+
+ def test_redirected_to_subdir(self):
+ t = self._transport('http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'http://www.example.com/foo/subdir')
+ self.assertIsInstance(r, type(t))
+ # Both transports share the same connection
+ self.assertEqual(t._get_connection(), r._get_connection())
+ self.assertEquals('http://www.example.com/foo/subdir/', r.base)
+
+ def test_redirected_to_self_with_slash(self):
+ t = self._transport('http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'http://www.example.com/foo/')
+ self.assertIsInstance(r, type(t))
+ # Both transports share the same connection (one can argue that we
+ # should return the exact same transport here, but that seems
+ # overkill).
+ self.assertEqual(t._get_connection(), r._get_connection())
+
+ def test_redirected_to_host(self):
+ t = self._transport('http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'http://foo.example.com/foo/subdir')
+ self.assertIsInstance(r, type(t))
+ self.assertEquals('http://foo.example.com/foo/subdir/',
+ r.external_url())
+
+ def test_redirected_to_same_host_sibling_protocol(self):
+ t = self._transport('http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'https://www.example.com/foo')
+ self.assertIsInstance(r, type(t))
+ self.assertEquals('https://www.example.com/foo/',
+ r.external_url())
+
+ def test_redirected_to_same_host_different_protocol(self):
+ t = self._transport('http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'ftp://www.example.com/foo')
+ self.assertNotEquals(type(r), type(t))
+ self.assertEquals('ftp://www.example.com/foo/', r.external_url())
+
+ def test_redirected_to_same_host_specific_implementation(self):
+ t = self._transport('http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'https+urllib://www.example.com/foo')
+ self.assertEquals('https://www.example.com/foo/', r.external_url())
+
+ def test_redirected_to_different_host_same_user(self):
+ t = self._transport('http://joe@www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'https://foo.example.com/foo')
+ self.assertIsInstance(r, type(t))
+ self.assertEqual(t._parsed_url.user, r._parsed_url.user)
+ self.assertEquals('https://joe@foo.example.com/foo/', r.external_url())
+
+
+class PredefinedRequestHandler(http_server.TestingHTTPRequestHandler):
+ """Request handler for a unique and pre-defined request.
+
+ The only thing we care about here is how many bytes travel on the wire. But
+ since we want to measure it for a real http client, we have to send it
+ correct responses.
+
+ We expect to receive a *single* request, nothing more (we won't even
+ check what request it is; we just measure the bytes read until an empty
+ line).
+ """
+
+ def _handle_one_request(self):
+ tcs = self.server.test_case_server
+ requestline = self.rfile.readline()
+ headers = self.MessageClass(self.rfile, 0)
+ # We just read: the request, the headers, an empty line indicating the
+ # end of the headers.
+ bytes_read = len(requestline)
+ for line in headers.headers:
+ bytes_read += len(line)
+ bytes_read += len('\r\n')
+ if requestline.startswith('POST'):
+ # The body should be a single line (or we don't know where it ends
+ # and we don't want to issue a blocking read)
+ body = self.rfile.readline()
+ bytes_read += len(body)
+ tcs.bytes_read = bytes_read
+
+ # We set the bytes written *before* issuing the write; the client is
+ # supposed to consume every produced byte *before* checking that value.
+
+ # Doing the opposite may lead to test failure: we may be interrupted
+ # after the write but before updating the value. The client can then
+ # continue and read the value *before* we can update it. And yes,
+ # this has been observed -- vila 20090129
+ tcs.bytes_written = len(tcs.canned_response)
+ self.wfile.write(tcs.canned_response)
+
+
+class ActivityServerMixin(object):
+
+ def __init__(self, protocol_version):
+ super(ActivityServerMixin, self).__init__(
+ request_handler=PredefinedRequestHandler,
+ protocol_version=protocol_version)
+ # Bytes read and written by the server
+ self.bytes_read = 0
+ self.bytes_written = 0
+ self.canned_response = None
+
+
+class ActivityHTTPServer(ActivityServerMixin, http_server.HttpServer):
+ pass
+
+
+if features.HTTPSServerFeature.available():
+ from bzrlib.tests import https_server
+ class ActivityHTTPSServer(ActivityServerMixin, https_server.HTTPSServer):
+ pass
+
+
+class TestActivityMixin(object):
+ """Test socket activity reporting.
+
+ We use a special purpose server to control the bytes sent and received and
+ be able to predict the activity on the client socket.
+ """
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.server = self._activity_server(self._protocol_version)
+ self.server.start_server()
+ self.addCleanup(self.server.stop_server)
+ _activities = {} # Don't close over self and create a cycle
+ def report_activity(t, bytes, direction):
+ count = _activities.get(direction, 0)
+ count += bytes
+ _activities[direction] = count
+ self.activities = _activities
+ # We override at class level because constructors may propagate the
+ # bound method and render instance overriding ineffective (an
+ # alternative would be to define a specific ui factory instead...)
+ self.overrideAttr(self._transport, '_report_activity', report_activity)
+
+ def get_transport(self):
+ t = self._transport(self.server.get_url())
+ # FIXME: Needs cleanup -- vila 20100611
+ return t
+
+ def assertActivitiesMatch(self):
+ self.assertEqual(self.server.bytes_read,
+ self.activities.get('write', 0), 'written bytes')
+ self.assertEqual(self.server.bytes_written,
+ self.activities.get('read', 0), 'read bytes')
+
+ def test_get(self):
+ self.server.canned_response = '''HTTP/1.1 200 OK\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Sun, 23 Apr 2006 19:35:20 GMT\r
+ETag: "56691-23-38e9ae00"\r
+Accept-Ranges: bytes\r
+Content-Length: 35\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+Bazaar-NG meta directory, format 1
+'''
+ t = self.get_transport()
+ self.assertEqual('Bazaar-NG meta directory, format 1\n',
+ t.get('foo/bar').read())
+ self.assertActivitiesMatch()
+
+ def test_has(self):
+ self.server.canned_response = '''HTTP/1.1 200 OK\r
+Server: SimpleHTTP/0.6 Python/2.5.2\r
+Date: Thu, 29 Jan 2009 20:21:47 GMT\r
+Content-type: application/octet-stream\r
+Content-Length: 20\r
+Last-Modified: Thu, 29 Jan 2009 20:21:47 GMT\r
+\r
+'''
+ t = self.get_transport()
+ self.assertTrue(t.has('foo/bar'))
+ self.assertActivitiesMatch()
+
+ def test_readv(self):
+ self.server.canned_response = '''HTTP/1.1 206 Partial Content\r
+Date: Tue, 11 Jul 2006 04:49:48 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Thu, 06 Jul 2006 20:22:05 GMT\r
+ETag: "238a3c-16ec2-805c5540"\r
+Accept-Ranges: bytes\r
+Content-Length: 1534\r
+Connection: close\r
+Content-Type: multipart/byteranges; boundary=418470f848b63279b\r
+\r
+\r
+--418470f848b63279b\r
+Content-type: text/plain; charset=UTF-8\r
+Content-range: bytes 0-254/93890\r
+\r
+mbp@sourcefrog.net-20050309040815-13242001617e4a06
+mbp@sourcefrog.net-20050309040929-eee0eb3e6d1e7627
+mbp@sourcefrog.net-20050309040957-6cad07f466bb0bb8
+mbp@sourcefrog.net-20050309041501-c840e09071de3b67
+mbp@sourcefrog.net-20050309044615-c24a3250be83220a
+\r
+--418470f848b63279b\r
+Content-type: text/plain; charset=UTF-8\r
+Content-range: bytes 1000-2049/93890\r
+\r
+40-fd4ec249b6b139ab
+mbp@sourcefrog.net-20050311063625-07858525021f270b
+mbp@sourcefrog.net-20050311231934-aa3776aff5200bb9
+mbp@sourcefrog.net-20050311231953-73aeb3a131c3699a
+mbp@sourcefrog.net-20050311232353-f5e33da490872c6a
+mbp@sourcefrog.net-20050312071639-0a8f59a34a024ff0
+mbp@sourcefrog.net-20050312073432-b2c16a55e0d6e9fb
+mbp@sourcefrog.net-20050312073831-a47c3335ece1920f
+mbp@sourcefrog.net-20050312085412-13373aa129ccbad3
+mbp@sourcefrog.net-20050313052251-2bf004cb96b39933
+mbp@sourcefrog.net-20050313052856-3edd84094687cb11
+mbp@sourcefrog.net-20050313053233-e30a4f28aef48f9d
+mbp@sourcefrog.net-20050313053853-7c64085594ff3072
+mbp@sourcefrog.net-20050313054757-a86c3f5871069e22
+mbp@sourcefrog.net-20050313061422-418f1f73b94879b9
+mbp@sourcefrog.net-20050313120651-497bd231b19df600
+mbp@sourcefrog.net-20050314024931-eae0170ef25a5d1a
+mbp@sourcefrog.net-20050314025438-d52099f915fe65fc
+mbp@sourcefrog.net-20050314025539-637a636692c055cf
+mbp@sourcefrog.net-20050314025737-55eb441f430ab4ba
+mbp@sourcefrog.net-20050314025901-d74aa93bb7ee8f62
+mbp@source\r
+--418470f848b63279b--\r
+'''
+ t = self.get_transport()
+ # Remember that the request is ignored and that the ranges below
+ # don't have to match the canned response.
+ l = list(t.readv('/foo/bar', ((0, 255), (1000, 1050))))
+ self.assertEqual(2, len(l))
+ self.assertActivitiesMatch()
+
+ def test_post(self):
+ self.server.canned_response = '''HTTP/1.1 200 OK\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Sun, 23 Apr 2006 19:35:20 GMT\r
+ETag: "56691-23-38e9ae00"\r
+Accept-Ranges: bytes\r
+Content-Length: 35\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+lalala whatever as long as itsssss
+'''
+ t = self.get_transport()
+ # We must send a single line of body bytes, see
+ # PredefinedRequestHandler._handle_one_request
+ code, f = t._post('abc def end-of-body\n')
+ self.assertEqual('lalala whatever as long as itsssss\n', f.read())
+ self.assertActivitiesMatch()
+
+
+class TestActivity(tests.TestCase, TestActivityMixin):
+
+ scenarios = multiply_scenarios(
+ vary_by_http_activity(),
+ vary_by_http_protocol_version(),
+ )
+
+ def setUp(self):
+ TestActivityMixin.setUp(self)
+
+
+class TestNoReportActivity(tests.TestCase, TestActivityMixin):
+
+ # Unlike TestActivity, we are really testing ReportingFileSocket and
+ # ReportingSocket, so we don't need all the parametrization. Since
+ # ReportingFileSocket and ReportingSocket are wrappers, it's easier to
+ # test them through their use by the transport than directly (that's a
+ # bit less clean but far simpler and effective).
+ _activity_server = ActivityHTTPServer
+ _protocol_version = 'HTTP/1.1'
+
+ def setUp(self):
+ self._transport = _urllib.HttpTransport_urllib
+ TestActivityMixin.setUp(self)
+
+ def assertActivitiesMatch(self):
+ # Nothing to check here
+ pass
+
+
+class TestAuthOnRedirected(http_utils.TestCaseWithRedirectedWebserver):
+ """Test authentication on the redirected http server."""
+
+ scenarios = vary_by_http_protocol_version()
+
+ _auth_header = 'Authorization'
+ _password_prompt_prefix = ''
+ _username_prompt_prefix = ''
+ _auth_server = http_utils.HTTPBasicAuthServer
+ _transport = _urllib.HttpTransport_urllib
+
+ def setUp(self):
+ super(TestAuthOnRedirected, self).setUp()
+ self.build_tree_contents([('a','a'),
+ ('1/',),
+ ('1/a', 'redirected once'),
+ ],)
+ new_prefix = 'http://%s:%s' % (self.new_server.host,
+ self.new_server.port)
+ self.old_server.redirections = [
+ ('(.*)', r'%s/1\1' % (new_prefix), 301),]
+ self.old_transport = self.get_old_transport()
+ self.new_server.add_user('joe', 'foo')
+ cleanup_http_redirection_connections(self)
+
+ def create_transport_readonly_server(self):
+ server = self._auth_server(protocol_version=self._protocol_version)
+ server._url_protocol = self._url_protocol
+ return server
+
+ def get_a(self, t):
+ return t.get('a')
+
+ def test_auth_on_redirected_via_do_catching_redirections(self):
+ self.redirections = 0
+
+ def redirected(t, exception, redirection_notice):
+ self.redirections += 1
+ redirected_t = t._redirected_to(exception.source, exception.target)
+ self.addCleanup(redirected_t.disconnect)
+ return redirected_t
+
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin='joe\nfoo\n',
+ stdout=stdout, stderr=stderr)
+ self.assertEqual('redirected once',
+ transport.do_catching_redirections(
+ self.get_a, self.old_transport, redirected).read())
+ self.assertEqual(1, self.redirections)
+ # stdin should be empty
+ self.assertEqual('', ui.ui_factory.stdin.readline())
+        # stdout should be empty, stderr will contain the prompts
+ self.assertEqual('', stdout.getvalue())
+
+ def test_auth_on_redirected_via_following_redirections(self):
+ self.new_server.add_user('joe', 'foo')
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin='joe\nfoo\n',
+ stdout=stdout, stderr=stderr)
+ t = self.old_transport
+ req = RedirectedRequest('GET', t.abspath('a'))
+ new_prefix = 'http://%s:%s' % (self.new_server.host,
+ self.new_server.port)
+ self.old_server.redirections = [
+ ('(.*)', r'%s/1\1' % (new_prefix), 301),]
+ self.assertEqual('redirected once', t._perform(req).read())
+ # stdin should be empty
+ self.assertEqual('', ui.ui_factory.stdin.readline())
+        # stdout should be empty, stderr will contain the prompts
+ self.assertEqual('', stdout.getvalue())
+
diff --git a/bzrlib/tests/test_http_response.py b/bzrlib/tests/test_http_response.py
new file mode 100644
index 0000000..bbfcea8
--- /dev/null
+++ b/bzrlib/tests/test_http_response.py
@@ -0,0 +1,830 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests from HTTP response parsing.
+
+The handle_response method read the response body of a GET request an returns
+the corresponding RangeFile.
+
+There are four different kinds of RangeFile:
+- a whole file whose size is unknown, seen as a simple byte stream,
+- a whole file whose size is known, we can't read past its end,
+- a single range file, a part of a file with a start and a size,
+- a multiple range file, several consecutive parts with known start offset
+ and size.
+
+Some properties are common to all kinds:
+- seek can only be forward (it's really a socket underneath),
+- read can't cross ranges,
+- successive ranges are taken into account transparently,
+
+- the expected pattern of use is either seek(offset)+read(size) or a single
+ read with no size specified. For multiple range files, multiple read() will
+ return the corresponding ranges, trying to read further will raise
+ InvalidHttpResponse.
+"""
+
+from cStringIO import StringIO
+import httplib
+
+from bzrlib import (
+ errors,
+ tests,
+ )
+from bzrlib.transport.http import (
+ response,
+ _urllib2_wrappers,
+ )
+from bzrlib.tests.file_utils import (
+ FakeReadFile,
+ )
+
+
+class ReadSocket(object):
+ """A socket-like object that can be given a predefined content."""
+
+ def __init__(self, data):
+ self.readfile = StringIO(data)
+
+ def makefile(self, mode='r', bufsize=None):
+ return self.readfile
+
+
+class FakeHTTPConnection(_urllib2_wrappers.HTTPConnection):
+
+ def __init__(self, sock):
+ _urllib2_wrappers.HTTPConnection.__init__(self, 'localhost')
+ # Set the socket to bypass the connection
+ self.sock = sock
+
+ def send(self, str):
+ """Ignores the writes on the socket."""
+ pass
+
+
+class TestHTTPConnection(tests.TestCase):
+
+ def test_cleanup_pipe(self):
+ sock = ReadSocket("""HTTP/1.1 200 OK\r
+Content-Type: text/plain; charset=UTF-8\r
+Content-Length: 18
+\r
+0123456789
+garbage""")
+ conn = FakeHTTPConnection(sock)
+ # Simulate the request sending so that the connection will be able to
+ # read the response.
+ conn.putrequest('GET', 'http://localhost/fictious')
+ conn.endheaders()
+ # Now, get the response
+ resp = conn.getresponse()
+ # Read part of the response
+ self.assertEquals('0123456789\n', resp.read(11))
+        # Override the threshold to force the warning emission
+ conn._range_warning_thresold = 6 # There are 7 bytes pending
+ conn.cleanup_pipe()
+ self.assertContainsRe(self.get_log(), 'Got a 200 response when asking')
+
+
+class TestRangeFileMixin(object):
+ """Tests for accessing the first range in a RangeFile."""
+
+ # A simple string used to represent a file part (also called a range), in
+ # which offsets are easy to calculate for test writers. It's used as a
+ # building block with slight variations but basically 'a' is the first char
+ # of the range and 'z' is the last.
+ alpha = 'abcdefghijklmnopqrstuvwxyz'
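+    # For instance, TestRangeFileSingleRange below uses first_range_start = 15,
+    # so 'a' sits at file offset 15 and seek(15 + 3) followed by read(3)
+    # returns 'def' (see test_seek_read).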
+
+ def test_can_read_at_first_access(self):
+ """Test that the just created file can be read."""
+ self.assertEquals(self.alpha, self._file.read())
+
+ def test_seek_read(self):
+ """Test seek/read inside the range."""
+ f = self._file
+ start = self.first_range_start
+ # Before any use, tell() should be at the range start
+ self.assertEquals(start, f.tell())
+ cur = start # For an overall offset assertion
+ f.seek(start + 3)
+ cur += 3
+ self.assertEquals('def', f.read(3))
+ cur += len('def')
+ f.seek(4, 1)
+ cur += 4
+ self.assertEquals('klmn', f.read(4))
+ cur += len('klmn')
+ # read(0) in the middle of a range
+ self.assertEquals('', f.read(0))
+ # seek in place
+ here = f.tell()
+ f.seek(0, 1)
+ self.assertEquals(here, f.tell())
+ self.assertEquals(cur, f.tell())
+
+ def test_read_zero(self):
+ f = self._file
+ start = self.first_range_start
+ self.assertEquals('', f.read(0))
+ f.seek(10, 1)
+ self.assertEquals('', f.read(0))
+
+ def test_seek_at_range_end(self):
+ f = self._file
+ f.seek(26, 1)
+
+ def test_read_at_range_end(self):
+ """Test read behaviour at range end."""
+ f = self._file
+ self.assertEquals(self.alpha, f.read())
+ self.assertEquals('', f.read(0))
+ self.assertRaises(errors.InvalidRange, f.read, 1)
+
+ def test_unbounded_read_after_seek(self):
+ f = self._file
+ f.seek(24, 1)
+ # Should not cross ranges
+ self.assertEquals('yz', f.read())
+
+ def test_seek_backwards(self):
+ f = self._file
+ start = self.first_range_start
+ f.seek(start)
+ f.read(12)
+ self.assertRaises(errors.InvalidRange, f.seek, start + 5)
+
+ def test_seek_outside_single_range(self):
+ f = self._file
+ if f._size == -1 or f._boundary is not None:
+ raise tests.TestNotApplicable('Needs a fully defined range')
+ # Will seek past the range and then errors out
+ self.assertRaises(errors.InvalidRange,
+ f.seek, self.first_range_start + 27)
+
+ def test_read_past_end_of_range(self):
+ f = self._file
+ if f._size == -1:
+ raise tests.TestNotApplicable("Can't check an unknown size")
+ start = self.first_range_start
+ f.seek(start + 20)
+ self.assertRaises(errors.InvalidRange, f.read, 10)
+
+ def test_seek_from_end(self):
+ """Test seeking from the end of the file.
+
+        The semantics are unclear in the case of multiple ranges. Seeking from
+        the end exists only for the http transports, cannot be used if the
+        file size is unknown and is not used in bzrlib itself. This test must
+        be (and is) overridden by daughter classes.
+
+ Reading from end makes sense only when a range has been requested from
+ the end of the file (see HttpTransportBase._get() when using the
+ 'tail_amount' parameter). The HTTP response can only be a whole file or
+ a single range.
+ """
+ f = self._file
+ f.seek(-2, 2)
+ self.assertEquals('yz', f.read())
+
+
+class TestRangeFileSizeUnknown(tests.TestCase, TestRangeFileMixin):
+ """Test a RangeFile for a whole file whose size is not known."""
+
+ def setUp(self):
+ super(TestRangeFileSizeUnknown, self).setUp()
+        self._file = response.RangeFile('Whole_file_size_unknown',
+ StringIO(self.alpha))
+ # We define no range, relying on RangeFile to provide default values
+ self.first_range_start = 0 # It's the whole file
+
+ def test_seek_from_end(self):
+ """See TestRangeFileMixin.test_seek_from_end.
+
+ The end of the file can't be determined since the size is unknown.
+ """
+ self.assertRaises(errors.InvalidRange, self._file.seek, -1, 2)
+
+ def test_read_at_range_end(self):
+ """Test read behaviour at range end."""
+ f = self._file
+ self.assertEquals(self.alpha, f.read())
+ self.assertEquals('', f.read(0))
+ self.assertEquals('', f.read(1))
+
+
+class TestRangeFileSizeKnown(tests.TestCase, TestRangeFileMixin):
+ """Test a RangeFile for a whole file whose size is known."""
+
+ def setUp(self):
+ super(TestRangeFileSizeKnown, self).setUp()
+ self._file = response.RangeFile('Whole_file_size_known',
+ StringIO(self.alpha))
+ self._file.set_range(0, len(self.alpha))
+ self.first_range_start = 0 # It's the whole file
+
+
+class TestRangeFileSingleRange(tests.TestCase, TestRangeFileMixin):
+ """Test a RangeFile for a single range."""
+
+ def setUp(self):
+ super(TestRangeFileSingleRange, self).setUp()
+ self._file = response.RangeFile('Single_range_file',
+ StringIO(self.alpha))
+ self.first_range_start = 15
+ self._file.set_range(self.first_range_start, len(self.alpha))
+
+ def test_read_before_range(self):
+ # This can't occur under normal circumstances, we have to force it
+ f = self._file
+ f._pos = 0 # Force an invalid pos
+ self.assertRaises(errors.InvalidRange, f.read, 2)
+
+
+class TestRangeFileMultipleRanges(tests.TestCase, TestRangeFileMixin):
+ """Test a RangeFile for multiple ranges.
+
+ The RangeFile used for the tests contains three ranges:
+
+ - at offset 25: alpha
+ - at offset 100: alpha
+ - at offset 126: alpha.upper()
+
+    The last two ranges are contiguous. This only rarely occurs (and should
+    not, in fact) in real use but may lead to hard-to-track bugs.
+ """
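+    # Concretely the ranges cover file offsets [25, 50], [100, 125] and
+    # [126, 151] of a (notional) 200-byte file, so after reading the second
+    # range tell() reports 126, the start of the third one.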
+
+    # The following is used to represent the boundary parameter defined
+ # in HTTP response headers and the boundary lines that separate
+ # multipart content.
+
+ boundary = "separation"
+
+ def setUp(self):
+ super(TestRangeFileMultipleRanges, self).setUp()
+
+ boundary = self.boundary
+
+ content = ''
+ self.first_range_start = 25
+ file_size = 200 # big enough to encompass all ranges
+ for (start, part) in [(self.first_range_start, self.alpha),
+ # Two contiguous ranges
+ (100, self.alpha),
+ (126, self.alpha.upper())]:
+ content += self._multipart_byterange(part, start, boundary,
+ file_size)
+ # Final boundary
+ content += self._boundary_line()
+
+ self._file = response.RangeFile('Multiple_ranges_file',
+ StringIO(content))
+ self.set_file_boundary()
+
+ def _boundary_line(self):
+ """Helper to build the formatted boundary line."""
+ return '--' + self.boundary + '\r\n'
+
+ def set_file_boundary(self):
+        # Ranges are set by decoding the range headers; the RangeFile user is
+        # supposed to call the following before using seek or read, since it
+        # requires knowing the *response* headers (in this case the boundary,
+        # which is part of the Content-Type header).
+ self._file.set_boundary(self.boundary)
+
+ def _multipart_byterange(self, data, offset, boundary, file_size='*'):
+ """Encode a part of a file as a multipart/byterange MIME type.
+
+ When a range request is issued, the HTTP response body can be
+        decomposed into parts, each one representing a range (start, size) in a
+ file.
+
+ :param data: The payload.
+ :param offset: where data starts in the file
+ :param boundary: used to separate the parts
+        :param file_size: the size of the file containing the range (defaults
+            to '*', meaning unknown)
+
+ :return: a string containing the data encoded as it will appear in the
+ HTTP response body.
+ """
+ bline = self._boundary_line()
+ # Each range begins with a boundary line
+ range = bline
+ # A range is described by a set of headers, but only 'Content-Range' is
+ # required for our implementation (TestHandleResponse below will
+        # exercise ranges with multiple or missing headers)
+ range += 'Content-Range: bytes %d-%d/%d\r\n' % (offset,
+ offset+len(data)-1,
+ file_size)
+ range += '\r\n'
+ # Finally the raw bytes
+ range += data
+ return range
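+    # For example (hypothetical values), _multipart_byterange('abc', 10,
+    # 'separation', 200) returns:
+    #
+    #   --separation\r\n
+    #   Content-Range: bytes 10-12/200\r\n
+    #   \r\n
+    #   abc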
+
+ def test_read_all_ranges(self):
+ f = self._file
+ self.assertEquals(self.alpha, f.read()) # Read first range
+ f.seek(100) # Trigger the second range recognition
+ self.assertEquals(self.alpha, f.read()) # Read second range
+ self.assertEquals(126, f.tell())
+ f.seek(126) # Start of third range which is also the current pos !
+ self.assertEquals('A', f.read(1))
+ f.seek(10, 1)
+ self.assertEquals('LMN', f.read(3))
+
+ def test_seek_from_end(self):
+ """See TestRangeFileMixin.test_seek_from_end."""
+ # The actual implementation will seek from end for the first range only
+ # and then fail. Since seeking from end is intended to be used for a
+        # single range only anyway, this test just documents the actual
+ # behaviour.
+ f = self._file
+ f.seek(-2, 2)
+ self.assertEquals('yz', f.read())
+ self.assertRaises(errors.InvalidRange, f.seek, -2, 2)
+
+ def test_seek_into_void(self):
+ f = self._file
+ start = self.first_range_start
+ f.seek(start)
+ # Seeking to a point between two ranges is possible (only once) but
+ # reading there is forbidden
+ f.seek(start + 40)
+ # We crossed a range boundary, so now the file is positioned at the
+ # start of the new range (i.e. trying to seek below 100 will error out)
+ f.seek(100)
+ f.seek(125)
+
+ def test_seek_across_ranges(self):
+ f = self._file
+ start = self.first_range_start
+        f.seek(126) # skip the first two ranges
+ self.assertEquals('AB', f.read(2))
+
+ def test_checked_read_dont_overflow_buffers(self):
+ f = self._file
+ start = self.first_range_start
+ # We force a very low value to exercise all code paths in _checked_read
+ f._discarded_buf_size = 8
+        f.seek(126) # skip the first two ranges
+ self.assertEquals('AB', f.read(2))
+
+ def test_seek_twice_between_ranges(self):
+ f = self._file
+ start = self.first_range_start
+ f.seek(start + 40) # Past the first range but before the second
+ # Now the file is positioned at the second range start (100)
+ self.assertRaises(errors.InvalidRange, f.seek, start + 41)
+
+ def test_seek_at_range_end(self):
+ """Test seek behavior at range end."""
+ f = self._file
+ f.seek(25 + 25)
+ f.seek(100 + 25)
+ f.seek(126 + 25)
+
+ def test_read_at_range_end(self):
+ f = self._file
+ self.assertEquals(self.alpha, f.read())
+ self.assertEquals(self.alpha, f.read())
+ self.assertEquals(self.alpha.upper(), f.read())
+ self.assertRaises(errors.InvalidHttpResponse, f.read, 1)
+
+
+class TestRangeFileMultipleRangesQuotedBoundaries(TestRangeFileMultipleRanges):
+    """Perform the same tests as TestRangeFileMultipleRanges, but use an
+    angle-bracket-quoted boundary string as produced by IIS 6.0 and 7.0
+    (but not IIS 5, which breaks the RFC in a different way,
+    by using square brackets, not angle brackets).
+
+ This reveals a bug caused by
+
+ - The bad implementation of RFC 822 unquoting in Python (angles are not
+ quotes), coupled with
+
+ - The bad implementation of RFC 2046 in IIS (angles are not permitted chars
+ in boundary lines).
+
+ """
+ # The boundary as it appears in boundary lines
+ # IIS 6 and 7 use this value
+ _boundary_trimmed = "q1w2e3r4t5y6u7i8o9p0zaxscdvfbgnhmjklkl"
+ boundary = '<' + _boundary_trimmed + '>'
+
+ def set_file_boundary(self):
+ # Emulate broken rfc822.unquote() here by removing angles
+ self._file.set_boundary(self._boundary_trimmed)
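+        # With the trimmed boundary set here while the body's boundary lines
+        # still read '--<...>' (see the class attributes above), the inherited
+        # tests verify that the ranges can still be located despite the
+        # IIS/Python mismatch described in the class docstring.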
+
+
+class TestRangeFileVarious(tests.TestCase):
+ """Tests RangeFile aspects not covered elsewhere."""
+
+ def test_seek_whence(self):
+ """Test the seek whence parameter values."""
+ f = response.RangeFile('foo', StringIO('abc'))
+ f.set_range(0, 3)
+ f.seek(0)
+ f.seek(1, 1)
+ f.seek(-1, 2)
+ self.assertRaises(ValueError, f.seek, 0, 14)
+
+ def test_range_syntax(self):
+ """Test the Content-Range scanning."""
+
+ f = response.RangeFile('foo', StringIO())
+
+ def ok(expected, header_value):
+ f.set_range_from_header(header_value)
+ # Slightly peek under the covers to get the size
+ self.assertEquals(expected, (f.tell(), f._size))
+
+ ok((1, 10), 'bytes 1-10/11')
+ ok((1, 10), 'bytes 1-10/*')
+ ok((12, 2), '\tbytes 12-13/*')
+ ok((28, 1), ' bytes 28-28/*')
+ ok((2123, 2120), 'bytes 2123-4242/12310')
+ ok((1, 10), 'bytes 1-10/ttt') # We don't check total (ttt)
+
+ def nok(header_value):
+ self.assertRaises(errors.InvalidHttpRange,
+ f.set_range_from_header, header_value)
+
+ nok('bytes 10-2/3')
+ nok('chars 1-2/3')
+ nok('bytes xx-yyy/zzz')
+ nok('bytes xx-12/zzz')
+ nok('bytes 11-yy/zzz')
+ nok('bytes10-2/3')
+
+
+# Taken from real request responses
+_full_text_response = (200, """HTTP/1.1 200 OK\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Sun, 23 Apr 2006 19:35:20 GMT\r
+ETag: "56691-23-38e9ae00"\r
+Accept-Ranges: bytes\r
+Content-Length: 35\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+""", """Bazaar-NG meta directory, format 1
+""")
+
+
+_single_range_response = (206, """HTTP/1.1 206 Partial Content\r
+Date: Tue, 11 Jul 2006 04:45:22 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Thu, 06 Jul 2006 20:22:05 GMT\r
+ETag: "238a3c-16ec2-805c5540"\r
+Accept-Ranges: bytes\r
+Content-Length: 100\r
+Content-Range: bytes 100-199/93890\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+""", """mbp@sourcefrog.net-20050309040815-13242001617e4a06
+mbp@sourcefrog.net-20050309040929-eee0eb3e6d1e762""")
+
+
+_single_range_no_content_type = (206, """HTTP/1.1 206 Partial Content\r
+Date: Tue, 11 Jul 2006 04:45:22 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Thu, 06 Jul 2006 20:22:05 GMT\r
+ETag: "238a3c-16ec2-805c5540"\r
+Accept-Ranges: bytes\r
+Content-Length: 100\r
+Content-Range: bytes 100-199/93890\r
+Connection: close\r
+\r
+""", """mbp@sourcefrog.net-20050309040815-13242001617e4a06
+mbp@sourcefrog.net-20050309040929-eee0eb3e6d1e762""")
+
+
+_multipart_range_response = (206, """HTTP/1.1 206 Partial Content\r
+Date: Tue, 11 Jul 2006 04:49:48 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Thu, 06 Jul 2006 20:22:05 GMT\r
+ETag: "238a3c-16ec2-805c5540"\r
+Accept-Ranges: bytes\r
+Content-Length: 1534\r
+Connection: close\r
+Content-Type: multipart/byteranges; boundary=418470f848b63279b\r
+\r
+\r""", """--418470f848b63279b\r
+Content-type: text/plain; charset=UTF-8\r
+Content-range: bytes 0-254/93890\r
+\r
+mbp@sourcefrog.net-20050309040815-13242001617e4a06
+mbp@sourcefrog.net-20050309040929-eee0eb3e6d1e7627
+mbp@sourcefrog.net-20050309040957-6cad07f466bb0bb8
+mbp@sourcefrog.net-20050309041501-c840e09071de3b67
+mbp@sourcefrog.net-20050309044615-c24a3250be83220a
+\r
+--418470f848b63279b\r
+Content-type: text/plain; charset=UTF-8\r
+Content-range: bytes 1000-2049/93890\r
+\r
+40-fd4ec249b6b139ab
+mbp@sourcefrog.net-20050311063625-07858525021f270b
+mbp@sourcefrog.net-20050311231934-aa3776aff5200bb9
+mbp@sourcefrog.net-20050311231953-73aeb3a131c3699a
+mbp@sourcefrog.net-20050311232353-f5e33da490872c6a
+mbp@sourcefrog.net-20050312071639-0a8f59a34a024ff0
+mbp@sourcefrog.net-20050312073432-b2c16a55e0d6e9fb
+mbp@sourcefrog.net-20050312073831-a47c3335ece1920f
+mbp@sourcefrog.net-20050312085412-13373aa129ccbad3
+mbp@sourcefrog.net-20050313052251-2bf004cb96b39933
+mbp@sourcefrog.net-20050313052856-3edd84094687cb11
+mbp@sourcefrog.net-20050313053233-e30a4f28aef48f9d
+mbp@sourcefrog.net-20050313053853-7c64085594ff3072
+mbp@sourcefrog.net-20050313054757-a86c3f5871069e22
+mbp@sourcefrog.net-20050313061422-418f1f73b94879b9
+mbp@sourcefrog.net-20050313120651-497bd231b19df600
+mbp@sourcefrog.net-20050314024931-eae0170ef25a5d1a
+mbp@sourcefrog.net-20050314025438-d52099f915fe65fc
+mbp@sourcefrog.net-20050314025539-637a636692c055cf
+mbp@sourcefrog.net-20050314025737-55eb441f430ab4ba
+mbp@sourcefrog.net-20050314025901-d74aa93bb7ee8f62
+mbp@source\r
+--418470f848b63279b--\r
+""")
+
+
+_multipart_squid_range_response = (206, """HTTP/1.0 206 Partial Content\r
+Date: Thu, 31 Aug 2006 21:16:22 GMT\r
+Server: Apache/2.2.2 (Unix) DAV/2\r
+Last-Modified: Thu, 31 Aug 2006 17:57:06 GMT\r
+Accept-Ranges: bytes\r
+Content-Type: multipart/byteranges; boundary="squid/2.5.STABLE12:C99323425AD4FE26F726261FA6C24196"\r
+Content-Length: 598\r
+X-Cache: MISS from localhost.localdomain\r
+X-Cache-Lookup: HIT from localhost.localdomain:3128\r
+Proxy-Connection: keep-alive\r
+\r
+""",
+"""\r
+--squid/2.5.STABLE12:C99323425AD4FE26F726261FA6C24196\r
+Content-Type: text/plain\r
+Content-Range: bytes 0-99/18672\r
+\r
+# bzr knit index 8
+
+scott@netsplit.com-20050708230047-47c7868f276b939f fulltext 0 863 :
+scott@netsp\r
+--squid/2.5.STABLE12:C99323425AD4FE26F726261FA6C24196\r
+Content-Type: text/plain\r
+Content-Range: bytes 300-499/18672\r
+\r
+com-20050708231537-2b124b835395399a :
+scott@netsplit.com-20050820234126-551311dbb7435b51 line-delta 1803 479 .scott@netsplit.com-20050820232911-dc4322a084eadf7e :
+scott@netsplit.com-20050821213706-c86\r
+--squid/2.5.STABLE12:C99323425AD4FE26F726261FA6C24196--\r
+""")
+
+
+# This is made up
+_full_text_response_no_content_type = (200, """HTTP/1.1 200 OK\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Sun, 23 Apr 2006 19:35:20 GMT\r
+ETag: "56691-23-38e9ae00"\r
+Accept-Ranges: bytes\r
+Content-Length: 35\r
+Connection: close\r
+\r
+""", """Bazaar-NG meta directory, format 1
+""")
+
+
+_full_text_response_no_content_length = (200, """HTTP/1.1 200 OK\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Sun, 23 Apr 2006 19:35:20 GMT\r
+ETag: "56691-23-38e9ae00"\r
+Accept-Ranges: bytes\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+""", """Bazaar-NG meta directory, format 1
+""")
+
+
+_single_range_no_content_range = (206, """HTTP/1.1 206 Partial Content\r
+Date: Tue, 11 Jul 2006 04:45:22 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Thu, 06 Jul 2006 20:22:05 GMT\r
+ETag: "238a3c-16ec2-805c5540"\r
+Accept-Ranges: bytes\r
+Content-Length: 100\r
+Connection: close\r
+\r
+""", """mbp@sourcefrog.net-20050309040815-13242001617e4a06
+mbp@sourcefrog.net-20050309040929-eee0eb3e6d1e762""")
+
+
+_single_range_response_truncated = (206, """HTTP/1.1 206 Partial Content\r
+Date: Tue, 11 Jul 2006 04:45:22 GMT\r
+Server: Apache/2.0.54 (Fedora)\r
+Last-Modified: Thu, 06 Jul 2006 20:22:05 GMT\r
+ETag: "238a3c-16ec2-805c5540"\r
+Accept-Ranges: bytes\r
+Content-Length: 100\r
+Content-Range: bytes 100-199/93890\r
+Connection: close\r
+Content-Type: text/plain; charset=UTF-8\r
+\r
+""", """mbp@sourcefrog.net-20050309040815-13242001617e4a06""")
+
+
+_invalid_response = (444, """HTTP/1.1 444 Bad Response\r
+Date: Tue, 11 Jul 2006 04:32:56 GMT\r
+Connection: close\r
+Content-Type: text/html; charset=iso-8859-1\r
+\r
+""", """<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
+<html><head>
+<title>404 Not Found</title>
+</head><body>
+<h1>Not Found</h1>
+<p>I don't know what I'm doing</p>
+<hr>
+</body></html>
+""")
+
+
+_multipart_no_content_range = (206, """HTTP/1.0 206 Partial Content\r
+Content-Type: multipart/byteranges; boundary=THIS_SEPARATES\r
+Content-Length: 598\r
+\r
+""",
+"""\r
+--THIS_SEPARATES\r
+Content-Type: text/plain\r
+\r
+# bzr knit index 8
+--THIS_SEPARATES\r
+""")
+
+
+_multipart_no_boundary = (206, """HTTP/1.0 206 Partial Content\r
+Content-Type: multipart/byteranges; boundary=THIS_SEPARATES\r
+Content-Length: 598\r
+\r
+""",
+"""\r
+--THIS_SEPARATES\r
+Content-Type: text/plain\r
+Content-Range: bytes 0-18/18672\r
+\r
+# bzr knit index 8
+
+The range ended at the line above, this text is garbage instead of a boundary
+line
+""")
+
+
+class TestHandleResponse(tests.TestCase):
+
+ def _build_HTTPMessage(self, raw_headers):
+ status_and_headers = StringIO(raw_headers)
+ # Get rid of the status line
+ status_and_headers.readline()
+ msg = httplib.HTTPMessage(status_and_headers)
+ return msg
+
+ def get_response(self, a_response):
+ """Process a supplied response, and return the result."""
+ code, raw_headers, body = a_response
+ msg = self._build_HTTPMessage(raw_headers)
+ return response.handle_response('http://foo', code, msg,
+                                        StringIO(body))
+
+ def test_full_text(self):
+ out = self.get_response(_full_text_response)
+ # It is a StringIO from the original data
+ self.assertEqual(_full_text_response[2], out.read())
+
+ def test_single_range(self):
+ out = self.get_response(_single_range_response)
+
+ out.seek(100)
+ self.assertEqual(_single_range_response[2], out.read(100))
+
+ def test_single_range_no_content(self):
+ out = self.get_response(_single_range_no_content_type)
+
+ out.seek(100)
+ self.assertEqual(_single_range_no_content_type[2], out.read(100))
+
+ def test_single_range_truncated(self):
+ out = self.get_response(_single_range_response_truncated)
+ # Content-Range declares 100 but only 51 present
+ self.assertRaises(errors.ShortReadvError, out.seek, out.tell() + 51)
+
+ def test_multi_range(self):
+ out = self.get_response(_multipart_range_response)
+
+ # Just make sure we can read the right contents
+ out.seek(0)
+ out.read(255)
+
+ out.seek(1000)
+ out.read(1050)
+
+ def test_multi_squid_range(self):
+ out = self.get_response(_multipart_squid_range_response)
+
+ # Just make sure we can read the right contents
+ out.seek(0)
+ out.read(100)
+
+ out.seek(300)
+ out.read(200)
+
+ def test_invalid_response(self):
+ self.assertRaises(errors.InvalidHttpResponse,
+ self.get_response, _invalid_response)
+
+ def test_full_text_no_content_type(self):
+ # We should not require Content-Type for a full response
+ code, raw_headers, body = _full_text_response_no_content_type
+ msg = self._build_HTTPMessage(raw_headers)
+ out = response.handle_response('http://foo', code, msg, StringIO(body))
+ self.assertEqual(body, out.read())
+
+ def test_full_text_no_content_length(self):
+ code, raw_headers, body = _full_text_response_no_content_length
+ msg = self._build_HTTPMessage(raw_headers)
+ out = response.handle_response('http://foo', code, msg, StringIO(body))
+ self.assertEqual(body, out.read())
+
+ def test_missing_content_range(self):
+ code, raw_headers, body = _single_range_no_content_range
+ msg = self._build_HTTPMessage(raw_headers)
+ self.assertRaises(errors.InvalidHttpResponse,
+ response.handle_response,
+ 'http://bogus', code, msg, StringIO(body))
+
+ def test_multipart_no_content_range(self):
+ code, raw_headers, body = _multipart_no_content_range
+ msg = self._build_HTTPMessage(raw_headers)
+ self.assertRaises(errors.InvalidHttpResponse,
+ response.handle_response,
+ 'http://bogus', code, msg, StringIO(body))
+
+ def test_multipart_no_boundary(self):
+ out = self.get_response(_multipart_no_boundary)
+ out.read() # Read the whole range
+ # Fail to find the boundary line
+ self.assertRaises(errors.InvalidHttpResponse, out.seek, 1, 1)
+
+
+class TestRangeFileSizeReadLimited(tests.TestCase):
+ """Test RangeFile _max_read_size functionality which limits the size of
+ read blocks to prevent MemoryError messages in socket.recv.
+ """
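+    # Illustrative expectation: with _max_read_size == N, the test data built
+    # below is 3 * N bytes long, so a single read() of the whole buffer should
+    # reach the underlying file as three reads of at most N bytes each
+    # (checked via FakeReadFile.get_read_count() in test_max_read_size).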
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ # create a test datablock larger than _max_read_size.
+ chunk_size = response.RangeFile._max_read_size
+ test_pattern = '0123456789ABCDEF'
+ self.test_data = test_pattern * (3 * chunk_size / len(test_pattern))
+ self.test_data_len = len(self.test_data)
+
+ def test_max_read_size(self):
+ """Read data in blocks and verify that the reads are not larger than
+ the maximum read size.
+ """
+ # retrieve data in large blocks from response.RangeFile object
+ mock_read_file = FakeReadFile(self.test_data)
+ range_file = response.RangeFile('test_max_read_size', mock_read_file)
+ response_data = range_file.read(self.test_data_len)
+
+ # verify read size was equal to the maximum read size
+ self.assertTrue(mock_read_file.get_max_read_size() > 0)
+ self.assertEqual(mock_read_file.get_max_read_size(),
+ response.RangeFile._max_read_size)
+ self.assertEqual(mock_read_file.get_read_count(), 3)
+
+ # report error if the data wasn't equal (we only report the size due
+ # to the length of the data)
+ if response_data != self.test_data:
+ message = "Data not equal. Expected %d bytes, received %d."
+            self.fail(message % (self.test_data_len, len(response_data)))
+
diff --git a/bzrlib/tests/test_https_ca_bundle.py b/bzrlib/tests/test_https_ca_bundle.py
new file mode 100644
index 0000000..57f77fa
--- /dev/null
+++ b/bzrlib/tests/test_https_ca_bundle.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Testing of bzrlib.transport.http.ca_bundle module"""
+
+import os
+import sys
+
+from bzrlib.tests import (
+ TestCaseInTempDir,
+ TestSkipped,
+ )
+from bzrlib.transport.http import ca_bundle
+
+
+class TestGetCAPath(TestCaseInTempDir):
+
+ def setUp(self):
+ super(TestGetCAPath, self).setUp()
+ self.overrideEnv('CURL_CA_BUNDLE', None)
+ self.overrideEnv('PATH', None)
+
+ def _make_file(self, in_dir='.'):
+ fname = os.path.join(in_dir, 'curl-ca-bundle.crt')
+        f = file(fname, 'w')
+ f.write('spam')
+ f.close()
+
+ def test_found_nothing(self):
+ self.assertEqual('', ca_bundle.get_ca_path(use_cache=False))
+
+ def test_env_var(self):
+ self.overrideEnv('CURL_CA_BUNDLE', 'foo.bar')
+ self._make_file()
+ self.assertEqual('foo.bar', ca_bundle.get_ca_path(use_cache=False))
+
+ def test_in_path(self):
+ if sys.platform != 'win32':
+ raise TestSkipped('Searching in PATH implemented only for win32')
+ os.mkdir('foo')
+ in_dir = os.path.join(os.getcwd(), 'foo')
+ self._make_file(in_dir=in_dir)
+ self.overrideEnv('PATH', in_dir)
+ shouldbe = os.path.join(in_dir, 'curl-ca-bundle.crt')
+ self.assertEqual(shouldbe, ca_bundle.get_ca_path(use_cache=False))
diff --git a/bzrlib/tests/test_https_urllib.py b/bzrlib/tests/test_https_urllib.py
new file mode 100644
index 0000000..4804418
--- /dev/null
+++ b/bzrlib/tests/test_https_urllib.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2011,2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the SSL support in the urllib HTTP transport.
+
+"""
+
+import os
+import ssl
+
+from bzrlib import (
+ config,
+ trace,
+ )
+from bzrlib.errors import (
+ CertificateError,
+ ConfigOptionValueError,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseInTempDir,
+ )
+from bzrlib.transport.http import _urllib2_wrappers
+
+
+class CaCertsConfigTests(TestCaseInTempDir):
+
+ def get_stack(self, content):
+ return config.MemoryStack(content.encode('utf-8'))
+
+ def test_default_exists(self):
+ """Check that the default we provide exists for the tested platform."""
+ stack = self.get_stack("")
+ self.assertPathExists(stack.get('ssl.ca_certs'))
+
+ def test_specified(self):
+ self.build_tree(['cacerts.pem'])
+ path = os.path.join(self.test_dir, "cacerts.pem")
+ stack = self.get_stack("ssl.ca_certs = %s\n" % path)
+ self.assertEquals(path, stack.get('ssl.ca_certs'))
+
+ def test_specified_doesnt_exist(self):
+ stack = self.get_stack('')
+ # Disable the default value mechanism to force the behavior we want
+ self.overrideAttr(_urllib2_wrappers.opt_ssl_ca_certs, 'default',
+ os.path.join(self.test_dir, u"nonexisting.pem"))
+ self.warnings = []
+ def warning(*args):
+ self.warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ self.assertEquals(None, stack.get('ssl.ca_certs'))
+ self.assertLength(1, self.warnings)
+ self.assertContainsRe(self.warnings[0],
+ "is not valid for \"ssl.ca_certs\"")
+
+
+class CertReqsConfigTests(TestCaseInTempDir):
+
+ def test_default(self):
+ stack = config.MemoryStack("")
+ self.assertEquals(ssl.CERT_REQUIRED, stack.get("ssl.cert_reqs"))
+
+ def test_from_string(self):
+ stack = config.MemoryStack("ssl.cert_reqs = none\n")
+ self.assertEquals(ssl.CERT_NONE, stack.get("ssl.cert_reqs"))
+ stack = config.MemoryStack("ssl.cert_reqs = required\n")
+ self.assertEquals(ssl.CERT_REQUIRED, stack.get("ssl.cert_reqs"))
+ stack = config.MemoryStack("ssl.cert_reqs = invalid\n")
+ self.assertRaises(ConfigOptionValueError, stack.get, "ssl.cert_reqs")
+
+
+class MatchHostnameTests(TestCase):
+
+ def test_no_certificate(self):
+ self.assertRaises(ValueError,
+ _urllib2_wrappers.match_hostname, {}, "example.com")
+
+ def test_no_valid_attributes(self):
+ self.assertRaises(CertificateError, _urllib2_wrappers.match_hostname,
+ {"Problem": "Solved"}, "example.com")
+
+ def test_common_name(self):
+ cert = {'subject': ((('commonName', 'example.com'),),)}
+ self.assertIs(None,
+ _urllib2_wrappers.match_hostname(cert, "example.com"))
+ self.assertRaises(CertificateError, _urllib2_wrappers.match_hostname,
+ cert, "example.org")
diff --git a/bzrlib/tests/test_i18n.py b/bzrlib/tests/test_i18n.py
new file mode 100644
index 0000000..0f81df5
--- /dev/null
+++ b/bzrlib/tests/test_i18n.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.i18n"""
+
+from bzrlib import (
+ i18n,
+ tests,
+ errors,
+ workingtree,
+ )
+
+
+class ZzzTranslations(object):
+ """Special Zzz translation for debugging i18n stuff.
+
+ This class can be used to confirm that the message is properly translated
+ during black box tests.
+ """
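+    # For example (illustrative): ZzzTranslations().ugettext('spam') returns
+    # u'zz\xe5{{spam}}', which makes translated messages easy to spot in
+    # blackbox output (see TestZzzTranslation below).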
+ _null_translation = i18n._gettext.NullTranslations()
+
+ def zzz(self, s):
+ return u'zz\xe5{{%s}}' % s
+
+ def ugettext(self, s):
+ return self.zzz(self._null_translation.ugettext(s))
+
+ def ungettext(self, s, p, n):
+ return self.zzz(self._null_translation.ungettext(s, p, n))
+
+
+class TestZzzTranslation(tests.TestCase):
+
+ def _check_exact(self, expected, source):
+ self.assertEqual(expected, source)
+ self.assertEqual(type(expected), type(source))
+
+ def test_translation(self):
+ trans = ZzzTranslations()
+
+ t = trans.zzz('msg')
+ self._check_exact(u'zz\xe5{{msg}}', t)
+
+ t = trans.ugettext('msg')
+ self._check_exact(u'zz\xe5{{msg}}', t)
+
+ t = trans.ungettext('msg1', 'msg2', 0)
+ self._check_exact(u'zz\xe5{{msg2}}', t)
+ t = trans.ungettext('msg1', 'msg2', 2)
+ self._check_exact(u'zz\xe5{{msg2}}', t)
+
+ t = trans.ungettext('msg1', 'msg2', 1)
+ self._check_exact(u'zz\xe5{{msg1}}', t)
+
+
+class TestGetText(tests.TestCase):
+
+ def setUp(self):
+ super(TestGetText, self).setUp()
+ self.overrideAttr(i18n, '_translations', ZzzTranslations())
+
+ def test_oneline(self):
+ self.assertEqual(u"zz\xe5{{spam ham eggs}}",
+ i18n.gettext("spam ham eggs"))
+
+ def test_multiline(self):
+ self.assertEqual(u"zz\xe5{{spam\nham\n\neggs\n}}",
+ i18n.gettext("spam\nham\n\neggs\n"))
+
+
+class TestGetTextPerParagraph(tests.TestCase):
+
+ def setUp(self):
+ super(TestGetTextPerParagraph, self).setUp()
+ self.overrideAttr(i18n, '_translations', ZzzTranslations())
+
+ def test_oneline(self):
+ self.assertEqual(u"zz\xe5{{spam ham eggs}}",
+ i18n.gettext_per_paragraph("spam ham eggs"))
+
+ def test_multiline(self):
+ self.assertEqual(u"zz\xe5{{spam\nham}}\n\nzz\xe5{{eggs\n}}",
+ i18n.gettext_per_paragraph("spam\nham\n\neggs\n"))
+
+
+class TestInstall(tests.TestCase):
+
+ def setUp(self):
+ super(TestInstall, self).setUp()
+ # Restore a proper env to test translation installation
+ self.overrideAttr(i18n, '_translations', None)
+
+ def test_custom_languages(self):
+ i18n.install('nl:fy')
+        # Whether we found a valid translation or not doesn't matter, we got
+ # one and _translations is not None anymore.
+ self.assertIsInstance(i18n._translations,
+ i18n._gettext.NullTranslations)
+
+ def test_no_env_variables(self):
+ self.overrideEnv('LANGUAGE', None)
+ self.overrideEnv('LC_ALL', None)
+ self.overrideEnv('LC_MESSAGES', None)
+ self.overrideEnv('LANG', None)
+ i18n.install()
+        # Whether we found a valid translation or not doesn't matter, we got
+ # one and _translations is not None anymore.
+ self.assertIsInstance(i18n._translations,
+ i18n._gettext.NullTranslations)
+
+ def test_disable_i18n(self):
+ i18n.disable_i18n()
+ i18n.install()
+        # It's disabled; you can't install anything and we fall back to null
+ self.assertIsInstance(i18n._translations,
+ i18n._gettext.NullTranslations)
+
+
+class TestTranslate(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestTranslate, self).setUp()
+ self.overrideAttr(i18n, '_translations', ZzzTranslations())
+
+ def test_error_message_translation(self):
+ """do errors get translated?"""
+ err = None
+ tree = self.make_branch_and_tree('.')
+ try:
+ workingtree.WorkingTree.open('./foo')
+        except errors.NotBranchError, e:
+ err = str(e)
+ self.assertContainsRe(err,
+ u"zz\xe5{{Not a branch: .*}}".encode("utf-8"))
+
+ def test_topic_help_translation(self):
+ """does topic help get translated?"""
+ from bzrlib import help
+ from StringIO import StringIO
+ out = StringIO()
+ help.help("authentication", out)
+ self.assertContainsRe(out.getvalue(), "zz\xe5{{Authentication Settings")
+
+
+class LoadPluginTranslations(tests.TestCase):
+
+ def test_does_not_exist(self):
+ translation = i18n.load_plugin_translations("doesnotexist")
+ self.assertEquals("foo", translation.gettext("foo"))
diff --git a/bzrlib/tests/test_identitymap.py b/bzrlib/tests/test_identitymap.py
new file mode 100644
index 0000000..193d54a
--- /dev/null
+++ b/bzrlib/tests/test_identitymap.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2005 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the IdentityMap class."""
+
+# import system imports here
+
+#import bzrlib specific imports here
+import bzrlib.errors as errors
+from bzrlib.tests import TestCase
+import bzrlib.identitymap as identitymap
+
+
+class TestIdentityMap(TestCase):
+
+ def test_symbols(self):
+ from bzrlib.identitymap import IdentityMap
+
+ def test_construct(self):
+ identitymap.IdentityMap()
+
+ def test_add_weave(self):
+ map = identitymap.IdentityMap()
+ weave = "foo"
+ map.add_weave("id", weave)
+ self.assertEqual(weave, map.find_weave("id"))
+
+ def test_double_add_weave(self):
+ map = identitymap.IdentityMap()
+ weave = "foo"
+ map.add_weave("id", weave)
+ self.assertRaises(errors.BzrError, map.add_weave, "id", weave)
+ self.assertEqual(weave, map.find_weave("id"))
+
+ def test_remove_object(self):
+ map = identitymap.IdentityMap()
+ weave = "foo"
+ map.add_weave("id", weave)
+ map.remove_object(weave)
+ map.add_weave("id", weave)
+
+
+class TestNullIdentityMap(TestCase):
+
+ def test_symbols(self):
+ from bzrlib.identitymap import NullIdentityMap
+
+ def test_construct(self):
+ identitymap.NullIdentityMap()
+
+ def test_add_weave(self):
+ map = identitymap.NullIdentityMap()
+ weave = "foo"
+ map.add_weave("id", weave)
+ self.assertEqual(None, map.find_weave("id"))
+
+ def test_double_add_weave(self):
+ map = identitymap.NullIdentityMap()
+ weave = "foo"
+ map.add_weave("id", weave)
+ map.add_weave("id", weave)
+ self.assertEqual(None, map.find_weave("id"))
+
+ def test_null_identity_map_has_no_remove(self):
+ map = identitymap.NullIdentityMap()
+ self.assertEqual(None, getattr(map, 'remove_object', None))
+
diff --git a/bzrlib/tests/test_ignores.py b/bzrlib/tests/test_ignores.py
new file mode 100644
index 0000000..abc8496
--- /dev/null
+++ b/bzrlib/tests/test_ignores.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for handling of ignore files"""
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ config,
+ ignores,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseInTempDir,
+ TestCaseWithTransport,
+ )
+
+
+class TestParseIgnoreFile(TestCase):
+
+ def test_parse_fancy(self):
+ ignored = ignores.parse_ignore_file(StringIO(
+ './rootdir\n'
+ 'randomfile*\n'
+ 'path/from/ro?t\n'
+ 'unicode\xc2\xb5\n' # u'\xb5'.encode('utf8')
+ 'dos\r\n'
+ '\n' # empty line
+ '#comment\n'
+ ' xx \n' # whitespace
+ '!RE:^\.z.*\n'
+ '!!./.zcompdump\n'
+ ))
+ self.assertEqual(set(['./rootdir',
+ 'randomfile*',
+ 'path/from/ro?t',
+ u'unicode\xb5',
+ 'dos',
+ ' xx ',
+ '!RE:^\.z.*',
+ '!!./.zcompdump',
+ ]), ignored)
+
+ def test_parse_empty(self):
+ ignored = ignores.parse_ignore_file(StringIO(''))
+ self.assertEqual(set([]), ignored)
+
+ def test_parse_non_utf8(self):
+        """Lines with non-UTF-8 characters should be discarded."""
+ ignored = ignores.parse_ignore_file(StringIO(
+ 'utf8filename_a\n'
+ 'invalid utf8\x80\n'
+ 'utf8filename_b\n'
+ ))
+ self.assertEqual(set([
+ 'utf8filename_a',
+ 'utf8filename_b',
+ ]), ignored)
+
+
+class TestUserIgnores(TestCaseInTempDir):
+
+ def test_create_if_missing(self):
+ # $HOME should be set to '.'
+ ignore_path = config.user_ignore_config_filename()
+ self.assertPathDoesNotExist(ignore_path)
+ user_ignores = ignores.get_user_ignores()
+ self.assertEqual(set(ignores.USER_DEFAULTS), user_ignores)
+
+ self.assertPathExists(ignore_path)
+ f = open(ignore_path, 'rb')
+ try:
+ entries = ignores.parse_ignore_file(f)
+ finally:
+ f.close()
+ self.assertEqual(set(ignores.USER_DEFAULTS), entries)
+
+ def test_use_existing(self):
+ patterns = ['*.o', '*.py[co]', u'\xe5*']
+ ignores._set_user_ignores(patterns)
+
+ user_ignores = ignores.get_user_ignores()
+ self.assertEqual(set(patterns), user_ignores)
+
+ def test_use_empty(self):
+ ignores._set_user_ignores([])
+ ignore_path = config.user_ignore_config_filename()
+ self.check_file_contents(ignore_path, '')
+
+ self.assertEqual(set([]), ignores.get_user_ignores())
+
+ def test_set(self):
+ patterns = ['*.py[co]', '*.py[oc]']
+ ignores._set_user_ignores(patterns)
+
+ self.assertEqual(set(patterns), ignores.get_user_ignores())
+
+ patterns = ['vim', '*.swp']
+ ignores._set_user_ignores(patterns)
+ self.assertEqual(set(patterns), ignores.get_user_ignores())
+
+ def test_add(self):
+        """Test that patterns can be added to an empty ignore file"""
+ # Create an empty file
+ ignores._set_user_ignores([])
+
+ patterns = ['foo', './bar', u'b\xe5z']
+ added = ignores.add_unique_user_ignores(patterns)
+ self.assertEqual(patterns, added)
+ self.assertEqual(set(patterns), ignores.get_user_ignores())
+
+ def test_add_directory(self):
+ """Test that adding a directory will strip any trailing slash"""
+ # Create an empty file
+ ignores._set_user_ignores([])
+
+ in_patterns = ['foo/', 'bar/', 'baz\\']
+ added = ignores.add_unique_user_ignores(in_patterns)
+ out_patterns = [ x.rstrip('/\\') for x in in_patterns ]
+ self.assertEqual(out_patterns, added)
+ self.assertEqual(set(out_patterns), ignores.get_user_ignores())
+
+ def test_add_unique(self):
+ """Test that adding will not duplicate ignores"""
+ ignores._set_user_ignores(
+ ['foo', './bar', u'b\xe5z', 'dir1/', 'dir3\\'])
+
+ added = ignores.add_unique_user_ignores(
+ ['xxx', './bar', 'xxx', 'dir1/', 'dir2/', 'dir3\\'])
+ self.assertEqual(['xxx', 'dir2'], added)
+ self.assertEqual(set(['foo', './bar', u'b\xe5z',
+ 'xxx', 'dir1', 'dir2', 'dir3']),
+ ignores.get_user_ignores())
+
+
+class TestRuntimeIgnores(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+
+ # For the purposes of these tests, we must have no
+ # runtime ignores
+ self.overrideAttr(ignores, '_runtime_ignores', set())
+
+ def test_add(self):
+ """Test that we can add an entry to the list."""
+ self.assertEqual(set(), ignores.get_runtime_ignores())
+
+ ignores.add_runtime_ignores(['foo'])
+ self.assertEqual(set(['foo']), ignores.get_runtime_ignores())
+
+ def test_add_duplicate(self):
+ """Adding the same ignore twice shouldn't add a new entry."""
+ ignores.add_runtime_ignores(['foo', 'bar'])
+ self.assertEqual(set(['foo', 'bar']), ignores.get_runtime_ignores())
+
+ ignores.add_runtime_ignores(['bar'])
+ self.assertEqual(set(['foo', 'bar']), ignores.get_runtime_ignores())
+
+
+class TestTreeIgnores(TestCaseWithTransport):
+
+ def assertPatternsEquals(self, patterns):
+ contents = open(".bzrignore", 'rU').read().strip().split('\n')
+ self.assertEquals(sorted(patterns), sorted(contents))
+
+ def test_new_file(self):
+ tree = self.make_branch_and_tree(".")
+ ignores.tree_ignores_add_patterns(tree, ["myentry"])
+ self.assertTrue(tree.has_filename(".bzrignore"))
+ self.assertPatternsEquals(["myentry"])
+
+ def test_add_to_existing(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree_contents([('.bzrignore', "myentry1\n")])
+ tree.add([".bzrignore"])
+ ignores.tree_ignores_add_patterns(tree, ["myentry2", "foo"])
+ self.assertPatternsEquals(["myentry1", "myentry2", "foo"])
+
+ def test_adds_ending_newline(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree_contents([('.bzrignore', "myentry1")])
+ tree.add([".bzrignore"])
+ ignores.tree_ignores_add_patterns(tree, ["myentry2"])
+ self.assertPatternsEquals(["myentry1", "myentry2"])
+ text = open(".bzrignore", 'r').read()
+ self.assertTrue(text.endswith('\r\n') or
+ text.endswith('\n') or
+ text.endswith('\r'))
+
+ def test_does_not_add_dupe(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree_contents([('.bzrignore', "myentry\n")])
+ tree.add([".bzrignore"])
+ ignores.tree_ignores_add_patterns(tree, ["myentry"])
+ self.assertPatternsEquals(["myentry"])
+
+ def test_non_ascii(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree_contents([('.bzrignore',
+ u"myentry\u1234\n".encode('utf-8'))])
+ tree.add([".bzrignore"])
+ ignores.tree_ignores_add_patterns(tree, [u"myentry\u5678"])
+ self.assertPatternsEquals([u"myentry\u1234".encode('utf-8'),
+ u"myentry\u5678".encode('utf-8')])
+
+ def test_crlf(self):
+ tree = self.make_branch_and_tree(".")
+ self.build_tree_contents([('.bzrignore', "myentry1\r\n")])
+ tree.add([".bzrignore"])
+ ignores.tree_ignores_add_patterns(tree, ["myentry2", "foo"])
+ self.assertEquals(open('.bzrignore', 'rb').read(), 'myentry1\r\nmyentry2\r\nfoo\r\n')
+ self.assertPatternsEquals(["myentry1", "myentry2", "foo"])
diff --git a/bzrlib/tests/test_import_tariff.py b/bzrlib/tests/test_import_tariff.py
new file mode 100644
index 0000000..b2fd59c
--- /dev/null
+++ b/bzrlib/tests/test_import_tariff.py
@@ -0,0 +1,272 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for how many modules are loaded in executing various commands."""
+
+import os
+
+from testtools import content
+
+from bzrlib import (
+ plugins as _mod_plugins,
+ trace,
+ )
+from bzrlib.controldir import ControlDir
+from bzrlib.smart import medium
+from bzrlib.transport import remote
+
+from bzrlib.plugin import (
+ are_plugins_disabled,
+ )
+
+from bzrlib.tests import (
+ TestCaseWithTransport,
+ )
+
+old_format_modules = [
+ 'bzrlib.repofmt.knitrepo',
+ 'bzrlib.repofmt.knitpack_repo',
+ 'bzrlib.plugins.weave_fmt.branch',
+ 'bzrlib.plugins.weave_fmt.bzrdir',
+ 'bzrlib.plugins.weave_fmt.repository',
+ 'bzrlib.plugins.weave_fmt.workingtree',
+ 'bzrlib.weave',
+ 'bzrlib.weavefile',
+ 'bzrlib.xml4',
+ 'bzrlib.xml5',
+ 'bzrlib.xml6',
+ 'bzrlib.xml7',
+ ]
+
+
+class ImportTariffTestCase(TestCaseWithTransport):
+ """Check how many modules are loaded for some representative scenarios.
+
+ See the Testing Guide in the developer documentation for more explanation.
+
+    We must respect the setup used by the selftest command regarding
+    plugins. This allows the user to control which plugins are in effect while
+    running these tests while still respecting the import policies defined
+    here.
+
+ When failures are encountered for a given plugin, they can generally be
+ addressed by using lazy import or lazy hook registration.
+ """
+
+ def setUp(self):
+ self.preserved_env_vars = {}
+ for name in ('BZR_PLUGIN_PATH', 'BZR_DISABLE_PLUGINS', 'BZR_PLUGINS_AT'
+ ):
+ self.preserved_env_vars[name] = os.environ.get(name)
+ super(ImportTariffTestCase, self).setUp()
+
+ def start_bzr_subprocess_with_import_check(self, args, stderr_file=None):
+ """Run a bzr process and capture the imports.
+
+ This is fairly expensive because we start a subprocess, so we aim to
+ cover representative rather than exhaustive cases.
+ """
+        # We use PYTHONVERBOSE rather than --profile-imports because in
+ # experimentation the profile-imports output seems to not always show
+ # the modules you'd expect; this can be debugged but python -v seems
+ # more likely to always show everything. And we use the environment
+ # variable rather than 'python -v' in the hope it will work even if
+ # bzr is frozen and python is not explicitly specified. -- mbp 20100208
+ env_changes = dict(PYTHONVERBOSE='1', **self.preserved_env_vars)
+ trace.mutter('Setting env for bzr subprocess: %r', env_changes)
+ kwargs = dict(env_changes=env_changes,
+ allow_plugins=(not are_plugins_disabled()))
+ if stderr_file:
+ # We don't want to update the whole call chain so we insert stderr
+ # *iff* we need to
+ kwargs['stderr'] = stderr_file
+ return self.start_bzr_subprocess(args, **kwargs)
+
+ def check_forbidden_modules(self, err, forbidden_imports):
+ """Check for forbidden modules in stderr.
+
+ :param err: Standard error
+ :param forbidden_imports: List of forbidden modules
+ """
+ self.addDetail('subprocess_stderr',
+ content.Content(content.ContentType("text", "plain"),
+ lambda:[err]))
+
+ bad_modules = []
+ for module_name in forbidden_imports:
+ if err.find("\nimport %s " % module_name) != -1:
+ bad_modules.append(module_name)
+
+ if bad_modules:
+ self.fail("command loaded forbidden modules %r"
+ % (bad_modules,))
+
+ def finish_bzr_subprocess_with_import_check(self, process,
+ args, forbidden_imports):
+ """Finish subprocess and check specific modules have not been
+ imported.
+
+ :param forbidden_imports: List of fully-qualified Python module names
+ that should not be loaded while running this command.
+ """
+ (out, err) = self.finish_bzr_subprocess(process,
+ universal_newlines=False, process_args=args)
+ self.check_forbidden_modules(err, forbidden_imports)
+ return out, err
+
+ def run_command_check_imports(self, args, forbidden_imports):
+ """Run bzr ARGS in a subprocess and check its imports.
+
+ This is fairly expensive because we start a subprocess, so we aim to
+ cover representative rather than exhaustive cases.
+
+ :param forbidden_imports: List of fully-qualified Python module names
+ that should not be loaded while running this command.
+ """
+ process = self.start_bzr_subprocess_with_import_check(args)
+ self.finish_bzr_subprocess_with_import_check(process, args,
+ forbidden_imports)
+
+
+class TestImportTariffs(ImportTariffTestCase):
+ """Basic import tariff tests for some common bzr commands"""
+
+ def test_import_tariffs_working(self):
+ # check some guaranteed-true and false imports to be sure we're
+ # measuring correctly
+ self.make_branch_and_tree('.')
+ self.run_command_check_imports(['st'],
+ ['nonexistentmodulename', 'anothernonexistentmodule'])
+ self.assertRaises(AssertionError,
+ self.run_command_check_imports,
+ ['st'],
+ ['bzrlib.tree'])
+
+ def test_simple_local(self):
+ # 'st' in a default format working tree shouldn't need many modules
+ self.make_branch_and_tree('.')
+ self.run_command_check_imports(['st'], [
+ 'bzrlib.annotate',
+ 'bzrlib.atomicfile',
+ 'bzrlib.bugtracker',
+ 'bzrlib.bundle.commands',
+ 'bzrlib.cmd_version_info',
+ 'bzrlib.externalcommand',
+ 'bzrlib.filters',
+ 'bzrlib.hashcache',
+ # foreign branch plugins import the foreign_vcs_registry from
+ # bzrlib.foreign so it can't be blacklisted
+ 'bzrlib.gpg',
+ 'bzrlib.info',
+ 'bzrlib.knit',
+ 'bzrlib.merge3',
+ 'bzrlib.merge_directive',
+ 'bzrlib.msgeditor',
+ 'bzrlib.patiencediff',
+ 'bzrlib.remote',
+ 'bzrlib.rules',
+ 'bzrlib.sign_my_commits',
+ 'bzrlib.smart',
+ 'bzrlib.smart.client',
+ 'bzrlib.smart.medium',
+ 'bzrlib.smart.server',
+ 'bzrlib.transform',
+ 'bzrlib.version_info_formats.format_rio',
+ 'bzrlib.xml_serializer',
+ 'bzrlib.xml8',
+ 'getpass',
+ 'kerberos',
+ 'ssl',
+ 'socket',
+ 'smtplib',
+ 'tarfile',
+ 'tempfile',
+ 'termios',
+ 'tty',
+ 'urllib',
+ ] + old_format_modules)
+ # TODO: similar test for repository-only operations, checking we avoid
+ # loading wt-specific stuff
+ #
+ # See https://bugs.launchpad.net/bzr/+bug/553017
+
+ def test_help_commands(self):
+ # See https://bugs.launchpad.net/bzr/+bug/663773
+ self.run_command_check_imports(['help', 'commands'], [
+ 'testtools',
+ ])
+
+ def test_simple_serve(self):
+ # 'serve' in a default format working tree shouldn't need many modules
+ tree = self.make_branch_and_tree('.')
+ # Capture the bzr serve process' stderr in a file to avoid deadlocks
+ # while the smart client interacts with it.
+ stderr_file = open('bzr-serve.stderr', 'w')
+ process = self.start_bzr_subprocess_with_import_check(['serve',
+ '--inet', '-d', tree.basedir], stderr_file=stderr_file)
+ url = 'bzr://localhost/'
+ self.permit_url(url)
+ client_medium = medium.SmartSimplePipesClientMedium(
+ process.stdout, process.stdin, url)
+ transport = remote.RemoteTransport(url, medium=client_medium)
+ branch = ControlDir.open_from_transport(transport).open_branch()
+ process.stdin.close()
+ # Hide stdin from the subprocess module, so it won't fail to close it.
+ process.stdin = None
+ (out, err) = self.finish_bzr_subprocess(process,
+ universal_newlines=False)
+ stderr_file.close()
+ with open('bzr-serve.stderr', 'r') as stderr_file:
+ err = stderr_file.read()
+ self.check_forbidden_modules(err,
+ ['bzrlib.annotate',
+ 'bzrlib.atomicfile',
+ 'bzrlib.bugtracker',
+ 'bzrlib.bundle.commands',
+ 'bzrlib.cmd_version_info',
+ 'bzrlib.dirstate',
+ 'bzrlib._dirstate_helpers_py',
+ 'bzrlib._dirstate_helpers_pyx',
+ 'bzrlib.externalcommand',
+ 'bzrlib.filters',
+ 'bzrlib.hashcache',
+ # foreign branch plugins import the foreign_vcs_registry from
+ # bzrlib.foreign so it can't be blacklisted
+ 'bzrlib.gpg',
+ 'bzrlib.info',
+ 'bzrlib.knit',
+ 'bzrlib.merge3',
+ 'bzrlib.merge_directive',
+ 'bzrlib.msgeditor',
+ 'bzrlib.patiencediff',
+ 'bzrlib.remote',
+ 'bzrlib.rules',
+ 'bzrlib.sign_my_commits',
+ 'bzrlib.smart.client',
+ 'bzrlib.transform',
+ 'bzrlib.version_info_formats.format_rio',
+ 'bzrlib.workingtree_4',
+ 'bzrlib.xml_serializer',
+ 'bzrlib.xml8',
+ 'getpass',
+ 'kerberos',
+ 'smtplib',
+ 'tarfile',
+ 'tempfile',
+ 'termios',
+ 'tty',
+ ] + old_format_modules)
diff --git a/bzrlib/tests/test_index.py b/bzrlib/tests/test_index.py
new file mode 100644
index 0000000..9456b09
--- /dev/null
+++ b/bzrlib/tests/test_index.py
@@ -0,0 +1,1766 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for indices."""
+
+from bzrlib import (
+ errors,
+ index,
+ tests,
+ transport,
+ )
+
+
+class TestGraphIndexBuilder(tests.TestCaseWithMemoryTransport):
+
+ def test_build_index_empty(self):
+ builder = index.GraphIndexBuilder()
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=1\nlen=0\n\n",
+ contents)
+
+ def test_build_index_empty_two_element_keys(self):
+ builder = index.GraphIndexBuilder(key_elements=2)
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=2\nlen=0\n\n",
+ contents)
+
+ def test_build_index_one_reference_list_empty(self):
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=1\nlen=0\n\n",
+ contents)
+
+ def test_build_index_two_reference_list_empty(self):
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=2\nkey_elements=1\nlen=0\n\n",
+ contents)
+
+ def test_build_index_one_node_no_refs(self):
+ builder = index.GraphIndexBuilder()
+ builder.add_node(('akey', ), 'data')
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=1\nlen=1\n"
+ "akey\x00\x00\x00data\n\n", contents)
+
+ def test_build_index_one_node_no_refs_accepts_empty_reflist(self):
+ builder = index.GraphIndexBuilder()
+ builder.add_node(('akey', ), 'data', ())
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=1\nlen=1\n"
+ "akey\x00\x00\x00data\n\n", contents)
+
+ def test_build_index_one_node_2_element_keys(self):
+ # multipart keys are separated by \x00 - because keys have a fixed
+ # number of elements rather than a variable one, this causes no
+ # ambiguity, and seems clearer to the author.
+ builder = index.GraphIndexBuilder(key_elements=2)
+ builder.add_node(('akey', 'secondpart'), 'data')
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=2\nlen=1\n"
+ "akey\x00secondpart\x00\x00\x00data\n\n", contents)
+
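These assertions spell out the serialised header: a signature line followed by node_ref_lists, key_elements and len options, one per line. A rough sketch of reading such a header back; illustrative only, not the bzrlib parser:

    def parse_header(contents):
        lines = contents.split('\n')
        assert lines[0] == "Bazaar Graph Index 1"
        # The next three lines are key=value options in a fixed order.
        options = dict(line.split('=', 1) for line in lines[1:4])
        return (int(options['node_ref_lists']),
                int(options['key_elements']),
                int(options['len']))

    # parse_header("Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=2\n"
    #              "len=1\nakey\x00secondpart\x00\x00\x00data\n\n") == (0, 2, 1)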
+ def test_add_node_empty_value(self):
+ builder = index.GraphIndexBuilder()
+ builder.add_node(('akey', ), '')
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=1\nlen=1\n"
+ "akey\x00\x00\x00\n\n", contents)
+
+ def test_build_index_nodes_sorted(self):
+ # nodes are output in sorted key order, lowest first.
+ builder = index.GraphIndexBuilder()
+ # use three nodes to have a good chance of exposing dictionary
+ # hash-order effects. Insert in a randomish order that is neither
+ # sorted nor reverse-sorted.
+ builder.add_node(('2002', ), 'data')
+ builder.add_node(('2000', ), 'data')
+ builder.add_node(('2001', ), 'data')
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=1\nlen=3\n"
+ "2000\x00\x00\x00data\n"
+ "2001\x00\x00\x00data\n"
+ "2002\x00\x00\x00data\n"
+ "\n", contents)
+
+ def test_build_index_2_element_key_nodes_sorted(self):
+ # multiple element keys are sorted first-key, second-key.
+ builder = index.GraphIndexBuilder(key_elements=2)
+ # use three values of each key element, to have a good chance of
+ # glitching dictionary hash lookups etc. Insert in randomish order that
+ # is not correct and not the reverse of the correct order.
+ builder.add_node(('2002', '2002'), 'data')
+ builder.add_node(('2002', '2000'), 'data')
+ builder.add_node(('2002', '2001'), 'data')
+ builder.add_node(('2000', '2002'), 'data')
+ builder.add_node(('2000', '2000'), 'data')
+ builder.add_node(('2000', '2001'), 'data')
+ builder.add_node(('2001', '2002'), 'data')
+ builder.add_node(('2001', '2000'), 'data')
+ builder.add_node(('2001', '2001'), 'data')
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=0\nkey_elements=2\nlen=9\n"
+ "2000\x002000\x00\x00\x00data\n"
+ "2000\x002001\x00\x00\x00data\n"
+ "2000\x002002\x00\x00\x00data\n"
+ "2001\x002000\x00\x00\x00data\n"
+ "2001\x002001\x00\x00\x00data\n"
+ "2001\x002002\x00\x00\x00data\n"
+ "2002\x002000\x00\x00\x00data\n"
+ "2002\x002001\x00\x00\x00data\n"
+ "2002\x002002\x00\x00\x00data\n"
+ "\n", contents)
+
+ def test_build_index_reference_lists_are_included_one(self):
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ builder.add_node(('key', ), 'data', ([], ))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=1\nlen=1\n"
+ "key\x00\x00\x00data\n"
+ "\n", contents)
+
+ def test_build_index_reference_lists_with_2_element_keys(self):
+ builder = index.GraphIndexBuilder(reference_lists=1, key_elements=2)
+ builder.add_node(('key', 'key2'), 'data', ([], ))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=2\nlen=1\n"
+ "key\x00key2\x00\x00\x00data\n"
+ "\n", contents)
+
+ def test_build_index_reference_lists_are_included_two(self):
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ builder.add_node(('key', ), 'data', ([], []))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=2\nkey_elements=1\nlen=1\n"
+ "key\x00\x00\t\x00data\n"
+ "\n", contents)
+
+ def test_clear_cache(self):
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ # This is a no-op, but the api should exist
+ builder.clear_cache()
+
+ def test_node_references_are_byte_offsets(self):
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ builder.add_node(('reference', ), 'data', ([], ))
+ builder.add_node(('key', ), 'data', ([('reference', )], ))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=1\nlen=2\n"
+ "key\x00\x0072\x00data\n"
+ "reference\x00\x00\x00data\n"
+ "\n", contents)
+
+ def test_node_references_are_cr_delimited(self):
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ builder.add_node(('reference', ), 'data', ([], ))
+ builder.add_node(('reference2', ), 'data', ([], ))
+ builder.add_node(('key', ), 'data',
+ ([('reference', ), ('reference2', )], ))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=1\nlen=3\n"
+ "key\x00\x00077\r094\x00data\n"
+ "reference\x00\x00\x00data\n"
+ "reference2\x00\x00\x00data\n"
+ "\n", contents)
+
+ def test_multiple_reference_lists_are_tab_delimited(self):
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ builder.add_node(('keference', ), 'data', ([], []))
+ builder.add_node(('rey', ), 'data',
+ ([('keference', )], [('keference', )]))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=2\nkey_elements=1\nlen=2\n"
+ "keference\x00\x00\t\x00data\n"
+ "rey\x00\x0059\t59\x00data\n"
+ "\n", contents)
+
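Taken together, the expected strings above suggest the per-node layout: key, what appears to be an absent marker, the reference lists (byte offsets, \r-delimited within a list and \t-delimited between lists) and the value, all separated by \x00. A sketch of splitting one single-element-key node line under that reading; illustrative, not bzrlib API:

    def split_node_line(line):
        key, absent, refs, value = line.split('\x00', 3)
        ref_lists = [[int(offset) for offset in ref_list.split('\r') if offset]
                     for ref_list in refs.split('\t')]
        return key, absent == 'a', ref_lists, value

    # split_node_line("rey\x00\x0059\t59\x00data")
    # == ('rey', False, [[59], [59]], 'data')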
+ def test_add_node_referencing_missing_key_makes_absent(self):
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ builder.add_node(('rey', ), 'data',
+ ([('beference', ), ('aeference2', )], ))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=1\nlen=1\n"
+ "aeference2\x00a\x00\x00\n"
+ "beference\x00a\x00\x00\n"
+ "rey\x00\x00074\r059\x00data\n"
+ "\n", contents)
+
+ def test_node_references_three_digits(self):
+ # test that the reference offset digits expand as needed.
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ references = [(str(val), ) for val in reversed(range(9))]
+ builder.add_node(('2-key', ), '', (references, ))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqualDiff(
+ "Bazaar Graph Index 1\nnode_ref_lists=1\nkey_elements=1\nlen=1\n"
+ "0\x00a\x00\x00\n"
+ "1\x00a\x00\x00\n"
+ "2\x00a\x00\x00\n"
+ "2-key\x00\x00151\r145\r139\r133\r127\r121\r071\r065\r059\x00\n"
+ "3\x00a\x00\x00\n"
+ "4\x00a\x00\x00\n"
+ "5\x00a\x00\x00\n"
+ "6\x00a\x00\x00\n"
+ "7\x00a\x00\x00\n"
+ "8\x00a\x00\x00\n"
+ "\n", contents)
+
+ def test_absent_has_no_reference_overhead(self):
+ # the offsets after an absent record should be correct when there are
+ # >1 reference lists.
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ builder.add_node(('parent', ), '', ([('aail', ), ('zther', )], []))
+ stream = builder.finish()
+ contents = stream.read()
+ self.assertEqual(
+ "Bazaar Graph Index 1\nnode_ref_lists=2\nkey_elements=1\nlen=1\n"
+ "aail\x00a\x00\x00\n"
+ "parent\x00\x0059\r84\t\x00\n"
+ "zther\x00a\x00\x00\n"
+ "\n", contents)
+
+ def test_add_node_bad_key(self):
+ builder = index.GraphIndexBuilder()
+ for bad_char in '\t\n\x0b\x0c\r\x00 ':
+ self.assertRaises(errors.BadIndexKey, builder.add_node,
+ ('a%skey' % bad_char, ), 'data')
+ self.assertRaises(errors.BadIndexKey, builder.add_node,
+ ('', ), 'data')
+ self.assertRaises(errors.BadIndexKey, builder.add_node,
+ 'not-a-tuple', 'data')
+ # key too short
+ self.assertRaises(errors.BadIndexKey, builder.add_node,
+ (), 'data')
+ # too long
+ self.assertRaises(errors.BadIndexKey, builder.add_node,
+ ('primary', 'secondary'), 'data')
+ # secondary key elements get checked too:
+ builder = index.GraphIndexBuilder(key_elements=2)
+ for bad_char in '\t\n\x0b\x0c\r\x00 ':
+ self.assertRaises(errors.BadIndexKey, builder.add_node,
+ ('prefix', 'a%skey' % bad_char), 'data')
+
+ def test_add_node_bad_data(self):
+ builder = index.GraphIndexBuilder()
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data\naa')
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data\x00aa')
+
+ def test_add_node_bad_mismatched_ref_lists_length(self):
+ builder = index.GraphIndexBuilder()
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa', ([], ))
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa')
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa', (), )
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa', ([], []))
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa')
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa', ([], ))
+ self.assertRaises(errors.BadIndexValue, builder.add_node, ('akey', ),
+ 'data aa', ([], [], []))
+
+ def test_add_node_bad_key_in_reference_lists(self):
+ # first list, first key - trivial
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ self.assertRaises(errors.BadIndexKey, builder.add_node, ('akey', ),
+ 'data aa', ([('a key', )], ))
+ # references keys must be tuples too
+ self.assertRaises(errors.BadIndexKey, builder.add_node, ('akey', ),
+ 'data aa', (['not-a-tuple'], ))
+ # reference key too short
+ self.assertRaises(errors.BadIndexKey, builder.add_node, ('akey', ),
+ 'data aa', ([()], ))
+ # too long
+ self.assertRaises(errors.BadIndexKey, builder.add_node, ('akey', ),
+ 'data aa', ([('primary', 'secondary')], ))
+ # need to check more than the first key in the list
+ self.assertRaises(errors.BadIndexKey, builder.add_node, ('akey', ),
+ 'data aa', ([('agoodkey', ), ('that is a bad key', )], ))
+ # and if there is more than one list it should be getting checked
+ # too
+ builder = index.GraphIndexBuilder(reference_lists=2)
+ self.assertRaises(errors.BadIndexKey, builder.add_node, ('akey', ),
+ 'data aa', ([], ['a bad key']))
+
+ def test_add_duplicate_key(self):
+ builder = index.GraphIndexBuilder()
+ builder.add_node(('key', ), 'data')
+ self.assertRaises(errors.BadIndexDuplicateKey,
+ builder.add_node, ('key', ), 'data')
+
+ def test_add_duplicate_key_2_elements(self):
+ builder = index.GraphIndexBuilder(key_elements=2)
+ builder.add_node(('key', 'key'), 'data')
+ self.assertRaises(errors.BadIndexDuplicateKey, builder.add_node,
+ ('key', 'key'), 'data')
+
+ def test_add_key_after_referencing_key(self):
+ builder = index.GraphIndexBuilder(reference_lists=1)
+ builder.add_node(('key', ), 'data', ([('reference', )], ))
+ builder.add_node(('reference', ), 'data', ([],))
+
+ def test_add_key_after_referencing_key_2_elements(self):
+ builder = index.GraphIndexBuilder(reference_lists=1, key_elements=2)
+ builder.add_node(('k', 'ey'), 'data', ([('reference', 'tokey')], ))
+ builder.add_node(('reference', 'tokey'), 'data', ([],))
+
+ def test_set_optimize(self):
+ builder = index.GraphIndexBuilder(reference_lists=1, key_elements=2)
+ builder.set_optimize(for_size=True)
+ self.assertTrue(builder._optimize_for_size)
+ builder.set_optimize(for_size=False)
+ self.assertFalse(builder._optimize_for_size)
+
+
+class TestGraphIndex(tests.TestCaseWithMemoryTransport):
+
+ def make_key(self, number):
+ return (str(number) + 'X'*100,)
+
+ def make_value(self, number):
+ return str(number) + 'Y'*100
+
+ def make_nodes(self, count=64):
+ # generate a big enough index that we only read some of it on a typical
+ # bisection lookup.
+ nodes = []
+ for counter in range(count):
+ nodes.append((self.make_key(counter), self.make_value(counter), ()))
+ return nodes
+
+ def make_index(self, ref_lists=0, key_elements=1, nodes=[]):
+ builder = index.GraphIndexBuilder(ref_lists, key_elements=key_elements)
+ for key, value, references in nodes:
+ builder.add_node(key, value, references)
+ stream = builder.finish()
+ trans = transport.get_transport_from_url('trace+' + self.get_url())
+ size = trans.put_file('index', stream)
+ return index.GraphIndex(trans, 'index', size)
+
+ def make_index_with_offset(self, ref_lists=0, key_elements=1, nodes=[],
+ offset=0):
+ builder = index.GraphIndexBuilder(ref_lists, key_elements=key_elements)
+ for key, value, references in nodes:
+ builder.add_node(key, value, references)
+ content = builder.finish().read()
+ size = len(content)
+ trans = self.get_transport()
+ trans.put_bytes('index', (' '*offset) + content)
+ return index.GraphIndex(trans, 'index', size, offset=offset)
+
+ def test_clear_cache(self):
+ index = self.make_index()
+ # For now, we just want to make sure the api is available. As this is
+ # old code, we don't really worry if it *does* anything.
+ index.clear_cache()
+
+ def test_open_bad_index_no_error(self):
+ trans = self.get_transport()
+ trans.put_bytes('name', "not an index\n")
+ idx = index.GraphIndex(trans, 'name', 13)
+
+ def test_with_offset(self):
+ nodes = self.make_nodes(200)
+ idx = self.make_index_with_offset(offset=1234567, nodes=nodes)
+ self.assertEqual(200, idx.key_count())
+
+ def test_buffer_all_with_offset(self):
+ nodes = self.make_nodes(200)
+ idx = self.make_index_with_offset(offset=1234567, nodes=nodes)
+ idx._buffer_all()
+ self.assertEqual(200, idx.key_count())
+
+ def test_side_effect_buffering_with_offset(self):
+ nodes = self.make_nodes(20)
+ index = self.make_index_with_offset(offset=1234567, nodes=nodes)
+ index._transport.recommended_page_size = lambda: 64*1024
+ subset_nodes = [nodes[0][0], nodes[10][0], nodes[19][0]]
+ entries = [n[1] for n in index.iter_entries(subset_nodes)]
+ self.assertEqual(sorted(subset_nodes), sorted(entries))
+ self.assertEqual(20, index.key_count())
+
+ def test_open_sets_parsed_map_empty(self):
+ index = self.make_index()
+ self.assertEqual([], index._parsed_byte_map)
+ self.assertEqual([], index._parsed_key_map)
+
+ def test_key_count_buffers(self):
+ index = self.make_index(nodes=self.make_nodes(2))
+ # reset the transport log
+ del index._transport._activity[:]
+ self.assertEqual(2, index.key_count())
+ # We should have requested reading the header bytes
+ self.assertEqual([
+ ('readv', 'index', [(0, 200)], True, index._size),
+ ],
+ index._transport._activity)
+ # And that should have been enough to trigger reading the whole index
+ # with buffering
+ self.assertIsNot(None, index._nodes)
+
+ def test_lookup_key_via_location_buffers(self):
+ index = self.make_index()
+ # reset the transport log
+ del index._transport._activity[:]
+ # do a _lookup_keys_via_location call for the middle of the file, which
+ # is what bisection uses.
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('missing', ))])
+ # this should have asked for a readv request, with adjust_for_latency,
+ # and two regions: the header, and half-way into the file.
+ self.assertEqual([
+ ('readv', 'index', [(30, 30), (0, 200)], True, 60),
+ ],
+ index._transport._activity)
+ # and the result should be that the key cannot be present, because this
+ # is a trivial index.
+ self.assertEqual([((index._size // 2, ('missing', )), False)],
+ result)
+ # And this should have caused the file to be fully buffered
+ self.assertIsNot(None, index._nodes)
+ self.assertEqual([], index._parsed_byte_map)
+
+ def test_first_lookup_key_via_location(self):
+ # We need enough data so that the _HEADER_READV doesn't consume the
+ # whole file. We always read 800 bytes for every key, and the local
+ # transport natural expansion is 4096 bytes. So we have to have >8192
+ # bytes or we will trigger "buffer_all".
+ # We also want the 'missing' key to fall within the range that *did*
+ # read
+ nodes = []
+ index = self.make_index(nodes=self.make_nodes(64))
+ # reset the transport log
+ del index._transport._activity[:]
+ # do a _lookup_keys_via_location call for the middle of the file, which
+ # is what bisection uses.
+ start_lookup = index._size // 2
+ result = index._lookup_keys_via_location(
+ [(start_lookup, ('40missing', ))])
+ # this should have asked for a readv request, with adjust_for_latency,
+ # and two regions: the header, and half-way into the file.
+ self.assertEqual([
+ ('readv', 'index',
+ [(start_lookup, 800), (0, 200)], True, index._size),
+ ],
+ index._transport._activity)
+ # and the result should be that the key cannot be present, because this
+ # is a trivial index.
+ self.assertEqual([((start_lookup, ('40missing', )), False)],
+ result)
+ # And this should not have caused the file to be fully buffered
+ self.assertIs(None, index._nodes)
+ # And the regions of the file that have been parsed should be in the
+ # parsed_byte_map and the parsed_key_map
+ self.assertEqual([(0, 4008), (5046, 8996)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(26)),
+ (self.make_key(31), self.make_key(48))],
+ index._parsed_key_map)
+
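_parsed_byte_map and _parsed_key_map are parallel lists: each (start, end) byte range that has been parsed is paired with the (lowest, highest) keys seen in that range, with None standing for the start of the file. That is what lets later lookups answer "definitely absent" without further I/O, roughly as below; the helper is illustrative and the exact boundary rules in bzrlib may differ:

    def known_absent(key, parsed_key_map, parsed_keys):
        for low, high in parsed_key_map:
            if (low is None or low < key) and key <= high:
                # The key would have to live in an already-parsed region.
                return key not in parsed_keys
        return False  # outside every parsed region: can't tell without I/O

    # known_absent('40', [(None, '26'), ('31', '48')], {'39', '41'}) -> True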
+ def test_parsing_non_adjacent_data_trims(self):
+ index = self.make_index(nodes=self.make_nodes(64))
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('40', ))])
+ # and the result should be that the key cannot be present, because the
+ # key falls in the middle of the data observed from a 4K read - the
+ # smallest read the transport will do today with this api.
+ self.assertEqual([((index._size // 2, ('40', )), False)],
+ result)
+ # and we should have a parse map that includes the header and the
+ # region that was parsed after trimming.
+ self.assertEqual([(0, 4008), (5046, 8996)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(26)),
+ (self.make_key(31), self.make_key(48))],
+ index._parsed_key_map)
+
+ def test_parsing_data_handles_parsed_contained_regions(self):
+ # the following pattern creates a parsed region that is wholly within a
+ # single result from the readv layer:
+ # .... single-read (readv-minimum-size) ...
+ # which then trims the start and end so the parsed size is < the readv
+ # minimum.
+ # then a dual lookup (or a reference lookup for that matter) which
+ # abuts or overlaps the parsed region on both sides will need to
+ # discard the data in the middle, but parse the end as well.
+ #
+ # we test this by doing a single lookup to seed the data, then
+ # a lookup for two keys that are present and adjacent -
+ # we expect both to be found, and the parsed byte map to include the
+ # locations of both keys.
+ index = self.make_index(nodes=self.make_nodes(128))
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('40', ))])
+ # and we should have a parse map that includes the header and the
+ # region that was parsed after trimming.
+ self.assertEqual([(0, 4045), (11759, 15707)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(116)),
+ (self.make_key(35), self.make_key(51))],
+ index._parsed_key_map)
+ # now ask for two keys, right before and after the parsed region
+ result = index._lookup_keys_via_location(
+ [(11450, self.make_key(34)), (15707, self.make_key(52))])
+ self.assertEqual([
+ ((11450, self.make_key(34)),
+ (index, self.make_key(34), self.make_value(34))),
+ ((15707, self.make_key(52)),
+ (index, self.make_key(52), self.make_value(52))),
+ ],
+ result)
+ self.assertEqual([(0, 4045), (9889, 17993)], index._parsed_byte_map)
+
+ def test_lookup_missing_key_answers_without_io_when_map_permits(self):
+ # generate a big enough index that we only read some of it on a typical
+ # bisection lookup.
+ index = self.make_index(nodes=self.make_nodes(64))
+ # lookup the keys in the middle of the file
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('40', ))])
+ # check the parse map, this determines the test validity
+ self.assertEqual([(0, 4008), (5046, 8996)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(26)),
+ (self.make_key(31), self.make_key(48))],
+ index._parsed_key_map)
+ # reset the transport log
+ del index._transport._activity[:]
+ # now looking up a key in the portion of the file already parsed should
+ # not create a new transport request, and should return False (cannot
+ # be in the index) - even when the byte location we ask for is outside
+ # the parsed region
+ result = index._lookup_keys_via_location(
+ [(4000, ('40', ))])
+ self.assertEqual([((4000, ('40', )), False)],
+ result)
+ self.assertEqual([], index._transport._activity)
+
+ def test_lookup_present_key_answers_without_io_when_map_permits(self):
+ # generate a big enough index that we only read some of it on a typical
+ # bisection lookup.
+ index = self.make_index(nodes=self.make_nodes(64))
+ # lookup the keys in the middle of the file
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('40', ))])
+ # check the parse map, this determines the test validity
+ self.assertEqual([(0, 4008), (5046, 8996)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(26)),
+ (self.make_key(31), self.make_key(48))],
+ index._parsed_key_map)
+ # reset the transport log
+ del index._transport._activity[:]
+ # now looking up a key in the portion of the file already parsed should
+ # not create a new transport request, and should return the entry
+ # directly from the parsed data - even when the byte location we ask
+ # for is outside the parsed region
+ #
+ result = index._lookup_keys_via_location([(4000, self.make_key(40))])
+ self.assertEqual(
+ [((4000, self.make_key(40)),
+ (index, self.make_key(40), self.make_value(40)))],
+ result)
+ self.assertEqual([], index._transport._activity)
+
+ def test_lookup_key_below_probed_area(self):
+ # generate a big enough index that we only read some of it on a typical
+ # bisection lookup.
+ index = self.make_index(nodes=self.make_nodes(64))
+ # ask for the key in the middle, but a key that is located in the
+ # unparsed region before the middle.
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('30', ))])
+ # check the parse map, this determines the test validity
+ self.assertEqual([(0, 4008), (5046, 8996)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(26)),
+ (self.make_key(31), self.make_key(48))],
+ index._parsed_key_map)
+ self.assertEqual([((index._size // 2, ('30', )), -1)],
+ result)
+
+ def test_lookup_key_above_probed_area(self):
+ # generate a big enough index that we only read some of it on a typical
+ # bisection lookup.
+ index = self.make_index(nodes=self.make_nodes(64))
+ # ask for the key in the middle, but a key that is located in the
+ # unparsed region after the middle.
+ result = index._lookup_keys_via_location(
+ [(index._size // 2, ('50', ))])
+ # check the parse map, this determines the test validity
+ self.assertEqual([(0, 4008), (5046, 8996)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(26)),
+ (self.make_key(31), self.make_key(48))],
+ index._parsed_key_map)
+ self.assertEqual([((index._size // 2, ('50', )), +1)],
+ result)
+
+ def test_lookup_key_resolves_references(self):
+ # generate a big enough index that we only read some of it on a typical
+ # bisection lookup.
+ nodes = []
+ for counter in range(99):
+ nodes.append((self.make_key(counter), self.make_value(counter),
+ ((self.make_key(counter + 20),),) ))
+ index = self.make_index(ref_lists=1, nodes=nodes)
+ # lookup a key in the middle that does not exist, so that we can check
+ # that the referred-to keys are not accessed automatically.
+ index_size = index._size
+ index_center = index_size // 2
+ result = index._lookup_keys_via_location(
+ [(index_center, ('40', ))])
+ # check the parse map - only the start and middle should have been
+ # parsed.
+ self.assertEqual([(0, 4027), (10198, 14028)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(17)),
+ (self.make_key(44), self.make_key(5))],
+ index._parsed_key_map)
+ # and check the transport activity likewise.
+ self.assertEqual(
+ [('readv', 'index', [(index_center, 800), (0, 200)], True,
+ index_size)],
+ index._transport._activity)
+ # reset the transport log for testing the reference lookup
+ del index._transport._activity[:]
+ # now looking up a key in the portion of the file already parsed should
+ # only perform IO to resolve its key references.
+ result = index._lookup_keys_via_location([(11000, self.make_key(45))])
+ self.assertEqual(
+ [((11000, self.make_key(45)),
+ (index, self.make_key(45), self.make_value(45),
+ ((self.make_key(65),),)))],
+ result)
+ self.assertEqual([('readv', 'index', [(15093, 800)], True, index_size)],
+ index._transport._activity)
+
+ def test_lookup_key_can_buffer_all(self):
+ nodes = []
+ for counter in range(64):
+ nodes.append((self.make_key(counter), self.make_value(counter),
+ ((self.make_key(counter + 20),),) ))
+ index = self.make_index(ref_lists=1, nodes=nodes)
+ # lookup a key in the middle that does not exist, so that we can check
+ # that the referred-to keys are not accessed automatically.
+ index_size = index._size
+ index_center = index_size // 2
+ result = index._lookup_keys_via_location([(index_center, ('40', ))])
+ # check the parse map - only the start and middle should have been
+ # parsed.
+ self.assertEqual([(0, 3890), (6444, 10274)], index._parsed_byte_map)
+ self.assertEqual([(None, self.make_key(25)),
+ (self.make_key(37), self.make_key(52))],
+ index._parsed_key_map)
+ # and check the transport activity likewise.
+ self.assertEqual(
+ [('readv', 'index', [(index_center, 800), (0, 200)], True,
+ index_size)],
+ index._transport._activity)
+ # reset the transport log for testing the reference lookup
+ del index._transport._activity[:]
+ # now looking up a key in the portion of the file already parsed should
+ # only perform IO to resolve its key references.
+ result = index._lookup_keys_via_location([(7000, self.make_key(40))])
+ self.assertEqual(
+ [((7000, self.make_key(40)),
+ (index, self.make_key(40), self.make_value(40),
+ ((self.make_key(60),),)))],
+ result)
+ # Resolving the references would have required more data read, and we
+ # are already above the 50% threshold, so it triggered a _buffer_all
+ self.assertEqual([('get', 'index')], index._transport._activity)
+
+ def test_iter_all_entries_empty(self):
+ index = self.make_index()
+ self.assertEqual([], list(index.iter_all_entries()))
+
+ def test_iter_all_entries_simple(self):
+ index = self.make_index(nodes=[(('name', ), 'data', ())])
+ self.assertEqual([(index, ('name', ), 'data')],
+ list(index.iter_all_entries()))
+
+ def test_iter_all_entries_simple_2_elements(self):
+ index = self.make_index(key_elements=2,
+ nodes=[(('name', 'surname'), 'data', ())])
+ self.assertEqual([(index, ('name', 'surname'), 'data')],
+ list(index.iter_all_entries()))
+
+ def test_iter_all_entries_references_resolved(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_all_entries()))
+
+ def test_iter_entries_buffers_once(self):
+ index = self.make_index(nodes=self.make_nodes(2))
+ # reset the transport log
+ del index._transport._activity[:]
+ self.assertEqual(set([(index, self.make_key(1), self.make_value(1))]),
+ set(index.iter_entries([self.make_key(1)])))
+ # We should have requested reading the header bytes
+ # But not needed any more than that because it would have triggered a
+ # buffer all
+ self.assertEqual([
+ ('readv', 'index', [(0, 200)], True, index._size),
+ ],
+ index._transport._activity)
+ # And that should have been enough to trigger reading the whole index
+ # with buffering
+ self.assertIsNot(None, index._nodes)
+
+ def test_iter_entries_buffers_by_bytes_read(self):
+ index = self.make_index(nodes=self.make_nodes(64))
+ list(index.iter_entries([self.make_key(10)]))
+ # The first time through isn't enough to trigger a buffer all
+ self.assertIs(None, index._nodes)
+ self.assertEqual(4096, index._bytes_read)
+ # Grabbing a key in that same page won't trigger a buffer all, as we
+ # still haven't read 50% of the file
+ list(index.iter_entries([self.make_key(11)]))
+ self.assertIs(None, index._nodes)
+ self.assertEqual(4096, index._bytes_read)
+ # We haven't read more data, so reading outside the range won't trigger
+ # a buffer all right away
+ list(index.iter_entries([self.make_key(40)]))
+ self.assertIs(None, index._nodes)
+ self.assertEqual(8192, index._bytes_read)
+ # On the next pass, we will not trigger buffer all if the key is
+ # available without reading more
+ list(index.iter_entries([self.make_key(32)]))
+ self.assertIs(None, index._nodes)
+ # But if we *would* need to read more to resolve it, then we will
+ # buffer all.
+ list(index.iter_entries([self.make_key(60)]))
+ self.assertIsNot(None, index._nodes)
+
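The comments above describe a threshold heuristic: partial reads are tracked in _bytes_read, and once a lookup would need more I/O while roughly half the file has already been fetched, the index buffers everything instead. An approximate sketch of that decision; the function and threshold here are illustrative, not the exact bzrlib logic:

    def should_buffer_all(bytes_read, index_size, needs_more_io):
        # Once more than half the file has been fetched piecemeal, another
        # partial read is unlikely to be cheaper than reading the rest.
        return needs_more_io and bytes_read * 2 >= index_size

    # should_buffer_all(8192, 16000, True) -> True
    # should_buffer_all(4096, 16000, True) -> False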
+ def test_iter_entries_references_resolved(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', ), ('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),('ref',)),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries([('name',), ('ref',)])))
+
+ def test_iter_entries_references_2_refs_resolved(self):
+ index = self.make_index(2, nodes=[
+ (('name', ), 'data', ([('ref', )], [('ref', )])),
+ (('ref', ), 'refdata', ([], []))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),), (('ref',),))),
+ (index, ('ref', ), 'refdata', ((), ()))]),
+ set(index.iter_entries([('name',), ('ref',)])))
+
+ def test_iteration_absent_skipped(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),))]),
+ set(index.iter_all_entries()))
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),))]),
+ set(index.iter_entries([('name', )])))
+ self.assertEqual([], list(index.iter_entries([('ref', )])))
+
+ def test_iteration_absent_skipped_2_element_keys(self):
+ index = self.make_index(1, key_elements=2, nodes=[
+ (('name', 'fin'), 'data', ([('ref', 'erence')], ))])
+ self.assertEqual(set([(index, ('name', 'fin'), 'data', ((('ref', 'erence'),),))]),
+ set(index.iter_all_entries()))
+ self.assertEqual(set([(index, ('name', 'fin'), 'data', ((('ref', 'erence'),),))]),
+ set(index.iter_entries([('name', 'fin')])))
+ self.assertEqual([], list(index.iter_entries([('ref', 'erence')])))
+
+ def test_iter_all_keys(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries([('name', ), ('ref', )])))
+
+ def test_iter_nothing_empty(self):
+ index = self.make_index()
+ self.assertEqual([], list(index.iter_entries([])))
+
+ def test_iter_missing_entry_empty(self):
+ index = self.make_index()
+ self.assertEqual([], list(index.iter_entries([('a', )])))
+
+ def test_iter_missing_entry_empty_no_size(self):
+ idx = self.make_index()
+ idx = index.GraphIndex(idx._transport, 'index', None)
+ self.assertEqual([], list(idx.iter_entries([('a', )])))
+
+ def test_iter_key_prefix_1_element_key_None(self):
+ index = self.make_index()
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([(None, )]))
+
+ def test_iter_key_prefix_wrong_length(self):
+ index = self.make_index()
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([('foo', None)]))
+ index = self.make_index(key_elements=2)
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([('foo', )]))
+ self.assertRaises(errors.BadIndexKey, list,
+ index.iter_entries_prefix([('foo', None, None)]))
+
+ def test_iter_key_prefix_1_key_element_no_refs(self):
+ index = self.make_index( nodes=[
+ (('name', ), 'data', ()),
+ (('ref', ), 'refdata', ())])
+ self.assertEqual(set([(index, ('name', ), 'data'),
+ (index, ('ref', ), 'refdata')]),
+ set(index.iter_entries_prefix([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_1_key_element_refs(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries_prefix([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_2_key_element_no_refs(self):
+ index = self.make_index(key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ()),
+ (('name', 'fin2'), 'beta', ()),
+ (('ref', 'erence'), 'refdata', ())])
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
+ (index, ('ref', 'erence'), 'refdata')]),
+ set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
+ (index, ('name', 'fin2'), 'beta')]),
+ set(index.iter_entries_prefix([('name', None)])))
+
+ def test_iter_key_prefix_2_key_element_refs(self):
+ index = self.make_index(1, key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ([('ref', 'erence')], )),
+ (('name', 'fin2'), 'beta', ([], )),
+ (('ref', 'erence'), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
+ (index, ('ref', 'erence'), 'refdata', ((), ))]),
+ set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
+ (index, ('name', 'fin2'), 'beta', ((), ))]),
+ set(index.iter_entries_prefix([('name', None)])))
+
+ def test_key_count_empty(self):
+ index = self.make_index()
+ self.assertEqual(0, index.key_count())
+
+ def test_key_count_one(self):
+ index = self.make_index(nodes=[(('name', ), '', ())])
+ self.assertEqual(1, index.key_count())
+
+ def test_key_count_two(self):
+ index = self.make_index(nodes=[
+ (('name', ), '', ()), (('foo', ), '', ())])
+ self.assertEqual(2, index.key_count())
+
+ def test_read_and_parse_tracks_real_read_value(self):
+ index = self.make_index(nodes=self.make_nodes(10))
+ del index._transport._activity[:]
+ index._read_and_parse([(0, 200)])
+ self.assertEqual([
+ ('readv', 'index', [(0, 200)], True, index._size),
+ ],
+ index._transport._activity)
+ # The readv expansion code will expand the initial request to 4096
+ # bytes, which is more than enough to read the entire index, and we
+ # will track the fact that we read that many bytes.
+ self.assertEqual(index._size, index._bytes_read)
+
+ def test_read_and_parse_triggers_buffer_all(self):
+ index = self.make_index(key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ()),
+ (('name', 'fin2'), 'beta', ()),
+ (('ref', 'erence'), 'refdata', ())])
+ self.assertTrue(index._size > 0)
+ self.assertIs(None, index._nodes)
+ index._read_and_parse([(0, index._size)])
+ self.assertIsNot(None, index._nodes)
+
+ def test_validate_bad_index_errors(self):
+ trans = self.get_transport()
+ trans.put_bytes('name', "not an index\n")
+ idx = index.GraphIndex(trans, 'name', 13)
+ self.assertRaises(errors.BadIndexFormatSignature, idx.validate)
+
+ def test_validate_bad_node_refs(self):
+ idx = self.make_index(2)
+ trans = self.get_transport()
+ content = trans.get_bytes('index')
+ # change the options line to end with a rather than a parseable number
+ new_content = content[:-2] + 'a\n\n'
+ trans.put_bytes('index', new_content)
+ self.assertRaises(errors.BadIndexOptions, idx.validate)
+
+ def test_validate_missing_end_line_empty(self):
+ index = self.make_index(2)
+ trans = self.get_transport()
+ content = trans.get_bytes('index')
+ # truncate the last byte
+ trans.put_bytes('index', content[:-1])
+ self.assertRaises(errors.BadIndexData, index.validate)
+
+ def test_validate_missing_end_line_nonempty(self):
+ index = self.make_index(2, nodes=[(('key', ), '', ([], []))])
+ trans = self.get_transport()
+ content = trans.get_bytes('index')
+ # truncate the last byte
+ trans.put_bytes('index', content[:-1])
+ self.assertRaises(errors.BadIndexData, index.validate)
+
+ def test_validate_empty(self):
+ index = self.make_index()
+ index.validate()
+
+ def test_validate_no_refs_content(self):
+ index = self.make_index(nodes=[(('key', ), 'value', ())])
+ index.validate()
+
+ # XXX: external_references tests are duplicated in test_btree_index. We
+ # probably should have per_graph_index tests...
+ def test_external_references_no_refs(self):
+ index = self.make_index(ref_lists=0, nodes=[])
+ self.assertRaises(ValueError, index.external_references, 0)
+
+ def test_external_references_no_results(self):
+ index = self.make_index(ref_lists=1, nodes=[
+ (('key',), 'value', ([],))])
+ self.assertEqual(set(), index.external_references(0))
+
+ def test_external_references_missing_ref(self):
+ missing_key = ('missing',)
+ index = self.make_index(ref_lists=1, nodes=[
+ (('key',), 'value', ([missing_key],))])
+ self.assertEqual(set([missing_key]), index.external_references(0))
+
+ def test_external_references_multiple_ref_lists(self):
+ missing_key = ('missing',)
+ index = self.make_index(ref_lists=2, nodes=[
+ (('key',), 'value', ([], [missing_key]))])
+ self.assertEqual(set([]), index.external_references(0))
+ self.assertEqual(set([missing_key]), index.external_references(1))
+
+ def test_external_references_two_records(self):
+ index = self.make_index(ref_lists=1, nodes=[
+ (('key-1',), 'value', ([('key-2',)],)),
+ (('key-2',), 'value', ([],)),
+ ])
+ self.assertEqual(set([]), index.external_references(0))
+
+ def test__find_ancestors(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([],)),
+ ])
+ parent_map = {}
+ missing_keys = set()
+ search_keys = index._find_ancestors([key1], 0, parent_map, missing_keys)
+ self.assertEqual({key1: (key2,)}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set([key2]), search_keys)
+ search_keys = index._find_ancestors(search_keys, 0, parent_map,
+ missing_keys)
+ self.assertEqual({key1: (key2,), key2: ()}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test__find_ancestors_w_missing(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([],)),
+ ])
+ parent_map = {}
+ missing_keys = set()
+ search_keys = index._find_ancestors([key2, key3], 0, parent_map,
+ missing_keys)
+ self.assertEqual({key2: ()}, parent_map)
+ self.assertEqual(set([key3]), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test__find_ancestors_dont_search_known(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ index = self.make_index(ref_lists=1, key_elements=1, nodes=[
+ (key1, 'value', ([key2],)),
+ (key2, 'value', ([key3],)),
+ (key3, 'value', ([],)),
+ ])
+ # We already know about key2, so we won't try to search for key3
+ parent_map = {key2: (key3,)}
+ missing_keys = set()
+ search_keys = index._find_ancestors([key1], 0, parent_map,
+ missing_keys)
+ self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ self.assertEqual(set(), search_keys)
+
+ def test_supports_unlimited_cache(self):
+ builder = index.GraphIndexBuilder(0, key_elements=1)
+ stream = builder.finish()
+ trans = self.get_transport()
+ size = trans.put_file('index', stream)
+ # It doesn't matter what unlimited_cache does here, just that it can be
+ # passed
+ idx = index.GraphIndex(trans, 'index', size, unlimited_cache=True)
+
+
+class TestCombinedGraphIndex(tests.TestCaseWithMemoryTransport):
+
+ def make_index(self, name, ref_lists=0, key_elements=1, nodes=[]):
+ builder = index.GraphIndexBuilder(ref_lists, key_elements=key_elements)
+ for key, value, references in nodes:
+ builder.add_node(key, value, references)
+ stream = builder.finish()
+ trans = self.get_transport()
+ size = trans.put_file(name, stream)
+ return index.GraphIndex(trans, name, size)
+
+ def make_combined_index_with_missing(self, missing=['1', '2']):
+ """Create a CombinedGraphIndex which will have missing indexes.
+
+ This creates a CGI which thinks it has 2 indexes; however, they have
+ been deleted. If CGI._reload_func() is called, it will repopulate
+ with a new index.
+
+ :param missing: The underlying indexes to delete
+ :return: (CombinedGraphIndex, reload_counter)
+ """
+ idx1 = self.make_index('1', nodes=[(('1',), '', ())])
+ idx2 = self.make_index('2', nodes=[(('2',), '', ())])
+ idx3 = self.make_index('3', nodes=[
+ (('1',), '', ()),
+ (('2',), '', ())])
+
+ # total_reloads, num_changed, num_unchanged
+ reload_counter = [0, 0, 0]
+ def reload():
+ reload_counter[0] += 1
+ new_indices = [idx3]
+ if idx._indices == new_indices:
+ reload_counter[2] += 1
+ return False
+ reload_counter[1] += 1
+ idx._indices[:] = new_indices
+ return True
+ idx = index.CombinedGraphIndex([idx1, idx2], reload_func=reload)
+ trans = self.get_transport()
+ for fname in missing:
+ trans.delete(fname)
+ return idx, reload_counter
+
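The helper above encodes the reload contract: when a child index turns out to be missing, the CombinedGraphIndex calls reload_func, which returns True if it swapped in fresh indexes (so the query is worth retrying) and False if nothing changed (so the error should propagate rather than loop forever). A generic sketch of that retry shape; illustrative, not the bzrlib implementation, and using IOError where bzrlib raises NoSuchFile:

    def query_with_reload(query, reload_func):
        while True:
            try:
                return query()
            except IOError:
                # Only retry if the callback actually swapped in new indexes.
                if reload_func is None or not reload_func():
                    raise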
+ def test_open_missing_index_no_error(self):
+ trans = self.get_transport()
+ idx1 = index.GraphIndex(trans, 'missing', 100)
+ idx = index.CombinedGraphIndex([idx1])
+
+ def test_add_index(self):
+ idx = index.CombinedGraphIndex([])
+ idx1 = self.make_index('name', 0, nodes=[(('key', ), '', ())])
+ idx.insert_index(0, idx1)
+ self.assertEqual([(idx1, ('key', ), '')],
+ list(idx.iter_all_entries()))
+
+ def test_clear_cache(self):
+ log = []
+
+ class ClearCacheProxy(object):
+
+ def __init__(self, index):
+ self._index = index
+
+ def __getattr__(self, name):
+ return getattr(self._index, name)
+
+ def clear_cache(self):
+ log.append(self._index)
+ return self._index.clear_cache()
+
+ idx = index.CombinedGraphIndex([])
+ idx1 = self.make_index('name', 0, nodes=[(('key', ), '', ())])
+ idx.insert_index(0, ClearCacheProxy(idx1))
+ idx2 = self.make_index('name', 0, nodes=[(('key', ), '', ())])
+ idx.insert_index(1, ClearCacheProxy(idx2))
+ # CombinedGraphIndex should call 'clear_cache()' on all children
+ idx.clear_cache()
+ self.assertEqual(sorted([idx1, idx2]), sorted(log))
+
+ def test_iter_all_entries_empty(self):
+ idx = index.CombinedGraphIndex([])
+ self.assertEqual([], list(idx.iter_all_entries()))
+
+ def test_iter_all_entries_children_empty(self):
+ idx1 = self.make_index('name')
+ idx = index.CombinedGraphIndex([idx1])
+ self.assertEqual([], list(idx.iter_all_entries()))
+
+ def test_iter_all_entries_simple(self):
+ idx1 = self.make_index('name', nodes=[(('name', ), 'data', ())])
+ idx = index.CombinedGraphIndex([idx1])
+ self.assertEqual([(idx1, ('name', ), 'data')],
+ list(idx.iter_all_entries()))
+
+ def test_iter_all_entries_two_indices(self):
+ idx1 = self.make_index('name1', nodes=[(('name', ), 'data', ())])
+ idx2 = self.make_index('name2', nodes=[(('2', ), '', ())])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual([(idx1, ('name', ), 'data'),
+ (idx2, ('2', ), '')],
+ list(idx.iter_all_entries()))
+
+ def test_iter_entries_two_indices_dup_key(self):
+ idx1 = self.make_index('name1', nodes=[(('name', ), 'data', ())])
+ idx2 = self.make_index('name2', nodes=[(('name', ), 'data', ())])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual([(idx1, ('name', ), 'data')],
+ list(idx.iter_entries([('name', )])))
+
+ def test_iter_all_entries_two_indices_dup_key(self):
+ idx1 = self.make_index('name1', nodes=[(('name', ), 'data', ())])
+ idx2 = self.make_index('name2', nodes=[(('name', ), 'data', ())])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual([(idx1, ('name', ), 'data')],
+ list(idx.iter_all_entries()))
+
+ def test_iter_key_prefix_2_key_element_refs(self):
+ idx1 = self.make_index('1', 1, key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ([('ref', 'erence')], ))])
+ idx2 = self.make_index('2', 1, key_elements=2, nodes=[
+ (('name', 'fin2'), 'beta', ([], )),
+ (('ref', 'erence'), 'refdata', ([], ))])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual(set([(idx1, ('name', 'fin1'), 'data',
+ ((('ref', 'erence'),),)),
+ (idx2, ('ref', 'erence'), 'refdata', ((), ))]),
+ set(idx.iter_entries_prefix([('name', 'fin1'),
+ ('ref', 'erence')])))
+ self.assertEqual(set([(idx1, ('name', 'fin1'), 'data',
+ ((('ref', 'erence'),),)),
+ (idx2, ('name', 'fin2'), 'beta', ((), ))]),
+ set(idx.iter_entries_prefix([('name', None)])))
+
+ def test_iter_nothing_empty(self):
+ idx = index.CombinedGraphIndex([])
+ self.assertEqual([], list(idx.iter_entries([])))
+
+ def test_iter_nothing_children_empty(self):
+ idx1 = self.make_index('name')
+ idx = index.CombinedGraphIndex([idx1])
+ self.assertEqual([], list(idx.iter_entries([])))
+
+ def test_iter_all_keys(self):
+ idx1 = self.make_index('1', 1, nodes=[(('name', ), 'data',
+ ([('ref', )], ))])
+ idx2 = self.make_index('2', 1, nodes=[(('ref', ), 'refdata', ((), ))])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual(set([(idx1, ('name', ), 'data', ((('ref', ), ), )),
+ (idx2, ('ref', ), 'refdata', ((), ))]),
+ set(idx.iter_entries([('name', ), ('ref', )])))
+
+ def test_iter_all_keys_dup_entry(self):
+ idx1 = self.make_index('1', 1, nodes=[(('name', ), 'data',
+ ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ idx2 = self.make_index('2', 1, nodes=[(('ref', ), 'refdata', ([], ))])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual(set([(idx1, ('name', ), 'data', ((('ref',),),)),
+ (idx1, ('ref', ), 'refdata', ((), ))]),
+ set(idx.iter_entries([('name', ), ('ref', )])))
+
+ def test_iter_missing_entry_empty(self):
+ idx = index.CombinedGraphIndex([])
+ self.assertEqual([], list(idx.iter_entries([('a', )])))
+
+ def test_iter_missing_entry_one_index(self):
+ idx1 = self.make_index('1')
+ idx = index.CombinedGraphIndex([idx1])
+ self.assertEqual([], list(idx.iter_entries([('a', )])))
+
+ def test_iter_missing_entry_two_index(self):
+ idx1 = self.make_index('1')
+ idx2 = self.make_index('2')
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual([], list(idx.iter_entries([('a', )])))
+
+ def test_iter_entry_present_one_index_only(self):
+ idx1 = self.make_index('1', nodes=[(('key', ), '', ())])
+ idx2 = self.make_index('2', nodes=[])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual([(idx1, ('key', ), '')],
+ list(idx.iter_entries([('key', )])))
+ # and in the other direction
+ idx = index.CombinedGraphIndex([idx2, idx1])
+ self.assertEqual([(idx1, ('key', ), '')],
+ list(idx.iter_entries([('key', )])))
+
+ def test_key_count_empty(self):
+ idx1 = self.make_index('1', nodes=[])
+ idx2 = self.make_index('2', nodes=[])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual(0, idx.key_count())
+
+ def test_key_count_sums_index_keys(self):
+ idx1 = self.make_index('1', nodes=[
+ (('1',), '', ()),
+ (('2',), '', ())])
+ idx2 = self.make_index('2', nodes=[(('1',), '', ())])
+ idx = index.CombinedGraphIndex([idx1, idx2])
+ self.assertEqual(3, idx.key_count())
+
+ def test_validate_bad_child_index_errors(self):
+ trans = self.get_transport()
+ trans.put_bytes('name', "not an index\n")
+ idx1 = index.GraphIndex(trans, 'name', 13)
+ idx = index.CombinedGraphIndex([idx1])
+ self.assertRaises(errors.BadIndexFormatSignature, idx.validate)
+
+ def test_validate_empty(self):
+ idx = index.CombinedGraphIndex([])
+ idx.validate()
+
+ def test_key_count_reloads(self):
+ idx, reload_counter = self.make_combined_index_with_missing()
+ self.assertEqual(2, idx.key_count())
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_key_count_no_reload(self):
+ idx, reload_counter = self.make_combined_index_with_missing()
+ idx._reload_func = None
+ # Without a _reload_func we just raise the exception
+ self.assertRaises(errors.NoSuchFile, idx.key_count)
+
+ def test_key_count_reloads_and_fails(self):
+ # We have deleted all underlying indexes, so we will try to reload, but
+ # still fail. This is mostly to test we don't get stuck in an infinite
+ # loop trying to reload
+ idx, reload_counter = self.make_combined_index_with_missing(
+ ['1', '2', '3'])
+ self.assertRaises(errors.NoSuchFile, idx.key_count)
+ self.assertEqual([2, 1, 1], reload_counter)
+
+ def test_iter_entries_reloads(self):
+ index, reload_counter = self.make_combined_index_with_missing()
+ result = list(index.iter_entries([('1',), ('2',), ('3',)]))
+ index3 = index._indices[0]
+ self.assertEqual([(index3, ('1',), ''), (index3, ('2',), '')],
+ result)
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_iter_entries_reloads_midway(self):
+ # The first index still looks present, so we get interrupted mid-way
+ # through
+ index, reload_counter = self.make_combined_index_with_missing(['2'])
+ index1, index2 = index._indices
+ result = list(index.iter_entries([('1',), ('2',), ('3',)]))
+ index3 = index._indices[0]
+ # We had already yielded '1', so we just go on to the next, we should
+ # not yield '1' twice.
+ self.assertEqual([(index1, ('1',), ''), (index3, ('2',), '')],
+ result)
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_iter_entries_no_reload(self):
+ index, reload_counter = self.make_combined_index_with_missing()
+ index._reload_func = None
+ # Without a _reload_func we just raise the exception
+ self.assertListRaises(errors.NoSuchFile, index.iter_entries, [('3',)])
+
+ def test_iter_entries_reloads_and_fails(self):
+ index, reload_counter = self.make_combined_index_with_missing(
+ ['1', '2', '3'])
+ self.assertListRaises(errors.NoSuchFile, index.iter_entries, [('3',)])
+ self.assertEqual([2, 1, 1], reload_counter)
+
+ def test_iter_all_entries_reloads(self):
+ index, reload_counter = self.make_combined_index_with_missing()
+ result = list(index.iter_all_entries())
+ index3 = index._indices[0]
+ self.assertEqual([(index3, ('1',), ''), (index3, ('2',), '')],
+ result)
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_iter_all_entries_reloads_midway(self):
+ index, reload_counter = self.make_combined_index_with_missing(['2'])
+ index1, index2 = index._indices
+ result = list(index.iter_all_entries())
+ index3 = index._indices[0]
+ # We had already yielded '1', so we just go on to the next, we should
+ # not yield '1' twice.
+ self.assertEqual([(index1, ('1',), ''), (index3, ('2',), '')],
+ result)
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_iter_all_entries_no_reload(self):
+ index, reload_counter = self.make_combined_index_with_missing()
+ index._reload_func = None
+ self.assertListRaises(errors.NoSuchFile, index.iter_all_entries)
+
+ def test_iter_all_entries_reloads_and_fails(self):
+ index, reload_counter = self.make_combined_index_with_missing(
+ ['1', '2', '3'])
+ self.assertListRaises(errors.NoSuchFile, index.iter_all_entries)
+
+ def test_iter_entries_prefix_reloads(self):
+ index, reload_counter = self.make_combined_index_with_missing()
+ result = list(index.iter_entries_prefix([('1',)]))
+ index3 = index._indices[0]
+ self.assertEqual([(index3, ('1',), '')], result)
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_iter_entries_prefix_reloads_midway(self):
+ index, reload_counter = self.make_combined_index_with_missing(['2'])
+ index1, index2 = index._indices
+ result = list(index.iter_entries_prefix([('1',)]))
+ index3 = index._indices[0]
+ # We had already yielded '1', so we just go on to the next, we should
+ # not yield '1' twice.
+ self.assertEqual([(index1, ('1',), '')], result)
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_iter_entries_prefix_no_reload(self):
+ index, reload_counter = self.make_combined_index_with_missing()
+ index._reload_func = None
+ self.assertListRaises(errors.NoSuchFile, index.iter_entries_prefix,
+ [('1',)])
+
+ def test_iter_entries_prefix_reloads_and_fails(self):
+ index, reload_counter = self.make_combined_index_with_missing(
+ ['1', '2', '3'])
+ self.assertListRaises(errors.NoSuchFile, index.iter_entries_prefix,
+ [('1',)])
+
+ def make_index_with_simple_nodes(self, name, num_nodes=1):
+ """Make an index named after 'name', with keys named after 'name' too.
+
+ Nodes will have a value of '' and no references.
+ """
+ nodes = [
+ (('index-%s-key-%s' % (name, n),), '', ())
+ for n in range(1, num_nodes+1)]
+ return self.make_index('index-%s' % name, 0, nodes=nodes)
+
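+ # As an illustration, make_index_with_simple_nodes('2') builds an index
+ # named 'index-2' whose single key is ('index-2-key-1',); the reorder
+ # tests below query keys of exactly that form.
+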
+ def test_reorder_after_iter_entries(self):
+ # Four indices: [key1] in idx1, [key2,key3] in idx2, [] in idx3,
+ # [key4] in idx4.
+ idx = index.CombinedGraphIndex([])
+ idx.insert_index(0, self.make_index_with_simple_nodes('1'), '1')
+ idx.insert_index(1, self.make_index_with_simple_nodes('2'), '2')
+ idx.insert_index(2, self.make_index_with_simple_nodes('3'), '3')
+ idx.insert_index(3, self.make_index_with_simple_nodes('4'), '4')
+ idx1, idx2, idx3, idx4 = idx._indices
+ # Query a key from idx4 and idx2.
+ self.assertLength(2, list(idx.iter_entries(
+ [('index-4-key-1',), ('index-2-key-1',)])))
+ # Now idx2 and idx4 should be moved to the front (and idx1 should
+ # still be before idx3).
+ self.assertEqual([idx2, idx4, idx1, idx3], idx._indices)
+ self.assertEqual(['2', '4', '1', '3'], idx._index_names)
+
+ def test_reorder_propagates_to_siblings(self):
+ # Two CombinedGraphIndex objects with the same number of indices and
+ # matching names.
+ cgi1 = index.CombinedGraphIndex([])
+ cgi2 = index.CombinedGraphIndex([])
+ cgi1.insert_index(0, self.make_index_with_simple_nodes('1-1'), 'one')
+ cgi1.insert_index(1, self.make_index_with_simple_nodes('1-2'), 'two')
+ cgi2.insert_index(0, self.make_index_with_simple_nodes('2-1'), 'one')
+ cgi2.insert_index(1, self.make_index_with_simple_nodes('2-2'), 'two')
+ index2_1, index2_2 = cgi2._indices
+ cgi1.set_sibling_indices([cgi2])
+ # Trigger a reordering in cgi1. cgi2 will be reordered as well.
+ list(cgi1.iter_entries([('index-1-2-key-1',)]))
+ self.assertEqual([index2_2, index2_1], cgi2._indices)
+ self.assertEqual(['two', 'one'], cgi2._index_names)
+
+ def test_validate_reloads(self):
+ idx, reload_counter = self.make_combined_index_with_missing()
+ idx.validate()
+ self.assertEqual([1, 1, 0], reload_counter)
+
+ def test_validate_reloads_midway(self):
+ idx, reload_counter = self.make_combined_index_with_missing(['2'])
+ idx.validate()
+
+ def test_validate_no_reload(self):
+ idx, reload_counter = self.make_combined_index_with_missing()
+ idx._reload_func = None
+ self.assertRaises(errors.NoSuchFile, idx.validate)
+
+ def test_validate_reloads_and_fails(self):
+ idx, reload_counter = self.make_combined_index_with_missing(
+ ['1', '2', '3'])
+ self.assertRaises(errors.NoSuchFile, idx.validate)
+
+ def test_find_ancestors_across_indexes(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ key4 = ('key-4',)
+ index1 = self.make_index('12', ref_lists=1, nodes=[
+ (key1, 'value', ([],)),
+ (key2, 'value', ([key1],)),
+ ])
+ index2 = self.make_index('34', ref_lists=1, nodes=[
+ (key3, 'value', ([key2],)),
+ (key4, 'value', ([key3],)),
+ ])
+ c_index = index.CombinedGraphIndex([index1, index2])
+ parent_map, missing_keys = c_index.find_ancestry([key1], 0)
+ self.assertEqual({key1: ()}, parent_map)
+ self.assertEqual(set(), missing_keys)
+ # Now look for a key from index2 which requires us to find the key in
+ # the second index, and then continue searching for parents in the
+ # first index
+ parent_map, missing_keys = c_index.find_ancestry([key3], 0)
+ self.assertEqual({key1: (), key2: (key1,), key3: (key2,)}, parent_map)
+ self.assertEqual(set(), missing_keys)
+
+ def test_find_ancestors_missing_keys(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ key4 = ('key-4',)
+ index1 = self.make_index('12', ref_lists=1, nodes=[
+ (key1, 'value', ([],)),
+ (key2, 'value', ([key1],)),
+ ])
+ index2 = self.make_index('34', ref_lists=1, nodes=[
+ (key3, 'value', ([key2],)),
+ ])
+ c_index = index.CombinedGraphIndex([index1, index2])
+ # Searching for a key which is actually not present at all should
+ # eventually converge
+ parent_map, missing_keys = c_index.find_ancestry([key4], 0)
+ self.assertEqual({}, parent_map)
+ self.assertEqual(set([key4]), missing_keys)
+
+ def test_find_ancestors_no_indexes(self):
+ c_index = index.CombinedGraphIndex([])
+ key1 = ('key-1',)
+ parent_map, missing_keys = c_index.find_ancestry([key1], 0)
+ self.assertEqual({}, parent_map)
+ self.assertEqual(set([key1]), missing_keys)
+
+ def test_find_ancestors_ghost_parent(self):
+ key1 = ('key-1',)
+ key2 = ('key-2',)
+ key3 = ('key-3',)
+ key4 = ('key-4',)
+ index1 = self.make_index('12', ref_lists=1, nodes=[
+ (key1, 'value', ([],)),
+ (key2, 'value', ([key1],)),
+ ])
+ index2 = self.make_index('34', ref_lists=1, nodes=[
+ (key4, 'value', ([key2, key3],)),
+ ])
+ c_index = index.CombinedGraphIndex([index1, index2])
+ # key3 is referenced as a parent but is not present in any index (a
+ # ghost); the search should still terminate and report it as missing.
+ parent_map, missing_keys = c_index.find_ancestry([key4], 0)
+ self.assertEqual({key4: (key2, key3), key2: (key1,), key1: ()},
+ parent_map)
+ self.assertEqual(set([key3]), missing_keys)
+
+ def test__find_ancestors_empty_index(self):
+ idx = self.make_index('test', ref_lists=1, key_elements=1, nodes=[])
+ parent_map = {}
+ missing_keys = set()
+ search_keys = idx._find_ancestors([('one',), ('two',)], 0, parent_map,
+ missing_keys)
+ self.assertEqual(set(), search_keys)
+ self.assertEqual({}, parent_map)
+ self.assertEqual(set([('one',), ('two',)]), missing_keys)
+
+
+class TestInMemoryGraphIndex(tests.TestCaseWithMemoryTransport):
+
+ def make_index(self, ref_lists=0, key_elements=1, nodes=[]):
+ result = index.InMemoryGraphIndex(ref_lists, key_elements=key_elements)
+ result.add_nodes(nodes)
+ return result
+
+ def test_add_nodes_no_refs(self):
+ index = self.make_index(0)
+ index.add_nodes([(('name', ), 'data')])
+ index.add_nodes([(('name2', ), ''), (('name3', ), '')])
+ self.assertEqual(set([
+ (index, ('name', ), 'data'),
+ (index, ('name2', ), ''),
+ (index, ('name3', ), ''),
+ ]), set(index.iter_all_entries()))
+
+ def test_add_nodes(self):
+ index = self.make_index(1)
+ index.add_nodes([(('name', ), 'data', ([],))])
+ index.add_nodes([(('name2', ), '', ([],)), (('name3', ), '', ([('r', )],))])
+ self.assertEqual(set([
+ (index, ('name', ), 'data', ((),)),
+ (index, ('name2', ), '', ((),)),
+ (index, ('name3', ), '', ((('r', ), ), )),
+ ]), set(index.iter_all_entries()))
+
+ def test_iter_all_entries_empty(self):
+ index = self.make_index()
+ self.assertEqual([], list(index.iter_all_entries()))
+
+ def test_iter_all_entries_simple(self):
+ index = self.make_index(nodes=[(('name', ), 'data')])
+ self.assertEqual([(index, ('name', ), 'data')],
+ list(index.iter_all_entries()))
+
+ def test_iter_all_entries_references(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref', ),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_all_entries()))
+
+ def test_iteration_absent_skipped(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),))]),
+ set(index.iter_all_entries()))
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),))]),
+ set(index.iter_entries([('name', )])))
+ self.assertEqual([], list(index.iter_entries([('ref', )])))
+
+ def test_iter_all_keys(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_1_key_element_no_refs(self):
+ index = self.make_index(nodes=[
+ (('name', ), 'data'),
+ (('ref', ), 'refdata')])
+ self.assertEqual(set([(index, ('name', ), 'data'),
+ (index, ('ref', ), 'refdata')]),
+ set(index.iter_entries_prefix([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_1_key_element_refs(self):
+ index = self.make_index(1, nodes=[
+ (('name', ), 'data', ([('ref', )], )),
+ (('ref', ), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
+ (index, ('ref', ), 'refdata', ((), ))]),
+ set(index.iter_entries_prefix([('name', ), ('ref', )])))
+
+ def test_iter_key_prefix_2_key_element_no_refs(self):
+ index = self.make_index(key_elements=2, nodes=[
+ (('name', 'fin1'), 'data'),
+ (('name', 'fin2'), 'beta'),
+ (('ref', 'erence'), 'refdata')])
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
+ (index, ('ref', 'erence'), 'refdata')]),
+ set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
+ (index, ('name', 'fin2'), 'beta')]),
+ set(index.iter_entries_prefix([('name', None)])))
+
+ def test_iter_key_prefix_2_key_element_refs(self):
+ index = self.make_index(1, key_elements=2, nodes=[
+ (('name', 'fin1'), 'data', ([('ref', 'erence')], )),
+ (('name', 'fin2'), 'beta', ([], )),
+ (('ref', 'erence'), 'refdata', ([], ))])
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
+ (index, ('ref', 'erence'), 'refdata', ((), ))]),
+ set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
+ self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
+ (index, ('name', 'fin2'), 'beta', ((), ))]),
+ set(index.iter_entries_prefix([('name', None)])))
+
+ def test_iter_nothing_empty(self):
+ index = self.make_index()
+ self.assertEqual([], list(index.iter_entries([])))
+
+ def test_iter_missing_entry_empty(self):
+ index = self.make_index()
+ self.assertEqual([], list(index.iter_entries(['a'])))
+
+ def test_key_count_empty(self):
+ index = self.make_index()
+ self.assertEqual(0, index.key_count())
+
+ def test_key_count_one(self):
+ index = self.make_index(nodes=[(('name', ), '')])
+ self.assertEqual(1, index.key_count())
+
+ def test_key_count_two(self):
+ index = self.make_index(nodes=[(('name', ), ''), (('foo', ), '')])
+ self.assertEqual(2, index.key_count())
+
+ def test_validate_empty(self):
+ index = self.make_index()
+ index.validate()
+
+ def test_validate_no_refs_content(self):
+ index = self.make_index(nodes=[(('key', ), 'value')])
+ index.validate()
+
+
+class TestGraphIndexPrefixAdapter(tests.TestCaseWithMemoryTransport):
+
+ def make_index(self, ref_lists=1, key_elements=2, nodes=[],
+ add_callback=False):
+ result = index.InMemoryGraphIndex(ref_lists, key_elements=key_elements)
+ result.add_nodes(nodes)
+ if add_callback:
+ add_nodes_callback = result.add_nodes
+ else:
+ add_nodes_callback = None
+ adapter = index.GraphIndexPrefixAdapter(
+ result, ('prefix', ), key_elements - 1,
+ add_nodes_callback=add_nodes_callback)
+ return result, adapter
+
+ def test_add_node(self):
+ index, adapter = self.make_index(add_callback=True)
+ adapter.add_node(('key',), 'value', ((('ref',),),))
+ self.assertEqual(set([(index, ('prefix', 'key'), 'value',
+ ((('prefix', 'ref'),),))]),
+ set(index.iter_all_entries()))
+
+ def test_add_nodes(self):
+ index, adapter = self.make_index(add_callback=True)
+ adapter.add_nodes((
+ (('key',), 'value', ((('ref',),),)),
+ (('key2',), 'value2', ((),)),
+ ))
+ self.assertEqual(set([
+ (index, ('prefix', 'key2'), 'value2', ((),)),
+ (index, ('prefix', 'key'), 'value', ((('prefix', 'ref'),),))
+ ]),
+ set(index.iter_all_entries()))
+
+ def test_construct(self):
+ idx = index.InMemoryGraphIndex()
+ adapter = index.GraphIndexPrefixAdapter(idx, ('prefix', ), 1)
+
+ def test_construct_with_callback(self):
+ idx = index.InMemoryGraphIndex()
+ adapter = index.GraphIndexPrefixAdapter(idx, ('prefix', ), 1,
+ idx.add_nodes)
+
+ def test_iter_all_entries_cross_prefix_map_errors(self):
+ index, adapter = self.make_index(nodes=[
+ (('prefix', 'key1'), 'data1', ((('prefixaltered', 'key2'),),))])
+ self.assertRaises(errors.BadIndexData, list, adapter.iter_all_entries())
+
+ def test_iter_all_entries(self):
+ index, adapter = self.make_index(nodes=[
+ (('notprefix', 'key1'), 'data', ((), )),
+ (('prefix', 'key1'), 'data1', ((), )),
+ (('prefix', 'key2'), 'data2', ((('prefix', 'key1'),),))])
+ self.assertEqual(set([(index, ('key1', ), 'data1', ((),)),
+ (index, ('key2', ), 'data2', ((('key1',),),))]),
+ set(adapter.iter_all_entries()))
+
+ def test_iter_entries(self):
+ index, adapter = self.make_index(nodes=[
+ (('notprefix', 'key1'), 'data', ((), )),
+ (('prefix', 'key1'), 'data1', ((), )),
+ (('prefix', 'key2'), 'data2', ((('prefix', 'key1'),),))])
+ # ask for many - get all
+ self.assertEqual(set([(index, ('key1', ), 'data1', ((),)),
+ (index, ('key2', ), 'data2', ((('key1', ),),))]),
+ set(adapter.iter_entries([('key1', ), ('key2', )])))
+ # ask for one, get one
+ self.assertEqual(set([(index, ('key1', ), 'data1', ((),))]),
+ set(adapter.iter_entries([('key1', )])))
+ # ask for missing, get none
+ self.assertEqual(set(),
+ set(adapter.iter_entries([('key3', )])))
+
+ def test_iter_entries_prefix(self):
+ index, adapter = self.make_index(key_elements=3, nodes=[
+ (('notprefix', 'foo', 'key1'), 'data', ((), )),
+ (('prefix', 'prefix2', 'key1'), 'data1', ((), )),
+ (('prefix', 'prefix2', 'key2'), 'data2', ((('prefix', 'prefix2', 'key1'),),))])
+ # ask for a prefix, get the results for just that prefix, adjusted.
+ self.assertEqual(set([(index, ('prefix2', 'key1', ), 'data1', ((),)),
+ (index, ('prefix2', 'key2', ), 'data2', ((('prefix2', 'key1', ),),))]),
+ set(adapter.iter_entries_prefix([('prefix2', None)])))
+
+ def test_key_count_no_matching_keys(self):
+ index, adapter = self.make_index(nodes=[
+ (('notprefix', 'key1'), 'data', ((), ))])
+ self.assertEqual(0, adapter.key_count())
+
+ def test_key_count_some_keys(self):
+ index, adapter = self.make_index(nodes=[
+ (('notprefix', 'key1'), 'data', ((), )),
+ (('prefix', 'key1'), 'data1', ((), )),
+ (('prefix', 'key2'), 'data2', ((('prefix', 'key1'),),))])
+ self.assertEqual(2, adapter.key_count())
+
+ def test_validate(self):
+ index, adapter = self.make_index()
+ calls = []
+ def validate():
+ calls.append('called')
+ index.validate = validate
+ adapter.validate()
+ self.assertEqual(['called'], calls)
diff --git a/bzrlib/tests/test_info.py b/bzrlib/tests/test_info.py
new file mode 100644
index 0000000..cc664a5
--- /dev/null
+++ b/bzrlib/tests/test_info.py
@@ -0,0 +1,336 @@
+# Copyright (C) 2007-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import sys
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ info,
+ tests,
+ workingtree,
+ repository as _mod_repository,
+ )
+
+
+class TestInfo(tests.TestCaseWithTransport):
+
+ def test_describe_standalone_layout(self):
+ tree = self.make_branch_and_tree('tree')
+ self.assertEqual('Empty control directory', info.describe_layout())
+ self.assertEqual(
+ 'Unshared repository with trees and colocated branches',
+ info.describe_layout(tree.branch.repository, control=tree.bzrdir))
+ tree.branch.repository.set_make_working_trees(False)
+ self.assertEqual('Unshared repository with colocated branches',
+ info.describe_layout(tree.branch.repository, control=tree.bzrdir))
+ self.assertEqual('Standalone branch',
+ info.describe_layout(tree.branch.repository, tree.branch,
+ control=tree.bzrdir))
+ self.assertEqual('Standalone branchless tree',
+ info.describe_layout(tree.branch.repository, None, tree,
+ control=tree.bzrdir))
+ self.assertEqual('Standalone tree',
+ info.describe_layout(tree.branch.repository, tree.branch, tree,
+ control=tree.bzrdir))
+ tree.branch.bind(tree.branch)
+ self.assertEqual('Bound branch',
+ info.describe_layout(tree.branch.repository, tree.branch,
+ control=tree.bzrdir))
+ self.assertEqual('Checkout',
+ info.describe_layout(tree.branch.repository, tree.branch, tree,
+ control=tree.bzrdir))
+ checkout = tree.branch.create_checkout('checkout', lightweight=True)
+ self.assertEqual('Lightweight checkout',
+ info.describe_layout(checkout.branch.repository, checkout.branch,
+ checkout, control=tree.bzrdir))
+
+ def test_describe_repository_layout(self):
+ repository = self.make_repository('.', shared=True)
+ tree = controldir.ControlDir.create_branch_convenience('tree',
+ force_new_tree=True).bzrdir.open_workingtree()
+ self.assertEqual('Shared repository with trees and colocated branches',
+ info.describe_layout(tree.branch.repository, control=tree.bzrdir))
+ repository.set_make_working_trees(False)
+ self.assertEqual('Shared repository with colocated branches',
+ info.describe_layout(tree.branch.repository, control=tree.bzrdir))
+ self.assertEqual('Repository branch',
+ info.describe_layout(tree.branch.repository, tree.branch,
+ control=tree.bzrdir))
+ self.assertEqual('Repository branchless tree',
+ info.describe_layout(tree.branch.repository, None, tree,
+ control=tree.bzrdir))
+ self.assertEqual('Repository tree',
+ info.describe_layout(tree.branch.repository, tree.branch, tree,
+ control=tree.bzrdir))
+ tree.branch.bind(tree.branch)
+ self.assertEqual('Repository checkout',
+ info.describe_layout(tree.branch.repository, tree.branch, tree,
+ control=tree.bzrdir))
+ checkout = tree.branch.create_checkout('checkout', lightweight=True)
+ self.assertEqual('Lightweight checkout',
+ info.describe_layout(checkout.branch.repository, checkout.branch,
+ checkout, control=tree.bzrdir))
+
+ def assertTreeDescription(self, format):
+ """Assert a tree's format description matches expectations"""
+ self.make_branch_and_tree('%s_tree' % format, format=format)
+ tree = workingtree.WorkingTree.open('%s_tree' % format)
+ self.assertEqual(format, info.describe_format(tree.bzrdir,
+ tree.branch.repository, tree.branch, tree))
+
+ def assertCheckoutDescription(self, format, expected=None):
+ """Assert a checkout's format description matches expectations"""
+ if expected is None:
+ expected = format
+ branch = self.make_branch('%s_cobranch' % format, format=format)
+ # this ought to be easier...
+ branch.create_checkout('%s_co' % format,
+ lightweight=True).bzrdir.destroy_workingtree()
+ control = controldir.ControlDir.open('%s_co' % format)
+ old_format = control._format.workingtree_format
+ try:
+ control._format.workingtree_format = \
+ controldir.format_registry.make_bzrdir(format).workingtree_format
+ control.create_workingtree()
+ tree = workingtree.WorkingTree.open('%s_co' % format)
+ format_description = info.describe_format(tree.bzrdir,
+ tree.branch.repository, tree.branch, tree)
+ self.assertEqual(expected, format_description,
+ "checkout of format called %r was described as %r" %
+ (expected, format_description))
+ finally:
+ control._format.workingtree_format = old_format
+
+ def assertBranchDescription(self, format, expected=None):
+ """Assert branch's format description matches expectations"""
+ if expected is None:
+ expected = format
+ self.make_branch('%s_branch' % format, format=format)
+ branch = _mod_branch.Branch.open('%s_branch' % format)
+ self.assertEqual(expected, info.describe_format(branch.bzrdir,
+ branch.repository, branch, None))
+
+ def assertRepoDescription(self, format, expected=None):
+ """Assert repository's format description matches expectations"""
+ if expected is None:
+ expected = format
+ self.make_repository('%s_repo' % format, format=format)
+ repo = _mod_repository.Repository.open('%s_repo' % format)
+ self.assertEqual(expected, info.describe_format(repo.bzrdir,
+ repo, None, None))
+
+ def test_describe_tree_format(self):
+ for key in controldir.format_registry.keys():
+ if key in controldir.format_registry.aliases():
+ continue
+ self.assertTreeDescription(key)
+
+ def test_describe_checkout_format(self):
+ for key in controldir.format_registry.keys():
+ if key in controldir.format_registry.aliases():
+ # Aliases are not described correctly in the UI because the
+ # real format is what gets found.
+ continue
+ # legacy: weave does not support checkouts
+ if key == 'weave':
+ continue
+ if controldir.format_registry.get_info(key).experimental:
+ # We don't require that experimental formats support checkouts
+ # or that they are described correctly in the UI.
+ continue
+ if controldir.format_registry.get_info(key).hidden:
+ continue
+ expected = None
+ if key in ('pack-0.92',):
+ expected = 'pack-0.92'
+ elif key in ('knit', 'metaweave'):
+ if 'metaweave' in controldir.format_registry:
+ expected = 'knit or metaweave'
+ else:
+ expected = 'knit'
+ elif key in ('1.14', '1.14-rich-root'):
+ expected = '1.14 or 1.14-rich-root'
+ self.assertCheckoutDescription(key, expected)
+
+ def test_describe_branch_format(self):
+ for key in controldir.format_registry.keys():
+ if key in controldir.format_registry.aliases():
+ continue
+ if controldir.format_registry.get_info(key).hidden:
+ continue
+ expected = None
+ if key in ('dirstate', 'knit'):
+ expected = 'dirstate or knit'
+ elif key in ('1.14',):
+ expected = '1.14'
+ elif key in ('1.14-rich-root',):
+ expected = '1.14-rich-root'
+ self.assertBranchDescription(key, expected)
+
+ def test_describe_repo_format(self):
+ for key in controldir.format_registry.keys():
+ if key in controldir.format_registry.aliases():
+ continue
+ if controldir.format_registry.get_info(key).hidden:
+ continue
+ expected = None
+ if key in ('dirstate', 'knit', 'dirstate-tags'):
+ expected = 'dirstate or dirstate-tags or knit'
+ elif key in ('1.14',):
+ expected = '1.14'
+ elif key in ('1.14-rich-root',):
+ expected = '1.14-rich-root'
+ self.assertRepoDescription(key, expected)
+
+ format = controldir.format_registry.make_bzrdir('knit')
+ format.set_branch_format(_mod_branch.BzrBranchFormat6())
+ tree = self.make_branch_and_tree('unknown', format=format)
+ self.assertEqual('unnamed', info.describe_format(tree.bzrdir,
+ tree.branch.repository, tree.branch, tree))
+
+ def test_gather_location_controldir_only(self):
+ bzrdir = self.make_bzrdir('.')
+ self.assertEqual([('control directory', bzrdir.user_url)],
+ info.gather_location_info(control=bzrdir))
+
+ def test_gather_location_standalone(self):
+ tree = self.make_branch_and_tree('tree')
+ self.assertEqual([('branch root', tree.bzrdir.root_transport.base)],
+ info.gather_location_info(
+ tree.branch.repository, tree.branch, tree, control=tree.bzrdir))
+ self.assertEqual([('branch root', tree.bzrdir.root_transport.base)],
+ info.gather_location_info(
+ tree.branch.repository, tree.branch, control=tree.bzrdir))
+ return tree
+
+ def test_gather_location_repo(self):
+ srepo = self.make_repository('shared', shared=True)
+ self.assertEqual(
+ [('shared repository', srepo.bzrdir.root_transport.base)],
+ info.gather_location_info(srepo, control=srepo.bzrdir))
+ urepo = self.make_repository('unshared')
+ self.assertEqual(
+ [('repository', urepo.bzrdir.root_transport.base)],
+ info.gather_location_info(urepo, control=urepo.bzrdir))
+
+ def test_gather_location_repo_branch(self):
+ srepo = self.make_repository('shared', shared=True)
+ self.assertEqual(
+ [('shared repository', srepo.bzrdir.root_transport.base)],
+ info.gather_location_info(srepo, control=srepo.bzrdir))
+ tree = self.make_branch_and_tree('shared/tree')
+ self.assertEqual(
+ [('shared repository', srepo.bzrdir.root_transport.base),
+ ('repository branch', tree.branch.base)],
+ info.gather_location_info(srepo, tree.branch, tree, srepo.bzrdir))
+
+ def test_gather_location_light_checkout(self):
+ tree = self.make_branch_and_tree('tree')
+ lcheckout = tree.branch.create_checkout('lcheckout', lightweight=True)
+ self.assertEqual(
+ [('light checkout root', lcheckout.bzrdir.root_transport.base),
+ ('checkout of branch', tree.bzrdir.root_transport.base)],
+ self.gather_tree_location_info(lcheckout))
+
+ def test_gather_location_heavy_checkout(self):
+ tree = self.make_branch_and_tree('tree')
+ checkout = tree.branch.create_checkout('checkout')
+ self.assertEqual(
+ [('checkout root', checkout.bzrdir.root_transport.base),
+ ('checkout of branch', tree.bzrdir.root_transport.base)],
+ self.gather_tree_location_info(checkout))
+ light_checkout = checkout.branch.create_checkout('light_checkout',
+ lightweight=True)
+ self.assertEqual(
+ [('light checkout root',
+ light_checkout.bzrdir.root_transport.base),
+ ('checkout root', checkout.bzrdir.root_transport.base),
+ ('checkout of branch', tree.bzrdir.root_transport.base)],
+ self.gather_tree_location_info(light_checkout)
+ )
+
+ def test_gather_location_shared_repo_checkout(self):
+ tree = self.make_branch_and_tree('tree')
+ srepo = self.make_repository('shared', shared=True)
+ shared_checkout = tree.branch.create_checkout('shared/checkout')
+ self.assertEqual(
+ [('repository checkout root',
+ shared_checkout.bzrdir.root_transport.base),
+ ('checkout of branch', tree.bzrdir.root_transport.base),
+ ('shared repository', srepo.bzrdir.root_transport.base)],
+ self.gather_tree_location_info(shared_checkout))
+
+ def gather_tree_location_info(self, tree):
+ return info.gather_location_info(
+ tree.branch.repository, tree.branch, tree, tree.bzrdir)
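+
+ # gather_location_info returns an ordered list of (label, location)
+ # pairs, e.g. [('branch root', ...), ('bound to branch', ...)]; the
+ # remaining gather_location tests compare against such lists directly.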
+
+ def test_gather_location_bound(self):
+ branch = self.make_branch('branch')
+ bound_branch = self.make_branch('bound_branch')
+ bound_branch.bind(branch)
+ self.assertEqual(
+ [('branch root', bound_branch.bzrdir.root_transport.base),
+ ('bound to branch', branch.bzrdir.root_transport.base)],
+ info.gather_location_info(
+ bound_branch.repository, bound_branch, control=bound_branch.bzrdir)
+ )
+
+ def test_gather_location_bound_in_repository(self):
+ repo = self.make_repository('repo', shared=True)
+ repo.set_make_working_trees(False)
+ branch = self.make_branch('branch')
+ bound_branch = controldir.ControlDir.create_branch_convenience(
+ 'repo/bound_branch')
+ bound_branch.bind(branch)
+ self.assertEqual(
+ [('shared repository', bound_branch.repository.bzrdir.user_url),
+ ('repository branch', bound_branch.bzrdir.user_url),
+ ('bound to branch', branch.bzrdir.user_url)],
+ info.gather_location_info(bound_branch.repository, bound_branch)
+ )
+
+ def test_location_list(self):
+ if sys.platform == 'win32':
+ raise tests.TestSkipped('Windows-unfriendly test')
+ locs = info.LocationList('/home/foo')
+ locs.add_url('a', 'file:///home/foo/')
+ locs.add_url('b', 'file:///home/foo/bar/')
+ locs.add_url('c', 'file:///home/bar/bar')
+ locs.add_url('d', 'http://example.com/example/')
+ locs.add_url('e', None)
+ self.assertEqual(locs.locs, [('a', '.'),
+ ('b', 'bar'),
+ ('c', '/home/bar/bar'),
+ ('d', 'http://example.com/example/')])
+ self.assertEqualDiff(' a: .\n b: bar\n c: /home/bar/bar\n'
+ ' d: http://example.com/example/\n',
+ ''.join(locs.get_lines()))
+
+ def test_gather_related_branches(self):
+ branch = self.make_branch('.')
+ branch.lock_write()
+ try:
+ branch.set_public_branch('baz')
+ branch.set_push_location('bar')
+ branch.set_parent('foo')
+ branch.set_submit_branch('qux')
+ finally:
+ branch.unlock()
+ self.assertEqual(
+ [('public branch', 'baz'), ('push branch', 'bar'),
+ ('parent branch', 'foo'), ('submit branch', 'qux')],
+ info._gather_related_branches(branch).locs)
diff --git a/bzrlib/tests/test_inv.py b/bzrlib/tests/test_inv.py
new file mode 100644
index 0000000..ff2838b
--- /dev/null
+++ b/bzrlib/tests/test_inv.py
@@ -0,0 +1,1572 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ chk_map,
+ groupcompress,
+ errors,
+ inventory,
+ osutils,
+ repository,
+ revision,
+ tests,
+ workingtree,
+ )
+from bzrlib.inventory import (
+ CHKInventory,
+ Inventory,
+ ROOT_ID,
+ InventoryFile,
+ InventoryDirectory,
+ InventoryEntry,
+ TreeReference,
+ mutable_inventory_from_tree,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+def delta_application_scenarios():
+ scenarios = [
+ ('Inventory', {'apply_delta':apply_inventory_Inventory}),
+ ]
+ # Working tree basis delta application
+ # Repository add_inv_by_delta.
+ # Reduced form of the per_repository test logic - that logic needs to
+ # be able to get /just/ repositories whereas these tests are fine with
+ # just creating trees.
+ formats = set()
+ for _, format in repository.format_registry.iteritems():
+ if format.supports_full_versioned_files:
+ scenarios.append((str(format.__name__), {
+ 'apply_delta':apply_inventory_Repository_add_inventory_by_delta,
+ 'format':format}))
+ for format in workingtree.format_registry._get_all():
+ repo_fmt = format._matchingbzrdir.repository_format
+ if not repo_fmt.supports_full_versioned_files:
+ continue
+ scenarios.append(
+ (str(format.__class__.__name__) + ".update_basis_by_delta", {
+ 'apply_delta':apply_inventory_WT_basis,
+ 'format':format}))
+ scenarios.append(
+ (str(format.__class__.__name__) + ".apply_inventory_delta", {
+ 'apply_delta':apply_inventory_WT,
+ 'format':format}))
+ return scenarios
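+
+# Each scenario above pairs a name with a dict supplying an 'apply_delta'
+# callable (plus a 'format' for the tree and repository cases);
+# TestDeltaApplication calls it as self.apply_delta(self, basis, delta, ...).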
+
+
+def create_texts_for_inv(repo, inv):
+ for path, ie in inv.iter_entries():
+ if ie.text_size:
+ lines = ['a' * ie.text_size]
+ else:
+ lines = []
+ repo.texts.add_lines((ie.file_id, ie.revision), [], lines)
+
+
+def apply_inventory_Inventory(self, basis, delta, invalid_delta=True):
+ """Apply delta to basis and return the result.
+
+ :param basis: An inventory to be used as the basis.
+ :param delta: The inventory delta to apply.
+ :return: An inventory resulting from the application.
+ """
+ basis.apply_delta(delta)
+ return basis
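+
+# The deltas used throughout these tests are lists of
+# (old_path, new_path, file_id, new_entry) tuples, for example:
+#   [(None, u'path', 'file-id', file1)]      # add file1 at 'path'
+#   [(u'path', u'path2', 'file-id', file2)]  # rename 'path' to 'path2'
+#   [(u'path', None, 'file-id', None)]       # remove the entry at 'path'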
+
+
+def apply_inventory_WT(self, basis, delta, invalid_delta=True):
+ """Apply delta to basis and return the result.
+
+ This sets the tree state to the given basis and then calls
+ apply_inventory_delta.
+
+ :param basis: An inventory to be used as the basis.
+ :param delta: The inventory delta to apply.
+ :return: An inventory resulting from the application.
+ """
+ control = self.make_bzrdir('tree', format=self.format._matchingbzrdir)
+ control.create_repository()
+ control.create_branch()
+ tree = self.format.initialize(control)
+ tree.lock_write()
+ try:
+ tree._write_inventory(basis)
+ finally:
+ tree.unlock()
+ # Fresh object, reads disk again.
+ tree = tree.bzrdir.open_workingtree()
+ tree.lock_write()
+ try:
+ tree.apply_inventory_delta(delta)
+ finally:
+ tree.unlock()
+ # reload tree - ensure we get what was written.
+ tree = tree.bzrdir.open_workingtree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ if not invalid_delta:
+ tree._validate()
+ return tree.root_inventory
+
+
+def _create_repo_revisions(repo, basis, delta, invalid_delta):
+ repo.start_write_group()
+ try:
+ rev = revision.Revision('basis', timestamp=0, timezone=None,
+ message="", committer="foo@example.com")
+ basis.revision_id = 'basis'
+ create_texts_for_inv(repo, basis)
+ repo.add_revision('basis', rev, basis)
+ if invalid_delta:
+ # We don't want to apply the delta to the basis, because we expect
+ # the delta to be invalid.
+ result_inv = basis
+ result_inv.revision_id = 'result'
+ target_entries = None
+ else:
+ result_inv = basis.create_by_apply_delta(delta, 'result')
+ create_texts_for_inv(repo, result_inv)
+ target_entries = list(result_inv.iter_entries_by_dir())
+ rev = revision.Revision('result', timestamp=0, timezone=None,
+ message="", committer="foo@example.com")
+ repo.add_revision('result', rev, result_inv)
+ repo.commit_write_group()
+ except:
+ repo.abort_write_group()
+ raise
+ return target_entries
+
+
+def _get_basis_entries(tree):
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ basis_tree_entries = list(basis_tree.inventory.iter_entries_by_dir())
+ basis_tree.unlock()
+ return basis_tree_entries
+
+
+def _populate_different_tree(tree, basis, delta):
+ """Put all entries into tree, but at a unique location."""
+ added_ids = set()
+ added_paths = set()
+ tree.add(['unique-dir'], ['unique-dir-id'], ['directory'])
+ for path, ie in basis.iter_entries_by_dir():
+ if ie.file_id in added_ids:
+ continue
+ # We want a unique path for each of these, so we use the file-id
+ tree.add(['unique-dir/' + ie.file_id], [ie.file_id], [ie.kind])
+ added_ids.add(ie.file_id)
+ for old_path, new_path, file_id, ie in delta:
+ if file_id in added_ids:
+ continue
+ tree.add(['unique-dir/' + file_id], [file_id], [ie.kind])
+
+
+def apply_inventory_WT_basis(test, basis, delta, invalid_delta=True):
+ """Apply delta to basis and return the result.
+
+ This sets the parent and then calls update_basis_by_delta.
+ It also puts the basis in the repository under both 'basis' and 'result' to
+ allow safety checks made by the WT to succeed, and finally ensures that all
+ items in the delta with a new path are present in the WT before calling
+ update_basis_by_delta.
+
+ :param basis: An inventory to be used as the basis.
+ :param delta: The inventory delta to apply.
+ :return: An inventory resulting from the application.
+ """
+ control = test.make_bzrdir('tree', format=test.format._matchingbzrdir)
+ control.create_repository()
+ control.create_branch()
+ tree = test.format.initialize(control)
+ tree.lock_write()
+ try:
+ target_entries = _create_repo_revisions(tree.branch.repository, basis,
+ delta, invalid_delta)
+ # Set the basis state as the tree's current state
+ tree._write_inventory(basis)
+ # This reads basis from the repo and puts it into the tree's local
+ # cache, if it has one.
+ tree.set_parent_ids(['basis'])
+ finally:
+ tree.unlock()
+ # Fresh lock, reads disk again.
+ tree.lock_write()
+ try:
+ tree.update_basis_by_delta('result', delta)
+ if not invalid_delta:
+ tree._validate()
+ finally:
+ tree.unlock()
+ # reload tree - ensure we get what was written.
+ tree = tree.bzrdir.open_workingtree()
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ test.addCleanup(basis_tree.unlock)
+ basis_inv = basis_tree.root_inventory
+ if target_entries:
+ basis_entries = list(basis_inv.iter_entries_by_dir())
+ test.assertEqual(target_entries, basis_entries)
+ return basis_inv
+
+
+def apply_inventory_Repository_add_inventory_by_delta(self, basis, delta,
+ invalid_delta=True):
+ """Apply delta to basis and return the result.
+
+ This inserts basis as a whole inventory and then uses
+ add_inventory_by_delta to add delta.
+
+ :param basis: An inventory to be used as the basis.
+ :param delta: The inventory delta to apply.
+ :return: An inventory resulting from the application.
+ """
+ format = self.format()
+ control = self.make_bzrdir('tree', format=format._matchingbzrdir)
+ repo = format.initialize(control)
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ rev = revision.Revision('basis', timestamp=0, timezone=None,
+ message="", committer="foo@example.com")
+ basis.revision_id = 'basis'
+ create_texts_for_inv(repo, basis)
+ repo.add_revision('basis', rev, basis)
+ repo.commit_write_group()
+ except:
+ repo.abort_write_group()
+ raise
+ finally:
+ repo.unlock()
+ repo.lock_write()
+ try:
+ repo.start_write_group()
+ try:
+ inv_sha1 = repo.add_inventory_by_delta('basis', delta,
+ 'result', ['basis'])
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ repo.commit_write_group()
+ finally:
+ repo.unlock()
+ # Fresh lock, reads disk again.
+ repo = repo.bzrdir.open_repository()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ return repo.get_inventory('result')
+
+
+class TestInventoryUpdates(TestCase):
+
+ def test_creation_from_root_id(self):
+ # iff a root id is passed to the constructor, a root directory is made
+ inv = inventory.Inventory(root_id='tree-root')
+ self.assertNotEqual(None, inv.root)
+ self.assertEqual('tree-root', inv.root.file_id)
+
+ def test_add_path_of_root(self):
+ # if no root id is given at creation time, there is no root directory
+ inv = inventory.Inventory(root_id=None)
+ self.assertIs(None, inv.root)
+ # add a root entry by adding its path
+ ie = inv.add_path("", "directory", "my-root")
+ ie.revision = 'test-rev'
+ self.assertEqual("my-root", ie.file_id)
+ self.assertIs(ie, inv.root)
+
+ def test_add_path(self):
+ inv = inventory.Inventory(root_id='tree_root')
+ ie = inv.add_path('hello', 'file', 'hello-id')
+ self.assertEqual('hello-id', ie.file_id)
+ self.assertEqual('file', ie.kind)
+
+ def test_copy(self):
+ """Make sure copy() works and creates a deep copy."""
+ inv = inventory.Inventory(root_id='some-tree-root')
+ ie = inv.add_path('hello', 'file', 'hello-id')
+ inv2 = inv.copy()
+ inv.root.file_id = 'some-new-root'
+ ie.name = 'file2'
+ self.assertEqual('some-tree-root', inv2.root.file_id)
+ self.assertEqual('hello', inv2['hello-id'].name)
+
+ def test_copy_empty(self):
+ """Make sure an empty inventory can be copied."""
+ inv = inventory.Inventory(root_id=None)
+ inv2 = inv.copy()
+ self.assertIs(None, inv2.root)
+
+ def test_copy_copies_root_revision(self):
+ """Make sure the revision of the root gets copied."""
+ inv = inventory.Inventory(root_id='someroot')
+ inv.root.revision = 'therev'
+ inv2 = inv.copy()
+ self.assertEqual('someroot', inv2.root.file_id)
+ self.assertEqual('therev', inv2.root.revision)
+
+ def test_create_tree_reference(self):
+ inv = inventory.Inventory('tree-root-123')
+ inv.add(TreeReference('nested-id', 'nested', parent_id='tree-root-123',
+ revision='rev', reference_revision='rev2'))
+
+ def test_error_encoding(self):
+ inv = inventory.Inventory('tree-root')
+ inv.add(InventoryFile('a-id', u'\u1234', 'tree-root'))
+ e = self.assertRaises(errors.InconsistentDelta, inv.add,
+ InventoryFile('b-id', u'\u1234', 'tree-root'))
+ self.assertContainsRe(str(e), r'\\u1234')
+
+ def test_add_recursive(self):
+ parent = InventoryDirectory('src-id', 'src', 'tree-root')
+ child = InventoryFile('hello-id', 'hello.c', 'src-id')
+ parent.children[child.file_id] = child
+ inv = inventory.Inventory('tree-root')
+ inv.add(parent)
+ self.assertEqual('src/hello.c', inv.id2path('hello-id'))
+
+
+class TestDeltaApplication(TestCaseWithTransport):
+
+ scenarios = delta_application_scenarios()
+
+ def get_empty_inventory(self, reference_inv=None):
+ """Get an empty inventory.
+
+ Note that tests should not depend on the revision of the root for
+ setting up test conditions, as it has to be flexible to accommodate
+ non-rich-root repositories.
+
+ :param reference_inv: If not None, get the revision for the root from
+ this inventory. This is useful for dealing with older repositories
+ that routinely discarded the root entry data. If None, the root's
+ revision is set to 'basis'.
+ """
+ inv = inventory.Inventory()
+ if reference_inv is not None:
+ inv.root.revision = reference_inv.root.revision
+ else:
+ inv.root.revision = 'basis'
+ return inv
+
+ def make_file_ie(self, file_id='file-id', name='name', parent_id=None):
+ ie_file = inventory.InventoryFile(file_id, name, parent_id)
+ ie_file.revision = 'result'
+ ie_file.text_size = 0
+ ie_file.text_sha1 = ''
+ return ie_file
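+
+ # With the defaults above, make_file_ie() yields an entry whose file_id
+ # is 'file-id' and whose name is 'name'; several tests below (e.g.
+ # test_rename_file) rely on those defaults.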
+
+ def test_empty_delta(self):
+ inv = self.get_empty_inventory()
+ delta = []
+ inv = self.apply_delta(self, inv, delta)
+ inv2 = self.get_empty_inventory(inv)
+ self.assertEqual([], inv2._make_delta(inv))
+
+ def test_None_file_id(self):
+ inv = self.get_empty_inventory()
+ dir1 = inventory.InventoryDirectory(None, 'dir1', inv.root.file_id)
+ dir1.revision = 'result'
+ delta = [(None, u'dir1', None, dir1)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_unicode_file_id(self):
+ inv = self.get_empty_inventory()
+ dir1 = inventory.InventoryDirectory(u'dirid', 'dir1', inv.root.file_id)
+ dir1.revision = 'result'
+ delta = [(None, u'dir1', dir1.file_id, dir1)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_repeated_file_id(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('id', 'path1', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ file2 = file1.copy()
+ file2.name = 'path2'
+ delta = [(None, u'path1', 'id', file1), (None, u'path2', 'id', file2)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_repeated_new_path(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('id1', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ file2 = file1.copy()
+ file2.file_id = 'id2'
+ delta = [(None, u'path', 'id1', file1), (None, u'path', 'id2', file2)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_repeated_old_path(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('id1', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ # We can't *create* a source inventory with the same path, but
+ # a badly generated partial delta might claim the same source twice.
+ # This would be buggy in two ways: the path is repeated in the delta,
+ # and the path for one of the file ids doesn't match the source
+ # location. Alternatively, we could have a repeated fileid, but that
+ # is separately checked for.
+ file2 = inventory.InventoryFile('id2', 'path2', inv.root.file_id)
+ file2.revision = 'result'
+ file2.text_size = 0
+ file2.text_sha1 = ""
+ inv.add(file1)
+ inv.add(file2)
+ delta = [(u'path', None, 'id1', None), (u'path', None, 'id2', None)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_mismatched_id_entry_id(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('id1', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ delta = [(None, u'path', 'id', file1)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_mismatched_new_path_entry_None(self):
+ inv = self.get_empty_inventory()
+ delta = [(None, u'path', 'id', None)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_mismatched_new_path_None_entry(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('id1', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ delta = [(u"path", None, 'id1', file1)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_parent_is_not_directory(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('id1', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ file2 = inventory.InventoryFile('id2', 'path2', 'id1')
+ file2.revision = 'result'
+ file2.text_size = 0
+ file2.text_sha1 = ""
+ inv.add(file1)
+ delta = [(None, u'path/path2', 'id2', file2)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_parent_is_missing(self):
+ inv = self.get_empty_inventory()
+ file2 = inventory.InventoryFile('id2', 'path2', 'missingparent')
+ file2.revision = 'result'
+ file2.text_size = 0
+ file2.text_sha1 = ""
+ delta = [(None, u'path/path2', 'id2', file2)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_new_parent_path_has_wrong_id(self):
+ inv = self.get_empty_inventory()
+ parent1 = inventory.InventoryDirectory('p-1', 'dir', inv.root.file_id)
+ parent1.revision = 'result'
+ parent2 = inventory.InventoryDirectory('p-2', 'dir2', inv.root.file_id)
+ parent2.revision = 'result'
+ file1 = inventory.InventoryFile('id', 'path', 'p-2')
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ inv.add(parent1)
+ inv.add(parent2)
+ # This delta claims that file1 is at dir/path, but actually it's at
+ # dir2/path if you follow the inventory parent structure.
+ delta = [(None, u'dir/path', 'id', file1)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_old_parent_path_is_wrong(self):
+ inv = self.get_empty_inventory()
+ parent1 = inventory.InventoryDirectory('p-1', 'dir', inv.root.file_id)
+ parent1.revision = 'result'
+ parent2 = inventory.InventoryDirectory('p-2', 'dir2', inv.root.file_id)
+ parent2.revision = 'result'
+ file1 = inventory.InventoryFile('id', 'path', 'p-2')
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ inv.add(parent1)
+ inv.add(parent2)
+ inv.add(file1)
+ # This delta claims that file1 was at dir/path, but actually it was at
+ # dir2/path if you follow the inventory parent structure.
+ delta = [(u'dir/path', None, 'id', None)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_old_parent_path_is_for_other_id(self):
+ inv = self.get_empty_inventory()
+ parent1 = inventory.InventoryDirectory('p-1', 'dir', inv.root.file_id)
+ parent1.revision = 'result'
+ parent2 = inventory.InventoryDirectory('p-2', 'dir2', inv.root.file_id)
+ parent2.revision = 'result'
+ file1 = inventory.InventoryFile('id', 'path', 'p-2')
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ""
+ file2 = inventory.InventoryFile('id2', 'path', 'p-1')
+ file2.revision = 'result'
+ file2.text_size = 0
+ file2.text_sha1 = ""
+ inv.add(parent1)
+ inv.add(parent2)
+ inv.add(file1)
+ inv.add(file2)
+ # This delta claims that file1 was at dir/path, but actually it was at
+ # dir2/path if you follow the inventory parent structure. At dir/path
+ # is another entry we should not delete.
+ delta = [(u'dir/path', None, 'id', None)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_add_existing_id_new_path(self):
+ inv = self.get_empty_inventory()
+ parent1 = inventory.InventoryDirectory('p-1', 'dir1', inv.root.file_id)
+ parent1.revision = 'result'
+ parent2 = inventory.InventoryDirectory('p-1', 'dir2', inv.root.file_id)
+ parent2.revision = 'result'
+ inv.add(parent1)
+ delta = [(None, u'dir2', 'p-1', parent2)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_add_new_id_existing_path(self):
+ inv = self.get_empty_inventory()
+ parent1 = inventory.InventoryDirectory('p-1', 'dir1', inv.root.file_id)
+ parent1.revision = 'result'
+ parent2 = inventory.InventoryDirectory('p-2', 'dir1', inv.root.file_id)
+ parent2.revision = 'result'
+ inv.add(parent1)
+ delta = [(None, u'dir1', 'p-2', parent2)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_remove_dir_leaving_dangling_child(self):
+ inv = self.get_empty_inventory()
+ dir1 = inventory.InventoryDirectory('p-1', 'dir1', inv.root.file_id)
+ dir1.revision = 'result'
+ dir2 = inventory.InventoryDirectory('p-2', 'child1', 'p-1')
+ dir2.revision = 'result'
+ dir3 = inventory.InventoryDirectory('p-3', 'child2', 'p-1')
+ dir3.revision = 'result'
+ inv.add(dir1)
+ inv.add(dir2)
+ inv.add(dir3)
+ delta = [(u'dir1', None, 'p-1', None),
+ (u'dir1/child2', None, 'p-3', None)]
+ self.assertRaises(errors.InconsistentDelta, self.apply_delta, self,
+ inv, delta)
+
+ def test_add_file(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('file-id', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ''
+ delta = [(None, u'path', 'file-id', file1)]
+ res_inv = self.apply_delta(self, inv, delta, invalid_delta=False)
+ self.assertEqual('file-id', res_inv['file-id'].file_id)
+
+ def test_remove_file(self):
+ inv = self.get_empty_inventory()
+ file1 = inventory.InventoryFile('file-id', 'path', inv.root.file_id)
+ file1.revision = 'result'
+ file1.text_size = 0
+ file1.text_sha1 = ''
+ inv.add(file1)
+ delta = [(u'path', None, 'file-id', None)]
+ res_inv = self.apply_delta(self, inv, delta, invalid_delta=False)
+ self.assertEqual(None, res_inv.path2id('path'))
+ self.assertRaises(errors.NoSuchId, res_inv.id2path, 'file-id')
+
+ def test_rename_file(self):
+ inv = self.get_empty_inventory()
+ file1 = self.make_file_ie(name='path', parent_id=inv.root.file_id)
+ inv.add(file1)
+ file2 = self.make_file_ie(name='path2', parent_id=inv.root.file_id)
+ delta = [(u'path', 'path2', 'file-id', file2)]
+ res_inv = self.apply_delta(self, inv, delta, invalid_delta=False)
+ self.assertEqual(None, res_inv.path2id('path'))
+ self.assertEqual('file-id', res_inv.path2id('path2'))
+
+ def test_replaced_at_new_path(self):
+ inv = self.get_empty_inventory()
+ file1 = self.make_file_ie(file_id='id1', parent_id=inv.root.file_id)
+ inv.add(file1)
+ file2 = self.make_file_ie(file_id='id2', parent_id=inv.root.file_id)
+ delta = [(u'name', None, 'id1', None),
+ (None, u'name', 'id2', file2)]
+ res_inv = self.apply_delta(self, inv, delta, invalid_delta=False)
+ self.assertEqual('id2', res_inv.path2id('name'))
+
+ def test_rename_dir(self):
+ inv = self.get_empty_inventory()
+ dir1 = inventory.InventoryDirectory('dir-id', 'dir1', inv.root.file_id)
+ dir1.revision = 'basis'
+ file1 = self.make_file_ie(parent_id='dir-id')
+ inv.add(dir1)
+ inv.add(file1)
+ dir2 = inventory.InventoryDirectory('dir-id', 'dir2', inv.root.file_id)
+ dir2.revision = 'result'
+ delta = [('dir1', 'dir2', 'dir-id', dir2)]
+ res_inv = self.apply_delta(self, inv, delta, invalid_delta=False)
+ # The file should be accessible under the new path
+ self.assertEqual('file-id', res_inv.path2id('dir2/name'))
+
+ def test_renamed_dir_with_renamed_child(self):
+ inv = self.get_empty_inventory()
+ dir1 = inventory.InventoryDirectory('dir-id', 'dir1', inv.root.file_id)
+ dir1.revision = 'basis'
+ file1 = self.make_file_ie('file-id-1', 'name1', parent_id='dir-id')
+ file2 = self.make_file_ie('file-id-2', 'name2', parent_id='dir-id')
+ inv.add(dir1)
+ inv.add(file1)
+ inv.add(file2)
+ dir2 = inventory.InventoryDirectory('dir-id', 'dir2', inv.root.file_id)
+ dir2.revision = 'result'
+ file2b = self.make_file_ie('file-id-2', 'name2', inv.root.file_id)
+ delta = [('dir1', 'dir2', 'dir-id', dir2),
+ ('dir1/name2', 'name2', 'file-id-2', file2b)]
+ res_inv = self.apply_delta(self, inv, delta, invalid_delta=False)
+ # The file should be accessible under the new path
+ self.assertEqual('file-id-1', res_inv.path2id('dir2/name1'))
+ self.assertEqual(None, res_inv.path2id('dir2/name2'))
+ self.assertEqual('file-id-2', res_inv.path2id('name2'))
+
+ def test_is_root(self):
+ """Ensure our root-checking code is accurate."""
+ inv = inventory.Inventory('TREE_ROOT')
+ self.assertTrue(inv.is_root('TREE_ROOT'))
+ self.assertFalse(inv.is_root('booga'))
+ inv.root.file_id = 'booga'
+ self.assertFalse(inv.is_root('TREE_ROOT'))
+ self.assertTrue(inv.is_root('booga'))
+ # works properly even if no root is set
+ inv.root = None
+ self.assertFalse(inv.is_root('TREE_ROOT'))
+ self.assertFalse(inv.is_root('booga'))
+
+ def test_entries_for_empty_inventory(self):
+ """Test that entries() will not fail for an empty inventory"""
+ inv = Inventory(root_id=None)
+ self.assertEqual([], inv.entries())
+
+
+class TestInventoryEntry(TestCase):
+
+ def test_file_kind_character(self):
+ file = inventory.InventoryFile('123', 'hello.c', ROOT_ID)
+ self.assertEqual(file.kind_character(), '')
+
+ def test_dir_kind_character(self):
+ dir = inventory.InventoryDirectory('123', 'hello.c', ROOT_ID)
+ self.assertEqual(dir.kind_character(), '/')
+
+ def test_link_kind_character(self):
+ dir = inventory.InventoryLink('123', 'hello.c', ROOT_ID)
+ self.assertEqual(dir.kind_character(), '')
+
+ def test_dir_detect_changes(self):
+ left = inventory.InventoryDirectory('123', 'hello.c', ROOT_ID)
+ right = inventory.InventoryDirectory('123', 'hello.c', ROOT_ID)
+ self.assertEqual((False, False), left.detect_changes(right))
+ self.assertEqual((False, False), right.detect_changes(left))
+
+ def test_file_detect_changes(self):
+ left = inventory.InventoryFile('123', 'hello.c', ROOT_ID)
+ left.text_sha1 = 123
+ right = inventory.InventoryFile('123', 'hello.c', ROOT_ID)
+ right.text_sha1 = 123
+ self.assertEqual((False, False), left.detect_changes(right))
+ self.assertEqual((False, False), right.detect_changes(left))
+ left.executable = True
+ self.assertEqual((False, True), left.detect_changes(right))
+ self.assertEqual((False, True), right.detect_changes(left))
+ right.text_sha1 = 321
+ self.assertEqual((True, True), left.detect_changes(right))
+ self.assertEqual((True, True), right.detect_changes(left))
+
+ def test_symlink_detect_changes(self):
+ left = inventory.InventoryLink('123', 'hello.c', ROOT_ID)
+ left.symlink_target = 'foo'
+ right = inventory.InventoryLink('123', 'hello.c', ROOT_ID)
+ right.symlink_target = 'foo'
+ self.assertEqual((False, False), left.detect_changes(right))
+ self.assertEqual((False, False), right.detect_changes(left))
+ left.symlink_target = 'different'
+ self.assertEqual((True, False), left.detect_changes(right))
+ self.assertEqual((True, False), right.detect_changes(left))
+
+ def test_file_has_text(self):
+ file = inventory.InventoryFile('123', 'hello.c', ROOT_ID)
+ self.assertTrue(file.has_text())
+
+ def test_directory_has_text(self):
+ dir = inventory.InventoryDirectory('123', 'hello.c', ROOT_ID)
+ self.assertFalse(dir.has_text())
+
+ def test_link_has_text(self):
+ link = inventory.InventoryLink('123', 'hello.c', ROOT_ID)
+ self.assertFalse(link.has_text())
+
+ def test_make_entry(self):
+ self.assertIsInstance(inventory.make_entry("file", "name", ROOT_ID),
+ inventory.InventoryFile)
+ self.assertIsInstance(inventory.make_entry("symlink", "name", ROOT_ID),
+ inventory.InventoryLink)
+ self.assertIsInstance(inventory.make_entry("directory", "name", ROOT_ID),
+ inventory.InventoryDirectory)
+
+ def test_make_entry_non_normalized(self):
+ orig_normalized_filename = osutils.normalized_filename
+
+ try:
+ osutils.normalized_filename = osutils._accessible_normalized_filename
+ entry = inventory.make_entry("file", u'a\u030a', ROOT_ID)
+ self.assertEqual(u'\xe5', entry.name)
+ self.assertIsInstance(entry, inventory.InventoryFile)
+
+ osutils.normalized_filename = osutils._inaccessible_normalized_filename
+ self.assertRaises(errors.InvalidNormalization,
+ inventory.make_entry, 'file', u'a\u030a', ROOT_ID)
+ finally:
+ osutils.normalized_filename = orig_normalized_filename
+
+
+class TestDescribeChanges(TestCase):
+
+ def test_describe_change(self):
+ # we need to test the following change combinations:
+ # rename
+ # reparent
+ # modify
+ # gone
+ # added
+ # renamed/reparented and modified
+ # change kind (perhaps can't be done yet?)
+ # also, merged in combination with all of these?
+ old_a = InventoryFile('a-id', 'a_file', ROOT_ID)
+ old_a.text_sha1 = '123132'
+ old_a.text_size = 0
+ new_a = InventoryFile('a-id', 'a_file', ROOT_ID)
+ new_a.text_sha1 = '123132'
+ new_a.text_size = 0
+
+ self.assertChangeDescription('unchanged', old_a, new_a)
+
+ new_a.text_size = 10
+ new_a.text_sha1 = 'abcabc'
+ self.assertChangeDescription('modified', old_a, new_a)
+
+ self.assertChangeDescription('added', None, new_a)
+ self.assertChangeDescription('removed', old_a, None)
+ # perhaps a bit questionable but seems like the most reasonable thing...
+ self.assertChangeDescription('unchanged', None, None)
+
+ # in this case it's both renamed and modified; show a rename and
+ # modification:
+ new_a.name = 'newfilename'
+ self.assertChangeDescription('modified and renamed', old_a, new_a)
+
+ # reparenting is 'renaming'
+ new_a.name = old_a.name
+ new_a.parent_id = 'somedir-id'
+ self.assertChangeDescription('modified and renamed', old_a, new_a)
+
+ # reset the content values so it's not modified
+ new_a.text_size = old_a.text_size
+ new_a.text_sha1 = old_a.text_sha1
+ new_a.name = old_a.name
+
+ new_a.name = 'newfilename'
+ self.assertChangeDescription('renamed', old_a, new_a)
+
+ # reparenting is 'renaming'
+ new_a.name = old_a.name
+ new_a.parent_id = 'somedir-id'
+ self.assertChangeDescription('renamed', old_a, new_a)
+
+ def assertChangeDescription(self, expected_change, old_ie, new_ie):
+ change = InventoryEntry.describe_change(old_ie, new_ie)
+ self.assertEqual(expected_change, change)
+
+
+class TestCHKInventory(tests.TestCaseWithMemoryTransport):
+
+ def get_chk_bytes(self):
+ factory = groupcompress.make_pack_factory(True, True, 1)
+ trans = self.get_transport('')
+ return factory(trans)
+
+ def read_bytes(self, chk_bytes, key):
+ stream = chk_bytes.get_record_stream([key], 'unordered', True)
+ return stream.next().get_bytes_as("fulltext")
+
+ def test_deserialise_gives_CHKInventory(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ self.assertEqual("revid", new_inv.revision_id)
+ self.assertEqual("directory", new_inv.root.kind)
+ self.assertEqual(inv.root.file_id, new_inv.root.file_id)
+ self.assertEqual(inv.root.parent_id, new_inv.root.parent_id)
+ self.assertEqual(inv.root.name, new_inv.root.name)
+ self.assertEqual("rootrev", new_inv.root.revision)
+ self.assertEqual('plain', new_inv._search_key_name)
+
+ def test_deserialise_wrong_revid(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ self.assertRaises(ValueError, CHKInventory.deserialise, chk_bytes,
+ bytes, ("revid2",))
+
+ def test_captures_rev_root_byid(self):
+ inv = Inventory()
+ inv.revision_id = "foo"
+ inv.root.revision = "bar"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ lines = chk_inv.to_lines()
+ self.assertEqual([
+ 'chkinventory:\n',
+ 'revision_id: foo\n',
+ 'root_id: TREE_ROOT\n',
+ 'parent_id_basename_to_file_id: sha1:eb23f0ad4b07f48e88c76d4c94292be57fb2785f\n',
+ 'id_to_entry: sha1:debfe920f1f10e7929260f0534ac9a24d7aabbb4\n',
+ ], lines)
+ chk_inv = CHKInventory.deserialise(chk_bytes, ''.join(lines), ('foo',))
+ self.assertEqual('plain', chk_inv._search_key_name)
+
+ def test_captures_parent_id_basename_index(self):
+ inv = Inventory()
+ inv.revision_id = "foo"
+ inv.root.revision = "bar"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ lines = chk_inv.to_lines()
+ self.assertEqual([
+ 'chkinventory:\n',
+ 'revision_id: foo\n',
+ 'root_id: TREE_ROOT\n',
+ 'parent_id_basename_to_file_id: sha1:eb23f0ad4b07f48e88c76d4c94292be57fb2785f\n',
+ 'id_to_entry: sha1:debfe920f1f10e7929260f0534ac9a24d7aabbb4\n',
+ ], lines)
+ chk_inv = CHKInventory.deserialise(chk_bytes, ''.join(lines), ('foo',))
+ self.assertEqual('plain', chk_inv._search_key_name)
+
+ def test_captures_search_key_name(self):
+ inv = Inventory()
+ inv.revision_id = "foo"
+ inv.root.revision = "bar"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv,
+ search_key_name='hash-16-way')
+ lines = chk_inv.to_lines()
+ self.assertEqual([
+ 'chkinventory:\n',
+ 'search_key_name: hash-16-way\n',
+ 'root_id: TREE_ROOT\n',
+ 'parent_id_basename_to_file_id: sha1:eb23f0ad4b07f48e88c76d4c94292be57fb2785f\n',
+ 'revision_id: foo\n',
+ 'id_to_entry: sha1:debfe920f1f10e7929260f0534ac9a24d7aabbb4\n',
+ ], lines)
+ chk_inv = CHKInventory.deserialise(chk_bytes, ''.join(lines), ('foo',))
+ self.assertEqual('hash-16-way', chk_inv._search_key_name)
+
+ def test_directory_children_on_demand(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ root_entry = new_inv[inv.root.file_id]
+ self.assertEqual(None, root_entry._children)
+ self.assertEqual(['file'], root_entry.children.keys())
+ file_direct = new_inv["fileid"]
+ file_found = root_entry.children['file']
+ self.assertEqual(file_direct.kind, file_found.kind)
+ self.assertEqual(file_direct.file_id, file_found.file_id)
+ self.assertEqual(file_direct.parent_id, file_found.parent_id)
+ self.assertEqual(file_direct.name, file_found.name)
+ self.assertEqual(file_direct.revision, file_found.revision)
+ self.assertEqual(file_direct.text_sha1, file_found.text_sha1)
+ self.assertEqual(file_direct.text_size, file_found.text_size)
+ self.assertEqual(file_direct.executable, file_found.executable)
+
+ def test_from_inventory_maximum_size(self):
+ # from_inventory supports the maximum_size parameter.
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv, 120)
+ chk_inv.id_to_entry._ensure_root()
+ self.assertEqual(120, chk_inv.id_to_entry._root_node.maximum_size)
+ self.assertEqual(1, chk_inv.id_to_entry._root_node._key_width)
+ p_id_basename = chk_inv.parent_id_basename_to_file_id
+ p_id_basename._ensure_root()
+ self.assertEqual(120, p_id_basename._root_node.maximum_size)
+ self.assertEqual(2, p_id_basename._root_node._key_width)
+
+ def test___iter__(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ fileids = list(new_inv.__iter__())
+ fileids.sort()
+ self.assertEqual([inv.root.file_id, "fileid"], fileids)
+
+ def test__len__(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ self.assertEqual(2, len(chk_inv))
+
+ def test___getitem__(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ root_entry = new_inv[inv.root.file_id]
+ file_entry = new_inv["fileid"]
+ self.assertEqual("directory", root_entry.kind)
+ self.assertEqual(inv.root.file_id, root_entry.file_id)
+ self.assertEqual(inv.root.parent_id, root_entry.parent_id)
+ self.assertEqual(inv.root.name, root_entry.name)
+ self.assertEqual("rootrev", root_entry.revision)
+ self.assertEqual("file", file_entry.kind)
+ self.assertEqual("fileid", file_entry.file_id)
+ self.assertEqual(inv.root.file_id, file_entry.parent_id)
+ self.assertEqual("file", file_entry.name)
+ self.assertEqual("filerev", file_entry.revision)
+ self.assertEqual("ffff", file_entry.text_sha1)
+ self.assertEqual(1, file_entry.text_size)
+ self.assertEqual(True, file_entry.executable)
+ self.assertRaises(errors.NoSuchId, new_inv.__getitem__, 'missing')
+
+ def test_has_id_true(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ self.assertTrue(chk_inv.has_id('fileid'))
+ self.assertTrue(chk_inv.has_id(inv.root.file_id))
+
+ def test_has_id_not(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ self.assertFalse(chk_inv.has_id('fileid'))
+
+ def test_id2path(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ direntry = InventoryDirectory("dirid", "dir", inv.root.file_id)
+ fileentry = InventoryFile("fileid", "file", "dirid")
+ inv.add(direntry)
+ inv.add(fileentry)
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ inv["dirid"].revision = "filerev"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ self.assertEqual('', new_inv.id2path(inv.root.file_id))
+ self.assertEqual('dir', new_inv.id2path('dirid'))
+ self.assertEqual('dir/file', new_inv.id2path('fileid'))
+
+ def test_path2id(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ direntry = InventoryDirectory("dirid", "dir", inv.root.file_id)
+ fileentry = InventoryFile("fileid", "file", "dirid")
+ inv.add(direntry)
+ inv.add(fileentry)
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ inv["dirid"].revision = "filerev"
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ self.assertEqual(inv.root.file_id, new_inv.path2id(''))
+ self.assertEqual('dirid', new_inv.path2id('dir'))
+ self.assertEqual('fileid', new_inv.path2id('dir/file'))
+
+ def test_create_by_apply_delta_sets_root(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ chk_bytes = self.get_chk_bytes()
+ base_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ inv.add_path("", "directory", "myrootid", None)
+ inv.revision_id = "expectedid"
+ reference_inv = CHKInventory.from_inventory(chk_bytes, inv)
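+        # Each delta item is (old_path, new_path, file_id, new_entry): drop
+        # the original root and add a new root entry under 'myrootid'.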
+ delta = [("", None, base_inv.root.file_id, None),
+ (None, "", "myrootid", inv.root)]
+ new_inv = base_inv.create_by_apply_delta(delta, "expectedid")
+ self.assertEquals(reference_inv.root, new_inv.root)
+
+ def test_create_by_apply_delta_empty_add_child(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ chk_bytes = self.get_chk_bytes()
+ base_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ a_entry = InventoryFile("A-id", "A", inv.root.file_id)
+ a_entry.revision = "filerev"
+ a_entry.executable = True
+ a_entry.text_sha1 = "ffff"
+ a_entry.text_size = 1
+ inv.add(a_entry)
+ inv.revision_id = "expectedid"
+ reference_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ delta = [(None, "A", "A-id", a_entry)]
+ new_inv = base_inv.create_by_apply_delta(delta, "expectedid")
+ # new_inv should be the same as reference_inv.
+ self.assertEqual(reference_inv.revision_id, new_inv.revision_id)
+ self.assertEqual(reference_inv.root_id, new_inv.root_id)
+ reference_inv.id_to_entry._ensure_root()
+ new_inv.id_to_entry._ensure_root()
+ self.assertEqual(reference_inv.id_to_entry._root_node._key,
+ new_inv.id_to_entry._root_node._key)
+
+ def test_create_by_apply_delta_empty_add_child_updates_parent_id(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ chk_bytes = self.get_chk_bytes()
+ base_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ a_entry = InventoryFile("A-id", "A", inv.root.file_id)
+ a_entry.revision = "filerev"
+ a_entry.executable = True
+ a_entry.text_sha1 = "ffff"
+ a_entry.text_size = 1
+ inv.add(a_entry)
+ inv.revision_id = "expectedid"
+ reference_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ delta = [(None, "A", "A-id", a_entry)]
+ new_inv = base_inv.create_by_apply_delta(delta, "expectedid")
+ reference_inv.id_to_entry._ensure_root()
+ reference_inv.parent_id_basename_to_file_id._ensure_root()
+ new_inv.id_to_entry._ensure_root()
+ new_inv.parent_id_basename_to_file_id._ensure_root()
+ # new_inv should be the same as reference_inv.
+ self.assertEqual(reference_inv.revision_id, new_inv.revision_id)
+ self.assertEqual(reference_inv.root_id, new_inv.root_id)
+ self.assertEqual(reference_inv.id_to_entry._root_node._key,
+ new_inv.id_to_entry._root_node._key)
+ self.assertEqual(reference_inv.parent_id_basename_to_file_id._root_node._key,
+ new_inv.parent_id_basename_to_file_id._root_node._key)
+
+ def test_iter_changes(self):
+        # Low-level bootstrapping smoke test; comprehensive generic tests via
+        # InterTree are coming.
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ inv2 = Inventory()
+ inv2.revision_id = "revid2"
+ inv2.root.revision = "rootrev"
+ inv2.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv2["fileid"].revision = "filerev2"
+ inv2["fileid"].executable = False
+ inv2["fileid"].text_sha1 = "bbbb"
+ inv2["fileid"].text_size = 2
+ # get fresh objects.
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ inv_1 = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ chk_inv2 = CHKInventory.from_inventory(chk_bytes, inv2)
+ bytes = ''.join(chk_inv2.to_lines())
+ inv_2 = CHKInventory.deserialise(chk_bytes, bytes, ("revid2",))
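+        # iter_changes yields (file_id, (old_path, new_path), changed_content,
+        # versioned, parent, name, kind, executable) tuples; the paired values
+        # come from the inventory passed in and from self respectively, so
+        # executable goes (False, True) here.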
+ self.assertEqual([('fileid', (u'file', u'file'), True, (True, True),
+ ('TREE_ROOT', 'TREE_ROOT'), (u'file', u'file'), ('file', 'file'),
+ (False, True))],
+ list(inv_1.iter_changes(inv_2)))
+
+ def test_parent_id_basename_to_file_id_index_enabled(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ inv.add(InventoryFile("fileid", "file", inv.root.file_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ # get fresh objects.
+ chk_bytes = self.get_chk_bytes()
+ tmp_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(tmp_inv.to_lines())
+ chk_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ self.assertIsInstance(chk_inv.parent_id_basename_to_file_id, chk_map.CHKMap)
+ self.assertEqual(
+ {('', ''): 'TREE_ROOT', ('TREE_ROOT', 'file'): 'fileid'},
+ dict(chk_inv.parent_id_basename_to_file_id.iteritems()))
+
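+    # The entry serialisation tests below exercise the byte layout used by
+    # CHKInventory._entry_to_bytes: '<kind>: <file_id>' followed by
+    # newline-separated parent_id, name and revision, plus kind-specific
+    # trailing fields (sha1, size and exec flag for files, the symlink
+    # target, or the tree reference revision).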
+ def test_file_entry_to_bytes(self):
+ inv = CHKInventory(None)
+ ie = inventory.InventoryFile('file-id', 'filename', 'parent-id')
+ ie.executable = True
+ ie.revision = 'file-rev-id'
+ ie.text_sha1 = 'abcdefgh'
+ ie.text_size = 100
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('file: file-id\nparent-id\nfilename\n'
+ 'file-rev-id\nabcdefgh\n100\nY', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertEqual(('filename', 'file-id', 'file-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def test_file2_entry_to_bytes(self):
+ inv = CHKInventory(None)
+        # \u03a9 == 'GREEK CAPITAL LETTER OMEGA'
+ ie = inventory.InventoryFile('file-id', u'\u03a9name', 'parent-id')
+ ie.executable = False
+ ie.revision = 'file-rev-id'
+ ie.text_sha1 = '123456'
+ ie.text_size = 25
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('file: file-id\nparent-id\n\xce\xa9name\n'
+ 'file-rev-id\n123456\n25\nN', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertEqual(('\xce\xa9name', 'file-id', 'file-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def test_dir_entry_to_bytes(self):
+ inv = CHKInventory(None)
+ ie = inventory.InventoryDirectory('dir-id', 'dirname', 'parent-id')
+ ie.revision = 'dir-rev-id'
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('dir: dir-id\nparent-id\ndirname\ndir-rev-id', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertEqual(('dirname', 'dir-id', 'dir-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def test_dir2_entry_to_bytes(self):
+ inv = CHKInventory(None)
+ ie = inventory.InventoryDirectory('dir-id', u'dir\u03a9name',
+ None)
+ ie.revision = 'dir-rev-id'
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('dir: dir-id\n\ndir\xce\xa9name\n'
+ 'dir-rev-id', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertIs(ie2.parent_id, None)
+ self.assertEqual(('dir\xce\xa9name', 'dir-id', 'dir-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def test_symlink_entry_to_bytes(self):
+ inv = CHKInventory(None)
+ ie = inventory.InventoryLink('link-id', 'linkname', 'parent-id')
+ ie.revision = 'link-rev-id'
+ ie.symlink_target = u'target/path'
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('symlink: link-id\nparent-id\nlinkname\n'
+ 'link-rev-id\ntarget/path', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertIsInstance(ie2.symlink_target, unicode)
+ self.assertEqual(('linkname', 'link-id', 'link-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def test_symlink2_entry_to_bytes(self):
+ inv = CHKInventory(None)
+ ie = inventory.InventoryLink('link-id', u'link\u03a9name', 'parent-id')
+ ie.revision = 'link-rev-id'
+ ie.symlink_target = u'target/\u03a9path'
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('symlink: link-id\nparent-id\nlink\xce\xa9name\n'
+ 'link-rev-id\ntarget/\xce\xa9path', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertIsInstance(ie2.symlink_target, unicode)
+ self.assertEqual(('link\xce\xa9name', 'link-id', 'link-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def test_tree_reference_entry_to_bytes(self):
+ inv = CHKInventory(None)
+ ie = inventory.TreeReference('tree-root-id', u'tree\u03a9name',
+ 'parent-id')
+ ie.revision = 'tree-rev-id'
+ ie.reference_revision = 'ref-rev-id'
+ bytes = inv._entry_to_bytes(ie)
+ self.assertEqual('tree: tree-root-id\nparent-id\ntree\xce\xa9name\n'
+ 'tree-rev-id\nref-rev-id', bytes)
+ ie2 = inv._bytes_to_entry(bytes)
+ self.assertEqual(ie, ie2)
+ self.assertIsInstance(ie2.name, unicode)
+ self.assertEqual(('tree\xce\xa9name', 'tree-root-id', 'tree-rev-id'),
+ inv._bytes_to_utf8name_key(bytes))
+
+ def make_basic_utf8_inventory(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ root_id = inv.root.file_id
+ inv.add(InventoryFile("fileid", u'f\xefle', root_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 0
+ inv.add(InventoryDirectory("dirid", u'dir-\N{EURO SIGN}', root_id))
+ inv.add(InventoryFile("childid", u'ch\xefld', "dirid"))
+ inv["childid"].revision = "filerev"
+ inv["childid"].text_sha1 = "ffff"
+ inv["childid"].text_size = 0
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ return CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+
+ def test__preload_handles_utf8(self):
+ new_inv = self.make_basic_utf8_inventory()
+ self.assertEqual({}, new_inv._fileid_to_entry_cache)
+ self.assertFalse(new_inv._fully_cached)
+ new_inv._preload_cache()
+ self.assertEqual(
+ sorted([new_inv.root_id, "fileid", "dirid", "childid"]),
+ sorted(new_inv._fileid_to_entry_cache.keys()))
+ ie_root = new_inv._fileid_to_entry_cache[new_inv.root_id]
+ self.assertEqual([u'dir-\N{EURO SIGN}', u'f\xefle'],
+ sorted(ie_root._children.keys()))
+ ie_dir = new_inv._fileid_to_entry_cache['dirid']
+ self.assertEqual([u'ch\xefld'], sorted(ie_dir._children.keys()))
+
+ def test__preload_populates_cache(self):
+ inv = Inventory()
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ root_id = inv.root.file_id
+ inv.add(InventoryFile("fileid", "file", root_id))
+ inv["fileid"].revision = "filerev"
+ inv["fileid"].executable = True
+ inv["fileid"].text_sha1 = "ffff"
+ inv["fileid"].text_size = 1
+ inv.add(InventoryDirectory("dirid", "dir", root_id))
+ inv.add(InventoryFile("childid", "child", "dirid"))
+ inv["childid"].revision = "filerev"
+ inv["childid"].executable = False
+ inv["childid"].text_sha1 = "dddd"
+ inv["childid"].text_size = 1
+ chk_bytes = self.get_chk_bytes()
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv)
+ bytes = ''.join(chk_inv.to_lines())
+ new_inv = CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+ self.assertEqual({}, new_inv._fileid_to_entry_cache)
+ self.assertFalse(new_inv._fully_cached)
+ new_inv._preload_cache()
+ self.assertEqual(
+ sorted([root_id, "fileid", "dirid", "childid"]),
+ sorted(new_inv._fileid_to_entry_cache.keys()))
+ self.assertTrue(new_inv._fully_cached)
+ ie_root = new_inv._fileid_to_entry_cache[root_id]
+ self.assertEqual(['dir', 'file'], sorted(ie_root._children.keys()))
+ ie_dir = new_inv._fileid_to_entry_cache['dirid']
+ self.assertEqual(['child'], sorted(ie_dir._children.keys()))
+
+ def test__preload_handles_partially_evaluated_inventory(self):
+ new_inv = self.make_basic_utf8_inventory()
+ ie = new_inv[new_inv.root_id]
+ self.assertIs(None, ie._children)
+ self.assertEqual([u'dir-\N{EURO SIGN}', u'f\xefle'],
+ sorted(ie.children.keys()))
+ # Accessing .children loads _children
+ self.assertEqual([u'dir-\N{EURO SIGN}', u'f\xefle'],
+ sorted(ie._children.keys()))
+ new_inv._preload_cache()
+ # No change
+ self.assertEqual([u'dir-\N{EURO SIGN}', u'f\xefle'],
+ sorted(ie._children.keys()))
+ ie_dir = new_inv["dirid"]
+ self.assertEqual([u'ch\xefld'],
+ sorted(ie_dir._children.keys()))
+
+ def test_filter_change_in_renamed_subfolder(self):
+ inv = Inventory('tree-root')
+ src_ie = inv.add_path('src', 'directory', 'src-id')
+ inv.add_path('src/sub/', 'directory', 'sub-id')
+ a_ie = inv.add_path('src/sub/a', 'file', 'a-id')
+ a_ie.text_sha1 = osutils.sha_string('content\n')
+ a_ie.text_size = len('content\n')
+ chk_bytes = self.get_chk_bytes()
+ inv = CHKInventory.from_inventory(chk_bytes, inv)
+ inv = inv.create_by_apply_delta([
+ ("src/sub/a", "src/sub/a", "a-id", a_ie),
+ ("src", "src2", "src-id", src_ie),
+ ], 'new-rev-2')
+ new_inv = inv.filter(['a-id', 'src-id'])
+ self.assertEqual([
+ ('', 'tree-root'),
+ ('src', 'src-id'),
+ ('src/sub', 'sub-id'),
+ ('src/sub/a', 'a-id'),
+ ], [(path, ie.file_id) for path, ie in new_inv.iter_entries()])
+
+
+class TestCHKInventoryExpand(tests.TestCaseWithMemoryTransport):
+
+ def get_chk_bytes(self):
+ factory = groupcompress.make_pack_factory(True, True, 1)
+ trans = self.get_transport('')
+ return factory(trans)
+
+ def make_dir(self, inv, name, parent_id):
+ inv.add(inv.make_entry('directory', name, parent_id, name + '-id'))
+
+ def make_file(self, inv, name, parent_id, content='content\n'):
+ ie = inv.make_entry('file', name, parent_id, name + '-id')
+ ie.text_sha1 = osutils.sha_string(content)
+ ie.text_size = len(content)
+ inv.add(ie)
+
+ def make_simple_inventory(self):
+ inv = Inventory('TREE_ROOT')
+ inv.revision_id = "revid"
+ inv.root.revision = "rootrev"
+ # / TREE_ROOT
+ # dir1/ dir1-id
+ # sub-file1 sub-file1-id
+ # sub-file2 sub-file2-id
+ # sub-dir1/ sub-dir1-id
+ # subsub-file1 subsub-file1-id
+ # dir2/ dir2-id
+ # sub2-file1 sub2-file1-id
+ # top top-id
+ self.make_dir(inv, 'dir1', 'TREE_ROOT')
+ self.make_dir(inv, 'dir2', 'TREE_ROOT')
+ self.make_dir(inv, 'sub-dir1', 'dir1-id')
+ self.make_file(inv, 'top', 'TREE_ROOT')
+ self.make_file(inv, 'sub-file1', 'dir1-id')
+ self.make_file(inv, 'sub-file2', 'dir1-id')
+ self.make_file(inv, 'subsub-file1', 'sub-dir1-id')
+ self.make_file(inv, 'sub2-file1', 'dir2-id')
+ chk_bytes = self.get_chk_bytes()
+ # use a small maximum_size to force internal paging structures
+ chk_inv = CHKInventory.from_inventory(chk_bytes, inv,
+ maximum_size=100,
+ search_key_name='hash-255-way')
+ bytes = ''.join(chk_inv.to_lines())
+ return CHKInventory.deserialise(chk_bytes, bytes, ("revid",))
+
+ def assert_Getitems(self, expected_fileids, inv, file_ids):
+ self.assertEqual(sorted(expected_fileids),
+ sorted([ie.file_id for ie in inv._getitems(file_ids)]))
+
+ def assertExpand(self, all_ids, inv, file_ids):
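+        # _expand_fileids_to_parents_and_children returns the requested ids
+        # plus all of their parents and children, together with a mapping of
+        # parent id -> child ids; check both against _getitems() results.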
+ (val_all_ids,
+ val_children) = inv._expand_fileids_to_parents_and_children(file_ids)
+ self.assertEqual(set(all_ids), val_all_ids)
+ entries = inv._getitems(val_all_ids)
+ expected_children = {}
+ for entry in entries:
+ s = expected_children.setdefault(entry.parent_id, [])
+ s.append(entry.file_id)
+ val_children = dict((k, sorted(v)) for k, v
+ in val_children.iteritems())
+ expected_children = dict((k, sorted(v)) for k, v
+ in expected_children.iteritems())
+ self.assertEqual(expected_children, val_children)
+
+ def test_make_simple_inventory(self):
+ inv = self.make_simple_inventory()
+ layout = []
+ for path, entry in inv.iter_entries_by_dir():
+ layout.append((path, entry.file_id))
+ self.assertEqual([
+ ('', 'TREE_ROOT'),
+ ('dir1', 'dir1-id'),
+ ('dir2', 'dir2-id'),
+ ('top', 'top-id'),
+ ('dir1/sub-dir1', 'sub-dir1-id'),
+ ('dir1/sub-file1', 'sub-file1-id'),
+ ('dir1/sub-file2', 'sub-file2-id'),
+ ('dir1/sub-dir1/subsub-file1', 'subsub-file1-id'),
+ ('dir2/sub2-file1', 'sub2-file1-id'),
+ ], layout)
+
+ def test__getitems(self):
+ inv = self.make_simple_inventory()
+ # Reading from disk
+ self.assert_Getitems(['dir1-id'], inv, ['dir1-id'])
+ self.assertTrue('dir1-id' in inv._fileid_to_entry_cache)
+ self.assertFalse('sub-file2-id' in inv._fileid_to_entry_cache)
+ # From cache
+ self.assert_Getitems(['dir1-id'], inv, ['dir1-id'])
+ # Mixed
+ self.assert_Getitems(['dir1-id', 'sub-file2-id'], inv,
+ ['dir1-id', 'sub-file2-id'])
+ self.assertTrue('dir1-id' in inv._fileid_to_entry_cache)
+ self.assertTrue('sub-file2-id' in inv._fileid_to_entry_cache)
+
+ def test_single_file(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'top-id'], inv, ['top-id'])
+
+ def test_get_all_parents(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'dir1-id', 'sub-dir1-id',
+ 'subsub-file1-id',
+ ], inv, ['subsub-file1-id'])
+
+ def test_get_children(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'dir1-id', 'sub-dir1-id',
+ 'sub-file1-id', 'sub-file2-id', 'subsub-file1-id',
+ ], inv, ['dir1-id'])
+
+ def test_from_root(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'dir1-id', 'dir2-id', 'sub-dir1-id',
+ 'sub-file1-id', 'sub-file2-id', 'sub2-file1-id',
+ 'subsub-file1-id', 'top-id'], inv, ['TREE_ROOT'])
+
+ def test_top_level_file(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'top-id'], inv, ['top-id'])
+
+ def test_subsub_file(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'dir1-id', 'sub-dir1-id',
+ 'subsub-file1-id'], inv, ['subsub-file1-id'])
+
+ def test_sub_and_root(self):
+ inv = self.make_simple_inventory()
+ self.assertExpand(['TREE_ROOT', 'dir1-id', 'sub-dir1-id', 'top-id',
+ 'subsub-file1-id'], inv, ['top-id', 'subsub-file1-id'])
+
+
+class TestMutableInventoryFromTree(TestCaseWithTransport):
+
+ def test_empty(self):
+ repository = self.make_repository('.')
+ tree = repository.revision_tree(revision.NULL_REVISION)
+ inv = mutable_inventory_from_tree(tree)
+ self.assertEquals(revision.NULL_REVISION, inv.revision_id)
+ self.assertEquals(0, len(inv))
+
+ def test_some_files(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['a'])
+ wt.add(['a'], ['thefileid'])
+ revid = wt.commit("commit")
+ tree = wt.branch.repository.revision_tree(revid)
+ inv = mutable_inventory_from_tree(tree)
+ self.assertEquals(revid, inv.revision_id)
+ self.assertEquals(2, len(inv))
+ self.assertEquals("a", inv['thefileid'].name)
+ # The inventory should be mutable and independent of
+ # the original tree
+ self.assertFalse(tree.root_inventory['thefileid'].executable)
+ inv['thefileid'].executable = True
+ self.assertFalse(tree.root_inventory['thefileid'].executable)
diff --git a/bzrlib/tests/test_inventory_delta.py b/bzrlib/tests/test_inventory_delta.py
new file mode 100644
index 0000000..b44e493
--- /dev/null
+++ b/bzrlib/tests/test_inventory_delta.py
@@ -0,0 +1,622 @@
+# Copyright (C) 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.inventory_delta.
+
+See doc/developer/inventory.txt for more information.
+"""
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ inventory,
+ inventory_delta,
+ )
+from bzrlib.inventory_delta import InventoryDeltaError
+from bzrlib.inventory import Inventory
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import TestCase
+
+### DO NOT REFLOW THESE TEXTS. NEW LINES ARE SIGNIFICANT. ###
+empty_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: null:
+versioned_root: true
+tree_references: true
+"""
+
+root_only_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: entry-version
+versioned_root: true
+tree_references: true
+None\x00/\x00an-id\x00\x00a@e\xc3\xa5ample.com--2004\x00dir
+"""
+
+
+root_change_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: entry-version
+version: changed-root
+versioned_root: true
+tree_references: true
+/\x00an-id\x00\x00different-version\x00dir
+"""
+
+corrupt_parent_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: entry-version
+version: changed-root
+versioned_root: false
+tree_references: false
+/\x00an-id\x00\x00different-version\x00dir
+"""
+
+root_only_unversioned = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: entry-version
+versioned_root: false
+tree_references: false
+None\x00/\x00TREE_ROOT\x00\x00entry-version\x00dir
+"""
+
+reference_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: entry-version
+versioned_root: true
+tree_references: true
+None\x00/\x00TREE_ROOT\x00\x00a@e\xc3\xa5ample.com--2004\x00dir
+None\x00/foo\x00id\x00TREE_ROOT\x00changed\x00tree\x00subtree-version
+"""
+
+change_tree_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: entry-version
+version: change-tree
+versioned_root: false
+tree_references: false
+/foo\x00id\x00TREE_ROOT\x00changed-twice\x00tree\x00subtree-version2
+"""
+
+
+class TestDeserialization(TestCase):
+ """Test InventoryDeltaSerializer.parse_text_bytes."""
+
+ def test_parse_no_bytes(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(
+ InventoryDeltaError, deserializer.parse_text_bytes, '')
+ self.assertContainsRe(str(err), 'last line not empty')
+
+ def test_parse_bad_format(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, 'format: foo\n')
+ self.assertContainsRe(str(err), 'unknown format')
+
+ def test_parse_no_parent(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes,
+ 'format: bzr inventory delta v1 (bzr 1.14)\n')
+ self.assertContainsRe(str(err), 'missing parent: marker')
+
+ def test_parse_no_version(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes,
+ 'format: bzr inventory delta v1 (bzr 1.14)\n'
+ 'parent: null:\n')
+ self.assertContainsRe(str(err), 'missing version: marker')
+
+ def test_parse_duplicate_key_errors(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ double_root_lines = \
+"""format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: null:
+versioned_root: true
+tree_references: true
+None\x00/\x00an-id\x00\x00a@e\xc3\xa5ample.com--2004\x00dir\x00\x00
+None\x00/\x00an-id\x00\x00a@e\xc3\xa5ample.com--2004\x00dir\x00\x00
+"""
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, double_root_lines)
+ self.assertContainsRe(str(err), 'duplicate file id')
+
+ def test_parse_versioned_root_only(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ parse_result = deserializer.parse_text_bytes(root_only_lines)
+ expected_entry = inventory.make_entry(
+ 'directory', u'', None, 'an-id')
+ expected_entry.revision = 'a@e\xc3\xa5ample.com--2004'
+ self.assertEqual(
+ ('null:', 'entry-version', True, True,
+ [(None, '', 'an-id', expected_entry)]),
+ parse_result)
+
+ def test_parse_special_revid_not_valid_last_mod(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ root_only_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: null:
+versioned_root: false
+tree_references: true
+None\x00/\x00TREE_ROOT\x00\x00null:\x00dir\x00\x00
+"""
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, root_only_lines)
+ self.assertContainsRe(str(err), 'special revisionid found')
+
+ def test_parse_versioned_root_versioned_disabled(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ root_only_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: null:
+versioned_root: false
+tree_references: true
+None\x00/\x00TREE_ROOT\x00\x00a@e\xc3\xa5ample.com--2004\x00dir\x00\x00
+"""
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, root_only_lines)
+ self.assertContainsRe(str(err), 'Versioned root found')
+
+ def test_parse_unique_root_id_root_versioned_disabled(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ root_only_lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: parent-id
+version: a@e\xc3\xa5ample.com--2004
+versioned_root: false
+tree_references: true
+None\x00/\x00an-id\x00\x00parent-id\x00dir\x00\x00
+"""
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, root_only_lines)
+ self.assertContainsRe(str(err), 'Versioned root found')
+
+ def test_parse_unversioned_root_versioning_enabled(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ parse_result = deserializer.parse_text_bytes(root_only_unversioned)
+ expected_entry = inventory.make_entry(
+ 'directory', u'', None, 'TREE_ROOT')
+ expected_entry.revision = 'entry-version'
+ self.assertEqual(
+ ('null:', 'entry-version', False, False,
+ [(None, u'', 'TREE_ROOT', expected_entry)]),
+ parse_result)
+
+ def test_parse_versioned_root_when_disabled(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer(
+ allow_versioned_root=False)
+ err = self.assertRaises(inventory_delta.IncompatibleInventoryDelta,
+ deserializer.parse_text_bytes, root_only_lines)
+ self.assertEquals("versioned_root not allowed", str(err))
+
+ def test_parse_tree_when_disabled(self):
+ deserializer = inventory_delta.InventoryDeltaDeserializer(
+ allow_tree_references=False)
+ err = self.assertRaises(inventory_delta.IncompatibleInventoryDelta,
+ deserializer.parse_text_bytes, reference_lines)
+ self.assertEquals("Tree reference not allowed", str(err))
+
+ def test_parse_tree_when_header_disallows(self):
+ # A deserializer that allows tree_references to be set or unset.
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ # A serialised inventory delta with a header saying no tree refs, but
+ # that has a tree ref in its content.
+ lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: entry-version
+versioned_root: false
+tree_references: false
+None\x00/foo\x00id\x00TREE_ROOT\x00changed\x00tree\x00subtree-version
+"""
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, lines)
+ self.assertContainsRe(str(err), 'Tree reference found')
+
+ def test_parse_versioned_root_when_header_disallows(self):
+        # A deserializer that allows versioned_root to be set or unset.
+        deserializer = inventory_delta.InventoryDeltaDeserializer()
+        # A serialised inventory delta with a header saying an unversioned
+        # root, but that has a versioned root in its content.
+ lines = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: entry-version
+versioned_root: false
+tree_references: false
+None\x00/\x00TREE_ROOT\x00\x00a@e\xc3\xa5ample.com--2004\x00dir
+"""
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, lines)
+ self.assertContainsRe(str(err), 'Versioned root found')
+
+ def test_parse_last_line_not_empty(self):
+ """newpath must start with / if it is not None."""
+ # Trim the trailing newline from a valid serialization
+ lines = root_only_lines[:-1]
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, lines)
+ self.assertContainsRe(str(err), 'last line not empty')
+
+ def test_parse_invalid_newpath(self):
+ """newpath must start with / if it is not None."""
+ lines = empty_lines
+ lines += "None\x00bad\x00TREE_ROOT\x00\x00version\x00dir\n"
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, lines)
+ self.assertContainsRe(str(err), 'newpath invalid')
+
+ def test_parse_invalid_oldpath(self):
+ """oldpath must start with / if it is not None."""
+ lines = root_only_lines
+ lines += "bad\x00/new\x00file-id\x00\x00version\x00dir\n"
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ err = self.assertRaises(InventoryDeltaError,
+ deserializer.parse_text_bytes, lines)
+ self.assertContainsRe(str(err), 'oldpath invalid')
+
+ def test_parse_new_file(self):
+ """a new file is parsed correctly"""
+ lines = root_only_lines
+ fake_sha = "deadbeef" * 5
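+        # 5 * 8 = 40 hex characters, the length of a hex sha1 digest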
+ lines += (
+ "None\x00/new\x00file-id\x00an-id\x00version\x00file\x00123\x00" +
+ "\x00" + fake_sha + "\n")
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ parse_result = deserializer.parse_text_bytes(lines)
+ expected_entry = inventory.make_entry(
+ 'file', u'new', 'an-id', 'file-id')
+ expected_entry.revision = 'version'
+ expected_entry.text_size = 123
+ expected_entry.text_sha1 = fake_sha
+ delta = parse_result[4]
+ self.assertEqual(
+ (None, u'new', 'file-id', expected_entry), delta[-1])
+
+ def test_parse_delete(self):
+ lines = root_only_lines
+ lines += (
+ "/old-file\x00None\x00deleted-id\x00\x00null:\x00deleted\x00\x00\n")
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ parse_result = deserializer.parse_text_bytes(lines)
+ delta = parse_result[4]
+ self.assertEqual(
+ (u'old-file', None, 'deleted-id', None), delta[-1])
+
+
+class TestSerialization(TestCase):
+ """Tests for InventoryDeltaSerializer.delta_to_lines."""
+
+ def test_empty_delta_to_lines(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ self.assertEqual(StringIO(empty_lines).readlines(),
+ serializer.delta_to_lines(NULL_REVISION, NULL_REVISION, delta))
+
+ def test_root_only_to_lines(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'an-id')
+ root.revision = 'a@e\xc3\xa5ample.com--2004'
+ new_inv.add(root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ self.assertEqual(StringIO(root_only_lines).readlines(),
+ serializer.delta_to_lines(NULL_REVISION, 'entry-version', delta))
+
+ def test_unversioned_root(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'TREE_ROOT')
+ # Implicit roots are considered modified in every revision.
+ root.revision = 'entry-version'
+ new_inv.add(root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=False, tree_references=False)
+ serialized_lines = serializer.delta_to_lines(
+ NULL_REVISION, 'entry-version', delta)
+ self.assertEqual(StringIO(root_only_unversioned).readlines(),
+ serialized_lines)
+ deserializer = inventory_delta.InventoryDeltaDeserializer()
+ self.assertEqual(
+ (NULL_REVISION, 'entry-version', False, False, delta),
+ deserializer.parse_text_bytes(''.join(serialized_lines)))
+
+ def test_unversioned_non_root_errors(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'TREE_ROOT')
+ root.revision = 'a@e\xc3\xa5ample.com--2004'
+ new_inv.add(root)
+ non_root = new_inv.make_entry('directory', 'foo', root.file_id, 'id')
+ new_inv.add(non_root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ err = self.assertRaises(InventoryDeltaError,
+ serializer.delta_to_lines, NULL_REVISION, 'entry-version', delta)
+ self.assertEqual(str(err), 'no version for fileid id')
+
+ def test_richroot_unversioned_root_errors(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'TREE_ROOT')
+ new_inv.add(root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ err = self.assertRaises(InventoryDeltaError,
+ serializer.delta_to_lines, NULL_REVISION, 'entry-version', delta)
+ self.assertEqual(str(err), 'no version for fileid TREE_ROOT')
+
+ def test_nonrichroot_versioned_root_errors(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'TREE_ROOT')
+ root.revision = 'a@e\xc3\xa5ample.com--2004'
+ new_inv.add(root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=False, tree_references=True)
+ err = self.assertRaises(InventoryDeltaError,
+ serializer.delta_to_lines, NULL_REVISION, 'entry-version', delta)
+ self.assertStartsWith(str(err), 'Version present for / in TREE_ROOT')
+
+ def test_unknown_kind_errors(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'my-rich-root-id')
+ root.revision = 'changed'
+ new_inv.add(root)
+ class StrangeInventoryEntry(inventory.InventoryEntry):
+ kind = 'strange'
+ non_root = StrangeInventoryEntry('id', 'foo', root.file_id)
+ non_root.revision = 'changed'
+ new_inv.add(non_root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+        # We expect KeyError because there is little value in wrapping this.
+        # This test cares that it errors, not about how it errors.
+ err = self.assertRaises(KeyError,
+ serializer.delta_to_lines, NULL_REVISION, 'entry-version', delta)
+ self.assertEqual(('strange',), err.args)
+
+ def test_tree_reference_disabled(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'TREE_ROOT')
+ root.revision = 'a@e\xc3\xa5ample.com--2004'
+ new_inv.add(root)
+ non_root = new_inv.make_entry(
+ 'tree-reference', 'foo', root.file_id, 'id')
+ non_root.revision = 'changed'
+ non_root.reference_revision = 'subtree-version'
+ new_inv.add(non_root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=False)
+        # We expect KeyError because there is little value in wrapping this.
+        # This test cares that it errors, not about how it errors.
+ err = self.assertRaises(KeyError,
+ serializer.delta_to_lines, NULL_REVISION, 'entry-version', delta)
+ self.assertEqual(('tree-reference',), err.args)
+
+ def test_tree_reference_enabled(self):
+ old_inv = Inventory(None)
+ new_inv = Inventory(None)
+ root = new_inv.make_entry('directory', '', None, 'TREE_ROOT')
+ root.revision = 'a@e\xc3\xa5ample.com--2004'
+ new_inv.add(root)
+ non_root = new_inv.make_entry(
+ 'tree-reference', 'foo', root.file_id, 'id')
+ non_root.revision = 'changed'
+ non_root.reference_revision = 'subtree-version'
+ new_inv.add(non_root)
+ delta = new_inv._make_delta(old_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ self.assertEqual(StringIO(reference_lines).readlines(),
+ serializer.delta_to_lines(NULL_REVISION, 'entry-version', delta))
+
+ def test_to_inventory_root_id_versioned_not_permitted(self):
+ root_entry = inventory.make_entry('directory', '', None, 'TREE_ROOT')
+ root_entry.revision = 'some-version'
+ delta = [(None, '', 'TREE_ROOT', root_entry)]
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=False, tree_references=True)
+ self.assertRaises(
+ InventoryDeltaError, serializer.delta_to_lines, 'old-version',
+ 'new-version', delta)
+
+ def test_to_inventory_root_id_not_versioned(self):
+ delta = [(None, '', 'an-id', inventory.make_entry(
+ 'directory', '', None, 'an-id'))]
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ self.assertRaises(
+ InventoryDeltaError, serializer.delta_to_lines, 'old-version',
+ 'new-version', delta)
+
+ def test_to_inventory_has_tree_not_meant_to(self):
+ make_entry = inventory.make_entry
+ tree_ref = make_entry('tree-reference', 'foo', 'changed-in', 'ref-id')
+ tree_ref.reference_revision = 'ref-revision'
+ delta = [
+ (None, '', 'an-id',
+ make_entry('directory', '', 'changed-in', 'an-id')),
+            # a tree reference entry
+            (None, 'foo', 'ref-id', tree_ref),
+ ]
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ self.assertRaises(InventoryDeltaError, serializer.delta_to_lines,
+ 'old-version', 'new-version', delta)
+
+ def test_to_inventory_torture(self):
+ def make_entry(kind, name, parent_id, file_id, **attrs):
+ entry = inventory.make_entry(kind, name, parent_id, file_id)
+ for name, value in attrs.items():
+ setattr(entry, name, value)
+ return entry
+ # this delta is crafted to have all the following:
+ # - deletes
+ # - renamed roots
+ # - deep dirs
+ # - files moved after parent dir was renamed
+ # - files with and without exec bit
+ delta = [
+ # new root:
+ (None, '', 'new-root-id',
+ make_entry('directory', '', None, 'new-root-id',
+ revision='changed-in')),
+ # an old root:
+ ('', 'old-root', 'TREE_ROOT',
+ make_entry('directory', 'subdir-now', 'new-root-id',
+ 'TREE_ROOT', revision='moved-root')),
+ # a file that followed the root move
+ ('under-old-root', 'old-root/under-old-root', 'moved-id',
+ make_entry('file', 'under-old-root', 'TREE_ROOT', 'moved-id',
+ revision='old-rev', executable=False, text_size=30,
+ text_sha1='some-sha')),
+ # a deleted path
+ ('old-file', None, 'deleted-id', None),
+ # a tree reference moved to the new root
+ ('ref', 'ref', 'ref-id',
+ make_entry('tree-reference', 'ref', 'new-root-id', 'ref-id',
+ reference_revision='tree-reference-id',
+ revision='new-rev')),
+ # a symlink now in a deep dir
+ ('dir/link', 'old-root/dir/link', 'link-id',
+ make_entry('symlink', 'link', 'deep-id', 'link-id',
+ symlink_target='target', revision='new-rev')),
+ # a deep dir
+ ('dir', 'old-root/dir', 'deep-id',
+ make_entry('directory', 'dir', 'TREE_ROOT', 'deep-id',
+ revision='new-rev')),
+ # a file with an exec bit set
+ (None, 'configure', 'exec-id',
+ make_entry('file', 'configure', 'new-root-id', 'exec-id',
+ executable=True, text_size=30, text_sha1='some-sha',
+ revision='old-rev')),
+ ]
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=True)
+ lines = serializer.delta_to_lines(NULL_REVISION, 'something', delta)
+ expected = """format: bzr inventory delta v1 (bzr 1.14)
+parent: null:
+version: something
+versioned_root: true
+tree_references: true
+/\x00/old-root\x00TREE_ROOT\x00new-root-id\x00moved-root\x00dir
+/dir\x00/old-root/dir\x00deep-id\x00TREE_ROOT\x00new-rev\x00dir
+/dir/link\x00/old-root/dir/link\x00link-id\x00deep-id\x00new-rev\x00link\x00target
+/old-file\x00None\x00deleted-id\x00\x00null:\x00deleted\x00\x00
+/ref\x00/ref\x00ref-id\x00new-root-id\x00new-rev\x00tree\x00tree-reference-id
+/under-old-root\x00/old-root/under-old-root\x00moved-id\x00TREE_ROOT\x00old-rev\x00file\x0030\x00\x00some-sha
+None\x00/\x00new-root-id\x00\x00changed-in\x00dir
+None\x00/configure\x00exec-id\x00new-root-id\x00old-rev\x00file\x0030\x00Y\x00some-sha
+"""
+ serialized = ''.join(lines)
+ self.assertIsInstance(serialized, str)
+ self.assertEqual(expected, serialized)
+
+
+class TestContent(TestCase):
+ """Test serialization of the content part of a line."""
+
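+    # Content formats exercised below: 'dir' for directories,
+    # 'file\x00<size>\x00<Y if executable>\x00<sha1>' for files,
+    # 'link\x00<target>' for symlinks, and 'tree\x00<reference-revision>'
+    # for tree references; a missing size, sha1, target or reference
+    # revision raises InventoryDeltaError.
+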
+ def test_dir(self):
+ entry = inventory.make_entry('directory', 'a dir', None)
+ self.assertEqual('dir', inventory_delta._directory_content(entry))
+
+ def test_file_0_short_sha(self):
+ file_entry = inventory.make_entry('file', 'a file', None, 'file-id')
+ file_entry.text_sha1 = ''
+ file_entry.text_size = 0
+ self.assertEqual('file\x000\x00\x00',
+ inventory_delta._file_content(file_entry))
+
+ def test_file_10_foo(self):
+ file_entry = inventory.make_entry('file', 'a file', None, 'file-id')
+ file_entry.text_sha1 = 'foo'
+ file_entry.text_size = 10
+ self.assertEqual('file\x0010\x00\x00foo',
+ inventory_delta._file_content(file_entry))
+
+ def test_file_executable(self):
+ file_entry = inventory.make_entry('file', 'a file', None, 'file-id')
+ file_entry.executable = True
+ file_entry.text_sha1 = 'foo'
+ file_entry.text_size = 10
+ self.assertEqual('file\x0010\x00Y\x00foo',
+ inventory_delta._file_content(file_entry))
+
+ def test_file_without_size(self):
+ file_entry = inventory.make_entry('file', 'a file', None, 'file-id')
+ file_entry.text_sha1 = 'foo'
+ self.assertRaises(InventoryDeltaError,
+ inventory_delta._file_content, file_entry)
+
+ def test_file_without_sha1(self):
+ file_entry = inventory.make_entry('file', 'a file', None, 'file-id')
+ file_entry.text_size = 10
+ self.assertRaises(InventoryDeltaError,
+ inventory_delta._file_content, file_entry)
+
+ def test_link_empty_target(self):
+ entry = inventory.make_entry('symlink', 'a link', None)
+ entry.symlink_target = ''
+ self.assertEqual('link\x00',
+ inventory_delta._link_content(entry))
+
+ def test_link_unicode_target(self):
+ entry = inventory.make_entry('symlink', 'a link', None)
+ entry.symlink_target = ' \xc3\xa5'.decode('utf8')
+ self.assertEqual('link\x00 \xc3\xa5',
+ inventory_delta._link_content(entry))
+
+ def test_link_space_target(self):
+ entry = inventory.make_entry('symlink', 'a link', None)
+ entry.symlink_target = ' '
+ self.assertEqual('link\x00 ',
+ inventory_delta._link_content(entry))
+
+ def test_link_no_target(self):
+ entry = inventory.make_entry('symlink', 'a link', None)
+ self.assertRaises(InventoryDeltaError,
+ inventory_delta._link_content, entry)
+
+ def test_reference_null(self):
+ entry = inventory.make_entry('tree-reference', 'a tree', None)
+ entry.reference_revision = NULL_REVISION
+ self.assertEqual('tree\x00null:',
+ inventory_delta._reference_content(entry))
+
+ def test_reference_revision(self):
+ entry = inventory.make_entry('tree-reference', 'a tree', None)
+ entry.reference_revision = 'foo@\xc3\xa5b-lah'
+ self.assertEqual('tree\x00foo@\xc3\xa5b-lah',
+ inventory_delta._reference_content(entry))
+
+ def test_reference_no_reference(self):
+ entry = inventory.make_entry('tree-reference', 'a tree', None)
+ self.assertRaises(InventoryDeltaError,
+ inventory_delta._reference_content, entry)
diff --git a/bzrlib/tests/test_knit.py b/bzrlib/tests/test_knit.py
new file mode 100644
index 0000000..9396166
--- /dev/null
+++ b/bzrlib/tests/test_knit.py
@@ -0,0 +1,2644 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Knit data structure"""
+
+from cStringIO import StringIO
+import gzip
+import sys
+
+from bzrlib import (
+ errors,
+ knit,
+ multiparent,
+ osutils,
+ pack,
+ tests,
+ transport,
+ )
+from bzrlib.errors import (
+ KnitHeaderError,
+ NoSuchFile,
+ )
+from bzrlib.index import *
+from bzrlib.knit import (
+ AnnotatedKnitContent,
+ KnitContent,
+ KnitVersionedFiles,
+ PlainKnitContent,
+ _VFContentMapGenerator,
+ _KndxIndex,
+ _KnitGraphIndex,
+ _KnitKeyAccess,
+ make_file_factory,
+ )
+from bzrlib.patiencediff import PatienceSequenceMatcher
+from bzrlib.repofmt import (
+ knitpack_repo,
+ pack_repo,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithMemoryTransport,
+ TestCaseWithTransport,
+ TestNotApplicable,
+ )
+from bzrlib.versionedfile import (
+ AbsentContentFactory,
+ ConstantMapper,
+ network_bytes_to_kind_and_offset,
+ RecordingVersionedFilesDecorator,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+compiled_knit_feature = features.ModuleAvailableFeature(
+ 'bzrlib._knit_load_data_pyx')
+
+
+class KnitContentTestsMixin(object):
+
+ def test_constructor(self):
+ content = self._make_content([])
+
+ def test_text(self):
+ content = self._make_content([])
+ self.assertEqual(content.text(), [])
+
+ content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
+ self.assertEqual(content.text(), ["text1", "text2"])
+
+ def test_copy(self):
+ content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
+ copy = content.copy()
+ self.assertIsInstance(copy, content.__class__)
+ self.assertEqual(copy.annotate(), content.annotate())
+
+ def assertDerivedBlocksEqual(self, source, target, noeol=False):
+ """Assert that the derived matching blocks match real output"""
+ source_lines = source.splitlines(True)
+ target_lines = target.splitlines(True)
+ def nl(line):
+ if noeol and not line.endswith('\n'):
+ return line + '\n'
+ else:
+ return line
+ source_content = self._make_content([(None, nl(l)) for l in source_lines])
+ target_content = self._make_content([(None, nl(l)) for l in target_lines])
+ line_delta = source_content.line_delta(target_content)
+ delta_blocks = list(KnitContent.get_line_delta_blocks(line_delta,
+ source_lines, target_lines))
+ matcher = PatienceSequenceMatcher(None, source_lines, target_lines)
+ matcher_blocks = list(matcher.get_matching_blocks())
+ self.assertEqual(matcher_blocks, delta_blocks)
+
+ def test_get_line_delta_blocks(self):
+ self.assertDerivedBlocksEqual('a\nb\nc\n', 'q\nc\n')
+ self.assertDerivedBlocksEqual(TEXT_1, TEXT_1)
+ self.assertDerivedBlocksEqual(TEXT_1, TEXT_1A)
+ self.assertDerivedBlocksEqual(TEXT_1, TEXT_1B)
+ self.assertDerivedBlocksEqual(TEXT_1B, TEXT_1A)
+ self.assertDerivedBlocksEqual(TEXT_1A, TEXT_1B)
+ self.assertDerivedBlocksEqual(TEXT_1A, '')
+ self.assertDerivedBlocksEqual('', TEXT_1A)
+ self.assertDerivedBlocksEqual('', '')
+ self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\nd')
+
+ def test_get_line_delta_blocks_noeol(self):
+ """Handle historical knit deltas safely
+
+        Some existing knit deltas don't consider the last line to differ
+        when the only difference is whether it has a final newline.
+
+ New knit deltas appear to always consider the last line to differ
+ in this case.
+ """
+ self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\nd\n', noeol=True)
+ self.assertDerivedBlocksEqual('a\nb\nc\nd\n', 'a\nb\nc', noeol=True)
+ self.assertDerivedBlocksEqual('a\nb\nc\n', 'a\nb\nc', noeol=True)
+ self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\n', noeol=True)
+
+
+TEXT_1 = """\
+Banana cup cakes:
+
+- bananas
+- eggs
+- broken tea cups
+"""
+
+TEXT_1A = """\
+Banana cup cake recipe
+(serves 6)
+
+- bananas
+- eggs
+- broken tea cups
+- self-raising flour
+"""
+
+TEXT_1B = """\
+Banana cup cake recipe
+
+- bananas (do not use plantains!!!)
+- broken tea cups
+- flour
+"""
+
+delta_1_1a = """\
+0,1,2
+Banana cup cake recipe
+(serves 6)
+5,5,1
+- self-raising flour
+"""
+
+TEXT_2 = """\
+Boeuf bourguignon
+
+- beef
+- red wine
+- small onions
+- carrot
+- mushrooms
+"""
+
+
+class TestPlainKnitContent(TestCase, KnitContentTestsMixin):
+
+ def _make_content(self, lines):
+ annotated_content = AnnotatedKnitContent(lines)
+ return PlainKnitContent(annotated_content.text(), 'bogus')
+
+ def test_annotate(self):
+ content = self._make_content([])
+ self.assertEqual(content.annotate(), [])
+
+ content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
+ self.assertEqual(content.annotate(),
+ [("bogus", "text1"), ("bogus", "text2")])
+
+ def test_line_delta(self):
+ content1 = self._make_content([("", "a"), ("", "b")])
+ content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
+ self.assertEqual(content1.line_delta(content2),
+ [(1, 2, 2, ["a", "c"])])
+
+ def test_line_delta_iter(self):
+ content1 = self._make_content([("", "a"), ("", "b")])
+ content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
+ it = content1.line_delta_iter(content2)
+ self.assertEqual(it.next(), (1, 2, 2, ["a", "c"]))
+ self.assertRaises(StopIteration, it.next)
+
+
+class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin):
+
+ def _make_content(self, lines):
+ return AnnotatedKnitContent(lines)
+
+ def test_annotate(self):
+ content = self._make_content([])
+ self.assertEqual(content.annotate(), [])
+
+ content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
+ self.assertEqual(content.annotate(),
+ [("origin1", "text1"), ("origin2", "text2")])
+
+ def test_line_delta(self):
+ content1 = self._make_content([("", "a"), ("", "b")])
+ content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
+ self.assertEqual(content1.line_delta(content2),
+ [(1, 2, 2, [("", "a"), ("", "c")])])
+
+ def test_line_delta_iter(self):
+ content1 = self._make_content([("", "a"), ("", "b")])
+ content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
+ it = content1.line_delta_iter(content2)
+ self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")]))
+ self.assertRaises(StopIteration, it.next)
+
+
+class MockTransport(object):
+
+ def __init__(self, file_lines=None):
+ self.file_lines = file_lines
+ self.calls = []
+ # We have no base directory for the MockTransport
+ self.base = ''
+
+ def get(self, filename):
+ if self.file_lines is None:
+ raise NoSuchFile(filename)
+ else:
+ return StringIO("\n".join(self.file_lines))
+
+ def readv(self, relpath, offsets):
+ fp = self.get(relpath)
+ for offset, size in offsets:
+ fp.seek(offset)
+ yield offset, fp.read(size)
+
+ def __getattr__(self, name):
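+ # Any transport method not defined above becomes a no-op that records
+ # (name, args, kwargs) in self.calls so tests can assert on it later.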
+ def queue_call(*args, **kwargs):
+ self.calls.append((name, args, kwargs))
+ return queue_call
+
+
+class MockReadvFailingTransport(MockTransport):
+ """Fail in the middle of a readv() result.
+
+ This Transport will successfully yield the first two requested hunks, but
+ raise NoSuchFile for the rest.
+ """
+
+ def readv(self, relpath, offsets):
+ count = 0
+ for result in MockTransport.readv(self, relpath, offsets):
+ count += 1
+ # we use 2 because the first offset is the pack header, the second
+ # is the first actual content request
+ if count > 2:
+ raise errors.NoSuchFile(relpath)
+ yield result
+
+
+class KnitRecordAccessTestsMixin(object):
+ """Tests for getting and putting knit records."""
+
+ def test_add_raw_records(self):
+ """Add_raw_records adds records retrievable later."""
+ access = self.get_access()
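+ # add_raw_records takes (key, byte-length) pairs plus the concatenated
+ # raw bytes, and returns memos that get_raw_records() can read back.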
+ memos = access.add_raw_records([('key', 10)], '1234567890')
+ self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
+
+ def test_add_several_raw_records(self):
+ """add_raw_records with many records and read some back."""
+ access = self.get_access()
+ memos = access.add_raw_records([('key', 10), ('key2', 2), ('key3', 5)],
+ '12345678901234567')
+ self.assertEqual(['1234567890', '12', '34567'],
+ list(access.get_raw_records(memos)))
+ self.assertEqual(['1234567890'],
+ list(access.get_raw_records(memos[0:1])))
+ self.assertEqual(['12'],
+ list(access.get_raw_records(memos[1:2])))
+ self.assertEqual(['34567'],
+ list(access.get_raw_records(memos[2:3])))
+ self.assertEqual(['1234567890', '34567'],
+ list(access.get_raw_records(memos[0:1] + memos[2:3])))
+
+
+class TestKnitKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
+ """Tests for the .kndx implementation."""
+
+ def get_access(self):
+ """Get a .knit style access instance."""
+ mapper = ConstantMapper("foo")
+ access = _KnitKeyAccess(self.get_transport(), mapper)
+ return access
+
+
+class _TestException(Exception):
+ """Just an exception for local tests to use."""
+
+
+class TestPackKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
+ """Tests for the pack based access."""
+
+ def get_access(self):
+ return self._get_access()[0]
+
+ def _get_access(self, packname='packfile', index='FOO'):
+ transport = self.get_transport()
+ def write_data(bytes):
+ transport.append_bytes(packname, bytes)
+ writer = pack.ContainerWriter(write_data)
+ writer.begin()
+ access = pack_repo._DirectPackAccess({})
+ access.set_writer(writer, index, (transport, packname))
+ return access, writer
+
+ def make_pack_file(self):
+ """Create a pack file with 2 records."""
+ access, writer = self._get_access(packname='packname', index='foo')
+ memos = []
+ memos.extend(access.add_raw_records([('key1', 10)], '1234567890'))
+ memos.extend(access.add_raw_records([('key2', 5)], '12345'))
+ writer.end()
+ return memos
+
+ def test_pack_collection_pack_retries(self):
+ """An explicit pack of a pack collection succeeds even when a
+ concurrent pack happens.
+ """
+ builder = self.make_branch_builder('.')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('modify', ('file-id', 'content\nrev 2\n')),
+ ])
+ builder.build_snapshot('rev-3', ['rev-2'], [
+ ('modify', ('file-id', 'content\nrev 3\n')),
+ ])
+ self.addCleanup(builder.finish_series)
+ b = builder.get_branch()
+ self.addCleanup(b.lock_write().unlock)
+ repo = b.repository
+ collection = repo._pack_collection
+ # Concurrently repack the repo.
+ reopened_repo = repo.bzrdir.open_repository()
+ reopened_repo.pack()
+ # Pack the new pack.
+ collection.pack()
+
+ def make_vf_for_retrying(self):
+ """Create 3 packs and a reload function.
+
+ Originally, 2 pack files will have the data, but one will be missing.
+ And then the third will be used in place of the first two if reload()
+ is called.
+
+ :return: (versioned_file, reload_counter)
+ versioned_file a KnitVersionedFiles using the packs for access
+ """
+ builder = self.make_branch_builder('.', format="1.9")
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
+ ])
+ builder.build_snapshot('rev-2', ['rev-1'], [
+ ('modify', ('file-id', 'content\nrev 2\n')),
+ ])
+ builder.build_snapshot('rev-3', ['rev-2'], [
+ ('modify', ('file-id', 'content\nrev 3\n')),
+ ])
+ builder.finish_series()
+ b = builder.get_branch()
+ b.lock_write()
+ self.addCleanup(b.unlock)
+ # Pack these three revisions into another pack file, but don't remove
+ # the originals
+ repo = b.repository
+ collection = repo._pack_collection
+ collection.ensure_loaded()
+ orig_packs = collection.packs
+ packer = knitpack_repo.KnitPacker(collection, orig_packs, '.testpack')
+ new_pack = packer.pack()
+ # forget about the new pack
+ collection.reset()
+ repo.refresh_data()
+ vf = repo.revisions
+ # Set up a reload() function that switches to using the new pack file
+ new_index = new_pack.revision_index
+ access_tuple = new_pack.access_tuple()
+ reload_counter = [0, 0, 0]
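+ # reload_counter tracks [total reload() calls, calls that actually
+ # switched to the new pack, calls made after the switch had happened].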
+ def reload():
+ reload_counter[0] += 1
+ if reload_counter[1] > 0:
+ # We already reloaded, nothing more to do
+ reload_counter[2] += 1
+ return False
+ reload_counter[1] += 1
+ vf._index._graph_index._indices[:] = [new_index]
+ vf._access._indices.clear()
+ vf._access._indices[new_index] = access_tuple
+ return True
+ # Delete one of the pack files so the data will need to be reloaded. We
+ # will delete the file with 'rev-2' in it
+ trans, name = orig_packs[1].access_tuple()
+ trans.delete(name)
+ # We don't have the index trigger reloading because we want to test
+ # that we reload when the .pack disappears
+ vf._access._reload_func = reload
+ return vf, reload_counter
+
+ def make_reload_func(self, return_val=True):
+ reload_called = [0]
+ def reload():
+ reload_called[0] += 1
+ return return_val
+ return reload_called, reload
+
+ def make_retry_exception(self):
+ # We raise a real exception so that sys.exc_info() is properly
+ # populated
+ try:
+ raise _TestException('foobar')
+ except _TestException, e:
+ retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
+ exc_info=sys.exc_info())
+ # GZ 2010-08-10: Cycle with exc_info affects 3 tests
+ return retry_exc
+
+ def test_read_from_several_packs(self):
+ access, writer = self._get_access()
+ memos = []
+ memos.extend(access.add_raw_records([('key', 10)], '1234567890'))
+ writer.end()
+ access, writer = self._get_access('pack2', 'FOOBAR')
+ memos.extend(access.add_raw_records([('key', 5)], '12345'))
+ writer.end()
+ access, writer = self._get_access('pack3', 'BAZ')
+ memos.extend(access.add_raw_records([('key', 5)], 'alpha'))
+ writer.end()
+ transport = self.get_transport()
+ access = pack_repo._DirectPackAccess({"FOO":(transport, 'packfile'),
+ "FOOBAR":(transport, 'pack2'),
+ "BAZ":(transport, 'pack3')})
+ self.assertEqual(['1234567890', '12345', 'alpha'],
+ list(access.get_raw_records(memos)))
+ self.assertEqual(['1234567890'],
+ list(access.get_raw_records(memos[0:1])))
+ self.assertEqual(['12345'],
+ list(access.get_raw_records(memos[1:2])))
+ self.assertEqual(['alpha'],
+ list(access.get_raw_records(memos[2:3])))
+ self.assertEqual(['1234567890', 'alpha'],
+ list(access.get_raw_records(memos[0:1] + memos[2:3])))
+
+ def test_set_writer(self):
+ """The writer should be settable post construction."""
+ access = pack_repo._DirectPackAccess({})
+ transport = self.get_transport()
+ packname = 'packfile'
+ index = 'foo'
+ def write_data(bytes):
+ transport.append_bytes(packname, bytes)
+ writer = pack.ContainerWriter(write_data)
+ writer.begin()
+ access.set_writer(writer, index, (transport, packname))
+ memos = access.add_raw_records([('key', 10)], '1234567890')
+ writer.end()
+ self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
+
+ def test_missing_index_raises_retry(self):
+ memos = self.make_pack_file()
+ transport = self.get_transport()
+ reload_called, reload_func = self.make_reload_func()
+ # Note that the index key has changed from 'foo' to 'bar'
+ access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')},
+ reload_func=reload_func)
+ e = self.assertListRaises(errors.RetryWithNewPacks,
+ access.get_raw_records, memos)
+ # Because a key was passed in which does not match our index list, we
+ # assume that the listing was already reloaded
+ self.assertTrue(e.reload_occurred)
+ self.assertIsInstance(e.exc_info, tuple)
+ self.assertIs(e.exc_info[0], KeyError)
+ self.assertIsInstance(e.exc_info[1], KeyError)
+
+ def test_missing_index_raises_key_error_with_no_reload(self):
+ memos = self.make_pack_file()
+ transport = self.get_transport()
+ # Note that the index key has changed from 'foo' to 'bar'
+ access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')})
+ e = self.assertListRaises(KeyError, access.get_raw_records, memos)
+
+ def test_missing_file_raises_retry(self):
+ memos = self.make_pack_file()
+ transport = self.get_transport()
+ reload_called, reload_func = self.make_reload_func()
+ # Note that the 'filename' has been changed to 'different-packname'
+ access = pack_repo._DirectPackAccess(
+ {'foo':(transport, 'different-packname')},
+ reload_func=reload_func)
+ e = self.assertListRaises(errors.RetryWithNewPacks,
+ access.get_raw_records, memos)
+ # The file has gone missing, so we assume we need to reload
+ self.assertFalse(e.reload_occurred)
+ self.assertIsInstance(e.exc_info, tuple)
+ self.assertIs(e.exc_info[0], errors.NoSuchFile)
+ self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
+ self.assertEqual('different-packname', e.exc_info[1].path)
+
+ def test_missing_file_raises_no_such_file_with_no_reload(self):
+ memos = self.make_pack_file()
+ transport = self.get_transport()
+ # Note that the 'filename' has been changed to 'different-packname'
+ access = pack_repo._DirectPackAccess(
+ {'foo': (transport, 'different-packname')})
+ e = self.assertListRaises(errors.NoSuchFile,
+ access.get_raw_records, memos)
+
+ def test_failing_readv_raises_retry(self):
+ memos = self.make_pack_file()
+ transport = self.get_transport()
+ failing_transport = MockReadvFailingTransport(
+ [transport.get_bytes('packname')])
+ reload_called, reload_func = self.make_reload_func()
+ access = pack_repo._DirectPackAccess(
+ {'foo': (failing_transport, 'packname')},
+ reload_func=reload_func)
+ # Asking for a single record will not trigger the Mock failure
+ self.assertEqual(['1234567890'],
+ list(access.get_raw_records(memos[:1])))
+ self.assertEqual(['12345'],
+ list(access.get_raw_records(memos[1:2])))
+ # A multiple offset readv() will fail mid-way through
+ e = self.assertListRaises(errors.RetryWithNewPacks,
+ access.get_raw_records, memos)
+ # The file has gone missing, so we assume we need to reload
+ self.assertFalse(e.reload_occurred)
+ self.assertIsInstance(e.exc_info, tuple)
+ self.assertIs(e.exc_info[0], errors.NoSuchFile)
+ self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
+ self.assertEqual('packname', e.exc_info[1].path)
+
+ def test_failing_readv_raises_no_such_file_with_no_reload(self):
+ memos = self.make_pack_file()
+ transport = self.get_transport()
+ failing_transport = MockReadvFailingTransport(
+ [transport.get_bytes('packname')])
+ reload_called, reload_func = self.make_reload_func()
+ access = pack_repo._DirectPackAccess(
+ {'foo':(failing_transport, 'packname')})
+ # Asking for a single record will not trigger the Mock failure
+ self.assertEqual(['1234567890'],
+ list(access.get_raw_records(memos[:1])))
+ self.assertEqual(['12345'],
+ list(access.get_raw_records(memos[1:2])))
+ # A multiple offset readv() will fail mid-way through
+ e = self.assertListRaises(errors.NoSuchFile,
+ access.get_raw_records, memos)
+
+ def test_reload_or_raise_no_reload(self):
+ access = pack_repo._DirectPackAccess({}, reload_func=None)
+ retry_exc = self.make_retry_exception()
+ # Without a reload_func, we will just re-raise the original exception
+ self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
+
+ def test_reload_or_raise_reload_changed(self):
+ reload_called, reload_func = self.make_reload_func(return_val=True)
+ access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
+ retry_exc = self.make_retry_exception()
+ access.reload_or_raise(retry_exc)
+ self.assertEqual([1], reload_called)
+ retry_exc.reload_occurred=True
+ access.reload_or_raise(retry_exc)
+ self.assertEqual([2], reload_called)
+
+ def test_reload_or_raise_reload_no_change(self):
+ reload_called, reload_func = self.make_reload_func(return_val=False)
+ access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
+ retry_exc = self.make_retry_exception()
+ # If reload_occurred is False, then we consider it an error to have
+ # reload_func() return False (no changes).
+ self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
+ self.assertEqual([1], reload_called)
+ retry_exc.reload_occurred=True
+ # If reload_occurred is True, then we assume nothing changed because
+ # it had changed earlier, but didn't change again
+ access.reload_or_raise(retry_exc)
+ self.assertEqual([2], reload_called)
+
+ def test_annotate_retries(self):
+ vf, reload_counter = self.make_vf_for_retrying()
+ # It is a little bit bogus to annotate the Revision VF, but it works,
+ # as we have ancestry stored there
+ key = ('rev-3',)
+ reload_lines = vf.annotate(key)
+ self.assertEqual([1, 1, 0], reload_counter)
+ plain_lines = vf.annotate(key)
+ self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
+ if reload_lines != plain_lines:
+ self.fail('Annotation was not identical with reloading.')
+ # Now delete the packs-in-use, which should trigger another reload, but
+ # this time we just raise an exception because we can't recover
+ for trans, name in vf._access._indices.itervalues():
+ trans.delete(name)
+ self.assertRaises(errors.NoSuchFile, vf.annotate, key)
+ self.assertEqual([2, 1, 1], reload_counter)
+
+ def test__get_record_map_retries(self):
+ vf, reload_counter = self.make_vf_for_retrying()
+ keys = [('rev-1',), ('rev-2',), ('rev-3',)]
+ records = vf._get_record_map(keys)
+ self.assertEqual(keys, sorted(records.keys()))
+ self.assertEqual([1, 1, 0], reload_counter)
+ # Now delete the packs-in-use, which should trigger another reload, but
+ # this time we just raise an exception because we can't recover
+ for trans, name in vf._access._indices.itervalues():
+ trans.delete(name)
+ self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
+ self.assertEqual([2, 1, 1], reload_counter)
+
+ def test_get_record_stream_retries(self):
+ vf, reload_counter = self.make_vf_for_retrying()
+ keys = [('rev-1',), ('rev-2',), ('rev-3',)]
+ record_stream = vf.get_record_stream(keys, 'topological', False)
+ record = record_stream.next()
+ self.assertEqual(('rev-1',), record.key)
+ self.assertEqual([0, 0, 0], reload_counter)
+ record = record_stream.next()
+ self.assertEqual(('rev-2',), record.key)
+ self.assertEqual([1, 1, 0], reload_counter)
+ record = record_stream.next()
+ self.assertEqual(('rev-3',), record.key)
+ self.assertEqual([1, 1, 0], reload_counter)
+ # Now delete all pack files, and see that we raise the right error
+ for trans, name in vf._access._indices.itervalues():
+ trans.delete(name)
+ self.assertListRaises(errors.NoSuchFile,
+ vf.get_record_stream, keys, 'topological', False)
+
+ def test_iter_lines_added_or_present_in_keys_retries(self):
+ vf, reload_counter = self.make_vf_for_retrying()
+ keys = [('rev-1',), ('rev-2',), ('rev-3',)]
+ # Unfortunately, iter_lines_added_or_present_in_keys iterates the
+ # result in random order (determined by the iteration order from a
+ # set()), so we don't have any solid way to control whether data is
+ # read before or after the reload. However, the setup deletes the middle
+ # node to exercise the reload path as well as possible.
+ # What we care about is that all lines are always yielded, but not
+ # duplicated
+ count = 0
+ reload_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
+ self.assertEqual([1, 1, 0], reload_counter)
+ # Now do it again, to make sure the result is equivalent
+ plain_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
+ self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
+ self.assertEqual(plain_lines, reload_lines)
+ self.assertEqual(21, len(plain_lines))
+ # Now delete all pack files, and see that we raise the right error
+ for trans, name in vf._access._indices.itervalues():
+ trans.delete(name)
+ self.assertListRaises(errors.NoSuchFile,
+ vf.iter_lines_added_or_present_in_keys, keys)
+ self.assertEqual([2, 1, 1], reload_counter)
+
+ def test_get_record_stream_yields_disk_sorted_order(self):
+ # if we get 'unordered' pick a semi-optimal order for reading. The
+ # order should be grouped by pack file, and then by position in file
+ repo = self.make_repository('test', format='pack-0.92')
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ vf = repo.texts
+ vf.add_lines(('f-id', 'rev-5'), [('f-id', 'rev-4')], ['lines\n'])
+ vf.add_lines(('f-id', 'rev-1'), [], ['lines\n'])
+ vf.add_lines(('f-id', 'rev-2'), [('f-id', 'rev-1')], ['lines\n'])
+ repo.commit_write_group()
+ # We inserted them as rev-5, rev-1, rev-2, we should get them back in
+ # the same order
+ stream = vf.get_record_stream([('f-id', 'rev-1'), ('f-id', 'rev-5'),
+ ('f-id', 'rev-2')], 'unordered', False)
+ keys = [r.key for r in stream]
+ self.assertEqual([('f-id', 'rev-5'), ('f-id', 'rev-1'),
+ ('f-id', 'rev-2')], keys)
+ repo.start_write_group()
+ vf.add_lines(('f-id', 'rev-4'), [('f-id', 'rev-3')], ['lines\n'])
+ vf.add_lines(('f-id', 'rev-3'), [('f-id', 'rev-2')], ['lines\n'])
+ vf.add_lines(('f-id', 'rev-6'), [('f-id', 'rev-5')], ['lines\n'])
+ repo.commit_write_group()
+ # Request in random order, to make sure the output order isn't based on
+ # the request
+ request_keys = set(('f-id', 'rev-%d' % i) for i in range(1, 7))
+ stream = vf.get_record_stream(request_keys, 'unordered', False)
+ keys = [r.key for r in stream]
+ # We want to get the keys back in disk order, but it doesn't matter
+ # which pack we read from first. So this can come back in 2 orders
+ alt1 = [('f-id', 'rev-%d' % i) for i in [4, 3, 6, 5, 1, 2]]
+ alt2 = [('f-id', 'rev-%d' % i) for i in [5, 1, 2, 4, 3, 6]]
+ if keys != alt1 and keys != alt2:
+ self.fail('Returned key order did not match either expected order.'
+ ' expected %s or %s, not %s'
+ % (alt1, alt2, keys))
+
+
+class LowLevelKnitDataTests(TestCase):
+
+ def create_gz_content(self, text):
+ sio = StringIO()
+ gz_file = gzip.GzipFile(mode='wb', fileobj=sio)
+ gz_file.write(text)
+ gz_file.close()
+ return sio.getvalue()
+
+ def make_multiple_records(self):
+ """Create the content for multiple records."""
+ sha1sum = osutils.sha_string('foo\nbar\n')
+ total_txt = []
+ gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
+ 'foo\n'
+ 'bar\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
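+ # Each record memo below is (start offset, byte length, expected sha1).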
+ record_1 = (0, len(gz_txt), sha1sum)
+ total_txt.append(gz_txt)
+ sha1sum = osutils.sha_string('baz\n')
+ gz_txt = self.create_gz_content('version rev-id-2 1 %s\n'
+ 'baz\n'
+ 'end rev-id-2\n'
+ % (sha1sum,))
+ record_2 = (record_1[1], len(gz_txt), sha1sum)
+ total_txt.append(gz_txt)
+ return total_txt, record_1, record_2
+
+ def test_valid_knit_data(self):
+ sha1sum = osutils.sha_string('foo\nbar\n')
+ gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
+ 'foo\n'
+ 'bar\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
+ transport = MockTransport([gz_txt])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
+
+ contents = list(knit._read_records_iter(records))
+ self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'],
+ '4e48e2c9a3d2ca8a708cb0cc545700544efb5021')], contents)
+
+ raw_contents = list(knit._read_records_iter_raw(records))
+ self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
+
+ def test_multiple_records_valid(self):
+ total_txt, record_1, record_2 = self.make_multiple_records()
+ transport = MockTransport([''.join(total_txt)])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ records = [(('rev-id-1',), (('rev-id-1',), record_1[0], record_1[1])),
+ (('rev-id-2',), (('rev-id-2',), record_2[0], record_2[1]))]
+
+ contents = list(knit._read_records_iter(records))
+ self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'], record_1[2]),
+ (('rev-id-2',), ['baz\n'], record_2[2])],
+ contents)
+
+ raw_contents = list(knit._read_records_iter_raw(records))
+ self.assertEqual([(('rev-id-1',), total_txt[0], record_1[2]),
+ (('rev-id-2',), total_txt[1], record_2[2])],
+ raw_contents)
+
+ def test_not_enough_lines(self):
+ sha1sum = osutils.sha_string('foo\n')
+ # record says 2 lines, data says 1
+ gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
+ 'foo\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
+ transport = MockTransport([gz_txt])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter(records))
+
+ # read_records_iter_raw won't detect that sort of mismatch/corruption
+ raw_contents = list(knit._read_records_iter_raw(records))
+ self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
+
+ def test_too_many_lines(self):
+ sha1sum = osutils.sha_string('foo\nbar\n')
+ # record says 1 line, data says 2
+ gz_txt = self.create_gz_content('version rev-id-1 1 %s\n'
+ 'foo\n'
+ 'bar\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
+ transport = MockTransport([gz_txt])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter(records))
+
+ # read_records_iter_raw won't detect that sort of mismatch/corruption
+ raw_contents = list(knit._read_records_iter_raw(records))
+ self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
+
+ def test_mismatched_version_id(self):
+ sha1sum = osutils.sha_string('foo\nbar\n')
+ gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
+ 'foo\n'
+ 'bar\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
+ transport = MockTransport([gz_txt])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ # We are asking for rev-id-2, but the data is rev-id-1
+ records = [(('rev-id-2',), (('rev-id-2',), 0, len(gz_txt)))]
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter(records))
+
+ # read_records_iter_raw detects mismatches in the header
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter_raw(records))
+
+ def test_uncompressed_data(self):
+ sha1sum = osutils.sha_string('foo\nbar\n')
+ txt = ('version rev-id-1 2 %s\n'
+ 'foo\n'
+ 'bar\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
+ transport = MockTransport([txt])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ records = [(('rev-id-1',), (('rev-id-1',), 0, len(txt)))]
+
+ # We don't have valid gzip data ==> corrupt
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter(records))
+
+ # read_records_iter_raw will notice the bad data
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter_raw(records))
+
+ def test_corrupted_data(self):
+ sha1sum = osutils.sha_string('foo\nbar\n')
+ gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
+ 'foo\n'
+ 'bar\n'
+ 'end rev-id-1\n'
+ % (sha1sum,))
+ # Change 2 bytes in the middle to \xff
+ gz_txt = gz_txt[:10] + '\xff\xff' + gz_txt[12:]
+ transport = MockTransport([gz_txt])
+ access = _KnitKeyAccess(transport, ConstantMapper('filename'))
+ knit = KnitVersionedFiles(None, access)
+ records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter(records))
+ # read_records_iter_raw will barf on bad gz data
+ self.assertRaises(errors.KnitCorrupt, list,
+ knit._read_records_iter_raw(records))
+
+
+class LowLevelKnitIndexTests(TestCase):
+
+ def get_knit_index(self, transport, name, mode):
+ mapper = ConstantMapper(name)
+ from bzrlib._knit_load_data_py import _load_data_py
+ self.overrideAttr(knit, '_load_data', _load_data_py)
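+ # Force the pure-Python index parser so these tests do not depend on
+ # the compiled _knit_load_data_pyx extension being importable.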
+ allow_writes = lambda: 'w' in mode
+ return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
+
+ def test_create_file(self):
+ transport = MockTransport()
+ index = self.get_knit_index(transport, "filename", "w")
+ index.keys()
+ call = transport.calls.pop(0)
+ # call[1][1] is a StringIO - we can't test it by simple equality.
+ self.assertEqual('put_file_non_atomic', call[0])
+ self.assertEqual('filename.kndx', call[1][0])
+ # With no history, _KndxIndex writes a new index:
+ self.assertEqual(_KndxIndex.HEADER,
+ call[1][1].getvalue())
+ self.assertEqual({'create_parent_dir': True}, call[2])
+
+ def test_read_utf8_version_id(self):
+ unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
+ utf8_revision_id = unicode_revision_id.encode('utf-8')
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ '%s option 0 1 :' % (utf8_revision_id,)
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ # _KndxIndex is a private class, and deals in utf8 revision_ids, not
+ # Unicode revision_ids.
+ self.assertEqual({(utf8_revision_id,):()},
+ index.get_parent_map(index.keys()))
+ self.assertFalse((unicode_revision_id,) in index.keys())
+
+ def test_read_utf8_parents(self):
+ unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
+ utf8_revision_id = unicode_revision_id.encode('utf-8')
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "version option 0 1 .%s :" % (utf8_revision_id,)
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual({("version",):((utf8_revision_id,),)},
+ index.get_parent_map(index.keys()))
+
+ def test_read_ignore_corrupted_lines(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "corrupted",
+ "corrupted options 0 1 .b .c ",
+ "version options 0 1 :"
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual(1, len(index.keys()))
+ self.assertEqual(set([("version",)]), index.keys())
+
+ def test_read_corrupted_header(self):
+ transport = MockTransport(['not a bzr knit index header\n'])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertRaises(KnitHeaderError, index.keys)
+
+ def test_read_duplicate_entries(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "parent options 0 1 :",
+ "version options1 0 1 0 :",
+ "version options2 1 2 .other :",
+ "version options3 3 4 0 .other :"
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual(2, len(index.keys()))
+ # Check that the entry used is the last one written (specific to
+ # KnitIndex-style indices).
+ self.assertEqual("1", index._dictionary_compress([("version",)]))
+ self.assertEqual((("version",), 3, 4), index.get_position(("version",)))
+ self.assertEqual(["options3"], index.get_options(("version",)))
+ self.assertEqual({("version",):(("parent",), ("other",))},
+ index.get_parent_map([("version",)]))
+
+ def test_read_compressed_parents(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 1 :",
+ "b option 0 1 0 :",
+ "c option 0 1 1 0 :",
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual({("b",):(("a",),), ("c",):(("b",), ("a",))},
+ index.get_parent_map([("b",), ("c",)]))
+
+ def test_write_utf8_version_id(self):
+ unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
+ utf8_revision_id = unicode_revision_id.encode('utf-8')
+ transport = MockTransport([
+ _KndxIndex.HEADER
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ index.add_records([
+ ((utf8_revision_id,), ["option"], ((utf8_revision_id,), 0, 1), [])])
+ call = transport.calls.pop(0)
+ # call[1][1] is a StringIO - we can't test it by simple equality.
+ self.assertEqual('put_file_non_atomic', call[0])
+ self.assertEqual('filename.kndx', call[1][0])
+ # With no history, _KndxIndex writes a new index:
+ self.assertEqual(_KndxIndex.HEADER +
+ "\n%s option 0 1 :" % (utf8_revision_id,),
+ call[1][1].getvalue())
+ self.assertEqual({'create_parent_dir': True}, call[2])
+
+ def test_write_utf8_parents(self):
+ unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
+ utf8_revision_id = unicode_revision_id.encode('utf-8')
+ transport = MockTransport([
+ _KndxIndex.HEADER
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ index.add_records([
+ (("version",), ["option"], (("version",), 0, 1), [(utf8_revision_id,)])])
+ call = transport.calls.pop(0)
+ # call[1][1] is a StringIO - we can't test it by simple equality.
+ self.assertEqual('put_file_non_atomic', call[0])
+ self.assertEqual('filename.kndx', call[1][0])
+ # With no history, _KndxIndex writes a new index:
+ self.assertEqual(_KndxIndex.HEADER +
+ "\nversion option 0 1 .%s :" % (utf8_revision_id,),
+ call[1][1].getvalue())
+ self.assertEqual({'create_parent_dir': True}, call[2])
+
+ def test_keys(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+
+ self.assertEqual(set(), index.keys())
+
+ index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
+ self.assertEqual(set([("a",)]), index.keys())
+
+ index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
+ self.assertEqual(set([("a",)]), index.keys())
+
+ index.add_records([(("b",), ["option"], (("b",), 0, 1), [])])
+ self.assertEqual(set([("a",), ("b",)]), index.keys())
+
+ def add_a_b(self, index, random_id=None):
+ kwargs = {}
+ if random_id is not None:
+ kwargs["random_id"] = random_id
+ index.add_records([
+ (("a",), ["option"], (("a",), 0, 1), [("b",)]),
+ (("a",), ["opt"], (("a",), 1, 2), [("c",)]),
+ (("b",), ["option"], (("b",), 2, 3), [("a",)])
+ ], **kwargs)
+
+ def assertIndexIsAB(self, index):
+ self.assertEqual({
+ ('a',): (('c',),),
+ ('b',): (('a',),),
+ },
+ index.get_parent_map(index.keys()))
+ self.assertEqual((("a",), 1, 2), index.get_position(("a",)))
+ self.assertEqual((("b",), 2, 3), index.get_position(("b",)))
+ self.assertEqual(["opt"], index.get_options(("a",)))
+
+ def test_add_versions(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+
+ self.add_a_b(index)
+ call = transport.calls.pop(0)
+ # call[1][1] is a StringIO - we can't test it by simple equality.
+ self.assertEqual('put_file_non_atomic', call[0])
+ self.assertEqual('filename.kndx', call[1][0])
+ # With no history, _KndxIndex writes a new index:
+ self.assertEqual(
+ _KndxIndex.HEADER +
+ "\na option 0 1 .b :"
+ "\na opt 1 2 .c :"
+ "\nb option 2 3 0 :",
+ call[1][1].getvalue())
+ self.assertEqual({'create_parent_dir': True}, call[2])
+ self.assertIndexIsAB(index)
+
+ def test_add_versions_random_id_is_accepted(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.add_a_b(index, random_id=True)
+
+ def test_delay_create_and_add_versions(self):
+ transport = MockTransport()
+
+ index = self.get_knit_index(transport, "filename", "w")
+ # dir_mode=0777)
+ self.assertEqual([], transport.calls)
+ self.add_a_b(index)
+ #self.assertEqual(
+ #[ {"dir_mode": 0777, "create_parent_dir": True, "mode": "wb"},
+ # kwargs)
+ # Two calls: one during which we load the existing index (and, when it's
+ # missing, create it), then a second where we write the contents out.
+ self.assertEqual(2, len(transport.calls))
+ call = transport.calls.pop(0)
+ self.assertEqual('put_file_non_atomic', call[0])
+ self.assertEqual('filename.kndx', call[1][0])
+ # With no history, _KndxIndex writes a new index:
+ self.assertEqual(_KndxIndex.HEADER, call[1][1].getvalue())
+ self.assertEqual({'create_parent_dir': True}, call[2])
+ call = transport.calls.pop(0)
+ # call[1][1] is a StringIO - we can't test it by simple equality.
+ self.assertEqual('put_file_non_atomic', call[0])
+ self.assertEqual('filename.kndx', call[1][0])
+ # With no history, _KndxIndex writes a new index:
+ self.assertEqual(
+ _KndxIndex.HEADER +
+ "\na option 0 1 .b :"
+ "\na opt 1 2 .c :"
+ "\nb option 2 3 0 :",
+ call[1][1].getvalue())
+ self.assertEqual({'create_parent_dir': True}, call[2])
+
+ def assertTotalBuildSize(self, size, keys, positions):
+ self.assertEqual(size,
+ knit._get_total_build_size(None, keys, positions))
+
+ def test__get_total_build_size(self):
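+ # Each value is (record details, (key, offset, size), compression parent);
+ # the total build size is the sum of the sizes along the delta chain,
+ # counting each record only once.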
+ positions = {
+ ('a',): (('fulltext', False), (('a',), 0, 100), None),
+ ('b',): (('line-delta', False), (('b',), 100, 21), ('a',)),
+ ('c',): (('line-delta', False), (('c',), 121, 35), ('b',)),
+ ('d',): (('line-delta', False), (('d',), 156, 12), ('b',)),
+ }
+ self.assertTotalBuildSize(100, [('a',)], positions)
+ self.assertTotalBuildSize(121, [('b',)], positions)
+ # c needs both a & b
+ self.assertTotalBuildSize(156, [('c',)], positions)
+ # we shouldn't count 'b' twice
+ self.assertTotalBuildSize(156, [('b',), ('c',)], positions)
+ self.assertTotalBuildSize(133, [('d',)], positions)
+ self.assertTotalBuildSize(168, [('c',), ('d',)], positions)
+
+ def test_get_position(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 1 :",
+ "b option 1 2 :"
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+
+ self.assertEqual((("a",), 0, 1), index.get_position(("a",)))
+ self.assertEqual((("b",), 1, 2), index.get_position(("b",)))
+
+ def test_get_method(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a fulltext,unknown 0 1 :",
+ "b unknown,line-delta 1 2 :",
+ "c bad 3 4 :"
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+
+ self.assertEqual("fulltext", index.get_method("a"))
+ self.assertEqual("line-delta", index.get_method("b"))
+ self.assertRaises(errors.KnitIndexUnknownMethod, index.get_method, "c")
+
+ def test_get_options(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a opt1 0 1 :",
+ "b opt2,opt3 1 2 :"
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+
+ self.assertEqual(["opt1"], index.get_options("a"))
+ self.assertEqual(["opt2", "opt3"], index.get_options("b"))
+
+ def test_get_parent_map(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 1 :",
+ "b option 1 2 0 .c :",
+ "c option 1 2 1 0 .e :"
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+
+ self.assertEqual({
+ ("a",):(),
+ ("b",):(("a",), ("c",)),
+ ("c",):(("b",), ("a",), ("e",)),
+ }, index.get_parent_map(index.keys()))
+
+ def test_impossible_parent(self):
+ """Test we get KnitCorrupt if the parent couldn't possibly exist."""
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 1 :",
+ "b option 0 1 4 :" # We don't have a 4th record
+ ])
+ index = self.get_knit_index(transport, 'filename', 'r')
+ try:
+ self.assertRaises(errors.KnitCorrupt, index.keys)
+ except TypeError, e:
+ if (str(e) == ('exceptions must be strings, classes, or instances,'
+ ' not exceptions.IndexError')):
+ self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
+ ' raising new style exceptions with python'
+ ' >=2.5')
+ else:
+ raise
+
+ def test_corrupted_parent(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 1 :",
+ "b option 0 1 :",
+ "c option 0 1 1v :", # Can't have a parent of '1v'
+ ])
+ index = self.get_knit_index(transport, 'filename', 'r')
+ try:
+ self.assertRaises(errors.KnitCorrupt, index.keys)
+ except TypeError, e:
+ if (str(e) == ('exceptions must be strings, classes, or instances,'
+ ' not exceptions.ValueError')):
+ self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
+ ' raising new style exceptions with python'
+ ' >=2.5')
+ else:
+ raise
+
+ def test_corrupted_parent_in_list(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 1 :",
+ "b option 0 1 :",
+ "c option 0 1 1 v :", # Can't have a parent of 'v'
+ ])
+ index = self.get_knit_index(transport, 'filename', 'r')
+ try:
+ self.assertRaises(errors.KnitCorrupt, index.keys)
+ except TypeError, e:
+ if (str(e) == ('exceptions must be strings, classes, or instances,'
+ ' not exceptions.ValueError')):
+ self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
+ ' raising new style exceptions with python'
+ ' >=2.5')
+ else:
+ raise
+
+ def test_invalid_position(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 1v 1 :",
+ ])
+ index = self.get_knit_index(transport, 'filename', 'r')
+ try:
+ self.assertRaises(errors.KnitCorrupt, index.keys)
+ except TypeError, e:
+ if (str(e) == ('exceptions must be strings, classes, or instances,'
+ ' not exceptions.ValueError')):
+ self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
+ ' raising new style exceptions with python'
+ ' >=2.5')
+ else:
+ raise
+
+ def test_invalid_size(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 1 1v :",
+ ])
+ index = self.get_knit_index(transport, 'filename', 'r')
+ try:
+ self.assertRaises(errors.KnitCorrupt, index.keys)
+ except TypeError, e:
+ if (str(e) == ('exceptions must be strings, classes, or instances,'
+ ' not exceptions.ValueError')):
+ self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
+ ' raising new style exceptions with python'
+ ' >=2.5')
+ else:
+ raise
+
+ def test_scan_unvalidated_index_not_implemented(self):
+ transport = MockTransport()
+ index = self.get_knit_index(transport, 'filename', 'r')
+ self.assertRaises(
+ NotImplementedError, index.scan_unvalidated_index,
+ 'dummy graph_index')
+ self.assertRaises(
+ NotImplementedError, index.get_missing_compression_parents)
+
+ def test_short_line(self):
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 10 :",
+ "b option 10 10 0", # This line isn't terminated, ignored
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual(set([('a',)]), index.keys())
+
+ def test_skip_incomplete_record(self):
+ # A line with bogus data should just be skipped
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 10 :",
+ "b option 10 10 0", # This line isn't terminated, ignored
+ "c option 20 10 0 :", # Properly terminated, and starts with '\n'
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual(set([('a',), ('c',)]), index.keys())
+
+ def test_trailing_characters(self):
+ # A line with bogus data should just be skipped
+ transport = MockTransport([
+ _KndxIndex.HEADER,
+ "a option 0 10 :",
+ "b option 10 10 0 :a", # This line has extra trailing characters
+ "c option 20 10 0 :", # Properly terminated, and starts with '\n'
+ ])
+ index = self.get_knit_index(transport, "filename", "r")
+ self.assertEqual(set([('a',), ('c',)]), index.keys())
+
+
+class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):
+
+ _test_needs_features = [compiled_knit_feature]
+
+ def get_knit_index(self, transport, name, mode):
+ mapper = ConstantMapper(name)
+ from bzrlib._knit_load_data_pyx import _load_data_c
+ self.overrideAttr(knit, '_load_data', _load_data_c)
+ allow_writes = lambda: mode == 'w'
+ return _KndxIndex(transport, mapper, lambda:None,
+ allow_writes, lambda:True)
+
+
+class Test_KnitAnnotator(TestCaseWithMemoryTransport):
+
+ def make_annotator(self):
+ factory = knit.make_pack_factory(True, True, 1)
+ vf = factory(self.get_transport())
+ return knit._KnitAnnotator(vf)
+
+ def test__expand_fulltext(self):
+ ann = self.make_annotator()
+ rev_key = ('rev-id',)
+ ann._num_compression_children[rev_key] = 1
+ res = ann._expand_record(rev_key, (('parent-id',),), None,
+ ['line1\n', 'line2\n'], ('fulltext', True))
+ # The content object and text lines should be cached appropriately
+ self.assertEqual(['line1\n', 'line2'], res)
+ content_obj = ann._content_objects[rev_key]
+ self.assertEqual(['line1\n', 'line2\n'], content_obj._lines)
+ self.assertEqual(res, content_obj.text())
+ self.assertEqual(res, ann._text_cache[rev_key])
+
+ def test__expand_delta_comp_parent_not_available(self):
+ # Parent isn't available yet, so we return nothing, but queue up this
+ # node for later processing
+ ann = self.make_annotator()
+ rev_key = ('rev-id',)
+ parent_key = ('parent-id',)
+ record = ['0,1,1\n', 'new-line\n']
+ details = ('line-delta', False)
+ res = ann._expand_record(rev_key, (parent_key,), parent_key,
+ record, details)
+ self.assertEqual(None, res)
+ self.assertTrue(parent_key in ann._pending_deltas)
+ pending = ann._pending_deltas[parent_key]
+ self.assertEqual(1, len(pending))
+ self.assertEqual((rev_key, (parent_key,), record, details), pending[0])
+
+ def test__expand_record_tracks_num_children(self):
+ ann = self.make_annotator()
+ rev_key = ('rev-id',)
+ rev2_key = ('rev2-id',)
+ parent_key = ('parent-id',)
+ record = ['0,1,1\n', 'new-line\n']
+ details = ('line-delta', False)
+ ann._num_compression_children[parent_key] = 2
+ ann._expand_record(parent_key, (), None, ['line1\n', 'line2\n'],
+ ('fulltext', False))
+ res = ann._expand_record(rev_key, (parent_key,), parent_key,
+ record, details)
+ self.assertEqual({parent_key: 1}, ann._num_compression_children)
+ # Expanding the second child should remove the content object, and the
+ # num_compression_children entry
+ res = ann._expand_record(rev2_key, (parent_key,), parent_key,
+ record, details)
+ self.assertFalse(parent_key in ann._content_objects)
+ self.assertEqual({}, ann._num_compression_children)
+ # We should not cache the content_objects for rev2 and rev, because
+ # they do not have compression children of their own.
+ self.assertEqual({}, ann._content_objects)
+
+ def test__expand_delta_records_blocks(self):
+ ann = self.make_annotator()
+ rev_key = ('rev-id',)
+ parent_key = ('parent-id',)
+ record = ['0,1,1\n', 'new-line\n']
+ details = ('line-delta', True)
+ ann._num_compression_children[parent_key] = 2
+ ann._expand_record(parent_key, (), None,
+ ['line1\n', 'line2\n', 'line3\n'],
+ ('fulltext', False))
+ ann._expand_record(rev_key, (parent_key,), parent_key, record, details)
+ self.assertEqual({(rev_key, parent_key): [(1, 1, 1), (3, 3, 0)]},
+ ann._matching_blocks)
+ rev2_key = ('rev2-id',)
+ record = ['0,1,1\n', 'new-line\n']
+ details = ('line-delta', False)
+ ann._expand_record(rev2_key, (parent_key,), parent_key, record, details)
+ self.assertEqual([(1, 1, 2), (3, 3, 0)],
+ ann._matching_blocks[(rev2_key, parent_key)])
+
+ def test__get_parent_ann_uses_matching_blocks(self):
+ ann = self.make_annotator()
+ rev_key = ('rev-id',)
+ parent_key = ('parent-id',)
+ parent_ann = [(parent_key,)]*3
+ block_key = (rev_key, parent_key)
+ ann._annotations_cache[parent_key] = parent_ann
+ ann._matching_blocks[block_key] = [(0, 1, 1), (3, 3, 0)]
+ # We should not try to access any parent_lines content, because we know
+ # we already have the matching blocks
+ par_ann, blocks = ann._get_parent_annotations_and_matches(rev_key,
+ ['1\n', '2\n', '3\n'], parent_key)
+ self.assertEqual(parent_ann, par_ann)
+ self.assertEqual([(0, 1, 1), (3, 3, 0)], blocks)
+ self.assertEqual({}, ann._matching_blocks)
+
+ def test__process_pending(self):
+ ann = self.make_annotator()
+ rev_key = ('rev-id',)
+ p1_key = ('p1-id',)
+ p2_key = ('p2-id',)
+ record = ['0,1,1\n', 'new-line\n']
+ details = ('line-delta', False)
+ p1_record = ['line1\n', 'line2\n']
+ ann._num_compression_children[p1_key] = 1
+ res = ann._expand_record(rev_key, (p1_key,p2_key), p1_key,
+ record, details)
+ self.assertEqual(None, res)
+ # self.assertTrue(p1_key in ann._pending_deltas)
+ self.assertEqual({}, ann._pending_annotation)
+ # Now insert p1, and we should be able to expand the delta
+ res = ann._expand_record(p1_key, (), None, p1_record,
+ ('fulltext', False))
+ self.assertEqual(p1_record, res)
+ ann._annotations_cache[p1_key] = [(p1_key,)]*2
+ res = ann._process_pending(p1_key)
+ self.assertEqual([], res)
+ self.assertFalse(p1_key in ann._pending_deltas)
+ self.assertTrue(p2_key in ann._pending_annotation)
+ self.assertEqual({p2_key: [(rev_key, (p1_key, p2_key))]},
+ ann._pending_annotation)
+ # Now fill in parent 2, and pending annotation should be satisfied
+ res = ann._expand_record(p2_key, (), None, [], ('fulltext', False))
+ ann._annotations_cache[p2_key] = []
+ res = ann._process_pending(p2_key)
+ self.assertEqual([rev_key], res)
+ self.assertEqual({}, ann._pending_annotation)
+ self.assertEqual({}, ann._pending_deltas)
+
+ def test_record_delta_removes_basis(self):
+ ann = self.make_annotator()
+ ann._expand_record(('parent-id',), (), None,
+ ['line1\n', 'line2\n'], ('fulltext', False))
+ ann._num_compression_children['parent-id'] = 2
+
+ def test_annotate_special_text(self):
+ ann = self.make_annotator()
+ vf = ann._vf
+ rev1_key = ('rev-1',)
+ rev2_key = ('rev-2',)
+ rev3_key = ('rev-3',)
+ spec_key = ('special:',)
+ vf.add_lines(rev1_key, [], ['initial content\n'])
+ vf.add_lines(rev2_key, [rev1_key], ['initial content\n',
+ 'common content\n',
+ 'content in 2\n'])
+ vf.add_lines(rev3_key, [rev1_key], ['initial content\n',
+ 'common content\n',
+ 'content in 3\n'])
+ spec_text = ('initial content\n'
+ 'common content\n'
+ 'content in 2\n'
+ 'content in 3\n')
+ ann.add_special_text(spec_key, [rev2_key, rev3_key], spec_text)
+ anns, lines = ann.annotate(spec_key)
+ self.assertEqual([(rev1_key,),
+ (rev2_key, rev3_key),
+ (rev2_key,),
+ (rev3_key,),
+ ], anns)
+ self.assertEqualDiff(spec_text, ''.join(lines))
+
+
+class KnitTests(TestCaseWithTransport):
+ """Class containing knit test helper routines."""
+
+ def make_test_knit(self, annotate=False, name='test'):
+ mapper = ConstantMapper(name)
+ return make_file_factory(annotate, mapper)(self.get_transport())
+
+
+class TestBadShaError(KnitTests):
+ """Tests for handling of sha errors."""
+
+ def test_sha_exception_has_text(self):
+ # having the failed text included in the error allows for recovery.
+ source = self.make_test_knit()
+ target = self.make_test_knit(name="target")
+ if not source._max_delta_chain:
+ raise TestNotApplicable(
+ "cannot get delta-caused sha failures without deltas.")
+ # create a basis
+ basis = ('basis',)
+ broken = ('broken',)
+ source.add_lines(basis, (), ['foo\n'])
+ source.add_lines(broken, (basis,), ['foo\n', 'bar\n'])
+ # Seed target with a bad basis text
+ target.add_lines(basis, (), ['gam\n'])
+ target.insert_record_stream(
+ source.get_record_stream([broken], 'unordered', False))
+ err = self.assertRaises(errors.KnitCorrupt,
+ target.get_record_stream([broken], 'unordered', True
+ ).next().get_bytes_as, 'chunked')
+ self.assertEqual(['gam\n', 'bar\n'], err.content)
+ # Test for formatting with live data
+ self.assertStartsWith(str(err), "Knit ")
+
+
+class TestKnitIndex(KnitTests):
+
+ def test_add_versions_dictionary_compresses(self):
+ """Adding versions to the index should update the lookup dict"""
+ knit = self.make_test_knit()
+ idx = knit._index
+ idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
+ self.check_file_contents('test.kndx',
+ '# bzr knit index 8\n'
+ '\n'
+ 'a-1 fulltext 0 0 :'
+ )
+ idx.add_records([
+ (('a-2',), ['fulltext'], (('a-2',), 0, 0), [('a-1',)]),
+ (('a-3',), ['fulltext'], (('a-3',), 0, 0), [('a-2',)]),
+ ])
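+ # Parent references are dictionary-compressed in the .kndx output: the
+ # trailing '0' on the a-2 line points at row 0 (a-1) and the '1' on the
+ # a-3 line points at row 1 (a-2), instead of repeating the revision ids.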
+ self.check_file_contents('test.kndx',
+ '# bzr knit index 8\n'
+ '\n'
+ 'a-1 fulltext 0 0 :\n'
+ 'a-2 fulltext 0 0 0 :\n'
+ 'a-3 fulltext 0 0 1 :'
+ )
+ self.assertEqual(set([('a-3',), ('a-1',), ('a-2',)]), idx.keys())
+ self.assertEqual({
+ ('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False)),
+ ('a-2',): ((('a-2',), 0, 0), None, (('a-1',),), ('fulltext', False)),
+ ('a-3',): ((('a-3',), 0, 0), None, (('a-2',),), ('fulltext', False)),
+ }, idx.get_build_details(idx.keys()))
+ self.assertEqual({('a-1',):(),
+ ('a-2',):(('a-1',),),
+ ('a-3',):(('a-2',),),},
+ idx.get_parent_map(idx.keys()))
+
+ def test_add_versions_fails_clean(self):
+ """If add_versions fails in the middle, it restores a pristine state.
+
+ Any modifications that are made to the index are reset if all versions
+ cannot be added.
+ """
+ # This cheats a little bit by passing in a generator which will
+ # raise an exception before the processing finishes
+ # Other possibilities would be to have a version with the wrong number
+ # of entries, or to make the backing transport unable to write any
+ # files.
+
+ knit = self.make_test_knit()
+ idx = knit._index
+ idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
+
+ class StopEarly(Exception):
+ pass
+
+ def generate_failure():
+ """Add some entries and then raise an exception"""
+ yield (('a-2',), ['fulltext'], (None, 0, 0), ('a-1',))
+ yield (('a-3',), ['fulltext'], (None, 0, 0), ('a-2',))
+ raise StopEarly()
+
+ # Assert the pre-condition
+ def assertA1Only():
+ self.assertEqual(set([('a-1',)]), set(idx.keys()))
+ self.assertEqual(
+ {('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False))},
+ idx.get_build_details([('a-1',)]))
+ self.assertEqual({('a-1',):()}, idx.get_parent_map(idx.keys()))
+
+ assertA1Only()
+ self.assertRaises(StopEarly, idx.add_records, generate_failure())
+ # And it shouldn't be modified
+ assertA1Only()
+
+ def test_knit_index_ignores_empty_files(self):
+ # There was a race condition in older bzr, where a ^C at the right time
+ # could leave an empty .kndx file, which bzr would later claim was a
+ # corrupted file since the header was not present. In reality, the file
+ # just wasn't created, so it should be ignored.
+ t = transport.get_transport_from_path('.')
+ t.put_bytes('test.kndx', '')
+
+ knit = self.make_test_knit()
+
+ def test_knit_index_checks_header(self):
+ t = transport.get_transport_from_path('.')
+ t.put_bytes('test.kndx', '# not really a knit header\n\n')
+ k = self.make_test_knit()
+ self.assertRaises(KnitHeaderError, k.keys)
+
+
+class TestGraphIndexKnit(KnitTests):
+ """Tests for knits using a GraphIndex rather than a KnitIndex."""
+
+ def make_g_index(self, name, ref_lists=0, nodes=[]):
+ builder = GraphIndexBuilder(ref_lists)
+ for node, references, value in nodes:
+ builder.add_node(node, references, value)
+ stream = builder.finish()
+ trans = self.get_transport()
+ size = trans.put_file(name, stream)
+ return GraphIndex(trans, name, size)
+
+ def two_graph_index(self, deltas=False, catch_adds=False):
+ """Build a two-graph index.
+
+ :param deltas: If true, use underlying indices with two node-ref
+ lists and 'parent' set to a delta-compressed against tail.
+ """
+ # build a complex graph across several indices.
+ if deltas:
+ # delta compression inn the index
+ index1 = self.make_g_index('1', 2, [
+ (('tip', ), 'N0 100', ([('parent', )], [], )),
+ (('tail', ), '', ([], []))])
+ index2 = self.make_g_index('2', 2, [
+ (('parent', ), ' 100 78', ([('tail', ), ('ghost', )], [('tail', )])),
+ (('separate', ), '', ([], []))])
+ else:
+ # just blob location and graph in the index.
+ index1 = self.make_g_index('1', 1, [
+ (('tip', ), 'N0 100', ([('parent', )], )),
+ (('tail', ), '', ([], ))])
+ index2 = self.make_g_index('2', 1, [
+ (('parent', ), ' 100 78', ([('tail', ), ('ghost', )], )),
+ (('separate', ), '', ([], ))])
+ combined_index = CombinedGraphIndex([index1, index2])
+ if catch_adds:
+ self.combined_index = combined_index
+ self.caught_entries = []
+ add_callback = self.catch_add
+ else:
+ add_callback = None
+ return _KnitGraphIndex(combined_index, lambda:True, deltas=deltas,
+ add_callback=add_callback)
+
+ def test_keys(self):
+ index = self.two_graph_index()
+ self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
+ set(index.keys()))
+
+ def test_get_position(self):
+ index = self.two_graph_index()
+ self.assertEqual((index._graph_index._indices[0], 0, 100), index.get_position(('tip',)))
+ self.assertEqual((index._graph_index._indices[1], 100, 78), index.get_position(('parent',)))
+
+ def test_get_method_deltas(self):
+ index = self.two_graph_index(deltas=True)
+ self.assertEqual('fulltext', index.get_method(('tip',)))
+ self.assertEqual('line-delta', index.get_method(('parent',)))
+
+ def test_get_method_no_deltas(self):
+ # check that the parent-history lookup is ignored with deltas=False.
+ index = self.two_graph_index(deltas=False)
+ self.assertEqual('fulltext', index.get_method(('tip',)))
+ self.assertEqual('fulltext', index.get_method(('parent',)))
+
+ def test_get_options_deltas(self):
+ index = self.two_graph_index(deltas=True)
+ self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
+ self.assertEqual(['line-delta'], index.get_options(('parent',)))
+
+ def test_get_options_no_deltas(self):
+ # check that the parent-history lookup is ignored with deltas=False.
+ index = self.two_graph_index(deltas=False)
+ self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
+ self.assertEqual(['fulltext'], index.get_options(('parent',)))
+
+ def test_get_parent_map(self):
+ index = self.two_graph_index()
+ self.assertEqual({('parent',):(('tail',), ('ghost',))},
+ index.get_parent_map([('parent',), ('ghost',)]))
+
+ def catch_add(self, entries):
+ self.caught_entries.append(entries)
+
+ def test_add_no_callback_errors(self):
+ index = self.two_graph_index()
+ self.assertRaises(errors.ReadOnlyError, index.add_records,
+ [(('new',), 'fulltext,no-eol', (None, 50, 60), ['separate'])])
+
+ def test_add_version_smoke(self):
+ index = self.two_graph_index(catch_adds=True)
+ index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60),
+ [('separate',)])])
+ self.assertEqual([[(('new', ), 'N50 60', ((('separate',),),))]],
+ self.caught_entries)
+
+ def test_add_version_delta_not_delta_index(self):
+ index = self.two_graph_index(catch_adds=True)
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_version_same_dup(self):
+ index = self.two_graph_index(catch_adds=True)
+ # options can be spelt two different ways
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
+ index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
+ # position/length are ignored (because each pack could have fulltext or
+ # delta, and be at a different position).
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
+ [('parent',)])])
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
+ [('parent',)])])
+ # but none of them should have added data:
+ self.assertEqual([[], [], [], []], self.caught_entries)
+
+ def test_add_version_different_dup(self):
+ index = self.two_graph_index(deltas=True, catch_adds=True)
+ # change options
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
+ # parents
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_versions_nodeltas(self):
+ index = self.two_graph_index(catch_adds=True)
+ index.add_records([
+ (('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
+ (('new2',), 'fulltext', (None, 0, 6), [('new',)]),
+ ])
+ self.assertEqual([(('new', ), 'N50 60', ((('separate',),),)),
+ (('new2', ), ' 0 6', ((('new',),),))],
+ sorted(self.caught_entries[0]))
+ self.assertEqual(1, len(self.caught_entries))
+
+ def test_add_versions_deltas(self):
+ index = self.two_graph_index(deltas=True, catch_adds=True)
+ index.add_records([
+ (('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
+ (('new2',), 'line-delta', (None, 0, 6), [('new',)]),
+ ])
+ self.assertEqual([(('new', ), 'N50 60', ((('separate',),), ())),
+ (('new2', ), ' 0 6', ((('new',),), (('new',),), ))],
+ sorted(self.caught_entries[0]))
+ self.assertEqual(1, len(self.caught_entries))
+
+ def test_add_versions_delta_not_delta_index(self):
+ index = self.two_graph_index(catch_adds=True)
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_versions_random_id_accepted(self):
+ index = self.two_graph_index(catch_adds=True)
+ index.add_records([], random_id=True)
+
+ def test_add_versions_same_dup(self):
+ index = self.two_graph_index(catch_adds=True)
+ # options can be spelt two different ways
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100),
+ [('parent',)])])
+ index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100),
+ [('parent',)])])
+ # position/length are ignored (because each pack could have fulltext or
+ # delta, and be at a different position).
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
+ [('parent',)])])
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
+ [('parent',)])])
+ # but none of them should have added data.
+ self.assertEqual([[], [], [], []], self.caught_entries)
+
+ def test_add_versions_different_dup(self):
+ index = self.two_graph_index(deltas=True, catch_adds=True)
+ # change options
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
+ # parents
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
+ # change options in the second record
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
+ (('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
+ self.assertEqual([], self.caught_entries)
+
+ def make_g_index_missing_compression_parent(self):
+ graph_index = self.make_g_index('missing_comp', 2,
+ [(('tip', ), ' 100 78',
+ ([('missing-parent', ), ('ghost', )], [('missing-parent', )]))])
+ return graph_index
+
+ def make_g_index_missing_parent(self):
+ graph_index = self.make_g_index('missing_parent', 2,
+ [(('parent', ), ' 100 78', ([], [])),
+ (('tip', ), ' 100 78',
+ ([('parent', ), ('missing-parent', )], [('parent', )])),
+ ])
+ return graph_index
+
+ def make_g_index_no_external_refs(self):
+ graph_index = self.make_g_index('no_external_refs', 2,
+ [(('rev', ), ' 100 78',
+ ([('parent', ), ('ghost', )], []))])
+ return graph_index
+
+ def test_add_good_unvalidated_index(self):
+ unvalidated = self.make_g_index_no_external_refs()
+ combined = CombinedGraphIndex([unvalidated])
+ index = _KnitGraphIndex(combined, lambda: True, deltas=True)
+ index.scan_unvalidated_index(unvalidated)
+ self.assertEqual(frozenset(), index.get_missing_compression_parents())
+
+ def test_add_missing_compression_parent_unvalidated_index(self):
+ unvalidated = self.make_g_index_missing_compression_parent()
+ combined = CombinedGraphIndex([unvalidated])
+ index = _KnitGraphIndex(combined, lambda: True, deltas=True)
+ index.scan_unvalidated_index(unvalidated)
+ # This also checks that it's only the compression parent that is
+ # examined, otherwise 'ghost' would also be reported as a missing
+ # parent.
+ self.assertEqual(
+ frozenset([('missing-parent',)]),
+ index.get_missing_compression_parents())
+
+ def test_add_missing_noncompression_parent_unvalidated_index(self):
+ unvalidated = self.make_g_index_missing_parent()
+ combined = CombinedGraphIndex([unvalidated])
+ index = _KnitGraphIndex(combined, lambda: True, deltas=True,
+ track_external_parent_refs=True)
+ index.scan_unvalidated_index(unvalidated)
+ self.assertEqual(
+ frozenset([('missing-parent',)]), index.get_missing_parents())
+
+ def test_track_external_parent_refs(self):
+ g_index = self.make_g_index('empty', 2, [])
+ combined = CombinedGraphIndex([g_index])
+ index = _KnitGraphIndex(combined, lambda: True, deltas=True,
+ add_callback=self.catch_add, track_external_parent_refs=True)
+ self.caught_entries = []
+ index.add_records([
+ (('new-key',), 'fulltext,no-eol', (None, 50, 60),
+ [('parent-1',), ('parent-2',)])])
+ self.assertEqual(
+ frozenset([('parent-1',), ('parent-2',)]),
+ index.get_missing_parents())
+
+ def test_add_unvalidated_index_with_present_external_references(self):
+ index = self.two_graph_index(deltas=True)
+ # Ugly hack to get at one of the underlying GraphIndex objects that
+ # two_graph_index built.
+ unvalidated = index._graph_index._indices[1]
+ # 'parent' is an external ref of _indices[1] (unvalidated), but is
+ # present in _indices[0].
+ index.scan_unvalidated_index(unvalidated)
+ self.assertEqual(frozenset(), index.get_missing_compression_parents())
+
+ def make_new_missing_parent_g_index(self, name):
+ missing_parent = name + '-missing-parent'
+ graph_index = self.make_g_index(name, 2,
+ [((name + 'tip', ), ' 100 78',
+ ([(missing_parent, ), ('ghost', )], [(missing_parent, )]))])
+ return graph_index
+
+ def test_add_multiple_unvalidated_indices_with_missing_parents(self):
+ g_index_1 = self.make_new_missing_parent_g_index('one')
+ g_index_2 = self.make_new_missing_parent_g_index('two')
+ combined = CombinedGraphIndex([g_index_1, g_index_2])
+ index = _KnitGraphIndex(combined, lambda: True, deltas=True)
+ index.scan_unvalidated_index(g_index_1)
+ index.scan_unvalidated_index(g_index_2)
+ self.assertEqual(
+ frozenset([('one-missing-parent',), ('two-missing-parent',)]),
+ index.get_missing_compression_parents())
+
+ def test_add_multiple_unvalidated_indices_with_mutual_dependencies(self):
+ graph_index_a = self.make_g_index('one', 2,
+ [(('parent-one', ), ' 100 78', ([('non-compression-parent',)], [])),
+ (('child-of-two', ), ' 100 78',
+ ([('parent-two',)], [('parent-two',)]))])
+ graph_index_b = self.make_g_index('two', 2,
+ [(('parent-two', ), ' 100 78', ([('non-compression-parent',)], [])),
+ (('child-of-one', ), ' 100 78',
+ ([('parent-one',)], [('parent-one',)]))])
+ combined = CombinedGraphIndex([graph_index_a, graph_index_b])
+ index = _KnitGraphIndex(combined, lambda: True, deltas=True)
+ index.scan_unvalidated_index(graph_index_a)
+ index.scan_unvalidated_index(graph_index_b)
+ self.assertEqual(
+ frozenset([]), index.get_missing_compression_parents())
+
+
+class TestNoParentsGraphIndexKnit(KnitTests):
+ """Tests for knits using _KnitGraphIndex with no parents."""
+
+ def make_g_index(self, name, ref_lists=0, nodes=[]):
+ builder = GraphIndexBuilder(ref_lists)
+ for node, references in nodes:
+ builder.add_node(node, references)
+ stream = builder.finish()
+ trans = self.get_transport()
+ size = trans.put_file(name, stream)
+ return GraphIndex(trans, name, size)
+
+ def test_add_good_unvalidated_index(self):
+ unvalidated = self.make_g_index('unvalidated')
+ combined = CombinedGraphIndex([unvalidated])
+ index = _KnitGraphIndex(combined, lambda: True, parents=False)
+ index.scan_unvalidated_index(unvalidated)
+ self.assertEqual(frozenset(),
+ index.get_missing_compression_parents())
+
+ def test_parents_deltas_incompatible(self):
+ index = CombinedGraphIndex([])
+ self.assertRaises(errors.KnitError, _KnitGraphIndex, lambda:True,
+ index, deltas=True, parents=False)
+
+ def two_graph_index(self, catch_adds=False):
+ """Build a two-graph index.
+
+ :param catch_adds: If true, capture the entries passed to the index's
+ add_callback in self.caught_entries instead of writing them anywhere.
+ """
+ # put several versions in the index.
+ index1 = self.make_g_index('1', 0, [
+ (('tip', ), 'N0 100'),
+ (('tail', ), '')])
+ index2 = self.make_g_index('2', 0, [
+ (('parent', ), ' 100 78'),
+ (('separate', ), '')])
+ combined_index = CombinedGraphIndex([index1, index2])
+ if catch_adds:
+ self.combined_index = combined_index
+ self.caught_entries = []
+ add_callback = self.catch_add
+ else:
+ add_callback = None
+ return _KnitGraphIndex(combined_index, lambda:True, parents=False,
+ add_callback=add_callback)
+
+ def test_keys(self):
+ index = self.two_graph_index()
+ self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
+ set(index.keys()))
+
+ def test_get_position(self):
+ index = self.two_graph_index()
+ self.assertEqual((index._graph_index._indices[0], 0, 100),
+ index.get_position(('tip',)))
+ self.assertEqual((index._graph_index._indices[1], 100, 78),
+ index.get_position(('parent',)))
+
+ def test_get_method(self):
+ index = self.two_graph_index()
+ self.assertEqual('fulltext', index.get_method(('tip',)))
+ self.assertEqual('fulltext', index.get_method(('parent',)))
+
+ def test_get_options(self):
+ index = self.two_graph_index()
+ self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
+ self.assertEqual(['fulltext'], index.get_options(('parent',)))
+
+ def test_get_parent_map(self):
+ index = self.two_graph_index()
+ self.assertEqual({('parent',):None},
+ index.get_parent_map([('parent',), ('ghost',)]))
+
+ def catch_add(self, entries):
+ self.caught_entries.append(entries)
+
+ def test_add_no_callback_errors(self):
+ index = self.two_graph_index()
+ self.assertRaises(errors.ReadOnlyError, index.add_records,
+ [(('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)])])
+
+ def test_add_version_smoke(self):
+ index = self.two_graph_index(catch_adds=True)
+ index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60), [])])
+ self.assertEqual([[(('new', ), 'N50 60')]],
+ self.caught_entries)
+
+ def test_add_version_delta_not_delta_index(self):
+ index = self.two_graph_index(catch_adds=True)
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('new',), 'no-eol,line-delta', (None, 0, 100), [])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_version_same_dup(self):
+ index = self.two_graph_index(catch_adds=True)
+ # options can be spelt two different ways
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
+ index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
+ # position/length are ignored (because each pack could have fulltext or
+ # delta, and be at a different position).
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
+ # but none of them should have added data.
+ self.assertEqual([[], [], [], []], self.caught_entries)
+
+ def test_add_version_different_dup(self):
+ index = self.two_graph_index(catch_adds=True)
+ # change options
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext', (None, 0, 100), [])])
+ # parents
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_versions(self):
+ index = self.two_graph_index(catch_adds=True)
+ index.add_records([
+ (('new',), 'fulltext,no-eol', (None, 50, 60), []),
+ (('new2',), 'fulltext', (None, 0, 6), []),
+ ])
+ self.assertEqual([(('new', ), 'N50 60'), (('new2', ), ' 0 6')],
+ sorted(self.caught_entries[0]))
+ self.assertEqual(1, len(self.caught_entries))
+
+ def test_add_versions_delta_not_delta_index(self):
+ index = self.two_graph_index(catch_adds=True)
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_versions_parents_not_parents_index(self):
+ index = self.two_graph_index(catch_adds=True)
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('new',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
+ self.assertEqual([], self.caught_entries)
+
+ def test_add_versions_random_id_accepted(self):
+ index = self.two_graph_index(catch_adds=True)
+ index.add_records([], random_id=True)
+
+ def test_add_versions_same_dup(self):
+ index = self.two_graph_index(catch_adds=True)
+ # options can be spelt two different ways
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
+ index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
+ # position/length are ignored (because each pack could have fulltext or
+ # delta, and be at a different position).
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
+ index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
+ # but none of them should have added data.
+ self.assertEqual([[], [], [], []], self.caught_entries)
+
+ def test_add_versions_different_dup(self):
+ index = self.two_graph_index(catch_adds=True)
+ # change options
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext', (None, 0, 100), [])])
+ # parents
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
+ # change options in the second record
+ self.assertRaises(errors.KnitCorrupt, index.add_records,
+ [(('tip',), 'fulltext,no-eol', (None, 0, 100), []),
+ (('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
+ self.assertEqual([], self.caught_entries)
+
+
+class TestKnitVersionedFiles(KnitTests):
+
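+ # Helper: assertGroupKeysForIo checks the (keys, non_local_keys) groups
+ # that _group_keys_for_io produces for a given positions map and minimum
+ # buffer size; the expectations in test__group_keys_for_io below exercise
+ # the grouping behaviour at several thresholds.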
+ def assertGroupKeysForIo(self, exp_groups, keys, non_local_keys,
+ positions, _min_buffer_size=None):
+ kvf = self.make_test_knit()
+ if _min_buffer_size is None:
+ _min_buffer_size = knit._STREAM_MIN_BUFFER_SIZE
+ self.assertEqual(exp_groups, kvf._group_keys_for_io(keys,
+ non_local_keys, positions,
+ _min_buffer_size=_min_buffer_size))
+
+ def assertSplitByPrefix(self, expected_map, expected_prefix_order,
+ keys):
+ split, prefix_order = KnitVersionedFiles._split_by_prefix(keys)
+ self.assertEqual(expected_map, split)
+ self.assertEqual(expected_prefix_order, prefix_order)
+
+ def test__group_keys_for_io(self):
+ ft_detail = ('fulltext', False)
+ ld_detail = ('line-delta', False)
+ f_a = ('f', 'a')
+ f_b = ('f', 'b')
+ f_c = ('f', 'c')
+ g_a = ('g', 'a')
+ g_b = ('g', 'b')
+ g_c = ('g', 'c')
+ positions = {
+ f_a: (ft_detail, (f_a, 0, 100), None),
+ f_b: (ld_detail, (f_b, 100, 21), f_a),
+ f_c: (ld_detail, (f_c, 180, 15), f_b),
+ g_a: (ft_detail, (g_a, 121, 35), None),
+ g_b: (ld_detail, (g_b, 156, 12), g_a),
+ g_c: (ld_detail, (g_c, 195, 13), g_a),
+ }
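+ # Each positions entry maps key -> (details, (key, start, length),
+ # compression parent or None); the offsets above are laid out so the
+ # varying _min_buffer_size values below force different IO groupings.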
+ self.assertGroupKeysForIo([([f_a], set())],
+ [f_a], [], positions)
+ self.assertGroupKeysForIo([([f_a], set([f_a]))],
+ [f_a], [f_a], positions)
+ self.assertGroupKeysForIo([([f_a, f_b], set([]))],
+ [f_a, f_b], [], positions)
+ self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
+ [f_a, f_b], [f_b], positions)
+ self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
+ [f_a, g_a, f_b, g_b], [], positions)
+ self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
+ [f_a, g_a, f_b, g_b], [], positions,
+ _min_buffer_size=150)
+ self.assertGroupKeysForIo([([f_a, f_b], set()), ([g_a, g_b], set())],
+ [f_a, g_a, f_b, g_b], [], positions,
+ _min_buffer_size=100)
+ self.assertGroupKeysForIo([([f_c], set()), ([g_b], set())],
+ [f_c, g_b], [], positions,
+ _min_buffer_size=125)
+ self.assertGroupKeysForIo([([g_b, f_c], set())],
+ [g_b, f_c], [], positions,
+ _min_buffer_size=125)
+
+ def test__split_by_prefix(self):
+ self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
+ 'g': [('g', 'b'), ('g', 'a')],
+ }, ['f', 'g'],
+ [('f', 'a'), ('g', 'b'),
+ ('g', 'a'), ('f', 'b')])
+
+ self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
+ 'g': [('g', 'b'), ('g', 'a')],
+ }, ['f', 'g'],
+ [('f', 'a'), ('f', 'b'),
+ ('g', 'b'), ('g', 'a')])
+
+ self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
+ 'g': [('g', 'b'), ('g', 'a')],
+ }, ['f', 'g'],
+ [('f', 'a'), ('f', 'b'),
+ ('g', 'b'), ('g', 'a')])
+
+ self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
+ 'g': [('g', 'b'), ('g', 'a')],
+ '': [('a',), ('b',)]
+ }, ['f', 'g', ''],
+ [('f', 'a'), ('g', 'b'),
+ ('a',), ('b',),
+ ('g', 'a'), ('f', 'b')])
+
+
+class TestStacking(KnitTests):
+
+ def get_basis_and_test_knit(self):
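+ # The basis knit is wrapped in RecordingVersionedFilesDecorator so each
+ # test can assert, via basis.calls, exactly which requests fell through
+ # the stacked 'test' knit to its fallback.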
+ basis = self.make_test_knit(name='basis')
+ basis = RecordingVersionedFilesDecorator(basis)
+ test = self.make_test_knit(name='test')
+ test.add_fallback_versioned_files(basis)
+ return basis, test
+
+ def test_add_fallback_versioned_files(self):
+ basis = self.make_test_knit(name='basis')
+ test = self.make_test_knit(name='test')
+ # It must not error; other tests test that the fallback is referred to
+ # when accessing data.
+ test.add_fallback_versioned_files(basis)
+
+ def test_add_lines(self):
+ # lines added to the test are not added to the basis
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_cross_border = ('quux',)
+ key_delta = ('zaphod',)
+ test.add_lines(key, (), ['foo\n'])
+ self.assertEqual({}, basis.get_parent_map([key]))
+ # lines added to the test that reference across the stack do a
+ # fulltext.
+ basis.add_lines(key_basis, (), ['foo\n'])
+ basis.calls = []
+ test.add_lines(key_cross_border, (key_basis,), ['foo\n'])
+ self.assertEqual('fulltext', test._index.get_method(key_cross_border))
+ # we don't even need to look at the basis to see that this should be
+ # stored as a fulltext
+ self.assertEqual([], basis.calls)
+ # Subsequent adds do delta.
+ basis.calls = []
+ test.add_lines(key_delta, (key_cross_border,), ['foo\n'])
+ self.assertEqual('line-delta', test._index.get_method(key_delta))
+ self.assertEqual([], basis.calls)
+
+ def test_annotate(self):
+ # annotations from the test knit are answered without asking the basis
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_missing = ('missing',)
+ test.add_lines(key, (), ['foo\n'])
+ details = test.annotate(key)
+ self.assertEqual([(key, 'foo\n')], details)
+ self.assertEqual([], basis.calls)
+ # But texts that are not in the test knit are looked for in the basis
+ # directly.
+ basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
+ basis.calls = []
+ details = test.annotate(key_basis)
+ self.assertEqual([(key_basis, 'foo\n'), (key_basis, 'bar\n')], details)
+ # Not optimised to date:
+ # self.assertEqual([("annotate", key_basis)], basis.calls)
+ self.assertEqual([('get_parent_map', set([key_basis])),
+ ('get_parent_map', set([key_basis])),
+ ('get_record_stream', [key_basis], 'topological', True)],
+ basis.calls)
+
+ def test_check(self):
+ # At the moment checking a stacked knit does implicitly check the
+ # fallback files.
+ basis, test = self.get_basis_and_test_knit()
+ test.check()
+
+ def test_get_parent_map(self):
+ # parents in the test knit are answered without asking the basis
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_missing = ('missing',)
+ test.add_lines(key, (), [])
+ parent_map = test.get_parent_map([key])
+ self.assertEqual({key: ()}, parent_map)
+ self.assertEqual([], basis.calls)
+ # But parents that are not in the test knit are looked for in the basis
+ basis.add_lines(key_basis, (), [])
+ basis.calls = []
+ parent_map = test.get_parent_map([key, key_basis, key_missing])
+ self.assertEqual({key: (),
+ key_basis: ()}, parent_map)
+ self.assertEqual([("get_parent_map", set([key_basis, key_missing]))],
+ basis.calls)
+
+ def test_get_record_stream_unordered_fulltexts(self):
+ # records from the test knit are answered without asking the basis:
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_missing = ('missing',)
+ test.add_lines(key, (), ['foo\n'])
+ records = list(test.get_record_stream([key], 'unordered', True))
+ self.assertEqual(1, len(records))
+ self.assertEqual([], basis.calls)
+ # Missing (from test knit) objects are retrieved from the basis:
+ basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
+ basis.calls = []
+ records = list(test.get_record_stream([key_basis, key_missing],
+ 'unordered', True))
+ self.assertEqual(2, len(records))
+ calls = list(basis.calls)
+ for record in records:
+ self.assertSubset([record.key], (key_basis, key_missing))
+ if record.key == key_missing:
+ self.assertIsInstance(record, AbsentContentFactory)
+ else:
+ reference = list(basis.get_record_stream([key_basis],
+ 'unordered', True))[0]
+ self.assertEqual(reference.key, record.key)
+ self.assertEqual(reference.sha1, record.sha1)
+ self.assertEqual(reference.storage_kind, record.storage_kind)
+ self.assertEqual(reference.get_bytes_as(reference.storage_kind),
+ record.get_bytes_as(record.storage_kind))
+ self.assertEqual(reference.get_bytes_as('fulltext'),
+ record.get_bytes_as('fulltext'))
+ # It's not strictly minimal, but it seems reasonable for now for it to
+ # ask which fallbacks have which parents.
+ self.assertEqual([
+ ("get_parent_map", set([key_basis, key_missing])),
+ ("get_record_stream", [key_basis], 'unordered', True)],
+ calls)
+
+ def test_get_record_stream_ordered_fulltexts(self):
+ # ordering is preserved down into the fallback store.
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_basis_2 = ('quux',)
+ key_missing = ('missing',)
+ test.add_lines(key, (key_basis,), ['foo\n'])
+ # Missing (from test knit) objects are retrieved from the basis:
+ basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
+ basis.add_lines(key_basis_2, (), ['quux\n'])
+ basis.calls = []
+ # ask for the keys in non-topological order
+ records = list(test.get_record_stream(
+ [key, key_basis, key_missing, key_basis_2], 'topological', True))
+ self.assertEqual(4, len(records))
+ results = []
+ for record in records:
+ self.assertSubset([record.key],
+ (key_basis, key_missing, key_basis_2, key))
+ if record.key == key_missing:
+ self.assertIsInstance(record, AbsentContentFactory)
+ else:
+ results.append((record.key, record.sha1, record.storage_kind,
+ record.get_bytes_as('fulltext')))
+ calls = list(basis.calls)
+ order = [record[0] for record in results]
+ self.assertEqual([key_basis_2, key_basis, key], order)
+ for result in results:
+ if result[0] == key:
+ source = test
+ else:
+ source = basis
+ record = source.get_record_stream([result[0]], 'unordered',
+ True).next()
+ self.assertEqual(record.key, result[0])
+ self.assertEqual(record.sha1, result[1])
+ # We used to check that the storage kind matched, but actually it
+ # depends on whether it was sourced from the basis, or in a single
+ # group, because asking for full texts returns proxy objects to a
+ # _ContentMapGenerator object; so checking the kind is unneeded.
+ self.assertEqual(record.get_bytes_as('fulltext'), result[3])
+ # It's not strictly minimal, but it seems reasonable for now for it to
+ # ask which fallbacks have which parents.
+ self.assertEqual([
+ ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
+ # topological is requested from the fallback, because that is what
+ # was requested at the top level.
+ ("get_record_stream", [key_basis_2, key_basis], 'topological', True)],
+ calls)
+
+ def test_get_record_stream_unordered_deltas(self):
+ # records from the test knit are answered without asking the basis:
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_missing = ('missing',)
+ test.add_lines(key, (), ['foo\n'])
+ records = list(test.get_record_stream([key], 'unordered', False))
+ self.assertEqual(1, len(records))
+ self.assertEqual([], basis.calls)
+ # Missing (from test knit) objects are retrieved from the basis:
+ basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
+ basis.calls = []
+ records = list(test.get_record_stream([key_basis, key_missing],
+ 'unordered', False))
+ self.assertEqual(2, len(records))
+ calls = list(basis.calls)
+ for record in records:
+ self.assertSubset([record.key], (key_basis, key_missing))
+ if record.key == key_missing:
+ self.assertIsInstance(record, AbsentContentFactory)
+ else:
+ reference = list(basis.get_record_stream([key_basis],
+ 'unordered', False))[0]
+ self.assertEqual(reference.key, record.key)
+ self.assertEqual(reference.sha1, record.sha1)
+ self.assertEqual(reference.storage_kind, record.storage_kind)
+ self.assertEqual(reference.get_bytes_as(reference.storage_kind),
+ record.get_bytes_as(record.storage_kind))
+ # It's not strictly minimal, but it seems reasonable for now for it to
+ # ask which fallbacks have which parents.
+ self.assertEqual([
+ ("get_parent_map", set([key_basis, key_missing])),
+ ("get_record_stream", [key_basis], 'unordered', False)],
+ calls)
+
+ def test_get_record_stream_ordered_deltas(self):
+ # ordering is preserved down into the fallback store.
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_basis_2 = ('quux',)
+ key_missing = ('missing',)
+ test.add_lines(key, (key_basis,), ['foo\n'])
+ # Missing (from test knit) objects are retrieved from the basis:
+ basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
+ basis.add_lines(key_basis_2, (), ['quux\n'])
+ basis.calls = []
+ # ask for the keys in non-topological order
+ records = list(test.get_record_stream(
+ [key, key_basis, key_missing, key_basis_2], 'topological', False))
+ self.assertEqual(4, len(records))
+ results = []
+ for record in records:
+ self.assertSubset([record.key],
+ (key_basis, key_missing, key_basis_2, key))
+ if record.key == key_missing:
+ self.assertIsInstance(record, AbsentContentFactory)
+ else:
+ results.append((record.key, record.sha1, record.storage_kind,
+ record.get_bytes_as(record.storage_kind)))
+ calls = list(basis.calls)
+ order = [record[0] for record in results]
+ self.assertEqual([key_basis_2, key_basis, key], order)
+ for result in results:
+ if result[0] == key:
+ source = test
+ else:
+ source = basis
+ record = source.get_record_stream([result[0]], 'unordered',
+ False).next()
+ self.assertEqual(record.key, result[0])
+ self.assertEqual(record.sha1, result[1])
+ self.assertEqual(record.storage_kind, result[2])
+ self.assertEqual(record.get_bytes_as(record.storage_kind), result[3])
+ # It's not strictly minimal, but it seems reasonable for now for it to
+ # ask which fallbacks have which parents.
+ self.assertEqual([
+ ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
+ ("get_record_stream", [key_basis_2, key_basis], 'topological', False)],
+ calls)
+
+ def test_get_sha1s(self):
+ # sha1's in the test knit are answered without asking the basis
+ basis, test = self.get_basis_and_test_knit()
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_missing = ('missing',)
+ test.add_lines(key, (), ['foo\n'])
+ key_sha1sum = osutils.sha_string('foo\n')
+ sha1s = test.get_sha1s([key])
+ self.assertEqual({key: key_sha1sum}, sha1s)
+ self.assertEqual([], basis.calls)
+ # But texts that are not in the test knit are looked for in the basis
+ # directly (rather than via text reconstruction) so that remote servers
+ # etc don't have to answer with full content.
+ basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
+ basis_sha1sum = osutils.sha_string('foo\nbar\n')
+ basis.calls = []
+ sha1s = test.get_sha1s([key, key_missing, key_basis])
+ self.assertEqual({key: key_sha1sum,
+ key_basis: basis_sha1sum}, sha1s)
+ self.assertEqual([("get_sha1s", set([key_basis, key_missing]))],
+ basis.calls)
+
+ def test_insert_record_stream(self):
+ # records are inserted as normal; insert_record_stream builds on
+ # add_lines, so a smoke test should be all that's needed:
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_delta = ('zaphod',)
+ basis, test = self.get_basis_and_test_knit()
+ source = self.make_test_knit(name='source')
+ basis.add_lines(key_basis, (), ['foo\n'])
+ basis.calls = []
+ source.add_lines(key_basis, (), ['foo\n'])
+ source.add_lines(key_delta, (key_basis,), ['bar\n'])
+ stream = source.get_record_stream([key_delta], 'unordered', False)
+ test.insert_record_stream(stream)
+ # XXX: this makes somewhat too many calls while working out whether it
+ # has to recreate the full text.
+ self.assertEqual([("get_parent_map", set([key_basis])),
+ ('get_parent_map', set([key_basis])),
+ ('get_record_stream', [key_basis], 'unordered', True)],
+ basis.calls)
+ self.assertEqual({key_delta:(key_basis,)},
+ test.get_parent_map([key_delta]))
+ self.assertEqual('bar\n', test.get_record_stream([key_delta],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+
+ def test_iter_lines_added_or_present_in_keys(self):
+ # Lines from the basis are returned, and lines for a given key are only
+ # returned once.
+ key1 = ('foo1',)
+ key2 = ('foo2',)
+ # all sources are asked for keys:
+ basis, test = self.get_basis_and_test_knit()
+ basis.add_lines(key1, (), ["foo"])
+ basis.calls = []
+ lines = list(test.iter_lines_added_or_present_in_keys([key1]))
+ self.assertEqual([("foo\n", key1)], lines)
+ self.assertEqual([("iter_lines_added_or_present_in_keys", set([key1]))],
+ basis.calls)
+ # keys in both are not duplicated:
+ test.add_lines(key2, (), ["bar\n"])
+ basis.add_lines(key2, (), ["bar\n"])
+ basis.calls = []
+ lines = list(test.iter_lines_added_or_present_in_keys([key2]))
+ self.assertEqual([("bar\n", key2)], lines)
+ self.assertEqual([], basis.calls)
+
+ def test_keys(self):
+ key1 = ('foo1',)
+ key2 = ('foo2',)
+ # all sources are asked for keys:
+ basis, test = self.get_basis_and_test_knit()
+ keys = test.keys()
+ self.assertEqual(set(), set(keys))
+ self.assertEqual([("keys",)], basis.calls)
+ # keys from a basis are returned:
+ basis.add_lines(key1, (), [])
+ basis.calls = []
+ keys = test.keys()
+ self.assertEqual(set([key1]), set(keys))
+ self.assertEqual([("keys",)], basis.calls)
+ # keys in both are not duplicated:
+ test.add_lines(key2, (), [])
+ basis.add_lines(key2, (), [])
+ basis.calls = []
+ keys = test.keys()
+ self.assertEqual(2, len(keys))
+ self.assertEqual(set([key1, key2]), set(keys))
+ self.assertEqual([("keys",)], basis.calls)
+
+ def test_add_mpdiffs(self):
+ # records are inserted as normal; add_mpdiffs builds on
+ # add_lines, so a smoke test should be all that's needed:
+ key = ('foo',)
+ key_basis = ('bar',)
+ key_delta = ('zaphod',)
+ basis, test = self.get_basis_and_test_knit()
+ source = self.make_test_knit(name='source')
+ basis.add_lines(key_basis, (), ['foo\n'])
+ basis.calls = []
+ source.add_lines(key_basis, (), ['foo\n'])
+ source.add_lines(key_delta, (key_basis,), ['bar\n'])
+ diffs = source.make_mpdiffs([key_delta])
+ test.add_mpdiffs([(key_delta, (key_basis,),
+ source.get_sha1s([key_delta])[key_delta], diffs[0])])
+ self.assertEqual([("get_parent_map", set([key_basis])),
+ ('get_record_stream', [key_basis], 'unordered', True),],
+ basis.calls)
+ self.assertEqual({key_delta:(key_basis,)},
+ test.get_parent_map([key_delta]))
+ self.assertEqual('bar\n', test.get_record_stream([key_delta],
+ 'unordered', True).next().get_bytes_as('fulltext'))
+
+ def test_make_mpdiffs(self):
+ # Generating an mpdiff across a stacking boundary should detect parent
+ # text regions.
+ key = ('foo',)
+ key_left = ('bar',)
+ key_right = ('zaphod',)
+ basis, test = self.get_basis_and_test_knit()
+ basis.add_lines(key_left, (), ['bar\n'])
+ basis.add_lines(key_right, (), ['zaphod\n'])
+ basis.calls = []
+ test.add_lines(key, (key_left, key_right),
+ ['bar\n', 'foo\n', 'zaphod\n'])
+ diffs = test.make_mpdiffs([key])
+ self.assertEqual([
+ multiparent.MultiParent([multiparent.ParentText(0, 0, 0, 1),
+ multiparent.NewText(['foo\n']),
+ multiparent.ParentText(1, 0, 2, 1)])],
+ diffs)
+ self.assertEqual(3, len(basis.calls))
+ self.assertEqual([
+ ("get_parent_map", set([key_left, key_right])),
+ ("get_parent_map", set([key_left, key_right])),
+ ],
+ basis.calls[:-1])
+ last_call = basis.calls[-1]
+ self.assertEqual('get_record_stream', last_call[0])
+ self.assertEqual(set([key_left, key_right]), set(last_call[1]))
+ self.assertEqual('topological', last_call[2])
+ self.assertEqual(True, last_call[3])
+
+
+class TestNetworkBehaviour(KnitTests):
+ """Tests for getting data out of/into knits over the network."""
+
+ def test_include_delta_closure_generates_a_knit_delta_closure(self):
+ vf = self.make_test_knit(name='test')
+ # put in three texts, giving ft, delta, delta
+ vf.add_lines(('base',), (), ['base\n', 'content\n'])
+ vf.add_lines(('d1',), (('base',),), ['d1\n'])
+ vf.add_lines(('d2',), (('d1',),), ['d2\n'])
+ # But heuristics could interfere, so check what happened:
+ self.assertEqual(['knit-ft-gz', 'knit-delta-gz', 'knit-delta-gz'],
+ [record.storage_kind for record in
+ vf.get_record_stream([('base',), ('d1',), ('d2',)],
+ 'topological', False)])
+ # generate a stream of just the deltas with include_delta_closure=True,
+ # serialise to the network, and check that we get a delta closure on the wire.
+ stream = vf.get_record_stream([('d1',), ('d2',)], 'topological', True)
+ netb = [record.get_bytes_as(record.storage_kind) for record in stream]
+ # The first bytes should be a memo from _ContentMapGenerator, and the
+ # second bytes should be empty (because it's an API proxy, not something
+ # for wire serialisation).
+ self.assertEqual('', netb[1])
+ bytes = netb[0]
+ kind, line_end = network_bytes_to_kind_and_offset(bytes)
+ self.assertEqual('knit-delta-closure', kind)
+
+
+class TestContentMapGenerator(KnitTests):
+ """Tests for ContentMapGenerator"""
+
+ def test_get_record_stream_gives_records(self):
+ vf = self.make_test_knit(name='test')
+ # put in three texts, giving ft, delta, delta
+ vf.add_lines(('base',), (), ['base\n', 'content\n'])
+ vf.add_lines(('d1',), (('base',),), ['d1\n'])
+ vf.add_lines(('d2',), (('d1',),), ['d2\n'])
+ keys = [('d1',), ('d2',)]
+ generator = _VFContentMapGenerator(vf, keys,
+ global_map=vf.get_parent_map(keys))
+ for record in generator.get_record_stream():
+ if record.key == ('d1',):
+ self.assertEqual('d1\n', record.get_bytes_as('fulltext'))
+ else:
+ self.assertEqual('d2\n', record.get_bytes_as('fulltext'))
+
+ def test_get_record_stream_kinds_are_raw(self):
+ vf = self.make_test_knit(name='test')
+ # put in three texts, giving ft, delta, delta
+ vf.add_lines(('base',), (), ['base\n', 'content\n'])
+ vf.add_lines(('d1',), (('base',),), ['d1\n'])
+ vf.add_lines(('d2',), (('d1',),), ['d2\n'])
+ keys = [('base',), ('d1',), ('d2',)]
+ generator = _VFContentMapGenerator(vf, keys,
+ global_map=vf.get_parent_map(keys))
+ kinds = {('base',): 'knit-delta-closure',
+ ('d1',): 'knit-delta-closure-ref',
+ ('d2',): 'knit-delta-closure-ref',
+ }
+ for record in generator.get_record_stream():
+ self.assertEqual(kinds[record.key], record.storage_kind)
diff --git a/bzrlib/tests/test_lazy_import.py b/bzrlib/tests/test_lazy_import.py
new file mode 100644
index 0000000..079c66b
--- /dev/null
+++ b/bzrlib/tests/test_lazy_import.py
@@ -0,0 +1,1217 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test the lazy_import functionality."""
+
+import linecache
+import os
+import re
+import sys
+
+from bzrlib import (
+ errors,
+ lazy_import,
+ osutils,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseInTempDir,
+ )
+
+
+class InstrumentedReplacer(lazy_import.ScopeReplacer):
+ """Track what actions are done"""
+
+ @staticmethod
+ def use_actions(actions):
+ InstrumentedReplacer.actions = actions
+
+ def __getattribute__(self, attr):
+ InstrumentedReplacer.actions.append(('__getattribute__', attr))
+ return lazy_import.ScopeReplacer.__getattribute__(self, attr)
+
+ def __call__(self, *args, **kwargs):
+ InstrumentedReplacer.actions.append(('__call__', args, kwargs))
+ return lazy_import.ScopeReplacer.__call__(self, *args, **kwargs)
+
+
+class InstrumentedImportReplacer(lazy_import.ImportReplacer):
+
+ @staticmethod
+ def use_actions(actions):
+ InstrumentedImportReplacer.actions = actions
+
+ def _import(self, scope, name):
+ InstrumentedImportReplacer.actions.append(('_import', name))
+ return lazy_import.ImportReplacer._import(self, scope, name)
+
+ def __getattribute__(self, attr):
+ InstrumentedImportReplacer.actions.append(('__getattribute__', attr))
+ return lazy_import.ScopeReplacer.__getattribute__(self, attr)
+
+ def __call__(self, *args, **kwargs):
+ InstrumentedImportReplacer.actions.append(('__call__', args, kwargs))
+ return lazy_import.ScopeReplacer.__call__(self, *args, **kwargs)
+
+
+class TestClass(object):
+ """Just a simple test class instrumented for the test cases"""
+
+ class_member = 'class_member'
+
+ @staticmethod
+ def use_actions(actions):
+ TestClass.actions = actions
+
+ def __init__(self):
+ TestClass.actions.append('init')
+
+ def foo(self, x):
+ TestClass.actions.append(('foo', x))
+ return 'foo'
+
+
+class TestScopeReplacer(TestCase):
+ """Test the ability of the replacer to put itself into the correct scope.
+
+ In these tests we use the global scope, because we cannot replace
+ variables in the local scope. This means that we need to be careful
+ and not have the replacing objects use the same name, or we would
+ get collisions.
+ """
+
+ def setUp(self):
+ TestCase.setUp(self)
+ # These tests assume we will not be proxying, so make sure proxying is
+ # disabled.
+ orig_proxy = lazy_import.ScopeReplacer._should_proxy
+ def restore():
+ lazy_import.ScopeReplacer._should_proxy = orig_proxy
+ lazy_import.ScopeReplacer._should_proxy = False
+ self.addCleanup(restore)
+
+ def test_object(self):
+ """ScopeReplacer can create an instance in local scope.
+
+ An object should appear in globals() by constructing a ScopeReplacer,
+ and it will be replaced with the real object upon the first request.
+ """
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+ TestClass.use_actions(actions)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ return TestClass()
+
+ try:
+ test_obj1
+ except NameError:
+ # test_obj1 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_obj1 was not supposed to exist yet')
+
+ InstrumentedReplacer(scope=globals(), name='test_obj1',
+ factory=factory)
+
+ # We can't use isinstance() because that uses test_obj1.__class__
+ # and that goes through __getattribute__ which would activate
+ # the replacement
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj1, '__class__'))
+ self.assertEqual('foo', test_obj1.foo(1))
+ self.assertIsInstance(test_obj1, TestClass)
+ self.assertEqual('foo', test_obj1.foo(2))
+ self.assertEqual([('__getattribute__', 'foo'),
+ 'factory',
+ 'init',
+ ('foo', 1),
+ ('foo', 2),
+ ], actions)
+
+ def test_setattr_replaces(self):
+ """ScopeReplacer can create an instance in local scope.
+
+ An object should appear in globals() by constructing a ScopeReplacer,
+ and it will be replaced with the real object upon the first request.
+ """
+ actions = []
+ TestClass.use_actions(actions)
+ def factory(replacer, scope, name):
+ return TestClass()
+ try:
+ test_obj6
+ except NameError:
+ # test_obj6 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_obj6 was not supposed to exist yet')
+
+ lazy_import.ScopeReplacer(scope=globals(), name='test_obj6',
+ factory=factory)
+
+ # We can't use isinstance() because that uses test_obj6.__class__
+ # and that goes through __getattribute__ which would activate
+ # the replacement
+ self.assertEqual(lazy_import.ScopeReplacer,
+ object.__getattribute__(test_obj6, '__class__'))
+ test_obj6.bar = 'test'
+ self.assertNotEqual(lazy_import.ScopeReplacer,
+ object.__getattribute__(test_obj6, '__class__'))
+ self.assertEqual('test', test_obj6.bar)
+
+ def test_replace_side_effects(self):
+ """Creating a new object should only create one entry in globals.
+
+ And only that entry even after replacement.
+ """
+ try:
+ test_scope1
+ except NameError:
+ # test_scope1 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_scope1 was not supposed to exist yet')
+
+ # ignore the logged actions
+ TestClass.use_actions([])
+
+ def factory(replacer, scope, name):
+ return TestClass()
+
+ orig_globals = set(globals().keys())
+
+ lazy_import.ScopeReplacer(scope=globals(), name='test_scope1',
+ factory=factory)
+
+ new_globals = set(globals().keys())
+
+ self.assertEqual(lazy_import.ScopeReplacer,
+ object.__getattribute__(test_scope1, '__class__'))
+ self.assertEqual('foo', test_scope1.foo(1))
+ self.assertIsInstance(test_scope1, TestClass)
+
+ final_globals = set(globals().keys())
+
+ self.assertEqual(set(['test_scope1']), new_globals - orig_globals)
+ self.assertEqual(set(), orig_globals - new_globals)
+ self.assertEqual(set(), final_globals - new_globals)
+ self.assertEqual(set(), new_globals - final_globals)
+
+ def test_class(self):
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+ TestClass.use_actions(actions)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ return TestClass
+
+ try:
+ test_class1
+ except NameError:
+ # test_class1 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_class1 was not supposed to exist yet')
+
+ InstrumentedReplacer(scope=globals(), name='test_class1',
+ factory=factory)
+
+ self.assertEqual('class_member', test_class1.class_member)
+ self.assertEqual(test_class1, TestClass)
+ self.assertEqual([('__getattribute__', 'class_member'),
+ 'factory',
+ ], actions)
+
+ def test_call_class(self):
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+ TestClass.use_actions(actions)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ return TestClass
+
+ try:
+ test_class2
+ except NameError:
+ # test_class2 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_class2 was not supposed to exist yet')
+
+ InstrumentedReplacer(scope=globals(), name='test_class2',
+ factory=factory)
+
+ self.assertFalse(test_class2 is TestClass)
+ obj = test_class2()
+ self.assertIs(test_class2, TestClass)
+ self.assertIsInstance(obj, TestClass)
+ self.assertEqual('class_member', obj.class_member)
+ self.assertEqual([('__call__', (), {}),
+ 'factory',
+ 'init',
+ ], actions)
+
+ def test_call_func(self):
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+
+ def func(a, b, c=None):
+ actions.append('func')
+ return (a, b, c)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ return func
+
+ try:
+ test_func1
+ except NameError:
+ # test_func1 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_func1 was not supposed to exist yet')
+ InstrumentedReplacer(scope=globals(), name='test_func1',
+ factory=factory)
+
+ self.assertFalse(test_func1 is func)
+ val = test_func1(1, 2, c='3')
+ self.assertIs(test_func1, func)
+
+ self.assertEqual((1,2,'3'), val)
+ self.assertEqual([('__call__', (1,2), {'c':'3'}),
+ 'factory',
+ 'func',
+ ], actions)
+
+ def test_other_variable(self):
+ """Test when a ScopeReplacer is assigned to another variable.
+
+ This test could be updated if we find a way to trap '=' rather
+ than just giving a belated exception.
+ ScopeReplacer only knows about the variable it was created as,
+ so until the object is replaced, it is illegal to assign it to
+ another variable (though discovering this may take a while).
+ """
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+ TestClass.use_actions(actions)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ return TestClass()
+
+ try:
+ test_obj2
+ except NameError:
+ # test_obj2 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_obj2 was not supposed to exist yet')
+
+ InstrumentedReplacer(scope=globals(), name='test_obj2',
+ factory=factory)
+
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj2, '__class__'))
+ # This is technically not allowed, but we don't have a way to
+ # test it until later.
+ test_obj3 = test_obj2
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj2, '__class__'))
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj3, '__class__'))
+
+ # The first use of the alternate variable causes test_obj2 to
+ # be replaced.
+ self.assertEqual('foo', test_obj3.foo(1))
+ # test_obj2 has been replaced, but the ScopeReplacer has no
+ # idea of test_obj3
+ self.assertEqual(TestClass,
+ object.__getattribute__(test_obj2, '__class__'))
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj3, '__class__'))
+ # We should be able to access test_obj2 attributes normally
+ self.assertEqual('foo', test_obj2.foo(2))
+ self.assertEqual('foo', test_obj2.foo(3))
+
+ # However, the next access on test_obj3 should raise an error
+ # because only now are we able to detect the problem.
+ self.assertRaises(errors.IllegalUseOfScopeReplacer,
+ getattr, test_obj3, 'foo')
+
+ self.assertEqual([('__getattribute__', 'foo'),
+ 'factory',
+ 'init',
+ ('foo', 1),
+ ('foo', 2),
+ ('foo', 3),
+ ('__getattribute__', 'foo'),
+ ], actions)
+
+ def test_enable_proxying(self):
+ """Test that we can allow ScopeReplacer to proxy."""
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+ TestClass.use_actions(actions)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ return TestClass()
+
+ try:
+ test_obj4
+ except NameError:
+ # test_obj4 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_obj4 was not supposed to exist yet')
+
+ lazy_import.ScopeReplacer._should_proxy = True
+ InstrumentedReplacer(scope=globals(), name='test_obj4',
+ factory=factory)
+
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj4, '__class__'))
+ test_obj5 = test_obj4
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj4, '__class__'))
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj5, '__class__'))
+
+ # The first use of the alternate variable causes test_obj4 to
+ # be replaced.
+ self.assertEqual('foo', test_obj4.foo(1))
+ self.assertEqual(TestClass,
+ object.__getattribute__(test_obj4, '__class__'))
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj5, '__class__'))
+ # We should be able to access test_obj4 attributes normally
+ self.assertEqual('foo', test_obj4.foo(2))
+ # because we enabled proxying, test_obj5 can access its members as well
+ self.assertEqual('foo', test_obj5.foo(3))
+ self.assertEqual('foo', test_obj5.foo(4))
+
+ # However, it cannot be replaced by the ScopeReplacer
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj5, '__class__'))
+
+ self.assertEqual([('__getattribute__', 'foo'),
+ 'factory',
+ 'init',
+ ('foo', 1),
+ ('foo', 2),
+ ('__getattribute__', 'foo'),
+ ('foo', 3),
+ ('__getattribute__', 'foo'),
+ ('foo', 4),
+ ], actions)
+
+ def test_replacing_from_own_scope_fails(self):
+ """If a ScopeReplacer tries to replace itself a nice error is given"""
+ actions = []
+ InstrumentedReplacer.use_actions(actions)
+ TestClass.use_actions(actions)
+
+ def factory(replacer, scope, name):
+ actions.append('factory')
+ # return the name in the given scope, which is currently the replacer
+ return scope[name]
+
+ try:
+ test_obj7
+ except NameError:
+ # test_obj7 shouldn't exist yet
+ pass
+ else:
+ self.fail('test_obj7 was not supposed to exist yet')
+
+ InstrumentedReplacer(scope=globals(), name='test_obj7',
+ factory=factory)
+
+ self.assertEqual(InstrumentedReplacer,
+ object.__getattribute__(test_obj7, '__class__'))
+ e = self.assertRaises(errors.IllegalUseOfScopeReplacer, test_obj7)
+ self.assertIn("replace itself", e.msg)
+ self.assertEqual([('__call__', (), {}),
+ 'factory'], actions)
+
+
+class ImportReplacerHelper(TestCaseInTempDir):
+ """Test the ability to have a lazily imported module or object"""
+
+ def setUp(self):
+ TestCaseInTempDir.setUp(self)
+ self.create_modules()
+ base_path = self.test_dir + '/base'
+
+ self.actions = []
+ InstrumentedImportReplacer.use_actions(self.actions)
+
+ sys.path.append(base_path)
+ self.addCleanup(sys.path.remove, base_path)
+
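+ # Replace the builtin __import__ with a wrapper that logs every real
+ # import into self.actions, so the tests can assert exactly when (and
+ # with what fromlist) the lazy objects trigger imports.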
+ original_import = __import__
+ def instrumented_import(mod, scope1, scope2, fromlist, level):
+ self.actions.append(('import', mod, fromlist, level))
+ return original_import(mod, scope1, scope2, fromlist, level)
+ def cleanup():
+ __builtins__['__import__'] = original_import
+ self.addCleanup(cleanup)
+ __builtins__['__import__'] = instrumented_import
+
+ def create_modules(self):
+ """Create some random modules to be imported.
+
+ Each entry has a random suffix, and the full names are saved.
+
+ These are set up as follows:
+ base/ <= used to ensure not in default search path
+ root-XXX/
+ __init__.py <= This will contain var1, func1
+ mod-XXX.py <= This will contain var2, func2
+ sub-XXX/
+ __init__.py <= Contains var3, func3
+ submoda-XXX.py <= contains var4, func4
+ submodb-XXX.py <= contains var5, func5
+ """
+ rand_suffix = osutils.rand_chars(4)
+ root_name = 'root_' + rand_suffix
+ mod_name = 'mod_' + rand_suffix
+ sub_name = 'sub_' + rand_suffix
+ submoda_name = 'submoda_' + rand_suffix
+ submodb_name = 'submodb_' + rand_suffix
+
+ os.mkdir('base')
+ root_path = osutils.pathjoin('base', root_name)
+ os.mkdir(root_path)
+ root_init = osutils.pathjoin(root_path, '__init__.py')
+ f = open(osutils.pathjoin(root_path, '__init__.py'), 'wb')
+ try:
+ f.write('var1 = 1\ndef func1(a):\n return a\n')
+ finally:
+ f.close()
+ mod_path = osutils.pathjoin(root_path, mod_name + '.py')
+ f = open(mod_path, 'wb')
+ try:
+ f.write('var2 = 2\ndef func2(a):\n return a\n')
+ finally:
+ f.close()
+
+ sub_path = osutils.pathjoin(root_path, sub_name)
+ os.mkdir(sub_path)
+ f = open(osutils.pathjoin(sub_path, '__init__.py'), 'wb')
+ try:
+ f.write('var3 = 3\ndef func3(a):\n return a\n')
+ finally:
+ f.close()
+ submoda_path = osutils.pathjoin(sub_path, submoda_name + '.py')
+ f = open(submoda_path, 'wb')
+ try:
+ f.write('var4 = 4\ndef func4(a):\n return a\n')
+ finally:
+ f.close()
+ submodb_path = osutils.pathjoin(sub_path, submodb_name + '.py')
+ f = open(submodb_path, 'wb')
+ try:
+ f.write('var5 = 5\ndef func5(a):\n return a\n')
+ finally:
+ f.close()
+ self.root_name = root_name
+ self.mod_name = mod_name
+ self.sub_name = sub_name
+ self.submoda_name = submoda_name
+ self.submodb_name = submodb_name
+
+
+class TestImportReplacerHelper(ImportReplacerHelper):
+
+ def test_basic_import(self):
+ """Test that a real import of these modules works"""
+ sub_mod_path = '.'.join([self.root_name, self.sub_name,
+ self.submoda_name])
+ root = __import__(sub_mod_path, globals(), locals(), [], 0)
+ self.assertEqual(1, root.var1)
+ self.assertEqual(3, getattr(root, self.sub_name).var3)
+ self.assertEqual(4, getattr(getattr(root, self.sub_name),
+ self.submoda_name).var4)
+
+ mod_path = '.'.join([self.root_name, self.mod_name])
+ root = __import__(mod_path, globals(), locals(), [], 0)
+ self.assertEqual(2, getattr(root, self.mod_name).var2)
+
+ self.assertEqual([('import', sub_mod_path, [], 0),
+ ('import', mod_path, [], 0),
+ ], self.actions)
+
+
+class TestImportReplacer(ImportReplacerHelper):
+
+ def test_import_root(self):
+ """Test 'import root-XXX as root1'"""
+ try:
+ root1
+ except NameError:
+ # root1 shouldn't exist yet
+ pass
+ else:
+ self.fail('root1 was not supposed to exist yet')
+
+ # This should replicate 'import root-xxyyzz as root1'
+ InstrumentedImportReplacer(scope=globals(), name='root1',
+ module_path=[self.root_name],
+ member=None, children={})
+
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root1, '__class__'))
+ self.assertEqual(1, root1.var1)
+ self.assertEqual('x', root1.func1('x'))
+
+ self.assertEqual([('__getattribute__', 'var1'),
+ ('_import', 'root1'),
+ ('import', self.root_name, [], 0),
+ ], self.actions)
+
+ def test_import_mod(self):
+ """Test 'import root-XXX.mod-XXX as mod2'"""
+ try:
+ mod1
+ except NameError:
+ # mod1 shouldn't exist yet
+ pass
+ else:
+ self.fail('mod1 was not supposed to exist yet')
+
+ mod_path = self.root_name + '.' + self.mod_name
+ InstrumentedImportReplacer(scope=globals(), name='mod1',
+ module_path=[self.root_name, self.mod_name],
+ member=None, children={})
+
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(mod1, '__class__'))
+ self.assertEqual(2, mod1.var2)
+ self.assertEqual('y', mod1.func2('y'))
+
+ self.assertEqual([('__getattribute__', 'var2'),
+ ('_import', 'mod1'),
+ ('import', mod_path, [], 0),
+ ], self.actions)
+
+ def test_import_mod_from_root(self):
+ """Test 'from root-XXX import mod-XXX as mod2'"""
+ try:
+ mod2
+ except NameError:
+ # mod2 shouldn't exist yet
+ pass
+ else:
+ self.fail('mod2 was not supposed to exist yet')
+
+ InstrumentedImportReplacer(scope=globals(), name='mod2',
+ module_path=[self.root_name],
+ member=self.mod_name, children={})
+
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(mod2, '__class__'))
+ self.assertEqual(2, mod2.var2)
+ self.assertEqual('y', mod2.func2('y'))
+
+ self.assertEqual([('__getattribute__', 'var2'),
+ ('_import', 'mod2'),
+ ('import', self.root_name, [self.mod_name], 0),
+ ], self.actions)
+
+ def test_import_root_and_mod(self):
+ """Test 'import root-XXX.mod-XXX' remapping both to root3.mod3"""
+ try:
+ root3
+ except NameError:
+ # root3 shouldn't exist yet
+ pass
+ else:
+ self.fail('root3 was not supposed to exist yet')
+
+ InstrumentedImportReplacer(scope=globals(),
+ name='root3', module_path=[self.root_name], member=None,
+ children={'mod3':([self.root_name, self.mod_name], None, {})})
+
+ # So 'root3' should be a lazy import
+ # and once it is imported, mod3 should also be lazy until
+ # actually accessed.
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root3, '__class__'))
+ self.assertEqual(1, root3.var1)
+
+ # There is a mod3 member, but it is also lazy
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root3.mod3, '__class__'))
+ self.assertEqual(2, root3.mod3.var2)
+
+ mod_path = self.root_name + '.' + self.mod_name
+ self.assertEqual([('__getattribute__', 'var1'),
+ ('_import', 'root3'),
+ ('import', self.root_name, [], 0),
+ ('__getattribute__', 'var2'),
+ ('_import', 'mod3'),
+ ('import', mod_path, [], 0),
+ ], self.actions)
+
+ def test_import_root_and_root_mod(self):
+ """Test that 'import root, root.mod' can be done.
+
+ The second import should re-use the first one, and just add
+ children to be imported.
+ """
+ try:
+ root4
+ except NameError:
+ # root4 shouldn't exist yet
+ pass
+ else:
+ self.fail('root4 was not supposed to exist yet')
+
+ InstrumentedImportReplacer(scope=globals(),
+ name='root4', module_path=[self.root_name], member=None,
+ children={})
+
+ # So 'root4' should be a lazy import
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root4, '__class__'))
+
+ # Let's add a new child to be imported on demand.
+ # Using object.__getattribute__ is the correct way to access the
+ # _import_replacer_children member without triggering the lazy import.
+ children = object.__getattribute__(root4, '_import_replacer_children')
+ children['mod4'] = ([self.root_name, self.mod_name], None, {})
+
+ # Accessing root4.mod4 should import root, but mod should stay lazy
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root4.mod4, '__class__'))
+ self.assertEqual(2, root4.mod4.var2)
+
+ mod_path = self.root_name + '.' + self.mod_name
+ self.assertEqual([('__getattribute__', 'mod4'),
+ ('_import', 'root4'),
+ ('import', self.root_name, [], 0),
+ ('__getattribute__', 'var2'),
+ ('_import', 'mod4'),
+ ('import', mod_path, [], 0),
+ ], self.actions)
+
+ def test_import_root_sub_submod(self):
+ """Test import root.mod, root.sub.submoda, root.sub.submodb
+ root should be a lazy import, with multiple children, who also
+ have children to be imported.
+ And when root is imported, the children should be lazy, and
+ reuse the intermediate lazy object.
+ """
+ try:
+ root5
+ except NameError:
+ # root5 shouldn't exist yet
+ pass
+ else:
+ self.fail('root5 was not supposed to exist yet')
+
+ InstrumentedImportReplacer(scope=globals(),
+ name='root5', module_path=[self.root_name], member=None,
+ children={'mod5':([self.root_name, self.mod_name], None, {}),
+ 'sub5':([self.root_name, self.sub_name], None,
+ {'submoda5':([self.root_name, self.sub_name,
+ self.submoda_name], None, {}),
+ 'submodb5':([self.root_name, self.sub_name,
+ self.submodb_name], None, {})
+ }),
+ })
+
+ # So 'root5' should be a lazy import
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root5, '__class__'))
+
+ # Accessing root5.mod5 should import root, but mod should stay lazy
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root5.mod5, '__class__'))
+ # root5.sub5 should still be lazy, but not re-import root5
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root5.sub5, '__class__'))
+
+ # Accessing root5.sub5.submoda5 should import sub5, but not either
+ # of the submodules (they should still be available as lazy objects).
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root5.sub5.submoda5, '__class__'))
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root5.sub5.submodb5, '__class__'))
+
+ # This should import mod5
+ self.assertEqual(2, root5.mod5.var2)
+ # These should import submoda5 and submodb5
+ self.assertEqual(4, root5.sub5.submoda5.var4)
+ self.assertEqual(5, root5.sub5.submodb5.var5)
+
+ mod_path = self.root_name + '.' + self.mod_name
+ sub_path = self.root_name + '.' + self.sub_name
+ submoda_path = sub_path + '.' + self.submoda_name
+ submodb_path = sub_path + '.' + self.submodb_name
+
+ self.assertEqual([('__getattribute__', 'mod5'),
+ ('_import', 'root5'),
+ ('import', self.root_name, [], 0),
+ ('__getattribute__', 'submoda5'),
+ ('_import', 'sub5'),
+ ('import', sub_path, [], 0),
+ ('__getattribute__', 'var2'),
+ ('_import', 'mod5'),
+ ('import', mod_path, [], 0),
+ ('__getattribute__', 'var4'),
+ ('_import', 'submoda5'),
+ ('import', submoda_path, [], 0),
+ ('__getattribute__', 'var5'),
+ ('_import', 'submodb5'),
+ ('import', submodb_path, [], 0),
+ ], self.actions)
+
+
+class TestConvertImportToMap(TestCase):
+ """Directly test the conversion from import strings to maps"""
+
+ def check(self, expected, import_strings):
+ proc = lazy_import.ImportProcessor()
+ for import_str in import_strings:
+ proc._convert_import_str(import_str)
+ self.assertEqual(expected, proc.imports,
+ 'Import of %r was not converted correctly'
+ ' %s != %s' % (import_strings, expected,
+ proc.imports))
+
+ def test_import_one(self):
+ self.check({'one':(['one'], None, {}),
+ }, ['import one'])
+
+ def test_import_one_two(self):
+ one_two_map = {'one':(['one'], None,
+ {'two':(['one', 'two'], None, {}),
+ }),
+ }
+ self.check(one_two_map, ['import one.two'])
+ self.check(one_two_map, ['import one, one.two'])
+ self.check(one_two_map, ['import one', 'import one.two'])
+ self.check(one_two_map, ['import one.two', 'import one'])
+
+ def test_import_one_two_three(self):
+ one_two_three_map = {
+ 'one':(['one'], None,
+ {'two':(['one', 'two'], None,
+ {'three':(['one', 'two', 'three'], None, {}),
+ }),
+ }),
+ }
+ self.check(one_two_three_map, ['import one.two.three'])
+ self.check(one_two_three_map, ['import one, one.two.three'])
+ self.check(one_two_three_map, ['import one',
+ 'import one.two.three'])
+ self.check(one_two_three_map, ['import one.two.three',
+ 'import one'])
+
+ def test_import_one_as_x(self):
+ self.check({'x':(['one'], None, {}),
+ }, ['import one as x'])
+
+ def test_import_one_two_as_x(self):
+ self.check({'x':(['one', 'two'], None, {}),
+ }, ['import one.two as x'])
+
+ def test_import_mixed(self):
+ mixed = {'x':(['one', 'two'], None, {}),
+ 'one':(['one'], None,
+ {'two':(['one', 'two'], None, {}),
+ }),
+ }
+ self.check(mixed, ['import one.two as x, one.two'])
+ self.check(mixed, ['import one.two as x', 'import one.two'])
+ self.check(mixed, ['import one.two', 'import one.two as x'])
+
+ def test_import_with_as(self):
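+ # The module name 'fast' contains 'as'; it must not be parsed as a rename.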
+ self.check({'fast':(['fast'], None, {})}, ['import fast'])
+
+
+class TestFromToMap(TestCase):
+ """Directly test the conversion of 'from foo import bar' syntax"""
+
+ def check_result(self, expected, from_strings):
+ proc = lazy_import.ImportProcessor()
+ for from_str in from_strings:
+ proc._convert_from_str(from_str)
+ self.assertEqual(expected, proc.imports,
+ 'Import of %r was not converted correctly'
+ ' %s != %s' % (from_strings, expected, proc.imports))
+
+ def test_from_one_import_two(self):
+ self.check_result({'two':(['one'], 'two', {})},
+ ['from one import two'])
+
+ def test_from_one_import_two_as_three(self):
+ self.check_result({'three':(['one'], 'two', {})},
+ ['from one import two as three'])
+
+ def test_from_one_import_two_three(self):
+ two_three_map = {'two':(['one'], 'two', {}),
+ 'three':(['one'], 'three', {}),
+ }
+ self.check_result(two_three_map,
+ ['from one import two, three'])
+ self.check_result(two_three_map,
+ ['from one import two',
+ 'from one import three'])
+
+ def test_from_one_two_import_three(self):
+ self.check_result({'three':(['one', 'two'], 'three', {})},
+ ['from one.two import three'])
+
+
+class TestCanonicalize(TestCase):
+ """Test that we can canonicalize import texts"""
+
+ def check(self, expected, text):
+ proc = lazy_import.ImportProcessor()
+ parsed = proc._canonicalize_import_text(text)
+ self.assertEqual(expected, parsed,
+ 'Incorrect parsing of text:\n%s\n%s\n!=\n%s'
+ % (text, expected, parsed))
+
+ def test_import_one(self):
+ self.check(['import one'], 'import one')
+ self.check(['import one'], '\nimport one\n\n')
+
+ def test_import_one_two(self):
+ self.check(['import one, two'], 'import one, two')
+ self.check(['import one, two'], '\nimport one, two\n\n')
+
+ def test_import_one_as_two_as(self):
+ self.check(['import one as x, two as y'], 'import one as x, two as y')
+ self.check(['import one as x, two as y'],
+ '\nimport one as x, two as y\n')
+
+ def test_from_one_import_two(self):
+ self.check(['from one import two'], 'from one import two')
+ self.check(['from one import two'], '\nfrom one import two\n\n')
+ self.check(['from one import two'], '\nfrom one import (two)\n')
+ self.check(['from one import two '], '\nfrom one import (\n\ttwo\n)\n')
+
+ def test_multiple(self):
+ self.check(['import one', 'import two, three', 'from one import four'],
+ 'import one\nimport two, three\nfrom one import four')
+ self.check(['import one', 'import two, three', 'from one import four'],
+ 'import one\nimport (two, three)\nfrom one import four')
+ self.check(['import one', 'import two, three', 'from one import four'],
+ 'import one\n'
+ 'import two, three\n'
+ 'from one import four')
+ self.check(['import one',
+ 'import two, three', 'from one import four, '],
+ 'import one\n'
+ 'import two, three\n'
+ 'from one import (\n'
+ ' four,\n'
+ ' )\n'
+ )
+
+ def test_missing_trailing(self):
+ proc = lazy_import.ImportProcessor()
+ self.assertRaises(errors.InvalidImportLine,
+ proc._canonicalize_import_text,
+ "from foo import (\n bar\n")
+
+
+class TestImportProcessor(TestCase):
+ """Test that ImportProcessor can turn import texts into lazy imports"""
+
+ def check(self, expected, text):
+ proc = lazy_import.ImportProcessor()
+ proc._build_map(text)
+ self.assertEqual(expected, proc.imports,
+ 'Incorrect processing of:\n%s\n%s\n!=\n%s'
+ % (text, expected, proc.imports))
+
+ def test_import_one(self):
+ exp = {'one':(['one'], None, {})}
+ self.check(exp, 'import one')
+ self.check(exp, '\nimport one\n')
+
+ def test_import_one_two(self):
+ exp = {'one':(['one'], None,
+ {'two':(['one', 'two'], None, {}),
+ }),
+ }
+ self.check(exp, 'import one.two')
+ self.check(exp, 'import one, one.two')
+ self.check(exp, 'import one\nimport one.two')
+
+ def test_import_as(self):
+ exp = {'two':(['one'], None, {})}
+ self.check(exp, 'import one as two')
+
+ def test_import_many(self):
+ exp = {'one':(['one'], None,
+ {'two':(['one', 'two'], None,
+ {'three':(['one', 'two', 'three'], None, {}),
+ }),
+ 'four':(['one', 'four'], None, {}),
+ }),
+ 'five':(['one', 'five'], None, {}),
+ }
+ self.check(exp, 'import one.two.three, one.four, one.five as five')
+ self.check(exp, 'import one.five as five\n'
+ 'import one\n'
+ 'import one.two.three\n'
+ 'import one.four\n')
+
+ def test_from_one_import_two(self):
+ exp = {'two':(['one'], 'two', {})}
+ self.check(exp, 'from one import two\n')
+ self.check(exp, 'from one import (\n'
+ ' two,\n'
+ ' )\n')
+
+ def test_from_one_import_two_variants(self):
+ exp = {'two':(['one'], 'two', {})}
+ self.check(exp, 'from one import two\n')
+ self.check(exp, 'from one import (two)\n')
+ self.check(exp, 'from one import (two,)\n')
+ self.check(exp, 'from one import two as two\n')
+ self.check(exp, 'from one import (\n'
+ ' two,\n'
+ ' )\n')
+
+ def test_from_many(self):
+ exp = {'two':(['one'], 'two', {}),
+ 'three':(['one', 'two'], 'three', {}),
+ 'five':(['one', 'two'], 'four', {}),
+ }
+ self.check(exp, 'from one import two\n'
+ 'from one.two import three, four as five\n')
+ self.check(exp, 'from one import two\n'
+ 'from one.two import (\n'
+ ' three,\n'
+ ' four as five,\n'
+ ' )\n')
+
+ def test_mixed(self):
+ exp = {'two':(['one'], 'two', {}),
+ 'three':(['one', 'two'], 'three', {}),
+ 'five':(['one', 'two'], 'four', {}),
+ 'one':(['one'], None,
+ {'two':(['one', 'two'], None, {}),
+ }),
+ }
+ self.check(exp, 'from one import two\n'
+ 'from one.two import three, four as five\n'
+ 'import one.two')
+ self.check(exp, 'from one import two\n'
+ 'from one.two import (\n'
+ ' three,\n'
+ ' four as five,\n'
+ ' )\n'
+ 'import one\n'
+ 'import one.two\n')
+
+ def test_incorrect_line(self):
+ proc = lazy_import.ImportProcessor()
+ self.assertRaises(errors.InvalidImportLine,
+ proc._build_map, 'foo bar baz')
+ self.assertRaises(errors.InvalidImportLine,
+ proc._build_map, 'improt foo')
+ self.assertRaises(errors.InvalidImportLine,
+ proc._build_map, 'importfoo')
+ self.assertRaises(errors.InvalidImportLine,
+ proc._build_map, 'fromimport')
+
+ def test_name_collision(self):
+ proc = lazy_import.ImportProcessor()
+ proc._build_map('import foo')
+
+ # All of these would try to create an object with the
+ # same name as an existing object.
+ self.assertRaises(errors.ImportNameCollision,
+ proc._build_map, 'import bar as foo')
+ self.assertRaises(errors.ImportNameCollision,
+ proc._build_map, 'from foo import bar as foo')
+ self.assertRaises(errors.ImportNameCollision,
+ proc._build_map, 'from bar import foo')
+
+
+class TestLazyImportProcessor(ImportReplacerHelper):
+
+ def test_root(self):
+ try:
+ root6
+ except NameError:
+ pass # root6 should not be defined yet
+ else:
+ self.fail('root6 was not supposed to exist yet')
+
+ text = 'import %s as root6' % (self.root_name,)
+ proc = lazy_import.ImportProcessor(InstrumentedImportReplacer)
+ proc.lazy_import(scope=globals(), text=text)
+
+ # So 'root6' should be a lazy import
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root6, '__class__'))
+
+ self.assertEqual(1, root6.var1)
+ self.assertEqual('x', root6.func1('x'))
+
+ self.assertEqual([('__getattribute__', 'var1'),
+ ('_import', 'root6'),
+ ('import', self.root_name, [], 0),
+ ], self.actions)
+
+ def test_import_deep(self):
+ """Test import root.mod, root.sub.submoda, root.sub.submodb
+ root should be a lazy import, with multiple children, who also
+ have children to be imported.
+ And when root is imported, the children should be lazy, and
+ reuse the intermediate lazy object.
+ """
+ try:
+ submoda7
+ except NameError:
+ pass # submoda7 should not be defined yet
+ else:
+ self.fail('submoda7 was not supposed to exist yet')
+
+ text = """\
+import %(root_name)s.%(sub_name)s.%(submoda_name)s as submoda7
+""" % self.__dict__
+ proc = lazy_import.ImportProcessor(InstrumentedImportReplacer)
+ proc.lazy_import(scope=globals(), text=text)
+
+ # So 'submoda7' should be a lazy import
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(submoda7, '__class__'))
+
+ # This should import submoda7
+ self.assertEqual(4, submoda7.var4)
+
+ sub_path = self.root_name + '.' + self.sub_name
+ submoda_path = sub_path + '.' + self.submoda_name
+
+ self.assertEqual([('__getattribute__', 'var4'),
+ ('_import', 'submoda7'),
+ ('import', submoda_path, [], 0),
+ ], self.actions)
+
+ def test_lazy_import(self):
+ """Smoke test that lazy_import() does the right thing"""
+ try:
+ root8
+ except NameError:
+ pass # root8 should not be defined yet
+ else:
+ self.fail('root8 was not supposed to exist yet')
+ lazy_import.lazy_import(globals(),
+ 'import %s as root8' % (self.root_name,),
+ lazy_import_class=InstrumentedImportReplacer)
+
+ self.assertEqual(InstrumentedImportReplacer,
+ object.__getattribute__(root8, '__class__'))
+
+ self.assertEqual(1, root8.var1)
+ self.assertEqual(1, root8.var1)
+ self.assertEqual(1, root8.func1(1))
+
+ self.assertEqual([('__getattribute__', 'var1'),
+ ('_import', 'root8'),
+ ('import', self.root_name, [], 0),
+ ], self.actions)
+
+
+class TestScopeReplacerReentrance(TestCase):
+ """The ScopeReplacer should be reentrant.
+
+ Invoking a replacer while an invocation was already on-going leads to a
+ race to see which invocation will be the first to call _replace.
+ The losing caller used to see an exception (bugs 396819 and 702914).
+
+ These tests set up a tracer that stops at a suitable moment (upon
+ entry of a specified method) and starts another call to the
+ functionality in question (__call__, __getattribute__, __setattr__)
+ in order to win the race, setting up the original caller to lose.
+ """
+
+ def tracer(self, frame, event, arg):
+ if event != 'call':
+ return self.tracer
+ # Grab the name of the file that contains the code being executed.
+ code = frame.f_code
+ filename = code.co_filename
+ # Convert ".pyc" and ".pyo" file names to their ".py" equivalent.
+ filename = re.sub(r'\.py[co]$', '.py', filename)
+ function_name = code.co_name
+ # If we're executing a line of code from the right module...
+ if (filename.endswith('lazy_import.py') and
+ function_name == self.method_to_trace):
+ # We don't need to trace any more.
+ sys.settrace(None)
+ # Run another racer. This one will "win" the race.
+ self.racer()
+ return self.tracer
+
+ def run_race(self, racer, method_to_trace='_resolve'):
+ self.overrideAttr(lazy_import.ScopeReplacer, '_should_proxy', True)
+ self.racer = racer
+ self.method_to_trace = method_to_trace
+ sys.settrace(self.tracer)
+ self.racer() # Should not raise any exception
+ # Make sure the tracer actually found the code it was
+ # looking for. If not, maybe the code was refactored in
+ # such a way that these tests aren't needed any more.
+ self.assertEqual(None, sys.gettrace())
+
+ def test_call(self):
+ def factory(*args):
+ return factory
+ replacer = lazy_import.ScopeReplacer({}, factory, 'name')
+ self.run_race(replacer)
+
+ def test_setattr(self):
+ class Replaced:
+ pass
+
+ def factory(*args):
+ return Replaced()
+
+ replacer = lazy_import.ScopeReplacer({}, factory, 'name')
+
+ def racer():
+ replacer.foo = 42
+
+ self.run_race(racer)
+
+ def test_getattribute(self):
+ class Replaced:
+ foo = 'bar'
+
+ def factory(*args):
+ return Replaced()
+
+ replacer = lazy_import.ScopeReplacer({}, factory, 'name')
+
+ def racer():
+ replacer.foo
+
+ self.run_race(racer)
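For orientation, the tests above drive the same entry point that bzrlib modules use themselves at import time. A minimal sketch of that idiom (the imported names are purely illustrative):

    from bzrlib.lazy_import import lazy_import
    lazy_import(globals(), """
    from bzrlib import (
        errors,
        osutils,
        )
    """)
    # 'errors' and 'osutils' are now ScopeReplacer-derived placeholders in
    # this module's namespace; the real modules are only imported on first
    # attribute access.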
diff --git a/bzrlib/tests/test_lazy_regex.py b/bzrlib/tests/test_lazy_regex.py
new file mode 100644
index 0000000..d233d72
--- /dev/null
+++ b/bzrlib/tests/test_lazy_regex.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2006, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that lazy regexes are not compiled right away"""
+
+import pickle
+import re
+
+from bzrlib import errors
+from bzrlib import (
+ lazy_regex,
+ tests,
+ )
+
+
+class InstrumentedLazyRegex(lazy_regex.LazyRegex):
+ """Keep track of actions on the lazy regex"""
+
+ _actions = []
+
+ @classmethod
+ def use_actions(cls, actions):
+ cls._actions = actions
+
+ def __getattr__(self, attr):
+ self._actions.append(('__getattr__', attr))
+ return super(InstrumentedLazyRegex, self).__getattr__(attr)
+
+ def _real_re_compile(self, *args, **kwargs):
+ self._actions.append(('_real_re_compile',
+ args, kwargs))
+ return super(InstrumentedLazyRegex, self)._real_re_compile(
+ *args, **kwargs)
+
+
+class TestLazyRegex(tests.TestCase):
+
+ def test_lazy_compile(self):
+ """Make sure that LazyRegex objects compile at the right time"""
+ actions = []
+ InstrumentedLazyRegex.use_actions(actions)
+
+ pattern = InstrumentedLazyRegex(args=('foo',))
+ actions.append(('created regex', 'foo'))
+ # This match call should compile the regex and go through __getattr__
+ pattern.match('foo')
+ # But a further call should not go through __getattr__ because it has
+ # been bound locally.
+ pattern.match('foo')
+
+ self.assertEqual([('created regex', 'foo'),
+ ('__getattr__', 'match'),
+ ('_real_re_compile', ('foo',), {}),
+ ], actions)
+
+ def test_bad_pattern(self):
+ """Ensure lazy regex handles bad patterns cleanly."""
+ p = lazy_regex.lazy_compile('RE:[')
+ # As p.match is lazy, we make it into a lambda so it's handled
+ # by assertRaises correctly.
+ e = self.assertRaises(errors.InvalidPattern, lambda: p.match('foo'))
+ self.assertEqual(e.msg, '"RE:[" unexpected end of regular expression')
+
+
+class TestLazyCompile(tests.TestCase):
+
+ def test_simple_acts_like_regex(self):
+ """Test that the returned object has basic regex like functionality"""
+ pattern = lazy_regex.lazy_compile('foo')
+ self.assertIsInstance(pattern, lazy_regex.LazyRegex)
+ self.assertTrue(pattern.match('foo'))
+ self.assertIs(None, pattern.match('bar'))
+
+ def test_extra_args(self):
+ """Test that extra arguments are also properly passed"""
+ pattern = lazy_regex.lazy_compile('foo', re.I)
+ self.assertIsInstance(pattern, lazy_regex.LazyRegex)
+ self.assertTrue(pattern.match('foo'))
+ self.assertTrue(pattern.match('Foo'))
+
+ def test_findall(self):
+ pattern = lazy_regex.lazy_compile('fo*')
+ self.assertEqual(['f', 'fo', 'foo', 'fooo'],
+ pattern.findall('f fo foo fooo'))
+
+ def test_finditer(self):
+ pattern = lazy_regex.lazy_compile('fo*')
+ matches = [(m.start(), m.end(), m.group())
+ for m in pattern.finditer('foo bar fop')]
+ self.assertEqual([(0, 3, 'foo'), (8, 10, 'fo')], matches)
+
+ def test_match(self):
+ pattern = lazy_regex.lazy_compile('fo*')
+ self.assertIs(None, pattern.match('baz foo'))
+ self.assertEqual('fooo', pattern.match('fooo').group())
+
+ def test_search(self):
+ pattern = lazy_regex.lazy_compile('fo*')
+ self.assertEqual('foo', pattern.search('baz foo').group())
+ self.assertEqual('fooo', pattern.search('fooo').group())
+
+ def test_split(self):
+ pattern = lazy_regex.lazy_compile('[,;]*')
+ self.assertEqual(['x', 'y', 'z'], pattern.split('x,y;z'))
+
+ def test_pickle(self):
+ # When pickling, just compile the regex.
+ # Sphinx, which we use for documentation, pickles
+ # some compiled regexes.
+ lazy_pattern = lazy_regex.lazy_compile('[,;]*')
+ pickled = pickle.dumps(lazy_pattern)
+ unpickled_lazy_pattern = pickle.loads(pickled)
+ self.assertEqual(['x', 'y', 'z'],
+ unpickled_lazy_pattern.split('x,y;z'))
+
+
+class TestInstallLazyCompile(tests.TestCase):
+ """Tests for lazy compiled regexps.
+
+ Other tests, and bzrlib in general, count on the lazy regexp compiler
+ being installed, and this is done by loading bzrlib. So these tests
+ assume it is installed, and leave it installed when they're done.
+ """
+
+ def test_install(self):
+ # Don't count on it being present
+ lazy_regex.install_lazy_compile()
+ pattern = re.compile('foo')
+ self.assertIsInstance(pattern, lazy_regex.LazyRegex)
+
+ def test_reset(self):
+ lazy_regex.reset_compile()
+ self.addCleanup(lazy_regex.install_lazy_compile)
+ pattern = re.compile('foo')
+ self.assertFalse(isinstance(pattern, lazy_regex.LazyRegex),
+ 'lazy_regex.reset_compile() did not restore the original'
+ ' compile() function %s' % (type(pattern),))
+ # but the returned object should still support regex operations
+ m = pattern.match('foo')
+ self.assertEqual('foo', m.group())
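As a usage note, the lazy_compile() helper exercised above is a stand-in for re.compile that defers the actual compilation. A minimal sketch (the pattern is illustrative):

    from bzrlib import lazy_regex

    _num_re = lazy_regex.lazy_compile(r'\d+')  # nothing is compiled yet
    _num_re.findall('10 out of 400')           # first use compiles: ['10', '400']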
diff --git a/bzrlib/tests/test_library_state.py b/bzrlib/tests/test_library_state.py
new file mode 100644
index 0000000..fcc871b
--- /dev/null
+++ b/bzrlib/tests/test_library_state.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for BzrLibraryState."""
+
+from bzrlib import (
+ library_state,
+ tests,
+ ui as _mod_ui
+ )
+from bzrlib.tests import fixtures
+
+
+# TODO: once sufficiently cleaned up, this should be a plain TestCase.
+class TestLibraryState(tests.TestCaseWithTransport):
+
+ def test_ui_is_used(self):
+ ui = _mod_ui.SilentUIFactory()
+ state = library_state.BzrLibraryState(
+ ui=ui, trace=fixtures.RecordingContextManager())
+ orig_ui = _mod_ui.ui_factory
+ state.__enter__()
+ try:
+ self.assertEqual(ui, _mod_ui.ui_factory)
+ finally:
+ state.__exit__(None, None, None)
+ self.assertEqual(orig_ui, _mod_ui.ui_factory)
+
+ def test_trace_context(self):
+ tracer = fixtures.RecordingContextManager()
+ ui = _mod_ui.SilentUIFactory()
+ state = library_state.BzrLibraryState(ui=ui, trace=tracer)
+ state.__enter__()
+ try:
+ self.assertEqual(['__enter__'], tracer._calls)
+ finally:
+ state.__exit__(None, None, None)
+ self.assertEqual(['__enter__', '__exit__'], tracer._calls)
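The explicit __enter__/__exit__ calls above keep the assertions inside the managed region; ordinary callers would drive the same object with a with statement. A sketch using only the objects these tests already construct:

    from bzrlib import library_state, ui as _mod_ui
    from bzrlib.tests import fixtures

    state = library_state.BzrLibraryState(
        ui=_mod_ui.SilentUIFactory(), trace=fixtures.RecordingContextManager())
    with state:
        pass  # _mod_ui.ui_factory is replaced for the duration of the block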
diff --git a/bzrlib/tests/test_lock.py b/bzrlib/tests/test_lock.py
new file mode 100644
index 0000000..3868c9c
--- /dev/null
+++ b/bzrlib/tests/test_lock.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for OS Locks."""
+
+
+from bzrlib import (
+ debug,
+ errors,
+ lock,
+ tests,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestOSLock(tests.TestCaseInTempDir):
+
+ scenarios = [(
+ name, {
+ 'write_lock': write_lock,
+ 'read_lock': read_lock})
+ for name, write_lock, read_lock in lock._lock_classes]
+
+ read_lock = None
+ write_lock = None
+
+ def setUp(self):
+ super(TestOSLock, self).setUp()
+ self.build_tree(['a-lock-file'])
+
+ def test_create_read_lock(self):
+ r_lock = self.read_lock('a-lock-file')
+ r_lock.unlock()
+
+ def test_create_write_lock(self):
+ w_lock = self.write_lock('a-lock-file')
+ w_lock.unlock()
+
+ def test_read_locks_share(self):
+ r_lock = self.read_lock('a-lock-file')
+ try:
+ lock2 = self.read_lock('a-lock-file')
+ lock2.unlock()
+ finally:
+ r_lock.unlock()
+
+ def test_write_locks_are_exclusive(self):
+ w_lock = self.write_lock('a-lock-file')
+ try:
+ self.assertRaises(errors.LockContention,
+ self.write_lock, 'a-lock-file')
+ finally:
+ w_lock.unlock()
+
+ def test_read_locks_block_write_locks(self):
+ r_lock = self.read_lock('a-lock-file')
+ try:
+ if lock.have_fcntl and self.write_lock is lock._fcntl_WriteLock:
+ # With -Dlock, fcntl locks are properly exclusive
+ debug.debug_flags.add('strict_locks')
+ self.assertRaises(errors.LockContention,
+ self.write_lock, 'a-lock-file')
+ # But not without it
+ debug.debug_flags.remove('strict_locks')
+ try:
+ w_lock = self.write_lock('a-lock-file')
+ except errors.LockContention:
+ self.fail('Unexpected success. fcntl read locks'
+ ' do not usually block write locks')
+ else:
+ w_lock.unlock()
+ self.knownFailure('fcntl read locks don\'t'
+ ' block write locks without -Dlock')
+ else:
+ self.assertRaises(errors.LockContention,
+ self.write_lock, 'a-lock-file')
+ finally:
+ r_lock.unlock()
+
+ def test_write_locks_block_read_lock(self):
+ w_lock = self.write_lock('a-lock-file')
+ try:
+ if lock.have_fcntl and self.read_lock is lock._fcntl_ReadLock:
+ # With -Dlock, fcntl locks are properly exclusive
+ debug.debug_flags.add('strict_locks')
+ self.assertRaises(errors.LockContention,
+ self.read_lock, 'a-lock-file')
+ # But not without it
+ debug.debug_flags.remove('strict_locks')
+ try:
+ r_lock = self.read_lock('a-lock-file')
+ except errors.LockContention:
+ self.fail('Unexpected success. fcntl write locks'
+ ' do not usually block read locks')
+ else:
+ r_lock.unlock()
+ self.knownFailure('fcntl write locks don\'t'
+ ' block read locks without -Dlock')
+ else:
+ self.assertRaises(errors.LockContention,
+ self.read_lock, 'a-lock-file')
+ finally:
+ w_lock.unlock()
+
+ def test_temporary_write_lock(self):
+ r_lock = self.read_lock('a-lock-file')
+ try:
+ status, w_lock = r_lock.temporary_write_lock()
+ self.assertTrue(status)
+ # This should block another write lock
+ try:
+ self.assertRaises(errors.LockContention,
+ self.write_lock, 'a-lock-file')
+ finally:
+ r_lock = w_lock.restore_read_lock()
+ # We should be able to take a read lock now
+ r_lock2 = self.read_lock('a-lock-file')
+ r_lock2.unlock()
+ finally:
+ r_lock.unlock()
+
+ def test_temporary_write_lock_fails(self):
+ r_lock = self.read_lock('a-lock-file')
+ try:
+ r_lock2 = self.read_lock('a-lock-file')
+ try:
+ status, w_lock = r_lock.temporary_write_lock()
+ self.assertFalse(status)
+ # Taking out the lock requires unlocking and locking again, so
+ # we have to replace the original object
+ r_lock = w_lock
+ finally:
+ r_lock2.unlock()
+ # We should be able to take a read lock now
+ r_lock2 = self.read_lock('a-lock-file')
+ r_lock2.unlock()
+ finally:
+ r_lock.unlock()
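For orientation, the scenarios above run once per lock implementation the platform provides. A hand-driven sketch of one of them (the file name is illustrative and is assumed to already exist, as setUp arranges above):

    from bzrlib import lock

    # lock._lock_classes holds (name, write_lock, read_lock) tuples; which
    # entries are present depends on the platform (fcntl, win32, ...).
    name, write_lock, read_lock = lock._lock_classes[0]
    w_lock = write_lock('a-lock-file')  # exclusive while held
    try:
        pass                            # protected work goes here
    finally:
        w_lock.unlock()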
diff --git a/bzrlib/tests/test_lockable_files.py b/bzrlib/tests/test_lockable_files.py
new file mode 100644
index 0000000..e8b2345
--- /dev/null
+++ b/bzrlib/tests/test_lockable_files.py
@@ -0,0 +1,351 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import bzrlib
+from bzrlib import (
+ errors,
+ lockdir,
+ osutils,
+ transport,
+ )
+from bzrlib.lockable_files import LockableFiles, TransportLock
+from bzrlib.tests import (
+ TestCaseInTempDir,
+ TestNotApplicable,
+ )
+from bzrlib.tests.test_smart import TestCaseWithSmartMedium
+from bzrlib.tests.test_transactions import DummyWeave
+from bzrlib.transactions import (PassThroughTransaction,
+ ReadOnlyTransaction,
+ WriteTransaction,
+ )
+
+
+# these tests are applied in each parameterized suite for LockableFiles
+#
+# they use an old style of parameterization, but we want to remove this class
+# so we won't modernize them now. - mbp 20080430
+class _TestLockableFiles_mixin(object):
+
+ def test_transactions(self):
+ self.assertIs(self.lockable.get_transaction().__class__,
+ PassThroughTransaction)
+ self.lockable.lock_read()
+ try:
+ self.assertIs(self.lockable.get_transaction().__class__,
+ ReadOnlyTransaction)
+ finally:
+ self.lockable.unlock()
+ self.assertIs(self.lockable.get_transaction().__class__,
+ PassThroughTransaction)
+ self.lockable.lock_write()
+ self.assertIs(self.lockable.get_transaction().__class__,
+ WriteTransaction)
+ # check that finish is called:
+ vf = DummyWeave('a')
+ self.lockable.get_transaction().register_dirty(vf)
+ self.lockable.unlock()
+ self.assertTrue(vf.finished)
+
+ def test__escape(self):
+ self.assertEqual('%25', self.lockable._escape('%'))
+
+ def test__escape_empty(self):
+ self.assertEqual('', self.lockable._escape(''))
+
+ def test_break_lock(self):
+ # some locks are not breakable
+ self.lockable.lock_write()
+ try:
+ self.assertRaises(AssertionError, self.lockable.break_lock)
+ except NotImplementedError:
+ # this lock cannot be broken
+ self.lockable.unlock()
+ raise TestNotApplicable("%r is not breakable" % (self.lockable,))
+ l2 = self.get_lockable()
+ orig_factory = bzrlib.ui.ui_factory
+ # silent ui - no need for stdout
+ bzrlib.ui.ui_factory = bzrlib.ui.CannedInputUIFactory([True])
+ try:
+ l2.break_lock()
+ finally:
+ bzrlib.ui.ui_factory = orig_factory
+ try:
+ l2.lock_write()
+ l2.unlock()
+ finally:
+ self.assertRaises(errors.LockBroken, self.lockable.unlock)
+ self.assertFalse(self.lockable.is_locked())
+
+ def test_lock_write_returns_None_refuses_token(self):
+ token = self.lockable.lock_write()
+ self.addCleanup(self.lockable.unlock)
+ if token is not None:
+ # This test does not apply, because this lockable supports
+ # tokens.
+ raise TestNotApplicable("%r uses tokens" % (self.lockable,))
+ self.assertRaises(errors.TokenLockingNotSupported,
+ self.lockable.lock_write, token='token')
+
+ def test_lock_write_returns_token_when_given_token(self):
+ token = self.lockable.lock_write()
+ self.addCleanup(self.lockable.unlock)
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ new_lockable = self.get_lockable()
+ token_from_new_lockable = new_lockable.lock_write(token=token)
+ self.addCleanup(new_lockable.unlock)
+ self.assertEqual(token, token_from_new_lockable)
+
+ def test_lock_write_raises_on_token_mismatch(self):
+ token = self.lockable.lock_write()
+ self.addCleanup(self.lockable.unlock)
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ different_token = token + 'xxx'
+ # Re-using the same lockable instance with a different token will
+ # raise TokenMismatch.
+ self.assertRaises(errors.TokenMismatch,
+ self.lockable.lock_write, token=different_token)
+ # A separate instance for the same lockable will also raise
+ # TokenMismatch.
+ # This detects the case where a caller claims to have a lock (via
+ # the token) for an external resource, but doesn't (the token is
+ # different). Clients need a separate lock object to make sure the
+ # external resource is probed, whereas the existing lock object
+ # might cache.
+ new_lockable = self.get_lockable()
+ self.assertRaises(errors.TokenMismatch,
+ new_lockable.lock_write, token=different_token)
+
+ def test_lock_write_with_matching_token(self):
+ # If the token matches, no exception is raised by lock_write.
+ token = self.lockable.lock_write()
+ self.addCleanup(self.lockable.unlock)
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ # The same instance will accept a second lock_write if the specified
+ # token matches.
+ self.lockable.lock_write(token=token)
+ self.lockable.unlock()
+ # Calling lock_write on a new instance for the same lockable will
+ # also succeed.
+ new_lockable = self.get_lockable()
+ new_lockable.lock_write(token=token)
+ new_lockable.unlock()
+
+ def test_unlock_after_lock_write_with_token(self):
+ # If lock_write did not physically acquire the lock (because it was
+ # passed a token), then unlock should not physically release it.
+ token = self.lockable.lock_write()
+ self.addCleanup(self.lockable.unlock)
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ new_lockable = self.get_lockable()
+ new_lockable.lock_write(token=token)
+ new_lockable.unlock()
+ self.assertTrue(self.lockable.get_physical_lock_status())
+
+ def test_lock_write_with_token_fails_when_unlocked(self):
+ # Lock and unlock to get a superficially valid token. This mimics a
+ # likely programming error, where a caller accidentally tries to lock
+ # with a token that is no longer valid (because the original lock was
+ # released).
+ token = self.lockable.lock_write()
+ self.lockable.unlock()
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+
+ self.assertRaises(errors.TokenMismatch,
+ self.lockable.lock_write, token=token)
+
+ def test_lock_write_reenter_with_token(self):
+ token = self.lockable.lock_write()
+ try:
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ # Relock with a token.
+ token_from_reentry = self.lockable.lock_write(token=token)
+ try:
+ self.assertEqual(token, token_from_reentry)
+ finally:
+ self.lockable.unlock()
+ finally:
+ self.lockable.unlock()
+ # The lock should be unlocked on disk. Verify that with a new lock
+ # instance.
+ new_lockable = self.get_lockable()
+ # Calling lock_write now should work, rather than raise LockContention.
+ new_lockable.lock_write()
+ new_lockable.unlock()
+
+ def test_second_lock_write_returns_same_token(self):
+ first_token = self.lockable.lock_write()
+ try:
+ if first_token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ # Relock the already locked lockable. It should return the same
+ # token.
+ second_token = self.lockable.lock_write()
+ try:
+ self.assertEqual(first_token, second_token)
+ finally:
+ self.lockable.unlock()
+ finally:
+ self.lockable.unlock()
+
+ def test_leave_in_place(self):
+ token = self.lockable.lock_write()
+ try:
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ self.lockable.leave_in_place()
+ finally:
+ self.lockable.unlock()
+ # At this point, the lock is still in place on disk
+ self.assertRaises(errors.LockContention, self.lockable.lock_write)
+ # But should be relockable with a token.
+ self.lockable.lock_write(token=token)
+ self.lockable.unlock()
+ # Cleanup: we should still be able to get the lock, but we restore the
+ # behavior to clearing the lock when unlocking.
+ self.lockable.lock_write(token=token)
+ self.lockable.dont_leave_in_place()
+ self.lockable.unlock()
+
+ def test_dont_leave_in_place(self):
+ token = self.lockable.lock_write()
+ try:
+ if token is None:
+ # This test does not apply, because this lockable refuses
+ # tokens.
+ return
+ self.lockable.leave_in_place()
+ finally:
+ self.lockable.unlock()
+ # At this point, the lock is still in place on disk.
+ # Acquire the existing lock with the token, and ask that it is removed
+ # when this object unlocks, and unlock to trigger that removal.
+ new_lockable = self.get_lockable()
+ new_lockable.lock_write(token=token)
+ new_lockable.dont_leave_in_place()
+ new_lockable.unlock()
+ # At this point, the lock is no longer on disk, so we can lock it.
+ third_lockable = self.get_lockable()
+ third_lockable.lock_write()
+ third_lockable.unlock()
+
+
+# This method of adapting tests to parameters is different to
+# the TestProviderAdapters used elsewhere, but seems simpler for this
+# case.
+class TestLockableFiles_TransportLock(TestCaseInTempDir,
+ _TestLockableFiles_mixin):
+
+ def setUp(self):
+ TestCaseInTempDir.setUp(self)
+ t = transport.get_transport_from_path('.')
+ t.mkdir('.bzr')
+ self.sub_transport = t.clone('.bzr')
+ self.lockable = self.get_lockable()
+ self.lockable.create_lock()
+
+ def stop_server(self):
+ super(TestLockableFiles_TransportLock, self).stop_server()
+ # free the subtransport so that we do not get a 5 second
+ # timeout due to the SFTP connection cache.
+ try:
+ del self.sub_transport
+ except AttributeError:
+ pass
+
+ def get_lockable(self):
+ return LockableFiles(self.sub_transport, 'my-lock', TransportLock)
+
+
+class TestLockableFiles_LockDir(TestCaseInTempDir,
+ _TestLockableFiles_mixin):
+ """LockableFile tests run with LockDir underneath"""
+
+ def setUp(self):
+ TestCaseInTempDir.setUp(self)
+ self.transport = transport.get_transport_from_path('.')
+ self.lockable = self.get_lockable()
+ # the lock creation here sets the mode - test_permissions on branch
+ # tests that implicitly, but it might be a good idea to factor
+ # out the mode checking logic and have it applied to lockable files
+ # directly. RBC 20060418
+ self.lockable.create_lock()
+
+ def get_lockable(self):
+ return LockableFiles(self.transport, 'my-lock', lockdir.LockDir)
+
+ def test_lock_created(self):
+ self.assertTrue(self.transport.has('my-lock'))
+ self.lockable.lock_write()
+ self.assertTrue(self.transport.has('my-lock/held/info'))
+ self.lockable.unlock()
+ self.assertFalse(self.transport.has('my-lock/held/info'))
+ self.assertTrue(self.transport.has('my-lock'))
+
+ def test__file_modes(self):
+ self.transport.mkdir('readonly')
+ osutils.make_readonly('readonly')
+ lockable = LockableFiles(self.transport.clone('readonly'), 'test-lock',
+ lockdir.LockDir)
+ # The directory mode should be read-write-execute for the current user
+ self.assertEqual(00700, lockable._dir_mode & 00700)
+ # Files should be read-write for the current user
+ self.assertEqual(00600, lockable._file_mode & 00700)
+
+
+class TestLockableFiles_RemoteLockDir(TestCaseWithSmartMedium,
+ _TestLockableFiles_mixin):
+ """LockableFile tests run with RemoteLockDir on a branch."""
+
+ def setUp(self):
+ TestCaseWithSmartMedium.setUp(self)
+ # can only get a RemoteLockDir with some RemoteObject...
+ # use a branch as that's what we want. These mixin tests test the
+ # end-to-end behaviour, so stubbing out the backend and simulating would
+ # defeat the purpose. We test the protocol implementation separately
+ # in test_remote and test_smart as usual.
+ b = self.make_branch('foo')
+ self.addCleanup(b.bzrdir.transport.disconnect)
+ self.transport = transport.get_transport_from_path('.')
+ self.lockable = self.get_lockable()
+
+ def get_lockable(self):
+ # getting a new lockable involves opening a new instance of the branch
+ branch = bzrlib.branch.Branch.open(self.get_url('foo'))
+ self.addCleanup(branch.bzrdir.transport.disconnect)
+ return branch.control_files
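A condensed view of the object under test, using only the calls the mixin above relies on (the transport path and lock name are illustrative):

    from bzrlib import lockdir, transport
    from bzrlib.lockable_files import LockableFiles

    t = transport.get_transport_from_path('.')
    files = LockableFiles(t, 'my-lock', lockdir.LockDir)
    files.create_lock()   # creates 'my-lock' on the transport
    files.lock_write()
    try:
        pass              # guarded modifications go here
    finally:
        files.unlock()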
diff --git a/bzrlib/tests/test_lockdir.py b/bzrlib/tests/test_lockdir.py
new file mode 100644
index 0000000..0a9ca21
--- /dev/null
+++ b/bzrlib/tests/test_lockdir.py
@@ -0,0 +1,761 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for LockDir"""
+
+import os
+import time
+
+import bzrlib
+from bzrlib import (
+ config,
+ errors,
+ lock,
+ lockdir,
+ osutils,
+ tests,
+ transport,
+ )
+from bzrlib.errors import (
+ LockBreakMismatch,
+ LockBroken,
+ LockContention,
+ LockFailed,
+ LockNotHeld,
+ )
+from bzrlib.lockdir import (
+ LockDir,
+ LockHeldInfo,
+ )
+from bzrlib.tests import (
+ features,
+ TestCase,
+ TestCaseInTempDir,
+ TestCaseWithTransport,
+ )
+
+# These tests are run on the default transport provided by the test framework
+# (typically a local disk transport). That can be changed by the --transport
+# option to bzr selftest. The required properties of the transport
+# implementation are tested separately. (The main requirement is just that
+# they don't allow overwriting nonempty directories.)
+
+
+class TestLockDir(TestCaseWithTransport):
+ """Test LockDir operations"""
+
+ def logging_report_function(self, fmt, *args):
+ self._logged_reports.append((fmt, args))
+
+ def setup_log_reporter(self, lock_dir):
+ self._logged_reports = []
+ lock_dir._report_function = self.logging_report_function
+
+ def test_00_lock_creation(self):
+ """Creation of lock file on a transport"""
+ t = self.get_transport()
+ lf = LockDir(t, 'test_lock')
+ self.assertFalse(lf.is_held)
+
+ def test_01_lock_repr(self):
+ """Lock string representation"""
+ lf = LockDir(self.get_transport(), 'test_lock')
+ r = repr(lf)
+ self.assertContainsRe(r, r'^LockDir\(.*/test_lock\)$')
+
+ def test_02_unlocked_peek(self):
+ lf = LockDir(self.get_transport(), 'test_lock')
+ self.assertEqual(lf.peek(), None)
+
+ def get_lock(self):
+ return LockDir(self.get_transport(), 'test_lock')
+
+ def test_unlock_after_break_raises(self):
+ ld = self.get_lock()
+ ld2 = self.get_lock()
+ ld.create()
+ ld.attempt_lock()
+ ld2.force_break(ld2.peek())
+ self.assertRaises(LockBroken, ld.unlock)
+
+ def test_03_readonly_peek(self):
+ lf = LockDir(self.get_readonly_transport(), 'test_lock')
+ self.assertEqual(lf.peek(), None)
+
+ def test_10_lock_uncontested(self):
+ """Acquire and release a lock"""
+ t = self.get_transport()
+ lf = LockDir(t, 'test_lock')
+ lf.create()
+ lf.attempt_lock()
+ try:
+ self.assertTrue(lf.is_held)
+ finally:
+ lf.unlock()
+ self.assertFalse(lf.is_held)
+
+ def test_11_create_readonly_transport(self):
+ """Fail to create lock on readonly transport"""
+ t = self.get_readonly_transport()
+ lf = LockDir(t, 'test_lock')
+ self.assertRaises(LockFailed, lf.create)
+
+ def test_12_lock_readonly_transport(self):
+ """Fail to lock on readonly transport"""
+ lf = LockDir(self.get_transport(), 'test_lock')
+ lf.create()
+ lf = LockDir(self.get_readonly_transport(), 'test_lock')
+ self.assertRaises(LockFailed, lf.attempt_lock)
+
+ def test_20_lock_contested(self):
+ """Contention to get a lock"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ lf2 = LockDir(t, 'test_lock')
+ try:
+ # locking is between LockDir instances; aliases within
+ # a single process are not detected
+ lf2.attempt_lock()
+ self.fail('Failed to detect lock collision')
+ except LockContention, e:
+ self.assertEqual(e.lock, lf2)
+ self.assertContainsRe(str(e),
+ r'^Could not acquire.*test_lock.*$')
+ lf1.unlock()
+
+ def test_20_lock_peek(self):
+ """Peek at the state of a lock"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ self.addCleanup(lf1.unlock)
+ # lock is held, should get some info on it
+ info1 = lf1.peek()
+ self.assertEqual(set(info1.info_dict.keys()),
+ set(['user', 'nonce', 'hostname', 'pid', 'start_time']))
+ # should get the same info if we look at it through a different
+ # instance
+ info2 = LockDir(t, 'test_lock').peek()
+ self.assertEqual(info1, info2)
+ # locks which are never used should be not-held
+ self.assertEqual(LockDir(t, 'other_lock').peek(), None)
+
+ def test_21_peek_readonly(self):
+ """Peek over a readonly transport"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf2 = LockDir(self.get_readonly_transport(), 'test_lock')
+ self.assertEqual(lf2.peek(), None)
+ lf1.attempt_lock()
+ self.addCleanup(lf1.unlock)
+ info2 = lf2.peek()
+ self.assertTrue(info2)
+ self.assertEqual(info2.get('nonce'), lf1.nonce)
+
+ def test_30_lock_wait_fail(self):
+ """Wait on a lock, then fail
+
+ We ask to wait up to 400ms; this should fail promptly, though the
+ assertion below allows a generous margin in case the machine is
+ heavily loaded. (Longer times are more realistic but we don't want
+ the test suite to take too long, and this should do for now.)
+ """
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf2 = LockDir(t, 'test_lock')
+ self.setup_log_reporter(lf2)
+ lf1.attempt_lock()
+ try:
+ before = time.time()
+ self.assertRaises(LockContention, lf2.wait_lock,
+ timeout=0.4, poll=0.1)
+ after = time.time()
+ # it should only take about 0.4 seconds, but we allow more time in
+ # case the machine is heavily loaded
+ self.assertTrue(after - before <= 8.0,
+ "took %f seconds to detect lock contention" % (after - before))
+ finally:
+ lf1.unlock()
+ self.assertEqual(1, len(self._logged_reports))
+ self.assertContainsRe(self._logged_reports[0][0],
+ r'Unable to obtain lock .* held by jrandom@example\.com on .*'
+ r' \(process #\d+\), acquired .* ago\.\n'
+ r'Will continue to try until \d{2}:\d{2}:\d{2}, unless '
+ r'you press Ctrl-C.\n'
+ r'See "bzr help break-lock" for more.')
+
+ def test_31_lock_wait_easy(self):
+ """Succeed when waiting on a lock with no contention.
+ """
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ self.setup_log_reporter(lf1)
+ try:
+ before = time.time()
+ lf1.wait_lock(timeout=0.4, poll=0.1)
+ after = time.time()
+ self.assertTrue(after - before <= 1.0)
+ finally:
+ lf1.unlock()
+ self.assertEqual([], self._logged_reports)
+
+ def test_40_confirm_easy(self):
+ """Confirm a lock that's already held"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ self.addCleanup(lf1.unlock)
+ lf1.confirm()
+
+ def test_41_confirm_not_held(self):
+ """Confirm a lock that's already held"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ self.assertRaises(LockNotHeld, lf1.confirm)
+
+ def test_42_confirm_broken_manually(self):
+ """Confirm a lock broken by hand"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ t.move('test_lock', 'lock_gone_now')
+ self.assertRaises(LockBroken, lf1.confirm)
+ # Clean up
+ t.move('lock_gone_now', 'test_lock')
+ lf1.unlock()
+
+ def test_43_break(self):
+ """Break a lock whose caller has forgotten it"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ # we incorrectly discard the lock object without unlocking it
+ del lf1
+ # someone else sees it's still locked
+ lf2 = LockDir(t, 'test_lock')
+ holder_info = lf2.peek()
+ self.assertTrue(holder_info)
+ lf2.force_break(holder_info)
+ # now we should be able to take it
+ lf2.attempt_lock()
+ self.addCleanup(lf2.unlock)
+ lf2.confirm()
+
+ def test_44_break_already_released(self):
+ """Lock break races with regular release"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ # someone else sees it's still locked
+ lf2 = LockDir(t, 'test_lock')
+ holder_info = lf2.peek()
+ # in the interim the lock is released
+ lf1.unlock()
+ # break should succeed
+ lf2.force_break(holder_info)
+ # now we should be able to take it
+ lf2.attempt_lock()
+ self.addCleanup(lf2.unlock)
+ lf2.confirm()
+
+ def test_45_break_mismatch(self):
+ """Lock break races with someone else acquiring it"""
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.attempt_lock()
+ # someone else sees it's still locked
+ lf2 = LockDir(t, 'test_lock')
+ holder_info = lf2.peek()
+ # in the interim the lock is released
+ lf1.unlock()
+ lf3 = LockDir(t, 'test_lock')
+ lf3.attempt_lock()
+ # break should now *fail*
+ self.assertRaises(LockBreakMismatch, lf2.force_break,
+ holder_info)
+ lf3.unlock()
+
+ def test_46_fake_read_lock(self):
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ lf1.lock_read()
+ lf1.unlock()
+
+ def test_50_lockdir_representation(self):
+ """Check the on-disk representation of LockDirs is as expected.
+
+ There should always be a top-level directory named by the lock.
+ When the lock is held, there should be a lockname/held directory
+ containing an info file.
+ """
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+ lf1.create()
+ self.assertTrue(t.has('test_lock'))
+ lf1.lock_write()
+ self.assertTrue(t.has('test_lock/held/info'))
+ lf1.unlock()
+ self.assertFalse(t.has('test_lock/held/info'))
+
+ def test_break_lock(self):
+ # the ui based break_lock routine should Just Work (tm)
+ ld1 = self.get_lock()
+ ld2 = self.get_lock()
+ ld1.create()
+ ld1.lock_write()
+ # do this without IO redirection to ensure it doesn't prompt.
+ self.assertRaises(AssertionError, ld1.break_lock)
+ orig_factory = bzrlib.ui.ui_factory
+ bzrlib.ui.ui_factory = bzrlib.ui.CannedInputUIFactory([True])
+ try:
+ ld2.break_lock()
+ self.assertRaises(LockBroken, ld1.unlock)
+ finally:
+ bzrlib.ui.ui_factory = orig_factory
+
+ def test_break_lock_corrupt_info(self):
+ """break_lock works even if the info file is corrupt (and tells the UI
+ that it is corrupt).
+ """
+ ld = self.get_lock()
+ ld2 = self.get_lock()
+ ld.create()
+ ld.lock_write()
+ ld.transport.put_bytes_non_atomic('test_lock/held/info', '\0')
+
+ class LoggingUIFactory(bzrlib.ui.SilentUIFactory):
+ def __init__(self):
+ self.prompts = []
+
+ def get_boolean(self, prompt):
+ self.prompts.append(('boolean', prompt))
+ return True
+
+ ui = LoggingUIFactory()
+ self.overrideAttr(bzrlib.ui, 'ui_factory', ui)
+ ld2.break_lock()
+ self.assertLength(1, ui.prompts)
+ self.assertEqual('boolean', ui.prompts[0][0])
+ self.assertStartsWith(ui.prompts[0][1], 'Break (corrupt LockDir')
+ self.assertRaises(LockBroken, ld.unlock)
+
+ def test_break_lock_missing_info(self):
+ """break_lock works even if the info file is missing (and tells the UI
+ that it is corrupt).
+ """
+ ld = self.get_lock()
+ ld2 = self.get_lock()
+ ld.create()
+ ld.lock_write()
+ ld.transport.delete('test_lock/held/info')
+
+ class LoggingUIFactory(bzrlib.ui.SilentUIFactory):
+ def __init__(self):
+ self.prompts = []
+
+ def get_boolean(self, prompt):
+ self.prompts.append(('boolean', prompt))
+ return True
+
+ ui = LoggingUIFactory()
+ orig_factory = bzrlib.ui.ui_factory
+ bzrlib.ui.ui_factory = ui
+ try:
+ ld2.break_lock()
+ self.assertRaises(LockBroken, ld.unlock)
+ self.assertLength(0, ui.prompts)
+ finally:
+ bzrlib.ui.ui_factory = orig_factory
+ # Suppress warnings due to ld not being unlocked
+ # XXX: if lock_broken hook was invoked in this case, this hack would
+ # not be necessary. - Andrew Bennetts, 2010-09-06.
+ del self._lock_actions[:]
+
+ def test_create_missing_base_directory(self):
+ """If LockDir.path doesn't exist, it can be created
+
+ Some people manually remove the entire lock/ directory trying
+ to unlock a stuck repository/branch/etc. Rather than failing
+ after that, just create the lock directory when needed.
+ """
+ t = self.get_transport()
+ lf1 = LockDir(t, 'test_lock')
+
+ lf1.create()
+ self.assertTrue(t.has('test_lock'))
+
+ t.rmdir('test_lock')
+ self.assertFalse(t.has('test_lock'))
+
+ # This will create 'test_lock' if it needs to
+ lf1.lock_write()
+ self.assertTrue(t.has('test_lock'))
+ self.assertTrue(t.has('test_lock/held/info'))
+
+ lf1.unlock()
+ self.assertFalse(t.has('test_lock/held/info'))
+
+ def test_display_form(self):
+ ld1 = self.get_lock()
+ ld1.create()
+ ld1.lock_write()
+ try:
+ info_list = ld1.peek().to_readable_dict()
+ finally:
+ ld1.unlock()
+ self.assertEqual(info_list['user'], u'jrandom@example.com')
+ self.assertContainsRe(info_list['pid'], '^\d+$')
+ self.assertContainsRe(info_list['time_ago'], r'^\d+ seconds? ago$')
+
+ def test_lock_without_email(self):
+ global_config = config.GlobalStack()
+ # Intentionally has no email address
+ global_config.set('email', 'User Identity')
+ ld1 = self.get_lock()
+ ld1.create()
+ ld1.lock_write()
+ ld1.unlock()
+
+ def test_lock_permission(self):
+ self.requireFeature(features.not_running_as_root)
+ if not osutils.supports_posix_readonly():
+ raise tests.TestSkipped('Cannot induce a permission failure')
+ ld1 = self.get_lock()
+ lock_path = ld1.transport.local_abspath('test_lock')
+ os.mkdir(lock_path)
+ osutils.make_readonly(lock_path)
+ self.assertRaises(errors.LockFailed, ld1.attempt_lock)
+
+ def test_lock_by_token(self):
+ ld1 = self.get_lock()
+ token = ld1.lock_write()
+ self.addCleanup(ld1.unlock)
+ self.assertNotEqual(None, token)
+ ld2 = self.get_lock()
+ t2 = ld2.lock_write(token)
+ self.addCleanup(ld2.unlock)
+ self.assertEqual(token, t2)
+
+ def test_lock_with_buggy_rename(self):
+ # test that lock acquisition handles servers which pretend they
+ # renamed correctly but that actually fail
+ t = transport.get_transport_from_url(
+ 'brokenrename+' + self.get_url())
+ ld1 = LockDir(t, 'test_lock')
+ ld1.create()
+ ld1.attempt_lock()
+ ld2 = LockDir(t, 'test_lock')
+ # we should fail to lock
+ e = self.assertRaises(errors.LockContention, ld2.attempt_lock)
+ # now the original caller should succeed in unlocking
+ ld1.unlock()
+ # and there should be nothing left over
+ self.assertEquals([], t.list_dir('test_lock'))
+
+ def test_failed_lock_leaves_no_trash(self):
+ # if we fail to acquire the lock, we don't leave pending directories
+ # behind -- https://bugs.launchpad.net/bzr/+bug/109169
+ ld1 = self.get_lock()
+ ld2 = self.get_lock()
+ # should be nothing before we start
+ ld1.create()
+ t = self.get_transport().clone('test_lock')
+
+ def check_dir(a):
+ self.assertEquals(a, t.list_dir('.'))
+
+ check_dir([])
+ # when held, that's all we see
+ ld1.attempt_lock()
+ self.addCleanup(ld1.unlock)
+ check_dir(['held'])
+ # second guy should fail
+ self.assertRaises(errors.LockContention, ld2.attempt_lock)
+ # no kibble
+ check_dir(['held'])
+
+ def test_no_lockdir_info(self):
+ """We can cope with empty info files."""
+ # This seems like a fairly common failure case - see
+ # <https://bugs.launchpad.net/bzr/+bug/185103> and all its dupes.
+ # Processes are often interrupted after opening the file
+ # before the actual contents are committed.
+ t = self.get_transport()
+ t.mkdir('test_lock')
+ t.mkdir('test_lock/held')
+ t.put_bytes('test_lock/held/info', '')
+ lf = LockDir(t, 'test_lock')
+ info = lf.peek()
+ formatted_info = info.to_readable_dict()
+ self.assertEquals(
+ dict(user='<unknown>', hostname='<unknown>', pid='<unknown>',
+ time_ago='(unknown)'),
+ formatted_info)
+
+ def test_corrupt_lockdir_info(self):
+ """We can cope with corrupt (and thus unparseable) info files."""
+ # This seems like a fairly common failure case too - see
+ # <https://bugs.launchpad.net/bzr/+bug/619872> for instance.
+ # In particular some systems tend to fill recently created files with
+ # nul bytes after recovering from a system crash.
+ t = self.get_transport()
+ t.mkdir('test_lock')
+ t.mkdir('test_lock/held')
+ t.put_bytes('test_lock/held/info', '\0')
+ lf = LockDir(t, 'test_lock')
+ self.assertRaises(errors.LockCorrupt, lf.peek)
+ # Currently attempt_lock gives LockContention, but LockCorrupt would be
+ # a reasonable result too.
+ self.assertRaises(
+ (errors.LockCorrupt, errors.LockContention), lf.attempt_lock)
+ self.assertRaises(errors.LockCorrupt, lf.validate_token, 'fake token')
+
+ def test_missing_lockdir_info(self):
+ """We can cope with absent info files."""
+ t = self.get_transport()
+ t.mkdir('test_lock')
+ t.mkdir('test_lock/held')
+ lf = LockDir(t, 'test_lock')
+ # In this case we expect the 'not held' result from peek, because peek
+ # cannot be expected to notice that there is a 'held' directory with no
+ # 'info' file.
+ self.assertEqual(None, lf.peek())
+ # And lock/unlock may work or give LockContention (but not any other
+ # error).
+ try:
+ lf.attempt_lock()
+ except LockContention:
+ # LockContention is ok, and expected on Windows
+ pass
+ else:
+ # no error is ok, and expected on POSIX (because POSIX allows
+ # os.rename over an empty directory).
+ lf.unlock()
+ # Currently raises TokenMismatch, but LockCorrupt would be reasonable
+ # too.
+ self.assertRaises(
+ (errors.TokenMismatch, errors.LockCorrupt),
+ lf.validate_token, 'fake token')
+
+
+class TestLockDirHooks(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestLockDirHooks, self).setUp()
+ self._calls = []
+
+ def get_lock(self):
+ return LockDir(self.get_transport(), 'test_lock')
+
+ def record_hook(self, result):
+ self._calls.append(result)
+
+ def test_LockDir_acquired_success(self):
+ # the LockDir.lock_acquired hook fires when a lock is acquired.
+ LockDir.hooks.install_named_hook('lock_acquired',
+ self.record_hook, 'record_hook')
+ ld = self.get_lock()
+ ld.create()
+ self.assertEqual([], self._calls)
+ result = ld.attempt_lock()
+ lock_path = ld.transport.abspath(ld.path)
+ self.assertEqual([lock.LockResult(lock_path, result)], self._calls)
+ ld.unlock()
+ self.assertEqual([lock.LockResult(lock_path, result)], self._calls)
+
+ def test_LockDir_acquired_fail(self):
+ # the LockDir.lock_acquired hook does not fire on failure.
+ ld = self.get_lock()
+ ld.create()
+ ld2 = self.get_lock()
+ ld2.attempt_lock()
+ # install a lock hook now, when the disk lock is locked
+ LockDir.hooks.install_named_hook('lock_acquired',
+ self.record_hook, 'record_hook')
+ self.assertRaises(errors.LockContention, ld.attempt_lock)
+ self.assertEqual([], self._calls)
+ ld2.unlock()
+ self.assertEqual([], self._calls)
+
+ def test_LockDir_released_success(self):
+ # the LockDir.lock_released hook fires when a lock is released.
+ LockDir.hooks.install_named_hook('lock_released',
+ self.record_hook, 'record_hook')
+ ld = self.get_lock()
+ ld.create()
+ self.assertEqual([], self._calls)
+ result = ld.attempt_lock()
+ self.assertEqual([], self._calls)
+ ld.unlock()
+ lock_path = ld.transport.abspath(ld.path)
+ self.assertEqual([lock.LockResult(lock_path, result)], self._calls)
+
+ def test_LockDir_released_fail(self):
+ # the LockDir.lock_released hook does not fire on failure.
+ ld = self.get_lock()
+ ld.create()
+ ld2 = self.get_lock()
+ ld.attempt_lock()
+ ld2.force_break(ld2.peek())
+ LockDir.hooks.install_named_hook('lock_released',
+ self.record_hook, 'record_hook')
+ self.assertRaises(LockBroken, ld.unlock)
+ self.assertEqual([], self._calls)
+
+ def test_LockDir_broken_success(self):
+ # the LockDir.lock_broken hook fires when a lock is broken.
+ ld = self.get_lock()
+ ld.create()
+ ld2 = self.get_lock()
+ result = ld.attempt_lock()
+ LockDir.hooks.install_named_hook('lock_broken',
+ self.record_hook, 'record_hook')
+ ld2.force_break(ld2.peek())
+ lock_path = ld.transport.abspath(ld.path)
+ self.assertEqual([lock.LockResult(lock_path, result)], self._calls)
+
+ def test_LockDir_broken_failure(self):
+ # the LockDir.lock_broken hook does not fire when a lock is already
+ # released.
+ ld = self.get_lock()
+ ld.create()
+ ld2 = self.get_lock()
+ result = ld.attempt_lock()
+ holder_info = ld2.peek()
+ ld.unlock()
+ LockDir.hooks.install_named_hook('lock_broken',
+ self.record_hook, 'record_hook')
+ ld2.force_break(holder_info)
+ lock_path = ld.transport.abspath(ld.path)
+ self.assertEqual([], self._calls)
+
+
+class TestLockHeldInfo(TestCaseInTempDir):
+ """Can get information about the lock holder, and detect whether they're
+ still alive."""
+
+ def test_repr(self):
+ info = LockHeldInfo.for_this_process(None)
+ self.assertContainsRe(repr(info), r"LockHeldInfo\(.*\)")
+
+ def test_unicode(self):
+ info = LockHeldInfo.for_this_process(None)
+ self.assertContainsRe(unicode(info),
+ r'held by .* on .* \(process #\d+\), acquired .* ago')
+
+ def test_is_locked_by_this_process(self):
+ info = LockHeldInfo.for_this_process(None)
+ self.assertTrue(info.is_locked_by_this_process())
+
+ def test_is_not_locked_by_this_process(self):
+ info = LockHeldInfo.for_this_process(None)
+ info.info_dict['pid'] = '123123123123123'
+ self.assertFalse(info.is_locked_by_this_process())
+
+ def test_lock_holder_live_process(self):
+ """Detect that the holder (this process) is still running."""
+ info = LockHeldInfo.for_this_process(None)
+ self.assertFalse(info.is_lock_holder_known_dead())
+
+ def test_lock_holder_dead_process(self):
+ """Detect that the holder (this process) is still running."""
+ self.overrideAttr(lockdir, 'get_host_name',
+ lambda: 'aproperhostname')
+ info = LockHeldInfo.for_this_process(None)
+ info.info_dict['pid'] = '123123123'
+ self.assertTrue(info.is_lock_holder_known_dead())
+
+ def test_lock_holder_other_machine(self):
+ """The lock holder isn't here so we don't know if they're alive."""
+ info = LockHeldInfo.for_this_process(None)
+ info.info_dict['hostname'] = 'egg.example.com'
+ info.info_dict['pid'] = '123123123'
+ self.assertFalse(info.is_lock_holder_known_dead())
+
+ def test_lock_holder_other_user(self):
+ """Only auto-break locks held by this user."""
+ info = LockHeldInfo.for_this_process(None)
+ info.info_dict['user'] = 'notme@example.com'
+ info.info_dict['pid'] = '123123123'
+ self.assertFalse(info.is_lock_holder_known_dead())
+
+ def test_no_good_hostname(self):
+ """Correctly handle ambiguous hostnames.
+
+ If the lock's recorded with just 'localhost' we can't really trust
+ it's the same 'localhost'. (There are quite a few of them. :-)
+ So even if no process with the recorded pid is alive here, we can't
+ say for sure that the lock is stale.
+ """
+ self.overrideAttr(lockdir, 'get_host_name',
+ lambda: 'localhost')
+ info = LockHeldInfo.for_this_process(None)
+ info.info_dict['pid'] = '123123123'
+ self.assertFalse(info.is_lock_holder_known_dead())
+
+
+class TestStaleLockDir(TestCaseWithTransport):
+ """Can automatically break stale locks.
+
+ :see: https://bugs.launchpad.net/bzr/+bug/220464
+ """
+
+ def test_auto_break_stale_lock(self):
+ """Locks safely known to be stale are just cleaned up.
+
+ This generates a warning but no other user interaction.
+ """
+ self.overrideAttr(lockdir, 'get_host_name',
+ lambda: 'aproperhostname')
+ # This is off by default at present; see the discussion in the bug.
+ # If you change the default, don't forget to update the docs.
+ config.GlobalConfig().set_user_option('locks.steal_dead', True)
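+ # Illustrative only: outside the test suite this roughly corresponds to
+ # the following stanza in the user's bazaar.conf:
+ #
+ #     [DEFAULT]
+ #     locks.steal_dead = True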
+ # Create a lock pretending to come from a different nonexistent
+ # process on the same machine.
+ l1 = LockDir(self.get_transport(), 'a',
+ extra_holder_info={'pid': '12312313'})
+ token_1 = l1.attempt_lock()
+ l2 = LockDir(self.get_transport(), 'a')
+ token_2 = l2.attempt_lock()
+ # l1 will notice its lock was stolen.
+ self.assertRaises(errors.LockBroken,
+ l1.unlock)
+ l2.unlock()
+
+ def test_auto_break_stale_lock_configured_off(self):
+ """Automatic breaking can be turned off"""
+ l1 = LockDir(self.get_transport(), 'a',
+ extra_holder_info={'pid': '12312313'})
+ token_1 = l1.attempt_lock()
+ self.addCleanup(l1.unlock)
+ l2 = LockDir(self.get_transport(), 'a')
+ # This fails now, because dead lock breaking is off by default.
+ self.assertRaises(LockContention,
+ l2.attempt_lock)
+ # and it's in fact not broken
+ l1.confirm()
diff --git a/bzrlib/tests/test_log.py b/bzrlib/tests/test_log.py
new file mode 100644
index 0000000..08b2e91
--- /dev/null
+++ b/bzrlib/tests/test_log.py
@@ -0,0 +1,1638 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+from cStringIO import StringIO
+
+from bzrlib import (
+ branchbuilder,
+ errors,
+ log,
+ registry,
+ revision,
+ revisionspec,
+ tests,
+ )
+
+
+class TestLogMixin(object):
+
+ def wt_commit(self, wt, message, **kwargs):
+ """Use some mostly fixed values for commits to simplify tests.
+
+ Tests can use this function to get some commit attributes. The time
+ stamp is incremented at each commit.
+ """
+ if getattr(self, 'timestamp', None) is None:
+ self.timestamp = 1132617600 # Mon 2005-11-22 00:00:00 +0000
+ else:
+ self.timestamp += 1 # 1 second between each commit
+ kwargs.setdefault('timestamp', self.timestamp)
+ kwargs.setdefault('timezone', 0) # UTC
+ kwargs.setdefault('committer', 'Joe Foo <joe@foo.com>')
+
+ return wt.commit(message, **kwargs)
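+
+ # Illustrative only: successive wt_commit() calls give deterministic,
+ # strictly increasing timestamps, e.g.
+ #
+ #     self.wt_commit(wt, 'rev-1')   # Tue 2005-11-22 00:00:00 +0000
+ #     self.wt_commit(wt, 'rev-2')   # Tue 2005-11-22 00:00:01 +0000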
+
+
+class TestCaseForLogFormatter(tests.TestCaseWithTransport, TestLogMixin):
+
+ def setUp(self):
+ super(TestCaseForLogFormatter, self).setUp()
+ # keep a reference to the "current" custom prop. handler registry
+ self.properties_handler_registry = log.properties_handler_registry
+ # Use a clean registry for log
+ log.properties_handler_registry = registry.Registry()
+
+ def restore():
+ log.properties_handler_registry = self.properties_handler_registry
+ self.addCleanup(restore)
+
+ def assertFormatterResult(self, result, branch, formatter_class,
+ formatter_kwargs=None, show_log_kwargs=None):
+ logfile = self.make_utf8_encoded_stringio()
+ if formatter_kwargs is None:
+ formatter_kwargs = {}
+ formatter = formatter_class(to_file=logfile, **formatter_kwargs)
+ if show_log_kwargs is None:
+ show_log_kwargs = {}
+ log.show_log(branch, formatter, **show_log_kwargs)
+ self.assertEqualDiff(result, logfile.getvalue())
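+
+ # Illustrative only -- the pattern used by most formatter tests below:
+ #
+ #     self.assertFormatterResult(expected_text, wt.branch,
+ #         log.LongLogFormatter,
+ #         formatter_kwargs=dict(levels=1),
+ #         show_log_kwargs=dict(verbose=True))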
+
+ def make_standard_commit(self, branch_nick, **kwargs):
+ wt = self.make_branch_and_tree('.')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ self.build_tree(['a'])
+ wt.add(['a'])
+ wt.branch.nick = branch_nick
+ kwargs.setdefault('committer', 'Lorem Ipsum <test@example.com>')
+ kwargs.setdefault('authors', ['John Doe <jdoe@example.com>'])
+ self.wt_commit(wt, 'add a', **kwargs)
+ return wt
+
+ def make_commits_with_trailing_newlines(self, wt):
+ """Helper method for LogFormatter tests"""
+ b = wt.branch
+ b.nick = 'test'
+ self.build_tree_contents([('a', 'hello moto\n')])
+ self.wt_commit(wt, 'simple log message', rev_id='a1')
+ self.build_tree_contents([('b', 'goodbye\n')])
+ wt.add('b')
+ self.wt_commit(wt, 'multiline\nlog\nmessage\n', rev_id='a2')
+
+ self.build_tree_contents([('c', 'just another manic monday\n')])
+ wt.add('c')
+ self.wt_commit(wt, 'single line with trailing newline\n', rev_id='a3')
+ return b
+
+ def _prepare_tree_with_merges(self, with_tags=False):
+ wt = self.make_branch_and_memory_tree('.')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ wt.add('')
+ self.wt_commit(wt, 'rev-1', rev_id='rev-1')
+ self.wt_commit(wt, 'rev-merged', rev_id='rev-2a')
+ wt.set_parent_ids(['rev-1', 'rev-2a'])
+ wt.branch.set_last_revision_info(1, 'rev-1')
+ self.wt_commit(wt, 'rev-2', rev_id='rev-2b')
+ if with_tags:
+ branch = wt.branch
+ branch.tags.set_tag('v0.2', 'rev-2b')
+ self.wt_commit(wt, 'rev-3', rev_id='rev-3')
+ branch.tags.set_tag('v1.0rc1', 'rev-3')
+ branch.tags.set_tag('v1.0', 'rev-3')
+ return wt
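+
+ # The graph built above, for reference (rev-2a appears in the logs as
+ # dotted revno 1.1.1; rev-3 and the tags only exist when with_tags=True):
+ #
+ #   rev-1
+ #    |  \
+ #    |   rev-2a
+ #    |  /
+ #   rev-2b
+ #    |
+ #   rev-3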
+
+
+class LogCatcher(log.LogFormatter):
+ """Pull log messages into a list rather than displaying them.
+
+ To simplify testing we save logged revisions here rather than actually
+ formatting anything, so that we can precisely check the result without
+ being dependent on the formatting.
+ """
+
+ supports_merge_revisions = True
+ supports_delta = True
+ supports_diff = True
+ preferred_levels = 0
+
+ def __init__(self, *args, **kwargs):
+ kwargs.update(dict(to_file=None))
+ super(LogCatcher, self).__init__(*args, **kwargs)
+ self.revisions = []
+
+ def log_revision(self, revision):
+ self.revisions.append(revision)
+
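+# Illustrative only -- how the tests below typically drive LogCatcher:
+#
+#     lf = LogCatcher()
+#     log.show_log(branch, lf, verbose=True)
+#     lf.revisions[0].revno        # most recent revision comes first
+#     lf.revisions[0].rev.message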
+
+class TestShowLog(tests.TestCaseWithTransport):
+
+ def checkDelta(self, delta, **kw):
+ """Check the filenames touched by a delta are as expected.
+
+ Callers only have to pass in the list of files for each part; all
+ unspecified parts are considered empty (and checked as such).
+ """
+ for n in 'added', 'removed', 'renamed', 'modified', 'unchanged':
+ # By default we expect an empty list
+ expected = kw.get(n, [])
+ # strip out only the path components
+ got = [x[0] for x in getattr(delta, n)]
+ self.assertEqual(expected, got)
+
+ def assertInvalidRevisonNumber(self, br, start, end):
+ lf = LogCatcher()
+ self.assertRaises(errors.InvalidRevisionNumber,
+ log.show_log, br, lf,
+ start_revision=start, end_revision=end)
+
+ def test_cur_revno(self):
+ wt = self.make_branch_and_tree('.')
+ b = wt.branch
+
+ lf = LogCatcher()
+ wt.commit('empty commit')
+ log.show_log(b, lf, verbose=True, start_revision=1, end_revision=1)
+
+ # Since there is a single revision in the branch all the combinations
+ # below should fail.
+ self.assertInvalidRevisonNumber(b, 2, 1)
+ self.assertInvalidRevisonNumber(b, 1, 2)
+ self.assertInvalidRevisonNumber(b, 0, 2)
+ self.assertInvalidRevisonNumber(b, 1, 0)
+ self.assertInvalidRevisonNumber(b, -1, 1)
+ self.assertInvalidRevisonNumber(b, 1, -1)
+
+ def test_empty_branch(self):
+ wt = self.make_branch_and_tree('.')
+
+ lf = LogCatcher()
+ log.show_log(wt.branch, lf)
+ # no entries yet
+ self.assertEqual([], lf.revisions)
+
+ def test_empty_commit(self):
+ wt = self.make_branch_and_tree('.')
+
+ wt.commit('empty commit')
+ lf = LogCatcher()
+ log.show_log(wt.branch, lf, verbose=True)
+ revs = lf.revisions
+ self.assertEqual(1, len(revs))
+ self.assertEqual('1', revs[0].revno)
+ self.assertEqual('empty commit', revs[0].rev.message)
+ self.checkDelta(revs[0].delta)
+
+ def test_simple_commit(self):
+ wt = self.make_branch_and_tree('.')
+ wt.commit('empty commit')
+ self.build_tree(['hello'])
+ wt.add('hello')
+ wt.commit('add one file',
+ committer=u'\u013d\xf3r\xe9m \xcdp\u0161\xfam '
+ u'<test@example.com>')
+ lf = LogCatcher()
+ log.show_log(wt.branch, lf, verbose=True)
+ self.assertEqual(2, len(lf.revisions))
+ # first one is most recent
+ log_entry = lf.revisions[0]
+ self.assertEqual('2', log_entry.revno)
+ self.assertEqual('add one file', log_entry.rev.message)
+ self.checkDelta(log_entry.delta, added=['hello'])
+
+ def test_commit_message_with_control_chars(self):
+ wt = self.make_branch_and_tree('.')
+ msg = u"All 8-bit chars: " + ''.join([unichr(x) for x in range(256)])
+ msg = msg.replace(u'\r', u'\n')
+ wt.commit(msg)
+ lf = LogCatcher()
+ log.show_log(wt.branch, lf, verbose=True)
+ committed_msg = lf.revisions[0].rev.message
+ if wt.branch.repository._serializer.squashes_xml_invalid_characters:
+ self.assertNotEqual(msg, committed_msg)
+ self.assertTrue(len(committed_msg) > len(msg))
+ else:
+ self.assertEqual(msg, committed_msg)
+
+ def test_commit_message_without_control_chars(self):
+ wt = self.make_branch_and_tree('.')
+ # The message deliberately contains only characters that need no
+ # escaping. As ElementTree apparently does some kind of newline
+ # conversion, neither LF (\x0A) nor CR (\x0D) are included in the
+ # test commit message, even though they are valid XML 1.0 characters.
+ msg = "\x09" + ''.join([unichr(x) for x in range(0x20, 256)])
+ wt.commit(msg)
+ lf = LogCatcher()
+ log.show_log(wt.branch, lf, verbose=True)
+ committed_msg = lf.revisions[0].rev.message
+ self.assertEqual(msg, committed_msg)
+
+ def test_deltas_in_merge_revisions(self):
+ """Check deltas created for both mainline and merge revisions"""
+ wt = self.make_branch_and_tree('parent')
+ self.build_tree(['parent/file1', 'parent/file2', 'parent/file3'])
+ wt.add('file1')
+ wt.add('file2')
+ wt.commit(message='add file1 and file2')
+ self.run_bzr('branch parent child')
+ os.unlink('child/file1')
+ with file('child/file2', 'wb') as f: f.write('hello\n')
+ self.run_bzr(['commit', '-m', 'remove file1 and modify file2',
+ 'child'])
+ os.chdir('parent')
+ self.run_bzr('merge ../child')
+ wt.commit('merge child branch')
+ os.chdir('..')
+ b = wt.branch
+ lf = LogCatcher()
+ lf.supports_merge_revisions = True
+ log.show_log(b, lf, verbose=True)
+
+ revs = lf.revisions
+ self.assertEqual(3, len(revs))
+
+ logentry = revs[0]
+ self.assertEqual('2', logentry.revno)
+ self.assertEqual('merge child branch', logentry.rev.message)
+ self.checkDelta(logentry.delta, removed=['file1'], modified=['file2'])
+
+ logentry = revs[1]
+ self.assertEqual('1.1.1', logentry.revno)
+ self.assertEqual('remove file1 and modify file2', logentry.rev.message)
+ self.checkDelta(logentry.delta, removed=['file1'], modified=['file2'])
+
+ logentry = revs[2]
+ self.assertEqual('1', logentry.revno)
+ self.assertEqual('add file1 and file2', logentry.rev.message)
+ self.checkDelta(logentry.delta, added=['file1', 'file2'])
+
+
+class TestShortLogFormatter(TestCaseForLogFormatter):
+
+ def test_trailing_newlines(self):
+ wt = self.make_branch_and_tree('.')
+ b = self.make_commits_with_trailing_newlines(wt)
+ self.assertFormatterResult("""\
+ 3 Joe Foo\t2005-11-22
+ single line with trailing newline
+
+ 2 Joe Foo\t2005-11-22
+ multiline
+ log
+ message
+
+ 1 Joe Foo\t2005-11-22
+ simple log message
+
+""",
+ b, log.ShortLogFormatter)
+
+ def test_short_log_with_merges(self):
+ wt = self._prepare_tree_with_merges()
+ self.assertFormatterResult("""\
+ 2 Joe Foo\t2005-11-22 [merge]
+ rev-2
+
+ 1 Joe Foo\t2005-11-22
+ rev-1
+
+""",
+ wt.branch, log.ShortLogFormatter)
+
+ def test_short_log_with_merges_and_advice(self):
+ wt = self._prepare_tree_with_merges()
+ self.assertFormatterResult("""\
+ 2 Joe Foo\t2005-11-22 [merge]
+ rev-2
+
+ 1 Joe Foo\t2005-11-22
+ rev-1
+
+Use --include-merged or -n0 to see merged revisions.
+""",
+ wt.branch, log.ShortLogFormatter,
+ formatter_kwargs=dict(show_advice=True))
+
+ def test_short_log_with_merges_and_range(self):
+ wt = self._prepare_tree_with_merges()
+ self.wt_commit(wt, 'rev-3a', rev_id='rev-3a')
+ wt.branch.set_last_revision_info(2, 'rev-2b')
+ wt.set_parent_ids(['rev-2b', 'rev-3a'])
+ self.wt_commit(wt, 'rev-3b', rev_id='rev-3b')
+ self.assertFormatterResult("""\
+ 3 Joe Foo\t2005-11-22 [merge]
+ rev-3b
+
+ 2 Joe Foo\t2005-11-22 [merge]
+ rev-2
+
+""",
+ wt.branch, log.ShortLogFormatter,
+ show_log_kwargs=dict(start_revision=2, end_revision=3))
+
+ def test_short_log_with_tags(self):
+ wt = self._prepare_tree_with_merges(with_tags=True)
+ self.assertFormatterResult("""\
+ 3 Joe Foo\t2005-11-22 {v1.0, v1.0rc1}
+ rev-3
+
+ 2 Joe Foo\t2005-11-22 {v0.2} [merge]
+ rev-2
+
+ 1 Joe Foo\t2005-11-22
+ rev-1
+
+""",
+ wt.branch, log.ShortLogFormatter)
+
+ def test_short_log_single_merge_revision(self):
+ wt = self._prepare_tree_with_merges()
+ revspec = revisionspec.RevisionSpec.from_string('1.1.1')
+ rev = revspec.in_history(wt.branch)
+ self.assertFormatterResult("""\
+ 1.1.1 Joe Foo\t2005-11-22
+ rev-merged
+
+""",
+ wt.branch, log.ShortLogFormatter,
+ show_log_kwargs=dict(start_revision=rev, end_revision=rev))
+
+ def test_show_ids(self):
+ wt = self.make_branch_and_tree('parent')
+ self.build_tree(['parent/f1', 'parent/f2'])
+ wt.add(['f1','f2'])
+ self.wt_commit(wt, 'first post', rev_id='a')
+ child_wt = wt.bzrdir.sprout('child').open_workingtree()
+ self.wt_commit(child_wt, 'branch 1 changes', rev_id='b')
+ wt.merge_from_branch(child_wt.branch)
+ self.wt_commit(wt, 'merge branch 1', rev_id='c')
+ self.assertFormatterResult("""\
+ 2 Joe Foo\t2005-11-22 [merge]
+ revision-id:c
+ merge branch 1
+
+ 1.1.1 Joe Foo\t2005-11-22
+ revision-id:b
+ branch 1 changes
+
+ 1 Joe Foo\t2005-11-22
+ revision-id:a
+ first post
+
+""",
+ wt.branch, log.ShortLogFormatter,
+ formatter_kwargs=dict(levels=0,show_ids=True))
+
+
+class TestShortLogFormatterWithMergeRevisions(TestCaseForLogFormatter):
+
+ def test_short_merge_revs_log_with_merges(self):
+ wt = self._prepare_tree_with_merges()
+ # Note that the 1.1.1 indenting is in fact correct given that
+ # the revision numbers are right justified within 5 characters
+ # for mainline revnos and 9 characters for dotted revnos.
+ self.assertFormatterResult("""\
+ 2 Joe Foo\t2005-11-22 [merge]
+ rev-2
+
+ 1.1.1 Joe Foo\t2005-11-22
+ rev-merged
+
+ 1 Joe Foo\t2005-11-22
+ rev-1
+
+""",
+ wt.branch, log.ShortLogFormatter,
+ formatter_kwargs=dict(levels=0))
+
+ def test_short_merge_revs_log_single_merge_revision(self):
+ wt = self._prepare_tree_with_merges()
+ revspec = revisionspec.RevisionSpec.from_string('1.1.1')
+ rev = revspec.in_history(wt.branch)
+ self.assertFormatterResult("""\
+ 1.1.1 Joe Foo\t2005-11-22
+ rev-merged
+
+""",
+ wt.branch, log.ShortLogFormatter,
+ formatter_kwargs=dict(levels=0),
+ show_log_kwargs=dict(start_revision=rev, end_revision=rev))
+
+
+class TestLongLogFormatter(TestCaseForLogFormatter):
+
+ def test_verbose_log(self):
+ """Verbose log includes changed files
+
+ bug #4676
+ """
+ wt = self.make_standard_commit('test_verbose_log', authors=[])
+ self.assertFormatterResult('''\
+------------------------------------------------------------
+revno: 1
+committer: Lorem Ipsum <test@example.com>
+branch nick: test_verbose_log
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+added:
+ a
+''',
+ wt.branch, log.LongLogFormatter,
+ show_log_kwargs=dict(verbose=True))
+
+ def test_merges_are_indented_by_level(self):
+ wt = self.make_branch_and_tree('parent')
+ self.wt_commit(wt, 'first post')
+ child_wt = wt.bzrdir.sprout('child').open_workingtree()
+ self.wt_commit(child_wt, 'branch 1')
+ smallerchild_wt = wt.bzrdir.sprout('smallerchild').open_workingtree()
+ self.wt_commit(smallerchild_wt, 'branch 2')
+ child_wt.merge_from_branch(smallerchild_wt.branch)
+ self.wt_commit(child_wt, 'merge branch 2')
+ wt.merge_from_branch(child_wt.branch)
+ self.wt_commit(wt, 'merge branch 1')
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 2 [merge]
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:04 +0000
+message:
+ merge branch 1
+ ------------------------------------------------------------
+ revno: 1.1.2 [merge]
+ committer: Joe Foo <joe@foo.com>
+ branch nick: child
+ timestamp: Tue 2005-11-22 00:00:03 +0000
+ message:
+ merge branch 2
+ ------------------------------------------------------------
+ revno: 1.2.1
+ committer: Joe Foo <joe@foo.com>
+ branch nick: smallerchild
+ timestamp: Tue 2005-11-22 00:00:02 +0000
+ message:
+ branch 2
+ ------------------------------------------------------------
+ revno: 1.1.1
+ committer: Joe Foo <joe@foo.com>
+ branch nick: child
+ timestamp: Tue 2005-11-22 00:00:01 +0000
+ message:
+ branch 1
+------------------------------------------------------------
+revno: 1
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ first post
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=0),
+ show_log_kwargs=dict(verbose=True))
+
+ def test_verbose_merge_revisions_contain_deltas(self):
+ wt = self.make_branch_and_tree('parent')
+ self.build_tree(['parent/f1', 'parent/f2'])
+ wt.add(['f1','f2'])
+ self.wt_commit(wt, 'first post')
+ child_wt = wt.bzrdir.sprout('child').open_workingtree()
+ os.unlink('child/f1')
+ self.build_tree_contents([('child/f2', 'hello\n')])
+ self.wt_commit(child_wt, 'removed f1 and modified f2')
+ wt.merge_from_branch(child_wt.branch)
+ self.wt_commit(wt, 'merge branch 1')
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 2 [merge]
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:02 +0000
+message:
+ merge branch 1
+removed:
+ f1
+modified:
+ f2
+ ------------------------------------------------------------
+ revno: 1.1.1
+ committer: Joe Foo <joe@foo.com>
+ branch nick: child
+ timestamp: Tue 2005-11-22 00:00:01 +0000
+ message:
+ removed f1 and modified f2
+ removed:
+ f1
+ modified:
+ f2
+------------------------------------------------------------
+revno: 1
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ first post
+added:
+ f1
+ f2
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=0),
+ show_log_kwargs=dict(verbose=True))
+
+ def test_trailing_newlines(self):
+ wt = self.make_branch_and_tree('.')
+ b = self.make_commits_with_trailing_newlines(wt)
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 3
+committer: Joe Foo <joe@foo.com>
+branch nick: test
+timestamp: Tue 2005-11-22 00:00:02 +0000
+message:
+ single line with trailing newline
+------------------------------------------------------------
+revno: 2
+committer: Joe Foo <joe@foo.com>
+branch nick: test
+timestamp: Tue 2005-11-22 00:00:01 +0000
+message:
+ multiline
+ log
+ message
+------------------------------------------------------------
+revno: 1
+committer: Joe Foo <joe@foo.com>
+branch nick: test
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ simple log message
+""",
+ b, log.LongLogFormatter)
+
+ def test_author_in_log(self):
+ """Log includes the author name if it's set in
+ the revision properties
+ """
+ wt = self.make_standard_commit('test_author_log',
+ authors=['John Doe <jdoe@example.com>',
+ 'Jane Rey <jrey@example.com>'])
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 1
+author: John Doe <jdoe@example.com>, Jane Rey <jrey@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: test_author_log
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""",
+ wt.branch, log.LongLogFormatter)
+
+ def test_properties_in_log(self):
+ """Log includes the custom properties returned by the registered
+ handlers.
+ """
+ wt = self.make_standard_commit('test_properties_in_log')
+ def trivial_custom_prop_handler(revision):
+ return {'test_prop':'test_value'}
+
+ # Cleaned up in setUp()
+ log.properties_handler_registry.register(
+ 'trivial_custom_prop_handler',
+ trivial_custom_prop_handler)
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 1
+test_prop: test_value
+author: John Doe <jdoe@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: test_properties_in_log
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""",
+ wt.branch, log.LongLogFormatter)
+
+ def test_properties_in_short_log(self):
+ """Log includes the custom properties returned by the registered
+ handlers.
+ """
+ wt = self.make_standard_commit('test_properties_in_short_log')
+ def trivial_custom_prop_handler(revision):
+ return {'test_prop':'test_value'}
+
+ log.properties_handler_registry.register(
+ 'trivial_custom_prop_handler',
+ trivial_custom_prop_handler)
+ self.assertFormatterResult("""\
+ 1 John Doe\t2005-11-22
+ test_prop: test_value
+ add a
+
+""",
+ wt.branch, log.ShortLogFormatter)
+
+ def test_error_in_properties_handler(self):
+ """Log includes the custom properties returned by the registered
+ handlers.
+ """
+ wt = self.make_standard_commit('error_in_properties_handler',
+ revprops={'first_prop':'first_value'})
+ sio = self.make_utf8_encoded_stringio()
+ formatter = log.LongLogFormatter(to_file=sio)
+ def trivial_custom_prop_handler(revision):
+ raise StandardError("a test error")
+
+ log.properties_handler_registry.register(
+ 'trivial_custom_prop_handler',
+ trivial_custom_prop_handler)
+ self.assertRaises(StandardError, log.show_log, wt.branch, formatter,)
+
+ def test_properties_handler_bad_argument(self):
+ wt = self.make_standard_commit('bad_argument',
+ revprops={'a_prop':'test_value'})
+ sio = self.make_utf8_encoded_stringio()
+ formatter = log.LongLogFormatter(to_file=sio)
+ def bad_argument_prop_handler(revision):
+ return {'custom_prop_name':revision.properties['a_prop']}
+
+ log.properties_handler_registry.register(
+ 'bad_argument_prop_handler',
+ bad_argument_prop_handler)
+
+ self.assertRaises(AttributeError, formatter.show_properties,
+ 'a revision', '')
+
+ revision = wt.branch.repository.get_revision(wt.branch.last_revision())
+ formatter.show_properties(revision, '')
+ self.assertEqualDiff('''custom_prop_name: test_value\n''',
+ sio.getvalue())
+
+ def test_show_ids(self):
+ wt = self.make_branch_and_tree('parent')
+ self.build_tree(['parent/f1', 'parent/f2'])
+ wt.add(['f1','f2'])
+ self.wt_commit(wt, 'first post', rev_id='a')
+ child_wt = wt.bzrdir.sprout('child').open_workingtree()
+ self.wt_commit(child_wt, 'branch 1 changes', rev_id='b')
+ wt.merge_from_branch(child_wt.branch)
+ self.wt_commit(wt, 'merge branch 1', rev_id='c')
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 2 [merge]
+revision-id: c
+parent: a
+parent: b
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:02 +0000
+message:
+ merge branch 1
+ ------------------------------------------------------------
+ revno: 1.1.1
+ revision-id: b
+ parent: a
+ committer: Joe Foo <joe@foo.com>
+ branch nick: child
+ timestamp: Tue 2005-11-22 00:00:01 +0000
+ message:
+ branch 1 changes
+------------------------------------------------------------
+revno: 1
+revision-id: a
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ first post
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=0,show_ids=True))
+
+
+class TestLongLogFormatterWithoutMergeRevisions(TestCaseForLogFormatter):
+
+ def test_long_verbose_log(self):
+ """Verbose log includes changed files
+
+ bug #4676
+ """
+ wt = self.make_standard_commit('test_long_verbose_log', authors=[])
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 1
+committer: Lorem Ipsum <test@example.com>
+branch nick: test_long_verbose_log
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+added:
+ a
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=1),
+ show_log_kwargs=dict(verbose=True))
+
+ def test_long_verbose_contain_deltas(self):
+ wt = self.make_branch_and_tree('parent')
+ self.build_tree(['parent/f1', 'parent/f2'])
+ wt.add(['f1','f2'])
+ self.wt_commit(wt, 'first post')
+ child_wt = wt.bzrdir.sprout('child').open_workingtree()
+ os.unlink('child/f1')
+ self.build_tree_contents([('child/f2', 'hello\n')])
+ self.wt_commit(child_wt, 'removed f1 and modified f2')
+ wt.merge_from_branch(child_wt.branch)
+ self.wt_commit(wt, 'merge branch 1')
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 2 [merge]
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:02 +0000
+message:
+ merge branch 1
+removed:
+ f1
+modified:
+ f2
+------------------------------------------------------------
+revno: 1
+committer: Joe Foo <joe@foo.com>
+branch nick: parent
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ first post
+added:
+ f1
+ f2
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=1),
+ show_log_kwargs=dict(verbose=True))
+
+ def test_long_trailing_newlines(self):
+ wt = self.make_branch_and_tree('.')
+ b = self.make_commits_with_trailing_newlines(wt)
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 3
+committer: Joe Foo <joe@foo.com>
+branch nick: test
+timestamp: Tue 2005-11-22 00:00:02 +0000
+message:
+ single line with trailing newline
+------------------------------------------------------------
+revno: 2
+committer: Joe Foo <joe@foo.com>
+branch nick: test
+timestamp: Tue 2005-11-22 00:00:01 +0000
+message:
+ multiline
+ log
+ message
+------------------------------------------------------------
+revno: 1
+committer: Joe Foo <joe@foo.com>
+branch nick: test
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ simple log message
+""",
+ b, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=1))
+
+ def test_long_author_in_log(self):
+ """Log includes the author name if it's set in
+ the revision properties
+ """
+ wt = self.make_standard_commit('test_author_log')
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 1
+author: John Doe <jdoe@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: test_author_log
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=1))
+
+ def test_long_properties_in_log(self):
+ """Log includes the custom properties returned by the registered
+ handlers.
+ """
+ wt = self.make_standard_commit('test_properties_in_log')
+ def trivial_custom_prop_handler(revision):
+ return {'test_prop':'test_value'}
+
+ log.properties_handler_registry.register(
+ 'trivial_custom_prop_handler',
+ trivial_custom_prop_handler)
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 1
+test_prop: test_value
+author: John Doe <jdoe@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: test_properties_in_log
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""",
+ wt.branch, log.LongLogFormatter,
+ formatter_kwargs=dict(levels=1))
+
+
+class TestLineLogFormatter(TestCaseForLogFormatter):
+
+ def test_line_log(self):
+ """Line log should show revno
+
+ bug #5162
+ """
+ wt = self.make_standard_commit('test-line-log',
+ committer='Line-Log-Formatter Tester <test@line.log>',
+ authors=[])
+ self.assertFormatterResult("""\
+1: Line-Log-Formatte... 2005-11-22 add a
+""",
+ wt.branch, log.LineLogFormatter)
+
+ def test_trailing_newlines(self):
+ wt = self.make_branch_and_tree('.')
+ b = self.make_commits_with_trailing_newlines(wt)
+ self.assertFormatterResult("""\
+3: Joe Foo 2005-11-22 single line with trailing newline
+2: Joe Foo 2005-11-22 multiline
+1: Joe Foo 2005-11-22 simple log message
+""",
+ b, log.LineLogFormatter)
+
+ def test_line_log_single_merge_revision(self):
+ wt = self._prepare_tree_with_merges()
+ revspec = revisionspec.RevisionSpec.from_string('1.1.1')
+ rev = revspec.in_history(wt.branch)
+ self.assertFormatterResult("""\
+1.1.1: Joe Foo 2005-11-22 rev-merged
+""",
+ wt.branch, log.LineLogFormatter,
+ show_log_kwargs=dict(start_revision=rev, end_revision=rev))
+
+ def test_line_log_with_tags(self):
+ wt = self._prepare_tree_with_merges(with_tags=True)
+ self.assertFormatterResult("""\
+3: Joe Foo 2005-11-22 {v1.0, v1.0rc1} rev-3
+2: Joe Foo 2005-11-22 [merge] {v0.2} rev-2
+1: Joe Foo 2005-11-22 rev-1
+""",
+ wt.branch, log.LineLogFormatter)
+
+
+class TestLineLogFormatterWithMergeRevisions(TestCaseForLogFormatter):
+
+ def test_line_merge_revs_log(self):
+ """Line log should show revno
+
+ bug #5162
+ """
+ wt = self.make_standard_commit('test-line-log',
+ committer='Line-Log-Formatter Tester <test@line.log>',
+ authors=[])
+ self.assertFormatterResult("""\
+1: Line-Log-Formatte... 2005-11-22 add a
+""",
+ wt.branch, log.LineLogFormatter)
+
+ def test_line_merge_revs_log_single_merge_revision(self):
+ wt = self._prepare_tree_with_merges()
+ revspec = revisionspec.RevisionSpec.from_string('1.1.1')
+ rev = revspec.in_history(wt.branch)
+ self.assertFormatterResult("""\
+1.1.1: Joe Foo 2005-11-22 rev-merged
+""",
+ wt.branch, log.LineLogFormatter,
+ formatter_kwargs=dict(levels=0),
+ show_log_kwargs=dict(start_revision=rev, end_revision=rev))
+
+ def test_line_merge_revs_log_with_merges(self):
+ wt = self._prepare_tree_with_merges()
+ self.assertFormatterResult("""\
+2: Joe Foo 2005-11-22 [merge] rev-2
+ 1.1.1: Joe Foo 2005-11-22 rev-merged
+1: Joe Foo 2005-11-22 rev-1
+""",
+ wt.branch, log.LineLogFormatter,
+ formatter_kwargs=dict(levels=0))
+
+
+class TestGnuChangelogFormatter(TestCaseForLogFormatter):
+
+ def test_gnu_changelog(self):
+ wt = self.make_standard_commit('nicky', authors=[])
+ self.assertFormatterResult('''\
+2005-11-22 Lorem Ipsum <test@example.com>
+
+\tadd a
+
+''',
+ wt.branch, log.GnuChangelogLogFormatter)
+
+ def test_with_authors(self):
+ wt = self.make_standard_commit('nicky',
+ authors=['Fooa Fooz <foo@example.com>',
+ 'Bari Baro <bar@example.com>'])
+ self.assertFormatterResult('''\
+2005-11-22 Fooa Fooz <foo@example.com>
+
+\tadd a
+
+''',
+ wt.branch, log.GnuChangelogLogFormatter)
+
+ def test_verbose(self):
+ wt = self.make_standard_commit('nicky')
+ self.assertFormatterResult('''\
+2005-11-22 John Doe <jdoe@example.com>
+
+\t* a:
+
+\tadd a
+
+''',
+ wt.branch, log.GnuChangelogLogFormatter,
+ show_log_kwargs=dict(verbose=True))
+
+
+class TestShowChangedRevisions(tests.TestCaseWithTransport):
+
+ def test_show_changed_revisions_verbose(self):
+ tree = self.make_branch_and_tree('tree_a')
+ self.build_tree(['tree_a/foo'])
+ tree.add('foo')
+ tree.commit('bar', rev_id='bar-id')
+ s = self.make_utf8_encoded_stringio()
+ log.show_changed_revisions(tree.branch, [], ['bar-id'], s)
+ self.assertContainsRe(s.getvalue(), 'bar')
+ self.assertNotContainsRe(s.getvalue(), 'foo')
+
+
+class TestLogFormatter(tests.TestCase):
+
+ def setUp(self):
+ super(TestLogFormatter, self).setUp()
+ self.rev = revision.Revision('a-id')
+ self.lf = log.LogFormatter(None)
+
+ def test_short_committer(self):
+ def assertCommitter(expected, committer):
+ self.rev.committer = committer
+ self.assertEqual(expected, self.lf.short_committer(self.rev))
+
+ assertCommitter('John Doe', 'John Doe <jdoe@example.com>')
+ assertCommitter('John Smith', 'John Smith <jsmith@example.com>')
+ assertCommitter('John Smith', 'John Smith')
+ assertCommitter('jsmith@example.com', 'jsmith@example.com')
+ assertCommitter('jsmith@example.com', '<jsmith@example.com>')
+ assertCommitter('John Smith', 'John Smith jsmith@example.com')
+
+ def test_short_author(self):
+ def assertAuthor(expected, author):
+ self.rev.properties['author'] = author
+ self.assertEqual(expected, self.lf.short_author(self.rev))
+
+ assertAuthor('John Smith', 'John Smith <jsmith@example.com>')
+ assertAuthor('John Smith', 'John Smith')
+ assertAuthor('jsmith@example.com', 'jsmith@example.com')
+ assertAuthor('jsmith@example.com', '<jsmith@example.com>')
+ assertAuthor('John Smith', 'John Smith jsmith@example.com')
+
+ def test_short_author_from_committer(self):
+ self.rev.committer = 'John Doe <jdoe@example.com>'
+ self.assertEqual('John Doe', self.lf.short_author(self.rev))
+
+ def test_short_author_from_authors(self):
+ self.rev.properties['authors'] = ('John Smith <jsmith@example.com>\n'
+ 'Jane Rey <jrey@example.com>')
+ self.assertEqual('John Smith', self.lf.short_author(self.rev))
+
+
+class TestReverseByDepth(tests.TestCase):
+ """Test reverse_by_depth behavior.
+
+ This is used to present revisions in forward (oldest first) order in a nice
+ layout.
+
+ The tests use lightweight revision descriptions to ease reading.
+ """
+
+ def assertReversed(self, forward, backward):
+ # Transform the descriptions to suit the API: tests use (revno, depth),
+ # while the API expects (revid, revno, depth)
+ def complete_revisions(l):
+ """Transform the description to suit the API.
+
+ Tests use (revno, depth) while the API expects (revid, revno, depth).
+ Since the revid is arbitrary, we just duplicate the revno.
+ """
+ return [ (r, r, d) for r, d in l]
+ forward = complete_revisions(forward)
+ backward = complete_revisions(backward)
+ self.assertEqual(forward, log.reverse_by_depth(backward))
+
+
+ def test_mainline_revisions(self):
+ self.assertReversed([( '1', 0), ('2', 0)],
+ [('2', 0), ('1', 0)])
+
+ def test_merged_revisions(self):
+ self.assertReversed([('1', 0), ('2', 0), ('2.2', 1), ('2.1', 1),],
+ [('2', 0), ('2.1', 1), ('2.2', 1), ('1', 0),])
+
+ def test_shifted_merged_revisions(self):
+ """Test irregular layout.
+
+ Requesting revisions touching a file can produce "holes" in the depths.
+ """
+ self.assertReversed([('1', 0), ('2', 0), ('1.1', 2), ('1.2', 2),],
+ [('2', 0), ('1.2', 2), ('1.1', 2), ('1', 0),])
+
+ def test_merged_without_child_revisions(self):
+ """Test irregular layout.
+
+ Revision ranges can produce "holes" in the depths.
+ """
+ # When a revision of higher depth doesn't follow one of lower depth, we
+ # assume a lower depth one is virtually there
+ self.assertReversed([('1', 2), ('2', 2), ('3', 3), ('4', 4)],
+ [('4', 4), ('3', 3), ('2', 2), ('1', 2),])
+ # So we get the same order after reversing below even if the original
+ # revisions are not in the same order.
+ self.assertReversed([('1', 2), ('2', 2), ('3', 3), ('4', 4)],
+ [('3', 3), ('4', 4), ('2', 2), ('1', 2),])
+
+
+class TestHistoryChange(tests.TestCaseWithTransport):
+
+ def setup_a_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('1a', rev_id='1a')
+ tree.commit('2a', rev_id='2a')
+ tree.commit('3a', rev_id='3a')
+ return tree
+
+ def setup_ab_tree(self):
+ tree = self.setup_a_tree()
+ tree.set_last_revision('1a')
+ tree.branch.set_last_revision_info(1, '1a')
+ tree.commit('2b', rev_id='2b')
+ tree.commit('3b', rev_id='3b')
+ return tree
+
+ def setup_ac_tree(self):
+ tree = self.setup_a_tree()
+ tree.set_last_revision(revision.NULL_REVISION)
+ tree.branch.set_last_revision_info(0, revision.NULL_REVISION)
+ tree.commit('1c', rev_id='1c')
+ tree.commit('2c', rev_id='2c')
+ tree.commit('3c', rev_id='3c')
+ return tree
+
+ def test_all_new(self):
+ tree = self.setup_ab_tree()
+ old, new = log.get_history_change('1a', '3a', tree.branch.repository)
+ self.assertEqual([], old)
+ self.assertEqual(['2a', '3a'], new)
+
+ def test_all_old(self):
+ tree = self.setup_ab_tree()
+ old, new = log.get_history_change('3a', '1a', tree.branch.repository)
+ self.assertEqual([], new)
+ self.assertEqual(['2a', '3a'], old)
+
+ def test_null_old(self):
+ tree = self.setup_ab_tree()
+ old, new = log.get_history_change(revision.NULL_REVISION,
+ '3a', tree.branch.repository)
+ self.assertEqual([], old)
+ self.assertEqual(['1a', '2a', '3a'], new)
+
+ def test_null_new(self):
+ tree = self.setup_ab_tree()
+ old, new = log.get_history_change('3a', revision.NULL_REVISION,
+ tree.branch.repository)
+ self.assertEqual([], new)
+ self.assertEqual(['1a', '2a', '3a'], old)
+
+ def test_diverged(self):
+ tree = self.setup_ab_tree()
+ old, new = log.get_history_change('3a', '3b', tree.branch.repository)
+ self.assertEqual(old, ['2a', '3a'])
+ self.assertEqual(new, ['2b', '3b'])
+
+ def test_unrelated(self):
+ tree = self.setup_ac_tree()
+ old, new = log.get_history_change('3a', '3c', tree.branch.repository)
+ self.assertEqual(old, ['1a', '2a', '3a'])
+ self.assertEqual(new, ['1c', '2c', '3c'])
+
+ def test_show_branch_change(self):
+ tree = self.setup_ab_tree()
+ s = StringIO()
+ log.show_branch_change(tree.branch, s, 3, '3a')
+ self.assertContainsRe(s.getvalue(),
+ '[*]{60}\nRemoved Revisions:\n(.|\n)*2a(.|\n)*3a(.|\n)*'
+ '[*]{60}\n\nAdded Revisions:\n(.|\n)*2b(.|\n)*3b')
+
+ def test_show_branch_change_no_change(self):
+ tree = self.setup_ab_tree()
+ s = StringIO()
+ log.show_branch_change(tree.branch, s, 3, '3b')
+ self.assertEqual(s.getvalue(),
+ 'Nothing seems to have changed\n')
+
+ def test_show_branch_change_no_old(self):
+ tree = self.setup_ab_tree()
+ s = StringIO()
+ log.show_branch_change(tree.branch, s, 2, '2b')
+ self.assertContainsRe(s.getvalue(), 'Added Revisions:')
+ self.assertNotContainsRe(s.getvalue(), 'Removed Revisions:')
+
+ def test_show_branch_change_no_new(self):
+ tree = self.setup_ab_tree()
+ tree.branch.set_last_revision_info(2, '2b')
+ s = StringIO()
+ log.show_branch_change(tree.branch, s, 3, '3b')
+ self.assertContainsRe(s.getvalue(), 'Removed Revisions:')
+ self.assertNotContainsRe(s.getvalue(), 'Added Revisions:')
+
+
+class TestRevisionNotInBranch(TestCaseForLogFormatter):
+
+ def setup_a_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ kwargs = {
+ 'committer': 'Joe Foo <joe@foo.com>',
+ 'timestamp': 1132617600, # Mon 2005-11-22 00:00:00 +0000
+ 'timezone': 0, # UTC
+ }
+ tree.commit('commit 1a', rev_id='1a', **kwargs)
+ tree.commit('commit 2a', rev_id='2a', **kwargs)
+ tree.commit('commit 3a', rev_id='3a', **kwargs)
+ return tree
+
+ def setup_ab_tree(self):
+ tree = self.setup_a_tree()
+ tree.set_last_revision('1a')
+ tree.branch.set_last_revision_info(1, '1a')
+ kwargs = {
+ 'committer': 'Joe Foo <joe@foo.com>',
+ 'timestamp': 1132617600, # Mon 2005-11-22 00:00:00 +0000
+ 'timezone': 0, # UTC
+ }
+ tree.commit('commit 2b', rev_id='2b', **kwargs)
+ tree.commit('commit 3b', rev_id='3b', **kwargs)
+ return tree
+
+ def test_one_revision(self):
+ tree = self.setup_ab_tree()
+ lf = LogCatcher()
+ rev = revisionspec.RevisionInfo(tree.branch, None, '3a')
+ log.show_log(tree.branch, lf, verbose=True, start_revision=rev,
+ end_revision=rev)
+ self.assertEqual(1, len(lf.revisions))
+ self.assertEqual(None, lf.revisions[0].revno) # Out-of-branch
+ self.assertEqual('3a', lf.revisions[0].rev.revision_id)
+
+ def test_many_revisions(self):
+ tree = self.setup_ab_tree()
+ lf = LogCatcher()
+ start_rev = revisionspec.RevisionInfo(tree.branch, None, '1a')
+ end_rev = revisionspec.RevisionInfo(tree.branch, None, '3a')
+ log.show_log(tree.branch, lf, verbose=True, start_revision=start_rev,
+ end_revision=end_rev)
+ self.assertEqual(3, len(lf.revisions))
+ self.assertEqual(None, lf.revisions[0].revno) # Out-of-branch
+ self.assertEqual('3a', lf.revisions[0].rev.revision_id)
+ self.assertEqual(None, lf.revisions[1].revno) # Out-of-branch
+ self.assertEqual('2a', lf.revisions[1].rev.revision_id)
+ self.assertEqual('1', lf.revisions[2].revno) # In-branch
+
+ def test_long_format(self):
+ tree = self.setup_ab_tree()
+ start_rev = revisionspec.RevisionInfo(tree.branch, None, '1a')
+ end_rev = revisionspec.RevisionInfo(tree.branch, None, '3a')
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revision-id: 3a
+committer: Joe Foo <joe@foo.com>
+branch nick: tree
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ commit 3a
+------------------------------------------------------------
+revision-id: 2a
+committer: Joe Foo <joe@foo.com>
+branch nick: tree
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ commit 2a
+------------------------------------------------------------
+revno: 1
+committer: Joe Foo <joe@foo.com>
+branch nick: tree
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ commit 1a
+""",
+ tree.branch, log.LongLogFormatter, show_log_kwargs={
+ 'start_revision': start_rev, 'end_revision': end_rev
+ })
+
+ def test_short_format(self):
+ tree = self.setup_ab_tree()
+ start_rev = revisionspec.RevisionInfo(tree.branch, None, '1a')
+ end_rev = revisionspec.RevisionInfo(tree.branch, None, '3a')
+ self.assertFormatterResult("""\
+ Joe Foo\t2005-11-22
+ revision-id:3a
+ commit 3a
+
+ Joe Foo\t2005-11-22
+ revision-id:2a
+ commit 2a
+
+ 1 Joe Foo\t2005-11-22
+ commit 1a
+
+""",
+ tree.branch, log.ShortLogFormatter, show_log_kwargs={
+ 'start_revision': start_rev, 'end_revision': end_rev
+ })
+
+ def test_line_format(self):
+ tree = self.setup_ab_tree()
+ start_rev = revisionspec.RevisionInfo(tree.branch, None, '1a')
+ end_rev = revisionspec.RevisionInfo(tree.branch, None, '3a')
+ self.assertFormatterResult("""\
+Joe Foo 2005-11-22 commit 3a
+Joe Foo 2005-11-22 commit 2a
+1: Joe Foo 2005-11-22 commit 1a
+""",
+ tree.branch, log.LineLogFormatter, show_log_kwargs={
+ 'start_revision': start_rev, 'end_revision': end_rev
+ })
+
+
+class TestLogWithBugs(TestCaseForLogFormatter, TestLogMixin):
+
+ def setUp(self):
+ TestCaseForLogFormatter.setUp(self)
+ log.properties_handler_registry.register(
+ 'bugs_properties_handler',
+ log._bugs_properties_handler)
+
+ def make_commits_with_bugs(self):
+ """Helper method for LogFormatter tests"""
+ tree = self.make_branch_and_tree(u'.')
+ self.build_tree(['a', 'b'])
+ tree.add('a')
+ self.wt_commit(tree, 'simple log message', rev_id='a1',
+ revprops={'bugs': 'test://bug/id fixed'})
+ tree.add('b')
+ self.wt_commit(tree, 'multiline\nlog\nmessage\n', rev_id='a2',
+ authors=['Joe Bar <joe@bar.com>'],
+ revprops={'bugs': 'test://bug/id fixed\n'
+ 'test://bug/2 fixed'})
+ return tree
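+
+ # For reference (inferred from the revprops used above): the 'bugs'
+ # revision property is a newline-separated list of '<bug URL> <status>'
+ # pairs, e.g.
+ #
+ #     revprops={'bugs': 'test://bug/id fixed\ntest://bug/2 fixed'}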
+
+
+ def test_long_bugs(self):
+ tree = self.make_commits_with_bugs()
+ self.assertFormatterResult("""\
+------------------------------------------------------------
+revno: 2
+fixes bugs: test://bug/id test://bug/2
+author: Joe Bar <joe@bar.com>
+committer: Joe Foo <joe@foo.com>
+branch nick: work
+timestamp: Tue 2005-11-22 00:00:01 +0000
+message:
+ multiline
+ log
+ message
+------------------------------------------------------------
+revno: 1
+fixes bug: test://bug/id
+committer: Joe Foo <joe@foo.com>
+branch nick: work
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ simple log message
+""",
+ tree.branch, log.LongLogFormatter)
+
+ def test_short_bugs(self):
+ tree = self.make_commits_with_bugs()
+ self.assertFormatterResult("""\
+ 2 Joe Bar\t2005-11-22
+ fixes bugs: test://bug/id test://bug/2
+ multiline
+ log
+ message
+
+ 1 Joe Foo\t2005-11-22
+ fixes bug: test://bug/id
+ simple log message
+
+""",
+ tree.branch, log.ShortLogFormatter)
+
+ def test_wrong_bugs_property(self):
+ tree = self.make_branch_and_tree(u'.')
+ self.build_tree(['foo'])
+ self.wt_commit(tree, 'simple log message', rev_id='a1',
+ revprops={'bugs': 'test://bug/id invalid_value'})
+ self.assertFormatterResult("""\
+ 1 Joe Foo\t2005-11-22
+ simple log message
+
+""",
+ tree.branch, log.ShortLogFormatter)
+
+ def test_bugs_handler_present(self):
+ self.properties_handler_registry.get('bugs_properties_handler')
+
+
+class TestLogForAuthors(TestCaseForLogFormatter):
+
+ def setUp(self):
+ TestCaseForLogFormatter.setUp(self)
+ self.wt = self.make_standard_commit('nicky',
+ authors=['John Doe <jdoe@example.com>',
+ 'Jane Rey <jrey@example.com>'])
+
+ def assertFormatterResult(self, formatter, who, result):
+ formatter_kwargs = dict()
+ if who is not None:
+ author_list_handler = log.author_list_registry.get(who)
+ formatter_kwargs['author_list_handler'] = author_list_handler
+ TestCaseForLogFormatter.assertFormatterResult(self, result,
+ self.wt.branch, formatter, formatter_kwargs=formatter_kwargs)
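+
+ # Illustrative only: 'who' names an entry in log.author_list_registry
+ # ('committer', 'first' or 'all' below); the resolved handler is passed
+ # to the formatter, e.g.
+ #
+ #     handler = log.author_list_registry.get('all')
+ #     log.LineLogFormatter(to_file=f, author_list_handler=handler)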
+
+ def test_line_default(self):
+ self.assertFormatterResult(log.LineLogFormatter, None, """\
+1: John Doe 2005-11-22 add a
+""")
+
+ def test_line_committer(self):
+ self.assertFormatterResult(log.LineLogFormatter, 'committer', """\
+1: Lorem Ipsum 2005-11-22 add a
+""")
+
+ def test_line_first(self):
+ self.assertFormatterResult(log.LineLogFormatter, 'first', """\
+1: John Doe 2005-11-22 add a
+""")
+
+ def test_line_all(self):
+ self.assertFormatterResult(log.LineLogFormatter, 'all', """\
+1: John Doe, Jane Rey 2005-11-22 add a
+""")
+
+
+ def test_short_default(self):
+ self.assertFormatterResult(log.ShortLogFormatter, None, """\
+ 1 John Doe\t2005-11-22
+ add a
+
+""")
+
+ def test_short_committer(self):
+ self.assertFormatterResult(log.ShortLogFormatter, 'committer', """\
+ 1 Lorem Ipsum\t2005-11-22
+ add a
+
+""")
+
+ def test_short_first(self):
+ self.assertFormatterResult(log.ShortLogFormatter, 'first', """\
+ 1 John Doe\t2005-11-22
+ add a
+
+""")
+
+ def test_short_all(self):
+ self.assertFormatterResult(log.ShortLogFormatter, 'all', """\
+ 1 John Doe, Jane Rey\t2005-11-22
+ add a
+
+""")
+
+ def test_long_default(self):
+ self.assertFormatterResult(log.LongLogFormatter, None, """\
+------------------------------------------------------------
+revno: 1
+author: John Doe <jdoe@example.com>, Jane Rey <jrey@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: nicky
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""")
+
+ def test_long_committer(self):
+ self.assertFormatterResult(log.LongLogFormatter, 'committer', """\
+------------------------------------------------------------
+revno: 1
+committer: Lorem Ipsum <test@example.com>
+branch nick: nicky
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""")
+
+ def test_long_first(self):
+ self.assertFormatterResult(log.LongLogFormatter, 'first', """\
+------------------------------------------------------------
+revno: 1
+author: John Doe <jdoe@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: nicky
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""")
+
+ def test_long_all(self):
+ self.assertFormatterResult(log.LongLogFormatter, 'all', """\
+------------------------------------------------------------
+revno: 1
+author: John Doe <jdoe@example.com>, Jane Rey <jrey@example.com>
+committer: Lorem Ipsum <test@example.com>
+branch nick: nicky
+timestamp: Tue 2005-11-22 00:00:00 +0000
+message:
+ add a
+""")
+
+ def test_gnu_changelog_default(self):
+ self.assertFormatterResult(log.GnuChangelogLogFormatter, None, """\
+2005-11-22 John Doe <jdoe@example.com>
+
+\tadd a
+
+""")
+
+ def test_gnu_changelog_committer(self):
+ self.assertFormatterResult(log.GnuChangelogLogFormatter, 'committer', """\
+2005-11-22 Lorem Ipsum <test@example.com>
+
+\tadd a
+
+""")
+
+ def test_gnu_changelog_first(self):
+ self.assertFormatterResult(log.GnuChangelogLogFormatter, 'first', """\
+2005-11-22 John Doe <jdoe@example.com>
+
+\tadd a
+
+""")
+
+ def test_gnu_changelog_all(self):
+ self.assertFormatterResult(log.GnuChangelogLogFormatter, 'all', """\
+2005-11-22 John Doe <jdoe@example.com>, Jane Rey <jrey@example.com>
+
+\tadd a
+
+""")
+
+
+class TestLogExcludeAncestry(tests.TestCaseWithTransport):
+
+ def make_branch_with_alternate_ancestries(self, relpath='.'):
+ # See test_merge_sorted_exclude_ancestry below for the difference with
+ # bt.per_branch.test_iter_merge_sorted_revision
+ #     .TestIterMergeSortedRevisionsBushyGraph
+ #     .make_branch_with_alternate_ancestries
+ # and test_merge_sorted_exclude_ancestry.
+ # See the FIXME in assertLogRevnos too.
+ builder = branchbuilder.BranchBuilder(self.get_transport(relpath))
+ # 1
+ # |\
+ # 2 \
+ # | |
+ # | 1.1.1
+ # | | \
+ # | | 1.2.1
+ # | | /
+ # | 1.1.2
+ # | /
+ # 3
+ builder.start_series()
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', '')),])
+ builder.build_snapshot('1.1.1', ['1'], [])
+ builder.build_snapshot('2', ['1'], [])
+ builder.build_snapshot('1.2.1', ['1.1.1'], [])
+ builder.build_snapshot('1.1.2', ['1.1.1', '1.2.1'], [])
+ builder.build_snapshot('3', ['2', '1.1.2'], [])
+ builder.finish_series()
+ br = builder.get_branch()
+ br.lock_read()
+ self.addCleanup(br.unlock)
+ return br
+
+ def assertLogRevnos(self, expected_revnos, b, start, end,
+ exclude_common_ancestry, generate_merge_revisions=True):
+ # FIXME: the layering in log makes it hard to test intermediate levels,
+ # I wish adding filters with their parameters was easier...
+ # -- vila 20100413
+ iter_revs = log._calc_view_revisions(
+ b, start, end, direction='reverse',
+ generate_merge_revisions=generate_merge_revisions,
+ exclude_common_ancestry=exclude_common_ancestry)
+ self.assertEqual(expected_revnos,
+ [revid for revid, revno, depth in iter_revs])
+
+ def test_merge_sorted_exclude_ancestry(self):
+ b = self.make_branch_with_alternate_ancestries()
+ self.assertLogRevnos(['3', '1.1.2', '1.2.1', '1.1.1', '2', '1'],
+ b, '1', '3', exclude_common_ancestry=False)
+ # '2' is part of the '3' ancestry but not part of '1.1.1' ancestry so
+ # it should be mentioned even if merge_sort order will make it appear
+ # after 1.1.1
+ self.assertLogRevnos(['3', '1.1.2', '1.2.1', '2'],
+ b, '1.1.1', '3', exclude_common_ancestry=True)
+
+ def test_merge_sorted_simple_revnos_exclude_ancestry(self):
+ b = self.make_branch_with_alternate_ancestries()
+ self.assertLogRevnos(['3', '2'],
+ b, '1', '3', exclude_common_ancestry=True,
+ generate_merge_revisions=False)
+ self.assertLogRevnos(['3', '1.1.2', '1.2.1', '1.1.1', '2'],
+ b, '1', '3', exclude_common_ancestry=True,
+ generate_merge_revisions=True)
+
+
+class TestLogDefaults(TestCaseForLogFormatter):
+ def test_default_log_level(self):
+ """
+ Test that a 'levels=1' value passed to make_log_request_dict is not
+ overridden when the LogFormatter in use supports more detailed
+ output.
+ Fixes bug #747958.
+ """
+ wt = self._prepare_tree_with_merges()
+ b = wt.branch
+
+ class CustomLogFormatter(log.LogFormatter):
+ def __init__(self, *args, **kwargs):
+ super(CustomLogFormatter, self).__init__(*args, **kwargs)
+ self.revisions = []
+ def get_levels(self):
+ # log formatter supports all levels:
+ return 0
+ def log_revision(self, revision):
+ self.revisions.append(revision)
+
+ log_formatter = LogCatcher()
+ # First request we don't specify number of levels, we should get a
+ # sensible default (whatever the LogFormatter handles - which in this
+ # case is 0/everything):
+ request = log.make_log_request_dict(limit=10)
+ log.Logger(b, request).show(log_formatter)
+ # should have all three revisions:
+ self.assertEquals(len(log_formatter.revisions), 3)
+
+ del log_formatter
+ log_formatter = LogCatcher()
+ # now explicitly request mainline revisions only:
+ request = log.make_log_request_dict(limit=10, levels=1)
+ log.Logger(b, request).show(log_formatter)
+ # should now only have 2 revisions:
+ self.assertEquals(len(log_formatter.revisions), 2)
+
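
The 'levels' value in the request dict above is what the formatter's get_levels() interacts with: 0 is understood as "as many levels as the formatter supports", while 1 restricts output to mainline revisions, which is why the second request reports only two of the three revisions. A minimal sketch of the two request shapes, assuming the same make_log_request_dict API exercised above (levels=0 follows the "0 means everything" convention noted in the test comments):

    # All the levels the formatter can handle (0 means "no limit"):
    request_all = log.make_log_request_dict(limit=10, levels=0)
    # Mainline revisions only, regardless of formatter capabilities:
    request_mainline = log.make_log_request_dict(limit=10, levels=1)
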
diff --git a/bzrlib/tests/test_lru_cache.py b/bzrlib/tests/test_lru_cache.py
new file mode 100644
index 0000000..8b38583
--- /dev/null
+++ b/bzrlib/tests/test_lru_cache.py
@@ -0,0 +1,435 @@
+# Copyright (C) 2006, 2008, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the lru_cache module."""
+
+from bzrlib import (
+ lru_cache,
+ symbol_versioning,
+ tests,
+ )
+
+
+def walk_lru(lru):
+ """Test helper to walk the LRU list and assert its consistency"""
+ node = lru._most_recently_used
+ if node is not None:
+ if node.prev is not None:
+ raise AssertionError('the _most_recently_used entry is not'
+ ' supposed to have a previous entry'
+ ' %s' % (node,))
+ while node is not None:
+ if node.next_key is lru_cache._null_key:
+ if node is not lru._least_recently_used:
+ raise AssertionError('only the last node should have'
+ ' no next value: %s' % (node,))
+ node_next = None
+ else:
+ node_next = lru._cache[node.next_key]
+ if node_next.prev is not node:
+ raise AssertionError('inconsistency found, node.next.prev'
+ ' != node: %s' % (node,))
+ if node.prev is None:
+ if node is not lru._most_recently_used:
+ raise AssertionError('only the _most_recently_used should'
+ ' not have a previous node: %s'
+ % (node,))
+ else:
+ if node.prev.next_key != node.key:
+ raise AssertionError('inconsistency found, node.prev.next'
+ ' != node: %s' % (node,))
+ yield node
+ node = node_next
+
+
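The walk_lru helper above both validates the linked-list invariants and yields nodes from most- to least-recently used, which is what the assertions below rely on. A small usage sketch, assuming only the LRUCache behaviour exercised in this file:

    # Rough sketch, not part of the patch: exercise walk_lru() directly.
    from bzrlib import lru_cache

    cache = lru_cache.LRUCache(max_cache=3)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3      # list is now c, b, a (most recent first)
    cache['a']          # a read moves 'a' back to the front
    assert [n.key for n in walk_lru(cache)] == ['a', 'c', 'b']
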
+class TestLRUCache(tests.TestCase):
+ """Test that LRU cache properly keeps track of entries."""
+
+ def test_cache_size(self):
+ cache = lru_cache.LRUCache(max_cache=10)
+ self.assertEqual(10, cache.cache_size())
+
+ cache = lru_cache.LRUCache(max_cache=256)
+ self.assertEqual(256, cache.cache_size())
+
+ cache.resize(512)
+ self.assertEqual(512, cache.cache_size())
+
+ def test_missing(self):
+ cache = lru_cache.LRUCache(max_cache=10)
+
+ self.assertFalse('foo' in cache)
+ self.assertRaises(KeyError, cache.__getitem__, 'foo')
+
+ cache['foo'] = 'bar'
+ self.assertEqual('bar', cache['foo'])
+ self.assertTrue('foo' in cache)
+ self.assertFalse('bar' in cache)
+
+ def test_map_None(self):
+ # Make sure that we can properly map None as a key.
+ cache = lru_cache.LRUCache(max_cache=10)
+ self.assertFalse(None in cache)
+ cache[None] = 1
+ self.assertEqual(1, cache[None])
+ cache[None] = 2
+ self.assertEqual(2, cache[None])
+ # Test the various code paths of __getitem__, to make sure that we can
+ # handle when None is the key for the LRU and the MRU
+ cache[1] = 3
+ cache[None] = 1
+ cache[None]
+ cache[1]
+ cache[None]
+ self.assertEqual([None, 1], [n.key for n in walk_lru(cache)])
+
+ def test_add__null_key(self):
+ cache = lru_cache.LRUCache(max_cache=10)
+ self.assertRaises(ValueError,
+ cache.__setitem__, lru_cache._null_key, 1)
+
+ def test_overflow(self):
+ """Adding extra entries will pop out old ones."""
+ cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)
+
+ cache['foo'] = 'bar'
+ # With a max cache of 1, adding 'baz' should pop out 'foo'
+ cache['baz'] = 'biz'
+
+ self.assertFalse('foo' in cache)
+ self.assertTrue('baz' in cache)
+
+ self.assertEqual('biz', cache['baz'])
+
+ def test_by_usage(self):
+ """Accessing entries bumps them up in priority."""
+ cache = lru_cache.LRUCache(max_cache=2)
+
+ cache['baz'] = 'biz'
+ cache['foo'] = 'bar'
+
+ self.assertEqual('biz', cache['baz'])
+
+ # This must kick out 'foo' because it was the last accessed
+ cache['nub'] = 'in'
+
+ self.assertFalse('foo' in cache)
+
+ def test_cleanup_function_deprecated(self):
+ """Test that per-node cleanup functions are no longer allowed"""
+ cache = lru_cache.LRUCache()
+ self.assertRaises(ValueError, self.applyDeprecated,
+ symbol_versioning.deprecated_in((2, 5, 0)),
+ cache.add, "key", 1, cleanup=lambda: None)
+
+ def test_len(self):
+ cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)
+
+ cache[1] = 10
+ cache[2] = 20
+ cache[3] = 30
+ cache[4] = 40
+
+ self.assertEqual(4, len(cache))
+
+ cache[5] = 50
+ cache[6] = 60
+ cache[7] = 70
+ cache[8] = 80
+
+ self.assertEqual(8, len(cache))
+
+ cache[1] = 15 # replacement
+
+ self.assertEqual(8, len(cache))
+
+ cache[9] = 90
+ cache[10] = 100
+ cache[11] = 110
+
+ # We hit the max
+ self.assertEqual(10, len(cache))
+ self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
+ [n.key for n in walk_lru(cache)])
+
+ def test_cleanup_shrinks_to_after_clean_count(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)
+
+ cache[1] = 10
+ cache[2] = 20
+ cache[3] = 25
+ cache[4] = 30
+ cache[5] = 35
+
+ self.assertEqual(5, len(cache))
+ # This will bump us over the max, which causes us to shrink down to
+ # after_cleanup_count size
+ cache[6] = 40
+ self.assertEqual(3, len(cache))
+
+ def test_after_cleanup_larger_than_max(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
+ self.assertEqual(5, cache._after_cleanup_count)
+
+ def test_after_cleanup_none(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
+ # By default _after_cleanup_size is 80% of the normal size
+ self.assertEqual(4, cache._after_cleanup_count)
+
+ def test_cleanup(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)
+
+ # Add these in order
+ cache[1] = 10
+ cache[2] = 20
+ cache[3] = 25
+ cache[4] = 30
+ cache[5] = 35
+
+ self.assertEqual(5, len(cache))
+ # Force a compaction
+ cache.cleanup()
+ self.assertEqual(2, len(cache))
+
+ def test_preserve_last_access_order(self):
+ cache = lru_cache.LRUCache(max_cache=5)
+
+ # Add these in order
+ cache[1] = 10
+ cache[2] = 20
+ cache[3] = 25
+ cache[4] = 30
+ cache[5] = 35
+
+ self.assertEqual([5, 4, 3, 2, 1], [n.key for n in walk_lru(cache)])
+
+ # Now access some randomly
+ cache[2]
+ cache[5]
+ cache[3]
+ cache[2]
+ self.assertEqual([2, 3, 5, 4, 1], [n.key for n in walk_lru(cache)])
+
+ def test_get(self):
+ cache = lru_cache.LRUCache(max_cache=5)
+
+ cache[1] = 10
+ cache[2] = 20
+ self.assertEqual(20, cache.get(2))
+ self.assertIs(None, cache.get(3))
+ obj = object()
+ self.assertIs(obj, cache.get(3, obj))
+ self.assertEqual([2, 1], [n.key for n in walk_lru(cache)])
+ self.assertEqual(10, cache.get(1))
+ self.assertEqual([1, 2], [n.key for n in walk_lru(cache)])
+
+ def test_keys(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)
+
+ cache[1] = 2
+ cache[2] = 3
+ cache[3] = 4
+ self.assertEqual([1, 2, 3], sorted(cache.keys()))
+ cache[4] = 5
+ cache[5] = 6
+ cache[6] = 7
+ self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
+
+ def test_resize_smaller(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
+ cache[1] = 2
+ cache[2] = 3
+ cache[3] = 4
+ cache[4] = 5
+ cache[5] = 6
+ self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
+ cache[6] = 7
+ self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
+ # Now resize to something smaller, which triggers a cleanup
+ cache.resize(max_cache=3, after_cleanup_count=2)
+ self.assertEqual([5, 6], sorted(cache.keys()))
+ # Adding something will use the new size
+ cache[7] = 8
+ self.assertEqual([5, 6, 7], sorted(cache.keys()))
+ cache[8] = 9
+ self.assertEqual([7, 8], sorted(cache.keys()))
+
+ def test_resize_larger(self):
+ cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
+ cache[1] = 2
+ cache[2] = 3
+ cache[3] = 4
+ cache[4] = 5
+ cache[5] = 6
+ self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
+ cache[6] = 7
+ self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
+ cache.resize(max_cache=8, after_cleanup_count=6)
+ self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
+ cache[7] = 8
+ cache[8] = 9
+ cache[9] = 10
+ cache[10] = 11
+ self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))
+ cache[11] = 12 # triggers cleanup back to new after_cleanup_count
+ self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))
+
+
+class TestLRUSizeCache(tests.TestCase):
+
+ def test_basic_init(self):
+ cache = lru_cache.LRUSizeCache()
+ self.assertEqual(2048, cache._max_cache)
+ self.assertEqual(int(cache._max_size*0.8), cache._after_cleanup_size)
+ self.assertEqual(0, cache._value_size)
+
+ def test_add__null_key(self):
+ cache = lru_cache.LRUSizeCache()
+ self.assertRaises(ValueError,
+ cache.__setitem__, lru_cache._null_key, 1)
+
+ def test_add_tracks_size(self):
+ cache = lru_cache.LRUSizeCache()
+ self.assertEqual(0, cache._value_size)
+ cache['my key'] = 'my value text'
+ self.assertEqual(13, cache._value_size)
+
+ def test_remove_tracks_size(self):
+ cache = lru_cache.LRUSizeCache()
+ self.assertEqual(0, cache._value_size)
+ cache['my key'] = 'my value text'
+ self.assertEqual(13, cache._value_size)
+ node = cache._cache['my key']
+ cache._remove_node(node)
+ self.assertEqual(0, cache._value_size)
+
+ def test_no_add_over_size(self):
+ """Adding a large value may not be cached at all."""
+ cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
+ self.assertEqual(0, cache._value_size)
+ self.assertEqual({}, cache.as_dict())
+ cache['test'] = 'key'
+ self.assertEqual(3, cache._value_size)
+ self.assertEqual({'test': 'key'}, cache.as_dict())
+ cache['test2'] = 'key that is too big'
+ self.assertEqual(3, cache._value_size)
+ self.assertEqual({'test':'key'}, cache.as_dict())
+ # If adding a value would immediately trigger a cleanup that removed
+ # every cached entry, then that value should not be stored at all
+ cache['test3'] = 'bigkey'
+ self.assertEqual(3, cache._value_size)
+ self.assertEqual({'test':'key'}, cache.as_dict())
+
+ cache['test4'] = 'bikey'
+ self.assertEqual(3, cache._value_size)
+ self.assertEqual({'test':'key'}, cache.as_dict())
+
+ def test_adding_clears_cache_based_on_size(self):
+ """The cache is cleared in LRU order until small enough"""
+ cache = lru_cache.LRUSizeCache(max_size=20)
+ cache['key1'] = 'value' # 5 chars
+ cache['key2'] = 'value2' # 6 chars
+ cache['key3'] = 'value23' # 7 chars
+ self.assertEqual(5+6+7, cache._value_size)
+ cache['key2'] # reference key2 so it gets a newer reference time
+ cache['key4'] = 'value234' # 8 chars, over limit
+ # We have to remove 2 keys to get back under limit
+ self.assertEqual(6+8, cache._value_size)
+ self.assertEqual({'key2':'value2', 'key4':'value234'},
+ cache.as_dict())
+
+ def test_adding_clears_to_after_cleanup_size(self):
+ cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
+ cache['key1'] = 'value' # 5 chars
+ cache['key2'] = 'value2' # 6 chars
+ cache['key3'] = 'value23' # 7 chars
+ self.assertEqual(5+6+7, cache._value_size)
+ cache['key2'] # reference key2 so it gets a newer reference time
+ cache['key4'] = 'value234' # 8 chars, over limit
+ # We have to remove 3 keys to get back under limit
+ self.assertEqual(8, cache._value_size)
+ self.assertEqual({'key4':'value234'}, cache.as_dict())
+
+ def test_custom_sizes(self):
+ def size_of_list(lst):
+ return sum(len(x) for x in lst)
+ cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10,
+ compute_size=size_of_list)
+
+ cache['key1'] = ['val', 'ue'] # 5 chars
+ cache['key2'] = ['val', 'ue2'] # 6 chars
+ cache['key3'] = ['val', 'ue23'] # 7 chars
+ self.assertEqual(5+6+7, cache._value_size)
+ cache['key2'] # reference key2 so it gets a newer reference time
+ cache['key4'] = ['value', '234'] # 8 chars, over limit
+ # We have to remove 3 keys to get back under limit
+ self.assertEqual(8, cache._value_size)
+ self.assertEqual({'key4':['value', '234']}, cache.as_dict())
+
+ def test_cleanup(self):
+ cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
+
+ # Add these in order
+ cache['key1'] = 'value' # 5 chars
+ cache['key2'] = 'value2' # 6 chars
+ cache['key3'] = 'value23' # 7 chars
+ self.assertEqual(5+6+7, cache._value_size)
+
+ cache.cleanup()
+ # Only the most recent fits after cleaning up
+ self.assertEqual(7, cache._value_size)
+
+ def test_keys(self):
+ cache = lru_cache.LRUSizeCache(max_size=10)
+
+ cache[1] = 'a'
+ cache[2] = 'b'
+ cache[3] = 'cdef'
+ self.assertEqual([1, 2, 3], sorted(cache.keys()))
+
+ def test_resize_smaller(self):
+ cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
+ cache[1] = 'abc'
+ cache[2] = 'def'
+ cache[3] = 'ghi'
+ cache[4] = 'jkl'
+ # Triggers a cleanup
+ self.assertEqual([2, 3, 4], sorted(cache.keys()))
+ # Resize should also cleanup again
+ cache.resize(max_size=6, after_cleanup_size=4)
+ self.assertEqual([4], sorted(cache.keys()))
+ # Adding should use the new max size
+ cache[5] = 'mno'
+ self.assertEqual([4, 5], sorted(cache.keys()))
+ cache[6] = 'pqr'
+ self.assertEqual([6], sorted(cache.keys()))
+
+ def test_resize_larger(self):
+ cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
+ cache[1] = 'abc'
+ cache[2] = 'def'
+ cache[3] = 'ghi'
+ cache[4] = 'jkl'
+ # Triggers a cleanup
+ self.assertEqual([2, 3, 4], sorted(cache.keys()))
+ cache.resize(max_size=15, after_cleanup_size=12)
+ self.assertEqual([2, 3, 4], sorted(cache.keys()))
+ cache[5] = 'mno'
+ cache[6] = 'pqr'
+ self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
+ cache[7] = 'stu'
+ self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
+
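
As the tests above show, LRUSizeCache evicts by accumulated value size rather than by entry count, and test_custom_sizes demonstrates that the measurement is pluggable through compute_size. A hedged sketch of the same idea for dict-shaped values (size_of_dict is illustrative only, not part of bzrlib):

    from bzrlib import lru_cache

    def size_of_dict(d):
        # Count the characters in every key and value of a dict value.
        return sum(len(k) + len(v) for k, v in d.items())

    cache = lru_cache.LRUSizeCache(max_size=50, after_cleanup_size=30,
                                   compute_size=size_of_dict)
    cache['rev-1'] = {'author': 'jdoe', 'message': 'add a'}  # size 22, cached
    # A value whose computed size exceeds after_cleanup_size would simply
    # not be stored, mirroring test_no_add_over_size above.
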
diff --git a/bzrlib/tests/test_lsprof.py b/bzrlib/tests/test_lsprof.py
new file mode 100644
index 0000000..8104e1c
--- /dev/null
+++ b/bzrlib/tests/test_lsprof.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for profiling data collection."""
+
+
+import cPickle
+import threading
+
+import bzrlib
+from bzrlib import errors, tests
+from bzrlib.tests import (
+ features,
+ )
+
+
+_TXT_HEADER = " CallCount Recursive Total(ms) " + \
+ "Inline(ms) module:lineno(function)\n"
+
+
+def _junk_callable():
+ "A simple routine to profile."
+ result = sorted(['abc', 'def', 'ghi'])
+
+
+def _collect_stats():
+ "Collect and return some dummy profile data."
+ from bzrlib.lsprof import profile
+ ret, stats = profile(_junk_callable)
+ return stats
+
+
+class TestStatsSave(tests.TestCaseInTempDir):
+
+ _test_needs_features = [features.lsprof_feature]
+
+ def setUp(self):
+ super(tests.TestCaseInTempDir, self).setUp()
+ self.stats = _collect_stats()
+
+ def _tempfile(self, ext):
+ dir = self.test_dir
+ return bzrlib.osutils.pathjoin(dir, "tmp_profile_data." + ext)
+
+ def test_stats_save_to_txt(self):
+ f = self._tempfile("txt")
+ self.stats.save(f)
+ lines = open(f).readlines()
+ self.assertEqual(lines[0], _TXT_HEADER)
+
+ def test_stats_save_to_callgrind(self):
+ f = self._tempfile("callgrind")
+ self.stats.save(f)
+ lines = open(f).readlines()
+ self.assertEqual(lines[0], "events: Ticks\n")
+ f = bzrlib.osutils.pathjoin(self.test_dir, "callgrind.out.foo")
+ self.stats.save(f)
+ lines = open(f).readlines()
+ self.assertEqual(lines[0], "events: Ticks\n")
+ # Test explicit format nomination
+ f2 = self._tempfile("txt")
+ self.stats.save(f2, format="callgrind")
+ lines2 = open(f2).readlines()
+ self.assertEqual(lines2[0], "events: Ticks\n")
+
+ def test_stats_save_to_pickle(self):
+ f = self._tempfile("pkl")
+ self.stats.save(f)
+ data1 = cPickle.load(open(f))
+ self.assertEqual(type(data1), bzrlib.lsprof.Stats)
+
+
+class TestBzrProfiler(tests.TestCase):
+
+ _test_needs_features = [features.lsprof_feature]
+
+ def test_start_call_stuff_stop(self):
+ profiler = bzrlib.lsprof.BzrProfiler()
+ profiler.start()
+ try:
+ def a_function():
+ pass
+ a_function()
+ finally:
+ stats = profiler.stop()
+ stats.freeze()
+ lines = [str(data) for data in stats.data]
+ lines = [line for line in lines if 'a_function' in line]
+ self.assertLength(1, lines)
+
+ def test_block_0(self):
+ # When profiler_block is 0, reentrant profile requests fail.
+ self.overrideAttr(bzrlib.lsprof.BzrProfiler, 'profiler_block', 0)
+ inner_calls = []
+ def inner():
+ profiler = bzrlib.lsprof.BzrProfiler()
+ self.assertRaises(errors.BzrError, profiler.start)
+ inner_calls.append(True)
+ bzrlib.lsprof.profile(inner)
+ self.assertLength(1, inner_calls)
+
+ def test_block_1(self):
+ # When profiler_block is 1, concurrent profiles serialise.
+ # This is tested by manually acquiring the profiler lock, then
+ # starting a thread that tries to profile, and releasing the lock.
+ # We know due to test_block_0 that two profiles at once hit the lock,
+ # so while this isn't perfect (we'd want a callback on the lock being
+ # entered to allow lockstep evaluation of the actions), it's good enough
+ # to be confident regressions would be caught. Alternatively, if this
+ # is flakey, a fake Lock object can be used to trace the calls made.
+ calls = []
+ def profiled():
+ calls.append('profiled')
+ def do_profile():
+ bzrlib.lsprof.profile(profiled)
+ calls.append('after_profiled')
+ thread = threading.Thread(target=do_profile)
+ bzrlib.lsprof.BzrProfiler.profiler_lock.acquire()
+ try:
+ try:
+ thread.start()
+ finally:
+ bzrlib.lsprof.BzrProfiler.profiler_lock.release()
+ finally:
+ thread.join()
+ self.assertLength(2, calls)
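
test_block_1 uses a common technique for testing serialisation: hold the lock the code under test needs, start a worker thread, then release the lock and join. A standalone sketch of that pattern using only the standard library (independent of BzrProfiler, which the test drives through the same idea):

    import threading

    lock = threading.Lock()
    events = []

    def worker():
        # Blocks until the main thread releases the lock.
        with lock:
            events.append('worker ran')

    lock.acquire()
    try:
        thread = threading.Thread(target=worker)
        thread.start()       # the worker will block on the held lock
    finally:
        lock.release()       # now let the worker proceed
    thread.join()
    assert events == ['worker ran']
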
diff --git a/bzrlib/tests/test_mail_client.py b/bzrlib/tests/test_mail_client.py
new file mode 100644
index 0000000..f1d47fb
--- /dev/null
+++ b/bzrlib/tests/test_mail_client.py
@@ -0,0 +1,293 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ errors,
+ mail_client,
+ tests,
+ urlutils,
+ osutils,
+ )
+
+class TestMutt(tests.TestCase):
+
+ def test_commandline(self):
+ mutt = mail_client.Mutt(None)
+ commandline = mutt._get_compose_commandline(
+ None, None, 'file%', body="hello")
+ # The temporary filename is randomly generated, so it is not matched.
+ self.assertEqual(['-a', 'file%', '-i'], commandline[:-1])
+ commandline = mutt._get_compose_commandline('jrandom@example.org',
+ 'Hi there!', None)
+ self.assertEqual(['-s', 'Hi there!', '--', 'jrandom@example.org'],
+ commandline)
+
+ def test_commandline_is_8bit(self):
+ mutt = mail_client.Mutt(None)
+ cmdline = mutt._get_compose_commandline(u'jrandom@example.org',
+ u'Hi there!', u'file%')
+ self.assertEqual(
+ ['-s', 'Hi there!', '-a', 'file%', '--', 'jrandom@example.org'],
+ cmdline)
+ for item in cmdline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+
+class TestThunderbird(tests.TestCase):
+
+ def test_commandline(self):
+ tbird = mail_client.Thunderbird(None)
+ commandline = tbird._get_compose_commandline(None, None,
+ 'file%')
+ self.assertEqual(['-compose', "attachment='%s'" %
+ urlutils.local_path_to_url('file%')], commandline)
+ commandline = tbird._get_compose_commandline('jrandom@example.org',
+ 'Hi there!', None,
+ "bo'dy")
+ self.assertEqual(['-compose', "body=bo%27dy,"
+ "subject='Hi there!',"
+ "to='jrandom@example.org'"],
+ commandline)
+
+ def test_commandline_is_8bit(self):
+ # test for bug #139318
+ tbird = mail_client.Thunderbird(None)
+ cmdline = tbird._get_compose_commandline(u'jrandom@example.org',
+ u'Hi there!', u'file%')
+ self.assertEqual(['-compose',
+ ("attachment='%s'," % urlutils.local_path_to_url('file%')) +
+ "subject='Hi there!',to='jrandom@example.org'",
+ ], cmdline)
+ for item in cmdline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+
+class TestEmacsMail(tests.TestCase):
+
+ def test_commandline(self):
+ eclient = mail_client.EmacsMail(None)
+
+ commandline = eclient._get_compose_commandline(None, 'Hi there!', None)
+ self.assertEqual(['--eval', '(compose-mail nil "Hi there!")'],
+ commandline)
+
+ commandline = eclient._get_compose_commandline('jrandom@example.org',
+ 'Hi there!', None)
+ self.assertEqual(['--eval',
+ '(compose-mail "jrandom@example.org" "Hi there!")'],
+ commandline)
+
+ # We won't know the temporary file name at this stage,
+ # so we can't compare the full command line with assertEqual
+ cmdline = eclient._get_compose_commandline(None, None, 'file%')
+ if eclient.elisp_tmp_file is not None:
+ self.addCleanup(osutils.delete_any, eclient.elisp_tmp_file)
+ commandline = ' '.join(cmdline)
+ self.assertContainsRe(commandline, '--eval')
+ self.assertContainsRe(commandline, '(compose-mail nil nil)')
+ self.assertContainsRe(commandline, '(load .*)')
+ self.assertContainsRe(commandline, '(bzr-add-mime-att \"file%\")')
+
+ def test_commandline_is_8bit(self):
+ eclient = mail_client.EmacsMail(None)
+ commandline = eclient._get_compose_commandline(u'jrandom@example.org',
+ u'Hi there!', u'file%')
+ if eclient.elisp_tmp_file is not None:
+ self.addCleanup(osutils.delete_any, eclient.elisp_tmp_file)
+ for item in commandline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+
+class TestXDGEmail(tests.TestCase):
+
+ def test_commandline(self):
+ xdg_email = mail_client.XDGEmail(None)
+ self.assertRaises(errors.NoMailAddressSpecified,
+ xdg_email._get_compose_commandline,
+ None, None, 'file%')
+ commandline = xdg_email._get_compose_commandline(
+ 'jrandom@example.org', None, 'file%')
+ self.assertEqual(['jrandom@example.org', '--attach', 'file%'],
+ commandline)
+ commandline = xdg_email._get_compose_commandline(
+ 'jrandom@example.org', 'Hi there!', None, "bo'dy")
+ self.assertEqual(['jrandom@example.org', '--subject', 'Hi there!',
+ '--body', "bo'dy"], commandline)
+
+ def test_commandline_is_8bit(self):
+ xdg_email = mail_client.XDGEmail(None)
+ cmdline = xdg_email._get_compose_commandline(u'jrandom@example.org',
+ u'Hi there!', u'file%')
+ self.assertEqual(
+ ['jrandom@example.org', '--subject', 'Hi there!',
+ '--attach', 'file%'],
+ cmdline)
+ for item in cmdline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+
+class TestEvolution(tests.TestCase):
+
+ def test_commandline(self):
+ evo = mail_client.Evolution(None)
+ commandline = evo._get_compose_commandline(None, None, 'file%')
+ self.assertEqual(['mailto:?attach=file%25'], commandline)
+ commandline = evo._get_compose_commandline('jrandom@example.org',
+ 'Hi there!', None, 'bo&dy')
+ self.assertEqual(['mailto:jrandom@example.org?body=bo%26dy&'
+ 'subject=Hi%20there%21'], commandline)
+
+ def test_commandline_is_8bit(self):
+ evo = mail_client.Evolution(None)
+ cmdline = evo._get_compose_commandline(u'jrandom@example.org',
+ u'Hi there!', u'file%')
+ self.assertEqual(
+ ['mailto:jrandom@example.org?attach=file%25&subject=Hi%20there%21'
+ ],
+ cmdline)
+ for item in cmdline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+
+class TestKMail(tests.TestCase):
+
+ def test_commandline(self):
+ kmail = mail_client.KMail(None)
+ commandline = kmail._get_compose_commandline(None, None, 'file%')
+ self.assertEqual(['--attach', 'file%'], commandline)
+ commandline = kmail._get_compose_commandline('jrandom@example.org',
+ 'Hi there!', None)
+ self.assertEqual(['-s', 'Hi there!', 'jrandom@example.org'],
+ commandline)
+
+ def test_commandline_is_8bit(self):
+ kmail = mail_client.KMail(None)
+ cmdline = kmail._get_compose_commandline(u'jrandom@example.org',
+ u'Hi there!', u'file%')
+ self.assertEqual(
+ ['-s', 'Hi there!', '--attach', 'file%', 'jrandom@example.org'],
+ cmdline)
+ for item in cmdline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+
+class TestClaws(tests.TestCase):
+
+ def test_commandline(self):
+ claws = mail_client.Claws(None)
+ commandline = claws._get_compose_commandline(
+ 'jrandom@example.org', None, 'file%')
+ self.assertEqual(
+ ['--compose', 'mailto:jrandom@example.org?', '--attach', 'file%'],
+ commandline)
+ commandline = claws._get_compose_commandline(
+ 'jrandom@example.org', 'Hi there!', None)
+ self.assertEqual(
+ ['--compose',
+ 'mailto:jrandom@example.org?subject=Hi%20there%21'],
+ commandline)
+
+ def test_commandline_is_8bit(self):
+ claws = mail_client.Claws(None)
+ cmdline = claws._get_compose_commandline(
+ u'jrandom@example.org', u'\xb5cosm of fun!', u'file%')
+ subject_string = urlutils.quote(
+ u'\xb5cosm of fun!'.encode(osutils.get_user_encoding(), 'replace'))
+ self.assertEqual(
+ ['--compose',
+ 'mailto:jrandom@example.org?subject=%s' % subject_string,
+ '--attach',
+ 'file%'],
+ cmdline)
+ for item in cmdline:
+ self.assertFalse(isinstance(item, unicode),
+ 'Command-line item %r is unicode!' % item)
+
+ def test_with_from(self):
+ claws = mail_client.Claws(None)
+ cmdline = claws._get_compose_commandline(
+ u'jrandom@example.org', None, None, None, u'qrandom@example.com')
+ self.assertEqual(
+ ['--compose',
+ 'mailto:jrandom@example.org?from=qrandom%40example.com'],
+ cmdline)
+
+ def test_to_required(self):
+ claws = mail_client.Claws(None)
+ self.assertRaises(errors.NoMailAddressSpecified,
+ claws._get_compose_commandline,
+ None, None, 'file%')
+
+ def test_with_body(self):
+ claws = mail_client.Claws(None)
+ cmdline = claws._get_compose_commandline(
+ u'jrandom@example.org', None, None, 'This is some body text')
+ self.assertEqual(
+ ['--compose',
+ 'mailto:jrandom@example.org?body=This%20is%20some%20body%20text'],
+ cmdline)
+
+
+class TestEditor(tests.TestCase):
+
+ def test_get_merge_prompt_unicode(self):
+ """Prompt, to and subject are unicode; the attachment is binary."""
+ editor = mail_client.Editor(None)
+ prompt = editor._get_merge_prompt(u'foo\u1234',
+ u'bar\u1234',
+ u'baz\u1234',
+ u'qux\u1234'.encode('utf-8'))
+ self.assertContainsRe(prompt, u'foo\u1234(.|\n)*bar\u1234'
+ u'(.|\n)*baz\u1234(.|\n)*qux\u1234')
+ editor._get_merge_prompt(u'foo', u'bar', u'baz', 'qux\xff')
+
+
+class DummyMailClient(object):
+
+ def compose_merge_request(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class DefaultMailDummyClient(mail_client.DefaultMail):
+
+ def __init__(self):
+ self.client = DummyMailClient()
+
+ def _mail_client(self):
+ return self.client
+
+
+class TestDefaultMail(tests.TestCase):
+
+ def test_compose_merge_request(self):
+ client = DefaultMailDummyClient()
+ to = "a@b.com"
+ subject = "[MERGE]"
+ directive = "directive",
+ basename = "merge"
+ client.compose_merge_request(to, subject, directive,
+ basename=basename)
+ dummy_client = client.client
+ self.assertEqual(dummy_client.args, (to, subject, directive))
+ self.assertEqual(dummy_client.kwargs,
+ {"basename": basename, 'body': None})
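
The expected strings above ('Hi%20there%21', 'bo%26dy', 'file%25') are ordinary percent-encoding of the subject, body and attachment path into a mailto: URL; the Claws test shows bzrlib doing this through urlutils.quote. A small standard-library illustration of the same encoding (Python 2, matching the rest of this file):

    import urllib

    subject = 'Hi there!'
    body = 'bo&dy'
    url = 'mailto:jrandom@example.org?body=%s&subject=%s' % (
        urllib.quote(body, safe=''), urllib.quote(subject, safe=''))
    # -> mailto:jrandom@example.org?body=bo%26dy&subject=Hi%20there%21
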
diff --git a/bzrlib/tests/test_matchers.py b/bzrlib/tests/test_matchers.py
new file mode 100644
index 0000000..4930af6
--- /dev/null
+++ b/bzrlib/tests/test_matchers.py
@@ -0,0 +1,204 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests of bzrlib test matchers."""
+
+from testtools.matchers import *
+
+from bzrlib.smart.client import CallHookParams
+
+from bzrlib.tests import (
+ CapturedCall,
+ TestCase,
+ TestCaseWithTransport,
+ )
+from bzrlib.tests.matchers import *
+
+
+class StubTree(object):
+ """Stub tree for testing."""
+
+ def __init__(self, lock_status):
+ self._is_locked = lock_status
+
+ def __str__(self):
+ return u'I am da tree'
+
+ def is_locked(self):
+ return self._is_locked
+
+
+class FakeUnlockable(object):
+ """Something that can be unlocked."""
+
+ def unlock(self):
+ pass
+
+
+class TestReturnsUnlockable(TestCase):
+
+ def test___str__(self):
+ matcher = ReturnsUnlockable(StubTree(True))
+ self.assertEqual(
+ 'ReturnsUnlockable(lockable_thing=I am da tree)',
+ str(matcher))
+
+ def test_match(self):
+ stub_tree = StubTree(False)
+ matcher = ReturnsUnlockable(stub_tree)
+ self.assertThat(matcher.match(lambda:FakeUnlockable()), Equals(None))
+
+ def test_mismatch(self):
+ stub_tree = StubTree(True)
+ matcher = ReturnsUnlockable(stub_tree)
+ mismatch = matcher.match(lambda:FakeUnlockable())
+ self.assertNotEqual(None, mismatch)
+ self.assertThat(mismatch.describe(), Equals("I am da tree is locked"))
+
+
+class TestMatchesAncestry(TestCaseWithTransport):
+
+ def test__str__(self):
+ matcher = MatchesAncestry("A repository", "arevid")
+ self.assertEqual(
+ "MatchesAncestry(repository='A repository', "
+ "revision_id='arevid')",
+ str(matcher))
+
+ def test_match(self):
+ b = self.make_branch_builder('.')
+ b.start_series()
+ revid1 = b.build_commit()
+ revid2 = b.build_commit()
+ b.finish_series()
+ branch = b.get_branch()
+ m = MatchesAncestry(branch.repository, revid2)
+ self.assertThat([revid2, revid1], m)
+ self.assertThat([revid1, revid2], m)
+ m = MatchesAncestry(branch.repository, revid1)
+ self.assertThat([revid1], m)
+ m = MatchesAncestry(branch.repository, "unknown")
+ self.assertThat(["unknown"], m)
+
+ def test_mismatch(self):
+ b = self.make_branch_builder('.')
+ b.start_series()
+ revid1 = b.build_commit()
+ revid2 = b.build_commit()
+ b.finish_series()
+ branch = b.get_branch()
+ m = MatchesAncestry(branch.repository, revid1)
+ mismatch = m.match([])
+ self.assertIsNot(None, mismatch)
+ self.assertEquals(
+ "mismatched ancestry for revision '%s' was ['%s'], expected []" % (
+ revid1, revid1),
+ mismatch.describe())
+
+
+class TestHasLayout(TestCaseWithTransport):
+
+ def test__str__(self):
+ matcher = HasLayout([("a", "a-id")])
+ self.assertEqual("HasLayout([('a', 'a-id')])", str(matcher))
+
+ def test_match(self):
+ t = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+ t.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ self.assertThat(t, HasLayout(['', 'a', 'b/', 'b/c']))
+ self.assertThat(t, HasLayout(
+ [('', t.get_root_id()),
+ ('a', 'a-id'),
+ ('b/', 'b-id'),
+ ('b/c', 'c-id')]))
+
+ def test_mismatch(self):
+ t = self.make_branch_and_tree('.')
+ self.build_tree(['a', 'b/', 'b/c'])
+ t.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ mismatch = HasLayout(['a']).match(t)
+ self.assertIsNot(None, mismatch)
+ self.assertEquals(
+ "['a'] != [u'', u'a', u'b/', u'b/c']",
+ mismatch.describe())
+
+ def test_no_dirs(self):
+ # Some tree/repository formats do not support versioned directories
+ t = self.make_branch_and_tree('.')
+ t.has_versioned_directories = lambda: False
+ self.build_tree(['a', 'b/', 'b/c'])
+ t.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+ self.assertIs(None, HasLayout(['', 'a', 'b/', 'b/c']).match(t))
+ self.assertIs(None, HasLayout(['', 'a', 'b/', 'b/c', 'd/']).match(t))
+ mismatch = HasLayout([u'', u'a', u'd/']).match(t)
+ self.assertIsNot(None, mismatch)
+ self.assertEquals(
+ "[u'', u'a'] != [u'', u'a', u'b/', u'b/c']",
+ mismatch.describe())
+
+
+class TestContainsNoVfsCalls(TestCase):
+
+ def _make_call(self, method, args):
+ return CapturedCall(CallHookParams(method, args, None, None, None), 0)
+
+ def test__str__(self):
+ self.assertEqual("ContainsNoVfsCalls()", str(ContainsNoVfsCalls()))
+
+ def test_empty(self):
+ self.assertIs(None, ContainsNoVfsCalls().match([]))
+
+ def test_no_vfs_calls(self):
+ calls = [self._make_call("Branch.get_config_file", [])]
+ self.assertIs(None, ContainsNoVfsCalls().match(calls))
+
+ def test_ignores_unknown(self):
+ calls = [self._make_call("unknown", [])]
+ self.assertIs(None, ContainsNoVfsCalls().match(calls))
+
+ def test_match(self):
+ calls = [self._make_call("append", ["file"]),
+ self._make_call("Branch.get_config_file", [])]
+ mismatch = ContainsNoVfsCalls().match(calls)
+ self.assertIsNot(None, mismatch)
+ self.assertEquals([calls[0].call], mismatch.vfs_calls)
+ self.assertEquals("no VFS calls expected, got: append('file')",
+ mismatch.describe())
+
+
+class TestRevisionHistoryMatches(TestCaseWithTransport):
+
+ def test_empty(self):
+ tree = self.make_branch_and_tree('.')
+ matcher = RevisionHistoryMatches([])
+ self.assertIs(None, matcher.match(tree.branch))
+
+ def test_matches(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('msg1', rev_id='a')
+ tree.commit('msg2', rev_id='b')
+ matcher = RevisionHistoryMatches(['a', 'b'])
+ self.assertIs(None, matcher.match(tree.branch))
+
+ def test_mismatch(self):
+ tree = self.make_branch_and_tree('.')
+ tree.commit('msg1', rev_id='a')
+ tree.commit('msg2', rev_id='b')
+ matcher = RevisionHistoryMatches(['a', 'b', 'c'])
+ self.assertEquals(
+ "['a', 'b', 'c'] != ['a', 'b']",
+ matcher.match(tree.branch).describe())
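
Every matcher exercised above follows the standard testtools protocol: match() returns None on success and otherwise a Mismatch object whose describe() explains the failure, while __str__ names the matcher for error output. A minimal custom matcher sketch assuming only that protocol:

    from testtools.matchers import Matcher, Mismatch

    class IsEven(Matcher):
        """Match an integer that is even."""

        def __str__(self):
            return 'IsEven()'

        def match(self, value):
            if value % 2 == 0:
                return None                        # success
            return Mismatch('%r is not even' % (value,))

    # In a testtools-style test: self.assertThat(4, IsEven())
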
diff --git a/bzrlib/tests/test_memorytree.py b/bzrlib/tests/test_memorytree.py
new file mode 100644
index 0000000..0283504
--- /dev/null
+++ b/bzrlib/tests/test_memorytree.py
@@ -0,0 +1,223 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the MemoryTree class."""
+
+from bzrlib import errors
+from bzrlib.memorytree import MemoryTree
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.treebuilder import TreeBuilder
+
+
+class TestMemoryTree(TestCaseWithTransport):
+
+ def test_create_on_branch(self):
+ """Creating a mutable tree on a trivial branch works."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ self.assertEqual(branch.bzrdir, tree.bzrdir)
+ self.assertEqual(branch, tree.branch)
+ self.assertEqual([], tree.get_parent_ids())
+
+ def test_create_on_branch_with_content(self):
+ """Creating a mutable tree on a non-trivial branch works."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ # build some content
+ tree.lock_write()
+ builder = TreeBuilder()
+ builder.start_tree(tree)
+ builder.build(['foo'])
+ builder.finish_tree()
+ rev_id = tree.commit('first post')
+ tree.unlock()
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_read()
+ self.assertEqual([rev_id], tree.get_parent_ids())
+ self.assertEqual('contents of foo\n',
+ tree.get_file(tree.path2id('foo')).read())
+ tree.unlock()
+
+ def test_get_root_id(self):
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ try:
+ tree.add([''])
+ self.assertIsNot(None, tree.get_root_id())
+ finally:
+ tree.unlock()
+
+ def test_lock_tree_write(self):
+ """Check we can lock_tree_write and unlock MemoryTrees."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_tree_write()
+ tree.unlock()
+
+ def test_lock_tree_write_after_read_fails(self):
+ """Check that we error when trying to upgrade a read lock to write."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_read()
+ self.assertRaises(errors.ReadOnlyError, tree.lock_tree_write)
+ tree.unlock()
+
+ def test_lock_write(self):
+ """Check we can lock_write and unlock MemoryTrees."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ tree.unlock()
+
+ def test_lock_write_after_read_fails(self):
+ """Check that we error when trying to upgrade a read lock to write."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_read()
+ self.assertRaises(errors.ReadOnlyError, tree.lock_write)
+ tree.unlock()
+
+ def test_add_with_kind(self):
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ tree.add(['', 'afile', 'adir'], None,
+ ['directory', 'file', 'directory'])
+ self.assertEqual('afile', tree.id2path(tree.path2id('afile')))
+ self.assertEqual('adir', tree.id2path(tree.path2id('adir')))
+ self.assertFalse(tree.has_filename('afile'))
+ self.assertFalse(tree.has_filename('adir'))
+ tree.unlock()
+
+ def test_put_new_file(self):
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ tree.add(['', 'foo'], ids=['root-id', 'foo-id'],
+ kinds=['directory', 'file'])
+ tree.put_file_bytes_non_atomic('foo-id', 'barshoom')
+ self.assertEqual('barshoom', tree.get_file('foo-id').read())
+ tree.unlock()
+
+ def test_put_existing_file(self):
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ tree.add(['', 'foo'], ids=['root-id', 'foo-id'],
+ kinds=['directory', 'file'])
+ tree.put_file_bytes_non_atomic('foo-id', 'first-content')
+ tree.put_file_bytes_non_atomic('foo-id', 'barshoom')
+ self.assertEqual('barshoom', tree.get_file('foo-id').read())
+ tree.unlock()
+
+ def test_add_in_subdir(self):
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['root-id'], ['directory'])
+ # Unfortunately, the only way to 'mkdir' is to call 'tree.mkdir', but
+ # that *always* adds the directory as well. So if you want to create a
+ # file in a subdirectory, you have to split out the 'mkdir()' calls
+ # from the add and put_file_bytes_non_atomic calls. :(
+ tree.mkdir('adir', 'dir-id')
+ tree.add(['adir/afile'], ['file-id'], ['file'])
+ self.assertEqual('adir/afile', tree.id2path('file-id'))
+ self.assertEqual('adir', tree.id2path('dir-id'))
+ tree.put_file_bytes_non_atomic('file-id', 'barshoom')
+
+ def test_commit_trivial(self):
+ """Smoke test for commit on a MemoryTree.
+
+ Because of commit's design and layering, if this works, all commit
+ logic should work quite reliably.
+ """
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ tree.add(['', 'foo'], ids=['root-id', 'foo-id'],
+ kinds=['directory', 'file'])
+ tree.put_file_bytes_non_atomic('foo-id', 'barshoom')
+ revision_id = tree.commit('message baby')
+ # the parents list for the tree should have changed.
+ self.assertEqual([revision_id], tree.get_parent_ids())
+ tree.unlock()
+ # and we should have a revision that is accessible outside the tree lock
+ revtree = tree.branch.repository.revision_tree(revision_id)
+ revtree.lock_read()
+ self.addCleanup(revtree.unlock)
+ self.assertEqual('barshoom', revtree.get_file('foo-id').read())
+
+ def test_unversion(self):
+ """Test unversioning a file in a memory tree."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ tree.lock_write()
+ tree.add(['', 'foo'], ids=['root-id', 'foo-id'],
+ kinds=['directory', 'file'])
+ tree.unversion(['foo-id'])
+ self.assertFalse(tree.has_id('foo-id'))
+ tree.unlock()
+
+ def test_last_revision(self):
+ """There should be a last revision method we can call."""
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ tree.add('')
+ rev_id = tree.commit('first post')
+ tree.unlock()
+ self.assertEqual(rev_id, tree.last_revision())
+
+ def test_rename_file(self):
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add(['', 'foo'], ['root-id', 'foo-id'], ['directory', 'file'])
+ tree.put_file_bytes_non_atomic('foo-id', 'content\n')
+ tree.commit('one', rev_id='rev-one')
+ tree.rename_one('foo', 'bar')
+ self.assertEqual('bar', tree.id2path('foo-id'))
+ self.assertEqual('content\n', tree._file_transport.get_bytes('bar'))
+ self.assertRaises(errors.NoSuchFile,
+ tree._file_transport.get_bytes, 'foo')
+ tree.commit('two', rev_id='rev-two')
+ self.assertEqual('content\n', tree._file_transport.get_bytes('bar'))
+ self.assertRaises(errors.NoSuchFile,
+ tree._file_transport.get_bytes, 'foo')
+
+ rev_tree2 = tree.branch.repository.revision_tree('rev-two')
+ self.assertEqual('bar', rev_tree2.id2path('foo-id'))
+ self.assertEqual('content\n', rev_tree2.get_file_text('foo-id'))
+
+ def test_rename_file_to_subdir(self):
+ tree = self.make_branch_and_memory_tree('branch')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add('')
+ tree.mkdir('subdir', 'subdir-id')
+ tree.add('foo', 'foo-id', 'file')
+ tree.put_file_bytes_non_atomic('foo-id', 'content\n')
+ tree.commit('one', rev_id='rev-one')
+
+ tree.rename_one('foo', 'subdir/bar')
+ self.assertEqual('subdir/bar', tree.id2path('foo-id'))
+ self.assertEqual('content\n',
+ tree._file_transport.get_bytes('subdir/bar'))
+ tree.commit('two', rev_id='rev-two')
+ rev_tree2 = tree.branch.repository.revision_tree('rev-two')
+ self.assertEqual('subdir/bar', rev_tree2.id2path('foo-id'))
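
The steps repeated throughout these tests (create a MemoryTree on a branch, take a write lock, add paths with explicit file ids and kinds, write content with put_file_bytes_non_atomic, then commit) make up the whole MemoryTree workflow. A condensed sketch using only calls already exercised above, assuming it runs inside a TestCaseWithTransport method:

    tree = MemoryTree.create_on_branch(self.make_branch('branch'))
    tree.lock_write()
    try:
        tree.add(['', 'foo'], ids=['root-id', 'foo-id'],
                 kinds=['directory', 'file'])
        tree.put_file_bytes_non_atomic('foo-id', 'hello\n')
        rev_id = tree.commit('initial import')
    finally:
        tree.unlock()
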
diff --git a/bzrlib/tests/test_merge.py b/bzrlib/tests/test_merge.py
new file mode 100644
index 0000000..70136e8
--- /dev/null
+++ b/bzrlib/tests/test_merge.py
@@ -0,0 +1,3291 @@
+# Copyright (C) 2005-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+from StringIO import StringIO
+
+from bzrlib import (
+ branch as _mod_branch,
+ cleanup,
+ conflicts,
+ errors,
+ inventory,
+ knit,
+ memorytree,
+ merge as _mod_merge,
+ option,
+ revision as _mod_revision,
+ tests,
+ transform,
+ versionedfile,
+ )
+from bzrlib.conflicts import ConflictList, TextConflict
+from bzrlib.errors import UnrelatedBranches, NoCommits
+from bzrlib.merge import transform_tree, merge_inner, _PlanMerge
+from bzrlib.osutils import basename, pathjoin, file_kind
+from bzrlib.tests import (
+ features,
+ TestCaseWithMemoryTransport,
+ TestCaseWithTransport,
+ test_merge_core,
+ )
+from bzrlib.workingtree import WorkingTree
+
+
+class TestMerge(TestCaseWithTransport):
+ """Test appending more than one revision"""
+
+ def test_pending(self):
+ wt = self.make_branch_and_tree('.')
+ rev_a = wt.commit("lala!")
+ self.assertEqual([rev_a], wt.get_parent_ids())
+ self.assertRaises(errors.PointlessMerge, wt.merge_from_branch,
+ wt.branch)
+ self.assertEqual([rev_a], wt.get_parent_ids())
+ return wt
+
+ def test_undo(self):
+ wt = self.make_branch_and_tree('.')
+ wt.commit("lala!")
+ wt.commit("haha!")
+ wt.commit("blabla!")
+ wt.merge_from_branch(wt.branch, wt.branch.get_rev_id(2),
+ wt.branch.get_rev_id(1))
+
+ def test_nocommits(self):
+ wt = self.test_pending()
+ wt2 = self.make_branch_and_tree('branch2')
+ self.assertRaises(NoCommits, wt.merge_from_branch, wt2.branch)
+ return wt, wt2
+
+ def test_unrelated(self):
+ wt, wt2 = self.test_nocommits()
+ wt2.commit("blah")
+ self.assertRaises(UnrelatedBranches, wt.merge_from_branch, wt2.branch)
+ return wt2
+
+ def test_merge_one_file(self):
+ """Do a partial merge of a tree which should not affect tree parents."""
+ wt1 = self.make_branch_and_tree('branch1')
+ tip = wt1.commit('empty commit')
+ wt2 = self.make_branch_and_tree('branch2')
+ wt2.pull(wt1.branch)
+ with file('branch1/foo', 'wb') as f:
+ f.write('foo')
+ with file('branch1/bar', 'wb') as f:
+ f.write('bar')
+ wt1.add('foo')
+ wt1.add('bar')
+ wt1.commit('add foobar')
+ self.run_bzr('merge ../branch1/baz', retcode=3, working_dir='branch2')
+ self.run_bzr('merge ../branch1/foo', working_dir='branch2')
+ self.assertPathExists('branch2/foo')
+ self.assertPathDoesNotExist('branch2/bar')
+ wt2 = WorkingTree.open('branch2')
+ self.assertEqual([tip], wt2.get_parent_ids())
+
+ def test_pending_with_null(self):
+ """When base is forced to revno 0, parent_ids are set"""
+ wt2 = self.test_unrelated()
+ wt1 = WorkingTree.open('.')
+ br1 = wt1.branch
+ br1.fetch(wt2.branch)
+ # merge all of branch 2 into branch 1 even though they
+ # are not related.
+ wt1.merge_from_branch(wt2.branch, wt2.last_revision(), 'null:')
+ self.assertEqual([br1.last_revision(), wt2.branch.last_revision()],
+ wt1.get_parent_ids())
+ return (wt1, wt2.branch)
+
+ def test_two_roots(self):
+ """Merge base is sane when two unrelated branches are merged"""
+ wt1, br2 = self.test_pending_with_null()
+ wt1.commit("blah")
+ wt1.lock_read()
+ try:
+ last = wt1.branch.last_revision()
+ last2 = br2.last_revision()
+ graph = wt1.branch.repository.get_graph()
+ self.assertEqual(last2, graph.find_unique_lca(last, last2))
+ finally:
+ wt1.unlock()
+
+ def test_merge_into_null_tree(self):
+ wt = self.make_branch_and_tree('tree')
+ null_tree = wt.basis_tree()
+ self.build_tree(['tree/file'])
+ wt.add('file')
+ wt.commit('tree with root')
+ merger = _mod_merge.Merge3Merger(null_tree, null_tree, null_tree, wt,
+ this_branch=wt.branch,
+ do_merge=False)
+ with merger.make_preview_transform() as tt:
+ self.assertEqual([], tt.find_conflicts())
+ preview = tt.get_preview_tree()
+ self.assertEqual(wt.get_root_id(), preview.get_root_id())
+
+ def test_merge_unrelated_retains_root(self):
+ wt = self.make_branch_and_tree('tree')
+ other_tree = self.make_branch_and_tree('other')
+ self.addCleanup(other_tree.lock_read().unlock)
+ merger = _mod_merge.Merge3Merger(wt, wt, wt.basis_tree(), other_tree,
+ this_branch=wt.branch,
+ do_merge=False)
+ with transform.TransformPreview(wt) as merger.tt:
+ merger._compute_transform()
+ new_root_id = merger.tt.final_file_id(merger.tt.root)
+ self.assertEqual(wt.get_root_id(), new_root_id)
+
+ def test_create_rename(self):
+ """Rename an inventory entry while creating the file"""
+ tree = self.make_branch_and_tree('.')
+ with file('name1', 'wb') as f: f.write('Hello')
+ tree.add('name1')
+ tree.commit(message="hello")
+ tree.rename_one('name1', 'name2')
+ os.unlink('name2')
+ transform_tree(tree, tree.branch.basis_tree())
+
+ def test_layered_rename(self):
+ """Rename both child and parent at same time"""
+ tree = self.make_branch_and_tree('.')
+ os.mkdir('dirname1')
+ tree.add('dirname1')
+ filename = pathjoin('dirname1', 'name1')
+ with file(filename, 'wb') as f: f.write('Hello')
+ tree.add(filename)
+ tree.commit(message="hello")
+ filename2 = pathjoin('dirname1', 'name2')
+ tree.rename_one(filename, filename2)
+ tree.rename_one('dirname1', 'dirname2')
+ transform_tree(tree, tree.branch.basis_tree())
+
+ def test_ignore_zero_merge_inner(self):
+ # Test that merge_inner's ignore zero parameter is effective
+ tree_a = self.make_branch_and_tree('a')
+ tree_a.commit(message="hello")
+ dir_b = tree_a.bzrdir.sprout('b')
+ tree_b = dir_b.open_workingtree()
+ tree_b.lock_write()
+ self.addCleanup(tree_b.unlock)
+ tree_a.commit(message="hello again")
+ log = StringIO()
+ merge_inner(tree_b.branch, tree_a, tree_b.basis_tree(),
+ this_tree=tree_b, ignore_zero=True)
+ self.assertTrue('All changes applied successfully.\n' not in
+ self.get_log())
+ tree_b.revert()
+ merge_inner(tree_b.branch, tree_a, tree_b.basis_tree(),
+ this_tree=tree_b, ignore_zero=False)
+ self.assertTrue('All changes applied successfully.\n' in self.get_log())
+
+ def test_merge_inner_conflicts(self):
+ tree_a = self.make_branch_and_tree('a')
+ tree_a.set_conflicts(ConflictList([TextConflict('patha')]))
+ merge_inner(tree_a.branch, tree_a, tree_a, this_tree=tree_a)
+ self.assertEqual(1, len(tree_a.conflicts()))
+
+ def test_rmdir_conflict(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/b/'])
+ tree_a.add('b', 'b-id')
+ tree_a.commit('added b')
+ # basis_tree() is only guaranteed to be valid as long as it is actually
+ # the basis tree. This mutates the tree after grabbing basis, so go to
+ # the repository.
+ base_tree = tree_a.branch.repository.revision_tree(tree_a.last_revision())
+ tree_z = tree_a.bzrdir.sprout('z').open_workingtree()
+ self.build_tree(['a/b/c'])
+ tree_a.add('b/c')
+ tree_a.commit('added c')
+ os.rmdir('z/b')
+ tree_z.commit('removed b')
+ merge_inner(tree_z.branch, tree_a, base_tree, this_tree=tree_z)
+ self.assertEqual([
+ conflicts.MissingParent('Created directory', 'b', 'b-id'),
+ conflicts.UnversionedParent('Versioned directory', 'b', 'b-id')],
+ tree_z.conflicts())
+ merge_inner(tree_a.branch, tree_z.basis_tree(), base_tree,
+ this_tree=tree_a)
+ self.assertEqual([
+ conflicts.DeletingParent('Not deleting', 'b', 'b-id'),
+ conflicts.UnversionedParent('Versioned directory', 'b', 'b-id')],
+ tree_a.conflicts())
+
+ def test_nested_merge(self):
+ tree = self.make_branch_and_tree('tree',
+ format='development-subtree')
+ sub_tree = self.make_branch_and_tree('tree/sub-tree',
+ format='development-subtree')
+ sub_tree.set_root_id('sub-tree-root')
+ self.build_tree_contents([('tree/sub-tree/file', 'text1')])
+ sub_tree.add('file')
+ sub_tree.commit('foo')
+ tree.add_reference(sub_tree)
+ tree.commit('set text to 1')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ # modify the file in the subtree
+ self.build_tree_contents([('tree2/sub-tree/file', 'text2')])
+ # and merge the changes from the diverged subtree into the containing
+ # tree
+ tree2.commit('changed file text')
+ tree.merge_from_branch(tree2.branch)
+ self.assertFileEqual('text2', 'tree/sub-tree/file')
+
+ def test_merge_with_missing(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'content_1')])
+ tree_a.add('file')
+ tree_a.commit('commit base')
+ # basis_tree() is only guaranteed to be valid as long as it is actually
+ # the basis tree. This test commits to the tree after grabbing basis,
+ # so we go to the repository.
+ base_tree = tree_a.branch.repository.revision_tree(tree_a.last_revision())
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ self.build_tree_contents([('tree_a/file', 'content_2')])
+ tree_a.commit('commit other')
+ other_tree = tree_a.basis_tree()
+ # 'file' is now missing but isn't altered in any commit in b so no
+ # change should be applied.
+ os.unlink('tree_b/file')
+ merge_inner(tree_b.branch, other_tree, base_tree, this_tree=tree_b)
+
+ def test_merge_kind_change(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'content_1')])
+ tree_a.add('file', 'file-id')
+ tree_a.commit('added file')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ os.unlink('tree_a/file')
+ self.build_tree(['tree_a/file/'])
+ tree_a.commit('changed file to directory')
+ tree_b.merge_from_branch(tree_a.branch)
+ self.assertEqual('directory', file_kind('tree_b/file'))
+ tree_b.revert()
+ self.assertEqual('file', file_kind('tree_b/file'))
+ self.build_tree_contents([('tree_b/file', 'content_2')])
+ tree_b.commit('content change')
+ tree_b.merge_from_branch(tree_a.branch)
+ self.assertEqual(tree_b.conflicts(),
+ [conflicts.ContentsConflict('file',
+ file_id='file-id')])
+
+ def test_merge_type_registry(self):
+ merge_type_option = option.Option.OPTIONS['merge-type']
+ self.assertFalse('merge4' in [x[0] for x in
+ merge_type_option.iter_switches()])
+ registry = _mod_merge.get_merge_type_registry()
+ registry.register_lazy('merge4', 'bzrlib.merge', 'Merge4Merger',
+ 'time-travelling merge')
+ self.assertTrue('merge4' in [x[0] for x in
+ merge_type_option.iter_switches()])
+ registry.remove('merge4')
+ self.assertFalse('merge4' in [x[0] for x in
+ merge_type_option.iter_switches()])
+
+ def test_merge_other_moves_we_deleted(self):
+ tree_a = self.make_branch_and_tree('A')
+ tree_a.lock_write()
+ self.addCleanup(tree_a.unlock)
+ self.build_tree(['A/a'])
+ tree_a.add('a')
+ tree_a.commit('1', rev_id='rev-1')
+ tree_a.flush()
+ tree_a.rename_one('a', 'b')
+ tree_a.commit('2')
+ bzrdir_b = tree_a.bzrdir.sprout('B', revision_id='rev-1')
+ tree_b = bzrdir_b.open_workingtree()
+ tree_b.lock_write()
+ self.addCleanup(tree_b.unlock)
+ os.unlink('B/a')
+ tree_b.commit('3')
+ try:
+ tree_b.merge_from_branch(tree_a.branch)
+ except AttributeError:
+ self.fail('tried to join a path when name was None')
+
+ def test_merge_uncommitted_otherbasis_ancestor_of_thisbasis(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/file_1', 'a/file_2'])
+ tree_a.add(['file_1'])
+ tree_a.commit('commit 1')
+ tree_a.add(['file_2'])
+ tree_a.commit('commit 2')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ tree_b.rename_one('file_1', 'renamed')
+ merger = _mod_merge.Merger.from_uncommitted(tree_a, tree_b)
+ merger.merge_type = _mod_merge.Merge3Merger
+ merger.do_merge()
+ self.assertEqual(tree_a.get_parent_ids(), [tree_b.last_revision()])
+
+ def test_merge_uncommitted_otherbasis_ancestor_of_thisbasis_weave(self):
+ tree_a = self.make_branch_and_tree('a')
+ self.build_tree(['a/file_1', 'a/file_2'])
+ tree_a.add(['file_1'])
+ tree_a.commit('commit 1')
+ tree_a.add(['file_2'])
+ tree_a.commit('commit 2')
+ tree_b = tree_a.bzrdir.sprout('b').open_workingtree()
+ tree_b.rename_one('file_1', 'renamed')
+ merger = _mod_merge.Merger.from_uncommitted(tree_a, tree_b)
+ merger.merge_type = _mod_merge.WeaveMerger
+ merger.do_merge()
+ self.assertEqual(tree_a.get_parent_ids(), [tree_b.last_revision()])
+
+ def prepare_cherrypick(self):
+ """Prepare a pair of trees for cherrypicking tests.
+
+ Both trees have a file, 'file'.
+ rev1 sets content to 'a'.
+ rev2b adds 'b'.
+ rev3b adds 'c'.
+ A full merge of rev2b and rev3b into this_tree would add both 'b' and
+ 'c'. A successful cherrypick of rev2b-rev3b into this_tree will add
+ 'c', but not 'b'.
+ """
+ this_tree = self.make_branch_and_tree('this')
+ self.build_tree_contents([('this/file', "a\n")])
+ this_tree.add('file')
+ this_tree.commit('rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('other/file', "a\nb\n")])
+ other_tree.commit('rev2b', rev_id='rev2b')
+ self.build_tree_contents([('other/file', "c\na\nb\n")])
+ other_tree.commit('rev3b', rev_id='rev3b')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ return this_tree, other_tree
+
+ def test_weave_cherrypick(self):
+ this_tree, other_tree = self.prepare_cherrypick()
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev3b', 'rev2b', other_tree.branch)
+ merger.merge_type = _mod_merge.WeaveMerger
+ merger.do_merge()
+ self.assertFileEqual('c\na\n', 'this/file')
+
+ def test_weave_cannot_reverse_cherrypick(self):
+ this_tree, other_tree = self.prepare_cherrypick()
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev2b', 'rev3b', other_tree.branch)
+ merger.merge_type = _mod_merge.WeaveMerger
+ self.assertRaises(errors.CannotReverseCherrypick, merger.do_merge)
+
+ def test_merge3_can_reverse_cherrypick(self):
+ this_tree, other_tree = self.prepare_cherrypick()
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev2b', 'rev3b', other_tree.branch)
+ merger.merge_type = _mod_merge.Merge3Merger
+ merger.do_merge()
+
+ def test_merge3_will_detect_cherrypick(self):
+ this_tree = self.make_branch_and_tree('this')
+ self.build_tree_contents([('this/file', "a\n")])
+ this_tree.add('file')
+ this_tree.commit('rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('other/file', "a\nb\n")])
+ other_tree.commit('rev2b', rev_id='rev2b')
+ self.build_tree_contents([('other/file', "a\nb\nc\n")])
+ other_tree.commit('rev3b', rev_id='rev3b')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev3b', 'rev2b', other_tree.branch)
+ merger.merge_type = _mod_merge.Merge3Merger
+ merger.do_merge()
+ self.assertFileEqual('a\n'
+ '<<<<<<< TREE\n'
+ '=======\n'
+ 'c\n'
+ '>>>>>>> MERGE-SOURCE\n',
+ 'this/file')
+
+ def test_merge_reverse_revision_range(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['a'])
+ tree.add('a')
+ first_rev = tree.commit("added a")
+ merger = _mod_merge.Merger.from_revision_ids(None, tree,
+ _mod_revision.NULL_REVISION,
+ first_rev)
+ merger.merge_type = _mod_merge.Merge3Merger
+ merger.interesting_files = 'a'
+ conflict_count = merger.do_merge()
+ self.assertEqual(0, conflict_count)
+
+ self.assertPathDoesNotExist("a")
+ tree.revert()
+ self.assertPathExists("a")
+
+ def test_make_merger(self):
+ this_tree = self.make_branch_and_tree('this')
+ this_tree.commit('rev1', rev_id='rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ this_tree.commit('rev2', rev_id='rev2a')
+ other_tree.commit('rev2', rev_id='rev2b')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev2b', other_branch=other_tree.branch)
+ merger.merge_type = _mod_merge.Merge3Merger
+ tree_merger = merger.make_merger()
+ self.assertIs(_mod_merge.Merge3Merger, tree_merger.__class__)
+ self.assertEqual('rev2b',
+ tree_merger.other_tree.get_revision_id())
+ self.assertEqual('rev1',
+ tree_merger.base_tree.get_revision_id())
+ self.assertEqual(other_tree.branch, tree_merger.other_branch)
+
+ def test_make_preview_transform(self):
+ this_tree = self.make_branch_and_tree('this')
+ self.build_tree_contents([('this/file', '1\n')])
+ this_tree.add('file', 'file-id')
+ this_tree.commit('rev1', rev_id='rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('this/file', '1\n2a\n')])
+ this_tree.commit('rev2', rev_id='rev2a')
+ self.build_tree_contents([('other/file', '2b\n1\n')])
+ other_tree.commit('rev2', rev_id='rev2b')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev2b', other_branch=other_tree.branch)
+ merger.merge_type = _mod_merge.Merge3Merger
+ tree_merger = merger.make_merger()
+ tt = tree_merger.make_preview_transform()
+ self.addCleanup(tt.finalize)
+ preview_tree = tt.get_preview_tree()
+ tree_file = this_tree.get_file('file-id')
+ try:
+ self.assertEqual('1\n2a\n', tree_file.read())
+ finally:
+ tree_file.close()
+ preview_file = preview_tree.get_file('file-id')
+ try:
+ self.assertEqual('2b\n1\n2a\n', preview_file.read())
+ finally:
+ preview_file.close()
+
+ def test_do_merge(self):
+ this_tree = self.make_branch_and_tree('this')
+ self.build_tree_contents([('this/file', '1\n')])
+ this_tree.add('file', 'file-id')
+ this_tree.commit('rev1', rev_id='rev1')
+ other_tree = this_tree.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('this/file', '1\n2a\n')])
+ this_tree.commit('rev2', rev_id='rev2a')
+ self.build_tree_contents([('other/file', '2b\n1\n')])
+ other_tree.commit('rev2', rev_id='rev2b')
+ this_tree.lock_write()
+ self.addCleanup(this_tree.unlock)
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ this_tree, 'rev2b', other_branch=other_tree.branch)
+ merger.merge_type = _mod_merge.Merge3Merger
+ tree_merger = merger.make_merger()
+ tt = tree_merger.do_merge()
+ tree_file = this_tree.get_file('file-id')
+ try:
+ self.assertEqual('2b\n1\n2a\n', tree_file.read())
+ finally:
+ tree_file.close()
+
+ def test_merge_require_tree_root(self):
+ tree = self.make_branch_and_tree(".")
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['a'])
+ tree.add('a')
+ first_rev = tree.commit("added a")
+ old_root_id = tree.get_root_id()
+ merger = _mod_merge.Merger.from_revision_ids(None, tree,
+ _mod_revision.NULL_REVISION,
+ first_rev)
+ merger.merge_type = _mod_merge.Merge3Merger
+ conflict_count = merger.do_merge()
+ self.assertEqual(0, conflict_count)
+ self.assertEqual(set([old_root_id]), tree.all_file_ids())
+ tree.set_parent_ids([])
+
+ def test_merge_add_into_deleted_root(self):
+ # Yes, people actually do this. And report bugs if it breaks.
+ source = self.make_branch_and_tree('source', format='rich-root-pack')
+ self.build_tree(['source/foo/'])
+ source.add('foo', 'foo-id')
+ source.commit('Add foo')
+ target = source.bzrdir.sprout('target').open_workingtree()
+ subtree = target.extract('foo-id')
+ subtree.commit('Delete root')
+ self.build_tree(['source/bar'])
+ source.add('bar', 'bar-id')
+ source.commit('Add bar')
+ subtree.merge_from_branch(source.branch)
+
+ def test_merge_joined_branch(self):
+ source = self.make_branch_and_tree('source', format='rich-root-pack')
+ self.build_tree(['source/foo'])
+ source.add('foo')
+ source.commit('Add foo')
+ target = self.make_branch_and_tree('target', format='rich-root-pack')
+ self.build_tree(['target/bla'])
+ target.add('bla')
+ target.commit('Add bla')
+ nested = source.bzrdir.sprout('target/subtree').open_workingtree()
+ target.subsume(nested)
+ target.commit('Join nested')
+ self.build_tree(['source/bar'])
+ source.add('bar')
+ source.commit('Add bar')
+ target.merge_from_branch(source.branch)
+ target.commit('Merge source')
+
+
+class TestPlanMerge(TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ TestCaseWithMemoryTransport.setUp(self)
+ mapper = versionedfile.PrefixMapper()
+ factory = knit.make_file_factory(True, mapper)
+ self.vf = factory(self.get_transport())
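+ # _PlanMergeVersionedFile overlays uncommitted texts (added to it
+ # directly in the tests below) on top of the committed texts in self.vf.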
+ self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root')
+ self.plan_merge_vf.fallback_versionedfiles.append(self.vf)
+
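+ # In the helpers below each character of 'text' becomes one line of the
+ # versioned file, so e.g. 'abc' describes a three-line file.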
+ def add_version(self, key, parents, text):
+ self.vf.add_lines(key, parents, [c+'\n' for c in text])
+
+ def add_rev(self, prefix, revision_id, parents, text):
+ self.add_version((prefix, revision_id), [(prefix, p) for p in parents],
+ text)
+
+ def add_uncommitted_version(self, key, parents, text):
+ self.plan_merge_vf.add_lines(key, parents,
+ [c+'\n' for c in text])
+
+ def setup_plan_merge(self):
+ self.add_rev('root', 'A', [], 'abc')
+ self.add_rev('root', 'B', ['A'], 'acehg')
+ self.add_rev('root', 'C', ['A'], 'fabg')
+ return _PlanMerge('B', 'C', self.plan_merge_vf, ('root',))
+
+ def setup_plan_merge_uncommitted(self):
+ self.add_version(('root', 'A'), [], 'abc')
+ self.add_uncommitted_version(('root', 'B:'), [('root', 'A')], 'acehg')
+ self.add_uncommitted_version(('root', 'C:'), [('root', 'A')], 'fabg')
+ return _PlanMerge('B:', 'C:', self.plan_merge_vf, ('root',))
+
+ def test_base_from_plan(self):
+ self.setup_plan_merge()
+ plan = self.plan_merge_vf.plan_merge('B', 'C')
+ pwm = versionedfile.PlanWeaveMerge(plan)
+ self.assertEqual(['a\n', 'b\n', 'c\n'], pwm.base_from_plan())
+
+ def test_unique_lines(self):
+ plan = self.setup_plan_merge()
+ self.assertEqual(plan._unique_lines(
+ plan._get_matching_blocks('B', 'C')),
+ ([1, 2, 3], [0, 2]))
+
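+ # A quick legend for the plan statuses asserted below, as implied by the
+ # plans themselves: 'unchanged' lines are common to both versions;
+ # 'new-a'/'new-b' lines were introduced by the first/second version;
+ # 'killed-a'/'killed-b' lines existed in the base but were deleted by the
+ # first/second version; 'killed-both' marks lines deleted on both sides;
+ # 'killed-base' marks lines already dead in the common ancestry; and
+ # 'conflicted-a'/'conflicted-b' mark lines the LCA-based plans report as
+ # conflicting.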
+ def test_plan_merge(self):
+ self.setup_plan_merge()
+ plan = self.plan_merge_vf.plan_merge('B', 'C')
+ self.assertEqual([
+ ('new-b', 'f\n'),
+ ('unchanged', 'a\n'),
+ ('killed-a', 'b\n'),
+ ('killed-b', 'c\n'),
+ ('new-a', 'e\n'),
+ ('new-a', 'h\n'),
+ ('new-a', 'g\n'),
+ ('new-b', 'g\n')],
+ list(plan))
+
+ def test_plan_merge_cherrypick(self):
+ self.add_rev('root', 'A', [], 'abc')
+ self.add_rev('root', 'B', ['A'], 'abcde')
+ self.add_rev('root', 'C', ['A'], 'abcefg')
+ self.add_rev('root', 'D', ['A', 'B', 'C'], 'abcdegh')
+ my_plan = _PlanMerge('B', 'D', self.plan_merge_vf, ('root',))
+ # We shortcut when one text supersedes the other in the per-file graph.
+ # We don't actually need to compare the texts at this point.
+ self.assertEqual([
+ ('new-b', 'a\n'),
+ ('new-b', 'b\n'),
+ ('new-b', 'c\n'),
+ ('new-b', 'd\n'),
+ ('new-b', 'e\n'),
+ ('new-b', 'g\n'),
+ ('new-b', 'h\n')],
+ list(my_plan.plan_merge()))
+
+ def test_plan_merge_no_common_ancestor(self):
+ self.add_rev('root', 'A', [], 'abc')
+ self.add_rev('root', 'B', [], 'xyz')
+ my_plan = _PlanMerge('A', 'B', self.plan_merge_vf, ('root',))
+ self.assertEqual([
+ ('new-a', 'a\n'),
+ ('new-a', 'b\n'),
+ ('new-a', 'c\n'),
+ ('new-b', 'x\n'),
+ ('new-b', 'y\n'),
+ ('new-b', 'z\n')],
+ list(my_plan.plan_merge()))
+
+ def test_plan_merge_tail_ancestors(self):
+ # The graph looks like this:
+ # A # Common to all ancestors
+ # / \
+ # B C # Ancestors of E, only common to one side
+ # |\ /|
+ # D E F # D, F are unique to G, H respectively
+ # |/ \| # E is the LCA for G & H, and the unique LCA for
+ # G H # I, J
+ # |\ /|
+ # | X |
+ # |/ \|
+ # I J # criss-cross merge of G, H
+ #
+ # In this situation, a simple pruning of ancestors of E will leave D &
+ # F "dangling", which looks like they introduce lines different from
+ # the ones in E, but in actuality C&B introduced the lines, and they
+ # are already present in E
+
+ # Introduce the base text
+ self.add_rev('root', 'A', [], 'abc')
+ # Introduces a new line B
+ self.add_rev('root', 'B', ['A'], 'aBbc')
+ # Introduces a new line C
+ self.add_rev('root', 'C', ['A'], 'abCc')
+ # Introduce new line D
+ self.add_rev('root', 'D', ['B'], 'DaBbc')
+ # Merges B and C by just incorporating both
+ self.add_rev('root', 'E', ['B', 'C'], 'aBbCc')
+ # Introduce new line F
+ self.add_rev('root', 'F', ['C'], 'abCcF')
+ # Merge D & E by just combining the texts
+ self.add_rev('root', 'G', ['D', 'E'], 'DaBbCc')
+ # Merge F & E by just combining the texts
+ self.add_rev('root', 'H', ['F', 'E'], 'aBbCcF')
+ # Merge G & H by just combining texts
+ self.add_rev('root', 'I', ['G', 'H'], 'DaBbCcF')
+ # Merge G & H but supersede an old line in B
+ self.add_rev('root', 'J', ['H', 'G'], 'DaJbCcF')
+ plan = self.plan_merge_vf.plan_merge('I', 'J')
+ self.assertEqual([
+ ('unchanged', 'D\n'),
+ ('unchanged', 'a\n'),
+ ('killed-b', 'B\n'),
+ ('new-b', 'J\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'C\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'F\n')],
+ list(plan))
+
+ def test_plan_merge_tail_triple_ancestors(self):
+ # The graph looks like this:
+ # A # Common to all ancestors
+ # / \
+ # B C # Ancestors of E, only common to one side
+ # |\ /|
+ # D E F # D, F are unique to G, H respectively
+ # |/|\| # E is the LCA for G & H, and the unique LCA for
+ # G Q H # I, J
+ # |\ /| # Q is just an extra node which is merged into both
+ # | X | # I and J
+ # |/ \|
+ # I J # criss-cross merge of G, H
+ #
+ # This is the same as the test_plan_merge_tail_ancestors, except we add
+ # a third LCA that doesn't add new lines, but will trigger our more
+ # involved ancestry logic
+
+ self.add_rev('root', 'A', [], 'abc')
+ self.add_rev('root', 'B', ['A'], 'aBbc')
+ self.add_rev('root', 'C', ['A'], 'abCc')
+ self.add_rev('root', 'D', ['B'], 'DaBbc')
+ self.add_rev('root', 'E', ['B', 'C'], 'aBbCc')
+ self.add_rev('root', 'F', ['C'], 'abCcF')
+ self.add_rev('root', 'G', ['D', 'E'], 'DaBbCc')
+ self.add_rev('root', 'H', ['F', 'E'], 'aBbCcF')
+ self.add_rev('root', 'Q', ['E'], 'aBbCc')
+ self.add_rev('root', 'I', ['G', 'Q', 'H'], 'DaBbCcF')
+ # Merge G & H but supersede an old line in B
+ self.add_rev('root', 'J', ['H', 'Q', 'G'], 'DaJbCcF')
+ plan = self.plan_merge_vf.plan_merge('I', 'J')
+ self.assertEqual([
+ ('unchanged', 'D\n'),
+ ('unchanged', 'a\n'),
+ ('killed-b', 'B\n'),
+ ('new-b', 'J\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'C\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'F\n')],
+ list(plan))
+
+ def test_plan_merge_2_tail_triple_ancestors(self):
+ # The graph looks like this:
+ # A B # 2 tails going back to NULL
+ # |\ /|
+ # D E F # D is unique to G, F to H
+ # |/|\| # E is the LCA for G & H, and the unique LCA for
+ # G Q H # I, J
+ # |\ /| # Q is just an extra node which is merged into both
+ # | X | # I and J
+ # |/ \|
+ # I J # criss-cross merge of G, H (and Q)
+ #
+
+ # This is meant to test behaviour after hitting a 3-way LCA with
+ # multiple tail ancestors (which only have NULL_REVISION in common).
+
+ self.add_rev('root', 'A', [], 'abc')
+ self.add_rev('root', 'B', [], 'def')
+ self.add_rev('root', 'D', ['A'], 'Dabc')
+ self.add_rev('root', 'E', ['A', 'B'], 'abcdef')
+ self.add_rev('root', 'F', ['B'], 'defF')
+ self.add_rev('root', 'G', ['D', 'E'], 'Dabcdef')
+ self.add_rev('root', 'H', ['F', 'E'], 'abcdefF')
+ self.add_rev('root', 'Q', ['E'], 'abcdef')
+ self.add_rev('root', 'I', ['G', 'Q', 'H'], 'DabcdefF')
+ # Merge G & H but supersede an old line in B
+ self.add_rev('root', 'J', ['H', 'Q', 'G'], 'DabcdJfF')
+ plan = self.plan_merge_vf.plan_merge('I', 'J')
+ self.assertEqual([
+ ('unchanged', 'D\n'),
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('killed-b', 'e\n'),
+ ('new-b', 'J\n'),
+ ('unchanged', 'f\n'),
+ ('unchanged', 'F\n')],
+ list(plan))
+
+ def test_plan_merge_uncommitted_files(self):
+ self.setup_plan_merge_uncommitted()
+ plan = self.plan_merge_vf.plan_merge('B:', 'C:')
+ self.assertEqual([
+ ('new-b', 'f\n'),
+ ('unchanged', 'a\n'),
+ ('killed-a', 'b\n'),
+ ('killed-b', 'c\n'),
+ ('new-a', 'e\n'),
+ ('new-a', 'h\n'),
+ ('new-a', 'g\n'),
+ ('new-b', 'g\n')],
+ list(plan))
+
+ def test_plan_merge_insert_order(self):
+ """Weave merges are sensitive to the order of insertion.
+
+ Specifically, for overlapping regions it affects which region gets put
+ 'first'. When a user resolves an overlapping merge using the same
+ ordering, the lines match the parents; with a different ordering only
+ *some* of the lines match.
+ """
+ self.add_rev('root', 'A', [], 'abcdef')
+ self.add_rev('root', 'B', ['A'], 'abwxcdef')
+ self.add_rev('root', 'C', ['A'], 'abyzcdef')
+ # Merge, and resolve the conflict by adding *both* sets of lines
+ # If we get the ordering wrong, these will look like new lines in D,
+ # rather than carried over from B, C
+ self.add_rev('root', 'D', ['B', 'C'],
+ 'abwxyzcdef')
+ # Supersede the lines in B and delete the lines in C, which will
+ # conflict if they are treated as being in D
+ self.add_rev('root', 'E', ['C', 'B'],
+ 'abnocdef')
+ # Same thing for the lines in C
+ self.add_rev('root', 'F', ['C'], 'abpqcdef')
+ plan = self.plan_merge_vf.plan_merge('D', 'E')
+ self.assertEqual([
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('killed-b', 'w\n'),
+ ('killed-b', 'x\n'),
+ ('killed-b', 'y\n'),
+ ('killed-b', 'z\n'),
+ ('new-b', 'n\n'),
+ ('new-b', 'o\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('unchanged', 'e\n'),
+ ('unchanged', 'f\n')],
+ list(plan))
+ plan = self.plan_merge_vf.plan_merge('E', 'D')
+ # Going in the opposite direction shows the effect of the opposite plan
+ self.assertEqual([
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('new-b', 'w\n'),
+ ('new-b', 'x\n'),
+ ('killed-a', 'y\n'),
+ ('killed-a', 'z\n'),
+ ('killed-both', 'w\n'),
+ ('killed-both', 'x\n'),
+ ('new-a', 'n\n'),
+ ('new-a', 'o\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('unchanged', 'e\n'),
+ ('unchanged', 'f\n')],
+ list(plan))
+
+ def test_plan_merge_criss_cross(self):
+ # This is specifically trying to trigger problems when using limited
+ # ancestry and weaves. The ancestry graph looks like:
+ # XX unused ancestor, should not show up in the weave
+ # |
+ # A Unique LCA
+ # |\
+ # B \ Introduces a line 'foo'
+ # / \ \
+ # C D E C & D both have 'foo', E has different changes
+ # |\ /| |
+ # | X | |
+ # |/ \|/
+ # F G All of C, D, E are merged into F and G, so they are
+ # all common ancestors.
+ #
+ # The specific issue with weaves:
+ # B introduced a text ('foo') that is present in both C and D.
+ # If we do not include B (because it isn't an ancestor of E), then
+ # the A=>C and A=>D look like both sides independently introduce the
+ # text ('foo'). If F does not modify the text, it would still appear
+ # to have deleted on of the versions from C or D. If G then modifies
+ # 'foo', it should appear as superseding the value in F (since it
+ # came from B), rather than conflict because of the resolution during
+ # C & D.
+ self.add_rev('root', 'XX', [], 'qrs')
+ self.add_rev('root', 'A', ['XX'], 'abcdef')
+ self.add_rev('root', 'B', ['A'], 'axcdef')
+ self.add_rev('root', 'C', ['B'], 'axcdefg')
+ self.add_rev('root', 'D', ['B'], 'haxcdef')
+ self.add_rev('root', 'E', ['A'], 'abcdyf')
+ # Simple combining of all texts
+ self.add_rev('root', 'F', ['C', 'D', 'E'], 'haxcdyfg')
+ # combine and supersede 'x'
+ self.add_rev('root', 'G', ['C', 'D', 'E'], 'hazcdyfg')
+ plan = self.plan_merge_vf.plan_merge('F', 'G')
+ self.assertEqual([
+ ('unchanged', 'h\n'),
+ ('unchanged', 'a\n'),
+ ('killed-base', 'b\n'),
+ ('killed-b', 'x\n'),
+ ('new-b', 'z\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('killed-base', 'e\n'),
+ ('unchanged', 'y\n'),
+ ('unchanged', 'f\n'),
+ ('unchanged', 'g\n')],
+ list(plan))
+ plan = self.plan_merge_vf.plan_lca_merge('F', 'G')
+ # This is one of the main differences between plan_merge and
+ # plan_lca_merge. plan_lca_merge generates a conflict for 'x => z',
+ # because 'x' was not present in one of the bases. However, in this
+ # case it is spurious because 'x' does not exist in the global base A.
+ self.assertEqual([
+ ('unchanged', 'h\n'),
+ ('unchanged', 'a\n'),
+ ('conflicted-a', 'x\n'),
+ ('new-b', 'z\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('unchanged', 'y\n'),
+ ('unchanged', 'f\n'),
+ ('unchanged', 'g\n')],
+ list(plan))
+
+ def test_criss_cross_flip_flop(self):
+ # This is specifically trying to trigger problems when using limited
+ # ancestry and weaves. The ancestry graph looks like:
+ # XX unused ancestor, should not show up in the weave
+ # |
+ # A Unique LCA
+ # / \
+ # B C B & C both introduce a new line
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E B & C are both merged, so both are common ancestors
+ # In the process of merging, both sides order the new
+ # lines differently
+ #
+ self.add_rev('root', 'XX', [], 'qrs')
+ self.add_rev('root', 'A', ['XX'], 'abcdef')
+ self.add_rev('root', 'B', ['A'], 'abcdgef')
+ self.add_rev('root', 'C', ['A'], 'abcdhef')
+ self.add_rev('root', 'D', ['B', 'C'], 'abcdghef')
+ self.add_rev('root', 'E', ['C', 'B'], 'abcdhgef')
+ plan = list(self.plan_merge_vf.plan_merge('D', 'E'))
+ self.assertEqual([
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('new-b', 'h\n'),
+ ('unchanged', 'g\n'),
+ ('killed-b', 'h\n'),
+ ('unchanged', 'e\n'),
+ ('unchanged', 'f\n'),
+ ], plan)
+ pwm = versionedfile.PlanWeaveMerge(plan)
+ self.assertEqualDiff('\n'.join('abcdghef') + '\n',
+ ''.join(pwm.base_from_plan()))
+ # Reversing the order reverses the merge plan, and the final ordering
+ # of 'hg' becomes 'gh'
+ plan = list(self.plan_merge_vf.plan_merge('E', 'D'))
+ self.assertEqual([
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('new-b', 'g\n'),
+ ('unchanged', 'h\n'),
+ ('killed-b', 'g\n'),
+ ('unchanged', 'e\n'),
+ ('unchanged', 'f\n'),
+ ], plan)
+ pwm = versionedfile.PlanWeaveMerge(plan)
+ self.assertEqualDiff('\n'.join('abcdhgef') + '\n',
+ ''.join(pwm.base_from_plan()))
+ # This is where lca differs, in that it (fairly correctly) determines
+ # that there is a conflict because both sides resolved the merge
+ # differently
+ plan = list(self.plan_merge_vf.plan_lca_merge('D', 'E'))
+ self.assertEqual([
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('conflicted-b', 'h\n'),
+ ('unchanged', 'g\n'),
+ ('conflicted-a', 'h\n'),
+ ('unchanged', 'e\n'),
+ ('unchanged', 'f\n'),
+ ], plan)
+ pwm = versionedfile.PlanWeaveMerge(plan)
+ self.assertEqualDiff('\n'.join('abcdgef') + '\n',
+ ''.join(pwm.base_from_plan()))
+ # Reversing it changes what line is doubled, but still gives a
+ # double-conflict
+ plan = list(self.plan_merge_vf.plan_lca_merge('E', 'D'))
+ self.assertEqual([
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('conflicted-b', 'g\n'),
+ ('unchanged', 'h\n'),
+ ('conflicted-a', 'g\n'),
+ ('unchanged', 'e\n'),
+ ('unchanged', 'f\n'),
+ ], plan)
+ pwm = versionedfile.PlanWeaveMerge(plan)
+ self.assertEqualDiff('\n'.join('abcdhef') + '\n',
+ ''.join(pwm.base_from_plan()))
+
+ def assertRemoveExternalReferences(self, filtered_parent_map,
+ child_map, tails, parent_map):
+ """Assert results for _PlanMerge._remove_external_references."""
+ (act_filtered_parent_map, act_child_map,
+ act_tails) = _PlanMerge._remove_external_references(parent_map)
+
+ # The parent map *should* preserve ordering, but the ordering of
+ # children is not strictly defined
+ # child_map = dict((k, sorted(children))
+ # for k, children in child_map.iteritems())
+ # act_child_map = dict(k, sorted(children)
+ # for k, children in act_child_map.iteritems())
+ self.assertEqual(filtered_parent_map, act_filtered_parent_map)
+ self.assertEqual(child_map, act_child_map)
+ self.assertEqual(sorted(tails), sorted(act_tails))
+
+ def test__remove_external_references(self):
+ # First, nothing to remove
+ self.assertRemoveExternalReferences({3: [2], 2: [1], 1: []},
+ {1: [2], 2: [3], 3: []}, [1], {3: [2], 2: [1], 1: []})
+ # The reverse direction
+ self.assertRemoveExternalReferences({1: [2], 2: [3], 3: []},
+ {3: [2], 2: [1], 1: []}, [3], {1: [2], 2: [3], 3: []})
+ # Extra references
+ self.assertRemoveExternalReferences({3: [2], 2: [1], 1: []},
+ {1: [2], 2: [3], 3: []}, [1], {3: [2, 4], 2: [1, 5], 1: [6]})
+ # Multiple tails
+ self.assertRemoveExternalReferences(
+ {4: [2, 3], 3: [], 2: [1], 1: []},
+ {1: [2], 2: [4], 3: [4], 4: []},
+ [1, 3],
+ {4: [2, 3], 3: [5], 2: [1], 1: [6]})
+ # Multiple children
+ self.assertRemoveExternalReferences(
+ {1: [3], 2: [3, 4], 3: [], 4: []},
+ {1: [], 2: [], 3: [1, 2], 4: [2]},
+ [3, 4],
+ {1: [3], 2: [3, 4], 3: [5], 4: []})
+
+ def assertPruneTails(self, pruned_map, tails, parent_map):
+ child_map = {}
+ for key, parent_keys in parent_map.iteritems():
+ child_map.setdefault(key, [])
+ for pkey in parent_keys:
+ child_map.setdefault(pkey, []).append(key)
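+ # _prune_tails modifies parent_map in place, so assert on it directly.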
+ _PlanMerge._prune_tails(parent_map, child_map, tails)
+ self.assertEqual(pruned_map, parent_map)
+
+ def test__prune_tails(self):
+ # Nothing requested to prune
+ self.assertPruneTails({1: [], 2: [], 3: []}, [],
+ {1: [], 2: [], 3: []})
+ # Prune a single entry
+ self.assertPruneTails({1: [], 3: []}, [2],
+ {1: [], 2: [], 3: []})
+ # Prune a chain
+ self.assertPruneTails({1: []}, [3],
+ {1: [], 2: [3], 3: []})
+ # Prune a chain with a diamond
+ self.assertPruneTails({1: []}, [5],
+ {1: [], 2: [3, 4], 3: [5], 4: [5], 5: []})
+ # Prune a partial chain
+ self.assertPruneTails({1: [6], 6:[]}, [5],
+ {1: [2, 6], 2: [3, 4], 3: [5], 4: [5], 5: [],
+ 6: []})
+ # Prune a chain with multiple tips, that pulls out intermediates
+ self.assertPruneTails({1:[3], 3:[]}, [4, 5],
+ {1: [2, 3], 2: [4, 5], 3: [], 4:[], 5:[]})
+ self.assertPruneTails({1:[3], 3:[]}, [5, 4],
+ {1: [2, 3], 2: [4, 5], 3: [], 4:[], 5:[]})
+
+ def test_subtract_plans(self):
+ old_plan = [
+ ('unchanged', 'a\n'),
+ ('new-a', 'b\n'),
+ ('killed-a', 'c\n'),
+ ('new-b', 'd\n'),
+ ('new-b', 'e\n'),
+ ('killed-b', 'f\n'),
+ ('killed-b', 'g\n'),
+ ]
+ new_plan = [
+ ('unchanged', 'a\n'),
+ ('new-a', 'b\n'),
+ ('killed-a', 'c\n'),
+ ('new-b', 'd\n'),
+ ('new-b', 'h\n'),
+ ('killed-b', 'f\n'),
+ ('killed-b', 'i\n'),
+ ]
+ subtracted_plan = [
+ ('unchanged', 'a\n'),
+ ('new-a', 'b\n'),
+ ('killed-a', 'c\n'),
+ ('new-b', 'h\n'),
+ ('unchanged', 'f\n'),
+ ('killed-b', 'i\n'),
+ ]
+ self.assertEqual(subtracted_plan,
+ list(_PlanMerge._subtract_plans(old_plan, new_plan)))
+
+ def setup_merge_with_base(self):
+ self.add_rev('root', 'COMMON', [], 'abc')
+ self.add_rev('root', 'THIS', ['COMMON'], 'abcd')
+ self.add_rev('root', 'BASE', ['COMMON'], 'eabc')
+ self.add_rev('root', 'OTHER', ['BASE'], 'eafb')
+
+ def test_plan_merge_with_base(self):
+ self.setup_merge_with_base()
+ plan = self.plan_merge_vf.plan_merge('THIS', 'OTHER', 'BASE')
+ self.assertEqual([('unchanged', 'a\n'),
+ ('new-b', 'f\n'),
+ ('unchanged', 'b\n'),
+ ('killed-b', 'c\n'),
+ ('new-a', 'd\n')
+ ], list(plan))
+
+ def test_plan_lca_merge(self):
+ self.setup_plan_merge()
+ plan = self.plan_merge_vf.plan_lca_merge('B', 'C')
+ self.assertEqual([
+ ('new-b', 'f\n'),
+ ('unchanged', 'a\n'),
+ ('killed-b', 'c\n'),
+ ('new-a', 'e\n'),
+ ('new-a', 'h\n'),
+ ('killed-a', 'b\n'),
+ ('unchanged', 'g\n')],
+ list(plan))
+
+ def test_plan_lca_merge_uncommitted_files(self):
+ self.setup_plan_merge_uncommitted()
+ plan = self.plan_merge_vf.plan_lca_merge('B:', 'C:')
+ self.assertEqual([
+ ('new-b', 'f\n'),
+ ('unchanged', 'a\n'),
+ ('killed-b', 'c\n'),
+ ('new-a', 'e\n'),
+ ('new-a', 'h\n'),
+ ('killed-a', 'b\n'),
+ ('unchanged', 'g\n')],
+ list(plan))
+
+ def test_plan_lca_merge_with_base(self):
+ self.setup_merge_with_base()
+ plan = self.plan_merge_vf.plan_lca_merge('THIS', 'OTHER', 'BASE')
+ self.assertEqual([('unchanged', 'a\n'),
+ ('new-b', 'f\n'),
+ ('unchanged', 'b\n'),
+ ('killed-b', 'c\n'),
+ ('new-a', 'd\n')
+ ], list(plan))
+
+ def test_plan_lca_merge_with_criss_cross(self):
+ self.add_version(('root', 'ROOT'), [], 'abc')
+ # each side makes a change
+ self.add_version(('root', 'REV1'), [('root', 'ROOT')], 'abcd')
+ self.add_version(('root', 'REV2'), [('root', 'ROOT')], 'abce')
+ # both sides merge, discarding others' changes
+ self.add_version(('root', 'LCA1'),
+ [('root', 'REV1'), ('root', 'REV2')], 'abcd')
+ self.add_version(('root', 'LCA2'),
+ [('root', 'REV1'), ('root', 'REV2')], 'fabce')
+ plan = self.plan_merge_vf.plan_lca_merge('LCA1', 'LCA2')
+ self.assertEqual([('new-b', 'f\n'),
+ ('unchanged', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('conflicted-a', 'd\n'),
+ ('conflicted-b', 'e\n'),
+ ], list(plan))
+
+ def test_plan_lca_merge_with_null(self):
+ self.add_version(('root', 'A'), [], 'ab')
+ self.add_version(('root', 'B'), [], 'bc')
+ plan = self.plan_merge_vf.plan_lca_merge('A', 'B')
+ self.assertEqual([('new-a', 'a\n'),
+ ('unchanged', 'b\n'),
+ ('new-b', 'c\n'),
+ ], list(plan))
+
+ def test_plan_merge_with_delete_and_change(self):
+ self.add_rev('root', 'C', [], 'a')
+ self.add_rev('root', 'A', ['C'], 'b')
+ self.add_rev('root', 'B', ['C'], '')
+ plan = self.plan_merge_vf.plan_merge('A', 'B')
+ self.assertEqual([('killed-both', 'a\n'),
+ ('new-a', 'b\n'),
+ ], list(plan))
+
+ def test_plan_merge_with_move_and_change(self):
+ self.add_rev('root', 'C', [], 'abcd')
+ self.add_rev('root', 'A', ['C'], 'acbd')
+ self.add_rev('root', 'B', ['C'], 'aBcd')
+ plan = self.plan_merge_vf.plan_merge('A', 'B')
+ self.assertEqual([('unchanged', 'a\n'),
+ ('new-a', 'c\n'),
+ ('killed-b', 'b\n'),
+ ('new-b', 'B\n'),
+ ('killed-a', 'c\n'),
+ ('unchanged', 'd\n'),
+ ], list(plan))
+
+
+class LoggingMerger(object):
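+ # A stand-in merge_type that simply records the arguments Merger passes
+ # to it; the tests below inspect self.kwargs (e.g. for 'lca_trees').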
+ # These seem to be the required attributes
+ requires_base = False
+ supports_reprocess = False
+ supports_show_base = False
+ supports_cherrypick = False
+ # We intentionally do not define supports_lca_trees
+
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class TestMergerBase(TestCaseWithMemoryTransport):
+ """Common functionality for Merger tests that don't write to disk."""
+
+ def get_builder(self):
+ builder = self.make_branch_builder('path')
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ return builder
+
+ def setup_simple_graph(self):
+ """Create a simple 3-node graph.
+
+ :return: A BranchBuilder
+ """
+ #
+ # A
+ # |\
+ # B C
+ #
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', ('', None, 'directory', None))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ return builder
+
+ def setup_criss_cross_graph(self):
+ """Create a 5-node graph with a criss-cross.
+
+ :return: A BranchBuilder
+ """
+ # A
+ # |\
+ # B C
+ # |X|
+ # D E
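+ # (D is committed after E below, so D ends up as the branch tip and
+ # hence the THIS side when make_Merger is used; the LCA-ordering
+ # assertions in TestMergerInMemory depend on which side is the tip.)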
+ builder = self.setup_simple_graph()
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ return builder
+
+ def make_Merger(self, builder, other_revision_id,
+ interesting_files=None, interesting_ids=None):
+ """Make a Merger object from a branch builder"""
+ mem_tree = memorytree.MemoryTree.create_on_branch(builder.get_branch())
+ mem_tree.lock_write()
+ self.addCleanup(mem_tree.unlock)
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ mem_tree, other_revision_id)
+ merger.set_interesting_files(interesting_files)
+ # It seems there is no matching function for set_interesting_ids
+ merger.interesting_ids = interesting_ids
+ merger.merge_type = _mod_merge.Merge3Merger
+ return merger
+
+
+class TestMergerInMemory(TestMergerBase):
+
+ def test_cache_trees_with_revision_ids_None(self):
+ merger = self.make_Merger(self.setup_simple_graph(), 'C-id')
+ original_cache = dict(merger._cached_trees)
+ merger.cache_trees_with_revision_ids([None])
+ self.assertEqual(original_cache, merger._cached_trees)
+
+ def test_cache_trees_with_revision_ids_no_revision_id(self):
+ merger = self.make_Merger(self.setup_simple_graph(), 'C-id')
+ original_cache = dict(merger._cached_trees)
+ tree = self.make_branch_and_memory_tree('tree')
+ merger.cache_trees_with_revision_ids([tree])
+ self.assertEqual(original_cache, merger._cached_trees)
+
+ def test_cache_trees_with_revision_ids_having_revision_id(self):
+ merger = self.make_Merger(self.setup_simple_graph(), 'C-id')
+ original_cache = dict(merger._cached_trees)
+ tree = merger.this_branch.repository.revision_tree('B-id')
+ original_cache['B-id'] = tree
+ merger.cache_trees_with_revision_ids([tree])
+ self.assertEqual(original_cache, merger._cached_trees)
+
+ def test_find_base(self):
+ merger = self.make_Merger(self.setup_simple_graph(), 'C-id')
+ self.assertEqual('A-id', merger.base_rev_id)
+ self.assertFalse(merger._is_criss_cross)
+ self.assertIs(None, merger._lca_trees)
+
+ def test_find_base_criss_cross(self):
+ builder = self.setup_criss_cross_graph()
+ merger = self.make_Merger(builder, 'E-id')
+ self.assertEqual('A-id', merger.base_rev_id)
+ self.assertTrue(merger._is_criss_cross)
+ self.assertEqual(['B-id', 'C-id'], [t.get_revision_id()
+ for t in merger._lca_trees])
+ # If we swap the order, we should get a different lca order
+ builder.build_snapshot('F-id', ['E-id'], [])
+ merger = self.make_Merger(builder, 'D-id')
+ self.assertEqual(['C-id', 'B-id'], [t.get_revision_id()
+ for t in merger._lca_trees])
+
+ def test_find_base_triple_criss_cross(self):
+ # A-.
+ # / \ \
+ # B C F # F is merged into both branches
+ # |\ /| |
+ # | X | |\
+ # |/ \| | :
+ # : D E |
+ # \| |/
+ # G H
+ builder = self.setup_criss_cross_graph()
+ builder.build_snapshot('F-id', ['A-id'], [])
+ builder.build_snapshot('H-id', ['E-id', 'F-id'], [])
+ builder.build_snapshot('G-id', ['D-id', 'F-id'], [])
+ merger = self.make_Merger(builder, 'H-id')
+ self.assertEqual(['B-id', 'C-id', 'F-id'],
+ [t.get_revision_id() for t in merger._lca_trees])
+
+ def test_find_base_new_root_criss_cross(self):
+ # A B
+ # |\ /|
+ # | X |
+ # |/ \|
+ # C D
+
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', ('', None, 'directory', None))])
+ builder.build_snapshot('B-id', [],
+ [('add', ('', None, 'directory', None))])
+ builder.build_snapshot('D-id', ['A-id', 'B-id'], [])
+ builder.build_snapshot('C-id', ['A-id', 'B-id'], [])
+ merger = self.make_Merger(builder, 'D-id')
+ self.assertEqual('A-id', merger.base_rev_id)
+ self.assertTrue(merger._is_criss_cross)
+ self.assertEqual(['A-id', 'B-id'], [t.get_revision_id()
+ for t in merger._lca_trees])
+
+ def test_no_criss_cross_passed_to_merge_type(self):
+ class LCATreesMerger(LoggingMerger):
+ supports_lca_trees = True
+
+ merger = self.make_Merger(self.setup_simple_graph(), 'C-id')
+ merger.merge_type = LCATreesMerger
+ merge_obj = merger.make_merger()
+ self.assertIsInstance(merge_obj, LCATreesMerger)
+ self.assertFalse('lca_trees' in merge_obj.kwargs)
+
+ def test_criss_cross_passed_to_merge_type(self):
+ merger = self.make_Merger(self.setup_criss_cross_graph(), 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ self.assertEqual(['B-id', 'C-id'], [t.get_revision_id()
+ for t in merger._lca_trees])
+
+ def test_criss_cross_not_supported_merge_type(self):
+ merger = self.make_Merger(self.setup_criss_cross_graph(), 'E-id')
+ # We explicitly do not define supports_lca_trees
+ merger.merge_type = LoggingMerger
+ merge_obj = merger.make_merger()
+ self.assertIsInstance(merge_obj, LoggingMerger)
+ self.assertFalse('lca_trees' in merge_obj.kwargs)
+
+ def test_criss_cross_unsupported_merge_type(self):
+ class UnsupportedLCATreesMerger(LoggingMerger):
+ supports_lca_trees = False
+
+ merger = self.make_Merger(self.setup_criss_cross_graph(), 'E-id')
+ merger.merge_type = UnsupportedLCATreesMerger
+ merge_obj = merger.make_merger()
+ self.assertIsInstance(merge_obj, UnsupportedLCATreesMerger)
+ self.assertFalse('lca_trees' in merge_obj.kwargs)
+
+
+class TestMergerEntriesLCA(TestMergerBase):
+
+ def make_merge_obj(self, builder, other_revision_id,
+ interesting_files=None, interesting_ids=None):
+ merger = self.make_Merger(builder, other_revision_id,
+ interesting_files=interesting_files,
+ interesting_ids=interesting_ids)
+ return merger.make_merger()
+
+ def test_simple(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('modify', ('a-id', 'a\nb\nC\nc\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('a-id', 'a\nB\nb\nc\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'a\nB\nb\nC\nc\nE\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('modify', ('a-id', 'a\nB\nb\nC\nc\n'))])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ self.assertEqual(['B-id', 'C-id'], [t.get_revision_id()
+ for t in merge_obj._lca_trees])
+ self.assertEqual('A-id', merge_obj.base_tree.get_revision_id())
+ entries = list(merge_obj._entries_lca())
+
+ # (file_id, changed, parents, names, executable)
+ # BASE, lca1, lca2, OTHER, THIS
+ root_id = 'a-root-id'
+ self.assertEqual([('a-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'a', [u'a', u'a']), u'a', u'a'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_not_in_base(self):
+ # LCAs all have the same last-modified revision for the file, as do
+ # the tips, but the base has something different
+ # A base, doesn't have the file
+ # |\
+ # B C B introduces 'foo', C introduces 'bar'
+ # |X|
+ # D E D and E now both have 'foo' and 'bar'
+ # |X|
+ # F G the files are now in F, G, D and E, but not in A
+ # G modifies 'bar'
+
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('add', (u'foo', 'foo-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('add', (u'bar', 'bar-id', 'file', 'd\ne\nf\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('add', (u'bar', 'bar-id', 'file', 'd\ne\nf\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('add', (u'foo', 'foo-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('G-id', ['E-id', 'D-id'],
+ [('modify', (u'bar-id', 'd\ne\nf\nG\n'))])
+ builder.build_snapshot('F-id', ['D-id', 'E-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'G-id')
+
+ self.assertEqual(['D-id', 'E-id'], [t.get_revision_id()
+ for t in merge_obj._lca_trees])
+ self.assertEqual('A-id', merge_obj.base_tree.get_revision_id())
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('bar-id', True,
+ ((None, [root_id, root_id]), root_id, root_id),
+ ((None, [u'bar', u'bar']), u'bar', u'bar'),
+ ((None, [False, False]), False, False)),
+ ], entries)
+
+ def test_not_in_this(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('a-id', 'a\nB\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('modify', ('a-id', 'a\nb\nC\nc\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'a\nB\nb\nC\nc\nE\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('unversion', 'a-id')])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ self.assertEqual(['B-id', 'C-id'], [t.get_revision_id()
+ for t in merge_obj._lca_trees])
+ self.assertEqual('A-id', merge_obj.base_tree.get_revision_id())
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('a-id', True,
+ ((root_id, [root_id, root_id]), root_id, None),
+ ((u'a', [u'a', u'a']), u'a', None),
+ ((False, [False, False]), False, None)),
+ ], entries)
+
+ def test_file_not_in_one_lca(self):
+ # A # just root
+ # |\
+ # B C # B no file, C introduces a file
+ # |X|
+ # D E # D and E both have the file, unchanged from C
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], []) # Inherited from C
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], # Merged from C
+ [('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ self.assertEqual(['B-id', 'C-id'], [t.get_revision_id()
+ for t in merge_obj._lca_trees])
+ self.assertEqual('A-id', merge_obj.base_tree.get_revision_id())
+
+ entries = list(merge_obj._entries_lca())
+ self.assertEqual([], entries)
+
+ def test_not_in_other(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('unversion', 'a-id')])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('a-id', True,
+ ((root_id, [root_id, root_id]), None, root_id),
+ ((u'a', [u'a', u'a']), None, u'a'),
+ ((False, [False, False]), None, False)),
+ ], entries)
+
+ def test_not_in_other_or_lca(self):
+ # A base, introduces 'foo'
+ # |\
+ # B C B nothing, C deletes foo
+ # |X|
+ # D E D restores foo (same as B), E leaves it deleted
+ # Analysis:
+ # A => B, no changes
+ # A => C, delete foo (C should supersede B)
+ # C => D, restore foo
+ # C => E, no changes
+ # D would then win 'cleanly' and no record would be given
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('unversion', 'foo-id')])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ self.assertEqual([], entries)
+
+ def test_not_in_other_mod_in_lca1_not_in_lca2(self):
+ # A base, introduces 'foo'
+ # |\
+ # B C B changes 'foo', C deletes foo
+ # |X|
+ # D E D restores foo (same as B), E leaves it deleted (as C)
+ # Analysis:
+ # A => B, modified foo
+ # A => C, delete foo, C does not supersede B
+ # B => D, no changes
+ # C => D, resolve in favor of B
+ # B => E, resolve in favor of E
+ # C => E, no changes
+ # In this case, we have a conflict of how the changes were resolved. E
+ # picked C and D picked B, so we should issue a conflict
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [
+ ('modify', ('foo-id', 'new-content\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('unversion', 'foo-id')])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('foo-id', True,
+ ((root_id, [root_id, None]), None, root_id),
+ ((u'foo', [u'foo', None]), None, 'foo'),
+ ((False, [False, None]), None, False)),
+ ], entries)
+
+ def test_only_in_one_lca(self):
+ # A add only root
+ # |\
+ # B C B nothing, C add file
+ # |X|
+ # D E D still has nothing, E removes file
+ # Analysis:
+ # B => D, no change
+ # C => D, removed the file
+ # B => E, no change
+ # C => E, removed the file
+ # Thus D & E have identical changes, and this is a no-op
+ # Alternatively:
+ # A => B, no change
+ # A => C, add file, thus C supersedes B
+ # w/ C=BASE, D=THIS, E=OTHER we have 'happy convergence'
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('unversion', 'a-id')])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ self.assertEqual([], entries)
+
+ def test_only_in_other(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('a-id', True,
+ ((None, [None, None]), root_id, None),
+ ((None, [None, None]), u'a', None),
+ ((None, [None, None]), False, None)),
+ ], entries)
+
+ def test_one_lca_supersedes(self):
+ # One LCA supersedes the other LCAs last modified value, but the
+ # value is not the same as BASE.
+ # A base, introduces 'foo', last mod A
+ # |\
+ # B C B modifies 'foo' (mod B), C does nothing (mod A)
+ # |X|
+ # D E D does nothing (mod B), E updates 'foo' (mod E)
+ # |X|
+ # F G F updates 'foo' (mod F). G does nothing (mod E)
+ #
+ # At this point, G should not be considered to modify 'foo', even
+ # though its LCAs disagree. This is because the modification in E
+ # completely supersedes the value in D.
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('foo-id', 'E content\n'))])
+ builder.build_snapshot('G-id', ['E-id', 'D-id'], [])
+ builder.build_snapshot('F-id', ['D-id', 'E-id'],
+ [('modify', ('foo-id', 'F content\n'))])
+ merge_obj = self.make_merge_obj(builder, 'G-id')
+
+ self.assertEqual([], list(merge_obj._entries_lca()))
+
+ def test_one_lca_supersedes_path(self):
+ # Double-criss-cross merge, the ultimate base value is different from
+ # the intermediate.
+ # A value 'foo'
+ # |\
+ # B C B value 'bar', C = 'foo'
+ # |X|
+ # D E D = 'bar', E supersedes to 'bing'
+ # |X|
+ # F G F = 'bing', G supersedes to 'barry'
+ #
+ # In this case, we technically should not care about the value 'bar' for
+ # D, because it was clearly superseded by E's 'bing'. The
+ # per-file/attribute graph would actually look like:
+ # A
+ # |
+ # B
+ # |
+ # E
+ # |
+ # G
+ #
+ # Because the other side of the merge never modifies the value, it just
+ # takes the value from the merge.
+ #
+ # ATM this fails because we will prune 'foo' from the LCAs, but we
+ # won't prune 'bar'. This is getting far off into edge-case land, so we
+ # aren't supporting it yet.
+ #
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('rename', ('foo', 'bing'))]) # override to bing
+ builder.build_snapshot('G-id', ['E-id', 'D-id'],
+ [('rename', ('bing', 'barry'))]) # override to barry
+ builder.build_snapshot('F-id', ['D-id', 'E-id'],
+ [('rename', ('bar', 'bing'))]) # Merge in E's change
+ merge_obj = self.make_merge_obj(builder, 'G-id')
+
+ self.expectFailure("We don't do an actual heads() check on lca values,"
+ " or use the per-attribute graph",
+ self.assertEqual, [], list(merge_obj._entries_lca()))
+
+ def test_one_lca_accidentally_pruned(self):
+ # Another incorrect resolution from the same basic flaw:
+ # A value 'foo'
+ # |\
+ # B C B value 'bar', C = 'foo'
+ # |X|
+ # D E D = 'bar', E reverts to 'foo'
+ # |X|
+ # F G F = 'bing', G switches to 'bar'
+ #
+ # 'bar' will not be seen as an interesting change, because 'foo' will
+ # be pruned from the LCAs, even though it was newly introduced by E
+ # (superseding B).
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('G-id', ['E-id', 'D-id'],
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('F-id', ['D-id', 'E-id'],
+ [('rename', ('bar', 'bing'))]) # should end up conflicting
+ merge_obj = self.make_merge_obj(builder, 'G-id')
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.expectFailure("We prune values from BASE even when relevant.",
+ self.assertEqual,
+ [('foo-id', False,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'foo', [u'bar', u'foo']), u'bar', u'bing'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_both_sides_revert(self):
+ # Both sides of a criss-cross revert the text to the lca
+ # A base, introduces 'foo'
+ # |\
+ # B C B modifies 'foo', C modifies 'foo'
+ # |X|
+ # D E D reverts to B, E reverts to C
+ # This should conflict
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('modify', ('foo-id', 'C content\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('foo-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'foo', [u'foo', u'foo']), u'foo', u'foo'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_different_lca_resolve_one_side_updates_content(self):
+ # Both sides converge, but then one side updates the text.
+ # A base, introduces 'foo'
+ # |\
+ # B C B modifies 'foo', C modifies 'foo'
+ # |X|
+ # D E D reverts to B, E reverts to C
+ # |
+ # F F updates to a new value
+ # We need to emit an entry for 'foo', because D & E differed on the
+ # merge resolution
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('modify', ('foo-id', 'C content\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ builder.build_snapshot('F-id', ['D-id'],
+ [('modify', ('foo-id', 'F content\n'))])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('foo-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'foo', [u'foo', u'foo']), u'foo', u'foo'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_same_lca_resolution_one_side_updates_content(self):
+ # Both sides converge, but then one side updates the text.
+ # A base, introduces 'foo'
+ # |\
+ # B C B modifies 'foo', C modifies 'foo'
+ # |X|
+ # D E D and E use C's value
+ # |
+ # F F updates to a new value
+ # I think it is a bug that this conflicts, but we don't have a way to
+ # detect otherwise. And because of:
+ # test_different_lca_resolve_one_side_updates_content
+ # We need to conflict.
+
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('modify', ('foo-id', 'C content\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('modify', ('foo-id', 'C content\n'))]) # Same as E
+ builder.build_snapshot('F-id', ['D-id'],
+ [('modify', ('foo-id', 'F content\n'))])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+
+ entries = list(merge_obj._entries_lca())
+ self.expectFailure("We don't detect that LCA resolution was the"
+ " same on both sides",
+ self.assertEqual, [], entries)
+
+ def test_only_path_changed(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('rename', (u'a', u'b'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ # The content was not changed, only the path
+ self.assertEqual([('a-id', False,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'a', [u'a', u'a']), u'b', u'a'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_kind_changed(self):
+ # Identical content, except 'D' changes a-id into a directory
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('unversion', 'a-id'),
+ ('flush', None),
+ ('add', (u'a', 'a-id', 'directory', None))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ # Only the kind was changed (content)
+ self.assertEqual([('a-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'a', [u'a', u'a']), u'a', u'a'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_this_changed_kind(self):
+ # Identical content, but THIS changes a file to a directory
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('unversion', 'a-id'),
+ ('flush', None),
+ ('add', (u'a', 'a-id', 'directory', None))])
+ merge_obj = self.make_merge_obj(builder, 'E-id')
+ entries = list(merge_obj._entries_lca())
+ # Only the kind was changed (content)
+ self.assertEqual([], entries)
+
+ def test_interesting_files(self):
+ # Two files modified, but we should filter one of them
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n')),
+ ('add', (u'b', 'b-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'new-content\n')),
+ ('modify', ('b-id', 'new-content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id',
+ interesting_files=['b'])
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('b-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'b', [u'b', u'b']), u'b', u'b'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_interesting_file_in_this(self):
+ # This renamed the file, but it should still match the entry in other
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n')),
+ ('add', (u'b', 'b-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'new-content\n')),
+ ('modify', ('b-id', 'new-content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('rename', ('b', 'c'))])
+ merge_obj = self.make_merge_obj(builder, 'E-id',
+ interesting_files=['c'])
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('b-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'b', [u'b', u'b']), u'b', u'c'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_interesting_file_in_base(self):
+ # Both LCAs renamed the file; the filter should still match via its path in BASE
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n')),
+ ('add', (u'c', 'c-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('c', 'b'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('rename', ('c', 'b'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'new-content\n')),
+ ('modify', ('c-id', 'new-content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id',
+ interesting_files=['c'])
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('c-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'c', [u'b', u'b']), u'b', u'b'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_interesting_file_in_lca(self):
+ # The filter path only matches in one LCA; the entry should still be found
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n')),
+ ('add', (u'b', 'b-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('b', 'c'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'new-content\n')),
+ ('modify', ('b-id', 'new-content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('rename', ('c', 'b'))])
+ merge_obj = self.make_merge_obj(builder, 'E-id',
+ interesting_files=['c'])
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('b-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'b', [u'c', u'b']), u'b', u'b'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_interesting_ids(self):
+ # Two files modified, but we should filter one of them
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'content\n')),
+ ('add', (u'b', 'b-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('a-id', 'new-content\n')),
+ ('modify', ('b-id', 'new-content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ merge_obj = self.make_merge_obj(builder, 'E-id',
+ interesting_ids=['b-id'])
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('b-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'b', [u'b', u'b']), u'b', u'b'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+
+
+class TestMergerEntriesLCAOnDisk(tests.TestCaseWithTransport):
+
+ def get_builder(self):
+ builder = self.make_branch_builder('path')
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ return builder
+
+ def get_wt_from_builder(self, builder):
+ """Get a real WorkingTree from the builder."""
+ the_branch = builder.get_branch()
+ wt = the_branch.bzrdir.create_workingtree()
+ # Note: This is a little bit ugly, but we are holding the branch
+ # write-locked as part of the build process, and we would like to
+ # maintain that. So we just force the WT to re-use the same
+ # branch object.
+ wt._branch = the_branch
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ return wt
+
+ def do_merge(self, builder, other_revision_id):
+ wt = self.get_wt_from_builder(builder)
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, other_revision_id)
+ merger.merge_type = _mod_merge.Merge3Merger
+ return wt, merger.do_merge()
+
+ def test_simple_lca(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('modify', ('a-id', 'a\nb\nc\nd\ne\nf\n'))])
+ wt, conflicts = self.do_merge(builder, 'E-id')
+ self.assertEqual(0, conflicts)
+ # The merge should have simply updated the contents of 'a'
+ self.assertEqual('a\nb\nc\nd\ne\nf\n', wt.get_file_text('a-id'))
+
+ def test_conflict_without_lca(self):
+ # This test would cause a merge conflict, unless we use the lca trees
+ # to determine the real ancestry
+ # A Path at 'foo'
+ # / \
+ # B C Path renamed to 'bar' in B
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E Path at 'bar' in D and E
+ # |
+ # F Path at 'baz' in F, which supersedes 'bar' and 'foo'
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], # merge the rename
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('F-id', ['E-id'],
+ [('rename', ('bar', 'baz'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ wt, conflicts = self.do_merge(builder, 'F-id')
+ self.assertEqual(0, conflicts)
+ # The merge should simply recognize that the final rename takes
+ # precedence
+ self.assertEqual('baz', wt.id2path('foo-id'))
+
+ def test_other_deletes_lca_renames(self):
+ # This test would cause a merge conflict, unless we use the lca trees
+ # to determine the real ancestry
+ # A Path at 'foo'
+ # / \
+ # B C Path renamed to 'bar' in B
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E Path at 'bar' in D and E
+ # |
+ # F F deletes 'bar'
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], # merge the rename
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('F-id', ['E-id'],
+ [('unversion', 'foo-id')])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ wt, conflicts = self.do_merge(builder, 'F-id')
+ self.assertEqual(0, conflicts)
+ self.assertRaises(errors.NoSuchId, wt.id2path, 'foo-id')
+
+ def test_executable_changes(self):
+ # A Path at 'foo'
+ # / \
+ # B C
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E
+ # |
+ # F Executable bit changed
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ # Have to use a real WT, because BranchBuilder doesn't support exec bit
+ wt = self.get_wt_from_builder(builder)
+ tt = transform.TreeTransform(wt)
+ try:
+ tt.set_executability(True, tt.trans_id_tree_file_id('foo-id'))
+ tt.apply()
+ except:
+ tt.finalize()
+ raise
+ self.assertTrue(wt.is_executable('foo-id'))
+ wt.commit('F-id', rev_id='F-id')
+ # Reset to D, so that we can merge F
+ wt.set_parent_ids(['D-id'])
+ wt.branch.set_last_revision_info(3, 'D-id')
+ wt.revert()
+ self.assertFalse(wt.is_executable('foo-id'))
+ conflicts = wt.merge_from_branch(wt.branch, to_revision='F-id')
+ self.assertEqual(0, conflicts)
+ self.assertTrue(wt.is_executable('foo-id'))
+
+ def test_create_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ # A
+ # / \
+ # B C
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E
+ # |
+ # F Add a symlink 'foo' => 'bar'
+ # Have to use a real WT, because BranchBuilder and MemoryTree don't
+ # have symlink support
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ # Have to use a real WT, because BranchBuilder doesn't support symlinks
+ wt = self.get_wt_from_builder(builder)
+ os.symlink('bar', 'path/foo')
+ wt.add(['foo'], ['foo-id'])
+ self.assertEqual('bar', wt.get_symlink_target('foo-id'))
+ wt.commit('add symlink', rev_id='F-id')
+ # Reset to D, so that we can merge F
+ wt.set_parent_ids(['D-id'])
+ wt.branch.set_last_revision_info(3, 'D-id')
+ wt.revert()
+ self.assertIs(None, wt.path2id('foo'))
+ conflicts = wt.merge_from_branch(wt.branch, to_revision='F-id')
+ self.assertEqual(0, conflicts)
+ self.assertEqual('foo-id', wt.path2id('foo'))
+ self.assertEqual('bar', wt.get_symlink_target('foo-id'))
+
+ def test_both_sides_revert(self):
+ # Both sides of a criss-cross revert the text to the lca
+ # A base, introduces 'foo'
+ # |\
+ # B C B modifies 'foo', C modifies 'foo'
+ # |X|
+ # D E D reverts to B, E reverts to C
+ # This should conflict
+ # This must be done with a real WorkingTree, because normally its
+ # inventory contains "None" rather than a real sha1
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'A content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('modify', ('foo-id', 'C content\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ wt, conflicts = self.do_merge(builder, 'E-id')
+ self.assertEqual(1, conflicts)
+ self.assertEqualDiff('<<<<<<< TREE\n'
+ 'B content\n'
+ '=======\n'
+ 'C content\n'
+ '>>>>>>> MERGE-SOURCE\n',
+ wt.get_file_text('foo-id'))
+
+ def test_modified_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ # A Create symlink foo => bar
+ # / \
+ # B C B relinks foo => baz
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E D & E have foo => baz
+ # |
+ # F F changes it to bing
+ #
+ # Merging D & F should result in F cleanly overriding D, because D's
+ # value actually comes from B
+
+ # Have to use a real WT, because BranchBuilder and MemoryTree don't
+ # have symlink support
+ wt = self.make_branch_and_tree('path')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ os.symlink('bar', 'path/foo')
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('add symlink', rev_id='A-id')
+ os.remove('path/foo')
+ os.symlink('baz', 'path/foo')
+ wt.commit('foo => baz', rev_id='B-id')
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.revert()
+ wt.commit('C', rev_id='C-id')
+ wt.merge_from_branch(wt.branch, 'B-id')
+ self.assertEqual('baz', wt.get_symlink_target('foo-id'))
+ wt.commit('E merges C & B', rev_id='E-id')
+ os.remove('path/foo')
+ os.symlink('bing', 'path/foo')
+ wt.commit('F foo => bing', rev_id='F-id')
+ wt.set_last_revision('B-id')
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.revert()
+ wt.merge_from_branch(wt.branch, 'C-id')
+ wt.commit('D merges B & C', rev_id='D-id')
+ conflicts = wt.merge_from_branch(wt.branch, to_revision='F-id')
+ self.assertEqual(0, conflicts)
+ self.assertEqual('bing', wt.get_symlink_target('foo-id'))
+
+ def test_renamed_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ # A Create symlink foo => bar
+ # / \
+ # B C B renames foo => barry
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E D & E have barry
+ # |
+ # F F renames barry to blah
+ #
+ # Merging D & F should result in F cleanly overriding D, because D's
+ # value actually comes from B
+
+ wt = self.make_branch_and_tree('path')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ os.symlink('bar', 'path/foo')
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('A add symlink', rev_id='A-id')
+ wt.rename_one('foo', 'barry')
+ wt.commit('B foo => barry', rev_id='B-id')
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.revert()
+ wt.commit('C', rev_id='C-id')
+ wt.merge_from_branch(wt.branch, 'B-id')
+ self.assertEqual('barry', wt.id2path('foo-id'))
+ self.assertEqual('bar', wt.get_symlink_target('foo-id'))
+ wt.commit('E merges C & B', rev_id='E-id')
+ wt.rename_one('barry', 'blah')
+ wt.commit('F barry => blah', rev_id='F-id')
+ wt.set_last_revision('B-id')
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.revert()
+ wt.merge_from_branch(wt.branch, 'C-id')
+ wt.commit('D merges B & C', rev_id='D-id')
+ self.assertEqual('barry', wt.id2path('foo-id'))
+ # Check the output of the Merger object directly
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'F-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ root_id = wt.path2id('')
+ entries = list(merge_obj._entries_lca())
+ # No content change, just a path change
+ self.assertEqual([('foo-id', False,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'foo', [u'barry', u'foo']), u'blah', u'barry'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+ conflicts = wt.merge_from_branch(wt.branch, to_revision='F-id')
+ self.assertEqual(0, conflicts)
+ self.assertEqual('blah', wt.id2path('foo-id'))
+
+ def test_symlink_no_content_change(self):
+ self.requireFeature(features.SymlinkFeature)
+ # A Create symlink foo => bar
+ # / \
+ # B C B relinks foo => baz
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E D & E have foo => baz
+ # |
+ # F F has foo => bing
+ #
+ # Merging E into F should not cause a conflict, because E doesn't have
+ # a content change relative to the LCAs (it does relative to A)
+ wt = self.make_branch_and_tree('path')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ os.symlink('bar', 'path/foo')
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('add symlink', rev_id='A-id')
+ os.remove('path/foo')
+ os.symlink('baz', 'path/foo')
+ wt.commit('foo => baz', rev_id='B-id')
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.revert()
+ wt.commit('C', rev_id='C-id')
+ wt.merge_from_branch(wt.branch, 'B-id')
+ self.assertEqual('baz', wt.get_symlink_target('foo-id'))
+ wt.commit('E merges C & B', rev_id='E-id')
+ wt.set_last_revision('B-id')
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.revert()
+ wt.merge_from_branch(wt.branch, 'C-id')
+ wt.commit('D merges B & C', rev_id='D-id')
+ os.remove('path/foo')
+ os.symlink('bing', 'path/foo')
+ wt.commit('F foo => bing', rev_id='F-id')
+
+ # Check the output of the Merger object directly
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ # Nothing interesting happened in OTHER relative to BASE
+ self.assertEqual([], list(merge_obj._entries_lca()))
+ # Now do a real merge, just to test the rest of the stack
+ conflicts = wt.merge_from_branch(wt.branch, to_revision='E-id')
+ self.assertEqual(0, conflicts)
+ self.assertEqual('bing', wt.get_symlink_target('foo-id'))
+
+ def test_symlink_this_changed_kind(self):
+ self.requireFeature(features.SymlinkFeature)
+ # A Nothing
+ # / \
+ # B C B creates symlink foo => bar
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E D changes foo into a file, E has foo => bing
+ #
+ # Mostly, this is trying to test that we don't try to os.readlink() on
+ # a file, or when there is nothing there
+ wt = self.make_branch_and_tree('path')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ wt.commit('base', rev_id='A-id')
+ os.symlink('bar', 'path/foo')
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('add symlink foo => bar', rev_id='B-id')
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.revert()
+ wt.commit('C', rev_id='C-id')
+ wt.merge_from_branch(wt.branch, 'B-id')
+ self.assertEqual('bar', wt.get_symlink_target('foo-id'))
+ os.remove('path/foo')
+ # We have to change the link in E, or it won't try to do a comparison
+ os.symlink('bing', 'path/foo')
+ wt.commit('E merges C & B, overrides to bing', rev_id='E-id')
+ wt.set_last_revision('B-id')
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.revert()
+ wt.merge_from_branch(wt.branch, 'C-id')
+ os.remove('path/foo')
+ self.build_tree_contents([('path/foo', 'file content\n')])
+ # XXX: workaround, WT doesn't detect kind changes unless you do
+ # iter_changes()
+ list(wt.iter_changes(wt.basis_tree()))
+ wt.commit('D merges B & C, makes it a file', rev_id='D-id')
+
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ entries = list(merge_obj._entries_lca())
+ root_id = wt.path2id('')
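+ # 'foo' does not exist in BASE (A) or in one of the LCAs, hence the Nones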
+ self.assertEqual([('foo-id', True,
+ ((None, [root_id, None]), root_id, root_id),
+ ((None, [u'foo', None]), u'foo', u'foo'),
+ ((None, [False, None]), False, False)),
+ ], entries)
+
+ def test_symlink_all_wt(self):
+ """Check behavior if all trees are Working Trees."""
+ self.requireFeature(features.SymlinkFeature)
+ # The big issue is that entry.symlink_target is None for WorkingTrees.
+ # So we need to make sure we handle that case correctly.
+ # A foo => bar
+ # |\
+ # B C B relinks foo => baz
+ # |X|
+ # D E D & E have foo => baz
+ # |
+ # F F changes it to bing
+ # Merging D & F should result in F cleanly overriding D, because D's
+ # value actually comes from B
+
+ wt = self.make_branch_and_tree('path')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ os.symlink('bar', 'path/foo')
+ wt.add(['foo'], ['foo-id'])
+ wt.commit('add symlink', rev_id='A-id')
+ os.remove('path/foo')
+ os.symlink('baz', 'path/foo')
+ wt.commit('foo => baz', rev_id='B-id')
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.revert()
+ wt.commit('C', rev_id='C-id')
+ wt.merge_from_branch(wt.branch, 'B-id')
+ self.assertEqual('baz', wt.get_symlink_target('foo-id'))
+ wt.commit('E merges C & B', rev_id='E-id')
+ os.remove('path/foo')
+ os.symlink('bing', 'path/foo')
+ wt.commit('F foo => bing', rev_id='F-id')
+ wt.set_last_revision('B-id')
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.revert()
+ wt.merge_from_branch(wt.branch, 'C-id')
+ wt.commit('D merges B & C', rev_id='D-id')
+ wt_base = wt.bzrdir.sprout('base', 'A-id').open_workingtree()
+ wt_base.lock_read()
+ self.addCleanup(wt_base.unlock)
+ wt_lca1 = wt.bzrdir.sprout('b-tree', 'B-id').open_workingtree()
+ wt_lca1.lock_read()
+ self.addCleanup(wt_lca1.unlock)
+ wt_lca2 = wt.bzrdir.sprout('c-tree', 'C-id').open_workingtree()
+ wt_lca2.lock_read()
+ self.addCleanup(wt_lca2.unlock)
+ wt_other = wt.bzrdir.sprout('other', 'F-id').open_workingtree()
+ wt_other.lock_read()
+ self.addCleanup(wt_other.unlock)
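+ # Build the merger directly with explicit LCA trees; do_merge=False only
+ # prepares it, so _entries_lca() can be inspected without merging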
+ merge_obj = _mod_merge.Merge3Merger(wt, wt, wt_base,
+ wt_other, lca_trees=[wt_lca1, wt_lca2], do_merge=False)
+ entries = list(merge_obj._entries_lca())
+ root_id = wt.path2id('')
+ self.assertEqual([('foo-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'foo', [u'foo', u'foo']), u'foo', u'foo'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_other_reverted_path_to_base(self):
+ # A Path at 'foo'
+ # / \
+ # B C Path at 'bar' in B
+ # |\ /|
+ # | X |
+ # |/ \|
+ # D E Path at 'bar'
+ # |
+ # F Path at 'foo'
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'a\nb\nc\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('rename', ('foo', 'bar'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('rename', ('foo', 'bar'))]) # merge the rename
+ builder.build_snapshot('F-id', ['E-id'],
+ [('rename', ('bar', 'foo'))]) # Rename back to BASE
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ wt, conflicts = self.do_merge(builder, 'F-id')
+ self.assertEqual(0, conflicts)
+ self.assertEqual('foo', wt.id2path('foo-id'))
+
+ def test_other_reverted_content_to_base(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'base content\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('foo-id', 'B content\n'))]) # merge the content
+ builder.build_snapshot('F-id', ['E-id'],
+ [('modify', ('foo-id', 'base content\n'))]) # Revert back to BASE
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ wt, conflicts = self.do_merge(builder, 'F-id')
+ self.assertEqual(0, conflicts)
+ # TODO: We need to use the per-file graph to properly select a BASE
+ # before this will work. Or at least use the LCA trees to find
+ # the appropriate content base. (which is B, not A).
+ self.assertEqual('base content\n', wt.get_file_text('foo-id'))
+
+ def test_other_modified_content(self):
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'foo', 'foo-id', 'file', 'base content\n'))])
+ builder.build_snapshot('C-id', ['A-id'], [])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('modify', ('foo-id', 'B content\n'))]) # merge the content
+ builder.build_snapshot('F-id', ['E-id'],
+ [('modify', ('foo-id', 'F content\n'))]) # Override B content
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [])
+ wt, conflicts = self.do_merge(builder, 'F-id')
+ self.assertEqual(0, conflicts)
+ self.assertEqual('F content\n', wt.get_file_text('foo-id'))
+
+ def test_all_wt(self):
+ """Check behavior if all trees are Working Trees."""
+ # The big issue is that entry.revision is None for WorkingTrees (as is
+ # entry.text_sha1, etc.), so we need to make sure we handle that case
+ # correctly.
+ # A Content of 'foo', path of 'a'
+ # |\
+ # B C B modifies content, C renames 'a' => 'b'
+ # |X|
+ # D E E updates content, renames 'b' => 'c'
+ builder = self.get_builder()
+ builder.build_snapshot('A-id', None,
+ [('add', (u'', 'a-root-id', 'directory', None)),
+ ('add', (u'a', 'a-id', 'file', 'base content\n')),
+ ('add', (u'foo', 'foo-id', 'file', 'base content\n'))])
+ builder.build_snapshot('B-id', ['A-id'],
+ [('modify', ('foo-id', 'B content\n'))])
+ builder.build_snapshot('C-id', ['A-id'],
+ [('rename', ('a', 'b'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'],
+ [('rename', ('b', 'c')),
+ ('modify', ('foo-id', 'E content\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'],
+ [('rename', ('a', 'b'))]) # merged change
+ wt_this = self.get_wt_from_builder(builder)
+ wt_base = wt_this.bzrdir.sprout('base', 'A-id').open_workingtree()
+ wt_base.lock_read()
+ self.addCleanup(wt_base.unlock)
+ wt_lca1 = wt_this.bzrdir.sprout('b-tree', 'B-id').open_workingtree()
+ wt_lca1.lock_read()
+ self.addCleanup(wt_lca1.unlock)
+ wt_lca2 = wt_this.bzrdir.sprout('c-tree', 'C-id').open_workingtree()
+ wt_lca2.lock_read()
+ self.addCleanup(wt_lca2.unlock)
+ wt_other = wt_this.bzrdir.sprout('other', 'E-id').open_workingtree()
+ wt_other.lock_read()
+ self.addCleanup(wt_other.unlock)
+ merge_obj = _mod_merge.Merge3Merger(wt_this, wt_this, wt_base,
+ wt_other, lca_trees=[wt_lca1, wt_lca2], do_merge=False)
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('a-id', False,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'a', [u'a', u'b']), u'c', u'b'),
+ ((False, [False, False]), False, False)),
+ ('foo-id', True,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'foo', [u'foo', u'foo']), u'foo', u'foo'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_nested_tree_unmodified(self):
+ # Tested with a real WT, because BranchBuilder/MemoryTree don't handle
+ # 'tree-reference'
+ wt = self.make_branch_and_tree('tree',
+ format='development-subtree')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ sub_tree = self.make_branch_and_tree('tree/sub-tree',
+ format='development-subtree')
+ wt.set_root_id('a-root-id')
+ sub_tree.set_root_id('sub-tree-root')
+ self.build_tree_contents([('tree/sub-tree/file', 'text1')])
+ sub_tree.add('file')
+ sub_tree.commit('foo', rev_id='sub-A-id')
+ wt.add_reference(sub_tree)
+ wt.commit('set text to 1', rev_id='A-id', recursive=None)
+ # Now create a criss-cross merge in the parent, without modifying the
+ # subtree
+ wt.commit('B', rev_id='B-id', recursive=None)
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.commit('C', rev_id='C-id', recursive=None)
+ wt.merge_from_branch(wt.branch, to_revision='B-id')
+ wt.commit('E', rev_id='E-id', recursive=None)
+ wt.set_parent_ids(['B-id', 'C-id'])
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.commit('D', rev_id='D-id', recursive=None)
+
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ entries = list(merge_obj._entries_lca())
+ self.assertEqual([], entries)
+
+ def test_nested_tree_subtree_modified(self):
+ # Tested with a real WT, because BranchBuilder/MemoryTree don't handle
+ # 'tree-reference'
+ wt = self.make_branch_and_tree('tree',
+ format='development-subtree')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ sub_tree = self.make_branch_and_tree('tree/sub',
+ format='development-subtree')
+ wt.set_root_id('a-root-id')
+ sub_tree.set_root_id('sub-tree-root')
+ self.build_tree_contents([('tree/sub/file', 'text1')])
+ sub_tree.add('file')
+ sub_tree.commit('foo', rev_id='sub-A-id')
+ wt.add_reference(sub_tree)
+ wt.commit('set text to 1', rev_id='A-id', recursive=None)
+ # Now create a criss-cross merge in the parent, without modifying the
+ # subtree
+ wt.commit('B', rev_id='B-id', recursive=None)
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.commit('C', rev_id='C-id', recursive=None)
+ wt.merge_from_branch(wt.branch, to_revision='B-id')
+ self.build_tree_contents([('tree/sub/file', 'text2')])
+ sub_tree.commit('modify contents', rev_id='sub-B-id')
+ wt.commit('E', rev_id='E-id', recursive=None)
+ wt.set_parent_ids(['B-id', 'C-id'])
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.commit('D', rev_id='D-id', recursive=None)
+
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ entries = list(merge_obj._entries_lca())
+ # Nothing interesting about this sub-tree, because content changes are
+ # computed at a higher level
+ self.assertEqual([], entries)
+
+ def test_nested_tree_subtree_renamed(self):
+ # Tested with a real WT, because BranchBuilder/MemoryTree don't handle
+ # 'tree-reference'
+ wt = self.make_branch_and_tree('tree',
+ format='development-subtree')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ sub_tree = self.make_branch_and_tree('tree/sub',
+ format='development-subtree')
+ wt.set_root_id('a-root-id')
+ sub_tree.set_root_id('sub-tree-root')
+ self.build_tree_contents([('tree/sub/file', 'text1')])
+ sub_tree.add('file')
+ sub_tree.commit('foo', rev_id='sub-A-id')
+ wt.add_reference(sub_tree)
+ wt.commit('set text to 1', rev_id='A-id', recursive=None)
+ # Now create a criss-cross merge in the parent, without modifying the
+ # subtree
+ wt.commit('B', rev_id='B-id', recursive=None)
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.commit('C', rev_id='C-id', recursive=None)
+ wt.merge_from_branch(wt.branch, to_revision='B-id')
+ wt.rename_one('sub', 'alt_sub')
+ wt.commit('E', rev_id='E-id', recursive=None)
+ wt.set_last_revision('B-id')
+ wt.revert()
+ wt.set_parent_ids(['B-id', 'C-id'])
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.commit('D', rev_id='D-id', recursive=None)
+
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('sub-tree-root', False,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'sub', [u'sub', u'sub']), u'alt_sub', u'sub'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+ def test_nested_tree_subtree_renamed_and_modified(self):
+ # Tested with a real WT, because BranchBuilder/MemoryTree don't handle
+ # 'tree-reference'
+ wt = self.make_branch_and_tree('tree',
+ format='development-subtree')
+ wt.lock_write()
+ self.addCleanup(wt.unlock)
+ sub_tree = self.make_branch_and_tree('tree/sub',
+ format='development-subtree')
+ wt.set_root_id('a-root-id')
+ sub_tree.set_root_id('sub-tree-root')
+ self.build_tree_contents([('tree/sub/file', 'text1')])
+ sub_tree.add('file')
+ sub_tree.commit('foo', rev_id='sub-A-id')
+ wt.add_reference(sub_tree)
+ wt.commit('set text to 1', rev_id='A-id', recursive=None)
+ # Now create a criss-cross merge in the parent, without modifying the
+ # subtree
+ wt.commit('B', rev_id='B-id', recursive=None)
+ wt.set_last_revision('A-id')
+ wt.branch.set_last_revision_info(1, 'A-id')
+ wt.commit('C', rev_id='C-id', recursive=None)
+ wt.merge_from_branch(wt.branch, to_revision='B-id')
+ self.build_tree_contents([('tree/sub/file', 'text2')])
+ sub_tree.commit('modify contents', rev_id='sub-B-id')
+ wt.rename_one('sub', 'alt_sub')
+ wt.commit('E', rev_id='E-id', recursive=None)
+ wt.set_last_revision('B-id')
+ wt.revert()
+ wt.set_parent_ids(['B-id', 'C-id'])
+ wt.branch.set_last_revision_info(2, 'B-id')
+ wt.commit('D', rev_id='D-id', recursive=None)
+
+ merger = _mod_merge.Merger.from_revision_ids(None,
+ wt, 'E-id')
+ merger.merge_type = _mod_merge.Merge3Merger
+ merge_obj = merger.make_merger()
+ entries = list(merge_obj._entries_lca())
+ root_id = 'a-root-id'
+ self.assertEqual([('sub-tree-root', False,
+ ((root_id, [root_id, root_id]), root_id, root_id),
+ ((u'sub', [u'sub', u'sub']), u'alt_sub', u'sub'),
+ ((False, [False, False]), False, False)),
+ ], entries)
+
+
+class TestLCAMultiWay(tests.TestCase):
+
+ def assertLCAMultiWay(self, expected, base, lcas, other, this,
+ allow_overriding_lca=True):
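+ # _lca_multi_way((base, lcas), other, this) decides which side wins for a
+ # single attribute and returns 'this', 'other' or 'conflict'; when
+ # allow_overriding_lca is False, a value not found in BASE or any LCA
+ # conflicts instead of automatically winning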
+ self.assertEqual(expected, _mod_merge.Merge3Merger._lca_multi_way(
+ (base, lcas), other, this,
+ allow_overriding_lca=allow_overriding_lca))
+
+ def test_other_equal_equal_lcas(self):
+ """Test when OTHER=LCA and all LCAs are identical."""
+ self.assertLCAMultiWay('this',
+ 'bval', ['bval', 'bval'], 'bval', 'bval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lcaval', 'lcaval'], 'lcaval', 'bval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lcaval', 'lcaval', 'lcaval'], 'lcaval', 'bval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lcaval', 'lcaval', 'lcaval'], 'lcaval', 'tval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lcaval', 'lcaval', 'lcaval'], 'lcaval', None)
+
+ def test_other_equal_this(self):
+ """Test when other and this are identical."""
+ self.assertLCAMultiWay('this',
+ 'bval', ['bval', 'bval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lcaval', 'lcaval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['cval', 'dval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ 'bval', [None, 'lcaval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ None, [None, 'lcaval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ None, ['lcaval', 'lcaval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ None, ['cval', 'dval'], 'oval', 'oval')
+ self.assertLCAMultiWay('this',
+ None, ['cval', 'dval'], None, None)
+ self.assertLCAMultiWay('this',
+ None, ['cval', 'dval', 'eval', 'fval'], 'oval', 'oval')
+
+ def test_no_lcas(self):
+ self.assertLCAMultiWay('this',
+ 'bval', [], 'bval', 'tval')
+ self.assertLCAMultiWay('other',
+ 'bval', [], 'oval', 'bval')
+ self.assertLCAMultiWay('conflict',
+ 'bval', [], 'oval', 'tval')
+ self.assertLCAMultiWay('this',
+ 'bval', [], 'oval', 'oval')
+
+ def test_lca_supersedes_other_lca(self):
+ """If one lca == base, the other lca takes precedence"""
+ self.assertLCAMultiWay('this',
+ 'bval', ['bval', 'lcaval'], 'lcaval', 'tval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['bval', 'lcaval'], 'lcaval', 'bval')
+ # This is actually considered a 'revert' because the 'lcaval' in LCAS
+ # supersedes the BASE val (in the other LCA) but then OTHER reverts it
+ # back to bval.
+ self.assertLCAMultiWay('other',
+ 'bval', ['bval', 'lcaval'], 'bval', 'lcaval')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['bval', 'lcaval'], 'bval', 'tval')
+
+ def test_other_and_this_pick_different_lca(self):
+ # OTHER and THIS resolve the lca conflict in different ways
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val'], 'lca1val', 'lca2val')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'lca1val', 'lca2val')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'bval'], 'lca1val', 'lca2val')
+
+ def test_other_in_lca(self):
+ # OTHER takes a value of one of the LCAs, THIS takes a new value, which
+ # theoretically supersedes both LCA values and 'wins'
+ self.assertLCAMultiWay('this',
+ 'bval', ['lca1val', 'lca2val'], 'lca1val', 'newval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'lca1val', 'newval')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val'], 'lca1val', 'newval',
+ allow_overriding_lca=False)
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'lca1val', 'newval',
+ allow_overriding_lca=False)
+ # THIS reverted back to BASE, but that is an explicit supersede of all
+ # LCAs
+ self.assertLCAMultiWay('this',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'lca1val', 'bval')
+ self.assertLCAMultiWay('this',
+ 'bval', ['lca1val', 'lca2val', 'bval'], 'lca1val', 'bval')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'lca1val', 'bval',
+ allow_overriding_lca=False)
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'bval'], 'lca1val', 'bval',
+ allow_overriding_lca=False)
+
+ def test_this_in_lca(self):
+ # THIS takes a value of one of the LCAs, OTHER takes a new value, which
+ # theoretically supersedes both LCA values and 'wins'
+ self.assertLCAMultiWay('other',
+ 'bval', ['lca1val', 'lca2val'], 'oval', 'lca1val')
+ self.assertLCAMultiWay('other',
+ 'bval', ['lca1val', 'lca2val'], 'oval', 'lca2val')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val'], 'oval', 'lca1val',
+ allow_overriding_lca=False)
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val'], 'oval', 'lca2val',
+ allow_overriding_lca=False)
+ # OTHER reverted back to BASE, but that is an explicit supersede of all
+ # LCAs
+ self.assertLCAMultiWay('other',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'bval', 'lca3val')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'bval', 'lca3val',
+ allow_overriding_lca=False)
+
+ def test_all_differ(self):
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val'], 'oval', 'tval')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'lca2val'], 'oval', 'tval')
+ self.assertLCAMultiWay('conflict',
+ 'bval', ['lca1val', 'lca2val', 'lca3val'], 'oval', 'tval')
+
+
+class TestConfigurableFileMerger(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestConfigurableFileMerger, self).setUp()
+ self.calls = []
+
+ def get_merger_factory(self):
+ # Allows the inner methods to access the test attributes
+ calls = self.calls
+
+ class FooMerger(_mod_merge.ConfigurableFileMerger):
+ name_prefix = "foo"
+ default_files = ['bar']
+
+ def merge_text(self, params):
+ calls.append('merge_text')
+ return ('not_applicable', None)
+
+ def factory(merger):
+ result = FooMerger(merger)
+ # Make sure we start with a clean slate
+ self.assertEqual(None, result.affected_files)
+ # Track the original merger
+ self.merger = result
+ return result
+
+ return factory
+
+ def _install_hook(self, factory):
+ _mod_merge.Merger.hooks.install_named_hook('merge_file_content',
+ factory, 'test factory')
+
+ def make_builder(self):
+ builder = test_merge_core.MergeBuilder(self.test_base_dir)
+ self.addCleanup(builder.cleanup)
+ return builder
+
+ def make_text_conflict(self, file_name='bar'):
+ factory = self.get_merger_factory()
+ self._install_hook(factory)
+ builder = self.make_builder()
+ builder.add_file('bar-id', builder.tree_root, file_name, 'text1', True)
+ builder.change_contents('bar-id', other='text4', this='text3')
+ return builder
+
+ def make_kind_change(self):
+ factory = self.get_merger_factory()
+ self._install_hook(factory)
+ builder = self.make_builder()
+ builder.add_file('bar-id', builder.tree_root, 'bar', 'text1', True,
+ this=False)
+ builder.add_dir('bar-dir', builder.tree_root, 'bar-id',
+ base=False, other=False)
+ return builder
+
+ def test_uses_this_branch(self):
+ builder = self.make_text_conflict()
+ tt = builder.make_preview_transform()
+ self.addCleanup(tt.finalize)
+
+ def test_affected_files_cached(self):
+ """Ensures that the config variable is cached"""
+ builder = self.make_text_conflict()
+ conflicts = builder.merge()
+ # The hook should set the variable
+ self.assertEqual(['bar'], self.merger.affected_files)
+ self.assertEqual(1, len(conflicts))
+
+ def test_hook_called_for_text_conflicts(self):
+ builder = self.make_text_conflict()
+ conflicts = builder.merge()
+ # The hook should call the merge_text() method
+ self.assertEqual(['merge_text'], self.calls)
+
+ def test_hook_not_called_for_kind_change(self):
+ builder = self.make_kind_change()
+ conflicts = builder.merge()
+ # The hook should not call the merge_text() method
+ self.assertEqual([], self.calls)
+
+ def test_hook_not_called_for_other_files(self):
+ builder = self.make_text_conflict('foobar')
+ conflicts = builder.merge()
+ # The hook should not call the merge_text() method
+ self.assertEqual([], self.calls)
+
+
+class TestMergeIntoBase(tests.TestCaseWithTransport):
+
+ def setup_simple_branch(self, relpath, shape=None, root_id=None):
+ """One commit, containing tree specified by optional shape.
+
+ Default is empty tree (just root entry).
+ """
+ if root_id is None:
+ root_id = '%s-root-id' % (relpath,)
+ wt = self.make_branch_and_tree(relpath)
+ wt.set_root_id(root_id)
+ if shape is not None:
+ adjusted_shape = [relpath + '/' + elem for elem in shape]
+ self.build_tree(adjusted_shape)
+ ids = ['%s-%s-id' % (relpath, basename(elem.rstrip('/')))
+ for elem in shape]
+ wt.add(shape, ids=ids)
+ rev_id = 'r1-%s' % (relpath,)
+ wt.commit("Initial commit of %s" % (relpath,), rev_id=rev_id)
+ self.assertEqual(root_id, wt.path2id(''))
+ return wt
+
+ def setup_two_branches(self, custom_root_ids=True):
+ """Setup 2 branches, one will be a library, the other a project."""
+ if custom_root_ids:
+ root_id = None
+ else:
+ root_id = inventory.ROOT_ID
+ project_wt = self.setup_simple_branch(
+ 'project', ['README', 'dir/', 'dir/file.c'],
+ root_id)
+ lib_wt = self.setup_simple_branch(
+ 'lib1', ['README', 'Makefile', 'foo.c'], root_id)
+
+ return project_wt, lib_wt
+
+ def do_merge_into(self, location, merge_as):
+ """Helper for using MergeIntoMerger.
+
+ :param location: location of directory to merge from, either the
+ location of a branch or of a path inside a branch.
+ :param merge_as: the path in a tree to add the new directory as.
+ :returns: the conflicts from 'do_merge'.
+ """
+ operation = cleanup.OperationWithCleanups(self._merge_into)
+ return operation.run(location, merge_as)
+
+ def _merge_into(self, op, location, merge_as):
+ # Open and lock the various tree and branch objects
+ wt, subdir_relpath = WorkingTree.open_containing(merge_as)
+ op.add_cleanup(wt.lock_write().unlock)
+ branch_to_merge, subdir_to_merge = _mod_branch.Branch.open_containing(
+ location)
+ op.add_cleanup(branch_to_merge.lock_read().unlock)
+ other_tree = branch_to_merge.basis_tree()
+ op.add_cleanup(other_tree.lock_read().unlock)
+ # Perform the merge
+ merger = _mod_merge.MergeIntoMerger(this_tree=wt, other_tree=other_tree,
+ other_branch=branch_to_merge, target_subdir=subdir_relpath,
+ source_subpath=subdir_to_merge)
+ merger.set_base_revision(_mod_revision.NULL_REVISION, branch_to_merge)
+ conflicts = merger.do_merge()
+ merger.set_pending()
+ return conflicts
+
+ def assertTreeEntriesEqual(self, expected_entries, tree):
+ """Assert that 'tree' contains the expected inventory entries.
+
+ :param expected_entries: sequence of (path, file-id) pairs.
+ """
+ files = [(path, ie.file_id) for path, ie in tree.iter_entries_by_dir()]
+ self.assertEqual(expected_entries, files)
+
+
+class TestMergeInto(TestMergeIntoBase):
+
+ def test_newdir_with_unique_roots(self):
+ """Merge a branch with a unique root into a new directory."""
+ project_wt, lib_wt = self.setup_two_branches()
+ self.do_merge_into('lib1', 'project/lib1')
+ project_wt.lock_read()
+ self.addCleanup(project_wt.unlock)
+ # The r1-lib1 revision should be merged into this one
+ self.assertEqual(['r1-project', 'r1-lib1'], project_wt.get_parent_ids())
+ self.assertTreeEntriesEqual(
+ [('', 'project-root-id'),
+ ('README', 'project-README-id'),
+ ('dir', 'project-dir-id'),
+ ('lib1', 'lib1-root-id'),
+ ('dir/file.c', 'project-file.c-id'),
+ ('lib1/Makefile', 'lib1-Makefile-id'),
+ ('lib1/README', 'lib1-README-id'),
+ ('lib1/foo.c', 'lib1-foo.c-id'),
+ ], project_wt)
+
+ def test_subdir(self):
+ """Merge a branch into a subdirectory of an existing directory."""
+ project_wt, lib_wt = self.setup_two_branches()
+ self.do_merge_into('lib1', 'project/dir/lib1')
+ project_wt.lock_read()
+ self.addCleanup(project_wt.unlock)
+ # The r1-lib1 revision should be merged into this one
+ self.assertEqual(['r1-project', 'r1-lib1'], project_wt.get_parent_ids())
+ self.assertTreeEntriesEqual(
+ [('', 'project-root-id'),
+ ('README', 'project-README-id'),
+ ('dir', 'project-dir-id'),
+ ('dir/file.c', 'project-file.c-id'),
+ ('dir/lib1', 'lib1-root-id'),
+ ('dir/lib1/Makefile', 'lib1-Makefile-id'),
+ ('dir/lib1/README', 'lib1-README-id'),
+ ('dir/lib1/foo.c', 'lib1-foo.c-id'),
+ ], project_wt)
+
+ def test_newdir_with_repeat_roots(self):
+ """If the file-id of the dir to be merged already exists a new ID will
+ be allocated to let the merge happen.
+ """
+ project_wt, lib_wt = self.setup_two_branches(custom_root_ids=False)
+ root_id = project_wt.path2id('')
+ self.do_merge_into('lib1', 'project/lib1')
+ project_wt.lock_read()
+ self.addCleanup(project_wt.unlock)
+ # The r1-lib1 revision should be merged into this one
+ self.assertEqual(['r1-project', 'r1-lib1'], project_wt.get_parent_ids())
+ new_lib1_id = project_wt.path2id('lib1')
+ self.assertNotEqual(None, new_lib1_id)
+ self.assertTreeEntriesEqual(
+ [('', root_id),
+ ('README', 'project-README-id'),
+ ('dir', 'project-dir-id'),
+ ('lib1', new_lib1_id),
+ ('dir/file.c', 'project-file.c-id'),
+ ('lib1/Makefile', 'lib1-Makefile-id'),
+ ('lib1/README', 'lib1-README-id'),
+ ('lib1/foo.c', 'lib1-foo.c-id'),
+ ], project_wt)
+
+ def test_name_conflict(self):
+ """When the target directory name already exists a conflict is
+ generated and the original directory is renamed to foo.moved.
+ """
+ dest_wt = self.setup_simple_branch('dest', ['dir/', 'dir/file.txt'])
+ src_wt = self.setup_simple_branch('src', ['README'])
+ conflicts = self.do_merge_into('src', 'dest/dir')
+ self.assertEqual(1, conflicts)
+ dest_wt.lock_read()
+ self.addCleanup(dest_wt.unlock)
+ # The r1-src revision should be merged into this one
+ self.assertEqual(['r1-dest', 'r1-src'], dest_wt.get_parent_ids())
+ self.assertTreeEntriesEqual(
+ [('', 'dest-root-id'),
+ ('dir', 'src-root-id'),
+ ('dir.moved', 'dest-dir-id'),
+ ('dir/README', 'src-README-id'),
+ ('dir.moved/file.txt', 'dest-file.txt-id'),
+ ], dest_wt)
+
+ def test_file_id_conflict(self):
+ """A conflict is generated if the merge-into adds a file (or other
+ inventory entry) with a file-id that already exists in the target tree.
+ """
+ dest_wt = self.setup_simple_branch('dest', ['file.txt'])
+ # Make a second tree with a file-id that will clash with file.txt in
+ # dest.
+ src_wt = self.make_branch_and_tree('src')
+ self.build_tree(['src/README'])
+ src_wt.add(['README'], ids=['dest-file.txt-id'])
+ src_wt.commit("Rev 1 of src.", rev_id='r1-src')
+ conflicts = self.do_merge_into('src', 'dest/dir')
+ # This is an edge case that shouldn't happen to users very often. So
+ # we don't care really about the exact presentation of the conflict,
+ # just that there is one.
+ self.assertEqual(1, conflicts)
+
+ def test_only_subdir(self):
+ """When the location points to just part of a tree, merge just that
+ subtree.
+ """
+ dest_wt = self.setup_simple_branch('dest')
+ src_wt = self.setup_simple_branch(
+ 'src', ['hello.txt', 'dir/', 'dir/foo.c'])
+ conflicts = self.do_merge_into('src/dir', 'dest/dir')
+ dest_wt.lock_read()
+ self.addCleanup(dest_wt.unlock)
+ # The r1-src revision should NOT be merged into this one (this is a
+ # partial merge).
+ self.assertEqual(['r1-dest'], dest_wt.get_parent_ids())
+ self.assertTreeEntriesEqual(
+ [('', 'dest-root-id'),
+ ('dir', 'src-dir-id'),
+ ('dir/foo.c', 'src-foo.c-id'),
+ ], dest_wt)
+
+ def test_only_file(self):
+ """An edge case: merge just one file, not a whole dir."""
+ dest_wt = self.setup_simple_branch('dest')
+ two_file_wt = self.setup_simple_branch(
+ 'two-file', ['file1.txt', 'file2.txt'])
+ conflicts = self.do_merge_into('two-file/file1.txt', 'dest/file1.txt')
+ dest_wt.lock_read()
+ self.addCleanup(dest_wt.unlock)
+ # The r1-two-file revision should NOT be merged into this one
+ self.assertEqual(['r1-dest'], dest_wt.get_parent_ids())
+ self.assertTreeEntriesEqual(
+ [('', 'dest-root-id'), ('file1.txt', 'two-file-file1.txt-id')],
+ dest_wt)
+
+ def test_no_such_source_path(self):
+ """PathNotInTree is raised if the specified path in the source tree
+ does not exist.
+ """
+ dest_wt = self.setup_simple_branch('dest')
+ two_file_wt = self.setup_simple_branch('src', ['dir/'])
+ self.assertRaises(_mod_merge.PathNotInTree, self.do_merge_into,
+ 'src/no-such-dir', 'dest/foo')
+ dest_wt.lock_read()
+ self.addCleanup(dest_wt.unlock)
+ # The dest tree is unmodified.
+ self.assertEqual(['r1-dest'], dest_wt.get_parent_ids())
+ self.assertTreeEntriesEqual([('', 'dest-root-id')], dest_wt)
+
+ def test_no_such_target_path(self):
+ """PathNotInTree is also raised if the specified path in the target
+ tree does not exist.
+ """
+ dest_wt = self.setup_simple_branch('dest')
+ two_file_wt = self.setup_simple_branch('src', ['file.txt'])
+ self.assertRaises(_mod_merge.PathNotInTree, self.do_merge_into,
+ 'src', 'dest/no-such-dir/foo')
+ dest_wt.lock_read()
+ self.addCleanup(dest_wt.unlock)
+ # The dest tree is unmodified.
+ self.assertEqual(['r1-dest'], dest_wt.get_parent_ids())
+ self.assertTreeEntriesEqual([('', 'dest-root-id')], dest_wt)
+
+
+class TestMergeHooks(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestMergeHooks, self).setUp()
+ self.tree_a = self.make_branch_and_tree('tree_a')
+ self.build_tree_contents([('tree_a/file', 'content_1')])
+ self.tree_a.add('file', 'file-id')
+ self.tree_a.commit('added file')
+
+ self.tree_b = self.tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ self.build_tree_contents([('tree_b/file', 'content_2')])
+ self.tree_b.commit('modify file')
+
+ def test_pre_merge_hook_inject_different_tree(self):
+ tree_c = self.tree_b.bzrdir.sprout('tree_c').open_workingtree()
+ self.build_tree_contents([('tree_c/file', 'content_3')])
+ tree_c.commit("more content")
+ calls = []
+ def factory(merger):
+ self.assertIsInstance(merger, _mod_merge.Merge3Merger)
+ merger.other_tree = tree_c
+ calls.append(merger)
+ _mod_merge.Merger.hooks.install_named_hook('pre_merge',
+ factory, 'test factory')
+ self.tree_a.merge_from_branch(self.tree_b.branch)
+
+ self.assertFileEqual("content_3", 'tree_a/file')
+ self.assertLength(1, calls)
+
+ def test_post_merge_hook_called(self):
+ calls = []
+ def factory(merger):
+ self.assertIsInstance(merger, _mod_merge.Merge3Merger)
+ calls.append(merger)
+ _mod_merge.Merger.hooks.install_named_hook('post_merge',
+ factory, 'test factory')
+
+ self.tree_a.merge_from_branch(self.tree_b.branch)
+
+ self.assertFileEqual("content_2", 'tree_a/file')
+ self.assertLength(1, calls)
diff --git a/bzrlib/tests/test_merge3.py b/bzrlib/tests/test_merge3.py
new file mode 100644
index 0000000..6c16485
--- /dev/null
+++ b/bzrlib/tests/test_merge3.py
@@ -0,0 +1,454 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ merge3,
+ tests,
+ )
+from bzrlib.errors import CantReprocessAndShowBase, BinaryFile
+
+def split_lines(t):
+ from cStringIO import StringIO
+ return StringIO(t).readlines()
+
+############################################################
+# test case data from the gnu diffutils manual
+# common base
+TZU = split_lines(""" The Nameless is the origin of Heaven and Earth;
+ The named is the mother of all things.
+
+ Therefore let there always be non-being,
+ so we may see their subtlety,
+ And let there always be being,
+ so we may see their outcome.
+ The two are the same,
+ But after they are produced,
+ they have different names.
+ They both may be called deep and profound.
+ Deeper and more profound,
+ The door of all subtleties!
+""")
+
+LAO = split_lines(""" The Way that can be told of is not the eternal Way;
+ The name that can be named is not the eternal name.
+ The Nameless is the origin of Heaven and Earth;
+ The Named is the mother of all things.
+ Therefore let there always be non-being,
+ so we may see their subtlety,
+ And let there always be being,
+ so we may see their outcome.
+ The two are the same,
+ But after they are produced,
+ they have different names.
+""")
+
+
+TAO = split_lines(""" The Way that can be told of is not the eternal Way;
+ The name that can be named is not the eternal name.
+ The Nameless is the origin of Heaven and Earth;
+ The named is the mother of all things.
+
+ Therefore let there always be non-being,
+ so we may see their subtlety,
+ And let there always be being,
+ so we may see their result.
+ The two are the same,
+ But after they are produced,
+ they have different names.
+
+ -- The Way of Lao-Tzu, tr. Wing-tsit Chan
+
+""")
+
+MERGED_RESULT = split_lines(""" The Way that can be told of is not the eternal Way;
+ The name that can be named is not the eternal name.
+ The Nameless is the origin of Heaven and Earth;
+ The Named is the mother of all things.
+ Therefore let there always be non-being,
+ so we may see their subtlety,
+ And let there always be being,
+ so we may see their result.
+ The two are the same,
+ But after they are produced,
+ they have different names.
+<<<<<<< LAO
+=======
+
+ -- The Way of Lao-Tzu, tr. Wing-tsit Chan
+
+>>>>>>> TAO
+""")
+
+class TestMerge3(tests.TestCase):
+
+ def test_no_changes(self):
+ """No conflicts because nothing changed"""
+ m3 = merge3.Merge3(['aaa', 'bbb'],
+ ['aaa', 'bbb'],
+ ['aaa', 'bbb'])
+
+ self.assertEquals(m3.find_unconflicted(),
+ [(0, 2)])
+
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0, 2,
+ 0, 2,
+ 0, 2),
+ (2,2, 2,2, 2,2)])
+
+ self.assertEquals(list(m3.merge_regions()),
+ [('unchanged', 0, 2)])
+
+ self.assertEquals(list(m3.merge_groups()),
+ [('unchanged', ['aaa', 'bbb'])])
+
+ def test_front_insert(self):
+ m3 = merge3.Merge3(['zz'],
+ ['aaa', 'bbb', 'zz'],
+ ['zz'])
+
+ # todo: should use a sentinel at end as from get_matching_blocks
+ # to match without zz
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0,1, 2,3, 0,1),
+ (1,1, 3,3, 1,1),])
+
+ self.assertEquals(list(m3.merge_regions()),
+ [('a', 0, 2),
+ ('unchanged', 0, 1)])
+
+ self.assertEquals(list(m3.merge_groups()),
+ [('a', ['aaa', 'bbb']),
+ ('unchanged', ['zz'])])
+
+ def test_null_insert(self):
+ m3 = merge3.Merge3([],
+ ['aaa', 'bbb'],
+ [])
+ # todo: should use a sentinel at end as from get_matching_blocks
+ # to match without zz
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0,0, 2,2, 0,0)])
+
+ self.assertEquals(list(m3.merge_regions()),
+ [('a', 0, 2)])
+
+ self.assertEquals(list(m3.merge_lines()),
+ ['aaa', 'bbb'])
+
+ def test_no_conflicts(self):
+ """No conflicts because only one side changed"""
+ m3 = merge3.Merge3(['aaa', 'bbb'],
+ ['aaa', '111', 'bbb'],
+ ['aaa', 'bbb'])
+
+ self.assertEquals(m3.find_unconflicted(),
+ [(0, 1), (1, 2)])
+
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0,1, 0,1, 0,1),
+ (1,2, 2,3, 1,2),
+ (2,2, 3,3, 2,2),])
+
+ self.assertEquals(list(m3.merge_regions()),
+ [('unchanged', 0, 1),
+ ('a', 1, 2),
+ ('unchanged', 1, 2),])
+
+ def test_append_a(self):
+ m3 = merge3.Merge3(['aaa\n', 'bbb\n'],
+ ['aaa\n', 'bbb\n', '222\n'],
+ ['aaa\n', 'bbb\n'])
+
+ self.assertEquals(''.join(m3.merge_lines()),
+ 'aaa\nbbb\n222\n')
+
+ def test_append_b(self):
+ m3 = merge3.Merge3(['aaa\n', 'bbb\n'],
+ ['aaa\n', 'bbb\n'],
+ ['aaa\n', 'bbb\n', '222\n'])
+
+ self.assertEquals(''.join(m3.merge_lines()),
+ 'aaa\nbbb\n222\n')
+
+ def test_append_agreement(self):
+ m3 = merge3.Merge3(['aaa\n', 'bbb\n'],
+ ['aaa\n', 'bbb\n', '222\n'],
+ ['aaa\n', 'bbb\n', '222\n'])
+
+ self.assertEquals(''.join(m3.merge_lines()),
+ 'aaa\nbbb\n222\n')
+
+ def test_append_clash(self):
+ m3 = merge3.Merge3(['aaa\n', 'bbb\n'],
+ ['aaa\n', 'bbb\n', '222\n'],
+ ['aaa\n', 'bbb\n', '333\n'])
+
+ ml = m3.merge_lines(name_a='a',
+ name_b='b',
+ start_marker='<<',
+ mid_marker='--',
+ end_marker='>>')
+ self.assertEquals(''.join(ml),
+'''\
+aaa
+bbb
+<< a
+222
+--
+333
+>> b
+''')
+
+ def test_insert_agreement(self):
+ m3 = merge3.Merge3(['aaa\n', 'bbb\n'],
+ ['aaa\n', '222\n', 'bbb\n'],
+ ['aaa\n', '222\n', 'bbb\n'])
+
+ ml = m3.merge_lines(name_a='a',
+ name_b='b',
+ start_marker='<<',
+ mid_marker='--',
+ end_marker='>>')
+ self.assertEquals(''.join(ml), 'aaa\n222\nbbb\n')
+
+
+ def test_insert_clash(self):
+ """Both try to insert lines in the same place."""
+ m3 = merge3.Merge3(['aaa\n', 'bbb\n'],
+ ['aaa\n', '111\n', 'bbb\n'],
+ ['aaa\n', '222\n', 'bbb\n'])
+
+ self.assertEquals(m3.find_unconflicted(),
+ [(0, 1), (1, 2)])
+
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0,1, 0,1, 0,1),
+ (1,2, 2,3, 2,3),
+ (2,2, 3,3, 3,3),])
+
+ self.assertEquals(list(m3.merge_regions()),
+ [('unchanged', 0,1),
+ ('conflict', 1,1, 1,2, 1,2),
+ ('unchanged', 1,2)])
+
+ self.assertEquals(list(m3.merge_groups()),
+ [('unchanged', ['aaa\n']),
+ ('conflict', [], ['111\n'], ['222\n']),
+ ('unchanged', ['bbb\n']),
+ ])
+
+ ml = m3.merge_lines(name_a='a',
+ name_b='b',
+ start_marker='<<',
+ mid_marker='--',
+ end_marker='>>')
+ self.assertEquals(''.join(ml),
+'''aaa
+<< a
+111
+--
+222
+>> b
+bbb
+''')
+
+ def test_replace_clash(self):
+ """Both try to insert lines in the same place."""
+ m3 = merge3.Merge3(['aaa', '000', 'bbb'],
+ ['aaa', '111', 'bbb'],
+ ['aaa', '222', 'bbb'])
+
+ self.assertEquals(m3.find_unconflicted(),
+ [(0, 1), (2, 3)])
+
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0,1, 0,1, 0,1),
+ (2,3, 2,3, 2,3),
+ (3,3, 3,3, 3,3),])
+
+ def test_replace_multi(self):
+ """Replacement with regions of different size."""
+ m3 = merge3.Merge3(['aaa', '000', '000', 'bbb'],
+ ['aaa', '111', '111', '111', 'bbb'],
+ ['aaa', '222', '222', '222', '222', 'bbb'])
+
+ self.assertEquals(m3.find_unconflicted(),
+ [(0, 1), (3, 4)])
+
+
+ self.assertEquals(list(m3.find_sync_regions()),
+ [(0,1, 0,1, 0,1),
+ (3,4, 4,5, 5,6),
+ (4,4, 5,5, 6,6),])
+
+ def test_merge_poem(self):
+ """Test case from diff3 manual"""
+ m3 = merge3.Merge3(TZU, LAO, TAO)
+ ml = list(m3.merge_lines('LAO', 'TAO'))
+ self.log('merge result:')
+ self.log(''.join(ml))
+ self.assertEquals(ml, MERGED_RESULT)
+
+ def test_minimal_conflicts_common(self):
+ """Reprocessing"""
+ base_text = ("a\n" * 20).splitlines(True)
+ this_text = ("a\n"*10+"b\n" * 10).splitlines(True)
+ other_text = ("a\n"*10+"c\n"+"b\n" * 8 + "c\n").splitlines(True)
+ m3 = merge3.Merge3(base_text, other_text, this_text)
+ m_lines = m3.merge_lines('OTHER', 'THIS', reprocess=True)
+ merged_text = "".join(list(m_lines))
+ optimal_text = ("a\n" * 10 + "<<<<<<< OTHER\nc\n"
+ + 8* "b\n" + "c\n=======\n"
+ + 10*"b\n" + ">>>>>>> THIS\n")
+ self.assertEqualDiff(optimal_text, merged_text)
+
+ def test_minimal_conflicts_unique(self):
+ def add_newline(s):
+ """Add a newline to each entry in the string"""
+ return [(x+'\n') for x in s]
+
+ base_text = add_newline("abcdefghijklm")
+ this_text = add_newline("abcdefghijklmNOPQRSTUVWXYZ")
+ other_text = add_newline("abcdefghijklm1OPQRSTUVWXY2")
+ m3 = merge3.Merge3(base_text, other_text, this_text)
+ m_lines = m3.merge_lines('OTHER', 'THIS', reprocess=True)
+ merged_text = "".join(list(m_lines))
+ optimal_text = ''.join(add_newline("abcdefghijklm")
+ + ["<<<<<<< OTHER\n1\n=======\nN\n>>>>>>> THIS\n"]
+ + add_newline('OPQRSTUVWXY')
+ + ["<<<<<<< OTHER\n2\n=======\nZ\n>>>>>>> THIS\n"]
+ )
+ self.assertEqualDiff(optimal_text, merged_text)
+
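+    # A sketch of what reprocess=True buys here, based only on the assertion
+    # above: each conflicted region is re-merged, so runs that both sides
+    # inserted identically, such as 'OPQRSTUVWXY' above, drop out of the
+    # conflict and only the genuinely divergent lines ('1' vs 'N', '2' vs 'Z')
+    # stay between the markers.
+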
+ def test_minimal_conflicts_nonunique(self):
+ def add_newline(s):
+ """Add a newline to each entry in the string"""
+ return [(x+'\n') for x in s]
+
+ base_text = add_newline("abacddefgghij")
+ this_text = add_newline("abacddefgghijkalmontfprz")
+ other_text = add_newline("abacddefgghijknlmontfprd")
+ m3 = merge3.Merge3(base_text, other_text, this_text)
+ m_lines = m3.merge_lines('OTHER', 'THIS', reprocess=True)
+ merged_text = "".join(list(m_lines))
+ optimal_text = ''.join(add_newline("abacddefgghijk")
+ + ["<<<<<<< OTHER\nn\n=======\na\n>>>>>>> THIS\n"]
+ + add_newline('lmontfpr')
+ + ["<<<<<<< OTHER\nd\n=======\nz\n>>>>>>> THIS\n"]
+ )
+ self.assertEqualDiff(optimal_text, merged_text)
+
+ def test_reprocess_and_base(self):
+ """Reprocessing and showing base breaks correctly"""
+ base_text = ("a\n" * 20).splitlines(True)
+ this_text = ("a\n"*10+"b\n" * 10).splitlines(True)
+ other_text = ("a\n"*10+"c\n"+"b\n" * 8 + "c\n").splitlines(True)
+ m3 = merge3.Merge3(base_text, other_text, this_text)
+ m_lines = m3.merge_lines('OTHER', 'THIS', reprocess=True,
+ base_marker='|||||||')
+ self.assertRaises(CantReprocessAndShowBase, list, m_lines)
+
+ def test_binary(self):
+ self.assertRaises(BinaryFile, merge3.Merge3, ['\x00'], ['a'], ['b'])
+
+ def test_dos_text(self):
+ base_text = 'a\r\n'
+ this_text = 'b\r\n'
+ other_text = 'c\r\n'
+ m3 = merge3.Merge3(base_text.splitlines(True),
+ other_text.splitlines(True),
+ this_text.splitlines(True))
+ m_lines = m3.merge_lines('OTHER', 'THIS')
+ self.assertEqual('<<<<<<< OTHER\r\nc\r\n=======\r\nb\r\n'
+ '>>>>>>> THIS\r\n'.splitlines(True), list(m_lines))
+
+ def test_mac_text(self):
+ base_text = 'a\r'
+ this_text = 'b\r'
+ other_text = 'c\r'
+ m3 = merge3.Merge3(base_text.splitlines(True),
+ other_text.splitlines(True),
+ this_text.splitlines(True))
+ m_lines = m3.merge_lines('OTHER', 'THIS')
+ self.assertEqual('<<<<<<< OTHER\rc\r=======\rb\r'
+ '>>>>>>> THIS\r'.splitlines(True), list(m_lines))
+
+ def test_merge3_cherrypick(self):
+ base_text = "a\nb\n"
+ this_text = "a\n"
+ other_text = "a\nb\nc\n"
+ # When cherrypicking, lines in base are not part of the conflict
+ m3 = merge3.Merge3(base_text.splitlines(True),
+ this_text.splitlines(True),
+ other_text.splitlines(True), is_cherrypick=True)
+ m_lines = m3.merge_lines()
+ self.assertEqualDiff('a\n<<<<<<<\n=======\nc\n>>>>>>>\n',
+ ''.join(m_lines))
+
+ # This is not symmetric
+ m3 = merge3.Merge3(base_text.splitlines(True),
+ other_text.splitlines(True),
+ this_text.splitlines(True), is_cherrypick=True)
+ m_lines = m3.merge_lines()
+ self.assertEqualDiff('a\n<<<<<<<\nb\nc\n=======\n>>>>>>>\n',
+ ''.join(m_lines))
+
+ def test_merge3_cherrypick_w_mixed(self):
+ base_text = 'a\nb\nc\nd\ne\n'
+ this_text = 'a\nb\nq\n'
+ other_text = 'a\nb\nc\nd\nf\ne\ng\n'
+ # When cherrypicking, lines in base are not part of the conflict
+ m3 = merge3.Merge3(base_text.splitlines(True),
+ this_text.splitlines(True),
+ other_text.splitlines(True), is_cherrypick=True)
+ m_lines = m3.merge_lines()
+ self.assertEqualDiff('a\n'
+ 'b\n'
+ '<<<<<<<\n'
+ 'q\n'
+ '=======\n'
+ 'f\n'
+ '>>>>>>>\n'
+ '<<<<<<<\n'
+ '=======\n'
+ 'g\n'
+ '>>>>>>>\n',
+ ''.join(m_lines))
+
+ def test_allow_objects(self):
+ """Objects other than strs may be used with Merge3 when
+ allow_objects=True.
+
+    merge_groups and merge_regions work with non-str input. Methods that
+    return lines, such as merge_lines, fail.
+ """
+ base = [(x,x) for x in 'abcde']
+ a = [(x,x) for x in 'abcdef']
+ b = [(x,x) for x in 'Zabcde']
+ m3 = merge3.Merge3(base, a, b, allow_objects=True)
+ self.assertEqual(
+ [('b', 0, 1),
+ ('unchanged', 0, 5),
+ ('a', 5, 6)],
+ list(m3.merge_regions()))
+ self.assertEqual(
+ [('b', [('Z', 'Z')]),
+ ('unchanged', [(x,x) for x in 'abcde']),
+ ('a', [('f', 'f')])],
+ list(m3.merge_groups()))
+
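+# A minimal end-to-end sketch of the API exercised above (illustrative only;
+# it mirrors test_append_clash with the same short markers):
+#
+#   m3 = merge3.Merge3(['common\n'],
+#                      ['common\n', 'ours\n'],
+#                      ['common\n', 'theirs\n'])
+#   ''.join(m3.merge_lines(name_a='a', name_b='b', start_marker='<<',
+#                          mid_marker='--', end_marker='>>'))
+#   # -> 'common\n<< a\nours\n--\ntheirs\n>> b\n'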
diff --git a/bzrlib/tests/test_merge_core.py b/bzrlib/tests/test_merge_core.py
new file mode 100644
index 0000000..c1fcb28
--- /dev/null
+++ b/bzrlib/tests/test_merge_core.py
@@ -0,0 +1,829 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import sys
+
+import bzrlib
+from bzrlib import (
+ controldir,
+ errors,
+ generate_ids,
+ merge_directive,
+ osutils,
+ )
+from bzrlib.conflicts import (
+ ContentsConflict,
+ TextConflict,
+ PathConflict,
+ )
+from bzrlib.merge import (
+ Merge3Merger,
+ Diff3Merger,
+ WeaveMerger,
+ Merger,
+ )
+from bzrlib.osutils import getcwd, pathjoin
+from bzrlib.transform import TreeTransform
+from bzrlib.tests import TestCaseWithTransport, TestSkipped
+from bzrlib.workingtree import WorkingTree
+
+
+class MergeBuilder(object):
+
+ def __init__(self, dir=None):
+ self.dir = osutils.mkdtemp(prefix="merge-test", dir=dir)
+ self.tree_root = generate_ids.gen_root_id()
+ def wt(name):
+ path = pathjoin(self.dir, name)
+ os.mkdir(path)
+ wt = controldir.ControlDir.create_standalone_workingtree(path)
+ # the tests perform pulls, so need a branch that is writeable.
+ wt.lock_write()
+ wt.set_root_id(self.tree_root)
+ wt.flush()
+ tt = TreeTransform(wt)
+ return wt, tt
+ self.base, self.base_tt = wt('base')
+ self.this, self.this_tt = wt('this')
+ self.other, self.other_tt = wt('other')
+
+ def get_cset_path(self, parent, name):
+ if name is None:
+ if parent is not None:
+ raise AssertionError()
+ return None
+ return pathjoin(self.cset.entries[parent].path, name)
+
+ def add_file(self, id, parent, name, contents, executable, this=True,
+ base=True, other=True):
+ def new_file(tt):
+ parent_id = tt.trans_id_file_id(parent)
+ tt.new_file(name, parent_id, contents, id, executable)
+ for option, tt in self.selected_transforms(this, base, other):
+ if option is True:
+ new_file(tt)
+
+ def merge(self, merge_type=Merge3Merger, interesting_ids=None, **kwargs):
+ merger = self.make_merger(merge_type, interesting_ids, **kwargs)
+ merger.do_merge()
+ return merger.cooked_conflicts
+
+ def make_preview_transform(self):
+ merger = self.make_merger(Merge3Merger, None, this_revision_tree=True)
+ return merger.make_preview_transform()
+
+ def make_merger(self, merge_type, interesting_ids,
+ this_revision_tree=False, **kwargs):
+ self.base_tt.apply()
+ self.base.commit('base commit')
+ for tt, wt in ((self.this_tt, self.this), (self.other_tt, self.other)):
+ # why does this not do wt.pull() ?
+ wt.branch.pull(self.base.branch)
+ wt.set_parent_ids([wt.branch.last_revision()])
+ wt.flush()
+ # We maintain a write lock, so make sure changes are flushed to
+ # disk first
+ tt.apply()
+ wt.commit('branch commit')
+ wt.flush()
+ if wt.branch.last_revision_info()[0] != 2:
+ raise AssertionError()
+ self.this.branch.fetch(self.other.branch)
+ other_basis = self.other.branch.basis_tree()
+ if this_revision_tree:
+ self.this.commit('message')
+ this_tree = self.this.basis_tree()
+ else:
+ this_tree = self.this
+ merger = merge_type(this_tree, self.this, self.base, other_basis,
+ interesting_ids=interesting_ids, do_merge=False,
+ this_branch=self.this.branch, **kwargs)
+ return merger
+
+ def list_transforms(self):
+ return [self.this_tt, self.base_tt, self.other_tt]
+
+ def selected_transforms(self, this, base, other):
+ pairs = [(this, self.this_tt), (base, self.base_tt),
+ (other, self.other_tt)]
+ return [(v, tt) for (v, tt) in pairs if v is not None]
+
+ def add_symlink(self, id, parent, name, contents):
+ for tt in self.list_transforms():
+ parent_id = tt.trans_id_file_id(parent)
+ tt.new_symlink(name, parent_id, contents, id)
+
+ def remove_file(self, file_id, base=False, this=False, other=False):
+ for option, tt in self.selected_transforms(this, base, other):
+ if option is True:
+ trans_id = tt.trans_id_file_id(file_id)
+ tt.cancel_creation(trans_id)
+ tt.cancel_versioning(trans_id)
+ tt.set_executability(None, trans_id)
+
+ def add_dir(self, file_id, parent, name, this=True, base=True, other=True):
+ for option, tt in self.selected_transforms(this, base, other):
+ if option is True:
+ parent_id = tt.trans_id_file_id(parent)
+ tt.new_directory(name, parent_id, file_id)
+
+ def change_name(self, id, base=None, this=None, other=None):
+ for val, tt in ((base, self.base_tt), (this, self.this_tt),
+ (other, self.other_tt)):
+ if val is None:
+ continue
+ trans_id = tt.trans_id_file_id(id)
+ parent_id = tt.final_parent(trans_id)
+ tt.adjust_path(val, parent_id, trans_id)
+
+ def change_parent(self, file_id, base=None, this=None, other=None):
+ for parent, tt in self.selected_transforms(this, base, other):
+ trans_id = tt.trans_id_file_id(file_id)
+ parent_id = tt.trans_id_file_id(parent)
+ tt.adjust_path(tt.final_name(trans_id), parent_id, trans_id)
+
+ def change_contents(self, file_id, base=None, this=None, other=None):
+ for contents, tt in self.selected_transforms(this, base, other):
+ trans_id = tt.trans_id_file_id(file_id)
+ tt.cancel_creation(trans_id)
+ tt.create_file(contents, trans_id)
+
+ def change_target(self, id, base=None, this=None, other=None):
+ for target, tt in self.selected_transforms(this, base, other):
+ trans_id = tt.trans_id_file_id(id)
+ tt.cancel_creation(trans_id)
+ tt.create_symlink(target, trans_id)
+
+ def change_perms(self, id, base=None, this=None, other=None):
+ for executability, tt in self.selected_transforms(this, base, other):
+ trans_id = tt.trans_id_file_id(id)
+ tt.set_executability(None, trans_id)
+ tt.set_executability(executability, trans_id)
+
+ def change_perms_tree(self, id, tree, mode):
+ os.chmod(tree.full_path(id), mode)
+
+ def apply_inv_change(self, inventory_change, orig_inventory):
+ orig_inventory_by_path = {}
+ for file_id, path in orig_inventory.iteritems():
+ orig_inventory_by_path[path] = file_id
+
+ def parent_id(file_id):
+ try:
+ parent_dir = os.path.dirname(orig_inventory[file_id])
+ except:
+ print file_id
+ raise
+ if parent_dir == "":
+ return None
+ return orig_inventory_by_path[parent_dir]
+
+ def new_path(file_id):
+            if file_id in inventory_change:
+ return inventory_change[file_id]
+ else:
+ parent = parent_id(file_id)
+ if parent is None:
+ return orig_inventory[file_id]
+ dirname = new_path(parent)
+ return pathjoin(dirname, os.path.basename(orig_inventory[file_id]))
+
+ new_inventory = {}
+ for file_id in orig_inventory.iterkeys():
+ path = new_path(file_id)
+ if path is None:
+ continue
+ new_inventory[file_id] = path
+
+ for file_id, path in inventory_change.iteritems():
+ if file_id in orig_inventory:
+ continue
+ new_inventory[file_id] = path
+ return new_inventory
+
+ def unlock(self):
+ self.base.unlock()
+ self.this.unlock()
+ self.other.unlock()
+
+ def cleanup(self):
+ self.unlock()
+ osutils.rmtree(self.dir)
+
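+# Typical MergeBuilder flow, as exercised by the tests below (an illustrative
+# sketch only, using the helpers defined above):
+#
+#   builder = MergeBuilder(getcwd())
+#   builder.add_file("1", builder.tree_root, "name1", "text1", True)
+#   builder.change_contents("1", other="text2")  # diverge one side
+#   conflicts = builder.merge()                  # commit base/this/other, merge
+#   builder.cleanup()                            # unlock and remove temp dirs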
+
+class MergeTest(TestCaseWithTransport):
+
+ def test_change_name(self):
+ """Test renames"""
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "hello1", True)
+ builder.change_name("1", other="name2")
+ builder.add_file("2", builder.tree_root, "name3", "hello2", True)
+ builder.change_name("2", base="name4")
+ builder.add_file("3", builder.tree_root, "name5", "hello3", True)
+ builder.change_name("3", this="name6")
+ builder.merge()
+ builder.cleanup()
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "hello1", False)
+ builder.change_name("1", other="name2", this="name3")
+ conflicts = builder.merge()
+ self.assertEqual(conflicts, [PathConflict('name3', 'name2', '1')])
+ builder.cleanup()
+
+ def test_merge_one(self):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "hello1", True)
+ builder.change_contents("1", other="text4")
+ builder.add_file("2", builder.tree_root, "name2", "hello1", True)
+ builder.change_contents("2", other="text4")
+ builder.merge(interesting_ids=["1"])
+ self.assertEqual(builder.this.get_file("1").read(), "text4" )
+ self.assertEqual(builder.this.get_file("2").read(), "hello1" )
+ builder.cleanup()
+
+ def test_file_moves(self):
+ """Test moves"""
+ builder = MergeBuilder(getcwd())
+ builder.add_dir("1", builder.tree_root, "dir1")
+ builder.add_dir("2", builder.tree_root, "dir2")
+ builder.add_file("3", "1", "file1", "hello1", True)
+ builder.add_file("4", "1", "file2", "hello2", True)
+ builder.add_file("5", "1", "file3", "hello3", True)
+ builder.change_parent("3", other="2")
+ builder.change_parent("4", this="2")
+ builder.change_parent("5", base="2")
+ builder.merge()
+ builder.cleanup()
+
+ builder = MergeBuilder(getcwd())
+ builder.add_dir("1", builder.tree_root, "dir1")
+ builder.add_dir("2", builder.tree_root, "dir2")
+ builder.add_dir("3", builder.tree_root, "dir3")
+ builder.add_file("4", "1", "file1", "hello1", False)
+ builder.change_parent("4", other="2", this="3")
+ conflicts = builder.merge()
+ path2 = pathjoin('dir2', 'file1')
+ path3 = pathjoin('dir3', 'file1')
+ self.assertEqual(conflicts, [PathConflict(path3, path2, '4')])
+ builder.cleanup()
+
+ def test_contents_merge(self):
+ """Test merge3 merging"""
+ self.do_contents_test(Merge3Merger)
+
+ def test_contents_merge2(self):
+ """Test diff3 merging"""
+ if sys.platform == 'win32':
+ raise TestSkipped("diff3 does not have --binary flag"
+ " and therefore always fails on win32")
+ try:
+ self.do_contents_test(Diff3Merger)
+ except errors.NoDiff3:
+ raise TestSkipped("diff3 not available")
+
+ def test_contents_merge3(self):
+ """Test diff3 merging"""
+ self.do_contents_test(WeaveMerger)
+
+ def test_reprocess_weave(self):
+ # Reprocess works on weaves, and behaves as expected
+ builder = MergeBuilder(getcwd())
+ builder.add_file('a', builder.tree_root, 'blah', 'a', False)
+ builder.change_contents('a', this='b\nc\nd\ne\n', other='z\nc\nd\ny\n')
+ builder.merge(WeaveMerger, reprocess=True)
+ expected = """<<<<<<< TREE
+b
+=======
+z
+>>>>>>> MERGE-SOURCE
+c
+d
+<<<<<<< TREE
+e
+=======
+y
+>>>>>>> MERGE-SOURCE
+"""
+ self.assertEqualDiff(builder.this.get_file("a").read(), expected)
+ builder.cleanup()
+
+ def do_contents_test(self, merge_factory):
+ """Test merging with specified ContentsChange factory"""
+ builder = self.contents_test_success(merge_factory)
+ builder.cleanup()
+ self.contents_test_conflicts(merge_factory)
+
+ def contents_test_success(self, merge_factory):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_contents("1", other="text4")
+ builder.add_file("2", builder.tree_root, "name3", "text2", False)
+ builder.change_contents("2", base="text5")
+ builder.add_file("3", builder.tree_root, "name5", "text3", True)
+ builder.add_file("4", builder.tree_root, "name6", "text4", True)
+ builder.remove_file("4", base=True)
+ builder.add_file("5", builder.tree_root, "name7", "a\nb\nc\nd\ne\nf\n",
+ True)
+ builder.change_contents("5", other="a\nz\nc\nd\ne\nf\n",
+ this="a\nb\nc\nd\ne\nz\n")
+ conflicts = builder.merge(merge_factory)
+ try:
+ self.assertEqual([], conflicts)
+ self.assertEqual("text4", builder.this.get_file("1").read())
+ self.assertEqual("text2", builder.this.get_file("2").read())
+ self.assertEqual("a\nz\nc\nd\ne\nz\n",
+ builder.this.get_file("5").read())
+ self.assertTrue(builder.this.is_executable("1"))
+ self.assertFalse(builder.this.is_executable("2"))
+ self.assertTrue(builder.this.is_executable("3"))
+ except:
+ builder.unlock()
+ raise
+ return builder
+
+ def contents_test_conflicts(self, merge_factory):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_contents("1", other="text4", this="text3")
+ builder.add_file("2", builder.tree_root, "name2", "text1", True)
+ builder.change_contents("2", other="\x00", this="text3")
+ builder.add_file("3", builder.tree_root, "name3", "text5", False)
+ builder.change_perms("3", this=True)
+ builder.change_contents('3', this='moretext')
+ builder.remove_file('3', other=True)
+ conflicts = builder.merge(merge_factory)
+ self.assertEqual(conflicts, [TextConflict('name1', file_id='1'),
+ ContentsConflict('name2', file_id='2'),
+ ContentsConflict('name3', file_id='3')])
+ self.assertEqual(builder.this.get_file('2').read(), '\x00')
+ builder.cleanup()
+
+ def test_symlink_conflicts(self):
+ if sys.platform != "win32":
+ builder = MergeBuilder(getcwd())
+ builder.add_symlink("2", builder.tree_root, "name2", "target1")
+ builder.change_target("2", other="target4", base="text3")
+ conflicts = builder.merge()
+ self.assertEqual(conflicts, [ContentsConflict('name2',
+ file_id='2')])
+ builder.cleanup()
+
+ def test_symlink_merge(self):
+ if sys.platform != "win32":
+ builder = MergeBuilder(getcwd())
+ builder.add_symlink("1", builder.tree_root, "name1", "target1")
+ builder.add_symlink("2", builder.tree_root, "name2", "target1")
+ builder.add_symlink("3", builder.tree_root, "name3", "target1")
+ builder.change_target("1", this="target2")
+ builder.change_target("2", base="target2")
+ builder.change_target("3", other="target2")
+ builder.merge()
+ self.assertEqual(builder.this.get_symlink_target("1"), "target2")
+ self.assertEqual(builder.this.get_symlink_target("2"), "target1")
+ self.assertEqual(builder.this.get_symlink_target("3"), "target2")
+ builder.cleanup()
+
+ def test_no_passive_add(self):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.remove_file("1", this=True)
+ builder.merge()
+ builder.cleanup()
+
+ def test_perms_merge(self):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_perms("1", other=False)
+ builder.add_file("2", builder.tree_root, "name2", "text2", True)
+ builder.change_perms("2", base=False)
+ builder.add_file("3", builder.tree_root, "name3", "text3", True)
+ builder.change_perms("3", this=False)
+ builder.add_file('4', builder.tree_root, 'name4', 'text4', False)
+ builder.change_perms('4', this=True)
+ builder.remove_file('4', base=True)
+ builder.merge()
+ self.assertIs(builder.this.is_executable("1"), False)
+ self.assertIs(builder.this.is_executable("2"), True)
+ self.assertIs(builder.this.is_executable("3"), False)
+        builder.cleanup()
+
+ def test_new_suffix(self):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "text1", True)
+ builder.change_contents("1", other="text3")
+ builder.add_file("2", builder.tree_root, "name1.new", "text2", True)
+ builder.merge()
+ os.lstat(builder.this.id2abspath("2"))
+ builder.cleanup()
+
+ def test_spurious_conflict(self):
+ builder = MergeBuilder(getcwd())
+ builder.add_file("1", builder.tree_root, "name1", "text1", False)
+ builder.remove_file("1", other=True)
+ builder.add_file("2", builder.tree_root, "name1", "text1", False,
+ this=False, base=False)
+ conflicts = builder.merge()
+ self.assertEqual(conflicts, [])
+ builder.cleanup()
+
+ def test_merge_one_renamed(self):
+ builder = MergeBuilder(getcwd())
+ builder.add_file('1', builder.tree_root, 'name1', 'text1a', False)
+ builder.change_name('1', this='name2')
+ builder.change_contents('1', other='text2')
+ builder.merge(interesting_files=['name2'])
+ self.assertEqual('text2', builder.this.get_file('1').read())
+ builder.cleanup()
+
+
+class FunctionalMergeTest(TestCaseWithTransport):
+
+ def test_trivial_star_merge(self):
+ """Test that merges in a star shape Just Work."""
+ # John starts a branch
+ self.build_tree(("original/", "original/file1", "original/file2"))
+ tree = self.make_branch_and_tree('original')
+ branch = tree.branch
+ tree.smart_add(["original"])
+ tree.commit("start branch.", verbose=False)
+ # Mary branches it.
+ self.build_tree(("mary/",))
+ branch.bzrdir.clone("mary")
+ # Now John commits a change
+ file = open("original/file1", "wt")
+ file.write("John\n")
+ file.close()
+ tree.commit("change file1")
+ # Mary does too
+ mary_tree = WorkingTree.open('mary')
+ mary_branch = mary_tree.branch
+ file = open("mary/file2", "wt")
+ file.write("Mary\n")
+ file.close()
+ mary_tree.commit("change file2")
+ # john should be able to merge with no conflicts.
+ base = [None, None]
+ other = ("mary", -1)
+ tree.merge_from_branch(mary_tree.branch)
+ self.assertEqual("John\n", open("original/file1", "rt").read())
+ self.assertEqual("Mary\n", open("original/file2", "rt").read())
+
+ def test_conflicts(self):
+ wta = self.make_branch_and_tree('a')
+ self.build_tree_contents([('a/file', 'contents\n')])
+ wta.add('file')
+ wta.commit('base revision', allow_pointless=False)
+ d_b = wta.branch.bzrdir.clone('b')
+ self.build_tree_contents([('a/file', 'other contents\n')])
+ wta.commit('other revision', allow_pointless=False)
+ self.build_tree_contents([('b/file', 'this contents contents\n')])
+ wtb = d_b.open_workingtree()
+ wtb.commit('this revision', allow_pointless=False)
+ self.assertEqual(1, wtb.merge_from_branch(wta.branch))
+ self.assertPathExists('b/file.THIS')
+ self.assertPathExists('b/file.BASE')
+ self.assertPathExists('b/file.OTHER')
+ wtb.revert()
+ self.assertEqual(1, wtb.merge_from_branch(wta.branch,
+ merge_type=WeaveMerger))
+ self.assertPathExists('b/file')
+ self.assertPathExists('b/file.THIS')
+ self.assertPathExists('b/file.BASE')
+ self.assertPathExists('b/file.OTHER')
+
+ def test_weave_conflicts_not_in_base(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ # See bug #494197
+ # A base revision (before criss-cross)
+ # |\
+ # B C B does nothing, C adds 'foo'
+ # |X|
+ # D E D and E modify foo in incompatible ways
+ #
+ # Merging will conflict, with C as a clean base text. However, the
+ # current code uses A as the global base and 'foo' doesn't exist there.
+ # It isn't trivial to create foo.BASE because it tries to look up
+ # attributes like 'executable' in A.
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'TREE_ROOT', 'directory', None))])
+ builder.build_snapshot('B-id', ['A-id'], [])
+ builder.build_snapshot('C-id', ['A-id'], [
+ ('add', ('foo', 'foo-id', 'file', 'orig\ncontents\n'))])
+ builder.build_snapshot('D-id', ['B-id', 'C-id'], [
+ ('add', ('foo', 'foo-id', 'file', 'orig\ncontents\nand D\n'))])
+ builder.build_snapshot('E-id', ['C-id', 'B-id'], [
+ ('modify', ('foo-id', 'orig\ncontents\nand E\n'))])
+ builder.finish_series()
+ tree = builder.get_branch().create_checkout('tree', lightweight=True)
+ self.assertEqual(1, tree.merge_from_branch(tree.branch,
+ to_revision='D-id',
+ merge_type=WeaveMerger))
+ self.assertPathExists('tree/foo.THIS')
+ self.assertPathExists('tree/foo.OTHER')
+ self.expectFailure('fail to create .BASE in some criss-cross merges',
+ self.assertPathExists, 'tree/foo.BASE')
+ self.assertPathExists('tree/foo.BASE')
+
+ def test_merge_unrelated(self):
+ """Sucessfully merges unrelated branches with no common names"""
+ wta = self.make_branch_and_tree('a')
+ a = wta.branch
+ with file('a/a_file', 'wb') as f: f.write('contents\n')
+ wta.add('a_file')
+ wta.commit('a_revision', allow_pointless=False)
+ wtb = self.make_branch_and_tree('b')
+ b = wtb.branch
+ with file('b/b_file', 'wb') as f: f.write('contents\n')
+ wtb.add('b_file')
+ b_rev = wtb.commit('b_revision', allow_pointless=False)
+ wta.merge_from_branch(wtb.branch, b_rev, 'null:')
+ self.assert_(os.path.lexists('a/b_file'))
+ self.assertEqual([b_rev], wta.get_parent_ids()[1:])
+
+ def test_merge_unrelated_conflicting(self):
+ """Sucessfully merges unrelated branches with common names"""
+ wta = self.make_branch_and_tree('a')
+ a = wta.branch
+ with file('a/file', 'wb') as f: f.write('contents\n')
+ wta.add('file')
+ wta.commit('a_revision', allow_pointless=False)
+ wtb = self.make_branch_and_tree('b')
+ b = wtb.branch
+ with file('b/file', 'wb') as f: f.write('contents\n')
+ wtb.add('file')
+ b_rev = wtb.commit('b_revision', allow_pointless=False)
+ wta.merge_from_branch(wtb.branch, b_rev, 'null:')
+ self.assert_(os.path.lexists('a/file'))
+ self.assert_(os.path.lexists('a/file.moved'))
+ self.assertEqual([b_rev], wta.get_parent_ids()[1:])
+
+ def test_merge_deleted_conflicts(self):
+ wta = self.make_branch_and_tree('a')
+ with file('a/file', 'wb') as f: f.write('contents\n')
+ wta.add('file')
+ wta.commit('a_revision', allow_pointless=False)
+ self.run_bzr('branch a b')
+ os.remove('a/file')
+ wta.commit('removed file', allow_pointless=False)
+ with file('b/file', 'wb') as f: f.write('changed contents\n')
+ wtb = WorkingTree.open('b')
+ wtb.commit('changed file', allow_pointless=False)
+ wtb.merge_from_branch(wta.branch, wta.branch.last_revision(),
+ wta.branch.get_rev_id(1))
+ self.assertFalse(os.path.lexists('b/file'))
+
+ def test_merge_metadata_vs_deletion(self):
+ """Conflict deletion vs metadata change"""
+ a_wt = self.make_branch_and_tree('a')
+ with file('a/file', 'wb') as f: f.write('contents\n')
+ a_wt.add('file')
+ a_wt.commit('r0')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ os.chmod('b/file', 0755)
+ os.remove('a/file')
+ a_wt.commit('removed a')
+ self.assertEqual(a_wt.branch.revno(), 2)
+ self.assertFalse(os.path.exists('a/file'))
+ b_wt.commit('exec a')
+ a_wt.merge_from_branch(b_wt.branch, b_wt.last_revision(), 'null:')
+ self.assert_(os.path.exists('a/file'))
+
+ def test_merge_swapping_renames(self):
+ a_wt = self.make_branch_and_tree('a')
+ with file('a/un','wb') as f: f.write('UN')
+ with file('a/deux','wb') as f: f.write('DEUX')
+ a_wt.add('un', 'un-id')
+ a_wt.add('deux', 'deux-id')
+ a_wt.commit('r0', rev_id='r0')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ b_wt.rename_one('un','tmp')
+ b_wt.rename_one('deux','un')
+ b_wt.rename_one('tmp','deux')
+ b_wt.commit('r1', rev_id='r1')
+ self.assertEqual(0, a_wt.merge_from_branch(b_wt.branch,
+ b_wt.branch.last_revision(), b_wt.branch.get_rev_id(1)))
+ self.assertPathExists('a/un')
+        self.assertPathExists('a/deux')
+ self.assertFalse(os.path.exists('a/tmp'))
+ self.assertEqual(file('a/un').read(),'DEUX')
+ self.assertEqual(file('a/deux').read(),'UN')
+
+ def test_merge_delete_and_add_same(self):
+ a_wt = self.make_branch_and_tree('a')
+ with file('a/file', 'wb') as f: f.write('THIS')
+ a_wt.add('file')
+ a_wt.commit('r0')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ os.remove('b/file')
+ b_wt.commit('r1')
+ with file('b/file', 'wb') as f: f.write('THAT')
+ b_wt.add('file')
+ b_wt.commit('r2')
+ a_wt.merge_from_branch(b_wt.branch, b_wt.branch.last_revision(),
+ b_wt.branch.get_rev_id(1))
+ self.assert_(os.path.exists('a/file'))
+ self.assertEqual(file('a/file').read(),'THAT')
+
+ def test_merge_rename_before_create(self):
+ """rename before create
+
+ This case requires that you must not do creates
+ before move-into-place:
+
+ $ touch foo
+ $ bzr add foo
+ $ bzr commit
+ $ bzr mv foo bar
+ $ touch foo
+ $ bzr add foo
+ $ bzr commit
+ """
+ a_wt = self.make_branch_and_tree('a')
+ with file('a/foo', 'wb') as f: f.write('A/FOO')
+ a_wt.add('foo')
+ a_wt.commit('added foo')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ b_wt.rename_one('foo', 'bar')
+ with file('b/foo', 'wb') as f: f.write('B/FOO')
+ b_wt.add('foo')
+ b_wt.commit('moved foo to bar, added new foo')
+ a_wt.merge_from_branch(b_wt.branch, b_wt.branch.last_revision(),
+ b_wt.branch.get_rev_id(1))
+
+ def test_merge_create_before_rename(self):
+ """create before rename, target parents before children
+
+ This case requires that you must not do move-into-place
+ before creates, and that you must not do children after
+ parents:
+
+ $ touch foo
+ $ bzr add foo
+ $ bzr commit
+ $ bzr mkdir bar
+ $ bzr add bar
+ $ bzr mv foo bar/foo
+ $ bzr commit
+ """
+ os.mkdir('a')
+ a_wt = self.make_branch_and_tree('a')
+ with file('a/foo', 'wb') as f: f.write('A/FOO')
+ a_wt.add('foo')
+ a_wt.commit('added foo')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ os.mkdir('b/bar')
+ b_wt.add('bar')
+ b_wt.rename_one('foo', 'bar/foo')
+ b_wt.commit('created bar dir, moved foo into bar')
+ a_wt.merge_from_branch(b_wt.branch, b_wt.branch.last_revision(),
+ b_wt.branch.get_rev_id(1))
+
+ def test_merge_rename_to_temp_before_delete(self):
+ """rename to temp before delete, source children before parents
+
+ This case requires that you must not do deletes before
+ move-out-of-the-way, and that you must not do children
+ after parents:
+
+ $ mkdir foo
+ $ touch foo/bar
+ $ bzr add foo/bar
+ $ bzr commit
+ $ bzr mv foo/bar bar
+ $ rmdir foo
+ $ bzr commit
+ """
+ a_wt = self.make_branch_and_tree('a')
+ os.mkdir('a/foo')
+ with file('a/foo/bar', 'wb') as f: f.write('A/FOO/BAR')
+ a_wt.add('foo')
+ a_wt.add('foo/bar')
+ a_wt.commit('added foo/bar')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ b_wt.rename_one('foo/bar', 'bar')
+ os.rmdir('b/foo')
+ b_wt.remove('foo')
+ b_wt.commit('moved foo/bar to bar, deleted foo')
+ a_wt.merge_from_branch(b_wt.branch, b_wt.branch.last_revision(),
+ b_wt.branch.get_rev_id(1))
+
+ def test_merge_delete_before_rename_to_temp(self):
+ """delete before rename to temp
+
+ This case requires that you must not do
+ move-out-of-the-way before deletes:
+
+ $ touch foo
+ $ touch bar
+ $ bzr add foo bar
+ $ bzr commit
+ $ rm foo
+ $ bzr rm foo
+ $ bzr mv bar foo
+ $ bzr commit
+ """
+ a_wt = self.make_branch_and_tree('a')
+ with file('a/foo', 'wb') as f: f.write('A/FOO')
+ with file('a/bar', 'wb') as f: f.write('A/BAR')
+ a_wt.add('foo')
+ a_wt.add('bar')
+ a_wt.commit('added foo and bar')
+ self.run_bzr('branch a b')
+ b_wt = WorkingTree.open('b')
+ os.unlink('b/foo')
+ b_wt.remove('foo')
+ b_wt.rename_one('bar', 'foo')
+ b_wt.commit('deleted foo, renamed bar to foo')
+ a_wt.merge_from_branch(b_wt.branch, b_wt.branch.last_revision(),
+ b_wt.branch.get_rev_id(1))
+
+
+class TestMerger(TestCaseWithTransport):
+
+ def set_up_trees(self):
+ this = self.make_branch_and_tree('this')
+ this.commit('rev1', rev_id='rev1')
+ other = this.bzrdir.sprout('other').open_workingtree()
+ this.commit('rev2a', rev_id='rev2a')
+ other.commit('rev2b', rev_id='rev2b')
+ return this, other
+
+ def test_from_revision_ids(self):
+ this, other = self.set_up_trees()
+ self.assertRaises(errors.NoSuchRevision, Merger.from_revision_ids,
+ None, this, 'rev2b')
+ this.lock_write()
+ self.addCleanup(this.unlock)
+ merger = Merger.from_revision_ids(None, this,
+ 'rev2b', other_branch=other.branch)
+ self.assertEqual('rev2b', merger.other_rev_id)
+ self.assertEqual('rev1', merger.base_rev_id)
+ merger = Merger.from_revision_ids(None, this,
+ 'rev2b', 'rev2a', other_branch=other.branch)
+ self.assertEqual('rev2a', merger.base_rev_id)
+
+ def test_from_uncommitted(self):
+ this, other = self.set_up_trees()
+ merger = Merger.from_uncommitted(this, other, None)
+ self.assertIs(other, merger.other_tree)
+ self.assertIs(None, merger.other_rev_id)
+ self.assertEqual('rev2b', merger.base_rev_id)
+
+ def prepare_for_merging(self):
+ this, other = self.set_up_trees()
+ other.commit('rev3', rev_id='rev3')
+ this.lock_write()
+ self.addCleanup(this.unlock)
+ return this, other
+
+ def test_from_mergeable(self):
+ this, other = self.prepare_for_merging()
+ md = merge_directive.MergeDirective2.from_objects(
+ other.branch.repository, 'rev3', 0, 0, 'this')
+ other.lock_read()
+ self.addCleanup(other.unlock)
+ merger, verified = Merger.from_mergeable(this, md,
+ None)
+ md.patch = None
+ merger, verified = Merger.from_mergeable(this, md,
+ None)
+ self.assertEqual('inapplicable', verified)
+ self.assertEqual('rev3', merger.other_rev_id)
+ self.assertEqual('rev1', merger.base_rev_id)
+ md.base_revision_id = 'rev2b'
+ merger, verified = Merger.from_mergeable(this, md,
+ None)
+ self.assertEqual('rev2b', merger.base_rev_id)
+
+ def test_from_mergeable_old_merge_directive(self):
+ this, other = self.prepare_for_merging()
+ other.lock_write()
+ self.addCleanup(other.unlock)
+ md = merge_directive.MergeDirective.from_objects(
+ other.branch.repository, 'rev3', 0, 0, 'this')
+ merger, verified = Merger.from_mergeable(this, md,
+ None)
+ self.assertEqual('rev3', merger.other_rev_id)
+ self.assertEqual('rev1', merger.base_rev_id)
diff --git a/bzrlib/tests/test_merge_directive.py b/bzrlib/tests/test_merge_directive.py
new file mode 100644
index 0000000..7f7eec3
--- /dev/null
+++ b/bzrlib/tests/test_merge_directive.py
@@ -0,0 +1,787 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import re
+
+from bzrlib import (
+ errors,
+ gpg,
+ mail_client,
+ merge_directive,
+ tests,
+ trace,
+ )
+
+
+OUTPUT1 = """# Bazaar merge directive format 1
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+#\x20
+booga"""
+
+OUTPUT1_2 = """# Bazaar merge directive format 2 (Bazaar 0.90)
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+# base_revision_id: null:
+#\x20
+# Begin bundle
+booga"""
+
+OUTPUT2 = """# Bazaar merge directive format 1
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+# source_branch: http://example.org
+# message: Hi mom!
+#\x20
+booga"""
+
+OUTPUT2_2 = """# Bazaar merge directive format 2 (Bazaar 0.90)
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+# source_branch: http://example.org
+# message: Hi mom!
+# base_revision_id: null:
+#\x20
+# Begin patch
+booga"""
+
+INPUT1 = """
+I was thinking today about creating a merge directive.
+
+So I did.
+
+Here it is.
+
+(I've pasted it in the body of this message)
+
+Aaron
+
+# Bazaar merge directive format 1\r
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+# source_branch: http://example.org
+# message: Hi mom!
+#\x20
+booga""".splitlines(True)
+
+
+INPUT1_2 = """
+I was thinking today about creating a merge directive.
+
+So I did.
+
+Here it is.
+
+(I've pasted it in the body of this message)
+
+Aaron
+
+# Bazaar merge directive format 2 (Bazaar 0.90)\r
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+# source_branch: http://example.org
+# base_revision_id: null:
+# message: Hi mom!
+#\x20
+# Begin patch
+booga""".splitlines(True)
+
+
+INPUT1_2_OLD = """
+I was thinking today about creating a merge directive.
+
+So I did.
+
+Here it is.
+
+(I've pasted it in the body of this message)
+
+Aaron
+
+# Bazaar merge directive format 2 (Bazaar 0.19)\r
+# revision_id: example:
+# target_branch: http://example.com
+# testament_sha1: sha
+# timestamp: 1970-01-01 00:09:33 +0002
+# source_branch: http://example.org
+# base_revision_id: null:
+# message: Hi mom!
+#\x20
+# Begin patch
+booga""".splitlines(True)
+
+
+OLD_DIRECTIVE_2 = """# Bazaar merge directive format 2 (Bazaar 0.19)
+# revision_id: abentley@panoramicfeedback.com-20070807234458-\
+# nzhkoyza56lan7z5
+# target_branch: http://panoramicfeedback.com/opensource/bzr/repo\
+# /bzr.ab
+# testament_sha1: d825a5cdb267a90ec2ba86b00895f3d8a9bed6bf
+# timestamp: 2007-08-10 16:15:02 -0400
+# source_branch: http://panoramicfeedback.com/opensource/bzr/repo\
+# /bzr.ab
+# base_revision_id: abentley@panoramicfeedback.com-20070731163346-\
+# 623xwcycwij91xen
+#
+""".splitlines(True)
+
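+# How the sample directive texts above are parsed (a sketch mirroring
+# test_deserialize_leading_junk below; from_lines auto-detects the format):
+#
+#   md = merge_directive.MergeDirective.from_lines(INPUT1)
+#   md.revision_id    # -> 'example:'
+#   md.patch          # -> 'booga'
+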
+
+class TestMergeDirective(object):
+
+ def test_merge_source(self):
+ time = 500000.0
+ timezone = 5 * 3600
+ self.assertRaises(errors.NoMergeSource, self.make_merge_directive,
+ 'example:', 'sha', time, timezone, 'http://example.com')
+ self.assertRaises(errors.NoMergeSource, self.make_merge_directive,
+ 'example:', 'sha', time, timezone, 'http://example.com',
+ patch_type='diff')
+ self.make_merge_directive('example:', 'sha', time, timezone,
+ 'http://example.com', source_branch='http://example.org')
+ md = self.make_merge_directive('null:', 'sha', time, timezone,
+ 'http://example.com', patch='blah', patch_type='bundle')
+ self.assertIs(None, md.source_branch)
+ md2 = self.make_merge_directive('null:', 'sha', time, timezone,
+ 'http://example.com', patch='blah', patch_type='bundle',
+ source_branch='bar')
+ self.assertEqual('bar', md2.source_branch)
+
+ def test_serialization(self):
+ time = 453
+ timezone = 120
+ md = self.make_merge_directive('example:', 'sha', time, timezone,
+ 'http://example.com', patch='booga', patch_type='bundle')
+ self.assertEqualDiff(self.OUTPUT1, ''.join(md.to_lines()))
+ md = self.make_merge_directive('example:', 'sha', time, timezone,
+ 'http://example.com', source_branch="http://example.org",
+ patch='booga', patch_type='diff', message="Hi mom!")
+ self.assertEqualDiff(self.OUTPUT2, ''.join(md.to_lines()))
+
+ def test_deserialize_junk(self):
+ time = 501
+ self.assertRaises(errors.NotAMergeDirective,
+ merge_directive.MergeDirective.from_lines, 'lala')
+
+ def test_deserialize_empty(self):
+ self.assertRaises(errors.NotAMergeDirective,
+ merge_directive.MergeDirective.from_lines, [])
+
+ def test_deserialize_leading_junk(self):
+ md = merge_directive.MergeDirective.from_lines(self.INPUT1)
+ self.assertEqual('example:', md.revision_id)
+ self.assertEqual('sha', md.testament_sha1)
+ self.assertEqual('http://example.com', md.target_branch)
+ self.assertEqual('http://example.org', md.source_branch)
+ self.assertEqual(453, md.time)
+ self.assertEqual(120, md.timezone)
+ self.assertEqual('booga', md.patch)
+ self.assertEqual('diff', md.patch_type)
+ self.assertEqual('Hi mom!', md.message)
+
+ def test_roundtrip(self):
+ time = 500000
+ timezone = 7.5 * 3600
+ md = self.make_merge_directive('example:', 'sha', time, timezone,
+ 'http://example.com', source_branch="http://example.org",
+ patch='booga', patch_type='diff')
+ md2 = merge_directive.MergeDirective.from_lines(md.to_lines())
+ self.assertEqual('example:', md2.revision_id)
+ self.assertIsInstance(md2.revision_id, str)
+ self.assertEqual('sha', md2.testament_sha1)
+ self.assertEqual('http://example.com', md2.target_branch)
+ self.assertEqual('http://example.org', md2.source_branch)
+ self.assertEqual(time, md2.time)
+ self.assertEqual(timezone, md2.timezone)
+ self.assertEqual('diff', md2.patch_type)
+ self.assertEqual('booga', md2.patch)
+ self.assertEqual(None, md2.message)
+ self.set_bundle(md, "# Bazaar revision bundle v0.9\n#\n")
+ md.message = "Hi mom!"
+ lines = md.to_lines()
+ md3 = merge_directive.MergeDirective.from_lines(lines)
+ self.assertEqual("# Bazaar revision bundle v0.9\n#\n", md3.bundle)
+ self.assertEqual("bundle", md3.patch_type)
+ self.assertContainsRe(md3.to_lines()[0],
+ '^# Bazaar merge directive format ')
+ self.assertEqual("Hi mom!", md3.message)
+ md3.clear_payload()
+ self.assertIs(None, md3.get_raw_bundle())
+ md4 = merge_directive.MergeDirective.from_lines(md3.to_lines())
+ self.assertIs(None, md4.patch_type)
+
+
+class TestMergeDirective1(tests.TestCase, TestMergeDirective):
+ """Test merge directive format 1"""
+
+ INPUT1 = INPUT1
+
+ OUTPUT1 = OUTPUT1
+
+ OUTPUT2 = OUTPUT2
+
+ def make_merge_directive(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, patch_type=None,
+ source_branch=None, message=None):
+ return merge_directive.MergeDirective(revision_id, testament_sha1,
+ time, timezone, target_branch, patch, patch_type,
+ source_branch, message)
+
+ @staticmethod
+ def set_bundle(md, value):
+ md.patch = value
+
+ def test_require_patch(self):
+ time = 500.0
+ timezone = 120
+ self.assertRaises(errors.PatchMissing, merge_directive.MergeDirective,
+ 'example:', 'sha', time, timezone, 'http://example.com',
+ patch_type='bundle')
+ md = merge_directive.MergeDirective('example:', 'sha1', time, timezone,
+ 'http://example.com', source_branch="http://example.org",
+ patch='', patch_type='diff')
+ self.assertEqual(md.patch, '')
+
+
+class TestMergeDirective2(tests.TestCase, TestMergeDirective):
+ """Test merge directive format 2"""
+
+ INPUT1 = INPUT1_2
+
+ OUTPUT1 = OUTPUT1_2
+
+ OUTPUT2 = OUTPUT2_2
+
+ def make_merge_directive(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, patch_type=None,
+ source_branch=None, message=None, base_revision_id='null:'):
+ if patch_type == 'bundle':
+ bundle = patch
+ patch = None
+ else:
+ bundle = None
+ return merge_directive.MergeDirective2(revision_id, testament_sha1,
+ time, timezone, target_branch, patch, source_branch, message,
+ bundle, base_revision_id)
+
+ @staticmethod
+ def set_bundle(md, value):
+ md.bundle = value
+
+
+EMAIL1 = """From: "J. Random Hacker" <jrandom@example.com>
+Subject: Commit of rev2a
+To: pqm@example.com
+User-Agent: Bazaar \(.*\)
+
+# Bazaar merge directive format 1
+# revision_id: rev2a
+# target_branch: (.|\n)*
+# testament_sha1: .*
+# timestamp: 1970-01-01 00:08:56 \\+0001
+# source_branch: (.|\n)*
+"""
+
+
+EMAIL1_2 = """From: "J. Random Hacker" <jrandom@example.com>
+Subject: Commit of rev2a
+To: pqm@example.com
+User-Agent: Bazaar \(.*\)
+
+# Bazaar merge directive format 2 \\(Bazaar 0.90\\)
+# revision_id: rev2a
+# target_branch: (.|\n)*
+# testament_sha1: .*
+# timestamp: 1970-01-01 00:08:56 \\+0001
+# source_branch: (.|\n)*
+"""
+
+
+EMAIL2 = """From: "J. Random Hacker" <jrandom@example.com>
+Subject: Commit of rev2a with special message
+To: pqm@example.com
+User-Agent: Bazaar \(.*\)
+
+# Bazaar merge directive format 1
+# revision_id: rev2a
+# target_branch: (.|\n)*
+# testament_sha1: .*
+# timestamp: 1970-01-01 00:08:56 \\+0001
+# source_branch: (.|\n)*
+# message: Commit of rev2a with special message
+"""
+
+EMAIL2_2 = """From: "J. Random Hacker" <jrandom@example.com>
+Subject: Commit of rev2a with special message
+To: pqm@example.com
+User-Agent: Bazaar \(.*\)
+
+# Bazaar merge directive format 2 \\(Bazaar 0.90\\)
+# revision_id: rev2a
+# target_branch: (.|\n)*
+# testament_sha1: .*
+# timestamp: 1970-01-01 00:08:56 \\+0001
+# source_branch: (.|\n)*
+# message: Commit of rev2a with special message
+"""
+
+class TestMergeDirectiveBranch(object):
+
+ def make_trees(self):
+ tree_a = self.make_branch_and_tree('tree_a')
+ tree_a.branch.get_config_stack().set(
+ 'email', 'J. Random Hacker <jrandom@example.com>')
+ self.build_tree_contents([('tree_a/file', 'content_a\ncontent_b\n'),
+ ('tree_a/file_2', 'content_x\rcontent_y\r')])
+ tree_a.add(['file', 'file_2'])
+ tree_a.commit('message', rev_id='rev1')
+ tree_b = tree_a.bzrdir.sprout('tree_b').open_workingtree()
+ branch_c = tree_a.bzrdir.sprout('branch_c').open_branch()
+ tree_b.commit('message', rev_id='rev2b')
+ self.build_tree_contents([('tree_a/file', 'content_a\ncontent_c \n'),
+ ('tree_a/file_2', 'content_x\rcontent_z\r')])
+ tree_a.commit('Commit of rev2a', rev_id='rev2a')
+ return tree_a, tree_b, branch_c
+
+ def test_empty_target(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ tree_d = self.make_branch_and_tree('tree_d')
+ md2 = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 120,
+ tree_d.branch.base, patch_type='diff',
+ public_branch=tree_a.branch.base)
+
+ def test_disk_name(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ tree_a.branch.nick = 'fancy <name>'
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 120,
+ tree_b.branch.base)
+ self.assertEqual('fancy-name-2', md.get_disk_name(tree_a.branch))
+
+ def test_disk_name_old_revno(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ tree_a.branch.nick = 'fancy-name'
+ md = self.from_objects(tree_a.branch.repository, 'rev1', 500, 120,
+ tree_b.branch.base)
+ self.assertEqual('fancy-name-1', md.get_disk_name(tree_a.branch))
+
+ def test_generate_patch(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md2 = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 120,
+ tree_b.branch.base, patch_type='diff',
+ public_branch=tree_a.branch.base)
+ self.assertNotContainsRe(md2.patch, 'Bazaar revision bundle')
+ self.assertContainsRe(md2.patch, '\\+content_c')
+ self.assertNotContainsRe(md2.patch, '\\+\\+\\+ b/')
+ self.assertContainsRe(md2.patch, '\\+\\+\\+ file')
+
+ def test_public_branch(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ self.assertRaises(errors.PublicBranchOutOfDate,
+ self.from_objects, tree_a.branch.repository, 'rev2a', 500, 144,
+ tree_b.branch.base, public_branch=branch_c.base, patch_type='diff')
+ self.assertRaises(errors.PublicBranchOutOfDate,
+ self.from_objects, tree_a.branch.repository, 'rev2a', 500, 144,
+ tree_b.branch.base, public_branch=branch_c.base, patch_type=None)
+ # public branch is not checked if patch format is bundle.
+ md1 = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 144,
+ tree_b.branch.base, public_branch=branch_c.base)
+ # public branch is provided with a bundle, despite possibly being out
+ # of date, because it's not required if a bundle is present.
+ self.assertEqual(md1.source_branch, branch_c.base)
+ # Once we update the public branch, we can generate a diff.
+ branch_c.pull(tree_a.branch)
+ md3 = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 144,
+ tree_b.branch.base, patch_type=None, public_branch=branch_c.base)
+
+ def test_use_public_submit_branch(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ branch_c.pull(tree_a.branch)
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 144,
+ tree_b.branch.base, patch_type=None, public_branch=branch_c.base)
+ self.assertEqual(md.target_branch, tree_b.branch.base)
+ tree_b.branch.set_public_branch('http://example.com')
+ md2 = self.from_objects(
+ tree_a.branch.repository, 'rev2a', 500, 144, tree_b.branch.base,
+ patch_type=None, public_branch=branch_c.base)
+ self.assertEqual(md2.target_branch, 'http://example.com')
+
+ def test_message(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md3 = self.from_objects(tree_a.branch.repository, 'rev1', 500, 120,
+ tree_b.branch.base, patch_type=None, public_branch=branch_c.base,
+ message='Merge message')
+ md3.to_lines()
+ self.assertIs(None, md3.patch)
+ self.assertEqual('Merge message', md3.message)
+
+ def test_generate_bundle(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md1 = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 120,
+ tree_b.branch.base, public_branch=branch_c.base)
+
+ self.assertContainsRe(md1.get_raw_bundle(), 'Bazaar revision bundle')
+ self.assertContainsRe(md1.patch, '\\+content_c')
+ self.assertNotContainsRe(md1.patch, '\\+content_a')
+ self.assertContainsRe(md1.patch, '\\+content_c')
+ self.assertNotContainsRe(md1.patch, '\\+content_a')
+
+ def test_broken_bundle(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md1 = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 120,
+ tree_b.branch.base, public_branch=branch_c.base)
+ lines = md1.to_lines()
+ lines = [l.replace('\n', '\r\n') for l in lines]
+ md2 = merge_directive.MergeDirective.from_lines(lines)
+ self.assertEqual('rev2a', md2.revision_id)
+
+ def test_signing(self):
+ time = 453
+ timezone = 7200
+ class FakeBranch(object):
+ def get_config_stack(self):
+ return self
+ def gpg_signing_command(self):
+ return 'loopback'
+ md = self.make_merge_directive('example:', 'sha', time, timezone,
+ 'http://example.com', source_branch="http://example.org",
+ patch='booga', patch_type='diff')
+ old_strategy = gpg.GPGStrategy
+ gpg.GPGStrategy = gpg.LoopbackGPGStrategy
+ try:
+ signed = md.to_signed(FakeBranch())
+ finally:
+ gpg.GPGStrategy = old_strategy
+ self.assertContainsRe(signed, '^-----BEGIN PSEUDO-SIGNED CONTENT')
+ self.assertContainsRe(signed, 'example.org')
+ self.assertContainsRe(signed, 'booga')
+
+ def test_email(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 476, 60,
+ tree_b.branch.base, patch_type=None,
+ public_branch=tree_a.branch.base)
+ message = md.to_email('pqm@example.com', tree_a.branch)
+ self.assertContainsRe(message.as_string(), self.EMAIL1)
+ md.message = 'Commit of rev2a with special message'
+ message = md.to_email('pqm@example.com', tree_a.branch)
+ self.assertContainsRe(message.as_string(), self.EMAIL2)
+
+ def test_install_revisions_branch(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 36,
+ tree_b.branch.base, patch_type=None,
+ public_branch=tree_a.branch.base)
+ self.assertFalse(tree_b.branch.repository.has_revision('rev2a'))
+ revision = md.install_revisions(tree_b.branch.repository)
+ self.assertEqual('rev2a', revision)
+ self.assertTrue(tree_b.branch.repository.has_revision('rev2a'))
+
+ def test_get_merge_request(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 36,
+ tree_b.branch.base, patch_type='bundle',
+ public_branch=tree_a.branch.base)
+ self.assertFalse(tree_b.branch.repository.has_revision('rev2a'))
+ md.install_revisions(tree_b.branch.repository)
+ base, revision, verified = md.get_merge_request(
+ tree_b.branch.repository)
+ if isinstance(md, merge_directive.MergeDirective):
+ self.assertIs(None, base)
+ self.assertEqual('inapplicable', verified)
+ else:
+ self.assertEqual('rev1', base)
+ self.assertEqual('verified', verified)
+ self.assertEqual('rev2a', revision)
+ self.assertTrue(tree_b.branch.repository.has_revision('rev2a'))
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 36,
+ tree_b.branch.base, patch_type=None,
+ public_branch=tree_a.branch.base)
+ base, revision, verified = md.get_merge_request(
+ tree_b.branch.repository)
+ if isinstance(md, merge_directive.MergeDirective):
+ self.assertIs(None, base)
+ self.assertEqual('inapplicable', verified)
+ else:
+ self.assertEqual('rev1', base)
+ self.assertEqual('inapplicable', verified)
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 36,
+ tree_b.branch.base, patch_type='diff',
+ public_branch=tree_a.branch.base)
+ base, revision, verified = md.get_merge_request(
+ tree_b.branch.repository)
+ if isinstance(md, merge_directive.MergeDirective):
+ self.assertIs(None, base)
+ self.assertEqual('inapplicable', verified)
+ else:
+ self.assertEqual('rev1', base)
+ self.assertEqual('verified', verified)
+        md.patch = 'asdf'
+ base, revision, verified = md.get_merge_request(
+ tree_b.branch.repository)
+ if isinstance(md, merge_directive.MergeDirective):
+ self.assertIs(None, base)
+ self.assertEqual('inapplicable', verified)
+ else:
+ self.assertEqual('rev1', base)
+ self.assertEqual('failed', verified)
+
+ def test_install_revisions_bundle(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 36,
+ tree_b.branch.base, patch_type='bundle',
+ public_branch=tree_a.branch.base)
+ self.assertFalse(tree_b.branch.repository.has_revision('rev2a'))
+ revision = md.install_revisions(tree_b.branch.repository)
+ self.assertEqual('rev2a', revision)
+ self.assertTrue(tree_b.branch.repository.has_revision('rev2a'))
+
+ def test_get_target_revision_nofetch(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ tree_b.branch.fetch(tree_a.branch)
+        md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 36,
+ tree_b.branch.base, patch_type=None,
+ public_branch=tree_a.branch.base)
+ md.source_branch = '/dev/null'
+ revision = md.install_revisions(tree_b.branch.repository)
+ self.assertEqual('rev2a', revision)
+
+ def test_use_submit_for_missing_dependency(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ branch_c.pull(tree_a.branch)
+ self.build_tree_contents([('tree_a/file', 'content_q\ncontent_r\n')])
+ tree_a.commit('rev3a', rev_id='rev3a')
+ md = self.from_objects(tree_a.branch.repository, 'rev3a', 500, 36,
+ branch_c.base, base_revision_id='rev2a')
+ revision = md.install_revisions(tree_b.branch.repository)
+
+ def test_handle_target_not_a_branch(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ branch_c.pull(tree_a.branch)
+ self.build_tree_contents([('tree_a/file', 'content_q\ncontent_r\n')])
+ tree_a.commit('rev3a', rev_id='rev3a')
+ md = self.from_objects(tree_a.branch.repository, 'rev3a', 500, 36,
+ branch_c.base, base_revision_id='rev2a')
+ md.target_branch = self.get_url('not-a-branch')
+ self.assertRaises(errors.TargetNotBranch, md.install_revisions,
+ tree_b.branch.repository)
+
+
+class TestMergeDirective1Branch(tests.TestCaseWithTransport,
+ TestMergeDirectiveBranch):
+ """Test merge directive format 1 with a branch"""
+
+ EMAIL1 = EMAIL1
+
+ EMAIL2 = EMAIL2
+
+ def from_objects(self, repository, revision_id, time, timezone,
+ target_branch, patch_type='bundle', local_target_branch=None,
+ public_branch=None, message=None, base_revision_id=None):
+ if base_revision_id is not None:
+ raise tests.TestNotApplicable('This format does not support'
+ ' explicit bases.')
+ repository.lock_write()
+ try:
+            return merge_directive.MergeDirective.from_objects(repository,
+ revision_id, time, timezone, target_branch, patch_type,
+ local_target_branch, public_branch, message)
+ finally:
+ repository.unlock()
+
+ def make_merge_directive(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, patch_type=None,
+ source_branch=None, message=None):
+ return merge_directive.MergeDirective(revision_id, testament_sha1,
+ time, timezone, target_branch, patch, patch_type,
+ source_branch, message)
+
+
+class TestMergeDirective2Branch(tests.TestCaseWithTransport,
+ TestMergeDirectiveBranch):
+ """Test merge directive format 2 with a branch"""
+
+ EMAIL1 = EMAIL1_2
+
+ EMAIL2 = EMAIL2_2
+
+ def from_objects(self, repository, revision_id, time, timezone,
+ target_branch, patch_type='bundle', local_target_branch=None,
+ public_branch=None, message=None, base_revision_id=None):
+ include_patch = (patch_type in ('bundle', 'diff'))
+ include_bundle = (patch_type == 'bundle')
+ self.assertTrue(patch_type in ('bundle', 'diff', None))
+ return merge_directive.MergeDirective2.from_objects(
+ repository, revision_id, time, timezone, target_branch,
+ include_patch, include_bundle, local_target_branch, public_branch,
+ message, base_revision_id)
+
+ def make_merge_directive(self, revision_id, testament_sha1, time, timezone,
+ target_branch, patch=None, patch_type=None,
+ source_branch=None, message=None, base_revision_id='null:'):
+ if patch_type == 'bundle':
+ bundle = patch
+ patch = None
+ else:
+ bundle = None
+ return merge_directive.MergeDirective2(revision_id, testament_sha1,
+ time, timezone, target_branch, patch, source_branch, message,
+ bundle, base_revision_id)
+
+ def test_base_revision(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 60,
+ tree_b.branch.base, patch_type='bundle',
+ public_branch=tree_a.branch.base, base_revision_id=None)
+ self.assertEqual('rev1', md.base_revision_id)
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 60,
+ tree_b.branch.base, patch_type='bundle',
+ public_branch=tree_a.branch.base, base_revision_id='null:')
+ self.assertEqual('null:', md.base_revision_id)
+ lines = md.to_lines()
+ md2 = merge_directive.MergeDirective.from_lines(lines)
+ self.assertEqual(md2.base_revision_id, md.base_revision_id)
+
+ def test_patch_verification(self):
+ tree_a, tree_b, branch_c = self.make_trees()
+ md = self.from_objects(tree_a.branch.repository, 'rev2a', 500, 60,
+ tree_b.branch.base, patch_type='bundle',
+ public_branch=tree_a.branch.base)
+ lines = md.to_lines()
+ md2 = merge_directive.MergeDirective.from_lines(lines)
+ md2._verify_patch(tree_a.branch.repository)
+ # Strip trailing whitespace
+ md2.patch = md2.patch.replace(' \n', '\n')
+ md2._verify_patch(tree_a.branch.repository)
+ # Convert to Mac line-endings
+ md2.patch = re.sub('(\r\n|\r|\n)', '\r', md2.patch)
+ self.assertTrue(md2._verify_patch(tree_a.branch.repository))
+ # Convert to DOS line-endings
+ md2.patch = re.sub('(\r\n|\r|\n)', '\r\n', md2.patch)
+ self.assertTrue(md2._verify_patch(tree_a.branch.repository))
+ md2.patch = md2.patch.replace('content_c', 'content_d')
+ self.assertFalse(md2._verify_patch(tree_a.branch.repository))
+
+
+class TestParseOldMergeDirective2(tests.TestCase):
+
+ def test_parse_old_merge_directive(self):
+ md = merge_directive.MergeDirective.from_lines(INPUT1_2_OLD)
+ self.assertEqual('example:', md.revision_id)
+ self.assertEqual('sha', md.testament_sha1)
+ self.assertEqual('http://example.com', md.target_branch)
+ self.assertEqual('http://example.org', md.source_branch)
+ self.assertEqual(453, md.time)
+ self.assertEqual(120, md.timezone)
+ self.assertEqual('booga', md.patch)
+ self.assertEqual('diff', md.patch_type)
+ self.assertEqual('Hi mom!', md.message)
+
+
+class TestHook(object):
+ """Hook callback for test purposes."""
+
+ def __init__(self, result=None):
+ self.calls = []
+ self.result = result
+
+ def __call__(self, params):
+ self.calls.append(params)
+ return self.result
+
+
+class HookMailClient(mail_client.MailClient):
+ """Mail client for testing hooks."""
+
+ def __init__(self, config):
+ self.body = None
+ self.config = config
+
+ def compose(self, prompt, to, subject, attachment, mime_subtype,
+ extension, basename=None, body=None):
+ self.body = body
+
+
+class TestBodyHook(tests.TestCaseWithTransport):
+
+ def compose_with_hooks(self, test_hooks, supports_body=True):
+ client = HookMailClient({})
+ client.supports_body = supports_body
+ for test_hook in test_hooks:
+ merge_directive.MergeDirective.hooks.install_named_hook(
+ 'merge_request_body', test_hook, 'test')
+ tree = self.make_branch_and_tree('foo')
+ tree.commit('foo')
+ directive = merge_directive.MergeDirective2(
+ tree.branch.last_revision(), 'sha', 0, 0, 'sha',
+ source_branch=tree.branch.base,
+ base_revision_id=tree.branch.last_revision(),
+ message='This code rox')
+ directive.compose_merge_request(client, 'jrandom@example.com',
+ None, tree.branch)
+ return client, directive
+
+ def test_no_supports_body(self):
+ test_hook = TestHook('foo')
+ old_warn = trace.warning
+ warnings = []
+ def warn(*args):
+ warnings.append(args)
+ trace.warning = warn
+ try:
+ client, directive = self.compose_with_hooks([test_hook],
+ supports_body=False)
+ finally:
+ trace.warning = old_warn
+ self.assertEqual(0, len(test_hook.calls))
+ self.assertEqual(('Cannot run merge_request_body hooks because mail'
+ ' client %s does not support message bodies.',
+ 'HookMailClient'), warnings[0])
+
+ def test_body_hook(self):
+ test_hook = TestHook('foo')
+ client, directive = self.compose_with_hooks([test_hook])
+ self.assertEqual(1, len(test_hook.calls))
+ self.assertEqual('foo', client.body)
+ params = test_hook.calls[0]
+ self.assertIsInstance(params,
+ merge_directive.MergeRequestBodyParams)
+ self.assertIs(None, params.body)
+ self.assertIs(None, params.orig_body)
+ self.assertEqual('jrandom@example.com', params.to)
+ self.assertEqual('[MERGE] This code rox', params.subject)
+ self.assertEqual(directive, params.directive)
+ self.assertEqual('foo-1', params.basename)
+
+ def test_body_hook_chaining(self):
+ test_hook1 = TestHook('foo')
+ test_hook2 = TestHook('bar')
+ client = self.compose_with_hooks([test_hook1, test_hook2])[0]
+ self.assertEqual(None, test_hook1.calls[0].body)
+ self.assertEqual(None, test_hook1.calls[0].orig_body)
+ self.assertEqual('foo', test_hook2.calls[0].body)
+ self.assertEqual(None, test_hook2.calls[0].orig_body)
+ self.assertEqual('bar', client.body)
diff --git a/bzrlib/tests/test_mergetools.py b/bzrlib/tests/test_mergetools.py
new file mode 100644
index 0000000..ba6a4d6
--- /dev/null
+++ b/bzrlib/tests/test_mergetools.py
@@ -0,0 +1,177 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import sys
+import tempfile
+
+from bzrlib import (
+ mergetools,
+ tests
+)
+
+
+class TestFilenameSubstitution(tests.TestCaseInTempDir):
+
+ def test_simple_filename(self):
+ cmd_list = ['kdiff3', '{base}', '{this}', '{other}', '-o', '{result}']
+ args, tmpfile = mergetools._subst_filename(cmd_list, 'test.txt')
+ self.assertEqual(
+ ['kdiff3',
+ 'test.txt.BASE',
+ 'test.txt.THIS',
+ 'test.txt.OTHER',
+ '-o',
+ 'test.txt'],
+ args)
+
+ def test_spaces(self):
+ cmd_list = ['kdiff3', '{base}', '{this}', '{other}', '-o', '{result}']
+ args, tmpfile = mergetools._subst_filename(cmd_list,
+ 'file with space.txt')
+ self.assertEqual(
+ ['kdiff3',
+ 'file with space.txt.BASE',
+ 'file with space.txt.THIS',
+ 'file with space.txt.OTHER',
+ '-o',
+ 'file with space.txt'],
+ args)
+
+ def test_spaces_and_quotes(self):
+ cmd_list = ['kdiff3', '{base}', '{this}', '{other}', '-o', '{result}']
+ args, tmpfile = mergetools._subst_filename(cmd_list,
+ 'file with "space and quotes".txt')
+ self.assertEqual(
+ ['kdiff3',
+ 'file with "space and quotes".txt.BASE',
+ 'file with "space and quotes".txt.THIS',
+ 'file with "space and quotes".txt.OTHER',
+ '-o',
+ 'file with "space and quotes".txt'],
+ args)
+
+ def test_tempfile(self):
+ self.build_tree(('test.txt', 'test.txt.BASE', 'test.txt.THIS',
+ 'test.txt.OTHER'))
+ cmd_list = ['some_tool', '{this_temp}']
+ args, tmpfile = mergetools._subst_filename(cmd_list, 'test.txt')
+ self.assertPathExists(tmpfile)
+ os.remove(tmpfile)
+
+
+class TestCheckAvailability(tests.TestCaseInTempDir):
+
+ def test_full_path(self):
+ self.assertTrue(mergetools.check_availability(sys.executable))
+
+ def test_exe_on_path(self):
+ self.assertTrue(mergetools.check_availability('python'))
+
+ def test_nonexistent(self):
+ self.assertFalse(mergetools.check_availability('DOES NOT EXIST'))
+
+ def test_non_executable(self):
+ f, name = tempfile.mkstemp()
+ try:
+ self.log('temp filename: %s', name)
+ self.assertFalse(mergetools.check_availability(name))
+ finally:
+ os.close(f)
+ os.unlink(name)
+
+
+class TestInvoke(tests.TestCaseInTempDir):
+ def setUp(self):
+ super(TestInvoke, self).setUp()
+ self._exe = None
+ self._args = None
+ self.build_tree_contents((
+ ('test.txt', 'stuff'),
+ ('test.txt.BASE', 'base stuff'),
+ ('test.txt.THIS', 'this stuff'),
+ ('test.txt.OTHER', 'other stuff'),
+ ))
+
+ def test_invoke_expands_exe_path(self):
+ self.overrideEnv('PATH', os.path.dirname(sys.executable))
+ def dummy_invoker(exe, args, cleanup):
+ self._exe = exe
+ self._args = args
+ cleanup(0)
+ return 0
+ command = '%s {result}' % os.path.basename(sys.executable)
+ retcode = mergetools.invoke(command, 'test.txt', dummy_invoker)
+ self.assertEqual(0, retcode)
+ self.assertEqual(sys.executable, self._exe)
+ self.assertEqual(['test.txt'], self._args)
+
+ def test_success(self):
+ def dummy_invoker(exe, args, cleanup):
+ self._exe = exe
+ self._args = args
+ cleanup(0)
+ return 0
+ retcode = mergetools.invoke('tool {result}', 'test.txt', dummy_invoker)
+ self.assertEqual(0, retcode)
+ self.assertEqual('tool', self._exe)
+ self.assertEqual(['test.txt'], self._args)
+
+ def test_failure(self):
+ def dummy_invoker(exe, args, cleanup):
+ self._exe = exe
+ self._args = args
+ cleanup(1)
+ return 1
+ retcode = mergetools.invoke('tool {result}', 'test.txt', dummy_invoker)
+ self.assertEqual(1, retcode)
+ self.assertEqual('tool', self._exe)
+ self.assertEqual(['test.txt'], self._args)
+
+ def test_success_tempfile(self):
+ def dummy_invoker(exe, args, cleanup):
+ self._exe = exe
+ self._args = args
+ self.assertPathExists(args[0])
+ f = open(args[0], 'wt')
+ f.write('temp stuff')
+ f.close()
+ cleanup(0)
+ return 0
+ retcode = mergetools.invoke('tool {this_temp}', 'test.txt',
+ dummy_invoker)
+ self.assertEqual(0, retcode)
+ self.assertEqual('tool', self._exe)
+ self.assertPathDoesNotExist(self._args[0])
+ self.assertFileEqual('temp stuff', 'test.txt')
+
+ def test_failure_tempfile(self):
+ def dummy_invoker(exe, args, cleanup):
+ self._exe = exe
+ self._args = args
+ self.assertPathExists(args[0])
+ self.log(repr(args))
+ f = open(args[0], 'wt')
+ self.log(repr(f))
+ f.write('temp stuff')
+ f.close()
+ cleanup(1)
+ return 1
+ retcode = mergetools.invoke('tool {this_temp}', 'test.txt',
+ dummy_invoker)
+ self.assertEqual(1, retcode)
+ self.assertEqual('tool', self._exe)
+ self.assertFileEqual('stuff', 'test.txt')
diff --git a/bzrlib/tests/test_missing.py b/bzrlib/tests/test_missing.py
new file mode 100644
index 0000000..a9e11d1
--- /dev/null
+++ b/bzrlib/tests/test_missing.py
@@ -0,0 +1,281 @@
+# Copyright (C) 2005-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ missing,
+ tests,
+ )
+from bzrlib.missing import (
+ iter_log_revisions,
+ )
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestMissing(TestCaseWithTransport):
+
+ def assertUnmerged(self, expected, source, target, restrict='all',
+ backward=False):
+ unmerged = missing.find_unmerged(source, target, restrict=restrict,
+ backward=backward)
+ self.assertEqual(expected, unmerged)
+
+ def test_find_unmerged(self):
+ original_tree = self.make_branch_and_tree('original')
+ original = original_tree.branch
+ puller_tree = self.make_branch_and_tree('puller')
+ puller = puller_tree.branch
+ merger_tree = self.make_branch_and_tree('merger')
+ merger = merger_tree.branch
+ self.assertUnmerged(([], []), original, puller)
+ original_tree.commit('a', rev_id='a')
+ self.assertUnmerged(([('1', 'a')], []), original, puller)
+ puller_tree.pull(original)
+ self.assertUnmerged(([], []), original, puller)
+ merger_tree.pull(original)
+ original_tree.commit('b', rev_id='b')
+ original_tree.commit('c', rev_id='c')
+ self.assertUnmerged(([('2', 'b'), ('3', 'c')], []),
+ original, puller)
+ self.assertUnmerged(([('3', 'c'), ('2', 'b')], []),
+ original, puller, backward=True)
+
+ puller_tree.pull(original)
+ self.assertUnmerged(([], []), original, puller)
+ self.assertUnmerged(([('2', 'b'), ('3', 'c')], []),
+ original, merger)
+ merger_tree.merge_from_branch(original)
+ self.assertUnmerged(([('2', 'b'), ('3', 'c')], []),
+ original, merger)
+ merger_tree.commit('d', rev_id='d')
+ self.assertUnmerged(([], [('2', 'd')]), original, merger)
+
+ def test_iter_log_revisions(self):
+ base_tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/a'])
+ base_tree.add(['a'], ['a-id'])
+ base_tree.commit('add a', rev_id='b-1')
+
+ child_tree = base_tree.bzrdir.sprout('child').open_workingtree()
+
+ self.build_tree(['child/b'])
+ child_tree.add(['b'], ['b-id'])
+ child_tree.commit('adding b', rev_id='c-2')
+
+ child_tree.remove(['a'])
+ child_tree.commit('removing a', rev_id='c-3')
+
+ self.build_tree_contents([('child/b', 'new contents for b\n')])
+ child_tree.commit('modifying b', rev_id='c-4')
+
+ child_tree.rename_one('b', 'c')
+ child_tree.commit('rename b=>c', rev_id='c-5')
+
+ base_extra, child_extra = missing.find_unmerged(base_tree.branch,
+ child_tree.branch)
+ results = list(iter_log_revisions(base_extra,
+ base_tree.branch.repository,
+ verbose=True))
+ self.assertEqual([], results)
+
+ results = list(iter_log_revisions(child_extra,
+ child_tree.branch.repository,
+ verbose=True))
+ self.assertEqual(4, len(results))
+
+ r0, r1, r2, r3 = results
+
+ self.assertEqual([('2', 'c-2'), ('3', 'c-3'),
+ ('4', 'c-4'), ('5', 'c-5'),],
+ [(r.revno, r.rev.revision_id) for r in results])
+
+ delta0 = r0.delta
+ self.assertNotEqual(None, delta0)
+ self.assertEqual([('b', 'b-id', 'file')], delta0.added)
+ self.assertEqual([], delta0.removed)
+ self.assertEqual([], delta0.renamed)
+ self.assertEqual([], delta0.modified)
+
+ delta1 = r1.delta
+ self.assertNotEqual(None, delta1)
+ self.assertEqual([], delta1.added)
+ self.assertEqual([('a', 'a-id', 'file')], delta1.removed)
+ self.assertEqual([], delta1.renamed)
+ self.assertEqual([], delta1.modified)
+
+ delta2 = r2.delta
+ self.assertNotEqual(None, delta2)
+ self.assertEqual([], delta2.added)
+ self.assertEqual([], delta2.removed)
+ self.assertEqual([], delta2.renamed)
+ self.assertEqual([('b', 'b-id', 'file', True, False)], delta2.modified)
+
+ delta3 = r3.delta
+ self.assertNotEqual(None, delta3)
+ self.assertEqual([], delta3.added)
+ self.assertEqual([], delta3.removed)
+ self.assertEqual([('b', 'c', 'b-id', 'file', False, False)],
+ delta3.renamed)
+ self.assertEqual([], delta3.modified)
+
+
+class TestFindUnmerged(tests.TestCaseWithTransport):
+
+ def assertUnmerged(self, local, remote, local_branch, remote_branch,
+ restrict='all', include_merged=False, backward=False,
+ local_revid_range=None, remote_revid_range=None):
+ """Check the output of find_unmerged_mainline_revisions"""
+ local_extra, remote_extra = missing.find_unmerged(
+ local_branch, remote_branch, restrict,
+ include_merged=include_merged, backward=backward,
+ local_revid_range=local_revid_range,
+ remote_revid_range=remote_revid_range)
+ self.assertEqual(local, local_extra)
+ self.assertEqual(remote, remote_extra)
+
+ def test_same_branch(self):
+ tree = self.make_branch_and_tree('tree')
+ rev1 = tree.commit('one')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertUnmerged([], [], tree.branch, tree.branch)
+ self.assertUnmerged([], [], tree.branch, tree.branch,
+ local_revid_range=(rev1, rev1))
+
+ def test_one_ahead(self):
+ tree = self.make_branch_and_tree('tree')
+ rev1 = tree.commit('one')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ rev2 = tree2.commit('two')
+ self.assertUnmerged([], [('2', rev2)], tree.branch, tree2.branch)
+ self.assertUnmerged([('2', rev2)], [], tree2.branch, tree.branch)
+
+ def test_restrict(self):
+ tree = self.make_branch_and_tree('tree')
+ rev1 = tree.commit('one')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ rev2 = tree2.commit('two')
+ self.assertUnmerged([], [('2', rev2)], tree.branch, tree2.branch)
+ self.assertUnmerged([], None, tree.branch, tree2.branch, 'local')
+ self.assertUnmerged(None, [('2', rev2)], tree.branch, tree2.branch,
+ 'remote')
+
+ def test_merged(self):
+ tree = self.make_branch_and_tree('tree')
+ rev1 = tree.commit('one')
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ rev2 = tree2.commit('two')
+ rev3 = tree2.commit('three')
+ tree.merge_from_branch(tree2.branch)
+ rev4 = tree.commit('four')
+
+ self.assertUnmerged([('2', rev4)], [], tree.branch, tree2.branch)
+ self.assertUnmerged([('2', rev4)], [], tree.branch, tree2.branch,
+ local_revid_range=(rev4, rev4))
+ self.assertUnmerged([], [], tree.branch, tree2.branch,
+ local_revid_range=(rev1, rev1))
+
+ def test_include_merged(self):
+ tree = self.make_branch_and_tree('tree')
+ rev1 = tree.commit('one', rev_id='rev1')
+
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ rev2 = tree2.commit('two', rev_id='rev2')
+ rev3 = tree2.commit('three', rev_id='rev3')
+
+ tree3 = tree2.bzrdir.sprout('tree3').open_workingtree()
+ rev4 = tree3.commit('four', rev_id='rev4')
+ rev5 = tree3.commit('five', rev_id='rev5')
+
+ tree2.merge_from_branch(tree3.branch)
+ rev6 = tree2.commit('six', rev_id='rev6')
+
+ self.assertUnmerged([], [('2', 'rev2', 0), ('3', 'rev3', 0),
+ ('4', 'rev6', 0),
+ ('3.1.1', 'rev4', 1), ('3.1.2', 'rev5', 1),
+ ],
+ tree.branch, tree2.branch,
+ include_merged=True)
+
+ self.assertUnmerged([], [('4', 'rev6', 0),
+ ('3.1.2', 'rev5', 1), ('3.1.1', 'rev4', 1),
+ ('3', 'rev3', 0), ('2', 'rev2', 0),
+ ],
+ tree.branch, tree2.branch,
+ include_merged=True,
+ backward=True)
+
+ self.assertUnmerged([], [('4', 'rev6', 0)],
+ tree.branch, tree2.branch,
+ include_merged=True, remote_revid_range=(rev6, rev6))
+
+ self.assertUnmerged([], [('3', 'rev3', 0), ('3.1.1', 'rev4', 1)],
+ tree.branch, tree2.branch,
+ include_merged=True, remote_revid_range=(rev3, rev4))
+
+ self.assertUnmerged([], [('4', 'rev6', 0), ('3.1.2', 'rev5', 1)],
+ tree.branch, tree2.branch,
+ include_merged=True, remote_revid_range=(rev5, rev6))
+
+ def test_revision_range(self):
+ local = self.make_branch_and_tree('local')
+ lrevid1 = local.commit('one')
+ remote = local.bzrdir.sprout('remote').open_workingtree()
+ rrevid2 = remote.commit('two')
+ rrevid3 = remote.commit('three')
+ rrevid4 = remote.commit('four')
+ lrevid2 = local.commit('two')
+ lrevid3 = local.commit('three')
+ lrevid4 = local.commit('four')
+ local_extra = [('2', lrevid2), ('3', lrevid3), ('4', lrevid4)]
+ remote_extra = [('2', rrevid2), ('3', rrevid3), ('4', rrevid4)]
+
+ # control
+ self.assertUnmerged(local_extra, remote_extra,
+ local.branch, remote.branch)
+ self.assertUnmerged(local_extra, remote_extra,
+ local.branch, remote.branch, local_revid_range=(None, None),
+ remote_revid_range=(None, None))
+
+ # exclude local revisions
+ self.assertUnmerged([('2', lrevid2)], remote_extra,
+ local.branch, remote.branch, local_revid_range=(lrevid2, lrevid2))
+ self.assertUnmerged([('2', lrevid2), ('3', lrevid3)], remote_extra,
+ local.branch, remote.branch, local_revid_range=(lrevid2, lrevid3))
+ self.assertUnmerged([('2', lrevid2), ('3', lrevid3)], None,
+ local.branch, remote.branch, 'local',
+ local_revid_range=(lrevid2, lrevid3))
+
+ # exclude remote revisions
+ self.assertUnmerged(local_extra, [('2', rrevid2)],
+ local.branch, remote.branch, remote_revid_range=(None, rrevid2))
+ self.assertUnmerged(local_extra, [('2', rrevid2)],
+ local.branch, remote.branch, remote_revid_range=(lrevid1, rrevid2))
+ self.assertUnmerged(local_extra, [('2', rrevid2)],
+ local.branch, remote.branch, remote_revid_range=(rrevid2, rrevid2))
+ self.assertUnmerged(local_extra, [('2', rrevid2), ('3', rrevid3)],
+ local.branch, remote.branch, remote_revid_range=(None, rrevid3))
+ self.assertUnmerged(local_extra, [('2', rrevid2), ('3', rrevid3)],
+ local.branch, remote.branch, remote_revid_range=(rrevid2, rrevid3))
+ self.assertUnmerged(local_extra, [('3', rrevid3)],
+ local.branch, remote.branch, remote_revid_range=(rrevid3, rrevid3))
+ self.assertUnmerged(None, [('2', rrevid2), ('3', rrevid3)],
+ local.branch, remote.branch, 'remote',
+ remote_revid_range=(rrevid2, rrevid3))
+
+ # exclude local and remote revisions
+ self.assertUnmerged([('3', lrevid3)], [('3', rrevid3)],
+ local.branch, remote.branch, local_revid_range=(lrevid3, lrevid3),
+ remote_revid_range=(rrevid3, rrevid3))
diff --git a/bzrlib/tests/test_msgeditor.py b/bzrlib/tests/test_msgeditor.py
new file mode 100644
index 0000000..7c10583
--- /dev/null
+++ b/bzrlib/tests/test_msgeditor.py
@@ -0,0 +1,382 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test commit message editor.
+"""
+
+import os
+import sys
+
+from bzrlib import (
+ commit,
+ config,
+ errors,
+ msgeditor,
+ osutils,
+ trace,
+ )
+from bzrlib.msgeditor import (
+ make_commit_message_template_encoded,
+ edit_commit_message_encoded
+)
+from bzrlib.tests import (
+ features,
+ TestCaseInTempDir,
+ TestCaseWithTransport,
+ TestNotApplicable,
+ TestSkipped,
+ multiply_tests,
+ probe_bad_non_ascii,
+ split_suite_by_re,
+ )
+from bzrlib.tests.EncodingAdapter import encoding_scenarios
+from bzrlib.trace import mutter
+
+
+def load_tests(standard_tests, module, loader):
+ """Parameterize the test for tempfile creation with different encodings."""
+ to_adapt, result = split_suite_by_re(standard_tests,
+ "test__create_temp_file_with_commit_template_in_unicode_dir")
+ return multiply_tests(to_adapt, encoding_scenarios, result)
+
+
+class MsgEditorTest(TestCaseWithTransport):
+
+ def make_uncommitted_tree(self):
+ """Build a branch with uncommitted unicode named changes in the cwd."""
+ working_tree = self.make_branch_and_tree('.')
+ b = working_tree.branch
+ filename = u'hell\u00d8'
+ try:
+ self.build_tree_contents([(filename, 'contents of hello')])
+ except UnicodeEncodeError:
+ raise TestSkipped("can't build unicode working tree in "
+ "filesystem encoding %s" % sys.getfilesystemencoding())
+ working_tree.add(filename)
+ return working_tree
+
+ def test_commit_template(self):
+ """Test building a commit message template"""
+ working_tree = self.make_uncommitted_tree()
+ template = msgeditor.make_commit_message_template(working_tree,
+ None)
+ self.assertEqualDiff(template,
+u"""\
+added:
+ hell\u00d8
+""")
+
+ def make_multiple_pending_tree(self):
+ config.GlobalStack().set('email', 'Bilbo Baggins <bb@hobbit.net>')
+ tree = self.make_branch_and_tree('a')
+ tree.commit('Initial checkin.', timestamp=1230912900, timezone=0)
+ tree2 = tree.bzrdir.clone('b').open_workingtree()
+ tree.commit('Minor tweak.', timestamp=1231977840, timezone=0)
+ tree2.commit('Feature X work.', timestamp=1233186240, timezone=0)
+ tree3 = tree2.bzrdir.clone('c').open_workingtree()
+ tree2.commit('Feature X finished.', timestamp=1233187680, timezone=0)
+ tree3.commit('Feature Y, based on initial X work.',
+ timestamp=1233285960, timezone=0)
+ tree.merge_from_branch(tree2.branch)
+ tree.merge_from_branch(tree3.branch, force=True)
+ return tree
+
+ def test_commit_template_pending_merges(self):
+ """Test building a commit message template when there are pending
+ merges. The commit message should show all pending merge revisions,
+ as does 'status -v', not only the merge tips.
+ """
+ working_tree = self.make_multiple_pending_tree()
+ template = msgeditor.make_commit_message_template(working_tree, None)
+ self.assertEqualDiff(template,
+u"""\
+pending merges:
+ Bilbo Baggins 2009-01-29 Feature X finished.
+ Bilbo Baggins 2009-01-28 Feature X work.
+ Bilbo Baggins 2009-01-30 Feature Y, based on initial X work.
+""")
+
+ def test_commit_template_encoded(self):
+ """Test building a commit message template"""
+ working_tree = self.make_uncommitted_tree()
+ template = make_commit_message_template_encoded(working_tree,
+ None,
+ output_encoding='utf8')
+ self.assertEqualDiff(template,
+u"""\
+added:
+ hell\u00d8
+""".encode("utf8"))
+
+
+ def test_commit_template_and_diff(self):
+ """Test building a commit message template"""
+ working_tree = self.make_uncommitted_tree()
+ template = make_commit_message_template_encoded(working_tree,
+ None,
+ diff=True,
+ output_encoding='utf8')
+
+ self.assertTrue("""\
+@@ -0,0 +1,1 @@
++contents of hello
+""" in template)
+ self.assertTrue(u"""\
+added:
+ hell\u00d8
+""".encode('utf8') in template)
+
+ def make_do_nothing_editor(self, basename='fed'):
+ if sys.platform == "win32":
+ name = basename + '.bat'
+ f = file(name, 'w')
+ f.write('@rem dummy fed')
+ f.close()
+ return name
+ else:
+ name = basename + '.sh'
+ f = file(name, 'wb')
+ f.write('#!/bin/sh\n')
+ f.close()
+ os.chmod(name, 0755)
+ return './' + name
+
+ def test_run_editor(self):
+ self.overrideEnv('BZR_EDITOR', self.make_do_nothing_editor())
+ self.assertEqual(True, msgeditor._run_editor(''),
+ 'Unable to run dummy fake editor')
+
+ def test_parse_editor_name(self):
+ """Correctly interpret names with spaces.
+
+ See <https://bugs.launchpad.net/bzr/+bug/220331>
+ """
+ self.overrideEnv('BZR_EDITOR',
+ '"%s"' % self.make_do_nothing_editor('name with spaces'))
+ self.assertEqual(True, msgeditor._run_editor('a_filename'))
+
+ def make_fake_editor(self, message='test message from fed\\n'):
+ """Set up environment so that an editor will be a known script.
+
+ Sets up BZR_EDITOR so that if an editor is spawned it will run a
+ script that just adds a known message to the start of the file.
+ """
+ f = file('fed.py', 'wb')
+ f.write('#!%s\n' % sys.executable)
+ f.write("""\
+# coding=utf-8
+import sys
+if len(sys.argv) == 2:
+ fn = sys.argv[1]
+ f = file(fn, 'rb')
+ s = f.read()
+ f.close()
+ f = file(fn, 'wb')
+ f.write('%s')
+ f.write(s)
+ f.close()
+""" % (message, ))
+ f.close()
+ if sys.platform == "win32":
+ # [win32] make batch file and set BZR_EDITOR
+ f = file('fed.bat', 'w')
+ f.write("""\
+@echo off
+"%s" fed.py %%1
+""" % sys.executable)
+ f.close()
+ self.overrideEnv('BZR_EDITOR', 'fed.bat')
+ else:
+ # [non-win32] make python script executable and set BZR_EDITOR
+ os.chmod('fed.py', 0755)
+ self.overrideEnv('BZR_EDITOR', './fed.py')
+
+ def test_edit_commit_message(self):
+ working_tree = self.make_uncommitted_tree()
+ self.make_fake_editor()
+
+ mutter('edit_commit_message without infotext')
+ self.assertEqual('test message from fed\n',
+ msgeditor.edit_commit_message(''))
+
+ mutter('edit_commit_message with ascii string infotext')
+ self.assertEqual('test message from fed\n',
+ msgeditor.edit_commit_message('spam'))
+
+ mutter('edit_commit_message with unicode infotext')
+ self.assertEqual('test message from fed\n',
+ msgeditor.edit_commit_message(u'\u1234'))
+
+ tmpl = edit_commit_message_encoded(u'\u1234'.encode("utf8"))
+ self.assertEqual('test message from fed\n', tmpl)
+
+ def test_start_message(self):
+ self.make_uncommitted_tree()
+ self.make_fake_editor()
+ self.assertEqual('test message from fed\nstart message\n',
+ msgeditor.edit_commit_message('',
+ start_message='start message\n'))
+ self.assertEqual('test message from fed\n',
+ msgeditor.edit_commit_message('',
+ start_message=''))
+
+ def test_deleted_commit_message(self):
+ working_tree = self.make_uncommitted_tree()
+
+ if sys.platform == 'win32':
+ editor = 'cmd.exe /c del'
+ else:
+ editor = 'rm'
+ self.overrideEnv('BZR_EDITOR', editor)
+
+ self.assertRaises((IOError, OSError), msgeditor.edit_commit_message, '')
+
+ def test__get_editor(self):
+ self.overrideEnv('BZR_EDITOR', 'bzr_editor')
+ self.overrideEnv('VISUAL', 'visual')
+ self.overrideEnv('EDITOR', 'editor')
+
+ conf = config.GlobalStack()
+ conf.store._load_from_string('[DEFAULT]\neditor = config_editor\n')
+ conf.store.save()
+ editors = list(msgeditor._get_editor())
+ editors = [editor for (editor, cfg_src) in editors]
+
+ self.assertEqual(['bzr_editor', 'config_editor', 'visual', 'editor'],
+ editors[:4])
+
+ if sys.platform == 'win32':
+ self.assertEqual(['wordpad.exe', 'notepad.exe'], editors[4:])
+ else:
+ self.assertEqual(['/usr/bin/editor', 'vi', 'pico', 'nano', 'joe'],
+ editors[4:])
+
+
+ def test__run_editor_EACCES(self):
+ """If running a configured editor raises EACESS, the user is warned."""
+ self.overrideEnv('BZR_EDITOR', 'eacces.py')
+ f = file('eacces.py', 'wb')
+ f.write('# Not a real editor')
+ f.close()
+ # Make the fake editor unreadable (and unexecutable)
+ os.chmod('eacces.py', 0)
+ # Set $EDITOR so that _run_editor will terminate before trying real
+ # editors.
+ self.overrideEnv('EDITOR', self.make_do_nothing_editor())
+ # Call _run_editor, capturing mutter.warning calls.
+ warnings = []
+ def warning(*args):
+ if len(args) > 1:
+ warnings.append(args[0] % args[1:])
+ else:
+ warnings.append(args[0])
+ _warning = trace.warning
+ trace.warning = warning
+ try:
+ msgeditor._run_editor('')
+ finally:
+ trace.warning = _warning
+ self.assertStartsWith(warnings[0], 'Could not start editor "eacces.py"')
+
+ def test__create_temp_file_with_commit_template(self):
+ # check that commit template written properly
+ # and has platform native line-endings (CRLF on win32)
+ create_file = msgeditor._create_temp_file_with_commit_template
+ msgfilename, hasinfo = create_file('infotext', '----', 'start message')
+ self.assertNotEqual(None, msgfilename)
+ self.assertTrue(hasinfo)
+ expected = os.linesep.join(['start message',
+ '',
+ '',
+ '----',
+ '',
+ 'infotext'])
+ self.assertFileEqual(expected, msgfilename)
+
+ def test__create_temp_file_with_commit_template_in_unicode_dir(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ if hasattr(self, 'info'):
+ tmpdir = self.info['directory']
+ os.mkdir(tmpdir)
+ # Force the creation of temp file in a directory whose name
+ # requires some encoding support
+ msgeditor._create_temp_file_with_commit_template('infotext',
+ tmpdir=tmpdir)
+ else:
+ raise TestNotApplicable('Test run elsewhere with non-ascii data.')
+
+ def test__create_temp_file_with_empty_commit_template(self):
+ # empty file
+ create_file = msgeditor._create_temp_file_with_commit_template
+ msgfilename, hasinfo = create_file('')
+ self.assertNotEqual(None, msgfilename)
+ self.assertFalse(hasinfo)
+ self.assertFileEqual('', msgfilename)
+
+ def test_unsupported_encoding_commit_message(self):
+ self.overrideEnv('LANG', 'C')
+ # LANG env variable has no effect on Windows
+ # but some characters anyway cannot be represented
+ # in default user encoding
+ char = probe_bad_non_ascii(osutils.get_user_encoding())
+ if char is None:
+ raise TestSkipped('Cannot find suitable non-ascii character '
+ 'for user_encoding (%s)' % osutils.get_user_encoding())
+
+ self.make_fake_editor(message=char)
+
+ working_tree = self.make_uncommitted_tree()
+ self.assertRaises(errors.BadCommitMessageEncoding,
+ msgeditor.edit_commit_message, '')
+
+ def test_set_commit_message_no_hooks(self):
+ commit_obj = commit.Commit()
+ self.assertIs(None,
+ msgeditor.set_commit_message(commit_obj))
+
+ def test_set_commit_message_hook(self):
+ msgeditor.hooks.install_named_hook("set_commit_message",
+ lambda commit_obj, existing_message: "save me some typing\n", None)
+ commit_obj = commit.Commit()
+ self.assertEquals("save me some typing\n",
+ msgeditor.set_commit_message(commit_obj))
+
+ def test_generate_commit_message_template_no_hooks(self):
+ commit_obj = commit.Commit()
+ self.assertIs(None,
+ msgeditor.generate_commit_message_template(commit_obj))
+
+ def test_generate_commit_message_template_hook(self):
+ msgeditor.hooks.install_named_hook("commit_message_template",
+ lambda commit_obj, msg: "save me some typing\n", None)
+ commit_obj = commit.Commit()
+ self.assertEquals("save me some typing\n",
+ msgeditor.generate_commit_message_template(commit_obj))
+
+
+# GZ 2009-11-17: This wants moving to osutils when the errno checking code is
+class TestPlatformErrnoWorkarounds(TestCaseInTempDir):
+ """Ensuring workarounds enshrined in code actually serve a purpose"""
+
+ def test_subprocess_call_bad_file(self):
+ if sys.platform != "win32":
+ raise TestNotApplicable("Workarounds for windows only")
+ import subprocess, errno
+ ERROR_BAD_EXE_FORMAT = 193
+ file("textfile.txt", "w").close()
+ e = self.assertRaises(WindowsError, subprocess.call, "textfile.txt")
+ self.assertEqual(e.errno, errno.ENOEXEC)
+ self.assertEqual(e.winerror, ERROR_BAD_EXE_FORMAT)
diff --git a/bzrlib/tests/test_multiparent.py b/bzrlib/tests/test_multiparent.py
new file mode 100644
index 0000000..0fde218
--- /dev/null
+++ b/bzrlib/tests/test_multiparent.py
@@ -0,0 +1,274 @@
+# Copyright (C) 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from unittest import TestCase
+
+from bzrlib import (
+ multiparent,
+ patiencediff,
+ tests,
+ )
+
+
+LINES_1 = "a\nb\nc\nd\ne\n".splitlines(True)
+LINES_2 = "a\nc\nd\ne\n".splitlines(True)
+LINES_3 = "a\nb\nc\nd\n".splitlines(True)
+LF_SPLIT_LINES = ['\x00\n', '\x00\r\x01\n', '\x02\r\xff']
+
+
+class Mock(object):
+
+ def __init__(self, **kwargs):
+ self.__dict__ = kwargs
+
+
+class TestMulti(TestCase):
+
+ def test_compare_no_parent(self):
+ diff = multiparent.MultiParent.from_lines(LINES_1)
+ self.assertEqual([multiparent.NewText(LINES_1)], diff.hunks)
+
+ def test_compare_one_parent(self):
+ diff = multiparent.MultiParent.from_lines(LINES_1, [LINES_2])
+ self.assertEqual([multiparent.ParentText(0, 0, 0, 1),
+ multiparent.NewText(['b\n']),
+ multiparent.ParentText(0, 1, 2, 3)],
+ diff.hunks)
+
+ diff = multiparent.MultiParent.from_lines(LINES_2, [LINES_1])
+ self.assertEqual([multiparent.ParentText(0, 0, 0, 1),
+ multiparent.ParentText(0, 2, 1, 3)],
+ diff.hunks)
+
+ def test_compare_two_parents(self):
+ diff = multiparent.MultiParent.from_lines(LINES_1, [LINES_2, LINES_3])
+ self.assertEqual([multiparent.ParentText(1, 0, 0, 4),
+ multiparent.ParentText(0, 3, 4, 1)],
+ diff.hunks)
+
+ def test_compare_two_parents_blocks(self):
+ matcher = patiencediff.PatienceSequenceMatcher(None, LINES_2, LINES_1)
+ blocks = matcher.get_matching_blocks()
+ diff = multiparent.MultiParent.from_lines(LINES_1, [LINES_2, LINES_3],
+ left_blocks=blocks)
+ self.assertEqual([multiparent.ParentText(1, 0, 0, 4),
+ multiparent.ParentText(0, 3, 4, 1)],
+ diff.hunks)
+
+ def test_get_matching_blocks(self):
+ diff = multiparent.MultiParent.from_lines(LINES_1, [LINES_2])
+ self.assertEqual([(0, 0, 1), (1, 2, 3), (4, 5, 0)],
+ list(diff.get_matching_blocks(0, len(LINES_2))))
+
+ diff = multiparent.MultiParent.from_lines(LINES_2, [LINES_1])
+ self.assertEqual([(0, 0, 1), (2, 1, 3), (5, 4, 0)],
+ list(diff.get_matching_blocks(0, len(LINES_1))))
+
+ def test_range_iterator(self):
+ diff = multiparent.MultiParent.from_lines(LINES_1, [LINES_2, LINES_3])
+ diff.hunks.append(multiparent.NewText(['q\n']))
+ self.assertEqual([(0, 4, 'parent', (1, 0, 4)),
+ (4, 5, 'parent', (0, 3, 4)),
+ (5, 6, 'new', ['q\n'])],
+ list(diff.range_iterator()))
+
+ def test_eq(self):
+ diff = multiparent.MultiParent.from_lines(LINES_1)
+ diff2 = multiparent.MultiParent.from_lines(LINES_1)
+ self.assertEqual(diff, diff2)
+ diff3 = multiparent.MultiParent.from_lines(LINES_2)
+ self.assertFalse(diff == diff3)
+ self.assertFalse(diff == Mock(hunks=[multiparent.NewText(LINES_1)]))
+ self.assertEqual(multiparent.MultiParent(
+ [multiparent.NewText(LINES_1),
+ multiparent.ParentText(0, 1, 2, 3)]),
+ multiparent.MultiParent(
+ [multiparent.NewText(LINES_1),
+ multiparent.ParentText(0, 1, 2, 3)]))
+
+ def test_to_patch(self):
+ self.assertEqual(['i 1\n', 'a\n', '\n', 'c 0 1 2 3\n'],
+ list(multiparent.MultiParent([multiparent.NewText(['a\n']),
+ multiparent.ParentText(0, 1, 2, 3)]).to_patch()))
+
+ def test_from_patch(self):
+ self.assertEqual(multiparent.MultiParent(
+ [multiparent.NewText(['a\n']),
+ multiparent.ParentText(0, 1, 2, 3)]),
+ multiparent.MultiParent.from_patch('i 1\na\n\nc 0 1 2 3'))
+ self.assertEqual(multiparent.MultiParent(
+ [multiparent.NewText(['a']),
+ multiparent.ParentText(0, 1, 2, 3)]),
+ multiparent.MultiParent.from_patch('i 1\na\nc 0 1 2 3\n'))
+
+ def test_binary_content(self):
+ patch = list(
+ multiparent.MultiParent.from_lines(LF_SPLIT_LINES).to_patch())
+ multiparent.MultiParent.from_patch(''.join(patch))
+
+ def test_make_patch_from_binary(self):
+ patch = multiparent.MultiParent.from_texts(''.join(LF_SPLIT_LINES))
+ expected = multiparent.MultiParent([
+ multiparent.NewText(LF_SPLIT_LINES)])
+ self.assertEqual(expected, patch)
+
+ def test_num_lines(self):
+ mp = multiparent.MultiParent([multiparent.NewText(['a\n'])])
+ self.assertEqual(1, mp.num_lines())
+ mp.hunks.append(multiparent.NewText(['b\n', 'c\n']))
+ self.assertEqual(3, mp.num_lines())
+ mp.hunks.append(multiparent.ParentText(0, 0, 3, 2))
+ self.assertEqual(5, mp.num_lines())
+ mp.hunks.append(multiparent.NewText(['f\n', 'g\n']))
+ self.assertEqual(7, mp.num_lines())
+
+ def test_to_lines(self):
+ mpdiff = multiparent.MultiParent.from_texts('a\nb\nc\n', ('b\nc\n',))
+ lines = mpdiff.to_lines(('b\ne\n',))
+ self.assertEqual(['a\n', 'b\n', 'e\n'], lines)
+
+
+class TestNewText(TestCase):
+
+ def test_eq(self):
+ self.assertEqual(multiparent.NewText([]), multiparent.NewText([]))
+ self.assertFalse(multiparent.NewText(['a']) ==
+ multiparent.NewText(['b']))
+ self.assertFalse(multiparent.NewText(['a']) == Mock(lines=['a']))
+
+ def test_to_patch(self):
+ self.assertEqual(['i 0\n', '\n'],
+ list(multiparent.NewText([]).to_patch()))
+ self.assertEqual(['i 1\n', 'a', '\n'],
+ list(multiparent.NewText(['a']).to_patch()))
+ self.assertEqual(['i 1\n', 'a\n', '\n'],
+ list(multiparent.NewText(['a\n']).to_patch()))
+
+
+class TestParentText(TestCase):
+
+ def test_eq(self):
+ self.assertEqual(multiparent.ParentText(1, 2, 3, 4),
+ multiparent.ParentText(1, 2, 3, 4))
+ self.assertFalse(multiparent.ParentText(1, 2, 3, 4) ==
+ multiparent.ParentText(2, 2, 3, 4))
+ self.assertFalse(multiparent.ParentText(1, 2, 3, 4) ==
+ Mock(parent=1, parent_pos=2, child_pos=3,
+ num_lines=4))
+
+ def test_to_patch(self):
+ self.assertEqual(['c 0 1 2 3\n'],
+ list(multiparent.ParentText(0, 1, 2, 3).to_patch()))
+
+
+REV_A = ['a\n', 'b\n', 'c\n', 'd\n']
+REV_B = ['a\n', 'c\n', 'd\n', 'e\n']
+REV_C = ['a\n', 'b\n', 'e\n', 'f\n']
+
+
+class TestVersionedFile(TestCase):
+
+ def add_version(self, vf, text, version_id, parent_ids):
+ vf.add_version([(t+'\n') for t in text], version_id, parent_ids)
+
+ def make_vf(self):
+ vf = multiparent.MultiMemoryVersionedFile()
+ self.add_version(vf, 'abcd', 'rev-a', [])
+ self.add_version(vf, 'acde', 'rev-b', [])
+ self.add_version(vf, 'abef', 'rev-c', ['rev-a', 'rev-b'])
+ return vf
+
+ def test_add_version(self):
+ vf = self.make_vf()
+ self.assertEqual(REV_A, vf._lines['rev-a'])
+ vf.clear_cache()
+ self.assertEqual(vf._lines, {})
+
+ def test_get_line_list(self):
+ vf = self.make_vf()
+ vf.clear_cache()
+ self.assertEqual(REV_A, vf.get_line_list(['rev-a'])[0])
+ self.assertEqual([REV_B, REV_C], vf.get_line_list(['rev-b', 'rev-c']))
+
+ def test_reconstruct_empty(self):
+ vf = multiparent.MultiMemoryVersionedFile()
+ vf.add_version([], 'a', [])
+ self.assertEqual([], self.reconstruct_version(vf, 'a'))
+
+ @staticmethod
+ def reconstruct(vf, revision_id, start, end):
+ reconstructor = multiparent._Reconstructor(vf, vf._lines,
+ vf._parents)
+ lines = []
+ reconstructor._reconstruct(lines, revision_id, start, end)
+ return lines
+
+ @staticmethod
+ def reconstruct_version(vf, revision_id):
+ reconstructor = multiparent._Reconstructor(vf, vf._lines,
+ vf._parents)
+ lines = []
+ reconstructor.reconstruct_version(lines, revision_id)
+ return lines
+
+ def test_reconstructor(self):
+ vf = self.make_vf()
+ self.assertEqual(['a\n', 'b\n'], self.reconstruct(vf, 'rev-a', 0, 2))
+ self.assertEqual(['c\n', 'd\n'], self.reconstruct(vf, 'rev-a', 2, 4))
+ self.assertEqual(['e\n', 'f\n'], self.reconstruct(vf, 'rev-c', 2, 4))
+ self.assertEqual(['a\n', 'b\n', 'e\n', 'f\n'],
+ self.reconstruct(vf, 'rev-c', 0, 4))
+ self.assertEqual(['a\n', 'b\n', 'e\n', 'f\n'],
+ self.reconstruct_version(vf, 'rev-c'))
+
+ def test_reordered(self):
+ """Check for a corner case that requires re-starting the cursor"""
+ vf = multiparent.MultiMemoryVersionedFile()
+ # rev-b must have at least two hunks, so split a and b with c.
+ self.add_version(vf, 'c', 'rev-a', [])
+ self.add_version(vf, 'acb', 'rev-b', ['rev-a'])
+ # rev-c and rev-d must each have a line from a different rev-b hunk
+ self.add_version(vf, 'b', 'rev-c', ['rev-b'])
+ self.add_version(vf, 'a', 'rev-d', ['rev-b'])
+ # The lines from rev-c and rev-d must appear in the opposite order
+ self.add_version(vf, 'ba', 'rev-e', ['rev-c', 'rev-d'])
+ vf.clear_cache()
+ lines = vf.get_line_list(['rev-e'])[0]
+ self.assertEqual(['b\n', 'a\n'], lines)
+
+
+class TestMultiVersionedFile(tests.TestCaseInTempDir):
+
+ def test_save_load(self):
+ vf = multiparent.MultiVersionedFile('foop')
+ vf.add_version('a\nb\nc\nd'.splitlines(True), 'a', [])
+ vf.add_version('a\ne\nd\n'.splitlines(True), 'b', ['a'])
+ vf.save()
+ newvf = multiparent.MultiVersionedFile('foop')
+ newvf.load()
+ self.assertEqual('a\nb\nc\nd', ''.join(newvf.get_line_list(['a'])[0]))
+ self.assertEqual('a\ne\nd\n', ''.join(newvf.get_line_list(['b'])[0]))
+
+ def test_filenames(self):
+ vf = multiparent.MultiVersionedFile('foop')
+ vf.add_version('a\nb\nc\nd'.splitlines(True), 'a', [])
+ self.assertPathExists('foop.mpknit')
+ self.assertPathDoesNotExist('foop.mpidx')
+ vf.save()
+ self.assertPathExists('foop.mpidx')
+ vf.destroy()
+ self.assertPathDoesNotExist('foop.mpknit')
+ self.assertPathDoesNotExist('foop.mpidx')
diff --git a/bzrlib/tests/test_mutabletree.py b/bzrlib/tests/test_mutabletree.py
new file mode 100644
index 0000000..9f7be8b
--- /dev/null
+++ b/bzrlib/tests/test_mutabletree.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for MutableTree.
+
+Most functionality of MutableTree is tested as part of WorkingTree.
+"""
+
+from bzrlib import (
+ mutabletree,
+ tests,
+ )
+
+
+class TestHooks(tests.TestCase):
+
+ def test_constructor(self):
+ """Check that creating a MutableTreeHooks instance has the right
+ defaults."""
+ hooks = mutabletree.MutableTreeHooks()
+ self.assertTrue("start_commit" in hooks,
+ "start_commit not in %s" % hooks)
+ self.assertTrue("post_commit" in hooks,
+ "post_commit not in %s" % hooks)
+
+ def test_installed_hooks_are_MutableTreeHooks(self):
+ """The installed hooks object should be a MutableTreeHooks."""
+ # the installed hooks are saved in self._preserved_hooks.
+ self.assertIsInstance(self._preserved_hooks[mutabletree.MutableTree][1],
+ mutabletree.MutableTreeHooks)
+
+
+class TestHasChanges(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestHasChanges, self).setUp()
+ self.tree = self.make_branch_and_tree('tree')
+
+ def test_with_uncommitted_changes(self):
+ self.build_tree(['tree/file'])
+ self.tree.add('file')
+ self.assertTrue(self.tree.has_changes())
+
+ def test_with_pending_merges(self):
+ self.tree.commit('first commit')
+ other_tree = self.tree.bzrdir.sprout('other').open_workingtree()
+ other_tree.commit('mergeable commit')
+ self.tree.merge_from_branch(other_tree.branch)
+ self.assertTrue(self.tree.has_changes())
diff --git a/bzrlib/tests/test_nonascii.py b/bzrlib/tests/test_nonascii.py
new file mode 100644
index 0000000..992fa77
--- /dev/null
+++ b/bzrlib/tests/test_nonascii.py
@@ -0,0 +1,194 @@
+# Copyright (C) 2005, 2006, 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test that various operations work in a non-ASCII environment."""
+
+import os
+import sys
+from unicodedata import normalize
+
+from bzrlib import osutils
+from bzrlib.osutils import pathjoin
+from bzrlib.tests import TestCase, TestCaseWithTransport, TestSkipped
+
+
+class NonAsciiTest(TestCaseWithTransport):
+
+ def test_add_in_nonascii_branch(self):
+ """Test adding in a non-ASCII branch."""
+ br_dir = u"\u1234"
+ try:
+ wt = self.make_branch_and_tree(br_dir)
+ except UnicodeEncodeError:
+ raise TestSkipped("filesystem can't accomodate nonascii names")
+ return
+ with file(pathjoin(br_dir, "a"), "w") as f: f.write("hello")
+ wt.add(["a"], ["a-id"])
+
+
+a_circle_c = u'\xe5'
+a_circle_d = u'a\u030a'
+a_dots_c = u'\xe4'
+a_dots_d = u'a\u0308'
+z_umlat_c = u'\u017d'
+z_umlat_d = u'Z\u030c'
+squared_c = u'\xb2' # This gets mapped to '2' if we use NFK[CD]
+squared_d = u'\xb2'
+quarter_c = u'\xbc' # Gets mapped to u'1\u20444' (1/4) if we use NFK[CD]
+quarter_d = u'\xbc'
+
+
+class TestNormalization(TestCase):
+ """Verify that we have our normalizations correct."""
+
+ def test_normalize(self):
+ self.assertEqual(a_circle_d, normalize('NFD', a_circle_c))
+ self.assertEqual(a_circle_c, normalize('NFC', a_circle_d))
+ self.assertEqual(a_dots_d, normalize('NFD', a_dots_c))
+ self.assertEqual(a_dots_c, normalize('NFC', a_dots_d))
+ self.assertEqual(z_umlat_d, normalize('NFD', z_umlat_c))
+ self.assertEqual(z_umlat_c, normalize('NFC', z_umlat_d))
+ self.assertEqual(squared_d, normalize('NFC', squared_c))
+ self.assertEqual(squared_c, normalize('NFD', squared_d))
+ self.assertEqual(quarter_d, normalize('NFC', quarter_c))
+ self.assertEqual(quarter_c, normalize('NFD', quarter_d))
+
+
+class NormalizedFilename(TestCaseWithTransport):
+ """Test normalized_filename and associated helpers"""
+
+ def test__accessible_normalized_filename(self):
+ anf = osutils._accessible_normalized_filename
+ # normalized_filename should allow plain ascii strings
+ # not just unicode strings
+ self.assertEqual((u'ascii', True), anf('ascii'))
+ self.assertEqual((a_circle_c, True), anf(a_circle_c))
+ self.assertEqual((a_circle_c, True), anf(a_circle_d))
+ self.assertEqual((a_dots_c, True), anf(a_dots_c))
+ self.assertEqual((a_dots_c, True), anf(a_dots_d))
+ self.assertEqual((z_umlat_c, True), anf(z_umlat_c))
+ self.assertEqual((z_umlat_c, True), anf(z_umlat_d))
+ self.assertEqual((squared_c, True), anf(squared_c))
+ self.assertEqual((squared_c, True), anf(squared_d))
+ self.assertEqual((quarter_c, True), anf(quarter_c))
+ self.assertEqual((quarter_c, True), anf(quarter_d))
+
+ def test__inaccessible_normalized_filename(self):
+ inf = osutils._inaccessible_normalized_filename
+ # normalized_filename should allow plain ascii strings
+ # not just unicode strings
+ self.assertEqual((u'ascii', True), inf('ascii'))
+ self.assertEqual((a_circle_c, True), inf(a_circle_c))
+ self.assertEqual((a_circle_c, False), inf(a_circle_d))
+ self.assertEqual((a_dots_c, True), inf(a_dots_c))
+ self.assertEqual((a_dots_c, False), inf(a_dots_d))
+ self.assertEqual((z_umlat_c, True), inf(z_umlat_c))
+ self.assertEqual((z_umlat_c, False), inf(z_umlat_d))
+ self.assertEqual((squared_c, True), inf(squared_c))
+ self.assertEqual((squared_c, True), inf(squared_d))
+ self.assertEqual((quarter_c, True), inf(quarter_c))
+ self.assertEqual((quarter_c, True), inf(quarter_d))
+
+ def test_functions(self):
+ if osutils.normalizes_filenames():
+ self.assertEqual(osutils.normalized_filename,
+ osutils._accessible_normalized_filename)
+ else:
+ self.assertEqual(osutils.normalized_filename,
+ osutils._inaccessible_normalized_filename)
+
+ def test_platform(self):
+ # With FAT32 and certain encodings on win32
+ # a_circle_c and a_dots_c actually map to the same file
+ # adding a suffix kicks in the 'preserving but insensitive'
+ # route, and maintains the right files
+ files = [a_circle_c+'.1', a_dots_c+'.2', z_umlat_c+'.3']
+ try:
+ self.build_tree(files)
+ except UnicodeError:
+ raise TestSkipped("filesystem cannot create unicode files")
+
+ if sys.platform == 'darwin':
+ expected = sorted([a_circle_d+'.1', a_dots_d+'.2', z_umlat_d+'.3'])
+ else:
+ expected = sorted(files)
+
+ present = sorted(os.listdir(u'.'))
+ self.assertEqual(expected, present)
+
+ def test_access_normalized(self):
+ # We should always be able to access files created with
+ # normalized filenames
+ # With FAT32 and certain encodings on win32
+ # a_circle_c and a_dots_c actually map to the same file
+ # adding a suffix kicks in the 'preserving but insensitive'
+ # route, and maintains the right files
+ files = [a_circle_c+'.1', a_dots_c+'.2', z_umlat_c+'.3',
+ squared_c+'.4', quarter_c+'.5']
+ try:
+ self.build_tree(files, line_endings='native')
+ except UnicodeError:
+ raise TestSkipped("filesystem cannot create unicode files")
+
+ for fname in files:
+ # We should get an exception if we can't open the file at
+ # this location.
+ path, can_access = osutils.normalized_filename(fname)
+
+ self.assertEqual(path, fname)
+ self.assertTrue(can_access)
+
+ f = open(path, 'rb')
+ try:
+ # Check the contents
+ shouldbe = 'contents of %s%s' % (path.encode('utf8'),
+ os.linesep)
+ actual = f.read()
+ finally:
+ f.close()
+ self.assertEqual(shouldbe, actual,
+ 'contents of %r is incorrect: %r != %r'
+ % (path, shouldbe, actual))
+
+ def test_access_non_normalized(self):
+ # Sometimes we can access non-normalized files by their normalized
+ # path, verify that normalized_filename returns the right info
+ files = [a_circle_d+'.1', a_dots_d+'.2', z_umlat_d+'.3']
+
+ try:
+ self.build_tree(files)
+ except UnicodeError:
+ raise TestSkipped("filesystem cannot create unicode files")
+
+ for fname in files:
+ # We should get an exception if we can't open the file at
+ # this location.
+ path, can_access = osutils.normalized_filename(fname)
+
+ self.assertNotEqual(path, fname)
+
+ # We should always be able to access them from the name
+ # they were created with
+ f = open(fname, 'rb')
+ f.close()
+
+ # And normalized_filename should tell us correctly if we can
+ # access them by an alternate name
+ if can_access:
+ f = open(path, 'rb')
+ f.close()
+ else:
+ self.assertRaises(IOError, open, path, 'rb')
diff --git a/bzrlib/tests/test_options.py b/bzrlib/tests/test_options.py
new file mode 100644
index 0000000..eb20f61
--- /dev/null
+++ b/bzrlib/tests/test_options.py
@@ -0,0 +1,433 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import re
+
+from bzrlib import (
+ bzrdir,
+ commands,
+ controldir,
+ errors,
+ option,
+ registry,
+ )
+from bzrlib.builtins import cmd_commit
+from bzrlib.commands import parse_args
+from bzrlib.tests import TestCase
+from bzrlib.repofmt import knitrepo
+
+
+def parse(options, args):
+ parser = option.get_optparser(dict((o.name, o) for o in options))
+ return parser.parse_args(args)
+
+
+class OptionTests(TestCase):
+ """Command-line option tests"""
+
+ def test_parse_args(self):
+ """Option parser"""
+ # XXX: Using cmd_commit makes these tests overly sensitive to changes
+ # to cmd_commit, when they are meant to be about option parsing in
+ # general.
+ self.assertEqual(
+ ([], {'author': [], 'exclude': [], 'fixes': [], 'help': True}),
+ parse_args(cmd_commit(), ['--help']))
+ self.assertEqual(
+ ([], {'author': [], 'exclude': [], 'fixes': [], 'message': 'biter'}),
+ parse_args(cmd_commit(), ['--message=biter']))
+
+ def test_no_more_opts(self):
+ """Terminated options"""
+ self.assertEqual(
+ (['-file-with-dashes'], {'author': [], 'exclude': [], 'fixes': []}),
+ parse_args(cmd_commit(), ['--', '-file-with-dashes']))
+
+ def test_option_help(self):
+ """Options have help strings."""
+ out, err = self.run_bzr('commit --help')
+ self.assertContainsRe(out,
+ r'--file(.|\n)*Take commit message from this file\.')
+ self.assertContainsRe(out, r'-h.*--help')
+
+ def test_option_help_global(self):
+ """Global options have help strings."""
+ out, err = self.run_bzr('help status')
+ self.assertContainsRe(out, r'--show-ids.*Show internal object.')
+
+ def test_option_help_global_hidden(self):
+ """Hidden global options have no help strings."""
+ out, err = self.run_bzr('help log')
+ self.assertNotContainsRe(out, r'--message')
+
+ def test_option_arg_help(self):
+ """Help message shows option arguments."""
+ out, err = self.run_bzr('help commit')
+ self.assertEqual(err, '')
+ self.assertContainsRe(out, r'--file[ =]MSGFILE')
+
+ def test_unknown_short_opt(self):
+ out, err = self.run_bzr('help -r', retcode=3)
+ self.assertContainsRe(err, r'no such option')
+
+ def test_set_short_name(self):
+ o = option.Option('wiggle')
+ o.set_short_name('w')
+ self.assertEqual(o.short_name(), 'w')
+
+ def test_allow_dash(self):
+ """Test that we can pass a plain '-' as an argument."""
+ self.assertEqual((['-']), parse_args(cmd_commit(), ['-'])[0])
+
+ def parse(self, options, args):
+ parser = option.get_optparser(dict((o.name, o) for o in options))
+ return parser.parse_args(args)
+
+ def test_conversion(self):
+ options = [option.Option('hello')]
+ opts, args = self.parse(options, ['--no-hello', '--hello'])
+ self.assertEqual(True, opts.hello)
+ opts, args = self.parse(options, [])
+ self.assertFalse(hasattr(opts, 'hello'))
+ opts, args = self.parse(options, ['--hello', '--no-hello'])
+ self.assertEqual(False, opts.hello)
+ options = [option.Option('number', type=int)]
+ opts, args = self.parse(options, ['--number', '6'])
+ self.assertEqual(6, opts.number)
+ self.assertRaises(errors.BzrCommandError, self.parse, options,
+ ['--number'])
+ self.assertRaises(errors.BzrCommandError, self.parse, options,
+ ['--no-number'])
+
+ def test_is_hidden(self):
+ self.assertTrue(option.Option('foo', hidden=True).is_hidden('foo'))
+ self.assertFalse(option.Option('foo', hidden=False).is_hidden('foo'))
+
+ def test_registry_conversion(self):
+ registry = controldir.ControlDirFormatRegistry()
+ bzrdir.register_metadir(registry, 'one', 'RepositoryFormat7', 'one help')
+ bzrdir.register_metadir(registry, 'two', 'RepositoryFormatKnit1', 'two help')
+ bzrdir.register_metadir(registry, 'hidden', 'RepositoryFormatKnit1',
+ 'two help', hidden=True)
+ registry.set_default('one')
+ options = [option.RegistryOption('format', '', registry, str)]
+ opts, args = self.parse(options, ['--format', 'one'])
+ self.assertEqual({'format':'one'}, opts)
+ opts, args = self.parse(options, ['--format', 'two'])
+ self.assertEqual({'format':'two'}, opts)
+ self.assertRaises(errors.BadOptionValue, self.parse, options,
+ ['--format', 'three'])
+ self.assertRaises(errors.BzrCommandError, self.parse, options,
+ ['--two'])
+ options = [option.RegistryOption('format', '', registry, str,
+ value_switches=True)]
+ opts, args = self.parse(options, ['--two'])
+ self.assertEqual({'format':'two'}, opts)
+ opts, args = self.parse(options, ['--two', '--one'])
+ self.assertEqual({'format':'one'}, opts)
+ opts, args = self.parse(options, ['--two', '--one',
+ '--format', 'two'])
+ self.assertEqual({'format':'two'}, opts)
+ options = [option.RegistryOption('format', '', registry, str,
+ enum_switch=False)]
+ self.assertRaises(errors.BzrCommandError, self.parse, options,
+ ['--format', 'two'])
+
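+ # Options sharing a param_name write to the same destination; the value
+ # from whichever option appears last on the command line wins.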
+ def test_override(self):
+ options = [option.Option('hello', type=str),
+ option.Option('hi', type=str, param_name='hello')]
+ opts, args = self.parse(options, ['--hello', 'a', '--hello', 'b'])
+ self.assertEqual('b', opts.hello)
+ opts, args = self.parse(options, ['--hello', 'b', '--hello', 'a'])
+ self.assertEqual('a', opts.hello)
+ opts, args = self.parse(options, ['--hello', 'a', '--hi', 'b'])
+ self.assertEqual('b', opts.hello)
+ opts, args = self.parse(options, ['--hi', 'b', '--hello', 'a'])
+ self.assertEqual('a', opts.hello)
+
+ def test_registry_converter(self):
+ options = [option.RegistryOption('format', '',
+ controldir.format_registry, controldir.format_registry.make_bzrdir)]
+ opts, args = self.parse(options, ['--format', 'knit'])
+ self.assertIsInstance(opts.format.repository_format,
+ knitrepo.RepositoryFormatKnit1)
+
+ def test_lazy_registry(self):
+ options = [option.RegistryOption('format', '',
+ lazy_registry=('bzrlib.controldir','format_registry'),
+ converter=str)]
+ opts, args = self.parse(options, ['--format', 'knit'])
+ self.assertEqual({'format': 'knit'}, opts)
+ self.assertRaises(
+ errors.BadOptionValue, self.parse, options, ['--format', 'BAD'])
+
+ def test_from_kwargs(self):
+ my_option = option.RegistryOption.from_kwargs('my-option',
+ help='test option', short='be short', be_long='go long')
+ self.assertEqual(['my-option'],
+ [x[0] for x in my_option.iter_switches()])
+ my_option = option.RegistryOption.from_kwargs('my-option',
+ help='test option', title="My option", short='be short',
+ be_long='go long', value_switches=True)
+ self.assertEqual(['my-option', 'be-long', 'short'],
+ [x[0] for x in my_option.iter_switches()])
+ self.assertEqual('test option', my_option.help)
+
+ def test_help(self):
+ registry = controldir.ControlDirFormatRegistry()
+ bzrdir.register_metadir(registry, 'one', 'RepositoryFormat7', 'one help')
+ bzrdir.register_metadir(registry, 'two',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
+ 'two help',
+ )
+ bzrdir.register_metadir(registry, 'hidden', 'RepositoryFormat7', 'hidden help',
+ hidden=True)
+ registry.set_default('one')
+ options = [option.RegistryOption('format', 'format help', registry,
+ str, value_switches=True, title='Formats')]
+ parser = option.get_optparser(dict((o.name, o) for o in options))
+ value = parser.format_option_help()
+ self.assertContainsRe(value, 'format.*format help')
+ self.assertContainsRe(value, 'one.*one help')
+ self.assertContainsRe(value, 'Formats:\n *--format')
+ self.assertNotContainsRe(value, 'hidden help')
+
+ def test_iter_switches(self):
+ opt = option.Option('hello', help='fg')
+ self.assertEqual(list(opt.iter_switches()),
+ [('hello', None, None, 'fg')])
+ opt = option.Option('hello', help='fg', type=int)
+ self.assertEqual(list(opt.iter_switches()),
+ [('hello', None, 'ARG', 'fg')])
+ opt = option.Option('hello', help='fg', type=int, argname='gar')
+ self.assertEqual(list(opt.iter_switches()),
+ [('hello', None, 'GAR', 'fg')])
+ registry = controldir.ControlDirFormatRegistry()
+ bzrdir.register_metadir(registry, 'one', 'RepositoryFormat7', 'one help')
+ bzrdir.register_metadir(registry, 'two',
+ 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
+ 'two help',
+ )
+ registry.set_default('one')
+ opt = option.RegistryOption('format', 'format help', registry,
+ value_switches=False)
+ self.assertEqual(list(opt.iter_switches()),
+ [('format', None, 'ARG', 'format help')])
+ opt = option.RegistryOption('format', 'format help', registry,
+ value_switches=True)
+ self.assertEqual(list(opt.iter_switches()),
+ [('format', None, 'ARG', 'format help'),
+ ('default', None, None, 'one help'),
+ ('one', None, None, 'one help'),
+ ('two', None, None, 'two help'),
+ ])
+
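+ # custom_callback is invoked once per occurrence of the option on the
+ # command line, receiving (option, name, value, parser); these tests
+ # record the calls to check the values passed.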
+ def test_option_callback_bool(self):
+ "Test booleans get True and False passed correctly to a callback."""
+ cb_calls = []
+ def cb(option, name, value, parser):
+ cb_calls.append((option,name,value,parser))
+ options = [option.Option('hello', custom_callback=cb)]
+ opts, args = self.parse(options, ['--hello', '--no-hello'])
+ self.assertEqual(2, len(cb_calls))
+ opt,name,value,parser = cb_calls[0]
+ self.assertEqual('hello', name)
+ self.assertTrue(value)
+ opt,name,value,parser = cb_calls[1]
+ self.assertEqual('hello', name)
+ self.assertFalse(value)
+
+ def test_option_callback_str(self):
+ """Test callbacks work for string options both long and short."""
+ cb_calls = []
+ def cb(option, name, value, parser):
+ cb_calls.append((option,name,value,parser))
+ options = [option.Option('hello', type=str, custom_callback=cb,
+ short_name='h')]
+ opts, args = self.parse(options, ['--hello', 'world', '-h', 'mars'])
+ self.assertEqual(2, len(cb_calls))
+ opt,name,value,parser = cb_calls[0]
+ self.assertEqual('hello', name)
+ self.assertEqual('world', value)
+ opt,name,value,parser = cb_calls[1]
+ self.assertEqual('hello', name)
+ self.assertEqual('mars', value)
+
+
+class TestListOptions(TestCase):
+ """Tests for ListOption, used to specify lists on the command-line."""
+
+ def parse(self, options, args):
+ parser = option.get_optparser(dict((o.name, o) for o in options))
+ return parser.parse_args(args)
+
+ def test_list_option(self):
+ options = [option.ListOption('hello', type=str)]
+ opts, args = self.parse(options, ['--hello=world', '--hello=sailor'])
+ self.assertEqual(['world', 'sailor'], opts.hello)
+
+ def test_list_option_with_dash(self):
+ options = [option.ListOption('with-dash', type=str)]
+ opts, args = self.parse(options, ['--with-dash=world',
+ '--with-dash=sailor'])
+ self.assertEqual(['world', 'sailor'], opts.with_dash)
+
+ def test_list_option_no_arguments(self):
+ options = [option.ListOption('hello', type=str)]
+ opts, args = self.parse(options, [])
+ self.assertEqual([], opts.hello)
+
+ def test_list_option_with_int_type(self):
+ options = [option.ListOption('hello', type=int)]
+ opts, args = self.parse(options, ['--hello=2', '--hello=3'])
+ self.assertEqual([2, 3], opts.hello)
+
+ def test_list_option_with_int_type_can_be_reset(self):
+ options = [option.ListOption('hello', type=int)]
+ opts, args = self.parse(options, ['--hello=2', '--hello=3',
+ '--hello=-', '--hello=5'])
+ self.assertEqual([5], opts.hello)
+
+ def test_list_option_can_be_reset(self):
+ """Passing an option of '-' to a list option should reset the list."""
+ options = [option.ListOption('hello', type=str)]
+ opts, args = self.parse(
+ options, ['--hello=a', '--hello=b', '--hello=-', '--hello=c'])
+ self.assertEqual(['c'], opts.hello)
+
+ def test_option_callback_list(self):
+ """Test callbacks work for list options."""
+ cb_calls = []
+ def cb(option, name, value, parser):
+ # Note that the value is a reference so copy to keep it
+ cb_calls.append((option,name,value[:],parser))
+ options = [option.ListOption('hello', type=str, custom_callback=cb)]
+ opts, args = self.parse(options, ['--hello=world', '--hello=mars',
+ '--hello=-'])
+ self.assertEqual(3, len(cb_calls))
+ opt,name,value,parser = cb_calls[0]
+ self.assertEqual('hello', name)
+ self.assertEqual(['world'], value)
+ opt,name,value,parser = cb_calls[1]
+ self.assertEqual('hello', name)
+ self.assertEqual(['world', 'mars'], value)
+ opt,name,value,parser = cb_calls[2]
+ self.assertEqual('hello', name)
+ self.assertEqual([], value)
+
+ def test_list_option_param_name(self):
+ """Test list options can have their param_name set."""
+ options = [option.ListOption('hello', type=str, param_name='greeting')]
+ opts, args = self.parse(
+ options, ['--hello=world', '--hello=sailor'])
+ self.assertEqual(['world', 'sailor'], opts.greeting)
+
+
+class TestOptionDefinitions(TestCase):
+ """Tests for options in the Bazaar codebase."""
+
+ def get_builtin_command_options(self):
+ g = []
+ commands.install_bzr_command_hooks()
+ for cmd_name in sorted(commands.builtin_command_names()):
+ cmd = commands.get_cmd_object(cmd_name)
+ for opt_name, opt in sorted(cmd.options().items()):
+ g.append((cmd_name, opt))
+ self.assert_(g)
+ return g
+
+ def test_option_grammar(self):
+ msgs = []
+ # Option help should be written in sentence form, and have a final
+ # period with an optional bracketed suffix. All the text should be on
+ # one line, because the display code will wrap it.
+ option_re = re.compile(r'^[A-Z][^\n]+\.(?: \([^\n]+\))?$')
+ for scope, opt in self.get_builtin_command_options():
+ for name, _, _, helptxt in opt.iter_switches():
+ if name != opt.name:
+ name = "/".join([opt.name, name])
+ if not helptxt:
+ msgs.append('%-16s %-16s %s' %
+ ((scope or 'GLOBAL'), name, 'NO HELP'))
+ elif not option_re.match(helptxt):
+ if name.startswith("format/"):
+ # Don't complain about the odd format registry help
+ continue
+ msgs.append('%-16s %-16s %s' %
+ ((scope or 'GLOBAL'), name, helptxt))
+ if msgs:
+ self.fail("The following options don't match the style guide:\n"
+ + '\n'.join(msgs))
+
+
+class TestOptionMisc(TestCase):
+
+ def test_is_hidden(self):
+ registry = controldir.ControlDirFormatRegistry()
+ bzrdir.register_metadir(registry, 'hidden', 'HiddenFormat',
+ 'hidden help text', hidden=True)
+ bzrdir.register_metadir(registry, 'visible', 'VisibleFormat',
+ 'visible help text', hidden=False)
+ format = option.RegistryOption('format', '', registry, str)
+ self.assertTrue(format.is_hidden('hidden'))
+ self.assertFalse(format.is_hidden('visible'))
+
+ def test_short_name(self):
+ registry = controldir.ControlDirFormatRegistry()
+ opt = option.RegistryOption('format', help='', registry=registry)
+ self.assertEquals(None, opt.short_name())
+ opt = option.RegistryOption('format', short_name='F', help='',
+ registry=registry)
+ self.assertEquals('F', opt.short_name())
+
+ def test_option_custom_help(self):
+ the_opt = option.Option.OPTIONS['help']
+ orig_help = the_opt.help[:]
+ my_opt = option.custom_help('help', 'suggest lottery numbers')
+ # Confirm that my_opt has my help and the original is unchanged
+ self.assertEqual('suggest lottery numbers', my_opt.help)
+ self.assertEqual(orig_help, the_opt.help)
+
+ def test_short_value_switches(self):
+ reg = registry.Registry()
+ reg.register('short', 'ShortChoice')
+ reg.register('long', 'LongChoice')
+ ropt = option.RegistryOption('choice', '', reg, value_switches=True,
+ short_value_switches={'short': 's'})
+ opts, args = parse([ropt], ['--short'])
+ self.assertEqual('ShortChoice', opts.choice)
+ opts, args = parse([ropt], ['-s'])
+ self.assertEqual('ShortChoice', opts.choice)
+
+
+class TestVerboseQuietLinkage(TestCase):
+
+ def check(self, parser, level, args):
+ option._verbosity_level = 0
+ opts, args = parser.parse_args(args)
+ self.assertEqual(level, option._verbosity_level)
+
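+ # -v and -q adjust the module-global option._verbosity_level; the checks
+ # below pin down how repeated and mixed flags combine and how the --no-
+ # forms end up back at level zero.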
+ def test_verbose_quiet_linkage(self):
+ parser = option.get_optparser(option.Option.STD_OPTIONS)
+ self.check(parser, 0, [])
+ self.check(parser, 1, ['-v'])
+ self.check(parser, 2, ['-v', '-v'])
+ self.check(parser, -1, ['-q'])
+ self.check(parser, -2, ['-qq'])
+ self.check(parser, -1, ['-v', '-v', '-q'])
+ self.check(parser, 2, ['-q', '-v', '-v'])
+ self.check(parser, 0, ['--no-verbose'])
+ self.check(parser, 0, ['-v', '-q', '--no-quiet'])
diff --git a/bzrlib/tests/test_osutils.py b/bzrlib/tests/test_osutils.py
new file mode 100644
index 0000000..f31508b
--- /dev/null
+++ b/bzrlib/tests/test_osutils.py
@@ -0,0 +1,2277 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the osutils wrapper."""
+
+from cStringIO import StringIO
+import errno
+import os
+import re
+import select
+import socket
+import sys
+import time
+
+from bzrlib import (
+ errors,
+ lazy_regex,
+ osutils,
+ symbol_versioning,
+ tests,
+ trace,
+ win32utils,
+ )
+from bzrlib.tests import (
+ features,
+ file_utils,
+ test__walkdirs_win32,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+class _UTF8DirReaderFeature(features.Feature):
+
+ def _probe(self):
+ try:
+ from bzrlib import _readdir_pyx
+ self.reader = _readdir_pyx.UTF8DirReader
+ return True
+ except ImportError:
+ return False
+
+ def feature_name(self):
+ return 'bzrlib._readdir_pyx'
+
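+# The tests below use the ModuleAvailableFeature form (and its .module
+# attribute); the hand-rolled probe class above appears to be superseded
+# by it.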
+UTF8DirReaderFeature = features.ModuleAvailableFeature('bzrlib._readdir_pyx')
+
+term_ios_feature = features.ModuleAvailableFeature('termios')
+
+
+def _already_unicode(s):
+ return s
+
+
+def _utf8_to_unicode(s):
+ return s.decode('UTF-8')
+
+
+def dir_reader_scenarios():
+ # For each dir reader we define:
+
+ # - native_to_unicode: a function converting the native_abspath as returned
+ # by DirReader.read_dir to its unicode representation
+
+ # UnicodeDirReader is the fallback, it should be tested on all platforms.
+ scenarios = [('unicode',
+ dict(_dir_reader_class=osutils.UnicodeDirReader,
+ _native_to_unicode=_already_unicode))]
+ # Some DirReaders are platform specific and even there they may not be
+ # available.
+ if UTF8DirReaderFeature.available():
+ from bzrlib import _readdir_pyx
+ scenarios.append(('utf8',
+ dict(_dir_reader_class=_readdir_pyx.UTF8DirReader,
+ _native_to_unicode=_utf8_to_unicode)))
+
+ if test__walkdirs_win32.win32_readdir_feature.available():
+ try:
+ from bzrlib import _walkdirs_win32
+ scenarios.append(
+ ('win32',
+ dict(_dir_reader_class=_walkdirs_win32.Win32ReadDir,
+ _native_to_unicode=_already_unicode)))
+ except ImportError:
+ pass
+ return scenarios
+
+
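+# load_tests_apply_scenarios multiplies this module's tests by the scenarios
+# each test class declares, such as those built by dir_reader_scenarios()
+# above.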
+load_tests = load_tests_apply_scenarios
+
+
+class TestContainsWhitespace(tests.TestCase):
+
+ def test_contains_whitespace(self):
+ self.assertTrue(osutils.contains_whitespace(u' '))
+ self.assertTrue(osutils.contains_whitespace(u'hello there'))
+ self.assertTrue(osutils.contains_whitespace(u'hellothere\n'))
+ self.assertTrue(osutils.contains_whitespace(u'hello\nthere'))
+ self.assertTrue(osutils.contains_whitespace(u'hello\rthere'))
+ self.assertTrue(osutils.contains_whitespace(u'hello\tthere'))
+
+ # \xa0 is "non-breaking space", which some Python locales treat as
+ # whitespace, but we do not.
+ self.assertFalse(osutils.contains_whitespace(u''))
+ self.assertFalse(osutils.contains_whitespace(u'hellothere'))
+ self.assertFalse(osutils.contains_whitespace(u'hello\xa0there'))
+
+
+class TestRename(tests.TestCaseInTempDir):
+
+ def create_file(self, filename, content):
+ f = open(filename, 'wb')
+ try:
+ f.write(content)
+ finally:
+ f.close()
+
+ def _fancy_rename(self, a, b):
+ osutils.fancy_rename(a, b, rename_func=os.rename,
+ unlink_func=os.unlink)
+
+ def test_fancy_rename(self):
+ # This should work everywhere
+ self.create_file('a', 'something in a\n')
+ self._fancy_rename('a', 'b')
+ self.assertPathDoesNotExist('a')
+ self.assertPathExists('b')
+ self.check_file_contents('b', 'something in a\n')
+
+ self.create_file('a', 'new something in a\n')
+ self._fancy_rename('b', 'a')
+
+ self.check_file_contents('a', 'something in a\n')
+
+ def test_fancy_rename_fails_source_missing(self):
+ # An exception should be raised, and the target should be left in place
+ self.create_file('target', 'data in target\n')
+ self.assertRaises((IOError, OSError), self._fancy_rename,
+ 'missingsource', 'target')
+ self.assertPathExists('target')
+ self.check_file_contents('target', 'data in target\n')
+
+ def test_fancy_rename_fails_if_source_and_target_missing(self):
+ self.assertRaises((IOError, OSError), self._fancy_rename,
+ 'missingsource', 'missingtarget')
+
+ def test_rename(self):
+ # Rename should be semi-atomic on all platforms
+ self.create_file('a', 'something in a\n')
+ osutils.rename('a', 'b')
+ self.assertPathDoesNotExist('a')
+ self.assertPathExists('b')
+ self.check_file_contents('b', 'something in a\n')
+
+ self.create_file('a', 'new something in a\n')
+ osutils.rename('b', 'a')
+
+ self.check_file_contents('a', 'something in a\n')
+
+ # TODO: test fancy_rename using a MemoryTransport
+
+ def test_rename_change_case(self):
+ # on Windows we should be able to change filename case by rename
+ self.build_tree(['a', 'b/'])
+ osutils.rename('a', 'A')
+ osutils.rename('b', 'B')
+ # we can't use failUnlessExists on a case-insensitive filesystem,
+ # so check the shape of the tree instead
+ shape = sorted(os.listdir('.'))
+ self.assertEquals(['A', 'B'], shape)
+
+ def test_rename_exception(self):
+ try:
+ osutils.rename('nonexistent_path', 'different_nonexistent_path')
+ except OSError, e:
+ self.assertEqual(e.old_filename, 'nonexistent_path')
+ self.assertEqual(e.new_filename, 'different_nonexistent_path')
+ self.assertTrue('nonexistent_path' in e.strerror)
+ self.assertTrue('different_nonexistent_path' in e.strerror)
+
+
+class TestRandChars(tests.TestCase):
+
+ def test_01_rand_chars_empty(self):
+ result = osutils.rand_chars(0)
+ self.assertEqual(result, '')
+
+ def test_02_rand_chars_100(self):
+ result = osutils.rand_chars(100)
+ self.assertEqual(len(result), 100)
+ self.assertEqual(type(result), str)
+ self.assertContainsRe(result, r'^[a-z0-9]{100}$')
+
+
+class TestIsInside(tests.TestCase):
+
+ def test_is_inside(self):
+ is_inside = osutils.is_inside
+ self.assertTrue(is_inside('src', 'src/foo.c'))
+ self.assertFalse(is_inside('src', 'srccontrol'))
+ self.assertTrue(is_inside('src', 'src/a/a/a/foo.c'))
+ self.assertTrue(is_inside('foo.c', 'foo.c'))
+ self.assertFalse(is_inside('foo.c', ''))
+ self.assertTrue(is_inside('', 'foo.c'))
+
+ def test_is_inside_any(self):
+ SRC_FOO_C = osutils.pathjoin('src', 'foo.c')
+ for dirs, fn in [(['src', 'doc'], SRC_FOO_C),
+ (['src'], SRC_FOO_C),
+ (['src'], 'src'),
+ ]:
+ self.assert_(osutils.is_inside_any(dirs, fn))
+ for dirs, fn in [(['src'], 'srccontrol'),
+ (['src'], 'srccontrol/foo')]:
+ self.assertFalse(osutils.is_inside_any(dirs, fn))
+
+ def test_is_inside_or_parent_of_any(self):
+ for dirs, fn in [(['src', 'doc'], 'src/foo.c'),
+ (['src'], 'src/foo.c'),
+ (['src/bar.c'], 'src'),
+ (['src/bar.c', 'bla/foo.c'], 'src'),
+ (['src'], 'src'),
+ ]:
+ self.assert_(osutils.is_inside_or_parent_of_any(dirs, fn))
+
+ for dirs, fn in [(['src'], 'srccontrol'),
+ (['srccontrol/foo.c'], 'src'),
+ (['src'], 'srccontrol/foo')]:
+ self.assertFalse(osutils.is_inside_or_parent_of_any(dirs, fn))
+
+
+class TestLstat(tests.TestCaseInTempDir):
+
+ def test_lstat_matches_fstat(self):
+ # On Windows, lstat and fstat don't always agree, primarily in the
+ # 'st_ino' and 'st_dev' fields. So we force them to be '0' in our
+ # custom implementation.
+ if sys.platform == 'win32':
+ # We only have special lstat/fstat if we have the extension.
+ # Without it, we may end up re-reading content when we don't have
+ # to, but otherwise it doesn't affect correctness.
+ self.requireFeature(test__walkdirs_win32.win32_readdir_feature)
+ f = open('test-file.txt', 'wb')
+ self.addCleanup(f.close)
+ f.write('some content\n')
+ f.flush()
+ self.assertEqualStat(osutils.fstat(f.fileno()),
+ osutils.lstat('test-file.txt'))
+
+
+class TestRmTree(tests.TestCaseInTempDir):
+
+ def test_rmtree(self):
+ # Check to remove tree with read-only files/dirs
+ os.mkdir('dir')
+ f = file('dir/file', 'w')
+ f.write('spam')
+ f.close()
+ # would like to also try making the directory readonly, but at the
+ # moment python shutil.rmtree doesn't handle that properly - it would
+ # need to chmod the directory before removing things inside it - deferred
+ # for now -- mbp 20060505
+ # osutils.make_readonly('dir')
+ osutils.make_readonly('dir/file')
+
+ osutils.rmtree('dir')
+
+ self.assertPathDoesNotExist('dir/file')
+ self.assertPathDoesNotExist('dir')
+
+
+class TestDeleteAny(tests.TestCaseInTempDir):
+
+ def test_delete_any_readonly(self):
+ # from <https://bugs.launchpad.net/bzr/+bug/218206>
+ self.build_tree(['d/', 'f'])
+ osutils.make_readonly('d')
+ osutils.make_readonly('f')
+
+ osutils.delete_any('f')
+ osutils.delete_any('d')
+
+
+class TestKind(tests.TestCaseInTempDir):
+
+ def test_file_kind(self):
+ self.build_tree(['file', 'dir/'])
+ self.assertEquals('file', osutils.file_kind('file'))
+ self.assertEquals('directory', osutils.file_kind('dir/'))
+ if osutils.has_symlinks():
+ os.symlink('symlink', 'symlink')
+ self.assertEquals('symlink', osutils.file_kind('symlink'))
+
+ # TODO: jam 20060529 Test a block device
+ try:
+ os.lstat('/dev/null')
+ except OSError, e:
+ if e.errno not in (errno.ENOENT,):
+ raise
+ else:
+ self.assertEquals('chardev', osutils.file_kind('/dev/null'))
+
+ mkfifo = getattr(os, 'mkfifo', None)
+ if mkfifo:
+ mkfifo('fifo')
+ try:
+ self.assertEquals('fifo', osutils.file_kind('fifo'))
+ finally:
+ os.remove('fifo')
+
+ AF_UNIX = getattr(socket, 'AF_UNIX', None)
+ if AF_UNIX:
+ s = socket.socket(AF_UNIX)
+ s.bind('socket')
+ try:
+ self.assertEquals('socket', osutils.file_kind('socket'))
+ finally:
+ os.remove('socket')
+
+ def test_kind_marker(self):
+ self.assertEqual("", osutils.kind_marker("file"))
+ self.assertEqual("/", osutils.kind_marker('directory'))
+ self.assertEqual("/", osutils.kind_marker(osutils._directory_kind))
+ self.assertEqual("@", osutils.kind_marker("symlink"))
+ self.assertEqual("+", osutils.kind_marker("tree-reference"))
+ self.assertEqual("", osutils.kind_marker("fifo"))
+ self.assertEqual("", osutils.kind_marker("socket"))
+ self.assertEqual("", osutils.kind_marker("unknown"))
+
+
+class TestUmask(tests.TestCaseInTempDir):
+
+ def test_get_umask(self):
+ if sys.platform == 'win32':
+ # umask always returns '0', no way to set it
+ self.assertEqual(0, osutils.get_umask())
+ return
+
+ orig_umask = osutils.get_umask()
+ self.addCleanup(os.umask, orig_umask)
+ os.umask(0222)
+ self.assertEqual(0222, osutils.get_umask())
+ os.umask(0022)
+ self.assertEqual(0022, osutils.get_umask())
+ os.umask(0002)
+ self.assertEqual(0002, osutils.get_umask())
+ os.umask(0027)
+ self.assertEqual(0027, osutils.get_umask())
+
+
+class TestDateTime(tests.TestCase):
+
+ def assertFormatedDelta(self, expected, seconds):
+ """Assert osutils.format_delta formats as expected"""
+ actual = osutils.format_delta(seconds)
+ self.assertEqual(expected, actual)
+
+ def test_format_delta(self):
+ self.assertFormatedDelta('0 seconds ago', 0)
+ self.assertFormatedDelta('1 second ago', 1)
+ self.assertFormatedDelta('10 seconds ago', 10)
+ self.assertFormatedDelta('59 seconds ago', 59)
+ self.assertFormatedDelta('89 seconds ago', 89)
+ self.assertFormatedDelta('1 minute, 30 seconds ago', 90)
+ self.assertFormatedDelta('3 minutes, 0 seconds ago', 180)
+ self.assertFormatedDelta('3 minutes, 1 second ago', 181)
+ self.assertFormatedDelta('10 minutes, 15 seconds ago', 615)
+ self.assertFormatedDelta('30 minutes, 59 seconds ago', 1859)
+ self.assertFormatedDelta('31 minutes, 0 seconds ago', 1860)
+ self.assertFormatedDelta('60 minutes, 0 seconds ago', 3600)
+ self.assertFormatedDelta('89 minutes, 59 seconds ago', 5399)
+ self.assertFormatedDelta('1 hour, 30 minutes ago', 5400)
+ self.assertFormatedDelta('2 hours, 30 minutes ago', 9017)
+ self.assertFormatedDelta('10 hours, 0 minutes ago', 36000)
+ self.assertFormatedDelta('24 hours, 0 minutes ago', 86400)
+ self.assertFormatedDelta('35 hours, 59 minutes ago', 129599)
+ self.assertFormatedDelta('36 hours, 0 minutes ago', 129600)
+ self.assertFormatedDelta('36 hours, 0 minutes ago', 129601)
+ self.assertFormatedDelta('36 hours, 1 minute ago', 129660)
+ self.assertFormatedDelta('36 hours, 1 minute ago', 129661)
+ self.assertFormatedDelta('84 hours, 10 minutes ago', 303002)
+
+ # We handle when time steps the wrong direction because computers
+ # don't have synchronized clocks.
+ self.assertFormatedDelta('84 hours, 10 minutes in the future', -303002)
+ self.assertFormatedDelta('1 second in the future', -1)
+ self.assertFormatedDelta('2 seconds in the future', -2)
+
+ def test_format_date(self):
+ self.assertRaises(errors.UnsupportedTimezoneFormat,
+ osutils.format_date, 0, timezone='foo')
+ self.assertIsInstance(osutils.format_date(0), str)
+ self.assertIsInstance(osutils.format_local_date(0), unicode)
+ # Testing for the actual value of the local weekday without
+ # duplicating the code from format_date is difficult.
+ # Instead blackbox.test_locale should check for localized
+ # dates once they do occur in output strings.
+
+ def test_format_date_with_offset_in_original_timezone(self):
+ self.assertEqual("Thu 1970-01-01 00:00:00 +0000",
+ osutils.format_date_with_offset_in_original_timezone(0))
+ self.assertEqual("Fri 1970-01-02 03:46:40 +0000",
+ osutils.format_date_with_offset_in_original_timezone(100000))
+ self.assertEqual("Fri 1970-01-02 05:46:40 +0200",
+ osutils.format_date_with_offset_in_original_timezone(100000, 7200))
+
+ def test_local_time_offset(self):
+ """Test that local_time_offset() returns a sane value."""
+ offset = osutils.local_time_offset()
+ self.assertTrue(isinstance(offset, int))
+ # Test that the offset is no more than eighteen hours in
+ # either direction.
+ # Time zone handling is system specific, so it is difficult to
+ # do more specific tests, but a value outside of this range is
+ # probably wrong.
+ eighteen_hours = 18 * 3600
+ self.assertTrue(-eighteen_hours < offset < eighteen_hours)
+
+ def test_local_time_offset_with_timestamp(self):
+ """Test that local_time_offset() works with a timestamp."""
+ offset = osutils.local_time_offset(1000000000.1234567)
+ self.assertTrue(isinstance(offset, int))
+ eighteen_hours = 18 * 3600
+ self.assertTrue(-eighteen_hours < offset < eighteen_hours)
+
+
+class TestLinks(tests.TestCaseInTempDir):
+
+ def test_dereference_path(self):
+ self.requireFeature(features.SymlinkFeature)
+ cwd = osutils.realpath('.')
+ os.mkdir('bar')
+ bar_path = osutils.pathjoin(cwd, 'bar')
+ # Using './' to avoid bug #1213894 (first path component not
+ # dereferenced) in Python 2.4.1 and earlier
+ self.assertEqual(bar_path, osutils.realpath('./bar'))
+ os.symlink('bar', 'foo')
+ self.assertEqual(bar_path, osutils.realpath('./foo'))
+
+ # Does not dereference terminal symlinks
+ foo_path = osutils.pathjoin(cwd, 'foo')
+ self.assertEqual(foo_path, osutils.dereference_path('./foo'))
+
+ # Dereferences parent symlinks
+ os.mkdir('bar/baz')
+ baz_path = osutils.pathjoin(bar_path, 'baz')
+ self.assertEqual(baz_path, osutils.dereference_path('./foo/baz'))
+
+ # Dereferences parent symlinks that are the first path element
+ self.assertEqual(baz_path, osutils.dereference_path('foo/baz'))
+
+ # Dereferences parent symlinks in absolute paths
+ foo_baz_path = osutils.pathjoin(foo_path, 'baz')
+ self.assertEqual(baz_path, osutils.dereference_path(foo_baz_path))
+
+ def test_changing_access(self):
+ f = file('file', 'w')
+ f.write('monkey')
+ f.close()
+
+ # Make a file readonly
+ osutils.make_readonly('file')
+ mode = os.lstat('file').st_mode
+ self.assertEqual(mode, mode & 0777555)
+
+ # Make a file writable
+ osutils.make_writable('file')
+ mode = os.lstat('file').st_mode
+ self.assertEqual(mode, mode | 0200)
+
+ if osutils.has_symlinks():
+ # should not error when handed a symlink
+ os.symlink('nonexistent', 'dangling')
+ osutils.make_readonly('dangling')
+ osutils.make_writable('dangling')
+
+ def test_host_os_dereferences_symlinks(self):
+ osutils.host_os_dereferences_symlinks()
+
+
+class TestCanonicalRelPath(tests.TestCaseInTempDir):
+
+ _test_needs_features = [features.CaseInsCasePresFilenameFeature]
+
+ def test_canonical_relpath_simple(self):
+ f = file('MixedCaseName', 'w')
+ f.close()
+ actual = osutils.canonical_relpath(self.test_base_dir, 'mixedcasename')
+ self.assertEqual('work/MixedCaseName', actual)
+
+ def test_canonical_relpath_missing_tail(self):
+ os.mkdir('MixedCaseParent')
+ actual = osutils.canonical_relpath(self.test_base_dir,
+ 'mixedcaseparent/nochild')
+ self.assertEqual('work/MixedCaseParent/nochild', actual)
+
+
+class Test_CICPCanonicalRelpath(tests.TestCaseWithTransport):
+
+ def assertRelpath(self, expected, base, path):
+ actual = osutils._cicp_canonical_relpath(base, path)
+ self.assertEqual(expected, actual)
+
+ def test_simple(self):
+ self.build_tree(['MixedCaseName'])
+ base = osutils.realpath(self.get_transport('.').local_abspath('.'))
+ self.assertRelpath('MixedCaseName', base, 'mixedcAsename')
+
+ def test_subdir_missing_tail(self):
+ self.build_tree(['MixedCaseParent/', 'MixedCaseParent/a_child'])
+ base = osutils.realpath(self.get_transport('.').local_abspath('.'))
+ self.assertRelpath('MixedCaseParent/a_child', base,
+ 'MixedCaseParent/a_child')
+ self.assertRelpath('MixedCaseParent/a_child', base,
+ 'MixedCaseParent/A_Child')
+ self.assertRelpath('MixedCaseParent/not_child', base,
+ 'MixedCaseParent/not_child')
+
+ def test_at_root_slash(self):
+ # We can't test this on Windows, because it has a 'MIN_ABS_PATHLENGTH'
+ # check...
+ if osutils.MIN_ABS_PATHLENGTH > 1:
+ raise tests.TestSkipped('relpath requires %d chars'
+ % osutils.MIN_ABS_PATHLENGTH)
+ self.assertRelpath('foo', '/', '/foo')
+
+ def test_at_root_drive(self):
+ if sys.platform != 'win32':
+ raise tests.TestNotApplicable('we can only test drive-letter relative'
+ ' paths on Windows where we have drive'
+ ' letters.')
+ # see bug #322807
+ # The specific issue is that when at the root of a drive, 'abspath'
+ # returns "C:/" or just "/". However, the code assumes that abspath
+ # always returns something like "C:/foo" or "/foo" (no trailing slash).
+ self.assertRelpath('foo', 'C:/', 'C:/foo')
+ self.assertRelpath('foo', 'X:/', 'X:/foo')
+ self.assertRelpath('foo', 'X:/', 'X://foo')
+
+
+class TestPumpFile(tests.TestCase):
+ """Test pumpfile method."""
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ # create a test datablock
+ self.block_size = 512
+ pattern = '0123456789ABCDEF'
+ self.test_data = pattern * (3 * self.block_size / len(pattern))
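+ # 3 * 512 / 16 = 96 repetitions of the 16-byte pattern, i.e. 1536
+ # bytes: exactly three block_size chunks.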
+ self.test_data_len = len(self.test_data)
+
+ def test_bracket_block_size(self):
+ """Read data in blocks with the requested read size bracketing the
+ block size."""
+ # make sure test data is larger than max read size
+ self.assertTrue(self.test_data_len > self.block_size)
+
+ from_file = file_utils.FakeReadFile(self.test_data)
+ to_file = StringIO()
+
+ # read (max / 2) bytes and verify read size wasn't affected
+ num_bytes_to_read = self.block_size / 2
+ osutils.pumpfile(from_file, to_file, num_bytes_to_read, self.block_size)
+ self.assertEqual(from_file.get_max_read_size(), num_bytes_to_read)
+ self.assertEqual(from_file.get_read_count(), 1)
+
+ # read (max) bytes and verify read size wasn't affected
+ num_bytes_to_read = self.block_size
+ from_file.reset_read_count()
+ osutils.pumpfile(from_file, to_file, num_bytes_to_read, self.block_size)
+ self.assertEqual(from_file.get_max_read_size(), num_bytes_to_read)
+ self.assertEqual(from_file.get_read_count(), 1)
+
+ # read (max + 1) bytes and verify read size was limited
+ num_bytes_to_read = self.block_size + 1
+ from_file.reset_read_count()
+ osutils.pumpfile(from_file, to_file, num_bytes_to_read, self.block_size)
+ self.assertEqual(from_file.get_max_read_size(), self.block_size)
+ self.assertEqual(from_file.get_read_count(), 2)
+
+ # finish reading the rest of the data
+ num_bytes_to_read = self.test_data_len - to_file.tell()
+ osutils.pumpfile(from_file, to_file, num_bytes_to_read, self.block_size)
+
+ # report error if the data wasn't equal (we only report the size due
+ # to the length of the data)
+ response_data = to_file.getvalue()
+ if response_data != self.test_data:
+ message = "Data not equal. Expected %d bytes, received %d."
+ self.fail(message % (len(response_data), self.test_data_len))
+
+ def test_specified_size(self):
+ """Request a transfer larger than the maximum block size and verify
+ that the maximum read doesn't exceed the block_size."""
+ # make sure test data is larger than max read size
+ self.assertTrue(self.test_data_len > self.block_size)
+
+ # retrieve data in blocks
+ from_file = file_utils.FakeReadFile(self.test_data)
+ to_file = StringIO()
+ osutils.pumpfile(from_file, to_file, self.test_data_len,
+ self.block_size)
+
+ # verify read size was equal to the maximum read size
+ self.assertTrue(from_file.get_max_read_size() > 0)
+ self.assertEqual(from_file.get_max_read_size(), self.block_size)
+ self.assertEqual(from_file.get_read_count(), 3)
+
+ # report error if the data wasn't equal (we only report the size due
+ # to the length of the data)
+ response_data = to_file.getvalue()
+ if response_data != self.test_data:
+ message = "Data not equal. Expected %d bytes, received %d."
+ self.fail(message % (len(response_data), self.test_data_len))
+
+ def test_to_eof(self):
+ """Read to end-of-file and verify that the reads are not larger than
+ the maximum read size."""
+ # make sure test data is larger than max read size
+ self.assertTrue(self.test_data_len > self.block_size)
+
+ # retrieve data to EOF
+ from_file = file_utils.FakeReadFile(self.test_data)
+ to_file = StringIO()
+ osutils.pumpfile(from_file, to_file, -1, self.block_size)
+
+ # verify read size was equal to the maximum read size
+ self.assertEqual(from_file.get_max_read_size(), self.block_size)
+ self.assertEqual(from_file.get_read_count(), 4)
+
+ # report error if the data wasn't equal (we only report the size due
+ # to the length of the data)
+ response_data = to_file.getvalue()
+ if response_data != self.test_data:
+ message = "Data not equal. Expected %d bytes, received %d."
+ self.fail(message % (len(response_data), self.test_data_len))
+
+ def test_defaults(self):
+ """Verifies that the default arguments will read to EOF -- this
+ test verifies that any existing usages of pumpfile will not be broken
+ with this new version."""
+ # retrieve data using default (old) pumpfile method
+ from_file = file_utils.FakeReadFile(self.test_data)
+ to_file = StringIO()
+ osutils.pumpfile(from_file, to_file)
+
+ # report error if the data wasn't equal (we only report the size due
+ # to the length of the data)
+ response_data = to_file.getvalue()
+ if response_data != self.test_data:
+ message = "Data not equal. Expected %d bytes, received %d."
+ self.fail(message % (len(response_data), self.test_data_len))
+
+ def test_report_activity(self):
+ activity = []
+ def log_activity(length, direction):
+ activity.append((length, direction))
+ from_file = StringIO(self.test_data)
+ to_file = StringIO()
+ osutils.pumpfile(from_file, to_file, buff_size=500,
+ report_activity=log_activity, direction='read')
+ self.assertEqual([(500, 'read'), (500, 'read'), (500, 'read'),
+ (36, 'read')], activity)
+
+ from_file = StringIO(self.test_data)
+ to_file = StringIO()
+ del activity[:]
+ osutils.pumpfile(from_file, to_file, buff_size=500,
+ report_activity=log_activity, direction='write')
+ self.assertEqual([(500, 'write'), (500, 'write'), (500, 'write'),
+ (36, 'write')], activity)
+
+ # And with a limited amount of data
+ from_file = StringIO(self.test_data)
+ to_file = StringIO()
+ del activity[:]
+ osutils.pumpfile(from_file, to_file, buff_size=500, read_length=1028,
+ report_activity=log_activity, direction='read')
+ self.assertEqual([(500, 'read'), (500, 'read'), (28, 'read')], activity)
+
+
+
+class TestPumpStringFile(tests.TestCase):
+
+ def test_empty(self):
+ output = StringIO()
+ osutils.pump_string_file("", output)
+ self.assertEqual("", output.getvalue())
+
+ def test_more_than_segment_size(self):
+ output = StringIO()
+ osutils.pump_string_file("123456789", output, 2)
+ self.assertEqual("123456789", output.getvalue())
+
+ def test_segment_size(self):
+ output = StringIO()
+ osutils.pump_string_file("12", output, 2)
+ self.assertEqual("12", output.getvalue())
+
+ def test_segment_size_multiple(self):
+ output = StringIO()
+ osutils.pump_string_file("1234", output, 2)
+ self.assertEqual("1234", output.getvalue())
+
+
+class TestRelpath(tests.TestCase):
+
+ def test_simple_relpath(self):
+ cwd = osutils.getcwd()
+ subdir = cwd + '/subdir'
+ self.assertEqual('subdir', osutils.relpath(cwd, subdir))
+
+ def test_deep_relpath(self):
+ cwd = osutils.getcwd()
+ subdir = cwd + '/sub/subsubdir'
+ self.assertEqual('sub/subsubdir', osutils.relpath(cwd, subdir))
+
+ def test_not_relative(self):
+ self.assertRaises(errors.PathNotChild,
+ osutils.relpath, 'C:/path', 'H:/path')
+ self.assertRaises(errors.PathNotChild,
+ osutils.relpath, 'C:/', 'H:/path')
+
+
+class TestSafeUnicode(tests.TestCase):
+
+ def test_from_ascii_string(self):
+ self.assertEqual(u'foobar', osutils.safe_unicode('foobar'))
+
+ def test_from_unicode_string_ascii_contents(self):
+ self.assertEqual(u'bargam', osutils.safe_unicode(u'bargam'))
+
+ def test_from_unicode_string_unicode_contents(self):
+ self.assertEqual(u'bargam\xae', osutils.safe_unicode(u'bargam\xae'))
+
+ def test_from_utf8_string(self):
+ self.assertEqual(u'foo\xae', osutils.safe_unicode('foo\xc2\xae'))
+
+ def test_bad_utf8_string(self):
+ self.assertRaises(errors.BzrBadParameterNotUnicode,
+ osutils.safe_unicode,
+ '\xbb\xbb')
+
+
+class TestSafeUtf8(tests.TestCase):
+
+ def test_from_ascii_string(self):
+ f = 'foobar'
+ self.assertEqual('foobar', osutils.safe_utf8(f))
+
+ def test_from_unicode_string_ascii_contents(self):
+ self.assertEqual('bargam', osutils.safe_utf8(u'bargam'))
+
+ def test_from_unicode_string_unicode_contents(self):
+ self.assertEqual('bargam\xc2\xae', osutils.safe_utf8(u'bargam\xae'))
+
+ def test_from_utf8_string(self):
+ self.assertEqual('foo\xc2\xae', osutils.safe_utf8('foo\xc2\xae'))
+
+ def test_bad_utf8_string(self):
+ self.assertRaises(errors.BzrBadParameterNotUnicode,
+ osutils.safe_utf8, '\xbb\xbb')
+
+
+class TestSafeRevisionId(tests.TestCase):
+
+ def test_from_ascii_string(self):
+ # this shouldn't give a warning because it's getting an ascii string
+ self.assertEqual('foobar', osutils.safe_revision_id('foobar'))
+
+ def test_from_unicode_string_ascii_contents(self):
+ self.assertEqual('bargam',
+ osutils.safe_revision_id(u'bargam', warn=False))
+
+ def test_from_unicode_deprecated(self):
+ self.assertEqual('bargam',
+ self.callDeprecated([osutils._revision_id_warning],
+ osutils.safe_revision_id, u'bargam'))
+
+ def test_from_unicode_string_unicode_contents(self):
+ self.assertEqual('bargam\xc2\xae',
+ osutils.safe_revision_id(u'bargam\xae', warn=False))
+
+ def test_from_utf8_string(self):
+ self.assertEqual('foo\xc2\xae',
+ osutils.safe_revision_id('foo\xc2\xae'))
+
+ def test_none(self):
+ """Currently, None is a valid revision_id"""
+ self.assertEqual(None, osutils.safe_revision_id(None))
+
+
+class TestSafeFileId(tests.TestCase):
+
+ def test_from_ascii_string(self):
+ self.assertEqual('foobar', osutils.safe_file_id('foobar'))
+
+ def test_from_unicode_string_ascii_contents(self):
+ self.assertEqual('bargam', osutils.safe_file_id(u'bargam', warn=False))
+
+ def test_from_unicode_deprecated(self):
+ self.assertEqual('bargam',
+ self.callDeprecated([osutils._file_id_warning],
+ osutils.safe_file_id, u'bargam'))
+
+ def test_from_unicode_string_unicode_contents(self):
+ self.assertEqual('bargam\xc2\xae',
+ osutils.safe_file_id(u'bargam\xae', warn=False))
+
+ def test_from_utf8_string(self):
+ self.assertEqual('foo\xc2\xae',
+ osutils.safe_file_id('foo\xc2\xae'))
+
+ def test_none(self):
+ """Currently, None is a valid revision_id"""
+ self.assertEqual(None, osutils.safe_file_id(None))
+
+
+class TestPosixFuncs(tests.TestCase):
+ """Test that the posix version of normpath returns an appropriate path
+ when used with 2 leading slashes."""
+
+ def test_normpath(self):
+ self.assertEqual('/etc/shadow', osutils._posix_normpath('/etc/shadow'))
+ self.assertEqual('/etc/shadow', osutils._posix_normpath('//etc/shadow'))
+ self.assertEqual('/etc/shadow', osutils._posix_normpath('///etc/shadow'))
+
+
+class TestWin32Funcs(tests.TestCase):
+ """Test that _win32 versions of os utilities return appropriate paths."""
+
+ def test_abspath(self):
+ self.assertEqual('C:/foo', osutils._win32_abspath('C:\\foo'))
+ self.assertEqual('C:/foo', osutils._win32_abspath('C:/foo'))
+ self.assertEqual('//HOST/path', osutils._win32_abspath(r'\\HOST\path'))
+ self.assertEqual('//HOST/path', osutils._win32_abspath('//HOST/path'))
+
+ def test_realpath(self):
+ self.assertEqual('C:/foo', osutils._win32_realpath('C:\\foo'))
+ self.assertEqual('C:/foo', osutils._win32_realpath('C:/foo'))
+
+ def test_pathjoin(self):
+ self.assertEqual('path/to/foo',
+ osutils._win32_pathjoin('path', 'to', 'foo'))
+ self.assertEqual('C:/foo',
+ osutils._win32_pathjoin('path\\to', 'C:\\foo'))
+ self.assertEqual('C:/foo',
+ osutils._win32_pathjoin('path/to', 'C:/foo'))
+ self.assertEqual('path/to/foo',
+ osutils._win32_pathjoin('path/to/', 'foo'))
+ self.assertEqual('/foo',
+ osutils._win32_pathjoin('C:/path/to/', '/foo'))
+ self.assertEqual('/foo',
+ osutils._win32_pathjoin('C:\\path\\to\\', '\\foo'))
+
+ def test_normpath(self):
+ self.assertEqual('path/to/foo',
+ osutils._win32_normpath(r'path\\from\..\to\.\foo'))
+ self.assertEqual('path/to/foo',
+ osutils._win32_normpath('path//from/../to/./foo'))
+
+ def test_getcwd(self):
+ cwd = osutils._win32_getcwd()
+ os_cwd = os.getcwdu()
+ self.assertEqual(os_cwd[1:].replace('\\', '/'), cwd[1:])
+ # win32 is inconsistent about whether it returns lower or upper case,
+ # and even if it were consistent the user might type the other,
+ # so we force it to uppercase.
+ # Running python.exe under cmd.exe returns a capital C:\\;
+ # running win32 python inside a cygwin shell returns lowercase.
+ self.assertEqual(os_cwd[0].upper(), cwd[0])
+
+ def test_fixdrive(self):
+ self.assertEqual('H:/foo', osutils._win32_fixdrive('h:/foo'))
+ self.assertEqual('H:/foo', osutils._win32_fixdrive('H:/foo'))
+ self.assertEqual('C:\\foo', osutils._win32_fixdrive('c:\\foo'))
+
+ def test_win98_abspath(self):
+ # absolute path
+ self.assertEqual('C:/foo', osutils._win98_abspath('C:\\foo'))
+ self.assertEqual('C:/foo', osutils._win98_abspath('C:/foo'))
+ # UNC path
+ self.assertEqual('//HOST/path', osutils._win98_abspath(r'\\HOST\path'))
+ self.assertEqual('//HOST/path', osutils._win98_abspath('//HOST/path'))
+ # relative path
+ cwd = osutils.getcwd().rstrip('/')
+ drive = osutils.ntpath.splitdrive(cwd)[0]
+ self.assertEqual(cwd+'/path', osutils._win98_abspath('path'))
+ self.assertEqual(drive+'/path', osutils._win98_abspath('/path'))
+ # unicode path
+ u = u'\u1234'
+ self.assertEqual(cwd+'/'+u, osutils._win98_abspath(u))
+
+
+class TestWin32FuncsDirs(tests.TestCaseInTempDir):
+ """Test win32 functions that create files."""
+
+ def test_getcwd(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ os.mkdir(u'mu-\xb5')
+ os.chdir(u'mu-\xb5')
+ # TODO: jam 20060427 This will probably fail on Mac OSX because
+ # it will change the normalization of B\xe5gfors
+ # Consider using a different unicode character, or make
+ # osutils.getcwd() renormalize the path.
+ self.assertEndsWith(osutils._win32_getcwd(), u'mu-\xb5')
+
+ def test_minimum_path_selection(self):
+ self.assertEqual(set(),
+ osutils.minimum_path_selection([]))
+ self.assertEqual(set(['a']),
+ osutils.minimum_path_selection(['a']))
+ self.assertEqual(set(['a', 'b']),
+ osutils.minimum_path_selection(['a', 'b']))
+ self.assertEqual(set(['a/', 'b']),
+ osutils.minimum_path_selection(['a/', 'b']))
+ self.assertEqual(set(['a/', 'b']),
+ osutils.minimum_path_selection(['a/c', 'a/', 'b']))
+ self.assertEqual(set(['a-b', 'a', 'a0b']),
+ osutils.minimum_path_selection(['a-b', 'a/b', 'a0b', 'a']))
+
+ def test_mkdtemp(self):
+ tmpdir = osutils._win32_mkdtemp(dir='.')
+ self.assertFalse('\\' in tmpdir)
+
+ def test_rename(self):
+ a = open('a', 'wb')
+ a.write('foo\n')
+ a.close()
+ b = open('b', 'wb')
+ b.write('baz\n')
+ b.close()
+
+ osutils._win32_rename('b', 'a')
+ self.assertPathExists('a')
+ self.assertPathDoesNotExist('b')
+ self.assertFileEqual('baz\n', 'a')
+
+ def test_rename_missing_file(self):
+ a = open('a', 'wb')
+ a.write('foo\n')
+ a.close()
+
+ try:
+ osutils._win32_rename('b', 'a')
+ except (IOError, OSError), e:
+ self.assertEqual(errno.ENOENT, e.errno)
+ self.assertFileEqual('foo\n', 'a')
+
+ def test_rename_missing_dir(self):
+ os.mkdir('a')
+ try:
+ osutils._win32_rename('b', 'a')
+ except (IOError, OSError), e:
+ self.assertEqual(errno.ENOENT, e.errno)
+
+ def test_rename_current_dir(self):
+ os.mkdir('a')
+ os.chdir('a')
+ # You can't rename the working directory; renaming a non-existent
+ # path onto '.' usually just raises ENOENT, since the source
+ # doesn't exist.
+ try:
+ osutils._win32_rename('b', '.')
+ except (IOError, OSError), e:
+ self.assertEqual(errno.ENOENT, e.errno)
+
+ def test_splitpath(self):
+ def check(expected, path):
+ self.assertEqual(expected, osutils.splitpath(path))
+
+ check(['a'], 'a')
+ check(['a', 'b'], 'a/b')
+ check(['a', 'b'], 'a/./b')
+ check(['a', '.b'], 'a/.b')
+ check(['a', '.b'], 'a\\.b')
+
+ self.assertRaises(errors.BzrError, osutils.splitpath, 'a/../b')
+
+
+class TestParentDirectories(tests.TestCaseInTempDir):
+ """Test osutils.parent_directories()"""
+
+ def test_parent_directories(self):
+ self.assertEqual([], osutils.parent_directories('a'))
+ self.assertEqual(['a'], osutils.parent_directories('a/b'))
+ self.assertEqual(['a/b', 'a'], osutils.parent_directories('a/b/c'))
+
+
+class TestMacFuncsDirs(tests.TestCaseInTempDir):
+ """Test mac special functions that require directories."""
+
+ def test_getcwd(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ os.mkdir(u'B\xe5gfors')
+ os.chdir(u'B\xe5gfors')
+ self.assertEndsWith(osutils._mac_getcwd(), u'B\xe5gfors')
+
+ def test_getcwd_nonnorm(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ # Test that _mac_getcwd() will normalize this path
+ os.mkdir(u'Ba\u030agfors')
+ os.chdir(u'Ba\u030agfors')
+ self.assertEndsWith(osutils._mac_getcwd(), u'B\xe5gfors')
+
+
+class TestChunksToLines(tests.TestCase):
+
+ def test_smoketest(self):
+ self.assertEqual(['foo\n', 'bar\n', 'baz\n'],
+ osutils.chunks_to_lines(['foo\nbar', '\nbaz\n']))
+ self.assertEqual(['foo\n', 'bar\n', 'baz\n'],
+ osutils.chunks_to_lines(['foo\n', 'bar\n', 'baz\n']))
+
+ def test_osutils_binding(self):
+ from bzrlib.tests import test__chunks_to_lines
+ if test__chunks_to_lines.compiled_chunkstolines_feature.available():
+ from bzrlib._chunks_to_lines_pyx import chunks_to_lines
+ else:
+ from bzrlib._chunks_to_lines_py import chunks_to_lines
+ self.assertIs(chunks_to_lines, osutils.chunks_to_lines)
+
+
+class TestSplitLines(tests.TestCase):
+
+ def test_split_unicode(self):
+ self.assertEqual([u'foo\n', u'bar\xae'],
+ osutils.split_lines(u'foo\nbar\xae'))
+ self.assertEqual([u'foo\n', u'bar\xae\n'],
+ osutils.split_lines(u'foo\nbar\xae\n'))
+
+ def test_split_with_carriage_returns(self):
+ self.assertEqual(['foo\rbar\n'],
+ osutils.split_lines('foo\rbar\n'))
+
+
+class TestWalkDirs(tests.TestCaseInTempDir):
+
+ def assertExpectedBlocks(self, expected, result):
+ self.assertEqual(expected,
+ [(dirinfo, [line[0:3] for line in block])
+ for dirinfo, block in result])
+
+ def test_walkdirs(self):
+ tree = [
+ '.bzr',
+ '0file',
+ '1dir/',
+ '1dir/0file',
+ '1dir/1dir/',
+ '2file'
+ ]
+ self.build_tree(tree)
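+ # walkdirs yields ((dir relpath, dir path-from-top), [entries]) where
+ # each entry starts with (relpath, basename, kind); assertExpectedBlocks
+ # compares just those first three fields.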
+ expected_dirblocks = [
+ (('', '.'),
+ [('0file', '0file', 'file'),
+ ('1dir', '1dir', 'directory'),
+ ('2file', '2file', 'file'),
+ ]
+ ),
+ (('1dir', './1dir'),
+ [('1dir/0file', '0file', 'file'),
+ ('1dir/1dir', '1dir', 'directory'),
+ ]
+ ),
+ (('1dir/1dir', './1dir/1dir'),
+ [
+ ]
+ ),
+ ]
+ result = []
+ found_bzrdir = False
+ for dirdetail, dirblock in osutils.walkdirs('.'):
+ if len(dirblock) and dirblock[0][1] == '.bzr':
+ # this tests the filtering of selected paths
+ found_bzrdir = True
+ del dirblock[0]
+ result.append((dirdetail, dirblock))
+
+ self.assertTrue(found_bzrdir)
+ self.assertExpectedBlocks(expected_dirblocks, result)
+ # you can search a subdir only, with a supplied prefix.
+ result = []
+ for dirblock in osutils.walkdirs('./1dir', '1dir'):
+ result.append(dirblock)
+ self.assertExpectedBlocks(expected_dirblocks[1:], result)
+
+ def test_walkdirs_os_error(self):
+ # <https://bugs.launchpad.net/bzr/+bug/338653>
+ # Pyrex readdir didn't raise useful messages if it had an error
+ # reading the directory
+ if sys.platform == 'win32':
+ raise tests.TestNotApplicable(
+ "readdir IOError not tested on win32")
+ self.requireFeature(features.not_running_as_root)
+ os.mkdir("test-unreadable")
+ os.chmod("test-unreadable", 0000)
+ # must chmod it back so that it can be removed
+ self.addCleanup(os.chmod, "test-unreadable", 0700)
+ # The error is not raised until the generator is actually evaluated.
+ # (It would be ok if it happened earlier but at the moment it
+ # doesn't.)
+ e = self.assertRaises(OSError, list, osutils._walkdirs_utf8("."))
+ self.assertEquals('./test-unreadable', e.filename)
+ self.assertEquals(errno.EACCES, e.errno)
+ # Ensure the message contains the file name
+ self.assertContainsRe(str(e), "\./test-unreadable")
+
+
+ def test_walkdirs_encoding_error(self):
+ # <https://bugs.launchpad.net/bzr/+bug/488519>
+ # walkdirs didn't raise a useful message when the filenames
+ # are not using the filesystem's encoding
+
+ # require a bytestring based filesystem
+ self.requireFeature(features.ByteStringNamedFilesystem)
+
+ tree = [
+ '.bzr',
+ '0file',
+ '1dir/',
+ '1dir/0file',
+ '1dir/1dir/',
+ '1file'
+ ]
+
+ self.build_tree(tree)
+
+ # rename the 1file to a latin-1 filename
+ os.rename("./1file", "\xe8file")
+ if "\xe8file" not in os.listdir("."):
+ self.skip("Lack filesystem that preserves arbitrary bytes")
+
+ self._save_platform_info()
+ win32utils.winver = None # Avoid the win32 detection code
+ osutils._fs_enc = 'UTF-8'
+
+ # this should raise on error
+ def attempt():
+ for dirdetail, dirblock in osutils.walkdirs('.'):
+ pass
+
+ self.assertRaises(errors.BadFilenameEncoding, attempt)
+
+ def test__walkdirs_utf8(self):
+ tree = [
+ '.bzr',
+ '0file',
+ '1dir/',
+ '1dir/0file',
+ '1dir/1dir/',
+ '2file'
+ ]
+ self.build_tree(tree)
+ expected_dirblocks = [
+ (('', '.'),
+ [('0file', '0file', 'file'),
+ ('1dir', '1dir', 'directory'),
+ ('2file', '2file', 'file'),
+ ]
+ ),
+ (('1dir', './1dir'),
+ [('1dir/0file', '0file', 'file'),
+ ('1dir/1dir', '1dir', 'directory'),
+ ]
+ ),
+ (('1dir/1dir', './1dir/1dir'),
+ [
+ ]
+ ),
+ ]
+ result = []
+ found_bzrdir = False
+ for dirdetail, dirblock in osutils._walkdirs_utf8('.'):
+ if len(dirblock) and dirblock[0][1] == '.bzr':
+ # this tests the filtering of selected paths
+ found_bzrdir = True
+ del dirblock[0]
+ result.append((dirdetail, dirblock))
+
+ self.assertTrue(found_bzrdir)
+ self.assertExpectedBlocks(expected_dirblocks, result)
+
+ # you can search a subdir only, with a supplied prefix.
+ result = []
+ for dirblock in osutils.walkdirs('./1dir', '1dir'):
+ result.append(dirblock)
+ self.assertExpectedBlocks(expected_dirblocks[1:], result)
+
+ def _filter_out_stat(self, result):
+ """Filter out the stat value from the walkdirs result"""
+ for dirdetail, dirblock in result:
+ new_dirblock = []
+ for info in dirblock:
+ # Ignore info[3] which is the stat
+ new_dirblock.append((info[0], info[1], info[2], info[4]))
+ dirblock[:] = new_dirblock
+
+ def _save_platform_info(self):
+ self.overrideAttr(win32utils, 'winver')
+ self.overrideAttr(osutils, '_fs_enc')
+ self.overrideAttr(osutils, '_selected_dir_reader')
+
+ def assertDirReaderIs(self, expected):
+ """Assert the right implementation for _walkdirs_utf8 is chosen."""
+ # Force it to redetect
+ osutils._selected_dir_reader = None
+ # Nothing to list, but should still trigger the selection logic
+ self.assertEqual([(('', '.'), [])], list(osutils._walkdirs_utf8('.')))
+ self.assertIsInstance(osutils._selected_dir_reader, expected)
+
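+ # The force_* tests below fake the platform (win32utils.winver,
+ # osutils._fs_enc) to drive which DirReader _walkdirs_utf8 selects;
+ # the overrideAttr calls in _save_platform_info restore the real
+ # values afterwards.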
+ def test_force_walkdirs_utf8_fs_utf8(self):
+ self.requireFeature(UTF8DirReaderFeature)
+ self._save_platform_info()
+ win32utils.winver = None # Avoid the win32 detection code
+ osutils._fs_enc = 'utf-8'
+ self.assertDirReaderIs(
+ UTF8DirReaderFeature.module.UTF8DirReader)
+
+ def test_force_walkdirs_utf8_fs_ascii(self):
+ self.requireFeature(UTF8DirReaderFeature)
+ self._save_platform_info()
+ win32utils.winver = None # Avoid the win32 detection code
+ osutils._fs_enc = 'ascii'
+ self.assertDirReaderIs(
+ UTF8DirReaderFeature.module.UTF8DirReader)
+
+ def test_force_walkdirs_utf8_fs_latin1(self):
+ self._save_platform_info()
+ win32utils.winver = None # Avoid the win32 detection code
+ osutils._fs_enc = 'iso-8859-1'
+ self.assertDirReaderIs(osutils.UnicodeDirReader)
+
+ def test_force_walkdirs_utf8_nt(self):
+ # Disabled because the thunk of the whole walkdirs api is disabled.
+ self.requireFeature(test__walkdirs_win32.win32_readdir_feature)
+ self._save_platform_info()
+ win32utils.winver = 'Windows NT'
+ from bzrlib._walkdirs_win32 import Win32ReadDir
+ self.assertDirReaderIs(Win32ReadDir)
+
+ def test_force_walkdirs_utf8_98(self):
+ self.requireFeature(test__walkdirs_win32.win32_readdir_feature)
+ self._save_platform_info()
+ win32utils.winver = 'Windows 98'
+ self.assertDirReaderIs(osutils.UnicodeDirReader)
+
+ def test_unicode_walkdirs(self):
+ """Walkdirs should always return unicode paths."""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ name0 = u'0file-\xb6'
+ name1 = u'1dir-\u062c\u0648'
+ name2 = u'2file-\u0633'
+ tree = [
+ name0,
+ name1 + '/',
+ name1 + '/' + name0,
+ name1 + '/' + name1 + '/',
+ name2,
+ ]
+ self.build_tree(tree)
+ expected_dirblocks = [
+ ((u'', u'.'),
+ [(name0, name0, 'file', './' + name0),
+ (name1, name1, 'directory', './' + name1),
+ (name2, name2, 'file', './' + name2),
+ ]
+ ),
+ ((name1, './' + name1),
+ [(name1 + '/' + name0, name0, 'file', './' + name1
+ + '/' + name0),
+ (name1 + '/' + name1, name1, 'directory', './' + name1
+ + '/' + name1),
+ ]
+ ),
+ ((name1 + '/' + name1, './' + name1 + '/' + name1),
+ [
+ ]
+ ),
+ ]
+ result = list(osutils.walkdirs('.'))
+ self._filter_out_stat(result)
+ self.assertEqual(expected_dirblocks, result)
+ result = list(osutils.walkdirs(u'./'+name1, name1))
+ self._filter_out_stat(result)
+ self.assertEqual(expected_dirblocks[1:], result)
+
+ def test_unicode__walkdirs_utf8(self):
+ """Walkdirs_utf8 should always return utf8 paths.
+
+ The abspath portion might be in unicode or utf-8
+ """
+ self.requireFeature(features.UnicodeFilenameFeature)
+ name0 = u'0file-\xb6'
+ name1 = u'1dir-\u062c\u0648'
+ name2 = u'2file-\u0633'
+ tree = [
+ name0,
+ name1 + '/',
+ name1 + '/' + name0,
+ name1 + '/' + name1 + '/',
+ name2,
+ ]
+ self.build_tree(tree)
+ name0 = name0.encode('utf8')
+ name1 = name1.encode('utf8')
+ name2 = name2.encode('utf8')
+
+ expected_dirblocks = [
+ (('', '.'),
+ [(name0, name0, 'file', './' + name0),
+ (name1, name1, 'directory', './' + name1),
+ (name2, name2, 'file', './' + name2),
+ ]
+ ),
+ ((name1, './' + name1),
+ [(name1 + '/' + name0, name0, 'file', './' + name1
+ + '/' + name0),
+ (name1 + '/' + name1, name1, 'directory', './' + name1
+ + '/' + name1),
+ ]
+ ),
+ ((name1 + '/' + name1, './' + name1 + '/' + name1),
+ [
+ ]
+ ),
+ ]
+ result = []
+ # For ease in testing, if walkdirs_utf8 returns Unicode, assert that
+ # all abspaths are Unicode, and encode them back into utf8.
+ for dirdetail, dirblock in osutils._walkdirs_utf8('.'):
+ self.assertIsInstance(dirdetail[0], str)
+ if isinstance(dirdetail[1], unicode):
+ dirdetail = (dirdetail[0], dirdetail[1].encode('utf8'))
+ dirblock = [list(info) for info in dirblock]
+ for info in dirblock:
+ self.assertIsInstance(info[4], unicode)
+ info[4] = info[4].encode('utf8')
+ new_dirblock = []
+ for info in dirblock:
+ self.assertIsInstance(info[0], str)
+ self.assertIsInstance(info[1], str)
+ self.assertIsInstance(info[4], str)
+ # Remove the stat information
+ new_dirblock.append((info[0], info[1], info[2], info[4]))
+ result.append((dirdetail, new_dirblock))
+ self.assertEqual(expected_dirblocks, result)
+
+ def test__walkdirs_utf8_with_unicode_fs(self):
+ """UnicodeDirReader should be a safe fallback everywhere
+
+ The abspath portion should be in unicode
+ """
+ self.requireFeature(features.UnicodeFilenameFeature)
+ # Use the unicode reader. TODO: split into driver-and-driven unit
+ # tests.
+ self._save_platform_info()
+ osutils._selected_dir_reader = osutils.UnicodeDirReader()
+ name0u = u'0file-\xb6'
+ name1u = u'1dir-\u062c\u0648'
+ name2u = u'2file-\u0633'
+ tree = [
+ name0u,
+ name1u + '/',
+ name1u + '/' + name0u,
+ name1u + '/' + name1u + '/',
+ name2u,
+ ]
+ self.build_tree(tree)
+ name0 = name0u.encode('utf8')
+ name1 = name1u.encode('utf8')
+ name2 = name2u.encode('utf8')
+
+ # All of the abspaths should be in unicode, all of the relative paths
+ # should be in utf8
+ expected_dirblocks = [
+ (('', '.'),
+ [(name0, name0, 'file', './' + name0u),
+ (name1, name1, 'directory', './' + name1u),
+ (name2, name2, 'file', './' + name2u),
+ ]
+ ),
+ ((name1, './' + name1u),
+ [(name1 + '/' + name0, name0, 'file', './' + name1u
+ + '/' + name0u),
+ (name1 + '/' + name1, name1, 'directory', './' + name1u
+ + '/' + name1u),
+ ]
+ ),
+ ((name1 + '/' + name1, './' + name1u + '/' + name1u),
+ [
+ ]
+ ),
+ ]
+ result = list(osutils._walkdirs_utf8('.'))
+ self._filter_out_stat(result)
+ self.assertEqual(expected_dirblocks, result)
+
+ def test__walkdirs_utf8_win32readdir(self):
+ self.requireFeature(test__walkdirs_win32.win32_readdir_feature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ from bzrlib._walkdirs_win32 import Win32ReadDir
+ self._save_platform_info()
+ osutils._selected_dir_reader = Win32ReadDir()
+ name0u = u'0file-\xb6'
+ name1u = u'1dir-\u062c\u0648'
+ name2u = u'2file-\u0633'
+ tree = [
+ name0u,
+ name1u + '/',
+ name1u + '/' + name0u,
+ name1u + '/' + name1u + '/',
+ name2u,
+ ]
+ self.build_tree(tree)
+ name0 = name0u.encode('utf8')
+ name1 = name1u.encode('utf8')
+ name2 = name2u.encode('utf8')
+
+ # All of the abspaths should be in unicode, all of the relative paths
+ # should be in utf8
+ expected_dirblocks = [
+ (('', '.'),
+ [(name0, name0, 'file', './' + name0u),
+ (name1, name1, 'directory', './' + name1u),
+ (name2, name2, 'file', './' + name2u),
+ ]
+ ),
+ ((name1, './' + name1u),
+ [(name1 + '/' + name0, name0, 'file', './' + name1u
+ + '/' + name0u),
+ (name1 + '/' + name1, name1, 'directory', './' + name1u
+ + '/' + name1u),
+ ]
+ ),
+ ((name1 + '/' + name1, './' + name1u + '/' + name1u),
+ [
+ ]
+ ),
+ ]
+ result = list(osutils._walkdirs_utf8(u'.'))
+ self._filter_out_stat(result)
+ self.assertEqual(expected_dirblocks, result)
+
+ def assertStatIsCorrect(self, path, win32stat):
+ os_stat = os.stat(path)
+ self.assertEqual(os_stat.st_size, win32stat.st_size)
+ self.assertAlmostEqual(os_stat.st_mtime, win32stat.st_mtime, places=4)
+ self.assertAlmostEqual(os_stat.st_ctime, win32stat.st_ctime, places=4)
+ self.assertAlmostEqual(os_stat.st_atime, win32stat.st_atime, places=4)
+ self.assertEqual(os_stat.st_dev, win32stat.st_dev)
+ self.assertEqual(os_stat.st_ino, win32stat.st_ino)
+ self.assertEqual(os_stat.st_mode, win32stat.st_mode)
+
+ def test__walkdirs_utf_win32_find_file_stat_file(self):
+ """make sure our Stat values are valid"""
+ self.requireFeature(test__walkdirs_win32.win32_readdir_feature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ from bzrlib._walkdirs_win32 import Win32ReadDir
+ name0u = u'0file-\xb6'
+ name0 = name0u.encode('utf8')
+ self.build_tree([name0u])
+ # I hate to sleep() here, but I'm trying to make the ctime different
+ # from the mtime
+ time.sleep(2)
+ f = open(name0u, 'ab')
+ try:
+ f.write('just a small update')
+ finally:
+ f.close()
+
+ result = Win32ReadDir().read_dir('', u'.')
+ entry = result[0]
+ self.assertEqual((name0, name0, 'file'), entry[:3])
+ self.assertEqual(u'./' + name0u, entry[4])
+ self.assertStatIsCorrect(entry[4], entry[3])
+ self.assertNotEqual(entry[3].st_mtime, entry[3].st_ctime)
+
+ def test__walkdirs_utf_win32_find_file_stat_directory(self):
+ """make sure our Stat values are valid"""
+ self.requireFeature(test__walkdirs_win32.win32_readdir_feature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ from bzrlib._walkdirs_win32 import Win32ReadDir
+ name0u = u'0dir-\u062c\u0648'
+ name0 = name0u.encode('utf8')
+ self.build_tree([name0u + '/'])
+
+ result = Win32ReadDir().read_dir('', u'.')
+ entry = result[0]
+ self.assertEqual((name0, name0, 'directory'), entry[:3])
+ self.assertEqual(u'./' + name0u, entry[4])
+ self.assertStatIsCorrect(entry[4], entry[3])
+
+ def assertPathCompare(self, path_less, path_greater):
+ """check that path_less and path_greater compare correctly."""
+ self.assertEqual(0, osutils.compare_paths_prefix_order(
+ path_less, path_less))
+ self.assertEqual(0, osutils.compare_paths_prefix_order(
+ path_greater, path_greater))
+ self.assertEqual(-1, osutils.compare_paths_prefix_order(
+ path_less, path_greater))
+ self.assertEqual(1, osutils.compare_paths_prefix_order(
+ path_greater, path_less))
+
+ def test_compare_paths_prefix_order(self):
+ # root before all else
+ self.assertPathCompare("/", "/a")
+ # alpha within a dir
+ self.assertPathCompare("/a", "/b")
+ self.assertPathCompare("/b", "/z")
+ # high dirs before lower.
+ self.assertPathCompare("/z", "/a/a")
+ # except if the deeper dir should be output first
+ self.assertPathCompare("/a/b/c", "/d/g")
+ # lexical between dirs of the same height
+ self.assertPathCompare("/a/z", "/z/z")
+ self.assertPathCompare("/a/c/z", "/a/d/e")
+
+ # this should also be consistent for no leading / paths
+ # root before all else
+ self.assertPathCompare("", "a")
+ # alpha within a dir
+ self.assertPathCompare("a", "b")
+ self.assertPathCompare("b", "z")
+ # high dirs before lower.
+ self.assertPathCompare("z", "a/a")
+ # except if the deeper dir should be output first
+ self.assertPathCompare("a/b/c", "d/g")
+ # lexical between dirs of the same height
+ self.assertPathCompare("a/z", "z/z")
+ self.assertPathCompare("a/c/z", "a/d/e")
+
+ def test_path_prefix_sorting(self):
+ """Doing a sort on path prefix should match our sample data."""
+ original_paths = [
+ 'a',
+ 'a/b',
+ 'a/b/c',
+ 'b',
+ 'b/c',
+ 'd',
+ 'd/e',
+ 'd/e/f',
+ 'd/f',
+ 'd/g',
+ 'g',
+ ]
+
+ dir_sorted_paths = [
+ 'a',
+ 'b',
+ 'd',
+ 'g',
+ 'a/b',
+ 'a/b/c',
+ 'b/c',
+ 'd/e',
+ 'd/f',
+ 'd/g',
+ 'd/e/f',
+ ]
+
+ self.assertEqual(
+ dir_sorted_paths,
+ sorted(original_paths, key=osutils.path_prefix_key))
+ # using the comparison routine should work too:
+ self.assertEqual(
+ dir_sorted_paths,
+ sorted(original_paths, cmp=osutils.compare_paths_prefix_order))
+
+
+class TestCopyTree(tests.TestCaseInTempDir):
+
+ def test_copy_basic_tree(self):
+ self.build_tree(['source/', 'source/a', 'source/b/', 'source/b/c'])
+ osutils.copy_tree('source', 'target')
+ self.assertEqual(['a', 'b'], sorted(os.listdir('target')))
+ self.assertEqual(['c'], os.listdir('target/b'))
+
+ def test_copy_tree_target_exists(self):
+ self.build_tree(['source/', 'source/a', 'source/b/', 'source/b/c',
+ 'target/'])
+ osutils.copy_tree('source', 'target')
+ self.assertEqual(['a', 'b'], sorted(os.listdir('target')))
+ self.assertEqual(['c'], os.listdir('target/b'))
+
+ def test_copy_tree_symlinks(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.build_tree(['source/'])
+ os.symlink('a/generic/path', 'source/lnk')
+ osutils.copy_tree('source', 'target')
+ self.assertEqual(['lnk'], os.listdir('target'))
+ self.assertEqual('a/generic/path', os.readlink('target/lnk'))
+
+ def test_copy_tree_handlers(self):
+ processed_files = []
+ processed_links = []
+ def file_handler(from_path, to_path):
+ processed_files.append(('f', from_path, to_path))
+ def dir_handler(from_path, to_path):
+ processed_files.append(('d', from_path, to_path))
+ def link_handler(from_path, to_path):
+ processed_links.append((from_path, to_path))
+ handlers = {'file':file_handler,
+ 'directory':dir_handler,
+ 'symlink':link_handler,
+ }
+
+ self.build_tree(['source/', 'source/a', 'source/b/', 'source/b/c'])
+ if osutils.has_symlinks():
+ os.symlink('a/generic/path', 'source/lnk')
+ osutils.copy_tree('source', 'target', handlers=handlers)
+
+ self.assertEqual([('d', 'source', 'target'),
+ ('f', 'source/a', 'target/a'),
+ ('d', 'source/b', 'target/b'),
+ ('f', 'source/b/c', 'target/b/c'),
+ ], processed_files)
+ self.assertPathDoesNotExist('target')
+ if osutils.has_symlinks():
+ self.assertEqual([('source/lnk', 'target/lnk')], processed_links)
+
+
+class TestSetUnsetEnv(tests.TestCase):
+ """Test updating the environment"""
+
+ def setUp(self):
+ super(TestSetUnsetEnv, self).setUp()
+
+ self.assertEqual(None, os.environ.get('BZR_TEST_ENV_VAR'),
+ 'Environment was not cleaned up properly.'
+ ' Variable BZR_TEST_ENV_VAR should not exist.')
+ def cleanup():
+ if 'BZR_TEST_ENV_VAR' in os.environ:
+ del os.environ['BZR_TEST_ENV_VAR']
+ self.addCleanup(cleanup)
+
+ def test_set(self):
+ """Test that we can set an env variable"""
+ old = osutils.set_or_unset_env('BZR_TEST_ENV_VAR', 'foo')
+ self.assertEqual(None, old)
+ self.assertEqual('foo', os.environ.get('BZR_TEST_ENV_VAR'))
+
+ def test_double_set(self):
+ """Test that we get the old value out"""
+ osutils.set_or_unset_env('BZR_TEST_ENV_VAR', 'foo')
+ old = osutils.set_or_unset_env('BZR_TEST_ENV_VAR', 'bar')
+ self.assertEqual('foo', old)
+ self.assertEqual('bar', os.environ.get('BZR_TEST_ENV_VAR'))
+
+ def test_unicode(self):
+ """Environment can only contain plain strings
+
+ So Unicode strings must be encoded.
+ """
+ uni_val, env_val = tests.probe_unicode_in_user_encoding()
+ if uni_val is None:
+ raise tests.TestSkipped(
+ 'Cannot find a unicode character that works in encoding %s'
+ % (osutils.get_user_encoding(),))
+
+ old = osutils.set_or_unset_env('BZR_TEST_ENV_VAR', uni_val)
+ self.assertEqual(env_val, os.environ.get('BZR_TEST_ENV_VAR'))
+
+ def test_unset(self):
+ """Test that passing None will remove the env var"""
+ osutils.set_or_unset_env('BZR_TEST_ENV_VAR', 'foo')
+ old = osutils.set_or_unset_env('BZR_TEST_ENV_VAR', None)
+ self.assertEqual('foo', old)
+ self.assertEqual(None, os.environ.get('BZR_TEST_ENV_VAR'))
+ self.assertFalse('BZR_TEST_ENV_VAR' in os.environ)
+
+
+class TestSizeShaFile(tests.TestCaseInTempDir):
+
+ def test_sha_empty(self):
+ self.build_tree_contents([('foo', '')])
+ expected_sha = osutils.sha_string('')
+ f = open('foo')
+ self.addCleanup(f.close)
+ size, sha = osutils.size_sha_file(f)
+ self.assertEqual(0, size)
+ self.assertEqual(expected_sha, sha)
+
+ def test_sha_mixed_endings(self):
+ text = 'test\r\nwith\nall\rpossible line endings\r\n'
+ self.build_tree_contents([('foo', text)])
+ expected_sha = osutils.sha_string(text)
+ f = open('foo', 'rb')
+ self.addCleanup(f.close)
+ size, sha = osutils.size_sha_file(f)
+ self.assertEqual(38, size)
+ self.assertEqual(expected_sha, sha)
+
+
+class TestShaFileByName(tests.TestCaseInTempDir):
+
+ def test_sha_empty(self):
+ self.build_tree_contents([('foo', '')])
+ expected_sha = osutils.sha_string('')
+ self.assertEqual(expected_sha, osutils.sha_file_by_name('foo'))
+
+ def test_sha_mixed_endings(self):
+ text = 'test\r\nwith\nall\rpossible line endings\r\n'
+ self.build_tree_contents([('foo', text)])
+ expected_sha = osutils.sha_string(text)
+ self.assertEqual(expected_sha, osutils.sha_file_by_name('foo'))
+
+
+class TestResourceLoading(tests.TestCaseInTempDir):
+
+ def test_resource_string(self):
+ # test resource in bzrlib
+ text = osutils.resource_string('bzrlib', 'debug.py')
+ self.assertContainsRe(text, "debug_flags = set()")
+ # test resource under bzrlib
+ text = osutils.resource_string('bzrlib.ui', 'text.py')
+ self.assertContainsRe(text, "class TextUIFactory")
+ # test unsupported package
+ self.assertRaises(errors.BzrError, osutils.resource_string, 'zzzz',
+ 'yyy.xx')
+ # test unknown resource
+ self.assertRaises(IOError, osutils.resource_string, 'bzrlib', 'yyy.xx')
+
+
+class TestReCompile(tests.TestCase):
+
+ def _deprecated_re_compile_checked(self, *args, **kwargs):
+ return self.applyDeprecated(symbol_versioning.deprecated_in((2, 2, 0)),
+ osutils.re_compile_checked, *args, **kwargs)
+
+ def test_re_compile_checked(self):
+ r = self._deprecated_re_compile_checked(r'A*', re.IGNORECASE)
+ self.assertTrue(r.match('aaaa'))
+ self.assertTrue(r.match('aAaA'))
+
+ def test_re_compile_checked_error(self):
+ # like https://bugs.launchpad.net/bzr/+bug/251352
+
+ # Due to a possible test isolation error, re.compile is not lazy at
+ # this point. We re-install the lazy compile.
+ lazy_regex.install_lazy_compile()
+ err = self.assertRaises(
+ errors.BzrCommandError,
+ self._deprecated_re_compile_checked, '*', re.IGNORECASE, 'test case')
+ self.assertEqual(
+ 'Invalid regular expression in test case: '
+ '"*" nothing to repeat',
+ str(err))
+
+
+class TestDirReader(tests.TestCaseInTempDir):
+
+ scenarios = dir_reader_scenarios()
+
+ # Set by load_tests
+ _dir_reader_class = None
+ _native_to_unicode = None
+
+ def setUp(self):
+ tests.TestCaseInTempDir.setUp(self)
+ self.overrideAttr(osutils,
+ '_selected_dir_reader', self._dir_reader_class())
+
+ def _get_ascii_tree(self):
+ tree = [
+ '0file',
+ '1dir/',
+ '1dir/0file',
+ '1dir/1dir/',
+ '2file'
+ ]
+ expected_dirblocks = [
+ (('', '.'),
+ [('0file', '0file', 'file'),
+ ('1dir', '1dir', 'directory'),
+ ('2file', '2file', 'file'),
+ ]
+ ),
+ (('1dir', './1dir'),
+ [('1dir/0file', '0file', 'file'),
+ ('1dir/1dir', '1dir', 'directory'),
+ ]
+ ),
+ (('1dir/1dir', './1dir/1dir'),
+ [
+ ]
+ ),
+ ]
+ return tree, expected_dirblocks
+
+ def test_walk_cur_dir(self):
+ tree, expected_dirblocks = self._get_ascii_tree()
+ self.build_tree(tree)
+ result = list(osutils._walkdirs_utf8('.'))
+ # Filter out stat and abspath
+ self.assertEqual(expected_dirblocks,
+ [(dirinfo, [line[0:3] for line in block])
+ for dirinfo, block in result])
+
+ def test_walk_sub_dir(self):
+ tree, expected_dirblocks = self._get_ascii_tree()
+ self.build_tree(tree)
+ # you can search a subdir only, with a supplied prefix.
+ result = list(osutils._walkdirs_utf8('./1dir', '1dir'))
+ # Filter out stat and abspath
+ self.assertEqual(expected_dirblocks[1:],
+ [(dirinfo, [line[0:3] for line in block])
+ for dirinfo, block in result])
+
+ def _get_unicode_tree(self):
+ name0u = u'0file-\xb6'
+ name1u = u'1dir-\u062c\u0648'
+ name2u = u'2file-\u0633'
+ tree = [
+ name0u,
+ name1u + '/',
+ name1u + '/' + name0u,
+ name1u + '/' + name1u + '/',
+ name2u,
+ ]
+ name0 = name0u.encode('UTF-8')
+ name1 = name1u.encode('UTF-8')
+ name2 = name2u.encode('UTF-8')
+ expected_dirblocks = [
+ (('', '.'),
+ [(name0, name0, 'file', './' + name0u),
+ (name1, name1, 'directory', './' + name1u),
+ (name2, name2, 'file', './' + name2u),
+ ]
+ ),
+ ((name1, './' + name1u),
+ [(name1 + '/' + name0, name0, 'file', './' + name1u
+ + '/' + name0u),
+ (name1 + '/' + name1, name1, 'directory', './' + name1u
+ + '/' + name1u),
+ ]
+ ),
+ ((name1 + '/' + name1, './' + name1u + '/' + name1u),
+ [
+ ]
+ ),
+ ]
+ return tree, expected_dirblocks
+
+ def _filter_out(self, raw_dirblocks):
+ """Filter out a walkdirs_utf8 result.
+
+ stat field is removed, all native paths are converted to unicode
+ """
+ filtered_dirblocks = []
+ for dirinfo, block in raw_dirblocks:
+ dirinfo = (dirinfo[0], self._native_to_unicode(dirinfo[1]))
+ details = []
+ for line in block:
+ details.append(line[0:3] + (self._native_to_unicode(line[4]), ))
+ filtered_dirblocks.append((dirinfo, details))
+ return filtered_dirblocks
+
+ def test_walk_unicode_tree(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree, expected_dirblocks = self._get_unicode_tree()
+ self.build_tree(tree)
+ result = list(osutils._walkdirs_utf8('.'))
+ self.assertEqual(expected_dirblocks, self._filter_out(result))
+
+ def test_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.requireFeature(features.UnicodeFilenameFeature)
+ target = u'target\N{Euro Sign}'
+ link_name = u'l\N{Euro Sign}nk'
+ os.symlink(target, link_name)
+ target_utf8 = target.encode('UTF-8')
+ link_name_utf8 = link_name.encode('UTF-8')
+ expected_dirblocks = [
+ (('', '.'),
+ [(link_name_utf8, link_name_utf8,
+ 'symlink', './' + link_name),],
+ )]
+ result = list(osutils._walkdirs_utf8('.'))
+ self.assertEqual(expected_dirblocks, self._filter_out(result))
+
+
+class TestReadLink(tests.TestCaseInTempDir):
+ """Exposes os.readlink() problems and the osutils solution.
+
+ The only guarantee offered by os.readlink(), starting with Python 2.6, is
+ that a unicode string will be returned if a unicode string is passed.
+
+ But prior Python versions failed to properly encode the passed unicode
+ string.
+ """
+ _test_needs_features = [features.SymlinkFeature, features.UnicodeFilenameFeature]
+
+ def setUp(self):
+ super(TestReadLink, self).setUp()
+ self.link = u'l\N{Euro Sign}ink'
+ self.target = u'targe\N{Euro Sign}t'
+ os.symlink(self.target, self.link)
+
+ def test_os_readlink_link_encoding(self):
+ self.assertEquals(self.target, os.readlink(self.link))
+
+ def test_os_readlink_link_decoding(self):
+ self.assertEquals(self.target.encode(osutils._fs_enc),
+ os.readlink(self.link.encode(osutils._fs_enc)))
+
+
+class TestConcurrency(tests.TestCase):
+
+ def setUp(self):
+ super(TestConcurrency, self).setUp()
+ self.overrideAttr(osutils, '_cached_local_concurrency')
+
+ def test_local_concurrency(self):
+ concurrency = osutils.local_concurrency()
+ self.assertIsInstance(concurrency, int)
+
+ def test_local_concurrency_environment_variable(self):
+ self.overrideEnv('BZR_CONCURRENCY', '2')
+ self.assertEqual(2, osutils.local_concurrency(use_cache=False))
+ self.overrideEnv('BZR_CONCURRENCY', '3')
+ self.assertEqual(3, osutils.local_concurrency(use_cache=False))
+ self.overrideEnv('BZR_CONCURRENCY', 'foo')
+ self.assertEqual(1, osutils.local_concurrency(use_cache=False))
+
+ def test_option_concurrency(self):
+ self.overrideEnv('BZR_CONCURRENCY', '1')
+ self.run_bzr('rocks --concurrency 42')
+ # Command line overrides environment variable
+ self.assertEquals('42', os.environ['BZR_CONCURRENCY'])
+ self.assertEquals(42, osutils.local_concurrency(use_cache=False))
+
+
+class TestFailedToLoadExtension(tests.TestCase):
+
+ def _try_loading(self):
+ try:
+ import bzrlib._fictional_extension_py
+ except ImportError, e:
+ osutils.failed_to_load_extension(e)
+ return True
+
+ def setUp(self):
+ super(TestFailedToLoadExtension, self).setUp()
+ self.overrideAttr(osutils, '_extension_load_failures', [])
+
+ def test_failure_to_load(self):
+ self._try_loading()
+ self.assertLength(1, osutils._extension_load_failures)
+ self.assertEquals(osutils._extension_load_failures[0],
+ "No module named _fictional_extension_py")
+
+ def test_report_extension_load_failures_no_warning(self):
+ self.assertTrue(self._try_loading())
+ warnings, result = self.callCatchWarnings(osutils.report_extension_load_failures)
+ # it used to give a Python warning; it no longer does
+ self.assertLength(0, warnings)
+
+ def test_report_extension_load_failures_message(self):
+ log = StringIO()
+ trace.push_log_file(log)
+ self.assertTrue(self._try_loading())
+ osutils.report_extension_load_failures()
+ self.assertContainsRe(
+ log.getvalue(),
+ r"bzr: warning: some compiled extensions could not be loaded; "
+ "see <https://answers\.launchpad\.net/bzr/\+faq/703>\n"
+ )
+
+
+class TestTerminalWidth(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self._orig_terminal_size_state = osutils._terminal_size_state
+ self._orig_first_terminal_size = osutils._first_terminal_size
+ self.addCleanup(self.restore_osutils_globals)
+ osutils._terminal_size_state = 'no_data'
+ osutils._first_terminal_size = None
+
+ def restore_osutils_globals(self):
+ osutils._terminal_size_state = self._orig_terminal_size_state
+ osutils._first_terminal_size = self._orig_first_terminal_size
+
+ def replace_stdout(self, new):
+ self.overrideAttr(sys, 'stdout', new)
+
+ def replace__terminal_size(self, new):
+ self.overrideAttr(osutils, '_terminal_size', new)
+
+ def set_fake_tty(self):
+
+ class I_am_a_tty(object):
+ def isatty(self):
+ return True
+
+ self.replace_stdout(I_am_a_tty())
+
+ def test_default_values(self):
+ self.assertEqual(80, osutils.default_terminal_width)
+
+ def test_defaults_to_BZR_COLUMNS(self):
+ # BZR_COLUMNS is set by the test framework
+ self.assertNotEqual('12', os.environ['BZR_COLUMNS'])
+ self.overrideEnv('BZR_COLUMNS', '12')
+ self.assertEqual(12, osutils.terminal_width())
+
+ def test_BZR_COLUMNS_0_no_limit(self):
+ self.overrideEnv('BZR_COLUMNS', '0')
+ self.assertEqual(None, osutils.terminal_width())
+
+ def test_falls_back_to_COLUMNS(self):
+ self.overrideEnv('BZR_COLUMNS', None)
+ self.assertNotEqual('42', os.environ['COLUMNS'])
+ self.set_fake_tty()
+ self.overrideEnv('COLUMNS', '42')
+ self.assertEqual(42, osutils.terminal_width())
+
+ def test_tty_default_without_columns(self):
+ self.overrideEnv('BZR_COLUMNS', None)
+ self.overrideEnv('COLUMNS', None)
+
+ def terminal_size(w, h):
+ return 42, 42
+
+ self.set_fake_tty()
+ # We need to override the osutils definition as it depends on the
+ # running environment, which we can't control (PQM running without a
+ # controlling terminal is one example).
+ self.replace__terminal_size(terminal_size)
+ self.assertEqual(42, osutils.terminal_width())
+
+ def test_non_tty_default_without_columns(self):
+ self.overrideEnv('BZR_COLUMNS', None)
+ self.overrideEnv('COLUMNS', None)
+ self.replace_stdout(None)
+ self.assertEqual(None, osutils.terminal_width())
+
+ def test_no_TIOCGWINSZ(self):
+ self.requireFeature(term_ios_feature)
+ termios = term_ios_feature.module
+ # bug 63539 is about a termios without TIOCGWINSZ attribute
+ try:
+ orig = termios.TIOCGWINSZ
+ except AttributeError:
+ # We won't remove TIOCGWINSZ, because it doesn't exist anyway :)
+ pass
+ else:
+ self.overrideAttr(termios, 'TIOCGWINSZ')
+ del termios.TIOCGWINSZ
+ self.overrideEnv('BZR_COLUMNS', None)
+ self.overrideEnv('COLUMNS', None)
+ # Whatever the result is, if we don't raise an exception, it's ok.
+ osutils.terminal_width()
+
+
+class TestCreationOps(tests.TestCaseInTempDir):
+ _test_needs_features = [features.chown_feature]
+
+ def setUp(self):
+ tests.TestCaseInTempDir.setUp(self)
+ self.overrideAttr(os, 'chown', self._dummy_chown)
+
+ # params set by call to _dummy_chown
+ self.path = self.uid = self.gid = None
+
+ def _dummy_chown(self, path, uid, gid):
+ self.path, self.uid, self.gid = path, uid, gid
+
+ def test_copy_ownership_from_path(self):
+ """copy_ownership_from_path test with specified src."""
+ ownsrc = '/'
+ f = open('test_file', 'wt')
+ osutils.copy_ownership_from_path('test_file', ownsrc)
+
+ s = os.stat(ownsrc)
+ self.assertEquals(self.path, 'test_file')
+ self.assertEquals(self.uid, s.st_uid)
+ self.assertEquals(self.gid, s.st_gid)
+
+ def test_copy_ownership_nonesrc(self):
+ """copy_ownership_from_path test with src=None."""
+ f = open('test_file', 'wt')
+ # should use parent dir for ownership
+ osutils.copy_ownership_from_path('test_file')
+
+ s = os.stat('..')
+ self.assertEquals(self.path, 'test_file')
+ self.assertEquals(self.uid, s.st_uid)
+ self.assertEquals(self.gid, s.st_gid)
+
+
+class TestPathFromEnviron(tests.TestCase):
+
+ def test_is_unicode(self):
+ self.overrideEnv('BZR_TEST_PATH', './anywhere at all/')
+ path = osutils.path_from_environ('BZR_TEST_PATH')
+ self.assertIsInstance(path, unicode)
+ self.assertEqual(u'./anywhere at all/', path)
+
+ def test_posix_path_env_ascii(self):
+ self.overrideEnv('BZR_TEST_PATH', '/tmp')
+ home = osutils._posix_path_from_environ('BZR_TEST_PATH')
+ self.assertIsInstance(home, unicode)
+ self.assertEqual(u'/tmp', home)
+
+ def test_posix_path_env_unicode(self):
+ self.requireFeature(features.ByteStringNamedFilesystem)
+ self.overrideEnv('BZR_TEST_PATH', '/home/\xa7test')
+ self.overrideAttr(osutils, "_fs_enc", "iso8859-1")
+ self.assertEqual(u'/home/\xa7test',
+ osutils._posix_path_from_environ('BZR_TEST_PATH'))
+ osutils._fs_enc = "iso8859-5"
+ self.assertEqual(u'/home/\u0407test',
+ osutils._posix_path_from_environ('BZR_TEST_PATH'))
+ osutils._fs_enc = "utf-8"
+ self.assertRaises(errors.BadFilenameEncoding,
+ osutils._posix_path_from_environ, 'BZR_TEST_PATH')
+
+
+class TestGetHomeDir(tests.TestCase):
+
+ def test_is_unicode(self):
+ home = osutils._get_home_dir()
+ self.assertIsInstance(home, unicode)
+
+ def test_posix_homeless(self):
+ self.overrideEnv('HOME', None)
+ home = osutils._get_home_dir()
+ self.assertIsInstance(home, unicode)
+
+ def test_posix_home_ascii(self):
+ self.overrideEnv('HOME', '/home/test')
+ home = osutils._posix_get_home_dir()
+ self.assertIsInstance(home, unicode)
+ self.assertEqual(u'/home/test', home)
+
+ def test_posix_home_unicode(self):
+ self.requireFeature(features.ByteStringNamedFilesystem)
+ self.overrideEnv('HOME', '/home/\xa7test')
+ self.overrideAttr(osutils, "_fs_enc", "iso8859-1")
+ self.assertEqual(u'/home/\xa7test', osutils._posix_get_home_dir())
+ osutils._fs_enc = "iso8859-5"
+ self.assertEqual(u'/home/\u0407test', osutils._posix_get_home_dir())
+ osutils._fs_enc = "utf-8"
+ self.assertRaises(errors.BadFilenameEncoding,
+ osutils._posix_get_home_dir)
+
+
+class TestGetuserUnicode(tests.TestCase):
+
+ def test_is_unicode(self):
+ user = osutils.getuser_unicode()
+ self.assertIsInstance(user, unicode)
+
+ def envvar_to_override(self):
+ if sys.platform == "win32":
+ # Disable use of platform calls on Windows so the envvar is used
+ self.overrideAttr(win32utils, 'has_ctypes', False)
+ return 'USERNAME' # only variable used on windows
+ return 'LOGNAME' # first variable checked by getpass.getuser()
+
+ def test_ascii_user(self):
+ self.overrideEnv(self.envvar_to_override(), 'jrandom')
+ self.assertEqual(u'jrandom', osutils.getuser_unicode())
+
+ def test_unicode_user(self):
+ ue = osutils.get_user_encoding()
+ uni_val, env_val = tests.probe_unicode_in_user_encoding()
+ if uni_val is None:
+ raise tests.TestSkipped(
+ 'Cannot find a unicode character that works in encoding %s'
+ % (osutils.get_user_encoding(),))
+ uni_username = u'jrandom' + uni_val
+ encoded_username = uni_username.encode(ue)
+ self.overrideEnv(self.envvar_to_override(), encoded_username)
+ self.assertEqual(uni_username, osutils.getuser_unicode())
+
+
+class TestBackupNames(tests.TestCase):
+
+ def setUp(self):
+ super(TestBackupNames, self).setUp()
+ self.backups = []
+
+ def backup_exists(self, name):
+ return name in self.backups
+
+ def available_backup_name(self, name):
+ backup_name = osutils.available_backup_name(name, self.backup_exists)
+ self.backups.append(backup_name)
+ return backup_name
+
+ def assertBackupName(self, expected, name):
+ self.assertEqual(expected, self.available_backup_name(name))
+
+ def test_empty(self):
+ self.assertBackupName('file.~1~', 'file')
+
+ def test_existing(self):
+ self.available_backup_name('file')
+ self.available_backup_name('file')
+ self.assertBackupName('file.~3~', 'file')
+ # Empty slots are reused; this is not a strict requirement and may be
+ # revisited if we test against all implementations.
+ self.backups.remove('file.~2~')
+ self.assertBackupName('file.~2~', 'file')
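+
+ # A usage sketch outside the test harness (os.path.exists is only an
+ # illustrative predicate; any callable mapping name -> bool will do):
+ #
+ #   osutils.available_backup_name('file', os.path.exists)
+ #   # -> 'file.~1~' when no backup of 'file' exists yet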
+
+
+class TestFindExecutableInPath(tests.TestCase):
+
+ def test_windows(self):
+ if sys.platform != 'win32':
+ raise tests.TestSkipped('test requires win32')
+ self.assertTrue(osutils.find_executable_on_path('explorer') is not None)
+ self.assertTrue(
+ osutils.find_executable_on_path('explorer.exe') is not None)
+ self.assertTrue(
+ osutils.find_executable_on_path('EXPLORER.EXE') is not None)
+ self.assertTrue(
+ osutils.find_executable_on_path('THIS SHOULD NOT EXIST') is None)
+ self.assertTrue(osutils.find_executable_on_path('file.txt') is None)
+
+ def test_windows_app_path(self):
+ if sys.platform != 'win32':
+ raise tests.TestSkipped('test requires win32')
+ # Override PATH env var so that exe can only be found on App Path
+ self.overrideEnv('PATH', '')
+ # Internet Explorer is always registered in the App Path
+ self.assertTrue(osutils.find_executable_on_path('iexplore') is not None)
+
+ def test_other(self):
+ if sys.platform == 'win32':
+ raise tests.TestSkipped('test requires non-win32')
+ self.assertTrue(osutils.find_executable_on_path('sh') is not None)
+ self.assertTrue(
+ osutils.find_executable_on_path('THIS SHOULD NOT EXIST') is None)
+
+
+class TestEnvironmentErrors(tests.TestCase):
+ """Test handling of environmental errors"""
+
+ def test_is_oserror(self):
+ self.assertTrue(osutils.is_environment_error(
+ OSError(errno.EINVAL, "Invalid parameter")))
+
+ def test_is_ioerror(self):
+ self.assertTrue(osutils.is_environment_error(
+ IOError(errno.EINVAL, "Invalid parameter")))
+
+ def test_is_socket_error(self):
+ self.assertTrue(osutils.is_environment_error(
+ socket.error(errno.EINVAL, "Invalid parameter")))
+
+ def test_is_select_error(self):
+ self.assertTrue(osutils.is_environment_error(
+ select.error(errno.EINVAL, "Invalid parameter")))
+
+ def test_is_pywintypes_error(self):
+ self.requireFeature(features.pywintypes)
+ import pywintypes
+ self.assertTrue(osutils.is_environment_error(
+ pywintypes.error(errno.EINVAL, "Invalid parameter", "Caller")))
diff --git a/bzrlib/tests/test_osutils_encodings.py b/bzrlib/tests/test_osutils_encodings.py
new file mode 100644
index 0000000..fe90794
--- /dev/null
+++ b/bzrlib/tests/test_osutils_encodings.py
@@ -0,0 +1,202 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the osutils wrapper."""
+
+import codecs
+import locale
+import sys
+
+from bzrlib import (
+ osutils,
+ )
+from bzrlib.tests import (
+ StringIOWrapper,
+ TestCase,
+ )
+
+
+class FakeCodec(object):
+ """Special class that helps testing over several non-existed encodings.
+
+ Clients can add new encoding names, but because of how codecs is
+ implemented they cannot be removed. Be careful with naming to avoid
+ collisions between tests.
+ """
+ _registered = False
+ _enabled_encodings = set()
+
+ def add(self, encoding_name):
+ """Adding encoding name to fake.
+
+ :type encoding_name: lowercase plain string
+ """
+ if not self._registered:
+ codecs.register(self)
+ self._registered = True
+ if encoding_name is not None:
+ self._enabled_encodings.add(encoding_name)
+
+ def __call__(self, encoding_name):
+ """Called indirectly by codecs module during lookup"""
+ if encoding_name in self._enabled_encodings:
+ return codecs.lookup('latin-1')
+
+
+fake_codec = FakeCodec()
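+# Usage sketch: fake_codec.add('some-name') makes codecs.lookup('some-name')
+# resolve to latin-1 for the rest of the process; registrations cannot be
+# removed, hence the naming caution in the FakeCodec docstring.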
+
+
+class TestFakeCodec(TestCase):
+
+ def test_fake_codec(self):
+ self.assertRaises(LookupError, codecs.lookup, 'fake')
+
+ fake_codec.add('fake')
+ codecs.lookup('fake')
+
+
+class TestTerminalEncoding(TestCase):
+ """Test the auto-detection of proper terminal encoding."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.overrideAttr(sys, 'stdin')
+ self.overrideAttr(sys, 'stdout')
+ self.overrideAttr(sys, 'stderr')
+ self.overrideAttr(osutils, '_cached_user_encoding')
+
+ def make_wrapped_streams(self,
+ stdout_encoding,
+ stderr_encoding,
+ stdin_encoding,
+ user_encoding='user_encoding',
+ enable_fake_encodings=True):
+ sys.stdout = StringIOWrapper()
+ sys.stdout.encoding = stdout_encoding
+ sys.stderr = StringIOWrapper()
+ sys.stderr.encoding = stderr_encoding
+ sys.stdin = StringIOWrapper()
+ sys.stdin.encoding = stdin_encoding
+ osutils._cached_user_encoding = user_encoding
+ if enable_fake_encodings:
+ fake_codec.add(stdout_encoding)
+ fake_codec.add(stderr_encoding)
+ fake_codec.add(stdin_encoding)
+
+ def test_get_terminal_encoding(self):
+ self.make_wrapped_streams('stdout_encoding',
+ 'stderr_encoding',
+ 'stdin_encoding')
+
+ # first preference is stdout encoding
+ self.assertEqual('stdout_encoding', osutils.get_terminal_encoding())
+
+ sys.stdout.encoding = None
+ # if sys.stdout.encoding is None, fall back to sys.stdin
+ self.assertEqual('stdin_encoding', osutils.get_terminal_encoding())
+
+ sys.stdin.encoding = None
+ # and in the worst case, use osutils.get_user_encoding()
+ self.assertEqual('user_encoding', osutils.get_terminal_encoding())
+
+ def test_get_terminal_encoding_silent(self):
+ self.make_wrapped_streams('stdout_encoding',
+ 'stderr_encoding',
+ 'stdin_encoding')
+ # Calling get_terminal_encoding should not mutter when silent=True is
+ # passed.
+ log = self.get_log()
+ osutils.get_terminal_encoding()
+ self.assertEqual(log, self.get_log())
+
+ def test_get_terminal_encoding_trace(self):
+ self.make_wrapped_streams('stdout_encoding',
+ 'stderr_encoding',
+ 'stdin_encoding')
+ # Calling get_terminal_encoding with trace=True should mutter the
+ # selected encoding to the log (unlike the silent case above).
+ log = self.get_log()
+ osutils.get_terminal_encoding(trace=True)
+ self.assertNotEqual(log, self.get_log())
+
+ def test_terminal_cp0(self):
+ # test cp0 encoding (Windows returns cp0 when there is no encoding)
+ self.make_wrapped_streams('cp0',
+ 'cp0',
+ 'cp0',
+ user_encoding='latin-1',
+ enable_fake_encodings=False)
+
+ # cp0 is an invalid encoding. We should fall back to user_encoding
+ self.assertEqual('latin-1', osutils.get_terminal_encoding())
+
+ # check stderr
+ self.assertEquals('', sys.stderr.getvalue())
+
+ def test_terminal_cp_unknown(self):
+ # test against really unknown encoding
+ # catch warning at stderr
+ self.make_wrapped_streams('cp-unknown',
+ 'cp-unknown',
+ 'cp-unknown',
+ user_encoding='latin-1',
+ enable_fake_encodings=False)
+
+ self.assertEqual('latin-1', osutils.get_terminal_encoding())
+
+ # check stderr
+ self.assertEquals('bzr: warning: unknown terminal encoding cp-unknown.\n'
+ ' Using encoding latin-1 instead.\n',
+ sys.stderr.getvalue())
+
+
+class TestUserEncoding(TestCase):
+ """Test detection of default user encoding."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.overrideAttr(osutils, '_cached_user_encoding', None)
+ self.overrideAttr(locale, 'getpreferredencoding', self.get_encoding)
+ self.overrideAttr(locale, 'CODESET', None)
+ self.overrideAttr(sys, 'stderr', StringIOWrapper())
+
+ def get_encoding(self, do_setlocale=True):
+ return self._encoding
+
+ def test_get_user_encoding(self):
+ self._encoding = 'user_encoding'
+ fake_codec.add('user_encoding')
+ self.assertEquals('iso8859-1', # fake_codec maps to latin-1
+ osutils.get_user_encoding())
+ self.assertEquals('', sys.stderr.getvalue())
+
+ def test_user_cp0(self):
+ self._encoding = 'cp0'
+ self.assertEquals('ascii', osutils.get_user_encoding())
+ self.assertEquals('', sys.stderr.getvalue())
+
+ def test_user_cp_unknown(self):
+ self._encoding = 'cp-unknown'
+ self.assertEquals('ascii', osutils.get_user_encoding())
+ self.assertEquals('bzr: warning: unknown encoding cp-unknown.'
+ ' Continuing with ascii encoding.\n',
+ sys.stderr.getvalue())
+
+ def test_user_empty(self):
+ """Running bzr from a vim script gives '' for a preferred locale"""
+ self._encoding = ''
+ self.assertEquals('ascii', osutils.get_user_encoding())
+ self.assertEquals('', sys.stderr.getvalue())
diff --git a/bzrlib/tests/test_pack.py b/bzrlib/tests/test_pack.py
new file mode 100644
index 0000000..33c8176
--- /dev/null
+++ b/bzrlib/tests/test_pack.py
@@ -0,0 +1,741 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.pack."""
+
+
+from cStringIO import StringIO
+
+from bzrlib import pack, errors, tests
+
+
+class TestContainerSerialiser(tests.TestCase):
+ """Tests for the ContainerSerialiser class."""
+
+ def test_construct(self):
+ """Test constructing a ContainerSerialiser."""
+ pack.ContainerSerialiser()
+
+ def test_begin(self):
+ serialiser = pack.ContainerSerialiser()
+ self.assertEqual('Bazaar pack format 1 (introduced in 0.18)\n',
+ serialiser.begin())
+
+ def test_end(self):
+ serialiser = pack.ContainerSerialiser()
+ self.assertEqual('E', serialiser.end())
+
+ def test_bytes_record_no_name(self):
+ serialiser = pack.ContainerSerialiser()
+ record = serialiser.bytes_record('bytes', [])
+ self.assertEqual('B5\n\nbytes', record)
+
+ def test_bytes_record_one_name_with_one_part(self):
+ serialiser = pack.ContainerSerialiser()
+ record = serialiser.bytes_record('bytes', [('name',)])
+ self.assertEqual('B5\nname\n\nbytes', record)
+
+ def test_bytes_record_one_name_with_two_parts(self):
+ serialiser = pack.ContainerSerialiser()
+ record = serialiser.bytes_record('bytes', [('part1', 'part2')])
+ self.assertEqual('B5\npart1\x00part2\n\nbytes', record)
+
+ def test_bytes_record_two_names(self):
+ serialiser = pack.ContainerSerialiser()
+ record = serialiser.bytes_record('bytes', [('name1',), ('name2',)])
+ self.assertEqual('B5\nname1\nname2\n\nbytes', record)
+
+ def test_bytes_record_whitespace_in_name_part(self):
+ serialiser = pack.ContainerSerialiser()
+ self.assertRaises(
+ errors.InvalidRecordError,
+ serialiser.bytes_record, 'bytes', [('bad name',)])
+
+ def test_bytes_record_header(self):
+ serialiser = pack.ContainerSerialiser()
+ record = serialiser.bytes_header(32, [('name1',), ('name2',)])
+ self.assertEqual('B32\nname1\nname2\n\n', record)
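+
+ # A minimal sketch of the format-1 record layout asserted above (the
+ # 'body' and 'key' values are illustrative): bytes_record('body',
+ # [('key',)]) would serialise to 'B4\nkey\n\nbody' -- a 'B' type marker,
+ # the decimal body length, one name tuple per line (parts joined by NUL
+ # bytes), a blank line, and finally the body bytes.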
+
+
+class TestContainerWriter(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.output = StringIO()
+ self.writer = pack.ContainerWriter(self.output.write)
+
+ def assertOutput(self, expected_output):
+ """Assert that the output of self.writer ContainerWriter is equal to
+ expected_output.
+ """
+ self.assertEqual(expected_output, self.output.getvalue())
+
+ def test_construct(self):
+ """Test constructing a ContainerWriter.
+
+ This uses None as the output stream to show that the constructor
+ doesn't try to use the output stream.
+ """
+ writer = pack.ContainerWriter(None)
+
+ def test_begin(self):
+ """The begin() method writes the container format marker line."""
+ self.writer.begin()
+ self.assertOutput('Bazaar pack format 1 (introduced in 0.18)\n')
+
+ def test_zero_records_written_after_begin(self):
+ """After begin is written, 0 records have been written."""
+ self.writer.begin()
+ self.assertEqual(0, self.writer.records_written)
+
+ def test_end(self):
+ """The end() method writes an End Marker record."""
+ self.writer.begin()
+ self.writer.end()
+ self.assertOutput('Bazaar pack format 1 (introduced in 0.18)\nE')
+
+ def test_empty_end_does_not_add_a_record_to_records_written(self):
+ """The end() method does not count towards the records written."""
+ self.writer.begin()
+ self.writer.end()
+ self.assertEqual(0, self.writer.records_written)
+
+ def test_non_empty_end_does_not_add_a_record_to_records_written(self):
+ """The end() method does not count towards the records written."""
+ self.writer.begin()
+ self.writer.add_bytes_record('foo', names=[])
+ self.writer.end()
+ self.assertEqual(1, self.writer.records_written)
+
+ def test_add_bytes_record_no_name(self):
+ """Add a bytes record with no name."""
+ self.writer.begin()
+ offset, length = self.writer.add_bytes_record('abc', names=[])
+ self.assertEqual((42, 7), (offset, length))
+ self.assertOutput(
+ 'Bazaar pack format 1 (introduced in 0.18)\nB3\n\nabc')
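+ # Note: the offset of 42 reported here (and in the records below) is just
+ # the length of the 'Bazaar pack format 1 (introduced in 0.18)\n' header
+ # line; record data begins immediately after it.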
+
+ def test_add_bytes_record_one_name(self):
+ """Add a bytes record with one name."""
+ self.writer.begin()
+
+ offset, length = self.writer.add_bytes_record(
+ 'abc', names=[('name1', )])
+ self.assertEqual((42, 13), (offset, length))
+ self.assertOutput(
+ 'Bazaar pack format 1 (introduced in 0.18)\n'
+ 'B3\nname1\n\nabc')
+
+ def test_add_bytes_record_split_writes(self):
+ """Write a large record which does multiple IOs"""
+
+ writes = []
+ real_write = self.writer.write_func
+
+ def record_writes(bytes):
+ writes.append(bytes)
+ return real_write(bytes)
+
+ self.writer.write_func = record_writes
+ self.writer._JOIN_WRITES_THRESHOLD = 2
+
+ self.writer.begin()
+ offset, length = self.writer.add_bytes_record(
+ 'abcabc', names=[('name1', )])
+ self.assertEqual((42, 16), (offset, length))
+ self.assertOutput(
+ 'Bazaar pack format 1 (introduced in 0.18)\n'
+ 'B6\nname1\n\nabcabc')
+
+ self.assertEquals([
+ 'Bazaar pack format 1 (introduced in 0.18)\n',
+ 'B6\nname1\n\n',
+ 'abcabc'],
+ writes)
+
+ def test_add_bytes_record_two_names(self):
+ """Add a bytes record with two names."""
+ self.writer.begin()
+ offset, length = self.writer.add_bytes_record(
+ 'abc', names=[('name1', ), ('name2', )])
+ self.assertEqual((42, 19), (offset, length))
+ self.assertOutput(
+ 'Bazaar pack format 1 (introduced in 0.18)\n'
+ 'B3\nname1\nname2\n\nabc')
+
+ def test_add_bytes_record_two_element_name(self):
+ """Add a bytes record with a two-element name."""
+ self.writer.begin()
+ offset, length = self.writer.add_bytes_record(
+ 'abc', names=[('name1', 'name2')])
+ self.assertEqual((42, 19), (offset, length))
+ self.assertOutput(
+ 'Bazaar pack format 1 (introduced in 0.18)\n'
+ 'B3\nname1\x00name2\n\nabc')
+
+ def test_add_second_bytes_record_gets_higher_offset(self):
+ self.writer.begin()
+ self.writer.add_bytes_record('abc', names=[])
+ offset, length = self.writer.add_bytes_record('abc', names=[])
+ self.assertEqual((49, 7), (offset, length))
+ self.assertOutput(
+ 'Bazaar pack format 1 (introduced in 0.18)\n'
+ 'B3\n\nabc'
+ 'B3\n\nabc')
+
+ def test_add_bytes_record_invalid_name(self):
+ """Adding a Bytes record with a name with whitespace in it raises
+ InvalidRecordError.
+ """
+ self.writer.begin()
+ self.assertRaises(
+ errors.InvalidRecordError,
+ self.writer.add_bytes_record, 'abc', names=[('bad name', )])
+
+ def test_add_bytes_records_add_to_records_written(self):
+ """Adding a Bytes record increments the records_written counter."""
+ self.writer.begin()
+ self.writer.add_bytes_record('foo', names=[])
+ self.assertEqual(1, self.writer.records_written)
+ self.writer.add_bytes_record('foo', names=[])
+ self.assertEqual(2, self.writer.records_written)
+
+
+class TestContainerReader(tests.TestCase):
+ """Tests for the ContainerReader.
+
+ The ContainerReader reads format 1 containers, so these tests explicitly
+ test how it reacts to format 1 data. If a new version of the format is
+ added, then separate tests for that format should be added.
+ """
+
+ def get_reader_for(self, bytes):
+ stream = StringIO(bytes)
+ reader = pack.ContainerReader(stream)
+ return reader
+
+ def test_construct(self):
+ """Test constructing a ContainerReader.
+
+ This uses None as the input stream to show that the constructor
+ doesn't try to use the input stream.
+ """
+ reader = pack.ContainerReader(None)
+
+ def test_empty_container(self):
+ """Read an empty container."""
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nE")
+ self.assertEqual([], list(reader.iter_records()))
+
+ def test_unknown_format(self):
+ """Unrecognised container formats raise UnknownContainerFormatError."""
+ reader = self.get_reader_for("unknown format\n")
+ self.assertRaises(
+ errors.UnknownContainerFormatError, reader.iter_records)
+
+ def test_unexpected_end_of_container(self):
+ """Containers that don't end with an End Marker record should cause
+ UnexpectedEndOfContainerError to be raised.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\n")
+ iterator = reader.iter_records()
+ self.assertRaises(
+ errors.UnexpectedEndOfContainerError, iterator.next)
+
+ def test_unknown_record_type(self):
+ """Unknown record types cause UnknownRecordTypeError to be raised."""
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nX")
+ iterator = reader.iter_records()
+ self.assertRaises(
+ errors.UnknownRecordTypeError, iterator.next)
+
+ def test_container_with_one_unnamed_record(self):
+ """Read a container with one Bytes record.
+
+ Parsing Bytes records is more thoroughly exercised by
+ TestBytesRecordReader. This test is here to ensure that
+ ContainerReader's integration with BytesRecordReader is working.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nB5\n\naaaaaE")
+ expected_records = [([], 'aaaaa')]
+ self.assertEqual(
+ expected_records,
+ [(names, read_bytes(None))
+ for (names, read_bytes) in reader.iter_records()])
+
+ def test_validate_empty_container(self):
+ """validate does not raise an error for a container with no records."""
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nE")
+ # No exception raised
+ reader.validate()
+
+ def test_validate_non_empty_valid_container(self):
+ """validate does not raise an error for a container with a valid record.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nB3\nname\n\nabcE")
+ # No exception raised
+ reader.validate()
+
+ def test_validate_bad_format(self):
+ """validate raises an error for unrecognised format strings.
+
+ It may raise either UnexpectedEndOfContainerError or
+ UnknownContainerFormatError, depending on exactly what the string is.
+ """
+ inputs = ["", "x", "Bazaar pack format 1 (introduced in 0.18)", "bad\n"]
+ for input in inputs:
+ reader = self.get_reader_for(input)
+ self.assertRaises(
+ (errors.UnexpectedEndOfContainerError,
+ errors.UnknownContainerFormatError),
+ reader.validate)
+
+ def test_validate_bad_record_marker(self):
+ """validate raises UnknownRecordTypeError for unrecognised record
+ types.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nX")
+ self.assertRaises(errors.UnknownRecordTypeError, reader.validate)
+
+ def test_validate_data_after_end_marker(self):
+ """validate raises ContainerHasExcessDataError if there are any bytes
+ after the end of the container.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nEcrud")
+ self.assertRaises(
+ errors.ContainerHasExcessDataError, reader.validate)
+
+ def test_validate_no_end_marker(self):
+ """validate raises UnexpectedEndOfContainerError if there's no end of
+ container marker, even if the container up to this point has been valid.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\n")
+ self.assertRaises(
+ errors.UnexpectedEndOfContainerError, reader.validate)
+
+ def test_validate_duplicate_name(self):
+ """validate raises DuplicateRecordNameError if the same name occurs
+ multiple times in the container.
+ """
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\n"
+ "B0\nname\n\n"
+ "B0\nname\n\n"
+ "E")
+ self.assertRaises(errors.DuplicateRecordNameError, reader.validate)
+
+ def test_validate_undecodeable_name(self):
+ """Names that aren't valid UTF-8 cause validate to fail."""
+ reader = self.get_reader_for(
+ "Bazaar pack format 1 (introduced in 0.18)\nB0\n\xcc\n\nE")
+ self.assertRaises(errors.InvalidRecordError, reader.validate)
+
+
+class TestBytesRecordReader(tests.TestCase):
+ """Tests for reading and validating Bytes records with
+ BytesRecordReader.
+
+ Like TestContainerReader, this explicitly tests the reading of format 1
+ data. If a new version of the format is added, then a separate set of
+ tests for reading that format should be added.
+ """
+
+ def get_reader_for(self, bytes):
+ stream = StringIO(bytes)
+ reader = pack.BytesRecordReader(stream)
+ return reader
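+
+ # Note: unlike the ContainerReader tests above, the data handed to
+ # BytesRecordReader starts at the length prefix; the leading 'B'
+ # record-type marker is not part of its input.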
+
+ def test_record_with_no_name(self):
+ """Reading a Bytes record with no name returns an empty list of
+ names.
+ """
+ reader = self.get_reader_for("5\n\naaaaa")
+ names, get_bytes = reader.read()
+ self.assertEqual([], names)
+ self.assertEqual('aaaaa', get_bytes(None))
+
+ def test_record_with_one_name(self):
+ """Reading a Bytes record with one name returns a list of just that
+ name.
+ """
+ reader = self.get_reader_for("5\nname1\n\naaaaa")
+ names, get_bytes = reader.read()
+ self.assertEqual([('name1', )], names)
+ self.assertEqual('aaaaa', get_bytes(None))
+
+ def test_record_with_two_names(self):
+ """Reading a Bytes record with two names returns a list of both names.
+ """
+ reader = self.get_reader_for("5\nname1\nname2\n\naaaaa")
+ names, get_bytes = reader.read()
+ self.assertEqual([('name1', ), ('name2', )], names)
+ self.assertEqual('aaaaa', get_bytes(None))
+
+ def test_record_with_two_part_names(self):
+ """Reading a Bytes record with a two_part name reads both."""
+ reader = self.get_reader_for("5\nname1\x00name2\n\naaaaa")
+ names, get_bytes = reader.read()
+ self.assertEqual([('name1', 'name2', )], names)
+ self.assertEqual('aaaaa', get_bytes(None))
+
+ def test_invalid_length(self):
+ """If the length-prefix is not a number, parsing raises
+ InvalidRecordError.
+ """
+ reader = self.get_reader_for("not a number\n")
+ self.assertRaises(errors.InvalidRecordError, reader.read)
+
+ def test_early_eof(self):
+ """Tests for premature EOF occuring during parsing Bytes records with
+ BytesRecordReader.
+
+ A incomplete container might be interrupted at any point. The
+ BytesRecordReader needs to cope with the input stream running out no
+ matter where it is in the parsing process.
+
+ In all cases, UnexpectedEndOfContainerError should be raised.
+ """
+ complete_record = "6\nname\n\nabcdef"
+ for count in range(0, len(complete_record)):
+ incomplete_record = complete_record[:count]
+ reader = self.get_reader_for(incomplete_record)
+ # We don't use assertRaises to make diagnosing failures easier
+ # (assertRaises doesn't allow a custom failure message).
+ try:
+ names, read_bytes = reader.read()
+ read_bytes(None)
+ except errors.UnexpectedEndOfContainerError:
+ pass
+ else:
+ self.fail(
+ "UnexpectedEndOfContainerError not raised when parsing %r"
+ % (incomplete_record,))
+
+ def test_initial_eof(self):
+ """EOF before any bytes read at all."""
+ reader = self.get_reader_for("")
+ self.assertRaises(errors.UnexpectedEndOfContainerError, reader.read)
+
+ def test_eof_after_length(self):
+ """EOF after reading the length and before reading name(s)."""
+ reader = self.get_reader_for("123\n")
+ self.assertRaises(errors.UnexpectedEndOfContainerError, reader.read)
+
+ def test_eof_during_name(self):
+ """EOF during reading a name."""
+ reader = self.get_reader_for("123\nname")
+ self.assertRaises(errors.UnexpectedEndOfContainerError, reader.read)
+
+ def test_read_invalid_name_whitespace(self):
+ """Names must have no whitespace."""
+ # A name with a space.
+ reader = self.get_reader_for("0\nbad name\n\n")
+ self.assertRaises(errors.InvalidRecordError, reader.read)
+
+ # A name with a tab.
+ reader = self.get_reader_for("0\nbad\tname\n\n")
+ self.assertRaises(errors.InvalidRecordError, reader.read)
+
+ # A name with a vertical tab.
+ reader = self.get_reader_for("0\nbad\vname\n\n")
+ self.assertRaises(errors.InvalidRecordError, reader.read)
+
+ def test_validate_whitespace_in_name(self):
+ """Names must have no whitespace."""
+ reader = self.get_reader_for("0\nbad name\n\n")
+ self.assertRaises(errors.InvalidRecordError, reader.validate)
+
+ def test_validate_interrupted_prelude(self):
+ """EOF during reading a record's prelude causes validate to fail."""
+ reader = self.get_reader_for("")
+ self.assertRaises(
+ errors.UnexpectedEndOfContainerError, reader.validate)
+
+ def test_validate_interrupted_body(self):
+ """EOF during reading a record's body causes validate to fail."""
+ reader = self.get_reader_for("1\n\n")
+ self.assertRaises(
+ errors.UnexpectedEndOfContainerError, reader.validate)
+
+ def test_validate_unparseable_length(self):
+ """An unparseable record length causes validate to fail."""
+ reader = self.get_reader_for("\n\n")
+ self.assertRaises(
+ errors.InvalidRecordError, reader.validate)
+
+ def test_validate_undecodeable_name(self):
+ """Names that aren't valid UTF-8 cause validate to fail."""
+ reader = self.get_reader_for("0\n\xcc\n\n")
+ self.assertRaises(errors.InvalidRecordError, reader.validate)
+
+ def test_read_max_length(self):
+ """If the max_length passed to the callable returned by read is not
+ None, then no more than that many bytes will be read.
+ """
+ reader = self.get_reader_for("6\n\nabcdef")
+ names, get_bytes = reader.read()
+ self.assertEqual('abc', get_bytes(3))
+
+ def test_read_no_max_length(self):
+ """If the max_length passed to the callable returned by read is None,
+ then all the bytes in the record will be read.
+ """
+ reader = self.get_reader_for("6\n\nabcdef")
+ names, get_bytes = reader.read()
+ self.assertEqual('abcdef', get_bytes(None))
+
+ def test_repeated_read_calls(self):
+ """Repeated calls to the callable returned from BytesRecordReader.read
+ will not read beyond the end of the record.
+ """
+ reader = self.get_reader_for("6\n\nabcdefB3\nnext-record\nXXX")
+ names, get_bytes = reader.read()
+ self.assertEqual('abcdef', get_bytes(None))
+ self.assertEqual('', get_bytes(None))
+ self.assertEqual('', get_bytes(99))
+
+
+class TestMakeReadvReader(tests.TestCaseWithTransport):
+
+ def test_read_skipping_records(self):
+ pack_data = StringIO()
+ writer = pack.ContainerWriter(pack_data.write)
+ writer.begin()
+ memos = []
+ memos.append(writer.add_bytes_record('abc', names=[]))
+ memos.append(writer.add_bytes_record('def', names=[('name1', )]))
+ memos.append(writer.add_bytes_record('ghi', names=[('name2', )]))
+ memos.append(writer.add_bytes_record('jkl', names=[]))
+ writer.end()
+ transport = self.get_transport()
+ transport.put_bytes('mypack', pack_data.getvalue())
+ requested_records = [memos[0], memos[2]]
+ reader = pack.make_readv_reader(transport, 'mypack', requested_records)
+ result = []
+ for names, reader_func in reader.iter_records():
+ result.append((names, reader_func(None)))
+ self.assertEqual([([], 'abc'), ([('name2', )], 'ghi')], result)
+
+
+class TestReadvFile(tests.TestCaseWithTransport):
+ """Tests of the ReadVFile class.
+
+ Error cases are deliberately undefined: this code adapts the underlying
+ transport interface to a single 'streaming read' interface as
+ ContainerReader needs.
+ """
+
+ def test_read_bytes(self):
+ """Test reading of both single bytes and all bytes in a hunk."""
+ transport = self.get_transport()
+ transport.put_bytes('sample', '0123456789')
+ f = pack.ReadVFile(transport.readv('sample', [(0,1), (1,2), (4,1), (6,2)]))
+ results = []
+ results.append(f.read(1))
+ results.append(f.read(2))
+ results.append(f.read(1))
+ results.append(f.read(1))
+ results.append(f.read(1))
+ self.assertEqual(['0', '12', '4', '6', '7'], results)
+
+ def test_readline(self):
+ """Test using readline() as ContainerReader does.
+
+ This is always within a readv hunk, never across it.
+ """
+ transport = self.get_transport()
+ transport.put_bytes('sample', '0\n2\n4\n')
+ f = pack.ReadVFile(transport.readv('sample', [(0,2), (2,4)]))
+ results = []
+ results.append(f.readline())
+ results.append(f.readline())
+ results.append(f.readline())
+ self.assertEqual(['0\n', '2\n', '4\n'], results)
+
+ def test_readline_and_read(self):
+ """Test exercising one byte reads, readline, and then read again."""
+ transport = self.get_transport()
+ transport.put_bytes('sample', '0\n2\n4\n')
+ f = pack.ReadVFile(transport.readv('sample', [(0,6)]))
+ results = []
+ results.append(f.read(1))
+ results.append(f.readline())
+ results.append(f.read(4))
+ self.assertEqual(['0', '\n', '2\n4\n'], results)
+
+
+class PushParserTestCase(tests.TestCase):
+ """Base class for TestCases involving ContainerPushParser."""
+
+ def make_parser_expecting_record_type(self):
+ parser = pack.ContainerPushParser()
+ parser.accept_bytes("Bazaar pack format 1 (introduced in 0.18)\n")
+ return parser
+
+ def make_parser_expecting_bytes_record(self):
+ parser = pack.ContainerPushParser()
+ parser.accept_bytes("Bazaar pack format 1 (introduced in 0.18)\nB")
+ return parser
+
+ def assertRecordParsing(self, expected_record, bytes):
+ """Assert that 'bytes' is parsed as a given bytes record.
+
+ :param expected_record: A tuple of (names, bytes).
+ """
+ parser = self.make_parser_expecting_bytes_record()
+ parser.accept_bytes(bytes)
+ parsed_records = parser.read_pending_records()
+ self.assertEqual([expected_record], parsed_records)
+
+
+class TestContainerPushParser(PushParserTestCase):
+ """Tests for ContainerPushParser.
+
+ The ContainerPushParser reads format 1 containers, so these tests
+ explicitly test how it reacts to format 1 data. If a new version of the
+ format is added, then separate tests for that format should be added.
+ """
+
+ def test_construct(self):
+ """ContainerPushParser can be constructed."""
+ pack.ContainerPushParser()
+
+ def test_multiple_records_at_once(self):
+ """If multiple records worth of data are fed to the parser in one
+ string, the parser will correctly parse all the records.
+
+ (A naive implementation might stop after parsing the first record.)
+ """
+ parser = self.make_parser_expecting_record_type()
+ parser.accept_bytes("B5\nname1\n\nbody1B5\nname2\n\nbody2")
+ self.assertEqual(
+ [([('name1',)], 'body1'), ([('name2',)], 'body2')],
+ parser.read_pending_records())
+
+ def test_multiple_empty_records_at_once(self):
+ """If multiple empty records worth of data are fed to the parser in one
+ string, the parser will correctly parse all the records.
+
+ (A naive implementation might stop after parsing the first empty
+ record, because the buffer size had not changed.)
+ """
+ parser = self.make_parser_expecting_record_type()
+ parser.accept_bytes("B0\nname1\n\nB0\nname2\n\n")
+ self.assertEqual(
+ [([('name1',)], ''), ([('name2',)], '')],
+ parser.read_pending_records())
+
+
+class TestContainerPushParserBytesParsing(PushParserTestCase):
+ """Tests for reading Bytes records with ContainerPushParser.
+
+ The ContainerPushParser reads format 1 containers, so these tests
+ explicitly test how it reacts to format 1 data. If a new version of the
+ format is added, then separate tests for that format should be added.
+ """
+
+ def test_record_with_no_name(self):
+ """Reading a Bytes record with no name returns an empty list of
+ names.
+ """
+ self.assertRecordParsing(([], 'aaaaa'), "5\n\naaaaa")
+
+ def test_record_with_one_name(self):
+ """Reading a Bytes record with one name returns a list of just that
+ name.
+ """
+ self.assertRecordParsing(
+ ([('name1', )], 'aaaaa'),
+ "5\nname1\n\naaaaa")
+
+ def test_record_with_two_names(self):
+ """Reading a Bytes record with two names returns a list of both names.
+ """
+ self.assertRecordParsing(
+ ([('name1', ), ('name2', )], 'aaaaa'),
+ "5\nname1\nname2\n\naaaaa")
+
+ def test_record_with_two_part_names(self):
+ """Reading a Bytes record with a two_part name reads both."""
+ self.assertRecordParsing(
+ ([('name1', 'name2')], 'aaaaa'),
+ "5\nname1\x00name2\n\naaaaa")
+
+ def test_invalid_length(self):
+ """If the length-prefix is not a number, parsing raises
+ InvalidRecordError.
+ """
+ parser = self.make_parser_expecting_bytes_record()
+ self.assertRaises(
+ errors.InvalidRecordError, parser.accept_bytes, "not a number\n")
+
+ def test_incomplete_record(self):
+ """If the bytes seen so far don't form a complete record, then there
+ will be nothing returned by read_pending_records.
+ """
+ parser = self.make_parser_expecting_bytes_record()
+ parser.accept_bytes("5\n\nabcd")
+ self.assertEqual([], parser.read_pending_records())
+
+ def test_accept_nothing(self):
+ """The edge case of parsing an empty string causes no error."""
+ parser = self.make_parser_expecting_bytes_record()
+ parser.accept_bytes("")
+
+ def assertInvalidRecord(self, bytes):
+ """Assert that parsing the given bytes will raise an
+ InvalidRecordError.
+ """
+ parser = self.make_parser_expecting_bytes_record()
+ self.assertRaises(
+ errors.InvalidRecordError, parser.accept_bytes, bytes)
+
+ def test_read_invalid_name_whitespace(self):
+ """Names must have no whitespace."""
+ # A name with a space.
+ self.assertInvalidRecord("0\nbad name\n\n")
+
+ # A name with a tab.
+ self.assertInvalidRecord("0\nbad\tname\n\n")
+
+ # A name with a vertical tab.
+ self.assertInvalidRecord("0\nbad\vname\n\n")
+
+ def test_repeated_read_pending_records(self):
+ """read_pending_records will not return the same record twice."""
+ parser = self.make_parser_expecting_bytes_record()
+ parser.accept_bytes("6\n\nabcdef")
+ self.assertEqual([([], 'abcdef')], parser.read_pending_records())
+ self.assertEqual([], parser.read_pending_records())
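
As a reading aid, here is a minimal standalone sketch of the round trip these tests exercise piecemeal: writing Bytes records with ContainerWriter and feeding the serialised container to ContainerPushParser incrementally. It uses only the API the tests above call (begin/add_bytes_record/end, accept_bytes/read_pending_records); the record names, bodies, and the chunk size of 7 are made up for illustration, and the plain StringIO import is assumed from the Python 2 era of this code.

    from StringIO import StringIO

    from bzrlib import pack

    # Serialise two Bytes records.  On the wire each record looks like
    # "B" <length> "\n" <one line per name, parts joined by "\x00"> "\n" <body>.
    buf = StringIO()
    writer = pack.ContainerWriter(buf.write)
    writer.begin()
    writer.add_bytes_record('body1', names=[('name1',)])
    writer.add_bytes_record('body2', names=[('name2', 'part2')])
    writer.end()

    # Feed the serialised container to the push parser a few bytes at a time,
    # the way data might arrive from a network, collecting records as soon as
    # they become complete.
    parser = pack.ContainerPushParser()
    records = []
    data = buf.getvalue()
    for i in range(0, len(data), 7):
        parser.accept_bytes(data[i:i + 7])
        records.extend(parser.read_pending_records())

    assert records == [([('name1',)], 'body1'), ([('name2', 'part2')], 'body2')]
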
diff --git a/bzrlib/tests/test_patch.py b/bzrlib/tests/test_patch.py
new file mode 100644
index 0000000..e1b19db
--- /dev/null
+++ b/bzrlib/tests/test_patch.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.errors import BinaryFile
+from bzrlib.patch import diff3
+from bzrlib.tests import TestCaseInTempDir
+
+
+class TestPatch(TestCaseInTempDir):
+
+ def test_diff3_binaries(self):
+        with open('this', 'wb') as f: f.write('a')
+        with open('other', 'wb') as f: f.write('a')
+        with open('base', 'wb') as f: f.write('\x00')
+ self.assertRaises(BinaryFile, diff3, 'unused', 'this', 'other', 'base')
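
A standalone version of the same check, for readers who want to try it outside the test harness. This is an illustrative sketch only: the argument order simply mirrors the call in the test above (output path first, then the three input paths), the file contents are made up, and the temporary-directory handling is just scaffolding. Only the BinaryFile error path that the test relies on is exercised here.

    import os
    import shutil
    import tempfile

    from bzrlib.errors import BinaryFile
    from bzrlib.patch import diff3

    workdir = tempfile.mkdtemp()
    try:
        paths = {}
        for name, content in [('this', 'a\n'), ('other', 'a\n'), ('base', '\x00')]:
            paths[name] = os.path.join(workdir, name)
            with open(paths[name], 'wb') as f:
                f.write(content)
        try:
            diff3(os.path.join(workdir, 'merged'),
                  paths['this'], paths['other'], paths['base'])
        except BinaryFile:
            pass  # expected: the NUL byte in 'base' marks it as binary
        else:
            raise AssertionError('BinaryFile was not raised')
    finally:
        shutil.rmtree(workdir)
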
diff --git a/bzrlib/tests/test_patches.py b/bzrlib/tests/test_patches.py
new file mode 100644
index 0000000..a35e84e
--- /dev/null
+++ b/bzrlib/tests/test_patches.py
@@ -0,0 +1,310 @@
+# Copyright (C) 2005-2010 Aaron Bentley, Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os.path
+
+from bzrlib.tests import TestCase
+
+from bzrlib.iterablefile import IterableFile
+from bzrlib.patches import (MalformedLine,
+ MalformedHunkHeader,
+ MalformedPatchHeader,
+ BinaryPatch,
+ BinaryFiles,
+ Patch,
+ ContextLine,
+ InsertLine,
+ RemoveLine,
+ difference_index,
+ get_patch_names,
+ hunk_from_header,
+ iter_patched,
+ iter_patched_from_hunks,
+ parse_line,
+ parse_patch,
+ parse_patches,
+ NO_NL)
+
+
+class PatchesTester(TestCase):
+
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__),
+ "test_patches_data", filename)
+ return file(data_path, "rb")
+
+ def data_lines(self, filename):
+ datafile = self.datafile(filename)
+ try:
+ return datafile.readlines()
+ finally:
+ datafile.close()
+
+ def test_parse_patches_leading_noise(self):
+ # https://bugs.launchpad.net/bzr/+bug/502076
+ # https://code.launchpad.net/~toshio/bzr/allow-dirty-patches/+merge/18854
+ lines = ["diff -pruN commands.py",
+ "--- orig/commands.py",
+ "+++ mod/dommands.py"]
+ bits = parse_patches(iter(lines), allow_dirty=True)
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ self.assertEqual(orig, "orig/commands.py")
+ self.assertEqual(mod, "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+        hunk = hunk_from_header(header)
+ self.assertEqual(hunk.orig_pos, 34)
+ self.assertEqual(hunk.orig_range, 11)
+ self.assertEqual(hunk.mod_pos, 50)
+ self.assertEqual(hunk.mod_range, 6)
+ self.assertEqual(str(hunk), header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+        hunk = hunk_from_header(header)
+ self.assertEqual(hunk.orig_pos, 1)
+ self.assertEqual(hunk.orig_range, 1)
+ self.assertEqual(hunk.mod_pos, 0)
+ self.assertEqual(hunk.mod_range, 0)
+ self.assertEqual(str(hunk), header)
+
+ def testPDiff(self):
+ """Parse a hunk header produced by diff -p"""
+ header = "@@ -407,7 +292,7 @@ bzr 0.18rc1 2007-07-10\n"
+ hunk = hunk_from_header(header)
+ self.assertEqual('bzr 0.18rc1 2007-07-10', hunk.tail)
+ self.assertEqual(header, str(hunk))
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+    def lineThing(self, text, type):
+ line = parse_line(text)
+ self.assertIsInstance(line, type)
+ self.assertEqual(str(line), text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def testMalformedLineNO_NL(self):
+ """Parse invalid '\ No newline at end of file' in hunk lines"""
+ self.makeMalformedLine(NO_NL)
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+        self.assertEqual(patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = self.datafile("patchtext.patch").read()
+ self.compare_parsed(patchtext)
+
+ def test_parse_binary(self):
+ """Test parsing a whole patch"""
+ patches = parse_patches(self.data_lines("binary.patch"))
+ self.assertIs(BinaryPatch, patches[0].__class__)
+ self.assertIs(Patch, patches[1].__class__)
+ self.assertContainsRe(patches[0].oldname, '^bar\t')
+ self.assertContainsRe(patches[0].newname, '^qux\t')
+ self.assertContainsRe(str(patches[0]),
+ 'Binary files bar\t.* and qux\t.* differ\n')
+
+ def test_parse_binary_after_normal(self):
+ patches = parse_patches(self.data_lines("binary-after-normal.patch"))
+ self.assertIs(BinaryPatch, patches[1].__class__)
+ self.assertIs(Patch, patches[0].__class__)
+ self.assertContainsRe(patches[1].oldname, '^bar\t')
+ self.assertContainsRe(patches[1].newname, '^qux\t')
+ self.assertContainsRe(str(patches[1]),
+ 'Binary files bar\t.* and qux\t.* differ\n')
+
+ def test_roundtrip_binary(self):
+ patchtext = ''.join(self.data_lines("binary.patch"))
+ patches = parse_patches(patchtext.splitlines(True))
+ self.assertEqual(patchtext, ''.join(str(p) for p in patches))
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__vavg__.cl
++++ mod/__vavg__.cl
+@@ -1 +1,2 @@
+ __qbpsbezng__ = "erfgehpgherqgrkg ra"
++__qbp__ = Na nygreangr Nepu pbzznaqyvar vagresnpr
+"""
+ self.compare_parsed(patchtext)
+
+ def testLineLookup(self):
+        """Make sure we can accurately look up mod line from orig"""
+        import sys
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ self.assertEqual(mod[mod_pos], orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ self.assertEqual(line.contents, next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testPatching(self):
+ """Test a few patch files, and make sure they work."""
+ files = [
+ ('diff-2', 'orig-2', 'mod-2'),
+ ('diff-3', 'orig-3', 'mod-3'),
+ ('diff-4', 'orig-4', 'mod-4'),
+ ('diff-5', 'orig-5', 'mod-5'),
+ ('diff-6', 'orig-6', 'mod-6'),
+ ('diff-7', 'orig-7', 'mod-7'),
+ ]
+ for diff, orig, mod in files:
+ patch = self.datafile(diff)
+ orig_lines = list(self.datafile(orig))
+ mod_lines = list(self.datafile(mod))
+
+ patched_file = IterableFile(iter_patched(orig_lines, patch))
+ lines = []
+ count = 0
+ for patch_line in patched_file:
+ self.assertEqual(patch_line, mod_lines[count])
+ count += 1
+ self.assertEqual(count, len(mod_lines))
+
+ def test_iter_patched_binary(self):
+ binary_lines = self.data_lines('binary.patch')
+        self.assertRaises(BinaryFiles, iter_patched, [], binary_lines)
+
+
+ def test_iter_patched_from_hunks(self):
+ """Test a few patch files, and make sure they work."""
+ files = [
+ ('diff-2', 'orig-2', 'mod-2'),
+ ('diff-3', 'orig-3', 'mod-3'),
+ ('diff-4', 'orig-4', 'mod-4'),
+ ('diff-5', 'orig-5', 'mod-5'),
+ ('diff-6', 'orig-6', 'mod-6'),
+ ('diff-7', 'orig-7', 'mod-7'),
+ ]
+ for diff, orig, mod in files:
+ parsed = parse_patch(self.datafile(diff))
+ orig_lines = list(self.datafile(orig))
+ mod_lines = list(self.datafile(mod))
+            patched_iter = iter_patched_from_hunks(orig_lines, parsed.hunks)
+            patched_file = IterableFile(patched_iter)
+ lines = []
+ count = 0
+ for patch_line in patched_file:
+ self.assertEqual(patch_line, mod_lines[count])
+ count += 1
+ self.assertEqual(count, len(mod_lines))
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ self.assertEqual(patch.pos_in_mod(0), 1)
+
+ def testParsePatches(self):
+ """Make sure file names can be extracted from tricky unified diffs"""
+ patchtext = \
+"""--- orig-7
++++ mod-7
+@@ -1,10 +1,10 @@
+ -- a
+--- b
++++ c
+ xx d
+ xx e
+ ++ f
+-++ g
++-- h
+ xx i
+ xx j
+ -- k
+--- l
++++ m
+--- orig-8
++++ mod-8
+@@ -1 +1 @@
+--- A
++++ B
+@@ -1 +1 @@
+--- C
++++ D
+"""
+ filenames = [('orig-7', 'mod-7'),
+ ('orig-8', 'mod-8')]
+ patches = parse_patches(patchtext.splitlines(True))
+ patch_files = []
+ for patch in patches:
+ patch_files.append((patch.oldname, patch.newname))
+ self.assertEqual(patch_files, filenames)
+
+ def testStatsValues(self):
+ """Test the added, removed and hunks values for stats_values."""
+ patch = parse_patch(self.datafile("diff"))
+ self.assertEqual((299, 407, 48), patch.stats_values())
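
The patching tests above pull their inputs from the test_patches_data files added below, which makes the parse/apply flow hard to see in isolation. As a reading aid, here is the same flow on a couple of in-memory strings, using only names the test module already imports (parse_patch, iter_patched_from_hunks); the tiny original file and patch text are made up for illustration.

    from bzrlib.patches import parse_patch, iter_patched_from_hunks

    orig_lines = ['one\n', 'two\n', 'three\n']
    patch_text = ('--- orig\n'
                  '+++ mod\n'
                  '@@ -1,3 +1,3 @@\n'
                  ' one\n'
                  '-two\n'
                  '+TWO\n'
                  ' three\n')

    # parse_patch consumes an iterator of lines and returns a Patch object
    # whose str() reproduces the input text (this is what compare_parsed
    # checks in the tests above).
    patch = parse_patch(iter(patch_text.splitlines(True)))
    assert str(patch) == patch_text

    # Applying the parsed hunks to the original lines yields the modified file.
    patched = list(iter_patched_from_hunks(orig_lines, patch.hunks))
    assert patched == ['one\n', 'TWO\n', 'three\n']
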
diff --git a/bzrlib/tests/test_patches_data/binary-after-normal.patch b/bzrlib/tests/test_patches_data/binary-after-normal.patch
new file mode 100644
index 0000000..1e22006
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/binary-after-normal.patch
@@ -0,0 +1,6 @@
+--- baz 2009-10-14 19:49:59 +0000
++++ quxx 2009-10-14 19:51:00 +0000
+@@ -1 +1 @@
+-hello
++goodbye
+Binary files bar 2009-10-14 19:49:59 +0000 and qux 2009-10-14 19:50:35 +0000 differ
diff --git a/bzrlib/tests/test_patches_data/binary.patch b/bzrlib/tests/test_patches_data/binary.patch
new file mode 100644
index 0000000..043d179
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/binary.patch
@@ -0,0 +1,6 @@
+Binary files bar 2009-10-14 19:49:59 +0000 and qux 2009-10-14 19:50:35 +0000 differ
+--- baz 2009-10-14 19:49:59 +0000
++++ quxx 2009-10-14 19:51:00 +0000
+@@ -1 +1 @@
+-hello
++goodbye
diff --git a/bzrlib/tests/test_patches_data/diff b/bzrlib/tests/test_patches_data/diff
new file mode 100644
index 0000000..3ead13c
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff
@@ -0,0 +1,1154 @@
+--- orig/commands.py
++++ mod/commands.py
+@@ -19,25 +19,31 @@
+ import arch
+ import arch.util
+ import arch.arch
++
++import pylon.errors
++from pylon.errors import *
++from pylon import errors
++from pylon import util
++from pylon import arch_core
++from pylon import arch_compound
++from pylon import ancillary
++from pylon import misc
++from pylon import paths
++
+ import abacmds
+ import cmdutil
+ import shutil
+ import os
+ import options
+-import paths
+ import time
+ import cmd
+ import readline
+ import re
+ import string
+-import arch_core
+-from errors import *
+-import errors
+ import terminal
+-import ancillary
+-import misc
+ import email
+ import smtplib
++import textwrap
+
+ __docformat__ = "restructuredtext"
+ __doc__ = "Implementation of user (sub) commands"
+@@ -257,7 +263,7 @@
+
+ tree=arch.tree_root()
+ if len(args) == 0:
+- a_spec = cmdutil.comp_revision(tree)
++ a_spec = ancillary.comp_revision(tree)
+ else:
+ a_spec = cmdutil.determine_revision_tree(tree, args[0])
+ cmdutil.ensure_archive_registered(a_spec.archive)
+@@ -284,7 +290,7 @@
+ changeset=options.changeset
+ tmpdir = None
+ else:
+- tmpdir=cmdutil.tmpdir()
++ tmpdir=util.tmpdir()
+ changeset=tmpdir+"/changeset"
+ try:
+ delta=arch.iter_delta(a_spec, b_spec, changeset)
+@@ -304,14 +310,14 @@
+ if status > 1:
+ return
+ if (options.perform_diff):
+- chan = cmdutil.ChangesetMunger(changeset)
++ chan = arch_compound.ChangesetMunger(changeset)
+ chan.read_indices()
+- if isinstance(b_spec, arch.Revision):
+- b_dir = b_spec.library_find()
+- else:
+- b_dir = b_spec
+- a_dir = a_spec.library_find()
+ if options.diffopts is not None:
++ if isinstance(b_spec, arch.Revision):
++ b_dir = b_spec.library_find()
++ else:
++ b_dir = b_spec
++ a_dir = a_spec.library_find()
+ diffopts = options.diffopts.split()
+ cmdutil.show_custom_diffs(chan, diffopts, a_dir, b_dir)
+ else:
+@@ -517,7 +523,7 @@
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+- from_revision=cmdutil.tree_latest(tree)
++ from_revision = arch_compound.tree_latest(tree)
+ if from_revision==to_revision:
+ print "Tree is already up to date with:\n"+str(to_revision)+"."
+ return
+@@ -592,6 +598,9 @@
+
+ if len(args) == 0:
+ args = None
++ if options.version is None:
++ return options, tree.tree_version, args
++
+ revision=cmdutil.determine_revision_arch(tree, options.version)
+ return options, revision.get_version(), args
+
+@@ -601,11 +610,16 @@
+ """
+ tree=arch.tree_root()
+ options, version, files = self.parse_commandline(cmdargs, tree)
++ ancestor = None
+ if options.__dict__.has_key("base") and options.base:
+ base = cmdutil.determine_revision_tree(tree, options.base)
++ ancestor = base
+ else:
+- base = cmdutil.submit_revision(tree)
+-
++ base = ancillary.submit_revision(tree)
++ ancestor = base
++ if ancestor is None:
++ ancestor = arch_compound.tree_latest(tree, version)
++
+ writeversion=version
+ archive=version.archive
+ source=cmdutil.get_mirror_source(archive)
+@@ -625,18 +639,26 @@
+ try:
+ last_revision=tree.iter_logs(version, True).next().revision
+ except StopIteration, e:
+- if cmdutil.prompt("Import from commit"):
+- return do_import(version)
+- else:
+- raise NoVersionLogs(version)
+- if last_revision!=version.iter_revisions(True).next():
++ last_revision = None
++ if ancestor is None:
++ if cmdutil.prompt("Import from commit"):
++ return do_import(version)
++ else:
++ raise NoVersionLogs(version)
++ try:
++ arch_last_revision = version.iter_revisions(True).next()
++ except StopIteration, e:
++ arch_last_revision = None
++
++ if last_revision != arch_last_revision:
++ print "Tree is not up to date with %s" % str(version)
+ if not cmdutil.prompt("Out of date"):
+ raise OutOfDate
+ else:
+ allow_old=True
+
+ try:
+- if not cmdutil.has_changed(version):
++ if not cmdutil.has_changed(ancestor):
+ if not cmdutil.prompt("Empty commit"):
+ raise EmptyCommit
+ except arch.util.ExecProblem, e:
+@@ -645,15 +667,15 @@
+ raise MissingID(e)
+ else:
+ raise
+- log = tree.log_message(create=False)
++ log = tree.log_message(create=False, version=version)
+ if log is None:
+ try:
+ if cmdutil.prompt("Create log"):
+- edit_log(tree)
++ edit_log(tree, version)
+
+ except cmdutil.NoEditorSpecified, e:
+ raise CommandFailed(e)
+- log = tree.log_message(create=False)
++ log = tree.log_message(create=False, version=version)
+ if log is None:
+ raise NoLogMessage
+ if log["Summary"] is None or len(log["Summary"].strip()) == 0:
+@@ -837,23 +859,24 @@
+ if spec is not None:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+- revision = cmdutil.comp_revision(tree)
++ revision = ancillary.comp_revision(tree)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ munger = None
+
+ if options.file_contents or options.file_perms or options.deletions\
+ or options.additions or options.renames or options.hunk_prompt:
+- munger = cmdutil.MungeOpts()
+- munger.hunk_prompt = options.hunk_prompt
++ munger = arch_compound.MungeOpts()
++ munger.set_hunk_prompt(cmdutil.colorize, cmdutil.user_hunk_confirm,
++ options.hunk_prompt)
+
+ if len(args) > 0 or options.logs or options.pattern_files or \
+ options.control:
+ if munger is None:
+- munger = cmdutil.MungeOpts(True)
++ munger = cmdutil.arch_compound.MungeOpts(True)
+ munger.all_types(True)
+ if len(args) > 0:
+- t_cwd = cmdutil.tree_cwd(tree)
++ t_cwd = arch_compound.tree_cwd(tree)
+ for name in args:
+ if len(t_cwd) > 0:
+ t_cwd += "/"
+@@ -878,7 +901,7 @@
+ if options.pattern_files:
+ munger.add_keep_pattern(options.pattern_files)
+
+- for line in cmdutil.revert(tree, revision, munger,
++ for line in arch_compound.revert(tree, revision, munger,
+ not options.no_output):
+ cmdutil.colorize(line)
+
+@@ -1042,18 +1065,13 @@
+ help_tree_spec()
+ return
+
+-def require_version_exists(version, spec):
+- if not version.exists():
+- raise cmdutil.CantDetermineVersion(spec,
+- "The version %s does not exist." \
+- % version)
+-
+ class Revisions(BaseCommand):
+ """
+ Print a revision name based on a revision specifier
+ """
+ def __init__(self):
+ self.description="Lists revisions"
++ self.cl_revisions = []
+
+ def do_command(self, cmdargs):
+ """
+@@ -1066,224 +1084,68 @@
+ self.tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ self.tree = None
++ if options.type == "default":
++ options.type = "archive"
+ try:
+- iter = self.get_iterator(options.type, args, options.reverse,
+- options.modified)
++ iter = cmdutil.revision_iterator(self.tree, options.type, args,
++ options.reverse, options.modified,
++ options.shallow)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+-
++ except cmdutil.CantDetermineVersion, e:
++ raise CommandFailedWrapper(e)
+ if options.skip is not None:
+ iter = cmdutil.iter_skip(iter, int(options.skip))
+
+- for revision in iter:
+- log = None
+- if isinstance(revision, arch.Patchlog):
+- log = revision
+- revision=revision.revision
+- print options.display(revision)
+- if log is None and (options.summary or options.creator or
+- options.date or options.merges):
+- log = revision.patchlog
+- if options.creator:
+- print " %s" % log.creator
+- if options.date:
+- print " %s" % time.strftime('%Y-%m-%d %H:%M:%S %Z', log.date)
+- if options.summary:
+- print " %s" % log.summary
+- if options.merges:
+- showed_title = False
+- for revision in log.merged_patches:
+- if not showed_title:
+- print " Merged:"
+- showed_title = True
+- print " %s" % revision
+-
+- def get_iterator(self, type, args, reverse, modified):
+- if len(args) > 0:
+- spec = args[0]
+- else:
+- spec = None
+- if modified is not None:
+- iter = cmdutil.modified_iter(modified, self.tree)
+- if reverse:
+- return iter
+- else:
+- return cmdutil.iter_reverse(iter)
+- elif type == "archive":
+- if spec is None:
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("",
+- "Not in a project tree")
+- version = cmdutil.determine_version_tree(spec, self.tree)
+- else:
+- version = cmdutil.determine_version_arch(spec, self.tree)
+- cmdutil.ensure_archive_registered(version.archive)
+- require_version_exists(version, spec)
+- return version.iter_revisions(reverse)
+- elif type == "cacherevs":
+- if spec is None:
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("",
+- "Not in a project tree")
+- version = cmdutil.determine_version_tree(spec, self.tree)
+- else:
+- version = cmdutil.determine_version_arch(spec, self.tree)
+- cmdutil.ensure_archive_registered(version.archive)
+- require_version_exists(version, spec)
+- return cmdutil.iter_cacherevs(version, reverse)
+- elif type == "library":
+- if spec is None:
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("",
+- "Not in a project tree")
+- version = cmdutil.determine_version_tree(spec, self.tree)
+- else:
+- version = cmdutil.determine_version_arch(spec, self.tree)
+- return version.iter_library_revisions(reverse)
+- elif type == "logs":
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+- return self.tree.iter_logs(cmdutil.determine_version_tree(spec, \
+- self.tree), reverse)
+- elif type == "missing" or type == "skip-present":
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+- skip = (type == "skip-present")
+- version = cmdutil.determine_version_tree(spec, self.tree)
+- cmdutil.ensure_archive_registered(version.archive)
+- require_version_exists(version, spec)
+- return cmdutil.iter_missing(self.tree, version, reverse,
+- skip_present=skip)
+-
+- elif type == "present":
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+- version = cmdutil.determine_version_tree(spec, self.tree)
+- cmdutil.ensure_archive_registered(version.archive)
+- require_version_exists(version, spec)
+- return cmdutil.iter_present(self.tree, version, reverse)
+-
+- elif type == "new-merges" or type == "direct-merges":
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+- version = cmdutil.determine_version_tree(spec, self.tree)
+- cmdutil.ensure_archive_registered(version.archive)
+- require_version_exists(version, spec)
+- iter = cmdutil.iter_new_merges(self.tree, version, reverse)
+- if type == "new-merges":
+- return iter
+- elif type == "direct-merges":
+- return cmdutil.direct_merges(iter)
+-
+- elif type == "missing-from":
+- if self.tree is None:
+- raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+- revision = cmdutil.determine_revision_tree(self.tree, spec)
+- libtree = cmdutil.find_or_make_local_revision(revision)
+- return cmdutil.iter_missing(libtree, self.tree.tree_version,
+- reverse)
+-
+- elif type == "partner-missing":
+- return cmdutil.iter_partner_missing(self.tree, reverse)
+-
+- elif type == "ancestry":
+- revision = cmdutil.determine_revision_tree(self.tree, spec)
+- iter = cmdutil._iter_ancestry(self.tree, revision)
+- if reverse:
+- return iter
+- else:
+- return cmdutil.iter_reverse(iter)
+-
+- elif type == "dependencies" or type == "non-dependencies":
+- nondeps = (type == "non-dependencies")
+- revision = cmdutil.determine_revision_tree(self.tree, spec)
+- anc_iter = cmdutil._iter_ancestry(self.tree, revision)
+- iter_depends = cmdutil.iter_depends(anc_iter, nondeps)
+- if reverse:
+- return iter_depends
+- else:
+- return cmdutil.iter_reverse(iter_depends)
+- elif type == "micro":
+- return cmdutil.iter_micro(self.tree)
+-
+-
++ try:
++ for revision in iter:
++ log = None
++ if isinstance(revision, arch.Patchlog):
++ log = revision
++ revision=revision.revision
++ out = options.display(revision)
++ if out is not None:
++ print out
++ if log is None and (options.summary or options.creator or
++ options.date or options.merges):
++ log = revision.patchlog
++ if options.creator:
++ print " %s" % log.creator
++ if options.date:
++ print " %s" % time.strftime('%Y-%m-%d %H:%M:%S %Z', log.date)
++ if options.summary:
++ print " %s" % log.summary
++ if options.merges:
++ showed_title = False
++ for revision in log.merged_patches:
++ if not showed_title:
++ print " Merged:"
++ showed_title = True
++ print " %s" % revision
++ if len(self.cl_revisions) > 0:
++ print pylon.changelog_for_merge(self.cl_revisions)
++ except pylon.errors.TreeRootNone:
++ raise CommandFailedWrapper(
++ Exception("This option can only be used in a project tree."))
++
++ def changelog_append(self, revision):
++ if isinstance(revision, arch.Revision):
++ revision=arch.Patchlog(revision)
++ self.cl_revisions.append(revision)
++
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+- parser=cmdutil.CmdOptionParser("fai revisions [revision]")
++ parser=cmdutil.CmdOptionParser("fai revisions [version/revision]")
+ select = cmdutil.OptionGroup(parser, "Selection options",
+ "Control which revisions are listed. These options"
+ " are mutually exclusive. If more than one is"
+ " specified, the last is used.")
+- select.add_option("", "--archive", action="store_const",
+- const="archive", dest="type", default="archive",
+- help="List all revisions in the archive")
+- select.add_option("", "--cacherevs", action="store_const",
+- const="cacherevs", dest="type",
+- help="List all revisions stored in the archive as "
+- "complete copies")
+- select.add_option("", "--logs", action="store_const",
+- const="logs", dest="type",
+- help="List revisions that have a patchlog in the "
+- "tree")
+- select.add_option("", "--missing", action="store_const",
+- const="missing", dest="type",
+- help="List revisions from the specified version that"
+- " have no patchlog in the tree")
+- select.add_option("", "--skip-present", action="store_const",
+- const="skip-present", dest="type",
+- help="List revisions from the specified version that"
+- " have no patchlogs at all in the tree")
+- select.add_option("", "--present", action="store_const",
+- const="present", dest="type",
+- help="List revisions from the specified version that"
+- " have no patchlog in the tree, but can't be merged")
+- select.add_option("", "--missing-from", action="store_const",
+- const="missing-from", dest="type",
+- help="List revisions from the specified revision "
+- "that have no patchlog for the tree version")
+- select.add_option("", "--partner-missing", action="store_const",
+- const="partner-missing", dest="type",
+- help="List revisions in partner versions that are"
+- " missing")
+- select.add_option("", "--new-merges", action="store_const",
+- const="new-merges", dest="type",
+- help="List revisions that have had patchlogs added"
+- " to the tree since the last commit")
+- select.add_option("", "--direct-merges", action="store_const",
+- const="direct-merges", dest="type",
+- help="List revisions that have been directly added"
+- " to tree since the last commit ")
+- select.add_option("", "--library", action="store_const",
+- const="library", dest="type",
+- help="List revisions in the revision library")
+- select.add_option("", "--ancestry", action="store_const",
+- const="ancestry", dest="type",
+- help="List revisions that are ancestors of the "
+- "current tree version")
+-
+- select.add_option("", "--dependencies", action="store_const",
+- const="dependencies", dest="type",
+- help="List revisions that the given revision "
+- "depends on")
+-
+- select.add_option("", "--non-dependencies", action="store_const",
+- const="non-dependencies", dest="type",
+- help="List revisions that the given revision "
+- "does not depend on")
+-
+- select.add_option("--micro", action="store_const",
+- const="micro", dest="type",
+- help="List partner revisions aimed for this "
+- "micro-branch")
+-
+- select.add_option("", "--modified", dest="modified",
+- help="List tree ancestor revisions that modified a "
+- "given file", metavar="FILE[:LINE]")
+
++ cmdutil.add_revision_iter_options(select)
+ parser.add_option("", "--skip", dest="skip",
+ help="Skip revisions. Positive numbers skip from "
+ "beginning, negative skip from end.",
+@@ -1312,6 +1174,9 @@
+ format.add_option("--cacherev", action="store_const",
+ const=paths.determine_cacherev_path, dest="display",
+ help="Show location of cacherev file")
++ format.add_option("--changelog", action="store_const",
++ const=self.changelog_append, dest="display",
++ help="Show location of cacherev file")
+ parser.add_option_group(format)
+ display = cmdutil.OptionGroup(parser, "Display format options",
+ "These control the display of data")
+@@ -1448,6 +1313,7 @@
+ if os.access(self.history_file, os.R_OK) and \
+ os.path.isfile(self.history_file):
+ readline.read_history_file(self.history_file)
++ self.cwd = os.getcwd()
+
+ def write_history(self):
+ readline.write_history_file(self.history_file)
+@@ -1470,16 +1336,21 @@
+ def set_prompt(self):
+ if self.tree is not None:
+ try:
+- version = " "+self.tree.tree_version.nonarch
++ prompt = pylon.alias_or_version(self.tree.tree_version,
++ self.tree,
++ full=False)
++ if prompt is not None:
++ prompt = " " + prompt
+ except:
+- version = ""
++ prompt = ""
+ else:
+- version = ""
+- self.prompt = "Fai%s> " % version
++ prompt = ""
++ self.prompt = "Fai%s> " % prompt
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1489,8 +1360,15 @@
+ def do_cd(self, line):
+ if line == "":
+ line = "~"
++ line = os.path.expanduser(line)
++ if os.path.isabs(line):
++ newcwd = line
++ else:
++ newcwd = self.cwd+'/'+line
++ newcwd = os.path.normpath(newcwd)
+ try:
+- os.chdir(os.path.expanduser(line))
++ os.chdir(newcwd)
++ self.cwd = newcwd
+ except Exception, e:
+ print e
+ try:
+@@ -1523,7 +1401,7 @@
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ except Exception, e:
+- print "Unhandled error:\n%s" % cmdutil.exception_str(e)
++ print "Unhandled error:\n%s" % errors.exception_str(e)
+
+ elif suggestions.has_key(args[0]):
+ print suggestions[args[0]]
+@@ -1574,7 +1452,7 @@
+ arg = line.split()[-1]
+ else:
+ arg = ""
+- iter = iter_munged_completions(iter, arg, text)
++ iter = cmdutil.iter_munged_completions(iter, arg, text)
+ except Exception, e:
+ print e
+ return list(iter)
+@@ -1604,10 +1482,11 @@
+ else:
+ arg = ""
+ if arg.startswith("-"):
+- return list(iter_munged_completions(iter, arg, text))
++ return list(cmdutil.iter_munged_completions(iter, arg,
++ text))
+ else:
+- return list(iter_munged_completions(
+- iter_file_completions(arg), arg, text))
++ return list(cmdutil.iter_munged_completions(
++ cmdutil.iter_file_completions(arg), arg, text))
+
+
+ elif cmd == "cd":
+@@ -1615,13 +1494,13 @@
+ arg = args.split()[-1]
+ else:
+ arg = ""
+- iter = iter_dir_completions(arg)
+- iter = iter_munged_completions(iter, arg, text)
++ iter = cmdutil.iter_dir_completions(arg)
++ iter = cmdutil.iter_munged_completions(iter, arg, text)
+ return list(iter)
+ elif len(args)>0:
+ arg = args.split()[-1]
+- return list(iter_munged_completions(iter_file_completions(arg),
+- arg, text))
++ iter = cmdutil.iter_file_completions(arg)
++ return list(cmdutil.iter_munged_completions(iter, arg, text))
+ else:
+ return self.completenames(text, line, begidx, endidx)
+ except Exception, e:
+@@ -1636,44 +1515,8 @@
+ yield entry
+
+
+-def iter_file_completions(arg, only_dirs = False):
+- """Generate an iterator that iterates through filename completions.
+-
+- :param arg: The filename fragment to match
+- :type arg: str
+- :param only_dirs: If true, match only directories
+- :type only_dirs: bool
+- """
+- cwd = os.getcwd()
+- if cwd != "/":
+- extras = [".", ".."]
+- else:
+- extras = []
+- (dir, file) = os.path.split(arg)
+- if dir != "":
+- listingdir = os.path.expanduser(dir)
+- else:
+- listingdir = cwd
+- for file in cmdutil.iter_combine([os.listdir(listingdir), extras]):
+- if dir != "":
+- userfile = dir+'/'+file
+- else:
+- userfile = file
+- if userfile.startswith(arg):
+- if os.path.isdir(listingdir+'/'+file):
+- userfile+='/'
+- yield userfile
+- elif not only_dirs:
+- yield userfile
+-
+-def iter_munged_completions(iter, arg, text):
+- for completion in iter:
+- completion = str(completion)
+- if completion.startswith(arg):
+- yield completion[len(arg)-len(text):]
+-
+ def iter_source_file_completions(tree, arg):
+- treepath = cmdutil.tree_cwd(tree)
++ treepath = arch_compound.tree_cwd(tree)
+ if len(treepath) > 0:
+ dirs = [treepath]
+ else:
+@@ -1701,7 +1544,7 @@
+ :return: An iterator of all matching untagged files
+ :rtype: iterator of str
+ """
+- treepath = cmdutil.tree_cwd(tree)
++ treepath = arch_compound.tree_cwd(tree)
+ if len(treepath) > 0:
+ dirs = [treepath]
+ else:
+@@ -1743,8 +1586,8 @@
+ :param arg: The prefix to match
+ :type arg: str
+ """
+- treepath = cmdutil.tree_cwd(tree)
+- tmpdir = cmdutil.tmpdir()
++ treepath = arch_compound.tree_cwd(tree)
++ tmpdir = util.tmpdir()
+ changeset = tmpdir+"/changeset"
+ completions = []
+ revision = cmdutil.determine_revision_tree(tree)
+@@ -1756,14 +1599,6 @@
+ shutil.rmtree(tmpdir)
+ return completions
+
+-def iter_dir_completions(arg):
+- """Generate an iterator that iterates through directory name completions.
+-
+- :param arg: The directory name fragment to match
+- :type arg: str
+- """
+- return iter_file_completions(arg, True)
+-
+ class Shell(BaseCommand):
+ def __init__(self):
+ self.description = "Runs Fai as a shell"
+@@ -1795,7 +1630,11 @@
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+
+- tree = arch.tree_root()
++ try:
++ tree = arch.tree_root()
++ except arch.errors.TreeRootError, e:
++ raise pylon.errors.CommandFailedWrapper(e)
++
+
+ if (len(args) == 0) == (options.untagged == False):
+ raise cmdutil.GetHelp
+@@ -1809,13 +1648,22 @@
+ if options.id_type == "tagline":
+ if method != "tagline":
+ if not cmdutil.prompt("Tagline in other tree"):
+- if method == "explicit":
+- options.id_type == explicit
++ if method == "explicit" or method == "implicit":
++ options.id_type == method
+ else:
+ print "add-id not supported for \"%s\" tagging method"\
+ % method
+ return
+
++ elif options.id_type == "implicit":
++ if method != "implicit":
++ if not cmdutil.prompt("Implicit in other tree"):
++ if method == "explicit" or method == "tagline":
++ options.id_type == method
++ else:
++ print "add-id not supported for \"%s\" tagging method"\
++ % method
++ return
+ elif options.id_type == "explicit":
+ if method != "tagline" and method != explicit:
+ if not prompt("Explicit in other tree"):
+@@ -1824,7 +1672,8 @@
+ return
+
+ if options.id_type == "auto":
+- if method != "tagline" and method != "explicit":
++ if method != "tagline" and method != "explicit" \
++ and method !="implicit":
+ print "add-id not supported for \"%s\" tagging method" % method
+ return
+ else:
+@@ -1852,10 +1701,12 @@
+ previous_files.extend(files)
+ if id_type == "explicit":
+ cmdutil.add_id(files)
+- elif id_type == "tagline":
++ elif id_type == "tagline" or id_type == "implicit":
+ for file in files:
+ try:
+- cmdutil.add_tagline_or_explicit_id(file)
++ implicit = (id_type == "implicit")
++ cmdutil.add_tagline_or_explicit_id(file, False,
++ implicit)
+ except cmdutil.AlreadyTagged:
+ print "\"%s\" already has a tagline." % file
+ except cmdutil.NoCommentSyntax:
+@@ -1888,6 +1739,9 @@
+ parser.add_option("--tagline", action="store_const",
+ const="tagline", dest="id_type",
+ help="Use a tagline id")
++ parser.add_option("--implicit", action="store_const",
++ const="implicit", dest="id_type",
++ help="Use an implicit id (deprecated)")
+ parser.add_option("--untagged", action="store_true",
+ dest="untagged", default=False,
+ help="tag all untagged files")
+@@ -1926,27 +1780,7 @@
+ def get_completer(self, arg, index):
+ if self.tree is None:
+ raise arch.errors.TreeRootError
+- completions = list(ancillary.iter_partners(self.tree,
+- self.tree.tree_version))
+- if len(completions) == 0:
+- completions = list(self.tree.iter_log_versions())
+-
+- aliases = []
+- try:
+- for completion in completions:
+- alias = ancillary.compact_alias(str(completion), self.tree)
+- if alias:
+- aliases.extend(alias)
+-
+- for completion in completions:
+- if completion.archive == self.tree.tree_version.archive:
+- aliases.append(completion.nonarch)
+-
+- except Exception, e:
+- print e
+-
+- completions.extend(aliases)
+- return completions
++ return cmdutil.merge_completions(self.tree, arg, index)
+
+ def do_command(self, cmdargs):
+ """
+@@ -1961,7 +1795,7 @@
+
+ if self.tree is None:
+ raise arch.errors.TreeRootError(os.getcwd())
+- if cmdutil.has_changed(self.tree.tree_version):
++ if cmdutil.has_changed(ancillary.comp_revision(self.tree)):
+ raise UncommittedChanges(self.tree)
+
+ if len(args) > 0:
+@@ -2027,14 +1861,14 @@
+ :type other_revision: `arch.Revision`
+ :return: 0 if the merge was skipped, 1 if it was applied
+ """
+- other_tree = cmdutil.find_or_make_local_revision(other_revision)
++ other_tree = arch_compound.find_or_make_local_revision(other_revision)
+ try:
+ if action == "native-merge":
+- ancestor = cmdutil.merge_ancestor2(self.tree, other_tree,
+- other_revision)
++ ancestor = arch_compound.merge_ancestor2(self.tree, other_tree,
++ other_revision)
+ elif action == "update":
+- ancestor = cmdutil.tree_latest(self.tree,
+- other_revision.version)
++ ancestor = arch_compound.tree_latest(self.tree,
++ other_revision.version)
+ except CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ cmdutil.colorize(arch.Chatter("* Found common ancestor %s" % ancestor))
+@@ -2104,7 +1938,10 @@
+ if self.tree is None:
+ raise arch.errors.TreeRootError
+
+- edit_log(self.tree)
++ try:
++ edit_log(self.tree, self.tree.tree_version)
++ except pylon.errors.NoEditorSpecified, e:
++ raise pylon.errors.CommandFailedWrapper(e)
+
+ def get_parser(self):
+ """
+@@ -2132,7 +1969,7 @@
+ """
+ return
+
+-def edit_log(tree):
++def edit_log(tree, version):
+ """Makes and edits the log for a tree. Does all kinds of fancy things
+ like log templates and merge summaries and log-for-merge
+
+@@ -2141,28 +1978,29 @@
+ """
+ #ensure we have an editor before preparing the log
+ cmdutil.find_editor()
+- log = tree.log_message(create=False)
++ log = tree.log_message(create=False, version=version)
+ log_is_new = False
+ if log is None or cmdutil.prompt("Overwrite log"):
+ if log is not None:
+ os.remove(log.name)
+- log = tree.log_message(create=True)
++ log = tree.log_message(create=True, version=version)
+ log_is_new = True
+ tmplog = log.name
+- template = tree+"/{arch}/=log-template"
+- if not os.path.exists(template):
+- template = os.path.expanduser("~/.arch-params/=log-template")
+- if not os.path.exists(template):
+- template = None
++ template = pylon.log_template_path(tree)
+ if template:
+ shutil.copyfile(template, tmplog)
+-
+- new_merges = list(cmdutil.iter_new_merges(tree,
+- tree.tree_version))
+- log["Summary"] = merge_summary(new_merges, tree.tree_version)
++ comp_version = ancillary.comp_revision(tree).version
++ new_merges = cmdutil.iter_new_merges(tree, comp_version)
++ new_merges = cmdutil.direct_merges(new_merges)
++ log["Summary"] = pylon.merge_summary(new_merges,
++ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+@@ -2172,29 +2010,6 @@
+ os.remove(log.name)
+ raise
+
+-def merge_summary(new_merges, tree_version):
+- if len(new_merges) == 0:
+- return ""
+- if len(new_merges) == 1:
+- summary = new_merges[0].summary
+- else:
+- summary = "Merge"
+-
+- credits = []
+- for merge in new_merges:
+- if arch.my_id() != merge.creator:
+- name = re.sub("<.*>", "", merge.creator).rstrip(" ");
+- if not name in credits:
+- credits.append(name)
+- else:
+- version = merge.revision.version
+- if version.archive == tree_version.archive:
+- if not version.nonarch in credits:
+- credits.append(version.nonarch)
+- elif not str(version) in credits:
+- credits.append(str(version))
+-
+- return ("%s (%s)") % (summary, ", ".join(credits))
+
+ class MirrorArchive(BaseCommand):
+ """
+@@ -2268,31 +2083,73 @@
+
+ Use "alias" to list available (user and automatic) aliases."""
+
++auto_alias = [
++"acur",
++"The latest revision in the archive of the tree-version. You can specify \
++a different version like so: acur:foo--bar--0 (aliases can be used)",
++"tcur",
++"""(tree current) The latest revision in the tree of the tree-version. \
++You can specify a different version like so: tcur:foo--bar--0 (aliases can be \
++used).""",
++"tprev" ,
++"""(tree previous) The previous revision in the tree of the tree-version. To \
++specify an older revision, use a number, e.g. "tprev:4" """,
++"tanc" ,
++"""(tree ancestor) The ancestor revision of the tree To specify an older \
++revision, use a number, e.g. "tanc:4".""",
++"tdate" ,
++"""(tree date) The latest revision from a given date, e.g. "tdate:July 6".""",
++"tmod" ,
++""" (tree modified) The latest revision to modify a given file, e.g. \
++"tmod:engine.cpp" or "tmod:engine.cpp:16".""",
++"ttag" ,
++"""(tree tag) The revision that was tagged into the current tree revision, \
++according to the tree""",
++"tagcur",
++"""(tag current) The latest revision of the version that the current tree \
++was tagged from.""",
++"mergeanc" ,
++"""The common ancestor of the current tree and the specified revision. \
++Defaults to the first partner-version's latest revision or to tagcur.""",
++]
++
++
++def is_auto_alias(name):
++ """Determine whether a name is an auto alias name
++
++ :param name: the name to check
++ :type name: str
++ :return: True if the name is an auto alias, false if not
++ :rtype: bool
++ """
++ return name in [f for (f, v) in pylon.util.iter_pairs(auto_alias)]
++
++
++def display_def(iter, wrap = 80):
++ """Display a list of definitions
++
++ :param iter: iter of name, definition pairs
++ :type iter: iter of (str, str)
++ :param wrap: The width for text wrapping
++ :type wrap: int
++ """
++ vals = list(iter)
++ maxlen = 0
++ for (key, value) in vals:
++ if len(key) > maxlen:
++ maxlen = len(key)
++ for (key, value) in vals:
++ tw=textwrap.TextWrapper(width=wrap,
++ initial_indent=key.rjust(maxlen)+" : ",
++ subsequent_indent="".rjust(maxlen+3))
++ print tw.fill(value)
++
++
+ def help_aliases(tree):
+- print """Auto-generated aliases
+- acur : The latest revision in the archive of the tree-version. You can specfy
+- a different version like so: acur:foo--bar--0 (aliases can be used)
+- tcur : (tree current) The latest revision in the tree of the tree-version.
+- You can specify a different version like so: tcur:foo--bar--0 (aliases
+- can be used).
+-tprev : (tree previous) The previous revision in the tree of the tree-version.
+- To specify an older revision, use a number, e.g. "tprev:4"
+- tanc : (tree ancestor) The ancestor revision of the tree
+- To specify an older revision, use a number, e.g. "tanc:4"
+-tdate : (tree date) The latest revision from a given date (e.g. "tdate:July 6")
+- tmod : (tree modified) The latest revision to modify a given file
+- (e.g. "tmod:engine.cpp" or "tmod:engine.cpp:16")
+- ttag : (tree tag) The revision that was tagged into the current tree revision,
+- according to the tree.
+-tagcur: (tag current) The latest revision of the version that the current tree
+- was tagged from.
+-mergeanc : The common ancestor of the current tree and the specified revision.
+- Defaults to the first partner-version's latest revision or to tagcur.
+- """
++ print """Auto-generated aliases"""
++ display_def(pylon.util.iter_pairs(auto_alias))
+ print "User aliases"
+- for parts in ancillary.iter_all_alias(tree):
+- print parts[0].rjust(10)+" : "+parts[1]
+-
++ display_def(ancillary.iter_all_alias(tree))
+
+ class Inventory(BaseCommand):
+ """List the status of files in the tree"""
+@@ -2428,6 +2285,11 @@
+ except cmdutil.ForbiddenAliasSyntax, e:
+ raise CommandFailedWrapper(e)
+
++ def no_prefix(self, alias):
++ if alias.startswith("^"):
++ alias = alias[1:]
++ return alias
++
+ def arg_dispatch(self, args, options):
+ """Add, modify, or list aliases, depending on number of arguments
+
+@@ -2438,15 +2300,20 @@
+ if len(args) == 0:
+ help_aliases(self.tree)
+ return
+- elif len(args) == 1:
+- self.print_alias(args[0])
+- elif (len(args)) == 2:
+- self.add(args[0], args[1], options)
+ else:
+- raise cmdutil.GetHelp
++ alias = self.no_prefix(args[0])
++ if len(args) == 1:
++ self.print_alias(alias)
++ elif (len(args)) == 2:
++ self.add(alias, args[1], options)
++ else:
++ raise cmdutil.GetHelp
+
+ def print_alias(self, alias):
+ answer = None
++ if is_auto_alias(alias):
++ raise pylon.errors.IsAutoAlias(alias, "\"%s\" is an auto alias."
++ " Use \"revision\" to expand auto aliases." % alias)
+ for pair in ancillary.iter_all_alias(self.tree):
+ if pair[0] == alias:
+ answer = pair[1]
+@@ -2464,6 +2331,8 @@
+ :type expansion: str
+ :param options: The commandline options
+ """
++ if is_auto_alias(alias):
++ raise IsAutoAlias(alias)
+ newlist = ""
+ written = False
+ new_line = "%s=%s\n" % (alias, cmdutil.expand_alias(expansion,
+@@ -2490,14 +2359,17 @@
+ deleted = False
+ if len(args) != 1:
+ raise cmdutil.GetHelp
++ alias = self.no_prefix(args[0])
++ if is_auto_alias(alias):
++ raise IsAutoAlias(alias)
+ newlist = ""
+ for pair in self.get_iterator(options):
+- if pair[0] != args[0]:
++ if pair[0] != alias:
+ newlist+="%s=%s\n" % (pair[0], pair[1])
+ else:
+ deleted = True
+ if not deleted:
+- raise errors.NoSuchAlias(args[0])
++ raise errors.NoSuchAlias(alias)
+ self.write_aliases(newlist, options)
+
+ def get_alias_file(self, options):
+@@ -2526,7 +2398,7 @@
+ :param options: The commandline options
+ """
+ filename = os.path.expanduser(self.get_alias_file(options))
+- file = cmdutil.NewFileVersion(filename)
++ file = util.NewFileVersion(filename)
+ file.write(newlist)
+ file.commit()
+
+@@ -2588,10 +2460,13 @@
+ :param cmdargs: The commandline arguments
+ :type cmdargs: list of str
+ """
+- cmdutil.find_editor()
+ parser = self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
++ cmdutil.find_editor()
++ except pylon.errors.NoEditorSpecified, e:
++ raise pylon.errors.CommandFailedWrapper(e)
++ try:
+ self.tree=arch.tree_root()
+ except:
+ self.tree=None
+@@ -2655,7 +2530,7 @@
+ target_revision = cmdutil.determine_revision_arch(self.tree,
+ args[0])
+ else:
+- target_revision = cmdutil.tree_latest(self.tree)
++ target_revision = arch_compound.tree_latest(self.tree)
+ if len(args) > 1:
+ merges = [ arch.Patchlog(cmdutil.determine_revision_arch(
+ self.tree, f)) for f in args[1:] ]
+@@ -2711,7 +2586,7 @@
+
+ :param message: The message to send
+ :type message: `email.Message`"""
+- server = smtplib.SMTP()
++ server = smtplib.SMTP("localhost")
+ server.sendmail(message['From'], message['To'], message.as_string())
+ server.quit()
+
+@@ -2763,6 +2638,22 @@
+ 'alias' : Alias,
+ 'request-merge': RequestMerge,
+ }
++
++def my_import(mod_name):
++ module = __import__(mod_name)
++ components = mod_name.split('.')
++ for comp in components[1:]:
++ module = getattr(module, comp)
++ return module
++
++def plugin(mod_name):
++ module = my_import(mod_name)
++ module.add_command(commands)
++
++for file in os.listdir(sys.path[0]+"/command"):
++ if len(file) > 3 and file[-3:] == ".py" and file != "__init__.py":
++ plugin("command."+file[:-3])
++
+ suggestions = {
+ 'apply-delta' : "Try \"apply-changes\".",
+ 'delta' : "To compare two revisions, use \"changes\".",
+@@ -2784,6 +2675,7 @@
+ 'tagline' : "Use add-id. It uses taglines in tagline trees",
+ 'emlog' : "Use elog. It automatically adds log-for-merge text, if any",
+ 'library-revisions' : "Use revisions --library",
+-'file-revert' : "Use revert FILE"
++'file-revert' : "Use revert FILE",
++'join-branch' : "Use replay --logs-only"
+ }
+ # arch-tag: 19d5739d-3708-486c-93ba-deecc3027fc7
diff --git a/bzrlib/tests/test_patches_data/diff-2 b/bzrlib/tests/test_patches_data/diff-2
new file mode 100644
index 0000000..c9e157f
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff-2
@@ -0,0 +1,12 @@
+--- patches.py
++++ patches.py
+@@ -391,6 +391,8 @@
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
++ for line in orig_lines:
++ yield line
+
+ import unittest
+ import os.path
+
diff --git a/bzrlib/tests/test_patches_data/diff-3 b/bzrlib/tests/test_patches_data/diff-3
new file mode 100644
index 0000000..6087c14
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff-3
@@ -0,0 +1,7 @@
+--- orig-3 2005-09-23 16:23:20.000000000 -0500
++++ mod-3 2005-09-23 16:23:38.000000000 -0500
+@@ -1,3 +1,4 @@
++First line change
+ # Copyright (C) 2004, 2005 Aaron Bentley
+ # <aaron.bentley@utoronto.ca>
+ #
diff --git a/bzrlib/tests/test_patches_data/diff-4 b/bzrlib/tests/test_patches_data/diff-4
new file mode 100644
index 0000000..2e27aa5
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff-4
@@ -0,0 +1,8 @@
+--- orig-4 2005-09-23 16:24:21.000000000 -0500
++++ mod-4 2005-09-23 16:24:35.000000000 -0500
+@@ -555,4 +555,4 @@
+
+ if __name__ == "__main__":
+ test()
+-# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
++last line change
diff --git a/bzrlib/tests/test_patches_data/diff-5 b/bzrlib/tests/test_patches_data/diff-5
new file mode 100644
index 0000000..d46b6fa
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff-5
@@ -0,0 +1,164 @@
+--- orig-5 2005-09-23 16:25:00.000000000 -0500
++++ mod-5 2005-09-23 16:25:21.000000000 -0500
+@@ -60,161 +60,6 @@
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+-def parse_range(textrange):
+- """Parse a patch range, handling the "1" special-case
+-
+- :param textrange: The text to parse
+- :type textrange: str
+- :return: the position and range, as a tuple
+- :rtype: (int, int)
+- """
+- tmp = textrange.split(',')
+- if len(tmp) == 1:
+- pos = tmp[0]
+- range = "1"
+- else:
+- (pos, range) = tmp
+- pos = int(pos)
+- range = int(range)
+- return (pos, range)
+-
+-
+-def hunk_from_header(line):
+- if not line.startswith("@@") or not line.endswith("@@\n") \
+- or not len(line) > 4:
+- raise MalformedHunkHeader("Does not start and end with @@.", line)
+- try:
+- (orig, mod) = line[3:-4].split(" ")
+- except Exception, e:
+- raise MalformedHunkHeader(str(e), line)
+- if not orig.startswith('-') or not mod.startswith('+'):
+- raise MalformedHunkHeader("Positions don't start with + or -.", line)
+- try:
+- (orig_pos, orig_range) = parse_range(orig[1:])
+- (mod_pos, mod_range) = parse_range(mod[1:])
+- except Exception, e:
+- raise MalformedHunkHeader(str(e), line)
+- if mod_range < 0 or orig_range < 0:
+- raise MalformedHunkHeader("Hunk range is negative", line)
+- return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+-
+-
+-class HunkLine:
+- def __init__(self, contents):
+- self.contents = contents
+-
+- def get_str(self, leadchar):
+- if self.contents == "\n" and leadchar == " " and False:
+- return "\n"
+- if not self.contents.endswith('\n'):
+- terminator = '\n' + NO_NL
+- else:
+- terminator = ''
+- return leadchar + self.contents + terminator
+-
+-
+-class ContextLine(HunkLine):
+- def __init__(self, contents):
+- HunkLine.__init__(self, contents)
+-
+- def __str__(self):
+- return self.get_str(" ")
+-
+-
+-class InsertLine(HunkLine):
+- def __init__(self, contents):
+- HunkLine.__init__(self, contents)
+-
+- def __str__(self):
+- return self.get_str("+")
+-
+-
+-class RemoveLine(HunkLine):
+- def __init__(self, contents):
+- HunkLine.__init__(self, contents)
+-
+- def __str__(self):
+- return self.get_str("-")
+-
+-NO_NL = '\\ No newline at end of file\n'
+-__pychecker__="no-returnvalues"
+-
+-def parse_line(line):
+- if line.startswith("\n"):
+- return ContextLine(line)
+- elif line.startswith(" "):
+- return ContextLine(line[1:])
+- elif line.startswith("+"):
+- return InsertLine(line[1:])
+- elif line.startswith("-"):
+- return RemoveLine(line[1:])
+- elif line == NO_NL:
+- return NO_NL
+- else:
+- raise MalformedLine("Unknown line type", line)
+-__pychecker__=""
+-
+-
+-class Hunk:
+- def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+- self.orig_pos = orig_pos
+- self.orig_range = orig_range
+- self.mod_pos = mod_pos
+- self.mod_range = mod_range
+- self.lines = []
+-
+- def get_header(self):
+- return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+- self.orig_range),
+- self.range_str(self.mod_pos,
+- self.mod_range))
+-
+- def range_str(self, pos, range):
+- """Return a file range, special-casing for 1-line files.
+-
+- :param pos: The position in the file
+- :type pos: int
+- :range: The range in the file
+- :type range: int
+- :return: a string in the format 1,4 except when range == pos == 1
+- """
+- if range == 1:
+- return "%i" % pos
+- else:
+- return "%i,%i" % (pos, range)
+-
+- def __str__(self):
+- lines = [self.get_header()]
+- for line in self.lines:
+- lines.append(str(line))
+- return "".join(lines)
+-
+- def shift_to_mod(self, pos):
+- if pos < self.orig_pos-1:
+- return 0
+- elif pos > self.orig_pos+self.orig_range:
+- return self.mod_range - self.orig_range
+- else:
+- return self.shift_to_mod_lines(pos)
+-
+- def shift_to_mod_lines(self, pos):
+- assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+- position = self.orig_pos-1
+- shift = 0
+- for line in self.lines:
+- if isinstance(line, InsertLine):
+- shift += 1
+- elif isinstance(line, RemoveLine):
+- if position == pos:
+- return None
+- shift -= 1
+- position += 1
+- elif isinstance(line, ContextLine):
+- position += 1
+- if position > pos:
+- break
+- return shift
+-
+ def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
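The block removed by diff-5 includes the range formatting that the hunk headers in these fixtures rely on: a range of 1 is written as the bare position, so "@@ -1 +0,0 @@" round-trips. A short sketch of that special case (range_str here is a standalone stand-in, not the Hunk method itself):

# Stand-alone sketch of the one-line special case: a range of 1 is printed as
# the bare position, anything else as "pos,range".
def range_str(pos, length):
    return "%i" % pos if length == 1 else "%i,%i" % (pos, length)

print("@@ -%s +%s @@" % (range_str(34, 11), range_str(50, 6)))   # @@ -34,11 +50,6 @@
print("@@ -%s +%s @@" % (range_str(1, 1), range_str(0, 0)))      # @@ -1 +0,0 @@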
diff --git a/bzrlib/tests/test_patches_data/diff-6 b/bzrlib/tests/test_patches_data/diff-6
new file mode 100644
index 0000000..92d6f19
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff-6
@@ -0,0 +1,562 @@
+--- orig-6 2005-09-23 16:27:16.000000000 -0500
++++ mod-6 2005-09-23 16:27:32.000000000 -0500
+@@ -1,558 +1 @@
+-# Copyright (C) 2004, 2005 Aaron Bentley
+-# <aaron.bentley@utoronto.ca>
+-#
+-# This program is free software; you can redistribute it and/or modify
+-# it under the terms of the GNU General Public License as published by
+-# the Free Software Foundation; either version 2 of the License, or
+-# (at your option) any later version.
+-#
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+-
+-class PatchSyntax(Exception):
+- def __init__(self, msg):
+- Exception.__init__(self, msg)
+-
+-
+-class MalformedPatchHeader(PatchSyntax):
+- def __init__(self, desc, line):
+- self.desc = desc
+- self.line = line
+- msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+- PatchSyntax.__init__(self, msg)
+-
+-class MalformedHunkHeader(PatchSyntax):
+- def __init__(self, desc, line):
+- self.desc = desc
+- self.line = line
+- msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+- PatchSyntax.__init__(self, msg)
+-
+-class MalformedLine(PatchSyntax):
+- def __init__(self, desc, line):
+- self.desc = desc
+- self.line = line
+- msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+- PatchSyntax.__init__(self, msg)
+-
+-def get_patch_names(iter_lines):
+- try:
+- line = iter_lines.next()
+- if not line.startswith("--- "):
+- raise MalformedPatchHeader("No orig name", line)
+- else:
+- orig_name = line[4:].rstrip("\n")
+- except StopIteration:
+- raise MalformedPatchHeader("No orig line", "")
+- try:
+- line = iter_lines.next()
+- if not line.startswith("+++ "):
+- raise PatchSyntax("No mod name")
+- else:
+- mod_name = line[4:].rstrip("\n")
+- except StopIteration:
+- raise MalformedPatchHeader("No mod line", "")
+- return (orig_name, mod_name)
+-
+-def parse_range(textrange):
+- """Parse a patch range, handling the "1" special-case
+-
+- :param textrange: The text to parse
+- :type textrange: str
+- :return: the position and range, as a tuple
+- :rtype: (int, int)
+- """
+- tmp = textrange.split(',')
+- if len(tmp) == 1:
+- pos = tmp[0]
+- range = "1"
+- else:
+- (pos, range) = tmp
+- pos = int(pos)
+- range = int(range)
+- return (pos, range)
+-
+-
+-def hunk_from_header(line):
+- if not line.startswith("@@") or not line.endswith("@@\n") \
+- or not len(line) > 4:
+- raise MalformedHunkHeader("Does not start and end with @@.", line)
+- try:
+- (orig, mod) = line[3:-4].split(" ")
+- except Exception, e:
+- raise MalformedHunkHeader(str(e), line)
+- if not orig.startswith('-') or not mod.startswith('+'):
+- raise MalformedHunkHeader("Positions don't start with + or -.", line)
+- try:
+- (orig_pos, orig_range) = parse_range(orig[1:])
+- (mod_pos, mod_range) = parse_range(mod[1:])
+- except Exception, e:
+- raise MalformedHunkHeader(str(e), line)
+- if mod_range < 0 or orig_range < 0:
+- raise MalformedHunkHeader("Hunk range is negative", line)
+- return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+-
+-
+-class HunkLine:
+- def __init__(self, contents):
+- self.contents = contents
+-
+- def get_str(self, leadchar):
+- if self.contents == "\n" and leadchar == " " and False:
+- return "\n"
+- if not self.contents.endswith('\n'):
+- terminator = '\n' + NO_NL
+- else:
+- terminator = ''
+- return leadchar + self.contents + terminator
+-
+-
+-class ContextLine(HunkLine):
+- def __init__(self, contents):
+- HunkLine.__init__(self, contents)
+-
+- def __str__(self):
+- return self.get_str(" ")
+-
+-
+-class InsertLine(HunkLine):
+- def __init__(self, contents):
+- HunkLine.__init__(self, contents)
+-
+- def __str__(self):
+- return self.get_str("+")
+-
+-
+-class RemoveLine(HunkLine):
+- def __init__(self, contents):
+- HunkLine.__init__(self, contents)
+-
+- def __str__(self):
+- return self.get_str("-")
+-
+-NO_NL = '\\ No newline at end of file\n'
+-__pychecker__="no-returnvalues"
+-
+-def parse_line(line):
+- if line.startswith("\n"):
+- return ContextLine(line)
+- elif line.startswith(" "):
+- return ContextLine(line[1:])
+- elif line.startswith("+"):
+- return InsertLine(line[1:])
+- elif line.startswith("-"):
+- return RemoveLine(line[1:])
+- elif line == NO_NL:
+- return NO_NL
+- else:
+- raise MalformedLine("Unknown line type", line)
+-__pychecker__=""
+-
+-
+-class Hunk:
+- def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+- self.orig_pos = orig_pos
+- self.orig_range = orig_range
+- self.mod_pos = mod_pos
+- self.mod_range = mod_range
+- self.lines = []
+-
+- def get_header(self):
+- return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+- self.orig_range),
+- self.range_str(self.mod_pos,
+- self.mod_range))
+-
+- def range_str(self, pos, range):
+- """Return a file range, special-casing for 1-line files.
+-
+- :param pos: The position in the file
+- :type pos: int
+- :range: The range in the file
+- :type range: int
+- :return: a string in the format 1,4 except when range == pos == 1
+- """
+- if range == 1:
+- return "%i" % pos
+- else:
+- return "%i,%i" % (pos, range)
+-
+- def __str__(self):
+- lines = [self.get_header()]
+- for line in self.lines:
+- lines.append(str(line))
+- return "".join(lines)
+-
+- def shift_to_mod(self, pos):
+- if pos < self.orig_pos-1:
+- return 0
+- elif pos > self.orig_pos+self.orig_range:
+- return self.mod_range - self.orig_range
+- else:
+- return self.shift_to_mod_lines(pos)
+-
+- def shift_to_mod_lines(self, pos):
+- assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+- position = self.orig_pos-1
+- shift = 0
+- for line in self.lines:
+- if isinstance(line, InsertLine):
+- shift += 1
+- elif isinstance(line, RemoveLine):
+- if position == pos:
+- return None
+- shift -= 1
+- position += 1
+- elif isinstance(line, ContextLine):
+- position += 1
+- if position > pos:
+- break
+- return shift
+-
+-def iter_hunks(iter_lines):
+- hunk = None
+- for line in iter_lines:
+- if line == "\n":
+- if hunk is not None:
+- yield hunk
+- hunk = None
+- continue
+- if hunk is not None:
+- yield hunk
+- hunk = hunk_from_header(line)
+- orig_size = 0
+- mod_size = 0
+- while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+- hunk_line = parse_line(iter_lines.next())
+- hunk.lines.append(hunk_line)
+- if isinstance(hunk_line, (RemoveLine, ContextLine)):
+- orig_size += 1
+- if isinstance(hunk_line, (InsertLine, ContextLine)):
+- mod_size += 1
+- if hunk is not None:
+- yield hunk
+-
+-class Patch:
+- def __init__(self, oldname, newname):
+- self.oldname = oldname
+- self.newname = newname
+- self.hunks = []
+-
+- def __str__(self):
+- ret = self.get_header()
+- ret += "".join([str(h) for h in self.hunks])
+- return ret
+-
+- def get_header(self):
+- return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+-
+- def stats_str(self):
+- """Return a string of patch statistics"""
+- removes = 0
+- inserts = 0
+- for hunk in self.hunks:
+- for line in hunk.lines:
+- if isinstance(line, InsertLine):
+- inserts+=1;
+- elif isinstance(line, RemoveLine):
+- removes+=1;
+- return "%i inserts, %i removes in %i hunks" % \
+- (inserts, removes, len(self.hunks))
+-
+- def pos_in_mod(self, position):
+- newpos = position
+- for hunk in self.hunks:
+- shift = hunk.shift_to_mod(position)
+- if shift is None:
+- return None
+- newpos += shift
+- return newpos
+-
+- def iter_inserted(self):
+- """Iteraties through inserted lines
+-
+- :return: Pair of line number, line
+- :rtype: iterator of (int, InsertLine)
+- """
+- for hunk in self.hunks:
+- pos = hunk.mod_pos - 1;
+- for line in hunk.lines:
+- if isinstance(line, InsertLine):
+- yield (pos, line)
+- pos += 1
+- if isinstance(line, ContextLine):
+- pos += 1
+-
+-def parse_patch(iter_lines):
+- (orig_name, mod_name) = get_patch_names(iter_lines)
+- patch = Patch(orig_name, mod_name)
+- for hunk in iter_hunks(iter_lines):
+- patch.hunks.append(hunk)
+- return patch
+-
+-
+-def iter_file_patch(iter_lines):
+- saved_lines = []
+- for line in iter_lines:
+- if line.startswith('=== '):
+- continue
+- elif line.startswith('--- '):
+- if len(saved_lines) > 0:
+- yield saved_lines
+- saved_lines = []
+- saved_lines.append(line)
+- if len(saved_lines) > 0:
+- yield saved_lines
+-
+-
+-def iter_lines_handle_nl(iter_lines):
+- """
+- Iterates through lines, ensuring that lines that originally had no
+- terminating \n are produced without one. This transformation may be
+- applied at any point up until hunk line parsing, and is safe to apply
+- repeatedly.
+- """
+- last_line = None
+- for line in iter_lines:
+- if line == NO_NL:
+- assert last_line.endswith('\n')
+- last_line = last_line[:-1]
+- line = None
+- if last_line is not None:
+- yield last_line
+- last_line = line
+- if last_line is not None:
+- yield last_line
+-
+-
+-def parse_patches(iter_lines):
+- iter_lines = iter_lines_handle_nl(iter_lines)
+- return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+-
+-
+-def difference_index(atext, btext):
+- """Find the indext of the first character that differs betweeen two texts
+-
+- :param atext: The first text
+- :type atext: str
+- :param btext: The second text
+- :type str: str
+- :return: The index, or None if there are no differences within the range
+- :rtype: int or NoneType
+- """
+- length = len(atext)
+- if len(btext) < length:
+- length = len(btext)
+- for i in range(length):
+- if atext[i] != btext[i]:
+- return i;
+- return None
+-
+-class PatchConflict(Exception):
+- def __init__(self, line_no, orig_line, patch_line):
+- orig = orig_line.rstrip('\n')
+- patch = str(patch_line).rstrip('\n')
+- msg = 'Text contents mismatch at line %d. Original has "%s",'\
+- ' but patch says it should be "%s"' % (line_no, orig, patch)
+- Exception.__init__(self, msg)
+-
+-
+-def iter_patched(orig_lines, patch_lines):
+- """Iterate through a series of lines with a patch applied.
+- This handles a single file, and does exact, not fuzzy patching.
+- """
+- if orig_lines is not None:
+- orig_lines = orig_lines.__iter__()
+- seen_patch = []
+- patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+- get_patch_names(patch_lines)
+- line_no = 1
+- for hunk in iter_hunks(patch_lines):
+- while line_no < hunk.orig_pos:
+- orig_line = orig_lines.next()
+- yield orig_line
+- line_no += 1
+- for hunk_line in hunk.lines:
+- seen_patch.append(str(hunk_line))
+- if isinstance(hunk_line, InsertLine):
+- yield hunk_line.contents
+- elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+- orig_line = orig_lines.next()
+- if orig_line != hunk_line.contents:
+- raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+- if isinstance(hunk_line, ContextLine):
+- yield orig_line
+- else:
+- assert isinstance(hunk_line, RemoveLine)
+- line_no += 1
+-
+-import unittest
+-import os.path
+-class PatchesTester(unittest.TestCase):
+- def datafile(self, filename):
+- data_path = os.path.join(os.path.dirname(__file__), "testdata",
+- filename)
+- return file(data_path, "rb")
+-
+- def testValidPatchHeader(self):
+- """Parse a valid patch header"""
+- lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+- (orig, mod) = get_patch_names(lines.__iter__())
+- assert(orig == "orig/commands.py")
+- assert(mod == "mod/dommands.py")
+-
+- def testInvalidPatchHeader(self):
+- """Parse an invalid patch header"""
+- lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+- self.assertRaises(MalformedPatchHeader, get_patch_names,
+- lines.__iter__())
+-
+- def testValidHunkHeader(self):
+- """Parse a valid hunk header"""
+- header = "@@ -34,11 +50,6 @@\n"
+- hunk = hunk_from_header(header);
+- assert (hunk.orig_pos == 34)
+- assert (hunk.orig_range == 11)
+- assert (hunk.mod_pos == 50)
+- assert (hunk.mod_range == 6)
+- assert (str(hunk) == header)
+-
+- def testValidHunkHeader2(self):
+- """Parse a tricky, valid hunk header"""
+- header = "@@ -1 +0,0 @@\n"
+- hunk = hunk_from_header(header);
+- assert (hunk.orig_pos == 1)
+- assert (hunk.orig_range == 1)
+- assert (hunk.mod_pos == 0)
+- assert (hunk.mod_range == 0)
+- assert (str(hunk) == header)
+-
+- def makeMalformed(self, header):
+- self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+-
+- def testInvalidHeader(self):
+- """Parse an invalid hunk header"""
+- self.makeMalformed(" -34,11 +50,6 \n")
+- self.makeMalformed("@@ +50,6 -34,11 @@\n")
+- self.makeMalformed("@@ -34,11 +50,6 @@")
+- self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+- self.makeMalformed("@@-34,11 +50,6@@\n")
+- self.makeMalformed("@@ 34,11 50,6 @@\n")
+- self.makeMalformed("@@ -34,11 @@\n")
+- self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+- self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+-
+- def lineThing(self,text, type):
+- line = parse_line(text)
+- assert(isinstance(line, type))
+- assert(str(line)==text)
+-
+- def makeMalformedLine(self, text):
+- self.assertRaises(MalformedLine, parse_line, text)
+-
+- def testValidLine(self):
+- """Parse a valid hunk line"""
+- self.lineThing(" hello\n", ContextLine)
+- self.lineThing("+hello\n", InsertLine)
+- self.lineThing("-hello\n", RemoveLine)
+-
+- def testMalformedLine(self):
+- """Parse invalid valid hunk lines"""
+- self.makeMalformedLine("hello\n")
+-
+- def compare_parsed(self, patchtext):
+- lines = patchtext.splitlines(True)
+- patch = parse_patch(lines.__iter__())
+- pstr = str(patch)
+- i = difference_index(patchtext, pstr)
+- if i is not None:
+- print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+- self.assertEqual (patchtext, str(patch))
+-
+- def testAll(self):
+- """Test parsing a whole patch"""
+- patchtext = """--- orig/commands.py
+-+++ mod/commands.py
+-@@ -1337,7 +1337,8 @@
+-
+- def set_title(self, command=None):
+- try:
+-- version = self.tree.tree_version.nonarch
+-+ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
+-+ full=False)
+- except:
+- version = "[no version]"
+- if command is None:
+-@@ -1983,7 +1984,11 @@
+- version)
+- if len(new_merges) > 0:
+- if cmdutil.prompt("Log for merge"):
+-- mergestuff = cmdutil.log_for_merge(tree, comp_version)
+-+ if cmdutil.prompt("changelog for merge"):
+-+ mergestuff = "Patches applied:\\n"
+-+ mergestuff += pylon.changelog_for_merge(new_merges)
+-+ else:
+-+ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+- log.description += mergestuff
+- log.save()
+- try:
+-"""
+- self.compare_parsed(patchtext)
+-
+- def testInit(self):
+- """Handle patches missing half the position, range tuple"""
+- patchtext = \
+-"""--- orig/__init__.py
+-+++ mod/__init__.py
+-@@ -1 +1,2 @@
+- __docformat__ = "restructuredtext en"
+-+__doc__ = An alternate Arch commandline interface
+-"""
+- self.compare_parsed(patchtext)
+-
+-
+-
+- def testLineLookup(self):
+- import sys
+- """Make sure we can accurately look up mod line from orig"""
+- patch = parse_patch(self.datafile("diff"))
+- orig = list(self.datafile("orig"))
+- mod = list(self.datafile("mod"))
+- removals = []
+- for i in range(len(orig)):
+- mod_pos = patch.pos_in_mod(i)
+- if mod_pos is None:
+- removals.append(orig[i])
+- continue
+- assert(mod[mod_pos]==orig[i])
+- rem_iter = removals.__iter__()
+- for hunk in patch.hunks:
+- for line in hunk.lines:
+- if isinstance(line, RemoveLine):
+- next = rem_iter.next()
+- if line.contents != next:
+- sys.stdout.write(" orig:%spatch:%s" % (next,
+- line.contents))
+- assert(line.contents == next)
+- self.assertRaises(StopIteration, rem_iter.next)
+-
+- def testFirstLineRenumber(self):
+- """Make sure we handle lines at the beginning of the hunk"""
+- patch = parse_patch(self.datafile("insert_top.patch"))
+- assert (patch.pos_in_mod(0)==1)
+-
+-def test():
+- patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+- runner = unittest.TextTestRunner(verbosity=0)
+- return runner.run(patchesTestSuite)
+-
+-
+-if __name__ == "__main__":
+- test()
+-# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
++Total contents change
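diff-6 strips the whole of the old patches module down to a single line, and its removed iter_patched() shows the exact, non-fuzzy application loop: original lines are copied up to the hunk, context and removed lines must match the original, and inserted lines are emitted as-is. A hedged sketch of that loop with plain (kind, text) tuples in place of the HunkLine classes (apply_hunk is an illustrative helper, not part of bzrlib):

# Hedged sketch of the exact (non-fuzzy) application step: apply one hunk,
# given a list of original lines and (kind, text) hunk lines.
def apply_hunk(orig_lines, hunk_lines, orig_pos):
    i = orig_pos - 1                      # hunk positions are 1-based
    result = list(orig_lines[:i])         # untouched lines before the hunk
    for kind, text in hunk_lines:
        if kind == "+":                   # InsertLine: emit new text
            result.append(text)
        elif kind == " ":                 # ContextLine: must match, is kept
            assert orig_lines[i] == text
            result.append(text)
            i += 1
        elif kind == "-":                 # RemoveLine: must match, is dropped
            assert orig_lines[i] == text
            i += 1
    return result + list(orig_lines[i:])  # untouched lines after the hunk

print(apply_hunk(["a\n", "b\n", "c\n"], [("+", "new\n"), (" ", "a\n")], 1))
# ['new\n', 'a\n', 'b\n', 'c\n']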
diff --git a/bzrlib/tests/test_patches_data/diff-7 b/bzrlib/tests/test_patches_data/diff-7
new file mode 100644
index 0000000..0205724
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/diff-7
@@ -0,0 +1,7 @@
+--- orig-7 2008-12-02 01:58:43.000000000 +0100
++++ mod-7 2008-12-02 01:58:43.000000000 +0100
+@@ -1 +1 @@
+-No terminating newline
+\ No newline at end of file
++No newline either
+\ No newline at end of file
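diff-7 exercises the "\ No newline at end of file" marker on both sides of a one-line change. The iter_lines_handle_nl() generator shown in the diff-6 fixture handles this by stripping the trailing newline from the line that precedes the marker; a minimal sketch of that transformation (handle_nl is an illustrative name):

# Sketch of the no-newline handling this fixture exercises: a line followed by
# the marker loses its terminating "\n" before hunk-line parsing.
NO_NL = '\\ No newline at end of file\n'

def handle_nl(lines):
    last = None
    for line in lines:
        if line == NO_NL:
            last = last[:-1]              # previous line really had no newline
            continue
        if last is not None:
            yield last
        last = line
    if last is not None:
        yield last

print(list(handle_nl(["-No terminating newline\n", NO_NL])))
# ['-No terminating newline']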
diff --git a/bzrlib/tests/test_patches_data/insert_top.patch b/bzrlib/tests/test_patches_data/insert_top.patch
new file mode 100644
index 0000000..16d1892
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/insert_top.patch
@@ -0,0 +1,7 @@
+--- orig/pylon/patches.py
++++ mod/pylon/patches.py
+@@ -1,3 +1,4 @@
++#test
+ import util
+ import sys
+ class PatchSyntax(Exception):
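insert_top.patch backs the renumbering test in the diff-6 fixture above (pos_in_mod(0) == 1): a hunk that only inserts one line at the top shifts every original position forward by one. A hedged sketch of the per-hunk walk that the removed Hunk.shift_to_mod_lines() performs, using (kind, text) tuples and a 0-based original position:

# Hedged sketch of the per-hunk walk behind pos_in_mod(): count inserted and
# removed lines until the 0-based original position has been passed.
def shift_in_hunk(pos, orig_pos, lines):
    position, shift = orig_pos - 1, 0     # orig_pos is 1-based
    for kind, _ in lines:
        if kind == "+":
            shift += 1
        elif kind == "-":
            if position == pos:
                return None               # the line itself was removed
            shift -= 1
            position += 1
        else:                             # context line
            position += 1
        if position > pos:
            break
    return shift

hunk = [("+", "#test\n"), (" ", "import util\n"),
        (" ", "import sys\n"), (" ", "class PatchSyntax(Exception):\n")]
print(0 + shift_in_hunk(0, 1, hunk))      # 1, matching pos_in_mod(0) == 1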
diff --git a/bzrlib/tests/test_patches_data/mod b/bzrlib/tests/test_patches_data/mod
new file mode 100644
index 0000000..103fe99
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod
@@ -0,0 +1,2681 @@
+# Copyright (C) 2004 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import sys
+import arch
+import arch.util
+import arch.arch
+
+import pylon.errors
+from pylon.errors import *
+from pylon import errors
+from pylon import util
+from pylon import arch_core
+from pylon import arch_compound
+from pylon import ancillary
+from pylon import misc
+from pylon import paths
+
+import abacmds
+import cmdutil
+import shutil
+import os
+import options
+import time
+import cmd
+import readline
+import re
+import string
+import terminal
+import email
+import smtplib
+import textwrap
+
+__docformat__ = "restructuredtext"
+__doc__ = "Implementation of user (sub) commands"
+commands = {}
+
+def find_command(cmd):
+ """
+ Return an instance of a command type. Return None if the type isn't
+ registered.
+
+ :param cmd: the name of the command to look for
+ :type cmd: the type of the command
+ """
+ if commands.has_key(cmd):
+ return commands[cmd]()
+ else:
+ return None
+
+class BaseCommand:
+ def __call__(self, cmdline):
+ try:
+ self.do_command(cmdline.split())
+ except cmdutil.GetHelp, e:
+ self.help()
+ except Exception, e:
+ print e
+
+ def get_completer(index):
+ return None
+
+ def complete(self, args, text):
+ """
+ Returns a list of possible completions for the given text.
+
+ :param args: The complete list of arguments
+ :type args: List of str
+ :param text: text to complete (may be shorter than args[-1])
+ :type text: str
+ :rtype: list of str
+ """
+ matches = []
+ candidates = None
+
+ if len(args) > 0:
+ realtext = args[-1]
+ else:
+ realtext = ""
+
+ try:
+ parser=self.get_parser()
+ if realtext.startswith('-'):
+ candidates = parser.iter_options()
+ else:
+ (options, parsed_args) = parser.parse_args(args)
+
+ if len (parsed_args) > 0:
+ candidates = self.get_completer(parsed_args[-1], len(parsed_args) -1)
+ else:
+ candidates = self.get_completer("", 0)
+ except:
+ pass
+ if candidates is None:
+ return
+ for candidate in candidates:
+ candidate = str(candidate)
+ if candidate.startswith(realtext):
+ matches.append(candidate[len(realtext)- len(text):])
+ return matches
+
+
+class Help(BaseCommand):
+ """
+ Lists commands, prints help messages.
+ """
+ def __init__(self):
+ self.description="Prints help messages"
+ self.parser = None
+
+ def do_command(self, cmdargs):
+ """
+ Prints a help message.
+ """
+ options, args = self.get_parser().parse_args(cmdargs)
+ if len(args) > 1:
+ raise cmdutil.GetHelp
+
+ if options.native or options.suggestions or options.external:
+ native = options.native
+ suggestions = options.suggestions
+ external = options.external
+ else:
+ native = True
+ suggestions = False
+ external = True
+
+ if len(args) == 0:
+ self.list_commands(native, suggestions, external)
+ return
+ elif len(args) == 1:
+ command_help(args[0])
+ return
+
+ def help(self):
+ self.get_parser().print_help()
+ print """
+If no command is specified, commands are listed. If a command is
+specified, help for that command is listed.
+ """
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ if self.parser is not None:
+ return self.parser
+ parser=cmdutil.CmdOptionParser("fai help [command]")
+ parser.add_option("-n", "--native", action="store_true",
+ dest="native", help="Show native commands")
+ parser.add_option("-e", "--external", action="store_true",
+ dest="external", help="Show external commands")
+ parser.add_option("-s", "--suggest", action="store_true",
+ dest="suggestions", help="Show suggestions")
+ self.parser = parser
+ return parser
+
+ def list_commands(self, native=True, suggest=False, external=True):
+ """
+ Lists supported commands.
+
+ :param native: list native, python-based commands
+ :type native: bool
+ :param external: list external aba-style commands
+ :type external: bool
+ """
+ if native:
+ print "Native Fai commands"
+ keys=commands.keys()
+ keys.sort()
+ for k in keys:
+ space=""
+ for i in range(28-len(k)):
+ space+=" "
+ print space+k+" : "+commands[k]().description
+ print
+ if suggest:
+ print "Unavailable commands and suggested alternatives"
+ key_list = suggestions.keys()
+ key_list.sort()
+ for key in key_list:
+ print "%28s : %s" % (key, suggestions[key])
+ print
+ if external:
+ fake_aba = abacmds.AbaCmds()
+ if (fake_aba.abadir == ""):
+ return
+ print "External commands"
+ fake_aba.list_commands()
+ print
+ if not suggest:
+ print "Use help --suggest to list alternatives to tla and aba"\
+ " commands."
+ if options.tla_fallthrough and (native or external):
+ print "Fai also supports tla commands."
+
+def command_help(cmd):
+ """
+ Prints help for a command.
+
+ :param cmd: The name of the command to print help for
+ :type cmd: str
+ """
+ fake_aba = abacmds.AbaCmds()
+ cmdobj = find_command(cmd)
+ if cmdobj != None:
+ cmdobj.help()
+ elif suggestions.has_key(cmd):
+ print "Not available\n" + suggestions[cmd]
+ else:
+ abacmd = fake_aba.is_command(cmd)
+ if abacmd:
+ abacmd.help()
+ else:
+ print "No help is available for \""+cmd+"\". Maybe try \"tla "+cmd+" -H\"?"
+
+
+
+class Changes(BaseCommand):
+ """
+ the "changes" command: lists differences between trees/revisions:
+ """
+
+ def __init__(self):
+ self.description="Lists what files have changed in the project tree"
+
+ def get_completer(self, arg, index):
+ if index > 1:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def parse_commandline(self, cmdline):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+ if len(args) > 2:
+ raise cmdutil.GetHelp
+
+ tree=arch.tree_root()
+ if len(args) == 0:
+ a_spec = ancillary.comp_revision(tree)
+ else:
+ a_spec = cmdutil.determine_revision_tree(tree, args[0])
+ cmdutil.ensure_archive_registered(a_spec.archive)
+ if len(args) == 2:
+ b_spec = cmdutil.determine_revision_tree(tree, args[1])
+ cmdutil.ensure_archive_registered(b_spec.archive)
+ else:
+ b_spec=tree
+ return options, a_spec, b_spec
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "changes" command.
+ """
+ try:
+ options, a_spec, b_spec = self.parse_commandline(cmdargs);
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ return
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+ if options.changeset:
+ changeset=options.changeset
+ tmpdir = None
+ else:
+ tmpdir=util.tmpdir()
+ changeset=tmpdir+"/changeset"
+ try:
+ delta=arch.iter_delta(a_spec, b_spec, changeset)
+ try:
+ for line in delta:
+ if cmdutil.chattermatch(line, "changeset:"):
+ pass
+ else:
+ cmdutil.colorize(line, options.suppress_chatter)
+ except arch.util.ExecProblem, e:
+ if e.proc.error and e.proc.error.startswith(
+ "missing explicit id for file"):
+ raise MissingID(e)
+ else:
+ raise
+ status=delta.status
+ if status > 1:
+ return
+ if (options.perform_diff):
+ chan = arch_compound.ChangesetMunger(changeset)
+ chan.read_indices()
+ if options.diffopts is not None:
+ if isinstance(b_spec, arch.Revision):
+ b_dir = b_spec.library_find()
+ else:
+ b_dir = b_spec
+ a_dir = a_spec.library_find()
+ diffopts = options.diffopts.split()
+ cmdutil.show_custom_diffs(chan, diffopts, a_dir, b_dir)
+ else:
+ cmdutil.show_diffs(delta.changeset)
+ finally:
+ if tmpdir and (os.access(tmpdir, os.X_OK)):
+ shutil.rmtree(tmpdir)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "changes" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai changes [options] [revision]"
+ " [revision]")
+ parser.add_option("-d", "--diff", action="store_true",
+ dest="perform_diff", default=False,
+ help="Show diffs in summary")
+ parser.add_option("-c", "--changeset", dest="changeset",
+ help="Store a changeset in the given directory",
+ metavar="DIRECTORY")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ parser.add_option("--diffopts", dest="diffopts",
+ help="Use the specified diff options",
+ metavar="OPTIONS")
+
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Performs source-tree comparisons
+
+If no revision is specified, the current project tree is compared to the
+last-committed revision. If one revision is specified, the current project
+tree is compared to that revision. If two revisions are specified, they are
+compared to each other.
+ """
+ help_tree_spec()
+ return
+
+
+class ApplyChanges(BaseCommand):
+ """
+ Apply differences between two revisions to a tree
+ """
+
+ def __init__(self):
+ self.description="Applies changes to a project tree"
+
+ def get_completer(self, arg, index):
+ if index > 1:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def parse_commandline(self, cmdline, tree):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+ if len(args) != 2:
+ raise cmdutil.GetHelp
+
+ a_spec = cmdutil.determine_revision_tree(tree, args[0])
+ cmdutil.ensure_archive_registered(a_spec.archive)
+ b_spec = cmdutil.determine_revision_tree(tree, args[1])
+ cmdutil.ensure_archive_registered(b_spec.archive)
+ return options, a_spec, b_spec
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs "apply-changes".
+ """
+ try:
+ tree = arch.tree_root()
+ options, a_spec, b_spec = self.parse_commandline(cmdargs, tree);
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ return
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+ delta=cmdutil.apply_delta(a_spec, b_spec, tree)
+ for line in cmdutil.iter_apply_delta_filter(delta):
+ cmdutil.colorize(line, options.suppress_chatter)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "apply-changes" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai apply-changes [options] revision"
+ " revision")
+ parser.add_option("-d", "--diff", action="store_true",
+ dest="perform_diff", default=False,
+ help="Show diffs in summary")
+ parser.add_option("-c", "--changeset", dest="changeset",
+ help="Store a changeset in the given directory",
+ metavar="DIRECTORY")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Applies changes to a project tree
+
+Compares two revisions and applies the difference between them to the current
+tree.
+ """
+ help_tree_spec()
+ return
+
+class Update(BaseCommand):
+ """
+ Updates a project tree to a given revision, preserving uncommitted changes.
+ """
+
+ def __init__(self):
+ self.description="Apply the latest changes to the current directory"
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def parse_commandline(self, cmdline, tree):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+ if len(args) > 2:
+ raise cmdutil.GetHelp
+
+ spec=None
+ if len(args)>0:
+ spec=args[0]
+ revision=cmdutil.determine_revision_arch(tree, spec)
+ cmdutil.ensure_archive_registered(revision.archive)
+
+ mirror_source = cmdutil.get_mirror_source(revision.archive)
+ if mirror_source != None:
+ if cmdutil.prompt("Mirror update"):
+ cmd=cmdutil.mirror_archive(mirror_source,
+ revision.archive, arch.NameParser(revision).get_package_version())
+ for line in arch.chatter_classifier(cmd):
+ cmdutil.colorize(line, options.suppress_chatter)
+
+ revision=cmdutil.determine_revision_arch(tree, spec)
+
+ return options, revision
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "update" command.
+ """
+ tree=arch.tree_root()
+ try:
+ options, to_revision = self.parse_commandline(cmdargs, tree);
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ return
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+ from_revision = arch_compound.tree_latest(tree)
+ if from_revision==to_revision:
+ print "Tree is already up to date with:\n"+str(to_revision)+"."
+ return
+ cmdutil.ensure_archive_registered(from_revision.archive)
+ cmd=cmdutil.apply_delta(from_revision, to_revision, tree,
+ options.patch_forward)
+ for line in cmdutil.iter_apply_delta_filter(cmd):
+ cmdutil.colorize(line)
+ if to_revision.version != tree.tree_version:
+ if cmdutil.prompt("Update version"):
+ tree.tree_version = to_revision.version
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "update" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai update [options]"
+ " [revision/version]")
+ parser.add_option("-f", "--forward", action="store_true",
+ dest="patch_forward", default=False,
+ help="pass the --forward option to 'patch'")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Updates a working tree to the current archive revision
+
+If a revision or version is specified, that is used instead
+ """
+ help_tree_spec()
+ return
+
+
+class Commit(BaseCommand):
+ """
+ Create a revision based on the changes in the current tree.
+ """
+
+ def __init__(self):
+ self.description="Write local changes to the archive"
+
+ def get_completer(self, arg, index):
+ if arg is None:
+ arg = ""
+ return iter_modified_file_completions(arch.tree_root(), arg)
+# return iter_source_file_completions(arch.tree_root(), arg)
+
+ def parse_commandline(self, cmdline, tree):
+ """
+ Parse commandline arguments. Raise cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+
+ if len(args) == 0:
+ args = None
+ if options.version is None:
+ return options, tree.tree_version, args
+
+ revision=cmdutil.determine_revision_arch(tree, options.version)
+ return options, revision.get_version(), args
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "commit" command.
+ """
+ tree=arch.tree_root()
+ options, version, files = self.parse_commandline(cmdargs, tree)
+ ancestor = None
+ if options.__dict__.has_key("base") and options.base:
+ base = cmdutil.determine_revision_tree(tree, options.base)
+ ancestor = base
+ else:
+ base = ancillary.submit_revision(tree)
+ ancestor = base
+ if ancestor is None:
+ ancestor = arch_compound.tree_latest(tree, version)
+
+ writeversion=version
+ archive=version.archive
+ source=cmdutil.get_mirror_source(archive)
+ allow_old=False
+ writethrough="implicit"
+
+ if source!=None:
+ if writethrough=="explicit" and \
+ cmdutil.prompt("Writethrough"):
+ writeversion=arch.Version(str(source)+"/"+str(version.get_nonarch()))
+ elif writethrough=="none":
+ raise CommitToMirror(archive)
+
+ elif archive.is_mirror:
+ raise CommitToMirror(archive)
+
+ try:
+ last_revision=tree.iter_logs(version, True).next().revision
+ except StopIteration, e:
+ last_revision = None
+ if ancestor is None:
+ if cmdutil.prompt("Import from commit"):
+ return do_import(version)
+ else:
+ raise NoVersionLogs(version)
+ try:
+ arch_last_revision = version.iter_revisions(True).next()
+ except StopIteration, e:
+ arch_last_revision = None
+
+ if last_revision != arch_last_revision:
+ print "Tree is not up to date with %s" % str(version)
+ if not cmdutil.prompt("Out of date"):
+ raise OutOfDate
+ else:
+ allow_old=True
+
+ try:
+ if not cmdutil.has_changed(ancestor):
+ if not cmdutil.prompt("Empty commit"):
+ raise EmptyCommit
+ except arch.util.ExecProblem, e:
+ if e.proc.error and e.proc.error.startswith(
+ "missing explicit id for file"):
+ raise MissingID(e)
+ else:
+ raise
+ log = tree.log_message(create=False, version=version)
+ if log is None:
+ try:
+ if cmdutil.prompt("Create log"):
+ edit_log(tree, version)
+
+ except cmdutil.NoEditorSpecified, e:
+ raise CommandFailed(e)
+ log = tree.log_message(create=False, version=version)
+ if log is None:
+ raise NoLogMessage
+ if log["Summary"] is None or len(log["Summary"].strip()) == 0:
+ if not cmdutil.prompt("Omit log summary"):
+ raise errors.NoLogSummary
+ try:
+ for line in tree.iter_commit(version, seal=options.seal_version,
+ base=base, out_of_date_ok=allow_old, file_list=files):
+ cmdutil.colorize(line, options.suppress_chatter)
+
+ except arch.util.ExecProblem, e:
+ if e.proc.error and e.proc.error.startswith(
+ "These files violate naming conventions:"):
+ raise LintFailure(e.proc.error)
+ else:
+ raise
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "commit" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+
+ parser=cmdutil.CmdOptionParser("fai commit [options] [file1]"
+ " [file2...]")
+ parser.add_option("--seal", action="store_true",
+ dest="seal_version", default=False,
+ help="seal this version")
+ parser.add_option("-v", "--version", dest="version",
+ help="Use the specified version",
+ metavar="VERSION")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ if cmdutil.supports_switch("commit", "--base"):
+ parser.add_option("--base", dest="base", help="",
+ metavar="REVISION")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Updates a working tree to the current archive revision
+
+If a version is specified, that is used instead
+ """
+# help_tree_spec()
+ return
+
+
+
+class CatLog(BaseCommand):
+ """
+ Print the log of a given file (from current tree)
+ """
+ def __init__(self):
+ self.description="Prints the patch log for a revision"
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "cat-log" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError, e:
+ tree = None
+ spec=None
+ if len(args) > 0:
+ spec=args[0]
+ if len(args) > 1:
+ raise cmdutil.GetHelp()
+ try:
+ if tree:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+ revision = cmdutil.determine_revision_arch(tree, spec)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ log = None
+
+ use_tree = (options.source == "tree" or \
+ (options.source == "any" and tree))
+ use_arch = (options.source == "archive" or options.source == "any")
+
+ log = None
+ if use_tree:
+ for log in tree.iter_logs(revision.get_version()):
+ if log.revision == revision:
+ break
+ else:
+ log = None
+ if log is None and use_arch:
+ cmdutil.ensure_revision_exists(revision)
+ log = arch.Patchlog(revision)
+ if log is not None:
+ for item in log.items():
+ print "%s: %s" % item
+ print log.description
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "cat-log" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai cat-log [revision]")
+ parser.add_option("--archive", action="store_const", dest="source",
+ const="archive", default="any",
+ help="Always get the log from the archive")
+ parser.add_option("--tree", action="store_const", dest="source",
+ const="tree", help="Always get the log from the tree")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Prints the log for the specified revision
+ """
+ help_tree_spec()
+ return
+
+class Revert(BaseCommand):
+ """ Reverts a tree (or aspects of it) to a revision
+ """
+ def __init__(self):
+ self.description="Reverts a tree (or aspects of it) to a revision "
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return iter_modified_file_completions(tree, arg)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "revert" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError, e:
+ raise CommandFailed(e)
+ spec=None
+ if options.revision is not None:
+ spec=options.revision
+ try:
+ if spec is not None:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+ revision = ancillary.comp_revision(tree)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ munger = None
+
+ if options.file_contents or options.file_perms or options.deletions\
+ or options.additions or options.renames or options.hunk_prompt:
+ munger = arch_compound.MungeOpts()
+ munger.set_hunk_prompt(cmdutil.colorize, cmdutil.user_hunk_confirm,
+ options.hunk_prompt)
+
+ if len(args) > 0 or options.logs or options.pattern_files or \
+ options.control:
+ if munger is None:
+ munger = cmdutil.arch_compound.MungeOpts(True)
+ munger.all_types(True)
+ if len(args) > 0:
+ t_cwd = arch_compound.tree_cwd(tree)
+ for name in args:
+ if len(t_cwd) > 0:
+ t_cwd += "/"
+ name = "./" + t_cwd + name
+ munger.add_keep_file(name);
+
+ if options.file_perms:
+ munger.file_perms = True
+ if options.file_contents:
+ munger.file_contents = True
+ if options.deletions:
+ munger.deletions = True
+ if options.additions:
+ munger.additions = True
+ if options.renames:
+ munger.renames = True
+ if options.logs:
+ munger.add_keep_pattern('^\./\{arch\}/[^=].*')
+ if options.control:
+ munger.add_keep_pattern("/\.arch-ids|^\./\{arch\}|"\
+ "/\.arch-inventory$")
+ if options.pattern_files:
+ munger.add_keep_pattern(options.pattern_files)
+
+ for line in arch_compound.revert(tree, revision, munger,
+ not options.no_output):
+ cmdutil.colorize(line)
+
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "cat-log" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai revert [options] [FILE...]")
+ parser.add_option("", "--contents", action="store_true",
+ dest="file_contents",
+ help="Revert file content changes")
+ parser.add_option("", "--permissions", action="store_true",
+ dest="file_perms",
+ help="Revert file permissions changes")
+ parser.add_option("", "--deletions", action="store_true",
+ dest="deletions",
+ help="Restore deleted files")
+ parser.add_option("", "--additions", action="store_true",
+ dest="additions",
+ help="Remove added files")
+ parser.add_option("", "--renames", action="store_true",
+ dest="renames",
+ help="Revert file names")
+ parser.add_option("--hunks", action="store_true",
+ dest="hunk_prompt", default=False,
+ help="Prompt which hunks to revert")
+ parser.add_option("--pattern-files", dest="pattern_files",
+ help="Revert files that match this pattern",
+ metavar="REGEX")
+ parser.add_option("--logs", action="store_true",
+ dest="logs", default=False,
+ help="Revert only logs")
+ parser.add_option("--control-files", action="store_true",
+ dest="control", default=False,
+ help="Revert logs and other control files")
+ parser.add_option("-n", "--no-output", action="store_true",
+ dest="no_output",
+ help="Don't keep an undo changeset")
+ parser.add_option("--revision", dest="revision",
+ help="Revert to the specified revision",
+ metavar="REVISION")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Reverts changes in the current working tree. If no flags are specified, all
+types of changes are reverted. Otherwise, only selected types of changes are
+reverted.
+
+If a revision is specified on the commandline, differences between the current
+tree and that revision are reverted. If a version is specified, the current
+tree is used to determine the revision.
+
+If files are specified, only those files listed will have any changes applied.
+To specify a renamed file, you can use either the old or new name. (or both!)
+
+Unless "-n" is specified, reversions can be undone with "redo".
+ """
+ return
+
+class Revision(BaseCommand):
+ """
+ Print a revision name based on a revision specifier
+ """
+ def __init__(self):
+ self.description="Prints the name of a revision"
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "revision" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ tree = None
+
+ spec=None
+ if len(args) > 0:
+ spec=args[0]
+ if len(args) > 1:
+ raise cmdutil.GetHelp
+ try:
+ if tree:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+ revision = cmdutil.determine_revision_arch(tree, spec)
+ except cmdutil.CantDetermineRevision, e:
+ print str(e)
+ return
+ print options.display(revision)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai revision [revision]")
+ parser.add_option("", "--location", action="store_const",
+ const=paths.determine_path, dest="display",
+ help="Show location instead of name", default=str)
+ parser.add_option("--import", action="store_const",
+ const=paths.determine_import_path, dest="display",
+ help="Show location of import file")
+ parser.add_option("--log", action="store_const",
+ const=paths.determine_log_path, dest="display",
+ help="Show location of log file")
+ parser.add_option("--patch", action="store_const",
+ dest="display", const=paths.determine_patch_path,
+ help="Show location of patchfile")
+ parser.add_option("--continuation", action="store_const",
+ const=paths.determine_continuation_path,
+ dest="display",
+ help="Show location of continuation file")
+ parser.add_option("--cacherev", action="store_const",
+ const=paths.determine_cacherev_path, dest="display",
+ help="Show location of cacherev file")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Expands aliases and prints the name of the specified revision. Instead of
+the name, several options can be used to print locations. If more than one is
+specified, the last one is used.
+ """
+ help_tree_spec()
+ return
+
+class Revisions(BaseCommand):
+ """
+ Print a revision name based on a revision specifier
+ """
+ def __init__(self):
+ self.description="Lists revisions"
+ self.cl_revisions = []
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "revisions" command.
+ """
+ (options, args) = self.get_parser().parse_args(cmdargs)
+ if len(args) > 1:
+ raise cmdutil.GetHelp
+ try:
+ self.tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ self.tree = None
+ if options.type == "default":
+ options.type = "archive"
+ try:
+ iter = cmdutil.revision_iterator(self.tree, options.type, args,
+ options.reverse, options.modified,
+ options.shallow)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ except cmdutil.CantDetermineVersion, e:
+ raise CommandFailedWrapper(e)
+ if options.skip is not None:
+ iter = cmdutil.iter_skip(iter, int(options.skip))
+
+ try:
+ for revision in iter:
+ log = None
+ if isinstance(revision, arch.Patchlog):
+ log = revision
+ revision=revision.revision
+ out = options.display(revision)
+ if out is not None:
+ print out
+ if log is None and (options.summary or options.creator or
+ options.date or options.merges):
+ log = revision.patchlog
+ if options.creator:
+ print " %s" % log.creator
+ if options.date:
+ print " %s" % time.strftime('%Y-%m-%d %H:%M:%S %Z', log.date)
+ if options.summary:
+ print " %s" % log.summary
+ if options.merges:
+ showed_title = False
+ for revision in log.merged_patches:
+ if not showed_title:
+ print " Merged:"
+ showed_title = True
+ print " %s" % revision
+ if len(self.cl_revisions) > 0:
+ print pylon.changelog_for_merge(self.cl_revisions)
+ except pylon.errors.TreeRootNone:
+ raise CommandFailedWrapper(
+ Exception("This option can only be used in a project tree."))
+
+ def changelog_append(self, revision):
+ if isinstance(revision, arch.Revision):
+ revision=arch.Patchlog(revision)
+ self.cl_revisions.append(revision)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai revisions [version/revision]")
+ select = cmdutil.OptionGroup(parser, "Selection options",
+ "Control which revisions are listed. These options"
+ " are mutually exclusive. If more than one is"
+ " specified, the last is used.")
+
+ cmdutil.add_revision_iter_options(select)
+ parser.add_option("", "--skip", dest="skip",
+ help="Skip revisions. Positive numbers skip from "
+ "beginning, negative skip from end.",
+ metavar="NUMBER")
+
+ parser.add_option_group(select)
+
+ format = cmdutil.OptionGroup(parser, "Revision format options",
+ "These control the appearance of listed revisions")
+ format.add_option("", "--location", action="store_const",
+ const=paths.determine_path, dest="display",
+ help="Show location instead of name", default=str)
+ format.add_option("--import", action="store_const",
+ const=paths.determine_import_path, dest="display",
+ help="Show location of import file")
+ format.add_option("--log", action="store_const",
+ const=paths.determine_log_path, dest="display",
+ help="Show location of log file")
+ format.add_option("--patch", action="store_const",
+ dest="display", const=paths.determine_patch_path,
+ help="Show location of patchfile")
+ format.add_option("--continuation", action="store_const",
+ const=paths.determine_continuation_path,
+ dest="display",
+ help="Show location of continuation file")
+ format.add_option("--cacherev", action="store_const",
+ const=paths.determine_cacherev_path, dest="display",
+ help="Show location of cacherev file")
+ format.add_option("--changelog", action="store_const",
+ const=self.changelog_append, dest="display",
+ help="Show location of cacherev file")
+ parser.add_option_group(format)
+ display = cmdutil.OptionGroup(parser, "Display format options",
+ "These control the display of data")
+ display.add_option("-r", "--reverse", action="store_true",
+ dest="reverse", help="Sort from newest to oldest")
+ display.add_option("-s", "--summary", action="store_true",
+ dest="summary", help="Show patchlog summary")
+ display.add_option("-D", "--date", action="store_true",
+ dest="date", help="Show patchlog date")
+ display.add_option("-c", "--creator", action="store_true",
+ dest="creator", help="Show the id that committed the"
+ " revision")
+ display.add_option("-m", "--merges", action="store_true",
+ dest="merges", help="Show the revisions that were"
+ " merged")
+ parser.add_option_group(display)
+ return parser
+ def help(self, parser=None):
+ """Attempt to explain the revisions command
+
+ :param parser: If supplied, used to determine options
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """List revisions.
+ """
+ help_tree_spec()
+
+
+class Get(BaseCommand):
+ """
+ Retrieve a revision from the archive
+ """
+ def __init__(self):
+ self.description="Retrieve a revision from the archive"
+ self.parser=self.get_parser()
+
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "get" command.
+ """
+ (options, args) = self.parser.parse_args(cmdargs)
+ if len(args) < 1:
+ return self.help()
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ tree = None
+
+ arch_loc = None
+ try:
+ revision, arch_loc = paths.full_path_decode(args[0])
+ except Exception, e:
+ revision = cmdutil.determine_revision_arch(tree, args[0],
+ check_existence=False, allow_package=True)
+ if len(args) > 1:
+ directory = args[1]
+ else:
+ directory = str(revision.nonarch)
+ if os.path.exists(directory):
+ raise DirectoryExists(directory)
+ cmdutil.ensure_archive_registered(revision.archive, arch_loc)
+ try:
+ cmdutil.ensure_revision_exists(revision)
+ except cmdutil.NoSuchRevision, e:
+ raise CommandFailedWrapper(e)
+
+ link = cmdutil.prompt ("get link")
+ for line in cmdutil.iter_get(revision, directory, link,
+ options.no_pristine,
+ options.no_greedy_add):
+ cmdutil.colorize(line)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "get" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai get revision [dir]")
+ parser.add_option("--no-pristine", action="store_true",
+ dest="no_pristine",
+ help="Do not make pristine copy for reference")
+ parser.add_option("--no-greedy-add", action="store_true",
+ dest="no_greedy_add",
+ help="Never add to greedy libraries")
+
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Expands aliases and constructs a project tree for a revision. If the optional
+"dir" argument is provided, the project tree will be stored in this directory.
+ """
+ help_tree_spec()
+ return
+
+class PromptCmd(cmd.Cmd):
+ def __init__(self):
+ cmd.Cmd.__init__(self)
+ self.prompt = "Fai> "
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+ self.set_title()
+ self.set_prompt()
+ self.fake_aba = abacmds.AbaCmds()
+ self.identchars += '-'
+ self.history_file = os.path.expanduser("~/.fai-history")
+ readline.set_completer_delims(string.whitespace)
+ if os.access(self.history_file, os.R_OK) and \
+ os.path.isfile(self.history_file):
+ readline.read_history_file(self.history_file)
+ self.cwd = os.getcwd()
+
+ def write_history(self):
+ readline.write_history_file(self.history_file)
+
+ def do_quit(self, args):
+ self.write_history()
+ sys.exit(0)
+
+ def do_exit(self, args):
+ self.do_quit(args)
+
+ def do_EOF(self, args):
+ print
+ self.do_quit(args)
+
+ def postcmd(self, line, bar):
+ self.set_title()
+ self.set_prompt()
+
+ def set_prompt(self):
+ if self.tree is not None:
+ try:
+ prompt = pylon.alias_or_version(self.tree.tree_version,
+ self.tree,
+ full=False)
+ if prompt is not None:
+ prompt = " " + prompt
+ except:
+ prompt = ""
+ else:
+ prompt = ""
+ self.prompt = "Fai%s> " % prompt
+
+ def set_title(self, command=None):
+ try:
+ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
+ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+ command = ""
+ sys.stdout.write(terminal.term_title("Fai %s %s" % (command, version)))
+
+ def do_cd(self, line):
+ if line == "":
+ line = "~"
+ line = os.path.expanduser(line)
+ if os.path.isabs(line):
+ newcwd = line
+ else:
+ newcwd = self.cwd+'/'+line
+ newcwd = os.path.normpath(newcwd)
+ try:
+ os.chdir(newcwd)
+ self.cwd = newcwd
+ except Exception, e:
+ print e
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+ def do_help(self, line):
+ Help()(line)
+
+ def default(self, line):
+ args = line.split()
+ if find_command(args[0]):
+ try:
+ find_command(args[0]).do_command(args[1:])
+ except cmdutil.BadCommandOption, e:
+ print e
+ except cmdutil.GetHelp, e:
+ find_command(args[0]).help()
+ except CommandFailed, e:
+ print e
+ except arch.errors.ArchiveNotRegistered, e:
+ print e
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+ except arch.util.ExecProblem, e:
+ print e.proc.error.rstrip('\n')
+ except cmdutil.CantDetermineVersion, e:
+ print e
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ except Exception, e:
+ print "Unhandled error:\n%s" % errors.exception_str(e)
+
+ elif suggestions.has_key(args[0]):
+ print suggestions[args[0]]
+
+ elif self.fake_aba.is_command(args[0]):
+ tree = None
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ pass
+ cmd = self.fake_aba.is_command(args[0])
+ try:
+ cmd.run(cmdutil.expand_prefix_alias(args[1:], tree))
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+
+ elif options.tla_fallthrough and args[0] != "rm" and \
+ cmdutil.is_tla_command(args[0]):
+ try:
+ tree = None
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ pass
+ args = cmdutil.expand_prefix_alias(args, tree)
+ arch.util.exec_safe('tla', args, stderr=sys.stderr,
+ expected=(0, 1))
+ except arch.util.ExecProblem, e:
+ pass
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+ else:
+ try:
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ tree = None
+ args=line.split()
+ os.system(" ".join(cmdutil.expand_prefix_alias(args, tree)))
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+
+ def completenames(self, text, line, begidx, endidx):
+ completions = []
+ iter = iter_command_names(self.fake_aba)
+ try:
+ if len(line) > 0:
+ arg = line.split()[-1]
+ else:
+ arg = ""
+ iter = cmdutil.iter_munged_completions(iter, arg, text)
+ except Exception, e:
+ print e
+ return list(iter)
+
+ def completedefault(self, text, line, begidx, endidx):
+ """Perform completion for native commands.
+
+ :param text: The text to complete
+ :type text: str
+ :param line: The entire line to complete
+ :type line: str
+ :param begidx: The start of the text in the line
+ :type begidx: int
+ :param endidx: The end of the text in the line
+ :type endidx: int
+ """
+ try:
+ (cmd, args, foo) = self.parseline(line)
+ command_obj=find_command(cmd)
+ if command_obj is not None:
+ return command_obj.complete(args.split(), text)
+ elif not self.fake_aba.is_command(cmd) and \
+ cmdutil.is_tla_command(cmd):
+ iter = cmdutil.iter_supported_switches(cmd)
+ if len(args) > 0:
+ arg = args.split()[-1]
+ else:
+ arg = ""
+ if arg.startswith("-"):
+ return list(cmdutil.iter_munged_completions(iter, arg,
+ text))
+ else:
+ return list(cmdutil.iter_munged_completions(
+ cmdutil.iter_file_completions(arg), arg, text))
+
+
+ elif cmd == "cd":
+ if len(args) > 0:
+ arg = args.split()[-1]
+ else:
+ arg = ""
+ iter = cmdutil.iter_dir_completions(arg)
+ iter = cmdutil.iter_munged_completions(iter, arg, text)
+ return list(iter)
+ elif len(args)>0:
+ arg = args.split()[-1]
+ iter = cmdutil.iter_file_completions(arg)
+ return list(cmdutil.iter_munged_completions(iter, arg, text))
+ else:
+ return self.completenames(text, line, begidx, endidx)
+ except Exception, e:
+ print e
+
+
+def iter_command_names(fake_aba):
+ for entry in cmdutil.iter_combine([commands.iterkeys(),
+ fake_aba.get_commands(),
+ cmdutil.iter_tla_commands(False)]):
+ if not suggestions.has_key(str(entry)):
+ yield entry
+
+
+def iter_source_file_completions(tree, arg):
+ treepath = arch_compound.tree_cwd(tree)
+ if len(treepath) > 0:
+ dirs = [treepath]
+ else:
+ dirs = None
+ for file in tree.iter_inventory(dirs, source=True, both=True):
+ file = file_completion_match(file, treepath, arg)
+ if file is not None:
+ yield file
+
+
+def iter_untagged(tree, dirs):
+ for file in arch_core.iter_inventory_filter(tree, dirs, tagged=False,
+ categories=arch_core.non_root,
+ control_files=True):
+ yield file.name
+
+
+def iter_untagged_completions(tree, arg):
+ """Generate an iterator for all visible untagged files that match arg.
+
+ :param tree: The tree to look for untagged files in
+ :type tree: `arch.WorkingTree`
+ :param arg: The argument to match
+ :type arg: str
+ :return: An iterator of all matching untagged files
+ :rtype: iterator of str
+ """
+ treepath = arch_compound.tree_cwd(tree)
+ if len(treepath) > 0:
+ dirs = [treepath]
+ else:
+ dirs = None
+
+ for file in iter_untagged(tree, dirs):
+ file = file_completion_match(file, treepath, arg)
+ if file is not None:
+ yield file
+
+
+def file_completion_match(file, treepath, arg):
+ """Determines whether a file within an arch tree matches the argument.
+
+ :param file: The rooted filename
+ :type file: str
+ :param treepath: The path to the cwd within the tree
+ :type treepath: str
+ :param arg: The prefix to match
+ :return: The completion name, or None if not a match
+ :rtype: str
+ """
+ if not file.startswith(treepath):
+ return None
+ if treepath != "":
+ file = file[len(treepath)+1:]
+
+ if not file.startswith(arg):
+ return None
+ if os.path.isdir(file):
+ file += '/'
+ return file
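+
+# Illustrative note (added here; not part of the original source, and the
+# paths are hypothetical): with treepath "src" and arg "fo",
+# file_completion_match("src/foo.py", "src", "fo") returns "foo.py", while
+# "src/bar.py" returns None; matching directories gain a trailing "/".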
+
+def iter_modified_file_completions(tree, arg):
+ """Returns a list of modified files that match the specified prefix.
+
+ :param tree: The current tree
+ :type tree: `arch.WorkingTree`
+ :param arg: The prefix to match
+ :type arg: str
+ """
+ treepath = arch_compound.tree_cwd(tree)
+ tmpdir = util.tmpdir()
+ changeset = tmpdir+"/changeset"
+ completions = []
+ revision = cmdutil.determine_revision_tree(tree)
+ for line in arch.iter_delta(revision, tree, changeset):
+ if isinstance(line, arch.FileModification):
+ file = file_completion_match(line.name[1:], treepath, arg)
+ if file is not None:
+ completions.append(file)
+ shutil.rmtree(tmpdir)
+ return completions
+
+class Shell(BaseCommand):
+ def __init__(self):
+ self.description = "Runs Fai as a shell"
+
+ def do_command(self, cmdargs):
+ if len(cmdargs)!=0:
+ raise cmdutil.GetHelp
+ prompt = PromptCmd()
+ try:
+ prompt.cmdloop()
+ finally:
+ prompt.write_history()
+
+class AddID(BaseCommand):
+ """
+ Adds an inventory id for the given file
+ """
+ def __init__(self):
+ self.description="Add an inventory id for a given file"
+
+ def get_completer(self, arg, index):
+ tree = arch.tree_root()
+ return iter_untagged_completions(tree, arg)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "add-id" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError, e:
+ raise pylon.errors.CommandFailedWrapper(e)
+
+
+ if (len(args) == 0) == (options.untagged == False):
+ raise cmdutil.GetHelp
+
+ #if options.id and len(args) != 1:
+ # print "If --id is specified, only one file can be named."
+ # return
+
+ method = tree.tagging_method
+
+ if options.id_type == "tagline":
+ if method != "tagline":
+ if not cmdutil.prompt("Tagline in other tree"):
+ if method == "explicit" or method == "implicit":
+ options.id_type = method
+ else:
+ print "add-id not supported for \"%s\" tagging method"\
+ % method
+ return
+
+ elif options.id_type == "implicit":
+ if method != "implicit":
+ if not cmdutil.prompt("Implicit in other tree"):
+ if method == "explicit" or method == "tagline":
+ options.id_type = method
+ else:
+ print "add-id not supported for \"%s\" tagging method"\
+ % method
+ return
+ elif options.id_type == "explicit":
+ if method != "tagline" and method != "explicit":
+ if not cmdutil.prompt("Explicit in other tree"):
+ print "add-id not supported for \"%s\" tagging method" % \
+ method
+ return
+
+ if options.id_type == "auto":
+ if method != "tagline" and method != "explicit" \
+ and method !="implicit":
+ print "add-id not supported for \"%s\" tagging method" % method
+ return
+ else:
+ options.id_type = method
+ if options.untagged:
+ args = None
+ self.add_ids(tree, options.id_type, args)
+
+ def add_ids(self, tree, id_type, files=()):
+ """Add inventory ids to files.
+
+ :param tree: the tree the files are in
+ :type tree: `arch.WorkingTree`
+ :param id_type: the type of id to add: "explicit" or "tagline"
+ :type id_type: str
+ :param files: The list of files to add. If None do all untagged.
+ :type files: tuple of str
+ """
+
+ untagged = (files is None)
+ if untagged:
+ files = list(iter_untagged(tree, None))
+ previous_files = []
+ while len(files) > 0:
+ previous_files.extend(files)
+ if id_type == "explicit":
+ cmdutil.add_id(files)
+ elif id_type == "tagline" or id_type == "implicit":
+ for file in files:
+ try:
+ implicit = (id_type == "implicit")
+ cmdutil.add_tagline_or_explicit_id(file, False,
+ implicit)
+ except cmdutil.AlreadyTagged:
+ print "\"%s\" already has a tagline." % file
+ except cmdutil.NoCommentSyntax:
+ pass
+ #do inventory after tagging until no untagged files are encountered
+ if untagged:
+ files = []
+ for file in iter_untagged(tree, None):
+ if not file in previous_files:
+ files.append(file)
+
+ else:
+ break
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "add-id" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai add-id file1 [file2] [file3]...")
+# ddaa suggests removing this to promote GUIDs. Let's see who squalks.
+# parser.add_option("-i", "--id", dest="id",
+# help="Specify id for a single file", default=None)
+ parser.add_option("--tltl", action="store_true",
+ dest="lord_style", help="Use Tom Lord's style of id.")
+ parser.add_option("--explicit", action="store_const",
+ const="explicit", dest="id_type",
+ help="Use an explicit id", default="auto")
+ parser.add_option("--tagline", action="store_const",
+ const="tagline", dest="id_type",
+ help="Use a tagline id")
+ parser.add_option("--implicit", action="store_const",
+ const="implicit", dest="id_type",
+ help="Use an implicit id (deprecated)")
+ parser.add_option("--untagged", action="store_true",
+ dest="untagged", default=False,
+ help="tag all untagged files")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Adds inventory ids to the specified files and directories. If --untagged is
+specified, adds ids to all untagged files and directories.
+ """
+ return
+
+
+class Merge(BaseCommand):
+ """
+ Merges changes from other versions into the current tree
+ """
+ def __init__(self):
+ self.description="Merges changes from other versions"
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+
+ def get_completer(self, arg, index):
+ if self.tree is None:
+ raise arch.errors.TreeRootError
+ return cmdutil.merge_completions(self.tree, arg, index)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "merge" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ if options.diff3:
+ action="star-merge"
+ else:
+ action = options.action
+
+ if self.tree is None:
+ raise arch.errors.TreeRootError(os.getcwd())
+ if cmdutil.has_changed(ancillary.comp_revision(self.tree)):
+ raise UncommittedChanges(self.tree)
+
+ if len(args) > 0:
+ revisions = []
+ for arg in args:
+ revisions.append(cmdutil.determine_revision_arch(self.tree,
+ arg))
+ source = "from commandline"
+ else:
+ revisions = ancillary.iter_partner_revisions(self.tree,
+ self.tree.tree_version)
+ source = "from partner version"
+ revisions = misc.rewind_iterator(revisions)
+ try:
+ revisions.next()
+ revisions.rewind()
+ except StopIteration, e:
+ revision = cmdutil.tag_cur(self.tree)
+ if revision is None:
+ raise CantDetermineRevision("", "No version specified, no "
+ "partner-versions, and no tag"
+ " source")
+ revisions = [revision]
+ source = "from tag source"
+ for revision in revisions:
+ cmdutil.ensure_archive_registered(revision.archive)
+ cmdutil.colorize(arch.Chatter("* Merging %s [%s]" %
+ (revision, source)))
+ if action=="native-merge" or action=="update":
+ if self.native_merge(revision, action) == 0:
+ continue
+ elif action=="star-merge":
+ try:
+ self.star_merge(revision, options.diff3)
+ except errors.MergeProblem, e:
+ break
+ if cmdutil.has_changed(self.tree.tree_version):
+ break
+
+ def star_merge(self, revision, diff3):
+ """Perform a star-merge on the current tree.
+
+ :param revision: The revision to use for the merge
+ :type revision: `arch.Revision`
+ :param diff3: If true, do a diff3 merge
+ :type diff3: bool
+ """
+ try:
+ for line in self.tree.iter_star_merge(revision, diff3=diff3):
+ cmdutil.colorize(line)
+ except arch.util.ExecProblem, e:
+ if e.proc.status is not None and e.proc.status == 1:
+ if e.proc.error:
+ print e.proc.error
+ raise MergeProblem
+ else:
+ raise
+
+ def native_merge(self, other_revision, action):
+ """Perform a native-merge on the current tree.
+
+ :param other_revision: The revision to use for the merge
+ :type other_revision: `arch.Revision`
+ :return: 0 if the merge was skipped, 1 if it was applied
+ """
+ other_tree = arch_compound.find_or_make_local_revision(other_revision)
+ try:
+ if action == "native-merge":
+ ancestor = arch_compound.merge_ancestor2(self.tree, other_tree,
+ other_revision)
+ elif action == "update":
+ ancestor = arch_compound.tree_latest(self.tree,
+ other_revision.version)
+ except CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ cmdutil.colorize(arch.Chatter("* Found common ancestor %s" % ancestor))
+ if (ancestor == other_revision):
+ cmdutil.colorize(arch.Chatter("* Skipping redundant merge %s"
+ % ancestor))
+ return 0
+ delta = cmdutil.apply_delta(ancestor, other_tree, self.tree)
+ for line in cmdutil.iter_apply_delta_filter(delta):
+ cmdutil.colorize(line)
+ return 1
+
+
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "merge" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai merge [VERSION]")
+ parser.add_option("-s", "--star-merge", action="store_const",
+ dest="action", help="Use star-merge",
+ const="star-merge", default="native-merge")
+ parser.add_option("--update", action="store_const",
+ dest="action", help="Use update picker",
+ const="update")
+ parser.add_option("--diff3", action="store_true",
+ dest="diff3",
+ help="Use diff3 for merge (implies star-merge)")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Performs a merge operation using the specified version.
+ """
+ return
+
+class ELog(BaseCommand):
+ """
+ Produces a raw patchlog and invokes the user's editor
+ """
+ def __init__(self):
+ self.description="Edit a patchlog to commit"
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "elog" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ if self.tree is None:
+ raise arch.errors.TreeRootError
+
+ try:
+ edit_log(self.tree, self.tree.tree_version)
+ except pylon.errors.NoEditorSpecified, e:
+ raise pylon.errors.CommandFailedWrapper(e)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "elog" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai elog")
+ return parser
+
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Invokes $EDITOR to produce a log for committing.
+ """
+ return
+
+def edit_log(tree, version):
+ """Makes and edits the log for a tree. Does all kinds of fancy things
+ like log templates and merge summaries and log-for-merge
+
+ :param tree: The tree to edit the log for
+ :type tree: `arch.WorkingTree`
+ """
+ #ensure we have an editor before preparing the log
+ cmdutil.find_editor()
+ log = tree.log_message(create=False, version=version)
+ log_is_new = False
+ if log is None or cmdutil.prompt("Overwrite log"):
+ if log is not None:
+ os.remove(log.name)
+ log = tree.log_message(create=True, version=version)
+ log_is_new = True
+ tmplog = log.name
+ template = pylon.log_template_path(tree)
+ if template:
+ shutil.copyfile(template, tmplog)
+ comp_version = ancillary.comp_revision(tree).version
+ new_merges = cmdutil.iter_new_merges(tree, comp_version)
+ new_merges = cmdutil.direct_merges(new_merges)
+ log["Summary"] = pylon.merge_summary(new_merges,
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+ if cmdutil.prompt("changelog for merge"):
+ mergestuff = "Patches applied:\n"
+ mergestuff += pylon.changelog_for_merge(new_merges)
+ else:
+ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+ cmdutil.invoke_editor(log.name)
+ except:
+ if log_is_new:
+ os.remove(log.name)
+ raise
+
+
+class MirrorArchive(BaseCommand):
+ """
+ Updates a mirror from an archive
+ """
+ def __init__(self):
+ self.description="Update a mirror from an archive"
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "mirror-archive" command.
+ """
+
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ if len(args) > 1:
+ raise GetHelp
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+
+ if len(args) == 0:
+ if tree is not None:
+ name = tree.tree_version()
+ else:
+ name = cmdutil.expand_alias(args[0], tree)
+ name = arch.NameParser(name)
+
+ to_arch = name.get_archive()
+ from_arch = cmdutil.get_mirror_source(arch.Archive(to_arch))
+ limit = name.get_nonarch()
+
+ iter = arch_core.mirror_archive(from_arch,to_arch, limit)
+ for line in arch.chatter_classifier(iter):
+ cmdutil.colorize(line)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "mirror-archive" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai mirror-archive ARCHIVE")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Updates a mirror from an archive. If a branch, package, or version is
+supplied, only changes under it are mirrored.
+ """
+ return
+
+def help_tree_spec():
+ print """Specifying revisions (default: tree)
+Revisions may be specified by alias, revision, version or patchlevel.
+Revisions or versions may be fully qualified. Unqualified revisions, versions,
+or patchlevels use the archive of the current project tree. Versions will
+use the latest patchlevel in the tree. Patchlevels will use the current tree-
+version.
+
+Use "alias" to list available (user and automatic) aliases."""
+
+auto_alias = [
+"acur",
+"The latest revision in the archive of the tree-version. You can specify \
+a different version like so: acur:foo--bar--0 (aliases can be used)",
+"tcur",
+"""(tree current) The latest revision in the tree of the tree-version. \
+You can specify a different version like so: tcur:foo--bar--0 (aliases can be \
+used).""",
+"tprev" ,
+"""(tree previous) The previous revision in the tree of the tree-version. To \
+specify an older revision, use a number, e.g. "tprev:4" """,
+"tanc" ,
+"""(tree ancestor) The ancestor revision of the tree To specify an older \
+revision, use a number, e.g. "tanc:4".""",
+"tdate" ,
+"""(tree date) The latest revision from a given date, e.g. "tdate:July 6".""",
+"tmod" ,
+""" (tree modified) The latest revision to modify a given file, e.g. \
+"tmod:engine.cpp" or "tmod:engine.cpp:16".""",
+"ttag" ,
+"""(tree tag) The revision that was tagged into the current tree revision, \
+according to the tree""",
+"tagcur",
+"""(tag current) The latest revision of the version that the current tree \
+was tagged from.""",
+"mergeanc" ,
+"""The common ancestor of the current tree and the specified revision. \
+Defaults to the first partner-version's latest revision or to tagcur.""",
+]
+
+
+def is_auto_alias(name):
+ """Determine whether a name is an auto alias name
+
+ :param name: the name to check
+ :type name: str
+ :return: True if the name is an auto alias, false if not
+ :rtype: bool
+ """
+ return name in [f for (f, v) in pylon.util.iter_pairs(auto_alias)]
+
+
+def display_def(iter, wrap = 80):
+ """Display a list of definitions
+
+ :param iter: iter of name, definition pairs
+ :type iter: iter of (str, str)
+ :param wrap: The width for text wrapping
+ :type wrap: int
+ """
+ vals = list(iter)
+ maxlen = 0
+ for (key, value) in vals:
+ if len(key) > maxlen:
+ maxlen = len(key)
+ for (key, value) in vals:
+ tw=textwrap.TextWrapper(width=wrap,
+ initial_indent=key.rjust(maxlen)+" : ",
+ subsequent_indent="".rjust(maxlen+3))
+ print tw.fill(value)
+
+
+def help_aliases(tree):
+ print """Auto-generated aliases"""
+ display_def(pylon.util.iter_pairs(auto_alias))
+ print "User aliases"
+ display_def(ancillary.iter_all_alias(tree))
+
+class Inventory(BaseCommand):
+ """List the status of files in the tree"""
+ def __init__(self):
+ self.description=self.__doc__
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "inventory" command.
+ """
+
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ tree = arch.tree_root()
+ categories = []
+
+ if (options.source):
+ categories.append(arch_core.SourceFile)
+ if (options.precious):
+ categories.append(arch_core.PreciousFile)
+ if (options.backup):
+ categories.append(arch_core.BackupFile)
+ if (options.junk):
+ categories.append(arch_core.JunkFile)
+
+ if len(categories) == 1:
+ show_leading = False
+ else:
+ show_leading = True
+
+ if len(categories) == 0:
+ categories = None
+
+ if options.untagged:
+ categories = arch_core.non_root
+ show_leading = False
+ tagged = False
+ else:
+ tagged = None
+
+ for file in arch_core.iter_inventory_filter(tree, None,
+ control_files=options.control_files,
+ categories = categories, tagged=tagged):
+ print arch_core.file_line(file,
+ category = show_leading,
+ untagged = show_leading,
+ id = options.ids)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "inventory" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai inventory [options]")
+ parser.add_option("--ids", action="store_true", dest="ids",
+ help="Show file ids")
+ parser.add_option("--control", action="store_true",
+ dest="control_files", help="include control files")
+ parser.add_option("--source", action="store_true", dest="source",
+ help="List source files")
+ parser.add_option("--backup", action="store_true", dest="backup",
+ help="List backup files")
+ parser.add_option("--precious", action="store_true", dest="precious",
+ help="List precious files")
+ parser.add_option("--junk", action="store_true", dest="junk",
+ help="List junk files")
+ parser.add_option("--unrecognized", action="store_true",
+ dest="unrecognized", help="List unrecognized files")
+ parser.add_option("--untagged", action="store_true",
+ dest="untagged", help="List only untagged files")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Lists the status of files in the tree:
+S source
+P precious
+B backup
+J junk
+U unrecognized
+T tree root
+? untagged-source
+Leading letters are not displayed if only one kind of file is shown
+ """
+ return
+
+
+class Alias(BaseCommand):
+ """List or adjust aliases"""
+ def __init__(self):
+ self.description=self.__doc__
+
+ def get_completer(self, arg, index):
+ if index > 2:
+ return ()
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+ if index == 0:
+ return [part[0]+" " for part in ancillary.iter_all_alias(self.tree)]
+ elif index == 1:
+ return cmdutil.iter_revision_completions(arg, self.tree)
+
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs the "alias" command.
+ """
+
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+
+ try:
+ options.action(args, options)
+ except cmdutil.ForbiddenAliasSyntax, e:
+ raise CommandFailedWrapper(e)
+
+ def no_prefix(self, alias):
+ if alias.startswith("^"):
+ alias = alias[1:]
+ return alias
+
+ def arg_dispatch(self, args, options):
+ """Add, modify, or list aliases, depending on number of arguments
+
+ :param args: The list of commandline arguments
+ :type args: list of str
+ :param options: The commandline options
+ """
+ if len(args) == 0:
+ help_aliases(self.tree)
+ return
+ else:
+ alias = self.no_prefix(args[0])
+ if len(args) == 1:
+ self.print_alias(alias)
+ elif (len(args)) == 2:
+ self.add(alias, args[1], options)
+ else:
+ raise cmdutil.GetHelp
+
+ def print_alias(self, alias):
+ answer = None
+ if is_auto_alias(alias):
+ raise pylon.errors.IsAutoAlias(alias, "\"%s\" is an auto alias."
+ " Use \"revision\" to expand auto aliases." % alias)
+ for pair in ancillary.iter_all_alias(self.tree):
+ if pair[0] == alias:
+ answer = pair[1]
+ if answer is not None:
+ print answer
+ else:
+ print "The alias %s is not assigned." % alias
+
+ def add(self, alias, expansion, options):
+ """Add or modify aliases
+
+ :param alias: The alias name to create/modify
+ :type alias: str
+ :param expansion: The expansion to assign to the alias name
+ :type expansion: str
+ :param options: The commandline options
+ """
+ if is_auto_alias(alias):
+ raise IsAutoAlias(alias)
+ newlist = ""
+ written = False
+ new_line = "%s=%s\n" % (alias, cmdutil.expand_alias(expansion,
+ self.tree))
+ ancillary.check_alias(new_line.rstrip("\n"), [alias, expansion])
+
+ for pair in self.get_iterator(options):
+ if pair[0] != alias:
+ newlist+="%s=%s\n" % (pair[0], pair[1])
+ elif not written:
+ newlist+=new_line
+ written = True
+ if not written:
+ newlist+=new_line
+ self.write_aliases(newlist, options)
+
+ def delete(self, args, options):
+ """Delete the specified alias
+
+ :param args: The list of arguments
+ :type args: list of str
+ :param options: The commandline options
+ """
+ deleted = False
+ if len(args) != 1:
+ raise cmdutil.GetHelp
+ alias = self.no_prefix(args[0])
+ if is_auto_alias(alias):
+ raise IsAutoAlias(alias)
+ newlist = ""
+ for pair in self.get_iterator(options):
+ if pair[0] != alias:
+ newlist+="%s=%s\n" % (pair[0], pair[1])
+ else:
+ deleted = True
+ if not deleted:
+ raise errors.NoSuchAlias(alias)
+ self.write_aliases(newlist, options)
+
+ def get_alias_file(self, options):
+ """Return the name of the alias file to use
+
+ :param options: The commandline options
+ """
+ if options.tree:
+ if self.tree is None:
+ self.tree = arch.tree_root()
+ return str(self.tree)+"/{arch}/+aliases"
+ else:
+ return "~/.aba/aliases"
+
+ def get_iterator(self, options):
+ """Return the alias iterator to use
+
+ :param options: The commandline options
+ """
+ return ancillary.iter_alias(self.get_alias_file(options))
+
+ def write_aliases(self, newlist, options):
+ """Safely rewrite the alias file
+ :param newlist: The new list of aliases
+ :type newlist: str
+ :param options: The commandline options
+ """
+ filename = os.path.expanduser(self.get_alias_file(options))
+ file = util.NewFileVersion(filename)
+ file.write(newlist)
+ file.commit()
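+
+ # Illustrative note (added; the alias value shown is hypothetical): the
+ # alias file is one "name=expansion" line per alias, e.g.
+ # "frob=jdoe@example.com--2005/frob--devel--1.0", and write_aliases()
+ # rewrites the whole file through util.NewFileVersion.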
+
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "alias" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai alias [ALIAS] [NAME]")
+ parser.add_option("-d", "--delete", action="store_const", dest="action",
+ const=self.delete, default=self.arg_dispatch,
+ help="Delete an alias")
+ parser.add_option("--tree", action="store_true", dest="tree",
+ help="Create a per-tree alias", default=False)
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Lists current aliases or modifies the list of aliases.
+
+If no arguments are supplied, aliases will be listed. If two arguments are
+supplied, the specified alias will be created or modified. If -d or --delete
+is supplied, the specified alias will be deleted.
+
+You can create aliases that refer to any fully-qualified part of the
+Arch namespace, e.g.
+archive,
+archive/category,
+archive/category--branch,
+archive/category--branch--version (my favourite)
+archive/category--branch--version--patchlevel
+
+Aliases can be used automatically by native commands. To use them
+with external or tla commands, prefix them with ^ (you can do this
+with native commands, too).
+"""
+
+
+class RequestMerge(BaseCommand):
+ """Submit a merge request to Bug Goo"""
+ def __init__(self):
+ self.description=self.__doc__
+
+ def do_command(self, cmdargs):
+ """Submit a merge request
+
+ :param cmdargs: The commandline arguments
+ :type cmdargs: list of str
+ """
+ parser = self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ cmdutil.find_editor()
+ except pylon.errors.NoEditorSpecified, e:
+ raise pylon.errors.CommandFailedWrapper(e)
+ try:
+ self.tree=arch.tree_root()
+ except:
+ self.tree=None
+ base, revisions = self.revision_specs(args)
+ message = self.make_headers(base, revisions)
+ message += self.make_summary(revisions)
+ path = self.edit_message(message)
+ message = self.tidy_message(path)
+ if cmdutil.prompt("Send merge"):
+ self.send_message(message)
+ print "Merge request sent"
+
+ def make_headers(self, base, revisions):
+ """Produce email and Bug Goo header strings
+
+ :param base: The base revision to apply merges to
+ :type base: `arch.Revision`
+ :param revisions: The revisions to replay into the base
+ :type revisions: list of `arch.Patchlog`
+ :return: The headers
+ :rtype: str
+ """
+ headers = "To: gnu-arch-users@gnu.org\n"
+ headers += "From: %s\n" % options.fromaddr
+ if len(revisions) == 1:
+ headers += "Subject: [MERGE REQUEST] %s\n" % revisions[0].summary
+ else:
+ headers += "Subject: [MERGE REQUEST]\n"
+ headers += "\n"
+ headers += "Base-Revision: %s\n" % base
+ for revision in revisions:
+ headers += "Revision: %s\n" % revision.revision
+ headers += "Bug: \n\n"
+ return headers
+
+ def make_summary(self, logs):
+ """Generate a summary of merges
+
+ :param logs: the patchlogs that were directly added by the merges
+ :type logs: list of `arch.Patchlog`
+ :return: the summary
+ :rtype: str
+ """
+ summary = ""
+ for log in logs:
+ summary+=str(log.revision)+"\n"
+ summary+=log.summary+"\n"
+ if log.description.strip():
+ summary+=log.description.strip('\n')+"\n\n"
+ return summary
+
+ def revision_specs(self, args):
+ """Determine the base and merge revisions from tree and arguments.
+
+ :param args: The parsed arguments
+ :type args: list of str
+ :return: The base revision and merge revisions
+ :rtype: `arch.Revision`, list of `arch.Patchlog`
+ """
+ if len(args) > 0:
+ target_revision = cmdutil.determine_revision_arch(self.tree,
+ args[0])
+ else:
+ target_revision = arch_compound.tree_latest(self.tree)
+ if len(args) > 1:
+ merges = [ arch.Patchlog(cmdutil.determine_revision_arch(
+ self.tree, f)) for f in args[1:] ]
+ else:
+ if self.tree is None:
+ raise CantDetermineRevision("", "Not in a project tree")
+ merge_iter = cmdutil.iter_new_merges(self.tree,
+ target_revision.version,
+ False)
+ merges = [f for f in cmdutil.direct_merges(merge_iter)]
+ return (target_revision, merges)
+
+ def edit_message(self, message):
+ """Edit an email message in the user's standard editor
+
+ :param message: The message to edit
+ :type message: str
+ :return: the path of the edited message
+ :rtype: str
+ """
+ if self.tree is None:
+ path = os.getcwd()
+ else:
+ path = self.tree
+ path += "/,merge-request"
+ file = open(path, 'w')
+ file.write(message)
+ file.flush()
+ cmdutil.invoke_editor(path)
+ return path
+
+ def tidy_message(self, path):
+ """Validate and clean up message.
+
+ :param path: The path to the message to clean up
+ :type path: str
+ :return: The parsed message
+ :rtype: `email.Message`
+ """
+ mail = email.message_from_file(open(path))
+ if mail["Subject"].strip() == "[MERGE REQUEST]":
+ raise BlandSubject
+
+ request = email.message_from_string(mail.get_payload())
+ if request.has_key("Bug"):
+ if request["Bug"].strip()=="":
+ del request["Bug"]
+ mail.set_payload(request.as_string())
+ return mail
+
+ def send_message(self, message):
+ """Send a message, using its headers to address it.
+
+ :param message: The message to send
+ :type message: `email.Message`"""
+ server = smtplib.SMTP("localhost")
+ server.sendmail(message['From'], message['To'], message.as_string())
+ server.quit()
+
+ def help(self, parser=None):
+ """Print a usage message
+
+ :param parser: The options parser to use
+ :type parser: `cmdutil.CmdOptionParser`
+ """
+ if parser is None:
+ parser = self.get_parser()
+ parser.print_help()
+ print """
+Sends a merge request formatted for Bug Goo. Intended use: get the tree
+you'd like to merge into. Apply the merges you want. Invoke request-merge.
+The merge request will open in your $EDITOR.
+
+When no TARGET is specified, it uses the current tree revision. When
+no MERGE is specified, it uses the direct merges (as in "revisions
+--direct-merges"). But you can specify just the TARGET, or all the MERGE
+revisions.
+"""
+
+ def get_parser(self):
+ """Produce a commandline parser for this command.
+
+ :rtype: `cmdutil.CmdOptionParser`
+ """
+ parser=cmdutil.CmdOptionParser("request-merge [TARGET] [MERGE1...]")
+ return parser
+
+commands = {
+'changes' : Changes,
+'help' : Help,
+'update': Update,
+'apply-changes':ApplyChanges,
+'cat-log': CatLog,
+'commit': Commit,
+'revision': Revision,
+'revisions': Revisions,
+'get': Get,
+'revert': Revert,
+'shell': Shell,
+'add-id': AddID,
+'merge': Merge,
+'elog': ELog,
+'mirror-archive': MirrorArchive,
+'ninventory': Inventory,
+'alias' : Alias,
+'request-merge': RequestMerge,
+}
+
+def my_import(mod_name):
+ module = __import__(mod_name)
+ components = mod_name.split('.')
+ for comp in components[1:]:
+ module = getattr(module, comp)
+ return module
+
+def plugin(mod_name):
+ module = my_import(mod_name)
+ module.add_command(commands)
+
+for file in os.listdir(sys.path[0]+"/command"):
+ if len(file) > 3 and file[-3:] == ".py" and file != "__init__.py":
+ plugin("command."+file[:-3])
+
+suggestions = {
+'apply-delta' : "Try \"apply-changes\".",
+'delta' : "To compare two revisions, use \"changes\".",
+'diff-rev' : "To compare two revisions, use \"changes\".",
+'undo' : "To undo local changes, use \"revert\".",
+'undelete' : "To undo only deletions, use \"revert --deletions\"",
+'missing-from' : "Try \"revisions --missing-from\".",
+'missing' : "Try \"revisions --missing\".",
+'missing-merge' : "Try \"revisions --partner-missing\".",
+'new-merges' : "Try \"revisions --new-merges\".",
+'cachedrevs' : "Try \"revisions --cacherevs\". (no 'd')",
+'logs' : "Try \"revisions --logs\"",
+'tree-source' : "Use the \"^ttag\" alias (\"revision ^ttag\")",
+'latest-revision' : "Use the \"^acur\" alias (\"revision ^acur\")",
+'change-version' : "Try \"update REVISION\"",
+'tree-revision' : "Use the \"^tcur\" alias (\"revision ^tcur\")",
+'rev-depends' : "Use revisions --dependencies",
+'auto-get' : "Plain get will do archive lookups",
+'tagline' : "Use add-id. It uses taglines in tagline trees",
+'emlog' : "Use elog. It automatically adds log-for-merge text, if any",
+'library-revisions' : "Use revisions --library",
+'file-revert' : "Use revert FILE",
+'join-branch' : "Use replay --logs-only"
+}
+# arch-tag: 19d5739d-3708-486c-93ba-deecc3027fc7
diff --git a/bzrlib/tests/test_patches_data/mod-2 b/bzrlib/tests/test_patches_data/mod-2
new file mode 100644
index 0000000..a937595
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod-2
@@ -0,0 +1,560 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
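+
+# Illustrative example (added; not part of the original test data):
+# get_patch_names() simply strips the "--- " / "+++ " prefixes.
+assert get_patch_names(iter(["--- old\n", "+++ new\n"])) == ("old", "new")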
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
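+
+# Illustrative example (added): parse_range() accepts both the "pos,range"
+# form and the bare "pos" shorthand that diff uses for one-line ranges.
+assert parse_range("34,11") == (34, 11)
+assert parse_range("7") == (7, 1)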
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
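+
+# Illustrative note (added): hunk_from_header('@@ -34,11 +50,6 @@\n') yields
+# a Hunk with orig_pos=34, orig_range=11, mod_pos=50, mod_range=6, and str()
+# of that Hunk reproduces the header (see testValidHunkHeader below).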
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4 except when range == pos == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts += 1
+ elif isinstance(line, RemoveLine):
+ removes += 1
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
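+
+# Illustrative example (added; _demo_text is not in the original file):
+# parsing and re-printing a patch round-trips the text.
+_demo_text = "--- a\n+++ b\n@@ -1 +1 @@\n-x\n+y\n"
+assert str(parse_patch(iter(_demo_text.splitlines(True)))) == _demo_text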
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i
+ return None
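+
+# Illustrative example (added): difference_index() reports the first offset
+# at which the texts disagree, or None when one is a prefix of the other.
+assert difference_index("abcd", "abXd") == 2
+assert difference_index("abc", "abcdef") is None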
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+ for line in orig_lines:
+ yield line
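+
+# Illustrative example (added; _demo_* names are not in the original file):
+# applying a one-hunk patch to a two-line original with iter_patched().
+_demo_orig = ["a\n", "b\n"]
+_demo_patch = ["--- orig\n", "+++ mod\n", "@@ -1,2 +1,2 @@\n", " a\n", "-b\n", "+c\n"]
+assert list(iter_patched(_demo_orig, _demo_patch)) == ["a\n", "c\n"]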
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header)
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header)
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ """Make sure we can accurately look up mod line from orig"""
+ import sys
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
diff --git a/bzrlib/tests/test_patches_data/mod-3 b/bzrlib/tests/test_patches_data/mod-3
new file mode 100644
index 0000000..14ac85f
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod-3
@@ -0,0 +1,561 @@
+First line change
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4 except when range == pos == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
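+# parse_patches() takes the lines of a multi-file diff and returns one Patch
+# object per "---"/"+++" section; "=== " index lines (as emitted by bzr's own
+# diff output) are skipped by iter_file_patch.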
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
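+# Informal example: difference_index("--- a\n", "--+ a\n") returns 2, while
+# two texts that agree over the shorter one's length return None.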
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+ for line in orig_lines:
+ yield line
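+# A minimal usage sketch (the file names here are hypothetical):
+#
+#   orig_lines = open("a.txt").readlines()
+#   patch_lines = open("a.patch").readlines()
+#   patched_text = "".join(iter_patched(orig_lines, patch_lines))
+#
+# A ContextLine or RemoveLine whose text does not match the original raises
+# PatchConflict.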
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ import sys
+ """Make sure we can accurately look up mod line from orig"""
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
diff --git a/bzrlib/tests/test_patches_data/mod-4 b/bzrlib/tests/test_patches_data/mod-4
new file mode 100644
index 0000000..22eddd8
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod-4
@@ -0,0 +1,558 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4 except when range == pos == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ import sys
+ """Make sure we can accurately look up mod line from orig"""
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+last line change
diff --git a/bzrlib/tests/test_patches_data/mod-5 b/bzrlib/tests/test_patches_data/mod-5
new file mode 100644
index 0000000..bd3d7ea
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod-5
@@ -0,0 +1,403 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ import sys
+ """Make sure we can accurately look up mod line from orig"""
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
diff --git a/bzrlib/tests/test_patches_data/mod-6 b/bzrlib/tests/test_patches_data/mod-6
new file mode 100644
index 0000000..cf4f85b
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod-6
@@ -0,0 +1 @@
+Total contents change
diff --git a/bzrlib/tests/test_patches_data/mod-7 b/bzrlib/tests/test_patches_data/mod-7
new file mode 100644
index 0000000..7016a57
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/mod-7
@@ -0,0 +1 @@
+No newline either \ No newline at end of file
diff --git a/bzrlib/tests/test_patches_data/orig b/bzrlib/tests/test_patches_data/orig
new file mode 100644
index 0000000..8e02dac
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig
@@ -0,0 +1,2789 @@
+# Copyright (C) 2004 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import sys
+import arch
+import arch.util
+import arch.arch
+import abacmds
+import cmdutil
+import shutil
+import os
+import options
+import paths
+import time
+import cmd
+import readline
+import re
+import string
+import arch_core
+from errors import *
+import errors
+import terminal
+import ancillary
+import misc
+import email
+import smtplib
+
+__docformat__ = "restructuredtext"
+__doc__ = "Implementation of user (sub) commands"
+commands = {}
+
+def find_command(cmd):
+ """
+ Return an instance of a command type. Return None if the type isn't
+ registered.
+
+ :param cmd: the name of the command to look for
+ :type cmd: str
+ """
+ if commands.has_key(cmd):
+ return commands[cmd]()
+ else:
+ return None
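+# Illustration (assuming the Help class below is registered in the commands
+# dict under the name "help"): find_command("help") would return a fresh
+# Help() instance, while an unregistered name returns None.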
+
+class BaseCommand:
+ def __call__(self, cmdline):
+ try:
+ self.do_command(cmdline.split())
+ except cmdutil.GetHelp, e:
+ self.help()
+ except Exception, e:
+ print e
+
+ def get_completer(self, arg, index):
+ return None
+
+ def complete(self, args, text):
+ """
+ Returns a list of possible completions for the given text.
+
+ :param args: The complete list of arguments
+ :type args: List of str
+ :param text: text to complete (may be shorter than args[-1])
+ :type text: str
+ :rtype: list of str
+ """
+ matches = []
+ candidates = None
+
+ if len(args) > 0:
+ realtext = args[-1]
+ else:
+ realtext = ""
+
+ try:
+ parser=self.get_parser()
+ if realtext.startswith('-'):
+ candidates = parser.iter_options()
+ else:
+ (options, parsed_args) = parser.parse_args(args)
+
+ if len (parsed_args) > 0:
+ candidates = self.get_completer(parsed_args[-1], len(parsed_args) -1)
+ else:
+ candidates = self.get_completer("", 0)
+ except:
+ pass
+ if candidates is None:
+ return
+ for candidate in candidates:
+ candidate = str(candidate)
+ if candidate.startswith(realtext):
+ matches.append(candidate[len(realtext)- len(text):])
+ return matches
+
+
+class Help(BaseCommand):
+ """
+ Lists commands, prints help messages.
+ """
+ def __init__(self):
+ self.description="Prints help mesages"
+ self.parser = None
+
+ def do_command(self, cmdargs):
+ """
+ Prints a help message.
+ """
+ options, args = self.get_parser().parse_args(cmdargs)
+ if len(args) > 1:
+ raise cmdutil.GetHelp
+
+ if options.native or options.suggestions or options.external:
+ native = options.native
+ suggestions = options.suggestions
+ external = options.external
+ else:
+ native = True
+ suggestions = False
+ external = True
+
+ if len(args) == 0:
+ self.list_commands(native, suggestions, external)
+ return
+ elif len(args) == 1:
+ command_help(args[0])
+ return
+
+ def help(self):
+ self.get_parser().print_help()
+ print """
+If no command is specified, commands are listed. If a command is
+specified, help for that command is listed.
+ """
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ if self.parser is not None:
+ return self.parser
+ parser=cmdutil.CmdOptionParser("fai help [command]")
+ parser.add_option("-n", "--native", action="store_true",
+ dest="native", help="Show native commands")
+ parser.add_option("-e", "--external", action="store_true",
+ dest="external", help="Show external commands")
+ parser.add_option("-s", "--suggest", action="store_true",
+ dest="suggestions", help="Show suggestions")
+ self.parser = parser
+ return parser
+
+ def list_commands(self, native=True, suggest=False, external=True):
+ """
+ Lists supported commands.
+
+ :param native: list native, python-based commands
+ :type native: bool
+ :param external: list external aba-style commands
+ :type external: bool
+ """
+ if native:
+ print "Native Fai commands"
+ keys=commands.keys()
+ keys.sort()
+ for k in keys:
+ space=""
+ for i in range(28-len(k)):
+ space+=" "
+ print space+k+" : "+commands[k]().description
+ print
+ if suggest:
+ print "Unavailable commands and suggested alternatives"
+ key_list = suggestions.keys()
+ key_list.sort()
+ for key in key_list:
+ print "%28s : %s" % (key, suggestions[key])
+ print
+ if external:
+ fake_aba = abacmds.AbaCmds()
+ if (fake_aba.abadir == ""):
+ return
+ print "External commands"
+ fake_aba.list_commands()
+ print
+ if not suggest:
+ print "Use help --suggest to list alternatives to tla and aba"\
+ " commands."
+ if options.tla_fallthrough and (native or external):
+ print "Fai also supports tla commands."
+
+def command_help(cmd):
+ """
+ Prints help for a command.
+
+ :param cmd: The name of the command to print help for
+ :type cmd: str
+ """
+ fake_aba = abacmds.AbaCmds()
+ cmdobj = find_command(cmd)
+ if cmdobj != None:
+ cmdobj.help()
+ elif suggestions.has_key(cmd):
+ print "Not available\n" + suggestions[cmd]
+ else:
+ abacmd = fake_aba.is_command(cmd)
+ if abacmd:
+ abacmd.help()
+ else:
+ print "No help is available for \""+cmd+"\". Maybe try \"tla "+cmd+" -H\"?"
+
+
+
+class Changes(BaseCommand):
+ """
+ the "changes" command: lists differences between trees/revisions:
+ """
+
+ def __init__(self):
+ self.description="Lists what files have changed in the project tree"
+
+ def get_completer(self, arg, index):
+ if index > 1:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def parse_commandline(self, cmdline):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+ if len(args) > 2:
+ raise cmdutil.GetHelp
+
+ tree=arch.tree_root()
+ if len(args) == 0:
+ a_spec = cmdutil.comp_revision(tree)
+ else:
+ a_spec = cmdutil.determine_revision_tree(tree, args[0])
+ cmdutil.ensure_archive_registered(a_spec.archive)
+ if len(args) == 2:
+ b_spec = cmdutil.determine_revision_tree(tree, args[1])
+ cmdutil.ensure_archive_registered(b_spec.archive)
+ else:
+ b_spec=tree
+ return options, a_spec, b_spec
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "changes" command.
+ """
+ try:
+ options, a_spec, b_spec = self.parse_commandline(cmdargs);
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ return
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+ if options.changeset:
+ changeset=options.changeset
+ tmpdir = None
+ else:
+ tmpdir=cmdutil.tmpdir()
+ changeset=tmpdir+"/changeset"
+ try:
+ delta=arch.iter_delta(a_spec, b_spec, changeset)
+ try:
+ for line in delta:
+ if cmdutil.chattermatch(line, "changeset:"):
+ pass
+ else:
+ cmdutil.colorize(line, options.suppress_chatter)
+ except arch.util.ExecProblem, e:
+ if e.proc.error and e.proc.error.startswith(
+ "missing explicit id for file"):
+ raise MissingID(e)
+ else:
+ raise
+ status=delta.status
+ if status > 1:
+ return
+ if (options.perform_diff):
+ chan = cmdutil.ChangesetMunger(changeset)
+ chan.read_indices()
+ if isinstance(b_spec, arch.Revision):
+ b_dir = b_spec.library_find()
+ else:
+ b_dir = b_spec
+ a_dir = a_spec.library_find()
+ if options.diffopts is not None:
+ diffopts = options.diffopts.split()
+ cmdutil.show_custom_diffs(chan, diffopts, a_dir, b_dir)
+ else:
+ cmdutil.show_diffs(delta.changeset)
+ finally:
+ if tmpdir and (os.access(tmpdir, os.X_OK)):
+ shutil.rmtree(tmpdir)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "changes" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai changes [options] [revision]"
+ " [revision]")
+ parser.add_option("-d", "--diff", action="store_true",
+ dest="perform_diff", default=False,
+ help="Show diffs in summary")
+ parser.add_option("-c", "--changeset", dest="changeset",
+ help="Store a changeset in the given directory",
+ metavar="DIRECTORY")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ parser.add_option("--diffopts", dest="diffopts",
+ help="Use the specified diff options",
+ metavar="OPTIONS")
+
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Performs source-tree comparisons
+
+If no revision is specified, the current project tree is compared to the
+last-committed revision. If one revision is specified, the current project
+tree is compared to that revision. If two revisions are specified, they are
+compared to each other.
+ """
+ help_tree_spec()
+ return
+
+
+class ApplyChanges(BaseCommand):
+ """
+ Apply differences between two revisions to a tree
+ """
+
+ def __init__(self):
+ self.description="Applies changes to a project tree"
+
+ def get_completer(self, arg, index):
+ if index > 1:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def parse_commandline(self, cmdline, tree):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+ if len(args) != 2:
+ raise cmdutil.GetHelp
+
+ a_spec = cmdutil.determine_revision_tree(tree, args[0])
+ cmdutil.ensure_archive_registered(a_spec.archive)
+ b_spec = cmdutil.determine_revision_tree(tree, args[1])
+ cmdutil.ensure_archive_registered(b_spec.archive)
+ return options, a_spec, b_spec
+
+ def do_command(self, cmdargs):
+ """
+ Master function that performs "apply-changes".
+ """
+ try:
+ tree = arch.tree_root()
+ options, a_spec, b_spec = self.parse_commandline(cmdargs, tree);
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ return
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+ delta=cmdutil.apply_delta(a_spec, b_spec, tree)
+ for line in cmdutil.iter_apply_delta_filter(delta):
+ cmdutil.colorize(line, options.suppress_chatter)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "apply-changes" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai apply-changes [options] revision"
+ " revision")
+ parser.add_option("-d", "--diff", action="store_true",
+ dest="perform_diff", default=False,
+ help="Show diffs in summary")
+ parser.add_option("-c", "--changeset", dest="changeset",
+ help="Store a changeset in the given directory",
+ metavar="DIRECTORY")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Applies changes to a project tree
+
+Compares two revisions and applies the difference between them to the current
+tree.
+ """
+ help_tree_spec()
+ return
+
+class Update(BaseCommand):
+ """
+ Updates a project tree to a given revision, preserving un-committed changes.
+ """
+
+ def __init__(self):
+ self.description="Apply the latest changes to the current directory"
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def parse_commandline(self, cmdline, tree):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+ if len(args) > 2:
+ raise cmdutil.GetHelp
+
+ spec=None
+ if len(args)>0:
+ spec=args[0]
+ revision=cmdutil.determine_revision_arch(tree, spec)
+ cmdutil.ensure_archive_registered(revision.archive)
+
+ mirror_source = cmdutil.get_mirror_source(revision.archive)
+ if mirror_source != None:
+ if cmdutil.prompt("Mirror update"):
+ cmd=cmdutil.mirror_archive(mirror_source,
+ revision.archive, arch.NameParser(revision).get_package_version())
+ for line in arch.chatter_classifier(cmd):
+ cmdutil.colorize(line, options.suppress_chatter)
+
+ revision=cmdutil.determine_revision_arch(tree, spec)
+
+ return options, revision
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "update" command.
+ """
+ tree=arch.tree_root()
+ try:
+ options, to_revision = self.parse_commandline(cmdargs, tree);
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ return
+ except arch.errors.TreeRootError, e:
+ print e
+ return
+ from_revision=cmdutil.tree_latest(tree)
+ if from_revision==to_revision:
+ print "Tree is already up to date with:\n"+str(to_revision)+"."
+ return
+ cmdutil.ensure_archive_registered(from_revision.archive)
+ cmd=cmdutil.apply_delta(from_revision, to_revision, tree,
+ options.patch_forward)
+ for line in cmdutil.iter_apply_delta_filter(cmd):
+ cmdutil.colorize(line)
+ if to_revision.version != tree.tree_version:
+ if cmdutil.prompt("Update version"):
+ tree.tree_version = to_revision.version
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "update" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai update [options]"
+ " [revision/version]")
+ parser.add_option("-f", "--forward", action="store_true",
+ dest="patch_forward", default=False,
+ help="pass the --forward option to 'patch'")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Updates a working tree to the current archive revision
+
+If a revision or version is specified, that is used instead
+ """
+ help_tree_spec()
+ return
+
+
+class Commit(BaseCommand):
+ """
+ Create a revision based on the changes in the current tree.
+ """
+
+ def __init__(self):
+ self.description="Write local changes to the archive"
+
+ def get_completer(self, arg, index):
+ if arg is None:
+ arg = ""
+ return iter_modified_file_completions(arch.tree_root(), arg)
+# return iter_source_file_completions(arch.tree_root(), arg)
+
+ def parse_commandline(self, cmdline, tree):
+ """
+ Parse commandline arguments. Raises cmdutil.GetHelp if help is needed.
+
+ :param cmdline: A list of arguments to parse
+ :rtype: (options, Revision, Revision/WorkingTree)
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdline)
+
+ if len(args) == 0:
+ args = None
+ revision=cmdutil.determine_revision_arch(tree, options.version)
+ return options, revision.get_version(), args
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "commit" command.
+ """
+ tree=arch.tree_root()
+ options, version, files = self.parse_commandline(cmdargs, tree)
+ if options.__dict__.has_key("base") and options.base:
+ base = cmdutil.determine_revision_tree(tree, options.base)
+ else:
+ base = cmdutil.submit_revision(tree)
+
+ writeversion=version
+ archive=version.archive
+ source=cmdutil.get_mirror_source(archive)
+ allow_old=False
+ writethrough="implicit"
+
+ if source!=None:
+ if writethrough=="explicit" and \
+ cmdutil.prompt("Writethrough"):
+ writeversion=arch.Version(str(source)+"/"+str(version.get_nonarch()))
+ elif writethrough=="none":
+ raise CommitToMirror(archive)
+
+ elif archive.is_mirror:
+ raise CommitToMirror(archive)
+
+ try:
+ last_revision=tree.iter_logs(version, True).next().revision
+ except StopIteration, e:
+ if cmdutil.prompt("Import from commit"):
+ return do_import(version)
+ else:
+ raise NoVersionLogs(version)
+ if last_revision!=version.iter_revisions(True).next():
+ if not cmdutil.prompt("Out of date"):
+ raise OutOfDate
+ else:
+ allow_old=True
+
+ try:
+ if not cmdutil.has_changed(version):
+ if not cmdutil.prompt("Empty commit"):
+ raise EmptyCommit
+ except arch.util.ExecProblem, e:
+ if e.proc.error and e.proc.error.startswith(
+ "missing explicit id for file"):
+ raise MissingID(e)
+ else:
+ raise
+ log = tree.log_message(create=False)
+ if log is None:
+ try:
+ if cmdutil.prompt("Create log"):
+ edit_log(tree)
+
+ except cmdutil.NoEditorSpecified, e:
+ raise CommandFailed(e)
+ log = tree.log_message(create=False)
+ if log is None:
+ raise NoLogMessage
+ if log["Summary"] is None or len(log["Summary"].strip()) == 0:
+ if not cmdutil.prompt("Omit log summary"):
+ raise errors.NoLogSummary
+ try:
+ for line in tree.iter_commit(version, seal=options.seal_version,
+ base=base, out_of_date_ok=allow_old, file_list=files):
+ cmdutil.colorize(line, options.suppress_chatter)
+
+ except arch.util.ExecProblem, e:
+ if e.proc.error and e.proc.error.startswith(
+ "These files violate naming conventions:"):
+ raise LintFailure(e.proc.error)
+ else:
+ raise
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "commit" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+
+ parser=cmdutil.CmdOptionParser("fai commit [options] [file1]"
+ " [file2...]")
+ parser.add_option("--seal", action="store_true",
+ dest="seal_version", default=False,
+ help="seal this version")
+ parser.add_option("-v", "--version", dest="version",
+ help="Use the specified version",
+ metavar="VERSION")
+ parser.add_option("-s", "--silent", action="store_true",
+ dest="suppress_chatter", default=False,
+ help="Suppress chatter messages")
+ if cmdutil.supports_switch("commit", "--base"):
+ parser.add_option("--base", dest="base", help="",
+ metavar="REVISION")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser is None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Commits local changes in the working tree to the archive as a new revision
+
+If a version is specified, it is committed to instead of the tree-version
+ """
+# help_tree_spec()
+ return
+
+
+
+class CatLog(BaseCommand):
+ """
+ Print the log of a given file (from current tree)
+ """
+ def __init__(self):
+ self.description="Prints the patch log for a revision"
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "cat-log" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError, e:
+ tree = None
+ spec=None
+ if len(args) > 0:
+ spec=args[0]
+ if len(args) > 1:
+ raise cmdutil.GetHelp()
+ try:
+ if tree:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+ revision = cmdutil.determine_revision_arch(tree, spec)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ log = None
+
+ use_tree = (options.source == "tree" or \
+ (options.source == "any" and tree))
+ use_arch = (options.source == "archive" or options.source == "any")
+
+ log = None
+ if use_tree:
+ for log in tree.iter_logs(revision.get_version()):
+ if log.revision == revision:
+ break
+ else:
+ log = None
+ if log is None and use_arch:
+ cmdutil.ensure_revision_exists(revision)
+ log = arch.Patchlog(revision)
+ if log is not None:
+ for item in log.items():
+ print "%s: %s" % item
+ print log.description
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "cat-log" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai cat-log [revision]")
+ parser.add_option("--archive", action="store_const", dest="source",
+ const="archive", default="any",
+ help="Always get the log from the archive")
+ parser.add_option("--tree", action="store_const", dest="source",
+ const="tree", help="Always get the log from the tree")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Prints the log for the specified revision
+ """
+ help_tree_spec()
+ return
+
+class Revert(BaseCommand):
+ """ Reverts a tree (or aspects of it) to a revision
+ """
+ def __init__(self):
+ self.description="Reverts a tree (or aspects of it) to a revision "
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return iter_modified_file_completions(tree, arg)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revert" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError, e:
+ raise CommandFailed(e)
+ spec=None
+ if options.revision is not None:
+ spec=options.revision
+ try:
+ if spec is not None:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+ revision = cmdutil.comp_revision(tree)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ munger = None
+
+ if options.file_contents or options.file_perms or options.deletions\
+ or options.additions or options.renames or options.hunk_prompt:
+ munger = cmdutil.MungeOpts()
+ munger.hunk_prompt = options.hunk_prompt
+
+ if len(args) > 0 or options.logs or options.pattern_files or \
+ options.control:
+ if munger is None:
+ munger = cmdutil.MungeOpts(True)
+ munger.all_types(True)
+ if len(args) > 0:
+ t_cwd = cmdutil.tree_cwd(tree)
+ for name in args:
+ if len(t_cwd) > 0:
+ t_cwd += "/"
+ name = "./" + t_cwd + name
+ munger.add_keep_file(name)
+
+ if options.file_perms:
+ munger.file_perms = True
+ if options.file_contents:
+ munger.file_contents = True
+ if options.deletions:
+ munger.deletions = True
+ if options.additions:
+ munger.additions = True
+ if options.renames:
+ munger.renames = True
+ if options.logs:
+ munger.add_keep_pattern('^\./\{arch\}/[^=].*')
+ if options.control:
+ munger.add_keep_pattern("/\.arch-ids|^\./\{arch\}|"\
+ "/\.arch-inventory$")
+ if options.pattern_files:
+ munger.add_keep_pattern(options.pattern_files)
+
+ for line in cmdutil.revert(tree, revision, munger,
+ not options.no_output):
+ cmdutil.colorize(line)
+
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "cat-log" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai revert [options] [FILE...]")
+ parser.add_option("", "--contents", action="store_true",
+ dest="file_contents",
+ help="Revert file content changes")
+ parser.add_option("", "--permissions", action="store_true",
+ dest="file_perms",
+ help="Revert file permissions changes")
+ parser.add_option("", "--deletions", action="store_true",
+ dest="deletions",
+ help="Restore deleted files")
+ parser.add_option("", "--additions", action="store_true",
+ dest="additions",
+ help="Remove added files")
+ parser.add_option("", "--renames", action="store_true",
+ dest="renames",
+ help="Revert file names")
+ parser.add_option("--hunks", action="store_true",
+ dest="hunk_prompt", default=False,
+ help="Prompt which hunks to revert")
+ parser.add_option("--pattern-files", dest="pattern_files",
+ help="Revert files that match this pattern",
+ metavar="REGEX")
+ parser.add_option("--logs", action="store_true",
+ dest="logs", default=False,
+ help="Revert only logs")
+ parser.add_option("--control-files", action="store_true",
+ dest="control", default=False,
+ help="Revert logs and other control files")
+ parser.add_option("-n", "--no-output", action="store_true",
+ dest="no_output",
+ help="Don't keep an undo changeset")
+ parser.add_option("--revision", dest="revision",
+ help="Revert to the specified revision",
+ metavar="REVISION")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Reverts changes in the current working tree. If no flags are specified, all
+types of changes are reverted. Otherwise, only selected types of changes are
+reverted.
+
+If a revision is specified on the commandline, differences between the current
+tree and that revision are reverted. If a version is specified, the current
+tree is used to determine the revision.
+
+If files are specified, only those files listed will have any changes applied.
+To specify a renamed file, you can use either the old or new name. (or both!)
+
+Unless "-n" is specified, reversions can be undone with "redo".
+ """
+ return
+
+class Revision(BaseCommand):
+ """
+ Print a revision name based on a revision specifier
+ """
+ def __init__(self):
+ self.description="Prints the name of a revision"
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revision" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ tree = None
+
+ spec=None
+ if len(args) > 0:
+ spec=args[0]
+ if len(args) > 1:
+ raise cmdutil.GetHelp
+ try:
+ if tree:
+ revision = cmdutil.determine_revision_tree(tree, spec)
+ else:
+ revision = cmdutil.determine_revision_arch(tree, spec)
+ except cmdutil.CantDetermineRevision, e:
+ print str(e)
+ return
+ print options.display(revision)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai revision [revision]")
+ parser.add_option("", "--location", action="store_const",
+ const=paths.determine_path, dest="display",
+ help="Show location instead of name", default=str)
+ parser.add_option("--import", action="store_const",
+ const=paths.determine_import_path, dest="display",
+ help="Show location of import file")
+ parser.add_option("--log", action="store_const",
+ const=paths.determine_log_path, dest="display",
+ help="Show location of log file")
+ parser.add_option("--patch", action="store_const",
+ dest="display", const=paths.determine_patch_path,
+ help="Show location of patchfile")
+ parser.add_option("--continuation", action="store_const",
+ const=paths.determine_continuation_path,
+ dest="display",
+ help="Show location of continuation file")
+ parser.add_option("--cacherev", action="store_const",
+ const=paths.determine_cacherev_path, dest="display",
+ help="Show location of cacherev file")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Expands aliases and prints the name of the specified revision. Instead of
+the name, several options can be used to print locations. If more than one is
+specified, the last one is used.
+ """
+ help_tree_spec()
+ return
+
+def require_version_exists(version, spec):
+ if not version.exists():
+ raise cmdutil.CantDetermineVersion(spec,
+ "The version %s does not exist." \
+ % version)
+
+class Revisions(BaseCommand):
+ """
+ List revisions matching the selection options
+ """
+ def __init__(self):
+ self.description="Lists revisions"
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revision" command.
+ """
+ (options, args) = self.get_parser().parse_args(cmdargs)
+ if len(args) > 1:
+ raise cmdutil.GetHelp
+ try:
+ self.tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ self.tree = None
+ try:
+ iter = self.get_iterator(options.type, args, options.reverse,
+ options.modified)
+ except cmdutil.CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+
+ if options.skip is not None:
+ iter = cmdutil.iter_skip(iter, int(options.skip))
+
+ for revision in iter:
+ log = None
+ if isinstance(revision, arch.Patchlog):
+ log = revision
+ revision=revision.revision
+ print options.display(revision)
+ if log is None and (options.summary or options.creator or
+ options.date or options.merges):
+ log = revision.patchlog
+ if options.creator:
+ print " %s" % log.creator
+ if options.date:
+ print " %s" % time.strftime('%Y-%m-%d %H:%M:%S %Z', log.date)
+ if options.summary:
+ print " %s" % log.summary
+ if options.merges:
+ showed_title = False
+ for revision in log.merged_patches:
+ if not showed_title:
+ print " Merged:"
+ showed_title = True
+ print " %s" % revision
+
+ def get_iterator(self, type, args, reverse, modified):
+ if len(args) > 0:
+ spec = args[0]
+ else:
+ spec = None
+ if modified is not None:
+ iter = cmdutil.modified_iter(modified, self.tree)
+ if reverse:
+ return iter
+ else:
+ return cmdutil.iter_reverse(iter)
+ elif type == "archive":
+ if spec is None:
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("",
+ "Not in a project tree")
+ version = cmdutil.determine_version_tree(spec, self.tree)
+ else:
+ version = cmdutil.determine_version_arch(spec, self.tree)
+ cmdutil.ensure_archive_registered(version.archive)
+ require_version_exists(version, spec)
+ return version.iter_revisions(reverse)
+ elif type == "cacherevs":
+ if spec is None:
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("",
+ "Not in a project tree")
+ version = cmdutil.determine_version_tree(spec, self.tree)
+ else:
+ version = cmdutil.determine_version_arch(spec, self.tree)
+ cmdutil.ensure_archive_registered(version.archive)
+ require_version_exists(version, spec)
+ return cmdutil.iter_cacherevs(version, reverse)
+ elif type == "library":
+ if spec is None:
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("",
+ "Not in a project tree")
+ version = cmdutil.determine_version_tree(spec, self.tree)
+ else:
+ version = cmdutil.determine_version_arch(spec, self.tree)
+ return version.iter_library_revisions(reverse)
+ elif type == "logs":
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+ return self.tree.iter_logs(cmdutil.determine_version_tree(spec, \
+ self.tree), reverse)
+ elif type == "missing" or type == "skip-present":
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+ skip = (type == "skip-present")
+ version = cmdutil.determine_version_tree(spec, self.tree)
+ cmdutil.ensure_archive_registered(version.archive)
+ require_version_exists(version, spec)
+ return cmdutil.iter_missing(self.tree, version, reverse,
+ skip_present=skip)
+
+ elif type == "present":
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+ version = cmdutil.determine_version_tree(spec, self.tree)
+ cmdutil.ensure_archive_registered(version.archive)
+ require_version_exists(version, spec)
+ return cmdutil.iter_present(self.tree, version, reverse)
+
+ elif type == "new-merges" or type == "direct-merges":
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+ version = cmdutil.determine_version_tree(spec, self.tree)
+ cmdutil.ensure_archive_registered(version.archive)
+ require_version_exists(version, spec)
+ iter = cmdutil.iter_new_merges(self.tree, version, reverse)
+ if type == "new-merges":
+ return iter
+ elif type == "direct-merges":
+ return cmdutil.direct_merges(iter)
+
+ elif type == "missing-from":
+ if self.tree is None:
+ raise cmdutil.CantDetermineRevision("", "Not in a project tree")
+ revision = cmdutil.determine_revision_tree(self.tree, spec)
+ libtree = cmdutil.find_or_make_local_revision(revision)
+ return cmdutil.iter_missing(libtree, self.tree.tree_version,
+ reverse)
+
+ elif type == "partner-missing":
+ return cmdutil.iter_partner_missing(self.tree, reverse)
+
+ elif type == "ancestry":
+ revision = cmdutil.determine_revision_tree(self.tree, spec)
+ iter = cmdutil._iter_ancestry(self.tree, revision)
+ if reverse:
+ return iter
+ else:
+ return cmdutil.iter_reverse(iter)
+
+ elif type == "dependencies" or type == "non-dependencies":
+ nondeps = (type == "non-dependencies")
+ revision = cmdutil.determine_revision_tree(self.tree, spec)
+ anc_iter = cmdutil._iter_ancestry(self.tree, revision)
+ iter_depends = cmdutil.iter_depends(anc_iter, nondeps)
+ if reverse:
+ return iter_depends
+ else:
+ return cmdutil.iter_reverse(iter_depends)
+ elif type == "micro":
+ return cmdutil.iter_micro(self.tree)
+
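+ # get_iterator sketch (hypothetical version name): for example,
+ # get_iterator("missing", ["foo--devel--1.0"], False, None) yields the
+ # archive revisions of that version that have no patchlog in this tree.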
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai revisions [revision]")
+ select = cmdutil.OptionGroup(parser, "Selection options",
+ "Control which revisions are listed. These options"
+ " are mutually exclusive. If more than one is"
+ " specified, the last is used.")
+ select.add_option("", "--archive", action="store_const",
+ const="archive", dest="type", default="archive",
+ help="List all revisions in the archive")
+ select.add_option("", "--cacherevs", action="store_const",
+ const="cacherevs", dest="type",
+ help="List all revisions stored in the archive as "
+ "complete copies")
+ select.add_option("", "--logs", action="store_const",
+ const="logs", dest="type",
+ help="List revisions that have a patchlog in the "
+ "tree")
+ select.add_option("", "--missing", action="store_const",
+ const="missing", dest="type",
+ help="List revisions from the specified version that"
+ " have no patchlog in the tree")
+ select.add_option("", "--skip-present", action="store_const",
+ const="skip-present", dest="type",
+ help="List revisions from the specified version that"
+ " have no patchlogs at all in the tree")
+ select.add_option("", "--present", action="store_const",
+ const="present", dest="type",
+ help="List revisions from the specified version that"
+ " have no patchlog in the tree, but can't be merged")
+ select.add_option("", "--missing-from", action="store_const",
+ const="missing-from", dest="type",
+ help="List revisions from the specified revision "
+ "that have no patchlog for the tree version")
+ select.add_option("", "--partner-missing", action="store_const",
+ const="partner-missing", dest="type",
+ help="List revisions in partner versions that are"
+ " missing")
+ select.add_option("", "--new-merges", action="store_const",
+ const="new-merges", dest="type",
+ help="List revisions that have had patchlogs added"
+ " to the tree since the last commit")
+ select.add_option("", "--direct-merges", action="store_const",
+ const="direct-merges", dest="type",
+ help="List revisions that have been directly added"
+ " to tree since the last commit ")
+ select.add_option("", "--library", action="store_const",
+ const="library", dest="type",
+ help="List revisions in the revision library")
+ select.add_option("", "--ancestry", action="store_const",
+ const="ancestry", dest="type",
+ help="List revisions that are ancestors of the "
+ "current tree version")
+
+ select.add_option("", "--dependencies", action="store_const",
+ const="dependencies", dest="type",
+ help="List revisions that the given revision "
+ "depends on")
+
+ select.add_option("", "--non-dependencies", action="store_const",
+ const="non-dependencies", dest="type",
+ help="List revisions that the given revision "
+ "does not depend on")
+
+ select.add_option("--micro", action="store_const",
+ const="micro", dest="type",
+ help="List partner revisions aimed for this "
+ "micro-branch")
+
+ select.add_option("", "--modified", dest="modified",
+ help="List tree ancestor revisions that modified a "
+ "given file", metavar="FILE[:LINE]")
+
+ parser.add_option("", "--skip", dest="skip",
+ help="Skip revisions. Positive numbers skip from "
+ "beginning, negative skip from end.",
+ metavar="NUMBER")
+
+ parser.add_option_group(select)
+
+ format = cmdutil.OptionGroup(parser, "Revision format options",
+ "These control the appearance of listed revisions")
+ format.add_option("", "--location", action="store_const",
+ const=paths.determine_path, dest="display",
+ help="Show location instead of name", default=str)
+ format.add_option("--import", action="store_const",
+ const=paths.determine_import_path, dest="display",
+ help="Show location of import file")
+ format.add_option("--log", action="store_const",
+ const=paths.determine_log_path, dest="display",
+ help="Show location of log file")
+ format.add_option("--patch", action="store_const",
+ dest="display", const=paths.determine_patch_path,
+ help="Show location of patchfile")
+ format.add_option("--continuation", action="store_const",
+ const=paths.determine_continuation_path,
+ dest="display",
+ help="Show location of continuation file")
+ format.add_option("--cacherev", action="store_const",
+ const=paths.determine_cacherev_path, dest="display",
+ help="Show location of cacherev file")
+ parser.add_option_group(format)
+ display = cmdutil.OptionGroup(parser, "Display format options",
+ "These control the display of data")
+ display.add_option("-r", "--reverse", action="store_true",
+ dest="reverse", help="Sort from newest to oldest")
+ display.add_option("-s", "--summary", action="store_true",
+ dest="summary", help="Show patchlog summary")
+ display.add_option("-D", "--date", action="store_true",
+ dest="date", help="Show patchlog date")
+ display.add_option("-c", "--creator", action="store_true",
+ dest="creator", help="Show the id that committed the"
+ " revision")
+ display.add_option("-m", "--merges", action="store_true",
+ dest="merges", help="Show the revisions that were"
+ " merged")
+ parser.add_option_group(display)
+ return parser
+
+ def help(self, parser=None):
+ """Attempt to explain the revisions command
+
+ :param parser: If supplied, used to determine options
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """List revisions.
+ """
+ help_tree_spec()
+
+
+class Get(BaseCommand):
+ """
+ Retrieve a revision from the archive
+ """
+ def __init__(self):
+ self.description="Retrieve a revision from the archive"
+ self.parser=self.get_parser()
+
+
+ def get_completer(self, arg, index):
+ if index > 0:
+ return None
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+ return cmdutil.iter_revision_completions(arg, tree)
+
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "get" command.
+ """
+ (options, args) = self.parser.parse_args(cmdargs)
+ if len(args) < 1:
+ return self.help()
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ tree = None
+
+ arch_loc = None
+ try:
+ revision, arch_loc = paths.full_path_decode(args[0])
+ except Exception, e:
+ revision = cmdutil.determine_revision_arch(tree, args[0],
+ check_existence=False, allow_package=True)
+ if len(args) > 1:
+ directory = args[1]
+ else:
+ directory = str(revision.nonarch)
+ if os.path.exists(directory):
+ raise DirectoryExists(directory)
+ cmdutil.ensure_archive_registered(revision.archive, arch_loc)
+ try:
+ cmdutil.ensure_revision_exists(revision)
+ except cmdutil.NoSuchRevision, e:
+ raise CommandFailedWrapper(e)
+
+ link = cmdutil.prompt ("get link")
+ for line in cmdutil.iter_get(revision, directory, link,
+ options.no_pristine,
+ options.no_greedy_add):
+ cmdutil.colorize(line)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "get" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai get revision [dir]")
+ parser.add_option("--no-pristine", action="store_true",
+ dest="no_pristine",
+ help="Do not make pristine copy for reference")
+ parser.add_option("--no-greedy-add", action="store_true",
+ dest="no_greedy_add",
+ help="Never add to greedy libraries")
+
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Expands aliases and constructs a project tree for a revision. If the optional
+"dir" argument is provided, the project tree will be stored in this directory.
+ """
+ help_tree_spec()
+ return
+
+class PromptCmd(cmd.Cmd):
+ def __init__(self):
+ cmd.Cmd.__init__(self)
+ self.prompt = "Fai> "
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+ self.set_title()
+ self.set_prompt()
+ self.fake_aba = abacmds.AbaCmds()
+ self.identchars += '-'
+ self.history_file = os.path.expanduser("~/.fai-history")
+ readline.set_completer_delims(string.whitespace)
+ if os.access(self.history_file, os.R_OK) and \
+ os.path.isfile(self.history_file):
+ readline.read_history_file(self.history_file)
+
+ def write_history(self):
+ readline.write_history_file(self.history_file)
+
+ def do_quit(self, args):
+ self.write_history()
+ sys.exit(0)
+
+ def do_exit(self, args):
+ self.do_quit(args)
+
+ def do_EOF(self, args):
+ print
+ self.do_quit(args)
+
+ def postcmd(self, stop, line):
+ self.set_title()
+ self.set_prompt()
+ return stop
+
+ def set_prompt(self):
+ if self.tree is not None:
+ try:
+ version = " "+self.tree.tree_version.nonarch
+ except:
+ version = ""
+ else:
+ version = ""
+ self.prompt = "Fai%s> " % version
+
+ def set_title(self, command=None):
+ try:
+ version = self.tree.tree_version.nonarch
+ except:
+ version = "[no version]"
+ if command is None:
+ command = ""
+ sys.stdout.write(terminal.term_title("Fai %s %s" % (command, version)))
+
+ def do_cd(self, line):
+ if line == "":
+ line = "~"
+ try:
+ os.chdir(os.path.expanduser(line))
+ except Exception, e:
+ print e
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+ def do_help(self, line):
+ Help()(line)
+
+ def default(self, line):
+ args = line.split()
+ if find_command(args[0]):
+ try:
+ find_command(args[0]).do_command(args[1:])
+ except cmdutil.BadCommandOption, e:
+ print e
+ except cmdutil.GetHelp, e:
+ find_command(args[0]).help()
+ except CommandFailed, e:
+ print e
+ except arch.errors.ArchiveNotRegistered, e:
+ print e
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+ except arch.util.ExecProblem, e:
+ print e.proc.error.rstrip('\n')
+ except cmdutil.CantDetermineVersion, e:
+ print e
+ except cmdutil.CantDetermineRevision, e:
+ print e
+ except Exception, e:
+ print "Unhandled error:\n%s" % cmdutil.exception_str(e)
+
+ elif suggestions.has_key(args[0]):
+ print suggestions[args[0]]
+
+ elif self.fake_aba.is_command(args[0]):
+ tree = None
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ pass
+ cmd = self.fake_aba.is_command(args[0])
+ try:
+ cmd.run(cmdutil.expand_prefix_alias(args[1:], tree))
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+
+ elif options.tla_fallthrough and args[0] != "rm" and \
+ cmdutil.is_tla_command(args[0]):
+ try:
+ tree = None
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ pass
+ args = cmdutil.expand_prefix_alias(args, tree)
+ arch.util.exec_safe('tla', args, stderr=sys.stderr,
+ expected=(0, 1))
+ except arch.util.ExecProblem, e:
+ pass
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+ else:
+ try:
+ try:
+ tree = arch.tree_root()
+ except arch.errors.TreeRootError:
+ tree = None
+ args=line.split()
+ os.system(" ".join(cmdutil.expand_prefix_alias(args, tree)))
+ except KeyboardInterrupt, e:
+ print "Interrupted"
+
+ def completenames(self, text, line, begidx, endidx):
+ completions = []
+ iter = iter_command_names(self.fake_aba)
+ try:
+ if len(line) > 0:
+ arg = line.split()[-1]
+ else:
+ arg = ""
+ iter = iter_munged_completions(iter, arg, text)
+ except Exception, e:
+ print e
+ return list(iter)
+
+ def completedefault(self, text, line, begidx, endidx):
+ """Perform completion for native commands.
+
+ :param text: The text to complete
+ :type text: str
+ :param line: The entire line to complete
+ :type line: str
+ :param begidx: The start of the text in the line
+ :type begidx: int
+ :param endidx: The end of the text in the line
+ :type endidx: int
+ """
+ try:
+ (cmd, args, foo) = self.parseline(line)
+ command_obj=find_command(cmd)
+ if command_obj is not None:
+ return command_obj.complete(args.split(), text)
+ elif not self.fake_aba.is_command(cmd) and \
+ cmdutil.is_tla_command(cmd):
+ iter = cmdutil.iter_supported_switches(cmd)
+ if len(args) > 0:
+ arg = args.split()[-1]
+ else:
+ arg = ""
+ if arg.startswith("-"):
+ return list(iter_munged_completions(iter, arg, text))
+ else:
+ return list(iter_munged_completions(
+ iter_file_completions(arg), arg, text))
+
+
+ elif cmd == "cd":
+ if len(args) > 0:
+ arg = args.split()[-1]
+ else:
+ arg = ""
+ iter = iter_dir_completions(arg)
+ iter = iter_munged_completions(iter, arg, text)
+ return list(iter)
+ elif len(args)>0:
+ arg = args.split()[-1]
+ return list(iter_munged_completions(iter_file_completions(arg),
+ arg, text))
+ else:
+ return self.completenames(text, line, begidx, endidx)
+ except Exception, e:
+ print e
+
+
+def iter_command_names(fake_aba):
+ for entry in cmdutil.iter_combine([commands.iterkeys(),
+ fake_aba.get_commands(),
+ cmdutil.iter_tla_commands(False)]):
+ if not suggestions.has_key(str(entry)):
+ yield entry
+
+
+def iter_file_completions(arg, only_dirs = False):
+ """Generate an iterator that iterates through filename completions.
+
+ :param arg: The filename fragment to match
+ :type arg: str
+ :param only_dirs: If true, match only directories
+ :type only_dirs: bool
+ """
+ cwd = os.getcwd()
+ if cwd != "/":
+ extras = [".", ".."]
+ else:
+ extras = []
+ (dir, file) = os.path.split(arg)
+ if dir != "":
+ listingdir = os.path.expanduser(dir)
+ else:
+ listingdir = cwd
+ for file in cmdutil.iter_combine([os.listdir(listingdir), extras]):
+ if dir != "":
+ userfile = dir+'/'+file
+ else:
+ userfile = file
+ if userfile.startswith(arg):
+ if os.path.isdir(listingdir+'/'+file):
+ userfile+='/'
+ yield userfile
+ elif not only_dirs:
+ yield userfile
+
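+# iter_file_completions example (hypothetical paths): with arg "src/ma" and a
+# directory "src" containing "main.c" and "maps/", this yields "src/main.c"
+# and "src/maps/" (directories get a trailing slash; with only_dirs=True,
+# plain files are skipped).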
+def iter_munged_completions(iter, arg, text):
+ for completion in iter:
+ completion = str(completion)
+ if completion.startswith(arg):
+ yield completion[len(arg)-len(text):]
+
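+# iter_munged_completions example (hypothetical values): readline may pass
+# arg="bzrlib/ann" but text="ann"; a completion "bzrlib/annotate.py" is
+# trimmed to "annotate.py" so it lines up with the text being completed.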
+def iter_source_file_completions(tree, arg):
+ treepath = cmdutil.tree_cwd(tree)
+ if len(treepath) > 0:
+ dirs = [treepath]
+ else:
+ dirs = None
+ for file in tree.iter_inventory(dirs, source=True, both=True):
+ file = file_completion_match(file, treepath, arg)
+ if file is not None:
+ yield file
+
+
+def iter_untagged(tree, dirs):
+ for file in arch_core.iter_inventory_filter(tree, dirs, tagged=False,
+ categories=arch_core.non_root,
+ control_files=True):
+ yield file.name
+
+
+def iter_untagged_completions(tree, arg):
+ """Generate an iterator for all visible untagged files that match arg.
+
+ :param tree: The tree to look for untagged files in
+ :type tree: `arch.WorkingTree`
+ :param arg: The argument to match
+ :type arg: str
+ :return: An iterator of all matching untagged files
+ :rtype: iterator of str
+ """
+ treepath = cmdutil.tree_cwd(tree)
+ if len(treepath) > 0:
+ dirs = [treepath]
+ else:
+ dirs = None
+
+ for file in iter_untagged(tree, dirs):
+ file = file_completion_match(file, treepath, arg)
+ if file is not None:
+ yield file
+
+
+def file_completion_match(file, treepath, arg):
+ """Determines whether a file within an arch tree matches the argument.
+
+ :param file: The rooted filename
+ :type file: str
+ :param treepath: The path to the cwd within the tree
+ :type treepath: str
+ :param arg: The prefix to match
+ :return: The completion name, or None if not a match
+ :rtype: str
+ """
+ if not file.startswith(treepath):
+ return None
+ if treepath != "":
+ file = file[len(treepath)+1:]
+
+ if not file.startswith(arg):
+ return None
+ if os.path.isdir(file):
+ file += '/'
+ return file
+
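+# file_completion_match example (hypothetical paths): with treepath "src" and
+# arg "ma", the rooted name "src/main.c" completes to "main.c", while
+# "docs/readme" (outside the cwd subtree) returns None.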
+def iter_modified_file_completions(tree, arg):
+ """Returns a list of modified files that match the specified prefix.
+
+ :param tree: The current tree
+ :type tree: `arch.WorkingTree`
+ :param arg: The prefix to match
+ :type arg: str
+ """
+ treepath = cmdutil.tree_cwd(tree)
+ tmpdir = cmdutil.tmpdir()
+ changeset = tmpdir+"/changeset"
+ completions = []
+ revision = cmdutil.determine_revision_tree(tree)
+ for line in arch.iter_delta(revision, tree, changeset):
+ if isinstance(line, arch.FileModification):
+ file = file_completion_match(line.name[1:], treepath, arg)
+ if file is not None:
+ completions.append(file)
+ shutil.rmtree(tmpdir)
+ return completions
+
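+# Note on the helper above: despite its name it builds and returns a plain
+# list, computed from a temporary changeset (arch.iter_delta against the
+# tree's current revision) that is removed before returning.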
+def iter_dir_completions(arg):
+ """Generate an iterator that iterates through directory name completions.
+
+ :param arg: The directory name fragment to match
+ :type arg: str
+ """
+ return iter_file_completions(arg, True)
+
+class Shell(BaseCommand):
+ def __init__(self):
+ self.description = "Runs Fai as a shell"
+
+ def do_command(self, cmdargs):
+ if len(cmdargs)!=0:
+ raise cmdutil.GetHelp
+ prompt = PromptCmd()
+ try:
+ prompt.cmdloop()
+ finally:
+ prompt.write_history()
+
+class AddID(BaseCommand):
+ """
+ Adds an inventory id for the given file
+ """
+ def __init__(self):
+ self.description="Add an inventory id for a given file"
+
+ def get_completer(self, arg, index):
+ tree = arch.tree_root()
+ return iter_untagged_completions(tree, arg)
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revision" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+
+ tree = arch.tree_root()
+
+ if (len(args) == 0) == (options.untagged == False):
+ raise cmdutil.GetHelp
+
+ #if options.id and len(args) != 1:
+ # print "If --id is specified, only one file can be named."
+ # return
+
+ method = tree.tagging_method
+
+ if options.id_type == "tagline":
+ if method != "tagline":
+ if not cmdutil.prompt("Tagline in other tree"):
+ if method == "explicit":
+ options.id_type = "explicit"
+ else:
+ print "add-id not supported for \"%s\" tagging method"\
+ % method
+ return
+
+ elif options.id_type == "explicit":
+ if method != "tagline" and method != explicit:
+ if not prompt("Explicit in other tree"):
+ print "add-id not supported for \"%s\" tagging method" % \
+ method
+ return
+
+ if options.id_type == "auto":
+ if method != "tagline" and method != "explicit":
+ print "add-id not supported for \"%s\" tagging method" % method
+ return
+ else:
+ options.id_type = method
+ if options.untagged:
+ args = None
+ self.add_ids(tree, options.id_type, args)
+
+ def add_ids(self, tree, id_type, files=()):
+ """Add inventory ids to files.
+
+ :param tree: the tree the files are in
+ :type tree: `arch.WorkingTree`
+ :param id_type: the type of id to add: "explicit" or "tagline"
+ :type id_type: str
+ :param files: The list of files to add. If None do all untagged.
+ :type files: tuple of str
+ """
+
+ untagged = (files is None)
+ if untagged:
+ files = list(iter_untagged(tree, None))
+ previous_files = []
+ while len(files) > 0:
+ previous_files.extend(files)
+ if id_type == "explicit":
+ cmdutil.add_id(files)
+ elif id_type == "tagline":
+ for file in files:
+ try:
+ cmdutil.add_tagline_or_explicit_id(file)
+ except cmdutil.AlreadyTagged:
+ print "\"%s\" already has a tagline." % file
+ except cmdutil.NoCommentSyntax:
+ pass
+ #do inventory after tagging until no untagged files are encountered
+ if untagged:
+ files = []
+ for file in iter_untagged(tree, None):
+ if not file in previous_files:
+ files.append(file)
+
+ else:
+ break
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai add-id file1 [file2] [file3]...")
+# ddaa suggests removing this to promote GUIDs. Let's see who squawks.
+# parser.add_option("-i", "--id", dest="id",
+# help="Specify id for a single file", default=None)
+ parser.add_option("--tltl", action="store_true",
+ dest="lord_style", help="Use Tom Lord's style of id.")
+ parser.add_option("--explicit", action="store_const",
+ const="explicit", dest="id_type",
+ help="Use an explicit id", default="auto")
+ parser.add_option("--tagline", action="store_const",
+ const="tagline", dest="id_type",
+ help="Use a tagline id")
+ parser.add_option("--untagged", action="store_true",
+ dest="untagged", default=False,
+ help="tag all untagged files")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Adds an inventory id to the specified file(s) and directories. If --untagged is
+specified, adds ids to all untagged files and directories.
+ """
+ return
+
+
+class Merge(BaseCommand):
+ """
+ Merges changes from other versions into the current tree
+ """
+ def __init__(self):
+ self.description="Merges changes from other versions"
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+
+ def get_completer(self, arg, index):
+ if self.tree is None:
+ raise arch.errors.TreeRootError
+ completions = list(ancillary.iter_partners(self.tree,
+ self.tree.tree_version))
+ if len(completions) == 0:
+ completions = list(self.tree.iter_log_versions())
+
+ aliases = []
+ try:
+ for completion in completions:
+ alias = ancillary.compact_alias(str(completion), self.tree)
+ if alias:
+ aliases.extend(alias)
+
+ for completion in completions:
+ if completion.archive == self.tree.tree_version.archive:
+ aliases.append(completion.nonarch)
+
+ except Exception, e:
+ print e
+
+ completions.extend(aliases)
+ return completions
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "merge" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ if options.diff3:
+ action="star-merge"
+ else:
+ action = options.action
+
+ if self.tree is None:
+ raise arch.errors.TreeRootError(os.getcwd())
+ if cmdutil.has_changed(self.tree.tree_version):
+ raise UncommittedChanges(self.tree)
+
+ if len(args) > 0:
+ revisions = []
+ for arg in args:
+ revisions.append(cmdutil.determine_revision_arch(self.tree,
+ arg))
+ source = "from commandline"
+ else:
+ revisions = ancillary.iter_partner_revisions(self.tree,
+ self.tree.tree_version)
+ source = "from partner version"
+ revisions = misc.rewind_iterator(revisions)
+ try:
+ revisions.next()
+ revisions.rewind()
+ except StopIteration, e:
+ revision = cmdutil.tag_cur(self.tree)
+ if revision is None:
+ raise CantDetermineRevision("", "No version specified, no "
+ "partner-versions, and no tag"
+ " source")
+ revisions = [revision]
+ source = "from tag source"
+ for revision in revisions:
+ cmdutil.ensure_archive_registered(revision.archive)
+ cmdutil.colorize(arch.Chatter("* Merging %s [%s]" %
+ (revision, source)))
+ if action=="native-merge" or action=="update":
+ if self.native_merge(revision, action) == 0:
+ continue
+ elif action=="star-merge":
+ try:
+ self.star_merge(revision, options.diff3)
+ except errors.MergeProblem, e:
+ break
+ if cmdutil.has_changed(self.tree.tree_version):
+ break
+
+ def star_merge(self, revision, diff3):
+ """Perform a star-merge on the current tree.
+
+ :param revision: The revision to use for the merge
+ :type revision: `arch.Revision`
+ :param diff3: If true, do a diff3 merge
+ :type diff3: bool
+ """
+ try:
+ for line in self.tree.iter_star_merge(revision, diff3=diff3):
+ cmdutil.colorize(line)
+ except arch.util.ExecProblem, e:
+ if e.proc.status is not None and e.proc.status == 1:
+ if e.proc.error:
+ print e.proc.error
+ raise MergeProblem
+ else:
+ raise
+
+ def native_merge(self, other_revision, action):
+ """Perform a native-merge on the current tree.
+
+ :param other_revision: The revision to use for the merge
+ :type other_revision: `arch.Revision`
+ :return: 0 if the merge was skipped, 1 if it was applied
+ """
+ other_tree = cmdutil.find_or_make_local_revision(other_revision)
+ try:
+ if action == "native-merge":
+ ancestor = cmdutil.merge_ancestor2(self.tree, other_tree,
+ other_revision)
+ elif action == "update":
+ ancestor = cmdutil.tree_latest(self.tree,
+ other_revision.version)
+ except CantDetermineRevision, e:
+ raise CommandFailedWrapper(e)
+ cmdutil.colorize(arch.Chatter("* Found common ancestor %s" % ancestor))
+ if (ancestor == other_revision):
+ cmdutil.colorize(arch.Chatter("* Skipping redundant merge"
+ % ancestor))
+ return 0
+ delta = cmdutil.apply_delta(ancestor, other_tree, self.tree)
+ for line in cmdutil.iter_apply_delta_filter(delta):
+ cmdutil.colorize(line)
+ return 1
+
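+ # native_merge sketch: the common ancestor is diffed against the other
+ # revision's tree and that delta is applied to the working tree, so
+ # changes already present are not replayed; an ancestor equal to the
+ # other revision means there is nothing new to merge.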
+
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "merge" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai merge [VERSION]")
+ parser.add_option("-s", "--star-merge", action="store_const",
+ dest="action", help="Use star-merge",
+ const="star-merge", default="native-merge")
+ parser.add_option("--update", action="store_const",
+ dest="action", help="Use update picker",
+ const="update")
+ parser.add_option("--diff3", action="store_true",
+ dest="diff3",
+ help="Use diff3 for merge (implies star-merge)")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Performs a merge operation using the specified version.
+ """
+ return
+
+class ELog(BaseCommand):
+ """
+ Produces a raw patchlog and invokes the user's editor
+ """
+ def __init__(self):
+ self.description="Edit a patchlog to commit"
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "elog" command.
+ """
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ if self.tree is None:
+ raise arch.errors.TreeRootError
+
+ edit_log(self.tree)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "merge" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai elog")
+ return parser
+
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Invokes $EDITOR to produce a log for committing.
+ """
+ return
+
+def edit_log(tree):
+ """Makes and edits the log for a tree. Does all kinds of fancy things
+ like log templates and merge summaries and log-for-merge
+
+ :param tree: The tree to edit the log for
+ :type tree: `arch.WorkingTree`
+ """
+ #ensure we have an editor before preparing the log
+ cmdutil.find_editor()
+ log = tree.log_message(create=False)
+ log_is_new = False
+ if log is None or cmdutil.prompt("Overwrite log"):
+ if log is not None:
+ os.remove(log.name)
+ log = tree.log_message(create=True)
+ log_is_new = True
+ tmplog = log.name
+ template = tree+"/{arch}/=log-template"
+ if not os.path.exists(template):
+ template = os.path.expanduser("~/.arch-params/=log-template")
+ if not os.path.exists(template):
+ template = None
+ if template:
+ shutil.copyfile(template, tmplog)
+
+ new_merges = list(cmdutil.iter_new_merges(tree,
+ tree.tree_version))
+ log["Summary"] = merge_summary(new_merges, tree.tree_version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+ mergestuff = cmdutil.log_for_merge(tree)
+ log.description += mergestuff
+ log.save()
+ try:
+ cmdutil.invoke_editor(log.name)
+ except:
+ if log_is_new:
+ os.remove(log.name)
+ raise
+
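+# edit_log template lookup (as implemented above): a per-tree
+# {arch}/=log-template takes precedence over ~/.arch-params/=log-template;
+# if neither exists the log is left as freshly created.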
+def merge_summary(new_merges, tree_version):
+ if len(new_merges) == 0:
+ return ""
+ if len(new_merges) == 1:
+ summary = new_merges[0].summary
+ else:
+ summary = "Merge"
+
+ credits = []
+ for merge in new_merges:
+ if arch.my_id() != merge.creator:
+ name = re.sub("<.*>", "", merge.creator).rstrip(" ")
+ if not name in credits:
+ credits.append(name)
+ else:
+ version = merge.revision.version
+ if version.archive == tree_version.archive:
+ if not version.nonarch in credits:
+ credits.append(version.nonarch)
+ elif not str(version) in credits:
+ credits.append(str(version))
+
+ return ("%s (%s)") % (summary, ", ".join(credits))
+
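+# merge_summary example (hypothetical patchlogs): merging two revisions whose
+# creator is "John Doe <jdoe@example.com>" (not the current user) yields
+# "Merge (John Doe)"; a single merge reuses that patchlog's own summary line,
+# e.g. "Fix frobnication (John Doe)".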
+class MirrorArchive(BaseCommand):
+ """
+ Updates a mirror from an archive
+ """
+ def __init__(self):
+ self.description="Update a mirror from an archive"
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revision" command.
+ """
+
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ if len(args) > 1:
+ raise GetHelp
+ try:
+ tree = arch.tree_root()
+ except:
+ tree = None
+
+ if len(args) == 0:
+ if tree is not None:
+ name = str(tree.tree_version)
+ else:
+ name = cmdutil.expand_alias(args[0], tree)
+ name = arch.NameParser(name)
+
+ to_arch = name.get_archive()
+ from_arch = cmdutil.get_mirror_source(arch.Archive(to_arch))
+ limit = name.get_nonarch()
+
+ iter = arch_core.mirror_archive(from_arch,to_arch, limit)
+ for line in arch.chatter_classifier(iter):
+ cmdutil.colorize(line)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai mirror-archive ARCHIVE")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Updates a mirror from an archive. If a branch, package, or version is
+supplied, only changes under it are mirrored.
+ """
+ return
+
+def help_tree_spec():
+ print """Specifying revisions (default: tree)
+Revisions may be specified by alias, revision, version or patchlevel.
+Revisions or versions may be fully qualified. Unqualified revisions, versions,
+or patchlevels use the archive of the current project tree. Versions will
+use the latest patchlevel in the tree. Patchlevels will use the current tree-
+version.
+
+Use "alias" to list available (user and automatic) aliases."""
+
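+# Examples of revision specifiers accepted above (hypothetical names):
+#   patch-10                                           patchlevel in the tree-version
+#   foo--devel--1.0                                     version; latest patchlevel in the tree
+#   jdoe@example.com--2004/foo--devel--1.0--patch-10    fully-qualified revision
+#   tcur, acur, tprev:2                                 aliases (see "alias")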
+def help_aliases(tree):
+ print """Auto-generated aliases
+ acur : The latest revision in the archive of the tree-version. You can specify
+ a different version like so: acur:foo--bar--0 (aliases can be used)
+ tcur : (tree current) The latest revision in the tree of the tree-version.
+ You can specify a different version like so: tcur:foo--bar--0 (aliases
+ can be used).
+tprev : (tree previous) The previous revision in the tree of the tree-version.
+ To specify an older revision, use a number, e.g. "tprev:4"
+ tanc : (tree ancestor) The ancestor revision of the tree
+ To specify an older revision, use a number, e.g. "tanc:4"
+tdate : (tree date) The latest revision from a given date (e.g. "tdate:July 6")
+ tmod : (tree modified) The latest revision to modify a given file
+ (e.g. "tmod:engine.cpp" or "tmod:engine.cpp:16")
+ ttag : (tree tag) The revision that was tagged into the current tree revision,
+ according to the tree.
+tagcur: (tag current) The latest revision of the version that the current tree
+ was tagged from.
+mergeanc : The common ancestor of the current tree and the specified revision.
+ Defaults to the first partner-version's latest revision or to tagcur.
+ """
+ print "User aliases"
+ for parts in ancillary.iter_all_alias(tree):
+ print parts[0].rjust(10)+" : "+parts[1]
+
+
+class Inventory(BaseCommand):
+ """List the status of files in the tree"""
+ def __init__(self):
+ self.description=self.__doc__
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revision" command.
+ """
+
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ tree = arch.tree_root()
+ categories = []
+
+ if (options.source):
+ categories.append(arch_core.SourceFile)
+ if (options.precious):
+ categories.append(arch_core.PreciousFile)
+ if (options.backup):
+ categories.append(arch_core.BackupFile)
+ if (options.junk):
+ categories.append(arch_core.JunkFile)
+
+ if len(categories) == 1:
+ show_leading = False
+ else:
+ show_leading = True
+
+ if len(categories) == 0:
+ categories = None
+
+ if options.untagged:
+ categories = arch_core.non_root
+ show_leading = False
+ tagged = False
+ else:
+ tagged = None
+
+ for file in arch_core.iter_inventory_filter(tree, None,
+ control_files=options.control_files,
+ categories = categories, tagged=tagged):
+ print arch_core.file_line(file,
+ category = show_leading,
+ untagged = show_leading,
+ id = options.ids)
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "revision" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai inventory [options]")
+ parser.add_option("--ids", action="store_true", dest="ids",
+ help="Show file ids")
+ parser.add_option("--control", action="store_true",
+ dest="control_files", help="include control files")
+ parser.add_option("--source", action="store_true", dest="source",
+ help="List source files")
+ parser.add_option("--backup", action="store_true", dest="backup",
+ help="List backup files")
+ parser.add_option("--precious", action="store_true", dest="precious",
+ help="List precious files")
+ parser.add_option("--junk", action="store_true", dest="junk",
+ help="List junk files")
+ parser.add_option("--unrecognized", action="store_true",
+ dest="unrecognized", help="List unrecognized files")
+ parser.add_option("--untagged", action="store_true",
+ dest="untagged", help="List only untagged files")
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Lists the status of files in the tree:
+S source
+P precious
+B backup
+J junk
+U unrecognized
+T tree root
+? untagged-source
+Leading letters are not displayed if only one kind of file is shown
+ """
+ return
+
+
+class Alias(BaseCommand):
+ """List or adjust aliases"""
+ def __init__(self):
+ self.description=self.__doc__
+
+ def get_completer(self, arg, index):
+ if index > 2:
+ return ()
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+ if index == 0:
+ return [part[0]+" " for part in ancillary.iter_all_alias(self.tree)]
+ elif index == 1:
+ return cmdutil.iter_revision_completions(arg, self.tree)
+
+
+ def do_command(self, cmdargs):
+ """
+ Master function that perfoms the "revision" command.
+ """
+
+ parser=self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ self.tree = arch.tree_root()
+ except:
+ self.tree = None
+
+
+ try:
+ options.action(args, options)
+ except cmdutil.ForbiddenAliasSyntax, e:
+ raise CommandFailedWrapper(e)
+
+ def arg_dispatch(self, args, options):
+ """Add, modify, or list aliases, depending on number of arguments
+
+ :param args: The list of commandline arguments
+ :type args: list of str
+ :param options: The commandline options
+ """
+ if len(args) == 0:
+ help_aliases(self.tree)
+ return
+ elif len(args) == 1:
+ self.print_alias(args[0])
+ elif (len(args)) == 2:
+ self.add(args[0], args[1], options)
+ else:
+ raise cmdutil.GetHelp
+
+ def print_alias(self, alias):
+ answer = None
+ for pair in ancillary.iter_all_alias(self.tree):
+ if pair[0] == alias:
+ answer = pair[1]
+ if answer is not None:
+ print answer
+ else:
+ print "The alias %s is not assigned." % alias
+
+ def add(self, alias, expansion, options):
+ """Add or modify aliases
+
+ :param alias: The alias name to create/modify
+ :type alias: str
+ :param expansion: The expansion to assign to the alias name
+ :type expansion: str
+ :param options: The commandline options
+ """
+ newlist = ""
+ written = False
+ new_line = "%s=%s\n" % (alias, cmdutil.expand_alias(expansion,
+ self.tree))
+ ancillary.check_alias(new_line.rstrip("\n"), [alias, expansion])
+
+ for pair in self.get_iterator(options):
+ if pair[0] != alias:
+ newlist+="%s=%s\n" % (pair[0], pair[1])
+ elif not written:
+ newlist+=new_line
+ written = True
+ if not written:
+ newlist+=new_line
+ self.write_aliases(newlist, options)
+
+ def delete(self, args, options):
+ """Delete the specified alias
+
+ :param args: The list of arguments
+ :type args: list of str
+ :param options: The commandline options
+ """
+ deleted = False
+ if len(args) != 1:
+ raise cmdutil.GetHelp
+ newlist = ""
+ for pair in self.get_iterator(options):
+ if pair[0] != args[0]:
+ newlist+="%s=%s\n" % (pair[0], pair[1])
+ else:
+ deleted = True
+ if not deleted:
+ raise errors.NoSuchAlias(args[0])
+ self.write_aliases(newlist, options)
+
+ def get_alias_file(self, options):
+ """Return the name of the alias file to use
+
+ :param options: The commandline options
+ """
+ if options.tree:
+ if self.tree is None:
+ self.tree = arch.tree_root()
+ return str(self.tree)+"/{arch}/+aliases"
+ else:
+ return "~/.aba/aliases"
+
+ def get_iterator(self, options):
+ """Return the alias iterator to use
+
+ :param options: The commandline options
+ """
+ return ancillary.iter_alias(self.get_alias_file(options))
+
+ def write_aliases(self, newlist, options):
+ """Safely rewrite the alias file
+ :param newlist: The new list of aliases
+ :type newlist: str
+ :param options: The commandline options
+ """
+ filename = os.path.expanduser(self.get_alias_file(options))
+ file = cmdutil.NewFileVersion(filename)
+ file.write(newlist)
+ file.commit()
+
+
+ def get_parser(self):
+ """
+ Returns the options parser to use for the "alias" command.
+
+ :rtype: cmdutil.CmdOptionParser
+ """
+ parser=cmdutil.CmdOptionParser("fai alias [ALIAS] [NAME]")
+ parser.add_option("-d", "--delete", action="store_const", dest="action",
+ const=self.delete, default=self.arg_dispatch,
+ help="Delete an alias")
+ parser.add_option("--tree", action="store_true", dest="tree",
+ help="Create a per-tree alias", default=False)
+ return parser
+
+ def help(self, parser=None):
+ """
+ Prints a help message.
+
+ :param parser: If supplied, the parser to use for generating help. If \
+ not supplied, it is retrieved.
+ :type parser: cmdutil.CmdOptionParser
+ """
+ if parser==None:
+ parser=self.get_parser()
+ parser.print_help()
+ print """
+Lists current aliases or modifies the list of aliases.
+
+If no arguments are supplied, aliases will be listed. If two arguments are
+supplied, the specified alias will be created or modified. If -d or --delete
+is supplied, the specified alias will be deleted.
+
+You can create aliases that refer to any fully-qualified part of the
+Arch namespace, e.g.
+archive,
+archive/category,
+archive/category--branch,
+archive/category--branch--version (my favourite)
+archive/category--branch--version--patchlevel
+
+Aliases can be used automatically by native commands. To use them
+with external or tla commands, prefix them with ^ (you can do this
+with native commands, too).
+"""
+
+
+class RequestMerge(BaseCommand):
+ """Submit a merge request to Bug Goo"""
+ def __init__(self):
+ self.description=self.__doc__
+
+ def do_command(self, cmdargs):
+ """Submit a merge request
+
+ :param cmdargs: The commandline arguments
+ :type cmdargs: list of str
+ """
+ cmdutil.find_editor()
+ parser = self.get_parser()
+ (options, args) = parser.parse_args(cmdargs)
+ try:
+ self.tree=arch.tree_root()
+ except:
+ self.tree=None
+ base, revisions = self.revision_specs(args)
+ message = self.make_headers(base, revisions)
+ message += self.make_summary(revisions)
+ path = self.edit_message(message)
+ message = self.tidy_message(path)
+ if cmdutil.prompt("Send merge"):
+ self.send_message(message)
+ print "Merge request sent"
+
+ def make_headers(self, base, revisions):
+ """Produce email and Bug Goo header strings
+
+ :param base: The base revision to apply merges to
+ :type base: `arch.Revision`
+ :param revisions: The revisions to replay into the base
+ :type revisions: list of `arch.Patchlog`
+ :return: The headers
+ :rtype: str
+ """
+ headers = "To: gnu-arch-users@gnu.org\n"
+ headers += "From: %s\n" % options.fromaddr
+ if len(revisions) == 1:
+ headers += "Subject: [MERGE REQUEST] %s\n" % revisions[0].summary
+ else:
+ headers += "Subject: [MERGE REQUEST]\n"
+ headers += "\n"
+ headers += "Base-Revision: %s\n" % base
+ for revision in revisions:
+ headers += "Revision: %s\n" % revision.revision
+ headers += "Bug: \n\n"
+ return headers
+
+ def make_summary(self, logs):
+ """Generate a summary of merges
+
+ :param logs: the patchlogs that were directly added by the merges
+ :type logs: list of `arch.Patchlog`
+ :return: the summary
+ :rtype: str
+ """
+ summary = ""
+ for log in logs:
+ summary+=str(log.revision)+"\n"
+ summary+=log.summary+"\n"
+ if log.description.strip():
+ summary+=log.description.strip('\n')+"\n\n"
+ return summary
+
+ def revision_specs(self, args):
+ """Determine the base and merge revisions from tree and arguments.
+
+ :param args: The parsed arguments
+ :type args: list of str
+ :return: The base revision and merge revisions
+ :rtype: `arch.Revision`, list of `arch.Patchlog`
+ """
+ if len(args) > 0:
+ target_revision = cmdutil.determine_revision_arch(self.tree,
+ args[0])
+ else:
+ target_revision = cmdutil.tree_latest(self.tree)
+ if len(args) > 1:
+ merges = [ arch.Patchlog(cmdutil.determine_revision_arch(
+ self.tree, f)) for f in args[1:] ]
+ else:
+ if self.tree is None:
+ raise CantDetermineRevision("", "Not in a project tree")
+ merge_iter = cmdutil.iter_new_merges(self.tree,
+ target_revision.version,
+ False)
+ merges = [f for f in cmdutil.direct_merges(merge_iter)]
+ return (target_revision, merges)
+
+ def edit_message(self, message):
+ """Edit an email message in the user's standard editor
+
+ :param message: The message to edit
+ :type message: str
+ :return: the path of the edited message
+ :rtype: str
+ """
+ if self.tree is None:
+ path = os.getcwd()
+ else:
+ path = self.tree
+ path += "/,merge-request"
+ file = open(path, 'w')
+ file.write(message)
+ file.flush()
+ cmdutil.invoke_editor(path)
+ return path
+
+ def tidy_message(self, path):
+ """Validate and clean up message.
+
+ :param path: The path to the message to clean up
+ :type path: str
+ :return: The parsed message
+ :rtype: `email.Message`
+ """
+ mail = email.message_from_file(open(path))
+ if mail["Subject"].strip() == "[MERGE REQUEST]":
+ raise BlandSubject
+
+ request = email.message_from_string(mail.get_payload())
+ if request.has_key("Bug"):
+ if request["Bug"].strip()=="":
+ del request["Bug"]
+ mail.set_payload(request.as_string())
+ return mail
+
+ def send_message(self, message):
+ """Send a message, using its headers to address it.
+
+ :param message: The message to send
+ :type message: `email.Message`"""
+ server = smtplib.SMTP()
+ server.sendmail(message['From'], message['To'], message.as_string())
+ server.quit()
+
+ def help(self, parser=None):
+ """Print a usage message
+
+ :param parser: The options parser to use
+ :type parser: `cmdutil.CmdOptionParser`
+ """
+ if parser is None:
+ parser = self.get_parser()
+ parser.print_help()
+ print """
+Sends a merge request formatted for Bug Goo. Intended use: get the tree
+you'd like to merge into. Apply the merges you want. Invoke request-merge.
+The merge request will open in your $EDITOR.
+
+When no TARGET is specified, it uses the current tree revision. When
+no MERGE is specified, it uses the direct merges (as in "revisions
+--direct-merges"). But you can specify just the TARGET, or all the MERGE
+revisions.
+"""
+
+ def get_parser(self):
+ """Produce a commandline parser for this command.
+
+ :rtype: `cmdutil.CmdOptionParser`
+ """
+ parser=cmdutil.CmdOptionParser("request-merge [TARGET] [MERGE1...]")
+ return parser
+
+commands = {
+'changes' : Changes,
+'help' : Help,
+'update': Update,
+'apply-changes':ApplyChanges,
+'cat-log': CatLog,
+'commit': Commit,
+'revision': Revision,
+'revisions': Revisions,
+'get': Get,
+'revert': Revert,
+'shell': Shell,
+'add-id': AddID,
+'merge': Merge,
+'elog': ELog,
+'mirror-archive': MirrorArchive,
+'ninventory': Inventory,
+'alias' : Alias,
+'request-merge': RequestMerge,
+}
+suggestions = {
+'apply-delta' : "Try \"apply-changes\".",
+'delta' : "To compare two revisions, use \"changes\".",
+'diff-rev' : "To compare two revisions, use \"changes\".",
+'undo' : "To undo local changes, use \"revert\".",
+'undelete' : "To undo only deletions, use \"revert --deletions\"",
+'missing-from' : "Try \"revisions --missing-from\".",
+'missing' : "Try \"revisions --missing\".",
+'missing-merge' : "Try \"revisions --partner-missing\".",
+'new-merges' : "Try \"revisions --new-merges\".",
+'cachedrevs' : "Try \"revisions --cacherevs\". (no 'd')",
+'logs' : "Try \"revisions --logs\"",
+'tree-source' : "Use the \"^ttag\" alias (\"revision ^ttag\")",
+'latest-revision' : "Use the \"^acur\" alias (\"revision ^acur\")",
+'change-version' : "Try \"update REVISION\"",
+'tree-revision' : "Use the \"^tcur\" alias (\"revision ^tcur\")",
+'rev-depends' : "Use revisions --dependencies",
+'auto-get' : "Plain get will do archive lookups",
+'tagline' : "Use add-id. It uses taglines in tagline trees",
+'emlog' : "Use elog. It automatically adds log-for-merge text, if any",
+'library-revisions' : "Use revisions --library",
+'file-revert' : "Use revert FILE"
+}
+# arch-tag: 19d5739d-3708-486c-93ba-deecc3027fc7
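The file above ends with two plain dictionaries: commands maps command names to command classes, and suggestions maps common near-miss names to hint strings. As a hypothetical sketch (the dispatch function below is illustrative, not part of the file), such a table is typically consumed like this, in the same Python 2 style as the surrounding code:

    def dispatch(name, cmdargs):
        # Look the name up in the command table defined above.
        try:
            command_class = commands[name]
        except KeyError:
            # Fall back to the near-miss hints in the suggestions table.
            hint = suggestions.get(name)
            if hint is not None:
                print "Unknown command %r. %s" % (name, hint)
            else:
                print "Unknown command %r." % name
            return
        # Command classes expose do_command(cmdargs), as RequestMerge does above.
        command_class().do_command(cmdargs)

    dispatch('cachedrevs', [])   # prints the hint to use "revisions --cacherevs" instead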
diff --git a/bzrlib/tests/test_patches_data/orig-2 b/bzrlib/tests/test_patches_data/orig-2
new file mode 100644
index 0000000..6af5b11
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig-2
@@ -0,0 +1,558 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4, or just the position when range == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ import sys
+ """Make sure we can accurately look up mod line from orig"""
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
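The orig-2 fixture above is a complete, self-contained unified-diff parser: get_patch_names reads the ---/+++ header, iter_hunks turns @@ headers and hunk lines into Hunk objects, and iter_patched replays a patch over the original lines, raising PatchConflict on any mismatch (exact, not fuzzy, as its docstring says). A small hypothetical round-trip sketch, assuming the functions defined in that fixture are in scope (Python 2, matching the fixture):

    patch_text = ("--- orig/hello.txt\n"
                  "+++ mod/hello.txt\n"
                  "@@ -1,2 +1,2 @@\n"
                  "-goodbye\n"
                  "+hello\n"
                  " world\n")
    patch = parse_patch(iter(patch_text.splitlines(True)))
    print str(patch) == patch_text      # True: __str__ regenerates the input exactly
    print patch.stats_str()             # 1 inserts, 1 removes in 1 hunks
    patched = "".join(iter_patched(["goodbye\n", "world\n"],
                                    patch_text.splitlines(True)))
    print repr(patched)                 # 'hello\nworld\n'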
diff --git a/bzrlib/tests/test_patches_data/orig-3 b/bzrlib/tests/test_patches_data/orig-3
new file mode 100644
index 0000000..a937595
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig-3
@@ -0,0 +1,560 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4, or just the position when range == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+ for line in orig_lines:
+ yield line
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ import sys
+ """Make sure we can accurately look up mod line from orig"""
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
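orig-3 above is the same parser as orig-2 except for one visible detail: its iter_patched finishes by yielding whatever original lines remain after the last hunk, so trailing content the patch never touches is passed through instead of being dropped. These fixtures also rely on the bare-position shorthand in hunk headers, where "-1" means position 1 with an implied range of 1. A hypothetical sketch, again assuming the fixture's functions are in scope:

    print parse_range("7")       # (7, 1): a bare position implies a range of 1
    print parse_range("7,3")     # (7, 3)
    hunk = hunk_from_header("@@ -1 +1,2 @@\n")
    print hunk.orig_pos, hunk.orig_range, hunk.mod_pos, hunk.mod_range   # 1 1 1 2
    print repr(str(hunk))        # '@@ -1 +1,2 @@\n': range_str keeps the shorthand on output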
diff --git a/bzrlib/tests/test_patches_data/orig-4 b/bzrlib/tests/test_patches_data/orig-4
new file mode 100644
index 0000000..6af5b11
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig-4
@@ -0,0 +1,558 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4, or just the position when range == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iteraties through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the indext of the first character that differs betweeen two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid valid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ import sys
+ """Make sure we can accurately look up mod line from orig"""
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
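A recurring subtlety in these fixtures is the "\ No newline at end of file" marker (NO_NL). On input, iter_lines_handle_nl folds the marker into the preceding line by stripping that line's newline; on output, HunkLine.get_str re-emits the marker whenever a hunk line's contents do not end in a newline. A hypothetical sketch, assuming the fixture's NO_NL, iter_lines_handle_nl and InsertLine are in scope:

    lines = ["+hello\n", "+world\n", NO_NL]
    print list(iter_lines_handle_nl(iter(lines)))
    # ['+hello\n', '+world']: the marker is consumed and the final newline dropped
    print repr(str(InsertLine("world")))
    # '+world\n\\ No newline at end of file\n': the marker comes back when serialising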
diff --git a/bzrlib/tests/test_patches_data/orig-5 b/bzrlib/tests/test_patches_data/orig-5
new file mode 100644
index 0000000..6af5b11
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig-5
@@ -0,0 +1,558 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
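+
+# Sketch of the parse_line() contract relied on below: the leading character
+# selects the HunkLine subclass, str() restores the original text, and the
+# NO_NL marker is passed through unchanged.
+assert isinstance(parse_line(" context\n"), ContextLine)
+assert isinstance(parse_line("+added\n"), InsertLine)
+assert str(parse_line("-removed\n")) == "-removed\n"
+assert parse_line(NO_NL) == NO_NL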
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4 except when range == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
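+# Round-trip sketch (header strings borrowed from the tests further down):
+# hunk_from_header() and Hunk.get_header() are inverses, with a range of 1
+# collapsing to a bare position.
+assert hunk_from_header("@@ -34,11 +50,6 @@\n").get_header() == "@@ -34,11 +50,6 @@\n"
+assert hunk_from_header("@@ -1 +0,0 @@\n").get_header() == "@@ -1 +0,0 @@\n"
+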
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iterates through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
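+# Minimal sketch of the NO_NL handling: the marker strips the newline that was
+# appended to the preceding line, and already-terminated input passes through.
+assert list(iter_lines_handle_nl(iter(["hello\n", NO_NL]))) == ["hello"]
+assert list(iter_lines_handle_nl(iter(["hello\n", "world\n"]))) == ["hello\n", "world\n"]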
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the index of the first character that differs between two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
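+# Worked examples, assuming the truncation semantics above: comparison stops
+# at the shorter text, so a pure extension reports no difference.
+assert difference_index("abcd", "abXd") == 2
+assert difference_index("abc", "abc") is None
+assert difference_index("abc", "abcdef") is None
+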
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+
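+# Minimal exact-patch sketch (_example_patch is a throwaway name): replace the
+# second line of a two-line file.  Note that this version of iter_patched does
+# not re-emit original lines that follow the last hunk.
+_example_patch = ["--- orig\n", "+++ mod\n", "@@ -2,1 +2,1 @@\n", "-b\n", "+d\n"]
+assert list(iter_patched(["a\n", "b\n"], _example_patch)) == ["a\n", "d\n"]
+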
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ """Make sure we can accurately look up mod line from orig"""
+ import sys
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
diff --git a/bzrlib/tests/test_patches_data/orig-6 b/bzrlib/tests/test_patches_data/orig-6
new file mode 100644
index 0000000..6af5b11
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig-6
@@ -0,0 +1,558 @@
+# Copyright (C) 2004, 2005 Aaron Bentley
+# <aaron.bentley@utoronto.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+class PatchSyntax(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+
+
+class MalformedPatchHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed patch header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedHunkHeader(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed hunk header. %s\n%r" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+class MalformedLine(PatchSyntax):
+ def __init__(self, desc, line):
+ self.desc = desc
+ self.line = line
+ msg = "Malformed line. %s\n%s" % (self.desc, self.line)
+ PatchSyntax.__init__(self, msg)
+
+def get_patch_names(iter_lines):
+ try:
+ line = iter_lines.next()
+ if not line.startswith("--- "):
+ raise MalformedPatchHeader("No orig name", line)
+ else:
+ orig_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No orig line", "")
+ try:
+ line = iter_lines.next()
+ if not line.startswith("+++ "):
+ raise PatchSyntax("No mod name")
+ else:
+ mod_name = line[4:].rstrip("\n")
+ except StopIteration:
+ raise MalformedPatchHeader("No mod line", "")
+ return (orig_name, mod_name)
+
+def parse_range(textrange):
+ """Parse a patch range, handling the "1" special-case
+
+ :param textrange: The text to parse
+ :type textrange: str
+ :return: the position and range, as a tuple
+ :rtype: (int, int)
+ """
+ tmp = textrange.split(',')
+ if len(tmp) == 1:
+ pos = tmp[0]
+ range = "1"
+ else:
+ (pos, range) = tmp
+ pos = int(pos)
+ range = int(range)
+ return (pos, range)
+
+
+def hunk_from_header(line):
+ if not line.startswith("@@") or not line.endswith("@@\n") \
+ or not len(line) > 4:
+ raise MalformedHunkHeader("Does not start and end with @@.", line)
+ try:
+ (orig, mod) = line[3:-4].split(" ")
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if not orig.startswith('-') or not mod.startswith('+'):
+ raise MalformedHunkHeader("Positions don't start with + or -.", line)
+ try:
+ (orig_pos, orig_range) = parse_range(orig[1:])
+ (mod_pos, mod_range) = parse_range(mod[1:])
+ except Exception, e:
+ raise MalformedHunkHeader(str(e), line)
+ if mod_range < 0 or orig_range < 0:
+ raise MalformedHunkHeader("Hunk range is negative", line)
+ return Hunk(orig_pos, orig_range, mod_pos, mod_range)
+
+
+class HunkLine:
+ def __init__(self, contents):
+ self.contents = contents
+
+ def get_str(self, leadchar):
+ if self.contents == "\n" and leadchar == " " and False:
+ return "\n"
+ if not self.contents.endswith('\n'):
+ terminator = '\n' + NO_NL
+ else:
+ terminator = ''
+ return leadchar + self.contents + terminator
+
+
+class ContextLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str(" ")
+
+
+class InsertLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("+")
+
+
+class RemoveLine(HunkLine):
+ def __init__(self, contents):
+ HunkLine.__init__(self, contents)
+
+ def __str__(self):
+ return self.get_str("-")
+
+NO_NL = '\\ No newline at end of file\n'
+__pychecker__="no-returnvalues"
+
+def parse_line(line):
+ if line.startswith("\n"):
+ return ContextLine(line)
+ elif line.startswith(" "):
+ return ContextLine(line[1:])
+ elif line.startswith("+"):
+ return InsertLine(line[1:])
+ elif line.startswith("-"):
+ return RemoveLine(line[1:])
+ elif line == NO_NL:
+ return NO_NL
+ else:
+ raise MalformedLine("Unknown line type", line)
+__pychecker__=""
+
+
+class Hunk:
+ def __init__(self, orig_pos, orig_range, mod_pos, mod_range):
+ self.orig_pos = orig_pos
+ self.orig_range = orig_range
+ self.mod_pos = mod_pos
+ self.mod_range = mod_range
+ self.lines = []
+
+ def get_header(self):
+ return "@@ -%s +%s @@\n" % (self.range_str(self.orig_pos,
+ self.orig_range),
+ self.range_str(self.mod_pos,
+ self.mod_range))
+
+ def range_str(self, pos, range):
+ """Return a file range, special-casing for 1-line files.
+
+ :param pos: The position in the file
+ :type pos: int
+ :param range: The range in the file
+ :type range: int
+ :return: a string in the format 1,4 except when range == 1
+ """
+ if range == 1:
+ return "%i" % pos
+ else:
+ return "%i,%i" % (pos, range)
+
+ def __str__(self):
+ lines = [self.get_header()]
+ for line in self.lines:
+ lines.append(str(line))
+ return "".join(lines)
+
+ def shift_to_mod(self, pos):
+ if pos < self.orig_pos-1:
+ return 0
+ elif pos > self.orig_pos+self.orig_range:
+ return self.mod_range - self.orig_range
+ else:
+ return self.shift_to_mod_lines(pos)
+
+ def shift_to_mod_lines(self, pos):
+ assert (pos >= self.orig_pos-1 and pos <= self.orig_pos+self.orig_range)
+ position = self.orig_pos-1
+ shift = 0
+ for line in self.lines:
+ if isinstance(line, InsertLine):
+ shift += 1
+ elif isinstance(line, RemoveLine):
+ if position == pos:
+ return None
+ shift -= 1
+ position += 1
+ elif isinstance(line, ContextLine):
+ position += 1
+ if position > pos:
+ break
+ return shift
+
+def iter_hunks(iter_lines):
+ hunk = None
+ for line in iter_lines:
+ if line == "\n":
+ if hunk is not None:
+ yield hunk
+ hunk = None
+ continue
+ if hunk is not None:
+ yield hunk
+ hunk = hunk_from_header(line)
+ orig_size = 0
+ mod_size = 0
+ while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
+ hunk_line = parse_line(iter_lines.next())
+ hunk.lines.append(hunk_line)
+ if isinstance(hunk_line, (RemoveLine, ContextLine)):
+ orig_size += 1
+ if isinstance(hunk_line, (InsertLine, ContextLine)):
+ mod_size += 1
+ if hunk is not None:
+ yield hunk
+
+class Patch:
+ def __init__(self, oldname, newname):
+ self.oldname = oldname
+ self.newname = newname
+ self.hunks = []
+
+ def __str__(self):
+ ret = self.get_header()
+ ret += "".join([str(h) for h in self.hunks])
+ return ret
+
+ def get_header(self):
+ return "--- %s\n+++ %s\n" % (self.oldname, self.newname)
+
+ def stats_str(self):
+ """Return a string of patch statistics"""
+ removes = 0
+ inserts = 0
+ for hunk in self.hunks:
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ inserts+=1;
+ elif isinstance(line, RemoveLine):
+ removes+=1;
+ return "%i inserts, %i removes in %i hunks" % \
+ (inserts, removes, len(self.hunks))
+
+ def pos_in_mod(self, position):
+ newpos = position
+ for hunk in self.hunks:
+ shift = hunk.shift_to_mod(position)
+ if shift is None:
+ return None
+ newpos += shift
+ return newpos
+
+ def iter_inserted(self):
+ """Iterates through inserted lines
+
+ :return: Pair of line number, line
+ :rtype: iterator of (int, InsertLine)
+ """
+ for hunk in self.hunks:
+ pos = hunk.mod_pos - 1;
+ for line in hunk.lines:
+ if isinstance(line, InsertLine):
+ yield (pos, line)
+ pos += 1
+ if isinstance(line, ContextLine):
+ pos += 1
+
+def parse_patch(iter_lines):
+ (orig_name, mod_name) = get_patch_names(iter_lines)
+ patch = Patch(orig_name, mod_name)
+ for hunk in iter_hunks(iter_lines):
+ patch.hunks.append(hunk)
+ return patch
+
+
+def iter_file_patch(iter_lines):
+ saved_lines = []
+ for line in iter_lines:
+ if line.startswith('=== '):
+ continue
+ elif line.startswith('--- '):
+ if len(saved_lines) > 0:
+ yield saved_lines
+ saved_lines = []
+ saved_lines.append(line)
+ if len(saved_lines) > 0:
+ yield saved_lines
+
+
+def iter_lines_handle_nl(iter_lines):
+ """
+ Iterates through lines, ensuring that lines that originally had no
+ terminating \n are produced without one. This transformation may be
+ applied at any point up until hunk line parsing, and is safe to apply
+ repeatedly.
+ """
+ last_line = None
+ for line in iter_lines:
+ if line == NO_NL:
+ assert last_line.endswith('\n')
+ last_line = last_line[:-1]
+ line = None
+ if last_line is not None:
+ yield last_line
+ last_line = line
+ if last_line is not None:
+ yield last_line
+
+
+def parse_patches(iter_lines):
+ iter_lines = iter_lines_handle_nl(iter_lines)
+ return [parse_patch(f.__iter__()) for f in iter_file_patch(iter_lines)]
+
+
+def difference_index(atext, btext):
+ """Find the index of the first character that differs between two texts
+
+ :param atext: The first text
+ :type atext: str
+ :param btext: The second text
+ :type btext: str
+ :return: The index, or None if there are no differences within the range
+ :rtype: int or NoneType
+ """
+ length = len(atext)
+ if len(btext) < length:
+ length = len(btext)
+ for i in range(length):
+ if atext[i] != btext[i]:
+ return i;
+ return None
+
+class PatchConflict(Exception):
+ def __init__(self, line_no, orig_line, patch_line):
+ orig = orig_line.rstrip('\n')
+ patch = str(patch_line).rstrip('\n')
+ msg = 'Text contents mismatch at line %d. Original has "%s",'\
+ ' but patch says it should be "%s"' % (line_no, orig, patch)
+ Exception.__init__(self, msg)
+
+
+def iter_patched(orig_lines, patch_lines):
+ """Iterate through a series of lines with a patch applied.
+ This handles a single file, and does exact, not fuzzy patching.
+ """
+ if orig_lines is not None:
+ orig_lines = orig_lines.__iter__()
+ seen_patch = []
+ patch_lines = iter_lines_handle_nl(patch_lines.__iter__())
+ get_patch_names(patch_lines)
+ line_no = 1
+ for hunk in iter_hunks(patch_lines):
+ while line_no < hunk.orig_pos:
+ orig_line = orig_lines.next()
+ yield orig_line
+ line_no += 1
+ for hunk_line in hunk.lines:
+ seen_patch.append(str(hunk_line))
+ if isinstance(hunk_line, InsertLine):
+ yield hunk_line.contents
+ elif isinstance(hunk_line, (ContextLine, RemoveLine)):
+ orig_line = orig_lines.next()
+ if orig_line != hunk_line.contents:
+ raise PatchConflict(line_no, orig_line, "".join(seen_patch))
+ if isinstance(hunk_line, ContextLine):
+ yield orig_line
+ else:
+ assert isinstance(hunk_line, RemoveLine)
+ line_no += 1
+
+import unittest
+import os.path
+class PatchesTester(unittest.TestCase):
+ def datafile(self, filename):
+ data_path = os.path.join(os.path.dirname(__file__), "testdata",
+ filename)
+ return file(data_path, "rb")
+
+ def testValidPatchHeader(self):
+ """Parse a valid patch header"""
+ lines = "--- orig/commands.py\n+++ mod/dommands.py\n".split('\n')
+ (orig, mod) = get_patch_names(lines.__iter__())
+ assert(orig == "orig/commands.py")
+ assert(mod == "mod/dommands.py")
+
+ def testInvalidPatchHeader(self):
+ """Parse an invalid patch header"""
+ lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
+ self.assertRaises(MalformedPatchHeader, get_patch_names,
+ lines.__iter__())
+
+ def testValidHunkHeader(self):
+ """Parse a valid hunk header"""
+ header = "@@ -34,11 +50,6 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 34)
+ assert (hunk.orig_range == 11)
+ assert (hunk.mod_pos == 50)
+ assert (hunk.mod_range == 6)
+ assert (str(hunk) == header)
+
+ def testValidHunkHeader2(self):
+ """Parse a tricky, valid hunk header"""
+ header = "@@ -1 +0,0 @@\n"
+ hunk = hunk_from_header(header);
+ assert (hunk.orig_pos == 1)
+ assert (hunk.orig_range == 1)
+ assert (hunk.mod_pos == 0)
+ assert (hunk.mod_range == 0)
+ assert (str(hunk) == header)
+
+ def makeMalformed(self, header):
+ self.assertRaises(MalformedHunkHeader, hunk_from_header, header)
+
+ def testInvalidHeader(self):
+ """Parse an invalid hunk header"""
+ self.makeMalformed(" -34,11 +50,6 \n")
+ self.makeMalformed("@@ +50,6 -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6 @@")
+ self.makeMalformed("@@ -34.5,11 +50,6 @@\n")
+ self.makeMalformed("@@-34,11 +50,6@@\n")
+ self.makeMalformed("@@ 34,11 50,6 @@\n")
+ self.makeMalformed("@@ -34,11 @@\n")
+ self.makeMalformed("@@ -34,11 +50,6.5 @@\n")
+ self.makeMalformed("@@ -34,11 +50,-6 @@\n")
+
+ def lineThing(self,text, type):
+ line = parse_line(text)
+ assert(isinstance(line, type))
+ assert(str(line)==text)
+
+ def makeMalformedLine(self, text):
+ self.assertRaises(MalformedLine, parse_line, text)
+
+ def testValidLine(self):
+ """Parse a valid hunk line"""
+ self.lineThing(" hello\n", ContextLine)
+ self.lineThing("+hello\n", InsertLine)
+ self.lineThing("-hello\n", RemoveLine)
+
+ def testMalformedLine(self):
+ """Parse invalid hunk lines"""
+ self.makeMalformedLine("hello\n")
+
+ def compare_parsed(self, patchtext):
+ lines = patchtext.splitlines(True)
+ patch = parse_patch(lines.__iter__())
+ pstr = str(patch)
+ i = difference_index(patchtext, pstr)
+ if i is not None:
+ print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
+ self.assertEqual (patchtext, str(patch))
+
+ def testAll(self):
+ """Test parsing a whole patch"""
+ patchtext = """--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
+"""
+ self.compare_parsed(patchtext)
+
+ def testInit(self):
+ """Handle patches missing half the position, range tuple"""
+ patchtext = \
+"""--- orig/__init__.py
++++ mod/__init__.py
+@@ -1 +1,2 @@
+ __docformat__ = "restructuredtext en"
++__doc__ = An alternate Arch commandline interface
+"""
+ self.compare_parsed(patchtext)
+
+
+
+ def testLineLookup(self):
+ """Make sure we can accurately look up mod line from orig"""
+ import sys
+ patch = parse_patch(self.datafile("diff"))
+ orig = list(self.datafile("orig"))
+ mod = list(self.datafile("mod"))
+ removals = []
+ for i in range(len(orig)):
+ mod_pos = patch.pos_in_mod(i)
+ if mod_pos is None:
+ removals.append(orig[i])
+ continue
+ assert(mod[mod_pos]==orig[i])
+ rem_iter = removals.__iter__()
+ for hunk in patch.hunks:
+ for line in hunk.lines:
+ if isinstance(line, RemoveLine):
+ next = rem_iter.next()
+ if line.contents != next:
+ sys.stdout.write(" orig:%spatch:%s" % (next,
+ line.contents))
+ assert(line.contents == next)
+ self.assertRaises(StopIteration, rem_iter.next)
+
+ def testFirstLineRenumber(self):
+ """Make sure we handle lines at the beginning of the hunk"""
+ patch = parse_patch(self.datafile("insert_top.patch"))
+ assert (patch.pos_in_mod(0)==1)
+
+def test():
+ patchesTestSuite = unittest.makeSuite(PatchesTester,'test')
+ runner = unittest.TextTestRunner(verbosity=0)
+ return runner.run(patchesTestSuite)
+
+
+if __name__ == "__main__":
+ test()
+# arch-tag: d1541a25-eac5-4de9-a476-08a7cecd5683
diff --git a/bzrlib/tests/test_patches_data/orig-7 b/bzrlib/tests/test_patches_data/orig-7
new file mode 100644
index 0000000..098091c
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/orig-7
@@ -0,0 +1 @@
+No terminating newline
\ No newline at end of file
diff --git a/bzrlib/tests/test_patches_data/patchtext.patch b/bzrlib/tests/test_patches_data/patchtext.patch
new file mode 100644
index 0000000..197c843
--- /dev/null
+++ b/bzrlib/tests/test_patches_data/patchtext.patch
@@ -0,0 +1,25 @@
+--- orig/commands.py
++++ mod/commands.py
+@@ -1337,7 +1337,8 @@
+
+ def set_title(self, command=None):
+ try:
+- version = self.tree.tree_version.nonarch
++ version = pylon.alias_or_version(self.tree.tree_version, self.tree,
++ full=False)
+ except:
+ version = "[no version]"
+ if command is None:
+@@ -1983,7 +1984,11 @@
+ version)
+ if len(new_merges) > 0:
+ if cmdutil.prompt("Log for merge"):
+- mergestuff = cmdutil.log_for_merge(tree, comp_version)
++ if cmdutil.prompt("changelog for merge"):
++ mergestuff = "Patches applied:\\n"
++ mergestuff += pylon.changelog_for_merge(new_merges)
++ else:
++ mergestuff = cmdutil.log_for_merge(tree, comp_version)
+ log.description += mergestuff
+ log.save()
+ try:
diff --git a/bzrlib/tests/test_permissions.py b/bzrlib/tests/test_permissions.py
new file mode 100644
index 0000000..3bed258
--- /dev/null
+++ b/bzrlib/tests/test_permissions.py
@@ -0,0 +1,272 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+# -*- coding: utf-8 -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for bzr setting permissions.
+
+Files which are created underneath .bzr/ should inherit its permissions.
+So if the directory is group writable, the files and subdirs should be as well.
+
+In the future, when we have Repository/Branch/Checkout information, the
+permissions should be inherited individually, rather than all be the same.
+"""
+
+# TODO: jam 20051215 There are no tests for ftp yet, because we have no ftp server
+# TODO: jam 20051215 Currently the default behavior for 'bzr branch' is just
+# defined by the local umask. This isn't terrible, is it
+# the truly desired behavior?
+
+import os
+import sys
+
+from bzrlib import urlutils
+from bzrlib.branch import Branch
+from bzrlib.controldir import ControlDir
+from bzrlib.tests import TestCaseWithTransport, TestSkipped
+from bzrlib.tests.test_sftp_transport import TestCaseWithSFTPServer
+from bzrlib.workingtree import WorkingTree
+
+
+def chmod_r(base, file_mode, dir_mode):
+ """Recursively chmod from a base directory"""
+ os.chmod(base, dir_mode)
+ for root, dirs, files in os.walk(base):
+ for d in dirs:
+ p = os.path.join(root, d)
+ os.chmod(p, dir_mode)
+ for f in files:
+ p = os.path.join(root, f)
+ os.chmod(p, file_mode)
+
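+# Octal-mode sketch for the assertions below (Python 2 octal literals): under
+# the 0022 umask used in test_sftp_server_modes, a file created with mode 0666
+# ends up group/other read-only, while a transport-level mode is applied as-is.
+assert 0666 & ~0022 == 0644
+assert 0777 & ~0022 == 0755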
+
+def check_mode_r(test, base, file_mode, dir_mode, include_base=True):
+ """Check that all permissions match
+
+ :param test: The TestCase being run
+ :param base: The path to the root directory to check
+ :param file_mode: The mode for all files
+ :param dir_mode: The mode for all directories
+ :param include_base: If false, only check the subdirectories
+ """
+ t = test.get_transport()
+ if include_base:
+ test.assertTransportMode(t, base, dir_mode)
+ for root, dirs, files in os.walk(base):
+ for d in dirs:
+ p = '/'.join([urlutils.quote(x) for x in root.split('/\\') + [d]])
+ test.assertTransportMode(t, p, dir_mode)
+ for f in files:
+ p = os.path.join(root, f)
+ p = '/'.join([urlutils.quote(x) for x in root.split('/\\') + [f]])
+ test.assertTransportMode(t, p, file_mode)
+
+
+class TestPermissions(TestCaseWithTransport):
+
+ def test_new_files(self):
+ if sys.platform == 'win32':
+ raise TestSkipped('chmod has no effect on win32')
+
+ t = self.make_branch_and_tree('.')
+ b = t.branch
+ with open('a', 'wb') as f: f.write('foo\n')
+ # ensure check_mode_r works with capital-letter file-ids like TREE_ROOT
+ t.add('a', 'CAPS-ID')
+ t.commit('foo')
+
+ chmod_r('.bzr', 0644, 0755)
+ check_mode_r(self, '.bzr', 0644, 0755)
+
+ # although we are modifying the filesystem
+ # underneath the objects, they are not locked, and thus it must
+ # be safe for most operations. But here we want to observe a
+ # mode change in the control bits, which currently do not refresh
+ # when a new lock is taken out.
+ t = WorkingTree.open('.')
+ b = t.branch
+ self.assertEqualMode(0755, b.control_files._dir_mode)
+ self.assertEqualMode(0644, b.control_files._file_mode)
+ self.assertEqualMode(0755, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(0644, b.bzrdir._get_file_mode())
+
+ # Modifying a file shouldn't break the permissions
+ with open('a', 'wb') as f: f.write('foo2\n')
+ t.commit('foo2')
+ # The mode should be maintained after commit
+ check_mode_r(self, '.bzr', 0644, 0755)
+
+ # Adding a new file should maintain the permissions
+ with open('b', 'wb') as f: f.write('new b\n')
+ t.add('b')
+ t.commit('new b')
+ check_mode_r(self, '.bzr', 0644, 0755)
+
+ # Recursively update the modes of all files
+ chmod_r('.bzr', 0664, 0775)
+ check_mode_r(self, '.bzr', 0664, 0775)
+ t = WorkingTree.open('.')
+ b = t.branch
+ self.assertEqualMode(0775, b.control_files._dir_mode)
+ self.assertEqualMode(0664, b.control_files._file_mode)
+ self.assertEqualMode(0775, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(0664, b.bzrdir._get_file_mode())
+
+ with open('a', 'wb') as f: f.write('foo3\n')
+ t.commit('foo3')
+ check_mode_r(self, '.bzr', 0664, 0775)
+
+ with open('c', 'wb') as f: f.write('new c\n')
+ t.add('c')
+ t.commit('new c')
+ check_mode_r(self, '.bzr', 0664, 0775)
+
+ def test_new_files_group_sticky_bit(self):
+ if sys.platform == 'win32':
+ raise TestSkipped('chmod has no effect on win32')
+ elif sys.platform == 'darwin' or 'freebsd' in sys.platform:
+ # FreeBSD-based platforms create temp dirs with the 'wheel' group,
+ # which users are not likely to be in, and this prevents us from
+ # setting the sgid bit
+ os.chown(self.test_dir, os.getuid(), os.getgid())
+
+ t = self.make_branch_and_tree('.')
+ b = t.branch
+
+ # Test the group sticky bit
+ # Recursively update the modes of all files
+ chmod_r('.bzr', 0664, 02775)
+ check_mode_r(self, '.bzr', 0664, 02775)
+ t = WorkingTree.open('.')
+ b = t.branch
+ self.assertEqualMode(02775, b.control_files._dir_mode)
+ self.assertEqualMode(0664, b.control_files._file_mode)
+ self.assertEqualMode(02775, b.bzrdir._get_dir_mode())
+ self.assertEqualMode(0664, b.bzrdir._get_file_mode())
+
+ with open('a', 'wb') as f: f.write('foo4\n')
+ t.commit('foo4')
+ check_mode_r(self, '.bzr', 0664, 02775)
+
+ with open('d', 'wb') as f: f.write('new d\n')
+ t.add('d')
+ t.commit('new d')
+ check_mode_r(self, '.bzr', 0664, 02775)
+
+
+class TestSftpPermissions(TestCaseWithSFTPServer):
+
+ def test_new_files(self):
+ if sys.platform == 'win32':
+ raise TestSkipped('chmod has no effect on win32')
+ # Though it would be nice to test that SFTP to a server
+ # which does support chmod has the right effect
+
+ # bodge around for stubsftpserver not letting us connect
+ # more than once
+ _t = self.get_transport()
+
+ os.mkdir('local')
+ t_local = self.make_branch_and_tree('local')
+ b_local = t_local.branch
+ with open('local/a', 'wb') as f: f.write('foo\n')
+ t_local.add('a')
+ t_local.commit('foo')
+
+ # Delete them because we are modifying the filesystem underneath them
+ chmod_r('local/.bzr', 0644, 0755)
+ check_mode_r(self, 'local/.bzr', 0644, 0755)
+
+ t = WorkingTree.open('local')
+ b_local = t.branch
+ self.assertEqualMode(0755, b_local.control_files._dir_mode)
+ self.assertEqualMode(0644, b_local.control_files._file_mode)
+ self.assertEqualMode(0755, b_local.bzrdir._get_dir_mode())
+ self.assertEqualMode(0644, b_local.bzrdir._get_file_mode())
+
+ os.mkdir('sftp')
+ sftp_url = self.get_url('sftp')
+ b_sftp = ControlDir.create_branch_and_repo(sftp_url)
+
+ b_sftp.pull(b_local)
+ del b_sftp
+ chmod_r('sftp/.bzr', 0644, 0755)
+ check_mode_r(self, 'sftp/.bzr', 0644, 0755)
+
+ b_sftp = Branch.open(sftp_url)
+ self.assertEqualMode(0755, b_sftp.control_files._dir_mode)
+ self.assertEqualMode(0644, b_sftp.control_files._file_mode)
+ self.assertEqualMode(0755, b_sftp.bzrdir._get_dir_mode())
+ self.assertEqualMode(0644, b_sftp.bzrdir._get_file_mode())
+
+ with open('local/a', 'wb') as f: f.write('foo2\n')
+ t_local.commit('foo2')
+ b_sftp.pull(b_local)
+ # The mode should be maintained after commit
+ check_mode_r(self, 'sftp/.bzr', 0644, 0755)
+
+ with open('local/b', 'wb') as f: f.write('new b\n')
+ t_local.add('b')
+ t_local.commit('new b')
+ b_sftp.pull(b_local)
+ check_mode_r(self, 'sftp/.bzr', 0644, 0755)
+
+ del b_sftp
+ # Recursively update the modes of all files
+ chmod_r('sftp/.bzr', 0664, 0775)
+ check_mode_r(self, 'sftp/.bzr', 0664, 0775)
+
+ b_sftp = Branch.open(sftp_url)
+ self.assertEqualMode(0775, b_sftp.control_files._dir_mode)
+ self.assertEqualMode(0664, b_sftp.control_files._file_mode)
+ self.assertEqualMode(0775, b_sftp.bzrdir._get_dir_mode())
+ self.assertEqualMode(0664, b_sftp.bzrdir._get_file_mode())
+
+ with open('local/a', 'wb') as f: f.write('foo3\n')
+ t_local.commit('foo3')
+ b_sftp.pull(b_local)
+ check_mode_r(self, 'sftp/.bzr', 0664, 0775)
+
+ with open('local/c', 'wb') as f: f.write('new c\n')
+ t_local.add('c')
+ t_local.commit('new c')
+ b_sftp.pull(b_local)
+ check_mode_r(self, 'sftp/.bzr', 0664, 0775)
+
+ def test_sftp_server_modes(self):
+ if sys.platform == 'win32':
+ raise TestSkipped('chmod has no effect on win32')
+
+ umask = 0022
+ original_umask = os.umask(umask)
+
+ try:
+ t = self.get_transport()
+ # Direct access should be masked by umask
+ t._sftp_open_exclusive('a', mode=0666).write('foo\n')
+ self.assertTransportMode(t, 'a', 0666 &~umask)
+
+ # but Transport overrides umask
+ t.put_bytes('b', 'txt', mode=0666)
+ self.assertTransportMode(t, 'b', 0666)
+
+ t._get_sftp().mkdir('c', mode=0777)
+ self.assertTransportMode(t, 'c', 0777 &~umask)
+
+ t.mkdir('d', mode=0777)
+ self.assertTransportMode(t, 'd', 0777)
+ finally:
+ os.umask(original_umask)
diff --git a/bzrlib/tests/test_plugins.py b/bzrlib/tests/test_plugins.py
new file mode 100644
index 0000000..3249fe6
--- /dev/null
+++ b/bzrlib/tests/test_plugins.py
@@ -0,0 +1,985 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for plugins"""
+
+# XXX: There are no plugin tests at the moment because the plugin module
+# affects the global state of the process. See bzrlib/plugins.py for more
+# comments.
+
+from cStringIO import StringIO
+import logging
+import os
+import sys
+
+import bzrlib
+from bzrlib import (
+ errors,
+ osutils,
+ plugin,
+ plugins,
+ tests,
+ trace,
+ )
+
+
+# TODO: Write a test for plugin decoration of commands.
+
+class BaseTestPlugins(tests.TestCaseInTempDir):
+
+ def create_plugin(self, name, source=None, dir='.', file_name=None):
+ if source is None:
+ source = '''\
+"""This is the doc for %s"""
+''' % (name)
+ if file_name is None:
+ file_name = name + '.py'
+ # 'source' must not fail to load
+ path = osutils.pathjoin(dir, file_name)
+ f = open(path, 'w')
+ self.addCleanup(os.unlink, path)
+ try:
+ f.write(source + '\n')
+ finally:
+ f.close()
+
+ def create_plugin_package(self, name, dir=None, source=None):
+ if dir is None:
+ dir = name
+ if source is None:
+ source = '''\
+"""This is the doc for %s"""
+dir_source = '%s'
+''' % (name, dir)
+ os.makedirs(dir)
+ def cleanup():
+ # Workaround lazy import random? madness
+ osutils.rmtree(dir)
+ self.addCleanup(cleanup)
+ self.create_plugin(name, source, dir,
+ file_name='__init__.py')
+
+ def _unregister_plugin(self, name):
+ """Remove the plugin from sys.modules and the bzrlib namespace."""
+ py_name = 'bzrlib.plugins.%s' % name
+ if py_name in sys.modules:
+ del sys.modules[py_name]
+ if getattr(bzrlib.plugins, name, None) is not None:
+ delattr(bzrlib.plugins, name)
+
+ def _unregister_plugin_submodule(self, plugin_name, submodule_name):
+ """Remove the submodule from sys.modules and the bzrlib namespace."""
+ py_name = 'bzrlib.plugins.%s.%s' % (plugin_name, submodule_name)
+ if py_name in sys.modules:
+ del sys.modules[py_name]
+ plugin = getattr(bzrlib.plugins, plugin_name, None)
+ if plugin is not None:
+ if getattr(plugin, submodule_name, None) is not None:
+ delattr(plugin, submodule_name)
+
+ def assertPluginUnknown(self, name):
+ self.assertFalse(getattr(bzrlib.plugins, name, None) is not None)
+ self.assertFalse('bzrlib.plugins.%s' % name in sys.modules)
+
+ def assertPluginKnown(self, name):
+ self.assertTrue(getattr(bzrlib.plugins, name, None) is not None)
+ self.assertTrue('bzrlib.plugins.%s' % name in sys.modules)
+
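+# Illustrative flow for the tests below (the plugin name 'demo' is made up):
+#
+#     self.create_plugin('demo', source="'''demo plugin docstring'''")
+#     bzrlib.plugin.load_from_path(['.'])
+#     self.assertPluginKnown('demo')
+#     self._unregister_plugin('demo')
+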
+
+class TestLoadingPlugins(BaseTestPlugins):
+
+ activeattributes = {}
+
+ def test_plugins_with_the_same_name_are_not_loaded(self):
+ # This test tests that having two plugins in different directories does
+ # not result in both being loaded when they have the same name. Get a
+ # file name we can use which is also a valid attribute for accessing in
+ # activeattributes - we cannot give import parameters.
+ tempattribute = "0"
+ self.assertFalse(tempattribute in self.activeattributes)
+ # set a place for the plugins to record their loading, and at the same
+ # time validate that the location the plugins should record to is
+ # valid and correct.
+ self.__class__.activeattributes [tempattribute] = []
+ self.assertTrue(tempattribute in self.activeattributes)
+ # create two plugin directories
+ os.mkdir('first')
+ os.mkdir('second')
+ # write a plugin that will record when it is loaded in the
+ # tempattribute list.
+ template = ("from bzrlib.tests.test_plugins import TestLoadingPlugins\n"
+ "TestLoadingPlugins.activeattributes[%r].append('%s')\n")
+
+ outfile = open(os.path.join('first', 'plugin.py'), 'w')
+ try:
+ outfile.write(template % (tempattribute, 'first'))
+ outfile.write('\n')
+ finally:
+ outfile.close()
+
+ outfile = open(os.path.join('second', 'plugin.py'), 'w')
+ try:
+ outfile.write(template % (tempattribute, 'second'))
+ outfile.write('\n')
+ finally:
+ outfile.close()
+
+ try:
+ bzrlib.plugin.load_from_path(['first', 'second'])
+ self.assertEqual(['first'], self.activeattributes[tempattribute])
+ finally:
+ # remove the plugin 'plugin'
+ del self.activeattributes[tempattribute]
+ self._unregister_plugin('plugin')
+ self.assertPluginUnknown('plugin')
+
+ def test_plugins_from_different_dirs_can_demand_load(self):
+ self.assertFalse('bzrlib.plugins.pluginone' in sys.modules)
+ self.assertFalse('bzrlib.plugins.plugintwo' in sys.modules)
+ # This test tests that having two plugins in different
+ # directories with different names allows them both to be loaded, when
+ # we do a direct import statement.
+ # Determine a file name we can use which is also a valid attribute
+ # for accessing in activeattributes - we cannot give import parameters.
+ tempattribute = "different-dirs"
+ self.assertFalse(tempattribute in self.activeattributes)
+ # set a place for the plugins to record their loading, and at the same
+ # time validate that the location the plugins should record to is
+ # valid and correct.
+ bzrlib.tests.test_plugins.TestLoadingPlugins.activeattributes \
+ [tempattribute] = []
+ self.assertTrue(tempattribute in self.activeattributes)
+ # create two plugin directories
+ os.mkdir('first')
+ os.mkdir('second')
+ # write plugins that will record when they are loaded in the
+ # tempattribute list.
+ template = ("from bzrlib.tests.test_plugins import TestLoadingPlugins\n"
+ "TestLoadingPlugins.activeattributes[%r].append('%s')\n")
+
+ outfile = open(os.path.join('first', 'pluginone.py'), 'w')
+ try:
+ outfile.write(template % (tempattribute, 'first'))
+ outfile.write('\n')
+ finally:
+ outfile.close()
+
+ outfile = open(os.path.join('second', 'plugintwo.py'), 'w')
+ try:
+ outfile.write(template % (tempattribute, 'second'))
+ outfile.write('\n')
+ finally:
+ outfile.close()
+
+ oldpath = bzrlib.plugins.__path__
+ try:
+ self.assertFalse('bzrlib.plugins.pluginone' in sys.modules)
+ self.assertFalse('bzrlib.plugins.plugintwo' in sys.modules)
+ bzrlib.plugins.__path__ = ['first', 'second']
+ exec "import bzrlib.plugins.pluginone"
+ self.assertEqual(['first'], self.activeattributes[tempattribute])
+ exec "import bzrlib.plugins.plugintwo"
+ self.assertEqual(['first', 'second'],
+ self.activeattributes[tempattribute])
+ finally:
+ # remove the plugin 'plugin'
+ del self.activeattributes[tempattribute]
+ self._unregister_plugin('pluginone')
+ self._unregister_plugin('plugintwo')
+ self.assertPluginUnknown('pluginone')
+ self.assertPluginUnknown('plugintwo')
+
+ def test_plugins_can_load_from_directory_with_trailing_slash(self):
+ # This test tests that a plugin can load from a directory when the
+ # directory in the path has a trailing slash.
+ # check the plugin is not loaded already
+ self.assertPluginUnknown('ts_plugin')
+ tempattribute = "trailing-slash"
+ self.assertFalse(tempattribute in self.activeattributes)
+ # set a place for the plugin to record its loading, and at the same
+ # time validate that the location the plugin should record to is
+ # valid and correct.
+ bzrlib.tests.test_plugins.TestLoadingPlugins.activeattributes \
+ [tempattribute] = []
+ self.assertTrue(tempattribute in self.activeattributes)
+ # create a directory for the plugin
+ os.mkdir('plugin_test')
+ # write a plugin that will record when it is loaded in the
+ # tempattribute list.
+ template = ("from bzrlib.tests.test_plugins import TestLoadingPlugins\n"
+ "TestLoadingPlugins.activeattributes[%r].append('%s')\n")
+
+ outfile = open(os.path.join('plugin_test', 'ts_plugin.py'), 'w')
+ try:
+ outfile.write(template % (tempattribute, 'plugin'))
+ outfile.write('\n')
+ finally:
+ outfile.close()
+
+ try:
+ bzrlib.plugin.load_from_path(['plugin_test'+os.sep])
+ self.assertEqual(['plugin'], self.activeattributes[tempattribute])
+ finally:
+ del self.activeattributes[tempattribute]
+ self._unregister_plugin('ts_plugin')
+ self.assertPluginUnknown('ts_plugin')
+
+ def load_and_capture(self, name):
+ """Load plugins from '.' capturing the output.
+
+ :param name: The name of the plugin.
+ :return: A string with the log from the plugin loading call.
+ """
+ # Capture output
+ stream = StringIO()
+ try:
+ handler = logging.StreamHandler(stream)
+ log = logging.getLogger('bzr')
+ log.addHandler(handler)
+ try:
+ try:
+ bzrlib.plugin.load_from_path(['.'])
+ finally:
+ if 'bzrlib.plugins.%s' % name in sys.modules:
+ del sys.modules['bzrlib.plugins.%s' % name]
+ if getattr(bzrlib.plugins, name, None):
+ delattr(bzrlib.plugins, name)
+ finally:
+ # Stop capturing output
+ handler.flush()
+ handler.close()
+ log.removeHandler(handler)
+ return stream.getvalue()
+ finally:
+ stream.close()
+
+ def test_plugin_with_bad_api_version_reports(self):
+ """Try loading a plugin that requests an unsupported api.
+
+ Observe that it records the problem but doesn't complain on stderr.
+
+ See https://bugs.launchpad.net/bzr/+bug/704195
+ """
+ self.overrideAttr(plugin, 'plugin_warnings', {})
+ name = 'wants100.py'
+ f = file(name, 'w')
+ try:
+ f.write("import bzrlib.api\n"
+ "bzrlib.api.require_any_api(bzrlib, [(1, 0, 0)])\n")
+ finally:
+ f.close()
+ log = self.load_and_capture(name)
+ self.assertNotContainsRe(log,
+ r"It requested API version")
+ self.assertEquals(
+ ['wants100'],
+ plugin.plugin_warnings.keys())
+ self.assertContainsRe(
+ plugin.plugin_warnings['wants100'][0],
+ r"It requested API version")
+
+ def test_plugin_with_bad_name_does_not_load(self):
+ # The file name here is invalid for a Python module.
+ name = 'bzr-bad plugin-name..py'
+ file(name, 'w').close()
+ log = self.load_and_capture(name)
+ self.assertContainsRe(log,
+ r"Unable to load 'bzr-bad plugin-name\.' in '\.' as a plugin "
+ "because the file path isn't a valid module name; try renaming "
+ "it to 'bad_plugin_name_'\.")
+
+
+class TestPlugins(BaseTestPlugins):
+
+ def setup_plugin(self, source=""):
+ # This tests that a new plugin appears in bzrlib.plugin.plugins().
+ # check the plugin is not loaded already
+ self.assertPluginUnknown('plugin')
+ # write a plugin that _cannot_ fail to load.
+ with file('plugin.py', 'w') as f: f.write(source + '\n')
+ self.addCleanup(self.teardown_plugin)
+ plugin.load_from_path(['.'])
+
+ def teardown_plugin(self):
+ self._unregister_plugin('plugin')
+ self.assertPluginUnknown('plugin')
+
+ def test_plugin_appears_in_plugins(self):
+ self.setup_plugin()
+ self.assertPluginKnown('plugin')
+ p = plugin.plugins()['plugin']
+ self.assertIsInstance(p, bzrlib.plugin.PlugIn)
+ self.assertEqual(p.module, plugins.plugin)
+
+ def test_trivial_plugin_get_path(self):
+ self.setup_plugin()
+ p = plugin.plugins()['plugin']
+ plugin_path = self.test_dir + '/plugin.py'
+ self.assertIsSameRealPath(plugin_path, osutils.normpath(p.path()))
+
+ def test_plugin_get_path_py_not_pyc(self):
+ # first import creates plugin.pyc
+ self.setup_plugin()
+ self.teardown_plugin()
+ plugin.load_from_path(['.']) # import plugin.pyc
+ p = plugin.plugins()['plugin']
+ plugin_path = self.test_dir + '/plugin.py'
+ self.assertIsSameRealPath(plugin_path, osutils.normpath(p.path()))
+
+ def test_plugin_get_path_pyc_only(self):
+ # first import creates plugin.pyc (or plugin.pyo depending on __debug__)
+ self.setup_plugin()
+ self.teardown_plugin()
+ os.unlink(self.test_dir + '/plugin.py')
+ plugin.load_from_path(['.']) # import plugin.pyc (or .pyo)
+ p = plugin.plugins()['plugin']
+ if __debug__:
+ plugin_path = self.test_dir + '/plugin.pyc'
+ else:
+ plugin_path = self.test_dir + '/plugin.pyo'
+ self.assertIsSameRealPath(plugin_path, osutils.normpath(p.path()))
+
+ def test_no_test_suite_gives_None_for_test_suite(self):
+ self.setup_plugin()
+ p = plugin.plugins()['plugin']
+ self.assertEqual(None, p.test_suite())
+
+ def test_test_suite_gives_test_suite_result(self):
+ source = """def test_suite(): return 'foo'"""
+ self.setup_plugin(source)
+ p = plugin.plugins()['plugin']
+ self.assertEqual('foo', p.test_suite())
+
+ def test_no_load_plugin_tests_gives_None_for_load_plugin_tests(self):
+ self.setup_plugin()
+ loader = tests.TestUtil.TestLoader()
+ p = plugin.plugins()['plugin']
+ self.assertEqual(None, p.load_plugin_tests(loader))
+
+ def test_load_plugin_tests_gives_load_plugin_tests_result(self):
+ source = """
+def load_tests(standard_tests, module, loader):
+ return 'foo'"""
+ self.setup_plugin(source)
+ loader = tests.TestUtil.TestLoader()
+ p = plugin.plugins()['plugin']
+ self.assertEqual('foo', p.load_plugin_tests(loader))
+
+ def check_version_info(self, expected, source='', name='plugin'):
+ self.setup_plugin(source)
+ self.assertEqual(expected, plugin.plugins()[name].version_info())
+
+ def test_no_version_info(self):
+ self.check_version_info(None)
+
+ def test_with_version_info(self):
+ self.check_version_info((1, 2, 3, 'dev', 4),
+ "version_info = (1, 2, 3, 'dev', 4)")
+
+ def test_short_version_info_gets_padded(self):
+ # the gtk plugin has version_info = (1,2,3) rather than the 5-tuple,
+ # so we adapt it.
+ self.check_version_info((1, 2, 3, 'final', 0),
+ "version_info = (1, 2, 3)")
+
+ def check_version(self, expected, source=None, name='plugin'):
+ self.setup_plugin(source)
+ self.assertEqual(expected, plugins[name].__version__)
+
+ def test_no_version_info___version__(self):
+ self.setup_plugin()
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("unknown", plugin.__version__)
+
+ def test_str__version__with_version_info(self):
+ self.setup_plugin("version_info = '1.2.3'")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3", plugin.__version__)
+
+ def test_noniterable__version__with_version_info(self):
+ self.setup_plugin("version_info = (1)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1", plugin.__version__)
+
+ def test_1__version__with_version_info(self):
+ self.setup_plugin("version_info = (1,)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1", plugin.__version__)
+
+ def test_1_2__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2", plugin.__version__)
+
+ def test_1_2_3__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2, 3)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3", plugin.__version__)
+
+ def test_candidate__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2, 3, 'candidate', 1)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3rc1", plugin.__version__)
+
+ def test_dev__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2, 3, 'dev', 0)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3dev", plugin.__version__)
+
+ def test_dev_fallback__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2, 3, 'dev', 4)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3dev4", plugin.__version__)
+
+ def test_final__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2, 3, 'final', 0)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3", plugin.__version__)
+
+ def test_final_fallback__version__with_version_info(self):
+ self.setup_plugin("version_info = (1, 2, 3, 'final', 2)")
+ plugin = bzrlib.plugin.plugins()['plugin']
+ self.assertEqual("1.2.3.2", plugin.__version__)
+
+
+class TestPluginHelp(tests.TestCaseInTempDir):
+
+ def split_help_commands(self):
+ help = {}
+ current = None
+ out, err = self.run_bzr('--no-plugins help commands')
+ for line in out.splitlines():
+ if not line.startswith(' '):
+ current = line.split()[0]
+ help[current] = help.get(current, '') + line
+
+ return help
+
+ def test_plugin_help_builtins_unaffected(self):
+ # Check we don't get false positives
+ help_commands = self.split_help_commands()
+ for cmd_name in bzrlib.commands.builtin_command_names():
+ if cmd_name in bzrlib.commands.plugin_command_names():
+ continue
+ try:
+ help = bzrlib.commands.get_cmd_object(cmd_name).get_help_text()
+ except NotImplementedError:
+ # some commands have no help
+ pass
+ else:
+ self.assertNotContainsRe(help, 'plugin "[^"]*"')
+
+ if cmd_name in help_commands.keys():
+ # some commands are hidden
+ help = help_commands[cmd_name]
+ self.assertNotContainsRe(help, 'plugin "[^"]*"')
+
+ def test_plugin_help_shows_plugin(self):
+ # Create a test plugin
+ os.mkdir('plugin_test')
+ f = open(osutils.pathjoin('plugin_test', 'myplug.py'), 'w')
+ f.write("""\
+from bzrlib import commands
+class cmd_myplug(commands.Command):
+ __doc__ = '''Just a simple test plugin.'''
+ aliases = ['mplg']
+ def run(self):
+ print 'Hello from my plugin'
+
+"""
+)
+ f.close()
+
+ try:
+ # Check its help
+ bzrlib.plugin.load_from_path(['plugin_test'])
+ bzrlib.commands.register_command( bzrlib.plugins.myplug.cmd_myplug)
+ help = self.run_bzr('help myplug')[0]
+ self.assertContainsRe(help, 'plugin "myplug"')
+ help = self.split_help_commands()['myplug']
+ self.assertContainsRe(help, '\[myplug\]')
+ finally:
+ # unregister command
+ if 'myplug' in bzrlib.commands.plugin_cmds:
+ bzrlib.commands.plugin_cmds.remove('myplug')
+ # remove the plugin 'myplug'
+ if getattr(bzrlib.plugins, 'myplug', None):
+ delattr(bzrlib.plugins, 'myplug')
+
+
+class TestHelpIndex(tests.TestCase):
+ """Tests for the PluginsHelpIndex class."""
+
+ def test_default_constructable(self):
+ index = plugin.PluginsHelpIndex()
+
+ def test_get_topics_None(self):
+ """Searching for None returns an empty list."""
+ index = plugin.PluginsHelpIndex()
+ self.assertEqual([], index.get_topics(None))
+
+ def test_get_topics_for_plugin(self):
+ """Searching for plugin name gets its docstring."""
+ index = plugin.PluginsHelpIndex()
+ # make a new plugin here for this test, even if we're run with
+ # --no-plugins
+ self.assertFalse(sys.modules.has_key('bzrlib.plugins.demo_module'))
+ demo_module = FakeModule('', 'bzrlib.plugins.demo_module')
+ sys.modules['bzrlib.plugins.demo_module'] = demo_module
+ try:
+ topics = index.get_topics('demo_module')
+ self.assertEqual(1, len(topics))
+ self.assertIsInstance(topics[0], plugin.ModuleHelpTopic)
+ self.assertEqual(demo_module, topics[0].module)
+ finally:
+ del sys.modules['bzrlib.plugins.demo_module']
+
+ def test_get_topics_no_topic(self):
+ """Searching for something that is not a plugin returns []."""
+        # Test this by using a name that cannot be a plugin - it's not
+        # a valid Python identifier.
+ index = plugin.PluginsHelpIndex()
+ self.assertEqual([], index.get_topics('nothing by this name'))
+
+ def test_prefix(self):
+ """PluginsHelpIndex has a prefix of 'plugins/'."""
+ index = plugin.PluginsHelpIndex()
+ self.assertEqual('plugins/', index.prefix)
+
+ def test_get_plugin_topic_with_prefix(self):
+ """Searching for plugins/demo_module returns help."""
+ index = plugin.PluginsHelpIndex()
+ self.assertFalse(sys.modules.has_key('bzrlib.plugins.demo_module'))
+ demo_module = FakeModule('', 'bzrlib.plugins.demo_module')
+ sys.modules['bzrlib.plugins.demo_module'] = demo_module
+ try:
+ topics = index.get_topics('plugins/demo_module')
+ self.assertEqual(1, len(topics))
+ self.assertIsInstance(topics[0], plugin.ModuleHelpTopic)
+ self.assertEqual(demo_module, topics[0].module)
+ finally:
+ del sys.modules['bzrlib.plugins.demo_module']
+
+
+class FakeModule(object):
+ """A fake module to test with."""
+
+ def __init__(self, doc, name):
+ self.__doc__ = doc
+ self.__name__ = name
+
+
+class TestModuleHelpTopic(tests.TestCase):
+ """Tests for the ModuleHelpTopic class."""
+
+    def test_construct(self):
+ """Construction takes the module to document."""
+ mod = FakeModule('foo', 'foo')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual(mod, topic.module)
+
+ def test_get_help_text_None(self):
+ """A ModuleHelpTopic returns the docstring for get_help_text."""
+ mod = FakeModule(None, 'demo')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual("Plugin 'demo' has no docstring.\n",
+ topic.get_help_text())
+
+ def test_get_help_text_no_carriage_return(self):
+ """ModuleHelpTopic.get_help_text adds a \n if needed."""
+ mod = FakeModule('one line of help', 'demo')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual("one line of help\n",
+ topic.get_help_text())
+
+ def test_get_help_text_carriage_return(self):
+ """ModuleHelpTopic.get_help_text adds a \n if needed."""
+ mod = FakeModule('two lines of help\nand more\n', 'demo')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual("two lines of help\nand more\n",
+ topic.get_help_text())
+
+ def test_get_help_text_with_additional_see_also(self):
+ mod = FakeModule('two lines of help\nand more', 'demo')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual("two lines of help\nand more\n\n:See also: bar, foo\n",
+ topic.get_help_text(['foo', 'bar']))
+
+ def test_get_help_topic(self):
+ """The help topic for a plugin is its module name."""
+ mod = FakeModule('two lines of help\nand more', 'bzrlib.plugins.demo')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual('demo', topic.get_help_topic())
+ mod = FakeModule('two lines of help\nand more',
+ 'bzrlib.plugins.foo_bar')
+ topic = plugin.ModuleHelpTopic(mod)
+ self.assertEqual('foo_bar', topic.get_help_topic())
+
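In other words, the help topic is simply the module name with the 'bzrlib.plugins.' prefix stripped. A one-line illustration (topic_for is a made-up name, not bzrlib API):

    def topic_for(module_name, prefix='bzrlib.plugins.'):
        # Strip the plugins-package prefix when present, otherwise pass through.
        if module_name.startswith(prefix):
            return module_name[len(prefix):]
        return module_name

    topic_for('bzrlib.plugins.foo_bar')  # 'foo_bar'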
+
+class TestLoadFromPath(tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super(TestLoadFromPath, self).setUp()
+ # Change bzrlib.plugin to think no plugins have been loaded yet.
+ self.overrideAttr(bzrlib.plugins, '__path__', [])
+ self.overrideAttr(plugin, '_loaded', False)
+
+ # Monkey-patch load_from_path to stop it from actually loading anything.
+ self.overrideAttr(plugin, 'load_from_path', lambda dirs: None)
+
+ def test_set_plugins_path_with_args(self):
+ plugin.set_plugins_path(['a', 'b'])
+ self.assertEqual(['a', 'b'], bzrlib.plugins.__path__)
+
+ def test_set_plugins_path_defaults(self):
+ plugin.set_plugins_path()
+ self.assertEqual(plugin.get_standard_plugins_path(),
+ bzrlib.plugins.__path__)
+
+ def test_get_standard_plugins_path(self):
+ path = plugin.get_standard_plugins_path()
+ for directory in path:
+            self.assertNotContainsRe(directory, r'[\\/]$')
+ try:
+ from distutils.sysconfig import get_python_lib
+ except ImportError:
+ pass
+ else:
+ if sys.platform != 'win32':
+ python_lib = get_python_lib()
+ for directory in path:
+ if directory.startswith(python_lib):
+ break
+ else:
+ self.fail('No path to global plugins')
+
+ def test_get_standard_plugins_path_env(self):
+ self.overrideEnv('BZR_PLUGIN_PATH', 'foo/')
+ path = plugin.get_standard_plugins_path()
+ for directory in path:
+            self.assertNotContainsRe(directory, r'[\\/]$')
+
+ def test_load_plugins(self):
+ plugin.load_plugins(['.'])
+ self.assertEqual(bzrlib.plugins.__path__, ['.'])
+ # subsequent loads are no-ops
+ plugin.load_plugins(['foo'])
+ self.assertEqual(bzrlib.plugins.__path__, ['.'])
+
+ def test_load_plugins_default(self):
+ plugin.load_plugins()
+ path = plugin.get_standard_plugins_path()
+ self.assertEqual(path, bzrlib.plugins.__path__)
+
+
+class TestEnvPluginPath(tests.TestCase):
+
+ def setUp(self):
+ super(TestEnvPluginPath, self).setUp()
+ self.overrideAttr(plugin, 'DEFAULT_PLUGIN_PATH', None)
+
+ self.user = plugin.get_user_plugin_path()
+ self.site = plugin.get_site_plugin_path()
+ self.core = plugin.get_core_plugin_path()
+
+ def _list2paths(self, *args):
+ paths = []
+ for p in args:
+ plugin._append_new_path(paths, p)
+ return paths
+
+ def _set_path(self, *args):
+ path = os.pathsep.join(self._list2paths(*args))
+ self.overrideEnv('BZR_PLUGIN_PATH', path)
+
+ def check_path(self, expected_dirs, setting_dirs):
+ if setting_dirs:
+ self._set_path(*setting_dirs)
+ actual = plugin.get_standard_plugins_path()
+ self.assertEquals(self._list2paths(*expected_dirs), actual)
+
+ def test_default(self):
+ self.check_path([self.user, self.core, self.site],
+ None)
+
+ def test_adhoc_policy(self):
+ self.check_path([self.user, self.core, self.site],
+ ['+user', '+core', '+site'])
+
+ def test_fallback_policy(self):
+ self.check_path([self.core, self.site, self.user],
+ ['+core', '+site', '+user'])
+
+ def test_override_policy(self):
+ self.check_path([self.user, self.site, self.core],
+ ['+user', '+site', '+core'])
+
+ def test_disable_user(self):
+ self.check_path([self.core, self.site], ['-user'])
+
+ def test_disable_user_twice(self):
+        # Ensure that multiple removals don't leave cruft behind
+ self.check_path([self.core, self.site], ['-user', '-user'])
+
+ def test_duplicates_are_removed(self):
+ self.check_path([self.user, self.core, self.site],
+ ['+user', '+user'])
+ # And only the first reference is kept (since the later references will
+ # only produce '<plugin> already loaded' mutters)
+ self.check_path([self.user, self.core, self.site],
+ ['+user', '+user', '+core',
+ '+user', '+site', '+site',
+ '+core'])
+
+ def test_disable_overrides_enable(self):
+ self.check_path([self.core, self.site], ['-user', '+user'])
+
+ def test_disable_core(self):
+ self.check_path([self.site], ['-core'])
+ self.check_path([self.user, self.site], ['+user', '-core'])
+
+ def test_disable_site(self):
+ self.check_path([self.core], ['-site'])
+ self.check_path([self.user, self.core], ['-site', '+user'])
+
+ def test_override_site(self):
+ self.check_path(['mysite', self.user, self.core],
+ ['mysite', '-site', '+user'])
+ self.check_path(['mysite', self.core],
+ ['mysite', '-site'])
+
+ def test_override_core(self):
+ self.check_path(['mycore', self.user, self.site],
+ ['mycore', '-core', '+user', '+site'])
+ self.check_path(['mycore', self.site],
+ ['mycore', '-core'])
+
+ def test_my_plugin_only(self):
+ self.check_path(['myplugin'], ['myplugin', '-user', '-core', '-site'])
+
+ def test_my_plugin_first(self):
+ self.check_path(['myplugin', self.core, self.site, self.user],
+ ['myplugin', '+core', '+site', '+user'])
+
+ def test_bogus_references(self):
+ self.check_path(['+foo', '-bar', self.core, self.site],
+ ['+foo', '-bar'])
+
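Taken together, these tests pin down the BZR_PLUGIN_PATH policy: entries are separated by os.pathsep; a bare entry names a directory to search; '+user'/'+core'/'+site' insert the standard locations at that point and '-user'/'-core'/'-site' suppress them; duplicate references keep only their first occurrence. Setting the variable at all displaces the user location unless '+user' is given, while core and site are still appended at the end unless explicitly removed (compare test_default with test_bogus_references). For example, the out-of-suite equivalent of test_my_plugin_only would be roughly (illustrative, mirroring _set_path above):

    import os
    # Search only './myplugin'; drop the user, core and site locations.
    os.environ['BZR_PLUGIN_PATH'] = os.pathsep.join(
        ['myplugin', '-user', '-core', '-site'])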
+
+class TestDisablePlugin(BaseTestPlugins):
+
+ def setUp(self):
+ super(TestDisablePlugin, self).setUp()
+ self.create_plugin_package('test_foo')
+ # Make sure we don't pollute the plugins namespace
+ self.overrideAttr(plugins, '__path__')
+        # Be paranoid in case a test fails
+ self.addCleanup(self._unregister_plugin, 'test_foo')
+
+ def test_cannot_import(self):
+ self.overrideEnv('BZR_DISABLE_PLUGINS', 'test_foo')
+ plugin.set_plugins_path(['.'])
+ try:
+ import bzrlib.plugins.test_foo
+ except ImportError:
+ pass
+ self.assertPluginUnknown('test_foo')
+
+ def test_regular_load(self):
+ self.overrideAttr(plugin, '_loaded', False)
+ plugin.load_plugins(['.'])
+ self.assertPluginKnown('test_foo')
+ self.assertDocstring("This is the doc for test_foo",
+ bzrlib.plugins.test_foo)
+
+ def test_not_loaded(self):
+ self.warnings = []
+ def captured_warning(*args, **kwargs):
+ self.warnings.append((args, kwargs))
+ self.overrideAttr(trace, 'warning', captured_warning)
+        # Reset the flag that protects against double loading
+ self.overrideAttr(plugin, '_loaded', False)
+ self.overrideEnv('BZR_DISABLE_PLUGINS', 'test_foo')
+ plugin.load_plugins(['.'])
+ self.assertPluginUnknown('test_foo')
+ # Make sure we don't warn about the plugin ImportError since this has
+ # been *requested* by the user.
+ self.assertLength(0, self.warnings)
+
+
+
+class TestLoadPluginAtSyntax(tests.TestCase):
+
+ def _get_paths(self, paths):
+ return plugin._get_specific_plugin_paths(paths)
+
+ def test_empty(self):
+ self.assertEquals([], self._get_paths(None))
+ self.assertEquals([], self._get_paths(''))
+
+ def test_one_path(self):
+ self.assertEquals([('b', 'man')], self._get_paths('b@man'))
+
+ def test_bogus_path(self):
+ # We need a '@'
+ self.assertRaises(errors.BzrCommandError, self._get_paths, 'batman')
+ # Too much '@' isn't good either
+ self.assertRaises(errors.BzrCommandError, self._get_paths,
+ 'batman@mobile@cave')
+ # An empty description probably indicates a problem
+ self.assertRaises(errors.BzrCommandError, self._get_paths,
+ os.pathsep.join(['batman@cave', '', 'robin@mobile']))
+
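So BZR_PLUGINS_AT is an os.pathsep-separated list of name@path pairs. A standalone sketch of the parsing these tests describe (the real helper is plugin._get_specific_plugin_paths and raises errors.BzrCommandError; this stand-in uses ValueError and is illustrative only):

    import os

    def parse_plugins_at(value):
        if not value:
            return []
        result = []
        for spec in value.split(os.pathsep):
            parts = spec.split('@')
            if len(parts) != 2:
                # catches 'batman', 'batman@mobile@cave' and empty entries
                raise ValueError('%r is not of the form <plugin>@<path>' % spec)
            result.append((parts[0], parts[1]))
        return result

    parse_plugins_at('b@man')   # [('b', 'man')]
    parse_plugins_at(None)      # []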
+
+class TestLoadPluginAt(BaseTestPlugins):
+
+ def setUp(self):
+ super(TestLoadPluginAt, self).setUp()
+ # Make sure we don't pollute the plugins namespace
+ self.overrideAttr(plugins, '__path__')
+        # Reset the flag that protects against double loading
+ self.overrideAttr(plugin, '_loaded', False)
+ # Create the same plugin in two directories
+ self.create_plugin_package('test_foo', dir='non-standard-dir')
+        # The "normal" directory; we use 'standard' instead of 'plugins' to
+        # avoid depending on the precise naming.
+ self.create_plugin_package('test_foo', dir='standard/test_foo')
+ # All the tests will load the 'test_foo' plugin from various locations
+ self.addCleanup(self._unregister_plugin, 'test_foo')
+ # Unfortunately there's global cached state for the specific
+ # registered paths.
+ self.addCleanup(plugin.PluginImporter.reset)
+
+ def assertTestFooLoadedFrom(self, path):
+ self.assertPluginKnown('test_foo')
+ self.assertDocstring('This is the doc for test_foo',
+ bzrlib.plugins.test_foo)
+ self.assertEqual(path, bzrlib.plugins.test_foo.dir_source)
+
+ def test_regular_load(self):
+ plugin.load_plugins(['standard'])
+ self.assertTestFooLoadedFrom('standard/test_foo')
+
+ def test_import(self):
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@non-standard-dir')
+ plugin.set_plugins_path(['standard'])
+ try:
+ import bzrlib.plugins.test_foo
+ except ImportError:
+ pass
+ self.assertTestFooLoadedFrom('non-standard-dir')
+
+ def test_loading(self):
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@non-standard-dir')
+ plugin.load_plugins(['standard'])
+ self.assertTestFooLoadedFrom('non-standard-dir')
+
+ def test_compiled_loaded(self):
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@non-standard-dir')
+ plugin.load_plugins(['standard'])
+ self.assertTestFooLoadedFrom('non-standard-dir')
+ self.assertIsSameRealPath('non-standard-dir/__init__.py',
+ bzrlib.plugins.test_foo.__file__)
+
+ # Try importing again now that the source has been compiled
+ self._unregister_plugin('test_foo')
+ plugin._loaded = False
+ plugin.load_plugins(['standard'])
+ self.assertTestFooLoadedFrom('non-standard-dir')
+        # Under python -O, __debug__ is False and compiled modules get the
+        # .pyo suffix instead of .pyc.
+        if __debug__:
+            suffix = 'pyc'
+        else:
+            suffix = 'pyo'
+ self.assertIsSameRealPath('non-standard-dir/__init__.%s' % suffix,
+ bzrlib.plugins.test_foo.__file__)
+
+ def test_submodule_loading(self):
+ # We create an additional directory under the one for test_foo
+ self.create_plugin_package('test_bar', dir='non-standard-dir/test_bar')
+ self.addCleanup(self._unregister_plugin_submodule,
+ 'test_foo', 'test_bar')
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@non-standard-dir')
+ plugin.set_plugins_path(['standard'])
+ import bzrlib.plugins.test_foo
+ self.assertEqual('bzrlib.plugins.test_foo',
+ bzrlib.plugins.test_foo.__package__)
+ import bzrlib.plugins.test_foo.test_bar
+ self.assertIsSameRealPath('non-standard-dir/test_bar/__init__.py',
+ bzrlib.plugins.test_foo.test_bar.__file__)
+
+ def test_relative_submodule_loading(self):
+ self.create_plugin_package('test_foo', dir='another-dir', source='''
+import test_bar
+''')
+ # We create an additional directory under the one for test_foo
+ self.create_plugin_package('test_bar', dir='another-dir/test_bar')
+ self.addCleanup(self._unregister_plugin_submodule,
+ 'test_foo', 'test_bar')
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@another-dir')
+ plugin.set_plugins_path(['standard'])
+ import bzrlib.plugins.test_foo
+ self.assertEqual('bzrlib.plugins.test_foo',
+ bzrlib.plugins.test_foo.__package__)
+ self.assertIsSameRealPath('another-dir/test_bar/__init__.py',
+ bzrlib.plugins.test_foo.test_bar.__file__)
+
+ def test_loading_from___init__only(self):
+ # We rename the existing __init__.py file to ensure that we don't load
+ # a random file
+ init = 'non-standard-dir/__init__.py'
+ random = 'non-standard-dir/setup.py'
+ os.rename(init, random)
+ self.addCleanup(os.rename, random, init)
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@non-standard-dir')
+ plugin.load_plugins(['standard'])
+ self.assertPluginUnknown('test_foo')
+
+ def test_loading_from_specific_file(self):
+ plugin_dir = 'non-standard-dir'
+ plugin_file_name = 'iamtestfoo.py'
+ plugin_path = osutils.pathjoin(plugin_dir, plugin_file_name)
+ source = '''\
+"""This is the doc for %s"""
+dir_source = '%s'
+''' % ('test_foo', plugin_path)
+ self.create_plugin('test_foo', source=source,
+ dir=plugin_dir, file_name=plugin_file_name)
+ self.overrideEnv('BZR_PLUGINS_AT', 'test_foo@%s' % plugin_path)
+ plugin.load_plugins(['standard'])
+ self.assertTestFooLoadedFrom(plugin_path)
+
+
+class TestDescribePlugins(BaseTestPlugins):
+
+ def test_describe_plugins(self):
+ class DummyModule(object):
+ __doc__ = 'Hi there'
+ class DummyPlugin(object):
+ __version__ = '0.1.0'
+ module = DummyModule()
+ def dummy_plugins():
+ return { 'good': DummyPlugin() }
+ self.overrideAttr(plugin, 'plugin_warnings',
+ {'bad': ['Failed to load (just testing)']})
+ self.overrideAttr(plugin, 'plugins', dummy_plugins)
+ self.assertEquals("""\
+bad (failed to load)
+ ** Failed to load (just testing)
+
+good 0.1.0
+ Hi there
+
+""", ''.join(plugin.describe_plugins()))
diff --git a/bzrlib/tests/test_progress.py b/bzrlib/tests/test_progress.py
new file mode 100644
index 0000000..239d2de
--- /dev/null
+++ b/bzrlib/tests/test_progress.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ tests,
+ )
+from bzrlib.progress import (
+ ProgressTask,
+ )
+from bzrlib.ui.text import (
+ TextProgressView,
+ )
+
+
+class TestTextProgressView(tests.TestCase):
+ """Tests for text display of progress bars.
+
+ These try to exercise the progressview independently of its construction,
+ which is arranged by the TextUIFactory.
+ """
+ # The ProgressTask now connects directly to the ProgressView, so we can
+ # check them independently of the factory or of the determination of what
+ # view to use.
+
+ def make_view_only(self, out, width=79):
+ view = TextProgressView(out)
+ view._avail_width = lambda: width
+ return view
+
+ def make_view(self):
+ out = StringIO()
+ return out, self.make_view_only(out)
+
+ def make_task(self, parent_task, view, msg, curr, total):
+        # This would normally be done by the UIFactory; it is done here so
+        # that we don't have to have one.
+ task = ProgressTask(parent_task, progress_view=view)
+ task.msg = msg
+ task.current_cnt = curr
+ task.total_cnt = total
+ return task
+
+ def test_clear(self):
+ # <https://bugs.launchpad.net/bzr/+bug/611127> clear must actually
+ # send spaces to clear the line
+ out, view = self.make_view()
+ task = self.make_task(None, view, 'reticulating splines', 5, 20)
+ view.show_progress(task)
+ self.assertEqual(
+'\r/ reticulating splines 5/20 \r'
+ , out.getvalue())
+ view.clear()
+ self.assertEqual(
+'\r/ reticulating splines 5/20 \r'
+ + '\r' + 79 * ' ' + '\r',
+ out.getvalue())
+
+ def test_render_progress_no_bar(self):
+ """The default view now has a spinner but no bar."""
+ out, view = self.make_view()
+ # view.enable_bar = False
+ task = self.make_task(None, view, 'reticulating splines', 5, 20)
+ view.show_progress(task)
+ self.assertEqual(
+'\r/ reticulating splines 5/20 \r'
+ , out.getvalue())
+
+ def test_render_progress_easy(self):
+ """Just one task and one quarter done"""
+ out, view = self.make_view()
+ view.enable_bar = True
+ task = self.make_task(None, view, 'reticulating splines', 5, 20)
+ view.show_progress(task)
+ self.assertEqual(
+'\r[####/ ] reticulating splines 5/20 \r'
+ , out.getvalue())
+
+ def test_render_progress_nested(self):
+ """Tasks proportionally contribute to overall progress"""
+ out, view = self.make_view()
+ task = self.make_task(None, view, 'reticulating splines', 0, 2)
+ task2 = self.make_task(task, view, 'stage2', 1, 2)
+ view.show_progress(task2)
+ view.enable_bar = True
+ # so we're in the first half of the main task, and half way through
+ # that
+ self.assertEqual(
+'[####- ] reticulating splines:stage2 1/2 '
+ , view._render_line())
+ # if the nested task is complete, then we're all the way through the
+ # first half of the overall work
+ task2.update('stage2', 2, 2)
+ self.assertEqual(
+'[#########\ ] reticulating splines:stage2 2/2 '
+ , view._render_line())
+
+ def test_render_progress_sub_nested(self):
+ """Intermediate tasks don't mess up calculation."""
+ out, view = self.make_view()
+ view.enable_bar = True
+ task_a = ProgressTask(None, progress_view=view)
+ task_a.update('a', 0, 2)
+ task_b = ProgressTask(task_a, progress_view=view)
+ task_b.update('b')
+ task_c = ProgressTask(task_b, progress_view=view)
+ task_c.update('c', 1, 2)
+ # the top-level task is in its first half; the middle one has no
+ # progress indication, just a label; and the bottom one is half done,
+ # so the overall fraction is 1/4
+ self.assertEqual(
+'[####| ] a:b:c 1/2 '
+ , view._render_line())
+
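The 1/4 in the previous test follows from how nested tasks are combined: each task's local fraction is scaled into the slice of its parent that the parent's current step occupies, and tasks with no counts only contribute a label. A back-of-the-envelope version of that arithmetic (illustrative; not the actual ProgressTask code):

    def overall_fraction(levels):
        # levels: (current, total) pairs from outermost to innermost task,
        # with None for a task that only carries a label.
        fraction, scale = 0.0, 1.0
        for level in levels:
            if level is None:
                continue
            current, total = level
            fraction += scale * (float(current) / total)
            scale /= total
        return fraction

    overall_fraction([(0, 2), None, (1, 2)])  # 0.25, i.e. the 1/4 above
    overall_fraction([(0, 2), (2, 2)])        # 0.5, as in the nested test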
+ def test_render_truncated(self):
+ # when the bar is too long for the terminal, we prefer not to truncate
+ # the counters because they might be interesting, and because
+ # truncating the numbers might be misleading
+ out, view = self.make_view()
+ task_a = ProgressTask(None, progress_view=view)
+ task_a.update('start_' + 'a' * 200 + '_end', 2000, 5000)
+ line = view._render_line()
+ self.assertEqual(
+'- start_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.. 2000/5000',
+ line)
+ self.assertEqual(len(line), 79)
+
+
+ def test_render_with_activity(self):
+ # if the progress view has activity, it's shown before the spinner
+ out, view = self.make_view()
+ task_a = ProgressTask(None, progress_view=view)
+ view._last_transport_msg = ' 123kB 100kB/s '
+ line = view._render_line()
+ self.assertEqual(
+' 123kB 100kB/s / ',
+ line)
+ self.assertEqual(len(line), 79)
+
+ task_a.update('start_' + 'a' * 200 + '_end', 2000, 5000)
+ view._last_transport_msg = ' 123kB 100kB/s '
+ line = view._render_line()
+ self.assertEqual(
+' 123kB 100kB/s \\ start_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.. 2000/5000',
+ line)
+ self.assertEqual(len(line), 79)
+
+ def test_render_progress_unicode_enc_utf8(self):
+ out = tests.StringIOWrapper()
+ out.encoding = "utf-8"
+ view = self.make_view_only(out, 20)
+ task = self.make_task(None, view, u"\xa7", 0, 1)
+ view.show_progress(task)
+ self.assertEqual('\r/ \xc2\xa7 0/1 \r',
+ out.getvalue())
+
+ def test_render_progress_unicode_enc_missing(self):
+ out = StringIO()
+ self.assertRaises(AttributeError, getattr, out, "encoding")
+ view = self.make_view_only(out, 20)
+ task = self.make_task(None, view, u"\xa7", 0, 1)
+ view.show_progress(task)
+ self.assertEqual('\r/ ? 0/1 \r',
+ out.getvalue())
+
+ def test_render_progress_unicode_enc_none(self):
+ out = tests.StringIOWrapper()
+ out.encoding = None
+ view = self.make_view_only(out, 20)
+ task = self.make_task(None, view, u"\xa7", 0, 1)
+ view.show_progress(task)
+ self.assertEqual('\r/ ? 0/1 \r',
+ out.getvalue())
diff --git a/bzrlib/tests/test_pyutils.py b/bzrlib/tests/test_pyutils.py
new file mode 100644
index 0000000..694d9e7
--- /dev/null
+++ b/bzrlib/tests/test_pyutils.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.pyutils."""
+
+from bzrlib import (
+ branch,
+ tests,
+ )
+from bzrlib.pyutils import (
+ calc_parent_name,
+ get_named_object,
+ )
+
+
+class TestGetNamedObject(tests.TestCase):
+ """Tests for get_named_object."""
+
+ def test_module_only(self):
+ import sys
+ self.assertIs(sys, get_named_object('sys'))
+
+ def test_dotted_module(self):
+ self.assertIs(branch, get_named_object('bzrlib.branch'))
+
+ def test_module_attr(self):
+ self.assertIs(
+ branch.Branch, get_named_object('bzrlib.branch', 'Branch'))
+
+ def test_dotted_attr(self):
+ self.assertIs(
+ branch.Branch.hooks,
+ get_named_object('bzrlib.branch', 'Branch.hooks'))
+
+ def test_package(self):
+ # bzrlib.tests is a package, not simply a module
+ self.assertIs(tests, get_named_object('bzrlib.tests'))
+
+ def test_package_attr(self):
+ # bzrlib.tests is a package, not simply a module
+ self.assertIs(
+ tests.TestCase, get_named_object('bzrlib.tests', 'TestCase'))
+
+ def test_import_error(self):
+ self.assertRaises(ImportError, get_named_object, 'NO_SUCH_MODULE')
+
+ def test_attribute_error(self):
+ self.assertRaises(
+ AttributeError, get_named_object, 'sys', 'NO_SUCH_ATTR')
+
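A stand-in that behaves as the assertions above describe (the real implementation is bzrlib.pyutils.get_named_object; this sketch only makes the import-then-getattr walk explicit):

    def get_named_object_sketch(module_name, member_name=None):
        obj = __import__(module_name)
        # __import__ returns the top-level package, so walk down to the module...
        for part in module_name.split('.')[1:]:
            obj = getattr(obj, part)
        # ...then walk the dotted member name, if one was given.
        if member_name:
            for part in member_name.split('.'):
                obj = getattr(obj, part)
        return obj

    import sys
    get_named_object_sketch('sys') is sys   # True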
+
+
+class TestCalcParent_name(tests.TestCase):
+ """Tests for calc_parent_name."""
+
+ def test_dotted_member(self):
+ self.assertEqual(
+ ('mod_name', 'attr1', 'attr2'),
+ calc_parent_name('mod_name', 'attr1.attr2'))
+
+ def test_undotted_member(self):
+ self.assertEqual(
+ ('mod_name', None, 'attr1'),
+ calc_parent_name('mod_name', 'attr1'))
+
+ def test_dotted_module_no_member(self):
+ self.assertEqual(
+ ('mod', None, 'sub_mod'),
+ calc_parent_name('mod.sub_mod'))
+
+ def test_undotted_module_no_member(self):
+ err = self.assertRaises(AssertionError, calc_parent_name, 'mod_name')
+ self.assertEqual(
+ "No parent object for top-level module 'mod_name'", err.args[0])
+
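The behaviour being pinned down: calc_parent_name names the parent of the object, presumably so callers can later re-bind the attribute. A rough standalone equivalent (illustrative; the real function is bzrlib.pyutils.calc_parent_name):

    def calc_parent_name_sketch(module_name, member_name=None):
        if member_name is not None:
            parent, _, name = member_name.rpartition('.')
            return (module_name, parent or None, name)
        parent, _, name = module_name.rpartition('.')
        if not parent:
            raise AssertionError(
                "No parent object for top-level module %r" % module_name)
        return (parent, None, name)

    calc_parent_name_sketch('mod_name', 'attr1.attr2')  # ('mod_name', 'attr1', 'attr2')
    calc_parent_name_sketch('mod.sub_mod')              # ('mod', None, 'sub_mod')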
diff --git a/bzrlib/tests/test_read_bundle.py b/bzrlib/tests/test_read_bundle.py
new file mode 100644
index 0000000..fa30e64
--- /dev/null
+++ b/bzrlib/tests/test_read_bundle.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test read_bundle works properly across various transports."""
+
+import cStringIO
+
+import bzrlib.bundle
+from bzrlib.bundle.serializer import write_bundle
+import bzrlib.bzrdir
+import bzrlib.errors as errors
+from bzrlib import tests
+from bzrlib.tests.test_transport import TestTransportImplementation
+from bzrlib.tests.per_transport import transport_test_permutations
+import bzrlib.transport
+import bzrlib.urlutils
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+def create_bundle_file(test_case):
+ test_case.build_tree(['tree/', 'tree/a', 'tree/subdir/'])
+
+ format = bzrlib.bzrdir.BzrDirFormat.get_default_format()
+
+ bzrdir = format.initialize('tree')
+ repo = bzrdir.create_repository()
+ branch = repo.bzrdir.create_branch()
+ wt = branch.bzrdir.create_workingtree()
+
+ wt.add(['a', 'subdir/'])
+ wt.commit('new project', rev_id='commit-1')
+
+ out = cStringIO.StringIO()
+ rev_ids = write_bundle(wt.branch.repository,
+ wt.get_parent_ids()[0], 'null:', out)
+ out.seek(0)
+ return out, wt
+
+
+class TestReadMergeableBundleFromURL(TestTransportImplementation):
+ """Test that read_bundle works properly across multiple transports"""
+
+ scenarios = transport_test_permutations()
+
+ def setUp(self):
+ super(TestReadMergeableBundleFromURL, self).setUp()
+ self.bundle_name = 'test_bundle'
+ # read_mergeable_from_url will invoke get_transport which may *not*
+        # respect self._transport (i.e. it may return a transport different
+        # from the one we want to test), so we must inject the correct
+        # transport into possible_transports first.
+ self.possible_transports = [self.get_transport(self.bundle_name)]
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ wt = self.create_test_bundle()
+
+ def read_mergeable_from_url(self, url):
+ return bzrlib.bundle.read_mergeable_from_url(
+ url, possible_transports=self.possible_transports)
+
+ def get_url(self, relpath=''):
+ return bzrlib.urlutils.join(self._server.get_url(), relpath)
+
+ def create_test_bundle(self):
+ out, wt = create_bundle_file(self)
+ if self.get_transport().is_readonly():
+ self.build_tree_contents([(self.bundle_name, out.getvalue())])
+ else:
+ self.get_transport().put_file(self.bundle_name, out)
+ self.log('Put to: %s', self.get_url(self.bundle_name))
+ return wt
+
+ def test_read_mergeable_from_url(self):
+ info = self.read_mergeable_from_url(
+ unicode(self.get_url(self.bundle_name)))
+ revision = info.real_revisions[-1]
+ self.assertEqual('commit-1', revision.revision_id)
+
+ def test_read_fail(self):
+ # Trying to read from a directory, or non-bundle file
+ # should fail with NotABundle
+ self.assertRaises(errors.NotABundle,
+ self.read_mergeable_from_url, self.get_url('tree'))
+ self.assertRaises(errors.NotABundle,
+ self.read_mergeable_from_url, self.get_url('tree/a'))
+
+ def test_read_mergeable_respects_possible_transports(self):
+ if not isinstance(self.get_transport(self.bundle_name),
+ bzrlib.transport.ConnectedTransport):
+            # There is no point testing transport reuse for transports that
+            # are not connected (the test would fail anyway).
+ raise tests.TestSkipped(
+ 'Need a ConnectedTransport to test transport reuse')
+ url = unicode(self.get_url(self.bundle_name))
+ info = self.read_mergeable_from_url(url)
+ self.assertEqual(1, len(self.possible_transports))
diff --git a/bzrlib/tests/test_reconcile.py b/bzrlib/tests/test_reconcile.py
new file mode 100644
index 0000000..468d5c1
--- /dev/null
+++ b/bzrlib/tests/test_reconcile.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2006, 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for reconiliation behaviour that is repository independent."""
+
+
+from bzrlib import (
+ bzrdir,
+ errors,
+ tests,
+ )
+from bzrlib.reconcile import reconcile, Reconciler
+from bzrlib.tests import per_repository
+
+
+class TestWorksWithSharedRepositories(per_repository.TestCaseWithRepository):
+
+ def test_reweave_empty(self):
+ # we want a repo capable format
+ parent = bzrdir.BzrDirMetaFormat1().initialize('.')
+ parent.create_repository(shared=True)
+ parent.root_transport.mkdir('child')
+ child = bzrdir.BzrDirMetaFormat1().initialize('child')
+ self.assertRaises(errors.NoRepositoryPresent, child.open_repository)
+ reconciler = Reconciler(child)
+ reconciler.reconcile()
+        # Smoke test that the reconcile() convenience function works too.
+ reconcile(child)
+ # no inconsistent parents should have been found
+ # but the values should have been set.
+ self.assertEqual(0, reconciler.inconsistent_parents)
+ # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+
+
+class TestReconciler(tests.TestCaseWithTransport):
+
+ def test_reconciler_with_no_branch(self):
+ repo = self.make_repository('repo')
+ reconciler = Reconciler(repo.bzrdir)
+ reconciler.reconcile()
+ # no inconsistent parents should have been found
+ # but the values should have been set.
+ self.assertEqual(0, reconciler.inconsistent_parents)
+ # and no garbage inventories
+ self.assertEqual(0, reconciler.garbage_inventories)
+ self.assertIs(None, reconciler.fixed_branch_history)
+
+ def test_reconciler_finds_branch(self):
+ a_branch = self.make_branch('a_branch')
+ reconciler = Reconciler(a_branch.bzrdir)
+ reconciler.reconcile()
+
+ # It should have checked the repository, and the branch
+ self.assertEqual(0, reconciler.inconsistent_parents)
+ self.assertEqual(0, reconciler.garbage_inventories)
+ self.assertIs(False, reconciler.fixed_branch_history)
diff --git a/bzrlib/tests/test_reconfigure.py b/bzrlib/tests/test_reconfigure.py
new file mode 100644
index 0000000..58a23d5
--- /dev/null
+++ b/bzrlib/tests/test_reconfigure.py
@@ -0,0 +1,457 @@
+# Copyright (C) 2007, 2008, 2009, 2011, 2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ branch as _mod_branch,
+ controldir,
+ errors,
+ reconfigure,
+ repository,
+ tests,
+ vf_repository,
+ workingtree,
+ )
+
+
+class TestReconfigure(tests.TestCaseWithTransport):
+
+ def test_tree_to_branch(self):
+ tree = self.make_branch_and_tree('tree')
+ reconfiguration = reconfigure.Reconfigure.to_branch(tree.bzrdir)
+ reconfiguration.apply()
+ self.assertRaises(errors.NoWorkingTree, workingtree.WorkingTree.open,
+ 'tree')
+
+ def test_modified_tree_to_branch(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ tree.add('file')
+ reconfiguration = reconfigure.Reconfigure.to_branch(tree.bzrdir)
+ self.assertRaises(errors.UncommittedChanges, reconfiguration.apply)
+ reconfiguration.apply(force=True)
+ self.assertRaises(errors.NoWorkingTree, workingtree.WorkingTree.open,
+ 'tree')
+
+ def test_tree_with_pending_merge_to_branch(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('unchanged')
+ other_tree = tree.bzrdir.sprout('other').open_workingtree()
+ other_tree.commit('mergeable commit')
+ tree.merge_from_branch(other_tree.branch)
+ reconfiguration = reconfigure.Reconfigure.to_branch(tree.bzrdir)
+ self.assertRaises(errors.UncommittedChanges, reconfiguration.apply)
+ reconfiguration.apply(force=True)
+ self.assertRaises(errors.NoWorkingTree, workingtree.WorkingTree.open,
+ 'tree')
+
+ def test_branch_to_branch(self):
+ branch = self.make_branch('branch')
+ self.assertRaises(errors.AlreadyBranch,
+ reconfigure.Reconfigure.to_branch, branch.bzrdir)
+
+ def test_repo_to_branch(self):
+ repo = self.make_repository('repo')
+ reconfiguration = reconfigure.Reconfigure.to_branch(repo.bzrdir)
+ reconfiguration.apply()
+
+ def test_checkout_to_branch(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout')
+ reconfiguration = reconfigure.Reconfigure.to_branch(checkout.bzrdir)
+ reconfiguration.apply()
+ reconfigured = controldir.ControlDir.open('checkout').open_branch()
+ self.assertIs(None, reconfigured.get_bound_location())
+
+ def prepare_lightweight_checkout_to_branch(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ checkout.commit('first commit', rev_id='rev1')
+ reconfiguration = reconfigure.Reconfigure.to_branch(checkout.bzrdir)
+ return reconfiguration, checkout
+
+ def test_lightweight_checkout_to_branch(self):
+ reconfiguration, checkout = \
+ self.prepare_lightweight_checkout_to_branch()
+ reconfiguration.apply()
+ checkout_branch = checkout.bzrdir.open_branch()
+ self.assertEqual(checkout_branch.bzrdir.root_transport.base,
+ checkout.bzrdir.root_transport.base)
+ self.assertEqual('rev1', checkout_branch.last_revision())
+ repo = checkout.bzrdir.open_repository()
+ repo.get_revision('rev1')
+
+ def test_lightweight_checkout_to_branch_tags(self):
+ reconfiguration, checkout = \
+ self.prepare_lightweight_checkout_to_branch()
+ checkout.branch.tags.set_tag('foo', 'bar')
+ reconfiguration.apply()
+ checkout_branch = checkout.bzrdir.open_branch()
+ self.assertEqual('bar', checkout_branch.tags.lookup_tag('foo'))
+
+ def prepare_lightweight_checkout_to_checkout(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ reconfiguration = reconfigure.Reconfigure.to_checkout(checkout.bzrdir)
+ return reconfiguration, checkout
+
+ def test_lightweight_checkout_to_checkout(self):
+ reconfiguration, checkout = \
+ self.prepare_lightweight_checkout_to_checkout()
+ reconfiguration.apply()
+ checkout_branch = checkout.bzrdir.open_branch()
+ self.assertIsNot(checkout_branch.get_bound_location(), None)
+
+ def test_lightweight_checkout_to_checkout_tags(self):
+ reconfiguration, checkout = \
+ self.prepare_lightweight_checkout_to_checkout()
+ checkout.branch.tags.set_tag('foo', 'bar')
+ reconfiguration.apply()
+ checkout_branch = checkout.bzrdir.open_branch()
+ self.assertEqual('bar', checkout_branch.tags.lookup_tag('foo'))
+
+ def test_lightweight_conversion_uses_shared_repo(self):
+ parent = self.make_branch('parent')
+ shared_repo = self.make_repository('repo', shared=True)
+ checkout = parent.create_checkout('repo/checkout', lightweight=True)
+ reconfigure.Reconfigure.to_tree(checkout.bzrdir).apply()
+ checkout_repo = checkout.bzrdir.open_branch().repository
+ self.assertEqual(shared_repo.bzrdir.root_transport.base,
+ checkout_repo.bzrdir.root_transport.base)
+
+ def test_branch_to_tree(self):
+ branch = self.make_branch('branch')
+        reconfiguration = reconfigure.Reconfigure.to_tree(branch.bzrdir)
+ reconfiguration.apply()
+ branch.bzrdir.open_workingtree()
+
+ def test_tree_to_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.assertRaises(errors.AlreadyTree, reconfigure.Reconfigure.to_tree,
+ tree.bzrdir)
+
+ def test_select_bind_location(self):
+ branch = self.make_branch('branch')
+ reconfiguration = reconfigure.Reconfigure(branch.bzrdir)
+ self.assertRaises(errors.NoBindLocation,
+ reconfiguration._select_bind_location)
+ branch.set_parent('http://parent')
+ reconfiguration = reconfigure.Reconfigure(branch.bzrdir)
+ self.assertEqual('http://parent',
+ reconfiguration._select_bind_location())
+ branch.set_push_location('sftp://push')
+ reconfiguration = reconfigure.Reconfigure(branch.bzrdir)
+ self.assertEqual('sftp://push',
+ reconfiguration._select_bind_location())
+ branch.lock_write()
+ try:
+ branch.set_bound_location('bzr://foo/old-bound')
+ branch.set_bound_location(None)
+ finally:
+ branch.unlock()
+ reconfiguration = reconfigure.Reconfigure(branch.bzrdir)
+ self.assertEqual('bzr://foo/old-bound',
+ reconfiguration._select_bind_location())
+ branch.set_bound_location('bzr://foo/cur-bound')
+ reconfiguration = reconfigure.Reconfigure(branch.bzrdir)
+ self.assertEqual('bzr://foo/cur-bound',
+ reconfiguration._select_bind_location())
+ reconfiguration.new_bound_location = 'ftp://user-specified'
+ self.assertEqual('ftp://user-specified',
+ reconfiguration._select_bind_location())
+
+ def test_select_reference_bind_location(self):
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('checkout', lightweight=True)
+ reconfiguration = reconfigure.Reconfigure(checkout.bzrdir)
+ self.assertEqual(branch.base,
+ reconfiguration._select_bind_location())
+
+ def test_tree_to_checkout(self):
+ # A tree with no related branches and no supplied bind location cannot
+ # become a checkout
+ parent = self.make_branch('parent')
+
+ tree = self.make_branch_and_tree('tree')
+ reconfiguration = reconfigure.Reconfigure.to_checkout(tree.bzrdir)
+ self.assertRaises(errors.NoBindLocation, reconfiguration.apply)
+ # setting a parent allows it to become a checkout
+ tree.branch.set_parent(parent.base)
+ reconfiguration = reconfigure.Reconfigure.to_checkout(tree.bzrdir)
+ reconfiguration.apply()
+ # supplying a location allows it to become a checkout
+ tree2 = self.make_branch_and_tree('tree2')
+ reconfiguration = reconfigure.Reconfigure.to_checkout(tree2.bzrdir,
+ parent.base)
+ reconfiguration.apply()
+
+ def test_tree_to_lightweight_checkout(self):
+ # A tree with no related branches and no supplied bind location cannot
+ # become a checkout
+ parent = self.make_branch('parent')
+
+ tree = self.make_branch_and_tree('tree')
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ tree.bzrdir)
+ self.assertRaises(errors.NoBindLocation, reconfiguration.apply)
+ # setting a parent allows it to become a checkout
+ tree.branch.set_parent(parent.base)
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ tree.bzrdir)
+ reconfiguration.apply()
+ # supplying a location allows it to become a checkout
+ tree2 = self.make_branch_and_tree('tree2')
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ tree2.bzrdir, parent.base)
+ reconfiguration.apply()
+
+ def test_checkout_to_checkout(self):
+ parent = self.make_branch('parent')
+ checkout = parent.create_checkout('checkout')
+ self.assertRaises(errors.AlreadyCheckout,
+ reconfigure.Reconfigure.to_checkout, checkout.bzrdir)
+
+ def make_unsynced_checkout(self):
+ parent = self.make_branch('parent')
+ checkout = parent.create_checkout('checkout')
+ checkout.commit('test', rev_id='new-commit', local=True)
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ checkout.bzrdir)
+ return checkout, parent, reconfiguration
+
+ def test_unsynced_checkout_to_lightweight(self):
+ checkout, parent, reconfiguration = self.make_unsynced_checkout()
+ self.assertRaises(errors.UnsyncedBranches, reconfiguration.apply)
+
+ def test_synced_checkout_to_lightweight(self):
+ checkout, parent, reconfiguration = self.make_unsynced_checkout()
+ parent.pull(checkout.branch)
+ reconfiguration.apply()
+ wt = checkout.bzrdir.open_workingtree()
+ self.assertTrue(parent.repository.has_same_location(
+ wt.branch.repository))
+ parent.repository.get_revision('new-commit')
+ self.assertRaises(errors.NoRepositoryPresent,
+ checkout.bzrdir.open_repository)
+
+ def prepare_branch_to_lightweight_checkout(self):
+ parent = self.make_branch('parent')
+ child = parent.bzrdir.sprout('child').open_workingtree()
+ child.commit('test', rev_id='new-commit')
+ parent.pull(child.branch)
+ child.bzrdir.destroy_workingtree()
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ child.bzrdir)
+ return parent, child, reconfiguration
+
+ def test_branch_to_lightweight_checkout(self):
+ parent, child, reconfiguration = \
+ self.prepare_branch_to_lightweight_checkout()
+ reconfiguration.apply()
+ self.assertTrue(reconfiguration._destroy_branch)
+ wt = child.bzrdir.open_workingtree()
+ self.assertTrue(parent.repository.has_same_location(
+ wt.branch.repository))
+ parent.repository.get_revision('new-commit')
+ self.assertRaises(errors.NoRepositoryPresent,
+ child.bzrdir.open_repository)
+
+ def test_branch_to_lightweight_checkout_failure(self):
+ parent, child, reconfiguration = \
+ self.prepare_branch_to_lightweight_checkout()
+ old_Repository_fetch = vf_repository.VersionedFileRepository.fetch
+ vf_repository.VersionedFileRepository.fetch = None
+ try:
+ self.assertRaises(TypeError, reconfiguration.apply)
+ finally:
+ vf_repository.VersionedFileRepository.fetch = old_Repository_fetch
+ child = _mod_branch.Branch.open('child')
+ self.assertContainsRe(child.base, 'child/$')
+
+ def test_branch_to_lightweight_checkout_fetch_tags(self):
+ parent, child, reconfiguration = \
+ self.prepare_branch_to_lightweight_checkout()
+ child.branch.tags.set_tag('foo', 'bar')
+ reconfiguration.apply()
+ child = _mod_branch.Branch.open('child')
+ self.assertEqual('bar', parent.tags.lookup_tag('foo'))
+
+ def test_lightweight_checkout_to_lightweight_checkout(self):
+ parent = self.make_branch('parent')
+ checkout = parent.create_checkout('checkout', lightweight=True)
+ self.assertRaises(errors.AlreadyLightweightCheckout,
+ reconfigure.Reconfigure.to_lightweight_checkout,
+ checkout.bzrdir)
+
+ def test_repo_to_tree(self):
+ repo = self.make_repository('repo')
+ reconfiguration = reconfigure.Reconfigure.to_tree(repo.bzrdir)
+ reconfiguration.apply()
+ workingtree.WorkingTree.open('repo')
+
+ def test_shared_repo_to_lightweight_checkout(self):
+ repo = self.make_repository('repo', shared=True)
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ repo.bzrdir)
+ self.assertRaises(errors.NoBindLocation, reconfiguration.apply)
+ branch = self.make_branch('branch')
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ repo.bzrdir, 'branch')
+ reconfiguration.apply()
+ workingtree.WorkingTree.open('repo')
+ repository.Repository.open('repo')
+
+ def test_unshared_repo_to_lightweight_checkout(self):
+ repo = self.make_repository('repo', shared=False)
+ branch = self.make_branch('branch')
+ reconfiguration = reconfigure.Reconfigure.to_lightweight_checkout(
+ repo.bzrdir, 'branch')
+ reconfiguration.apply()
+ workingtree.WorkingTree.open('repo')
+ self.assertRaises(errors.NoRepositoryPresent,
+ repository.Repository.open, 'repo')
+
+ def test_standalone_to_use_shared(self):
+ self.build_tree(['root/'])
+ tree = self.make_branch_and_tree('root/tree')
+ tree.commit('Hello', rev_id='hello-id')
+ repo = self.make_repository('root', shared=True)
+ reconfiguration = reconfigure.Reconfigure.to_use_shared(tree.bzrdir)
+ reconfiguration.apply()
+ tree = workingtree.WorkingTree.open('root/tree')
+ self.assertTrue(repo.has_same_location(tree.branch.repository))
+ self.assertEqual('Hello', repo.get_revision('hello-id').message)
+
+    def add_dead_head(self, tree):
+        # Commit a revision, then wind the branch and the tree back to where
+        # they were: 'dead-head-id' stays in the repository but is no longer
+        # referenced by the branch's ancestry.
+        revno, revision_id = tree.branch.last_revision_info()
+        tree.commit('Dead head', rev_id='dead-head-id')
+        tree.branch.set_last_revision_info(revno, revision_id)
+        tree.set_last_revision(revision_id)
+
+ def test_standalone_to_use_shared_preserves_dead_heads(self):
+ self.build_tree(['root/'])
+ tree = self.make_branch_and_tree('root/tree')
+ self.add_dead_head(tree)
+ tree.commit('Hello', rev_id='hello-id')
+ repo = self.make_repository('root', shared=True)
+ reconfiguration = reconfigure.Reconfigure.to_use_shared(tree.bzrdir)
+ reconfiguration.apply()
+ tree = workingtree.WorkingTree.open('root/tree')
+ message = repo.get_revision('dead-head-id').message
+ self.assertEqual('Dead head', message)
+
+ def make_repository_tree(self):
+ self.build_tree(['root/'])
+ repo = self.make_repository('root', shared=True)
+ tree = self.make_branch_and_tree('root/tree')
+ reconfigure.Reconfigure.to_use_shared(tree.bzrdir).apply()
+ return workingtree.WorkingTree.open('root/tree')
+
+ def test_use_shared_to_use_shared(self):
+ tree = self.make_repository_tree()
+ self.assertRaises(errors.AlreadyUsingShared,
+ reconfigure.Reconfigure.to_use_shared, tree.bzrdir)
+
+ def test_use_shared_to_standalone(self):
+ tree = self.make_repository_tree()
+ tree.commit('Hello', rev_id='hello-id')
+ reconfigure.Reconfigure.to_standalone(tree.bzrdir).apply()
+ tree = workingtree.WorkingTree.open('root/tree')
+ repo = tree.branch.repository
+ self.assertEqual(repo.bzrdir.root_transport.base,
+ tree.bzrdir.root_transport.base)
+ self.assertEqual('Hello', repo.get_revision('hello-id').message)
+
+ def test_use_shared_to_standalone_preserves_dead_heads(self):
+ tree = self.make_repository_tree()
+ self.add_dead_head(tree)
+ tree.commit('Hello', rev_id='hello-id')
+ reconfigure.Reconfigure.to_standalone(tree.bzrdir).apply()
+ tree = workingtree.WorkingTree.open('root/tree')
+ repo = tree.branch.repository
+ self.assertRaises(errors.NoSuchRevision, repo.get_revision,
+ 'dead-head-id')
+
+ def test_standalone_to_standalone(self):
+ tree = self.make_branch_and_tree('tree')
+ self.assertRaises(errors.AlreadyStandalone,
+ reconfigure.Reconfigure.to_standalone, tree.bzrdir)
+
+ def make_unsynced_branch_reconfiguration(self):
+ parent = self.make_branch_and_tree('parent')
+ parent.commit('commit 1')
+ child = parent.bzrdir.sprout('child').open_workingtree()
+ child.commit('commit 2')
+ return reconfigure.Reconfigure.to_lightweight_checkout(child.bzrdir)
+
+ def test_unsynced_branch_to_lightweight_checkout_unforced(self):
+ reconfiguration = self.make_unsynced_branch_reconfiguration()
+ self.assertRaises(errors.UnsyncedBranches, reconfiguration.apply)
+
+ def test_unsynced_branch_to_lightweight_checkout_forced(self):
+ reconfiguration = self.make_unsynced_branch_reconfiguration()
+ reconfiguration.apply(force=True)
+
+ def make_repository_with_without_trees(self, with_trees):
+ repo = self.make_repository('repo', shared=True)
+ repo.set_make_working_trees(with_trees)
+ return repo
+
+ def test_make_with_trees(self):
+ repo = self.make_repository_with_without_trees(False)
+ reconfiguration = reconfigure.Reconfigure.set_repository_trees(
+ repo.bzrdir, True)
+ reconfiguration.apply()
+ self.assertIs(True, repo.make_working_trees())
+
+ def test_make_without_trees(self):
+ repo = self.make_repository_with_without_trees(True)
+ reconfiguration = reconfigure.Reconfigure.set_repository_trees(
+ repo.bzrdir, False)
+ reconfiguration.apply()
+ self.assertIs(False, repo.make_working_trees())
+
+ def test_make_with_trees_already_with_trees(self):
+ repo = self.make_repository_with_without_trees(True)
+ e = self.assertRaises(errors.AlreadyWithTrees,
+ reconfigure.Reconfigure.set_repository_trees, repo.bzrdir, True)
+ self.assertContainsRe(str(e),
+ r"Shared repository '.*' already creates working trees.")
+
+ def test_make_without_trees_already_no_trees(self):
+ repo = self.make_repository_with_without_trees(False)
+ e = self.assertRaises(errors.AlreadyWithNoTrees,
+ reconfigure.Reconfigure.set_repository_trees, repo.bzrdir, False)
+ self.assertContainsRe(str(e),
+ r"Shared repository '.*' already doesn't create working trees.")
+
+ def test_repository_tree_reconfiguration_not_supported(self):
+ tree = self.make_branch_and_tree('tree')
+ e = self.assertRaises(errors.ReconfigurationNotSupported,
+ reconfigure.Reconfigure.set_repository_trees, tree.bzrdir, None)
+ self.assertContainsRe(str(e),
+ r"Requested reconfiguration of '.*' is not supported.")
+
+ def test_lightweight_checkout_to_tree_preserves_reference_locations(self):
+ format = controldir.format_registry.make_bzrdir('1.9')
+ format.set_branch_format(_mod_branch.BzrBranchFormat8())
+ tree = self.make_branch_and_tree('tree', format=format)
+ tree.branch.set_reference_info('file_id', 'path', '../location')
+ checkout = tree.branch.create_checkout('checkout', lightweight=True)
+ reconfiguration = reconfigure.Reconfigure.to_tree(checkout.bzrdir)
+ reconfiguration.apply()
+ checkout_branch = checkout.bzrdir.open_branch()
+ self.assertEqual(('path', '../location'),
+ checkout_branch.get_reference_info('file_id'))
diff --git a/bzrlib/tests/test_registry.py b/bzrlib/tests/test_registry.py
new file mode 100644
index 0000000..c8b3286
--- /dev/null
+++ b/bzrlib/tests/test_registry.py
@@ -0,0 +1,356 @@
+# Copyright (C) 2006, 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the Registry classes"""
+
+import os
+import sys
+
+from bzrlib import (
+ branch,
+ osutils,
+ registry,
+ tests,
+ )
+
+
+class TestRegistry(tests.TestCase):
+
+ def register_stuff(self, a_registry):
+ a_registry.register('one', 1)
+ a_registry.register('two', 2)
+ a_registry.register('four', 4)
+ a_registry.register('five', 5)
+
+ def test_registry(self):
+ a_registry = registry.Registry()
+ self.register_stuff(a_registry)
+
+ self.assertTrue(a_registry.default_key is None)
+
+ # test get() (self.default_key is None)
+ self.assertRaises(KeyError, a_registry.get)
+ self.assertRaises(KeyError, a_registry.get, None)
+ self.assertEqual(2, a_registry.get('two'))
+ self.assertRaises(KeyError, a_registry.get, 'three')
+
+ # test _set_default_key
+ a_registry.default_key = 'five'
+ self.assertTrue(a_registry.default_key == 'five')
+ self.assertEqual(5, a_registry.get())
+ self.assertEqual(5, a_registry.get(None))
+ # If they ask for a specific entry, they should get KeyError
+ # not the default value. They can always pass None if they prefer
+ self.assertRaises(KeyError, a_registry.get, 'six')
+ self.assertRaises(KeyError, a_registry._set_default_key, 'six')
+
+ # test keys()
+ self.assertEqual(['five', 'four', 'one', 'two'], a_registry.keys())
+
+ def test_registry_funcs(self):
+ a_registry = registry.Registry()
+ self.register_stuff(a_registry)
+
+ self.assertTrue('one' in a_registry)
+ a_registry.remove('one')
+ self.assertFalse('one' in a_registry)
+ self.assertRaises(KeyError, a_registry.get, 'one')
+
+ a_registry.register('one', 'one')
+
+ self.assertEqual(['five', 'four', 'one', 'two'],
+ sorted(a_registry.keys()))
+ self.assertEqual([('five', 5), ('four', 4),
+ ('one', 'one'), ('two', 2)],
+ sorted(a_registry.iteritems()))
+
+ def test_register_override(self):
+ a_registry = registry.Registry()
+ a_registry.register('one', 'one')
+ self.assertRaises(KeyError, a_registry.register, 'one', 'two')
+ self.assertRaises(KeyError, a_registry.register, 'one', 'two',
+ override_existing=False)
+
+ a_registry.register('one', 'two', override_existing=True)
+ self.assertEqual('two', a_registry.get('one'))
+
+ self.assertRaises(KeyError, a_registry.register_lazy,
+ 'one', 'three', 'four')
+
+ a_registry.register_lazy('one', 'module', 'member',
+ override_existing=True)
+
+ def test_registry_help(self):
+ a_registry = registry.Registry()
+ a_registry.register('one', 1, help='help text for one')
+ # We should not have to import the module to return the help
+ # information
+ a_registry.register_lazy('two', 'nonexistent_module', 'member',
+ help='help text for two')
+
+ # We should be able to handle a callable to get information
+ help_calls = []
+ def generic_help(reg, key):
+ help_calls.append(key)
+ return 'generic help for %s' % (key,)
+ a_registry.register('three', 3, help=generic_help)
+ a_registry.register_lazy('four', 'nonexistent_module', 'member2',
+ help=generic_help)
+ a_registry.register('five', 5)
+
+ def help_from_object(reg, key):
+ obj = reg.get(key)
+ return obj.help()
+
+ class SimpleObj(object):
+ def help(self):
+ return 'this is my help'
+ a_registry.register('six', SimpleObj(), help=help_from_object)
+
+ self.assertEqual('help text for one', a_registry.get_help('one'))
+ self.assertEqual('help text for two', a_registry.get_help('two'))
+ self.assertEqual('generic help for three',
+ a_registry.get_help('three'))
+ self.assertEqual(['three'], help_calls)
+ self.assertEqual('generic help for four',
+ a_registry.get_help('four'))
+ self.assertEqual(['three', 'four'], help_calls)
+ self.assertEqual(None, a_registry.get_help('five'))
+ self.assertEqual('this is my help', a_registry.get_help('six'))
+
+ self.assertRaises(KeyError, a_registry.get_help, None)
+ self.assertRaises(KeyError, a_registry.get_help, 'seven')
+
+ a_registry.default_key = 'one'
+ self.assertEqual('help text for one', a_registry.get_help(None))
+ self.assertRaises(KeyError, a_registry.get_help, 'seven')
+
+ self.assertEqual([('five', None),
+ ('four', 'generic help for four'),
+ ('one', 'help text for one'),
+ ('six', 'this is my help'),
+ ('three', 'generic help for three'),
+ ('two', 'help text for two'),
+ ], sorted((key, a_registry.get_help(key))
+ for key in a_registry.keys()))
+
+ # We don't know what order it was called in, but we should get
+ # 2 more calls to three and four
+ self.assertEqual(['four', 'four', 'three', 'three'],
+ sorted(help_calls))
+
+ def test_registry_info(self):
+ a_registry = registry.Registry()
+ a_registry.register('one', 1, info='string info')
+ # We should not have to import the module to return the info
+ a_registry.register_lazy('two', 'nonexistent_module', 'member',
+ info=2)
+
+        # Any object can be used as the info value
+ a_registry.register('three', 3, info=['a', 'list'])
+ obj = object()
+ a_registry.register_lazy('four', 'nonexistent_module', 'member2',
+ info=obj)
+ a_registry.register('five', 5)
+
+ self.assertEqual('string info', a_registry.get_info('one'))
+ self.assertEqual(2, a_registry.get_info('two'))
+ self.assertEqual(['a', 'list'], a_registry.get_info('three'))
+ self.assertIs(obj, a_registry.get_info('four'))
+ self.assertIs(None, a_registry.get_info('five'))
+
+ self.assertRaises(KeyError, a_registry.get_info, None)
+ self.assertRaises(KeyError, a_registry.get_info, 'six')
+
+ a_registry.default_key = 'one'
+ self.assertEqual('string info', a_registry.get_info(None))
+ self.assertRaises(KeyError, a_registry.get_info, 'six')
+
+ self.assertEqual([('five', None),
+ ('four', obj),
+ ('one', 'string info'),
+ ('three', ['a', 'list']),
+ ('two', 2),
+ ], sorted((key, a_registry.get_info(key))
+ for key in a_registry.keys()))
+
+ def test_get_prefix(self):
+ my_registry = registry.Registry()
+ http_object = object()
+ sftp_object = object()
+ my_registry.register('http:', http_object)
+ my_registry.register('sftp:', sftp_object)
+ found_object, suffix = my_registry.get_prefix('http://foo/bar')
+ self.assertEqual('//foo/bar', suffix)
+ self.assertIs(http_object, found_object)
+ self.assertIsNot(sftp_object, found_object)
+ found_object, suffix = my_registry.get_prefix('sftp://baz/qux')
+ self.assertEqual('//baz/qux', suffix)
+ self.assertIs(sftp_object, found_object)
+
+
+class TestRegistryIter(tests.TestCase):
+ """Test registry iteration behaviors.
+
+ There are dark corner cases here when the registered objects trigger
+ addition in the iterated registry.
+ """
+
+ def setUp(self):
+ super(TestRegistryIter, self).setUp()
+
+ # We create a registry with "official" objects and "hidden"
+        # objects. The latter represent the side effects that led to bugs
+        # #277048 and #430510.
+ _registry = registry.Registry()
+
+ def register_more():
+ _registry.register('hidden', None)
+
+        # Avoid closing over self by binding a local variable
+ self.registry = _registry
+ self.registry.register('passive', None)
+ self.registry.register('active', register_more)
+ self.registry.register('passive-too', None)
+
+ class InvasiveGetter(registry._ObjectGetter):
+
+ def get_obj(inner_self):
+                # Surprise! Getting a registered object (think lazily loaded
+                # module) registers yet another object!
+ _registry.register('more hidden', None)
+ return inner_self._obj
+
+ self.registry.register('hacky', None)
+        # We peek under the covers because the alternative is to use lazy
+        # registration and create a module that can reference our test
+        # registry; that's too much work for such a corner case -- vila 090916
+ self.registry._dict['hacky'] = InvasiveGetter(None)
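+        # The fixture therefore yields four visible entries ('passive',
+        # 'active', 'passive-too' and 'hacky'); calling the 'active' value or
+        # fetching 'hacky' registers 'hidden'/'more hidden' as a side effect,
+        # which is what the iteration tests below exercise.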
+
+ def _iter_them(self, iter_func_name):
+ iter_func = getattr(self.registry, iter_func_name, None)
+ self.assertIsNot(None, iter_func)
+ count = 0
+ for name, func in iter_func():
+ count += 1
+ self.assertFalse(name in ('hidden', 'more hidden'))
+ if func is not None:
+                # Using an object registers another one as a side effect
+ func()
+ self.assertEqual(4, count)
+
+ def test_iteritems(self):
+        # the dict is modified during the iteration, so iterating lazily raises
+ self.assertRaises(RuntimeError, self._iter_them, 'iteritems')
+
+ def test_items(self):
+        # we should be able to iterate even if one item modifies the dict
+ self._iter_them('items')
+
+
+class TestRegistryWithDirs(tests.TestCaseInTempDir):
+ """Registry tests that require temporary dirs"""
+
+ def create_plugin_file(self, contents):
+ """Create a file to be used as a plugin.
+
+ This is created in a temporary directory, so that we
+ are sure that it doesn't start in the plugin path.
+ """
+ os.mkdir('tmp')
+ plugin_name = 'bzr_plugin_a_%s' % (osutils.rand_chars(4),)
+ with open('tmp/'+plugin_name+'.py', 'wb') as f: f.write(contents)
+ return plugin_name
+
+ def create_simple_plugin(self):
+ return self.create_plugin_file(
+ 'object1 = "foo"\n'
+ '\n\n'
+ 'def function(a,b,c):\n'
+ ' return a,b,c\n'
+ '\n\n'
+ 'class MyClass(object):\n'
+ ' def __init__(self, a):\n'
+ ' self.a = a\n'
+ '\n\n'
+ )
+
+ def test_lazy_import_registry_foo(self):
+ a_registry = registry.Registry()
+ a_registry.register_lazy('foo', 'bzrlib.branch', 'Branch')
+ a_registry.register_lazy('bar', 'bzrlib.branch', 'Branch.hooks')
+ self.assertEqual(branch.Branch, a_registry.get('foo'))
+ self.assertEqual(branch.Branch.hooks, a_registry.get('bar'))
+
+ def test_lazy_import_registry(self):
+ plugin_name = self.create_simple_plugin()
+ a_registry = registry.Registry()
+ a_registry.register_lazy('obj', plugin_name, 'object1')
+ a_registry.register_lazy('function', plugin_name, 'function')
+ a_registry.register_lazy('klass', plugin_name, 'MyClass')
+ a_registry.register_lazy('module', plugin_name, None)
+
+ self.assertEqual(['function', 'klass', 'module', 'obj'],
+ sorted(a_registry.keys()))
+ # The plugin should not be loaded until we grab the first object
+ self.assertFalse(plugin_name in sys.modules)
+
+ # By default the plugin won't be in the search path
+ self.assertRaises(ImportError, a_registry.get, 'obj')
+
+ plugin_path = os.getcwd() + '/tmp'
+ sys.path.append(plugin_path)
+ try:
+ obj = a_registry.get('obj')
+ self.assertEqual('foo', obj)
+ self.assertTrue(plugin_name in sys.modules)
+
+ # Now grab another object
+ func = a_registry.get('function')
+ self.assertEqual(plugin_name, func.__module__)
+ self.assertEqual('function', func.__name__)
+ self.assertEqual((1, [], '3'), func(1, [], '3'))
+
+ # And finally a class
+ klass = a_registry.get('klass')
+ self.assertEqual(plugin_name, klass.__module__)
+ self.assertEqual('MyClass', klass.__name__)
+
+ inst = klass(1)
+ self.assertIsInstance(inst, klass)
+ self.assertEqual(1, inst.a)
+
+ module = a_registry.get('module')
+ self.assertIs(obj, module.object1)
+ self.assertIs(func, module.function)
+ self.assertIs(klass, module.MyClass)
+ finally:
+ sys.path.remove(plugin_path)
+
+ def test_lazy_import_get_module(self):
+ a_registry = registry.Registry()
+ a_registry.register_lazy('obj', "bzrlib.tests.test_registry",
+ 'object1')
+ self.assertEquals("bzrlib.tests.test_registry",
+ a_registry._get_module("obj"))
+
+ def test_normal_get_module(self):
+ class AThing(object):
+ """Something"""
+ a_registry = registry.Registry()
+ a_registry.register("obj", AThing())
+ self.assertEquals("bzrlib.tests.test_registry",
+ a_registry._get_module("obj"))
diff --git a/bzrlib/tests/test_remote.py b/bzrlib/tests/test_remote.py
new file mode 100644
index 0000000..f9a162a
--- /dev/null
+++ b/bzrlib/tests/test_remote.py
@@ -0,0 +1,4294 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for remote bzrdir/branch/repo/etc
+
+These are proxy objects which act on remote objects by sending messages
+through a smart client. The proxies are to be created when attempting to open
+the object given a transport that supports smartserver rpc operations.
+
+These tests correspond to tests.test_smart, which exercises the server side.
+"""
+
+import bz2
+from cStringIO import StringIO
+import zlib
+
+from bzrlib import (
+ bencode,
+ branch,
+ bzrdir,
+ config,
+ controldir,
+ errors,
+ inventory,
+ inventory_delta,
+ remote,
+ repository,
+ tests,
+ transport,
+ treebuilder,
+ versionedfile,
+ vf_search,
+ )
+from bzrlib.branch import Branch
+from bzrlib.bzrdir import (
+ BzrDir,
+ BzrDirFormat,
+ RemoteBzrProber,
+ )
+from bzrlib.chk_serializer import chk_bencode_serializer
+from bzrlib.remote import (
+ RemoteBranch,
+ RemoteBranchFormat,
+ RemoteBzrDir,
+ RemoteBzrDirFormat,
+ RemoteRepository,
+ RemoteRepositoryFormat,
+ )
+from bzrlib.repofmt import groupcompress_repo, knitpack_repo
+from bzrlib.revision import (
+ NULL_REVISION,
+ Revision,
+ )
+from bzrlib.smart import medium, request
+from bzrlib.smart.client import _SmartClient
+from bzrlib.smart.repository import (
+ SmartServerRepositoryGetParentMap,
+ SmartServerRepositoryGetStream_1_19,
+ _stream_to_byte_stream,
+ )
+from bzrlib.symbol_versioning import deprecated_in
+from bzrlib.tests import (
+ test_server,
+ )
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+from bzrlib.transport.memory import MemoryTransport
+from bzrlib.transport.remote import (
+ RemoteTransport,
+ RemoteSSHTransport,
+ RemoteTCPTransport,
+ )
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class BasicRemoteObjectTests(tests.TestCaseWithTransport):
+
+ scenarios = [
+ ('HPSS-v2',
+ {'transport_server': test_server.SmartTCPServer_for_testing_v2_only}),
+ ('HPSS-v3',
+ {'transport_server': test_server.SmartTCPServer_for_testing})]
+
+ def setUp(self):
+ super(BasicRemoteObjectTests, self).setUp()
+ self.transport = self.get_transport()
+ # make a branch that can be opened over the smart transport
+ self.local_wt = BzrDir.create_standalone_workingtree('.')
+ self.addCleanup(self.transport.disconnect)
+
+ def test_create_remote_bzrdir(self):
+ b = remote.RemoteBzrDir(self.transport, RemoteBzrDirFormat())
+ self.assertIsInstance(b, BzrDir)
+
+ def test_open_remote_branch(self):
+ # open a standalone branch in the working directory
+ b = remote.RemoteBzrDir(self.transport, RemoteBzrDirFormat())
+ branch = b.open_branch()
+ self.assertIsInstance(branch, Branch)
+
+ def test_remote_repository(self):
+ b = BzrDir.open_from_transport(self.transport)
+ repo = b.open_repository()
+ revid = u'\xc823123123'.encode('utf8')
+ self.assertFalse(repo.has_revision(revid))
+ self.local_wt.commit(message='test commit', rev_id=revid)
+ self.assertTrue(repo.has_revision(revid))
+
+ def test_find_correct_format(self):
+ """Should open a RemoteBzrDir over a RemoteTransport"""
+ fmt = BzrDirFormat.find_format(self.transport)
+ self.assertTrue(bzrdir.RemoteBzrProber
+ in controldir.ControlDirFormat._server_probers)
+ self.assertIsInstance(fmt, RemoteBzrDirFormat)
+
+ def test_open_detected_smart_format(self):
+ fmt = BzrDirFormat.find_format(self.transport)
+ d = fmt.open(self.transport)
+ self.assertIsInstance(d, BzrDir)
+
+ def test_remote_branch_repr(self):
+ b = BzrDir.open_from_transport(self.transport).open_branch()
+ self.assertStartsWith(str(b), 'RemoteBranch(')
+
+ def test_remote_bzrdir_repr(self):
+ b = BzrDir.open_from_transport(self.transport)
+ self.assertStartsWith(str(b), 'RemoteBzrDir(')
+
+ def test_remote_branch_format_supports_stacking(self):
+ t = self.transport
+ self.make_branch('unstackable', format='pack-0.92')
+ b = BzrDir.open_from_transport(t.clone('unstackable')).open_branch()
+ self.assertFalse(b._format.supports_stacking())
+ self.make_branch('stackable', format='1.9')
+ b = BzrDir.open_from_transport(t.clone('stackable')).open_branch()
+ self.assertTrue(b._format.supports_stacking())
+
+ def test_remote_repo_format_supports_external_references(self):
+ t = self.transport
+ bd = self.make_bzrdir('unstackable', format='pack-0.92')
+ r = bd.create_repository()
+ self.assertFalse(r._format.supports_external_lookups)
+ r = BzrDir.open_from_transport(t.clone('unstackable')).open_repository()
+ self.assertFalse(r._format.supports_external_lookups)
+ bd = self.make_bzrdir('stackable', format='1.9')
+ r = bd.create_repository()
+ self.assertTrue(r._format.supports_external_lookups)
+ r = BzrDir.open_from_transport(t.clone('stackable')).open_repository()
+ self.assertTrue(r._format.supports_external_lookups)
+
+ def test_remote_branch_set_append_revisions_only(self):
+ # Make a format 1.9 branch, which supports append_revisions_only
+ branch = self.make_branch('branch', format='1.9')
+ branch.set_append_revisions_only(True)
+ config = branch.get_config_stack()
+ self.assertEqual(
+ True, config.get('append_revisions_only'))
+ branch.set_append_revisions_only(False)
+ config = branch.get_config_stack()
+ self.assertEqual(
+ False, config.get('append_revisions_only'))
+
+ def test_remote_branch_set_append_revisions_only_upgrade_reqd(self):
+ branch = self.make_branch('branch', format='knit')
+ self.assertRaises(
+ errors.UpgradeRequired, branch.set_append_revisions_only, True)
+
+
+class FakeProtocol(object):
+ """Lookalike SmartClientRequestProtocolOne allowing body reading tests."""
+
+ def __init__(self, body, fake_client):
+ self.body = body
+ self._body_buffer = None
+ self._fake_client = fake_client
+
+ def read_body_bytes(self, count=-1):
+ if self._body_buffer is None:
+ self._body_buffer = StringIO(self.body)
+ bytes = self._body_buffer.read(count)
+ if self._body_buffer.tell() == len(self._body_buffer.getvalue()):
+ self._fake_client.expecting_body = False
+ return bytes
+
+ def cancel_read_body(self):
+ self._fake_client.expecting_body = False
+
+ def read_streamed_body(self):
+ return self.body
+
+
+class FakeClient(_SmartClient):
+ """Lookalike for _SmartClient allowing testing."""
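+    #
+    # Typical usage (illustrative sketch, mirroring the tests below):
+    #
+    #     client = FakeClient(transport.base)
+    #     client.add_expected_call(
+    #         'Branch.get_parent', ('quack/',), 'success', ('http://foo/',))
+    #     ... exercise code that issues the RPC via this client ...
+    #     client.finished_test()   # raises if expected calls remain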
+
+ def __init__(self, fake_medium_base='fake base'):
+ """Create a FakeClient."""
+ self.responses = []
+ self._calls = []
+ self.expecting_body = False
+        # If non-None, this is the list of expected calls, with only the
+        # method name and arguments included. The body might be hard to
+        # compute, so it is not included. If a call is None, that call can
+        # be anything.
+ self._expected_calls = None
+ _SmartClient.__init__(self, FakeMedium(self._calls, fake_medium_base))
+
+ def add_expected_call(self, call_name, call_args, response_type,
+ response_args, response_body=None):
+ if self._expected_calls is None:
+ self._expected_calls = []
+ self._expected_calls.append((call_name, call_args))
+ self.responses.append((response_type, response_args, response_body))
+
+ def add_success_response(self, *args):
+ self.responses.append(('success', args, None))
+
+ def add_success_response_with_body(self, body, *args):
+ self.responses.append(('success', args, body))
+ if self._expected_calls is not None:
+ self._expected_calls.append(None)
+
+ def add_error_response(self, *args):
+ self.responses.append(('error', args))
+
+ def add_unknown_method_response(self, verb):
+ self.responses.append(('unknown', verb))
+
+ def finished_test(self):
+ if self._expected_calls:
+ raise AssertionError("%r finished but was still expecting %r"
+ % (self, self._expected_calls[0]))
+
+ def _get_next_response(self):
+ try:
+ response_tuple = self.responses.pop(0)
+ except IndexError, e:
+ raise AssertionError("%r didn't expect any more calls"
+ % (self,))
+ if response_tuple[0] == 'unknown':
+ raise errors.UnknownSmartMethod(response_tuple[1])
+ elif response_tuple[0] == 'error':
+ raise errors.ErrorFromSmartServer(response_tuple[1])
+ return response_tuple
+
+ def _check_call(self, method, args):
+ if self._expected_calls is None:
+ # the test should be updated to say what it expects
+ return
+ try:
+ next_call = self._expected_calls.pop(0)
+ except IndexError:
+ raise AssertionError("%r didn't expect any more calls "
+ "but got %r%r"
+ % (self, method, args,))
+ if next_call is None:
+ return
+ if method != next_call[0] or args != next_call[1]:
+ raise AssertionError("%r expected %r%r "
+ "but got %r%r"
+ % (self, next_call[0], next_call[1], method, args,))
+
+ def call(self, method, *args):
+ self._check_call(method, args)
+ self._calls.append(('call', method, args))
+ return self._get_next_response()[1]
+
+ def call_expecting_body(self, method, *args):
+ self._check_call(method, args)
+ self._calls.append(('call_expecting_body', method, args))
+ result = self._get_next_response()
+ self.expecting_body = True
+ return result[1], FakeProtocol(result[2], self)
+
+ def call_with_body_bytes(self, method, args, body):
+ self._check_call(method, args)
+ self._calls.append(('call_with_body_bytes', method, args, body))
+ result = self._get_next_response()
+ return result[1], FakeProtocol(result[2], self)
+
+ def call_with_body_bytes_expecting_body(self, method, args, body):
+ self._check_call(method, args)
+ self._calls.append(('call_with_body_bytes_expecting_body', method,
+ args, body))
+ result = self._get_next_response()
+ self.expecting_body = True
+ return result[1], FakeProtocol(result[2], self)
+
+ def call_with_body_stream(self, args, stream):
+        # Explicitly consume the stream before checking for an error, because
+        # that's what happens with a real medium.
+ stream = list(stream)
+ self._check_call(args[0], args[1:])
+ self._calls.append(('call_with_body_stream', args[0], args[1:], stream))
+ result = self._get_next_response()
+ # The second value returned from call_with_body_stream is supposed to
+ # be a response_handler object, but so far no tests depend on that.
+ response_handler = None
+ return result[1], response_handler
+
+
+class FakeMedium(medium.SmartClientMedium):
+
+ def __init__(self, client_calls, base):
+ medium.SmartClientMedium.__init__(self, base)
+ self._client_calls = client_calls
+
+ def disconnect(self):
+ self._client_calls.append(('disconnect medium',))
+
+
+class TestVfsHas(tests.TestCase):
+
+ def test_unicode_path(self):
+ client = FakeClient('/')
+ client.add_success_response('yes',)
+ transport = RemoteTransport('bzr://localhost/', _client=client)
+ filename = u'/hell\u00d8'.encode('utf8')
+ result = transport.has(filename)
+ self.assertEqual(
+ [('call', 'has', (filename,))],
+ client._calls)
+ self.assertTrue(result)
+
+
+class TestRemote(tests.TestCaseWithMemoryTransport):
+
+ def get_branch_format(self):
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ return reference_bzrdir_format.get_branch_format()
+
+ def get_repo_format(self):
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ return reference_bzrdir_format.repository_format
+
+ def assertFinished(self, fake_client):
+ """Assert that all of a FakeClient's expected calls have occurred."""
+ fake_client.finished_test()
+
+
+class Test_ClientMedium_remote_path_from_transport(tests.TestCase):
+ """Tests for the behaviour of client_medium.remote_path_from_transport."""
+
+ def assertRemotePath(self, expected, client_base, transport_base):
+ """Assert that the result of
+ SmartClientMedium.remote_path_from_transport is the expected value for
+ a given client_base and transport_base.
+ """
+ client_medium = medium.SmartClientMedium(client_base)
+ t = transport.get_transport(transport_base)
+ result = client_medium.remote_path_from_transport(t)
+ self.assertEqual(expected, result)
+
+ def test_remote_path_from_transport(self):
+ """SmartClientMedium.remote_path_from_transport calculates a URL for
+ the given transport relative to the root of the client base URL.
+ """
+ self.assertRemotePath('xyz/', 'bzr://host/path', 'bzr://host/xyz')
+ self.assertRemotePath(
+ 'path/xyz/', 'bzr://host/path', 'bzr://host/path/xyz')
+
+ def assertRemotePathHTTP(self, expected, transport_base, relpath):
+ """Assert that the result of
+ HttpTransportBase.remote_path_from_transport is the expected value for
+ a given transport_base and relpath of that transport. (Note that
+ HttpTransportBase is a subclass of SmartClientMedium)
+ """
+ base_transport = transport.get_transport(transport_base)
+ client_medium = base_transport.get_smart_medium()
+ cloned_transport = base_transport.clone(relpath)
+ result = client_medium.remote_path_from_transport(cloned_transport)
+ self.assertEqual(expected, result)
+
+ def test_remote_path_from_transport_http(self):
+ """Remote paths for HTTP transports are calculated differently to other
+ transports. They are just relative to the client base, not the root
+ directory of the host.
+ """
+ for scheme in ['http:', 'https:', 'bzr+http:', 'bzr+https:']:
+ self.assertRemotePathHTTP(
+ '../xyz/', scheme + '//host/path', '../xyz/')
+ self.assertRemotePathHTTP(
+ 'xyz/', scheme + '//host/path', 'xyz/')
+
+
+class Test_ClientMedium_remote_is_at_least(tests.TestCase):
+ """Tests for the behaviour of client_medium.remote_is_at_least."""
+
+ def test_initially_unlimited(self):
+ """A fresh medium assumes that the remote side supports all
+ versions.
+ """
+ client_medium = medium.SmartClientMedium('dummy base')
+ self.assertFalse(client_medium._is_remote_before((99, 99)))
+
+ def test__remember_remote_is_before(self):
+ """Calling _remember_remote_is_before ratchets down the known remote
+ version.
+ """
+ client_medium = medium.SmartClientMedium('dummy base')
+ # Mark the remote side as being less than 1.6. The remote side may
+ # still be 1.5.
+ client_medium._remember_remote_is_before((1, 6))
+ self.assertTrue(client_medium._is_remote_before((1, 6)))
+ self.assertFalse(client_medium._is_remote_before((1, 5)))
+ # Calling _remember_remote_is_before again with a lower value works.
+ client_medium._remember_remote_is_before((1, 5))
+ self.assertTrue(client_medium._is_remote_before((1, 5)))
+ # If you call _remember_remote_is_before with a higher value it logs a
+ # warning, and continues to remember the lower value.
+ self.assertNotContainsRe(self.get_log(), '_remember_remote_is_before')
+ client_medium._remember_remote_is_before((1, 9))
+ self.assertContainsRe(self.get_log(), '_remember_remote_is_before')
+ self.assertTrue(client_medium._is_remote_before((1, 5)))
+
+
+class TestBzrDirCloningMetaDir(TestRemote):
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ a_dir = self.make_bzrdir('.')
+ self.reset_smart_call_log()
+ verb = 'BzrDir.cloning_metadir'
+ self.disable_verb(verb)
+ format = a_dir.cloning_metadir()
+ call_count = len([call for call in self.hpss_calls if
+ call.call.method == verb])
+ self.assertEqual(1, call_count)
+
+ def test_branch_reference(self):
+ transport = self.get_transport('quack')
+ referenced = self.make_branch('referenced')
+ expected = referenced.bzrdir.cloning_metadir()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.cloning_metadir', ('quack/', 'False'),
+ 'error', ('BranchReference',)),
+ client.add_expected_call(
+ 'BzrDir.open_branchV3', ('quack/',),
+ 'success', ('ref', self.get_url('referenced'))),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = a_bzrdir.cloning_metadir()
+ # We should have got a control dir matching the referenced branch.
+ self.assertEqual(bzrdir.BzrDirMetaFormat1, type(result))
+ self.assertEqual(expected._repository_format, result._repository_format)
+ self.assertEqual(expected._branch_format, result._branch_format)
+ self.assertFinished(client)
+
+ def test_current_server(self):
+ transport = self.get_transport('.')
+ transport = transport.clone('quack')
+ self.make_bzrdir('quack')
+ client = FakeClient(transport.base)
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ control_name = reference_bzrdir_format.network_name()
+ client.add_expected_call(
+ 'BzrDir.cloning_metadir', ('quack/', 'False'),
+ 'success', (control_name, '', ('branch', ''))),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = a_bzrdir.cloning_metadir()
+ # We should have got a reference control dir with default branch and
+ # repository formats.
+ # This pokes a little, just to be sure.
+ self.assertEqual(bzrdir.BzrDirMetaFormat1, type(result))
+ self.assertEqual(None, result._repository_format)
+ self.assertEqual(None, result._branch_format)
+ self.assertFinished(client)
+
+ def test_unknown(self):
+ transport = self.get_transport('quack')
+ referenced = self.make_branch('referenced')
+ expected = referenced.bzrdir.cloning_metadir()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.cloning_metadir', ('quack/', 'False'),
+ 'success', ('unknown', 'unknown', ('branch', ''))),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ self.assertRaises(errors.UnknownFormatError, a_bzrdir.cloning_metadir)
+
+
+class TestBzrDirCheckoutMetaDir(TestRemote):
+
+ def test__get_checkout_format(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ control_name = reference_bzrdir_format.network_name()
+ client.add_expected_call(
+ 'BzrDir.checkout_metadir', ('quack/', ),
+ 'success', (control_name, '', ''))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = a_bzrdir.checkout_metadir()
+ # We should have got a reference control dir with default branch and
+ # repository formats.
+ self.assertEqual(bzrdir.BzrDirMetaFormat1, type(result))
+ self.assertEqual(None, result._repository_format)
+ self.assertEqual(None, result._branch_format)
+ self.assertFinished(client)
+
+ def test_unknown_format(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.checkout_metadir', ('quack/',),
+ 'success', ('dontknow', '', ''))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ self.assertRaises(errors.UnknownFormatError,
+ a_bzrdir.checkout_metadir)
+ self.assertFinished(client)
+
+
+class TestBzrDirGetBranches(TestRemote):
+
+ def test_get_branches(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ branch_name = reference_bzrdir_format.get_branch_format().network_name()
+ client.add_success_response_with_body(
+ bencode.bencode({
+ "foo": ("branch", branch_name),
+ "": ("branch", branch_name)}), "success")
+ client.add_success_response(
+ 'ok', '', 'no', 'no', 'no',
+ reference_bzrdir_format.repository_format.network_name())
+ client.add_error_response('NotStacked')
+ client.add_success_response(
+ 'ok', '', 'no', 'no', 'no',
+ reference_bzrdir_format.repository_format.network_name())
+ client.add_error_response('NotStacked')
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = a_bzrdir.get_branches()
+ self.assertEquals(set(["", "foo"]), set(result.keys()))
+ self.assertEqual(
+ [('call_expecting_body', 'BzrDir.get_branches', ('quack/',)),
+ ('call', 'BzrDir.find_repositoryV3', ('quack/', )),
+ ('call', 'Branch.get_stacked_on_url', ('quack/', )),
+ ('call', 'BzrDir.find_repositoryV3', ('quack/', )),
+ ('call', 'Branch.get_stacked_on_url', ('quack/', ))],
+ client._calls)
+
+
+class TestBzrDirDestroyBranch(TestRemote):
+
+ def test_destroy_default(self):
+ transport = self.get_transport('quack')
+ referenced = self.make_branch('referenced')
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.destroy_branch', ('quack/', ),
+ 'success', ('ok',)),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ a_bzrdir.destroy_branch()
+ self.assertFinished(client)
+
+
+class TestBzrDirHasWorkingTree(TestRemote):
+
+ def test_has_workingtree(self):
+ transport = self.get_transport('quack')
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.has_workingtree', ('quack/',),
+ 'success', ('yes',)),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ self.assertTrue(a_bzrdir.has_workingtree())
+ self.assertFinished(client)
+
+ def test_no_workingtree(self):
+ transport = self.get_transport('quack')
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.has_workingtree', ('quack/',),
+ 'success', ('no',)),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ self.assertFalse(a_bzrdir.has_workingtree())
+ self.assertFinished(client)
+
+
+class TestBzrDirDestroyRepository(TestRemote):
+
+ def test_destroy_repository(self):
+ transport = self.get_transport('quack')
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.destroy_repository', ('quack/',),
+ 'success', ('ok',)),
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ a_bzrdir.destroy_repository()
+ self.assertFinished(client)
+
+
+class TestBzrDirOpen(TestRemote):
+
+ def make_fake_client_and_transport(self, path='quack'):
+ transport = MemoryTransport()
+ transport.mkdir(path)
+ transport = transport.clone(path)
+ client = FakeClient(transport.base)
+ return client, transport
+
+ def test_absent(self):
+ client, transport = self.make_fake_client_and_transport()
+ client.add_expected_call(
+ 'BzrDir.open_2.1', ('quack/',), 'success', ('no',))
+ self.assertRaises(errors.NotBranchError, RemoteBzrDir, transport,
+ RemoteBzrDirFormat(), _client=client, _force_probe=True)
+ self.assertFinished(client)
+
+ def test_present_without_workingtree(self):
+ client, transport = self.make_fake_client_and_transport()
+ client.add_expected_call(
+ 'BzrDir.open_2.1', ('quack/',), 'success', ('yes', 'no'))
+ bd = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client, _force_probe=True)
+ self.assertIsInstance(bd, RemoteBzrDir)
+ self.assertFalse(bd.has_workingtree())
+ self.assertRaises(errors.NoWorkingTree, bd.open_workingtree)
+ self.assertFinished(client)
+
+ def test_present_with_workingtree(self):
+ client, transport = self.make_fake_client_and_transport()
+ client.add_expected_call(
+ 'BzrDir.open_2.1', ('quack/',), 'success', ('yes', 'yes'))
+ bd = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client, _force_probe=True)
+ self.assertIsInstance(bd, RemoteBzrDir)
+ self.assertTrue(bd.has_workingtree())
+ self.assertRaises(errors.NotLocalUrl, bd.open_workingtree)
+ self.assertFinished(client)
+
+ def test_backwards_compat(self):
+ client, transport = self.make_fake_client_and_transport()
+ client.add_expected_call(
+ 'BzrDir.open_2.1', ('quack/',), 'unknown', ('BzrDir.open_2.1',))
+ client.add_expected_call(
+ 'BzrDir.open', ('quack/',), 'success', ('yes',))
+ bd = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client, _force_probe=True)
+ self.assertIsInstance(bd, RemoteBzrDir)
+ self.assertFinished(client)
+
+ def test_backwards_compat_hpss_v2(self):
+ client, transport = self.make_fake_client_and_transport()
+ # Monkey-patch fake client to simulate real-world behaviour with v2
+ # server: upon first RPC call detect the protocol version, and because
+ # the version is 2 also do _remember_remote_is_before((1, 6)) before
+ # continuing with the RPC.
+ orig_check_call = client._check_call
+ def check_call(method, args):
+ client._medium._protocol_version = 2
+ client._medium._remember_remote_is_before((1, 6))
+ client._check_call = orig_check_call
+ client._check_call(method, args)
+ client._check_call = check_call
+ client.add_expected_call(
+ 'BzrDir.open_2.1', ('quack/',), 'unknown', ('BzrDir.open_2.1',))
+ client.add_expected_call(
+ 'BzrDir.open', ('quack/',), 'success', ('yes',))
+ bd = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client, _force_probe=True)
+ self.assertIsInstance(bd, RemoteBzrDir)
+ self.assertFinished(client)
+
+
+class TestBzrDirOpenBranch(TestRemote):
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ self.make_branch('.')
+ a_dir = BzrDir.open(self.get_url('.'))
+ self.reset_smart_call_log()
+ verb = 'BzrDir.open_branchV3'
+ self.disable_verb(verb)
+ format = a_dir.open_branch()
+ call_count = len([call for call in self.hpss_calls if
+ call.call.method == verb])
+ self.assertEqual(1, call_count)
+
+ def test_branch_present(self):
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ branch_network_name = self.get_branch_format().network_name()
+ transport = MemoryTransport()
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDir.open_branchV3', ('quack/',),
+ 'success', ('branch', branch_network_name))
+ client.add_expected_call(
+ 'BzrDir.find_repositoryV3', ('quack/',),
+ 'success', ('ok', '', 'no', 'no', 'no', network_name))
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = bzrdir.open_branch()
+ self.assertIsInstance(result, RemoteBranch)
+ self.assertEqual(bzrdir, result.bzrdir)
+ self.assertFinished(client)
+
+ def test_branch_missing(self):
+ transport = MemoryTransport()
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ client = FakeClient(transport.base)
+ client.add_error_response('nobranch')
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ self.assertRaises(errors.NotBranchError, bzrdir.open_branch)
+ self.assertEqual(
+ [('call', 'BzrDir.open_branchV3', ('quack/',))],
+ client._calls)
+
+ def test__get_tree_branch(self):
+        # _get_tree_branch is a form of open_branch, but it should only ask
+        # for branch opening, not make any other network requests.
+ calls = []
+ def open_branch(name=None, possible_transports=None):
+ calls.append("Called")
+ return "a-branch"
+ transport = MemoryTransport()
+ # no requests on the network - catches other api calls being made.
+ client = FakeClient(transport.base)
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ # patch the open_branch call to record that it was called.
+ bzrdir.open_branch = open_branch
+ self.assertEqual((None, "a-branch"), bzrdir._get_tree_branch())
+ self.assertEqual(["Called"], calls)
+ self.assertEqual([], client._calls)
+
+ def test_url_quoting_of_path(self):
+ # Relpaths on the wire should not be URL-escaped. So "~" should be
+ # transmitted as "~", not "%7E".
+ transport = RemoteTCPTransport('bzr://localhost/~hello/')
+ client = FakeClient(transport.base)
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ branch_network_name = self.get_branch_format().network_name()
+ client.add_expected_call(
+ 'BzrDir.open_branchV3', ('~hello/',),
+ 'success', ('branch', branch_network_name))
+ client.add_expected_call(
+ 'BzrDir.find_repositoryV3', ('~hello/',),
+ 'success', ('ok', '', 'no', 'no', 'no', network_name))
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('~hello/',),
+ 'error', ('NotStacked',))
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = bzrdir.open_branch()
+ self.assertFinished(client)
+
+ def check_open_repository(self, rich_root, subtrees, external_lookup='no'):
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ transport = MemoryTransport()
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ if rich_root:
+ rich_response = 'yes'
+ else:
+ rich_response = 'no'
+ if subtrees:
+ subtree_response = 'yes'
+ else:
+ subtree_response = 'no'
+ client = FakeClient(transport.base)
+ client.add_success_response(
+ 'ok', '', rich_response, subtree_response, external_lookup,
+ network_name)
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ result = bzrdir.open_repository()
+ self.assertEqual(
+ [('call', 'BzrDir.find_repositoryV3', ('quack/',))],
+ client._calls)
+ self.assertIsInstance(result, RemoteRepository)
+ self.assertEqual(bzrdir, result.bzrdir)
+ self.assertEqual(rich_root, result._format.rich_root_data)
+ self.assertEqual(subtrees, result._format.supports_tree_reference)
+
+ def test_open_repository_sets_format_attributes(self):
+ self.check_open_repository(True, True)
+ self.check_open_repository(False, True)
+ self.check_open_repository(True, False)
+ self.check_open_repository(False, False)
+ self.check_open_repository(False, False, 'yes')
+
+ def test_old_server(self):
+ """RemoteBzrDirFormat should fail to probe if the server version is too
+ old.
+ """
+ self.assertRaises(errors.NotBranchError,
+ RemoteBzrProber.probe_transport, OldServerTransport())
+
+
+class TestBzrDirCreateBranch(TestRemote):
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ repo = self.make_repository('.')
+ self.reset_smart_call_log()
+ self.disable_verb('BzrDir.create_branch')
+ branch = repo.bzrdir.create_branch()
+ create_branch_call_count = len([call for call in self.hpss_calls if
+ call.call.method == 'BzrDir.create_branch'])
+ self.assertEqual(1, create_branch_call_count)
+
+ def test_current_server(self):
+ transport = self.get_transport('.')
+ transport = transport.clone('quack')
+ self.make_repository('quack')
+ client = FakeClient(transport.base)
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ reference_format = reference_bzrdir_format.get_branch_format()
+ network_name = reference_format.network_name()
+ reference_repo_fmt = reference_bzrdir_format.repository_format
+ reference_repo_name = reference_repo_fmt.network_name()
+ client.add_expected_call(
+ 'BzrDir.create_branch', ('quack/', network_name),
+ 'success', ('ok', network_name, '', 'no', 'no', 'yes',
+ reference_repo_name))
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ branch = a_bzrdir.create_branch()
+ # We should have got a remote branch
+ self.assertIsInstance(branch, remote.RemoteBranch)
+ # its format should have the settings from the response
+ format = branch._format
+ self.assertEqual(network_name, format.network_name())
+
+ def test_already_open_repo_and_reused_medium(self):
+ """Bug 726584: create_branch(..., repository=repo) should work
+ regardless of what the smart medium's base URL is.
+ """
+ self.transport_server = test_server.SmartTCPServer_for_testing
+ transport = self.get_transport('.')
+ repo = self.make_repository('quack')
+        # Client's medium is rooted at the transport root (not at the bzrdir)
+ client = FakeClient(transport.base)
+ transport = transport.clone('quack')
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ reference_format = reference_bzrdir_format.get_branch_format()
+ network_name = reference_format.network_name()
+ reference_repo_fmt = reference_bzrdir_format.repository_format
+ reference_repo_name = reference_repo_fmt.network_name()
+ client.add_expected_call(
+ 'BzrDir.create_branch', ('extra/quack/', network_name),
+ 'success', ('ok', network_name, '', 'no', 'no', 'yes',
+ reference_repo_name))
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ branch = a_bzrdir.create_branch(repository=repo)
+ # We should have got a remote branch
+ self.assertIsInstance(branch, remote.RemoteBranch)
+ # its format should have the settings from the response
+ format = branch._format
+ self.assertEqual(network_name, format.network_name())
+
+
+class TestBzrDirCreateRepository(TestRemote):
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ bzrdir = self.make_bzrdir('.')
+ self.reset_smart_call_log()
+ self.disable_verb('BzrDir.create_repository')
+ repo = bzrdir.create_repository()
+ create_repo_call_count = len([call for call in self.hpss_calls if
+ call.call.method == 'BzrDir.create_repository'])
+ self.assertEqual(1, create_repo_call_count)
+
+ def test_current_server(self):
+ transport = self.get_transport('.')
+ transport = transport.clone('quack')
+ self.make_bzrdir('quack')
+ client = FakeClient(transport.base)
+ reference_bzrdir_format = controldir.format_registry.get('default')()
+ reference_format = reference_bzrdir_format.repository_format
+ network_name = reference_format.network_name()
+ client.add_expected_call(
+ 'BzrDir.create_repository', ('quack/',
+ 'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
+ 'False'),
+ 'success', ('ok', 'yes', 'yes', 'yes', network_name))
+ a_bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ repo = a_bzrdir.create_repository()
+ # We should have got a remote repository
+ self.assertIsInstance(repo, remote.RemoteRepository)
+ # its format should have the settings from the response
+ format = repo._format
+ self.assertTrue(format.rich_root_data)
+ self.assertTrue(format.supports_tree_reference)
+ self.assertTrue(format.supports_external_lookups)
+ self.assertEqual(network_name, format.network_name())
+
+
+class TestBzrDirOpenRepository(TestRemote):
+
+ def test_backwards_compat_1_2_3(self):
+ # fallback all the way to the first version.
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ server_url = 'bzr://example.com/'
+ self.permit_url(server_url)
+ client = FakeClient(server_url)
+ client.add_unknown_method_response('BzrDir.find_repositoryV3')
+ client.add_unknown_method_response('BzrDir.find_repositoryV2')
+ client.add_success_response('ok', '', 'no', 'no')
+ # A real repository instance will be created to determine the network
+ # name.
+ client.add_success_response_with_body(
+ "Bazaar-NG meta directory, format 1\n", 'ok')
+ client.add_success_response('stat', '0', '65535')
+ client.add_success_response_with_body(
+ reference_format.get_format_string(), 'ok')
+ # PackRepository wants to do a stat
+ client.add_success_response('stat', '0', '65535')
+ remote_transport = RemoteTransport(server_url + 'quack/', medium=False,
+ _client=client)
+ bzrdir = RemoteBzrDir(remote_transport, RemoteBzrDirFormat(),
+ _client=client)
+ repo = bzrdir.open_repository()
+ self.assertEqual(
+ [('call', 'BzrDir.find_repositoryV3', ('quack/',)),
+ ('call', 'BzrDir.find_repositoryV2', ('quack/',)),
+ ('call', 'BzrDir.find_repository', ('quack/',)),
+ ('call_expecting_body', 'get', ('/quack/.bzr/branch-format',)),
+ ('call', 'stat', ('/quack/.bzr',)),
+ ('call_expecting_body', 'get', ('/quack/.bzr/repository/format',)),
+ ('call', 'stat', ('/quack/.bzr/repository',)),
+ ],
+ client._calls)
+ self.assertEqual(network_name, repo._format.network_name())
+
+ def test_backwards_compat_2(self):
+ # fallback to find_repositoryV2
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ server_url = 'bzr://example.com/'
+ self.permit_url(server_url)
+ client = FakeClient(server_url)
+ client.add_unknown_method_response('BzrDir.find_repositoryV3')
+ client.add_success_response('ok', '', 'no', 'no', 'no')
+ # A real repository instance will be created to determine the network
+ # name.
+ client.add_success_response_with_body(
+ "Bazaar-NG meta directory, format 1\n", 'ok')
+ client.add_success_response('stat', '0', '65535')
+ client.add_success_response_with_body(
+ reference_format.get_format_string(), 'ok')
+ # PackRepository wants to do a stat
+ client.add_success_response('stat', '0', '65535')
+ remote_transport = RemoteTransport(server_url + 'quack/', medium=False,
+ _client=client)
+ bzrdir = RemoteBzrDir(remote_transport, RemoteBzrDirFormat(),
+ _client=client)
+ repo = bzrdir.open_repository()
+ self.assertEqual(
+ [('call', 'BzrDir.find_repositoryV3', ('quack/',)),
+ ('call', 'BzrDir.find_repositoryV2', ('quack/',)),
+ ('call_expecting_body', 'get', ('/quack/.bzr/branch-format',)),
+ ('call', 'stat', ('/quack/.bzr',)),
+ ('call_expecting_body', 'get', ('/quack/.bzr/repository/format',)),
+ ('call', 'stat', ('/quack/.bzr/repository',)),
+ ],
+ client._calls)
+ self.assertEqual(network_name, repo._format.network_name())
+
+ def test_current_server(self):
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ transport = MemoryTransport()
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ client = FakeClient(transport.base)
+ client.add_success_response('ok', '', 'no', 'no', 'no', network_name)
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ repo = bzrdir.open_repository()
+ self.assertEqual(
+ [('call', 'BzrDir.find_repositoryV3', ('quack/',))],
+ client._calls)
+ self.assertEqual(network_name, repo._format.network_name())
+
+
+class TestBzrDirFormatInitializeEx(TestRemote):
+
+ def test_success(self):
+ """Simple test for typical successful call."""
+ fmt = RemoteBzrDirFormat()
+ default_format_name = BzrDirFormat.get_default_format().network_name()
+ transport = self.get_transport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDirFormat.initialize_ex_1.16',
+ (default_format_name, 'path', 'False', 'False', 'False', '',
+ '', '', '', 'False'),
+ 'success',
+ ('.', 'no', 'no', 'yes', 'repo fmt', 'repo bzrdir fmt',
+ 'bzrdir fmt', 'False', '', '', 'repo lock token'))
+ # XXX: It would be better to call fmt.initialize_on_transport_ex, but
+ # it's currently hard to test that without supplying a real remote
+ # transport connected to a real server.
+ result = fmt._initialize_on_transport_ex_rpc(client, 'path',
+ transport, False, False, False, None, None, None, None, False)
+ self.assertFinished(client)
+
+ def test_error(self):
+ """Error responses are translated, e.g. 'PermissionDenied' raises the
+ corresponding error from the client.
+ """
+ fmt = RemoteBzrDirFormat()
+ default_format_name = BzrDirFormat.get_default_format().network_name()
+ transport = self.get_transport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'BzrDirFormat.initialize_ex_1.16',
+ (default_format_name, 'path', 'False', 'False', 'False', '',
+ '', '', '', 'False'),
+ 'error',
+ ('PermissionDenied', 'path', 'extra info'))
+ # XXX: It would be better to call fmt.initialize_on_transport_ex, but
+ # it's currently hard to test that without supplying a real remote
+ # transport connected to a real server.
+ err = self.assertRaises(errors.PermissionDenied,
+ fmt._initialize_on_transport_ex_rpc, client, 'path', transport,
+ False, False, False, None, None, None, None, False)
+ self.assertEqual('path', err.path)
+ self.assertEqual(': extra info', err.extra)
+ self.assertFinished(client)
+
+ def test_error_from_real_server(self):
+ """Integration test for error translation."""
+ transport = self.make_smart_server('foo')
+ transport = transport.clone('no-such-path')
+ fmt = RemoteBzrDirFormat()
+ err = self.assertRaises(errors.NoSuchFile,
+ fmt.initialize_on_transport_ex, transport, create_prefix=False)
+
+
+class OldSmartClient(object):
+ """A fake smart client for test_old_version that just returns a version one
+ response to the 'hello' (query version) command.
+ """
+
+ def get_request(self):
+ input_file = StringIO('ok\x011\n')
+ output_file = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input_file, output_file)
+ return medium.SmartClientStreamMediumRequest(client_medium)
+
+ def protocol_version(self):
+ return 1
+
+
+class OldServerTransport(object):
+    """A fake transport for test_old_server that reports its smart server
+ protocol version as version one.
+ """
+
+ def __init__(self):
+ self.base = 'fake:'
+
+ def get_smart_client(self):
+ return OldSmartClient()
+
+
+class RemoteBzrDirTestCase(TestRemote):
+
+ def make_remote_bzrdir(self, transport, client):
+        """Make a RemoteBzrDir using 'client' as the _client."""
+ return RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+
+
+class RemoteBranchTestCase(RemoteBzrDirTestCase):
+
+ def lock_remote_branch(self, branch):
+ """Trick a RemoteBranch into thinking it is locked."""
+ branch._lock_mode = 'w'
+ branch._lock_count = 2
+ branch._lock_token = 'branch token'
+ branch._repo_lock_token = 'repo token'
+ branch.repository._lock_mode = 'w'
+ branch.repository._lock_count = 2
+ branch.repository._lock_token = 'repo token'
+
+ def make_remote_branch(self, transport, client):
+ """Make a RemoteBranch using 'client' as its _SmartClient.
+
+ A RemoteBzrDir and RemoteRepository will also be created to fill out
+ the RemoteBranch, albeit with stub values for some of their attributes.
+ """
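+        # Note: constructing a RemoteBranch triggers a Branch.get_stacked_on_url
+        # RPC, which is why most tests below queue a NotStacked error response
+        # as their first expected call.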
+ # we do not want bzrdir to make any remote calls, so use False as its
+ # _client. If it tries to make a remote call, this will fail
+ # immediately.
+ bzrdir = self.make_remote_bzrdir(transport, False)
+ repo = RemoteRepository(bzrdir, None, _client=client)
+ branch_format = self.get_branch_format()
+ format = RemoteBranchFormat(network_name=branch_format.network_name())
+ return RemoteBranch(bzrdir, repo, _client=client, format=format)
+
+
+class TestBranchBreakLock(RemoteBranchTestCase):
+
+ def test_break_lock(self):
+ transport_path = 'quack'
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.break_lock', ('quack/',),
+ 'success', ('ok',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ branch.break_lock()
+ self.assertFinished(client)
+
+
+class TestBranchGetPhysicalLockStatus(RemoteBranchTestCase):
+
+ def test_get_physical_lock_status_yes(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.get_physical_lock_status', ('quack/',),
+ 'success', ('yes',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.get_physical_lock_status()
+ self.assertFinished(client)
+ self.assertEqual(True, result)
+
+ def test_get_physical_lock_status_no(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.get_physical_lock_status', ('quack/',),
+ 'success', ('no',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.get_physical_lock_status()
+ self.assertFinished(client)
+ self.assertEqual(False, result)
+
+
+class TestBranchGetParent(RemoteBranchTestCase):
+
+ def test_no_parent(self):
+ # in an empty branch we decode the response properly
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.get_parent', ('quack/',),
+ 'success', ('',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.get_parent()
+ self.assertFinished(client)
+ self.assertEqual(None, result)
+
+ def test_parent_relative(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('kwaak/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.get_parent', ('kwaak/',),
+ 'success', ('../foo/',))
+ transport.mkdir('kwaak')
+ transport = transport.clone('kwaak')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.get_parent()
+ self.assertEqual(transport.clone('../foo').base, result)
+
+ def test_parent_absolute(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('kwaak/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.get_parent', ('kwaak/',),
+ 'success', ('http://foo/',))
+ transport.mkdir('kwaak')
+ transport = transport.clone('kwaak')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.get_parent()
+ self.assertEqual('http://foo/', result)
+ self.assertFinished(client)
+
+
+class TestBranchSetParentLocation(RemoteBranchTestCase):
+
+ def test_no_parent(self):
+ # We call the verb when setting parent to None
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.set_parent_location', ('quack/', 'b', 'r', ''),
+ 'success', ())
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ branch._lock_token = 'b'
+ branch._repo_lock_token = 'r'
+ branch._set_parent_location(None)
+ self.assertFinished(client)
+
+ def test_parent(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('kwaak/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.set_parent_location', ('kwaak/', 'b', 'r', 'foo'),
+ 'success', ())
+ transport.mkdir('kwaak')
+ transport = transport.clone('kwaak')
+ branch = self.make_remote_branch(transport, client)
+ branch._lock_token = 'b'
+ branch._repo_lock_token = 'r'
+ branch._set_parent_location('foo')
+ self.assertFinished(client)
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ branch = self.make_branch('.')
+ self.reset_smart_call_log()
+ verb = 'Branch.set_parent_location'
+ self.disable_verb(verb)
+ branch.set_parent('http://foo/')
+ self.assertLength(14, self.hpss_calls)
+
+
+class TestBranchGetTagsBytes(RemoteBranchTestCase):
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ branch = self.make_branch('.')
+ self.reset_smart_call_log()
+ verb = 'Branch.get_tags_bytes'
+ self.disable_verb(verb)
+ branch.tags.get_tag_dict()
+ call_count = len([call for call in self.hpss_calls if
+ call.call.method == verb])
+ self.assertEqual(1, call_count)
+
+ def test_trivial(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.get_tags_bytes', ('quack/',),
+ 'success', ('',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.tags.get_tag_dict()
+ self.assertFinished(client)
+ self.assertEqual({}, result)
+
+
+class TestBranchSetTagsBytes(RemoteBranchTestCase):
+
+ def test_trivial(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.set_tags_bytes', ('quack/', 'branch token', 'repo token'),
+ 'success', ('',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.lock_remote_branch(branch)
+ branch._set_tags_bytes('tags bytes')
+ self.assertFinished(client)
+ self.assertEqual('tags bytes', client._calls[-1][-1])
+
+ def test_backwards_compatible(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.set_tags_bytes', ('quack/', 'branch token', 'repo token'),
+ 'unknown', ('Branch.set_tags_bytes',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.lock_remote_branch(branch)
+ class StubRealBranch(object):
+ def __init__(self):
+ self.calls = []
+ def _set_tags_bytes(self, bytes):
+ self.calls.append(('set_tags_bytes', bytes))
+ real_branch = StubRealBranch()
+ branch._real_branch = real_branch
+ branch._set_tags_bytes('tags bytes')
+ # Call a second time, to exercise the 'remote version already inferred'
+ # code path.
+ branch._set_tags_bytes('tags bytes')
+ self.assertFinished(client)
+ self.assertEqual(
+ [('set_tags_bytes', 'tags bytes')] * 2, real_branch.calls)
+
+
+class TestBranchHeadsToFetch(RemoteBranchTestCase):
+
+ def test_uses_last_revision_info_and_tags_by_default(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info', ('quack/',),
+ 'success', ('ok', '1', 'rev-tip'))
+ client.add_expected_call(
+ 'Branch.get_config_file', ('quack/',),
+ 'success', ('ok',), '')
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.heads_to_fetch()
+ self.assertFinished(client)
+ self.assertEqual((set(['rev-tip']), set()), result)
+
+ def test_uses_last_revision_info_and_tags_when_set(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info', ('quack/',),
+ 'success', ('ok', '1', 'rev-tip'))
+ client.add_expected_call(
+ 'Branch.get_config_file', ('quack/',),
+ 'success', ('ok',), 'branch.fetch_tags = True')
+ # XXX: this will break if the default format's serialization of tags
+ # changes, or if the RPC for fetching tags changes from get_tags_bytes.
+ client.add_expected_call(
+ 'Branch.get_tags_bytes', ('quack/',),
+ 'success', ('d5:tag-17:rev-foo5:tag-27:rev-bare',))
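+        # (The bencoded body above decodes to {'tag-1': 'rev-foo',
+        # 'tag-2': 'rev-bar'}; the trailing 'e' closes the dict.)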
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.heads_to_fetch()
+ self.assertFinished(client)
+ self.assertEqual(
+ (set(['rev-tip']), set(['rev-foo', 'rev-bar'])), result)
+
+ def test_uses_rpc_for_formats_with_non_default_heads_to_fetch(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.heads_to_fetch', ('quack/',),
+ 'success', (['tip'], ['tagged-1', 'tagged-2']))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ branch._format._use_default_local_heads_to_fetch = lambda: False
+ result = branch.heads_to_fetch()
+ self.assertFinished(client)
+ self.assertEqual((set(['tip']), set(['tagged-1', 'tagged-2'])), result)
+
+ def make_branch_with_tags(self):
+ self.setup_smart_server_with_call_log()
+ # Make a branch with a single revision.
+ builder = self.make_branch_builder('foo')
+ builder.start_series()
+ builder.build_snapshot('tip', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ # Add two tags to that branch
+ branch.tags.set_tag('tag-1', 'rev-1')
+ branch.tags.set_tag('tag-2', 'rev-2')
+ return branch
+
+ def test_backwards_compatible(self):
+ br = self.make_branch_with_tags()
+ br.get_config_stack().set('branch.fetch_tags', True)
+ self.addCleanup(br.lock_read().unlock)
+ # Disable the heads_to_fetch verb
+ verb = 'Branch.heads_to_fetch'
+ self.disable_verb(verb)
+ self.reset_smart_call_log()
+ result = br.heads_to_fetch()
+ self.assertEqual((set(['tip']), set(['rev-1', 'rev-2'])), result)
+ self.assertEqual(
+ ['Branch.last_revision_info', 'Branch.get_tags_bytes'],
+ [call.call.method for call in self.hpss_calls])
+
+ def test_backwards_compatible_no_tags(self):
+ br = self.make_branch_with_tags()
+ br.get_config_stack().set('branch.fetch_tags', False)
+ self.addCleanup(br.lock_read().unlock)
+ # Disable the heads_to_fetch verb
+ verb = 'Branch.heads_to_fetch'
+ self.disable_verb(verb)
+ self.reset_smart_call_log()
+ result = br.heads_to_fetch()
+ self.assertEqual((set(['tip']), set()), result)
+ self.assertEqual(
+ ['Branch.last_revision_info'],
+ [call.call.method for call in self.hpss_calls])
+
+
+class TestBranchLastRevisionInfo(RemoteBranchTestCase):
+
+ def test_empty_branch(self):
+ # in an empty branch we decode the response properly
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info', ('quack/',),
+ 'success', ('ok', '0', 'null:'))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.last_revision_info()
+ self.assertFinished(client)
+ self.assertEqual((0, NULL_REVISION), result)
+
+ def test_non_empty_branch(self):
+ # in a non-empty branch we also decode the response properly
+ revid = u'\xc8'.encode('utf8')
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('kwaak/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info', ('kwaak/',),
+ 'success', ('ok', '2', revid))
+ transport.mkdir('kwaak')
+ transport = transport.clone('kwaak')
+ branch = self.make_remote_branch(transport, client)
+ result = branch.last_revision_info()
+ self.assertEqual((2, revid), result)
+
+
+class TestBranch_get_stacked_on_url(TestRemote):
+ """Test Branch._get_stacked_on_url rpc"""
+
+ def test_get_stacked_on_invalid_url(self):
+        # test that asking for a stacked-on url the server can't access works.
+        # This isn't perfect, but as we're in the same process there really
+        # isn't anything we can do to be 100% sure that the server doesn't
+        # just open it - this test probably needs to be rewritten using a
+        # spawn()ed server.
+ stacked_branch = self.make_branch('stacked', format='1.9')
+ memory_branch = self.make_branch('base', format='1.9')
+ vfs_url = self.get_vfs_only_url('base')
+ stacked_branch.set_stacked_on_url(vfs_url)
+ transport = stacked_branch.bzrdir.root_transport
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('stacked/',),
+ 'success', ('ok', vfs_url))
+        # XXX: Multiple calls are bad; this second call documents what
+        # happens today.
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('stacked/',),
+ 'success', ('ok', vfs_url))
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=client)
+ repo_fmt = remote.RemoteRepositoryFormat()
+ repo_fmt._custom_format = stacked_branch.repository._format
+ branch = RemoteBranch(bzrdir, RemoteRepository(bzrdir, repo_fmt),
+ _client=client)
+ result = branch.get_stacked_on_url()
+ self.assertEqual(vfs_url, result)
+
+ def test_backwards_compatible(self):
+ # like with bzr1.6 with no Branch.get_stacked_on_url rpc
+ base_branch = self.make_branch('base', format='1.6')
+ stacked_branch = self.make_branch('stacked', format='1.6')
+ stacked_branch.set_stacked_on_url('../base')
+ client = FakeClient(self.get_url())
+ branch_network_name = self.get_branch_format().network_name()
+ client.add_expected_call(
+ 'BzrDir.open_branchV3', ('stacked/',),
+ 'success', ('branch', branch_network_name))
+ client.add_expected_call(
+ 'BzrDir.find_repositoryV3', ('stacked/',),
+ 'success', ('ok', '', 'no', 'no', 'yes',
+ stacked_branch.repository._format.network_name()))
+        # called twice, once from the constructor and then again by us
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('stacked/',),
+ 'unknown', ('Branch.get_stacked_on_url',))
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('stacked/',),
+ 'unknown', ('Branch.get_stacked_on_url',))
+ # this will also do vfs access, but that goes direct to the transport
+ # and isn't seen by the FakeClient.
+ bzrdir = RemoteBzrDir(self.get_transport('stacked'),
+ RemoteBzrDirFormat(), _client=client)
+ branch = bzrdir.open_branch()
+ result = branch.get_stacked_on_url()
+ self.assertEqual('../base', result)
+ self.assertFinished(client)
+ # it's in the fallback list both for the RemoteRepository and its vfs
+ # repository
+ self.assertEqual(1, len(branch.repository._fallback_repositories))
+ self.assertEqual(1,
+ len(branch.repository._real_repository._fallback_repositories))
+
+ def test_get_stacked_on_real_branch(self):
+ base_branch = self.make_branch('base')
+ stacked_branch = self.make_branch('stacked')
+ stacked_branch.set_stacked_on_url('../base')
+ reference_format = self.get_repo_format()
+ network_name = reference_format.network_name()
+ client = FakeClient(self.get_url())
+ branch_network_name = self.get_branch_format().network_name()
+ client.add_expected_call(
+ 'BzrDir.open_branchV3', ('stacked/',),
+ 'success', ('branch', branch_network_name))
+ client.add_expected_call(
+ 'BzrDir.find_repositoryV3', ('stacked/',),
+ 'success', ('ok', '', 'yes', 'no', 'yes', network_name))
+        # called twice, once from the constructor and then again by us
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('stacked/',),
+ 'success', ('ok', '../base'))
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('stacked/',),
+ 'success', ('ok', '../base'))
+ bzrdir = RemoteBzrDir(self.get_transport('stacked'),
+ RemoteBzrDirFormat(), _client=client)
+ branch = bzrdir.open_branch()
+ result = branch.get_stacked_on_url()
+ self.assertEqual('../base', result)
+ self.assertFinished(client)
+        # it's in the fallback list for the RemoteRepository.
+ self.assertEqual(1, len(branch.repository._fallback_repositories))
+ # And we haven't had to construct a real repository.
+ self.assertEqual(None, branch.repository._real_repository)
+
+
+class TestBranchSetLastRevision(RemoteBranchTestCase):
+
+ def test_set_empty(self):
+        # _set_last_revision(NULL_REVISION) is translated to calling
+        # Branch.set_last_revision(path, 'null:') on the wire.
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('branch/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.lock_write', ('branch/', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('branch/',),
+ 'success', ('ok', '0', 'null:'))
+ client.add_expected_call(
+ 'Branch.set_last_revision', ('branch/', 'branch token', 'repo token', 'null:',),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Branch.unlock', ('branch/', 'branch token', 'repo token'),
+ 'success', ('ok',))
+ branch = self.make_remote_branch(transport, client)
+ branch.lock_write()
+ result = branch._set_last_revision(NULL_REVISION)
+ branch.unlock()
+ self.assertEqual(None, result)
+ self.assertFinished(client)
+
+ def test_set_nonempty(self):
+        # _set_last_revision(rev-idN) is translated to calling
+        # Branch.set_last_revision(path, rev-idN) on the wire.
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('branch/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.lock_write', ('branch/', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('branch/',),
+ 'success', ('ok', '0', 'null:'))
+ lines = ['rev-id2']
+ encoded_body = bz2.compress('\n'.join(lines))
+ client.add_success_response_with_body(encoded_body, 'ok')
+ client.add_expected_call(
+ 'Branch.set_last_revision', ('branch/', 'branch token', 'repo token', 'rev-id2',),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Branch.unlock', ('branch/', 'branch token', 'repo token'),
+ 'success', ('ok',))
+ branch = self.make_remote_branch(transport, client)
+        # Lock the branch.
+ branch.lock_write()
+ result = branch._set_last_revision('rev-id2')
+ branch.unlock()
+ self.assertEqual(None, result)
+ self.assertFinished(client)
+
+ def test_no_such_revision(self):
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ # A response of 'NoSuchRevision' is translated into an exception.
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('branch/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.lock_write', ('branch/', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('branch/',),
+ 'success', ('ok', '0', 'null:'))
+ # get_graph calls to construct the revision history, for the set_rh
+ # hook
+ lines = ['rev-id']
+ encoded_body = bz2.compress('\n'.join(lines))
+ client.add_success_response_with_body(encoded_body, 'ok')
+ client.add_expected_call(
+ 'Branch.set_last_revision', ('branch/', 'branch token', 'repo token', 'rev-id',),
+ 'error', ('NoSuchRevision', 'rev-id'))
+ client.add_expected_call(
+ 'Branch.unlock', ('branch/', 'branch token', 'repo token'),
+ 'success', ('ok',))
+
+ branch = self.make_remote_branch(transport, client)
+ branch.lock_write()
+ self.assertRaises(
+ errors.NoSuchRevision, branch._set_last_revision, 'rev-id')
+ branch.unlock()
+ self.assertFinished(client)
+
+ def test_tip_change_rejected(self):
+ """TipChangeRejected responses cause a TipChangeRejected exception to
+ be raised.
+ """
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ client = FakeClient(transport.base)
+ rejection_msg_unicode = u'rejection message\N{INTERROBANG}'
+ rejection_msg_utf8 = rejection_msg_unicode.encode('utf8')
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('branch/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.lock_write', ('branch/', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('branch/',),
+ 'success', ('ok', '0', 'null:'))
+ lines = ['rev-id']
+ encoded_body = bz2.compress('\n'.join(lines))
+ client.add_success_response_with_body(encoded_body, 'ok')
+ client.add_expected_call(
+ 'Branch.set_last_revision', ('branch/', 'branch token', 'repo token', 'rev-id',),
+ 'error', ('TipChangeRejected', rejection_msg_utf8))
+ client.add_expected_call(
+ 'Branch.unlock', ('branch/', 'branch token', 'repo token'),
+ 'success', ('ok',))
+ branch = self.make_remote_branch(transport, client)
+ branch.lock_write()
+ # The 'TipChangeRejected' error response triggered by calling
+        # _set_last_revision causes a TipChangeRejected exception.
+ err = self.assertRaises(
+ errors.TipChangeRejected,
+ branch._set_last_revision, 'rev-id')
+ # The UTF-8 message from the response has been decoded into a unicode
+ # object.
+ self.assertIsInstance(err.msg, unicode)
+ self.assertEqual(rejection_msg_unicode, err.msg)
+ branch.unlock()
+ self.assertFinished(client)
+
+
+class TestBranchSetLastRevisionInfo(RemoteBranchTestCase):
+
+ def test_set_last_revision_info(self):
+ # set_last_revision_info(num, 'rev-id') is translated to calling
+ # Branch.set_last_revision_info(num, 'rev-id') on the wire.
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ client = FakeClient(transport.base)
+ # get_stacked_on_url
+ client.add_error_response('NotStacked')
+ # lock_write
+ client.add_success_response('ok', 'branch token', 'repo token')
+ # query the current revision
+ client.add_success_response('ok', '0', 'null:')
+ # set_last_revision
+ client.add_success_response('ok')
+ # unlock
+ client.add_success_response('ok')
+
+ branch = self.make_remote_branch(transport, client)
+ # Lock the branch, reset the record of remote calls.
+ branch.lock_write()
+ client._calls = []
+ result = branch.set_last_revision_info(1234, 'a-revision-id')
+ self.assertEqual(
+ [('call', 'Branch.last_revision_info', ('branch/',)),
+ ('call', 'Branch.set_last_revision_info',
+ ('branch/', 'branch token', 'repo token',
+ '1234', 'a-revision-id'))],
+ client._calls)
+ self.assertEqual(None, result)
+
+ def test_no_such_revision(self):
+ # A response of 'NoSuchRevision' is translated into an exception.
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ client = FakeClient(transport.base)
+ # get_stacked_on_url
+ client.add_error_response('NotStacked')
+ # lock_write
+ client.add_success_response('ok', 'branch token', 'repo token')
+ # set_last_revision
+ client.add_error_response('NoSuchRevision', 'revid')
+ # unlock
+ client.add_success_response('ok')
+
+ branch = self.make_remote_branch(transport, client)
+ # Lock the branch, reset the record of remote calls.
+ branch.lock_write()
+ client._calls = []
+
+ self.assertRaises(
+ errors.NoSuchRevision, branch.set_last_revision_info, 123, 'revid')
+ branch.unlock()
+
+ def test_backwards_compatibility(self):
+ """If the server does not support the Branch.set_last_revision_info
+ verb (which is new in 1.4), then the client falls back to VFS methods.
+ """
+ # This test is a little messy. Unlike most tests in this file, it
+ # doesn't purely test what a Remote* object sends over the wire, and
+ # how it reacts to responses from the wire. It instead relies partly
+ # on asserting that the RemoteBranch will call
+ # self._real_branch.set_last_revision_info(...).
+
+ # First, set up our RemoteBranch with a FakeClient that raises
+ # UnknownSmartMethod, and a StubRealBranch that logs how it is called.
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('branch/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('branch/',),
+ 'success', ('ok', '0', 'null:'))
+ client.add_expected_call(
+ 'Branch.set_last_revision_info',
+ ('branch/', 'branch token', 'repo token', '1234', 'a-revision-id',),
+ 'unknown', 'Branch.set_last_revision_info')
+
+ branch = self.make_remote_branch(transport, client)
+ class StubRealBranch(object):
+ def __init__(self):
+ self.calls = []
+ def set_last_revision_info(self, revno, revision_id):
+ self.calls.append(
+ ('set_last_revision_info', revno, revision_id))
+ def _clear_cached_state(self):
+ pass
+ real_branch = StubRealBranch()
+ branch._real_branch = real_branch
+ self.lock_remote_branch(branch)
+
+ # Call set_last_revision_info, and verify it behaved as expected.
+ result = branch.set_last_revision_info(1234, 'a-revision-id')
+ self.assertEqual(
+ [('set_last_revision_info', 1234, 'a-revision-id')],
+ real_branch.calls)
+ self.assertFinished(client)
+
+ def test_unexpected_error(self):
+ # If the server sends an error the client doesn't understand, it gets
+ # turned into an UnknownErrorFromSmartServer, which is presented as a
+ # non-internal error to the user.
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ client = FakeClient(transport.base)
+ # get_stacked_on_url
+ client.add_error_response('NotStacked')
+ # lock_write
+ client.add_success_response('ok', 'branch token', 'repo token')
+ # set_last_revision
+ client.add_error_response('UnexpectedError')
+ # unlock
+ client.add_success_response('ok')
+
+ branch = self.make_remote_branch(transport, client)
+ # Lock the branch, reset the record of remote calls.
+ branch.lock_write()
+ client._calls = []
+
+ err = self.assertRaises(
+ errors.UnknownErrorFromSmartServer,
+ branch.set_last_revision_info, 123, 'revid')
+ self.assertEqual(('UnexpectedError',), err.error_tuple)
+ branch.unlock()
+
+ def test_tip_change_rejected(self):
+ """TipChangeRejected responses cause a TipChangeRejected exception to
+ be raised.
+ """
+ transport = MemoryTransport()
+ transport.mkdir('branch')
+ transport = transport.clone('branch')
+ client = FakeClient(transport.base)
+ # get_stacked_on_url
+ client.add_error_response('NotStacked')
+ # lock_write
+ client.add_success_response('ok', 'branch token', 'repo token')
+ # set_last_revision
+ client.add_error_response('TipChangeRejected', 'rejection message')
+ # unlock
+ client.add_success_response('ok')
+
+ branch = self.make_remote_branch(transport, client)
+ # Lock the branch, reset the record of remote calls.
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ client._calls = []
+
+ # The 'TipChangeRejected' error response triggered by calling
+ # set_last_revision_info causes a TipChangeRejected exception.
+ err = self.assertRaises(
+ errors.TipChangeRejected,
+ branch.set_last_revision_info, 123, 'revid')
+ self.assertEqual('rejection message', err.msg)
+
+
+class TestBranchGetSetConfig(RemoteBranchTestCase):
+
+ def test_get_branch_conf(self):
+ # in an empty branch we decode the response properly
+ client = FakeClient()
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('memory:///',),
+ 'error', ('NotStacked',),)
+ client.add_success_response_with_body('# config file body', 'ok')
+ transport = MemoryTransport()
+ branch = self.make_remote_branch(transport, client)
+ config = branch.get_config()
+ config.has_explicit_nickname()
+ self.assertEqual(
+ [('call', 'Branch.get_stacked_on_url', ('memory:///',)),
+ ('call_expecting_body', 'Branch.get_config_file', ('memory:///',))],
+ client._calls)
+
+ def test_get_multi_line_branch_conf(self):
+ # Make sure that multiple-line branch.conf files are supported
+ #
+ # https://bugs.launchpad.net/bzr/+bug/354075
+ client = FakeClient()
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('memory:///',),
+ 'error', ('NotStacked',),)
+ client.add_success_response_with_body('a = 1\nb = 2\nc = 3\n', 'ok')
+ transport = MemoryTransport()
+ branch = self.make_remote_branch(transport, client)
+ config = branch.get_config()
+ self.assertEqual(u'2', config.get_user_option('b'))
+
+ def test_set_option(self):
+ client = FakeClient()
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('memory:///',),
+ 'error', ('NotStacked',),)
+ client.add_expected_call(
+ 'Branch.lock_write', ('memory:///', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ client.add_expected_call(
+ 'Branch.set_config_option', ('memory:///', 'branch token',
+ 'repo token', 'foo', 'bar', ''),
+ 'success', ())
+ client.add_expected_call(
+ 'Branch.unlock', ('memory:///', 'branch token', 'repo token'),
+ 'success', ('ok',))
+ transport = MemoryTransport()
+ branch = self.make_remote_branch(transport, client)
+ branch.lock_write()
+ config = branch._get_config()
+ config.set_option('foo', 'bar')
+ branch.unlock()
+ self.assertFinished(client)
+
+ def test_set_option_with_dict(self):
+ client = FakeClient()
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('memory:///',),
+ 'error', ('NotStacked',),)
+ client.add_expected_call(
+ 'Branch.lock_write', ('memory:///', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ encoded_dict_value = 'd5:ascii1:a11:unicode \xe2\x8c\x9a3:\xe2\x80\xbde'
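+        # A sketch of what that bencoded value represents, assuming the same
+        # dict encoding used for Branch.set_config_option_dict:
+        #   'd5:ascii1:a11:unicode \xe2\x8c\x9a3:\xe2\x80\xbde'
+        #   => {'ascii': 'a', 'unicode \xe2\x8c\x9a': '\xe2\x80\xbd'}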
+ client.add_expected_call(
+ 'Branch.set_config_option_dict', ('memory:///', 'branch token',
+ 'repo token', encoded_dict_value, 'foo', ''),
+ 'success', ())
+ client.add_expected_call(
+ 'Branch.unlock', ('memory:///', 'branch token', 'repo token'),
+ 'success', ('ok',))
+ transport = MemoryTransport()
+ branch = self.make_remote_branch(transport, client)
+ branch.lock_write()
+ config = branch._get_config()
+ config.set_option(
+ {'ascii': 'a', u'unicode \N{WATCH}': u'\N{INTERROBANG}'},
+ 'foo')
+ branch.unlock()
+ self.assertFinished(client)
+
+ def test_backwards_compat_set_option(self):
+ self.setup_smart_server_with_call_log()
+ branch = self.make_branch('.')
+ verb = 'Branch.set_config_option'
+ self.disable_verb(verb)
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ self.reset_smart_call_log()
+ branch._get_config().set_option('value', 'name')
+ self.assertLength(11, self.hpss_calls)
+ self.assertEqual('value', branch._get_config().get_option('name'))
+
+ def test_backwards_compat_set_option_with_dict(self):
+ self.setup_smart_server_with_call_log()
+ branch = self.make_branch('.')
+ verb = 'Branch.set_config_option_dict'
+ self.disable_verb(verb)
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ self.reset_smart_call_log()
+ config = branch._get_config()
+ value_dict = {'ascii': 'a', u'unicode \N{WATCH}': u'\N{INTERROBANG}'}
+ config.set_option(value_dict, 'name')
+ self.assertLength(11, self.hpss_calls)
+ self.assertEqual(value_dict, branch._get_config().get_option('name'))
+
+
+class TestBranchGetPutConfigStore(RemoteBranchTestCase):
+
+ def test_get_branch_conf(self):
+ # in an empty branch we decode the response properly
+ client = FakeClient()
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('memory:///',),
+ 'error', ('NotStacked',),)
+ client.add_success_response_with_body('# config file body', 'ok')
+ transport = MemoryTransport()
+ branch = self.make_remote_branch(transport, client)
+ config = branch.get_config_stack()
+ config.get("email")
+ config.get("log_format")
+ self.assertEqual(
+ [('call', 'Branch.get_stacked_on_url', ('memory:///',)),
+ ('call_expecting_body', 'Branch.get_config_file', ('memory:///',))],
+ client._calls)
+
+ def test_set_branch_conf(self):
+ client = FakeClient()
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('memory:///',),
+ 'error', ('NotStacked',),)
+ client.add_expected_call(
+ 'Branch.lock_write', ('memory:///', '', ''),
+ 'success', ('ok', 'branch token', 'repo token'))
+ client.add_expected_call(
+ 'Branch.get_config_file', ('memory:///', ),
+ 'success', ('ok', ), "# line 1\n")
+ client.add_expected_call(
+ 'Branch.get_config_file', ('memory:///', ),
+ 'success', ('ok', ), "# line 1\n")
+ client.add_expected_call(
+ 'Branch.put_config_file', ('memory:///', 'branch token',
+ 'repo token'),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Branch.unlock', ('memory:///', 'branch token', 'repo token'),
+ 'success', ('ok',))
+ transport = MemoryTransport()
+ branch = self.make_remote_branch(transport, client)
+ branch.lock_write()
+ config = branch.get_config_stack()
+ config.set('email', 'The Dude <lebowski@example.com>')
+ branch.unlock()
+ self.assertFinished(client)
+ self.assertEqual(
+ [('call', 'Branch.get_stacked_on_url', ('memory:///',)),
+ ('call', 'Branch.lock_write', ('memory:///', '', '')),
+ ('call_expecting_body', 'Branch.get_config_file', ('memory:///',)),
+ ('call_expecting_body', 'Branch.get_config_file', ('memory:///',)),
+ ('call_with_body_bytes_expecting_body', 'Branch.put_config_file',
+ ('memory:///', 'branch token', 'repo token'),
+ '# line 1\nemail = The Dude <lebowski@example.com>\n'),
+ ('call', 'Branch.unlock', ('memory:///', 'branch token', 'repo token'))],
+ client._calls)
+
+
+class TestBranchLockWrite(RemoteBranchTestCase):
+
+ def test_lock_write_unlockable(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',),)
+ client.add_expected_call(
+ 'Branch.lock_write', ('quack/', '', ''),
+ 'error', ('UnlockableTransport',))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.assertRaises(errors.UnlockableTransport, branch.lock_write)
+ self.assertFinished(client)
+
+
+class TestBranchRevisionIdToRevno(RemoteBranchTestCase):
+
+ def test_simple(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',),)
+ client.add_expected_call(
+ 'Branch.revision_id_to_revno', ('quack/', 'null:'),
+ 'success', ('ok', '0',),)
+ client.add_expected_call(
+ 'Branch.revision_id_to_revno', ('quack/', 'unknown'),
+ 'error', ('NoSuchRevision', 'unknown',),)
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.assertEquals(0, branch.revision_id_to_revno('null:'))
+ self.assertRaises(errors.NoSuchRevision,
+ branch.revision_id_to_revno, 'unknown')
+ self.assertFinished(client)
+
+ def test_dotted(self):
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',),)
+ client.add_expected_call(
+ 'Branch.revision_id_to_revno', ('quack/', 'null:'),
+ 'success', ('ok', '0',),)
+ client.add_expected_call(
+ 'Branch.revision_id_to_revno', ('quack/', 'unknown'),
+ 'error', ('NoSuchRevision', 'unknown',),)
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.assertEquals((0, ), branch.revision_id_to_dotted_revno('null:'))
+ self.assertRaises(errors.NoSuchRevision,
+ branch.revision_id_to_dotted_revno, 'unknown')
+ self.assertFinished(client)
+
+ def test_dotted_no_smart_verb(self):
+ self.setup_smart_server_with_call_log()
+ branch = self.make_branch('.')
+ self.disable_verb('Branch.revision_id_to_revno')
+ self.reset_smart_call_log()
+ self.assertEquals((0, ),
+ branch.revision_id_to_dotted_revno('null:'))
+ self.assertLength(8, self.hpss_calls)
+
+
+class TestBzrDirGetSetConfig(RemoteBzrDirTestCase):
+
+ def test__get_config(self):
+ client = FakeClient()
+ client.add_success_response_with_body('default_stack_on = /\n', 'ok')
+ transport = MemoryTransport()
+ bzrdir = self.make_remote_bzrdir(transport, client)
+ config = bzrdir.get_config()
+ self.assertEqual('/', config.get_default_stack_on())
+ self.assertEqual(
+ [('call_expecting_body', 'BzrDir.get_config_file', ('memory:///',))],
+ client._calls)
+
+ def test_set_option_uses_vfs(self):
+ self.setup_smart_server_with_call_log()
+ bzrdir = self.make_bzrdir('.')
+ self.reset_smart_call_log()
+ config = bzrdir.get_config()
+ config.set_default_stack_on('/')
+ self.assertLength(4, self.hpss_calls)
+
+ def test_backwards_compat_get_option(self):
+ self.setup_smart_server_with_call_log()
+ bzrdir = self.make_bzrdir('.')
+ verb = 'BzrDir.get_config_file'
+ self.disable_verb(verb)
+ self.reset_smart_call_log()
+ self.assertEqual(None,
+ bzrdir._get_config().get_option('default_stack_on'))
+ self.assertLength(4, self.hpss_calls)
+
+
+class TestTransportIsReadonly(tests.TestCase):
+
+ def test_true(self):
+ client = FakeClient()
+ client.add_success_response('yes')
+ transport = RemoteTransport('bzr://example.com/', medium=False,
+ _client=client)
+ self.assertEqual(True, transport.is_readonly())
+ self.assertEqual(
+ [('call', 'Transport.is_readonly', ())],
+ client._calls)
+
+ def test_false(self):
+ client = FakeClient()
+ client.add_success_response('no')
+ transport = RemoteTransport('bzr://example.com/', medium=False,
+ _client=client)
+ self.assertEqual(False, transport.is_readonly())
+ self.assertEqual(
+ [('call', 'Transport.is_readonly', ())],
+ client._calls)
+
+ def test_error_from_old_server(self):
+ """bzr 0.15 and earlier servers don't recognise the is_readonly verb.
+
+        Clients should treat it as a "no" response, because is_readonly is
+        only advisory (a transport could be read-write, but the underlying
+        filesystem could still be readonly).
+ """
+ client = FakeClient()
+ client.add_unknown_method_response('Transport.is_readonly')
+ transport = RemoteTransport('bzr://example.com/', medium=False,
+ _client=client)
+ self.assertEqual(False, transport.is_readonly())
+ self.assertEqual(
+ [('call', 'Transport.is_readonly', ())],
+ client._calls)
+
+
+class TestTransportMkdir(tests.TestCase):
+
+ def test_permissiondenied(self):
+ client = FakeClient()
+ client.add_error_response('PermissionDenied', 'remote path', 'extra')
+ transport = RemoteTransport('bzr://example.com/', medium=False,
+ _client=client)
+ exc = self.assertRaises(
+ errors.PermissionDenied, transport.mkdir, 'client path')
+ expected_error = errors.PermissionDenied('/client path', 'extra')
+ self.assertEqual(expected_error, exc)
+
+
+class TestRemoteSSHTransportAuthentication(tests.TestCaseInTempDir):
+
+ def test_defaults_to_none(self):
+ t = RemoteSSHTransport('bzr+ssh://example.com')
+ self.assertIs(None, t._get_credentials()[0])
+
+ def test_uses_authentication_config(self):
+ conf = config.AuthenticationConfig()
+ conf._get_config().update(
+ {'bzr+sshtest': {'scheme': 'ssh', 'user': 'bar', 'host':
+ 'example.com'}})
+ conf._save()
+ t = RemoteSSHTransport('bzr+ssh://example.com')
+ self.assertEqual('bar', t._get_credentials()[0])
+
+
+class TestRemoteRepository(TestRemote):
+ """Base for testing RemoteRepository protocol usage.
+
+    These tests contain frozen requests and responses. We want any changes to
+    what is sent or expected to require a thoughtful update to these tests,
+    because such changes might break compatibility with different-versioned
+    servers.
+ """
+
+ def setup_fake_client_and_repository(self, transport_path):
+ """Create the fake client and repository for testing with.
+
+ There's no real server here; we just have canned responses sent
+ back one by one.
+
+ :param transport_path: Path below the root of the MemoryTransport
+ where the repository will be created.
+ """
+ transport = MemoryTransport()
+ transport.mkdir(transport_path)
+ client = FakeClient(transport.base)
+ transport = transport.clone(transport_path)
+ # we do not want bzrdir to make any remote calls
+ bzrdir = RemoteBzrDir(transport, RemoteBzrDirFormat(),
+ _client=False)
+ repo = RemoteRepository(bzrdir, None, _client=client)
+ return repo, client
+
+
+def remoted_description(format):
+ return 'Remote: ' + format.get_format_description()
+
+
+class TestBranchFormat(tests.TestCase):
+
+ def test_get_format_description(self):
+ remote_format = RemoteBranchFormat()
+ real_format = branch.format_registry.get_default()
+ remote_format._network_name = real_format.network_name()
+ self.assertEqual(remoted_description(real_format),
+ remote_format.get_format_description())
+
+
+class TestRepositoryFormat(TestRemoteRepository):
+
+ def test_fast_delta(self):
+ true_name = groupcompress_repo.RepositoryFormat2a().network_name()
+ true_format = RemoteRepositoryFormat()
+ true_format._network_name = true_name
+ self.assertEqual(True, true_format.fast_deltas)
+ false_name = knitpack_repo.RepositoryFormatKnitPack1().network_name()
+ false_format = RemoteRepositoryFormat()
+ false_format._network_name = false_name
+ self.assertEqual(False, false_format.fast_deltas)
+
+ def test_get_format_description(self):
+ remote_repo_format = RemoteRepositoryFormat()
+ real_format = repository.format_registry.get_default()
+ remote_repo_format._network_name = real_format.network_name()
+ self.assertEqual(remoted_description(real_format),
+ remote_repo_format.get_format_description())
+
+
+class TestRepositoryAllRevisionIds(TestRemoteRepository):
+
+ def test_empty(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body('', 'ok')
+ self.assertEquals([], repo.all_revision_ids())
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.all_revision_ids',
+ ('quack/',))],
+ client._calls)
+
+ def test_with_some_content(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(
+ 'rev1\nrev2\nanotherrev\n', 'ok')
+ self.assertEquals(["rev1", "rev2", "anotherrev"],
+ repo.all_revision_ids())
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.all_revision_ids',
+ ('quack/',))],
+ client._calls)
+
+
+class TestRepositoryGatherStats(TestRemoteRepository):
+
+ def test_revid_none(self):
+ # ('ok',), body with revisions and size
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(
+ 'revisions: 2\nsize: 18\n', 'ok')
+ result = repo.gather_stats(None)
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.gather_stats',
+             ('quack/', '', 'no'))],
+ client._calls)
+ self.assertEqual({'revisions': 2, 'size': 18}, result)
+
+ def test_revid_no_committers(self):
+ # ('ok',), body without committers
+ body = ('firstrev: 123456.300 3600\n'
+ 'latestrev: 654231.400 0\n'
+ 'revisions: 2\n'
+ 'size: 18\n')
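+        # Each 'key: value' line in that body maps onto an entry in the
+        # stats dict below; firstrev and latestrev carry (timestamp,
+        # timezone) pairs.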
+ transport_path = 'quick'
+ revid = u'\xc8'.encode('utf8')
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(body, 'ok')
+ result = repo.gather_stats(revid)
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.gather_stats',
+ ('quick/', revid, 'no'))],
+ client._calls)
+ self.assertEqual({'revisions': 2, 'size': 18,
+ 'firstrev': (123456.300, 3600),
+ 'latestrev': (654231.400, 0),},
+ result)
+
+ def test_revid_with_committers(self):
+ # ('ok',), body with committers
+ body = ('committers: 128\n'
+ 'firstrev: 123456.300 3600\n'
+ 'latestrev: 654231.400 0\n'
+ 'revisions: 2\n'
+ 'size: 18\n')
+ transport_path = 'buick'
+ revid = u'\xc8'.encode('utf8')
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(body, 'ok')
+ result = repo.gather_stats(revid, True)
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.gather_stats',
+ ('buick/', revid, 'yes'))],
+ client._calls)
+ self.assertEqual({'revisions': 2, 'size': 18,
+ 'committers': 128,
+ 'firstrev': (123456.300, 3600),
+ 'latestrev': (654231.400, 0),},
+ result)
+
+
+class TestRepositoryBreakLock(TestRemoteRepository):
+
+ def test_break_lock(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('ok')
+ repo.break_lock()
+ self.assertEqual(
+ [('call', 'Repository.break_lock', ('quack/',))],
+ client._calls)
+
+
+class TestRepositoryGetSerializerFormat(TestRemoteRepository):
+
+ def test_get_serializer_format(self):
+ transport_path = 'hill'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('ok', '7')
+ self.assertEquals('7', repo.get_serializer_format())
+ self.assertEqual(
+ [('call', 'VersionedFileRepository.get_serializer_format',
+ ('hill/', ))],
+ client._calls)
+
+
+class TestRepositoryReconcile(TestRemoteRepository):
+
+ def test_reconcile(self):
+ transport_path = 'hill'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ body = ("garbage_inventories: 2\n"
+ "inconsistent_parents: 3\n")
+ client.add_expected_call(
+ 'Repository.lock_write', ('hill/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_success_response_with_body(body, 'ok')
+ reconciler = repo.reconcile()
+ self.assertEqual(
+ [('call', 'Repository.lock_write', ('hill/', '')),
+ ('call_expecting_body', 'Repository.reconcile',
+ ('hill/', 'a token'))],
+ client._calls)
+ self.assertEquals(2, reconciler.garbage_inventories)
+ self.assertEquals(3, reconciler.inconsistent_parents)
+
+
+class TestRepositoryGetRevisionSignatureText(TestRemoteRepository):
+
+ def test_text(self):
+ # ('ok',), body with signature text
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(
+ 'THETEXT', 'ok')
+ self.assertEquals("THETEXT", repo.get_signature_text("revid"))
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.get_revision_signature_text',
+ ('quack/', 'revid'))],
+ client._calls)
+
+ def test_no_signature(self):
+ transport_path = 'quick'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_error_response('nosuchrevision', 'unknown')
+ self.assertRaises(errors.NoSuchRevision, repo.get_signature_text,
+ "unknown")
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.get_revision_signature_text',
+ ('quick/', 'unknown'))],
+ client._calls)
+
+
+class TestRepositoryGetGraph(TestRemoteRepository):
+
+ def test_get_graph(self):
+ # get_graph returns a graph with a custom parents provider.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ graph = repo.get_graph()
+ self.assertNotEqual(graph._parents_provider, repo)
+
+
+class TestRepositoryAddSignatureText(TestRemoteRepository):
+
+ def test_add_signature_text(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.start_write_group', ('quack/', 'a token'),
+ 'success', ('ok', ('token1', )))
+ client.add_expected_call(
+ 'Repository.add_signature_text', ('quack/', 'a token', 'rev1',
+ 'token1'),
+ 'success', ('ok', ), None)
+ repo.lock_write()
+ repo.start_write_group()
+ self.assertIs(None,
+ repo.add_signature_text("rev1", "every bloody emperor"))
+ self.assertEqual(
+ ('call_with_body_bytes_expecting_body',
+ 'Repository.add_signature_text',
+ ('quack/', 'a token', 'rev1', 'token1'),
+ 'every bloody emperor'),
+ client._calls[-1])
+
+
+class TestRepositoryGetParentMap(TestRemoteRepository):
+
+ def test_get_parent_map_caching(self):
+ # get_parent_map returns from cache until unlock()
+        # set up a response with two revisions
+ r1 = u'\u0e33'.encode('utf8')
+ r2 = u'\u0dab'.encode('utf8')
+ lines = [' '.join([r2, r1]), r1]
+ encoded_body = bz2.compress('\n'.join(lines))
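+        # Once bz2-decompressed, the body is one line per key of the form
+        # '<key> <parent> <parent>...', so this canned response says r2 has
+        # parent r1, and r1 has no parents listed (a sketch of the format
+        # this test relies on).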
+
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(encoded_body, 'ok')
+ client.add_success_response_with_body(encoded_body, 'ok')
+ repo.lock_read()
+ graph = repo.get_graph()
+ parents = graph.get_parent_map([r2])
+ self.assertEqual({r2: (r1,)}, parents)
+        # locking and unlocking at a deeper nesting level should not reset
+        # the cache
+ repo.lock_read()
+ repo.unlock()
+ parents = graph.get_parent_map([r1])
+ self.assertEqual({r1: (NULL_REVISION,)}, parents)
+ self.assertEqual(
+ [('call_with_body_bytes_expecting_body',
+ 'Repository.get_parent_map', ('quack/', 'include-missing:', r2),
+ '\n\n0')],
+ client._calls)
+ repo.unlock()
+ # now we call again, and it should use the second response.
+ repo.lock_read()
+ graph = repo.get_graph()
+ parents = graph.get_parent_map([r1])
+ self.assertEqual({r1: (NULL_REVISION,)}, parents)
+ self.assertEqual(
+ [('call_with_body_bytes_expecting_body',
+ 'Repository.get_parent_map', ('quack/', 'include-missing:', r2),
+ '\n\n0'),
+ ('call_with_body_bytes_expecting_body',
+ 'Repository.get_parent_map', ('quack/', 'include-missing:', r1),
+ '\n\n0'),
+ ],
+ client._calls)
+ repo.unlock()
+
+ def test_get_parent_map_reconnects_if_unknown_method(self):
+ transport_path = 'quack'
+ rev_id = 'revision-id'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_unknown_method_response('Repository.get_parent_map')
+ client.add_success_response_with_body(rev_id, 'ok')
+ self.assertFalse(client._medium._is_remote_before((1, 2)))
+ parents = repo.get_parent_map([rev_id])
+ self.assertEqual(
+ [('call_with_body_bytes_expecting_body',
+ 'Repository.get_parent_map',
+ ('quack/', 'include-missing:', rev_id), '\n\n0'),
+ ('disconnect medium',),
+ ('call_expecting_body', 'Repository.get_revision_graph',
+ ('quack/', ''))],
+ client._calls)
+ # The medium is now marked as being connected to an older server
+ self.assertTrue(client._medium._is_remote_before((1, 2)))
+ self.assertEqual({rev_id: ('null:',)}, parents)
+
+ def test_get_parent_map_fallback_parentless_node(self):
+ """get_parent_map falls back to get_revision_graph on old servers. The
+ results from get_revision_graph are tweaked to match the get_parent_map
+ API.
+
+ Specifically, a {key: ()} result from get_revision_graph means "no
+ parents" for that key, which in get_parent_map results should be
+ represented as {key: ('null:',)}.
+
+ This is the test for https://bugs.launchpad.net/bzr/+bug/214894
+ """
+ rev_id = 'revision-id'
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(rev_id, 'ok')
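+        # The canned get_revision_graph body above ('revision-id' with no
+        # parents listed) is the wire form of the {key: ()} case described
+        # in the docstring.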
+ client._medium._remember_remote_is_before((1, 2))
+ parents = repo.get_parent_map([rev_id])
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.get_revision_graph',
+ ('quack/', ''))],
+ client._calls)
+ self.assertEqual({rev_id: ('null:',)}, parents)
+
+ def test_get_parent_map_unexpected_response(self):
+ repo, client = self.setup_fake_client_and_repository('path')
+ client.add_success_response('something unexpected!')
+ self.assertRaises(
+ errors.UnexpectedSmartServerResponse,
+ repo.get_parent_map, ['a-revision-id'])
+
+ def test_get_parent_map_negative_caches_missing_keys(self):
+ self.setup_smart_server_with_call_log()
+ repo = self.make_repository('foo')
+ self.assertIsInstance(repo, RemoteRepository)
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.reset_smart_call_log()
+ graph = repo.get_graph()
+ self.assertEqual({},
+ graph.get_parent_map(['some-missing', 'other-missing']))
+ self.assertLength(1, self.hpss_calls)
+ # No call if we repeat this
+ self.reset_smart_call_log()
+ graph = repo.get_graph()
+ self.assertEqual({},
+ graph.get_parent_map(['some-missing', 'other-missing']))
+ self.assertLength(0, self.hpss_calls)
+ # Asking for more unknown keys makes a request.
+ self.reset_smart_call_log()
+ graph = repo.get_graph()
+ self.assertEqual({},
+ graph.get_parent_map(['some-missing', 'other-missing',
+ 'more-missing']))
+ self.assertLength(1, self.hpss_calls)
+
+ def disableExtraResults(self):
+ self.overrideAttr(SmartServerRepositoryGetParentMap,
+ 'no_extra_results', True)
+
+ def test_null_cached_missing_and_stop_key(self):
+ self.setup_smart_server_with_call_log()
+ # Make a branch with a single revision.
+ builder = self.make_branch_builder('foo')
+ builder.start_series()
+ builder.build_snapshot('first', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.finish_series()
+ branch = builder.get_branch()
+ repo = branch.repository
+ self.assertIsInstance(repo, RemoteRepository)
+ # Stop the server from sending extra results.
+ self.disableExtraResults()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ self.reset_smart_call_log()
+ graph = repo.get_graph()
+ # Query for 'first' and 'null:'. Because 'null:' is a parent of
+ # 'first' it will be a candidate for the stop_keys of subsequent
+ # requests, and because 'null:' was queried but not returned it will be
+ # cached as missing.
+ self.assertEqual({'first': ('null:',)},
+ graph.get_parent_map(['first', 'null:']))
+ # Now query for another key. This request will pass along a recipe of
+ # start and stop keys describing the already cached results, and this
+ # recipe's revision count must be correct (or else it will trigger an
+ # error from the server).
+ self.assertEqual({}, graph.get_parent_map(['another-key']))
+ # This assertion guards against disableExtraResults silently failing to
+ # work, thus invalidating the test.
+ self.assertLength(2, self.hpss_calls)
+
+ def test_get_parent_map_gets_ghosts_from_result(self):
+ # asking for a revision should negatively cache close ghosts in its
+ # ancestry.
+ self.setup_smart_server_with_call_log()
+ tree = self.make_branch_and_memory_tree('foo')
+ tree.lock_write()
+ try:
+ builder = treebuilder.TreeBuilder()
+ builder.start_tree(tree)
+ builder.build([])
+ builder.finish_tree()
+ tree.set_parent_ids(['non-existant'], allow_leftmost_as_ghost=True)
+ rev_id = tree.commit('')
+ finally:
+ tree.unlock()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ repo = tree.branch.repository
+ self.assertIsInstance(repo, RemoteRepository)
+ # ask for rev_id
+ repo.get_parent_map([rev_id])
+ self.reset_smart_call_log()
+ # Now asking for rev_id's ghost parent should not make calls
+ self.assertEqual({}, repo.get_parent_map(['non-existant']))
+ self.assertLength(0, self.hpss_calls)
+
+ def test_exposes_get_cached_parent_map(self):
+ """RemoteRepository exposes get_cached_parent_map from
+ _unstacked_provider
+ """
+ r1 = u'\u0e33'.encode('utf8')
+ r2 = u'\u0dab'.encode('utf8')
+ lines = [' '.join([r2, r1]), r1]
+ encoded_body = bz2.compress('\n'.join(lines))
+
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(encoded_body, 'ok')
+ repo.lock_read()
+ # get_cached_parent_map should *not* trigger an RPC
+ self.assertEqual({}, repo.get_cached_parent_map([r1]))
+ self.assertEqual([], client._calls)
+ self.assertEqual({r2: (r1,)}, repo.get_parent_map([r2]))
+ self.assertEqual({r1: (NULL_REVISION,)},
+ repo.get_cached_parent_map([r1]))
+ self.assertEqual(
+ [('call_with_body_bytes_expecting_body',
+ 'Repository.get_parent_map', ('quack/', 'include-missing:', r2),
+ '\n\n0')],
+ client._calls)
+ repo.unlock()
+
+
+class TestGetParentMapAllowsNew(tests.TestCaseWithTransport):
+
+ def test_allows_new_revisions(self):
+ """get_parent_map's results can be updated by commit."""
+ smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(smart_server)
+ self.make_branch('branch')
+ branch = Branch.open(smart_server.get_url() + '/branch')
+ tree = branch.create_checkout('tree', lightweight=True)
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ graph = tree.branch.repository.get_graph()
+ # This provides an opportunity for the missing rev-id to be cached.
+ self.assertEqual({}, graph.get_parent_map(['rev1']))
+ tree.commit('message', rev_id='rev1')
+ graph = tree.branch.repository.get_graph()
+ self.assertEqual({'rev1': ('null:',)}, graph.get_parent_map(['rev1']))
+
+
+class TestRepositoryGetRevisions(TestRemoteRepository):
+
+ def test_hpss_missing_revision(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(
+ '', 'ok', '10')
+ self.assertRaises(errors.NoSuchRevision, repo.get_revisions,
+ ['somerev1', 'anotherrev2'])
+ self.assertEqual(
+ [('call_with_body_bytes_expecting_body', 'Repository.iter_revisions',
+ ('quack/', ), "somerev1\nanotherrev2")],
+ client._calls)
+
+ def test_hpss_get_single_revision(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ somerev1 = Revision("somerev1")
+ somerev1.committer = "Joe Committer <joe@example.com>"
+ somerev1.timestamp = 1321828927
+ somerev1.timezone = -60
+ somerev1.inventory_sha1 = "691b39be74c67b1212a75fcb19c433aaed903c2b"
+ somerev1.message = "Message"
+ body = zlib.compress(chk_bencode_serializer.write_revision_to_string(
+ somerev1))
+ # Split up body into two bits to make sure the zlib compression object
+ # gets data fed twice.
+ client.add_success_response_with_body(
+ [body[:10], body[10:]], 'ok', '10')
+ revs = repo.get_revisions(['somerev1'])
+ self.assertEquals(revs, [somerev1])
+ self.assertEqual(
+ [('call_with_body_bytes_expecting_body', 'Repository.iter_revisions',
+ ('quack/', ), "somerev1")],
+ client._calls)
+
+
+class TestRepositoryGetRevisionGraph(TestRemoteRepository):
+
+ def test_null_revision(self):
+        # a null revision has the predictable result {}; we should see no
+        # wire traffic when calling with this argument
+ transport_path = 'empty'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('notused')
+ # actual RemoteRepository.get_revision_graph is gone, but there's an
+ # equivalent private method for testing
+ result = repo._get_revision_graph(NULL_REVISION)
+ self.assertEqual([], client._calls)
+ self.assertEqual({}, result)
+
+ def test_none_revision(self):
+ # with none we want the entire graph
+ r1 = u'\u0e33'.encode('utf8')
+ r2 = u'\u0dab'.encode('utf8')
+ lines = [' '.join([r2, r1]), r1]
+ encoded_body = '\n'.join(lines)
+
+ transport_path = 'sinhala'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(encoded_body, 'ok')
+ # actual RemoteRepository.get_revision_graph is gone, but there's an
+ # equivalent private method for testing
+ result = repo._get_revision_graph(None)
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.get_revision_graph',
+ ('sinhala/', ''))],
+ client._calls)
+ self.assertEqual({r1: (), r2: (r1, )}, result)
+
+ def test_specific_revision(self):
+        # with a specific revision we want the graph for just that revision
+ r11 = u'\u0e33'.encode('utf8')
+ r12 = u'\xc9'.encode('utf8')
+ r2 = u'\u0dab'.encode('utf8')
+ lines = [' '.join([r2, r11, r12]), r11, r12]
+ encoded_body = '\n'.join(lines)
+
+ transport_path = 'sinhala'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(encoded_body, 'ok')
+ result = repo._get_revision_graph(r2)
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.get_revision_graph',
+ ('sinhala/', r2))],
+ client._calls)
+ self.assertEqual({r11: (), r12: (), r2: (r11, r12), }, result)
+
+ def test_no_such_revision(self):
+ revid = '123'
+ transport_path = 'sinhala'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_error_response('nosuchrevision', revid)
+ # also check that the right revision is reported in the error
+ self.assertRaises(errors.NoSuchRevision,
+ repo._get_revision_graph, revid)
+ self.assertEqual(
+ [('call_expecting_body', 'Repository.get_revision_graph',
+ ('sinhala/', revid))],
+ client._calls)
+
+ def test_unexpected_error(self):
+ revid = '123'
+ transport_path = 'sinhala'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_error_response('AnUnexpectedError')
+ e = self.assertRaises(errors.UnknownErrorFromSmartServer,
+ repo._get_revision_graph, revid)
+ self.assertEqual(('AnUnexpectedError',), e.error_tuple)
+
+
+class TestRepositoryGetRevIdForRevno(TestRemoteRepository):
+
+ def test_ok(self):
+ repo, client = self.setup_fake_client_and_repository('quack')
+ client.add_expected_call(
+ 'Repository.get_rev_id_for_revno', ('quack/', 5, (42, 'rev-foo')),
+ 'success', ('ok', 'rev-five'))
+ result = repo.get_rev_id_for_revno(5, (42, 'rev-foo'))
+ self.assertEqual((True, 'rev-five'), result)
+ self.assertFinished(client)
+
+ def test_history_incomplete(self):
+ repo, client = self.setup_fake_client_and_repository('quack')
+ client.add_expected_call(
+ 'Repository.get_rev_id_for_revno', ('quack/', 5, (42, 'rev-foo')),
+ 'success', ('history-incomplete', 10, 'rev-ten'))
+ result = repo.get_rev_id_for_revno(5, (42, 'rev-foo'))
+ self.assertEqual((False, (10, 'rev-ten')), result)
+ self.assertFinished(client)
+
+ def test_history_incomplete_with_fallback(self):
+ """A 'history-incomplete' response causes the fallback repository to be
+ queried too, if one is set.
+ """
+ # Make a repo with a fallback repo, both using a FakeClient.
+ format = remote.response_tuple_to_repo_format(
+ ('yes', 'no', 'yes', self.get_repo_format().network_name()))
+ repo, client = self.setup_fake_client_and_repository('quack')
+ repo._format = format
+ fallback_repo, ignored = self.setup_fake_client_and_repository(
+ 'fallback')
+ fallback_repo._client = client
+ fallback_repo._format = format
+ repo.add_fallback_repository(fallback_repo)
+ # First the client should ask the primary repo
+ client.add_expected_call(
+ 'Repository.get_rev_id_for_revno', ('quack/', 1, (42, 'rev-foo')),
+ 'success', ('history-incomplete', 2, 'rev-two'))
+ # Then it should ask the fallback, using revno/revid from the
+ # history-incomplete response as the known revno/revid.
+ client.add_expected_call(
+            'Repository.get_rev_id_for_revno', ('fallback/', 1, (2, 'rev-two')),
+ 'success', ('ok', 'rev-one'))
+ result = repo.get_rev_id_for_revno(1, (42, 'rev-foo'))
+ self.assertEqual((True, 'rev-one'), result)
+ self.assertFinished(client)
+
+ def test_nosuchrevision(self):
+ # 'nosuchrevision' is returned when the known-revid is not found in the
+ # remote repo. The client translates that response to NoSuchRevision.
+ repo, client = self.setup_fake_client_and_repository('quack')
+ client.add_expected_call(
+ 'Repository.get_rev_id_for_revno', ('quack/', 5, (42, 'rev-foo')),
+ 'error', ('nosuchrevision', 'rev-foo'))
+ self.assertRaises(
+ errors.NoSuchRevision,
+ repo.get_rev_id_for_revno, 5, (42, 'rev-foo'))
+ self.assertFinished(client)
+
+ def test_branch_fallback_locking(self):
+ """RemoteBranch.get_rev_id takes a read lock, and tries to call the
+ get_rev_id_for_revno verb. If the verb is unknown the VFS fallback
+ will be invoked, which will fail if the repo is unlocked.
+ """
+ self.setup_smart_server_with_call_log()
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ rev1 = tree.commit('First')
+ rev2 = tree.commit('Second')
+ tree.unlock()
+ branch = tree.branch
+ self.assertFalse(branch.is_locked())
+ self.reset_smart_call_log()
+ verb = 'Repository.get_rev_id_for_revno'
+ self.disable_verb(verb)
+ self.assertEqual(rev1, branch.get_rev_id(1))
+ self.assertLength(1, [call for call in self.hpss_calls if
+ call.call.method == verb])
+
+
+class TestRepositoryHasSignatureForRevisionId(TestRemoteRepository):
+
+ def test_has_signature_for_revision_id(self):
+ # ('yes', ) for Repository.has_signature_for_revision_id -> 'True'.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('yes')
+ result = repo.has_signature_for_revision_id('A')
+ self.assertEqual(
+ [('call', 'Repository.has_signature_for_revision_id',
+ ('quack/', 'A'))],
+ client._calls)
+ self.assertEqual(True, result)
+
+    def test_has_no_signature_for_revision_id(self):
+ # ('no', ) for Repository.has_signature_for_revision_id -> 'False'.
+ transport_path = 'qwack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('no')
+ result = repo.has_signature_for_revision_id('A')
+ self.assertEqual(
+ [('call', 'Repository.has_signature_for_revision_id',
+ ('qwack/', 'A'))],
+ client._calls)
+ self.assertEqual(False, result)
+
+
+class TestRepositoryPhysicalLockStatus(TestRemoteRepository):
+
+ def test_get_physical_lock_status_yes(self):
+ transport_path = 'qwack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('yes')
+ result = repo.get_physical_lock_status()
+ self.assertEqual(
+ [('call', 'Repository.get_physical_lock_status',
+ ('qwack/', ))],
+ client._calls)
+ self.assertEqual(True, result)
+
+ def test_get_physical_lock_status_no(self):
+ transport_path = 'qwack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('no')
+ result = repo.get_physical_lock_status()
+ self.assertEqual(
+ [('call', 'Repository.get_physical_lock_status',
+ ('qwack/', ))],
+ client._calls)
+ self.assertEqual(False, result)
+
+
+class TestRepositoryIsShared(TestRemoteRepository):
+
+ def test_is_shared(self):
+ # ('yes', ) for Repository.is_shared -> 'True'.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('yes')
+ result = repo.is_shared()
+ self.assertEqual(
+ [('call', 'Repository.is_shared', ('quack/',))],
+ client._calls)
+ self.assertEqual(True, result)
+
+ def test_is_not_shared(self):
+ # ('no', ) for Repository.is_shared -> 'False'.
+ transport_path = 'qwack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('no')
+ result = repo.is_shared()
+ self.assertEqual(
+ [('call', 'Repository.is_shared', ('qwack/',))],
+ client._calls)
+ self.assertEqual(False, result)
+
+
+class TestRepositoryMakeWorkingTrees(TestRemoteRepository):
+
+ def test_make_working_trees(self):
+ # ('yes', ) for Repository.make_working_trees -> 'True'.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('yes')
+ result = repo.make_working_trees()
+ self.assertEqual(
+ [('call', 'Repository.make_working_trees', ('quack/',))],
+ client._calls)
+ self.assertEqual(True, result)
+
+ def test_no_working_trees(self):
+ # ('no', ) for Repository.make_working_trees -> 'False'.
+ transport_path = 'qwack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('no')
+ result = repo.make_working_trees()
+ self.assertEqual(
+ [('call', 'Repository.make_working_trees', ('qwack/',))],
+ client._calls)
+ self.assertEqual(False, result)
+
+
+class TestRepositoryLockWrite(TestRemoteRepository):
+
+ def test_lock_write(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('ok', 'a token')
+ token = repo.lock_write().repository_token
+ self.assertEqual(
+ [('call', 'Repository.lock_write', ('quack/', ''))],
+ client._calls)
+ self.assertEqual('a token', token)
+
+ def test_lock_write_already_locked(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_error_response('LockContention')
+ self.assertRaises(errors.LockContention, repo.lock_write)
+ self.assertEqual(
+ [('call', 'Repository.lock_write', ('quack/', ''))],
+ client._calls)
+
+ def test_lock_write_unlockable(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_error_response('UnlockableTransport')
+ self.assertRaises(errors.UnlockableTransport, repo.lock_write)
+ self.assertEqual(
+ [('call', 'Repository.lock_write', ('quack/', ''))],
+ client._calls)
+
+
+class TestRepositoryWriteGroups(TestRemoteRepository):
+
+ def test_start_write_group(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.start_write_group', ('quack/', 'a token'),
+ 'success', ('ok', ('token1', )))
+ repo.lock_write()
+ repo.start_write_group()
+
+ def test_start_write_group_unsuspendable(self):
+ # Some repositories do not support suspending write
+ # groups. For those, fall back to the "real" repository.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
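+ # Stub out _ensure_real so that falling back to the "real"
+ # repository is recorded in the call log instead of touching a
+ # real repository.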
+ def stub_ensure_real():
+ client._calls.append(('_ensure_real',))
+ repo._real_repository = _StubRealPackRepository(client._calls)
+ repo._ensure_real = stub_ensure_real
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.start_write_group', ('quack/', 'a token'),
+ 'error', ('UnsuspendableWriteGroup',))
+ repo.lock_write()
+ repo.start_write_group()
+ self.assertEquals(client._calls[-2:], [
+ ('_ensure_real',),
+ ('start_write_group',)])
+
+ def test_commit_write_group(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.start_write_group', ('quack/', 'a token'),
+ 'success', ('ok', ['token1']))
+ client.add_expected_call(
+ 'Repository.commit_write_group', ('quack/', 'a token', ['token1']),
+ 'success', ('ok',))
+ repo.lock_write()
+ repo.start_write_group()
+ repo.commit_write_group()
+
+ def test_abort_write_group(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.start_write_group', ('quack/', 'a token'),
+ 'success', ('ok', ['token1']))
+ client.add_expected_call(
+ 'Repository.abort_write_group', ('quack/', 'a token', ['token1']),
+ 'success', ('ok',))
+ repo.lock_write()
+ repo.start_write_group()
+ repo.abort_write_group(False)
+
+ def test_suspend_write_group(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ self.assertEquals([], repo.suspend_write_group())
+
+ def test_resume_write_group(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.check_write_group', ('quack/', 'a token', ['token1']),
+ 'success', ('ok',))
+ repo.lock_write()
+ repo.resume_write_group(['token1'])
+
+
+class TestRepositorySetMakeWorkingTrees(TestRemoteRepository):
+
+ def test_backwards_compat(self):
+ self.setup_smart_server_with_call_log()
+ repo = self.make_repository('.')
+ self.reset_smart_call_log()
+ verb = 'Repository.set_make_working_trees'
+ self.disable_verb(verb)
+ repo.set_make_working_trees(True)
+ call_count = len([call for call in self.hpss_calls if
+ call.call.method == verb])
+ self.assertEqual(1, call_count)
+
+ def test_current(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.set_make_working_trees', ('quack/', 'True'),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.set_make_working_trees', ('quack/', 'False'),
+ 'success', ('ok',))
+ repo.set_make_working_trees(True)
+ repo.set_make_working_trees(False)
+
+
+class TestRepositoryUnlock(TestRemoteRepository):
+
+ def test_unlock(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('ok', 'a token')
+ client.add_success_response('ok')
+ repo.lock_write()
+ repo.unlock()
+ self.assertEqual(
+ [('call', 'Repository.lock_write', ('quack/', '')),
+ ('call', 'Repository.unlock', ('quack/', 'a token'))],
+ client._calls)
+
+ def test_unlock_wrong_token(self):
+ # If somehow the token is wrong, unlock will raise TokenMismatch.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response('ok', 'a token')
+ client.add_error_response('TokenMismatch')
+ repo.lock_write()
+ self.assertRaises(errors.TokenMismatch, repo.unlock)
+
+
+class TestRepositoryHasRevision(TestRemoteRepository):
+
+ def test_none(self):
+ # repo.has_revision(None) should not cause any traffic.
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+
+ # The null revision is always there, so has_revision(None) == True.
+ self.assertEqual(True, repo.has_revision(NULL_REVISION))
+
+ # The remote repo shouldn't be accessed.
+ self.assertEqual([], client._calls)
+
+
+class TestRepositoryIterFilesBytes(TestRemoteRepository):
+ """Test Repository.iter_file_bytes."""
+
+ def test_single(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.iter_files_bytes', ('quack/', ),
+ 'success', ('ok',), iter(["ok\x000", "\n", zlib.compress("mydata" * 10)]))
+ for (identifier, byte_stream) in repo.iter_files_bytes([("somefile",
+ "somerev", "myid")]):
+ self.assertEquals("myid", identifier)
+ self.assertEquals("".join(byte_stream), "mydata" * 10)
+
+ def test_missing(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.iter_files_bytes',
+ ('quack/', ),
+ 'error', ('RevisionNotPresent', 'somefile', 'somerev'),
+ iter(["absent\0somefile\0somerev\n"]))
+ self.assertRaises(errors.RevisionNotPresent, list,
+ repo.iter_files_bytes(
+ [("somefile", "somerev", "myid")]))
+
+
+class TestRepositoryInsertStreamBase(TestRemoteRepository):
+ """Base class for Repository.insert_stream and .insert_stream_1.19
+ tests.
+ """
+
+ def checkInsertEmptyStream(self, repo, client):
+ """Insert an empty stream, checking the result.
+
+ This checks that there are no resume_tokens or missing_keys, and that
+ the client is finished.
+ """
+ sink = repo._get_sink()
+ fmt = repository.format_registry.get_default()
+ resume_tokens, missing_keys = sink.insert_stream([], fmt, [])
+ self.assertEqual([], resume_tokens)
+ self.assertEqual(set(), missing_keys)
+ self.assertFinished(client)
+
+
+class TestRepositoryInsertStream(TestRepositoryInsertStreamBase):
+ """Tests for using Repository.insert_stream verb when the _1.19 variant is
+ not available.
+
+ This test case is very similar to TestRepositoryInsertStream_1_19.
+ """
+
+ def setUp(self):
+ TestRemoteRepository.setUp(self)
+ self.disable_verb('Repository.insert_stream_1.19')
+
+ def test_unlocked_repo(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'unknown', ('Repository.insert_stream_1.19',))
+ client.add_expected_call(
+ 'Repository.insert_stream', ('quack/', ''),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream', ('quack/', ''),
+ 'success', ('ok',))
+ self.checkInsertEmptyStream(repo, client)
+
+ def test_locked_repo_with_no_lock_token(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', ''))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'unknown', ('Repository.insert_stream_1.19',))
+ client.add_expected_call(
+ 'Repository.insert_stream', ('quack/', ''),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream', ('quack/', ''),
+ 'success', ('ok',))
+ repo.lock_write()
+ self.checkInsertEmptyStream(repo, client)
+
+ def test_locked_repo_with_lock_token(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', '', 'a token'),
+ 'unknown', ('Repository.insert_stream_1.19',))
+ client.add_expected_call(
+ 'Repository.insert_stream_locked', ('quack/', '', 'a token'),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream_locked', ('quack/', '', 'a token'),
+ 'success', ('ok',))
+ repo.lock_write()
+ self.checkInsertEmptyStream(repo, client)
+
+ def test_stream_with_inventory_deltas(self):
+ """'inventory-deltas' substreams cannot be sent to the
+ Repository.insert_stream verb, because not all servers that implement
+ that verb will accept them. So when one is encountered the RemoteSink
+ immediately stops using that verb and falls back to VFS insert_stream.
+ """
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'unknown', ('Repository.insert_stream_1.19',))
+ client.add_expected_call(
+ 'Repository.insert_stream', ('quack/', ''),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream', ('quack/', ''),
+ 'success', ('ok',))
+ # Create a fake real repository for insert_stream to fall back on, so
+ # that we can directly see the records the RemoteSink passes to the
+ # real sink.
+ class FakeRealSink:
+ def __init__(self):
+ self.records = []
+ def insert_stream(self, stream, src_format, resume_tokens):
+ for substream_kind, substream in stream:
+ self.records.append(
+ (substream_kind, [record.key for record in substream]))
+ return ['fake tokens'], ['fake missing keys']
+ fake_real_sink = FakeRealSink()
+ class FakeRealRepository:
+ def _get_sink(self):
+ return fake_real_sink
+ def is_in_write_group(self):
+ return False
+ def refresh_data(self):
+ return True
+ repo._real_repository = FakeRealRepository()
+ sink = repo._get_sink()
+ fmt = repository.format_registry.get_default()
+ stream = self.make_stream_with_inv_deltas(fmt)
+ resume_tokens, missing_keys = sink.insert_stream(stream, fmt, [])
+ # Every record from the first inventory delta should have been sent to
+ # the VFS sink.
+ expected_records = [
+ ('inventory-deltas', [('rev2',), ('rev3',)]),
+ ('texts', [('some-rev', 'some-file')])]
+ self.assertEqual(expected_records, fake_real_sink.records)
+ # The return values from the real sink's insert_stream are propagated
+ # back to the original caller.
+ self.assertEqual(['fake tokens'], resume_tokens)
+ self.assertEqual(['fake missing keys'], missing_keys)
+ self.assertFinished(client)
+
+ def make_stream_with_inv_deltas(self, fmt):
+ """Make a simple stream with an inventory delta followed by more
+ records and more substreams to test that all records and substreams
+ from that point on are used.
+
+ This sends, in order:
+ * inventories substream: rev1, rev2, rev3. rev2 and rev3 are
+ inventory-deltas.
+ * texts substream: (some-rev, some-file)
+ """
+ # Define a stream using generators so that it isn't rewindable.
+ inv = inventory.Inventory(revision_id='rev1')
+ inv.root.revision = 'rev1'
+ def stream_with_inv_delta():
+ yield ('inventories', inventories_substream())
+ yield ('inventory-deltas', inventory_delta_substream())
+ yield ('texts', [
+ versionedfile.FulltextContentFactory(
+ ('some-rev', 'some-file'), (), None, 'content')])
+ def inventories_substream():
+ # An empty inventory fulltext. This will be streamed normally.
+ text = fmt._serializer.write_inventory_to_string(inv)
+ yield versionedfile.FulltextContentFactory(
+ ('rev1',), (), None, text)
+ def inventory_delta_substream():
+ # An inventory delta. This can't be streamed via this verb, so it
+ # will trigger a fallback to VFS insert_stream.
+ entry = inv.make_entry(
+ 'directory', 'newdir', inv.root.file_id, 'newdir-id')
+ entry.revision = 'ghost'
+ delta = [(None, 'newdir', 'newdir-id', entry)]
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=True, tree_references=False)
+ lines = serializer.delta_to_lines('rev1', 'rev2', delta)
+ yield versionedfile.ChunkedContentFactory(
+ ('rev2',), (('rev1',)), None, lines)
+ # Another delta.
+ lines = serializer.delta_to_lines('rev1', 'rev3', delta)
+ yield versionedfile.ChunkedContentFactory(
+ ('rev3',), (('rev1',)), None, lines)
+ return stream_with_inv_delta()
+
+
+class TestRepositoryInsertStream_1_19(TestRepositoryInsertStreamBase):
+
+ def test_unlocked_repo(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'success', ('ok',))
+ self.checkInsertEmptyStream(repo, client)
+
+ def test_locked_repo_with_no_lock_token(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', ''))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', ''),
+ 'success', ('ok',))
+ repo.lock_write()
+ self.checkInsertEmptyStream(repo, client)
+
+ def test_locked_repo_with_lock_token(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'a token'))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', '', 'a token'),
+ 'success', ('ok',))
+ client.add_expected_call(
+ 'Repository.insert_stream_1.19', ('quack/', '', 'a token'),
+ 'success', ('ok',))
+ repo.lock_write()
+ self.checkInsertEmptyStream(repo, client)
+
+
+class TestRepositoryTarball(TestRemoteRepository):
+
+ # This is a canned tarball response we can validate against.
+ tarball_content = (
+ 'QlpoOTFBWSZTWdGkj3wAAWF/k8aQACBIB//A9+8cIX/v33AACEAYABAECEACNz'
+ 'JqsgJJFPTSnk1A3qh6mTQAAAANPUHkagkSTEkaA09QaNAAAGgAAAcwCYCZGAEY'
+ 'mJhMJghpiaYBUkKammSHqNMZQ0NABkNAeo0AGneAevnlwQoGzEzNVzaYxp/1Uk'
+ 'xXzA1CQX0BJMZZLcPBrluJir5SQyijWHYZ6ZUtVqqlYDdB2QoCwa9GyWwGYDMA'
+ 'OQYhkpLt/OKFnnlT8E0PmO8+ZNSo2WWqeCzGB5fBXZ3IvV7uNJVE7DYnWj6qwB'
+ 'k5DJDIrQ5OQHHIjkS9KqwG3mc3t+F1+iujb89ufyBNIKCgeZBWrl5cXxbMGoMs'
+ 'c9JuUkg5YsiVcaZJurc6KLi6yKOkgCUOlIlOpOoXyrTJjK8ZgbklReDdwGmFgt'
+ 'dkVsAIslSVCd4AtACSLbyhLHryfb14PKegrVDba+U8OL6KQtzdM5HLjAc8/p6n'
+ '0lgaWU8skgO7xupPTkyuwheSckejFLK5T4ZOo0Gda9viaIhpD1Qn7JqqlKAJqC'
+ 'QplPKp2nqBWAfwBGaOwVrz3y1T+UZZNismXHsb2Jq18T+VaD9k4P8DqE3g70qV'
+ 'JLurpnDI6VS5oqDDPVbtVjMxMxMg4rzQVipn2Bv1fVNK0iq3Gl0hhnnHKm/egy'
+ 'nWQ7QH/F3JFOFCQ0aSPfA='
+ ).decode('base64')
+
+ def test_repository_tarball(self):
+ # Test that Repository.tarball generates the right operations
+ transport_path = 'repo'
+ expected_calls = [('call_expecting_body', 'Repository.tarball',
+ ('repo/', 'bz2',),),
+ ]
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_success_response_with_body(self.tarball_content, 'ok')
+ # Now actually ask for the tarball
+ tarball_file = repo._get_tarball('bz2')
+ try:
+ self.assertEqual(expected_calls, client._calls)
+ self.assertEqual(self.tarball_content, tarball_file.read())
+ finally:
+ tarball_file.close()
+
+
+class TestRemoteRepositoryCopyContent(tests.TestCaseWithTransport):
+ """RemoteRepository.copy_content_into optimizations"""
+
+ def test_copy_content_remote_to_local(self):
+ self.transport_server = test_server.SmartTCPServer_for_testing
+ src_repo = self.make_repository('repo1')
+ src_repo = repository.Repository.open(self.get_url('repo1'))
+ # At the moment the tarball-based copy_content_into can't write back
+ # into a smart server. It would be good if it could upload the
+ # tarball; once that works we'd have to create repositories of
+ # different formats. -- mbp 20070410
+ dest_url = self.get_vfs_only_url('repo2')
+ dest_bzrdir = BzrDir.create(dest_url)
+ dest_repo = dest_bzrdir.create_repository()
+ self.assertFalse(isinstance(dest_repo, RemoteRepository))
+ self.assertTrue(isinstance(src_repo, RemoteRepository))
+ src_repo.copy_content_into(dest_repo)
+
+
+class _StubRealPackRepository(object):
+
+ def __init__(self, calls):
+ self.calls = calls
+ self._pack_collection = _StubPackCollection(calls)
+
+ def start_write_group(self):
+ self.calls.append(('start_write_group',))
+
+ def is_in_write_group(self):
+ return False
+
+ def refresh_data(self):
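+ # Record that refresh_data (a pack-names reload) was requested.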
+ self.calls.append(('pack collection reload_pack_names',))
+
+
+class _StubPackCollection(object):
+
+ def __init__(self, calls):
+ self.calls = calls
+
+ def autopack(self):
+ self.calls.append(('pack collection autopack',))
+
+
+class TestRemotePackRepositoryAutoPack(TestRemoteRepository):
+ """Tests for RemoteRepository.autopack implementation."""
+
+ def test_ok(self):
+ """When the server returns 'ok' and there's no _real_repository, then
+ nothing else happens: the autopack method is done.
+ """
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'PackRepository.autopack', ('quack/',), 'success', ('ok',))
+ repo.autopack()
+ self.assertFinished(client)
+
+ def test_ok_with_real_repo(self):
+ """When the server returns 'ok' and there is a _real_repository, then
+ the _real_repository's reload_pack_names method will be called.
+ """
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'PackRepository.autopack', ('quack/',),
+ 'success', ('ok',))
+ repo._real_repository = _StubRealPackRepository(client._calls)
+ repo.autopack()
+ self.assertEqual(
+ [('call', 'PackRepository.autopack', ('quack/',)),
+ ('pack collection reload_pack_names',)],
+ client._calls)
+
+ def test_backwards_compatibility(self):
+ """If the server does not recognise the PackRepository.autopack verb,
+ fall back to the _real_repository's implementation.
+ """
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_unknown_method_response('PackRepository.autopack')
+ def stub_ensure_real():
+ client._calls.append(('_ensure_real',))
+ repo._real_repository = _StubRealPackRepository(client._calls)
+ repo._ensure_real = stub_ensure_real
+ repo.autopack()
+ self.assertEqual(
+ [('call', 'PackRepository.autopack', ('quack/',)),
+ ('_ensure_real',),
+ ('pack collection autopack',)],
+ client._calls)
+
+ def test_oom_error_reporting(self):
+ """An out-of-memory condition on the server is reported clearly"""
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'PackRepository.autopack', ('quack/',),
+ 'error', ('MemoryError',))
+ err = self.assertRaises(errors.BzrError, repo.autopack)
+ self.assertContainsRe(str(err), "^remote server out of mem")
+
+
+class TestErrorTranslationBase(tests.TestCaseWithMemoryTransport):
+ """Base class for unit tests for bzrlib.remote._translate_error."""
+
+ def translateTuple(self, error_tuple, **context):
+ """Call _translate_error with an ErrorFromSmartServer built from the
+ given error_tuple.
+
+ :param error_tuple: A tuple of a smart server response, as would be
+ passed to an ErrorFromSmartServer.
+ :kwargs context: context items to call _translate_error with.
+
+ :returns: The error raised by _translate_error.
+ """
+ # Raise the ErrorFromSmartServer before passing it as an argument,
+ # because _translate_error may need to re-raise it with a bare 'raise'
+ # statement.
+ server_error = errors.ErrorFromSmartServer(error_tuple)
+ translated_error = self.translateErrorFromSmartServer(
+ server_error, **context)
+ return translated_error
+
+ def translateErrorFromSmartServer(self, error_object, **context):
+ """Like translateTuple, but takes an already constructed
+ ErrorFromSmartServer rather than a tuple.
+ """
+ try:
+ raise error_object
+ except errors.ErrorFromSmartServer, server_error:
+ translated_error = self.assertRaises(
+ errors.BzrError, remote._translate_error, server_error,
+ **context)
+ return translated_error
+
+
+class TestErrorTranslationSuccess(TestErrorTranslationBase):
+ """Unit tests for bzrlib.remote._translate_error.
+
+ Given an ErrorFromSmartServer (which has an error tuple from a smart
+ server) and some context, _translate_error raises more specific errors from
+ bzrlib.errors.
+
+ This test case covers the cases where _translate_error succeeds in
+ translating an ErrorFromSmartServer to something better. See
+ TestErrorTranslationRobustness for other cases.
+ """
+
+ def test_NoSuchRevision(self):
+ branch = self.make_branch('')
+ revid = 'revid'
+ translated_error = self.translateTuple(
+ ('NoSuchRevision', revid), branch=branch)
+ expected_error = errors.NoSuchRevision(branch, revid)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_nosuchrevision(self):
+ repository = self.make_repository('')
+ revid = 'revid'
+ translated_error = self.translateTuple(
+ ('nosuchrevision', revid), repository=repository)
+ expected_error = errors.NoSuchRevision(repository, revid)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_nobranch(self):
+ bzrdir = self.make_bzrdir('')
+ translated_error = self.translateTuple(('nobranch',), bzrdir=bzrdir)
+ expected_error = errors.NotBranchError(path=bzrdir.root_transport.base)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_nobranch_one_arg(self):
+ bzrdir = self.make_bzrdir('')
+ translated_error = self.translateTuple(
+ ('nobranch', 'extra detail'), bzrdir=bzrdir)
+ expected_error = errors.NotBranchError(
+ path=bzrdir.root_transport.base,
+ detail='extra detail')
+ self.assertEqual(expected_error, translated_error)
+
+ def test_norepository(self):
+ bzrdir = self.make_bzrdir('')
+ translated_error = self.translateTuple(('norepository',),
+ bzrdir=bzrdir)
+ expected_error = errors.NoRepositoryPresent(bzrdir)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_LockContention(self):
+ translated_error = self.translateTuple(('LockContention',))
+ expected_error = errors.LockContention('(remote lock)')
+ self.assertEqual(expected_error, translated_error)
+
+ def test_UnlockableTransport(self):
+ bzrdir = self.make_bzrdir('')
+ translated_error = self.translateTuple(
+ ('UnlockableTransport',), bzrdir=bzrdir)
+ expected_error = errors.UnlockableTransport(bzrdir.root_transport)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_LockFailed(self):
+ lock = 'str() of a server lock'
+ why = 'str() of why'
+ translated_error = self.translateTuple(('LockFailed', lock, why))
+ expected_error = errors.LockFailed(lock, why)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_TokenMismatch(self):
+ token = 'a lock token'
+ translated_error = self.translateTuple(('TokenMismatch',), token=token)
+ expected_error = errors.TokenMismatch(token, '(remote token)')
+ self.assertEqual(expected_error, translated_error)
+
+ def test_Diverged(self):
+ branch = self.make_branch('a')
+ other_branch = self.make_branch('b')
+ translated_error = self.translateTuple(
+ ('Diverged',), branch=branch, other_branch=other_branch)
+ expected_error = errors.DivergedBranches(branch, other_branch)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_NotStacked(self):
+ branch = self.make_branch('')
+ translated_error = self.translateTuple(('NotStacked',), branch=branch)
+ expected_error = errors.NotStacked(branch)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_ReadError_no_args(self):
+ path = 'a path'
+ translated_error = self.translateTuple(('ReadError',), path=path)
+ expected_error = errors.ReadError(path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_ReadError(self):
+ path = 'a path'
+ translated_error = self.translateTuple(('ReadError', path))
+ expected_error = errors.ReadError(path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_IncompatibleRepositories(self):
+ translated_error = self.translateTuple(('IncompatibleRepositories',
+ "repo1", "repo2", "details here"))
+ expected_error = errors.IncompatibleRepositories("repo1", "repo2",
+ "details here")
+ self.assertEqual(expected_error, translated_error)
+
+ def test_PermissionDenied_no_args(self):
+ path = 'a path'
+ translated_error = self.translateTuple(('PermissionDenied',),
+ path=path)
+ expected_error = errors.PermissionDenied(path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_PermissionDenied_one_arg(self):
+ path = 'a path'
+ translated_error = self.translateTuple(('PermissionDenied', path))
+ expected_error = errors.PermissionDenied(path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_PermissionDenied_one_arg_and_context(self):
+ """Given a choice between a path from the local context and a path on
+ the wire, _translate_error prefers the path from the local context.
+ """
+ local_path = 'local path'
+ remote_path = 'remote path'
+ translated_error = self.translateTuple(
+ ('PermissionDenied', remote_path), path=local_path)
+ expected_error = errors.PermissionDenied(local_path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_PermissionDenied_two_args(self):
+ path = 'a path'
+ extra = 'a string with extra info'
+ translated_error = self.translateTuple(
+ ('PermissionDenied', path, extra))
+ expected_error = errors.PermissionDenied(path, extra)
+ self.assertEqual(expected_error, translated_error)
+
+ # GZ 2011-03-02: TODO test for PermissionDenied with non-ascii 'extra'
+
+ def test_NoSuchFile_context_path(self):
+ local_path = "local path"
+ translated_error = self.translateTuple(('ReadError', "remote path"),
+ path=local_path)
+ expected_error = errors.ReadError(local_path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_NoSuchFile_without_context(self):
+ remote_path = "remote path"
+ translated_error = self.translateTuple(('ReadError', remote_path))
+ expected_error = errors.ReadError(remote_path)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_ReadOnlyError(self):
+ translated_error = self.translateTuple(('ReadOnlyError',))
+ expected_error = errors.TransportNotPossible("readonly transport")
+ self.assertEqual(expected_error, translated_error)
+
+ def test_MemoryError(self):
+ translated_error = self.translateTuple(('MemoryError',))
+ self.assertStartsWith(str(translated_error),
+ "remote server out of memory")
+
+ def test_generic_IndexError_no_classname(self):
+ err = errors.ErrorFromSmartServer(('error', "list index out of range"))
+ translated_error = self.translateErrorFromSmartServer(err)
+ expected_error = errors.UnknownErrorFromSmartServer(err)
+ self.assertEqual(expected_error, translated_error)
+
+ # GZ 2011-03-02: TODO test generic non-ascii error string
+
+ def test_generic_KeyError(self):
+ err = errors.ErrorFromSmartServer(('error', 'KeyError', "1"))
+ translated_error = self.translateErrorFromSmartServer(err)
+ expected_error = errors.UnknownErrorFromSmartServer(err)
+ self.assertEqual(expected_error, translated_error)
+
+
+class TestErrorTranslationRobustness(TestErrorTranslationBase):
+ """Unit tests for bzrlib.remote._translate_error's robustness.
+
+ TestErrorTranslationSuccess is for cases where _translate_error can
+ translate successfully. This class is about how _translate_error behaves when
+ it fails to translate: it re-raises the original error.
+ """
+
+ def test_unrecognised_server_error(self):
+ """If the error code from the server is not recognised, the original
+ ErrorFromSmartServer is propagated unmodified.
+ """
+ error_tuple = ('An unknown error tuple',)
+ server_error = errors.ErrorFromSmartServer(error_tuple)
+ translated_error = self.translateErrorFromSmartServer(server_error)
+ expected_error = errors.UnknownErrorFromSmartServer(server_error)
+ self.assertEqual(expected_error, translated_error)
+
+ def test_context_missing_a_key(self):
+ """In case of a bug in the client, or perhaps an unexpected response
+ from a server, _translate_error re-raises the original error from
+ the server and mutters a warning.
+ """
+ # To translate a NoSuchRevision error _translate_error needs a 'branch'
+ # in the context dict. So let's give it an empty context dict instead
+ # to exercise its error recovery.
+ empty_context = {}
+ error_tuple = ('NoSuchRevision', 'revid')
+ server_error = errors.ErrorFromSmartServer(error_tuple)
+ translated_error = self.translateErrorFromSmartServer(server_error)
+ self.assertEqual(server_error, translated_error)
+ # In addition to re-raising ErrorFromSmartServer, some debug info has
+ # been muttered to the log file for developer to look at.
+ self.assertContainsRe(
+ self.get_log(),
+ "Missing key 'branch' in context")
+
+ def test_path_missing(self):
+ """Some translations (PermissionDenied, ReadError) can determine the
+ 'path' variable from either the wire or the local context. If neither
+ has it, then an error is raised.
+ """
+ error_tuple = ('ReadError',)
+ server_error = errors.ErrorFromSmartServer(error_tuple)
+ translated_error = self.translateErrorFromSmartServer(server_error)
+ self.assertEqual(server_error, translated_error)
+ # In addition to re-raising ErrorFromSmartServer, some debug info has
+ # been muttered to the log file for developer to look at.
+ self.assertContainsRe(self.get_log(), "Missing key 'path' in context")
+
+
+class TestStacking(tests.TestCaseWithTransport):
+ """Tests for operations on stacked remote repositories.
+
+ The underlying format type must support stacking.
+ """
+
+ def test_access_stacked_remote(self):
+ # based on <http://launchpad.net/bugs/261315>
+ # make a branch stacked on another repository containing an empty
+ # revision, then open it over hpss - we should be able to see that
+ # revision.
+ base_transport = self.get_transport()
+ base_builder = self.make_branch_builder('base', format='1.9')
+ base_builder.start_series()
+ base_revid = base_builder.build_snapshot('rev-id', None,
+ [('add', ('', None, 'directory', None))],
+ 'message')
+ base_builder.finish_series()
+ stacked_branch = self.make_branch('stacked', format='1.9')
+ stacked_branch.set_stacked_on_url('../base')
+ # start a server looking at this
+ smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(smart_server)
+ remote_bzrdir = BzrDir.open(smart_server.get_url() + '/stacked')
+ # can get its branch and repository
+ remote_branch = remote_bzrdir.open_branch()
+ remote_repo = remote_branch.repository
+ remote_repo.lock_read()
+ try:
+ # it should have an appropriate fallback repository, which should also
+ # be a RemoteRepository
+ self.assertLength(1, remote_repo._fallback_repositories)
+ self.assertIsInstance(remote_repo._fallback_repositories[0],
+ RemoteRepository)
+ # and it has the revision committed to the underlying repository;
+ # these have varying implementations so we try several of them
+ self.assertTrue(remote_repo.has_revisions([base_revid]))
+ self.assertTrue(remote_repo.has_revision(base_revid))
+ self.assertEqual(remote_repo.get_revision(base_revid).message,
+ 'message')
+ finally:
+ remote_repo.unlock()
+
+ def prepare_stacked_remote_branch(self):
+ """Get stacked_upon and stacked branches with content in each."""
+ self.setup_smart_server_with_call_log()
+ tree1 = self.make_branch_and_tree('tree1', format='1.9')
+ tree1.commit('rev1', rev_id='rev1')
+ tree2 = tree1.branch.bzrdir.sprout('tree2', stacked=True
+ ).open_workingtree()
+ local_tree = tree2.branch.create_checkout('local')
+ local_tree.commit('local changes make me feel good.')
+ branch2 = Branch.open(self.get_url('tree2'))
+ branch2.lock_read()
+ self.addCleanup(branch2.unlock)
+ return tree1.branch, branch2
+
+ def test_stacked_get_parent_map(self):
+ # the public implementation of get_parent_map obeys stacking
+ _, branch = self.prepare_stacked_remote_branch()
+ repo = branch.repository
+ self.assertEqual(['rev1'], repo.get_parent_map(['rev1']).keys())
+
+ def test_unstacked_get_parent_map(self):
+ # _unstacked_provider.get_parent_map ignores stacking
+ _, branch = self.prepare_stacked_remote_branch()
+ provider = branch.repository._unstacked_provider
+ self.assertEqual([], provider.get_parent_map(['rev1']).keys())
+
+ def fetch_stream_to_rev_order(self, stream):
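+ # Drain every non-revision substream and collect the revision ids
+ # from the 'revisions' substream in the order they are yielded.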
+ result = []
+ for kind, substream in stream:
+ if not kind == 'revisions':
+ list(substream)
+ else:
+ for content in substream:
+ result.append(content.key[-1])
+ return result
+
+ def get_ordered_revs(self, format, order, branch_factory=None):
+ """Get a list of the revisions in a stream to format format.
+
+ :param format: The format of the target.
+ :param order: the order that target should have requested.
+ :param branch_factory: A callable to create a trunk and stacked branch
+ to fetch from. If none, self.prepare_stacked_remote_branch is used.
+ :result: The revision ids in the stream, in the order seen,
+ the topological order of revisions in the source.
+ """
+ unordered_format = controldir.format_registry.get(format)()
+ target_repository_format = unordered_format.repository_format
+ # Cross check
+ self.assertEqual(order, target_repository_format._fetch_order)
+ if branch_factory is None:
+ branch_factory = self.prepare_stacked_remote_branch
+ _, stacked = branch_factory()
+ source = stacked.repository._get_source(target_repository_format)
+ tip = stacked.last_revision()
+ stacked.repository._ensure_real()
+ graph = stacked.repository.get_graph()
+ revs = [r for (r,ps) in graph.iter_ancestry([tip])
+ if r != NULL_REVISION]
+ revs.reverse()
+ search = vf_search.PendingAncestryResult([tip], stacked.repository)
+ self.reset_smart_call_log()
+ stream = source.get_stream(search)
+ # We trust that if a revision is in the stream the rest of the new
+ # content for it is too, as per our main fetch tests; here we are
+ # checking that the revisions are actually included at all, and their
+ # order.
+ return self.fetch_stream_to_rev_order(stream), revs
+
+ def test_stacked_get_stream_unordered(self):
+ # Repository._get_source.get_stream() from a stacked repository with
+ # unordered yields the full data from both stacked and stacked upon
+ # sources.
+ rev_ord, expected_revs = self.get_ordered_revs('1.9', 'unordered')
+ self.assertEqual(set(expected_revs), set(rev_ord))
+ # Getting unordered results should have made a streaming data request
+ # from the server, then one from the backing branch.
+ self.assertLength(2, self.hpss_calls)
+
+ def test_stacked_on_stacked_get_stream_unordered(self):
+ # Repository._get_source.get_stream() from a stacked repository which
+ # is itself stacked yields the full data from all three sources.
+ def make_stacked_stacked():
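+ # Stack a third branch on the already-stacked branch so the fetch
+ # has to pull from three repositories.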
+ _, stacked = self.prepare_stacked_remote_branch()
+ tree = stacked.bzrdir.sprout('tree3', stacked=True
+ ).open_workingtree()
+ local_tree = tree.branch.create_checkout('local-tree3')
+ local_tree.commit('more local changes are better')
+ branch = Branch.open(self.get_url('tree3'))
+ branch.lock_read()
+ self.addCleanup(branch.unlock)
+ return None, branch
+ rev_ord, expected_revs = self.get_ordered_revs('1.9', 'unordered',
+ branch_factory=make_stacked_stacked)
+ self.assertEqual(set(expected_revs), set(rev_ord))
+ # Getting unordered results should have made a streaming data request
+ # from the server, and one from each backing repo
+ self.assertLength(3, self.hpss_calls)
+
+ def test_stacked_get_stream_topological(self):
+ # Repository._get_source.get_stream() from a stacked repository with
+ # topological sorting yields the full data from both stacked and
+ # stacked upon sources in topological order.
+ rev_ord, expected_revs = self.get_ordered_revs('knit', 'topological')
+ self.assertEqual(expected_revs, rev_ord)
+ # Getting topological sort requires VFS calls still - one of which is
+ # pushing up from the bound branch.
+ self.assertLength(14, self.hpss_calls)
+
+ def test_stacked_get_stream_groupcompress(self):
+ # Repository._get_source.get_stream() from a stacked repository with
+ # groupcompress sorting yields the full data from both stacked and
+ # stacked upon sources in groupcompress order.
+ raise tests.TestSkipped('No groupcompress ordered format available')
+ rev_ord, expected_revs = self.get_ordered_revs('dev5', 'groupcompress')
+ self.assertEqual(expected_revs, reversed(rev_ord))
+ # Getting unordered results should have made a streaming data request
+ # from the backing branch, and one from the stacked on branch.
+ self.assertLength(2, self.hpss_calls)
+
+ def test_stacked_pull_more_than_stacking_has_bug_360791(self):
+ # When pulling some fixed amount of content that is more than the
+ # source has (because some is coming from a fallback branch), no error
+ # should be received. This was reported as bug 360791.
+ # Need three branches: a trunk, a stacked branch, and a preexisting
+ # branch pulling content from stacked and trunk.
+ self.setup_smart_server_with_call_log()
+ trunk = self.make_branch_and_tree('trunk', format="1.9-rich-root")
+ r1 = trunk.commit('start')
+ stacked_branch = trunk.branch.create_clone_on_transport(
+ self.get_transport('stacked'), stacked_on=trunk.branch.base)
+ local = self.make_branch('local', format='1.9-rich-root')
+ local.repository.fetch(stacked_branch.repository,
+ stacked_branch.last_revision())
+
+
+class TestRemoteBranchEffort(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestRemoteBranchEffort, self).setUp()
+ # Create a smart server that publishes whatever the backing VFS server
+ # does.
+ self.smart_server = test_server.SmartTCPServer_for_testing()
+ self.start_server(self.smart_server, self.get_server())
+ # Log all HPSS calls into self.hpss_calls.
+ _SmartClient.hooks.install_named_hook(
+ 'call', self.capture_hpss_call, None)
+ self.hpss_calls = []
+
+ def capture_hpss_call(self, params):
+ self.hpss_calls.append(params.method)
+
+ def test_copy_content_into_avoids_revision_history(self):
+ local = self.make_branch('local')
+ builder = self.make_branch_builder('remote')
+ builder.build_commit(message="Commit.")
+ remote_branch_url = self.smart_server.get_url() + 'remote'
+ remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
+ local.repository.fetch(remote_branch.repository)
+ self.hpss_calls = []
+ remote_branch.copy_content_into(local)
+ self.assertFalse('Branch.revision_history' in self.hpss_calls)
+
+ def test_fetch_everything_needs_just_one_call(self):
+ local = self.make_branch('local')
+ builder = self.make_branch_builder('remote')
+ builder.build_commit(message="Commit.")
+ remote_branch_url = self.smart_server.get_url() + 'remote'
+ remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
+ self.hpss_calls = []
+ local.repository.fetch(
+ remote_branch.repository,
+ fetch_spec=vf_search.EverythingResult(remote_branch.repository))
+ self.assertEqual(['Repository.get_stream_1.19'], self.hpss_calls)
+
+ def override_verb(self, verb_name, verb):
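+ # Temporarily replace a smart-server request handler, restoring the
+ # original handler (and its registration info) when the test ends.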
+ request_handlers = request.request_handlers
+ orig_verb = request_handlers.get(verb_name)
+ orig_info = request_handlers.get_info(verb_name)
+ request_handlers.register(verb_name, verb, override_existing=True)
+ self.addCleanup(request_handlers.register, verb_name, orig_verb,
+ override_existing=True, info=orig_info)
+
+ def test_fetch_everything_backwards_compat(self):
+ """Can fetch with EverythingResult even with pre 2.4 servers.
+
+ Pre-2.4 servers do not support 'everything' searches with the
+ Repository.get_stream_1.19 verb.
+ """
+ verb_log = []
+ class OldGetStreamVerb(SmartServerRepositoryGetStream_1_19):
+ """A version of the Repository.get_stream_1.19 verb patched to
+ reject 'everything' searches the way 2.3 and earlier do.
+ """
+ def recreate_search(self, repository, search_bytes,
+ discard_excess=False):
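+ # Log the first line of the search request and reject 'everything'
+ # searches the way servers before 2.4 did.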
+ verb_log.append(search_bytes.split('\n', 1)[0])
+ if search_bytes == 'everything':
+ return (None,
+ request.FailedSmartServerResponse(('BadSearch',)))
+ return super(OldGetStreamVerb,
+ self).recreate_search(repository, search_bytes,
+ discard_excess=discard_excess)
+ self.override_verb('Repository.get_stream_1.19', OldGetStreamVerb)
+ local = self.make_branch('local')
+ builder = self.make_branch_builder('remote')
+ builder.build_commit(message="Commit.")
+ remote_branch_url = self.smart_server.get_url() + 'remote'
+ remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
+ self.hpss_calls = []
+ local.repository.fetch(
+ remote_branch.repository,
+ fetch_spec=vf_search.EverythingResult(remote_branch.repository))
+ # make sure the overridden verb was used
+ self.assertLength(1, verb_log)
+ # more than one HPSS call is needed, but because it's a VFS callback
+ # it's hard to predict exactly how many.
+ self.assertTrue(len(self.hpss_calls) > 1)
+
+
+class TestUpdateBoundBranchWithModifiedBoundLocation(
+ tests.TestCaseWithTransport):
+ """Ensure correct handling of bound_location modifications.
+
+ This is tested against a smart server as http://pad.lv/786980 was about a
+ ReadOnlyError (write attempt during a read-only transaction) which can only
+ happen in this context.
+ """
+
+ def setUp(self):
+ super(TestUpdateBoundBranchWithModifiedBoundLocation, self).setUp()
+ self.transport_server = test_server.SmartTCPServer_for_testing
+
+ def make_master_and_checkout(self, master_name, checkout_name):
+ # Create the master branch and its associated checkout
+ self.master = self.make_branch_and_tree(master_name)
+ self.checkout = self.master.branch.create_checkout(checkout_name)
+ # Modify the master branch so there is something to update
+ self.master.commit('add stuff')
+ self.last_revid = self.master.commit('even more stuff')
+ self.bound_location = self.checkout.branch.get_bound_location()
+
+ def assertUpdateSucceeds(self, new_location):
+ self.checkout.branch.set_bound_location(new_location)
+ self.checkout.update()
+ self.assertEquals(self.last_revid, self.checkout.last_revision())
+
+ def test_without_final_slash(self):
+ self.make_master_and_checkout('master', 'checkout')
+ # For unclear reasons some users have a bound_location without a final
+ # '/'; simulate that by forcing such a value.
+ self.assertEndsWith(self.bound_location, '/')
+ self.assertUpdateSucceeds(self.bound_location.rstrip('/'))
+
+ def test_plus_sign(self):
+ self.make_master_and_checkout('+master', 'checkout')
+ self.assertUpdateSucceeds(self.bound_location.replace('%2B', '+', 1))
+
+ def test_tilda(self):
+ # Embed ~ in the middle of the path just to avoid any $HOME
+ # interpretation
+ self.make_master_and_checkout('mas~ter', 'checkout')
+ self.assertUpdateSucceeds(self.bound_location.replace('%2E', '~', 1))
+
+
+class TestWithCustomErrorHandler(RemoteBranchTestCase):
+
+ def test_no_context(self):
+ class OutOfCoffee(errors.BzrError):
+ """A dummy exception for testing."""
+
+ def __init__(self, urgency):
+ self.urgency = urgency
+ remote.no_context_error_translators.register("OutOfCoffee",
+ lambda err: OutOfCoffee(err.error_args[0]))
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('quack/',),
+ 'error', ('OutOfCoffee', 'low'))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.assertRaises(OutOfCoffee, branch.last_revision_info)
+ self.assertFinished(client)
+
+ def test_with_context(self):
+ class OutOfTea(errors.BzrError):
+ def __init__(self, branch, urgency):
+ self.branch = branch
+ self.urgency = urgency
+ remote.error_translators.register("OutOfTea",
+ lambda err, find, path: OutOfTea(find("branch"),
+ err.error_args[0]))
+ transport = MemoryTransport()
+ client = FakeClient(transport.base)
+ client.add_expected_call(
+ 'Branch.get_stacked_on_url', ('quack/',),
+ 'error', ('NotStacked',))
+ client.add_expected_call(
+ 'Branch.last_revision_info',
+ ('quack/',),
+ 'error', ('OutOfTea', 'low'))
+ transport.mkdir('quack')
+ transport = transport.clone('quack')
+ branch = self.make_remote_branch(transport, client)
+ self.assertRaises(OutOfTea, branch.last_revision_info)
+ self.assertFinished(client)
+
+
+class TestRepositoryPack(TestRemoteRepository):
+
+ def test_pack(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'token'))
+ client.add_expected_call(
+ 'Repository.pack', ('quack/', 'token', 'False'),
+ 'success', ('ok',), )
+ client.add_expected_call(
+ 'Repository.unlock', ('quack/', 'token'),
+ 'success', ('ok', ))
+ repo.pack()
+
+ def test_pack_with_hint(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'Repository.lock_write', ('quack/', ''),
+ 'success', ('ok', 'token'))
+ client.add_expected_call(
+ 'Repository.pack', ('quack/', 'token', 'False'),
+ 'success', ('ok',), )
+ client.add_expected_call(
+ 'Repository.unlock', ('quack/', 'token', 'False'),
+ 'success', ('ok', ))
+ repo.pack(['hinta', 'hintb'])
+
+
+class TestRepositoryIterInventories(TestRemoteRepository):
+ """Test Repository.iter_inventories."""
+
+ def _serialize_inv_delta(self, old_name, new_name, delta):
+ serializer = inventory_delta.InventoryDeltaSerializer(True, False)
+ return "".join(serializer.delta_to_lines(old_name, new_name, delta))
+
+ def test_single_empty(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ fmt = controldir.format_registry.get('2a')().repository_format
+ repo._format = fmt
+ stream = [('inventory-deltas', [
+ versionedfile.FulltextContentFactory('somerevid', None, None,
+ self._serialize_inv_delta('null:', 'somerevid', []))])]
+ client.add_expected_call(
+ 'VersionedFileRepository.get_inventories', ('quack/', 'unordered'),
+ 'success', ('ok', ),
+ _stream_to_byte_stream(stream, fmt))
+ ret = list(repo.iter_inventories(["somerevid"]))
+ self.assertLength(1, ret)
+ inv = ret[0]
+ self.assertEquals("somerevid", inv.revision_id)
+
+ def test_empty(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ ret = list(repo.iter_inventories([]))
+ self.assertEquals(ret, [])
+
+ def test_missing(self):
+ transport_path = 'quack'
+ repo, client = self.setup_fake_client_and_repository(transport_path)
+ client.add_expected_call(
+ 'VersionedFileRepository.get_inventories', ('quack/', 'unordered'),
+ 'success', ('ok', ), iter([]))
+ self.assertRaises(errors.NoSuchRevision, list, repo.iter_inventories(
+ ["somerevid"]))
diff --git a/bzrlib/tests/test_rename_map.py b/bzrlib/tests/test_rename_map.py
new file mode 100644
index 0000000..b38be3a
--- /dev/null
+++ b/bzrlib/tests/test_rename_map.py
@@ -0,0 +1,202 @@
+# Copyright (C) 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+from bzrlib import trace
+from bzrlib.rename_map import RenameMap
+from bzrlib.tests import TestCaseWithTransport
+
+
+def myhash(val):
+ """This the hash used by RenameMap."""
+ return hash(val) % (1024 * 1024 * 10)
+
+
+class TestRenameMap(TestCaseWithTransport):
+
+ a_lines = 'a\nb\nc\n'.splitlines(True)
+ b_lines = 'b\nc\nd\n'.splitlines(True)
+
+
+ def test_add_edge_hashes(self):
+ rn = RenameMap(None)
+ rn.add_edge_hashes(self.a_lines, 'a')
+ self.assertEqual(set(['a']), rn.edge_hashes[myhash(('a\n', 'b\n'))])
+ self.assertEqual(set(['a']), rn.edge_hashes[myhash(('b\n', 'c\n'))])
+ self.assertIs(None, rn.edge_hashes.get(myhash(('c\n', 'd\n'))))
+
+ def test_add_file_edge_hashes(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/a', ''.join(self.a_lines))])
+ tree.add('a', 'a')
+ rn = RenameMap(tree)
+ rn.add_file_edge_hashes(tree, ['a'])
+ self.assertEqual(set(['a']), rn.edge_hashes[myhash(('a\n', 'b\n'))])
+ self.assertEqual(set(['a']), rn.edge_hashes[myhash(('b\n', 'c\n'))])
+ self.assertIs(None, rn.edge_hashes.get(myhash(('c\n', 'd\n'))))
+
+ def test_hitcounts(self):
+ rn = RenameMap(None)
+ rn.add_edge_hashes(self.a_lines, 'a')
+ rn.add_edge_hashes(self.b_lines, 'b')
+ self.assertEqual({'a': 2.5, 'b': 0.5}, rn.hitcounts(self.a_lines))
+ self.assertEqual({'a': 1}, rn.hitcounts(self.a_lines[:-1]))
+ self.assertEqual({'b': 2.5, 'a': 0.5}, rn.hitcounts(self.b_lines))
+
+ def test_file_match(self):
+ tree = self.make_branch_and_tree('tree')
+ rn = RenameMap(tree)
+ rn.add_edge_hashes(self.a_lines, 'aid')
+ rn.add_edge_hashes(self.b_lines, 'bid')
+ self.build_tree_contents([('tree/a', ''.join(self.a_lines))])
+ self.build_tree_contents([('tree/b', ''.join(self.b_lines))])
+ self.assertEqual({'a': 'aid', 'b': 'bid'},
+ rn.file_match(['a', 'b']))
+
+ def test_file_match_no_dups(self):
+ tree = self.make_branch_and_tree('tree')
+ rn = RenameMap(tree)
+ rn.add_edge_hashes(self.a_lines, 'aid')
+ self.build_tree_contents([('tree/a', ''.join(self.a_lines))])
+ self.build_tree_contents([('tree/b', ''.join(self.b_lines))])
+ self.build_tree_contents([('tree/c', ''.join(self.b_lines))])
+ self.assertEqual({'a': 'aid'},
+ rn.file_match(['a', 'b', 'c']))
+
+ def test_match_directories(self):
+ tree = self.make_branch_and_tree('tree')
+ rn = RenameMap(tree)
+ required_parents = rn.get_required_parents({
+ 'path1': 'a',
+ 'path2/tr': 'b',
+ 'path3/path4/path5': 'c',
+ })
+ self.assertEqual(
+ {'path2': set(['b']), 'path3/path4': set(['c']), 'path3': set()},
+ required_parents)
+
+ def test_find_directory_renames(self):
+ tree = self.make_branch_and_tree('tree')
+ rn = RenameMap(tree)
+ matches = {
+ 'path1': 'a',
+ 'path3/path4/path5': 'c',
+ }
+ required_parents = {
+ 'path2': set(['b']),
+ 'path3/path4': set(['c']),
+ 'path3': set([])}
+ missing_parents = {
+ 'path2-id': set(['b']),
+ 'path4-id': set(['c']),
+ 'path3-id': set(['path4-id'])}
+ matches = rn.match_parents(required_parents, missing_parents)
+ self.assertEqual({'path3/path4': 'path4-id', 'path2': 'path2-id'},
+ matches)
+
+ def test_guess_renames(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/file'])
+ tree.add('file', 'file-id')
+ tree.commit('Added file')
+ os.rename('tree/file', 'tree/file2')
+ RenameMap.guess_renames(tree)
+ self.assertEqual('file2', tree.id2path('file-id'))
+
+ def test_guess_renames_handles_directories(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/dir/', 'tree/dir/file'])
+ tree.add(['dir', 'dir/file'], ['dir-id', 'file-id'])
+ tree.commit('Added file')
+ os.rename('tree/dir', 'tree/dir2')
+ RenameMap.guess_renames(tree)
+ self.assertEqual('dir2/file', tree.id2path('file-id'))
+ self.assertEqual('dir2', tree.id2path('dir-id'))
+
+ def test_guess_renames_handles_grandparent_directories(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/topdir/',
+ 'tree/topdir/middledir/',
+ 'tree/topdir/middledir/file'])
+ tree.add(['topdir', 'topdir/middledir', 'topdir/middledir/file'],
+ ['topdir-id', 'middledir-id', 'file-id'])
+ tree.commit('Added files.')
+ os.rename('tree/topdir', 'tree/topdir2')
+ RenameMap.guess_renames(tree)
+ self.assertEqual('topdir2', tree.id2path('topdir-id'))
+
+ def test_guess_renames_preserves_children(self):
+ """When a directory has been moved, its children are preserved."""
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('tree/foo/', ''),
+ ('tree/foo/bar', 'bar'),
+ ('tree/foo/empty', '')])
+ tree.add(['foo', 'foo/bar', 'foo/empty'],
+ ['foo-id', 'bar-id', 'empty-id'])
+ tree.commit('rev1')
+ os.rename('tree/foo', 'tree/baz')
+ RenameMap.guess_renames(tree)
+ self.assertEqual('baz/empty', tree.id2path('empty-id'))
+
+ def test_guess_renames_dry_run(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/file'])
+ tree.add('file', 'file-id')
+ tree.commit('Added file')
+ os.rename('tree/file', 'tree/file2')
+ RenameMap.guess_renames(tree, dry_run=True)
+ self.assertEqual('file', tree.id2path('file-id'))
+
+ @staticmethod
+ def captureNotes(cmd, *args, **kwargs):
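+ # Temporarily replace trace.note so that the notes emitted while
+ # running cmd are captured and returned alongside its result.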
+ notes = []
+ def my_note(fmt, *args):
+ notes.append(fmt % args)
+ old_note = trace.note
+ trace.note = my_note
+ try:
+ result = cmd(*args, **kwargs)
+ finally:
+ trace.note = old_note
+ return notes, result
+
+ def test_guess_renames_output(self):
+ """guess_renames emits output whether dry_run is True or False."""
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree(['tree/file'])
+ tree.add('file', 'file-id')
+ tree.commit('Added file')
+ os.rename('tree/file', 'tree/file2')
+ notes = self.captureNotes(RenameMap.guess_renames, tree,
+ dry_run=True)[0]
+ self.assertEqual('file => file2', ''.join(notes))
+ notes = self.captureNotes(RenameMap.guess_renames, tree,
+ dry_run=False)[0]
+ self.assertEqual('file => file2', ''.join(notes))
diff --git a/bzrlib/tests/test_repository.py b/bzrlib/tests/test_repository.py
new file mode 100644
index 0000000..7d8a8d3
--- /dev/null
+++ b/bzrlib/tests/test_repository.py
@@ -0,0 +1,1723 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the Repository facility that are not interface tests.
+
+For interface tests see tests/per_repository/*.py.
+
+For concrete class tests see this file, and for storage formats tests
+also see this file.
+"""
+
+from stat import S_ISDIR
+
+import bzrlib
+from bzrlib.errors import (
+ UnknownFormatError,
+ UnsupportedFormatError,
+ )
+from bzrlib import (
+ btree_index,
+ symbol_versioning,
+ tests,
+ transport,
+ vf_search,
+ )
+from bzrlib.btree_index import BTreeBuilder, BTreeGraphIndex
+from bzrlib.index import GraphIndex
+from bzrlib.repository import RepositoryFormat
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+from bzrlib import (
+ bzrdir,
+ controldir,
+ errors,
+ inventory,
+ osutils,
+ repository,
+ revision as _mod_revision,
+ upgrade,
+ versionedfile,
+ vf_repository,
+ workingtree,
+ )
+from bzrlib.repofmt import (
+ groupcompress_repo,
+ knitrepo,
+ knitpack_repo,
+ pack_repo,
+ )
+
+
+class TestDefaultFormat(TestCase):
+
+ def test_get_set_default_format(self):
+ old_default = controldir.format_registry.get('default')
+ private_default = old_default().repository_format.__class__
+ old_format = repository.format_registry.get_default()
+ self.assertTrue(isinstance(old_format, private_default))
+ def make_sample_bzrdir():
+ my_bzrdir = bzrdir.BzrDirMetaFormat1()
+ my_bzrdir.repository_format = SampleRepositoryFormat()
+ return my_bzrdir
+ controldir.format_registry.remove('default')
+ controldir.format_registry.register('sample', make_sample_bzrdir, '')
+ controldir.format_registry.set_default('sample')
+ # creating a repository should now create an instrumented dir.
+ try:
+            # the default repository format is used by the meta dir format,
+            # which is not the default bzrdir format at this point
+ dir = bzrdir.BzrDirMetaFormat1().initialize('memory:///')
+ result = dir.create_repository()
+ self.assertEqual(result, 'A bzr repository dir')
+ finally:
+ controldir.format_registry.remove('default')
+ controldir.format_registry.remove('sample')
+ controldir.format_registry.register('default', old_default, '')
+ self.assertIsInstance(repository.format_registry.get_default(),
+ old_format.__class__)
+
+
+class SampleRepositoryFormat(repository.RepositoryFormatMetaDir):
+ """A sample format
+
+    This format is initializable but unsupported, to aid in testing the
+    open and open(unsupported=True) routines.
+ """
+
+ @classmethod
+ def get_format_string(cls):
+ """See RepositoryFormat.get_format_string()."""
+ return "Sample .bzr repository format."
+
+ def initialize(self, a_bzrdir, shared=False):
+ """Initialize a repository in a BzrDir"""
+ t = a_bzrdir.get_repository_transport(self)
+ t.put_bytes('format', self.get_format_string())
+ return 'A bzr repository dir'
+
+ def is_supported(self):
+ return False
+
+ def open(self, a_bzrdir, _found=False):
+ return "opened repository."
+
+
+class SampleExtraRepositoryFormat(repository.RepositoryFormat):
+ """A sample format that can not be used in a metadir
+
+ """
+
+ def get_format_string(self):
+ raise NotImplementedError
+
+
+class TestRepositoryFormat(TestCaseWithTransport):
+ """Tests for the Repository format detection used by the bzr meta dir facility.BzrBranchFormat facility."""
+
+ def test_find_format(self):
+ # is the right format object found for a repository?
+ # create a branch with a few known format objects.
+ # this is not quite the same as
+ self.build_tree(["foo/", "bar/"])
+ def check_format(format, url):
+ dir = format._matchingbzrdir.initialize(url)
+ format.initialize(dir)
+ t = transport.get_transport_from_path(url)
+ found_format = repository.RepositoryFormatMetaDir.find_format(dir)
+ self.assertIsInstance(found_format, format.__class__)
+ check_format(repository.format_registry.get_default(), "bar")
+
+ def test_find_format_no_repository(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ self.assertRaises(errors.NoRepositoryPresent,
+ repository.RepositoryFormatMetaDir.find_format,
+ dir)
+
+ def test_from_string(self):
+ self.assertIsInstance(
+ SampleRepositoryFormat.from_string(
+ "Sample .bzr repository format."),
+ SampleRepositoryFormat)
+ self.assertRaises(AssertionError,
+ SampleRepositoryFormat.from_string,
+ "Different .bzr repository format.")
+
+ def test_find_format_unknown_format(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ SampleRepositoryFormat().initialize(dir)
+ self.assertRaises(UnknownFormatError,
+ repository.RepositoryFormatMetaDir.find_format,
+ dir)
+
+ def test_find_format_with_features(self):
+ tree = self.make_branch_and_tree('.', format='2a')
+ tree.branch.repository.update_feature_flags({"name": "necessity"})
+ found_format = repository.RepositoryFormatMetaDir.find_format(tree.bzrdir)
+ self.assertIsInstance(found_format, repository.RepositoryFormatMetaDir)
+ self.assertEquals(found_format.features.get("name"), "necessity")
+ self.assertRaises(errors.MissingFeature, found_format.check_support_status,
+ True)
+ self.addCleanup(repository.RepositoryFormatMetaDir.unregister_feature,
+ "name")
+ repository.RepositoryFormatMetaDir.register_feature("name")
+ found_format.check_support_status(True)
+
+
+class TestRepositoryFormatRegistry(TestCase):
+
+ def setUp(self):
+ super(TestRepositoryFormatRegistry, self).setUp()
+ self.registry = repository.RepositoryFormatRegistry()
+
+ def test_register_unregister_format(self):
+ format = SampleRepositoryFormat()
+ self.registry.register(format)
+ self.assertEquals(format, self.registry.get("Sample .bzr repository format."))
+ self.registry.remove(format)
+ self.assertRaises(KeyError, self.registry.get, "Sample .bzr repository format.")
+
+ def test_get_all(self):
+ format = SampleRepositoryFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra(self):
+ format = SampleExtraRepositoryFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra_lazy(self):
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra_lazy("bzrlib.tests.test_repository",
+ "SampleExtraRepositoryFormat")
+ formats = self.registry._get_all()
+ self.assertEquals(1, len(formats))
+ self.assertIsInstance(formats[0], SampleExtraRepositoryFormat)
+
+
+class TestFormatKnit1(TestCaseWithTransport):
+
+ def test_attribute__fetch_order(self):
+ """Knits need topological data insertion."""
+ repo = self.make_repository('.',
+ format=controldir.format_registry.get('knit')())
+ self.assertEqual('topological', repo._format._fetch_order)
+
+ def test_attribute__fetch_uses_deltas(self):
+ """Knits reuse deltas."""
+ repo = self.make_repository('.',
+ format=controldir.format_registry.get('knit')())
+ self.assertEqual(True, repo._format._fetch_uses_deltas)
+
+ def test_disk_layout(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ repo = knitrepo.RepositoryFormatKnit1().initialize(control)
+ # in case of side effects of locking.
+ repo.lock_write()
+ repo.unlock()
+ # we want:
+ # format 'Bazaar-NG Knit Repository Format 1'
+ # lock: is a directory
+ # inventory.weave == empty_weave
+ # empty revision-store directory
+ # empty weaves directory
+ t = control.get_repository_transport(None)
+ self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
+ t.get('format').read())
+ # XXX: no locks left when unlocked at the moment
+ # self.assertEqualDiff('', t.get('lock').read())
+ self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
+ self.check_knits(t)
+ # Check per-file knits.
+ branch = control.create_branch()
+ tree = control.create_workingtree()
+ tree.add(['foo'], ['Nasty-IdC:'], ['file'])
+ tree.put_file_bytes_non_atomic('Nasty-IdC:', '')
+ tree.commit('1st post', rev_id='foo')
+ self.assertHasKnit(t, 'knits/e8/%254easty-%2549d%2543%253a',
+ '\nfoo fulltext 0 81 :')
+
+ def assertHasKnit(self, t, knit_name, extra_content=''):
+ """Assert that knit_name exists on t."""
+ self.assertEqualDiff('# bzr knit index 8\n' + extra_content,
+ t.get(knit_name + '.kndx').read())
+
+ def check_knits(self, t):
+ """check knit content for a repository."""
+ self.assertHasKnit(t, 'inventory')
+ self.assertHasKnit(t, 'revisions')
+ self.assertHasKnit(t, 'signatures')
+
+ def test_shared_disk_layout(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True)
+ # we want:
+ # format 'Bazaar-NG Knit Repository Format 1'
+ # lock: is a directory
+ # inventory.weave == empty_weave
+ # empty revision-store directory
+ # empty weaves directory
+ # a 'shared-storage' marker file.
+ t = control.get_repository_transport(None)
+ self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
+ t.get('format').read())
+ # XXX: no locks left when unlocked at the moment
+ # self.assertEqualDiff('', t.get('lock').read())
+ self.assertEqualDiff('', t.get('shared-storage').read())
+ self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
+ self.check_knits(t)
+
+ def test_shared_no_tree_disk_layout(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True)
+ repo.set_make_working_trees(False)
+ # we want:
+ # format 'Bazaar-NG Knit Repository Format 1'
+ # lock ''
+ # inventory.weave == empty_weave
+ # empty revision-store directory
+ # empty weaves directory
+ # a 'shared-storage' marker file.
+ t = control.get_repository_transport(None)
+ self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
+ t.get('format').read())
+ # XXX: no locks left when unlocked at the moment
+ # self.assertEqualDiff('', t.get('lock').read())
+ self.assertEqualDiff('', t.get('shared-storage').read())
+ self.assertEqualDiff('', t.get('no-working-trees').read())
+ repo.set_make_working_trees(True)
+ self.assertFalse(t.has('no-working-trees'))
+ self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
+ self.check_knits(t)
+
+ def test_deserialise_sets_root_revision(self):
+ """We must have a inventory.root.revision
+
+ Old versions of the XML5 serializer did not set the revision_id for
+        the whole inventory, so we grab the one from the expected text, which
+        is valid when the API is not being abused.
+ """
+ repo = self.make_repository('.',
+ format=controldir.format_registry.get('knit')())
+ inv_xml = '<inventory format="5">\n</inventory>\n'
+ inv = repo._deserialise_inventory('test-rev-id', inv_xml)
+ self.assertEqual('test-rev-id', inv.root.revision)
+
+ def test_deserialise_uses_global_revision_id(self):
+ """If it is set, then we re-use the global revision id"""
+ repo = self.make_repository('.',
+ format=controldir.format_registry.get('knit')())
+ inv_xml = ('<inventory format="5" revision_id="other-rev-id">\n'
+ '</inventory>\n')
+ # Arguably, the deserialise_inventory should detect a mismatch, and
+ # raise an error, rather than silently using one revision_id over the
+ # other.
+ self.assertRaises(AssertionError, repo._deserialise_inventory,
+ 'test-rev-id', inv_xml)
+ inv = repo._deserialise_inventory('other-rev-id', inv_xml)
+ self.assertEqual('other-rev-id', inv.root.revision)
+
+ def test_supports_external_lookups(self):
+ repo = self.make_repository('.',
+ format=controldir.format_registry.get('knit')())
+ self.assertFalse(repo._format.supports_external_lookups)
+
+
+class DummyRepository(object):
+ """A dummy repository for testing."""
+
+ _format = None
+ _serializer = None
+
+ def supports_rich_root(self):
+ if self._format is not None:
+ return self._format.rich_root_data
+ return False
+
+ def get_graph(self):
+ raise NotImplementedError
+
+ def get_parent_map(self, revision_ids):
+ raise NotImplementedError
+
+
+class InterDummy(repository.InterRepository):
+ """An inter-repository optimised code path for DummyRepository.
+
+    This is for use during testing, where we use DummyRepository as the
+    repositories so that none of the default registered inter-repository
+    classes will match.
+ """
+
+ @staticmethod
+ def is_compatible(repo_source, repo_target):
+ """InterDummy is compatible with DummyRepository."""
+ return (isinstance(repo_source, DummyRepository) and
+ isinstance(repo_target, DummyRepository))
+
+
+class TestInterRepository(TestCaseWithTransport):
+
+ def test_get_default_inter_repository(self):
+ # test that the InterRepository.get(repo_a, repo_b) probes
+    # for an inter_repo class where is_compatible(repo_a, repo_b) returns
+ # true and returns a default inter_repo otherwise.
+ # This also tests that the default registered optimised interrepository
+ # classes do not barf inappropriately when a surprising repository type
+ # is handed to them.
+ dummy_a = DummyRepository()
+ dummy_a._format = RepositoryFormat()
+ dummy_a._format.supports_full_versioned_files = True
+ dummy_b = DummyRepository()
+ dummy_b._format = RepositoryFormat()
+ dummy_b._format.supports_full_versioned_files = True
+ self.assertGetsDefaultInterRepository(dummy_a, dummy_b)
+
+ def assertGetsDefaultInterRepository(self, repo_a, repo_b):
+ """Asserts that InterRepository.get(repo_a, repo_b) -> the default.
+
+ The effective default is now InterSameDataRepository because there is
+ no actual sane default in the presence of incompatible data models.
+ """
+ inter_repo = repository.InterRepository.get(repo_a, repo_b)
+ self.assertEqual(vf_repository.InterSameDataRepository,
+ inter_repo.__class__)
+ self.assertEqual(repo_a, inter_repo.source)
+ self.assertEqual(repo_b, inter_repo.target)
+
+ def test_register_inter_repository_class(self):
+        # test that an optimised code path provider - an InterRepository
+        # subclass - can be registered and unregistered, and that it is
+        # correctly selected when given a repository pair for which its
+        # is_compatible static method returns True
+ dummy_a = DummyRepository()
+ dummy_a._format = RepositoryFormat()
+ dummy_b = DummyRepository()
+ dummy_b._format = RepositoryFormat()
+ repo = self.make_repository('.')
+ # hack dummies to look like repo somewhat.
+ dummy_a._serializer = repo._serializer
+ dummy_a._format.supports_tree_reference = repo._format.supports_tree_reference
+ dummy_a._format.rich_root_data = repo._format.rich_root_data
+ dummy_a._format.supports_full_versioned_files = repo._format.supports_full_versioned_files
+ dummy_b._serializer = repo._serializer
+ dummy_b._format.supports_tree_reference = repo._format.supports_tree_reference
+ dummy_b._format.rich_root_data = repo._format.rich_root_data
+ dummy_b._format.supports_full_versioned_files = repo._format.supports_full_versioned_files
+ repository.InterRepository.register_optimiser(InterDummy)
+ try:
+            # we should get the default for a pair for which
+            # InterDummy.is_compatible returns False
+ self.assertFalse(InterDummy.is_compatible(dummy_a, repo))
+ self.assertGetsDefaultInterRepository(dummy_a, repo)
+ # and we should get an InterDummy for a pair it 'likes'
+ self.assertTrue(InterDummy.is_compatible(dummy_a, dummy_b))
+ inter_repo = repository.InterRepository.get(dummy_a, dummy_b)
+ self.assertEqual(InterDummy, inter_repo.__class__)
+ self.assertEqual(dummy_a, inter_repo.source)
+ self.assertEqual(dummy_b, inter_repo.target)
+ finally:
+ repository.InterRepository.unregister_optimiser(InterDummy)
+ # now we should get the default InterRepository object again.
+ self.assertGetsDefaultInterRepository(dummy_a, dummy_b)
+
+
+class TestRepositoryFormat1(knitrepo.RepositoryFormatKnit1):
+
+ @classmethod
+ def get_format_string(cls):
+ return "Test Format 1"
+
+
+class TestRepositoryFormat2(knitrepo.RepositoryFormatKnit1):
+
+ @classmethod
+ def get_format_string(cls):
+ return "Test Format 2"
+
+
+class TestRepositoryConverter(TestCaseWithTransport):
+
+ def test_convert_empty(self):
+ source_format = TestRepositoryFormat1()
+ target_format = TestRepositoryFormat2()
+ repository.format_registry.register(source_format)
+ self.addCleanup(repository.format_registry.remove,
+ source_format)
+ repository.format_registry.register(target_format)
+ self.addCleanup(repository.format_registry.remove,
+ target_format)
+ t = self.get_transport()
+ t.mkdir('repository')
+ repo_dir = bzrdir.BzrDirMetaFormat1().initialize('repository')
+ repo = TestRepositoryFormat1().initialize(repo_dir)
+ converter = repository.CopyConverter(target_format)
+ pb = bzrlib.ui.ui_factory.nested_progress_bar()
+ try:
+ converter.convert(repo, pb)
+ finally:
+ pb.finished()
+ repo = repo_dir.open_repository()
+ self.assertTrue(isinstance(target_format, repo._format.__class__))
+
+
+class TestRepositoryFormatKnit3(TestCaseWithTransport):
+
+ def test_attribute__fetch_order(self):
+ """Knits need topological data insertion."""
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ repo = self.make_repository('.', format=format)
+ self.assertEqual('topological', repo._format._fetch_order)
+
+ def test_attribute__fetch_uses_deltas(self):
+ """Knits reuse deltas."""
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ repo = self.make_repository('.', format=format)
+ self.assertEqual(True, repo._format._fetch_uses_deltas)
+
+ def test_convert(self):
+ """Ensure the upgrade adds weaves for roots"""
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit1()
+ tree = self.make_branch_and_tree('.', format)
+ tree.commit("Dull commit", rev_id="dull")
+ revision_tree = tree.branch.repository.revision_tree('dull')
+ revision_tree.lock_read()
+ try:
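+            # Knit1 does not store a per-file text for the root directory, so
+            # reading the root's lines should fail before the upgrade.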
+ self.assertRaises(errors.NoSuchFile, revision_tree.get_file_lines,
+ revision_tree.get_root_id())
+ finally:
+ revision_tree.unlock()
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ upgrade.Convert('.', format)
+ tree = workingtree.WorkingTree.open('.')
+ revision_tree = tree.branch.repository.revision_tree('dull')
+ revision_tree.lock_read()
+ try:
+ revision_tree.get_file_lines(revision_tree.get_root_id())
+ finally:
+ revision_tree.unlock()
+ tree.commit("Another dull commit", rev_id='dull2')
+ revision_tree = tree.branch.repository.revision_tree('dull2')
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ self.assertEqual('dull',
+ revision_tree.get_file_revision(revision_tree.get_root_id()))
+
+ def test_supports_external_lookups(self):
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = knitrepo.RepositoryFormatKnit3()
+ repo = self.make_repository('.', format=format)
+ self.assertFalse(repo._format.supports_external_lookups)
+
+
+class Test2a(tests.TestCaseWithMemoryTransport):
+
+ def test_chk_bytes_uses_custom_btree_parser(self):
+ mt = self.make_branch_and_memory_tree('test', format='2a')
+ mt.lock_write()
+ self.addCleanup(mt.unlock)
+ mt.add([''], ['root-id'])
+ mt.commit('first')
+ index = mt.branch.repository.chk_bytes._index._graph_index._indices[0]
+ self.assertEqual(btree_index._gcchk_factory, index._leaf_factory)
+ # It should also work if we re-open the repo
+ repo = mt.branch.repository.bzrdir.open_repository()
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ index = repo.chk_bytes._index._graph_index._indices[0]
+ self.assertEqual(btree_index._gcchk_factory, index._leaf_factory)
+
+ def test_fetch_combines_groups(self):
+ builder = self.make_branch_builder('source', format='2a')
+ builder.start_series()
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('2', ['1'], [
+ ('modify', ('file-id', 'content-2\n'))])
+ builder.finish_series()
+ source = builder.get_branch()
+ target = self.make_repository('target', format='2a')
+ target.fetch(source.repository)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ details = target.texts._index.get_build_details(
+ [('file-id', '1',), ('file-id', '2',)])
+ file_1_details = details[('file-id', '1')]
+ file_2_details = details[('file-id', '2')]
+ # The index, and what to read off disk, should be the same for both
+ # versions of the file.
+ self.assertEqual(file_1_details[0][:3], file_2_details[0][:3])
+
+ def test_format_pack_compresses_True(self):
+ repo = self.make_repository('repo', format='2a')
+ self.assertTrue(repo._format.pack_compresses)
+
+ def test_inventories_use_chk_map_with_parent_base_dict(self):
+ tree = self.make_branch_and_memory_tree('repo', format="2a")
+ tree.lock_write()
+ tree.add([''], ['TREE_ROOT'])
+ revid = tree.commit("foo")
+ tree.unlock()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ inv = tree.branch.repository.get_inventory(revid)
+ self.assertNotEqual(None, inv.parent_id_basename_to_file_id)
+ inv.parent_id_basename_to_file_id._ensure_root()
+ inv.id_to_entry._ensure_root()
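+        # Both chk maps are expected to use 65536-byte (64kiB) root nodes.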
+ self.assertEqual(65536, inv.id_to_entry._root_node.maximum_size)
+ self.assertEqual(65536,
+ inv.parent_id_basename_to_file_id._root_node.maximum_size)
+
+ def test_autopack_unchanged_chk_nodes(self):
+        # At 20 unchanged commits, the chk pages being packed have been split
+        # into two groups, such that the new pack being made doesn't have all
+        # its pages in the source packs (though they are in the repository).
+        # Use a memory-backed repository; we don't need to hit disk for this.
+ tree = self.make_branch_and_memory_tree('tree', format='2a')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['TREE_ROOT'])
+ for pos in range(20):
+ tree.commit(str(pos))
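+        # No explicit assertion: the test passes if the autopacks triggered by
+        # these commits complete without error.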
+
+ def test_pack_with_hint(self):
+ tree = self.make_branch_and_memory_tree('tree', format='2a')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add([''], ['TREE_ROOT'])
+ # 1 commit to leave untouched
+ tree.commit('1')
+ to_keep = tree.branch.repository._pack_collection.names()
+ # 2 to combine
+ tree.commit('2')
+ tree.commit('3')
+ all = tree.branch.repository._pack_collection.names()
+ combine = list(set(all) - set(to_keep))
+ self.assertLength(3, all)
+ self.assertLength(2, combine)
+ tree.branch.repository.pack(hint=combine)
+ final = tree.branch.repository._pack_collection.names()
+ self.assertLength(2, final)
+ self.assertFalse(combine[0] in final)
+ self.assertFalse(combine[1] in final)
+ self.assertSubset(to_keep, final)
+
+ def test_stream_source_to_gc(self):
+ source = self.make_repository('source', format='2a')
+ target = self.make_repository('target', format='2a')
+ stream = source._get_source(target._format)
+ self.assertIsInstance(stream, groupcompress_repo.GroupCHKStreamSource)
+
+ def test_stream_source_to_non_gc(self):
+ source = self.make_repository('source', format='2a')
+ target = self.make_repository('target', format='rich-root-pack')
+ stream = source._get_source(target._format)
+ # We don't want the child GroupCHKStreamSource
+ self.assertIs(type(stream), vf_repository.StreamSource)
+
+ def test_get_stream_for_missing_keys_includes_all_chk_refs(self):
+ source_builder = self.make_branch_builder('source',
+ format='2a')
+        # We have to build a fairly large tree, so that we are sure the chk
+        # map will have split into multiple pages.
+ entries = [('add', ('', 'a-root-id', 'directory', None))]
+ for i in 'abcdefghijklmnopqrstuvwxyz123456789':
+ for j in 'abcdefghijklmnopqrstuvwxyz123456789':
+ fname = i + j
+ fid = fname + '-id'
+ content = 'content for %s\n' % (fname,)
+ entries.append(('add', (fname, fid, 'file', content)))
+ source_builder.start_series()
+ source_builder.build_snapshot('rev-1', None, entries)
+ # Now change a few of them, so we get a few new pages for the second
+ # revision
+ source_builder.build_snapshot('rev-2', ['rev-1'], [
+ ('modify', ('aa-id', 'new content for aa-id\n')),
+ ('modify', ('cc-id', 'new content for cc-id\n')),
+ ('modify', ('zz-id', 'new content for zz-id\n')),
+ ])
+ source_builder.finish_series()
+ source_branch = source_builder.get_branch()
+ source_branch.lock_read()
+ self.addCleanup(source_branch.unlock)
+ target = self.make_repository('target', format='2a')
+ source = source_branch.repository._get_source(target._format)
+ self.assertIsInstance(source, groupcompress_repo.GroupCHKStreamSource)
+
+ # On a regular pass, getting the inventories and chk pages for rev-2
+ # would only get the newly created chk pages
+ search = vf_search.SearchResult(set(['rev-2']), set(['rev-1']), 1,
+ set(['rev-2']))
+ simple_chk_records = []
+ for vf_name, substream in source.get_stream(search):
+ if vf_name == 'chk_bytes':
+ for record in substream:
+ simple_chk_records.append(record.key)
+ else:
+ for _ in substream:
+ continue
+ # 3 pages, the root (InternalNode), + 2 pages which actually changed
+ self.assertEqual([('sha1:91481f539e802c76542ea5e4c83ad416bf219f73',),
+ ('sha1:4ff91971043668583985aec83f4f0ab10a907d3f',),
+ ('sha1:81e7324507c5ca132eedaf2d8414ee4bb2226187',),
+ ('sha1:b101b7da280596c71a4540e9a1eeba8045985ee0',)],
+ simple_chk_records)
+ # Now, when we do a similar call using 'get_stream_for_missing_keys'
+ # we should get a much larger set of pages.
+ missing = [('inventories', 'rev-2')]
+ full_chk_records = []
+ for vf_name, substream in source.get_stream_for_missing_keys(missing):
+ if vf_name == 'inventories':
+ for record in substream:
+ self.assertEqual(('rev-2',), record.key)
+ elif vf_name == 'chk_bytes':
+ for record in substream:
+ full_chk_records.append(record.key)
+ else:
+ self.fail('Should not be getting a stream of %s' % (vf_name,))
+ # We have 257 records now. This is because we have 1 root page, and 256
+ # leaf pages in a complete listing.
+ self.assertEqual(257, len(full_chk_records))
+ self.assertSubset(simple_chk_records, full_chk_records)
+
+ def test_inconsistency_fatal(self):
+ repo = self.make_repository('repo', format='2a')
+ self.assertTrue(repo.revisions._index._inconsistency_fatal)
+ self.assertFalse(repo.texts._index._inconsistency_fatal)
+ self.assertFalse(repo.inventories._index._inconsistency_fatal)
+ self.assertFalse(repo.signatures._index._inconsistency_fatal)
+ self.assertFalse(repo.chk_bytes._index._inconsistency_fatal)
+
+
+class TestKnitPackStreamSource(tests.TestCaseWithMemoryTransport):
+
+ def test_source_to_exact_pack_092(self):
+ source = self.make_repository('source', format='pack-0.92')
+ target = self.make_repository('target', format='pack-0.92')
+ stream_source = source._get_source(target._format)
+ self.assertIsInstance(stream_source, knitpack_repo.KnitPackStreamSource)
+
+ def test_source_to_exact_pack_rich_root_pack(self):
+ source = self.make_repository('source', format='rich-root-pack')
+ target = self.make_repository('target', format='rich-root-pack')
+ stream_source = source._get_source(target._format)
+ self.assertIsInstance(stream_source, knitpack_repo.KnitPackStreamSource)
+
+ def test_source_to_exact_pack_19(self):
+ source = self.make_repository('source', format='1.9')
+ target = self.make_repository('target', format='1.9')
+ stream_source = source._get_source(target._format)
+ self.assertIsInstance(stream_source, knitpack_repo.KnitPackStreamSource)
+
+ def test_source_to_exact_pack_19_rich_root(self):
+ source = self.make_repository('source', format='1.9-rich-root')
+ target = self.make_repository('target', format='1.9-rich-root')
+ stream_source = source._get_source(target._format)
+ self.assertIsInstance(stream_source, knitpack_repo.KnitPackStreamSource)
+
+ def test_source_to_remote_exact_pack_19(self):
+ trans = self.make_smart_server('target')
+ trans.ensure_base()
+ source = self.make_repository('source', format='1.9')
+ target = self.make_repository('target', format='1.9')
+ target = repository.Repository.open(trans.base)
+ stream_source = source._get_source(target._format)
+ self.assertIsInstance(stream_source, knitpack_repo.KnitPackStreamSource)
+
+ def test_stream_source_to_non_exact(self):
+ source = self.make_repository('source', format='pack-0.92')
+ target = self.make_repository('target', format='1.9')
+ stream = source._get_source(target._format)
+ self.assertIs(type(stream), vf_repository.StreamSource)
+
+ def test_stream_source_to_non_exact_rich_root(self):
+ source = self.make_repository('source', format='1.9')
+ target = self.make_repository('target', format='1.9-rich-root')
+ stream = source._get_source(target._format)
+ self.assertIs(type(stream), vf_repository.StreamSource)
+
+ def test_source_to_remote_non_exact_pack_19(self):
+ trans = self.make_smart_server('target')
+ trans.ensure_base()
+ source = self.make_repository('source', format='1.9')
+ target = self.make_repository('target', format='1.6')
+ target = repository.Repository.open(trans.base)
+ stream_source = source._get_source(target._format)
+ self.assertIs(type(stream_source), vf_repository.StreamSource)
+
+ def test_stream_source_to_knit(self):
+ source = self.make_repository('source', format='pack-0.92')
+ target = self.make_repository('target', format='dirstate')
+ stream = source._get_source(target._format)
+ self.assertIs(type(stream), vf_repository.StreamSource)
+
+
+class TestDevelopment6FindParentIdsOfRevisions(TestCaseWithTransport):
+ """Tests for _find_parent_ids_of_revisions."""
+
+ def setUp(self):
+ super(TestDevelopment6FindParentIdsOfRevisions, self).setUp()
+ self.builder = self.make_branch_builder('source')
+ self.builder.start_series()
+ self.builder.build_snapshot('initial', None,
+ [('add', ('', 'tree-root', 'directory', None))])
+ self.repo = self.builder.get_branch().repository
+ self.addCleanup(self.builder.finish_series)
+
+ def assertParentIds(self, expected_result, rev_set):
+ self.assertEqual(sorted(expected_result),
+ sorted(self.repo._find_parent_ids_of_revisions(rev_set)))
+
+ def test_simple(self):
+ self.builder.build_snapshot('revid1', None, [])
+ self.builder.build_snapshot('revid2', ['revid1'], [])
+ rev_set = ['revid2']
+ self.assertParentIds(['revid1'], rev_set)
+
+ def test_not_first_parent(self):
+ self.builder.build_snapshot('revid1', None, [])
+ self.builder.build_snapshot('revid2', ['revid1'], [])
+ self.builder.build_snapshot('revid3', ['revid2'], [])
+ rev_set = ['revid3', 'revid2']
+ self.assertParentIds(['revid1'], rev_set)
+
+ def test_not_null(self):
+ rev_set = ['initial']
+ self.assertParentIds([], rev_set)
+
+ def test_not_null_set(self):
+ self.builder.build_snapshot('revid1', None, [])
+ rev_set = [_mod_revision.NULL_REVISION]
+ self.assertParentIds([], rev_set)
+
+ def test_ghost(self):
+ self.builder.build_snapshot('revid1', None, [])
+ rev_set = ['ghost', 'revid1']
+ self.assertParentIds(['initial'], rev_set)
+
+ def test_ghost_parent(self):
+ self.builder.build_snapshot('revid1', None, [])
+ self.builder.build_snapshot('revid2', ['revid1', 'ghost'], [])
+ rev_set = ['revid2', 'revid1']
+ self.assertParentIds(['ghost', 'initial'], rev_set)
+
+ def test_righthand_parent(self):
+ self.builder.build_snapshot('revid1', None, [])
+ self.builder.build_snapshot('revid2a', ['revid1'], [])
+ self.builder.build_snapshot('revid2b', ['revid1'], [])
+ self.builder.build_snapshot('revid3', ['revid2a', 'revid2b'], [])
+ rev_set = ['revid3', 'revid2a']
+ self.assertParentIds(['revid1', 'revid2b'], rev_set)
+
+
+class TestWithBrokenRepo(TestCaseWithTransport):
+ """These tests seem to be more appropriate as interface tests?"""
+
+ def make_broken_repository(self):
+ # XXX: This function is borrowed from Aaron's "Reconcile can fix bad
+ # parent references" branch which is due to land in bzr.dev soon. Once
+ # it does, this duplication should be removed.
+ repo = self.make_repository('broken-repo')
+ cleanups = []
+ try:
+ repo.lock_write()
+ cleanups.append(repo.unlock)
+ repo.start_write_group()
+ cleanups.append(repo.commit_write_group)
+ # make rev1a: A well-formed revision, containing 'file1'
+ inv = inventory.Inventory(revision_id='rev1a')
+ inv.root.revision = 'rev1a'
+ self.add_file(repo, inv, 'file1', 'rev1a', [])
+ repo.texts.add_lines((inv.root.file_id, 'rev1a'), [], [])
+ repo.add_inventory('rev1a', inv, [])
+ revision = _mod_revision.Revision('rev1a',
+ committer='jrandom@example.com', timestamp=0,
+ inventory_sha1='', timezone=0, message='foo', parent_ids=[])
+ repo.add_revision('rev1a', revision, inv)
+
+ # make rev1b, which has no Revision, but has an Inventory, and
+ # file1
+ inv = inventory.Inventory(revision_id='rev1b')
+ inv.root.revision = 'rev1b'
+ self.add_file(repo, inv, 'file1', 'rev1b', [])
+ repo.add_inventory('rev1b', inv, [])
+
+ # make rev2, with file1 and file2
+ # file2 is sane
+ # file1 has 'rev1b' as an ancestor, even though this is not
+ # mentioned by 'rev1a', making it an unreferenced ancestor
+ inv = inventory.Inventory()
+ self.add_file(repo, inv, 'file1', 'rev2', ['rev1a', 'rev1b'])
+ self.add_file(repo, inv, 'file2', 'rev2', [])
+ self.add_revision(repo, 'rev2', inv, ['rev1a'])
+
+ # make ghost revision rev1c
+ inv = inventory.Inventory()
+ self.add_file(repo, inv, 'file2', 'rev1c', [])
+
+ # make rev3 with file2
+ # file2 refers to 'rev1c', which is a ghost in this repository, so
+ # file2 cannot have rev1c as its ancestor.
+ inv = inventory.Inventory()
+ self.add_file(repo, inv, 'file2', 'rev3', ['rev1c'])
+ self.add_revision(repo, 'rev3', inv, ['rev1c'])
+ return repo
+ finally:
+ for cleanup in reversed(cleanups):
+ cleanup()
+
+ def add_revision(self, repo, revision_id, inv, parent_ids):
+ inv.revision_id = revision_id
+ inv.root.revision = revision_id
+ repo.texts.add_lines((inv.root.file_id, revision_id), [], [])
+ repo.add_inventory(revision_id, inv, parent_ids)
+ revision = _mod_revision.Revision(revision_id,
+ committer='jrandom@example.com', timestamp=0, inventory_sha1='',
+ timezone=0, message='foo', parent_ids=parent_ids)
+ repo.add_revision(revision_id, revision, inv)
+
+ def add_file(self, repo, inv, filename, revision, parents):
+ file_id = filename + '-id'
+ entry = inventory.InventoryFile(file_id, filename, 'TREE_ROOT')
+ entry.revision = revision
+ entry.text_size = 0
+ inv.add(entry)
+ text_key = (file_id, revision)
+ parent_keys = [(file_id, parent) for parent in parents]
+ repo.texts.add_lines(text_key, parent_keys, ['line\n'])
+
+ def test_insert_from_broken_repo(self):
+ """Inserting a data stream from a broken repository won't silently
+ corrupt the target repository.
+ """
+ broken_repo = self.make_broken_repository()
+ empty_repo = self.make_repository('empty-repo')
+ try:
+ empty_repo.fetch(broken_repo)
+ except (errors.RevisionNotPresent, errors.BzrCheckError):
+ # Test successful: compression parent not being copied leads to
+ # error.
+ return
+ empty_repo.lock_read()
+ self.addCleanup(empty_repo.unlock)
+ text = empty_repo.texts.get_record_stream(
+ [('file2-id', 'rev3')], 'topological', True).next()
+ self.assertEqual('line\n', text.get_bytes_as('fulltext'))
+
+
+class TestRepositoryPackCollection(TestCaseWithTransport):
+
+ def get_format(self):
+ return controldir.format_registry.make_bzrdir('pack-0.92')
+
+ def get_packs(self):
+ format = self.get_format()
+ repo = self.make_repository('.', format=format)
+ return repo._pack_collection
+
+ def make_packs_and_alt_repo(self, write_lock=False):
+ """Create a pack repo with 3 packs, and access it via a second repo."""
+ tree = self.make_branch_and_tree('.', format=self.get_format())
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ rev1 = tree.commit('one')
+ rev2 = tree.commit('two')
+ rev3 = tree.commit('three')
+ r = repository.Repository.open('.')
+ if write_lock:
+ r.lock_write()
+ else:
+ r.lock_read()
+ self.addCleanup(r.unlock)
+ packs = r._pack_collection
+ packs.ensure_loaded()
+ return tree, r, packs, [rev1, rev2, rev3]
+
+ def test__clear_obsolete_packs(self):
+ packs = self.get_packs()
+ obsolete_pack_trans = packs.transport.clone('obsolete_packs')
+ obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n')
+ obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n')
+ obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n')
+ obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n')
+ obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n')
+ res = packs._clear_obsolete_packs()
+ self.assertEqual(['a-pack', 'another-pack'], sorted(res))
+ self.assertEqual([], obsolete_pack_trans.list_dir('.'))
+
+ def test__clear_obsolete_packs_preserve(self):
+ packs = self.get_packs()
+ obsolete_pack_trans = packs.transport.clone('obsolete_packs')
+ obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n')
+ obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n')
+ obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n')
+ obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n')
+ obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n')
+ res = packs._clear_obsolete_packs(preserve=set(['a-pack']))
+ self.assertEqual(['a-pack', 'another-pack'], sorted(res))
+ self.assertEqual(['a-pack.iix', 'a-pack.pack', 'a-pack.rix'],
+ sorted(obsolete_pack_trans.list_dir('.')))
+
+ def test__max_pack_count(self):
+ """The maximum pack count is a function of the number of revisions."""
+ # no revisions - one pack, so that we can have a revision free repo
+ # without it blowing up
+ packs = self.get_packs()
+ self.assertEqual(1, packs._max_pack_count(0))
+        # after that, the count is the sum of the decimal digits - check the first 1-9
+ self.assertEqual(1, packs._max_pack_count(1))
+ self.assertEqual(2, packs._max_pack_count(2))
+ self.assertEqual(3, packs._max_pack_count(3))
+ self.assertEqual(4, packs._max_pack_count(4))
+ self.assertEqual(5, packs._max_pack_count(5))
+ self.assertEqual(6, packs._max_pack_count(6))
+ self.assertEqual(7, packs._max_pack_count(7))
+ self.assertEqual(8, packs._max_pack_count(8))
+ self.assertEqual(9, packs._max_pack_count(9))
+ # check the boundary cases with two digits for the next decade
+ self.assertEqual(1, packs._max_pack_count(10))
+ self.assertEqual(2, packs._max_pack_count(11))
+ self.assertEqual(10, packs._max_pack_count(19))
+ self.assertEqual(2, packs._max_pack_count(20))
+ self.assertEqual(3, packs._max_pack_count(21))
+ # check some arbitrary big numbers
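+        # e.g. 112894 revisions -> 1+1+2+8+9+4 = 25 packs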
+ self.assertEqual(25, packs._max_pack_count(112894))
+
+ def test_repr(self):
+ packs = self.get_packs()
+ self.assertContainsRe(repr(packs),
+ 'RepositoryPackCollection(.*Repository(.*))')
+
+ def test__obsolete_packs(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ names = packs.names()
+ pack = packs.get_pack_by_name(names[0])
+ # Schedule this one for removal
+ packs._remove_pack_from_memory(pack)
+ # Simulate a concurrent update by renaming the .pack file and one of
+ # the indices
+ packs.transport.rename('packs/%s.pack' % (names[0],),
+ 'obsolete_packs/%s.pack' % (names[0],))
+ packs.transport.rename('indices/%s.iix' % (names[0],),
+ 'obsolete_packs/%s.iix' % (names[0],))
+ # Now trigger the obsoletion, and ensure that all the remaining files
+ # are still renamed
+ packs._obsolete_packs([pack])
+ self.assertEqual([n + '.pack' for n in names[1:]],
+ sorted(packs._pack_transport.list_dir('.')))
+ # names[0] should not be present in the index anymore
+ self.assertEqual(names[1:],
+ sorted(set([osutils.splitext(n)[0] for n in
+ packs._index_transport.list_dir('.')])))
+
+ def test__obsolete_packs_missing_directory(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ r.control_transport.rmdir('obsolete_packs')
+ names = packs.names()
+ pack = packs.get_pack_by_name(names[0])
+ # Schedule this one for removal
+ packs._remove_pack_from_memory(pack)
+ # Now trigger the obsoletion, and ensure that all the remaining files
+ # are still renamed
+ packs._obsolete_packs([pack])
+ self.assertEqual([n + '.pack' for n in names[1:]],
+ sorted(packs._pack_transport.list_dir('.')))
+ # names[0] should not be present in the index anymore
+ self.assertEqual(names[1:],
+ sorted(set([osutils.splitext(n)[0] for n in
+ packs._index_transport.list_dir('.')])))
+
+ def test_pack_distribution_zero(self):
+ packs = self.get_packs()
+ self.assertEqual([0], packs.pack_distribution(0))
+
+ def test_ensure_loaded_unlocked(self):
+ packs = self.get_packs()
+ self.assertRaises(errors.ObjectNotLocked,
+ packs.ensure_loaded)
+
+ def test_pack_distribution_one_to_nine(self):
+ packs = self.get_packs()
+ self.assertEqual([1],
+ packs.pack_distribution(1))
+ self.assertEqual([1, 1],
+ packs.pack_distribution(2))
+ self.assertEqual([1, 1, 1],
+ packs.pack_distribution(3))
+ self.assertEqual([1, 1, 1, 1],
+ packs.pack_distribution(4))
+ self.assertEqual([1, 1, 1, 1, 1],
+ packs.pack_distribution(5))
+ self.assertEqual([1, 1, 1, 1, 1, 1],
+ packs.pack_distribution(6))
+ self.assertEqual([1, 1, 1, 1, 1, 1, 1],
+ packs.pack_distribution(7))
+ self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1],
+ packs.pack_distribution(8))
+ self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1],
+ packs.pack_distribution(9))
+
+ def test_pack_distribution_stable_at_boundaries(self):
+ """When there are multi-rev packs the counts are stable."""
+ packs = self.get_packs()
+ # in 10s:
+ self.assertEqual([10], packs.pack_distribution(10))
+ self.assertEqual([10, 1], packs.pack_distribution(11))
+ self.assertEqual([10, 10], packs.pack_distribution(20))
+ self.assertEqual([10, 10, 1], packs.pack_distribution(21))
+ # 100s
+ self.assertEqual([100], packs.pack_distribution(100))
+ self.assertEqual([100, 1], packs.pack_distribution(101))
+ self.assertEqual([100, 10, 1], packs.pack_distribution(111))
+ self.assertEqual([100, 100], packs.pack_distribution(200))
+ self.assertEqual([100, 100, 1], packs.pack_distribution(201))
+ self.assertEqual([100, 100, 10, 1], packs.pack_distribution(211))
+
+ def test_plan_pack_operations_2009_revisions_skip_all_packs(self):
+ packs = self.get_packs()
+ existing_packs = [(2000, "big"), (9, "medium")]
+ # rev count - 2009 -> 2x1000 + 9x1
+ pack_operations = packs.plan_autopack_combinations(
+ existing_packs, [1000, 1000, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+ self.assertEqual([], pack_operations)
+
+ def test_plan_pack_operations_2010_revisions_skip_all_packs(self):
+ packs = self.get_packs()
+ existing_packs = [(2000, "big"), (9, "medium"), (1, "single")]
+ # rev count - 2010 -> 2x1000 + 1x10
+ pack_operations = packs.plan_autopack_combinations(
+ existing_packs, [1000, 1000, 10])
+ self.assertEqual([], pack_operations)
+
+ def test_plan_pack_operations_2010_combines_smallest_two(self):
+ packs = self.get_packs()
+ existing_packs = [(1999, "big"), (9, "medium"), (1, "single2"),
+ (1, "single1")]
+ # rev count - 2010 -> 2x1000 + 1x10 (3)
+ pack_operations = packs.plan_autopack_combinations(
+ existing_packs, [1000, 1000, 10])
+ self.assertEqual([[2, ["single2", "single1"]]], pack_operations)
+
+ def test_plan_pack_operations_creates_a_single_op(self):
+ packs = self.get_packs()
+ existing_packs = [(50, 'a'), (40, 'b'), (30, 'c'), (10, 'd'),
+ (10, 'e'), (6, 'f'), (4, 'g')]
+ # rev count 150 -> 1x100 and 5x10
+ # The two size 10 packs do not need to be touched. The 50, 40, 30 would
+ # be combined into a single 120 size pack, and the 6 & 4 would
+        # be combined into a size 10 pack. However, if we have to rewrite them,
+ # we save a pack file with no increased I/O by putting them into the
+ # same file.
+ distribution = packs.pack_distribution(150)
+ pack_operations = packs.plan_autopack_combinations(existing_packs,
+ distribution)
+ self.assertEqual([[130, ['a', 'b', 'c', 'f', 'g']]], pack_operations)
+
+ def test_all_packs_none(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ packs = tree.branch.repository._pack_collection
+ packs.ensure_loaded()
+ self.assertEqual([], packs.all_packs())
+
+ def test_all_packs_one(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ tree.commit('start')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ packs = tree.branch.repository._pack_collection
+ packs.ensure_loaded()
+ self.assertEqual([
+ packs.get_pack_by_name(packs.names()[0])],
+ packs.all_packs())
+
+ def test_all_packs_two(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ tree.commit('start')
+ tree.commit('continue')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ packs = tree.branch.repository._pack_collection
+ packs.ensure_loaded()
+ self.assertEqual([
+ packs.get_pack_by_name(packs.names()[0]),
+ packs.get_pack_by_name(packs.names()[1]),
+ ], packs.all_packs())
+
+ def test_get_pack_by_name(self):
+ format = self.get_format()
+ tree = self.make_branch_and_tree('.', format=format)
+ tree.commit('start')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ packs = tree.branch.repository._pack_collection
+ packs.reset()
+ packs.ensure_loaded()
+ name = packs.names()[0]
+ pack_1 = packs.get_pack_by_name(name)
+ # the pack should be correctly initialised
+ sizes = packs._names[name]
+ rev_index = GraphIndex(packs._index_transport, name + '.rix', sizes[0])
+ inv_index = GraphIndex(packs._index_transport, name + '.iix', sizes[1])
+ txt_index = GraphIndex(packs._index_transport, name + '.tix', sizes[2])
+ sig_index = GraphIndex(packs._index_transport, name + '.six', sizes[3])
+ self.assertEqual(pack_repo.ExistingPack(packs._pack_transport,
+ name, rev_index, inv_index, txt_index, sig_index), pack_1)
+ # and the same instance should be returned on successive calls.
+ self.assertTrue(pack_1 is packs.get_pack_by_name(name))
+
+ def test_reload_pack_names_new_entry(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo()
+ names = packs.names()
+ # Add a new pack file into the repository
+ rev4 = tree.commit('four')
+ new_names = tree.branch.repository._pack_collection.names()
+ new_name = set(new_names).difference(names)
+ self.assertEqual(1, len(new_name))
+ new_name = new_name.pop()
+ # The old collection hasn't noticed yet
+ self.assertEqual(names, packs.names())
+ self.assertTrue(packs.reload_pack_names())
+ self.assertEqual(new_names, packs.names())
+ # And the repository can access the new revision
+ self.assertEqual({rev4:(revs[-1],)}, r.get_parent_map([rev4]))
+ self.assertFalse(packs.reload_pack_names())
+
+ def test_reload_pack_names_added_and_removed(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo()
+ names = packs.names()
+ # Now repack the whole thing
+ tree.branch.repository.pack()
+ new_names = tree.branch.repository._pack_collection.names()
+ # The other collection hasn't noticed yet
+ self.assertEqual(names, packs.names())
+ self.assertTrue(packs.reload_pack_names())
+ self.assertEqual(new_names, packs.names())
+ self.assertEqual({revs[-1]:(revs[-2],)}, r.get_parent_map([revs[-1]]))
+ self.assertFalse(packs.reload_pack_names())
+
+ def test_reload_pack_names_preserves_pending(self):
+ # TODO: Update this to also test for pending-deleted names
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ # We will add one pack (via start_write_group + insert_record_stream),
+ # and remove another pack (via _remove_pack_from_memory)
+ orig_names = packs.names()
+ orig_at_load = packs._packs_at_load
+ to_remove_name = iter(orig_names).next()
+ r.start_write_group()
+ self.addCleanup(r.abort_write_group)
+ r.texts.insert_record_stream([versionedfile.FulltextContentFactory(
+ ('text', 'rev'), (), None, 'content\n')])
+ new_pack = packs._new_pack
+ self.assertTrue(new_pack.data_inserted())
+ new_pack.finish()
+ packs.allocate(new_pack)
+ packs._new_pack = None
+ removed_pack = packs.get_pack_by_name(to_remove_name)
+ packs._remove_pack_from_memory(removed_pack)
+ names = packs.names()
+ all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names()
+ new_names = set([x[0][0] for x in new_nodes])
+ self.assertEqual(names, sorted([x[0][0] for x in all_nodes]))
+ self.assertEqual(set(names) - set(orig_names), new_names)
+ self.assertEqual(set([new_pack.name]), new_names)
+ self.assertEqual([to_remove_name],
+ sorted([x[0][0] for x in deleted_nodes]))
+ packs.reload_pack_names()
+ reloaded_names = packs.names()
+ self.assertEqual(orig_at_load, packs._packs_at_load)
+ self.assertEqual(names, reloaded_names)
+ all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names()
+ new_names = set([x[0][0] for x in new_nodes])
+ self.assertEqual(names, sorted([x[0][0] for x in all_nodes]))
+ self.assertEqual(set(names) - set(orig_names), new_names)
+ self.assertEqual(set([new_pack.name]), new_names)
+ self.assertEqual([to_remove_name],
+ sorted([x[0][0] for x in deleted_nodes]))
+
+ def test_autopack_obsoletes_new_pack(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ packs._max_pack_count = lambda x: 1
+ packs.pack_distribution = lambda x: [10]
+ r.start_write_group()
+ r.revisions.insert_record_stream([versionedfile.FulltextContentFactory(
+ ('bogus-rev',), (), None, 'bogus-content\n')])
+ # This should trigger an autopack, which will combine everything into a
+ # single pack file.
+ new_names = r.commit_write_group()
+ names = packs.names()
+ self.assertEqual(1, len(names))
+ self.assertEqual([names[0] + '.pack'],
+ packs._pack_transport.list_dir('.'))
+
+ def test_autopack_reloads_and_stops(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ # After we have determined what needs to be autopacked, trigger a
+ # full-pack via the other repo which will cause us to re-evaluate and
+ # decide we don't need to do anything
+ orig_execute = packs._execute_pack_operations
+ def _munged_execute_pack_ops(*args, **kwargs):
+ tree.branch.repository.pack()
+ return orig_execute(*args, **kwargs)
+ packs._execute_pack_operations = _munged_execute_pack_ops
+ packs._max_pack_count = lambda x: 1
+ packs.pack_distribution = lambda x: [10]
+ self.assertFalse(packs.autopack())
+ self.assertEqual(1, len(packs.names()))
+ self.assertEqual(tree.branch.repository._pack_collection.names(),
+ packs.names())
+
+ def test__save_pack_names(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ names = packs.names()
+ pack = packs.get_pack_by_name(names[0])
+ packs._remove_pack_from_memory(pack)
+ packs._save_pack_names(obsolete_packs=[pack])
+ cur_packs = packs._pack_transport.list_dir('.')
+ self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs))
+ # obsolete_packs will also have stuff like .rix and .iix present.
+ obsolete_packs = packs.transport.list_dir('obsolete_packs')
+ obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs])
+ self.assertEqual([pack.name], sorted(obsolete_names))
+
+ def test__save_pack_names_already_obsoleted(self):
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ names = packs.names()
+ pack = packs.get_pack_by_name(names[0])
+ packs._remove_pack_from_memory(pack)
+ # We are going to simulate a concurrent autopack by manually obsoleting
+ # the pack directly.
+ packs._obsolete_packs([pack])
+ packs._save_pack_names(clear_obsolete_packs=True,
+ obsolete_packs=[pack])
+ cur_packs = packs._pack_transport.list_dir('.')
+ self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs))
+ # Note that while we set clear_obsolete_packs=True, it should not
+ # delete a pack file that we have also scheduled for obsoletion.
+ obsolete_packs = packs.transport.list_dir('obsolete_packs')
+ obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs])
+ self.assertEqual([pack.name], sorted(obsolete_names))
+
+ def test_pack_no_obsolete_packs_directory(self):
+ """Bug #314314, don't fail if obsolete_packs directory does
+ not exist."""
+ tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
+ r.control_transport.rmdir('obsolete_packs')
+ packs._clear_obsolete_packs()
+
+
+class TestPack(TestCaseWithTransport):
+ """Tests for the Pack object."""
+
+ def assertCurrentlyEqual(self, left, right):
+ self.assertTrue(left == right)
+ self.assertTrue(right == left)
+ self.assertFalse(left != right)
+ self.assertFalse(right != left)
+
+ def assertCurrentlyNotEqual(self, left, right):
+ self.assertFalse(left == right)
+ self.assertFalse(right == left)
+ self.assertTrue(left != right)
+ self.assertTrue(right != left)
+
+ def test___eq____ne__(self):
+ left = pack_repo.ExistingPack('', '', '', '', '', '')
+ right = pack_repo.ExistingPack('', '', '', '', '', '')
+ self.assertCurrentlyEqual(left, right)
+ # change all attributes and ensure equality changes as we do.
+ left.revision_index = 'a'
+ self.assertCurrentlyNotEqual(left, right)
+ right.revision_index = 'a'
+ self.assertCurrentlyEqual(left, right)
+ left.inventory_index = 'a'
+ self.assertCurrentlyNotEqual(left, right)
+ right.inventory_index = 'a'
+ self.assertCurrentlyEqual(left, right)
+ left.text_index = 'a'
+ self.assertCurrentlyNotEqual(left, right)
+ right.text_index = 'a'
+ self.assertCurrentlyEqual(left, right)
+ left.signature_index = 'a'
+ self.assertCurrentlyNotEqual(left, right)
+ right.signature_index = 'a'
+ self.assertCurrentlyEqual(left, right)
+ left.name = 'a'
+ self.assertCurrentlyNotEqual(left, right)
+ right.name = 'a'
+ self.assertCurrentlyEqual(left, right)
+ left.transport = 'a'
+ self.assertCurrentlyNotEqual(left, right)
+ right.transport = 'a'
+ self.assertCurrentlyEqual(left, right)
+
+ def test_file_name(self):
+ pack = pack_repo.ExistingPack('', 'a_name', '', '', '', '')
+ self.assertEqual('a_name.pack', pack.file_name())
+
+
+class TestNewPack(TestCaseWithTransport):
+ """Tests for pack_repo.NewPack."""
+
+ def test_new_instance_attributes(self):
+ upload_transport = self.get_transport('upload')
+ pack_transport = self.get_transport('pack')
+ index_transport = self.get_transport('index')
+ upload_transport.mkdir('.')
+ collection = pack_repo.RepositoryPackCollection(
+ repo=None,
+ transport=self.get_transport('.'),
+ index_transport=index_transport,
+ upload_transport=upload_transport,
+ pack_transport=pack_transport,
+ index_builder_class=BTreeBuilder,
+ index_class=BTreeGraphIndex,
+ use_chk_index=False)
+ pack = pack_repo.NewPack(collection)
+ self.addCleanup(pack.abort) # Make sure the write stream gets closed
+ self.assertIsInstance(pack.revision_index, BTreeBuilder)
+ self.assertIsInstance(pack.inventory_index, BTreeBuilder)
+ self.assertIsInstance(pack._hash, type(osutils.md5()))
+ self.assertTrue(pack.upload_transport is upload_transport)
+ self.assertTrue(pack.index_transport is index_transport)
+ self.assertTrue(pack.pack_transport is pack_transport)
+ self.assertEqual(None, pack.index_sizes)
+ self.assertEqual(20, len(pack.random_name))
+ self.assertIsInstance(pack.random_name, str)
+ self.assertIsInstance(pack.start_time, float)
+
+
+class TestPacker(TestCaseWithTransport):
+ """Tests for the packs repository Packer class."""
+
+ def test_pack_optimizes_pack_order(self):
+ builder = self.make_branch_builder('.', format="1.9")
+ builder.start_series()
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('f', 'f-id', 'file', 'content\n'))])
+ builder.build_snapshot('B', ['A'],
+ [('modify', ('f-id', 'new-content\n'))])
+ builder.build_snapshot('C', ['B'],
+ [('modify', ('f-id', 'third-content\n'))])
+ builder.build_snapshot('D', ['C'],
+ [('modify', ('f-id', 'fourth-content\n'))])
+ b = builder.get_branch()
+ b.lock_read()
+ builder.finish_series()
+ self.addCleanup(b.unlock)
+ # At this point, we should have 4 pack files available
+ # Because of how they were built, they correspond to
+ # ['D', 'C', 'B', 'A']
+ packs = b.repository._pack_collection.packs
+ packer = knitpack_repo.KnitPacker(b.repository._pack_collection,
+ packs, 'testing',
+ revision_ids=['B', 'C'])
+ # Now, when we are copying the B & C revisions, their pack files should
+ # be moved to the front of the stack
+ # The new ordering moves B & C to the front of the .packs attribute,
+ # and leaves the others in the original order.
+ new_packs = [packs[1], packs[2], packs[0], packs[3]]
+ new_pack = packer.pack()
+ self.assertEqual(new_packs, packer.packs)
+
+
+class TestOptimisingPacker(TestCaseWithTransport):
+ """Tests for the OptimisingPacker class."""
+
+ def get_pack_collection(self):
+ repo = self.make_repository('.')
+ return repo._pack_collection
+
+ def test_open_pack_will_optimise(self):
+ packer = knitpack_repo.OptimisingKnitPacker(self.get_pack_collection(),
+ [], '.test')
+ new_pack = packer.open_pack()
+ self.addCleanup(new_pack.abort) # ensure cleanup
+ self.assertIsInstance(new_pack, pack_repo.NewPack)
+ self.assertTrue(new_pack.revision_index._optimize_for_size)
+ self.assertTrue(new_pack.inventory_index._optimize_for_size)
+ self.assertTrue(new_pack.text_index._optimize_for_size)
+ self.assertTrue(new_pack.signature_index._optimize_for_size)
+
+
+class TestGCCHKPacker(TestCaseWithTransport):
+
+ def make_abc_branch(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ builder.build_snapshot('A', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n')),
+ ])
+ builder.build_snapshot('B', ['A'], [
+ ('add', ('dir', 'dir-id', 'directory', None))])
+ builder.build_snapshot('C', ['B'], [
+ ('modify', ('file-id', 'new content\n'))])
+ builder.finish_series()
+ return builder.get_branch()
+
+ def make_branch_with_disjoint_inventory_and_revision(self):
+ """a repo with separate packs for a revisions Revision and Inventory.
+
+ There will be one pack file that holds the Revision content, and one
+ for the Inventory content.
+
+ :return: (repository,
+ pack_name_with_rev_A_Revision,
+ pack_name_with_rev_A_Inventory,
+ pack_name_with_rev_C_content)
+ """
+ b_source = self.make_abc_branch()
+ b_base = b_source.bzrdir.sprout('base', revision_id='A').open_branch()
+ b_stacked = b_base.bzrdir.sprout('stacked', stacked=True).open_branch()
+ b_stacked.lock_write()
+ self.addCleanup(b_stacked.unlock)
+ b_stacked.fetch(b_source, 'B')
+ # Now re-open the stacked repo directly (no fallbacks) so that we can
+ # fill in the A rev.
+ repo_not_stacked = b_stacked.bzrdir.open_repository()
+ repo_not_stacked.lock_write()
+ self.addCleanup(repo_not_stacked.unlock)
+ # Now we should have a pack file with A's inventory, but not its
+ # Revision
+ self.assertEqual([('A',), ('B',)],
+ sorted(repo_not_stacked.inventories.keys()))
+ self.assertEqual([('B',)],
+ sorted(repo_not_stacked.revisions.keys()))
+ stacked_pack_names = repo_not_stacked._pack_collection.names()
+ # We have a couple names here, figure out which has A's inventory
+ for name in stacked_pack_names:
+ pack = repo_not_stacked._pack_collection.get_pack_by_name(name)
+ keys = [n[1] for n in pack.inventory_index.iter_all_entries()]
+ if ('A',) in keys:
+ inv_a_pack_name = name
+ break
+ else:
+ self.fail('Could not find pack containing A\'s inventory')
+ repo_not_stacked.fetch(b_source.repository, 'A')
+ self.assertEqual([('A',), ('B',)],
+ sorted(repo_not_stacked.revisions.keys()))
+ new_pack_names = set(repo_not_stacked._pack_collection.names())
+ rev_a_pack_names = new_pack_names.difference(stacked_pack_names)
+ self.assertEqual(1, len(rev_a_pack_names))
+ rev_a_pack_name = list(rev_a_pack_names)[0]
+ # Now fetch 'C', so we have a couple pack files to join
+ repo_not_stacked.fetch(b_source.repository, 'C')
+ rev_c_pack_names = set(repo_not_stacked._pack_collection.names())
+ rev_c_pack_names = rev_c_pack_names.difference(new_pack_names)
+ self.assertEqual(1, len(rev_c_pack_names))
+ rev_c_pack_name = list(rev_c_pack_names)[0]
+ return (repo_not_stacked, rev_a_pack_name, inv_a_pack_name,
+ rev_c_pack_name)
+
+ def test_pack_with_distant_inventories(self):
+ # See https://bugs.launchpad.net/bzr/+bug/437003
+ # When repacking, it is possible to have an inventory in a different
+ # pack file than the associated revision. An autopack can then come
+ # along, and miss that inventory, and complain.
+ (repo, rev_a_pack_name, inv_a_pack_name, rev_c_pack_name
+ ) = self.make_branch_with_disjoint_inventory_and_revision()
+ a_pack = repo._pack_collection.get_pack_by_name(rev_a_pack_name)
+ c_pack = repo._pack_collection.get_pack_by_name(rev_c_pack_name)
+ packer = groupcompress_repo.GCCHKPacker(repo._pack_collection,
+ [a_pack, c_pack], '.test-pack')
+ # This would raise ValueError in bug #437003, but should not raise an
+ # error once fixed.
+ packer.pack()
+
+ def test_pack_with_missing_inventory(self):
+        # Similar to test_pack_with_distant_inventories, but this time, we force
+ # the A inventory to actually be gone from the repository.
+ (repo, rev_a_pack_name, inv_a_pack_name, rev_c_pack_name
+ ) = self.make_branch_with_disjoint_inventory_and_revision()
+ inv_a_pack = repo._pack_collection.get_pack_by_name(inv_a_pack_name)
+ repo._pack_collection._remove_pack_from_memory(inv_a_pack)
+ packer = groupcompress_repo.GCCHKPacker(repo._pack_collection,
+ repo._pack_collection.all_packs(), '.test-pack')
+ e = self.assertRaises(ValueError, packer.pack)
+ packer.new_pack.abort()
+ self.assertContainsRe(str(e),
+ r"We are missing inventories for revisions: .*'A'")
+
+
+class TestCrossFormatPacks(TestCaseWithTransport):
+
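+    # log_pack is swapped in for the target repository's pack() via
+    # overrideAttr in run_stream/run_fetch, recording each call and its hint.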
+ def log_pack(self, hint=None):
+ self.calls.append(('pack', hint))
+ self.orig_pack(hint=hint)
+ if self.expect_hint:
+ self.assertTrue(hint)
+
+ def run_stream(self, src_fmt, target_fmt, expect_pack_called):
+ self.expect_hint = expect_pack_called
+ self.calls = []
+ source_tree = self.make_branch_and_tree('src', format=src_fmt)
+ source_tree.lock_write()
+ self.addCleanup(source_tree.unlock)
+ tip = source_tree.commit('foo')
+ target = self.make_repository('target', format=target_fmt)
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ source = source_tree.branch.repository._get_source(target._format)
+ self.orig_pack = target.pack
+ self.overrideAttr(target, "pack", self.log_pack)
+ search = target.search_missing_revision_ids(
+ source_tree.branch.repository, revision_ids=[tip])
+ stream = source.get_stream(search)
+ from_format = source_tree.branch.repository._format
+ sink = target._get_sink()
+ sink.insert_stream(stream, from_format, [])
+ if expect_pack_called:
+ self.assertLength(1, self.calls)
+ else:
+ self.assertLength(0, self.calls)
+
+ def run_fetch(self, src_fmt, target_fmt, expect_pack_called):
+ self.expect_hint = expect_pack_called
+ self.calls = []
+ source_tree = self.make_branch_and_tree('src', format=src_fmt)
+ source_tree.lock_write()
+ self.addCleanup(source_tree.unlock)
+ tip = source_tree.commit('foo')
+ target = self.make_repository('target', format=target_fmt)
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ source = source_tree.branch.repository
+ self.orig_pack = target.pack
+ self.overrideAttr(target, "pack", self.log_pack)
+ target.fetch(source)
+ if expect_pack_called:
+ self.assertLength(1, self.calls)
+ else:
+ self.assertLength(0, self.calls)
+
+ def test_sink_format_hint_no(self):
+ # When the target format says packing makes no difference, pack is not
+ # called.
+ self.run_stream('1.9', 'rich-root-pack', False)
+
+ def test_sink_format_hint_yes(self):
+ # When the target format says packing makes a difference, pack is
+ # called.
+ self.run_stream('1.9', '2a', True)
+
+ def test_sink_format_same_no(self):
+ # When the formats are the same, pack is not called.
+ self.run_stream('2a', '2a', False)
+
+ def test_IDS_format_hint_no(self):
+ # When the target format says packing makes no difference, pack is not
+ # called.
+ self.run_fetch('1.9', 'rich-root-pack', False)
+
+ def test_IDS_format_hint_yes(self):
+ # When the target format says packing makes a difference, pack is
+ # called.
+ self.run_fetch('1.9', '2a', True)
+
+ def test_IDS_format_same_no(self):
+ # When the formats are the same, pack is not called.
+ self.run_fetch('2a', '2a', False)
+
+
+class Test_LazyListJoin(tests.TestCase):
+
+ def test__repr__(self):
+ lazy = repository._LazyListJoin(['a'], ['b'])
+ self.assertEqual("bzrlib.repository._LazyListJoin((['a'], ['b']))",
+ repr(lazy))
+
+
+class TestFeatures(tests.TestCaseWithTransport):
+
+ def test_open_with_present_feature(self):
+ self.addCleanup(
+ repository.RepositoryFormatMetaDir.unregister_feature,
+ "makes-cheese-sandwich")
+ repository.RepositoryFormatMetaDir.register_feature(
+ "makes-cheese-sandwich")
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo._format.features["makes-cheese-sandwich"] = "required"
+ repo._format.check_support_status(False)
+ repo.unlock()
+
+ def test_open_with_missing_required_feature(self):
+ repo = self.make_repository('.')
+ repo.lock_write()
+ repo._format.features["makes-cheese-sandwich"] = "required"
+ self.assertRaises(errors.MissingFeature,
+ repo._format.check_support_status, False)
diff --git a/bzrlib/tests/test_revert.py b/bzrlib/tests/test_revert.py
new file mode 100644
index 0000000..5ac6137
--- /dev/null
+++ b/bzrlib/tests/test_revert.py
@@ -0,0 +1,161 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib import merge, tests, transform, workingtree
+
+
+class TestRevert(tests.TestCaseWithTransport):
+ """Ensure that revert behaves as expected"""
+
+ def test_revert_merged_dir(self):
+ """Reverting a merge that adds a directory deletes the directory"""
+ source_tree = self.make_branch_and_tree('source')
+ source_tree.commit('empty tree')
+ target_tree = source_tree.bzrdir.sprout('target').open_workingtree()
+ self.build_tree(['source/dir/', 'source/dir/contents'])
+ source_tree.add(['dir', 'dir/contents'], ['dir-id', 'contents-id'])
+ source_tree.commit('added dir')
+ target_tree.lock_write()
+ self.addCleanup(target_tree.unlock)
+ merge.merge_inner(target_tree.branch, source_tree.basis_tree(),
+ target_tree.basis_tree(), this_tree=target_tree)
+ self.assertPathExists('target/dir')
+ self.assertPathExists('target/dir/contents')
+ target_tree.revert()
+ self.assertPathDoesNotExist('target/dir/contents')
+ self.assertPathDoesNotExist('target/dir')
+
+ def test_revert_new(self):
+ """Only locally-changed new files should be preserved when reverting
+
+ When a file isn't present in revert's target tree:
+ If a file hasn't been committed, revert should unversion it, but not
+ delete it.
+ If a file has local changes, revert should unversion it, but not
+ delete it.
+ If a file has no changes from the last commit, revert should delete it.
+ If a file has changes due to a merge, revert should delete it.
+ """
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('empty tree')
+ merge_target = tree.bzrdir.sprout('merge_target').open_workingtree()
+ self.build_tree(['tree/new_file'])
+
+ # newly-added files should not be deleted
+ tree.add('new_file')
+ basis_tree = tree.branch.repository.revision_tree(tree.last_revision())
+ tree.revert()
+ self.assertPathExists('tree/new_file')
+
+ # unchanged files should be deleted
+ tree.add('new_file')
+ tree.commit('add new_file')
+ tree.revert(old_tree=basis_tree)
+ self.assertPathDoesNotExist('tree/new_file')
+
+ # files should be deleted if their changes came from merges
+ merge_target.merge_from_branch(tree.branch)
+ self.assertPathExists('merge_target/new_file')
+ merge_target.revert()
+ self.assertPathDoesNotExist('merge_target/new_file')
+
+ # files should not be deleted if changed after a merge
+ merge_target.merge_from_branch(tree.branch)
+ self.assertPathExists('merge_target/new_file')
+ self.build_tree_contents([('merge_target/new_file', 'new_contents')])
+ merge_target.revert()
+ self.assertPathExists('merge_target/new_file')
+
+ def tree_with_executable(self):
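+        """Create a tree containing one committed executable file, 'newfile-id'."""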
+ tree = self.make_branch_and_tree('tree')
+ tt = transform.TreeTransform(tree)
+ tt.new_file('newfile', tt.root, 'helooo!', 'newfile-id', True)
+ tt.apply()
+ tree.lock_write()
+ try:
+ self.assertTrue(tree.is_executable('newfile-id'))
+ tree.commit('added newfile')
+ finally:
+ tree.unlock()
+ return tree
+
+ def test_preserve_execute(self):
+ tree = self.tree_with_executable()
+ tt = transform.TreeTransform(tree)
+ newfile = tt.trans_id_tree_file_id('newfile-id')
+ tt.delete_contents(newfile)
+ tt.create_file('Woooorld!', newfile)
+ tt.apply()
+ tree = workingtree.WorkingTree.open('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.assertTrue(tree.is_executable('newfile-id'))
+ transform.revert(tree, tree.basis_tree(), None, backups=True)
+ self.assertEqual('helooo!', tree.get_file('newfile-id').read())
+ self.assertTrue(tree.is_executable('newfile-id'))
+
+ def test_revert_executable(self):
+ tree = self.tree_with_executable()
+ tt = transform.TreeTransform(tree)
+ newfile = tt.trans_id_tree_file_id('newfile-id')
+ tt.set_executability(False, newfile)
+ tt.apply()
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ transform.revert(tree, tree.basis_tree(), None)
+ self.assertTrue(tree.is_executable('newfile-id'))
+
+ def test_revert_deletes_files_from_revert(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['file'])
+ tree.add('file')
+ tree.commit('added file', rev_id='rev1')
+ os.unlink('file')
+ tree.commit('removed file')
+ self.assertPathDoesNotExist('file')
+ tree.revert(old_tree=tree.branch.repository.revision_tree('rev1'))
+ self.assertPathExists('file')
+ tree.revert()
+ self.assertPathDoesNotExist('file')
+ self.assertEqual({}, tree.merge_modified())
+
+ def test_revert_file_in_deleted_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['dir/', 'dir/file1', 'dir/file2'])
+ tree.add(['dir', 'dir/file1', 'dir/file2'],
+ ['dir-id', 'file1-id', 'file2-id'])
+ tree.commit("Added files")
+ os.unlink('dir/file1')
+ os.unlink('dir/file2')
+ os.rmdir('dir')
+ tree.remove(['dir/', 'dir/file1', 'dir/file2'])
+ tree.revert(['dir/file1'])
+ self.assertPathExists('dir/file1')
+ self.assertPathDoesNotExist('dir/file2')
+ self.assertEqual('dir-id', tree.path2id('dir'))
+
+ def test_revert_root_id_change(self):
+ tree = self.make_branch_and_tree('.')
+ tree.set_root_id('initial-root-id')
+ self.build_tree(['file1'])
+ tree.add(['file1'])
+ tree.commit('first')
+ tree.set_root_id('temp-root-id')
+ self.assertEqual('temp-root-id', tree.get_root_id())
+ tree.revert()
+ self.assertEqual('initial-root-id', tree.get_root_id())
diff --git a/bzrlib/tests/test_revision.py b/bzrlib/tests/test_revision.py
new file mode 100644
index 0000000..a6c523b
--- /dev/null
+++ b/bzrlib/tests/test_revision.py
@@ -0,0 +1,257 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import warnings
+
+from bzrlib import (
+ bugtracker,
+ revision,
+ )
+from bzrlib.errors import (
+ InvalidBugStatus,
+ InvalidLineInBugsProperty,
+ )
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests import TestCase, TestCaseWithTransport
+from bzrlib.tests.matchers import MatchesAncestry
+
+# We're allowed to test deprecated interfaces
+warnings.filterwarnings('ignore',
+ '.*get_intervening_revisions was deprecated',
+ DeprecationWarning,
+ r'bzrlib\.tests\.test_revision')
+
+# XXX: Make this a method of a merge base case
+def make_branches(self, format=None):
+ """Create two branches
+
+ branch 1 has 6 commits, branch 2 has 3 commits
+    commit 10 is a ghosted merge from branch 1
+
+ the object graph is
+ B: A:
+ a..0 a..0
+ a..1 a..1
+ a..2 a..2
+ b..3 a..3 merges b..4
+ b..4 a..4
+ b..5 a..5 merges b..5
+ b..6 merges a4
+
+ so A is missing b6 at the start
+ and B is missing a3, a4, a5
+ """
+ tree1 = self.make_branch_and_tree("branch1", format=format)
+ br1 = tree1.branch
+
+ tree1.commit("Commit one", rev_id="a@u-0-0")
+ tree1.commit("Commit two", rev_id="a@u-0-1")
+ tree1.commit("Commit three", rev_id="a@u-0-2")
+
+ tree2 = tree1.bzrdir.sprout("branch2").open_workingtree()
+ br2 = tree2.branch
+ tree2.commit("Commit four", rev_id="b@u-0-3")
+ tree2.commit("Commit five", rev_id="b@u-0-4")
+ self.assertEquals(br2.last_revision(), 'b@u-0-4')
+
+ tree1.merge_from_branch(br2)
+ tree1.commit("Commit six", rev_id="a@u-0-3")
+ tree1.commit("Commit seven", rev_id="a@u-0-4")
+ tree2.commit("Commit eight", rev_id="b@u-0-5")
+ self.assertEquals(br2.last_revision(), 'b@u-0-5')
+
+ tree1.merge_from_branch(br2)
+ tree1.commit("Commit nine", rev_id="a@u-0-5")
+ # DO NOT MERGE HERE - we WANT a GHOST.
+ br1.lock_read()
+ try:
+ graph = br1.repository.get_graph()
+ revhistory = list(graph.iter_lefthand_ancestry(br1.last_revision(),
+ [revision.NULL_REVISION]))
+ revhistory.reverse()
+ finally:
+ br1.unlock()
+ tree2.add_parent_tree_id(revhistory[4])
+ tree2.commit("Commit ten - ghost merge", rev_id="b@u-0-6")
+
+ return br1, br2
+
+
+class TestIsAncestor(TestCaseWithTransport):
+
+ def test_recorded_ancestry(self):
+ """Test that commit records all ancestors"""
+ br1, br2 = make_branches(self)
+ d = [('a@u-0-0', ['a@u-0-0']),
+ ('a@u-0-1', ['a@u-0-0', 'a@u-0-1']),
+ ('a@u-0-2', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2']),
+ ('b@u-0-3', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3']),
+ ('b@u-0-4', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3',
+ 'b@u-0-4']),
+ ('a@u-0-3', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3', 'b@u-0-4',
+ 'a@u-0-3']),
+ ('a@u-0-4', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3', 'b@u-0-4',
+ 'a@u-0-3', 'a@u-0-4']),
+ ('b@u-0-5', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3', 'b@u-0-4',
+ 'b@u-0-5']),
+ ('a@u-0-5', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'a@u-0-3', 'a@u-0-4',
+ 'b@u-0-3', 'b@u-0-4',
+ 'b@u-0-5', 'a@u-0-5']),
+ ('b@u-0-6', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'a@u-0-4',
+ 'b@u-0-3', 'b@u-0-4',
+ 'b@u-0-5', 'b@u-0-6']),
+ ]
+ br1_only = ('a@u-0-3', 'a@u-0-4', 'a@u-0-5')
+ br2_only = ('b@u-0-6',)
+ for branch in br1, br2:
+ for rev_id, anc in d:
+                if rev_id in br1_only and branch is not br1:
+                    continue
+                if rev_id in br2_only and branch is not br2:
+ continue
+ self.assertThat(anc,
+ MatchesAncestry(branch.repository, rev_id))
+
+
+class TestIntermediateRevisions(TestCaseWithTransport):
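+    """Build further merges between the two branches from make_branches."""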
+
+ def setUp(self):
+ TestCaseWithTransport.setUp(self)
+ self.br1, self.br2 = make_branches(self)
+ wt1 = self.br1.bzrdir.open_workingtree()
+ wt2 = self.br2.bzrdir.open_workingtree()
+ wt2.commit("Commit eleven", rev_id="b@u-0-7")
+ wt2.commit("Commit twelve", rev_id="b@u-0-8")
+ wt2.commit("Commit thirtteen", rev_id="b@u-0-9")
+
+ wt1.merge_from_branch(self.br2)
+ wt1.commit("Commit fourtten", rev_id="a@u-0-6")
+
+ wt2.merge_from_branch(self.br1)
+ wt2.commit("Commit fifteen", rev_id="b@u-0-10")
+
+
+class MockRevisionSource(object):
+ """A RevisionSource that takes a pregenerated graph.
+
+ This is useful for testing revision graph algorithms where
+ the actual branch existing is irrelevant.
+ """
+
+ def __init__(self, full_graph):
+ self._full_graph = full_graph
+
+ def get_revision_graph_with_ghosts(self, revision_ids):
+ # This is mocked out to just return a constant graph.
+ return self._full_graph
+
+
+class TestCommonAncestor(TestCaseWithTransport):
+ """Test checking whether a revision is an ancestor of another revision"""
+
+ def test_get_history(self):
+ # TODO: test ghosts on the left hand branch's impact
+ # TODO: test ghosts on all parents, we should get some
+ # indicator. i.e. NULL_REVISION
+ # RBC 20060608
+ tree = self.make_branch_and_tree('.')
+        tree.commit('1', rev_id='1', allow_pointless=True)
+        tree.commit('2', rev_id='2', allow_pointless=True)
+        tree.commit('3', rev_id='3', allow_pointless=True)
+ rev = tree.branch.repository.get_revision('1')
+ history = rev.get_history(tree.branch.repository)
+ self.assertEqual([None, '1'], history)
+ rev = tree.branch.repository.get_revision('2')
+ history = rev.get_history(tree.branch.repository)
+ self.assertEqual([None, '1', '2'], history)
+ rev = tree.branch.repository.get_revision('3')
+ history = rev.get_history(tree.branch.repository)
+        self.assertEqual([None, '1', '2', '3'], history)
+
+
+class TestReservedId(TestCase):
+
+ def test_is_reserved_id(self):
+ self.assertEqual(True, revision.is_reserved_id(NULL_REVISION))
+ self.assertEqual(True, revision.is_reserved_id(
+ revision.CURRENT_REVISION))
+ self.assertEqual(True, revision.is_reserved_id('arch:'))
+ self.assertEqual(False, revision.is_reserved_id('null'))
+ self.assertEqual(False, revision.is_reserved_id(
+ 'arch:a@example.com/c--b--v--r'))
+ self.assertEqual(False, revision.is_reserved_id(None))
+
+
+class TestRevisionMethods(TestCase):
+
+ def test_get_summary(self):
+ r = revision.Revision('1')
+ r.message = 'a'
+ self.assertEqual('a', r.get_summary())
+ r.message = 'a\nb'
+ self.assertEqual('a', r.get_summary())
+ r.message = '\na\nb'
+ self.assertEqual('a', r.get_summary())
+ r.message = None
+ self.assertEqual('', r.get_summary())
+
+ def test_get_apparent_authors(self):
+ r = revision.Revision('1')
+ r.committer = 'A'
+ self.assertEqual(['A'], r.get_apparent_authors())
+ r.properties['author'] = 'B'
+ self.assertEqual(['B'], r.get_apparent_authors())
+ r.properties['authors'] = 'C\nD'
+ self.assertEqual(['C', 'D'], r.get_apparent_authors())
+
+ def test_get_apparent_authors_no_committer(self):
+ r = revision.Revision('1')
+ self.assertEqual([], r.get_apparent_authors())
+
+
+class TestRevisionBugs(TestCase):
+ """Tests for getting the bugs that a revision is linked to."""
+
+ def test_no_bugs(self):
+ r = revision.Revision('1')
+ self.assertEqual([], list(r.iter_bugs()))
+
+ def test_some_bugs(self):
+ r = revision.Revision(
+ '1', properties={
+ 'bugs': bugtracker.encode_fixes_bug_urls(
+ ['http://example.com/bugs/1',
+ 'http://launchpad.net/bugs/1234'])})
+ self.assertEqual(
+ [('http://example.com/bugs/1', bugtracker.FIXED),
+ ('http://launchpad.net/bugs/1234', bugtracker.FIXED)],
+ list(r.iter_bugs()))
+
+ def test_no_status(self):
+ r = revision.Revision(
+ '1', properties={'bugs': 'http://example.com/bugs/1'})
+ self.assertRaises(InvalidLineInBugsProperty, list, r.iter_bugs())
+
+ def test_too_much_information(self):
+ r = revision.Revision(
+ '1', properties={'bugs': 'http://example.com/bugs/1 fixed bar'})
+ self.assertRaises(InvalidLineInBugsProperty, list, r.iter_bugs())
+
+ def test_invalid_status(self):
+ r = revision.Revision(
+ '1', properties={'bugs': 'http://example.com/bugs/1 faxed'})
+ self.assertRaises(InvalidBugStatus, list, r.iter_bugs())
diff --git a/bzrlib/tests/test_revisionspec.py b/bzrlib/tests/test_revisionspec.py
new file mode 100644
index 0000000..0f9cb5c
--- /dev/null
+++ b/bzrlib/tests/test_revisionspec.py
@@ -0,0 +1,756 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import datetime
+import time
+
+from bzrlib import (
+ errors,
+ revision as _mod_revision,
+ symbol_versioning,
+ )
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.revisionspec import (
+ RevisionInfo,
+ RevisionSpec,
+ RevisionSpec_dwim,
+ RevisionSpec_tag,
+ )
+
+
+def spec_in_history(spec, branch):
+ """A simple helper to change a revision spec into a branch search"""
+ return RevisionSpec.from_string(spec).in_history(branch)
+
+
+# Basic class, which just creates a really basic set of revisions
+class TestRevisionSpec(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestRevisionSpec, self).setUp()
+ # this sets up a revision graph:
+ # r1: [] 1
+ # alt_r2: [r1] 1.1.1
+ # r2: [r1, alt_r2] 2
+
+ self.tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a'])
+ self.tree.lock_write()
+ self.addCleanup(self.tree.unlock)
+ self.tree.add(['a'])
+ self.tree.commit('a', rev_id='r1')
+
+ self.tree2 = self.tree.bzrdir.sprout('tree2').open_workingtree()
+ self.tree2.commit('alt', rev_id='alt_r2')
+
+ self.tree.merge_from_branch(self.tree2.branch)
+ self.tree.commit('second', rev_id='r2')
+
+ def get_in_history(self, revision_spec):
+ return spec_in_history(revision_spec, self.tree.branch)
+
+ def assertInHistoryIs(self, exp_revno, exp_revision_id, revision_spec):
+ rev_info = self.get_in_history(revision_spec)
+ self.assertEqual(exp_revno, rev_info.revno,
+ 'Revision spec: %r returned wrong revno: %r != %r'
+ % (revision_spec, exp_revno, rev_info.revno))
+ self.assertEqual(exp_revision_id, rev_info.rev_id,
+ 'Revision spec: %r returned wrong revision id:'
+ ' %r != %r'
+ % (revision_spec, exp_revision_id, rev_info.rev_id))
+
+ def assertInvalid(self, revision_spec, extra='',
+ invalid_as_revision_id=True):
+ try:
+ self.get_in_history(revision_spec)
+ except errors.InvalidRevisionSpec, e:
+ self.assertEqual(revision_spec, e.spec)
+ self.assertEqual(extra, e.extra)
+ else:
+ self.fail('Expected InvalidRevisionSpec to be raised for'
+ ' %r.in_history' % (revision_spec,))
+ if invalid_as_revision_id:
+ try:
+ spec = RevisionSpec.from_string(revision_spec)
+ spec.as_revision_id(self.tree.branch)
+ except errors.InvalidRevisionSpec, e:
+ self.assertEqual(revision_spec, e.spec)
+ self.assertEqual(extra, e.extra)
+ else:
+ self.fail('Expected InvalidRevisionSpec to be raised for'
+ ' %r.as_revision_id' % (revision_spec,))
+
+ def assertAsRevisionId(self, revision_id, revision_spec):
+ """Calling as_revision_id() should return the specified id."""
+ spec = RevisionSpec.from_string(revision_spec)
+ self.assertEqual(revision_id,
+ spec.as_revision_id(self.tree.branch))
+
+ def get_as_tree(self, revision_spec, tree=None):
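+        """Return the revision tree selected by revision_spec in tree's branch."""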
+ if tree is None:
+ tree = self.tree
+ spec = RevisionSpec.from_string(revision_spec)
+ return spec.as_tree(tree.branch)
+
+
+class RevisionSpecMatchOnTrap(RevisionSpec):
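+    """A RevisionSpec that records the (branch, revs) arguments to _match_on."""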
+
+ def _match_on(self, branch, revs):
+ self.last_call = (branch, revs)
+ return super(RevisionSpecMatchOnTrap, self)._match_on(branch, revs)
+
+
+class TestRevisionSpecBase(TestRevisionSpec):
+
+ def test_wants_revision_history(self):
+ # If wants_revision_history = True, then _match_on should get the
+ # branch revision history
+ spec = RevisionSpecMatchOnTrap('foo', _internal=True)
+ spec.wants_revision_history = True
+ self.callDeprecated(['RevisionSpec.wants_revision_history was '
+ 'deprecated in 2.5 (RevisionSpecMatchOnTrap).'],
+ spec.in_history, self.tree.branch)
+
+        self.assertEqual((self.tree.branch, ['r1', 'r2']),
+ spec.last_call)
+
+ def test_wants_no_revision_history(self):
+ # If wants_revision_history = False, then _match_on should get None for
+ # the branch revision history
+ spec = RevisionSpecMatchOnTrap('foo', _internal=True)
+ spec.in_history(self.tree.branch)
+
+ self.assertEqual((self.tree.branch, None), spec.last_call)
+
+
+class TestOddRevisionSpec(TestRevisionSpec):
+ """Test things that aren't normally thought of as revision specs"""
+
+ def test_none(self):
+ self.assertInHistoryIs(None, None, None)
+
+ def test_object(self):
+ self.assertRaises(TypeError, RevisionSpec.from_string, object())
+
+
+class RevisionSpec_bork(RevisionSpec):
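+    """A dummy revspec resolving 'bork' to r1, used to test extra DWIM revspecs."""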
+
+ prefix = 'irrelevant:'
+
+ def _match_on(self, branch, revs):
+ if self.spec == "bork":
+ return RevisionInfo.from_revision_id(branch, "r1")
+ else:
+ raise errors.InvalidRevisionSpec(self.spec, branch)
+
+
+class TestRevisionSpec_dwim(TestRevisionSpec):
+
+    # Don't need to test revnos explicitly since TRS_revno already
+ # covers that well for us
+ def test_dwim_spec_revno(self):
+ self.assertInHistoryIs(2, 'r2', '2')
+ self.assertAsRevisionId('alt_r2', '1.1.1')
+
+ def test_dwim_spec_revid(self):
+ self.assertInHistoryIs(2, 'r2', 'r2')
+
+ def test_dwim_spec_tag(self):
+ self.tree.branch.tags.set_tag('footag', 'r1')
+ self.assertAsRevisionId('r1', 'footag')
+ self.tree.branch.tags.delete_tag('footag')
+ self.assertRaises(errors.InvalidRevisionSpec,
+ self.get_in_history, 'footag')
+
+ def test_dwim_spec_tag_that_looks_like_revno(self):
+ # Test that we slip past revno with things that look like revnos,
+ # but aren't. Tags are convenient for testing this since we can
+ # make them look however we want.
+ self.tree.branch.tags.set_tag('3', 'r2')
+ self.assertAsRevisionId('r2', '3')
+ self.build_tree(['tree/b'])
+ self.tree.add(['b'])
+ self.tree.commit('b', rev_id='r3')
+ self.assertAsRevisionId('r3', '3')
+
+ def test_dwim_spec_date(self):
+ self.assertAsRevisionId('r1', 'today')
+
+ def test_dwim_spec_branch(self):
+ self.assertInHistoryIs(None, 'alt_r2', 'tree2')
+
+ def test_dwim_spec_nonexistent(self):
+ self.assertInvalid('somethingrandom', invalid_as_revision_id=False)
+ self.assertInvalid('-1.1', invalid_as_revision_id=False)
+ self.assertInvalid('.1', invalid_as_revision_id=False)
+ self.assertInvalid('1..1', invalid_as_revision_id=False)
+ self.assertInvalid('1.2..1', invalid_as_revision_id=False)
+ self.assertInvalid('1.', invalid_as_revision_id=False)
+
+ def test_append_dwim_revspec(self):
+ original_dwim_revspecs = list(RevisionSpec_dwim._possible_revspecs)
+ def reset_dwim_revspecs():
+ RevisionSpec_dwim._possible_revspecs = original_dwim_revspecs
+ self.addCleanup(reset_dwim_revspecs)
+ RevisionSpec_dwim.append_possible_revspec(RevisionSpec_bork)
+ self.assertAsRevisionId('r1', 'bork')
+
+ def test_append_lazy_dwim_revspec(self):
+ original_dwim_revspecs = list(RevisionSpec_dwim._possible_revspecs)
+ def reset_dwim_revspecs():
+ RevisionSpec_dwim._possible_revspecs = original_dwim_revspecs
+ self.addCleanup(reset_dwim_revspecs)
+ RevisionSpec_dwim.append_possible_lazy_revspec(
+ "bzrlib.tests.test_revisionspec", "RevisionSpec_bork")
+ self.assertAsRevisionId('r1', 'bork')
+
+
+class TestRevisionSpec_revno(TestRevisionSpec):
+
+ def test_positive_int(self):
+ self.assertInHistoryIs(0, 'null:', '0')
+ self.assertInHistoryIs(1, 'r1', '1')
+ self.assertInHistoryIs(2, 'r2', '2')
+ self.assertInvalid('3')
+
+ def test_dotted_decimal(self):
+ self.assertInHistoryIs(None, 'alt_r2', '1.1.1')
+ self.assertInvalid('1.1.123')
+
+ def test_negative_int(self):
+ self.assertInHistoryIs(2, 'r2', '-1')
+ self.assertInHistoryIs(1, 'r1', '-2')
+
+ self.assertInHistoryIs(1, 'r1', '-3')
+ self.assertInHistoryIs(1, 'r1', '-4')
+ self.assertInHistoryIs(1, 'r1', '-100')
+
+ def test_positive(self):
+ self.assertInHistoryIs(0, 'null:', 'revno:0')
+ self.assertInHistoryIs(1, 'r1', 'revno:1')
+ self.assertInHistoryIs(2, 'r2', 'revno:2')
+
+ self.assertInvalid('revno:3')
+
+ def test_negative(self):
+ self.assertInHistoryIs(2, 'r2', 'revno:-1')
+ self.assertInHistoryIs(1, 'r1', 'revno:-2')
+
+ self.assertInHistoryIs(1, 'r1', 'revno:-3')
+ self.assertInHistoryIs(1, 'r1', 'revno:-4')
+
+ def test_invalid_number(self):
+ # Get the right exception text
+ try:
+ int('X')
+ except ValueError, e:
+ pass
+ self.assertInvalid('revno:X', extra='\n' + str(e))
+
+ def test_missing_number_and_branch(self):
+ self.assertInvalid('revno::',
+ extra='\ncannot have an empty revno and no branch')
+
+ def test_invalid_number_with_branch(self):
+ try:
+ int('X')
+ except ValueError, e:
+ pass
+ self.assertInvalid('revno:X:tree2', extra='\n' + str(e))
+
+ def test_non_exact_branch(self):
+ # It seems better to require an exact path to the branch
+ # Branch.open() rather than using Branch.open_containing()
+ spec = RevisionSpec.from_string('revno:2:tree2/a')
+ self.assertRaises(errors.NotBranchError,
+ spec.in_history, self.tree.branch)
+
+ def test_with_branch(self):
+ # Passing a URL overrides the supplied branch path
+ revinfo = self.get_in_history('revno:2:tree2')
+ self.assertNotEqual(self.tree.branch.base, revinfo.branch.base)
+ self.assertEqual(self.tree2.branch.base, revinfo.branch.base)
+ self.assertEqual(2, revinfo.revno)
+ self.assertEqual('alt_r2', revinfo.rev_id)
+
+ def test_int_with_branch(self):
+ revinfo = self.get_in_history('2:tree2')
+ self.assertNotEqual(self.tree.branch.base, revinfo.branch.base)
+ self.assertEqual(self.tree2.branch.base, revinfo.branch.base)
+ self.assertEqual(2, revinfo.revno)
+ self.assertEqual('alt_r2', revinfo.rev_id)
+
+ def test_with_url(self):
+ url = self.get_url() + '/tree2'
+ revinfo = self.get_in_history('revno:2:%s' % (url,))
+ self.assertNotEqual(self.tree.branch.base, revinfo.branch.base)
+ self.assertEqual(self.tree2.branch.base, revinfo.branch.base)
+ self.assertEqual(2, revinfo.revno)
+ self.assertEqual('alt_r2', revinfo.rev_id)
+
+ def test_negative_with_url(self):
+ url = self.get_url() + '/tree2'
+ revinfo = self.get_in_history('revno:-1:%s' % (url,))
+ self.assertNotEqual(self.tree.branch.base, revinfo.branch.base)
+ self.assertEqual(self.tree2.branch.base, revinfo.branch.base)
+ self.assertEqual(2, revinfo.revno)
+ self.assertEqual('alt_r2', revinfo.rev_id)
+
+ def test_different_history_lengths(self):
+ # Make sure we use the revisions and offsets in the supplied branch
+ # not the ones in the original branch.
+ self.tree2.commit('three', rev_id='r3')
+ self.assertInHistoryIs(3, 'r3', 'revno:3:tree2')
+ self.assertInHistoryIs(3, 'r3', 'revno:-1:tree2')
+
+ def test_invalid_branch(self):
+ self.assertRaises(errors.NotBranchError,
+ self.get_in_history, 'revno:-1:tree3')
+
+ def test_invalid_revno_in_branch(self):
+ self.tree.commit('three', rev_id='r3')
+ self.assertInvalid('revno:3:tree2')
+
+ def test_revno_n_path(self):
+ """Old revno:N:path tests"""
+ wta = self.make_branch_and_tree('a')
+ ba = wta.branch
+
+ wta.commit('Commit one', rev_id='a@r-0-1')
+ wta.commit('Commit two', rev_id='a@r-0-2')
+ wta.commit('Commit three', rev_id='a@r-0-3')
+
+ wtb = self.make_branch_and_tree('b')
+ bb = wtb.branch
+
+ wtb.commit('Commit one', rev_id='b@r-0-1')
+ wtb.commit('Commit two', rev_id='b@r-0-2')
+ wtb.commit('Commit three', rev_id='b@r-0-3')
+
+ self.assertEqual((1, 'a@r-0-1'),
+ spec_in_history('revno:1:a/', ba))
+ # The argument of in_history should be ignored since it is
+ # redundant with the path in the spec.
+ self.assertEqual((1, 'a@r-0-1'),
+ spec_in_history('revno:1:a/', None))
+ self.assertEqual((1, 'a@r-0-1'),
+ spec_in_history('revno:1:a/', bb))
+ self.assertEqual((2, 'b@r-0-2'),
+ spec_in_history('revno:2:b/', None))
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('null:', '0')
+ self.assertAsRevisionId('r1', '1')
+ self.assertAsRevisionId('r2', '2')
+ self.assertAsRevisionId('r1', '-2')
+ self.assertAsRevisionId('r2', '-1')
+ self.assertAsRevisionId('alt_r2', '1.1.1')
+
+ def test_as_tree(self):
+ tree = self.get_as_tree('0')
+ self.assertEquals(_mod_revision.NULL_REVISION, tree.get_revision_id())
+ tree = self.get_as_tree('1')
+ self.assertEquals('r1', tree.get_revision_id())
+ tree = self.get_as_tree('2')
+ self.assertEquals('r2', tree.get_revision_id())
+ tree = self.get_as_tree('-2')
+ self.assertEquals('r1', tree.get_revision_id())
+ tree = self.get_as_tree('-1')
+ self.assertEquals('r2', tree.get_revision_id())
+ tree = self.get_as_tree('1.1.1')
+ self.assertEquals('alt_r2', tree.get_revision_id())
+
+
+class TestRevisionSpec_revid(TestRevisionSpec):
+
+ def test_in_history(self):
+ # We should be able to access revisions that are directly
+ # in the history.
+ self.assertInHistoryIs(1, 'r1', 'revid:r1')
+ self.assertInHistoryIs(2, 'r2', 'revid:r2')
+
+ def test_missing(self):
+ self.assertInvalid('revid:r3', invalid_as_revision_id=False)
+
+ def test_merged(self):
+ """We can reach revisions in the ancestry"""
+ self.assertInHistoryIs(None, 'alt_r2', 'revid:alt_r2')
+
+ def test_not_here(self):
+ self.tree2.commit('alt third', rev_id='alt_r3')
+ # It exists in tree2, but not in tree
+ self.assertInvalid('revid:alt_r3', invalid_as_revision_id=False)
+
+ def test_in_repository(self):
+ """We can get any revision id in the repository"""
+ # XXX: This may change in the future, but for now, it is true
+ self.tree2.commit('alt third', rev_id='alt_r3')
+ self.tree.branch.fetch(self.tree2.branch, 'alt_r3')
+ self.assertInHistoryIs(None, 'alt_r3', 'revid:alt_r3')
+
+ def test_unicode(self):
+ """We correctly convert a unicode ui string to an encoded revid."""
+ revision_id = u'\N{SNOWMAN}'.encode('utf-8')
+ self.tree.commit('unicode', rev_id=revision_id)
+ self.assertInHistoryIs(3, revision_id, u'revid:\N{SNOWMAN}')
+ self.assertInHistoryIs(3, revision_id, 'revid:' + revision_id)
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('r1', 'revid:r1')
+ self.assertAsRevisionId('r2', 'revid:r2')
+ self.assertAsRevisionId('alt_r2', 'revid:alt_r2')
+
+
+class TestRevisionSpec_last(TestRevisionSpec):
+
+ def test_positive(self):
+ self.assertInHistoryIs(2, 'r2', 'last:1')
+ self.assertInHistoryIs(1, 'r1', 'last:2')
+ self.assertInHistoryIs(0, 'null:', 'last:3')
+
+ def test_empty(self):
+ self.assertInHistoryIs(2, 'r2', 'last:')
+
+ def test_negative(self):
+ self.assertInvalid('last:-1',
+ extra='\nyou must supply a positive value')
+
+ def test_missing(self):
+ self.assertInvalid('last:4')
+
+ def test_no_history(self):
+ tree = self.make_branch_and_tree('tree3')
+
+ self.assertRaises(errors.NoCommits,
+ spec_in_history, 'last:', tree.branch)
+
+ def test_not_a_number(self):
+ try:
+ int('Y')
+ except ValueError, e:
+ pass
+ self.assertInvalid('last:Y', extra='\n' + str(e))
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('r2', 'last:1')
+ self.assertAsRevisionId('r1', 'last:2')
+
+
+class TestRevisionSpec_before(TestRevisionSpec):
+
+ def test_int(self):
+ self.assertInHistoryIs(1, 'r1', 'before:2')
+ self.assertInHistoryIs(1, 'r1', 'before:-1')
+
+ def test_before_one(self):
+ self.assertInHistoryIs(0, 'null:', 'before:1')
+
+ def test_before_none(self):
+ self.assertInvalid('before:0',
+ extra='\ncannot go before the null: revision')
+
+ def test_revid(self):
+ self.assertInHistoryIs(1, 'r1', 'before:revid:r2')
+
+ def test_last(self):
+ self.assertInHistoryIs(1, 'r1', 'before:last:1')
+
+ def test_alt_revid(self):
+ # This will grab the left-most ancestor for alternate histories
+ self.assertInHistoryIs(1, 'r1', 'before:revid:alt_r2')
+
+ def test_alt_no_parents(self):
+ new_tree = self.make_branch_and_tree('new_tree')
+ new_tree.commit('first', rev_id='new_r1')
+ self.tree.branch.fetch(new_tree.branch, 'new_r1')
+ self.assertInHistoryIs(0, 'null:', 'before:revid:new_r1')
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('r1', 'before:revid:r2')
+ self.assertAsRevisionId('r1', 'before:2')
+ self.assertAsRevisionId('r1', 'before:1.1.1')
+ self.assertAsRevisionId('r1', 'before:revid:alt_r2')
+
+
+class TestRevisionSpec_tag(TestRevisionSpec):
+
+ def make_branch_and_tree(self, relpath):
+ # override format as the default one may not support tags
+ return TestRevisionSpec.make_branch_and_tree(
+ self, relpath, format='dirstate-tags')
+
+ def test_from_string_tag(self):
+ spec = RevisionSpec.from_string('tag:bzr-0.14')
+ self.assertIsInstance(spec, RevisionSpec_tag)
+ self.assertEqual(spec.spec, 'bzr-0.14')
+
+ def test_lookup_tag(self):
+ self.tree.branch.tags.set_tag('bzr-0.14', 'r1')
+ self.assertInHistoryIs(1, 'r1', 'tag:bzr-0.14')
+ self.tree.branch.tags.set_tag('null_rev', 'null:')
+ self.assertInHistoryIs(0, 'null:', 'tag:null_rev')
+
+ def test_failed_lookup(self):
+ # tags that don't exist give a specific message: arguably we should
+ # just give InvalidRevisionSpec but I think this is more helpful
+ self.assertRaises(errors.NoSuchTag,
+ self.get_in_history,
+ 'tag:some-random-tag')
+
+ def test_as_revision_id(self):
+ self.tree.branch.tags.set_tag('my-tag', 'r2')
+ self.tree.branch.tags.set_tag('null_rev', 'null:')
+ self.assertAsRevisionId('r2', 'tag:my-tag')
+ self.assertAsRevisionId('null:', 'tag:null_rev')
+ self.assertAsRevisionId('r1', 'before:tag:my-tag')
+
+
+class TestRevisionSpec_date(TestRevisionSpec):
+
+ def setUp(self):
+ super(TestRevisionSpec, self).setUp()
+
+ new_tree = self.make_branch_and_tree('new_tree')
+ new_tree.commit('Commit one', rev_id='new_r1',
+ timestamp=time.time() - 60*60*24)
+ new_tree.commit('Commit two', rev_id='new_r2')
+ new_tree.commit('Commit three', rev_id='new_r3')
+
+ self.tree = new_tree
+
+ def test_tomorrow(self):
+ self.assertInvalid('date:tomorrow')
+
+ def test_today(self):
+ self.assertInHistoryIs(2, 'new_r2', 'date:today')
+ self.assertInHistoryIs(1, 'new_r1', 'before:date:today')
+
+ def test_yesterday(self):
+ self.assertInHistoryIs(1, 'new_r1', 'date:yesterday')
+
+ def test_invalid(self):
+ self.assertInvalid('date:foobar', extra='\ninvalid date')
+ # You must have '-' between year/month/day
+ self.assertInvalid('date:20040404', extra='\ninvalid date')
+ # Need 2 digits for each date piece
+ self.assertInvalid('date:2004-4-4', extra='\ninvalid date')
+
+ def test_day(self):
+ now = datetime.datetime.now()
+ self.assertInHistoryIs(2, 'new_r2',
+ 'date:%04d-%02d-%02d' % (now.year, now.month, now.day))
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('new_r2', 'date:today')
+
+
+class TestRevisionSpec_ancestor(TestRevisionSpec):
+
+ def test_non_exact_branch(self):
+ # It seems better to require an exact path to the branch
+ # Branch.open() rather than using Branch.open_containing()
+ self.assertRaises(errors.NotBranchError,
+ self.get_in_history, 'ancestor:tree2/a')
+
+ def test_simple(self):
+ # Common ancestor of trees is 'alt_r2'
+ self.assertInHistoryIs(None, 'alt_r2', 'ancestor:tree2')
+
+ # Going the other way, we get a valid revno
+ tmp = self.tree
+ self.tree = self.tree2
+ self.tree2 = tmp
+ self.assertInHistoryIs(2, 'alt_r2', 'ancestor:tree')
+
+ def test_self(self):
+ self.assertInHistoryIs(2, 'r2', 'ancestor:tree')
+
+ def test_unrelated(self):
+ new_tree = self.make_branch_and_tree('new_tree')
+
+ new_tree.commit('Commit one', rev_id='new_r1')
+ new_tree.commit('Commit two', rev_id='new_r2')
+ new_tree.commit('Commit three', rev_id='new_r3')
+
+ # With no common ancestor, we should raise another user error
+ self.assertRaises(errors.NoCommonAncestor,
+ self.get_in_history, 'ancestor:new_tree')
+
+ def test_no_commits(self):
+ new_tree = self.make_branch_and_tree('new_tree')
+ self.assertRaises(errors.NoCommits,
+ spec_in_history, 'ancestor:new_tree',
+ self.tree.branch)
+
+ self.assertRaises(errors.NoCommits,
+ spec_in_history, 'ancestor:tree',
+ new_tree.branch)
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('alt_r2', 'ancestor:tree2')
+
+ def test_default(self):
+ # We don't have a parent to default to
+ self.assertRaises(errors.NotBranchError, self.get_in_history,
+ 'ancestor:')
+
+ # Create a branch with a parent to default to
+ tree3 = self.tree.bzrdir.sprout('tree3').open_workingtree()
+ tree3.commit('foo', rev_id='r3')
+ self.tree = tree3
+ self.assertInHistoryIs(2, 'r2', 'ancestor:')
+
+
+class TestRevisionSpec_branch(TestRevisionSpec):
+
+ def test_non_exact_branch(self):
+ # It seems better to require an exact path to the branch
+ # Branch.open() rather than using Branch.open_containing()
+ self.assertRaises(errors.NotBranchError,
+ self.get_in_history, 'branch:tree2/a')
+
+ def test_simple(self):
+ self.assertInHistoryIs(None, 'alt_r2', 'branch:tree2')
+
+ def test_self(self):
+ self.assertInHistoryIs(2, 'r2', 'branch:tree')
+
+ def test_unrelated(self):
+ new_tree = self.make_branch_and_tree('new_tree')
+
+ new_tree.commit('Commit one', rev_id='new_r1')
+ new_tree.commit('Commit two', rev_id='new_r2')
+ new_tree.commit('Commit three', rev_id='new_r3')
+
+ self.assertInHistoryIs(None, 'new_r3', 'branch:new_tree')
+
+ # XXX: Right now, we use fetch() to make sure the remote revisions
+ # have been pulled into the local branch. We may change that
+ # behavior in the future.
+ self.assertTrue(self.tree.branch.repository.has_revision('new_r3'))
+
+ def test_no_commits(self):
+ new_tree = self.make_branch_and_tree('new_tree')
+ self.assertRaises(errors.NoCommits,
+ self.get_in_history, 'branch:new_tree')
+ self.assertRaises(errors.NoCommits,
+ self.get_as_tree, 'branch:new_tree')
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('alt_r2', 'branch:tree2')
+
+ def test_as_tree(self):
+ tree = self.get_as_tree('branch:tree', self.tree2)
+ self.assertEquals('r2', tree.get_revision_id())
+ self.assertFalse(self.tree2.branch.repository.has_revision('r2'))
+
+
+class TestRevisionSpec_submit(TestRevisionSpec):
+
+ def test_submit_branch(self):
+ # Common ancestor of trees is 'alt_r2'
+ self.assertRaises(errors.NoSubmitBranch, self.get_in_history,
+ 'submit:')
+ self.tree.branch.set_parent('../tree2')
+ self.assertInHistoryIs(None, 'alt_r2', 'submit:')
+ self.tree.branch.set_parent('bogus')
+ self.assertRaises(errors.NotBranchError, self.get_in_history,
+ 'submit:')
+ # submit branch overrides parent branch
+ self.tree.branch.set_submit_branch('tree2')
+ self.assertInHistoryIs(None, 'alt_r2', 'submit:')
+
+ def test_as_revision_id(self):
+ self.tree.branch.set_submit_branch('tree2')
+ self.assertAsRevisionId('alt_r2', 'branch:tree2')
+
+
+class TestRevisionSpec_mainline(TestRevisionSpec):
+
+ def test_as_revision_id(self):
+ self.assertAsRevisionId('r1', 'mainline:1')
+ self.assertAsRevisionId('r2', 'mainline:1.1.1')
+ self.assertAsRevisionId('r2', 'mainline:revid:alt_r2')
+ spec = RevisionSpec.from_string('mainline:revid:alt_r22')
+ e = self.assertRaises(errors.InvalidRevisionSpec,
+ spec.as_revision_id, self.tree.branch)
+ self.assertContainsRe(str(e),
+ "Requested revision: 'mainline:revid:alt_r22' does not exist in"
+ " branch: ")
+
+ def test_in_history(self):
+ self.assertInHistoryIs(2, 'r2', 'mainline:revid:alt_r2')
+
+
+class TestRevisionSpec_annotate(TestRevisionSpec):
+
+ def setUp(self):
+ TestRevisionSpec.setUp(self)
+ self.tree = self.make_branch_and_tree('annotate-tree')
+ self.build_tree_contents([('annotate-tree/file1', '1\n')])
+ self.tree.add('file1')
+ self.tree.commit('r1', rev_id='r1')
+ self.build_tree_contents([('annotate-tree/file1', '2\n1\n')])
+ self.tree.commit('r2', rev_id='r2')
+ self.build_tree_contents([('annotate-tree/file1', '2\n1\n3\n')])
+
+ def test_as_revision_id_r1(self):
+ self.assertAsRevisionId('r1', 'annotate:annotate-tree/file1:2')
+
+ def test_as_revision_id_r2(self):
+ self.assertAsRevisionId('r2', 'annotate:annotate-tree/file1:1')
+
+ def test_as_revision_id_uncommitted(self):
+ spec = RevisionSpec.from_string('annotate:annotate-tree/file1:3')
+ e = self.assertRaises(errors.InvalidRevisionSpec,
+ spec.as_revision_id, self.tree.branch)
+ self.assertContainsRe(str(e),
+ r"Requested revision: \'annotate:annotate-tree/file1:3\' does not"
+ " exist in branch: .*\nLine 3 has not been committed.")
+
+ def test_non_existent_line(self):
+ spec = RevisionSpec.from_string('annotate:annotate-tree/file1:4')
+ e = self.assertRaises(errors.InvalidRevisionSpec,
+ spec.as_revision_id, self.tree.branch)
+ self.assertContainsRe(str(e),
+ r"Requested revision: \'annotate:annotate-tree/file1:4\' does not"
+ " exist in branch: .*\nNo such line: 4")
+
+ def test_invalid_line(self):
+ spec = RevisionSpec.from_string('annotate:annotate-tree/file1:q')
+ e = self.assertRaises(errors.InvalidRevisionSpec,
+ spec.as_revision_id, self.tree.branch)
+ self.assertContainsRe(str(e),
+ r"Requested revision: \'annotate:annotate-tree/file1:q\' does not"
+ " exist in branch: .*\nNo such line: q")
+
+ def test_no_such_file(self):
+ spec = RevisionSpec.from_string('annotate:annotate-tree/file2:1')
+ e = self.assertRaises(errors.InvalidRevisionSpec,
+ spec.as_revision_id, self.tree.branch)
+ self.assertContainsRe(str(e),
+ r"Requested revision: \'annotate:annotate-tree/file2:1\' does not"
+ " exist in branch: .*\nFile 'file2' is not versioned")
+
+ def test_no_such_file_with_colon(self):
+ spec = RevisionSpec.from_string('annotate:annotate-tree/fi:le2:1')
+ e = self.assertRaises(errors.InvalidRevisionSpec,
+ spec.as_revision_id, self.tree.branch)
+ self.assertContainsRe(str(e),
+ r"Requested revision: \'annotate:annotate-tree/fi:le2:1\' does not"
+ " exist in branch: .*\nFile 'fi:le2' is not versioned")
diff --git a/bzrlib/tests/test_revisiontree.py b/bzrlib/tests/test_revisiontree.py
new file mode 100644
index 0000000..b24b6d3
--- /dev/null
+++ b/bzrlib/tests/test_revisiontree.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2006, 2008-2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the RevisionTree class."""
+
+from bzrlib import (
+ errors,
+ revision,
+ )
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestTreeWithCommits(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestTreeWithCommits, self).setUp()
+ self.t = self.make_branch_and_tree('.')
+ self.rev_id = self.t.commit('foo', allow_pointless=True)
+ self.rev_tree = self.t.branch.repository.revision_tree(self.rev_id)
+
+ def test_empty_no_unknowns(self):
+ self.assertEqual([], list(self.rev_tree.unknowns()))
+
+ def test_no_conflicts(self):
+ self.assertEqual([], list(self.rev_tree.conflicts()))
+
+ def test_parents(self):
+ """RevisionTree.parent_ids should match the revision graph."""
+ # XXX: TODO: Should this be a repository_implementation test ?
+ # at the end of the graph, we get []
+ self.assertEqual([], self.rev_tree.get_parent_ids())
+ # do a commit to look further up
+ revid_2 = self.t.commit('bar', allow_pointless=True)
+ self.assertEqual(
+ [self.rev_id],
+ self.t.branch.repository.revision_tree(revid_2).get_parent_ids())
+ # TODO commit a merge and check it is reported correctly.
+
+ # the parents for a revision_tree(NULL_REVISION) are []:
+ self.assertEqual([],
+ self.t.branch.repository.revision_tree(
+ revision.NULL_REVISION).get_parent_ids())
+
+ def test_empty_no_root(self):
+ null_tree = self.t.branch.repository.revision_tree(
+ revision.NULL_REVISION)
+ self.assertIs(None, null_tree.get_root_id())
+
+ def test_get_file_revision_root(self):
+ self.assertEquals(self.rev_id,
+ self.rev_tree.get_file_revision(self.rev_tree.get_root_id()))
+
+ def test_get_file_revision(self):
+ self.build_tree_contents([('a', 'initial')])
+ self.t.add(['a'])
+ revid1 = self.t.commit('add a')
+ revid2 = self.t.commit('another change', allow_pointless=True)
+ tree = self.t.branch.repository.revision_tree(revid2)
+ self.assertEquals(revid1,
+ tree.get_file_revision(tree.path2id('a')))
+
+ def test_get_file_mtime_ghost(self):
+ file_id = iter(self.rev_tree.all_file_ids()).next()
+ self.rev_tree.root_inventory[file_id].revision = 'ghostrev'
+ self.assertRaises(errors.FileTimestampUnavailable,
+ self.rev_tree.get_file_mtime, file_id)
diff --git a/bzrlib/tests/test_rio.py b/bzrlib/tests/test_rio.py
new file mode 100644
index 0000000..98590aa
--- /dev/null
+++ b/bzrlib/tests/test_rio.py
@@ -0,0 +1,402 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for rio serialization
+
+A simple, reproducible structured IO format.
+
+rio itself works in Unicode strings. It is typically encoded to UTF-8,
+but this depends on the transport.
+"""
+
+import re
+from tempfile import TemporaryFile
+
+from bzrlib import (
+ rio,
+ )
+from bzrlib.tests import TestCase
+from bzrlib.rio import (
+ RioReader,
+ Stanza,
+ read_stanza,
+ read_stanzas,
+ rio_file,
+ )
+
+
+class TestRio(TestCase):
+
+ def test_stanza(self):
+ """Construct rio stanza in memory"""
+ s = Stanza(number='42', name="fred")
+ self.assertTrue('number' in s)
+ self.assertFalse('color' in s)
+ self.assertFalse('42' in s)
+ self.assertEquals(list(s.iter_pairs()),
+ [('name', 'fred'), ('number', '42')])
+ self.assertEquals(s.get('number'), '42')
+ self.assertEquals(s.get('name'), 'fred')
+
+ def test_value_checks(self):
+ """rio checks types on construction"""
+ # these aren't enforced at construction time
+ ## self.assertRaises(ValueError,
+ ## Stanza, complex=42 + 3j)
+ ## self.assertRaises(ValueError,
+ ## Stanza, several=range(10))
+
+ def test_empty_value(self):
+ """Serialize stanza with empty field"""
+ s = Stanza(empty='')
+ self.assertEqualDiff(s.to_string(),
+ "empty: \n")
+
+ def test_to_lines(self):
+ """Write simple rio stanza to string"""
+ s = Stanza(number='42', name='fred')
+ self.assertEquals(list(s.to_lines()),
+ ['name: fred\n',
+ 'number: 42\n'])
+
+ def test_as_dict(self):
+ """Convert rio Stanza to dictionary"""
+ s = Stanza(number='42', name='fred')
+ sd = s.as_dict()
+ self.assertEquals(sd, dict(number='42', name='fred'))
+
+ def test_to_file(self):
+ """Write rio to file"""
+ tmpf = TemporaryFile()
+ s = Stanza(a_thing='something with "quotes like \\"this\\""', number='42', name='fred')
+ s.write(tmpf)
+ tmpf.seek(0)
+ self.assertEqualDiff(tmpf.read(), r'''
+a_thing: something with "quotes like \"this\""
+name: fred
+number: 42
+'''[1:])
+
+ def test_multiline_string(self):
+ tmpf = TemporaryFile()
+ s = Stanza(motto="war is peace\nfreedom is slavery\nignorance is strength")
+ s.write(tmpf)
+ tmpf.seek(0)
+ self.assertEqualDiff(tmpf.read(), '''\
+motto: war is peace
+\tfreedom is slavery
+\tignorance is strength
+''')
+ tmpf.seek(0)
+ s2 = read_stanza(tmpf)
+ self.assertEquals(s, s2)
+
+ def test_read_stanza(self):
+ """Load stanza from string"""
+ lines = """\
+revision: mbp@sourcefrog.net-123-abc
+timestamp: 1130653962
+timezone: 36000
+committer: Martin Pool <mbp@test.sourcefrog.net>
+""".splitlines(True)
+ s = read_stanza(lines)
+ self.assertTrue('revision' in s)
+ self.assertEqualDiff(s.get('revision'), 'mbp@sourcefrog.net-123-abc')
+ self.assertEquals(list(s.iter_pairs()),
+ [('revision', 'mbp@sourcefrog.net-123-abc'),
+ ('timestamp', '1130653962'),
+ ('timezone', '36000'),
+ ('committer', "Martin Pool <mbp@test.sourcefrog.net>")])
+ self.assertEquals(len(s), 4)
+
+ def test_repeated_field(self):
+ """Repeated field in rio"""
+ s = Stanza()
+ for k, v in [('a', '10'), ('b', '20'), ('a', '100'), ('b', '200'),
+ ('a', '1000'), ('b', '2000')]:
+ s.add(k, v)
+ s2 = read_stanza(s.to_lines())
+ self.assertEquals(s, s2)
+ self.assertEquals(s.get_all('a'), map(str, [10, 100, 1000]))
+ self.assertEquals(s.get_all('b'), map(str, [20, 200, 2000]))
+
+ def test_backslash(self):
+ s = Stanza(q='\\')
+ t = s.to_string()
+ self.assertEqualDiff(t, 'q: \\\n')
+ s2 = read_stanza(s.to_lines())
+ self.assertEquals(s, s2)
+
+ def test_blank_line(self):
+ s = Stanza(none='', one='\n', two='\n\n')
+ self.assertEqualDiff(s.to_string(), """\
+none:\x20
+one:\x20
+\t
+two:\x20
+\t
+\t
+""")
+ s2 = read_stanza(s.to_lines())
+ self.assertEquals(s, s2)
+
+ def test_whitespace_value(self):
+ s = Stanza(space=' ', tabs='\t\t\t', combo='\n\t\t\n')
+ self.assertEqualDiff(s.to_string(), """\
+combo:\x20
+\t\t\t
+\t
+space:\x20\x20
+tabs: \t\t\t
+""")
+ s2 = read_stanza(s.to_lines())
+ self.assertEquals(s, s2)
+ self.rio_file_stanzas([s])
+
+ def test_quoted(self):
+ """rio quoted string cases"""
+ s = Stanza(q1='"hello"',
+ q2=' "for',
+ q3='\n\n"for"\n',
+ q4='for\n"\nfor',
+ q5='\n',
+ q6='"',
+ q7='""',
+ q8='\\',
+ q9='\\"\\"',
+ )
+ s2 = read_stanza(s.to_lines())
+ self.assertEquals(s, s2)
+ # apparent bug in read_stanza
+ # s3 = read_stanza(self.stanzas_to_str([s]))
+ # self.assertEquals(s, s3)
+
+ def test_read_empty(self):
+ """Detect end of rio file"""
+ s = read_stanza([])
+ self.assertEqual(s, None)
+ self.assertTrue(s is None)
+
+ def test_read_nul_byte(self):
+ """File consisting of a nul byte causes an error."""
+ self.assertRaises(ValueError, read_stanza, ['\0'])
+
+ def test_read_nul_bytes(self):
+ """File consisting of many nul bytes causes an error."""
+ self.assertRaises(ValueError, read_stanza, ['\0' * 100])
+
+ def test_read_iter(self):
+ """Read several stanzas from file"""
+ tmpf = TemporaryFile()
+ tmpf.write("""\
+version_header: 1
+
+name: foo
+val: 123
+
+name: bar
+val: 129319
+""")
+ tmpf.seek(0)
+ reader = read_stanzas(tmpf)
+ read_iter = iter(reader)
+ stuff = list(reader)
+ self.assertEqual(stuff,
+ [ Stanza(version_header='1'),
+ Stanza(name="foo", val='123'),
+ Stanza(name="bar", val='129319'), ])
+
+ def test_read_several(self):
+ """Read several stanzas from file"""
+ tmpf = TemporaryFile()
+ tmpf.write("""\
+version_header: 1
+
+name: foo
+val: 123
+
+name: quoted
+address: "Willowglen"
+\t 42 Wallaby Way
+\t Sydney
+
+name: bar
+val: 129319
+""")
+ tmpf.seek(0)
+ s = read_stanza(tmpf)
+ self.assertEquals(s, Stanza(version_header='1'))
+ s = read_stanza(tmpf)
+ self.assertEquals(s, Stanza(name="foo", val='123'))
+ s = read_stanza(tmpf)
+ self.assertEqualDiff(s.get('name'), 'quoted')
+ self.assertEqualDiff(s.get('address'), ' "Willowglen"\n 42 Wallaby Way\n Sydney')
+ s = read_stanza(tmpf)
+ self.assertEquals(s, Stanza(name="bar", val='129319'))
+ s = read_stanza(tmpf)
+ self.assertEquals(s, None)
+ self.check_rio_file(tmpf)
+
+ def check_rio_file(self, real_file):
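+        # Round-trip check: parse the file with RioReader, re-serialize it
+        # with rio_file, and the result should match the original file
+        # contents.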
+ real_file.seek(0)
+ read_write = rio_file(RioReader(real_file)).read()
+ real_file.seek(0)
+ self.assertEquals(read_write, real_file.read())
+
+ @staticmethod
+ def stanzas_to_str(stanzas):
+ return rio_file(stanzas).read()
+
+ def rio_file_stanzas(self, stanzas):
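+        # Round-trip the stanzas through rio_file and RioReader and check
+        # they come back unchanged.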
+ new_stanzas = list(RioReader(rio_file(stanzas)))
+ self.assertEqual(new_stanzas, stanzas)
+
+ def test_tricky_quoted(self):
+ tmpf = TemporaryFile()
+ tmpf.write('''\
+s: "one"
+
+s:\x20
+\t"one"
+\t
+
+s: "
+
+s: ""
+
+s: """
+
+s:\x20
+\t
+
+s: \\
+
+s:\x20
+\t\\
+\t\\\\
+\t
+
+s: word\\
+
+s: quote"
+
+s: backslashes\\\\\\
+
+s: both\\\"
+
+''')
+ tmpf.seek(0)
+ expected_vals = ['"one"',
+ '\n"one"\n',
+ '"',
+ '""',
+ '"""',
+ '\n',
+ '\\',
+ '\n\\\n\\\\\n',
+ 'word\\',
+ 'quote\"',
+ 'backslashes\\\\\\',
+ 'both\\\"',
+ ]
+ for expected in expected_vals:
+ stanza = read_stanza(tmpf)
+ self.rio_file_stanzas([stanza])
+ self.assertEquals(len(stanza), 1)
+ self.assertEqualDiff(stanza.get('s'), expected)
+
+ def test_write_empty_stanza(self):
+ """Write empty stanza"""
+ l = list(Stanza().to_lines())
+ self.assertEquals(l, [])
+
+ def test_rio_raises_type_error(self):
+ """TypeError on adding invalid type to Stanza"""
+ s = Stanza()
+ self.assertRaises(TypeError, s.add, 'foo', {})
+
+ def test_rio_raises_type_error_key(self):
+ """TypeError on adding invalid type to Stanza"""
+ s = Stanza()
+ self.assertRaises(TypeError, s.add, 10, {})
+
+ def test_rio_unicode(self):
+ uni_data = u'\N{KATAKANA LETTER O}'
+ s = Stanza(foo=uni_data)
+ self.assertEquals(s.get('foo'), uni_data)
+ raw_lines = s.to_lines()
+ self.assertEquals(raw_lines,
+ ['foo: ' + uni_data.encode('utf-8') + '\n'])
+ new_s = read_stanza(raw_lines)
+ self.assertEquals(new_s.get('foo'), uni_data)
+
+ def test_rio_to_unicode(self):
+ uni_data = u'\N{KATAKANA LETTER O}'
+ s = Stanza(foo=uni_data)
+ unicode_str = s.to_unicode()
+ self.assertEqual(u'foo: %s\n' % (uni_data,), unicode_str)
+ new_s = rio.read_stanza_unicode(unicode_str.splitlines(True))
+ self.assertEqual(uni_data, new_s.get('foo'))
+
+ def test_nested_rio_unicode(self):
+ uni_data = u'\N{KATAKANA LETTER O}'
+ s = Stanza(foo=uni_data)
+ parent_stanza = Stanza(child=s.to_unicode())
+ raw_lines = parent_stanza.to_lines()
+ self.assertEqual(['child: foo: ' + uni_data.encode('utf-8') + '\n',
+ '\t\n',
+ ], raw_lines)
+ new_parent = read_stanza(raw_lines)
+ child_text = new_parent.get('child')
+ self.assertEqual(u'foo: %s\n' % uni_data, child_text)
+ new_child = rio.read_stanza_unicode(child_text.splitlines(True))
+ self.assertEqual(uni_data, new_child.get('foo'))
+
+ def mail_munge(self, lines, dos_nl=True):
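+        # Simulate what a mail client might do to patch lines: strip trailing
+        # spaces before each newline and, when dos_nl is set, turn bare LF
+        # line endings into CRLF.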
+ new_lines = []
+ for line in lines:
+ line = re.sub(' *\n', '\n', line)
+ if dos_nl:
+ line = re.sub('([^\r])\n', '\\1\r\n', line)
+ new_lines.append(line)
+ return new_lines
+
+ def test_patch_rio(self):
+ stanza = Stanza(data='#\n\r\\r ', space=' ' * 255, hash='#' * 255)
+ lines = rio.to_patch_lines(stanza)
+ for line in lines:
+ self.assertContainsRe(line, '^# ')
+ self.assertTrue(72 >= len(line))
+ for line in rio.to_patch_lines(stanza, max_width=12):
+ self.assertTrue(12 >= len(line))
+ new_stanza = rio.read_patch_stanza(self.mail_munge(lines,
+ dos_nl=False))
+ lines = self.mail_munge(lines)
+ new_stanza = rio.read_patch_stanza(lines)
+ self.assertEqual('#\n\r\\r ', new_stanza.get('data'))
+ self.assertEqual(' '* 255, new_stanza.get('space'))
+ self.assertEqual('#'* 255, new_stanza.get('hash'))
+
+ def test_patch_rio_linebreaks(self):
+ stanza = Stanza(breaktest='linebreak -/'*30)
+ self.assertContainsRe(rio.to_patch_lines(stanza, 71)[0],
+ 'linebreak\\\\\n')
+ stanza = Stanza(breaktest='linebreak-/'*30)
+ self.assertContainsRe(rio.to_patch_lines(stanza, 70)[0],
+ 'linebreak-\\\\\n')
+ stanza = Stanza(breaktest='linebreak/'*30)
+ self.assertContainsRe(rio.to_patch_lines(stanza, 70)[0],
+ 'linebreak\\\\\n')
diff --git a/bzrlib/tests/test_rules.py b/bzrlib/tests/test_rules.py
new file mode 100644
index 0000000..b86ddf1
--- /dev/null
+++ b/bzrlib/tests/test_rules.py
@@ -0,0 +1,141 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for finding, parsing and searching rule-based preferences."""
+
+import sys
+
+from bzrlib import (
+ errors,
+ rules,
+ tests,
+ )
+
+
+class TestIniBasedRulesSearcher(tests.TestCase):
+
+ def make_searcher(self, text):
+ """Make a _RulesSearcher from a string"""
+ if text is None:
+ lines = None
+ else:
+ lines = text.splitlines()
+ return rules._IniBasedRulesSearcher(lines)
+
+ def test_unknown_namespace(self):
+ self.assertRaises(errors.UnknownRules, rules._IniBasedRulesSearcher,
+ ["[junk]", "foo=bar"])
+
+ def test_get_items_file_missing(self):
+ rs = self.make_searcher(None)
+ self.assertEquals((), rs.get_items('a.txt'))
+ self.assertEquals((), rs.get_selected_items('a.txt', ['foo']))
+ self.assertEquals(None, rs.get_single_value('a.txt', 'foo'))
+
+ def test_get_items_file_empty(self):
+ rs = self.make_searcher("")
+ self.assertEquals((), rs.get_items('a.txt'))
+ self.assertEquals((), rs.get_selected_items('a.txt', ['foo']))
+ self.assertEquals(None, rs.get_single_value('a.txt', 'foo'))
+
+ def test_get_items_from_extension_match(self):
+ rs = self.make_searcher("[name *.txt]\nfoo=bar\na=True\n")
+ self.assertEquals((), rs.get_items('a.py'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('a.txt'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('dir/a.txt'))
+ self.assertEquals((('foo', 'bar'),),
+ rs.get_selected_items('a.txt', ['foo']))
+ self.assertEquals('bar', rs.get_single_value('a.txt', 'foo'))
+
+ def test_get_items_from_multiple_glob_match(self):
+ rs = self.make_searcher(
+ "[name *.txt *.py 'x x' \"y y\"]\nfoo=bar\na=True\n")
+ self.assertEquals((), rs.get_items('NEWS'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('a.py'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('a.txt'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('x x'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('y y'))
+ self.assertEquals('bar', rs.get_single_value('a.txt', 'foo'))
+
+ def test_get_items_pathname_match(self):
+ rs = self.make_searcher("[name ./a.txt]\nfoo=baz\n")
+ self.assertEquals((('foo', 'baz'),),
+ rs.get_items('a.txt'))
+ self.assertEquals('baz', rs.get_single_value('a.txt', 'foo'))
+ self.assertEquals((), rs.get_items('dir/a.txt'))
+ self.assertEquals(None, rs.get_single_value('dir/a.txt', 'foo'))
+
+ def test_get_items_match_first(self):
+ rs = self.make_searcher(
+ "[name ./a.txt]\nfoo=baz\n"
+ "[name *.txt]\nfoo=bar\na=True\n")
+ self.assertEquals((('foo', 'baz'),),
+ rs.get_items('a.txt'))
+ self.assertEquals('baz', rs.get_single_value('a.txt', 'foo'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('dir/a.txt'))
+ self.assertEquals('bar', rs.get_single_value('dir/a.txt', 'foo'))
+
+
+class TestStackedRulesSearcher(tests.TestCase):
+
+ def make_searcher(self, text1=None, text2=None):
+ """Make a _StackedRulesSearcher with 0, 1 or 2 items"""
+ searchers = []
+ if text1 is not None:
+ searchers.append(rules._IniBasedRulesSearcher(
+ text1.splitlines()))
+ if text2 is not None:
+ searchers.append(rules._IniBasedRulesSearcher(
+ text2.splitlines()))
+ return rules._StackedRulesSearcher(searchers)
+
+ def test_stack_searching(self):
+ rs = self.make_searcher(
+ "[name ./a.txt]\nfoo=baz\n",
+ "[name *.txt]\nfoo=bar\na=True\n")
+ self.assertEquals((('foo', 'baz'),),
+ rs.get_items('a.txt'))
+ self.assertEquals('baz', rs.get_single_value('a.txt', 'foo'))
+ self.assertEquals(None, rs.get_single_value('a.txt', 'a'))
+ self.assertEquals((('foo', 'bar'), ('a', 'True')),
+ rs.get_items('dir/a.txt'))
+ self.assertEquals('bar', rs.get_single_value('dir/a.txt', 'foo'))
+ self.assertEquals('True', rs.get_single_value('dir/a.txt', 'a'))
+
+
+class TestRulesPath(tests.TestCase):
+
+ def setUp(self):
+ super(TestRulesPath, self).setUp()
+ self.overrideEnv('HOME', '/home/bogus')
+ if sys.platform == 'win32':
+ self.overrideEnv(
+ 'BZR_HOME', r'C:\Documents and Settings\bogus\Application Data')
+ self.bzr_home = \
+ 'C:/Documents and Settings/bogus/Application Data/bazaar/2.0'
+ else:
+ self.bzr_home = '/home/bogus/.bazaar'
+
+ def test_rules_filename(self):
+ self.assertEqual(rules.rules_filename(),
+ self.bzr_home + '/rules')
diff --git a/bzrlib/tests/test_sampler.py b/bzrlib/tests/test_sampler.py
new file mode 100644
index 0000000..a6f83d7
--- /dev/null
+++ b/bzrlib/tests/test_sampler.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Document what this test file is expecting to test here.
+
+If you need more than one line, make the first line a good sentence on its
+own and add more explanation here, like this.
+
+Be sure to register your new test script in bzrlib/tests/__init__.py -
+search for sampler in there.
+"""
+
+# import system imports here
+import os
+import sys
+
+# import bzrlib specific imports here
+from bzrlib.tests import TestCaseInTempDir
+
+
+# Now we need a test script:
+class DemoTest(TestCaseInTempDir):
+
+ def test_nothing(self):
+ self.assertEqual(1,1)
diff --git a/bzrlib/tests/test_scenarios.py b/bzrlib/tests/test_scenarios.py
new file mode 100644
index 0000000..4aac9bc
--- /dev/null
+++ b/bzrlib/tests/test_scenarios.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for generating multiple tests for scenarios."""
+
+from bzrlib.tests import (
+ TestCase,
+ TestLoader,
+ iter_suite_tests,
+ multiply_tests,
+ )
+
+from bzrlib.tests.scenarios import (
+ load_tests_apply_scenarios,
+ multiply_scenarios,
+ multiply_tests_by_their_scenarios,
+ )
+
+
+# There aren't any actually parameterized tests here, but this exists as a
+# demonstration so that you can interactively observe the tests being
+# multiplied, and so that we check everything hooks up properly.
+load_tests = load_tests_apply_scenarios
+
+
+def vary_by_color():
+ """Very simple static variation example"""
+ for color in ['red', 'green', 'blue']:
+ yield (color, {'color': color})
+
+
+def vary_named_attribute(attr_name):
+ """More sophisticated: vary a named parameter"""
+ yield ('a', {attr_name: 'a'})
+ yield ('b', {attr_name: 'b'})
+
+
+def get_generated_test_attributes(suite, attr_name):
+ """Return the `attr_name` attribute from all tests in the suite"""
+ return sorted([
+ getattr(t, attr_name) for t in iter_suite_tests(suite)])
+
+
+class TestTestScenarios(TestCase):
+
+ def test_multiply_tests(self):
+ loader = TestLoader()
+ suite = loader.suiteClass()
+ multiply_tests(
+ self,
+ vary_by_color(),
+ suite)
+ self.assertEquals(
+ ['blue', 'green', 'red'],
+ get_generated_test_attributes(suite, 'color'))
+
+ def test_multiply_scenarios_from_generators(self):
+ """It's safe to multiply scenarios that come from generators"""
+ s = multiply_scenarios(
+ vary_named_attribute('one'),
+ vary_named_attribute('two'),
+ )
+ self.assertEquals(
+ 2*2,
+ len(s),
+ s)
+
+ def test_multiply_tests_by_their_scenarios(self):
+ loader = TestLoader()
+ suite = loader.suiteClass()
+ test_instance = PretendVaryingTest('test_nothing')
+ multiply_tests_by_their_scenarios(
+ test_instance,
+ suite)
+ self.assertEquals(
+ ['a', 'a', 'b', 'b'],
+ get_generated_test_attributes(suite, 'value'))
+
+ def test_multiply_tests_no_scenarios(self):
+ """Tests with no scenarios attribute aren't multiplied"""
+ suite = TestLoader().suiteClass()
+ multiply_tests_by_their_scenarios(self,
+ suite)
+ self.assertLength(1, list(iter_suite_tests(suite)))
+
+
+class PretendVaryingTest(TestCase):
+
+ scenarios = multiply_scenarios(
+ vary_named_attribute('value'),
+ vary_named_attribute('other'),
+ )
+
+ def test_nothing(self):
+ """This test exists just so it can be multiplied"""
+ pass
diff --git a/bzrlib/tests/test_script.py b/bzrlib/tests/test_script.py
new file mode 100644
index 0000000..11a42fb
--- /dev/null
+++ b/bzrlib/tests/test_script.py
@@ -0,0 +1,639 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from bzrlib import (
+ commands,
+ osutils,
+ tests,
+ trace,
+ ui,
+ )
+from bzrlib.tests import script
+
+
+class TestSyntax(tests.TestCase):
+
+ def test_comment_is_ignored(self):
+ self.assertEquals([], script._script_to_commands('#comment\n'))
+
+ def test_comment_multiple_lines(self):
+ self.assertEquals([
+ (['bar'], None, None, None),
+ ],
+ script._script_to_commands("""
+ # this comment is ignored
+ # so is this
+            # now we run bar
+ $ bar
+ """))
+
+ def test_trim_blank_lines(self):
+ """Blank lines are respected, but trimmed at the start and end.
+
+ Python triple-quoted syntax is going to give stubby/empty blank lines
+ right at the start and the end. These are cut off so that callers don't
+ need special syntax to avoid them.
+
+ However we do want to be able to match commands that emit blank lines.
+ """
+ self.assertEquals([
+ (['bar'], None, '\n', None),
+ ],
+ script._script_to_commands("""
+ $bar
+
+ """))
+
+ def test_simple_command(self):
+ self.assertEquals([(['cd', 'trunk'], None, None, None)],
+ script._script_to_commands('$ cd trunk'))
+
+ def test_command_with_single_quoted_param(self):
+ story = """$ bzr commit -m 'two words'"""
+ self.assertEquals([(['bzr', 'commit', '-m', "'two words'"],
+ None, None, None)],
+ script._script_to_commands(story))
+
+ def test_command_with_double_quoted_param(self):
+ story = """$ bzr commit -m "two words" """
+ self.assertEquals([(['bzr', 'commit', '-m', '"two words"'],
+ None, None, None)],
+ script._script_to_commands(story))
+
+ def test_command_with_input(self):
+ self.assertEquals(
+ [(['cat', '>file'], 'content\n', None, None)],
+ script._script_to_commands('$ cat >file\n<content\n'))
+
+ def test_indented(self):
+        # scripts are commonly indented within the test source code, and
+ # common indentation is stripped off
+ story = """
+ $ bzr add
+ adding file
+ adding file2
+ """
+ self.assertEquals([(['bzr', 'add'], None,
+ 'adding file\nadding file2\n', None)],
+ script._script_to_commands(story))
+
+ def test_command_with_output(self):
+ story = """
+$ bzr add
+adding file
+adding file2
+"""
+ self.assertEquals([(['bzr', 'add'], None,
+ 'adding file\nadding file2\n', None)],
+ script._script_to_commands(story))
+
+ def test_command_with_error(self):
+ story = """
+$ bzr branch foo
+2>bzr: ERROR: Not a branch: "foo"
+"""
+ self.assertEquals([(['bzr', 'branch', 'foo'],
+ None, None, 'bzr: ERROR: Not a branch: "foo"\n')],
+ script._script_to_commands(story))
+
+ def test_input_without_command(self):
+ self.assertRaises(SyntaxError, script._script_to_commands, '<input')
+
+ def test_output_without_command(self):
+ self.assertRaises(SyntaxError, script._script_to_commands, '>input')
+
+ def test_command_with_backquotes(self):
+ story = """
+$ foo = `bzr file-id toto`
+"""
+ self.assertEquals([(['foo', '=', '`bzr file-id toto`'],
+ None, None, None)],
+ script._script_to_commands(story))
+
+
+class TestRedirections(tests.TestCase):
+
+ def _check(self, in_name, out_name, out_mode, remaining, args):
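+        # args is the raw argument list; in_name, out_name, out_mode and
+        # remaining are the expected result of _scan_redirection_options.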
+ self.assertEqual(script._scan_redirection_options(args),
+ (in_name, out_name, out_mode, remaining))
+
+ def test_no_redirection(self):
+ self._check(None, None, None, [], [])
+ self._check(None, None, None, ['foo', 'bar'], ['foo', 'bar'])
+
+ def test_input_redirection(self):
+ self._check('foo', None, None, [], ['<foo'])
+ self._check('foo', None, None, ['bar'], ['bar', '<foo'])
+ self._check('foo', None, None, ['bar'], ['bar', '<', 'foo'])
+ self._check('foo', None, None, ['bar'], ['<foo', 'bar'])
+ self._check('foo', None, None, ['bar', 'baz'], ['bar', '<foo', 'baz'])
+
+ def test_output_redirection(self):
+ self._check(None, 'foo', 'wb+', [], ['>foo'])
+ self._check(None, 'foo', 'wb+', ['bar'], ['bar', '>foo'])
+ self._check(None, 'foo', 'wb+', ['bar'], ['bar', '>', 'foo'])
+ self._check(None, 'foo', 'ab+', [], ['>>foo'])
+ self._check(None, 'foo', 'ab+', ['bar'], ['bar', '>>foo'])
+ self._check(None, 'foo', 'ab+', ['bar'], ['bar', '>>', 'foo'])
+
+ def test_redirection_syntax_errors(self):
+ self._check('', None, None, [], ['<'])
+ self._check(None, '', 'wb+', [], ['>'])
+ self._check(None, '', 'ab+', [], ['>>'])
+ self._check('>', '', 'ab+', [], ['<', '>', '>>'])
+
+
+
+class TestExecution(script.TestCaseWithTransportAndScript):
+
+ def test_unknown_command(self):
+ """A clear error is reported for commands that aren't recognised
+
+ Testing the attributes of the SyntaxError instance is equivalent to
+ using traceback.format_exception_only and comparing with:
+ File "<string>", line 1
+ foo --frob
+ ^
+ SyntaxError: Command not found "foo"
+ """
+ e = self.assertRaises(SyntaxError, self.run_script, "$ foo --frob")
+ self.assertContainsRe(e.msg, "not found.*foo")
+ self.assertEquals(e.text, "foo --frob")
+
+ def test_blank_output_mismatches_output(self):
+ """If you give output, the output must actually be blank.
+
+ See <https://bugs.launchpad.net/bzr/+bug/637830>: previously blank
+ output was a wildcard. Now you must say ... if you want that.
+ """
+ self.assertRaises(AssertionError,
+ self.run_script,
+ """
+ $ echo foo
+ """)
+
+ def test_null_output_matches_option(self):
+ """If you want null output to be a wild card, you can pass
+ null_output_matches_anything to run_script"""
+ self.run_script(
+ """
+ $ echo foo
+ """, null_output_matches_anything=True)
+
+ def test_ellipsis_everything(self):
+ """A simple ellipsis matches everything."""
+ self.run_script("""
+ $ echo foo
+ ...
+ """)
+
+ def test_ellipsis_matches_empty(self):
+ self.run_script("""
+ $ cd .
+ ...
+ """)
+
+ def test_stops_on_unexpected_output(self):
+ story = """
+$ mkdir dir
+$ cd dir
+The cd command outputs nothing
+"""
+ self.assertRaises(AssertionError, self.run_script, story)
+
+ def test_stops_on_unexpected_error(self):
+ story = """
+$ cat
+<Hello
+$ bzr not-a-command
+"""
+ self.assertRaises(AssertionError, self.run_script, story)
+
+ def test_continue_on_expected_error(self):
+ story = """
+$ bzr not-a-command
+2>..."not-a-command"
+"""
+ self.run_script(story)
+
+ def test_continue_on_error_output(self):
+ # The status matters, not the output
+ story = """
+$ bzr init
+...
+$ cat >file
+<Hello
+$ bzr add file
+...
+$ bzr commit -m 'adding file'
+2>...
+"""
+ self.run_script(story)
+
+ def test_ellipsis_output(self):
+ story = """
+$ cat
+<first line
+<second line
+<last line
+first line
+...
+last line
+"""
+ self.run_script(story)
+ story = """
+$ bzr not-a-command
+2>..."not-a-command"
+"""
+ self.run_script(story)
+
+ story = """
+$ bzr branch not-a-branch
+2>bzr: ERROR: Not a branch...not-a-branch/".
+"""
+ self.run_script(story)
+
+
+class TestArgumentProcessing(script.TestCaseWithTransportAndScript):
+
+ def test_globing(self):
+ self.run_script("""
+$ echo cat >cat
+$ echo dog >dog
+$ cat *
+cat
+dog
+""")
+
+ def test_quoted_globbing(self):
+ self.run_script("""
+$ echo cat >cat
+$ cat '*'
+2>*: No such file or directory
+""")
+
+ def test_quotes_removal(self):
+ self.run_script("""
+$ echo 'cat' "dog" '"chicken"' "'dragon'"
+cat dog "chicken" 'dragon'
+""")
+
+ def test_verbosity_isolated(self):
+ """Global verbosity is isolated from commands run in scripts.
+ """
+ # see also 656694; we should get rid of global verbosity
+ self.run_script("""
+ $ bzr init --quiet a
+ """)
+ self.assertEquals(trace.is_quiet(), False)
+
+
+class TestCat(script.TestCaseWithTransportAndScript):
+
+ def test_cat_usage(self):
+ self.assertRaises(SyntaxError, self.run_script, 'cat foo <bar')
+
+ def test_cat_input_to_output(self):
+ retcode, out, err = self.run_command(['cat'],
+ 'content\n', 'content\n', None)
+ self.assertEquals('content\n', out)
+ self.assertEquals(None, err)
+
+ def test_cat_file_to_output(self):
+ self.build_tree_contents([('file', 'content\n')])
+ retcode, out, err = self.run_command(['cat', 'file'],
+ None, 'content\n', None)
+ self.assertEquals('content\n', out)
+ self.assertEquals(None, err)
+
+ def test_cat_input_to_file(self):
+ retcode, out, err = self.run_command(['cat', '>file'],
+ 'content\n', None, None)
+ self.assertFileEqual('content\n', 'file')
+ self.assertEquals(None, out)
+ self.assertEquals(None, err)
+ retcode, out, err = self.run_command(['cat', '>>file'],
+ 'more\n', None, None)
+ self.assertFileEqual('content\nmore\n', 'file')
+ self.assertEquals(None, out)
+ self.assertEquals(None, err)
+
+ def test_cat_file_to_file(self):
+ self.build_tree_contents([('file', 'content\n')])
+ retcode, out, err = self.run_command(['cat', 'file', '>file2'],
+ None, None, None)
+ self.assertFileEqual('content\n', 'file2')
+
+ def test_cat_files_to_file(self):
+ self.build_tree_contents([('cat', 'cat\n')])
+ self.build_tree_contents([('dog', 'dog\n')])
+ retcode, out, err = self.run_command(['cat', 'cat', 'dog', '>file'],
+ None, None, None)
+ self.assertFileEqual('cat\ndog\n', 'file')
+
+ def test_cat_bogus_input_file(self):
+ self.run_script("""
+$ cat <file
+2>file: No such file or directory
+""")
+
+ def test_cat_bogus_output_file(self):
+ self.run_script("""
+$ cat >
+2>: No such file or directory
+""")
+
+ def test_echo_bogus_output_file(self):
+        # We need a backing file system for this test, so it can't be in
+ # TestEcho
+ self.run_script("""
+$ echo >
+2>: No such file or directory
+""")
+
+
+class TestMkdir(script.TestCaseWithTransportAndScript):
+
+ def test_mkdir_usage(self):
+ self.assertRaises(SyntaxError, self.run_script, '$ mkdir')
+ self.assertRaises(SyntaxError, self.run_script, '$ mkdir foo bar')
+
+ def test_mkdir_jailed(self):
+ self.assertRaises(ValueError, self.run_script, '$ mkdir /out-of-jail')
+ self.assertRaises(ValueError, self.run_script, '$ mkdir ../out-of-jail')
+
+ def test_mkdir_in_jail(self):
+ self.run_script("""
+$ mkdir dir
+$ cd dir
+$ mkdir ../dir2
+$ cd ..
+""")
+ self.assertPathExists('dir')
+ self.assertPathExists('dir2')
+
+
+class TestCd(script.TestCaseWithTransportAndScript):
+
+ def test_cd_usage(self):
+ self.assertRaises(SyntaxError, self.run_script, '$ cd foo bar')
+
+ def test_cd_out_of_jail(self):
+ self.assertRaises(ValueError, self.run_script, '$ cd /out-of-jail')
+ self.assertRaises(ValueError, self.run_script, '$ cd ..')
+
+ def test_cd_dir_and_back_home(self):
+ self.assertEquals(self.test_dir, osutils.getcwd())
+ self.run_script("""
+$ mkdir dir
+$ cd dir
+""")
+ self.assertEquals(osutils.pathjoin(self.test_dir, 'dir'),
+ osutils.getcwd())
+
+ self.run_script('$ cd')
+ self.assertEquals(self.test_dir, osutils.getcwd())
+
+
+class TestBzr(script.TestCaseWithTransportAndScript):
+
+ def test_bzr_smoke(self):
+ self.run_script("""
+ $ bzr init branch
+ Created a standalone tree (format: ...)
+ """)
+ self.assertPathExists('branch')
+
+
+class TestEcho(script.TestCaseWithMemoryTransportAndScript):
+
+ def test_echo_usage(self):
+ story = """
+$ echo foo
+<bar
+"""
+ self.assertRaises(SyntaxError, self.run_script, story)
+
+ def test_echo_input(self):
+ self.assertRaises(SyntaxError, self.run_script, """
+ $ echo <foo
+ """)
+
+ def test_echo_to_output(self):
+ retcode, out, err = self.run_command(['echo'], None, '\n', None)
+ self.assertEquals('\n', out)
+ self.assertEquals(None, err)
+
+ def test_echo_some_to_output(self):
+ retcode, out, err = self.run_command(['echo', 'hello'],
+ None, 'hello\n', None)
+ self.assertEquals('hello\n', out)
+ self.assertEquals(None, err)
+
+ def test_echo_more_output(self):
+ retcode, out, err = self.run_command(
+ ['echo', 'hello', 'happy', 'world'],
+ None, 'hello happy world\n', None)
+ self.assertEquals('hello happy world\n', out)
+ self.assertEquals(None, err)
+
+ def test_echo_appended(self):
+ retcode, out, err = self.run_command(['echo', 'hello', '>file'],
+ None, None, None)
+ self.assertEquals(None, out)
+ self.assertEquals(None, err)
+ self.assertFileEqual('hello\n', 'file')
+ retcode, out, err = self.run_command(['echo', 'happy', '>>file'],
+ None, None, None)
+ self.assertEquals(None, out)
+ self.assertEquals(None, err)
+ self.assertFileEqual('hello\nhappy\n', 'file')
+
+ def test_empty_line_in_output_is_respected(self):
+ self.run_script("""
+ $ echo
+
+ $ echo bar
+ bar
+ """)
+
+
+class TestRm(script.TestCaseWithTransportAndScript):
+
+ def test_rm_usage(self):
+ self.assertRaises(SyntaxError, self.run_script, '$ rm')
+ self.assertRaises(SyntaxError, self.run_script, '$ rm -ff foo')
+
+ def test_rm_file(self):
+ self.run_script('$ echo content >file')
+ self.assertPathExists('file')
+ self.run_script('$ rm file')
+ self.assertPathDoesNotExist('file')
+
+ def test_rm_file_force(self):
+ self.assertPathDoesNotExist('file')
+ self.run_script('$ rm -f file')
+ self.assertPathDoesNotExist('file')
+
+ def test_rm_files(self):
+ self.run_script("""
+$ echo content >file
+$ echo content >file2
+""")
+ self.assertPathExists('file2')
+ self.run_script('$ rm file file2')
+ self.assertPathDoesNotExist('file2')
+
+ def test_rm_dir(self):
+ self.run_script('$ mkdir dir')
+ self.assertPathExists('dir')
+ self.run_script("""
+$ rm dir
+2>rm: cannot remove 'dir': Is a directory
+""")
+ self.assertPathExists('dir')
+
+ def test_rm_dir_recursive(self):
+ self.run_script("""
+$ mkdir dir
+$ rm -r dir
+""")
+ self.assertPathDoesNotExist('dir')
+
+
+class TestMv(script.TestCaseWithTransportAndScript):
+
+ def test_usage(self):
+ self.assertRaises(SyntaxError, self.run_script, '$ mv')
+ self.assertRaises(SyntaxError, self.run_script, '$ mv f')
+ self.assertRaises(SyntaxError, self.run_script, '$ mv f1 f2 f3')
+
+ def test_move_file(self):
+ self.run_script('$ echo content >file')
+ self.assertPathExists('file')
+ self.run_script('$ mv file new_name')
+ self.assertPathDoesNotExist('file')
+ self.assertPathExists('new_name')
+
+ def test_move_unknown_file(self):
+ self.assertRaises(AssertionError,
+ self.run_script, '$ mv unknown does-not-exist')
+
+ def test_move_dir(self):
+ self.run_script("""
+$ mkdir dir
+$ echo content >dir/file
+""")
+ self.run_script('$ mv dir new_name')
+ self.assertPathDoesNotExist('dir')
+ self.assertPathExists('new_name')
+ self.assertPathExists('new_name/file')
+
+ def test_move_file_into_dir(self):
+ self.run_script("""
+$ mkdir dir
+$ echo content > file
+""")
+ self.run_script('$ mv file dir')
+ self.assertPathExists('dir')
+ self.assertPathDoesNotExist('file')
+ self.assertPathExists('dir/file')
+
+
+class cmd_test_confirm(commands.Command):
+
+ def run(self):
+ if ui.ui_factory.get_boolean(
+ u'Really do it',
+ # 'bzrlib.tests.test_script.confirm',
+ # {}
+ ):
+ self.outf.write('Do it!\n')
+ else:
+ print 'ok, no'
+
+
+class TestUserInteraction(script.TestCaseWithMemoryTransportAndScript):
+
+ def test_confirm_action(self):
+ """You can write tests that demonstrate user confirmation.
+
+        Specifically, ScriptRunner doesn't care if the output line for the
+ prompt isn't terminated by a newline from the program; it's implicitly
+ terminated by the input.
+ """
+ commands.builtin_command_registry.register(cmd_test_confirm)
+ self.addCleanup(commands.builtin_command_registry.remove, 'test-confirm')
+ self.run_script("""
+ $ bzr test-confirm
+ 2>Really do it? ([y]es, [n]o): yes
+ <y
+ Do it!
+ $ bzr test-confirm
+ 2>Really do it? ([y]es, [n]o): no
+ <n
+ ok, no
+ """)
+
+
+
+class TestShelve(script.TestCaseWithTransportAndScript):
+
+ def setUp(self):
+ super(TestShelve, self).setUp()
+ self.run_script("""
+ $ bzr init test
+ Created a standalone tree (format: 2a)
+ $ cd test
+ $ echo foo > file
+ $ bzr add
+ adding file
+ $ bzr commit -m 'file added'
+ 2>Committing to:...test/
+ 2>added file
+ 2>Committed revision 1.
+ $ echo bar > file
+ """)
+
+ def test_shelve(self):
+ self.run_script("""
+ $ bzr shelve -m 'shelve bar'
+ 2>Shelve? ([y]es, [N]o, [f]inish, [q]uit): yes
+ <y
+ 2>Selected changes:
+ 2> M file
+ 2>Shelve 1 change(s)? ([y]es, [N]o, [f]inish, [q]uit): yes
+ <y
+ 2>Changes shelved with id "1".
+ """,
+ null_output_matches_anything=True)
+ self.run_script("""
+ $ bzr shelve --list
+ 1: shelve bar
+ """)
+
+ def test_dont_shelve(self):
+ # We intentionally provide no input here to test EOF
+ self.run_script("""
+ $ bzr shelve -m 'shelve bar'
+ 2>Shelve? ([y]es, [N]o, [f]inish, [q]uit):
+ 2>No changes to shelve.
+ """,
+ null_output_matches_anything=True)
+ self.run_script("""
+ $ bzr st
+ modified:
+ file
+ """)
diff --git a/bzrlib/tests/test_selftest.py b/bzrlib/tests/test_selftest.py
new file mode 100644
index 0000000..b213297
--- /dev/null
+++ b/bzrlib/tests/test_selftest.py
@@ -0,0 +1,3664 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the test framework."""
+
+from cStringIO import StringIO
+import gc
+import doctest
+import os
+import signal
+import sys
+import threading
+import time
+import unittest
+import warnings
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ MultiTestResult,
+ )
+from testtools.content import Content
+from testtools.content_type import ContentType
+from testtools.matchers import (
+ DocTestMatches,
+ Equals,
+ )
+import testtools.testresult.doubles
+
+import bzrlib
+from bzrlib import (
+ branchbuilder,
+ bzrdir,
+ controldir,
+ errors,
+ hooks,
+ lockdir,
+ memorytree,
+ osutils,
+ remote,
+ repository,
+ symbol_versioning,
+ tests,
+ transport,
+ workingtree,
+ workingtree_3,
+ workingtree_4,
+ )
+from bzrlib.repofmt import (
+ groupcompress_repo,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_function,
+ deprecated_in,
+ deprecated_method,
+ )
+from bzrlib.tests import (
+ features,
+ test_lsprof,
+ test_server,
+ TestUtil,
+ )
+from bzrlib.trace import note, mutter
+from bzrlib.transport import memory
+
+
+def _test_ids(test_suite):
+ """Get the ids for the tests in a test suite."""
+ return [t.id() for t in tests.iter_suite_tests(test_suite)]
+
+
+class MetaTestLog(tests.TestCase):
+
+ def test_logging(self):
+ """Test logs are captured when a test fails."""
+ self.log('a test message')
+ details = self.getDetails()
+ log = details['log']
+ self.assertThat(log.content_type, Equals(ContentType(
+ "text", "plain", {"charset": "utf8"})))
+ self.assertThat(u"".join(log.iter_text()), Equals(self.get_log()))
+ self.assertThat(self.get_log(),
+ DocTestMatches(u"...a test message\n", doctest.ELLIPSIS))
+
+
+class TestTreeShape(tests.TestCaseInTempDir):
+
+ def test_unicode_paths(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+
+ filename = u'hell\u00d8'
+ self.build_tree_contents([(filename, 'contents of hello')])
+ self.assertPathExists(filename)
+
+
+class TestClassesAvailable(tests.TestCase):
+ """As a convenience we expose Test* classes from bzrlib.tests"""
+
+ def test_test_case(self):
+ from bzrlib.tests import TestCase
+
+ def test_test_loader(self):
+ from bzrlib.tests import TestLoader
+
+ def test_test_suite(self):
+ from bzrlib.tests import TestSuite
+
+
+class TestTransportScenarios(tests.TestCase):
+ """A group of tests that test the transport implementation adaption core.
+
+ This is a meta test that the tests are applied to all available
+ transports.
+
+ This will be generalised in the future which is why it is in this
+ test file even though it is specific to transport tests at the moment.
+ """
+
+ def test_get_transport_permutations(self):
+ # this checks that get_test_permutations defined by the module is
+ # called by the get_transport_test_permutations function.
+ class MockModule(object):
+ def get_test_permutations(self):
+ return sample_permutation
+ sample_permutation = [(1,2), (3,4)]
+ from bzrlib.tests.per_transport import get_transport_test_permutations
+ self.assertEqual(sample_permutation,
+ get_transport_test_permutations(MockModule()))
+
+ def test_scenarios_include_all_modules(self):
+ # this checks that the scenario generator returns as many permutations
+ # as there are in all the registered transport modules - we assume if
+ # this matches its probably doing the right thing especially in
+ # combination with the tests for setting the right classes below.
+ from bzrlib.tests.per_transport import transport_test_permutations
+ from bzrlib.transport import _get_transport_modules
+ modules = _get_transport_modules()
+ permutation_count = 0
+ for module in modules:
+ try:
+ permutation_count += len(reduce(getattr,
+ (module + ".get_test_permutations").split('.')[1:],
+ __import__(module))())
+ except errors.DependencyNotPresent:
+ pass
+ scenarios = transport_test_permutations()
+ self.assertEqual(permutation_count, len(scenarios))
+
+ def test_scenarios_include_transport_class(self):
+ # This test used to know about all the possible transports and the
+ # order they were returned but that seems overly brittle (mbp
+ # 20060307)
+ from bzrlib.tests.per_transport import transport_test_permutations
+ scenarios = transport_test_permutations()
+ # there are at least that many builtin transports
+ self.assertTrue(len(scenarios) > 6)
+ one_scenario = scenarios[0]
+ self.assertIsInstance(one_scenario[0], str)
+ self.assertTrue(issubclass(one_scenario[1]["transport_class"],
+ bzrlib.transport.Transport))
+ self.assertTrue(issubclass(one_scenario[1]["transport_server"],
+ bzrlib.transport.Server))
+
+
+class TestBranchScenarios(tests.TestCase):
+
+ def test_scenarios(self):
+ # check that constructor parameters are passed through to the adapted
+ # test.
+ from bzrlib.tests.per_branch import make_scenarios
+ server1 = "a"
+ server2 = "b"
+ formats = [("c", "C"), ("d", "D")]
+ scenarios = make_scenarios(server1, server2, formats)
+ self.assertEqual(2, len(scenarios))
+ self.assertEqual([
+ ('str',
+ {'branch_format': 'c',
+ 'bzrdir_format': 'C',
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a'}),
+ ('str',
+ {'branch_format': 'd',
+ 'bzrdir_format': 'D',
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a'})],
+ scenarios)
+
+
+class TestBzrDirScenarios(tests.TestCase):
+
+ def test_scenarios(self):
+ # check that constructor parameters are passed through to the adapted
+ # test.
+ from bzrlib.tests.per_controldir import make_scenarios
+ vfs_factory = "v"
+ server1 = "a"
+ server2 = "b"
+ formats = ["c", "d"]
+ scenarios = make_scenarios(vfs_factory, server1, server2, formats)
+ self.assertEqual([
+ ('str',
+ {'bzrdir_format': 'c',
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'vfs_transport_factory': 'v'}),
+ ('str',
+ {'bzrdir_format': 'd',
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'vfs_transport_factory': 'v'})],
+ scenarios)
+
+
+class TestRepositoryScenarios(tests.TestCase):
+
+ def test_formats_to_scenarios(self):
+ from bzrlib.tests.per_repository import formats_to_scenarios
+ formats = [("(c)", remote.RemoteRepositoryFormat()),
+ ("(d)", repository.format_registry.get(
+ 'Bazaar repository format 2a (needs bzr 1.16 or later)\n'))]
+ no_vfs_scenarios = formats_to_scenarios(formats, "server", "readonly",
+ None)
+ vfs_scenarios = formats_to_scenarios(formats, "server", "readonly",
+ vfs_transport_factory="vfs")
+        # no_vfs generates scenarios without vfs_transport_factory
+ expected = [
+ ('RemoteRepositoryFormat(c)',
+ {'bzrdir_format': remote.RemoteBzrDirFormat(),
+ 'repository_format': remote.RemoteRepositoryFormat(),
+ 'transport_readonly_server': 'readonly',
+ 'transport_server': 'server'}),
+ ('RepositoryFormat2a(d)',
+ {'bzrdir_format': bzrdir.BzrDirMetaFormat1(),
+ 'repository_format': groupcompress_repo.RepositoryFormat2a(),
+ 'transport_readonly_server': 'readonly',
+ 'transport_server': 'server'})]
+ self.assertEqual(expected, no_vfs_scenarios)
+ self.assertEqual([
+ ('RemoteRepositoryFormat(c)',
+ {'bzrdir_format': remote.RemoteBzrDirFormat(),
+ 'repository_format': remote.RemoteRepositoryFormat(),
+ 'transport_readonly_server': 'readonly',
+ 'transport_server': 'server',
+ 'vfs_transport_factory': 'vfs'}),
+ ('RepositoryFormat2a(d)',
+ {'bzrdir_format': bzrdir.BzrDirMetaFormat1(),
+ 'repository_format': groupcompress_repo.RepositoryFormat2a(),
+ 'transport_readonly_server': 'readonly',
+ 'transport_server': 'server',
+ 'vfs_transport_factory': 'vfs'})],
+ vfs_scenarios)
+
+
+class TestTestScenarioApplication(tests.TestCase):
+ """Tests for the test adaption facilities."""
+
+ def test_apply_scenario(self):
+ from bzrlib.tests import apply_scenario
+ input_test = TestTestScenarioApplication("test_apply_scenario")
+        # set up two adapted tests
+ adapted_test1 = apply_scenario(input_test,
+ ("new id",
+ {"bzrdir_format":"bzr_format",
+ "repository_format":"repo_fmt",
+ "transport_server":"transport_server",
+ "transport_readonly_server":"readonly-server"}))
+ adapted_test2 = apply_scenario(input_test,
+ ("new id 2", {"bzrdir_format":None}))
+        # input_test should not have been altered.
+ self.assertRaises(AttributeError, getattr, input_test, "bzrdir_format")
+ # the new tests are mutually incompatible, ensuring it has
+ # made new ones, and unspecified elements in the scenario
+ # should not have been altered.
+ self.assertEqual("bzr_format", adapted_test1.bzrdir_format)
+ self.assertEqual("repo_fmt", adapted_test1.repository_format)
+ self.assertEqual("transport_server", adapted_test1.transport_server)
+ self.assertEqual("readonly-server",
+ adapted_test1.transport_readonly_server)
+ self.assertEqual(
+ "bzrlib.tests.test_selftest.TestTestScenarioApplication."
+ "test_apply_scenario(new id)",
+ adapted_test1.id())
+ self.assertEqual(None, adapted_test2.bzrdir_format)
+ self.assertEqual(
+ "bzrlib.tests.test_selftest.TestTestScenarioApplication."
+ "test_apply_scenario(new id 2)",
+ adapted_test2.id())
+
+
+class TestInterRepositoryScenarios(tests.TestCase):
+
+ def test_scenarios(self):
+ # check that constructor parameters are passed through to the adapted
+ # test.
+ from bzrlib.tests.per_interrepository import make_scenarios
+ server1 = "a"
+ server2 = "b"
+ formats = [("C0", "C1", "C2", "C3"), ("D0", "D1", "D2", "D3")]
+ scenarios = make_scenarios(server1, server2, formats)
+ self.assertEqual([
+ ('C0,str,str',
+ {'repository_format': 'C1',
+ 'repository_format_to': 'C2',
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'extra_setup': 'C3'}),
+ ('D0,str,str',
+ {'repository_format': 'D1',
+ 'repository_format_to': 'D2',
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'extra_setup': 'D3'})],
+ scenarios)
+
+
+class TestWorkingTreeScenarios(tests.TestCase):
+
+ def test_scenarios(self):
+ # check that constructor parameters are passed through to the adapted
+ # test.
+ from bzrlib.tests.per_workingtree import make_scenarios
+ server1 = "a"
+ server2 = "b"
+ formats = [workingtree_4.WorkingTreeFormat4(),
+ workingtree_3.WorkingTreeFormat3(),]
+ scenarios = make_scenarios(server1, server2, formats)
+ self.assertEqual([
+ ('WorkingTreeFormat4',
+ {'bzrdir_format': formats[0]._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': formats[0]}),
+ ('WorkingTreeFormat3',
+ {'bzrdir_format': formats[1]._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': formats[1]})],
+ scenarios)
+
+
+class TestTreeScenarios(tests.TestCase):
+
+ def test_scenarios(self):
+        # the tree implementation scenario generator is meant to set up one
+        # instance for each working tree format, and one additional instance
+        # that will use the default wt format, but create a revision tree for
+        # the tests. This means that the wt ones should have the
+ # workingtree_to_test_tree attribute set to 'return_parameter' and the
+ # revision one set to revision_tree_from_workingtree.
+
+ from bzrlib.tests.per_tree import (
+ _dirstate_tree_from_workingtree,
+ make_scenarios,
+ preview_tree_pre,
+ preview_tree_post,
+ return_parameter,
+ revision_tree_from_workingtree
+ )
+ server1 = "a"
+ server2 = "b"
+ formats = [workingtree_4.WorkingTreeFormat4(),
+ workingtree_3.WorkingTreeFormat3(),]
+ scenarios = make_scenarios(server1, server2, formats)
+ self.assertEqual(7, len(scenarios))
+ default_wt_format = workingtree.format_registry.get_default()
+ wt4_format = workingtree_4.WorkingTreeFormat4()
+ wt5_format = workingtree_4.WorkingTreeFormat5()
+ expected_scenarios = [
+ ('WorkingTreeFormat4',
+ {'bzrdir_format': formats[0]._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': formats[0],
+ '_workingtree_to_test_tree': return_parameter,
+ }),
+ ('WorkingTreeFormat3',
+ {'bzrdir_format': formats[1]._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': formats[1],
+ '_workingtree_to_test_tree': return_parameter,
+ }),
+ ('RevisionTree',
+ {'_workingtree_to_test_tree': revision_tree_from_workingtree,
+ 'bzrdir_format': default_wt_format._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': default_wt_format,
+ }),
+ ('DirStateRevisionTree,WT4',
+ {'_workingtree_to_test_tree': _dirstate_tree_from_workingtree,
+ 'bzrdir_format': wt4_format._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': wt4_format,
+ }),
+ ('DirStateRevisionTree,WT5',
+ {'_workingtree_to_test_tree': _dirstate_tree_from_workingtree,
+ 'bzrdir_format': wt5_format._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': wt5_format,
+ }),
+ ('PreviewTree',
+ {'_workingtree_to_test_tree': preview_tree_pre,
+ 'bzrdir_format': default_wt_format._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': default_wt_format}),
+ ('PreviewTreePost',
+ {'_workingtree_to_test_tree': preview_tree_post,
+ 'bzrdir_format': default_wt_format._matchingbzrdir,
+ 'transport_readonly_server': 'b',
+ 'transport_server': 'a',
+ 'workingtree_format': default_wt_format}),
+ ]
+ self.assertEqual(expected_scenarios, scenarios)
+
+
+class TestInterTreeScenarios(tests.TestCase):
+ """A group of tests that test the InterTreeTestAdapter."""
+
+ def test_scenarios(self):
+ # check that constructor parameters are passed through to the adapted
+ # test.
+ # for InterTree tests we want the machinery to bring up two trees in
+ # each instance: the base one, and the one we are interacting with.
+ # because each optimiser can be direction specific, we need to test
+ # each optimiser in its chosen direction.
+        # unlike the TestProviderAdapter we don't want to automatically add a
+ # parameterized one for WorkingTree - the optimisers will tell us what
+ # ones to add.
+ from bzrlib.tests.per_tree import (
+ return_parameter,
+ )
+ from bzrlib.tests.per_intertree import (
+ make_scenarios,
+ )
+ from bzrlib.workingtree_3 import WorkingTreeFormat3
+ from bzrlib.workingtree_4 import WorkingTreeFormat4
+ input_test = TestInterTreeScenarios(
+ "test_scenarios")
+ server1 = "a"
+ server2 = "b"
+ format1 = WorkingTreeFormat4()
+ format2 = WorkingTreeFormat3()
+ formats = [("1", str, format1, format2, "converter1"),
+ ("2", int, format2, format1, "converter2")]
+ scenarios = make_scenarios(server1, server2, formats)
+ self.assertEqual(2, len(scenarios))
+ expected_scenarios = [
+ ("1", {
+ "bzrdir_format": format1._matchingbzrdir,
+ "intertree_class": formats[0][1],
+ "workingtree_format": formats[0][2],
+ "workingtree_format_to": formats[0][3],
+ "mutable_trees_to_test_trees": formats[0][4],
+ "_workingtree_to_test_tree": return_parameter,
+ "transport_server": server1,
+ "transport_readonly_server": server2,
+ }),
+ ("2", {
+ "bzrdir_format": format2._matchingbzrdir,
+ "intertree_class": formats[1][1],
+ "workingtree_format": formats[1][2],
+ "workingtree_format_to": formats[1][3],
+ "mutable_trees_to_test_trees": formats[1][4],
+ "_workingtree_to_test_tree": return_parameter,
+ "transport_server": server1,
+ "transport_readonly_server": server2,
+ }),
+ ]
+ self.assertEqual(scenarios, expected_scenarios)
+
+
+class TestTestCaseInTempDir(tests.TestCaseInTempDir):
+
+ def test_home_is_not_working(self):
+ self.assertNotEqual(self.test_dir, self.test_home_dir)
+ cwd = osutils.getcwd()
+ self.assertIsSameRealPath(self.test_dir, cwd)
+ self.assertIsSameRealPath(self.test_home_dir, os.environ['HOME'])
+
+ def test_assertEqualStat_equal(self):
+ from bzrlib.tests.test_dirstate import _FakeStat
+ self.build_tree(["foo"])
+ real = os.lstat("foo")
+ fake = _FakeStat(real.st_size, real.st_mtime, real.st_ctime,
+ real.st_dev, real.st_ino, real.st_mode)
+ self.assertEqualStat(real, fake)
+
+ def test_assertEqualStat_notequal(self):
+ self.build_tree(["foo", "longname"])
+ self.assertRaises(AssertionError, self.assertEqualStat,
+ os.lstat("foo"), os.lstat("longname"))
+
+ def test_failUnlessExists(self):
+ """Deprecated failUnlessExists and failIfExists"""
+ self.applyDeprecated(
+ deprecated_in((2, 4)),
+ self.failUnlessExists, '.')
+ self.build_tree(['foo/', 'foo/bar'])
+ self.applyDeprecated(
+ deprecated_in((2, 4)),
+ self.failUnlessExists, 'foo/bar')
+ self.applyDeprecated(
+ deprecated_in((2, 4)),
+ self.failIfExists, 'foo/foo')
+
+ def test_assertPathExists(self):
+ self.assertPathExists('.')
+ self.build_tree(['foo/', 'foo/bar'])
+ self.assertPathExists('foo/bar')
+ self.assertPathDoesNotExist('foo/foo')
+
+
+class TestTestCaseWithMemoryTransport(tests.TestCaseWithMemoryTransport):
+
+ def test_home_is_non_existant_dir_under_root(self):
+ """The test_home_dir for TestCaseWithMemoryTransport is missing.
+
+ This is because TestCaseWithMemoryTransport is for tests that do not
+ need any disk resources: they should be hooked into bzrlib in such a
+ way that no global settings are being changed by the test (only a
+ few tests should need to do that), and having a missing dir as home is
+ an effective way to ensure that this is the case.
+ """
+ self.assertIsSameRealPath(
+ self.TEST_ROOT + "/MemoryTransportMissingHomeDir",
+ self.test_home_dir)
+ self.assertIsSameRealPath(self.test_home_dir, os.environ['HOME'])
+
+ def test_cwd_is_TEST_ROOT(self):
+ self.assertIsSameRealPath(self.test_dir, self.TEST_ROOT)
+ cwd = osutils.getcwd()
+ self.assertIsSameRealPath(self.test_dir, cwd)
+
+ def test_BZR_HOME_and_HOME_are_bytestrings(self):
+ """The $BZR_HOME and $HOME environment variables should not be unicode.
+
+ See https://bugs.launchpad.net/bzr/+bug/464174
+ """
+ self.assertIsInstance(os.environ['BZR_HOME'], str)
+ self.assertIsInstance(os.environ['HOME'], str)
+
+ def test_make_branch_and_memory_tree(self):
+ """In TestCaseWithMemoryTransport we should not make the branch on disk.
+
+        This is hard to test comprehensively and robustly, so we settle for making
+ a branch and checking no directory was created at its relpath.
+ """
+ tree = self.make_branch_and_memory_tree('dir')
+ # Guard against regression into MemoryTransport leaking
+ # files to disk instead of keeping them in memory.
+ self.assertFalse(osutils.lexists('dir'))
+ self.assertIsInstance(tree, memorytree.MemoryTree)
+
+ def test_make_branch_and_memory_tree_with_format(self):
+ """make_branch_and_memory_tree should accept a format option."""
+ format = bzrdir.BzrDirMetaFormat1()
+ format.repository_format = repository.format_registry.get_default()
+ tree = self.make_branch_and_memory_tree('dir', format=format)
+ # Guard against regression into MemoryTransport leaking
+ # files to disk instead of keeping them in memory.
+ self.assertFalse(osutils.lexists('dir'))
+ self.assertIsInstance(tree, memorytree.MemoryTree)
+ self.assertEqual(format.repository_format.__class__,
+ tree.branch.repository._format.__class__)
+
+ def test_make_branch_builder(self):
+ builder = self.make_branch_builder('dir')
+ self.assertIsInstance(builder, branchbuilder.BranchBuilder)
+ # Guard against regression into MemoryTransport leaking
+ # files to disk instead of keeping them in memory.
+ self.assertFalse(osutils.lexists('dir'))
+
+ def test_make_branch_builder_with_format(self):
+ # Use a repo layout that doesn't conform to a 'named' layout, to ensure
+ # that the format objects are used.
+ format = bzrdir.BzrDirMetaFormat1()
+ repo_format = repository.format_registry.get_default()
+ format.repository_format = repo_format
+ builder = self.make_branch_builder('dir', format=format)
+ the_branch = builder.get_branch()
+ # Guard against regression into MemoryTransport leaking
+ # files to disk instead of keeping them in memory.
+ self.assertFalse(osutils.lexists('dir'))
+ self.assertEqual(format.repository_format.__class__,
+ the_branch.repository._format.__class__)
+ self.assertEqual(repo_format.get_format_string(),
+ self.get_transport().get_bytes(
+ 'dir/.bzr/repository/format'))
+
+ def test_make_branch_builder_with_format_name(self):
+ builder = self.make_branch_builder('dir', format='knit')
+ the_branch = builder.get_branch()
+ # Guard against regression into MemoryTransport leaking
+ # files to disk instead of keeping them in memory.
+ self.assertFalse(osutils.lexists('dir'))
+ dir_format = controldir.format_registry.make_bzrdir('knit')
+ self.assertEqual(dir_format.repository_format.__class__,
+ the_branch.repository._format.__class__)
+ self.assertEqual('Bazaar-NG Knit Repository Format 1',
+ self.get_transport().get_bytes(
+ 'dir/.bzr/repository/format'))
+
+ def test_dangling_locks_cause_failures(self):
+ class TestDanglingLock(tests.TestCaseWithMemoryTransport):
+ def test_function(self):
+ t = self.get_transport_from_path('.')
+ l = lockdir.LockDir(t, 'lock')
+ l.create()
+ l.attempt_lock()
+ test = TestDanglingLock('test_function')
+ result = test.run()
+ total_failures = result.errors + result.failures
+ if self._lock_check_thorough:
+ self.assertEqual(1, len(total_failures))
+ else:
+ # When _lock_check_thorough is disabled, then we don't trigger a
+ # failure
+ self.assertEqual(0, len(total_failures))
+
+
+class TestTestCaseWithTransport(tests.TestCaseWithTransport):
+ """Tests for the convenience functions TestCaseWithTransport introduces."""
+
+ def test_get_readonly_url_none(self):
+ from bzrlib.transport.readonly import ReadonlyTransportDecorator
+ self.vfs_transport_factory = memory.MemoryServer
+ self.transport_readonly_server = None
+ # calling get_readonly_url() constructs a readonly-decorated url on the
+ # vfs transport for the server
+ url = self.get_readonly_url()
+ url2 = self.get_readonly_url('foo/bar')
+ t = transport.get_transport_from_url(url)
+ t2 = transport.get_transport_from_url(url2)
+ self.assertIsInstance(t, ReadonlyTransportDecorator)
+ self.assertIsInstance(t2, ReadonlyTransportDecorator)
+ self.assertEqual(t2.base[:-1], t.abspath('foo/bar'))
+
+ def test_get_readonly_url_http(self):
+ from bzrlib.tests.http_server import HttpServer
+ from bzrlib.transport.http import HttpTransportBase
+ self.transport_server = test_server.LocalURLServer
+ self.transport_readonly_server = HttpServer
+ # calling get_readonly_url() gives us urls served by the HTTP server instance.
+ url = self.get_readonly_url()
+ url2 = self.get_readonly_url('foo/bar')
+ # the transport returned may be any HttpTransportBase subclass
+ t = transport.get_transport_from_url(url)
+ t2 = transport.get_transport_from_url(url2)
+ self.assertIsInstance(t, HttpTransportBase)
+ self.assertIsInstance(t2, HttpTransportBase)
+ self.assertEqual(t2.base[:-1], t.abspath('foo/bar'))
+
+ def test_is_directory(self):
+ """Test assertIsDirectory assertion"""
+ t = self.get_transport()
+ self.build_tree(['a_dir/', 'a_file'], transport=t)
+ self.assertIsDirectory('a_dir', t)
+ self.assertRaises(AssertionError, self.assertIsDirectory, 'a_file', t)
+ self.assertRaises(AssertionError, self.assertIsDirectory, 'not_here', t)
+
+ def test_make_branch_builder(self):
+ builder = self.make_branch_builder('dir')
+ rev_id = builder.build_commit()
+ self.assertPathExists('dir')
+ a_dir = controldir.ControlDir.open('dir')
+ self.assertRaises(errors.NoWorkingTree, a_dir.open_workingtree)
+ a_branch = a_dir.open_branch()
+ builder_branch = builder.get_branch()
+ self.assertEqual(a_branch.base, builder_branch.base)
+ self.assertEqual((1, rev_id), builder_branch.last_revision_info())
+ self.assertEqual((1, rev_id), a_branch.last_revision_info())
+
+
+class TestTestCaseTransports(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestTestCaseTransports, self).setUp()
+ self.vfs_transport_factory = memory.MemoryServer
+
+ def test_make_bzrdir_preserves_transport(self):
+ t = self.get_transport()
+ result_bzrdir = self.make_bzrdir('subdir')
+ self.assertIsInstance(result_bzrdir.transport,
+ memory.MemoryTransport)
+ # should not be on disk, should only be in memory
+ self.assertPathDoesNotExist('subdir')
+
+
+class TestChrootedTest(tests.ChrootedTestCase):
+
+ def test_root_is_root(self):
+ t = transport.get_transport_from_url(self.get_readonly_url())
+ url = t.base
+ self.assertEqual(url, t.clone('..').base)
+
+
+class TestProfileResult(tests.TestCase):
+
+ def test_profiles_tests(self):
+ self.requireFeature(features.lsprof_feature)
+ terminal = testtools.testresult.doubles.ExtendedTestResult()
+ result = tests.ProfileResult(terminal)
+ class Sample(tests.TestCase):
+ def a(self):
+ self.sample_function()
+ def sample_function(self):
+ pass
+ test = Sample("a")
+ test.run(result)
+ case = terminal._events[0][1]
+ self.assertLength(1, case._benchcalls)
+ # We must be able to unpack it as the test reporting code wants
+ (_, _, _), stats = case._benchcalls[0]
+ self.assertTrue(callable(stats.pprint))
+
+
+class TestTestResult(tests.TestCase):
+
+ def check_timing(self, test_case, expected_re):
+ result = bzrlib.tests.TextTestResult(self._log_file,
+ descriptions=0,
+ verbosity=1,
+ )
+ capture = testtools.testresult.doubles.ExtendedTestResult()
+ test_case.run(MultiTestResult(result, capture))
+ run_case = capture._events[0][1]
+ timed_string = result._testTimeString(run_case)
+ self.assertContainsRe(timed_string, expected_re)
+
+ def test_test_reporting(self):
+ class ShortDelayTestCase(tests.TestCase):
+ def test_short_delay(self):
+ time.sleep(0.003)
+ def test_short_benchmark(self):
+ self.time(time.sleep, 0.003)
+ self.check_timing(ShortDelayTestCase('test_short_delay'),
+ r"^ +[0-9]+ms$")
+ # if a benchmark time is given, we now show just that time followed by
+ # a star
+ self.check_timing(ShortDelayTestCase('test_short_benchmark'),
+ r"^ +[0-9]+ms\*$")
+
+ def test_unittest_reporting_unittest_class(self):
+ # getting the time from a non-bzrlib test works ok
+ class ShortDelayTestCase(unittest.TestCase):
+ def test_short_delay(self):
+ time.sleep(0.003)
+ self.check_timing(ShortDelayTestCase('test_short_delay'),
+ r"^ +[0-9]+ms$")
+
+ def _time_hello_world_encoding(self):
+ """Profile two sleep calls
+
+ This is used to exercise the test framework.
+ """
+ self.time(unicode, 'hello', errors='replace')
+ self.time(unicode, 'world', errors='replace')
+
+ def test_lsprofiling(self):
+ """Verbose test result prints lsprof statistics from test cases."""
+ self.requireFeature(features.lsprof_feature)
+ result_stream = StringIO()
+ result = bzrlib.tests.VerboseTestResult(
+ result_stream,
+ descriptions=0,
+ verbosity=2,
+ )
+ # we want to profile a call of some sort and check that it is output by
+ # addSuccess. We don't care about addError or addFailure as they
+ # are not that interesting for performance tuning.
+ # make a new test instance that when run will generate a profile
+ example_test_case = TestTestResult("_time_hello_world_encoding")
+ example_test_case._gather_lsprof_in_benchmarks = True
+ # execute the test, which should succeed and record profiles
+ example_test_case.run(result)
+ # lsprofile_something()
+ # if this worked we want
+ # LSProf output for <built in function unicode> (['hello'], {'errors': 'replace'})
+ # CallCount Recursive Total(ms) Inline(ms) module:lineno(function)
+ # (the lsprof header)
+ # ... an arbitrary number of lines
+ # and the function call which is time.sleep.
+ # 1 0 ??? ??? ???(sleep)
+ # and then repeated but with 'world', rather than 'hello'.
+ # this should appear in the output stream of our test result.
+ output = result_stream.getvalue()
+ self.assertContainsRe(output,
+ r"LSProf output for <type 'unicode'>\(\('hello',\), {'errors': 'replace'}\)")
+ self.assertContainsRe(output,
+ r" *CallCount *Recursive *Total\(ms\) *Inline\(ms\) *module:lineno\(function\)\n")
+ self.assertContainsRe(output,
+ r"( +1 +0 +0\.\d+ +0\.\d+ +<method 'disable' of '_lsprof\.Profiler' objects>\n)?")
+ self.assertContainsRe(output,
+ r"LSProf output for <type 'unicode'>\(\('world',\), {'errors': 'replace'}\)\n")
+
+ def test_uses_time_from_testtools(self):
+ """Test case timings in verbose results should use testtools times"""
+ import datetime
+ class TimeAddedVerboseTestResult(tests.VerboseTestResult):
+ def startTest(self, test):
+ self.time(datetime.datetime.utcfromtimestamp(1.145))
+ super(TimeAddedVerboseTestResult, self).startTest(test)
+ def addSuccess(self, test):
+ self.time(datetime.datetime.utcfromtimestamp(51.147))
+ super(TimeAddedVerboseTestResult, self).addSuccess(test)
+ def report_tests_starting(self): pass
+ sio = StringIO()
+ self.get_passing_test().run(TimeAddedVerboseTestResult(sio, 0, 2))
+ self.assertEndsWith(sio.getvalue(), "OK 50002ms\n")
+
+ def test_known_failure(self):
+ """Using knownFailure should trigger several result actions."""
+ class InstrumentedTestResult(tests.ExtendedTestResult):
+ def stopTestRun(self): pass
+ def report_tests_starting(self): pass
+ def report_known_failure(self, test, err=None, details=None):
+ self._call = test, 'known failure'
+ result = InstrumentedTestResult(None, None, None, None)
+ class Test(tests.TestCase):
+ def test_function(self):
+ self.knownFailure('failed!')
+ test = Test("test_function")
+ test.run(result)
+ # it should invoke 'report_known_failure'.
+ self.assertEqual(2, len(result._call))
+ self.assertEqual(test.id(), result._call[0].id())
+ self.assertEqual('known failure', result._call[1])
+ # we don't introspect the traceback; if the rest is ok, it would be
+ # exceptional for it not to be.
+ # it should update the known_failure_count on the object.
+ self.assertEqual(1, result.known_failure_count)
+ # the result should be successful.
+ self.assertTrue(result.wasSuccessful())
+
+ def test_verbose_report_known_failure(self):
+ # verbose test output formatting
+ result_stream = StringIO()
+ result = bzrlib.tests.VerboseTestResult(
+ result_stream,
+ descriptions=0,
+ verbosity=2,
+ )
+ _get_test("test_xfail").run(result)
+ self.assertContainsRe(result_stream.getvalue(),
+ "\n\\S+\\.test_xfail\\s+XFAIL\\s+\\d+ms\n"
+ "\\s*(?:Text attachment: )?reason"
+ "(?:\n-+\n|: {{{)"
+ "this_fails"
+ "(?:\n-+\n|}}}\n)")
+
+ def get_passing_test(self):
+ """Return a test object that can't be run usefully."""
+ def passing_test():
+ pass
+ return unittest.FunctionTestCase(passing_test)
+
+ def test_add_not_supported(self):
+ """Test the behaviour of invoking addNotSupported."""
+ class InstrumentedTestResult(tests.ExtendedTestResult):
+ def stopTestRun(self): pass
+ def report_tests_starting(self): pass
+ def report_unsupported(self, test, feature):
+ self._call = test, feature
+ result = InstrumentedTestResult(None, None, None, None)
+ test = SampleTestCase('_test_pass')
+ feature = features.Feature()
+ result.startTest(test)
+ result.addNotSupported(test, feature)
+ # it should invoke 'report_unsupported'.
+ self.assertEqual(2, len(result._call))
+ self.assertEqual(test, result._call[0])
+ self.assertEqual(feature, result._call[1])
+ # the result should be successful.
+ self.assertTrue(result.wasSuccessful())
+ # it should record the test against a count of tests not run due to
+ # this feature.
+ self.assertEqual(1, result.unsupported['Feature'])
+ # and invoking it again should increment that counter
+ result.addNotSupported(test, feature)
+ self.assertEqual(2, result.unsupported['Feature'])
+
+ def test_verbose_report_unsupported(self):
+ # verbose test output formatting
+ result_stream = StringIO()
+ result = bzrlib.tests.VerboseTestResult(
+ result_stream,
+ descriptions=0,
+ verbosity=2,
+ )
+ test = self.get_passing_test()
+ feature = features.Feature()
+ result.startTest(test)
+ prefix = len(result_stream.getvalue())
+ result.report_unsupported(test, feature)
+ output = result_stream.getvalue()[prefix:]
+ lines = output.splitlines()
+ # We don't check for the final '0ms' since it may fail on slow hosts
+ self.assertStartsWith(lines[0], 'NODEP')
+ self.assertEqual(lines[1],
+ " The feature 'Feature' is not available.")
+
+ def test_unavailable_exception(self):
+ """An UnavailableFeature being raised should invoke addNotSupported."""
+ class InstrumentedTestResult(tests.ExtendedTestResult):
+ def stopTestRun(self): pass
+ def report_tests_starting(self): pass
+ def addNotSupported(self, test, feature):
+ self._call = test, feature
+ result = InstrumentedTestResult(None, None, None, None)
+ feature = features.Feature()
+ class Test(tests.TestCase):
+ def test_function(self):
+ raise tests.UnavailableFeature(feature)
+ test = Test("test_function")
+ test.run(result)
+ # it should invoke 'addNotSupported'.
+ self.assertEqual(2, len(result._call))
+ self.assertEqual(test.id(), result._call[0].id())
+ self.assertEqual(feature, result._call[1])
+ # and not count as an error
+ self.assertEqual(0, result.error_count)
+
+ def test_strict_with_unsupported_feature(self):
+ result = bzrlib.tests.TextTestResult(self._log_file, descriptions=0,
+ verbosity=1)
+ test = self.get_passing_test()
+ feature = "Unsupported Feature"
+ result.addNotSupported(test, feature)
+ self.assertFalse(result.wasStrictlySuccessful())
+ self.assertEqual(None, result._extractBenchmarkTime(test))
+
+ def test_strict_with_known_failure(self):
+ result = bzrlib.tests.TextTestResult(self._log_file, descriptions=0,
+ verbosity=1)
+ test = _get_test("test_xfail")
+ test.run(result)
+ self.assertFalse(result.wasStrictlySuccessful())
+ self.assertEqual(None, result._extractBenchmarkTime(test))
+
+ def test_strict_with_success(self):
+ result = bzrlib.tests.TextTestResult(self._log_file, descriptions=0,
+ verbosity=1)
+ test = self.get_passing_test()
+ result.addSuccess(test)
+ self.assertTrue(result.wasStrictlySuccessful())
+ self.assertEqual(None, result._extractBenchmarkTime(test))
+
+ def test_startTests(self):
+ """Starting the first test should trigger startTests."""
+ class InstrumentedTestResult(tests.ExtendedTestResult):
+ calls = 0
+ def startTests(self): self.calls += 1
+ result = InstrumentedTestResult(None, None, None, None)
+ def test_function():
+ pass
+ test = unittest.FunctionTestCase(test_function)
+ test.run(result)
+ self.assertEquals(1, result.calls)
+
+ def test_startTests_only_once(self):
+ """With multiple tests startTests should still only be called once"""
+ class InstrumentedTestResult(tests.ExtendedTestResult):
+ calls = 0
+ def startTests(self): self.calls += 1
+ result = InstrumentedTestResult(None, None, None, None)
+ suite = unittest.TestSuite([
+ unittest.FunctionTestCase(lambda: None),
+ unittest.FunctionTestCase(lambda: None)])
+ suite.run(result)
+ self.assertEquals(1, result.calls)
+ self.assertEquals(2, result.count)
+
+
+class TestRunner(tests.TestCase):
+
+ def dummy_test(self):
+ pass
+
+ def run_test_runner(self, testrunner, test):
+ """Run suite in testrunner, saving global state and restoring it.
+
+ This currently saves and restores:
+ TestCaseInTempDir.TEST_ROOT
+
+ There should be no tests in this file that use
+ bzrlib.tests.TextTestRunner without using this convenience method,
+ because of our use of global state.
+ """
+ old_root = tests.TestCaseInTempDir.TEST_ROOT
+ try:
+ tests.TestCaseInTempDir.TEST_ROOT = None
+ return testrunner.run(test)
+ finally:
+ tests.TestCaseInTempDir.TEST_ROOT = old_root
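+ # A minimal usage sketch (the test and runner names are placeholders;
+ # the concrete cases below follow the same shape):
+ #   runner = tests.TextTestRunner(stream=StringIO())
+ #   result = self.run_test_runner(runner, SomeTest('test_method'))
+ # which keeps TestCaseInTempDir.TEST_ROOT isolated for the nested run.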
+
+ def test_known_failure_failed_run(self):
+ # run a test that generates a known failure which should be printed in
+ # the final output when real failures occur.
+ class Test(tests.TestCase):
+ def known_failure_test(self):
+ self.expectFailure('failed', self.assertTrue, False)
+ test = unittest.TestSuite()
+ test.addTest(Test("known_failure_test"))
+ def failing_test():
+ raise AssertionError('foo')
+ test.addTest(unittest.FunctionTestCase(failing_test))
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream)
+ result = self.run_test_runner(runner, test)
+ lines = stream.getvalue().splitlines()
+ self.assertContainsRe(stream.getvalue(),
+ '(?sm)^bzr selftest.*$'
+ '.*'
+ '^======================================================================\n'
+ '^FAIL: failing_test\n'
+ '^----------------------------------------------------------------------\n'
+ 'Traceback \\(most recent call last\\):\n'
+ ' .*' # File .*, line .*, in failing_test' - but maybe not from .pyc
+ ' raise AssertionError\\(\'foo\'\\)\n'
+ '.*'
+ '^----------------------------------------------------------------------\n'
+ '.*'
+ 'FAILED \\(failures=1, known_failure_count=1\\)'
+ )
+
+ def test_known_failure_ok_run(self):
+ # run a test that generates a known failure which should be printed in
+ # the final output.
+ class Test(tests.TestCase):
+ def known_failure_test(self):
+ self.knownFailure("Never works...")
+ test = Test("known_failure_test")
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream)
+ result = self.run_test_runner(runner, test)
+ self.assertContainsRe(stream.getvalue(),
+ '\n'
+ '-*\n'
+ 'Ran 1 test in .*\n'
+ '\n'
+ 'OK \\(known_failures=1\\)\n')
+
+ def test_unexpected_success_bad(self):
+ class Test(tests.TestCase):
+ def test_truth(self):
+ self.expectFailure("No absolute truth", self.assertTrue, True)
+ runner = tests.TextTestRunner(stream=StringIO())
+ result = self.run_test_runner(runner, Test("test_truth"))
+ self.assertContainsRe(runner.stream.getvalue(),
+ "=+\n"
+ "FAIL: \\S+\.test_truth\n"
+ "-+\n"
+ "(?:.*\n)*"
+ "\\s*(?:Text attachment: )?reason"
+ "(?:\n-+\n|: {{{)"
+ "No absolute truth"
+ "(?:\n-+\n|}}}\n)"
+ "(?:.*\n)*"
+ "-+\n"
+ "Ran 1 test in .*\n"
+ "\n"
+ "FAILED \\(failures=1\\)\n\\Z")
+
+ def test_result_decorator(self):
+ # decorate results
+ calls = []
+ class LoggingDecorator(ExtendedToOriginalDecorator):
+ def startTest(self, test):
+ ExtendedToOriginalDecorator.startTest(self, test)
+ calls.append('start')
+ test = unittest.FunctionTestCase(lambda:None)
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream,
+ result_decorators=[LoggingDecorator])
+ result = self.run_test_runner(runner, test)
+ self.assertLength(1, calls)
+
+ def test_skipped_test(self):
+ # run a test that is skipped, and check the suite as a whole still
+ # succeeds.
+ # skipping_test must be hidden in here so it's not run as a real test
+ class SkippingTest(tests.TestCase):
+ def skipping_test(self):
+ raise tests.TestSkipped('test intentionally skipped')
+ runner = tests.TextTestRunner(stream=self._log_file)
+ test = SkippingTest("skipping_test")
+ result = self.run_test_runner(runner, test)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_skipped_from_setup(self):
+ calls = []
+ class SkippedSetupTest(tests.TestCase):
+
+ def setUp(self):
+ calls.append('setUp')
+ self.addCleanup(self.cleanup)
+ raise tests.TestSkipped('skipped setup')
+
+ def test_skip(self):
+ self.fail('test reached')
+
+ def cleanup(self):
+ calls.append('cleanup')
+
+ runner = tests.TextTestRunner(stream=self._log_file)
+ test = SkippedSetupTest('test_skip')
+ result = self.run_test_runner(runner, test)
+ self.assertTrue(result.wasSuccessful())
+ # Check if cleanup was called the right number of times.
+ self.assertEqual(['setUp', 'cleanup'], calls)
+
+ def test_skipped_from_test(self):
+ calls = []
+ class SkippedTest(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ calls.append('setUp')
+ self.addCleanup(self.cleanup)
+
+ def test_skip(self):
+ raise tests.TestSkipped('skipped test')
+
+ def cleanup(self):
+ calls.append('cleanup')
+
+ runner = tests.TextTestRunner(stream=self._log_file)
+ test = SkippedTest('test_skip')
+ result = self.run_test_runner(runner, test)
+ self.assertTrue(result.wasSuccessful())
+ # Check if cleanup was called the right number of times.
+ self.assertEqual(['setUp', 'cleanup'], calls)
+
+ def test_not_applicable(self):
+ # run a test that is skipped because it's not applicable
+ class Test(tests.TestCase):
+ def not_applicable_test(self):
+ raise tests.TestNotApplicable('this test never runs')
+ out = StringIO()
+ runner = tests.TextTestRunner(stream=out, verbosity=2)
+ test = Test("not_applicable_test")
+ result = self.run_test_runner(runner, test)
+ self._log_file.write(out.getvalue())
+ self.assertTrue(result.wasSuccessful())
+ self.assertTrue(result.wasStrictlySuccessful())
+ self.assertContainsRe(out.getvalue(),
+ r'(?m)not_applicable_test * N/A')
+ self.assertContainsRe(out.getvalue(),
+ r'(?m)^ this test never runs')
+
+ def test_unsupported_features_listed(self):
+ """When unsupported features are encountered they are detailed."""
+ class Feature1(features.Feature):
+ def _probe(self): return False
+ class Feature2(features.Feature):
+ def _probe(self): return False
+ # create sample tests
+ test1 = SampleTestCase('_test_pass')
+ test1._test_needs_features = [Feature1()]
+ test2 = SampleTestCase('_test_pass')
+ test2._test_needs_features = [Feature2()]
+ test = unittest.TestSuite()
+ test.addTest(test1)
+ test.addTest(test2)
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream)
+ result = self.run_test_runner(runner, test)
+ lines = stream.getvalue().splitlines()
+ self.assertEqual([
+ 'OK',
+ "Missing feature 'Feature1' skipped 1 tests.",
+ "Missing feature 'Feature2' skipped 1 tests.",
+ ],
+ lines[-3:])
+
+ def test_verbose_test_count(self):
+ """A verbose test run reports the right test count at the start"""
+ suite = TestUtil.TestSuite([
+ unittest.FunctionTestCase(lambda:None),
+ unittest.FunctionTestCase(lambda:None)])
+ self.assertEqual(suite.countTestCases(), 2)
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream, verbosity=2)
+ # Need to use the CountingDecorator as that's what sets num_tests
+ result = self.run_test_runner(runner, tests.CountingDecorator(suite))
+ self.assertStartsWith(stream.getvalue(), "running 2 tests")
+
+ def test_startTestRun(self):
+ """run should call result.startTestRun()"""
+ calls = []
+ class LoggingDecorator(ExtendedToOriginalDecorator):
+ def startTestRun(self):
+ ExtendedToOriginalDecorator.startTestRun(self)
+ calls.append('startTestRun')
+ test = unittest.FunctionTestCase(lambda:None)
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream,
+ result_decorators=[LoggingDecorator])
+ result = self.run_test_runner(runner, test)
+ self.assertLength(1, calls)
+
+ def test_stopTestRun(self):
+ """run should call result.stopTestRun()"""
+ calls = []
+ class LoggingDecorator(ExtendedToOriginalDecorator):
+ def stopTestRun(self):
+ ExtendedToOriginalDecorator.stopTestRun(self)
+ calls.append('stopTestRun')
+ test = unittest.FunctionTestCase(lambda:None)
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream,
+ result_decorators=[LoggingDecorator])
+ result = self.run_test_runner(runner, test)
+ self.assertLength(1, calls)
+
+ def test_unicode_test_output_on_ascii_stream(self):
+ """Showing results should always succeed even on an ascii console"""
+ class FailureWithUnicode(tests.TestCase):
+ def test_log_unicode(self):
+ self.log(u"\u2606")
+ self.fail("Now print that log!")
+ out = StringIO()
+ self.overrideAttr(osutils, "get_terminal_encoding",
+ lambda trace=False: "ascii")
+ result = self.run_test_runner(tests.TextTestRunner(stream=out),
+ FailureWithUnicode("test_log_unicode"))
+ self.assertContainsRe(out.getvalue(),
+ "(?:Text attachment: )?log"
+ "(?:\n-+\n|: {{{)"
+ "\d+\.\d+ \\\\u2606"
+ "(?:\n-+\n|}}}\n)")
+
+
+class SampleTestCase(tests.TestCase):
+
+ def _test_pass(self):
+ pass
+
+class _TestException(Exception):
+ pass
+
+
+class TestTestCase(tests.TestCase):
+ """Tests that test the core bzrlib TestCase."""
+
+ def test_assertLength_matches_empty(self):
+ a_list = []
+ self.assertLength(0, a_list)
+
+ def test_assertLength_matches_nonempty(self):
+ a_list = [1, 2, 3]
+ self.assertLength(3, a_list)
+
+ def test_assertLength_fails_different(self):
+ a_list = []
+ self.assertRaises(AssertionError, self.assertLength, 1, a_list)
+
+ def test_assertLength_shows_sequence_in_failure(self):
+ a_list = [1, 2, 3]
+ exception = self.assertRaises(AssertionError, self.assertLength, 2,
+ a_list)
+ self.assertEqual('Incorrect length: wanted 2, got 3 for [1, 2, 3]',
+ exception.args[0])
+
+ def test_base_setUp_not_called_causes_failure(self):
+ class TestCaseWithBrokenSetUp(tests.TestCase):
+ def setUp(self):
+ pass # does not call TestCase.setUp
+ def test_foo(self):
+ pass
+ test = TestCaseWithBrokenSetUp('test_foo')
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertFalse(result.wasSuccessful())
+ self.assertEqual(1, result.testsRun)
+
+ def test_base_tearDown_not_called_causes_failure(self):
+ class TestCaseWithBrokenTearDown(tests.TestCase):
+ def tearDown(self):
+ pass # does not call TestCase.tearDown
+ def test_foo(self):
+ pass
+ test = TestCaseWithBrokenTearDown('test_foo')
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertFalse(result.wasSuccessful())
+ self.assertEqual(1, result.testsRun)
+
+ def test_debug_flags_sanitised(self):
+ """The bzrlib debug flags should be sanitised by setUp."""
+ if 'allow_debug' in tests.selftest_debug_flags:
+ raise tests.TestNotApplicable(
+ '-Eallow_debug option prevents debug flag sanitisation')
+ # we could set something and run a test that will check
+ # it gets sanitised, but this is probably sufficient for now:
+ # if someone runs the test with -Dsomething it will error.
+ flags = set()
+ if self._lock_check_thorough:
+ flags.add('strict_locks')
+ self.assertEqual(flags, bzrlib.debug.debug_flags)
+
+ def change_selftest_debug_flags(self, new_flags):
+ self.overrideAttr(tests, 'selftest_debug_flags', set(new_flags))
+
+ def test_allow_debug_flag(self):
+ """The -Eallow_debug flag prevents bzrlib.debug.debug_flags from being
+ sanitised (i.e. cleared) before running a test.
+ """
+ self.change_selftest_debug_flags(set(['allow_debug']))
+ bzrlib.debug.debug_flags = set(['a-flag'])
+ class TestThatRecordsFlags(tests.TestCase):
+ def test_foo(nested_self):
+ self.flags = set(bzrlib.debug.debug_flags)
+ test = TestThatRecordsFlags('test_foo')
+ test.run(self.make_test_result())
+ flags = set(['a-flag'])
+ if 'disable_lock_checks' not in tests.selftest_debug_flags:
+ flags.add('strict_locks')
+ self.assertEqual(flags, self.flags)
+
+ def test_disable_lock_checks(self):
+ """The -Edisable_lock_checks flag disables thorough checks."""
+ class TestThatRecordsFlags(tests.TestCase):
+ def test_foo(nested_self):
+ self.flags = set(bzrlib.debug.debug_flags)
+ self.test_lock_check_thorough = nested_self._lock_check_thorough
+ self.change_selftest_debug_flags(set())
+ test = TestThatRecordsFlags('test_foo')
+ test.run(self.make_test_result())
+ # By default we do strict lock checking and thorough lock/unlock
+ # tracking.
+ self.assertTrue(self.test_lock_check_thorough)
+ self.assertEqual(set(['strict_locks']), self.flags)
+ # Now set the disable_lock_checks flag, and show that this changed.
+ self.change_selftest_debug_flags(set(['disable_lock_checks']))
+ test = TestThatRecordsFlags('test_foo')
+ test.run(self.make_test_result())
+ self.assertFalse(self.test_lock_check_thorough)
+ self.assertEqual(set(), self.flags)
+
+ def test_this_fails_strict_lock_check(self):
+ class TestThatRecordsFlags(tests.TestCase):
+ def test_foo(nested_self):
+ self.flags1 = set(bzrlib.debug.debug_flags)
+ self.thisFailsStrictLockCheck()
+ self.flags2 = set(bzrlib.debug.debug_flags)
+ # Make sure lock checking is active
+ self.change_selftest_debug_flags(set())
+ test = TestThatRecordsFlags('test_foo')
+ test.run(self.make_test_result())
+ self.assertEqual(set(['strict_locks']), self.flags1)
+ self.assertEqual(set(), self.flags2)
+
+ def test_debug_flags_restored(self):
+ """The bzrlib debug flags should be restored to their original state
+ after the test was run, even if allow_debug is set.
+ """
+ self.change_selftest_debug_flags(set(['allow_debug']))
+ # Now run a test that modifies debug.debug_flags.
+ bzrlib.debug.debug_flags = set(['original-state'])
+ class TestThatModifiesFlags(tests.TestCase):
+ def test_foo(self):
+ bzrlib.debug.debug_flags = set(['modified'])
+ test = TestThatModifiesFlags('test_foo')
+ test.run(self.make_test_result())
+ self.assertEqual(set(['original-state']), bzrlib.debug.debug_flags)
+
+ def make_test_result(self):
+ """Get a test result that writes to the test log file."""
+ return tests.TextTestResult(self._log_file, descriptions=0, verbosity=1)
+
+ def inner_test(self):
+ # the inner child test
+ note("inner_test")
+
+ def outer_child(self):
+ # the outer child test
+ note("outer_start")
+ self.inner_test = TestTestCase("inner_child")
+ result = self.make_test_result()
+ self.inner_test.run(result)
+ note("outer finish")
+ self.addCleanup(osutils.delete_any, self._log_file_name)
+
+ def test_trace_nesting(self):
+ # this tests that each test case nests its trace facility correctly.
+ # we do this by running a test case manually. That test case (A)
+ # should set up a new log, log content to it, set up a child case (B),
+ # which should log independently, then case (A) should log a trailer
+ # and return.
+ # we use two nested children so that we can verify that the state of
+ # the logs after the outer child finishes is correct; with only one
+ # child, a bad clean-up routine in tearDown might trigger a fault in
+ # our test itself, whereas with two children we should instead see the
+ # bad result from inside our test.
+ # the outer child test
+ original_trace = bzrlib.trace._trace_file
+ outer_test = TestTestCase("outer_child")
+ result = self.make_test_result()
+ outer_test.run(result)
+ self.assertEqual(original_trace, bzrlib.trace._trace_file)
+
+ def method_that_times_a_bit_twice(self):
+ # call self.time twice to ensure it aggregates
+ self.time(time.sleep, 0.007)
+ self.time(time.sleep, 0.007)
+
+ def test_time_creates_benchmark_in_result(self):
+ """Test that the TestCase.time() method accumulates a benchmark time."""
+ sample_test = TestTestCase("method_that_times_a_bit_twice")
+ output_stream = StringIO()
+ result = bzrlib.tests.VerboseTestResult(
+ output_stream,
+ descriptions=0,
+ verbosity=2)
+ sample_test.run(result)
+ self.assertContainsRe(
+ output_stream.getvalue(),
+ r"\d+ms\*\n$")
+
+ def test_hooks_sanitised(self):
+ """The bzrlib hooks should be sanitised by setUp."""
+ # Note this test won't fail with hooks that the core library doesn't
+ # use - but it triggers with a plugin that adds hooks, so it's still a
+ # useful warning in that case.
+ self.assertEqual(bzrlib.branch.BranchHooks(), bzrlib.branch.Branch.hooks)
+ self.assertEqual(
+ bzrlib.smart.server.SmartServerHooks(),
+ bzrlib.smart.server.SmartTCPServer.hooks)
+ self.assertEqual(
+ bzrlib.commands.CommandHooks(), bzrlib.commands.Command.hooks)
+
+ def test__gather_lsprof_in_benchmarks(self):
+ """When _gather_lsprof_in_benchmarks is on, accumulate profile data.
+
+ Each self.time() call is individually and separately profiled.
+ """
+ self.requireFeature(features.lsprof_feature)
+ # overrides the class member with an instance member so no cleanup
+ # needed.
+ self._gather_lsprof_in_benchmarks = True
+ self.time(time.sleep, 0.000)
+ self.time(time.sleep, 0.003)
+ self.assertEqual(2, len(self._benchcalls))
+ self.assertEqual((time.sleep, (0.000,), {}), self._benchcalls[0][0])
+ self.assertEqual((time.sleep, (0.003,), {}), self._benchcalls[1][0])
+ self.assertIsInstance(self._benchcalls[0][1], bzrlib.lsprof.Stats)
+ self.assertIsInstance(self._benchcalls[1][1], bzrlib.lsprof.Stats)
+ del self._benchcalls[:]
+
+ def test_knownFailure(self):
+ """Self.knownFailure() should raise a KnownFailure exception."""
+ self.assertRaises(tests.KnownFailure, self.knownFailure, "A Failure")
+
+ def test_open_bzrdir_safe_roots(self):
+ # even a memory transport should fail to open when its url isn't
+ # permitted.
+ # Manually set one up (TestCase doesn't and shouldn't provide magic
+ # machinery)
+ transport_server = memory.MemoryServer()
+ transport_server.start_server()
+ self.addCleanup(transport_server.stop_server)
+ t = transport.get_transport_from_url(transport_server.get_url())
+ controldir.ControlDir.create(t.base)
+ self.assertRaises(errors.BzrError,
+ controldir.ControlDir.open_from_transport, t)
+ # But if we declare this as safe, we can open the bzrdir.
+ self.permit_url(t.base)
+ self._bzr_selftest_roots.append(t.base)
+ controldir.ControlDir.open_from_transport(t)
+
+ def test_requireFeature_available(self):
+ """self.requireFeature(available) is a no-op."""
+ class Available(features.Feature):
+ def _probe(self):return True
+ feature = Available()
+ self.requireFeature(feature)
+
+ def test_requireFeature_unavailable(self):
+ """self.requireFeature(unavailable) raises UnavailableFeature."""
+ class Unavailable(features.Feature):
+ def _probe(self):return False
+ feature = Unavailable()
+ self.assertRaises(tests.UnavailableFeature,
+ self.requireFeature, feature)
+
+ def test_run_no_parameters(self):
+ test = SampleTestCase('_test_pass')
+ test.run()
+
+ def test_run_enabled_unittest_result(self):
+ """Test we revert to regular behaviour when the test is enabled."""
+ test = SampleTestCase('_test_pass')
+ class EnabledFeature(object):
+ def available(self):
+ return True
+ test._test_needs_features = [EnabledFeature()]
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertEqual(1, result.testsRun)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.failures)
+
+ def test_run_disabled_unittest_result(self):
+ """Test our compatability for disabled tests with unittest results."""
+ test = SampleTestCase('_test_pass')
+ class DisabledFeature(object):
+ def available(self):
+ return False
+ test._test_needs_features = [DisabledFeature()]
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertEqual(1, result.testsRun)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.failures)
+
+ def test_run_disabled_supporting_result(self):
+ """Test disabled tests behaviour with support aware results."""
+ test = SampleTestCase('_test_pass')
+ class DisabledFeature(object):
+ def __eq__(self, other):
+ return isinstance(other, DisabledFeature)
+ def available(self):
+ return False
+ the_feature = DisabledFeature()
+ test._test_needs_features = [the_feature]
+ class InstrumentedTestResult(unittest.TestResult):
+ def __init__(self):
+ unittest.TestResult.__init__(self)
+ self.calls = []
+ def startTest(self, test):
+ self.calls.append(('startTest', test))
+ def stopTest(self, test):
+ self.calls.append(('stopTest', test))
+ def addNotSupported(self, test, feature):
+ self.calls.append(('addNotSupported', test, feature))
+ result = InstrumentedTestResult()
+ test.run(result)
+ case = result.calls[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addNotSupported', case, the_feature),
+ ('stopTest', case),
+ ],
+ result.calls)
+
+ def test_start_server_registers_url(self):
+ transport_server = memory.MemoryServer()
+ # A little strict, but unlikely to be changed soon.
+ self.assertEqual([], self._bzr_selftest_roots)
+ self.start_server(transport_server)
+ self.assertSubset([transport_server.get_url()],
+ self._bzr_selftest_roots)
+
+ def test_assert_list_raises_on_generator(self):
+ def generator_which_will_raise():
+ # This will not raise until after the first yield
+ yield 1
+ raise _TestException()
+
+ e = self.assertListRaises(_TestException, generator_which_will_raise)
+ self.assertIsInstance(e, _TestException)
+
+ e = self.assertListRaises(Exception, generator_which_will_raise)
+ self.assertIsInstance(e, _TestException)
+
+ def test_assert_list_raises_on_plain(self):
+ def plain_exception():
+ raise _TestException()
+ return []
+
+ e = self.assertListRaises(_TestException, plain_exception)
+ self.assertIsInstance(e, _TestException)
+
+ e = self.assertListRaises(Exception, plain_exception)
+ self.assertIsInstance(e, _TestException)
+
+ def test_assert_list_raises_assert_wrong_exception(self):
+ class _NotTestException(Exception):
+ pass
+
+ def wrong_exception():
+ raise _NotTestException()
+
+ def wrong_exception_generator():
+ yield 1
+ yield 2
+ raise _NotTestException()
+
+ # Wrong exceptions are not intercepted
+ self.assertRaises(_NotTestException,
+ self.assertListRaises, _TestException, wrong_exception)
+ self.assertRaises(_NotTestException,
+ self.assertListRaises, _TestException, wrong_exception_generator)
+
+ def test_assert_list_raises_no_exception(self):
+ def success():
+ return []
+
+ def success_generator():
+ yield 1
+ yield 2
+
+ self.assertRaises(AssertionError,
+ self.assertListRaises, _TestException, success)
+
+ self.assertRaises(AssertionError,
+ self.assertListRaises, _TestException, success_generator)
+
+ def test_overrideAttr_without_value(self):
+ self.test_attr = 'original' # Define a test attribute
+ obj = self # Make 'obj' visible to the embedded test
+ class Test(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.orig = self.overrideAttr(obj, 'test_attr')
+
+ def test_value(self):
+ self.assertEqual('original', self.orig)
+ self.assertEqual('original', obj.test_attr)
+ obj.test_attr = 'modified'
+ self.assertEqual('modified', obj.test_attr)
+
+ test = Test('test_value')
+ test.run(unittest.TestResult())
+ self.assertEqual('original', obj.test_attr)
+
+ def test_overrideAttr_with_value(self):
+ self.test_attr = 'original' # Define a test attribute
+ obj = self # Make 'obj' visible to the embedded test
+ class Test(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.orig = self.overrideAttr(obj, 'test_attr', new='modified')
+
+ def test_value(self):
+ self.assertEqual('original', self.orig)
+ self.assertEqual('modified', obj.test_attr)
+
+ test = Test('test_value')
+ test.run(unittest.TestResult())
+ self.assertEqual('original', obj.test_attr)
+
+ def test_recordCalls(self):
+ from bzrlib.tests import test_selftest
+ calls = self.recordCalls(
+ test_selftest, '_add_numbers')
+ self.assertEqual(test_selftest._add_numbers(2, 10),
+ 12)
+ self.assertEquals(calls, [((2, 10), {})])
+
+
+def _add_numbers(a, b):
+ return a + b
+
+
+class _MissingFeature(features.Feature):
+ def _probe(self):
+ return False
+missing_feature = _MissingFeature()
+
+
+def _get_test(name):
+ """Get an instance of a specific example test.
+
+ We protect this in a function so that the example tests don't auto-run
+ in the test suite.
+ """
+
+ class ExampleTests(tests.TestCase):
+
+ def test_fail(self):
+ mutter('this was a failing test')
+ self.fail('this test will fail')
+
+ def test_error(self):
+ mutter('this test errored')
+ raise RuntimeError('gotcha')
+
+ def test_missing_feature(self):
+ mutter('missing the feature')
+ self.requireFeature(missing_feature)
+
+ def test_skip(self):
+ mutter('this test will be skipped')
+ raise tests.TestSkipped('reason')
+
+ def test_success(self):
+ mutter('this test succeeds')
+
+ def test_xfail(self):
+ mutter('test with expected failure')
+ self.knownFailure('this_fails')
+
+ def test_unexpected_success(self):
+ mutter('test with unexpected success')
+ self.expectFailure('should_fail', lambda: None)
+
+ return ExampleTests(name)
+
+
+class TestTestCaseLogDetails(tests.TestCase):
+
+ def _run_test(self, test_name):
+ test = _get_test(test_name)
+ result = testtools.TestResult()
+ test.run(result)
+ return result
+
+ def test_fail_has_log(self):
+ result = self._run_test('test_fail')
+ self.assertEqual(1, len(result.failures))
+ result_content = result.failures[0][1]
+ self.assertContainsRe(result_content,
+ '(?m)^(?:Text attachment: )?log(?:$|: )')
+ self.assertContainsRe(result_content, 'this was a failing test')
+
+ def test_error_has_log(self):
+ result = self._run_test('test_error')
+ self.assertEqual(1, len(result.errors))
+ result_content = result.errors[0][1]
+ self.assertContainsRe(result_content,
+ '(?m)^(?:Text attachment: )?log(?:$|: )')
+ self.assertContainsRe(result_content, 'this test errored')
+
+ def test_skip_has_no_log(self):
+ result = self._run_test('test_skip')
+ self.assertEqual(['reason'], result.skip_reasons.keys())
+ skips = result.skip_reasons['reason']
+ self.assertEqual(1, len(skips))
+ test = skips[0]
+ self.assertFalse('log' in test.getDetails())
+
+ def test_missing_feature_has_no_log(self):
+ # testtools doesn't know about addNotSupported, so it just gets
+ # considered as a skip
+ result = self._run_test('test_missing_feature')
+ self.assertEqual([missing_feature], result.skip_reasons.keys())
+ skips = result.skip_reasons[missing_feature]
+ self.assertEqual(1, len(skips))
+ test = skips[0]
+ self.assertFalse('log' in test.getDetails())
+
+ def test_xfail_has_no_log(self):
+ result = self._run_test('test_xfail')
+ self.assertEqual(1, len(result.expectedFailures))
+ result_content = result.expectedFailures[0][1]
+ self.assertNotContainsRe(result_content,
+ '(?m)^(?:Text attachment: )?log(?:$|: )')
+ self.assertNotContainsRe(result_content, 'test with expected failure')
+
+ def test_unexpected_success_has_log(self):
+ result = self._run_test('test_unexpected_success')
+ self.assertEqual(1, len(result.unexpectedSuccesses))
+ # Inconsistency, unexpectedSuccesses is a list of tests,
+ # expectedFailures is a list of reasons?
+ test = result.unexpectedSuccesses[0]
+ details = test.getDetails()
+ self.assertTrue('log' in details)
+
+
+class TestTestCloning(tests.TestCase):
+ """Tests that test cloning of TestCases (as used by multiply_tests)."""
+
+ def test_cloned_testcase_does_not_share_details(self):
+ """A TestCase cloned with clone_test does not share mutable attributes
+ such as details or cleanups.
+ """
+ class Test(tests.TestCase):
+ def test_foo(self):
+ self.addDetail('foo', Content('text/plain', lambda: 'foo'))
+ orig_test = Test('test_foo')
+ cloned_test = tests.clone_test(orig_test, orig_test.id() + '(cloned)')
+ orig_test.run(unittest.TestResult())
+ self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
+ self.assertEqual(None, cloned_test.getDetails().get('foo'))
+
+ def test_double_apply_scenario_preserves_first_scenario(self):
+ """Applying two levels of scenarios to a test preserves the attributes
+ added by both scenarios.
+ """
+ class Test(tests.TestCase):
+ def test_foo(self):
+ pass
+ test = Test('test_foo')
+ scenarios_x = [('x=1', {'x': 1}), ('x=2', {'x': 2})]
+ scenarios_y = [('y=1', {'y': 1}), ('y=2', {'y': 2})]
+ suite = tests.multiply_tests(test, scenarios_x, unittest.TestSuite())
+ suite = tests.multiply_tests(suite, scenarios_y, unittest.TestSuite())
+ all_tests = list(tests.iter_suite_tests(suite))
+ self.assertLength(4, all_tests)
+ all_xys = sorted((t.x, t.y) for t in all_tests)
+ self.assertEqual([(1, 1), (1, 2), (2, 1), (2, 2)], all_xys)
+
+
+# NB: Don't delete this; it's not actually from 0.11!
+@deprecated_function(deprecated_in((0, 11, 0)))
+def sample_deprecated_function():
+ """A deprecated function to test applyDeprecated with."""
+ return 2
+
+
+def sample_undeprecated_function(a_param):
+ """A undeprecated function to test applyDeprecated with."""
+
+
+class ApplyDeprecatedHelper(object):
+ """A helper class for ApplyDeprecated tests."""
+
+ @deprecated_method(deprecated_in((0, 11, 0)))
+ def sample_deprecated_method(self, param_one):
+ """A deprecated method for testing with."""
+ return param_one
+
+ def sample_normal_method(self):
+ """A undeprecated method."""
+
+ @deprecated_method(deprecated_in((0, 10, 0)))
+ def sample_nested_deprecation(self):
+ return sample_deprecated_function()
+
+
+class TestExtraAssertions(tests.TestCase):
+ """Tests for new test assertions in bzrlib test suite"""
+
+ def test_assert_isinstance(self):
+ self.assertIsInstance(2, int)
+ self.assertIsInstance(u'', basestring)
+ e = self.assertRaises(AssertionError, self.assertIsInstance, None, int)
+ self.assertEquals(str(e),
+ "None is an instance of <type 'NoneType'> rather than <type 'int'>")
+ self.assertRaises(AssertionError, self.assertIsInstance, 23.3, int)
+ e = self.assertRaises(AssertionError,
+ self.assertIsInstance, None, int, "it's just not")
+ self.assertEquals(str(e),
+ "None is an instance of <type 'NoneType'> rather than <type 'int'>"
+ ": it's just not")
+
+ def test_assertEndsWith(self):
+ self.assertEndsWith('foo', 'oo')
+ self.assertRaises(AssertionError, self.assertEndsWith, 'o', 'oo')
+
+ def test_assertEqualDiff(self):
+ e = self.assertRaises(AssertionError,
+ self.assertEqualDiff, '', '\n')
+ self.assertEquals(str(e),
+ # Don't blink ! The '+' applies to the second string
+ 'first string is missing a final newline.\n+ \n')
+ e = self.assertRaises(AssertionError,
+ self.assertEqualDiff, '\n', '')
+ self.assertEquals(str(e),
+ # Don't blink ! The '-' applies to the second string
+ 'second string is missing a final newline.\n- \n')
+
+
+class TestDeprecations(tests.TestCase):
+
+ def test_applyDeprecated_not_deprecated(self):
+ sample_object = ApplyDeprecatedHelper()
+ # calling an undeprecated callable raises an assertion
+ self.assertRaises(AssertionError, self.applyDeprecated,
+ deprecated_in((0, 11, 0)),
+ sample_object.sample_normal_method)
+ self.assertRaises(AssertionError, self.applyDeprecated,
+ deprecated_in((0, 11, 0)),
+ sample_undeprecated_function, "a param value")
+ # calling a deprecated callable (function or method) with the wrong
+ # expected deprecation fails.
+ self.assertRaises(AssertionError, self.applyDeprecated,
+ deprecated_in((0, 10, 0)),
+ sample_object.sample_deprecated_method, "a param value")
+ self.assertRaises(AssertionError, self.applyDeprecated,
+ deprecated_in((0, 10, 0)),
+ sample_deprecated_function)
+ # calling a deprecated callable (function or method) with the right
+ # expected deprecation returns the functions result.
+ self.assertEqual("a param value",
+ self.applyDeprecated(deprecated_in((0, 11, 0)),
+ sample_object.sample_deprecated_method, "a param value"))
+ self.assertEqual(2, self.applyDeprecated(deprecated_in((0, 11, 0)),
+ sample_deprecated_function))
+ # calling a nested deprecation with the wrong deprecation version
+ # fails even if a deeper nested function was deprecated with the
+ # supplied version.
+ self.assertRaises(AssertionError, self.applyDeprecated,
+ deprecated_in((0, 11, 0)), sample_object.sample_nested_deprecation)
+ # calling a nested deprecation with the right deprecation value
+ # returns the calls result.
+ self.assertEqual(2, self.applyDeprecated(deprecated_in((0, 10, 0)),
+ sample_object.sample_nested_deprecation))
+
+ def test_callDeprecated(self):
+ def testfunc(be_deprecated, result=None):
+ if be_deprecated is True:
+ symbol_versioning.warn('i am deprecated', DeprecationWarning,
+ stacklevel=1)
+ return result
+ result = self.callDeprecated(['i am deprecated'], testfunc, True)
+ self.assertIs(None, result)
+ result = self.callDeprecated([], testfunc, False, 'result')
+ self.assertEqual('result', result)
+ self.callDeprecated(['i am deprecated'], testfunc, be_deprecated=True)
+ self.callDeprecated([], testfunc, be_deprecated=False)
+
+
+class TestWarningTests(tests.TestCase):
+ """Tests for calling methods that raise warnings."""
+
+ def test_callCatchWarnings(self):
+ def meth(a, b):
+ warnings.warn("this is your last warning")
+ return a + b
+ wlist, result = self.callCatchWarnings(meth, 1, 2)
+ self.assertEquals(3, result)
+ # we would just like to compare them directly, but UserWarning doesn't
+ # implement __eq__ well
+ w0, = wlist
+ self.assertIsInstance(w0, UserWarning)
+ self.assertEquals("this is your last warning", str(w0))
+
+
+class TestConvenienceMakers(tests.TestCaseWithTransport):
+ """Test for the make_* convenience functions."""
+
+ def test_make_branch_and_tree_with_format(self):
+ # we should be able to supply a format to make_branch_and_tree
+ self.make_branch_and_tree('a', format=bzrlib.bzrdir.BzrDirMetaFormat1())
+ self.assertIsInstance(bzrlib.controldir.ControlDir.open('a')._format,
+ bzrlib.bzrdir.BzrDirMetaFormat1)
+
+ def test_make_branch_and_memory_tree(self):
+ # we should be able to get a new branch and a mutable tree from
+ # TestCaseWithTransport
+ tree = self.make_branch_and_memory_tree('a')
+ self.assertIsInstance(tree, bzrlib.memorytree.MemoryTree)
+
+ def test_make_tree_for_local_vfs_backed_transport(self):
+ # make_branch_and_tree has to use local branch and repositories
+ # when the vfs transport and local disk are colocated, even if
+ # a different transport is in use for url generation.
+ self.transport_server = test_server.FakeVFATServer
+ self.assertFalse(self.get_url('t1').startswith('file://'))
+ tree = self.make_branch_and_tree('t1')
+ base = tree.bzrdir.root_transport.base
+ self.assertStartsWith(base, 'file://')
+ self.assertEquals(tree.bzrdir.root_transport,
+ tree.branch.bzrdir.root_transport)
+ self.assertEquals(tree.bzrdir.root_transport,
+ tree.branch.repository.bzrdir.root_transport)
+
+
+class SelfTestHelper(object):
+
+ def run_selftest(self, **kwargs):
+ """Run selftest returning its output."""
+ output = StringIO()
+ old_transport = bzrlib.tests.default_transport
+ old_root = tests.TestCaseWithMemoryTransport.TEST_ROOT
+ tests.TestCaseWithMemoryTransport.TEST_ROOT = None
+ try:
+ self.assertEqual(True, tests.selftest(stream=output, **kwargs))
+ finally:
+ bzrlib.tests.default_transport = old_transport
+ tests.TestCaseWithMemoryTransport.TEST_ROOT = old_root
+ output.seek(0)
+ return output
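+ # A minimal usage sketch (factory is a placeholder; the concrete
+ # patterns are in TestSelftest below):
+ #   output = self.run_selftest(test_suite_factory=factory,
+ #       list_only=True)
+ #   lines = output.readlines()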
+
+
+class TestSelftest(tests.TestCase, SelfTestHelper):
+ """Tests of bzrlib.tests.selftest."""
+
+ def test_selftest_benchmark_parameter_invokes_test_suite__benchmark__(self):
+ factory_called = []
+ def factory():
+ factory_called.append(True)
+ return TestUtil.TestSuite()
+ out = StringIO()
+ err = StringIO()
+ self.apply_redirected(out, err, None, bzrlib.tests.selftest,
+ test_suite_factory=factory)
+ self.assertEqual([True], factory_called)
+
+ def factory(self):
+ """A test suite factory."""
+ class Test(tests.TestCase):
+ def a(self):
+ pass
+ def b(self):
+ pass
+ def c(self):
+ pass
+ return TestUtil.TestSuite([Test("a"), Test("b"), Test("c")])
+
+ def test_list_only(self):
+ output = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True)
+ self.assertEqual(3, len(output.readlines()))
+
+ def test_list_only_filtered(self):
+ output = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True, pattern="Test.b")
+ self.assertEndsWith(output.getvalue(), "Test.b\n")
+ self.assertLength(1, output.readlines())
+
+ def test_list_only_excludes(self):
+ output = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True, exclude_pattern="Test.b")
+ self.assertNotContainsRe(output.getvalue(), "Test.b")
+ self.assertLength(2, output.readlines())
+
+ def test_lsprof_tests(self):
+ self.requireFeature(features.lsprof_feature)
+ results = []
+ class Test(object):
+ def __call__(test, result):
+ test.run(result)
+ def run(test, result):
+ results.append(result)
+ def countTestCases(self):
+ return 1
+ self.run_selftest(test_suite_factory=Test, lsprof_tests=True)
+ self.assertLength(1, results)
+ self.assertIsInstance(results.pop(), ExtendedToOriginalDecorator)
+
+ def test_random(self):
+ # test randomising by listing a number of tests.
+ output_123 = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True, random_seed="123")
+ output_234 = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True, random_seed="234")
+ self.assertNotEqual(output_123, output_234)
+ # "Randominzing test order..\n\n
+ self.assertLength(5, output_123.readlines())
+ self.assertLength(5, output_234.readlines())
+
+ def test_random_reuse_is_same_order(self):
+ # test randomising by listing a number of tests.
+ expected = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True, random_seed="123")
+ repeated = self.run_selftest(test_suite_factory=self.factory,
+ list_only=True, random_seed="123")
+ self.assertEqual(expected.getvalue(), repeated.getvalue())
+
+ def test_runner_class(self):
+ self.requireFeature(features.subunit)
+ from subunit import ProtocolTestCase
+ stream = self.run_selftest(runner_class=tests.SubUnitBzrRunner,
+ test_suite_factory=self.factory)
+ test = ProtocolTestCase(stream)
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertEqual(3, result.testsRun)
+
+ def test_starting_with_single_argument(self):
+ output = self.run_selftest(test_suite_factory=self.factory,
+ starting_with=['bzrlib.tests.test_selftest.Test.a'],
+ list_only=True)
+ self.assertEqual('bzrlib.tests.test_selftest.Test.a\n',
+ output.getvalue())
+
+ def test_starting_with_multiple_argument(self):
+ output = self.run_selftest(test_suite_factory=self.factory,
+ starting_with=['bzrlib.tests.test_selftest.Test.a',
+ 'bzrlib.tests.test_selftest.Test.b'],
+ list_only=True)
+ self.assertEqual('bzrlib.tests.test_selftest.Test.a\n'
+ 'bzrlib.tests.test_selftest.Test.b\n',
+ output.getvalue())
+
+ def check_transport_set(self, transport_server):
+ captured_transport = []
+ def seen_transport(a_transport):
+ captured_transport.append(a_transport)
+ class Capture(tests.TestCase):
+ def a(self):
+ seen_transport(bzrlib.tests.default_transport)
+ def factory():
+ return TestUtil.TestSuite([Capture("a")])
+ self.run_selftest(transport=transport_server, test_suite_factory=factory)
+ self.assertEqual(transport_server, captured_transport[0])
+
+ def test_transport_sftp(self):
+ self.requireFeature(features.paramiko)
+ from bzrlib.tests import stub_sftp
+ self.check_transport_set(stub_sftp.SFTPAbsoluteServer)
+
+ def test_transport_memory(self):
+ self.check_transport_set(memory.MemoryServer)
+
+
+class TestSelftestWithIdList(tests.TestCaseInTempDir, SelfTestHelper):
+ # Does IO: reads test.list
+
+ def test_load_list(self):
+ # Provide a list with one test - this test.
+ test_id_line = '%s\n' % self.id()
+ self.build_tree_contents([('test.list', test_id_line)])
+ # And generate a list of the tests in the suite.
+ stream = self.run_selftest(load_list='test.list', list_only=True)
+ self.assertEqual(test_id_line, stream.getvalue())
+
+ def test_load_unknown(self):
+ # Provide a list with one test - this test.
+ # And generate a list of the tests in the suite.
+ err = self.assertRaises(errors.NoSuchFile, self.run_selftest,
+ load_list='missing file name', list_only=True)
+
+
+class TestSubunitLogDetails(tests.TestCase, SelfTestHelper):
+
+ _test_needs_features = [features.subunit]
+
+ def run_subunit_stream(self, test_name):
+ from subunit import ProtocolTestCase
+ def factory():
+ return TestUtil.TestSuite([_get_test(test_name)])
+ stream = self.run_selftest(runner_class=tests.SubUnitBzrRunner,
+ test_suite_factory=factory)
+ test = ProtocolTestCase(stream)
+ result = testtools.TestResult()
+ test.run(result)
+ content = stream.getvalue()
+ return content, result
+
+ def test_fail_has_log(self):
+ content, result = self.run_subunit_stream('test_fail')
+ self.assertEqual(1, len(result.failures))
+ self.assertContainsRe(content, '(?m)^log$')
+ self.assertContainsRe(content, 'this test will fail')
+
+ def test_error_has_log(self):
+ content, result = self.run_subunit_stream('test_error')
+ self.assertContainsRe(content, '(?m)^log$')
+ self.assertContainsRe(content, 'this test errored')
+
+ def test_skip_has_no_log(self):
+ content, result = self.run_subunit_stream('test_skip')
+ self.assertNotContainsRe(content, '(?m)^log$')
+ self.assertNotContainsRe(content, 'this test will be skipped')
+ self.assertEqual(['reason'], result.skip_reasons.keys())
+ skips = result.skip_reasons['reason']
+ self.assertEqual(1, len(skips))
+ test = skips[0]
+ # RemotedTestCase doesn't preserve the "details"
+ ## self.assertFalse('log' in test.getDetails())
+
+ def test_missing_feature_has_no_log(self):
+ content, result = self.run_subunit_stream('test_missing_feature')
+ self.assertNotContainsRe(content, '(?m)^log$')
+ self.assertNotContainsRe(content, 'missing the feature')
+ self.assertEqual(['_MissingFeature\n'], result.skip_reasons.keys())
+ skips = result.skip_reasons['_MissingFeature\n']
+ self.assertEqual(1, len(skips))
+ test = skips[0]
+ # RemotedTestCase doesn't preserve the "details"
+ ## self.assertFalse('log' in test.getDetails())
+
+ def test_xfail_has_no_log(self):
+ content, result = self.run_subunit_stream('test_xfail')
+ self.assertNotContainsRe(content, '(?m)^log$')
+ self.assertNotContainsRe(content, 'test with expected failure')
+ self.assertEqual(1, len(result.expectedFailures))
+ result_content = result.expectedFailures[0][1]
+ self.assertNotContainsRe(result_content,
+ '(?m)^(?:Text attachment: )?log(?:$|: )')
+ self.assertNotContainsRe(result_content, 'test with expected failure')
+
+ def test_unexpected_success_has_log(self):
+ content, result = self.run_subunit_stream('test_unexpected_success')
+ self.assertContainsRe(content, '(?m)^log$')
+ self.assertContainsRe(content, 'test with unexpected success')
+ # GZ 2011-05-18: Old versions of subunit treat unexpected success as a
+ # success; if a min version check is added, remove this
+ from subunit import TestProtocolClient as _Client
+ if _Client.addUnexpectedSuccess.im_func is _Client.addSuccess.im_func:
+ self.expectFailure('subunit treats "unexpectedSuccess"'
+ ' as a plain success',
+ self.assertEqual, 1, len(result.unexpectedSuccesses))
+ self.assertEqual(1, len(result.unexpectedSuccesses))
+ test = result.unexpectedSuccesses[0]
+ # RemotedTestCase doesn't preserve the "details"
+ ## self.assertTrue('log' in test.getDetails())
+
+ def test_success_has_no_log(self):
+ content, result = self.run_subunit_stream('test_success')
+ self.assertEqual(1, result.testsRun)
+ self.assertNotContainsRe(content, '(?m)^log$')
+ self.assertNotContainsRe(content, 'this test succeeds')
+
+
+class TestRunBzr(tests.TestCase):
+
+ out = ''
+ err = ''
+
+ def _run_bzr_core(self, argv, retcode=0, encoding=None, stdin=None,
+ working_dir=None):
+ """Override _run_bzr_core to test how it is invoked by run_bzr.
+
+ Attempts to run bzr from inside this class don't actually run it.
+
+ We test how run_bzr actually invokes bzr in another location. Here we
+ only need to test that it passes the right parameters to _run_bzr_core.
+ """
+ self.argv = list(argv)
+ self.retcode = retcode
+ self.encoding = encoding
+ self.stdin = stdin
+ self.working_dir = working_dir
+ return self.retcode, self.out, self.err
+
+ def test_run_bzr_error(self):
+ self.out = "It sure does!\n"
+ out, err = self.run_bzr_error(['^$'], ['rocks'], retcode=34)
+ self.assertEqual(['rocks'], self.argv)
+ self.assertEqual(34, self.retcode)
+ self.assertEqual('It sure does!\n', out)
+ self.assertEquals(out, self.out)
+ self.assertEqual('', err)
+ self.assertEquals(err, self.err)
+
+ def test_run_bzr_error_regexes(self):
+ self.out = ''
+ self.err = "bzr: ERROR: foobarbaz is not versioned"
+ out, err = self.run_bzr_error(
+ ["bzr: ERROR: foobarbaz is not versioned"],
+ ['file-id', 'foobarbaz'])
+
+ def test_encoding(self):
+ """Test that run_bzr passes encoding to _run_bzr_core"""
+ self.run_bzr('foo bar')
+ self.assertEqual(None, self.encoding)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ self.run_bzr('foo bar', encoding='baz')
+ self.assertEqual('baz', self.encoding)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ def test_retcode(self):
+ """Test that run_bzr passes retcode to _run_bzr_core"""
+ # Default is retcode == 0
+ self.run_bzr('foo bar')
+ self.assertEqual(0, self.retcode)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ self.run_bzr('foo bar', retcode=1)
+ self.assertEqual(1, self.retcode)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ self.run_bzr('foo bar', retcode=None)
+ self.assertEqual(None, self.retcode)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ self.run_bzr(['foo', 'bar'], retcode=3)
+ self.assertEqual(3, self.retcode)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ def test_stdin(self):
+ # test that the stdin keyword to run_bzr is passed through to
+ # _run_bzr_core as-is. We do this by overriding
+ # _run_bzr_core in this class, and then calling run_bzr,
+ # which is a convenience function for _run_bzr_core, so
+ # should invoke it.
+ self.run_bzr('foo bar', stdin='gam')
+ self.assertEqual('gam', self.stdin)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ self.run_bzr('foo bar', stdin='zippy')
+ self.assertEqual('zippy', self.stdin)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ def test_working_dir(self):
+ """Test that run_bzr passes working_dir to _run_bzr_core"""
+ self.run_bzr('foo bar')
+ self.assertEqual(None, self.working_dir)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ self.run_bzr('foo bar', working_dir='baz')
+ self.assertEqual('baz', self.working_dir)
+ self.assertEqual(['foo', 'bar'], self.argv)
+
+ def test_reject_extra_keyword_arguments(self):
+ self.assertRaises(TypeError, self.run_bzr, "foo bar",
+ error_regex=['error message'])
+
+
+class TestRunBzrCaptured(tests.TestCaseWithTransport):
+ # Does IO when testing the working_dir parameter.
+
+ def apply_redirected(self, stdin=None, stdout=None, stderr=None,
+ a_callable=None, *args, **kwargs):
+ self.stdin = stdin
+ self.factory_stdin = getattr(bzrlib.ui.ui_factory, "stdin", None)
+ self.factory = bzrlib.ui.ui_factory
+ self.working_dir = osutils.getcwd()
+ stdout.write('foo\n')
+ stderr.write('bar\n')
+ return 0
+
+ def test_stdin(self):
+ # test that the stdin keyword to _run_bzr_core is passed through to
+ # apply_redirected as a StringIO. We do this by overriding
+ # apply_redirected in this class, and then calling _run_bzr_core,
+ # which calls apply_redirected.
+ self.run_bzr(['foo', 'bar'], stdin='gam')
+ self.assertEqual('gam', self.stdin.read())
+ self.assertTrue(self.stdin is self.factory_stdin)
+ self.run_bzr(['foo', 'bar'], stdin='zippy')
+ self.assertEqual('zippy', self.stdin.read())
+ self.assertTrue(self.stdin is self.factory_stdin)
+
+ def test_ui_factory(self):
+ # each invocation of self.run_bzr should get its
+ # own UI factory, which is an instance of TestUIFactory,
+ # with stdin, stdout and stderr attached to the stdin,
+ # stdout and stderr of the invoked run_bzr
+ current_factory = bzrlib.ui.ui_factory
+ self.run_bzr(['foo'])
+ self.assertFalse(current_factory is self.factory)
+ self.assertNotEqual(sys.stdout, self.factory.stdout)
+ self.assertNotEqual(sys.stderr, self.factory.stderr)
+ self.assertEqual('foo\n', self.factory.stdout.getvalue())
+ self.assertEqual('bar\n', self.factory.stderr.getvalue())
+ self.assertIsInstance(self.factory, tests.TestUIFactory)
+
+ def test_working_dir(self):
+ self.build_tree(['one/', 'two/'])
+ cwd = osutils.getcwd()
+
+ # Default is to work in the current directory
+ self.run_bzr(['foo', 'bar'])
+ self.assertEqual(cwd, self.working_dir)
+
+ self.run_bzr(['foo', 'bar'], working_dir=None)
+ self.assertEqual(cwd, self.working_dir)
+
+ # The function should be run in the alternative directory
+ # but afterwards the current working dir shouldn't be changed
+ self.run_bzr(['foo', 'bar'], working_dir='one')
+ self.assertNotEqual(cwd, self.working_dir)
+ self.assertEndsWith(self.working_dir, 'one')
+ self.assertEqual(cwd, osutils.getcwd())
+
+ self.run_bzr(['foo', 'bar'], working_dir='two')
+ self.assertNotEqual(cwd, self.working_dir)
+ self.assertEndsWith(self.working_dir, 'two')
+ self.assertEqual(cwd, osutils.getcwd())
+
+
+class StubProcess(object):
+ """A stub process for testing run_bzr_subprocess."""
+
+ def __init__(self, out="", err="", retcode=0):
+ self.out = out
+ self.err = err
+ self.returncode = retcode
+
+ def communicate(self):
+ return self.out, self.err
+
+
+class TestWithFakedStartBzrSubprocess(tests.TestCaseWithTransport):
+ """Base class for tests testing how we might run bzr."""
+
+ def setUp(self):
+ tests.TestCaseWithTransport.setUp(self)
+ self.subprocess_calls = []
+
+ def start_bzr_subprocess(self, process_args, env_changes=None,
+ skip_if_plan_to_signal=False,
+ working_dir=None,
+ allow_plugins=False):
+ """capture what run_bzr_subprocess tries to do."""
+ self.subprocess_calls.append({'process_args':process_args,
+ 'env_changes':env_changes,
+ 'skip_if_plan_to_signal':skip_if_plan_to_signal,
+ 'working_dir':working_dir, 'allow_plugins':allow_plugins})
+ return self.next_subprocess
+
+
+class TestRunBzrSubprocess(TestWithFakedStartBzrSubprocess):
+
+ def assertRunBzrSubprocess(self, expected_args, process, *args, **kwargs):
+ """Run run_bzr_subprocess with args and kwargs using a stubbed process.
+
+        Inside TestRunBzrSubprocess we use a stub start_bzr_subprocess
+ that will return static results. This assertion method populates those
+ results and also checks the arguments run_bzr_subprocess generates.
+ """
+ self.next_subprocess = process
+ try:
+ result = self.run_bzr_subprocess(*args, **kwargs)
+ except:
+ self.next_subprocess = None
+ for key, expected in expected_args.iteritems():
+ self.assertEqual(expected, self.subprocess_calls[-1][key])
+ raise
+ else:
+ self.next_subprocess = None
+ for key, expected in expected_args.iteritems():
+ self.assertEqual(expected, self.subprocess_calls[-1][key])
+ return result
+
+ def test_run_bzr_subprocess(self):
+ """The run_bzr_helper_external command behaves nicely."""
+ self.assertRunBzrSubprocess({'process_args':['--version']},
+ StubProcess(), '--version')
+ self.assertRunBzrSubprocess({'process_args':['--version']},
+ StubProcess(), ['--version'])
+ # retcode=None disables retcode checking
+ result = self.assertRunBzrSubprocess({},
+ StubProcess(retcode=3), '--version', retcode=None)
+ result = self.assertRunBzrSubprocess({},
+ StubProcess(out="is free software"), '--version')
+ self.assertContainsRe(result[0], 'is free software')
+        # Running a subcommand that does not exist results in an error
+ self.assertRaises(AssertionError, self.assertRunBzrSubprocess,
+ {'process_args':['--versionn']}, StubProcess(retcode=3),
+ '--versionn')
+ # Unless it is told to expect the error from the subprocess
+ result = self.assertRunBzrSubprocess({},
+ StubProcess(retcode=3), '--versionn', retcode=3)
+ # Or to ignore retcode checking
+ result = self.assertRunBzrSubprocess({},
+ StubProcess(err="unknown command", retcode=3), '--versionn',
+ retcode=None)
+ self.assertContainsRe(result[1], 'unknown command')
+
+ def test_env_change_passes_through(self):
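+        # env_changes maps variable names to new values; a value of None asks
+        # for the variable to be removed from the child environment (see the
+        # env_del tests in TestStartBzrSubProcess below).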
+ self.assertRunBzrSubprocess(
+ {'env_changes':{'new':'value', 'changed':'newvalue', 'deleted':None}},
+ StubProcess(), '',
+ env_changes={'new':'value', 'changed':'newvalue', 'deleted':None})
+
+ def test_no_working_dir_passed_as_None(self):
+ self.assertRunBzrSubprocess({'working_dir': None}, StubProcess(), '')
+
+ def test_no_working_dir_passed_through(self):
+ self.assertRunBzrSubprocess({'working_dir': 'dir'}, StubProcess(), '',
+ working_dir='dir')
+
+ def test_run_bzr_subprocess_no_plugins(self):
+ self.assertRunBzrSubprocess({'allow_plugins': False},
+ StubProcess(), '')
+
+ def test_allow_plugins(self):
+ self.assertRunBzrSubprocess({'allow_plugins': True},
+ StubProcess(), '', allow_plugins=True)
+
+
+class TestFinishBzrSubprocess(TestWithFakedStartBzrSubprocess):
+
+ def test_finish_bzr_subprocess_with_error(self):
+ """finish_bzr_subprocess allows specification of the desired exit code.
+ """
+ process = StubProcess(err="unknown command", retcode=3)
+ result = self.finish_bzr_subprocess(process, retcode=3)
+ self.assertEqual('', result[0])
+ self.assertContainsRe(result[1], 'unknown command')
+
+ def test_finish_bzr_subprocess_ignoring_retcode(self):
+ """finish_bzr_subprocess allows the exit code to be ignored."""
+ process = StubProcess(err="unknown command", retcode=3)
+ result = self.finish_bzr_subprocess(process, retcode=None)
+ self.assertEqual('', result[0])
+ self.assertContainsRe(result[1], 'unknown command')
+
+ def test_finish_subprocess_with_unexpected_retcode(self):
+ """finish_bzr_subprocess raises self.failureException if the retcode is
+ not the expected one.
+ """
+ process = StubProcess(err="unknown command", retcode=3)
+ self.assertRaises(self.failureException, self.finish_bzr_subprocess,
+ process)
+
+
+class _DontSpawnProcess(Exception):
+ """A simple exception which just allows us to skip unnecessary steps"""
+
+
+class TestStartBzrSubProcess(tests.TestCase):
+ """Stub test start_bzr_subprocess."""
+
+ def _subprocess_log_cleanup(self):
+ """Inhibits the base version as we don't produce a log file."""
+
+ def _popen(self, *args, **kwargs):
+ """Override the base version to record the command that is run.
+
+ From there we can ensure it is correct without spawning a real process.
+ """
+ self.check_popen_state()
+ self._popen_args = args
+ self._popen_kwargs = kwargs
+ raise _DontSpawnProcess()
+
+ def check_popen_state(self):
+ """Replace to make assertions when popen is called."""
+
+ def test_run_bzr_subprocess_no_plugins(self):
+ self.assertRaises(_DontSpawnProcess, self.start_bzr_subprocess, [])
+ command = self._popen_args[0]
+ self.assertEqual(sys.executable, command[0])
+ self.assertEqual(self.get_bzr_path(), command[1])
+ self.assertEqual(['--no-plugins'], command[2:])
+
+ def test_allow_plugins(self):
+ self.assertRaises(_DontSpawnProcess, self.start_bzr_subprocess, [],
+ allow_plugins=True)
+ command = self._popen_args[0]
+ self.assertEqual([], command[2:])
+
+ def test_set_env(self):
+ self.assertFalse('EXISTANT_ENV_VAR' in os.environ)
+ # set in the child
+ def check_environment():
+ self.assertEqual('set variable', os.environ['EXISTANT_ENV_VAR'])
+ self.check_popen_state = check_environment
+ self.assertRaises(_DontSpawnProcess, self.start_bzr_subprocess, [],
+ env_changes={'EXISTANT_ENV_VAR':'set variable'})
+        # not set in the parent
+ self.assertFalse('EXISTANT_ENV_VAR' in os.environ)
+
+ def test_run_bzr_subprocess_env_del(self):
+ """run_bzr_subprocess can remove environment variables too."""
+ self.assertFalse('EXISTANT_ENV_VAR' in os.environ)
+ def check_environment():
+ self.assertFalse('EXISTANT_ENV_VAR' in os.environ)
+ os.environ['EXISTANT_ENV_VAR'] = 'set variable'
+ self.check_popen_state = check_environment
+ self.assertRaises(_DontSpawnProcess, self.start_bzr_subprocess, [],
+ env_changes={'EXISTANT_ENV_VAR':None})
+ # Still set in parent
+ self.assertEqual('set variable', os.environ['EXISTANT_ENV_VAR'])
+ del os.environ['EXISTANT_ENV_VAR']
+
+ def test_env_del_missing(self):
+ self.assertFalse('NON_EXISTANT_ENV_VAR' in os.environ)
+ def check_environment():
+ self.assertFalse('NON_EXISTANT_ENV_VAR' in os.environ)
+ self.check_popen_state = check_environment
+ self.assertRaises(_DontSpawnProcess, self.start_bzr_subprocess, [],
+ env_changes={'NON_EXISTANT_ENV_VAR':None})
+
+ def test_working_dir(self):
+ """Test that we can specify the working dir for the child"""
+ orig_getcwd = osutils.getcwd
+ orig_chdir = os.chdir
+ chdirs = []
+ def chdir(path):
+ chdirs.append(path)
+ self.overrideAttr(os, 'chdir', chdir)
+ def getcwd():
+ return 'current'
+ self.overrideAttr(osutils, 'getcwd', getcwd)
+ self.assertRaises(_DontSpawnProcess, self.start_bzr_subprocess, [],
+ working_dir='foo')
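+        # start_bzr_subprocess should chdir into the requested working dir
+        # before spawning and back to the original directory (reported by the
+        # stubbed getcwd as 'current') afterwards.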
+ self.assertEqual(['foo', 'current'], chdirs)
+
+ def test_get_bzr_path_with_cwd_bzrlib(self):
+ self.get_source_path = lambda: ""
+ self.overrideAttr(os.path, "isfile", lambda path: True)
+ self.assertEqual(self.get_bzr_path(), "bzr")
+
+
+class TestActuallyStartBzrSubprocess(tests.TestCaseWithTransport):
+ """Tests that really need to do things with an external bzr."""
+
+ def test_start_and_stop_bzr_subprocess_send_signal(self):
+ """finish_bzr_subprocess raises self.failureException if the retcode is
+ not the expected one.
+ """
+ self.disable_missing_extensions_warning()
+ process = self.start_bzr_subprocess(['wait-until-signalled'],
+ skip_if_plan_to_signal=True)
+ self.assertEqual('running\n', process.stdout.readline())
+ result = self.finish_bzr_subprocess(process, send_signal=signal.SIGINT,
+ retcode=3)
+ self.assertEqual('', result[0])
+ self.assertEqual('bzr: interrupted\n', result[1])
+
+
+class TestSelftestFiltering(tests.TestCase):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.suite = TestUtil.TestSuite()
+ self.loader = TestUtil.TestLoader()
+ self.suite.addTest(self.loader.loadTestsFromModule(
+ sys.modules['bzrlib.tests.test_selftest']))
+ self.all_names = _test_ids(self.suite)
+
+ def test_condition_id_re(self):
+ test_name = ('bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_condition_id_re')
+ filtered_suite = tests.filter_suite_by_condition(
+ self.suite, tests.condition_id_re('test_condition_id_re'))
+ self.assertEqual([test_name], _test_ids(filtered_suite))
+
+ def test_condition_id_in_list(self):
+ test_names = ['bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_condition_id_in_list']
+ id_list = tests.TestIdList(test_names)
+ filtered_suite = tests.filter_suite_by_condition(
+ self.suite, tests.condition_id_in_list(id_list))
+ my_pattern = 'TestSelftestFiltering.*test_condition_id_in_list'
+ re_filtered = tests.filter_suite_by_re(self.suite, my_pattern)
+ self.assertEqual(_test_ids(re_filtered), _test_ids(filtered_suite))
+
+ def test_condition_id_startswith(self):
+ klass = 'bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ start1 = klass + 'test_condition_id_starts'
+ start2 = klass + 'test_condition_id_in'
+ test_names = [ klass + 'test_condition_id_in_list',
+ klass + 'test_condition_id_startswith',
+ ]
+ filtered_suite = tests.filter_suite_by_condition(
+ self.suite, tests.condition_id_startswith([start1, start2]))
+ self.assertEqual(test_names, _test_ids(filtered_suite))
+
+ def test_condition_isinstance(self):
+ filtered_suite = tests.filter_suite_by_condition(
+ self.suite, tests.condition_isinstance(self.__class__))
+ class_pattern = 'bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ re_filtered = tests.filter_suite_by_re(self.suite, class_pattern)
+ self.assertEqual(_test_ids(re_filtered), _test_ids(filtered_suite))
+
+ def test_exclude_tests_by_condition(self):
+ excluded_name = ('bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_exclude_tests_by_condition')
+ filtered_suite = tests.exclude_tests_by_condition(self.suite,
+ lambda x:x.id() == excluded_name)
+ self.assertEqual(len(self.all_names) - 1,
+ filtered_suite.countTestCases())
+ self.assertFalse(excluded_name in _test_ids(filtered_suite))
+ remaining_names = list(self.all_names)
+ remaining_names.remove(excluded_name)
+ self.assertEqual(remaining_names, _test_ids(filtered_suite))
+
+ def test_exclude_tests_by_re(self):
+ self.all_names = _test_ids(self.suite)
+ filtered_suite = tests.exclude_tests_by_re(self.suite,
+ 'exclude_tests_by_re')
+ excluded_name = ('bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_exclude_tests_by_re')
+ self.assertEqual(len(self.all_names) - 1,
+ filtered_suite.countTestCases())
+ self.assertFalse(excluded_name in _test_ids(filtered_suite))
+ remaining_names = list(self.all_names)
+ remaining_names.remove(excluded_name)
+ self.assertEqual(remaining_names, _test_ids(filtered_suite))
+
+ def test_filter_suite_by_condition(self):
+ test_name = ('bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_filter_suite_by_condition')
+ filtered_suite = tests.filter_suite_by_condition(self.suite,
+ lambda x:x.id() == test_name)
+ self.assertEqual([test_name], _test_ids(filtered_suite))
+
+ def test_filter_suite_by_re(self):
+ filtered_suite = tests.filter_suite_by_re(self.suite,
+ 'test_filter_suite_by_r')
+ filtered_names = _test_ids(filtered_suite)
+ self.assertEqual(filtered_names, ['bzrlib.tests.test_selftest.'
+ 'TestSelftestFiltering.test_filter_suite_by_re'])
+
+ def test_filter_suite_by_id_list(self):
+ test_list = ['bzrlib.tests.test_selftest.'
+ 'TestSelftestFiltering.test_filter_suite_by_id_list']
+ filtered_suite = tests.filter_suite_by_id_list(
+ self.suite, tests.TestIdList(test_list))
+ filtered_names = _test_ids(filtered_suite)
+ self.assertEqual(
+ filtered_names,
+ ['bzrlib.tests.test_selftest.'
+ 'TestSelftestFiltering.test_filter_suite_by_id_list'])
+
+ def test_filter_suite_by_id_startswith(self):
+ # By design this test may fail if another test is added whose name also
+        # begins with one of the start values used.
+ klass = 'bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ start1 = klass + 'test_filter_suite_by_id_starts'
+ start2 = klass + 'test_filter_suite_by_id_li'
+ test_list = [klass + 'test_filter_suite_by_id_list',
+ klass + 'test_filter_suite_by_id_startswith',
+ ]
+ filtered_suite = tests.filter_suite_by_id_startswith(
+ self.suite, [start1, start2])
+ self.assertEqual(
+ test_list,
+ _test_ids(filtered_suite),
+ )
+
+ def test_preserve_input(self):
+        # NB: Surely there is something in the stdlib to do this?
+ self.assertTrue(self.suite is tests.preserve_input(self.suite))
+ self.assertTrue("@#$" is tests.preserve_input("@#$"))
+
+ def test_randomize_suite(self):
+ randomized_suite = tests.randomize_suite(self.suite)
+ # randomizing should not add or remove test names.
+ self.assertEqual(set(_test_ids(self.suite)),
+ set(_test_ids(randomized_suite)))
+ # Technically, this *can* fail, because random.shuffle(list) can be
+ # equal to list. Trying multiple times just pushes the frequency back.
+        # As the odds are len(self.all_names)!:1, the failure frequency should
+        # be low enough to ignore. RBC 20071021.
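+        # (Illustrative: with n distinct ids a uniform shuffle returns the
+        # original order with probability 1/n!, roughly 2.8e-7 for n = 10.)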
+ # It should change the order.
+ self.assertNotEqual(self.all_names, _test_ids(randomized_suite))
+ # But not the length. (Possibly redundant with the set test, but not
+ # necessarily.)
+ self.assertEqual(len(self.all_names), len(_test_ids(randomized_suite)))
+
+ def test_split_suit_by_condition(self):
+ self.all_names = _test_ids(self.suite)
+ condition = tests.condition_id_re('test_filter_suite_by_r')
+ split_suite = tests.split_suite_by_condition(self.suite, condition)
+ filtered_name = ('bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_filter_suite_by_re')
+ self.assertEqual([filtered_name], _test_ids(split_suite[0]))
+ self.assertFalse(filtered_name in _test_ids(split_suite[1]))
+ remaining_names = list(self.all_names)
+ remaining_names.remove(filtered_name)
+ self.assertEqual(remaining_names, _test_ids(split_suite[1]))
+
+ def test_split_suit_by_re(self):
+ self.all_names = _test_ids(self.suite)
+ split_suite = tests.split_suite_by_re(self.suite,
+ 'test_filter_suite_by_r')
+ filtered_name = ('bzrlib.tests.test_selftest.TestSelftestFiltering.'
+ 'test_filter_suite_by_re')
+ self.assertEqual([filtered_name], _test_ids(split_suite[0]))
+ self.assertFalse(filtered_name in _test_ids(split_suite[1]))
+ remaining_names = list(self.all_names)
+ remaining_names.remove(filtered_name)
+ self.assertEqual(remaining_names, _test_ids(split_suite[1]))
+
+
+class TestCheckTreeShape(tests.TestCaseWithTransport):
+
+ def test_check_tree_shape(self):
+ files = ['a', 'b/', 'b/c']
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(files)
+ tree.add(files)
+ tree.lock_read()
+ try:
+ self.check_tree_shape(tree, files)
+ finally:
+ tree.unlock()
+
+
+class TestBlackboxSupport(tests.TestCase):
+ """Tests for testsuite blackbox features."""
+
+ def test_run_bzr_failure_not_caught(self):
+ # When we run bzr in blackbox mode, we want any unexpected errors to
+ # propagate up to the test suite so that it can show the error in the
+ # usual way, and we won't get a double traceback.
+ e = self.assertRaises(
+ AssertionError,
+ self.run_bzr, ['assert-fail'])
+ # make sure we got the real thing, not an error from somewhere else in
+ # the test framework
+ self.assertEquals('always fails', str(e))
+ # check that there's no traceback in the test log
+ self.assertNotContainsRe(self.get_log(), r'Traceback')
+
+ def test_run_bzr_user_error_caught(self):
+ # Running bzr in blackbox mode, normal/expected/user errors should be
+ # caught in the regular way and turned into an error message plus exit
+ # code.
+ transport_server = memory.MemoryServer()
+ transport_server.start_server()
+ self.addCleanup(transport_server.stop_server)
+ url = transport_server.get_url()
+ self.permit_url(url)
+ out, err = self.run_bzr(["log", "%s/nonexistantpath" % url], retcode=3)
+ self.assertEqual(out, '')
+ self.assertContainsRe(err,
+ 'bzr: ERROR: Not a branch: ".*nonexistantpath/".\n')
+
+
+class TestTestLoader(tests.TestCase):
+ """Tests for the test loader."""
+
+ def _get_loader_and_module(self):
+ """Gets a TestLoader and a module with one test in it."""
+ loader = TestUtil.TestLoader()
+ module = {}
+ class Stub(tests.TestCase):
+ def test_foo(self):
+ pass
+ class MyModule(object):
+ pass
+ MyModule.a_class = Stub
+ module = MyModule()
+ return loader, module
+
+ def test_module_no_load_tests_attribute_loads_classes(self):
+ loader, module = self._get_loader_and_module()
+ self.assertEqual(1, loader.loadTestsFromModule(module).countTestCases())
+
+ def test_module_load_tests_attribute_gets_called(self):
+ loader, module = self._get_loader_and_module()
+ # 'self' is here because we're faking the module with a class. Regular
+ # load_tests do not need that :)
+ def load_tests(self, standard_tests, module, loader):
+ result = loader.suiteClass()
+ for test in tests.iter_suite_tests(standard_tests):
+ result.addTests([test, test])
+ return result
+ # add a load_tests() method which multiplies the tests from the module.
+ module.__class__.load_tests = load_tests
+ self.assertEqual(2, loader.loadTestsFromModule(module).countTestCases())
+
+ def test_load_tests_from_module_name_smoke_test(self):
+ loader = TestUtil.TestLoader()
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ self.assertEquals(['bzrlib.tests.test_sampler.DemoTest.test_nothing'],
+ _test_ids(suite))
+
+ def test_load_tests_from_module_name_with_bogus_module_name(self):
+ loader = TestUtil.TestLoader()
+ self.assertRaises(ImportError, loader.loadTestsFromModuleName, 'bogus')
+
+
+class TestTestIdList(tests.TestCase):
+
+ def _create_id_list(self, test_list):
+ return tests.TestIdList(test_list)
+
+ def _create_suite(self, test_id_list):
+
+ class Stub(tests.TestCase):
+ def test_foo(self):
+ pass
+
+ def _create_test_id(id):
+ return lambda: id
+
+ suite = TestUtil.TestSuite()
+ for id in test_id_list:
+ t = Stub('test_foo')
+ t.id = _create_test_id(id)
+ suite.addTest(t)
+ return suite
+
+ def _test_ids(self, test_suite):
+ """Get the ids for the tests in a test suite."""
+ return [t.id() for t in tests.iter_suite_tests(test_suite)]
+
+ def test_empty_list(self):
+ id_list = self._create_id_list([])
+ self.assertEquals({}, id_list.tests)
+ self.assertEquals({}, id_list.modules)
+
+ def test_valid_list(self):
+ id_list = self._create_id_list(
+ ['mod1.cl1.meth1', 'mod1.cl1.meth2',
+ 'mod1.func1', 'mod1.cl2.meth2',
+ 'mod1.submod1',
+ 'mod1.submod2.cl1.meth1', 'mod1.submod2.cl2.meth2',
+ ])
+ self.assertTrue(id_list.refers_to('mod1'))
+ self.assertTrue(id_list.refers_to('mod1.submod1'))
+ self.assertTrue(id_list.refers_to('mod1.submod2'))
+ self.assertTrue(id_list.includes('mod1.cl1.meth1'))
+ self.assertTrue(id_list.includes('mod1.submod1'))
+ self.assertTrue(id_list.includes('mod1.func1'))
+
+ def test_bad_chars_in_params(self):
+ id_list = self._create_id_list(['mod1.cl1.meth1(xx.yy)'])
+ self.assertTrue(id_list.refers_to('mod1'))
+ self.assertTrue(id_list.includes('mod1.cl1.meth1(xx.yy)'))
+
+ def test_module_used(self):
+ id_list = self._create_id_list(['mod.class.meth'])
+ self.assertTrue(id_list.refers_to('mod'))
+ self.assertTrue(id_list.refers_to('mod.class'))
+ self.assertTrue(id_list.refers_to('mod.class.meth'))
+
+ def test_test_suite_matches_id_list_with_unknown(self):
+ loader = TestUtil.TestLoader()
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ test_list = ['bzrlib.tests.test_sampler.DemoTest.test_nothing',
+ 'bogus']
+ not_found, duplicates = tests.suite_matches_id_list(suite, test_list)
+ self.assertEquals(['bogus'], not_found)
+ self.assertEquals([], duplicates)
+
+ def test_suite_matches_id_list_with_duplicates(self):
+ loader = TestUtil.TestLoader()
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ dupes = loader.suiteClass()
+ for test in tests.iter_suite_tests(suite):
+ dupes.addTest(test)
+ dupes.addTest(test) # Add it again
+
+ test_list = ['bzrlib.tests.test_sampler.DemoTest.test_nothing',]
+ not_found, duplicates = tests.suite_matches_id_list(
+ dupes, test_list)
+ self.assertEquals([], not_found)
+ self.assertEquals(['bzrlib.tests.test_sampler.DemoTest.test_nothing'],
+ duplicates)
+
+
+class TestTestSuite(tests.TestCase):
+
+ def test__test_suite_testmod_names(self):
+        # Test that a plausible list of test module names is returned
+ # by _test_suite_testmod_names.
+ test_list = tests._test_suite_testmod_names()
+ self.assertSubset([
+ 'bzrlib.tests.blackbox',
+ 'bzrlib.tests.per_transport',
+ 'bzrlib.tests.test_selftest',
+ ],
+ test_list)
+
+ def test__test_suite_modules_to_doctest(self):
+ # Test that a plausible list of modules to doctest is returned
+ # by _test_suite_modules_to_doctest.
+ test_list = tests._test_suite_modules_to_doctest()
+ if __doc__ is None:
+ # When docstrings are stripped, there are no modules to doctest
+ self.assertEqual([], test_list)
+ return
+ self.assertSubset([
+ 'bzrlib.timestamp',
+ ],
+ test_list)
+
+ def test_test_suite(self):
+ # test_suite() loads the entire test suite to operate. To avoid this
+ # overhead, and yet still be confident that things are happening,
+ # we temporarily replace two functions used by test_suite with
+ # test doubles that supply a few sample tests to load, and check they
+ # are loaded.
+ calls = []
+ def testmod_names():
+ calls.append("testmod_names")
+ return [
+ 'bzrlib.tests.blackbox.test_branch',
+ 'bzrlib.tests.per_transport',
+ 'bzrlib.tests.test_selftest',
+ ]
+ self.overrideAttr(tests, '_test_suite_testmod_names', testmod_names)
+ def doctests():
+ calls.append("modules_to_doctest")
+ if __doc__ is None:
+ return []
+ return ['bzrlib.timestamp']
+ self.overrideAttr(tests, '_test_suite_modules_to_doctest', doctests)
+ expected_test_list = [
+ # testmod_names
+ 'bzrlib.tests.blackbox.test_branch.TestBranch.test_branch',
+ ('bzrlib.tests.per_transport.TransportTests'
+ '.test_abspath(LocalTransport,LocalURLServer)'),
+ 'bzrlib.tests.test_selftest.TestTestSuite.test_test_suite',
+ # plugins can't be tested that way since selftest may be run with
+ # --no-plugins
+ ]
+ if __doc__ is not None:
+ expected_test_list.extend([
+ # modules_to_doctest
+ 'bzrlib.timestamp.format_highres_date',
+ ])
+ suite = tests.test_suite()
+ self.assertEqual(set(["testmod_names", "modules_to_doctest"]),
+ set(calls))
+ self.assertSubset(expected_test_list, _test_ids(suite))
+
+ def test_test_suite_list_and_start(self):
+ # We cannot test this at the same time as the main load, because we want
+ # to know that starting_with == None works. So a second load is
+ # incurred - note that the starting_with parameter causes a partial load
+ # rather than a full load so this test should be pretty quick.
+ test_list = ['bzrlib.tests.test_selftest.TestTestSuite.test_test_suite']
+ suite = tests.test_suite(test_list,
+ ['bzrlib.tests.test_selftest.TestTestSuite'])
+ # test_test_suite_list_and_start is not included
+ self.assertEquals(test_list, _test_ids(suite))
+
+
+class TestLoadTestIdList(tests.TestCaseInTempDir):
+
+ def _create_test_list_file(self, file_name, content):
+ fl = open(file_name, 'wt')
+ fl.write(content)
+ fl.close()
+
+ def test_load_unknown(self):
+ self.assertRaises(errors.NoSuchFile,
+ tests.load_test_id_list, 'i_do_not_exist')
+
+ def test_load_test_list(self):
+ test_list_fname = 'test.list'
+ self._create_test_list_file(test_list_fname,
+ 'mod1.cl1.meth1\nmod2.cl2.meth2\n')
+ tlist = tests.load_test_id_list(test_list_fname)
+ self.assertEquals(2, len(tlist))
+ self.assertEquals('mod1.cl1.meth1', tlist[0])
+ self.assertEquals('mod2.cl2.meth2', tlist[1])
+
+ def test_load_dirty_file(self):
+ test_list_fname = 'test.list'
+ self._create_test_list_file(test_list_fname,
+ ' mod1.cl1.meth1\n\nmod2.cl2.meth2 \n'
+ 'bar baz\n')
+ tlist = tests.load_test_id_list(test_list_fname)
+ self.assertEquals(4, len(tlist))
+ self.assertEquals('mod1.cl1.meth1', tlist[0])
+ self.assertEquals('', tlist[1])
+ self.assertEquals('mod2.cl2.meth2', tlist[2])
+ self.assertEquals('bar baz', tlist[3])
+
+
+class TestFilteredByModuleTestLoader(tests.TestCase):
+
+ def _create_loader(self, test_list):
+ id_filter = tests.TestIdList(test_list)
+ loader = TestUtil.FilteredByModuleTestLoader(id_filter.refers_to)
+ return loader
+
+ def test_load_tests(self):
+ test_list = ['bzrlib.tests.test_sampler.DemoTest.test_nothing']
+ loader = self._create_loader(test_list)
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ self.assertEquals(test_list, _test_ids(suite))
+
+ def test_exclude_tests(self):
+ test_list = ['bogus']
+ loader = self._create_loader(test_list)
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ self.assertEquals([], _test_ids(suite))
+
+
+class TestFilteredByNameStartTestLoader(tests.TestCase):
+
+ def _create_loader(self, name_start):
+ def needs_module(name):
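+            # The check is bidirectional so that parent packages on the way to
+            # a deeper target still get loaded.  For example, with
+            # name_start = 'bzrlib.tests.test_sampler.Demo':
+            #   'bzrlib.tests'               -> True (name_start starts with it)
+            #   'bzrlib.tests.test_sampler'  -> True
+            #   'bzrlib.tests.test_selftest' -> False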
+ return name.startswith(name_start) or name_start.startswith(name)
+ loader = TestUtil.FilteredByModuleTestLoader(needs_module)
+ return loader
+
+ def test_load_tests(self):
+ test_list = ['bzrlib.tests.test_sampler.DemoTest.test_nothing']
+ loader = self._create_loader('bzrlib.tests.test_samp')
+
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ self.assertEquals(test_list, _test_ids(suite))
+
+ def test_load_tests_inside_module(self):
+ test_list = ['bzrlib.tests.test_sampler.DemoTest.test_nothing']
+ loader = self._create_loader('bzrlib.tests.test_sampler.Demo')
+
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ self.assertEquals(test_list, _test_ids(suite))
+
+ def test_exclude_tests(self):
+ test_list = ['bogus']
+ loader = self._create_loader('bogus')
+
+ suite = loader.loadTestsFromModuleName('bzrlib.tests.test_sampler')
+ self.assertEquals([], _test_ids(suite))
+
+
+class TestTestPrefixRegistry(tests.TestCase):
+
+ def _get_registry(self):
+ tp_registry = tests.TestPrefixAliasRegistry()
+ return tp_registry
+
+ def test_register_new_prefix(self):
+ tpr = self._get_registry()
+ tpr.register('foo', 'fff.ooo.ooo')
+ self.assertEquals('fff.ooo.ooo', tpr.get('foo'))
+
+ def test_register_existing_prefix(self):
+ tpr = self._get_registry()
+ tpr.register('bar', 'bbb.aaa.rrr')
+ tpr.register('bar', 'bBB.aAA.rRR')
+ self.assertEquals('bbb.aaa.rrr', tpr.get('bar'))
+ self.assertThat(self.get_log(),
+ DocTestMatches("...bar...bbb.aaa.rrr...BB.aAA.rRR",
+ doctest.ELLIPSIS))
+
+ def test_get_unknown_prefix(self):
+ tpr = self._get_registry()
+ self.assertRaises(KeyError, tpr.get, 'I am not a prefix')
+
+ def test_resolve_prefix(self):
+ tpr = self._get_registry()
+ tpr.register('bar', 'bb.aa.rr')
+ self.assertEquals('bb.aa.rr', tpr.resolve_alias('bar'))
+
+ def test_resolve_unknown_alias(self):
+ tpr = self._get_registry()
+ self.assertRaises(errors.BzrCommandError,
+ tpr.resolve_alias, 'I am not a prefix')
+
+ def test_predefined_prefixes(self):
+ tpr = tests.test_prefix_alias_registry
+ self.assertEquals('bzrlib', tpr.resolve_alias('bzrlib'))
+ self.assertEquals('bzrlib.doc', tpr.resolve_alias('bd'))
+ self.assertEquals('bzrlib.utils', tpr.resolve_alias('bu'))
+ self.assertEquals('bzrlib.tests', tpr.resolve_alias('bt'))
+ self.assertEquals('bzrlib.tests.blackbox', tpr.resolve_alias('bb'))
+ self.assertEquals('bzrlib.plugins', tpr.resolve_alias('bp'))
+
+
+class TestThreadLeakDetection(tests.TestCase):
+ """Ensure when tests leak threads we detect and report it"""
+
+ class LeakRecordingResult(tests.ExtendedTestResult):
+ def __init__(self):
+ tests.ExtendedTestResult.__init__(self, StringIO(), 0, 1)
+ self.leaks = []
+ def _report_thread_leak(self, test, leaks, alive):
+ self.leaks.append((test, leaks))
+
+ def test_testcase_without_addCleanups(self):
+ """Check old TestCase instances don't break with leak detection"""
+ class Test(unittest.TestCase):
+ def runTest(self):
+ pass
+ result = self.LeakRecordingResult()
+ test = Test()
+ result.startTestRun()
+ test.run(result)
+ result.stopTestRun()
+ self.assertEqual(result._tests_leaking_threads_count, 0)
+ self.assertEqual(result.leaks, [])
+
+ def test_thread_leak(self):
+ """Ensure a thread that outlives the running of a test is reported
+
+ Uses a thread that blocks on an event, and is started by the inner
+ test case. As the thread outlives the inner case's run, it should be
+ detected as a leak, but the event is then set so that the thread can
+ be safely joined in cleanup so it's not leaked for real.
+ """
+ event = threading.Event()
+ thread = threading.Thread(name="Leaker", target=event.wait)
+ class Test(tests.TestCase):
+ def test_leak(self):
+ thread.start()
+ result = self.LeakRecordingResult()
+ test = Test("test_leak")
+ self.addCleanup(thread.join)
+ self.addCleanup(event.set)
+ result.startTestRun()
+ test.run(result)
+ result.stopTestRun()
+ self.assertEqual(result._tests_leaking_threads_count, 1)
+ self.assertEqual(result._first_thread_leaker_id, test.id())
+ self.assertEqual(result.leaks, [(test, set([thread]))])
+ self.assertContainsString(result.stream.getvalue(), "leaking threads")
+
+ def test_multiple_leaks(self):
+ """Check multiple leaks are blamed on the test cases at fault
+
+ Same concept as the previous test, but has one inner test method that
+ leaks two threads, and one that doesn't leak at all.
+ """
+ event = threading.Event()
+ thread_a = threading.Thread(name="LeakerA", target=event.wait)
+ thread_b = threading.Thread(name="LeakerB", target=event.wait)
+ thread_c = threading.Thread(name="LeakerC", target=event.wait)
+ class Test(tests.TestCase):
+ def test_first_leak(self):
+ thread_b.start()
+ def test_second_no_leak(self):
+ pass
+ def test_third_leak(self):
+ thread_c.start()
+ thread_a.start()
+ result = self.LeakRecordingResult()
+ first_test = Test("test_first_leak")
+ third_test = Test("test_third_leak")
+ self.addCleanup(thread_a.join)
+ self.addCleanup(thread_b.join)
+ self.addCleanup(thread_c.join)
+ self.addCleanup(event.set)
+ result.startTestRun()
+ unittest.TestSuite(
+ [first_test, Test("test_second_no_leak"), third_test]
+ ).run(result)
+ result.stopTestRun()
+ self.assertEqual(result._tests_leaking_threads_count, 2)
+ self.assertEqual(result._first_thread_leaker_id, first_test.id())
+ self.assertEqual(result.leaks, [
+ (first_test, set([thread_b])),
+ (third_test, set([thread_a, thread_c]))])
+ self.assertContainsString(result.stream.getvalue(), "leaking threads")
+
+
+class TestPostMortemDebugging(tests.TestCase):
+ """Check post mortem debugging works when tests fail or error"""
+
+ class TracebackRecordingResult(tests.ExtendedTestResult):
+ def __init__(self):
+ tests.ExtendedTestResult.__init__(self, StringIO(), 0, 1)
+ self.postcode = None
+ def _post_mortem(self, tb=None):
+ """Record the code object at the end of the current traceback"""
+ tb = tb or sys.exc_info()[2]
+ if tb is not None:
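+                # Walk to the innermost frame of the traceback, which is where
+                # the exception was actually raised.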
+ next = tb.tb_next
+ while next is not None:
+ tb = next
+ next = next.tb_next
+ self.postcode = tb.tb_frame.f_code
+ def report_error(self, test, err):
+ pass
+ def report_failure(self, test, err):
+ pass
+
+ def test_location_unittest_error(self):
+ """Needs right post mortem traceback with erroring unittest case"""
+ class Test(unittest.TestCase):
+ def runTest(self):
+ raise RuntimeError
+ result = self.TracebackRecordingResult()
+ Test().run(result)
+ self.assertEqual(result.postcode, Test.runTest.func_code)
+
+ def test_location_unittest_failure(self):
+ """Needs right post mortem traceback with failing unittest case"""
+ class Test(unittest.TestCase):
+ def runTest(self):
+ raise self.failureException
+ result = self.TracebackRecordingResult()
+ Test().run(result)
+ self.assertEqual(result.postcode, Test.runTest.func_code)
+
+ def test_location_bt_error(self):
+ """Needs right post mortem traceback with erroring bzrlib.tests case"""
+ class Test(tests.TestCase):
+ def test_error(self):
+ raise RuntimeError
+ result = self.TracebackRecordingResult()
+ Test("test_error").run(result)
+ self.assertEqual(result.postcode, Test.test_error.func_code)
+
+ def test_location_bt_failure(self):
+ """Needs right post mortem traceback with failing bzrlib.tests case"""
+ class Test(tests.TestCase):
+ def test_failure(self):
+ raise self.failureException
+ result = self.TracebackRecordingResult()
+ Test("test_failure").run(result)
+ self.assertEqual(result.postcode, Test.test_failure.func_code)
+
+ def test_env_var_triggers_post_mortem(self):
+ """Check pdb.post_mortem is called iff BZR_TEST_PDB is set"""
+ import pdb
+ result = tests.ExtendedTestResult(StringIO(), 0, 1)
+ post_mortem_calls = []
+ self.overrideAttr(pdb, "post_mortem", post_mortem_calls.append)
+ self.overrideEnv('BZR_TEST_PDB', None)
+ result._post_mortem(1)
+ self.overrideEnv('BZR_TEST_PDB', 'on')
+ result._post_mortem(2)
+ self.assertEqual([2], post_mortem_calls)
+
+
+class TestRunSuite(tests.TestCase):
+
+ def test_runner_class(self):
+ """run_suite accepts and uses a runner_class keyword argument."""
+ class Stub(tests.TestCase):
+ def test_foo(self):
+ pass
+ suite = Stub("test_foo")
+ calls = []
+ class MyRunner(tests.TextTestRunner):
+ def run(self, test):
+ calls.append(test)
+ return tests.ExtendedTestResult(self.stream, self.descriptions,
+ self.verbosity)
+ tests.run_suite(suite, runner_class=MyRunner, stream=StringIO())
+ self.assertLength(1, calls)
+
+
+class _Selftest(object):
+ """Mixin for tests needing full selftest output"""
+
+ def _inject_stream_into_subunit(self, stream):
+ """To be overridden by subclasses that run tests out of process"""
+
+ def _run_selftest(self, **kwargs):
+ sio = StringIO()
+ self._inject_stream_into_subunit(sio)
+ tests.selftest(stream=sio, stop_on_failure=False, **kwargs)
+ return sio.getvalue()
+
+
+class _ForkedSelftest(_Selftest):
+ """Mixin for tests needing full selftest output with forked children"""
+
+ _test_needs_features = [features.subunit]
+
+ def _inject_stream_into_subunit(self, stream):
+ """Monkey-patch subunit so the extra output goes to stream not stdout
+
+ Some APIs need rewriting so this kind of bogus hackery can be replaced
+ by passing the stream param from run_tests down into ProtocolTestCase.
+ """
+ from subunit import ProtocolTestCase
+ _original_init = ProtocolTestCase.__init__
+ def _init_with_passthrough(self, *args, **kwargs):
+ _original_init(self, *args, **kwargs)
+ self._passthrough = stream
+ self.overrideAttr(ProtocolTestCase, "__init__", _init_with_passthrough)
+
+ def _run_selftest(self, **kwargs):
+ # GZ 2011-05-26: Add a PosixSystem feature so this check can go away
+ if getattr(os, "fork", None) is None:
+ raise tests.TestNotApplicable("Platform doesn't support forking")
+ # Make sure the fork code is actually invoked by claiming two cores
+ self.overrideAttr(osutils, "local_concurrency", lambda: 2)
+ kwargs.setdefault("suite_decorators", []).append(tests.fork_decorator)
+ return super(_ForkedSelftest, self)._run_selftest(**kwargs)
+
+
+class TestParallelFork(_ForkedSelftest, tests.TestCase):
+ """Check operation of --parallel=fork selftest option"""
+
+ def test_error_in_child_during_fork(self):
+ """Error in a forked child during test setup should get reported"""
+ class Test(tests.TestCase):
+ def testMethod(self):
+ pass
+ # We don't care what, just break something that a child will run
+ self.overrideAttr(tests, "workaround_zealous_crypto_random", None)
+ out = self._run_selftest(test_suite_factory=Test)
+ # Lines from the tracebacks of the two child processes may be mixed
+ # together due to the way subunit parses and forwards the streams,
+ # so permit extra lines between each part of the error output.
+ self.assertContainsRe(out,
+ "Traceback.*:\n"
+ "(?:.*\n)*"
+ ".+ in fork_for_tests\n"
+ "(?:.*\n)*"
+ "\s*workaround_zealous_crypto_random\(\)\n"
+ "(?:.*\n)*"
+ "TypeError:")
+
+
+class TestUncollectedWarnings(_Selftest, tests.TestCase):
+ """Check a test case still alive after being run emits a warning"""
+
+ class Test(tests.TestCase):
+ def test_pass(self):
+ pass
+ def test_self_ref(self):
+ self.also_self = self.test_self_ref
+ def test_skip(self):
+ self.skip("Don't need")
+
+ def _get_suite(self):
+ return TestUtil.TestSuite([
+ self.Test("test_pass"),
+ self.Test("test_self_ref"),
+ self.Test("test_skip"),
+ ])
+
+ def _run_selftest_with_suite(self, **kwargs):
+ old_flags = tests.selftest_debug_flags
+ tests.selftest_debug_flags = old_flags.union(["uncollected_cases"])
+ gc_on = gc.isenabled()
+ if gc_on:
+ gc.disable()
+ try:
+ output = self._run_selftest(test_suite_factory=self._get_suite,
+ **kwargs)
+ finally:
+ if gc_on:
+ gc.enable()
+ tests.selftest_debug_flags = old_flags
+ self.assertNotContainsRe(output, "Uncollected test case.*test_pass")
+ self.assertContainsRe(output, "Uncollected test case.*test_self_ref")
+ return output
+
+ def test_testsuite(self):
+ self._run_selftest_with_suite()
+
+ def test_pattern(self):
+ out = self._run_selftest_with_suite(pattern="test_(?:pass|self_ref)$")
+ self.assertNotContainsRe(out, "test_skip")
+
+ def test_exclude_pattern(self):
+ out = self._run_selftest_with_suite(exclude_pattern="test_skip$")
+ self.assertNotContainsRe(out, "test_skip")
+
+ def test_random_seed(self):
+ self._run_selftest_with_suite(random_seed="now")
+
+ def test_matching_tests_first(self):
+ self._run_selftest_with_suite(matching_tests_first=True,
+ pattern="test_self_ref$")
+
+ def test_starting_with_and_exclude(self):
+ out = self._run_selftest_with_suite(starting_with=["bt."],
+ exclude_pattern="test_skip$")
+ self.assertNotContainsRe(out, "test_skip")
+
+ def test_additonal_decorator(self):
+ out = self._run_selftest_with_suite(
+ suite_decorators=[tests.TestDecorator])
+
+
+class TestUncollectedWarningsSubunit(TestUncollectedWarnings):
+ """Check warnings from tests staying alive are emitted with subunit"""
+
+ _test_needs_features = [features.subunit]
+
+ def _run_selftest_with_suite(self, **kwargs):
+ return TestUncollectedWarnings._run_selftest_with_suite(self,
+ runner_class=tests.SubUnitBzrRunner, **kwargs)
+
+
+class TestUncollectedWarningsForked(_ForkedSelftest, TestUncollectedWarnings):
+ """Check warnings from tests staying alive are emitted when forking"""
+
+
+class TestEnvironHandling(tests.TestCase):
+
+ def test_overrideEnv_None_called_twice_doesnt_leak(self):
+ self.assertFalse('MYVAR' in os.environ)
+ self.overrideEnv('MYVAR', '42')
+ # We use an embedded test to make sure we fix the _captureVar bug
+ class Test(tests.TestCase):
+ def test_me(self):
+                # The first call saves the 42 value
+ self.overrideEnv('MYVAR', None)
+ self.assertEquals(None, os.environ.get('MYVAR'))
+ # Make sure we can call it twice
+ self.overrideEnv('MYVAR', None)
+ self.assertEquals(None, os.environ.get('MYVAR'))
+ output = StringIO()
+ result = tests.TextTestResult(output, 0, 1)
+ Test('test_me').run(result)
+ if not result.wasStrictlySuccessful():
+ self.fail(output.getvalue())
+ # We get our value back
+ self.assertEquals('42', os.environ.get('MYVAR'))
+
+
+class TestIsolatedEnv(tests.TestCase):
+ """Test isolating tests from os.environ.
+
+    Since we use tests that are already isolated from os.environ, a bit of care
+    should be taken when designing the tests to avoid bootstrap side-effects.
+    The tests start from an already clean os.environ, which allows making valid
+    assertions about which variables are present or not, and designing tests
+    around these assertions.
+ """
+
+ class ScratchMonkey(tests.TestCase):
+
+ def test_me(self):
+ pass
+
+ def test_basics(self):
+ # Make sure we know the definition of BZR_HOME: not part of os.environ
+ # for tests.TestCase.
+ self.assertTrue('BZR_HOME' in tests.isolated_environ)
+ self.assertEquals(None, tests.isolated_environ['BZR_HOME'])
+ # Being part of isolated_environ, BZR_HOME should not appear here
+ self.assertFalse('BZR_HOME' in os.environ)
+ # Make sure we know the definition of LINES: part of os.environ for
+ # tests.TestCase
+ self.assertTrue('LINES' in tests.isolated_environ)
+ self.assertEquals('25', tests.isolated_environ['LINES'])
+ self.assertEquals('25', os.environ['LINES'])
+
+ def test_injecting_unknown_variable(self):
+ # BZR_HOME is known to be absent from os.environ
+ test = self.ScratchMonkey('test_me')
+ tests.override_os_environ(test, {'BZR_HOME': 'foo'})
+ self.assertEquals('foo', os.environ['BZR_HOME'])
+ tests.restore_os_environ(test)
+ self.assertFalse('BZR_HOME' in os.environ)
+
+ def test_injecting_known_variable(self):
+ test = self.ScratchMonkey('test_me')
+ # LINES is known to be present in os.environ
+ tests.override_os_environ(test, {'LINES': '42'})
+ self.assertEquals('42', os.environ['LINES'])
+ tests.restore_os_environ(test)
+ self.assertEquals('25', os.environ['LINES'])
+
+ def test_deleting_variable(self):
+ test = self.ScratchMonkey('test_me')
+ # LINES is known to be present in os.environ
+ tests.override_os_environ(test, {'LINES': None})
+ self.assertTrue('LINES' not in os.environ)
+ tests.restore_os_environ(test)
+ self.assertEquals('25', os.environ['LINES'])
+
+
+class TestDocTestSuiteIsolation(tests.TestCase):
+ """Test that `tests.DocTestSuite` isolates doc tests from os.environ.
+
+    Since tests.TestCase already provides isolation from os.environ, we use
+ the clean environment as a base for testing. To precisely capture the
+ isolation provided by tests.DocTestSuite, we use doctest.DocTestSuite to
+ compare against.
+
+    We want to make sure `tests.DocTestSuite` respects `tests.isolated_environ`,
+    not `os.environ`, so each test overrides it to suit its needs.
+
+ """
+
+ def get_doctest_suite_for_string(self, klass, string):
+ class Finder(doctest.DocTestFinder):
+
+ def find(*args, **kwargs):
+ test = doctest.DocTestParser().get_doctest(
+ string, {}, 'foo', 'foo.py', 0)
+ return [test]
+
+ suite = klass(test_finder=Finder())
+ return suite
+
+ def run_doctest_suite_for_string(self, klass, string):
+ suite = self.get_doctest_suite_for_string(klass, string)
+ output = StringIO()
+ result = tests.TextTestResult(output, 0, 1)
+ suite.run(result)
+ return result, output
+
+ def assertDocTestStringSucceds(self, klass, string):
+ result, output = self.run_doctest_suite_for_string(klass, string)
+ if not result.wasStrictlySuccessful():
+ self.fail(output.getvalue())
+
+ def assertDocTestStringFails(self, klass, string):
+ result, output = self.run_doctest_suite_for_string(klass, string)
+ if result.wasStrictlySuccessful():
+ self.fail(output.getvalue())
+
+ def test_injected_variable(self):
+ self.overrideAttr(tests, 'isolated_environ', {'LINES': '42'})
+ test = """
+ >>> import os
+ >>> os.environ['LINES']
+ '42'
+ """
+ # doctest.DocTestSuite fails as it sees '25'
+ self.assertDocTestStringFails(doctest.DocTestSuite, test)
+ # tests.DocTestSuite sees '42'
+ self.assertDocTestStringSucceds(tests.IsolatedDocTestSuite, test)
+
+ def test_deleted_variable(self):
+ self.overrideAttr(tests, 'isolated_environ', {'LINES': None})
+ test = """
+ >>> import os
+ >>> os.environ.get('LINES')
+ """
+ # doctest.DocTestSuite fails as it sees '25'
+ self.assertDocTestStringFails(doctest.DocTestSuite, test)
+ # tests.DocTestSuite sees None
+ self.assertDocTestStringSucceds(tests.IsolatedDocTestSuite, test)
+
+
+class TestSelftestExcludePatterns(tests.TestCase):
+
+ def setUp(self):
+ super(TestSelftestExcludePatterns, self).setUp()
+ self.overrideAttr(tests, 'test_suite', self.suite_factory)
+
+ def suite_factory(self, keep_only=None, starting_with=None):
+ """A test suite factory with only a few tests."""
+ class Test(tests.TestCase):
+ def id(self):
+ # We don't need the full class path
+ return self._testMethodName
+ def a(self):
+ pass
+ def b(self):
+ pass
+ def c(self):
+ pass
+ return TestUtil.TestSuite([Test("a"), Test("b"), Test("c")])
+
+ def assertTestList(self, expected, *selftest_args):
+ # We rely on setUp installing the right test suite factory so we can
+ # test at the command level without loading the whole test suite
+ out, err = self.run_bzr(('selftest', '--list') + selftest_args)
+ actual = out.splitlines()
+ self.assertEquals(expected, actual)
+
+ def test_full_list(self):
+ self.assertTestList(['a', 'b', 'c'])
+
+ def test_single_exclude(self):
+ self.assertTestList(['b', 'c'], '-x', 'a')
+
+ def test_mutiple_excludes(self):
+ self.assertTestList(['c'], '-x', 'a', '-x', 'b')
+
+
+class TestCounterHooks(tests.TestCase, SelfTestHelper):
+
+ _test_needs_features = [features.subunit]
+
+ def setUp(self):
+ super(TestCounterHooks, self).setUp()
+ class Test(tests.TestCase):
+
+ def setUp(self):
+ super(Test, self).setUp()
+ self.hooks = hooks.Hooks()
+ self.hooks.add_hook('myhook', 'Foo bar blah', (2,4))
+ self.install_counter_hook(self.hooks, 'myhook')
+
+ def no_hook(self):
+ pass
+
+ def run_hook_once(self):
+ for hook in self.hooks['myhook']:
+ hook(self)
+
+ self.test_class = Test
+
+ def assertHookCalls(self, expected_calls, test_name):
+ test = self.test_class(test_name)
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertTrue(hasattr(test, '_counters'))
+ self.assertTrue(test._counters.has_key('myhook'))
+ self.assertEquals(expected_calls, test._counters['myhook'])
+
+ def test_no_hook(self):
+ self.assertHookCalls(0, 'no_hook')
+
+ def test_run_hook_once(self):
+ tt = features.testtools
+ if tt.module.__version__ < (0, 9, 8):
+ raise tests.TestSkipped('testtools-0.9.8 required for addDetail')
+ self.assertHookCalls(1, 'run_hook_once')
diff --git a/bzrlib/tests/test_serializer.py b/bzrlib/tests/test_serializer.py
new file mode 100644
index 0000000..0b3e642
--- /dev/null
+++ b/bzrlib/tests/test_serializer.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for the revision/inventory Serializers."""
+
+
+from bzrlib import (
+ chk_serializer,
+ serializer,
+ xml5,
+ xml6,
+ xml7,
+ xml8,
+ )
+from bzrlib.tests import TestCase
+
+
+class TestSerializer(TestCase):
+ """Test serializer"""
+
+ def test_registry(self):
+ self.assertIs(xml5.serializer_v5,
+ serializer.format_registry.get('5'))
+ self.assertIs(xml6.serializer_v6,
+ serializer.format_registry.get('6'))
+ self.assertIs(xml7.serializer_v7,
+ serializer.format_registry.get('7'))
+ self.assertIs(xml8.serializer_v8,
+ serializer.format_registry.get('8'))
+ self.assertIs(chk_serializer.chk_serializer_255_bigpage,
+ serializer.format_registry.get('9'))
diff --git a/bzrlib/tests/test_server.py b/bzrlib/tests/test_server.py
new file mode 100644
index 0000000..2da03f3
--- /dev/null
+++ b/bzrlib/tests/test_server.py
@@ -0,0 +1,743 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import errno
+import socket
+import SocketServer
+import sys
+import threading
+import traceback
+
+
+from bzrlib import (
+ cethread,
+ errors,
+ osutils,
+ transport,
+ urlutils,
+ )
+from bzrlib.transport import (
+ chroot,
+ pathfilter,
+ )
+from bzrlib.smart import (
+ medium,
+ server,
+ )
+
+
+def debug_threads():
+ # FIXME: There is a dependency loop between bzrlib.tests and
+ # bzrlib.tests.test_server that needs to be fixed. In the mean time
+ # defining this function is enough for our needs. -- vila 20100611
+ from bzrlib import tests
+ return 'threads' in tests.selftest_debug_flags
+
+
+class TestServer(transport.Server):
+ """A Transport Server dedicated to tests.
+
+ The TestServer interface provides a server for a given transport. We use
+ these servers as loopback testing tools. For any given transport the
+ Servers it provides must either allow writing, or serve the contents
+ of os.getcwdu() at the time start_server is called.
+
+ Note that these are real servers - they must implement all the things
+ that we want bzr transports to take advantage of.
+ """
+
+ def get_url(self):
+ """Return a url for this server.
+
+ If the transport does not represent a disk directory (i.e. it is
+        a database like svn, or a memory-only transport), it should return
+ a connection to a newly established resource for this Server.
+ Otherwise it should return a url that will provide access to the path
+ that was os.getcwdu() when start_server() was called.
+
+ Subsequent calls will return the same resource.
+ """
+ raise NotImplementedError
+
+ def get_bogus_url(self):
+ """Return a url for this protocol, that will fail to connect.
+
+ This may raise NotImplementedError to indicate that this server cannot
+ provide bogus urls.
+ """
+ raise NotImplementedError
+
+
+class LocalURLServer(TestServer):
+ """A pretend server for local transports, using file:// urls.
+
+ Of course no actual server is required to access the local filesystem, so
+ this just exists to tell the test code how to get to it.
+ """
+
+ def start_server(self):
+ pass
+
+ def get_url(self):
+ """See Transport.Server.get_url."""
+ return urlutils.local_path_to_url('')
+
+
+class DecoratorServer(TestServer):
+ """Server for the TransportDecorator for testing with.
+
+    To use this when subclassing TransportDecorator, override the
+ get_decorator_class method.
+ """
+
+ def start_server(self, server=None):
+ """See bzrlib.transport.Server.start_server.
+
+        :param server: decorate the urls given by server. If not provided a
+            LocalURLServer is created.
+ """
+ if server is not None:
+ self._made_server = False
+ self._server = server
+ else:
+ self._made_server = True
+ self._server = LocalURLServer()
+ self._server.start_server()
+
+ def stop_server(self):
+ if self._made_server:
+ self._server.stop_server()
+
+ def get_decorator_class(self):
+ """Return the class of the decorators we should be constructing."""
+ raise NotImplementedError(self.get_decorator_class)
+
+ def get_url_prefix(self):
+ """What URL prefix does this decorator produce?"""
+ return self.get_decorator_class()._get_url_prefix()
+
+ def get_bogus_url(self):
+ """See bzrlib.transport.Server.get_bogus_url."""
+ return self.get_url_prefix() + self._server.get_bogus_url()
+
+ def get_url(self):
+ """See bzrlib.transport.Server.get_url."""
+ return self.get_url_prefix() + self._server.get_url()
+
+
+class BrokenRenameServer(DecoratorServer):
+ """Server for the BrokenRenameTransportDecorator for testing with."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import brokenrename
+ return brokenrename.BrokenRenameTransportDecorator
+
+
+class FakeNFSServer(DecoratorServer):
+ """Server for the FakeNFSTransportDecorator for testing with."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import fakenfs
+ return fakenfs.FakeNFSTransportDecorator
+
+
+class FakeVFATServer(DecoratorServer):
+ """A server that suggests connections through FakeVFATTransportDecorator
+
+ For use in testing.
+ """
+
+ def get_decorator_class(self):
+ from bzrlib.transport import fakevfat
+ return fakevfat.FakeVFATTransportDecorator
+
+
+class LogDecoratorServer(DecoratorServer):
+ """Server for testing."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import log
+ return log.TransportLogDecorator
+
+
+class NoSmartTransportServer(DecoratorServer):
+ """Server for the NoSmartTransportDecorator for testing with."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import nosmart
+ return nosmart.NoSmartTransportDecorator
+
+
+class ReadonlyServer(DecoratorServer):
+ """Server for the ReadonlyTransportDecorator for testing with."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import readonly
+ return readonly.ReadonlyTransportDecorator
+
+
+class TraceServer(DecoratorServer):
+ """Server for the TransportTraceDecorator for testing with."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import trace
+ return trace.TransportTraceDecorator
+
+
+class UnlistableServer(DecoratorServer):
+ """Server for the UnlistableTransportDecorator for testing with."""
+
+ def get_decorator_class(self):
+ from bzrlib.transport import unlistable
+ return unlistable.UnlistableTransportDecorator
+
+
+class TestingPathFilteringServer(pathfilter.PathFilteringServer):
+
+ def __init__(self):
+ """TestingPathFilteringServer is not usable until start_server
+ is called."""
+
+ def start_server(self, backing_server=None):
+ """Setup the Chroot on backing_server."""
+ if backing_server is not None:
+ self.backing_transport = transport.get_transport_from_url(
+ backing_server.get_url())
+ else:
+ self.backing_transport = transport.get_transport_from_path('.')
+ self.backing_transport.clone('added-by-filter').ensure_base()
+ self.filter_func = lambda x: 'added-by-filter/' + x
+ super(TestingPathFilteringServer, self).start_server()
+
+ def get_bogus_url(self):
+ raise NotImplementedError
+
+
+class TestingChrootServer(chroot.ChrootServer):
+
+ def __init__(self):
+ """TestingChrootServer is not usable until start_server is called."""
+ super(TestingChrootServer, self).__init__(None)
+
+ def start_server(self, backing_server=None):
+ """Setup the Chroot on backing_server."""
+ if backing_server is not None:
+ self.backing_transport = transport.get_transport_from_url(
+ backing_server.get_url())
+ else:
+ self.backing_transport = transport.get_transport_from_path('.')
+ super(TestingChrootServer, self).start_server()
+
+ def get_bogus_url(self):
+ raise NotImplementedError
+
+
+class TestThread(cethread.CatchingExceptionThread):
+
+ def join(self, timeout=5):
+ """Overrides to use a default timeout.
+
+        The default timeout is 5 seconds and should expire only when a thread
+ serving a client connection is hung.
+ """
+ super(TestThread, self).join(timeout)
+ if timeout and self.isAlive():
+            # The timeout expired without joining the thread; the thread is
+            # therefore stuck and that's a failure as far as the test is
+            # concerned. We used to hang here.
+
+ # FIXME: we need to kill the thread, but as far as the test is
+ # concerned, raising an assertion is too strong. On most of the
+ # platforms, this doesn't occur, so just mentioning the problem is
+ # enough for now -- vila 2010824
+ sys.stderr.write('thread %s hung\n' % (self.name,))
+ #raise AssertionError('thread %s hung' % (self.name,))
+
+
+class TestingTCPServerMixin(object):
+ """Mixin to support running SocketServer.TCPServer in a thread.
+
+    Tests connect from the main thread, so the server has to run in a
+    separate thread.
+ """
+
+ def __init__(self):
+ self.started = threading.Event()
+ self.serving = None
+ self.stopped = threading.Event()
+ # We collect the resources used by the clients so we can release them
+ # when shutting down
+ self.clients = []
+ self.ignored_exceptions = None
+
+ def server_bind(self):
+ self.socket.bind(self.server_address)
+ self.server_address = self.socket.getsockname()
+
+ def serve(self):
+ self.serving = True
+ # We are listening and ready to accept connections
+ self.started.set()
+ try:
+ while self.serving:
+                # Really a connection, but the python framework is generic
+                # and calls them requests
+ self.handle_request()
+ # Let's close the listening socket
+ self.server_close()
+ finally:
+ self.stopped.set()
+
+ def handle_request(self):
+ """Handle one request.
+
+        The python version swallows some socket exceptions and we don't use
+        a timeout, so we override it to better control the server behavior.
+ """
+ request, client_address = self.get_request()
+ if self.verify_request(request, client_address):
+ try:
+ self.process_request(request, client_address)
+ except:
+ self.handle_error(request, client_address)
+ else:
+ self.close_request(request)
+
+ def get_request(self):
+ return self.socket.accept()
+
+ def verify_request(self, request, client_address):
+ """Verify the request.
+
+ Return True if we should proceed with this request, False if we should
+        not even touch a single byte in the socket! This is useful when we
+ stop the server with a dummy last connection.
+ """
+ return self.serving
+
+ def handle_error(self, request, client_address):
+ # Stop serving and re-raise the last exception seen
+ self.serving = False
+        # The following can be used for debugging purposes; it will display the
+ # exception and the traceback just when it occurs instead of waiting
+ # for the thread to be joined.
+ # SocketServer.BaseServer.handle_error(self, request, client_address)
+
+ # We call close_request manually, because we are going to raise an
+ # exception. The SocketServer implementation calls:
+ # handle_error(...)
+ # close_request(...)
+ # But because we raise the exception, close_request will never be
+        # triggered. This helps the client avoid blocking while waiting for a
+        # response when the server gets an exception.
+ self.close_request(request)
+ raise
+
+ def ignored_exceptions_during_shutdown(self, e):
+ if sys.platform == 'win32':
+ accepted_errnos = [errno.EBADF,
+ errno.EPIPE,
+ errno.WSAEBADF,
+ errno.WSAECONNRESET,
+ errno.WSAENOTCONN,
+ errno.WSAESHUTDOWN,
+ ]
+ else:
+ accepted_errnos = [errno.EBADF,
+ errno.ECONNRESET,
+ errno.ENOTCONN,
+ errno.EPIPE,
+ ]
+ if isinstance(e, socket.error) and e[0] in accepted_errnos:
+ return True
+ return False
+
+ # The following methods are called by the main thread
+
+ def stop_client_connections(self):
+ while self.clients:
+ c = self.clients.pop()
+ self.shutdown_client(c)
+
+ def shutdown_socket(self, sock):
+ """Properly shutdown a socket.
+
+ This should be called only when no other thread is trying to use the
+ socket.
+ """
+ try:
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
+ except Exception, e:
+ if self.ignored_exceptions(e):
+ pass
+ else:
+ raise
+
+ # The following methods are called by the main thread
+
+ def set_ignored_exceptions(self, thread, ignored_exceptions):
+ self.ignored_exceptions = ignored_exceptions
+ thread.set_ignored_exceptions(self.ignored_exceptions)
+
+ def _pending_exception(self, thread):
+ """Raise server uncaught exception.
+
+        Subclasses can override this if they use child threads.
+ """
+ thread.pending_exception()
+
+
+class TestingTCPServer(TestingTCPServerMixin, SocketServer.TCPServer):
+
+ def __init__(self, server_address, request_handler_class):
+ TestingTCPServerMixin.__init__(self)
+ SocketServer.TCPServer.__init__(self, server_address,
+ request_handler_class)
+
+ def get_request(self):
+ """Get the request and client address from the socket."""
+ sock, addr = TestingTCPServerMixin.get_request(self)
+ self.clients.append((sock, addr))
+ return sock, addr
+
+ # The following methods are called by the main thread
+
+ def shutdown_client(self, client):
+ sock, addr = client
+ self.shutdown_socket(sock)
+
+
+class TestingThreadingTCPServer(TestingTCPServerMixin,
+ SocketServer.ThreadingTCPServer):
+
+ def __init__(self, server_address, request_handler_class):
+ TestingTCPServerMixin.__init__(self)
+ SocketServer.ThreadingTCPServer.__init__(self, server_address,
+ request_handler_class)
+
+ def get_request(self):
+ """Get the request and client address from the socket."""
+ sock, addr = TestingTCPServerMixin.get_request(self)
+        # The thread is not created yet; it will be updated in process_request
+ self.clients.append((sock, addr, None))
+ return sock, addr
+
+ def process_request_thread(self, started, detached, stopped,
+ request, client_address):
+ started.set()
+ # We will be on our own once the server tells us we're detached
+ detached.wait()
+ SocketServer.ThreadingTCPServer.process_request_thread(
+ self, request, client_address)
+ self.close_request(request)
+ stopped.set()
+
+ def process_request(self, request, client_address):
+ """Start a new thread to process the request."""
+ started = threading.Event()
+ detached = threading.Event()
+ stopped = threading.Event()
+ t = TestThread(
+ sync_event=stopped,
+ name='%s -> %s' % (client_address, self.server_address),
+            target=self.process_request_thread,
+            args=(started, detached, stopped, request, client_address))
+ # Update the client description
+ self.clients.pop()
+ self.clients.append((request, client_address, t))
+ # Propagate the exception handler since we must use the same one as
+ # TestingTCPServer for connections running in their own threads.
+ t.set_ignored_exceptions(self.ignored_exceptions)
+ t.start()
+ started.wait()
+        # If an exception occurred during the thread start, it will get raised.
+ t.pending_exception()
+ if debug_threads():
+ sys.stderr.write('Client thread %s started\n' % (t.name,))
+ # Tell the thread, it's now on its own for exception handling.
+ detached.set()
+
+ # The following methods are called by the main thread
+
+ def shutdown_client(self, client):
+ sock, addr, connection_thread = client
+ self.shutdown_socket(sock)
+ if connection_thread is not None:
+            # The thread is created only once the request starts being
+            # processed, i.e. after the connection has been accepted; during
+            # server shutdown a connection may exist without its thread. If an
+            # exception occurred in the thread it will be re-raised when
+            # joining it.
+ if debug_threads():
+ sys.stderr.write('Client thread %s will be joined\n'
+ % (connection_thread.name,))
+ connection_thread.join()
+
+ def set_ignored_exceptions(self, thread, ignored_exceptions):
+ TestingTCPServerMixin.set_ignored_exceptions(self, thread,
+ ignored_exceptions)
+ for sock, addr, connection_thread in self.clients:
+ if connection_thread is not None:
+ connection_thread.set_ignored_exceptions(
+ self.ignored_exceptions)
+
+ def _pending_exception(self, thread):
+ for sock, addr, connection_thread in self.clients:
+ if connection_thread is not None:
+ connection_thread.pending_exception()
+ TestingTCPServerMixin._pending_exception(self, thread)
+
+
+class TestingTCPServerInAThread(transport.Server):
+ """A server in a thread that re-raise thread exceptions."""
+
+ def __init__(self, server_address, server_class, request_handler_class):
+ self.server_class = server_class
+ self.request_handler_class = request_handler_class
+ self.host, self.port = server_address
+ self.server = None
+ self._server_thread = None
+
+ def __repr__(self):
+ return "%s(%s:%s)" % (self.__class__.__name__, self.host, self.port)
+
+ def create_server(self):
+ return self.server_class((self.host, self.port),
+ self.request_handler_class)
+
+ def start_server(self):
+ self.server = self.create_server()
+ self._server_thread = TestThread(
+ sync_event=self.server.started,
+ target=self.run_server)
+ self._server_thread.start()
+ # Wait for the server thread to start (i.e. release the lock)
+ self.server.started.wait()
+ # Get the real address, especially the port
+ self.host, self.port = self.server.server_address
+ self._server_thread.name = self.server.server_address
+ if debug_threads():
+ sys.stderr.write('Server thread %s started\n'
+ % (self._server_thread.name,))
+        # If an exception occurred during the server start, it will get raised,
+ # otherwise, the server is blocked on its accept() call.
+ self._server_thread.pending_exception()
+ # From now on, we'll use a different event to ensure the server can set
+ # its exception
+ self._server_thread.set_sync_event(self.server.stopped)
+
+ def run_server(self):
+ self.server.serve()
+
+ def stop_server(self):
+ if self.server is None:
+ return
+ try:
+ # The server has been started successfully, shut it down now. As
+            # soon as we stop serving, no more connections are accepted except
+ # one to get out of the blocking listen.
+ self.set_ignored_exceptions(
+ self.server.ignored_exceptions_during_shutdown)
+ self.server.serving = False
+ if debug_threads():
+ sys.stderr.write('Server thread %s will be joined\n'
+ % (self._server_thread.name,))
+            # The server is listening for a last connection, let's give it one:
+ last_conn = None
+ try:
+ last_conn = osutils.connect_socket((self.host, self.port))
+ except socket.error, e:
+ # But ignore connection errors as the point is to unblock the
+                # server thread; it may happen that it's not blocked or even
+ # not started.
+ pass
+ # We start shutting down the clients while the server itself is
+ # shutting down.
+ self.server.stop_client_connections()
+ # Now we wait for the thread running self.server.serve() to finish
+ self.server.stopped.wait()
+ if last_conn is not None:
+ # Close the last connection without trying to use it. The
+ # server will not process a single byte on that socket to avoid
+ # complications (SSL starts with a handshake for example).
+ last_conn.close()
+ # Check for any exception that could have occurred in the server
+ # thread
+ try:
+ self._server_thread.join()
+ except Exception, e:
+ if self.server.ignored_exceptions(e):
+ pass
+ else:
+ raise
+ finally:
+            # Make sure we can be called twice safely; note that this means
+ # that we will raise a single exception even if several occurred in
+ # the various threads involved.
+ self.server = None
+
+ def set_ignored_exceptions(self, ignored_exceptions):
+ """Install an exception handler for the server."""
+ self.server.set_ignored_exceptions(self._server_thread,
+ ignored_exceptions)
+
+ def pending_exception(self):
+ """Raise uncaught exception in the server."""
+ self.server._pending_exception(self._server_thread)
+
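+
+def _tcp_server_in_a_thread_sketch():
+    # Illustrative sketch only, not exercised by the test suite: it shows the
+    # lifecycle of TestingTCPServerInAThread. Port 0 lets the OS pick a free
+    # port, start_server() spawns the serving thread and waits until it is
+    # ready, and stop_server() unblocks the accept() loop with a dummy
+    # connection before joining the thread.
+    server = TestingTCPServerInAThread(
+        ('127.0.0.1', 0), TestingTCPServer,
+        SocketServer.StreamRequestHandler)
+    server.start_server()
+    try:
+        return (server.host, server.port)  # the real, OS-assigned address
+    finally:
+        server.stop_server()
+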
+
+class TestingSmartConnectionHandler(SocketServer.BaseRequestHandler,
+ medium.SmartServerSocketStreamMedium):
+
+ def __init__(self, request, client_address, server):
+ medium.SmartServerSocketStreamMedium.__init__(
+ self, request, server.backing_transport,
+ server.root_client_path,
+ timeout=_DEFAULT_TESTING_CLIENT_TIMEOUT)
+ request.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ SocketServer.BaseRequestHandler.__init__(self, request, client_address,
+ server)
+
+ def handle(self):
+ try:
+ while not self.finished:
+ server_protocol = self._build_protocol()
+ self._serve_one_request(server_protocol)
+ except errors.ConnectionTimeout:
+ # idle connections aren't considered a failure of the server
+ return
+
+
+_DEFAULT_TESTING_CLIENT_TIMEOUT = 60.0
+
+class TestingSmartServer(TestingThreadingTCPServer, server.SmartTCPServer):
+
+ def __init__(self, server_address, request_handler_class,
+ backing_transport, root_client_path):
+ TestingThreadingTCPServer.__init__(self, server_address,
+ request_handler_class)
+ server.SmartTCPServer.__init__(self, backing_transport,
+ root_client_path, client_timeout=_DEFAULT_TESTING_CLIENT_TIMEOUT)
+
+ def serve(self):
+ self.run_server_started_hooks()
+ try:
+ TestingThreadingTCPServer.serve(self)
+ finally:
+ self.run_server_stopped_hooks()
+
+ def get_url(self):
+ """Return the url of the server"""
+ return "bzr://%s:%d/" % self.server_address
+
+
+class SmartTCPServer_for_testing(TestingTCPServerInAThread):
+ """Server suitable for use by transport tests.
+
+ This server is backed by the process's cwd.
+ """
+ def __init__(self, thread_name_suffix=''):
+ self.client_path_extra = None
+ self.thread_name_suffix = thread_name_suffix
+ self.host = '127.0.0.1'
+ self.port = 0
+ super(SmartTCPServer_for_testing, self).__init__(
+ (self.host, self.port),
+ TestingSmartServer,
+ TestingSmartConnectionHandler)
+
+ def create_server(self):
+ return self.server_class((self.host, self.port),
+ self.request_handler_class,
+ self.backing_transport,
+ self.root_client_path)
+
+ def start_server(self, backing_transport_server=None,
+ client_path_extra='/extra/'):
+ """Set up server for testing.
+
+ :param backing_transport_server: backing server to use. If not
+ specified, a LocalURLServer at the current working directory will
+ be used.
+ :param client_path_extra: a path segment starting with '/' to append to
+ the root URL for this server. For instance, a value of '/foo/bar/'
+ will mean the root of the backing transport will be published at a
+ URL like `bzr://127.0.0.1:nnnn/foo/bar/`, rather than
+            `bzr://127.0.0.1:nnnn/`. Default value is `/extra/`, so that tests
+ by default will fail unless they do the necessary path translation.
+ """
+ if not client_path_extra.startswith('/'):
+ raise ValueError(client_path_extra)
+ self.root_client_path = self.client_path_extra = client_path_extra
+ from bzrlib.transport.chroot import ChrootServer
+ if backing_transport_server is None:
+ backing_transport_server = LocalURLServer()
+ self.chroot_server = ChrootServer(
+ self.get_backing_transport(backing_transport_server))
+ self.chroot_server.start_server()
+ self.backing_transport = transport.get_transport_from_url(
+ self.chroot_server.get_url())
+ super(SmartTCPServer_for_testing, self).start_server()
+
+ def stop_server(self):
+ try:
+ super(SmartTCPServer_for_testing, self).stop_server()
+ finally:
+ self.chroot_server.stop_server()
+
+ def get_backing_transport(self, backing_transport_server):
+ """Get a backing transport from a server we are decorating."""
+ return transport.get_transport_from_url(
+ backing_transport_server.get_url())
+
+ def get_url(self):
+ url = self.server.get_url()
+ return url[:-1] + self.client_path_extra
+
+ def get_bogus_url(self):
+ """Return a URL which will fail to connect"""
+ return 'bzr://127.0.0.1:1/'
+
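+
+def _smart_server_url_sketch():
+    # Illustrative sketch only, not exercised by the test suite: with the
+    # default client_path_extra of '/extra/', get_url() strips the trailing
+    # slash from the raw 'bzr://127.0.0.1:<port>/' URL and appends the extra
+    # segment, so clients must translate paths accordingly.
+    server = SmartTCPServer_for_testing()
+    server.start_server()
+    try:
+        return server.get_url()  # e.g. 'bzr://127.0.0.1:<port>/extra/'
+    finally:
+        server.stop_server()
+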
+
+class ReadonlySmartTCPServer_for_testing(SmartTCPServer_for_testing):
+ """Get a readonly server for testing."""
+
+ def get_backing_transport(self, backing_transport_server):
+ """Get a backing transport from a server we are decorating."""
+ url = 'readonly+' + backing_transport_server.get_url()
+ return transport.get_transport_from_url(url)
+
+
+class SmartTCPServer_for_testing_v2_only(SmartTCPServer_for_testing):
+ """A variation of SmartTCPServer_for_testing that limits the client to
+ using RPCs in protocol v2 (i.e. bzr <= 1.5).
+ """
+
+ def get_url(self):
+ url = super(SmartTCPServer_for_testing_v2_only, self).get_url()
+ url = 'bzr-v2://' + url[len('bzr://'):]
+ return url
+
+
+class ReadonlySmartTCPServer_for_testing_v2_only(
+ SmartTCPServer_for_testing_v2_only):
+ """Get a readonly server for testing."""
+
+ def get_backing_transport(self, backing_transport_server):
+ """Get a backing transport from a server we are decorating."""
+ url = 'readonly+' + backing_transport_server.get_url()
+ return transport.get_transport_from_url(url)
diff --git a/bzrlib/tests/test_setup.py b/bzrlib/tests/test_setup.py
new file mode 100644
index 0000000..ed0d2a2
--- /dev/null
+++ b/bzrlib/tests/test_setup.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2005, 2006, 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test for setup.py build process"""
+
+import os
+import sys
+import subprocess
+
+import bzrlib
+from bzrlib import tests
+
+# TODO: Run bzr from the installed copy to see if it works. Really we need to
+# run something that exercises every module; just starting it may not detect
+# some missing modules.
+#
+# TODO: Check that the version numbers are in sync. (Or avoid this...)
+
+class TestSetup(tests.TestCaseInTempDir):
+
+ def test_build_and_install(self):
+ """ test cmd `python setup.py build`
+
+ This tests that the build process and man generator run correctly.
+        It can also catch new subdirectories that weren't added to setup.py.
+ """
+ # setup.py must be run from the root source directory, but the tests
+ # are not necessarily invoked from there
+ self.source_dir = os.path.dirname(os.path.dirname(bzrlib.__file__))
+ if not os.path.isfile(os.path.join(self.source_dir, 'setup.py')):
+ self.skip(
+ 'There is no setup.py file adjacent to the bzrlib directory')
+ try:
+ import distutils.sysconfig
+ makefile_path = distutils.sysconfig.get_makefile_filename()
+ if not os.path.exists(makefile_path):
+ self.skip(
+ 'You must have the python Makefile installed to run this'
+ ' test. Usually this can be found by installing'
+ ' "python-dev"')
+ except ImportError:
+ self.skip(
+ 'You must have distutils installed to run this test.'
+ ' Usually this can be found by installing "python-dev"')
+ self.log('test_build running from %s' % self.source_dir)
+ build_dir = os.path.join(self.test_dir, "build")
+ install_dir = os.path.join(self.test_dir, "install")
+ self.run_setup([
+ 'build', '-b', build_dir,
+ 'install', '--root', install_dir])
+        # Install layout is platform dependent
+ self.assertPathExists(install_dir)
+ self.run_setup(['clean', '-b', build_dir])
+
+ def run_setup(self, args):
+ args = [sys.executable, './setup.py', ] + args
+ self.log('source base directory: %s', self.source_dir)
+ self.log('args: %r', args)
+ p = subprocess.Popen(args,
+ cwd=self.source_dir,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = p.communicate()
+ self.log('stdout: %r', stdout)
+ self.log('stderr: %r', stderr)
+ self.assertEqual(0, p.returncode,
+ 'invocation of %r failed' % args)
diff --git a/bzrlib/tests/test_sftp_transport.py b/bzrlib/tests/test_sftp_transport.py
new file mode 100644
index 0000000..1c4d04b
--- /dev/null
+++ b/bzrlib/tests/test_sftp_transport.py
@@ -0,0 +1,499 @@
+# Copyright (C) 2005-2011 Robey Pointer <robey@lag.net>
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+import socket
+import sys
+import time
+
+from bzrlib import (
+ config,
+ controldir,
+ errors,
+ tests,
+ transport as _mod_transport,
+ ui,
+ )
+from bzrlib.osutils import (
+ lexists,
+ )
+from bzrlib.tests import (
+ features,
+ TestCaseWithTransport,
+ TestCase,
+ TestSkipped,
+ )
+from bzrlib.tests.http_server import HttpServer
+import bzrlib.transport.http
+
+if features.paramiko.available():
+ from bzrlib.transport import sftp as _mod_sftp
+ from bzrlib.tests import stub_sftp
+
+
+def set_test_transport_to_sftp(testcase):
+ """A helper to set transports on test case instances."""
+ if getattr(testcase, '_get_remote_is_absolute', None) is None:
+ testcase._get_remote_is_absolute = True
+ if testcase._get_remote_is_absolute:
+ testcase.transport_server = stub_sftp.SFTPAbsoluteServer
+ else:
+ testcase.transport_server = stub_sftp.SFTPHomeDirServer
+ testcase.transport_readonly_server = HttpServer
+
+
+class TestCaseWithSFTPServer(TestCaseWithTransport):
+ """A test case base class that provides a sftp server on localhost."""
+
+ def setUp(self):
+ super(TestCaseWithSFTPServer, self).setUp()
+ self.requireFeature(features.paramiko)
+ set_test_transport_to_sftp(self)
+
+
+class SFTPLockTests(TestCaseWithSFTPServer):
+
+ def test_sftp_locks(self):
+ from bzrlib.errors import LockError
+ t = self.get_transport()
+
+ l = t.lock_write('bogus')
+ self.assertPathExists('bogus.write-lock')
+
+        # Don't wait for the lock; locking an already locked
+        # file should raise LockError
+ self.assertRaises(LockError, t.lock_write, 'bogus')
+
+ l.unlock()
+ self.assertFalse(lexists('bogus.write-lock'))
+
+        with open('something.write-lock', 'wb') as f:
+            f.write('fake lock\n')
+ self.assertRaises(LockError, t.lock_write, 'something')
+ os.remove('something.write-lock')
+
+ l = t.lock_write('something')
+
+ l2 = t.lock_write('bogus')
+
+ l.unlock()
+ l2.unlock()
+
+
+class SFTPTransportTestRelative(TestCaseWithSFTPServer):
+ """Test the SFTP transport with homedir based relative paths."""
+
+ def test__remote_path(self):
+ if sys.platform == 'darwin':
+ # This test is about sftp absolute path handling. There is already
+ # (in this test) a TODO about windows needing an absolute path
+ # without drive letter. To me, using self.test_dir is a trick to
+ # get an absolute path for comparison purposes. That fails for OSX
+ # because the sftp server doesn't resolve the links (and it doesn't
+ # have to). --vila 20070924
+ self.knownFailure('Mac OSX symlinks /tmp to /private/tmp,'
+ ' testing against self.test_dir'
+ ' is not appropriate')
+ t = self.get_transport()
+        # This test requires a unix-like absolute path
+ test_dir = self.test_dir
+ if sys.platform == 'win32':
+ # using hack suggested by John Meinel.
+ # TODO: write another mock server for this test
+ # and use absolute path without drive letter
+ test_dir = '/' + test_dir
+ # try what is currently used:
+ # remote path = self._abspath(relpath)
+ self.assertIsSameRealPath(test_dir + '/relative',
+ t._remote_path('relative'))
+        # we don't os.path.join because windows gives us the wrong path
+ root_segments = test_dir.split('/')
+ root_parent = '/'.join(root_segments[:-1])
+ # .. should be honoured
+ self.assertIsSameRealPath(root_parent + '/sibling',
+ t._remote_path('../sibling'))
+        # / should be illegal?
+ ### FIXME decide and then test for all transports. RBC20051208
+
+
+class SFTPTransportTestRelativeRoot(TestCaseWithSFTPServer):
+ """Test the SFTP transport with homedir based relative paths."""
+
+ def setUp(self):
+ # Only SFTPHomeDirServer is tested here
+ self._get_remote_is_absolute = False
+ super(SFTPTransportTestRelativeRoot, self).setUp()
+
+ def test__remote_path_relative_root(self):
+ # relative paths are preserved
+ t = self.get_transport('')
+ self.assertEqual('/~/', t._parsed_url.path)
+ # the remote path should be relative to home dir
+        # (i.e. not beginning with a '/')
+ self.assertEqual('a', t._remote_path('a'))
+
+
+class SFTPNonServerTest(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ self.requireFeature(features.paramiko)
+
+ def test_parse_url_with_home_dir(self):
+ s = _mod_sftp.SFTPTransport(
+ 'sftp://ro%62ey:h%40t@example.com:2222/~/relative')
+ self.assertEquals(s._parsed_url.host, 'example.com')
+ self.assertEquals(s._parsed_url.port, 2222)
+ self.assertEquals(s._parsed_url.user, 'robey')
+ self.assertEquals(s._parsed_url.password, 'h@t')
+ self.assertEquals(s._parsed_url.path, '/~/relative/')
+
+ def test_relpath(self):
+ s = _mod_sftp.SFTPTransport('sftp://user@host.com/abs/path')
+ self.assertRaises(errors.PathNotChild, s.relpath,
+ 'sftp://user@host.com/~/rel/path/sub')
+
+ def test_get_paramiko_vendor(self):
+ """Test that if no 'ssh' is available we get builtin paramiko"""
+ from bzrlib.transport import ssh
+ # set '.' as the only location in the path, forcing no 'ssh' to exist
+ self.overrideAttr(ssh, '_ssh_vendor_manager')
+ self.overrideEnv('PATH', '.')
+ ssh._ssh_vendor_manager.clear_cache()
+ vendor = ssh._get_ssh_vendor()
+ self.assertIsInstance(vendor, ssh.ParamikoVendor)
+
+ def test_abspath_root_sibling_server(self):
+ server = stub_sftp.SFTPSiblingAbsoluteServer()
+ server.start_server()
+ self.addCleanup(server.stop_server)
+
+ transport = _mod_transport.get_transport_from_url(server.get_url())
+ self.assertFalse(transport.abspath('/').endswith('/~/'))
+ self.assertTrue(transport.abspath('/').endswith('/'))
+ del transport
+
+
+class SFTPBranchTest(TestCaseWithSFTPServer):
+ """Test some stuff when accessing a bzr Branch over sftp"""
+
+ def test_push_support(self):
+ self.build_tree(['a/', 'a/foo'])
+ t = controldir.ControlDir.create_standalone_workingtree('a')
+ b = t.branch
+ t.add('foo')
+ t.commit('foo', rev_id='a1')
+
+ b2 = controldir.ControlDir.create_branch_and_repo(self.get_url('/b'))
+ b2.pull(b)
+
+ self.assertEquals(b2.last_revision(), 'a1')
+
+        with open('a/foo', 'wt') as f:
+            f.write('something new in foo\n')
+ t.commit('new', rev_id='a2')
+ b2.pull(b)
+
+ self.assertEquals(b2.last_revision(), 'a2')
+
+
+class SSHVendorConnection(TestCaseWithSFTPServer):
+ """Test that the ssh vendors can all connect.
+
+ Verify that a full-handshake (SSH over loopback TCP) sftp connection works.
+
+ We have 3 sftp implementations in the test suite:
+ 'loopback': Doesn't use ssh, just uses a local socket. Most tests are
+ done this way to save the handshaking time, so it is not
+ tested again here
+ 'none': This uses paramiko's built-in ssh client and server, and layers
+ sftp on top of it.
+ None: If 'ssh' exists on the machine, then it will be spawned as a
+ child process.
+ """
+
+ def setUp(self):
+ super(SSHVendorConnection, self).setUp()
+
+ def create_server():
+ """Just a wrapper so that when created, it will set _vendor"""
+ # SFTPFullAbsoluteServer can handle any vendor,
+ # it just needs to be set between the time it is instantiated
+ # and the time .setUp() is called
+ server = stub_sftp.SFTPFullAbsoluteServer()
+ server._vendor = self._test_vendor
+ return server
+ self._test_vendor = 'loopback'
+ self.vfs_transport_server = create_server
+ f = open('a_file', 'wb')
+ try:
+ f.write('foobar\n')
+ finally:
+ f.close()
+
+ def set_vendor(self, vendor):
+ self._test_vendor = vendor
+
+ def test_connection_paramiko(self):
+ from bzrlib.transport import ssh
+ self.set_vendor(ssh.ParamikoVendor())
+ t = self.get_transport()
+ self.assertEqual('foobar\n', t.get('a_file').read())
+
+ def test_connection_vendor(self):
+ raise TestSkipped("We don't test spawning real ssh,"
+ " because it prompts for a password."
+ " Enable this test if we figure out"
+ " how to prevent this.")
+ self.set_vendor(None)
+ t = self.get_transport()
+ self.assertEqual('foobar\n', t.get('a_file').read())
+
+
+class SSHVendorBadConnection(TestCaseWithTransport):
+ """Test that the ssh vendors handle bad connection properly
+
+ We don't subclass TestCaseWithSFTPServer, because we don't actually
+ need an SFTP connection.
+ """
+
+ def setUp(self):
+ self.requireFeature(features.paramiko)
+ super(SSHVendorBadConnection, self).setUp()
+
+ # open a random port, so we know nobody else is using it
+ # but don't actually listen on the port.
+ s = socket.socket()
+ s.bind(('localhost', 0))
+ self.addCleanup(s.close)
+ self.bogus_url = 'sftp://%s:%s/' % s.getsockname()
+
+ def set_vendor(self, vendor, subprocess_stderr=None):
+ from bzrlib.transport import ssh
+ self.overrideAttr(ssh._ssh_vendor_manager, '_cached_ssh_vendor', vendor)
+ if subprocess_stderr is not None:
+ self.overrideAttr(ssh.SubprocessVendor, "_stderr_target",
+ subprocess_stderr)
+
+ def test_bad_connection_paramiko(self):
+ """Test that a real connection attempt raises the right error"""
+ from bzrlib.transport import ssh
+ self.set_vendor(ssh.ParamikoVendor())
+ t = _mod_transport.get_transport_from_url(self.bogus_url)
+ self.assertRaises(errors.ConnectionError, t.get, 'foobar')
+
+ def test_bad_connection_ssh(self):
+ """None => auto-detect vendor"""
+ f = file(os.devnull, "wb")
+ self.addCleanup(f.close)
+ self.set_vendor(None, f)
+ t = _mod_transport.get_transport_from_url(self.bogus_url)
+ try:
+ self.assertRaises(errors.ConnectionError, t.get, 'foobar')
+ except NameError, e:
+ if "global name 'SSHException'" in str(e):
+ self.knownFailure('Known NameError bug in paramiko 1.6.1')
+ raise
+
+
+class SFTPLatencyKnob(TestCaseWithSFTPServer):
+ """Test that the testing SFTPServer's latency knob works."""
+
+ def test_latency_knob_slows_transport(self):
+ # change the latency knob to 500ms. We take about 40ms for a
+ # loopback connection ordinarily.
+ start_time = time.time()
+ self.get_server().add_latency = 0.5
+ transport = self.get_transport()
+ transport.has('not me') # Force connection by issuing a request
+ with_latency_knob_time = time.time() - start_time
+ self.assertTrue(with_latency_knob_time > 0.4)
+
+ def test_default(self):
+ # This test is potentially brittle: under extremely high machine load
+ # it could fail, but that is quite unlikely
+ raise TestSkipped('Timing-sensitive test')
+ start_time = time.time()
+ transport = self.get_transport()
+ transport.has('not me') # Force connection by issuing a request
+ regular_time = time.time() - start_time
+ self.assertTrue(regular_time < 0.5)
+
+
+class FakeSocket(object):
+ """Fake socket object used to test the SocketDelay wrapper without
+ using a real socket.
+ """
+
+ def __init__(self):
+ self._data = ""
+
+ def send(self, data, flags=0):
+ self._data += data
+ return len(data)
+
+ def sendall(self, data, flags=0):
+ self._data += data
+ return len(data)
+
+ def recv(self, size, flags=0):
+ if size < len(self._data):
+ result = self._data[:size]
+ self._data = self._data[size:]
+ return result
+ else:
+ result = self._data
+ self._data = ""
+ return result
+
+
+class TestSocketDelay(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.requireFeature(features.paramiko)
+
+ def test_delay(self):
+ sending = FakeSocket()
+ receiving = stub_sftp.SocketDelay(sending, 0.1, bandwidth=1000000,
+ really_sleep=False)
+ # check that simulated time is charged only per round-trip:
+ t1 = stub_sftp.SocketDelay.simulated_time
+ receiving.send("connect1")
+ self.assertEqual(sending.recv(1024), "connect1")
+ t2 = stub_sftp.SocketDelay.simulated_time
+ self.assertAlmostEqual(t2 - t1, 0.1)
+ receiving.send("connect2")
+ self.assertEqual(sending.recv(1024), "connect2")
+ sending.send("hello")
+ self.assertEqual(receiving.recv(1024), "hello")
+ t3 = stub_sftp.SocketDelay.simulated_time
+ self.assertAlmostEqual(t3 - t2, 0.1)
+ sending.send("hello")
+ self.assertEqual(receiving.recv(1024), "hello")
+ sending.send("hello")
+ self.assertEqual(receiving.recv(1024), "hello")
+ sending.send("hello")
+ self.assertEqual(receiving.recv(1024), "hello")
+ t4 = stub_sftp.SocketDelay.simulated_time
+ self.assertAlmostEqual(t4, t3)
+
+ def test_bandwidth(self):
+ sending = FakeSocket()
+ receiving = stub_sftp.SocketDelay(sending, 0, bandwidth=8.0/(1024*1024),
+ really_sleep=False)
+ # check that simulated time is charged only per round-trip:
+ t1 = stub_sftp.SocketDelay.simulated_time
+ receiving.send("connect")
+ self.assertEqual(sending.recv(1024), "connect")
+ sending.send("a" * 100)
+ self.assertEqual(receiving.recv(1024), "a" * 100)
+ t2 = stub_sftp.SocketDelay.simulated_time
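+        # 107 bytes crossed the wrapped socket: the 7-byte "connect" message
+        # plus the 100-byte payload. With zero latency and this bandwidth
+        # setting (presumably megabits per second) each byte costs about one
+        # unit of simulated time, hence the expected 100 + 7.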
+ self.assertAlmostEqual(t2 - t1, 100 + 7)
+
+
+class ReadvFile(object):
+ """An object that acts like Paramiko's SFTPFile when readv() is used"""
+
+ def __init__(self, data):
+ self._data = data
+
+ def readv(self, requests):
+ for start, length in requests:
+ yield self._data[start:start+length]
+
+ def close(self):
+ pass
+
+
+def _null_report_activity(*a, **k):
+ pass
+
+
+class Test_SFTPReadvHelper(tests.TestCase):
+
+ def checkGetRequests(self, expected_requests, offsets):
+ self.requireFeature(features.paramiko)
+ helper = _mod_sftp._SFTPReadvHelper(offsets, 'artificial_test',
+ _null_report_activity)
+ self.assertEqual(expected_requests, helper._get_requests())
+
+ def test__get_requests(self):
+        # Small requests covering a contiguous range are coalesced into a
+        # single readv request
+ self.checkGetRequests([(0, 100)],
+ [(0, 20), (30, 50), (20, 10), (80, 20)])
+ # Non-contiguous ranges are given as multiple requests
+ self.checkGetRequests([(0, 20), (30, 50)],
+ [(10, 10), (30, 20), (0, 10), (50, 30)])
+        # Ranges larger than _max_request_size (32kB) are broken up into
+        # multiple requests, even when the coalesced range spans multiple
+        # logical requests
+ self.checkGetRequests([(0, 32768), (32768, 32768), (65536, 464)],
+ [(0, 40000), (40000, 100), (40100, 1900),
+ (42000, 24000)])
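+        # The arithmetic behind the expectation above: the four offsets are
+        # contiguous and add up to 40000 + 100 + 1900 + 24000 = 66000 bytes,
+        # which is re-split as 32768 + 32768 + 464 = 66000 to respect the
+        # 32kB cap on a single request.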
+
+ def checkRequestAndYield(self, expected, data, offsets):
+ self.requireFeature(features.paramiko)
+ helper = _mod_sftp._SFTPReadvHelper(offsets, 'artificial_test',
+ _null_report_activity)
+ data_f = ReadvFile(data)
+ result = list(helper.request_and_yield_offsets(data_f))
+ self.assertEqual(expected, result)
+
+ def test_request_and_yield_offsets(self):
+ data = 'abcdefghijklmnopqrstuvwxyz'
+ self.checkRequestAndYield([(0, 'a'), (5, 'f'), (10, 'klm')], data,
+ [(0, 1), (5, 1), (10, 3)])
+ # Should combine requests, and split them again
+ self.checkRequestAndYield([(0, 'a'), (1, 'b'), (10, 'klm')], data,
+ [(0, 1), (1, 1), (10, 3)])
+ # Out of order requests. The requests should get combined, but then be
+ # yielded out-of-order. We also need one that is at the end of a
+ # previous range. See bug #293746
+ self.checkRequestAndYield([(0, 'a'), (10, 'k'), (4, 'efg'), (1, 'bcd')],
+ data, [(0, 1), (10, 1), (4, 3), (1, 3)])
+
+
+class TestUsesAuthConfig(TestCaseWithSFTPServer):
+ """Test that AuthenticationConfig can supply default usernames."""
+
+ def get_transport_for_connection(self, set_config):
+ port = self.get_server().port
+ if set_config:
+ conf = config.AuthenticationConfig()
+ conf._get_config().update(
+ {'sftptest': {'scheme': 'ssh', 'port': port, 'user': 'bar'}})
+ conf._save()
+ t = _mod_transport.get_transport_from_url(
+ 'sftp://localhost:%d' % port)
+ # force a connection to be performed.
+ t.has('foo')
+ return t
+
+ def test_sftp_uses_config(self):
+ t = self.get_transport_for_connection(set_config=True)
+ self.assertEqual('bar', t._get_credentials()[0])
+
+ def test_sftp_is_none_if_no_config(self):
+ t = self.get_transport_for_connection(set_config=False)
+ self.assertIs(None, t._get_credentials()[0])
+
+ def test_sftp_doesnt_prompt_username(self):
+ stdout = tests.StringIOWrapper()
+ ui.ui_factory = tests.TestUIFactory(stdin='joe\nfoo\n', stdout=stdout)
+ t = self.get_transport_for_connection(set_config=False)
+ self.assertIs(None, t._get_credentials()[0])
+        # No prompts should've been printed; stdin shouldn't have been read
+ self.assertEquals("", stdout.getvalue())
+ self.assertEquals(0, ui.ui_factory.stdin.tell())
diff --git a/bzrlib/tests/test_shelf.py b/bzrlib/tests/test_shelf.py
new file mode 100644
index 0000000..5467779
--- /dev/null
+++ b/bzrlib/tests/test_shelf.py
@@ -0,0 +1,767 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+from bzrlib import (
+ errors,
+ osutils,
+ pack,
+ shelf,
+ tests,
+ transform,
+ workingtree,
+ )
+from bzrlib.tests import (
+ features,
+ )
+
+
+EMPTY_SHELF = ("Bazaar pack format 1 (introduced in 0.18)\n"
+ "B23\n"
+ "metadata\n\n"
+ "d11:revision_id5:null:e"
+ "B159\n"
+ "attribs\n\n"
+ "d10:_id_numberi0e18:_new_executabilityde7:_new_idde"
+ "9:_new_namede11:_new_parentde16:_non_present_idsde"
+ "17:_removed_contentsle11:_removed_idle14:_tree_path_idsdeeE")
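+# The empty shelf above is a "Bazaar pack format 1" container with two
+# records: a bencoded "metadata" record holding the null: revision id, and an
+# "attribs" record holding an empty serialized transform (no new ids, names,
+# parents, executability or removed content).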
+
+
+class TestPrepareShelf(tests.TestCaseWithTransport):
+
+ def prepare_shelve_rename(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.add(['foo'], ['foo-id'])
+ tree.commit('foo')
+ tree.rename_one('foo', 'bar')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('rename', 'foo-id', 'foo', 'bar')],
+ list(creator.iter_shelvable()))
+ return creator
+
+ def check_shelve_rename(self, creator):
+ work_trans_id = creator.work_transform.trans_id_file_id('foo-id')
+ self.assertEqual('foo', creator.work_transform.final_name(
+ work_trans_id))
+ shelf_trans_id = creator.shelf_transform.trans_id_file_id('foo-id')
+ self.assertEqual('bar', creator.shelf_transform.final_name(
+ shelf_trans_id))
+
+ def test_shelve_rename(self):
+ creator = self.prepare_shelve_rename()
+ creator.shelve_rename('foo-id')
+ self.check_shelve_rename(creator)
+
+ def test_shelve_change_handles_rename(self):
+ creator = self.prepare_shelve_rename()
+ creator.shelve_change(('rename', 'foo-id', 'foo', 'bar'))
+ self.check_shelve_rename(creator)
+
+ def prepare_shelve_move(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo/', 'bar/', 'foo/baz'])
+ tree.add(['foo', 'bar', 'foo/baz'], ['foo-id', 'bar-id', 'baz-id'])
+ tree.commit('foo')
+ tree.rename_one('foo/baz', 'bar/baz')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('rename', 'baz-id', 'foo/baz', 'bar/baz')],
+ list(creator.iter_shelvable()))
+ return creator, tree
+
+ def check_shelve_move(self, creator, tree):
+ work_trans_id = creator.work_transform.trans_id_file_id('baz-id')
+ work_foo = creator.work_transform.trans_id_file_id('foo-id')
+ self.assertEqual(work_foo, creator.work_transform.final_parent(
+ work_trans_id))
+ shelf_trans_id = creator.shelf_transform.trans_id_file_id('baz-id')
+ shelf_bar = creator.shelf_transform.trans_id_file_id('bar-id')
+ self.assertEqual(shelf_bar, creator.shelf_transform.final_parent(
+ shelf_trans_id))
+ creator.transform()
+ self.assertEqual('foo/baz', tree.id2path('baz-id'))
+
+ def test_shelve_move(self):
+ creator, tree = self.prepare_shelve_move()
+ creator.shelve_rename('baz-id')
+ self.check_shelve_move(creator, tree)
+
+ def test_shelve_change_handles_move(self):
+ creator, tree = self.prepare_shelve_move()
+ creator.shelve_change(('rename', 'baz-id', 'foo/baz', 'bar/baz'))
+ self.check_shelve_move(creator, tree)
+
+ def test_shelve_changed_root_id(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.set_root_id('first-root-id')
+ tree.add(['foo'], ['foo-id'])
+ tree.commit('foo')
+ tree.set_root_id('second-root-id')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.expectFailure('shelf doesn\'t support shelving root changes yet',
+ self.assertEqual, [
+ ('delete file', 'first-root-id', 'directory', ''),
+ ('add file', 'second-root-id', 'directory', ''),
+ ('rename', 'foo-id', u'foo', u'foo'),
+ ], list(creator.iter_shelvable()))
+
+ self.assertEqual([('delete file', 'first-root-id', 'directory', ''),
+ ('add file', 'second-root-id', 'directory', ''),
+ ('rename', 'foo-id', u'foo', u'foo'),
+ ], list(creator.iter_shelvable()))
+
+ def assertShelvedFileEqual(self, expected_content, creator, file_id):
+ s_trans_id = creator.shelf_transform.trans_id_file_id(file_id)
+ shelf_file = creator.shelf_transform._limbo_name(s_trans_id)
+ self.assertFileEqual(expected_content, shelf_file)
+
+ def prepare_content_change(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('foo', 'a\n')])
+ tree.add('foo', 'foo-id')
+ tree.commit('Committed foo')
+ self.build_tree_contents([('foo', 'b\na\nc\n')])
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ return creator
+
+ def test_shelve_content_change(self):
+ creator = self.prepare_content_change()
+ self.assertEqual([('modify text', 'foo-id')],
+ list(creator.iter_shelvable()))
+ creator.shelve_lines('foo-id', ['a\n', 'c\n'])
+ creator.transform()
+ self.assertFileEqual('a\nc\n', 'foo')
+ self.assertShelvedFileEqual('b\na\n', creator, 'foo-id')
+
+ def test_shelve_change_handles_modify_text(self):
+ creator = self.prepare_content_change()
+ creator.shelve_change(('modify text', 'foo-id'))
+ creator.transform()
+ self.assertFileEqual('a\n', 'foo')
+ self.assertShelvedFileEqual('b\na\nc\n', creator, 'foo-id')
+
+ def test_shelve_all(self):
+ creator = self.prepare_content_change()
+ creator.shelve_all()
+ creator.transform()
+ self.assertFileEqual('a\n', 'foo')
+ self.assertShelvedFileEqual('b\na\nc\n', creator, 'foo-id')
+
+ def prepare_shelve_creation(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('Empty tree')
+ self.build_tree_contents([('foo', 'a\n'), ('bar/',)])
+ tree.add(['foo', 'bar'], ['foo-id', 'bar-id'])
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('add file', 'bar-id', 'directory', 'bar'),
+ ('add file', 'foo-id', 'file', 'foo')],
+ sorted(list(creator.iter_shelvable())))
+ return creator, tree
+
+ def check_shelve_creation(self, creator, tree):
+ self.assertRaises(StopIteration,
+ tree.iter_entries_by_dir(['foo-id']).next)
+ s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id')
+ self.assertEqual('foo-id',
+ creator.shelf_transform.final_file_id(s_trans_id))
+ self.assertPathDoesNotExist('foo')
+ self.assertPathDoesNotExist('bar')
+ self.assertShelvedFileEqual('a\n', creator, 'foo-id')
+ s_bar_trans_id = creator.shelf_transform.trans_id_file_id('bar-id')
+ self.assertEqual('directory',
+ creator.shelf_transform.final_kind(s_bar_trans_id))
+
+ def test_shelve_creation(self):
+ creator, tree = self.prepare_shelve_creation()
+ creator.shelve_creation('foo-id')
+ creator.shelve_creation('bar-id')
+ creator.transform()
+ self.check_shelve_creation(creator, tree)
+
+ def test_shelve_change_handles_creation(self):
+ creator, tree = self.prepare_shelve_creation()
+ creator.shelve_change(('add file', 'foo-id', 'file', 'foo'))
+ creator.shelve_change(('add file', 'bar-id', 'directory', 'bar'))
+ creator.transform()
+ self.check_shelve_creation(creator, tree)
+
+ def _test_shelve_symlink_creation(self, link_name, link_target,
+ shelve_change=False):
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('Empty tree')
+ os.symlink(link_target, link_name)
+ tree.add(link_name, 'foo-id')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('add file', 'foo-id', 'symlink', link_name)],
+ list(creator.iter_shelvable()))
+ if shelve_change:
+ creator.shelve_change(('add file', 'foo-id', 'symlink', link_name))
+ else:
+ creator.shelve_creation('foo-id')
+ creator.transform()
+ s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id')
+ self.assertPathDoesNotExist(link_name)
+ limbo_name = creator.shelf_transform._limbo_name(s_trans_id)
+ self.assertEqual(link_target, osutils.readlink(limbo_name))
+ ptree = creator.shelf_transform.get_preview_tree()
+ self.assertEqual(link_target, ptree.get_symlink_target('foo-id'))
+
+ def test_shelve_symlink_creation(self):
+ self._test_shelve_symlink_creation('foo', 'bar')
+
+ def test_shelve_unicode_symlink_creation(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self._test_shelve_symlink_creation(u'fo\N{Euro Sign}o',
+ u'b\N{Euro Sign}ar')
+
+ def test_shelve_change_handles_symlink_creation(self):
+ self._test_shelve_symlink_creation('foo', 'bar', shelve_change=True)
+
+ def _test_shelve_symlink_target_change(self, link_name,
+ old_target, new_target,
+ shelve_change=False):
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ os.symlink(old_target, link_name)
+ tree.add(link_name, 'foo-id')
+ tree.commit("commit symlink")
+ os.unlink(link_name)
+ os.symlink(new_target, link_name)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('modify target', 'foo-id', link_name,
+ old_target, new_target)],
+ list(creator.iter_shelvable()))
+ if shelve_change:
+ creator.shelve_change(('modify target', 'foo-id', link_name,
+ old_target, new_target))
+ else:
+ creator.shelve_modify_target('foo-id')
+ creator.transform()
+ self.assertEqual(old_target, osutils.readlink(link_name))
+ s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id')
+ limbo_name = creator.shelf_transform._limbo_name(s_trans_id)
+ self.assertEqual(new_target, osutils.readlink(limbo_name))
+ ptree = creator.shelf_transform.get_preview_tree()
+ self.assertEqual(new_target, ptree.get_symlink_target('foo-id'))
+
+ def test_shelve_symlink_target_change(self):
+ self._test_shelve_symlink_target_change('foo', 'bar', 'baz')
+
+ def test_shelve_unicode_symlink_target_change(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self._test_shelve_symlink_target_change(
+ u'fo\N{Euro Sign}o', u'b\N{Euro Sign}ar', u'b\N{Euro Sign}az')
+
+ def test_shelve_change_handles_symlink_target_change(self):
+ self._test_shelve_symlink_target_change('foo', 'bar', 'baz',
+ shelve_change=True)
+
+ def test_shelve_creation_no_contents(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('Empty tree')
+ self.build_tree(['foo'])
+ tree.add('foo', 'foo-id')
+ os.unlink('foo')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('add file', 'foo-id', None, 'foo')],
+ sorted(list(creator.iter_shelvable())))
+ creator.shelve_creation('foo-id')
+ creator.transform()
+ self.assertRaises(StopIteration,
+ tree.iter_entries_by_dir(['foo-id']).next)
+ self.assertShelvedFileEqual('', creator, 'foo-id')
+ s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id')
+ self.assertEqual('foo-id',
+ creator.shelf_transform.final_file_id(s_trans_id))
+ self.assertPathDoesNotExist('foo')
+
+ def prepare_shelve_deletion(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('tree/foo/',), ('tree/foo/bar', 'baz')])
+ tree.add(['foo', 'foo/bar'], ['foo-id', 'bar-id'])
+ tree.commit('Added file and directory')
+ tree.unversion(['foo-id', 'bar-id'])
+ os.unlink('tree/foo/bar')
+ os.rmdir('tree/foo')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('delete file', 'bar-id', 'file', 'foo/bar'),
+ ('delete file', 'foo-id', 'directory', 'foo')],
+ sorted(list(creator.iter_shelvable())))
+ return creator, tree
+
+ def check_shelve_deletion(self, tree):
+ self.assertTrue(tree.has_id('foo-id'))
+ self.assertTrue(tree.has_id('bar-id'))
+ self.assertFileEqual('baz', 'tree/foo/bar')
+
+ def test_shelve_deletion(self):
+ creator, tree = self.prepare_shelve_deletion()
+ creator.shelve_deletion('foo-id')
+ creator.shelve_deletion('bar-id')
+ creator.transform()
+ self.check_shelve_deletion(tree)
+
+ def test_shelve_change_handles_deletion(self):
+ creator, tree = self.prepare_shelve_deletion()
+ creator.shelve_change(('delete file', 'foo-id', 'directory', 'foo'))
+ creator.shelve_change(('delete file', 'bar-id', 'file', 'foo/bar'))
+ creator.transform()
+ self.check_shelve_deletion(tree)
+
+ def test_shelve_delete_contents(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo',])
+ tree.add('foo', 'foo-id')
+ tree.commit('Added file and directory')
+ os.unlink('tree/foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('delete file', 'foo-id', 'file', 'foo')],
+ sorted(list(creator.iter_shelvable())))
+ creator.shelve_deletion('foo-id')
+ creator.transform()
+ self.assertPathExists('tree/foo')
+
+ def prepare_shelve_change_kind(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/foo', 'bar')])
+ tree.add('foo', 'foo-id')
+ tree.commit('Added file and directory')
+ os.unlink('tree/foo')
+ os.mkdir('tree/foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('change kind', 'foo-id', 'file', 'directory',
+ 'foo')], sorted(list(creator.iter_shelvable())))
+ return creator
+
+ def check_shelve_change_kind(self, creator):
+ self.assertFileEqual('bar', 'tree/foo')
+ s_trans_id = creator.shelf_transform.trans_id_file_id('foo-id')
+ self.assertEqual('directory',
+ creator.shelf_transform._new_contents[s_trans_id])
+
+ def test_shelve_change_kind(self):
+ creator = self.prepare_shelve_change_kind()
+ creator.shelve_content_change('foo-id')
+ creator.transform()
+ self.check_shelve_change_kind(creator)
+
+ def test_shelve_change_handles_change_kind(self):
+ creator = self.prepare_shelve_change_kind()
+ creator.shelve_change(('change kind', 'foo-id', 'file', 'directory',
+ 'foo'))
+ creator.transform()
+ self.check_shelve_change_kind(creator)
+
+ def test_shelve_change_unknown_change(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ e = self.assertRaises(ValueError, creator.shelve_change, ('unknown',))
+ self.assertEqual('Unknown change kind: "unknown"', str(e))
+
+ def test_shelve_unversion(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo',])
+ tree.add('foo', 'foo-id')
+ tree.commit('Added file and directory')
+ tree.unversion(['foo-id'])
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([('delete file', 'foo-id', 'file', 'foo')],
+ sorted(list(creator.iter_shelvable())))
+ creator.shelve_deletion('foo-id')
+ creator.transform()
+ self.assertPathExists('tree/foo')
+
+ def test_shelve_serialization(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ shelf_file = open('shelf', 'wb')
+ self.addCleanup(shelf_file.close)
+ try:
+ creator.write_shelf(shelf_file)
+ finally:
+ shelf_file.close()
+ self.assertFileEqual(EMPTY_SHELF, 'shelf')
+
+ def test_write_shelf(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo'])
+ tree.add('foo', 'foo-id')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ list(creator.iter_shelvable())
+ creator.shelve_creation('foo-id')
+ shelf_file = open('shelf', 'wb')
+ try:
+ creator.write_shelf(shelf_file)
+ finally:
+ shelf_file.close()
+ parser = pack.ContainerPushParser()
+ shelf_file = open('shelf', 'rb')
+ try:
+ parser.accept_bytes(shelf_file.read())
+ finally:
+ shelf_file.close()
+ tt = transform.TransformPreview(tree)
+ self.addCleanup(tt.finalize)
+ records = iter(parser.read_pending_records())
+        # skip revision-id
+ records.next()
+ tt.deserialize(records)
+
+ def test_shelve_unversioned(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_tree_write()
+ try:
+ self.assertRaises(errors.PathsNotVersionedError,
+ shelf.ShelfCreator, tree, tree.basis_tree(), ['foo'])
+ finally:
+ tree.unlock()
+ # We should be able to lock/unlock the tree if ShelfCreator cleaned
+ # after itself.
+ wt = workingtree.WorkingTree.open('tree')
+ wt.lock_tree_write()
+ wt.unlock()
+        # And a second attempt should raise the same error (no
+ # limbo/pending_deletion leftovers).
+ tree.lock_tree_write()
+ try:
+ self.assertRaises(errors.PathsNotVersionedError,
+ shelf.ShelfCreator, tree, tree.basis_tree(), ['foo'])
+ finally:
+ tree.unlock()
+
+ def test_shelve_skips_added_root(self):
+ """Skip adds of the root when iterating through shelvable changes."""
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ self.assertEqual([], list(creator.iter_shelvable()))
+
+
+class TestUnshelver(tests.TestCaseWithTransport):
+
+ def test_make_merger(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('first commit')
+ self.build_tree_contents([('tree/foo', 'bar')])
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add('foo', 'foo-id')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ list(creator.iter_shelvable())
+ creator.shelve_creation('foo-id')
+ shelf_file = open('shelf-file', 'w+b')
+ try:
+ creator.write_shelf(shelf_file)
+ creator.transform()
+ shelf_file.seek(0)
+ unshelver = shelf.Unshelver.from_tree_and_shelf(tree, shelf_file)
+ unshelver.make_merger().do_merge()
+ self.addCleanup(unshelver.finalize)
+ self.assertFileEqual('bar', 'tree/foo')
+ finally:
+ shelf_file.close()
+
+ def test_unshelve_changed(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('tree/foo', 'a\nb\nc\n')])
+ tree.add('foo', 'foo-id')
+ tree.commit('first commit')
+ self.build_tree_contents([('tree/foo', 'a\nb\nd\n')])
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ list(creator.iter_shelvable())
+ creator.shelve_lines('foo-id', ['a\n', 'b\n', 'c\n'])
+ shelf_file = open('shelf', 'w+b')
+ self.addCleanup(shelf_file.close)
+ creator.write_shelf(shelf_file)
+ creator.transform()
+ self.build_tree_contents([('tree/foo', 'z\na\nb\nc\n')])
+ shelf_file.seek(0)
+ unshelver = shelf.Unshelver.from_tree_and_shelf(tree, shelf_file)
+ self.addCleanup(unshelver.finalize)
+ unshelver.make_merger().do_merge()
+ self.assertFileEqual('z\na\nb\nd\n', 'tree/foo')
+
+ def test_unshelve_deleted(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('tree/foo/',), ('tree/foo/bar', 'baz')])
+ tree.add(['foo', 'foo/bar'], ['foo-id', 'bar-id'])
+ tree.commit('Added file and directory')
+ tree.unversion(['foo-id', 'bar-id'])
+ os.unlink('tree/foo/bar')
+ os.rmdir('tree/foo')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ list(creator.iter_shelvable())
+ creator.shelve_deletion('foo-id')
+ creator.shelve_deletion('bar-id')
+ with open('shelf', 'w+b') as shelf_file:
+ creator.write_shelf(shelf_file)
+ creator.transform()
+ creator.finalize()
+ # validate the test setup
+ self.assertTrue(tree.has_id('foo-id'))
+ self.assertTrue(tree.has_id('bar-id'))
+ self.assertFileEqual('baz', 'tree/foo/bar')
+ with open('shelf', 'r+b') as shelf_file:
+ unshelver = shelf.Unshelver.from_tree_and_shelf(tree, shelf_file)
+ self.addCleanup(unshelver.finalize)
+ unshelver.make_merger().do_merge()
+ self.assertFalse(tree.has_id('foo-id'))
+ self.assertFalse(tree.has_id('bar-id'))
+
+ def test_unshelve_base(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('rev1', rev_id='rev1')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ manager = tree.get_shelf_manager()
+ shelf_id, shelf_file = manager.new_shelf()
+ try:
+ creator.write_shelf(shelf_file)
+ finally:
+ shelf_file.close()
+ tree.commit('rev2', rev_id='rev2')
+ shelf_file = manager.read_shelf(1)
+ self.addCleanup(shelf_file.close)
+ unshelver = shelf.Unshelver.from_tree_and_shelf(tree, shelf_file)
+ self.addCleanup(unshelver.finalize)
+ self.assertEqual('rev1', unshelver.base_tree.get_revision_id())
+
+ def test_unshelve_serialization(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('shelf', EMPTY_SHELF)])
+ shelf_file = open('shelf', 'rb')
+ self.addCleanup(shelf_file.close)
+ unshelver = shelf.Unshelver.from_tree_and_shelf(tree, shelf_file)
+ unshelver.finalize()
+
+ def test_corrupt_shelf(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('shelf', EMPTY_SHELF.replace('metadata',
+ 'foo'))])
+ shelf_file = open('shelf', 'rb')
+ self.addCleanup(shelf_file.close)
+ e = self.assertRaises(errors.ShelfCorrupt,
+ shelf.Unshelver.from_tree_and_shelf, tree,
+ shelf_file)
+ self.assertEqual('Shelf corrupt.', str(e))
+
+ def test_unshelve_subdir_in_now_removed_dir(self):
+ tree = self.make_branch_and_tree('.')
+ self.addCleanup(tree.lock_write().unlock)
+ self.build_tree(['dir/', 'dir/subdir/', 'dir/subdir/foo'])
+ tree.add(['dir'], ['dir-id'])
+ tree.commit('versioned dir')
+ tree.add(['dir/subdir', 'dir/subdir/foo'], ['subdir-id', 'foo-id'])
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ for change in creator.iter_shelvable():
+ creator.shelve_change(change)
+ shelf_manager = tree.get_shelf_manager()
+ shelf_id = shelf_manager.shelve_changes(creator)
+ self.assertPathDoesNotExist('dir/subdir')
+ tree.remove(['dir'])
+ unshelver = shelf_manager.get_unshelver(shelf_id)
+ self.addCleanup(unshelver.finalize)
+ unshelver.make_merger().do_merge()
+ self.assertPathExists('dir/subdir/foo')
+ self.assertEqual('dir-id', tree.path2id('dir'))
+ self.assertEqual('subdir-id', tree.path2id('dir/subdir'))
+ self.assertEqual('foo-id', tree.path2id('dir/subdir/foo'))
+
+
+class TestShelfManager(tests.TestCaseWithTransport):
+
+ def test_get_shelf_manager(self):
+ tree = self.make_branch_and_tree('.')
+ manager = tree.get_shelf_manager()
+ self.assertEqual(tree._transport.base + 'shelf/',
+ manager.transport.base)
+
+ def get_manager(self):
+ return self.make_branch_and_tree('.').get_shelf_manager()
+
+ def test_get_shelf_filename(self):
+ tree = self.make_branch_and_tree('.')
+ manager = tree.get_shelf_manager()
+ self.assertEqual('shelf-1', manager.get_shelf_filename(1))
+
+ def test_get_shelf_ids(self):
+ tree = self.make_branch_and_tree('.')
+ manager = tree.get_shelf_manager()
+ self.assertEqual([1, 3], manager.get_shelf_ids(
+ ['shelf-1', 'shelf-02', 'shelf-3']))
+
+ def test_new_shelf(self):
+ manager = self.get_manager()
+ shelf_id, shelf_file = manager.new_shelf()
+ shelf_file.close()
+ self.assertEqual(1, shelf_id)
+ shelf_id, shelf_file = manager.new_shelf()
+ shelf_file.close()
+ self.assertEqual(2, shelf_id)
+ manager.delete_shelf(1)
+ shelf_id, shelf_file = manager.new_shelf()
+ shelf_file.close()
+ self.assertEqual(3, shelf_id)
+
+ def test_active_shelves(self):
+ manager = self.get_manager()
+ self.assertEqual([], manager.active_shelves())
+ shelf_id, shelf_file = manager.new_shelf()
+ shelf_file.close()
+ self.assertEqual([1], manager.active_shelves())
+
+ def test_delete_shelf(self):
+ manager = self.get_manager()
+ shelf_id, shelf_file = manager.new_shelf()
+ shelf_file.close()
+ self.assertEqual([1], manager.active_shelves())
+ manager.delete_shelf(1)
+ self.assertEqual([], manager.active_shelves())
+
+ def test_last_shelf(self):
+ manager = self.get_manager()
+ self.assertIs(None, manager.last_shelf())
+ shelf_id, shelf_file = manager.new_shelf()
+ shelf_file.close()
+ self.assertEqual(1, manager.last_shelf())
+
+ def test_read_shelf(self):
+ manager = self.get_manager()
+ shelf_id, shelf_file = manager.new_shelf()
+ try:
+ shelf_file.write('foo')
+ finally:
+ shelf_file.close()
+ shelf_id, shelf_file = manager.new_shelf()
+ try:
+ shelf_file.write('bar')
+ finally:
+ shelf_file.close()
+ shelf_file = manager.read_shelf(1)
+ try:
+ self.assertEqual('foo', shelf_file.read())
+ finally:
+ shelf_file.close()
+ shelf_file = manager.read_shelf(2)
+ try:
+ self.assertEqual('bar', shelf_file.read())
+ finally:
+ shelf_file.close()
+
+ def test_read_non_existant(self):
+ manager = self.get_manager()
+ e = self.assertRaises(errors.NoSuchShelfId, manager.read_shelf, 1)
+ self.assertEqual('No changes are shelved with id "1".', str(e))
+
+ def test_shelve_changes(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('no-change commit')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('tree/foo', 'bar')])
+ self.assertFileEqual('bar', 'tree/foo')
+ tree.add('foo', 'foo-id')
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ list(creator.iter_shelvable())
+ creator.shelve_creation('foo-id')
+ shelf_manager = tree.get_shelf_manager()
+ shelf_id = shelf_manager.shelve_changes(creator)
+ self.assertPathDoesNotExist('tree/foo')
+ unshelver = shelf_manager.get_unshelver(shelf_id)
+ self.addCleanup(unshelver.finalize)
+ unshelver.make_merger().do_merge()
+ self.assertFileEqual('bar', 'tree/foo')
+
+ def test_get_metadata(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ creator = shelf.ShelfCreator(tree, tree.basis_tree())
+ self.addCleanup(creator.finalize)
+ shelf_manager = tree.get_shelf_manager()
+ shelf_id = shelf_manager.shelve_changes(creator, 'foo')
+ metadata = shelf_manager.get_metadata(shelf_id)
+ self.assertEqual('foo', metadata['message'])
+ self.assertEqual('null:', metadata['revision_id'])
diff --git a/bzrlib/tests/test_shelf_ui.py b/bzrlib/tests/test_shelf_ui.py
new file mode 100644
index 0000000..8bf94cf
--- /dev/null
+++ b/bzrlib/tests/test_shelf_ui.py
@@ -0,0 +1,613 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from cStringIO import StringIO
+import os
+import sys
+from textwrap import dedent
+
+from bzrlib import (
+ errors,
+ shelf_ui,
+ revision,
+ tests,
+)
+from bzrlib.tests import script
+from bzrlib.tests import (
+ features,
+ )
+
+
+class ExpectShelver(shelf_ui.Shelver):
+ """A variant of Shelver that intercepts console activity, for testing."""
+
+ def __init__(self, work_tree, target_tree, diff_writer=None,
+ auto=False, auto_apply=False, file_list=None, message=None,
+ destroy=False, reporter=None):
+ shelf_ui.Shelver.__init__(self, work_tree, target_tree, diff_writer,
+ auto, auto_apply, file_list, message,
+ destroy, reporter=reporter)
+ self.expected = []
+ self.diff_writer = StringIO()
+
+ def expect(self, message, response):
+ self.expected.append((message, response))
+
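+    # prompt() pops the next expected (message, response) pair and returns the
+    # canned response.  In these tests the integer responses select from the
+    # choices '&yes\n&No\n&finish\n&quit': 0 = yes, 1 = no, 2 = finish,
+    # 3 = quit.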
+ def prompt(self, message, choices, default):
+ try:
+ expected_message, response = self.expected.pop(0)
+ except IndexError:
+ raise AssertionError('Unexpected prompt: %s' % message)
+ if message != expected_message:
+ raise AssertionError('Wrong prompt: %s' % message)
+ if choices != '&yes\n&No\n&finish\n&quit':
+ raise AssertionError('Wrong choices: %s' % choices)
+ return response
+
+
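+# Fixture contents: LINES_AJ is ten lines 'a'..'j'; LINES_ZY changes the first
+# line to 'z' and the last to 'y' (two separate hunks); LINES_AY changes only
+# the last line to 'y'.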
+LINES_AJ = 'a\nb\nc\nd\ne\nf\ng\nh\ni\nj\n'
+
+
+LINES_ZY = 'z\nb\nc\nd\ne\nf\ng\nh\ni\ny\n'
+
+
+LINES_AY = 'a\nb\nc\nd\ne\nf\ng\nh\ni\ny\n'
+
+
+class ShelfTestCase(tests.TestCaseWithTransport):
+
+ def create_shelvable_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/foo', LINES_AJ)])
+ tree.add('foo', 'foo-id')
+ tree.commit('added foo')
+ self.build_tree_contents([('tree/foo', LINES_ZY)])
+ return tree
+
+
+class TestShelver(ShelfTestCase):
+
+ def test_unexpected_prompt_failure(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ e = self.assertRaises(AssertionError, shelver.run)
+ self.assertEqual('Unexpected prompt: Shelve?', str(e))
+
+ def test_wrong_prompt_failure(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('foo', 0)
+ e = self.assertRaises(AssertionError, shelver.run)
+ self.assertEqual('Wrong prompt: Shelve?', str(e))
+
+ def test_shelve_not_diff(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve?', 1)
+ shelver.expect('Shelve?', 1)
+ # No final shelving prompt because no changes were selected
+ shelver.run()
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+
+ def test_shelve_diff_no(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve 2 change(s)?', 1)
+ shelver.run()
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+
+ def test_shelve_diff(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve 2 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_one_diff(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve?', 1)
+ shelver.expect('Shelve 1 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AY, 'tree/foo')
+
+ def test_shelve_binary_change(self):
+ tree = self.create_shelvable_tree()
+ self.build_tree_contents([('tree/foo', '\x00')])
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve binary changes?', 0)
+ shelver.expect('Shelve 1 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_rename(self):
+ tree = self.create_shelvable_tree()
+ tree.rename_one('foo', 'bar')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve renaming "foo" => "bar"?', 0)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve?', 0)
+ shelver.expect('Shelve 3 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_deletion(self):
+ tree = self.create_shelvable_tree()
+ os.unlink('tree/foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve removing file "foo"?', 0)
+ shelver.expect('Shelve 1 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_creation(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('add tree root')
+ self.build_tree(['tree/foo'])
+ tree.add('foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve adding file "foo"?', 0)
+ shelver.expect('Shelve 1 change(s)?', 0)
+ shelver.run()
+ self.assertPathDoesNotExist('tree/foo')
+
+ def test_shelve_kind_change(self):
+ tree = self.create_shelvable_tree()
+ os.unlink('tree/foo')
+ os.mkdir('tree/foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve changing "foo" from file to directory?',
+ 0)
+ shelver.expect('Shelve 1 change(s)?', 0)
+
+ def test_shelve_modify_target(self):
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.create_shelvable_tree()
+ os.symlink('bar', 'tree/baz')
+ tree.add('baz', 'baz-id')
+ tree.commit("Add symlink")
+ os.unlink('tree/baz')
+ os.symlink('vax', 'tree/baz')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve changing target of "baz" from "bar" to '
+ '"vax"?', 0)
+ shelver.expect('Shelve 1 change(s)?', 0)
+ shelver.run()
+ self.assertEqual('bar', os.readlink('tree/baz'))
+
+ def test_shelve_finish(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve?', 2)
+ shelver.expect('Shelve 2 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_quit(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve?', 3)
+ self.assertRaises(errors.UserAbort, shelver.run)
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+
+ def test_shelve_all(self):
+ tree = self.create_shelvable_tree()
+ shelver = ExpectShelver.from_args(sys.stdout, all=True,
+ directory='tree')
+ try:
+ shelver.run()
+ finally:
+ shelver.finalize()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_filename(self):
+ tree = self.create_shelvable_tree()
+ self.build_tree(['tree/bar'])
+ tree.add('bar')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(), file_list=['bar'])
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Shelve adding file "bar"?', 0)
+ shelver.expect('Shelve 1 change(s)?', 0)
+ shelver.run()
+
+ def test_shelve_destroy(self):
+ tree = self.create_shelvable_tree()
+ shelver = shelf_ui.Shelver.from_args(sys.stdout, all=True,
+ directory='tree', destroy=True)
+ self.addCleanup(shelver.finalize)
+ shelver.run()
+ self.assertIs(None, tree.get_shelf_manager().last_shelf())
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ @staticmethod
+ def shelve_all(tree, target_revision_id):
+ tree.lock_write()
+ try:
+ target = tree.branch.repository.revision_tree(target_revision_id)
+ shelver = shelf_ui.Shelver(tree, target, auto=True,
+ auto_apply=True)
+ try:
+ shelver.run()
+ finally:
+ shelver.finalize()
+ finally:
+ tree.unlock()
+
+ def test_shelve_old_root_preserved(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ tree1.commit('add root')
+ tree1_root_id = tree1.get_root_id()
+ tree2 = self.make_branch_and_tree('tree2')
+ rev2 = tree2.commit('add root')
+ self.assertNotEquals(tree1_root_id, tree2.get_root_id())
+ tree1.merge_from_branch(tree2.branch,
+ from_revision=revision.NULL_REVISION)
+ tree1.commit('merging in tree2')
+ self.assertEquals(tree1_root_id, tree1.get_root_id())
+ # This is essentially assertNotRaises(InconsistentDelta)
+ # With testtools 0.9.9, it can be rewritten as:
+ # with ExpectedException(AssertionError,
+ # 'InconsistentDelta not raised'):
+ # with ExpectedException(errors.InconsistentDelta, ''):
+ # self.shelve_all(tree1, rev2)
+ e = self.assertRaises(AssertionError, self.assertRaises,
+ errors.InconsistentDelta, self.shelve_all, tree1,
+ rev2)
+        self.assertContainsRe(str(e), 'InconsistentDelta not raised')
+
+ def test_shelve_split(self):
+ outer_tree = self.make_branch_and_tree('outer')
+ outer_tree.commit('Add root')
+ inner_tree = self.make_branch_and_tree('outer/inner')
+ rev2 = inner_tree.commit('Add root')
+ outer_tree.subsume(inner_tree)
+ # This is essentially assertNotRaises(ValueError).
+ # The ValueError is 'None is not a valid file id'.
+ self.expectFailure('Cannot shelve a join back to the inner tree.',
+ self.assertRaises, AssertionError,
+ self.assertRaises, ValueError, self.shelve_all,
+ outer_tree, rev2)
+
+
+class TestApplyReporter(ShelfTestCase):
+
+ def test_shelve_not_diff(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Apply change?', 1)
+ shelver.expect('Apply change?', 1)
+ # No final shelving prompt because no changes were selected
+ shelver.run()
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+
+ def test_shelve_diff_no(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Apply change?', 0)
+ shelver.expect('Apply change?', 0)
+ shelver.expect('Apply 2 change(s)?', 1)
+ shelver.run()
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+
+ def test_shelve_diff(self):
+ tree = self.create_shelvable_tree()
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Apply change?', 0)
+ shelver.expect('Apply change?', 0)
+ shelver.expect('Apply 2 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_binary_change(self):
+ tree = self.create_shelvable_tree()
+ self.build_tree_contents([('tree/foo', '\x00')])
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Apply binary changes?', 0)
+ shelver.expect('Apply 1 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_rename(self):
+ tree = self.create_shelvable_tree()
+ tree.rename_one('foo', 'bar')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Rename "bar" => "foo"?', 0)
+ shelver.expect('Apply change?', 0)
+ shelver.expect('Apply change?', 0)
+ shelver.expect('Apply 3 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_deletion(self):
+ tree = self.create_shelvable_tree()
+ os.unlink('tree/foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Add file "foo"?', 0)
+ shelver.expect('Apply 1 change(s)?', 0)
+ shelver.run()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+
+ def test_shelve_creation(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.commit('add tree root')
+ self.build_tree(['tree/foo'])
+ tree.add('foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Delete file "foo"?', 0)
+ shelver.expect('Apply 1 change(s)?', 0)
+ shelver.run()
+ self.assertPathDoesNotExist('tree/foo')
+
+ def test_shelve_kind_change(self):
+ tree = self.create_shelvable_tree()
+ os.unlink('tree/foo')
+ os.mkdir('tree/foo')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Change "foo" from directory to a file?', 0)
+ shelver.expect('Apply 1 change(s)?', 0)
+
+ def test_shelve_modify_target(self):
+ self.requireFeature(features.SymlinkFeature)
+ tree = self.create_shelvable_tree()
+ os.symlink('bar', 'tree/baz')
+ tree.add('baz', 'baz-id')
+ tree.commit("Add symlink")
+ os.unlink('tree/baz')
+ os.symlink('vax', 'tree/baz')
+ tree.lock_tree_write()
+ self.addCleanup(tree.unlock)
+ shelver = ExpectShelver(tree, tree.basis_tree(),
+ reporter=shelf_ui.ApplyReporter())
+ self.addCleanup(shelver.finalize)
+ shelver.expect('Change target of "baz" from "vax" to "bar"?',
+ 0)
+ shelver.expect('Apply 1 change(s)?', 0)
+ shelver.run()
+ self.assertEqual('bar', os.readlink('tree/baz'))
+
+
+class TestUnshelver(tests.TestCaseWithTransport):
+
+ def create_tree_with_shelf(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ try:
+ self.build_tree_contents([('tree/foo', LINES_AJ)])
+ tree.add('foo', 'foo-id')
+ tree.commit('added foo')
+ self.build_tree_contents([('tree/foo', LINES_ZY)])
+ shelver = shelf_ui.Shelver(tree, tree.basis_tree(),
+ auto_apply=True, auto=True)
+ try:
+ shelver.run()
+ finally:
+ shelver.finalize()
+ finally:
+ tree.unlock()
+ return tree
+
+ def test_unshelve(self):
+ tree = self.create_tree_with_shelf()
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ manager = tree.get_shelf_manager()
+ shelf_ui.Unshelver(tree, manager, 1, True, True, True).run()
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+
+ def test_unshelve_args(self):
+ tree = self.create_tree_with_shelf()
+ unshelver = shelf_ui.Unshelver.from_args(directory='tree')
+ try:
+ unshelver.run()
+ finally:
+ unshelver.tree.unlock()
+ self.assertFileEqual(LINES_ZY, 'tree/foo')
+ self.assertIs(None, tree.get_shelf_manager().last_shelf())
+
+ def test_unshelve_args_dry_run(self):
+ tree = self.create_tree_with_shelf()
+ unshelver = shelf_ui.Unshelver.from_args(directory='tree',
+ action='dry-run')
+ try:
+ unshelver.run()
+ finally:
+ unshelver.tree.unlock()
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+ self.assertEqual(1, tree.get_shelf_manager().last_shelf())
+
+ def test_unshelve_args_preview(self):
+ tree = self.create_tree_with_shelf()
+ write_diff_to = StringIO()
+ unshelver = shelf_ui.Unshelver.from_args(
+ directory='tree', action='preview', write_diff_to=write_diff_to)
+ try:
+ unshelver.run()
+ finally:
+ unshelver.tree.unlock()
+ # The changes were not unshelved.
+ self.assertFileEqual(LINES_AJ, 'tree/foo')
+ self.assertEqual(1, tree.get_shelf_manager().last_shelf())
+
+ # But the diff was written to write_diff_to.
+ diff = write_diff_to.getvalue()
+ expected = dedent("""\
+ @@ -1,4 +1,4 @@
+ -a
+ +z
+ b
+ c
+ d
+ @@ -7,4 +7,4 @@
+ g
+ h
+ i
+ -j
+ +y
+
+ """)
+ self.assertEqualDiff(expected, diff[-len(expected):])
+
+ def test_unshelve_args_delete_only(self):
+ tree = self.make_branch_and_tree('tree')
+ manager = tree.get_shelf_manager()
+ shelf_file = manager.new_shelf()[1]
+ try:
+ shelf_file.write('garbage')
+ finally:
+ shelf_file.close()
+ unshelver = shelf_ui.Unshelver.from_args(directory='tree',
+ action='delete-only')
+ try:
+ unshelver.run()
+ finally:
+ unshelver.tree.unlock()
+ self.assertIs(None, manager.last_shelf())
+
+ def test_unshelve_args_invalid_shelf_id(self):
+ tree = self.make_branch_and_tree('tree')
+ manager = tree.get_shelf_manager()
+ shelf_file = manager.new_shelf()[1]
+ try:
+ shelf_file.write('garbage')
+ finally:
+ shelf_file.close()
+ self.assertRaises(errors.InvalidShelfId,
+ shelf_ui.Unshelver.from_args, directory='tree',
+ action='delete-only', shelf_id='foo')
+
+
+class TestUnshelveScripts(TestUnshelver,
+ script.TestCaseWithTransportAndScript):
+
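+    # In these scripts, "$ ..." lines are commands and "2>..." lines are the
+    # stderr output expected from the preceding command.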
+ def test_unshelve_messages_keep(self):
+ self.create_tree_with_shelf()
+ self.run_script("""
+$ cd tree
+$ bzr unshelve --keep
+2>Using changes with id "1".
+2> M foo
+2>All changes applied successfully.
+""")
+
+ def test_unshelve_messages_delete(self):
+ self.create_tree_with_shelf()
+ self.run_script("""
+$ cd tree
+$ bzr unshelve --delete-only
+2>Deleted changes with id "1".
+""")
+
+ def test_unshelve_messages_apply(self):
+ self.create_tree_with_shelf()
+ self.run_script("""
+$ cd tree
+$ bzr unshelve --apply
+2>Using changes with id "1".
+2> M foo
+2>All changes applied successfully.
+2>Deleted changes with id "1".
+""")
+
+ def test_unshelve_messages_dry_run(self):
+ self.create_tree_with_shelf()
+ self.run_script("""
+$ cd tree
+$ bzr unshelve --dry-run
+2>Using changes with id "1".
+2> M foo
+""")
diff --git a/bzrlib/tests/test_smart.py b/bzrlib/tests/test_smart.py
new file mode 100644
index 0000000..4bf9622
--- /dev/null
+++ b/bzrlib/tests/test_smart.py
@@ -0,0 +1,2712 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the smart wire/domain protocol.
+
+This module contains tests for the domain-level smart requests and responses,
+such as the 'Branch.lock_write' request. Many of these use specific disk
+formats to exercise calls that only make sense for formats with specific
+properties.
+
+Tests for low-level protocol encoding are found in test_smart_transport.
+"""
+
+import bz2
+import zlib
+
+from bzrlib import (
+ bencode,
+ branch as _mod_branch,
+ controldir,
+ errors,
+ gpg,
+ inventory_delta,
+ tests,
+ transport,
+ urlutils,
+ versionedfile,
+ )
+from bzrlib.smart import (
+ branch as smart_branch,
+ bzrdir as smart_dir,
+ repository as smart_repo,
+ packrepository as smart_packrepo,
+ request as smart_req,
+ server,
+ vfs,
+ )
+from bzrlib.testament import Testament
+from bzrlib.tests import test_server
+from bzrlib.transport import (
+ chroot,
+ memory,
+ )
+
+
+def load_tests(standard_tests, module, loader):
+    """Multiply the FindRepository tests across the request protocol versions."""
+ # FindRepository tests.
+ scenarios = [
+ ("find_repository", {
+ "_request_class": smart_dir.SmartServerRequestFindRepositoryV1}),
+ ("find_repositoryV2", {
+ "_request_class": smart_dir.SmartServerRequestFindRepositoryV2}),
+ ("find_repositoryV3", {
+ "_request_class": smart_dir.SmartServerRequestFindRepositoryV3}),
+ ]
+ to_adapt, result = tests.split_suite_by_re(standard_tests,
+ "TestSmartServerRequestFindRepository")
+ v2_only, v1_and_2 = tests.split_suite_by_re(to_adapt,
+ "_v2")
+ tests.multiply_tests(v1_and_2, scenarios, result)
+    # The first scenario only applies to v1 protocols, so it is dropped for
+    # the v2-only tests.
+ tests.multiply_tests(v2_only, scenarios[1:], result)
+ return result
+
+
+class TestCaseWithChrootedTransport(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ self.vfs_transport_factory = memory.MemoryServer
+ tests.TestCaseWithTransport.setUp(self)
+ self._chroot_server = None
+
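+    # get_transport() lazily starts a single ChrootServer over the backing
+    # transport, so every transport handed to the tests is rooted inside the
+    # chroot.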
+ def get_transport(self, relpath=None):
+ if self._chroot_server is None:
+ backing_transport = tests.TestCaseWithTransport.get_transport(self)
+ self._chroot_server = chroot.ChrootServer(backing_transport)
+ self.start_server(self._chroot_server)
+ t = transport.get_transport_from_url(self._chroot_server.get_url())
+ if relpath is not None:
+ t = t.clone(relpath)
+ return t
+
+
+class TestCaseWithSmartMedium(tests.TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ super(TestCaseWithSmartMedium, self).setUp()
+ # We're allowed to set the transport class here, so that we don't use
+ # the default or a parameterized class, but rather use the
+ # TestCaseWithTransport infrastructure to set up a smart server and
+ # transport.
+ self.overrideAttr(self, "transport_server", self.make_transport_server)
+
+ def make_transport_server(self):
+ return test_server.SmartTCPServer_for_testing('-' + self.id())
+
+ def get_smart_medium(self):
+ """Get a smart medium to use in tests."""
+ return self.get_transport().get_smart_medium()
+
+
+class TestByteStreamToStream(tests.TestCase):
+
+ def test_repeated_substreams_same_kind_are_one_stream(self):
+ # Make a stream - an iterable of bytestrings.
+ stream = [('text', [versionedfile.FulltextContentFactory(('k1',), None,
+ None, 'foo')]),('text', [
+ versionedfile.FulltextContentFactory(('k2',), None, None, 'bar')])]
+ fmt = controldir.format_registry.get('pack-0.92')().repository_format
+ bytes = smart_repo._stream_to_byte_stream(stream, fmt)
+ streams = []
+ # Iterate the resulting iterable; checking that we get only one stream
+ # out.
+ fmt, stream = smart_repo._byte_stream_to_stream(bytes)
+ for kind, substream in stream:
+ streams.append((kind, list(substream)))
+ self.assertLength(1, streams)
+ self.assertLength(2, streams[0][1])
+
+
+class TestSmartServerResponse(tests.TestCase):
+
+ def test__eq__(self):
+ self.assertEqual(smart_req.SmartServerResponse(('ok', )),
+ smart_req.SmartServerResponse(('ok', )))
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), 'body'),
+ smart_req.SmartServerResponse(('ok', ), 'body'))
+ self.assertNotEqual(smart_req.SmartServerResponse(('ok', )),
+ smart_req.SmartServerResponse(('notok', )))
+ self.assertNotEqual(smart_req.SmartServerResponse(('ok', ), 'body'),
+ smart_req.SmartServerResponse(('ok', )))
+ self.assertNotEqual(None,
+ smart_req.SmartServerResponse(('ok', )))
+
+ def test__str__(self):
+ """SmartServerResponses can be stringified."""
+ self.assertEqual(
+ "<SuccessfulSmartServerResponse args=('args',) body='body'>",
+ str(smart_req.SuccessfulSmartServerResponse(('args',), 'body')))
+ self.assertEqual(
+ "<FailedSmartServerResponse args=('args',) body='body'>",
+ str(smart_req.FailedSmartServerResponse(('args',), 'body')))
+
+
+class TestSmartServerRequest(tests.TestCaseWithMemoryTransport):
+
+ def test_translate_client_path(self):
+ transport = self.get_transport()
+ request = smart_req.SmartServerRequest(transport, 'foo/')
+ self.assertEqual('./', request.translate_client_path('foo/'))
+ self.assertRaises(
+ errors.InvalidURLJoin, request.translate_client_path, 'foo/..')
+ self.assertRaises(
+ errors.PathNotChild, request.translate_client_path, '/')
+ self.assertRaises(
+ errors.PathNotChild, request.translate_client_path, 'bar/')
+ self.assertEqual('./baz', request.translate_client_path('foo/baz'))
+ e_acute = u'\N{LATIN SMALL LETTER E WITH ACUTE}'.encode('utf-8')
+ self.assertEqual('./' + urlutils.escape(e_acute),
+ request.translate_client_path('foo/' + e_acute))
+
+ def test_translate_client_path_vfs(self):
+ """VfsRequests receive escaped paths rather than raw UTF-8."""
+ transport = self.get_transport()
+ request = vfs.VfsRequest(transport, 'foo/')
+ e_acute = u'\N{LATIN SMALL LETTER E WITH ACUTE}'.encode('utf-8')
+ escaped = urlutils.escape('foo/' + e_acute)
+ self.assertEqual('./' + urlutils.escape(e_acute),
+ request.translate_client_path(escaped))
+
+ def test_transport_from_client_path(self):
+ transport = self.get_transport()
+ request = smart_req.SmartServerRequest(transport, 'foo/')
+ self.assertEqual(
+ transport.base,
+ request.transport_from_client_path('foo/').base)
+
+
+class TestSmartServerBzrDirRequestCloningMetaDir(
+ tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.cloning_metadir."""
+
+ def test_cloning_metadir(self):
+ """When there is a bzrdir present, the call succeeds."""
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.')
+ local_result = dir.cloning_metadir()
+ request_class = smart_dir.SmartServerBzrDirRequestCloningMetaDir
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse(
+ (local_result.network_name(),
+ local_result.repository_format.network_name(),
+ ('branch', local_result.get_branch_format().network_name())))
+ self.assertEqual(expected, request.execute('', 'False'))
+
+ def test_cloning_metadir_reference(self):
+ """The request fails when bzrdir contains a branch reference."""
+ backing = self.get_transport()
+ referenced_branch = self.make_branch('referenced')
+ dir = self.make_bzrdir('.')
+ local_result = dir.cloning_metadir()
+ reference = _mod_branch.BranchReferenceFormat().initialize(
+ dir, target_branch=referenced_branch)
+ reference_url = _mod_branch.BranchReferenceFormat().get_reference(dir)
+ # The server shouldn't try to follow the branch reference, so it's fine
+ # if the referenced branch isn't reachable.
+ backing.rename('referenced', 'moved')
+ request_class = smart_dir.SmartServerBzrDirRequestCloningMetaDir
+ request = request_class(backing)
+ expected = smart_req.FailedSmartServerResponse(('BranchReference',))
+ self.assertEqual(expected, request.execute('', 'False'))
+
+
+class TestSmartServerBzrDirRequestCheckoutMetaDir(
+ tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.checkout_metadir."""
+
+ def test_checkout_metadir(self):
+ backing = self.get_transport()
+ request = smart_dir.SmartServerBzrDirRequestCheckoutMetaDir(
+ backing)
+ branch = self.make_branch('.', format='2a')
+ response = request.execute('')
+ self.assertEqual(
+ smart_req.SmartServerResponse(
+ ('Bazaar-NG meta directory, format 1\n',
+ 'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
+ 'Bazaar Branch Format 7 (needs bzr 1.6)\n')),
+ response)
+
+
+class TestSmartServerBzrDirRequestDestroyBranch(
+ tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.destroy_branch."""
+
+ def test_destroy_branch_default(self):
+ """The default branch can be removed."""
+ backing = self.get_transport()
+ dir = self.make_branch('.').bzrdir
+ request_class = smart_dir.SmartServerBzrDirRequestDestroyBranch
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse(('ok',))
+ self.assertEqual(expected, request.execute('', None))
+
+ def test_destroy_branch_named(self):
+ """A named branch can be removed."""
+ backing = self.get_transport()
+ dir = self.make_repository('.', format="development-colo").bzrdir
+ dir.create_branch(name="branchname")
+ request_class = smart_dir.SmartServerBzrDirRequestDestroyBranch
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse(('ok',))
+ self.assertEqual(expected, request.execute('', "branchname"))
+
+ def test_destroy_branch_missing(self):
+ """An error is raised if the branch didn't exist."""
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.', format="development-colo")
+ request_class = smart_dir.SmartServerBzrDirRequestDestroyBranch
+ request = request_class(backing)
+ expected = smart_req.FailedSmartServerResponse(('nobranch',), None)
+ self.assertEqual(expected, request.execute('', "branchname"))
+
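+        # 'shelf-02' does not match the expected 'shelf-<id>' form, so no id
+        # is returned for it.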
+
+class TestSmartServerBzrDirRequestHasWorkingTree(
+ tests.TestCaseWithTransport):
+ """Tests for BzrDir.has_workingtree."""
+
+ def test_has_workingtree_yes(self):
+ """A working tree is present."""
+ backing = self.get_transport()
+ dir = self.make_branch_and_tree('.').bzrdir
+ request_class = smart_dir.SmartServerBzrDirRequestHasWorkingTree
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse(('yes',))
+ self.assertEqual(expected, request.execute(''))
+
+ def test_has_workingtree_no(self):
+ """A working tree is missing."""
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.')
+ request_class = smart_dir.SmartServerBzrDirRequestHasWorkingTree
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse(('no',))
+ self.assertEqual(expected, request.execute(''))
+
+
+class TestSmartServerBzrDirRequestDestroyRepository(
+ tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.destroy_repository."""
+
+ def test_destroy_repository_default(self):
+ """The repository can be removed."""
+ backing = self.get_transport()
+ dir = self.make_repository('.').bzrdir
+ request_class = smart_dir.SmartServerBzrDirRequestDestroyRepository
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse(('ok',))
+ self.assertEqual(expected, request.execute(''))
+
+ def test_destroy_repository_missing(self):
+ """An error is raised if the repository didn't exist."""
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.')
+ request_class = smart_dir.SmartServerBzrDirRequestDestroyRepository
+ request = request_class(backing)
+ expected = smart_req.FailedSmartServerResponse(
+ ('norepository',), None)
+ self.assertEqual(expected, request.execute(''))
+
+
+class TestSmartServerRequestCreateRepository(tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.create_repository."""
+
+ def test_makes_repository(self):
+ """When there is a bzrdir present, the call succeeds."""
+ backing = self.get_transport()
+ self.make_bzrdir('.')
+ request_class = smart_dir.SmartServerRequestCreateRepository
+ request = request_class(backing)
+ reference_bzrdir_format = controldir.format_registry.get('pack-0.92')()
+ reference_format = reference_bzrdir_format.repository_format
+ network_name = reference_format.network_name()
+ expected = smart_req.SuccessfulSmartServerResponse(
+ ('ok', 'no', 'no', 'no', network_name))
+ self.assertEqual(expected, request.execute('', network_name, 'True'))
+
+
+class TestSmartServerRequestFindRepository(tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.find_repository."""
+
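+    # self._request_class is injected by the load_tests scenarios above, so
+    # each test here runs once per FindRepository protocol version.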
+ def test_no_repository(self):
+ """When there is no repository to be found, ('norepository', ) is returned."""
+ backing = self.get_transport()
+ request = self._request_class(backing)
+ self.make_bzrdir('.')
+ self.assertEqual(smart_req.SmartServerResponse(('norepository', )),
+ request.execute(''))
+
+ def test_nonshared_repository(self):
+        # Non-shared repositories only allow 'find' to return a handle when
+        # the path being searched is the same path the repository is at.
+ backing = self.get_transport()
+ request = self._request_class(backing)
+ result = self._make_repository_and_result()
+ self.assertEqual(result, request.execute(''))
+ self.make_bzrdir('subdir')
+ self.assertEqual(smart_req.SmartServerResponse(('norepository', )),
+ request.execute('subdir'))
+
+ def _make_repository_and_result(self, shared=False, format=None):
+        """Convenience function to set up a repository.
+
+        :return: The SmartServerResponse to expect when opening it.
+ """
+ repo = self.make_repository('.', shared=shared, format=format)
+ if repo.supports_rich_root():
+ rich_root = 'yes'
+ else:
+ rich_root = 'no'
+ if repo._format.supports_tree_reference:
+ subtrees = 'yes'
+ else:
+ subtrees = 'no'
+ if repo._format.supports_external_lookups:
+ external = 'yes'
+ else:
+ external = 'no'
+ if (smart_dir.SmartServerRequestFindRepositoryV3 ==
+ self._request_class):
+ return smart_req.SuccessfulSmartServerResponse(
+ ('ok', '', rich_root, subtrees, external,
+ repo._format.network_name()))
+ elif (smart_dir.SmartServerRequestFindRepositoryV2 ==
+ self._request_class):
+ # All tests so far are on formats, and for non-external
+ # repositories.
+ return smart_req.SuccessfulSmartServerResponse(
+ ('ok', '', rich_root, subtrees, external))
+ else:
+ return smart_req.SuccessfulSmartServerResponse(
+ ('ok', '', rich_root, subtrees))
+
+ def test_shared_repository(self):
+ """When there is a shared repository, we get 'ok', 'relpath-to-repo'."""
+ backing = self.get_transport()
+ request = self._request_class(backing)
+ result = self._make_repository_and_result(shared=True)
+ self.assertEqual(result, request.execute(''))
+ self.make_bzrdir('subdir')
+ result2 = smart_req.SmartServerResponse(
+ result.args[0:1] + ('..', ) + result.args[2:])
+ self.assertEqual(result2,
+ request.execute('subdir'))
+ self.make_bzrdir('subdir/deeper')
+ result3 = smart_req.SmartServerResponse(
+ result.args[0:1] + ('../..', ) + result.args[2:])
+ self.assertEqual(result3,
+ request.execute('subdir/deeper'))
+
+ def test_rich_root_and_subtree_encoding(self):
+ """Test for the format attributes for rich root and subtree support."""
+ backing = self.get_transport()
+ request = self._request_class(backing)
+ result = self._make_repository_and_result(
+ format='development-subtree')
+ # check the test will be valid
+ self.assertEqual('yes', result.args[2])
+ self.assertEqual('yes', result.args[3])
+ self.assertEqual(result, request.execute(''))
+
+ def test_supports_external_lookups_no_v2(self):
+ """Test for the supports_external_lookups attribute."""
+ backing = self.get_transport()
+ request = self._request_class(backing)
+ result = self._make_repository_and_result(
+ format='development-subtree')
+ # check the test will be valid
+ self.assertEqual('yes', result.args[4])
+ self.assertEqual(result, request.execute(''))
+
+
+class TestSmartServerBzrDirRequestGetConfigFile(
+ tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.get_config_file."""
+
+ def test_present(self):
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.')
+ dir.get_config().set_default_stack_on("/")
+ local_result = dir._get_config()._get_config_file().read()
+ request_class = smart_dir.SmartServerBzrDirRequestConfigFile
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse((), local_result)
+ self.assertEqual(expected, request.execute(''))
+
+ def test_missing(self):
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.')
+ request_class = smart_dir.SmartServerBzrDirRequestConfigFile
+ request = request_class(backing)
+ expected = smart_req.SuccessfulSmartServerResponse((), '')
+ self.assertEqual(expected, request.execute(''))
+
+
+class TestSmartServerBzrDirRequestGetBranches(
+ tests.TestCaseWithMemoryTransport):
+ """Tests for BzrDir.get_branches."""
+
+ def test_simple(self):
+ backing = self.get_transport()
+ branch = self.make_branch('.')
+ request_class = smart_dir.SmartServerBzrDirRequestGetBranches
+ request = request_class(backing)
+ local_result = bencode.bencode(
+ {"": ("branch", branch._format.network_name())})
+ expected = smart_req.SuccessfulSmartServerResponse(
+ ("success", ), local_result)
+ self.assertEqual(expected, request.execute(''))
+
+ def test_empty(self):
+ backing = self.get_transport()
+ dir = self.make_bzrdir('.')
+ request_class = smart_dir.SmartServerBzrDirRequestGetBranches
+ request = request_class(backing)
+ local_result = bencode.bencode({})
+ expected = smart_req.SuccessfulSmartServerResponse(
+ ('success',), local_result)
+ self.assertEqual(expected, request.execute(''))
+
+
+class TestSmartServerRequestInitializeBzrDir(tests.TestCaseWithMemoryTransport):
+
+ def test_empty_dir(self):
+ """Initializing an empty dir should succeed and do it."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestInitializeBzrDir(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('ok', )),
+ request.execute(''))
+ made_dir = controldir.ControlDir.open_from_transport(backing)
+ # no branch, tree or repository is expected with the current
+        # default format.
+ self.assertRaises(errors.NoWorkingTree, made_dir.open_workingtree)
+ self.assertRaises(errors.NotBranchError, made_dir.open_branch)
+ self.assertRaises(errors.NoRepositoryPresent, made_dir.open_repository)
+
+ def test_missing_dir(self):
+ """Initializing a missing directory should fail like the bzrdir api."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestInitializeBzrDir(backing)
+ self.assertRaises(errors.NoSuchFile,
+ request.execute, 'subdir')
+
+ def test_initialized_dir(self):
+ """Initializing an extant bzrdir should fail like the bzrdir api."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestInitializeBzrDir(backing)
+ self.make_bzrdir('subdir')
+ self.assertRaises(errors.AlreadyControlDirError,
+ request.execute, 'subdir')
+
+
+class TestSmartServerRequestBzrDirInitializeEx(
+ tests.TestCaseWithMemoryTransport):
+ """Basic tests for BzrDir.initialize_ex_1.16 in the smart server.
+
+ The main unit tests in test_bzrdir exercise the API comprehensively.
+ """
+
+ def test_empty_dir(self):
+ """Initializing an empty dir should succeed and do it."""
+ backing = self.get_transport()
+ name = self.make_bzrdir('reference')._format.network_name()
+ request = smart_dir.SmartServerRequestBzrDirInitializeEx(backing)
+ self.assertEqual(
+ smart_req.SmartServerResponse(('', '', '', '', '', '', name,
+ 'False', '', '', '')),
+ request.execute(name, '', 'True', 'False', 'False', '', '', '', '',
+ 'False'))
+ made_dir = controldir.ControlDir.open_from_transport(backing)
+ # no branch, tree or repository is expected with the current
+ # default format.
+ self.assertRaises(errors.NoWorkingTree, made_dir.open_workingtree)
+ self.assertRaises(errors.NotBranchError, made_dir.open_branch)
+ self.assertRaises(errors.NoRepositoryPresent, made_dir.open_repository)
+
+ def test_missing_dir(self):
+ """Initializing a missing directory should fail like the bzrdir api."""
+ backing = self.get_transport()
+ name = self.make_bzrdir('reference')._format.network_name()
+ request = smart_dir.SmartServerRequestBzrDirInitializeEx(backing)
+ self.assertRaises(errors.NoSuchFile, request.execute, name,
+ 'subdir/dir', 'False', 'False', 'False', '', '', '', '', 'False')
+
+ def test_initialized_dir(self):
+ """Initializing an extant directory should fail like the bzrdir api."""
+ backing = self.get_transport()
+ name = self.make_bzrdir('reference')._format.network_name()
+ request = smart_dir.SmartServerRequestBzrDirInitializeEx(backing)
+ self.make_bzrdir('subdir')
+ self.assertRaises(errors.FileExists, request.execute, name, 'subdir',
+ 'False', 'False', 'False', '', '', '', '', 'False')
+
+
+class TestSmartServerRequestOpenBzrDir(tests.TestCaseWithMemoryTransport):
+
+ def test_no_directory(self):
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBzrDir(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('does-not-exist'))
+
+ def test_empty_directory(self):
+ backing = self.get_transport()
+ backing.mkdir('empty')
+ request = smart_dir.SmartServerRequestOpenBzrDir(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('empty'))
+
+ def test_outside_root_client_path(self):
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBzrDir(backing,
+ root_client_path='root')
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('not-root'))
+
+
+class TestSmartServerRequestOpenBzrDir_2_1(tests.TestCaseWithMemoryTransport):
+
+ def test_no_directory(self):
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBzrDir_2_1(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('does-not-exist'))
+
+ def test_empty_directory(self):
+ backing = self.get_transport()
+ backing.mkdir('empty')
+ request = smart_dir.SmartServerRequestOpenBzrDir_2_1(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('empty'))
+
+ def test_present_without_workingtree(self):
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBzrDir_2_1(backing)
+ self.make_bzrdir('.')
+ self.assertEqual(smart_req.SmartServerResponse(('yes', 'no')),
+ request.execute(''))
+
+ def test_outside_root_client_path(self):
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBzrDir_2_1(backing,
+ root_client_path='root')
+ self.assertEqual(smart_req.SmartServerResponse(('no',)),
+ request.execute('not-root'))
+
+
+class TestSmartServerRequestOpenBzrDir_2_1_disk(TestCaseWithChrootedTransport):
+
+ def test_present_with_workingtree(self):
+ self.vfs_transport_factory = test_server.LocalURLServer
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBzrDir_2_1(backing)
+ bd = self.make_bzrdir('.')
+ bd.create_repository()
+ bd.create_branch()
+ bd.create_workingtree()
+ self.assertEqual(smart_req.SmartServerResponse(('yes', 'yes')),
+ request.execute(''))
+
+
+class TestSmartServerRequestOpenBranch(TestCaseWithChrootedTransport):
+
+ def test_no_branch(self):
+ """When there is no branch, ('nobranch', ) is returned."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranch(backing)
+ self.make_bzrdir('.')
+ self.assertEqual(smart_req.SmartServerResponse(('nobranch', )),
+ request.execute(''))
+
+ def test_branch(self):
+ """When there is a branch, 'ok' is returned."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranch(backing)
+ self.make_branch('.')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', '')),
+ request.execute(''))
+
+ def test_branch_reference(self):
+ """When there is a branch reference, the reference URL is returned."""
+ self.vfs_transport_factory = test_server.LocalURLServer
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranch(backing)
+ branch = self.make_branch('branch')
+        checkout = branch.create_checkout('reference', lightweight=True)
+ reference_url = _mod_branch.BranchReferenceFormat().get_reference(
+ checkout.bzrdir)
+ self.assertFileEqual(reference_url, 'reference/.bzr/branch/location')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', reference_url)),
+ request.execute('reference'))
+
+ def test_notification_on_branch_from_repository(self):
+ """When there is a repository, the error should return details."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranch(backing)
+ repo = self.make_repository('.')
+ self.assertEqual(smart_req.SmartServerResponse(('nobranch',)),
+ request.execute(''))
+
+
+class TestSmartServerRequestOpenBranchV2(TestCaseWithChrootedTransport):
+
+ def test_no_branch(self):
+ """When there is no branch, ('nobranch', ) is returned."""
+ backing = self.get_transport()
+ self.make_bzrdir('.')
+ request = smart_dir.SmartServerRequestOpenBranchV2(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('nobranch', )),
+ request.execute(''))
+
+ def test_branch(self):
+ """When there is a branch, 'ok' is returned."""
+ backing = self.get_transport()
+ expected = self.make_branch('.')._format.network_name()
+ request = smart_dir.SmartServerRequestOpenBranchV2(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('branch', expected)),
+ request.execute(''))
+
+ def test_branch_reference(self):
+ """When there is a branch reference, the reference URL is returned."""
+ self.vfs_transport_factory = test_server.LocalURLServer
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranchV2(backing)
+ branch = self.make_branch('branch')
+        checkout = branch.create_checkout('reference', lightweight=True)
+ reference_url = _mod_branch.BranchReferenceFormat().get_reference(
+ checkout.bzrdir)
+ self.assertFileEqual(reference_url, 'reference/.bzr/branch/location')
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('ref', reference_url)),
+ request.execute('reference'))
+
+ def test_stacked_branch(self):
+ """Opening a stacked branch does not open the stacked-on branch."""
+ trunk = self.make_branch('trunk')
+ feature = self.make_branch('feature')
+ feature.set_stacked_on_url(trunk.base)
+ opened_branches = []
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'open', opened_branches.append, None)
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranchV2(backing)
+ request.setup_jail()
+ try:
+ response = request.execute('feature')
+ finally:
+ request.teardown_jail()
+ expected_format = feature._format.network_name()
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('branch', expected_format)),
+ response)
+ self.assertLength(1, opened_branches)
+
+ def test_notification_on_branch_from_repository(self):
+ """When there is a repository, the error should return details."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranchV2(backing)
+ repo = self.make_repository('.')
+ self.assertEqual(smart_req.SmartServerResponse(('nobranch',)),
+ request.execute(''))
+
+
+class TestSmartServerRequestOpenBranchV3(TestCaseWithChrootedTransport):
+
+ def test_no_branch(self):
+ """When there is no branch, ('nobranch', ) is returned."""
+ backing = self.get_transport()
+ self.make_bzrdir('.')
+ request = smart_dir.SmartServerRequestOpenBranchV3(backing)
+ self.assertEqual(smart_req.SmartServerResponse(('nobranch',)),
+ request.execute(''))
+
+ def test_branch(self):
+ """When there is a branch, 'ok' is returned."""
+ backing = self.get_transport()
+ expected = self.make_branch('.')._format.network_name()
+ request = smart_dir.SmartServerRequestOpenBranchV3(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('branch', expected)),
+ request.execute(''))
+
+ def test_branch_reference(self):
+ """When there is a branch reference, the reference URL is returned."""
+ self.vfs_transport_factory = test_server.LocalURLServer
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranchV3(backing)
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('reference', lightweight=True)
+ reference_url = _mod_branch.BranchReferenceFormat().get_reference(
+ checkout.bzrdir)
+ self.assertFileEqual(reference_url, 'reference/.bzr/branch/location')
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('ref', reference_url)),
+ request.execute('reference'))
+
+ def test_stacked_branch(self):
+ """Opening a stacked branch does not open the stacked-on branch."""
+ trunk = self.make_branch('trunk')
+ feature = self.make_branch('feature')
+ feature.set_stacked_on_url(trunk.base)
+ opened_branches = []
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'open', opened_branches.append, None)
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranchV3(backing)
+ request.setup_jail()
+ try:
+ response = request.execute('feature')
+ finally:
+ request.teardown_jail()
+ expected_format = feature._format.network_name()
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('branch', expected_format)),
+ response)
+ self.assertLength(1, opened_branches)
+
+ def test_notification_on_branch_from_repository(self):
+ """When there is a repository, the error should return details."""
+ backing = self.get_transport()
+ request = smart_dir.SmartServerRequestOpenBranchV3(backing)
+ repo = self.make_repository('.')
+ self.assertEqual(smart_req.SmartServerResponse(
+ ('nobranch', 'location is a repository')),
+ request.execute(''))
+
+
+class TestSmartServerRequestRevisionHistory(tests.TestCaseWithMemoryTransport):
+
+ def test_empty(self):
+ """For an empty branch, the body is empty."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerRequestRevisionHistory(backing)
+ self.make_branch('.')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), ''),
+ request.execute(''))
+
+ def test_not_empty(self):
+ """For a non-empty branch, the body is empty."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerRequestRevisionHistory(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('1st commit')
+ r2 = tree.commit('2nd commit', rev_id=u'\xc8'.encode('utf-8'))
+ tree.unlock()
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', ), ('\x00'.join([r1, r2]))),
+ request.execute(''))
+
+
+class TestSmartServerBranchRequest(tests.TestCaseWithMemoryTransport):
+
+ def test_no_branch(self):
+ """When there is a bzrdir and no branch, NotBranchError is raised."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequest(backing)
+ self.make_bzrdir('.')
+ self.assertRaises(errors.NotBranchError,
+ request.execute, '')
+
+ def test_branch_reference(self):
+ """When there is a branch reference, NotBranchError is raised."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequest(backing)
+ branch = self.make_branch('branch')
+ checkout = branch.create_checkout('reference', lightweight=True)
+ self.assertRaises(errors.NotBranchError,
+ request.execute, 'reference')
+
+
+class TestSmartServerBranchRequestLastRevisionInfo(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_empty(self):
+ """For an empty branch, the result is ('ok', '0', 'null:')."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLastRevisionInfo(backing)
+ self.make_branch('.')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', '0', 'null:')),
+ request.execute(''))
+
+ def test_not_empty(self):
+ """For a non-empty branch, the result is ('ok', 'revno', 'revid')."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLastRevisionInfo(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ rev_id_utf8 = u'\xc8'.encode('utf-8')
+ r1 = tree.commit('1st commit')
+ r2 = tree.commit('2nd commit', rev_id=rev_id_utf8)
+ tree.unlock()
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', '2', rev_id_utf8)),
+ request.execute(''))
+
+
+class TestSmartServerBranchRequestRevisionIdToRevno(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_null(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestRevisionIdToRevno(
+ backing)
+ self.make_branch('.')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', '0')),
+ request.execute('', 'null:'))
+
+ def test_simple(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestRevisionIdToRevno(
+ backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('1st commit')
+ tree.unlock()
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', '1')),
+ request.execute('', r1))
+
+ def test_not_found(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestRevisionIdToRevno(
+ backing)
+ branch = self.make_branch('.')
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(
+ ('NoSuchRevision', 'idontexist')),
+ request.execute('', 'idontexist'))
+
+
+class TestSmartServerBranchRequestGetConfigFile(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_default(self):
+ """With no file, we get empty content."""
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchGetConfigFile(backing)
+ branch = self.make_branch('.')
+ # there should be no file by default
+ content = ''
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), content),
+ request.execute(''))
+
+ def test_with_content(self):
+ # SmartServerBranchGetConfigFile should return the content from
+ # branch.control_files.get('branch.conf') for now - in the future it may
+ # perform more complex processing.
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchGetConfigFile(backing)
+ branch = self.make_branch('.')
+ branch._transport.put_bytes('branch.conf', 'foo bar baz')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), 'foo bar baz'),
+ request.execute(''))
+
+
+class TestLockedBranch(tests.TestCaseWithMemoryTransport):
+
+ def get_lock_tokens(self, branch):
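+ """Write-lock branch and its repository; return (branch_token, repo_token).
+
+ The branch is left write-locked, so callers must unlock it themselves.
+ """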
+ branch_token = branch.lock_write().branch_token
+ repo_token = branch.repository.lock_write().repository_token
+ branch.repository.unlock()
+ return branch_token, repo_token
+
+
+class TestSmartServerBranchRequestPutConfigFile(TestLockedBranch):
+
+ def test_with_content(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchPutConfigFile(backing)
+ branch = self.make_branch('.')
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ self.assertIs(None, request.execute('', branch_token, repo_token))
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', )),
+ request.do_body('foo bar baz'))
+ self.assertEquals(
+ branch.control_transport.get_bytes('branch.conf'),
+ 'foo bar baz')
+ branch.unlock()
+
+
+class TestSmartServerBranchRequestSetConfigOption(TestLockedBranch):
+
+ def test_value_name(self):
+ branch = self.make_branch('.')
+ request = smart_branch.SmartServerBranchRequestSetConfigOption(
+ branch.bzrdir.root_transport)
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ config = branch._get_config()
+ result = request.execute('', branch_token, repo_token, 'bar', 'foo',
+ '')
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(()), result)
+ self.assertEqual('bar', config.get_option('foo'))
+ # Cleanup
+ branch.unlock()
+
+ def test_value_name_section(self):
+ branch = self.make_branch('.')
+ request = smart_branch.SmartServerBranchRequestSetConfigOption(
+ branch.bzrdir.root_transport)
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ config = branch._get_config()
+ result = request.execute('', branch_token, repo_token, 'bar', 'foo',
+ 'gam')
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(()), result)
+ self.assertEqual('bar', config.get_option('foo', 'gam'))
+ # Cleanup
+ branch.unlock()
+
+
+class TestSmartServerBranchRequestSetConfigOptionDict(TestLockedBranch):
+
+ def setUp(self):
+ TestLockedBranch.setUp(self)
+ # A dict with non-ascii keys and values to exercise unicode
+ # roundtripping.
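+ # (encoded_value_dict is the bencoded, UTF-8 encoded form of
+ # value_dict.)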
+ self.encoded_value_dict = (
+ 'd5:ascii1:a11:unicode \xe2\x8c\x9a3:\xe2\x80\xbde')
+ self.value_dict = {
+ 'ascii': 'a', u'unicode \N{WATCH}': u'\N{INTERROBANG}'}
+
+ def test_value_name(self):
+ branch = self.make_branch('.')
+ request = smart_branch.SmartServerBranchRequestSetConfigOptionDict(
+ branch.bzrdir.root_transport)
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ config = branch._get_config()
+ result = request.execute('', branch_token, repo_token,
+ self.encoded_value_dict, 'foo', '')
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(()), result)
+ self.assertEqual(self.value_dict, config.get_option('foo'))
+ # Cleanup
+ branch.unlock()
+
+ def test_value_name_section(self):
+ branch = self.make_branch('.')
+ request = smart_branch.SmartServerBranchRequestSetConfigOptionDict(
+ branch.bzrdir.root_transport)
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ config = branch._get_config()
+ result = request.execute('', branch_token, repo_token,
+ self.encoded_value_dict, 'foo', 'gam')
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(()), result)
+ self.assertEqual(self.value_dict, config.get_option('foo', 'gam'))
+ # Cleanup
+ branch.unlock()
+
+
+class TestSmartServerBranchRequestSetTagsBytes(TestLockedBranch):
+ # Only called when the branch format and tags match [yay factory
+ # methods] so we only need to test straightforward cases.
+
+ def test_set_bytes(self):
+ base_branch = self.make_branch('base')
+ tag_bytes = base_branch._get_tags_bytes()
+ # get_lock_tokens takes out a lock.
+ branch_token, repo_token = self.get_lock_tokens(base_branch)
+ request = smart_branch.SmartServerBranchSetTagsBytes(
+ self.get_transport())
+ response = request.execute('base', branch_token, repo_token)
+ self.assertEqual(None, response)
+ response = request.do_chunk(tag_bytes)
+ self.assertEqual(None, response)
+ response = request.do_end()
+ self.assertEquals(
+ smart_req.SuccessfulSmartServerResponse(()), response)
+ base_branch.unlock()
+
+ def test_lock_failed(self):
+ base_branch = self.make_branch('base')
+ base_branch.lock_write()
+ tag_bytes = base_branch._get_tags_bytes()
+ request = smart_branch.SmartServerBranchSetTagsBytes(
+ self.get_transport())
+ self.assertRaises(errors.TokenMismatch, request.execute,
+ 'base', 'wrong token', 'wrong token')
+ # The request handler will keep processing the message parts, so even
+ # if the request fails immediately do_chunk and do_end are still
+ # called.
+ request.do_chunk(tag_bytes)
+ request.do_end()
+ base_branch.unlock()
+
+
+class SetLastRevisionTestBase(TestLockedBranch):
+ """Base test case for verbs that implement set_last_revision."""
+
+ def setUp(self):
+ tests.TestCaseWithMemoryTransport.setUp(self)
+ backing_transport = self.get_transport()
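+ # request_class is supplied by the concrete subclasses below.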
+ self.request = self.request_class(backing_transport)
+ self.tree = self.make_branch_and_memory_tree('.')
+
+ def lock_branch(self):
+ return self.get_lock_tokens(self.tree.branch)
+
+ def unlock_branch(self):
+ self.tree.branch.unlock()
+
+ def set_last_revision(self, revision_id, revno):
+ branch_token, repo_token = self.lock_branch()
+ response = self._set_last_revision(
+ revision_id, revno, branch_token, repo_token)
+ self.unlock_branch()
+ return response
+
+ def assertRequestSucceeds(self, revision_id, revno):
+ response = self.set_last_revision(revision_id, revno)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok',)),
+ response)
+
+
+class TestSetLastRevisionVerbMixin(object):
+ """Mixin test case for verbs that implement set_last_revision."""
+
+ def test_set_null_to_null(self):
+ """An empty branch can have its last revision set to 'null:'."""
+ self.assertRequestSucceeds('null:', 0)
+
+ def test_NoSuchRevision(self):
+ """If the revision_id is not present, the verb returns NoSuchRevision.
+ """
+ revision_id = 'non-existent revision'
+ self.assertEqual(smart_req.FailedSmartServerResponse(('NoSuchRevision',
+ revision_id)),
+ self.set_last_revision(revision_id, 1))
+
+ def make_tree_with_two_commits(self):
+ self.tree.lock_write()
+ self.tree.add('')
+ rev_id_utf8 = u'\xc8'.encode('utf-8')
+ r1 = self.tree.commit('1st commit', rev_id=rev_id_utf8)
+ r2 = self.tree.commit('2nd commit', rev_id='rev-2')
+ self.tree.unlock()
+
+ def test_branch_last_revision_info_is_updated(self):
+ """A branch's tip can be set to a revision that is present in its
+ repository.
+ """
+ # Make a branch with an empty revision history, but two revisions in
+ # its repository.
+ self.make_tree_with_two_commits()
+ rev_id_utf8 = u'\xc8'.encode('utf-8')
+ self.tree.branch.set_last_revision_info(0, 'null:')
+ self.assertEqual(
+ (0, 'null:'), self.tree.branch.last_revision_info())
+ # We can update the branch to a revision that is present in the
+ # repository.
+ self.assertRequestSucceeds(rev_id_utf8, 1)
+ self.assertEqual(
+ (1, rev_id_utf8), self.tree.branch.last_revision_info())
+
+ def test_branch_last_revision_info_rewind(self):
+ """A branch's tip can be set to a revision that is an ancestor of the
+ current tip.
+ """
+ self.make_tree_with_two_commits()
+ rev_id_utf8 = u'\xc8'.encode('utf-8')
+ self.assertEqual(
+ (2, 'rev-2'), self.tree.branch.last_revision_info())
+ self.assertRequestSucceeds(rev_id_utf8, 1)
+ self.assertEqual(
+ (1, rev_id_utf8), self.tree.branch.last_revision_info())
+
+ def test_TipChangeRejected(self):
+ """If a pre_change_branch_tip hook raises TipChangeRejected, the verb
+ returns TipChangeRejected.
+ """
+ rejection_message = u'rejection message\N{INTERROBANG}'
+ def hook_that_rejects(params):
+ raise errors.TipChangeRejected(rejection_message)
+ _mod_branch.Branch.hooks.install_named_hook(
+ 'pre_change_branch_tip', hook_that_rejects, None)
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(
+ ('TipChangeRejected', rejection_message.encode('utf-8'))),
+ self.set_last_revision('null:', 0))
+
+
+class TestSmartServerBranchRequestSetLastRevision(
+ SetLastRevisionTestBase, TestSetLastRevisionVerbMixin):
+ """Tests for Branch.set_last_revision verb."""
+
+ request_class = smart_branch.SmartServerBranchRequestSetLastRevision
+
+ def _set_last_revision(self, revision_id, revno, branch_token, repo_token):
+ return self.request.execute(
+ '', branch_token, repo_token, revision_id)
+
+
+class TestSmartServerBranchRequestSetLastRevisionInfo(
+ SetLastRevisionTestBase, TestSetLastRevisionVerbMixin):
+ """Tests for Branch.set_last_revision_info verb."""
+
+ request_class = smart_branch.SmartServerBranchRequestSetLastRevisionInfo
+
+ def _set_last_revision(self, revision_id, revno, branch_token, repo_token):
+ return self.request.execute(
+ '', branch_token, repo_token, revno, revision_id)
+
+ def test_NoSuchRevision(self):
+ """Branch.set_last_revision_info does not have to return
+ NoSuchRevision if the revision_id is absent.
+ """
+ raise tests.TestNotApplicable()
+
+
+class TestSmartServerBranchRequestSetLastRevisionEx(
+ SetLastRevisionTestBase, TestSetLastRevisionVerbMixin):
+ """Tests for Branch.set_last_revision_ex verb."""
+
+ request_class = smart_branch.SmartServerBranchRequestSetLastRevisionEx
+
+ def _set_last_revision(self, revision_id, revno, branch_token, repo_token):
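+ # The trailing (0, 0) are the allow_divergence and
+ # allow_overwrite_descendant flags.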
+ return self.request.execute(
+ '', branch_token, repo_token, revision_id, 0, 0)
+
+ def assertRequestSucceeds(self, revision_id, revno):
+ response = self.set_last_revision(revision_id, revno)
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', revno, revision_id)),
+ response)
+
+ def test_branch_last_revision_info_rewind(self):
+ """A branch's tip can be set to a revision that is an ancestor of the
+ current tip, but only if allow_overwrite_descendant is passed.
+ """
+ self.make_tree_with_two_commits()
+ rev_id_utf8 = u'\xc8'.encode('utf-8')
+ self.assertEqual(
+ (2, 'rev-2'), self.tree.branch.last_revision_info())
+ # If allow_overwrite_descendant flag is 0, then trying to set the tip
+ # to an older revision ID has no effect.
+ branch_token, repo_token = self.lock_branch()
+ response = self.request.execute(
+ '', branch_token, repo_token, rev_id_utf8, 0, 0)
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', 2, 'rev-2')),
+ response)
+ self.assertEqual(
+ (2, 'rev-2'), self.tree.branch.last_revision_info())
+
+ # If allow_overwrite_descendant flag is 1, then setting the tip to an
+ # ancestor works.
+ response = self.request.execute(
+ '', branch_token, repo_token, rev_id_utf8, 0, 1)
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', 1, rev_id_utf8)),
+ response)
+ self.unlock_branch()
+ self.assertEqual(
+ (1, rev_id_utf8), self.tree.branch.last_revision_info())
+
+ def make_branch_with_divergent_history(self):
+ """Make a branch with divergent history in its repo.
+
+ The branch's tip will be 'child-2', and the repo will also contain
+ 'child-1', which diverges from a common base revision.
+ """
+ self.tree.lock_write()
+ self.tree.add('')
+ r1 = self.tree.commit('1st commit')
+ revno_1, revid_1 = self.tree.branch.last_revision_info()
+ r2 = self.tree.commit('2nd commit', rev_id='child-1')
+ # Undo the second commit
+ self.tree.branch.set_last_revision_info(revno_1, revid_1)
+ self.tree.set_parent_ids([revid_1])
+ # Make a new second commit, child-2. child-2 has diverged from
+ # child-1.
+ new_r2 = self.tree.commit('2nd commit', rev_id='child-2')
+ self.tree.unlock()
+
+ def test_not_allow_diverged(self):
+ """If allow_diverged is not passed, then setting a divergent history
+ returns a Diverged error.
+ """
+ self.make_branch_with_divergent_history()
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(('Diverged',)),
+ self.set_last_revision('child-1', 2))
+ # The branch tip was not changed.
+ self.assertEqual('child-2', self.tree.branch.last_revision())
+
+ def test_allow_diverged(self):
+ """If allow_diverged is passed, then setting a divergent history
+ succeeds.
+ """
+ self.make_branch_with_divergent_history()
+ branch_token, repo_token = self.lock_branch()
+ response = self.request.execute(
+ '', branch_token, repo_token, 'child-1', 1, 0)
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', 2, 'child-1')),
+ response)
+ self.unlock_branch()
+ # The branch tip was changed.
+ self.assertEqual('child-1', self.tree.branch.last_revision())
+
+
+class TestSmartServerBranchBreakLock(tests.TestCaseWithMemoryTransport):
+
+ def test_lock_to_break(self):
+ base_branch = self.make_branch('base')
+ request = smart_branch.SmartServerBranchBreakLock(
+ self.get_transport())
+ base_branch.lock_write()
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ), None),
+ request.execute('base'))
+
+ def test_nothing_to_break(self):
+ base_branch = self.make_branch('base')
+ request = smart_branch.SmartServerBranchBreakLock(
+ self.get_transport())
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ), None),
+ request.execute('base'))
+
+
+class TestSmartServerBranchRequestGetParent(tests.TestCaseWithMemoryTransport):
+
+ def test_get_parent_none(self):
+ base_branch = self.make_branch('base')
+ request = smart_branch.SmartServerBranchGetParent(self.get_transport())
+ response = request.execute('base')
+ self.assertEquals(
+ smart_req.SuccessfulSmartServerResponse(('',)), response)
+
+ def test_get_parent_something(self):
+ base_branch = self.make_branch('base')
+ base_branch.set_parent(self.get_url('foo'))
+ request = smart_branch.SmartServerBranchGetParent(self.get_transport())
+ response = request.execute('base')
+ self.assertEquals(
+ smart_req.SuccessfulSmartServerResponse(("../foo",)),
+ response)
+
+
+class TestSmartServerBranchRequestSetParent(TestLockedBranch):
+
+ def test_set_parent_none(self):
+ branch = self.make_branch('base', format="1.9")
+ branch.lock_write()
+ branch._set_parent_location('foo')
+ branch.unlock()
+ request = smart_branch.SmartServerBranchRequestSetParentLocation(
+ self.get_transport())
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ try:
+ response = request.execute('base', branch_token, repo_token, '')
+ finally:
+ branch.unlock()
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(()), response)
+ # Refresh branch as SetParentLocation modified it
+ branch = branch.bzrdir.open_branch()
+ self.assertEqual(None, branch.get_parent())
+
+ def test_set_parent_something(self):
+ branch = self.make_branch('base', format="1.9")
+ request = smart_branch.SmartServerBranchRequestSetParentLocation(
+ self.get_transport())
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ try:
+ response = request.execute('base', branch_token, repo_token,
+ 'http://bar/')
+ finally:
+ branch.unlock()
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(()), response)
+ refreshed = _mod_branch.Branch.open(branch.base)
+ self.assertEqual('http://bar/', refreshed.get_parent())
+
+
+class TestSmartServerBranchRequestGetTagsBytes(
+ tests.TestCaseWithMemoryTransport):
+ # Only called when the branch format and tags match [yay factory
+ # methods] so we only need to test straightforward cases.
+
+ def test_get_bytes(self):
+ base_branch = self.make_branch('base')
+ request = smart_branch.SmartServerBranchGetTagsBytes(
+ self.get_transport())
+ response = request.execute('base')
+ self.assertEquals(
+ smart_req.SuccessfulSmartServerResponse(('',)), response)
+
+
+class TestSmartServerBranchRequestGetStackedOnURL(tests.TestCaseWithMemoryTransport):
+
+ def test_get_stacked_on_url(self):
+ base_branch = self.make_branch('base', format='1.6')
+ stacked_branch = self.make_branch('stacked', format='1.6')
+ # typically should be relative
+ stacked_branch.set_stacked_on_url('../base')
+ request = smart_branch.SmartServerBranchRequestGetStackedOnURL(
+ self.get_transport())
+ response = request.execute('stacked')
+ self.assertEquals(
+ smart_req.SmartServerResponse(('ok', '../base')),
+ response)
+
+
+class TestSmartServerBranchRequestLockWrite(TestLockedBranch):
+
+ def setUp(self):
+ tests.TestCaseWithMemoryTransport.setUp(self)
+
+ def test_lock_write_on_unlocked_branch(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLockWrite(backing)
+ branch = self.make_branch('.', format='knit')
+ repository = branch.repository
+ response = request.execute('')
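+ # Peek at the physical locks to recover the nonces the server just
+ # handed out; they double as the tokens used to unlock below.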
+ branch_nonce = branch.control_files._lock.peek().get('nonce')
+ repository_nonce = repository.control_files._lock.peek().get('nonce')
+ self.assertEqual(smart_req.SmartServerResponse(
+ ('ok', branch_nonce, repository_nonce)),
+ response)
+ # The branch (and associated repository) is now locked. Verify that
+ # with a new branch object.
+ new_branch = repository.bzrdir.open_branch()
+ self.assertRaises(errors.LockContention, new_branch.lock_write)
+ # Cleanup
+ request = smart_branch.SmartServerBranchRequestUnlock(backing)
+ response = request.execute('', branch_nonce, repository_nonce)
+
+ def test_lock_write_on_locked_branch(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLockWrite(backing)
+ branch = self.make_branch('.')
+ branch_token = branch.lock_write().branch_token
+ branch.leave_lock_in_place()
+ branch.unlock()
+ response = request.execute('')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('LockContention',)), response)
+ # Cleanup
+ branch.lock_write(branch_token)
+ branch.dont_leave_lock_in_place()
+ branch.unlock()
+
+ def test_lock_write_with_tokens_on_locked_branch(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLockWrite(backing)
+ branch = self.make_branch('.', format='knit')
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ branch.leave_lock_in_place()
+ branch.repository.leave_lock_in_place()
+ branch.unlock()
+ response = request.execute('',
+ branch_token, repo_token)
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', branch_token, repo_token)),
+ response)
+ # Cleanup
+ branch.repository.lock_write(repo_token)
+ branch.repository.dont_leave_lock_in_place()
+ branch.repository.unlock()
+ branch.lock_write(branch_token)
+ branch.dont_leave_lock_in_place()
+ branch.unlock()
+
+ def test_lock_write_with_mismatched_tokens_on_locked_branch(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLockWrite(backing)
+ branch = self.make_branch('.', format='knit')
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ branch.leave_lock_in_place()
+ branch.repository.leave_lock_in_place()
+ branch.unlock()
+ response = request.execute('',
+ branch_token+'xxx', repo_token)
+ self.assertEqual(
+ smart_req.SmartServerResponse(('TokenMismatch',)), response)
+ # Cleanup
+ branch.repository.lock_write(repo_token)
+ branch.repository.dont_leave_lock_in_place()
+ branch.repository.unlock()
+ branch.lock_write(branch_token)
+ branch.dont_leave_lock_in_place()
+ branch.unlock()
+
+ def test_lock_write_on_locked_repo(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestLockWrite(backing)
+ branch = self.make_branch('.', format='knit')
+ repo = branch.repository
+ repo_token = repo.lock_write().repository_token
+ repo.leave_lock_in_place()
+ repo.unlock()
+ response = request.execute('')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('LockContention',)), response)
+ # Cleanup
+ repo.lock_write(repo_token)
+ repo.dont_leave_lock_in_place()
+ repo.unlock()
+
+ def test_lock_write_on_readonly_transport(self):
+ backing = self.get_readonly_transport()
+ request = smart_branch.SmartServerBranchRequestLockWrite(backing)
+ branch = self.make_branch('.')
+ root = self.get_transport().clone('/')
+ path = urlutils.relative_url(root.base, self.get_transport().base)
+ response = request.execute(path)
+ error_name, lock_str, why_str = response.args
+ self.assertFalse(response.is_successful())
+ self.assertEqual('LockFailed', error_name)
+
+
+class TestSmartServerBranchRequestGetPhysicalLockStatus(TestLockedBranch):
+
+ def setUp(self):
+ tests.TestCaseWithMemoryTransport.setUp(self)
+
+ def test_true(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestGetPhysicalLockStatus(
+ backing)
+ branch = self.make_branch('.')
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ self.assertEquals(True, branch.get_physical_lock_status())
+ response = request.execute('')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('yes',)), response)
+ branch.unlock()
+
+ def test_false(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestGetPhysicalLockStatus(
+ backing)
+ branch = self.make_branch('.')
+ self.assertEquals(False, branch.get_physical_lock_status())
+ response = request.execute('')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('no',)), response)
+
+
+class TestSmartServerBranchRequestUnlock(TestLockedBranch):
+
+ def setUp(self):
+ tests.TestCaseWithMemoryTransport.setUp(self)
+
+ def test_unlock_on_locked_branch_and_repo(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestUnlock(backing)
+ branch = self.make_branch('.', format='knit')
+ # Lock the branch
+ branch_token, repo_token = self.get_lock_tokens(branch)
+ # Unlock the branch (and repo) object, leaving the physical locks
+ # in place.
+ branch.leave_lock_in_place()
+ branch.repository.leave_lock_in_place()
+ branch.unlock()
+ response = request.execute('',
+ branch_token, repo_token)
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok',)), response)
+ # The branch is now unlocked. Verify that with a new branch
+ # object.
+ new_branch = branch.bzrdir.open_branch()
+ new_branch.lock_write()
+ new_branch.unlock()
+
+ def test_unlock_on_unlocked_branch_unlocked_repo(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestUnlock(backing)
+ branch = self.make_branch('.', format='knit')
+ response = request.execute(
+ '', 'branch token', 'repo token')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('TokenMismatch',)), response)
+
+ def test_unlock_on_unlocked_branch_locked_repo(self):
+ backing = self.get_transport()
+ request = smart_branch.SmartServerBranchRequestUnlock(backing)
+ branch = self.make_branch('.', format='knit')
+ # Lock the repository.
+ repo_token = branch.repository.lock_write().repository_token
+ branch.repository.leave_lock_in_place()
+ branch.repository.unlock()
+ # Issue a branch unlock request on the unlocked branch (with locked
+ # repo).
+ response = request.execute('', 'branch token', repo_token)
+ self.assertEqual(
+ smart_req.SmartServerResponse(('TokenMismatch',)), response)
+ # Cleanup
+ branch.repository.lock_write(repo_token)
+ branch.repository.dont_leave_lock_in_place()
+ branch.repository.unlock()
+
+
+class TestSmartServerRepositoryRequest(tests.TestCaseWithMemoryTransport):
+
+ def test_no_repository(self):
+ """Raise NoRepositoryPresent when there is a bzrdir and no repo."""
+ # we test this using a shared repository above the named path,
+ # thus checking the right search logic is used - that is, that
+ # it's the exact path being looked at and the server is not
+ # searching.
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryRequest(backing)
+ self.make_repository('.', shared=True)
+ self.make_bzrdir('subdir')
+ self.assertRaises(errors.NoRepositoryPresent,
+ request.execute, 'subdir')
+
+
+class TestSmartServerRepositoryAddSignatureText(tests.TestCaseWithMemoryTransport):
+
+ def test_add_text(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryAddSignatureText(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ write_token = tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add('')
+ tree.commit("Message", rev_id='rev1')
+ tree.branch.repository.start_write_group()
+ write_group_tokens = tree.branch.repository.suspend_write_group()
+ self.assertEqual(None, request.execute('', write_token,
+ 'rev1', *write_group_tokens))
+ response = request.do_body('somesignature')
+ self.assertTrue(response.is_successful())
+ self.assertEqual(response.args[0], 'ok')
+ write_group_tokens = response.args[1:]
+ tree.branch.repository.resume_write_group(write_group_tokens)
+ tree.branch.repository.commit_write_group()
+ tree.unlock()
+ self.assertEqual("somesignature",
+ tree.branch.repository.get_signature_text("rev1"))
+
+
+class TestSmartServerRepositoryAllRevisionIds(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_empty(self):
+ """An empty body should be returned for an empty repository."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryAllRevisionIds(backing)
+ self.make_repository('.')
+ self.assertEquals(
+ smart_req.SuccessfulSmartServerResponse(("ok", ), ""),
+ request.execute(''))
+
+ def test_some_revisions(self):
+ """An empty body should be returned for an empty repository."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryAllRevisionIds(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ tree.commit(rev_id='origineel', message="message")
+ tree.commit(rev_id='nog-een-revisie', message="message")
+ tree.unlock()
+ self.assertEquals(
+ smart_req.SuccessfulSmartServerResponse(("ok", ),
+ "origineel\nnog-een-revisie"),
+ request.execute(''))
+
+
+class TestSmartServerRepositoryBreakLock(tests.TestCaseWithMemoryTransport):
+
+ def test_lock_to_break(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryBreakLock(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.branch.repository.lock_write()
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ), None),
+ request.execute(''))
+
+ def test_nothing_to_break(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryBreakLock(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ), None),
+ request.execute(''))
+
+
+class TestSmartServerRepositoryGetParentMap(tests.TestCaseWithMemoryTransport):
+
+ def test_trivial_bzipped(self):
+ # This tests that the wire encoding is actually bzipped
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetParentMap(backing)
+ tree = self.make_branch_and_memory_tree('.')
+
+ self.assertEqual(None,
+ request.execute('', 'missing-id'))
+ # Note that it returns a body that is bzipped.
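+ # The body carries a search recipe describing, roughly, what the
+ # client already has; '\n\n0\n' is the empty recipe (no start keys,
+ # no stop keys, zero revisions).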
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ), bz2.compress('')),
+ request.do_body('\n\n0\n'))
+
+ def test_trivial_include_missing(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetParentMap(backing)
+ tree = self.make_branch_and_memory_tree('.')
+
+ self.assertEqual(None,
+ request.execute('', 'missing-id', 'include-missing:'))
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ),
+ bz2.compress('missing:missing-id')),
+ request.do_body('\n\n0\n'))
+
+
+class TestSmartServerRepositoryGetRevisionGraph(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_none_argument(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevisionGraph(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('1st commit')
+ r2 = tree.commit('2nd commit', rev_id=u'\xc8'.encode('utf-8'))
+ tree.unlock()
+
+ # the lines of revision_id->revision_parent_list have no guaranteed
+ # order coming out of a dict, so sort both our test and response
+ lines = sorted([' '.join([r2, r1]), r1])
+ response = request.execute('', '')
+ response.body = '\n'.join(sorted(response.body.split('\n')))
+
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', ), '\n'.join(lines)), response)
+
+ def test_specific_revision_argument(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevisionGraph(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ rev_id_utf8 = u'\xc9'.encode('utf-8')
+ r1 = tree.commit('1st commit', rev_id=rev_id_utf8)
+ r2 = tree.commit('2nd commit', rev_id=u'\xc8'.encode('utf-8'))
+ tree.unlock()
+
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), rev_id_utf8),
+ request.execute('', rev_id_utf8))
+
+ def test_no_such_revision(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevisionGraph(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('1st commit')
+ tree.unlock()
+
+ # Note that it still returns a body (of zero bytes).
+ self.assertEqual(smart_req.SmartServerResponse(
+ ('nosuchrevision', 'missingrevision', ), ''),
+ request.execute('', 'missingrevision'))
+
+
+class TestSmartServerRepositoryGetRevIdForRevno(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_revno_found(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevIdForRevno(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ rev1_id_utf8 = u'\xc8'.encode('utf-8')
+ rev2_id_utf8 = u'\xc9'.encode('utf-8')
+ tree.commit('1st commit', rev_id=rev1_id_utf8)
+ tree.commit('2nd commit', rev_id=rev2_id_utf8)
+ tree.unlock()
+
+ self.assertEqual(smart_req.SmartServerResponse(('ok', rev1_id_utf8)),
+ request.execute('', 1, (2, rev2_id_utf8)))
+
+ def test_known_revid_missing(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevIdForRevno(backing)
+ repo = self.make_repository('.')
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(('nosuchrevision', 'ghost')),
+ request.execute('', 1, (2, 'ghost')))
+
+ def test_history_incomplete(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevIdForRevno(backing)
+ parent = self.make_branch_and_memory_tree('parent', format='1.9')
+ parent.lock_write()
+ parent.add([''], ['TREE_ROOT'])
+ r1 = parent.commit(message='first commit')
+ r2 = parent.commit(message='second commit')
+ parent.unlock()
+ local = self.make_branch_and_memory_tree('local', format='1.9')
+ local.branch.pull(parent.branch)
+ local.set_parent_ids([r2])
+ r3 = local.commit(message='local commit')
+ local.branch.create_clone_on_transport(
+ self.get_transport('stacked'), stacked_on=self.get_url('parent'))
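+ # The stacked repository only holds r3 locally, so the server can
+ # walk back no further than (2, r2) and answers 'history-incomplete'
+ # instead of resolving revno 1.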
+ self.assertEqual(
+ smart_req.SmartServerResponse(('history-incomplete', 2, r2)),
+ request.execute('stacked', 1, (3, r3)))
+
+
+class TestSmartServerRepositoryIterRevisions(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_basic(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryIterRevisions(backing)
+ tree = self.make_branch_and_memory_tree('.', format='2a')
+ tree.lock_write()
+ tree.add('')
+ tree.commit('1st commit', rev_id="rev1")
+ tree.commit('2nd commit', rev_id="rev2")
+ tree.unlock()
+
+ self.assertIs(None, request.execute(''))
+ response = request.do_body("rev1\nrev2")
+ self.assertTrue(response.is_successful())
+ # Format 2a uses serializer format 10
+ self.assertEquals(response.args, ("ok", "10"))
+
+ self.addCleanup(tree.branch.lock_read().unlock)
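+ # The body stream holds each revision's serialised text, individually
+ # zlib-compressed; record order is not guaranteed, so accept either
+ # concatenation order below.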
+ entries = [zlib.compress(record.get_bytes_as("fulltext")) for record in
+ tree.branch.repository.revisions.get_record_stream(
+ [("rev1", ), ("rev2", )], "unordered", True)]
+
+ contents = "".join(response.body_stream)
+ self.assertTrue(contents in (
+ "".join([entries[0], entries[1]]),
+ "".join([entries[1], entries[0]])))
+
+ def test_missing(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryIterRevisions(backing)
+ tree = self.make_branch_and_memory_tree('.', format='2a')
+
+ self.assertIs(None, request.execute(''))
+ response = request.do_body("rev1\nrev2")
+ self.assertTrue(response.is_successful())
+ # Format 2a uses serializer format 10
+ self.assertEquals(response.args, ("ok", "10"))
+
+ contents = "".join(response.body_stream)
+ self.assertEquals(contents, "")
+
+
+class GetStreamTestBase(tests.TestCaseWithMemoryTransport):
+
+ def make_two_commit_repo(self):
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('1st commit')
+ r2 = tree.commit('2nd commit', rev_id=u'\xc8'.encode('utf-8'))
+ tree.unlock()
+ repo = tree.branch.repository
+ return repo, r1, r2
+
+
+class TestSmartServerRepositoryGetStream(GetStreamTestBase):
+
+ def test_ancestry_of(self):
+ """The search argument may be a 'ancestry-of' some heads'."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetStream(backing)
+ repo, r1, r2 = self.make_two_commit_repo()
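+ # An 'ancestry-of' recipe simply lists head revision ids whose whole
+ # ancestry should be streamed.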
+ fetch_spec = ['ancestry-of', r2]
+ lines = '\n'.join(fetch_spec)
+ request.execute('', repo._format.network_name())
+ response = request.do_body(lines)
+ self.assertEqual(('ok',), response.args)
+ stream_bytes = ''.join(response.body_stream)
+ self.assertStartsWith(stream_bytes, 'Bazaar pack format 1')
+
+ def test_search(self):
+ """The search argument may be a 'search' of some explicit keys."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetStream(backing)
+ repo, r1, r2 = self.make_two_commit_repo()
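+ # A 'search' recipe gives the explicit head keys, the keys to stop at
+ # ('null:' here) and the expected revision count, one item per line.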
+ fetch_spec = ['search', '%s %s' % (r1, r2), 'null:', '2']
+ lines = '\n'.join(fetch_spec)
+ request.execute('', repo._format.network_name())
+ response = request.do_body(lines)
+ self.assertEqual(('ok',), response.args)
+ stream_bytes = ''.join(response.body_stream)
+ self.assertStartsWith(stream_bytes, 'Bazaar pack format 1')
+
+ def test_search_everything(self):
+ """A search of 'everything' returns a stream."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetStream_1_19(backing)
+ repo, r1, r2 = self.make_two_commit_repo()
+ serialised_fetch_spec = 'everything'
+ request.execute('', repo._format.network_name())
+ response = request.do_body(serialised_fetch_spec)
+ self.assertEqual(('ok',), response.args)
+ stream_bytes = ''.join(response.body_stream)
+ self.assertStartsWith(stream_bytes, 'Bazaar pack format 1')
+
+
+class TestSmartServerRequestHasRevision(tests.TestCaseWithMemoryTransport):
+
+ def test_missing_revision(self):
+ """For a missing revision, ('no', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRequestHasRevision(backing)
+ self.make_repository('.')
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('', 'revid'))
+
+ def test_present_revision(self):
+ """For a present revision, ('yes', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRequestHasRevision(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ rev_id_utf8 = u'\xc8abc'.encode('utf-8')
+ r1 = tree.commit('a commit', rev_id=rev_id_utf8)
+ tree.unlock()
+ self.assertTrue(tree.branch.repository.has_revision(rev_id_utf8))
+ self.assertEqual(smart_req.SmartServerResponse(('yes', )),
+ request.execute('', rev_id_utf8))
+
+
+class TestSmartServerRepositoryIterFilesBytes(tests.TestCaseWithTransport):
+
+ def test_single(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryIterFilesBytes(backing)
+ t = self.make_branch_and_tree('.')
+ self.addCleanup(t.lock_write().unlock)
+ self.build_tree_contents([("file", "somecontents")])
+ t.add(["file"], ["thefileid"])
+ t.commit(rev_id='somerev', message="add file")
+ self.assertIs(None, request.execute(''))
+ response = request.do_body("thefileid\0somerev\n")
+ self.assertTrue(response.is_successful())
+ self.assertEquals(response.args, ("ok", ))
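+ # Each answer in the body stream is a small header (status, then
+ # presumably the index of the requested id/revision pair) followed by
+ # the zlib-compressed file text.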
+ self.assertEquals("".join(response.body_stream),
+ "ok\x000\n" + zlib.compress("somecontents"))
+
+ def test_missing(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryIterFilesBytes(backing)
+ t = self.make_branch_and_tree('.')
+ self.addCleanup(t.lock_write().unlock)
+ self.assertIs(None, request.execute(''))
+ response = request.do_body("thefileid\0revision\n")
+ self.assertTrue(response.is_successful())
+ self.assertEquals(response.args, ("ok", ))
+ self.assertEquals("".join(response.body_stream),
+ "absent\x00thefileid\x00revision\x000\n")
+
+
+class TestSmartServerRequestHasSignatureForRevisionId(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_missing_revision(self):
+ """For a missing revision, NoSuchRevision is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRequestHasSignatureForRevisionId(
+ backing)
+ self.make_repository('.')
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(
+ ('nosuchrevision', 'revid'), None),
+ request.execute('', 'revid'))
+
+ def test_missing_signature(self):
+ """For a missing signature, ('no', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRequestHasSignatureForRevisionId(
+ backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('a commit', rev_id='A')
+ tree.unlock()
+ self.assertTrue(tree.branch.repository.has_revision('A'))
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('', 'A'))
+
+ def test_present_signature(self):
+ """For a present signature, ('yes', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRequestHasSignatureForRevisionId(
+ backing)
+ strategy = gpg.LoopbackGPGStrategy(None)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ r1 = tree.commit('a commit', rev_id='A')
+ tree.branch.repository.start_write_group()
+ tree.branch.repository.sign_revision('A', strategy)
+ tree.branch.repository.commit_write_group()
+ tree.unlock()
+ self.assertTrue(tree.branch.repository.has_revision('A'))
+ self.assertEqual(smart_req.SmartServerResponse(('yes', )),
+ request.execute('', 'A'))
+
+
+class TestSmartServerRepositoryGatherStats(tests.TestCaseWithMemoryTransport):
+
+ def test_empty_revid(self):
+ """With an empty revid, we get only size an number and revisions"""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGatherStats(backing)
+ repository = self.make_repository('.')
+ stats = repository.gather_stats()
+ expected_body = 'revisions: 0\n'
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), expected_body),
+ request.execute('', '', 'no'))
+
+ def test_revid_with_committers(self):
+ """For a revid we get more infos."""
+ backing = self.get_transport()
+ rev_id_utf8 = u'\xc8abc'.encode('utf-8')
+ request = smart_repo.SmartServerRepositoryGatherStats(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ # Let's build a predictable result
+ tree.commit('a commit', timestamp=123456.2, timezone=3600)
+ tree.commit('a commit', timestamp=654321.4, timezone=0,
+ rev_id=rev_id_utf8)
+ tree.unlock()
+
+ stats = tree.branch.repository.gather_stats()
+ expected_body = ('firstrev: 123456.200 3600\n'
+ 'latestrev: 654321.400 0\n'
+ 'revisions: 2\n')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), expected_body),
+ request.execute('',
+ rev_id_utf8, 'no'))
+
+ def test_not_empty_repository_with_committers(self):
+ """For a revid and requesting committers we get the whole thing."""
+ backing = self.get_transport()
+ rev_id_utf8 = u'\xc8abc'.encode('utf-8')
+ request = smart_repo.SmartServerRepositoryGatherStats(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ tree.lock_write()
+ tree.add('')
+ # Let's build a predictable result
+ tree.commit('a commit', timestamp=123456.2, timezone=3600,
+ committer='foo')
+ tree.commit('a commit', timestamp=654321.4, timezone=0,
+ committer='bar', rev_id=rev_id_utf8)
+ tree.unlock()
+ stats = tree.branch.repository.gather_stats()
+
+ expected_body = ('committers: 2\n'
+ 'firstrev: 123456.200 3600\n'
+ 'latestrev: 654321.400 0\n'
+ 'revisions: 2\n')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), expected_body),
+ request.execute('',
+ rev_id_utf8, 'yes'))
+
+ def test_unknown_revid(self):
+ """An unknown revision id causes a 'nosuchrevision' error."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGatherStats(backing)
+ repository = self.make_repository('.')
+ expected_body = 'revisions: 0\n'
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(
+ ('nosuchrevision', 'mia'), None),
+ request.execute('', 'mia', 'yes'))
+
+
+class TestSmartServerRepositoryIsShared(tests.TestCaseWithMemoryTransport):
+
+ def test_is_shared(self):
+ """For a shared repository, ('yes', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryIsShared(backing)
+ self.make_repository('.', shared=True)
+ self.assertEqual(smart_req.SmartServerResponse(('yes', )),
+ request.execute('', ))
+
+ def test_is_not_shared(self):
+ """For a shared repository, ('no', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryIsShared(backing)
+ self.make_repository('.', shared=False)
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('', ))
+
+
+class TestSmartServerRepositoryGetRevisionSignatureText(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_get_signature(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetRevisionSignatureText(
+ backing)
+ bb = self.make_branch_builder('.')
+ bb.build_commit(rev_id='A')
+ repo = bb.get_branch().repository
+ strategy = gpg.LoopbackGPGStrategy(None)
+ self.addCleanup(repo.lock_write().unlock)
+ repo.start_write_group()
+ repo.sign_revision('A', strategy)
+ repo.commit_write_group()
+ expected_body = (
+ '-----BEGIN PSEUDO-SIGNED CONTENT-----\n' +
+ Testament.from_revision(repo, 'A').as_short_text() +
+ '-----END PSEUDO-SIGNED CONTENT-----\n')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok', ), expected_body),
+ request.execute('', 'A'))
+
+
+class TestSmartServerRepositoryMakeWorkingTrees(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_make_working_trees(self):
+ """For a repository with working trees, ('yes', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryMakeWorkingTrees(backing)
+ r = self.make_repository('.')
+ r.set_make_working_trees(True)
+ self.assertEqual(smart_req.SmartServerResponse(('yes', )),
+ request.execute('', ))
+
+ def test_do_not_make_working_trees(self):
+ """For a repository without working trees, ('no', ) is returned."""
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryMakeWorkingTrees(backing)
+ r = self.make_repository('.')
+ r.set_make_working_trees(False)
+ self.assertEqual(smart_req.SmartServerResponse(('no', )),
+ request.execute('', ))
+
+
+class TestSmartServerRepositoryLockWrite(tests.TestCaseWithMemoryTransport):
+
+ def test_lock_write_on_unlocked_repo(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryLockWrite(backing)
+ repository = self.make_repository('.', format='knit')
+ response = request.execute('')
+ nonce = repository.control_files._lock.peek().get('nonce')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', nonce)), response)
+ # The repository is now locked. Verify that with a new repository
+ # object.
+ new_repo = repository.bzrdir.open_repository()
+ self.assertRaises(errors.LockContention, new_repo.lock_write)
+ # Cleanup
+ request = smart_repo.SmartServerRepositoryUnlock(backing)
+ response = request.execute('', nonce)
+
+ def test_lock_write_on_locked_repo(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryLockWrite(backing)
+ repository = self.make_repository('.', format='knit')
+ repo_token = repository.lock_write().repository_token
+ repository.leave_lock_in_place()
+ repository.unlock()
+ response = request.execute('')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('LockContention',)), response)
+ # Cleanup
+ repository.lock_write(repo_token)
+ repository.dont_leave_lock_in_place()
+ repository.unlock()
+
+ def test_lock_write_on_readonly_transport(self):
+ backing = self.get_readonly_transport()
+ request = smart_repo.SmartServerRepositoryLockWrite(backing)
+ repository = self.make_repository('.', format='knit')
+ response = request.execute('')
+ self.assertFalse(response.is_successful())
+ self.assertEqual('LockFailed', response.args[0])
+
+
+class TestInsertStreamBase(tests.TestCaseWithMemoryTransport):
+
+ def make_empty_byte_stream(self, repo):
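+ """Serialise an empty record stream in repo's format as raw bytes."""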
+ byte_stream = smart_repo._stream_to_byte_stream([], repo._format)
+ return ''.join(byte_stream)
+
+
+class TestSmartServerRepositoryInsertStream(TestInsertStreamBase):
+
+ def test_insert_stream_empty(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryInsertStream(backing)
+ repository = self.make_repository('.')
+ response = request.execute('', '')
+ self.assertEqual(None, response)
+ response = request.do_chunk(self.make_empty_byte_stream(repository))
+ self.assertEqual(None, response)
+ response = request.do_end()
+ self.assertEqual(smart_req.SmartServerResponse(('ok', )), response)
+
+
+class TestSmartServerRepositoryInsertStreamLocked(TestInsertStreamBase):
+
+ def test_insert_stream_empty(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryInsertStreamLocked(
+ backing)
+ repository = self.make_repository('.', format='knit')
+ lock_token = repository.lock_write().repository_token
+ response = request.execute('', '', lock_token)
+ self.assertEqual(None, response)
+ response = request.do_chunk(self.make_empty_byte_stream(repository))
+ self.assertEqual(None, response)
+ response = request.do_end()
+ self.assertEqual(smart_req.SmartServerResponse(('ok', )), response)
+ repository.unlock()
+
+ def test_insert_stream_with_wrong_lock_token(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryInsertStreamLocked(
+ backing)
+ repository = self.make_repository('.', format='knit')
+ lock_token = repository.lock_write().repository_token
+ self.assertRaises(
+ errors.TokenMismatch, request.execute, '', '', 'wrong-token')
+ repository.unlock()
+
+
+class TestSmartServerRepositoryUnlock(tests.TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ tests.TestCaseWithMemoryTransport.setUp(self)
+
+ def test_unlock_on_locked_repo(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryUnlock(backing)
+ repository = self.make_repository('.', format='knit')
+ token = repository.lock_write().repository_token
+ repository.leave_lock_in_place()
+ repository.unlock()
+ response = request.execute('', token)
+ self.assertEqual(
+ smart_req.SmartServerResponse(('ok',)), response)
+ # The repository is now unlocked. Verify that with a new repository
+ # object.
+ new_repo = repository.bzrdir.open_repository()
+ new_repo.lock_write()
+ new_repo.unlock()
+
+ def test_unlock_on_unlocked_repo(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryUnlock(backing)
+ repository = self.make_repository('.', format='knit')
+ response = request.execute('', 'some token')
+ self.assertEqual(
+ smart_req.SmartServerResponse(('TokenMismatch',)), response)
+
+
+class TestSmartServerRepositoryGetPhysicalLockStatus(
+ tests.TestCaseWithTransport):
+
+ def test_with_write_lock(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ self.addCleanup(repo.lock_write().unlock)
+ # lock_write() doesn't necessarily actually take a physical
+ # lock out.
+ if repo.get_physical_lock_status():
+ expected = 'yes'
+ else:
+ expected = 'no'
+ request_class = smart_repo.SmartServerRepositoryGetPhysicalLockStatus
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse((expected,)),
+ request.execute('', ))
+
+ def test_without_write_lock(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ self.assertEquals(False, repo.get_physical_lock_status())
+ request_class = smart_repo.SmartServerRepositoryGetPhysicalLockStatus
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('no',)),
+ request.execute('', ))
+
+
+class TestSmartServerRepositoryReconcile(tests.TestCaseWithTransport):
+
+ def test_reconcile(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ request_class = smart_repo.SmartServerRepositoryReconcile
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(
+ ('ok', ),
+ 'garbage_inventories: 0\n'
+ 'inconsistent_parents: 0\n'),
+ request.execute('', token))
+
+
+class TestSmartServerIsReadonly(tests.TestCaseWithMemoryTransport):
+
+ def test_is_readonly_no(self):
+ backing = self.get_transport()
+ request = smart_req.SmartServerIsReadonly(backing)
+ response = request.execute()
+ self.assertEqual(
+ smart_req.SmartServerResponse(('no',)), response)
+
+ def test_is_readonly_yes(self):
+ backing = self.get_readonly_transport()
+ request = smart_req.SmartServerIsReadonly(backing)
+ response = request.execute()
+ self.assertEqual(
+ smart_req.SmartServerResponse(('yes',)), response)
+
+
+class TestSmartServerRepositorySetMakeWorkingTrees(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_set_false(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.', shared=True)
+ repo.set_make_working_trees(True)
+ request_class = smart_repo.SmartServerRepositorySetMakeWorkingTrees
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok',)),
+ request.execute('', 'False'))
+ repo = repo.bzrdir.open_repository()
+ self.assertFalse(repo.make_working_trees())
+
+ def test_set_true(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.', shared=True)
+ repo.set_make_working_trees(False)
+ request_class = smart_repo.SmartServerRepositorySetMakeWorkingTrees
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok',)),
+ request.execute('', 'True'))
+ repo = repo.bzrdir.open_repository()
+ self.assertTrue(repo.make_working_trees())
+
+
+class TestSmartServerRepositoryGetSerializerFormat(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_get_serializer_format(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.', format='2a')
+ request_class = smart_repo.SmartServerRepositoryGetSerializerFormat
+ request = request_class(backing)
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', '10')),
+ request.execute(''))
+
+
+class TestSmartServerRepositoryWriteGroup(
+ tests.TestCaseWithMemoryTransport):
+
+ def test_start_write_group(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ lock_token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ request_class = smart_repo.SmartServerRepositoryStartWriteGroup
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok', [])),
+ request.execute('', lock_token))
+
+ def test_start_write_group_unsuspendable(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.', format='knit')
+ lock_token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ request_class = smart_repo.SmartServerRepositoryStartWriteGroup
+ request = request_class(backing)
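+        # A knit-format repository cannot suspend a write group, so starting
+        # one over the smart protocol is refused as UnsuspendableWriteGroup.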
+ self.assertEqual(
+ smart_req.FailedSmartServerResponse(('UnsuspendableWriteGroup',)),
+ request.execute('', lock_token))
+
+ def test_commit_write_group(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ lock_token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ repo.start_write_group()
+ tokens = repo.suspend_write_group()
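+        # Suspending the freshly started write group yields resume tokens,
+        # which are handed to the smart verb together with the lock token.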
+ request_class = smart_repo.SmartServerRepositoryCommitWriteGroup
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok',)),
+ request.execute('', lock_token, tokens))
+
+ def test_abort_write_group(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ lock_token = repo.lock_write().repository_token
+ repo.start_write_group()
+ tokens = repo.suspend_write_group()
+ self.addCleanup(repo.unlock)
+ request_class = smart_repo.SmartServerRepositoryAbortWriteGroup
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok',)),
+ request.execute('', lock_token, tokens))
+
+ def test_check_write_group(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ lock_token = repo.lock_write().repository_token
+ repo.start_write_group()
+ tokens = repo.suspend_write_group()
+ self.addCleanup(repo.unlock)
+ request_class = smart_repo.SmartServerRepositoryCheckWriteGroup
+ request = request_class(backing)
+ self.assertEqual(smart_req.SuccessfulSmartServerResponse(('ok',)),
+ request.execute('', lock_token, tokens))
+
+ def test_check_write_group_invalid(self):
+ backing = self.get_transport()
+ repo = self.make_repository('.')
+ lock_token = repo.lock_write().repository_token
+ self.addCleanup(repo.unlock)
+ request_class = smart_repo.SmartServerRepositoryCheckWriteGroup
+ request = request_class(backing)
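+        # 'random' is not a token from any suspended write group, so checking
+        # it fails with UnresumableWriteGroup.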
+ self.assertEqual(smart_req.FailedSmartServerResponse(
+ ('UnresumableWriteGroup', ['random'],
+ 'Malformed write group token')),
+ request.execute('', lock_token, ["random"]))
+
+
+class TestSmartServerPackRepositoryAutopack(tests.TestCaseWithTransport):
+
+ def make_repo_needing_autopacking(self, path='.'):
+ # Make a repo in need of autopacking.
+        tree = self.make_branch_and_tree(path, format='pack-0.92')
+ repo = tree.branch.repository
+ # monkey-patch the pack collection to disable autopacking
+ repo._pack_collection._max_pack_count = lambda count: count
+ for x in range(10):
+ tree.commit('commit %s' % x)
+ self.assertEqual(10, len(repo._pack_collection.names()))
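+        # Deleting the instance attribute restores the class's normal
+        # _max_pack_count, so autopacking can combine the packs again.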
+ del repo._pack_collection._max_pack_count
+ return repo
+
+ def test_autopack_needed(self):
+ repo = self.make_repo_needing_autopacking()
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ backing = self.get_transport()
+ request = smart_packrepo.SmartServerPackRepositoryAutopack(
+ backing)
+ response = request.execute('')
+ self.assertEqual(smart_req.SmartServerResponse(('ok',)), response)
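+        # Re-read the pack names so the assertion below observes the result
+        # of the server-side autopack.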
+ repo._pack_collection.reload_pack_names()
+ self.assertEqual(1, len(repo._pack_collection.names()))
+
+ def test_autopack_not_needed(self):
+ tree = self.make_branch_and_tree('.', format='pack-0.92')
+ repo = tree.branch.repository
+ repo.lock_write()
+ self.addCleanup(repo.unlock)
+ for x in range(9):
+ tree.commit('commit %s' % x)
+ backing = self.get_transport()
+ request = smart_packrepo.SmartServerPackRepositoryAutopack(
+ backing)
+ response = request.execute('')
+ self.assertEqual(smart_req.SmartServerResponse(('ok',)), response)
+ repo._pack_collection.reload_pack_names()
+ self.assertEqual(9, len(repo._pack_collection.names()))
+
+ def test_autopack_on_nonpack_format(self):
+ """A request to autopack a non-pack repo is a no-op."""
+ repo = self.make_repository('.', format='knit')
+ backing = self.get_transport()
+ request = smart_packrepo.SmartServerPackRepositoryAutopack(
+ backing)
+ response = request.execute('')
+ self.assertEqual(smart_req.SmartServerResponse(('ok',)), response)
+
+
+class TestSmartServerVfsGet(tests.TestCaseWithMemoryTransport):
+
+ def test_unicode_path(self):
+ """VFS requests expect unicode paths to be escaped."""
+ filename = u'foo\N{INTERROBANG}'
+ filename_escaped = urlutils.escape(filename)
+ backing = self.get_transport()
+ request = vfs.GetRequest(backing)
+ backing.put_bytes_non_atomic(filename_escaped, 'contents')
+ self.assertEqual(smart_req.SmartServerResponse(('ok', ), 'contents'),
+ request.execute(filename_escaped))
+
+
+class TestHandlers(tests.TestCase):
+ """Tests for the request.request_handlers object."""
+
+ def test_all_registrations_exist(self):
+ """All registered request_handlers can be found."""
+ # If there's a typo in a register_lazy call, this loop will fail with
+ # an AttributeError.
+ for key in smart_req.request_handlers.keys():
+ try:
+ item = smart_req.request_handlers.get(key)
+ except AttributeError, e:
+ raise AttributeError('failed to get %s: %s' % (key, e))
+
+ def assertHandlerEqual(self, verb, handler):
+ self.assertEqual(smart_req.request_handlers.get(verb), handler)
+
+ def test_registered_methods(self):
+ """Test that known methods are registered to the correct object."""
+ self.assertHandlerEqual('Branch.break_lock',
+ smart_branch.SmartServerBranchBreakLock)
+ self.assertHandlerEqual('Branch.get_config_file',
+ smart_branch.SmartServerBranchGetConfigFile)
+ self.assertHandlerEqual('Branch.put_config_file',
+ smart_branch.SmartServerBranchPutConfigFile)
+ self.assertHandlerEqual('Branch.get_parent',
+ smart_branch.SmartServerBranchGetParent)
+ self.assertHandlerEqual('Branch.get_physical_lock_status',
+ smart_branch.SmartServerBranchRequestGetPhysicalLockStatus)
+ self.assertHandlerEqual('Branch.get_tags_bytes',
+ smart_branch.SmartServerBranchGetTagsBytes)
+ self.assertHandlerEqual('Branch.lock_write',
+ smart_branch.SmartServerBranchRequestLockWrite)
+ self.assertHandlerEqual('Branch.last_revision_info',
+ smart_branch.SmartServerBranchRequestLastRevisionInfo)
+ self.assertHandlerEqual('Branch.revision_history',
+ smart_branch.SmartServerRequestRevisionHistory)
+ self.assertHandlerEqual('Branch.revision_id_to_revno',
+ smart_branch.SmartServerBranchRequestRevisionIdToRevno)
+ self.assertHandlerEqual('Branch.set_config_option',
+ smart_branch.SmartServerBranchRequestSetConfigOption)
+ self.assertHandlerEqual('Branch.set_last_revision',
+ smart_branch.SmartServerBranchRequestSetLastRevision)
+ self.assertHandlerEqual('Branch.set_last_revision_info',
+ smart_branch.SmartServerBranchRequestSetLastRevisionInfo)
+ self.assertHandlerEqual('Branch.set_last_revision_ex',
+ smart_branch.SmartServerBranchRequestSetLastRevisionEx)
+ self.assertHandlerEqual('Branch.set_parent_location',
+ smart_branch.SmartServerBranchRequestSetParentLocation)
+ self.assertHandlerEqual('Branch.unlock',
+ smart_branch.SmartServerBranchRequestUnlock)
+ self.assertHandlerEqual('BzrDir.destroy_branch',
+ smart_dir.SmartServerBzrDirRequestDestroyBranch)
+ self.assertHandlerEqual('BzrDir.find_repository',
+ smart_dir.SmartServerRequestFindRepositoryV1)
+ self.assertHandlerEqual('BzrDir.find_repositoryV2',
+ smart_dir.SmartServerRequestFindRepositoryV2)
+ self.assertHandlerEqual('BzrDirFormat.initialize',
+ smart_dir.SmartServerRequestInitializeBzrDir)
+ self.assertHandlerEqual('BzrDirFormat.initialize_ex_1.16',
+ smart_dir.SmartServerRequestBzrDirInitializeEx)
+ self.assertHandlerEqual('BzrDir.checkout_metadir',
+ smart_dir.SmartServerBzrDirRequestCheckoutMetaDir)
+ self.assertHandlerEqual('BzrDir.cloning_metadir',
+ smart_dir.SmartServerBzrDirRequestCloningMetaDir)
+ self.assertHandlerEqual('BzrDir.get_branches',
+ smart_dir.SmartServerBzrDirRequestGetBranches)
+ self.assertHandlerEqual('BzrDir.get_config_file',
+ smart_dir.SmartServerBzrDirRequestConfigFile)
+ self.assertHandlerEqual('BzrDir.open_branch',
+ smart_dir.SmartServerRequestOpenBranch)
+ self.assertHandlerEqual('BzrDir.open_branchV2',
+ smart_dir.SmartServerRequestOpenBranchV2)
+ self.assertHandlerEqual('BzrDir.open_branchV3',
+ smart_dir.SmartServerRequestOpenBranchV3)
+ self.assertHandlerEqual('PackRepository.autopack',
+ smart_packrepo.SmartServerPackRepositoryAutopack)
+ self.assertHandlerEqual('Repository.add_signature_text',
+ smart_repo.SmartServerRepositoryAddSignatureText)
+ self.assertHandlerEqual('Repository.all_revision_ids',
+ smart_repo.SmartServerRepositoryAllRevisionIds)
+ self.assertHandlerEqual('Repository.break_lock',
+ smart_repo.SmartServerRepositoryBreakLock)
+ self.assertHandlerEqual('Repository.gather_stats',
+ smart_repo.SmartServerRepositoryGatherStats)
+ self.assertHandlerEqual('Repository.get_parent_map',
+ smart_repo.SmartServerRepositoryGetParentMap)
+ self.assertHandlerEqual('Repository.get_physical_lock_status',
+ smart_repo.SmartServerRepositoryGetPhysicalLockStatus)
+ self.assertHandlerEqual('Repository.get_rev_id_for_revno',
+ smart_repo.SmartServerRepositoryGetRevIdForRevno)
+ self.assertHandlerEqual('Repository.get_revision_graph',
+ smart_repo.SmartServerRepositoryGetRevisionGraph)
+ self.assertHandlerEqual('Repository.get_revision_signature_text',
+ smart_repo.SmartServerRepositoryGetRevisionSignatureText)
+ self.assertHandlerEqual('Repository.get_stream',
+ smart_repo.SmartServerRepositoryGetStream)
+ self.assertHandlerEqual('Repository.get_stream_1.19',
+ smart_repo.SmartServerRepositoryGetStream_1_19)
+ self.assertHandlerEqual('Repository.iter_revisions',
+ smart_repo.SmartServerRepositoryIterRevisions)
+ self.assertHandlerEqual('Repository.has_revision',
+ smart_repo.SmartServerRequestHasRevision)
+ self.assertHandlerEqual('Repository.insert_stream',
+ smart_repo.SmartServerRepositoryInsertStream)
+ self.assertHandlerEqual('Repository.insert_stream_locked',
+ smart_repo.SmartServerRepositoryInsertStreamLocked)
+ self.assertHandlerEqual('Repository.is_shared',
+ smart_repo.SmartServerRepositoryIsShared)
+ self.assertHandlerEqual('Repository.iter_files_bytes',
+ smart_repo.SmartServerRepositoryIterFilesBytes)
+ self.assertHandlerEqual('Repository.lock_write',
+ smart_repo.SmartServerRepositoryLockWrite)
+ self.assertHandlerEqual('Repository.make_working_trees',
+ smart_repo.SmartServerRepositoryMakeWorkingTrees)
+ self.assertHandlerEqual('Repository.pack',
+ smart_repo.SmartServerRepositoryPack)
+ self.assertHandlerEqual('Repository.reconcile',
+ smart_repo.SmartServerRepositoryReconcile)
+ self.assertHandlerEqual('Repository.tarball',
+ smart_repo.SmartServerRepositoryTarball)
+ self.assertHandlerEqual('Repository.unlock',
+ smart_repo.SmartServerRepositoryUnlock)
+ self.assertHandlerEqual('Repository.start_write_group',
+ smart_repo.SmartServerRepositoryStartWriteGroup)
+ self.assertHandlerEqual('Repository.check_write_group',
+ smart_repo.SmartServerRepositoryCheckWriteGroup)
+ self.assertHandlerEqual('Repository.commit_write_group',
+ smart_repo.SmartServerRepositoryCommitWriteGroup)
+ self.assertHandlerEqual('Repository.abort_write_group',
+ smart_repo.SmartServerRepositoryAbortWriteGroup)
+ self.assertHandlerEqual('VersionedFileRepository.get_serializer_format',
+ smart_repo.SmartServerRepositoryGetSerializerFormat)
+ self.assertHandlerEqual('VersionedFileRepository.get_inventories',
+ smart_repo.SmartServerRepositoryGetInventories)
+ self.assertHandlerEqual('Transport.is_readonly',
+ smart_req.SmartServerIsReadonly)
+
+
+class SmartTCPServerHookTests(tests.TestCaseWithMemoryTransport):
+ """Tests for SmartTCPServer hooks."""
+
+ def setUp(self):
+ super(SmartTCPServerHookTests, self).setUp()
+ self.server = server.SmartTCPServer(self.get_transport())
+
+ def test_run_server_started_hooks(self):
+ """Test the server started hooks get fired properly."""
+ started_calls = []
+ server.SmartTCPServer.hooks.install_named_hook('server_started',
+ lambda backing_urls, url: started_calls.append((backing_urls, url)),
+ None)
+ started_ex_calls = []
+ server.SmartTCPServer.hooks.install_named_hook('server_started_ex',
+ lambda backing_urls, url: started_ex_calls.append((backing_urls, url)),
+ None)
+ self.server._sockname = ('example.com', 42)
+ self.server.run_server_started_hooks()
+ self.assertEquals(started_calls,
+ [([self.get_transport().base], 'bzr://example.com:42/')])
+ self.assertEquals(started_ex_calls,
+ [([self.get_transport().base], self.server)])
+
+ def test_run_server_started_hooks_ipv6(self):
+ """Test that socknames can contain 4-tuples."""
+ self.server._sockname = ('::', 42, 0, 0)
+ started_calls = []
+ server.SmartTCPServer.hooks.install_named_hook('server_started',
+ lambda backing_urls, url: started_calls.append((backing_urls, url)),
+ None)
+ self.server.run_server_started_hooks()
+ self.assertEquals(started_calls,
+ [([self.get_transport().base], 'bzr://:::42/')])
+
+ def test_run_server_stopped_hooks(self):
+ """Test the server stopped hooks."""
+ self.server._sockname = ('example.com', 42)
+ stopped_calls = []
+ server.SmartTCPServer.hooks.install_named_hook('server_stopped',
+ lambda backing_urls, url: stopped_calls.append((backing_urls, url)),
+ None)
+ self.server.run_server_stopped_hooks()
+ self.assertEquals(stopped_calls,
+ [([self.get_transport().base], 'bzr://example.com:42/')])
+
+
+class TestSmartServerRepositoryPack(tests.TestCaseWithMemoryTransport):
+
+ def test_pack(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryPack(backing)
+ tree = self.make_branch_and_memory_tree('.')
+ repo_token = tree.branch.repository.lock_write().repository_token
+
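+        # execute() returns None because the verb still expects a request
+        # body; the real response is produced by do_body().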
+ self.assertIs(None, request.execute('', repo_token, False))
+
+ self.assertEqual(
+ smart_req.SuccessfulSmartServerResponse(('ok', ), ),
+ request.do_body(''))
+
+
+class TestSmartServerRepositoryGetInventories(tests.TestCaseWithTransport):
+
+ def _get_serialized_inventory_delta(self, repository, base_revid, revid):
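+        # Helper: build the serialized inventory delta we expect the server
+        # to stream for revid relative to base_revid.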
+ base_inv = repository.revision_tree(base_revid).root_inventory
+ inv = repository.revision_tree(revid).root_inventory
+ inv_delta = inv._make_delta(base_inv)
+ serializer = inventory_delta.InventoryDeltaSerializer(True, False)
+ return "".join(serializer.delta_to_lines(base_revid, revid, inv_delta))
+
+ def test_single(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetInventories(backing)
+ t = self.make_branch_and_tree('.', format='2a')
+ self.addCleanup(t.lock_write().unlock)
+ self.build_tree_contents([("file", "somecontents")])
+ t.add(["file"], ["thefileid"])
+ t.commit(rev_id='somerev', message="add file")
+ self.assertIs(None, request.execute('', 'unordered'))
+ response = request.do_body("somerev\n")
+ self.assertTrue(response.is_successful())
+ self.assertEquals(response.args, ("ok", ))
+ stream = [('inventory-deltas', [
+ versionedfile.FulltextContentFactory('somerev', None, None,
+ self._get_serialized_inventory_delta(
+ t.branch.repository, 'null:', 'somerev'))])]
+ fmt = controldir.format_registry.get('2a')().repository_format
+ self.assertEquals(
+ "".join(response.body_stream),
+ "".join(smart_repo._stream_to_byte_stream(stream, fmt)))
+
+ def test_empty(self):
+ backing = self.get_transport()
+ request = smart_repo.SmartServerRepositoryGetInventories(backing)
+ t = self.make_branch_and_tree('.', format='2a')
+ self.addCleanup(t.lock_write().unlock)
+ self.build_tree_contents([("file", "somecontents")])
+ t.add(["file"], ["thefileid"])
+ t.commit(rev_id='somerev', message="add file")
+ self.assertIs(None, request.execute('', 'unordered'))
+ response = request.do_body("")
+ self.assertTrue(response.is_successful())
+ self.assertEquals(response.args, ("ok", ))
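+        # No revision ids were sent in the body, so the byte stream holds
+        # only the pack header, a record naming the repository format, and
+        # the end marker.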
+ self.assertEquals("".join(response.body_stream),
+ "Bazaar pack format 1 (introduced in 0.18)\nB54\n\nBazaar repository format 2a (needs bzr 1.16 or later)\nE")
diff --git a/bzrlib/tests/test_smart_add.py b/bzrlib/tests/test_smart_add.py
new file mode 100644
index 0000000..3906066
--- /dev/null
+++ b/bzrlib/tests/test_smart_add.py
@@ -0,0 +1,161 @@
+# Copyright (C) 2005, 2006, 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ add,
+ inventory,
+ osutils,
+ tests,
+ )
+
+
+class AddCustomIDAction(add.AddAction):
+
+ def __call__(self, inv, parent_ie, path, kind):
+        # Generate a custom file id from the kind and path, and log the
+        # addition below if should_print is set.
+ file_id = osutils.safe_file_id(kind + '-'
+ + path.replace('/', '%'),
+ warn=False)
+ if self.should_print:
+ self._to_file.write('added %s with id %s\n'
+ % (path, file_id))
+ return file_id
+
+
+class TestAddFrom(tests.TestCaseWithTransport):
+ """Tests for AddFromBaseAction"""
+
+ def make_base_tree(self):
+ self.base_tree = self.make_branch_and_tree('base')
+ self.build_tree(['base/a', 'base/b',
+ 'base/dir/', 'base/dir/a',
+ 'base/dir/subdir/',
+ 'base/dir/subdir/b',
+ ])
+ self.base_tree.add(['a', 'b', 'dir', 'dir/a',
+ 'dir/subdir', 'dir/subdir/b'])
+ self.base_tree.commit('creating initial tree.')
+
+ def add_helper(self, base_tree, base_path, new_tree, file_list,
+ should_print=False):
+ to_file = StringIO()
+ base_tree.lock_read()
+ try:
+ new_tree.lock_write()
+ try:
+ action = add.AddFromBaseAction(base_tree, base_path,
+ to_file=to_file,
+ should_print=should_print)
+ new_tree.smart_add(file_list, action=action)
+ finally:
+ new_tree.unlock()
+ finally:
+ base_tree.unlock()
+ return to_file.getvalue()
+
+ def test_copy_all(self):
+ self.make_base_tree()
+ new_tree = self.make_branch_and_tree('new')
+ files = ['a', 'b',
+ 'dir/', 'dir/a',
+ 'dir/subdir/',
+ 'dir/subdir/b',
+ ]
+ self.build_tree(['new/' + fn for fn in files])
+ self.add_helper(self.base_tree, '', new_tree, ['new'])
+
+ for fn in files:
+ base_file_id = self.base_tree.path2id(fn)
+ new_file_id = new_tree.path2id(fn)
+ self.assertEqual(base_file_id, new_file_id)
+
+ def test_copy_from_dir(self):
+ self.make_base_tree()
+ new_tree = self.make_branch_and_tree('new')
+
+ self.build_tree(['new/a', 'new/b', 'new/c',
+ 'new/subdir/', 'new/subdir/b', 'new/subdir/d'])
+ new_tree.set_root_id(self.base_tree.get_root_id())
+ self.add_helper(self.base_tree, 'dir', new_tree, ['new'])
+
+        # We know 'a' and 'b' exist in the root of the base tree, and they are
+        # being added in the root of the new tree. Since the root ids have
+        # been set to be the same, we will reuse those file ids.
+ self.assertEqual(self.base_tree.path2id('a'),
+ new_tree.path2id('a'))
+ self.assertEqual(self.base_tree.path2id('b'),
+ new_tree.path2id('b'))
+
+        # Because we specified 'dir/' as the base path, and nothing named
+        # 'subdir' exists at the top level of the base tree, the ids should
+        # be taken from 'dir/subdir'.
+ self.assertEqual(self.base_tree.path2id('dir/subdir'),
+ new_tree.path2id('subdir'))
+ self.assertEqual(self.base_tree.path2id('dir/subdir/b'),
+ new_tree.path2id('subdir/b'))
+
+ # These should get newly generated ids
+ c_id = new_tree.path2id('c')
+ self.assertNotEqual(None, c_id)
+ self.base_tree.lock_read()
+ self.addCleanup(self.base_tree.unlock)
+ self.assertFalse(self.base_tree.has_id(c_id))
+
+ d_id = new_tree.path2id('subdir/d')
+ self.assertNotEqual(None, d_id)
+ self.assertFalse(self.base_tree.has_id(d_id))
+
+ def test_copy_existing_dir(self):
+ self.make_base_tree()
+ new_tree = self.make_branch_and_tree('new')
+ self.build_tree(['new/subby/', 'new/subby/a', 'new/subby/b'])
+
+ subdir_file_id = self.base_tree.path2id('dir/subdir')
+ new_tree.add(['subby'], [subdir_file_id])
+ self.add_helper(self.base_tree, '', new_tree, ['new'])
+ # Because 'subby' already points to subdir, we should add
+ # 'b' with the same id
+ self.assertEqual(self.base_tree.path2id('dir/subdir/b'),
+ new_tree.path2id('subby/b'))
+
+ # 'subby/a' should be added with a new id because there is no
+ # matching path or child of 'subby'.
+ a_id = new_tree.path2id('subby/a')
+ self.assertNotEqual(None, a_id)
+ self.base_tree.lock_read()
+ self.addCleanup(self.base_tree.unlock)
+ self.assertFalse(self.base_tree.has_id(a_id))
+
+
+class TestAddActions(tests.TestCase):
+
+ def test_quiet(self):
+ self.run_action("")
+
+ def test__print(self):
+ self.run_action("adding path\n")
+
+ def run_action(self, output):
+ inv = inventory.Inventory()
+ stdout = StringIO()
+ action = add.AddAction(to_file=stdout, should_print=bool(output))
+
+ self.apply_redirected(None, stdout, None, action, inv, None,
+ 'path', 'file')
+ self.assertEqual(stdout.getvalue(), output)
diff --git a/bzrlib/tests/test_smart_request.py b/bzrlib/tests/test_smart_request.py
new file mode 100644
index 0000000..74da3a0
--- /dev/null
+++ b/bzrlib/tests/test_smart_request.py
@@ -0,0 +1,272 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for smart server request infrastructure (bzrlib.smart.request)."""
+
+import threading
+
+from bzrlib import (
+ errors,
+ transport,
+ )
+from bzrlib.bzrdir import BzrDir
+from bzrlib.smart import request
+from bzrlib.tests import TestCase, TestCaseWithMemoryTransport
+
+
+class NoBodyRequest(request.SmartServerRequest):
+ """A request that does not implement do_body."""
+
+ def do(self):
+ return request.SuccessfulSmartServerResponse(('ok',))
+
+
+class DoErrorRequest(request.SmartServerRequest):
+ """A request that raises an error from self.do()."""
+
+ def do(self):
+ raise errors.NoSuchFile('xyzzy')
+
+
+class DoUnexpectedErrorRequest(request.SmartServerRequest):
+    """A request that encounters a generic error in self.do()."""
+
+ def do(self):
+ dict()[1]
+
+
+class ChunkErrorRequest(request.SmartServerRequest):
+ """A request that raises an error from self.do_chunk()."""
+
+ def do(self):
+ """No-op."""
+ pass
+
+ def do_chunk(self, bytes):
+ raise errors.NoSuchFile('xyzzy')
+
+
+class EndErrorRequest(request.SmartServerRequest):
+ """A request that raises an error from self.do_end()."""
+
+ def do(self):
+ """No-op."""
+ pass
+
+ def do_chunk(self, bytes):
+ """No-op."""
+ pass
+
+ def do_end(self):
+ raise errors.NoSuchFile('xyzzy')
+
+
+class CheckJailRequest(request.SmartServerRequest):
+
+ def __init__(self, *args):
+ request.SmartServerRequest.__init__(self, *args)
+ self.jail_transports_log = []
+
+ def do(self):
+ self.jail_transports_log.append(request.jail_info.transports)
+
+ def do_chunk(self, bytes):
+ self.jail_transports_log.append(request.jail_info.transports)
+
+ def do_end(self):
+ self.jail_transports_log.append(request.jail_info.transports)
+
+
+class TestSmartRequest(TestCase):
+
+ def test_request_class_without_do_body(self):
+ """If a request has no body data, and the request's implementation does
+ not override do_body, then no exception is raised.
+ """
+ # Create a SmartServerRequestHandler with a SmartServerRequest subclass
+ # that does not implement do_body.
+ handler = request.SmartServerRequestHandler(
+ None, {'foo': NoBodyRequest}, '/')
+ # Emulate a request with no body (i.e. just args).
+ handler.args_received(('foo',))
+ handler.end_received()
+ # Request done, no exception was raised.
+
+ def test_only_request_code_is_jailed(self):
+ transport = 'dummy transport'
+ handler = request.SmartServerRequestHandler(
+ transport, {'foo': CheckJailRequest}, '/')
+ handler.args_received(('foo',))
+ self.assertEqual(None, request.jail_info.transports)
+ handler.accept_body('bytes')
+ self.assertEqual(None, request.jail_info.transports)
+ handler.end_received()
+ self.assertEqual(None, request.jail_info.transports)
+ self.assertEqual(
+ [[transport]] * 3, handler._command.jail_transports_log)
+
+ def test_all_registered_requests_are_safety_qualified(self):
+ unclassified_requests = []
+ allowed_info = ('read', 'idem', 'mutate', 'semivfs', 'semi', 'stream')
+ for key in request.request_handlers.keys():
+ info = request.request_handlers.get_info(key)
+ if info is None or info not in allowed_info:
+ unclassified_requests.append(key)
+ if unclassified_requests:
+ self.fail('These requests were not categorized as safe/unsafe'
+ ' to retry: %s' % (unclassified_requests,))
+
+
+class TestSmartRequestHandlerErrorTranslation(TestCase):
+ """Tests that SmartServerRequestHandler will translate exceptions raised by
+ a SmartServerRequest into FailedSmartServerResponses.
+ """
+
+ def assertNoResponse(self, handler):
+ self.assertEqual(None, handler.response)
+
+ def assertResponseIsTranslatedError(self, handler):
+ expected_translation = ('NoSuchFile', 'xyzzy')
+ self.assertEqual(
+ request.FailedSmartServerResponse(expected_translation),
+ handler.response)
+
+ def test_error_translation_from_args_received(self):
+ handler = request.SmartServerRequestHandler(
+ None, {'foo': DoErrorRequest}, '/')
+ handler.args_received(('foo',))
+ self.assertResponseIsTranslatedError(handler)
+
+ def test_error_translation_from_chunk_received(self):
+ handler = request.SmartServerRequestHandler(
+ None, {'foo': ChunkErrorRequest}, '/')
+ handler.args_received(('foo',))
+ self.assertNoResponse(handler)
+ handler.accept_body('bytes')
+ self.assertResponseIsTranslatedError(handler)
+
+ def test_error_translation_from_end_received(self):
+ handler = request.SmartServerRequestHandler(
+ None, {'foo': EndErrorRequest}, '/')
+ handler.args_received(('foo',))
+ self.assertNoResponse(handler)
+ handler.end_received()
+ self.assertResponseIsTranslatedError(handler)
+
+ def test_unexpected_error_translation(self):
+ handler = request.SmartServerRequestHandler(
+ None, {'foo': DoUnexpectedErrorRequest}, '/')
+ handler.args_received(('foo',))
+ self.assertEqual(
+ request.FailedSmartServerResponse(('error', 'KeyError', "1")),
+ handler.response)
+
+
+class TestRequestHandlerErrorTranslation(TestCase):
+ """Tests for bzrlib.smart.request._translate_error."""
+
+ def assertTranslationEqual(self, expected_tuple, error):
+ self.assertEqual(expected_tuple, request._translate_error(error))
+
+ def test_NoSuchFile(self):
+ self.assertTranslationEqual(
+ ('NoSuchFile', 'path'), errors.NoSuchFile('path'))
+
+ def test_LockContention(self):
+ # For now, LockContentions are always transmitted with no details.
+ # Eventually they should include a relpath or url or something else to
+ # identify which lock is busy.
+ self.assertTranslationEqual(
+ ('LockContention',), errors.LockContention('lock', 'msg'))
+
+ def test_TokenMismatch(self):
+ self.assertTranslationEqual(
+ ('TokenMismatch', 'some-token', 'actual-token'),
+ errors.TokenMismatch('some-token', 'actual-token'))
+
+ def test_MemoryError(self):
+ self.assertTranslationEqual(("MemoryError",), MemoryError())
+
+ def test_generic_Exception(self):
+ self.assertTranslationEqual(('error', 'Exception', ""),
+ Exception())
+
+ def test_generic_BzrError(self):
+ self.assertTranslationEqual(('error', 'BzrError', "some text"),
+ errors.BzrError(msg="some text"))
+
+ def test_generic_zlib_error(self):
+ from zlib import error
+ msg = "Error -3 while decompressing data: incorrect data check"
+ self.assertTranslationEqual(('error', 'zlib.error', msg),
+ error(msg))
+
+
+class TestRequestJail(TestCaseWithMemoryTransport):
+
+ def test_jail(self):
+ transport = self.get_transport('blah')
+ req = request.SmartServerRequest(transport)
+ self.assertEqual(None, request.jail_info.transports)
+ req.setup_jail()
+ self.assertEqual([transport], request.jail_info.transports)
+ req.teardown_jail()
+ self.assertEqual(None, request.jail_info.transports)
+
+
+class TestJailHook(TestCaseWithMemoryTransport):
+
+ def setUp(self):
+ super(TestJailHook, self).setUp()
+ def clear_jail_info():
+ request.jail_info.transports = None
+ self.addCleanup(clear_jail_info)
+
+ def test_jail_hook(self):
+ request.jail_info.transports = None
+ _pre_open_hook = request._pre_open_hook
+ # Any transport is fine if jail_info.transports is None
+ t = self.get_transport('foo')
+ _pre_open_hook(t)
+ # A transport in jail_info.transports is allowed
+ request.jail_info.transports = [t]
+ _pre_open_hook(t)
+ # A child of a transport in jail_info is allowed
+ _pre_open_hook(t.clone('child'))
+ # A parent is not allowed
+ self.assertRaises(errors.JailBreak, _pre_open_hook, t.clone('..'))
+ # A completely unrelated transport is not allowed
+ self.assertRaises(errors.JailBreak, _pre_open_hook,
+ transport.get_transport_from_url('http://host/'))
+
+ def test_open_bzrdir_in_non_main_thread(self):
+ """Opening a bzrdir in a non-main thread should work ok.
+
+ This makes sure that the globally-installed
+ bzrlib.smart.request._pre_open_hook, which uses a threading.local(),
+ works in a newly created thread.
+ """
+ bzrdir = self.make_bzrdir('.')
+ transport = bzrdir.root_transport
+ thread_result = []
+ def t():
+ BzrDir.open_from_transport(transport)
+ thread_result.append('ok')
+ thread = threading.Thread(target=t)
+ thread.start()
+ thread.join()
+ self.assertEqual(['ok'], thread_result)
+
diff --git a/bzrlib/tests/test_smart_signals.py b/bzrlib/tests/test_smart_signals.py
new file mode 100644
index 0000000..70b690b
--- /dev/null
+++ b/bzrlib/tests/test_smart_signals.py
@@ -0,0 +1,189 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+import signal
+import threading
+import weakref
+
+from bzrlib import tests, transport
+from bzrlib.smart import client, medium, server, signals
+
+# Windows doesn't define SIGHUP. And while we could just skip a lot of these
+# tests, we often don't actually care about interaction with 'signal', so we
+# can still run the tests for code coverage.
+SIGHUP = getattr(signal, 'SIGHUP', 1)
+
+
+class TestSignalHandlers(tests.TestCase):
+
+ def setUp(self):
+ super(TestSignalHandlers, self).setUp()
+ # This allows us to mutate the signal handler callbacks, but leave it
+ # 'pristine' after the test case.
+ # TODO: Arguably, this could be put into the base test.TestCase, along
+ # with a tearDown that asserts that all the entries have been
+ # removed properly. Global state is always a bit messy. A shame
+ # that we need it for signal handling.
+ orig = signals._setup_on_hangup_dict()
+ self.assertIs(None, orig)
+ def cleanup():
+ signals._on_sighup = None
+ self.addCleanup(cleanup)
+
+ def test_registered_callback_gets_called(self):
+ calls = []
+ def call_me():
+ calls.append('called')
+ signals.register_on_hangup('myid', call_me)
+ signals._sighup_handler(SIGHUP, None)
+ self.assertEqual(['called'], calls)
+ signals.unregister_on_hangup('myid')
+
+ def test_unregister_not_present(self):
+ # We don't want unregister to fail, since it is generally run at times
+ # that shouldn't interrupt other flow.
+ signals.unregister_on_hangup('no-such-id')
+ log = self.get_log()
+ self.assertContainsRe(log, 'Error occurred during unregister_on_hangup:')
+ self.assertContainsRe(log, '(?s)Traceback.*KeyError')
+
+ def test_failing_callback(self):
+ calls = []
+ def call_me():
+ calls.append('called')
+ def fail_me():
+ raise RuntimeError('something bad happened')
+ signals.register_on_hangup('myid', call_me)
+ signals.register_on_hangup('otherid', fail_me)
+ # _sighup_handler should call both, even though it got an exception
+ signals._sighup_handler(SIGHUP, None)
+ signals.unregister_on_hangup('myid')
+ signals.unregister_on_hangup('otherid')
+ log = self.get_log()
+ self.assertContainsRe(log, '(?s)Traceback.*RuntimeError')
+ self.assertEqual(['called'], calls)
+
+ def test_unregister_during_call(self):
+        # _sighup_handler should cope with callbacks that remove themselves
+        # while it is running.
+ calls = []
+ def call_me_and_unregister():
+ signals.unregister_on_hangup('myid')
+ calls.append('called_and_unregistered')
+ def call_me():
+ calls.append('called')
+ signals.register_on_hangup('myid', call_me_and_unregister)
+ signals.register_on_hangup('other', call_me)
+ signals._sighup_handler(SIGHUP, None)
+
+ def test_keyboard_interrupt_propagated(self):
+ # In case we get 'stuck' while running a hangup function, we should
+ # not suppress KeyboardInterrupt
+ def call_me_and_raise():
+ raise KeyboardInterrupt()
+ signals.register_on_hangup('myid', call_me_and_raise)
+ self.assertRaises(KeyboardInterrupt,
+ signals._sighup_handler, SIGHUP, None)
+ signals.unregister_on_hangup('myid')
+
+ def test_weak_references(self):
+ # TODO: This is probably a very-CPython-specific test
+ # Adding yourself to the callback should not make you immortal
+ # We overrideAttr during the test suite, so that we don't pollute the
+ # original dict. However, we can test that what we override matches
+ # what we are putting there.
+ self.assertIsInstance(signals._on_sighup,
+ weakref.WeakValueDictionary)
+ calls = []
+ def call_me():
+ calls.append('called')
+ signals.register_on_hangup('myid', call_me)
+ del call_me
+ # Non-CPython might want to do a gc.collect() here
+ signals._sighup_handler(SIGHUP, None)
+ self.assertEqual([], calls)
+
+ def test_not_installed(self):
+ # If you haven't called bzrlib.smart.signals.install_sighup_handler,
+ # then _on_sighup should be None, and all the calls become no-ops.
+ signals._on_sighup = None
+ calls = []
+ def call_me():
+ calls.append('called')
+        signals.register_on_hangup('myid', call_me)
+ signals._sighup_handler(SIGHUP, None)
+ signals.unregister_on_hangup('myid')
+ log = self.get_log()
+ self.assertEqual('', log)
+
+ def test_install_sighup_handler(self):
+ # install_sighup_handler should set up a signal handler for SIGHUP, as
+ # well as the signals._on_sighup dict.
+ signals._on_sighup = None
+ orig = signals.install_sighup_handler()
+ if getattr(signal, 'SIGHUP', None) is not None:
+ cur = signal.getsignal(SIGHUP)
+ self.assertEqual(signals._sighup_handler, cur)
+ self.assertIsNot(None, signals._on_sighup)
+ signals.restore_sighup_handler(orig)
+ self.assertIs(None, signals._on_sighup)
+
+
+class TestInetServer(tests.TestCase):
+
+ def create_file_pipes(self):
+ r, w = os.pipe()
+ rf = os.fdopen(r, 'rb')
+ wf = os.fdopen(w, 'wb')
+ return rf, wf
+
+ def test_inet_server_responds_to_sighup(self):
+ t = transport.get_transport('memory:///')
+ content = 'a'*1024*1024
+ t.put_bytes('bigfile', content)
+ factory = server.BzrServerFactory()
+ # Override stdin/stdout so that we can inject our own handles
+ client_read, server_write = self.create_file_pipes()
+ server_read, client_write = self.create_file_pipes()
+ factory._get_stdin_stdout = lambda: (server_read, server_write)
+ factory.set_up(t, None, None, inet=True, timeout=4.0)
+ self.addCleanup(factory.tear_down)
+ started = threading.Event()
+ stopped = threading.Event()
+ def serving():
+ started.set()
+ factory.smart_server.serve()
+ stopped.set()
+ server_thread = threading.Thread(target=serving)
+ server_thread.start()
+ started.wait()
+ client_medium = medium.SmartSimplePipesClientMedium(client_read,
+ client_write, 'base')
+ client_client = client._SmartClient(client_medium)
+ resp, response_handler = client_client.call_expecting_body('get',
+ 'bigfile')
+ signals._sighup_handler(SIGHUP, None)
+ self.assertTrue(factory.smart_server.finished)
+        # We can still finish reading the file content; beyond that, the file
+        # is closed.
+ v = response_handler.read_body_bytes()
+ if v != content:
+ self.fail('Got the wrong content back, expected 1M "a"')
+ stopped.wait()
+ server_thread.join()
+
diff --git a/bzrlib/tests/test_smart_transport.py b/bzrlib/tests/test_smart_transport.py
new file mode 100644
index 0000000..a4e8a39
--- /dev/null
+++ b/bzrlib/tests/test_smart_transport.py
@@ -0,0 +1,4299 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for smart transport"""
+
+# all of this deals with byte strings so this is safe
+from cStringIO import StringIO
+import doctest
+import errno
+import os
+import socket
+import subprocess
+import sys
+import threading
+import time
+
+from testtools.matchers import DocTestMatches
+
+import bzrlib
+from bzrlib import (
+ bzrdir,
+ controldir,
+ debug,
+ errors,
+ osutils,
+ tests,
+ transport as _mod_transport,
+ urlutils,
+ )
+from bzrlib.smart import (
+ client,
+ medium,
+ message,
+ protocol,
+ request as _mod_request,
+ server as _mod_server,
+ vfs,
+)
+from bzrlib.tests import (
+ features,
+ test_smart,
+ test_server,
+ )
+from bzrlib.transport import (
+ http,
+ local,
+ memory,
+ remote,
+ ssh,
+ )
+
+
+def create_file_pipes():
+ r, w = os.pipe()
+ # These must be opened without buffering, or we get undefined results
+ rf = os.fdopen(r, 'rb', 0)
+ wf = os.fdopen(w, 'wb', 0)
+ return rf, wf
+
+
+def portable_socket_pair():
+ """Return a pair of TCP sockets connected to each other.
+
+ Unlike socket.socketpair, this should work on Windows.
+ """
+ listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ listen_sock.bind(('127.0.0.1', 0))
+ listen_sock.listen(1)
+ client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ client_sock.connect(listen_sock.getsockname())
+ server_sock, addr = listen_sock.accept()
+ listen_sock.close()
+ return server_sock, client_sock
+
+
+class StringIOSSHVendor(object):
+    """An SSH vendor that uses StringIO to buffer writes and answer reads."""
+
+ def __init__(self, read_from, write_to):
+ self.read_from = read_from
+ self.write_to = write_to
+ self.calls = []
+
+ def connect_ssh(self, username, password, host, port, command):
+ self.calls.append(('connect_ssh', username, password, host, port,
+ command))
+ return StringIOSSHConnection(self)
+
+
+class FirstRejectedStringIOSSHVendor(StringIOSSHVendor):
+ """The first connection will be considered closed.
+
+ The second connection will succeed normally.
+ """
+
+ def __init__(self, read_from, write_to, fail_at_write=True):
+ super(FirstRejectedStringIOSSHVendor, self).__init__(read_from,
+ write_to)
+ self.fail_at_write = fail_at_write
+ self._first = True
+
+ def connect_ssh(self, username, password, host, port, command):
+ self.calls.append(('connect_ssh', username, password, host, port,
+ command))
+ if self._first:
+ self._first = False
+ return ClosedSSHConnection(self)
+ return StringIOSSHConnection(self)
+
+
+class StringIOSSHConnection(ssh.SSHConnection):
+    """An SSH connection that uses StringIO to buffer writes and answer reads."""
+
+ def __init__(self, vendor):
+ self.vendor = vendor
+
+ def close(self):
+ self.vendor.calls.append(('close', ))
+ self.vendor.read_from.close()
+ self.vendor.write_to.close()
+
+ def get_sock_or_pipes(self):
+ return 'pipes', (self.vendor.read_from, self.vendor.write_to)
+
+
+class ClosedSSHConnection(ssh.SSHConnection):
+ """An SSH connection that just has closed channels."""
+
+ def __init__(self, vendor):
+ self.vendor = vendor
+
+ def close(self):
+ self.vendor.calls.append(('close', ))
+
+ def get_sock_or_pipes(self):
+ # We create matching pipes, and then close the ssh side
+ bzr_read, ssh_write = create_file_pipes()
+ # We always fail when bzr goes to read
+ ssh_write.close()
+ if self.vendor.fail_at_write:
+ # If set, we'll also fail when bzr goes to write
+ ssh_read, bzr_write = create_file_pipes()
+ ssh_read.close()
+ else:
+ bzr_write = self.vendor.write_to
+ return 'pipes', (bzr_read, bzr_write)
+
+
+class _InvalidHostnameFeature(features.Feature):
+ """Does 'non_existent.invalid' fail to resolve?
+
+ RFC 2606 states that .invalid is reserved for invalid domain names, and
+    underscores are not valid characters in domain names. Despite this,
+ it's possible a badly misconfigured name server might decide to always
+ return an address for any name, so this feature allows us to distinguish a
+ broken system from a broken test.
+ """
+
+ def _probe(self):
+ try:
+ socket.gethostbyname('non_existent.invalid')
+ except socket.gaierror:
+ # The host name failed to resolve. Good.
+ return True
+ else:
+ return False
+
+ def feature_name(self):
+ return 'invalid hostname'
+
+InvalidHostnameFeature = _InvalidHostnameFeature()
+
+
+class SmartClientMediumTests(tests.TestCase):
+ """Tests for SmartClientMedium.
+
+ We should create a test scenario for this: we need a server module that
+    constructs the test-servers (like make_loopsocket_and_medium), and the list
+ of SmartClientMedium classes to test.
+ """
+
+ def make_loopsocket_and_medium(self):
+ """Create a loopback socket for testing, and a medium aimed at it."""
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('127.0.0.1', 0))
+ sock.listen(1)
+ port = sock.getsockname()[1]
+ client_medium = medium.SmartTCPClientMedium('127.0.0.1', port, 'base')
+ return sock, client_medium
+
+ def receive_bytes_on_server(self, sock, bytes):
+ """Accept a connection on sock and read 3 bytes.
+
+ The bytes are appended to the list bytes.
+
+ :return: a Thread which is running to do the accept and recv.
+ """
+ def _receive_bytes_on_server():
+ connection, address = sock.accept()
+ bytes.append(osutils.recv_all(connection, 3))
+ connection.close()
+ t = threading.Thread(target=_receive_bytes_on_server)
+ t.start()
+ return t
+
+ def test_construct_smart_simple_pipes_client_medium(self):
+ # the SimplePipes client medium takes two pipes:
+ # readable pipe, writeable pipe.
+ # Constructing one should just save these and do nothing.
+ # We test this by passing in None.
+ client_medium = medium.SmartSimplePipesClientMedium(None, None, None)
+
+ def test_simple_pipes_client_request_type(self):
+ # SimplePipesClient should use SmartClientStreamMediumRequest's.
+ client_medium = medium.SmartSimplePipesClientMedium(None, None, None)
+ request = client_medium.get_request()
+ self.assertIsInstance(request, medium.SmartClientStreamMediumRequest)
+
+ def test_simple_pipes_client_get_concurrent_requests(self):
+        # the simple_pipes client does not support pipelined requests,
+        # but it does support serial requests: we construct one after
+ # another is finished. This is a smoke test testing the integration
+ # of the SmartClientStreamMediumRequest and the SmartClientStreamMedium
+ # classes - as the sibling classes share this logic, they do not have
+ # explicit tests for this.
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = client_medium.get_request()
+ request.finished_writing()
+ request.finished_reading()
+ request2 = client_medium.get_request()
+ request2.finished_writing()
+ request2.finished_reading()
+
+ def test_simple_pipes_client__accept_bytes_writes_to_writable(self):
+ # accept_bytes writes to the writeable pipe.
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ client_medium._accept_bytes('abc')
+ self.assertEqual('abc', output.getvalue())
+
+ def test_simple_pipes__accept_bytes_subprocess_closed(self):
+ # It is unfortunate that we have to use Popen for this. However,
+ # os.pipe() does not behave the same as subprocess.Popen().
+ # On Windows, if you use os.pipe() and close the write side,
+ # read.read() hangs. On Linux, read.read() returns the empty string.
+ p = subprocess.Popen([sys.executable, '-c',
+ 'import sys\n'
+ 'sys.stdout.write(sys.stdin.read(4))\n'
+ 'sys.stdout.close()\n'],
+ stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ client_medium = medium.SmartSimplePipesClientMedium(
+ p.stdout, p.stdin, 'base')
+ client_medium._accept_bytes('abc\n')
+ self.assertEqual('abc', client_medium._read_bytes(3))
+ p.wait()
+ # While writing to the underlying pipe,
+ # Windows py2.6.6 we get IOError(EINVAL)
+ # Lucid py2.6.5, we get IOError(EPIPE)
+ # In both cases, it should be wrapped to ConnectionReset
+ self.assertRaises(errors.ConnectionReset,
+ client_medium._accept_bytes, 'more')
+
+ def test_simple_pipes__accept_bytes_pipe_closed(self):
+ child_read, client_write = create_file_pipes()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, client_write, 'base')
+ client_medium._accept_bytes('abc\n')
+ self.assertEqual('abc\n', child_read.read(4))
+ # While writing to the underlying pipe,
+ # Windows py2.6.6 we get IOError(EINVAL)
+ # Lucid py2.6.5, we get IOError(EPIPE)
+ # In both cases, it should be wrapped to ConnectionReset
+ child_read.close()
+ self.assertRaises(errors.ConnectionReset,
+ client_medium._accept_bytes, 'more')
+
+ def test_simple_pipes__flush_pipe_closed(self):
+ child_read, client_write = create_file_pipes()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, client_write, 'base')
+ client_medium._accept_bytes('abc\n')
+ child_read.close()
+ # Even though the pipe is closed, flush on the write side seems to be a
+ # no-op, rather than a failure.
+ client_medium._flush()
+
+ def test_simple_pipes__flush_subprocess_closed(self):
+ p = subprocess.Popen([sys.executable, '-c',
+ 'import sys\n'
+ 'sys.stdout.write(sys.stdin.read(4))\n'
+ 'sys.stdout.close()\n'],
+ stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ client_medium = medium.SmartSimplePipesClientMedium(
+ p.stdout, p.stdin, 'base')
+ client_medium._accept_bytes('abc\n')
+ p.wait()
+ # Even though the child process is dead, flush seems to be a no-op.
+ client_medium._flush()
+
+ def test_simple_pipes__read_bytes_pipe_closed(self):
+ child_read, client_write = create_file_pipes()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ child_read, client_write, 'base')
+ client_medium._accept_bytes('abc\n')
+ client_write.close()
+ self.assertEqual('abc\n', client_medium._read_bytes(4))
+ self.assertEqual('', client_medium._read_bytes(4))
+
+ def test_simple_pipes__read_bytes_subprocess_closed(self):
+ p = subprocess.Popen([sys.executable, '-c',
+ 'import sys\n'
+ 'if sys.platform == "win32":\n'
+ ' import msvcrt, os\n'
+ ' msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n'
+ ' msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n'
+ 'sys.stdout.write(sys.stdin.read(4))\n'
+ 'sys.stdout.close()\n'],
+ stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ client_medium = medium.SmartSimplePipesClientMedium(
+ p.stdout, p.stdin, 'base')
+ client_medium._accept_bytes('abc\n')
+ p.wait()
+ self.assertEqual('abc\n', client_medium._read_bytes(4))
+ self.assertEqual('', client_medium._read_bytes(4))
+
+ def test_simple_pipes_client_disconnect_does_nothing(self):
+ # calling disconnect does nothing.
+ input = StringIO()
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ # send some bytes to ensure disconnecting after activity still does not
+ # close.
+ client_medium._accept_bytes('abc')
+ client_medium.disconnect()
+ self.assertFalse(input.closed)
+ self.assertFalse(output.closed)
+
+ def test_simple_pipes_client_accept_bytes_after_disconnect(self):
+ # calling disconnect on the client does not alter the pipe that
+ # accept_bytes writes to.
+ input = StringIO()
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ client_medium._accept_bytes('abc')
+ client_medium.disconnect()
+ client_medium._accept_bytes('abc')
+ self.assertFalse(input.closed)
+ self.assertFalse(output.closed)
+ self.assertEqual('abcabc', output.getvalue())
+
+ def test_simple_pipes_client_ignores_disconnect_when_not_connected(self):
+ # Doing a disconnect on a new (and thus unconnected) SimplePipes medium
+ # does nothing.
+ client_medium = medium.SmartSimplePipesClientMedium(None, None, 'base')
+ client_medium.disconnect()
+
+ def test_simple_pipes_client_can_always_read(self):
+ # SmartSimplePipesClientMedium is never disconnected, so read_bytes
+ # always tries to read from the underlying pipe.
+ input = StringIO('abcdef')
+ client_medium = medium.SmartSimplePipesClientMedium(input, None, 'base')
+ self.assertEqual('abc', client_medium.read_bytes(3))
+ client_medium.disconnect()
+ self.assertEqual('def', client_medium.read_bytes(3))
+
+ def test_simple_pipes_client_supports__flush(self):
+ # invoking _flush on a SimplePipesClient should flush the output
+ # pipe. We test this by creating an output pipe that records
+ # flush calls made to it.
+ from StringIO import StringIO # get regular StringIO
+ input = StringIO()
+ output = StringIO()
+ flush_calls = []
+ def logging_flush(): flush_calls.append('flush')
+ output.flush = logging_flush
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ # this call is here to ensure we only flush once, not on every
+ # _accept_bytes call.
+ client_medium._accept_bytes('abc')
+ client_medium._flush()
+ client_medium.disconnect()
+ self.assertEqual(['flush'], flush_calls)
+
+ def test_construct_smart_ssh_client_medium(self):
+ # the SSH client medium takes:
+ # host, port, username, password, vendor
+ # Constructing one should just save these and do nothing.
+        # we test this by creating an empty bound socket and constructing
+ # a medium.
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('127.0.0.1', 0))
+ unopened_port = sock.getsockname()[1]
+ # having vendor be invalid means that if it tries to connect via the
+ # vendor it will blow up.
+ ssh_params = medium.SSHParams('127.0.0.1', unopened_port, None, None)
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', ssh_params, "not a vendor")
+ sock.close()
+
+ def test_ssh_client_connects_on_first_use(self):
+ # The only thing that initiates a connection from the medium is giving
+ # it bytes.
+ output = StringIO()
+ vendor = StringIOSSHVendor(StringIO(), output)
+ ssh_params = medium.SSHParams(
+ 'a hostname', 'a port', 'a username', 'a password', 'bzr')
+ client_medium = medium.SmartSSHClientMedium('base', ssh_params, vendor)
+ client_medium._accept_bytes('abc')
+ self.assertEqual('abc', output.getvalue())
+ self.assertEqual([('connect_ssh', 'a username', 'a password',
+ 'a hostname', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes'])],
+ vendor.calls)
+
+ def test_ssh_client_changes_command_when_bzr_remote_path_passed(self):
+ # The only thing that initiates a connection from the medium is giving
+ # it bytes.
+ output = StringIO()
+ vendor = StringIOSSHVendor(StringIO(), output)
+ ssh_params = medium.SSHParams(
+ 'a hostname', 'a port', 'a username', 'a password',
+ bzr_remote_path='fugly')
+ client_medium = medium.SmartSSHClientMedium('base', ssh_params, vendor)
+ client_medium._accept_bytes('abc')
+ self.assertEqual('abc', output.getvalue())
+ self.assertEqual([('connect_ssh', 'a username', 'a password',
+ 'a hostname', 'a port',
+ ['fugly', 'serve', '--inet', '--directory=/', '--allow-writes'])],
+ vendor.calls)
+
+        # calling disconnect should close both the read_from and write_to
+        # file-like objects obtained from the ssh connection.
+ # file-like object it from the ssh connection.
+ input = StringIO()
+ output = StringIO()
+ vendor = StringIOSSHVendor(input, output)
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams('a hostname'), vendor)
+ client_medium._accept_bytes('abc')
+ client_medium.disconnect()
+ self.assertTrue(input.closed)
+ self.assertTrue(output.closed)
+ self.assertEqual([
+ ('connect_ssh', None, None, 'a hostname', None,
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close', ),
+ ],
+ vendor.calls)
+
+ def test_ssh_client_disconnect_allows_reconnection(self):
+ # calling disconnect on the client terminates the connection, but should
+        # not prevent additional connections occurring.
+ # we test this by initiating a second connection after doing a
+ # disconnect.
+ input = StringIO()
+ output = StringIO()
+ vendor = StringIOSSHVendor(input, output)
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams('a hostname'), vendor)
+ client_medium._accept_bytes('abc')
+ client_medium.disconnect()
+ # the disconnect has closed output, so we need a new output for the
+ # new connection to write to.
+ input2 = StringIO()
+ output2 = StringIO()
+ vendor.read_from = input2
+ vendor.write_to = output2
+ client_medium._accept_bytes('abc')
+ client_medium.disconnect()
+ self.assertTrue(input.closed)
+ self.assertTrue(output.closed)
+ self.assertTrue(input2.closed)
+ self.assertTrue(output2.closed)
+ self.assertEqual([
+ ('connect_ssh', None, None, 'a hostname', None,
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close', ),
+ ('connect_ssh', None, None, 'a hostname', None,
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close', ),
+ ],
+ vendor.calls)
+
+ def test_ssh_client_repr(self):
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams("example.com", "4242", "username"))
+ self.assertEquals(
+ "SmartSSHClientMedium(bzr+ssh://username@example.com:4242/)",
+ repr(client_medium))
+
+ def test_ssh_client_repr_no_port(self):
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams("example.com", None, "username"))
+ self.assertEquals(
+ "SmartSSHClientMedium(bzr+ssh://username@example.com/)",
+ repr(client_medium))
+
+ def test_ssh_client_repr_no_username(self):
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams("example.com", None, None))
+ self.assertEquals(
+ "SmartSSHClientMedium(bzr+ssh://example.com/)",
+ repr(client_medium))
+
+ def test_ssh_client_ignores_disconnect_when_not_connected(self):
+ # Doing a disconnect on a new (and thus unconnected) SSH medium
+ # does not fail. It's ok to disconnect an unconnected medium.
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams(None))
+ client_medium.disconnect()
+
+ def test_ssh_client_raises_on_read_when_not_connected(self):
+ # Doing a read on a new (and thus unconnected) SSH medium raises
+ # MediumNotConnected.
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams(None))
+ self.assertRaises(errors.MediumNotConnected, client_medium.read_bytes,
+ 0)
+ self.assertRaises(errors.MediumNotConnected, client_medium.read_bytes,
+ 1)
+
+ def test_ssh_client_supports__flush(self):
+ # invoking _flush on an SSHClientMedium should flush the output
+ # pipe. We test this by creating an output pipe that records
+ # flush calls made to it.
+ from StringIO import StringIO # get regular StringIO
+ input = StringIO()
+ output = StringIO()
+ flush_calls = []
+ def logging_flush(): flush_calls.append('flush')
+ output.flush = logging_flush
+ vendor = StringIOSSHVendor(input, output)
+ client_medium = medium.SmartSSHClientMedium(
+ 'base', medium.SSHParams('a hostname'), vendor=vendor)
+ # this call is here to ensure we only flush once, not on every
+ # _accept_bytes call.
+ client_medium._accept_bytes('abc')
+ client_medium._flush()
+ client_medium.disconnect()
+ self.assertEqual(['flush'], flush_calls)
+
+ def test_construct_smart_tcp_client_medium(self):
+ # the TCP client medium takes a host and a port. Constructing it won't
+ # connect to anything.
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('127.0.0.1', 0))
+ unopened_port = sock.getsockname()[1]
+ client_medium = medium.SmartTCPClientMedium(
+ '127.0.0.1', unopened_port, 'base')
+ sock.close()
+
+ def test_tcp_client_connects_on_first_use(self):
+ # The only thing that initiates a connection from the medium is giving
+ # it bytes.
+ sock, medium = self.make_loopsocket_and_medium()
+ bytes = []
+ t = self.receive_bytes_on_server(sock, bytes)
+ medium.accept_bytes('abc')
+ t.join()
+ sock.close()
+ self.assertEqual(['abc'], bytes)
+
+ def test_tcp_client_disconnect_does_so(self):
+ # calling disconnect on the client terminates the connection.
+ # we test this by forcing a short read during a socket.MSG_WAITALL
+ # call: write 2 bytes, try to read 3, and then the client disconnects.
+ sock, medium = self.make_loopsocket_and_medium()
+ bytes = []
+ t = self.receive_bytes_on_server(sock, bytes)
+ medium.accept_bytes('ab')
+ medium.disconnect()
+ t.join()
+ sock.close()
+ self.assertEqual(['ab'], bytes)
+ # now disconnect again: this should not do anything, if disconnection
+ # really did disconnect.
+ medium.disconnect()
+
+ def test_tcp_client_ignores_disconnect_when_not_connected(self):
+ # Doing a disconnect on a new (and thus unconnected) TCP medium
+ # does not fail. It's ok to disconnect an unconnected medium.
+ client_medium = medium.SmartTCPClientMedium(None, None, None)
+ client_medium.disconnect()
+
+ def test_tcp_client_raises_on_read_when_not_connected(self):
+ # Doing a read on a new (and thus unconnected) TCP medium raises
+ # MediumNotConnected.
+ client_medium = medium.SmartTCPClientMedium(None, None, None)
+ self.assertRaises(errors.MediumNotConnected, client_medium.read_bytes, 0)
+ self.assertRaises(errors.MediumNotConnected, client_medium.read_bytes, 1)
+
+ def test_tcp_client_supports__flush(self):
+ # invoking _flush on a TCPClientMedium should do something useful.
+ # RBC 20060922 not sure how to test/tell in this case.
+ sock, medium = self.make_loopsocket_and_medium()
+ bytes = []
+ t = self.receive_bytes_on_server(sock, bytes)
+ # try with nothing buffered
+ medium._flush()
+ medium._accept_bytes('ab')
+ # and with something sent.
+ medium._flush()
+ medium.disconnect()
+ t.join()
+ sock.close()
+ self.assertEqual(['ab'], bytes)
+ # now disconnect again: this should not do anything, if disconnection
+ # really did disconnect.
+ medium.disconnect()
+
+ def test_tcp_client_host_unknown_connection_error(self):
+ self.requireFeature(InvalidHostnameFeature)
+ client_medium = medium.SmartTCPClientMedium(
+ 'non_existent.invalid', 4155, 'base')
+ self.assertRaises(
+ errors.ConnectionError, client_medium._ensure_connection)
+
+
+class TestSmartClientStreamMediumRequest(tests.TestCase):
+ """Tests the for SmartClientStreamMediumRequest.
+
+ SmartClientStreamMediumRequest is a helper for the three stream based
+ mediums: TCP, SSH, SimplePipes, so we only test it once, and then test that
+ those three mediums implement the interface it expects.
+ """
+
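+ # A minimal sketch (not itself a test) of the request lifecycle these
+ # tests exercise, assuming the SimplePipes medium used throughout; the
+ # other stream mediums are expected to behave the same way:
+ #   request = medium.SmartClientStreamMediumRequest(client_medium)
+ #   request.accept_bytes('...')    # write the request bytes
+ #   request.finished_writing()     # no more bytes will be written
+ #   data = request.read_bytes(n)   # read the response
+ #   request.finished_reading()     # frees the medium for the next request
+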
+ def test_accept_bytes_after_finished_writing_errors(self):
+ # calling accept_bytes after calling finished_writing raises
+ # WritingCompleted, to prevent assumptions that only hold for stream
+ # environments from breaking message-based environments.
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ request.finished_writing()
+ self.assertRaises(errors.WritingCompleted, request.accept_bytes, None)
+
+ def test_accept_bytes(self):
+ # accept bytes should invoke _accept_bytes on the stream medium.
+ # we test this by using the SimplePipes medium - the most trivial one -
+ # and checking that the pipes get the data.
+ input = StringIO()
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ request.accept_bytes('123')
+ request.finished_writing()
+ request.finished_reading()
+ self.assertEqual('', input.getvalue())
+ self.assertEqual('123', output.getvalue())
+
+ def test_construct_sets_stream_request(self):
+ # constructing a SmartClientStreamMediumRequest on a StreamMedium sets
+ # the current request to the new SmartClientStreamMediumRequest
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ self.assertIs(client_medium._current_request, request)
+
+ def test_construct_while_another_request_active_throws(self):
+ # constructing a SmartClientStreamMediumRequest on a StreamMedium with
+ # a non-None _current_request raises TooManyConcurrentRequests.
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ client_medium._current_request = "a"
+ self.assertRaises(errors.TooManyConcurrentRequests,
+ medium.SmartClientStreamMediumRequest, client_medium)
+
+ def test_finished_read_clears_current_request(self):
+ # calling finished_reading clears the current request from the request's
+ # medium.
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ request.finished_writing()
+ request.finished_reading()
+ self.assertEqual(None, client_medium._current_request)
+
+ def test_finished_read_before_finished_write_errors(self):
+ # calling finished_reading before calling finished_writing triggers a
+ # WritingNotComplete error.
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, None, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ self.assertRaises(errors.WritingNotComplete, request.finished_reading)
+
+ def test_read_bytes(self):
+ # read bytes should invoke _read_bytes on the stream medium.
+ # we test this by using the SimplePipes medium - the most trivial one -
+ # and checking that the data is supplied. It's possible that a
+ # faulty implementation could poke at the pipe variables themselves,
+ # but we trust that this will be caught as it will break the integration
+ # smoke tests.
+ input = StringIO('321')
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ request.finished_writing()
+ self.assertEqual('321', request.read_bytes(3))
+ request.finished_reading()
+ self.assertEqual('', input.read())
+ self.assertEqual('', output.getvalue())
+
+ def test_read_bytes_before_finished_write_errors(self):
+ # calling read_bytes before calling finished_writing triggers a
+ # WritingNotComplete error because the Smart protocol is designed to be
+ # compatible with strict message based protocols like HTTP where the
+ # request cannot be submitted until the writing has completed.
+ client_medium = medium.SmartSimplePipesClientMedium(None, None, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ self.assertRaises(errors.WritingNotComplete, request.read_bytes, None)
+
+ def test_read_bytes_after_finished_reading_errors(self):
+ # calling read_bytes after calling finished_reading raises
+ # ReadingCompleted, to prevent assumptions that only hold for stream
+ # environments from breaking message-based environments.
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = medium.SmartClientStreamMediumRequest(client_medium)
+ request.finished_writing()
+ request.finished_reading()
+ self.assertRaises(errors.ReadingCompleted, request.read_bytes, None)
+
+ def test_reset(self):
+ server_sock, client_sock = portable_socket_pair()
+ # TODO: Use SmartClientAlreadyConnectedSocketMedium for the versions of
+ # bzr where it exists.
+ client_medium = medium.SmartTCPClientMedium(None, None, None)
+ client_medium._socket = client_sock
+ client_medium._connected = True
+ req = client_medium.get_request()
+ self.assertRaises(errors.TooManyConcurrentRequests,
+ client_medium.get_request)
+ client_medium.reset()
+ # The stream should be reset: marked as disconnected, but ready for
+ # us to make a new request.
+ self.assertFalse(client_medium._connected)
+ self.assertIs(None, client_medium._socket)
+ try:
+ self.assertEqual('', client_sock.recv(1))
+ except socket.error, e:
+ if e.errno not in (errno.EBADF,):
+ raise
+ req = client_medium.get_request()
+
+
+class RemoteTransportTests(test_smart.TestCaseWithSmartMedium):
+
+ def test_plausible_url(self):
+ self.assert_(self.get_url().startswith('bzr://'))
+
+ def test_probe_transport(self):
+ t = self.get_transport()
+ self.assertIsInstance(t, remote.RemoteTransport)
+
+ def test_get_medium_from_transport(self):
+ """Remote transport has a medium always, which it can return."""
+ t = self.get_transport()
+ client_medium = t.get_smart_medium()
+ self.assertIsInstance(client_medium, medium.SmartClientMedium)
+
+
+class ErrorRaisingProtocol(object):
+
+ def __init__(self, exception):
+ self.exception = exception
+
+ def next_read_size(self):
+ raise self.exception
+
+
+class SampleRequest(object):
+
+ def __init__(self, expected_bytes):
+ self.accepted_bytes = ''
+ self._finished_reading = False
+ self.expected_bytes = expected_bytes
+ self.unused_data = ''
+
+ def accept_bytes(self, bytes):
+ self.accepted_bytes += bytes
+ if self.accepted_bytes.startswith(self.expected_bytes):
+ self._finished_reading = True
+ self.unused_data = self.accepted_bytes[len(self.expected_bytes):]
+
+ def next_read_size(self):
+ if self._finished_reading:
+ return 0
+ else:
+ return 1
+
+
+class TestSmartServerStreamMedium(tests.TestCase):
+
+ def setUp(self):
+ super(TestSmartServerStreamMedium, self).setUp()
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+
+ def create_pipe_medium(self, to_server, from_server, transport,
+ timeout=4.0):
+ """Create a new SmartServerPipeStreamMedium."""
+ return medium.SmartServerPipeStreamMedium(to_server, from_server,
+ transport, timeout=timeout)
+
+ def create_pipe_context(self, to_server_bytes, transport):
+ """Create a SmartServerSocketStreamMedium.
+
+ This differes from create_pipe_medium, in that we initialize the
+ request that is sent to the server, and return the StringIO class that
+ will hold the response.
+ """
+ to_server = StringIO(to_server_bytes)
+ from_server = StringIO()
+ m = self.create_pipe_medium(to_server, from_server, transport)
+ return m, from_server
+
+ def create_socket_medium(self, server_sock, transport, timeout=4.0):
+ """Initialize a new medium.SmartServerSocketStreamMedium."""
+ return medium.SmartServerSocketStreamMedium(server_sock, transport,
+ timeout=timeout)
+
+ def create_socket_context(self, transport, timeout=4.0):
+ """Create a new SmartServerSocketStreamMedium with default context.
+
+ This will call portable_socket_pair and pass the server side to
+ create_socket_medium along with transport.
+ It then returns the server and the client_sock.
+ """
+ server_sock, client_sock = portable_socket_pair()
+ server = self.create_socket_medium(server_sock, transport,
+ timeout=timeout)
+ return server, client_sock
+
+ def test_smart_query_version(self):
+ """Feed a canned query version to a server"""
+ # wire-to-wire, using the whole stack
+ transport = local.LocalTransport(urlutils.local_path_to_url('/'))
+ server, from_server = self.create_pipe_context('hello\n', transport)
+ smart_protocol = protocol.SmartServerRequestProtocolOne(transport,
+ from_server.write)
+ server._serve_one_request(smart_protocol)
+ self.assertEqual('ok\0012\n',
+ from_server.getvalue())
+
+ def test_response_to_canned_get(self):
+ transport = memory.MemoryTransport('memory:///')
+ transport.put_bytes('testfile', 'contents\nof\nfile\n')
+ server, from_server = self.create_pipe_context('get\001./testfile\n',
+ transport)
+ smart_protocol = protocol.SmartServerRequestProtocolOne(transport,
+ from_server.write)
+ server._serve_one_request(smart_protocol)
+ self.assertEqual('ok\n'
+ '17\n'
+ 'contents\nof\nfile\n'
+ 'done\n',
+ from_server.getvalue())
+
+ def test_response_to_canned_get_of_utf8(self):
+ # wire-to-wire, using the whole stack, with a UTF-8 filename.
+ transport = memory.MemoryTransport('memory:///')
+ utf8_filename = u'testfile\N{INTERROBANG}'.encode('utf-8')
+ # VFS requests use filenames, not raw UTF-8.
+ hpss_path = urlutils.escape(utf8_filename)
+ transport.put_bytes(utf8_filename, 'contents\nof\nfile\n')
+ server, from_server = self.create_pipe_context(
+ 'get\001' + hpss_path + '\n', transport)
+ smart_protocol = protocol.SmartServerRequestProtocolOne(transport,
+ from_server.write)
+ server._serve_one_request(smart_protocol)
+ self.assertEqual('ok\n'
+ '17\n'
+ 'contents\nof\nfile\n'
+ 'done\n',
+ from_server.getvalue())
+
+ def test_pipe_like_stream_with_bulk_data(self):
+ sample_request_bytes = 'command\n9\nbulk datadone\n'
+ server, from_server = self.create_pipe_context(
+ sample_request_bytes, None)
+ sample_protocol = SampleRequest(expected_bytes=sample_request_bytes)
+ server._serve_one_request(sample_protocol)
+ self.assertEqual('', from_server.getvalue())
+ self.assertEqual(sample_request_bytes, sample_protocol.accepted_bytes)
+ self.assertFalse(server.finished)
+
+ def test_socket_stream_with_bulk_data(self):
+ sample_request_bytes = 'command\n9\nbulk datadone\n'
+ server, client_sock = self.create_socket_context(None)
+ sample_protocol = SampleRequest(expected_bytes=sample_request_bytes)
+ client_sock.sendall(sample_request_bytes)
+ server._serve_one_request(sample_protocol)
+ server._disconnect_client()
+ self.assertEqual('', client_sock.recv(1))
+ self.assertEqual(sample_request_bytes, sample_protocol.accepted_bytes)
+ self.assertFalse(server.finished)
+
+ def test_pipe_like_stream_shutdown_detection(self):
+ server, _ = self.create_pipe_context('', None)
+ server._serve_one_request(SampleRequest('x'))
+ self.assertTrue(server.finished)
+
+ def test_socket_stream_shutdown_detection(self):
+ server, client_sock = self.create_socket_context(None)
+ client_sock.close()
+ server._serve_one_request(SampleRequest('x'))
+ self.assertTrue(server.finished)
+
+ def test_socket_stream_incomplete_request(self):
+ """The medium should still construct the right protocol version even if
+ the initial read only reads part of the request.
+
+ Specifically, it should correctly read the protocol version line even
+ if the partial read doesn't end in a newline. An older, naive
+ implementation of _get_line in the server used to have a bug in that
+ case.
+ """
+ incomplete_request_bytes = protocol.REQUEST_VERSION_TWO + 'hel'
+ rest_of_request_bytes = 'lo\n'
+ expected_response = (
+ protocol.RESPONSE_VERSION_TWO + 'success\nok\x012\n')
+ server, client_sock = self.create_socket_context(None)
+ client_sock.sendall(incomplete_request_bytes)
+ server_protocol = server._build_protocol()
+ client_sock.sendall(rest_of_request_bytes)
+ server._serve_one_request(server_protocol)
+ server._disconnect_client()
+ self.assertEqual(expected_response, osutils.recv_all(client_sock, 50),
+ "Not a version 2 response to 'hello' request.")
+ self.assertEqual('', client_sock.recv(1))
+
+ def test_pipe_stream_incomplete_request(self):
+ """The medium should still construct the right protocol version even if
+ the initial read only reads part of the request.
+
+ Specifically, it should correctly read the protocol version line even
+ if the partial read doesn't end in a newline. An older, naive
+ implementation of _get_line in the server used to have a bug in that
+ case.
+ """
+ incomplete_request_bytes = protocol.REQUEST_VERSION_TWO + 'hel'
+ rest_of_request_bytes = 'lo\n'
+ expected_response = (
+ protocol.RESPONSE_VERSION_TWO + 'success\nok\x012\n')
+ # Make a pair of pipes, to and from the server
+ to_server, to_server_w = os.pipe()
+ from_server_r, from_server = os.pipe()
+ to_server = os.fdopen(to_server, 'r', 0)
+ to_server_w = os.fdopen(to_server_w, 'w', 0)
+ from_server_r = os.fdopen(from_server_r, 'r', 0)
+ from_server = os.fdopen(from_server, 'w', 0)
+ server = self.create_pipe_medium(to_server, from_server, None)
+ # Like test_socket_stream_incomplete_request, write an incomplete
+ # request (that does not end in '\n') and build a protocol from it.
+ to_server_w.write(incomplete_request_bytes)
+ server_protocol = server._build_protocol()
+ # Send the rest of the request, and finish serving it.
+ to_server_w.write(rest_of_request_bytes)
+ server._serve_one_request(server_protocol)
+ to_server_w.close()
+ from_server.close()
+ self.assertEqual(expected_response, from_server_r.read(),
+ "Not a version 2 response to 'hello' request.")
+ self.assertEqual('', from_server_r.read(1))
+ from_server_r.close()
+ to_server.close()
+
+ def test_pipe_like_stream_with_two_requests(self):
+ # If two requests are read in one go, then two calls to
+ # _serve_one_request should still process both of them as if they had
+ # been received separately.
+ sample_request_bytes = 'command\n'
+ server, from_server = self.create_pipe_context(
+ sample_request_bytes * 2, None)
+ first_protocol = SampleRequest(expected_bytes=sample_request_bytes)
+ server._serve_one_request(first_protocol)
+ self.assertEqual(0, first_protocol.next_read_size())
+ self.assertEqual('', from_server.getvalue())
+ self.assertFalse(server.finished)
+ # Make a new protocol, call _serve_one_request with it to collect the
+ # second request.
+ second_protocol = SampleRequest(expected_bytes=sample_request_bytes)
+ server._serve_one_request(second_protocol)
+ self.assertEqual('', from_server.getvalue())
+ self.assertEqual(sample_request_bytes, second_protocol.accepted_bytes)
+ self.assertFalse(server.finished)
+
+ def test_socket_stream_with_two_requests(self):
+ # If two requests are read in one go, then two calls to
+ # _serve_one_request should still process both of them as if they had
+ # been received separately.
+ sample_request_bytes = 'command\n'
+ server, client_sock = self.create_socket_context(None)
+ first_protocol = SampleRequest(expected_bytes=sample_request_bytes)
+ # Put two whole requests on the wire.
+ client_sock.sendall(sample_request_bytes * 2)
+ server._serve_one_request(first_protocol)
+ self.assertEqual(0, first_protocol.next_read_size())
+ self.assertFalse(server.finished)
+ # Make a new protocol, call _serve_one_request with it to collect the
+ # second request.
+ second_protocol = SampleRequest(expected_bytes=sample_request_bytes)
+ stream_still_open = server._serve_one_request(second_protocol)
+ self.assertEqual(sample_request_bytes, second_protocol.accepted_bytes)
+ self.assertFalse(server.finished)
+ server._disconnect_client()
+ self.assertEqual('', client_sock.recv(1))
+
+ def test_pipe_like_stream_error_handling(self):
+ # Use plain python StringIO so we can monkey-patch the close method to
+ # not discard the contents.
+ from StringIO import StringIO
+ to_server = StringIO('')
+ from_server = StringIO()
+ self.closed = False
+ def close():
+ self.closed = True
+ from_server.close = close
+ server = self.create_pipe_medium(
+ to_server, from_server, None)
+ fake_protocol = ErrorRaisingProtocol(Exception('boom'))
+ server._serve_one_request(fake_protocol)
+ self.assertEqual('', from_server.getvalue())
+ self.assertTrue(self.closed)
+ self.assertTrue(server.finished)
+
+ def test_socket_stream_error_handling(self):
+ server, client_sock = self.create_socket_context(None)
+ fake_protocol = ErrorRaisingProtocol(Exception('boom'))
+ server._serve_one_request(fake_protocol)
+ # recv should not block, because the other end of the socket has been
+ # closed.
+ self.assertEqual('', client_sock.recv(1))
+ self.assertTrue(server.finished)
+
+ def test_pipe_like_stream_keyboard_interrupt_handling(self):
+ server, from_server = self.create_pipe_context('', None)
+ fake_protocol = ErrorRaisingProtocol(KeyboardInterrupt('boom'))
+ self.assertRaises(
+ KeyboardInterrupt, server._serve_one_request, fake_protocol)
+ self.assertEqual('', from_server.getvalue())
+
+ def test_socket_stream_keyboard_interrupt_handling(self):
+ server, client_sock = self.create_socket_context(None)
+ fake_protocol = ErrorRaisingProtocol(KeyboardInterrupt('boom'))
+ self.assertRaises(
+ KeyboardInterrupt, server._serve_one_request, fake_protocol)
+ server._disconnect_client()
+ self.assertEqual('', client_sock.recv(1))
+
+ def build_protocol_pipe_like(self, bytes):
+ server, _ = self.create_pipe_context(bytes, None)
+ return server._build_protocol()
+
+ def build_protocol_socket(self, bytes):
+ server, client_sock = self.create_socket_context(None)
+ client_sock.sendall(bytes)
+ client_sock.close()
+ return server._build_protocol()
+
+ def assertProtocolOne(self, server_protocol):
+ # Use assertIs because assertIsInstance will wrongly pass
+ # SmartServerRequestProtocolTwo (because it subclasses
+ # SmartServerRequestProtocolOne).
+ self.assertIs(
+ type(server_protocol), protocol.SmartServerRequestProtocolOne)
+
+ def assertProtocolTwo(self, server_protocol):
+ self.assertIsInstance(
+ server_protocol, protocol.SmartServerRequestProtocolTwo)
+
+ def test_pipe_like_build_protocol_empty_bytes(self):
+ # Any empty request (i.e. no bytes) is detected as protocol version one.
+ server_protocol = self.build_protocol_pipe_like('')
+ self.assertProtocolOne(server_protocol)
+
+ def test_socket_like_build_protocol_empty_bytes(self):
+ # Any empty request (i.e. no bytes) is detected as protocol version one.
+ server_protocol = self.build_protocol_socket('')
+ self.assertProtocolOne(server_protocol)
+
+ def test_pipe_like_build_protocol_non_two(self):
+ # A request that doesn't start with "bzr request 2\n" is version one.
+ server_protocol = self.build_protocol_pipe_like('abc\n')
+ self.assertProtocolOne(server_protocol)
+
+ def test_socket_build_protocol_non_two(self):
+ # A request that doesn't start with "bzr request 2\n" is version one.
+ server_protocol = self.build_protocol_socket('abc\n')
+ self.assertProtocolOne(server_protocol)
+
+ def test_pipe_like_build_protocol_two(self):
+ # A request that starts with "bzr request 2\n" is version two.
+ server_protocol = self.build_protocol_pipe_like('bzr request 2\n')
+ self.assertProtocolTwo(server_protocol)
+
+ def test_socket_build_protocol_two(self):
+ # A request that starts with "bzr request 2\n" is version two.
+ server_protocol = self.build_protocol_socket('bzr request 2\n')
+ self.assertProtocolTwo(server_protocol)
+
+ def test__build_protocol_returns_if_stopping(self):
+ # _build_protocol should notice that we are stopping, and return
+ # without waiting for bytes from the client.
+ server, client_sock = self.create_socket_context(None)
+ server._stop_gracefully()
+ self.assertIs(None, server._build_protocol())
+
+ def test_socket_set_timeout(self):
+ server, _ = self.create_socket_context(None, timeout=1.23)
+ self.assertEqual(1.23, server._client_timeout)
+
+ def test_pipe_set_timeout(self):
+ server = self.create_pipe_medium(None, None, None,
+ timeout=1.23)
+ self.assertEqual(1.23, server._client_timeout)
+
+ def test_socket_wait_for_bytes_with_timeout_with_data(self):
+ server, client_sock = self.create_socket_context(None)
+ client_sock.sendall('data\n')
+ # This should not block or consume any actual content
+ self.assertFalse(server._wait_for_bytes_with_timeout(0.1))
+ data = server.read_bytes(5)
+ self.assertEqual('data\n', data)
+
+ def test_socket_wait_for_bytes_with_timeout_no_data(self):
+ server, client_sock = self.create_socket_context(None)
+ # This should timeout quickly, reporting that there wasn't any data
+ self.assertRaises(errors.ConnectionTimeout,
+ server._wait_for_bytes_with_timeout, 0.01)
+ client_sock.close()
+ data = server.read_bytes(1)
+ self.assertEqual('', data)
+
+ def test_socket_wait_for_bytes_with_timeout_closed(self):
+ server, client_sock = self.create_socket_context(None)
+ # With the socket closed, this should return right away.
+ # It seems select.select() returns that you *can* read on the socket,
+ # even though it is closed, presumably as a way to tell you it is closed.
+ # Testing shows that without sock.close() this times out, failing the
+ # test, but with it, it returns False immediately.
+ client_sock.close()
+ self.assertFalse(server._wait_for_bytes_with_timeout(10))
+ data = server.read_bytes(1)
+ self.assertEqual('', data)
+
+ def test_socket_wait_for_bytes_with_shutdown(self):
+ server, client_sock = self.create_socket_context(None)
+ t = time.time()
+ # Override the _timer functionality, so that time never increments;
+ # this way, we can be sure we stopped because of the flag, and not
+ # because of a timeout, etc.
+ server._timer = lambda: t
+ server._client_poll_timeout = 0.1
+ server._stop_gracefully()
+ server._wait_for_bytes_with_timeout(1.0)
+
+ def test_socket_serve_timeout_closes_socket(self):
+ server, client_sock = self.create_socket_context(None, timeout=0.1)
+ # This should timeout quickly, and then close the connection so that
+ # client_sock recv doesn't block.
+ server.serve()
+ self.assertEqual('', client_sock.recv(1))
+
+ def test_pipe_wait_for_bytes_with_timeout_with_data(self):
+ # We intentionally use a real pipe here, so that we can 'select' on it.
+ # You can't select() on a StringIO
+ (r_server, w_client) = os.pipe()
+ self.addCleanup(os.close, w_client)
+ with os.fdopen(r_server, 'rb') as rf_server:
+ server = self.create_pipe_medium(
+ rf_server, None, None)
+ os.write(w_client, 'data\n')
+ # This should not block or consume any actual content
+ server._wait_for_bytes_with_timeout(0.1)
+ data = server.read_bytes(5)
+ self.assertEqual('data\n', data)
+
+ def test_pipe_wait_for_bytes_with_timeout_no_data(self):
+ # We intentionally use a real pipe here, so that we can 'select' on it.
+ # You can't select() on a StringIO
+ (r_server, w_client) = os.pipe()
+ # We can't add an os.close cleanup here, because we need to control
+ # when the file handle gets closed ourselves.
+ with os.fdopen(r_server, 'rb') as rf_server:
+ server = self.create_pipe_medium(
+ rf_server, None, None)
+ if sys.platform == 'win32':
+ # Windows cannot select() on a pipe, so this just returns immediately
+ server._wait_for_bytes_with_timeout(0.01)
+ else:
+ self.assertRaises(errors.ConnectionTimeout,
+ server._wait_for_bytes_with_timeout, 0.01)
+ os.close(w_client)
+ data = server.read_bytes(5)
+ self.assertEqual('', data)
+
+ def test_pipe_wait_for_bytes_no_fileno(self):
+ server, _ = self.create_pipe_context('', None)
+ # Our file doesn't support polling, so we should always just return
+ # 'you have data to consume'.
+ server._wait_for_bytes_with_timeout(0.01)
+
+
+class TestGetProtocolFactoryForBytes(tests.TestCase):
+ """_get_protocol_factory_for_bytes identifies the protocol factory a server
+ should use to decode a given request. Any bytes not part of the version
+ marker string (and thus part of the actual request) are returned alongside
+ the protocol factory.
+ """
+
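+ # For reference, a rough sketch of the dispatch these tests exercise,
+ # with the marker strings taken from the tests below:
+ #   'bzr message 3 (bzr 1.6)\n' + rest -> build_server_protocol_three
+ #   'bzr request 2\n' + rest           -> SmartServerRequestProtocolTwo
+ #   no version marker                  -> SmartServerRequestProtocolOne
+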
+ def test_version_three(self):
+ result = medium._get_protocol_factory_for_bytes(
+ 'bzr message 3 (bzr 1.6)\nextra bytes')
+ protocol_factory, remainder = result
+ self.assertEqual(
+ protocol.build_server_protocol_three, protocol_factory)
+ self.assertEqual('extra bytes', remainder)
+
+ def test_version_two(self):
+ result = medium._get_protocol_factory_for_bytes(
+ 'bzr request 2\nextra bytes')
+ protocol_factory, remainder = result
+ self.assertEqual(
+ protocol.SmartServerRequestProtocolTwo, protocol_factory)
+ self.assertEqual('extra bytes', remainder)
+
+ def test_version_one(self):
+ """Version one requests have no version markers."""
+ result = medium._get_protocol_factory_for_bytes('anything\n')
+ protocol_factory, remainder = result
+ self.assertEqual(
+ protocol.SmartServerRequestProtocolOne, protocol_factory)
+ self.assertEqual('anything\n', remainder)
+
+
+class TestSmartTCPServer(tests.TestCase):
+
+ def make_server(self):
+ """Create a SmartTCPServer that we can exercise.
+
+ Note: we don't use SmartTCPServer_for_testing because the testing
+ version overrides lots of functionality like 'serve', and we want to
+ test the raw service.
+
+ This will start the server in another thread, and wait for it to
+ indicate it has finished starting up.
+
+ :return: (server, server_thread)
+ """
+ t = _mod_transport.get_transport_from_url('memory:///')
+ server = _mod_server.SmartTCPServer(t, client_timeout=4.0)
+ server._ACCEPT_TIMEOUT = 0.1
+ # We don't use 'localhost' because that might be an IPv6 address.
+ server.start_server('127.0.0.1', 0)
+ server_thread = threading.Thread(target=server.serve,
+ args=(self.id(),))
+ server_thread.start()
+ # Ensure this gets called at some point
+ self.addCleanup(server._stop_gracefully)
+ server._started.wait()
+ return server, server_thread
+
+ def ensure_client_disconnected(self, client_sock):
+ """Ensure that a socket is closed, discarding all errors."""
+ try:
+ client_sock.close()
+ except Exception:
+ pass
+
+ def connect_to_server(self, server):
+ """Create a client socket that can talk to the server."""
+ client_sock = socket.socket()
+ server_info = server._server_socket.getsockname()
+ client_sock.connect(server_info)
+ self.addCleanup(self.ensure_client_disconnected, client_sock)
+ return client_sock
+
+ def connect_to_server_and_hangup(self, server):
+ """Connect to the server, and then hang up.
+
+ That way it doesn't sit waiting for 'accept()' to time out.
+ """
+ # If the server has already signaled that the socket is closed, we
+ # don't need to try to connect to it. Even if it hasn't signaled yet,
+ # though, the server might still close the socket while we try to
+ # connect to it, so we still have to catch the exception.
+ if server._stopped.isSet():
+ return
+ try:
+ client_sock = self.connect_to_server(server)
+ client_sock.close()
+ except socket.error, e:
+ # If the server has hung up already, that is fine.
+ pass
+
+ def say_hello(self, client_sock):
+ """Send the 'hello' smart RPC, and expect the response."""
+ client_sock.send('hello\n')
+ self.assertEqual('ok\x012\n', client_sock.recv(5))
+
+ def shutdown_server_cleanly(self, server, server_thread):
+ server._stop_gracefully()
+ self.connect_to_server_and_hangup(server)
+ server._stopped.wait()
+ server._fully_stopped.wait()
+ server_thread.join()
+
+ def test_get_error_unexpected(self):
+ """Error reported by server with no specific representation"""
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ class FlakyTransport(object):
+ base = 'a_url'
+ def external_url(self):
+ return self.base
+ def get(self, path):
+ raise Exception("some random exception from inside server")
+
+ class FlakyServer(test_server.SmartTCPServer_for_testing):
+ def get_backing_transport(self, backing_transport_server):
+ return FlakyTransport()
+
+ smart_server = FlakyServer()
+ smart_server.start_server()
+ self.addCleanup(smart_server.stop_server)
+ t = remote.RemoteTCPTransport(smart_server.get_url())
+ self.addCleanup(t.disconnect)
+ err = self.assertRaises(errors.UnknownErrorFromSmartServer,
+ t.get, 'something')
+ self.assertContainsRe(str(err), 'some random exception')
+
+ def test_propagates_timeout(self):
+ server = _mod_server.SmartTCPServer(None, client_timeout=1.23)
+ server_sock, client_sock = portable_socket_pair()
+ handler = server._make_handler(server_sock)
+ self.assertEqual(1.23, handler._client_timeout)
+
+ def test_serve_conn_tracks_connections(self):
+ server = _mod_server.SmartTCPServer(None, client_timeout=4.0)
+ server_sock, client_sock = portable_socket_pair()
+ server.serve_conn(server_sock, '-%s' % (self.id(),))
+ self.assertEqual(1, len(server._active_connections))
+ # We still want to talk on the connection. Polling should indicate it
+ # is still active.
+ server._poll_active_connections()
+ self.assertEqual(1, len(server._active_connections))
+ # Closing the socket will end the active thread, and polling will
+ # notice and remove it from the active set.
+ client_sock.close()
+ server._poll_active_connections(0.1)
+ self.assertEqual(0, len(server._active_connections))
+
+ def test_serve_closes_out_finished_connections(self):
+ server, server_thread = self.make_server()
+ # The server is started, connect to it.
+ client_sock = self.connect_to_server(server)
+ # We send and receive on the connection, so that we know the
+ # server-side has seen the connect, and started handling the
+ # results.
+ self.say_hello(client_sock)
+ self.assertEqual(1, len(server._active_connections))
+ # Grab a handle to the thread that is processing our request
+ _, server_side_thread = server._active_connections[0]
+ # Close the connection, ask the server to stop, and wait for the
+ # server to stop, as well as the thread that was servicing the
+ # client request.
+ client_sock.close()
+ # Wait for the server-side request thread to notice we are closed.
+ server_side_thread.join()
+ # Stop the server, it should notice the connection has finished.
+ self.shutdown_server_cleanly(server, server_thread)
+ # The server should have noticed that all clients are gone before
+ # exiting.
+ self.assertEqual(0, len(server._active_connections))
+
+ def test_serve_reaps_finished_connections(self):
+ server, server_thread = self.make_server()
+ client_sock1 = self.connect_to_server(server)
+ # We send and receive on the connection, so that we know the
+ # server-side has seen the connect, and started handling the
+ # results.
+ self.say_hello(client_sock1)
+ server_handler1, server_side_thread1 = server._active_connections[0]
+ client_sock1.close()
+ server_side_thread1.join()
+ # By waiting until the first connection is fully done, the server
+ # should notice after another connection that the first has finished.
+ client_sock2 = self.connect_to_server(server)
+ self.say_hello(client_sock2)
+ server_handler2, server_side_thread2 = server._active_connections[-1]
+ # There is a race condition. We know that client_sock2 has been
+ # registered, but not that _poll_active_connections has been called. We
+ # know that it will be called before the server will accept a new
+ # connection, however. So connect one more time, and assert that we
+ # have either 1 or 2 active connections (never 3), and that the 'first'
+ # connection is not connection 1.
+ client_sock3 = self.connect_to_server(server)
+ self.say_hello(client_sock3)
+ # Copy the list, so we don't have it mutating behind our back
+ conns = list(server._active_connections)
+ self.assertEqual(2, len(conns))
+ self.assertNotEqual((server_handler1, server_side_thread1), conns[0])
+ self.assertEqual((server_handler2, server_side_thread2), conns[0])
+ client_sock2.close()
+ client_sock3.close()
+ self.shutdown_server_cleanly(server, server_thread)
+
+ def test_graceful_shutdown_waits_for_clients_to_stop(self):
+ server, server_thread = self.make_server()
+ # We need something big enough that it won't fit in a single recv. So
+ # the server thread gets blocked writing content to the client until we
+ # finish reading on the client.
+ server.backing_transport.put_bytes('bigfile',
+ 'a'*1024*1024)
+ client_sock = self.connect_to_server(server)
+ self.say_hello(client_sock)
+ _, server_side_thread = server._active_connections[0]
+ # Start the RPC, but don't finish reading the response
+ client_medium = medium.SmartClientAlreadyConnectedSocketMedium(
+ 'base', client_sock)
+ client_client = client._SmartClient(client_medium)
+ resp, response_handler = client_client.call_expecting_body('get',
+ 'bigfile')
+ self.assertEqual(('ok',), resp)
+ # Ask the server to stop gracefully, and wait for it.
+ server._stop_gracefully()
+ self.connect_to_server_and_hangup(server)
+ server._stopped.wait()
+ # It should not be accepting another connection.
+ self.assertRaises(socket.error, self.connect_to_server, server)
+ # It should also not be fully stopped
+ server._fully_stopped.wait(0.01)
+ self.assertFalse(server._fully_stopped.isSet())
+ response_handler.read_body_bytes()
+ client_sock.close()
+ server_side_thread.join()
+ server_thread.join()
+ self.assertTrue(server._fully_stopped.isSet())
+ log = self.get_log()
+ self.assertThat(log, DocTestMatches("""\
+ INFO Requested to stop gracefully
+... Stopping SmartServerSocketStreamMedium(client=('127.0.0.1', ...
+ INFO Waiting for 1 client(s) to finish
+""", flags=doctest.ELLIPSIS|doctest.REPORT_UDIFF))
+
+ def test_stop_gracefully_tells_handlers_to_stop(self):
+ server, server_thread = self.make_server()
+ client_sock = self.connect_to_server(server)
+ self.say_hello(client_sock)
+ server_handler, server_side_thread = server._active_connections[0]
+ self.assertFalse(server_handler.finished)
+ server._stop_gracefully()
+ self.assertTrue(server_handler.finished)
+ client_sock.close()
+ self.connect_to_server_and_hangup(server)
+ server_thread.join()
+
+
+class SmartTCPTests(tests.TestCase):
+ """Tests for connection/end to end behaviour using the TCP server.
+
+ All of these tests are run with a server running in another thread serving
+ a MemoryTransport, and a connection to it already open.
+
+ The server is obtained by calling self.start_server(readonly=False).
+ """
+
+ def start_server(self, readonly=False, backing_transport=None):
+ """Setup the server.
+
+ :param readonly: Create a readonly server.
+ """
+ # NB: Tests using this fall into two categories: tests of the server,
+ # tests wanting a server. The latter should be updated to use
+ # self.vfs_transport_factory etc.
+ if backing_transport is None:
+ mem_server = memory.MemoryServer()
+ mem_server.start_server()
+ self.addCleanup(mem_server.stop_server)
+ self.permit_url(mem_server.get_url())
+ self.backing_transport = _mod_transport.get_transport_from_url(
+ mem_server.get_url())
+ else:
+ self.backing_transport = backing_transport
+ if readonly:
+ self.real_backing_transport = self.backing_transport
+ self.backing_transport = _mod_transport.get_transport_from_url(
+ "readonly+" + self.backing_transport.abspath('.'))
+ self.server = _mod_server.SmartTCPServer(self.backing_transport,
+ client_timeout=4.0)
+ self.server.start_server('127.0.0.1', 0)
+ self.server.start_background_thread('-' + self.id())
+ self.transport = remote.RemoteTCPTransport(self.server.get_url())
+ self.addCleanup(self.stop_server)
+ self.permit_url(self.server.get_url())
+
+ def stop_server(self):
+ """Disconnect the client and stop the server.
+
+ This must be re-entrant as some tests will call it explicitly in
+ addition to the normal cleanup.
+ """
+ if getattr(self, 'transport', None):
+ self.transport.disconnect()
+ del self.transport
+ if getattr(self, 'server', None):
+ self.server.stop_background_thread()
+ del self.server
+
+
+class TestServerSocketUsage(SmartTCPTests):
+
+ def test_server_start_stop(self):
+ """It should be safe to stop the server with no requests."""
+ self.start_server()
+ t = remote.RemoteTCPTransport(self.server.get_url())
+ self.stop_server()
+ self.assertRaises(errors.ConnectionError, t.has, '.')
+
+ def test_server_closes_listening_sock_on_shutdown_after_request(self):
+ """The server should close its listening socket when it's stopped."""
+ self.start_server()
+ server_url = self.server.get_url()
+ self.transport.has('.')
+ self.stop_server()
+ # if the listening socket has closed, we should get a BADFD error
+ # when connecting, rather than a hang.
+ t = remote.RemoteTCPTransport(server_url)
+ self.assertRaises(errors.ConnectionError, t.has, '.')
+
+
+class WritableEndToEndTests(SmartTCPTests):
+ """Client to server tests that require a writable transport."""
+
+ def setUp(self):
+ super(WritableEndToEndTests, self).setUp()
+ self.start_server()
+
+ def test_start_tcp_server(self):
+ url = self.server.get_url()
+ self.assertContainsRe(url, r'^bzr://127\.0\.0\.1:[0-9]{2,}/')
+
+ def test_smart_transport_has(self):
+ """Checking for file existence over smart."""
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ self.backing_transport.put_bytes("foo", "contents of foo\n")
+ self.assertTrue(self.transport.has("foo"))
+ self.assertFalse(self.transport.has("non-foo"))
+
+ def test_smart_transport_get(self):
+ """Read back a file over smart."""
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ self.backing_transport.put_bytes("foo", "contents\nof\nfoo\n")
+ fp = self.transport.get("foo")
+ self.assertEqual('contents\nof\nfoo\n', fp.read())
+
+ def test_get_error_enoent(self):
+ """Error reported from server getting nonexistent file."""
+ # The path in a raised NoSuchFile exception should be the precise path
+ # asked for by the client. This gives meaningful and unsurprising errors
+ # for users.
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ err = self.assertRaises(
+ errors.NoSuchFile, self.transport.get, 'not%20a%20file')
+ self.assertSubset([err.path], ['not%20a%20file', './not%20a%20file'])
+
+ def test_simple_clone_conn(self):
+ """Test that cloning reuses the same connection."""
+ # we create a real connection, not a loopback one, but it will use the
+ # same server and pipes
+ conn2 = self.transport.clone('.')
+ self.assertIs(self.transport.get_smart_medium(),
+ conn2.get_smart_medium())
+
+ def test__remote_path(self):
+ self.assertEquals('/foo/bar',
+ self.transport._remote_path('foo/bar'))
+
+ def test_clone_changes_base(self):
+ """Cloning transport produces one with a new base location"""
+ conn2 = self.transport.clone('subdir')
+ self.assertEquals(self.transport.base + 'subdir/',
+ conn2.base)
+
+ def test_open_dir(self):
+ """Test changing directory"""
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ transport = self.transport
+ self.backing_transport.mkdir('toffee')
+ self.backing_transport.mkdir('toffee/apple')
+ self.assertEquals('/toffee', transport._remote_path('toffee'))
+ toffee_trans = transport.clone('toffee')
+ # Check that each transport has only the contents of its directory
+ # directly visible. If state was being held in the wrong object, it's
+ # conceivable that cloning a transport would alter the state of the
+ # cloned-from transport.
+ self.assertTrue(transport.has('toffee'))
+ self.assertFalse(toffee_trans.has('toffee'))
+ self.assertFalse(transport.has('apple'))
+ self.assertTrue(toffee_trans.has('apple'))
+
+ def test_open_bzrdir(self):
+ """Open an existing bzrdir over smart transport"""
+ transport = self.transport
+ t = self.backing_transport
+ bzrdir.BzrDirFormat.get_default_format().initialize_on_transport(t)
+ result_dir = controldir.ControlDir.open_containing_from_transport(
+ transport)
+
+
+class ReadOnlyEndToEndTests(SmartTCPTests):
+ """Tests from the client to the server using a readonly backing transport."""
+
+ def test_mkdir_error_readonly(self):
+ """TransportNotPossible should be preserved from the backing transport."""
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ self.start_server(readonly=True)
+ self.assertRaises(errors.TransportNotPossible, self.transport.mkdir,
+ 'foo')
+
+
+class TestServerHooks(SmartTCPTests):
+
+ def capture_server_call(self, backing_urls, public_url):
+ """Record a server_started|stopped hook firing."""
+ self.hook_calls.append((backing_urls, public_url))
+
+ def test_server_started_hook_memory(self):
+ """The server_started hook fires when the server is started."""
+ self.hook_calls = []
+ _mod_server.SmartTCPServer.hooks.install_named_hook('server_started',
+ self.capture_server_call, None)
+ self.start_server()
+ # at this point, the server will be starting a thread up.
+ # there is no indicator at the moment, so bodge it by doing a request.
+ self.transport.has('.')
+ # The default test server uses MemoryTransport and that has no external
+ # url:
+ self.assertEqual([([self.backing_transport.base], self.transport.base)],
+ self.hook_calls)
+
+ def test_server_started_hook_file(self):
+ """The server_started hook fires when the server is started."""
+ self.hook_calls = []
+ _mod_server.SmartTCPServer.hooks.install_named_hook('server_started',
+ self.capture_server_call, None)
+ self.start_server(
+ backing_transport=_mod_transport.get_transport_from_path("."))
+ # at this point, the server will be starting a thread up.
+ # there is no indicator at the moment, so bodge it by doing a request.
+ self.transport.has('.')
+ # This server uses a file-backed transport, which does have an external
+ # url, so both the base and external urls are reported:
+ self.assertEqual([([
+ self.backing_transport.base, self.backing_transport.external_url()],
+ self.transport.base)],
+ self.hook_calls)
+
+ def test_server_stopped_hook_simple_memory(self):
+ """The server_stopped hook fires when the server is stopped."""
+ self.hook_calls = []
+ _mod_server.SmartTCPServer.hooks.install_named_hook('server_stopped',
+ self.capture_server_call, None)
+ self.start_server()
+ result = [([self.backing_transport.base], self.transport.base)]
+ # check the stopping message isn't emitted up front.
+ self.assertEqual([], self.hook_calls)
+ # nor after a single message
+ self.transport.has('.')
+ self.assertEqual([], self.hook_calls)
+ # clean up the server
+ self.stop_server()
+ # now it should have fired.
+ self.assertEqual(result, self.hook_calls)
+
+ def test_server_stopped_hook_simple_file(self):
+ """The server_stopped hook fires when the server is stopped."""
+ self.hook_calls = []
+ _mod_server.SmartTCPServer.hooks.install_named_hook('server_stopped',
+ self.capture_server_call, None)
+ self.start_server(
+ backing_transport=_mod_transport.get_transport_from_path("."))
+ result = [(
+ [self.backing_transport.base, self.backing_transport.external_url()]
+ , self.transport.base)]
+ # check the stopping message isn't emitted up front.
+ self.assertEqual([], self.hook_calls)
+ # nor after a single message
+ self.transport.has('.')
+ self.assertEqual([], self.hook_calls)
+ # clean up the server
+ self.stop_server()
+ # now it should have fired.
+ self.assertEqual(result, self.hook_calls)
+
+# TODO: test that when the server suffers an exception it calls the
+# server-stopped hook.
+
+
+class SmartServerCommandTests(tests.TestCaseWithTransport):
+ """Tests that call directly into the command objects, bypassing the network
+ and the request dispatching.
+
+ Note: these tests are rudimentary versions of the command object tests in
+ test_smart.py.
+ """
+
+ def test_hello(self):
+ cmd = _mod_request.HelloRequest(None, '/')
+ response = cmd.execute()
+ self.assertEqual(('ok', '2'), response.args)
+ self.assertEqual(None, response.body)
+
+ def test_get_bundle(self):
+ from bzrlib.bundle import serializer
+ wt = self.make_branch_and_tree('.')
+ self.build_tree_contents([('hello', 'hello world')])
+ wt.add('hello')
+ rev_id = wt.commit('add hello')
+
+ cmd = _mod_request.GetBundleRequest(self.get_transport(), '/')
+ response = cmd.execute('.', rev_id)
+ bundle = serializer.read_bundle(StringIO(response.body))
+ self.assertEqual((), response.args)
+
+
+class SmartServerRequestHandlerTests(tests.TestCaseWithTransport):
+ """Test that call directly into the handler logic, bypassing the network."""
+
+ def setUp(self):
+ super(SmartServerRequestHandlerTests, self).setUp()
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+
+ def build_handler(self, transport):
+ """Returns a handler for the commands in protocol version one."""
+ return _mod_request.SmartServerRequestHandler(
+ transport, _mod_request.request_handlers, '/')
+
+ def test_construct_request_handler(self):
+ """Constructing a request handler should be easy and set defaults."""
+ handler = _mod_request.SmartServerRequestHandler(None, commands=None,
+ root_client_path='/')
+ self.assertFalse(handler.finished_reading)
+
+ def test_hello(self):
+ handler = self.build_handler(None)
+ handler.args_received(('hello',))
+ self.assertEqual(('ok', '2'), handler.response.args)
+ self.assertEqual(None, handler.response.body)
+
+ def test_disable_vfs_handler_classes_via_environment(self):
+ # VFS handler classes will raise an error from "execute" if
+ # BZR_NO_SMART_VFS is set.
+ handler = vfs.HasRequest(None, '/')
+ # set environment variable after construction to make sure it's
+ # examined.
+ self.overrideEnv('BZR_NO_SMART_VFS', '')
+ self.assertRaises(errors.DisabledMethod, handler.execute)
+
+ def test_readonly_exception_becomes_transport_not_possible(self):
+ """The response for a read-only error is ('ReadOnlyError')."""
+ handler = self.build_handler(self.get_readonly_transport())
+ # send a mkdir for foo, with no explicit mode - should fail.
+ handler.args_received(('mkdir', 'foo', ''))
+ # and the failure should be an explicit ReadOnlyError
+ self.assertEqual(("ReadOnlyError", ), handler.response.args)
+ # XXX: TODO: test that other TransportNotPossible errors are
+ # presented as TransportNotPossible - not possible to do that
+ # until I figure out how to trigger that relatively cleanly via
+ # the api. RBC 20060918
+
+ def test_hello_has_finished_body_on_dispatch(self):
+ """The 'hello' command should set finished_reading."""
+ handler = self.build_handler(None)
+ handler.args_received(('hello',))
+ self.assertTrue(handler.finished_reading)
+ self.assertNotEqual(None, handler.response)
+
+ def test_put_bytes_non_atomic(self):
+ """'put_...' should set finished_reading after reading the bytes."""
+ handler = self.build_handler(self.get_transport())
+ handler.args_received(('put_non_atomic', 'a-file', '', 'F', ''))
+ self.assertFalse(handler.finished_reading)
+ handler.accept_body('1234')
+ self.assertFalse(handler.finished_reading)
+ handler.accept_body('5678')
+ handler.end_of_body()
+ self.assertTrue(handler.finished_reading)
+ self.assertEqual(('ok', ), handler.response.args)
+ self.assertEqual(None, handler.response.body)
+
+ def test_readv_accept_body(self):
+ """'readv' should set finished_reading after reading offsets."""
+ self.build_tree(['a-file'])
+ handler = self.build_handler(self.get_readonly_transport())
+ handler.args_received(('readv', 'a-file'))
+ self.assertFalse(handler.finished_reading)
+ handler.accept_body('2,')
+ self.assertFalse(handler.finished_reading)
+ handler.accept_body('3')
+ handler.end_of_body()
+ self.assertTrue(handler.finished_reading)
+ self.assertEqual(('readv', ), handler.response.args)
+ # The file content ('contents of a-file...') splits as 'co' + 'nte' + ...,
+ # so the 3-byte read at offset 2 extracts 'nte'.
+ self.assertEqual('nte', handler.response.body)
+
+ def test_readv_short_read_response_contents(self):
+ """'readv' when a short read occurs sets the response appropriately."""
+ self.build_tree(['a-file'])
+ handler = self.build_handler(self.get_readonly_transport())
+ handler.args_received(('readv', 'a-file'))
+ # read beyond the end of the file.
+ handler.accept_body('100,1')
+ handler.end_of_body()
+ self.assertTrue(handler.finished_reading)
+ self.assertEqual(('ShortReadvError', './a-file', '100', '1', '0'),
+ handler.response.args)
+ self.assertEqual(None, handler.response.body)
+
+
+class RemoteTransportRegistration(tests.TestCase):
+
+ def test_registration(self):
+ t = _mod_transport.get_transport_from_url('bzr+ssh://example.com/path')
+ self.assertIsInstance(t, remote.RemoteSSHTransport)
+ self.assertEqual('example.com', t._parsed_url.host)
+
+ def test_bzr_https(self):
+ # https://bugs.launchpad.net/bzr/+bug/128456
+ t = _mod_transport.get_transport_from_url('bzr+https://example.com/path')
+ self.assertIsInstance(t, remote.RemoteHTTPTransport)
+ self.assertStartsWith(
+ t._http_transport.base,
+ 'https://')
+
+
+class TestRemoteTransport(tests.TestCase):
+
+ def test_use_connection_factory(self):
+ # We want to be able to pass a client as a parameter to RemoteTransport.
+ input = StringIO('ok\n3\nbardone\n')
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ transport = remote.RemoteTransport(
+ 'bzr://localhost/', medium=client_medium)
+ # Disable version detection.
+ client_medium._protocol_version = 1
+
+ # We want to make sure the client is used when the first remote
+ # method is called. No data should have been sent, or read.
+ self.assertEqual(0, input.tell())
+ self.assertEqual('', output.getvalue())
+
+ # Now call a method that should result in one request: as the
+ # transport makes its own protocol instances, we check on the wire.
+ # XXX: TODO: give the transport a protocol factory, which can make
+ # an instrumented protocol for us.
+ self.assertEqual('bar', transport.get_bytes('foo'))
+ # only the needed data should have been sent/received.
+ self.assertEqual(13, input.tell())
+ self.assertEqual('get\x01/foo\n', output.getvalue())
+
+ def test__translate_error_readonly(self):
+ """Sending a ReadOnlyError to _translate_error raises TransportNotPossible."""
+ client_medium = medium.SmartSimplePipesClientMedium(None, None, 'base')
+ transport = remote.RemoteTransport(
+ 'bzr://localhost/', medium=client_medium)
+ err = errors.ErrorFromSmartServer(("ReadOnlyError", ))
+ self.assertRaises(errors.TransportNotPossible,
+ transport._translate_error, err)
+
+
+class TestSmartProtocol(tests.TestCase):
+ """Base class for smart protocol tests.
+
+ Each test case gets a smart_server and smart_client created during setUp().
+
+ It is planned that the client can be called with self.call_client() giving
+ it an expected server response, which will be fed into it when it tries to
+ read. Likewise, self.call_server will call a server's method with a canned
+ serialised client request. Output done by the client or server for these
+ calls will be captured to self.to_server and self.to_client. Each element
+ in the list is a write call from the client or server respectively.
+
+ Subclasses can override client_protocol_class and server_protocol_class.
+ """
+
+ request_encoder = None
+ response_decoder = None
+ server_protocol_class = None
+ client_protocol_class = None
+
+ def make_client_protocol_and_output(self, input_bytes=None):
+ """
+ :returns: a Request
+ """
+ # This is very similar to
+ # bzrlib.smart.client._SmartClient._build_client_protocol
+ # XXX: make this use _SmartClient!
+ if input_bytes is None:
+ input = StringIO()
+ else:
+ input = StringIO(input_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ if self.client_protocol_class is not None:
+ client_protocol = self.client_protocol_class(request)
+ return client_protocol, client_protocol, output
+ else:
+ self.assertNotEqual(None, self.request_encoder)
+ self.assertNotEqual(None, self.response_decoder)
+ requester = self.request_encoder(request)
+ response_handler = message.ConventionalResponseHandler()
+ response_protocol = self.response_decoder(
+ response_handler, expect_version_marker=True)
+ response_handler.setProtoAndMediumRequest(
+ response_protocol, request)
+ return requester, response_handler, output
+
+ def make_client_protocol(self, input_bytes=None):
+ result = self.make_client_protocol_and_output(input_bytes=input_bytes)
+ requester, response_handler, output = result
+ return requester, response_handler
+
+ def make_server_protocol(self):
+ out_stream = StringIO()
+ smart_protocol = self.server_protocol_class(None, out_stream.write)
+ return smart_protocol, out_stream
+
+ def setUp(self):
+ super(TestSmartProtocol, self).setUp()
+ self.response_marker = getattr(
+ self.client_protocol_class, 'response_marker', None)
+ self.request_marker = getattr(
+ self.client_protocol_class, 'request_marker', None)
+
+ def assertOffsetSerialisation(self, expected_offsets, expected_serialised,
+ requester):
+ """Check that smart (de)serialises offsets as expected.
+
+ We check both serialisation and deserialisation at the same time
+ to ensure that the round tripping cannot skew: both directions should
+ be as expected.
+
+ :param expected_offsets: a readv offset list.
+ :param expected_serialised: an expected serial form of the offsets.
+ """
+ # XXX: '_deserialise_offsets' should be a method of the
+ # SmartServerRequestProtocol in future.
+ readv_cmd = vfs.ReadvRequest(None, '/')
+ offsets = readv_cmd._deserialise_offsets(expected_serialised)
+ self.assertEqual(expected_offsets, offsets)
+ serialised = requester._serialise_offsets(offsets)
+ self.assertEqual(expected_serialised, serialised)
+
+ def build_protocol_waiting_for_body(self):
+ smart_protocol, out_stream = self.make_server_protocol()
+ smart_protocol._has_dispatched = True
+ smart_protocol.request = _mod_request.SmartServerRequestHandler(
+ None, _mod_request.request_handlers, '/')
+ # GZ 2010-08-10: Cycle with closure affects 4 tests
+ class FakeCommand(_mod_request.SmartServerRequest):
+ def do_body(self_cmd, body_bytes):
+ self.end_received = True
+ self.assertEqual('abcdefg', body_bytes)
+ return _mod_request.SuccessfulSmartServerResponse(('ok', ))
+ smart_protocol.request._command = FakeCommand(None)
+ # Call accept_bytes to make sure that internal state like _body_decoder
+ # is initialised. This test should probably be given a clearer
+ # interface to work with that will not cause this inconsistency.
+ # -- Andrew Bennetts, 2006-09-28
+ smart_protocol.accept_bytes('')
+ return smart_protocol
+
+ def assertServerToClientEncoding(self, expected_bytes, expected_tuple,
+ input_tuples):
+ """Assert that each input_tuple serialises as expected_bytes, and the
+ bytes deserialise as expected_tuple.
+ """
+ # check the encoding of the server for all input_tuples matches
+ # expected bytes
+ for input_tuple in input_tuples:
+ server_protocol, server_output = self.make_server_protocol()
+ server_protocol._send_response(
+ _mod_request.SuccessfulSmartServerResponse(input_tuple))
+ self.assertEqual(expected_bytes, server_output.getvalue())
+ # check the decoding of the client smart_protocol from expected_bytes:
+ requester, response_handler = self.make_client_protocol(expected_bytes)
+ requester.call('foo')
+ self.assertEqual(expected_tuple, response_handler.read_response_tuple())
+
+
+class CommonSmartProtocolTestMixin(object):
+
+ def test_connection_closed_reporting(self):
+ requester, response_handler = self.make_client_protocol()
+ requester.call('hello')
+ ex = self.assertRaises(errors.ConnectionReset,
+ response_handler.read_response_tuple)
+ self.assertEqual("Connection closed: "
+ "Unexpected end of message. Please check connectivity "
+ "and permissions, and report a bug if problems persist. ",
+ str(ex))
+
+ def test_server_offset_serialisation(self):
+ """The Smart protocol serialises offsets as a comma and \n string.
+
+ We check that a number of boundary cases are as expected: empty, one offset,
+ one with the order of reads not increasing (an out of order read), and
+ one that should coalesce.
+ """
+ requester, response_handler = self.make_client_protocol()
+ self.assertOffsetSerialisation([], '', requester)
+ self.assertOffsetSerialisation([(1,2)], '1,2', requester)
+ self.assertOffsetSerialisation([(10,40), (0,5)], '10,40\n0,5',
+ requester)
+ self.assertOffsetSerialisation([(1,2), (3,4), (100, 200)],
+ '1,2\n3,4\n100,200', requester)
+
+
+class TestVersionOneFeaturesInProtocolOne(
+ TestSmartProtocol, CommonSmartProtocolTestMixin):
+ """Tests for version one smart protocol features as implemeted by version
+ one."""
+
+ client_protocol_class = protocol.SmartClientRequestProtocolOne
+ server_protocol_class = protocol.SmartServerRequestProtocolOne
+
+ def test_construct_version_one_server_protocol(self):
+ smart_protocol = protocol.SmartServerRequestProtocolOne(None, None)
+ self.assertEqual('', smart_protocol.unused_data)
+ self.assertEqual('', smart_protocol.in_buffer)
+ self.assertFalse(smart_protocol._has_dispatched)
+ self.assertEqual(1, smart_protocol.next_read_size())
+
+ def test_construct_version_one_client_protocol(self):
+ # we can construct a client protocol from a client medium request
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = client_medium.get_request()
+ client_protocol = protocol.SmartClientRequestProtocolOne(request)
+
+ def test_accept_bytes_of_bad_request_to_protocol(self):
+ out_stream = StringIO()
+ smart_protocol = protocol.SmartServerRequestProtocolOne(
+ None, out_stream.write)
+ smart_protocol.accept_bytes('abc')
+ self.assertEqual('abc', smart_protocol.in_buffer)
+ smart_protocol.accept_bytes('\n')
+ self.assertEqual(
+ "error\x01Generic bzr smart protocol error: bad request 'abc'\n",
+ out_stream.getvalue())
+ self.assertTrue(smart_protocol._has_dispatched)
+ self.assertEqual(0, smart_protocol.next_read_size())
+
+ def test_accept_body_bytes_to_protocol(self):
+ protocol = self.build_protocol_waiting_for_body()
+ self.assertEqual(6, protocol.next_read_size())
+ protocol.accept_bytes('7\nabc')
+ self.assertEqual(9, protocol.next_read_size())
+ protocol.accept_bytes('defgd')
+ protocol.accept_bytes('one\n')
+ self.assertEqual(0, protocol.next_read_size())
+ self.assertTrue(self.end_received)
+
+ def test_accept_request_and_body_all_at_once(self):
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ mem_transport = memory.MemoryTransport()
+ mem_transport.put_bytes('foo', 'abcdefghij')
+ out_stream = StringIO()
+ smart_protocol = protocol.SmartServerRequestProtocolOne(mem_transport,
+ out_stream.write)
+ smart_protocol.accept_bytes('readv\x01foo\n3\n3,3done\n')
+ self.assertEqual(0, smart_protocol.next_read_size())
+ self.assertEqual('readv\n3\ndefdone\n', out_stream.getvalue())
+ self.assertEqual('', smart_protocol.unused_data)
+ self.assertEqual('', smart_protocol.in_buffer)
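+ # Note on the version one encoding: request arguments are separated by
+ # '\x01' and terminated by '\n', and a body is sent as its decimal
+ # length, '\n', the body bytes, then 'done\n' - hence the
+ # 'readv\x01foo\n3\n3,3done\n' request above.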
+
+ def test_accept_excess_bytes_are_preserved(self):
+ out_stream = StringIO()
+ smart_protocol = protocol.SmartServerRequestProtocolOne(
+ None, out_stream.write)
+ smart_protocol.accept_bytes('hello\nhello\n')
+ self.assertEqual("ok\x012\n", out_stream.getvalue())
+ self.assertEqual("hello\n", smart_protocol.unused_data)
+ self.assertEqual("", smart_protocol.in_buffer)
+
+ def test_accept_excess_bytes_after_body(self):
+ protocol = self.build_protocol_waiting_for_body()
+ protocol.accept_bytes('7\nabcdefgdone\nX')
+ self.assertTrue(self.end_received)
+ self.assertEqual("X", protocol.unused_data)
+ self.assertEqual("", protocol.in_buffer)
+ protocol.accept_bytes('Y')
+ self.assertEqual("XY", protocol.unused_data)
+ self.assertEqual("", protocol.in_buffer)
+
+ def test_accept_excess_bytes_after_dispatch(self):
+ out_stream = StringIO()
+ smart_protocol = protocol.SmartServerRequestProtocolOne(
+ None, out_stream.write)
+ smart_protocol.accept_bytes('hello\n')
+ self.assertEqual("ok\x012\n", out_stream.getvalue())
+ smart_protocol.accept_bytes('hel')
+ self.assertEqual("hel", smart_protocol.unused_data)
+ smart_protocol.accept_bytes('lo\n')
+ self.assertEqual("hello\n", smart_protocol.unused_data)
+ self.assertEqual("", smart_protocol.in_buffer)
+
+ def test__send_response_sets_finished_reading(self):
+ smart_protocol = protocol.SmartServerRequestProtocolOne(
+ None, lambda x: None)
+ self.assertEqual(1, smart_protocol.next_read_size())
+ smart_protocol._send_response(
+ _mod_request.SuccessfulSmartServerResponse(('x',)))
+ self.assertEqual(0, smart_protocol.next_read_size())
+
+ def test__send_response_errors_with_base_response(self):
+ """Ensure that only the Successful/Failed subclasses are used."""
+ smart_protocol = protocol.SmartServerRequestProtocolOne(
+ None, lambda x: None)
+ self.assertRaises(AttributeError, smart_protocol._send_response,
+ _mod_request.SmartServerResponse(('x',)))
+
+ def test_query_version(self):
+ """query_version on a SmartClientProtocolOne should return a number.
+
+ The protocol provides query_version because domain-level clients may all
+ need to be able to probe for capabilities.
+ """
+ # What we really want to test here is that SmartClientProtocolOne calls
+ # accept_bytes(tuple_based_encoding_of_hello) and reads and parses the
+ # response of tuple-encoded (ok, 1). Also, separately we should test
+ # the error if the response is a non-understood version.
+ input = StringIO('ok\x012\n')
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ self.assertEqual(2, smart_protocol.query_version())
+
+ def test_client_call_empty_response(self):
+ # protocol.call() can get back an empty tuple as a response. This occurs
+ # when the parsed line is an empty line, and results in a tuple with
+ # one element - an empty string.
+ self.assertServerToClientEncoding('\n', ('', ), [(), ('', )])
+
+ def test_client_call_three_element_response(self):
+ # protocol.call() can get back tuples of other lengths. A three element
+ # tuple should be unpacked as three strings.
+ self.assertServerToClientEncoding('a\x01b\x0134\n', ('a', 'b', '34'),
+ [('a', 'b', '34')])
+
+ def test_client_call_with_body_bytes_uploads(self):
+ # protocol.call_with_body_bytes should length-prefix the bytes onto the
+ # wire.
+ expected_bytes = "foo\n7\nabcdefgdone\n"
+ input = StringIO("\n")
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ smart_protocol.call_with_body_bytes(('foo', ), "abcdefg")
+ self.assertEqual(expected_bytes, output.getvalue())
+
+ def test_client_call_with_body_readv_array(self):
+ # protocol.call_with_body_readv_array should encode the readv array and then
+ # length-prefix the bytes onto the wire.
+ expected_bytes = "foo\n7\n1,2\n5,6done\n"
+ input = StringIO("\n")
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ smart_protocol.call_with_body_readv_array(('foo', ), [(1,2),(5,6)])
+ self.assertEqual(expected_bytes, output.getvalue())
+
+ def _test_client_read_response_tuple_raises_UnknownSmartMethod(self,
+ server_bytes):
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ smart_protocol.call('foo')
+ self.assertRaises(
+ errors.UnknownSmartMethod, smart_protocol.read_response_tuple)
+ # The request has been finished. There is no body to read, and
+ # attempts to read one will fail.
+ self.assertRaises(
+ errors.ReadingCompleted, smart_protocol.read_body_bytes)
+
+ def test_client_read_response_tuple_raises_UnknownSmartMethod(self):
+ """read_response_tuple raises UnknownSmartMethod if the response says
+ the server did not recognise the request.
+ """
+ server_bytes = (
+ "error\x01Generic bzr smart protocol error: bad request 'foo'\n")
+ self._test_client_read_response_tuple_raises_UnknownSmartMethod(
+ server_bytes)
+
+ def test_client_read_response_tuple_raises_UnknownSmartMethod_0_11(self):
+ """read_response_tuple also raises UnknownSmartMethod if the response
+ from a bzr 0.11 says the server did not recognise the request.
+
+ (bzr 0.11 sends a slightly different error message to later versions.)
+ """
+ server_bytes = (
+ "error\x01Generic bzr smart protocol error: bad request u'foo'\n")
+ self._test_client_read_response_tuple_raises_UnknownSmartMethod(
+ server_bytes)
+
+ def test_client_read_body_bytes_all(self):
+ # read_body_bytes should decode the body bytes from the wire into
+ # a response.
+ expected_bytes = "1234567"
+ server_bytes = "ok\n7\n1234567done\n"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ self.assertEqual(expected_bytes, smart_protocol.read_body_bytes())
+
+ def test_client_read_body_bytes_incremental(self):
+ # test reading a few bytes at a time from the body
+ # XXX: possibly we should test dribbling the bytes into the stringio
+ # to make the state machine work harder: however, as we use the
+ # LengthPrefixedBodyDecoder that is already well tested - we can skip
+ # that.
+ expected_bytes = "1234567"
+ server_bytes = "ok\n7\n1234567done\n"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ self.assertEqual(expected_bytes[0:2], smart_protocol.read_body_bytes(2))
+ self.assertEqual(expected_bytes[2:4], smart_protocol.read_body_bytes(2))
+ self.assertEqual(expected_bytes[4:6], smart_protocol.read_body_bytes(2))
+ self.assertEqual(expected_bytes[6], smart_protocol.read_body_bytes())
+
+ def test_client_cancel_read_body_does_not_eat_body_bytes(self):
+ # cancelling the expected body needs to finish the request, but not
+ # read any more bytes.
+ expected_bytes = "1234567"
+ server_bytes = "ok\n7\n1234567done\n"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolOne(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ smart_protocol.cancel_read_body()
+ self.assertEqual(3, input.tell())
+ self.assertRaises(
+ errors.ReadingCompleted, smart_protocol.read_body_bytes)
+
+ def test_client_read_body_bytes_interrupted_connection(self):
+ server_bytes = "ok\n999\nincomplete body"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ self.assertRaises(
+ errors.ConnectionReset, smart_protocol.read_body_bytes)
+
+
+class TestVersionOneFeaturesInProtocolTwo(
+ TestSmartProtocol, CommonSmartProtocolTestMixin):
+ """Tests for version one smart protocol features as implemeted by version
+ two.
+ """
+
+ client_protocol_class = protocol.SmartClientRequestProtocolTwo
+ server_protocol_class = protocol.SmartServerRequestProtocolTwo
+
+ def test_construct_version_two_server_protocol(self):
+ smart_protocol = protocol.SmartServerRequestProtocolTwo(None, None)
+ self.assertEqual('', smart_protocol.unused_data)
+ self.assertEqual('', smart_protocol.in_buffer)
+ self.assertFalse(smart_protocol._has_dispatched)
+ self.assertEqual(1, smart_protocol.next_read_size())
+
+ def test_construct_version_two_client_protocol(self):
+ # we can construct a client protocol from a client medium request
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ None, output, 'base')
+ request = client_medium.get_request()
+ client_protocol = protocol.SmartClientRequestProtocolTwo(request)
+
+ def test_accept_bytes_of_bad_request_to_protocol(self):
+ out_stream = StringIO()
+ smart_protocol = self.server_protocol_class(None, out_stream.write)
+ smart_protocol.accept_bytes('abc')
+ self.assertEqual('abc', smart_protocol.in_buffer)
+ smart_protocol.accept_bytes('\n')
+ self.assertEqual(
+ self.response_marker +
+ "failed\nerror\x01Generic bzr smart protocol error: bad request 'abc'\n",
+ out_stream.getvalue())
+ self.assertTrue(smart_protocol._has_dispatched)
+ self.assertEqual(0, smart_protocol.next_read_size())
+
+ def test_accept_body_bytes_to_protocol(self):
+ protocol = self.build_protocol_waiting_for_body()
+ self.assertEqual(6, protocol.next_read_size())
+ protocol.accept_bytes('7\nabc')
+ self.assertEqual(9, protocol.next_read_size())
+ protocol.accept_bytes('defgd')
+ protocol.accept_bytes('one\n')
+ self.assertEqual(0, protocol.next_read_size())
+ self.assertTrue(self.end_received)
+
+ def test_accept_request_and_body_all_at_once(self):
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+ mem_transport = memory.MemoryTransport()
+ mem_transport.put_bytes('foo', 'abcdefghij')
+ out_stream = StringIO()
+ smart_protocol = self.server_protocol_class(
+ mem_transport, out_stream.write)
+ smart_protocol.accept_bytes('readv\x01foo\n3\n3,3done\n')
+ self.assertEqual(0, smart_protocol.next_read_size())
+ self.assertEqual(self.response_marker +
+ 'success\nreadv\n3\ndefdone\n',
+ out_stream.getvalue())
+ self.assertEqual('', smart_protocol.unused_data)
+ self.assertEqual('', smart_protocol.in_buffer)
+
+ def test_accept_excess_bytes_are_preserved(self):
+ out_stream = StringIO()
+ smart_protocol = self.server_protocol_class(None, out_stream.write)
+ smart_protocol.accept_bytes('hello\nhello\n')
+ self.assertEqual(self.response_marker + "success\nok\x012\n",
+ out_stream.getvalue())
+ self.assertEqual("hello\n", smart_protocol.unused_data)
+ self.assertEqual("", smart_protocol.in_buffer)
+
+ def test_accept_excess_bytes_after_body(self):
+ # The excess bytes look like the start of another request.
+ server_protocol = self.build_protocol_waiting_for_body()
+ server_protocol.accept_bytes('7\nabcdefgdone\n' + self.response_marker)
+ self.assertTrue(self.end_received)
+ self.assertEqual(self.response_marker,
+ server_protocol.unused_data)
+ self.assertEqual("", server_protocol.in_buffer)
+ server_protocol.accept_bytes('Y')
+ self.assertEqual(self.response_marker + "Y",
+ server_protocol.unused_data)
+ self.assertEqual("", server_protocol.in_buffer)
+
+ def test_accept_excess_bytes_after_dispatch(self):
+ out_stream = StringIO()
+ smart_protocol = self.server_protocol_class(None, out_stream.write)
+ smart_protocol.accept_bytes('hello\n')
+ self.assertEqual(self.response_marker + "success\nok\x012\n",
+ out_stream.getvalue())
+ smart_protocol.accept_bytes(self.request_marker + 'hel')
+ self.assertEqual(self.request_marker + "hel",
+ smart_protocol.unused_data)
+ smart_protocol.accept_bytes('lo\n')
+ self.assertEqual(self.request_marker + "hello\n",
+ smart_protocol.unused_data)
+ self.assertEqual("", smart_protocol.in_buffer)
+
+ def test__send_response_sets_finished_reading(self):
+ smart_protocol = self.server_protocol_class(None, lambda x: None)
+ self.assertEqual(1, smart_protocol.next_read_size())
+ smart_protocol._send_response(
+ _mod_request.SuccessfulSmartServerResponse(('x',)))
+ self.assertEqual(0, smart_protocol.next_read_size())
+
+ def test__send_response_errors_with_base_response(self):
+ """Ensure that only the Successful/Failed subclasses are used."""
+ smart_protocol = self.server_protocol_class(None, lambda x: None)
+ self.assertRaises(AttributeError, smart_protocol._send_response,
+ _mod_request.SmartServerResponse(('x',)))
+
+ def test_query_version(self):
+ """query_version on a SmartClientProtocolTwo should return a number.
+
+ The protocol provides query_version because domain-level clients may all
+ need to be able to probe for capabilities.
+ """
+ # What we really want to test here is that SmartClientProtocolTwo calls
+ # accept_bytes(tuple_based_encoding_of_hello) and reads and parses the
+ # response of tuple-encoded (ok, 1). Also, separately we should test
+ # the error if the response is a non-understood version.
+ input = StringIO(self.response_marker + 'success\nok\x012\n')
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ self.assertEqual(2, smart_protocol.query_version())
+
+ def test_client_call_empty_response(self):
+ # protocol.call() can get back an empty tuple as a response. This occurs
+ # when the parsed line is an empty line, and results in a tuple with
+ # one element - an empty string.
+ self.assertServerToClientEncoding(
+ self.response_marker + 'success\n\n', ('', ), [(), ('', )])
+
+ def test_client_call_three_element_response(self):
+ # protocol.call() can get back tuples of other lengths. A three element
+ # tuple should be unpacked as three strings.
+ self.assertServerToClientEncoding(
+ self.response_marker + 'success\na\x01b\x0134\n',
+ ('a', 'b', '34'),
+ [('a', 'b', '34')])
+
+ def test_client_call_with_body_bytes_uploads(self):
+ # protocol.call_with_body_bytes should length-prefix the bytes onto the
+ # wire.
+ expected_bytes = self.request_marker + "foo\n7\nabcdefgdone\n"
+ input = StringIO("\n")
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call_with_body_bytes(('foo', ), "abcdefg")
+ self.assertEqual(expected_bytes, output.getvalue())
+
+ def test_client_call_with_body_readv_array(self):
+ # protocol.call_with_body_readv_array should encode the readv array and then
+ # length-prefix the bytes onto the wire.
+ expected_bytes = self.request_marker + "foo\n7\n1,2\n5,6done\n"
+ input = StringIO("\n")
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call_with_body_readv_array(('foo', ), [(1,2),(5,6)])
+ self.assertEqual(expected_bytes, output.getvalue())
+
+ def test_client_read_body_bytes_all(self):
+ # read_body_bytes should decode the body bytes from the wire into
+ # a response.
+ expected_bytes = "1234567"
+ server_bytes = (self.response_marker +
+ "success\nok\n7\n1234567done\n")
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ self.assertEqual(expected_bytes, smart_protocol.read_body_bytes())
+
+ def test_client_read_body_bytes_incremental(self):
+ # test reading a few bytes at a time from the body
+ # XXX: possibly we should test dribbling the bytes into the stringio
+ # to make the state machine work harder: however, as we use the
+ # LengthPrefixedBodyDecoder that is already well tested - we can skip
+ # that.
+ expected_bytes = "1234567"
+ server_bytes = self.response_marker + "success\nok\n7\n1234567done\n"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ self.assertEqual(expected_bytes[0:2], smart_protocol.read_body_bytes(2))
+ self.assertEqual(expected_bytes[2:4], smart_protocol.read_body_bytes(2))
+ self.assertEqual(expected_bytes[4:6], smart_protocol.read_body_bytes(2))
+ self.assertEqual(expected_bytes[6], smart_protocol.read_body_bytes())
+
+ def test_client_cancel_read_body_does_not_eat_body_bytes(self):
+ # cancelling the expected body needs to finish the request, but not
+ # read any more bytes.
+ server_bytes = self.response_marker + "success\nok\n7\n1234567done\n"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ smart_protocol.cancel_read_body()
+ self.assertEqual(len(self.response_marker + 'success\nok\n'),
+ input.tell())
+ self.assertRaises(
+ errors.ReadingCompleted, smart_protocol.read_body_bytes)
+
+ def test_client_read_body_bytes_interrupted_connection(self):
+ server_bytes = (self.response_marker +
+ "success\nok\n999\nincomplete body")
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = self.client_protocol_class(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ self.assertRaises(
+ errors.ConnectionReset, smart_protocol.read_body_bytes)
+
+
+class TestSmartProtocolTwoSpecificsMixin(object):
+
+ def assertBodyStreamSerialisation(self, expected_serialisation,
+ body_stream):
+ """Assert that body_stream is serialised as expected_serialisation."""
+ out_stream = StringIO()
+ protocol._send_stream(body_stream, out_stream.write)
+ self.assertEqual(expected_serialisation, out_stream.getvalue())
+
+ def assertBodyStreamRoundTrips(self, body_stream):
+ """Assert that body_stream is the same after being serialised and
+ deserialised.
+ """
+ out_stream = StringIO()
+ protocol._send_stream(body_stream, out_stream.write)
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes(out_stream.getvalue())
+ decoded_stream = list(iter(decoder.read_next_chunk, None))
+ self.assertEqual(body_stream, decoded_stream)
+
+ def test_body_stream_serialisation_empty(self):
+ """A body_stream with no bytes can be serialised."""
+ self.assertBodyStreamSerialisation('chunked\nEND\n', [])
+ self.assertBodyStreamRoundTrips([])
+
+ def test_body_stream_serialisation(self):
+ stream = ['chunk one', 'chunk two', 'chunk three']
+ self.assertBodyStreamSerialisation(
+ 'chunked\n' + '9\nchunk one' + '9\nchunk two' + 'b\nchunk three' +
+ 'END\n',
+ stream)
+ self.assertBodyStreamRoundTrips(stream)
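+ # Note on the chunked encoding: each chunk is prefixed by its length in
+ # hexadecimal followed by '\n', so the 'b\n' above announces the 11-byte
+ # chunk 'chunk three'.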
+
+ def test_body_stream_with_empty_element_serialisation(self):
+ """A body stream can include ''.
+
+ The empty string can be transmitted like any other string.
+ """
+ stream = ['', 'chunk']
+ self.assertBodyStreamSerialisation(
+ 'chunked\n' + '0\n' + '5\nchunk' + 'END\n', stream)
+ self.assertBodyStreamRoundTrips(stream)
+
+ def test_body_stream_error_serialisation(self):
+ stream = ['first chunk',
+ _mod_request.FailedSmartServerResponse(
+ ('FailureName', 'failure arg'))]
+ expected_bytes = (
+ 'chunked\n' + 'b\nfirst chunk' +
+ 'ERR\n' + 'b\nFailureName' + 'b\nfailure arg' +
+ 'END\n')
+ self.assertBodyStreamSerialisation(expected_bytes, stream)
+ self.assertBodyStreamRoundTrips(stream)
+
+ def test__send_response_includes_failure_marker(self):
+ """FailedSmartServerResponse have 'failed\n' after the version."""
+ out_stream = StringIO()
+ smart_protocol = protocol.SmartServerRequestProtocolTwo(
+ None, out_stream.write)
+ smart_protocol._send_response(
+ _mod_request.FailedSmartServerResponse(('x',)))
+ self.assertEqual(protocol.RESPONSE_VERSION_TWO + 'failed\nx\n',
+ out_stream.getvalue())
+
+ def test__send_response_includes_success_marker(self):
+ """SuccessfulSmartServerResponse have 'success\n' after the version."""
+ out_stream = StringIO()
+ smart_protocol = protocol.SmartServerRequestProtocolTwo(
+ None, out_stream.write)
+ smart_protocol._send_response(
+ _mod_request.SuccessfulSmartServerResponse(('x',)))
+ self.assertEqual(protocol.RESPONSE_VERSION_TWO + 'success\nx\n',
+ out_stream.getvalue())
+
+ def test__send_response_with_body_stream_sets_finished_reading(self):
+ smart_protocol = protocol.SmartServerRequestProtocolTwo(
+ None, lambda x: None)
+ self.assertEqual(1, smart_protocol.next_read_size())
+ smart_protocol._send_response(
+ _mod_request.SuccessfulSmartServerResponse(('x',), body_stream=[]))
+ self.assertEqual(0, smart_protocol.next_read_size())
+
+ def test_streamed_body_bytes(self):
+ body_header = 'chunked\n'
+ two_body_chunks = "4\n1234" + "3\n567"
+ body_terminator = "END\n"
+ server_bytes = (protocol.RESPONSE_VERSION_TWO +
+ "success\nok\n" + body_header + two_body_chunks +
+ body_terminator)
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolTwo(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ stream = smart_protocol.read_streamed_body()
+ self.assertEqual(['1234', '567'], list(stream))
+
+ def test_read_streamed_body_error(self):
+ """When a stream is interrupted by an error..."""
+ body_header = 'chunked\n'
+ a_body_chunk = '4\naaaa'
+ err_signal = 'ERR\n'
+ err_chunks = 'a\nerror arg1' + '4\narg2'
+ finish = 'END\n'
+ body = body_header + a_body_chunk + err_signal + err_chunks + finish
+ server_bytes = (protocol.RESPONSE_VERSION_TWO +
+ "success\nok\n" + body)
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ smart_request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolTwo(smart_request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ expected_chunks = [
+ 'aaaa',
+ _mod_request.FailedSmartServerResponse(('error arg1', 'arg2'))]
+ stream = smart_protocol.read_streamed_body()
+ self.assertEqual(expected_chunks, list(stream))
+
+ def test_streamed_body_bytes_interrupted_connection(self):
+ body_header = 'chunked\n'
+ incomplete_body_chunk = "9999\nincomplete chunk"
+ server_bytes = (protocol.RESPONSE_VERSION_TWO +
+ "success\nok\n" + body_header + incomplete_body_chunk)
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolTwo(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(True)
+ stream = smart_protocol.read_streamed_body()
+ self.assertRaises(errors.ConnectionReset, stream.next)
+
+ def test_client_read_response_tuple_sets_response_status(self):
+ server_bytes = protocol.RESPONSE_VERSION_TWO + "success\nok\n"
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolTwo(request)
+ smart_protocol.call('foo')
+ smart_protocol.read_response_tuple(False)
+ self.assertEqual(True, smart_protocol.response_status)
+
+ def test_client_read_response_tuple_raises_UnknownSmartMethod(self):
+ """read_response_tuple raises UnknownSmartMethod if the response says
+ the server did not recognise the request.
+ """
+ server_bytes = (
+ protocol.RESPONSE_VERSION_TWO +
+ "failed\n" +
+ "error\x01Generic bzr smart protocol error: bad request 'foo'\n")
+ input = StringIO(server_bytes)
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'base')
+ request = client_medium.get_request()
+ smart_protocol = protocol.SmartClientRequestProtocolTwo(request)
+ smart_protocol.call('foo')
+ self.assertRaises(
+ errors.UnknownSmartMethod, smart_protocol.read_response_tuple)
+ # The request has been finished. There is no body to read, and
+ # attempts to read one will fail.
+ self.assertRaises(
+ errors.ReadingCompleted, smart_protocol.read_body_bytes)
+
+
+class TestSmartProtocolTwoSpecifics(
+ TestSmartProtocol, TestSmartProtocolTwoSpecificsMixin):
+ """Tests for aspects of smart protocol version two that are unique to
+ version two.
+
+ Thus tests involving body streams and success/failure markers belong here.
+ """
+
+ client_protocol_class = protocol.SmartClientRequestProtocolTwo
+ server_protocol_class = protocol.SmartServerRequestProtocolTwo
+
+
+class TestVersionOneFeaturesInProtocolThree(
+ TestSmartProtocol, CommonSmartProtocolTestMixin):
+ """Tests for version one smart protocol features as implemented by version
+ three.
+ """
+
+ request_encoder = protocol.ProtocolThreeRequester
+ response_decoder = protocol.ProtocolThreeDecoder
+ # build_server_protocol_three is a function, so we can't set it as a class
+ # attribute directly, because then Python will assume it is actually a
+ # method. So we make server_protocol_class be a static method, rather than
+ # simply doing:
+ # "server_protocol_class = protocol.build_server_protocol_three".
+ server_protocol_class = staticmethod(protocol.build_server_protocol_three)
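+ # (In Python 2 a plain function stored as a class attribute is wrapped as
+ # an unbound method, so self.server_protocol_class(...) would implicitly
+ # receive self as an extra first argument; staticmethod() avoids that
+ # wrapping.)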
+
+ def setUp(self):
+ super(TestVersionOneFeaturesInProtocolThree, self).setUp()
+ self.response_marker = protocol.MESSAGE_VERSION_THREE
+ self.request_marker = protocol.MESSAGE_VERSION_THREE
+
+ def test_construct_version_three_server_protocol(self):
+ smart_protocol = protocol.ProtocolThreeDecoder(None)
+ self.assertEqual('', smart_protocol.unused_data)
+ self.assertEqual([], smart_protocol._in_buffer_list)
+ self.assertEqual(0, smart_protocol._in_buffer_len)
+ self.assertFalse(smart_protocol._has_dispatched)
+ # The protocol starts by expecting four bytes, a length prefix for the
+ # headers.
+ self.assertEqual(4, smart_protocol.next_read_size())
+
+
+class LoggingMessageHandler(object):
+
+ def __init__(self):
+ self.event_log = []
+
+ def _log(self, *args):
+ self.event_log.append(args)
+
+ def headers_received(self, headers):
+ self._log('headers', headers)
+
+ def protocol_error(self, exception):
+ self._log('protocol_error', exception)
+
+ def byte_part_received(self, byte):
+ self._log('byte', byte)
+
+ def bytes_part_received(self, bytes):
+ self._log('bytes', bytes)
+
+ def structure_part_received(self, structure):
+ self._log('structure', structure)
+
+ def end_received(self):
+ self._log('end')
+
+
+class TestProtocolThree(TestSmartProtocol):
+ """Tests for v3 of the server-side protocol."""
+
+ request_encoder = protocol.ProtocolThreeRequester
+ response_decoder = protocol.ProtocolThreeDecoder
+ server_protocol_class = protocol.ProtocolThreeDecoder
+
+ def test_trivial_request(self):
+ """Smoke test for the simplest possible v3 request: empty headers, no
+ message parts.
+ """
+ output = StringIO()
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ end = 'e'
+ request_bytes = headers + end
+ smart_protocol = self.server_protocol_class(LoggingMessageHandler())
+ smart_protocol.accept_bytes(request_bytes)
+ self.assertEqual(0, smart_protocol.next_read_size())
+ self.assertEqual('', smart_protocol.unused_data)
+
+ def test_repeated_excess(self):
+ """Repeated calls to accept_bytes after the message end has been parsed
+ accumulate the bytes in the unused_data attribute.
+ """
+ output = StringIO()
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ end = 'e'
+ request_bytes = headers + end
+ smart_protocol = self.server_protocol_class(LoggingMessageHandler())
+ smart_protocol.accept_bytes(request_bytes)
+ self.assertEqual('', smart_protocol.unused_data)
+ smart_protocol.accept_bytes('aaa')
+ self.assertEqual('aaa', smart_protocol.unused_data)
+ smart_protocol.accept_bytes('bbb')
+ self.assertEqual('aaabbb', smart_protocol.unused_data)
+ self.assertEqual(0, smart_protocol.next_read_size())
+
+ def make_protocol_expecting_message_part(self):
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ message_handler = LoggingMessageHandler()
+ smart_protocol = self.server_protocol_class(message_handler)
+ smart_protocol.accept_bytes(headers)
+ # Clear the event log
+ del message_handler.event_log[:]
+ return smart_protocol, message_handler.event_log
+
+ def test_decode_one_byte(self):
+ """The protocol can decode a 'one byte' message part."""
+ smart_protocol, event_log = self.make_protocol_expecting_message_part()
+ smart_protocol.accept_bytes('ox')
+ self.assertEqual([('byte', 'x')], event_log)
+
+ def test_decode_bytes(self):
+ """The protocol can decode a 'bytes' message part."""
+ smart_protocol, event_log = self.make_protocol_expecting_message_part()
+ smart_protocol.accept_bytes(
+ 'b' # message part kind
+ '\0\0\0\x07' # length prefix
+ 'payload' # payload
+ )
+ self.assertEqual([('bytes', 'payload')], event_log)
+
+ def test_decode_structure(self):
+ """The protocol can decode a 'structure' message part."""
+ smart_protocol, event_log = self.make_protocol_expecting_message_part()
+ smart_protocol.accept_bytes(
+ 's' # message part kind
+ '\0\0\0\x07' # length prefix
+ 'l3:ARGe' # ['ARG']
+ )
+ self.assertEqual([('structure', ('ARG',))], event_log)
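+ # For example, a two-element structure ('foo', 'bar') would be encoded
+ # under the same scheme as 's' + '\0\0\0\x0c' + 'l3:foo3:bare'.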
+
+ def test_decode_multiple_bytes(self):
+ """The protocol can decode a multiple 'bytes' message parts."""
+ smart_protocol, event_log = self.make_protocol_expecting_message_part()
+ smart_protocol.accept_bytes(
+ 'b' # message part kind
+ '\0\0\0\x05' # length prefix
+ 'first' # payload
+ 'b' # message part kind
+ '\0\0\0\x06'
+ 'second'
+ )
+ self.assertEqual(
+ [('bytes', 'first'), ('bytes', 'second')], event_log)
+
+
+class TestConventionalResponseHandlerBodyStream(tests.TestCase):
+
+ def make_response_handler(self, response_bytes):
+ from bzrlib.smart.message import ConventionalResponseHandler
+ response_handler = ConventionalResponseHandler()
+ protocol_decoder = protocol.ProtocolThreeDecoder(response_handler)
+ # put decoder in desired state (waiting for message parts)
+ protocol_decoder.state_accept = protocol_decoder._state_accept_expecting_message_part
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ StringIO(response_bytes), output, 'base')
+ medium_request = client_medium.get_request()
+ medium_request.finished_writing()
+ response_handler.setProtoAndMediumRequest(
+ protocol_decoder, medium_request)
+ return response_handler
+
+ def test_interrupted_by_error(self):
+ response_handler = self.make_response_handler(interrupted_body_stream)
+ stream = response_handler.read_streamed_body()
+ self.assertEqual('aaa', stream.next())
+ self.assertEqual('bbb', stream.next())
+ exc = self.assertRaises(errors.ErrorFromSmartServer, stream.next)
+ self.assertEqual(('error', 'Exception', 'Boom!'), exc.error_tuple)
+
+ def test_interrupted_by_connection_lost(self):
+ interrupted_body_stream = (
+ 'oS' # successful response
+ 's\0\0\0\x02le' # empty args
+ 'b\0\0\xff\xffincomplete chunk')
+ response_handler = self.make_response_handler(interrupted_body_stream)
+ stream = response_handler.read_streamed_body()
+ self.assertRaises(errors.ConnectionReset, stream.next)
+
+ def test_read_body_bytes_interrupted_by_connection_lost(self):
+ interrupted_body_stream = (
+ 'oS' # successful response
+ 's\0\0\0\x02le' # empty args
+ 'b\0\0\xff\xffincomplete chunk')
+ response_handler = self.make_response_handler(interrupted_body_stream)
+ self.assertRaises(
+ errors.ConnectionReset, response_handler.read_body_bytes)
+
+ def test_multiple_bytes_parts(self):
+ multiple_bytes_parts = (
+ 'oS' # successful response
+ 's\0\0\0\x02le' # empty args
+ 'b\0\0\0\x0bSome bytes\n' # some bytes
+ 'b\0\0\0\x0aMore bytes' # more bytes
+ 'e' # message end
+ )
+ response_handler = self.make_response_handler(multiple_bytes_parts)
+ self.assertEqual(
+ 'Some bytes\nMore bytes', response_handler.read_body_bytes())
+ response_handler = self.make_response_handler(multiple_bytes_parts)
+ self.assertEqual(
+ ['Some bytes\n', 'More bytes'],
+ list(response_handler.read_streamed_body()))
+
+
+class FakeResponder(object):
+
+ response_sent = False
+
+ def send_error(self, exc):
+ raise exc
+
+ def send_response(self, response):
+ pass
+
+
+class TestConventionalRequestHandlerBodyStream(tests.TestCase):
+ """Tests for ConventionalRequestHandler's handling of request bodies."""
+
+ def make_request_handler(self, request_bytes):
+ """Make a ConventionalRequestHandler for the given bytes using test
+ doubles for the request_handler and the responder.
+ """
+ from bzrlib.smart.message import ConventionalRequestHandler
+ request_handler = InstrumentedRequestHandler()
+ request_handler.response = _mod_request.SuccessfulSmartServerResponse(('arg', 'arg'))
+ responder = FakeResponder()
+ message_handler = ConventionalRequestHandler(request_handler, responder)
+ protocol_decoder = protocol.ProtocolThreeDecoder(message_handler)
+ # put decoder in desired state (waiting for message parts)
+ protocol_decoder.state_accept = protocol_decoder._state_accept_expecting_message_part
+ protocol_decoder.accept_bytes(request_bytes)
+ return request_handler
+
+ def test_multiple_bytes_parts(self):
+ """Each bytes part triggers a call to the request_handler's
+ accept_body method.
+ """
+ multiple_bytes_parts = (
+ 's\0\0\0\x07l3:fooe' # args
+ 'b\0\0\0\x0bSome bytes\n' # some bytes
+ 'b\0\0\0\x0aMore bytes' # more bytes
+ 'e' # message end
+ )
+ request_handler = self.make_request_handler(multiple_bytes_parts)
+ accept_body_calls = [
+ call_info[1] for call_info in request_handler.calls
+ if call_info[0] == 'accept_body']
+ self.assertEqual(
+ ['Some bytes\n', 'More bytes'], accept_body_calls)
+
+ def test_error_flag_after_body(self):
+ body_then_error = (
+ 's\0\0\0\x07l3:fooe' # request args
+ 'b\0\0\0\x0bSome bytes\n' # some bytes
+ 'b\0\0\0\x0aMore bytes' # more bytes
+ 'oE' # error flag
+ 's\0\0\0\x07l3:bare' # error args
+ 'e' # message end
+ )
+ request_handler = self.make_request_handler(body_then_error)
+ self.assertEqual(
+ [('post_body_error_received', ('bar',)), ('end_received',)],
+ request_handler.calls[-2:])
+
+
+class TestMessageHandlerErrors(tests.TestCase):
+ """Tests for v3 that unrecognised (but well-formed) requests/responses are
+ still fully read off the wire, so that subsequent requests/responses on the
+ same medium can be decoded.
+ """
+
+ def test_non_conventional_request(self):
+ """ConventionalRequestHandler (the default message handler on the
+ server side) will reject an unconventional message, but still consume
+ all the bytes of that message and signal when it has done so.
+
+ This is what allows a server to continue to accept requests after the
+ client sends a completely unrecognised request.
+ """
+ # Define an invalid request (but one that is a well-formed message).
+ # This particular invalid request not only lacks the mandatory
+ # verb+args tuple, it has a single-byte part, which is forbidden. In
+ # fact it has that part twice, to trigger multiple errors.
+ invalid_request = (
+ protocol.MESSAGE_VERSION_THREE + # protocol version marker
+ '\0\0\0\x02de' + # empty headers
+ 'oX' + # a single byte part: 'X'. ConventionalRequestHandler will
+ # error at this part.
+ 'oX' + # and again.
+ 'e' # end of message
+ )
+
+ to_server = StringIO(invalid_request)
+ from_server = StringIO()
+ transport = memory.MemoryTransport('memory:///')
+ server = medium.SmartServerPipeStreamMedium(
+ to_server, from_server, transport, timeout=4.0)
+ proto = server._build_protocol()
+ message_handler = proto.message_handler
+ server._serve_one_request(proto)
+ # All the bytes have been read from the medium...
+ self.assertEqual('', to_server.read())
+ # ...and the protocol decoder has consumed all the bytes, and has
+ # finished reading.
+ self.assertEqual('', proto.unused_data)
+ self.assertEqual(0, proto.next_read_size())
+
+
+class InstrumentedRequestHandler(object):
+ """Test Double of SmartServerRequestHandler."""
+
+ def __init__(self):
+ self.calls = []
+ self.finished_reading = False
+
+ def no_body_received(self):
+ self.calls.append(('no_body_received',))
+
+ def end_received(self):
+ self.calls.append(('end_received',))
+ self.finished_reading = True
+
+ def args_received(self, args):
+ self.calls.append(('args_received', args))
+
+ def accept_body(self, bytes):
+ self.calls.append(('accept_body', bytes))
+
+ def end_of_body(self):
+ self.calls.append(('end_of_body',))
+ self.finished_reading = True
+
+ def post_body_error_received(self, error_args):
+ self.calls.append(('post_body_error_received', error_args))
+
+
+class StubRequest(object):
+
+ def finished_reading(self):
+ pass
+
+
+class TestClientDecodingProtocolThree(TestSmartProtocol):
+ """Tests for v3 of the client-side protocol decoding."""
+
+ def make_logging_response_decoder(self):
+ """Make v3 response decoder using a test response handler."""
+ response_handler = LoggingMessageHandler()
+ decoder = protocol.ProtocolThreeDecoder(response_handler)
+ return decoder, response_handler
+
+ def make_conventional_response_decoder(self):
+ """Make v3 response decoder using a conventional response handler."""
+ response_handler = message.ConventionalResponseHandler()
+ decoder = protocol.ProtocolThreeDecoder(response_handler)
+ response_handler.setProtoAndMediumRequest(decoder, StubRequest())
+ return decoder, response_handler
+
+ def test_trivial_response_decoding(self):
+ """Smoke test for the simplest possible v3 response: empty headers,
+ status byte, empty args, no body.
+ """
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ response_status = 'oS' # success
+ args = 's\0\0\0\x02le' # length-prefixed, bencoded empty list
+ end = 'e' # end marker
+ message_bytes = headers + response_status + args + end
+ decoder, response_handler = self.make_logging_response_decoder()
+ decoder.accept_bytes(message_bytes)
+ # The protocol decoder has finished, and consumed all bytes
+ self.assertEqual(0, decoder.next_read_size())
+ self.assertEqual('', decoder.unused_data)
+ # The message handler has been invoked with all the parts of the
+ # trivial response: empty headers, status byte, no args, end.
+ self.assertEqual(
+ [('headers', {}), ('byte', 'S'), ('structure', ()), ('end',)],
+ response_handler.event_log)
+
+ def test_incomplete_message(self):
+ """A decoder will keep signalling that it needs more bytes via
+ next_read_size() != 0 until it has seen a complete message, regardless
+ which state it is in.
+ """
+ # Define a simple response that uses all possible message parts.
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ response_status = 'oS' # success
+ args = 's\0\0\0\x02le' # length-prefixed, bencoded empty list
+ body = 'b\0\0\0\x04BODY' # a body: 'BODY'
+ end = 'e' # end marker
+ simple_response = headers + response_status + args + body + end
+ # Feed the response to the decoder one byte at a time.
+ decoder, response_handler = self.make_logging_response_decoder()
+ for byte in simple_response:
+ self.assertNotEqual(0, decoder.next_read_size())
+ decoder.accept_bytes(byte)
+ # Now the response is complete
+ self.assertEqual(0, decoder.next_read_size())
+
+ def test_read_response_tuple_raises_UnknownSmartMethod(self):
+ """read_response_tuple raises UnknownSmartMethod if the server replied
+ with 'UnknownMethod'.
+ """
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ response_status = 'oE' # error flag
+ # args: ('UnknownMethod', 'method-name')
+ args = 's\0\0\0\x20l13:UnknownMethod11:method-namee'
+ end = 'e' # end marker
+ message_bytes = headers + response_status + args + end
+ decoder, response_handler = self.make_conventional_response_decoder()
+ decoder.accept_bytes(message_bytes)
+ error = self.assertRaises(
+ errors.UnknownSmartMethod, response_handler.read_response_tuple)
+ self.assertEqual('method-name', error.verb)
+
+ def test_read_response_tuple_error(self):
+ """If the response has an error, it is raised as an exception."""
+ headers = '\0\0\0\x02de' # length-prefixed, bencoded empty dict
+ response_status = 'oE' # error
+ args = 's\0\0\0\x1al9:first arg10:second arge' # two args
+ end = 'e' # end marker
+ message_bytes = headers + response_status + args + end
+ decoder, response_handler = self.make_conventional_response_decoder()
+ decoder.accept_bytes(message_bytes)
+ error = self.assertRaises(
+ errors.ErrorFromSmartServer, response_handler.read_response_tuple)
+ self.assertEqual(('first arg', 'second arg'), error.error_tuple)
+
+
+class TestClientEncodingProtocolThree(TestSmartProtocol):
+
+ request_encoder = protocol.ProtocolThreeRequester
+ response_decoder = protocol.ProtocolThreeDecoder
+ server_protocol_class = protocol.ProtocolThreeDecoder
+
+ def make_client_encoder_and_output(self):
+ result = self.make_client_protocol_and_output()
+ requester, response_handler, output = result
+ return requester, output
+
+ def test_call_smoke_test(self):
+ """A smoke test for ProtocolThreeRequester.call.
+
+ This test checks that a particular simple invocation of call emits the
+ correct bytes for that invocation.
+ """
+ requester, output = self.make_client_encoder_and_output()
+ requester.set_headers({'header name': 'header value'})
+ requester.call('one arg')
+ self.assertEquals(
+ 'bzr message 3 (bzr 1.6)\n' # protocol version
+ '\x00\x00\x00\x1fd11:header name12:header valuee' # headers
+ 's\x00\x00\x00\x0bl7:one arge' # args
+ 'e', # end
+ output.getvalue())
+
+ def test_call_with_body_bytes_smoke_test(self):
+ """A smoke test for ProtocolThreeRequester.call_with_body_bytes.
+
+ This test checks that a particular simple invocation of
+ call_with_body_bytes emits the correct bytes for that invocation.
+ """
+ requester, output = self.make_client_encoder_and_output()
+ requester.set_headers({'header name': 'header value'})
+ requester.call_with_body_bytes(('one arg',), 'body bytes')
+ self.assertEquals(
+ 'bzr message 3 (bzr 1.6)\n' # protocol version
+ '\x00\x00\x00\x1fd11:header name12:header valuee' # headers
+ 's\x00\x00\x00\x0bl7:one arge' # args
+ 'b' # there is a prefixed body
+ '\x00\x00\x00\nbody bytes' # the prefixed body
+ 'e', # end
+ output.getvalue())
+
+ def test_call_writes_just_once(self):
+ """A bodyless request is written to the medium all at once."""
+ medium_request = StubMediumRequest()
+ encoder = protocol.ProtocolThreeRequester(medium_request)
+ encoder.call('arg1', 'arg2', 'arg3')
+ self.assertEqual(
+ ['accept_bytes', 'finished_writing'], medium_request.calls)
+
+ def test_call_with_body_bytes_writes_just_once(self):
+ """A request with body bytes is written to the medium all at once."""
+ medium_request = StubMediumRequest()
+ encoder = protocol.ProtocolThreeRequester(medium_request)
+ encoder.call_with_body_bytes(('arg', 'arg'), 'body bytes')
+ self.assertEqual(
+ ['accept_bytes', 'finished_writing'], medium_request.calls)
+
+ def test_call_with_body_stream_smoke_test(self):
+ """A smoke test for ProtocolThreeRequester.call_with_body_stream.
+
+ This test checks that a particular simple invocation of
+ call_with_body_stream emits the correct bytes for that invocation.
+ """
+ requester, output = self.make_client_encoder_and_output()
+ requester.set_headers({'header name': 'header value'})
+ stream = ['chunk 1', 'chunk two']
+ requester.call_with_body_stream(('one arg',), stream)
+ self.assertEquals(
+ 'bzr message 3 (bzr 1.6)\n' # protocol version
+ '\x00\x00\x00\x1fd11:header name12:header valuee' # headers
+ 's\x00\x00\x00\x0bl7:one arge' # args
+ 'b\x00\x00\x00\x07chunk 1' # a prefixed body chunk
+ 'b\x00\x00\x00\x09chunk two' # a prefixed body chunk
+ 'e', # end
+ output.getvalue())
+
+ def test_call_with_body_stream_empty_stream(self):
+ """call_with_body_stream with an empty stream."""
+ requester, output = self.make_client_encoder_and_output()
+ requester.set_headers({})
+ stream = []
+ requester.call_with_body_stream(('one arg',), stream)
+ self.assertEquals(
+ 'bzr message 3 (bzr 1.6)\n' # protocol version
+ '\x00\x00\x00\x02de' # headers
+ 's\x00\x00\x00\x0bl7:one arge' # args
+ # no body chunks
+ 'e', # end
+ output.getvalue())
+
+ def test_call_with_body_stream_error(self):
+ """call_with_body_stream will abort the streamed body with an
+ error if the stream raises an error during iteration.
+
+ The resulting request will still be a complete message.
+ """
+ requester, output = self.make_client_encoder_and_output()
+ requester.set_headers({})
+ def stream_that_fails():
+ yield 'aaa'
+ yield 'bbb'
+ raise Exception('Boom!')
+ self.assertRaises(Exception, requester.call_with_body_stream,
+ ('one arg',), stream_that_fails())
+ self.assertEquals(
+ 'bzr message 3 (bzr 1.6)\n' # protocol version
+ '\x00\x00\x00\x02de' # headers
+ 's\x00\x00\x00\x0bl7:one arge' # args
+ 'b\x00\x00\x00\x03aaa' # body
+ 'b\x00\x00\x00\x03bbb' # more body
+ 'oE' # error flag
+ 's\x00\x00\x00\x09l5:errore' # error args: ('error',)
+ 'e', # end
+ output.getvalue())
+
+ def test_records_start_of_body_stream(self):
+ requester, output = self.make_client_encoder_and_output()
+ requester.set_headers({})
+ in_stream = [False]
+ def stream_checker():
+ self.assertTrue(requester.body_stream_started)
+ in_stream[0] = True
+ yield 'content'
+ flush_called = []
+ orig_flush = requester.flush
+ def tracked_flush():
+ flush_called.append(in_stream[0])
+ if in_stream[0]:
+ self.assertTrue(requester.body_stream_started)
+ else:
+ self.assertFalse(requester.body_stream_started)
+ return orig_flush()
+ requester.flush = tracked_flush
+ requester.call_with_body_stream(('one arg',), stream_checker())
+ self.assertEqual(
+ 'bzr message 3 (bzr 1.6)\n' # protocol version
+ '\x00\x00\x00\x02de' # headers
+ 's\x00\x00\x00\x0bl7:one arge' # args
+ 'b\x00\x00\x00\x07content' # body
+ 'e', output.getvalue())
+ self.assertEqual([False, True, True], flush_called)
+
+
+class StubMediumRequest(object):
+ """A stub medium request that tracks the number of times accept_bytes is
+ called.
+ """
+
+ def __init__(self):
+ self.calls = []
+ self._medium = 'dummy medium'
+
+ def accept_bytes(self, bytes):
+ self.calls.append('accept_bytes')
+
+ def finished_writing(self):
+ self.calls.append('finished_writing')
+
+
+interrupted_body_stream = (
+ 'oS' # status flag (success)
+ 's\x00\x00\x00\x08l4:argse' # args struct ('args',)
+ 'b\x00\x00\x00\x03aaa' # body part ('aaa')
+ 'b\x00\x00\x00\x03bbb' # body part ('bbb')
+ 'oE' # status flag (error)
+ # err struct ('error', 'Exception', 'Boom!')
+ 's\x00\x00\x00\x1bl5:error9:Exception5:Boom!e'
+ 'e' # EOM
+ )
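+# The four-byte prefixes above are big-endian 32-bit lengths of the bytes
+# that follow them, e.g. '\x00\x00\x00\x1b' == 27, the length of
+# 'l5:error9:Exception5:Boom!e'.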
+
+
+class TestResponseEncodingProtocolThree(tests.TestCase):
+
+ def make_response_encoder(self):
+ out_stream = StringIO()
+ response_encoder = protocol.ProtocolThreeResponder(out_stream.write)
+ return response_encoder, out_stream
+
+ def test_send_error_unknown_method(self):
+ encoder, out_stream = self.make_response_encoder()
+ encoder.send_error(errors.UnknownSmartMethod('method name'))
+ # Use assertEndsWith so that we don't compare the header, which varies
+ # by bzrlib.__version__.
+ self.assertEndsWith(
+ out_stream.getvalue(),
+ # error status
+ 'oE' +
+ # tuple: 'UnknownMethod', 'method name'
+ 's\x00\x00\x00\x20l13:UnknownMethod11:method namee'
+ # end of message
+ 'e')
+
+ def test_send_broken_body_stream(self):
+ encoder, out_stream = self.make_response_encoder()
+ encoder._headers = {}
+ def stream_that_fails():
+ yield 'aaa'
+ yield 'bbb'
+ raise Exception('Boom!')
+ response = _mod_request.SuccessfulSmartServerResponse(
+ ('args',), body_stream=stream_that_fails())
+ encoder.send_response(response)
+ expected_response = (
+ 'bzr message 3 (bzr 1.6)\n' # protocol marker
+ '\x00\x00\x00\x02de' # headers dict (empty)
+ + interrupted_body_stream)
+ self.assertEqual(expected_response, out_stream.getvalue())
+
+
+class TestResponseEncoderBufferingProtocolThree(tests.TestCase):
+ """Tests for buffering of responses.
+
+ We want to avoid doing many small writes when one would do, to avoid
+ unnecessary network overhead.
+ """
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.writes = []
+ self.responder = protocol.ProtocolThreeResponder(self.writes.append)
+
+ def assertWriteCount(self, expected_count):
+ # self.writes can be quite large; don't show the whole thing
+ self.assertEqual(
+ expected_count, len(self.writes),
+ "Too many writes: %d, expected %d" % (len(self.writes), expected_count))
+
+ def test_send_error_writes_just_once(self):
+ """An error response is written to the medium all at once."""
+ self.responder.send_error(Exception('An exception string.'))
+ self.assertWriteCount(1)
+
+ def test_send_response_writes_just_once(self):
+ """A normal response with no body is written to the medium all at once.
+ """
+ response = _mod_request.SuccessfulSmartServerResponse(('arg', 'arg'))
+ self.responder.send_response(response)
+ self.assertWriteCount(1)
+
+ def test_send_response_with_body_writes_just_once(self):
+ """A normal response with a monolithic body is written to the medium
+ all at once.
+ """
+ response = _mod_request.SuccessfulSmartServerResponse(
+ ('arg', 'arg'), body='body bytes')
+ self.responder.send_response(response)
+ self.assertWriteCount(1)
+
+ def test_send_response_with_body_stream_buffers_writes(self):
+ """A normal response with a stream body writes to the medium once."""
+ # Construct a response with stream with 2 chunks in it.
+ response = _mod_request.SuccessfulSmartServerResponse(
+ ('arg', 'arg'), body_stream=['chunk1', 'chunk2'])
+ self.responder.send_response(response)
+ # Per the discussion in bug 590638 we flush once after the header and
+ # then once after each chunk
+ self.assertWriteCount(3)
+
+
+class TestSmartClientUnicode(tests.TestCase):
+ """_SmartClient tests for unicode arguments.
+
+ Unicode arguments to call_with_body_bytes are not correct (remote method
+ names, arguments, and bodies must all be expressed as byte strings), but
+ _SmartClient should gracefully reject them, rather than getting into a
+ broken state that prevents future correct calls from working. That is, it
+ should be possible to issue more requests on the medium afterwards, rather
+ than allowing one bad call to call_with_body_bytes to cause later calls to
+ mysteriously fail with TooManyConcurrentRequests.
+ """
+
+ def assertCallDoesNotBreakMedium(self, method, args, body):
+ """Call a medium with the given method, args and body, then assert that
+ the medium is left in a sane state, i.e. is capable of allowing further
+ requests.
+ """
+ input = StringIO("\n")
+ output = StringIO()
+ client_medium = medium.SmartSimplePipesClientMedium(
+ input, output, 'ignored base')
+ smart_client = client._SmartClient(client_medium)
+ self.assertRaises(TypeError,
+ smart_client.call_with_body_bytes, method, args, body)
+ self.assertEqual("", output.getvalue())
+ self.assertEqual(None, client_medium._current_request)
+
+ def test_call_with_body_bytes_unicode_method(self):
+ self.assertCallDoesNotBreakMedium(u'method', ('args',), 'body')
+
+ def test_call_with_body_bytes_unicode_args(self):
+ self.assertCallDoesNotBreakMedium('method', (u'args',), 'body')
+ self.assertCallDoesNotBreakMedium('method', ('arg1', u'arg2'), 'body')
+
+ def test_call_with_body_bytes_unicode_body(self):
+ self.assertCallDoesNotBreakMedium('method', ('args',), u'body')
+
+
+class MockMedium(medium.SmartClientMedium):
+ """A mock medium that can be used to test _SmartClient.
+
+ It can be given a series of requests to expect (and responses it should
+ return for them). It can also be told when the client is expected to
+ disconnect a medium. Expectations must be satisfied in the order they are
+ given, or else an AssertionError will be raised.
+
+ Typical use looks like::
+
+ medium = MockMedium()
+ medium.expect_request(...)
+ medium.expect_request(...)
+ medium.expect_request(...)
+ """
+
+ def __init__(self):
+ super(MockMedium, self).__init__('dummy base')
+ self._mock_request = _MockMediumRequest(self)
+ self._expected_events = []
+
+ def expect_request(self, request_bytes, response_bytes,
+ allow_partial_read=False):
+ """Expect 'request_bytes' to be sent, and reply with 'response_bytes'.
+
+ No assumption is made about how many times accept_bytes should be
+ called to send the request. Similarly, no assumption is made about how
+ many times read_bytes/read_line are called by protocol code to read a
+ response. e.g.::
+
+ request.accept_bytes('ab')
+ request.accept_bytes('cd')
+ request.finished_writing()
+
+ and::
+
+ request.accept_bytes('abcd')
+ request.finished_writing()
+
+        Both will satisfy ``medium.expect_request('abcd', ...)``. Thus tests
+ using this should not break due to irrelevant changes in protocol
+ implementations.
+
+ :param allow_partial_read: if True, no assertion is raised if a
+ response is not fully read. Setting this is useful when the client
+ is expected to disconnect without needing to read the complete
+ response. Default is False.
+ """
+ self._expected_events.append(('send request', request_bytes))
+ if allow_partial_read:
+ self._expected_events.append(
+ ('read response (partial)', response_bytes))
+ else:
+ self._expected_events.append(('read response', response_bytes))
+
+ def expect_disconnect(self):
+ """Expect the client to call ``medium.disconnect()``."""
+ self._expected_events.append('disconnect')
+
+ def _assertEvent(self, observed_event):
+ """Raise AssertionError unless observed_event matches the next expected
+ event.
+
+ :seealso: expect_request
+ :seealso: expect_disconnect
+ """
+ try:
+ expected_event = self._expected_events.pop(0)
+ except IndexError:
+ raise AssertionError(
+ 'Mock medium observed event %r, but no more events expected'
+ % (observed_event,))
+ if expected_event[0] == 'read response (partial)':
+ if observed_event[0] != 'read response':
+ raise AssertionError(
+ 'Mock medium observed event %r, but expected event %r'
+ % (observed_event, expected_event))
+ elif observed_event != expected_event:
+ raise AssertionError(
+ 'Mock medium observed event %r, but expected event %r'
+ % (observed_event, expected_event))
+ if self._expected_events:
+ next_event = self._expected_events[0]
+ if next_event[0].startswith('read response'):
+ self._mock_request._response = next_event[1]
+
+ def get_request(self):
+ return self._mock_request
+
+ def disconnect(self):
+ if self._mock_request._read_bytes:
+ self._assertEvent(('read response', self._mock_request._read_bytes))
+ self._mock_request._read_bytes = ''
+ self._assertEvent('disconnect')
+
+
+class _MockMediumRequest(object):
+ """A mock ClientMediumRequest used by MockMedium."""
+
+ def __init__(self, mock_medium):
+ self._medium = mock_medium
+ self._written_bytes = ''
+ self._read_bytes = ''
+ self._response = None
+
+ def accept_bytes(self, bytes):
+ self._written_bytes += bytes
+
+ def finished_writing(self):
+ self._medium._assertEvent(('send request', self._written_bytes))
+ self._written_bytes = ''
+
+ def finished_reading(self):
+ self._medium._assertEvent(('read response', self._read_bytes))
+ self._read_bytes = ''
+
+ def read_bytes(self, size):
+ resp = self._response
+ bytes, resp = resp[:size], resp[size:]
+ self._response = resp
+ self._read_bytes += bytes
+ return bytes
+
+ def read_line(self):
+ resp = self._response
+ try:
+ line, resp = resp.split('\n', 1)
+ line += '\n'
+ except ValueError:
+ line, resp = resp, ''
+ self._response = resp
+ self._read_bytes += line
+ return line
+
+
+class Test_SmartClientVersionDetection(tests.TestCase):
+ """Tests for _SmartClient's automatic protocol version detection.
+
+ On the first remote call, _SmartClient will keep retrying the request with
+ different protocol versions until it finds one that works.
+ """
+
+ def test_version_three_server(self):
+ """With a protocol 3 server, only one request is needed."""
+ medium = MockMedium()
+ smart_client = client._SmartClient(medium, headers={})
+ message_start = protocol.MESSAGE_VERSION_THREE + '\x00\x00\x00\x02de'
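+        # message_start is the version-3 wire preamble: the protocol marker
+        # followed by a 4-byte length (2) and the bencoded empty headers dict
+        # 'de'.  Each request/response below then carries an 's' structure
+        # part (4-byte big-endian length plus a bencoded argument list) and a
+        # final 'e' end-of-message byte.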
+ medium.expect_request(
+ message_start +
+ 's\x00\x00\x00\x1el11:method-name5:arg 15:arg 2ee',
+ message_start + 's\0\0\0\x13l14:response valueee')
+ result = smart_client.call('method-name', 'arg 1', 'arg 2')
+ # The call succeeded without raising any exceptions from the mock
+ # medium, and the smart_client returns the response from the server.
+ self.assertEqual(('response value',), result)
+ self.assertEqual([], medium._expected_events)
+        # Also, if v3 works then the server should be assumed to support RPCs
+        # introduced in 1.6.
+ self.assertFalse(medium._is_remote_before((1, 6)))
+
+ def test_version_two_server(self):
+ """If the server only speaks protocol 2, the client will first try
+        version 3, then fall back to protocol 2.
+
+ Further, _SmartClient caches the detection, so future requests will all
+ use protocol 2 immediately.
+ """
+ medium = MockMedium()
+ smart_client = client._SmartClient(medium, headers={})
+ # First the client should send a v3 request, but the server will reply
+ # with a v2 error.
+ medium.expect_request(
+ 'bzr message 3 (bzr 1.6)\n\x00\x00\x00\x02de' +
+ 's\x00\x00\x00\x1el11:method-name5:arg 15:arg 2ee',
+ 'bzr response 2\nfailed\n\n')
+ # So then the client should disconnect to reset the connection, because
+ # the client needs to assume the server cannot read any further
+ # requests off the original connection.
+ medium.expect_disconnect()
+ # The client should then retry the original request in v2
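+        # (v2 wire format: the method name and arguments are joined with
+        # '\x01' and terminated by a newline; the response line carries a
+        # status followed by the result arguments.)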
+ medium.expect_request(
+ 'bzr request 2\nmethod-name\x01arg 1\x01arg 2\n',
+ 'bzr response 2\nsuccess\nresponse value\n')
+ result = smart_client.call('method-name', 'arg 1', 'arg 2')
+ # The smart_client object will return the result of the successful
+ # query.
+ self.assertEqual(('response value',), result)
+
+ # Now try another request, and this time the client will just use
+ # protocol 2. (i.e. the autodetection won't be repeated)
+ medium.expect_request(
+ 'bzr request 2\nanother-method\n',
+ 'bzr response 2\nsuccess\nanother response\n')
+ result = smart_client.call('another-method')
+ self.assertEqual(('another response',), result)
+ self.assertEqual([], medium._expected_events)
+
+ # Also, because v3 is not supported, the client medium should assume
+ # that RPCs introduced in 1.6 aren't supported either.
+ self.assertTrue(medium._is_remote_before((1, 6)))
+
+ def test_unknown_version(self):
+ """If the server does not use any known (or at least supported)
+ protocol version, a SmartProtocolError is raised.
+ """
+ medium = MockMedium()
+ smart_client = client._SmartClient(medium, headers={})
+ unknown_protocol_bytes = 'Unknown protocol!'
+ # The client will try v3 and v2 before eventually giving up.
+ medium.expect_request(
+ 'bzr message 3 (bzr 1.6)\n\x00\x00\x00\x02de' +
+ 's\x00\x00\x00\x1el11:method-name5:arg 15:arg 2ee',
+ unknown_protocol_bytes)
+ medium.expect_disconnect()
+ medium.expect_request(
+ 'bzr request 2\nmethod-name\x01arg 1\x01arg 2\n',
+ unknown_protocol_bytes)
+ medium.expect_disconnect()
+ self.assertRaises(
+ errors.SmartProtocolError,
+ smart_client.call, 'method-name', 'arg 1', 'arg 2')
+ self.assertEqual([], medium._expected_events)
+
+ def test_first_response_is_error(self):
+ """If the server replies with an error, then the version detection
+ should be complete.
+
+ This test is very similar to test_version_two_server, but catches a bug
+ we had in the case where the first reply was an error response.
+ """
+ medium = MockMedium()
+ smart_client = client._SmartClient(medium, headers={})
+ message_start = protocol.MESSAGE_VERSION_THREE + '\x00\x00\x00\x02de'
+ # Issue a request that gets an error reply in a non-default protocol
+ # version.
+ medium.expect_request(
+ message_start +
+ 's\x00\x00\x00\x10l11:method-nameee',
+ 'bzr response 2\nfailed\n\n')
+ medium.expect_disconnect()
+ medium.expect_request(
+ 'bzr request 2\nmethod-name\n',
+ 'bzr response 2\nfailed\nFooBarError\n')
+ err = self.assertRaises(
+ errors.ErrorFromSmartServer,
+ smart_client.call, 'method-name')
+ self.assertEqual(('FooBarError',), err.error_tuple)
+ # Now the medium should have remembered the protocol version, so
+ # subsequent requests will use the remembered version immediately.
+ medium.expect_request(
+ 'bzr request 2\nmethod-name\n',
+ 'bzr response 2\nsuccess\nresponse value\n')
+ result = smart_client.call('method-name')
+ self.assertEqual(('response value',), result)
+ self.assertEqual([], medium._expected_events)
+
+
+class Test_SmartClient(tests.TestCase):
+
+ def test_call_default_headers(self):
+ """ProtocolThreeRequester.call by default sends a 'Software
+ version' header.
+ """
+ smart_client = client._SmartClient('dummy medium')
+ self.assertEqual(
+ bzrlib.__version__, smart_client._headers['Software version'])
+ # XXX: need a test that smart_client._headers is passed to the request
+ # encoder.
+
+
+class Test_SmartClientRequest(tests.TestCase):
+
+ def make_client_with_failing_medium(self, fail_at_write=True, response=''):
+ response_io = StringIO(response)
+ output = StringIO()
+ vendor = FirstRejectedStringIOSSHVendor(response_io, output,
+ fail_at_write=fail_at_write)
+ ssh_params = medium.SSHParams('a host', 'a port', 'a user', 'a pass')
+ client_medium = medium.SmartSSHClientMedium('base', ssh_params, vendor)
+ smart_client = client._SmartClient(client_medium, headers={})
+ return output, vendor, smart_client
+
+ def make_response(self, args, body=None, body_stream=None):
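+        # Encode a canned protocol-3 response by driving a real responder
+        # into a StringIO, so the failing medium can replay those exact bytes.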
+ response_io = StringIO()
+ response = _mod_request.SuccessfulSmartServerResponse(args, body=body,
+ body_stream=body_stream)
+ responder = protocol.ProtocolThreeResponder(response_io.write)
+ responder.send_response(response)
+ return response_io.getvalue()
+
+ def test__call_doesnt_retry_append(self):
+ response = self.make_response(('appended', '8'))
+ output, vendor, smart_client = self.make_client_with_failing_medium(
+ fail_at_write=False, response=response)
+ smart_request = client._SmartClientRequest(smart_client, 'append',
+ ('foo', ''), body='content\n')
+ self.assertRaises(errors.ConnectionReset, smart_request._call, 3)
+
+ def test__call_retries_get_bytes(self):
+ response = self.make_response(('ok',), 'content\n')
+ output, vendor, smart_client = self.make_client_with_failing_medium(
+ fail_at_write=False, response=response)
+ smart_request = client._SmartClientRequest(smart_client, 'get',
+ ('foo',))
+ response, response_handler = smart_request._call(3)
+ self.assertEqual(('ok',), response)
+ self.assertEqual('content\n', response_handler.read_body_bytes())
+
+ def test__call_noretry_get_bytes(self):
+ debug.debug_flags.add('noretry')
+ response = self.make_response(('ok',), 'content\n')
+ output, vendor, smart_client = self.make_client_with_failing_medium(
+ fail_at_write=False, response=response)
+ smart_request = client._SmartClientRequest(smart_client, 'get',
+ ('foo',))
+ self.assertRaises(errors.ConnectionReset, smart_request._call, 3)
+
+ def test__send_no_retry_pipes(self):
+ client_read, server_write = create_file_pipes()
+ server_read, client_write = create_file_pipes()
+ client_medium = medium.SmartSimplePipesClientMedium(client_read,
+ client_write, base='/')
+ smart_client = client._SmartClient(client_medium)
+ smart_request = client._SmartClientRequest(smart_client,
+ 'hello', ())
+ # Close the server side
+ server_read.close()
+ encoder, response_handler = smart_request._construct_protocol(3)
+ self.assertRaises(errors.ConnectionReset,
+ smart_request._send_no_retry, encoder)
+
+ def test__send_read_response_sockets(self):
+ listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ listen_sock.bind(('127.0.0.1', 0))
+ listen_sock.listen(1)
+ host, port = listen_sock.getsockname()
+ client_medium = medium.SmartTCPClientMedium(host, port, '/')
+ client_medium._ensure_connection()
+ smart_client = client._SmartClient(client_medium)
+ smart_request = client._SmartClientRequest(smart_client, 'hello', ())
+ # Accept the connection, but don't actually talk to the client.
+ server_sock, _ = listen_sock.accept()
+ server_sock.close()
+ # Sockets buffer and don't really notice that the server has closed the
+ # connection until we try to read again.
+ handler = smart_request._send(3)
+ self.assertRaises(errors.ConnectionReset,
+ handler.read_response_tuple, expect_body=False)
+
+ def test__send_retries_on_write(self):
+ output, vendor, smart_client = self.make_client_with_failing_medium()
+ smart_request = client._SmartClientRequest(smart_client, 'hello', ())
+ handler = smart_request._send(3)
+ self.assertEqual('bzr message 3 (bzr 1.6)\n' # protocol
+ '\x00\x00\x00\x02de' # empty headers
+ 's\x00\x00\x00\tl5:helloee',
+ output.getvalue())
+ self.assertEqual(
+ [('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close',),
+ ('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ],
+ vendor.calls)
+
+ def test__send_doesnt_retry_read_failure(self):
+ output, vendor, smart_client = self.make_client_with_failing_medium(
+ fail_at_write=False)
+ smart_request = client._SmartClientRequest(smart_client, 'hello', ())
+ handler = smart_request._send(3)
+ self.assertEqual('bzr message 3 (bzr 1.6)\n' # protocol
+ '\x00\x00\x00\x02de' # empty headers
+ 's\x00\x00\x00\tl5:helloee',
+ output.getvalue())
+ self.assertEqual(
+ [('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ],
+ vendor.calls)
+ self.assertRaises(errors.ConnectionReset, handler.read_response_tuple)
+
+ def test__send_request_retries_body_stream_if_not_started(self):
+ output, vendor, smart_client = self.make_client_with_failing_medium()
+ smart_request = client._SmartClientRequest(smart_client, 'hello', (),
+ body_stream=['a', 'b'])
+ response_handler = smart_request._send(3)
+ # We connect, get disconnected, and notice before consuming the stream,
+ # so we try again one time and succeed.
+ self.assertEqual(
+ [('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close',),
+ ('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ],
+ vendor.calls)
+ self.assertEqual('bzr message 3 (bzr 1.6)\n' # protocol
+ '\x00\x00\x00\x02de' # empty headers
+ 's\x00\x00\x00\tl5:helloe'
+ 'b\x00\x00\x00\x01a'
+ 'b\x00\x00\x00\x01b'
+ 'e',
+ output.getvalue())
+
+ def test__send_request_stops_if_body_started(self):
+ # We intentionally use the python StringIO so that we can subclass it.
+ from StringIO import StringIO
+ response = StringIO()
+
+ class FailAfterFirstWrite(StringIO):
+ """Allow one 'write' call to pass, fail the rest"""
+ def __init__(self):
+ StringIO.__init__(self)
+ self._first = True
+
+ def write(self, s):
+ if self._first:
+ self._first = False
+ return StringIO.write(self, s)
+ raise IOError(errno.EINVAL, 'invalid file handle')
+ output = FailAfterFirstWrite()
+
+ vendor = FirstRejectedStringIOSSHVendor(response, output,
+ fail_at_write=False)
+ ssh_params = medium.SSHParams('a host', 'a port', 'a user', 'a pass')
+ client_medium = medium.SmartSSHClientMedium('base', ssh_params, vendor)
+ smart_client = client._SmartClient(client_medium, headers={})
+ smart_request = client._SmartClientRequest(smart_client, 'hello', (),
+ body_stream=['a', 'b'])
+ self.assertRaises(errors.ConnectionReset, smart_request._send, 3)
+ # We connect, and manage to get to the point that we start consuming
+ # the body stream. The next write fails, so we just stop.
+ self.assertEqual(
+ [('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close',),
+ ],
+ vendor.calls)
+ self.assertEqual('bzr message 3 (bzr 1.6)\n' # protocol
+ '\x00\x00\x00\x02de' # empty headers
+ 's\x00\x00\x00\tl5:helloe',
+ output.getvalue())
+
+ def test__send_disabled_retry(self):
+ debug.debug_flags.add('noretry')
+ output, vendor, smart_client = self.make_client_with_failing_medium()
+ smart_request = client._SmartClientRequest(smart_client, 'hello', ())
+ self.assertRaises(errors.ConnectionReset, smart_request._send, 3)
+ self.assertEqual(
+ [('connect_ssh', 'a user', 'a pass', 'a host', 'a port',
+ ['bzr', 'serve', '--inet', '--directory=/', '--allow-writes']),
+ ('close',),
+ ],
+ vendor.calls)
+
+
+class LengthPrefixedBodyDecoder(tests.TestCase):
+
+ # XXX: TODO: make accept_reading_trailer invoke translate_response or
+ # something similar to the ProtocolBase method.
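+
+    # The body format exercised here is '<decimal length>\n' + body bytes +
+    # a 'done\n' trailer (e.g. '1\nadone\n' encodes the one-byte body 'a').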
+
+ def test_construct(self):
+ decoder = protocol.LengthPrefixedBodyDecoder()
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(6, decoder.next_read_size())
+ self.assertEqual('', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+
+ def test_accept_bytes(self):
+ decoder = protocol.LengthPrefixedBodyDecoder()
+ decoder.accept_bytes('')
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(6, decoder.next_read_size())
+ self.assertEqual('', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+ decoder.accept_bytes('7')
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(6, decoder.next_read_size())
+ self.assertEqual('', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+ decoder.accept_bytes('\na')
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(11, decoder.next_read_size())
+ self.assertEqual('a', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+ decoder.accept_bytes('bcdefgd')
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(4, decoder.next_read_size())
+ self.assertEqual('bcdefg', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+ decoder.accept_bytes('one')
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(1, decoder.next_read_size())
+ self.assertEqual('', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+ decoder.accept_bytes('\nblarg')
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(1, decoder.next_read_size())
+ self.assertEqual('', decoder.read_pending_data())
+ self.assertEqual('blarg', decoder.unused_data)
+
+ def test_accept_bytes_all_at_once_with_excess(self):
+ decoder = protocol.LengthPrefixedBodyDecoder()
+ decoder.accept_bytes('1\nadone\nunused')
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(1, decoder.next_read_size())
+ self.assertEqual('a', decoder.read_pending_data())
+ self.assertEqual('unused', decoder.unused_data)
+
+ def test_accept_bytes_exact_end_of_body(self):
+ decoder = protocol.LengthPrefixedBodyDecoder()
+ decoder.accept_bytes('1\na')
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(5, decoder.next_read_size())
+ self.assertEqual('a', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+ decoder.accept_bytes('done\n')
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(1, decoder.next_read_size())
+ self.assertEqual('', decoder.read_pending_data())
+ self.assertEqual('', decoder.unused_data)
+
+
+class TestChunkedBodyDecoder(tests.TestCase):
+ """Tests for ChunkedBodyDecoder.
+
+ This is the body decoder used for protocol version two.
+ """
+
+ def test_construct(self):
+ decoder = protocol.ChunkedBodyDecoder()
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(8, decoder.next_read_size())
+ self.assertEqual(None, decoder.read_next_chunk())
+ self.assertEqual('', decoder.unused_data)
+
+ def test_empty_content(self):
+ """'chunked\nEND\n' is the complete encoding of a zero-length body.
+ """
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ decoder.accept_bytes('END\n')
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(None, decoder.read_next_chunk())
+ self.assertEqual('', decoder.unused_data)
+
+ def test_one_chunk(self):
+ """A body in a single chunk is decoded correctly."""
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunk_length = 'f\n'
+ chunk_content = '123456789abcdef'
+ finish = 'END\n'
+ decoder.accept_bytes(chunk_length + chunk_content + finish)
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(chunk_content, decoder.read_next_chunk())
+ self.assertEqual('', decoder.unused_data)
+
+ def test_incomplete_chunk(self):
+ """When there are less bytes in the chunk than declared by the length,
+ then we haven't finished reading yet.
+ """
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunk_length = '8\n'
+ three_bytes = '123'
+ decoder.accept_bytes(chunk_length + three_bytes)
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(
+ 5 + 4, decoder.next_read_size(),
+ "The next_read_size hint should be the number of missing bytes in "
+ "this chunk plus 4 (the length of the end-of-body marker: "
+ "'END\\n')")
+ self.assertEqual(None, decoder.read_next_chunk())
+
+ def test_incomplete_length(self):
+ """A chunk length hasn't been read until a newline byte has been read.
+ """
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ decoder.accept_bytes('9')
+ self.assertEqual(
+ 1, decoder.next_read_size(),
+ "The next_read_size hint should be 1, because we don't know the "
+ "length yet.")
+ decoder.accept_bytes('\n')
+ self.assertEqual(
+ 9 + 4, decoder.next_read_size(),
+ "The next_read_size hint should be the length of the chunk plus 4 "
+ "(the length of the end-of-body marker: 'END\\n')")
+ self.assertFalse(decoder.finished_reading)
+ self.assertEqual(None, decoder.read_next_chunk())
+
+ def test_two_chunks(self):
+ """Content from multiple chunks is concatenated."""
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunk_one = '3\naaa'
+ chunk_two = '5\nbbbbb'
+ finish = 'END\n'
+ decoder.accept_bytes(chunk_one + chunk_two + finish)
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual('aaa', decoder.read_next_chunk())
+ self.assertEqual('bbbbb', decoder.read_next_chunk())
+ self.assertEqual(None, decoder.read_next_chunk())
+ self.assertEqual('', decoder.unused_data)
+
+ def test_excess_bytes(self):
+ """Bytes after the chunked body are reported as unused bytes."""
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunked_body = "5\naaaaaEND\n"
+ excess_bytes = "excess bytes"
+ decoder.accept_bytes(chunked_body + excess_bytes)
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual('aaaaa', decoder.read_next_chunk())
+ self.assertEqual(excess_bytes, decoder.unused_data)
+ self.assertEqual(
+ 1, decoder.next_read_size(),
+ "next_read_size hint should be 1 when finished_reading.")
+
+ def test_multidigit_length(self):
+ """Lengths in the chunk prefixes can have multiple digits."""
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ length = 0x123
+ chunk_prefix = hex(length) + '\n'
+ chunk_bytes = 'z' * length
+ finish = 'END\n'
+ decoder.accept_bytes(chunk_prefix + chunk_bytes + finish)
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(chunk_bytes, decoder.read_next_chunk())
+
+ def test_byte_at_a_time(self):
+ """A complete body fed to the decoder one byte at a time should not
+ confuse the decoder. That is, it should give the same result as if the
+ bytes had been received in one batch.
+
+ This test is the same as test_one_chunk apart from the way accept_bytes
+ is called.
+ """
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunk_length = 'f\n'
+ chunk_content = '123456789abcdef'
+ finish = 'END\n'
+ for byte in (chunk_length + chunk_content + finish):
+ decoder.accept_bytes(byte)
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual(chunk_content, decoder.read_next_chunk())
+ self.assertEqual('', decoder.unused_data)
+
+ def test_read_pending_data_resets(self):
+ """read_pending_data does not return the same bytes twice."""
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunk_one = '3\naaa'
+ chunk_two = '3\nbbb'
+ finish = 'END\n'
+ decoder.accept_bytes(chunk_one)
+ self.assertEqual('aaa', decoder.read_next_chunk())
+ decoder.accept_bytes(chunk_two)
+ self.assertEqual('bbb', decoder.read_next_chunk())
+ self.assertEqual(None, decoder.read_next_chunk())
+
+ def test_decode_error(self):
+ decoder = protocol.ChunkedBodyDecoder()
+ decoder.accept_bytes('chunked\n')
+ chunk_one = 'b\nfirst chunk'
+ error_signal = 'ERR\n'
+ error_chunks = '5\npart1' + '5\npart2'
+ finish = 'END\n'
+ decoder.accept_bytes(chunk_one + error_signal + error_chunks + finish)
+ self.assertTrue(decoder.finished_reading)
+ self.assertEqual('first chunk', decoder.read_next_chunk())
+ expected_failure = _mod_request.FailedSmartServerResponse(
+ ('part1', 'part2'))
+ self.assertEqual(expected_failure, decoder.read_next_chunk())
+
+ def test_bad_header(self):
+ """accept_bytes raises a SmartProtocolError if a chunked body does not
+ start with the right header.
+ """
+ decoder = protocol.ChunkedBodyDecoder()
+ self.assertRaises(
+ errors.SmartProtocolError, decoder.accept_bytes, 'bad header\n')
+
+
+class TestSuccessfulSmartServerResponse(tests.TestCase):
+
+ def test_construct_no_body(self):
+ response = _mod_request.SuccessfulSmartServerResponse(('foo', 'bar'))
+ self.assertEqual(('foo', 'bar'), response.args)
+ self.assertEqual(None, response.body)
+
+ def test_construct_with_body(self):
+ response = _mod_request.SuccessfulSmartServerResponse(('foo', 'bar'),
+ 'bytes')
+ self.assertEqual(('foo', 'bar'), response.args)
+ self.assertEqual('bytes', response.body)
+ # repr(response) doesn't trigger exceptions.
+ repr(response)
+
+ def test_construct_with_body_stream(self):
+ bytes_iterable = ['abc']
+ response = _mod_request.SuccessfulSmartServerResponse(
+ ('foo', 'bar'), body_stream=bytes_iterable)
+ self.assertEqual(('foo', 'bar'), response.args)
+ self.assertEqual(bytes_iterable, response.body_stream)
+
+ def test_construct_rejects_body_and_body_stream(self):
+ """'body' and 'body_stream' are mutually exclusive."""
+ self.assertRaises(
+ errors.BzrError,
+ _mod_request.SuccessfulSmartServerResponse, (), 'body', ['stream'])
+
+ def test_is_successful(self):
+ """is_successful should return True for SuccessfulSmartServerResponse."""
+ response = _mod_request.SuccessfulSmartServerResponse(('error',))
+ self.assertEqual(True, response.is_successful())
+
+
+class TestFailedSmartServerResponse(tests.TestCase):
+
+ def test_construct(self):
+ response = _mod_request.FailedSmartServerResponse(('foo', 'bar'))
+ self.assertEqual(('foo', 'bar'), response.args)
+ self.assertEqual(None, response.body)
+ response = _mod_request.FailedSmartServerResponse(('foo', 'bar'), 'bytes')
+ self.assertEqual(('foo', 'bar'), response.args)
+ self.assertEqual('bytes', response.body)
+ # repr(response) doesn't trigger exceptions.
+ repr(response)
+
+ def test_is_successful(self):
+ """is_successful should return False for FailedSmartServerResponse."""
+ response = _mod_request.FailedSmartServerResponse(('error',))
+ self.assertEqual(False, response.is_successful())
+
+
+class FakeHTTPMedium(object):
+ def __init__(self):
+ self.written_request = None
+ self._current_request = None
+ def send_http_smart_request(self, bytes):
+ self.written_request = bytes
+ return None
+
+
+class HTTPTunnellingSmokeTest(tests.TestCase):
+
+ def setUp(self):
+ super(HTTPTunnellingSmokeTest, self).setUp()
+ # We use the VFS layer as part of HTTP tunnelling tests.
+ self.overrideEnv('BZR_NO_SMART_VFS', None)
+
+ def test_smart_http_medium_request_accept_bytes(self):
+ medium = FakeHTTPMedium()
+ request = http.SmartClientHTTPMediumRequest(medium)
+ request.accept_bytes('abc')
+ request.accept_bytes('def')
+ self.assertEqual(None, medium.written_request)
+ request.finished_writing()
+ self.assertEqual('abcdef', medium.written_request)
+
+
+class RemoteHTTPTransportTestCase(tests.TestCase):
+
+ def test_remote_path_after_clone_child(self):
+        # If a user enters "bzr+http://host/foo", we want to send all smart
+ # requests for child URLs of that to the original URL. i.e., we want to
+ # POST to "bzr+http://host/foo/.bzr/smart" and never something like
+ # "bzr+http://host/foo/.bzr/branch/.bzr/smart". So, a cloned
+ # RemoteHTTPTransport remembers the initial URL, and adjusts the
+ # relpaths it sends in smart requests accordingly.
+ base_transport = remote.RemoteHTTPTransport('bzr+http://host/path')
+ new_transport = base_transport.clone('child_dir')
+ self.assertEqual(base_transport._http_transport,
+ new_transport._http_transport)
+ self.assertEqual('child_dir/foo', new_transport._remote_path('foo'))
+ self.assertEqual(
+ 'child_dir/',
+ new_transport._client.remote_path_from_transport(new_transport))
+
+ def test_remote_path_unnormal_base(self):
+ # If the transport's base isn't normalised, the _remote_path should
+ # still be calculated correctly.
+ base_transport = remote.RemoteHTTPTransport('bzr+http://host/%7Ea/b')
+ self.assertEqual('c', base_transport._remote_path('c'))
+
+ def test_clone_unnormal_base(self):
+ # If the transport's base isn't normalised, cloned transports should
+ # still work correctly.
+ base_transport = remote.RemoteHTTPTransport('bzr+http://host/%7Ea/b')
+ new_transport = base_transport.clone('c')
+ self.assertEqual(base_transport.base + 'c/', new_transport.base)
+ self.assertEqual(
+ 'c/',
+ new_transport._client.remote_path_from_transport(new_transport))
+
+ def test__redirect_to(self):
+ t = remote.RemoteHTTPTransport('bzr+http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'http://www.example.com/bar')
+ self.assertEquals(type(r), type(t))
+
+ def test__redirect_sibling_protocol(self):
+ t = remote.RemoteHTTPTransport('bzr+http://www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'https://www.example.com/bar')
+ self.assertEquals(type(r), type(t))
+ self.assertStartsWith(r.base, 'bzr+https')
+
+ def test__redirect_to_with_user(self):
+ t = remote.RemoteHTTPTransport('bzr+http://joe@www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'http://www.example.com/bar')
+ self.assertEquals(type(r), type(t))
+ self.assertEquals('joe', t._parsed_url.user)
+ self.assertEquals(t._parsed_url.user, r._parsed_url.user)
+
+ def test_redirected_to_same_host_different_protocol(self):
+ t = remote.RemoteHTTPTransport('bzr+http://joe@www.example.com/foo')
+ r = t._redirected_to('http://www.example.com/foo',
+ 'ftp://www.example.com/foo')
+ self.assertNotEquals(type(r), type(t))
+
+
diff --git a/bzrlib/tests/test_smtp_connection.py b/bzrlib/tests/test_smtp_connection.py
new file mode 100644
index 0000000..8424522
--- /dev/null
+++ b/bzrlib/tests/test_smtp_connection.py
@@ -0,0 +1,274 @@
+# Copyright (C) 2007, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from email.Message import Message
+import errno
+import smtplib
+import socket
+
+from bzrlib import (
+ config,
+ email_message,
+ errors,
+ smtp_connection,
+ tests,
+ ui,
+ )
+
+
+def connection_refuser():
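+    # Build a real smtplib.SMTP whose connect() always refuses, to exercise
+    # the connection-refused error paths without touching the network.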
+ def connect(server):
+ raise socket.error(errno.ECONNREFUSED, 'Connection Refused')
+ smtp = smtplib.SMTP()
+ smtp.connect = connect
+ return smtp
+
+
+class StubSMTPFactory(object):
+ """A fake SMTP connection to test the connection setup."""
+ def __init__(self, fail_on=None, smtp_features=None):
+ self._fail_on = fail_on or []
+ self._calls = []
+ self._smtp_features = smtp_features or []
+ self._ehlo_called = False
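+        # _fail_on lists command names ('helo', 'ehlo', 'starttls') that
+        # should return a 500 failure; _smtp_features lists extensions that
+        # has_extn() should report once EHLO has succeeded.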
+
+ def __call__(self):
+ # The factory pretends to be a connection
+ return self
+
+ def connect(self, server):
+ self._calls.append(('connect', server))
+
+ def helo(self):
+ self._calls.append(('helo',))
+ if 'helo' in self._fail_on:
+ return 500, 'helo failure'
+ else:
+ return 200, 'helo success'
+
+ def ehlo(self):
+ self._calls.append(('ehlo',))
+ if 'ehlo' in self._fail_on:
+ return 500, 'ehlo failure'
+ else:
+ self._ehlo_called = True
+ return 200, 'ehlo success'
+
+ def has_extn(self, extension):
+ self._calls.append(('has_extn', extension))
+ return self._ehlo_called and extension in self._smtp_features
+
+ def starttls(self):
+ self._calls.append(('starttls',))
+ if 'starttls' in self._fail_on:
+ return 500, 'starttls failure'
+ else:
+ self._ehlo_called = True
+ return 200, 'starttls success'
+
+
+class WideOpenSMTPFactory(StubSMTPFactory):
+ """A fake smtp server that implements login by accepting anybody."""
+
+ def login(self, user, password):
+ self._calls.append(('login', user, password))
+
+
+class TestSMTPConnection(tests.TestCaseInTempDir):
+
+ def get_connection(self, text, smtp_factory=None):
+ my_config = config.MemoryStack(text)
+ return smtp_connection.SMTPConnection(
+ my_config, _smtp_factory=smtp_factory)
+
+ def test_defaults(self):
+ conn = self.get_connection('')
+ self.assertEqual('localhost', conn._smtp_server)
+ self.assertEqual(None, conn._smtp_username)
+ self.assertEqual(None, conn._smtp_password)
+
+ def test_smtp_server(self):
+ conn = self.get_connection('smtp_server=host:10')
+ self.assertEqual('host:10', conn._smtp_server)
+
+ def test_missing_server(self):
+ conn = self.get_connection('', smtp_factory=connection_refuser)
+ self.assertRaises(errors.DefaultSMTPConnectionRefused, conn._connect)
+ conn = self.get_connection('smtp_server=smtp.example.com',
+ smtp_factory=connection_refuser)
+ self.assertRaises(errors.SMTPConnectionRefused, conn._connect)
+
+ def test_smtp_username(self):
+ conn = self.get_connection('')
+ self.assertIs(None, conn._smtp_username)
+
+ conn = self.get_connection('smtp_username=joebody')
+ self.assertEqual(u'joebody', conn._smtp_username)
+
+ def test_smtp_password_from_config(self):
+ conn = self.get_connection('')
+ self.assertIs(None, conn._smtp_password)
+
+ conn = self.get_connection('smtp_password=mypass')
+ self.assertEqual(u'mypass', conn._smtp_password)
+
+ def test_smtp_password_from_user(self):
+ user = 'joe'
+ password = 'hispass'
+ factory = WideOpenSMTPFactory()
+ conn = self.get_connection('[DEFAULT]\nsmtp_username=%s\n' % user,
+ smtp_factory=factory)
+ self.assertIs(None, conn._smtp_password)
+
+ ui.ui_factory = ui.CannedInputUIFactory([password])
+ conn._connect()
+ self.assertEqual(password, conn._smtp_password)
+
+ def test_smtp_password_from_auth_config(self):
+ user = 'joe'
+ password = 'hispass'
+ factory = WideOpenSMTPFactory()
+ conn = self.get_connection('[DEFAULT]\nsmtp_username=%s\n' % user,
+ smtp_factory=factory)
+ self.assertEqual(user, conn._smtp_username)
+ self.assertIs(None, conn._smtp_password)
+ # Create a config file with the right password
+ conf = config.AuthenticationConfig()
+ conf._get_config().update({'smtptest':
+ {'scheme': 'smtp', 'user':user,
+ 'password': password}})
+ conf._save()
+
+ conn._connect()
+ self.assertEqual(password, conn._smtp_password)
+
+ def test_authenticate_with_byte_strings(self):
+ user = 'joe'
+ unicode_pass = u'h\xECspass'
+ utf8_pass = unicode_pass.encode('utf-8')
+ factory = WideOpenSMTPFactory()
+ conn = self.get_connection(
+ '[DEFAULT]\nsmtp_username=%s\nsmtp_password=%s\n'
+ % (user, utf8_pass), smtp_factory=factory)
+ self.assertEqual(unicode_pass, conn._smtp_password)
+ conn._connect()
+ self.assertEqual([('connect', 'localhost'),
+ ('ehlo',),
+ ('has_extn', 'starttls'),
+ ('login', user, utf8_pass)], factory._calls)
+ smtp_username, smtp_password = factory._calls[-1][1:]
+ self.assertIsInstance(smtp_username, str)
+ self.assertIsInstance(smtp_password, str)
+
+ def test_create_connection(self):
+ factory = StubSMTPFactory()
+ conn = self.get_connection('', smtp_factory=factory)
+ conn._create_connection()
+ self.assertEqual([('connect', 'localhost'),
+ ('ehlo',),
+ ('has_extn', 'starttls')], factory._calls)
+
+ def test_create_connection_ehlo_fails(self):
+ # Check that we call HELO if EHLO failed.
+ factory = StubSMTPFactory(fail_on=['ehlo'])
+ conn = self.get_connection('', smtp_factory=factory)
+ conn._create_connection()
+ self.assertEqual([('connect', 'localhost'),
+ ('ehlo',),
+ ('helo',),
+ ('has_extn', 'starttls')], factory._calls)
+
+ def test_create_connection_ehlo_helo_fails(self):
+ # Check that we raise an exception if both EHLO and HELO fail.
+ factory = StubSMTPFactory(fail_on=['ehlo', 'helo'])
+ conn = self.get_connection('', smtp_factory=factory)
+ self.assertRaises(errors.SMTPError, conn._create_connection)
+ self.assertEqual([('connect', 'localhost'),
+ ('ehlo',),
+ ('helo',)], factory._calls)
+
+ def test_create_connection_starttls(self):
+ # Check that STARTTLS plus a second EHLO are called if the
+ # server says it supports the feature.
+ factory = StubSMTPFactory(smtp_features=['starttls'])
+ conn = self.get_connection('', smtp_factory=factory)
+ conn._create_connection()
+ self.assertEqual([('connect', 'localhost'),
+ ('ehlo',),
+ ('has_extn', 'starttls'),
+ ('starttls',),
+ ('ehlo',)], factory._calls)
+
+ def test_create_connection_starttls_fails(self):
+ # Check that we raise an exception if the server claims to
+ # support STARTTLS, but then fails when we try to activate it.
+ factory = StubSMTPFactory(fail_on=['starttls'],
+ smtp_features=['starttls'])
+ conn = self.get_connection('', smtp_factory=factory)
+ self.assertRaises(errors.SMTPError, conn._create_connection)
+ self.assertEqual([('connect', 'localhost'),
+ ('ehlo',),
+ ('has_extn', 'starttls'),
+ ('starttls',)], factory._calls)
+
+ def test_get_message_addresses(self):
+ msg = Message()
+
+ from_, to = smtp_connection.SMTPConnection.get_message_addresses(msg)
+ self.assertEqual('', from_)
+ self.assertEqual([], to)
+
+ msg['From'] = '"J. Random Developer" <jrandom@example.com>'
+ msg['To'] = 'John Doe <john@doe.com>, Jane Doe <jane@doe.com>'
+ msg['CC'] = u'Pepe P\xe9rez <pperez@ejemplo.com>'
+ msg['Bcc'] = 'user@localhost'
+
+ from_, to = smtp_connection.SMTPConnection.get_message_addresses(msg)
+ self.assertEqual('jrandom@example.com', from_)
+ self.assertEqual(sorted(['john@doe.com', 'jane@doe.com',
+ 'pperez@ejemplo.com', 'user@localhost']), sorted(to))
+
+ # now with bzrlib's EmailMessage
+ msg = email_message.EmailMessage(
+ '"J. Random Developer" <jrandom@example.com>',
+ ['John Doe <john@doe.com>', 'Jane Doe <jane@doe.com>',
+ u'Pepe P\xe9rez <pperez@ejemplo.com>', 'user@localhost' ],
+ 'subject')
+
+ from_, to = smtp_connection.SMTPConnection.get_message_addresses(msg)
+ self.assertEqual('jrandom@example.com', from_)
+ self.assertEqual(sorted(['john@doe.com', 'jane@doe.com',
+ 'pperez@ejemplo.com', 'user@localhost']), sorted(to))
+
+ def test_destination_address_required(self):
+ msg = Message()
+ msg['From'] = '"J. Random Developer" <jrandom@example.com>'
+ self.assertRaises(
+ errors.NoDestinationAddress,
+ smtp_connection.SMTPConnection(config.MemoryStack("")
+ ).send_email, msg)
+
+ msg = email_message.EmailMessage('from@from.com', '', 'subject')
+ self.assertRaises(
+ errors.NoDestinationAddress,
+ smtp_connection.SMTPConnection(config.MemoryStack("")
+ ).send_email, msg)
+
+ msg = email_message.EmailMessage('from@from.com', [], 'subject')
+ self.assertRaises(
+ errors.NoDestinationAddress,
+ smtp_connection.SMTPConnection(config.MemoryStack("")
+ ).send_email, msg)
diff --git a/bzrlib/tests/test_source.py b/bzrlib/tests/test_source.py
new file mode 100644
index 0000000..725d300
--- /dev/null
+++ b/bzrlib/tests/test_source.py
@@ -0,0 +1,438 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""These tests are tests about the source code of bzrlib itself.
+
+They are useful for testing code quality, checking coverage metric etc.
+"""
+
+import os
+import parser
+import re
+import symbol
+import sys
+import token
+
+from bzrlib import (
+ osutils,
+ )
+import bzrlib.branch
+from bzrlib.tests import (
+ TestCase,
+ TestSkipped,
+ )
+
+
+# Files which are listed here will be skipped when testing for Copyright (or
+# GPL) statements.
+COPYRIGHT_EXCEPTIONS = [
+ 'bzrlib/_bencode_py.py',
+ 'bzrlib/doc_generate/conf.py',
+ 'bzrlib/lsprof.py',
+ ]
+
+LICENSE_EXCEPTIONS = [
+ 'bzrlib/_bencode_py.py',
+ 'bzrlib/doc_generate/conf.py',
+ 'bzrlib/lsprof.py',
+ ]
+# Technically, 'bzrlib/lsprof.py' should be 'bzrlib/util/lsprof.py',
+# (we do not check bzrlib/util/, since that is code bundled from elsewhere)
+# but for compatibility with previous releases, we don't want to move it.
+#
+# sphinx_conf is semi-autogenerated.
+
+
+class TestSourceHelper(TestCase):
+
+ def source_file_name(self, package):
+ """Return the path of the .py file for package."""
+ if getattr(sys, "frozen", None) is not None:
+ raise TestSkipped("can't test sources in frozen distributions.")
+ path = package.__file__
+ if path[-1] in 'co':
+ return path[:-1]
+ else:
+ return path
+
+
+class TestApiUsage(TestSourceHelper):
+
+ def find_occurences(self, rule, filename):
+ """Find the number of occurences of rule in a file."""
+ occurences = 0
+ source = file(filename, 'r')
+ for line in source:
+ if line.find(rule) > -1:
+ occurences += 1
+ return occurences
+
+ def test_branch_working_tree(self):
+ """Test that the number of uses of working_tree in branch is stable."""
+ occurences = self.find_occurences('self.working_tree()',
+ self.source_file_name(bzrlib.branch))
+ # do not even think of increasing this number. If you think you need to
+ # increase it, then you almost certainly are doing something wrong as
+ # the relationship from working_tree to branch is one way.
+        # Note that this is an exact equality, so that when the number drops
+        # this test is updated immediately rather than being given a buffer.
+ self.assertEqual(0, occurences)
+
+ def test_branch_WorkingTree(self):
+ """Test that the number of uses of working_tree in branch is stable."""
+ occurences = self.find_occurences('WorkingTree',
+ self.source_file_name(bzrlib.branch))
+ # Do not even think of increasing this number. If you think you need to
+ # increase it, then you almost certainly are doing something wrong as
+ # the relationship from working_tree to branch is one way.
+ # As of 20070809, there are no longer any mentions at all.
+ self.assertEqual(0, occurences)
+
+
+class TestSource(TestSourceHelper):
+
+ def get_bzrlib_dir(self):
+ """Get the path to the root of bzrlib"""
+ source = self.source_file_name(bzrlib)
+ source_dir = os.path.dirname(source)
+
+ # Avoid the case when bzrlib is packaged in a zip file
+ if not os.path.isdir(source_dir):
+ raise TestSkipped(
+ 'Cannot find bzrlib source directory. Expected %s'
+ % source_dir)
+ return source_dir
+
+ def get_source_files(self, extensions=None):
+ """Yield all source files for bzr and bzrlib
+
+        :param extensions: A tuple of file extensions to include; defaults to
+            ('.py',) when None.
+ """
+ bzrlib_dir = self.get_bzrlib_dir()
+ if extensions is None:
+ extensions = ('.py',)
+
+ # This is the front-end 'bzr' script
+ bzr_path = self.get_bzr_path()
+ yield bzr_path
+
+ for root, dirs, files in os.walk(bzrlib_dir):
+ for d in dirs:
+ if d.endswith('.tmp'):
+ dirs.remove(d)
+ for f in files:
+ for extension in extensions:
+ if f.endswith(extension):
+ break
+ else:
+ # Did not match the accepted extensions
+ continue
+ yield osutils.pathjoin(root, f)
+
+ def get_source_file_contents(self, extensions=None):
+ for fname in self.get_source_files(extensions=extensions):
+ f = open(fname, 'rb')
+ try:
+ text = f.read()
+ finally:
+ f.close()
+ yield fname, text
+
+ def is_our_code(self, fname):
+ """True if it's a "real" part of bzrlib rather than external code"""
+ if '/util/' in fname or '/plugins/' in fname:
+ return False
+ else:
+ return True
+
+ def is_copyright_exception(self, fname):
+ """Certain files are allowed to be different"""
+ if not self.is_our_code(fname):
+ # We don't ask that external utilities or plugins be
+ # (C) Canonical Ltd
+ return True
+ for exc in COPYRIGHT_EXCEPTIONS:
+ if fname.endswith(exc):
+ return True
+ return False
+
+ def is_license_exception(self, fname):
+ """Certain files are allowed to be different"""
+ if not self.is_our_code(fname):
+ return True
+ for exc in LICENSE_EXCEPTIONS:
+ if fname.endswith(exc):
+ return True
+ return False
+
+ def test_tmpdir_not_in_source_files(self):
+ """When scanning for source files, we don't descend test tempdirs"""
+ for filename in self.get_source_files():
+ if re.search(r'test....\.tmp', filename):
+ self.fail("get_source_file() returned filename %r "
+ "from within a temporary directory"
+ % filename)
+
+ def test_copyright(self):
+ """Test that all .py and .pyx files have a valid copyright statement"""
+ incorrect = []
+
+ copyright_re = re.compile('#\\s*copyright.*(?=\n)', re.I)
+ copyright_canonical_re = re.compile(
+ r'# Copyright \(C\) ' # Opening "# Copyright (C)"
+ r'(\d+)(, \d+)*' # followed by a series of dates
+ r'.*Canonical Ltd') # and containing 'Canonical Ltd'.
+
+ for fname, text in self.get_source_file_contents(
+ extensions=('.py', '.pyx')):
+ if self.is_copyright_exception(fname):
+ continue
+ match = copyright_canonical_re.search(text)
+ if not match:
+ match = copyright_re.search(text)
+ if match:
+ incorrect.append((fname, 'found: %s' % (match.group(),)))
+ else:
+ incorrect.append((fname, 'no copyright line found\n'))
+ else:
+ if 'by Canonical' in match.group():
+ incorrect.append((fname,
+ 'should not have: "by Canonical": %s'
+ % (match.group(),)))
+
+ if incorrect:
+ help_text = ["Some files have missing or incorrect copyright"
+ " statements.",
+ "",
+ "Please either add them to the list of"
+ " COPYRIGHT_EXCEPTIONS in"
+ " bzrlib/tests/test_source.py",
+ # this is broken to prevent a false match
+ "or add '# Copyright (C)"
+ " 2007 Canonical Ltd' to these files:",
+ "",
+ ]
+ for fname, comment in incorrect:
+ help_text.append(fname)
+ help_text.append((' ' * 4) + comment)
+
+ self.fail('\n'.join(help_text))
+
+ def test_gpl(self):
+ """Test that all .py and .pyx files have a GPL disclaimer."""
+ incorrect = []
+
+ gpl_txt = """
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+"""
+ gpl_re = re.compile(re.escape(gpl_txt), re.MULTILINE)
+
+ for fname, text in self.get_source_file_contents(
+ extensions=('.py', '.pyx')):
+ if self.is_license_exception(fname):
+ continue
+ if not gpl_re.search(text):
+ incorrect.append(fname)
+
+ if incorrect:
+ help_text = ['Some files have missing or incomplete GPL statement',
+ "",
+ "Please either add them to the list of"
+ " LICENSE_EXCEPTIONS in"
+ " bzrlib/tests/test_source.py",
+ "Or add the following text to the beginning:",
+ gpl_txt]
+ for fname in incorrect:
+ help_text.append((' ' * 4) + fname)
+
+ self.fail('\n'.join(help_text))
+
+ def _push_file(self, dict_, fname, line_no):
+ if fname not in dict_:
+ dict_[fname] = [line_no]
+ else:
+ dict_[fname].append(line_no)
+
+ def _format_message(self, dict_, message):
+ files = ["%s: %s" % (f, ', '.join([str(i + 1) for i in lines]))
+ for f, lines in dict_.items()]
+ files.sort()
+ return message + '\n\n %s' % ('\n '.join(files))
+
+ def test_coding_style(self):
+ """Check if bazaar code conforms to some coding style conventions.
+
+ Generally we expect PEP8, but we do not generally strictly enforce
+ this, and there are existing files that do not comply. The 'pep8'
+ tool, available separately, will check for more cases.
+
+ This test only enforces conditions that are globally true at the
+ moment, and that should cause a patch to be rejected: spaces rather
+ than tabs, unix newlines, and a newline at the end of the file.
+ """
+ tabs = {}
+ illegal_newlines = {}
+ no_newline_at_eof = []
+ for fname, text in self.get_source_file_contents(
+ extensions=('.py', '.pyx')):
+ if not self.is_our_code(fname):
+ continue
+ lines = text.splitlines(True)
+ last_line_no = len(lines) - 1
+ for line_no, line in enumerate(lines):
+ if '\t' in line:
+ self._push_file(tabs, fname, line_no)
+ if not line.endswith('\n') or line.endswith('\r\n'):
+ if line_no != last_line_no: # not no_newline_at_eof
+ self._push_file(illegal_newlines, fname, line_no)
+ if not lines[-1].endswith('\n'):
+ no_newline_at_eof.append(fname)
+ problems = []
+ if tabs:
+ problems.append(self._format_message(tabs,
+ 'Tab characters were found in the following source files.'
+ '\nThey should either be replaced by "\\t" or by spaces:'))
+ if illegal_newlines:
+ problems.append(self._format_message(illegal_newlines,
+ 'Non-unix newlines were found in the following source files:'))
+ if no_newline_at_eof:
+ no_newline_at_eof.sort()
+ problems.append("The following source files doesn't have a "
+ "newline at the end:"
+ '\n\n %s'
+ % ('\n '.join(no_newline_at_eof)))
+ if problems:
+ self.fail('\n\n'.join(problems))
+
+ def test_no_asserts(self):
+ """bzr shouldn't use the 'assert' statement."""
+ # assert causes too much variation between -O and not, and tends to
+ # give bad errors to the user
+ def search(x):
+ # scan down through x for assert statements, report any problems
+ # this is a bit cheesy; it may get some false positives?
+ if x[0] == symbol.assert_stmt:
+ return True
+ elif x[0] == token.NAME:
+ # can't search further down
+ return False
+ for sub in x[1:]:
+ if sub and search(sub):
+ return True
+ return False
+ badfiles = []
+ assert_re = re.compile(r'\bassert\b')
+ for fname, text in self.get_source_file_contents():
+ if not self.is_our_code(fname):
+ continue
+ if not assert_re.search(text):
+ continue
+ ast = parser.ast2tuple(parser.suite(text))
+ if search(ast):
+ badfiles.append(fname)
+ if badfiles:
+ self.fail(
+ "these files contain an assert statement and should not:\n%s"
+ % '\n'.join(badfiles))
+
+ def test_extension_exceptions(self):
+ """Extension functions should propagate exceptions.
+
+ Either they should return an object, have an 'except' clause, or
+ have a "# cannot_raise" to indicate that we've audited them and
+ defined them as not raising exceptions.
+ """
+ both_exc_and_no_exc = []
+ missing_except = []
+ class_re = re.compile(r'^(cdef\s+)?(public\s+)?'
+ r'(api\s+)?class (\w+).*:', re.MULTILINE)
+ extern_class_re = re.compile(r'## extern cdef class (\w+)',
+ re.MULTILINE)
+ except_re = re.compile(
+ r'cdef\s+' # start with cdef
+ r'([\w *]*?)\s*' # this is the return signature
+ r'(\w+)\s*\(' # the function name
+ r'[^)]*\)\s*' # parameters
+ r'(.*)\s*:' # the except clause
+ r'\s*(#\s*cannot[- _]raise)?') # cannot raise comment
+ for fname, text in self.get_source_file_contents(
+ extensions=('.pyx',)):
+ known_classes = set([m[-1] for m in class_re.findall(text)])
+ known_classes.update(extern_class_re.findall(text))
+ cdefs = except_re.findall(text)
+ for sig, func, exc_clause, no_exc_comment in cdefs:
+ if sig.startswith('api '):
+ sig = sig[4:]
+ if not sig or sig in known_classes:
+ sig = 'object'
+ if 'nogil' in exc_clause:
+ exc_clause = exc_clause.replace('nogil', '').strip()
+ if exc_clause and no_exc_comment:
+ both_exc_and_no_exc.append((fname, func))
+ if sig != 'object' and not (exc_clause or no_exc_comment):
+ missing_except.append((fname, func))
+ error_msg = []
+ if both_exc_and_no_exc:
+ error_msg.append(
+ 'The following functions had "cannot raise" comments'
+ ' but did have an except clause set:')
+ for fname, func in both_exc_and_no_exc:
+ error_msg.append('%s:%s' % (fname, func))
+ error_msg.extend(('', ''))
+ if missing_except:
+ error_msg.append(
+ 'The following functions have fixed return types,'
+ ' but no except clause.')
+ error_msg.append(
+ 'Either add an except or append "# cannot_raise".')
+ for fname, func in missing_except:
+ error_msg.append('%s:%s' % (fname, func))
+ error_msg.extend(('', ''))
+ if error_msg:
+ self.fail('\n'.join(error_msg))
+
+ def test_feature_absolute_import(self):
+ """Using absolute imports means avoiding unnecesary stat and
+ open calls.
+
+ Make sure that all non-test files have absolute imports enabled.
+ """
+ missing_absolute_import = []
+ for fname, text in self.get_source_file_contents(
+ extensions=('.py', )):
+ if "/tests/" in fname or "test_" in fname:
+ # We don't really care about tests
+ continue
+ if not "from __future__ import absolute_import" in text:
+ missing_absolute_import.append(fname)
+
+ if missing_absolute_import:
+ self.fail(
+ 'The following files do not have absolute_import enabled:\n'
+ '\n' + '\n'.join(missing_absolute_import))
diff --git a/bzrlib/tests/test_ssh_transport.py b/bzrlib/tests/test_ssh_transport.py
new file mode 100644
index 0000000..9e37c3b
--- /dev/null
+++ b/bzrlib/tests/test_ssh_transport.py
@@ -0,0 +1,254 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.tests import TestCase
+from bzrlib.errors import SSHVendorNotFound, UnknownSSH
+from bzrlib.transport.ssh import (
+ OpenSSHSubprocessVendor,
+ PLinkSubprocessVendor,
+ SSHCorpSubprocessVendor,
+ LSHSubprocessVendor,
+ SSHVendorManager,
+ )
+
+
+class TestSSHVendorManager(SSHVendorManager):
+
+ _ssh_version_string = ""
+
+ def set_ssh_version_string(self, version):
+ self._ssh_version_string = version
+
+ def _get_ssh_version_string(self, args):
+ return self._ssh_version_string
+
+
+class SSHVendorManagerTests(TestCase):
+
+ def test_register_vendor(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ vendor = object()
+ manager.register_vendor("vendor", vendor)
+ self.assertIs(manager.get_vendor({"BZR_SSH": "vendor"}), vendor)
+
+ def test_default_vendor(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ vendor = object()
+ manager.register_default_vendor(vendor)
+ self.assertIs(manager.get_vendor({}), vendor)
+
+ def test_get_vendor_by_environment(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ self.assertRaises(UnknownSSH,
+ manager.get_vendor, {"BZR_SSH": "vendor"})
+ vendor = object()
+ manager.register_vendor("vendor", vendor)
+ self.assertIs(manager.get_vendor({"BZR_SSH": "vendor"}), vendor)
+
+ def test_get_vendor_by_inspection_openssh(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ manager.set_ssh_version_string("OpenSSH")
+ self.assertIsInstance(manager.get_vendor({}), OpenSSHSubprocessVendor)
+
+ def test_get_vendor_by_inspection_sshcorp(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ manager.set_ssh_version_string("SSH Secure Shell")
+ self.assertIsInstance(manager.get_vendor({}), SSHCorpSubprocessVendor)
+
+ def test_get_vendor_by_inspection_lsh(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ manager.set_ssh_version_string("lsh")
+ self.assertIsInstance(manager.get_vendor({}), LSHSubprocessVendor)
+
+ def test_get_vendor_by_inspection_plink(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ manager.set_ssh_version_string("plink")
+ # Auto-detection of the plink vendor is disabled; on Windows the
+ # recommended default ssh client is paramiko;
+ # see https://bugs.launchpad.net/bugs/414743
+ #~self.assertIsInstance(manager.get_vendor({}), PLinkSubprocessVendor)
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+
+ def test_cached_vendor(self):
+ manager = TestSSHVendorManager()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ vendor = object()
+ manager.register_vendor("vendor", vendor)
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+ # Once a vendor is found the result is cached (mainly because
+ # 'get_vendor' can be an expensive operation), and later invocations
+ # of 'get_vendor' just return the cached value.
+ self.assertIs(manager.get_vendor({"BZR_SSH": "vendor"}), vendor)
+ self.assertIs(manager.get_vendor({}), vendor)
+ # The cache can be cleared by the 'clear_cache' method
+ manager.clear_cache()
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+
+ def test_get_vendor_search_order(self):
+ # The 'get_vendor' method searches for SSH vendors as follows:
+ #
+ # 1. Check previously cached value
+ # 2. Check BZR_SSH environment variable
+ # 3. Check the system for known SSH vendors
+ # 4. Fall back to the default vendor if registered
+ #
+ # Let's now exercise each of these checks in reverse order,
+ # clearing the cache between invocations:
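+ #
+ # A rough sketch of that order (not the real implementation; the
+ # helper names below are made up for illustration):
+ #
+ #     def get_vendor(self, environment):
+ #         if self._cached_vendor is None:
+ #             vendor = self._vendor_from_environment(environment)
+ #             if vendor is None:
+ #                 vendor = self._vendor_by_inspection()
+ #             if vendor is None:
+ #                 vendor = self._default_vendor()
+ #             if vendor is None:
+ #                 raise SSHVendorNotFound()
+ #             self._cached_vendor = vendor
+ #         return self._cached_vendor
+ #
+ # (In reality an unrecognised BZR_SSH value raises UnknownSSH rather
+ # than falling through, as test_get_vendor_by_environment shows.)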
+
+ manager = TestSSHVendorManager()
+ # At first no vendors are found
+ self.assertRaises(SSHVendorNotFound, manager.get_vendor, {})
+
+ # If the default vendor is registered it will be returned
+ default_vendor = object()
+ manager.register_default_vendor(default_vendor)
+ self.assertIs(manager.get_vendor({}), default_vendor)
+
+ # If a known vendor is found on the system it will be returned
+ manager.clear_cache()
+ manager.set_ssh_version_string("OpenSSH")
+ self.assertIsInstance(manager.get_vendor({}), OpenSSHSubprocessVendor)
+
+ # If the BZR_SSH environment variable is found it will be treated as
+ # the vendor name
+ manager.clear_cache()
+ vendor = object()
+ manager.register_vendor("vendor", vendor)
+ self.assertIs(manager.get_vendor({"BZR_SSH": "vendor"}), vendor)
+
+ # The previously cached value is always checked first
+ self.assertIs(manager.get_vendor({}), vendor)
+
+ def test_get_vendor_from_path_win32_plink(self):
+ manager = TestSSHVendorManager()
+ manager.set_ssh_version_string("plink: Release 0.60")
+ plink_path = "C:/Program Files/PuTTY/plink.exe"
+ vendor = manager.get_vendor({"BZR_SSH": plink_path})
+ self.assertIsInstance(vendor, PLinkSubprocessVendor)
+ args = vendor._get_vendor_specific_argv("user", "host", 22, ["bzr"])
+ self.assertEqual(args[0], plink_path)
+
+ def test_get_vendor_from_path_nix_openssh(self):
+ manager = TestSSHVendorManager()
+ manager.set_ssh_version_string(
+ "OpenSSH_5.1p1 Debian-5, OpenSSL, 0.9.8g 19 Oct 2007")
+ openssh_path = "/usr/bin/ssh"
+ vendor = manager.get_vendor({"BZR_SSH": openssh_path})
+ self.assertIsInstance(vendor, OpenSSHSubprocessVendor)
+ args = vendor._get_vendor_specific_argv("user", "host", 22, ["bzr"])
+ self.assertEqual(args[0], openssh_path)
+
+
+class SubprocessVendorsTests(TestCase):
+
+ def test_openssh_command_arguments(self):
+ vendor = OpenSSHSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, command=["bzr"]),
+ ["ssh", "-oForwardX11=no", "-oForwardAgent=no",
+ "-oClearAllForwardings=yes",
+ "-oNoHostAuthenticationForLocalhost=yes",
+ "-p", "100",
+ "-l", "user",
+ "host", "bzr"]
+ )
+
+ def test_openssh_subsystem_arguments(self):
+ vendor = OpenSSHSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, subsystem="sftp"),
+ ["ssh", "-oForwardX11=no", "-oForwardAgent=no",
+ "-oClearAllForwardings=yes",
+ "-oNoHostAuthenticationForLocalhost=yes",
+ "-p", "100",
+ "-l", "user",
+ "-s", "host", "sftp"]
+ )
+
+ def test_sshcorp_command_arguments(self):
+ vendor = SSHCorpSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, command=["bzr"]),
+ ["ssh", "-x",
+ "-p", "100",
+ "-l", "user",
+ "host", "bzr"]
+ )
+
+ def test_sshcorp_subsystem_arguments(self):
+ vendor = SSHCorpSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, subsystem="sftp"),
+ ["ssh", "-x",
+ "-p", "100",
+ "-l", "user",
+ "-s", "sftp", "host"]
+ )
+
+ def test_lsh_command_arguments(self):
+ vendor = LSHSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, command=["bzr"]),
+ ["lsh",
+ "-p", "100",
+ "-l", "user",
+ "host", "bzr"]
+ )
+
+ def test_lsh_subsystem_arguments(self):
+ vendor = LSHSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, subsystem="sftp"),
+ ["lsh",
+ "-p", "100",
+ "-l", "user",
+ "--subsystem", "sftp", "host"]
+ )
+
+ def test_plink_command_arguments(self):
+ vendor = PLinkSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, command=["bzr"]),
+ ["plink", "-x", "-a", "-ssh", "-2", "-batch",
+ "-P", "100",
+ "-l", "user",
+ "host", "bzr"]
+ )
+
+ def test_plink_subsystem_arguments(self):
+ vendor = PLinkSubprocessVendor()
+ self.assertEqual(
+ vendor._get_vendor_specific_argv(
+ "user", "host", 100, subsystem="sftp"),
+ ["plink", "-x", "-a", "-ssh", "-2", "-batch",
+ "-P", "100",
+ "-l", "user",
+ "-s", "host", "sftp"]
+ )
diff --git a/bzrlib/tests/test_status.py b/bzrlib/tests/test_status.py
new file mode 100644
index 0000000..420b548
--- /dev/null
+++ b/bzrlib/tests/test_status.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from StringIO import StringIO
+
+from bzrlib import (
+ config,
+ status as _mod_status,
+ )
+from bzrlib.revisionspec import RevisionSpec
+from bzrlib.status import show_pending_merges, show_tree_status
+from bzrlib.tests import TestCaseWithTransport
+
+
+class TestStatus(TestCaseWithTransport):
+
+ def test_pending_none(self):
+ # Test whether show_pending_merges works in a tree with no commits
+ tree = self.make_branch_and_tree('a')
+ tree.commit('empty commit')
+ tree2 = self.make_branch_and_tree('b')
+ # set a leftmost parent that is not a present commit
+ tree2.add_parent_tree_id('some-ghost', allow_leftmost_as_ghost=True)
+ # do a merge
+ tree2.merge_from_branch(tree.branch)
+ output = StringIO()
+ tree2.lock_read()
+ try:
+ show_pending_merges(tree2, output)
+ finally:
+ tree2.unlock()
+ self.assertContainsRe(output.getvalue(), 'empty commit')
+
+ def make_multiple_pending_tree(self):
+ config.GlobalStack().set('email', 'Joe Foo <joe@foo.com>')
+ tree = self.make_branch_and_tree('a')
+ tree.commit('commit 1', timestamp=1196796819, timezone=0)
+ tree2 = tree.bzrdir.clone('b').open_workingtree()
+ tree.commit('commit 2', timestamp=1196796819, timezone=0)
+ tree2.commit('commit 2b', timestamp=1196796819, timezone=0)
+ tree3 = tree2.bzrdir.clone('c').open_workingtree()
+ tree2.commit('commit 3b', timestamp=1196796819, timezone=0)
+ tree3.commit('commit 3c', timestamp=1196796819, timezone=0)
+ tree.merge_from_branch(tree2.branch)
+ tree.merge_from_branch(tree3.branch, force=True)
+ return tree
+
+ def test_multiple_pending(self):
+ tree = self.make_multiple_pending_tree()
+ output = StringIO()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ show_pending_merges(tree, output)
+ # 2b doesn't appear because it's an ancestor of 3b
+ self.assertEqualDiff(
+ 'pending merge tips: (use -v to see all merge revisions)\n'
+ ' Joe Foo 2007-12-04 commit 3b\n'
+ ' Joe Foo 2007-12-04 commit 3c\n',
+ output.getvalue())
+
+ def test_multiple_pending_verbose(self):
+ tree = self.make_multiple_pending_tree()
+ output = StringIO()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ show_pending_merges(tree, output, verbose=True)
+ # Even though 2b is in the ancestry of 3c, it should only be displayed
+ # under the first merge parent.
+ self.assertEqualDiff('pending merges:\n'
+ ' Joe Foo 2007-12-04 commit 3b\n'
+ ' Joe Foo 2007-12-04 commit 2b\n'
+ ' Joe Foo 2007-12-04 commit 3c\n',
+ output.getvalue())
+
+ def test_with_pending_ghost(self):
+ """Test when a pending merge is itself a ghost"""
+ tree = self.make_branch_and_tree('a')
+ tree.commit('first')
+ tree.add_parent_tree_id('a-ghost-revision')
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ output = StringIO()
+ show_pending_merges(tree, output)
+ self.assertEqualDiff(
+ 'pending merge tips: (use -v to see all merge revisions)\n'
+ ' (ghost) a-ghost-revision\n',
+ output.getvalue())
+
+ def test_pending_with_ghosts(self):
+ """Test when a pending merge's ancestry includes ghosts."""
+ config.GlobalStack().set('email', 'Joe Foo <joe@foo.com>')
+ tree = self.make_branch_and_tree('a')
+ tree.commit('empty commit')
+ tree2 = tree.bzrdir.clone('b').open_workingtree()
+ tree2.commit('a non-ghost', timestamp=1196796819, timezone=0)
+ tree2.add_parent_tree_id('a-ghost-revision')
+ tree2.commit('commit with ghost', timestamp=1196796819, timezone=0)
+ tree2.commit('another non-ghost', timestamp=1196796819, timezone=0)
+ tree.merge_from_branch(tree2.branch)
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ output = StringIO()
+ show_pending_merges(tree, output, verbose=True)
+ self.assertEqualDiff('pending merges:\n'
+ ' Joe Foo 2007-12-04 another non-ghost\n'
+ ' Joe Foo 2007-12-04 [merge] commit with ghost\n'
+ ' (ghost) a-ghost-revision\n'
+ ' Joe Foo 2007-12-04 a non-ghost\n',
+ output.getvalue())
+
+ def test_revision_to_revision(self):
+ """Doing a status between two revision trees should work."""
+ tree = self.make_branch_and_tree('.')
+ r1_id = tree.commit('one', allow_pointless=True)
+ r2_id = tree.commit('two', allow_pointless=True)
+ r2_tree = tree.branch.repository.revision_tree(r2_id)
+ output = StringIO()
+ show_tree_status(tree, to_file=output,
+ revision=[RevisionSpec.from_string("revid:%s" % r1_id),
+ RevisionSpec.from_string("revid:%s" % r2_id)])
+ # The return value does not matter, as long as the call did not raise.
+
+
+class TestHooks(TestCaseWithTransport):
+
+ def test_constructor(self):
+ """Check that creating a StatusHooks instance has the right defaults.
+ """
+ hooks = _mod_status.StatusHooks()
+ self.assertTrue("post_status" in hooks, "post_status not in %s" % hooks)
+ self.assertTrue("pre_status" in hooks, "pre_status not in %s" % hooks)
+
+ def test_installed_hooks_are_StatusHooks(self):
+ """The installed hooks object should be a StatusHooks.
+ """
+ # the installed hooks are saved in self._preserved_hooks.
+ self.assertIsInstance(self._preserved_hooks[_mod_status][1],
+ _mod_status.StatusHooks)
+
+ def test_post_status_hook(self):
+ """Ensure that post_status hook is invoked with the right args.
+ """
+ calls = []
+ _mod_status.hooks.install_named_hook('post_status', calls.append, None)
+ self.assertLength(0, calls)
+ tree = self.make_branch_and_tree('.')
+ r1_id = tree.commit('one', allow_pointless=True)
+ r2_id = tree.commit('two', allow_pointless=True)
+ r2_tree = tree.branch.repository.revision_tree(r2_id)
+ output = StringIO()
+ show_tree_status(tree, to_file=output,
+ revision=[RevisionSpec.from_string("revid:%s" % r1_id),
+ RevisionSpec.from_string("revid:%s" % r2_id)])
+ self.assertLength(1, calls)
+ params = calls[0]
+ self.assertIsInstance(params, _mod_status.StatusHookParams)
+ attrs = ['old_tree', 'new_tree', 'to_file', 'versioned',
+ 'show_ids', 'short', 'verbose', 'specific_files']
+ for a in attrs:
+ self.assertTrue(hasattr(params, a),
+ 'Attribute "%s" not found in StatusHookParam' % a)
+
+ def test_pre_status_hook(self):
+ """Ensure that pre_status hook is invoked with the right args.
+ """
+ calls = []
+ _mod_status.hooks.install_named_hook('pre_status', calls.append, None)
+ self.assertLength(0, calls)
+ tree = self.make_branch_and_tree('.')
+ r1_id = tree.commit('one', allow_pointless=True)
+ r2_id = tree.commit('two', allow_pointless=True)
+ r2_tree = tree.branch.repository.revision_tree(r2_id)
+ output = StringIO()
+ show_tree_status(tree, to_file=output,
+ revision=[RevisionSpec.from_string("revid:%s" % r1_id),
+ RevisionSpec.from_string("revid:%s" % r2_id)])
+ self.assertLength(1, calls)
+ params = calls[0]
+ self.assertIsInstance(params, _mod_status.StatusHookParams)
+ attrs = ['old_tree', 'new_tree', 'to_file', 'versioned',
+ 'show_ids', 'short', 'verbose', 'specific_files']
+ for a in attrs:
+ self.assertTrue(hasattr(params, a),
+ 'Attribute "%s" not found in StatusHookParam' % a)
+
diff --git a/bzrlib/tests/test_store.py b/bzrlib/tests/test_store.py
new file mode 100644
index 0000000..9a403e9
--- /dev/null
+++ b/bzrlib/tests/test_store.py
@@ -0,0 +1,449 @@
+# Copyright (C) 2005-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test Store implementations."""
+
+from cStringIO import StringIO
+import os
+import gzip
+
+import bzrlib.errors as errors
+from bzrlib.errors import BzrError
+from bzrlib.store import TransportStore
+from bzrlib.store.text import TextStore
+from bzrlib.store.versioned import VersionedFileStore
+from bzrlib.tests import TestCase, TestCaseInTempDir, TestCaseWithTransport
+import bzrlib.transactions as transactions
+import bzrlib.transport as transport
+from bzrlib.transport.memory import MemoryTransport
+from bzrlib.weave import WeaveFile
+
+
+class TestStores(object):
+ """Mixin template class that provides some common tests for stores"""
+
+ def check_content(self, store, fileid, value):
+ f = store.get(fileid)
+ self.assertEqual(f.read(), value)
+
+ def fill_store(self, store):
+ store.add(StringIO('hello'), 'a')
+ store.add(StringIO('other'), 'b')
+ store.add(StringIO('something'), 'c')
+ store.add(StringIO('goodbye'), '123123')
+
+ def test_copy_all(self):
+ """Test copying"""
+ os.mkdir('a')
+ store_a = self.get_store('a')
+ store_a.add(StringIO('foo'), '1')
+ os.mkdir('b')
+ store_b = self.get_store('b')
+ store_b.copy_all_ids(store_a)
+ self.assertEqual(store_a.get('1').read(), 'foo')
+ self.assertEqual(store_b.get('1').read(), 'foo')
+ # TODO: Switch the exception from UnlistableStore to
+ # or make Stores throw UnlistableStore if their
+ # Transport doesn't support listing
+ # store_c = RemoteStore('http://example.com/')
+ # self.assertRaises(UnlistableStore, copy_all, store_c, store_b)
+
+ def test_get(self):
+ store = self.get_store()
+ self.fill_store(store)
+
+ self.check_content(store, 'a', 'hello')
+ self.check_content(store, 'b', 'other')
+ self.check_content(store, 'c', 'something')
+
+ # Make sure that requesting a non-existing file fails
+ self.assertRaises(KeyError, self.check_content, store, 'd', None)
+
+ def test_multiple_add(self):
+ """Multiple add with same ID should raise a BzrError"""
+ store = self.get_store()
+ self.fill_store(store)
+ self.assertRaises(BzrError, store.add, StringIO('goodbye'), '123123')
+
+
+class TestCompressedTextStore(TestCaseInTempDir, TestStores):
+
+ def get_store(self, path=u'.'):
+ t = transport.get_transport_from_path(path)
+ return TextStore(t, compressed=True)
+
+ def test_total_size(self):
+ store = self.get_store(u'.')
+ store.register_suffix('dsc')
+ store.add(StringIO('goodbye'), '123123')
+ store.add(StringIO('goodbye2'), '123123', 'dsc')
+ # these get gzipped - the compressed output should be stable
+ self.assertEqual(store.total_size(), (2, 55))
+
+ def test__relpath_suffixed(self):
+ my_store = TextStore(MockTransport(),
+ prefixed=True, compressed=True)
+ my_store.register_suffix('dsc')
+ self.assertEqual('45/foo.dsc', my_store._relpath('foo', ['dsc']))
+
+
+class TestMemoryStore(TestCase):
+
+ def get_store(self):
+ return TextStore(MemoryTransport())
+
+ def test_add_and_retrieve(self):
+ store = self.get_store()
+ store.add(StringIO('hello'), 'aa')
+ self.assertNotEqual(store.get('aa'), None)
+ self.assertEqual(store.get('aa').read(), 'hello')
+ store.add(StringIO('hello world'), 'bb')
+ self.assertNotEqual(store.get('bb'), None)
+ self.assertEqual(store.get('bb').read(), 'hello world')
+
+ def test_missing_is_absent(self):
+ store = self.get_store()
+ self.assertFalse('aa' in store)
+
+ def test_adding_fails_when_present(self):
+ my_store = self.get_store()
+ my_store.add(StringIO('hello'), 'aa')
+ self.assertRaises(BzrError,
+ my_store.add, StringIO('hello'), 'aa')
+
+ def test_total_size(self):
+ store = self.get_store()
+ store.add(StringIO('goodbye'), '123123')
+ store.add(StringIO('goodbye2'), '123123.dsc')
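+ # total_size() is expected to return a (file count, total bytes)
+ # tuple; here that is 2 files of 7 + 8 = 15 uncompressed bytes.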
+ self.assertEqual(store.total_size(), (2, 15))
+ # TODO: Switch the exception from UnlistableStore to
+ # or make Stores throw UnlistableStore if their
+ # Transport doesn't support listing
+ # store_c = RemoteStore('http://example.com/')
+ # self.assertRaises(UnlistableStore, copy_all, store_c, store_b)
+
+
+class TestTextStore(TestCaseInTempDir, TestStores):
+
+ def get_store(self, path=u'.'):
+ t = transport.get_transport_from_path(path)
+ return TextStore(t, compressed=False)
+
+ def test_total_size(self):
+ store = self.get_store()
+ store.add(StringIO('goodbye'), '123123')
+ store.add(StringIO('goodbye2'), '123123.dsc')
+ self.assertEqual(store.total_size(), (2, 15))
+ # TODO: Switch the exception from UnlistableStore to
+ # or make Stores throw UnlistableStore if their
+ # Transport doesn't support listing
+ # store_c = RemoteStore('http://example.com/')
+ # self.assertRaises(UnlistableStore, copy_all, store_c, store_b)
+
+
+class TestMixedTextStore(TestCaseInTempDir, TestStores):
+
+ def get_store(self, path=u'.', compressed=True):
+ t = transport.get_transport_from_path(path)
+ return TextStore(t, compressed=compressed)
+
+ def test_get_mixed(self):
+ cs = self.get_store(u'.', compressed=True)
+ s = self.get_store(u'.', compressed=False)
+ cs.add(StringIO('hello there'), 'a')
+
+ self.assertPathExists('a.gz')
+ self.assertFalse(os.path.lexists('a'))
+
+ self.assertEquals(gzip.GzipFile('a.gz').read(), 'hello there')
+
+ self.assertEquals(cs.has_id('a'), True)
+ self.assertEquals(s.has_id('a'), True)
+ self.assertEquals(cs.get('a').read(), 'hello there')
+ self.assertEquals(s.get('a').read(), 'hello there')
+
+ self.assertRaises(BzrError, s.add, StringIO('goodbye'), 'a')
+
+ s.add(StringIO('goodbye'), 'b')
+ self.assertPathExists('b')
+ self.assertFalse(os.path.lexists('b.gz'))
+ self.assertEquals(open('b').read(), 'goodbye')
+
+ self.assertEquals(cs.has_id('b'), True)
+ self.assertEquals(s.has_id('b'), True)
+ self.assertEquals(cs.get('b').read(), 'goodbye')
+ self.assertEquals(s.get('b').read(), 'goodbye')
+
+ self.assertRaises(BzrError, cs.add, StringIO('again'), 'b')
+
+
+class MockTransport(transport.Transport):
+ """A fake transport for testing with."""
+
+ def has(self, filename):
+ return False
+
+ def __init__(self, url=None):
+ if url is None:
+ url = "http://example.com"
+ super(MockTransport, self).__init__(url)
+
+ def mkdir(self, filename):
+ return
+
+
+class InstrumentedTransportStore(TransportStore):
+ """An instrumented TransportStore.
+
+ Here we replace the template method's worker methods with stubs that
+ record how they were called.
+ """
+
+ def _add(self, filename, file):
+ self._calls.append(("_add", filename, file))
+
+ def __init__(self, transport, prefixed=False):
+ super(InstrumentedTransportStore, self).__init__(transport, prefixed)
+ self._calls = []
+
+
+class TestInstrumentedTransportStore(TestCase):
+
+ def test__add_records(self):
+ my_store = InstrumentedTransportStore(MockTransport())
+ my_store._add("filename", "file")
+ self.assertEqual([("_add", "filename", "file")], my_store._calls)
+
+
+class TestMockTransport(TestCase):
+
+ def test_isinstance(self):
+ self.assertIsInstance(MockTransport(), transport.Transport)
+
+ def test_has(self):
+ self.assertEqual(False, MockTransport().has('foo'))
+
+ def test_mkdir(self):
+ MockTransport().mkdir('45')
+
+
+class TestTransportStore(TestCase):
+
+ def test__relpath_invalid(self):
+ my_store = TransportStore(MockTransport())
+ self.assertRaises(ValueError, my_store._relpath, '/foo')
+ self.assertRaises(ValueError, my_store._relpath, 'foo/')
+
+ def test_register_invalid_suffixes(self):
+ my_store = TransportStore(MockTransport())
+ self.assertRaises(ValueError, my_store.register_suffix, '/')
+ self.assertRaises(ValueError, my_store.register_suffix, '.gz/bar')
+
+ def test__relpath_unregister_suffixes(self):
+ my_store = TransportStore(MockTransport())
+ self.assertRaises(ValueError, my_store._relpath, 'foo', ['gz'])
+ self.assertRaises(ValueError, my_store._relpath, 'foo', ['dsc', 'gz'])
+
+ def test__relpath_simple(self):
+ my_store = TransportStore(MockTransport())
+ self.assertEqual("foo", my_store._relpath('foo'))
+
+ def test__relpath_prefixed(self):
+ my_store = TransportStore(MockTransport(), True)
+ self.assertEqual('45/foo', my_store._relpath('foo'))
+
+ def test__relpath_simple_suffixed(self):
+ my_store = TransportStore(MockTransport())
+ my_store.register_suffix('bar')
+ my_store.register_suffix('baz')
+ self.assertEqual('foo.baz', my_store._relpath('foo', ['baz']))
+ self.assertEqual('foo.bar.baz', my_store._relpath('foo', ['bar', 'baz']))
+
+ def test__relpath_prefixed_suffixed(self):
+ my_store = TransportStore(MockTransport(), True)
+ my_store.register_suffix('bar')
+ my_store.register_suffix('baz')
+ self.assertEqual('45/foo.baz', my_store._relpath('foo', ['baz']))
+ self.assertEqual('45/foo.bar.baz',
+ my_store._relpath('foo', ['bar', 'baz']))
+
+ def test_add_simple(self):
+ stream = StringIO("content")
+ my_store = InstrumentedTransportStore(MockTransport())
+ my_store.add(stream, "foo")
+ self.assertEqual([("_add", "foo", stream)], my_store._calls)
+
+ def test_add_prefixed(self):
+ stream = StringIO("content")
+ my_store = InstrumentedTransportStore(MockTransport(), True)
+ my_store.add(stream, "foo")
+ self.assertEqual([("_add", "45/foo", stream)], my_store._calls)
+
+ def test_add_simple_suffixed(self):
+ stream = StringIO("content")
+ my_store = InstrumentedTransportStore(MockTransport())
+ my_store.register_suffix('dsc')
+ my_store.add(stream, "foo", 'dsc')
+ self.assertEqual([("_add", "foo.dsc", stream)], my_store._calls)
+
+ def test_add_prefixed_suffixed(self):
+ stream = StringIO("content")
+ my_store = InstrumentedTransportStore(MockTransport(), True)
+ my_store.register_suffix('dsc')
+ my_store.add(stream, "foo", 'dsc')
+ self.assertEqual([("_add", "45/foo.dsc", stream)], my_store._calls)
+
+ def get_populated_store(self, prefixed=False,
+ store_class=TextStore, compressed=False):
+ my_store = store_class(MemoryTransport(), prefixed,
+ compressed=compressed)
+ my_store.register_suffix('sig')
+ stream = StringIO("signature")
+ my_store.add(stream, "foo", 'sig')
+ stream = StringIO("content")
+ my_store.add(stream, "foo")
+ stream = StringIO("signature for missing base")
+ my_store.add(stream, "missing", 'sig')
+ return my_store
+
+ def test_has_simple(self):
+ my_store = self.get_populated_store()
+ self.assertEqual(True, my_store.has_id('foo'))
+ my_store = self.get_populated_store(True)
+ self.assertEqual(True, my_store.has_id('foo'))
+
+ def test_has_suffixed(self):
+ my_store = self.get_populated_store()
+ self.assertEqual(True, my_store.has_id('foo', 'sig'))
+ my_store = self.get_populated_store(True)
+ self.assertEqual(True, my_store.has_id('foo', 'sig'))
+
+ def test_has_suffixed_no_base(self):
+ my_store = self.get_populated_store()
+ self.assertEqual(False, my_store.has_id('missing'))
+ my_store = self.get_populated_store(True)
+ self.assertEqual(False, my_store.has_id('missing'))
+
+ def test_get_simple(self):
+ my_store = self.get_populated_store()
+ self.assertEqual('content', my_store.get('foo').read())
+ my_store = self.get_populated_store(True)
+ self.assertEqual('content', my_store.get('foo').read())
+
+ def test_get_suffixed(self):
+ my_store = self.get_populated_store()
+ self.assertEqual('signature', my_store.get('foo', 'sig').read())
+ my_store = self.get_populated_store(True)
+ self.assertEqual('signature', my_store.get('foo', 'sig').read())
+
+ def test_get_suffixed_no_base(self):
+ my_store = self.get_populated_store()
+ self.assertEqual('signature for missing base',
+ my_store.get('missing', 'sig').read())
+ my_store = self.get_populated_store(True)
+ self.assertEqual('signature for missing base',
+ my_store.get('missing', 'sig').read())
+
+ def test___iter__no_suffix(self):
+ my_store = TextStore(MemoryTransport(),
+ prefixed=False, compressed=False)
+ stream = StringIO("content")
+ my_store.add(stream, "foo")
+ self.assertEqual(set(['foo']),
+ set(my_store.__iter__()))
+
+ def test___iter__(self):
+ self.assertEqual(set(['foo']),
+ set(self.get_populated_store().__iter__()))
+ self.assertEqual(set(['foo']),
+ set(self.get_populated_store(True).__iter__()))
+
+ def test___iter__compressed(self):
+ self.assertEqual(set(['foo']),
+ set(self.get_populated_store(
+ compressed=True).__iter__()))
+ self.assertEqual(set(['foo']),
+ set(self.get_populated_store(
+ True, compressed=True).__iter__()))
+
+ def test___len__(self):
+ self.assertEqual(1, len(self.get_populated_store()))
+
+ def test_copy_suffixes(self):
+ from_store = self.get_populated_store()
+ to_store = TextStore(MemoryTransport(),
+ prefixed=True, compressed=True)
+ to_store.register_suffix('sig')
+ to_store.copy_all_ids(from_store)
+ self.assertEqual(1, len(to_store))
+ self.assertEqual(set(['foo']), set(to_store.__iter__()))
+ self.assertEqual('content', to_store.get('foo').read())
+ self.assertEqual('signature', to_store.get('foo', 'sig').read())
+ self.assertRaises(KeyError, to_store.get, 'missing', 'sig')
+
+ def test_relpath_escaped(self):
+ my_store = TransportStore(MemoryTransport())
+ self.assertEqual('%25', my_store._relpath('%'))
+
+ def test_escaped_uppercase(self):
+ """Uppercase letters are escaped for safety on Windows"""
+ my_store = TransportStore(MemoryTransport(), prefixed=True,
+ escaped=True)
+ # a particularly perverse file-id! :-)
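+ # Illustrative breakdown of the expected value: each character of
+ # 'C:<>' is first escaped to '%43%3a%3c%3e', and the '%' signs are
+ # then themselves quoted as '%25', giving '%2543%253a%253c%253e'
+ # under the 'be/' prefix directory.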
+ self.assertEquals(my_store._relpath('C:<>'), 'be/%2543%253a%253c%253e')
+
+
+class TestVersionFileStore(TestCaseWithTransport):
+
+ def get_scope(self):
+ return self._transaction
+
+ def setUp(self):
+ super(TestVersionFileStore, self).setUp()
+ self.vfstore = VersionedFileStore(MemoryTransport(),
+ versionedfile_class=WeaveFile)
+ self.vfstore.get_scope = self.get_scope
+ self._transaction = None
+
+ def test_get_weave_registers_dirty_in_write(self):
+ self._transaction = transactions.WriteTransaction()
+ vf = self.vfstore.get_weave_or_empty('id', self._transaction)
+ self._transaction.finish()
+ self._transaction = None
+ self.assertRaises(errors.OutSideTransaction, vf.add_lines, 'b', [], [])
+ self._transaction = transactions.WriteTransaction()
+ vf = self.vfstore.get_weave('id', self._transaction)
+ self._transaction.finish()
+ self._transaction = None
+ self.assertRaises(errors.OutSideTransaction, vf.add_lines, 'b', [], [])
+
+ def test_get_weave_readonly_cant_write(self):
+ self._transaction = transactions.WriteTransaction()
+ vf = self.vfstore.get_weave_or_empty('id', self._transaction)
+ self._transaction.finish()
+ self._transaction = transactions.ReadOnlyTransaction()
+ vf = self.vfstore.get_weave_or_empty('id', self._transaction)
+ self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'b', [], [])
+
+ def test___iter__escaped(self):
+ self.vfstore = VersionedFileStore(MemoryTransport(),
+ prefixed=True, escaped=True, versionedfile_class=WeaveFile)
+ self.vfstore.get_scope = self.get_scope
+ self._transaction = transactions.WriteTransaction()
+ vf = self.vfstore.get_weave_or_empty(' ', self._transaction)
+ vf.add_lines('a', [], [])
+ del vf
+ self._transaction.finish()
+ self.assertEqual([' '], list(self.vfstore))
diff --git a/bzrlib/tests/test_strace.py b/bzrlib/tests/test_strace.py
new file mode 100644
index 0000000..b406d1d
--- /dev/null
+++ b/bzrlib/tests/test_strace.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the strace-invoking support."""
+
+import threading
+
+from bzrlib import (
+ strace,
+ tests,
+ )
+from bzrlib.strace import strace_detailed, StraceResult
+from bzrlib.tests.features import (
+ strace_feature,
+ )
+
+
+class TestStrace(tests.TestCaseWithTransport):
+
+ _test_needs_features = [strace_feature]
+
+ def setUp(self):
+ # NB: see http://pad.lv/626679 and
+ # <https://code.launchpad.net/~mbp/bzr/626679-strace/+merge/34157>:
+ # testing strace by connecting to ourselves has repeatedly caused
+ # hangs in running the test suite; these are fixable given enough
+ # determination but given that strace is not used by any other tests
+ # at the moment and that it's only test-support code, we just leave it
+ # untested -- mbp 20100901
+ raise tests.TestSkipped("strace selftests are broken and disabled")
+
+ def _check_threads(self):
+ # For bug #226769, it was decided that the strace tests should not be
+ # run when more than one thread is active. A lot of tests are currently
+ # leaking threads for good or bad reasons; once they are fixed or
+ # strace itself is fixed (bug #103133), we can get rid of the
+ # restriction.
+ active = threading.activeCount()
+ if active > 1: # There is always the main thread at least
+ self.knownFailure(
+ '%d active threads, bug #103133 needs to be fixed.' % active)
+
+ def strace_detailed_or_skip(self, *args, **kwargs):
+ """Run strace, but cope if it's not allowed"""
+ try:
+ return strace_detailed(*args, **kwargs)
+ except strace.StraceError, e:
+ if e.err_messages.startswith(
+ "attach: ptrace(PTRACE_ATTACH, ...): Operation not permitted"):
+ raise tests.TestSkipped("ptrace not permitted")
+ else:
+ raise
+
+ def test_strace_callable_is_called(self):
+ self._check_threads()
+
+ output = []
+ def function(positional, *args, **kwargs):
+ output.append((positional, args, kwargs))
+ self.strace_detailed_or_skip(
+ function, ["a", "b"], {"c": "c"},
+ follow_children=False)
+ self.assertEqual([("a", ("b",), {"c":"c"})], output)
+
+ def test_strace_callable_result(self):
+ self._check_threads()
+
+ def function():
+ return "foo"
+ result, strace_result = self.strace_detailed_or_skip(function, [], {},
+ follow_children=False)
+ self.assertEqual("foo", result)
+ self.assertIsInstance(strace_result, StraceResult)
+
+ def test_strace_result_has_raw_log(self):
+ """Checks that a reasonable raw strace log was found by strace."""
+ self._check_threads()
+
+ def function():
+ self.build_tree(['myfile'])
+ unused, result = self.strace_detailed_or_skip(function, [], {},
+ follow_children=False)
+ self.assertContainsRe(result.raw_log, 'myfile')
diff --git a/bzrlib/tests/test_subsume.py b/bzrlib/tests/test_subsume.py
new file mode 100644
index 0000000..ba20e47
--- /dev/null
+++ b/bzrlib/tests/test_subsume.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2006-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import errors, workingtree, tests
+
+
+class TestWorkingTree(tests.TestCaseWithTransport):
+
+ def make_branch_and_tree(self, relpath, format=None):
+ if format is None:
+ format = 'development-subtree'
+ return tests.TestCaseWithTransport.make_branch_and_tree(self, relpath,
+ format)
+
+ def make_trees(self, format=None, same_root=False):
+ self.build_tree(['tree/',
+ 'tree/file',
+ 'tree/subtree/',
+ 'tree/subtree/file2'])
+ base_tree = self.make_branch_and_tree('tree', format=format)
+ base_tree.add('file', 'file-id')
+ base_tree.commit('first commit', rev_id='tree-1')
+ sub_tree = self.make_branch_and_tree('tree/subtree',
+ format='development-subtree')
+ if same_root is True:
+ sub_tree.set_root_id(base_tree.get_root_id())
+ sub_tree.add('file2', 'file2-id')
+ sub_tree.commit('first commit', rev_id='subtree-1')
+ return base_tree, sub_tree
+
+ def test_old_knit1_failure(self):
+ """Ensure that BadSubsumeSource is raised.
+
+ SubsumeTargetNeedsUpgrade must not be raised, because upgrading the
+ target won't help.
+ """
+ base_tree, sub_tree = self.make_trees(format='knit',
+ same_root=True)
+ self.assertRaises(errors.BadSubsumeSource, base_tree.subsume,
+ sub_tree)
+
+ def test_knit1_failure(self):
+ base_tree, sub_tree = self.make_trees(format='knit')
+ self.assertRaises(errors.SubsumeTargetNeedsUpgrade, base_tree.subsume,
+ sub_tree)
+
+ def test_subsume_tree(self):
+ base_tree, sub_tree = self.make_trees()
+ self.assertNotEqual(base_tree.get_root_id(), sub_tree.get_root_id())
+ sub_root_id = sub_tree.get_root_id()
+ # this test checks the subdir is removed, so it needs to know the
+ # control directory; that changes rarely so just hardcode (and check)
+ # it is correct.
+ self.assertPathExists('tree/subtree/.bzr')
+ base_tree.subsume(sub_tree)
+ self.assertEqual(['tree-1', 'subtree-1'], base_tree.get_parent_ids())
+ self.assertEqual(sub_root_id, base_tree.path2id('subtree'))
+ self.assertEqual('file2-id', base_tree.path2id('subtree/file2'))
+ # subsuming the tree removes the control directory, so you can't open
+ # it.
+ self.assertPathDoesNotExist('tree/subtree/.bzr')
+ file2 = open('tree/subtree/file2', 'rb')
+ try:
+ file2_contents = file2.read()
+ finally:
+ file2.close()
+ base_tree = workingtree.WorkingTree.open('tree')
+ base_tree.commit('combined', rev_id='combined-1')
+ self.assertEqual('file2-id', base_tree.path2id('subtree/file2'))
+ self.assertEqual('subtree/file2', base_tree.id2path('file2-id'))
+ self.assertEqualDiff(file2_contents,
+ base_tree.get_file_text('file2-id'))
+ basis_tree = base_tree.basis_tree()
+ basis_tree.lock_read()
+ self.addCleanup(basis_tree.unlock)
+ self.assertEqualDiff(file2_contents,
+ base_tree.get_file_text('file2-id'))
+ self.assertEqualDiff(file2_contents,
+ basis_tree.get_file_text('file2-id'))
+ self.assertEqual('subtree-1',
+ basis_tree.get_file_revision('file2-id'))
+ self.assertEqual('combined-1',
+ basis_tree.get_file_revision(sub_root_id))
+
+ def test_subsume_failure(self):
+ base_tree, sub_tree = self.make_trees()
+ if base_tree.get_root_id() == sub_tree.get_root_id():
+ raise tests.TestSkipped('This test requires unique roots')
+ sub_root_id = sub_tree.get_root_id()
+ self.assertRaises(errors.BadSubsumeSource, base_tree.subsume,
+ base_tree)
+ self.assertRaises(errors.BadSubsumeSource, sub_tree.subsume,
+ base_tree)
+ self.build_tree(['subtree2/'])
+ sub_tree2 = self.make_branch_and_tree('subtree2')
+ self.assertRaises(errors.BadSubsumeSource, sub_tree.subsume,
+ sub_tree2)
+ self.build_tree(['tree/subtree/subtree3/'])
+ sub_tree3 = self.make_branch_and_tree('tree/subtree/subtree3')
+ self.assertRaises(errors.BadSubsumeSource, base_tree.subsume,
+ sub_tree3)
diff --git a/bzrlib/tests/test_switch.py b/bzrlib/tests/test_switch.py
new file mode 100644
index 0000000..421ea08
--- /dev/null
+++ b/bzrlib/tests/test_switch.py
@@ -0,0 +1,191 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.switch."""
+
+
+import os
+
+from bzrlib import (
+ branch,
+ errors,
+ merge as _mod_merge,
+ switch,
+ tests,
+ )
+
+
+class TestSwitch(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestSwitch, self).setUp()
+ self.lightweight = True
+
+ def _setup_tree(self):
+ tree = self.make_branch_and_tree('branch-1')
+ self.build_tree(['branch-1/file-1'])
+ tree.add('file-1')
+ tree.commit('rev1')
+ return tree
+
+ def test_switch_updates(self):
+ """Test switch updates tree and keeps uncommitted changes."""
+ tree = self._setup_tree()
+ to_branch = tree.bzrdir.sprout('branch-2').open_branch()
+ self.build_tree(['branch-1/file-2'])
+ tree.add('file-2')
+ tree.remove('file-1')
+ tree.commit('rev2')
+ checkout = tree.branch.create_checkout('checkout',
+ lightweight=self.lightweight)
+ self.build_tree(['checkout/file-3'])
+ checkout.add('file-3')
+ self.assertPathDoesNotExist('checkout/file-1')
+ self.assertPathExists('checkout/file-2')
+ switch.switch(checkout.bzrdir, to_branch)
+ self.assertPathExists('checkout/file-1')
+ self.assertPathDoesNotExist('checkout/file-2')
+ self.assertPathExists('checkout/file-3')
+
+ def test_switch_after_branch_moved(self):
+ """Test switch after the branch is moved."""
+ tree = self._setup_tree()
+ checkout = tree.branch.create_checkout('checkout',
+ lightweight=self.lightweight)
+ self.build_tree(['branch-1/file-2'])
+ tree.add('file-2')
+ tree.remove('file-1')
+ tree.commit('rev2')
+ self.build_tree(['checkout/file-3'])
+ checkout.add('file-3')
+ checkout_dir = checkout.bzrdir
+ # rename the branch on disk; the checkout object is now invalid.
+ os.rename('branch-1', 'branch-2')
+ to_branch = branch.Branch.open('branch-2')
+ # Check fails without --force
+ err = self.assertRaises(
+ (errors.BzrCommandError, errors.NotBranchError),
+ switch.switch, checkout.bzrdir, to_branch)
+ if isinstance(err, errors.BzrCommandError):
+ self.assertContainsRe(str(err),
+ 'Unable to connect to current master branch .*'
+ 'To switch anyway, use --force.')
+ switch.switch(checkout.bzrdir, to_branch, force=True)
+ self.assertPathDoesNotExist('checkout/file-1')
+ self.assertPathExists('checkout/file-2')
+ self.assertPathExists('checkout/file-3')
+
+ def test_switch_when_pending_merges(self):
+ """Test graceful failure if pending merges are outstanding."""
+ # Create 2 branches and a checkout
+ tree = self._setup_tree()
+ tree2 = tree.bzrdir.sprout('branch-2').open_workingtree()
+ checkout = tree.branch.create_checkout('checkout',
+ lightweight=self.lightweight)
+ # Change tree2 and merge it into the checkout without committing
+ self.build_tree(['branch-2/file-2'])
+ tree2.add('file-2')
+ tree2.commit('rev2')
+ checkout.merge_from_branch(tree2.branch)
+ # Check the error reporting is as expected
+ err = self.assertRaises(errors.BzrCommandError,
+ switch.switch, checkout.bzrdir, tree2.branch)
+ self.assertContainsRe(str(err),
+ "Pending merges must be committed or reverted before using switch")
+
+ def test_switch_with_revision(self):
+ """Test switch when a revision is given."""
+ # Create a tree with 2 revisions
+ tree = self.make_branch_and_tree('branch-1')
+ self.build_tree(['branch-1/file-1'])
+ tree.add('file-1')
+ tree.commit(rev_id='rev1', message='rev1')
+ self.build_tree(['branch-1/file-2'])
+ tree.add('file-2')
+ tree.commit(rev_id='rev2', message='rev2')
+ # Check it out and switch to revision 1
+ checkout = tree.branch.create_checkout('checkout',
+ lightweight=self.lightweight)
+ switch.switch(checkout.bzrdir, tree.branch, revision_id="rev1")
+ self.assertPathExists('checkout/file-1')
+ self.assertPathDoesNotExist('checkout/file-2')
+
+ def test_switch_changing_root_id(self):
+ tree = self._setup_tree()
+ tree2 = self.make_branch_and_tree('tree-2')
+ tree2.set_root_id('custom-root-id')
+ self.build_tree(['tree-2/file-2'])
+ tree2.add(['file-2'])
+ tree2.commit('rev1b')
+ checkout = tree.branch.create_checkout('checkout',
+ lightweight=self.lightweight)
+ switch.switch(checkout.bzrdir, tree2.branch)
+ self.assertEqual('custom-root-id', tree2.get_root_id())
+
+ def test_switch_configurable_file_merger(self):
+ class DummyMerger(_mod_merge.ConfigurableFileMerger):
+ name_prefix = 'file'
+
+ _mod_merge.Merger.hooks.install_named_hook(
+ 'merge_file_content', DummyMerger,
+ 'test factory')
+ foo = self.make_branch('foo')
+ checkout = foo.create_checkout('checkout', lightweight=True)
+ self.build_tree_contents([('checkout/file', 'a')])
+ checkout.add('file')
+ checkout.commit('a')
+ bar = foo.bzrdir.sprout('bar').open_workingtree()
+ self.build_tree_contents([('bar/file', 'b')])
+ bar.commit('b')
+ self.build_tree_contents([('checkout/file', 'c')])
+ switch.switch(checkout.bzrdir, bar.branch)
+
+
+class TestSwitchHeavyweight(TestSwitch):
+
+ def setUp(self):
+ super(TestSwitchHeavyweight, self).setUp()
+ self.lightweight = False
+
+ def test_switch_with_local_commits(self):
+ """Test switch complains about local commits unless --force given."""
+ tree = self._setup_tree()
+ to_branch = tree.bzrdir.sprout('branch-2').open_branch()
+ self.build_tree(['branch-1/file-2'])
+ tree.add('file-2')
+ tree.remove('file-1')
+ tree.commit('rev2')
+ checkout = tree.branch.create_checkout('checkout')
+ self.build_tree(['checkout/file-3'])
+ checkout.add('file-3')
+ checkout.commit(message='local only commit', local=True)
+ self.build_tree(['checkout/file-4'])
+ # Check the error reporting is as expected
+ err = self.assertRaises(errors.BzrCommandError,
+ switch.switch, checkout.bzrdir, to_branch)
+ self.assertContainsRe(str(err),
+ 'Cannot switch as local commits found in the checkout.')
+ # Check all is ok when force is given
+ self.assertPathDoesNotExist('checkout/file-1')
+ self.assertPathExists('checkout/file-2')
+ switch.switch(checkout.bzrdir, to_branch, force=True)
+ self.assertPathExists('checkout/file-1')
+ self.assertPathDoesNotExist('checkout/file-2')
+ self.assertPathDoesNotExist('checkout/file-3')
+ self.assertPathExists('checkout/file-4')
+ # Check that the checkout is a true mirror of the bound branch
+ self.assertEqual(to_branch.last_revision_info(),
+ checkout.branch.last_revision_info())
diff --git a/bzrlib/tests/test_symbol_versioning.py b/bzrlib/tests/test_symbol_versioning.py
new file mode 100644
index 0000000..fc036a4
--- /dev/null
+++ b/bzrlib/tests/test_symbol_versioning.py
@@ -0,0 +1,297 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Symbol versioning tests."""
+
+import warnings
+
+from bzrlib import symbol_versioning
+from bzrlib.symbol_versioning import (
+ deprecated_function,
+ deprecated_in,
+ deprecated_method,
+ )
+from bzrlib.tests import TestCase
+
+
+@deprecated_function(deprecated_in((0, 7, 0)))
+def sample_deprecated_function():
+ """Deprecated function docstring."""
+ return 1
+
+
+a_deprecated_list = symbol_versioning.deprecated_list(deprecated_in((0, 9, 0)),
+ 'a_deprecated_list', ['one'], extra="Don't use me")
+
+
+a_deprecated_dict = symbol_versioning.DeprecatedDict(
+ deprecated_in((0, 14, 0)),
+ 'a_deprecated_dict',
+ dict(a=42),
+ advice='Pull the other one!',
+ )
+
+
+class TestDeprecationWarnings(TestCase):
+
+ def capture_warning(self, message, category, stacklevel=None):
+ self._warnings.append((message, category, stacklevel))
+
+ def setUp(self):
+ super(TestDeprecationWarnings, self).setUp()
+ self._warnings = []
+
+ @deprecated_method(deprecated_in((0, 7, 0)))
+ def deprecated_method(self):
+ """Deprecated method docstring.
+
+ This might explain stuff.
+ """
+ return 1
+
+ @staticmethod
+ @deprecated_function(deprecated_in((0, 7, 0)))
+ def deprecated_static():
+ """Deprecated static."""
+ return 1
+
+ def test_deprecated_static(self):
+ # XXX: The results are not quite right because the class name is not
+ # shown - however it is enough to give people a good indication of
+ # where the problem is.
+ expected_warning = (
+ "bzrlib.tests.test_symbol_versioning."
+ "deprecated_static "
+ "was deprecated in version 0.7.0.", DeprecationWarning, 2)
+ expected_docstring = (
+ 'Deprecated static.\n'
+ '\n'
+ 'This function was deprecated in version 0.7.0.\n'
+ )
+ self.check_deprecated_callable(
+ expected_warning, expected_docstring,
+ "deprecated_static",
+ "bzrlib.tests.test_symbol_versioning",
+ self.deprecated_static)
+
+ def test_deprecated_method(self):
+ expected_warning = (
+ "bzrlib.tests.test_symbol_versioning."
+ "TestDeprecationWarnings.deprecated_method "
+ "was deprecated in version 0.7.0.", DeprecationWarning, 2)
+ expected_docstring = (
+ 'Deprecated method docstring.\n'
+ '\n'
+ ' This might explain stuff.\n'
+ ' \n'
+ ' This method was deprecated in version 0.7.0.\n'
+ ' ')
+ self.check_deprecated_callable(expected_warning, expected_docstring,
+ "deprecated_method",
+ "bzrlib.tests.test_symbol_versioning",
+ self.deprecated_method)
+
+ def test_deprecated_function(self):
+ expected_warning = (
+ "bzrlib.tests.test_symbol_versioning.sample_deprecated_function "
+ "was deprecated in version 0.7.0.", DeprecationWarning, 2)
+ expected_docstring = ('Deprecated function docstring.\n'
+ '\n'
+ 'This function was deprecated in version 0.7.0.\n'
+ )
+ self.check_deprecated_callable(expected_warning, expected_docstring,
+ "sample_deprecated_function",
+ "bzrlib.tests.test_symbol_versioning",
+ sample_deprecated_function)
+
+ def test_deprecated_list(self):
+ expected_warning = (
+ "Modifying a_deprecated_list was deprecated in version 0.9.0."
+ " Don't use me", DeprecationWarning, 3)
+ old_warning_method = symbol_versioning.warn
+ try:
+ symbol_versioning.set_warning_method(self.capture_warning)
+ self.assertEqual(['one'], a_deprecated_list)
+ self.assertEqual([], self._warnings)
+
+ a_deprecated_list.append('foo')
+ self.assertEqual([expected_warning], self._warnings)
+ self.assertEqual(['one', 'foo'], a_deprecated_list)
+
+ a_deprecated_list.extend(['bar', 'baz'])
+ self.assertEqual([expected_warning]*2, self._warnings)
+ self.assertEqual(['one', 'foo', 'bar', 'baz'], a_deprecated_list)
+
+ a_deprecated_list.insert(1, 'xxx')
+ self.assertEqual([expected_warning]*3, self._warnings)
+ self.assertEqual(['one', 'xxx', 'foo', 'bar', 'baz'], a_deprecated_list)
+
+ a_deprecated_list.remove('foo')
+ self.assertEqual([expected_warning]*4, self._warnings)
+ self.assertEqual(['one', 'xxx', 'bar', 'baz'], a_deprecated_list)
+
+ val = a_deprecated_list.pop()
+ self.assertEqual([expected_warning]*5, self._warnings)
+ self.assertEqual('baz', val)
+ self.assertEqual(['one', 'xxx', 'bar'], a_deprecated_list)
+
+ val = a_deprecated_list.pop(1)
+ self.assertEqual([expected_warning]*6, self._warnings)
+ self.assertEqual('xxx', val)
+ self.assertEqual(['one', 'bar'], a_deprecated_list)
+ finally:
+ symbol_versioning.set_warning_method(old_warning_method)
+
+ def test_deprecated_dict(self):
+ expected_warning = (
+ "access to a_deprecated_dict was deprecated in version 0.14.0."
+ " Pull the other one!", DeprecationWarning, 2)
+ old_warning_method = symbol_versioning.warn
+ try:
+ symbol_versioning.set_warning_method(self.capture_warning)
+ self.assertEqual(len(a_deprecated_dict), 1)
+ self.assertEqual([expected_warning], self._warnings)
+
+ a_deprecated_dict['b'] = 42
+ self.assertEqual(a_deprecated_dict['b'], 42)
+ self.assertTrue('b' in a_deprecated_dict)
+ del a_deprecated_dict['b']
+ self.assertFalse('b' in a_deprecated_dict)
+ self.assertEqual([expected_warning] * 6, self._warnings)
+ finally:
+ symbol_versioning.set_warning_method(old_warning_method)
+
+ def check_deprecated_callable(self, expected_warning, expected_docstring,
+ expected_name, expected_module,
+ deprecated_callable):
+ if __doc__ is None:
+ # With -OO the docstring should just be the deprecated version
+ expected_docstring = expected_docstring.split('\n')[-2].lstrip()
+ old_warning_method = symbol_versioning.warn
+ try:
+ symbol_versioning.set_warning_method(self.capture_warning)
+ self.assertEqual(1, deprecated_callable())
+ self.assertEqual([expected_warning], self._warnings)
+ deprecated_callable()
+ self.assertEqual([expected_warning, expected_warning],
+ self._warnings)
+ self.assertEqualDiff(expected_docstring, deprecated_callable.__doc__)
+ self.assertEqualDiff(expected_name, deprecated_callable.__name__)
+ self.assertEqualDiff(expected_module, deprecated_callable.__module__)
+ self.assertTrue(deprecated_callable.is_deprecated)
+ finally:
+ symbol_versioning.set_warning_method(old_warning_method)
+
+ def test_deprecated_passed(self):
+ self.assertEqual(True, symbol_versioning.deprecated_passed(None))
+ self.assertEqual(True, symbol_versioning.deprecated_passed(True))
+ self.assertEqual(True, symbol_versioning.deprecated_passed(False))
+ self.assertEqual(False,
+ symbol_versioning.deprecated_passed(
+ symbol_versioning.DEPRECATED_PARAMETER))
+
+ def test_deprecation_string(self):
+ """We can get a deprecation string for a method or function."""
+ self.assertEqual('bzrlib.tests.test_symbol_versioning.'
+ 'TestDeprecationWarnings.test_deprecation_string was deprecated in '
+ 'version 0.11.0.',
+ symbol_versioning.deprecation_string(
+ self.test_deprecation_string,
+ deprecated_in((0, 11, 0))))
+ self.assertEqual('bzrlib.symbol_versioning.deprecated_function was '
+ 'deprecated in version 0.11.0.',
+ symbol_versioning.deprecation_string(
+ symbol_versioning.deprecated_function,
+ deprecated_in((0, 11, 0))))
+
+
+class TestSuppressAndActivate(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+ existing_filters = list(warnings.filters)
+ def restore():
+ warnings.filters[:] = existing_filters
+ self.addCleanup(restore)
+ # Clean out the filters so we have a clean slate.
+ warnings.resetwarnings()
+
+ def assertFirstWarning(self, action, category):
+ """Test the first warning in the filters is correct"""
+ first = warnings.filters[0]
+ self.assertEqual((action, category), (first[0], first[2]))
+
+ def test_suppress_deprecation_warnings(self):
+ """suppress_deprecation_warnings sets DeprecationWarning to ignored."""
+ symbol_versioning.suppress_deprecation_warnings()
+ self.assertFirstWarning('ignore', DeprecationWarning)
+
+ def test_set_restore_filters(self):
+ original_filters = warnings.filters[:]
+ symbol_versioning.suppress_deprecation_warnings()()
+ self.assertEqual(original_filters, warnings.filters)
+
+ def test_suppress_deprecation_with_warning_filter(self):
+ """don't suppress if we already have a filter"""
+ warnings.filterwarnings('error', category=Warning)
+ self.assertFirstWarning('error', Warning)
+ self.assertEqual(1, len(warnings.filters))
+ symbol_versioning.suppress_deprecation_warnings(override=False)
+ self.assertFirstWarning('error', Warning)
+ self.assertEqual(1, len(warnings.filters))
+
+ def test_suppress_deprecation_with_filter(self):
+ """don't suppress if we already have a filter"""
+ warnings.filterwarnings('error', category=DeprecationWarning)
+ self.assertFirstWarning('error', DeprecationWarning)
+ self.assertEqual(1, len(warnings.filters))
+ symbol_versioning.suppress_deprecation_warnings(override=False)
+ self.assertFirstWarning('error', DeprecationWarning)
+ self.assertEqual(1, len(warnings.filters))
+ symbol_versioning.suppress_deprecation_warnings(override=True)
+ self.assertFirstWarning('ignore', DeprecationWarning)
+ self.assertEqual(2, len(warnings.filters))
+
+ def test_activate_deprecation_no_error(self):
+ # First nuke the filters, so we know it is clean
+ symbol_versioning.activate_deprecation_warnings()
+ self.assertFirstWarning('default', DeprecationWarning)
+
+ def test_activate_deprecation_with_error(self):
+ # First nuke the filters, so we know it is clean
+ # Add a warning == error rule
+ warnings.filterwarnings('error', category=Warning)
+ self.assertFirstWarning('error', Warning)
+ self.assertEqual(1, len(warnings.filters))
+ symbol_versioning.activate_deprecation_warnings(override=False)
+ # There should not be a new warning
+ self.assertFirstWarning('error', Warning)
+ self.assertEqual(1, len(warnings.filters))
+
+ def test_activate_deprecation_with_DW_error(self):
+ # First nuke the filters, so we know it is clean
+ # Add a warning == error rule
+ warnings.filterwarnings('error', category=DeprecationWarning)
+ self.assertFirstWarning('error', DeprecationWarning)
+ self.assertEqual(1, len(warnings.filters))
+ symbol_versioning.activate_deprecation_warnings(override=False)
+ # There should not be a new warning
+ self.assertFirstWarning('error', DeprecationWarning)
+ self.assertEqual(1, len(warnings.filters))
+ symbol_versioning.activate_deprecation_warnings(override=True)
+ self.assertFirstWarning('default', DeprecationWarning)
+ self.assertEqual(2, len(warnings.filters))
diff --git a/bzrlib/tests/test_tag.py b/bzrlib/tests/test_tag.py
new file mode 100644
index 0000000..6fa86f2
--- /dev/null
+++ b/bzrlib/tests/test_tag.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for bzrlib.tag."""
+
+
+from bzrlib import (
+ controldir,
+ errors,
+ )
+from bzrlib.tag import (
+ BasicTags,
+ DisabledTags,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+
+
+class TestTagSerialization(TestCase):
+
+ def test_tag_serialization(self):
+ """Test the precise representation of tag dicts."""
+ # Don't change this after we commit to this format, as it checks
+ # that the format is stable and compatible across releases.
+ #
+ # This release stores them in bencode as a dictionary from name to
+ # target.
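+ # For example, the packed form below decomposes as
+ # 'd' + '6:boring' + '12:boring-revid' + '6:stable' + '12:stable-revide'
+ # minus the trailing 'e' folded into the last token: keys are sorted and
+ # every string is length-prefixed, with the dict wrapped in 'd'...'e'.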
+ store = BasicTags(branch=None)
+ td = dict(stable='stable-revid', boring='boring-revid')
+ packed = store._serialize_tag_dict(td)
+ expected = r'd6:boring12:boring-revid6:stable12:stable-revide'
+ self.assertEqualDiff(packed, expected)
+ self.assertEqual(store._deserialize_tag_dict(packed), td)
+
+
+class TestTagRevisionRenames(TestCaseWithTransport):
+
+ def make_branch_supporting_tags(self, relpath):
+ return self.make_branch(relpath, format='dirstate-tags')
+
+ def test_simple(self):
+ store = self.make_branch_supporting_tags('a').tags
+ store.set_tag("foo", "myoldrevid")
+ store.rename_revisions({"myoldrevid": "mynewrevid"})
+ self.assertEquals({"foo": "mynewrevid"}, store.get_tag_dict())
+
+ def test_unknown_ignored(self):
+ store = self.make_branch_supporting_tags('a').tags
+ store.set_tag("foo", "myoldrevid")
+ store.rename_revisions({"anotherrevid": "mynewrevid"})
+ self.assertEquals({"foo": "myoldrevid"}, store.get_tag_dict())
+
+
+class TestTagMerging(TestCaseWithTransport):
+
+ def make_knit_branch(self, relpath):
+ old_bdf = controldir.format_registry.make_bzrdir('knit')
+ return controldir.ControlDir.create_branch_convenience(relpath, format=old_bdf)
+
+ def make_branch_supporting_tags(self, relpath):
+ return self.make_branch(relpath, format='dirstate-tags')
+
+ def test_merge_not_possible(self):
+ # test merging between branches which do and don't support tags
+ old_branch = self.make_knit_branch('old')
+ new_branch = self.make_branch_supporting_tags('new')
+ # just to make sure this test is valid
+ self.assertFalse(old_branch.supports_tags(),
+ "%s is expected to not support tags but does" % old_branch)
+ self.assertTrue(new_branch.supports_tags(),
+ "%s is expected to support tags but does not" % new_branch)
+ # there are no tags in the old one, and we can merge from it into the
+ # new one
+ old_branch.tags.merge_to(new_branch.tags)
+ # we can't merge tags from the new branch into the old one, but as
+ # there aren't any yet this isn't a problem
+ new_branch.tags.merge_to(old_branch.tags)
+ # but once there is a tag in the new one, trying to move it back
+ # raises an error
+ new_branch.tags.set_tag(u'\u2040tag', 'revid')
+ old_branch.tags.merge_to(new_branch.tags)
+ self.assertRaises(errors.TagsNotSupported,
+ new_branch.tags.merge_to, old_branch.tags)
+
+ def test_merge_to(self):
+ a = self.make_branch_supporting_tags('a')
+ b = self.make_branch_supporting_tags('b')
+ # simple merge
+ a.tags.set_tag('tag-1', 'x')
+ b.tags.set_tag('tag-2', 'y')
+ a.tags.merge_to(b.tags)
+ self.assertEqual('x', b.tags.lookup_tag('tag-1'))
+ self.assertEqual('y', b.tags.lookup_tag('tag-2'))
+ self.assertRaises(errors.NoSuchTag, a.tags.lookup_tag, 'tag-2')
+ # conflicting merge
+ a.tags.set_tag('tag-2', 'z')
+ updates, conflicts = a.tags.merge_to(b.tags)
+ self.assertEqual({}, updates)
+ self.assertEqual(list(conflicts), [('tag-2', 'z', 'y')])
+ self.assertEqual('y', b.tags.lookup_tag('tag-2'))
+ # overwrite conflicts
+ updates, conflicts = a.tags.merge_to(b.tags, overwrite=True)
+ self.assertEqual(list(conflicts), [])
+ self.assertEqual({u'tag-2': 'z'}, updates)
+ self.assertEqual('z', b.tags.lookup_tag('tag-2'))
+
+
+class TestTagsInCheckouts(TestCaseWithTransport):
+ """Tests for how tags are synchronised between the master and child branch
+ of a checkout.
+ """
+
+ def test_update_tag_into_checkout(self):
+ # checkouts are directly connected to the tags of their master branch:
+ # adding a tag in the checkout pushes it to the master
+ # https://bugs.launchpad.net/bzr/+bug/93860
+ master = self.make_branch('master')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.tags.set_tag('foo', 'rev-1')
+ self.assertEquals('rev-1', master.tags.lookup_tag('foo'))
+ # deleting a tag updates the master too
+ child.tags.delete_tag('foo')
+ self.assertRaises(errors.NoSuchTag,
+ master.tags.lookup_tag, 'foo')
+
+ def test_tag_copied_by_initial_checkout(self):
+ # https://bugs.launchpad.net/bzr/+bug/93860
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-1')
+ co_tree = master.create_checkout('checkout')
+ self.assertEquals('rev-1',
+ co_tree.branch.tags.lookup_tag('foo'))
+
+ def test_update_updates_tags(self):
+ # https://bugs.launchpad.net/bzr/+bug/93856
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ # after an update, the child has all the master's tags
+ self.assertEquals('rev-1', child.tags.lookup_tag('foo'))
+ # add another tag and update again
+ master.tags.set_tag('tag2', 'target2')
+ child.update()
+ self.assertEquals('target2', child.tags.lookup_tag('tag2'))
+
+ def test_tag_deletion_from_master_to_bound(self):
+ master = self.make_branch('master')
+ master.tags.set_tag('foo', 'rev-1')
+ child = self.make_branch('child')
+ child.bind(master)
+ child.update()
+ # and deletion of tags should also propagate
+ master.tags.delete_tag('foo')
+ self.knownFailure("tag deletion does not propagate: "
+ "https://bugs.launchpad.net/bzr/+bug/138802")
+ self.assertRaises(errors.NoSuchTag,
+ child.tags.lookup_tag, 'foo')
+
+
+class DisabledTagsTests(TestCaseWithTransport):
+
+ def setUp(self):
+ super(DisabledTagsTests, self).setUp()
+ branch = self.make_branch('.')
+ self.tags = DisabledTags(branch)
+
+ def test_set_tag(self):
+ self.assertRaises(errors.TagsNotSupported, self.tags.set_tag)
+
+ def test_get_reverse_tag_dict(self):
+ self.assertEqual(self.tags.get_reverse_tag_dict(), {})
+
+
+
diff --git a/bzrlib/tests/test_test_server.py b/bzrlib/tests/test_test_server.py
new file mode 100644
index 0000000..bd05a62
--- /dev/null
+++ b/bzrlib/tests/test_test_server.py
@@ -0,0 +1,336 @@
+# Copyright (C) 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import errno
+import socket
+import SocketServer
+import threading
+
+
+from bzrlib import (
+ osutils,
+ tests,
+ )
+from bzrlib.tests import test_server
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+def portable_socket_pair():
+ """Return a pair of TCP sockets connected to each other.
+
+ Unlike socket.socketpair, this should work on Windows.
+ """
+ listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ listen_sock.bind(('127.0.0.1', 0))
+ listen_sock.listen(1)
+ client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ client_sock.connect(listen_sock.getsockname())
+ server_sock, addr = listen_sock.accept()
+ listen_sock.close()
+ return server_sock, client_sock
+
+
+class TCPClient(object):
+
+ def __init__(self):
+ self.sock = None
+
+ def connect(self, addr):
+ if self.sock is not None:
+ raise AssertionError('Already connected to %r'
+ % (self.sock.getsockname(),))
+ self.sock = osutils.connect_socket(addr)
+
+ def disconnect(self):
+ if self.sock is not None:
+ try:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ except socket.error, e:
+ if e[0] in (errno.EBADF, errno.ENOTCONN):
+ # Right, the socket is already down
+ pass
+ else:
+ raise
+ self.sock = None
+
+ def write(self, s):
+ return self.sock.sendall(s)
+
+ def read(self, bufsize=4096):
+ return self.sock.recv(bufsize)
+
+
+class TCPConnectionHandler(SocketServer.BaseRequestHandler):
+
+ def handle(self):
+ self.done = False
+ self.handle_connection()
+ while not self.done:
+ self.handle_connection()
+
+ def readline(self):
+ # TODO: We should be buffering any extra data sent, etc. However, in
+ # practice, we don't send extra content, so we haven't bothered
+ # to implement it yet.
+ req = self.request.recv(4096)
+ # An empty string is allowed, to indicate the end of the connection
+ if not req or (req.endswith('\n') and req.count('\n') == 1):
+ return req
+ raise ValueError('[%r] not a simple line' % (req,))
+
+ def handle_connection(self):
+ req = self.readline()
+ if not req:
+ self.done = True
+ elif req == 'ping\n':
+ self.request.sendall('pong\n')
+ else:
+ raise ValueError('[%s] not understood' % req)
+
+
+class TestTCPServerInAThread(tests.TestCase):
+
+ scenarios = [
+ (name, {'server_class': getattr(test_server, name)})
+ for name in
+ ('TestingTCPServer', 'TestingThreadingTCPServer')]
+
+ def get_server(self, server_class=None, connection_handler_class=None):
+ if server_class is not None:
+ self.server_class = server_class
+ if connection_handler_class is None:
+ connection_handler_class = TCPConnectionHandler
+ server = test_server.TestingTCPServerInAThread(
+ ('localhost', 0), self.server_class, connection_handler_class)
+ server.start_server()
+ self.addCleanup(server.stop_server)
+ return server
+
+ def get_client(self):
+ client = TCPClient()
+ self.addCleanup(client.disconnect)
+ return client
+
+ def get_server_connection(self, server, conn_rank):
+ return server.server.clients[conn_rank]
+
+ def assertClientAddr(self, client, server, conn_rank):
+ conn = self.get_server_connection(server, conn_rank)
+ self.assertEquals(client.sock.getsockname(), conn[1])
+
+ def test_start_stop(self):
+ server = self.get_server()
+ client = self.get_client()
+ server.stop_server()
+ # since the server doesn't accept connections anymore, attempting to
+ # connect should fail
+ client = self.get_client()
+ self.assertRaises(socket.error,
+ client.connect, (server.host, server.port))
+
+ def test_client_talks_server_respond(self):
+ server = self.get_server()
+ client = self.get_client()
+ client.connect((server.host, server.port))
+ self.assertIs(None, client.write('ping\n'))
+ resp = client.read()
+ self.assertClientAddr(client, server, 0)
+ self.assertEquals('pong\n', resp)
+
+ def test_server_fails_to_start(self):
+ class CantStart(Exception):
+ pass
+
+ class CantStartServer(test_server.TestingTCPServer):
+
+ def server_bind(self):
+ raise CantStart()
+
+ # The exception is raised in the main thread
+ self.assertRaises(CantStart,
+ self.get_server, server_class=CantStartServer)
+
+ def test_server_fails_while_serving_or_stopping(self):
+ class CantConnect(Exception):
+ pass
+
+ class FailingConnectionHandler(TCPConnectionHandler):
+
+ def handle(self):
+ raise CantConnect()
+
+ server = self.get_server(
+ connection_handler_class=FailingConnectionHandler)
+ # The server won't fail until a client connects
+ client = self.get_client()
+ client.connect((server.host, server.port))
+ # We make sure the server wants to handle a request, but the request is
+ # guaranteed to fail. However, the server should make sure that the
+ # connection gets closed, and stop_server should then raise the
+ # original exception.
+ client.write('ping\n')
+ try:
+ self.assertEqual('', client.read())
+ except socket.error, e:
+ # On Windows, failing during 'handle' means we get
+ # 'forced-close-of-connection'. Possibly because we haven't
+ # processed the write request before we close the socket.
+ WSAECONNRESET = 10054
+ if e.errno in (WSAECONNRESET,):
+ pass
+ # Now the server has raised the exception in its own thread
+ self.assertRaises(CantConnect, server.stop_server)
+
+ def test_server_crash_while_responding(self):
+ # We want to ensure the exception has been caught
+ caught = threading.Event()
+ caught.clear()
+ # The thread that will serve the client. This needs to be an attribute
+ # so the handler below can modify it when it's executed (it's
+ # instantiated when the request is processed)
+ self.connection_thread = None
+
+ class FailToRespond(Exception):
+ pass
+
+ class FailingDuringResponseHandler(TCPConnectionHandler):
+
+ # We name the handler parameter 'request' rather than 'self' so that
+ # 'self' still refers to the test case, which is where
+ # connection_thread needs to be recorded.
+ def handle_connection(request):
+ req = request.readline()
+ # Capture the thread and make it use 'caught' so we can wait on
+ # the event that will be set when the exception is caught. We
+ # also capture the thread to know where to look.
+ self.connection_thread = threading.currentThread()
+ self.connection_thread.set_sync_event(caught)
+ raise FailToRespond()
+
+ server = self.get_server(
+ connection_handler_class=FailingDuringResponseHandler)
+ client = self.get_client()
+ client.connect((server.host, server.port))
+ client.write('ping\n')
+ # Wait for the exception to be caught
+ caught.wait()
+ self.assertEqual('', client.read()) # connection closed
+ # Check that the connection thread did catch the exception,
+ # http://pad.lv/869366 was wrongly checking the server thread which
+ # works for TestingTCPServer where the connection is handled in the
+ # same thread as the server one, but was racy for
+ # TestingThreadingTCPServer. Since the connection thread detaches
+ # itself before handling the request, we are guaranteed that the
+ # exception won't leak into the server thread anymore.
+ self.assertRaises(FailToRespond,
+ self.connection_thread.pending_exception)
+
+ def test_exception_swallowed_while_serving(self):
+ # We need to ensure the exception has been caught
+ caught = threading.Event()
+ caught.clear()
+ # The thread that will serve the client. This needs to be an attribute
+ # so the handler below can access it when it's executed (it's
+ # instantiated when the request is processed)
+ self.connection_thread = None
+ class CantServe(Exception):
+ pass
+
+ class FailingWhileServingConnectionHandler(TCPConnectionHandler):
+
+ # We name the handler parameter 'request' rather than 'self' so that
+ # 'self' still refers to the test case, which is where
+ # connection_thread needs to be recorded.
+ def handle(request):
+ # Capture the thread and make it use 'caught' so we can wait on
+ # the event that will be set when the exception is caught. We
+ # also capture the thread to know where to look.
+ self.connection_thread = threading.currentThread()
+ self.connection_thread.set_sync_event(caught)
+ raise CantServe()
+
+ server = self.get_server(
+ connection_handler_class=FailingWhileServingConnectionHandler)
+ self.assertEquals(True, server.server.serving)
+ # Install the exception swallower
+ server.set_ignored_exceptions(CantServe)
+ client = self.get_client()
+ # Connect to the server so the exception is raised there
+ client.connect((server.host, server.port))
+ # Wait for the exception to be caught
+ caught.wait()
+ self.assertEqual('', client.read()) # connection closed
+ # The connection wasn't served properly but the exception should have
+ # been swallowed (see test_server_crash_while_responding remark about
+ # http://pad.lv/869366 explaining why we can't check the server thread
+ # here). More precisely, the exception *has* been caught and captured
+ # but it is cleared when joining the thread (or trying to acquire the
+ # exception) and as such won't propagate to the server thread.
+ self.assertIs(None, self.connection_thread.pending_exception())
+ self.assertIs(None, server.pending_exception())
+
+ def test_handle_request_closes_if_it_doesnt_process(self):
+ server = self.get_server()
+ client = self.get_client()
+ server.server.serving = False
+ client.connect((server.host, server.port))
+ self.assertEqual('', client.read())
+
+
+class TestTestingSmartServer(tests.TestCase):
+
+ def test_sets_client_timeout(self):
+ server = test_server.TestingSmartServer(('localhost', 0), None, None,
+ root_client_path='/no-such-client/path')
+ self.assertEqual(test_server._DEFAULT_TESTING_CLIENT_TIMEOUT,
+ server._client_timeout)
+ sock = socket.socket()
+ h = server._make_handler(sock)
+ self.assertEqual(test_server._DEFAULT_TESTING_CLIENT_TIMEOUT,
+ h._client_timeout)
+
+
+class FakeServer(object):
+ """Minimal implementation to pass to TestingSmartConnectionHandler"""
+ backing_transport = None
+ root_client_path = '/'
+
+
+class TestTestingSmartConnectionHandler(tests.TestCase):
+
+ def test_connection_timeout_suppressed(self):
+ self.overrideAttr(test_server, '_DEFAULT_TESTING_CLIENT_TIMEOUT', 0.01)
+ s = FakeServer()
+ server_sock, client_sock = portable_socket_pair()
+ # This should timeout quickly, but not generate an exception.
+ handler = test_server.TestingSmartConnectionHandler(server_sock,
+ server_sock.getpeername(), s)
+
+ def test_connection_shutdown_while_serving_no_error(self):
+ s = FakeServer()
+ server_sock, client_sock = portable_socket_pair()
+ class ShutdownConnectionHandler(
+ test_server.TestingSmartConnectionHandler):
+
+ def _build_protocol(self):
+ self.finished = True
+ return super(ShutdownConnectionHandler, self)._build_protocol()
+ # This should trigger shutdown after entering _build_protocol, and
+ # we should exit cleanly, without raising an exception.
+ handler = ShutdownConnectionHandler(server_sock,
+ server_sock.getpeername(), s)
diff --git a/bzrlib/tests/test_testament.py b/bzrlib/tests/test_testament.py
new file mode 100644
index 0000000..9846df1
--- /dev/null
+++ b/bzrlib/tests/test_testament.py
@@ -0,0 +1,563 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test testaments for gpg signing."""
+
+# TODO: Testaments with x-bits
+
+import os
+
+from bzrlib import osutils
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.testament import (
+ Testament,
+ StrictTestament,
+ StrictTestament3,
+ )
+from bzrlib.transform import TreeTransform
+from bzrlib.tests.features import (
+ SymlinkFeature,
+ )
+
+
+class TestamentSetup(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestamentSetup, self).setUp()
+ self.wt = self.make_branch_and_tree('.', format='development-subtree')
+ self.wt.set_root_id('TREE_ROT')
+ b = self.b = self.wt.branch
+ b.nick = "test branch"
+ self.wt.commit(message='initial null commit',
+ committer='test@user',
+ timestamp=1129025423, # 'Tue Oct 11 20:10:23 2005'
+ timezone=0,
+ rev_id='test@user-1')
+ self.build_tree_contents([('hello', 'contents of hello file'),
+ ('src/', ),
+ ('src/foo.c', 'int main()\n{\n}\n')])
+ self.wt.add(['hello', 'src', 'src/foo.c'],
+ ['hello-id', 'src-id', 'foo.c-id'])
+ tt = TreeTransform(self.wt)
+ trans_id = tt.trans_id_tree_path('hello')
+ tt.set_executability(True, trans_id)
+ tt.apply()
+ self.wt.commit(message='add files and directories',
+ timestamp=1129025483,
+ timezone=36000,
+ rev_id='test@user-2',
+ committer='test@user')
+
+
+class TestamentTests(TestamentSetup):
+
+ def testament_class(self):
+ return Testament
+
+ def expected(self, key):
+ return texts[self.testament_class()][key]
+
+ def from_revision(self, repository, revision_id):
+ return self.testament_class().from_revision(repository, revision_id)
+
+ def test_null_testament(self):
+ """Testament for a revision with no contents."""
+ t = self.from_revision(self.b.repository, 'test@user-1')
+ ass = self.assertTrue
+ eq = self.assertEqual
+ ass(isinstance(t, Testament))
+ eq(t.revision_id, 'test@user-1')
+ eq(t.committer, 'test@user')
+ eq(t.timestamp, 1129025423)
+ eq(t.timezone, 0)
+
+ def test_testament_text_form(self):
+ """Conversion of testament to canonical text form."""
+ t = self.from_revision(self.b.repository, 'test@user-1')
+ text_form = t.as_text()
+ self.log('testament text form:\n' + text_form)
+ self.assertEqualDiff(text_form, self.expected('rev_1'))
+ short_text_form = t.as_short_text()
+ self.assertEqualDiff(short_text_form, self.expected('rev_1_short'))
+
+ def test_testament_with_contents(self):
+ """Testament containing a file and a directory."""
+ t = self.from_revision(self.b.repository, 'test@user-2')
+ text_form = t.as_text()
+ self.log('testament text form:\n' + text_form)
+ self.assertEqualDiff(text_form, self.expected('rev_2'))
+ actual_short = t.as_short_text()
+ self.assertEqualDiff(actual_short, self.expected('rev_2_short'))
+
+ def test_testament_symlinks(self):
+ """Testament containing symlink (where possible)"""
+ self.requireFeature(SymlinkFeature)
+ os.symlink('wibble/linktarget', 'link')
+ self.wt.add(['link'], ['link-id'])
+ self.wt.commit(message='add symlink',
+ timestamp=1129025493,
+ timezone=36000,
+ rev_id='test@user-3',
+ committer='test@user')
+ t = self.from_revision(self.b.repository, 'test@user-3')
+ self.assertEqualDiff(t.as_text(), self.expected('rev_3'))
+
+ def test_testament_revprops(self):
+ """Testament to revision with extra properties"""
+ props = dict(flavor='sour cherry\ncream cheese',
+ size='medium',
+ empty='',
+ )
+ self.wt.commit(message='revision with properties',
+ timestamp=1129025493,
+ timezone=36000,
+ rev_id='test@user-3',
+ committer='test@user',
+ revprops=props)
+ t = self.from_revision(self.b.repository, 'test@user-3')
+ self.assertEqualDiff(t.as_text(), self.expected('rev_props'))
+
+ def test_testament_unicode_commit_message(self):
+ self.wt.commit(
+ message=u'non-ascii commit \N{COPYRIGHT SIGN} me',
+ timestamp=1129025493,
+ timezone=36000,
+ rev_id='test@user-3',
+ committer=u'Erik B\xe5gfors <test@user>',
+ revprops={'uni':u'\xb5'}
+ )
+ t = self.from_revision(self.b.repository, 'test@user-3')
+ self.assertEqualDiff(
+ self.expected('sample_unicode').encode('utf-8'), t.as_text())
+
+ def test_from_tree(self):
+ tree = self.b.repository.revision_tree('test@user-2')
+ testament = self.testament_class().from_revision_tree(tree)
+ text_1 = testament.as_short_text()
+ text_2 = self.from_revision(self.b.repository,
+ 'test@user-2').as_short_text()
+ self.assertEqual(text_1, text_2)
+
+ def test___init__(self):
+ revision = self.b.repository.get_revision('test@user-2')
+ tree = self.b.repository.revision_tree('test@user-2')
+ testament_1 = self.testament_class()(revision, tree)
+ text_1 = testament_1.as_short_text()
+ text_2 = self.from_revision(self.b.repository,
+ 'test@user-2').as_short_text()
+ self.assertEqual(text_1, text_2)
+
+
+class TestamentTestsStrict(TestamentTests):
+
+ def testament_class(self):
+ return StrictTestament
+
+
+class TestamentTestsStrict2(TestamentTests):
+
+ def testament_class(self):
+ return StrictTestament3
+
+
+REV_1_TESTAMENT = """\
+bazaar-ng testament version 1
+revision-id: test@user-1
+committer: test@user
+timestamp: 1129025423
+timezone: 0
+parents:
+message:
+ initial null commit
+inventory:
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_1_STRICT_TESTAMENT = """\
+bazaar-ng testament version 2.1
+revision-id: test@user-1
+committer: test@user
+timestamp: 1129025423
+timezone: 0
+parents:
+message:
+ initial null commit
+inventory:
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_1_STRICT_TESTAMENT3 = """\
+bazaar testament version 3 strict
+revision-id: test@user-1
+committer: test@user
+timestamp: 1129025423
+timezone: 0
+parents:
+message:
+ initial null commit
+inventory:
+ directory . TREE_ROT test@user-1 no
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_1_SHORT = """\
+bazaar-ng testament short form 1
+revision-id: test@user-1
+sha1: %s
+""" % osutils.sha_string(REV_1_TESTAMENT)
+
+
+REV_1_SHORT_STRICT = """\
+bazaar-ng testament short form 2.1
+revision-id: test@user-1
+sha1: %s
+""" % osutils.sha_string(REV_1_STRICT_TESTAMENT)
+
+
+REV_1_SHORT_STRICT3 = """\
+bazaar testament short form 3 strict
+revision-id: test@user-1
+sha1: %s
+""" % osutils.sha_string(REV_1_STRICT_TESTAMENT3)
+
+
+REV_2_TESTAMENT = """\
+bazaar-ng testament version 1
+revision-id: test@user-2
+committer: test@user
+timestamp: 1129025483
+timezone: 36000
+parents:
+ test@user-1
+message:
+ add files and directories
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73
+ directory src src-id
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_2_STRICT_TESTAMENT = """\
+bazaar-ng testament version 2.1
+revision-id: test@user-2
+committer: test@user
+timestamp: 1129025483
+timezone: 36000
+parents:
+ test@user-1
+message:
+ add files and directories
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_2_STRICT_TESTAMENT3 = """\
+bazaar testament version 3 strict
+revision-id: test@user-2
+committer: test@user
+timestamp: 1129025483
+timezone: 36000
+parents:
+ test@user-1
+message:
+ add files and directories
+inventory:
+ directory . TREE_ROT test@user-1 no
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_2_SHORT = """\
+bazaar-ng testament short form 1
+revision-id: test@user-2
+sha1: %s
+""" % osutils.sha_string(REV_2_TESTAMENT)
+
+
+REV_2_SHORT_STRICT = """\
+bazaar-ng testament short form 2.1
+revision-id: test@user-2
+sha1: %s
+""" % osutils.sha_string(REV_2_STRICT_TESTAMENT)
+
+
+REV_2_SHORT_STRICT3 = """\
+bazaar testament short form 3 strict
+revision-id: test@user-2
+sha1: %s
+""" % osutils.sha_string(REV_2_STRICT_TESTAMENT3)
+
+
+REV_PROPS_TESTAMENT = """\
+bazaar-ng testament version 1
+revision-id: test@user-3
+committer: test@user
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ revision with properties
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73
+ directory src src-id
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24
+properties:
+ branch-nick:
+ test branch
+ empty:
+ flavor:
+ sour cherry
+ cream cheese
+ size:
+ medium
+"""
+
+
+REV_PROPS_TESTAMENT_STRICT = """\
+bazaar-ng testament version 2.1
+revision-id: test@user-3
+committer: test@user
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ revision with properties
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+ empty:
+ flavor:
+ sour cherry
+ cream cheese
+ size:
+ medium
+"""
+
+
+REV_PROPS_TESTAMENT_STRICT3 = """\
+bazaar testament version 3 strict
+revision-id: test@user-3
+committer: test@user
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ revision with properties
+inventory:
+ directory . TREE_ROT test@user-1 no
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+ empty:
+ flavor:
+ sour cherry
+ cream cheese
+ size:
+ medium
+"""
+
+
+REV_3_TESTAMENT = """\
+bazaar-ng testament version 1
+revision-id: test@user-3
+committer: test@user
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ add symlink
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73
+ symlink link link-id wibble/linktarget
+ directory src src-id
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_3_TESTAMENT_STRICT = """\
+bazaar-ng testament version 2.1
+revision-id: test@user-3
+committer: test@user
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ add symlink
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ symlink link link-id wibble/linktarget test@user-3 no
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+REV_3_TESTAMENT_STRICT3 = """\
+bazaar testament version 3 strict
+revision-id: test@user-3
+committer: test@user
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ add symlink
+inventory:
+ directory . TREE_ROT test@user-1 no
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ symlink link link-id wibble/linktarget test@user-3 no
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+"""
+
+
+SAMPLE_UNICODE_TESTAMENT = u"""\
+bazaar-ng testament version 1
+revision-id: test@user-3
+committer: Erik B\xe5gfors <test@user>
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ non-ascii commit \N{COPYRIGHT SIGN} me
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73
+ directory src src-id
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24
+properties:
+ branch-nick:
+ test branch
+ uni:
+ \xb5
+"""
+
+
+SAMPLE_UNICODE_TESTAMENT_STRICT = u"""\
+bazaar-ng testament version 2.1
+revision-id: test@user-3
+committer: Erik B\xe5gfors <test@user>
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ non-ascii commit \N{COPYRIGHT SIGN} me
+inventory:
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+ uni:
+ \xb5
+"""
+
+
+SAMPLE_UNICODE_TESTAMENT_STRICT3 = u"""\
+bazaar testament version 3 strict
+revision-id: test@user-3
+committer: Erik B\xe5gfors <test@user>
+timestamp: 1129025493
+timezone: 36000
+parents:
+ test@user-2
+message:
+ non-ascii commit \N{COPYRIGHT SIGN} me
+inventory:
+ directory . TREE_ROT test@user-1 no
+ file hello hello-id 34dd0ac19a24bf80c4d33b5c8960196e8d8d1f73 test@user-2 yes
+ directory src src-id test@user-2 no
+ file src/foo.c foo.c-id a2a049c20f908ae31b231d98779eb63c66448f24 test@user-2 no
+properties:
+ branch-nick:
+ test branch
+ uni:
+ \xb5
+"""
+
+
+texts = {
+ Testament: { 'rev_1': REV_1_TESTAMENT,
+ 'rev_1_short': REV_1_SHORT,
+ 'rev_2': REV_2_TESTAMENT,
+ 'rev_2_short': REV_2_SHORT,
+ 'rev_3': REV_3_TESTAMENT,
+ 'rev_props': REV_PROPS_TESTAMENT,
+ 'sample_unicode': SAMPLE_UNICODE_TESTAMENT,
+ },
+ StrictTestament: {'rev_1': REV_1_STRICT_TESTAMENT,
+ 'rev_1_short': REV_1_SHORT_STRICT,
+ 'rev_2': REV_2_STRICT_TESTAMENT,
+ 'rev_2_short': REV_2_SHORT_STRICT,
+ 'rev_3': REV_3_TESTAMENT_STRICT,
+ 'rev_props': REV_PROPS_TESTAMENT_STRICT,
+ 'sample_unicode': SAMPLE_UNICODE_TESTAMENT_STRICT,
+ },
+ StrictTestament3: {'rev_1': REV_1_STRICT_TESTAMENT3,
+ 'rev_1_short': REV_1_SHORT_STRICT3,
+ 'rev_2': REV_2_STRICT_TESTAMENT3,
+ 'rev_2_short': REV_2_SHORT_STRICT3,
+ 'rev_3': REV_3_TESTAMENT_STRICT3,
+ 'rev_props': REV_PROPS_TESTAMENT_STRICT3,
+ 'sample_unicode': SAMPLE_UNICODE_TESTAMENT_STRICT3,
+ },
+}
diff --git a/bzrlib/tests/test_textfile.py b/bzrlib/tests/test_textfile.py
new file mode 100644
index 0000000..44ba148
--- /dev/null
+++ b/bzrlib/tests/test_textfile.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from StringIO import StringIO
+
+from bzrlib.errors import BinaryFile
+from bzrlib.tests import TestCase, TestCaseInTempDir
+from bzrlib.textfile import text_file, check_text_lines, check_text_path
+
+
+class TextFile(TestCase):
+
+ def test_text_file(self):
+ s = StringIO('ab' * 2048)
+ self.assertEqual(text_file(s).read(), s.getvalue())
+ s = StringIO('a' * 1023 + '\x00')
+ self.assertRaises(BinaryFile, text_file, s)
+ s = StringIO('a' * 1024 + '\x00')
+ self.assertEqual(text_file(s).read(), s.getvalue())
+
+ def test_check_text_lines(self):
+ lines = ['ab' * 2048]
+ check_text_lines(lines)
+ lines = ['a' * 1023 + '\x00']
+ self.assertRaises(BinaryFile, check_text_lines, lines)
+
+
+class TextPath(TestCaseInTempDir):
+
+ def test_text_file(self):
+ with file('boo', 'wb') as f: f.write('ab' * 2048)
+ check_text_path('boo')
+ with file('boo', 'wb') as f: f.write('a' * 1023 + '\x00')
+ self.assertRaises(BinaryFile, check_text_path, 'boo')
diff --git a/bzrlib/tests/test_textmerge.py b/bzrlib/tests/test_textmerge.py
new file mode 100644
index 0000000..2facdd5
--- /dev/null
+++ b/bzrlib/tests/test_textmerge.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: Aaron Bentley <aaron.bentley@utoronto.ca>
+from bzrlib.textmerge import Merge2
+from bzrlib.tests import TestCase
+
+
+class TestMerge2(TestCase):
+
+ def test_agreed(self):
+ lines = "a\nb\nc\nd\ne\nf\n".splitlines(True)
+ mlines = list(Merge2(lines, lines).merge_lines()[0])
+ self.assertEqualDiff(mlines, lines)
+
+ def test_conflict(self):
+ lines_a = "a\nb\nc\nd\ne\nf\ng\nh\n".splitlines(True)
+ lines_b = "z\nb\nx\nd\ne\ne\nf\ng\ny\n".splitlines(True)
+ expected = "<\na\n=\nz\n>\nb\n<\nc\n=\nx\n>\nd\ne\n<\n=\ne\n>\nf\n"\
+ "g\n<\nh\n=\ny\n>\n"
+ m2 = Merge2(lines_a, lines_b, '<\n', '>\n', '=\n')
+ mlines = m2.merge_lines()[0]
+ self.assertEqualDiff(''.join(mlines), expected)
+ mlines = m2.merge_lines(reprocess=True)[0]
+ self.assertEqualDiff(''.join(mlines), expected)
+
+ def test_reprocess(self):
+ struct = [('a', 'b'), ('c',), ('def','geh'), ('i',)]
+ expect = [('a', 'b'), ('c',), ('d', 'g'), ('e',), ('f', 'h'), ('i',)]
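+ # (A rough reading of the structure, based on this test's data: 1-tuples
+ # are regions both sides agree on, 2-tuples are (side_a, side_b)
+ # conflicts, and reprocess_struct splits a conflict around text common to
+ # both sides, here the 'e' shared by 'def' and 'geh'.)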
+ result = Merge2.reprocess_struct(struct)
+ self.assertEqual(list(result), expect)
diff --git a/bzrlib/tests/test_timestamp.py b/bzrlib/tests/test_timestamp.py
new file mode 100644
index 0000000..f192a68
--- /dev/null
+++ b/bzrlib/tests/test_timestamp.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2007, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import random
+import time
+
+
+from bzrlib import (
+ tests,
+ timestamp,
+ )
+from bzrlib.osutils import local_time_offset
+
+
+class TestPatchHeader(tests.TestCase):
+
+ def test_format_patch_date(self):
+ # epoch is always in utc
+ self.assertEqual('1970-01-01 00:00:00 +0000',
+ timestamp.format_patch_date(0))
+ self.assertEqual('1970-01-01 00:00:00 +0000',
+ timestamp.format_patch_date(0, 5 * 3600))
+ self.assertEqual('1970-01-01 00:00:00 +0000',
+ timestamp.format_patch_date(0, -5 * 3600))
+ # regular timestamp with typical timezone
+ self.assertEqual('2007-03-06 10:04:19 -0500',
+ timestamp.format_patch_date(1173193459, -5 * 3600))
+ # the timezone part is HHMM
+ self.assertEqual('2007-03-06 09:34:19 -0530',
+ timestamp.format_patch_date(1173193459, -5.5 * 3600))
+ # timezones can be offset by single minutes (but no less)
+ self.assertEqual('2007-03-06 15:05:19 +0001',
+ timestamp.format_patch_date(1173193459, +1 * 60))
+
+ def test_parse_patch_date(self):
+ self.assertEqual((0, 0),
+ timestamp.parse_patch_date('1970-01-01 00:00:00 +0000'))
+ # even though we don't emit pre-epoch dates, we can parse them
+ self.assertEqual((0, -5 * 3600),
+ timestamp.parse_patch_date('1969-12-31 19:00:00 -0500'))
+ self.assertEqual((0, +5 * 3600),
+ timestamp.parse_patch_date('1970-01-01 05:00:00 +0500'))
+ self.assertEqual((1173193459, -5 * 3600),
+ timestamp.parse_patch_date('2007-03-06 10:04:19 -0500'))
+ # offset of three minutes
+ self.assertEqual((1173193459, +3 * 60),
+ timestamp.parse_patch_date('2007-03-06 15:07:19 +0003'))
+ # No space between time and offset
+ self.assertEqual((1173193459, -5 * 3600),
+ timestamp.parse_patch_date('2007-03-06 10:04:19-0500'))
+ # Extra spacing
+ self.assertEqual((1173193459, -5 * 3600),
+ timestamp.parse_patch_date('2007-03-06 10:04:19 -0500'))
+
+ def test_parse_patch_date_bad(self):
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ 'NOT A TIME')
+ # Extra data at end
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 -0500x')
+ # Missing day
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03 10:04:19 -0500')
+ # Missing seconds
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04 -0500')
+ # Missing offset
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19')
+ # Missing plus or minus in offset
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 0500')
+ # Invalid hour in offset
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 +2400')
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 -2400')
+ # Invalid minute in offset
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 -0560')
+ # Too many digits in offset
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 79500')
+ # Minus sign in middle of offset
+ self.assertRaises(ValueError, timestamp.parse_patch_date,
+ '2007-03-06 10:04:19 +05-5')
+
+
+class UnpackHighresDateTests(tests.TestCase):
+
+ def test_unpack_highres_date(self):
+ self.assertEquals(
+ (1120153132.3508501, -18000),
+ timestamp.unpack_highres_date('Thu 2005-06-30 12:38:52.350850105 -0500'))
+ self.assertEquals(
+ (1120153132.3508501, 0),
+ timestamp.unpack_highres_date('Thu 2005-06-30 17:38:52.350850105 +0000'))
+ self.assertEquals(
+ (1120153132.3508501, 7200),
+ timestamp.unpack_highres_date('Thu 2005-06-30 19:38:52.350850105 +0200'))
+ self.assertEquals(
+ (1152428738.867522, 19800),
+ timestamp.unpack_highres_date('Sun 2006-07-09 12:35:38.867522001 +0530'))
+
+ def test_random(self):
+ t = time.time()
+ o = local_time_offset()
+ t2, o2 = timestamp.unpack_highres_date(timestamp.format_highres_date(t, o))
+ self.assertEquals(t, t2)
+ self.assertEquals(o, o2)
+ t -= 24*3600*365*2 # Start 2 years ago
+ o = -12*3600
+ for count in xrange(500):
+ t += random.random()*24*3600*30
+ o = ((o/3600 + 13) % 25 - 12)*3600 # Advance offset one hour, wrapping +12 back to -12
+ date = timestamp.format_highres_date(t, o)
+ t2, o2 = timestamp.unpack_highres_date(date)
+ self.assertEquals(t, t2,
+ 'Failed on date %r, %s,%s diff:%s' % (date, t, o, t2-t))
+ self.assertEquals(o, o2,
+ 'Failed on date %r, %s,%s diff:%s' % (date, t, o, t2-t))
diff --git a/bzrlib/tests/test_trace.py b/bzrlib/tests/test_trace.py
new file mode 100644
index 0000000..af547e9
--- /dev/null
+++ b/bzrlib/tests/test_trace.py
@@ -0,0 +1,443 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# "weren't nothing promised to you. do i look like i got a promise face?"
+
+"""Tests for trace library"""
+
+from cStringIO import StringIO
+import errno
+import logging
+import os
+import re
+import sys
+import tempfile
+
+from bzrlib import (
+ debug,
+ errors,
+ trace,
+ )
+from bzrlib.tests import features, TestCaseInTempDir, TestCase
+from bzrlib.trace import (
+ mutter, mutter_callsite, report_exception,
+ set_verbosity_level, get_verbosity_level, is_quiet, is_verbose, be_quiet,
+ pop_log_file,
+ push_log_file,
+ _rollover_trace_maybe,
+ show_error,
+ )
+
+
+def _format_exception():
+ """Format an exception as it would normally be displayed to the user"""
+ buf = StringIO()
+ report_exception(sys.exc_info(), buf)
+ return buf.getvalue()
+
+
+class TestTrace(TestCase):
+
+ def test_format_sys_exception(self):
+ # Test handling of an internal/unexpected error that probably
+ # indicates a bug in bzr. The details of the message may vary
+ # depending on whether apport is available or not. See test_crash for
+ # more.
+ try:
+ raise NotImplementedError, "time travel"
+ except NotImplementedError:
+ pass
+ err = _format_exception()
+ self.assertEqualDiff(err.splitlines()[0],
+ 'bzr: ERROR: exceptions.NotImplementedError: time travel')
+ self.assertContainsRe(err,
+ 'Bazaar has encountered an internal error.')
+
+ def test_format_interrupt_exception(self):
+ try:
+ raise KeyboardInterrupt()
+ except KeyboardInterrupt:
+ # XXX: Some risk that a *real* keyboard interrupt won't be seen
+ pass
+ msg = _format_exception()
+ self.assertTrue(len(msg) > 0)
+ self.assertEqualDiff(msg, 'bzr: interrupted\n')
+
+ def test_format_memory_error(self):
+ try:
+ raise MemoryError()
+ except MemoryError:
+ pass
+ msg = _format_exception()
+ self.assertEquals(msg,
+ "bzr: out of memory\nUse -Dmem_dump to dump memory to a file.\n")
+
+ def test_format_mem_dump(self):
+ self.requireFeature(features.meliae)
+ debug.debug_flags.add('mem_dump')
+ try:
+ raise MemoryError()
+ except MemoryError:
+ pass
+ msg = _format_exception()
+ self.assertStartsWith(msg,
+ "bzr: out of memory\nMemory dumped to ")
+
+ def test_format_os_error(self):
+ try:
+ os.rmdir('nosuchfile22222')
+ except OSError, e:
+ e_str = str(e)
+ msg = _format_exception()
+ # Linux seems to give "No such file" but Windows gives "The system
+ # cannot find the file specified".
+ self.assertEqual('bzr: ERROR: %s\n' % (e_str,), msg)
+
+ def test_format_io_error(self):
+ try:
+ file('nosuchfile22222')
+ except IOError:
+ pass
+ msg = _format_exception()
+ # Even though Windows and Linux differ for 'os.rmdir', they both give
+ # 'No such file' for open()
+ # However the message now gets translated, so we cannot test for specific text
+ self.assertContainsRe(msg,
+ r'^bzr: ERROR: \[Errno .*\] .*nosuchfile')
+
+ def test_format_pywintypes_error(self):
+ self.requireFeature(features.pywintypes)
+ import pywintypes, win32file
+ try:
+ win32file.RemoveDirectory('nosuchfile22222')
+ except pywintypes.error:
+ pass
+ msg = _format_exception()
+ # GZ 2010-05-03: Formatting for pywintypes.error is basic, a 3-tuple
+ # with errno, function name, and locale error message
+ self.assertContainsRe(msg,
+ r"^bzr: ERROR: \(2, 'RemoveDirectory[AW]?', .*\)")
+
+ def test_format_sockets_error(self):
+ try:
+ import socket
+ sock = socket.socket()
+ sock.send("This should fail.")
+ except socket.error:
+ pass
+ msg = _format_exception()
+
+ self.assertNotContainsRe(msg,
+ r"Traceback (most recent call last):")
+
+ def test_format_unicode_error(self):
+ try:
+ raise errors.BzrCommandError(u'argument foo\xb5 does not exist')
+ except errors.BzrCommandError:
+ pass
+ # The only check here is that formatting a non-ascii error message
+ # does not raise.
+ msg = _format_exception()
+
+ def test_format_exception(self):
+ """Short formatting of bzr exceptions"""
+ try:
+ raise errors.NotBranchError('wibble')
+ except errors.NotBranchError:
+ pass
+ msg = _format_exception()
+ self.assertTrue(len(msg) > 0)
+ self.assertEqualDiff(msg, 'bzr: ERROR: Not a branch: \"wibble\".\n')
+
+ def test_report_external_import_error(self):
+ """Short friendly message for missing system modules."""
+ try:
+ import ImaginaryModule
+ except ImportError, e:
+ pass
+ else:
+ self.fail("somehow succeeded in importing %r" % ImaginaryModule)
+ msg = _format_exception()
+ self.assertEqual(msg,
+ 'bzr: ERROR: No module named ImaginaryModule\n'
+ 'You may need to install this Python library separately.\n')
+
+ def test_report_import_syntax_error(self):
+ try:
+ raise ImportError("syntax error")
+ except ImportError, e:
+ pass
+ msg = _format_exception()
+ self.assertContainsRe(msg,
+ r'Bazaar has encountered an internal error')
+
+ def test_trace_unicode(self):
+ """Write Unicode to trace log"""
+ self.log(u'the unicode character for benzene is \N{BENZENE RING}')
+ log = self.get_log()
+ self.assertContainsRe(log, "the unicode character for benzene is")
+
+ def test_trace_argument_unicode(self):
+ """Write a Unicode argument to the trace log"""
+ mutter(u'the unicode character for benzene is %s', u'\N{BENZENE RING}')
+ log = self.get_log()
+ self.assertContainsRe(log, 'the unicode character')
+
+ def test_trace_argument_utf8(self):
+ """Write a Unicode argument to the trace log"""
+ mutter(u'the unicode character for benzene is %s',
+ u'\N{BENZENE RING}'.encode('utf-8'))
+ log = self.get_log()
+ self.assertContainsRe(log, 'the unicode character')
+
+ def test_report_broken_pipe(self):
+ try:
+ raise IOError(errno.EPIPE, 'broken pipe foofofo')
+ except IOError, e:
+ msg = _format_exception()
+ self.assertEquals(msg, "bzr: broken pipe\n")
+ else:
+ self.fail("expected error not raised")
+
+ def assertLogStartsWith(self, log, string):
+ """Like assertStartsWith, but skips the log timestamp."""
+ self.assertContainsRe(log,
+ '^\\d+\\.\\d+ ' + re.escape(string))
+
+ def test_mutter_callsite_1(self):
+ """mutter_callsite can capture 1 level of stack frame."""
+ mutter_callsite(1, "foo %s", "a string")
+ log = self.get_log()
+ # begin with the message
+ self.assertLogStartsWith(log, 'foo a string\nCalled from:\n')
+ # should show two frames: this frame and the one above
+ self.assertContainsRe(log,
+ 'test_trace\\.py", line \\d+, in test_mutter_callsite_1\n')
+ # this frame should be the final one
+ self.assertEndsWith(log, ' "a string")\n')
+
+ def test_mutter_callsite_2(self):
+ """mutter_callsite can capture 2 levels of stack frame."""
+ mutter_callsite(2, "foo %s", "a string")
+ log = self.get_log()
+ # begin with the message
+ self.assertLogStartsWith(log, 'foo a string\nCalled from:\n')
+ # should show two frames: this frame and the one above
+ self.assertContainsRe(log,
+ 'test_trace.py", line \d+, in test_mutter_callsite_2\n')
+ # this frame should be the final one
+ self.assertEndsWith(log, ' "a string")\n')
+
+ def test_mutter_never_fails(self):
+ # Even if the decode/encode stage fails, mutter should not
+ # raise an exception
+ # This test checks that mutter doesn't fail; the current behaviour
+ # is that it doesn't fail *and writes non-utf8*.
+ mutter(u'Writing a greek mu (\xb5) works in a unicode string')
+ mutter('But fails in an ascii string \xb5')
+ mutter('and in an ascii argument: %s', '\xb5')
+ log = self.get_log()
+ self.assertContainsRe(log, 'Writing a greek mu')
+ self.assertContainsRe(log, "But fails in an ascii string")
+ # However, the log content object does unicode replacement on reading
+ # so that good data comes back as unicode. So we have to do the same
+ # replacement here as well.
+ self.assertContainsRe(log, "ascii argument: \xb5".decode('utf8',
+ 'replace'))
+
+ def test_show_error(self):
+ show_error('error1')
+ show_error(u'error2 \xb5 blah')
+ show_error('arg: %s', 'blah')
+ show_error('arg2: %(key)s', {'key':'stuff'})
+ try:
+ raise Exception("oops")
+ except:
+ show_error('kwarg', exc_info=True)
+ log = self.get_log()
+ self.assertContainsRe(log, 'error1')
+ self.assertContainsRe(log, u'error2 \xb5 blah')
+ self.assertContainsRe(log, 'arg: blah')
+ self.assertContainsRe(log, 'arg2: stuff')
+ self.assertContainsRe(log, 'kwarg')
+ self.assertContainsRe(log, 'Traceback \\(most recent call last\\):')
+ self.assertContainsRe(log, 'File ".*test_trace.py", line .*, in test_show_error')
+ self.assertContainsRe(log, 'raise Exception\\("oops"\\)')
+ self.assertContainsRe(log, 'Exception: oops')
+
+ def test_push_log_file(self):
+ """Can push and pop log file, and this catches mutter messages.
+
+ This is primarily for use in the test framework.
+ """
+ tmp1 = tempfile.NamedTemporaryFile()
+ tmp2 = tempfile.NamedTemporaryFile()
+ try:
+ memento1 = push_log_file(tmp1)
+ mutter("comment to file1")
+ try:
+ memento2 = push_log_file(tmp2)
+ try:
+ mutter("comment to file2")
+ finally:
+ pop_log_file(memento2)
+ mutter("again to file1")
+ finally:
+ pop_log_file(memento1)
+ # The files were opened in binary mode, so they should contain exactly
+ # these bytes, and removing the file as the log target should have
+ # caused them to be flushed out. We need to match using regexps
+ # because there's a timestamp at the front.
+ tmp1.seek(0)
+ self.assertContainsRe(tmp1.read(),
+ r"\d+\.\d+ comment to file1\n\d+\.\d+ again to file1\n")
+ tmp2.seek(0)
+ self.assertContainsRe(tmp2.read(),
+ r"\d+\.\d+ comment to file2\n")
+ finally:
+ tmp1.close()
+ tmp2.close()
+
+ def test__open_bzr_log_uses_stderr_for_failures(self):
+ # If _open_bzr_log cannot open the file, it should write a warning to
+ # stderr, since this normally happens before logging is set up.
+ self.overrideAttr(sys, 'stderr', StringIO())
+ # Set the log file to something that cannot exist
+ self.overrideEnv('BZR_LOG', os.getcwd() + '/no-dir/bzr.log')
+ self.overrideAttr(trace, '_bzr_log_filename')
+ logf = trace._open_bzr_log()
+ self.assertIs(None, logf)
+ self.assertContainsRe(sys.stderr.getvalue(),
+ 'failed to open trace file: .*/no-dir/bzr.log')
+
+
+class TestVerbosityLevel(TestCase):
+
+ def test_verbosity_level(self):
+ set_verbosity_level(1)
+ self.assertEqual(1, get_verbosity_level())
+ self.assertTrue(is_verbose())
+ self.assertFalse(is_quiet())
+ set_verbosity_level(-1)
+ self.assertEqual(-1, get_verbosity_level())
+ self.assertFalse(is_verbose())
+ self.assertTrue(is_quiet())
+ set_verbosity_level(0)
+ self.assertEqual(0, get_verbosity_level())
+ self.assertFalse(is_verbose())
+ self.assertFalse(is_quiet())
+
+ def test_be_quiet(self):
+ # Confirm the old API still works
+ be_quiet(True)
+ self.assertEqual(-1, get_verbosity_level())
+ be_quiet(False)
+ self.assertEqual(0, get_verbosity_level())
+
+
+class TestLogging(TestCase):
+ """Check logging functionality robustly records information"""
+
+ def test_note(self):
+ trace.note("Noted")
+ self.assertEqual(" INFO Noted\n", self.get_log())
+
+ def test_warning(self):
+ trace.warning("Warned")
+ self.assertEqual(" WARNING Warned\n", self.get_log())
+
+ def test_log(self):
+ logging.getLogger("bzr").error("Errored")
+ self.assertEqual(" ERROR Errored\n", self.get_log())
+
+ def test_log_sub(self):
+ logging.getLogger("bzr.test_log_sub").debug("Whispered")
+ self.assertEqual(" DEBUG Whispered\n", self.get_log())
+
+ def test_log_unicode_msg(self):
+ logging.getLogger("bzr").debug(u"\xa7")
+ self.assertEqual(u" DEBUG \xa7\n", self.get_log())
+
+ def test_log_unicode_arg(self):
+ logging.getLogger("bzr").debug("%s", u"\xa7")
+ self.assertEqual(u" DEBUG \xa7\n", self.get_log())
+
+ def test_log_utf8_msg(self):
+ logging.getLogger("bzr").debug("\xc2\xa7")
+ self.assertEqual(u" DEBUG \xa7\n", self.get_log())
+
+ def test_log_utf8_arg(self):
+ logging.getLogger("bzr").debug("%s", "\xc2\xa7")
+ self.assertEqual(u" DEBUG \xa7\n", self.get_log())
+
+ def test_log_bytes_msg(self):
+ logging.getLogger("bzr").debug("\xa7")
+ log = self.get_log()
+ self.assertContainsString(log, "UnicodeDecodeError: ")
+ self.assertContainsString(log,
+ "Logging record unformattable: '\\xa7' % ()\n")
+
+ def test_log_bytes_arg(self):
+ logging.getLogger("bzr").debug("%s", "\xa7")
+ log = self.get_log()
+ self.assertContainsString(log, "UnicodeDecodeError: ")
+ self.assertContainsString(log,
+ "Logging record unformattable: '%s' % ('\\xa7',)\n")
+
+ def test_log_mixed_strings(self):
+ logging.getLogger("bzr").debug(u"%s", "\xa7")
+ log = self.get_log()
+ self.assertContainsString(log, "UnicodeDecodeError: ")
+ self.assertContainsString(log,
+ "Logging record unformattable: u'%s' % ('\\xa7',)\n")
+
+ def test_log_repr_broken(self):
+ class BadRepr(object):
+ def __repr__(self):
+ raise ValueError("Broken object")
+ logging.getLogger("bzr").debug("%s", BadRepr())
+ log = self.get_log()
+ self.assertContainsRe(log, "ValueError: Broken object\n")
+ self.assertContainsRe(log, "Logging record unformattable: '%s' % .*\n")
+
+
+class TestBzrLog(TestCaseInTempDir):
+
+ def test_log_rollover(self):
+ temp_log_name = 'test-log'
+ trace_file = open(temp_log_name, 'at')
+ trace_file.writelines(['test_log_rollover padding\n'] * 200000)
+ trace_file.close()
+ _rollover_trace_maybe(temp_log_name)
+ # should have been rolled over
+ self.assertFalse(os.access(temp_log_name, os.R_OK))
+
+
+class TestTraceConfiguration(TestCaseInTempDir):
+
+ def test_default_config(self):
+ config = trace.DefaultConfig()
+ self.overrideAttr(trace, "_bzr_log_filename", None)
+ trace._bzr_log_filename = None
+ expected_filename = trace._get_bzr_log_filename()
+ self.assertEqual(None, trace._bzr_log_filename)
+ config.__enter__()
+ try:
+ # Should have entered and set up a default filename.
+ self.assertEqual(expected_filename, trace._bzr_log_filename)
+ finally:
+ config.__exit__(None, None, None)
+ # Should have exited and cleaned up.
+ self.assertEqual(None, trace._bzr_log_filename)
diff --git a/bzrlib/tests/test_transactions.py b/bzrlib/tests/test_transactions.py
new file mode 100644
index 0000000..8ce736b
--- /dev/null
+++ b/bzrlib/tests/test_transactions.py
@@ -0,0 +1,299 @@
+# Copyright (C) 2005, 2006, 2009, 2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the behaviour of the Transaction concept in bzr."""
+
+#import bzrlib specific imports here
+import bzrlib.errors as errors
+from bzrlib.tests import TestCase
+import bzrlib.transactions as transactions
+
+
+class DummyWeave(object):
+ """A class that can be instantiated and compared."""
+
+ def __init__(self, message):
+ self._message = message
+ self.finished = False
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+ return self._message == other._message
+
+ def transaction_finished(self):
+ self.finished = True
+
+
+class TestSymbols(TestCase):
+
+ def test_public_symbols(self):
+ from bzrlib.transactions import ReadOnlyTransaction
+ from bzrlib.transactions import PassThroughTransaction
+
+
+class TestReadOnlyTransaction(TestCase):
+
+ def setUp(self):
+ self.transaction = transactions.ReadOnlyTransaction()
+ super(TestReadOnlyTransaction, self).setUp()
+
+ def test_register_clean(self):
+ self.transaction.register_clean("anobject")
+
+ def test_register_dirty_raises(self):
+ self.assertRaises(errors.ReadOnlyError,
+ self.transaction.register_dirty, "anobject")
+
+ def test_map(self):
+ self.assertNotEqual(None, getattr(self.transaction, "map", None))
+
+ def test_add_and_get(self):
+ weave = "a weave"
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+
+ def test_finish_returns(self):
+ self.transaction.finish()
+
+ def test_finish_does_not_tell_versioned_file_finished(self):
+ # read-only transactions never write, so there's no
+ # need to inform versioned files about finishing
+ weave = DummyWeave('a weave')
+ self.transaction.finish()
+ self.assertFalse(weave.finished)
+
+ def test_zero_size_cache(self):
+ self.transaction.set_cache_size(0)
+ weave = DummyWeave('a weave')
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ weave = None
+ # add an object, should fall right out if there are no references
+ self.transaction.register_clean(self.transaction.map.find_weave("id"))
+ self.assertEqual(None, self.transaction.map.find_weave("id"))
+ # but if we have a reference it should stick around
+ weave = DummyWeave("another weave")
+ self.transaction.map.add_weave("id", weave)
+ self.transaction.register_clean(self.transaction.map.find_weave("id"))
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ del weave
+ # it's not a weakref system
+ self.assertEqual(DummyWeave("another weave"),
+ self.transaction.map.find_weave("id"))
+
+ def test_small_cache(self):
+ self.transaction.set_cache_size(1)
+ # add an object, should not fall right out if there are no references
+ #sys.getrefcounts(foo)
+ self.transaction.map.add_weave("id", DummyWeave("a weave"))
+ self.transaction.register_clean(self.transaction.map.find_weave("id"))
+ self.assertEqual(DummyWeave("a weave"),
+ self.transaction.map.find_weave("id"))
+ self.transaction.map.add_weave("id2", DummyWeave("a weave also"))
+ self.transaction.register_clean(self.transaction.map.find_weave("id2"))
+ # currently a fifo
+ self.assertEqual(None, self.transaction.map.find_weave("id"))
+ self.assertEqual(DummyWeave("a weave also"),
+ self.transaction.map.find_weave("id2"))
+
+ def test_small_cache_with_references(self):
+ # if we have a reference it should stick around
+ weave = "a weave"
+ weave2 = "another weave"
+ self.transaction.map.add_weave("id", weave)
+ self.transaction.map.add_weave("id2", weave2)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ self.assertEqual(weave2, self.transaction.map.find_weave("id2"))
+ weave = None
+ # it's not a weakref system
+ self.assertEqual("a weave", self.transaction.map.find_weave("id"))
+
+ def test_precious_with_zero_size_cache(self):
+ self.transaction.set_cache_size(0)
+ weave = DummyWeave('a weave')
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ weave = None
+ # add an object, should not fall out even with no references.
+ self.transaction.register_clean(self.transaction.map.find_weave("id"),
+ precious=True)
+ self.assertEqual(DummyWeave('a weave'),
+ self.transaction.map.find_weave("id"))
+
+ def test_writable(self):
+ self.assertFalse(self.transaction.writeable())
+
+
+class TestPassThroughTransaction(TestCase):
+
+ def test_construct(self):
+ transactions.PassThroughTransaction()
+
+ def test_register_clean(self):
+ transaction = transactions.PassThroughTransaction()
+ transaction.register_clean("anobject")
+
+ def test_register_dirty(self):
+ transaction = transactions.PassThroughTransaction()
+ transaction.register_dirty("anobject")
+
+ def test_map(self):
+ transaction = transactions.PassThroughTransaction()
+ self.assertNotEqual(None, getattr(transaction, "map", None))
+
+ def test_add_and_get(self):
+ transaction = transactions.PassThroughTransaction()
+ weave = "a weave"
+ transaction.map.add_weave("id", weave)
+ self.assertEqual(None, transaction.map.find_weave("id"))
+
+ def test_finish_returns(self):
+ transaction = transactions.PassThroughTransaction()
+ transaction.finish()
+
+ def test_finish_tells_versioned_file_finished(self):
+ # pass-through transactions allow writes, so they
+ # need to inform versioned files about finishing
+ weave = DummyWeave('a weave')
+ transaction = transactions.PassThroughTransaction()
+ transaction.register_dirty(weave)
+ transaction.finish()
+ self.assertTrue(weave.finished)
+
+ def test_cache_is_ignored(self):
+ transaction = transactions.PassThroughTransaction()
+ transaction.set_cache_size(100)
+ weave = "a weave"
+ transaction.map.add_weave("id", weave)
+ self.assertEqual(None, transaction.map.find_weave("id"))
+
+ def test_writable(self):
+ transaction = transactions.PassThroughTransaction()
+ self.assertTrue(transaction.writeable())
+
+
+class TestWriteTransaction(TestCase):
+
+ def setUp(self):
+ self.transaction = transactions.WriteTransaction()
+ super(TestWriteTransaction, self).setUp()
+
+ def test_register_clean(self):
+ self.transaction.register_clean("anobject")
+
+ def test_register_dirty(self):
+ self.transaction.register_dirty("anobject")
+
+ def test_map(self):
+ self.assertNotEqual(None, getattr(self.transaction, "map", None))
+
+ def test_add_and_get(self):
+ weave = "a weave"
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+
+ def test_finish_returns(self):
+ self.transaction.finish()
+
+ def test_finish_tells_versioned_file_finished(self):
+ # write transactions allow writes, so they
+ # need to inform versioned files about finishing
+ weave = DummyWeave('a weave')
+ self.transaction.register_dirty(weave)
+ self.transaction.finish()
+ self.assertTrue(weave.finished)
+
+ def test_zero_size_cache(self):
+ self.transaction.set_cache_size(0)
+ # add an object, should fall right out if there are no references
+ weave = DummyWeave('a weave')
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ weave = None
+ self.transaction.register_clean(self.transaction.map.find_weave("id"))
+ self.assertEqual(None, self.transaction.map.find_weave("id"))
+ # but if we have a reference to a clean object it should stick around
+ weave = DummyWeave("another weave")
+ self.transaction.map.add_weave("id", weave)
+ self.transaction.register_clean(self.transaction.map.find_weave("id"))
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ del weave
+ # it's not a weakref system
+ self.assertEqual(DummyWeave("another weave"),
+ self.transaction.map.find_weave("id"))
+
+ def test_zero_size_cache_dirty_objects(self):
+ self.transaction.set_cache_size(0)
+ # add a dirty object, which should not fall right out.
+ weave = DummyWeave('a weave')
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ weave = None
+ self.transaction.register_dirty(self.transaction.map.find_weave("id"))
+ self.assertNotEqual(None, self.transaction.map.find_weave("id"))
+
+ def test_clean_to_dirty(self):
+ # a clean object may become dirty.
+ weave = DummyWeave('A weave')
+ self.transaction.map.add_weave("id", weave)
+ self.transaction.register_clean(weave)
+ self.transaction.register_dirty(weave)
+ self.assertTrue(self.transaction.is_dirty(weave))
+ self.assertFalse(self.transaction.is_clean(weave))
+
+ def test_small_cache(self):
+ self.transaction.set_cache_size(1)
+ # add an object, should not fall right out if there are no references
+ #sys.getrefcounts(foo)
+ self.transaction.map.add_weave("id", DummyWeave("a weave"))
+ self.transaction.register_clean(self.transaction.map.find_weave("id"))
+ self.assertEqual(DummyWeave("a weave"),
+ self.transaction.map.find_weave("id"))
+ self.transaction.map.add_weave("id2", DummyWeave("a weave also"))
+ self.transaction.register_clean(self.transaction.map.find_weave("id2"))
+ # currently a fifo
+ self.assertEqual(None, self.transaction.map.find_weave("id"))
+ self.assertEqual(DummyWeave("a weave also"),
+ self.transaction.map.find_weave("id2"))
+
+ def test_small_cache_with_references(self):
+ # if we have a reference it should stick around
+ weave = "a weave"
+ weave2 = "another weave"
+ self.transaction.map.add_weave("id", weave)
+ self.transaction.map.add_weave("id2", weave2)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ self.assertEqual(weave2, self.transaction.map.find_weave("id2"))
+ weave = None
+ # it's not a weakref system
+ self.assertEqual("a weave", self.transaction.map.find_weave("id"))
+
+ def test_precious_with_zero_size_cache(self):
+ self.transaction.set_cache_size(0)
+ weave = DummyWeave('a weave')
+ self.transaction.map.add_weave("id", weave)
+ self.assertEqual(weave, self.transaction.map.find_weave("id"))
+ weave = None
+ # add an object, should not fall out even with no references.
+ self.transaction.register_clean(self.transaction.map.find_weave("id"),
+ precious=True)
+ self.assertEqual(DummyWeave('a weave'),
+ self.transaction.map.find_weave("id"))
+
+ def test_writable(self):
+ self.assertTrue(self.transaction.writeable())
diff --git a/bzrlib/tests/test_transform.py b/bzrlib/tests/test_transform.py
new file mode 100644
index 0000000..d1a0b33
--- /dev/null
+++ b/bzrlib/tests/test_transform.py
@@ -0,0 +1,3729 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import errno
+import os
+from StringIO import StringIO
+import sys
+import time
+
+from bzrlib import (
+ bencode,
+ errors,
+ filters,
+ generate_ids,
+ osutils,
+ revision as _mod_revision,
+ rules,
+ symbol_versioning,
+ tests,
+ trace,
+ transform,
+ urlutils,
+ )
+from bzrlib.conflicts import (
+ DeletingParent,
+ DuplicateEntry,
+ DuplicateID,
+ MissingParent,
+ NonDirectoryParent,
+ ParentLoop,
+ UnversionedParent,
+)
+from bzrlib.controldir import ControlDir
+from bzrlib.diff import show_diff_trees
+from bzrlib.errors import (
+ DuplicateKey,
+ ExistingLimbo,
+ ExistingPendingDeletion,
+ ImmortalLimbo,
+ ImmortalPendingDeletion,
+ LockError,
+ MalformedTransform,
+ ReusingTransform,
+)
+from bzrlib.osutils import (
+ file_kind,
+ pathjoin,
+)
+from bzrlib.merge import Merge3Merger, Merger
+from bzrlib.mutabletree import MutableTree
+from bzrlib.tests import (
+ features,
+ TestCaseInTempDir,
+ TestSkipped,
+ )
+from bzrlib.tests.features import (
+ HardlinkFeature,
+ SymlinkFeature,
+ )
+from bzrlib.transform import (
+ build_tree,
+ create_from_tree,
+ cook_conflicts,
+ _FileMover,
+ FinalPaths,
+ resolve_conflicts,
+ resolve_checkout,
+ ROOT_PARENT,
+ TransformPreview,
+ TreeTransform,
+)
+
+
+class TestTreeTransform(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestTreeTransform, self).setUp()
+ self.wt = self.make_branch_and_tree('.', format='development-subtree')
+ os.chdir('..')
+
+ def get_transform(self):
+ transform = TreeTransform(self.wt)
+ self.addCleanup(transform.finalize)
+ return transform, transform.root
+
+ def get_transform_for_sha1_test(self):
+ trans, root = self.get_transform()
+ self.wt.lock_tree_write()
+ self.addCleanup(self.wt.unlock)
+ contents = ['just some content\n']
+ sha1 = osutils.sha_strings(contents)
+ # Roll back the clock
+ trans._creation_mtime = time.time() - 20.0
+ return trans, root, contents, sha1
+
+ def test_existing_limbo(self):
+ transform, root = self.get_transform()
+ limbo_name = transform._limbodir
+ deletion_path = transform._deletiondir
+ os.mkdir(pathjoin(limbo_name, 'hehe'))
+ self.assertRaises(ImmortalLimbo, transform.apply)
+ self.assertRaises(LockError, self.wt.unlock)
+ self.assertRaises(ExistingLimbo, self.get_transform)
+ self.assertRaises(LockError, self.wt.unlock)
+ os.rmdir(pathjoin(limbo_name, 'hehe'))
+ os.rmdir(limbo_name)
+ os.rmdir(deletion_path)
+ transform, root = self.get_transform()
+ transform.apply()
+
+ def test_existing_pending_deletion(self):
+ transform, root = self.get_transform()
+ deletion_path = self._limbodir = urlutils.local_path_from_url(
+ transform._tree._transport.abspath('pending-deletion'))
+ os.mkdir(pathjoin(deletion_path, 'blocking-directory'))
+ self.assertRaises(ImmortalPendingDeletion, transform.apply)
+ self.assertRaises(LockError, self.wt.unlock)
+ self.assertRaises(ExistingPendingDeletion, self.get_transform)
+
+ def test_build(self):
+ transform, root = self.get_transform()
+ self.wt.lock_tree_write()
+ self.addCleanup(self.wt.unlock)
+ self.assertIs(transform.get_tree_parent(root), ROOT_PARENT)
+ imaginary_id = transform.trans_id_tree_path('imaginary')
+ imaginary_id2 = transform.trans_id_tree_path('imaginary/')
+ self.assertEqual(imaginary_id, imaginary_id2)
+ self.assertEqual(root, transform.get_tree_parent(imaginary_id))
+ self.assertEqual('directory', transform.final_kind(root))
+ self.assertEqual(self.wt.get_root_id(), transform.final_file_id(root))
+ trans_id = transform.create_path('name', root)
+ self.assertIs(transform.final_file_id(trans_id), None)
+ self.assertIs(None, transform.final_kind(trans_id))
+ transform.create_file('contents', trans_id)
+ transform.set_executability(True, trans_id)
+ transform.version_file('my_pretties', trans_id)
+ self.assertRaises(DuplicateKey, transform.version_file,
+ 'my_pretties', trans_id)
+ self.assertEqual(transform.final_file_id(trans_id), 'my_pretties')
+ self.assertEqual(transform.final_parent(trans_id), root)
+ self.assertIs(transform.final_parent(root), ROOT_PARENT)
+ self.assertIs(transform.get_tree_parent(root), ROOT_PARENT)
+ oz_id = transform.create_path('oz', root)
+ transform.create_directory(oz_id)
+ transform.version_file('ozzie', oz_id)
+ trans_id2 = transform.create_path('name2', root)
+ transform.create_file('contents', trans_id2)
+ transform.set_executability(False, trans_id2)
+ transform.version_file('my_pretties2', trans_id2)
+ modified_paths = transform.apply().modified_paths
+ self.assertEqual('contents', self.wt.get_file_byname('name').read())
+ self.assertEqual(self.wt.path2id('name'), 'my_pretties')
+ self.assertIs(self.wt.is_executable('my_pretties'), True)
+ self.assertIs(self.wt.is_executable('my_pretties2'), False)
+ self.assertEqual('directory', file_kind(self.wt.abspath('oz')))
+ self.assertEqual(len(modified_paths), 3)
+ tree_mod_paths = [self.wt.id2abspath(f) for f in
+ ('ozzie', 'my_pretties', 'my_pretties2')]
+ self.assertSubset(tree_mod_paths, modified_paths)
+ # is it safe to finalize repeatedly?
+ transform.finalize()
+ transform.finalize()
+
+ def test_apply_informs_tree_of_observed_sha1(self):
+ trans, root, contents, sha1 = self.get_transform_for_sha1_test()
+ trans_id = trans.new_file('file1', root, contents, file_id='file1-id',
+ sha1=sha1)
+ calls = []
+ orig = self.wt._observed_sha1
+ def _observed_sha1(*args):
+ calls.append(args)
+ orig(*args)
+ self.wt._observed_sha1 = _observed_sha1
+ trans.apply()
+ self.assertEqual([(None, 'file1', trans._observed_sha1s[trans_id])],
+ calls)
+
+ def test_create_file_caches_sha1(self):
+ trans, root, contents, sha1 = self.get_transform_for_sha1_test()
+ trans_id = trans.create_path('file1', root)
+ trans.create_file(contents, trans_id, sha1=sha1)
+ st_val = osutils.lstat(trans._limbo_name(trans_id))
+ o_sha1, o_st_val = trans._observed_sha1s[trans_id]
+ self.assertEqual(o_sha1, sha1)
+ self.assertEqualStat(o_st_val, st_val)
+
+ def test__apply_insertions_updates_sha1(self):
+ trans, root, contents, sha1 = self.get_transform_for_sha1_test()
+ trans_id = trans.create_path('file1', root)
+ trans.create_file(contents, trans_id, sha1=sha1)
+ st_val = osutils.lstat(trans._limbo_name(trans_id))
+ o_sha1, o_st_val = trans._observed_sha1s[trans_id]
+ self.assertEqual(o_sha1, sha1)
+ self.assertEqualStat(o_st_val, st_val)
+ creation_mtime = trans._creation_mtime + 10.0
+ # We use os.utime to fake a time difference between when the file was
+ # created and when it is renamed. Note that the change we actually
+ # want to see is the real ctime change from 'os.rename()', but as long
+ # as we observe a new stat value, we should be fine.
+ os.utime(trans._limbo_name(trans_id), (creation_mtime, creation_mtime))
+ trans.apply()
+ new_st_val = osutils.lstat(self.wt.abspath('file1'))
+ o_sha1, o_st_val = trans._observed_sha1s[trans_id]
+ self.assertEqual(o_sha1, sha1)
+ self.assertEqualStat(o_st_val, new_st_val)
+ self.assertNotEqual(st_val.st_mtime, new_st_val.st_mtime)
+
+ def test_new_file_caches_sha1(self):
+ trans, root, contents, sha1 = self.get_transform_for_sha1_test()
+ trans_id = trans.new_file('file1', root, contents, file_id='file1-id',
+ sha1=sha1)
+ st_val = osutils.lstat(trans._limbo_name(trans_id))
+ o_sha1, o_st_val = trans._observed_sha1s[trans_id]
+ self.assertEqual(o_sha1, sha1)
+ self.assertEqualStat(o_st_val, st_val)
+
+ def test_cancel_creation_removes_observed_sha1(self):
+ trans, root, contents, sha1 = self.get_transform_for_sha1_test()
+ trans_id = trans.new_file('file1', root, contents, file_id='file1-id',
+ sha1=sha1)
+ self.assertTrue(trans_id in trans._observed_sha1s)
+ trans.cancel_creation(trans_id)
+ self.assertFalse(trans_id in trans._observed_sha1s)
+
+ def test_create_files_same_timestamp(self):
+ transform, root = self.get_transform()
+ self.wt.lock_tree_write()
+ self.addCleanup(self.wt.unlock)
+ # Roll back the clock, so that we know everything is being set to the
+ # exact time
+ transform._creation_mtime = creation_mtime = time.time() - 20.0
+ transform.create_file('content-one',
+ transform.create_path('one', root))
+ time.sleep(1) # *ugly*
+ transform.create_file('content-two',
+ transform.create_path('two', root))
+ transform.apply()
+ fo, st1 = self.wt.get_file_with_stat(None, path='one', filtered=False)
+ fo.close()
+ fo, st2 = self.wt.get_file_with_stat(None, path='two', filtered=False)
+ fo.close()
+ # We only guarantee 2s resolution
+ self.assertTrue(abs(creation_mtime - st1.st_mtime) < 2.0,
+ "%s != %s within 2 seconds" % (creation_mtime, st1.st_mtime))
+ # But if we have more than that, all files should get the same result
+ self.assertEqual(st1.st_mtime, st2.st_mtime)
+
+ def test_change_root_id(self):
+ transform, root = self.get_transform()
+ self.assertNotEqual('new-root-id', self.wt.get_root_id())
+ transform.new_directory('', ROOT_PARENT, 'new-root-id')
+ transform.delete_contents(root)
+ transform.unversion_file(root)
+ transform.fixup_new_roots()
+ transform.apply()
+ self.assertEqual('new-root-id', self.wt.get_root_id())
+
+ def test_change_root_id_add_files(self):
+ transform, root = self.get_transform()
+ self.assertNotEqual('new-root-id', self.wt.get_root_id())
+ new_trans_id = transform.new_directory('', ROOT_PARENT, 'new-root-id')
+ transform.new_file('file', new_trans_id, ['new-contents\n'],
+ 'new-file-id')
+ transform.delete_contents(root)
+ transform.unversion_file(root)
+ transform.fixup_new_roots()
+ transform.apply()
+ self.assertEqual('new-root-id', self.wt.get_root_id())
+ self.assertEqual('new-file-id', self.wt.path2id('file'))
+ self.assertFileEqual('new-contents\n', self.wt.abspath('file'))
+
+ def test_add_two_roots(self):
+ transform, root = self.get_transform()
+ new_trans_id = transform.new_directory('', ROOT_PARENT, 'new-root-id')
+ new_trans_id = transform.new_directory('', ROOT_PARENT, 'alt-root-id')
+ self.assertRaises(ValueError, transform.fixup_new_roots)
+
+ def test_retain_existing_root(self):
+ tt, root = self.get_transform()
+ with tt:
+ tt.new_directory('', ROOT_PARENT, 'new-root-id')
+ tt.fixup_new_roots()
+ self.assertNotEqual('new-root-id', tt.final_file_id(tt.root))
+
+ def test_retain_existing_root_added_file(self):
+ tt, root = self.get_transform()
+ new_trans_id = tt.new_directory('', ROOT_PARENT, 'new-root-id')
+ child = tt.new_directory('child', new_trans_id, 'child-id')
+ tt.fixup_new_roots()
+ self.assertEqual(tt.root, tt.final_parent(child))
+
+ def test_add_unversioned_root(self):
+ transform, root = self.get_transform()
+ new_trans_id = transform.new_directory('', ROOT_PARENT, None)
+ transform.delete_contents(transform.root)
+ transform.fixup_new_roots()
+ self.assertNotIn(transform.root, transform._new_id)
+
+ def test_remove_root_fixup(self):
+ transform, root = self.get_transform()
+ old_root_id = self.wt.get_root_id()
+ self.assertNotEqual('new-root-id', old_root_id)
+ transform.delete_contents(root)
+ transform.unversion_file(root)
+ transform.fixup_new_roots()
+ transform.apply()
+ self.assertEqual(old_root_id, self.wt.get_root_id())
+
+ transform, root = self.get_transform()
+ new_trans_id = transform.new_directory('', ROOT_PARENT, 'new-root-id')
+ new_trans_id = transform.new_directory('', ROOT_PARENT, 'alt-root-id')
+ self.assertRaises(ValueError, transform.fixup_new_roots)
+
+ def test_fixup_new_roots_permits_empty_tree(self):
+ transform, root = self.get_transform()
+ transform.delete_contents(root)
+ transform.unversion_file(root)
+ transform.fixup_new_roots()
+ self.assertIs(None, transform.final_kind(root))
+ self.assertIs(None, transform.final_file_id(root))
+
+ def test_apply_retains_root_directory(self):
+ # Do not attempt to delete the physical root directory, because that
+ # is impossible.
+ transform, root = self.get_transform()
+ with transform:
+ transform.delete_contents(root)
+ e = self.assertRaises(AssertionError, self.assertRaises,
+ errors.TransformRenameFailed,
+ transform.apply)
+ self.assertContainsRe('TransformRenameFailed not raised', str(e))
+
+ def test_apply_retains_file_id(self):
+ transform, root = self.get_transform()
+ old_root_id = transform.tree_file_id(root)
+ transform.unversion_file(root)
+ transform.apply()
+ self.assertEqual(old_root_id, self.wt.get_root_id())
+
+ def test_hardlink(self):
+ self.requireFeature(HardlinkFeature)
+ transform, root = self.get_transform()
+ transform.new_file('file1', root, 'contents')
+ transform.apply()
+ target = self.make_branch_and_tree('target')
+ target_transform = TreeTransform(target)
+ trans_id = target_transform.create_path('file1', target_transform.root)
+ target_transform.create_hardlink(self.wt.abspath('file1'), trans_id)
+ target_transform.apply()
+ self.assertPathExists('target/file1')
+ source_stat = os.stat(self.wt.abspath('file1'))
+ target_stat = os.stat('target/file1')
+ self.assertEqual(source_stat, target_stat)
+
+ def test_convenience(self):
+ transform, root = self.get_transform()
+ self.wt.lock_tree_write()
+ self.addCleanup(self.wt.unlock)
+ trans_id = transform.new_file('name', root, 'contents',
+ 'my_pretties', True)
+ oz = transform.new_directory('oz', root, 'oz-id')
+ dorothy = transform.new_directory('dorothy', oz, 'dorothy-id')
+ toto = transform.new_file('toto', dorothy, 'toto-contents',
+ 'toto-id', False)
+
+ self.assertEqual(len(transform.find_conflicts()), 0)
+ transform.apply()
+ self.assertRaises(ReusingTransform, transform.find_conflicts)
+ self.assertEqual('contents', file(self.wt.abspath('name')).read())
+ self.assertEqual(self.wt.path2id('name'), 'my_pretties')
+ self.assertIs(self.wt.is_executable('my_pretties'), True)
+ self.assertEqual(self.wt.path2id('oz'), 'oz-id')
+ self.assertEqual(self.wt.path2id('oz/dorothy'), 'dorothy-id')
+ self.assertEqual(self.wt.path2id('oz/dorothy/toto'), 'toto-id')
+
+ self.assertEqual('toto-contents',
+ self.wt.get_file_byname('oz/dorothy/toto').read())
+ self.assertIs(self.wt.is_executable('toto-id'), False)
+
+ def test_tree_reference(self):
+ transform, root = self.get_transform()
+ tree = transform._tree
+ trans_id = transform.new_directory('reference', root, 'subtree-id')
+ transform.set_tree_reference('subtree-revision', trans_id)
+ transform.apply()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ self.assertEqual('subtree-revision',
+ tree.root_inventory['subtree-id'].reference_revision)
+
+ def test_conflicts(self):
+ transform, root = self.get_transform()
+ trans_id = transform.new_file('name', root, 'contents',
+ 'my_pretties')
+ self.assertEqual(len(transform.find_conflicts()), 0)
+ trans_id2 = transform.new_file('name', root, 'Crontents', 'toto')
+ self.assertEqual(transform.find_conflicts(),
+ [('duplicate', trans_id, trans_id2, 'name')])
+ self.assertRaises(MalformedTransform, transform.apply)
+ transform.adjust_path('name', trans_id, trans_id2)
+ self.assertEqual(transform.find_conflicts(),
+ [('non-directory parent', trans_id)])
+ tinman_id = transform.trans_id_tree_path('tinman')
+ transform.adjust_path('name', tinman_id, trans_id2)
+ self.assertEqual(transform.find_conflicts(),
+ [('unversioned parent', tinman_id),
+ ('missing parent', tinman_id)])
+ lion_id = transform.create_path('lion', root)
+ self.assertEqual(transform.find_conflicts(),
+ [('unversioned parent', tinman_id),
+ ('missing parent', tinman_id)])
+ transform.adjust_path('name', lion_id, trans_id2)
+ self.assertEqual(transform.find_conflicts(),
+ [('unversioned parent', lion_id),
+ ('missing parent', lion_id)])
+ transform.version_file("Courage", lion_id)
+ self.assertEqual(transform.find_conflicts(),
+ [('missing parent', lion_id),
+ ('versioning no contents', lion_id)])
+ transform.adjust_path('name2', root, trans_id2)
+ self.assertEqual(transform.find_conflicts(),
+ [('versioning no contents', lion_id)])
+ transform.create_file('Contents, okay?', lion_id)
+ transform.adjust_path('name2', trans_id2, trans_id2)
+ self.assertEqual(transform.find_conflicts(),
+ [('parent loop', trans_id2),
+ ('non-directory parent', trans_id2)])
+ transform.adjust_path('name2', root, trans_id2)
+ oz_id = transform.new_directory('oz', root)
+ transform.set_executability(True, oz_id)
+ self.assertEqual(transform.find_conflicts(),
+ [('unversioned executability', oz_id)])
+ transform.version_file('oz-id', oz_id)
+ self.assertEqual(transform.find_conflicts(),
+ [('non-file executability', oz_id)])
+ transform.set_executability(None, oz_id)
+ tip_id = transform.new_file('tip', oz_id, 'ozma', 'tip-id')
+ transform.apply()
+ self.assertEqual(self.wt.path2id('name'), 'my_pretties')
+ self.assertEqual('contents', file(self.wt.abspath('name')).read())
+ transform2, root = self.get_transform()
+ oz_id = transform2.trans_id_tree_file_id('oz-id')
+ newtip = transform2.new_file('tip', oz_id, 'other', 'tip-id')
+ result = transform2.find_conflicts()
+ fp = FinalPaths(transform2)
+ self.assert_('oz/tip' in transform2._tree_path_ids)
+ self.assertEqual(fp.get_path(newtip), pathjoin('oz', 'tip'))
+ self.assertEqual(len(result), 2)
+ self.assertEqual((result[0][0], result[0][1]),
+ ('duplicate', newtip))
+ self.assertEqual((result[1][0], result[1][2]),
+ ('duplicate id', newtip))
+ transform2.finalize()
+ transform3 = TreeTransform(self.wt)
+ self.addCleanup(transform3.finalize)
+ oz_id = transform3.trans_id_tree_file_id('oz-id')
+ transform3.delete_contents(oz_id)
+ self.assertEqual(transform3.find_conflicts(),
+ [('missing parent', oz_id)])
+ root_id = transform3.root
+ tip_id = transform3.trans_id_tree_file_id('tip-id')
+ transform3.adjust_path('tip', root_id, tip_id)
+ transform3.apply()
+
+ def test_conflict_on_case_insensitive(self):
+ tree = self.make_branch_and_tree('tree')
+ # Don't try this at home, kids!
+ # Force the tree to report that it is case sensitive, for conflict
+ # resolution tests
+ tree.case_sensitive = True
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ transform.new_file('FiLe', transform.root, 'content')
+ result = transform.find_conflicts()
+ self.assertEqual([], result)
+ transform.finalize()
+ # Force the tree to report that it is case insensitive, for conflict
+ # generation tests
+ tree.case_sensitive = False
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ transform.new_file('FiLe', transform.root, 'content')
+ result = transform.find_conflicts()
+ self.assertEqual([('duplicate', 'new-1', 'new-2', 'file')], result)
+
+ def test_conflict_on_case_insensitive_existing(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/FiLe'])
+ # Don't try this at home, kids!
+ # Force the tree to report that it is case sensitive, for conflict
+ # resolution tests
+ tree.case_sensitive = True
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ result = transform.find_conflicts()
+ self.assertEqual([], result)
+ transform.finalize()
+ # Force the tree to report that it is case insensitive, for conflict
+ # generation tests
+ tree.case_sensitive = False
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ result = transform.find_conflicts()
+ self.assertEqual([('duplicate', 'new-1', 'new-2', 'file')], result)
+
+ def test_resolve_case_insensitive_conflict(self):
+ tree = self.make_branch_and_tree('tree')
+ # Don't try this at home, kids!
+ # Force the tree to report that it is case insensitive, for conflict
+ # resolution tests
+ tree.case_sensitive = False
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ transform.new_file('FiLe', transform.root, 'content')
+ resolve_conflicts(transform)
+ transform.apply()
+ self.assertPathExists('tree/file')
+ self.assertPathExists('tree/FiLe.moved')
+
+ def test_resolve_checkout_case_conflict(self):
+ tree = self.make_branch_and_tree('tree')
+ # Don't try this at home, kids!
+ # Force the tree to report that it is case insensitive, for conflict
+ # resolution tests
+ tree.case_sensitive = False
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ transform.new_file('FiLe', transform.root, 'content')
+ resolve_conflicts(transform,
+ pass_func=lambda t, c: resolve_checkout(t, c, []))
+ transform.apply()
+ self.assertPathExists('tree/file')
+ self.assertPathExists('tree/FiLe.moved')
+
+ def test_apply_case_conflict(self):
+ """Ensure that a transform with case conflicts can always be applied"""
+ tree = self.make_branch_and_tree('tree')
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ transform.new_file('file', transform.root, 'content')
+ transform.new_file('FiLe', transform.root, 'content')
+ dir = transform.new_directory('dir', transform.root)
+ transform.new_file('dirfile', dir, 'content')
+ transform.new_file('dirFiLe', dir, 'content')
+ resolve_conflicts(transform)
+ transform.apply()
+ self.assertPathExists('tree/file')
+ if not os.path.exists('tree/FiLe.moved'):
+ self.assertPathExists('tree/FiLe')
+ self.assertPathExists('tree/dir/dirfile')
+ if not os.path.exists('tree/dir/dirFiLe.moved'):
+ self.assertPathExists('tree/dir/dirFiLe')
+
+ def test_case_insensitive_limbo(self):
+ tree = self.make_branch_and_tree('tree')
+ # Don't try this at home, kids!
+ # Force the tree to report that it is case insensitive
+ tree.case_sensitive = False
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ dir = transform.new_directory('dir', transform.root)
+ first = transform.new_file('file', dir, 'content')
+ second = transform.new_file('FiLe', dir, 'content')
+ self.assertContainsRe(transform._limbo_name(first), 'new-1/file')
+ self.assertNotContainsRe(transform._limbo_name(second), 'new-1/FiLe')
+
+ def test_adjust_path_updates_child_limbo_names(self):
+ tree = self.make_branch_and_tree('tree')
+ transform = TreeTransform(tree)
+ self.addCleanup(transform.finalize)
+ foo_id = transform.new_directory('foo', transform.root)
+ bar_id = transform.new_directory('bar', foo_id)
+ baz_id = transform.new_directory('baz', bar_id)
+ qux_id = transform.new_directory('qux', baz_id)
+ transform.adjust_path('quxx', foo_id, bar_id)
+ self.assertStartsWith(transform._limbo_name(qux_id),
+ transform._limbo_name(bar_id))
+
+ def test_add_del(self):
+ start, root = self.get_transform()
+ start.new_directory('a', root, 'a')
+ start.apply()
+ transform, root = self.get_transform()
+ transform.delete_versioned(transform.trans_id_tree_file_id('a'))
+ transform.new_directory('a', root, 'a')
+ transform.apply()
+
+ def test_unversioning(self):
+ create_tree, root = self.get_transform()
+ parent_id = create_tree.new_directory('parent', root, 'parent-id')
+ create_tree.new_file('child', parent_id, 'child', 'child-id')
+ create_tree.apply()
+ unversion = TreeTransform(self.wt)
+ self.addCleanup(unversion.finalize)
+ parent = unversion.trans_id_tree_path('parent')
+ unversion.unversion_file(parent)
+ self.assertEqual(unversion.find_conflicts(),
+ [('unversioned parent', parent_id)])
+ file_id = unversion.trans_id_tree_file_id('child-id')
+ unversion.unversion_file(file_id)
+ unversion.apply()
+
+ def test_name_invariants(self):
+ create_tree, root = self.get_transform()
+ # prepare tree
+ root = create_tree.root
+ create_tree.new_file('name1', root, 'hello1', 'name1')
+ create_tree.new_file('name2', root, 'hello2', 'name2')
+ ddir = create_tree.new_directory('dying_directory', root, 'ddir')
+ create_tree.new_file('dying_file', ddir, 'goodbye1', 'dfile')
+ create_tree.new_file('moving_file', ddir, 'later1', 'mfile')
+ create_tree.new_file('moving_file2', root, 'later2', 'mfile2')
+ create_tree.apply()
+
+ mangle_tree,root = self.get_transform()
+ root = mangle_tree.root
+ #swap names
+ name1 = mangle_tree.trans_id_tree_file_id('name1')
+ name2 = mangle_tree.trans_id_tree_file_id('name2')
+ mangle_tree.adjust_path('name2', root, name1)
+ mangle_tree.adjust_path('name1', root, name2)
+
+ #tests for deleting parent directories
+ ddir = mangle_tree.trans_id_tree_file_id('ddir')
+ mangle_tree.delete_contents(ddir)
+ dfile = mangle_tree.trans_id_tree_file_id('dfile')
+ mangle_tree.delete_versioned(dfile)
+ mangle_tree.unversion_file(dfile)
+ mfile = mangle_tree.trans_id_tree_file_id('mfile')
+ mangle_tree.adjust_path('mfile', root, mfile)
+
+ #tests for adding parent directories
+ newdir = mangle_tree.new_directory('new_directory', root, 'newdir')
+ mfile2 = mangle_tree.trans_id_tree_file_id('mfile2')
+ mangle_tree.adjust_path('mfile2', newdir, mfile2)
+ mangle_tree.new_file('newfile', newdir, 'hello3', 'dfile')
+ self.assertEqual(mangle_tree.final_file_id(mfile2), 'mfile2')
+ self.assertEqual(mangle_tree.final_parent(mfile2), newdir)
+ self.assertEqual(mangle_tree.final_file_id(mfile2), 'mfile2')
+ mangle_tree.apply()
+ self.assertEqual(file(self.wt.abspath('name1')).read(), 'hello2')
+ self.assertEqual(file(self.wt.abspath('name2')).read(), 'hello1')
+ mfile2_path = self.wt.abspath(pathjoin('new_directory','mfile2'))
+ self.assertEqual(mangle_tree.final_parent(mfile2), newdir)
+ self.assertEqual(file(mfile2_path).read(), 'later2')
+ self.assertEqual(self.wt.id2path('mfile2'), 'new_directory/mfile2')
+ self.assertEqual(self.wt.path2id('new_directory/mfile2'), 'mfile2')
+ newfile_path = self.wt.abspath(pathjoin('new_directory','newfile'))
+ self.assertEqual(file(newfile_path).read(), 'hello3')
+ self.assertEqual(self.wt.path2id('dying_directory'), 'ddir')
+ self.assertIs(self.wt.path2id('dying_directory/dying_file'), None)
+ mfile2_path = self.wt.abspath(pathjoin('new_directory','mfile2'))
+
+ def test_both_rename(self):
+ create_tree,root = self.get_transform()
+ newdir = create_tree.new_directory('selftest', root, 'selftest-id')
+ create_tree.new_file('blackbox.py', newdir, 'hello1', 'blackbox-id')
+ create_tree.apply()
+ mangle_tree,root = self.get_transform()
+ selftest = mangle_tree.trans_id_tree_file_id('selftest-id')
+ blackbox = mangle_tree.trans_id_tree_file_id('blackbox-id')
+ mangle_tree.adjust_path('test', root, selftest)
+ mangle_tree.adjust_path('test_too_much', root, selftest)
+ mangle_tree.set_executability(True, blackbox)
+ mangle_tree.apply()
+
+ def test_both_rename2(self):
+ create_tree,root = self.get_transform()
+ bzrlib = create_tree.new_directory('bzrlib', root, 'bzrlib-id')
+ tests = create_tree.new_directory('tests', bzrlib, 'tests-id')
+ blackbox = create_tree.new_directory('blackbox', tests, 'blackbox-id')
+ create_tree.new_file('test_too_much.py', blackbox, 'hello1',
+ 'test_too_much-id')
+ create_tree.apply()
+ mangle_tree,root = self.get_transform()
+ bzrlib = mangle_tree.trans_id_tree_file_id('bzrlib-id')
+ tests = mangle_tree.trans_id_tree_file_id('tests-id')
+ test_too_much = mangle_tree.trans_id_tree_file_id('test_too_much-id')
+ mangle_tree.adjust_path('selftest', bzrlib, tests)
+ mangle_tree.adjust_path('blackbox.py', tests, test_too_much)
+ mangle_tree.set_executability(True, test_too_much)
+ mangle_tree.apply()
+
+ def test_both_rename3(self):
+ create_tree,root = self.get_transform()
+ tests = create_tree.new_directory('tests', root, 'tests-id')
+ create_tree.new_file('test_too_much.py', tests, 'hello1',
+ 'test_too_much-id')
+ create_tree.apply()
+ mangle_tree,root = self.get_transform()
+ tests = mangle_tree.trans_id_tree_file_id('tests-id')
+ test_too_much = mangle_tree.trans_id_tree_file_id('test_too_much-id')
+ mangle_tree.adjust_path('selftest', root, tests)
+ mangle_tree.adjust_path('blackbox.py', tests, test_too_much)
+ mangle_tree.set_executability(True, test_too_much)
+ mangle_tree.apply()
+
+ def test_move_dangling_ie(self):
+ create_tree, root = self.get_transform()
+ # prepare tree
+ root = create_tree.root
+ create_tree.new_file('name1', root, 'hello1', 'name1')
+ create_tree.apply()
+ delete_contents, root = self.get_transform()
+ file = delete_contents.trans_id_tree_file_id('name1')
+ delete_contents.delete_contents(file)
+ delete_contents.apply()
+ move_id, root = self.get_transform()
+ name1 = move_id.trans_id_tree_file_id('name1')
+ newdir = move_id.new_directory('dir', root, 'newdir')
+ move_id.adjust_path('name2', newdir, name1)
+ move_id.apply()
+
+ def test_replace_dangling_ie(self):
+ create_tree, root = self.get_transform()
+ # prepare tree
+ root = create_tree.root
+ create_tree.new_file('name1', root, 'hello1', 'name1')
+ create_tree.apply()
+ delete_contents = TreeTransform(self.wt)
+ self.addCleanup(delete_contents.finalize)
+ file = delete_contents.trans_id_tree_file_id('name1')
+ delete_contents.delete_contents(file)
+ delete_contents.apply()
+ delete_contents.finalize()
+ replace = TreeTransform(self.wt)
+ self.addCleanup(replace.finalize)
+ name2 = replace.new_file('name2', root, 'hello2', 'name1')
+ conflicts = replace.find_conflicts()
+ name1 = replace.trans_id_tree_file_id('name1')
+ self.assertEqual(conflicts, [('duplicate id', name1, name2)])
+ resolve_conflicts(replace)
+ replace.apply()
+
+ def _test_symlinks(self, link_name1,link_target1,
+ link_name2, link_target2):
+
+ def ozpath(p): return 'oz/' + p
+
+ self.requireFeature(SymlinkFeature)
+ transform, root = self.get_transform()
+ oz_id = transform.new_directory('oz', root, 'oz-id')
+ wizard = transform.new_symlink(link_name1, oz_id, link_target1,
+ 'wizard-id')
+ wiz_id = transform.create_path(link_name2, oz_id)
+ transform.create_symlink(link_target2, wiz_id)
+ transform.version_file('wiz-id2', wiz_id)
+ transform.set_executability(True, wiz_id)
+ self.assertEqual(transform.find_conflicts(),
+ [('non-file executability', wiz_id)])
+ transform.set_executability(None, wiz_id)
+ transform.apply()
+ self.assertEqual(self.wt.path2id(ozpath(link_name1)), 'wizard-id')
+ self.assertEqual('symlink',
+ file_kind(self.wt.abspath(ozpath(link_name1))))
+ self.assertEqual(link_target2,
+ osutils.readlink(self.wt.abspath(ozpath(link_name2))))
+ self.assertEqual(link_target1,
+ osutils.readlink(self.wt.abspath(ozpath(link_name1))))
+
+ def test_symlinks(self):
+ self._test_symlinks('wizard', 'wizard-target',
+ 'wizard2', 'behind_curtain')
+
+ def test_symlinks_unicode(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self._test_symlinks(u'\N{Euro Sign}wizard',
+ u'wizard-targ\N{Euro Sign}t',
+ u'\N{Euro Sign}wizard2',
+ u'b\N{Euro Sign}hind_curtain')
+
+ def test_unable_create_symlink(self):
+ def tt_helper():
+ wt = self.make_branch_and_tree('.')
+ tt = TreeTransform(wt) # TreeTransform obtains write lock
+ try:
+ tt.new_symlink('foo', tt.root, 'bar')
+ tt.apply()
+ finally:
+ wt.unlock()
+ os_symlink = getattr(os, 'symlink', None)
+ os.symlink = None
+ try:
+ err = self.assertRaises(errors.UnableCreateSymlink, tt_helper)
+ self.assertEquals(
+ "Unable to create symlink 'foo' on this platform",
+ str(err))
+ finally:
+ if os_symlink:
+ os.symlink = os_symlink
+
+ def get_conflicted(self):
+ create,root = self.get_transform()
+ create.new_file('dorothy', root, 'dorothy', 'dorothy-id')
+ oz = create.new_directory('oz', root, 'oz-id')
+ create.new_directory('emeraldcity', oz, 'emerald-id')
+ create.apply()
+ conflicts,root = self.get_transform()
+ # set up duplicate entry, duplicate id
+ new_dorothy = conflicts.new_file('dorothy', root, 'dorothy',
+ 'dorothy-id')
+ old_dorothy = conflicts.trans_id_tree_file_id('dorothy-id')
+ oz = conflicts.trans_id_tree_file_id('oz-id')
+ # set up DeletedParent parent conflict
+ conflicts.delete_versioned(oz)
+ emerald = conflicts.trans_id_tree_file_id('emerald-id')
+ # set up MissingParent conflict
+ munchkincity = conflicts.trans_id_file_id('munchkincity-id')
+ conflicts.adjust_path('munchkincity', root, munchkincity)
+ conflicts.new_directory('auntem', munchkincity, 'auntem-id')
+ # set up parent loop
+ conflicts.adjust_path('emeraldcity', emerald, emerald)
+ return conflicts, emerald, oz, old_dorothy, new_dorothy
+
+ def test_conflict_resolution(self):
+ conflicts, emerald, oz, old_dorothy, new_dorothy =\
+ self.get_conflicted()
+ resolve_conflicts(conflicts)
+ self.assertEqual(conflicts.final_name(old_dorothy), 'dorothy.moved')
+ self.assertIs(conflicts.final_file_id(old_dorothy), None)
+ self.assertEqual(conflicts.final_name(new_dorothy), 'dorothy')
+ self.assertEqual(conflicts.final_file_id(new_dorothy), 'dorothy-id')
+ self.assertEqual(conflicts.final_parent(emerald), oz)
+ conflicts.apply()
+
+ def test_cook_conflicts(self):
+ tt, emerald, oz, old_dorothy, new_dorothy = self.get_conflicted()
+ raw_conflicts = resolve_conflicts(tt)
+ cooked_conflicts = cook_conflicts(raw_conflicts, tt)
+ duplicate = DuplicateEntry('Moved existing file to', 'dorothy.moved',
+ 'dorothy', None, 'dorothy-id')
+ self.assertEqual(cooked_conflicts[0], duplicate)
+ duplicate_id = DuplicateID('Unversioned existing file',
+ 'dorothy.moved', 'dorothy', None,
+ 'dorothy-id')
+ self.assertEqual(cooked_conflicts[1], duplicate_id)
+ missing_parent = MissingParent('Created directory', 'munchkincity',
+ 'munchkincity-id')
+ deleted_parent = DeletingParent('Not deleting', 'oz', 'oz-id')
+ self.assertEqual(cooked_conflicts[2], missing_parent)
+ unversioned_parent = UnversionedParent('Versioned directory',
+ 'munchkincity',
+ 'munchkincity-id')
+ unversioned_parent2 = UnversionedParent('Versioned directory', 'oz',
+ 'oz-id')
+ self.assertEqual(cooked_conflicts[3], unversioned_parent)
+ parent_loop = ParentLoop('Cancelled move', 'oz/emeraldcity',
+ 'oz/emeraldcity', 'emerald-id', 'emerald-id')
+ self.assertEqual(cooked_conflicts[4], deleted_parent)
+ self.assertEqual(cooked_conflicts[5], unversioned_parent2)
+ self.assertEqual(cooked_conflicts[6], parent_loop)
+ self.assertEqual(len(cooked_conflicts), 7)
+ tt.finalize()
+
+ def test_string_conflicts(self):
+ tt, emerald, oz, old_dorothy, new_dorothy = self.get_conflicted()
+ raw_conflicts = resolve_conflicts(tt)
+ cooked_conflicts = cook_conflicts(raw_conflicts, tt)
+ tt.finalize()
+ conflicts_s = [unicode(c) for c in cooked_conflicts]
+ self.assertEqual(len(cooked_conflicts), len(conflicts_s))
+ self.assertEqual(conflicts_s[0], 'Conflict adding file dorothy. '
+ 'Moved existing file to '
+ 'dorothy.moved.')
+ self.assertEqual(conflicts_s[1], 'Conflict adding id to dorothy. '
+ 'Unversioned existing file '
+ 'dorothy.moved.')
+ self.assertEqual(conflicts_s[2], 'Conflict adding files to'
+ ' munchkincity. Created directory.')
+ self.assertEqual(conflicts_s[3], 'Conflict because munchkincity is not'
+ ' versioned, but has versioned'
+ ' children. Versioned directory.')
+ self.assertEqualDiff(conflicts_s[4], "Conflict: can't delete oz because it"
+ " is not empty. Not deleting.")
+ self.assertEqual(conflicts_s[5], 'Conflict because oz is not'
+ ' versioned, but has versioned'
+ ' children. Versioned directory.')
+ self.assertEqual(conflicts_s[6], 'Conflict moving oz/emeraldcity into'
+ ' oz/emeraldcity. Cancelled move.')
+
+ def prepare_wrong_parent_kind(self):
+ tt, root = self.get_transform()
+ tt.new_file('parent', root, 'contents', 'parent-id')
+ tt.apply()
+ tt, root = self.get_transform()
+ parent_id = tt.trans_id_file_id('parent-id')
+ tt.new_file('child,', parent_id, 'contents2', 'file-id')
+ return tt
+
+ def test_find_conflicts_wrong_parent_kind(self):
+ tt = self.prepare_wrong_parent_kind()
+ tt.find_conflicts()
+
+ def test_resolve_conflicts_wrong_existing_parent_kind(self):
+ tt = self.prepare_wrong_parent_kind()
+ raw_conflicts = resolve_conflicts(tt)
+ self.assertEqual(set([('non-directory parent', 'Created directory',
+ 'new-3')]), raw_conflicts)
+ cooked_conflicts = cook_conflicts(raw_conflicts, tt)
+ self.assertEqual([NonDirectoryParent('Created directory', 'parent.new',
+ 'parent-id')], cooked_conflicts)
+ tt.apply()
+ self.assertEqual(None, self.wt.path2id('parent'))
+ self.assertEqual('parent-id', self.wt.path2id('parent.new'))
+
+ def test_resolve_conflicts_wrong_new_parent_kind(self):
+ tt, root = self.get_transform()
+ parent_id = tt.new_directory('parent', root, 'parent-id')
+ tt.new_file('child,', parent_id, 'contents2', 'file-id')
+ tt.apply()
+ tt, root = self.get_transform()
+ parent_id = tt.trans_id_file_id('parent-id')
+ tt.delete_contents(parent_id)
+ tt.create_file('contents', parent_id)
+ raw_conflicts = resolve_conflicts(tt)
+ self.assertEqual(set([('non-directory parent', 'Created directory',
+ 'new-3')]), raw_conflicts)
+ tt.apply()
+ self.assertEqual(None, self.wt.path2id('parent'))
+ self.assertEqual('parent-id', self.wt.path2id('parent.new'))
+
+ def test_resolve_conflicts_wrong_parent_kind_unversioned(self):
+ tt, root = self.get_transform()
+ parent_id = tt.new_directory('parent', root)
+ tt.new_file('child,', parent_id, 'contents2')
+ tt.apply()
+ tt, root = self.get_transform()
+ parent_id = tt.trans_id_tree_path('parent')
+ tt.delete_contents(parent_id)
+ tt.create_file('contents', parent_id)
+ resolve_conflicts(tt)
+ tt.apply()
+ self.assertIs(None, self.wt.path2id('parent'))
+ self.assertIs(None, self.wt.path2id('parent.new'))
+
+ def test_resolve_conflicts_missing_parent(self):
+ wt = self.make_branch_and_tree('.')
+ tt = TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+ parent = tt.trans_id_file_id('parent-id')
+ tt.new_file('file', parent, 'Contents')
+ raw_conflicts = resolve_conflicts(tt)
+ # Since the directory doesn't exist, it's seen as 'missing', so
+ # 'resolve_conflicts' creates a conflict asking for it to be created.
+ self.assertLength(1, raw_conflicts)
+ self.assertEqual(('missing parent', 'Created directory', 'new-1'),
+ raw_conflicts.pop())
+ # apply fails since the missing directory doesn't exist
+ self.assertRaises(errors.NoFinalPath, tt.apply)
+
+ def test_moving_versioned_directories(self):
+ create, root = self.get_transform()
+ kansas = create.new_directory('kansas', root, 'kansas-id')
+ create.new_directory('house', kansas, 'house-id')
+ create.new_directory('oz', root, 'oz-id')
+ create.apply()
+ cyclone, root = self.get_transform()
+ oz = cyclone.trans_id_tree_file_id('oz-id')
+ house = cyclone.trans_id_tree_file_id('house-id')
+ cyclone.adjust_path('house', oz, house)
+ cyclone.apply()
+
+ def test_moving_root(self):
+ create, root = self.get_transform()
+ fun = create.new_directory('fun', root, 'fun-id')
+ create.new_directory('sun', root, 'sun-id')
+ create.new_directory('moon', root, 'moon')
+ create.apply()
+ transform, root = self.get_transform()
+ transform.adjust_root_path('oldroot', fun)
+ new_root = transform.trans_id_tree_path('')
+ transform.version_file('new-root', new_root)
+ transform.apply()
+
+ def test_renames(self):
+ create, root = self.get_transform()
+ old = create.new_directory('old-parent', root, 'old-id')
+ intermediate = create.new_directory('intermediate', old, 'im-id')
+ myfile = create.new_file('myfile', intermediate, 'myfile-text',
+ 'myfile-id')
+ create.apply()
+ rename, root = self.get_transform()
+ old = rename.trans_id_file_id('old-id')
+ rename.adjust_path('new', root, old)
+ myfile = rename.trans_id_file_id('myfile-id')
+ rename.set_executability(True, myfile)
+ rename.apply()
+
+ def test_rename_fails(self):
+ self.requireFeature(features.not_running_as_root)
+ # see https://bugs.launchpad.net/bzr/+bug/491763
+ create, root_id = self.get_transform()
+ first_dir = create.new_directory('first-dir', root_id, 'first-id')
+ myfile = create.new_file('myfile', root_id, 'myfile-text',
+ 'myfile-id')
+ create.apply()
+ if os.name == "posix" and sys.platform != "cygwin":
+ # posix filesystems fail on renaming if the readonly bit is set
+ osutils.make_readonly(self.wt.abspath('first-dir'))
+ elif os.name == "nt":
+ # windows filesystems fail on renaming open files
+ self.addCleanup(file(self.wt.abspath('myfile')).close)
+ else:
+ self.skip("Don't know how to force a permissions error on rename")
+ # now transform to rename
+ rename_transform, root_id = self.get_transform()
+ file_trans_id = rename_transform.trans_id_file_id('myfile-id')
+ dir_id = rename_transform.trans_id_file_id('first-id')
+ rename_transform.adjust_path('newname', dir_id, file_trans_id)
+ e = self.assertRaises(errors.TransformRenameFailed,
+ rename_transform.apply)
+ # On *nix it looks like:
+ # "Failed to rename .../work/.bzr/checkout/limbo/new-1
+ # to .../first-dir/newname: [Errno 13] Permission denied"
+ # On Windows it looks like:
+ # "Failed to rename .../work/myfile to
+ # .../work/.bzr/checkout/limbo/new-1: [Errno 13] Permission denied"
+ # This test isn't concerned with exactly what the error looks like,
+ # and the strerror will vary across OS and locales, but asserts
+ # that the exception attributes are what we expect
+ self.assertEqual(e.errno, errno.EACCES)
+ if os.name == "posix":
+ self.assertEndsWith(e.to_path, "/first-dir/newname")
+ else:
+ self.assertEqual(os.path.basename(e.from_path), "myfile")
+
+ def test_set_executability_order(self):
+ """Ensure that executability behaves the same, no matter what order.
+
+ - create file and set executability simultaneously
+ - create file and set executability afterward
+ - unsetting the executability of a file whose executability has not been
+ declared should throw an exception (this may happen when a
+ merge attempts to create a file with a duplicate ID)
+ """
+ transform, root = self.get_transform()
+ wt = transform._tree
+ wt.lock_read()
+ self.addCleanup(wt.unlock)
+ transform.new_file('set_on_creation', root, 'Set on creation', 'soc',
+ True)
+ sac = transform.new_file('set_after_creation', root,
+ 'Set after creation', 'sac')
+ transform.set_executability(True, sac)
+ uws = transform.new_file('unset_without_set', root, 'Unset badly',
+ 'uws')
+ self.assertRaises(KeyError, transform.set_executability, None, uws)
+ transform.apply()
+ self.assertTrue(wt.is_executable('soc'))
+ self.assertTrue(wt.is_executable('sac'))
+
+ def test_preserve_mode(self):
+ """File mode is preserved when replacing content"""
+ if sys.platform == 'win32':
+ raise TestSkipped('chmod has no effect on win32')
+ transform, root = self.get_transform()
+ transform.new_file('file1', root, 'contents', 'file1-id', True)
+ transform.apply()
+ self.wt.lock_write()
+ self.addCleanup(self.wt.unlock)
+ self.assertTrue(self.wt.is_executable('file1-id'))
+ transform, root = self.get_transform()
+ file1_id = transform.trans_id_tree_file_id('file1-id')
+ transform.delete_contents(file1_id)
+ transform.create_file('contents2', file1_id)
+ transform.apply()
+ self.assertTrue(self.wt.is_executable('file1-id'))
+
+ def test__set_mode_stats_correctly(self):
+ """_set_mode stats to determine file mode."""
+ if sys.platform == 'win32':
+ raise TestSkipped('chmod has no effect on win32')
+
+ stat_paths = []
+ real_stat = os.stat
+ def instrumented_stat(path):
+ stat_paths.append(path)
+ return real_stat(path)
+
+ transform, root = self.get_transform()
+
+ bar1_id = transform.new_file('bar', root, 'bar contents 1\n',
+ file_id='bar-id-1', executable=False)
+ transform.apply()
+
+ transform, root = self.get_transform()
+ bar1_id = transform.trans_id_tree_path('bar')
+ bar2_id = transform.trans_id_tree_path('bar2')
+ try:
+ os.stat = instrumented_stat
+ transform.create_file('bar2 contents\n', bar2_id, mode_id=bar1_id)
+ finally:
+ os.stat = real_stat
+ transform.finalize()
+
+ bar1_abspath = self.wt.abspath('bar')
+ self.assertEqual([bar1_abspath], stat_paths)
+
+ def test_iter_changes(self):
+ self.wt.set_root_id('eert_toor')
+ transform, root = self.get_transform()
+ transform.new_file('old', root, 'blah', 'id-1', True)
+ transform.apply()
+ transform, root = self.get_transform()
+ try:
+ self.assertEqual([], list(transform.iter_changes()))
+ old = transform.trans_id_tree_file_id('id-1')
+ transform.unversion_file(old)
+ self.assertEqual([('id-1', ('old', None), False, (True, False),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', 'file'),
+ (True, True))], list(transform.iter_changes()))
+ transform.new_directory('new', root, 'id-1')
+ self.assertEqual([('id-1', ('old', 'new'), True, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'new'),
+ ('file', 'directory'),
+ (True, False))], list(transform.iter_changes()))
+ finally:
+ transform.finalize()
+
+ def test_iter_changes_new(self):
+ self.wt.set_root_id('eert_toor')
+ transform, root = self.get_transform()
+ transform.new_file('old', root, 'blah')
+ transform.apply()
+ transform, root = self.get_transform()
+ try:
+ old = transform.trans_id_tree_path('old')
+ transform.version_file('id-1', old)
+ self.assertEqual([('id-1', (None, 'old'), False, (False, True),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', 'file'),
+ (False, False))], list(transform.iter_changes()))
+ finally:
+ transform.finalize()
+
+ def test_iter_changes_modifications(self):
+ self.wt.set_root_id('eert_toor')
+ transform, root = self.get_transform()
+ transform.new_file('old', root, 'blah', 'id-1')
+ transform.new_file('new', root, 'blah')
+ transform.new_directory('subdir', root, 'subdir-id')
+ transform.apply()
+ transform, root = self.get_transform()
+ try:
+ old = transform.trans_id_tree_path('old')
+ subdir = transform.trans_id_tree_file_id('subdir-id')
+ new = transform.trans_id_tree_path('new')
+ self.assertEqual([], list(transform.iter_changes()))
+
+ #content deletion
+ transform.delete_contents(old)
+ self.assertEqual([('id-1', ('old', 'old'), True, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', None),
+ (False, False))], list(transform.iter_changes()))
+
+ #content change
+ transform.create_file('blah', old)
+ self.assertEqual([('id-1', ('old', 'old'), True, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', 'file'),
+ (False, False))], list(transform.iter_changes()))
+ transform.cancel_deletion(old)
+ self.assertEqual([('id-1', ('old', 'old'), True, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', 'file'),
+ (False, False))], list(transform.iter_changes()))
+ transform.cancel_creation(old)
+
+ # move file_id to a different file
+ self.assertEqual([], list(transform.iter_changes()))
+ transform.unversion_file(old)
+ transform.version_file('id-1', new)
+ transform.adjust_path('old', root, new)
+ self.assertEqual([('id-1', ('old', 'old'), True, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', 'file'),
+ (False, False))], list(transform.iter_changes()))
+ transform.cancel_versioning(new)
+ transform._removed_id = set()
+
+ #execute bit
+ self.assertEqual([], list(transform.iter_changes()))
+ transform.set_executability(True, old)
+ self.assertEqual([('id-1', ('old', 'old'), False, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'old'), ('file', 'file'),
+ (False, True))], list(transform.iter_changes()))
+ transform.set_executability(None, old)
+
+ # filename
+ self.assertEqual([], list(transform.iter_changes()))
+ transform.adjust_path('new', root, old)
+ transform._new_parent = {}
+ self.assertEqual([('id-1', ('old', 'new'), False, (True, True),
+ ('eert_toor', 'eert_toor'), ('old', 'new'), ('file', 'file'),
+ (False, False))], list(transform.iter_changes()))
+ transform._new_name = {}
+
+ # parent directory
+ self.assertEqual([], list(transform.iter_changes()))
+ transform.adjust_path('new', subdir, old)
+ transform._new_name = {}
+ self.assertEqual([('id-1', ('old', 'subdir/old'), False,
+ (True, True), ('eert_toor', 'subdir-id'), ('old', 'old'),
+ ('file', 'file'), (False, False))],
+ list(transform.iter_changes()))
+ transform._new_path = {}
+
+ finally:
+ transform.finalize()
+
+ def test_iter_changes_modified_bleed(self):
+ """Modified flag should not bleed from one change to another"""
+ self.wt.set_root_id('eert_toor')
+ # unfortunately, we have no guarantee that file1 (which is modified)
+ # will be applied before file2. And if it's applied after file2, it
+ # obviously can't bleed into file2's change output. But for now, it
+ # works.
+ transform, root = self.get_transform()
+ transform.new_file('file1', root, 'blah', 'id-1')
+ transform.new_file('file2', root, 'blah', 'id-2')
+ transform.apply()
+ transform, root = self.get_transform()
+ try:
+ transform.delete_contents(transform.trans_id_file_id('id-1'))
+ transform.set_executability(True,
+ transform.trans_id_file_id('id-2'))
+ self.assertEqual([('id-1', (u'file1', u'file1'), True, (True, True),
+ ('eert_toor', 'eert_toor'), ('file1', u'file1'),
+ ('file', None), (False, False)),
+ ('id-2', (u'file2', u'file2'), False, (True, True),
+ ('eert_toor', 'eert_toor'), ('file2', u'file2'),
+ ('file', 'file'), (False, True))],
+ list(transform.iter_changes()))
+ finally:
+ transform.finalize()
+
+ def test_iter_changes_move_missing(self):
+ """Test moving ids with no files around"""
+ self.wt.set_root_id('toor_eert')
+ # Need two steps because versioning a non-existent file is a conflict.
+ transform, root = self.get_transform()
+ transform.new_directory('floater', root, 'floater-id')
+ transform.apply()
+ transform, root = self.get_transform()
+ transform.delete_contents(transform.trans_id_tree_path('floater'))
+ transform.apply()
+ transform, root = self.get_transform()
+ floater = transform.trans_id_tree_path('floater')
+ try:
+ transform.adjust_path('flitter', root, floater)
+ self.assertEqual([('floater-id', ('floater', 'flitter'), False,
+ (True, True), ('toor_eert', 'toor_eert'), ('floater', 'flitter'),
+ (None, None), (False, False))], list(transform.iter_changes()))
+ finally:
+ transform.finalize()
+
+ def test_iter_changes_pointless(self):
+ """Ensure that no-ops are not treated as modifications"""
+ self.wt.set_root_id('eert_toor')
+ transform, root = self.get_transform()
+ transform.new_file('old', root, 'blah', 'id-1')
+ transform.new_directory('subdir', root, 'subdir-id')
+ transform.apply()
+ transform, root = self.get_transform()
+ try:
+ old = transform.trans_id_tree_path('old')
+ subdir = transform.trans_id_tree_file_id('subdir-id')
+ self.assertEqual([], list(transform.iter_changes()))
+ transform.delete_contents(subdir)
+ transform.create_directory(subdir)
+ transform.set_executability(False, old)
+ transform.unversion_file(old)
+ transform.version_file('id-1', old)
+ transform.adjust_path('old', root, old)
+ self.assertEqual([], list(transform.iter_changes()))
+ finally:
+ transform.finalize()
+
+ def test_rename_count(self):
+ transform, root = self.get_transform()
+ transform.new_file('name1', root, 'contents')
+ self.assertEqual(transform.rename_count, 0)
+ transform.apply()
+ self.assertEqual(transform.rename_count, 1)
+ transform2, root = self.get_transform()
+ transform2.adjust_path('name2', root,
+ transform2.trans_id_tree_path('name1'))
+ self.assertEqual(transform2.rename_count, 0)
+ transform2.apply()
+ self.assertEqual(transform2.rename_count, 2)
+
+ def test_change_parent(self):
+ """Ensure that after we change a parent, the results are still right.
+
+ Renames and parent changes on pending transforms can happen as part
+ of conflict resolution, and are explicitly permitted by the
+ TreeTransform API.
+
+ This test ensures they work correctly with the rename-avoidance
+ optimization.
+ """
+ transform, root = self.get_transform()
+ parent1 = transform.new_directory('parent1', root)
+ child1 = transform.new_file('child1', parent1, 'contents')
+ parent2 = transform.new_directory('parent2', root)
+ transform.adjust_path('child1', parent2, child1)
+ transform.apply()
+ self.assertPathDoesNotExist(self.wt.abspath('parent1/child1'))
+ self.assertPathExists(self.wt.abspath('parent2/child1'))
+ # rename limbo/new-1 => parent1, rename limbo/new-3 => parent2
+ # no rename for child1 (counting only renames during apply)
+ self.assertEqual(2, transform.rename_count)
+
+ def test_cancel_parent(self):
+ """Cancelling a parent doesn't cause deletion of a non-empty directory
+
+ This is like the test_change_parent, except that we cancel the parent
+ before adjusting the path. The transform must detect that the
+ directory is non-empty, and move children to safe locations.
+ """
+ transform, root = self.get_transform()
+ parent1 = transform.new_directory('parent1', root)
+ child1 = transform.new_file('child1', parent1, 'contents')
+ child2 = transform.new_file('child2', parent1, 'contents')
+ try:
+ transform.cancel_creation(parent1)
+ except OSError:
+ self.fail('Failed to move child1 before deleting parent1')
+ transform.cancel_creation(child2)
+ transform.create_directory(parent1)
+ try:
+ transform.cancel_creation(parent1)
+ # If the transform incorrectly believes that child2 is still in
+ # parent1's limbo directory, it will try to rename it and fail
+ # because it was already moved by the first cancel_creation.
+ except OSError:
+ self.fail('Transform still thinks child2 is a child of parent1')
+ parent2 = transform.new_directory('parent2', root)
+ transform.adjust_path('child1', parent2, child1)
+ transform.apply()
+ self.assertPathDoesNotExist(self.wt.abspath('parent1'))
+ self.assertPathExists(self.wt.abspath('parent2/child1'))
+ # rename limbo/new-3 => parent2, rename limbo/new-2 => child1
+ self.assertEqual(2, transform.rename_count)
+
+ def test_adjust_and_cancel(self):
+ """Make sure adjust_path keeps track of limbo children properly"""
+ transform, root = self.get_transform()
+ parent1 = transform.new_directory('parent1', root)
+ child1 = transform.new_file('child1', parent1, 'contents')
+ parent2 = transform.new_directory('parent2', root)
+ transform.adjust_path('child1', parent2, child1)
+ transform.cancel_creation(child1)
+ try:
+ transform.cancel_creation(parent1)
+ # if the transform thinks child1 is still in parent1's limbo
+ # directory, it will attempt to move it and fail.
+ except OSError:
+ self.fail('Transform still thinks child1 is a child of parent1')
+ transform.finalize()
+
+ def test_noname_contents(self):
+ """TreeTransform should permit deferring naming files."""
+ transform, root = self.get_transform()
+ parent = transform.trans_id_file_id('parent-id')
+ try:
+ transform.create_directory(parent)
+ except KeyError:
+ self.fail("Can't handle contents with no name")
+ transform.finalize()
+
+ def test_noname_contents_nested(self):
+ """TreeTransform should permit deferring naming files."""
+ transform, root = self.get_transform()
+ parent = transform.trans_id_file_id('parent-id')
+ try:
+ transform.create_directory(parent)
+ except KeyError:
+ self.fail("Can't handle contents with no name")
+ child = transform.new_directory('child', parent)
+ transform.adjust_path('parent', root, parent)
+ transform.apply()
+ self.assertPathExists(self.wt.abspath('parent/child'))
+ self.assertEqual(1, transform.rename_count)
+
+ def test_reuse_name(self):
+ """Avoid reusing the same limbo name for different files"""
+ transform, root = self.get_transform()
+ parent = transform.new_directory('parent', root)
+ child1 = transform.new_directory('child', parent)
+ try:
+ child2 = transform.new_directory('child', parent)
+ except OSError:
+ self.fail('Transform tried to use the same limbo name twice')
+ transform.adjust_path('child2', parent, child2)
+ transform.apply()
+ # limbo/new-1 => parent, limbo/new-3 => parent/child2
+ # child2 is put into top-level limbo because child1 has already
+ # claimed the direct limbo path when child2 is created. There is no
+ # advantage in renaming files once they're in top-level limbo, except
+ # as part of apply.
+ self.assertEqual(2, transform.rename_count)
+
+ def test_reuse_when_first_moved(self):
+ """Don't avoid direct paths when it is safe to use them"""
+ transform, root = self.get_transform()
+ parent = transform.new_directory('parent', root)
+ child1 = transform.new_directory('child', parent)
+ transform.adjust_path('child1', parent, child1)
+ child2 = transform.new_directory('child', parent)
+ transform.apply()
+ # limbo/new-1 => parent
+ self.assertEqual(1, transform.rename_count)
+
+ def test_reuse_after_cancel(self):
+ """Don't avoid direct paths when it is safe to use them"""
+ transform, root = self.get_transform()
+ parent2 = transform.new_directory('parent2', root)
+ child1 = transform.new_directory('child1', parent2)
+ transform.cancel_creation(parent2)
+ transform.create_directory(parent2)
+ child2 = transform.new_directory('child1', parent2)
+ transform.adjust_path('child2', parent2, child1)
+ transform.apply()
+ # limbo/new-1 => parent2, limbo/new-2 => parent2/child1
+ self.assertEqual(2, transform.rename_count)
+
+ def test_finalize_order(self):
+ """Finalize must be done in child-to-parent order"""
+ transform, root = self.get_transform()
+ parent = transform.new_directory('parent', root)
+ child = transform.new_directory('child', parent)
+ try:
+ transform.finalize()
+ except OSError:
+ self.fail('Tried to remove parent before child')
+
+ def test_cancel_with_cancelled_child_should_succeed(self):
+ transform, root = self.get_transform()
+ parent = transform.new_directory('parent', root)
+ child = transform.new_directory('child', parent)
+ transform.cancel_creation(child)
+ transform.cancel_creation(parent)
+ transform.finalize()
+
+ def test_rollback_on_directory_clash(self):
+ def tt_helper():
+ wt = self.make_branch_and_tree('.')
+ tt = TreeTransform(wt) # TreeTransform obtains write lock
+ try:
+ foo = tt.new_directory('foo', tt.root)
+ tt.new_file('bar', foo, 'foobar')
+ baz = tt.new_directory('baz', tt.root)
+ tt.new_file('qux', baz, 'quux')
+ # Ask for a rename 'foo' -> 'baz'
+ tt.adjust_path('baz', tt.root, foo)
+ # Lie to tt that we've already resolved all conflicts.
+ tt.apply(no_conflicts=True)
+ except:
+ wt.unlock()
+ raise
+ # The rename will fail because the target directory is not empty,
+ # though the error raised is FileExists.
+ err = self.assertRaises(errors.FileExists, tt_helper)
+ self.assertEndsWith(err.path, "/baz")
+
+ def test_two_directories_clash(self):
+ def tt_helper():
+ wt = self.make_branch_and_tree('.')
+ tt = TreeTransform(wt) # TreeTransform obtains write lock
+ try:
+ foo_1 = tt.new_directory('foo', tt.root)
+ tt.new_directory('bar', foo_1)
+ # Adding the same directory with a different content
+ foo_2 = tt.new_directory('foo', tt.root)
+ tt.new_directory('baz', foo_2)
+ # Lie to tt that we've already resolved all conflicts.
+ tt.apply(no_conflicts=True)
+ except:
+ wt.unlock()
+ raise
+ err = self.assertRaises(errors.FileExists, tt_helper)
+ self.assertEndsWith(err.path, "/foo")
+
+ def test_two_directories_clash_finalize(self):
+ def tt_helper():
+ wt = self.make_branch_and_tree('.')
+ tt = TreeTransform(wt) # TreeTransform obtains write lock
+ try:
+ foo_1 = tt.new_directory('foo', tt.root)
+ tt.new_directory('bar', foo_1)
+ # Adding the same directory with a different content
+ foo_2 = tt.new_directory('foo', tt.root)
+ tt.new_directory('baz', foo_2)
+ # Lie to tt that we've already resolved all conflicts.
+ tt.apply(no_conflicts=True)
+ except:
+ tt.finalize()
+ raise
+ err = self.assertRaises(errors.FileExists, tt_helper)
+ self.assertEndsWith(err.path, "/foo")
+
+ def test_file_to_directory(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ wt.add(['foo'])
+ wt.commit("one")
+ tt = TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+ foo_trans_id = tt.trans_id_tree_path("foo")
+ tt.delete_contents(foo_trans_id)
+ tt.create_directory(foo_trans_id)
+ bar_trans_id = tt.trans_id_tree_path("foo/bar")
+ tt.create_file(["aa\n"], bar_trans_id)
+ tt.version_file("bar-1", bar_trans_id)
+ tt.apply()
+ self.assertPathExists("foo/bar")
+ wt.lock_read()
+ try:
+ self.assertEqual(wt.kind(wt.path2id("foo")), "directory")
+ finally:
+ wt.unlock()
+ wt.commit("two")
+ changes = wt.changes_from(wt.basis_tree())
+ self.assertFalse(changes.has_changed(), changes)
+
+ def test_file_to_symlink(self):
+ self.requireFeature(SymlinkFeature)
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ wt.add(['foo'])
+ wt.commit("one")
+ tt = TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+ foo_trans_id = tt.trans_id_tree_path("foo")
+ tt.delete_contents(foo_trans_id)
+ tt.create_symlink("bar", foo_trans_id)
+ tt.apply()
+ self.assertPathExists("foo")
+ wt.lock_read()
+ self.addCleanup(wt.unlock)
+ self.assertEqual(wt.kind(wt.path2id("foo")), "symlink")
+
+ def test_dir_to_file(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo/', 'foo/bar'])
+ wt.add(['foo', 'foo/bar'])
+ wt.commit("one")
+ tt = TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+ foo_trans_id = tt.trans_id_tree_path("foo")
+ bar_trans_id = tt.trans_id_tree_path("foo/bar")
+ tt.delete_contents(foo_trans_id)
+ tt.delete_versioned(bar_trans_id)
+ tt.create_file(["aa\n"], foo_trans_id)
+ tt.apply()
+ self.assertPathExists("foo")
+ wt.lock_read()
+ self.addCleanup(wt.unlock)
+ self.assertEqual(wt.kind(wt.path2id("foo")), "file")
+
+ def test_dir_to_hardlink(self):
+ self.requireFeature(HardlinkFeature)
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['foo/', 'foo/bar'])
+ wt.add(['foo', 'foo/bar'])
+ wt.commit("one")
+ tt = TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+ foo_trans_id = tt.trans_id_tree_path("foo")
+ bar_trans_id = tt.trans_id_tree_path("foo/bar")
+ tt.delete_contents(foo_trans_id)
+ tt.delete_versioned(bar_trans_id)
+ self.build_tree(['baz'])
+ tt.create_hardlink("baz", foo_trans_id)
+ tt.apply()
+ self.assertPathExists("foo")
+ self.assertPathExists("baz")
+ wt.lock_read()
+ self.addCleanup(wt.unlock)
+ self.assertEqual(wt.kind(wt.path2id("foo")), "file")
+
+ def test_no_final_path(self):
+ transform, root = self.get_transform()
+ trans_id = transform.trans_id_file_id('foo')
+ transform.create_file('bar', trans_id)
+ transform.cancel_creation(trans_id)
+ transform.apply()
+
+ def test_create_from_tree(self):
+ tree1 = self.make_branch_and_tree('tree1')
+ self.build_tree_contents([('tree1/foo/',), ('tree1/bar', 'baz')])
+ tree1.add(['foo', 'bar'], ['foo-id', 'bar-id'])
+ tree2 = self.make_branch_and_tree('tree2')
+ tt = TreeTransform(tree2)
+ foo_trans_id = tt.create_path('foo', tt.root)
+ create_from_tree(tt, foo_trans_id, tree1, 'foo-id')
+ bar_trans_id = tt.create_path('bar', tt.root)
+ create_from_tree(tt, bar_trans_id, tree1, 'bar-id')
+ tt.apply()
+ self.assertEqual('directory', osutils.file_kind('tree2/foo'))
+ self.assertFileEqual('baz', 'tree2/bar')
+
+ def test_create_from_tree_bytes(self):
+ """Provided lines are used instead of tree content."""
+ tree1 = self.make_branch_and_tree('tree1')
+ self.build_tree_contents([('tree1/foo', 'bar'),])
+ tree1.add('foo', 'foo-id')
+ tree2 = self.make_branch_and_tree('tree2')
+ tt = TreeTransform(tree2)
+ foo_trans_id = tt.create_path('foo', tt.root)
+ create_from_tree(tt, foo_trans_id, tree1, 'foo-id', bytes='qux')
+ tt.apply()
+ self.assertFileEqual('qux', 'tree2/foo')
+
+ def test_create_from_tree_symlink(self):
+ self.requireFeature(SymlinkFeature)
+ tree1 = self.make_branch_and_tree('tree1')
+ os.symlink('bar', 'tree1/foo')
+ tree1.add('foo', 'foo-id')
+ tt = TreeTransform(self.make_branch_and_tree('tree2'))
+ foo_trans_id = tt.create_path('foo', tt.root)
+ create_from_tree(tt, foo_trans_id, tree1, 'foo-id')
+ tt.apply()
+ self.assertEqual('bar', os.readlink('tree2/foo'))
+
+
+class TransformGroup(object):
+
+ def __init__(self, dirname, root_id):
+ self.name = dirname
+ os.mkdir(dirname)
+ self.wt = ControlDir.create_standalone_workingtree(dirname)
+ self.wt.set_root_id(root_id)
+ self.b = self.wt.branch
+ self.tt = TreeTransform(self.wt)
+ self.root = self.tt.trans_id_tree_file_id(self.wt.get_root_id())
+
+
+def conflict_text(tree, merge):
+ template = '%s TREE\n%s%s\n%s%s MERGE-SOURCE\n'
+ return template % ('<' * 7, tree, '=' * 7, merge, '>' * 7)
+
+
+class TestInventoryAltered(tests.TestCaseWithTransport):
+
+ def test_inventory_altered_unchanged(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo'])
+ tree.add('foo', 'foo-id')
+ with TransformPreview(tree) as tt:
+ self.assertEqual([], tt._inventory_altered())
+
+ def test_inventory_altered_changed_parent_id(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo'])
+ tree.add('foo', 'foo-id')
+ with TransformPreview(tree) as tt:
+ tt.unversion_file(tt.root)
+ tt.version_file('new-id', tt.root)
+ foo_trans_id = tt.trans_id_tree_file_id('foo-id')
+ foo_tuple = ('foo', foo_trans_id)
+ root_tuple = ('', tt.root)
+ self.assertEqual([root_tuple, foo_tuple], tt._inventory_altered())
+
+ def test_inventory_altered_noop_changed_parent_id(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo'])
+ tree.add('foo', 'foo-id')
+ with TransformPreview(tree) as tt:
+ tt.unversion_file(tt.root)
+ tt.version_file(tree.get_root_id(), tt.root)
+ foo_trans_id = tt.trans_id_tree_file_id('foo-id')
+ self.assertEqual([], tt._inventory_altered())
+
+
+class TestTransformMerge(TestCaseInTempDir):
+
+ def test_text_merge(self):
+ root_id = generate_ids.gen_root_id()
+ base = TransformGroup("base", root_id)
+ base.tt.new_file('a', base.root, 'a\nb\nc\nd\be\n', 'a')
+ base.tt.new_file('b', base.root, 'b1', 'b')
+ base.tt.new_file('c', base.root, 'c', 'c')
+ base.tt.new_file('d', base.root, 'd', 'd')
+ base.tt.new_file('e', base.root, 'e', 'e')
+ base.tt.new_file('f', base.root, 'f', 'f')
+ base.tt.new_directory('g', base.root, 'g')
+ base.tt.new_directory('h', base.root, 'h')
+ base.tt.apply()
+ other = TransformGroup("other", root_id)
+ other.tt.new_file('a', other.root, 'y\nb\nc\nd\be\n', 'a')
+ other.tt.new_file('b', other.root, 'b2', 'b')
+ other.tt.new_file('c', other.root, 'c2', 'c')
+ other.tt.new_file('d', other.root, 'd', 'd')
+ other.tt.new_file('e', other.root, 'e2', 'e')
+ other.tt.new_file('f', other.root, 'f', 'f')
+ other.tt.new_file('g', other.root, 'g', 'g')
+ other.tt.new_file('h', other.root, 'h\ni\nj\nk\n', 'h')
+ other.tt.new_file('i', other.root, 'h\ni\nj\nk\n', 'i')
+ other.tt.apply()
+ this = TransformGroup("this", root_id)
+ this.tt.new_file('a', this.root, 'a\nb\nc\nd\bz\n', 'a')
+ this.tt.new_file('b', this.root, 'b', 'b')
+ this.tt.new_file('c', this.root, 'c', 'c')
+ this.tt.new_file('d', this.root, 'd2', 'd')
+ this.tt.new_file('e', this.root, 'e2', 'e')
+ this.tt.new_file('f', this.root, 'f', 'f')
+ this.tt.new_file('g', this.root, 'g', 'g')
+ this.tt.new_file('h', this.root, '1\n2\n3\n4\n', 'h')
+ this.tt.new_file('i', this.root, '1\n2\n3\n4\n', 'i')
+ this.tt.apply()
+ Merge3Merger(this.wt, this.wt, base.wt, other.wt)
+
+ # textual merge
+ self.assertEqual(this.wt.get_file('a').read(), 'y\nb\nc\nd\bz\n')
+ # three-way text conflict
+ self.assertEqual(this.wt.get_file('b').read(),
+ conflict_text('b', 'b2'))
+ # OTHER wins
+ self.assertEqual(this.wt.get_file('c').read(), 'c2')
+ # THIS wins
+ self.assertEqual(this.wt.get_file('d').read(), 'd2')
+ # Ambiguous clean merge
+ self.assertEqual(this.wt.get_file('e').read(), 'e2')
+ # No change
+ self.assertEqual(this.wt.get_file('f').read(), 'f')
+ # Correct results when THIS == OTHER
+ self.assertEqual(this.wt.get_file('g').read(), 'g')
+ # Text conflict when THIS & OTHER are text and BASE is dir
+ self.assertEqual(this.wt.get_file('h').read(),
+ conflict_text('1\n2\n3\n4\n', 'h\ni\nj\nk\n'))
+ self.assertEqual(this.wt.get_file_byname('h.THIS').read(),
+ '1\n2\n3\n4\n')
+ self.assertEqual(this.wt.get_file_byname('h.OTHER').read(),
+ 'h\ni\nj\nk\n')
+ self.assertEqual(file_kind(this.wt.abspath('h.BASE')), 'directory')
+ self.assertEqual(this.wt.get_file('i').read(),
+ conflict_text('1\n2\n3\n4\n', 'h\ni\nj\nk\n'))
+ self.assertEqual(this.wt.get_file_byname('i.THIS').read(),
+ '1\n2\n3\n4\n')
+ self.assertEqual(this.wt.get_file_byname('i.OTHER').read(),
+ 'h\ni\nj\nk\n')
+ self.assertEqual(os.path.exists(this.wt.abspath('i.BASE')), False)
+ modified = ['a', 'b', 'c', 'h', 'i']
+ merge_modified = this.wt.merge_modified()
+ self.assertSubset(merge_modified, modified)
+ self.assertEqual(len(merge_modified), len(modified))
+ with file(this.wt.id2abspath('a'), 'wb') as f: f.write('booga')
+ modified.pop(0)
+ merge_modified = this.wt.merge_modified()
+ self.assertSubset(merge_modified, modified)
+ self.assertEqual(len(merge_modified), len(modified))
+ this.wt.remove('b')
+ this.wt.revert()
+
+ def test_file_merge(self):
+ self.requireFeature(SymlinkFeature)
+ root_id = generate_ids.gen_root_id()
+ base = TransformGroup("BASE", root_id)
+ this = TransformGroup("THIS", root_id)
+ other = TransformGroup("OTHER", root_id)
+ for tg in this, base, other:
+ tg.tt.new_directory('a', tg.root, 'a')
+ tg.tt.new_symlink('b', tg.root, 'b', 'b')
+ tg.tt.new_file('c', tg.root, 'c', 'c')
+ tg.tt.new_symlink('d', tg.root, tg.name, 'd')
+ targets = ((base, 'base-e', 'base-f', None, None),
+ (this, 'other-e', 'this-f', 'other-g', 'this-h'),
+ (other, 'other-e', None, 'other-g', 'other-h'))
+ for tg, e_target, f_target, g_target, h_target in targets:
+ for link, target in (('e', e_target), ('f', f_target),
+ ('g', g_target), ('h', h_target)):
+ if target is not None:
+ tg.tt.new_symlink(link, tg.root, target, link)
+
+ for tg in this, base, other:
+ tg.tt.apply()
+ Merge3Merger(this.wt, this.wt, base.wt, other.wt)
+ self.assertIs(os.path.isdir(this.wt.abspath('a')), True)
+ self.assertIs(os.path.islink(this.wt.abspath('b')), True)
+ self.assertIs(os.path.isfile(this.wt.abspath('c')), True)
+ for suffix in ('THIS', 'BASE', 'OTHER'):
+ self.assertEqual(os.readlink(this.wt.abspath('d.'+suffix)), suffix)
+ self.assertIs(os.path.lexists(this.wt.abspath('d')), False)
+ self.assertEqual(this.wt.id2path('d'), 'd.OTHER')
+ self.assertEqual(this.wt.id2path('f'), 'f.THIS')
+ self.assertEqual(os.readlink(this.wt.abspath('e')), 'other-e')
+ self.assertIs(os.path.lexists(this.wt.abspath('e.THIS')), False)
+ self.assertIs(os.path.lexists(this.wt.abspath('e.OTHER')), False)
+ self.assertIs(os.path.lexists(this.wt.abspath('e.BASE')), False)
+ self.assertIs(os.path.lexists(this.wt.abspath('g')), True)
+ self.assertIs(os.path.lexists(this.wt.abspath('g.BASE')), False)
+ self.assertIs(os.path.lexists(this.wt.abspath('h')), False)
+ self.assertIs(os.path.lexists(this.wt.abspath('h.BASE')), False)
+ self.assertIs(os.path.lexists(this.wt.abspath('h.THIS')), True)
+ self.assertIs(os.path.lexists(this.wt.abspath('h.OTHER')), True)
+
+ def test_filename_merge(self):
+ root_id = generate_ids.gen_root_id()
+ base = TransformGroup("BASE", root_id)
+ this = TransformGroup("THIS", root_id)
+ other = TransformGroup("OTHER", root_id)
+ base_a, this_a, other_a = [t.tt.new_directory('a', t.root, 'a')
+ for t in [base, this, other]]
+ base_b, this_b, other_b = [t.tt.new_directory('b', t.root, 'b')
+ for t in [base, this, other]]
+ base.tt.new_directory('c', base_a, 'c')
+ this.tt.new_directory('c1', this_a, 'c')
+ other.tt.new_directory('c', other_b, 'c')
+
+ base.tt.new_directory('d', base_a, 'd')
+ this.tt.new_directory('d1', this_b, 'd')
+ other.tt.new_directory('d', other_a, 'd')
+
+ base.tt.new_directory('e', base_a, 'e')
+ this.tt.new_directory('e', this_a, 'e')
+ other.tt.new_directory('e1', other_b, 'e')
+
+ base.tt.new_directory('f', base_a, 'f')
+ this.tt.new_directory('f1', this_b, 'f')
+ other.tt.new_directory('f1', other_b, 'f')
+
+ for tg in [this, base, other]:
+ tg.tt.apply()
+ Merge3Merger(this.wt, this.wt, base.wt, other.wt)
+ self.assertEqual(this.wt.id2path('c'), pathjoin('b/c1'))
+ self.assertEqual(this.wt.id2path('d'), pathjoin('b/d1'))
+ self.assertEqual(this.wt.id2path('e'), pathjoin('b/e1'))
+ self.assertEqual(this.wt.id2path('f'), pathjoin('b/f1'))
+
+ def test_filename_merge_conflicts(self):
+ root_id = generate_ids.gen_root_id()
+ base = TransformGroup("BASE", root_id)
+ this = TransformGroup("THIS", root_id)
+ other = TransformGroup("OTHER", root_id)
+ base_a, this_a, other_a = [t.tt.new_directory('a', t.root, 'a')
+ for t in [base, this, other]]
+ base_b, this_b, other_b = [t.tt.new_directory('b', t.root, 'b')
+ for t in [base, this, other]]
+
+ base.tt.new_file('g', base_a, 'g', 'g')
+ other.tt.new_file('g1', other_b, 'g1', 'g')
+
+ base.tt.new_file('h', base_a, 'h', 'h')
+ this.tt.new_file('h1', this_b, 'h1', 'h')
+
+ base.tt.new_file('i', base.root, 'i', 'i')
+ other.tt.new_directory('i1', this_b, 'i')
+
+ for tg in [this, base, other]:
+ tg.tt.apply()
+ Merge3Merger(this.wt, this.wt, base.wt, other.wt)
+
+ self.assertEqual(this.wt.id2path('g'), pathjoin('b/g1.OTHER'))
+ self.assertIs(os.path.lexists(this.wt.abspath('b/g1.BASE')), True)
+ self.assertIs(os.path.lexists(this.wt.abspath('b/g1.THIS')), False)
+ self.assertEqual(this.wt.id2path('h'), pathjoin('b/h1.THIS'))
+ self.assertIs(os.path.lexists(this.wt.abspath('b/h1.BASE')), True)
+ self.assertIs(os.path.lexists(this.wt.abspath('b/h1.OTHER')), False)
+ self.assertEqual(this.wt.id2path('i'), pathjoin('b/i1.OTHER'))
+
+
+class TestBuildTree(tests.TestCaseWithTransport):
+
+ def test_build_tree_with_symlinks(self):
+ self.requireFeature(SymlinkFeature)
+ os.mkdir('a')
+ a = ControlDir.create_standalone_workingtree('a')
+ os.mkdir('a/foo')
+ with file('a/foo/bar', 'wb') as f: f.write('contents')
+ os.symlink('a/foo/bar', 'a/foo/baz')
+ a.add(['foo', 'foo/bar', 'foo/baz'])
+ a.commit('initial commit')
+ b = ControlDir.create_standalone_workingtree('b')
+ basis = a.basis_tree()
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ build_tree(basis, b)
+ self.assertIs(os.path.isdir('b/foo'), True)
+ self.assertEqual(file('b/foo/bar', 'rb').read(), "contents")
+ self.assertEqual(os.readlink('b/foo/baz'), 'a/foo/bar')
+
+ def test_build_with_references(self):
+ tree = self.make_branch_and_tree('source',
+ format='development-subtree')
+ subtree = self.make_branch_and_tree('source/subtree',
+ format='development-subtree')
+ tree.add_reference(subtree)
+ tree.commit('a revision')
+ tree.branch.create_checkout('target')
+ self.assertPathExists('target')
+ self.assertPathExists('target/subtree')
+
+ def test_file_conflict_handling(self):
+ """Ensure that when building trees, conflict handling is done"""
+ source = self.make_branch_and_tree('source')
+ target = self.make_branch_and_tree('target')
+ self.build_tree(['source/file', 'target/file'])
+ source.add('file', 'new-file')
+ source.commit('added file')
+ build_tree(source.basis_tree(), target)
+ self.assertEqual([DuplicateEntry('Moved existing file to',
+ 'file.moved', 'file', None, 'new-file')],
+ target.conflicts())
+ target2 = self.make_branch_and_tree('target2')
+ target_file = file('target2/file', 'wb')
+ try:
+ source_file = file('source/file', 'rb')
+ try:
+ target_file.write(source_file.read())
+ finally:
+ source_file.close()
+ finally:
+ target_file.close()
+ build_tree(source.basis_tree(), target2)
+ self.assertEqual([], target2.conflicts())
+
+ def test_symlink_conflict_handling(self):
+ """Ensure that when building trees, conflict handling is done"""
+ self.requireFeature(SymlinkFeature)
+ source = self.make_branch_and_tree('source')
+ os.symlink('foo', 'source/symlink')
+ source.add('symlink', 'new-symlink')
+ source.commit('added file')
+ target = self.make_branch_and_tree('target')
+ os.symlink('bar', 'target/symlink')
+ build_tree(source.basis_tree(), target)
+ self.assertEqual([DuplicateEntry('Moved existing file to',
+ 'symlink.moved', 'symlink', None, 'new-symlink')],
+ target.conflicts())
+ target = self.make_branch_and_tree('target2')
+ os.symlink('foo', 'target2/symlink')
+ build_tree(source.basis_tree(), target)
+ self.assertEqual([], target.conflicts())
+
+ def test_directory_conflict_handling(self):
+ """Ensure that when building trees, conflict handling is done"""
+ source = self.make_branch_and_tree('source')
+ target = self.make_branch_and_tree('target')
+ self.build_tree(['source/dir1/', 'source/dir1/file', 'target/dir1/'])
+ source.add(['dir1', 'dir1/file'], ['new-dir1', 'new-file'])
+ source.commit('added file')
+ build_tree(source.basis_tree(), target)
+ self.assertEqual([], target.conflicts())
+ self.assertPathExists('target/dir1/file')
+
+ # Ensure contents are merged
+ target = self.make_branch_and_tree('target2')
+ self.build_tree(['target2/dir1/', 'target2/dir1/file2'])
+ build_tree(source.basis_tree(), target)
+ self.assertEqual([], target.conflicts())
+ self.assertPathExists('target2/dir1/file2')
+ self.assertPathExists('target2/dir1/file')
+
+ # Ensure new contents are suppressed for existing branches
+ target = self.make_branch_and_tree('target3')
+ self.make_branch('target3/dir1')
+ self.build_tree(['target3/dir1/file2'])
+ build_tree(source.basis_tree(), target)
+ self.assertPathDoesNotExist('target3/dir1/file')
+ self.assertPathExists('target3/dir1/file2')
+ self.assertPathExists('target3/dir1.diverted/file')
+ self.assertEqual([DuplicateEntry('Diverted to',
+ 'dir1.diverted', 'dir1', 'new-dir1', None)],
+ target.conflicts())
+
+ target = self.make_branch_and_tree('target4')
+ self.build_tree(['target4/dir1/'])
+ self.make_branch('target4/dir1/file')
+ build_tree(source.basis_tree(), target)
+ self.assertPathExists('target4/dir1/file')
+ self.assertEqual('directory', file_kind('target4/dir1/file'))
+ self.assertPathExists('target4/dir1/file.diverted')
+ self.assertEqual([DuplicateEntry('Diverted to',
+ 'dir1/file.diverted', 'dir1/file', 'new-file', None)],
+ target.conflicts())
+
+ def test_mixed_conflict_handling(self):
+ """Ensure that when building trees, conflict handling is done"""
+ source = self.make_branch_and_tree('source')
+ target = self.make_branch_and_tree('target')
+ self.build_tree(['source/name', 'target/name/'])
+ source.add('name', 'new-name')
+ source.commit('added file')
+ build_tree(source.basis_tree(), target)
+ self.assertEqual([DuplicateEntry('Moved existing file to',
+ 'name.moved', 'name', None, 'new-name')], target.conflicts())
+
+ def test_raises_in_populated(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/name'])
+ source.add('name')
+ source.commit('added name')
+ target = self.make_branch_and_tree('target')
+ self.build_tree(['target/name'])
+ target.add('name')
+ self.assertRaises(errors.WorkingTreeAlreadyPopulated,
+ build_tree, source.basis_tree(), target)
+
+ def test_build_tree_rename_count(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1', 'source/dir1/'])
+ source.add(['file1', 'dir1'])
+ source.commit('add1')
+ target1 = self.make_branch_and_tree('target1')
+ transform_result = build_tree(source.basis_tree(), target1)
+ self.assertEqual(2, transform_result.rename_count)
+
+ self.build_tree(['source/dir1/file2'])
+ source.add(['dir1/file2'])
+ source.commit('add3')
+ target2 = self.make_branch_and_tree('target2')
+ transform_result = build_tree(source.basis_tree(), target2)
+ # children of non-root directories should not be renamed
+ self.assertEqual(2, transform_result.rename_count)
+
+ def create_ab_tree(self):
+ """Create a committed test tree with two files"""
+ source = self.make_branch_and_tree('source')
+ self.build_tree_contents([('source/file1', 'A')])
+ self.build_tree_contents([('source/file2', 'B')])
+ source.add(['file1', 'file2'], ['file1-id', 'file2-id'])
+ source.commit('commit files')
+ source.lock_write()
+ self.addCleanup(source.unlock)
+ return source
+
+ def test_build_tree_accelerator_tree(self):
+ source = self.create_ab_tree()
+ self.build_tree_contents([('source/file2', 'C')])
+ calls = []
+ real_source_get_file = source.get_file
+ def get_file(file_id, path=None):
+ calls.append(file_id)
+ return real_source_get_file(file_id, path)
+ source.get_file = get_file
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source)
+ self.assertEqual(['file1-id'], calls)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+
+ def test_build_tree_accelerator_tree_observes_sha1(self):
+ source = self.create_ab_tree()
+ sha1 = osutils.sha_string('A')
+ target = self.make_branch_and_tree('target')
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ state = target.current_dirstate()
+ state._cutoff_time = time.time() + 60
+ build_tree(source.basis_tree(), target, source)
+ entry = state._get_entry(0, path_utf8='file1')
+ self.assertEqual(sha1, entry[1][0][1])
+
+ def test_build_tree_accelerator_tree_missing_file(self):
+ source = self.create_ab_tree()
+ os.unlink('source/file1')
+ source.remove(['file2'])
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+
+ def test_build_tree_accelerator_wrong_kind(self):
+ self.requireFeature(SymlinkFeature)
+ source = self.make_branch_and_tree('source')
+ self.build_tree_contents([('source/file1', '')])
+ self.build_tree_contents([('source/file2', '')])
+ source.add(['file1', 'file2'], ['file1-id', 'file2-id'])
+ source.commit('commit files')
+ os.unlink('source/file2')
+ self.build_tree_contents([('source/file2/', 'C')])
+ os.unlink('source/file1')
+ os.symlink('file2', 'source/file1')
+ calls = []
+ real_source_get_file = source.get_file
+ def get_file(file_id, path=None):
+ calls.append(file_id)
+ return real_source_get_file(file_id, path)
+ source.get_file = get_file
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source)
+ self.assertEqual([], calls)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+
+ def test_build_tree_hardlink(self):
+ self.requireFeature(HardlinkFeature)
+ source = self.create_ab_tree()
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source, hardlink=True)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+ source_stat = os.stat('source/file1')
+ target_stat = os.stat('target/file1')
+ self.assertEqual(source_stat, target_stat)
+
+ # Explicitly disallowing hardlinks should prevent them.
+ target2 = self.make_branch_and_tree('target2')
+ build_tree(revision_tree, target2, source, hardlink=False)
+ target2.lock_read()
+ self.addCleanup(target2.unlock)
+ self.assertEqual([], list(target2.iter_changes(revision_tree)))
+ source_stat = os.stat('source/file1')
+ target2_stat = os.stat('target2/file1')
+ self.assertNotEqual(source_stat, target2_stat)
+
+ def test_build_tree_accelerator_tree_moved(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree_contents([('source/file1', 'A')])
+ source.add(['file1'], ['file1-id'])
+ source.commit('commit files')
+ source.rename_one('file1', 'file2')
+ source.lock_read()
+ self.addCleanup(source.unlock)
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+
+ def test_build_tree_hardlinks_preserve_execute(self):
+ self.requireFeature(HardlinkFeature)
+ source = self.create_ab_tree()
+ tt = TreeTransform(source)
+ trans_id = tt.trans_id_tree_file_id('file1-id')
+ tt.set_executability(True, trans_id)
+ tt.apply()
+ self.assertTrue(source.is_executable('file1-id'))
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source, hardlink=True)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+ self.assertTrue(target.is_executable('file1-id'))
+
+ def install_rot13_content_filter(self, pattern):
+ # We could use
+ # self.addCleanup(filters._reset_registry, filters._reset_registry())
+ # below, but that looks a bit... hard to read even if it's exactly
+ # the same thing.
+ original_registry = filters._reset_registry()
+ def restore_registry():
+ filters._reset_registry(original_registry)
+ self.addCleanup(restore_registry)
+ def rot13(chunks, context=None):
+ return [''.join(chunks).encode('rot13')]
+ rot13filter = filters.ContentFilter(rot13, rot13)
+ filters.filter_stacks_registry.register(
+ 'rot13', {'yes': [rot13filter]}.get)
+ os.mkdir(self.test_home_dir + '/.bazaar')
+ rules_filename = self.test_home_dir + '/.bazaar/rules'
+ f = open(rules_filename, 'wb')
+ f.write('[name %s]\nrot13=yes\n' % (pattern,))
+ f.close()
+ def uninstall_rules():
+ os.remove(rules_filename)
+ rules.reset_rules()
+ self.addCleanup(uninstall_rules)
+ rules.reset_rules()
+
+ def test_build_tree_content_filtered_files_are_not_hardlinked(self):
+ """build_tree will not hardlink files that have content filtering rules
+ applied to them (but will still hardlink other files from the same tree
+ if it can).
+ """
+ self.requireFeature(HardlinkFeature)
+ self.install_rot13_content_filter('file1')
+ source = self.create_ab_tree()
+ target = self.make_branch_and_tree('target')
+ revision_tree = source.basis_tree()
+ revision_tree.lock_read()
+ self.addCleanup(revision_tree.unlock)
+ build_tree(revision_tree, target, source, hardlink=True)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ self.assertEqual([], list(target.iter_changes(revision_tree)))
+ source_stat = os.stat('source/file1')
+ target_stat = os.stat('target/file1')
+ self.assertNotEqual(source_stat, target_stat)
+ source_stat = os.stat('source/file2')
+ target_stat = os.stat('target/file2')
+ self.assertEqualStat(source_stat, target_stat)
+
+ def test_case_insensitive_build_tree_inventory(self):
+ if (features.CaseInsensitiveFilesystemFeature.available()
+ or features.CaseInsCasePresFilenameFeature.available()):
+ raise tests.UnavailableFeature('Fully case sensitive filesystem')
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file', 'source/FILE'])
+ source.add(['file', 'FILE'], ['lower-id', 'upper-id'])
+ source.commit('added files')
+ # Don't try this at home, kids!
+ # Force the tree to report that it is case insensitive
+ target = self.make_branch_and_tree('target')
+ target.case_sensitive = False
+ build_tree(source.basis_tree(), target, source, delta_from_tree=True)
+ self.assertEqual('file.moved', target.id2path('lower-id'))
+ self.assertEqual('FILE', target.id2path('upper-id'))
+
+ def test_build_tree_observes_sha(self):
+ source = self.make_branch_and_tree('source')
+ self.build_tree(['source/file1', 'source/dir/', 'source/dir/file2'])
+ source.add(['file1', 'dir', 'dir/file2'],
+ ['file1-id', 'dir-id', 'file2-id'])
+ source.commit('new files')
+ target = self.make_branch_and_tree('target')
+ target.lock_write()
+ self.addCleanup(target.unlock)
+ # We make use of the fact that DirState caches its cutoff time. So we
+ # set the 'safe' time to one minute in the future.
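+ # (Files whose mtime is newer than the cutoff are treated as possibly
+ # still changing and their sha1 is not cached; pushing the cutoff into
+ # the future makes the freshly built files eligible for caching, which
+ # is what the assertions below rely on.)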
+ state = target.current_dirstate()
+ state._cutoff_time = time.time() + 60
+ build_tree(source.basis_tree(), target)
+ entry1_sha = osutils.sha_file_by_name('source/file1')
+ entry2_sha = osutils.sha_file_by_name('source/dir/file2')
+ # entry[1] is the state information, entry[1][0] is the state of the
+ # working tree, entry[1][0][1] is the sha value for the current working
+ # tree
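+ # Roughly, and only as an informal sketch of what these indexes mean:
+ # entry == ((dirname, basename, file_id),
+ #           [(minikind, sha1_or_target, size, executable, packed_stat)
+ #            for the working tree, then one such tuple per parent tree])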
+ entry1 = state._get_entry(0, path_utf8='file1')
+ self.assertEqual(entry1_sha, entry1[1][0][1])
+ # The 'size' field must also be set.
+ self.assertEqual(25, entry1[1][0][2])
+ entry1_state = entry1[1][0]
+ entry2 = state._get_entry(0, path_utf8='dir/file2')
+ self.assertEqual(entry2_sha, entry2[1][0][1])
+ self.assertEqual(29, entry2[1][0][2])
+ entry2_state = entry2[1][0]
+ # Now, make sure that we don't have to re-read the content. The
+ # packed_stat should match exactly.
+ self.assertEqual(entry1_sha, target.get_file_sha1('file1-id', 'file1'))
+ self.assertEqual(entry2_sha,
+ target.get_file_sha1('file2-id', 'dir/file2'))
+ self.assertEqual(entry1_state, entry1[1][0])
+ self.assertEqual(entry2_state, entry2[1][0])
+
+
+class TestCommitTransform(tests.TestCaseWithTransport):
+
+ def get_branch(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.commit('empty commit')
+ return tree.branch
+
+ def get_branch_and_transform(self):
+ branch = self.get_branch()
+ tt = TransformPreview(branch.basis_tree())
+ self.addCleanup(tt.finalize)
+ return branch, tt
+
+ def test_commit_wrong_basis(self):
+ branch = self.get_branch()
+ basis = branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ tt = TransformPreview(basis)
+ self.addCleanup(tt.finalize)
+ e = self.assertRaises(ValueError, tt.commit, branch, '')
+ self.assertEqual('TreeTransform not based on branch basis: null:',
+ str(e))
+
+ def test_empty_commit(self):
+ branch, tt = self.get_branch_and_transform()
+ rev = tt.commit(branch, 'my message')
+ self.assertEqual(2, branch.revno())
+ repo = branch.repository
+ self.assertEqual('my message', repo.get_revision(rev).message)
+
+ def test_merge_parents(self):
+ branch, tt = self.get_branch_and_transform()
+ rev = tt.commit(branch, 'my message', ['rev1b', 'rev1c'])
+ self.assertEqual(['rev1b', 'rev1c'],
+ branch.basis_tree().get_parent_ids()[1:])
+
+ def test_first_commit(self):
+ branch = self.make_branch('branch')
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ tt = TransformPreview(branch.basis_tree())
+ self.addCleanup(tt.finalize)
+ tt.new_directory('', ROOT_PARENT, 'TREE_ROOT')
+ rev = tt.commit(branch, 'my message')
+ self.assertEqual([], branch.basis_tree().get_parent_ids())
+ self.assertNotEqual(_mod_revision.NULL_REVISION,
+ branch.last_revision())
+
+ def test_first_commit_with_merge_parents(self):
+ branch = self.make_branch('branch')
+ branch.lock_write()
+ self.addCleanup(branch.unlock)
+ tt = TransformPreview(branch.basis_tree())
+ self.addCleanup(tt.finalize)
+ e = self.assertRaises(ValueError, tt.commit, branch,
+ 'my message', ['rev1b-id'])
+ self.assertEqual('Cannot supply merge parents for first commit.',
+ str(e))
+ self.assertEqual(_mod_revision.NULL_REVISION, branch.last_revision())
+
+ def test_add_files(self):
+ branch, tt = self.get_branch_and_transform()
+ tt.new_file('file', tt.root, 'contents', 'file-id')
+ trans_id = tt.new_directory('dir', tt.root, 'dir-id')
+ if SymlinkFeature.available():
+ tt.new_symlink('symlink', trans_id, 'target', 'symlink-id')
+ rev = tt.commit(branch, 'message')
+ tree = branch.basis_tree()
+ self.assertEqual('file', tree.id2path('file-id'))
+ self.assertEqual('contents', tree.get_file_text('file-id'))
+ self.assertEqual('dir', tree.id2path('dir-id'))
+ if SymlinkFeature.available():
+ self.assertEqual('dir/symlink', tree.id2path('symlink-id'))
+ self.assertEqual('target', tree.get_symlink_target('symlink-id'))
+
+ def test_add_unversioned(self):
+ branch, tt = self.get_branch_and_transform()
+ tt.new_file('file', tt.root, 'contents')
+ self.assertRaises(errors.StrictCommitFailed, tt.commit, branch,
+ 'message', strict=True)
+
+ def test_modify_strict(self):
+ branch, tt = self.get_branch_and_transform()
+ tt.new_file('file', tt.root, 'contents', 'file-id')
+ tt.commit(branch, 'message', strict=True)
+ tt = TransformPreview(branch.basis_tree())
+ self.addCleanup(tt.finalize)
+ trans_id = tt.trans_id_file_id('file-id')
+ tt.delete_contents(trans_id)
+ tt.create_file('contents', trans_id)
+ tt.commit(branch, 'message', strict=True)
+
+ def test_commit_malformed(self):
+ """Committing a malformed transform should raise an exception.
+
+ In this case, we are adding a file without adding its parent.
+ """
+ branch, tt = self.get_branch_and_transform()
+ parent_id = tt.trans_id_file_id('parent-id')
+ tt.new_file('file', parent_id, 'contents', 'file-id')
+ self.assertRaises(errors.MalformedTransform, tt.commit, branch,
+ 'message')
+
+ def test_commit_rich_revision_data(self):
+ branch, tt = self.get_branch_and_transform()
+ rev_id = tt.commit(branch, 'message', timestamp=1, timezone=43201,
+ committer='me <me@example.com>',
+ revprops={'foo': 'bar'}, revision_id='revid-1',
+ authors=['Author1 <author1@example.com>',
+ 'Author2 <author2@example.com>',
+ ])
+ self.assertEqual('revid-1', rev_id)
+ revision = branch.repository.get_revision(rev_id)
+ self.assertEqual(1, revision.timestamp)
+ self.assertEqual(43201, revision.timezone)
+ self.assertEqual('me <me@example.com>', revision.committer)
+ self.assertEqual(['Author1 <author1@example.com>',
+ 'Author2 <author2@example.com>'],
+ revision.get_apparent_authors())
+ del revision.properties['authors']
+ self.assertEqual({'foo': 'bar',
+ 'branch-nick': 'tree'},
+ revision.properties)
+
+ def test_no_explicit_revprops(self):
+ branch, tt = self.get_branch_and_transform()
+ rev_id = tt.commit(branch, 'message', authors=[
+ 'Author1 <author1@example.com>',
+ 'Author2 <author2@example.com>', ])
+ revision = branch.repository.get_revision(rev_id)
+ self.assertEqual(['Author1 <author1@example.com>',
+ 'Author2 <author2@example.com>'],
+ revision.get_apparent_authors())
+ self.assertEqual('tree', revision.properties['branch-nick'])
+
+
+class TestFileMover(tests.TestCaseWithTransport):
+
+ def test_file_mover(self):
+ self.build_tree(['a/', 'a/b', 'c/', 'c/d'])
+ mover = _FileMover()
+ mover.rename('a', 'q')
+ self.assertPathExists('q')
+ self.assertPathDoesNotExist('a')
+ self.assertPathExists('q/b')
+ self.assertPathExists('c')
+ self.assertPathExists('c/d')
+
+ def test_pre_delete_rollback(self):
+ self.build_tree(['a/'])
+ mover = _FileMover()
+ mover.pre_delete('a', 'q')
+ self.assertPathExists('q')
+ self.assertPathDoesNotExist('a')
+ mover.rollback()
+ self.assertPathDoesNotExist('q')
+ self.assertPathExists('a')
+
+ def test_apply_deletions(self):
+ self.build_tree(['a/', 'b/'])
+ mover = _FileMover()
+ mover.pre_delete('a', 'q')
+ mover.pre_delete('b', 'r')
+ self.assertPathExists('q')
+ self.assertPathExists('r')
+ self.assertPathDoesNotExist('a')
+ self.assertPathDoesNotExist('b')
+ mover.apply_deletions()
+ self.assertPathDoesNotExist('q')
+ self.assertPathDoesNotExist('r')
+ self.assertPathDoesNotExist('a')
+ self.assertPathDoesNotExist('b')
+
+ def test_file_mover_rollback(self):
+ self.build_tree(['a/', 'a/b', 'c/', 'c/d/', 'c/e/'])
+ mover = _FileMover()
+ mover.rename('c/d', 'c/f')
+ mover.rename('c/e', 'c/d')
+ try:
+ mover.rename('a', 'c')
+ except errors.FileExists, e:
+ mover.rollback()
+ self.assertPathExists('a')
+ self.assertPathExists('c/d')
+
+
+class Bogus(Exception):
+ pass
+
+
+class TestTransformRollback(tests.TestCaseWithTransport):
+
+ class ExceptionFileMover(_FileMover):
+
+ def __init__(self, bad_source=None, bad_target=None):
+ _FileMover.__init__(self)
+ self.bad_source = bad_source
+ self.bad_target = bad_target
+
+ def rename(self, source, target):
+ if (self.bad_source is not None and
+ source.endswith(self.bad_source)):
+ raise Bogus
+ elif (self.bad_target is not None and
+ target.endswith(self.bad_target)):
+ raise Bogus
+ else:
+ _FileMover.rename(self, source, target)
+
+ def test_rollback_rename(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b'])
+ tt = TreeTransform(tree)
+ self.addCleanup(tt.finalize)
+ a_id = tt.trans_id_tree_path('a')
+ tt.adjust_path('c', tt.root, a_id)
+ tt.adjust_path('d', a_id, tt.trans_id_tree_path('a/b'))
+ self.assertRaises(Bogus, tt.apply,
+ _mover=self.ExceptionFileMover(bad_source='a'))
+ self.assertPathExists('a')
+ self.assertPathExists('a/b')
+ tt.apply()
+ self.assertPathExists('c')
+ self.assertPathExists('c/d')
+
+ def test_rollback_rename_into_place(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b'])
+ tt = TreeTransform(tree)
+ self.addCleanup(tt.finalize)
+ a_id = tt.trans_id_tree_path('a')
+ tt.adjust_path('c', tt.root, a_id)
+ tt.adjust_path('d', a_id, tt.trans_id_tree_path('a/b'))
+ self.assertRaises(Bogus, tt.apply,
+ _mover=self.ExceptionFileMover(bad_target='c/d'))
+ self.assertPathExists('a')
+ self.assertPathExists('a/b')
+ tt.apply()
+ self.assertPathExists('c')
+ self.assertPathExists('c/d')
+
+ def test_rollback_deletion(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b'])
+ tt = TreeTransform(tree)
+ self.addCleanup(tt.finalize)
+ a_id = tt.trans_id_tree_path('a')
+ tt.delete_contents(a_id)
+ tt.adjust_path('d', tt.root, tt.trans_id_tree_path('a/b'))
+ self.assertRaises(Bogus, tt.apply,
+ _mover=self.ExceptionFileMover(bad_target='d'))
+ self.assertPathExists('a')
+ self.assertPathExists('a/b')
+
+
+class TestFinalizeRobustness(tests.TestCaseWithTransport):
+ """Ensure treetransform creation errors can be safely cleaned up after"""
+
+ def _override_globals_in_method(self, instance, method_name, globals):
+ """Replace method on instance with one with updated globals"""
+ import types
+ func = getattr(instance, method_name).im_func
+ new_globals = dict(func.func_globals)
+ new_globals.update(globals)
+ new_func = types.FunctionType(func.func_code, new_globals,
+ func.func_name, func.func_defaults)
+ setattr(instance, method_name,
+ types.MethodType(new_func, instance, instance.__class__))
+ self.addCleanup(delattr, instance, method_name)
+
+ @staticmethod
+ def _fake_open_raises_before(name, mode):
+ """Like open() but raises before doing anything"""
+ raise RuntimeError
+
+ @staticmethod
+ def _fake_open_raises_after(name, mode):
+ """Like open() but raises after creating file without returning"""
+ open(name, mode).close()
+ raise RuntimeError
+
+ def create_transform_and_root_trans_id(self):
+ """Setup a transform creating a file in limbo"""
+ tree = self.make_branch_and_tree('.')
+ tt = TreeTransform(tree)
+ return tt, tt.create_path("a", tt.root)
+
+ def create_transform_and_subdir_trans_id(self):
+ """Setup a transform creating a directory containing a file in limbo"""
+ tree = self.make_branch_and_tree('.')
+ tt = TreeTransform(tree)
+ d_trans_id = tt.create_path("d", tt.root)
+ tt.create_directory(d_trans_id)
+ f_trans_id = tt.create_path("a", d_trans_id)
+ tt.adjust_path("a", d_trans_id, f_trans_id)
+ return tt, f_trans_id
+
+ def test_root_create_file_open_raises_before_creation(self):
+ tt, trans_id = self.create_transform_and_root_trans_id()
+ self._override_globals_in_method(tt, "create_file",
+ {"open": self._fake_open_raises_before})
+ self.assertRaises(RuntimeError, tt.create_file, ["contents"], trans_id)
+ path = tt._limbo_name(trans_id)
+ self.assertPathDoesNotExist(path)
+ tt.finalize()
+ self.assertPathDoesNotExist(tt._limbodir)
+
+ def test_root_create_file_open_raises_after_creation(self):
+ tt, trans_id = self.create_transform_and_root_trans_id()
+ self._override_globals_in_method(tt, "create_file",
+ {"open": self._fake_open_raises_after})
+ self.assertRaises(RuntimeError, tt.create_file, ["contents"], trans_id)
+ path = tt._limbo_name(trans_id)
+ self.assertPathExists(path)
+ tt.finalize()
+ self.assertPathDoesNotExist(path)
+ self.assertPathDoesNotExist(tt._limbodir)
+
+ def test_subdir_create_file_open_raises_before_creation(self):
+ tt, trans_id = self.create_transform_and_subdir_trans_id()
+ self._override_globals_in_method(tt, "create_file",
+ {"open": self._fake_open_raises_before})
+ self.assertRaises(RuntimeError, tt.create_file, ["contents"], trans_id)
+ path = tt._limbo_name(trans_id)
+ self.assertPathDoesNotExist(path)
+ tt.finalize()
+ self.assertPathDoesNotExist(tt._limbodir)
+
+ def test_subdir_create_file_open_raises_after_creation(self):
+ tt, trans_id = self.create_transform_and_subdir_trans_id()
+ self._override_globals_in_method(tt, "create_file",
+ {"open": self._fake_open_raises_after})
+ self.assertRaises(RuntimeError, tt.create_file, ["contents"], trans_id)
+ path = tt._limbo_name(trans_id)
+ self.assertPathExists(path)
+ tt.finalize()
+ self.assertPathDoesNotExist(path)
+ self.assertPathDoesNotExist(tt._limbodir)
+
+ def test_rename_in_limbo_rename_raises_after_rename(self):
+ tt, trans_id = self.create_transform_and_root_trans_id()
+ parent1 = tt.new_directory('parent1', tt.root)
+ child1 = tt.new_file('child1', parent1, 'contents')
+ parent2 = tt.new_directory('parent2', tt.root)
+
+ class FakeOSModule(object):
+ def rename(self, old, new):
+ os.rename(old, new)
+ raise RuntimeError
+ self._override_globals_in_method(tt, "_rename_in_limbo",
+ {"os": FakeOSModule()})
+ self.assertRaises(
+ RuntimeError, tt.adjust_path, "child1", parent2, child1)
+ path = osutils.pathjoin(tt._limbo_name(parent2), "child1")
+ self.assertPathExists(path)
+ tt.finalize()
+ self.assertPathDoesNotExist(path)
+ self.assertPathDoesNotExist(tt._limbodir)
+
+ def test_rename_in_limbo_rename_raises_before_rename(self):
+ tt, trans_id = self.create_transform_and_root_trans_id()
+ parent1 = tt.new_directory('parent1', tt.root)
+ child1 = tt.new_file('child1', parent1, 'contents')
+ parent2 = tt.new_directory('parent2', tt.root)
+
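+        # The fake os module raises before renaming, so the file stays at its
+        # original limbo location.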
+ class FakeOSModule(object):
+ def rename(self, old, new):
+ raise RuntimeError
+ self._override_globals_in_method(tt, "_rename_in_limbo",
+ {"os": FakeOSModule()})
+ self.assertRaises(
+ RuntimeError, tt.adjust_path, "child1", parent2, child1)
+ path = osutils.pathjoin(tt._limbo_name(parent1), "child1")
+ self.assertPathExists(path)
+ tt.finalize()
+ self.assertPathDoesNotExist(path)
+ self.assertPathDoesNotExist(tt._limbodir)
+
+
+class TestTransformMissingParent(tests.TestCaseWithTransport):
+
+ def make_tt_with_versioned_dir(self):
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['dir/',])
+ wt.add(['dir'], ['dir-id'])
+ wt.commit('Create dir')
+ tt = TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+ return wt, tt
+
+ def test_resolve_create_parent_for_versioned_file(self):
+ wt, tt = self.make_tt_with_versioned_dir()
+ dir_tid = tt.trans_id_tree_file_id('dir-id')
+ file_tid = tt.new_file('file', dir_tid, 'Contents', file_id='file-id')
+ tt.delete_contents(dir_tid)
+ tt.unversion_file(dir_tid)
+ conflicts = resolve_conflicts(tt)
+ # one conflict for the missing directory, one for the unversioned
+ # parent
+ self.assertLength(2, conflicts)
+
+ def test_non_versioned_file_create_conflict(self):
+ wt, tt = self.make_tt_with_versioned_dir()
+ dir_tid = tt.trans_id_tree_file_id('dir-id')
+ tt.new_file('file', dir_tid, 'Contents')
+ tt.delete_contents(dir_tid)
+ tt.unversion_file(dir_tid)
+ conflicts = resolve_conflicts(tt)
+        # No conflicts, or rather: orphaning 'file' resolves the 'dir' conflict
+ self.assertLength(1, conflicts)
+ self.assertEqual(('deleting parent', 'Not deleting', 'new-1'),
+ conflicts.pop())
+
+
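+# Each iter_changes entry below is (file_id, (old_path, new_path),
+# changed_content, (old_versioned, new_versioned),
+# (old_parent_id, new_parent_id), (old_name, new_name),
+# (old_kind, new_kind), (old_executable, new_executable)).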
+A_ENTRY = ('a-id', ('a', 'a'), True, (True, True),
+ ('TREE_ROOT', 'TREE_ROOT'), ('a', 'a'), ('file', 'file'),
+ (False, False))
+ROOT_ENTRY = ('TREE_ROOT', ('', ''), False, (True, True), (None, None),
+ ('', ''), ('directory', 'directory'), (False, False))
+
+
+class TestTransformPreview(tests.TestCaseWithTransport):
+
+ def create_tree(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('a', 'content 1')])
+ tree.set_root_id('TREE_ROOT')
+ tree.add('a', 'a-id')
+ tree.commit('rev1', rev_id='rev1')
+ return tree.branch.repository.revision_tree('rev1')
+
+ def get_empty_preview(self):
+ repository = self.make_repository('repo')
+ tree = repository.revision_tree(_mod_revision.NULL_REVISION)
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ return preview
+
+ def test_transform_preview(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+
+ def test_transform_preview_tree(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+ preview.get_preview_tree()
+
+ def test_transform_new_file(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('file2', preview.root, 'content B\n', 'file2-id')
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual(preview_tree.kind('file2-id'), 'file')
+ self.assertEqual(
+ preview_tree.get_file('file2-id').read(), 'content B\n')
+
+ def test_diff_preview_tree(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('file2', preview.root, 'content B\n', 'file2-id')
+ preview_tree = preview.get_preview_tree()
+ out = StringIO()
+ show_diff_trees(revision_tree, preview_tree, out)
+ lines = out.getvalue().splitlines()
+ self.assertEqual(lines[0], "=== added file 'file2'")
+ # 3 lines of diff administrivia
+ self.assertEqual(lines[4], "+content B")
+
+ def test_transform_conflicts(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('a', preview.root, 'content 2')
+ resolve_conflicts(preview)
+ trans_id = preview.trans_id_file_id('a-id')
+ self.assertEqual('a.moved', preview.final_name(trans_id))
+
+ def get_tree_and_preview_tree(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+ a_trans_id = preview.trans_id_file_id('a-id')
+ preview.delete_contents(a_trans_id)
+ preview.create_file('b content', a_trans_id)
+ preview_tree = preview.get_preview_tree()
+ return revision_tree, preview_tree
+
+ def test_iter_changes(self):
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ root = revision_tree.get_root_id()
+ self.assertEqual([('a-id', ('a', 'a'), True, (True, True),
+ (root, root), ('a', 'a'), ('file', 'file'),
+ (False, False))],
+ list(preview_tree.iter_changes(revision_tree)))
+
+ def test_include_unchanged_succeeds(self):
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ changes = preview_tree.iter_changes(revision_tree,
+ include_unchanged=True)
+ root = revision_tree.get_root_id()
+
+ self.assertEqual([ROOT_ENTRY, A_ENTRY], list(changes))
+
+ def test_specific_files(self):
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ changes = preview_tree.iter_changes(revision_tree,
+ specific_files=[''])
+ self.assertEqual([A_ENTRY], list(changes))
+
+ def test_want_unversioned(self):
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ changes = preview_tree.iter_changes(revision_tree,
+ want_unversioned=True)
+ self.assertEqual([A_ENTRY], list(changes))
+
+ def test_ignore_extra_trees_no_specific_files(self):
+ # extra_trees is harmless without specific_files, so we'll silently
+ # accept it, even though we won't use it.
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ preview_tree.iter_changes(revision_tree, extra_trees=[preview_tree])
+
+ def test_ignore_require_versioned_no_specific_files(self):
+ # require_versioned is meaningless without specific_files.
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ preview_tree.iter_changes(revision_tree, require_versioned=False)
+
+ def test_ignore_pb(self):
+ # pb could be supported, but TT.iter_changes doesn't support it.
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ preview_tree.iter_changes(revision_tree)
+
+ def test_kind(self):
+ revision_tree = self.create_tree()
+ preview = TransformPreview(revision_tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('file', preview.root, 'contents', 'file-id')
+ preview.new_directory('directory', preview.root, 'dir-id')
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual('file', preview_tree.kind('file-id'))
+ self.assertEqual('directory', preview_tree.kind('dir-id'))
+
+ def test_get_file_mtime(self):
+ preview = self.get_empty_preview()
+ file_trans_id = preview.new_file('file', preview.root, 'contents',
+ 'file-id')
+ limbo_path = preview._limbo_name(file_trans_id)
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual(os.stat(limbo_path).st_mtime,
+ preview_tree.get_file_mtime('file-id'))
+
+ def test_get_file_mtime_renamed(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/file'])
+ work_tree.add('file', 'file-id')
+ preview = TransformPreview(work_tree)
+ self.addCleanup(preview.finalize)
+ file_trans_id = preview.trans_id_tree_file_id('file-id')
+ preview.adjust_path('renamed', preview.root, file_trans_id)
+ preview_tree = preview.get_preview_tree()
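+        # Looking up the mtime in the preview (under the new name) and in the
+        # work tree (under the old name) should both succeed; nothing is
+        # asserted about the values themselves.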
+ preview_mtime = preview_tree.get_file_mtime('file-id', 'renamed')
+ work_mtime = work_tree.get_file_mtime('file-id', 'file')
+
+ def test_get_file_size(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/old', 'old')])
+ work_tree.add('old', 'old-id')
+ preview = TransformPreview(work_tree)
+ self.addCleanup(preview.finalize)
+ new_id = preview.new_file('name', preview.root, 'contents', 'new-id',
+ 'executable')
+ tree = preview.get_preview_tree()
+ self.assertEqual(len('old'), tree.get_file_size('old-id'))
+ self.assertEqual(len('contents'), tree.get_file_size('new-id'))
+
+ def test_get_file(self):
+ preview = self.get_empty_preview()
+ preview.new_file('file', preview.root, 'contents', 'file-id')
+ preview_tree = preview.get_preview_tree()
+ tree_file = preview_tree.get_file('file-id')
+ try:
+ self.assertEqual('contents', tree_file.read())
+ finally:
+ tree_file.close()
+
+ def test_get_symlink_target(self):
+ self.requireFeature(SymlinkFeature)
+ preview = self.get_empty_preview()
+ preview.new_symlink('symlink', preview.root, 'target', 'symlink-id')
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual('target',
+ preview_tree.get_symlink_target('symlink-id'))
+
+ def test_all_file_ids(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/c'])
+ tree.add(['a', 'b', 'c'], ['a-id', 'b-id', 'c-id'])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ preview.unversion_file(preview.trans_id_file_id('b-id'))
+ c_trans_id = preview.trans_id_file_id('c-id')
+ preview.unversion_file(c_trans_id)
+ preview.version_file('c-id', c_trans_id)
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual(set(['a-id', 'c-id', tree.get_root_id()]),
+ preview_tree.all_file_ids())
+
+ def test_path2id_deleted_unchanged(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/unchanged', 'tree/deleted'])
+ tree.add(['unchanged', 'deleted'], ['unchanged-id', 'deleted-id'])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ preview.unversion_file(preview.trans_id_file_id('deleted-id'))
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual('unchanged-id', preview_tree.path2id('unchanged'))
+ self.assertIs(None, preview_tree.path2id('deleted'))
+
+ def test_path2id_created(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/unchanged'])
+ tree.add(['unchanged'], ['unchanged-id'])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('new', preview.trans_id_file_id('unchanged-id'),
+ 'contents', 'new-id')
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual('new-id', preview_tree.path2id('unchanged/new'))
+
+ def test_path2id_moved(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/old_parent/', 'tree/old_parent/child'])
+ tree.add(['old_parent', 'old_parent/child'],
+ ['old_parent-id', 'child-id'])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ new_parent = preview.new_directory('new_parent', preview.root,
+ 'new_parent-id')
+ preview.adjust_path('child', new_parent,
+ preview.trans_id_file_id('child-id'))
+ preview_tree = preview.get_preview_tree()
+ self.assertIs(None, preview_tree.path2id('old_parent/child'))
+ self.assertEqual('child-id', preview_tree.path2id('new_parent/child'))
+
+ def test_path2id_renamed_parent(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/old_name/', 'tree/old_name/child'])
+ tree.add(['old_name', 'old_name/child'],
+ ['parent-id', 'child-id'])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ preview.adjust_path('new_name', preview.root,
+ preview.trans_id_file_id('parent-id'))
+ preview_tree = preview.get_preview_tree()
+ self.assertIs(None, preview_tree.path2id('old_name/child'))
+ self.assertEqual('child-id', preview_tree.path2id('new_name/child'))
+
+ def assertMatchingIterEntries(self, tt, specific_file_ids=None):
+ preview_tree = tt.get_preview_tree()
+ preview_result = list(preview_tree.iter_entries_by_dir(
+ specific_file_ids))
+ tree = tt._tree
+ tt.apply()
+ actual_result = list(tree.iter_entries_by_dir(specific_file_ids))
+ self.assertEqual(actual_result, preview_result)
+
+ def test_iter_entries_by_dir_new(self):
+ tree = self.make_branch_and_tree('tree')
+ tt = TreeTransform(tree)
+ tt.new_file('new', tt.root, 'contents', 'new-id')
+ self.assertMatchingIterEntries(tt)
+
+ def test_iter_entries_by_dir_deleted(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/deleted'])
+ tree.add('deleted', 'deleted-id')
+ tt = TreeTransform(tree)
+ tt.delete_contents(tt.trans_id_file_id('deleted-id'))
+ self.assertMatchingIterEntries(tt)
+
+ def test_iter_entries_by_dir_unversioned(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/removed'])
+ tree.add('removed', 'removed-id')
+ tt = TreeTransform(tree)
+ tt.unversion_file(tt.trans_id_file_id('removed-id'))
+ self.assertMatchingIterEntries(tt)
+
+ def test_iter_entries_by_dir_moved(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/moved', 'tree/new_parent/'])
+ tree.add(['moved', 'new_parent'], ['moved-id', 'new_parent-id'])
+ tt = TreeTransform(tree)
+ tt.adjust_path('moved', tt.trans_id_file_id('new_parent-id'),
+ tt.trans_id_file_id('moved-id'))
+ self.assertMatchingIterEntries(tt)
+
+ def test_iter_entries_by_dir_specific_file_ids(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.set_root_id('tree-root-id')
+ self.build_tree(['tree/parent/', 'tree/parent/child'])
+ tree.add(['parent', 'parent/child'], ['parent-id', 'child-id'])
+ tt = TreeTransform(tree)
+ self.assertMatchingIterEntries(tt, ['tree-root-id', 'child-id'])
+
+ def test_symlink_content_summary(self):
+ self.requireFeature(SymlinkFeature)
+ preview = self.get_empty_preview()
+ preview.new_symlink('path', preview.root, 'target', 'path-id')
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(('symlink', None, None, 'target'), summary)
+
+ def test_missing_content_summary(self):
+ preview = self.get_empty_preview()
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(('missing', None, None, None), summary)
+
+ def test_deleted_content_summary(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/path/'])
+ tree.add('path')
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ preview.delete_contents(preview.trans_id_tree_path('path'))
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(('missing', None, None, None), summary)
+
+ def test_file_content_summary_executable(self):
+ preview = self.get_empty_preview()
+ path_id = preview.new_file('path', preview.root, 'contents', 'path-id')
+ preview.set_executability(True, path_id)
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ self.assertEqual('file', summary[0])
+ # size must be known
+ self.assertEqual(len('contents'), summary[1])
+ # executable
+ self.assertEqual(True, summary[2])
+        # will not have a hash (not cheap to determine)
+ self.assertIs(None, summary[3])
+
+ def test_change_executability(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/path'])
+ tree.add('path')
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ path_id = preview.trans_id_tree_path('path')
+ preview.set_executability(True, path_id)
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(True, summary[2])
+
+ def test_file_content_summary_non_exec(self):
+ preview = self.get_empty_preview()
+ preview.new_file('path', preview.root, 'contents', 'path-id')
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ self.assertEqual('file', summary[0])
+ # size must be known
+ self.assertEqual(len('contents'), summary[1])
+ # not executable
+ self.assertEqual(False, summary[2])
+        # will not have a hash (not cheap to determine)
+ self.assertIs(None, summary[3])
+
+ def test_dir_content_summary(self):
+ preview = self.get_empty_preview()
+ preview.new_directory('path', preview.root, 'path-id')
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(('directory', None, None, None), summary)
+
+ def test_tree_content_summary(self):
+ preview = self.get_empty_preview()
+ path = preview.new_directory('path', preview.root, 'path-id')
+ preview.set_tree_reference('rev-1', path)
+ summary = preview.get_preview_tree().path_content_summary('path')
+ self.assertEqual(4, len(summary))
+ self.assertEqual('tree-reference', summary[0])
+
+ def test_annotate(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'a\n')])
+ tree.add('file', 'file-id')
+ tree.commit('a', rev_id='one')
+ self.build_tree_contents([('tree/file', 'a\nb\n')])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ file_trans_id = preview.trans_id_file_id('file-id')
+ preview.delete_contents(file_trans_id)
+ preview.create_file('a\nb\nc\n', file_trans_id)
+ preview_tree = preview.get_preview_tree()
+ expected = [
+ ('one', 'a\n'),
+ ('me:', 'b\n'),
+ ('me:', 'c\n'),
+ ]
+ annotation = preview_tree.annotate_iter('file-id', 'me:')
+ self.assertEqual(expected, annotation)
+
+ def test_annotate_missing(self):
+ preview = self.get_empty_preview()
+ preview.new_file('file', preview.root, 'a\nb\nc\n', 'file-id')
+ preview_tree = preview.get_preview_tree()
+ expected = [
+ ('me:', 'a\n'),
+ ('me:', 'b\n'),
+ ('me:', 'c\n'),
+ ]
+ annotation = preview_tree.annotate_iter('file-id', 'me:')
+ self.assertEqual(expected, annotation)
+
+ def test_annotate_rename(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'a\n')])
+ tree.add('file', 'file-id')
+ tree.commit('a', rev_id='one')
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ file_trans_id = preview.trans_id_file_id('file-id')
+ preview.adjust_path('newname', preview.root, file_trans_id)
+ preview_tree = preview.get_preview_tree()
+ expected = [
+ ('one', 'a\n'),
+ ]
+ annotation = preview_tree.annotate_iter('file-id', 'me:')
+ self.assertEqual(expected, annotation)
+
+ def test_annotate_deleted(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', 'a\n')])
+ tree.add('file', 'file-id')
+ tree.commit('a', rev_id='one')
+ self.build_tree_contents([('tree/file', 'a\nb\n')])
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ file_trans_id = preview.trans_id_file_id('file-id')
+ preview.delete_contents(file_trans_id)
+ preview_tree = preview.get_preview_tree()
+ annotation = preview_tree.annotate_iter('file-id', 'me:')
+ self.assertIs(None, annotation)
+
+ def test_stored_kind(self):
+ preview = self.get_empty_preview()
+ preview.new_file('file', preview.root, 'a\nb\nc\n', 'file-id')
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual('file', preview_tree.stored_kind('file-id'))
+
+ def test_is_executable(self):
+ preview = self.get_empty_preview()
+ preview.new_file('file', preview.root, 'a\nb\nc\n', 'file-id')
+ preview.set_executability(True, preview.trans_id_file_id('file-id'))
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual(True, preview_tree.is_executable('file-id'))
+
+ def test_get_set_parent_ids(self):
+ revision_tree, preview_tree = self.get_tree_and_preview_tree()
+ self.assertEqual([], preview_tree.get_parent_ids())
+ preview_tree.set_parent_ids(['rev-1'])
+ self.assertEqual(['rev-1'], preview_tree.get_parent_ids())
+
+ def test_plan_file_merge(self):
+ work_a = self.make_branch_and_tree('wta')
+ self.build_tree_contents([('wta/file', 'a\nb\nc\nd\n')])
+ work_a.add('file', 'file-id')
+ base_id = work_a.commit('base version')
+ tree_b = work_a.bzrdir.sprout('wtb').open_workingtree()
+ preview = TransformPreview(work_a)
+ self.addCleanup(preview.finalize)
+ trans_id = preview.trans_id_file_id('file-id')
+ preview.delete_contents(trans_id)
+ preview.create_file('b\nc\nd\ne\n', trans_id)
+ self.build_tree_contents([('wtb/file', 'a\nc\nd\nf\n')])
+ tree_a = preview.get_preview_tree()
+ tree_a.set_parent_ids([base_id])
+ self.assertEqual([
+ ('killed-a', 'a\n'),
+ ('killed-b', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('new-a', 'e\n'),
+ ('new-b', 'f\n'),
+ ], list(tree_a.plan_file_merge('file-id', tree_b)))
+
+ def test_plan_file_merge_revision_tree(self):
+ work_a = self.make_branch_and_tree('wta')
+ self.build_tree_contents([('wta/file', 'a\nb\nc\nd\n')])
+ work_a.add('file', 'file-id')
+ base_id = work_a.commit('base version')
+ tree_b = work_a.bzrdir.sprout('wtb').open_workingtree()
+ preview = TransformPreview(work_a.basis_tree())
+ self.addCleanup(preview.finalize)
+ trans_id = preview.trans_id_file_id('file-id')
+ preview.delete_contents(trans_id)
+ preview.create_file('b\nc\nd\ne\n', trans_id)
+ self.build_tree_contents([('wtb/file', 'a\nc\nd\nf\n')])
+ tree_a = preview.get_preview_tree()
+ tree_a.set_parent_ids([base_id])
+ self.assertEqual([
+ ('killed-a', 'a\n'),
+ ('killed-b', 'b\n'),
+ ('unchanged', 'c\n'),
+ ('unchanged', 'd\n'),
+ ('new-a', 'e\n'),
+ ('new-b', 'f\n'),
+ ], list(tree_a.plan_file_merge('file-id', tree_b)))
+
+ def test_walkdirs(self):
+ preview = self.get_empty_preview()
+ root = preview.new_directory('', ROOT_PARENT, 'tree-root')
+ # FIXME: new_directory should mark root.
+ preview.fixup_new_roots()
+ preview_tree = preview.get_preview_tree()
+ file_trans_id = preview.new_file('a', preview.root, 'contents',
+ 'a-id')
+ expected = [(('', 'tree-root'),
+ [('a', 'a', 'file', None, 'a-id', 'file')])]
+ self.assertEqual(expected, list(preview_tree.walkdirs()))
+
+ def test_extras(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/removed-file', 'tree/existing-file',
+ 'tree/not-removed-file'])
+ work_tree.add(['removed-file', 'not-removed-file'])
+ preview = TransformPreview(work_tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('new-file', preview.root, 'contents')
+ preview.new_file('new-versioned-file', preview.root, 'contents',
+ 'new-versioned-id')
+ tree = preview.get_preview_tree()
+ preview.unversion_file(preview.trans_id_tree_path('removed-file'))
+ self.assertEqual(set(['new-file', 'removed-file', 'existing-file']),
+ set(tree.extras()))
+
+ def test_merge_into_preview(self):
+ work_tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file','b\n')])
+ work_tree.add('file', 'file-id')
+ work_tree.commit('first commit')
+ child_tree = work_tree.bzrdir.sprout('child').open_workingtree()
+ self.build_tree_contents([('child/file','b\nc\n')])
+ child_tree.commit('child commit')
+ child_tree.lock_write()
+ self.addCleanup(child_tree.unlock)
+ work_tree.lock_write()
+ self.addCleanup(work_tree.unlock)
+ preview = TransformPreview(work_tree)
+ self.addCleanup(preview.finalize)
+ file_trans_id = preview.trans_id_file_id('file-id')
+ preview.delete_contents(file_trans_id)
+ preview.create_file('a\nb\n', file_trans_id)
+ preview_tree = preview.get_preview_tree()
+ merger = Merger.from_revision_ids(None, preview_tree,
+ child_tree.branch.last_revision(),
+ other_branch=child_tree.branch,
+ tree_branch=work_tree.branch)
+ merger.merge_type = Merge3Merger
+ tt = merger.make_merger().make_preview_transform()
+ self.addCleanup(tt.finalize)
+ final_tree = tt.get_preview_tree()
+ self.assertEqual('a\nb\nc\n', final_tree.get_file_text('file-id'))
+
+ def test_merge_preview_into_workingtree(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.set_root_id('TREE_ROOT')
+ tt = TransformPreview(tree)
+ self.addCleanup(tt.finalize)
+ tt.new_file('name', tt.root, 'content', 'file-id')
+ tree2 = self.make_branch_and_tree('tree2')
+ tree2.set_root_id('TREE_ROOT')
+ merger = Merger.from_uncommitted(tree2, tt.get_preview_tree(),
+ None, tree.basis_tree())
+ merger.merge_type = Merge3Merger
+ merger.do_merge()
+
+ def test_merge_preview_into_workingtree_handles_conflicts(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/foo', 'bar')])
+ tree.add('foo', 'foo-id')
+ tree.commit('foo')
+ tt = TransformPreview(tree)
+ self.addCleanup(tt.finalize)
+ trans_id = tt.trans_id_file_id('foo-id')
+ tt.delete_contents(trans_id)
+ tt.create_file('baz', trans_id)
+ tree2 = tree.bzrdir.sprout('tree2').open_workingtree()
+ self.build_tree_contents([('tree2/foo', 'qux')])
+ pb = None
+ merger = Merger.from_uncommitted(tree2, tt.get_preview_tree(),
+ pb, tree.basis_tree())
+ merger.merge_type = Merge3Merger
+ merger.do_merge()
+
+ def test_has_filename(self):
+ wt = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/unmodified', 'tree/removed', 'tree/modified'])
+ tt = TransformPreview(wt)
+ removed_id = tt.trans_id_tree_path('removed')
+ tt.delete_contents(removed_id)
+ tt.new_file('new', tt.root, 'contents')
+ modified_id = tt.trans_id_tree_path('modified')
+ tt.delete_contents(modified_id)
+ tt.create_file('modified-contents', modified_id)
+ self.addCleanup(tt.finalize)
+ tree = tt.get_preview_tree()
+ self.assertTrue(tree.has_filename('unmodified'))
+ self.assertFalse(tree.has_filename('not-present'))
+ self.assertFalse(tree.has_filename('removed'))
+ self.assertTrue(tree.has_filename('new'))
+ self.assertTrue(tree.has_filename('modified'))
+
+    def test_is_executable_new_file(self):
+ tree = self.make_branch_and_tree('tree')
+ preview = TransformPreview(tree)
+ self.addCleanup(preview.finalize)
+ preview.new_file('foo', preview.root, 'bar', 'baz-id')
+ preview_tree = preview.get_preview_tree()
+ self.assertEqual(False, preview_tree.is_executable('baz-id',
+ 'tree/foo'))
+ self.assertEqual(False, preview_tree.is_executable('baz-id'))
+
+ def test_commit_preview_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ rev_id = tree.commit('rev1')
+ tree.branch.lock_write()
+ self.addCleanup(tree.branch.unlock)
+ tt = TransformPreview(tree)
+ tt.new_file('file', tt.root, 'contents', 'file_id')
+ self.addCleanup(tt.finalize)
+ preview = tt.get_preview_tree()
+ preview.set_parent_ids([rev_id])
+ builder = tree.branch.get_commit_builder([rev_id])
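+        # record_iter_changes is a generator; consume it so all changes from
+        # the preview are recorded before the inventory is finished.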
+ list(builder.record_iter_changes(preview, rev_id, tt.iter_changes()))
+ builder.finish_inventory()
+ rev2_id = builder.commit('rev2')
+ rev2_tree = tree.branch.repository.revision_tree(rev2_id)
+ self.assertEqual('contents', rev2_tree.get_file_text('file_id'))
+
+ def test_ascii_limbo_paths(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ branch = self.make_branch('any')
+ tree = branch.repository.revision_tree(_mod_revision.NULL_REVISION)
+ tt = TransformPreview(tree)
+ self.addCleanup(tt.finalize)
+ foo_id = tt.new_directory('', ROOT_PARENT)
+ bar_id = tt.new_file(u'\u1234bar', foo_id, 'contents')
+ limbo_path = tt._limbo_name(bar_id)
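+        # Limbo names should stay pure ASCII even when the target file name
+        # contains non-ASCII characters.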
+ self.assertEqual(limbo_path.encode('ascii', 'replace'), limbo_path)
+
+
+class FakeSerializer(object):
+ """Serializer implementation that simply returns the input.
+
+ The input is returned in the order used by pack.ContainerPushParser.
+ """
+ @staticmethod
+ def bytes_record(bytes, names):
+ return names, bytes
+
+
+class TestSerializeTransform(tests.TestCaseWithTransport):
+
+ _test_needs_features = [features.UnicodeFilenameFeature]
+
+ def get_preview(self, tree=None):
+ if tree is None:
+ tree = self.make_branch_and_tree('tree')
+ tt = TransformPreview(tree)
+ self.addCleanup(tt.finalize)
+ return tt
+
+ def assertSerializesTo(self, expected, tt):
+ records = list(tt.serialize(FakeSerializer()))
+ self.assertEqual(expected, records)
+
+ @staticmethod
+ def default_attribs():
+ return {
+ '_id_number': 1,
+ '_new_name': {},
+ '_new_parent': {},
+ '_new_executability': {},
+ '_new_id': {},
+ '_tree_path_ids': {'': 'new-0'},
+ '_removed_id': [],
+ '_removed_contents': [],
+ '_non_present_ids': {},
+ }
+
+ def make_records(self, attribs, contents):
+ records = [
+ (((('attribs'),),), bencode.bencode(attribs))]
+ records.extend([(((n, k),), c) for n, k, c in contents])
+ return records
+
+ def creation_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 3
+ attribs['_new_name'] = {
+ 'new-1': u'foo\u1234'.encode('utf-8'), 'new-2': 'qux'}
+ attribs['_new_id'] = {'new-1': 'baz', 'new-2': 'quxx'}
+ attribs['_new_parent'] = {'new-1': 'new-0', 'new-2': 'new-0'}
+ attribs['_new_executability'] = {'new-1': 1}
+ contents = [
+ ('new-1', 'file', 'i 1\nbar\n'),
+ ('new-2', 'directory', ''),
+ ]
+ return self.make_records(attribs, contents)
+
+ def test_serialize_creation(self):
+ tt = self.get_preview()
+ tt.new_file(u'foo\u1234', tt.root, 'bar', 'baz', True)
+ tt.new_directory('qux', tt.root, 'quxx')
+ self.assertSerializesTo(self.creation_records(), tt)
+
+ def test_deserialize_creation(self):
+ tt = self.get_preview()
+ tt.deserialize(iter(self.creation_records()))
+ self.assertEqual(3, tt._id_number)
+ self.assertEqual({'new-1': u'foo\u1234',
+ 'new-2': 'qux'}, tt._new_name)
+ self.assertEqual({'new-1': 'baz', 'new-2': 'quxx'}, tt._new_id)
+ self.assertEqual({'new-1': tt.root, 'new-2': tt.root}, tt._new_parent)
+ self.assertEqual({'baz': 'new-1', 'quxx': 'new-2'}, tt._r_new_id)
+ self.assertEqual({'new-1': True}, tt._new_executability)
+ self.assertEqual({'new-1': 'file',
+ 'new-2': 'directory'}, tt._new_contents)
+ foo_limbo = open(tt._limbo_name('new-1'), 'rb')
+ try:
+ foo_content = foo_limbo.read()
+ finally:
+ foo_limbo.close()
+ self.assertEqual('bar', foo_content)
+
+ def symlink_creation_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 2
+ attribs['_new_name'] = {'new-1': u'foo\u1234'.encode('utf-8')}
+ attribs['_new_parent'] = {'new-1': 'new-0'}
+ contents = [('new-1', 'symlink', u'bar\u1234'.encode('utf-8'))]
+ return self.make_records(attribs, contents)
+
+ def test_serialize_symlink_creation(self):
+ self.requireFeature(features.SymlinkFeature)
+ tt = self.get_preview()
+ tt.new_symlink(u'foo\u1234', tt.root, u'bar\u1234')
+ self.assertSerializesTo(self.symlink_creation_records(), tt)
+
+ def test_deserialize_symlink_creation(self):
+ self.requireFeature(features.SymlinkFeature)
+ tt = self.get_preview()
+ tt.deserialize(iter(self.symlink_creation_records()))
+ abspath = tt._limbo_name('new-1')
+ foo_content = osutils.readlink(abspath)
+ self.assertEqual(u'bar\u1234', foo_content)
+
+ def make_destruction_preview(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree([u'foo\u1234', 'bar'])
+ tree.add([u'foo\u1234', 'bar'], ['foo-id', 'bar-id'])
+ return self.get_preview(tree)
+
+ def destruction_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 3
+ attribs['_removed_id'] = ['new-1']
+ attribs['_removed_contents'] = ['new-2']
+ attribs['_tree_path_ids'] = {
+ '': 'new-0',
+ u'foo\u1234'.encode('utf-8'): 'new-1',
+ 'bar': 'new-2',
+ }
+ return self.make_records(attribs, [])
+
+ def test_serialize_destruction(self):
+ tt = self.make_destruction_preview()
+ foo_trans_id = tt.trans_id_tree_file_id('foo-id')
+ tt.unversion_file(foo_trans_id)
+ bar_trans_id = tt.trans_id_tree_file_id('bar-id')
+ tt.delete_contents(bar_trans_id)
+ self.assertSerializesTo(self.destruction_records(), tt)
+
+ def test_deserialize_destruction(self):
+ tt = self.make_destruction_preview()
+ tt.deserialize(iter(self.destruction_records()))
+ self.assertEqual({u'foo\u1234': 'new-1',
+ 'bar': 'new-2',
+ '': tt.root}, tt._tree_path_ids)
+ self.assertEqual({'new-1': u'foo\u1234',
+ 'new-2': 'bar',
+ tt.root: ''}, tt._tree_id_paths)
+ self.assertEqual(set(['new-1']), tt._removed_id)
+ self.assertEqual(set(['new-2']), tt._removed_contents)
+
+ def missing_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 2
+ attribs['_non_present_ids'] = {
+ 'boo': 'new-1',}
+ return self.make_records(attribs, [])
+
+ def test_serialize_missing(self):
+ tt = self.get_preview()
+ boo_trans_id = tt.trans_id_file_id('boo')
+ self.assertSerializesTo(self.missing_records(), tt)
+
+ def test_deserialize_missing(self):
+ tt = self.get_preview()
+ tt.deserialize(iter(self.missing_records()))
+ self.assertEqual({'boo': 'new-1'}, tt._non_present_ids)
+
+ def make_modification_preview(self):
+ LINES_ONE = 'aa\nbb\ncc\ndd\n'
+ LINES_TWO = 'z\nbb\nx\ndd\n'
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', LINES_ONE)])
+ tree.add('file', 'file-id')
+ return self.get_preview(tree), LINES_TWO
+
+ def modification_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 2
+ attribs['_tree_path_ids'] = {
+ 'file': 'new-1',
+ '': 'new-0',}
+ attribs['_removed_contents'] = ['new-1']
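+        # File contents are serialized as multiparent diff hunks: 'i N'
+        # inserts N lines, 'c' copies a range of lines from the parent text.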
+ contents = [('new-1', 'file',
+ 'i 1\nz\n\nc 0 1 1 1\ni 1\nx\n\nc 0 3 3 1\n')]
+ return self.make_records(attribs, contents)
+
+ def test_serialize_modification(self):
+ tt, LINES = self.make_modification_preview()
+ trans_id = tt.trans_id_file_id('file-id')
+ tt.delete_contents(trans_id)
+ tt.create_file(LINES, trans_id)
+ self.assertSerializesTo(self.modification_records(), tt)
+
+ def test_deserialize_modification(self):
+ tt, LINES = self.make_modification_preview()
+ tt.deserialize(iter(self.modification_records()))
+ self.assertFileEqual(LINES, tt._limbo_name('new-1'))
+
+ def make_kind_change_preview(self):
+ LINES = 'a\nb\nc\nd\n'
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo/'])
+ tree.add('foo', 'foo-id')
+ return self.get_preview(tree), LINES
+
+ def kind_change_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 2
+ attribs['_tree_path_ids'] = {
+ 'foo': 'new-1',
+ '': 'new-0',}
+ attribs['_removed_contents'] = ['new-1']
+ contents = [('new-1', 'file',
+ 'i 4\na\nb\nc\nd\n\n')]
+ return self.make_records(attribs, contents)
+
+ def test_serialize_kind_change(self):
+ tt, LINES = self.make_kind_change_preview()
+ trans_id = tt.trans_id_file_id('foo-id')
+ tt.delete_contents(trans_id)
+ tt.create_file(LINES, trans_id)
+ self.assertSerializesTo(self.kind_change_records(), tt)
+
+ def test_deserialize_kind_change(self):
+ tt, LINES = self.make_kind_change_preview()
+ tt.deserialize(iter(self.kind_change_records()))
+ self.assertFileEqual(LINES, tt._limbo_name('new-1'))
+
+ def make_add_contents_preview(self):
+ LINES = 'a\nb\nc\nd\n'
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/foo'])
+ tree.add('foo')
+ os.unlink('tree/foo')
+ return self.get_preview(tree), LINES
+
+ def add_contents_records(self):
+ attribs = self.default_attribs()
+ attribs['_id_number'] = 2
+ attribs['_tree_path_ids'] = {
+ 'foo': 'new-1',
+ '': 'new-0',}
+ contents = [('new-1', 'file',
+ 'i 4\na\nb\nc\nd\n\n')]
+ return self.make_records(attribs, contents)
+
+ def test_serialize_add_contents(self):
+ tt, LINES = self.make_add_contents_preview()
+ trans_id = tt.trans_id_tree_path('foo')
+ tt.create_file(LINES, trans_id)
+ self.assertSerializesTo(self.add_contents_records(), tt)
+
+ def test_deserialize_add_contents(self):
+ tt, LINES = self.make_add_contents_preview()
+ tt.deserialize(iter(self.add_contents_records()))
+ self.assertFileEqual(LINES, tt._limbo_name('new-1'))
+
+ def test_get_parents_lines(self):
+ LINES_ONE = 'aa\nbb\ncc\ndd\n'
+ LINES_TWO = 'z\nbb\nx\ndd\n'
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', LINES_ONE)])
+ tree.add('file', 'file-id')
+ tt = self.get_preview(tree)
+ trans_id = tt.trans_id_tree_path('file')
+ self.assertEqual((['aa\n', 'bb\n', 'cc\n', 'dd\n'],),
+ tt._get_parents_lines(trans_id))
+
+ def test_get_parents_texts(self):
+ LINES_ONE = 'aa\nbb\ncc\ndd\n'
+ LINES_TWO = 'z\nbb\nx\ndd\n'
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/file', LINES_ONE)])
+ tree.add('file', 'file-id')
+ tt = self.get_preview(tree)
+ trans_id = tt.trans_id_tree_path('file')
+ self.assertEqual((LINES_ONE,),
+ tt._get_parents_texts(trans_id))
+
+
+class TestOrphan(tests.TestCaseWithTransport):
+
+ def test_no_orphan_for_transform_preview(self):
+ tree = self.make_branch_and_tree('tree')
+ tt = transform.TransformPreview(tree)
+ self.addCleanup(tt.finalize)
+ self.assertRaises(NotImplementedError, tt.new_orphan, 'foo', 'bar')
+
+ def _set_orphan_policy(self, wt, policy):
+ wt.branch.get_config_stack().set('bzr.transform.orphan_policy',
+ policy)
+
+ def _prepare_orphan(self, wt):
+ self.build_tree(['dir/', 'dir/file', 'dir/foo'])
+ wt.add(['dir', 'dir/file'], ['dir-id', 'file-id'])
+ wt.commit('add dir and file ignoring foo')
+ tt = transform.TreeTransform(wt)
+ self.addCleanup(tt.finalize)
+        # dir and file are deleted
+ dir_tid = tt.trans_id_tree_path('dir')
+ file_tid = tt.trans_id_tree_path('dir/file')
+ orphan_tid = tt.trans_id_tree_path('dir/foo')
+ tt.delete_contents(file_tid)
+ tt.unversion_file(file_tid)
+ tt.delete_contents(dir_tid)
+ tt.unversion_file(dir_tid)
+        # There should be a conflict because dir still contains foo
+ raw_conflicts = tt.find_conflicts()
+ self.assertLength(1, raw_conflicts)
+ self.assertEqual(('missing parent', 'new-1'), raw_conflicts[0])
+ return tt, orphan_tid
+
+ def test_new_orphan_created(self):
+ wt = self.make_branch_and_tree('.')
+ self._set_orphan_policy(wt, 'move')
+ tt, orphan_tid = self._prepare_orphan(wt)
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ remaining_conflicts = resolve_conflicts(tt)
+ self.assertEquals(['dir/foo has been orphaned in bzr-orphans'],
+ warnings)
+        # Yay for resolved conflicts!
+ self.assertLength(0, remaining_conflicts)
+ # We have a new orphan
+ self.assertEquals('foo.~1~', tt.final_name(orphan_tid))
+ self.assertEquals('bzr-orphans',
+ tt.final_name(tt.final_parent(orphan_tid)))
+
+ def test_never_orphan(self):
+ wt = self.make_branch_and_tree('.')
+ self._set_orphan_policy(wt, 'conflict')
+ tt, orphan_tid = self._prepare_orphan(wt)
+ remaining_conflicts = resolve_conflicts(tt)
+ self.assertLength(1, remaining_conflicts)
+ self.assertEqual(('deleting parent', 'Not deleting', 'new-1'),
+ remaining_conflicts.pop())
+
+ def test_orphan_error(self):
+ def bogus_orphan(tt, orphan_id, parent_id):
+ raise transform.OrphaningError(tt.final_name(orphan_id),
+ tt.final_name(parent_id))
+ transform.orphaning_registry.register('bogus', bogus_orphan,
+ 'Raise an error when orphaning')
+ wt = self.make_branch_and_tree('.')
+ self._set_orphan_policy(wt, 'bogus')
+ tt, orphan_tid = self._prepare_orphan(wt)
+ remaining_conflicts = resolve_conflicts(tt)
+ self.assertLength(1, remaining_conflicts)
+ self.assertEqual(('deleting parent', 'Not deleting', 'new-1'),
+ remaining_conflicts.pop())
+
+ def test_unknown_orphan_policy(self):
+ wt = self.make_branch_and_tree('.')
+ # Set a fictional policy nobody ever implemented
+ self._set_orphan_policy(wt, 'donttouchmypreciouuus')
+ tt, orphan_tid = self._prepare_orphan(wt)
+ warnings = []
+ def warning(*args):
+ warnings.append(args[0] % args[1:])
+ self.overrideAttr(trace, 'warning', warning)
+ remaining_conflicts = resolve_conflicts(tt)
+        # We fall back to the default policy, which creates a conflict
+ self.assertLength(1, remaining_conflicts)
+ self.assertEqual(('deleting parent', 'Not deleting', 'new-1'),
+ remaining_conflicts.pop())
+ self.assertLength(1, warnings)
+ self.assertStartsWith(warnings[0], 'Value "donttouchmypreciouuus" ')
+
+
+class TestTransformHooks(tests.TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestTransformHooks, self).setUp()
+ self.wt = self.make_branch_and_tree('.')
+ os.chdir('..')
+
+ def get_transform(self):
+ transform = TreeTransform(self.wt)
+ self.addCleanup(transform.finalize)
+ return transform, transform.root
+
+ def test_pre_commit_hooks(self):
+ calls = []
+ def record_pre_transform(tree, tt):
+ calls.append((tree, tt))
+ MutableTree.hooks.install_named_hook('pre_transform',
+ record_pre_transform, "Pre transform")
+ transform, root = self.get_transform()
+ old_root_id = transform.tree_file_id(root)
+ transform.apply()
+ self.assertEqual(old_root_id, self.wt.get_root_id())
+ self.assertEquals([(self.wt, transform)], calls)
+
+ def test_post_commit_hooks(self):
+ calls = []
+ def record_post_transform(tree, tt):
+ calls.append((tree, tt))
+ MutableTree.hooks.install_named_hook('post_transform',
+ record_post_transform, "Post transform")
+ transform, root = self.get_transform()
+ old_root_id = transform.tree_file_id(root)
+ transform.apply()
+ self.assertEqual(old_root_id, self.wt.get_root_id())
+ self.assertEquals([(self.wt, transform)], calls)
diff --git a/bzrlib/tests/test_transport.py b/bzrlib/tests/test_transport.py
new file mode 100644
index 0000000..7f1e4fa
--- /dev/null
+++ b/bzrlib/tests/test_transport.py
@@ -0,0 +1,1147 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+from cStringIO import StringIO
+import errno
+import os
+import subprocess
+import sys
+import threading
+
+from bzrlib import (
+ errors,
+ osutils,
+ tests,
+ transport,
+ urlutils,
+ )
+from bzrlib.directory_service import directories
+from bzrlib.transport import (
+ chroot,
+ fakenfs,
+ http,
+ local,
+ location_to_url,
+ memory,
+ pathfilter,
+ readonly,
+ )
+import bzrlib.transport.trace
+from bzrlib.tests import (
+ features,
+ test_server,
+ )
+
+
+# TODO: Should possibly split transport-specific tests into their own files.
+
+
+class TestTransport(tests.TestCase):
+ """Test the non transport-concrete class functionality."""
+
+ def test__get_set_protocol_handlers(self):
+ handlers = transport._get_protocol_handlers()
+ self.assertNotEqual([], handlers.keys())
+ transport._clear_protocol_handlers()
+ self.addCleanup(transport._set_protocol_handlers, handlers)
+ self.assertEqual([], transport._get_protocol_handlers().keys())
+
+ def test_get_transport_modules(self):
+ handlers = transport._get_protocol_handlers()
+ self.addCleanup(transport._set_protocol_handlers, handlers)
+ # don't pollute the current handlers
+ transport._clear_protocol_handlers()
+
+ class SampleHandler(object):
+ """I exist, isnt that enough?"""
+ transport._clear_protocol_handlers()
+ transport.register_transport_proto('foo')
+ transport.register_lazy_transport('foo',
+ 'bzrlib.tests.test_transport',
+ 'TestTransport.SampleHandler')
+ transport.register_transport_proto('bar')
+ transport.register_lazy_transport('bar',
+ 'bzrlib.tests.test_transport',
+ 'TestTransport.SampleHandler')
+ self.assertEqual([SampleHandler.__module__,
+ 'bzrlib.transport.chroot',
+ 'bzrlib.transport.pathfilter'],
+ transport._get_transport_modules())
+
+ def test_transport_dependency(self):
+ """Transport with missing dependency causes no error"""
+ saved_handlers = transport._get_protocol_handlers()
+ self.addCleanup(transport._set_protocol_handlers, saved_handlers)
+ # don't pollute the current handlers
+ transport._clear_protocol_handlers()
+ transport.register_transport_proto('foo')
+ transport.register_lazy_transport(
+ 'foo', 'bzrlib.tests.test_transport', 'BadTransportHandler')
+ try:
+ transport.get_transport_from_url('foo://fooserver/foo')
+ except errors.UnsupportedProtocol, e:
+ e_str = str(e)
+ self.assertEquals('Unsupported protocol'
+ ' for url "foo://fooserver/foo":'
+ ' Unable to import library "some_lib":'
+ ' testing missing dependency', str(e))
+ else:
+ self.fail('Did not raise UnsupportedProtocol')
+
+ def test_transport_fallback(self):
+ """Transport with missing dependency causes no error"""
+ saved_handlers = transport._get_protocol_handlers()
+ self.addCleanup(transport._set_protocol_handlers, saved_handlers)
+ transport._clear_protocol_handlers()
+ transport.register_transport_proto('foo')
+ transport.register_lazy_transport(
+ 'foo', 'bzrlib.tests.test_transport', 'BackupTransportHandler')
+ transport.register_lazy_transport(
+ 'foo', 'bzrlib.tests.test_transport', 'BadTransportHandler')
+ t = transport.get_transport_from_url('foo://fooserver/foo')
+ self.assertTrue(isinstance(t, BackupTransportHandler))
+
+ def test_ssh_hints(self):
+ """Transport ssh:// should raise an error pointing out bzr+ssh://"""
+ try:
+ transport.get_transport_from_url('ssh://fooserver/foo')
+ except errors.UnsupportedProtocol, e:
+ e_str = str(e)
+ self.assertEquals('Unsupported protocol'
+ ' for url "ssh://fooserver/foo":'
+ ' bzr supports bzr+ssh to operate over ssh,'
+ ' use "bzr+ssh://fooserver/foo".',
+ str(e))
+ else:
+ self.fail('Did not raise UnsupportedProtocol')
+
+ def test_LateReadError(self):
+ """The LateReadError helper should raise on read()."""
+ a_file = transport.LateReadError('a path')
+ try:
+ a_file.read()
+ except errors.ReadError, error:
+ self.assertEqual('a path', error.path)
+ self.assertRaises(errors.ReadError, a_file.read, 40)
+ a_file.close()
+
+ def test_local_abspath_non_local_transport(self):
+        # the base implementation should raise NotLocalUrl
+ t = memory.MemoryTransport()
+ e = self.assertRaises(errors.NotLocalUrl, t.local_abspath, 't')
+ self.assertEqual('memory:///t is not a local path.', str(e))
+
+
+class TestCoalesceOffsets(tests.TestCase):
+
+ def check(self, expected, offsets, limit=0, max_size=0, fudge=0):
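+        # Each expected entry is a coalesced offset expressed as
+        # (start, total_length, [(offset_within_group, length), ...]).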
+ coalesce = transport.Transport._coalesce_offsets
+ exp = [transport._CoalescedOffset(*x) for x in expected]
+ out = list(coalesce(offsets, limit=limit, fudge_factor=fudge,
+ max_size=max_size))
+ self.assertEqual(exp, out)
+
+ def test_coalesce_empty(self):
+ self.check([], [])
+
+ def test_coalesce_simple(self):
+ self.check([(0, 10, [(0, 10)])], [(0, 10)])
+
+ def test_coalesce_unrelated(self):
+ self.check([(0, 10, [(0, 10)]),
+ (20, 10, [(0, 10)]),
+ ], [(0, 10), (20, 10)])
+
+ def test_coalesce_unsorted(self):
+ self.check([(20, 10, [(0, 10)]),
+ (0, 10, [(0, 10)]),
+ ], [(20, 10), (0, 10)])
+
+ def test_coalesce_nearby(self):
+ self.check([(0, 20, [(0, 10), (10, 10)])],
+ [(0, 10), (10, 10)])
+
+ def test_coalesce_overlapped(self):
+ self.assertRaises(ValueError,
+ self.check, [(0, 15, [(0, 10), (5, 10)])],
+ [(0, 10), (5, 10)])
+
+ def test_coalesce_limit(self):
+ self.check([(10, 50, [(0, 10), (10, 10), (20, 10),
+ (30, 10), (40, 10)]),
+ (60, 50, [(0, 10), (10, 10), (20, 10),
+ (30, 10), (40, 10)]),
+ ], [(10, 10), (20, 10), (30, 10), (40, 10),
+ (50, 10), (60, 10), (70, 10), (80, 10),
+ (90, 10), (100, 10)],
+ limit=5)
+
+ def test_coalesce_no_limit(self):
+ self.check([(10, 100, [(0, 10), (10, 10), (20, 10),
+ (30, 10), (40, 10), (50, 10),
+ (60, 10), (70, 10), (80, 10),
+ (90, 10)]),
+ ], [(10, 10), (20, 10), (30, 10), (40, 10),
+ (50, 10), (60, 10), (70, 10), (80, 10),
+ (90, 10), (100, 10)])
+
+ def test_coalesce_fudge(self):
+ self.check([(10, 30, [(0, 10), (20, 10)]),
+ (100, 10, [(0, 10)]),
+ ], [(10, 10), (30, 10), (100, 10)],
+ fudge=10)
+
+ def test_coalesce_max_size(self):
+ self.check([(10, 20, [(0, 10), (10, 10)]),
+ (30, 50, [(0, 50)]),
+ # If one range is above max_size, it gets its own coalesced
+ # offset
+ (100, 80, [(0, 80)]),],
+ [(10, 10), (20, 10), (30, 50), (100, 80)],
+ max_size=50)
+
+ def test_coalesce_no_max_size(self):
+ self.check([(10, 170, [(0, 10), (10, 10), (20, 50), (70, 100)])],
+ [(10, 10), (20, 10), (30, 50), (80, 100)],
+ )
+
+ def test_coalesce_default_limit(self):
+ # By default we use a 100MB max size.
+ ten_mb = 10 * 1024 * 1024
+ self.check([(0, 10 * ten_mb, [(i * ten_mb, ten_mb) for i in range(10)]),
+ (10*ten_mb, ten_mb, [(0, ten_mb)])],
+ [(i*ten_mb, ten_mb) for i in range(11)])
+ self.check([(0, 11 * ten_mb, [(i * ten_mb, ten_mb) for i in range(11)])],
+ [(i * ten_mb, ten_mb) for i in range(11)],
+ max_size=1*1024*1024*1024)
+
+
+class TestMemoryServer(tests.TestCase):
+
+ def test_create_server(self):
+ server = memory.MemoryServer()
+ server.start_server()
+ url = server.get_url()
+ self.assertTrue(url in transport.transport_list_registry)
+ t = transport.get_transport_from_url(url)
+ del t
+ server.stop_server()
+ self.assertFalse(url in transport.transport_list_registry)
+ self.assertRaises(errors.UnsupportedProtocol,
+ transport.get_transport, url)
+
+
+class TestMemoryTransport(tests.TestCase):
+
+ def test_get_transport(self):
+ memory.MemoryTransport()
+
+ def test_clone(self):
+ t = memory.MemoryTransport()
+ self.assertTrue(isinstance(t, memory.MemoryTransport))
+ self.assertEqual("memory:///", t.clone("/").base)
+
+ def test_abspath(self):
+ t = memory.MemoryTransport()
+ self.assertEqual("memory:///relpath", t.abspath('relpath'))
+
+ def test_abspath_of_root(self):
+ t = memory.MemoryTransport()
+ self.assertEqual("memory:///", t.base)
+ self.assertEqual("memory:///", t.abspath('/'))
+
+ def test_abspath_of_relpath_starting_at_root(self):
+ t = memory.MemoryTransport()
+ self.assertEqual("memory:///foo", t.abspath('/foo'))
+
+ def test_append_and_get(self):
+ t = memory.MemoryTransport()
+ t.append_bytes('path', 'content')
+ self.assertEqual(t.get('path').read(), 'content')
+ t.append_file('path', StringIO('content'))
+ self.assertEqual(t.get('path').read(), 'contentcontent')
+
+ def test_put_and_get(self):
+ t = memory.MemoryTransport()
+ t.put_file('path', StringIO('content'))
+ self.assertEqual(t.get('path').read(), 'content')
+ t.put_bytes('path', 'content')
+ self.assertEqual(t.get('path').read(), 'content')
+
+ def test_append_without_dir_fails(self):
+ t = memory.MemoryTransport()
+ self.assertRaises(errors.NoSuchFile,
+ t.append_bytes, 'dir/path', 'content')
+
+ def test_put_without_dir_fails(self):
+ t = memory.MemoryTransport()
+ self.assertRaises(errors.NoSuchFile,
+ t.put_file, 'dir/path', StringIO('content'))
+
+ def test_get_missing(self):
+ transport = memory.MemoryTransport()
+ self.assertRaises(errors.NoSuchFile, transport.get, 'foo')
+
+ def test_has_missing(self):
+ t = memory.MemoryTransport()
+ self.assertEquals(False, t.has('foo'))
+
+ def test_has_present(self):
+ t = memory.MemoryTransport()
+ t.append_bytes('foo', 'content')
+ self.assertEquals(True, t.has('foo'))
+
+ def test_list_dir(self):
+ t = memory.MemoryTransport()
+ t.put_bytes('foo', 'content')
+ t.mkdir('dir')
+ t.put_bytes('dir/subfoo', 'content')
+ t.put_bytes('dirlike', 'content')
+
+ self.assertEquals(['dir', 'dirlike', 'foo'], sorted(t.list_dir('.')))
+ self.assertEquals(['subfoo'], sorted(t.list_dir('dir')))
+
+ def test_mkdir(self):
+ t = memory.MemoryTransport()
+ t.mkdir('dir')
+ t.append_bytes('dir/path', 'content')
+ self.assertEqual(t.get('dir/path').read(), 'content')
+
+ def test_mkdir_missing_parent(self):
+ t = memory.MemoryTransport()
+ self.assertRaises(errors.NoSuchFile, t.mkdir, 'dir/dir')
+
+ def test_mkdir_twice(self):
+ t = memory.MemoryTransport()
+ t.mkdir('dir')
+ self.assertRaises(errors.FileExists, t.mkdir, 'dir')
+
+ def test_parameters(self):
+ t = memory.MemoryTransport()
+ self.assertEqual(True, t.listable())
+ self.assertEqual(False, t.is_readonly())
+
+ def test_iter_files_recursive(self):
+ t = memory.MemoryTransport()
+ t.mkdir('dir')
+ t.put_bytes('dir/foo', 'content')
+ t.put_bytes('dir/bar', 'content')
+ t.put_bytes('bar', 'content')
+ paths = set(t.iter_files_recursive())
+ self.assertEqual(set(['dir/foo', 'dir/bar', 'bar']), paths)
+
+ def test_stat(self):
+ t = memory.MemoryTransport()
+ t.put_bytes('foo', 'content')
+ t.put_bytes('bar', 'phowar')
+ self.assertEqual(7, t.stat('foo').st_size)
+ self.assertEqual(6, t.stat('bar').st_size)
+
+
+class ChrootDecoratorTransportTest(tests.TestCase):
+ """Chroot decoration specific tests."""
+
+ def test_abspath(self):
+ # The abspath is always relative to the chroot_url.
+ server = chroot.ChrootServer(
+ transport.get_transport_from_url('memory:///foo/bar/'))
+ self.start_server(server)
+ t = transport.get_transport_from_url(server.get_url())
+ self.assertEqual(server.get_url(), t.abspath('/'))
+
+ subdir_t = t.clone('subdir')
+ self.assertEqual(server.get_url(), subdir_t.abspath('/'))
+
+ def test_clone(self):
+ server = chroot.ChrootServer(
+ transport.get_transport_from_url('memory:///foo/bar/'))
+ self.start_server(server)
+ t = transport.get_transport_from_url(server.get_url())
+        # Cloning with a relative path and with an absolute path from the
+        # root yield transports backed by the same server
+ relpath_cloned = t.clone('foo')
+ abspath_cloned = t.clone('/foo')
+ self.assertEqual(server, relpath_cloned.server)
+ self.assertEqual(server, abspath_cloned.server)
+
+ def test_chroot_url_preserves_chroot(self):
+ """Calling get_transport on a chroot transport's base should produce a
+ transport with exactly the same behaviour as the original chroot
+ transport.
+
+ This is so that it is not possible to escape a chroot by doing::
+ url = chroot_transport.base
+ parent_url = urlutils.join(url, '..')
+ new_t = transport.get_transport_from_url(parent_url)
+ """
+ server = chroot.ChrootServer(
+ transport.get_transport_from_url('memory:///path/subpath'))
+ self.start_server(server)
+ t = transport.get_transport_from_url(server.get_url())
+ new_t = transport.get_transport_from_url(t.base)
+ self.assertEqual(t.server, new_t.server)
+ self.assertEqual(t.base, new_t.base)
+
+ def test_urljoin_preserves_chroot(self):
+ """Using urlutils.join(url, '..') on a chroot URL should not produce a
+ URL that escapes the intended chroot.
+
+ This is so that it is not possible to escape a chroot by doing::
+ url = chroot_transport.base
+ parent_url = urlutils.join(url, '..')
+ new_t = transport.get_transport_from_url(parent_url)
+ """
+ server = chroot.ChrootServer(
+ transport.get_transport_from_url('memory:///path/'))
+ self.start_server(server)
+ t = transport.get_transport_from_url(server.get_url())
+ self.assertRaises(
+ errors.InvalidURLJoin, urlutils.join, t.base, '..')
+
+
+class TestChrootServer(tests.TestCase):
+
+ def test_construct(self):
+ backing_transport = memory.MemoryTransport()
+ server = chroot.ChrootServer(backing_transport)
+ self.assertEqual(backing_transport, server.backing_transport)
+
+ def test_setUp(self):
+ backing_transport = memory.MemoryTransport()
+ server = chroot.ChrootServer(backing_transport)
+ server.start_server()
+ self.addCleanup(server.stop_server)
+ self.assertTrue(server.scheme
+ in transport._get_protocol_handlers().keys())
+
+ def test_stop_server(self):
+ backing_transport = memory.MemoryTransport()
+ server = chroot.ChrootServer(backing_transport)
+ server.start_server()
+ server.stop_server()
+ self.assertFalse(server.scheme
+ in transport._get_protocol_handlers().keys())
+
+ def test_get_url(self):
+ backing_transport = memory.MemoryTransport()
+ server = chroot.ChrootServer(backing_transport)
+ server.start_server()
+ self.addCleanup(server.stop_server)
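+        # Each chroot server registers its own URL scheme, derived from id(server).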
+ self.assertEqual('chroot-%d:///' % id(server), server.get_url())
+
+
+class TestHooks(tests.TestCase):
+ """Basic tests for transport hooks"""
+
+ def _get_connected_transport(self):
+ return transport.ConnectedTransport("bogus:nowhere")
+
+ def test_transporthooks_initialisation(self):
+ """Check all expected transport hook points are set up"""
+ hookpoint = transport.TransportHooks()
+ self.assertTrue("post_connect" in hookpoint,
+ "post_connect not in %s" % (hookpoint,))
+
+ def test_post_connect(self):
+ """Ensure the post_connect hook is called when _set_transport is"""
+ calls = []
+ transport.Transport.hooks.install_named_hook("post_connect",
+ calls.append, None)
+ t = self._get_connected_transport()
+ self.assertLength(0, calls)
+ t._set_connection("connection", "auth")
+ self.assertEqual(calls, [t])
+
+
+class PathFilteringDecoratorTransportTest(tests.TestCase):
+ """Pathfilter decoration specific tests."""
+
+ def test_abspath(self):
+ # The abspath is always relative to the base of the backing transport.
+ server = pathfilter.PathFilteringServer(
+ transport.get_transport_from_url('memory:///foo/bar/'),
+ lambda x: x)
+ server.start_server()
+ t = transport.get_transport_from_url(server.get_url())
+ self.assertEqual(server.get_url(), t.abspath('/'))
+
+ subdir_t = t.clone('subdir')
+ self.assertEqual(server.get_url(), subdir_t.abspath('/'))
+ server.stop_server()
+
+ def make_pf_transport(self, filter_func=None):
+ """Make a PathFilteringTransport backed by a MemoryTransport.
+
+ :param filter_func: by default this will be a no-op function. Use this
+ parameter to override it."""
+ if filter_func is None:
+ filter_func = lambda x: x
+ server = pathfilter.PathFilteringServer(
+ transport.get_transport_from_url('memory:///foo/bar/'), filter_func)
+ server.start_server()
+ self.addCleanup(server.stop_server)
+ return transport.get_transport_from_url(server.get_url())
+
+ def test__filter(self):
+ # _filter (with an identity func as filter_func) always returns
+ # paths relative to the base of the backing transport.
+ t = self.make_pf_transport()
+ self.assertEqual('foo', t._filter('foo'))
+ self.assertEqual('foo/bar', t._filter('foo/bar'))
+ self.assertEqual('', t._filter('..'))
+ self.assertEqual('', t._filter('/'))
+ # The base of the pathfiltering transport is taken into account too.
+ t = t.clone('subdir1/subdir2')
+ self.assertEqual('subdir1/subdir2/foo', t._filter('foo'))
+ self.assertEqual('subdir1/subdir2/foo/bar', t._filter('foo/bar'))
+ self.assertEqual('subdir1', t._filter('..'))
+ self.assertEqual('', t._filter('/'))
+
+ def test_filter_invocation(self):
+ filter_log = []
+
+ def filter(path):
+ filter_log.append(path)
+ return path
+ t = self.make_pf_transport(filter)
+ t.has('abc')
+ self.assertEqual(['abc'], filter_log)
+ del filter_log[:]
+ t.clone('abc').has('xyz')
+ self.assertEqual(['abc/xyz'], filter_log)
+ del filter_log[:]
+ t.has('/abc')
+ self.assertEqual(['abc'], filter_log)
+
+ def test_clone(self):
+ t = self.make_pf_transport()
+ # relpath from root and root path are the same
+ relpath_cloned = t.clone('foo')
+ abspath_cloned = t.clone('/foo')
+ self.assertEqual(t.server, relpath_cloned.server)
+ self.assertEqual(t.server, abspath_cloned.server)
+
+ def test_url_preserves_pathfiltering(self):
+ """Calling get_transport on a pathfiltered transport's base should
+ produce a transport with exactly the same behaviour as the original
+ pathfiltered transport.
+
+ This is so that it is not possible to escape (accidentally or
+ otherwise) the filtering by doing::
+ url = filtered_transport.base
+ parent_url = urlutils.join(url, '..')
+ new_t = transport.get_transport_from_url(parent_url)
+ """
+ t = self.make_pf_transport()
+ new_t = transport.get_transport_from_url(t.base)
+ self.assertEqual(t.server, new_t.server)
+ self.assertEqual(t.base, new_t.base)
+
+
+class ReadonlyDecoratorTransportTest(tests.TestCase):
+ """Readonly decoration specific tests."""
+
+ def test_local_parameters(self):
+ # connect to . in readonly mode
+ t = readonly.ReadonlyTransportDecorator('readonly+.')
+ self.assertEqual(True, t.listable())
+ self.assertEqual(True, t.is_readonly())
+
+ def test_http_parameters(self):
+ from bzrlib.tests.http_server import HttpServer
+ # connect to '.' via http which is not listable
+ server = HttpServer()
+ self.start_server(server)
+ t = transport.get_transport_from_url('readonly+' + server.get_url())
+ self.assertIsInstance(t, readonly.ReadonlyTransportDecorator)
+ self.assertEqual(False, t.listable())
+ self.assertEqual(True, t.is_readonly())
+
+
+class FakeNFSDecoratorTests(tests.TestCaseInTempDir):
+ """NFS decorator specific tests."""
+
+ def get_nfs_transport(self, url):
+ # connect to url with nfs decoration
+ return fakenfs.FakeNFSTransportDecorator('fakenfs+' + url)
+
+ def test_local_parameters(self):
+ # the listable and is_readonly parameters
+ # are not changed by the fakenfs decorator
+ t = self.get_nfs_transport('.')
+ self.assertEqual(True, t.listable())
+ self.assertEqual(False, t.is_readonly())
+
+ def test_http_parameters(self):
+ # the listable and is_readonly parameters
+ # are not changed by the fakenfs decorator
+ from bzrlib.tests.http_server import HttpServer
+ # connect to '.' via http which is not listable
+ server = HttpServer()
+ self.start_server(server)
+ t = self.get_nfs_transport(server.get_url())
+ self.assertIsInstance(t, fakenfs.FakeNFSTransportDecorator)
+ self.assertEqual(False, t.listable())
+ self.assertEqual(True, t.is_readonly())
+
+ def test_fakenfs_server_default(self):
+ # a FakeNFSServer() should bring up a local relpath server for itself
+ server = test_server.FakeNFSServer()
+ self.start_server(server)
+ # the url should be decorated appropriately
+ self.assertStartsWith(server.get_url(), 'fakenfs+')
+ # and we should be able to get a transport for it
+ t = transport.get_transport_from_url(server.get_url())
+ # which must be a FakeNFSTransportDecorator instance.
+ self.assertIsInstance(t, fakenfs.FakeNFSTransportDecorator)
+
+ def test_fakenfs_rename_semantics(self):
+ # a FakeNFS transport must mangle the way rename errors occur to
+ # look like NFS problems.
+ t = self.get_nfs_transport('.')
+ self.build_tree(['from/', 'from/foo', 'to/', 'to/bar'],
+ transport=t)
+ self.assertRaises(errors.ResourceBusy, t.rename, 'from', 'to')
+
+
+class FakeVFATDecoratorTests(tests.TestCaseInTempDir):
+ """Tests for simulation of VFAT restrictions"""
+
+ def get_vfat_transport(self, url):
+ """Return vfat-backed transport for test directory"""
+ from bzrlib.transport.fakevfat import FakeVFATTransportDecorator
+ return FakeVFATTransportDecorator('vfat+' + url)
+
+ def test_transport_creation(self):
+ from bzrlib.transport.fakevfat import FakeVFATTransportDecorator
+ t = self.get_vfat_transport('.')
+ self.assertIsInstance(t, FakeVFATTransportDecorator)
+
+ def test_transport_mkdir(self):
+ t = self.get_vfat_transport('.')
+ t.mkdir('HELLO')
+ self.assertTrue(t.has('hello'))
+ self.assertTrue(t.has('Hello'))
+
+ def test_forbidden_chars(self):
+ t = self.get_vfat_transport('.')
+ self.assertRaises(ValueError, t.has, "<NU>")
+
+
+class BadTransportHandler(transport.Transport):
+ def __init__(self, base_url):
+ raise errors.DependencyNotPresent('some_lib',
+ 'testing missing dependency')
+
+
+class BackupTransportHandler(transport.Transport):
+ """Test transport that works as a backup for the BadTransportHandler"""
+ pass
+
+
+class TestTransportImplementation(tests.TestCaseInTempDir):
+ """Implementation verification for transports.
+
+ To verify a transport we need a server factory, which is a callable
+ that accepts no parameters and returns an implementation of
+ bzrlib.transport.Server.
+
+ That Server is then used to construct transport instances and test
+ the transport via loopback activity.
+
+ Currently this assumes that the Transport object is connected to the
+ current working directory, so whatever is done through the transport
+ should show up in the working directory, and vice versa. This is a bug,
+ because it is possible to have URL schemes which provide access to
+ something that may not result in storage on the local disk, e.g. due to
+ file system limits, or due to it being a database or some other
+ non-filesystem tool.
+
+ This also tests to make sure that the functions work with both
+ generators and lists (assuming iter(list) is effectively a generator)
+ """
+
+ def setUp(self):
+ super(TestTransportImplementation, self).setUp()
+ self._server = self.transport_server()
+ self.start_server(self._server)
+
+ def get_transport(self, relpath=None):
+ """Return a connected transport to the local directory.
+
+ :param relpath: a path relative to the base url.
+ """
+ base_url = self._server.get_url()
+ url = self._adjust_url(base_url, relpath)
+ # try getting the transport via the regular interface:
+ t = transport.get_transport_from_url(url)
+ # vila--20070607 if the following are commented out the test suite
+ # still passes. Is this really still needed or was it a forgotten
+ # temporary fix?
+ if not isinstance(t, self.transport_class):
+ # we did not get the correct transport class type. Override the
+ # regular connection behaviour by direct construction.
+ t = self.transport_class(url)
+ return t
+
+
+class TestTransportFromPath(tests.TestCaseInTempDir):
+
+ def test_with_path(self):
+ t = transport.get_transport_from_path(self.test_dir)
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base.rstrip("/"),
+ urlutils.local_path_to_url(self.test_dir))
+
+ def test_with_url(self):
+ t = transport.get_transport_from_path("file:")
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base.rstrip("/"),
+ urlutils.local_path_to_url(os.path.join(self.test_dir, "file:")))
+
+
+class TestTransportFromUrl(tests.TestCaseInTempDir):
+
+ def test_with_path(self):
+ self.assertRaises(errors.InvalidURL, transport.get_transport_from_url,
+ self.test_dir)
+
+ def test_with_url(self):
+ url = urlutils.local_path_to_url(self.test_dir)
+ t = transport.get_transport_from_url(url)
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base.rstrip("/"), url)
+
+ def test_with_url_and_segment_parameters(self):
+ url = urlutils.local_path_to_url(self.test_dir)+",branch=foo"
+ t = transport.get_transport_from_url(url)
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base.rstrip("/"), url)
+ with open(os.path.join(self.test_dir, "afile"), 'w') as f:
+ f.write("data")
+ self.assertTrue(t.has("afile"))
+
+
+class TestLocalTransports(tests.TestCase):
+
+ def test_get_transport_from_abspath(self):
+ here = osutils.abspath('.')
+ t = transport.get_transport(here)
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base, urlutils.local_path_to_url(here) + '/')
+
+ def test_get_transport_from_relpath(self):
+ here = osutils.abspath('.')
+ t = transport.get_transport('.')
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base, urlutils.local_path_to_url('.') + '/')
+
+ def test_get_transport_from_local_url(self):
+ here = osutils.abspath('.')
+ here_url = urlutils.local_path_to_url(here) + '/'
+ t = transport.get_transport(here_url)
+ self.assertIsInstance(t, local.LocalTransport)
+ self.assertEquals(t.base, here_url)
+
+ def test_local_abspath(self):
+ here = osutils.abspath('.')
+ t = transport.get_transport(here)
+ self.assertEquals(t.local_abspath(''), here)
+
+
+class TestLocalTransportMutation(tests.TestCaseInTempDir):
+
+ def test_local_transport_mkdir(self):
+ here = osutils.abspath('.')
+ t = transport.get_transport(here)
+ t.mkdir('test')
+ self.assertTrue(os.path.exists('test'))
+
+ def test_local_transport_mkdir_permission_denied(self):
+ # See https://bugs.launchpad.net/bzr/+bug/606537
+ here = osutils.abspath('.')
+ t = transport.get_transport(here)
+ def fake_chmod(path, mode):
+ e = OSError('permission denied')
+ e.errno = errno.EPERM
+ raise e
+ self.overrideAttr(os, 'chmod', fake_chmod)
+ t.mkdir('test')
+ t.mkdir('test2', mode=0707)
+ self.assertTrue(os.path.exists('test'))
+ self.assertTrue(os.path.exists('test2'))
+
+
+class TestLocalTransportWriteStream(tests.TestCaseWithTransport):
+
+ def test_local_fdatasync_calls_fdatasync(self):
+ """Check fdatasync on a stream tries to flush the data to the OS.
+
+ We can't easily observe the external effect but we can at least see
+ it's called.
+ """
+ sentinel = object()
+ fdatasync = getattr(os, 'fdatasync', sentinel)
+ if fdatasync is sentinel:
+ raise tests.TestNotApplicable('fdatasync not supported')
+ t = self.get_transport('.')
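+ # recordCalls wraps os.fdatasync so we can check how often it was called.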
+ calls = self.recordCalls(os, 'fdatasync')
+ w = t.open_write_stream('out')
+ w.write('foo')
+ w.fdatasync()
+ with open('out', 'rb') as f:
+ # Should have been flushed.
+ self.assertEquals(f.read(), 'foo')
+ self.assertEquals(len(calls), 1, calls)
+
+ def test_missing_directory(self):
+ t = self.get_transport('.')
+ self.assertRaises(errors.NoSuchFile, t.open_write_stream, 'dir/foo')
+
+
+class TestWin32LocalTransport(tests.TestCase):
+
+ def test_unc_clone_to_root(self):
+ # Win32 UNC path like \\HOST\path
+ # clone to root should stop at least at \\HOST part
+ # not on \\
+ t = local.EmulatedWin32LocalTransport('file://HOST/path/to/some/dir/')
+ for i in xrange(4):
+ t = t.clone('..')
+ self.assertEquals(t.base, 'file://HOST/')
+ # make sure we reach the root
+ t = t.clone('..')
+ self.assertEquals(t.base, 'file://HOST/')
+
+
+class TestConnectedTransport(tests.TestCase):
+ """Tests for connected to remote server transports"""
+
+ def test_parse_url(self):
+ t = transport.ConnectedTransport(
+ 'http://simple.example.com/home/source')
+ self.assertEquals(t._parsed_url.host, 'simple.example.com')
+ self.assertEquals(t._parsed_url.port, None)
+ self.assertEquals(t._parsed_url.path, '/home/source/')
+ self.assertTrue(t._parsed_url.user is None)
+ self.assertTrue(t._parsed_url.password is None)
+
+ self.assertEquals(t.base, 'http://simple.example.com/home/source/')
+
+ def test_parse_url_with_at_in_user(self):
+ # Bug 228058
+ t = transport.ConnectedTransport('ftp://user@host.com@www.host.com/')
+ self.assertEquals(t._parsed_url.user, 'user@host.com')
+
+ def test_parse_quoted_url(self):
+ t = transport.ConnectedTransport(
+ 'http://ro%62ey:h%40t@ex%41mple.com:2222/path')
+ self.assertEquals(t._parsed_url.host, 'exAmple.com')
+ self.assertEquals(t._parsed_url.port, 2222)
+ self.assertEquals(t._parsed_url.user, 'robey')
+ self.assertEquals(t._parsed_url.password, 'h@t')
+ self.assertEquals(t._parsed_url.path, '/path/')
+
+ # Base should not keep track of the password
+ self.assertEquals(t.base, 'http://ro%62ey@ex%41mple.com:2222/path/')
+
+ def test_parse_invalid_url(self):
+ self.assertRaises(errors.InvalidURL,
+ transport.ConnectedTransport,
+ 'sftp://lily.org:~janneke/public/bzr/gub')
+
+ def test_relpath(self):
+ t = transport.ConnectedTransport('sftp://user@host.com/abs/path')
+
+ self.assertEquals(t.relpath('sftp://user@host.com/abs/path/sub'),
+ 'sub')
+ self.assertRaises(errors.PathNotChild, t.relpath,
+ 'http://user@host.com/abs/path/sub')
+ self.assertRaises(errors.PathNotChild, t.relpath,
+ 'sftp://user2@host.com/abs/path/sub')
+ self.assertRaises(errors.PathNotChild, t.relpath,
+ 'sftp://user@otherhost.com/abs/path/sub')
+ self.assertRaises(errors.PathNotChild, t.relpath,
+ 'sftp://user@host.com:33/abs/path/sub')
+ # Make sure it works when we don't supply a username
+ t = transport.ConnectedTransport('sftp://host.com/abs/path')
+ self.assertEquals(t.relpath('sftp://host.com/abs/path/sub'), 'sub')
+
+ # Make sure it works when parts of the path will be url encoded
+ t = transport.ConnectedTransport('sftp://host.com/dev/%path')
+ self.assertEquals(t.relpath('sftp://host.com/dev/%path/sub'), 'sub')
+
+ def test_connection_sharing_propagate_credentials(self):
+ t = transport.ConnectedTransport('ftp://user@host.com/abs/path')
+ self.assertEquals('user', t._parsed_url.user)
+ self.assertEquals('host.com', t._parsed_url.host)
+ self.assertIs(None, t._get_connection())
+ self.assertIs(None, t._parsed_url.password)
+ c = t.clone('subdir')
+ self.assertIs(None, c._get_connection())
+ self.assertIs(None, t._parsed_url.password)
+
+ # Simulate the user entering a password
+ password = 'secret'
+ connection = object()
+ t._set_connection(connection, password)
+ self.assertIs(connection, t._get_connection())
+ self.assertIs(password, t._get_credentials())
+ self.assertIs(connection, c._get_connection())
+ self.assertIs(password, c._get_credentials())
+
+ # credentials can be updated
+ new_password = 'even more secret'
+ c._update_credentials(new_password)
+ self.assertIs(connection, t._get_connection())
+ self.assertIs(new_password, t._get_credentials())
+ self.assertIs(connection, c._get_connection())
+ self.assertIs(new_password, c._get_credentials())
+
+
+class TestReusedTransports(tests.TestCase):
+ """Tests for transport reuse"""
+
+ def test_reuse_same_transport(self):
+ possible_transports = []
+ t1 = transport.get_transport_from_url('http://foo/',
+ possible_transports=possible_transports)
+ self.assertEqual([t1], possible_transports)
+ t2 = transport.get_transport_from_url('http://foo/',
+ possible_transports=[t1])
+ self.assertIs(t1, t2)
+
+ # Also check that a trailing '/' is handled correctly
+ t3 = transport.get_transport_from_url('http://foo/path/')
+ t4 = transport.get_transport_from_url('http://foo/path',
+ possible_transports=[t3])
+ self.assertIs(t3, t4)
+
+ t5 = transport.get_transport_from_url('http://foo/path')
+ t6 = transport.get_transport_from_url('http://foo/path/',
+ possible_transports=[t5])
+ self.assertIs(t5, t6)
+
+ def test_don_t_reuse_different_transport(self):
+ t1 = transport.get_transport_from_url('http://foo/path')
+ t2 = transport.get_transport_from_url('http://bar/path',
+ possible_transports=[t1])
+ self.assertIsNot(t1, t2)
+
+
+class TestTransportTrace(tests.TestCase):
+
+ def test_decorator(self):
+ t = transport.get_transport_from_url('trace+memory://')
+ self.assertIsInstance(
+ t, bzrlib.transport.trace.TransportTraceDecorator)
+
+ def test_clone_preserves_activity(self):
+ t = transport.get_transport_from_url('trace+memory://')
+ t2 = t.clone('.')
+ self.assertTrue(t is not t2)
+ self.assertTrue(t._activity is t2._activity)
+
+ # the following specific tests are for the operations that have made use
+ # of logging in tests; we could test every single operation, but doing so
+ # still won't cause a test failure when the top level Transport API
+ # changes, so there is little return in doing that.
+ def test_get(self):
+ t = transport.get_transport_from_url('trace+memory:///')
+ t.put_bytes('foo', 'barish')
+ t.get('foo')
+ expected_result = []
+ # put_bytes records the number of bytes, not the content, to avoid
+ # memory pressure.
+ expected_result.append(('put_bytes', 'foo', 6, None))
+ # get records the file name only.
+ expected_result.append(('get', 'foo'))
+ self.assertEqual(expected_result, t._activity)
+
+ def test_readv(self):
+ t = transport.get_transport_from_url('trace+memory:///')
+ t.put_bytes('foo', 'barish')
+ list(t.readv('foo', [(0, 1), (3, 2)],
+ adjust_for_latency=True, upper_limit=6))
+ expected_result = []
+ # put_bytes records the number of bytes, not the content, to avoid
+ # memory pressure.
+ expected_result.append(('put_bytes', 'foo', 6, None))
+ # readv records the supplied offset request
+ expected_result.append(('readv', 'foo', [(0, 1), (3, 2)], True, 6))
+ self.assertEqual(expected_result, t._activity)
+
+
+class TestSSHConnections(tests.TestCaseWithTransport):
+
+ def test_bzr_connect_to_bzr_ssh(self):
+ """get_transport of a bzr+ssh:// behaves correctly.
+
+ bzr+ssh:// should cause bzr to run a remote bzr smart server over SSH.
+ """
+ # This test actually causes a bzr instance to be invoked, which is very
+ # expensive: it should be the only such test in the test suite.
+ # A reasonable evolution for this would be to simply check inside
+ # check_channel_exec_request that the command is appropriate, and then
+ # satisfy requests in-process.
+ self.requireFeature(features.paramiko)
+ # SFTPFullAbsoluteServer has a get_url method, and doesn't
+ # override the interface (doesn't change self._vendor).
+ # Note that this does encryption, so can be slow.
+ from bzrlib.tests import stub_sftp
+
+ # Start an SSH server
+ self.command_executed = []
+ # XXX: This is horrible -- we define a really dumb SSH server that
+ # executes commands, and manage the hooking up of stdin/out/err to the
+ # SSH channel ourselves. Surely this has already been implemented
+ # elsewhere?
+ started = []
+
+ class StubSSHServer(stub_sftp.StubServer):
+
+ test = self
+
+ def check_channel_exec_request(self, channel, command):
+ self.test.command_executed.append(command)
+ proc = subprocess.Popen(
+ command, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # XXX: horribly inefficient, not to mention ugly.
+ # Start a thread for each of stdin/out/err, and relay bytes
+ # from the subprocess to channel and vice versa.
+ def ferry_bytes(read, write, close):
+ while True:
+ bytes = read(1)
+ if bytes == '':
+ close()
+ break
+ write(bytes)
+
+ file_functions = [
+ (channel.recv, proc.stdin.write, proc.stdin.close),
+ (proc.stdout.read, channel.sendall, channel.close),
+ (proc.stderr.read, channel.sendall_stderr, channel.close)]
+ started.append(proc)
+ for read, write, close in file_functions:
+ t = threading.Thread(
+ target=ferry_bytes, args=(read, write, close))
+ t.start()
+ started.append(t)
+
+ return True
+
+ ssh_server = stub_sftp.SFTPFullAbsoluteServer(StubSSHServer)
+ # We *don't* want to override the default SSH vendor: the detected one
+ # is the one to use.
+
+ # FIXME: I don't understand the above comment, SFTPFullAbsoluteServer
+ # inherits from SFTPServer which forces the SSH vendor to
+ # ssh.ParamikoVendor(). So it's forced, not detected. --vila 20100623
+ self.start_server(ssh_server)
+ port = ssh_server.port
+
+ if sys.platform == 'win32':
+ bzr_remote_path = sys.executable + ' ' + self.get_bzr_path()
+ else:
+ bzr_remote_path = self.get_bzr_path()
+ self.overrideEnv('BZR_REMOTE_PATH', bzr_remote_path)
+
+ # Access the branch via a bzr+ssh URL. The BZR_REMOTE_PATH environment
+ # variable is used to tell bzr what command to run on the remote end.
+ path_to_branch = osutils.abspath('.')
+ if sys.platform == 'win32':
+ # On Windows, we export all drives as '/C:/', etc. So we need to
+ # prefix a '/' to get the right path.
+ path_to_branch = '/' + path_to_branch
+ url = 'bzr+ssh://fred:secret@localhost:%d%s' % (port, path_to_branch)
+ t = transport.get_transport(url)
+ self.permit_url(t.base)
+ t.mkdir('foo')
+
+ self.assertEqual(
+ ['%s serve --inet --directory=/ --allow-writes' % bzr_remote_path],
+ self.command_executed)
+ # Make sure to disconnect, so that the remote process can stop, and we
+ # can clean up. Then pause the test until everything has shut down.
+ t._client._medium.disconnect()
+ if not started:
+ return
+ # First wait for the subprocess
+ started[0].wait()
+ # And the rest are threads
+ for t in started[1:]:
+ t.join()
+
+
+class TestUnhtml(tests.TestCase):
+
+ """Tests for unhtml_roughly"""
+
+ def test_truncation(self):
+ fake_html = "<p>something!\n" * 1000
+ result = http.unhtml_roughly(fake_html)
+ self.assertEquals(len(result), 1000)
+ self.assertStartsWith(result, " something!")
+
+
+class SomeDirectory(object):
+
+ def look_up(self, name, url):
+ return "http://bar"
+
+
+class TestLocationToUrl(tests.TestCase):
+
+ def get_base_location(self):
+ path = osutils.abspath('/foo/bar')
+ if path.startswith('/'):
+ url = 'file://%s' % (path,)
+ else:
+ # On Windows, abspaths start with the drive letter, so we have to
+ # add in the extra '/'
+ url = 'file:///%s' % (path,)
+ return path, url
+
+ def test_regular_url(self):
+ self.assertEquals("file://foo", location_to_url("file://foo"))
+
+ def test_directory(self):
+ directories.register("bar:", SomeDirectory, "Dummy directory")
+ self.addCleanup(directories.remove, "bar:")
+ self.assertEquals("http://bar", location_to_url("bar:"))
+
+ def test_unicode_url(self):
+ self.assertRaises(errors.InvalidURL, location_to_url,
+ "http://fo/\xc3\xaf".decode("utf-8"))
+
+ def test_unicode_path(self):
+ path, url = self.get_base_location()
+ location = path + "\xc3\xaf".decode("utf-8")
+ url += '%C3%AF'
+ self.assertEquals(url, location_to_url(location))
+
+ def test_path(self):
+ path, url = self.get_base_location()
+ self.assertEquals(url, location_to_url(path))
+
+ def test_relative_file_url(self):
+ self.assertEquals(urlutils.local_path_to_url(".") + "/bar",
+ location_to_url("file:bar"))
+
+ def test_absolute_file_url(self):
+ self.assertEquals("file:///bar", location_to_url("file:/bar"))
diff --git a/bzrlib/tests/test_transport_log.py b/bzrlib/tests/test_transport_log.py
new file mode 100644
index 0000000..28a69a2
--- /dev/null
+++ b/bzrlib/tests/test_transport_log.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for log+ transport decorator."""
+
+
+from bzrlib import transport
+from bzrlib.tests import TestCaseWithMemoryTransport
+from bzrlib.trace import mutter
+from bzrlib.transport.log import TransportLogDecorator
+
+
+class TestTransportLog(TestCaseWithMemoryTransport):
+
+ def test_log_transport(self):
+ base_transport = self.get_transport('')
+ logging_transport = transport.get_transport(
+ 'log+' + base_transport.base)
+
+ # operations such as mkdir are logged
+ mutter('where are you?')
+ logging_transport.mkdir('subdir')
+ log = self.get_log()
+ self.assertContainsRe(log, r'mkdir memory\+\d+://.*subdir')
+ self.assertContainsRe(log, ' --> None')
+ # they have the expected effect
+ self.assertTrue(logging_transport.has('subdir'))
+ # and they operate on the underlying transport
+ self.assertTrue(base_transport.has('subdir'))
+
+ def test_log_readv(self):
+ # see <https://bugs.launchpad.net/bzr/+bug/340347>
+
+ # transports are not required to return a generator, but we
+ # specifically want to check that those that do cause it to be passed
+ # through, for the sake of minimum interference
+ base_transport = DummyReadvTransport()
+ # construct it directly to avoid needing the dummy transport to be
+ # registered etc
+ logging_transport = TransportLogDecorator(
+ 'log+dummy:///', _decorated=base_transport)
+
+ result = base_transport.readv('foo', [(0, 10)])
+ # sadly there's no types.IteratorType, and GeneratorType is too
+ # specific
+ self.assertTrue(getattr(result, 'next'))
+
+ result = logging_transport.readv('foo', [(0, 10)])
+ self.assertTrue(getattr(result, 'next'))
+ self.assertEquals(list(result),
+ [(0, 'abcdefghij')])
+
+
+class DummyReadvTransport(object):
+
+ base = 'dummy:///'
+
+ def readv(self, filename, offset_length_pairs):
+ yield (0, 'abcdefghij')
+
+ def abspath(self, path):
+ return self.base + path
diff --git a/bzrlib/tests/test_tree.py b/bzrlib/tests/test_tree.py
new file mode 100644
index 0000000..13295e0
--- /dev/null
+++ b/bzrlib/tests/test_tree.py
@@ -0,0 +1,418 @@
+# Copyright (C) 2006-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for Tree and InterTree."""
+
+from bzrlib import (
+ errors,
+ revision,
+ tree as _mod_tree,
+ )
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.tree import InterTree
+
+
+class TestInterTree(TestCaseWithTransport):
+
+ def test_revision_tree_revision_tree(self):
+ # we should have an InterTree registered for RevisionTree to
+ # RevisionTree.
+ tree = self.make_branch_and_tree('.')
+ rev_id = tree.commit('first post')
+ rev_id2 = tree.commit('second post', allow_pointless=True)
+ rev_tree = tree.branch.repository.revision_tree(rev_id)
+ rev_tree2 = tree.branch.repository.revision_tree(rev_id2)
+ optimiser = InterTree.get(rev_tree, rev_tree2)
+ self.assertIsInstance(optimiser, InterTree)
+ optimiser = InterTree.get(rev_tree2, rev_tree)
+ self.assertIsInstance(optimiser, InterTree)
+
+ def test_working_tree_revision_tree(self):
+ # we should have an InterTree available for WorkingTree to
+ # RevisionTree.
+ tree = self.make_branch_and_tree('.')
+ rev_id = tree.commit('first post')
+ rev_tree = tree.branch.repository.revision_tree(rev_id)
+ optimiser = InterTree.get(rev_tree, tree)
+ self.assertIsInstance(optimiser, InterTree)
+ optimiser = InterTree.get(tree, rev_tree)
+ self.assertIsInstance(optimiser, InterTree)
+
+ def test_working_tree_working_tree(self):
+ # we should have an InterTree available for WorkingTree to
+ # WorkingTree.
+ tree = self.make_branch_and_tree('1')
+ tree2 = self.make_branch_and_tree('2')
+ optimiser = InterTree.get(tree, tree2)
+ self.assertIsInstance(optimiser, InterTree)
+ optimiser = InterTree.get(tree2, tree)
+ self.assertIsInstance(optimiser, InterTree)
+
+
+class RecordingOptimiser(InterTree):
+
+ calls = []
+
+ def compare(self, want_unchanged=False, specific_files=None,
+ extra_trees=None, require_versioned=False, include_root=False,
+ want_unversioned=False):
+ self.calls.append(
+ ('compare', self.source, self.target, want_unchanged,
+ specific_files, extra_trees, require_versioned,
+ include_root, want_unversioned)
+ )
+
+ @classmethod
+ def is_compatible(klass, source, target):
+ return True
+
+
+class TestTree(TestCaseWithTransport):
+
+ def test_compare_calls_InterTree_compare(self):
+ """This test tests the way Tree.compare() uses InterTree."""
+ old_optimisers = InterTree._optimisers
+ try:
+ InterTree._optimisers = []
+ RecordingOptimiser.calls = []
+ InterTree.register_optimiser(RecordingOptimiser)
+ tree = self.make_branch_and_tree('1')
+ tree2 = self.make_branch_and_tree('2')
+ # do a series of calls:
+ # trivial usage
+ tree.changes_from(tree2)
+ # pass in all optional arguments by position
+ tree.changes_from(tree2, 'unchanged', 'specific', 'extra',
+ 'require', True)
+ # pass in all optional arguments by keyword
+ tree.changes_from(tree2,
+ specific_files='specific',
+ want_unchanged='unchanged',
+ extra_trees='extra',
+ require_versioned='require',
+ include_root=True,
+ want_unversioned=True,
+ )
+ finally:
+ InterTree._optimisers = old_optimisers
+ self.assertEqual(
+ [
+ ('compare', tree2, tree, False, None, None, False, False, False),
+ ('compare', tree2, tree, 'unchanged', 'specific', 'extra',
+ 'require', True, False),
+ ('compare', tree2, tree, 'unchanged', 'specific', 'extra',
+ 'require', True, True),
+ ], RecordingOptimiser.calls)
+
+ def test_changes_from_with_root(self):
+ """Ensure the include_root option does what's expected."""
+ wt = self.make_branch_and_tree('.')
+ delta = wt.changes_from(wt.basis_tree())
+ self.assertEqual(len(delta.added), 0)
+ delta = wt.changes_from(wt.basis_tree(), include_root=True)
+ self.assertEqual(len(delta.added), 1)
+ self.assertEqual(delta.added[0][0], '')
+
+ def test_changes_from_with_require_versioned(self):
+ """Ensure the require_versioned option does what's expected."""
+ wt = self.make_branch_and_tree('.')
+ self.build_tree(['known_file', 'unknown_file'])
+ wt.add('known_file')
+
+ self.assertRaises(errors.PathsNotVersionedError,
+ wt.changes_from, wt.basis_tree(), wt, specific_files=['known_file',
+ 'unknown_file'], require_versioned=True)
+
+ # we need to pass a known file with an unknown file to get this to
+ # fail when expected.
+ delta = wt.changes_from(wt.basis_tree(),
+ specific_files=['known_file', 'unknown_file'],
+ require_versioned=False)
+ self.assertEqual(len(delta.added), 1)
+
+
+class TestMultiWalker(TestCaseWithTransport):
+
+ def assertStepOne(self, has_more, path, file_id, iterator):
+ retval = _mod_tree.MultiWalker._step_one(iterator)
+ if not has_more:
+ self.assertIs(None, path)
+ self.assertIs(None, file_id)
+ self.assertEqual((False, None, None), retval)
+ else:
+ self.assertEqual((has_more, path, file_id),
+ (retval[0], retval[1], retval[2].file_id))
+
+ def test__step_one_empty(self):
+ tree = self.make_branch_and_tree('empty')
+ repo = tree.branch.repository
+ empty_tree = repo.revision_tree(revision.NULL_REVISION)
+
+ iterator = empty_tree.iter_entries_by_dir()
+ self.assertStepOne(False, None, None, iterator)
+ self.assertStepOne(False, None, None, iterator)
+
+ def test__step_one(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b/', 'tree/b/c'])
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+
+ iterator = tree.iter_entries_by_dir()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+
+ root_id = tree.path2id('')
+ self.assertStepOne(True, '', root_id, iterator)
+ self.assertStepOne(True, 'a', 'a-id', iterator)
+ self.assertStepOne(True, 'b', 'b-id', iterator)
+ self.assertStepOne(True, 'b/c', 'c-id', iterator)
+ self.assertStepOne(False, None, None, iterator)
+ self.assertStepOne(False, None, None, iterator)
+
+ def assertWalkerNext(self, exp_path, exp_file_id, master_has_node,
+ exp_other_paths, iterator):
+ """Check what happens when we step the iterator.
+
+ :param exp_path: The expected path for this entry
+ :param exp_file_id: The expected file_id for this entry
+ :param master_has_node: Does the master tree have this entry?
+ :param exp_other_paths: A list of other_path values.
+ :param iterator: The iterator to step
+ """
+ path, file_id, master_ie, other_values = iterator.next()
+ self.assertEqual((exp_path, exp_file_id), (path, file_id),
+ 'Master entry did not match')
+ if master_has_node:
+ self.assertIsNot(None, master_ie, 'master should have an entry')
+ else:
+ self.assertIs(None, master_ie, 'master should not have an entry')
+ self.assertEqual(len(exp_other_paths), len(other_values),
+ 'Wrong number of other entries')
+ other_paths = []
+ other_file_ids = []
+ for path, ie in other_values:
+ other_paths.append(path)
+ if ie is None:
+ other_file_ids.append(None)
+ else:
+ other_file_ids.append(ie.file_id)
+
+ exp_file_ids = []
+ for path in exp_other_paths:
+ if path is None:
+ exp_file_ids.append(None)
+ else:
+ exp_file_ids.append(file_id)
+ self.assertEqual(exp_other_paths, other_paths, "Other paths incorrect")
+ self.assertEqual(exp_file_ids, other_file_ids,
+ "Other file_ids incorrect")
+
+ def lock_and_get_basis_and_root_id(self, tree):
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ self.addCleanup(basis_tree.unlock)
+ root_id = tree.path2id('')
+ return basis_tree, root_id
+
+ def test_simple_stepping(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b/', 'tree/b/c'])
+ tree.add(['a', 'b', 'b/c'], ['a-id', 'b-id', 'c-id'])
+
+ tree.commit('first', rev_id='first-rev-id')
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+
+ walker = _mod_tree.MultiWalker(tree, [basis_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator)
+ self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator)
+ self.assertWalkerNext(u'b/c', 'c-id', True, [u'b/c'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_master_has_extra(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b/', 'tree/c', 'tree/d'])
+ tree.add(['a', 'b', 'd'], ['a-id', 'b-id', 'd-id'])
+
+ tree.commit('first', rev_id='first-rev-id')
+
+ tree.add(['c'], ['c-id'])
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+
+ walker = _mod_tree.MultiWalker(tree, [basis_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator)
+ self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator)
+ self.assertWalkerNext(u'c', 'c-id', True, [None], iterator)
+ self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_master_renamed_to_earlier(self):
+ """The record is still present, it just shows up early."""
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/c', 'tree/d'])
+ tree.add(['a', 'c', 'd'], ['a-id', 'c-id', 'd-id'])
+ tree.commit('first', rev_id='first-rev-id')
+ tree.rename_one('d', 'b')
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+
+ walker = _mod_tree.MultiWalker(tree, [basis_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator)
+ self.assertWalkerNext(u'b', 'd-id', True, [u'd'], iterator)
+ self.assertWalkerNext(u'c', 'c-id', True, [u'c'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_master_renamed_to_later(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/d'])
+ tree.add(['a', 'b', 'd'], ['a-id', 'b-id', 'd-id'])
+ tree.commit('first', rev_id='first-rev-id')
+ tree.rename_one('b', 'e')
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+
+ walker = _mod_tree.MultiWalker(tree, [basis_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator)
+ self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator)
+ self.assertWalkerNext(u'e', 'b-id', True, [u'b'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_other_extra_in_middle(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/d'])
+ tree.add(['a', 'b', 'd'], ['a-id', 'b-id', 'd-id'])
+ tree.commit('first', rev_id='first-rev-id')
+ tree.remove(['b'])
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+ walker = _mod_tree.MultiWalker(tree, [basis_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator)
+ self.assertWalkerNext(u'd', 'd-id', True, [u'd'], iterator)
+ self.assertWalkerNext(u'b', 'b-id', False, [u'b'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_other_extra_at_end(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/d'])
+ tree.add(['a', 'b', 'd'], ['a-id', 'b-id', 'd-id'])
+ tree.commit('first', rev_id='first-rev-id')
+ tree.remove(['d'])
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+ walker = _mod_tree.MultiWalker(tree, [basis_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a'], iterator)
+ self.assertWalkerNext(u'b', 'b-id', True, [u'b'], iterator)
+ self.assertWalkerNext(u'd', 'd-id', False, [u'd'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_others_extra_at_end(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/c', 'tree/d', 'tree/e'])
+ tree.add(['a', 'b', 'c', 'd', 'e'],
+ ['a-id', 'b-id', 'c-id', 'd-id', 'e-id'])
+ tree.commit('first', rev_id='first-rev-id')
+ tree.remove(['e'])
+ tree.commit('second', rev_id='second-rev-id')
+ tree.remove(['d'])
+ tree.commit('third', rev_id='third-rev-id')
+ tree.remove(['c'])
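+ # At this point the working tree holds a,b; the basis (third commit)
+ # also has c; the second revision additionally has d; and the first
+ # also has e.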
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+ first_tree = tree.branch.repository.revision_tree('first-rev-id')
+ second_tree = tree.branch.repository.revision_tree('second-rev-id')
+ walker = _mod_tree.MultiWalker(tree, [basis_tree, first_tree,
+ second_tree])
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u'', u'', u''], iterator)
+ self.assertWalkerNext(u'a', 'a-id', True, [u'a', u'a', u'a'], iterator)
+ self.assertWalkerNext(u'b', 'b-id', True, [u'b', u'b', u'b'], iterator)
+ self.assertWalkerNext(u'c', 'c-id', False, [u'c', u'c', u'c'], iterator)
+ self.assertWalkerNext(u'd', 'd-id', False, [None, u'd', u'd'], iterator)
+ self.assertWalkerNext(u'e', 'e-id', False, [None, u'e', None], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def test_different_file_id_in_others(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/a', 'tree/b', 'tree/c/'])
+ tree.add(['a', 'b', 'c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('first', rev_id='first-rev-id')
+
+ tree.rename_one('b', 'c/d')
+ self.build_tree(['tree/b'])
+ tree.add(['b'], ['b2-id'])
+ tree.commit('second', rev_id='second-rev-id')
+
+ tree.rename_one('a', 'c/e')
+ self.build_tree(['tree/a'])
+ tree.add(['a'], ['a2-id'])
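+ # The working tree now has a (a2-id), b (b2-id), c, c/d (old b-id) and
+ # c/e (old a-id); the basis lacks c/e, and the first revision has only
+ # the original a, b and c.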
+
+ basis_tree, root_id = self.lock_and_get_basis_and_root_id(tree)
+ first_tree = tree.branch.repository.revision_tree('first-rev-id')
+ walker = _mod_tree.MultiWalker(tree, [basis_tree, first_tree])
+
+ iterator = walker.iter_all()
+ self.assertWalkerNext(u'', root_id, True, [u'', u''], iterator)
+ self.assertWalkerNext(u'a', 'a2-id', True, [None, None], iterator)
+ self.assertWalkerNext(u'b', 'b2-id', True, [u'b', None], iterator)
+ self.assertWalkerNext(u'c', 'c-id', True, [u'c', u'c'], iterator)
+ self.assertWalkerNext(u'c/d', 'b-id', True, [u'c/d', u'b'], iterator)
+ self.assertWalkerNext(u'c/e', 'a-id', True, [u'a', u'a'], iterator)
+ self.assertRaises(StopIteration, iterator.next)
+
+ def assertCmpByDirblock(self, cmp_val, path1, path2):
+ self.assertEqual(cmp_val,
+ _mod_tree.MultiWalker._cmp_path_by_dirblock(path1, path2))
+
+ def test__cmp_path_by_dirblock(self):
+ # We only support Unicode strings at this point
+ self.assertRaises(TypeError,
+ _mod_tree.MultiWalker._cmp_path_by_dirblock, '', 'b')
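+ # In dirblock order, entries directly inside a directory sort before
+ # entries in its subdirectories, so e.g. u'a-a' sorts after u'a' but
+ # before u'a/a'.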
+ self.assertCmpByDirblock(0, u'', u'')
+ self.assertCmpByDirblock(0, u'a', u'a')
+ self.assertCmpByDirblock(0, u'a/b', u'a/b')
+ self.assertCmpByDirblock(0, u'a/b/c', u'a/b/c')
+ self.assertCmpByDirblock(1, u'a-a', u'a')
+ self.assertCmpByDirblock(-1, u'a-a', u'a/a')
+ self.assertCmpByDirblock(-1, u'a=a', u'a/a')
+ self.assertCmpByDirblock(1, u'a-a/a', u'a/a')
+ self.assertCmpByDirblock(1, u'a=a/a', u'a/a')
+ self.assertCmpByDirblock(1, u'a-a/a', u'a/a/a')
+ self.assertCmpByDirblock(1, u'a=a/a', u'a/a/a')
+ self.assertCmpByDirblock(1, u'a-a/a/a', u'a/a/a')
+ self.assertCmpByDirblock(1, u'a=a/a/a', u'a/a/a')
+
+ def assertPathToKey(self, expected, path):
+ self.assertEqual(expected, _mod_tree.MultiWalker._path_to_key(path))
+
+ def test__path_to_key(self):
+ self.assertPathToKey(([u''], u''), u'')
+ self.assertPathToKey(([u''], u'a'), u'a')
+ self.assertPathToKey(([u'a'], u'b'), u'a/b')
+ self.assertPathToKey(([u'a', u'b'], u'c'), u'a/b/c')
diff --git a/bzrlib/tests/test_treebuilder.py b/bzrlib/tests/test_treebuilder.py
new file mode 100644
index 0000000..0d19861
--- /dev/null
+++ b/bzrlib/tests/test_treebuilder.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2006 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the TreeBuilder helper class."""
+
+from bzrlib import errors, tests
+from bzrlib.memorytree import MemoryTree
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.treebuilder import TreeBuilder
+
+
+class FakeTree(object):
+ """A pretend tree to test the calls made by TreeBuilder."""
+
+ def __init__(self):
+ self._calls = []
+
+ def lock_tree_write(self):
+ self._calls.append("lock_tree_write")
+
+ def unlock(self):
+ self._calls.append("unlock")
+
+
+class TestFakeTree(TestCaseWithTransport):
+
+ def testFakeTree(self):
+ """Check that FakeTree works as required for the TreeBuilder tests."""
+ tree = FakeTree()
+ self.assertEqual([], tree._calls)
+ tree.lock_tree_write()
+ self.assertEqual(["lock_tree_write"], tree._calls)
+ tree.unlock()
+ self.assertEqual(["lock_tree_write", "unlock"], tree._calls)
+
+
+class TestTreeBuilderMemoryTree(tests.TestCaseWithMemoryTransport):
+
+ def test_create(self):
+ builder = TreeBuilder()
+
+ def test_start_tree_locks_write(self):
+ builder = TreeBuilder()
+ tree = FakeTree()
+ builder.start_tree(tree)
+ self.assertEqual(["lock_tree_write"], tree._calls)
+
+ def test_start_tree_when_started_fails(self):
+ builder = TreeBuilder()
+ tree = FakeTree()
+ builder.start_tree(tree)
+ self.assertRaises(errors.AlreadyBuilding, builder.start_tree, tree)
+
+ def test_finish_tree_not_started_errors(self):
+ builder = TreeBuilder()
+ self.assertRaises(errors.NotBuilding, builder.finish_tree)
+
+ def test_finish_tree_unlocks(self):
+ builder = TreeBuilder()
+ tree = FakeTree()
+ builder.start_tree(tree)
+ builder.finish_tree()
+ self.assertEqual(["lock_tree_write", "unlock"], tree._calls)
+
+ def test_build_tree_not_started_errors(self):
+ builder = TreeBuilder()
+ self.assertRaises(errors.NotBuilding, builder.build, "foo")
+
+ def test_build_tree(self):
+ """Test building works using a MemoryTree."""
+ branch = self.make_branch('branch')
+ tree = MemoryTree.create_on_branch(branch)
+ builder = TreeBuilder()
+ builder.start_tree(tree)
+ builder.build(['foo', "bar/", "bar/file"])
+ self.assertEqual('contents of foo\n',
+ tree.get_file(tree.path2id('foo')).read())
+ self.assertEqual('contents of bar/file\n',
+ tree.get_file(tree.path2id('bar/file')).read())
+ builder.finish_tree()
+
diff --git a/bzrlib/tests/test_treeshape.py b/bzrlib/tests/test_treeshape.py
new file mode 100644
index 0000000..d581070
--- /dev/null
+++ b/bzrlib/tests/test_treeshape.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+import os
+
+
+from bzrlib import tests
+from bzrlib.tests import (
+ features,
+ )
+
+
+class TestTreeShape(tests.TestCaseWithTransport):
+
+ def test_build_tree(self):
+ """Test tree-building test helper"""
+ self.build_tree_contents([
+ ('foo', 'new contents'),
+ ('.bzr/',),
+ ('.bzr/README', 'hello'),
+ ])
+ self.assertPathExists('foo')
+ self.assertPathExists('.bzr/README')
+ self.assertFileEqual('hello', '.bzr/README')
+
+ def test_build_tree_symlink(self):
+ self.requireFeature(features.SymlinkFeature)
+ self.build_tree_contents([('link@', 'target')])
+ self.assertEqual('target',
+ os.readlink('link'))
diff --git a/bzrlib/tests/test_tsort.py b/bzrlib/tests/test_tsort.py
new file mode 100644
index 0000000..ac8d560
--- /dev/null
+++ b/bzrlib/tests/test_tsort.py
@@ -0,0 +1,695 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for topological sort."""
+
+import pprint
+
+from bzrlib.tests import TestCase
+from bzrlib.tsort import topo_sort, TopoSorter, MergeSorter, merge_sort
+from bzrlib.errors import GraphCycleError
+from bzrlib.revision import NULL_REVISION
+
+
+class TopoSortTests(TestCase):
+
+ def assertSortAndIterate(self, graph, result_list):
+ """Check that sorting and iter_topo_order on graph works."""
+ self.assertEquals(result_list, topo_sort(graph))
+ self.assertEqual(result_list,
+ list(TopoSorter(graph).iter_topo_order()))
+
+ def assertSortAndIterateRaise(self, exception_type, graph):
+ """Try both iterating and topo_sorting graph and expect an exception."""
+ self.assertRaises(exception_type, topo_sort, graph)
+ self.assertRaises(exception_type,
+ list,
+ TopoSorter(graph).iter_topo_order())
+
+ def assertSortAndIterateOrder(self, graph):
+ """Check topo_sort and iter_topo_order is genuinely topological order.
+
+ For every child in the graph, check if it comes after all of it's
+ parents.
+ """
+ sort_result = topo_sort(graph)
+ iter_result = list(TopoSorter(graph).iter_topo_order())
+ for (node, parents) in graph:
+ for parent in parents:
+ if sort_result.index(node) < sort_result.index(parent):
+ self.fail("parent %s must come before child %s:\n%s"
+ % (parent, node, sort_result))
+ if iter_result.index(node) < iter_result.index(parent):
+ self.fail("parent %s must come before child %s:\n%s"
+ % (parent, node, iter_result))
+
+ def test_tsort_empty(self):
+ """TopoSort empty list"""
+ self.assertSortAndIterate([], [])
+
+ def test_tsort_easy(self):
+ """TopoSort list with one node"""
+ self.assertSortAndIterate({0: []}.items(), [0])
+
+ def test_tsort_cycle(self):
+ """TopoSort traps graph with cycles"""
+ self.assertSortAndIterateRaise(GraphCycleError,
+ {0: [1],
+ 1: [0]}.items())
+
+ def test_tsort_cycle_2(self):
+ """TopoSort traps graph with longer cycle"""
+ self.assertSortAndIterateRaise(GraphCycleError,
+ {0: [1],
+ 1: [2],
+ 2: [0]}.items())
+
+ def test_topo_sort_cycle_with_tail(self):
+ """TopoSort traps graph with longer cycle"""
+ self.assertSortAndIterateRaise(GraphCycleError,
+ {0: [1],
+ 1: [2],
+ 2: [3, 4],
+ 3: [0],
+ 4: []}.items())
+
+ def test_tsort_1(self):
+ """TopoSort simple nontrivial graph"""
+ self.assertSortAndIterate({0: [3],
+ 1: [4],
+ 2: [1, 4],
+ 3: [],
+ 4: [0, 3]}.items(),
+ [3, 0, 4, 1, 2])
+
+ def test_tsort_partial(self):
+ """Topological sort with partial ordering.
+
+ Multiple correct orderings are possible, so test for
+ correctness, not for exact match on the resulting list.
+ """
+ self.assertSortAndIterateOrder([(0, []),
+ (1, [0]),
+ (2, [0]),
+ (3, [0]),
+ (4, [1, 2, 3]),
+ (5, [1, 2]),
+ (6, [1, 2]),
+ (7, [2, 3]),
+ (8, [0, 1, 4, 5, 6])])
+
+ def test_tsort_unincluded_parent(self):
+ """Sort nodes, but don't include some parents in the output"""
+ self.assertSortAndIterate([(0, [1]),
+ (1, [2])],
+ [1, 0])
+
+
+class MergeSortTests(TestCase):
+
+ def assertSortAndIterate(self, graph, branch_tip, result_list,
+ generate_revno, mainline_revisions=None):
+ """Check that merge based sorting and iter_topo_order on graph works."""
+ value = merge_sort(graph, branch_tip,
+ mainline_revisions=mainline_revisions,
+ generate_revno=generate_revno)
+ if result_list != value:
+ self.assertEqualDiff(pprint.pformat(result_list),
+ pprint.pformat(value))
+ self.assertEqual(result_list,
+ list(MergeSorter(
+ graph,
+ branch_tip,
+ mainline_revisions=mainline_revisions,
+ generate_revno=generate_revno,
+ ).iter_topo_order()))
+
+ def test_merge_sort_empty(self):
+ # sorting of an empty graph does not error
+ self.assertSortAndIterate({}, None, [], False)
+ self.assertSortAndIterate({}, None, [], True)
+ self.assertSortAndIterate({}, NULL_REVISION, [], False)
+ self.assertSortAndIterate({}, NULL_REVISION, [], True)
+
+ def test_merge_sort_not_empty_no_tip(self):
+ # merge sorting of a branch starting with None should result
+ # in an empty list: no revisions are dragged in.
+ self.assertSortAndIterate({0: []}.items(), None, [], False)
+ self.assertSortAndIterate({0: []}.items(), None, [], True)
+
+ def test_merge_sort_one_revision(self):
+ # sorting with one revision as the tip returns the correct fields:
+ # fields are: sequence (0), revision id, merge depth (0), end_of_merge
+ self.assertSortAndIterate({'id': []}.items(),
+ 'id',
+ [(0, 'id', 0, True)],
+ False)
+ self.assertSortAndIterate({'id': []}.items(),
+ 'id',
+ [(0, 'id', 0, (1,), True)],
+ True)
+
+ def test_sequence_numbers_increase_no_merges(self):
+ # emit a few revisions with no merges to check the sequence
+ # numbering works in trivial cases
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['B']}.items(),
+ 'C',
+ [(0, 'C', 0, False),
+ (1, 'B', 0, False),
+ (2, 'A', 0, True),
+ ],
+ False
+ )
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['B']}.items(),
+ 'C',
+ [(0, 'C', 0, (3,), False),
+ (1, 'B', 0, (2,), False),
+ (2, 'A', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_sequence_numbers_increase_with_merges(self):
+ # test that sequence numbers increase across merges
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A', 'B']}.items(),
+ 'C',
+ [(0, 'C', 0, False),
+ (1, 'B', 1, True),
+ (2, 'A', 0, True),
+ ],
+ False
+ )
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A', 'B']}.items(),
+ 'C',
+ [(0, 'C', 0, (2,), False),
+ (1, 'B', 1, (1,1,1), True),
+ (2, 'A', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_merge_sort_race(self):
+ # A
+ # |
+ # B-.
+ # |\ \
+ # | | C
+ # | |/
+ # | D
+ # |/
+ # F
+ graph = {'A': [],
+ 'B': ['A'],
+ 'C': ['B'],
+ 'D': ['B', 'C'],
+ 'F': ['B', 'D'],
+ }
+ self.assertSortAndIterate(graph, 'F',
+ [(0, 'F', 0, (3,), False),
+ (1, 'D', 1, (2,2,1), False),
+ (2, 'C', 2, (2,1,1), True),
+ (3, 'B', 0, (2,), False),
+ (4, 'A', 0, (1,), True),
+ ], True)
+ # A
+ # |
+ # B-.
+ # |\ \
+ # | X C
+ # | |/
+ # | D
+ # |/
+ # F
+ graph = {'A': [],
+ 'B': ['A'],
+ 'C': ['B'],
+ 'X': ['B'],
+ 'D': ['X', 'C'],
+ 'F': ['B', 'D'],
+ }
+ self.assertSortAndIterate(graph, 'F',
+ [(0, 'F', 0, (3,), False),
+ (1, 'D', 1, (2,1,2), False),
+ (2, 'C', 2, (2,2,1), True),
+ (3, 'X', 1, (2,1,1), True),
+ (4, 'B', 0, (2,), False),
+ (5, 'A', 0, (1,), True),
+ ], True)
+
+ def test_merge_depth_with_nested_merges(self):
+ # the merge depth marker should reflect the depth of the revision
+ # in terms of merges out from the mainline
+ # revid, depth, parents:
+ # A 0 [D, B]
+ # B 1 [C, F]
+ # C 1 [H]
+ # D 0 [H, E]
+ # E 1 [G, F]
+ # F 2 [G]
+ # G 1 [H]
+ # H 0
+ self.assertSortAndIterate(
+ {'A': ['D', 'B'],
+ 'B': ['C', 'F'],
+ 'C': ['H'],
+ 'D': ['H', 'E'],
+ 'E': ['G', 'F'],
+ 'F': ['G'],
+ 'G': ['H'],
+ 'H': []
+ }.items(),
+ 'A',
+ [(0, 'A', 0, False),
+ (1, 'B', 1, False),
+ (2, 'C', 1, True),
+ (3, 'D', 0, False),
+ (4, 'E', 1, False),
+ (5, 'F', 2, True),
+ (6, 'G', 1, True),
+ (7, 'H', 0, True),
+ ],
+ False
+ )
+ self.assertSortAndIterate(
+ {'A': ['D', 'B'],
+ 'B': ['C', 'F'],
+ 'C': ['H'],
+ 'D': ['H', 'E'],
+ 'E': ['G', 'F'],
+ 'F': ['G'],
+ 'G': ['H'],
+ 'H': []
+ }.items(),
+ 'A',
+ [(0, 'A', 0, (3,), False),
+ (1, 'B', 1, (1,3,2), False),
+ (2, 'C', 1, (1,3,1), True),
+ (3, 'D', 0, (2,), False),
+ (4, 'E', 1, (1,1,2), False),
+ (5, 'F', 2, (1,2,1), True),
+ (6, 'G', 1, (1,1,1), True),
+ (7, 'H', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_dotted_revnos_with_simple_merges(self):
+ # A 1
+ # |\
+ # B C 2, 1.1.1
+ # | |\
+ # D E F 3, 1.1.2, 1.2.1
+ # |/ /|
+ # G H I 4, 1.2.2, 1.3.1
+ # |/ /
+ # J K 5, 1.3.2
+ # |/
+ # L 6
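+ # Expected tuples below are (sequence, revid, merge_depth, revno, end_of_merge).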
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A'],
+ 'D': ['B'],
+ 'E': ['C'],
+ 'F': ['C'],
+ 'G': ['D', 'E'],
+ 'H': ['F'],
+ 'I': ['F'],
+ 'J': ['G', 'H'],
+ 'K': ['I'],
+ 'L': ['J', 'K'],
+ }.items(),
+ 'L',
+ [(0, 'L', 0, (6,), False),
+ (1, 'K', 1, (1,3,2), False),
+ (2, 'I', 1, (1,3,1), True),
+ (3, 'J', 0, (5,), False),
+ (4, 'H', 1, (1,2,2), False),
+ (5, 'F', 1, (1,2,1), True),
+ (6, 'G', 0, (4,), False),
+ (7, 'E', 1, (1,1,2), False),
+ (8, 'C', 1, (1,1,1), True),
+ (9, 'D', 0, (3,), False),
+ (10, 'B', 0, (2,), False),
+ (11, 'A', 0, (1,), True),
+ ],
+ True
+ )
+ # Adding a shortcut from the first revision should not change any of
+ # the existing numbers
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['A'],
+ 'D': ['B'],
+ 'E': ['C'],
+ 'F': ['C'],
+ 'G': ['D', 'E'],
+ 'H': ['F'],
+ 'I': ['F'],
+ 'J': ['G', 'H'],
+ 'K': ['I'],
+ 'L': ['J', 'K'],
+ 'M': ['A'],
+ 'N': ['L', 'M'],
+ }.items(),
+ 'N',
+ [(0, 'N', 0, (7,), False),
+ (1, 'M', 1, (1,4,1), True),
+ (2, 'L', 0, (6,), False),
+ (3, 'K', 1, (1,3,2), False),
+ (4, 'I', 1, (1,3,1), True),
+ (5, 'J', 0, (5,), False),
+ (6, 'H', 1, (1,2,2), False),
+ (7, 'F', 1, (1,2,1), True),
+ (8, 'G', 0, (4,), False),
+ (9, 'E', 1, (1,1,2), False),
+ (10, 'C', 1, (1,1,1), True),
+ (11, 'D', 0, (3,), False),
+ (12, 'B', 0, (2,), False),
+ (13, 'A', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_end_of_merge_not_last_revision_in_branch(self):
+ # within a branch only the last revision gets an
+ # end of merge marker.
+ self.assertSortAndIterate(
+ {'A': ['B'],
+ 'B': [],
+ },
+ 'A',
+ [(0, 'A', 0, False),
+ (1, 'B', 0, True)
+ ],
+ False
+ )
+ self.assertSortAndIterate(
+ {'A': ['B'],
+ 'B': [],
+ },
+ 'A',
+ [(0, 'A', 0, (2,), False),
+ (1, 'B', 0, (1,), True)
+ ],
+ True
+ )
+
+ def test_end_of_merge_multiple_revisions_merged_at_once(self):
+ # when multiple branches are merged at once, both of their
+ # branch-endpoints should be listed as end-of-merge.
+ # Also, the multiple merges should be ordered left to right when
+ # read top to bottom.
+ # * means end of merge
+ # A 0 [H, B, E]
+ # B 1 [D, C]
+ # C 2 [D] *
+ # D 1 [H] *
+ # E 1 [G, F]
+ # F 2 [G] *
+ # G 1 [H] *
+ # H 0 [] *
+ self.assertSortAndIterate(
+ {'A': ['H', 'B', 'E'],
+ 'B': ['D', 'C'],
+ 'C': ['D'],
+ 'D': ['H'],
+ 'E': ['G', 'F'],
+ 'F': ['G'],
+ 'G': ['H'],
+ 'H': [],
+ },
+ 'A',
+ [(0, 'A', 0, False),
+ (1, 'B', 1, False),
+ (2, 'C', 2, True),
+ (3, 'D', 1, True),
+ (4, 'E', 1, False),
+ (5, 'F', 2, True),
+ (6, 'G', 1, True),
+ (7, 'H', 0, True),
+ ],
+ False
+ )
+ self.assertSortAndIterate(
+ {'A': ['H', 'B', 'E'],
+ 'B': ['D', 'C'],
+ 'C': ['D'],
+ 'D': ['H'],
+ 'E': ['G', 'F'],
+ 'F': ['G'],
+ 'G': ['H'],
+ 'H': [],
+ },
+ 'A',
+ [(0, 'A', 0, (2,), False),
+ (1, 'B', 1, (1,3,2), False),
+ (2, 'C', 2, (1,4,1), True),
+ (3, 'D', 1, (1,3,1), True),
+ (4, 'E', 1, (1,1,2), False),
+ (5, 'F', 2, (1,2,1), True),
+ (6, 'G', 1, (1,1,1), True),
+ (7, 'H', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_mainline_revs_partial(self):
+ # when a mainline_revisions list is passed this must
+ # override the graph's idea of mainline, and must also
+ # truncate the output to the specified range, if needed.
+ # so we test both at once: a mainline_revisions list that
+ # disagrees with the graph about which revs are 'mainline'
+ # and also truncates the output.
+ # graph:
+ # A 0 [E, B]
+ # B 1 [D, C]
+ # C 2 [D]
+ # D 1 [E]
+ # E 0
+ # with a mainline of NONE,E,A (the inferred one) this will show the merge
+ # depths above.
+ # with an overridden mainline of NONE,E,D,B,A it should show:
+ # A 0
+ # B 0
+ # C 1
+ # D 0
+ # E 0
+ # and thus when truncated to D,B,A it should show
+ # A 0
+ # B 0
+ # C 1
+ # because C is brought in by B in this view and D
+ # is the terminating revision id
+ # this should also preserve revision numbers: C should still be 2.1.1
+ self.assertSortAndIterate(
+ {'A': ['E', 'B'],
+ 'B': ['D', 'C'],
+ 'C': ['D'],
+ 'D': ['E'],
+ 'E': []
+ },
+ 'A',
+ [(0, 'A', 0, False),
+ (1, 'B', 0, False),
+ (2, 'C', 1, True),
+ ],
+ False,
+ mainline_revisions=['D', 'B', 'A']
+ )
+ self.assertSortAndIterate(
+ {'A': ['E', 'B'],
+ 'B': ['D', 'C'],
+ 'C': ['D'],
+ 'D': ['E'],
+ 'E': []
+ },
+ 'A',
+ [(0, 'A', 0, (4,), False),
+ (1, 'B', 0, (3,), False),
+ (2, 'C', 1, (2,1,1), True),
+ ],
+ True,
+ mainline_revisions=['D', 'B', 'A']
+ )
+
+ def test_mainline_revs_with_none(self):
+ # a simple test to ensure that a mainline_revs
+ # list which goes all the way to None works
+ self.assertSortAndIterate(
+ {'A': [],
+ },
+ 'A',
+ [(0, 'A', 0, True),
+ ],
+ False,
+ mainline_revisions=[None, 'A']
+ )
+ self.assertSortAndIterate(
+ {'A': [],
+ },
+ 'A',
+ [(0, 'A', 0, (1,), True),
+ ],
+ True,
+ mainline_revisions=[None, 'A']
+ )
+
+ def test_mainline_revs_with_ghost(self):
+ # We have a mainline, but the end of it is actually a ghost
+ # The graph that is passed to tsort has had ghosts filtered out, but
+ # the mainline history has not.
+ self.assertSortAndIterate(
+ {'B':[],
+ 'C':['B']}.items(),
+ 'C',
+ [(0, 'C', 0, (2,), False),
+ (1, 'B', 0, (1,), True),
+ ],
+ True, mainline_revisions=['A', 'B', 'C'])
+
+ def test_parallel_root_sequence_numbers_increase_with_merges(self):
+ """When there are parallel roots, check their revnos."""
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': [],
+ 'C': ['A', 'B']}.items(),
+ 'C',
+ [(0, 'C', 0, (2,), False),
+ (1, 'B', 1, (0,1,1), True),
+ (2, 'A', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_revnos_are_globally_assigned(self):
+ """revnos are assigned according to the revision they derive from."""
+ # in this test we set up a number of branches that all derive from
+ # the first revision, and then merge them one at a time; each merged
+ # revision should get a number that still derives from the revision
+ # it was based on.
+ # merge 3: J: ['G', 'I']
+ # branch 3:
+ # I: ['H']
+ # H: ['A']
+ # merge 2: G: ['D', 'F']
+ # branch 2:
+ # F: ['E']
+ # E: ['A']
+ # merge 1: D: ['A', 'C']
+ # branch 1:
+ # C: ['B']
+ # B: ['A']
+ # root: A: []
+ self.assertSortAndIterate(
+ {'J': ['G', 'I'],
+ 'I': ['H',],
+ 'H': ['A'],
+ 'G': ['D', 'F'],
+ 'F': ['E'],
+ 'E': ['A'],
+ 'D': ['A', 'C'],
+ 'C': ['B'],
+ 'B': ['A'],
+ 'A': [],
+ }.items(),
+ 'J',
+ [(0, 'J', 0, (4,), False),
+ (1, 'I', 1, (1,3,2), False),
+ (2, 'H', 1, (1,3,1), True),
+ (3, 'G', 0, (3,), False),
+ (4, 'F', 1, (1,2,2), False),
+ (5, 'E', 1, (1,2,1), True),
+ (6, 'D', 0, (2,), False),
+ (7, 'C', 1, (1,1,2), False),
+ (8, 'B', 1, (1,1,1), True),
+ (9, 'A', 0, (1,), True),
+ ],
+ True
+ )
+
+ def test_roots_and_sub_branches_versus_ghosts(self):
+ """Extra roots and their mini branches use the same numbering.
+
+ All of them use the 0-node numbering.
+ """
+ # A D K
+ # | |\ |\
+ # B E F L M
+ # | |/ |/
+ # C G N
+ # |/ |\
+ # H I O P
+ # |/ |/
+ # J Q
+ # |.---'
+ # R
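+ # The extra roots D, I and K are not on the mainline; they and their
+ # mini branches are numbered under the 0 node (0.1.1, 0.3.1 and 0.4.1
+ # in the expected output below).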
+ self.assertSortAndIterate(
+ {'A': [],
+ 'B': ['A'],
+ 'C': ['B'],
+ 'D': [],
+ 'E': ['D'],
+ 'F': ['D'],
+ 'G': ['E', 'F'],
+ 'H': ['C', 'G'],
+ 'I': [],
+ 'J': ['H', 'I'],
+ 'K': [],
+ 'L': ['K'],
+ 'M': ['K'],
+ 'N': ['L', 'M'],
+ 'O': ['N'],
+ 'P': ['N'],
+ 'Q': ['O', 'P'],
+ 'R': ['J', 'Q'],
+ }.items(),
+ 'R',
+ [( 0, 'R', 0, (6,), False),
+ ( 1, 'Q', 1, (0,4,5), False),
+ ( 2, 'P', 2, (0,6,1), True),
+ ( 3, 'O', 1, (0,4,4), False),
+ ( 4, 'N', 1, (0,4,3), False),
+ ( 5, 'M', 2, (0,5,1), True),
+ ( 6, 'L', 1, (0,4,2), False),
+ ( 7, 'K', 1, (0,4,1), True),
+ ( 8, 'J', 0, (5,), False),
+ ( 9, 'I', 1, (0,3,1), True),
+ (10, 'H', 0, (4,), False),
+ (11, 'G', 1, (0,1,3), False),
+ (12, 'F', 2, (0,2,1), True),
+ (13, 'E', 1, (0,1,2), False),
+ (14, 'D', 1, (0,1,1), True),
+ (15, 'C', 0, (3,), False),
+ (16, 'B', 0, (2,), False),
+ (17, 'A', 0, (1,), True),
+ ],
+ True
+ )
diff --git a/bzrlib/tests/test_tuned_gzip.py b/bzrlib/tests/test_tuned_gzip.py
new file mode 100644
index 0000000..cd9d32b
--- /dev/null
+++ b/bzrlib/tests/test_tuned_gzip.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2006, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for tuned_gzip."""
+
+
+# do not use bzrlib test cases here - this should be suitable for sending
+# upstream.
+from cStringIO import StringIO
+import zlib
+
+
+from bzrlib import (
+ symbol_versioning,
+ tuned_gzip,
+ tests,
+ )
+
+
+class FakeDecompress(object):
+ """A fake decompressor for testing GzipFile."""
+
+ def __init__(self):
+ self.unused_data = ''
+
+ def decompress(self, buf):
+ """Return an empty string as though we are at eof."""
+ # note that the zlib module *overwrites* unused data
+ # on further decompress calls after EOF.
+ self.unused_data = buf
+ return ''
+
+
+class TestFakeDecompress(tests.TestCase):
+ """We use a fake decompressor to test GzipFile.
+
+ This class tests the behaviours we want from it.
+ """
+
+ def test_decompress(self):
+ # decompressing returns no data.
+ decompress = FakeDecompress()
+ self.assertEqual('', decompress.decompress('0'))
+
+ def test_unused_data(self):
+ # after decompressing, we have 1 unused byte.
+ # this is normally set by decompressors when they
+ # detect the end of a compressed stream.
+ decompress = FakeDecompress()
+ decompress.decompress('0')
+ self.assertEqual('0', decompress.unused_data)
+ # decompressing again (when the short read is read)
+ # will give us the latest input in the unused_data
+ # this is arguably a bug in zlib but ...
+ decompress.decompress('1234567')
+ self.assertEqual('1234567', decompress.unused_data)
+
+
+class TestGzip(tests.TestCase):
+
+ def test__read_short_remainder(self):
+ # a _read call at the end of a compressed hunk should
+ # read more bytes if there are fewer than 8 bytes (the
+ # gzip trailer) unread.
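+ # The stream below holds exactly the 8 trailer bytes, so a single
+ # 1-byte _read should consume the whole stream.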
+ stream = StringIO('\0\0\0\0\0\0\0\0')
+ myfile = self.applyDeprecated(
+ symbol_versioning.deprecated_in((2, 3, 0)),
+ tuned_gzip.GzipFile, fileobj=stream)
+ # disable the _new_member check, we are microtesting.
+ myfile._new_member = False
+ myfile.crc = zlib.crc32('')
+ myfile.decompress = FakeDecompress()
+ myfile.size = 0
+ myfile._read(1)
+ # all the data should have been read now
+ self.assertEqual('', stream.read())
+ # and it should be new member time in the stream.
+ self.assertTrue(myfile._new_member)
+
+ def test_negative_crc(self):
+ """Content with a negative crc should not break when written"""
+ sio = StringIO()
+ gfile = self.applyDeprecated(
+ symbol_versioning.deprecated_in((2, 3, 0)),
+ tuned_gzip.GzipFile, mode="w", fileobj=sio)
+ gfile.write("\xFF")
+ gfile.close()
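+ # zlib.crc32 can return a negative int on Python 2; masking with
+ # 0xFFFFFFFF gives the unsigned value that is written (little-endian)
+ # into the gzip trailer.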
+ self.assertEqual(gfile.crc & 0xFFFFFFFFL, 0xFF000000L)
+ self.assertEqual(sio.getvalue()[-8:-4], "\x00\x00\x00\xFF")
+
+
+class TestToGzip(tests.TestCase):
+
+ def assertToGzip(self, chunks):
+ bytes = ''.join(chunks)
+ gzfromchunks = tuned_gzip.chunks_to_gzip(chunks)
+ gzfrombytes = tuned_gzip.bytes_to_gzip(bytes)
+ self.assertEqual(gzfrombytes, gzfromchunks)
+ decoded = self.applyDeprecated(
+ symbol_versioning.deprecated_in((2, 3, 0)),
+ tuned_gzip.GzipFile, fileobj=StringIO(gzfromchunks)).read()
+ self.assertEqual(bytes, decoded)
+
+ def test_single_chunk(self):
+ self.assertToGzip(['a modest chunk\nwith some various\nbits\n'])
+
+ def test_simple_text(self):
+ self.assertToGzip(['some\n', 'strings\n', 'to\n', 'process\n'])
+
+ def test_large_chunks(self):
+ self.assertToGzip(['a large string\n'*1024])
+ self.assertToGzip(['a large string\n']*1024)
+
+ def test_enormous_chunks(self):
+ self.assertToGzip(['a large string\n'*1024*256])
+ self.assertToGzip(['a large string\n']*1024*256)
diff --git a/bzrlib/tests/test_ui.py b/bzrlib/tests/test_ui.py
new file mode 100644
index 0000000..e02d474
--- /dev/null
+++ b/bzrlib/tests/test_ui.py
@@ -0,0 +1,566 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the bzrlib ui
+"""
+
+import time
+
+from StringIO import StringIO
+
+from testtools.matchers import *
+
+from bzrlib import (
+ config,
+ remote,
+ tests,
+ ui as _mod_ui,
+ )
+from bzrlib.tests import (
+ fixtures,
+ )
+from bzrlib.ui import text as _mod_ui_text
+from bzrlib.tests.testui import (
+ ProgressRecordingUIFactory,
+ )
+
+
+class TTYStringIO(StringIO):
+ """A helper class which makes a StringIO look like a terminal"""
+
+ def isatty(self):
+ return True
+
+
+class NonTTYStringIO(StringIO):
+ """Helper that implements isatty() but returns False"""
+
+ def isatty(self):
+ return False
+
+
+class TestUIConfiguration(tests.TestCaseWithTransport):
+
+ def test_output_encoding_configuration(self):
+ enc = fixtures.generate_unicode_encodings().next()
+ config.GlobalConfig().set_user_option('output_encoding',
+ enc)
+ ui = tests.TestUIFactory(stdin=None,
+ stdout=tests.StringIOWrapper(),
+ stderr=tests.StringIOWrapper())
+ output = ui.make_output_stream()
+ self.assertEquals(output.encoding, enc)
+
+
+class TestTextUIFactory(tests.TestCase):
+
+ def make_test_ui_factory(self, stdin_contents):
+ ui = tests.TestUIFactory(stdin=stdin_contents,
+ stdout=tests.StringIOWrapper(),
+ stderr=tests.StringIOWrapper())
+ return ui
+
+ def test_text_factory_confirm(self):
+ # turns into reading a regular boolean
+ ui = self.make_test_ui_factory('n\n')
+ self.assertEquals(ui.confirm_action(u'Should %(thing)s pass?',
+ 'bzrlib.tests.test_ui.confirmation',
+ {'thing': 'this'},),
+ False)
+
+ def test_text_factory_ascii_password(self):
+ ui = self.make_test_ui_factory('secret\n')
+ pb = ui.nested_progress_bar()
+ try:
+ self.assertEqual('secret',
+ self.apply_redirected(ui.stdin, ui.stdout,
+ ui.stderr,
+ ui.get_password))
+ # ': ' is appended to prompt
+ self.assertEqual(': ', ui.stderr.getvalue())
+ self.assertEqual('', ui.stdout.readline())
+ # stdin should be empty
+ self.assertEqual('', ui.stdin.readline())
+ finally:
+ pb.finished()
+
+ def test_text_factory_utf8_password(self):
+ """Test an utf8 password.
+
+ We can't predict what encoding users will have for stdin, so we force
+ it to utf8 to test that we transport the password correctly.
+ """
+ ui = self.make_test_ui_factory(u'baz\u1234'.encode('utf8'))
+ ui.stderr.encoding = ui.stdout.encoding = ui.stdin.encoding = 'utf8'
+ pb = ui.nested_progress_bar()
+ try:
+ password = self.apply_redirected(ui.stdin, ui.stdout, ui.stderr,
+ ui.get_password,
+ u'Hello \u1234 %(user)s',
+ user=u'some\u1234')
+ # We use StringIO objects, so we need to decode them
+ self.assertEqual(u'baz\u1234', password.decode('utf8'))
+ self.assertEqual(u'Hello \u1234 some\u1234: ',
+ ui.stderr.getvalue().decode('utf8'))
+ # stdin and stdout should be empty
+ self.assertEqual('', ui.stdin.readline())
+ self.assertEqual('', ui.stdout.readline())
+ finally:
+ pb.finished()
+
+ def test_text_ui_get_boolean(self):
+ stdin = tests.StringIOWrapper("y\n" # True
+ "n\n" # False
+ " \n y \n" # True
+ " no \n" # False
+ "yes with garbage\nY\n" # True
+ "not an answer\nno\n" # False
+ "I'm sure!\nyes\n" # True
+ "NO\n" # False
+ "foo\n")
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ factory = _mod_ui_text.TextUIFactory(stdin, stdout, stderr)
+ self.assertEqual(True, factory.get_boolean(u""))
+ self.assertEqual(False, factory.get_boolean(u""))
+ self.assertEqual(True, factory.get_boolean(u""))
+ self.assertEqual(False, factory.get_boolean(u""))
+ self.assertEqual(True, factory.get_boolean(u""))
+ self.assertEqual(False, factory.get_boolean(u""))
+ self.assertEqual(True, factory.get_boolean(u""))
+ self.assertEqual(False, factory.get_boolean(u""))
+ self.assertEqual("foo\n", factory.stdin.read())
+ # stdin should be empty
+ self.assertEqual('', factory.stdin.readline())
+ # return false on EOF
+ self.assertEqual(False, factory.get_boolean(u""))
+
+ def test_text_ui_choose_bad_parameters(self):
+ stdin = tests.StringIOWrapper()
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ factory = _mod_ui_text.TextUIFactory(stdin, stdout, stderr)
+ # invalid default index
+ self.assertRaises(ValueError, factory.choose, u"", u"&Yes\n&No", 3)
+ # duplicated choice
+ self.assertRaises(ValueError, factory.choose, u"", u"&choice\n&ChOiCe")
+ # duplicated shortcut
+ self.assertRaises(ValueError, factory.choose, u"", u"&choice1\nchoi&ce2")
+
+ def test_text_ui_choose_prompt(self):
+ stdin = tests.StringIOWrapper()
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ factory = _mod_ui_text.TextUIFactory(stdin, stdout, stderr)
+ # choices with explicit shortcuts
+ factory.choose(u"prompt", u"&yes\n&No\nmore &info")
+ self.assertEqual("prompt ([y]es, [N]o, more [i]nfo): \n", factory.stderr.getvalue())
+ # automatic shortcuts
+ factory.stderr.truncate(0)
+ factory.choose(u"prompt", u"yes\nNo\nmore info")
+ self.assertEqual("prompt ([y]es, [N]o, [m]ore info): \n", factory.stderr.getvalue())
+
+ def test_text_ui_choose_return_values(self):
+ choose = lambda: factory.choose(u"", u"&Yes\n&No\nMaybe\nmore &info", 3)
+ stdin = tests.StringIOWrapper("y\n" # 0
+ "n\n" # 1
+ " \n" # default: 3
+ " no \n" # 1
+ "b\na\nd \n" # bad shortcuts, all ignored
+ "yes with garbage\nY\n" # 0
+ "not an answer\nno\n" # 1
+ "info\nmore info\n" # 3
+ "Maybe\n" # 2
+ "foo\n")
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ factory = _mod_ui_text.TextUIFactory(stdin, stdout, stderr)
+ self.assertEqual(0, choose())
+ self.assertEqual(1, choose())
+ self.assertEqual(3, choose())
+ self.assertEqual(1, choose())
+ self.assertEqual(0, choose())
+ self.assertEqual(1, choose())
+ self.assertEqual(3, choose())
+ self.assertEqual(2, choose())
+ self.assertEqual("foo\n", factory.stdin.read())
+ # stdin should be empty
+ self.assertEqual('', factory.stdin.readline())
+ # return None on EOF
+ self.assertEqual(None, choose())
+
+ def test_text_ui_choose_no_default(self):
+ stdin = tests.StringIOWrapper(" \n" # no default, invalid!
+ " yes \n" # 0
+ "foo\n")
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ factory = _mod_ui_text.TextUIFactory(stdin, stdout, stderr)
+ self.assertEqual(0, factory.choose(u"", u"&Yes\n&No"))
+ self.assertEqual("foo\n", factory.stdin.read())
+
+ def test_text_ui_get_integer(self):
+ stdin = tests.StringIOWrapper(
+ "1\n"
+ " -2 \n"
+ "hmmm\nwhat else ?\nCome on\nok 42\n4.24\n42\n")
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ factory = _mod_ui_text.TextUIFactory(stdin, stdout, stderr)
+ self.assertEqual(1, factory.get_integer(u""))
+ self.assertEqual(-2, factory.get_integer(u""))
+ self.assertEqual(42, factory.get_integer(u""))
+
+ def test_text_factory_prompt(self):
+ # see <https://launchpad.net/bugs/365891>
+ StringIO = tests.StringIOWrapper
+ factory = _mod_ui_text.TextUIFactory(StringIO(), StringIO(), StringIO())
+ factory.prompt(u'foo %2e')
+ self.assertEqual('', factory.stdout.getvalue())
+ self.assertEqual('foo %2e', factory.stderr.getvalue())
+
+ def test_text_factory_prompts_and_clears(self):
+ # a get_boolean call should clear the pb before prompting
+ out = TTYStringIO()
+ self.overrideEnv('TERM', 'xterm')
+ factory = _mod_ui_text.TextUIFactory(
+ stdin=tests.StringIOWrapper("yada\ny\n"),
+ stdout=out, stderr=out)
+ factory._avail_width = lambda: 79
+ pb = factory.nested_progress_bar()
+ pb.show_bar = False
+ pb.show_spinner = False
+ pb.show_count = False
+ pb.update("foo", 0, 1)
+ self.assertEqual(True,
+ self.apply_redirected(None, factory.stdout,
+ factory.stdout,
+ factory.get_boolean,
+ u"what do you want"))
+ output = out.getvalue()
+ self.assertContainsRe(output,
+ "| foo *\r\r *\r*")
+ self.assertContainsString(output,
+ r"what do you want? ([y]es, [n]o): what do you want? ([y]es, [n]o): ")
+ # stdin should have been totally consumed
+ self.assertEqual('', factory.stdin.readline())
+
+ def test_text_tick_after_update(self):
+ ui_factory = _mod_ui_text.TextUIFactory(stdout=tests.StringIOWrapper(),
+ stderr=tests.StringIOWrapper())
+ pb = ui_factory.nested_progress_bar()
+ try:
+ pb.update('task', 0, 3)
+ # Reset the clock, so that it actually tries to repaint itself
+ ui_factory._progress_view._last_repaint = time.time() - 1.0
+ pb.tick()
+ finally:
+ pb.finished()
+
+ def test_text_ui_getusername(self):
+ factory = _mod_ui_text.TextUIFactory(None, None, None)
+ factory.stdin = tests.StringIOWrapper("someuser\n\n")
+ factory.stdout = tests.StringIOWrapper()
+ factory.stderr = tests.StringIOWrapper()
+ factory.stdout.encoding = "utf8"
+ # there is no output from the base factory
+ self.assertEqual("someuser",
+ factory.get_username(u'Hello %(host)s', host='some'))
+ self.assertEquals("Hello some: ", factory.stderr.getvalue())
+ self.assertEquals('', factory.stdout.getvalue())
+ self.assertEqual("", factory.get_username(u"Gebruiker"))
+ # stdin should be empty
+ self.assertEqual('', factory.stdin.readline())
+
+ def test_text_ui_getusername_utf8(self):
+ ui = tests.TestUIFactory(stdin=u'someuser\u1234'.encode('utf8'),
+ stdout=tests.StringIOWrapper(),
+ stderr=tests.StringIOWrapper())
+ ui.stderr.encoding = ui.stdout.encoding = ui.stdin.encoding = "utf8"
+ pb = ui.nested_progress_bar()
+ try:
+ # there is no output from the base factory
+ username = self.apply_redirected(ui.stdin, ui.stdout, ui.stderr,
+ ui.get_username, u'Hello\u1234 %(host)s', host=u'some\u1234')
+ self.assertEquals(u"someuser\u1234", username.decode('utf8'))
+ self.assertEquals(u"Hello\u1234 some\u1234: ",
+ ui.stderr.getvalue().decode("utf8"))
+ self.assertEquals('', ui.stdout.getvalue())
+ finally:
+ pb.finished()
+
+ def test_quietness(self):
+ self.overrideEnv('BZR_PROGRESS_BAR', 'text')
+ ui_factory = _mod_ui_text.TextUIFactory(None,
+ TTYStringIO(),
+ TTYStringIO())
+ self.assertIsInstance(ui_factory._progress_view,
+ _mod_ui_text.TextProgressView)
+ ui_factory.be_quiet(True)
+ self.assertIsInstance(ui_factory._progress_view,
+ _mod_ui_text.NullProgressView)
+
+ def test_text_ui_show_user_warning(self):
+ from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
+ from bzrlib.repofmt.knitpack_repo import RepositoryFormatKnitPack5
+ err = StringIO()
+ out = StringIO()
+ ui = tests.TextUIFactory(stdin=None, stdout=out, stderr=err)
+ remote_fmt = remote.RemoteRepositoryFormat()
+ remote_fmt._network_name = RepositoryFormatKnitPack5().network_name()
+ ui.show_user_warning('cross_format_fetch', from_format=RepositoryFormat2a(),
+ to_format=remote_fmt)
+ self.assertEquals('', out.getvalue())
+ self.assertEquals("Doing on-the-fly conversion from RepositoryFormat2a() to "
+ "RemoteRepositoryFormat(_network_name='Bazaar RepositoryFormatKnitPack5 "
+ "(bzr 1.6)\\n').\nThis may take some time. Upgrade the repositories to "
+ "the same format for better performance.\n",
+ err.getvalue())
+ # and now with it suppressed please
+ err = StringIO()
+ out = StringIO()
+ ui = tests.TextUIFactory(stdin=None, stdout=out, stderr=err)
+ ui.suppressed_warnings.add('cross_format_fetch')
+ ui.show_user_warning('cross_format_fetch', from_format=RepositoryFormat2a(),
+ to_format=remote_fmt)
+ self.assertEquals('', out.getvalue())
+ self.assertEquals('', err.getvalue())
+
+
+class TestTextUIOutputStream(tests.TestCase):
+ """Tests for output stream that synchronizes with progress bar."""
+
+ def test_output_clears_terminal(self):
+ stdout = tests.StringIOWrapper()
+ stderr = tests.StringIOWrapper()
+ clear_calls = []
+
+ uif = _mod_ui_text.TextUIFactory(None, stdout, stderr)
+ uif.clear_term = lambda: clear_calls.append('clear')
+
+ stream = _mod_ui_text.TextUIOutputStream(uif, uif.stdout)
+ stream.write("Hello world!\n")
+ stream.write("there's more...\n")
+ stream.writelines(["1\n", "2\n", "3\n"])
+
+ self.assertEqual(stdout.getvalue(),
+ "Hello world!\n"
+ "there's more...\n"
+ "1\n2\n3\n")
+ self.assertEqual(['clear', 'clear', 'clear'],
+ clear_calls)
+
+ stream.flush()
+
+
+class UITests(tests.TestCase):
+
+ def test_progress_construction(self):
+ """TextUIFactory constructs the right progress view.
+ """
+ FileStringIO = tests.StringIOWrapper
+ for (file_class, term, pb, expected_pb_class) in (
+ # on an xterm, either use them or not as the user requests,
+ # otherwise default on
+ (TTYStringIO, 'xterm', 'none', _mod_ui_text.NullProgressView),
+ (TTYStringIO, 'xterm', 'text', _mod_ui_text.TextProgressView),
+ (TTYStringIO, 'xterm', None, _mod_ui_text.TextProgressView),
+ # on a dumb terminal, again if there's explicit configuration do
+ # it, otherwise default off
+ (TTYStringIO, 'dumb', 'none', _mod_ui_text.NullProgressView),
+ (TTYStringIO, 'dumb', 'text', _mod_ui_text.TextProgressView),
+ (TTYStringIO, 'dumb', None, _mod_ui_text.NullProgressView),
+ # on a non-tty terminal, it's null regardless of $TERM
+ (FileStringIO, 'xterm', None, _mod_ui_text.NullProgressView),
+ (FileStringIO, 'dumb', None, _mod_ui_text.NullProgressView),
+ # however, it can still be forced on
+ (FileStringIO, 'dumb', 'text', _mod_ui_text.TextProgressView),
+ ):
+ self.overrideEnv('TERM', term)
+ self.overrideEnv('BZR_PROGRESS_BAR', pb)
+ stdin = file_class('')
+ stderr = file_class()
+ stdout = file_class()
+ uif = _mod_ui.make_ui_for_terminal(stdin, stdout, stderr)
+ self.assertIsInstance(uif, _mod_ui_text.TextUIFactory,
+ "TERM=%s BZR_PROGRESS_BAR=%s uif=%r" % (term, pb, uif,))
+ self.assertIsInstance(uif.make_progress_view(),
+ expected_pb_class,
+ "TERM=%s BZR_PROGRESS_BAR=%s uif=%r" % (term, pb, uif,))
+
+ def test_text_ui_non_terminal(self):
+ """Even on non-ttys, make_ui_for_terminal gives a text ui."""
+ stdin = NonTTYStringIO('')
+ stderr = NonTTYStringIO()
+ stdout = NonTTYStringIO()
+ for term_type in ['dumb', None, 'xterm']:
+ self.overrideEnv('TERM', term_type)
+ uif = _mod_ui.make_ui_for_terminal(stdin, stdout, stderr)
+ self.assertIsInstance(uif, _mod_ui_text.TextUIFactory,
+ 'TERM=%r' % (term_type,))
+
+
+class SilentUITests(tests.TestCase):
+
+ def test_silent_factory_get_password(self):
+ # A silent factory that can't do user interaction can't get a
+ # password. Possibly it should raise a more specific error but it
+ # can't succeed.
+ ui = _mod_ui.SilentUIFactory()
+ stdout = tests.StringIOWrapper()
+ self.assertRaises(
+ NotImplementedError,
+ self.apply_redirected,
+ None, stdout, stdout, ui.get_password)
+ # and it didn't write anything out either
+ self.assertEqual('', stdout.getvalue())
+
+ def test_silent_ui_getbool(self):
+ factory = _mod_ui.SilentUIFactory()
+ stdout = tests.StringIOWrapper()
+ self.assertRaises(
+ NotImplementedError,
+ self.apply_redirected,
+ None, stdout, stdout, factory.get_boolean, u"foo")
+
+
+class TestUIFactoryTests(tests.TestCase):
+
+ def test_test_ui_factory_progress(self):
+ # there's no output; we just want to make sure this doesn't crash -
+ # see https://bugs.launchpad.net/bzr/+bug/408201
+ ui = tests.TestUIFactory()
+ pb = ui.nested_progress_bar()
+ pb.update('hello')
+ pb.tick()
+ pb.finished()
+
+
+class CannedInputUIFactoryTests(tests.TestCase):
+
+ def test_canned_input_get_input(self):
+ uif = _mod_ui.CannedInputUIFactory([True, 'mbp', 'password', 42])
+ self.assertEqual(True, uif.get_boolean(u'Extra cheese?'))
+ self.assertEqual('mbp', uif.get_username(u'Enter your user name'))
+ self.assertEqual('password',
+ uif.get_password(u'Password for %(host)s',
+ host='example.com'))
+ self.assertEqual(42, uif.get_integer(u'And all that jazz ?'))
+
+
+class TestBoolFromString(tests.TestCase):
+
+ def assertIsTrue(self, s, accepted_values=None):
+ res = _mod_ui.bool_from_string(s, accepted_values=accepted_values)
+ self.assertEquals(True, res)
+
+ def assertIsFalse(self, s, accepted_values=None):
+ res = _mod_ui.bool_from_string(s, accepted_values=accepted_values)
+ self.assertEquals(False, res)
+
+ def assertIsNone(self, s, accepted_values=None):
+ res = _mod_ui.bool_from_string(s, accepted_values=accepted_values)
+ self.assertIs(None, res)
+
+ def test_know_valid_values(self):
+ self.assertIsTrue('true')
+ self.assertIsFalse('false')
+ self.assertIsTrue('1')
+ self.assertIsFalse('0')
+ self.assertIsTrue('on')
+ self.assertIsFalse('off')
+ self.assertIsTrue('yes')
+ self.assertIsFalse('no')
+ self.assertIsTrue('y')
+ self.assertIsFalse('n')
+ # Also try some case variations
+ self.assertIsTrue('True')
+ self.assertIsFalse('False')
+ self.assertIsTrue('On')
+ self.assertIsFalse('Off')
+ self.assertIsTrue('ON')
+ self.assertIsFalse('OFF')
+ self.assertIsTrue('oN')
+ self.assertIsFalse('oFf')
+
+ def test_invalid_values(self):
+ self.assertIsNone(None)
+ self.assertIsNone('doubt')
+ self.assertIsNone('frue')
+ self.assertIsNone('talse')
+ self.assertIsNone('42')
+
+ def test_provided_values(self):
+ av = dict(y=True, n=False, yes=True, no=False)
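+ # With an explicit accepted_values dict only those strings are
+ # recognised; the built-in defaults such as '1', '0', 'on' and 'off'
+ # now map to None.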
+ self.assertIsTrue('y', av)
+ self.assertIsTrue('Y', av)
+ self.assertIsTrue('Yes', av)
+ self.assertIsFalse('n', av)
+ self.assertIsFalse('N', av)
+ self.assertIsFalse('No', av)
+ self.assertIsNone('1', av)
+ self.assertIsNone('0', av)
+ self.assertIsNone('on', av)
+ self.assertIsNone('off', av)
+
+
+class TestConfirmationUserInterfacePolicy(tests.TestCase):
+
+ def test_confirm_action_default(self):
+ base_ui = _mod_ui.NoninteractiveUIFactory()
+ for answer in [True, False]:
+ self.assertEquals(
+ _mod_ui.ConfirmationUserInterfacePolicy(base_ui, answer, {})
+ .confirm_action("Do something?",
+ "bzrlib.tests.do_something", {}),
+ answer)
+
+ def test_confirm_action_specific(self):
+ base_ui = _mod_ui.NoninteractiveUIFactory()
+ for default_answer in [True, False]:
+ for specific_answer in [True, False]:
+ for conf_id in ['given_id', 'other_id']:
+ wrapper = _mod_ui.ConfirmationUserInterfacePolicy(
+ base_ui, default_answer, dict(given_id=specific_answer))
+ result = wrapper.confirm_action("Do something?", conf_id, {})
+ if conf_id == 'given_id':
+ self.assertEquals(result, specific_answer)
+ else:
+ self.assertEquals(result, default_answer)
+
+ def test_repr(self):
+ base_ui = _mod_ui.NoninteractiveUIFactory()
+ wrapper = _mod_ui.ConfirmationUserInterfacePolicy(
+ base_ui, True, dict(a=2))
+ self.assertThat(repr(wrapper),
+ Equals("ConfirmationUserInterfacePolicy("
+ "NoninteractiveUIFactory(), True, {'a': 2})"))
+
+
+class TestProgressRecordingUI(tests.TestCase):
+ """Test test-oriented UIFactory that records progress updates"""
+
+ def test_nested_ignore_depth_beyond_one(self):
+ # we only want to capture the first level of progress, not
+ # what sub-components might do, so nested bars are ignored.
+ factory = ProgressRecordingUIFactory()
+ pb1 = factory.nested_progress_bar()
+ pb1.update('foo', 0, 1)
+ pb2 = factory.nested_progress_bar()
+ pb2.update('foo', 0, 1)
+ pb2.finished()
+ pb1.finished()
+ self.assertEqual([("update", 0, 1, 'foo')], factory._calls)
diff --git a/bzrlib/tests/test_uncommit.py b/bzrlib/tests/test_uncommit.py
new file mode 100644
index 0000000..90145c0
--- /dev/null
+++ b/bzrlib/tests/test_uncommit.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Test uncommit."""
+
+
+from bzrlib import (
+ errors,
+ tests,
+ uncommit,
+ )
+
+
+class TestUncommit(tests.TestCaseWithTransport):
+
+ def make_linear_tree(self):
+ tree = self.make_branch_and_tree('tree')
+ tree.lock_write()
+ try:
+ self.build_tree(['tree/one'])
+ tree.add('one')
+ rev_id1 = tree.commit('one')
+ self.build_tree(['tree/two'])
+ tree.add('two')
+ rev_id2 = tree.commit('two')
+ finally:
+ tree.unlock()
+ return tree, [rev_id1, rev_id2]
+
+ def test_uncommit(self):
+ tree, history = self.make_linear_tree()
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ uncommit.uncommit(tree.branch, tree=tree)
+ self.assertEqual(history[0], tree.last_revision())
+ self.assertEqual((1, history[0]), tree.branch.last_revision_info())
+
+ # The file should not be removed
+ self.assertPathExists('tree/two')
+ # And it should still be listed as added
+ self.assertIsNot(None, tree.path2id('two'))
+
+ def test_uncommit_bound(self):
+ tree, history = self.make_linear_tree()
+ child = tree.bzrdir.sprout('child').open_workingtree()
+ child.branch.bind(tree.branch)
+
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ self.assertEqual(history[1], child.last_revision())
+ self.assertEqual((2, history[1]), child.branch.last_revision_info())
+
+ # Uncommit in a bound branch should uncommit the master branch, but not
+ # touch the other working tree.
+ uncommit.uncommit(child.branch, tree=child)
+
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((1, history[0]), tree.branch.last_revision_info())
+ self.assertEqual(history[0], child.last_revision())
+ self.assertEqual((1, history[0]), child.branch.last_revision_info())
+
+ def test_uncommit_bound_local(self):
+ tree, history = self.make_linear_tree()
+ child = tree.bzrdir.sprout('child').open_workingtree()
+ child.branch.bind(tree.branch)
+
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ self.assertEqual(history[1], child.last_revision())
+ self.assertEqual((2, history[1]), child.branch.last_revision_info())
+
+ # Uncommit local=True should only affect the local branch
+ uncommit.uncommit(child.branch, tree=child, local=True)
+
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ self.assertEqual(history[0], child.last_revision())
+ self.assertEqual((1, history[0]), child.branch.last_revision_info())
+
+ def test_uncommit_unbound_local(self):
+ tree, history = self.make_linear_tree()
+
+ # If this tree isn't bound, local=True raises an exception
+ self.assertRaises(errors.LocalRequiresBoundBranch,
+ uncommit.uncommit, tree.branch, tree=tree, local=True)
+
+ def test_uncommit_remove_tags(self):
+ tree, history = self.make_linear_tree()
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ tree.branch.tags.set_tag(u"pointsatexisting", history[0])
+ tree.branch.tags.set_tag(u"pointsatremoved", history[1])
+ uncommit.uncommit(tree.branch, tree=tree)
+ self.assertEqual(history[0], tree.last_revision())
+ self.assertEqual((1, history[0]), tree.branch.last_revision_info())
+ self.assertEqual({
+ "pointsatexisting": history[0]
+ }, tree.branch.tags.get_tag_dict())
+
+ def test_uncommit_remove_tags_keeps_pending_merges(self):
+ tree, history = self.make_linear_tree()
+ copy = tree.bzrdir.sprout('copyoftree').open_workingtree()
+ copy.commit(message='merged', rev_id='merged')
+ tree.merge_from_branch(copy.branch)
+ tree.branch.tags.set_tag('pointsatmerged', 'merged')
+ history.append(tree.commit('merge'))
+ self.assertEquals('merged', tree.branch.tags.lookup_tag('pointsatmerged'))
+ self.assertEqual(history[2], tree.last_revision())
+ self.assertEqual((3, history[2]), tree.branch.last_revision_info())
+ tree.branch.tags.set_tag(u"pointsatexisting", history[1])
+ tree.branch.tags.set_tag(u"pointsatremoved", history[2])
+ uncommit.uncommit(tree.branch, tree=tree)
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ self.assertEquals([history[1], 'merged'], tree.get_parent_ids())
+ self.assertEqual({
+ "pointsatexisting": history[1],
+ "pointsatmerged": 'merged',
+ }, tree.branch.tags.get_tag_dict())
+
+ def test_uncommit_keep_tags(self):
+ tree, history = self.make_linear_tree()
+ self.assertEqual(history[1], tree.last_revision())
+ self.assertEqual((2, history[1]), tree.branch.last_revision_info())
+ tree.branch.tags.set_tag(u"pointsatexisting", history[0])
+ tree.branch.tags.set_tag(u"pointsatremoved", history[1])
+ uncommit.uncommit(tree.branch, tree=tree, keep_tags=True)
+ self.assertEqual(history[0], tree.last_revision())
+ self.assertEqual((1, history[0]), tree.branch.last_revision_info())
+ self.assertEqual({
+ "pointsatexisting": history[0],
+ "pointsatremoved": history[1],
+ }, tree.branch.tags.get_tag_dict())
diff --git a/bzrlib/tests/test_upgrade.py b/bzrlib/tests/test_upgrade.py
new file mode 100644
index 0000000..5d4defb
--- /dev/null
+++ b/bzrlib/tests/test_upgrade.py
@@ -0,0 +1,213 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for upgrade of old trees.
+
+This file contains canned versions of some old trees, which are instantiated
+and then upgraded to the new format."""
+
+# TODO queue for upgrade:
+# test the error message when upgrading an unknown BzrDir format.
+
+from bzrlib import (
+ branch,
+ controldir,
+ tests,
+ upgrade,
+ workingtree,
+ workingtree_4,
+ )
+
+
+class TestUpgrade(tests.TestCaseWithTransport):
+
+ def test_upgrade_rich_root(self):
+ tree = self.make_branch_and_tree('tree', format='rich-root')
+ rev_id = tree.commit('first post')
+ upgrade.upgrade('tree')
+
+ def test_convert_branch5_branch6(self):
+ b = self.make_branch('branch', format='knit')
+ b._set_revision_history(['CD'])
+ b.set_parent('file:///EF')
+ b.set_bound_location('file:///GH')
+ b.set_push_location('file:///IJ')
+ target = controldir.format_registry.make_bzrdir('dirstate-with-subtree')
+ converter = b.bzrdir._format.get_converter(target)
+ converter.convert(b.bzrdir, None)
+ new_branch = branch.Branch.open(self.get_url('branch'))
+ self.assertIs(new_branch.__class__, branch.BzrBranch6)
+ self.assertEqual('CD', new_branch.last_revision())
+ self.assertEqual('file:///EF', new_branch.get_parent())
+ self.assertEqual('file:///GH', new_branch.get_bound_location())
+ branch_config = new_branch.get_config_stack()
+ self.assertEqual('file:///IJ', branch_config.get('push_location'))
+
+ b2 = self.make_branch('branch2', format='knit')
+ converter = b2.bzrdir._format.get_converter(target)
+ converter.convert(b2.bzrdir, None)
+ b2 = branch.Branch.open(self.get_url('branch'))
+ self.assertIs(b2.__class__, branch.BzrBranch6)
+
+ def test_convert_branch7_branch8(self):
+ b = self.make_branch('branch', format='1.9')
+ target = controldir.format_registry.make_bzrdir('1.9')
+ target.set_branch_format(branch.BzrBranchFormat8())
+ converter = b.bzrdir._format.get_converter(target)
+ converter.convert(b.bzrdir, None)
+ b = branch.Branch.open(self.get_url('branch'))
+ self.assertIs(b.__class__, branch.BzrBranch8)
+ self.assertEqual({}, b._get_all_reference_info())
+
+ def test_convert_knit_dirstate_empty(self):
+ # test that asking for an upgrade from knit to dirstate works.
+ tree = self.make_branch_and_tree('tree', format='knit')
+ target = controldir.format_registry.make_bzrdir('dirstate')
+ converter = tree.bzrdir._format.get_converter(target)
+ converter.convert(tree.bzrdir, None)
+ new_tree = workingtree.WorkingTree.open('tree')
+ self.assertIs(new_tree.__class__, workingtree_4.WorkingTree4)
+ self.assertEqual('null:', new_tree.last_revision())
+
+ def test_convert_knit_dirstate_content(self):
+ # smoke test for dirstate conversion: we call dirstate primitives,
+ # and it's there that the core logic is tested.
+ tree = self.make_branch_and_tree('tree', format='knit')
+ self.build_tree(['tree/file'])
+ tree.add(['file'], ['file-id'])
+ target = controldir.format_registry.make_bzrdir('dirstate')
+ converter = tree.bzrdir._format.get_converter(target)
+ converter.convert(tree.bzrdir, None)
+ new_tree = workingtree.WorkingTree.open('tree')
+ self.assertIs(new_tree.__class__, workingtree_4.WorkingTree4)
+ self.assertEqual('null:', new_tree.last_revision())
+
+ def test_convert_knit_one_parent_dirstate(self):
+ # test that asking for an upgrade from knit to dirstate works.
+ tree = self.make_branch_and_tree('tree', format='knit')
+ rev_id = tree.commit('first post')
+ target = controldir.format_registry.make_bzrdir('dirstate')
+ converter = tree.bzrdir._format.get_converter(target)
+ converter.convert(tree.bzrdir, None)
+ new_tree = workingtree.WorkingTree.open('tree')
+ self.assertIs(new_tree.__class__, workingtree_4.WorkingTree4)
+ self.assertEqual(rev_id, new_tree.last_revision())
+ for path in ['basis-inventory-cache', 'inventory', 'last-revision',
+ 'pending-merges', 'stat-cache']:
+ self.assertPathDoesNotExist('tree/.bzr/checkout/' + path)
+
+ def test_convert_knit_merges_dirstate(self):
+ tree = self.make_branch_and_tree('tree', format='knit')
+ rev_id = tree.commit('first post')
+ merge_tree = tree.bzrdir.sprout('tree2').open_workingtree()
+ rev_id2 = tree.commit('second post')
+ rev_id3 = merge_tree.commit('second merge post')
+ tree.merge_from_branch(merge_tree.branch)
+ target = controldir.format_registry.make_bzrdir('dirstate')
+ converter = tree.bzrdir._format.get_converter(target)
+ converter.convert(tree.bzrdir, None)
+ new_tree = workingtree.WorkingTree.open('tree')
+ self.assertIs(new_tree.__class__, workingtree_4.WorkingTree4)
+ self.assertEqual(rev_id2, new_tree.last_revision())
+ self.assertEqual([rev_id2, rev_id3], new_tree.get_parent_ids())
+ for path in ['basis-inventory-cache', 'inventory', 'last-revision',
+ 'pending-merges', 'stat-cache']:
+ self.assertPathDoesNotExist('tree/.bzr/checkout/' + path)
+
+
+class TestSmartUpgrade(tests.TestCaseWithTransport):
+
+ from_format = controldir.format_registry.make_bzrdir("pack-0.92")
+ to_format = controldir.format_registry.make_bzrdir("2a")
+
+ def make_standalone_branch(self):
+ wt = self.make_branch_and_tree("branch1", format=self.from_format)
+ return wt.bzrdir
+
+ def test_upgrade_standalone_branch(self):
+ control = self.make_standalone_branch()
+ tried, worked, issues = upgrade.smart_upgrade(
+ [control], format=self.to_format)
+ self.assertLength(1, tried)
+ self.assertEqual(tried[0], control)
+ self.assertLength(1, worked)
+ self.assertEqual(worked[0], control)
+ self.assertLength(0, issues)
+ self.assertPathExists('branch1/backup.bzr.~1~')
+ self.assertEqual(control.open_repository()._format,
+ self.to_format._repository_format)
+
+ def test_upgrade_standalone_branch_cleanup(self):
+ control = self.make_standalone_branch()
+ tried, worked, issues = upgrade.smart_upgrade(
+ [control], format=self.to_format, clean_up=True)
+ self.assertLength(1, tried)
+ self.assertEqual(tried[0], control)
+ self.assertLength(1, worked)
+ self.assertEqual(worked[0], control)
+ self.assertLength(0, issues)
+ self.assertPathExists('branch1')
+ self.assertPathExists('branch1/.bzr')
+ self.assertPathDoesNotExist('branch1/backup.bzr.~1~')
+ self.assertEqual(control.open_repository()._format,
+ self.to_format._repository_format)
+
+ def make_repo_with_branches(self):
+ repo = self.make_repository('repo', shared=True,
+ format=self.from_format)
+ # Note: self.make_branch() always creates a new repo at the location
+ # so we need to avoid using that here ...
+ b1 = controldir.ControlDir.create_branch_convenience("repo/branch1",
+ format=self.from_format)
+ b2 = controldir.ControlDir.create_branch_convenience("repo/branch2",
+ format=self.from_format)
+ return repo.bzrdir
+
+ def test_upgrade_repo_with_branches(self):
+ control = self.make_repo_with_branches()
+ tried, worked, issues = upgrade.smart_upgrade(
+ [control], format=self.to_format)
+ self.assertLength(3, tried)
+ self.assertEqual(tried[0], control)
+ self.assertLength(3, worked)
+ self.assertEqual(worked[0], control)
+ self.assertLength(0, issues)
+ self.assertPathExists('repo/backup.bzr.~1~')
+ self.assertPathExists('repo/branch1/backup.bzr.~1~')
+ self.assertPathExists('repo/branch2/backup.bzr.~1~')
+ self.assertEqual(control.open_repository()._format,
+ self.to_format._repository_format)
+ b1 = branch.Branch.open('repo/branch1')
+ self.assertEqual(b1._format, self.to_format._branch_format)
+
+ def test_upgrade_repo_with_branches_cleanup(self):
+ control = self.make_repo_with_branches()
+ tried, worked, issues = upgrade.smart_upgrade(
+ [control], format=self.to_format, clean_up=True)
+ self.assertLength(3, tried)
+ self.assertEqual(tried[0], control)
+ self.assertLength(3, worked)
+ self.assertEqual(worked[0], control)
+ self.assertLength(0, issues)
+ self.assertPathExists('repo')
+ self.assertPathExists('repo/.bzr')
+ self.assertPathDoesNotExist('repo/backup.bzr.~1~')
+ self.assertPathDoesNotExist('repo/branch1/backup.bzr.~1~')
+ self.assertPathDoesNotExist('repo/branch2/backup.bzr.~1~')
+ self.assertEqual(control.open_repository()._format,
+ self.to_format._repository_format)
+ b1 = branch.Branch.open('repo/branch1')
+ self.assertEqual(b1._format, self.to_format._branch_format)
diff --git a/bzrlib/tests/test_upgrade_stacked.py b/bzrlib/tests/test_upgrade_stacked.py
new file mode 100644
index 0000000..683d726
--- /dev/null
+++ b/bzrlib/tests/test_upgrade_stacked.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Tests for upgrades of various stacking situations."""
+
+from bzrlib import (
+ controldir,
+ check,
+ errors,
+ tests,
+ )
+from bzrlib.upgrade import upgrade
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+def upgrade_scenarios():
+ scenario_pairs = [ # old format, new format, model_change
+# ('knit', 'rich-root', True),
+ ('knit', '1.6', False),
+# ('pack-0.92', '1.6', False),
+ ('1.6', '1.6.1-rich-root', True),
+ ]
+ scenarios = []
+ for (old_name, new_name, model_change) in scenario_pairs:
+ name = old_name + ', ' + new_name
+ scenarios.append((name,
+ dict(scenario_old_format=old_name,
+ scenario_new_format=new_name,
+ scenario_model_change=model_change)))
+ return scenarios
+
+
+load_tests = load_tests_apply_scenarios
+
+
+class TestStackUpgrade(tests.TestCaseWithTransport):
+ # TODO: This should possibly be repeated for all stacking repositories,
+ # pairwise by rich/non-rich format; should possibly also try other kinds
+ # of upgrades like knit->pack. -- mbp 20080804
+
+ scenarios = upgrade_scenarios()
+
+ def test_stack_upgrade(self):
+ """Correct checks when stacked-on repository is upgraded.
+
+ We initially stack on a repo with the same rich root support;
+ we then upgrade it (after which the stacked branch may fail to
+ open), and finally upgrade the overlaid repository.
+ """
+ base = self.make_branch_and_tree('base',
+ format=self.scenario_old_format)
+ self.build_tree(['base/foo'])
+ base.commit('base commit')
+ # make another one stacked
+ stacked = base.bzrdir.sprout('stacked', stacked=True)
+ # this must really be stacked (or get_stacked_on_url raises an error)
+ self.assertTrue(stacked.open_branch().get_stacked_on_url())
+ # now we'll upgrade the underlying branch, then upgrade the stacked
+ # branch, and this should still work.
+ new_format = controldir.format_registry.make_bzrdir(
+ self.scenario_new_format)
+ upgrade('base', new_format)
+ # in some cases you'll get an error if the underlying model has
+ # changed; if just the data format has changed this should still work
+ if self.scenario_model_change:
+ self.assertRaises(errors.IncompatibleRepositories,
+ stacked.open_branch)
+ else:
+ check.check_dwim('stacked', False, True, True)
+ stacked = controldir.ControlDir.open('stacked')
+ # but we can upgrade the stacked repository
+ upgrade('stacked', new_format)
+ # and now it opens ok
+ stacked = controldir.ControlDir.open('stacked')
+ # And passes check.
+ check.check_dwim('stacked', False, True, True)
diff --git a/bzrlib/tests/test_url_policy_open.py b/bzrlib/tests/test_url_policy_open.py
new file mode 100644
index 0000000..9bad00f
--- /dev/null
+++ b/bzrlib/tests/test_url_policy_open.py
@@ -0,0 +1,359 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the branch open with specific URL policy code."""
+
+from bzrlib import urlutils
+from bzrlib.branch import (
+ Branch,
+ BranchReferenceFormat,
+ )
+from bzrlib.bzrdir import (
+ BzrProber,
+ )
+from bzrlib.controldir import (
+ ControlDir,
+ ControlDirFormat,
+ )
+from bzrlib.errors import NotBranchError
+from bzrlib.url_policy_open import (
+ BadUrl,
+ _BlacklistPolicy,
+ BranchLoopError,
+ BranchReferenceForbidden,
+ open_only_scheme,
+ BranchOpener,
+ WhitelistPolicy,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseWithTransport,
+ )
+from bzrlib.transport import chroot
+
+
+class TestBranchOpenerCheckAndFollowBranchReference(TestCase):
+ """Unit tests for `BranchOpener.check_and_follow_branch_reference`."""
+
+ def setUp(self):
+ super(TestBranchOpenerCheckAndFollowBranchReference, self).setUp()
+ BranchOpener.install_hook()
+
+ class StubbedBranchOpener(BranchOpener):
+ """BranchOpener that provides canned answers.
+
+ We implement the methods we need to be able to control all the
+ inputs to the `follow_reference` method, which is what is
+ being tested in this class.
+ """
+
+ def __init__(self, references, policy):
+ parent_cls = TestBranchOpenerCheckAndFollowBranchReference
+ super(parent_cls.StubbedBranchOpener, self).__init__(policy)
+ self._reference_values = {}
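+ # Build a chain of references: each entry resolves to the next
+ # one in the list, and the final entry (e.g. None) terminates
+ # the chain.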
+ for i in range(len(references) - 1):
+ self._reference_values[references[i]] = references[i + 1]
+ self.follow_reference_calls = []
+
+ def follow_reference(self, url):
+ self.follow_reference_calls.append(url)
+ return self._reference_values[url]
+
+ def make_branch_opener(self, should_follow_references, references,
+ unsafe_urls=None):
+ policy = _BlacklistPolicy(should_follow_references, unsafe_urls)
+ opener = self.StubbedBranchOpener(references, policy)
+ return opener
+
+ def test_check_initial_url(self):
+ # check_and_follow_branch_reference rejects all URLs that are not
+ # allowed.
+ opener = self.make_branch_opener(None, [], set(['a']))
+ self.assertRaises(
+ BadUrl, opener.check_and_follow_branch_reference, 'a')
+
+ def test_not_reference(self):
+ # When branch references are forbidden, check_and_follow_branch_reference
+ # does not raise on non-references.
+ opener = self.make_branch_opener(False, ['a', None])
+ self.assertEquals(
+ 'a', opener.check_and_follow_branch_reference('a'))
+ self.assertEquals(['a'], opener.follow_reference_calls)
+
+ def test_branch_reference_forbidden(self):
+ # check_and_follow_branch_reference raises BranchReferenceForbidden if
+ # branch references are forbidden and the source URL points to a
+ # branch reference.
+ opener = self.make_branch_opener(False, ['a', 'b'])
+ self.assertRaises(
+ BranchReferenceForbidden,
+ opener.check_and_follow_branch_reference, 'a')
+ self.assertEquals(['a'], opener.follow_reference_calls)
+
+ def test_allowed_reference(self):
+ # check_and_follow_branch_reference does not raise if following references
+ # is allowed and the source URL points to a branch reference to a
+ # permitted location.
+ opener = self.make_branch_opener(True, ['a', 'b', None])
+ self.assertEquals(
+ 'b', opener.check_and_follow_branch_reference('a'))
+ self.assertEquals(['a', 'b'], opener.follow_reference_calls)
+
+ def test_check_referenced_urls(self):
+ # check_and_follow_branch_reference checks if the URL a reference points
+ # to is safe.
+ opener = self.make_branch_opener(
+ True, ['a', 'b', None], unsafe_urls=set('b'))
+ self.assertRaises(
+ BadUrl, opener.check_and_follow_branch_reference, 'a')
+ self.assertEquals(['a'], opener.follow_reference_calls)
+
+ def test_self_referencing_branch(self):
+ # check_and_follow_branch_reference raises BranchLoopError if
+ # following references is allowed and the source url points to a
+ # self-referencing branch reference.
+ opener = self.make_branch_opener(True, ['a', 'a'])
+ self.assertRaises(
+ BranchLoopError, opener.check_and_follow_branch_reference, 'a')
+ self.assertEquals(['a'], opener.follow_reference_calls)
+
+ def test_branch_reference_loop(self):
+ # check_and_follow_branch_reference raises BranchLoopError if
+ # following references is allowed and the source url points to a loop
+ # of branch references.
+ references = ['a', 'b', 'a']
+ opener = self.make_branch_opener(True, references)
+ self.assertRaises(
+ BranchLoopError, opener.check_and_follow_branch_reference, 'a')
+ self.assertEquals(['a', 'b'], opener.follow_reference_calls)
+
+
+class TrackingProber(BzrProber):
+ """Subclass of BzrProber which tracks URLs it has been asked to open."""
+
+ seen_urls = []
+
+ @classmethod
+ def probe_transport(klass, transport):
+ klass.seen_urls.append(transport.base)
+ return BzrProber.probe_transport(transport)
+
+
+class TestBranchOpenerStacking(TestCaseWithTransport):
+
+ def setUp(self):
+ super(TestBranchOpenerStacking, self).setUp()
+ BranchOpener.install_hook()
+
+ def make_branch_opener(self, allowed_urls, probers=None):
+ policy = WhitelistPolicy(True, allowed_urls, True)
+ return BranchOpener(policy, probers)
+
+ def test_probers(self):
+ # Only the specified probers should be used
+ b = self.make_branch('branch')
+ opener = self.make_branch_opener([b.base], probers=[])
+ self.assertRaises(NotBranchError, opener.open, b.base)
+ opener = self.make_branch_opener([b.base], probers=[BzrProber])
+ self.assertEquals(b.base, opener.open(b.base).base)
+
+ def test_default_probers(self):
+ # If no probers are specified to the constructor
+ # of BranchOpener, then a safe set will be used,
+ # rather than all probers registered in bzr.
+ self.addCleanup(ControlDirFormat.unregister_prober, TrackingProber)
+ ControlDirFormat.register_prober(TrackingProber)
+ # Open a location without any branches, so that all probers are
+ # tried.
+ # First, check that the TrackingProber tracks correctly.
+ TrackingProber.seen_urls = []
+ opener = self.make_branch_opener(["."], probers=[TrackingProber])
+ self.assertRaises(NotBranchError, opener.open, ".")
+ self.assertEquals(1, len(TrackingProber.seen_urls))
+ TrackingProber.seen_urls = []
+ # And make sure it's registered in such a way that ControlDir.open would
+ # use it.
+ self.assertRaises(NotBranchError, ControlDir.open, ".")
+ self.assertEquals(1, len(TrackingProber.seen_urls))
+
+ def test_allowed_url(self):
+ # the opener does not raise an exception for branches stacked on
+ # branches with allowed URLs.
+ stacked_on_branch = self.make_branch('base-branch', format='1.6')
+ stacked_branch = self.make_branch('stacked-branch', format='1.6')
+ stacked_branch.set_stacked_on_url(stacked_on_branch.base)
+ opener = self.make_branch_opener(
+ [stacked_branch.base, stacked_on_branch.base])
+ # This doesn't raise an exception.
+ opener.open(stacked_branch.base)
+
+ def test_nstackable_repository(self):
+ # treats branches with UnstackableRepositoryFormats as
+ # not being stacked.
+ branch = self.make_branch('unstacked', format='knit')
+ opener = self.make_branch_opener([branch.base])
+ # This doesn't raise an exception.
+ opener.open(branch.base)
+
+ def test_allowed_relative_url(self):
+ # passes on absolute urls to check_one_url, even if the
+ # value of stacked_on_location in the config is set to a relative URL.
+ stacked_on_branch = self.make_branch('base-branch', format='1.6')
+ stacked_branch = self.make_branch('stacked-branch', format='1.6')
+ stacked_branch.set_stacked_on_url('../base-branch')
+ opener = self.make_branch_opener(
+ [stacked_branch.base, stacked_on_branch.base])
+ # Note that stacked_on_branch.base is not '../base-branch', it's an
+ # absolute URL.
+ self.assertNotEqual('../base-branch', stacked_on_branch.base)
+ # This doesn't raise an exception.
+ opener.open(stacked_branch.base)
+
+ def test_allowed_relative_nested(self):
+ # Relative URLs are resolved relative to the stacked branch.
+ self.get_transport().mkdir('subdir')
+ a = self.make_branch('subdir/a', format='1.6')
+ b = self.make_branch('b', format='1.6')
+ b.set_stacked_on_url('../subdir/a')
+ c = self.make_branch('subdir/c', format='1.6')
+ c.set_stacked_on_url('../../b')
+ opener = self.make_branch_opener([c.base, b.base, a.base])
+ # This doesn't raise an exception.
+ opener.open(c.base)
+
+ def test_forbidden_url(self):
+ # raises a BadUrl exception if a branch is stacked on a
+ # branch with a forbidden URL.
+ stacked_on_branch = self.make_branch('base-branch', format='1.6')
+ stacked_branch = self.make_branch('stacked-branch', format='1.6')
+ stacked_branch.set_stacked_on_url(stacked_on_branch.base)
+ opener = self.make_branch_opener([stacked_branch.base])
+ self.assertRaises(BadUrl, opener.open, stacked_branch.base)
+
+ def test_forbidden_url_nested(self):
+ # raises a BadUrl exception if a branch is stacked on a
+ # branch that is in turn stacked on a branch with a forbidden URL.
+ a = self.make_branch('a', format='1.6')
+ b = self.make_branch('b', format='1.6')
+ b.set_stacked_on_url(a.base)
+ c = self.make_branch('c', format='1.6')
+ c.set_stacked_on_url(b.base)
+ opener = self.make_branch_opener([c.base, b.base])
+ self.assertRaises(BadUrl, opener.open, c.base)
+
+ def test_self_stacked_branch(self):
+ # raises BranchLoopError if a branch is stacked on
+ # itself. This avoids infinite recursion errors.
+ a = self.make_branch('a', format='1.6')
+ # Bazaar 1.17 and up make it harder to create branches like this.
+ # It's still worth testing that we don't blow up in the face of them,
+ # so we grovel around a bit to create one anyway.
+ a.get_config().set_user_option('stacked_on_location', a.base)
+ opener = self.make_branch_opener([a.base])
+ self.assertRaises(BranchLoopError, opener.open, a.base)
+
+ def test_loop_stacked_branch(self):
+ # raises BranchLoopError if a branch is stacked in such
+ # a way that it is ultimately stacked on itself, e.g. a stacked on
+ # b stacked on a.
+ a = self.make_branch('a', format='1.6')
+ b = self.make_branch('b', format='1.6')
+ a.set_stacked_on_url(b.base)
+ b.set_stacked_on_url(a.base)
+ opener = self.make_branch_opener([a.base, b.base])
+ self.assertRaises(BranchLoopError, opener.open, a.base)
+ self.assertRaises(BranchLoopError, opener.open, b.base)
+
+ def test_custom_opener(self):
+ # A custom set of probers for opening a control dir can be specified.
+ a = self.make_branch('a', format='2a')
+ b = self.make_branch('b', format='2a')
+ b.set_stacked_on_url(a.base)
+
+ TrackingProber.seen_urls = []
+ opener = self.make_branch_opener(
+ [a.base, b.base], probers=[TrackingProber])
+ opener.open(b.base)
+ self.assertEquals(
+ set(TrackingProber.seen_urls), set([b.base, a.base]))
+
+ def test_custom_opener_with_branch_reference(self):
+ # Branch references are followed when a custom set of probers is used.
+ a = self.make_branch('a', format='2a')
+ b_dir = self.make_bzrdir('b')
+ b = BranchReferenceFormat().initialize(b_dir, target_branch=a)
+ TrackingProber.seen_urls = []
+ opener = self.make_branch_opener(
+ [a.base, b.base], probers=[TrackingProber])
+ opener.open(b.base)
+ self.assertEquals(
+ set(TrackingProber.seen_urls), set([b.base, a.base]))
+
+
+class TestOpenOnlyScheme(TestCaseWithTransport):
+ """Tests for `open_only_scheme`."""
+
+ def setUp(self):
+ super(TestOpenOnlyScheme, self).setUp()
+ BranchOpener.install_hook()
+
+ def test_hook_does_not_interfere(self):
+ # The transform_fallback_location hook does not interfere with regular
+ # stacked branch access outside of open_only_scheme.
+ self.make_branch('stacked')
+ self.make_branch('stacked-on')
+ Branch.open('stacked').set_stacked_on_url('../stacked-on')
+ Branch.open('stacked')
+
+ def get_chrooted_scheme(self, relpath):
+ """Create a server that is chrooted to `relpath`.
+
+ :return: ``(scheme, get_url)`` where ``scheme`` is the scheme of the
+ chroot server and ``get_url`` returns URLs on said server.
+ """
+ transport = self.get_transport(relpath)
+ chroot_server = chroot.ChrootServer(transport)
+ chroot_server.start_server()
+ self.addCleanup(chroot_server.stop_server)
+
+ def get_url(relpath):
+ return chroot_server.get_url() + relpath
+
+ return urlutils.URL.from_string(chroot_server.get_url()).scheme, get_url
+
+ def test_stacked_within_scheme(self):
+ # A branch that is stacked on a URL of the same scheme is safe to
+ # open.
+ self.get_transport().mkdir('inside')
+ self.make_branch('inside/stacked')
+ self.make_branch('inside/stacked-on')
+ scheme, get_chrooted_url = self.get_chrooted_scheme('inside')
+ Branch.open(get_chrooted_url('stacked')).set_stacked_on_url(
+ get_chrooted_url('stacked-on'))
+ open_only_scheme(scheme, get_chrooted_url('stacked'))
+
+ def test_stacked_outside_scheme(self):
+ # A branch that is stacked on a URL that is not of the same scheme is
+ # not safe to open.
+ self.get_transport().mkdir('inside')
+ self.get_transport().mkdir('outside')
+ self.make_branch('inside/stacked')
+ self.make_branch('outside/stacked-on')
+ scheme, get_chrooted_url = self.get_chrooted_scheme('inside')
+ Branch.open(get_chrooted_url('stacked')).set_stacked_on_url(
+ self.get_url('outside/stacked-on'))
+ self.assertRaises(
+ BadUrl, open_only_scheme, scheme, get_chrooted_url('stacked'))
diff --git a/bzrlib/tests/test_urlutils.py b/bzrlib/tests/test_urlutils.py
new file mode 100644
index 0000000..e5042f3
--- /dev/null
+++ b/bzrlib/tests/test_urlutils.py
@@ -0,0 +1,1045 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for the urlutils wrapper."""
+
+import os
+import sys
+
+from bzrlib import osutils, urlutils, win32utils
+from bzrlib.errors import (
+ InvalidURL,
+ InvalidURLJoin,
+ InvalidRebaseURLs,
+ PathNotChild,
+ )
+from bzrlib.tests import TestCaseInTempDir, TestCase, TestSkipped
+
+
+class TestUrlToPath(TestCase):
+
+ def test_basename(self):
+ # Test bzrlib.urlutils.basename()
+ basename = urlutils.basename
+ if sys.platform == 'win32':
+ self.assertRaises(InvalidURL, basename, 'file:///path/to/foo')
+ self.assertEqual('foo', basename('file:///C|/foo'))
+ self.assertEqual('foo', basename('file:///C:/foo'))
+ self.assertEqual('', basename('file:///C:/'))
+ else:
+ self.assertEqual('foo', basename('file:///foo'))
+ self.assertEqual('', basename('file:///'))
+
+ self.assertEqual('foo', basename('http://host/path/to/foo'))
+ self.assertEqual('foo', basename('http://host/path/to/foo/'))
+ self.assertEqual('',
+ basename('http://host/path/to/foo/', exclude_trailing_slash=False))
+ self.assertEqual('path', basename('http://host/path'))
+ self.assertEqual('', basename('http://host/'))
+ self.assertEqual('', basename('http://host'))
+ self.assertEqual('path', basename('http:///nohost/path'))
+
+ self.assertEqual('path', basename('random+scheme://user:pass@ahost:port/path'))
+ self.assertEqual('path', basename('random+scheme://user:pass@ahost:port/path/'))
+ self.assertEqual('', basename('random+scheme://user:pass@ahost:port/'))
+
+ # relative paths
+ self.assertEqual('foo', basename('path/to/foo'))
+ self.assertEqual('foo', basename('path/to/foo/'))
+ self.assertEqual('', basename('path/to/foo/',
+ exclude_trailing_slash=False))
+ self.assertEqual('foo', basename('path/../foo'))
+ self.assertEqual('foo', basename('../path/foo'))
+
+ def test_normalize_url_files(self):
+ # Test that local paths are properly normalized
+ normalize_url = urlutils.normalize_url
+
+ def norm_file(expected, path):
+ url = normalize_url(path)
+ self.assertStartsWith(url, 'file:///')
+ if sys.platform == 'win32':
+ url = url[len('file:///C:'):]
+ else:
+ url = url[len('file://'):]
+
+ self.assertEndsWith(url, expected)
+
+ norm_file('path/to/foo', 'path/to/foo')
+ norm_file('/path/to/foo', '/path/to/foo')
+ norm_file('path/to/foo', '../path/to/foo')
+
+ # Local paths are assumed to *not* be escaped at all
+ try:
+ u'uni/\xb5'.encode(osutils.get_user_encoding())
+ except UnicodeError:
+ # locale cannot handle unicode
+ pass
+ else:
+ norm_file('uni/%C2%B5', u'uni/\xb5')
+
+ norm_file('uni/%25C2%25B5', u'uni/%C2%B5')
+ norm_file('uni/%20b', u'uni/ b')
+ # All the crazy characters get escaped in local paths => file:/// urls
+ # The ' ' character must not be at the end, because on win32
+ # it gets stripped off by ntpath.abspath
+ norm_file('%27%20%3B/%3F%3A%40%26%3D%2B%24%2C%23', "' ;/?:@&=+$,#")
+
+ def test_normalize_url_hybrid(self):
+ # Anything with a scheme:// should be treated as a hybrid url
+ # which changes what characters get escaped.
+ normalize_url = urlutils.normalize_url
+
+ eq = self.assertEqual
+ eq('file:///foo/', normalize_url(u'file:///foo/'))
+ eq('file:///foo/%20', normalize_url(u'file:///foo/ '))
+ eq('file:///foo/%20', normalize_url(u'file:///foo/%20'))
+ # Don't escape reserved characters
+ eq('file:///ab_c.d-e/%f:?g&h=i+j;k,L#M$',
+ normalize_url('file:///ab_c.d-e/%f:?g&h=i+j;k,L#M$'))
+ eq('http://ab_c.d-e/%f:?g&h=i+j;k,L#M$',
+ normalize_url('http://ab_c.d-e/%f:?g&h=i+j;k,L#M$'))
+
+ # Escape unicode characters, but not already escaped chars
+ eq('http://host/ab/%C2%B5/%C2%B5',
+ normalize_url(u'http://host/ab/%C2%B5/\xb5'))
+
+ # Unescape characters that don't need to be escaped
+ eq('http://host/~bob%2525-._',
+ normalize_url('http://host/%7Ebob%2525%2D%2E%5F'))
+ eq('http://host/~bob%2525-._',
+ normalize_url(u'http://host/%7Ebob%2525%2D%2E%5F'))
+
+ # Normalize verifies URLs when they are not unicode
+ # (indicating they did not come from the user)
+ self.assertRaises(InvalidURL, normalize_url, 'http://host/\xb5')
+ self.assertRaises(InvalidURL, normalize_url, 'http://host/ ')
+
+ def test_url_scheme_re(self):
+ # Test paths that may be URLs
+ def test_one(url, scheme_and_path):
+ """Assert that _url_scheme_re correctly matches
+
+ :param scheme_and_path: The (scheme, path) that should be matched
+ can be None, to indicate it should not match
+ """
+ m = urlutils._url_scheme_re.match(url)
+ if scheme_and_path is None:
+ self.assertEqual(None, m)
+ else:
+ self.assertEqual(scheme_and_path[0], m.group('scheme'))
+ self.assertEqual(scheme_and_path[1], m.group('path'))
+
+ # Local paths
+ test_one('/path', None)
+ test_one('C:/path', None)
+ test_one('../path/to/foo', None)
+ test_one(u'../path/to/fo\xe5', None)
+
+ # Real URLS
+ test_one('http://host/path/', ('http', 'host/path/'))
+ test_one('sftp://host/path/to/foo', ('sftp', 'host/path/to/foo'))
+ test_one('file:///usr/bin', ('file', '/usr/bin'))
+ test_one('file:///C:/Windows', ('file', '/C:/Windows'))
+ test_one('file:///C|/Windows', ('file', '/C|/Windows'))
+ test_one(u'readonly+sftp://host/path/\xe5', ('readonly+sftp', u'host/path/\xe5'))
+
+ # Weird stuff
+ # Can't have slashes or colons in the scheme
+ test_one('/path/to/://foo', None)
+ test_one('scheme:stuff://foo', ('scheme', 'stuff://foo'))
+ # Must have more than one character for scheme
+ test_one('C://foo', None)
+ test_one('ab://foo', ('ab', 'foo'))
+
+ def test_dirname(self):
+ # Test bzrlib.urlutils.dirname()
+ dirname = urlutils.dirname
+ if sys.platform == 'win32':
+ self.assertRaises(InvalidURL, dirname, 'file:///path/to/foo')
+ self.assertEqual('file:///C|/', dirname('file:///C|/foo'))
+ self.assertEqual('file:///C|/', dirname('file:///C|/'))
+ else:
+ self.assertEqual('file:///', dirname('file:///foo'))
+ self.assertEqual('file:///', dirname('file:///'))
+
+ self.assertEqual('http://host/path/to', dirname('http://host/path/to/foo'))
+ self.assertEqual('http://host/path/to', dirname('http://host/path/to/foo/'))
+ self.assertEqual('http://host/path/to/foo',
+ dirname('http://host/path/to/foo/', exclude_trailing_slash=False))
+ self.assertEqual('http://host/', dirname('http://host/path'))
+ self.assertEqual('http://host/', dirname('http://host/'))
+ self.assertEqual('http://host', dirname('http://host'))
+ self.assertEqual('http:///nohost', dirname('http:///nohost/path'))
+
+ self.assertEqual('random+scheme://user:pass@ahost:port/',
+ dirname('random+scheme://user:pass@ahost:port/path'))
+ self.assertEqual('random+scheme://user:pass@ahost:port/',
+ dirname('random+scheme://user:pass@ahost:port/path/'))
+ self.assertEqual('random+scheme://user:pass@ahost:port/',
+ dirname('random+scheme://user:pass@ahost:port/'))
+
+ # relative paths
+ self.assertEqual('path/to', dirname('path/to/foo'))
+ self.assertEqual('path/to', dirname('path/to/foo/'))
+ self.assertEqual('path/to/foo',
+ dirname('path/to/foo/', exclude_trailing_slash=False))
+ self.assertEqual('path/..', dirname('path/../foo'))
+ self.assertEqual('../path', dirname('../path/foo'))
+
+ def test_is_url(self):
+ self.assertTrue(urlutils.is_url('http://foo/bar'))
+ self.assertTrue(urlutils.is_url('bzr+ssh://foo/bar'))
+ self.assertTrue(urlutils.is_url('lp:foo/bar'))
+ self.assertTrue(urlutils.is_url('file:///foo/bar'))
+ self.assertFalse(urlutils.is_url(''))
+ self.assertFalse(urlutils.is_url('foo'))
+ self.assertFalse(urlutils.is_url('foo/bar'))
+ self.assertFalse(urlutils.is_url('/foo'))
+ self.assertFalse(urlutils.is_url('/foo/bar'))
+ self.assertFalse(urlutils.is_url('C:/'))
+ self.assertFalse(urlutils.is_url('C:/foo'))
+ self.assertFalse(urlutils.is_url('C:/foo/bar'))
+
+ def test_join(self):
+ def test(expected, *args):
+ joined = urlutils.join(*args)
+ self.assertEqual(expected, joined)
+
+ # Test relative path joining
+ test('foo', 'foo') # relative fragment with nothing is preserved.
+ test('foo/bar', 'foo', 'bar')
+ test('http://foo/bar', 'http://foo', 'bar')
+ test('http://foo/bar', 'http://foo', '.', 'bar')
+ test('http://foo/baz', 'http://foo', 'bar', '../baz')
+ test('http://foo/bar/baz', 'http://foo', 'bar/baz')
+ test('http://foo/baz', 'http://foo', 'bar/../baz')
+ test('http://foo/baz', 'http://foo/bar/', '../baz')
+ test('lp:foo/bar', 'lp:foo', 'bar')
+ test('lp:foo/bar/baz', 'lp:foo', 'bar/baz')
+
+ # Absolute paths
+ test('http://foo', 'http://foo') # abs url with nothing is preserved.
+ test('http://bar', 'http://foo', 'http://bar')
+ test('sftp://bzr/foo', 'http://foo', 'bar', 'sftp://bzr/foo')
+ test('file:///bar', 'foo', 'file:///bar')
+ test('http://bar/', 'http://foo', 'http://bar/')
+ test('http://bar/a', 'http://foo', 'http://bar/a')
+ test('http://bar/a/', 'http://foo', 'http://bar/a/')
+ test('lp:bar', 'http://foo', 'lp:bar')
+ test('lp:bar', 'lp:foo', 'lp:bar')
+ test('file:///stuff', 'lp:foo', 'file:///stuff')
+
+ # From a base path
+ test('file:///foo', 'file:///', 'foo')
+ test('file:///bar/foo', 'file:///bar/', 'foo')
+ test('http://host/foo', 'http://host/', 'foo')
+ test('http://host/', 'http://host', '')
+
+ # Invalid joinings
+ # Cannot go above root
+ # Implicitly at root:
+ self.assertRaises(InvalidURLJoin, urlutils.join,
+ 'http://foo', '../baz')
+ self.assertRaises(InvalidURLJoin, urlutils.join,
+ 'http://foo', '/..')
+ # Joining from a path explicitly under the root.
+ self.assertRaises(InvalidURLJoin, urlutils.join,
+ 'http://foo/a', '../../b')
+
+ def test_joinpath(self):
+ def test(expected, *args):
+ joined = urlutils.joinpath(*args)
+ self.assertEqual(expected, joined)
+
+ # Test a single element
+ test('foo', 'foo')
+
+ # Test relative path joining
+ test('foo/bar', 'foo', 'bar')
+ test('foo/bar', 'foo', '.', 'bar')
+ test('foo/baz', 'foo', 'bar', '../baz')
+ test('foo/bar/baz', 'foo', 'bar/baz')
+ test('foo/baz', 'foo', 'bar/../baz')
+
+ # Test joining to an absolute path
+ test('/foo', '/foo')
+ test('/foo', '/foo', '.')
+ test('/foo/bar', '/foo', 'bar')
+ test('/', '/foo', '..')
+
+ # Test joining with an absolute path
+ test('/bar', 'foo', '/bar')
+
+ # Test joining to a path with a trailing slash
+ test('foo/bar', 'foo/', 'bar')
+
+ # Invalid joinings
+ # Cannot go above root
+ self.assertRaises(InvalidURLJoin, urlutils.joinpath, '/', '../baz')
+ self.assertRaises(InvalidURLJoin, urlutils.joinpath, '/', '..')
+ self.assertRaises(InvalidURLJoin, urlutils.joinpath, '/', '/..')
+
+ def test_join_segment_parameters_raw(self):
+ join_segment_parameters_raw = urlutils.join_segment_parameters_raw
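+ # Segment parameters are comma-separated values appended after the last
+ # path segment, e.g. "/somedir/path,bla,bar".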
+ self.assertEquals("/somedir/path",
+ join_segment_parameters_raw("/somedir/path"))
+ self.assertEquals("/somedir/path,rawdata",
+ join_segment_parameters_raw("/somedir/path", "rawdata"))
+ self.assertRaises(InvalidURLJoin,
+ join_segment_parameters_raw, "/somedir/path",
+ "rawdata1,rawdata2,rawdata3")
+ self.assertEquals("/somedir/path,bla,bar",
+ join_segment_parameters_raw("/somedir/path", "bla", "bar"))
+ self.assertEquals("/somedir,exist=some/path,bla,bar",
+ join_segment_parameters_raw("/somedir,exist=some/path",
+ "bla", "bar"))
+ self.assertRaises(TypeError, join_segment_parameters_raw,
+ "/somepath", 42)
+
+ def test_join_segment_parameters(self):
+ join_segment_parameters = urlutils.join_segment_parameters
+ self.assertEquals("/somedir/path",
+ join_segment_parameters("/somedir/path", {}))
+ self.assertEquals("/somedir/path,key1=val1",
+ join_segment_parameters("/somedir/path", {"key1": "val1"}))
+ self.assertRaises(InvalidURLJoin,
+ join_segment_parameters, "/somedir/path",
+ {"branch": "brr,brr,brr"})
+ self.assertRaises(InvalidURLJoin,
+ join_segment_parameters, "/somedir/path", {"key1=val1": "val2"})
+ self.assertEquals("/somedir/path,key1=val1,key2=val2",
+ join_segment_parameters("/somedir/path", {
+ "key1": "val1", "key2": "val2"}))
+ self.assertEquals("/somedir/path,key1=val1,key2=val2",
+ join_segment_parameters("/somedir/path,key1=val1", {
+ "key2": "val2"}))
+ self.assertEquals("/somedir/path,key1=val2",
+ join_segment_parameters("/somedir/path,key1=val1", {
+ "key1": "val2"}))
+ self.assertEquals("/somedir,exist=some/path,key1=val1",
+ join_segment_parameters("/somedir,exist=some/path",
+ {"key1": "val1"}))
+ self.assertEquals("/,key1=val1,key2=val2",
+ join_segment_parameters("/,key1=val1", {"key2": "val2"}))
+ self.assertRaises(TypeError,
+ join_segment_parameters, "/,key1=val1", {"foo": 42})
+
+ def test_function_type(self):
+ if sys.platform == 'win32':
+ self.assertEqual(urlutils._win32_local_path_to_url,
+ urlutils.local_path_to_url)
+ self.assertEqual(urlutils._win32_local_path_from_url,
+ urlutils.local_path_from_url)
+ else:
+ self.assertEqual(urlutils._posix_local_path_to_url,
+ urlutils.local_path_to_url)
+ self.assertEqual(urlutils._posix_local_path_from_url,
+ urlutils.local_path_from_url)
+
+ def test_posix_local_path_to_url(self):
+ to_url = urlutils._posix_local_path_to_url
+ self.assertEqual('file:///path/to/foo',
+ to_url('/path/to/foo'))
+
+ self.assertEqual('file:///path/to/foo%2Cbar',
+ to_url('/path/to/foo,bar'))
+
+ try:
+ result = to_url(u'/path/to/r\xe4ksm\xf6rg\xe5s')
+ except UnicodeError:
+ raise TestSkipped("local encoding cannot handle unicode")
+
+ self.assertEqual('file:///path/to/r%C3%A4ksm%C3%B6rg%C3%A5s', result)
+ self.assertFalse(isinstance(result, unicode))
+
+ def test_posix_local_path_from_url(self):
+ from_url = urlutils._posix_local_path_from_url
+ self.assertEqual('/path/to/foo',
+ from_url('file:///path/to/foo'))
+ self.assertEqual('/path/to/foo',
+ from_url('file:///path/to/foo,branch=foo'))
+ self.assertEqual(u'/path/to/r\xe4ksm\xf6rg\xe5s',
+ from_url('file:///path/to/r%C3%A4ksm%C3%B6rg%C3%A5s'))
+ self.assertEqual(u'/path/to/r\xe4ksm\xf6rg\xe5s',
+ from_url('file:///path/to/r%c3%a4ksm%c3%b6rg%c3%a5s'))
+ self.assertEqual(u'/path/to/r\xe4ksm\xf6rg\xe5s',
+ from_url('file://localhost/path/to/r%c3%a4ksm%c3%b6rg%c3%a5s'))
+
+ self.assertRaises(InvalidURL, from_url, '/path/to/foo')
+ self.assertRaises(
+ InvalidURL, from_url,
+ 'file://remotehost/path/to/r%c3%a4ksm%c3%b6rg%c3%a5s')
+
+ def test_win32_local_path_to_url(self):
+ to_url = urlutils._win32_local_path_to_url
+ self.assertEqual('file:///C:/path/to/foo',
+ to_url('C:/path/to/foo'))
+ # BOGUS: on win32, ntpath.abspath will strip trailing
+ # whitespace, so this will always fail.
+ # Under Linux, however, it fakes abspath support
+ # and thus will succeed.
+ # self.assertEqual('file:///C:/path/to/foo%20',
+ # to_url('C:/path/to/foo '))
+ self.assertEqual('file:///C:/path/to/f%20oo',
+ to_url('C:/path/to/f oo'))
+
+ self.assertEqual('file:///', to_url('/'))
+
+ self.assertEqual('file:///C:/path/to/foo%2Cbar',
+ to_url('C:/path/to/foo,bar'))
+ try:
+ result = to_url(u'd:/path/to/r\xe4ksm\xf6rg\xe5s')
+ except UnicodeError:
+ raise TestSkipped("local encoding cannot handle unicode")
+
+ self.assertEqual('file:///D:/path/to/r%C3%A4ksm%C3%B6rg%C3%A5s', result)
+ self.assertFalse(isinstance(result, unicode))
+
+ def test_win32_unc_path_to_url(self):
+ to_url = urlutils._win32_local_path_to_url
+ self.assertEqual('file://HOST/path',
+ to_url(r'\\HOST\path'))
+ self.assertEqual('file://HOST/path',
+ to_url('//HOST/path'))
+
+ try:
+ result = to_url(u'//HOST/path/to/r\xe4ksm\xf6rg\xe5s')
+ except UnicodeError:
+ raise TestSkipped("local encoding cannot handle unicode")
+
+ self.assertEqual('file://HOST/path/to/r%C3%A4ksm%C3%B6rg%C3%A5s', result)
+ self.assertFalse(isinstance(result, unicode))
+
+ def test_win32_local_path_from_url(self):
+ from_url = urlutils._win32_local_path_from_url
+ self.assertEqual('C:/path/to/foo',
+ from_url('file:///C|/path/to/foo'))
+ self.assertEqual(u'D:/path/to/r\xe4ksm\xf6rg\xe5s',
+ from_url('file:///d|/path/to/r%C3%A4ksm%C3%B6rg%C3%A5s'))
+ self.assertEqual(u'D:/path/to/r\xe4ksm\xf6rg\xe5s',
+ from_url('file:///d:/path/to/r%c3%a4ksm%c3%b6rg%c3%a5s'))
+ self.assertEqual('/', from_url('file:///'))
+ self.assertEqual('C:/path/to/foo',
+ from_url('file:///C|/path/to/foo,branch=foo'))
+
+ self.assertRaises(InvalidURL, from_url, 'file:///C:')
+ self.assertRaises(InvalidURL, from_url, 'file:///c')
+ self.assertRaises(InvalidURL, from_url, '/path/to/foo')
+ # Not a valid _win32 url, no drive letter
+ self.assertRaises(InvalidURL, from_url, 'file:///path/to/foo')
+
+ def test_win32_unc_path_from_url(self):
+ from_url = urlutils._win32_local_path_from_url
+ self.assertEqual('//HOST/path', from_url('file://HOST/path'))
+ self.assertEqual('//HOST/path',
+ from_url('file://HOST/path,branch=foo'))
+ # Although IE allows 2, 4, 5 and 6 slashes in a URL to another
+ # machine, we want to use only 2 slashes.
+ # Firefox understands only 5 slashes in a URL, but that's ugly.
+ self.assertRaises(InvalidURL, from_url, 'file:////HOST/path')
+ self.assertRaises(InvalidURL, from_url, 'file://///HOST/path')
+ self.assertRaises(InvalidURL, from_url, 'file://////HOST/path')
+ # check for file://C:/ instead of file:///C:/
+ self.assertRaises(InvalidURL, from_url, 'file://C:/path')
+
+ def test_win32_extract_drive_letter(self):
+ extract = urlutils._win32_extract_drive_letter
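+ # The helper moves the drive letter from the path onto the url base,
+ # returning (url_base_with_drive, remaining_path).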
+ self.assertEqual(('file:///C:', '/foo'), extract('file://', '/C:/foo'))
+ self.assertEqual(('file:///d|', '/path'), extract('file://', '/d|/path'))
+ self.assertRaises(InvalidURL, extract, 'file://', '/path')
+ # Root drives without a trailing slash are treated as invalid, see bug #841322
+ self.assertEqual(('file:///C:', '/'), extract('file://', '/C:/'))
+ self.assertRaises(InvalidURL, extract, 'file://', '/C:')
+ # Invalid without a drive separator or a following forward slash
+ self.assertRaises(InvalidURL, extract, 'file://', '/C')
+ self.assertRaises(InvalidURL, extract, 'file://', '/C:ool')
+
+ def test_split(self):
+ # Test bzrlib.urlutils.split()
+ split = urlutils.split
+ if sys.platform == 'win32':
+ self.assertRaises(InvalidURL, split, 'file:///path/to/foo')
+ self.assertEqual(('file:///C|/', 'foo'), split('file:///C|/foo'))
+ self.assertEqual(('file:///C:/', ''), split('file:///C:/'))
+ else:
+ self.assertEqual(('file:///', 'foo'), split('file:///foo'))
+ self.assertEqual(('file:///', ''), split('file:///'))
+
+ self.assertEqual(('http://host/path/to', 'foo'), split('http://host/path/to/foo'))
+ self.assertEqual(('http://host/path/to', 'foo'), split('http://host/path/to/foo/'))
+ self.assertEqual(('http://host/path/to/foo', ''),
+ split('http://host/path/to/foo/', exclude_trailing_slash=False))
+ self.assertEqual(('http://host/', 'path'), split('http://host/path'))
+ self.assertEqual(('http://host/', ''), split('http://host/'))
+ self.assertEqual(('http://host', ''), split('http://host'))
+ self.assertEqual(('http:///nohost', 'path'), split('http:///nohost/path'))
+
+ self.assertEqual(('random+scheme://user:pass@ahost:port/', 'path'),
+ split('random+scheme://user:pass@ahost:port/path'))
+ self.assertEqual(('random+scheme://user:pass@ahost:port/', 'path'),
+ split('random+scheme://user:pass@ahost:port/path/'))
+ self.assertEqual(('random+scheme://user:pass@ahost:port/', ''),
+ split('random+scheme://user:pass@ahost:port/'))
+
+ # relative paths
+ self.assertEqual(('path/to', 'foo'), split('path/to/foo'))
+ self.assertEqual(('path/to', 'foo'), split('path/to/foo/'))
+ self.assertEqual(('path/to/foo', ''),
+ split('path/to/foo/', exclude_trailing_slash=False))
+ self.assertEqual(('path/..', 'foo'), split('path/../foo'))
+ self.assertEqual(('../path', 'foo'), split('../path/foo'))
+
+ def test_split_segment_parameters_raw(self):
+ split_segment_parameters_raw = urlutils.split_segment_parameters_raw
+ # Check relative references with absolute paths
+ self.assertEquals(("/some/path", []),
+ split_segment_parameters_raw("/some/path"))
+ self.assertEquals(("/some/path", ["tip"]),
+ split_segment_parameters_raw("/some/path,tip"))
+ self.assertEquals(("/some,dir/path", ["tip"]),
+ split_segment_parameters_raw("/some,dir/path,tip"))
+ self.assertEquals(("/somedir/path", ["heads%2Ftip"]),
+ split_segment_parameters_raw("/somedir/path,heads%2Ftip"))
+ self.assertEquals(("/somedir/path", ["heads%2Ftip", "bar"]),
+ split_segment_parameters_raw("/somedir/path,heads%2Ftip,bar"))
+ # Check relative references with relative paths
+ self.assertEquals(("", ["key1=val1"]),
+ split_segment_parameters_raw(",key1=val1"))
+ self.assertEquals(("foo/", ["key1=val1"]),
+ split_segment_parameters_raw("foo/,key1=val1"))
+ self.assertEquals(("foo", ["key1=val1"]),
+ split_segment_parameters_raw("foo,key1=val1"))
+ self.assertEquals(("foo/base,la=bla/other/elements", []),
+ split_segment_parameters_raw("foo/base,la=bla/other/elements"))
+ self.assertEquals(("foo/base,la=bla/other/elements", ["a=b"]),
+ split_segment_parameters_raw("foo/base,la=bla/other/elements,a=b"))
+ # TODO: Check full URLs as well as relative references
+
+ def test_split_segment_parameters(self):
+ split_segment_parameters = urlutils.split_segment_parameters
+ # Check relative references with absolute paths
+ self.assertEquals(("/some/path", {}),
+ split_segment_parameters("/some/path"))
+ self.assertEquals(("/some/path", {"branch": "tip"}),
+ split_segment_parameters("/some/path,branch=tip"))
+ self.assertEquals(("/some,dir/path", {"branch": "tip"}),
+ split_segment_parameters("/some,dir/path,branch=tip"))
+ self.assertEquals(("/somedir/path", {"ref": "heads%2Ftip"}),
+ split_segment_parameters("/somedir/path,ref=heads%2Ftip"))
+ self.assertEquals(("/somedir/path",
+ {"ref": "heads%2Ftip", "key1": "val1"}),
+ split_segment_parameters(
+ "/somedir/path,ref=heads%2Ftip,key1=val1"))
+ self.assertEquals(("/somedir/path", {"ref": "heads%2F=tip"}),
+ split_segment_parameters("/somedir/path,ref=heads%2F=tip"))
+ # Check relative references with relative paths
+ self.assertEquals(("", {"key1": "val1"}),
+ split_segment_parameters(",key1=val1"))
+ self.assertEquals(("foo/", {"key1": "val1"}),
+ split_segment_parameters("foo/,key1=val1"))
+ self.assertEquals(("foo/base,key1=val1/other/elements", {}),
+ split_segment_parameters("foo/base,key1=val1/other/elements"))
+ self.assertEquals(("foo/base,key1=val1/other/elements",
+ {"key2": "val2"}), split_segment_parameters(
+ "foo/base,key1=val1/other/elements,key2=val2"))
+ # TODO: Check full URLs as well as relative references
+
+ def test_win32_strip_local_trailing_slash(self):
+ strip = urlutils._win32_strip_local_trailing_slash
+ self.assertEqual('file://', strip('file://'))
+ self.assertEqual('file:///', strip('file:///'))
+ self.assertEqual('file:///C', strip('file:///C'))
+ self.assertEqual('file:///C:', strip('file:///C:'))
+ self.assertEqual('file:///d|', strip('file:///d|'))
+ self.assertEqual('file:///C:/', strip('file:///C:/'))
+ self.assertEqual('file:///C:/a', strip('file:///C:/a/'))
+
+ def test_strip_trailing_slash(self):
+ sts = urlutils.strip_trailing_slash
+ if sys.platform == 'win32':
+ self.assertEqual('file:///C|/', sts('file:///C|/'))
+ self.assertEqual('file:///C:/foo', sts('file:///C:/foo'))
+ self.assertEqual('file:///C|/foo', sts('file:///C|/foo/'))
+ else:
+ self.assertEqual('file:///', sts('file:///'))
+ self.assertEqual('file:///foo', sts('file:///foo'))
+ self.assertEqual('file:///foo', sts('file:///foo/'))
+
+ self.assertEqual('http://host/', sts('http://host/'))
+ self.assertEqual('http://host/foo', sts('http://host/foo'))
+ self.assertEqual('http://host/foo', sts('http://host/foo/'))
+
+ # No need to fail just because the slash is missing
+ self.assertEqual('http://host', sts('http://host'))
+ # TODO: jam 20060502 Should this raise InvalidURL?
+ self.assertEqual('file://', sts('file://'))
+
+ self.assertEqual('random+scheme://user:pass@ahost:port/path',
+ sts('random+scheme://user:pass@ahost:port/path'))
+ self.assertEqual('random+scheme://user:pass@ahost:port/path',
+ sts('random+scheme://user:pass@ahost:port/path/'))
+ self.assertEqual('random+scheme://user:pass@ahost:port/',
+ sts('random+scheme://user:pass@ahost:port/'))
+
+ # Make sure relative paths work too
+ self.assertEqual('path/to/foo', sts('path/to/foo'))
+ self.assertEqual('path/to/foo', sts('path/to/foo/'))
+ self.assertEqual('../to/foo', sts('../to/foo/'))
+ self.assertEqual('path/../foo', sts('path/../foo/'))
+
+ def test_unescape_for_display_utf8(self):
+ # Test that URLs are converted to nice unicode strings for display
+ def test(expected, url, encoding='utf-8'):
+ disp_url = urlutils.unescape_for_display(url, encoding=encoding)
+ self.assertIsInstance(disp_url, unicode)
+ self.assertEqual(expected, disp_url)
+
+ test('http://foo', 'http://foo')
+ if sys.platform == 'win32':
+ test('C:/foo/path', 'file:///C|/foo/path')
+ test('C:/foo/path', 'file:///C:/foo/path')
+ else:
+ test('/foo/path', 'file:///foo/path')
+
+ test('http://foo/%2Fbaz', 'http://foo/%2Fbaz')
+ test(u'http://host/r\xe4ksm\xf6rg\xe5s',
+ 'http://host/r%C3%A4ksm%C3%B6rg%C3%A5s')
+
+ # Make sure special escaped characters stay escaped
+ test(u'http://host/%3B%2F%3F%3A%40%26%3D%2B%24%2C%23',
+ 'http://host/%3B%2F%3F%3A%40%26%3D%2B%24%2C%23')
+
+ # Can we handle sections that don't have utf-8 encoding?
+ test(u'http://host/%EE%EE%EE/r\xe4ksm\xf6rg\xe5s',
+ 'http://host/%EE%EE%EE/r%C3%A4ksm%C3%B6rg%C3%A5s')
+
+ # Test encoding into output that can handle some characters
+ test(u'http://host/%EE%EE%EE/r\xe4ksm\xf6rg\xe5s',
+ 'http://host/%EE%EE%EE/r%C3%A4ksm%C3%B6rg%C3%A5s',
+ encoding='iso-8859-1')
+
+ # This one can be encoded into utf8
+ test(u'http://host/\u062c\u0648\u062c\u0648',
+ 'http://host/%d8%ac%d9%88%d8%ac%d9%88',
+ encoding='utf-8')
+
+ # This can't be put into 8859-1 and so stays as escapes
+ test(u'http://host/%d8%ac%d9%88%d8%ac%d9%88',
+ 'http://host/%d8%ac%d9%88%d8%ac%d9%88',
+ encoding='iso-8859-1')
+
+ def test_escape(self):
+ self.assertEqual('%25', urlutils.escape('%'))
+ self.assertEqual('%C3%A5', urlutils.escape(u'\xe5'))
+ self.assertFalse(isinstance(urlutils.escape(u'\xe5'), unicode))
+
+ def test_escape_tildes(self):
+ self.assertEqual('~foo', urlutils.escape('~foo'))
+
+ def test_unescape(self):
+ self.assertEqual('%', urlutils.unescape('%25'))
+ self.assertEqual(u'\xe5', urlutils.unescape('%C3%A5'))
+
+ self.assertRaises(InvalidURL, urlutils.unescape, u'\xe5')
+ self.assertRaises(InvalidURL, urlutils.unescape, '\xe5')
+ self.assertRaises(InvalidURL, urlutils.unescape, '%E5')
+
+ def test_escape_unescape(self):
+ self.assertEqual(u'\xe5', urlutils.unescape(urlutils.escape(u'\xe5')))
+ self.assertEqual('%', urlutils.unescape(urlutils.escape('%')))
+
+ def test_relative_url(self):
+ def test(expected, base, other):
+ result = urlutils.relative_url(base, other)
+ self.assertEqual(expected, result)
+
+ test('a', 'http://host/', 'http://host/a')
+ test('http://entirely/different', 'sftp://host/branch',
+ 'http://entirely/different')
+ test('../person/feature', 'http://host/branch/mainline',
+ 'http://host/branch/person/feature')
+ test('..', 'http://host/branch', 'http://host/')
+ test('http://host2/branch', 'http://host1/branch', 'http://host2/branch')
+ test('.', 'http://host1/branch', 'http://host1/branch')
+ test('../../../branch/2b', 'file:///home/jelmer/foo/bar/2b',
+ 'file:///home/jelmer/branch/2b')
+ test('../../branch/2b', 'sftp://host/home/jelmer/bar/2b',
+ 'sftp://host/home/jelmer/branch/2b')
+ test('../../branch/feature/%2b', 'http://host/home/jelmer/bar/%2b',
+ 'http://host/home/jelmer/branch/feature/%2b')
+ test('../../branch/feature/2b', 'http://host/home/jelmer/bar/2b/',
+ 'http://host/home/jelmer/branch/feature/2b')
+ # relative_url should preserve a trailing slash
+ test('../../branch/feature/2b/', 'http://host/home/jelmer/bar/2b/',
+ 'http://host/home/jelmer/branch/feature/2b/')
+ test('../../branch/feature/2b/', 'http://host/home/jelmer/bar/2b',
+ 'http://host/home/jelmer/branch/feature/2b/')
+
+ # TODO: treat http://host as http://host/
+ # relative_url is typically called from a branch.base or
+ # transport.base which always ends with a /
+ #test('a', 'http://host', 'http://host/a')
+ test('http://host/a', 'http://host', 'http://host/a')
+ #test('.', 'http://host', 'http://host/')
+ test('http://host/', 'http://host', 'http://host/')
+ #test('.', 'http://host/', 'http://host')
+ test('http://host', 'http://host/', 'http://host')
+
+ # On Windows file:///C:/path/to and file:///D:/other/path
+ # should not use relative url over the non-existent '/' directory.
+ if sys.platform == 'win32':
+ # on the same drive
+ test('../../other/path',
+ 'file:///C:/path/to', 'file:///C:/other/path')
+ #~ The next two tests fail, i.e. urlutils.relative_url expects
+ #~ to see normalized file URLs?
+ #~test('../../other/path',
+ #~ 'file:///C:/path/to', 'file:///c:/other/path')
+ #~test('../../other/path',
+ #~ 'file:///C:/path/to', 'file:///C|/other/path')
+
+ # check UNC paths too
+ test('../../other/path',
+ 'file://HOST/base/path/to', 'file://HOST/base/other/path')
+ # on different drives
+ test('file:///D:/other/path',
+ 'file:///C:/path/to', 'file:///D:/other/path')
+ # TODO: strictly speaking, in a UNC path //HOST/base is the full
+ # analog of a drive letter for a hard disk, and this situation
+ # should also be an exception to the rules. [bialix 20071221]
+
+
+class TestCwdToURL(TestCaseInTempDir):
+ """Test that local_path_to_url works based on the cwd"""
+
+ def test_dot(self):
+ # This test will fail if getcwd is not ascii
+ os.mkdir('mytest')
+ os.chdir('mytest')
+
+ url = urlutils.local_path_to_url('.')
+ self.assertEndsWith(url, '/mytest')
+
+ def test_non_ascii(self):
+ if win32utils.winver == 'Windows 98':
+ raise TestSkipped('Windows 98 cannot handle unicode filenames')
+
+ try:
+ os.mkdir(u'dod\xe9')
+ except UnicodeError:
+ raise TestSkipped('cannot create unicode directory')
+
+ os.chdir(u'dod\xe9')
+
+ # On Mac OSX this directory is actually:
+ # u'/dode\u0301' => '/dode\xcc\x81'
+ # but we should normalize it back to
+ # u'/dod\xe9' => '/dod\xc3\xa9'
+ url = urlutils.local_path_to_url('.')
+ self.assertEndsWith(url, '/dod%C3%A9')
+
+
+class TestDeriveToLocation(TestCase):
+ """Test that the mapping of FROM_LOCATION to TO_LOCATION works."""
+
+ def test_to_locations_derived_from_paths(self):
+ derive = urlutils.derive_to_location
+ self.assertEqual("bar", derive("bar"))
+ self.assertEqual("bar", derive("../bar"))
+ self.assertEqual("bar", derive("/foo/bar"))
+ self.assertEqual("bar", derive("c:/foo/bar"))
+ self.assertEqual("bar", derive("c:bar"))
+
+ def test_to_locations_derived_from_urls(self):
+ derive = urlutils.derive_to_location
+ self.assertEqual("bar", derive("http://foo/bar"))
+ self.assertEqual("bar", derive("bzr+ssh://foo/bar"))
+ self.assertEqual("foo-bar", derive("lp:foo-bar"))
+
+
+class TestRebaseURL(TestCase):
+ """Test the behavior of rebase_url."""
+
+ def test_non_relative(self):
+ result = urlutils.rebase_url('file://foo', 'file://foo',
+ 'file://foo/bar')
+ self.assertEqual('file://foo', result)
+ result = urlutils.rebase_url('/foo', 'file://foo',
+ 'file://foo/bar')
+ self.assertEqual('/foo', result)
+
+ def test_different_ports(self):
+ e = self.assertRaises(InvalidRebaseURLs, urlutils.rebase_url,
+ 'foo', 'http://bar:80', 'http://bar:81')
+ self.assertEqual(str(e), "URLs differ by more than path:"
+ " 'http://bar:80' and 'http://bar:81'")
+
+ def test_different_hosts(self):
+ e = self.assertRaises(InvalidRebaseURLs, urlutils.rebase_url,
+ 'foo', 'http://bar', 'http://baz')
+ self.assertEqual(str(e), "URLs differ by more than path: 'http://bar'"
+ " and 'http://baz'")
+
+ def test_different_protocol(self):
+ e = self.assertRaises(InvalidRebaseURLs, urlutils.rebase_url,
+ 'foo', 'http://bar', 'ftp://bar')
+ self.assertEqual(str(e), "URLs differ by more than path: 'http://bar'"
+ " and 'ftp://bar'")
+
+ def test_rebase_success(self):
+ self.assertEqual('../bar', urlutils.rebase_url('bar', 'http://baz/',
+ 'http://baz/qux'))
+ self.assertEqual('qux/bar', urlutils.rebase_url('bar',
+ 'http://baz/qux', 'http://baz/'))
+ self.assertEqual('.', urlutils.rebase_url('foo',
+ 'http://bar/', 'http://bar/foo/'))
+ self.assertEqual('qux/bar', urlutils.rebase_url('../bar',
+ 'http://baz/qux/foo', 'http://baz/'))
+
+ def test_determine_relative_path(self):
+ self.assertEqual('../../baz/bar',
+ urlutils.determine_relative_path(
+ '/qux/quxx', '/baz/bar'))
+ self.assertEqual('..',
+ urlutils.determine_relative_path(
+ '/bar/baz', '/bar'))
+ self.assertEqual('baz',
+ urlutils.determine_relative_path(
+ '/bar', '/bar/baz'))
+ self.assertEqual('.', urlutils.determine_relative_path(
+ '/bar', '/bar'))
+
+
+class TestParseURL(TestCase):
+
+ def test_parse_simple(self):
+ parsed = urlutils.parse_url('http://example.com:80/one')
+ self.assertEquals(('http', None, None, 'example.com', 80, '/one'),
+ parsed)
+
+ def test_ipv6(self):
+ parsed = urlutils.parse_url('http://[1:2:3::40]/one')
+ self.assertEquals(('http', None, None, '1:2:3::40', None, '/one'),
+ parsed)
+
+ def test_ipv6_port(self):
+ parsed = urlutils.parse_url('http://[1:2:3::40]:80/one')
+ self.assertEquals(('http', None, None, '1:2:3::40', 80, '/one'),
+ parsed)
+
+
+class TestURL(TestCase):
+
+ def test_parse_simple(self):
+ parsed = urlutils.URL.from_string('http://example.com:80/one')
+ self.assertEquals('http', parsed.scheme)
+ self.assertIs(None, parsed.user)
+ self.assertIs(None, parsed.password)
+ self.assertEquals('example.com', parsed.host)
+ self.assertEquals(80, parsed.port)
+ self.assertEquals('/one', parsed.path)
+
+ def test_ipv6(self):
+ parsed = urlutils.URL.from_string('http://[1:2:3::40]/one')
+ self.assertEquals('http', parsed.scheme)
+ self.assertIs(None, parsed.port)
+ self.assertIs(None, parsed.user)
+ self.assertIs(None, parsed.password)
+ self.assertEquals('1:2:3::40', parsed.host)
+ self.assertEquals('/one', parsed.path)
+
+ def test_ipv6_port(self):
+ parsed = urlutils.URL.from_string('http://[1:2:3::40]:80/one')
+ self.assertEquals('http', parsed.scheme)
+ self.assertEquals('1:2:3::40', parsed.host)
+ self.assertIs(None, parsed.user)
+ self.assertIs(None, parsed.password)
+ self.assertEquals(80, parsed.port)
+ self.assertEquals('/one', parsed.path)
+
+ def test_quoted(self):
+ parsed = urlutils.URL.from_string(
+ 'http://ro%62ey:h%40t@ex%41mple.com:2222/path')
+ self.assertEquals(parsed.quoted_host, 'ex%41mple.com')
+ self.assertEquals(parsed.host, 'exAmple.com')
+ self.assertEquals(parsed.port, 2222)
+ self.assertEquals(parsed.quoted_user, 'ro%62ey')
+ self.assertEquals(parsed.user, 'robey')
+ self.assertEquals(parsed.quoted_password, 'h%40t')
+ self.assertEquals(parsed.password, 'h@t')
+ self.assertEquals(parsed.path, '/path')
+
+ def test_eq(self):
+ parsed1 = urlutils.URL.from_string('http://[1:2:3::40]:80/one')
+ parsed2 = urlutils.URL.from_string('http://[1:2:3::40]:80/one')
+ self.assertEquals(parsed1, parsed2)
+ self.assertEquals(parsed1, parsed1)
+ parsed2.path = '/two'
+ self.assertNotEquals(parsed1, parsed2)
+
+ def test_repr(self):
+ parsed = urlutils.URL.from_string('http://[1:2:3::40]:80/one')
+ self.assertEquals(
+ "<URL('http', None, None, '1:2:3::40', 80, '/one')>",
+ repr(parsed))
+
+ def test_str(self):
+ parsed = urlutils.URL.from_string('http://[1:2:3::40]:80/one')
+ self.assertEquals('http://[1:2:3::40]:80/one', str(parsed))
+
+ def test__combine_paths(self):
+ combine = urlutils.URL._combine_paths
+ self.assertEqual('/home/sarah/project/foo',
+ combine('/home/sarah', 'project/foo'))
+ self.assertEqual('/etc',
+ combine('/home/sarah', '../../etc'))
+ self.assertEqual('/etc',
+ combine('/home/sarah', '../../../etc'))
+ self.assertEqual('/etc',
+ combine('/home/sarah', '/etc'))
+
+ def test_clone(self):
+ url = urlutils.URL.from_string('http://[1:2:3::40]:80/one')
+ url1 = url.clone("two")
+ self.assertEquals("/one/two", url1.path)
+ url2 = url.clone("/two")
+ self.assertEquals("/two", url2.path)
+ url3 = url.clone()
+ self.assertIsNot(url, url3)
+ self.assertEquals(url, url3)
+
+
+class TestFileRelpath(TestCase):
+
+ # GZ 2011-11-18: A way to override all path handling functions to one
+ # platform or another for testing would be nice.
+
+ def _with_posix_paths(self):
+ self.overrideAttr(urlutils, "local_path_from_url",
+ urlutils._posix_local_path_from_url)
+ self.overrideAttr(urlutils, "MIN_ABS_FILEURL_LENGTH", len("file:///"))
+ self.overrideAttr(osutils, "abspath", osutils._posix_abspath)
+ self.overrideAttr(osutils, "normpath", osutils._posix_normpath)
+ self.overrideAttr(osutils, "pathjoin", osutils.posixpath.join)
+ self.overrideAttr(osutils, "split", osutils.posixpath.split)
+ self.overrideAttr(osutils, "MIN_ABS_PATHLENGTH", 1)
+
+ def _with_win32_paths(self):
+ self.overrideAttr(urlutils, "local_path_from_url",
+ urlutils._win32_local_path_from_url)
+ self.overrideAttr(urlutils, "MIN_ABS_FILEURL_LENGTH",
+ urlutils.WIN32_MIN_ABS_FILEURL_LENGTH)
+ self.overrideAttr(osutils, "abspath", osutils._win32_abspath)
+ self.overrideAttr(osutils, "normpath", osutils._win32_normpath)
+ self.overrideAttr(osutils, "pathjoin", osutils._win32_pathjoin)
+ self.overrideAttr(osutils, "split", osutils.ntpath.split)
+ self.overrideAttr(osutils, "MIN_ABS_PATHLENGTH", 3)
+
+ def test_same_url_posix(self):
+ self._with_posix_paths()
+ self.assertEquals("",
+ urlutils.file_relpath("file:///a", "file:///a"))
+ self.assertEquals("",
+ urlutils.file_relpath("file:///a", "file:///a/"))
+ self.assertEquals("",
+ urlutils.file_relpath("file:///a/", "file:///a"))
+
+ def test_same_url_win32(self):
+ self._with_win32_paths()
+ self.assertEquals("",
+ urlutils.file_relpath("file:///A:/", "file:///A:/"))
+ self.assertEquals("",
+ urlutils.file_relpath("file:///A|/", "file:///A:/"))
+ self.assertEquals("",
+ urlutils.file_relpath("file:///A:/b/", "file:///A:/b/"))
+ self.assertEquals("",
+ urlutils.file_relpath("file:///A:/b", "file:///A:/b/"))
+ self.assertEquals("",
+ urlutils.file_relpath("file:///A:/b/", "file:///A:/b"))
+
+ def test_child_posix(self):
+ self._with_posix_paths()
+ self.assertEquals("b",
+ urlutils.file_relpath("file:///a", "file:///a/b"))
+ self.assertEquals("b",
+ urlutils.file_relpath("file:///a/", "file:///a/b"))
+ self.assertEquals("b/c",
+ urlutils.file_relpath("file:///a", "file:///a/b/c"))
+
+ def test_child_win32(self):
+ self._with_win32_paths()
+ self.assertEquals("b",
+ urlutils.file_relpath("file:///A:/", "file:///A:/b"))
+ self.assertEquals("b",
+ urlutils.file_relpath("file:///A|/", "file:///A:/b"))
+ self.assertEquals("c",
+ urlutils.file_relpath("file:///A:/b", "file:///A:/b/c"))
+ self.assertEquals("c",
+ urlutils.file_relpath("file:///A:/b/", "file:///A:/b/c"))
+ self.assertEquals("c/d",
+ urlutils.file_relpath("file:///A:/b", "file:///A:/b/c/d"))
+
+ def test_sibling_posix(self):
+ self._with_posix_paths()
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///a/b", "file:///a/c")
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///a/b/", "file:///a/c")
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///a/b/", "file:///a/c/")
+
+ def test_sibling_win32(self):
+ self._with_win32_paths()
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///A:/b", "file:///A:/c")
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///A:/b/", "file:///A:/c")
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///A:/b/", "file:///A:/c/")
+
+ def test_parent_posix(self):
+ self._with_posix_paths()
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///a/b", "file:///a")
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///a/b", "file:///a/")
+
+ def test_parent_win32(self):
+ self._with_win32_paths()
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///A:/b", "file:///A:/")
+ self.assertRaises(PathNotChild,
+ urlutils.file_relpath, "file:///A:/b/c", "file:///A:/b")
+
+
+class QuoteTests(TestCase):
+
+ def test_quote(self):
+ self.assertEqual('abc%20def', urlutils.quote('abc def'))
+ self.assertEqual('abc%2Fdef', urlutils.quote('abc/def', safe=''))
+ self.assertEqual('abc/def', urlutils.quote('abc/def', safe='/'))
+
+ def test_quote_tildes(self):
+ self.assertEqual('%7Efoo', urlutils.quote('~foo'))
+ self.assertEqual('~foo', urlutils.quote('~foo', safe='/~'))
+
+ def test_unquote(self):
+ self.assertEqual('%', urlutils.unquote('%25'))
+ self.assertEqual('\xc3\xa5', urlutils.unquote('%C3%A5'))
+ self.assertEqual(u"\xe5", urlutils.unquote(u'\xe5'))
diff --git a/bzrlib/tests/test_utextwrap.py b/bzrlib/tests/test_utextwrap.py
new file mode 100644
index 0000000..33ccdbf
--- /dev/null
+++ b/bzrlib/tests/test_utextwrap.py
@@ -0,0 +1,211 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+"""Tests of the bzrlib.utextwrap."""
+
+from bzrlib import (
+ tests,
+ utextwrap,
+ )
+from bzrlib.tests import features
+
+
+# Japanese "Good morning".
+# Each character has double width, so the total is 8 columns on a console.
+_str_D = u'\u304a\u306f\u3088\u3046'
+
+_str_S = u"hello"
+
+# Combine single width characters and double width characters.
+_str_SD = _str_S + _str_D
+_str_DS = _str_D + _str_S
+
+class TestUTextWrap(tests.TestCase):
+
+ def check_width(self, text, expected_width):
+ w = utextwrap.UTextWrapper()
+ self.assertEqual(
+ w._width(text),
+ expected_width,
+ "Width of %r should be %d" % (text, expected_width))
+
+ def test_width(self):
+ self.check_width(_str_D, 8)
+ self.check_width(_str_SD, 13)
+
+ def check_cut(self, text, width, pos):
+ w = utextwrap.UTextWrapper()
+ self.assertEqual((text[:pos], text[pos:]), w._cut(text, width))
+
+ def test_cut(self):
+ s = _str_SD
+ self.check_cut(s, 0, 0)
+ self.check_cut(s, 1, 1)
+ self.check_cut(s, 5, 5)
+ self.check_cut(s, 6, 5)
+ self.check_cut(s, 7, 6)
+ self.check_cut(s, 12, 8)
+ self.check_cut(s, 13, 9)
+ self.check_cut(s, 14, 9)
+ self.check_cut(u'A'*5, 3, 3)
+
+ def test_split(self):
+ w = utextwrap.UTextWrapper()
+ self.assertEqual(list(_str_D), w._split(_str_D))
+ self.assertEqual([_str_S]+list(_str_D), w._split(_str_SD))
+ self.assertEqual(list(_str_D)+[_str_S], w._split(_str_DS))
+
+ def test_wrap(self):
+ self.assertEqual(list(_str_D), utextwrap.wrap(_str_D, 1))
+ self.assertEqual(list(_str_D), utextwrap.wrap(_str_D, 2))
+ self.assertEqual(list(_str_D), utextwrap.wrap(_str_D, 3))
+ self.assertEqual(list(_str_D),
+ utextwrap.wrap(_str_D, 3, break_long_words=False))
+
+class TestUTextFill(tests.TestCase):
+
+ def test_fill_simple(self):
+ # Only fill() is tested here because it is just '\n'.join(wrap(text)).
+ self.assertEqual("%s\n%s" % (_str_D[:2], _str_D[2:]),
+ utextwrap.fill(_str_D, 4))
+
+ def test_fill_with_breaks(self):
+ # Demonstrate complicated case.
+ text = u"spam ham egg spamhamegg" + _str_D + u" spam" + _str_D*2
+ self.assertEqual(u'\n'.join(["spam ham",
+ "egg spam",
+ "hamegg" + _str_D[0],
+ _str_D[1:],
+ "spam" + _str_D[:2],
+ _str_D[2:]+_str_D[:2],
+ _str_D[2:]]),
+ utextwrap.fill(text, 8))
+
+ def test_fill_without_breaks(self):
+ text = u"spam ham egg spamhamegg" + _str_D + u" spam" + _str_D*2
+ self.assertEqual(u'\n'.join(["spam ham",
+ "egg",
+ "spamhamegg",
+ # border between single width and double
+ # width.
+ _str_D,
+ "spam" + _str_D[:2],
+ _str_D[2:]+_str_D[:2],
+ _str_D[2:]]),
+ utextwrap.fill(text, 8, break_long_words=False))
+
+ def test_fill_indent_with_breaks(self):
+ w = utextwrap.UTextWrapper(8, initial_indent=' '*4,
+ subsequent_indent=' '*4)
+ self.assertEqual(u'\n'.join([" hell",
+ " o" + _str_D[0],
+ " " + _str_D[1:3],
+ " " + _str_D[3]
+ ]),
+ w.fill(_str_SD))
+
+ def test_fill_indent_without_breaks(self):
+ w = utextwrap.UTextWrapper(8, initial_indent=' '*4,
+ subsequent_indent=' '*4)
+ w.break_long_words = False
+ self.assertEqual(u'\n'.join([" hello",
+ " " + _str_D[:2],
+ " " + _str_D[2:],
+ ]),
+ w.fill(_str_SD))
+
+ def test_fill_indent_without_breaks_with_fixed_width(self):
+ w = utextwrap.UTextWrapper(8, initial_indent=' '*4,
+ subsequent_indent=' '*4)
+ w.break_long_words = False
+ w.width = 3
+ self.assertEqual(u'\n'.join([" hello",
+ " " + _str_D[0],
+ " " + _str_D[1],
+ " " + _str_D[2],
+ " " + _str_D[3],
+ ]),
+ w.fill(_str_SD))
+
+class TestUTextWrapAmbiWidth(tests.TestCase):
+ _cyrill_char = u"\u0410" # east_asian_width() == 'A'
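+ # Characters with East Asian Width class 'A' (ambiguous) may render as
+ # single or double width depending on the terminal; the ambiguous_width
+ # parameter tells the wrapper which to assume.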
+
+ def test_ambiwidth1(self):
+ w = utextwrap.UTextWrapper(4, ambiguous_width=1)
+ s = self._cyrill_char*8
+ self.assertEqual([self._cyrill_char*4]*2, w.wrap(s))
+
+ def test_ambiwidth2(self):
+ w = utextwrap.UTextWrapper(4, ambiguous_width=2)
+ s = self._cyrill_char*8
+ self.assertEqual([self._cyrill_char*2]*4, w.wrap(s))
+
+
+# Regression test with Python's test_textwrap
+# Note that some distributions, including Ubuntu, do not install
+# Python's test suite.
+try:
+ from test import test_textwrap
+
+ def override_textwrap_symbols(testcase):
+ # Override the symbols imported by test_textwrap so it uses our own
+ # replacements.
+ testcase.overrideAttr(test_textwrap, 'TextWrapper',
+ utextwrap.UTextWrapper)
+ testcase.overrideAttr(test_textwrap, 'wrap', utextwrap.wrap)
+ testcase.overrideAttr(test_textwrap, 'fill', utextwrap.fill)
+
+
+ def setup_both(testcase, base_class, reused_class):
+ super(base_class, testcase).setUp()
+ override_textwrap_symbols(testcase)
+ reused_class.setUp(testcase)
+
+
+ class TestWrap(tests.TestCase, test_textwrap.WrapTestCase):
+
+ def setUp(self):
+ setup_both(self, TestWrap, test_textwrap.WrapTestCase)
+
+
+ class TestLongWord(tests.TestCase, test_textwrap.LongWordTestCase):
+
+ def setUp(self):
+ setup_both(self, TestLongWord, test_textwrap.LongWordTestCase)
+
+
+ class TestIndent(tests.TestCase, test_textwrap.IndentTestCases):
+
+ def setUp(self):
+ setup_both(self, TestIndent, test_textwrap.IndentTestCases)
+
+except ImportError:
+
+ class TestWrap(tests.TestCase):
+
+ def test_wrap(self):
+ raise tests.TestSkipped("test.test_textwrap is not available.")
+
+ class TestLongWord(tests.TestCase):
+
+ def test_longword(self):
+ raise tests.TestSkipped("test.test_textwrap is not available.")
+
+ class TestIndent(tests.TestCase):
+
+ def test_indent(self):
+ raise tests.TestSkipped("test.test_textwrap is not available.")
diff --git a/bzrlib/tests/test_version.py b/bzrlib/tests/test_version.py
new file mode 100644
index 0000000..04bd679
--- /dev/null
+++ b/bzrlib/tests/test_version.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2006, 2009, 2010, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for versioning of bzrlib."""
+
+from cStringIO import StringIO
+import re
+
+from bzrlib import version, workingtree
+from bzrlib.tests import TestCase, TestSkipped
+
+class TestBzrlibVersioning(TestCase):
+
+ def test_get_bzr_source_tree(self):
+ """Get tree for bzr source, if any."""
+ self.permit_source_tree_branch_repo()
+ # We don't know if these tests are being run from a checkout or branch
+ # of bzr, from an installed copy, or from source unpacked from a
+ # tarball. We don't construct a branch just for testing this, so we
+ # just assert that it must either return None or the tree.
+ src_tree = version._get_bzr_source_tree()
+ if src_tree is None:
+ raise TestSkipped("bzr tests aren't run from a bzr working tree")
+ else:
+ # ensure that what we got was in fact a working tree instance.
+ self.assertIsInstance(src_tree, workingtree.WorkingTree)
+
+ def test_python_binary_path(self):
+ self.permit_source_tree_branch_repo()
+ sio = StringIO()
+ version.show_version(show_config=False, show_copyright=False,
+ to_file=sio)
+ out = sio.getvalue()
+ m = re.search(r"Python interpreter: (.*) [0-9]", out)
+ self.assertIsNot(m, None)
+ self.assertPathExists(m.group(1))
diff --git a/bzrlib/tests/test_version_info.py b/bzrlib/tests/test_version_info.py
new file mode 100644
index 0000000..586de17
--- /dev/null
+++ b/bzrlib/tests/test_version_info.py
@@ -0,0 +1,421 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for version_info"""
+
+from cStringIO import StringIO
+import imp
+import os
+import sys
+
+from bzrlib import (
+ errors,
+ registry,
+ tests,
+ version_info_formats,
+ )
+from bzrlib.tests import TestCaseWithTransport
+from bzrlib.rio import read_stanzas
+
+from bzrlib.version_info_formats.format_custom import CustomVersionInfoBuilder
+from bzrlib.version_info_formats.format_rio import RioVersionInfoBuilder
+from bzrlib.version_info_formats.format_python import PythonVersionInfoBuilder
+
+
+class VersionInfoTestCase(TestCaseWithTransport):
+
+ def create_branch(self):
+ wt = self.make_branch_and_tree('branch')
+
+ self.build_tree(['branch/a'])
+ wt.add('a')
+ wt.commit('a', rev_id='r1')
+
+ self.build_tree(['branch/b'])
+ wt.add('b')
+ wt.commit('b', rev_id='r2')
+
+ self.build_tree_contents([('branch/a', 'new contents\n')])
+ wt.commit(u'\xe52', rev_id='r3')
+
+ return wt
+
+ def create_tree_with_dotted_revno(self):
+ wt = self.make_branch_and_tree('branch')
+ self.build_tree(['branch/a'])
+ wt.add('a')
+ wt.commit('a', rev_id='r1')
+
+ other = wt.bzrdir.sprout('other').open_workingtree()
+ self.build_tree(['other/b.a'])
+ other.add(['b.a'])
+ other.commit('b.a', rev_id='o2')
+
+ os.chdir('branch')
+ self.run_bzr('merge ../other')
+ wt.commit('merge', rev_id='merge')
+
+ wt.update(revision='o2')
+
+ return wt
+
+
+class TestVersionInfoRio(VersionInfoTestCase):
+
+ def test_rio_null(self):
+ wt = self.make_branch_and_tree('branch')
+
+ sio = StringIO()
+ builder = RioVersionInfoBuilder(wt.branch, working_tree=wt)
+ builder.generate(sio)
+ val = sio.getvalue()
+ self.assertContainsRe(val, 'build-date:')
+ self.assertContainsRe(val, 'revno: 0')
+
+ def test_rio_dotted_revno(self):
+ wt = self.create_tree_with_dotted_revno()
+
+ sio = StringIO()
+ builder = RioVersionInfoBuilder(wt.branch, working_tree=wt)
+ builder.generate(sio)
+ val = sio.getvalue()
+ self.assertContainsRe(val, 'revno: 1.1.1')
+
+ def regen_text(self, wt, **kwargs):
+ sio = StringIO()
+ builder = RioVersionInfoBuilder(wt.branch, working_tree=wt, **kwargs)
+ builder.generate(sio)
+ val = sio.getvalue()
+ return val
+
+ def test_simple(self):
+ wt = self.create_branch()
+
+ val = self.regen_text(wt)
+ self.assertContainsRe(val, 'build-date:')
+ self.assertContainsRe(val, 'date:')
+ self.assertContainsRe(val, 'revno: 3')
+ self.assertContainsRe(val, 'revision-id: r3')
+
+ def test_clean(self):
+ wt = self.create_branch()
+ val = self.regen_text(wt, check_for_clean=True)
+ self.assertContainsRe(val, 'clean: True')
+
+ def test_no_clean(self):
+ wt = self.create_branch()
+ self.build_tree(['branch/c'])
+ val = self.regen_text(wt, check_for_clean=True)
+ self.assertContainsRe(val, 'clean: False')
+
+ def test_history(self):
+ wt = self.create_branch()
+
+ val = self.regen_text(wt, include_revision_history=True)
+ self.assertContainsRe(val, 'id: r1')
+ self.assertContainsRe(val, 'message: a')
+ self.assertContainsRe(val, 'id: r2')
+ self.assertContainsRe(val, 'message: b')
+ self.assertContainsRe(val, 'id: r3')
+ self.assertContainsRe(val, 'message: \xc3\xa52') # utf8 encoding '\xe5'
+
+ def regen(self, wt, **kwargs):
+ sio = StringIO()
+ builder = RioVersionInfoBuilder(wt.branch, working_tree=wt, **kwargs)
+ builder.generate(sio)
+ sio.seek(0)
+ stanzas = list(read_stanzas(sio))
+ self.assertEqual(1, len(stanzas))
+ return stanzas[0]
+
+ def test_rio_version_hook(self):
+ def update_stanza(rev, stanza):
+ stanza.add('bla', 'bloe')
+ RioVersionInfoBuilder.hooks.install_named_hook(
+ 'revision', update_stanza, None)
+ wt = self.create_branch()
+
+ stanza = self.regen(wt)
+ self.assertEqual(['bloe'], stanza.get_all('bla'))
+
+ def get_one_stanza(self, stanza, key):
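+ # Fields such as 'file-revisions' hold a nested rio stanza serialised
+ # as text, so re-parse it to get at the individual values.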
+ new_stanzas = list(read_stanzas(StringIO(stanza[key].encode('utf8'))))
+ self.assertEqual(1, len(new_stanzas))
+ return new_stanzas[0]
+
+ def test_build_date(self):
+ wt = self.create_branch()
+ stanza = self.regen(wt)
+ self.assertTrue('date' in stanza)
+ self.assertTrue('build-date' in stanza)
+ self.assertEqual(['3'], stanza.get_all('revno'))
+ self.assertEqual(['r3'], stanza.get_all('revision-id'))
+
+ def test_not_clean(self):
+ wt = self.create_branch()
+ self.build_tree(['branch/c'])
+ stanza = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ self.assertEqual(['False'], stanza.get_all('clean'))
+
+ def test_file_revisions(self):
+ wt = self.create_branch()
+ self.build_tree(['branch/c'])
+ stanza = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ # This assumes it's being run against a tree that does not update the
+ # root revision on every commit.
+ file_rev_stanza = self.get_one_stanza(stanza, 'file-revisions')
+ self.assertEqual(['', 'a', 'b', 'c'], file_rev_stanza.get_all('path'))
+ self.assertEqual(['r1', 'r3', 'r2', 'unversioned'],
+ file_rev_stanza.get_all('revision'))
+
+ def test_revision_history(self):
+ wt = self.create_branch()
+ stanza = self.regen(wt, include_revision_history=True)
+ revision_stanza = self.get_one_stanza(stanza, 'revisions')
+ self.assertEqual(['r1', 'r2', 'r3'], revision_stanza.get_all('id'))
+ self.assertEqual(['a', 'b', u'\xe52'], revision_stanza.get_all('message'))
+ self.assertEqual(3, len(revision_stanza.get_all('date')))
+
+ def test_file_revisions_with_rename(self):
+ # a was modified, so it should show up modified again
+ wt = self.create_branch()
+ self.build_tree(['branch/a', 'branch/c'])
+ wt.add('c')
+ wt.rename_one('b', 'd')
+ stanza = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ file_rev_stanza = self.get_one_stanza(stanza, 'file-revisions')
+ self.assertEqual(['', 'a', 'b', 'c', 'd'],
+ file_rev_stanza.get_all('path'))
+ self.assertEqual(['r1', 'modified', 'renamed to d', 'new',
+ 'renamed from b'],
+ file_rev_stanza.get_all('revision'))
+
+ def test_file_revisions_with_removal(self):
+ wt = self.create_branch()
+ self.build_tree(['branch/a', 'branch/c'])
+ wt.add('c')
+ wt.rename_one('b', 'd')
+
+ wt.commit('modified', rev_id='r4')
+
+ wt.remove(['c', 'd'])
+ os.remove('branch/d')
+ stanza = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ file_rev_stanza = self.get_one_stanza(stanza, 'file-revisions')
+ self.assertEqual(['', 'a', 'c', 'd'], file_rev_stanza.get_all('path'))
+ self.assertEqual(['r1', 'r4', 'unversioned', 'removed'],
+ file_rev_stanza.get_all('revision'))
+
+ def test_revision(self):
+ wt = self.create_branch()
+ self.build_tree(['branch/a', 'branch/c'])
+ wt.add('c')
+ wt.rename_one('b', 'd')
+
+ stanza = self.regen(wt, check_for_clean=True,
+ include_file_revisions=True, revision_id=wt.last_revision())
+ file_rev_stanza = self.get_one_stanza(stanza, 'file-revisions')
+ self.assertEqual(['', 'a', 'b'], file_rev_stanza.get_all('path'))
+ self.assertEqual(['r1', 'r3', 'r2'],
+ file_rev_stanza.get_all('revision'))
+
+
+class PythonVersionInfoTests(VersionInfoTestCase):
+
+ def test_python_null(self):
+ wt = self.make_branch_and_tree('branch')
+
+ sio = StringIO()
+ builder = PythonVersionInfoBuilder(wt.branch, working_tree=wt)
+ builder.generate(sio)
+ val = sio.getvalue()
+ self.assertContainsRe(val, "'revision_id': None")
+ self.assertContainsRe(val, "'revno': '0'")
+ self.assertNotContainsString(val, '\n\n\n\n')
+
+ def test_python_dotted_revno(self):
+ wt = self.create_tree_with_dotted_revno()
+
+ sio = StringIO()
+ builder = PythonVersionInfoBuilder(wt.branch, working_tree=wt)
+ builder.generate(sio)
+ val = sio.getvalue()
+ self.assertContainsRe(val, "'revno': '1.1.1'")
+
+ def regen(self, wt, **kwargs):
+ """Create a test module, import and return it"""
+ outf = open('test_version_information.py', 'wb')
+ try:
+ builder = PythonVersionInfoBuilder(wt.branch, working_tree=wt,
+ **kwargs)
+ builder.generate(outf)
+ finally:
+ outf.close()
+ module_info = imp.find_module('test_version_information',
+ [os.getcwdu()])
+ tvi = imp.load_module('tvi', *module_info)
+ # Make sure the module isn't cached
+ sys.modules.pop('tvi', None)
+ sys.modules.pop('test_version_information', None)
+ # Delete the compiled versions, because we are generating
+ # a new file fast enough that python doesn't detect it
+ # needs to recompile, and using sleep() just makes the
+ # test slow
+ if os.path.exists('test_version_information.pyc'):
+ os.remove('test_version_information.pyc')
+ if os.path.exists('test_version_information.pyo'):
+ os.remove('test_version_information.pyo')
+ return tvi
+
+ def test_python_version(self):
+ wt = self.create_branch()
+
+ tvi = self.regen(wt)
+ self.assertEqual('3', tvi.version_info['revno'])
+ self.assertEqual('r3', tvi.version_info['revision_id'])
+ self.assertTrue('date' in tvi.version_info)
+ self.assertEqual(None, tvi.version_info['clean'])
+
+ tvi = self.regen(wt, check_for_clean=True)
+ self.assertEqual(True, tvi.version_info['clean'])
+
+ self.build_tree(['branch/c'])
+ tvi = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ self.assertEqual(False, tvi.version_info['clean'])
+ self.assertEqual(['', 'a', 'b', 'c'],
+ sorted(tvi.file_revisions.keys()))
+ self.assertEqual('r3', tvi.file_revisions['a'])
+ self.assertEqual('r2', tvi.file_revisions['b'])
+ self.assertEqual('unversioned', tvi.file_revisions['c'])
+ os.remove('branch/c')
+
+ tvi = self.regen(wt, include_revision_history=True)
+
+ rev_info = [(rev, message) for rev, message, timestamp, timezone
+ in tvi.revisions]
+ self.assertEqual([('r1', 'a'), ('r2', 'b'), ('r3', u'\xe52')], rev_info)
+
+ # a was modified, so it should show up modified again
+ self.build_tree(['branch/a', 'branch/c'])
+ wt.add('c')
+ wt.rename_one('b', 'd')
+ tvi = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ self.assertEqual(['', 'a', 'b', 'c', 'd'],
+ sorted(tvi.file_revisions.keys()))
+ self.assertEqual('modified', tvi.file_revisions['a'])
+ self.assertEqual('renamed to d', tvi.file_revisions['b'])
+ self.assertEqual('new', tvi.file_revisions['c'])
+ self.assertEqual('renamed from b', tvi.file_revisions['d'])
+
+ wt.commit('modified', rev_id='r4')
+ wt.remove(['c', 'd'])
+ os.remove('branch/d')
+ tvi = self.regen(wt, check_for_clean=True, include_file_revisions=True)
+ self.assertEqual(['', 'a', 'c', 'd'],
+ sorted(tvi.file_revisions.keys()))
+ self.assertEqual('r4', tvi.file_revisions['a'])
+ self.assertEqual('unversioned', tvi.file_revisions['c'])
+ self.assertEqual('removed', tvi.file_revisions['d'])
+
+
+class CustomVersionInfoTests(VersionInfoTestCase):
+
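+ # These tests exercise the template variables substituted by the
+ # custom builder: {revno}, {revision_id}, {build_date}, {date} and
+ # {clean}.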
+ def test_custom_null(self):
+ sio = StringIO()
+ wt = self.make_branch_and_tree('branch')
+ builder = CustomVersionInfoBuilder(wt.branch, working_tree=wt,
+ template='revno: {revno}')
+ builder.generate(sio)
+ self.assertEquals("revno: 0", sio.getvalue())
+
+ builder = CustomVersionInfoBuilder(wt.branch, working_tree=wt,
+ template='{revno} revid: {revision_id}')
+ # revision_id is not available yet
+ self.assertRaises(errors.MissingTemplateVariable,
+ builder.generate, sio)
+
+ def test_custom_dotted_revno(self):
+ sio = StringIO()
+ wt = self.create_tree_with_dotted_revno()
+ builder = CustomVersionInfoBuilder(wt.branch, working_tree=wt,
+ template='{revno} revid: {revision_id}')
+ builder.generate(sio)
+ self.assertEquals("1.1.1 revid: o2", sio.getvalue())
+
+ def regen(self, wt, tpl, **kwargs):
+ sio = StringIO()
+ builder = CustomVersionInfoBuilder(wt.branch, working_tree=wt,
+ template=tpl, **kwargs)
+ builder.generate(sio)
+ val = sio.getvalue()
+ return val
+
+ def test_build_date(self):
+ wt = self.create_branch()
+
+ val = self.regen(wt, 'build-date: "{build_date}"\ndate: "{date}"')
+ self.assertContainsRe(val, 'build-date: "[0-9-+: ]+"')
+ self.assertContainsRe(val, 'date: "[0-9-+: ]+"')
+
+ def test_revno(self):
+ wt = self.create_branch()
+ val = self.regen(wt, 'revno: {revno}')
+ self.assertEqual(val, 'revno: 3')
+
+ def test_revision_id(self):
+ wt = self.create_branch()
+ val = self.regen(wt, 'revision-id: {revision_id}')
+ self.assertEqual(val, 'revision-id: r3')
+
+ def test_clean(self):
+ wt = self.create_branch()
+ val = self.regen(wt, 'clean: {clean}', check_for_clean=True)
+ self.assertEqual(val, 'clean: 1')
+
+ def test_not_clean(self):
+ wt = self.create_branch()
+
+ self.build_tree(['branch/c'])
+ val = self.regen(wt, 'clean: {clean}', check_for_clean=True)
+ self.assertEqual(val, 'clean: 0')
+ os.remove('branch/c')
+
+ def test_custom_without_template(self):
+ builder = CustomVersionInfoBuilder(None)
+ sio = StringIO()
+ self.assertRaises(errors.NoTemplate, builder.generate, sio)
+
+
+class TestBuilder(version_info_formats.VersionInfoBuilder):
+ pass
+
+
+class TestVersionInfoFormatRegistry(tests.TestCase):
+
+ def setUp(self):
+ super(TestVersionInfoFormatRegistry, self).setUp()
+ self.overrideAttr(version_info_formats,
+ 'format_registry', registry.Registry())
+
+ def test_register_remove(self):
+ registry = version_info_formats.format_registry
+ registry.register('testbuilder',
+ TestBuilder, 'a simple test builder')
+ self.assertIs(TestBuilder, registry.get('testbuilder'))
+ self.assertEqual('a simple test builder',
+ registry.get_help('testbuilder'))
+ registry.remove('testbuilder')
+ self.assertRaises(KeyError, registry.get, 'testbuilder')
diff --git a/bzrlib/tests/test_versionedfile.py b/bzrlib/tests/test_versionedfile.py
new file mode 100644
index 0000000..27bfe51
--- /dev/null
+++ b/bzrlib/tests/test_versionedfile.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for VersionedFile classes"""
+
+from bzrlib import (
+ errors,
+ groupcompress,
+ multiparent,
+ tests,
+ versionedfile,
+ )
+
+
+class Test_MPDiffGenerator(tests.TestCaseWithMemoryTransport):
+ # Should this be a per vf test?
+
+ def make_vf(self):
+ t = self.get_transport('')
+ factory = groupcompress.make_pack_factory(True, True, 1)
+ return factory(t)
+
+ def make_three_vf(self):
+ vf = self.make_vf()
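+ # Build a tiny ancestry: 'one' has no parents, 'two' descends from
+ # 'one', and 'three' descends from both 'one' and 'two'.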
+ vf.add_lines(('one',), (), ['first\n'])
+ vf.add_lines(('two',), [('one',)], ['first\n', 'second\n'])
+ vf.add_lines(('three',), [('one',), ('two',)],
+ ['first\n', 'second\n', 'third\n'])
+ return vf
+
+ def test_finds_parents(self):
+ vf = self.make_three_vf()
+ gen = versionedfile._MPDiffGenerator(vf, [('three',)])
+ needed_keys, refcount = gen._find_needed_keys()
+ self.assertEqual(sorted([('one',), ('two',), ('three',)]),
+ sorted(needed_keys))
+ self.assertEqual({('one',): 1, ('two',): 1}, refcount)
+
+ def test_ignores_ghost_parents(self):
+ # If a parent is a ghost, it is just ignored
+ vf = self.make_vf()
+ vf.add_lines(('two',), [('one',)], ['first\n', 'second\n'])
+ gen = versionedfile._MPDiffGenerator(vf, [('two',)])
+ needed_keys, refcount = gen._find_needed_keys()
+ self.assertEqual(sorted([('two',)]), sorted(needed_keys))
+ # It is returned, but we don't really care as we won't extract it
+ self.assertEqual({('one',): 1}, refcount)
+ self.assertEqual([('one',)], sorted(gen.ghost_parents))
+ self.assertEqual([], sorted(gen.present_parents))
+
+ def test_raises_on_ghost_keys(self):
+ # If the requested key is a ghost, then we have a problem
+ vf = self.make_vf()
+ gen = versionedfile._MPDiffGenerator(vf, [('one',)])
+ self.assertRaises(errors.RevisionNotPresent,
+ gen._find_needed_keys)
+
+ def test_refcount_multiple_children(self):
+ vf = self.make_three_vf()
+ gen = versionedfile._MPDiffGenerator(vf, [('two',), ('three',)])
+ needed_keys, refcount = gen._find_needed_keys()
+ self.assertEqual(sorted([('one',), ('two',), ('three',)]),
+ sorted(needed_keys))
+ self.assertEqual({('one',): 2, ('two',): 1}, refcount)
+ self.assertEqual([('one',)], sorted(gen.present_parents))
+
+ def test_process_contents(self):
+ vf = self.make_three_vf()
+ gen = versionedfile._MPDiffGenerator(vf, [('two',), ('three',)])
+ gen._find_needed_keys()
+ self.assertEqual({('two',): (('one',),),
+ ('three',): (('one',), ('two',))},
+ gen.parent_map)
+ self.assertEqual({('one',): 2, ('two',): 1}, gen.refcounts)
+ self.assertEqual(sorted([('one',), ('two',), ('three',)]),
+ sorted(gen.needed_keys))
+ stream = vf.get_record_stream(gen.needed_keys, 'topological', True)
+ record = stream.next()
+ self.assertEqual(('one',), record.key)
+ # one is not needed in the output, but it is needed by children. As
+ # such, it should end up in the various caches
+ gen._process_one_record(record.key, record.get_bytes_as('chunked'))
+ # The chunks should be cached, the refcount untouched
+ self.assertEqual([('one',)], gen.chunks.keys())
+ self.assertEqual({('one',): 2, ('two',): 1}, gen.refcounts)
+ self.assertEqual([], gen.diffs.keys())
+ # Next we get 'two', which is something we output, but also needed for
+ # three
+ record = stream.next()
+ self.assertEqual(('two',), record.key)
+ gen._process_one_record(record.key, record.get_bytes_as('chunked'))
+ # Both are now cached, and the diff for two has been extracted, and
+ # one's refcount has been updated. two has been removed from the
+ # parent_map
+ self.assertEqual(sorted([('one',), ('two',)]),
+ sorted(gen.chunks.keys()))
+ self.assertEqual({('one',): 1, ('two',): 1}, gen.refcounts)
+ self.assertEqual([('two',)], gen.diffs.keys())
+ self.assertEqual({('three',): (('one',), ('two',))},
+ gen.parent_map)
+ # Finally 'three', which allows us to remove all parents from the
+ # caches
+ record = stream.next()
+ self.assertEqual(('three',), record.key)
+ gen._process_one_record(record.key, record.get_bytes_as('chunked'))
+ # Processing 'three' releases the last cached chunks: the chunk and
+ # refcount caches are now empty and only the requested diffs remain
+ self.assertEqual([], gen.chunks.keys())
+ self.assertEqual({}, gen.refcounts)
+ self.assertEqual(sorted([('two',), ('three',)]),
+ sorted(gen.diffs.keys()))
+
+ def test_compute_diffs(self):
+ vf = self.make_three_vf()
+ # The content is in the order requested, even if it isn't topological
+ gen = versionedfile._MPDiffGenerator(vf, [('two',), ('three',),
+ ('one',)])
+ diffs = gen.compute_diffs()
+ expected_diffs = [
+ multiparent.MultiParent([multiparent.ParentText(0, 0, 0, 1),
+ multiparent.NewText(['second\n'])]),
+ multiparent.MultiParent([multiparent.ParentText(1, 0, 0, 2),
+ multiparent.NewText(['third\n'])]),
+ multiparent.MultiParent([multiparent.NewText(['first\n'])]),
+ ]
+ self.assertEqual(expected_diffs, diffs)
diff --git a/bzrlib/tests/test_vf_search.py b/bzrlib/tests/test_vf_search.py
new file mode 100644
index 0000000..840451a
--- /dev/null
+++ b/bzrlib/tests/test_vf_search.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ graph as _mod_graph,
+ tests,
+ vf_search,
+ )
+from bzrlib.revision import NULL_REVISION
+from bzrlib.tests.test_graph import TestGraphBase
+
+# Ancestry 1:
+#
+# NULL_REVISION
+# |
+# rev1
+# /\
+# rev2a rev2b
+# | |
+# rev3 /
+# | /
+# rev4
+ancestry_1 = {'rev1': [NULL_REVISION], 'rev2a': ['rev1'], 'rev2b': ['rev1'],
+ 'rev3': ['rev2a'], 'rev4': ['rev3', 'rev2b']}
+
+# Ancestry 2:
+#
+# NULL_REVISION
+# / \
+# rev1a rev1b
+# |
+# rev2a
+# |
+# rev3a
+# |
+# rev4a
+ancestry_2 = {'rev1a': [NULL_REVISION], 'rev2a': ['rev1a'],
+ 'rev1b': [NULL_REVISION], 'rev3a': ['rev2a'], 'rev4a': ['rev3a']}
+
+
+# Extended history shortcut
+# NULL_REVISION
+# |
+# a
+# |\
+# b |
+# | |
+# c |
+# | |
+# d |
+# |\|
+# e f
+extended_history_shortcut = {'a': [NULL_REVISION],
+ 'b': ['a'],
+ 'c': ['b'],
+ 'd': ['c'],
+ 'e': ['d'],
+ 'f': ['a', 'd'],
+ }
+
+
+class TestSearchResultRefine(tests.TestCase):
+
+ def make_graph(self, ancestors):
+ return _mod_graph.Graph(_mod_graph.DictParentsProvider(ancestors))
+
+ def test_refine(self):
+ # Used when pulling from a stacked repository, so test some revisions
+ # being satisfied from the stacking branch.
+ g = self.make_graph(
+ {"tip":["mid"], "mid":["base"], "tag":["base"],
+ "base":[NULL_REVISION], NULL_REVISION:[]})
+ result = vf_search.SearchResult(set(['tip', 'tag']),
+ set([NULL_REVISION]), 4, set(['tip', 'mid', 'tag', 'base']))
+ result = result.refine(set(['tip']), set(['mid']))
+ recipe = result.get_recipe()
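+ # get_recipe() returns a tuple whose elements 1, 2 and 3 are the
+ # start keys, the stop keys and the key count checked below.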
+ # We should be starting from tag (original head) and mid (seen ref)
+ self.assertEqual(set(['mid', 'tag']), recipe[1])
+ # We should be stopping at NULL (original stop) and tip (seen head)
+ self.assertEqual(set([NULL_REVISION, 'tip']), recipe[2])
+ self.assertEqual(3, recipe[3])
+ result = result.refine(set(['mid', 'tag', 'base']),
+ set([NULL_REVISION]))
+ recipe = result.get_recipe()
+ # We should be starting from nothing (NULL was known as a cut point)
+ self.assertEqual(set([]), recipe[1])
+ # We should be stopping at NULL (original stop) and tip (seen head) and
+ # tag (seen head) and mid(seen mid-point head). We could come back and
+ # define this as not including mid, for minimal results, but it is
+ # still 'correct' to include mid, and simpler/easier.
+ self.assertEqual(set([NULL_REVISION, 'tip', 'tag', 'mid']), recipe[2])
+ self.assertEqual(0, recipe[3])
+ self.assertTrue(result.is_empty())
+
+
+class TestSearchResultFromParentMap(TestGraphBase):
+
+ def assertSearchResult(self, start_keys, stop_keys, key_count, parent_map,
+ missing_keys=()):
+ (start, stop, count) = vf_search.search_result_from_parent_map(
+ parent_map, missing_keys)
+ self.assertEqual((sorted(start_keys), sorted(stop_keys), key_count),
+ (sorted(start), sorted(stop), count))
+
+ def test_no_parents(self):
+ self.assertSearchResult([], [], 0, {})
+ self.assertSearchResult([], [], 0, None)
+
+ def test_ancestry_1(self):
+ self.assertSearchResult(['rev4'], [NULL_REVISION], len(ancestry_1),
+ ancestry_1)
+
+ def test_ancestry_2(self):
+ self.assertSearchResult(['rev1b', 'rev4a'], [NULL_REVISION],
+ len(ancestry_2), ancestry_2)
+ self.assertSearchResult(['rev1b', 'rev4a'], [],
+ len(ancestry_2)+1, ancestry_2,
+ missing_keys=[NULL_REVISION])
+
+ def test_partial_search(self):
+ parent_map = dict((k,extended_history_shortcut[k])
+ for k in ['e', 'f'])
+ self.assertSearchResult(['e', 'f'], ['d', 'a'], 2,
+ parent_map)
+ parent_map.update((k,extended_history_shortcut[k])
+ for k in ['d', 'a'])
+ self.assertSearchResult(['e', 'f'], ['c', NULL_REVISION], 4,
+ parent_map)
+ parent_map['c'] = extended_history_shortcut['c']
+ self.assertSearchResult(['e', 'f'], ['b'], 6,
+ parent_map, missing_keys=[NULL_REVISION])
+ parent_map['b'] = extended_history_shortcut['b']
+ self.assertSearchResult(['e', 'f'], [], 7,
+ parent_map, missing_keys=[NULL_REVISION])
+
+
+class TestLimitedSearchResultFromParentMap(TestGraphBase):
+
+ def assertSearchResult(self, start_keys, stop_keys, key_count, parent_map,
+ missing_keys, tip_keys, depth):
+ (start, stop, count) = vf_search.limited_search_result_from_parent_map(
+ parent_map, missing_keys, tip_keys, depth)
+ self.assertEqual((sorted(start_keys), sorted(stop_keys), key_count),
+ (sorted(start), sorted(stop), count))
+
+ def test_empty_ancestry(self):
+ self.assertSearchResult([], [], 0, {}, (), ['tip-rev-id'], 10)
+
+ def test_ancestry_1(self):
+ self.assertSearchResult(['rev4'], ['rev1'], 4,
+ ancestry_1, (), ['rev1'], 10)
+ self.assertSearchResult(['rev2a', 'rev2b'], ['rev1'], 2,
+ ancestry_1, (), ['rev1'], 1)
+
+
+ def test_multiple_heads(self):
+ self.assertSearchResult(['e', 'f'], ['a'], 5,
+ extended_history_shortcut, (), ['a'], 10)
+ # Note that even though we only take 1 step back, we find 'f', which
+ # means the described search will still find d and c.
+ self.assertSearchResult(['f'], ['a'], 4,
+ extended_history_shortcut, (), ['a'], 1)
+ self.assertSearchResult(['f'], ['a'], 4,
+ extended_history_shortcut, (), ['a'], 2)
+
+
+class TestPendingAncestryResultRefine(tests.TestCase):
+
+ def make_graph(self, ancestors):
+ return _mod_graph.Graph(_mod_graph.DictParentsProvider(ancestors))
+
+ def test_refine(self):
+ # Used when pulling from a stacked repository, so test some revisions
+ # being satisfied from the stacking branch.
+ g = self.make_graph(
+ {"tip":["mid"], "mid":["base"], "tag":["base"],
+ "base":[NULL_REVISION], NULL_REVISION:[]})
+ result = vf_search.PendingAncestryResult(['tip', 'tag'], None)
+ result = result.refine(set(['tip']), set(['mid']))
+ self.assertEqual(set(['mid', 'tag']), result.heads)
+ result = result.refine(set(['mid', 'tag', 'base']),
+ set([NULL_REVISION]))
+ self.assertEqual(set([NULL_REVISION]), result.heads)
+ self.assertTrue(result.is_empty())
+
+
+class TestPendingAncestryResultGetKeys(tests.TestCaseWithMemoryTransport):
+ """Tests for bzrlib.graph.PendingAncestryResult."""
+
+ def test_get_keys(self):
+ builder = self.make_branch_builder('b')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('rev-2', ['rev-1'], [])
+ builder.finish_series()
+ repo = builder.get_branch().repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ result = vf_search.PendingAncestryResult(['rev-2'], repo)
+ self.assertEqual(set(['rev-1', 'rev-2']), set(result.get_keys()))
+
+ def test_get_keys_excludes_ghosts(self):
+ builder = self.make_branch_builder('b')
+ builder.start_series()
+ builder.build_snapshot('rev-1', None, [
+ ('add', ('', 'root-id', 'directory', ''))])
+ builder.build_snapshot('rev-2', ['rev-1', 'ghost'], [])
+ builder.finish_series()
+ repo = builder.get_branch().repository
+ repo.lock_read()
+ self.addCleanup(repo.unlock)
+ result = vf_search.PendingAncestryResult(['rev-2'], repo)
+ self.assertEqual(sorted(['rev-1', 'rev-2']), sorted(result.get_keys()))
+
+ def test_get_keys_excludes_null(self):
+ # Make a 'graph' with an iter_ancestry that returns NULL_REVISION
+ # somewhere other than the last element, which can happen in real
+ # ancestries.
+ class StubGraph(object):
+ def iter_ancestry(self, keys):
+ return [(NULL_REVISION, ()), ('foo', (NULL_REVISION,))]
+ result = vf_search.PendingAncestryResult(['rev-3'], None)
+ result_keys = result._get_keys(StubGraph())
+ # Only the non-null keys from the ancestry appear.
+ self.assertEqual(set(['foo']), set(result_keys))
diff --git a/bzrlib/tests/test_weave.py b/bzrlib/tests/test_weave.py
new file mode 100644
index 0000000..60ddb8a
--- /dev/null
+++ b/bzrlib/tests/test_weave.py
@@ -0,0 +1,770 @@
+# Copyright (C) 2005-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+# TODO: tests regarding version names
+# TODO: rbc 20050108 test that join does not leave an inconsistent weave
+# if it fails.
+
+"""test suite for weave algorithm"""
+
+from pprint import pformat
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.osutils import sha_string
+from bzrlib.tests import TestCase, TestCaseInTempDir
+from bzrlib.weave import Weave, WeaveFormatError
+from bzrlib.weavefile import write_weave, read_weave
+
+
+# texts for use in testing
+TEXT_0 = ["Hello world"]
+TEXT_1 = ["Hello world",
+ "A second line"]
+
+
+class TestBase(TestCase):
+
+ def check_read_write(self, k):
+ """Check the weave k can be written & re-read."""
+ from tempfile import TemporaryFile
+ tf = TemporaryFile()
+
+ write_weave(k, tf)
+ tf.seek(0)
+ k2 = read_weave(tf)
+
+ if k != k2:
+ tf.seek(0)
+ self.log('serialized weave:')
+ self.log(tf.read())
+
+ self.log('')
+ self.log('parents: %s' % (k._parents == k2._parents))
+ self.log(' %r' % k._parents)
+ self.log(' %r' % k2._parents)
+ self.log('')
+ self.fail('read/write check failed')
+
+
+class WeaveContains(TestBase):
+ """Weave __contains__ operator"""
+
+ def runTest(self):
+ k = Weave(get_scope=lambda:None)
+ self.assertFalse('foo' in k)
+ k.add_lines('foo', [], TEXT_1)
+ self.assertTrue('foo' in k)
+
+
+class Easy(TestBase):
+
+ def runTest(self):
+ k = Weave()
+
+
+class AnnotateOne(TestBase):
+
+ def runTest(self):
+ k = Weave()
+ k.add_lines('text0', [], TEXT_0)
+ self.assertEqual(k.annotate('text0'),
+ [('text0', TEXT_0[0])])
+
+
+class InvalidAdd(TestBase):
+ """Try to use invalid version number during add."""
+
+ def runTest(self):
+ k = Weave()
+
+ self.assertRaises(errors.RevisionNotPresent,
+ k.add_lines,
+ 'text0',
+ ['69'],
+ ['new text!'])
+
+
+class RepeatedAdd(TestBase):
+ """Add the same version twice; harmless."""
+
+ def test_duplicate_add(self):
+ k = Weave()
+ idx = k.add_lines('text0', [], TEXT_0)
+ idx2 = k.add_lines('text0', [], TEXT_0)
+ self.assertEqual(idx, idx2)
+
+
+class InvalidRepeatedAdd(TestBase):
+
+ def runTest(self):
+ k = Weave()
+ k.add_lines('basis', [], TEXT_0)
+ idx = k.add_lines('text0', [], TEXT_0)
+ self.assertRaises(errors.RevisionAlreadyPresent,
+ k.add_lines,
+ 'text0',
+ [],
+ ['not the same text'])
+ self.assertRaises(errors.RevisionAlreadyPresent,
+ k.add_lines,
+ 'text0',
+ ['basis'], # not the right parents
+ TEXT_0)
+
+
+class InsertLines(TestBase):
+ """Store a revision that adds one line to the original.
+
+ Look at the annotations to make sure that the first line is matched
+ and not stored repeatedly."""
+ def runTest(self):
+ k = Weave()
+
+ k.add_lines('text0', [], ['line 1'])
+ k.add_lines('text1', ['text0'], ['line 1', 'line 2'])
+
+ self.assertEqual(k.annotate('text0'),
+ [('text0', 'line 1')])
+
+ self.assertEqual(k.get_lines(1),
+ ['line 1',
+ 'line 2'])
+
+ self.assertEqual(k.annotate('text1'),
+ [('text0', 'line 1'),
+ ('text1', 'line 2')])
+
+ k.add_lines('text2', ['text0'], ['line 1', 'diverged line'])
+
+ self.assertEqual(k.annotate('text2'),
+ [('text0', 'line 1'),
+ ('text2', 'diverged line')])
+
+ text3 = ['line 1', 'middle line', 'line 2']
+ k.add_lines('text3',
+ ['text0', 'text1'],
+ text3)
+
+ # self.log("changes to text3: " + pformat(list(k._delta(set([0, 1]), text3))))
+
+ self.log("k._weave=" + pformat(k._weave))
+
+ self.assertEqual(k.annotate('text3'),
+ [('text0', 'line 1'),
+ ('text3', 'middle line'),
+ ('text1', 'line 2')])
+
+ # now multiple insertions at different places
+ k.add_lines('text4',
+ ['text0', 'text1', 'text3'],
+ ['line 1', 'aaa', 'middle line', 'bbb', 'line 2', 'ccc'])
+
+ self.assertEqual(k.annotate('text4'),
+ [('text0', 'line 1'),
+ ('text4', 'aaa'),
+ ('text3', 'middle line'),
+ ('text4', 'bbb'),
+ ('text1', 'line 2'),
+ ('text4', 'ccc')])
+
+
+class DeleteLines(TestBase):
+ """Deletion of lines from existing text.
+
+ Try various texts all based on a common ancestor."""
+ def runTest(self):
+ k = Weave()
+
+ base_text = ['one', 'two', 'three', 'four']
+
+ k.add_lines('text0', [], base_text)
+
+ texts = [['one', 'two', 'three'],
+ ['two', 'three', 'four'],
+ ['one', 'four'],
+ ['one', 'two', 'three', 'four'],
+ ]
+
+ i = 1
+ for t in texts:
+ ver = k.add_lines('text%d' % i,
+ ['text0'], t)
+ i += 1
+
+ self.log('final weave:')
+ self.log('k._weave=' + pformat(k._weave))
+
+ for i in range(len(texts)):
+ self.assertEqual(k.get_lines(i+1),
+ texts[i])
+
+
+class SuicideDelete(TestBase):
+ """Invalid weave which tries to add and delete simultaneously."""
+ def runTest(self):
+ k = Weave()
+
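+ # Poke a raw weave body in directly: plain strings are text lines,
+ # ('{', v)/('}', v) pairs bracket lines inserted by version v, and
+ # ('[', v)/(']', v) pairs bracket lines deleted by version v.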
+ k._parents = [(),
+ ]
+ k._weave = [('{', 0),
+ 'first line',
+ ('[', 0),
+ 'deleted in 0',
+ (']', 0),
+ ('}', 0),
+ ]
+ ################################### SKIPPED
+ # Weave.get doesn't trap this anymore
+ return
+
+ self.assertRaises(WeaveFormatError,
+ k.get_lines,
+ 0)
+
+
+class CannedDelete(TestBase):
+ """Unpack canned weave with deleted lines."""
+ def runTest(self):
+ k = Weave()
+
+ k._parents = [(),
+ frozenset([0]),
+ ]
+ k._weave = [('{', 0),
+ 'first line',
+ ('[', 1),
+ 'line to be deleted',
+ (']', 1),
+ 'last line',
+ ('}', 0),
+ ]
+ k._sha1s = [sha_string('first lineline to be deletedlast line')
+ , sha_string('first linelast line')]
+
+ self.assertEqual(k.get_lines(0),
+ ['first line',
+ 'line to be deleted',
+ 'last line',
+ ])
+
+ self.assertEqual(k.get_lines(1),
+ ['first line',
+ 'last line',
+ ])
+
+
+class CannedReplacement(TestBase):
+ """Unpack canned weave with deleted lines."""
+ def runTest(self):
+ k = Weave()
+
+ k._parents = [frozenset(),
+ frozenset([0]),
+ ]
+ k._weave = [('{', 0),
+ 'first line',
+ ('[', 1),
+ 'line to be deleted',
+ (']', 1),
+ ('{', 1),
+ 'replacement line',
+ ('}', 1),
+ 'last line',
+ ('}', 0),
+ ]
+ k._sha1s = [sha_string('first lineline to be deletedlast line')
+ , sha_string('first linereplacement linelast line')]
+
+ self.assertEqual(k.get_lines(0),
+ ['first line',
+ 'line to be deleted',
+ 'last line',
+ ])
+
+ self.assertEqual(k.get_lines(1),
+ ['first line',
+ 'replacement line',
+ 'last line',
+ ])
+
+
+class BadWeave(TestBase):
+ """Test that we trap an insert which should not occur."""
+ def runTest(self):
+ k = Weave()
+
+ k._parents = [frozenset(),
+ ]
+ k._weave = ['bad line',
+ ('{', 0),
+ 'foo {',
+ ('{', 1),
+ ' added in version 1',
+ ('{', 2),
+ ' added in v2',
+ ('}', 2),
+ ' also from v1',
+ ('}', 1),
+ '}',
+ ('}', 0)]
+
+ ################################### SKIPPED
+ # Weave.get doesn't trap this anymore
+ return
+
+
+ self.assertRaises(WeaveFormatError,
+ k.get,
+ 0)
+
+
+class BadInsert(TestBase):
+ """Test that we trap an insert which should not occur."""
+ def runTest(self):
+ k = Weave()
+
+ k._parents = [frozenset(),
+ frozenset([0]),
+ frozenset([0]),
+ frozenset([0,1,2]),
+ ]
+ k._weave = [('{', 0),
+ 'foo {',
+ ('{', 1),
+ ' added in version 1',
+ ('{', 1),
+ ' more in 1',
+ ('}', 1),
+ ('}', 1),
+ ('}', 0)]
+
+
+ # this is not currently enforced by get
+ return ##########################################
+
+ self.assertRaises(WeaveFormatError,
+ k.get,
+ 0)
+
+ self.assertRaises(WeaveFormatError,
+ k.get,
+ 1)
+
+
+class InsertNested(TestBase):
+ """Insertion with nested instructions."""
+ def runTest(self):
+ k = Weave()
+
+ k._parents = [frozenset(),
+ frozenset([0]),
+ frozenset([0]),
+ frozenset([0,1,2]),
+ ]
+ k._weave = [('{', 0),
+ 'foo {',
+ ('{', 1),
+ ' added in version 1',
+ ('{', 2),
+ ' added in v2',
+ ('}', 2),
+ ' also from v1',
+ ('}', 1),
+ '}',
+ ('}', 0)]
+
+ k._sha1s = [sha_string('foo {}')
+ , sha_string('foo { added in version 1 also from v1}')
+ , sha_string('foo { added in v2}')
+ , sha_string('foo { added in version 1 added in v2 also from v1}')
+ ]
+
+ self.assertEqual(k.get_lines(0),
+ ['foo {',
+ '}'])
+
+ self.assertEqual(k.get_lines(1),
+ ['foo {',
+ ' added in version 1',
+ ' also from v1',
+ '}'])
+
+ self.assertEqual(k.get_lines(2),
+ ['foo {',
+ ' added in v2',
+ '}'])
+
+ self.assertEqual(k.get_lines(3),
+ ['foo {',
+ ' added in version 1',
+ ' added in v2',
+ ' also from v1',
+ '}'])
+
+
+class DeleteLines2(TestBase):
+ """Test recording revisions that delete lines.
+
+ This relies on the weave having a way to represent lines knocked
+ out by a later revision."""
+ def runTest(self):
+ k = Weave()
+
+ k.add_lines('text0', [], ["line the first",
+ "line 2",
+ "line 3",
+ "fine"])
+
+ self.assertEqual(len(k.get_lines(0)), 4)
+
+ k.add_lines('text1', ['text0'], ["line the first",
+ "fine"])
+
+ self.assertEqual(k.get_lines(1),
+ ["line the first",
+ "fine"])
+
+ self.assertEqual(k.annotate('text1'),
+ [('text0', "line the first"),
+ ('text0', "fine")])
+
+
+class IncludeVersions(TestBase):
+ """Check texts that are stored across multiple revisions.
+
+ Here we manually create a weave with particular encoding and make
+ sure it unpacks properly.
+
+ Text 0 includes nothing; text 1 includes text 0 and adds some
+ lines.
+ """
+
+ def runTest(self):
+ k = Weave()
+
+ k._parents = [frozenset(), frozenset([0])]
+ k._weave = [('{', 0),
+ "first line",
+ ('}', 0),
+ ('{', 1),
+ "second line",
+ ('}', 1)]
+
+ k._sha1s = [sha_string('first line')
+ , sha_string('first linesecond line')]
+
+ self.assertEqual(k.get_lines(1),
+ ["first line",
+ "second line"])
+
+ self.assertEqual(k.get_lines(0),
+ ["first line"])
+
+
+class DivergedIncludes(TestBase):
+ """Weave with two diverged texts based on version 0.
+ """
+ def runTest(self):
+ # FIXME: make the weave, don't poke at it.
+ k = Weave()
+
+ k._names = ['0', '1', '2']
+ k._name_map = {'0':0, '1':1, '2':2}
+ k._parents = [frozenset(),
+ frozenset([0]),
+ frozenset([0]),
+ ]
+ k._weave = [('{', 0),
+ "first line",
+ ('}', 0),
+ ('{', 1),
+ "second line",
+ ('}', 1),
+ ('{', 2),
+ "alternative second line",
+ ('}', 2),
+ ]
+
+ k._sha1s = [sha_string('first line')
+ , sha_string('first linesecond line')
+ , sha_string('first linealternative second line')]
+
+ self.assertEqual(k.get_lines(0),
+ ["first line"])
+
+ self.assertEqual(k.get_lines(1),
+ ["first line",
+ "second line"])
+
+ self.assertEqual(k.get_lines('2'),
+ ["first line",
+ "alternative second line"])
+
+ self.assertEqual(list(k.get_ancestry(['2'])),
+ ['0', '2'])
+
+
+class ReplaceLine(TestBase):
+ def runTest(self):
+ k = Weave()
+
+ text0 = ['cheddar', 'stilton', 'gruyere']
+ text1 = ['cheddar', 'blue vein', 'neufchatel', 'chevre']
+
+ k.add_lines('text0', [], text0)
+ k.add_lines('text1', ['text0'], text1)
+
+ self.log('k._weave=' + pformat(k._weave))
+
+ self.assertEqual(k.get_lines(0), text0)
+ self.assertEqual(k.get_lines(1), text1)
+
+
+class Merge(TestBase):
+ """Storage of versions that merge diverged parents"""
+
+ def runTest(self):
+ k = Weave()
+
+ texts = [['header'],
+ ['header', '', 'line from 1'],
+ ['header', '', 'line from 2', 'more from 2'],
+ ['header', '', 'line from 1', 'fixup line', 'line from 2'],
+ ]
+
+ k.add_lines('text0', [], texts[0])
+ k.add_lines('text1', ['text0'], texts[1])
+ k.add_lines('text2', ['text0'], texts[2])
+ k.add_lines('merge', ['text0', 'text1', 'text2'], texts[3])
+
+ for i, t in enumerate(texts):
+ self.assertEqual(k.get_lines(i), t)
+
+ self.assertEqual(k.annotate('merge'),
+ [('text0', 'header'),
+ ('text1', ''),
+ ('text1', 'line from 1'),
+ ('merge', 'fixup line'),
+ ('text2', 'line from 2'),
+ ])
+
+ self.assertEqual(list(k.get_ancestry(['merge'])),
+ ['text0', 'text1', 'text2', 'merge'])
+
+ self.log('k._weave=' + pformat(k._weave))
+
+ self.check_read_write(k)
+
+
+class Conflicts(TestBase):
+ """Test detection of conflicting regions during a merge.
+
+ A base version is inserted, then two descendants try to
+ insert different lines in the same place. These should be
+ reported as a possible conflict and forwarded to the user."""
+ def runTest(self):
+ return # NOT RUN
+ k = Weave()
+
+ k.add_lines([], ['aaa', 'bbb'])
+ k.add_lines([0], ['aaa', '111', 'bbb'])
+ k.add_lines([1], ['aaa', '222', 'bbb'])
+
+ merged = k.merge([1, 2])
+
+ self.assertEquals([[['aaa']],
+ [['111'], ['222']],
+ [['bbb']]])
+
+
+class NonConflict(TestBase):
+ """Two descendants insert compatible changes.
+
+ No conflict should be reported."""
+ def runTest(self):
+ return # NOT RUN
+ k = Weave()
+
+ k.add_lines([], ['aaa', 'bbb'])
+ k.add_lines([0], ['111', 'aaa', 'ccc', 'bbb'])
+ k.add_lines([1], ['aaa', 'ccc', 'bbb', '222'])
+
+
+class Khayyam(TestBase):
+ """Test changes to multi-line texts, and read/write"""
+
+ def test_multi_line_merge(self):
+ rawtexts = [
+ """A Book of Verses underneath the Bough,
+ A Jug of Wine, a Loaf of Bread, -- and Thou
+ Beside me singing in the Wilderness --
+ Oh, Wilderness were Paradise enow!""",
+
+ """A Book of Verses underneath the Bough,
+ A Jug of Wine, a Loaf of Bread, -- and Thou
+ Beside me singing in the Wilderness --
+ Oh, Wilderness were Paradise now!""",
+
+ """A Book of poems underneath the tree,
+ A Jug of Wine, a Loaf of Bread,
+ and Thou
+ Beside me singing in the Wilderness --
+ Oh, Wilderness were Paradise now!
+
+ -- O. Khayyam""",
+
+ """A Book of Verses underneath the Bough,
+ A Jug of Wine, a Loaf of Bread,
+ and Thou
+ Beside me singing in the Wilderness --
+ Oh, Wilderness were Paradise now!""",
+ ]
+ texts = [[l.strip() for l in t.split('\n')] for t in rawtexts]
+
+ k = Weave()
+ parents = set()
+ i = 0
+ for t in texts:
+ ver = k.add_lines('text%d' % i,
+ list(parents), t)
+ parents.add('text%d' % i)
+ i += 1
+
+ self.log("k._weave=" + pformat(k._weave))
+
+ for i, t in enumerate(texts):
+ self.assertEqual(k.get_lines(i), t)
+
+ self.check_read_write(k)
+
+
+class JoinWeavesTests(TestBase):
+
+ def setUp(self):
+ super(JoinWeavesTests, self).setUp()
+ self.weave1 = Weave()
+ self.lines1 = ['hello\n']
+ self.lines3 = ['hello\n', 'cruel\n', 'world\n']
+ self.weave1.add_lines('v1', [], self.lines1)
+ self.weave1.add_lines('v2', ['v1'], ['hello\n', 'world\n'])
+ self.weave1.add_lines('v3', ['v2'], self.lines3)
+
+ def test_written_detection(self):
+ # Test detection of weave file corruption.
+ #
+ # Make sure that we can detect if a weave file has
+ # been corrupted. This doesn't test all forms of corruption,
+ # but it at least helps verify that the data you get is what you want.
+ from cStringIO import StringIO
+
+ w = Weave()
+ w.add_lines('v1', [], ['hello\n'])
+ w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
+
+ tmpf = StringIO()
+ write_weave(w, tmpf)
+
+ # Because we are corrupting, we need to make sure we have the exact text
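+ # Each version header below is an 'i' parents line, a '1 <sha1>'
+ # checksum line and an 'n <name>' line; the body between 'w' and 'W'
+ # uses '{ v'/'}' to bracket insertions and '. ' to prefix text lines.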
+ self.assertEquals('# bzr weave file v5\n'
+ 'i\n1 f572d396fae9206628714fb2ce00f72e94f2258f\nn v1\n\n'
+ 'i 0\n1 90f265c6e75f1c8f9ab76dcf85528352c5f215ef\nn v2\n\n'
+ 'w\n{ 0\n. hello\n}\n{ 1\n. there\n}\nW\n',
+ tmpf.getvalue())
+
+ # Change a single letter
+ tmpf = StringIO('# bzr weave file v5\n'
+ 'i\n1 f572d396fae9206628714fb2ce00f72e94f2258f\nn v1\n\n'
+ 'i 0\n1 90f265c6e75f1c8f9ab76dcf85528352c5f215ef\nn v2\n\n'
+ 'w\n{ 0\n. hello\n}\n{ 1\n. There\n}\nW\n')
+
+ w = read_weave(tmpf)
+
+ self.assertEqual('hello\n', w.get_text('v1'))
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.check)
+
+ # Change the sha checksum
+ tmpf = StringIO('# bzr weave file v5\n'
+ 'i\n1 f572d396fae9206628714fb2ce00f72e94f2258f\nn v1\n\n'
+ 'i 0\n1 f0f265c6e75f1c8f9ab76dcf85528352c5f215ef\nn v2\n\n'
+ 'w\n{ 0\n. hello\n}\n{ 1\n. there\n}\nW\n')
+
+ w = read_weave(tmpf)
+
+ self.assertEqual('hello\n', w.get_text('v1'))
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
+ self.assertRaises(errors.WeaveInvalidChecksum, w.check)
+
+
+class TestWeave(TestCase):
+
+ def test_allow_reserved_false(self):
+ w = Weave('name', allow_reserved=False)
+ # Add lines is checked at the WeaveFile level, not at the Weave level
+ w.add_lines('name:', [], TEXT_1)
+ # But get_lines is checked at this level
+ self.assertRaises(errors.ReservedId, w.get_lines, 'name:')
+
+ def test_allow_reserved_true(self):
+ w = Weave('name', allow_reserved=True)
+ w.add_lines('name:', [], TEXT_1)
+ self.assertEqual(TEXT_1, w.get_lines('name:'))
+
+
+class InstrumentedWeave(Weave):
+ """Keep track of how many times functions are called."""
+
+ def __init__(self, weave_name=None):
+ self._extract_count = 0
+ Weave.__init__(self, weave_name=weave_name)
+
+ def _extract(self, versions):
+ self._extract_count += 1
+ return Weave._extract(self, versions)
+
+
+class TestNeedsReweave(TestCase):
+ """Internal corner cases for when reweave is needed."""
+
+ def test_compatible_parents(self):
+ w1 = Weave('a')
+ my_parents = set([1, 2, 3])
+ # subsets are ok
+ self.assertTrue(w1._compatible_parents(my_parents, set([3])))
+ # same sets
+ self.assertTrue(w1._compatible_parents(my_parents, set(my_parents)))
+ # same empty corner case
+ self.assertTrue(w1._compatible_parents(set(), set()))
+ # other cannot contain stuff my_parents does not
+ self.assertFalse(w1._compatible_parents(set(), set([1])))
+ self.assertFalse(w1._compatible_parents(my_parents, set([1, 2, 3, 4])))
+ self.assertFalse(w1._compatible_parents(my_parents, set([4])))
+
+
+class TestWeaveFile(TestCaseInTempDir):
+
+ def test_empty_file(self):
+ f = open('empty.weave', 'wb+')
+ try:
+ self.assertRaises(errors.WeaveFormatError,
+ read_weave, f)
+ finally:
+ f.close()
diff --git a/bzrlib/tests/test_whitebox.py b/bzrlib/tests/test_whitebox.py
new file mode 100644
index 0000000..b76f519
--- /dev/null
+++ b/bzrlib/tests/test_whitebox.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2005, 2006, 2008-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import os
+
+import bzrlib
+from bzrlib import (
+ errors,
+ osutils,
+ tests,
+ )
+from bzrlib.osutils import relpath, pathjoin, abspath, realpath
+
+
+class MoreTests(tests.TestCaseWithTransport):
+
+ def test_relpath(self):
+ """test for branch path lookups
+
+ bzrlib.osutils._relpath does a simple but subtle
+ job: given a path (either relative to cwd or absolute), work out
+ if it is inside a branch and return the path relative to the base.
+ """
+ dtmp = osutils.mkdtemp()
+ self.addCleanup(osutils.rmtree, dtmp)
+ # On Mac OSX, /tmp actually expands to /private/tmp
+ dtmp = realpath(dtmp)
+
+ def rp(p):
+ return relpath(dtmp, p)
+
+ # check paths inside dtmp while standing outside it
+ self.assertEqual('foo', rp(pathjoin(dtmp, 'foo')))
+
+ # root = nothing
+ self.assertEqual('', rp(dtmp))
+ self.assertRaises(errors.PathNotChild, rp, '/etc')
+
+ # now some near-miss operations -- note that
+ # os.path.commonprefix gets these wrong!
+ self.assertRaises(errors.PathNotChild, rp, dtmp.rstrip('\\/') + '2')
+ self.assertRaises(errors.PathNotChild, rp, dtmp.rstrip('\\/') + '2/foo')
+
+ # now operations based on relpath of files in current
+ # directory, or nearby
+
+ os.chdir(dtmp)
+ self.assertEqual('foo/bar/quux', rp('foo/bar/quux'))
+ self.assertEqual('foo', rp('foo'))
+ self.assertEqual('foo', rp('./foo'))
+ self.assertEqual('foo', rp(abspath('foo')))
+ self.assertRaises(errors.PathNotChild, rp, '../foo')
diff --git a/bzrlib/tests/test_win32utils.py b/bzrlib/tests/test_win32utils.py
new file mode 100644
index 0000000..e7f1d02
--- /dev/null
+++ b/bzrlib/tests/test_win32utils.py
@@ -0,0 +1,429 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for win32utils."""
+
+import os
+
+from bzrlib import (
+ osutils,
+ symbol_versioning,
+ tests,
+ win32utils,
+ )
+from bzrlib.tests import (
+ TestCase,
+ TestCaseInTempDir,
+ TestSkipped,
+ )
+from bzrlib.tests.features import backslashdir_feature
+from bzrlib.win32utils import glob_expand, get_app_path
+from bzrlib.tests import (
+ features,
+ )
+
+
+Win32RegistryFeature = features.ModuleAvailableFeature('_winreg')
+CtypesFeature = features.ModuleAvailableFeature('ctypes')
+Win32comShellFeature = features.ModuleAvailableFeature('win32com.shell')
+Win32ApiFeature = features.ModuleAvailableFeature('win32api')
+
+
+# Tests
+# -----
+
+class TestWin32UtilsGlobExpand(TestCaseInTempDir):
+
+ _test_needs_features = []
+
+ def test_empty_tree(self):
+ self.build_tree([])
+ self._run_testset([
+ [['a'], ['a']],
+ [['?'], ['?']],
+ [['*'], ['*']],
+ [['a', 'a'], ['a', 'a']]])
+
+ def build_ascii_tree(self):
+ self.build_tree(['a', 'a1', 'a2', 'a11', 'a.1',
+ 'b', 'b1', 'b2', 'b3',
+ 'c/', 'c/c1', 'c/c2',
+ 'd/', 'd/d1', 'd/d2', 'd/e/', 'd/e/e1'])
+
+ def build_unicode_tree(self):
+ self.requireFeature(features.UnicodeFilenameFeature)
+ self.build_tree([u'\u1234', u'\u1234\u1234', u'\u1235/',
+ u'\u1235/\u1235'])
+
+ def test_tree_ascii(self):
+ """Checks the glob expansion and path separation char
+ normalization"""
+ self.build_ascii_tree()
+ self._run_testset([
+ # no wildcards
+ [[u'a'], [u'a']],
+ [[u'a', u'a' ], [u'a', u'a']],
+
+ [[u'd'], [u'd']],
+ [[u'd/'], [u'd/']],
+
+ # wildcards
+ [[u'a*'], [u'a', u'a1', u'a2', u'a11', u'a.1']],
+ [[u'?'], [u'a', u'b', u'c', u'd']],
+ [[u'a?'], [u'a1', u'a2']],
+ [[u'a??'], [u'a11', u'a.1']],
+ [[u'b[1-2]'], [u'b1', u'b2']],
+
+ [[u'd/*'], [u'd/d1', u'd/d2', u'd/e']],
+ [[u'?/*'], [u'c/c1', u'c/c2', u'd/d1', u'd/d2', u'd/e']],
+ [[u'*/*'], [u'c/c1', u'c/c2', u'd/d1', u'd/d2', u'd/e']],
+ [[u'*/'], [u'c/', u'd/']],
+ ])
+
+ def test_backslash_globbing(self):
+ self.requireFeature(backslashdir_feature)
+ self.build_ascii_tree()
+ self._run_testset([
+ [[u'd\\'], [u'd/']],
+ [[u'd\\*'], [u'd/d1', u'd/d2', u'd/e']],
+ [[u'?\\*'], [u'c/c1', u'c/c2', u'd/d1', u'd/d2', u'd/e']],
+ [[u'*\\*'], [u'c/c1', u'c/c2', u'd/d1', u'd/d2', u'd/e']],
+ [[u'*\\'], [u'c/', u'd/']],
+ ])
+
+ def test_case_insensitive_globbing(self):
+ if os.path.normcase("AbC") == "AbC":
+ self.skip("Test requires case insensitive globbing function")
+ self.build_ascii_tree()
+ self._run_testset([
+ [[u'A'], [u'A']],
+ [[u'A?'], [u'a1', u'a2']],
+ ])
+
+ def test_tree_unicode(self):
+ """Checks behaviour with non-ascii filenames"""
+ self.build_unicode_tree()
+ self._run_testset([
+ # no wildcards
+ [[u'\u1234'], [u'\u1234']],
+ [[u'\u1235'], [u'\u1235']],
+
+ [[u'\u1235/'], [u'\u1235/']],
+ [[u'\u1235/\u1235'], [u'\u1235/\u1235']],
+
+ # wildcards
+ [[u'?'], [u'\u1234', u'\u1235']],
+ [[u'*'], [u'\u1234', u'\u1234\u1234', u'\u1235']],
+ [[u'\u1234*'], [u'\u1234', u'\u1234\u1234']],
+
+ [[u'\u1235/?'], [u'\u1235/\u1235']],
+ [[u'\u1235/*'], [u'\u1235/\u1235']],
+ [[u'?/'], [u'\u1235/']],
+ [[u'*/'], [u'\u1235/']],
+ [[u'?/?'], [u'\u1235/\u1235']],
+ [[u'*/*'], [u'\u1235/\u1235']],
+ ])
+
+ def test_unicode_backslashes(self):
+ self.requireFeature(backslashdir_feature)
+ self.build_unicode_tree()
+ self._run_testset([
+ # no wildcards
+ [[u'\u1235\\'], [u'\u1235/']],
+ [[u'\u1235\\\u1235'], [u'\u1235/\u1235']],
+ [[u'\u1235\\?'], [u'\u1235/\u1235']],
+ [[u'\u1235\\*'], [u'\u1235/\u1235']],
+ [[u'?\\'], [u'\u1235/']],
+ [[u'*\\'], [u'\u1235/']],
+ [[u'?\\?'], [u'\u1235/\u1235']],
+ [[u'*\\*'], [u'\u1235/\u1235']],
+ ])
+
+ def _run_testset(self, testset):
+ for pattern, expected in testset:
+ result = glob_expand(pattern)
+ expected.sort()
+ result.sort()
+ self.assertEqual(expected, result, 'pattern %s' % pattern)
+
+
+class TestAppPaths(TestCase):
+
+ _test_needs_features = [Win32RegistryFeature]
+
+ def test_iexplore(self):
+ # typical windows users should have IE installed
+ for a in ('iexplore', 'iexplore.exe'):
+ p = get_app_path(a)
+ d, b = os.path.split(p)
+ self.assertEquals('iexplore.exe', b.lower())
+ self.assertNotEquals('', d)
+
+ def test_wordpad(self):
+ # Typical Windows users should have WordPad on the system,
+ # but there is a problem: its path is stored as REG_EXPAND_SZ,
+ # so a naive attempt to get the path does not work.
+ self.requireFeature(Win32ApiFeature)
+ for a in ('wordpad', 'wordpad.exe'):
+ p = get_app_path(a)
+ d, b = os.path.split(p)
+ self.assertEquals('wordpad.exe', b.lower())
+ self.assertNotEquals('', d)
+
+ def test_not_existing(self):
+ p = get_app_path('not-existing')
+ self.assertEquals('not-existing', p)
+
+
+class TestLocations(TestCase):
+ """Tests for windows specific path and name retrieving functions"""
+
+ def test__ensure_unicode_deprecated(self):
+ s = "text"
+ u1 = self.applyDeprecated(symbol_versioning.deprecated_in((2, 5, 0)),
+ win32utils._ensure_unicode, s)
+ self.assertEqual(s, u1)
+ self.assertIsInstance(u1, unicode)
+ u2 = self.applyDeprecated(symbol_versioning.deprecated_in((2, 5, 0)),
+ win32utils._ensure_unicode, u1)
+ self.assertIs(u1, u2)
+
+ def test_appdata_unicode_deprecated(self):
+ self.overrideEnv("APPDATA", "fakepath")
+ s = win32utils.get_appdata_location()
+ u = self.applyDeprecated(symbol_versioning.deprecated_in((2, 5, 0)),
+ win32utils.get_appdata_location_unicode)
+ self.assertEqual(s, u)
+ self.assertIsInstance(s, unicode)
+
+ def test_home_unicode_deprecated(self):
+ s = win32utils.get_home_location()
+ u = self.applyDeprecated(symbol_versioning.deprecated_in((2, 5, 0)),
+ win32utils.get_home_location_unicode)
+ self.assertEqual(s, u)
+ self.assertIsInstance(s, unicode)
+
+ def test_user_unicode_deprecated(self):
+ self.overrideEnv("USERNAME", "alien")
+ s = win32utils.get_user_name()
+ u = self.applyDeprecated(symbol_versioning.deprecated_in((2, 5, 0)),
+ win32utils.get_user_name_unicode)
+ self.assertEqual(s, u)
+ self.assertIsInstance(s, unicode)
+
+ def test_host_unicode_deprecated(self):
+ self.overrideEnv("COMPUTERNAME", "alienbox")
+ s = win32utils.get_host_name()
+ u = self.applyDeprecated(symbol_versioning.deprecated_in((2, 5, 0)),
+ win32utils.get_host_name_unicode)
+ self.assertEqual(s, u)
+ self.assertIsInstance(s, unicode)
+
+
+class TestLocationsCtypes(TestCase):
+
+ _test_needs_features = [CtypesFeature]
+
+ def assertPathsEqual(self, p1, p2):
+ # TODO: The env var values in particular might return the "short"
+ # version (i.e. "C:\DOCUME~1\..."). It's even possible the returned
+ # values will differ only by case - handle these situations as we
+ # come across them.
+ self.assertEquals(p1, p2)
+
+ def test_appdata_not_using_environment(self):
+ # Test that we aren't falling back to the environment
+ first = win32utils.get_appdata_location()
+ self.overrideEnv("APPDATA", None)
+ self.assertPathsEqual(first, win32utils.get_appdata_location())
+
+ def test_appdata_matches_environment(self):
+ # Typically the APPDATA environment variable will match
+ # get_appdata_location
+ # XXX - See bug 262874, which asserts the correct encoding is 'mbcs',
+ encoding = osutils.get_user_encoding()
+ env_val = os.environ.get("APPDATA", None)
+ if not env_val:
+ raise TestSkipped("No APPDATA environment variable exists")
+ self.assertPathsEqual(win32utils.get_appdata_location(),
+ env_val.decode(encoding))
+
+ def test_local_appdata_not_using_environment(self):
+ # Test that we aren't falling back to the environment
+ first = win32utils.get_local_appdata_location()
+ self.overrideEnv("LOCALAPPDATA", None)
+ self.assertPathsEqual(first, win32utils.get_local_appdata_location())
+
+ def test_local_appdata_matches_environment(self):
+ # LOCALAPPDATA typically only exists on Vista, so we only attempt to
+ # compare when it exists.
+ lad = win32utils.get_local_appdata_location()
+ env = os.environ.get("LOCALAPPDATA")
+ if env:
+ # XXX - See bug 262874, which asserts the correct encoding is 'mbcs'
+ encoding = osutils.get_user_encoding()
+ self.assertPathsEqual(lad, env.decode(encoding))
+
+
+class TestLocationsPywin32(TestLocationsCtypes):
+
+ _test_needs_features = [Win32comShellFeature]
+
+ def setUp(self):
+ super(TestLocationsPywin32, self).setUp()
+ # We perform the exact same tests after disabling the use of ctypes.
+ # This causes the implementation to fall back to pywin32.
+ self.overrideAttr(win32utils, 'has_ctypes', False)
+ # FIXME: this should be done by parametrization -- vila 100123
+
+
+class TestSetHidden(TestCaseInTempDir):
+
+ def test_unicode_dir(self):
+ # we should handle unicode paths without errors
+ self.requireFeature(features.UnicodeFilenameFeature)
+ os.mkdir(u'\u1234')
+ win32utils.set_file_attr_hidden(u'\u1234')
+
+ def test_dot_bzr_in_unicode_dir(self):
+ # we should not raise a traceback if we try to set the hidden
+ # attribute on a .bzr directory below a unicode path
+ self.requireFeature(features.UnicodeFilenameFeature)
+ os.makedirs(u'\u1234\\.bzr')
+ path = osutils.abspath(u'\u1234\\.bzr')
+ win32utils.set_file_attr_hidden(path)
+
+
+class Test_CommandLineToArgv(tests.TestCaseInTempDir):
+
+ def assertCommandLine(self, expected, line, argv=None,
+ single_quotes_allowed=False):
+ # Strictly speaking we should respect parameter order versus glob
+ # expansions, but it's not really worth the effort here
+ if argv is None:
+ argv = [line]
+ argv = win32utils._command_line_to_argv(line, argv,
+ single_quotes_allowed=single_quotes_allowed)
+ self.assertEqual(expected, sorted(argv))
+
+ def test_glob_paths(self):
+ self.build_tree(['a/', 'a/b.c', 'a/c.c', 'a/c.h'])
+ self.assertCommandLine([u'a/b.c', u'a/c.c'], 'a/*.c')
+ self.build_tree(['b/', 'b/b.c', 'b/d.c', 'b/d.h'])
+ self.assertCommandLine([u'a/b.c', u'b/b.c'], '*/b.c')
+ self.assertCommandLine([u'a/b.c', u'a/c.c', u'b/b.c', u'b/d.c'],
+ '*/*.c')
+ # Bash style, just pass through the argument if nothing matches
+ self.assertCommandLine([u'*/*.qqq'], '*/*.qqq')
+
+ def test_quoted_globs(self):
+ self.build_tree(['a/', 'a/b.c', 'a/c.c', 'a/c.h'])
+ self.assertCommandLine([u'a/*.c'], '"a/*.c"')
+ self.assertCommandLine([u"'a/*.c'"], "'a/*.c'")
+ self.assertCommandLine([u'a/*.c'], "'a/*.c'",
+ single_quotes_allowed=True)
+
+ def test_slashes_changed(self):
+ # Quoting doesn't change the supplied args
+ self.assertCommandLine([u'a\\*.c'], '"a\\*.c"')
+ self.assertCommandLine([u'a\\*.c'], "'a\\*.c'",
+ single_quotes_allowed=True)
+ # Expands the glob, but nothing matches, swaps slashes
+ self.assertCommandLine([u'a/*.c'], 'a\\*.c')
+ self.assertCommandLine([u'a/?.c'], 'a\\?.c')
+ # No glob, doesn't touch slashes
+ self.assertCommandLine([u'a\\foo.c'], 'a\\foo.c')
+
+ def test_single_quote_support(self):
+ self.assertCommandLine(["add", "let's-do-it.txt"],
+ "add let's-do-it.txt",
+ ["add", "let's-do-it.txt"])
+ self.expectFailure("Using single quotes breaks trimming from argv",
+ self.assertCommandLine, ["add", "lets do it.txt"],
+ "add 'lets do it.txt'", ["add", "'lets", "do", "it.txt'"],
+ single_quotes_allowed=True)
+
+ def test_case_insensitive_globs(self):
+ if os.path.normcase("AbC") == "AbC":
+ self.skip("Test requires case insensitive globbing function")
+ self.build_tree(['a/', 'a/b.c', 'a/c.c', 'a/c.h'])
+ self.assertCommandLine([u'A/b.c'], 'A/B*')
+
+ def test_backslashes(self):
+ self.requireFeature(backslashdir_feature)
+ self.build_tree(['a/', 'a/b.c', 'a/c.c', 'a/c.h'])
+ self.assertCommandLine([u'a/b.c'], 'a\\b*')
+
+ def test_with_pdb(self):
+ """Check stripping Python arguments before bzr script per lp:587868"""
+ self.assertCommandLine([u"rocks"], "-m pdb rocks", ["rocks"])
+ self.build_tree(['d/', 'd/f1', 'd/f2'])
+ self.assertCommandLine([u"rm", u"x*"], "-m pdb rm x*", ["rm", u"x*"])
+ self.assertCommandLine([u"add", u"d/f1", u"d/f2"], "-m pdb add d/*",
+ ["add", u"d/*"])
+
+
+class TestGetEnvironUnicode(tests.TestCase):
+ """Tests for accessing the environment via the windows wide api"""
+
+ _test_needs_features = [CtypesFeature, features.win32_feature]
+
+ def setUp(self):
+ super(TestGetEnvironUnicode, self).setUp()
+ self.overrideEnv("TEST", "1")
+
+ def test_get(self):
+ """In the normal case behaves the same as os.environ access"""
+ self.assertEqual("1", win32utils.get_environ_unicode("TEST"))
+
+ def test_unset(self):
+ """A variable not present in the environment gives None by default"""
+ del os.environ["TEST"]
+ self.assertIs(None, win32utils.get_environ_unicode("TEST"))
+
+ def test_unset_default(self):
+ """A variable not present in the environment gives passed default"""
+ del os.environ["TEST"]
+ self.assertIs("a", win32utils.get_environ_unicode("TEST", "a"))
+
+ def test_unicode(self):
+ """A non-ascii variable is returned as unicode"""
+ unicode_val = u"\xa7" # non-ascii character present in many encodings
+ try:
+ bytes_val = unicode_val.encode(osutils.get_user_encoding())
+ except UnicodeEncodeError:
+ self.skip("Couldn't encode non-ascii string to place in environ")
+ os.environ["TEST"] = bytes_val
+ self.assertEqual(unicode_val, win32utils.get_environ_unicode("TEST"))
+
+ def test_long(self):
+ """A variable bigger than heuristic buffer size is still accessible"""
+ big_val = "x" * (2<<10)
+ os.environ["TEST"] = big_val
+ self.assertEqual(big_val, win32utils.get_environ_unicode("TEST"))
+
+ def test_unexpected_error(self):
+ """An error from the underlying platform function is propogated"""
+ ERROR_INVALID_PARAMETER = 87
+ SetLastError = win32utils.ctypes.windll.kernel32.SetLastError
+ def failer(*args, **kwargs):
+ SetLastError(ERROR_INVALID_PARAMETER)
+ return 0
+ self.overrideAttr(win32utils.get_environ_unicode, "_c_function",
+ failer)
+ e = self.assertRaises(WindowsError,
+ win32utils.get_environ_unicode, "TEST")
+ self.assertEqual(e.winerror, ERROR_INVALID_PARAMETER)
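+
+
+# A rough sketch of the access pattern the tests above describe (an
+# illustration only, not bzrlib's actual win32utils implementation): ask the
+# Win32 wide API for the value with a heuristically sized buffer, grow the
+# buffer if the value turns out to be longer, and propagate unexpected
+# errors from the platform call.
+#
+#     import ctypes
+#     ERROR_ENVVAR_NOT_FOUND = 203
+#
+#     def get_environ_unicode_sketch(name, default=None):
+#         kernel32 = ctypes.windll.kernel32
+#         size = 1024  # heuristic first guess
+#         while True:
+#             buf = ctypes.create_unicode_buffer(size)
+#             length = kernel32.GetEnvironmentVariableW(name, buf, size)
+#             if length == 0:
+#                 if ctypes.GetLastError() == ERROR_ENVVAR_NOT_FOUND:
+#                     return default
+#                 raise ctypes.WinError()
+#             if length < size:
+#                 return buf.value
+#             size = length  # buffer was too small; retry with required size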
diff --git a/bzrlib/tests/test_workingtree.py b/bzrlib/tests/test_workingtree.py
new file mode 100644
index 0000000..bf9558e
--- /dev/null
+++ b/bzrlib/tests/test_workingtree.py
@@ -0,0 +1,497 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib import (
+ bzrdir,
+ conflicts,
+ errors,
+ symbol_versioning,
+ transport,
+ workingtree,
+ workingtree_3,
+ workingtree_4,
+ )
+from bzrlib.lockdir import LockDir
+from bzrlib.mutabletree import needs_tree_write_lock
+from bzrlib.tests import TestCase, TestCaseWithTransport, TestSkipped
+from bzrlib.workingtree import (
+ TreeEntry,
+ TreeDirectory,
+ TreeFile,
+ TreeLink,
+ )
+
+
+class TestTreeDirectory(TestCaseWithTransport):
+
+ def test_kind_character(self):
+ self.assertEqual(TreeDirectory().kind_character(), '/')
+
+
+class TestTreeEntry(TestCaseWithTransport):
+
+ def test_kind_character(self):
+ self.assertEqual(TreeEntry().kind_character(), '???')
+
+
+class TestTreeFile(TestCaseWithTransport):
+
+ def test_kind_character(self):
+ self.assertEqual(TreeFile().kind_character(), '')
+
+
+class TestTreeLink(TestCaseWithTransport):
+
+ def test_kind_character(self):
+ self.assertEqual(TreeLink().kind_character(), '')
+
+
+class TestDefaultFormat(TestCaseWithTransport):
+
+ def test_get_set_default_format(self):
+ old_format = workingtree.format_registry.get_default()
+ # default is 6
+ self.assertTrue(isinstance(old_format, workingtree_4.WorkingTreeFormat6))
+ workingtree.format_registry.set_default(SampleTreeFormat())
+ try:
+ # the default branch format is used by the meta dir format
+ # which is not the default bzrdir format at this point
+ dir = bzrdir.BzrDirMetaFormat1().initialize('.')
+ dir.create_repository()
+ dir.create_branch()
+ result = dir.create_workingtree()
+ self.assertEqual(result, 'A tree')
+ finally:
+ workingtree.format_registry.set_default(old_format)
+ self.assertEqual(old_format, workingtree.format_registry.get_default())
+
+ def test_from_string(self):
+ self.assertIsInstance(
+ SampleTreeFormat.from_string("Sample tree format."),
+ SampleTreeFormat)
+ self.assertRaises(AssertionError,
+ SampleTreeFormat.from_string, "Different format string.")
+
+ def test_get_set_default_format_by_key(self):
+ old_format = workingtree.format_registry.get_default()
+ # default is 6
+ format = SampleTreeFormat()
+ workingtree.format_registry.register(format)
+ self.addCleanup(workingtree.format_registry.remove, format)
+ self.assertTrue(isinstance(old_format, workingtree_4.WorkingTreeFormat6))
+ workingtree.format_registry.set_default_key(format.get_format_string())
+ try:
+ # the default branch format is used by the meta dir format
+ # which is not the default bzrdir format at this point
+ dir = bzrdir.BzrDirMetaFormat1().initialize('.')
+ dir.create_repository()
+ dir.create_branch()
+ result = dir.create_workingtree()
+ self.assertEqual(result, 'A tree')
+ finally:
+ workingtree.format_registry.set_default_key(
+ old_format.get_format_string())
+ self.assertEqual(old_format, workingtree.format_registry.get_default())
+
+ def test_open(self):
+ tree = self.make_branch_and_tree('.')
+ open_direct = workingtree.WorkingTree.open('.')
+ self.assertEqual(tree.basedir, open_direct.basedir)
+ open_no_args = workingtree.WorkingTree.open()
+ self.assertEqual(tree.basedir, open_no_args.basedir)
+
+ def test_open_containing(self):
+ tree = self.make_branch_and_tree('.')
+ open_direct, relpath = workingtree.WorkingTree.open_containing('.')
+ self.assertEqual(tree.basedir, open_direct.basedir)
+ self.assertEqual('', relpath)
+ open_no_args, relpath = workingtree.WorkingTree.open_containing()
+ self.assertEqual(tree.basedir, open_no_args.basedir)
+ self.assertEqual('', relpath)
+ open_subdir, relpath = workingtree.WorkingTree.open_containing('subdir')
+ self.assertEqual(tree.basedir, open_subdir.basedir)
+ self.assertEqual('subdir', relpath)
+
+
+class SampleTreeFormat(workingtree.WorkingTreeFormatMetaDir):
+ """A sample format
+
+ This format is initializable but unsupported, to aid in testing the
+ open and open_downlevel routines.
+ """
+
+ @classmethod
+ def get_format_string(cls):
+ """See WorkingTreeFormat.get_format_string()."""
+ return "Sample tree format."
+
+ def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """Sample branches cannot be created."""
+ t = a_bzrdir.get_workingtree_transport(self)
+ t.put_bytes('format', self.get_format_string())
+ return 'A tree'
+
+ def is_supported(self):
+ return False
+
+ def open(self, transport, _found=False):
+ return "opened tree."
+
+
+class SampleExtraTreeFormat(workingtree.WorkingTreeFormat):
+ """A sample format that does not support use in a metadir.
+
+ """
+
+ def get_format_string(self):
+ # Not usable in a metadir, so no format string
+ return None
+
+ def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ raise NotImplementedError(self.initialize)
+
+ def is_supported(self):
+ return False
+
+ def open(self, transport, _found=False):
+ raise NotImplementedError(self.open)
+
+
+class TestWorkingTreeFormat(TestCaseWithTransport):
+ """Tests for the WorkingTreeFormat facility."""
+
+ def test_find_format_string(self):
+ # is the right format object found for a working tree?
+ branch = self.make_branch('branch')
+ self.assertRaises(errors.NoWorkingTree,
+ workingtree.WorkingTreeFormatMetaDir.find_format_string, branch.bzrdir)
+ transport = branch.bzrdir.get_workingtree_transport(None)
+ transport.mkdir('.')
+ transport.put_bytes("format", "some format name")
+ # The format does not have to be known by Bazaar,
+ # find_format_string just retrieves the name
+ self.assertEquals("some format name",
+ workingtree.WorkingTreeFormatMetaDir.find_format_string(branch.bzrdir))
+
+ def test_find_format(self):
+ # is the right format object found for a working tree?
+ # create a branch with a few known format objects.
+ self.build_tree(["foo/", "bar/"])
+ def check_format(format, url):
+ dir = format._matchingbzrdir.initialize(url)
+ dir.create_repository()
+ dir.create_branch()
+ format.initialize(dir)
+ t = transport.get_transport(url)
+ found_format = workingtree.WorkingTreeFormatMetaDir.find_format(dir)
+ self.assertIsInstance(found_format, format.__class__)
+ check_format(workingtree_3.WorkingTreeFormat3(), "bar")
+
+ def test_find_format_no_tree(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize('.')
+ self.assertRaises(errors.NoWorkingTree,
+ workingtree.WorkingTreeFormatMetaDir.find_format,
+ dir)
+
+ def test_find_format_unknown_format(self):
+ dir = bzrdir.BzrDirMetaFormat1().initialize('.')
+ dir.create_repository()
+ dir.create_branch()
+ SampleTreeFormat().initialize(dir)
+ self.assertRaises(errors.UnknownFormatError,
+ workingtree.WorkingTreeFormatMetaDir.find_format,
+ dir)
+
+ def test_find_format_with_features(self):
+ tree = self.make_branch_and_tree('.', format='2a')
+ tree.update_feature_flags({"name": "necessity"})
+ found_format = workingtree.WorkingTreeFormatMetaDir.find_format(
+ tree.bzrdir)
+ self.assertIsInstance(found_format, workingtree.WorkingTreeFormat)
+ self.assertEquals(found_format.features.get("name"), "necessity")
+ self.assertRaises(errors.MissingFeature, found_format.check_support_status,
+ True)
+ self.addCleanup(workingtree.WorkingTreeFormatMetaDir.unregister_feature,
+ "name")
+ workingtree.WorkingTreeFormatMetaDir.register_feature("name")
+ found_format.check_support_status(True)
+
+
+class TestWorkingTreeIterEntriesByDir_wSubtrees(TestCaseWithTransport):
+
+ def make_simple_tree(self):
+ tree = self.make_branch_and_tree('tree', format='development-subtree')
+ self.build_tree(['tree/a/', 'tree/a/b/', 'tree/a/b/c'])
+ tree.set_root_id('root-id')
+ tree.add(['a', 'a/b', 'a/b/c'], ['a-id', 'b-id', 'c-id'])
+ tree.commit('initial')
+ return tree
+
+ def test_just_directory(self):
+ tree = self.make_simple_tree()
+ self.assertEqual([('directory', 'root-id'),
+ ('directory', 'a-id'),
+ ('directory', 'b-id'),
+ ('file', 'c-id')],
+ [(ie.kind, ie.file_id)
+ for path, ie in tree.iter_entries_by_dir()])
+ subtree = self.make_branch_and_tree('tree/a/b')
+ self.assertEqual([('tree-reference', 'b-id')],
+ [(ie.kind, ie.file_id)
+ for path, ie in tree.iter_entries_by_dir(['b-id'])])
+
+ def test_direct_subtree(self):
+ tree = self.make_simple_tree()
+ subtree = self.make_branch_and_tree('tree/a/b')
+ self.assertEqual([('directory', 'root-id'),
+ ('directory', 'a-id'),
+ ('tree-reference', 'b-id')],
+ [(ie.kind, ie.file_id)
+ for path, ie in tree.iter_entries_by_dir()])
+
+ def test_indirect_subtree(self):
+ tree = self.make_simple_tree()
+ subtree = self.make_branch_and_tree('tree/a')
+ self.assertEqual([('directory', 'root-id'),
+ ('tree-reference', 'a-id')],
+ [(ie.kind, ie.file_id)
+ for path, ie in tree.iter_entries_by_dir()])
+
+
+class TestWorkingTreeFormatRegistry(TestCase):
+
+ def setUp(self):
+ super(TestWorkingTreeFormatRegistry, self).setUp()
+ self.registry = workingtree.WorkingTreeFormatRegistry()
+
+ def test_register_unregister_format(self):
+ format = SampleTreeFormat()
+ self.registry.register(format)
+ self.assertEquals(format, self.registry.get("Sample tree format."))
+ self.registry.remove(format)
+ self.assertRaises(KeyError, self.registry.get, "Sample tree format.")
+
+ def test_get_all(self):
+ format = SampleTreeFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra(self):
+ format = SampleExtraTreeFormat()
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra(format)
+ self.assertEquals([format], self.registry._get_all())
+
+ def test_register_extra_lazy(self):
+ self.assertEquals([], self.registry._get_all())
+ self.registry.register_extra_lazy("bzrlib.tests.test_workingtree",
+ "SampleExtraTreeFormat")
+ formats = self.registry._get_all()
+ self.assertEquals(1, len(formats))
+ self.assertIsInstance(formats[0], SampleExtraTreeFormat)
+
+
+class TestWorkingTreeFormat3(TestCaseWithTransport):
+ """Tests specific to WorkingTreeFormat3."""
+
+ def test_disk_layout(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ control.create_repository()
+ control.create_branch()
+ tree = workingtree_3.WorkingTreeFormat3().initialize(control)
+ # we want:
+ # format 'Bazaar-NG Working Tree format 3'
+ # inventory = blank inventory
+ # pending-merges = ''
+ # stat-cache = ??
+ # no inventory.basis yet
+ t = control.get_workingtree_transport(None)
+ self.assertEqualDiff('Bazaar-NG Working Tree format 3',
+ t.get('format').read())
+ self.assertEqualDiff(t.get('inventory').read(),
+ '<inventory format="5">\n'
+ '</inventory>\n',
+ )
+ self.assertEqualDiff('### bzr hashcache v5\n',
+ t.get('stat-cache').read())
+ self.assertFalse(t.has('inventory.basis'))
+ # no last-revision file means 'None' or 'NULLREVISION'
+ self.assertFalse(t.has('last-revision'))
+ # TODO RBC 20060210 do a commit, check the inventory.basis is created
+ # correctly and last-revision file becomes present.
+
+ def test_uses_lockdir(self):
+ """WorkingTreeFormat3 uses its own LockDir:
+
+ - lock is a directory
+ - when the WorkingTree is locked, LockDir can see that
+ """
+ t = self.get_transport()
+ url = self.get_url()
+ dir = bzrdir.BzrDirMetaFormat1().initialize(url)
+ repo = dir.create_repository()
+ branch = dir.create_branch()
+ try:
+ tree = workingtree_3.WorkingTreeFormat3().initialize(dir)
+ except errors.NotLocalUrl:
+ raise TestSkipped('Not a local URL')
+ self.assertIsDirectory('.bzr', t)
+ self.assertIsDirectory('.bzr/checkout', t)
+ self.assertIsDirectory('.bzr/checkout/lock', t)
+ our_lock = LockDir(t, '.bzr/checkout/lock')
+ self.assertEquals(our_lock.peek(), None)
+ tree.lock_write()
+ self.assertTrue(our_lock.peek())
+ tree.unlock()
+ self.assertEquals(our_lock.peek(), None)
+
+ def test_missing_pending_merges(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ control.create_repository()
+ control.create_branch()
+ tree = workingtree_3.WorkingTreeFormat3().initialize(control)
+ tree._transport.delete("pending-merges")
+ self.assertEqual([], tree.get_parent_ids())
+
+
+class InstrumentedTree(object):
+ """A instrumented tree to check the needs_tree_write_lock decorator."""
+
+ def __init__(self):
+ self._locks = []
+
+ def lock_tree_write(self):
+ self._locks.append('t')
+
+ @needs_tree_write_lock
+ def method_with_tree_write_lock(self, *args, **kwargs):
+ """A lock_tree_write decorated method that returns its arguments."""
+ return args, kwargs
+
+ @needs_tree_write_lock
+ def method_that_raises(self):
+ """This method causes an exception when called with parameters.
+
+ This allows the decorator code to be checked - it should still call
+ unlock.
+ """
+
+ def unlock(self):
+ self._locks.append('u')
+
+
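+# For context, a minimal sketch of the kind of decorator this class
+# instruments (an illustration only, not the real needs_tree_write_lock from
+# bzrlib.mutabletree). It shows the behaviour the tests below rely on: take
+# the tree write lock, call through, always unlock, and preserve the wrapped
+# method's name and docstring.
+#
+#     def needs_tree_write_lock_sketch(unbound):
+#         def tree_write_locked(self, *args, **kwargs):
+#             self.lock_tree_write()
+#             try:
+#                 return unbound(self, *args, **kwargs)
+#             finally:
+#                 self.unlock()
+#         tree_write_locked.__name__ = unbound.__name__
+#         tree_write_locked.__doc__ = unbound.__doc__
+#         return tree_write_locked
+
+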
+class TestInstrumentedTree(TestCase):
+
+ def test_needs_tree_write_lock(self):
+ """@needs_tree_write_lock should be semantically transparent."""
+ tree = InstrumentedTree()
+ self.assertEqual(
+ 'method_with_tree_write_lock',
+ tree.method_with_tree_write_lock.__name__)
+ self.assertDocstring(
+ "A lock_tree_write decorated method that returns its arguments.",
+ tree.method_with_tree_write_lock)
+ args = (1, 2, 3)
+ kwargs = {'a':'b'}
+ result = tree.method_with_tree_write_lock(1,2,3, a='b')
+ self.assertEqual((args, kwargs), result)
+ self.assertEqual(['t', 'u'], tree._locks)
+ self.assertRaises(TypeError, tree.method_that_raises, 'foo')
+ self.assertEqual(['t', 'u', 't', 'u'], tree._locks)
+
+
+class TestRevert(TestCaseWithTransport):
+
+ def test_revert_conflicts_recursive(self):
+ this_tree = self.make_branch_and_tree('this-tree')
+ self.build_tree_contents([('this-tree/foo/',),
+ ('this-tree/foo/bar', 'bar')])
+ this_tree.add(['foo', 'foo/bar'])
+ this_tree.commit('created foo/bar')
+ other_tree = this_tree.bzrdir.sprout('other-tree').open_workingtree()
+ self.build_tree_contents([('other-tree/foo/bar', 'baz')])
+ other_tree.commit('changed bar')
+ self.build_tree_contents([('this-tree/foo/bar', 'qux')])
+ this_tree.commit('changed qux')
+ this_tree.merge_from_branch(other_tree.branch)
+ self.assertEqual(1, len(this_tree.conflicts()))
+ this_tree.revert(['foo'])
+ self.assertEqual(0, len(this_tree.conflicts()))
+
+
+class TestAutoResolve(TestCaseWithTransport):
+
+ def test_auto_resolve(self):
+ base = self.make_branch_and_tree('base')
+ self.build_tree_contents([('base/hello', 'Hello')])
+ base.add('hello', 'hello_id')
+ base.commit('Hello')
+ other = base.bzrdir.sprout('other').open_workingtree()
+ self.build_tree_contents([('other/hello', 'hELLO')])
+ other.commit('Case switch')
+ this = base.bzrdir.sprout('this').open_workingtree()
+ self.assertPathExists('this/hello')
+ self.build_tree_contents([('this/hello', 'Hello World')])
+ this.commit('Add World')
+ this.merge_from_branch(other.branch)
+ self.assertEqual([conflicts.TextConflict('hello', 'hello_id')],
+ this.conflicts())
+ this.auto_resolve()
+ self.assertEqual([conflicts.TextConflict('hello', 'hello_id')],
+ this.conflicts())
+ self.build_tree_contents([('this/hello', '<<<<<<<')])
+ this.auto_resolve()
+ self.assertEqual([conflicts.TextConflict('hello', 'hello_id')],
+ this.conflicts())
+ self.build_tree_contents([('this/hello', '=======')])
+ this.auto_resolve()
+ self.assertEqual([conflicts.TextConflict('hello', 'hello_id')],
+ this.conflicts())
+ self.build_tree_contents([('this/hello', '\n>>>>>>>')])
+ remaining, resolved = this.auto_resolve()
+ self.assertEqual([conflicts.TextConflict('hello', 'hello_id')],
+ this.conflicts())
+ self.assertEqual([], resolved)
+ self.build_tree_contents([('this/hello', 'hELLO wORLD')])
+ remaining, resolved = this.auto_resolve()
+ self.assertEqual([], this.conflicts())
+ self.assertEqual([conflicts.TextConflict('hello', 'hello_id')],
+ resolved)
+ self.assertPathDoesNotExist('this/hello.BASE')
+
+ def test_auto_resolve_dir(self):
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree(['tree/hello/'])
+ tree.add('hello', 'hello-id')
+ file_conflict = conflicts.TextConflict('file', 'hello-id')
+ tree.set_conflicts(conflicts.ConflictList([file_conflict]))
+ tree.auto_resolve()
+
+
+class TestFindTrees(TestCaseWithTransport):
+
+ def test_find_trees(self):
+ self.make_branch_and_tree('foo')
+ self.make_branch_and_tree('foo/bar')
+ # Sticking a tree inside a control dir is heinous, so let's skip it
+ self.make_branch_and_tree('foo/.bzr/baz')
+ self.make_branch('qux')
+ trees = workingtree.WorkingTree.find_trees('.')
+ self.assertEqual(2, len(list(trees)))
diff --git a/bzrlib/tests/test_workingtree_4.py b/bzrlib/tests/test_workingtree_4.py
new file mode 100644
index 0000000..98e10b1
--- /dev/null
+++ b/bzrlib/tests/test_workingtree_4.py
@@ -0,0 +1,874 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WorkingTreeFormat4"""
+
+import os
+import time
+
+from bzrlib import (
+ bzrdir,
+ dirstate,
+ errors,
+ inventory,
+ osutils,
+ workingtree_4,
+ )
+from bzrlib.lockdir import LockDir
+from bzrlib.tests import TestCaseWithTransport, TestSkipped, features
+from bzrlib.tree import InterTree
+
+
+class TestWorkingTreeFormat4(TestCaseWithTransport):
+ """Tests specific to WorkingTreeFormat4."""
+
+ def test_disk_layout(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ control.create_repository()
+ control.create_branch()
+ tree = workingtree_4.WorkingTreeFormat4().initialize(control)
+ # we want:
+ # format 'Bazaar Working Tree format 4'
+ # stat-cache = ??
+ t = control.get_workingtree_transport(None)
+ self.assertEqualDiff('Bazaar Working Tree Format 4 (bzr 0.15)\n',
+ t.get('format').read())
+ self.assertFalse(t.has('inventory.basis'))
+ # no last-revision file means 'None' or 'NULLREVISION'
+ self.assertFalse(t.has('last-revision'))
+ state = dirstate.DirState.on_file(t.local_abspath('dirstate'))
+ state.lock_read()
+ try:
+ self.assertEqual([], state.get_parent_ids())
+ finally:
+ state.unlock()
+
+ def test_resets_ignores_on_last_unlock(self):
+ # Only the last unlock call will actually reset the
+ # ignores. (bug #785671)
+ tree = self.make_workingtree()
+ tree.lock_read()
+ try:
+ tree.lock_read()
+ try:
+ tree.is_ignored("foo")
+ finally:
+ tree.unlock()
+ self.assertIsNot(None, tree._ignoreglobster)
+ finally:
+ tree.unlock()
+ self.assertIs(None, tree._ignoreglobster)
+
+ def test_uses_lockdir(self):
+ """WorkingTreeFormat4 uses its own LockDir:
+
+ - lock is a directory
+ - when the WorkingTree is locked, LockDir can see that
+ """
+ # this test could be factored into a subclass of tests common to both
+ # format 3 and 4, but for now it's not much of an issue as there is only
+ # one in common.
+ t = self.get_transport()
+ tree = self.make_workingtree()
+ self.assertIsDirectory('.bzr', t)
+ self.assertIsDirectory('.bzr/checkout', t)
+ self.assertIsDirectory('.bzr/checkout/lock', t)
+ our_lock = LockDir(t, '.bzr/checkout/lock')
+ self.assertEquals(our_lock.peek(), None)
+ tree.lock_write()
+ self.assertTrue(our_lock.peek())
+ tree.unlock()
+ self.assertEquals(our_lock.peek(), None)
+
+ def make_workingtree(self, relpath=''):
+ url = self.get_url(relpath)
+ if relpath:
+ self.build_tree([relpath + '/'])
+ dir = bzrdir.BzrDirMetaFormat1().initialize(url)
+ repo = dir.create_repository()
+ branch = dir.create_branch()
+ try:
+ return workingtree_4.WorkingTreeFormat4().initialize(dir)
+ except errors.NotLocalUrl:
+ raise TestSkipped('Not a local URL')
+
+ def test_dirstate_stores_all_parent_inventories(self):
+ tree = self.make_workingtree()
+
+ # We're going to build, in 'tree', a working tree with three parent
+ # trees that have some files in common.
+
+ # We really don't want to do commit or merge in the new dirstate-based
+ # tree, because that might not work yet. So instead we build
+ # revisions elsewhere and pull them across, doing by hand part of the
+ # work that merge would do.
+
+ subtree = self.make_branch_and_tree('subdir')
+ # writelock the tree so its repository doesn't get readlocked by
+ # the revision tree locks. This works around the bug where we don't
+ # permit lock upgrading.
+ subtree.lock_write()
+ self.addCleanup(subtree.unlock)
+ self.build_tree(['subdir/file-a',])
+ subtree.add(['file-a'], ['id-a'])
+ rev1 = subtree.commit('commit in subdir')
+
+ subtree2 = subtree.bzrdir.sprout('subdir2').open_workingtree()
+ self.build_tree(['subdir2/file-b'])
+ subtree2.add(['file-b'], ['id-b'])
+ rev2 = subtree2.commit('commit in subdir2')
+
+ subtree.flush()
+ subtree3 = subtree.bzrdir.sprout('subdir3').open_workingtree()
+ rev3 = subtree3.commit('merge from subdir2')
+
+ repo = tree.branch.repository
+ repo.fetch(subtree.branch.repository, rev1)
+ repo.fetch(subtree2.branch.repository, rev2)
+ repo.fetch(subtree3.branch.repository, rev3)
+ # will also pull the others...
+
+ # create repository based revision trees
+ rev1_revtree = repo.revision_tree(rev1)
+ rev2_revtree = repo.revision_tree(rev2)
+ rev3_revtree = repo.revision_tree(rev3)
+ # tree doesn't contain a text merge yet but we'll just
+ # set the parents as if a merge had taken place.
+ # this should cause the tree data to be folded into the
+ # dirstate.
+ tree.set_parent_trees([
+ (rev1, rev1_revtree),
+ (rev2, rev2_revtree),
+ (rev3, rev3_revtree), ])
+
+ # create tree-sourced revision trees
+ rev1_tree = tree.revision_tree(rev1)
+ rev1_tree.lock_read()
+ self.addCleanup(rev1_tree.unlock)
+ rev2_tree = tree.revision_tree(rev2)
+ rev2_tree.lock_read()
+ self.addCleanup(rev2_tree.unlock)
+ rev3_tree = tree.revision_tree(rev3)
+ rev3_tree.lock_read()
+ self.addCleanup(rev3_tree.unlock)
+
+ # now we should be able to get them back out
+ self.assertTreesEqual(rev1_revtree, rev1_tree)
+ self.assertTreesEqual(rev2_revtree, rev2_tree)
+ self.assertTreesEqual(rev3_revtree, rev3_tree)
+
+ def test_dirstate_doesnt_read_parents_from_repo_when_setting(self):
+ """Setting parent trees on a dirstate working tree takes
+ the trees it's given and doesn't need to read them from the
+ repository.
+ """
+ tree = self.make_workingtree()
+
+ subtree = self.make_branch_and_tree('subdir')
+ rev1 = subtree.commit('commit in subdir')
+ rev1_tree = subtree.basis_tree()
+ rev1_tree.lock_read()
+ self.addCleanup(rev1_tree.unlock)
+
+ tree.branch.pull(subtree.branch)
+
+ # break the repository's legs to make sure it only uses the trees
+ # it's given; any calls to forbidden methods will raise an
+ # AssertionError
+ repo = tree.branch.repository
+ self.overrideAttr(repo, "get_revision", self.fail)
+ self.overrideAttr(repo, "get_inventory", self.fail)
+ self.overrideAttr(repo, "_get_inventory_xml", self.fail)
+ # try to set the parent trees.
+ tree.set_parent_trees([(rev1, rev1_tree)])
+
+ def test_dirstate_doesnt_read_from_repo_when_returning_cache_tree(self):
+ """Getting parent trees from a dirstate tree does not read from the
+ repository's inventory store. This is an important part of the dirstate
+ performance optimisation work.
+ """
+ tree = self.make_workingtree()
+
+ subtree = self.make_branch_and_tree('subdir')
+ # writelock the tree so its repository doesn't get readlocked by
+ # the revision tree locks. This works around the bug where we don't
+ # permit lock upgrading.
+ subtree.lock_write()
+ self.addCleanup(subtree.unlock)
+ rev1 = subtree.commit('commit in subdir')
+ rev1_tree = subtree.basis_tree()
+ rev1_tree.lock_read()
+ rev1_tree.root_inventory
+ self.addCleanup(rev1_tree.unlock)
+ rev2 = subtree.commit('second commit in subdir', allow_pointless=True)
+ rev2_tree = subtree.basis_tree()
+ rev2_tree.lock_read()
+ rev2_tree.root_inventory
+ self.addCleanup(rev2_tree.unlock)
+
+ tree.branch.pull(subtree.branch)
+
+ # break the repository's legs to make sure it only uses the trees
+ # it's given; any calls to forbidden methods will raise an
+ # AssertionError
+ repo = tree.branch.repository
+ # don't uncomment this: the revision object must be accessed to
+ # answer 'get_parent_ids' for the revision tree - dirstate does not
+ # cache the parents of a parent tree at this point.
+ #repo.get_revision = self.fail
+ self.overrideAttr(repo, "get_inventory", self.fail)
+ self.overrideAttr(repo, "_get_inventory_xml", self.fail)
+ # set the parent trees.
+ tree.set_parent_trees([(rev1, rev1_tree), (rev2, rev2_tree)])
+ # read the first tree
+ result_rev1_tree = tree.revision_tree(rev1)
+ # read the second
+ result_rev2_tree = tree.revision_tree(rev2)
+ # compare - there should be no differences between the handed and
+ # returned trees
+ self.assertTreesEqual(rev1_tree, result_rev1_tree)
+ self.assertTreesEqual(rev2_tree, result_rev2_tree)
+
+ def test_dirstate_doesnt_cache_non_parent_trees(self):
+ """Getting parent trees from a dirstate tree does not read from the
+ repos inventory store. This is an important part of the dirstate
+ performance optimisation work.
+ """
+ tree = self.make_workingtree()
+
+ # make a revision that is present in the repository, so it could in
+ # principle be returned, but must not be
+ subtree = self.make_branch_and_tree('subdir')
+ rev1 = subtree.commit('commit in subdir')
+ tree.branch.pull(subtree.branch)
+ # check it fails
+ self.assertRaises(errors.NoSuchRevision, tree.revision_tree, rev1)
+
+ def test_no_dirstate_outside_lock(self):
+ # temporary test until the code is mature enough to test from outside.
+ """Getting a dirstate object fails if there is no lock."""
+ def lock_and_call_current_dirstate(tree, lock_method):
+ getattr(tree, lock_method)()
+ tree.current_dirstate()
+ tree.unlock()
+ tree = self.make_workingtree()
+ self.assertRaises(errors.ObjectNotLocked, tree.current_dirstate)
+ lock_and_call_current_dirstate(tree, 'lock_read')
+ self.assertRaises(errors.ObjectNotLocked, tree.current_dirstate)
+ lock_and_call_current_dirstate(tree, 'lock_write')
+ self.assertRaises(errors.ObjectNotLocked, tree.current_dirstate)
+ lock_and_call_current_dirstate(tree, 'lock_tree_write')
+ self.assertRaises(errors.ObjectNotLocked, tree.current_dirstate)
+
+ def test_set_parent_trees_uses_update_basis_by_delta(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ builder.build_snapshot('A', [], [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot('B', ['A'], [
+ ('modify', ('a-id', 'new content\nfor a\n')),
+ ('add', ('b', 'b-id', 'file', 'b-content\n'))])
+ tree = self.make_workingtree('tree')
+ source_branch = builder.get_branch()
+ tree.branch.repository.fetch(source_branch.repository, 'B')
+ tree.pull(source_branch, stop_revision='A')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ state = tree.current_dirstate()
+ called = []
+ orig_update = state.update_basis_by_delta
+ def log_update_basis_by_delta(delta, new_revid):
+ called.append(new_revid)
+ return orig_update(delta, new_revid)
+ state.update_basis_by_delta = log_update_basis_by_delta
+ basis = tree.basis_tree()
+ self.assertEqual('a-id', basis.path2id('a'))
+ self.assertEqual(None, basis.path2id('b'))
+ def fail_set_parent_trees(trees, ghosts):
+ raise AssertionError('dirstate.set_parent_trees() was called')
+ state.set_parent_trees = fail_set_parent_trees
+ repo = tree.branch.repository
+ tree.pull(source_branch, stop_revision='B')
+ self.assertEqual(['B'], called)
+ basis = tree.basis_tree()
+ self.assertEqual('a-id', basis.path2id('a'))
+ self.assertEqual('b-id', basis.path2id('b'))
+
+ def test_set_parent_trees_handles_missing_basis(self):
+ builder = self.make_branch_builder('source')
+ builder.start_series()
+ self.addCleanup(builder.finish_series)
+ builder.build_snapshot('A', [], [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('a', 'a-id', 'file', 'content\n'))])
+ builder.build_snapshot('B', ['A'], [
+ ('modify', ('a-id', 'new content\nfor a\n')),
+ ('add', ('b', 'b-id', 'file', 'b-content\n'))])
+ builder.build_snapshot('C', ['A'], [
+ ('add', ('c', 'c-id', 'file', 'c-content\n'))])
+ b_c = self.make_branch('branch_with_c')
+ b_c.pull(builder.get_branch(), stop_revision='C')
+ b_b = self.make_branch('branch_with_b')
+ b_b.pull(builder.get_branch(), stop_revision='B')
+ # This is reproducing some of what 'switch' does, just to isolate the
+ # set_parent_trees() step.
+ wt = b_b.create_checkout('tree', lightweight=True)
+ fmt = wt.bzrdir.find_branch_format()
+ fmt.set_reference(wt.bzrdir, None, b_c)
+ # Re-open with the new reference
+ wt = wt.bzrdir.open_workingtree()
+ wt.set_parent_trees([('C', b_c.repository.revision_tree('C'))])
+ self.assertEqual(None, wt.basis_tree().path2id('b'))
+
+ def test_new_dirstate_on_new_lock(self):
+ # until we have detection for when a dirstate can be reused, we
+ # want to reparse dirstate on every new lock.
+ known_dirstates = set()
+ def lock_and_compare_all_current_dirstate(tree, lock_method):
+ getattr(tree, lock_method)()
+ state = tree.current_dirstate()
+ self.assertFalse(state in known_dirstates)
+ known_dirstates.add(state)
+ tree.unlock()
+ tree = self.make_workingtree()
+ # lock twice with each type to prevent silly per-lock-type bugs.
+ # each lock and compare looks for a unique state object.
+ lock_and_compare_all_current_dirstate(tree, 'lock_read')
+ lock_and_compare_all_current_dirstate(tree, 'lock_read')
+ lock_and_compare_all_current_dirstate(tree, 'lock_tree_write')
+ lock_and_compare_all_current_dirstate(tree, 'lock_tree_write')
+ lock_and_compare_all_current_dirstate(tree, 'lock_write')
+ lock_and_compare_all_current_dirstate(tree, 'lock_write')
+
+ def test_constructing_invalid_interdirstate_raises(self):
+ tree = self.make_workingtree()
+ rev_id = tree.commit('first post')
+ rev_id2 = tree.commit('second post')
+ rev_tree = tree.branch.repository.revision_tree(rev_id)
+ # Exception is not a great thing to raise, but this test is
+ # very short, and the code is used to sanity-check other tests, so
+ # a full error object is YAGNI.
+ self.assertRaises(
+ Exception, workingtree_4.InterDirStateTree, rev_tree, tree)
+ self.assertRaises(
+ Exception, workingtree_4.InterDirStateTree, tree, rev_tree)
+
+ def test_revtree_to_revtree_not_interdirstate(self):
+ # we should not get a dirstate optimiser for two repository sourced
+ # revtrees. We can't prove a negative, so we don't do exhaustive tests
+ # of all formats; though that could be written in the future, it doesn't
+ # seem worth it.
+ tree = self.make_workingtree()
+ rev_id = tree.commit('first post')
+ rev_id2 = tree.commit('second post')
+ rev_tree = tree.branch.repository.revision_tree(rev_id)
+ rev_tree2 = tree.branch.repository.revision_tree(rev_id2)
+ optimiser = InterTree.get(rev_tree, rev_tree2)
+ self.assertIsInstance(optimiser, InterTree)
+ self.assertFalse(isinstance(optimiser, workingtree_4.InterDirStateTree))
+ optimiser = InterTree.get(rev_tree2, rev_tree)
+ self.assertIsInstance(optimiser, InterTree)
+ self.assertFalse(isinstance(optimiser, workingtree_4.InterDirStateTree))
+
+ def test_revtree_not_in_dirstate_to_dirstate_not_interdirstate(self):
+ # we should not get a dirstate optimiser when the revision id of
+ # the source is not in the dirstate of the target.
+ tree = self.make_workingtree()
+ rev_id = tree.commit('first post')
+ rev_id2 = tree.commit('second post')
+ rev_tree = tree.branch.repository.revision_tree(rev_id)
+ tree.lock_read()
+ optimiser = InterTree.get(rev_tree, tree)
+ self.assertIsInstance(optimiser, InterTree)
+ self.assertFalse(isinstance(optimiser, workingtree_4.InterDirStateTree))
+ optimiser = InterTree.get(tree, rev_tree)
+ self.assertIsInstance(optimiser, InterTree)
+ self.assertFalse(isinstance(optimiser, workingtree_4.InterDirStateTree))
+ tree.unlock()
+
+ def test_empty_basis_to_dirstate_tree(self):
+ # we should get an InterDirStateTree for doing
+ # 'changes_from' from the first basis dirstate revision tree to a
+ # WorkingTree4.
+ tree = self.make_workingtree()
+ tree.lock_read()
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ optimiser = InterTree.get(basis_tree, tree)
+ tree.unlock()
+ basis_tree.unlock()
+ self.assertIsInstance(optimiser, workingtree_4.InterDirStateTree)
+
+ def test_nonempty_basis_to_dirstate_tree(self):
+ # we should get an InterDirStateTree for doing
+ # 'changes_from' from a non-null basis dirstate revision tree to a
+ # WorkingTree4.
+ tree = self.make_workingtree()
+ tree.commit('first post')
+ tree.lock_read()
+ basis_tree = tree.basis_tree()
+ basis_tree.lock_read()
+ optimiser = InterTree.get(basis_tree, tree)
+ tree.unlock()
+ basis_tree.unlock()
+ self.assertIsInstance(optimiser, workingtree_4.InterDirStateTree)
+
+ def test_empty_basis_revtree_to_dirstate_tree(self):
+ # we should get an InterDirStateTree for doing
+ # 'changes_from' from an empty repository based rev tree to a
+ # WorkingTree4.
+ tree = self.make_workingtree()
+ tree.lock_read()
+ basis_tree = tree.branch.repository.revision_tree(tree.last_revision())
+ basis_tree.lock_read()
+ optimiser = InterTree.get(basis_tree, tree)
+ tree.unlock()
+ basis_tree.unlock()
+ self.assertIsInstance(optimiser, workingtree_4.InterDirStateTree)
+
+ def test_nonempty_basis_revtree_to_dirstate_tree(self):
+ # we should get an InterDirStateTree for doing
+ # 'changes_from' from a non-null repository based rev tree to a
+ # WorkingTree4.
+ tree = self.make_workingtree()
+ tree.commit('first post')
+ tree.lock_read()
+ basis_tree = tree.branch.repository.revision_tree(tree.last_revision())
+ basis_tree.lock_read()
+ optimiser = InterTree.get(basis_tree, tree)
+ tree.unlock()
+ basis_tree.unlock()
+ self.assertIsInstance(optimiser, workingtree_4.InterDirStateTree)
+
+ def test_tree_to_basis_in_other_tree(self):
+ # we should get an InterDirStateTree when
+ # the source revid is in the dirstate object of the target and
+ # the dirstates are different. This is largely covered by testing
+ # with repository revtrees, so is just for extra confidence.
+ tree = self.make_workingtree('a')
+ tree.commit('first post')
+ tree2 = self.make_workingtree('b')
+ tree2.pull(tree.branch)
+ basis_tree = tree.basis_tree()
+ tree2.lock_read()
+ basis_tree.lock_read()
+ optimiser = InterTree.get(basis_tree, tree2)
+ tree2.unlock()
+ basis_tree.unlock()
+ self.assertIsInstance(optimiser, workingtree_4.InterDirStateTree)
+
+ def test_merged_revtree_to_tree(self):
+ # we should get an InterDirStateTree when
+ # the source tree is a merged tree present in the dirstate of target.
+ tree = self.make_workingtree('a')
+ tree.commit('first post')
+ tree.commit('tree 1 commit 2')
+ tree2 = self.make_workingtree('b')
+ tree2.pull(tree.branch)
+ tree2.commit('tree 2 commit 2')
+ tree.merge_from_branch(tree2.branch)
+ second_parent_tree = tree.revision_tree(tree.get_parent_ids()[1])
+ second_parent_tree.lock_read()
+ tree.lock_read()
+ optimiser = InterTree.get(second_parent_tree, tree)
+ tree.unlock()
+ second_parent_tree.unlock()
+ self.assertIsInstance(optimiser, workingtree_4.InterDirStateTree)
+
+ def test_id2path(self):
+ tree = self.make_workingtree('tree')
+ self.build_tree(['tree/a', 'tree/b'])
+ tree.add(['a'], ['a-id'])
+ self.assertEqual(u'a', tree.id2path('a-id'))
+ self.assertRaises(errors.NoSuchId, tree.id2path, 'a')
+ tree.commit('a')
+ tree.add(['b'], ['b-id'])
+
+ try:
+ new_path = u'b\u03bcrry'
+ tree.rename_one('a', new_path)
+ except UnicodeEncodeError:
+ # support running the test on non-unicode platforms
+ new_path = 'c'
+ tree.rename_one('a', new_path)
+ self.assertEqual(new_path, tree.id2path('a-id'))
+ tree.commit(u'b\xb5rry')
+ tree.unversion(['a-id'])
+ self.assertRaises(errors.NoSuchId, tree.id2path, 'a-id')
+ self.assertEqual('b', tree.id2path('b-id'))
+ self.assertRaises(errors.NoSuchId, tree.id2path, 'c-id')
+
+ def test_unique_root_id_per_tree(self):
+ # each time you initialize a new tree, it gets a different root id
+ format_name = 'development-subtree'
+ tree1 = self.make_branch_and_tree('tree1',
+ format=format_name)
+ tree2 = self.make_branch_and_tree('tree2',
+ format=format_name)
+ self.assertNotEqual(tree1.get_root_id(), tree2.get_root_id())
+ # when you branch, it inherits the same root id
+ rev1 = tree1.commit('first post')
+ tree3 = tree1.bzrdir.sprout('tree3').open_workingtree()
+ self.assertEqual(tree3.get_root_id(), tree1.get_root_id())
+
+ def test_set_root_id(self):
+ # similar to some code that fails in the dirstate-plus-subtree branch
+ # -- setting the root id while adding a parent seems to scramble the
+ # dirstate invariants. -- mbp 20070303
+ def validate():
+ wt.lock_read()
+ try:
+ wt.current_dirstate()._validate()
+ finally:
+ wt.unlock()
+ wt = self.make_workingtree('tree')
+ wt.set_root_id('TREE-ROOTID')
+ validate()
+ wt.commit('somenthing')
+ validate()
+ # now switch and commit again
+ wt.set_root_id('tree-rootid')
+ validate()
+ wt.commit('again')
+ validate()
+
+ def test_default_root_id(self):
+ tree = self.make_branch_and_tree('tag', format='dirstate-tags')
+ self.assertEqual(inventory.ROOT_ID, tree.get_root_id())
+ tree = self.make_branch_and_tree('subtree',
+ format='development-subtree')
+ self.assertNotEqual(inventory.ROOT_ID, tree.get_root_id())
+
+ def test_non_subtree_with_nested_trees(self):
+ # prior to dirstate, st/diff/commit ignored nested trees.
+ # dirstate, as opposed to development-subtree, should
+ # behave the same way.
+ tree = self.make_branch_and_tree('.', format='dirstate')
+ self.assertFalse(tree.supports_tree_reference())
+ self.build_tree(['dir/'])
+ # use a known root id to make the expected results easier to write.
+ tree.set_root_id('root')
+ tree.add(['dir'], ['dir-id'])
+ subtree = self.make_branch_and_tree('dir')
+ # the most primitive operation: kind
+ self.assertEqual('directory', tree.kind('dir-id'))
+ # a diff against the basis should give us a directory and the root (as
+ # the root is new too).
+ tree.lock_read()
+ expected = [('dir-id',
+ (None, u'dir'),
+ True,
+ (False, True),
+ (None, 'root'),
+ (None, u'dir'),
+ (None, 'directory'),
+ (None, False)),
+ ('root', (None, u''), True, (False, True), (None, None),
+ (None, u''), (None, 'directory'), (None, 0))]
+ self.assertEqual(expected, list(tree.iter_changes(tree.basis_tree(),
+ specific_files=['dir'])))
+ tree.unlock()
+ # do a commit, we want to trigger the dirstate fast-path too
+ tree.commit('first post')
+ # change the path for the subdir, which will trigger getting all
+ # its data:
+ os.rename('dir', 'also-dir')
+ # now the diff will use the fast path
+ tree.lock_read()
+ expected = [('dir-id',
+ (u'dir', u'dir'),
+ True,
+ (True, True),
+ ('root', 'root'),
+ ('dir', 'dir'),
+ ('directory', None),
+ (False, False))]
+ self.assertEqual(expected, list(tree.iter_changes(tree.basis_tree())))
+ tree.unlock()
+
+ def test_with_subtree_supports_tree_references(self):
+ # development-subtree should support tree-references.
+ tree = self.make_branch_and_tree('.', format='development-subtree')
+ self.assertTrue(tree.supports_tree_reference())
+ # having checked this is on, the tree interface, and intertree
+ # interface tests, will proceed to test the subtree support of
+ # workingtree_4.
+
+ def test_iter_changes_ignores_unversioned_dirs(self):
+ """iter_changes should not descend into unversioned directories."""
+ tree = self.make_branch_and_tree('.', format='dirstate')
+ # We have an unversioned directory at the root, a versioned one with
+ # other versioned files and an unversioned directory, and another
+ # versioned dir with nothing but an unversioned directory.
+ self.build_tree(['unversioned/',
+ 'unversioned/a',
+ 'unversioned/b/',
+ 'versioned/',
+ 'versioned/unversioned/',
+ 'versioned/unversioned/a',
+ 'versioned/unversioned/b/',
+ 'versioned2/',
+ 'versioned2/a',
+ 'versioned2/unversioned/',
+ 'versioned2/unversioned/a',
+ 'versioned2/unversioned/b/',
+ ])
+ tree.add(['versioned', 'versioned2', 'versioned2/a'])
+ tree.commit('one', rev_id='rev-1')
+ # Trap osutils._walkdirs_utf8 to spy on what dirs have been accessed.
+ returned = []
+ def walkdirs_spy(*args, **kwargs):
+ for val in orig(*args, **kwargs):
+ returned.append(val[0][0])
+ yield val
+ orig = self.overrideAttr(osutils, '_walkdirs_utf8', walkdirs_spy)
+
+ basis = tree.basis_tree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ basis.lock_read()
+ self.addCleanup(basis.unlock)
+ changes = [c[1] for c in
+ tree.iter_changes(basis, want_unversioned=True)]
+ self.assertEqual([(None, 'unversioned'),
+ (None, 'versioned/unversioned'),
+ (None, 'versioned2/unversioned'),
+ ], changes)
+ self.assertEqual(['', 'versioned', 'versioned2'], returned)
+ del returned[:] # reset
+ changes = [c[1] for c in tree.iter_changes(basis)]
+ self.assertEqual([], changes)
+ self.assertEqual(['', 'versioned', 'versioned2'], returned)
+
+ def test_iter_changes_unversioned_error(self):
+ """ Check if a PathsNotVersionedError is correctly raised and the
+ paths list contains all unversioned entries only.
+ """
+ tree = self.make_branch_and_tree('tree')
+ self.build_tree_contents([('tree/bar', '')])
+ tree.add(['bar'], ['bar-id'])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ tree_iter_changes = lambda files: [
+ c for c in tree.iter_changes(tree.basis_tree(), specific_files=files,
+ require_versioned=True)
+ ]
+ e = self.assertRaises(errors.PathsNotVersionedError,
+ tree_iter_changes, ['bar', 'foo'])
+ self.assertEqual(e.paths, ['foo'])
+
+ def test_iter_changes_unversioned_non_ascii(self):
+ """Unversioned non-ascii paths should be reported as unicode"""
+ self.requireFeature(features.UnicodeFilenameFeature)
+ tree = self.make_branch_and_tree('.')
+ self.build_tree_contents([('f', '')])
+ tree.add(['f'], ['f-id'])
+ def tree_iter_changes(tree, files):
+ return list(tree.iter_changes(tree.basis_tree(),
+ specific_files=files, require_versioned=True))
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ e = self.assertRaises(errors.PathsNotVersionedError,
+ tree_iter_changes, tree, [u'\xa7', u'\u03c0'])
+ self.assertEqual(e.paths, [u'\xa7', u'\u03c0'])
+
+ def get_tree_with_cachable_file_foo(self):
+ tree = self.make_branch_and_tree('.')
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ self.build_tree_contents([('foo', 'a bit of content for foo\n')])
+ tree.add(['foo'], ['foo-id'])
+ tree.current_dirstate()._cutoff_time = time.time() + 60
+ return tree
+
+ def test_commit_updates_hash_cache(self):
+ tree = self.get_tree_with_cachable_file_foo()
+ revid = tree.commit('a commit')
+ # tree's dirstate should now have a valid stat entry for foo.
+ entry = tree._get_entry(path='foo')
+ expected_sha1 = osutils.sha_file_by_name('foo')
+ self.assertEqual(expected_sha1, entry[1][0][1])
+ self.assertEqual(len('a bit of content for foo\n'), entry[1][0][2])
+
+ def test_observed_sha1_cachable(self):
+ tree = self.get_tree_with_cachable_file_foo()
+ expected_sha1 = osutils.sha_file_by_name('foo')
+ statvalue = os.lstat("foo")
+ tree._observed_sha1("foo-id", "foo", (expected_sha1, statvalue))
+ entry = tree._get_entry(path="foo")
+ entry_state = entry[1][0]
+ self.assertEqual(expected_sha1, entry_state[1])
+ self.assertEqual(statvalue.st_size, entry_state[2])
+ tree.unlock()
+ tree.lock_read()
+ tree = tree.bzrdir.open_workingtree()
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ entry = tree._get_entry(path="foo")
+ entry_state = entry[1][0]
+ self.assertEqual(expected_sha1, entry_state[1])
+ self.assertEqual(statvalue.st_size, entry_state[2])
+
+ def test_observed_sha1_new_file(self):
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.add(['foo'], ['foo-id'])
+ tree.lock_read()
+ try:
+ current_sha1 = tree._get_entry(path="foo")[1][0][1]
+ finally:
+ tree.unlock()
+ tree.lock_write()
+ try:
+ tree._observed_sha1("foo-id", "foo",
+ (osutils.sha_file_by_name('foo'), os.lstat("foo")))
+ # Must not have changed
+ self.assertEqual(current_sha1,
+ tree._get_entry(path="foo")[1][0][1])
+ finally:
+ tree.unlock()
+
+ def test_get_file_with_stat_id_only(self):
+ # Explicit test to ensure we get a lstat value from WT4 trees.
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['foo'])
+ tree.add(['foo'], ['foo-id'])
+ tree.lock_read()
+ self.addCleanup(tree.unlock)
+ file_obj, statvalue = tree.get_file_with_stat('foo-id')
+ expected = os.lstat('foo')
+ self.assertEqualStat(expected, statvalue)
+ self.assertEqual(["contents of foo\n"], file_obj.readlines())
+
+
+class TestCorruptDirstate(TestCaseWithTransport):
+ """Tests for how we handle when the dirstate has been corrupted."""
+
+ def create_wt4(self):
+ control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
+ control.create_repository()
+ control.create_branch()
+ tree = workingtree_4.WorkingTreeFormat4().initialize(control)
+ return tree
+
+ def test_invalid_rename(self):
+ tree = self.create_wt4()
+ # Create a corrupted dirstate
+ tree.lock_write()
+ try:
+ # We need a parent, or we always compare with NULL
+ tree.commit('init')
+ state = tree.current_dirstate()
+ state._read_dirblocks_if_needed()
+ # Now add in an invalid entry, a rename with a dangling pointer
+ state._dirblocks[1][1].append((('', 'foo', 'foo-id'),
+ [('f', '', 0, False, ''),
+ ('r', 'bar', 0 , False, '')]))
+ self.assertListRaises(errors.CorruptDirstate,
+ tree.iter_changes, tree.basis_tree())
+ finally:
+ tree.unlock()
+
+ def get_simple_dirblocks(self, state):
+ """Extract the simple information from the DirState.
+
+ This returns the dirblocks, only with the sha1sum and stat details
+ filtered out.
+ """
+ simple_blocks = []
+ for block in state._dirblocks:
+ simple_block = (block[0], [])
+ for entry in block[1]:
+ # Include the key for each entry, and for each parent include
+ # just the minikind, so we know if it was
+ # present/absent/renamed/etc
+ simple_block[1].append((entry[0], [i[0] for i in entry[1]]))
+ simple_blocks.append(simple_block)
+ return simple_blocks
+
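+ # For orientation, the simplified shape returned above looks like this
+ # (drawn from the assertions in the tests below):
+ #     [('', [(('', '', root_id), ['d', 'd'])]),
+ #      ('dir', [(('dir', 'file', 'file-id'), ['f', 'f'])])]
+ # i.e. (dirname, [((dirname, basename, file_id), [minikind per tree])]).
+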
+ def test_update_basis_with_invalid_delta(self):
+ """When given an invalid delta, it should abort, and not be saved."""
+ self.build_tree(['dir/', 'dir/file'])
+ tree = self.create_wt4()
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ tree.add(['dir', 'dir/file'], ['dir-id', 'file-id'])
+ first_revision_id = tree.commit('init')
+
+ root_id = tree.path2id('')
+ state = tree.current_dirstate()
+ state._read_dirblocks_if_needed()
+ self.assertEqual([
+ ('', [(('', '', root_id), ['d', 'd'])]),
+ ('', [(('', 'dir', 'dir-id'), ['d', 'd'])]),
+ ('dir', [(('dir', 'file', 'file-id'), ['f', 'f'])]),
+ ], self.get_simple_dirblocks(state))
+
+ tree.remove(['dir/file'])
+ self.assertEqual([
+ ('', [(('', '', root_id), ['d', 'd'])]),
+ ('', [(('', 'dir', 'dir-id'), ['d', 'd'])]),
+ ('dir', [(('dir', 'file', 'file-id'), ['a', 'f'])]),
+ ], self.get_simple_dirblocks(state))
+ # Make sure the removal is written to disk
+ tree.flush()
+
+ # self.assertRaises(Exception, tree.update_basis_by_delta,
+ new_dir = inventory.InventoryDirectory('dir-id', 'new-dir', root_id)
+ new_dir.revision = 'new-revision-id'
+ new_file = inventory.InventoryFile('file-id', 'new-file', root_id)
+ new_file.revision = 'new-revision-id'
+ self.assertRaises(errors.InconsistentDelta,
+ tree.update_basis_by_delta, 'new-revision-id',
+ [('dir', 'new-dir', 'dir-id', new_dir),
+ ('dir/file', 'new-dir/new-file', 'file-id', new_file),
+ ])
+ del state
+
+ # Now when we re-read the file it should not have been modified
+ tree.unlock()
+ tree.lock_read()
+ self.assertEqual(first_revision_id, tree.last_revision())
+ state = tree.current_dirstate()
+ state._read_dirblocks_if_needed()
+ self.assertEqual([
+ ('', [(('', '', root_id), ['d', 'd'])]),
+ ('', [(('', 'dir', 'dir-id'), ['d', 'd'])]),
+ ('dir', [(('dir', 'file', 'file-id'), ['a', 'f'])]),
+ ], self.get_simple_dirblocks(state))
+
+
+class TestInventoryCoherency(TestCaseWithTransport):
+
+ def test_inventory_is_synced_when_unversioning_a_dir(self):
+ """Unversioning the root of a subtree unversions the entire subtree."""
+ tree = self.make_branch_and_tree('.')
+ self.build_tree(['a/', 'a/b', 'c/'])
+ tree.add(['a', 'a/b', 'c'], ['a-id', 'b-id', 'c-id'])
+ # within a lock unversion should take effect
+ tree.lock_write()
+ self.addCleanup(tree.unlock)
+ # Force access to the in-memory inventory to trigger bug #494221:
+ # the in-memory inventory must be kept in sync
+ inv = tree.root_inventory
+ self.assertTrue(inv.has_id('a-id'))
+ self.assertTrue(inv.has_id('b-id'))
+ tree.unversion(['a-id', 'b-id'])
+ self.assertFalse(inv.has_id('a-id'))
+ self.assertFalse(inv.has_id('b-id'))
diff --git a/bzrlib/tests/test_wsgi.py b/bzrlib/tests/test_wsgi.py
new file mode 100644
index 0000000..d55949f
--- /dev/null
+++ b/bzrlib/tests/test_wsgi.py
@@ -0,0 +1,328 @@
+# Copyright (C) 2006-2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tests for WSGI application"""
+
+from cStringIO import StringIO
+
+from bzrlib import tests
+from bzrlib.smart import medium, protocol
+from bzrlib.transport.http import wsgi
+from bzrlib.transport import chroot, memory
+
+
+class WSGITestMixin(object):
+
+ def build_environ(self, updates=None):
+ """Builds an environ dict with all fields required by PEP 333.
+
+ :param updates: a dict that will be incorporated into the returned
+ dict using dict.update(updates).
+ """
+ environ = {
+ # Required CGI variables
+ 'REQUEST_METHOD': 'GET',
+ 'SCRIPT_NAME': '/script/name/',
+ 'PATH_INFO': 'path/info',
+ 'SERVER_NAME': 'test',
+ 'SERVER_PORT': '9999',
+ 'SERVER_PROTOCOL': 'HTTP/1.0',
+
+ # Required WSGI variables
+ 'wsgi.version': (1,0),
+ 'wsgi.url_scheme': 'http',
+ 'wsgi.input': StringIO(''),
+ 'wsgi.errors': StringIO(),
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': True,
+ }
+ if updates is not None:
+ environ.update(updates)
+ return environ
+
+ def read_response(self, iterable):
+ response = ''
+ for string in iterable:
+ response += string
+ return response
+
+ def start_response(self, status, headers):
+ self.status = status
+ self.headers = headers
+
+
+class TestWSGI(tests.TestCaseInTempDir, WSGITestMixin):
+
+ def setUp(self):
+ tests.TestCase.setUp(self)
+ self.status = None
+ self.headers = None
+
+ def test_construct(self):
+ app = wsgi.SmartWSGIApp(FakeTransport())
+ self.assertIsInstance(
+ app.backing_transport, chroot.ChrootTransport)
+
+ def test_http_get_rejected(self):
+ # GET requests are rejected.
+ app = wsgi.SmartWSGIApp(FakeTransport())
+ environ = self.build_environ({'REQUEST_METHOD': 'GET'})
+ iterable = app(environ, self.start_response)
+ self.read_response(iterable)
+ self.assertEqual('405 Method not allowed', self.status)
+ self.assertTrue(('Allow', 'POST') in self.headers)
+
+ def _fake_make_request(self, transport, write_func, bytes, rcp):
+ request = FakeRequest(transport, write_func)
+ request.accept_bytes(bytes)
+ self.request = request
+ return request
+
+ def test_smart_wsgi_app_uses_given_relpath(self):
+ # The SmartWSGIApp should use the "bzrlib.relpath" field from the
+ # WSGI environ to clone from its backing transport to get a specific
+ # transport for this request.
+ transport = FakeTransport()
+ wsgi_app = wsgi.SmartWSGIApp(transport)
+ wsgi_app.backing_transport = transport
+ wsgi_app.make_request = self._fake_make_request
+ fake_input = StringIO('fake request')
+ environ = self.build_environ({
+ 'REQUEST_METHOD': 'POST',
+ 'CONTENT_LENGTH': len(fake_input.getvalue()),
+ 'wsgi.input': fake_input,
+ 'bzrlib.relpath': 'foo/bar',
+ })
+ iterable = wsgi_app(environ, self.start_response)
+ response = self.read_response(iterable)
+        self.assertEqual([('clone', 'foo/bar/')], transport.calls)
+
+ def test_smart_wsgi_app_request_and_response(self):
+ # SmartWSGIApp reads the smart request from the 'wsgi.input' file-like
+ # object in the environ dict, and returns the response via the iterable
+ # returned to the WSGI handler.
+ transport = memory.MemoryTransport()
+ transport.put_bytes('foo', 'some bytes')
+ wsgi_app = wsgi.SmartWSGIApp(transport)
+ wsgi_app.make_request = self._fake_make_request
+ fake_input = StringIO('fake request')
+ environ = self.build_environ({
+ 'REQUEST_METHOD': 'POST',
+ 'CONTENT_LENGTH': len(fake_input.getvalue()),
+ 'wsgi.input': fake_input,
+ 'bzrlib.relpath': 'foo',
+ })
+ iterable = wsgi_app(environ, self.start_response)
+ response = self.read_response(iterable)
+ self.assertEqual('200 OK', self.status)
+ self.assertEqual('got bytes: fake request', response)
+
+ def test_relpath_setter(self):
+ # wsgi.RelpathSetter is WSGI "middleware" to set the 'bzrlib.relpath'
+ # variable.
+ calls = []
+ def fake_app(environ, start_response):
+ calls.append(environ['bzrlib.relpath'])
+ wrapped_app = wsgi.RelpathSetter(
+ fake_app, prefix='/abc/', path_var='FOO')
+ wrapped_app({'FOO': '/abc/xyz/.bzr/smart'}, None)
+ self.assertEqual(['xyz'], calls)
+
+ def test_relpath_setter_bad_path_prefix(self):
+        # wsgi.RelpathSetter will reject paths that don't match the prefix
+ # with a 404. This is probably a sign of misconfiguration; a server
+ # shouldn't ever be invoking our WSGI application with bad paths.
+ def fake_app(environ, start_response):
+ self.fail('The app should never be called when the path is wrong')
+ wrapped_app = wsgi.RelpathSetter(
+ fake_app, prefix='/abc/', path_var='FOO')
+ iterable = wrapped_app(
+ {'FOO': 'AAA/abc/xyz/.bzr/smart'}, self.start_response)
+ self.read_response(iterable)
+ self.assertTrue(self.status.startswith('404'))
+
+ def test_relpath_setter_bad_path_suffix(self):
+ # Similar to test_relpath_setter_bad_path_prefix: wsgi.RelpathSetter
+        # will reject paths that don't match the suffix '.bzr/smart' with a
+ # 404 as well. Again, this shouldn't be seen by our WSGI application if
+ # the server is configured correctly.
+ def fake_app(environ, start_response):
+ self.fail('The app should never be called when the path is wrong')
+ wrapped_app = wsgi.RelpathSetter(
+ fake_app, prefix='/abc/', path_var='FOO')
+ iterable = wrapped_app(
+ {'FOO': '/abc/xyz/.bzr/AAA'}, self.start_response)
+ self.read_response(iterable)
+ self.assertTrue(self.status.startswith('404'))
+
+ def test_make_app(self):
+ # The make_app helper constructs a SmartWSGIApp wrapped in a
+ # RelpathSetter.
+ app = wsgi.make_app(
+ root='a root',
+ prefix='a prefix',
+ path_var='a path_var')
+ self.assertIsInstance(app, wsgi.RelpathSetter)
+ self.assertIsInstance(app.app, wsgi.SmartWSGIApp)
+ self.assertStartsWith(app.app.backing_transport.base, 'chroot-')
+ backing_transport = app.app.backing_transport
+ chroot_backing_transport = backing_transport.server.backing_transport
+ self.assertEndsWith(chroot_backing_transport.base, 'a%20root/')
+ self.assertEqual(app.app.root_client_path, 'a prefix')
+ self.assertEqual(app.path_var, 'a path_var')
+
+ def test_incomplete_request(self):
+ transport = FakeTransport()
+ wsgi_app = wsgi.SmartWSGIApp(transport)
+ def make_request(transport, write_func, bytes, root_client_path):
+ request = IncompleteRequest(transport, write_func)
+ request.accept_bytes(bytes)
+ self.request = request
+ return request
+ wsgi_app.make_request = make_request
+
+ fake_input = StringIO('incomplete request')
+ environ = self.build_environ({
+ 'REQUEST_METHOD': 'POST',
+ 'CONTENT_LENGTH': len(fake_input.getvalue()),
+ 'wsgi.input': fake_input,
+ 'bzrlib.relpath': 'foo/bar',
+ })
+ iterable = wsgi_app(environ, self.start_response)
+ response = self.read_response(iterable)
+ self.assertEqual('200 OK', self.status)
+ self.assertEqual('error\x01incomplete request\n', response)
+
+ def test_protocol_version_detection_one(self):
+ # SmartWSGIApp detects requests that don't start with
+ # REQUEST_VERSION_TWO as version one.
+ transport = memory.MemoryTransport()
+ wsgi_app = wsgi.SmartWSGIApp(transport)
+ fake_input = StringIO('hello\n')
+ environ = self.build_environ({
+ 'REQUEST_METHOD': 'POST',
+ 'CONTENT_LENGTH': len(fake_input.getvalue()),
+ 'wsgi.input': fake_input,
+ 'bzrlib.relpath': 'foo',
+ })
+ iterable = wsgi_app(environ, self.start_response)
+ response = self.read_response(iterable)
+ self.assertEqual('200 OK', self.status)
+ # Expect a version 1-encoded response.
+ self.assertEqual('ok\x012\n', response)
+
+ def test_protocol_version_detection_two(self):
+ # SmartWSGIApp detects requests that start with REQUEST_VERSION_TWO
+ # as version two.
+ transport = memory.MemoryTransport()
+ wsgi_app = wsgi.SmartWSGIApp(transport)
+ fake_input = StringIO(protocol.REQUEST_VERSION_TWO + 'hello\n')
+ environ = self.build_environ({
+ 'REQUEST_METHOD': 'POST',
+ 'CONTENT_LENGTH': len(fake_input.getvalue()),
+ 'wsgi.input': fake_input,
+ 'bzrlib.relpath': 'foo',
+ })
+ iterable = wsgi_app(environ, self.start_response)
+ response = self.read_response(iterable)
+ self.assertEqual('200 OK', self.status)
+ # Expect a version 2-encoded response.
+ self.assertEqual(
+ protocol.RESPONSE_VERSION_TWO + 'success\nok\x012\n', response)
+
+
+class TestWSGIJail(tests.TestCaseWithMemoryTransport, WSGITestMixin):
+
+ def make_hpss_wsgi_request(self, wsgi_relpath, *args):
+ write_buf = StringIO()
+ request_medium = medium.SmartSimplePipesClientMedium(
+ None, write_buf, 'fake:' + wsgi_relpath)
+ request_encoder = protocol.ProtocolThreeRequester(
+ request_medium.get_request())
+ request_encoder.call(*args)
+ write_buf.seek(0)
+ environ = self.build_environ({
+ 'REQUEST_METHOD': 'POST',
+ 'CONTENT_LENGTH': len(write_buf.getvalue()),
+ 'wsgi.input': write_buf,
+ 'bzrlib.relpath': wsgi_relpath,
+ })
+ return environ
+
+ def test_jail_root(self):
+ """The WSGI HPSS glue allows access to the whole WSGI backing
+ transport, regardless of which HTTP path the request was delivered
+ to.
+ """
+ # make a branch in a shared repo
+ self.make_repository('repo', shared=True)
+ branch = self.make_bzrdir('repo/branch').create_branch()
+ # serve the repo via bzr+http WSGI
+ wsgi_app = wsgi.SmartWSGIApp(self.get_transport())
+ # send a request to /repo/branch that will have to access /repo.
+ environ = self.make_hpss_wsgi_request(
+ '/repo/branch', 'BzrDir.open_branchV2', '.')
+ iterable = wsgi_app(environ, self.start_response)
+ response_bytes = self.read_response(iterable)
+ self.assertEqual('200 OK', self.status)
+ # expect a successful response, rather than a jail break error
+ from bzrlib.tests.test_smart_transport import LoggingMessageHandler
+ message_handler = LoggingMessageHandler()
+ decoder = protocol.ProtocolThreeDecoder(
+ message_handler, expect_version_marker=True)
+ decoder.accept_bytes(response_bytes)
+ self.assertTrue(
+ ('structure', ('branch', branch._format.network_name()))
+ in message_handler.event_log)
+
+
+class FakeRequest(object):
+
+ def __init__(self, transport, write_func):
+ self.transport = transport
+ self.write_func = write_func
+ self.accepted_bytes = ''
+
+ def accept_bytes(self, bytes):
+ self.accepted_bytes = bytes
+ self.write_func('got bytes: ' + bytes)
+
+ def next_read_size(self):
+ return 0
+
+
+class FakeTransport(object):
+
+ def __init__(self):
+ self.calls = []
+ self.base = 'fake:///'
+
+ def abspath(self, relpath):
+ return 'fake:///' + relpath
+
+ def clone(self, relpath):
+ self.calls.append(('clone', relpath))
+ return self
+
+
+class IncompleteRequest(FakeRequest):
+ """A request-like object that always expects to read more bytes."""
+
+ def next_read_size(self):
+ # this request always asks for more
+ return 1
+
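As a side note on the WSGI glue exercised above, here is a minimal sketch (not part of the diff) of wiring wsgi.make_app into a stock WSGI server. It only uses the make_app keywords shown in test_make_app; the root path, URL prefix and port are illustrative assumptions.

    from wsgiref.simple_server import make_server

    from bzrlib.transport.http import wsgi

    # Build a SmartWSGIApp wrapped in a RelpathSetter, as test_make_app checks.
    app = wsgi.make_app(
        root='/srv/bzr/repositories',   # hypothetical directory of branches
        prefix='/bzr/',                 # URL prefix RelpathSetter strips off
        path_var='PATH_INFO')           # environ key holding the request path

    if __name__ == '__main__':
        # Clients POST smart requests to /bzr/<branch>/.bzr/smart, matching
        # the paths accepted by RelpathSetter in the tests above.
        make_server('localhost', 8080, app).serve_forever()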
diff --git a/bzrlib/tests/test_xml.py b/bzrlib/tests/test_xml.py
new file mode 100644
index 0000000..6f429bc
--- /dev/null
+++ b/bzrlib/tests/test_xml.py
@@ -0,0 +1,538 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ errors,
+ fifo_cache,
+    inventory,
+    osutils,
+    xml6,
+    xml7,
+    xml8,
+    xml_serializer,
+ )
+from bzrlib.tests import TestCase
+from bzrlib.inventory import Inventory
+import bzrlib.xml5
+
+_revision_v5 = """<revision committer="Martin Pool &lt;mbp@sourcefrog.net&gt;"
+ inventory_sha1="e79c31c1deb64c163cf660fdedd476dd579ffd41"
+ revision_id="mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9"
+ timestamp="1125907235.212"
+ timezone="36000">
+<message>- start splitting code for xml (de)serialization away from objects
+ preparatory to supporting multiple formats by a single library
+</message>
+<parents>
+<revision_ref revision_id="mbp@sourcefrog.net-20050905063503-43948f59fa127d92"/>
+</parents>
+</revision>
+"""
+
+_revision_v5_utc = """\
+<revision committer="Martin Pool &lt;mbp@sourcefrog.net&gt;"
+ inventory_sha1="e79c31c1deb64c163cf660fdedd476dd579ffd41"
+ revision_id="mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9"
+ timestamp="1125907235.212"
+ timezone="0">
+<message>- start splitting code for xml (de)serialization away from objects
+ preparatory to supporting multiple formats by a single library
+</message>
+<parents>
+<revision_ref revision_id="mbp@sourcefrog.net-20050905063503-43948f59fa127d92"/>
+</parents>
+</revision>
+"""
+
+_committed_inv_v5 = """<inventory>
+<file file_id="bar-20050901064931-73b4b1138abc9cd2"
+ name="bar" parent_id="TREE_ROOT"
+ revision="mbp@foo-123123"
+ text_sha1="A" text_size="1"/>
+<directory name="subdir"
+ file_id="foo-20050801201819-4139aa4a272f4250"
+ parent_id="TREE_ROOT"
+ revision="mbp@foo-00"/>
+<file executable="yes" file_id="bar-20050824000535-6bc48cfad47ed134"
+ name="bar" parent_id="foo-20050801201819-4139aa4a272f4250"
+ revision="mbp@foo-00"
+ text_sha1="B" text_size="0"/>
+</inventory>
+"""
+
+_basis_inv_v5 = """<inventory revision_id="mbp@sourcefrog.net-20050905063503-43948f59fa127d92">
+<file file_id="bar-20050901064931-73b4b1138abc9cd2"
+ name="bar" parent_id="TREE_ROOT"
+ revision="mbp@foo-123123"/>
+<directory name="subdir"
+ file_id="foo-20050801201819-4139aa4a272f4250"
+ parent_id="TREE_ROOT"
+ revision="mbp@foo-00"/>
+<file file_id="bar-20050824000535-6bc48cfad47ed134"
+ name="bar" parent_id="foo-20050801201819-4139aa4a272f4250"
+ revision="mbp@foo-00"/>
+</inventory>
+"""
+
+
+# DO NOT REFLOW THIS. It's the exact revision we want.
+_expected_rev_v5 = """<revision committer="Martin Pool &lt;mbp@sourcefrog.net&gt;" format="5" inventory_sha1="e79c31c1deb64c163cf660fdedd476dd579ffd41" revision_id="mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9" timestamp="1125907235.212" timezone="36000">
+<message>- start splitting code for xml (de)serialization away from objects
+ preparatory to supporting multiple formats by a single library
+</message>
+<parents>
+<revision_ref revision_id="mbp@sourcefrog.net-20050905063503-43948f59fa127d92" />
+</parents>
+</revision>
+"""
+
+
+# DO NOT REFLOW THIS. It's the exact inventory we want.
+_expected_inv_v5 = """<inventory format="5">
+<file file_id="bar-20050901064931-73b4b1138abc9cd2" name="bar" revision="mbp@foo-123123" text_sha1="A" text_size="1" />
+<directory file_id="foo-20050801201819-4139aa4a272f4250" name="subdir" revision="mbp@foo-00" />
+<file executable="yes" file_id="bar-20050824000535-6bc48cfad47ed134" name="bar" parent_id="foo-20050801201819-4139aa4a272f4250" revision="mbp@foo-00" text_sha1="B" text_size="0" />
+</inventory>
+"""
+
+
+_expected_inv_v5_root = """<inventory file_id="f&lt;" format="5" revision_id="mother!">
+<file file_id="bar-20050901064931-73b4b1138abc9cd2" name="bar" parent_id="f&lt;" revision="mbp@foo-123123" text_sha1="A" text_size="1" />
+<directory file_id="foo-20050801201819-4139aa4a272f4250" name="subdir" parent_id="f&lt;" revision="mbp@foo-00" />
+<file executable="yes" file_id="bar-20050824000535-6bc48cfad47ed134" name="bar" parent_id="foo-20050801201819-4139aa4a272f4250" revision="mbp@foo-00" text_sha1="B" text_size="0" />
+<symlink file_id="link-1" name="link" parent_id="foo-20050801201819-4139aa4a272f4250" revision="mbp@foo-00" symlink_target="a" />
+</inventory>
+"""
+
+_expected_inv_v6 = """<inventory format="6" revision_id="rev_outer">
+<directory file_id="tree-root-321" name="" revision="rev_outer" />
+<directory file_id="dir-id" name="dir" parent_id="tree-root-321" revision="rev_outer" />
+<file file_id="file-id" name="file" parent_id="tree-root-321" revision="rev_outer" text_sha1="A" text_size="1" />
+<symlink file_id="link-id" name="link" parent_id="tree-root-321" revision="rev_outer" symlink_target="a" />
+</inventory>
+"""
+
+_expected_inv_v7 = """<inventory format="7" revision_id="rev_outer">
+<directory file_id="tree-root-321" name="" revision="rev_outer" />
+<directory file_id="dir-id" name="dir" parent_id="tree-root-321" revision="rev_outer" />
+<file file_id="file-id" name="file" parent_id="tree-root-321" revision="rev_outer" text_sha1="A" text_size="1" />
+<symlink file_id="link-id" name="link" parent_id="tree-root-321" revision="rev_outer" symlink_target="a" />
+<tree-reference file_id="nested-id" name="nested" parent_id="tree-root-321" revision="rev_outer" reference_revision="rev_inner" />
+</inventory>
+"""
+
+_expected_rev_v8 = """<revision committer="Martin Pool &lt;mbp@sourcefrog.net&gt;" format="8" inventory_sha1="e79c31c1deb64c163cf660fdedd476dd579ffd41" revision_id="mbp@sourcefrog.net-20050905080035-e0439293f8b6b9f9" timestamp="1125907235.212" timezone="36000">
+<message>- start splitting code for xml (de)serialization away from objects
+ preparatory to supporting multiple formats by a single library
+</message>
+<parents>
+<revision_ref revision_id="mbp@sourcefrog.net-20050905063503-43948f59fa127d92" />
+</parents>
+</revision>
+"""
+
+_expected_inv_v8 = """<inventory format="8" revision_id="rev_outer">
+<directory file_id="tree-root-321" name="" revision="rev_outer" />
+<directory file_id="dir-id" name="dir" parent_id="tree-root-321" revision="rev_outer" />
+<file file_id="file-id" name="file" parent_id="tree-root-321" revision="rev_outer" text_sha1="A" text_size="1" />
+<symlink file_id="link-id" name="link" parent_id="tree-root-321" revision="rev_outer" symlink_target="a" />
+</inventory>
+"""
+
+_revision_utf8_v5 = """<revision committer="Erik B&#229;gfors &lt;erik@foo.net&gt;"
+ inventory_sha1="e79c31c1deb64c163cf660fdedd476dd579ffd41"
+ revision_id="erik@b&#229;gfors-02"
+ timestamp="1125907235.212"
+ timezone="36000">
+<message>Include &#181;nicode characters
+</message>
+<parents>
+<revision_ref revision_id="erik@b&#229;gfors-01"/>
+</parents>
+</revision>
+"""
+
+_inventory_utf8_v5 = """<inventory file_id="TRE&#233;_ROOT" format="5"
+ revision_id="erik@b&#229;gfors-02">
+<file file_id="b&#229;r-01"
+ name="b&#229;r" parent_id="TRE&#233;_ROOT"
+ revision="erik@b&#229;gfors-01"/>
+<directory name="s&#181;bdir"
+ file_id="s&#181;bdir-01"
+ parent_id="TRE&#233;_ROOT"
+ revision="erik@b&#229;gfors-01"/>
+<file executable="yes" file_id="b&#229;r-02"
+ name="b&#229;r" parent_id="s&#181;bdir-01"
+ revision="erik@b&#229;gfors-02"/>
+</inventory>
+"""
+
+# Before revision_id was always stored as an attribute
+_inventory_v5a = """<inventory format="5">
+</inventory>
+"""
+
+# Before revision_id was always stored as an attribute
+_inventory_v5b = """<inventory format="5" revision_id="a-rev-id">
+</inventory>
+"""
+
+
+class TestSerializer(TestCase):
+ """Test XML serialization"""
+
+ def test_unpack_revision_5(self):
+ """Test unpacking a canned revision v5"""
+ inp = StringIO(_revision_v5)
+ rev = bzrlib.xml5.serializer_v5.read_revision(inp)
+ eq = self.assertEqual
+ eq(rev.committer,
+ "Martin Pool <mbp@sourcefrog.net>")
+ eq(len(rev.parent_ids), 1)
+ eq(rev.timezone, 36000)
+ eq(rev.parent_ids[0],
+ "mbp@sourcefrog.net-20050905063503-43948f59fa127d92")
+
+ def test_unpack_revision_5_utc(self):
+ inp = StringIO(_revision_v5_utc)
+ rev = bzrlib.xml5.serializer_v5.read_revision(inp)
+ eq = self.assertEqual
+ eq(rev.committer,
+ "Martin Pool <mbp@sourcefrog.net>")
+ eq(len(rev.parent_ids), 1)
+ eq(rev.timezone, 0)
+ eq(rev.parent_ids[0],
+ "mbp@sourcefrog.net-20050905063503-43948f59fa127d92")
+
+ def test_unpack_inventory_5(self):
+ """Unpack canned new-style inventory"""
+ inp = StringIO(_committed_inv_v5)
+ inv = bzrlib.xml5.serializer_v5.read_inventory(inp)
+ eq = self.assertEqual
+ eq(len(inv), 4)
+ ie = inv['bar-20050824000535-6bc48cfad47ed134']
+ eq(ie.kind, 'file')
+ eq(ie.revision, 'mbp@foo-00')
+ eq(ie.name, 'bar')
+ eq(inv[ie.parent_id].kind, 'directory')
+
+ def test_unpack_basis_inventory_5(self):
+ """Unpack canned new-style inventory"""
+ inp = StringIO(_basis_inv_v5)
+ inv = bzrlib.xml5.serializer_v5.read_inventory(inp)
+ eq = self.assertEqual
+ eq(len(inv), 4)
+ eq(inv.revision_id, 'mbp@sourcefrog.net-20050905063503-43948f59fa127d92')
+ ie = inv['bar-20050824000535-6bc48cfad47ed134']
+ eq(ie.kind, 'file')
+ eq(ie.revision, 'mbp@foo-00')
+ eq(ie.name, 'bar')
+ eq(inv[ie.parent_id].kind, 'directory')
+
+ def test_unpack_inventory_5a(self):
+ inv = bzrlib.xml5.serializer_v5.read_inventory_from_string(
+ _inventory_v5a, revision_id='test-rev-id')
+ self.assertEqual('test-rev-id', inv.root.revision)
+
+ def test_unpack_inventory_5a_cache_and_copy(self):
+ # Passing an entry_cache should get populated with the objects
+ # But the returned objects should be copies if return_from_cache is
+ # False
+ entry_cache = fifo_cache.FIFOCache()
+ inv = bzrlib.xml5.serializer_v5.read_inventory_from_string(
+ _inventory_v5a, revision_id='test-rev-id',
+ entry_cache=entry_cache, return_from_cache=False)
+ for entry in inv.iter_just_entries():
+ key = (entry.file_id, entry.revision)
+ if entry.file_id is inv.root.file_id:
+ # The root id is inferred for xml v5
+ self.assertFalse(key in entry_cache)
+ else:
+ self.assertIsNot(entry, entry_cache[key])
+
+ def test_unpack_inventory_5a_cache_no_copy(self):
+ # Passing an entry_cache should get populated with the objects
+ # The returned objects should be exact if return_from_cache is
+ # True
+ entry_cache = fifo_cache.FIFOCache()
+ inv = bzrlib.xml5.serializer_v5.read_inventory_from_string(
+ _inventory_v5a, revision_id='test-rev-id',
+ entry_cache=entry_cache, return_from_cache=True)
+ for entry in inv.iter_just_entries():
+ key = (entry.file_id, entry.revision)
+ if entry.file_id is inv.root.file_id:
+ # The root id is inferred for xml v5
+ self.assertFalse(key in entry_cache)
+ else:
+ self.assertIs(entry, entry_cache[key])
+
+ def test_unpack_inventory_5b(self):
+ inv = bzrlib.xml5.serializer_v5.read_inventory_from_string(
+ _inventory_v5b, revision_id='test-rev-id')
+ self.assertEqual('a-rev-id', inv.root.revision)
+
+ def test_repack_inventory_5(self):
+ inp = StringIO(_committed_inv_v5)
+ inv = bzrlib.xml5.serializer_v5.read_inventory(inp)
+ outp = StringIO()
+ bzrlib.xml5.serializer_v5.write_inventory(inv, outp)
+ self.assertEqualDiff(_expected_inv_v5, outp.getvalue())
+ inv2 = bzrlib.xml5.serializer_v5.read_inventory(StringIO(outp.getvalue()))
+ self.assertEqual(inv, inv2)
+
+ def assertRoundTrips(self, xml_string):
+ inp = StringIO(xml_string)
+ inv = bzrlib.xml5.serializer_v5.read_inventory(inp)
+ outp = StringIO()
+ bzrlib.xml5.serializer_v5.write_inventory(inv, outp)
+ self.assertEqualDiff(xml_string, outp.getvalue())
+ lines = bzrlib.xml5.serializer_v5.write_inventory_to_lines(inv)
+ outp.seek(0)
+ self.assertEqual(outp.readlines(), lines)
+ inv2 = bzrlib.xml5.serializer_v5.read_inventory(StringIO(outp.getvalue()))
+ self.assertEqual(inv, inv2)
+
+    def test_serialize_inventory_v5_with_root(self):
+ self.assertRoundTrips(_expected_inv_v5_root)
+
+ def check_repack_revision(self, txt):
+ """Check that repacking a revision yields the same information"""
+ inp = StringIO(txt)
+ rev = bzrlib.xml5.serializer_v5.read_revision(inp)
+ outp = StringIO()
+ bzrlib.xml5.serializer_v5.write_revision(rev, outp)
+ outfile_contents = outp.getvalue()
+ rev2 = bzrlib.xml5.serializer_v5.read_revision(StringIO(outfile_contents))
+ self.assertEqual(rev, rev2)
+
+ def test_repack_revision_5(self):
+ """Round-trip revision to XML v5"""
+ self.check_repack_revision(_revision_v5)
+
+ def test_repack_revision_5_utc(self):
+ self.check_repack_revision(_revision_v5_utc)
+
+ def test_pack_revision_5(self):
+ """Pack revision to XML v5"""
+ # fixed 20051025, revisions should have final newline
+ rev = bzrlib.xml5.serializer_v5.read_revision_from_string(_revision_v5)
+ outp = StringIO()
+ bzrlib.xml5.serializer_v5.write_revision(rev, outp)
+ outfile_contents = outp.getvalue()
+ self.assertEqual(outfile_contents[-1], '\n')
+ self.assertEqualDiff(outfile_contents, bzrlib.xml5.serializer_v5.write_revision_to_string(rev))
+ self.assertEqualDiff(outfile_contents, _expected_rev_v5)
+
+ def test_empty_property_value(self):
+ """Create an empty property value check that it serializes correctly"""
+ s_v5 = bzrlib.xml5.serializer_v5
+ rev = s_v5.read_revision_from_string(_revision_v5)
+ outp = StringIO()
+ props = {'empty':'', 'one':'one'}
+ rev.properties = props
+ txt = s_v5.write_revision_to_string(rev)
+ new_rev = s_v5.read_revision_from_string(txt)
+ self.assertEqual(props, new_rev.properties)
+
+ def get_sample_inventory(self):
+ inv = Inventory('tree-root-321', revision_id='rev_outer')
+ inv.add(inventory.InventoryFile('file-id', 'file', 'tree-root-321'))
+ inv.add(inventory.InventoryDirectory('dir-id', 'dir',
+ 'tree-root-321'))
+ inv.add(inventory.InventoryLink('link-id', 'link', 'tree-root-321'))
+ inv['tree-root-321'].revision = 'rev_outer'
+ inv['dir-id'].revision = 'rev_outer'
+ inv['file-id'].revision = 'rev_outer'
+ inv['file-id'].text_sha1 = 'A'
+ inv['file-id'].text_size = 1
+ inv['link-id'].revision = 'rev_outer'
+ inv['link-id'].symlink_target = 'a'
+ return inv
+
+ def test_roundtrip_inventory_v7(self):
+ inv = self.get_sample_inventory()
+ inv.add(inventory.TreeReference('nested-id', 'nested', 'tree-root-321',
+ 'rev_outer', 'rev_inner'))
+ txt = xml7.serializer_v7.write_inventory_to_string(inv)
+ lines = xml7.serializer_v7.write_inventory_to_lines(inv)
+ self.assertEqual(bzrlib.osutils.split_lines(txt), lines)
+ self.assertEqualDiff(_expected_inv_v7, txt)
+ inv2 = xml7.serializer_v7.read_inventory_from_string(txt)
+ self.assertEqual(5, len(inv2))
+ for path, ie in inv.iter_entries():
+ self.assertEqual(ie, inv2[ie.file_id])
+
+ def test_roundtrip_inventory_v6(self):
+ inv = self.get_sample_inventory()
+ txt = xml6.serializer_v6.write_inventory_to_string(inv)
+ lines = xml6.serializer_v6.write_inventory_to_lines(inv)
+ self.assertEqual(bzrlib.osutils.split_lines(txt), lines)
+ self.assertEqualDiff(_expected_inv_v6, txt)
+ inv2 = xml6.serializer_v6.read_inventory_from_string(txt)
+ self.assertEqual(4, len(inv2))
+ for path, ie in inv.iter_entries():
+ self.assertEqual(ie, inv2[ie.file_id])
+
+ def test_wrong_format_v7(self):
+ """Can't accidentally open a file with wrong serializer"""
+ s_v6 = bzrlib.xml6.serializer_v6
+ s_v7 = xml7.serializer_v7
+ self.assertRaises(errors.UnexpectedInventoryFormat,
+ s_v7.read_inventory_from_string, _expected_inv_v5)
+ self.assertRaises(errors.UnexpectedInventoryFormat,
+ s_v6.read_inventory_from_string, _expected_inv_v7)
+
+ def test_tree_reference(self):
+ s_v5 = bzrlib.xml5.serializer_v5
+ s_v6 = bzrlib.xml6.serializer_v6
+ s_v7 = xml7.serializer_v7
+ inv = Inventory('tree-root-321', revision_id='rev-outer')
+ inv.root.revision = 'root-rev'
+ inv.add(inventory.TreeReference('nested-id', 'nested', 'tree-root-321',
+ 'rev-outer', 'rev-inner'))
+ self.assertRaises(errors.UnsupportedInventoryKind,
+ s_v5.write_inventory_to_string, inv)
+ self.assertRaises(errors.UnsupportedInventoryKind,
+ s_v6.write_inventory_to_string, inv)
+ txt = s_v7.write_inventory_to_string(inv)
+ lines = s_v7.write_inventory_to_lines(inv)
+ self.assertEqual(bzrlib.osutils.split_lines(txt), lines)
+ inv2 = s_v7.read_inventory_from_string(txt)
+ self.assertEqual('tree-root-321', inv2['nested-id'].parent_id)
+ self.assertEqual('rev-outer', inv2['nested-id'].revision)
+ self.assertEqual('rev-inner', inv2['nested-id'].reference_revision)
+
+ def test_roundtrip_inventory_v8(self):
+ inv = self.get_sample_inventory()
+ txt = xml8.serializer_v8.write_inventory_to_string(inv)
+ inv2 = xml8.serializer_v8.read_inventory_from_string(txt)
+ self.assertEqual(4, len(inv2))
+ for path, ie in inv.iter_entries():
+ self.assertEqual(ie, inv2[ie.file_id])
+
+ def test_inventory_text_v8(self):
+ inv = self.get_sample_inventory()
+ txt = xml8.serializer_v8.write_inventory_to_string(inv)
+ lines = xml8.serializer_v8.write_inventory_to_lines(inv)
+ self.assertEqual(bzrlib.osutils.split_lines(txt), lines)
+ self.assertEqualDiff(_expected_inv_v8, txt)
+
+ def test_revision_text_v6(self):
+ """Pack revision to XML v6"""
+ rev = bzrlib.xml6.serializer_v6.read_revision_from_string(
+ _expected_rev_v5)
+ serialized = bzrlib.xml6.serializer_v6.write_revision_to_string(rev)
+ self.assertEqualDiff(serialized, _expected_rev_v5)
+
+ def test_revision_text_v7(self):
+ """Pack revision to XML v7"""
+ rev = bzrlib.xml7.serializer_v7.read_revision_from_string(
+ _expected_rev_v5)
+ serialized = bzrlib.xml7.serializer_v7.write_revision_to_string(rev)
+ self.assertEqualDiff(serialized, _expected_rev_v5)
+
+ def test_revision_text_v8(self):
+ """Pack revision to XML v8"""
+ rev = bzrlib.xml8.serializer_v8.read_revision_from_string(
+ _expected_rev_v8)
+ serialized = bzrlib.xml8.serializer_v8.write_revision_to_string(rev)
+ self.assertEqualDiff(serialized, _expected_rev_v8)
+
+ def test_revision_ids_are_utf8(self):
+ """Parsed revision_ids should all be utf-8 strings, not unicode."""
+ s_v5 = bzrlib.xml5.serializer_v5
+ rev = s_v5.read_revision_from_string(_revision_utf8_v5)
+ self.assertEqual('erik@b\xc3\xa5gfors-02', rev.revision_id)
+ self.assertIsInstance(rev.revision_id, str)
+ self.assertEqual(['erik@b\xc3\xa5gfors-01'], rev.parent_ids)
+ for parent_id in rev.parent_ids:
+ self.assertIsInstance(parent_id, str)
+ self.assertEqual(u'Include \xb5nicode characters\n', rev.message)
+ self.assertIsInstance(rev.message, unicode)
+
+ # ie.revision should either be None or a utf-8 revision id
+ inv = s_v5.read_inventory_from_string(_inventory_utf8_v5)
+ rev_id_1 = u'erik@b\xe5gfors-01'.encode('utf8')
+ rev_id_2 = u'erik@b\xe5gfors-02'.encode('utf8')
+ fid_root = u'TRE\xe9_ROOT'.encode('utf8')
+ fid_bar1 = u'b\xe5r-01'.encode('utf8')
+ fid_sub = u's\xb5bdir-01'.encode('utf8')
+ fid_bar2 = u'b\xe5r-02'.encode('utf8')
+ expected = [(u'', fid_root, None, rev_id_2),
+ (u'b\xe5r', fid_bar1, fid_root, rev_id_1),
+ (u's\xb5bdir', fid_sub, fid_root, rev_id_1),
+ (u's\xb5bdir/b\xe5r', fid_bar2, fid_sub, rev_id_2),
+ ]
+ self.assertEqual(rev_id_2, inv.revision_id)
+ self.assertIsInstance(inv.revision_id, str)
+
+ actual = list(inv.iter_entries_by_dir())
+ for ((exp_path, exp_file_id, exp_parent_id, exp_rev_id),
+ (act_path, act_ie)) in zip(expected, actual):
+ self.assertEqual(exp_path, act_path)
+ self.assertIsInstance(act_path, unicode)
+ self.assertEqual(exp_file_id, act_ie.file_id)
+ self.assertIsInstance(act_ie.file_id, str)
+ self.assertEqual(exp_parent_id, act_ie.parent_id)
+ if exp_parent_id is not None:
+ self.assertIsInstance(act_ie.parent_id, str)
+ self.assertEqual(exp_rev_id, act_ie.revision)
+ if exp_rev_id is not None:
+ self.assertIsInstance(act_ie.revision, str)
+
+ self.assertEqual(len(expected), len(actual))
+
+
+class TestEncodeAndEscape(TestCase):
+ """Whitebox testing of the _encode_and_escape function."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ # Keep the cache clear before and after the test
+ bzrlib.xml_serializer._clear_cache()
+ self.addCleanup(bzrlib.xml_serializer._clear_cache)
+
+ def test_simple_ascii(self):
+ # _encode_and_escape always appends a final ", because these parameters
+        # are being used in xml attributes, and by appending it now, we have to
+ # do fewer string operations later.
+ val = bzrlib.xml_serializer.encode_and_escape('foo bar')
+ self.assertEqual('foo bar"', val)
+ # The second time should be cached
+ val2 = bzrlib.xml_serializer.encode_and_escape('foo bar')
+ self.assertIs(val2, val)
+
+ def test_ascii_with_xml(self):
+ self.assertEqual('&amp;&apos;&quot;&lt;&gt;"',
+ bzrlib.xml_serializer.encode_and_escape('&\'"<>'))
+
+ def test_utf8_with_xml(self):
+ # u'\xb5\xe5&\u062c'
+ utf8_str = '\xc2\xb5\xc3\xa5&\xd8\xac'
+ self.assertEqual('&#181;&#229;&amp;&#1580;"',
+ bzrlib.xml_serializer.encode_and_escape(utf8_str))
+
+ def test_unicode(self):
+ uni_str = u'\xb5\xe5&\u062c'
+ self.assertEqual('&#181;&#229;&amp;&#1580;"',
+ bzrlib.xml_serializer.encode_and_escape(uni_str))
+
+
+class TestMisc(TestCase):
+
+ def test_unescape_xml(self):
+ """We get some kind of error when malformed entities are passed"""
+ self.assertRaises(KeyError, bzrlib.xml8._unescape_xml, 'foo&bar;')
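For orientation, a standalone sketch of the serializer round trip these tests rely on; the inventory is built the same way as get_sample_inventory above, and nothing here goes beyond calls that appear in the tests.

    from bzrlib import inventory, xml8
    from bzrlib.inventory import Inventory

    inv = Inventory('tree-root-321', revision_id='rev_outer')
    inv.add(inventory.InventoryFile('file-id', 'file', 'tree-root-321'))
    inv['tree-root-321'].revision = 'rev_outer'
    inv['file-id'].revision = 'rev_outer'
    inv['file-id'].text_sha1 = 'A'
    inv['file-id'].text_size = 1

    # Serialize to XML and back; the re-read inventory compares equal
    # entry by entry, which is what test_roundtrip_inventory_v8 asserts.
    txt = xml8.serializer_v8.write_inventory_to_string(inv)
    inv2 = xml8.serializer_v8.read_inventory_from_string(txt)
    assert inv2['file-id'].name == 'file'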
diff --git a/bzrlib/tests/testui.py b/bzrlib/tests/testui.py
new file mode 100644
index 0000000..9a4a81c
--- /dev/null
+++ b/bzrlib/tests/testui.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""UI implementations for use in testing.
+"""
+
+
+from bzrlib import (
+ progress,
+ ui,
+ )
+
+
+class ProgressRecordingUIFactory(ui.UIFactory, progress.DummyProgress):
+ """Captures progress updates made through it.
+
+ This is overloaded as both the UIFactory and the progress model."""
+
+ def __init__(self):
+ super(ProgressRecordingUIFactory, self).__init__()
+ self._calls = []
+ self.depth = 0
+
+ def nested_progress_bar(self):
+ self.depth += 1
+ return self
+
+ def finished(self):
+ self.depth -= 1
+
+ def update(self, message, count=None, total=None):
+ if self.depth == 1:
+ self._calls.append(("update", count, total, message))
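A short usage sketch for ProgressRecordingUIFactory (assumed usage, not taken from this diff): install it as the global UI factory, drive a progress bar, and inspect the recorded tuples.

    from bzrlib import ui
    from bzrlib.tests.testui import ProgressRecordingUIFactory

    factory = ProgressRecordingUIFactory()
    old_factory, ui.ui_factory = ui.ui_factory, factory
    try:
        pb = factory.nested_progress_bar()
        pb.update('copying texts', 3, 10)   # recorded because depth == 1
        pb.finished()
    finally:
        ui.ui_factory = old_factory

    assert factory._calls == [('update', 3, 10, 'copying texts')]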
diff --git a/bzrlib/tests/transport_util.py b/bzrlib/tests/transport_util.py
new file mode 100644
index 0000000..189e17d
--- /dev/null
+++ b/bzrlib/tests/transport_util.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2007-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from bzrlib.tests import features
+
+# SFTPTransport offers better performance but relies on paramiko; if paramiko
+# is not available, we fall back to FtpTransport
+if features.paramiko.available():
+ from bzrlib.tests import test_sftp_transport
+ from bzrlib.transport import sftp, Transport
+ _backing_scheme = 'sftp'
+ _backing_transport_class = sftp.SFTPTransport
+ _backing_test_class = test_sftp_transport.TestCaseWithSFTPServer
+else:
+ from bzrlib.transport import ftp, Transport
+ from bzrlib.tests import test_ftp_transport
+ _backing_scheme = 'ftp'
+ _backing_transport_class = ftp.FtpTransport
+ _backing_test_class = test_ftp_transport.TestCaseWithFTPServer
+
+
+class TestCaseWithConnectionHookedTransport(_backing_test_class):
+
+ def setUp(self):
+ super(TestCaseWithConnectionHookedTransport, self).setUp()
+ self.reset_connections()
+
+ def start_logging_connections(self):
+ Transport.hooks.install_named_hook('post_connect',
+ self.connections.append, None)
+
+ def reset_connections(self):
+ self.connections = []
+
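An illustrative sketch of how a subclass might use this base class (an assumption about intended use, not code from this diff), relying on the get_url() the backing test server classes normally provide.

    from bzrlib import transport
    from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport


    class TestConnectionsAreLogged(TestCaseWithConnectionHookedTransport):

        def test_first_use_connects_once(self):
            self.start_logging_connections()
            t = transport.get_transport(self.get_url())
            t.has('not-there')   # first use establishes the connection
            self.assertEqual(1, len(self.connections))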
diff --git a/bzrlib/tests/treeshape.py b/bzrlib/tests/treeshape.py
new file mode 100644
index 0000000..0b88f7b
--- /dev/null
+++ b/bzrlib/tests/treeshape.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2005, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Test helper for constructing and testing directories.
+
+This module transforms filesystem directories to and from Python lists.
+As a Python list the descriptions can be stored in test cases, compared,
+etc.
+"""
+
+# TODO: Script to write a description of a directory for testing
+# TODO: Helper that compares two structures and raises a helpful error
+# where they differ. Option to ignore some files or directories in the
+# comparison.
+
+import os
+import stat
+
+from bzrlib.trace import warning
+from bzrlib.osutils import pathjoin
+
+def build_tree_contents(template):
+ """Reconstitute some files from a text description.
+
+ Each element of template is a tuple. The first element is a filename,
+ with an optional ending character indicating the type.
+
+ The template is built relative to the Python process's current
+ working directory.
+
+ ('foo/',) will build a directory.
+ ('foo', 'bar') will write 'bar' to 'foo'
+    ('foo@', 'linktarget') will create a symlink to 'linktarget'
+ """
+ for tt in template:
+ name = tt[0]
+ if name[-1] == '/':
+ os.mkdir(name)
+ elif name[-1] == '@':
+ os.symlink(tt[1], tt[0][:-1])
+ else:
+ f = file(name, 'wb')
+ try:
+ f.write(tt[1])
+ finally:
+ f.close()
+
+
+def capture_tree_contents(top):
+ """Make a Python datastructure description of a tree.
+
+ If top is an absolute path the descriptions will be absolute."""
+ for dirpath, dirnames, filenames in os.walk(top):
+ yield (dirpath + '/', )
+ filenames.sort()
+ for fn in filenames:
+ fullpath = pathjoin(dirpath, fn)
+ if (fullpath[-1] in '@/'):
+ raise AssertionError(fullpath)
+ info = os.lstat(fullpath)
+ if stat.S_ISLNK(info.st_mode):
+ yield (fullpath + '@', os.readlink(fullpath))
+ elif stat.S_ISREG(info.st_mode):
+ yield (fullpath, file(fullpath, 'rb').read())
+ else:
+ warning("can't capture file %s with mode %#o",
+ fullpath, info.st_mode)
+ pass
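A quick sketch of the template format the docstring above describes, assuming an empty scratch directory is the current working directory (the path below is hypothetical).

    import os

    from bzrlib.tests.treeshape import build_tree_contents, capture_tree_contents

    os.chdir('/tmp/treeshape-demo')            # hypothetical empty directory
    build_tree_contents([
        ('docs/', ),                           # a directory
        ('docs/readme.txt', 'hello\n'),        # a file with this content
        ('link@', 'docs/readme.txt'),          # a symlink
        ])

    # capture_tree_contents yields a comparable description back again.
    print sorted(capture_tree_contents('.'))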
diff --git a/bzrlib/textfile.py b/bzrlib/textfile.py
new file mode 100644
index 0000000..437f830
--- /dev/null
+++ b/bzrlib/textfile.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Utilities for distinguishing binary files from text files"""
+
+from __future__ import absolute_import
+
+from itertools import chain
+
+from bzrlib.errors import BinaryFile
+from bzrlib.iterablefile import IterableFile
+from bzrlib.osutils import file_iterator
+
+
+def text_file(input):
+ """Produce a file iterator that is guaranteed to be text, without seeking.
+ BinaryFile is raised if the file contains a NUL in the first 1024 bytes.
+ """
+ first_chunk = input.read(1024)
+ if '\x00' in first_chunk:
+ raise BinaryFile()
+ return IterableFile(chain((first_chunk,), file_iterator(input)))
+
+
+def check_text_lines(lines):
+ """Raise BinaryFile if the supplied lines contain NULs.
+ Only the first 1024 characters are checked.
+ """
+ f = IterableFile(lines)
+ if '\x00' in f.read(1024):
+ raise BinaryFile()
+
+
+def check_text_path(path):
+ """Check whether the supplied path is a text, not binary file.
+ Raise BinaryFile if a NUL occurs in the first 1024 bytes.
+ """
+ f = open(path, 'rb')
+ try:
+ text_file(f)
+ finally:
+ f.close()
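A small sketch of the helpers above: check_text_lines raises BinaryFile on a NUL within the first 1024 characters, and text_file wraps a stream without seeking.

    from cStringIO import StringIO

    from bzrlib.errors import BinaryFile
    from bzrlib.textfile import check_text_lines, text_file

    check_text_lines(['an ordinary line\n'])    # passes silently
    try:
        check_text_lines(['bad\x00line\n'])
    except BinaryFile:
        print 'detected binary content'

    # Clean text passes through the wrapper unchanged.
    wrapped = text_file(StringIO('plain text\nmore text\n'))
    print wrapped.read()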
diff --git a/bzrlib/textinv.py b/bzrlib/textinv.py
new file mode 100644
index 0000000..cac51b4
--- /dev/null
+++ b/bzrlib/textinv.py
@@ -0,0 +1,93 @@
+# Copyright (C) 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib.errors import BzrError
+from bzrlib.inventory import Inventory
+
+
+START_MARK = "# bzr inventory format 3\n"
+END_MARK = "# end of inventory\n"
+
+
+def escape(s):
+ """Very simple URL-like escaping.
+
+ (Why not just use backslashes? Because then we couldn't parse
+ lines just by splitting on spaces.)"""
+ return (s.replace('\\', r'\x5c')
+ .replace(' ', r'\x20')
+ .replace('\t', r'\x09')
+ .replace('\n', r'\x0a'))
+
+
+
+def unescape(s):
+ if s.find(' ') != -1:
+ raise AssertionError()
+ s = (s.replace(r'\x20', ' ')
+ .replace(r'\x09', '\t')
+ .replace(r'\x0a', '\n')
+ .replace(r'\x5c', '\\'))
+
+ # TODO: What if there's anything else?
+
+ return s
+
+
+
+
+def write_text_inventory(inv, outf):
+ """Write out inv in a simple trad-unix text format."""
+ outf.write(START_MARK)
+ for path, ie in inv.iter_entries():
+ if inv.is_root(ie.file_id):
+ continue
+
+ outf.write(ie.file_id + ' ')
+ outf.write(escape(ie.name) + ' ')
+ outf.write(ie.kind + ' ')
+ outf.write(ie.parent_id + ' ')
+
+ if ie.kind == 'file':
+ outf.write(ie.text_id)
+ outf.write(' ' + ie.text_sha1)
+ outf.write(' ' + str(ie.text_size))
+ outf.write("\n")
+ outf.write(END_MARK)
+
+
+def read_text_inventory(tf):
+ """Return an inventory read in from tf"""
+ if tf.readline() != START_MARK:
+ raise BzrError("missing start mark")
+
+ inv = Inventory()
+
+ for l in tf:
+ fields = l.split(' ')
+ if fields[0] == '#':
+ break
+ ie = {'file_id': fields[0],
+ 'name': unescape(fields[1]),
+ 'kind': fields[2],
+ 'parent_id': fields[3]}
+ ##inv.add(ie)
+
+ if l != END_MARK:
+ raise BzrError("missing end mark")
+ return inv
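The escaping scheme above exists so every inventory line stays splittable on spaces; a quick round-trip sketch:

    from bzrlib.textinv import escape, unescape

    name = 'a file\twith spaces\n'
    encoded = escape(name)
    assert ' ' not in encoded           # safe to split the line on spaces
    assert unescape(encoded) == name    # and the original name comes back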
diff --git a/bzrlib/textmerge.py b/bzrlib/textmerge.py
new file mode 100644
index 0000000..4de0cda
--- /dev/null
+++ b/bzrlib/textmerge.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2006, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: Martin Pool <mbp@canonical.com>
+# Aaron Bentley <aaron.bentley@utoronto.ca>
+
+from __future__ import absolute_import
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import patiencediff
+""")
+
+
+class TextMerge(object):
+ """Base class for text-mergers
+ Subclasses must implement _merge_struct.
+
+ Many methods produce or consume structured merge information.
+ This is an iterable of tuples of lists of lines.
+ Each tuple may have a length of 1 - 3, depending on whether the region it
+ represents is conflicted.
+
+ Unconflicted region tuples have length 1.
+    Conflicted region tuples have length 2 or 3. Index 0 is text_a, e.g. THIS.
+    Index 1 is text_b, e.g. OTHER. Index 2 is optional. If present, it
+ represents BASE.
+ """
+ # TODO: Show some version information (e.g. author, date) on conflicted
+ # regions.
+ A_MARKER = '<<<<<<< \n'
+ B_MARKER = '>>>>>>> \n'
+ SPLIT_MARKER = '=======\n'
+ def __init__(self, a_marker=A_MARKER, b_marker=B_MARKER,
+ split_marker=SPLIT_MARKER):
+ self.a_marker = a_marker
+ self.b_marker = b_marker
+ self.split_marker = split_marker
+
+ def _merge_struct(self):
+ """Return structured merge info. Must be implemented by subclasses.
+ See TextMerge docstring for details on the format.
+ """
+ raise NotImplementedError('_merge_struct is abstract')
+
+ def struct_to_lines(self, struct_iter):
+ """Convert merge result tuples to lines"""
+ for lines in struct_iter:
+ if len(lines) == 1:
+ for line in lines[0]:
+ yield line
+ else:
+ yield self.a_marker
+ for line in lines[0]:
+ yield line
+ yield self.split_marker
+ for line in lines[1]:
+ yield line
+ yield self.b_marker
+
+ def iter_useful(self, struct_iter):
+ """Iterate through input tuples, skipping empty ones."""
+ for group in struct_iter:
+ if len(group[0]) > 0:
+ yield group
+ elif len(group) > 1 and len(group[1]) > 0:
+ yield group
+
+ def merge_lines(self, reprocess=False):
+ """Produce an iterable of lines, suitable for writing to a file
+ Returns a tuple of (line iterable, conflict indicator)
+ If reprocess is True, a two-way merge will be performed on the
+ intermediate structure, to reduce conflict regions.
+ """
+ struct = []
+ conflicts = False
+ for group in self.merge_struct(reprocess):
+ struct.append(group)
+ if len(group) > 1:
+ conflicts = True
+ return self.struct_to_lines(struct), conflicts
+
+ def merge_struct(self, reprocess=False):
+ """Produce structured merge info"""
+ struct_iter = self.iter_useful(self._merge_struct())
+ if reprocess is True:
+ return self.reprocess_struct(struct_iter)
+ else:
+ return struct_iter
+
+ @staticmethod
+ def reprocess_struct(struct_iter):
+ """ Perform a two-way merge on structural merge info.
+ This reduces the size of conflict regions, but breaks the connection
+ between the BASE text and the conflict region.
+
+ This process may split a single conflict region into several smaller
+ ones, but will not introduce new conflicts.
+ """
+ for group in struct_iter:
+ if len(group) == 1:
+ yield group
+ else:
+ for newgroup in Merge2(group[0], group[1]).merge_struct():
+ yield newgroup
+
+
+class Merge2(TextMerge):
+ """ Two-way merge.
+ In a two way merge, common regions are shown as unconflicting, and uncommon
+ regions produce conflicts.
+ """
+
+ def __init__(self, lines_a, lines_b, a_marker=TextMerge.A_MARKER,
+ b_marker=TextMerge.B_MARKER,
+ split_marker=TextMerge.SPLIT_MARKER):
+ TextMerge.__init__(self, a_marker, b_marker, split_marker)
+ self.lines_a = lines_a
+ self.lines_b = lines_b
+
+ def _merge_struct(self):
+ """Return structured merge info.
+ See TextMerge docstring.
+ """
+ sm = patiencediff.PatienceSequenceMatcher(
+ None, self.lines_a, self.lines_b)
+ pos_a = 0
+ pos_b = 0
+ for ai, bi, l in sm.get_matching_blocks():
+ # non-matching lines
+ yield(self.lines_a[pos_a:ai], self.lines_b[pos_b:bi])
+ # matching lines
+ yield(self.lines_a[ai:ai+l],)
+ pos_a = ai + l
+ pos_b = bi + l
+ # final non-matching lines
+ yield(self.lines_a[pos_a:-1], self.lines_b[pos_b:-1])
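A short sketch of Merge2 in use: matching regions pass through, differing regions come out wrapped in the conflict markers defined on TextMerge.

    from bzrlib.textmerge import Merge2

    lines_a = ['common\n', 'left only\n', 'tail\n']
    lines_b = ['common\n', 'right only\n', 'tail\n']
    merged_lines, had_conflicts = Merge2(lines_a, lines_b).merge_lines()

    # Yields: common, '<<<<<<< ', left only, '=======', right only,
    # '>>>>>>> ', tail -- and had_conflicts is True.
    print ''.join(merged_lines)
    assert had_conflicts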
diff --git a/bzrlib/timestamp.py b/bzrlib/timestamp.py
new file mode 100644
index 0000000..efdfd52
--- /dev/null
+++ b/bzrlib/timestamp.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2007, 2008, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import calendar
+import time
+import re
+
+from bzrlib import osutils
+
+
+def format_highres_date(t, offset=0):
+ """Format a date, such that it includes higher precision in the
+ seconds field.
+
+ :param t: The local time in fractional seconds since the epoch
+ :type t: float
+ :param offset: The timezone offset in integer seconds
+ :type offset: int
+
+ Example: format_highres_date(time.time(), -time.timezone)
+ this will return a date stamp for right now,
+ formatted for the local timezone.
+
+ >>> from bzrlib.osutils import format_date
+ >>> format_date(1120153132.350850105, 0)
+ 'Thu 2005-06-30 17:38:52 +0000'
+ >>> format_highres_date(1120153132.350850105, 0)
+ 'Thu 2005-06-30 17:38:52.350850105 +0000'
+ >>> format_date(1120153132.350850105, -5*3600)
+ 'Thu 2005-06-30 12:38:52 -0500'
+ >>> format_highres_date(1120153132.350850105, -5*3600)
+ 'Thu 2005-06-30 12:38:52.350850105 -0500'
+ >>> format_highres_date(1120153132.350850105, 7200)
+ 'Thu 2005-06-30 19:38:52.350850105 +0200'
+ >>> format_highres_date(1152428738.867522, 19800)
+ 'Sun 2006-07-09 12:35:38.867522001 +0530'
+ """
+ if not isinstance(t, float):
+ raise ValueError(t)
+
+ # This has to be formatted for "original" date, so that the
+ # revision XML entry will be reproduced faithfully.
+ if offset is None:
+ offset = 0
+ tt = time.gmtime(t + offset)
+
+ return (osutils.weekdays[tt[6]] +
+ time.strftime(" %Y-%m-%d %H:%M:%S", tt)
+ # Get the high-res seconds, but ignore the 0
+ + ('%.9f' % (t - int(t)))[1:]
+ + ' %+03d%02d' % (offset / 3600, (offset / 60) % 60))
+
+
+def unpack_highres_date(date):
+ """This takes the high-resolution date stamp, and
+ converts it back into the tuple (timestamp, timezone)
+ Where timestamp is in real UTC since epoch seconds, and timezone is an
+ integer number of seconds offset.
+
+    :param date: A date formatted by format_highres_date
+ :type date: string
+
+ """
+ # Weekday parsing is locale sensitive, so drop the weekday
+ space_loc = date.find(' ')
+ if space_loc == -1 or date[:space_loc] not in osutils.weekdays:
+ raise ValueError(
+ 'Date string does not contain a day of week: %r' % date)
+ # Up until the first period is a datestamp that is generated
+ # as normal from time.strftime, so use time.strptime to
+ # parse it
+ dot_loc = date.find('.')
+ if dot_loc == -1:
+ raise ValueError(
+ 'Date string does not contain high-precision seconds: %r' % date)
+ base_time = time.strptime(date[space_loc:dot_loc], " %Y-%m-%d %H:%M:%S")
+ fract_seconds, offset = date[dot_loc:].split()
+ fract_seconds = float(fract_seconds)
+
+ offset = int(offset)
+
+ hours = int(offset / 100)
+ minutes = (offset % 100)
+ seconds_offset = (hours * 3600) + (minutes * 60)
+
+ # time.mktime returns localtime, but calendar.timegm returns UTC time
+ timestamp = calendar.timegm(base_time)
+ timestamp -= seconds_offset
+ # Add back in the fractional seconds
+ timestamp += fract_seconds
+ return (timestamp, seconds_offset)
+
+
+def format_patch_date(secs, offset=0):
+ """Format a POSIX timestamp and optional offset as a patch-style date.
+
+ Inverse of parse_patch_date.
+ """
+ if offset % 60 != 0:
+ raise ValueError(
+ "can't represent timezone %s offset by fractional minutes" % offset)
+ # so that we don't need to do calculations on pre-epoch times,
+ # which doesn't work with win32 python gmtime, we always
+ # give the epoch in utc
+ if secs == 0:
+ offset = 0
+ if secs + offset < 0:
+ from warnings import warn
+ warn("gmtime of negative time (%s, %s) may not work on Windows" %
+ (secs, offset))
+ return osutils.format_date(secs, offset=offset,
+ date_fmt='%Y-%m-%d %H:%M:%S')
+
+
+# Format for patch dates: %Y-%m-%d %H:%M:%S [+-]%H%M
+# Groups: 1 = %Y-%m-%d %H:%M:%S; 2 = [+-]%H; 3 = %M
+RE_PATCHDATE = re.compile("(\d+-\d+-\d+\s+\d+:\d+:\d+)\s*([+-]\d\d)(\d\d)$")
+RE_PATCHDATE_NOOFFSET = re.compile("\d+-\d+-\d+\s+\d+:\d+:\d+$")
+
+def parse_patch_date(date_str):
+ """Parse a patch-style date into a POSIX timestamp and offset.
+
+ Inverse of format_patch_date.
+ """
+ match = RE_PATCHDATE.match(date_str)
+ if match is None:
+ if RE_PATCHDATE_NOOFFSET.match(date_str) is not None:
+ raise ValueError("time data %r is missing a timezone offset"
+ % date_str)
+ else:
+ raise ValueError("time data %r does not match format " % date_str
+ + "'%Y-%m-%d %H:%M:%S %z'")
+ secs_str = match.group(1)
+ offset_hours, offset_mins = int(match.group(2)), int(match.group(3))
+ if abs(offset_hours) >= 24 or offset_mins >= 60:
+ raise ValueError("invalid timezone %r" %
+ (match.group(2) + match.group(3)))
+ offset = offset_hours * 3600 + offset_mins * 60
+ tm_time = time.strptime(secs_str, '%Y-%m-%d %H:%M:%S')
+ # adjust seconds according to offset before converting to POSIX
+ # timestamp, to avoid edge problems
+ tm_time = tm_time[:5] + (tm_time[5] - offset,) + tm_time[6:]
+ secs = calendar.timegm(tm_time)
+ return secs, offset
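A round-trip sketch for the patch-date helpers above (the date string is illustrative):

    from bzrlib.timestamp import format_patch_date, parse_patch_date

    secs, offset = parse_patch_date('2011-12-01 10:30:00 +0530')
    # secs is a POSIX timestamp; offset is 19800 seconds east of UTC.
    assert offset == 5 * 3600 + 30 * 60
    assert format_patch_date(secs, offset) == '2011-12-01 10:30:00 +0530'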
diff --git a/bzrlib/trace.py b/bzrlib/trace.py
new file mode 100644
index 0000000..03e123a
--- /dev/null
+++ b/bzrlib/trace.py
@@ -0,0 +1,644 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Messages and logging.
+
+Messages are supplied by callers as a string-formatting template, plus values
+to be inserted into it. The actual %-formatting is deferred to the log
+library so that it doesn't need to be done for messages that won't be emitted.
+
+Messages are classified by severity levels: critical, error, warning, info,
+and debug.
+
+They can be sent to two places: to stderr, and to ~/.bzr.log. For purposes
+such as running the test suite, they can also be redirected away from both of
+those two places to another location.
+
+~/.bzr.log gets all messages, and full tracebacks for uncaught exceptions.
+This trace file is always in UTF-8, regardless of the user's default encoding,
+so that we can always rely on writing any message.
+
+Output to stderr depends on the mode chosen by the user. By default, messages
+of info and above are sent out, which results in progress messages such as the
+list of files processed by add and commit. In debug mode, stderr gets debug messages too.
+
+Errors that terminate an operation are generally passed back as exceptions;
+others may be just emitted as messages.
+
+Exceptions are reported in a brief form to stderr so as not to look scary.
+BzrErrors are required to be able to format themselves into a properly
+explanatory message. This is not true for builtin exceptions such as
+KeyError, whose str() is typically just the missing key (e.g. "0"), so they
+are printed in a different form.
+"""
+
+from __future__ import absolute_import
+
+# FIXME: Unfortunately it turns out that python's logging module
+# is quite expensive, even when the message is not printed by any handlers.
+# We should perhaps change back to just simply doing it here.
+#
+# On the other hand, as of 1.2 we generally only call the mutter() statement
+# if (according to debug_flags) we actually intend to write it. So the
+# increased cost of logging.py is not so bad, and we could standardize on
+# that.
+
+import logging
+import os
+import sys
+import time
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from cStringIO import StringIO
+import errno
+import locale
+import tempfile
+import traceback
+""")
+
+import bzrlib
+
+lazy_import(globals(), """
+from bzrlib import (
+ debug,
+ errors,
+ osutils,
+ ui,
+ )
+""")
+
+
+# global verbosity for bzrlib; controls the log level for stderr; 0=normal; <0
+# is quiet; >0 is verbose.
+_verbosity_level = 0
+
+# File-like object where mutter/debug output is currently sent. Can be
+# changed by _push_log_file etc. This is directly manipulated by some
+# external code; maybe there should be functions to do that more precisely
+# than push/pop_log_file.
+_trace_file = None
+
+# Absolute path for ~/.bzr.log. Not changed even if the log/trace output is
+# redirected elsewhere. Used to show the location in --version.
+_bzr_log_filename = None
+
+# The time the first message was written to the trace file, so that we can
+# show relative times since startup.
+_bzr_log_start_time = bzrlib._start_time
+
+
+# held in a global for quick reference
+_bzr_logger = logging.getLogger('bzr')
+
+
+def note(*args, **kwargs):
+ """Output a note to the user.
+
+ Takes the same parameters as logging.info.
+
+ :return: None
+ """
+ # FIXME: clearing the ui and then going through the abstract logging
+ # framework is whack; we should probably have a logging Handler that
+ # deals with terminal output if needed.
+ ui.ui_factory.clear_term()
+ _bzr_logger.info(*args, **kwargs)
+
+
+def warning(*args, **kwargs):
+ ui.ui_factory.clear_term()
+ _bzr_logger.warning(*args, **kwargs)
+
+
+def show_error(*args, **kwargs):
+ """Show an error message to the user.
+
+ Don't use this for exceptions, use report_exception instead.
+ """
+ _bzr_logger.error(*args, **kwargs)
+
+
+def mutter(fmt, *args):
+ if _trace_file is None:
+ return
+ # XXX: Don't check this every time; instead anyone who closes the file
+ # ought to deregister it. We can tolerate None.
+ if (getattr(_trace_file, 'closed', None) is not None) and _trace_file.closed:
+ return
+
+ if isinstance(fmt, unicode):
+ fmt = fmt.encode('utf8')
+
+ if len(args) > 0:
+        # Mixing unicode and byte-string arguments in %-substitution can
+        # raise UnicodeError, so encode any unicode arguments to UTF-8 byte
+        # strings to match fmt, which was encoded above if necessary.
+ real_args = []
+ for arg in args:
+ if isinstance(arg, unicode):
+ arg = arg.encode('utf8')
+ real_args.append(arg)
+ out = fmt % tuple(real_args)
+ else:
+ out = fmt
+ now = time.time()
+ timestamp = '%0.3f ' % (now - _bzr_log_start_time,)
+ out = timestamp + out + '\n'
+ _trace_file.write(out)
+ # there's no explicit flushing; the file is typically line buffered.
+
+
+def mutter_callsite(stacklevel, fmt, *args):
+ """Perform a mutter of fmt and args, logging the call trace.
+
+ :param stacklevel: The number of frames to show. None will show all
+ frames.
+ :param fmt: The format string to pass to mutter.
+ :param args: A list of substitution variables.
+ """
+ outf = StringIO()
+ if stacklevel is None:
+ limit = None
+ else:
+ limit = stacklevel + 1
+ traceback.print_stack(limit=limit, file=outf)
+ formatted_lines = outf.getvalue().splitlines()
+ formatted_stack = '\n'.join(formatted_lines[:-2])
+ mutter(fmt + "\nCalled from:\n%s", *(args + (formatted_stack,)))
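+
+# A hypothetical use of mutter_callsite, for illustration only: record who is
+# triggering an expensive code path, keeping three stack frames:
+#
+#   mutter_callsite(3, "full history scan requested")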
+
+
+def _rollover_trace_maybe(trace_fname):
+ import stat
+ try:
+ size = os.stat(trace_fname)[stat.ST_SIZE]
+ if size <= 4 << 20:
+ return
+ old_fname = trace_fname + '.old'
+ osutils.rename(trace_fname, old_fname)
+ except OSError:
+ return
+
+
+def _get_bzr_log_filename():
+ bzr_log = osutils.path_from_environ('BZR_LOG')
+ if bzr_log:
+ return bzr_log
+ home = osutils.path_from_environ('BZR_HOME')
+ if home is None:
+ # GZ 2012-02-01: Logging to the home dir is bad, but XDG is unclear
+ # over what would be better. On windows, bug 240550
+ # suggests LOCALAPPDATA be used instead.
+ home = osutils._get_home_dir()
+ return os.path.join(home, '.bzr.log')
+
+
+def _open_bzr_log():
+ """Open the .bzr.log trace file.
+
+    If the log is larger than about 4MB, the old file is renamed to
+    .bzr.log.old and a new file is started. Otherwise, we append to the
+    existing file.
+
+ This sets the global _bzr_log_filename.
+ """
+ global _bzr_log_filename
+
+ def _open_or_create_log_file(filename):
+ """Open existing log file, or create with ownership and permissions
+
+ It inherits the ownership and permissions (masked by umask) from
+ the containing directory to cope better with being run under sudo
+ with $HOME still set to the user's homedir.
+ """
+ flags = os.O_WRONLY | os.O_APPEND | osutils.O_TEXT
+ while True:
+ try:
+ fd = os.open(filename, flags)
+ break
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ fd = os.open(filename, flags | os.O_CREAT | os.O_EXCL, 0666)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ else:
+ osutils.copy_ownership_from_path(filename)
+ break
+ return os.fdopen(fd, 'at', 0) # unbuffered
+
+
+ _bzr_log_filename = _get_bzr_log_filename()
+ _rollover_trace_maybe(_bzr_log_filename)
+ try:
+ bzr_log_file = _open_or_create_log_file(_bzr_log_filename)
+ bzr_log_file.write('\n')
+ if bzr_log_file.tell() <= 2:
+ bzr_log_file.write("this is a debug log for diagnosing/reporting problems in bzr\n")
+ bzr_log_file.write("you can delete or truncate this file, or include sections in\n")
+ bzr_log_file.write("bug reports to https://bugs.launchpad.net/bzr/+filebug\n\n")
+
+ return bzr_log_file
+
+ except EnvironmentError, e:
+ # If we are failing to open the log, then most likely logging has not
+ # been set up yet. So we just write to stderr rather than using
+        # 'warning()'. If we used warning(), users get the unhelpful 'no
+ # handlers registered for "bzr"' when something goes wrong on the
+ # server. (bug #503886)
+ sys.stderr.write("failed to open trace file: %s\n" % (e,))
+ # TODO: What should happen if we fail to open the trace file? Maybe the
+ # objects should be pointed at /dev/null or the equivalent? Currently
+ # returns None which will cause failures later.
+ return None
+
+
+def enable_default_logging():
+ """Configure default logging: messages to stderr and debug to .bzr.log
+
+ This should only be called once per process.
+
+    Non-command-line programs embedding bzrlib do not need to call this. They
+    can instead either pass a file to push_log_file, or act directly on
+    logging.getLogger("bzr").
+
+    Output can be redirected away by calling push_log_file.
+
+ :return: A memento from push_log_file for restoring the log state.
+ """
+ start_time = osutils.format_local_date(_bzr_log_start_time,
+ timezone='local')
+ bzr_log_file = _open_bzr_log()
+ if bzr_log_file is not None:
+ bzr_log_file.write(start_time.encode('utf-8') + '\n')
+ memento = push_log_file(bzr_log_file,
+ r'[%(process)5d] %(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
+ r'%Y-%m-%d %H:%M:%S')
+    # After hooking output into bzr_log, we also need to attach a stderr
+    # handler that writes only at INFO level and above, using the terminal
+    # encoding.
+ stderr_handler = EncodedStreamHandler(sys.stderr,
+ osutils.get_terminal_encoding(), 'replace', level=logging.INFO)
+ logging.getLogger('bzr').addHandler(stderr_handler)
+ return memento
+
+
+def push_log_file(to_file, log_format=None, date_format=None):
+ """Intercept log and trace messages and send them to a file.
+
+ :param to_file: A file-like object to which messages will be sent.
+
+    :returns: A memento that should be passed to pop_log_file to restore the
+        previously active logging.
+ """
+ global _trace_file
+ # make a new handler
+ new_handler = EncodedStreamHandler(to_file, "utf-8", level=logging.DEBUG)
+ if log_format is None:
+ log_format = '%(levelname)8s %(message)s'
+ new_handler.setFormatter(logging.Formatter(log_format, date_format))
+ # save and remove any existing log handlers
+ bzr_logger = logging.getLogger('bzr')
+ old_handlers = bzr_logger.handlers[:]
+ del bzr_logger.handlers[:]
+ # set that as the default logger
+ bzr_logger.addHandler(new_handler)
+ bzr_logger.setLevel(logging.DEBUG)
+ # TODO: check if any changes are needed to the root logger
+ #
+ # TODO: also probably need to save and restore the level on bzr_logger.
+ # but maybe we can avoid setting the logger level altogether, and just set
+ # the level on the handler?
+ #
+ # save the old trace file
+ old_trace_file = _trace_file
+ # send traces to the new one
+ _trace_file = to_file
+ return ('log_memento', old_handlers, new_handler, old_trace_file, to_file)
+
+
+def pop_log_file((magic, old_handlers, new_handler, old_trace_file, new_trace_file)):
+    """Undo changes to logging/tracing done by push_log_file.
+
+    This flushes, but does not close, the trace file (so that anything
+    buffered in it is written out).
+
+    Takes the memento returned from push_log_file."""
+ global _trace_file
+ _trace_file = old_trace_file
+ bzr_logger = logging.getLogger('bzr')
+ bzr_logger.removeHandler(new_handler)
+ # must be closed, otherwise logging will try to close it at exit, and the
+ # file will likely already be closed underneath.
+ new_handler.close()
+ bzr_logger.handlers = old_handlers
+ if new_trace_file is not None:
+ new_trace_file.flush()
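+
+# A minimal sketch of temporarily capturing trace output (the StringIO
+# destination and the message are illustrative, not part of bzrlib):
+#
+#   from cStringIO import StringIO
+#   log = StringIO()
+#   memento = push_log_file(log)
+#   try:
+#       mutter("this line goes to the StringIO, not to ~/.bzr.log")
+#   finally:
+#       pop_log_file(memento)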
+
+
+def log_exception_quietly():
+ """Log the last exception to the trace file only.
+
+ Used for exceptions that occur internally and that may be
+ interesting to developers but not to users. For example,
+ errors loading plugins.
+ """
+ mutter(traceback.format_exc())
+
+
+def set_verbosity_level(level):
+ """Set the verbosity level.
+
+ :param level: -ve for quiet, 0 for normal, +ve for verbose
+ """
+ global _verbosity_level
+ _verbosity_level = level
+ _update_logging_level(level < 0)
+ ui.ui_factory.be_quiet(level < 0)
+
+
+def get_verbosity_level():
+ """Get the verbosity level.
+
+ See set_verbosity_level() for values.
+ """
+ return _verbosity_level
+
+
+def be_quiet(quiet=True):
+ if quiet:
+ set_verbosity_level(-1)
+ else:
+ set_verbosity_level(0)
+
+
+def _update_logging_level(quiet=True):
+ """Hide INFO messages if quiet."""
+ if quiet:
+ _bzr_logger.setLevel(logging.WARNING)
+ else:
+ _bzr_logger.setLevel(logging.INFO)
+
+
+def is_quiet():
+ """Is the verbosity level negative?"""
+ return _verbosity_level < 0
+
+
+def is_verbose():
+ """Is the verbosity level positive?"""
+ return _verbosity_level > 0
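+
+# Illustrative mapping of these levels to command-line options (assuming the
+# usual -q/-v handling in the front end): --quiet calls be_quiet(), giving
+# level -1 so only warnings and errors reach stderr; the default level 0
+# shows info and above; a positive level makes is_verbose() true so callers
+# may print extra detail.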
+
+
+def debug_memory(message='', short=True):
+ """Write out a memory dump."""
+ if sys.platform == 'win32':
+ from bzrlib import win32utils
+ win32utils.debug_memory_win32api(message=message, short=short)
+ else:
+ _debug_memory_proc(message=message, short=short)
+
+
+_short_fields = ('VmPeak', 'VmSize', 'VmRSS')
+
+def _debug_memory_proc(message='', short=True):
+ try:
+ status_file = file('/proc/%s/status' % os.getpid(), 'rb')
+ except IOError:
+ return
+ try:
+ status = status_file.read()
+ finally:
+ status_file.close()
+ if message:
+ note(message)
+ for line in status.splitlines():
+ if not short:
+ note(line)
+ else:
+ for field in _short_fields:
+ if line.startswith(field):
+ note(line)
+ break
+
+def _dump_memory_usage(err_file):
+    # Pre-set so the finally block below cannot raise NameError if mkstemp()
+    # or fdopen() fails before these are assigned.
+    fd = None
+    dump_file = None
+ try:
+ try:
+ fd, name = tempfile.mkstemp(prefix="bzr_memdump", suffix=".json")
+ dump_file = os.fdopen(fd, 'w')
+ from meliae import scanner
+ scanner.dump_gc_objects(dump_file)
+ err_file.write("Memory dumped to %s\n" % name)
+ except ImportError:
+ err_file.write("Dumping memory requires meliae module.\n")
+ log_exception_quietly()
+ except:
+ err_file.write("Exception while dumping memory.\n")
+ log_exception_quietly()
+ finally:
+ if dump_file is not None:
+ dump_file.close()
+ elif fd is not None:
+ os.close(fd)
+
+
+def _qualified_exception_name(eclass, unqualified_bzrlib_errors=False):
+ """Give name of error class including module for non-builtin exceptions
+
+ If `unqualified_bzrlib_errors` is True, errors specific to bzrlib will
+ also omit the module prefix.
+ """
+ class_name = eclass.__name__
+ module_name = eclass.__module__
+ if module_name in ("exceptions", "__main__") or (
+ unqualified_bzrlib_errors and module_name == "bzrlib.errors"):
+ return class_name
+ return "%s.%s" % (module_name, class_name)
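+
+# For example (illustrative): _qualified_exception_name(KeyError) returns
+# 'KeyError' because builtin exceptions live in the 'exceptions' module,
+# while a bzrlib error class yields e.g. 'bzrlib.errors.NoSuchFile' unless
+# unqualified_bzrlib_errors is True, in which case just 'NoSuchFile'.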
+
+
+def report_exception(exc_info, err_file):
+ """Report an exception to err_file (typically stderr) and to .bzr.log.
+
+ This will show either a full traceback or a short message as appropriate.
+
+ :return: The appropriate exit code for this error.
+ """
+ # Log the full traceback to ~/.bzr.log
+ log_exception_quietly()
+ if 'error' in debug.debug_flags:
+ print_exception(exc_info, err_file)
+ return errors.EXIT_ERROR
+ exc_type, exc_object, exc_tb = exc_info
+ if isinstance(exc_object, KeyboardInterrupt):
+ err_file.write("bzr: interrupted\n")
+ return errors.EXIT_ERROR
+ elif isinstance(exc_object, MemoryError):
+ err_file.write("bzr: out of memory\n")
+ if 'mem_dump' in debug.debug_flags:
+ _dump_memory_usage(err_file)
+ else:
+ err_file.write("Use -Dmem_dump to dump memory to a file.\n")
+ return errors.EXIT_ERROR
+ elif isinstance(exc_object, ImportError) \
+ and str(exc_object).startswith("No module named "):
+ report_user_error(exc_info, err_file,
+ 'You may need to install this Python library separately.')
+ return errors.EXIT_ERROR
+ elif not getattr(exc_object, 'internal_error', True):
+ report_user_error(exc_info, err_file)
+ return errors.EXIT_ERROR
+ elif osutils.is_environment_error(exc_object):
+ if getattr(exc_object, 'errno', None) == errno.EPIPE:
+ err_file.write("bzr: broken pipe\n")
+ return errors.EXIT_ERROR
+ # Might be nice to catch all of these and show them as something more
+ # specific, but there are too many cases at the moment.
+ report_user_error(exc_info, err_file)
+ return errors.EXIT_ERROR
+ else:
+ report_bug(exc_info, err_file)
+ return errors.EXIT_INTERNAL_ERROR
+
+
+def print_exception(exc_info, err_file):
+ exc_type, exc_object, exc_tb = exc_info
+ err_file.write("bzr: ERROR: %s.%s: %s\n" % (
+ exc_type.__module__, exc_type.__name__, exc_object))
+ err_file.write('\n')
+ traceback.print_exception(exc_type, exc_object, exc_tb, file=err_file)
+
+
+# TODO: Should these be specially encoding the output?
+def report_user_error(exc_info, err_file, advice=None):
+ """Report to err_file an error that's not an internal error.
+
+ These don't get a traceback unless -Derror was given.
+
+ :param exc_info: 3-tuple from sys.exc_info()
+ :param advice: Extra advice to the user to be printed following the
+ exception.
+ """
+ err_file.write("bzr: ERROR: %s\n" % (exc_info[1],))
+ if advice:
+ err_file.write("%s\n" % (advice,))
+
+
+def report_bug(exc_info, err_file):
+ """Report an exception that probably indicates a bug in bzr"""
+ from bzrlib.crash import report_bug
+ report_bug(exc_info, err_file)
+
+
+def _flush_stdout_stderr():
+ # called from the bzrlib library finalizer returned by bzrlib.initialize()
+ try:
+ sys.stdout.flush()
+ sys.stderr.flush()
+ except ValueError, e:
+ # On Windows, I get ValueError calling stdout.flush() on a closed
+ # handle
+ pass
+ except IOError, e:
+ import errno
+ if e.errno in [errno.EINVAL, errno.EPIPE]:
+ pass
+ else:
+ raise
+
+
+def _flush_trace():
+ # called from the bzrlib library finalizer returned by bzrlib.initialize()
+ global _trace_file
+ if _trace_file:
+ _trace_file.flush()
+
+
+class EncodedStreamHandler(logging.Handler):
+ """Robustly write logging events to a stream using the specified encoding
+
+ Messages are expected to be formatted to unicode, but UTF-8 byte strings
+ are also accepted. An error during formatting or a str message in another
+    encoding will be quietly noted as an error in the Bazaar log file.
+
+ The stream is not closed so sys.stdout or sys.stderr may be passed.
+ """
+
+ def __init__(self, stream, encoding=None, errors='strict', level=0):
+ logging.Handler.__init__(self, level)
+ self.stream = stream
+ if encoding is None:
+ encoding = getattr(stream, "encoding", "ascii")
+ self.encoding = encoding
+ self.errors = errors
+
+ def flush(self):
+ flush = getattr(self.stream, "flush", None)
+ if flush is not None:
+ flush()
+
+ def emit(self, record):
+ try:
+ line = self.format(record)
+ if not isinstance(line, unicode):
+ line = line.decode("utf-8")
+ self.stream.write(line.encode(self.encoding, self.errors) + "\n")
+ except Exception:
+ log_exception_quietly()
+ # Try saving the details that would have been logged in some form
+ msg = args = "<Unformattable>"
+ try:
+ msg = repr(record.msg).encode("ascii")
+ args = repr(record.args).encode("ascii")
+ except Exception:
+ pass
+ # Using mutter() bypasses the logging module and writes directly
+ # to the file so there's no danger of getting into a loop here.
+ mutter("Logging record unformattable: %s %% %s", msg, args)
+
+
+class Config(object):
+ """Configuration of message tracing in bzrlib.
+
+ This implements the context manager protocol and should manage any global
+ variables still used. The default config used is DefaultConfig, but
+ embedded uses of bzrlib may wish to use a custom manager.
+ """
+
+ def __enter__(self):
+ return self # This is bound to the 'as' clause in a with statement.
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+        return False  # propagate exceptions.
+
+
+class DefaultConfig(Config):
+ """A default configuration for tracing of messages in bzrlib.
+
+ This implements the context manager protocol.
+ """
+
+ def __enter__(self):
+ self._original_filename = _bzr_log_filename
+ self._original_state = enable_default_logging()
+ return self # This is bound to the 'as' clause in a with statement.
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pop_log_file(self._original_state)
+ global _bzr_log_filename
+ _bzr_log_filename = self._original_filename
+        return False  # propagate exceptions.
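+
+# A hypothetical embedding application could use the context manager form,
+# for example:
+#
+#   with DefaultConfig():
+#       ...  # bzrlib calls made here log to ~/.bzr.log and to stderr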
diff --git a/bzrlib/transactions.py b/bzrlib/transactions.py
new file mode 100644
index 0000000..0455061
--- /dev/null
+++ b/bzrlib/transactions.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2005 Canonical Ltd
+# Authors: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""This module provides a transactional facility.
+
+Transactions provide hooks to allow data objects (e.g. inventory weaves or
+the revision-history file) to be placed in a registry and retrieved later
+during the same transaction. Transactions in bzr are not atomic - they
+depend on data ordering of writes, so we do not have commit or rollback
+facilities at the transaction level.
+
+Read only transactions raise an error when objects are registered as dirty
+against them, preventing unintended writes.
+
+Write transactions preserve dirty objects in the cache, though due to the
+write ordering approach we use for consistency 'dirty' is a misleading term.
+A dirty object is one we have modified.
+
+Both read and write transactions *may* flush unchanged objects out of
+memory, unless they are marked as 'precious', which indicates that the
+object cannot simply be re-read if it is ejected, or that it is
+expensive to obtain.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+import bzrlib.errors as errors
+from bzrlib.identitymap import IdentityMap, NullIdentityMap
+from bzrlib.trace import mutter
+
+
+class ReadOnlyTransaction(object):
+ """A read only unit of work for data objects."""
+
+ def finish(self):
+ """Clean up this transaction."""
+
+ def __init__(self):
+ super(ReadOnlyTransaction, self).__init__()
+ self.map = IdentityMap()
+ self._clean_objects = set()
+ self._clean_queue = []
+ self._limit = -1
+ self._precious_objects = set()
+
+ def is_clean(self, an_object):
+ """Return True if an_object is clean."""
+ return (an_object in self._clean_objects)
+
+ def register_clean(self, an_object, precious=False):
+ """Register an_object as being clean.
+
+ If the precious hint is True, the object will not
+ be ejected from the object identity map ever.
+ """
+ self._clean_objects.add(an_object)
+ self._clean_queue.append(an_object)
+ if precious:
+ self._precious_objects.add(an_object)
+ self._trim()
+
+ def register_dirty(self, an_object):
+ """Register an_object as being dirty."""
+ raise errors.ReadOnlyObjectDirtiedError(an_object)
+
+ def set_cache_size(self, size):
+ """Set a new cache size."""
+ if size < -1:
+ raise ValueError(size)
+ self._limit = size
+ self._trim()
+
+ def _trim(self):
+ """Trim the cache back if needed."""
+ if self._limit < 0 or self._limit - len(self._clean_objects) > 0:
+ return
+ needed = len(self._clean_objects) - self._limit
+ offset = 0
+ while needed and offset < len(self._clean_objects):
+ # references we know of:
+ # temp passed to getrefcount in our frame
+ # temp in getrefcount's frame
+ # the map forward
+ # the map backwards
+ # _clean_objects
+ # _clean_queue
+ # 1 missing ?
+ if (sys.getrefcount(self._clean_queue[offset]) <= 7 and
+ not self._clean_queue[offset] in self._precious_objects):
+ removed = self._clean_queue[offset]
+ self._clean_objects.remove(removed)
+ del self._clean_queue[offset]
+ self.map.remove_object(removed)
+ mutter('removed object %r', removed)
+ needed -= 1
+ else:
+ offset += 1
+
+    def writeable(self):
+        """Read only transactions do not allow writes."""
+        return False
+
+
+class WriteTransaction(ReadOnlyTransaction):
+ """A write transaction
+
+ - caches domain objects
+ - clean objects can be removed from the cache
+ - dirty objects are retained.
+ """
+
+ def finish(self):
+ """Clean up this transaction."""
+ for thing in self._dirty_objects:
+ callback = getattr(thing, 'transaction_finished', None)
+ if callback is not None:
+ callback()
+
+ def __init__(self):
+ super(WriteTransaction, self).__init__()
+ self._dirty_objects = set()
+
+ def is_dirty(self, an_object):
+ """Return True if an_object is dirty."""
+ return (an_object in self._dirty_objects)
+
+ def register_dirty(self, an_object):
+ """Register an_object as being dirty.
+
+        Dirty objects are not ejected from the identity map until the
+        transaction finishes, and are informed when it does.
+ """
+ self._dirty_objects.add(an_object)
+ if self.is_clean(an_object):
+ self._clean_objects.remove(an_object)
+ del self._clean_queue[self._clean_queue.index(an_object)]
+ self._trim()
+
+ def writeable(self):
+ """Write transactions allow writes."""
+ return True
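+
+# A minimal usage sketch (the object names are illustrative, not bzrlib API):
+#
+#   txn = WriteTransaction()
+#   txn.register_clean(some_weave, precious=True)  # cached, never ejected
+#   txn.register_dirty(modified_inventory)         # retained until finish()
+#   txn.set_cache_size(50)                         # clean objects may be trimmed
+#   ...
+#   txn.finish()   # dirty objects get their transaction_finished() callback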
+
+
+class PassThroughTransaction(object):
+ """A pass through transaction
+
+ - nothing is cached.
+ - nothing ever gets into the identity map.
+ """
+
+ def finish(self):
+ """Clean up this transaction."""
+ for thing in self._dirty_objects:
+ callback = getattr(thing, 'transaction_finished', None)
+ if callback is not None:
+ callback()
+
+ def __init__(self):
+ super(PassThroughTransaction, self).__init__()
+ self.map = NullIdentityMap()
+ self._dirty_objects = set()
+
+ def register_clean(self, an_object, precious=False):
+ """Register an_object as being clean.
+
+ Note that precious is only a hint, and PassThroughTransaction
+ ignores it.
+ """
+
+ def register_dirty(self, an_object):
+ """Register an_object as being dirty.
+
+ Dirty objects get informed
+ when the transaction finishes.
+ """
+ self._dirty_objects.add(an_object)
+
+ def set_cache_size(self, ignored):
+ """Do nothing, we are passing through."""
+
+ def writeable(self):
+ """Pass through transactions allow writes."""
+ return True
diff --git a/bzrlib/transform.py b/bzrlib/transform.py
new file mode 100644
index 0000000..a1057d5
--- /dev/null
+++ b/bzrlib/transform.py
@@ -0,0 +1,3211 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import os
+import errno
+from stat import S_ISREG, S_IEXEC
+import time
+
+from bzrlib import (
+ config as _mod_config,
+ errors,
+ lazy_import,
+ registry,
+ trace,
+ tree,
+ )
+lazy_import.lazy_import(globals(), """
+from bzrlib import (
+ annotate,
+ bencode,
+ controldir,
+ commit,
+ conflicts,
+ delta,
+ inventory,
+ multiparent,
+ osutils,
+ revision as _mod_revision,
+ ui,
+ urlutils,
+ )
+from bzrlib.i18n import gettext
+""")
+from bzrlib.errors import (DuplicateKey, MalformedTransform,
+ ReusingTransform, CantMoveRoot,
+ ImmortalLimbo, NoFinalPath,
+ UnableCreateSymlink)
+from bzrlib.filters import filtered_output_bytes, ContentFilterContext
+from bzrlib.mutabletree import MutableTree
+from bzrlib.osutils import (
+ delete_any,
+ file_kind,
+ has_symlinks,
+ pathjoin,
+ sha_file,
+ splitpath,
+ )
+from bzrlib.progress import ProgressPhase
+from bzrlib.symbol_versioning import (
+ deprecated_function,
+ deprecated_in,
+ deprecated_method,
+ )
+
+
+ROOT_PARENT = "root-parent"
+
+def unique_add(map, key, value):
+ if key in map:
+ raise DuplicateKey(key=key)
+ map[key] = value
+
+
+
+class _TransformResults(object):
+ def __init__(self, modified_paths, rename_count):
+ object.__init__(self)
+ self.modified_paths = modified_paths
+ self.rename_count = rename_count
+
+
+class TreeTransformBase(object):
+ """The base class for TreeTransform and its kin."""
+
+ def __init__(self, tree, pb=None,
+ case_sensitive=True):
+ """Constructor.
+
+ :param tree: The tree that will be transformed, but not necessarily
+ the output tree.
+ :param pb: ignored
+ :param case_sensitive: If True, the target of the transform is
+ case sensitive, not just case preserving.
+ """
+ object.__init__(self)
+ self._tree = tree
+ self._id_number = 0
+ # mapping of trans_id -> new basename
+ self._new_name = {}
+ # mapping of trans_id -> new parent trans_id
+ self._new_parent = {}
+ # mapping of trans_id with new contents -> new file_kind
+ self._new_contents = {}
+ # mapping of trans_id => (sha1 of content, stat_value)
+ self._observed_sha1s = {}
+ # Set of trans_ids whose contents will be removed
+ self._removed_contents = set()
+ # Mapping of trans_id -> new execute-bit value
+ self._new_executability = {}
+ # Mapping of trans_id -> new tree-reference value
+ self._new_reference_revision = {}
+ # Mapping of trans_id -> new file_id
+ self._new_id = {}
+ # Mapping of old file-id -> trans_id
+ self._non_present_ids = {}
+ # Mapping of new file_id -> trans_id
+ self._r_new_id = {}
+ # Set of trans_ids that will be removed
+ self._removed_id = set()
+ # Mapping of path in old tree -> trans_id
+ self._tree_path_ids = {}
+ # Mapping trans_id -> path in old tree
+ self._tree_id_paths = {}
+ # The trans_id that will be used as the tree root
+ root_id = tree.get_root_id()
+ if root_id is not None:
+ self._new_root = self.trans_id_tree_file_id(root_id)
+ else:
+ self._new_root = None
+ # Indicator of whether the transform has been applied
+ self._done = False
+ # A progress bar
+ self._pb = pb
+ # Whether the target is case sensitive
+ self._case_sensitive_target = case_sensitive
+ # A counter of how many files have been renamed
+ self.rename_count = 0
+
+ def __enter__(self):
+ """Support Context Manager API."""
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Support Context Manager API."""
+ self.finalize()
+
+ def finalize(self):
+ """Release the working tree lock, if held.
+
+ This is required if apply has not been invoked, but can be invoked
+ even after apply.
+ """
+ if self._tree is None:
+ return
+ for hook in MutableTree.hooks['post_transform']:
+ hook(self._tree, self)
+ self._tree.unlock()
+ self._tree = None
+
+ def __get_root(self):
+ return self._new_root
+
+ root = property(__get_root)
+
+ def _assign_id(self):
+        """Produce a new transform id"""
+        new_id = "new-%s" % self._id_number
+        self._id_number += 1
+ return new_id
+
+ def create_path(self, name, parent):
+ """Assign a transaction id to a new path"""
+ trans_id = self._assign_id()
+ unique_add(self._new_name, trans_id, name)
+ unique_add(self._new_parent, trans_id, parent)
+ return trans_id
+
+ def adjust_path(self, name, parent, trans_id):
+ """Change the path that is assigned to a transaction id."""
+ if parent is None:
+ raise ValueError("Parent trans-id may not be None")
+ if trans_id == self._new_root:
+ raise CantMoveRoot
+ self._new_name[trans_id] = name
+ self._new_parent[trans_id] = parent
+
+ def adjust_root_path(self, name, parent):
+ """Emulate moving the root by moving all children, instead.
+
+ We do this by undoing the association of root's transaction id with the
+ current tree. This allows us to create a new directory with that
+ transaction id. We unversion the root directory and version the
+ physically new directory, and hope someone versions the tree root
+ later.
+ """
+ old_root = self._new_root
+ old_root_file_id = self.final_file_id(old_root)
+ # force moving all children of root
+ for child_id in self.iter_tree_children(old_root):
+ if child_id != parent:
+ self.adjust_path(self.final_name(child_id),
+ self.final_parent(child_id), child_id)
+ file_id = self.final_file_id(child_id)
+ if file_id is not None:
+ self.unversion_file(child_id)
+ self.version_file(file_id, child_id)
+
+ # the physical root needs a new transaction id
+ self._tree_path_ids.pop("")
+ self._tree_id_paths.pop(old_root)
+ self._new_root = self.trans_id_tree_file_id(self._tree.get_root_id())
+ if parent == old_root:
+ parent = self._new_root
+ self.adjust_path(name, parent, old_root)
+ self.create_directory(old_root)
+ self.version_file(old_root_file_id, old_root)
+ self.unversion_file(self._new_root)
+
+ def fixup_new_roots(self):
+ """Reinterpret requests to change the root directory
+
+ Instead of creating a root directory, or moving an existing directory,
+ all the attributes and children of the new root are applied to the
+ existing root directory.
+
+ This means that the old root trans-id becomes obsolete, so it is
+ recommended only to invoke this after the root trans-id has become
+ irrelevant.
+
+ """
+ new_roots = [k for k, v in self._new_parent.iteritems() if v ==
+ ROOT_PARENT]
+ if len(new_roots) < 1:
+ return
+ if len(new_roots) != 1:
+ raise ValueError('A tree cannot have two roots!')
+ if self._new_root is None:
+ self._new_root = new_roots[0]
+ return
+ old_new_root = new_roots[0]
+ # unversion the new root's directory.
+ if self.final_kind(self._new_root) is None:
+ file_id = self.final_file_id(old_new_root)
+ else:
+ file_id = self.final_file_id(self._new_root)
+ if old_new_root in self._new_id:
+ self.cancel_versioning(old_new_root)
+ else:
+ self.unversion_file(old_new_root)
+ # if, at this stage, root still has an old file_id, zap it so we can
+ # stick a new one in.
+ if (self.tree_file_id(self._new_root) is not None and
+ self._new_root not in self._removed_id):
+ self.unversion_file(self._new_root)
+ if file_id is not None:
+ self.version_file(file_id, self._new_root)
+
+ # Now move children of new root into old root directory.
+ # Ensure all children are registered with the transaction, but don't
+ # use directly-- some tree children have new parents
+ list(self.iter_tree_children(old_new_root))
+ # Move all children of new root into old root directory.
+ for child in self.by_parent().get(old_new_root, []):
+ self.adjust_path(self.final_name(child), self._new_root, child)
+
+ # Ensure old_new_root has no directory.
+ if old_new_root in self._new_contents:
+ self.cancel_creation(old_new_root)
+ else:
+ self.delete_contents(old_new_root)
+
+ # prevent deletion of root directory.
+ if self._new_root in self._removed_contents:
+ self.cancel_deletion(self._new_root)
+
+ # destroy path info for old_new_root.
+ del self._new_parent[old_new_root]
+ del self._new_name[old_new_root]
+
+ def trans_id_tree_file_id(self, inventory_id):
+ """Determine the transaction id of a working tree file.
+
+ This reflects only files that already exist, not ones that will be
+ added by transactions.
+ """
+ if inventory_id is None:
+ raise ValueError('None is not a valid file id')
+ path = self._tree.id2path(inventory_id)
+ return self.trans_id_tree_path(path)
+
+ def trans_id_file_id(self, file_id):
+        """Determine or set the transaction id associated with a file ID.
+
+        A new id is only created for file_ids that were never present. If
+        a transaction has been unversioned, it is deliberately still returned
+        (this will likely lead to an unversioned parent conflict).
+        """
+ if file_id is None:
+ raise ValueError('None is not a valid file id')
+ if file_id in self._r_new_id and self._r_new_id[file_id] is not None:
+ return self._r_new_id[file_id]
+ else:
+ try:
+ self._tree.iter_entries_by_dir([file_id]).next()
+ except StopIteration:
+ if file_id in self._non_present_ids:
+ return self._non_present_ids[file_id]
+ else:
+ trans_id = self._assign_id()
+ self._non_present_ids[file_id] = trans_id
+ return trans_id
+ else:
+ return self.trans_id_tree_file_id(file_id)
+
+ def trans_id_tree_path(self, path):
+ """Determine (and maybe set) the transaction ID for a tree path."""
+ path = self.canonical_path(path)
+ if path not in self._tree_path_ids:
+ self._tree_path_ids[path] = self._assign_id()
+ self._tree_id_paths[self._tree_path_ids[path]] = path
+ return self._tree_path_ids[path]
+
+ def get_tree_parent(self, trans_id):
+ """Determine id of the parent in the tree."""
+ path = self._tree_id_paths[trans_id]
+ if path == "":
+ return ROOT_PARENT
+ return self.trans_id_tree_path(os.path.dirname(path))
+
+ def delete_contents(self, trans_id):
+ """Schedule the contents of a path entry for deletion"""
+ kind = self.tree_kind(trans_id)
+ if kind is not None:
+ self._removed_contents.add(trans_id)
+
+ def cancel_deletion(self, trans_id):
+ """Cancel a scheduled deletion"""
+ self._removed_contents.remove(trans_id)
+
+ def unversion_file(self, trans_id):
+ """Schedule a path entry to become unversioned"""
+ self._removed_id.add(trans_id)
+
+ def delete_versioned(self, trans_id):
+ """Delete and unversion a versioned file"""
+ self.delete_contents(trans_id)
+ self.unversion_file(trans_id)
+
+ def set_executability(self, executability, trans_id):
+ """Schedule setting of the 'execute' bit
+ To unschedule, set to None
+ """
+ if executability is None:
+ del self._new_executability[trans_id]
+ else:
+ unique_add(self._new_executability, trans_id, executability)
+
+ def set_tree_reference(self, revision_id, trans_id):
+ """Set the reference associated with a directory"""
+ unique_add(self._new_reference_revision, trans_id, revision_id)
+
+ def version_file(self, file_id, trans_id):
+ """Schedule a file to become versioned."""
+ if file_id is None:
+ raise ValueError()
+ unique_add(self._new_id, trans_id, file_id)
+ unique_add(self._r_new_id, file_id, trans_id)
+
+ def cancel_versioning(self, trans_id):
+ """Undo a previous versioning of a file"""
+ file_id = self._new_id[trans_id]
+ del self._new_id[trans_id]
+ del self._r_new_id[file_id]
+
+ def new_paths(self, filesystem_only=False):
+ """Determine the paths of all new and changed files.
+
+ :param filesystem_only: if True, only calculate values for files
+ that require renames or execute bit changes.
+ """
+ new_ids = set()
+ if filesystem_only:
+ stale_ids = self._needs_rename.difference(self._new_name)
+ stale_ids.difference_update(self._new_parent)
+ stale_ids.difference_update(self._new_contents)
+ stale_ids.difference_update(self._new_id)
+ needs_rename = self._needs_rename.difference(stale_ids)
+ id_sets = (needs_rename, self._new_executability)
+ else:
+ id_sets = (self._new_name, self._new_parent, self._new_contents,
+ self._new_id, self._new_executability)
+ for id_set in id_sets:
+ new_ids.update(id_set)
+ return sorted(FinalPaths(self).get_paths(new_ids))
+
+ def _inventory_altered(self):
+ """Determine which trans_ids need new Inventory entries.
+
+        A new entry is needed when anything that would be reflected by an
+ inventory entry changes, including file name, file_id, parent file_id,
+ file kind, and the execute bit.
+
+ Some care is taken to return entries with real changes, not cases
+ where the value is deleted and then restored to its original value,
+ but some actually unchanged values may be returned.
+
+ :returns: A list of (path, trans_id) for all items requiring an
+ inventory change. Ordered by path.
+ """
+ changed_ids = set()
+ # Find entries whose file_ids are new (or changed).
+ new_file_id = set(t for t in self._new_id
+ if self._new_id[t] != self.tree_file_id(t))
+ for id_set in [self._new_name, self._new_parent, new_file_id,
+ self._new_executability]:
+ changed_ids.update(id_set)
+ # removing implies a kind change
+ changed_kind = set(self._removed_contents)
+ # so does adding
+ changed_kind.intersection_update(self._new_contents)
+ # Ignore entries that are already known to have changed.
+ changed_kind.difference_update(changed_ids)
+ # to keep only the truly changed ones
+ changed_kind = (t for t in changed_kind
+ if self.tree_kind(t) != self.final_kind(t))
+ # all kind changes will alter the inventory
+ changed_ids.update(changed_kind)
+ # To find entries with changed parent_ids, find parents which existed,
+ # but changed file_id.
+ changed_file_id = set(t for t in new_file_id if t in self._removed_id)
+ # Now add all their children to the set.
+        for parent_trans_id in changed_file_id:
+ changed_ids.update(self.iter_tree_children(parent_trans_id))
+ return sorted(FinalPaths(self).get_paths(changed_ids))
+
+ def final_kind(self, trans_id):
+ """Determine the final file kind, after any changes applied.
+
+ :return: None if the file does not exist/has no contents. (It is
+ conceivable that a path would be created without the corresponding
+ contents insertion command)
+ """
+ if trans_id in self._new_contents:
+ return self._new_contents[trans_id]
+ elif trans_id in self._removed_contents:
+ return None
+ else:
+ return self.tree_kind(trans_id)
+
+ def tree_file_id(self, trans_id):
+ """Determine the file id associated with the trans_id in the tree"""
+ try:
+ path = self._tree_id_paths[trans_id]
+ except KeyError:
+ # the file is a new, unversioned file, or invalid trans_id
+ return None
+ # the file is old; the old id is still valid
+ if self._new_root == trans_id:
+ return self._tree.get_root_id()
+ return self._tree.path2id(path)
+
+ def final_file_id(self, trans_id):
+ """Determine the file id after any changes are applied, or None.
+
+ None indicates that the file will not be versioned after changes are
+ applied.
+ """
+ try:
+ return self._new_id[trans_id]
+ except KeyError:
+ if trans_id in self._removed_id:
+ return None
+ return self.tree_file_id(trans_id)
+
+ def inactive_file_id(self, trans_id):
+ """Return the inactive file_id associated with a transaction id.
+ That is, the one in the tree or in non_present_ids.
+ The file_id may actually be active, too.
+ """
+ file_id = self.tree_file_id(trans_id)
+ if file_id is not None:
+ return file_id
+ for key, value in self._non_present_ids.iteritems():
+ if value == trans_id:
+ return key
+
+ def final_parent(self, trans_id):
+ """Determine the parent file_id, after any changes are applied.
+
+ ROOT_PARENT is returned for the tree root.
+ """
+ try:
+ return self._new_parent[trans_id]
+ except KeyError:
+ return self.get_tree_parent(trans_id)
+
+ def final_name(self, trans_id):
+ """Determine the final filename, after all changes are applied."""
+ try:
+ return self._new_name[trans_id]
+ except KeyError:
+ try:
+ return os.path.basename(self._tree_id_paths[trans_id])
+ except KeyError:
+ raise NoFinalPath(trans_id, self)
+
+ def by_parent(self):
+ """Return a map of parent: children for known parents.
+
+ Only new paths and parents of tree files with assigned ids are used.
+ """
+ by_parent = {}
+ items = list(self._new_parent.iteritems())
+ items.extend((t, self.final_parent(t)) for t in
+ self._tree_id_paths.keys())
+ for trans_id, parent_id in items:
+ if parent_id not in by_parent:
+ by_parent[parent_id] = set()
+ by_parent[parent_id].add(trans_id)
+ return by_parent
+
+ def path_changed(self, trans_id):
+ """Return True if a trans_id's path has changed."""
+ return (trans_id in self._new_name) or (trans_id in self._new_parent)
+
+ def new_contents(self, trans_id):
+ return (trans_id in self._new_contents)
+
+ def find_conflicts(self):
+ """Find any violations of inventory or filesystem invariants"""
+ if self._done is True:
+ raise ReusingTransform()
+ conflicts = []
+ # ensure all children of all existent parents are known
+ # all children of non-existent parents are known, by definition.
+ self._add_tree_children()
+ by_parent = self.by_parent()
+ conflicts.extend(self._unversioned_parents(by_parent))
+ conflicts.extend(self._parent_loops())
+ conflicts.extend(self._duplicate_entries(by_parent))
+ conflicts.extend(self._duplicate_ids())
+ conflicts.extend(self._parent_type_conflicts(by_parent))
+ conflicts.extend(self._improper_versioning())
+ conflicts.extend(self._executability_conflicts())
+ conflicts.extend(self._overwrite_conflicts())
+ return conflicts
+
+ def _check_malformed(self):
+ conflicts = self.find_conflicts()
+ if len(conflicts) != 0:
+ raise MalformedTransform(conflicts=conflicts)
+
+ def _add_tree_children(self):
+ """Add all the children of all active parents to the known paths.
+
+ Active parents are those which gain children, and those which are
+ removed. This is a necessary first step in detecting conflicts.
+ """
+ parents = self.by_parent().keys()
+ parents.extend([t for t in self._removed_contents if
+ self.tree_kind(t) == 'directory'])
+ for trans_id in self._removed_id:
+ file_id = self.tree_file_id(trans_id)
+ if file_id is not None:
+ if self._tree.stored_kind(file_id) == 'directory':
+ parents.append(trans_id)
+ elif self.tree_kind(trans_id) == 'directory':
+ parents.append(trans_id)
+
+ for parent_id in parents:
+ # ensure that all children are registered with the transaction
+ list(self.iter_tree_children(parent_id))
+
+ def _has_named_child(self, name, parent_id, known_children):
+        """Does a parent already have a child with the given name?
+
+ :param name: The searched for name.
+
+ :param parent_id: The parent for which the check is made.
+
+        :param known_children: The already known children. This should have
+            been recently obtained from `self.by_parent().get(parent_id)`
+            (or will be if None is passed).
+ """
+ if known_children is None:
+ known_children = self.by_parent().get(parent_id, [])
+ for child in known_children:
+ if self.final_name(child) == name:
+ return True
+ parent_path = self._tree_id_paths.get(parent_id, None)
+ if parent_path is None:
+ # No parent... no children
+ return False
+ child_path = joinpath(parent_path, name)
+ child_id = self._tree_path_ids.get(child_path, None)
+ if child_id is None:
+ # Not known by the tree transform yet, check the filesystem
+ return osutils.lexists(self._tree.abspath(child_path))
+ else:
+ raise AssertionError('child_id is missing: %s, %s, %s'
+ % (name, parent_id, child_id))
+
+ def _available_backup_name(self, name, target_id):
+ """Find an available backup name.
+
+ :param name: The basename of the file.
+
+ :param target_id: The directory trans_id where the backup should
+ be placed.
+ """
+ known_children = self.by_parent().get(target_id, [])
+ return osutils.available_backup_name(
+ name,
+ lambda base: self._has_named_child(
+ base, target_id, known_children))
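+
+    # For instance (illustrative): backing up 'README' into a directory that
+    # already contains 'README.~1~' would typically yield 'README.~2~'.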
+
+ def _parent_loops(self):
+ """No entry should be its own ancestor"""
+ conflicts = []
+ for trans_id in self._new_parent:
+ seen = set()
+ parent_id = trans_id
+ while parent_id != ROOT_PARENT:
+ seen.add(parent_id)
+ try:
+ parent_id = self.final_parent(parent_id)
+ except KeyError:
+ break
+ if parent_id == trans_id:
+ conflicts.append(('parent loop', trans_id))
+ if parent_id in seen:
+ break
+ return conflicts
+
+ def _unversioned_parents(self, by_parent):
+ """If parent directories are versioned, children must be versioned."""
+ conflicts = []
+ for parent_id, children in by_parent.iteritems():
+ if parent_id == ROOT_PARENT:
+ continue
+ if self.final_file_id(parent_id) is not None:
+ continue
+ for child_id in children:
+ if self.final_file_id(child_id) is not None:
+ conflicts.append(('unversioned parent', parent_id))
+                    break
+ return conflicts
+
+ def _improper_versioning(self):
+ """Cannot version a file with no contents, or a bad type.
+
+ However, existing entries with no contents are okay.
+ """
+ conflicts = []
+ for trans_id in self._new_id.iterkeys():
+ kind = self.final_kind(trans_id)
+ if kind is None:
+ conflicts.append(('versioning no contents', trans_id))
+ continue
+ if not inventory.InventoryEntry.versionable_kind(kind):
+ conflicts.append(('versioning bad kind', trans_id, kind))
+ return conflicts
+
+ def _executability_conflicts(self):
+ """Check for bad executability changes.
+
+ Only versioned files may have their executability set, because
+ 1. only versioned entries can have executability under windows
+ 2. only files can be executable. (The execute bit on a directory
+ does not indicate searchability)
+ """
+ conflicts = []
+ for trans_id in self._new_executability:
+ if self.final_file_id(trans_id) is None:
+ conflicts.append(('unversioned executability', trans_id))
+ else:
+ if self.final_kind(trans_id) != "file":
+ conflicts.append(('non-file executability', trans_id))
+ return conflicts
+
+ def _overwrite_conflicts(self):
+ """Check for overwrites (not permitted on Win32)"""
+ conflicts = []
+ for trans_id in self._new_contents:
+ if self.tree_kind(trans_id) is None:
+ continue
+ if trans_id not in self._removed_contents:
+ conflicts.append(('overwrite', trans_id,
+ self.final_name(trans_id)))
+ return conflicts
+
+ def _duplicate_entries(self, by_parent):
+ """No directory may have two entries with the same name."""
+ conflicts = []
+ if (self._new_name, self._new_parent) == ({}, {}):
+ return conflicts
+ for children in by_parent.itervalues():
+ name_ids = []
+ for child_tid in children:
+ name = self.final_name(child_tid)
+ if name is not None:
+ # Keep children only if they still exist in the end
+ if not self._case_sensitive_target:
+ name = name.lower()
+ name_ids.append((name, child_tid))
+ name_ids.sort()
+ last_name = None
+ last_trans_id = None
+ for name, trans_id in name_ids:
+ kind = self.final_kind(trans_id)
+ file_id = self.final_file_id(trans_id)
+ if kind is None and file_id is None:
+ continue
+ if name == last_name:
+ conflicts.append(('duplicate', last_trans_id, trans_id,
+ name))
+ last_name = name
+ last_trans_id = trans_id
+ return conflicts
+
+ def _duplicate_ids(self):
+ """Each inventory id may only be used once"""
+ conflicts = []
+ removed_tree_ids = set((self.tree_file_id(trans_id) for trans_id in
+ self._removed_id))
+ all_ids = self._tree.all_file_ids()
+ active_tree_ids = all_ids.difference(removed_tree_ids)
+ for trans_id, file_id in self._new_id.iteritems():
+ if file_id in active_tree_ids:
+ old_trans_id = self.trans_id_tree_file_id(file_id)
+ conflicts.append(('duplicate id', old_trans_id, trans_id))
+ return conflicts
+
+ def _parent_type_conflicts(self, by_parent):
+ """Children must have a directory parent"""
+ conflicts = []
+ for parent_id, children in by_parent.iteritems():
+ if parent_id == ROOT_PARENT:
+ continue
+ no_children = True
+ for child_id in children:
+ if self.final_kind(child_id) is not None:
+ no_children = False
+ break
+ if no_children:
+ continue
+ # There is at least a child, so we need an existing directory to
+ # contain it.
+ kind = self.final_kind(parent_id)
+ if kind is None:
+ # The directory will be deleted
+ conflicts.append(('missing parent', parent_id))
+ elif kind != "directory":
+ # Meh, we need a *directory* to put something in it
+ conflicts.append(('non-directory parent', parent_id))
+ return conflicts
+
+ def _set_executability(self, path, trans_id):
+        """Set the executability of versioned files."""
+ if self._tree._supports_executable():
+ new_executability = self._new_executability[trans_id]
+ abspath = self._tree.abspath(path)
+ current_mode = os.stat(abspath).st_mode
+ if new_executability:
+ umask = os.umask(0)
+ os.umask(umask)
+ to_mode = current_mode | (0100 & ~umask)
+ # Enable x-bit for others only if they can read it.
+ if current_mode & 0004:
+ to_mode |= 0001 & ~umask
+ if current_mode & 0040:
+ to_mode |= 0010 & ~umask
+ else:
+ to_mode = current_mode & ~0111
+ osutils.chmod_if_possible(abspath, to_mode)
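+
+    # Worked example (illustrative): with umask 022 and a current mode of
+    # 0644, enabling executability gives 0644 | (0100 & ~022) = 0744; the
+    # file is world- and group-readable, so 0001 and 0010 are added too,
+    # for a final mode of 0755.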
+
+ def _new_entry(self, name, parent_id, file_id):
+ """Helper function to create a new filesystem entry."""
+ trans_id = self.create_path(name, parent_id)
+ if file_id is not None:
+ self.version_file(file_id, trans_id)
+ return trans_id
+
+ def new_file(self, name, parent_id, contents, file_id=None,
+ executable=None, sha1=None):
+ """Convenience method to create files.
+
+ name is the name of the file to create.
+ parent_id is the transaction id of the parent directory of the file.
+ contents is an iterator of bytestrings, which will be used to produce
+ the file.
+ :param file_id: The inventory ID of the file, if it is to be versioned.
+ :param executable: Only valid when a file_id has been supplied.
+ """
+ trans_id = self._new_entry(name, parent_id, file_id)
+ # TODO: rather than scheduling a set_executable call,
+ # have create_file create the file with the right mode.
+ self.create_file(contents, trans_id, sha1=sha1)
+ if executable is not None:
+ self.set_executability(executable, trans_id)
+ return trans_id
+
+ def new_directory(self, name, parent_id, file_id=None):
+ """Convenience method to create directories.
+
+ name is the name of the directory to create.
+ parent_id is the transaction id of the parent directory of the
+ directory.
+ file_id is the inventory ID of the directory, if it is to be versioned.
+ """
+ trans_id = self._new_entry(name, parent_id, file_id)
+ self.create_directory(trans_id)
+ return trans_id
+
+ def new_symlink(self, name, parent_id, target, file_id=None):
+ """Convenience method to create symbolic link.
+
+ name is the name of the symlink to create.
+ parent_id is the transaction id of the parent directory of the symlink.
+ target is a bytestring of the target of the symlink.
+ file_id is the inventory ID of the file, if it is to be versioned.
+ """
+ trans_id = self._new_entry(name, parent_id, file_id)
+ self.create_symlink(target, trans_id)
+ return trans_id
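+
+    # A minimal sketch of these convenience methods (tt stands for a concrete
+    # TreeTransform instance; the names and file ids are illustrative):
+    #
+    #   docs_id = tt.new_directory('docs', tt.root, file_id='docs-id')
+    #   tt.new_file('README', docs_id, ['hello\n'], file_id='readme-id')
+    #   tt.new_symlink('latest', tt.root, 'docs', file_id='latest-id')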
+
+ def new_orphan(self, trans_id, parent_id):
+ """Schedule an item to be orphaned.
+
+        When a directory is about to be removed, its unversioned children are
+        moved out of the way: they don't have a parent anymore.
+
+ :param trans_id: The trans_id of the existing item.
+ :param parent_id: The parent trans_id of the item.
+ """
+ raise NotImplementedError(self.new_orphan)
+
+ def _get_potential_orphans(self, dir_id):
+ """Find the potential orphans in a directory.
+
+ A directory can't be safely deleted if there are versioned files in it.
+ If all the contained files are unversioned then they can be orphaned.
+
+ The 'None' return value means that the directory contains at least one
+ versioned file and should not be deleted.
+
+ :param dir_id: The directory trans id.
+
+ :return: A list of the orphan trans ids or None if at least one
+ versioned file is present.
+ """
+ orphans = []
+ # Find the potential orphans, stop if one item should be kept
+ for child_tid in self.by_parent()[dir_id]:
+ if child_tid in self._removed_contents:
+ # The child is removed as part of the transform. Since it was
+ # versioned before, it's not an orphan
+ continue
+ elif self.final_file_id(child_tid) is None:
+ # The child is not versioned
+ orphans.append(child_tid)
+ else:
+ # We have a versioned file here, searching for orphans is
+ # meaningless.
+ orphans = None
+ break
+ return orphans
+
+ def _affected_ids(self):
+ """Return the set of transform ids affected by the transform"""
+ trans_ids = set(self._removed_id)
+ trans_ids.update(self._new_id.keys())
+ trans_ids.update(self._removed_contents)
+ trans_ids.update(self._new_contents.keys())
+ trans_ids.update(self._new_executability.keys())
+ trans_ids.update(self._new_name.keys())
+ trans_ids.update(self._new_parent.keys())
+ return trans_ids
+
+ def _get_file_id_maps(self):
+ """Return mapping of file_ids to trans_ids in the to and from states"""
+ trans_ids = self._affected_ids()
+ from_trans_ids = {}
+ to_trans_ids = {}
+ # Build up two dicts: trans_ids associated with file ids in the
+ # FROM state, vs the TO state.
+ for trans_id in trans_ids:
+ from_file_id = self.tree_file_id(trans_id)
+ if from_file_id is not None:
+ from_trans_ids[from_file_id] = trans_id
+ to_file_id = self.final_file_id(trans_id)
+ if to_file_id is not None:
+ to_trans_ids[to_file_id] = trans_id
+ return from_trans_ids, to_trans_ids
+
+ def _from_file_data(self, from_trans_id, from_versioned, file_id):
+ """Get data about a file in the from (tree) state
+
+ Return a (name, parent, kind, executable) tuple
+ """
+ from_path = self._tree_id_paths.get(from_trans_id)
+ if from_versioned:
+ # get data from working tree if versioned
+ from_entry = self._tree.iter_entries_by_dir([file_id]).next()[1]
+ from_name = from_entry.name
+ from_parent = from_entry.parent_id
+ else:
+ from_entry = None
+ if from_path is None:
+ # File does not exist in FROM state
+ from_name = None
+ from_parent = None
+ else:
+ # File exists, but is not versioned. Have to use path-
+ # splitting stuff
+ from_name = os.path.basename(from_path)
+ tree_parent = self.get_tree_parent(from_trans_id)
+ from_parent = self.tree_file_id(tree_parent)
+ if from_path is not None:
+ from_kind, from_executable, from_stats = \
+ self._tree._comparison_data(from_entry, from_path)
+ else:
+ from_kind = None
+ from_executable = False
+ return from_name, from_parent, from_kind, from_executable
+
+ def _to_file_data(self, to_trans_id, from_trans_id, from_executable):
+ """Get data about a file in the to (target) state
+
+ Return a (name, parent, kind, executable) tuple
+ """
+ to_name = self.final_name(to_trans_id)
+ to_kind = self.final_kind(to_trans_id)
+ to_parent = self.final_file_id(self.final_parent(to_trans_id))
+ if to_trans_id in self._new_executability:
+ to_executable = self._new_executability[to_trans_id]
+ elif to_trans_id == from_trans_id:
+ to_executable = from_executable
+ else:
+ to_executable = False
+ return to_name, to_parent, to_kind, to_executable
+
+ def iter_changes(self):
+ """Produce output in the same format as Tree.iter_changes.
+
+ Will produce nonsensical results if invoked while inventory/filesystem
+ conflicts (as reported by TreeTransform.find_conflicts()) are present.
+
+ This reads the Transform, but only reproduces changes involving a
+ file_id. Files that are not versioned in either of the FROM or TO
+ states are not reflected.
+ """
+ final_paths = FinalPaths(self)
+ from_trans_ids, to_trans_ids = self._get_file_id_maps()
+ results = []
+ # Now iterate through all active file_ids
+ for file_id in set(from_trans_ids.keys() + to_trans_ids.keys()):
+ modified = False
+ from_trans_id = from_trans_ids.get(file_id)
+ # find file ids, and determine versioning state
+ if from_trans_id is None:
+ from_versioned = False
+ from_trans_id = to_trans_ids[file_id]
+ else:
+ from_versioned = True
+ to_trans_id = to_trans_ids.get(file_id)
+ if to_trans_id is None:
+ to_versioned = False
+ to_trans_id = from_trans_id
+ else:
+ to_versioned = True
+
+ from_name, from_parent, from_kind, from_executable = \
+ self._from_file_data(from_trans_id, from_versioned, file_id)
+
+ to_name, to_parent, to_kind, to_executable = \
+ self._to_file_data(to_trans_id, from_trans_id, from_executable)
+
+ if not from_versioned:
+ from_path = None
+ else:
+ from_path = self._tree_id_paths.get(from_trans_id)
+ if not to_versioned:
+ to_path = None
+ else:
+ to_path = final_paths.get_path(to_trans_id)
+ if from_kind != to_kind:
+ modified = True
+ elif to_kind in ('file', 'symlink') and (
+ to_trans_id != from_trans_id or
+ to_trans_id in self._new_contents):
+ modified = True
+ if (not modified and from_versioned == to_versioned and
+                from_parent == to_parent and from_name == to_name and
+ from_executable == to_executable):
+ continue
+ results.append((file_id, (from_path, to_path), modified,
+ (from_versioned, to_versioned),
+ (from_parent, to_parent),
+ (from_name, to_name),
+ (from_kind, to_kind),
+ (from_executable, to_executable)))
+ return iter(sorted(results, key=lambda x:x[1]))
+
+ def get_preview_tree(self):
+ """Return a tree representing the result of the transform.
+
+ The tree is a snapshot, and altering the TreeTransform will invalidate
+ it.
+ """
+ return _PreviewTree(self)
+
+ def commit(self, branch, message, merge_parents=None, strict=False,
+ timestamp=None, timezone=None, committer=None, authors=None,
+ revprops=None, revision_id=None):
+ """Commit the result of this TreeTransform to a branch.
+
+ :param branch: The branch to commit to.
+ :param message: The message to attach to the commit.
+ :param merge_parents: Additional parent revision-ids specified by
+ pending merges.
+ :param strict: If True, abort the commit if there are unversioned
+ files.
+ :param timestamp: if not None, seconds-since-epoch for the time and
+ date. (May be a float.)
+ :param timezone: Optional timezone for timestamp, as an offset in
+ seconds.
+ :param committer: Optional committer in email-id format.
+ (e.g. "J Random Hacker <jrandom@example.com>")
+ :param authors: Optional list of authors in email-id format.
+ :param revprops: Optional dictionary of revision properties.
+ :param revision_id: Optional revision id. (Specifying a revision-id
+ may reduce performance for some non-native formats.)
+ :return: The revision_id of the revision committed.
+ """
+ self._check_malformed()
+ if strict:
+ unversioned = set(self._new_contents).difference(set(self._new_id))
+ for trans_id in unversioned:
+ if self.final_file_id(trans_id) is None:
+ raise errors.StrictCommitFailed()
+
+ revno, last_rev_id = branch.last_revision_info()
+ if last_rev_id == _mod_revision.NULL_REVISION:
+ if merge_parents is not None:
+ raise ValueError('Cannot supply merge parents for first'
+ ' commit.')
+ parent_ids = []
+ else:
+ parent_ids = [last_rev_id]
+ if merge_parents is not None:
+ parent_ids.extend(merge_parents)
+ if self._tree.get_revision_id() != last_rev_id:
+ raise ValueError('TreeTransform not based on branch basis: %s' %
+ self._tree.get_revision_id())
+ revprops = commit.Commit.update_revprops(revprops, branch, authors)
+ builder = branch.get_commit_builder(parent_ids,
+ timestamp=timestamp,
+ timezone=timezone,
+ committer=committer,
+ revprops=revprops,
+ revision_id=revision_id)
+ preview = self.get_preview_tree()
+ list(builder.record_iter_changes(preview, last_rev_id,
+ self.iter_changes()))
+ builder.finish_inventory()
+ revision_id = builder.commit(message)
+ branch.set_last_revision_info(revno + 1, revision_id)
+ return revision_id
+
+ def _text_parent(self, trans_id):
+ file_id = self.tree_file_id(trans_id)
+ try:
+ if file_id is None or self._tree.kind(file_id) != 'file':
+ return None
+ except errors.NoSuchFile:
+ return None
+ return file_id
+
+ def _get_parents_texts(self, trans_id):
+ """Get texts for compression parents of this file."""
+ file_id = self._text_parent(trans_id)
+ if file_id is None:
+ return ()
+ return (self._tree.get_file_text(file_id),)
+
+ def _get_parents_lines(self, trans_id):
+ """Get lines for compression parents of this file."""
+ file_id = self._text_parent(trans_id)
+ if file_id is None:
+ return ()
+ return (self._tree.get_file_lines(file_id),)
+
+ def serialize(self, serializer):
+ """Serialize this TreeTransform.
+
+ :param serializer: A Serialiser like pack.ContainerSerializer.
+ """
+ new_name = dict((k, v.encode('utf-8')) for k, v in
+ self._new_name.items())
+ new_executability = dict((k, int(v)) for k, v in
+ self._new_executability.items())
+ tree_path_ids = dict((k.encode('utf-8'), v)
+ for k, v in self._tree_path_ids.items())
+ attribs = {
+ '_id_number': self._id_number,
+ '_new_name': new_name,
+ '_new_parent': self._new_parent,
+ '_new_executability': new_executability,
+ '_new_id': self._new_id,
+ '_tree_path_ids': tree_path_ids,
+ '_removed_id': list(self._removed_id),
+ '_removed_contents': list(self._removed_contents),
+ '_non_present_ids': self._non_present_ids,
+ }
+ yield serializer.bytes_record(bencode.bencode(attribs),
+ (('attribs',),))
+ for trans_id, kind in self._new_contents.items():
+ if kind == 'file':
+ lines = osutils.chunks_to_lines(
+ self._read_file_chunks(trans_id))
+ parents = self._get_parents_lines(trans_id)
+ mpdiff = multiparent.MultiParent.from_lines(lines, parents)
+ content = ''.join(mpdiff.to_patch())
+ if kind == 'directory':
+ content = ''
+ if kind == 'symlink':
+ content = self._read_symlink_target(trans_id)
+ yield serializer.bytes_record(content, ((trans_id, kind),))
+
+ def deserialize(self, records):
+ """Deserialize a stored TreeTransform.
+
+ :param records: An iterable of (names, content) tuples, as per
+ pack.ContainerPushParser.
+ """
+ names, content = records.next()
+ attribs = bencode.bdecode(content)
+ self._id_number = attribs['_id_number']
+ self._new_name = dict((k, v.decode('utf-8'))
+ for k, v in attribs['_new_name'].items())
+ self._new_parent = attribs['_new_parent']
+ self._new_executability = dict((k, bool(v)) for k, v in
+ attribs['_new_executability'].items())
+ self._new_id = attribs['_new_id']
+ self._r_new_id = dict((v, k) for k, v in self._new_id.items())
+ self._tree_path_ids = {}
+ self._tree_id_paths = {}
+ for bytepath, trans_id in attribs['_tree_path_ids'].items():
+ path = bytepath.decode('utf-8')
+ self._tree_path_ids[path] = trans_id
+ self._tree_id_paths[trans_id] = path
+ self._removed_id = set(attribs['_removed_id'])
+ self._removed_contents = set(attribs['_removed_contents'])
+ self._non_present_ids = attribs['_non_present_ids']
+ for ((trans_id, kind),), content in records:
+ if kind == 'file':
+ mpdiff = multiparent.MultiParent.from_patch(content)
+ lines = mpdiff.to_lines(self._get_parents_texts(trans_id))
+ self.create_file(lines, trans_id)
+ if kind == 'directory':
+ self.create_directory(trans_id)
+ if kind == 'symlink':
+ self.create_symlink(content.decode('utf-8'), trans_id)
+
+
+class DiskTreeTransform(TreeTransformBase):
+ """Tree transform storing its contents on disk."""
+
+ def __init__(self, tree, limbodir, pb=None,
+ case_sensitive=True):
+ """Constructor.
+ :param tree: The tree that will be transformed, but not necessarily
+ the output tree.
+ :param limbodir: A directory where new files can be stored until
+ they are installed in their proper places
+ :param pb: ignored
+ :param case_sensitive: If True, the target of the transform is
+ case sensitive, not just case preserving.
+ """
+ TreeTransformBase.__init__(self, tree, pb, case_sensitive)
+ self._limbodir = limbodir
+ self._deletiondir = None
+ # A mapping of transform ids to their limbo filename
+ self._limbo_files = {}
+ self._possibly_stale_limbo_files = set()
+ # A mapping of transform ids to a set of the transform ids of children
+ # that their limbo directory has
+ self._limbo_children = {}
+ # Map transform ids to maps of child filename to child transform id
+ self._limbo_children_names = {}
+ # List of transform ids that need to be renamed from limbo into place
+ self._needs_rename = set()
+ self._creation_mtime = None
+
+ def finalize(self):
+ """Release the working tree lock, if held, clean up limbo dir.
+
+ This is required if apply has not been invoked, but can be invoked
+ even after apply.
+ """
+ if self._tree is None:
+ return
+ try:
+ limbo_paths = self._limbo_files.values() + list(
+ self._possibly_stale_limbo_files)
+ limbo_paths = sorted(limbo_paths, reverse=True)
+ for path in limbo_paths:
+ try:
+ delete_any(path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ # XXX: warn? perhaps we just got interrupted at an
+ # inconvenient moment, but perhaps files are disappearing
+ # from under us?
+ try:
+ delete_any(self._limbodir)
+ except OSError:
+ # We don't especially care *why* the dir is immortal.
+ raise ImmortalLimbo(self._limbodir)
+ try:
+ if self._deletiondir is not None:
+ delete_any(self._deletiondir)
+ except OSError:
+ raise errors.ImmortalPendingDeletion(self._deletiondir)
+ finally:
+ TreeTransformBase.finalize(self)
+
+ def _limbo_supports_executable(self):
+ """Check if the limbo path supports the executable bit."""
+ # FIXME: Check actual file system capabilities of limbodir
+ return osutils.supports_executable()
+
+ def _limbo_name(self, trans_id):
+ """Generate the limbo name of a file"""
+ limbo_name = self._limbo_files.get(trans_id)
+ if limbo_name is None:
+ limbo_name = self._generate_limbo_path(trans_id)
+ self._limbo_files[trans_id] = limbo_name
+ return limbo_name
+
+ def _generate_limbo_path(self, trans_id):
+ """Generate a limbo path using the trans_id as the relative path.
+
+ This is suitable as a fallback, and when the transform should not be
+ sensitive to the path encoding of the limbo directory.
+ """
+ self._needs_rename.add(trans_id)
+ return pathjoin(self._limbodir, trans_id)
+
+ def adjust_path(self, name, parent, trans_id):
+ previous_parent = self._new_parent.get(trans_id)
+ previous_name = self._new_name.get(trans_id)
+ TreeTransformBase.adjust_path(self, name, parent, trans_id)
+ if (trans_id in self._limbo_files and
+ trans_id not in self._needs_rename):
+ self._rename_in_limbo([trans_id])
+ if previous_parent != parent:
+ self._limbo_children[previous_parent].remove(trans_id)
+ if previous_parent != parent or previous_name != name:
+ del self._limbo_children_names[previous_parent][previous_name]
+
+ def _rename_in_limbo(self, trans_ids):
+ """Fix limbo names so that the right final path is produced.
+
+ This means we outsmarted ourselves-- we tried to avoid renaming
+ these files later by creating them with their final names in their
+ final parents. But now the previous name or parent is no longer
+ suitable, so we have to rename them.
+
+ Even for trans_ids that have no new contents, we must remove their
+ entries from _limbo_files, because they are now stale.
+ """
+ for trans_id in trans_ids:
+ old_path = self._limbo_files[trans_id]
+ self._possibly_stale_limbo_files.add(old_path)
+ del self._limbo_files[trans_id]
+ if trans_id not in self._new_contents:
+ continue
+ new_path = self._limbo_name(trans_id)
+ os.rename(old_path, new_path)
+ self._possibly_stale_limbo_files.remove(old_path)
+ for descendant in self._limbo_descendants(trans_id):
+ desc_path = self._limbo_files[descendant]
+ desc_path = new_path + desc_path[len(old_path):]
+ self._limbo_files[descendant] = desc_path
+
+ def _limbo_descendants(self, trans_id):
+ """Return the set of trans_ids whose limbo paths descend from this."""
+ descendants = set(self._limbo_children.get(trans_id, []))
+ for descendant in list(descendants):
+ descendants.update(self._limbo_descendants(descendant))
+ return descendants
+
+ def create_file(self, contents, trans_id, mode_id=None, sha1=None):
+ """Schedule creation of a new file.
+
+ :seealso: new_file.
+
+ :param contents: an iterator of strings, all of which will be written
+ to the target destination.
+ :param trans_id: TreeTransform handle
+ :param mode_id: If not None, force the mode of the target file to match
+ the mode of the object referenced by mode_id.
+ Otherwise, we will try to preserve mode bits of an existing file.
+ :param sha1: If the sha1 of this content is already known, pass it in.
+ We can use it to prevent future sha1 computations.
+ """
+ name = self._limbo_name(trans_id)
+ f = open(name, 'wb')
+ try:
+ unique_add(self._new_contents, trans_id, 'file')
+ f.writelines(contents)
+ finally:
+ f.close()
+ self._set_mtime(name)
+ self._set_mode(trans_id, mode_id, S_ISREG)
+ # It is unfortunate we have to use lstat instead of fstat, but we just
+ # used utime and chmod on the file, so we need the accurate final
+ # details.
+ if sha1 is not None:
+ self._observed_sha1s[trans_id] = (sha1, osutils.lstat(name))
+
+ def _read_file_chunks(self, trans_id):
+ cur_file = open(self._limbo_name(trans_id), 'rb')
+ try:
+ return cur_file.readlines()
+ finally:
+ cur_file.close()
+
+ def _read_symlink_target(self, trans_id):
+ return os.readlink(self._limbo_name(trans_id))
+
+ def _set_mtime(self, path):
+ """All files that are created get the same mtime.
+
+ This time is set by the first object to be created.
+ """
+ if self._creation_mtime is None:
+ self._creation_mtime = time.time()
+ os.utime(path, (self._creation_mtime, self._creation_mtime))
+
+ def create_hardlink(self, path, trans_id):
+ """Schedule creation of a hard link"""
+ name = self._limbo_name(trans_id)
+ try:
+ os.link(path, name)
+ except OSError, e:
+ if e.errno != errno.EPERM:
+ raise
+ raise errors.HardLinkNotSupported(path)
+ try:
+ unique_add(self._new_contents, trans_id, 'file')
+ except:
+ # Clean up the file; it never got registered, so
+ # TreeTransform.finalize() won't clean it up.
+ os.unlink(name)
+ raise
+
+ def create_directory(self, trans_id):
+ """Schedule creation of a new directory.
+
+ See also new_directory.
+ """
+ os.mkdir(self._limbo_name(trans_id))
+ unique_add(self._new_contents, trans_id, 'directory')
+
+ def create_symlink(self, target, trans_id):
+ """Schedule creation of a new symbolic link.
+
+ target is a bytestring.
+ See also new_symlink.
+ """
+ if has_symlinks():
+ os.symlink(target, self._limbo_name(trans_id))
+ unique_add(self._new_contents, trans_id, 'symlink')
+ else:
+ try:
+ path = FinalPaths(self).get_path(trans_id)
+ except KeyError:
+ path = None
+ raise UnableCreateSymlink(path=path)
+
+ def cancel_creation(self, trans_id):
+ """Cancel the creation of new file contents."""
+ del self._new_contents[trans_id]
+ if trans_id in self._observed_sha1s:
+ del self._observed_sha1s[trans_id]
+ children = self._limbo_children.get(trans_id)
+ # if this is a limbo directory with children, move them before removing
+ # the directory
+ if children is not None:
+ self._rename_in_limbo(children)
+ del self._limbo_children[trans_id]
+ del self._limbo_children_names[trans_id]
+ delete_any(self._limbo_name(trans_id))
+
+ def new_orphan(self, trans_id, parent_id):
+ conf = self._tree.get_config_stack()
+ handle_orphan = conf.get('bzr.transform.orphan_policy')
+ handle_orphan(self, trans_id, parent_id)
+
+
+class OrphaningError(errors.BzrError):
+
+ # Only bugs could lead to such exception being seen by the user
+ internal_error = True
+ _fmt = "Error while orphaning %s in %s directory"
+
+ def __init__(self, orphan, parent):
+ errors.BzrError.__init__(self)
+ self.orphan = orphan
+ self.parent = parent
+
+
+class OrphaningForbidden(OrphaningError):
+
+ _fmt = "Policy: %s doesn't allow creating orphans."
+
+ def __init__(self, policy):
+ errors.BzrError.__init__(self)
+ self.policy = policy
+
+
+def move_orphan(tt, orphan_id, parent_id):
+ """See TreeTransformBase.new_orphan.
+
+ This creates a new orphan in the `bzr-orphans` dir at the root of the
+ `TreeTransform`.
+
+ :param tt: The TreeTransform orphaning `trans_id`.
+
+ :param orphan_id: The trans id that should be orphaned.
+
+ :param parent_id: The orphan parent trans id.
+ """
+ # Add the orphan dir if it doesn't exist
+ orphan_dir_basename = 'bzr-orphans'
+ od_id = tt.trans_id_tree_path(orphan_dir_basename)
+ if tt.final_kind(od_id) is None:
+ tt.create_directory(od_id)
+ parent_path = tt._tree_id_paths[parent_id]
+ # Find a name that doesn't exist yet in the orphan dir
+ actual_name = tt.final_name(orphan_id)
+ new_name = tt._available_backup_name(actual_name, od_id)
+ tt.adjust_path(new_name, od_id, orphan_id)
+ trace.warning('%s has been orphaned in %s'
+ % (joinpath(parent_path, actual_name), orphan_dir_basename))
+
+
+def refuse_orphan(tt, orphan_id, parent_id):
+ """See TreeTransformBase.new_orphan.
+
+ This refuses to create an orphan, letting the caller handle the conflict.
+ """
+ raise OrphaningForbidden('never')
+
+
+orphaning_registry = registry.Registry()
+orphaning_registry.register(
+ 'conflict', refuse_orphan,
+ 'Leave orphans in place and create a conflict on the directory.')
+orphaning_registry.register(
+ 'move', move_orphan,
+ 'Move orphans into the bzr-orphans directory.')
+orphaning_registry._set_default_key('conflict')
+
+
+opt_transform_orphan = _mod_config.RegistryOption(
+ 'bzr.transform.orphan_policy', orphaning_registry,
+ help='Policy for orphaned files during transform operations.',
+ invalid='warning')
+
+
+class TreeTransform(DiskTreeTransform):
+ """Represent a tree transformation.
+
+ This object is designed to support incremental generation of the transform,
+ in any order.
+
+ However, it gives optimum performance when parent directories are created
+ before their contents. The transform is then able to put child files
+ directly in their parent directory, avoiding later renames.
+
+ It is easy to produce malformed transforms, but they are generally
+ harmless. Attempting to apply a malformed transform will cause an
+ exception to be raised before any modifications are made to the tree.
+
+ Many kinds of malformed transforms can be corrected with the
+ resolve_conflicts function. The remaining ones indicate programming error,
+ such as trying to create a file with no path.
+
+ Two sets of file creation methods are supplied. Convenience methods are:
+ * new_file
+ * new_directory
+ * new_symlink
+
+ These are composed of the low-level methods:
+ * create_path
+ * create_file or create_directory or create_symlink
+ * version_file
+ * set_executability
+
+ Transform/Transaction ids
+ -------------------------
+ trans_ids are temporary ids assigned to all files involved in a transform.
+ It's possible, even common, that not all files in the Tree have trans_ids.
+
+ trans_ids are used because filenames and file_ids are not good enough
+ identifiers; filenames change, and not all files have file_ids. File-ids
+ are also associated with trans-ids, so that moving a file moves its
+ file-id.
+
+ trans_ids are only valid for the TreeTransform that generated them.
+
+ Limbo
+ -----
+ Limbo is a temporary directory used to hold new versions of files.
+ Files are added to limbo by create_file, create_directory, create_symlink,
+ and their convenience variants (new_*). Files may be removed from limbo
+ using cancel_creation. Files are renamed from limbo into their final
+ location as part of TreeTransform.apply.
+
+ Limbo must be cleaned up, by either calling TreeTransform.apply or
+ calling TreeTransform.finalize.
+
+ Files are placed into limbo inside their parent directories, where
+ possible. This reduces subsequent renames, and makes operations involving
+ lots of files faster. This optimization is only possible if the parent
+ directory is created *before* creating any of its children, so avoid
+ creating children before parents, where possible.
+
+ Pending-deletion
+ ----------------
+ This temporary directory is used by _FileMover for storing files that are
+ about to be deleted. In case of rollback, the files will be restored.
+ FileMover does not delete files until it is sure that a rollback will not
+ happen.
+ """
+ def __init__(self, tree, pb=None):
+ """Note: a tree_write lock is taken on the tree.
+
+ Use TreeTransform.finalize() to release the lock (can be omitted if
+ TreeTransform.apply() called).
+ """
+ tree.lock_tree_write()
+
+ try:
+ limbodir = urlutils.local_path_from_url(
+ tree._transport.abspath('limbo'))
+ osutils.ensure_empty_directory_exists(
+ limbodir,
+ errors.ExistingLimbo)
+ deletiondir = urlutils.local_path_from_url(
+ tree._transport.abspath('pending-deletion'))
+ osutils.ensure_empty_directory_exists(
+ deletiondir,
+ errors.ExistingPendingDeletion)
+ except:
+ tree.unlock()
+ raise
+
+ # Cache of realpath results, to speed up canonical_path
+ self._realpaths = {}
+ # Cache of relpath results, to speed up canonical_path
+ self._relpaths = {}
+ DiskTreeTransform.__init__(self, tree, limbodir, pb,
+ tree.case_sensitive)
+ self._deletiondir = deletiondir
+
+ def canonical_path(self, path):
+ """Get the canonical tree-relative path"""
+ # don't follow final symlinks
+ abs = self._tree.abspath(path)
+ if abs in self._relpaths:
+ return self._relpaths[abs]
+ dirname, basename = os.path.split(abs)
+ if dirname not in self._realpaths:
+ self._realpaths[dirname] = os.path.realpath(dirname)
+ dirname = self._realpaths[dirname]
+ abs = pathjoin(dirname, basename)
+ if dirname in self._relpaths:
+ relpath = pathjoin(self._relpaths[dirname], basename)
+ relpath = relpath.rstrip('/\\')
+ else:
+ relpath = self._tree.relpath(abs)
+ self._relpaths[abs] = relpath
+ return relpath
+
+ def tree_kind(self, trans_id):
+ """Determine the file kind in the working tree.
+
+ :returns: The file kind or None if the file does not exist
+ """
+ path = self._tree_id_paths.get(trans_id)
+ if path is None:
+ return None
+ try:
+ return file_kind(self._tree.abspath(path))
+ except errors.NoSuchFile:
+ return None
+
+ def _set_mode(self, trans_id, mode_id, typefunc):
+ """Set the mode of new file contents.
+ The mode_id is the existing file to get the mode from (often the same
+ as trans_id). The operation is only performed if there's a mode match
+ according to typefunc.
+ """
+ if mode_id is None:
+ mode_id = trans_id
+ try:
+ old_path = self._tree_id_paths[mode_id]
+ except KeyError:
+ return
+ try:
+ mode = os.stat(self._tree.abspath(old_path)).st_mode
+ except OSError, e:
+ if e.errno in (errno.ENOENT, errno.ENOTDIR):
+ # Either old_path doesn't exist, or the parent of the
+ # target is not a directory (but will be one eventually)
+ # Either way, we know it doesn't exist *right now*
+ # See also bug #248448
+ return
+ else:
+ raise
+ if typefunc(mode):
+ osutils.chmod_if_possible(self._limbo_name(trans_id), mode)
+
+ def iter_tree_children(self, parent_id):
+ """Iterate through the entry's tree children, if any"""
+ try:
+ path = self._tree_id_paths[parent_id]
+ except KeyError:
+ return
+ try:
+ children = os.listdir(self._tree.abspath(path))
+ except OSError, e:
+ if not (osutils._is_error_enotdir(e)
+ or e.errno in (errno.ENOENT, errno.ESRCH)):
+ raise
+ return
+
+ for child in children:
+ childpath = joinpath(path, child)
+ if self._tree.is_control_filename(childpath):
+ continue
+ yield self.trans_id_tree_path(childpath)
+
+ def _generate_limbo_path(self, trans_id):
+ """Generate a limbo path using the final path if possible.
+
+ This optimizes the performance of applying the tree transform by
+ avoiding renames. These renames can be avoided only when the parent
+ directory is already scheduled for creation.
+
+ If the final path cannot be used, falls back to using the trans_id as
+ the relpath.
+ """
+ parent = self._new_parent.get(trans_id)
+ # if the parent directory is already in limbo (e.g. when building a
+ # tree), choose a limbo name inside the parent, to reduce further
+ # renames.
+ use_direct_path = False
+ if self._new_contents.get(parent) == 'directory':
+ filename = self._new_name.get(trans_id)
+ if filename is not None:
+ if parent not in self._limbo_children:
+ self._limbo_children[parent] = set()
+ self._limbo_children_names[parent] = {}
+ use_direct_path = True
+ # the direct path can only be used if no other file has
+ # already taken this pathname, i.e. if the name is unused, or
+ # if it is already associated with this trans_id.
+ elif self._case_sensitive_target:
+ if (self._limbo_children_names[parent].get(filename)
+ in (trans_id, None)):
+ use_direct_path = True
+ else:
+ for l_filename, l_trans_id in\
+ self._limbo_children_names[parent].iteritems():
+ if l_trans_id == trans_id:
+ continue
+ if l_filename.lower() == filename.lower():
+ break
+ else:
+ use_direct_path = True
+
+ if not use_direct_path:
+ return DiskTreeTransform._generate_limbo_path(self, trans_id)
+
+ limbo_name = pathjoin(self._limbo_files[parent], filename)
+ self._limbo_children[parent].add(trans_id)
+ self._limbo_children_names[parent][filename] = trans_id
+ return limbo_name
+
+ def apply(self, no_conflicts=False, precomputed_delta=None, _mover=None):
+ """Apply all changes to the inventory and filesystem.
+
+ If filesystem or inventory conflicts are present, MalformedTransform
+ will be thrown.
+
+ If apply succeeds, finalize is not necessary.
+
+ :param no_conflicts: if True, the caller guarantees there are no
+ conflicts, so no check is made.
+ :param precomputed_delta: An inventory delta to use instead of
+ calculating one.
+ :param _mover: Supply an alternate FileMover, for testing
+ """
+ for hook in MutableTree.hooks['pre_transform']:
+ hook(self._tree, self)
+ if not no_conflicts:
+ self._check_malformed()
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ if precomputed_delta is None:
+ child_pb.update(gettext('Apply phase'), 0, 2)
+ inventory_delta = self._generate_inventory_delta()
+ offset = 1
+ else:
+ inventory_delta = precomputed_delta
+ offset = 0
+ if _mover is None:
+ mover = _FileMover()
+ else:
+ mover = _mover
+ try:
+ child_pb.update(gettext('Apply phase'), 0 + offset, 2 + offset)
+ self._apply_removals(mover)
+ child_pb.update(gettext('Apply phase'), 1 + offset, 2 + offset)
+ modified_paths = self._apply_insertions(mover)
+ except:
+ mover.rollback()
+ raise
+ else:
+ mover.apply_deletions()
+ finally:
+ child_pb.finished()
+ if self.final_file_id(self.root) is None:
+ inventory_delta = [e for e in inventory_delta if e[0] != '']
+ self._tree.apply_inventory_delta(inventory_delta)
+ self._apply_observed_sha1s()
+ self._done = True
+ self.finalize()
+ return _TransformResults(modified_paths, self.rename_count)
+
+ def _generate_inventory_delta(self):
+ """Generate an inventory delta for the current transform."""
+ inventory_delta = []
+ child_pb = ui.ui_factory.nested_progress_bar()
+ new_paths = self._inventory_altered()
+ total_entries = len(new_paths) + len(self._removed_id)
+ try:
+ for num, trans_id in enumerate(self._removed_id):
+ if (num % 10) == 0:
+ child_pb.update(gettext('removing file'), num, total_entries)
+ if trans_id == self._new_root:
+ file_id = self._tree.get_root_id()
+ else:
+ file_id = self.tree_file_id(trans_id)
+ # File-id isn't really being deleted, just moved
+ if file_id in self._r_new_id:
+ continue
+ path = self._tree_id_paths[trans_id]
+ inventory_delta.append((path, None, file_id, None))
+ new_path_file_ids = dict((t, self.final_file_id(t)) for p, t in
+ new_paths)
+ entries = self._tree.iter_entries_by_dir(
+ new_path_file_ids.values())
+ old_paths = dict((e.file_id, p) for p, e in entries)
+ final_kinds = {}
+ for num, (path, trans_id) in enumerate(new_paths):
+ if (num % 10) == 0:
+ child_pb.update(gettext('adding file'),
+ num + len(self._removed_id), total_entries)
+ file_id = new_path_file_ids[trans_id]
+ if file_id is None:
+ continue
+ needs_entry = False
+ kind = self.final_kind(trans_id)
+ if kind is None:
+ kind = self._tree.stored_kind(file_id)
+ parent_trans_id = self.final_parent(trans_id)
+ parent_file_id = new_path_file_ids.get(parent_trans_id)
+ if parent_file_id is None:
+ parent_file_id = self.final_file_id(parent_trans_id)
+ if trans_id in self._new_reference_revision:
+ new_entry = inventory.TreeReference(
+ file_id,
+ self._new_name[trans_id],
+ self.final_file_id(self._new_parent[trans_id]),
+ None, self._new_reference_revision[trans_id])
+ else:
+ new_entry = inventory.make_entry(kind,
+ self.final_name(trans_id),
+ parent_file_id, file_id)
+ old_path = old_paths.get(new_entry.file_id)
+ new_executability = self._new_executability.get(trans_id)
+ if new_executability is not None:
+ new_entry.executable = new_executability
+ inventory_delta.append(
+ (old_path, path, new_entry.file_id, new_entry))
+ finally:
+ child_pb.finished()
+ return inventory_delta
+
+ def _apply_removals(self, mover):
+ """Perform tree operations that remove directory/inventory names.
+
+ That is, delete files that are to be deleted, and put any files that
+ need renaming into limbo. This must be done in strict child-to-parent
+ order.
+
+ If inventory_delta is None, no inventory delta generation is performed.
+ """
+ tree_paths = list(self._tree_path_ids.iteritems())
+ tree_paths.sort(reverse=True)
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for num, (path, trans_id) in enumerate(tree_paths):
+ # do not attempt to move root into a subdirectory of itself.
+ if path == '':
+ continue
+ child_pb.update(gettext('removing file'), num, len(tree_paths))
+ full_path = self._tree.abspath(path)
+ if trans_id in self._removed_contents:
+ delete_path = os.path.join(self._deletiondir, trans_id)
+ mover.pre_delete(full_path, delete_path)
+ elif (trans_id in self._new_name
+ or trans_id in self._new_parent):
+ try:
+ mover.rename(full_path, self._limbo_name(trans_id))
+ except errors.TransformRenameFailed, e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ self.rename_count += 1
+ finally:
+ child_pb.finished()
+
+ def _apply_insertions(self, mover):
+ """Perform tree operations that insert directory/inventory names.
+
+ That is, create any files that need to be created, and restore from
+ limbo any files that needed renaming. This must be done in strict
+ parent-to-child order.
+
+ If inventory_delta is None, no inventory delta is calculated, and
+ no list of modified paths is returned.
+ """
+ new_paths = self.new_paths(filesystem_only=True)
+ modified_paths = []
+ new_path_file_ids = dict((t, self.final_file_id(t)) for p, t in
+ new_paths)
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for num, (path, trans_id) in enumerate(new_paths):
+ if (num % 10) == 0:
+ child_pb.update(gettext('adding file'), num, len(new_paths))
+ full_path = self._tree.abspath(path)
+ if trans_id in self._needs_rename:
+ try:
+ mover.rename(self._limbo_name(trans_id), full_path)
+ except errors.TransformRenameFailed, e:
+ # We may be renaming a dangling inventory id
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ self.rename_count += 1
+ # TODO: if trans_id in self._observed_sha1s, we should
+ # re-stat the final target, since ctime will be
+ # updated by the change.
+ if (trans_id in self._new_contents or
+ self.path_changed(trans_id)):
+ if trans_id in self._new_contents:
+ modified_paths.append(full_path)
+ if trans_id in self._new_executability:
+ self._set_executability(path, trans_id)
+ if trans_id in self._observed_sha1s:
+ o_sha1, o_st_val = self._observed_sha1s[trans_id]
+ st = osutils.lstat(full_path)
+ self._observed_sha1s[trans_id] = (o_sha1, st)
+ finally:
+ child_pb.finished()
+ for path, trans_id in new_paths:
+ # new_paths includes stuff like workingtree conflicts. Only the
+ # stuff in new_contents actually comes from limbo.
+ if trans_id in self._limbo_files:
+ del self._limbo_files[trans_id]
+ self._new_contents.clear()
+ return modified_paths
+
+ def _apply_observed_sha1s(self):
+ """After we have finished renaming everything, update observed sha1s
+
+ This has to be done after self._tree.apply_inventory_delta, otherwise
+ it doesn't know anything about the files we are updating. Also, we want
+ to do this as late as possible, so that most entries end up cached.
+ """
+ # TODO: this doesn't update the stat information for directories. So
+ # the first 'bzr status' will still need to rewrite
+ # .bzr/checkout/dirstate. However, we at least don't need to
+ # re-read all of the files.
+ # TODO: If the operation took a while, we could do a time.sleep(3) here
+ # to allow the clock to tick over and ensure we won't have any
+ # problems. (we could observe start time, and finish time, and if
+ # it is less than eg 10% overhead, add a sleep call.)
+ paths = FinalPaths(self)
+ for trans_id, observed in self._observed_sha1s.iteritems():
+ path = paths.get_path(trans_id)
+ # We could get the file_id, but dirstate prefers to use the path
+ # anyway, and it is 'cheaper' to determine.
+ # file_id = self._new_id[trans_id]
+ self._tree._observed_sha1(None, path, observed)
+
+
+class TransformPreview(DiskTreeTransform):
+ """A TreeTransform for generating preview trees.
+
+ Unlike TreeTransform, this version works when the input tree is a
+ RevisionTree, rather than a WorkingTree. As a result, it tends to ignore
+ unversioned files in the input tree.
+ """
+
+ def __init__(self, tree, pb=None, case_sensitive=True):
+ tree.lock_read()
+ limbodir = osutils.mkdtemp(prefix='bzr-limbo-')
+ DiskTreeTransform.__init__(self, tree, limbodir, pb, case_sensitive)
+
+ def canonical_path(self, path):
+ return path
+
+ def tree_kind(self, trans_id):
+ path = self._tree_id_paths.get(trans_id)
+ if path is None:
+ return None
+ kind = self._tree.path_content_summary(path)[0]
+ if kind == 'missing':
+ kind = None
+ return kind
+
+ def _set_mode(self, trans_id, mode_id, typefunc):
+ """Set the mode of new file contents.
+ The mode_id is the existing file to get the mode from (often the same
+ as trans_id). The operation is only performed if there's a mode match
+ according to typefunc.
+ """
+ # is it ok to ignore this? probably
+ pass
+
+ def iter_tree_children(self, parent_id):
+ """Iterate through the entry's tree children, if any"""
+ try:
+ path = self._tree_id_paths[parent_id]
+ except KeyError:
+ return
+ file_id = self.tree_file_id(parent_id)
+ if file_id is None:
+ return
+ entry = self._tree.iter_entries_by_dir([file_id]).next()[1]
+ children = getattr(entry, 'children', {})
+ for child in children:
+ childpath = joinpath(path, child)
+ yield self.trans_id_tree_path(childpath)
+
+ def new_orphan(self, trans_id, parent_id):
+ raise NotImplementedError(self.new_orphan)
+
+
+class _PreviewTree(tree.InventoryTree):
+ """Partial implementation of Tree to support show_diff_trees"""
+
+ def __init__(self, transform):
+ self._transform = transform
+ self._final_paths = FinalPaths(transform)
+ self.__by_parent = None
+ self._parent_ids = []
+ self._all_children_cache = {}
+ self._path2trans_id_cache = {}
+ self._final_name_cache = {}
+ self._iter_changes_cache = dict((c[0], c) for c in
+ self._transform.iter_changes())
+
+ def _content_change(self, file_id):
+ """Return True if the content of this file changed"""
+ changes = self._iter_changes_cache.get(file_id)
+ # changes[2] is true if the file content changed. See
+ # InterTree.iter_changes.
+ return (changes is not None and changes[2])
+
+ def _get_repository(self):
+ repo = getattr(self._transform._tree, '_repository', None)
+ if repo is None:
+ repo = self._transform._tree.branch.repository
+ return repo
+
+ def _iter_parent_trees(self):
+ for revision_id in self.get_parent_ids():
+ try:
+ yield self.revision_tree(revision_id)
+ except errors.NoSuchRevisionInTree:
+ yield self._get_repository().revision_tree(revision_id)
+
+ def _get_file_revision(self, file_id, vf, tree_revision):
+ parent_keys = [(file_id, t.get_file_revision(file_id)) for t in
+ self._iter_parent_trees()]
+ vf.add_lines((file_id, tree_revision), parent_keys,
+ self.get_file_lines(file_id))
+ repo = self._get_repository()
+ base_vf = repo.texts
+ if base_vf not in vf.fallback_versionedfiles:
+ vf.fallback_versionedfiles.append(base_vf)
+ return tree_revision
+
+ def _stat_limbo_file(self, file_id=None, trans_id=None):
+ if trans_id is None:
+ trans_id = self._transform.trans_id_file_id(file_id)
+ name = self._transform._limbo_name(trans_id)
+ return os.lstat(name)
+
+ @property
+ def _by_parent(self):
+ if self.__by_parent is None:
+ self.__by_parent = self._transform.by_parent()
+ return self.__by_parent
+
+ def _comparison_data(self, entry, path):
+ kind, size, executable, link_or_sha1 = self.path_content_summary(path)
+ if kind == 'missing':
+ kind = None
+ executable = False
+ else:
+ file_id = self._transform.final_file_id(self._path2trans_id(path))
+ executable = self.is_executable(file_id, path)
+ return kind, executable, None
+
+ def is_locked(self):
+ return False
+
+ def lock_read(self):
+ # Perhaps in theory, this should lock the TreeTransform?
+ return self
+
+ def unlock(self):
+ pass
+
+ @property
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def inventory(self):
+ """This Tree does not use inventory as its backing data."""
+ raise NotImplementedError(_PreviewTree.inventory)
+
+ @property
+ def root_inventory(self):
+ """This Tree does not use inventory as its backing data."""
+ raise NotImplementedError(_PreviewTree.root_inventory)
+
+ def get_root_id(self):
+ return self._transform.final_file_id(self._transform.root)
+
+ def all_file_ids(self):
+ tree_ids = set(self._transform._tree.all_file_ids())
+ tree_ids.difference_update(self._transform.tree_file_id(t)
+ for t in self._transform._removed_id)
+ tree_ids.update(self._transform._new_id.values())
+ return tree_ids
+
+ def __iter__(self):
+ return iter(self.all_file_ids())
+
+ def _has_id(self, file_id, fallback_check):
+ if file_id in self._transform._r_new_id:
+ return True
+ elif file_id in set([self._transform.tree_file_id(trans_id) for
+ trans_id in self._transform._removed_id]):
+ return False
+ else:
+ return fallback_check(file_id)
+
+ def has_id(self, file_id):
+ return self._has_id(file_id, self._transform._tree.has_id)
+
+ def has_or_had_id(self, file_id):
+ return self._has_id(file_id, self._transform._tree.has_or_had_id)
+
+ def _path2trans_id(self, path):
+ # We must not use None here, because that is a valid value to store.
+ trans_id = self._path2trans_id_cache.get(path, object)
+ if trans_id is not object:
+ return trans_id
+ segments = splitpath(path)
+ cur_parent = self._transform.root
+ for cur_segment in segments:
+ for child in self._all_children(cur_parent):
+ final_name = self._final_name_cache.get(child)
+ if final_name is None:
+ final_name = self._transform.final_name(child)
+ self._final_name_cache[child] = final_name
+ if final_name == cur_segment:
+ cur_parent = child
+ break
+ else:
+ self._path2trans_id_cache[path] = None
+ return None
+ self._path2trans_id_cache[path] = cur_parent
+ return cur_parent
+
+ def path2id(self, path):
+ if isinstance(path, list):
+ if path == []:
+ path = [""]
+ path = osutils.pathjoin(*path)
+ return self._transform.final_file_id(self._path2trans_id(path))
+
+ def id2path(self, file_id):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ try:
+ return self._final_paths._determine_path(trans_id)
+ except NoFinalPath:
+ raise errors.NoSuchId(self, file_id)
+
+ def _all_children(self, trans_id):
+ children = self._all_children_cache.get(trans_id)
+ if children is not None:
+ return children
+ children = set(self._transform.iter_tree_children(trans_id))
+ # children in the _new_parent set are provided by _by_parent.
+ children.difference_update(self._transform._new_parent.keys())
+ children.update(self._by_parent.get(trans_id, []))
+ self._all_children_cache[trans_id] = children
+ return children
+
+ def iter_children(self, file_id):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ for child_trans_id in self._all_children(trans_id):
+ yield self._transform.final_file_id(child_trans_id)
+
+ def extras(self):
+ possible_extras = set(self._transform.trans_id_tree_path(p) for p
+ in self._transform._tree.extras())
+ possible_extras.update(self._transform._new_contents)
+ possible_extras.update(self._transform._removed_id)
+ for trans_id in possible_extras:
+ if self._transform.final_file_id(trans_id) is None:
+ yield self._final_paths._determine_path(trans_id)
+
+ def _make_inv_entries(self, ordered_entries, specific_file_ids=None,
+ yield_parents=False):
+ for trans_id, parent_file_id in ordered_entries:
+ file_id = self._transform.final_file_id(trans_id)
+ if file_id is None:
+ continue
+ if (specific_file_ids is not None
+ and file_id not in specific_file_ids):
+ continue
+ kind = self._transform.final_kind(trans_id)
+ if kind is None:
+ kind = self._transform._tree.stored_kind(file_id)
+ new_entry = inventory.make_entry(
+ kind,
+ self._transform.final_name(trans_id),
+ parent_file_id, file_id)
+ yield new_entry, trans_id
+
+ def _list_files_by_dir(self):
+ todo = [ROOT_PARENT]
+ ordered_ids = []
+ while len(todo) > 0:
+ parent = todo.pop()
+ parent_file_id = self._transform.final_file_id(parent)
+ children = list(self._all_children(parent))
+ paths = dict(zip(children, self._final_paths.get_paths(children)))
+ children.sort(key=paths.get)
+ todo.extend(reversed(children))
+ for trans_id in children:
+ ordered_ids.append((trans_id, parent_file_id))
+ return ordered_ids
+
+ def iter_child_entries(self, file_id, path=None):
+ self.id2path(file_id)
+ trans_id = self._transform.trans_id_file_id(file_id)
+ todo = [(child_trans_id, trans_id) for child_trans_id in
+ self._all_children(trans_id)]
+ for entry, trans_id in self._make_inv_entries(todo):
+ yield entry
+
+ def iter_entries_by_dir(self, specific_file_ids=None, yield_parents=False):
+ # This may not be a maximally efficient implementation, but it is
+ # reasonably straightforward. An implementation that grafts the
+ # TreeTransform changes onto the tree's iter_entries_by_dir results
+ # might be more efficient, but requires tricky inferences about stack
+ # position.
+ ordered_ids = self._list_files_by_dir()
+ for entry, trans_id in self._make_inv_entries(ordered_ids,
+ specific_file_ids, yield_parents=yield_parents):
+ yield unicode(self._final_paths.get_path(trans_id)), entry
+
+ def _iter_entries_for_dir(self, dir_path):
+ """Return path, entry for items in a directory without recursing down."""
+ dir_file_id = self.path2id(dir_path)
+ ordered_ids = []
+ for file_id in self.iter_children(dir_file_id):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ ordered_ids.append((trans_id, file_id))
+ for entry, trans_id in self._make_inv_entries(ordered_ids):
+ yield unicode(self._final_paths.get_path(trans_id)), entry
+
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ """See WorkingTree.list_files."""
+ # XXX This should behave like WorkingTree.list_files, but is really
+ # more like RevisionTree.list_files.
+ if recursive:
+ prefix = None
+ if from_dir:
+ prefix = from_dir + '/'
+ entries = self.iter_entries_by_dir()
+ for path, entry in entries:
+ if entry.name == '' and not include_root:
+ continue
+ if prefix:
+ if not path.startswith(prefix):
+ continue
+ path = path[len(prefix):]
+ yield path, 'V', entry.kind, entry.file_id, entry
+ else:
+ if from_dir is None and include_root is True:
+ root_entry = inventory.make_entry('directory', '',
+ ROOT_PARENT, self.get_root_id())
+ yield '', 'V', 'directory', root_entry.file_id, root_entry
+ entries = self._iter_entries_for_dir(from_dir or '')
+ for path, entry in entries:
+ yield path, 'V', entry.kind, entry.file_id, entry
+
+ def kind(self, file_id):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ return self._transform.final_kind(trans_id)
+
+ def stored_kind(self, file_id):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ try:
+ return self._transform._new_contents[trans_id]
+ except KeyError:
+ return self._transform._tree.stored_kind(file_id)
+
+ def get_file_mtime(self, file_id, path=None):
+ """See Tree.get_file_mtime"""
+ if not self._content_change(file_id):
+ return self._transform._tree.get_file_mtime(file_id)
+ return self._stat_limbo_file(file_id).st_mtime
+
+ def _file_size(self, entry, stat_value):
+ return self.get_file_size(entry.file_id)
+
+ def get_file_size(self, file_id):
+ """See Tree.get_file_size"""
+ trans_id = self._transform.trans_id_file_id(file_id)
+ kind = self._transform.final_kind(trans_id)
+ if kind != 'file':
+ return None
+ if trans_id in self._transform._new_contents:
+ return self._stat_limbo_file(trans_id=trans_id).st_size
+ if self.kind(file_id) == 'file':
+ return self._transform._tree.get_file_size(file_id)
+ else:
+ return None
+
+ def get_file_verifier(self, file_id, path=None, stat_value=None):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ kind = self._transform._new_contents.get(trans_id)
+ if kind is None:
+ return self._transform._tree.get_file_verifier(file_id)
+ if kind == 'file':
+ fileobj = self.get_file(file_id)
+ try:
+ return ("SHA1", sha_file(fileobj))
+ finally:
+ fileobj.close()
+
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ trans_id = self._transform.trans_id_file_id(file_id)
+ kind = self._transform._new_contents.get(trans_id)
+ if kind is None:
+ return self._transform._tree.get_file_sha1(file_id)
+ if kind == 'file':
+ fileobj = self.get_file(file_id)
+ try:
+ return sha_file(fileobj)
+ finally:
+ fileobj.close()
+
+ def is_executable(self, file_id, path=None):
+ if file_id is None:
+ return False
+ trans_id = self._transform.trans_id_file_id(file_id)
+ try:
+ return self._transform._new_executability[trans_id]
+ except KeyError:
+ try:
+ return self._transform._tree.is_executable(file_id, path)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ return False
+ raise
+ except errors.NoSuchId:
+ return False
+
+ def has_filename(self, path):
+ trans_id = self._path2trans_id(path)
+ if trans_id in self._transform._new_contents:
+ return True
+ elif trans_id in self._transform._removed_contents:
+ return False
+ else:
+ return self._transform._tree.has_filename(path)
+
+ def path_content_summary(self, path):
+ trans_id = self._path2trans_id(path)
+ tt = self._transform
+ tree_path = tt._tree_id_paths.get(trans_id)
+ kind = tt._new_contents.get(trans_id)
+ if kind is None:
+ if tree_path is None or trans_id in tt._removed_contents:
+ return 'missing', None, None, None
+ summary = tt._tree.path_content_summary(tree_path)
+ kind, size, executable, link_or_sha1 = summary
+ else:
+ link_or_sha1 = None
+ limbo_name = tt._limbo_name(trans_id)
+ if trans_id in tt._new_reference_revision:
+ kind = 'tree-reference'
+ if kind == 'file':
+ statval = os.lstat(limbo_name)
+ size = statval.st_size
+ if not tt._limbo_supports_executable():
+ executable = False
+ else:
+ executable = statval.st_mode & S_IEXEC
+ else:
+ size = None
+ executable = None
+ if kind == 'symlink':
+ link_or_sha1 = os.readlink(limbo_name).decode(osutils._fs_enc)
+ executable = tt._new_executability.get(trans_id, executable)
+ return kind, size, executable, link_or_sha1
+
+ def iter_changes(self, from_tree, include_unchanged=False,
+ specific_files=None, pb=None, extra_trees=None,
+ require_versioned=True, want_unversioned=False):
+ """See InterTree.iter_changes.
+
+ This has a fast path that is only used when the from_tree matches
+ the transform tree, and no fancy options are supplied.
+ """
+ if (from_tree is not self._transform._tree or include_unchanged or
+ specific_files or want_unversioned):
+ return tree.InterTree(from_tree, self).iter_changes(
+ include_unchanged=include_unchanged,
+ specific_files=specific_files,
+ pb=pb,
+ extra_trees=extra_trees,
+ require_versioned=require_versioned,
+ want_unversioned=want_unversioned)
+ if want_unversioned:
+ raise ValueError('want_unversioned is not supported')
+ return self._transform.iter_changes()
+
+ def get_file(self, file_id, path=None):
+ """See Tree.get_file"""
+ if not self._content_change(file_id):
+ return self._transform._tree.get_file(file_id, path)
+ trans_id = self._transform.trans_id_file_id(file_id)
+ name = self._transform._limbo_name(trans_id)
+ return open(name, 'rb')
+
+ def get_file_with_stat(self, file_id, path=None):
+ return self.get_file(file_id, path), None
+
+ def annotate_iter(self, file_id,
+ default_revision=_mod_revision.CURRENT_REVISION):
+ changes = self._iter_changes_cache.get(file_id)
+ if changes is None:
+ get_old = True
+ else:
+ changed_content, versioned, kind = (changes[2], changes[3],
+ changes[6])
+ if kind[1] is None:
+ return None
+ get_old = (kind[0] == 'file' and versioned[0])
+ if get_old:
+ old_annotation = self._transform._tree.annotate_iter(file_id,
+ default_revision=default_revision)
+ else:
+ old_annotation = []
+ if changes is None:
+ return old_annotation
+ if not changed_content:
+ return old_annotation
+ # TODO: This is doing something similar to what WT.annotate_iter is
+ # doing, however it fails slightly because it doesn't know what
+ # the *other* revision_id is, so it doesn't know how to give the
+ # other as the origin for some lines; they all get
+ # 'default_revision'.
+ # It would be nice to be able to use the new Annotator based
+ # approach, as well.
+ return annotate.reannotate([old_annotation],
+ self.get_file(file_id).readlines(),
+ default_revision)
+
+ def get_symlink_target(self, file_id, path=None):
+ """See Tree.get_symlink_target"""
+ if not self._content_change(file_id):
+ return self._transform._tree.get_symlink_target(file_id)
+ trans_id = self._transform.trans_id_file_id(file_id)
+ name = self._transform._limbo_name(trans_id)
+ return osutils.readlink(name)
+
+ def walkdirs(self, prefix=''):
+ pending = [self._transform.root]
+ while len(pending) > 0:
+ parent_id = pending.pop()
+ children = []
+ subdirs = []
+ prefix = prefix.rstrip('/')
+ parent_path = self._final_paths.get_path(parent_id)
+ parent_file_id = self._transform.final_file_id(parent_id)
+ for child_id in self._all_children(parent_id):
+ path_from_root = self._final_paths.get_path(child_id)
+ basename = self._transform.final_name(child_id)
+ file_id = self._transform.final_file_id(child_id)
+ kind = self._transform.final_kind(child_id)
+ if kind is not None:
+ versioned_kind = kind
+ else:
+ kind = 'unknown'
+ versioned_kind = self._transform._tree.stored_kind(file_id)
+ if versioned_kind == 'directory':
+ subdirs.append(child_id)
+ children.append((path_from_root, basename, kind, None,
+ file_id, versioned_kind))
+ children.sort()
+ if parent_path.startswith(prefix):
+ yield (parent_path, parent_file_id), children
+ pending.extend(sorted(subdirs, key=self._final_paths.get_path,
+ reverse=True))
+
+ def get_parent_ids(self):
+ return self._parent_ids
+
+ def set_parent_ids(self, parent_ids):
+ self._parent_ids = parent_ids
+
+ def get_revision_tree(self, revision_id):
+ return self._transform._tree.get_revision_tree(revision_id)
+
+
+def joinpath(parent, child):
+ """Join tree-relative paths, handling the tree root specially"""
+ if parent is None or parent == "":
+ return child
+ else:
+ return pathjoin(parent, child)
+
+
+class FinalPaths(object):
+ """Make path calculation cheap by memoizing paths.
+
+ The underlying tree must not be manipulated between calls, or else
+ the results will likely be incorrect.
+ """
+ def __init__(self, transform):
+ object.__init__(self)
+ self._known_paths = {}
+ self.transform = transform
+
+ def _determine_path(self, trans_id):
+ if (trans_id == self.transform.root or trans_id == ROOT_PARENT):
+ return ""
+ name = self.transform.final_name(trans_id)
+ parent_id = self.transform.final_parent(trans_id)
+ if parent_id == self.transform.root:
+ return name
+ else:
+ return pathjoin(self.get_path(parent_id), name)
+
+ def get_path(self, trans_id):
+ """Find the final path associated with a trans_id"""
+ if trans_id not in self._known_paths:
+ self._known_paths[trans_id] = self._determine_path(trans_id)
+ return self._known_paths[trans_id]
+
+ def get_paths(self, trans_ids):
+ return [(self.get_path(t), t) for t in trans_ids]
+
+
+def topology_sorted_ids(tree):
+ """Determine the topological order of the ids in a tree"""
+ file_ids = list(tree)
+ file_ids.sort(key=tree.id2path)
+ return file_ids
+
+
+def build_tree(tree, wt, accelerator_tree=None, hardlink=False,
+ delta_from_tree=False):
+ """Create working tree for a branch, using a TreeTransform.
+
+ This function should only be used on empty trees, which contain at most a tree root.
+ (see merge and revert functionality for working with existing trees)
+
+ Existing files are handled like so:
+
+ - Existing bzrdirs take precedence over creating new items. They are
+ created as '%s.diverted' % name.
+ - Otherwise, if the content on disk matches the content we are building,
+ it is silently replaced.
+ - Otherwise, conflict resolution will move the old file to 'oldname.moved'.
+
+ :param tree: The tree to convert wt into a copy of
+ :param wt: The working tree that files will be placed into
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than tree itself, i.e. a workingtree. tree
+ will be used for cases where accelerator_tree's content is different.
+ :param hardlink: If true, hard-link files to accelerator_tree, where
+ possible. accelerator_tree must implement abspath, i.e. be a
+ working tree.
+ :param delta_from_tree: If true, build_tree may use the input Tree to
+ generate the inventory delta.
+ """
+ wt.lock_tree_write()
+ try:
+ tree.lock_read()
+ try:
+ if accelerator_tree is not None:
+ accelerator_tree.lock_read()
+ try:
+ return _build_tree(tree, wt, accelerator_tree, hardlink,
+ delta_from_tree)
+ finally:
+ if accelerator_tree is not None:
+ accelerator_tree.unlock()
+ finally:
+ tree.unlock()
+ finally:
+ wt.unlock()
+
+
+def _build_tree(tree, wt, accelerator_tree, hardlink, delta_from_tree):
+ """See build_tree."""
+ for num, _unused in enumerate(wt.all_file_ids()):
+ if num > 0: # more than just a root
+ raise errors.WorkingTreeAlreadyPopulated(base=wt.basedir)
+ file_trans_id = {}
+ top_pb = ui.ui_factory.nested_progress_bar()
+ pp = ProgressPhase("Build phase", 2, top_pb)
+ if tree.get_root_id() is not None:
+ # This is kind of a hack: we should be altering the root
+ # as part of the regular tree shape diff logic.
+ # The conditional test here is to avoid doing an
+ # expensive operation (flush) every time the root id
+ # is set within the tree, and to avoid setting the root
+ # and thus marking the tree as dirty, because we use two
+ # different idioms here: tree interfaces and inventory
+ # interfaces.
+ if wt.get_root_id() != tree.get_root_id():
+ wt.set_root_id(tree.get_root_id())
+ wt.flush()
+ tt = TreeTransform(wt)
+ divert = set()
+ try:
+ pp.next_phase()
+ file_trans_id[wt.get_root_id()] = \
+ tt.trans_id_tree_file_id(wt.get_root_id())
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ deferred_contents = []
+ num = 0
+ total = len(tree.all_file_ids())
+ if delta_from_tree:
+ precomputed_delta = []
+ else:
+ precomputed_delta = None
+ # Check if tree inventory has content. If so, we populate
+ # existing_files with the directory content. If there are no
+ # entries we skip populating existing_files as it's not used.
+ # This improves performance and avoids unnecessary work on
+ # large directory trees. (#501307)
+ if total > 0:
+ existing_files = set()
+ for dir, files in wt.walkdirs():
+ existing_files.update(f[0] for f in files)
+ for num, (tree_path, entry) in \
+ enumerate(tree.iter_entries_by_dir()):
+ pb.update(gettext("Building tree"), num - len(deferred_contents), total)
+ if entry.parent_id is None:
+ continue
+ reparent = False
+ file_id = entry.file_id
+ if delta_from_tree:
+ precomputed_delta.append((None, tree_path, file_id, entry))
+ if tree_path in existing_files:
+ target_path = wt.abspath(tree_path)
+ kind = file_kind(target_path)
+ if kind == "directory":
+ try:
+ controldir.ControlDir.open(target_path)
+ except errors.NotBranchError:
+ pass
+ else:
+ divert.add(file_id)
+ if (file_id not in divert and
+ _content_match(tree, entry, file_id, kind,
+ target_path)):
+ tt.delete_contents(tt.trans_id_tree_path(tree_path))
+ if kind == 'directory':
+ reparent = True
+ parent_id = file_trans_id[entry.parent_id]
+ if entry.kind == 'file':
+ # We *almost* replicate new_by_entry, so that we can defer
+ # getting the file text, and get them all at once.
+ trans_id = tt.create_path(entry.name, parent_id)
+ file_trans_id[file_id] = trans_id
+ tt.version_file(file_id, trans_id)
+ executable = tree.is_executable(file_id, tree_path)
+ if executable:
+ tt.set_executability(executable, trans_id)
+ trans_data = (trans_id, tree_path, entry.text_sha1)
+ deferred_contents.append((file_id, trans_data))
+ else:
+ file_trans_id[file_id] = new_by_entry(tt, entry, parent_id,
+ tree)
+ if reparent:
+ new_trans_id = file_trans_id[file_id]
+ old_parent = tt.trans_id_tree_path(tree_path)
+ _reparent_children(tt, old_parent, new_trans_id)
+ offset = num + 1 - len(deferred_contents)
+ _create_files(tt, tree, deferred_contents, pb, offset,
+ accelerator_tree, hardlink)
+ finally:
+ pb.finished()
+ pp.next_phase()
+ divert_trans = set(file_trans_id[f] for f in divert)
+ resolver = lambda t, c: resolve_checkout(t, c, divert_trans)
+ raw_conflicts = resolve_conflicts(tt, pass_func=resolver)
+ if len(raw_conflicts) > 0:
+ precomputed_delta = None
+ conflicts = cook_conflicts(raw_conflicts, tt)
+ for conflict in conflicts:
+ trace.warning(unicode(conflict))
+ try:
+ wt.add_conflicts(conflicts)
+ except errors.UnsupportedOperation:
+ pass
+ result = tt.apply(no_conflicts=True,
+ precomputed_delta=precomputed_delta)
+ finally:
+ tt.finalize()
+ top_pb.finished()
+ return result
+
+
+def _create_files(tt, tree, desired_files, pb, offset, accelerator_tree,
+ hardlink):
+ total = len(desired_files) + offset
+ wt = tt._tree
+ if accelerator_tree is None:
+ new_desired_files = desired_files
+ else:
+ iter = accelerator_tree.iter_changes(tree, include_unchanged=True)
+ unchanged = [(f, p[1]) for (f, p, c, v, d, n, k, e)
+ in iter if not (c or e[0] != e[1])]
+ if accelerator_tree.supports_content_filtering():
+ unchanged = [(f, p) for (f, p) in unchanged
+ if not accelerator_tree.iter_search_rules([p]).next()]
+ unchanged = dict(unchanged)
+ new_desired_files = []
+ count = 0
+ for file_id, (trans_id, tree_path, text_sha1) in desired_files:
+ accelerator_path = unchanged.get(file_id)
+ if accelerator_path is None:
+ new_desired_files.append((file_id,
+ (trans_id, tree_path, text_sha1)))
+ continue
+ pb.update(gettext('Adding file contents'), count + offset, total)
+ if hardlink:
+ tt.create_hardlink(accelerator_tree.abspath(accelerator_path),
+ trans_id)
+ else:
+ contents = accelerator_tree.get_file(file_id, accelerator_path)
+ if wt.supports_content_filtering():
+ filters = wt._content_filter_stack(tree_path)
+ contents = filtered_output_bytes(contents, filters,
+ ContentFilterContext(tree_path, tree))
+ try:
+ tt.create_file(contents, trans_id, sha1=text_sha1)
+ finally:
+ try:
+ contents.close()
+ except AttributeError:
+ # after filtering, contents may no longer be file-like
+ pass
+ count += 1
+ offset += count
+ for count, ((trans_id, tree_path, text_sha1), contents) in enumerate(
+ tree.iter_files_bytes(new_desired_files)):
+ if wt.supports_content_filtering():
+ filters = wt._content_filter_stack(tree_path)
+ contents = filtered_output_bytes(contents, filters,
+ ContentFilterContext(tree_path, tree))
+ tt.create_file(contents, trans_id, sha1=text_sha1)
+ pb.update(gettext('Adding file contents'), count + offset, total)
+
+
+def _reparent_children(tt, old_parent, new_parent):
+ for child in tt.iter_tree_children(old_parent):
+ tt.adjust_path(tt.final_name(child), new_parent, child)
+
+
+def _reparent_transform_children(tt, old_parent, new_parent):
+ by_parent = tt.by_parent()
+ for child in by_parent[old_parent]:
+ tt.adjust_path(tt.final_name(child), new_parent, child)
+ return by_parent[old_parent]
+
+
+def _content_match(tree, entry, file_id, kind, target_path):
+ if entry.kind != kind:
+ return False
+ if entry.kind == "directory":
+ return True
+ if entry.kind == "file":
+ f = file(target_path, 'rb')
+ try:
+ if tree.get_file_text(file_id) == f.read():
+ return True
+ finally:
+ f.close()
+ elif entry.kind == "symlink":
+ if tree.get_symlink_target(file_id) == os.readlink(target_path):
+ return True
+ return False
+
+
+def resolve_checkout(tt, conflicts, divert):
+ new_conflicts = set()
+ for c_type, conflict in ((c[0], c) for c in conflicts):
+ # Anything but a 'duplicate' would indicate programmer error
+ if c_type != 'duplicate':
+ raise AssertionError(c_type)
+ # Now figure out which is new and which is old
+ if tt.new_contents(conflict[1]):
+ new_file = conflict[1]
+ old_file = conflict[2]
+ else:
+ new_file = conflict[2]
+ old_file = conflict[1]
+
+ # We should only get here if the conflict wasn't completely
+ # resolved
+ final_parent = tt.final_parent(old_file)
+ if new_file in divert:
+ new_name = tt.final_name(old_file)+'.diverted'
+ tt.adjust_path(new_name, final_parent, new_file)
+ new_conflicts.add((c_type, 'Diverted to',
+ new_file, old_file))
+ else:
+ new_name = tt.final_name(old_file)+'.moved'
+ tt.adjust_path(new_name, final_parent, old_file)
+ new_conflicts.add((c_type, 'Moved existing file to',
+ old_file, new_file))
+ return new_conflicts
+
+
+def new_by_entry(tt, entry, parent_id, tree):
+ """Create a new file according to its inventory entry"""
+ name = entry.name
+ kind = entry.kind
+ if kind == 'file':
+ contents = tree.get_file(entry.file_id).readlines()
+ executable = tree.is_executable(entry.file_id)
+ return tt.new_file(name, parent_id, contents, entry.file_id,
+ executable)
+ elif kind in ('directory', 'tree-reference'):
+ trans_id = tt.new_directory(name, parent_id, entry.file_id)
+ if kind == 'tree-reference':
+ tt.set_tree_reference(entry.reference_revision, trans_id)
+ return trans_id
+ elif kind == 'symlink':
+ target = tree.get_symlink_target(entry.file_id)
+ return tt.new_symlink(name, parent_id, target, entry.file_id)
+ else:
+ raise errors.BadFileKindError(name, kind)
+
+
+def create_from_tree(tt, trans_id, tree, file_id, bytes=None,
+ filter_tree_path=None):
+ """Create new file contents according to tree contents.
+
+ :param filter_tree_path: the tree path to use to lookup
+ content filters to apply to the bytes output in the working tree.
+ This only applies if the working tree supports content filtering.
+ """
+ kind = tree.kind(file_id)
+ if kind == 'directory':
+ tt.create_directory(trans_id)
+ elif kind == "file":
+ if bytes is None:
+ tree_file = tree.get_file(file_id)
+ try:
+ bytes = tree_file.readlines()
+ finally:
+ tree_file.close()
+ wt = tt._tree
+ if wt.supports_content_filtering() and filter_tree_path is not None:
+ filters = wt._content_filter_stack(filter_tree_path)
+ bytes = filtered_output_bytes(bytes, filters,
+ ContentFilterContext(filter_tree_path, tree))
+ tt.create_file(bytes, trans_id)
+ elif kind == "symlink":
+ tt.create_symlink(tree.get_symlink_target(file_id), trans_id)
+ else:
+ raise AssertionError('Unknown kind %r' % kind)
+
+
+def create_entry_executability(tt, entry, trans_id):
+ """Set the executability of a trans_id according to an inventory entry"""
+ if entry.kind == "file":
+ tt.set_executability(entry.executable, trans_id)
+
+
+def revert(working_tree, target_tree, filenames, backups=False,
+ pb=None, change_reporter=None):
+ """Revert a working tree's contents to those of a target tree."""
+ target_tree.lock_read()
+ pb = ui.ui_factory.nested_progress_bar()
+ tt = TreeTransform(working_tree, pb)
+ try:
+ pp = ProgressPhase("Revert phase", 3, pb)
+ conflicts, merge_modified = _prepare_revert_transform(
+ working_tree, target_tree, tt, filenames, backups, pp)
+ if change_reporter:
+ change_reporter = delta._ChangeReporter(
+ unversioned_filter=working_tree.is_ignored)
+ delta.report_changes(tt.iter_changes(), change_reporter)
+ for conflict in conflicts:
+ trace.warning(unicode(conflict))
+ pp.next_phase()
+ tt.apply()
+ working_tree.set_merge_modified(merge_modified)
+ finally:
+ target_tree.unlock()
+ tt.finalize()
+ pb.clear()
+ return conflicts
+
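+# A minimal usage sketch for revert() (illustrative only; `wt` stands for any
+# WorkingTree).  Passing filenames=None reverts the whole tree back to its
+# basis and returns the conflicts that could not be resolved automatically:
+#
+#   conflicts = revert(wt, wt.basis_tree(), None, backups=True)
+#   for conflict in conflicts:
+#       print conflict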
+
+def _prepare_revert_transform(working_tree, target_tree, tt, filenames,
+ backups, pp, basis_tree=None,
+ merge_modified=None):
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ if merge_modified is None:
+ merge_modified = working_tree.merge_modified()
+ merge_modified = _alter_files(working_tree, target_tree, tt,
+ child_pb, filenames, backups,
+ merge_modified, basis_tree)
+ finally:
+ child_pb.finished()
+ child_pb = ui.ui_factory.nested_progress_bar()
+ try:
+ raw_conflicts = resolve_conflicts(tt, child_pb,
+ lambda t, c: conflict_pass(t, c, target_tree))
+ finally:
+ child_pb.finished()
+ conflicts = cook_conflicts(raw_conflicts, tt)
+ return conflicts, merge_modified
+
+
+def _alter_files(working_tree, target_tree, tt, pb, specific_files,
+ backups, merge_modified, basis_tree=None):
+ if basis_tree is not None:
+ basis_tree.lock_read()
+ # We ask the working_tree for its changes relative to the target, rather
+ # than the target changes relative to the working tree, because WT4 has
+ # an optimizer to compare itself to a target, but no optimizer for the
+ # reverse.
+ change_list = working_tree.iter_changes(target_tree,
+ specific_files=specific_files, pb=pb)
+ if target_tree.get_root_id() is None:
+ skip_root = True
+ else:
+ skip_root = False
+ try:
+ deferred_files = []
+ for id_num, (file_id, path, changed_content, versioned, parent, name,
+ kind, executable) in enumerate(change_list):
+ target_path, wt_path = path
+ target_versioned, wt_versioned = versioned
+ target_parent, wt_parent = parent
+ target_name, wt_name = name
+ target_kind, wt_kind = kind
+ target_executable, wt_executable = executable
+ if skip_root and wt_parent is None:
+ continue
+ trans_id = tt.trans_id_file_id(file_id)
+ mode_id = None
+ if changed_content:
+ keep_content = False
+ if wt_kind == 'file' and (backups or target_kind is None):
+ wt_sha1 = working_tree.get_file_sha1(file_id)
+ if merge_modified.get(file_id) != wt_sha1:
+ # acquire the basis tree lazily to prevent the
+ # expense of accessing it when it's not needed ?
+ # (Guessing, RBC, 200702)
+ if basis_tree is None:
+ basis_tree = working_tree.basis_tree()
+ basis_tree.lock_read()
+ if basis_tree.has_id(file_id):
+ if wt_sha1 != basis_tree.get_file_sha1(file_id):
+ keep_content = True
+ elif target_kind is None and not target_versioned:
+ keep_content = True
+ if wt_kind is not None:
+ if not keep_content:
+ tt.delete_contents(trans_id)
+ elif target_kind is not None:
+ parent_trans_id = tt.trans_id_file_id(wt_parent)
+ backup_name = tt._available_backup_name(
+ wt_name, parent_trans_id)
+ tt.adjust_path(backup_name, parent_trans_id, trans_id)
+ new_trans_id = tt.create_path(wt_name, parent_trans_id)
+ if wt_versioned and target_versioned:
+ tt.unversion_file(trans_id)
+ tt.version_file(file_id, new_trans_id)
+ # New contents should have the same unix perms as old
+ # contents
+ mode_id = trans_id
+ trans_id = new_trans_id
+ if target_kind in ('directory', 'tree-reference'):
+ tt.create_directory(trans_id)
+ if target_kind == 'tree-reference':
+ revision = target_tree.get_reference_revision(file_id,
+ target_path)
+ tt.set_tree_reference(revision, trans_id)
+ elif target_kind == 'symlink':
+ tt.create_symlink(target_tree.get_symlink_target(file_id),
+ trans_id)
+ elif target_kind == 'file':
+ deferred_files.append((file_id, (trans_id, mode_id)))
+ if basis_tree is None:
+ basis_tree = working_tree.basis_tree()
+ basis_tree.lock_read()
+ new_sha1 = target_tree.get_file_sha1(file_id)
+ if (basis_tree.has_id(file_id) and
+ new_sha1 == basis_tree.get_file_sha1(file_id)):
+ if file_id in merge_modified:
+ del merge_modified[file_id]
+ else:
+ merge_modified[file_id] = new_sha1
+
+ # preserve the execute bit when backing up
+ if keep_content and wt_executable == target_executable:
+ tt.set_executability(target_executable, trans_id)
+ elif target_kind is not None:
+ raise AssertionError(target_kind)
+ if not wt_versioned and target_versioned:
+ tt.version_file(file_id, trans_id)
+ if wt_versioned and not target_versioned:
+ tt.unversion_file(trans_id)
+ if (target_name is not None and
+ (wt_name != target_name or wt_parent != target_parent)):
+ if target_name == '' and target_parent is None:
+ parent_trans = ROOT_PARENT
+ else:
+ parent_trans = tt.trans_id_file_id(target_parent)
+ if wt_parent is None and wt_versioned:
+ tt.adjust_root_path(target_name, parent_trans)
+ else:
+ tt.adjust_path(target_name, parent_trans, trans_id)
+ if wt_executable != target_executable and target_kind == "file":
+ tt.set_executability(target_executable, trans_id)
+ if working_tree.supports_content_filtering():
+ for index, ((trans_id, mode_id), bytes) in enumerate(
+ target_tree.iter_files_bytes(deferred_files)):
+ file_id = deferred_files[index][0]
+ # We're reverting a tree to the target tree so using the
+ # target tree to find the file path seems the best choice
+ # here IMO - Ian C 27/Oct/2009
+ filter_tree_path = target_tree.id2path(file_id)
+ filters = working_tree._content_filter_stack(filter_tree_path)
+ bytes = filtered_output_bytes(bytes, filters,
+ ContentFilterContext(filter_tree_path, working_tree))
+ tt.create_file(bytes, trans_id, mode_id)
+ else:
+ for (trans_id, mode_id), bytes in target_tree.iter_files_bytes(
+ deferred_files):
+ tt.create_file(bytes, trans_id, mode_id)
+ tt.fixup_new_roots()
+ finally:
+ if basis_tree is not None:
+ basis_tree.unlock()
+ return merge_modified
+
+
+def resolve_conflicts(tt, pb=None, pass_func=None):
+ """Make many conflict-resolution attempts, but die if they fail"""
+ if pass_func is None:
+ pass_func = conflict_pass
+ new_conflicts = set()
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ for n in range(10):
+ pb.update(gettext('Resolution pass'), n+1, 10)
+ conflicts = tt.find_conflicts()
+ if len(conflicts) == 0:
+ return new_conflicts
+ new_conflicts.update(pass_func(tt, conflicts))
+ raise MalformedTransform(conflicts=conflicts)
+ finally:
+ pb.finished()
+
+
+def conflict_pass(tt, conflicts, path_tree=None):
+ """Resolve some classes of conflicts.
+
+ :param tt: The transform to resolve conflicts in
+ :param conflicts: The conflicts to resolve
+ :param path_tree: A Tree to get supplemental paths from
+ """
+ new_conflicts = set()
+ for c_type, conflict in ((c[0], c) for c in conflicts):
+ if c_type == 'duplicate id':
+ tt.unversion_file(conflict[1])
+ new_conflicts.add((c_type, 'Unversioned existing file',
+ conflict[1], conflict[2], ))
+ elif c_type == 'duplicate':
+ # files that were renamed take precedence
+ final_parent = tt.final_parent(conflict[1])
+ if tt.path_changed(conflict[1]):
+ existing_file, new_file = conflict[2], conflict[1]
+ else:
+ existing_file, new_file = conflict[1], conflict[2]
+ new_name = tt.final_name(existing_file) + '.moved'
+ tt.adjust_path(new_name, final_parent, existing_file)
+ new_conflicts.add((c_type, 'Moved existing file to',
+ existing_file, new_file))
+ elif c_type == 'parent loop':
+ # break the loop by undoing one of the ops that caused the loop
+ cur = conflict[1]
+ while not tt.path_changed(cur):
+ cur = tt.final_parent(cur)
+ new_conflicts.add((c_type, 'Cancelled move', cur,
+ tt.final_parent(cur),))
+ tt.adjust_path(tt.final_name(cur), tt.get_tree_parent(cur), cur)
+
+ elif c_type == 'missing parent':
+ trans_id = conflict[1]
+ if trans_id in tt._removed_contents:
+ cancel_deletion = True
+ orphans = tt._get_potential_orphans(trans_id)
+ if orphans:
+ cancel_deletion = False
+ # All children are orphans
+ for o in orphans:
+ try:
+ tt.new_orphan(o, trans_id)
+ except OrphaningError:
+ # Something bad happened so we cancel the directory
+ # deletion which will leave it in place with a
+ # conflict. The user can deal with it from there.
+ # Note that this also catches the case where we don't
+ # want to create orphans and leave the directory in
+ # place.
+ cancel_deletion = True
+ break
+ if cancel_deletion:
+ # Cancel the directory deletion
+ tt.cancel_deletion(trans_id)
+ new_conflicts.add(('deleting parent', 'Not deleting',
+ trans_id))
+ else:
+ create = True
+ try:
+ tt.final_name(trans_id)
+ except NoFinalPath:
+ if path_tree is not None:
+ file_id = tt.final_file_id(trans_id)
+ if file_id is None:
+ file_id = tt.inactive_file_id(trans_id)
+ _, entry = path_tree.iter_entries_by_dir(
+ [file_id]).next()
+ # special-case the other tree root (move its
+ # children to current root)
+ if entry.parent_id is None:
+ create = False
+ moved = _reparent_transform_children(
+ tt, trans_id, tt.root)
+ for child in moved:
+ new_conflicts.add((c_type, 'Moved to root',
+ child))
+ else:
+ parent_trans_id = tt.trans_id_file_id(
+ entry.parent_id)
+ tt.adjust_path(entry.name, parent_trans_id,
+ trans_id)
+ if create:
+ tt.create_directory(trans_id)
+ new_conflicts.add((c_type, 'Created directory', trans_id))
+ elif c_type == 'unversioned parent':
+ file_id = tt.inactive_file_id(conflict[1])
+ # special-case the other tree root (move its children instead)
+ if path_tree and path_tree.has_id(file_id):
+ if path_tree.path2id('') == file_id:
+ # This is the root entry, skip it
+ continue
+ tt.version_file(file_id, conflict[1])
+ new_conflicts.add((c_type, 'Versioned directory', conflict[1]))
+ elif c_type == 'non-directory parent':
+ parent_id = conflict[1]
+ parent_parent = tt.final_parent(parent_id)
+ parent_name = tt.final_name(parent_id)
+ parent_file_id = tt.final_file_id(parent_id)
+ new_parent_id = tt.new_directory(parent_name + '.new',
+ parent_parent, parent_file_id)
+ _reparent_transform_children(tt, parent_id, new_parent_id)
+ if parent_file_id is not None:
+ tt.unversion_file(parent_id)
+ new_conflicts.add((c_type, 'Created directory', new_parent_id))
+ elif c_type == 'versioning no contents':
+ tt.cancel_versioning(conflict[1])
+ return new_conflicts
+
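+# resolve_conflicts() accepts an alternative pass function; the tree-building
+# code above relies on this to divert colliding files rather than moving them
+# aside.  A sketch of wiring in a custom resolver (tt and divert_trans as in
+# the code above):
+#
+#   resolver = lambda t, c: resolve_checkout(t, c, divert_trans)
+#   raw_conflicts = resolve_conflicts(tt, pass_func=resolver)
+#   cooked = cook_conflicts(raw_conflicts, tt)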
+
+def cook_conflicts(raw_conflicts, tt):
+ """Generate a list of cooked conflicts, sorted by file path"""
+ conflict_iter = iter_cook_conflicts(raw_conflicts, tt)
+ return sorted(conflict_iter, key=conflicts.Conflict.sort_key)
+
+
+def iter_cook_conflicts(raw_conflicts, tt):
+ fp = FinalPaths(tt)
+ for conflict in raw_conflicts:
+ c_type = conflict[0]
+ action = conflict[1]
+ modified_path = fp.get_path(conflict[2])
+ modified_id = tt.final_file_id(conflict[2])
+ if len(conflict) == 3:
+ yield conflicts.Conflict.factory(
+ c_type, action=action, path=modified_path, file_id=modified_id)
+
+ else:
+ conflicting_path = fp.get_path(conflict[3])
+ conflicting_id = tt.final_file_id(conflict[3])
+ yield conflicts.Conflict.factory(
+ c_type, action=action, path=modified_path,
+ file_id=modified_id,
+ conflict_path=conflicting_path,
+ conflict_file_id=conflicting_id)
+
+
+class _FileMover(object):
+ """Moves and deletes files for TreeTransform, tracking operations"""
+
+ def __init__(self):
+ self.past_renames = []
+ self.pending_deletions = []
+
+ def rename(self, from_, to):
+ """Rename a file from one path to another."""
+ try:
+ os.rename(from_, to)
+ except OSError, e:
+ if e.errno in (errno.EEXIST, errno.ENOTEMPTY):
+ raise errors.FileExists(to, str(e))
+ # normal OSError doesn't include filenames so it's hard to see where
+ # the problem is, see https://bugs.launchpad.net/bzr/+bug/491763
+ raise errors.TransformRenameFailed(from_, to, str(e), e.errno)
+ self.past_renames.append((from_, to))
+
+ def pre_delete(self, from_, to):
+ """Rename a file out of the way and mark it for deletion.
+
+ Unlike os.unlink, this works equally well for files and directories.
+ :param from_: The current file path
+ :param to: A temporary path for the file
+ """
+ self.rename(from_, to)
+ self.pending_deletions.append(to)
+
+ def rollback(self):
+ """Reverse all renames that have been performed"""
+ for from_, to in reversed(self.past_renames):
+ try:
+ os.rename(to, from_)
+ except OSError, e:
+ raise errors.TransformRenameFailed(to, from_, str(e), e.errno)
+ # after rollback, don't reuse _FileMover
+ self.past_renames = None
+ self.pending_deletions = None
+
+ def apply_deletions(self):
+ """Apply all marked deletions"""
+ for path in self.pending_deletions:
+ delete_any(path)
+ # after apply_deletions, don't reuse _FileMover
+ self.past_renames = None
+ self.pending_deletions = None
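+
+
+# A hedged sketch of driving a _FileMover directly (TreeTransform normally
+# does this internally; the paths below are purely illustrative): renames are
+# recorded so they can be undone if a later step fails, and deletions are
+# only applied once everything else has succeeded.
+#
+#   mover = _FileMover()
+#   try:
+#       mover.rename('limbo/new-1', 'tree/file.txt')
+#       mover.pre_delete('tree/stale.txt', 'limbo/deleted-1')
+#   except (errors.FileExists, errors.TransformRenameFailed):
+#       mover.rollback()
+#       raise
+#   else:
+#       mover.apply_deletions()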
diff --git a/bzrlib/transport/__init__.py b/bzrlib/transport/__init__.py
new file mode 100644
index 0000000..fab2ea1
--- /dev/null
+++ b/bzrlib/transport/__init__.py
@@ -0,0 +1,1896 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Transport is an abstraction layer to handle file access.
+
+The abstraction is to allow access from the local filesystem, as well
+as remote (such as http or sftp).
+
+Transports are constructed from a string, being a URL or (as a degenerate
+case) a local filesystem path. This is typically the top directory of
+a bzrdir, repository, or similar object we are interested in working with.
+The Transport returned has methods to read, write and manipulate files within
+it.
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+from stat import S_ISDIR
+import urlparse
+
+from bzrlib import (
+ errors,
+ osutils,
+ symbol_versioning,
+ ui,
+ urlutils,
+ )
+""")
+
+from bzrlib.symbol_versioning import (
+ DEPRECATED_PARAMETER,
+ )
+from bzrlib.trace import (
+ mutter,
+ )
+from bzrlib import (
+ hooks,
+ registry,
+ )
+
+
+# a dictionary of open file streams. Keys are absolute paths, values are
+# transport defined.
+_file_streams = {}
+
+
+def _get_protocol_handlers():
+ """Return a dictionary of {urlprefix: [factory]}"""
+ return transport_list_registry
+
+
+def _set_protocol_handlers(new_handlers):
+ """Replace the current protocol handlers dictionary.
+
+ WARNING: this will remove all built-in protocols. Use with care.
+ """
+ global transport_list_registry
+ transport_list_registry = new_handlers
+
+
+def _clear_protocol_handlers():
+ global transport_list_registry
+ transport_list_registry = TransportListRegistry()
+
+
+def _get_transport_modules():
+ """Return a list of the modules providing transports."""
+ modules = set()
+ for prefix, factory_list in transport_list_registry.items():
+ for factory in factory_list:
+ modules.add(factory.get_module())
+ # Add chroot and pathfilter directly, because there is no handler
+ # registered for them.
+ modules.add('bzrlib.transport.chroot')
+ modules.add('bzrlib.transport.pathfilter')
+ result = list(modules)
+ result.sort()
+ return result
+
+
+class TransportListRegistry(registry.Registry):
+ """A registry which simplifies tracking available Transports.
+
+ A registration of a new protocol requires two steps:
+ 1) register the prefix with the function register_transport( )
+ 2) register the protocol provider with the function
+ register_transport_provider( ) ( and the "lazy" variant )
+
+ This is needed because:
+ a) a single provider can support multiple protocols ( like the ftp
+ provider which supports both the ftp:// and the aftp:// protocols )
+ b) a single protocol can have multiple providers ( like the http://
+ protocol which is supported by both the urllib and pycurl provider )
+ """
+
+ def register_transport_provider(self, key, obj):
+ self.get(key).insert(0, registry._ObjectGetter(obj))
+
+ def register_lazy_transport_provider(self, key, module_name, member_name):
+ self.get(key).insert(0,
+ registry._LazyObjectGetter(module_name, member_name))
+
+ def register_transport(self, key, help=None):
+ self.register(key, [], help)
+
+
+transport_list_registry = TransportListRegistry()
+
+
+def register_transport_proto(prefix, help=None, info=None,
+ register_netloc=False):
+ transport_list_registry.register_transport(prefix, help)
+ if register_netloc:
+ if not prefix.endswith('://'):
+ raise ValueError(prefix)
+ register_urlparse_netloc_protocol(prefix[:-3])
+
+
+def register_lazy_transport(prefix, module, classname):
+ if not prefix in transport_list_registry:
+ register_transport_proto(prefix)
+ transport_list_registry.register_lazy_transport_provider(
+ prefix, module, classname)
+
+
+def register_transport(prefix, klass):
+ if not prefix in transport_list_registry:
+ register_transport_proto(prefix)
+ transport_list_registry.register_transport_provider(prefix, klass)
+
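+# A hedged example of registering a new protocol lazily (the prefix, module
+# and class names here are hypothetical):
+#
+#   register_transport_proto('example://',
+#           help="Access to example servers.")
+#   register_lazy_transport('example://', 'bzrlib.transport.example',
+#           'ExampleTransport')
+#
+# register_lazy_transport() falls back to registering the prefix itself when
+# it has not been seen before, so the first call is optional.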
+
+def register_urlparse_netloc_protocol(protocol):
+ """Ensure that protocol is setup to be used with urlparse netloc parsing."""
+ if protocol not in urlparse.uses_netloc:
+ urlparse.uses_netloc.append(protocol)
+
+
+def _unregister_urlparse_netloc_protocol(protocol):
+ """Remove protocol from urlparse netloc parsing.
+
+ Except for tests, you should never use this function. Using it with 'http',
+ for example, will break all http transports.
+ """
+ if protocol in urlparse.uses_netloc:
+ urlparse.uses_netloc.remove(protocol)
+
+
+def unregister_transport(scheme, factory):
+ """Unregister a transport."""
+ l = transport_list_registry.get(scheme)
+ for i in l:
+ o = i.get_obj()
+ if o == factory:
+ transport_list_registry.get(scheme).remove(i)
+ break
+ if len(l) == 0:
+ transport_list_registry.remove(scheme)
+
+
+class _CoalescedOffset(object):
+ """A data container for keeping track of coalesced offsets."""
+
+ __slots__ = ['start', 'length', 'ranges']
+
+ def __init__(self, start, length, ranges):
+ self.start = start
+ self.length = length
+ self.ranges = ranges
+
+ def __cmp__(self, other):
+ return cmp((self.start, self.length, self.ranges),
+ (other.start, other.length, other.ranges))
+
+ def __repr__(self):
+ return '%s(%r, %r, %r)' % (self.__class__.__name__,
+ self.start, self.length, self.ranges)
+
+
+class LateReadError(object):
+ """A helper for transports which pretends to be a readable file.
+
+ When read() is called, errors.ReadError is raised.
+ """
+
+ def __init__(self, path):
+ self._path = path
+
+ def close(self):
+ """a no-op - do nothing."""
+
+ def _fail(self):
+ """Raise ReadError."""
+ raise errors.ReadError(self._path)
+
+ def __iter__(self):
+ self._fail()
+
+ def read(self, count=-1):
+ self._fail()
+
+ def readlines(self):
+ self._fail()
+
+
+class FileStream(object):
+ """Base class for FileStreams."""
+
+ def __init__(self, transport, relpath):
+ """Create a FileStream for relpath on transport."""
+ self.transport = transport
+ self.relpath = relpath
+
+ def _close(self):
+ """A hook point for subclasses that need to take action on close."""
+
+ def close(self, want_fdatasync=False):
+ if want_fdatasync:
+ try:
+ self.fdatasync()
+ except errors.TransportNotPossible:
+ pass
+ self._close()
+ del _file_streams[self.transport.abspath(self.relpath)]
+
+ def fdatasync(self):
+ """Force data out to physical disk if possible.
+
+ :raises TransportNotPossible: If this transport has no way to
+ flush to disk.
+ """
+ raise errors.TransportNotPossible(
+ "%s cannot fdatasync" % (self.transport,))
+
+
+class FileFileStream(FileStream):
+ """A file stream object returned by open_write_stream.
+
+ This version uses a file like object to perform writes.
+ """
+
+ def __init__(self, transport, relpath, file_handle):
+ FileStream.__init__(self, transport, relpath)
+ self.file_handle = file_handle
+
+ def _close(self):
+ self.file_handle.close()
+
+ def fdatasync(self):
+ """Force data out to physical disk if possible."""
+ self.file_handle.flush()
+ try:
+ fileno = self.file_handle.fileno()
+ except AttributeError:
+ raise errors.TransportNotPossible()
+ osutils.fdatasync(fileno)
+
+ def write(self, bytes):
+ osutils.pump_string_file(bytes, self.file_handle)
+
+
+class AppendBasedFileStream(FileStream):
+ """A file stream object returned by open_write_stream.
+
+ This version uses append on a transport to perform writes.
+ """
+
+ def write(self, bytes):
+ self.transport.append_bytes(self.relpath, bytes)
+
+
+class TransportHooks(hooks.Hooks):
+ """Mapping of hook names to registered callbacks for transport hooks"""
+ def __init__(self):
+ super(TransportHooks, self).__init__()
+ self.add_hook("post_connect",
+ "Called after a new connection is established or a reconnect "
+ "occurs. The sole argument passed is either the connected "
+ "transport or smart medium instance.", (2, 5))
+
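+# A sketch of subscribing to the hook defined above (install_named_hook comes
+# from bzrlib.hooks.Hooks; the callback and its label are illustrative):
+#
+#   def _log_connection(connected):
+#       mutter('transport connected: %r', connected)
+#
+#   Transport.hooks.install_named_hook('post_connect', _log_connection,
+#       'connection logging')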
+
+class Transport(object):
+ """This class encapsulates methods for retrieving or putting a file
+ from/to a storage location.
+
+ Most functions have a _multi variant, which allows you to queue up
+ multiple requests. They generally have a dumb base implementation
+ which just iterates over the arguments, but smart Transport
+ implementations can do pipelining.
+ In general implementations should support having a generator or a list
+ as an argument (i.e. always iterate, never index)
+
+ :ivar base: Base URL for the transport; should always end in a slash.
+ """
+
+ # implementations can override this if it is more efficient
+ # for them to combine larger read chunks together
+ _max_readv_combine = 50
+ # It is better to read this much more data in order, rather
+ # than doing another seek. Even for the local filesystem,
+ # there is a benefit in just reading.
+ # TODO: jam 20060714 Do some real benchmarking to figure out
+ # where the biggest benefit between combining reads and
+ # seeking is. Consider a runtime auto-tune.
+ _bytes_to_read_before_seek = 0
+
+ hooks = TransportHooks()
+
+ def __init__(self, base):
+ super(Transport, self).__init__()
+ self.base = base
+ (self._raw_base, self._segment_parameters) = (
+ urlutils.split_segment_parameters(base))
+
+ def _translate_error(self, e, path, raise_generic=True):
+ """Translate an IOError or OSError into an appropriate bzr error.
+
+ This handles things like ENOENT, ENOTDIR, EEXIST, and EACCES
+ """
+ if getattr(e, 'errno', None) is not None:
+ if e.errno in (errno.ENOENT, errno.ENOTDIR):
+ raise errors.NoSuchFile(path, extra=e)
+ elif e.errno == errno.EINVAL:
+ mutter("EINVAL returned on path %s: %r" % (path, e))
+ raise errors.NoSuchFile(path, extra=e)
+ # I would rather use errno.EFOO, but there doesn't seem to be
+ # any matching for 267
+ # This is the error when doing a listdir on a file:
+ # WindowsError: [Errno 267] The directory name is invalid
+ if sys.platform == 'win32' and e.errno in (errno.ESRCH, 267):
+ raise errors.NoSuchFile(path, extra=e)
+ if e.errno == errno.EEXIST:
+ raise errors.FileExists(path, extra=e)
+ if e.errno == errno.EACCES:
+ raise errors.PermissionDenied(path, extra=e)
+ if e.errno == errno.ENOTEMPTY:
+ raise errors.DirectoryNotEmpty(path, extra=e)
+ if e.errno == errno.EBUSY:
+ raise errors.ResourceBusy(path, extra=e)
+ if raise_generic:
+ raise errors.TransportError(orig_error=e)
+
+ def clone(self, offset=None):
+ """Return a new Transport object, cloned from the current location,
+ using a subdirectory or parent directory. This allows connections
+ to be pooled, rather than a new one needed for each subdir.
+ """
+ raise NotImplementedError(self.clone)
+
+ def create_prefix(self, mode=None):
+ """Create all the directories leading down to self.base."""
+ cur_transport = self
+ needed = [cur_transport]
+ # Recurse upwards until we can create a directory successfully
+ while True:
+ new_transport = cur_transport.clone('..')
+ if new_transport.base == cur_transport.base:
+ raise errors.BzrCommandError(
+ "Failed to create path prefix for %s."
+ % cur_transport.base)
+ try:
+ new_transport.mkdir('.', mode=mode)
+ except errors.NoSuchFile:
+ needed.append(new_transport)
+ cur_transport = new_transport
+ except errors.FileExists:
+ break
+ else:
+ break
+ # Now we only need to create child directories
+ while needed:
+ cur_transport = needed.pop()
+ cur_transport.ensure_base(mode=mode)
+
+ def ensure_base(self, mode=None):
+ """Ensure that the directory this transport references exists.
+
+ This will create a directory if it doesn't exist.
+ :return: True if the directory was created, False otherwise.
+ """
+ # The default implementation just uses "Easier to ask for forgiveness
+ # than permission". We attempt to create the directory, and just
+ # suppress FileExists and PermissionDenied (for Windows) exceptions.
+ try:
+ self.mkdir('.', mode=mode)
+ except (errors.FileExists, errors.PermissionDenied):
+ return False
+ else:
+ return True
+
+ def external_url(self):
+ """Return a URL for self that can be given to an external process.
+
+ There is no guarantee that the URL can be accessed from a different
+ machine - e.g. file:/// urls are only usable on the local machine,
+ sftp:/// urls when the server is only bound to localhost are only
+ usable from localhost etc.
+
+ NOTE: This method may remove security wrappers (e.g. on chroot
+ transports) and thus should *only* be used when the result will not
+ be used to obtain a new transport within bzrlib. Ideally chroot
+ transports would know enough to cause the external url to be the exact
+ one used that caused the chrooting in the first place, but that is not
+ currently the case.
+
+ :return: A URL that can be given to another process.
+ :raises InProcessTransport: If the transport is one that cannot be
+ accessed out of the current process (e.g. a MemoryTransport)
+ then InProcessTransport is raised.
+ """
+ raise NotImplementedError(self.external_url)
+
+ def get_segment_parameters(self):
+ """Return the segment parameters for the top segment of the URL.
+ """
+ return self._segment_parameters
+
+ def set_segment_parameter(self, name, value):
+ """Set a segment parameter.
+
+ :param name: Segment parameter name (urlencoded string)
+ :param value: Segment parameter value (urlencoded string)
+ """
+ if value is None:
+ try:
+ del self._segment_parameters[name]
+ except KeyError:
+ pass
+ else:
+ self._segment_parameters[name] = value
+ self.base = urlutils.join_segment_parameters(
+ self._raw_base, self._segment_parameters)
+
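+ # A hedged illustration of segment parameters (the names and values are
+ # hypothetical); setting or clearing one rewrites self.base from the raw
+ # base URL:
+ #
+ #   t.set_segment_parameter('branch', 'trunk')
+ #   t.get_segment_parameters()        # -> {'branch': 'trunk'}
+ #   t.set_segment_parameter('branch', None)    # removes it again
+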
+ def _pump(self, from_file, to_file):
+ """Most children will need to copy from one file-like
+ object or string to another one.
+ This just gives them something easy to call.
+ """
+ return osutils.pumpfile(from_file, to_file)
+
+ def _get_total(self, multi):
+ """Try to figure out how many entries are in multi,
+ but if not possible, return None.
+ """
+ try:
+ return len(multi)
+ except TypeError: # We can't tell how many, because multi is a generator
+ return None
+
+ def _report_activity(self, bytes, direction):
+ """Notify that this transport has activity.
+
+ Implementations should call this from all methods that actually do IO.
+ Be careful that it's not called twice, if one method is implemented on
+ top of another.
+
+ :param bytes: Number of bytes read or written.
+ :param direction: 'read' or 'write' or None.
+ """
+ ui.ui_factory.report_transport_activity(self, bytes, direction)
+
+ def _update_pb(self, pb, msg, count, total):
+ """Update the progress bar based on the current count
+ and total available, total may be None if it was
+ not possible to determine.
+ """
+ if pb is None:
+ return
+ if total is None:
+ pb.update(msg, count, count+1)
+ else:
+ pb.update(msg, count, total)
+
+ def _iterate_over(self, multi, func, pb, msg, expand=True):
+ """Iterate over all entries in multi, passing them to func,
+ and update the progress bar as you go along.
+
+ :param expand: If True, the entries will be passed to the function
+ by expanding the tuple. If False, it will be passed
+ as a single parameter.
+ """
+ total = self._get_total(multi)
+ result = []
+ count = 0
+ for entry in multi:
+ self._update_pb(pb, msg, count, total)
+ if expand:
+ result.append(func(*entry))
+ else:
+ result.append(func(entry))
+ count += 1
+ return tuple(result)
+
+ def abspath(self, relpath):
+ """Return the full url to the given relative path.
+
+ :param relpath: a string of a relative path
+ """
+
+ # XXX: Robert Collins 20051016 - is this really needed in the public
+ # interface ?
+ raise NotImplementedError(self.abspath)
+
+ def recommended_page_size(self):
+ """Return the recommended page size for this transport.
+
+ This is potentially different for every path in a given namespace.
+ For example, local transports might use an operating system call to
+ get the block size for a given path, which can vary due to mount
+ points.
+
+ :return: The page size in bytes.
+ """
+ return 4 * 1024
+
+ def relpath(self, abspath):
+ """Return the local path portion from a given absolute path.
+
+ This default implementation is not suitable for filesystems with
+ aliasing, such as that given by symlinks, where a path may not
+ start with our base, but still be a relpath once aliasing is
+ resolved.
+ """
+ # TODO: This might want to use bzrlib.osutils.relpath
+ # but we have to watch out because of the prefix issues
+ if not (abspath == self.base[:-1] or abspath.startswith(self.base)):
+ raise errors.PathNotChild(abspath, self.base)
+ pl = len(self.base)
+ return abspath[pl:].strip('/')
+
+ def local_abspath(self, relpath):
+ """Return the absolute path on the local filesystem.
+
+ This function will only be defined for Transports which have a
+ physical local filesystem representation.
+
+ :raises errors.NotLocalUrl: When no local path representation is
+ available.
+ """
+ raise errors.NotLocalUrl(self.abspath(relpath))
+
+ def has(self, relpath):
+ """Does the file relpath exist?
+
+ Note that some transports MAY allow querying on directories, but this
+ is not part of the protocol. In other words, the results of
+ t.has("a_directory_name") are undefined.
+
+ :rtype: bool
+ """
+ raise NotImplementedError(self.has)
+
+ def has_multi(self, relpaths, pb=None):
+ """Return True/False for each entry in relpaths"""
+ total = self._get_total(relpaths)
+ count = 0
+ for relpath in relpaths:
+ self._update_pb(pb, 'has', count, total)
+ yield self.has(relpath)
+ count += 1
+
+ def has_any(self, relpaths):
+ """Return True if any of the paths exist."""
+ for relpath in relpaths:
+ if self.has(relpath):
+ return True
+ return False
+
+ def iter_files_recursive(self):
+ """Iter the relative paths of files in the transports sub-tree.
+
+ *NOTE*: This only lists *files*, not subdirectories!
+
+ As with other listing functions, only some transports implement this;
+ you may check via listable() to determine if it is supported.
+ """
+ raise errors.TransportNotPossible("This transport has not "
+ "implemented iter_files_recursive "
+ "(but must claim to be listable "
+ "to trigger this error).")
+
+ def get(self, relpath):
+ """Get the file at the given relative path.
+
+ This may fail in a number of ways:
+ - HTTP servers may return content for a directory. (unexpected
+ content failure)
+ - FTP servers may indicate NoSuchFile for a directory.
+ - SFTP servers may give a file handle for a directory that will
+ fail on read().
+
+ For correct use of the interface, be sure to catch errors.PathError
+ when calling it and catch errors.ReadError when reading from the
+ returned object.
+
+ :param relpath: The relative path to the file
+ :rtype: File-like object.
+ """
+ raise NotImplementedError(self.get)
+
+ def get_bytes(self, relpath):
+ """Get a raw string of the bytes for a file at the given location.
+
+ :param relpath: The relative path to the file
+ """
+ f = self.get(relpath)
+ try:
+ return f.read()
+ finally:
+ f.close()
+
+ def get_smart_medium(self):
+ """Return a smart client medium for this transport if possible.
+
+ A smart medium doesn't imply the presence of a smart server: it implies
+ that the smart protocol can be tunnelled via this transport.
+
+ :raises NoSmartMedium: if no smart server medium is available.
+ """
+ raise errors.NoSmartMedium(self)
+
+ def readv(self, relpath, offsets, adjust_for_latency=False,
+ upper_limit=None):
+ """Get parts of the file at the given relative path.
+
+ :param relpath: The path to read data from.
+ :param offsets: A list of (offset, size) tuples.
+ :param adjust_for_latency: Adjust the requested offsets to accommodate
+ transport latency. This may re-order the offsets, expand them to
+ grab adjacent data when there is likely a high cost to requesting
+ data relative to delivering it.
+ :param upper_limit: When adjust_for_latency is True setting upper_limit
+ allows the caller to tell the transport about the length of the
+ file, so that requests are not issued for ranges beyond the end of
+ the file. This matters because some servers and/or transports error
+ in such a case rather than just satisfying the available ranges.
+ upper_limit should always be provided when adjust_for_latency is
+ True, and should be the size of the file in bytes.
+ :return: A list or generator of (offset, data) tuples
+ """
+ if adjust_for_latency:
+ # Design note: We may wish to have different algorithms for the
+ # expansion of the offsets per-transport. E.g. for local disk to
+ # use page-aligned expansion. If that is the case consider the
+ # following structure:
+ # - a test that transport.readv uses self._offset_expander or some
+ # similar attribute, to do the expansion
+ # - a test for each transport that it has some known-good offset
+ # expander
+ # - unit tests for each offset expander
+ # - a set of tests for the offset expander interface, giving
+ # baseline behaviour (which the current transport
+ # adjust_for_latency tests could be repurposed to).
+ offsets = self._sort_expand_and_combine(offsets, upper_limit)
+ return self._readv(relpath, offsets)
+
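+ # A minimal readv() usage sketch (the path and offsets are hypothetical,
+ # and process() is a placeholder); results are yielded back in request
+ # order, not necessarily in file order:
+ #
+ #   for offset, data in t.readv('pack-1234.pack', [(0, 64), (4096, 128)]):
+ #       process(offset, data)
+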
+ def _readv(self, relpath, offsets):
+ """Get parts of the file at the given relative path.
+
+ :param relpath: The path to read.
+ :param offsets: A list of (offset, size) tuples.
+ :return: A list or generator of (offset, data) tuples
+ """
+ if not offsets:
+ return
+
+ fp = self.get(relpath)
+ return self._seek_and_read(fp, offsets, relpath)
+
+ def _seek_and_read(self, fp, offsets, relpath='<unknown>'):
+ """An implementation of readv that uses fp.seek and fp.read.
+
+ This uses _coalesce_offsets to issue larger reads and fewer seeks.
+
+ :param fp: A file-like object that supports seek() and read(size).
+ Note that implementations are allowed to call .close() on this file
+ handle, so don't trust that you can use it for other work.
+ :param offsets: A list of offsets to be read from the given file.
+ :return: yield (pos, data) tuples for each request
+ """
+ # We are going to iterate multiple times, we need a list
+ offsets = list(offsets)
+ sorted_offsets = sorted(offsets)
+
+ # turn the list of offsets into a stack
+ offset_stack = iter(offsets)
+ cur_offset_and_size = offset_stack.next()
+ coalesced = self._coalesce_offsets(sorted_offsets,
+ limit=self._max_readv_combine,
+ fudge_factor=self._bytes_to_read_before_seek)
+
+ # Cache the results, but only until they have been fulfilled
+ data_map = {}
+ try:
+ for c_offset in coalesced:
+ # TODO: jam 20060724 it might be faster to not issue seek if
+ # we are already at the right location. This should be
+ # benchmarked.
+ fp.seek(c_offset.start)
+ data = fp.read(c_offset.length)
+ if len(data) < c_offset.length:
+ raise errors.ShortReadvError(relpath, c_offset.start,
+ c_offset.length, actual=len(data))
+ for suboffset, subsize in c_offset.ranges:
+ key = (c_offset.start+suboffset, subsize)
+ data_map[key] = data[suboffset:suboffset+subsize]
+
+ # Now that we've read some data, see if we can yield anything back
+ while cur_offset_and_size in data_map:
+ this_data = data_map.pop(cur_offset_and_size)
+ this_offset = cur_offset_and_size[0]
+ try:
+ cur_offset_and_size = offset_stack.next()
+ except StopIteration:
+ fp.close()
+ cur_offset_and_size = None
+ yield this_offset, this_data
+ finally:
+ fp.close()
+
+ def _sort_expand_and_combine(self, offsets, upper_limit):
+ """Helper for readv.
+
+ :param offsets: A readv vector - (offset, length) tuples.
+ :param upper_limit: The highest byte offset that may be requested.
+ :return: A readv vector that will read all the regions requested by
+ offsets, in start-to-end order, with no duplicated regions,
+ expanded by the transport's recommended page size.
+ """
+ offsets = sorted(offsets)
+ # short circuit empty requests
+ if len(offsets) == 0:
+ def empty_yielder():
+ # Quick thunk to stop this function becoming a generator
+ # itself, rather we return a generator that has nothing to
+ # yield.
+ if False:
+ yield None
+ return empty_yielder()
+ # expand by page size at either end
+ maximum_expansion = self.recommended_page_size()
+ new_offsets = []
+ for offset, length in offsets:
+ expansion = maximum_expansion - length
+ if expansion < 0:
+ # we're asking for more than the minimum read anyway.
+ expansion = 0
+ reduction = expansion / 2
+ new_offset = offset - reduction
+ new_length = length + expansion
+ if new_offset < 0:
+ # don't ask for anything < 0
+ new_offset = 0
+ if (upper_limit is not None and
+ new_offset + new_length > upper_limit):
+ new_length = upper_limit - new_offset
+ new_offsets.append((new_offset, new_length))
+ # combine the expanded offsets
+ offsets = []
+ current_offset, current_length = new_offsets[0]
+ current_finish = current_length + current_offset
+ for offset, length in new_offsets[1:]:
+ finish = offset + length
+ if offset > current_finish:
+ # there is a gap, output the current accumulator and start
+ # a new one for the region we're examining.
+ offsets.append((current_offset, current_length))
+ current_offset = offset
+ current_length = length
+ current_finish = finish
+ continue
+ if finish > current_finish:
+ # extend the current accumulator to the end of the region
+ # we're examining.
+ current_finish = finish
+ current_length = finish - current_offset
+ offsets.append((current_offset, current_length))
+ return offsets
+
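+ # Worked example (using the 4096-byte default page size defined above):
+ # _sort_expand_and_combine([(0, 10), (4096, 10)], None) expands each
+ # request to a full page -- (0, 4096) and (2053, 4096) -- and, because the
+ # expanded regions now overlap, combines them into a single read,
+ # [(0, 6149)].
+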
+ @staticmethod
+ def _coalesce_offsets(offsets, limit=0, fudge_factor=0, max_size=0):
+ """Yield coalesced offsets.
+
+ With a long list of neighboring requests, combine them
+ into a single large request, while retaining the original
+ offsets.
+ Turns [(15, 10), (25, 10)] => [(15, 20, [(0, 10), (10, 10)])]
+ Note that overlapping requests are not permitted. (So [(15, 10), (20,
+ 10)] will raise a ValueError.) This is because the data we access never
+ overlaps, and it allows callers to trust that we only need any byte of
+ data for 1 request (so nothing needs to be buffered to fulfill a second
+ request.)
+
+ :param offsets: A list of (start, length) pairs
+ :param limit: Only combine a maximum of this many pairs. Some transports
+ penalize multiple reads more than others, and sometimes it is
+ better to return early.
+ 0 means no limit
+ :param fudge_factor: All transports have some level of 'it is
+ better to read some more data and throw it away rather
+ than seek', so collapse if we are 'close enough'
+ :param max_size: Create coalesced offsets no bigger than this size.
+ When a single offset is bigger than 'max_size', it will keep
+ its size and be alone in the coalesced offset.
+ 0 means no maximum size.
+ :return: return a list of _CoalescedOffset objects, which have members
+ for where to start, how much to read, and how to split those chunks
+ back up
+ """
+ last_end = None
+ cur = _CoalescedOffset(None, None, [])
+ coalesced_offsets = []
+
+ if max_size <= 0:
+ # 'unlimited', but we actually take this to mean 100MB buffer limit
+ max_size = 100*1024*1024
+
+ for start, size in offsets:
+ end = start + size
+ if (last_end is not None
+ and start <= last_end + fudge_factor
+ and start >= cur.start
+ and (limit <= 0 or len(cur.ranges) < limit)
+ and (max_size <= 0 or end - cur.start <= max_size)):
+ if start < last_end:
+ raise ValueError('Overlapping range not allowed:'
+ ' last range ended at %s, new one starts at %s'
+ % (last_end, start))
+ cur.length = end - cur.start
+ cur.ranges.append((start-cur.start, size))
+ else:
+ if cur.start is not None:
+ coalesced_offsets.append(cur)
+ cur = _CoalescedOffset(start, size, [(0, size)])
+ last_end = end
+
+ if cur.start is not None:
+ coalesced_offsets.append(cur)
+ return coalesced_offsets
+
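+ # A worked example of the fudge_factor: with fudge_factor=10 the two
+ # requests [(15, 10), (30, 10)] are close enough to be read as one
+ # coalesced offset, _CoalescedOffset(15, 25, [(0, 10), (15, 10)]);
+ # with the default fudge_factor=0 they stay as two separate reads.
+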
+ def get_multi(self, relpaths, pb=None):
+ """Get a list of file-like objects, one for each entry in relpaths.
+
+ :param relpaths: A list of relative paths.
+ :param pb: An optional ProgressTask for indicating percent done.
+ :return: A list or generator of file-like objects
+ """
+ # TODO: Consider having this actually buffer the requests;
+ # in the default mode it probably won't give worse performance,
+ # and then children wouldn't each have to implement buffering.
+ total = self._get_total(relpaths)
+ count = 0
+ for relpath in relpaths:
+ self._update_pb(pb, 'get', count, total)
+ yield self.get(relpath)
+ count += 1
+
+ def put_bytes(self, relpath, bytes, mode=None):
+ """Atomically put the supplied bytes into the given location.
+
+ :param relpath: The location to put the contents, relative to the
+ transport base.
+ :param bytes: A bytestring of data.
+ :param mode: Create the file with the given mode.
+ :return: None
+ """
+ if not isinstance(bytes, str):
+ raise AssertionError(
+ 'bytes must be a plain string, not %s' % type(bytes))
+ return self.put_file(relpath, StringIO(bytes), mode=mode)
+
+ def put_bytes_non_atomic(self, relpath, bytes, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ """Copy the string into the target location.
+
+ This function is not strictly safe to use. See
+ Transport.put_file_non_atomic for more information.
+
+ :param relpath: The remote location to put the contents.
+ :param bytes: A string object containing the raw bytes to write into
+ the target file.
+ :param mode: Possible access permissions for new file.
+ None means do not set remote permissions.
+ :param create_parent_dir: If we cannot create the target file because
+ the parent directory does not exist, go ahead and
+ create it, and then try again.
+ :param dir_mode: Possible access permissions for new directories.
+ """
+ if not isinstance(bytes, str):
+ raise AssertionError(
+ 'bytes must be a plain string, not %s' % type(bytes))
+ self.put_file_non_atomic(relpath, StringIO(bytes), mode=mode,
+ create_parent_dir=create_parent_dir,
+ dir_mode=dir_mode)
+
+ def put_file(self, relpath, f, mode=None):
+ """Copy the file-like object into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param f: File-like object.
+ :param mode: The mode for the newly created file,
+ None means just use the default.
+ :return: The length of the file that was written.
+ """
+ # We would like to mark this as NotImplemented, but most likely
+ # transports have defined it in terms of the old api.
+ symbol_versioning.warn('Transport %s should implement put_file,'
+ ' rather than implementing put() as of'
+ ' version 0.11.'
+ % (self.__class__.__name__,),
+ DeprecationWarning)
+ return self.put(relpath, f, mode=mode)
+ #raise NotImplementedError(self.put_file)
+
+ def put_file_non_atomic(self, relpath, f, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ """Copy the file-like object into the target location.
+
+ This function is not strictly safe to use. It is only meant to
+ be used when you already know that the target does not exist.
+ It is not safe, because it will open and truncate the remote
+ file. So there may be a time when the file has invalid contents.
+
+ :param relpath: The remote location to put the contents.
+ :param f: File-like object.
+ :param mode: Possible access permissions for new file.
+ None means do not set remote permissions.
+ :param create_parent_dir: If we cannot create the target file because
+ the parent directory does not exist, go ahead and
+ create it, and then try again.
+ :param dir_mode: Possible access permissions for new directories.
+ """
+ # Default implementation just does an atomic put.
+ try:
+ return self.put_file(relpath, f, mode=mode)
+ except errors.NoSuchFile:
+ if not create_parent_dir:
+ raise
+ parent_dir = osutils.dirname(relpath)
+ if parent_dir:
+ self.mkdir(parent_dir, mode=dir_mode)
+ return self.put_file(relpath, f, mode=mode)
+
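+ # A hedged usage sketch (the paths and contents are hypothetical).
+ # put_bytes() is the atomic form; the non-atomic form can create missing
+ # parent directories on the way:
+ #
+ #   t.put_bytes('lock/info', info_bytes, mode=0644)
+ #   t.put_bytes_non_atomic('new-dir/file', 'contents',
+ #                          create_parent_dir=True)
+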
+ def mkdir(self, relpath, mode=None):
+ """Create a directory at the given path."""
+ raise NotImplementedError(self.mkdir)
+
+ def mkdir_multi(self, relpaths, mode=None, pb=None):
+ """Create a group of directories"""
+ def mkdir(path):
+ self.mkdir(path, mode=mode)
+ return len(self._iterate_over(relpaths, mkdir, pb, 'mkdir', expand=False))
+
+ def open_write_stream(self, relpath, mode=None):
+ """Open a writable file stream at relpath.
+
+ A file stream is a file-like object with a write() method that accepts
+ bytes to write. Buffering may occur internally until the stream is
+ closed with stream.close(). Calls to readv or the get_* methods will
+ be synchronised with any internal buffering that may be present.
+
+ :param relpath: The relative path to the file.
+ :param mode: The mode for the newly created file,
+ None means just use the default
+ :return: A FileStream. FileStream objects have two methods, write() and
+ close(). There is no guarantee that data is committed to the file
+ if close() has not been called (even if get() is called on the same
+ path).
+ """
+ raise NotImplementedError(self.open_write_stream)
+
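+ # A hedged sketch of using a write stream (the path and chunk variables
+ # are hypothetical); data is only guaranteed to be on disk once close()
+ # has returned:
+ #
+ #   stream = t.open_write_stream('pack-1234.pack')
+ #   try:
+ #       stream.write(first_chunk)
+ #       stream.write(second_chunk)
+ #   finally:
+ #       stream.close()
+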
+ def append_file(self, relpath, f, mode=None):
+ """Append bytes from a file-like object to a file at relpath.
+
+ The file is created if it does not already exist.
+
+ :param f: a file-like object of the bytes to append.
+ :param mode: Unix mode for newly created files. This is not used for
+ existing files.
+
+ :returns: the length of relpath before the content was written to it.
+ """
+ symbol_versioning.warn('Transport %s should implement append_file,'
+ ' rather than implementing append() as of'
+ ' version 0.11.'
+ % (self.__class__.__name__,),
+ DeprecationWarning)
+ return self.append(relpath, f, mode=mode)
+
+ def append_bytes(self, relpath, bytes, mode=None):
+ """Append bytes to a file at relpath.
+
+ The file is created if it does not already exist.
+
+ :type bytes: str
+ :param bytes: a string of the bytes to append.
+ :param mode: Unix mode for newly created files. This is not used for
+ existing files.
+
+ :returns: the length of relpath before the content was written to it.
+ """
+ if not isinstance(bytes, str):
+ raise TypeError(
+ 'bytes must be a plain string, not %s' % type(bytes))
+ return self.append_file(relpath, StringIO(bytes), mode=mode)
+
+ def append_multi(self, files, pb=None):
+ """Append the text in each file-like or string object to
+ the supplied location.
+
+ :param files: A set of (path, f) entries
+ :param pb: An optional ProgressTask for indicating percent done.
+ """
+ return self._iterate_over(files, self.append_file, pb, 'append', expand=True)
+
+ def copy(self, rel_from, rel_to):
+ """Copy the item at rel_from to the location at rel_to.
+
+ Override this for efficiency if a specific transport can do it
+ faster than this default implementation.
+ """
+ self.put_file(rel_to, self.get(rel_from))
+
+ def copy_multi(self, relpaths, pb=None):
+ """Copy a bunch of entries.
+
+ :param relpaths: A list of tuples of the form [(from, to), (from, to),...]
+ """
+ # This is the non-pipelined implementation, so that
+ # implementors don't have to implement everything.
+ return self._iterate_over(relpaths, self.copy, pb, 'copy', expand=True)
+
+ def copy_to(self, relpaths, other, mode=None, pb=None):
+ """Copy a set of entries from self into another Transport.
+
+ :param relpaths: A list/generator of entries to be copied.
+ :param mode: This is the target mode for the newly created files
+ TODO: This interface needs to be updated so that the target location
+ can be different from the source location.
+ """
+ # The dummy implementation just does a simple get + put
+ def copy_entry(path):
+ other.put_file(path, self.get(path), mode=mode)
+
+ return len(self._iterate_over(relpaths, copy_entry, pb, 'copy_to', expand=False))
+
+ def copy_tree(self, from_relpath, to_relpath):
+ """Copy a subtree from one relpath to another.
+
+ If a faster implementation is available, specific transports should
+ implement it.
+ """
+ source = self.clone(from_relpath)
+ target = self.clone(to_relpath)
+
+ # create target directory with the same rwx bits as source.
+ # use mask to ensure that bits other than rwx are ignored.
+ stat = self.stat(from_relpath)
+ target.mkdir('.', stat.st_mode & 0777)
+ source.copy_tree_to_transport(target)
+
+ def copy_tree_to_transport(self, to_transport):
+ """Copy a subtree from one transport to another.
+
+ self.base is used as the source tree root, and to_transport.base
+ is used as the target. to_transport.base must exist (and be a
+ directory).
+ """
+ files = []
+ directories = ['.']
+ while directories:
+ dir = directories.pop()
+ if dir != '.':
+ to_transport.mkdir(dir)
+ for path in self.list_dir(dir):
+ path = dir + '/' + path
+ stat = self.stat(path)
+ if S_ISDIR(stat.st_mode):
+ directories.append(path)
+ else:
+ files.append(path)
+ self.copy_to(files, to_transport)
+
+ def rename(self, rel_from, rel_to):
+ """Rename a file or directory.
+
+ This *must* fail if the destination is a nonempty directory - it must
+ not automatically remove it. It should raise DirectoryNotEmpty, or
+ some other PathError if the case can't be specifically detected.
+
+ If the destination is an empty directory or a file this function may
+ either fail or succeed, depending on the underlying transport. It
+ should not attempt to remove the destination if overwriting is not the
+ native transport behaviour. If at all possible the transport should
+ ensure that the rename either completes or not, without leaving the
+ destination deleted and the new file not moved in place.
+
+ This is intended mainly for use in implementing LockDir.
+ """
+ # transports may need to override this
+ raise NotImplementedError(self.rename)
+
+ def move(self, rel_from, rel_to):
+ """Move the item at rel_from to the location at rel_to.
+
+ The destination is deleted if possible, even if it's a non-empty
+ directory tree.
+
+ If a transport can directly implement this it is suggested that
+ it do so for efficiency.
+ """
+ if S_ISDIR(self.stat(rel_from).st_mode):
+ self.copy_tree(rel_from, rel_to)
+ self.delete_tree(rel_from)
+ else:
+ self.copy(rel_from, rel_to)
+ self.delete(rel_from)
+
+ def move_multi(self, relpaths, pb=None):
+ """Move a bunch of entries.
+
+ :param relpaths: A list of tuples of the form [(from1, to1), (from2, to2),...]
+ """
+ return self._iterate_over(relpaths, self.move, pb, 'move', expand=True)
+
+ def move_multi_to(self, relpaths, rel_to):
+ """Move a bunch of entries to a single location.
+ This differs from move_multi in that you give a list of source paths
+ and a single destination, rather than multiple destinations.
+
+ :param relpaths: A list of relative paths [from1, from2, from3, ...]
+ :param rel_to: A directory where each entry should be placed.
+ """
+ # This is not implemented, because you need to do special tricks to
+ # extract the basename, and add it to rel_to
+ raise NotImplementedError(self.move_multi_to)
+
+ def delete(self, relpath):
+ """Delete the item at relpath"""
+ raise NotImplementedError(self.delete)
+
+ def delete_multi(self, relpaths, pb=None):
+ """Queue up a bunch of deletes to be done.
+ """
+ return self._iterate_over(relpaths, self.delete, pb, 'delete', expand=False)
+
+ def delete_tree(self, relpath):
+ """Delete an entire tree. This may require a listable transport."""
+ subtree = self.clone(relpath)
+ files = []
+ directories = ['.']
+ pending_rmdirs = []
+ while directories:
+ dir = directories.pop()
+ if dir != '.':
+ pending_rmdirs.append(dir)
+ for path in subtree.list_dir(dir):
+ path = dir + '/' + path
+ stat = subtree.stat(path)
+ if S_ISDIR(stat.st_mode):
+ directories.append(path)
+ else:
+ files.append(path)
+ subtree.delete_multi(files)
+ pending_rmdirs.reverse()
+ for dir in pending_rmdirs:
+ subtree.rmdir(dir)
+ self.rmdir(relpath)
+
+ def __repr__(self):
+ return "<%s.%s url=%s>" % (self.__module__, self.__class__.__name__, self.base)
+
+ def stat(self, relpath):
+ """Return the stat information for a file.
+ WARNING: This may not be implementable for all protocols, so use
+ sparingly.
+ NOTE: This returns an object with fields such as 'st_size'. It MAY
+ or MAY NOT return the literal result of an os.stat() call, so all
+ access should be via named fields.
+ ALSO NOTE: Stats of directories may not be supported on some
+ transports.
+ """
+ raise NotImplementedError(self.stat)
+
+ def rmdir(self, relpath):
+ """Remove a directory at the given path."""
+ raise NotImplementedError
+
+ def stat_multi(self, relpaths, pb=None):
+ """Stat multiple files and return the information.
+ """
+ # TODO: Is it worth making this a generator instead of
+ # returning a list?
+ stats = []
+ def gather(path):
+ stats.append(self.stat(path))
+
+ count = self._iterate_over(relpaths, gather, pb, 'stat', expand=False)
+ return stats
+
+ def readlink(self, relpath):
+ """Return a string representing the path to which the symbolic link points."""
+ raise errors.TransportNotPossible("Dereferencing symlinks is not supported on %s" % self)
+
+ def hardlink(self, source, link_name):
+ """Create a hardlink pointing to source named link_name."""
+ raise errors.TransportNotPossible("Hard links are not supported on %s" % self)
+
+ def symlink(self, source, link_name):
+ """Create a symlink pointing to source named link_name."""
+ raise errors.TransportNotPossible("Symlinks are not supported on %s" % self)
+
+ def listable(self):
+ """Return True if this store supports listing."""
+ raise NotImplementedError(self.listable)
+
+ def list_dir(self, relpath):
+ """Return a list of all files at the given location.
+ WARNING: many transports do not support this, so try to avoid using
+ it if at all possible.
+ """
+ raise errors.TransportNotPossible("Transport %r has not "
+ "implemented list_dir "
+ "(but must claim to be listable "
+ "to trigger this error)."
+ % (self))
+
+ def lock_read(self, relpath):
+ """Lock the given file for shared (read) access.
+
+ WARNING: many transports do not support this, so try to avoid using it.
+ These methods may be removed in the future.
+
+ Transports may raise TransportNotPossible if OS-level locks cannot be
+ taken over this transport.
+
+ :return: A lock object, which should contain an unlock() function.
+ """
+ raise errors.TransportNotPossible("transport locks not supported on %s" % self)
+
+ def lock_write(self, relpath):
+ """Lock the given file for exclusive (write) access.
+
+ WARNING: many transports do not support this, so try to avoid using it.
+ These methods may be removed in the future.
+
+ Transports may raise TransportNotPossible if OS-level locks cannot be
+ taken over this transport.
+
+ :return: A lock object, which should contain an unlock() function.
+ """
+ raise errors.TransportNotPossible("transport locks not supported on %s" % self)
+
+ def is_readonly(self):
+ """Return true if this connection cannot be written to."""
+ return False
+
+ def _can_roundtrip_unix_modebits(self):
+ """Return true if this transport can store and retrieve unix modebits.
+
+ (For example, 0700 to make a directory owner-private.)
+
+ Note: most callers will not want to switch on this, but should rather
+ just try and set permissions and let them be either stored or not.
+ This is intended mainly for the use of the test suite.
+
+ Warning: this is not guaranteed to be accurate as sometimes we can't
+ be sure: for example with vfat mounted on unix, or a windows sftp
+ server."""
+ # TODO: Perhaps return a e.g. TransportCharacteristics that can answer
+ # several questions about the transport.
+ return False
+
+ def _reuse_for(self, other_base):
+ # This is really needed for ConnectedTransport only, but it's easier to
+ # have Transport refuse to be reused than to test that the reuse
+ # request should only go to ConnectedTransport.
+ return None
+
+ def disconnect(self):
+ # This is really needed for ConnectedTransport only, but it's easier to
+ # have Transport do nothing than to test that the disconnect should
+ # only go to ConnectedTransport.
+ pass
+
+ def _redirected_to(self, source, target):
+ """Returns a transport suitable to re-issue a redirected request.
+
+ :param source: The source url as returned by the server.
+ :param target: The target url as returned by the server.
+
+ The redirection can be handled only if the relpath involved is not
+ renamed by the redirection.
+
+ :returns: A transport or None.
+ """
+ # This returns None by default, meaning the transport can't handle the
+ # redirection.
+ return None
+
+
+
+class _SharedConnection(object):
+ """A connection shared between several transports."""
+
+ def __init__(self, connection=None, credentials=None, base=None):
+ """Constructor.
+
+ :param connection: An opaque object specific to each transport.
+
+ :param credentials: An opaque object containing the credentials used to
+ create the connection.
+ """
+ self.connection = connection
+ self.credentials = credentials
+ self.base = base
+
+
+class ConnectedTransport(Transport):
+ """A transport connected to a remote server.
+
+ This class provides the basis for implementing transports that need to
+ connect to a remote server.
+
+ Host and credentials are available as private attributes; cloning preserves
+ them and shares the underlying, protocol-specific connection.
+ """
+
+ def __init__(self, base, _from_transport=None):
+ """Constructor.
+
+ The caller should ensure that _from_transport points at the same host
+ as the new base.
+
+ :param base: transport root URL
+
+ :param _from_transport: optional transport to build from. The built
+ transport will share the connection with this transport.
+ """
+ if not base.endswith('/'):
+ base += '/'
+ self._parsed_url = self._split_url(base)
+ if _from_transport is not None:
+ # Copy the password as it does not appear in base and will be lost
+ # otherwise. It can appear in the _split_url above if the user
+ # provided it on the command line. Otherwise, daughter classes will
+ # prompt the user for one when appropriate.
+ self._parsed_url.password = _from_transport._parsed_url.password
+ self._parsed_url.quoted_password = (
+ _from_transport._parsed_url.quoted_password)
+
+ base = str(self._parsed_url)
+
+ super(ConnectedTransport, self).__init__(base)
+ if _from_transport is None:
+ self._shared_connection = _SharedConnection()
+ else:
+ self._shared_connection = _from_transport._shared_connection
+
+ @property
+ def _user(self):
+ return self._parsed_url.user
+
+ @property
+ def _password(self):
+ return self._parsed_url.password
+
+ @property
+ def _host(self):
+ return self._parsed_url.host
+
+ @property
+ def _port(self):
+ return self._parsed_url.port
+
+ @property
+ def _path(self):
+ return self._parsed_url.path
+
+ @property
+ def _scheme(self):
+ return self._parsed_url.scheme
+
+ def clone(self, offset=None):
+ """Return a new transport with root at self.base + offset
+
+ We let the daughter classes take advantage of the hint
+ that this is a clone, not a raw creation.
+ """
+ if offset is None:
+ return self.__class__(self.base, _from_transport=self)
+ else:
+ return self.__class__(self.abspath(offset), _from_transport=self)
+
+ @staticmethod
+ def _split_url(url):
+ return urlutils.URL.from_string(url)
+
+ @staticmethod
+ def _unsplit_url(scheme, user, password, host, port, path):
+ """Build the full URL for the given already URL encoded path.
+
+ user, password, host and path will be quoted if they contain reserved
+ chars.
+
+ :param scheme: protocol
+ :param user: login
+ :param password: associated password
+ :param host: the server address
+ :param port: the associated port
+ :param path: the absolute path on the server
+
+ :return: The corresponding URL.
+ """
+ netloc = urlutils.quote(host)
+ if user is not None:
+ # Note that we don't put the password back even if we
+ # have one so that it doesn't get accidentally
+ # exposed.
+ netloc = '%s@%s' % (urlutils.quote(user), netloc)
+ if port is not None:
+ netloc = '%s:%d' % (netloc, port)
+ path = urlutils.escape(path)
+ return urlparse.urlunparse((scheme, netloc, path, None, None, None))
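+
+ # Illustrative example (values are hypothetical):
+ # _unsplit_url('sftp', 'joe', 'secret', 'host.example.com', 2222, '/srv/repo')
+ # would return 'sftp://joe@host.example.com:2222/srv/repo'; note that the
+ # password is deliberately never re-embedded in the URL.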
+
+ def relpath(self, abspath):
+ """Return the local path portion from a given absolute path"""
+ parsed_url = self._split_url(abspath)
+ error = []
+ if parsed_url.scheme != self._parsed_url.scheme:
+ error.append('scheme mismatch')
+ if parsed_url.user != self._parsed_url.user:
+ error.append('user name mismatch')
+ if parsed_url.host != self._parsed_url.host:
+ error.append('host mismatch')
+ if parsed_url.port != self._parsed_url.port:
+ error.append('port mismatch')
+ if (not (parsed_url.path == self._parsed_url.path[:-1] or
+ parsed_url.path.startswith(self._parsed_url.path))):
+ error.append('path mismatch')
+ if error:
+ extra = ', '.join(error)
+ raise errors.PathNotChild(abspath, self.base, extra=extra)
+ pl = len(self._parsed_url.path)
+ return parsed_url.path[pl:].strip('/')
+
+ def abspath(self, relpath):
+ """Return the full url to the given relative path.
+
+ :param relpath: the relative path urlencoded
+
+ :returns: the Unicode version of the absolute path for relpath.
+ """
+ return str(self._parsed_url.clone(relpath))
+
+ def _remote_path(self, relpath):
+ """Return the absolute path part of the url to the given relative path.
+
+ This is the path that the remote server expects to receive in
+ requests; daughter classes should redefine this method if needed and
+ use the result to build their requests.
+
+ :param relpath: the path relative to the transport base, urlencoded.
+
+ :return: the absolute Unicode path on the server.
+ """
+ return self._parsed_url.clone(relpath).path
+
+ def _get_shared_connection(self):
+ """Get the object shared amongst cloned transports.
+
+ This should be used only by classes that need to extend the sharing
+ with objects other than transports.
+
+ Use _get_connection to get the connection itself.
+ """
+ return self._shared_connection
+
+ def _set_connection(self, connection, credentials=None):
+ """Record a newly created connection with its associated credentials.
+
+ Note: To ensure that connection is still shared after a temporary
+ failure and a new one needs to be created, daughter classes should
+ always call this method to set the connection and do so each time a new
+ connection is created.
+
+ :param connection: An opaque object representing the connection used by
+ the daughter class.
+
+ :param credentials: An opaque object representing the credentials
+ needed to create the connection.
+ """
+ self._shared_connection.connection = connection
+ self._shared_connection.credentials = credentials
+ for hook in self.hooks["post_connect"]:
+ hook(self)
+
+ def _get_connection(self):
+ """Returns the transport specific connection object."""
+ return self._shared_connection.connection
+
+ def _get_credentials(self):
+ """Returns the credentials used to establish the connection."""
+ return self._shared_connection.credentials
+
+ def _update_credentials(self, credentials):
+ """Update the credentials of the current connection.
+
+ Some protocols can renegotiate the credentials within a connection;
+ this method allows daughter classes to share updated credentials.
+
+ :param credentials: the updated credentials.
+ """
+ # We don't want to call _set_connection here as we are only updating
+ # the credentials not creating a new connection.
+ self._shared_connection.credentials = credentials
+
+ def _reuse_for(self, other_base):
+ """Returns a transport sharing the same connection if possible.
+
+ Note: we share the connection if the expected credentials are the
+ same: (host, port, user). Some protocols may disagree and redefine the
+ criteria in daughter classes.
+
+ Note: we don't compare the passwords here because other_base may have
+ been obtained from an existing transport.base which does not mention the
+ password.
+
+ :param other_base: the URL we want to share the connection with.
+
+ :return: A new transport or None if the connection cannot be shared.
+ """
+ try:
+ parsed_url = self._split_url(other_base)
+ except errors.InvalidURL:
+ # No hope in trying to reuse an existing transport for an invalid
+ # URL
+ return None
+
+ transport = None
+ # Don't compare passwords, they may be absent from other_base or from
+ # self and they don't carry more information than user anyway.
+ if (parsed_url.scheme == self._parsed_url.scheme
+ and parsed_url.user == self._parsed_url.user
+ and parsed_url.host == self._parsed_url.host
+ and parsed_url.port == self._parsed_url.port):
+ path = parsed_url.path
+ if not path.endswith('/'):
+ # This normally occurs at __init__ time, but it's easier to do
+ # it now to avoid creating two transports for the same base.
+ path += '/'
+ if self._parsed_url.path == path:
+ # shortcut, it's really the same transport
+ return self
+ # We don't call clone here because the intent is different: we
+ # build a new transport on a different base (which may be totally
+ # unrelated) but we share the connection.
+ transport = self.__class__(other_base, _from_transport=self)
+ return transport
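+
+ # Illustrative (the URLs are hypothetical): a transport for
+ # 'sftp://joe@host.example.com/repo' can be reused for
+ # 'sftp://joe@host.example.com/elsewhere' because scheme, user, host and
+ # port match, but not for 'sftp://bob@host.example.com/repo'.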
+
+ def disconnect(self):
+ """Disconnect the transport.
+
+ If and when required, the transport will reconnect automatically.
+ """
+ raise NotImplementedError(self.disconnect)
+
+
+def location_to_url(location):
+ """Determine a fully qualified URL from a location string.
+
+ This will try to interpret location as both a URL and a directory path. It
+ will also look up the location in directory services.
+
+ :param location: Unicode or byte string object with a location
+ :raise InvalidURL: If the location is already a URL, but not valid.
+ :return: Byte string with resulting URL
+ """
+ if not isinstance(location, basestring):
+ raise AssertionError("location not a byte or unicode string")
+ from bzrlib.directory_service import directories
+ location = directories.dereference(location)
+
+ # Catch any URLs which are passing Unicode rather than ASCII
+ try:
+ location = location.encode('ascii')
+ except UnicodeError:
+ if urlutils.is_url(location):
+ raise errors.InvalidURL(path=location,
+ extra='URLs must be properly escaped')
+ location = urlutils.local_path_to_url(location)
+
+ if location.startswith("file:") and not location.startswith("file://"):
+ return urlutils.join(urlutils.local_path_to_url("."), location[5:])
+
+ if not urlutils.is_url(location):
+ return urlutils.local_path_to_url(location)
+
+ return location
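+
+# Illustrative examples (the paths are hypothetical): location_to_url('/srv/repo')
+# returns something like 'file:///srv/repo', while a location that is already a
+# valid URL, e.g. 'http://example.com/repo', is returned unchanged (after any
+# directory-service dereferencing).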
+
+
+def get_transport_from_path(path, possible_transports=None):
+ """Open a transport for a local path.
+
+ :param path: Local path as byte or unicode string
+ :return: Transport object for path
+ """
+ return get_transport_from_url(urlutils.local_path_to_url(path),
+ possible_transports)
+
+
+def get_transport_from_url(url, possible_transports=None):
+ """Open a transport to access a URL.
+
+ :param url: a URL
+ :param possible_transports: optional reusable transports list. If not None, created
+ transports will be added to the list.
+
+ :return: A new transport optionally sharing its connection with one of
+ possible_transports.
+ """
+ transport = None
+ if possible_transports is not None:
+ for t in possible_transports:
+ t_same_connection = t._reuse_for(url)
+ if t_same_connection is not None:
+ # Add only new transports
+ if t_same_connection not in possible_transports:
+ possible_transports.append(t_same_connection)
+ return t_same_connection
+
+ last_err = None
+ for proto, factory_list in transport_list_registry.items():
+ if proto is not None and url.startswith(proto):
+ transport, last_err = _try_transport_factories(url, factory_list)
+ if transport:
+ if possible_transports is not None:
+ if transport in possible_transports:
+ raise AssertionError()
+ possible_transports.append(transport)
+ return transport
+ if not urlutils.is_url(url):
+ raise errors.InvalidURL(path=url)
+ raise errors.UnsupportedProtocol(url, last_err)
+
+
+def get_transport(base, possible_transports=None):
+ """Open a transport to access a URL or directory.
+
+ :param base: either a URL or a directory name.
+
+ :param possible_transports: optional reusable transports list. If not None, created
+ transports will be added to the list.
+
+ :return: A new transport optionally sharing its connection with one of
+ possible_transports.
+ """
+ if base is None:
+ base = '.'
+ return get_transport_from_url(location_to_url(base), possible_transports)
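+
+# Illustrative usage sketch (the URLs are hypothetical). Passing the same list
+# as possible_transports lets later transports reuse an existing connection
+# when scheme, user, host and port match:
+#
+#   shared = []
+#   t1 = get_transport('bzr+ssh://example.com/repo', possible_transports=shared)
+#   t2 = get_transport('bzr+ssh://example.com/other', possible_transports=shared)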
+
+
+def _try_transport_factories(base, factory_list):
+ last_err = None
+ for factory in factory_list:
+ try:
+ return factory.get_obj()(base), None
+ except errors.DependencyNotPresent, e:
+ mutter("failed to instantiate transport %r for %r: %r" %
+ (factory, base, e))
+ last_err = e
+ continue
+ return None, last_err
+
+
+def do_catching_redirections(action, transport, redirected):
+ """Execute an action with given transport catching redirections.
+
+ This is a facility provided for callers needing to follow redirections
+ silently. The silence is relative: it is the caller's responsibility to
+ inform the user about each redirection, which it can do from within the
+ 'redirected' callable using the exception it is given.
+
+ :param action: A callable, what the caller wants to do while catching
+ redirections.
+ :param transport: The initial transport used.
+ :param redirected: A callable receiving the redirected transport and the
+ RedirectRequested exception.
+
+ :return: Whatever 'action' returns
+ """
+ MAX_REDIRECTIONS = 8
+
+ # If a loop occurs, there is little we can do. So we don't try to detect
+ # one; we just give up if too many redirections occur. The real fix
+ # belongs outside: wherever the loop is defined.
+ for redirections in range(MAX_REDIRECTIONS):
+ try:
+ return action(transport)
+ except errors.RedirectRequested, e:
+ redirection_notice = '%s is%s redirected to %s' % (
+ e.source, e.permanently, e.target)
+ transport = redirected(transport, e, redirection_notice)
+ else:
+ # Loop exited without resolving the redirect? Either the
+ # user has kept a very, very old reference or a loop
+ # occurred in the redirections. Nothing we can cure here:
+ # tell the user. Note that the user has already been
+ # informed about each redirection (it is the caller's
+ # responsibility to do that in 'redirected' via the
+ # provided redirection_notice). The caller may provide
+ # more information if needed (like what file or directory
+ # we were trying to act upon when the redirection loop
+ # occurred).
+ raise errors.TooManyRedirections
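+
+# Illustrative usage sketch ('open_branch' is a stand-in for whatever the
+# caller really wants to do; informing the user is the caller's job):
+#
+#   def redirected(transport, exception, notice):
+#       # e.g. report 'notice' to the user here
+#       return transport._redirected_to(exception.source, exception.target)
+#
+#   result = do_catching_redirections(
+#       lambda t: open_branch(t), transport, redirected)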
+
+
+class Server(object):
+ """A Transport Server.
+
+ The Server interface provides a server for a given transport type.
+ """
+
+ def start_server(self):
+ """Setup the server to service requests."""
+
+ def stop_server(self):
+ """Remove the server and cleanup any resources it owns."""
+
+
+# None is the default transport, for things with no url scheme
+register_transport_proto('file://',
+ help="Access using the standard filesystem (default)")
+register_lazy_transport('file://', 'bzrlib.transport.local', 'LocalTransport')
+
+register_transport_proto('sftp://',
+ help="Access using SFTP (most SSH servers provide SFTP).",
+ register_netloc=True)
+register_lazy_transport('sftp://', 'bzrlib.transport.sftp', 'SFTPTransport')
+# Decorated http transport
+register_transport_proto('http+urllib://',
+# help="Read-only access of branches exported on the web."
+ register_netloc=True)
+register_lazy_transport('http+urllib://', 'bzrlib.transport.http._urllib',
+ 'HttpTransport_urllib')
+register_transport_proto('https+urllib://',
+# help="Read-only access of branches exported on the web using SSL."
+ register_netloc=True)
+register_lazy_transport('https+urllib://', 'bzrlib.transport.http._urllib',
+ 'HttpTransport_urllib')
+register_transport_proto('http+pycurl://',
+# help="Read-only access of branches exported on the web."
+ register_netloc=True)
+register_lazy_transport('http+pycurl://', 'bzrlib.transport.http._pycurl',
+ 'PyCurlTransport')
+register_transport_proto('https+pycurl://',
+# help="Read-only access of branches exported on the web using SSL."
+ register_netloc=True)
+register_lazy_transport('https+pycurl://', 'bzrlib.transport.http._pycurl',
+ 'PyCurlTransport')
+# Default http transports (last declared wins (if it can be imported))
+register_transport_proto('http://',
+ help="Read-only access of branches exported on the web.")
+register_transport_proto('https://',
+ help="Read-only access of branches exported on the web using SSL.")
+# The default http implementation is urllib
+register_lazy_transport('http://', 'bzrlib.transport.http._pycurl',
+ 'PyCurlTransport')
+register_lazy_transport('http://', 'bzrlib.transport.http._urllib',
+ 'HttpTransport_urllib')
+register_lazy_transport('https://', 'bzrlib.transport.http._pycurl',
+ 'PyCurlTransport')
+register_lazy_transport('https://', 'bzrlib.transport.http._urllib',
+ 'HttpTransport_urllib')
+
+register_transport_proto('ftp://', help="Access using passive FTP.")
+register_lazy_transport('ftp://', 'bzrlib.transport.ftp', 'FtpTransport')
+register_transport_proto('aftp://', help="Access using active FTP.")
+register_lazy_transport('aftp://', 'bzrlib.transport.ftp', 'FtpTransport')
+register_transport_proto('gio+', help="Access using any GIO supported protocols.")
+register_lazy_transport('gio+', 'bzrlib.transport.gio_transport', 'GioTransport')
+
+
+# Default to trying GSSAPI authentication (if the kerberos module is
+# available)
+register_transport_proto('ftp+gssapi://', register_netloc=True)
+register_transport_proto('aftp+gssapi://', register_netloc=True)
+register_transport_proto('ftp+nogssapi://', register_netloc=True)
+register_transport_proto('aftp+nogssapi://', register_netloc=True)
+register_lazy_transport('ftp+gssapi://', 'bzrlib.transport.ftp._gssapi',
+ 'GSSAPIFtpTransport')
+register_lazy_transport('aftp+gssapi://', 'bzrlib.transport.ftp._gssapi',
+ 'GSSAPIFtpTransport')
+register_lazy_transport('ftp://', 'bzrlib.transport.ftp._gssapi',
+ 'GSSAPIFtpTransport')
+register_lazy_transport('aftp://', 'bzrlib.transport.ftp._gssapi',
+ 'GSSAPIFtpTransport')
+register_lazy_transport('ftp+nogssapi://', 'bzrlib.transport.ftp',
+ 'FtpTransport')
+register_lazy_transport('aftp+nogssapi://', 'bzrlib.transport.ftp',
+ 'FtpTransport')
+
+register_transport_proto('memory://')
+register_lazy_transport('memory://', 'bzrlib.transport.memory',
+ 'MemoryTransport')
+
+register_transport_proto('readonly+',
+# help="This modifier converts any transport to be readonly."
+ )
+register_lazy_transport('readonly+', 'bzrlib.transport.readonly',
+ 'ReadonlyTransportDecorator')
+
+register_transport_proto('fakenfs+')
+register_lazy_transport('fakenfs+', 'bzrlib.transport.fakenfs',
+ 'FakeNFSTransportDecorator')
+
+register_transport_proto('log+')
+register_lazy_transport('log+', 'bzrlib.transport.log', 'TransportLogDecorator')
+
+register_transport_proto('trace+')
+register_lazy_transport('trace+', 'bzrlib.transport.trace',
+ 'TransportTraceDecorator')
+
+register_transport_proto('unlistable+')
+register_lazy_transport('unlistable+', 'bzrlib.transport.unlistable',
+ 'UnlistableTransportDecorator')
+
+register_transport_proto('brokenrename+')
+register_lazy_transport('brokenrename+', 'bzrlib.transport.brokenrename',
+ 'BrokenRenameTransportDecorator')
+
+register_transport_proto('vfat+')
+register_lazy_transport('vfat+',
+ 'bzrlib.transport.fakevfat',
+ 'FakeVFATTransportDecorator')
+
+register_transport_proto('nosmart+')
+register_lazy_transport('nosmart+', 'bzrlib.transport.nosmart',
+ 'NoSmartTransportDecorator')
+
+register_transport_proto('bzr://',
+ help="Fast access using the Bazaar smart server.",
+ register_netloc=True)
+
+register_lazy_transport('bzr://', 'bzrlib.transport.remote',
+ 'RemoteTCPTransport')
+register_transport_proto('bzr-v2://', register_netloc=True)
+
+register_lazy_transport('bzr-v2://', 'bzrlib.transport.remote',
+ 'RemoteTCPTransportV2Only')
+register_transport_proto('bzr+http://',
+# help="Fast access using the Bazaar smart server over HTTP."
+ register_netloc=True)
+register_lazy_transport('bzr+http://', 'bzrlib.transport.remote',
+ 'RemoteHTTPTransport')
+register_transport_proto('bzr+https://',
+# help="Fast access using the Bazaar smart server over HTTPS."
+ register_netloc=True)
+register_lazy_transport('bzr+https://',
+ 'bzrlib.transport.remote',
+ 'RemoteHTTPTransport')
+register_transport_proto('bzr+ssh://',
+ help="Fast access using the Bazaar smart server over SSH.",
+ register_netloc=True)
+register_lazy_transport('bzr+ssh://', 'bzrlib.transport.remote',
+ 'RemoteSSHTransport')
+
+register_transport_proto('ssh:')
+register_lazy_transport('ssh:', 'bzrlib.transport.remote',
+ 'HintingSSHTransport')
+
+
+transport_server_registry = registry.Registry()
+transport_server_registry.register_lazy('bzr', 'bzrlib.smart.server',
+ 'serve_bzr', help="The Bazaar smart server protocol over TCP. (default port: 4155)")
+transport_server_registry.default_key = 'bzr'
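+
+# Illustrative sketch of how a plugin would register its own scheme (the
+# 'myproto' name and module path are hypothetical):
+#
+#   register_transport_proto('myproto://',
+#       help="Access using the myproto protocol.",
+#       register_netloc=True)
+#   register_lazy_transport('myproto://', 'myplugin.transport',
+#       'MyProtoTransport')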
diff --git a/bzrlib/transport/brokenrename.py b/bzrlib/transport/brokenrename.py
new file mode 100644
index 0000000..7e92772
--- /dev/null
+++ b/bzrlib/transport/brokenrename.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Transport implementation that doesn't detect clashing renames.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ urlutils,
+ )
+from bzrlib.transport import decorator
+
+
+class BrokenRenameTransportDecorator(decorator.TransportDecorator):
+ """A transport that fails to detect clashing renames"""
+
+ @classmethod
+ def _get_url_prefix(self):
+ """FakeNFS transports are identified by 'brokenrename+'"""
+ return 'brokenrename+'
+
+ def rename(self, rel_from, rel_to):
+ """See Transport.rename().
+ """
+ try:
+ if self._decorated.has(rel_to):
+ rel_to = urlutils.join(rel_to, urlutils.basename(rel_from))
+ self._decorated.rename(rel_from, rel_to)
+ except (errors.DirectoryNotEmpty, errors.FileExists), e:
+ # absorb the error
+ return
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ # we don't use this for general testing, only for the tests that
+ # specifically want it
+ return []
diff --git a/bzrlib/transport/chroot.py b/bzrlib/transport/chroot.py
new file mode 100644
index 0000000..cc8b0ac
--- /dev/null
+++ b/bzrlib/transport/chroot.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport that prevents access to locations above a set
+root.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib.transport import (
+ pathfilter,
+ register_transport,
+ )
+
+
+class ChrootServer(pathfilter.PathFilteringServer):
+ """User space 'chroot' facility.
+
+ The server's get_url returns the url for a chroot transport mapped to the
+ backing transport. The url is of the form chroot-xxx:/// so parent
+ directories of the backing transport are not visible. The chroot url will
+ not allow '..' sequences to result in requests to the chroot affecting
+ directories outside the backing transport.
+
+ PathFilteringServer does all the path sanitation needed to enforce a
+ chroot, so this is a simple subclass of PathFilteringServer that ignores
+ filter_func.
+ """
+
+ def __init__(self, backing_transport):
+ pathfilter.PathFilteringServer.__init__(self, backing_transport, None)
+
+ def _factory(self, url):
+ return ChrootTransport(self, url)
+
+ def start_server(self):
+ self.scheme = 'chroot-%d:///' % id(self)
+ register_transport(self.scheme, self._factory)
+
+
+class ChrootTransport(pathfilter.PathFilteringTransport):
+ """A ChrootTransport.
+
+ Please see ChrootServer for details.
+ """
+
+ def _filter(self, relpath):
+ # A simplified version of PathFilteringTransport's _filter that omits
+ # the call to self.server.filter_func.
+ return self._relpath_from_server_root(relpath)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(ChrootTransport, test_server.TestingChrootServer)]
diff --git a/bzrlib/transport/decorator.py b/bzrlib/transport/decorator.py
new file mode 100644
index 0000000..66515fe
--- /dev/null
+++ b/bzrlib/transport/decorator.py
@@ -0,0 +1,198 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport that decorates another transport.
+
+This does not change the transport behaviour at all, but provides all the
+stub functions to allow other decorators to be written easily.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import transport
+
+
+class TransportDecorator(transport.Transport):
+ """A no-change decorator for Transports.
+
+ Subclasses of this are new transports that are based on an
+ underlying transport and can override or intercept some
+ behavior. For example ReadonlyTransportDecorator prevents
+ all write attempts, and FakeNFSTransportDecorator simulates
+ some NFS quirks.
+
+ This decorator class is not directly usable as a decorator:
+ you must use a subclass which has overridden the _get_url_prefix() class
+ method to return the url prefix for the subclass.
+ """
+
+ def __init__(self, url, _decorated=None, _from_transport=None):
+ """Set the 'base' path of the transport.
+
+ :param _decorated: A private parameter for cloning.
+ :param _from_transport: Is available for subclasses that
+ need to share state across clones.
+ """
+ prefix = self._get_url_prefix()
+ if not url.startswith(prefix):
+ raise ValueError("url %r doesn't start with decorator prefix %r" %
+ (url, prefix))
+ not_decorated_url = url[len(prefix):]
+ if _decorated is None:
+ self._decorated = transport.get_transport(
+ not_decorated_url)
+ else:
+ self._decorated = _decorated
+ super(TransportDecorator, self).__init__(prefix + self._decorated.base)
+
+ def abspath(self, relpath):
+ """See Transport.abspath()."""
+ return self._get_url_prefix() + self._decorated.abspath(relpath)
+
+ def append_file(self, relpath, f, mode=None):
+ """See Transport.append_file()."""
+ return self._decorated.append_file(relpath, f, mode=mode)
+
+ def append_bytes(self, relpath, bytes, mode=None):
+ """See Transport.append_bytes()."""
+ return self._decorated.append_bytes(relpath, bytes, mode=mode)
+
+ def _can_roundtrip_unix_modebits(self):
+ """See Transport._can_roundtrip_unix_modebits()."""
+ return self._decorated._can_roundtrip_unix_modebits()
+
+ def clone(self, offset=None):
+ """See Transport.clone()."""
+ decorated_clone = self._decorated.clone(offset)
+ return self.__class__(
+ self._get_url_prefix() + decorated_clone.base, decorated_clone,
+ self)
+
+ def delete(self, relpath):
+ """See Transport.delete()."""
+ return self._decorated.delete(relpath)
+
+ def delete_tree(self, relpath):
+ """See Transport.delete_tree()."""
+ return self._decorated.delete_tree(relpath)
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # while decorators are in-process only, they
+ # can be handed back into bzrlib safely, so
+ # it's just the base.
+ return self.base
+
+ @classmethod
+ def _get_url_prefix(self):
+ """Return the URL prefix of this decorator."""
+ raise NotImplementedError(self._get_url_prefix)
+
+ def get(self, relpath):
+ """See Transport.get()."""
+ return self._decorated.get(relpath)
+
+ def get_smart_client(self):
+ return self._decorated.get_smart_client()
+
+ def has(self, relpath):
+ """See Transport.has()."""
+ return self._decorated.has(relpath)
+
+ def is_readonly(self):
+ """See Transport.is_readonly."""
+ return self._decorated.is_readonly()
+
+ def mkdir(self, relpath, mode=None):
+ """See Transport.mkdir()."""
+ return self._decorated.mkdir(relpath, mode)
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ return self._decorated.open_write_stream(relpath, mode=mode)
+
+ def put_file(self, relpath, f, mode=None):
+ """See Transport.put_file()."""
+ return self._decorated.put_file(relpath, f, mode)
+
+ def put_bytes(self, relpath, bytes, mode=None):
+ """See Transport.put_bytes()."""
+ return self._decorated.put_bytes(relpath, bytes, mode)
+
+ def listable(self):
+ """See Transport.listable."""
+ return self._decorated.listable()
+
+ def iter_files_recursive(self):
+ """See Transport.iter_files_recursive()."""
+ return self._decorated.iter_files_recursive()
+
+ def list_dir(self, relpath):
+ """See Transport.list_dir()."""
+ return self._decorated.list_dir(relpath)
+
+ def _readv(self, relpath, offsets):
+ """See Transport._readv."""
+ return self._decorated._readv(relpath, offsets)
+
+ def recommended_page_size(self):
+ """See Transport.recommended_page_size()."""
+ return self._decorated.recommended_page_size()
+
+ def rename(self, rel_from, rel_to):
+ return self._decorated.rename(rel_from, rel_to)
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ return self._decorated.rmdir(relpath)
+
+ def _get_segment_parameters(self):
+ return self._decorated.segment_parameters
+
+ def _set_segment_parameters(self, value):
+ self._decorated.segment_parameters = value
+
+ segment_parameters = property(_get_segment_parameters,
+ _set_segment_parameters, "See Transport.segment_parameters")
+
+ def stat(self, relpath):
+ """See Transport.stat()."""
+ return self._decorated.stat(relpath)
+
+ def lock_read(self, relpath):
+ """See Transport.lock_read."""
+ return self._decorated.lock_read(relpath)
+
+ def lock_write(self, relpath):
+ """See Transport.lock_write."""
+ return self._decorated.lock_write(relpath)
+
+ def _redirected_to(self, source, target):
+ redirected = self._decorated._redirected_to(source, target)
+ if redirected is not None:
+ return self.__class__(self._get_url_prefix() + redirected.base,
+ redirected)
+ else:
+ return None
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing.
+
+ The Decorator class is not directly usable, and testing it would not have
+ any benefit - it's the concrete classes which need to be tested.
+ """
+ return []
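+
+# Minimal subclass sketch (illustrative only; the 'noop+' prefix is
+# hypothetical). A concrete decorator only has to provide its URL prefix:
+#
+#   class NoopTransportDecorator(TransportDecorator):
+#       """Pass every call straight through to the decorated transport."""
+#
+#       @classmethod
+#       def _get_url_prefix(cls):
+#           return 'noop+'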
diff --git a/bzrlib/transport/fakenfs.py b/bzrlib/transport/fakenfs.py
new file mode 100644
index 0000000..522a25b
--- /dev/null
+++ b/bzrlib/transport/fakenfs.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2005, 2006, 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Transport implementation that adapts another transport to look like NFS.
+
+Currently this means that the rename() call will raise ResourceBusy when a
+target path is a directory.
+
+To get a fake nfs transport use get_transport('fakenfs+' + real_url)
+"""
+
+from __future__ import absolute_import
+
+from stat import S_ISDIR
+
+from bzrlib import (
+ errors,
+ urlutils,
+ )
+from bzrlib.transport import decorator
+
+
+class FakeNFSTransportDecorator(decorator.TransportDecorator):
+ """A transport that behaves like NFS, for testing"""
+
+ @classmethod
+ def _get_url_prefix(self):
+ """FakeNFS transports are identified by 'fakenfs+'"""
+ return 'fakenfs+'
+
+ def rename(self, rel_from, rel_to):
+ """See Transport.rename().
+
+ This variation on rename converts DirectoryNotEmpty and FileExists
+ errors into ResourceBusy if the target is a directory.
+ """
+ try:
+ self._decorated.rename(rel_from, rel_to)
+ except (errors.DirectoryNotEmpty, errors.FileExists), e:
+ # if this is a directory rename, raise
+ # resourcebusy rather than DirectoryNotEmpty
+ stat = self._decorated.stat(rel_to)
+ if S_ISDIR(stat.st_mode):
+ raise errors.ResourceBusy(rel_to)
+ else:
+ raise
+
+ def delete(self, relpath):
+ if urlutils.basename(relpath).startswith('.nfs'):
+ raise errors.ResourceBusy(self.abspath(relpath))
+ return self._decorated.delete(relpath)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(FakeNFSTransportDecorator, test_server.FakeNFSServer),]
diff --git a/bzrlib/transport/fakevfat.py b/bzrlib/transport/fakevfat.py
new file mode 100644
index 0000000..e635891
--- /dev/null
+++ b/bzrlib/transport/fakevfat.py
@@ -0,0 +1,106 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Fake transport with some restrictions of Windows VFAT filesystems.
+
+VFAT on Windows has several restrictions that are not present on unix
+filesystems, which are imposed by this transport.
+
+VFAT is strictly 8-bit using codepages to represent non-ascii characters.
+This implementation currently doesn't model the codepage but just insists
+on only ascii characters being written.
+
+Restrictions imposed by this transport:
+
+ * filenames are squashed to lowercase
+ * filenames containing non-ascii characters are not allowed
+ * filenames containing the characters "?*:;<>" are not allowed
+ (there should be more?)
+
+Some other restrictions are not implemented yet, but possibly could be:
+
+ * open files can't be deleted or renamed
+ * directories containing open files can't be renamed
+ * special device names (NUL, LPT, ...) are not allowed
+
+"""
+
+from __future__ import absolute_import
+
+import re
+
+from bzrlib.transport import decorator
+
+
+# TODO: It might be nice if these hooks were available in a more general way
+# on all paths passed in to the Transport, so that we didn't have to hook
+# every single method.
+
+# TODO: Perhaps don't inherit from TransportDecorator so that methods
+# which are not implemented here fail by default?
+
+
+class FakeVFATTransportDecorator(decorator.TransportDecorator):
+ """A decorator that can convert any transport to be readonly.
+
+ This is requested via the 'vfat+' prefix to get_transport().
+
+ This is intended only for use in testing and doesn't implement every
+ method very well yet.
+
+ This transport is typically layered on a local or memory transport
+ which actually stores the files.
+ """
+
+ def _can_roundtrip_unix_modebits(self):
+ """See Transport._can_roundtrip_unix_modebits()."""
+ return False
+
+ @classmethod
+ def _get_url_prefix(self):
+ """Readonly transport decorators are invoked via 'vfat+'"""
+ return 'vfat+'
+
+ def _squash_name(self, name):
+ """Return vfat-squashed filename.
+
+ The name is returned as it will be stored on disk. This raises an
+ error if there are invalid characters in the name.
+ """
+ if re.search(r'[?*:;<>]', name):
+ raise ValueError("illegal characters for VFAT filename: %r" % name)
+ return name.lower()
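+
+ # Illustrative: _squash_name('README.TXT') returns 'readme.txt', while a
+ # name containing any of "?*:;<>" raises ValueError.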
+
+ def get(self, relpath):
+ return self._decorated.get(self._squash_name(relpath))
+
+ def mkdir(self, relpath, mode=None):
+ return self._decorated.mkdir(self._squash_name(relpath), 0755)
+
+ def has(self, relpath):
+ return self._decorated.has(self._squash_name(relpath))
+
+ def _readv(self, relpath, offsets):
+ return self._decorated.readv(self._squash_name(relpath), offsets)
+
+ def put_file(self, relpath, f, mode=None):
+ return self._decorated.put_file(self._squash_name(relpath), f, mode)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(FakeVFATTransportDecorator, test_server.FakeVFATServer),]
diff --git a/bzrlib/transport/ftp/__init__.py b/bzrlib/transport/ftp/__init__.py
new file mode 100644
index 0000000..0c93137
--- /dev/null
+++ b/bzrlib/transport/ftp/__init__.py
@@ -0,0 +1,642 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport over ftp.
+
+Written by Daniel Silverstone <dsilvers@digital-scurf.org> with serious
+cargo-culting from the sftp transport and the http transport.
+
+It provides the ftp:// and aftp:// protocols where ftp:// is passive ftp
+and aftp:// is active ftp. Most people will want passive ftp for traversing
+NAT and other firewalls, so it's best to use it unless you explicitly want
+active, in which case aftp:// will be your friend.
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import ftplib
+import getpass
+import os
+import random
+import socket
+import stat
+import time
+
+from bzrlib import (
+ config,
+ errors,
+ osutils,
+ urlutils,
+ )
+from bzrlib.symbol_versioning import (
+ DEPRECATED_PARAMETER,
+ deprecated_in,
+ deprecated_passed,
+ warn,
+ )
+from bzrlib.trace import mutter, warning
+from bzrlib.transport import (
+ AppendBasedFileStream,
+ ConnectedTransport,
+ _file_streams,
+ register_urlparse_netloc_protocol,
+ Server,
+ )
+
+
+register_urlparse_netloc_protocol('aftp')
+
+
+class FtpPathError(errors.PathError):
+ """FTP failed for path: %(path)s%(extra)s"""
+
+
+class FtpStatResult(object):
+
+ def __init__(self, f, abspath):
+ try:
+ self.st_size = f.size(abspath)
+ self.st_mode = stat.S_IFREG
+ except ftplib.error_perm:
+ pwd = f.pwd()
+ try:
+ f.cwd(abspath)
+ self.st_mode = stat.S_IFDIR
+ finally:
+ f.cwd(pwd)
+
+
+_number_of_retries = 2
+_sleep_between_retries = 5
+
+# FIXME: there are inconsistencies in the way temporary errors are
+# handled. Sometimes we reconnect, sometimes we raise an exception. Care should
+# be taken to analyze the implications for write operations (read operations
+# are safe to retry). Overall even some read operations are never
+# retried. --vila 20070720 (Bug #127164)
+class FtpTransport(ConnectedTransport):
+ """This is the transport agent for ftp:// access."""
+
+ def __init__(self, base, _from_transport=None):
+ """Set the base path where files will be stored."""
+ if not (base.startswith('ftp://') or base.startswith('aftp://')):
+ raise ValueError(base)
+ super(FtpTransport, self).__init__(base,
+ _from_transport=_from_transport)
+ self._unqualified_scheme = 'ftp'
+ if self._parsed_url.scheme == 'aftp':
+ self.is_active = True
+ else:
+ self.is_active = False
+
+ # Most modern FTP servers support the APPE command. If ours doesn't, we
+ # (re)set this flag accordingly later.
+ self._has_append = True
+
+ def _get_FTP(self):
+ """Return the ftplib.FTP instance for this object."""
+ # Ensures that a connection is established
+ connection = self._get_connection()
+ if connection is None:
+ # First connection ever
+ connection, credentials = self._create_connection()
+ self._set_connection(connection, credentials)
+ return connection
+
+ connection_class = ftplib.FTP
+
+ def _create_connection(self, credentials=None):
+ """Create a new connection with the provided credentials.
+
+ :param credentials: The credentials needed to establish the connection.
+
+ :return: The created connection and its associated credentials.
+
+ The input credentials are a (user, password) pair; the password may
+ have been entered interactively by the user and may differ from the
+ one provided in the base url at transport creation time. The returned
+ credentials are (user, password).
+ """
+ if credentials is None:
+ user, password = self._user, self._password
+ else:
+ user, password = credentials
+
+ auth = config.AuthenticationConfig()
+ if user is None:
+ user = auth.get_user('ftp', self._host, port=self._port,
+ default=getpass.getuser())
+ mutter("Constructing FTP instance against %r" %
+ ((self._host, self._port, user, '********',
+ self.is_active),))
+ try:
+ connection = self.connection_class()
+ connection.connect(host=self._host, port=self._port)
+ self._login(connection, auth, user, password)
+ connection.set_pasv(not self.is_active)
+ # binary mode is the default
+ connection.voidcmd('TYPE I')
+ except socket.error, e:
+ raise errors.SocketConnectionError(self._host, self._port,
+ msg='Unable to connect to',
+ orig_error= e)
+ except ftplib.error_perm, e:
+ raise errors.TransportError(msg="Error setting up connection:"
+ " %s" % str(e), orig_error=e)
+ return connection, (user, password)
+
+ def _login(self, connection, auth, user, password):
+ # '' is a valid password
+ if user and user != 'anonymous' and password is None:
+ password = auth.get_password('ftp', self._host,
+ user, port=self._port)
+ connection.login(user=user, passwd=password)
+
+ def _reconnect(self):
+ """Create a new connection with the previously used credentials"""
+ credentials = self._get_credentials()
+ connection, credentials = self._create_connection(credentials)
+ self._set_connection(connection, credentials)
+
+ def disconnect(self):
+ connection = self._get_connection()
+ if connection is not None:
+ connection.close()
+
+ def _translate_ftp_error(self, err, path, extra=None,
+ unknown_exc=FtpPathError):
+ """Try to translate an ftplib exception to a bzrlib exception.
+
+ :param err: The error to translate into a bzr error
+ :param path: The path which had problems
+ :param extra: Extra information which can be included
+ :param unknown_exc: If None, we will just raise the original exception
+ otherwise we raise unknown_exc(path, extra=extra)
+ """
+ # ftp error numbers are very generic, like "451: Requested action aborted,
+ # local error in processing" so unfortunately we have to match by
+ # strings.
+ s = str(err).lower()
+ if not extra:
+ extra = str(err)
+ else:
+ extra += ': ' + str(err)
+ if ('no such file' in s
+ or 'could not open' in s
+ or 'no such dir' in s
+ or 'could not create file' in s # vsftpd
+ or 'file doesn\'t exist' in s
+ or 'rnfr command failed.' in s # vsftpd RNFR reply if file not found
+ or 'file/directory not found' in s # filezilla server
+ # Microsoft FTP-Service RNFR reply if file not found
+ or (s.startswith('550 ') and 'unable to rename to' in extra)
+ # if containing directory doesn't exist, suggested by
+ # <https://bugs.launchpad.net/bzr/+bug/224373>
+ or (s.startswith('550 ') and "can't find folder" in s)
+ ):
+ raise errors.NoSuchFile(path, extra=extra)
+ elif ('file exists' in s):
+ raise errors.FileExists(path, extra=extra)
+ elif ('not a directory' in s):
+ raise errors.PathError(path, extra=extra)
+ elif 'directory not empty' in s:
+ raise errors.DirectoryNotEmpty(path, extra=extra)
+
+ mutter('unable to understand error for path: %s: %s', path, err)
+
+ if unknown_exc:
+ raise unknown_exc(path, extra=extra)
+ # TODO: jam 20060516 Consider re-raising the error wrapped in
+ # something like TransportError, but this loses the traceback
+ # Also, 'sftp' has a generic 'Failure' mode, which we use failure_exc
+ # to handle. Consider doing something like that here.
+ #raise TransportError(msg='Error for path: %s' % (path,), orig_error=e)
+ raise
+
+ def has(self, relpath):
+ """Does the target location exist?"""
+ # FIXME jam 20060516 We *do* ask about directories in the test suite
+ # We don't seem to in the actual codebase
+ # XXX: I assume we're never asked has(dirname) and thus I use
+ # the FTP size command and assume that if it doesn't raise,
+ # all is good.
+ abspath = self._remote_path(relpath)
+ try:
+ f = self._get_FTP()
+ mutter('FTP has check: %s => %s', relpath, abspath)
+ s = f.size(abspath)
+ mutter("FTP has: %s", abspath)
+ return True
+ except ftplib.error_perm, e:
+ if ('is a directory' in str(e).lower()):
+ mutter("FTP has dir: %s: %s", abspath, e)
+ return True
+ mutter("FTP has not: %s: %s", abspath, e)
+ return False
+
+ def get(self, relpath, retries=0):
+ """Get the file at the given relative path.
+
+ :param relpath: The relative path to the file
+ :param retries: Number of retries after temporary failures so far
+ for this operation.
+
+ We're meant to return a file-like object which bzr will
+ then read from. For now we do this via the magic of StringIO
+ """
+ try:
+ mutter("FTP get: %s", self._remote_path(relpath))
+ f = self._get_FTP()
+ ret = StringIO()
+ f.retrbinary('RETR '+self._remote_path(relpath), ret.write, 8192)
+ ret.seek(0)
+ return ret
+ except ftplib.error_perm, e:
+ raise errors.NoSuchFile(self.abspath(relpath), extra=str(e))
+ except ftplib.error_temp, e:
+ if retries > _number_of_retries:
+ raise errors.TransportError(msg="FTP temporary error during GET %s. Aborting."
+ % self.abspath(relpath),
+ orig_error=e)
+ else:
+ warning("FTP temporary error: %s. Retrying.", str(e))
+ self._reconnect()
+ return self.get(relpath, retries+1)
+ except EOFError, e:
+ if retries > _number_of_retries:
+ raise errors.TransportError("FTP control connection closed during GET %s."
+ % self.abspath(relpath),
+ orig_error=e)
+ else:
+ warning("FTP control connection closed. Trying to reopen.")
+ time.sleep(_sleep_between_retries)
+ self._reconnect()
+ return self.get(relpath, retries+1)
+
+ def put_file(self, relpath, fp, mode=None, retries=0):
+ """Copy the file-like or string object into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param fp: File-like or string object.
+ :param retries: Number of retries after temporary failures so far
+ for this operation.
+
+ TODO: jam 20051215 ftp as a protocol seems to support chmod, but
+ ftplib does not
+ """
+ abspath = self._remote_path(relpath)
+ tmp_abspath = '%s.tmp.%.9f.%d.%d' % (abspath, time.time(),
+ os.getpid(), random.randint(0,0x7FFFFFFF))
+ bytes = None
+ if getattr(fp, 'read', None) is None:
+ # hand in a string IO
+ bytes = fp
+ fp = StringIO(bytes)
+ else:
+ # capture the byte count; .read() may be read only so
+ # decorate it.
+ class byte_counter(object):
+ def __init__(self, fp):
+ self.fp = fp
+ self.counted_bytes = 0
+ def read(self, count):
+ result = self.fp.read(count)
+ self.counted_bytes += len(result)
+ return result
+ fp = byte_counter(fp)
+ try:
+ mutter("FTP put: %s", abspath)
+ f = self._get_FTP()
+ try:
+ f.storbinary('STOR '+tmp_abspath, fp)
+ self._rename_and_overwrite(tmp_abspath, abspath, f)
+ self._setmode(relpath, mode)
+ if bytes is not None:
+ return len(bytes)
+ else:
+ return fp.counted_bytes
+ except (ftplib.error_temp, EOFError), e:
+ warning("Failure during ftp PUT of %s: %s. Deleting temporary file."
+ % (tmp_abspath, e, ))
+ try:
+ f.delete(tmp_abspath)
+ except:
+ warning("Failed to delete temporary file on the"
+ " server.\nFile: %s", tmp_abspath)
+ raise e
+ raise
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, abspath, extra='could not store',
+ unknown_exc=errors.NoSuchFile)
+ except ftplib.error_temp, e:
+ if retries > _number_of_retries:
+ raise errors.TransportError(
+ "FTP temporary error during PUT %s: %s. Aborting."
+ % (self.abspath(relpath), e), orig_error=e)
+ else:
+ warning("FTP temporary error: %s. Retrying.", str(e))
+ self._reconnect()
+ self.put_file(relpath, fp, mode, retries+1)
+ except EOFError, e:
+ if retries > _number_of_retries:
+ raise errors.TransportError("FTP control connection closed during PUT %s."
+ % self.abspath(relpath), orig_error=e)
+ else:
+ warning("FTP control connection closed. Trying to reopen.")
+ time.sleep(_sleep_between_retries)
+ self._reconnect()
+ self.put_file(relpath, fp, mode, retries+1)
+
+ def mkdir(self, relpath, mode=None):
+ """Create a directory at the given path."""
+ abspath = self._remote_path(relpath)
+ try:
+ mutter("FTP mkd: %s", abspath)
+ f = self._get_FTP()
+ try:
+ f.mkd(abspath)
+ except ftplib.error_reply, e:
+ # <https://bugs.launchpad.net/bzr/+bug/224373> Microsoft FTP
+ # server returns "250 Directory created." which is kind of
+ # reasonable, 250 meaning "requested file action OK", but not what
+ # Python's ftplib expects.
+ if e[0][:3] == '250':
+ pass
+ else:
+ raise
+ self._setmode(relpath, mode)
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, abspath,
+ unknown_exc=errors.FileExists)
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ self.put_bytes(relpath, "", mode)
+ result = AppendBasedFileStream(self, relpath)
+ _file_streams[self.abspath(relpath)] = result
+ return result
+
+ def recommended_page_size(self):
+ """See Transport.recommended_page_size().
+
+ For FTP we suggest a large page size to reduce the overhead
+ introduced by latency.
+ """
+ return 64 * 1024
+
+ def rmdir(self, rel_path):
+ """Delete the directory at rel_path"""
+ abspath = self._remote_path(rel_path)
+ try:
+ mutter("FTP rmd: %s", abspath)
+ f = self._get_FTP()
+ f.rmd(abspath)
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, abspath, unknown_exc=errors.PathError)
+
+ def append_file(self, relpath, f, mode=None):
+ """Append the text in the file-like object into the final
+ location.
+ """
+ text = f.read()
+ abspath = self._remote_path(relpath)
+ if self.has(relpath):
+ ftp = self._get_FTP()
+ result = ftp.size(abspath)
+ else:
+ result = 0
+
+ if self._has_append:
+ mutter("FTP appe to %s", abspath)
+ self._try_append(relpath, text, mode)
+ else:
+ self._fallback_append(relpath, text, mode)
+
+ return result
+
+ def _try_append(self, relpath, text, mode=None, retries=0):
+ """Try repeatedly to append the given text to the file at relpath.
+
+ This is a recursive function. On errors, it will be called until the
+ number of retries is exceeded.
+ """
+ try:
+ abspath = self._remote_path(relpath)
+ mutter("FTP appe (try %d) to %s", retries, abspath)
+ ftp = self._get_FTP()
+ cmd = "APPE %s" % abspath
+ conn = ftp.transfercmd(cmd)
+ conn.sendall(text)
+ conn.close()
+ self._setmode(relpath, mode)
+ ftp.getresp()
+ except ftplib.error_perm, e:
+ # Check whether the command is not supported (reply code 502)
+ if str(e).startswith('502 '):
+ warning("FTP server does not support file appending natively. "
+ "Performance may be severely degraded! (%s)", e)
+ self._has_append = False
+ self._fallback_append(relpath, text, mode)
+ else:
+ self._translate_ftp_error(e, abspath, extra='error appending',
+ unknown_exc=errors.NoSuchFile)
+ except ftplib.error_temp, e:
+ if retries > _number_of_retries:
+ raise errors.TransportError(
+ "FTP temporary error during APPEND %s. Aborting."
+ % abspath, orig_error=e)
+ else:
+ warning("FTP temporary error: %s. Retrying.", str(e))
+ self._reconnect()
+ self._try_append(relpath, text, mode, retries+1)
+
+ def _fallback_append(self, relpath, text, mode=None):
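+ # No native APPE support: fetch the existing file, append the new text
+ # locally, and re-upload the whole content with put_file.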
+ remote = self.get(relpath)
+ remote.seek(0, os.SEEK_END)
+ remote.write(text)
+ remote.seek(0)
+ return self.put_file(relpath, remote, mode)
+
+ def _setmode(self, relpath, mode):
+ """Set permissions on a path.
+
+ Only set permissions if the FTP server supports the 'SITE CHMOD'
+ extension.
+ """
+ if mode:
+ try:
+ mutter("FTP site chmod: setting permissions to %s on %s",
+ oct(mode), self._remote_path(relpath))
+ ftp = self._get_FTP()
+ cmd = "SITE CHMOD %s %s" % (oct(mode),
+ self._remote_path(relpath))
+ ftp.sendcmd(cmd)
+ except ftplib.error_perm, e:
+ # Command probably not available on this server
+ warning("FTP Could not set permissions to %s on %s. %s",
+ oct(mode), self._remote_path(relpath), str(e))
+
+ # TODO: jam 20060516 I believe ftp allows you to tell an ftp server
+ # to copy something to another machine. And you may be able
+ # to give it its own address as the 'to' location.
+ # So implement a fancier 'copy()'
+
+ def rename(self, rel_from, rel_to):
+ abs_from = self._remote_path(rel_from)
+ abs_to = self._remote_path(rel_to)
+ mutter("FTP rename: %s => %s", abs_from, abs_to)
+ f = self._get_FTP()
+ return self._rename(abs_from, abs_to, f)
+
+ def _rename(self, abs_from, abs_to, f):
+ try:
+ f.rename(abs_from, abs_to)
+ except (ftplib.error_temp, ftplib.error_perm), e:
+ self._translate_ftp_error(e, abs_from,
+ ': unable to rename to %r' % (abs_to))
+
+ def move(self, rel_from, rel_to):
+ """Move the item at rel_from to the location at rel_to"""
+ abs_from = self._remote_path(rel_from)
+ abs_to = self._remote_path(rel_to)
+ try:
+ mutter("FTP mv: %s => %s", abs_from, abs_to)
+ f = self._get_FTP()
+ self._rename_and_overwrite(abs_from, abs_to, f)
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, abs_from,
+ extra='unable to rename to %r' % (rel_to,),
+ unknown_exc=errors.PathError)
+
+ def _rename_and_overwrite(self, abs_from, abs_to, f):
+ """Do a fancy rename on the remote server.
+
+ Using the implementation provided by osutils.
+ """
+ osutils.fancy_rename(abs_from, abs_to,
+ rename_func=lambda p1, p2: self._rename(p1, p2, f),
+ unlink_func=lambda p: self._delete(p, f))
+
+ def delete(self, relpath):
+ """Delete the item at relpath"""
+ abspath = self._remote_path(relpath)
+ f = self._get_FTP()
+ self._delete(abspath, f)
+
+ def _delete(self, abspath, f):
+ try:
+ mutter("FTP rm: %s", abspath)
+ f.delete(abspath)
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, abspath, 'error deleting',
+ unknown_exc=errors.NoSuchFile)
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # FTP URLs are externally usable.
+ return self.base
+
+ def listable(self):
+ """See Transport.listable."""
+ return True
+
+ def list_dir(self, relpath):
+ """See Transport.list_dir."""
+ basepath = self._remote_path(relpath)
+ mutter("FTP nlst: %s", basepath)
+ f = self._get_FTP()
+ try:
+ try:
+ paths = f.nlst(basepath)
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, relpath,
+ extra='error with list_dir')
+ except ftplib.error_temp, e:
+ # xs4all's ftp server raises a 450 temp error when listing an
+ # empty directory. Check for that and just return an empty list
+ # in that case. See bug #215522
+ if str(e).lower().startswith('450 no files found'):
+ mutter('FTP Server returned "%s" for nlst.'
+ ' Assuming it means empty directory',
+ str(e))
+ return []
+ raise
+ finally:
+ # Restore binary mode as nlst switches to ascii mode to retrieve the
+ # file list
+ f.voidcmd('TYPE I')
+
+ # If FTP.nlst returns paths prefixed by relpath, strip 'em
+ if paths and paths[0].startswith(basepath):
+ entries = [path[len(basepath)+1:] for path in paths]
+ else:
+ entries = paths
+ # Remove . and .. if present
+ return [urlutils.escape(entry) for entry in entries
+ if entry not in ('.', '..')]
+
+ def iter_files_recursive(self):
+ """See Transport.iter_files_recursive.
+
+ This is cargo-culted from the SFTP transport"""
+ mutter("FTP iter_files_recursive")
+ queue = list(self.list_dir("."))
+ while queue:
+ relpath = queue.pop(0)
+ st = self.stat(relpath)
+ if stat.S_ISDIR(st.st_mode):
+ for i, basename in enumerate(self.list_dir(relpath)):
+ queue.insert(i, relpath+"/"+basename)
+ else:
+ yield relpath
+
+ def stat(self, relpath):
+ """Return the stat information for a file."""
+ abspath = self._remote_path(relpath)
+ try:
+ mutter("FTP stat: %s", abspath)
+ f = self._get_FTP()
+ return FtpStatResult(f, abspath)
+ except ftplib.error_perm, e:
+ self._translate_ftp_error(e, abspath, extra='error w/ stat')
+
+ def lock_read(self, relpath):
+ """Lock the given file for shared (read) access.
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ # The old RemoteBranch ignored locks for reading, so we will
+ # continue that tradition and return a bogus lock object.
+ class BogusLock(object):
+ def __init__(self, path):
+ self.path = path
+ def unlock(self):
+ pass
+ return BogusLock(relpath)
+
+ def lock_write(self, relpath):
+ """Lock the given file for exclusive (write) access.
+ WARNING: many transports do not support this, so try to avoid using it
+
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ return self.lock_read(relpath)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import ftp_server
+ return [(FtpTransport, ftp_server.FTPTestServer)]
diff --git a/bzrlib/transport/ftp/_gssapi.py b/bzrlib/transport/ftp/_gssapi.py
new file mode 100644
index 0000000..7af7508
--- /dev/null
+++ b/bzrlib/transport/ftp/_gssapi.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Support for secure authentication using GSSAPI over FTP.
+
+See RFC2228 for details.
+"""
+
+from __future__ import absolute_import
+
+import base64, ftplib
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.trace import (
+ mutter,
+ note,
+ )
+from bzrlib.transport.ftp import FtpTransport
+
+try:
+ import kerberos
+except ImportError, e:
+ mutter('failed to import kerberos lib: %s', e)
+ raise errors.DependencyNotPresent('kerberos', e)
+
+if getattr(kerberos, "authGSSClientWrap", None) is None:
+ raise errors.DependencyNotPresent('kerberos',
+ "missing encryption function authGSSClientWrap")
+
+
+class GSSAPIFtp(ftplib.FTP):
+ """Extended version of ftplib.FTP that can authenticate using GSSAPI."""
+
+ def mic_putcmd(self, line):
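+ # Integrity-protect the command (RFC 2228): GSSAPI-wrap the
+ # base64-encoded line and send it prefixed with MIC.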
+ rc = kerberos.authGSSClientWrap(self.vc, base64.b64encode(line))
+ wrapped = kerberos.authGSSClientResponse(self.vc)
+ ftplib.FTP.putcmd(self, "MIC " + wrapped)
+
+ def mic_getline(self):
+ resp = ftplib.FTP.getline(self)
+ if resp[:4] != '631 ':
+ raise AssertionError
+ rc = kerberos.authGSSClientUnwrap(self.vc, resp[4:].strip("\r\n"))
+ response = base64.b64decode(kerberos.authGSSClientResponse(self.vc))
+ return response
+
+ def gssapi_login(self, user):
+ # Try GSSAPI login first
+
+ # Used FTP response codes:
+ # 235 [ADAT=base64data] - indicates that the security data exchange
+ # completed successfully.
+ # 334 [ADAT=base64data] - indicates that the requested security
+ # mechanism is ok, and includes security data to be used by the
+ # client to construct the next command.
+ # 335 [ADAT=base64data] - indicates that the security data is
+ # acceptable, and more is required to complete the security
+ # data exchange.
+
+ resp = self.sendcmd('AUTH GSSAPI')
+ if resp.startswith('334 '):
+ rc, self.vc = kerberos.authGSSClientInit("ftp@%s" % self.host)
+ if kerberos.authGSSClientStep(self.vc, "") != 1:
+ while resp[:4] in ('334 ', '335 '):
+ authdata = kerberos.authGSSClientResponse(self.vc)
+ resp = self.sendcmd('ADAT ' + authdata)
+ if resp[:9] in ('235 ADAT=', '335 ADAT='):
+ rc = kerberos.authGSSClientStep(self.vc, resp[9:])
+ if not ((resp.startswith('235 ') and rc == 1) or
+ (resp.startswith('335 ') and rc == 0)):
+ raise ftplib.error_reply, resp
+ note(gettext("Authenticated as %s") %
+ kerberos.authGSSClientUserName(self.vc))
+
+ # Monkey patch ftplib
+ self.putcmd = self.mic_putcmd
+ self.getline = self.mic_getline
+ self.sendcmd('USER ' + user)
+ return resp
+ mutter("Unable to use GSSAPI authentication: %s", resp)
+
+
+class GSSAPIFtpTransport(FtpTransport):
+ """FTP transport implementation that will try to use GSSAPI authentication.
+
+ """
+
+ connection_class = GSSAPIFtp
+
+ def _login(self, connection, auth, user, password):
+ """Login with GSSAPI Authentication.
+
+ The password is used if GSSAPI Authentication is not available.
+
+ The username and password can both be None, in which case the
+ credentials specified in the URL or provided by the
+ AuthenticationConfig() are used.
+ """
+ try:
+ connection.gssapi_login(user=user)
+ except ftplib.error_perm, e:
+ super(GSSAPIFtpTransport, self)._login(connection, auth,
+ user, password)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import ftp_server
+ if ftp_server.FTPServerFeature.available():
+ return [(GSSAPIFtpTransport, ftp_server.FTPTestServer)]
+ else:
+ return []
diff --git a/bzrlib/transport/gio_transport.py b/bzrlib/transport/gio_transport.py
new file mode 100644
index 0000000..d3c88e2
--- /dev/null
+++ b/bzrlib/transport/gio_transport.py
@@ -0,0 +1,593 @@
+# Copyright (C) 2010 Canonical Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: Mattias Eriksson
+
+"""Implementation of Transport over gio.
+
+Written by Mattias Eriksson <snaggen@acc.umu.se> based on the ftp transport.
+
+It provides the gio+XXX:// protocols where XXX is any of the protocols
+supported by gio.
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import os
+import random
+import stat
+import time
+import urlparse
+
+from bzrlib import (
+ config,
+ errors,
+ osutils,
+ urlutils,
+ debug,
+ ui,
+ )
+from bzrlib.symbol_versioning import (
+ DEPRECATED_PARAMETER,
+ deprecated_in,
+ deprecated_passed,
+ warn,
+ )
+from bzrlib.trace import mutter
+from bzrlib.transport import (
+ FileStream,
+ ConnectedTransport,
+ _file_streams,
+ )
+
+from bzrlib.tests.test_server import TestServer
+
+try:
+ import glib
+except ImportError, e:
+ raise errors.DependencyNotPresent('glib', e)
+try:
+ import gio
+except ImportError, e:
+ raise errors.DependencyNotPresent('gio', e)
+
+
+class GioLocalURLServer(TestServer):
+ """A pretend server for local transports, using file:// urls.
+
+ Of course no actual server is required to access the local filesystem, so
+ this just exists to tell the test code how to get to it.
+ """
+
+ def start_server(self):
+ pass
+
+ def get_url(self):
+ """See Transport.Server.get_url."""
+ return "gio+" + urlutils.local_path_to_url('')
+
+
+class GioFileStream(FileStream):
+ """A file stream object returned by open_write_stream.
+
+ This version uses GIO to perform writes.
+ """
+
+ def __init__(self, transport, relpath):
+ FileStream.__init__(self, transport, relpath)
+ self.gio_file = transport._get_GIO(relpath)
+ self.stream = self.gio_file.create()
+
+ def _close(self):
+ self.stream.close()
+
+ def write(self, bytes):
+ try:
+ #Using pump_string_file seems to make things crash
+ osutils.pumpfile(StringIO(bytes), self.stream)
+ except gio.Error, e:
+ #self.transport._translate_gio_error(e,self.relpath)
+ raise errors.BzrError(str(e))
+
+
+class GioStatResult(object):
+
+ def __init__(self, f):
+ info = f.query_info('standard::size,standard::type')
+ self.st_size = info.get_size()
+ type = info.get_file_type()
+ if (type == gio.FILE_TYPE_REGULAR):
+ self.st_mode = stat.S_IFREG
+ elif type == gio.FILE_TYPE_DIRECTORY:
+ self.st_mode = stat.S_IFDIR
+
+
+class GioTransport(ConnectedTransport):
+ """This is the transport agent for gio+XXX:// access."""
+
+ def __init__(self, base, _from_transport=None):
+ """Initialize the GIO transport and make sure the url is correct."""
+
+ if not base.startswith('gio+'):
+ raise ValueError(base)
+
+ (scheme, netloc, path, params, query, fragment) = \
+ urlparse.urlparse(base[len('gio+'):], allow_fragments=False)
+ if '@' in netloc:
+ user, netloc = netloc.rsplit('@', 1)
+ #Seems it is not possible to list supported backends for GIO
+ #so a hardcoded list it is then.
+ gio_backends = ['dav', 'file', 'ftp', 'obex', 'sftp', 'ssh', 'smb']
+ if scheme not in gio_backends:
+ raise errors.InvalidURL(base,
+ extra="GIO support is only available for " + \
+ ', '.join(gio_backends))
+
+ #Remove the username and password from the url we send to GIO
+ #by rebuilding the url again.
+ u = (scheme, netloc, path, '', '', '')
+ self.url = urlparse.urlunparse(u)
+
+ # And finally initialize super
+ super(GioTransport, self).__init__(base,
+ _from_transport=_from_transport)
+
+ def _relpath_to_url(self, relpath):
+ full_url = urlutils.join(self.url, relpath)
+ if isinstance(full_url, unicode):
+ raise errors.InvalidURL(full_url)
+ return full_url
+
+ def _get_GIO(self, relpath):
+ """Return the ftplib.GIO instance for this object."""
+ # Ensures that a connection is established
+ connection = self._get_connection()
+ if connection is None:
+ # First connection ever
+ connection, credentials = self._create_connection()
+ self._set_connection(connection, credentials)
+ fileurl = self._relpath_to_url(relpath)
+ file = gio.File(fileurl)
+ return file
+
+ def _auth_cb(self, op, message, default_user, default_domain, flags):
+ #really use bzrlib.auth get_password for this
+ #or possibly better gnome-keyring?
+ auth = config.AuthenticationConfig()
+ parsed_url = urlutils.URL.from_string(self.url)
+ user = None
+ if (flags & gio.ASK_PASSWORD_NEED_USERNAME and
+ flags & gio.ASK_PASSWORD_NEED_DOMAIN):
+ prompt = (u'%s' % (parsed_url.scheme.upper(),) +
+ u' %(host)s DOMAIN\\username')
+ user_and_domain = auth.get_user(parsed_url.scheme,
+ parsed_url.host, port=parsed_url.port, ask=True,
+ prompt=prompt)
+ (domain, user) = user_and_domain.split('\\', 1)
+ op.set_username(user)
+ op.set_domain(domain)
+ elif flags & gio.ASK_PASSWORD_NEED_USERNAME:
+ user = auth.get_user(parsed_url.scheme, parsed_url.host,
+ port=parsed_url.port, ask=True)
+ op.set_username(user)
+ elif flags & gio.ASK_PASSWORD_NEED_DOMAIN:
+ #Don't know how common this case is, but anyway
+ #a DOMAIN and a username prompt should be the
+ #same so I will misuse the ui_factory get_username
+ #a little bit here.
+ prompt = (u'%s' % (parsed_url.scheme.upper(),) +
+ u' %(host)s DOMAIN')
+ domain = ui.ui_factory.get_username(prompt=prompt)
+ op.set_domain(domain)
+
+ if flags & gio.ASK_PASSWORD_NEED_PASSWORD:
+ if user is None:
+ user = op.get_username()
+ password = auth.get_password(parsed_url.scheme, parsed_url.host,
+ user, port=parsed_url.port)
+ op.set_password(password)
+ op.reply(gio.MOUNT_OPERATION_HANDLED)
+
+ def _mount_done_cb(self, obj, res):
+ try:
+ obj.mount_enclosing_volume_finish(res)
+ self.loop.quit()
+ except gio.Error, e:
+ self.loop.quit()
+ raise errors.BzrError("Failed to mount the given location: " + str(e));
+
+ def _create_connection(self, credentials=None):
+ if credentials is None:
+ user, password = self._parsed_url.user, self._parsed_url.password
+ else:
+ user, password = credentials
+
+ try:
+ connection = gio.File(self.url)
+ mount = None
+ try:
+ mount = connection.find_enclosing_mount()
+ except gio.Error, e:
+ if (e.code == gio.ERROR_NOT_MOUNTED):
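+ # The location is not mounted yet: mount it interactively and run a
+ # GLib main loop until _mount_done_cb quits it.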
+ self.loop = glib.MainLoop()
+ ui.ui_factory.show_message('Mounting %s using GIO' % \
+ self.url)
+ op = gio.MountOperation()
+ if user:
+ op.set_username(user)
+ if password:
+ op.set_password(password)
+ op.connect('ask-password', self._auth_cb)
+ m = connection.mount_enclosing_volume(op,
+ self._mount_done_cb)
+ self.loop.run()
+ except gio.Error, e:
+ raise errors.TransportError(msg="Error setting up connection:"
+ " %s" % str(e), orig_error=e)
+ return connection, (user, password)
+
+ def disconnect(self):
+ # FIXME: Nothing seems to be necessary here, which sounds a bit strange
+ # -- vila 20100601
+ pass
+
+ def _reconnect(self):
+ # FIXME: This doesn't seem to be used -- vila 20100601
+ """Create a new connection with the previously used credentials"""
+ credentials = self._get_credentials()
+ connection, credentials = self._create_connection(credentials)
+ self._set_connection(connection, credentials)
+
+ def _remote_path(self, relpath):
+ return self._parsed_url.clone(relpath).path
+
+ def has(self, relpath):
+ """Does the target location exist?"""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter('GIO has check: %s' % relpath)
+ f = self._get_GIO(relpath)
+ st = GioStatResult(f)
+ if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
+ return True
+ return False
+ except gio.Error, e:
+ if e.code == gio.ERROR_NOT_FOUND:
+ return False
+ else:
+ self._translate_gio_error(e, relpath)
+
+ def get(self, relpath, retries=0):
+ """Get the file at the given relative path.
+
+ :param relpath: The relative path to the file
+ :param retries: Number of retries after temporary failures so far
+ for this operation.
+
+ We're meant to return a file-like object which bzr will
+ then read from. For now we do this via the magic of StringIO
+ """
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO get: %s" % relpath)
+ f = self._get_GIO(relpath)
+ fin = f.read()
+ buf = fin.read()
+ fin.close()
+ ret = StringIO(buf)
+ return ret
+ except gio.Error, e:
+ #If we get a 'not mounted' error here it might mean
+ #that a bad path has been entered (or that mount failed)
+ if (e.code == gio.ERROR_NOT_MOUNTED):
+ raise errors.PathError(relpath,
+ extra='Failed to get file, make sure the path is correct. ' \
+ + str(e))
+ else:
+ self._translate_gio_error(e, relpath)
+
+ def put_file(self, relpath, fp, mode=None):
+ """Copy the file-like object into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param fp: File-like or string object.
+ """
+ if 'gio' in debug.debug_flags:
+ mutter("GIO put_file %s" % relpath)
+ tmppath = '%s.tmp.%.9f.%d.%d' % (relpath, time.time(),
+ os.getpid(), random.randint(0, 0x7FFFFFFF))
+ f = None
+ fout = None
+ try:
+ closed = True
+ try:
+ f = self._get_GIO(tmppath)
+ fout = f.create()
+ closed = False
+ length = self._pump(fp, fout)
+ fout.close()
+ closed = True
+ self.stat(tmppath)
+ dest = self._get_GIO(relpath)
+ f.move(dest, flags=gio.FILE_COPY_OVERWRITE)
+ f = None
+ if mode is not None:
+ self._setmode(relpath, mode)
+ return length
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath)
+ finally:
+ if not closed and fout is not None:
+ fout.close()
+ if f is not None and f.query_exists():
+ f.delete()
+
+ def mkdir(self, relpath, mode=None):
+ """Create a directory at the given path."""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO mkdir: %s" % relpath)
+ f = self._get_GIO(relpath)
+ f.make_directory()
+ self._setmode(relpath, mode)
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath)
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ if 'gio' in debug.debug_flags:
+ mutter("GIO open_write_stream %s" % relpath)
+ if mode is not None:
+ self._setmode(relpath, mode)
+ result = GioFileStream(self, relpath)
+ _file_streams[self.abspath(relpath)] = result
+ return result
+
+ def recommended_page_size(self):
+ """See Transport.recommended_page_size().
+
+ For GIO we suggest a large page size to reduce the overhead
+ introduced by latency.
+ """
+ if 'gio' in debug.debug_flags:
+ mutter("GIO recommended_page")
+ return 64 * 1024
+
+ def rmdir(self, relpath):
+ """Delete the directory at rel_path"""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO rmdir %s" % relpath)
+ st = self.stat(relpath)
+ if stat.S_ISDIR(st.st_mode):
+ f = self._get_GIO(relpath)
+ f.delete()
+ else:
+ raise errors.NotADirectory(relpath)
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath)
+ except errors.NotADirectory, e:
+ #just pass it forward
+ raise e
+ except Exception, e:
+ mutter('failed to rmdir %s: %s' % (relpath, e))
+ raise errors.PathError(relpath)
+
+ def append_file(self, relpath, file, mode=None):
+ """Append the text in the file-like object into the final
+ location.
+ """
+ #GIO append_to seems not to append but to truncate
+ #Work around this.
+ if 'gio' in debug.debug_flags:
+ mutter("GIO append_file: %s" % relpath)
+ tmppath = '%s.tmp.%.9f.%d.%d' % (relpath, time.time(),
+ os.getpid(), random.randint(0, 0x7FFFFFFF))
+ try:
+ result = 0
+ fo = self._get_GIO(tmppath)
+ fi = self._get_GIO(relpath)
+ fout = fo.create()
+ try:
+ info = GioStatResult(fi)
+ result = info.st_size
+ fin = fi.read()
+ self._pump(fin, fout)
+ fin.close()
+ #This separate except is to catch and ignore the
+ #gio.ERROR_NOT_FOUND raised when the file being appended
+ #to does not exist yet; it is valid to open a
+ #non-existing file for append.
+ #This is caused by the broken gio append_to...
+ except gio.Error, e:
+ if e.code != gio.ERROR_NOT_FOUND:
+ self._translate_gio_error(e, relpath)
+ length = self._pump(file, fout)
+ fout.close()
+ info = GioStatResult(fo)
+ if info.st_size != result + length:
+ raise errors.BzrError("Failed to append size after " \
+ "(%d) is not original (%d) + written (%d) total (%d)" % \
+ (info.st_size, result, length, result + length))
+ fo.move(fi, flags=gio.FILE_COPY_OVERWRITE)
+ return result
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath)
+
+ def _setmode(self, relpath, mode):
+ """Set permissions on a path.
+
+ Only set permissions on Unix systems
+ """
+ if 'gio' in debug.debug_flags:
+ mutter("GIO _setmode %s" % relpath)
+ if mode:
+ try:
+ f = self._get_GIO(relpath)
+ f.set_attribute_uint32(gio.FILE_ATTRIBUTE_UNIX_MODE, mode)
+ except gio.Error, e:
+ if e.code == gio.ERROR_NOT_SUPPORTED:
+ # Command probably not available on this server
+ mutter("GIO Could not set permissions to %s on %s. %s",
+ oct(mode), self._remote_path(relpath), str(e))
+ else:
+ self._translate_gio_error(e, relpath)
+
+ def rename(self, rel_from, rel_to):
+ """Rename without special overwriting"""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO move (rename): %s => %s", rel_from, rel_to)
+ f = self._get_GIO(rel_from)
+ t = self._get_GIO(rel_to)
+ f.move(t)
+ except gio.Error, e:
+ self._translate_gio_error(e, rel_from)
+
+ def move(self, rel_from, rel_to):
+ """Move the item at rel_from to the location at rel_to"""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO move: %s => %s", rel_from, rel_to)
+ f = self._get_GIO(rel_from)
+ t = self._get_GIO(rel_to)
+ f.move(t, flags=gio.FILE_COPY_OVERWRITE)
+ except gio.Error, e:
+ self._translate_gio_error(e, rel_from)
+
+ def delete(self, relpath):
+ """Delete the item at relpath"""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO delete: %s", relpath)
+ f = self._get_GIO(relpath)
+ f.delete()
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath)
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ if 'gio' in debug.debug_flags:
+ mutter("GIO external_url", self.base)
+ # GIO external url
+ return self.base
+
+ def listable(self):
+ """See Transport.listable."""
+ if 'gio' in debug.debug_flags:
+ mutter("GIO listable")
+ return True
+
+ def list_dir(self, relpath):
+ """See Transport.list_dir."""
+ if 'gio' in debug.debug_flags:
+ mutter("GIO list_dir")
+ try:
+ entries = []
+ f = self._get_GIO(relpath)
+ children = f.enumerate_children(gio.FILE_ATTRIBUTE_STANDARD_NAME)
+ for child in children:
+ entries.append(urlutils.escape(child.get_name()))
+ return entries
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath)
+
+ def iter_files_recursive(self):
+ """See Transport.iter_files_recursive.
+
+ This is cargo-culted from the SFTP transport"""
+ if 'gio' in debug.debug_flags:
+ mutter("GIO iter_files_recursive")
+ queue = list(self.list_dir("."))
+ while queue:
+ relpath = queue.pop(0)
+ st = self.stat(relpath)
+ if stat.S_ISDIR(st.st_mode):
+ for i, basename in enumerate(self.list_dir(relpath)):
+ queue.insert(i, relpath + "/" + basename)
+ else:
+ yield relpath
+
+ def stat(self, relpath):
+ """Return the stat information for a file."""
+ try:
+ if 'gio' in debug.debug_flags:
+ mutter("GIO stat: %s", relpath)
+ f = self._get_GIO(relpath)
+ return GioStatResult(f)
+ except gio.Error, e:
+ self._translate_gio_error(e, relpath, extra='error w/ stat')
+
+ def lock_read(self, relpath):
+ """Lock the given file for shared (read) access.
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ if 'gio' in debug.debug_flags:
+ mutter("GIO lock_read", relpath)
+
+ class BogusLock(object):
+ # The old RemoteBranch ignored locks for reading, so we will
+ # continue that tradition and return a bogus lock object.
+
+ def __init__(self, path):
+ self.path = path
+
+ def unlock(self):
+ pass
+
+ return BogusLock(relpath)
+
+ def lock_write(self, relpath):
+ """Lock the given file for exclusive (write) access.
+ WARNING: many transports do not support this, so try to avoid using it
+
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ if 'gio' in debug.debug_flags:
+ mutter("GIO lock_write", relpath)
+ return self.lock_read(relpath)
+
+ def _translate_gio_error(self, err, path, extra=None):
+ if 'gio' in debug.debug_flags:
+ mutter("GIO Error: %s %s" % (str(err), path))
+ if extra is None:
+ extra = str(err)
+ if err.code == gio.ERROR_NOT_FOUND:
+ raise errors.NoSuchFile(path, extra=extra)
+ elif err.code == gio.ERROR_EXISTS:
+ raise errors.FileExists(path, extra=extra)
+ elif err.code == gio.ERROR_NOT_DIRECTORY:
+ raise errors.NotADirectory(path, extra=extra)
+ elif err.code == gio.ERROR_NOT_EMPTY:
+ raise errors.DirectoryNotEmpty(path, extra=extra)
+ elif err.code == gio.ERROR_BUSY:
+ raise errors.ResourceBusy(path, extra=extra)
+ elif err.code == gio.ERROR_PERMISSION_DENIED:
+ raise errors.PermissionDenied(path, extra=extra)
+ elif err.code == gio.ERROR_HOST_NOT_FOUND:
+ raise errors.PathError(path, extra=extra)
+ elif err.code == gio.ERROR_IS_DIRECTORY:
+ raise errors.PathError(path, extra=extra)
+ else:
+ mutter('unable to understand error for path: %s: %s', path, err)
+ raise errors.PathError(path,
+ extra="Unhandled gio error: " + str(err))
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(GioTransport, GioLocalURLServer)]
diff --git a/bzrlib/transport/http/__init__.py b/bzrlib/transport/http/__init__.py
new file mode 100644
index 0000000..f084534
--- /dev/null
+++ b/bzrlib/transport/http/__init__.py
@@ -0,0 +1,658 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Base implementation of Transport over http.
+
+There are separate implementation modules for each http client implementation.
+"""
+
+from __future__ import absolute_import
+
+import os
+import re
+import urlparse
+import sys
+import weakref
+
+from bzrlib import (
+ debug,
+ errors,
+ transport,
+ ui,
+ urlutils,
+ )
+from bzrlib.smart import medium
+from bzrlib.trace import mutter
+from bzrlib.transport import (
+ ConnectedTransport,
+ )
+
+# TODO: This is not used anymore by HttpTransport_urllib
+# (extracting the auth info and prompting the user for a password
+# have been split), only the tests still use it. It should be
+# deleted and the tests rewritten ASAP to stay in sync.
+def extract_auth(url, password_manager):
+ """Extract auth parameters from am HTTP/HTTPS url and add them to the given
+ password manager. Return the url, minus those auth parameters (which
+ confuse urllib2).
+ """
+ if not re.match(r'^(https?)(\+\w+)?://', url):
+ raise ValueError(
+ 'invalid absolute url %r' % (url,))
+ scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
+
+ if '@' in netloc:
+ auth, netloc = netloc.split('@', 1)
+ if ':' in auth:
+ username, password = auth.split(':', 1)
+ else:
+ username, password = auth, None
+ if ':' in netloc:
+ host = netloc.split(':', 1)[0]
+ else:
+ host = netloc
+ username = urlutils.unquote(username)
+ if password is not None:
+ password = urlutils.unquote(password)
+ else:
+ password = ui.ui_factory.get_password(
+ prompt=u'HTTP %(user)s@%(host)s password',
+ user=username, host=host)
+ password_manager.add_password(None, host, username, password)
+ url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+ return url
+
+
+class HttpTransportBase(ConnectedTransport):
+ """Base class for http implementations.
+
+ Does URL parsing, etc, but not any network IO.
+
+ The protocol can be given as e.g. http+urllib://host/ to use a particular
+ implementation.
+ """
+
+ # _unqualified_scheme: "http" or "https"
+ # _scheme: may have "+pycurl", etc
+
+ def __init__(self, base, _impl_name, _from_transport=None):
+ """Set the base path where files will be stored."""
+ proto_match = re.match(r'^(https?)(\+\w+)?://', base)
+ if not proto_match:
+ raise AssertionError("not a http url: %r" % base)
+ self._unqualified_scheme = proto_match.group(1)
+ self._impl_name = _impl_name
+ super(HttpTransportBase, self).__init__(base,
+ _from_transport=_from_transport)
+ self._medium = None
+ # range hint is handled dynamically throughout the life
+ # of the transport object. We start by trying multi-range
+ # requests and if the server returns bogus results, we
+ # retry with single range requests and, finally, we
+ # forget about range if the server really can't
+ # understand. Once acquired, this piece of info is
+ # propagated to clones.
+ if _from_transport is not None:
+ self._range_hint = _from_transport._range_hint
+ else:
+ self._range_hint = 'multi'
+
+ def has(self, relpath):
+ raise NotImplementedError("has() is abstract on %r" % self)
+
+ def get(self, relpath):
+ """Get the file at the given relative path.
+
+ :param relpath: The relative path to the file
+ """
+ code, response_file = self._get(relpath, None)
+ return response_file
+
+ def _get(self, relpath, ranges, tail_amount=0):
+ """Get a file, or part of a file.
+
+ :param relpath: Path relative to transport base URL
+ :param ranges: None to get the whole file;
+ or a list of _CoalescedOffset to fetch parts of a file.
+ :param tail_amount: The amount to get from the end of the file.
+
+ :returns: (http_code, result_file)
+ """
+ raise NotImplementedError(self._get)
+
+ def _remote_path(self, relpath):
+ """See ConnectedTransport._remote_path.
+
+ User and password are not embedded in the path provided to the server.
+ """
+ url = self._parsed_url.clone(relpath)
+ url.user = url.quoted_user = None
+ url.password = url.quoted_password = None
+ url.scheme = self._unqualified_scheme
+ return str(url)
+
+ def _create_auth(self):
+ """Returns a dict containing the credentials provided at build time."""
+ auth = dict(host=self._parsed_url.host, port=self._parsed_url.port,
+ user=self._parsed_url.user, password=self._parsed_url.password,
+ protocol=self._unqualified_scheme,
+ path=self._parsed_url.path)
+ return auth
+
+ def get_smart_medium(self):
+ """See Transport.get_smart_medium."""
+ if self._medium is None:
+ # Since medium holds some state (smart server probing at least), we
+ # need to keep it around. Note that this is needed because medium
+ # has the same 'base' attribute as the transport so it can't be
+ # shared between transports having different bases.
+ self._medium = SmartClientHTTPMedium(self)
+ return self._medium
+
+ def _degrade_range_hint(self, relpath, ranges, exc_info):
+ if self._range_hint == 'multi':
+ self._range_hint = 'single'
+ mutter('Retry "%s" with single range request' % relpath)
+ elif self._range_hint == 'single':
+ self._range_hint = None
+ mutter('Retry "%s" without ranges' % relpath)
+ else:
+ # We tried all the tricks, but nothing worked. We re-raise the
+ # original exception; the 'mutter' calls above will indicate that
+ # further tries were unsuccessful
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+ # _coalesce_offsets is a helper for readv; it tries to combine ranges
+ # without degrading readv performance. _bytes_to_read_before_seek is the
+ # value used for the limit parameter and has been tuned for other
+ # transports. For HTTP, the name is inappropriate but the parameter is
+ # still useful and helps reduce the number of chunks in the response. The
+ # overhead for a chunk (headers, length, footer around the data itself) is
+ # variable but around 50 bytes. We use 128 to reduce the number of range
+ # specifiers that appear in the header; some servers (notably Apache)
+ # enforce a maximum length for a header and issue a '400: Bad Request'
+ # error when too many ranges are specified.
+ _bytes_to_read_before_seek = 128
+ # No limit on the number of offsets that get combined into one; we are
+ # trying to avoid downloading the whole file.
+ _max_readv_combine = 0
+ # By default Apache has a limit of ~400 ranges before replying with a 400
+ # Bad Request. So we go underneath that amount to be safe.
+ _max_get_ranges = 200
+ # We impose no limit on the range size. But see _pycurl.py for a different
+ # use.
+ _get_max_size = 0
+
+ def _readv(self, relpath, offsets):
+ """Get parts of the file at the given relative path.
+
+ :param offsets: A list of (offset, size) tuples.
+ :return: A list or generator of (offset, data) tuples
+ """
+ # offsets may be a generator, we will iterate it several times, so
+ # build a list
+ offsets = list(offsets)
+
+ try_again = True
+ retried_offset = None
+ while try_again:
+ try_again = False
+
+ # Coalesce the offsets to minimize the GET requests issued
+ sorted_offsets = sorted(offsets)
+ coalesced = self._coalesce_offsets(
+ sorted_offsets, limit=self._max_readv_combine,
+ fudge_factor=self._bytes_to_read_before_seek,
+ max_size=self._get_max_size)
+
+ # Turn it into a list, we will iterate it several times
+ coalesced = list(coalesced)
+ if 'http' in debug.debug_flags:
+ mutter('http readv of %s offsets => %s collapsed %s',
+ relpath, len(offsets), len(coalesced))
+
+ # Cache the data read, but only until it's been used
+ data_map = {}
+ # We will iterate on the data received from the GET requests and
+ # serve the corresponding offsets respecting the initial order. We
+ # need an offset iterator for that.
+ iter_offsets = iter(offsets)
+ cur_offset_and_size = iter_offsets.next()
+
+ try:
+ for cur_coal, rfile in self._coalesce_readv(relpath, coalesced):
+ # Split the received chunk
+ for offset, size in cur_coal.ranges:
+ start = cur_coal.start + offset
+ rfile.seek(start, os.SEEK_SET)
+ data = rfile.read(size)
+ data_len = len(data)
+ if data_len != size:
+ raise errors.ShortReadvError(relpath, start, size,
+ actual=data_len)
+ if (start, size) == cur_offset_and_size:
+ # The offsets requested are sorted like the coalesced
+ # ones, no need to cache. Win!
+ yield cur_offset_and_size[0], data
+ cur_offset_and_size = iter_offsets.next()
+ else:
+ # Different sorting. We need to cache.
+ data_map[(start, size)] = data
+
+ # Yield everything we can
+ while cur_offset_and_size in data_map:
+ # Clean the cached data since we use it
+ # XXX: will break if offsets contains duplicates --
+ # vila20071129
+ this_data = data_map.pop(cur_offset_and_size)
+ yield cur_offset_and_size[0], this_data
+ cur_offset_and_size = iter_offsets.next()
+
+ except (errors.ShortReadvError, errors.InvalidRange,
+ errors.InvalidHttpRange, errors.HttpBoundaryMissing), e:
+ mutter('Exception %r: %s during http._readv', e, e)
+ if (not isinstance(e, errors.ShortReadvError)
+ or retried_offset == cur_offset_and_size):
+ # We don't degrade the range hint for ShortReadvError since
+ # they do not indicate a problem with the server ability to
+ # handle ranges. Except when we fail to get back a required
+ # offset twice in a row. In that case, falling back to
+ # single range or whole file should help or end up in a
+ # fatal exception.
+ self._degrade_range_hint(relpath, coalesced, sys.exc_info())
+ # Some offsets may have been already processed, so we retry
+ # only the unsuccessful ones.
+ offsets = [cur_offset_and_size] + [o for o in iter_offsets]
+ retried_offset = cur_offset_and_size
+ try_again = True
+
+ def _coalesce_readv(self, relpath, coalesced):
+ """Issue several GET requests to satisfy the coalesced offsets"""
+
+ def get_and_yield(relpath, coalesced):
+ if coalesced:
+ # Note that the _get below may raise
+ # errors.InvalidHttpRange. It's the caller's responsibility to
+ # decide how to retry since it may provide different coalesced
+ # offsets.
+ code, rfile = self._get(relpath, coalesced)
+ for coal in coalesced:
+ yield coal, rfile
+
+ if self._range_hint is None:
+ # Download whole file
+ for c, rfile in get_and_yield(relpath, coalesced):
+ yield c, rfile
+ else:
+ total = len(coalesced)
+ if self._range_hint == 'multi':
+ max_ranges = self._max_get_ranges
+ elif self._range_hint == 'single':
+ max_ranges = total
+ else:
+ raise AssertionError("Unknown _range_hint %r"
+ % (self._range_hint,))
+ # TODO: Some web servers may ignore the range requests and return
+ # the whole file, we may want to detect that and avoid further
+ # requests.
+ # Hint: test_readv_multiple_get_requests will fail once we do that
+ cumul = 0
+ ranges = []
+ for coal in coalesced:
+ if ((self._get_max_size > 0
+ and cumul + coal.length > self._get_max_size)
+ or len(ranges) >= max_ranges):
+ # Get that much and yield
+ for c, rfile in get_and_yield(relpath, ranges):
+ yield c, rfile
+ # Restart with the current offset
+ ranges = [coal]
+ cumul = coal.length
+ else:
+ ranges.append(coal)
+ cumul += coal.length
+ # Get the rest and yield
+ for c, rfile in get_and_yield(relpath, ranges):
+ yield c, rfile
+
+ def recommended_page_size(self):
+ """See Transport.recommended_page_size().
+
+ For HTTP we suggest a large page size to reduce the overhead
+ introduced by latency.
+ """
+ return 64 * 1024
+
+ def _post(self, body_bytes):
+ """POST body_bytes to .bzr/smart on this transport.
+
+ :returns: (response code, response body file-like object).
+ """
+ # TODO: Requiring all the body_bytes to be available at the beginning of
+ # the POST may require large client buffers. It would be nice to have
+ # an interface that allows streaming via POST when possible (and
+ # degrades to a local buffer when not).
+ raise NotImplementedError(self._post)
+
+ def put_file(self, relpath, f, mode=None):
+ """Copy the file-like object into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param f: File-like object.
+ """
+ raise errors.TransportNotPossible('http PUT not supported')
+
+ def mkdir(self, relpath, mode=None):
+ """Create a directory at the given path."""
+ raise errors.TransportNotPossible('http does not support mkdir()')
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ raise errors.TransportNotPossible('http does not support rmdir()')
+
+ def append_file(self, relpath, f, mode=None):
+ """Append the text in the file-like object into the final
+ location.
+ """
+ raise errors.TransportNotPossible('http does not support append()')
+
+ def copy(self, rel_from, rel_to):
+ """Copy the item at rel_from to the location at rel_to"""
+ raise errors.TransportNotPossible('http does not support copy()')
+
+ def copy_to(self, relpaths, other, mode=None, pb=None):
+ """Copy a set of entries from self into another Transport.
+
+ :param relpaths: A list/generator of entries to be copied.
+
+ TODO: if other is LocalTransport, is it possible to
+ do better than put(get())?
+ """
+ # At this point HttpTransport might be able to check and see if
+ # the remote location is the same, and rather than download, and
+ # then upload, it could just issue a remote copy_this command.
+ if isinstance(other, HttpTransportBase):
+ raise errors.TransportNotPossible(
+ 'http cannot be the target of copy_to()')
+ else:
+ return super(HttpTransportBase, self).\
+ copy_to(relpaths, other, mode=mode, pb=pb)
+
+ def move(self, rel_from, rel_to):
+ """Move the item at rel_from to the location at rel_to"""
+ raise errors.TransportNotPossible('http does not support move()')
+
+ def delete(self, relpath):
+ """Delete the item at relpath"""
+ raise errors.TransportNotPossible('http does not support delete()')
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # HTTP URLs are externally usable as long as they don't mention their
+ # implementation qualifier
+ url = self._parsed_url.clone()
+ url.scheme = self._unqualified_scheme
+ return str(url)
+
+ def is_readonly(self):
+ """See Transport.is_readonly."""
+ return True
+
+ def listable(self):
+ """See Transport.listable."""
+ return False
+
+ def stat(self, relpath):
+ """Return the stat information for a file.
+ """
+ raise errors.TransportNotPossible('http does not support stat()')
+
+ def lock_read(self, relpath):
+ """Lock the given file for shared (read) access.
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ # The old RemoteBranch ignored locks for reading, so we will
+ # continue that tradition and return a bogus lock object.
+ class BogusLock(object):
+ def __init__(self, path):
+ self.path = path
+ def unlock(self):
+ pass
+ return BogusLock(relpath)
+
+ def lock_write(self, relpath):
+ """Lock the given file for exclusive (write) access.
+ WARNING: many transports do not support this, so try to avoid using it
+
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ raise errors.TransportNotPossible('http does not support lock_write()')
+
+ def _attempted_range_header(self, offsets, tail_amount):
+ """Prepare a HTTP Range header at a level the server should accept.
+
+ :return: the range header representing offsets/tail_amount or None if
+ no header can be built.
+ """
+
+ if self._range_hint == 'multi':
+ # Generate the header describing all offsets
+ return self._range_header(offsets, tail_amount)
+ elif self._range_hint == 'single':
+ # Combine all the requested ranges into a single
+ # encompassing one
+ if len(offsets) > 0:
+ if tail_amount not in (0, None):
+ # Nothing we can do here to combine ranges with tail_amount
+ # into a single range; just return None. The whole file
+ # should be downloaded.
+ return None
+ else:
+ start = offsets[0].start
+ last = offsets[-1]
+ end = last.start + last.length - 1
+ whole = self._coalesce_offsets([(start, end - start + 1)],
+ limit=0, fudge_factor=0)
+ return self._range_header(list(whole), 0)
+ else:
+ # Only tail_amount was requested; let _range_header
+ # do its work
+ return self._range_header(offsets, tail_amount)
+ else:
+ return None
+
+ @staticmethod
+ def _range_header(ranges, tail_amount):
+ """Turn a list of bytes ranges into a HTTP Range header value.
+
+ :param ranges: A list of _CoalescedOffset
+ :param tail_amount: The amount to get from the end of the file.
+
+ :return: HTTP range header string.
+
+ Either a non-empty ranges list *or* a tail_amount must be
+ provided.
+ """
+ strings = []
+ for offset in ranges:
+ strings.append('%d-%d' % (offset.start,
+ offset.start + offset.length - 1))
+
+ if tail_amount:
+ strings.append('-%d' % tail_amount)
+
+ return ','.join(strings)
+
+ def _redirected_to(self, source, target):
+ """Returns a transport suitable to re-issue a redirected request.
+
+ :param source: The source url as returned by the server.
+ :param target: The target url as returned by the server.
+
+ The redirection can be handled only if the relpath involved is not
+ renamed by the redirection.
+
+ :returns: A transport or None.
+ """
+ parsed_source = self._split_url(source)
+ parsed_target = self._split_url(target)
+ pl = len(self._parsed_url.path)
+ # determine the excess tail - the relative path that was in
+ # the original request but not part of this transport's URL.
+ excess_tail = parsed_source.path[pl:].strip("/")
+ if not target.endswith(excess_tail):
+ # The final part of the url has been renamed, we can't handle the
+ # redirection.
+ return None
+
+ target_path = parsed_target.path
+ if excess_tail:
+ # Drop the tail that was in the redirect but not part of
+ # the path of this transport.
+ target_path = target_path[:-len(excess_tail)]
+
+ if parsed_target.scheme in ('http', 'https'):
+ # Same protocol family (i.e. http[s]), we will preserve the same
+ # http client implementation when a redirection occurs from one to
+ # the other (otherwise users may be surprised that bzr switches
+ # from one implementation to the other, and devs may suffer
+ # debugging it).
+ if (parsed_target.scheme == self._unqualified_scheme
+ and parsed_target.host == self._parsed_url.host
+ and parsed_target.port == self._parsed_url.port
+ and (parsed_target.user is None or
+ parsed_target.user == self._parsed_url.user)):
+ # If a user is specified, it should match, we don't care about
+ # passwords, wrong passwords will be rejected anyway.
+ return self.clone(target_path)
+ else:
+ # Rebuild the url preserving the scheme qualification and the
+ # credentials (if they don't apply, the redirected to server
+ # will tell us, but if they do apply, we avoid prompting the
+ # user)
+ redir_scheme = parsed_target.scheme + '+' + self._impl_name
+ new_url = self._unsplit_url(redir_scheme,
+ self._parsed_url.user,
+ self._parsed_url.password,
+ parsed_target.host, parsed_target.port,
+ target_path)
+ return transport.get_transport_from_url(new_url)
+ else:
+ # Redirected to a different protocol
+ new_url = self._unsplit_url(parsed_target.scheme,
+ parsed_target.user,
+ parsed_target.password,
+ parsed_target.host, parsed_target.port,
+ target_path)
+ return transport.get_transport_from_url(new_url)
+
+
+# TODO: May be better located in smart/medium.py with the other
+# SmartMedium classes
+class SmartClientHTTPMedium(medium.SmartClientMedium):
+
+ def __init__(self, http_transport):
+ super(SmartClientHTTPMedium, self).__init__(http_transport.base)
+ # We don't want to create a circular reference between the http
+ # transport and its associated medium. Since the transport will live
+ # longer than the medium, the medium keeps only a weak reference to its
+ # transport.
+ self._http_transport_ref = weakref.ref(http_transport)
+
+ def get_request(self):
+ return SmartClientHTTPMediumRequest(self)
+
+ def should_probe(self):
+ return True
+
+ def remote_path_from_transport(self, transport):
+ # Strip the optional 'bzr+' prefix from transport so it will have the
+ # same scheme as self.
+ transport_base = transport.base
+ if transport_base.startswith('bzr+'):
+ transport_base = transport_base[4:]
+ rel_url = urlutils.relative_url(self.base, transport_base)
+ return urlutils.unquote(rel_url)
+
+ def send_http_smart_request(self, bytes):
+ try:
+ # Get back the http_transport held by the weak reference
+ t = self._http_transport_ref()
+ code, body_filelike = t._post(bytes)
+ if code != 200:
+ raise errors.InvalidHttpResponse(
+ t._remote_path('.bzr/smart'),
+ 'Expected 200 response code, got %r' % (code,))
+ except (errors.InvalidHttpResponse, errors.ConnectionReset), e:
+ raise errors.SmartProtocolError(str(e))
+ return body_filelike
+
+ def _report_activity(self, bytes, direction):
+ """See SmartMedium._report_activity.
+
+ Does nothing; the underlying plain HTTP transport will report the
+ activity that this medium would report.
+ """
+ pass
+
+ def disconnect(self):
+ """See SmartClientMedium.disconnect()."""
+ t = self._http_transport_ref()
+ t.disconnect()
+
+
+# TODO: May be better located in smart/medium.py with the other
+# SmartMediumRequest classes
+class SmartClientHTTPMediumRequest(medium.SmartClientMediumRequest):
+ """A SmartClientMediumRequest that works with an HTTP medium."""
+
+ def __init__(self, client_medium):
+ medium.SmartClientMediumRequest.__init__(self, client_medium)
+ self._buffer = ''
+
+ def _accept_bytes(self, bytes):
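+ # Accumulate outgoing bytes locally; the buffered body is sent in a
+ # single POST by _finished_writing().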
+ self._buffer += bytes
+
+ def _finished_writing(self):
+ data = self._medium.send_http_smart_request(self._buffer)
+ self._response_body = data
+
+ def _read_bytes(self, count):
+ """See SmartClientMediumRequest._read_bytes."""
+ return self._response_body.read(count)
+
+ def _read_line(self):
+ line, excess = medium._get_line(self._response_body.read)
+ if excess != '':
+ raise AssertionError(
+ '_get_line returned excess bytes, but this mediumrequest '
+ 'cannot handle excess. (%r)' % (excess,))
+ return line
+
+ def _finished_reading(self):
+ """See SmartClientMediumRequest._finished_reading."""
+ pass
+
+
+def unhtml_roughly(maybe_html, length_limit=1000):
+ """Very approximate html->text translation, for presenting error bodies.
+
+ :param length_limit: Truncate the result to this many characters.
+
+ >>> unhtml_roughly("<b>bad</b> things happened\\n")
+ ' bad things happened '
+ """
+ return re.subn(r"(<[^>]*>|\n|&nbsp;)", " ", maybe_html)[0][:length_limit]
diff --git a/bzrlib/transport/http/_pycurl.py b/bzrlib/transport/http/_pycurl.py
new file mode 100644
index 0000000..612123d
--- /dev/null
+++ b/bzrlib/transport/http/_pycurl.py
@@ -0,0 +1,448 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""http/https transport using pycurl"""
+
+from __future__ import absolute_import
+
+# TODO: test reporting of http errors
+#
+# TODO: Transport option to control caching of particular requests; broadly we
+# would want to offer "caching allowed" or "must revalidate", depending on
+# whether we expect a particular file will be modified after it's committed.
+# It's probably safer to just always revalidate. mbp 20060321
+
+# TODO: Some refactoring could be done to avoid the strange idiom
+# used to capture data and headers while setting up the request
+# (and having to pass 'header' to _curl_perform to handle
+# redirections) . This could be achieved by creating a
+# specialized Curl object and returning code, headers and data
+# from _curl_perform. Not done because we may deprecate pycurl in the
+# future -- vila 20070212
+
+from cStringIO import StringIO
+import httplib
+
+import bzrlib
+from bzrlib import (
+ debug,
+ errors,
+ trace,
+ )
+from bzrlib.transport.http import (
+ ca_bundle,
+ HttpTransportBase,
+ response,
+ unhtml_roughly,
+ )
+
+try:
+ import pycurl
+except ImportError, e:
+ trace.mutter("failed to import pycurl: %s", e)
+ raise errors.DependencyNotPresent('pycurl', e)
+
+try:
+ # see if we can actually initialize PyCurl - sometimes it will load but
+ # fail to start up due to this bug:
+ #
+ # 32. (At least on Windows) If libcurl is built with c-ares and there's
+ # no DNS server configured in the system, the ares_init() call fails and
+ # thus curl_easy_init() fails as well. This causes weird effects for
+ # people who use numerical IP addresses only.
+ #
+ # reported by Alexander Belchenko, 2006-04-26
+ pycurl.Curl()
+except pycurl.error, e:
+ trace.mutter("failed to initialize pycurl: %s", e)
+ raise errors.DependencyNotPresent('pycurl', e)
+
+
+
+
+def _get_pycurl_errcode(symbol, default):
+ """
+ Returns the numerical error code for a symbol defined by pycurl.
+
+ Different pycurl implementations define different symbols for error
+ codes. Old versions never define some symbols (whether they can return the
+ corresponding error code or not). The following addresses the problem by
+ defining the symbols we care about. Note: this allows defining symbols
+ for errors that older versions will never return, which is fine.
+ """
+ return pycurl.__dict__.get(symbol, default)
+
+CURLE_COULDNT_CONNECT = _get_pycurl_errcode('E_COULDNT_CONNECT', 7)
+CURLE_COULDNT_RESOLVE_HOST = _get_pycurl_errcode('E_COULDNT_RESOLVE_HOST', 6)
+CURLE_COULDNT_RESOLVE_PROXY = _get_pycurl_errcode('E_COULDNT_RESOLVE_PROXY', 5)
+CURLE_GOT_NOTHING = _get_pycurl_errcode('E_GOT_NOTHING', 52)
+CURLE_PARTIAL_FILE = _get_pycurl_errcode('E_PARTIAL_FILE', 18)
+CURLE_SEND_ERROR = _get_pycurl_errcode('E_SEND_ERROR', 55)
+CURLE_RECV_ERROR = _get_pycurl_errcode('E_RECV_ERROR', 56)
+CURLE_SSL_CACERT = _get_pycurl_errcode('E_SSL_CACERT', 60)
+CURLE_SSL_CACERT_BADFILE = _get_pycurl_errcode('E_SSL_CACERT_BADFILE', 77)
+
+
+class PyCurlTransport(HttpTransportBase):
+ """http client transport using pycurl
+
+ PyCurl is a Python binding to the C "curl" multiprotocol client.
+
+ This transport can be significantly faster than the builtin
+ Python client. Advantages include: DNS caching.
+ """
+
+ def __init__(self, base, _from_transport=None):
+ super(PyCurlTransport, self).__init__(base, 'pycurl',
+ _from_transport=_from_transport)
+ if self._unqualified_scheme == 'https':
+ # Check availability of https in pycurl's supported
+ # protocols
+ supported = pycurl.version_info()[8]
+ if 'https' not in supported:
+ raise errors.DependencyNotPresent('pycurl', 'no https support')
+ self.cabundle = ca_bundle.get_ca_path()
+
+ def _get_curl(self):
+ connection = self._get_connection()
+ if connection is None:
+ # First connection ever. There is no credentials for pycurl, either
+ # the password was embedded in the URL or it's not needed. The
+ # connection for pycurl is just the Curl object, it will not
+ # connect to the http server until the first request (which had
+ # just called us).
+ connection = pycurl.Curl()
+ # First request, initialize credentials.
+ auth = self._create_auth()
+ # Proxy handling is out of reach, so we punt
+ self._set_connection(connection, auth)
+ return connection
+
+ def disconnect(self):
+ connection = self._get_connection()
+ if connection is not None:
+ connection.close()
+
+ def has(self, relpath):
+ """See Transport.has()"""
+ # We set NO BODY=0 in _get_full, so it should be safe
+ # to re-use the non-range curl object
+ curl = self._get_curl()
+ abspath = self._remote_path(relpath)
+ curl.setopt(pycurl.URL, abspath)
+ self._set_curl_options(curl)
+ curl.setopt(pycurl.HTTPGET, 1)
+ # don't want the body - ie just do a HEAD request
+ # This means "NO BODY" not 'nobody'
+ curl.setopt(pycurl.NOBODY, 1)
+ # But we need headers to handle redirections
+ header = StringIO()
+ curl.setopt(pycurl.HEADERFUNCTION, header.write)
+ # In some erroneous cases, pycurl will emit text on
+ # stdout if we don't catch it (see InvalidStatus tests
+ # for one such occurrence).
+ blackhole = StringIO()
+ curl.setopt(pycurl.WRITEFUNCTION, blackhole.write)
+ self._curl_perform(curl, header)
+ code = curl.getinfo(pycurl.HTTP_CODE)
+ if code == 404: # not found
+ return False
+ elif code == 200: # "ok"
+ return True
+ else:
+ self._raise_curl_http_error(curl)
+
+ def _get(self, relpath, offsets, tail_amount=0):
+ # This just switches based on the type of request
+ if offsets is not None or tail_amount not in (0, None):
+ return self._get_ranged(relpath, offsets, tail_amount=tail_amount)
+ else:
+ return self._get_full(relpath)
+
+ def _setup_get_request(self, curl, relpath):
+ # Make sure we do a GET request. versions > 7.14.1 also set the
+ # NO BODY flag, but we'll do it ourselves in case it is an older
+ # pycurl version
+ curl.setopt(pycurl.NOBODY, 0)
+ curl.setopt(pycurl.HTTPGET, 1)
+ return self._setup_request(curl, relpath)
+
+ def _setup_request(self, curl, relpath):
+ """Do the common setup stuff for making a request
+
+ :param curl: The curl object to place the request on
+ :param relpath: The relative path that we want to get
+ :return: (abspath, data, header)
+ abspath: full url
+ data: file that will be filled with the body
+ header: file that will be filled with the headers
+ """
+ abspath = self._remote_path(relpath)
+ curl.setopt(pycurl.URL, abspath)
+ self._set_curl_options(curl)
+
+ data = StringIO()
+ header = StringIO()
+ curl.setopt(pycurl.WRITEFUNCTION, data.write)
+ curl.setopt(pycurl.HEADERFUNCTION, header.write)
+
+ return abspath, data, header
+
+ def _get_full(self, relpath):
+ """Make a request for the entire file"""
+ curl = self._get_curl()
+ abspath, data, header = self._setup_get_request(curl, relpath)
+ self._curl_perform(curl, header)
+
+ code = curl.getinfo(pycurl.HTTP_CODE)
+ data.seek(0)
+
+ if code == 404:
+ raise errors.NoSuchFile(abspath)
+ if code != 200:
+ self._raise_curl_http_error(
+ curl, 'expected 200 or 404 for full response.')
+
+ return code, data
+
+ # The parent class uses 0 to minimize the number of requests, but since
+ # we can't exploit the results as soon as they are received (a pycurl
+ # limitation) we'd better issue more requests to provide a more
+ # responsive UI, at the cost of extra latency.
+ # If you modify this, think about modifying the comment in http/__init__.py
+ # too.
+ _get_max_size = 4 * 1024 * 1024
+
+ def _get_ranged(self, relpath, offsets, tail_amount):
+ """Make a request for just part of the file."""
+ curl = self._get_curl()
+ abspath, data, header = self._setup_get_request(curl, relpath)
+
+ range_header = self._attempted_range_header(offsets, tail_amount)
+ if range_header is None:
+ # Forget ranges, the server can't handle them
+ return self._get_full(relpath)
+
+ self._curl_perform(curl, header, ['Range: bytes=%s' % range_header])
+ data.seek(0)
+
+ code = curl.getinfo(pycurl.HTTP_CODE)
+
+ if code == 404: # not found
+ raise errors.NoSuchFile(abspath)
+ elif code in (400, 416):
+ # We don't know which, but one of the ranges we specified was
+ # wrong.
+ raise errors.InvalidHttpRange(abspath, range_header,
+ 'Server returned code %d'
+ % curl.getinfo(pycurl.HTTP_CODE))
+ msg = self._parse_headers(header)
+ return code, response.handle_response(abspath, code, msg, data)
+
+ def _parse_headers(self, status_and_headers):
+ """Transform the headers provided by curl into an HTTPMessage"""
+ status_and_headers.seek(0)
+ # Ignore status line
+ status_and_headers.readline()
+ msg = httplib.HTTPMessage(status_and_headers)
+ return msg
+
+ def _post(self, body_bytes):
+ curl = self._get_curl()
+ abspath, data, header = self._setup_request(curl, '.bzr/smart')
+ curl.setopt(pycurl.POST, 1)
+ fake_file = StringIO(body_bytes)
+ curl.setopt(pycurl.POSTFIELDSIZE, len(body_bytes))
+ curl.setopt(pycurl.READFUNCTION, fake_file.read)
+ # We override the Expect: header so that pycurl will send the POST
+ # body immediately.
+ try:
+ self._curl_perform(curl, header,
+ ['Expect: ',
+ 'Content-Type: application/octet-stream'])
+ except pycurl.error, e:
+ if e[0] == CURLE_SEND_ERROR:
+ # When talking to an HTTP/1.0 server, getting a 400+ error code
+ # triggers a bug in some combinations of curl/kernel in rare
+ # occurrences. Basically, the server closes the connection
+ # after sending the error but the client (having received and
+ # parsed the response) still tries to send the request body (see
+ # bug #225020 and its upstream associated bug). Since the
+ # error code and the headers are known to be available, we just
+ # swallow the exception, letting the upper levels handle the
+ # 400+ error.
+ trace.mutter('got pycurl error in POST: %s, %s, %s, url: %s ',
+ e[0], e[1], e, abspath)
+ else:
+ # Re-raise otherwise
+ raise
+ data.seek(0)
+ code = curl.getinfo(pycurl.HTTP_CODE)
+ msg = self._parse_headers(header)
+ return code, response.handle_response(abspath, code, msg, data)
+
+
+ def _raise_curl_http_error(self, curl, info=None, body=None):
+ """Common curl->bzrlib error translation.
+
+ Some methods may choose to override this for particular cases.
+
+ The URL and code are automatically included as appropriate.
+
+ :param info: Extra information to include in the message.
+
+ :param body: File-like object from which the body of the page can be
+ read.
+ """
+ code = curl.getinfo(pycurl.HTTP_CODE)
+ url = curl.getinfo(pycurl.EFFECTIVE_URL)
+ if body is not None:
+ response_body = body.read()
+ plaintext_body = unhtml_roughly(response_body)
+ else:
+ response_body = None
+ plaintext_body = ''
+ if code == 403:
+ raise errors.TransportError(
+ 'Server refuses to fulfill the request (403 Forbidden)'
+ ' for %s: %s' % (url, plaintext_body))
+ else:
+ if info is None:
+ msg = ''
+ else:
+ msg = ': ' + info
+ raise errors.InvalidHttpResponse(
+ url, 'Unable to handle http code %d%s: %s'
+ % (code, msg, plaintext_body))
+
+ def _debug_cb(self, kind, text):
+ if kind in (pycurl.INFOTYPE_HEADER_IN, pycurl.INFOTYPE_DATA_IN):
+ self._report_activity(len(text), 'read')
+ if (kind == pycurl.INFOTYPE_HEADER_IN
+ and 'http' in debug.debug_flags):
+ trace.mutter('< %s' % (text.rstrip(),))
+ elif kind in (pycurl.INFOTYPE_HEADER_OUT, pycurl.INFOTYPE_DATA_OUT):
+ self._report_activity(len(text), 'write')
+ if (kind == pycurl.INFOTYPE_HEADER_OUT
+ and 'http' in debug.debug_flags):
+ lines = []
+ for line in text.rstrip().splitlines():
+ # People are often told to paste -Dhttp output to help
+ # debug. Don't compromise credentials.
+ try:
+ header, details = line.split(':', 1)
+ except ValueError:
+ header = None
+ if header in ('Authorization', 'Proxy-Authorization'):
+ line = '%s: <masked>' % (header,)
+ lines.append(line)
+ trace.mutter('> ' + '\n> '.join(lines))
+ elif kind == pycurl.INFOTYPE_TEXT and 'http' in debug.debug_flags:
+ trace.mutter('* %s' % text.rstrip())
+ elif (kind in (pycurl.INFOTYPE_TEXT, pycurl.INFOTYPE_SSL_DATA_IN,
+ pycurl.INFOTYPE_SSL_DATA_OUT)
+ and 'http' in debug.debug_flags):
+ trace.mutter('* %s' % text)
+
+ def _set_curl_options(self, curl):
+ """Set options for all requests"""
+ ua_str = 'bzr/%s (pycurl: %s)' % (bzrlib.__version__, pycurl.version)
+ curl.setopt(pycurl.USERAGENT, ua_str)
+ curl.setopt(pycurl.VERBOSE, 1)
+ curl.setopt(pycurl.DEBUGFUNCTION, self._debug_cb)
+ if self.cabundle:
+ curl.setopt(pycurl.CAINFO, self.cabundle)
+ # Set accepted auth methods
+ curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY)
+ curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_ANY)
+ auth = self._get_credentials()
+ user = auth.get('user', None)
+ password = auth.get('password', None)
+ userpass = None
+ if user is not None:
+ userpass = user + ':'
+ if password is not None: # '' is a valid password
+ userpass += password
+ curl.setopt(pycurl.USERPWD, userpass)
+
+ def _curl_perform(self, curl, header, more_headers=[]):
+ """Perform curl operation and translate exceptions."""
+ try:
+ # There's no way in http/1.0 to say "must
+ # revalidate"; we don't want to force it to always
+ # retrieve, so just turn off the default Pragma
+ # provided by Curl.
+ headers = ['Cache-control: max-age=0',
+ 'Pragma: no-cache',
+ 'Connection: Keep-Alive']
+ curl.setopt(pycurl.HTTPHEADER, headers + more_headers)
+ curl.perform()
+ except pycurl.error, e:
+ url = curl.getinfo(pycurl.EFFECTIVE_URL)
+ trace.mutter('got pycurl error: %s, %s, %s, url: %s ',
+ e[0], e[1], e, url)
+ if e[0] in (CURLE_COULDNT_RESOLVE_HOST,
+ CURLE_COULDNT_RESOLVE_PROXY,
+ CURLE_COULDNT_CONNECT,
+ CURLE_GOT_NOTHING,
+ CURLE_SSL_CACERT,
+ CURLE_SSL_CACERT_BADFILE,
+ ):
+ raise errors.ConnectionError(
+ 'curl connection error (%s)\non %s' % (e[1], url))
+ elif e[0] == CURLE_RECV_ERROR:
+ raise errors.ConnectionReset(
+ 'curl connection error (%s)\non %s' % (e[1], url))
+ elif e[0] == CURLE_PARTIAL_FILE:
+ # Pycurl itself has detected a short read. We do not have all
+ # the information for the ShortReadvError, but that should be
+ # enough
+ raise errors.ShortReadvError(url,
+ offset='unknown', length='unknown',
+ actual='unknown',
+ extra='Server aborted the request')
+ raise
+ code = curl.getinfo(pycurl.HTTP_CODE)
+ if code in (301, 302, 303, 307):
+ url = curl.getinfo(pycurl.EFFECTIVE_URL)
+ msg = self._parse_headers(header)
+ redirected_to = msg.getheader('location')
+ raise errors.RedirectRequested(url,
+ redirected_to,
+ is_permanent=(code == 301))
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import features
+ from bzrlib.tests import http_server
+ permutations = [(PyCurlTransport, http_server.HttpServer_PyCurl),]
+ if features.HTTPSServerFeature.available():
+ from bzrlib.tests import (
+ https_server,
+ ssl_certs,
+ )
+
+ class HTTPS_pycurl_transport(PyCurlTransport):
+
+ def __init__(self, base, _from_transport=None):
+ super(HTTPS_pycurl_transport, self).__init__(base,
+ _from_transport)
+ self.cabundle = str(ssl_certs.build_path('ca.crt'))
+
+ permutations.append((HTTPS_pycurl_transport,
+ https_server.HTTPSServer_PyCurl))
+ return permutations
diff --git a/bzrlib/transport/http/_urllib.py b/bzrlib/transport/http/_urllib.py
new file mode 100644
index 0000000..3dc0556
--- /dev/null
+++ b/bzrlib/transport/http/_urllib.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ trace,
+ )
+from bzrlib.transport import http
+# TODO: handle_response should be integrated into the http/__init__.py
+from bzrlib.transport.http.response import handle_response
+from bzrlib.transport.http._urllib2_wrappers import (
+ Opener,
+ Request,
+ )
+
+
+class HttpTransport_urllib(http.HttpTransportBase):
+ """Python urllib transport for http and https."""
+
+ # In order to debug we have to issue our traces in sync with
+ # httplib, which uses print :(
+ _debuglevel = 0
+
+ _opener_class = Opener
+
+ def __init__(self, base, _from_transport=None, ca_certs=None):
+ super(HttpTransport_urllib, self).__init__(
+ base, 'urllib', _from_transport=_from_transport)
+ if _from_transport is not None:
+ self._opener = _from_transport._opener
+ else:
+ self._opener = self._opener_class(
+ report_activity=self._report_activity, ca_certs=ca_certs)
+
+ def _perform(self, request):
+ """Send the request to the server and handles common errors.
+
+ :returns: urllib2 Response object
+ """
+ connection = self._get_connection()
+ if connection is not None:
+ # Give back shared info
+ request.connection = connection
+ (auth, proxy_auth) = self._get_credentials()
+ # Clean the httplib.HTTPConnection pipeline in case the previous
+ # request couldn't do it
+ connection.cleanup_pipe()
+ else:
+ # First request, initialize credentials.
+ # scheme and realm will be set by the _urllib2_wrappers.AuthHandler
+ auth = self._create_auth()
+ # Proxy initialization will be done by the first proxied request
+ proxy_auth = dict()
+ # Ensure authentication info is provided
+ request.auth = auth
+ request.proxy_auth = proxy_auth
+
+ if self._debuglevel > 0:
+ print 'perform: %s base: %s, url: %s' % (request.method, self.base,
+ request.get_full_url())
+ response = self._opener.open(request)
+ if self._get_connection() is not request.connection:
+ # First connection or reconnection
+ self._set_connection(request.connection,
+ (request.auth, request.proxy_auth))
+ else:
+ # http may change the credentials while keeping the
+ # connection opened
+ self._update_credentials((request.auth, request.proxy_auth))
+
+ code = response.code
+ if (request.follow_redirections is False
+ and code in (301, 302, 303, 307)):
+ raise errors.RedirectRequested(request.get_full_url(),
+ request.redirected_to,
+ is_permanent=(code == 301))
+
+ if request.redirected_to is not None:
+ trace.mutter('redirected from: %s to: %s' % (request.get_full_url(),
+ request.redirected_to))
+
+ return response
+
+ def disconnect(self):
+ connection = self._get_connection()
+ if connection is not None:
+ connection.close()
+
+ def _get(self, relpath, offsets, tail_amount=0):
+ """See HttpTransport._get"""
+ abspath = self._remote_path(relpath)
+ headers = {}
+ accepted_errors = [200, 404]
+ if offsets or tail_amount:
+ range_header = self._attempted_range_header(offsets, tail_amount)
+ if range_header is not None:
+ accepted_errors.append(206)
+ accepted_errors.append(400)
+ accepted_errors.append(416)
+ bytes = 'bytes=' + range_header
+ headers = {'Range': bytes}
+
+ request = Request('GET', abspath, None, headers,
+ accepted_errors=accepted_errors)
+ response = self._perform(request)
+
+ code = response.code
+ if code == 404: # not found
+ raise errors.NoSuchFile(abspath)
+ elif code in (400, 416):
+ # We don't know which, but one of the ranges we specified was
+ # wrong.
+ raise errors.InvalidHttpRange(abspath, range_header,
+ 'Server returned code %d' % code)
+
+ data = handle_response(abspath, code, response.info(), response)
+ return code, data
+
+ def _post(self, body_bytes):
+ abspath = self._remote_path('.bzr/smart')
+ # We include 403 in accepted_errors so that send_http_smart_request can
+ # handle a 403. Otherwise a 403 causes an unhandled TransportError.
+ response = self._perform(
+ Request('POST', abspath, body_bytes,
+ {'Content-Type': 'application/octet-stream'},
+ accepted_errors=[200, 403]))
+ code = response.code
+ data = handle_response(abspath, code, response.info(), response)
+ return code, data
+
+ def _head(self, relpath):
+ """Request the HEAD of a file.
+
+ Performs the request and leaves callers to handle the results.
+ """
+ abspath = self._remote_path(relpath)
+ request = Request('HEAD', abspath,
+ accepted_errors=[200, 404])
+ response = self._perform(request)
+
+ return response
+
+ def has(self, relpath):
+ """Does the target location exist?
+ """
+ response = self._head(relpath)
+
+ code = response.code
+ if code == 200: # "ok",
+ return True
+ else:
+ return False
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import (
+ features,
+ http_server,
+ )
+ permutations = [(HttpTransport_urllib, http_server.HttpServer_urllib),]
+ if features.HTTPSServerFeature.available():
+ from bzrlib.tests import (
+ https_server,
+ ssl_certs,
+ )
+
+ class HTTPS_urllib_transport(HttpTransport_urllib):
+
+ def __init__(self, base, _from_transport=None):
+ super(HTTPS_urllib_transport, self).__init__(
+ base, _from_transport=_from_transport,
+ ca_certs=ssl_certs.build_path('ca.crt'))
+
+ permutations.append((HTTPS_urllib_transport,
+ https_server.HTTPSServer_urllib))
+ return permutations
diff --git a/bzrlib/transport/http/_urllib2_wrappers.py b/bzrlib/transport/http/_urllib2_wrappers.py
new file mode 100644
index 0000000..360ce4d
--- /dev/null
+++ b/bzrlib/transport/http/_urllib2_wrappers.py
@@ -0,0 +1,1851 @@
+# Copyright (C) 2006-2012 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of urllib2 tailored to bzr needs
+
+This file complements the urllib2 class hierarchy with custom classes.
+
+For instance, we create new HTTPConnection and HTTPSConnection classes that
+inherit from the original httplib.HTTP(S)Connection objects, but also have a
+new base which implements custom getresponse and cleanup_pipe methods.
+
+We then implement custom HTTPHandler and HTTPSHandler classes that use
+the custom HTTPConnection classes.
+
+We have a custom Response class, which lets us maintain a keep-alive
+connection even for requests that urllib2 doesn't expect to contain body data.
+
+And a custom Request class that lets us track redirections, and
+handle authentication schemes.
+
+For coherency with python libraries, we use capitalized header names throughout
+the code, even if the header names will be titled just before sending the
+request (see AbstractHTTPHandler.do_open).
+"""
+
+from __future__ import absolute_import
+
+DEBUG = 0
+
+# FIXME: Oversimplifying, two kinds of exceptions should be
+# raised once a request is issued: URLError before we have been
+# able to process the response, HTTPError after that. Processing the
+# response means we are able to leave the socket clean, so if we
+# are not able to do that, we should close the connection. The
+# actual code more or less does that; tests should be written to
+# ensure it.
+
+import errno
+import httplib
+import os
+import socket
+import urllib
+import urllib2
+import urlparse
+import re
+import sys
+import time
+
+from bzrlib import __version__ as bzrlib_version
+from bzrlib import (
+ config,
+ debug,
+ errors,
+ lazy_import,
+ osutils,
+ trace,
+ transport,
+ urlutils,
+ )
+lazy_import.lazy_import(globals(), """
+import ssl
+""")
+
+
+# Note for packagers: if there is no package providing certs for your platform,
+# the curl project produces http://curl.haxx.se/ca/cacert.pem weekly.
+_ssl_ca_certs_known_locations = [
+ u'/etc/ssl/certs/ca-certificates.crt', # Ubuntu/debian/gentoo
+ u'/etc/pki/tls/certs/ca-bundle.crt', # Fedora/CentOS/RH
+ u'/etc/ssl/ca-bundle.pem', # OpenSuse
+ u'/etc/ssl/cert.pem', # OpenSuse
+ u"/usr/local/share/certs/ca-root-nss.crt", # FreeBSD
+ # XXX: Needs checking, can't trust the interweb ;) -- vila 2012-01-25
+ u'/etc/openssl/certs/ca-certificates.crt', # Solaris
+ ]
+def default_ca_certs():
+ if sys.platform == 'win32':
+ return os.path.join(os.path.dirname(sys.executable), u"cacert.pem")
+ elif sys.platform == 'darwin':
+ # FIXME: Needs some default value for osx, waiting for osx installers
+ # guys feedback -- vila 2012-01-25
+ pass
+ else:
+ # Try known locations for friendly OSes providing the root certificates
+ # without making them hard to use for any https client.
+ for path in _ssl_ca_certs_known_locations:
+ if os.path.exists(path):
+ # First found wins
+ return path
+ # A default path that makes sense and will be mentioned in the error
+ # presented to the user, even if not correct for all platforms
+ return _ssl_ca_certs_known_locations[0]
+
+
+def ca_certs_from_store(path):
+ if not os.path.exists(path):
+ raise ValueError("ca certs path %s does not exist" % path)
+ return path
+
+
+def cert_reqs_from_store(unicode_str):
+ import ssl
+ try:
+ return {
+ "required": ssl.CERT_REQUIRED,
+ "none": ssl.CERT_NONE
+ }[unicode_str]
+ except KeyError:
+ raise ValueError("invalid value %s" % unicode_str)
+
+def default_ca_reqs():
+ if sys.platform in ('win32', 'darwin'):
+ # FIXME: Once we get native access to root certificates there, this
+ # won't be needed anymore. See http://pad.lv/920455 -- vila 2012-02-15
+ return u'none'
+ else:
+ return u'required'
+
+opt_ssl_ca_certs = config.Option('ssl.ca_certs',
+ from_unicode=ca_certs_from_store,
+ default=default_ca_certs,
+ invalid='warning',
+ help="""\
+Path to certification authority certificates to trust.
+
+This should be a valid path to a bundle containing all root Certificate
+Authorities used to verify an https server certificate.
+
+Use ssl.cert_reqs=none to disable certificate verification.
+""")
+
+opt_ssl_cert_reqs = config.Option('ssl.cert_reqs',
+ default=default_ca_reqs,
+ from_unicode=cert_reqs_from_store,
+ invalid='error',
+ help="""\
+Whether to require a certificate from the remote side. (default: required)
+
+Possible values:
+ * none: Certificates ignored
+ * required: Certificates required and validated
+""")
+
+checked_kerberos = False
+kerberos = None
+
+
+class addinfourl(urllib2.addinfourl):
+ '''Replacement addinfourl class compatible with python-2.7's xmlrpclib
+
+ In python-2.7, xmlrpclib expects that the response object that it receives
+ has a getheader method. httplib.HTTPResponse provides this but
+ urllib2.addinfourl does not. Add the necessary functions here, ported to
+ use the internal data structures of addinfourl.
+ '''
+
+ def getheader(self, name, default=None):
+ if self.headers is None:
+ raise httplib.ResponseNotReady()
+ return self.headers.getheader(name, default)
+
+ def getheaders(self):
+ if self.headers is None:
+ raise httplib.ResponseNotReady()
+ return self.headers.items()
+
+
+class _ReportingFileSocket(object):
+
+ def __init__(self, filesock, report_activity=None):
+ self.filesock = filesock
+ self._report_activity = report_activity
+
+ def report_activity(self, size, direction):
+ if self._report_activity:
+ self._report_activity(size, direction)
+
+ def read(self, size=1):
+ s = self.filesock.read(size)
+ self.report_activity(len(s), 'read')
+ return s
+
+ def readline(self, size=-1):
+ s = self.filesock.readline(size)
+ self.report_activity(len(s), 'read')
+ return s
+
+ def __getattr__(self, name):
+ return getattr(self.filesock, name)
+
+
+class _ReportingSocket(object):
+
+ def __init__(self, sock, report_activity=None):
+ self.sock = sock
+ self._report_activity = report_activity
+
+ def report_activity(self, size, direction):
+ if self._report_activity:
+ self._report_activity(size, direction)
+
+ def sendall(self, s, *args):
+ self.sock.sendall(s, *args)
+ self.report_activity(len(s), 'write')
+
+ def recv(self, *args):
+ s = self.sock.recv(*args)
+ self.report_activity(len(s), 'read')
+ return s
+
+ def makefile(self, mode='r', bufsize=-1):
+ # httplib creates a fileobject that doesn't do buffering, which
+ # makes fp.readline() very expensive because it only reads one byte
+ # at a time. So we wrap the socket in an object that forces
+ # sock.makefile to make a buffered file.
+ fsock = self.sock.makefile(mode, 65536)
+ # And wrap that into a reporting kind of fileobject
+ return _ReportingFileSocket(fsock, self._report_activity)
+
+ def __getattr__(self, name):
+ return getattr(self.sock, name)
+
+
+# We define our own Response class to keep our httplib pipe clean
+class Response(httplib.HTTPResponse):
+ """Custom HTTPResponse, to avoid the need to decorate.
+
+ httplib prefers to decorate the returned objects, rather
+ than using a custom object.
+ """
+
+ # Some responses have bodies in which we have no interest
+ _body_ignored_responses = [301, 302, 303, 307, 400, 401, 403, 404, 501]
+
+ # in finish() below, we may have to discard several MB in the worst
+ # case. To avoid buffering that much, we read and discard by chunks
+ # instead. The underlying file is either a socket or a StringIO, so reading
+ # 8k chunks should be fine.
+ _discarded_buf_size = 8192
+
+ def begin(self):
+ """Begin to read the response from the server.
+
+ httplib assumes that some responses get no content and does
+ not even attempt to read the body in that case, leaving
+ the body in the socket and blocking the next request. Let's
+ try to work around that.
+ """
+ httplib.HTTPResponse.begin(self)
+ if self.status in self._body_ignored_responses:
+ if self.debuglevel >= 2:
+ print "For status: [%s]," % self.status,
+ print "will ready body, length: %s" % self.length
+ if not (self.length is None or self.will_close):
+ # In some cases we must not even try to read the
+ # body, or we may encounter a 104 'Connection
+ # reset by peer' error if there is indeed no body
+ # and the server closed the connection just after
+ # having issued the response headers (even if the
+ # headers indicate a Content-Type...)
+ body = self.read(self.length)
+ if self.debuglevel >= 9:
+ # This one can be huge and is generally not interesting
+ print "Consumed body: [%s]" % body
+ self.close()
+ elif self.status == 200:
+ # Whatever the request is, it went ok, so we surely don't want to
+ # close the connection. Some cases are not correctly detected by
+ # httplib.HTTPConnection.getresponse (called by
+ # httplib.HTTPResponse.begin). The CONNECT response for the https
+ # through proxy case is one. Note: the 'will_close' below refers
+ # to the "true" socket between us and the server, whereas the
+ # 'close()' above refers to the copy of that socket created by
+ # httplib for the response itself. So, in the if above we close the
+ # socket to indicate that we are done with the response whereas
+ # below we keep the socket with the server opened.
+ self.will_close = False
+
+ def finish(self):
+ """Finish reading the body.
+
+ In some cases, the client may have left some bytes to read in the
+ body. That will prevent the next request from succeeding if we use a
+ persistent connection. If we don't use a persistent connection, well,
+ nothing will block the next request since a new connection will be
+ issued anyway.
+
+ :return: the number of bytes left on the socket (may be None)
+ """
+ pending = None
+ if not self.isclosed():
+ # Make sure nothing was left to be read on the socket
+ pending = 0
+ data = True
+ while data and self.length:
+ # read() will update self.length
+ data = self.read(min(self.length, self._discarded_buf_size))
+ pending += len(data)
+ if pending:
+ trace.mutter("%s bytes left on the HTTP socket", pending)
+ self.close()
+ return pending
+
+
+# Not inheriting from 'object' because httplib.HTTPConnection doesn't.
+class AbstractHTTPConnection:
+ """A custom HTTP(S) Connection, which can reset itself on a bad response"""
+
+ response_class = Response
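+ # httplib.HTTPConnection.getresponse() instantiates response_class, so
+ # overriding it here plugs in the Response class defined above.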
+
+ # When we detect a server responding with the whole file to range requests,
+ # we want to warn. But not below a given threshold.
+ _range_warning_thresold = 1024 * 1024
+
+ def __init__(self, report_activity=None):
+ self._response = None
+ self._report_activity = report_activity
+ self._ranges_received_whole_file = None
+
+ def _mutter_connect(self):
+ netloc = '%s:%s' % (self.host, self.port)
+ if self.proxied_host is not None:
+ netloc += '(proxy for %s)' % self.proxied_host
+ trace.mutter('* About to connect() to %s' % netloc)
+
+ def getresponse(self):
+ """Capture the response to be able to cleanup"""
+ self._response = httplib.HTTPConnection.getresponse(self)
+ return self._response
+
+ def cleanup_pipe(self):
+ """Read the remaining bytes of the last response if any."""
+ if self._response is not None:
+ try:
+ pending = self._response.finish()
+ # Warn the user (once)
+ if (self._ranges_received_whole_file is None
+ and self._response.status == 200
+ and pending and pending > self._range_warning_thresold
+ ):
+ self._ranges_received_whole_file = True
+ trace.warning(
+ 'Got a 200 response when asking for multiple ranges,'
+ ' does your server at %s:%s support range requests?',
+ self.host, self.port)
+ except socket.error, e:
+ # It's conceivable that the socket is in a bad state here
+ # (including some test cases) and in this case, it doesn't need
+ # cleaning anymore, so no need to fail, we just get rid of the
+ # socket and let callers reconnect
+ if (len(e.args) == 0
+ or e.args[0] not in (errno.ECONNRESET, errno.ECONNABORTED)):
+ raise
+ self.close()
+ self._response = None
+ # Preserve our preciousss
+ sock = self.sock
+ self.sock = None
+ # Let httplib.HTTPConnection do its housekeeping
+ self.close()
+ # Restore our preciousss
+ self.sock = sock
+
+ def _wrap_socket_for_reporting(self, sock):
+ """Wrap the socket before anybody use it."""
+ self.sock = _ReportingSocket(sock, self._report_activity)
+
+
+class HTTPConnection(AbstractHTTPConnection, httplib.HTTPConnection):
+
+ # XXX: Needs refactoring at the caller level.
+ def __init__(self, host, port=None, proxied_host=None,
+ report_activity=None, ca_certs=None):
+ AbstractHTTPConnection.__init__(self, report_activity=report_activity)
+ # Use strict=True since we don't support HTTP/0.9
+ httplib.HTTPConnection.__init__(self, host, port, strict=True)
+ self.proxied_host = proxied_host
+ # ca_certs is ignored, it's only relevant for https
+
+ def connect(self):
+ if 'http' in debug.debug_flags:
+ self._mutter_connect()
+ httplib.HTTPConnection.connect(self)
+ self._wrap_socket_for_reporting(self.sock)
+
+
+# These two methods were imported from Python 3.2's ssl module
+
+def _dnsname_to_pat(dn):
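+ # e.g. '*.example.com' compiles to r'\A[^.]+\.example\.com\Z' (ignoring
+ # case): it matches 'www.example.com' but not 'example.com' or
+ # 'a.b.example.com'.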
+ pats = []
+ for frag in dn.split(r'.'):
+ if frag == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ else:
+ # Otherwise, '*' matches any dotless fragment.
+ frag = re.escape(frag)
+ pats.append(frag.replace(r'\*', '[^.]*'))
+ return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
+ are mostly followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate")
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if _dnsname_to_pat(value).match(hostname):
+ return
+ dnsnames.append(value)
+ if not san:
+ # The subject is only checked when subjectAltName is empty
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_to_pat(value).match(hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise errors.CertificateError(
+ "hostname %r doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise errors.CertificateError("hostname %r doesn't match %r" %
+ (hostname, dnsnames[0]))
+ else:
+ raise errors.CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
+
+
+class HTTPSConnection(AbstractHTTPConnection, httplib.HTTPSConnection):
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ proxied_host=None,
+ report_activity=None, ca_certs=None):
+ AbstractHTTPConnection.__init__(self, report_activity=report_activity)
+ # Use strict=True since we don't support HTTP/0.9
+ httplib.HTTPSConnection.__init__(self, host, port,
+ key_file, cert_file, strict=True)
+ self.proxied_host = proxied_host
+ self.ca_certs = ca_certs
+
+ def connect(self):
+ if 'http' in debug.debug_flags:
+ self._mutter_connect()
+ httplib.HTTPConnection.connect(self)
+ self._wrap_socket_for_reporting(self.sock)
+ if self.proxied_host is None:
+ self.connect_to_origin()
+
+ def connect_to_origin(self):
+ # FIXME JRV 2011-12-18: Use location config here?
+ config_stack = config.GlobalStack()
+ cert_reqs = config_stack.get('ssl.cert_reqs')
+ if self.proxied_host is not None:
+ host = self.proxied_host.split(":", 1)[0]
+ else:
+ host = self.host
+ if cert_reqs == ssl.CERT_NONE:
+ trace.warning("Not checking SSL certificate for %s", host)
+ ca_certs = None
+ else:
+ if self.ca_certs is None:
+ ca_certs = config_stack.get('ssl.ca_certs')
+ else:
+ ca_certs = self.ca_certs
+ if ca_certs is None:
+ trace.warning(
+ "No valid trusted SSL CA certificates file set. See "
+ "'bzr help ssl.ca_certs' for more information on setting "
+ "trusted CAs.")
+ try:
+ ssl_sock = ssl.wrap_socket(self.sock, self.key_file, self.cert_file,
+ cert_reqs=cert_reqs, ca_certs=ca_certs)
+ except ssl.SSLError, e:
+ trace.note(
+ "\n"
+ "See `bzr help ssl.ca_certs` for how to specify trusted CA"
+ "certificates.\n"
+ "Pass -Ossl.cert_reqs=none to disable certificate "
+ "verification entirely.\n")
+ raise
+ if cert_reqs == ssl.CERT_REQUIRED:
+ peer_cert = ssl_sock.getpeercert()
+ match_hostname(peer_cert, host)
+
+ # Wrap the ssl socket before anybody uses it
+ self._wrap_socket_for_reporting(ssl_sock)
+
+
+class Request(urllib2.Request):
+ """A custom Request object.
+
+ urllib2 determines the request method heuristically (based on
+ the presence or absence of data). We set the method
+ statically.
+
+ The Request object tracks:
+ - the connection the request will be made on.
+ - the authentication parameters needed to preventively set
+ the authentication header once a first authentication have
+ been made.
+ """
+
+ def __init__(self, method, url, data=None, headers={},
+ origin_req_host=None, unverifiable=False,
+ connection=None, parent=None,
+ accepted_errors=None):
+ urllib2.Request.__init__(self, url, data, headers,
+ origin_req_host, unverifiable)
+ self.method = method
+ self.connection = connection
+ self.accepted_errors = accepted_errors
+ # To handle redirections
+ self.parent = parent
+ self.redirected_to = None
+ # Unless told otherwise, redirections are not followed
+ self.follow_redirections = False
+ # auth and proxy_auth are dicts containing, at least
+ # (scheme, host, port, realm, user, password, protocol, path).
+ # The dict entries are mostly handled by the AuthHandler.
+ # Some authentication schemes may add more entries.
+ self.auth = {}
+ self.proxy_auth = {}
+ self.proxied_host = None
+
+ def get_method(self):
+ return self.method
+
+ def set_proxy(self, proxy, type):
+ """Set the proxy and remember the proxied host."""
+ host, port = urllib.splitport(self.get_host())
+ if port is None:
+ # We need to set the default port ourselves way before it gets set
+ # in the HTTP[S]Connection object at build time.
+ if self.type == 'https':
+ conn_class = HTTPSConnection
+ else:
+ conn_class = HTTPConnection
+ port = conn_class.default_port
+ self.proxied_host = '%s:%s' % (host, port)
+ urllib2.Request.set_proxy(self, proxy, type)
+ # When urllib2 makes a https request with our wrapper code and a proxy,
+ # it sets Host to the https proxy, not the host we want to talk to.
+ # I'm fairly sure this is our fault, but what the cause is remains an
+ # open question. -- Robert Collins May 8 2010.
+ self.add_unredirected_header('Host', self.proxied_host)
+
+
+class _ConnectRequest(Request):
+
+ def __init__(self, request):
+ """Constructor
+
+ :param request: the first request sent to the proxied host, already
+ processed by the opener (i.e. proxied_host is already set).
+ """
+ # We give a fake url and redefine get_selector or urllib2 will be
+ # confused
+ Request.__init__(self, 'CONNECT', request.get_full_url(),
+ connection=request.connection)
+ if request.proxied_host is None:
+ raise AssertionError()
+ self.proxied_host = request.proxied_host
+
+ def get_selector(self):
+ return self.proxied_host
+
+ def set_proxy(self, proxy, type):
+ """Set the proxy without remembering the proxied host.
+
+ We already know the proxied host by definition; the CONNECT request
+ occurs only when the connection goes through a proxy. The usual
+ processing (masquerading the request so that the connection is done to
+ the proxy while the request is targeted at another host) does not apply
+ here. In fact, the connection is already established with the proxy and
+ we just want to enable the SSL tunneling.
+ """
+ urllib2.Request.set_proxy(self, proxy, type)
+
+
+class ConnectionHandler(urllib2.BaseHandler):
+ """Provides connection-sharing by pre-processing requests.
+
+ urllib2 provides no way to access the HTTPConnection object it
+ uses internally. But we need it in order to achieve
+ connection sharing. So, we add it to the request just before
+ it is processed, and then we override the do_open method for
+ http[s] requests in AbstractHTTPHandler.
+ """
+
+ handler_order = 1000 # after all pre-processings
+
+ def __init__(self, report_activity=None, ca_certs=None):
+ self._report_activity = report_activity
+ self.ca_certs = ca_certs
+
+ def create_connection(self, request, http_connection_class):
+ host = request.get_host()
+ if not host:
+ # Just a bit of paranoia here, this should have been
+ # handled in the higher levels
+ raise errors.InvalidURL(request.get_full_url(), 'no host given.')
+
+ # We create a connection (but it will not connect until the first
+ # request is made)
+ try:
+ connection = http_connection_class(
+ host, proxied_host=request.proxied_host,
+ report_activity=self._report_activity,
+ ca_certs=self.ca_certs)
+ except httplib.InvalidURL, exception:
+ # There is only one occurrence of InvalidURL in httplib
+ raise errors.InvalidURL(request.get_full_url(),
+ extra='nonnumeric port')
+
+ return connection
+
+ def capture_connection(self, request, http_connection_class):
+ """Capture or inject the request connection.
+
+ Two cases:
+ - the request has no connection: create a new one,
+
+ - the request has a connection: it has already been used,
+ so let's capture it so that we can give it to
+ another transport to be reused. We don't do that
+ ourselves: the Transport object gets the connection from
+ a first request and then propagates it, from request to
+ request or to cloned transports.
+ """
+ connection = request.connection
+ if connection is None:
+ # Create a new one
+ connection = self.create_connection(request, http_connection_class)
+ request.connection = connection
+
+ # All connections will pass here, propagate debug level
+ connection.set_debuglevel(DEBUG)
+ return request
+
+ def http_request(self, request):
+ return self.capture_connection(request, HTTPConnection)
+
+ def https_request(self, request):
+ return self.capture_connection(request, HTTPSConnection)
+
+
+class AbstractHTTPHandler(urllib2.AbstractHTTPHandler):
+ """A custom handler for HTTP(S) requests.
+
+ We override urllib2.AbstractHTTPHandler to get better
+ control of the connection, the ability to implement new
+ request types, and to return a response able to cope with
+ persistent connections.
+ """
+
+ # We change our order to be before urllib2 HTTP[S]Handlers
+ # and be chosen instead of them (the first http_open called
+ # wins).
+ handler_order = 400
+
+ _default_headers = {'Pragma': 'no-cache',
+ 'Cache-control': 'max-age=0',
+ 'Connection': 'Keep-Alive',
+ 'User-agent': 'bzr/%s (urllib)' % bzrlib_version,
+ 'Accept': '*/*',
+ }
+
+ def __init__(self):
+ urllib2.AbstractHTTPHandler.__init__(self, debuglevel=DEBUG)
+
+ def http_request(self, request):
+ """Common headers setting"""
+
+ request.headers.update(self._default_headers.copy())
+ # FIXME: We may have to add the Content-Length header if
+ # we have data to send.
+ return request
+
+ def retry_or_raise(self, http_class, request, first_try):
+ """Retry the request (once) or raise the exception.
+
+ urllib2 raises application-level exceptions; we
+ just have to translate them.
+
+ httplib can raise transport-level exceptions (badly
+ formatted dialog, loss of connection or socket-level
+ problems). In that case we should issue the request again
+ (httplib will close and reopen a new connection if
+ needed).
+ """
+ # When an exception occurs, we give back the original
+ # Traceback, otherwise the bugs are hard to diagnose.
+ exc_type, exc_val, exc_tb = sys.exc_info()
+ if exc_type == socket.gaierror:
+ # No need to retry, that will not help
+ raise errors.ConnectionError("Couldn't resolve host '%s'"
+ % request.get_origin_req_host(),
+ orig_error=exc_val)
+ elif isinstance(exc_val, httplib.ImproperConnectionState):
+ # The httplib pipeline is in an incorrect state; it's a bug in our
+ # implementation.
+ raise exc_type, exc_val, exc_tb
+ else:
+ if first_try:
+ if self._debuglevel >= 2:
+ print 'Received exception: [%r]' % exc_val
+ print ' On connection: [%r]' % request.connection
+ method = request.get_method()
+ url = request.get_full_url()
+ print ' Will retry, %s %r' % (method, url)
+ request.connection.close()
+ response = self.do_open(http_class, request, False)
+ else:
+ if self._debuglevel >= 2:
+ print 'Received second exception: [%r]' % exc_val
+ print ' On connection: [%r]' % request.connection
+ if exc_type in (httplib.BadStatusLine, httplib.UnknownProtocol):
+ # httplib.BadStatusLine and
+ # httplib.UnknownProtocol indicate that a
+ # bogus server was encountered or a bad
+ # connection (i.e. transient errors) was
+ # experienced; we have already retried once
+ # for this request, so we raise the exception.
+ my_exception = errors.InvalidHttpResponse(
+ request.get_full_url(),
+ 'Bad status line received',
+ orig_error=exc_val)
+ elif (isinstance(exc_val, socket.error) and len(exc_val.args)
+ and exc_val.args[0] in (errno.ECONNRESET, 10053, 10054)):
+ # 10053 == WSAECONNABORTED
+ # 10054 == WSAECONNRESET
+ raise errors.ConnectionReset(
+ "Connection lost while sending request.")
+ else:
+ # All other exceptions are considered connection-related.
+
+ # socket errors generally occur for reasons
+ # far outside our scope, so closing the
+ # connection and retrying is the best we can
+ # do.
+
+ my_exception = errors.ConnectionError(
+ msg= 'while sending %s %s:' % (request.get_method(),
+ request.get_selector()),
+ orig_error=exc_val)
+
+ if self._debuglevel >= 2:
+ print 'On connection: [%r]' % request.connection
+ method = request.get_method()
+ url = request.get_full_url()
+ print ' Failed again, %s %r' % (method, url)
+ print ' Will raise: [%r]' % my_exception
+ raise my_exception, None, exc_tb
+ return response
+
+ def do_open(self, http_class, request, first_try=True):
+ """See urllib2.AbstractHTTPHandler.do_open for the general idea.
+
+ The request will be retried once if it fails.
+ """
+ connection = request.connection
+ if connection is None:
+ raise AssertionError(
+ 'Cannot process a request without a connection')
+
+ # Get all the headers
+ headers = {}
+ headers.update(request.header_items())
+ headers.update(request.unredirected_hdrs)
+ # Some servers or proxies will choke on headers not properly
+ # cased. httplib/urllib/urllib2 all use capitalize to get canonical
+ # header names, but only python2.5 urllib2 uses title() to fix them just
+ # before sending the request. And not all versions of python 2.5 do
+ # that. Since we replace urllib2.AbstractHTTPHandler.do_open we do it
+ # ourselves below.
+ headers = dict((name.title(), val) for name, val in headers.iteritems())
+
+ try:
+ method = request.get_method()
+ url = request.get_selector()
+ connection._send_request(method, url,
+ # FIXME: implements 100-continue
+ #None, # We don't send the body yet
+ request.get_data(),
+ headers)
+ if 'http' in debug.debug_flags:
+ trace.mutter('> %s %s' % (method, url))
+ hdrs = []
+ for k,v in headers.iteritems():
+ # People are often told to paste -Dhttp output to help
+ # debug. Don't compromise credentials.
+ if k in ('Authorization', 'Proxy-Authorization'):
+ v = '<masked>'
+ hdrs.append('%s: %s' % (k, v))
+ trace.mutter('> ' + '\n> '.join(hdrs) + '\n')
+ if self._debuglevel >= 1:
+ print 'Request sent: [%r] from (%s)' \
+ % (request, request.connection.sock.getsockname())
+ response = connection.getresponse()
+ convert_to_addinfourl = True
+ except (ssl.SSLError, errors.CertificateError):
+ # Something is wrong with either the certificate or the hostname,
+ # re-trying won't help
+ raise
+ except (socket.gaierror, httplib.BadStatusLine, httplib.UnknownProtocol,
+ socket.error, httplib.HTTPException):
+ response = self.retry_or_raise(http_class, request, first_try)
+ convert_to_addinfourl = False
+
+# FIXME: HTTPConnection does not fully support 100-continue (the
+# server responses are just ignored)
+
+# if code == 100:
+# mutter('Will send the body')
+# # We can send the body now
+# body = request.get_data()
+# if body is None:
+# raise URLError("No data given")
+# connection.send(body)
+# response = connection.getresponse()
+
+ if self._debuglevel >= 2:
+ print 'Receives response: %r' % response
+ print ' For: %r(%r)' % (request.get_method(),
+ request.get_full_url())
+
+ if convert_to_addinfourl:
+ # Shamelessly copied from urllib2
+ req = request
+ r = response
+ r.recv = r.read
+ fp = socket._fileobject(r, bufsize=65536)
+ resp = addinfourl(fp, r.msg, req.get_full_url())
+ resp.code = r.status
+ resp.msg = r.reason
+ resp.version = r.version
+ if self._debuglevel >= 2:
+ print 'Create addinfourl: %r' % resp
+ print ' For: %r(%r)' % (request.get_method(),
+ request.get_full_url())
+ if 'http' in debug.debug_flags:
+ version = 'HTTP/%d.%d'
+ try:
+ version = version % (resp.version / 10,
+ resp.version % 10)
+ except:
+ version = 'HTTP/%r' % resp.version
+ trace.mutter('< %s %s %s' % (version, resp.code,
+ resp.msg))
+ # Use the raw header lines instead of treating resp.info() as a
+ # dict since we may miss duplicated headers otherwise.
+ hdrs = [h.rstrip('\r\n') for h in resp.info().headers]
+ trace.mutter('< ' + '\n< '.join(hdrs) + '\n')
+ else:
+ resp = response
+ return resp
+
+
+class HTTPHandler(AbstractHTTPHandler):
+ """A custom handler that just thunks into HTTPConnection"""
+
+ def http_open(self, request):
+ return self.do_open(HTTPConnection, request)
+
+
+class HTTPSHandler(AbstractHTTPHandler):
+ """A custom handler that just thunks into HTTPSConnection"""
+
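+ # https requests get the same default headers as plain http requests.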
+ https_request = AbstractHTTPHandler.http_request
+
+ def https_open(self, request):
+ connection = request.connection
+ if connection.sock is None and \
+ connection.proxied_host is not None and \
+ request.get_method() != 'CONNECT' : # Don't loop
+ # FIXME: We need a gazillion connection tests here, but we still
+ # miss a https server :-( :
+ # - with and without proxy
+ # - with and without certificate
+ # - with self-signed certificate
+ # - with and without authentication
+ # - with good and bad credentials (especially the proxy auth around
+ # CONNECT)
+ # - with basic and digest schemes
+ # - reconnection on errors
+ # - connection persistence behaviour (including reconnection)
+
+ # We are about to connect for the first time via a proxy, we must
+ # issue a CONNECT request first to establish the encrypted link
+ connect = _ConnectRequest(request)
+ response = self.parent.open(connect)
+ if response.code != 200:
+ raise errors.ConnectionError("Can't connect to %s via proxy %s" % (
+ connect.proxied_host, self.host))
+ # Housekeeping
+ connection.cleanup_pipe()
+ # Establish the connection encryption
+ connection.connect_to_origin()
+ # Propagate the connection to the original request
+ request.connection = connection
+ return self.do_open(HTTPSConnection, request)
+
+
+class HTTPRedirectHandler(urllib2.HTTPRedirectHandler):
+ """Handles redirect requests.
+
+ We have to implement our own scheme because we use a specific
+ Request object and because we want to implement a specific
+ policy.
+ """
+ _debuglevel = DEBUG
+ # RFC2616 says that only read requests should be redirected
+ # without interacting with the user. But bzr uses some
+ # shortcuts to optimize against roundtrips which can lead to
+ # write requests being issued before read requests of
+ # containing dirs can be redirected. So we redirect write
+ # requests in the same way which seems to respect the spirit
+ # of the RFC if not its letter.
+
+ def redirect_request(self, req, fp, code, msg, headers, newurl):
+ """See urllib2.HTTPRedirectHandler.redirect_request"""
+ # We would have preferred to update the request instead
+ # of creating a new one, but the urllib2.Request object
+ # has too complicated a creation process to provide a
+ # simple enough equivalent update process. Instead, when
+ # redirecting, we only update the following request in
+ # the redirect chain with a reference to the parent
+ # request.
+
+ # Some codes make no sense in our context and are treated
+ # as errors:
+
+ # 300: Multiple choices for different representations of
+ # the URI. Using that mechanism with bzr will violate the
+ # protocol neutrality of Transport.
+
+ # 304: Not modified (SHOULD only occur with conditional
+ # GETs which are not used by our implementation)
+
+ # 305: Use proxy. I can't imagine this one occurring in
+ # our context -- vila/20060909
+
+ # 306: Unused (if the RFC says so...)
+
+ # If the code is 302 and the request is HEAD, some may
+ # think that it is a sufficient hint that the file exists
+ # and that we MAY avoid following the redirections. But
+ # if we want to be sure, we MUST follow them.
+
+ if code in (301, 302, 303, 307):
+ return Request(req.get_method(),newurl,
+ headers = req.headers,
+ origin_req_host = req.get_origin_req_host(),
+ unverifiable = True,
+ # TODO: It would be nice to be able to
+ # detect virtual hosts sharing the same
+ # IP address; that would allow us to
+ # share the same connection...
+ connection = None,
+ parent = req,
+ )
+ else:
+ raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+ def http_error_302(self, req, fp, code, msg, headers):
+ """Requests the redirected to URI.
+
+ Copied from urllib2 to be able to clean the pipe of the associated
+ connection, *before* issuing the redirected request but *after* having
+ possibly raised an error.
+ """
+ # Some servers (incorrectly) return multiple Location headers
+ # (so probably same goes for URI). Use first header.
+
+ # TODO: Once we get rid of addinfourl objects, the
+ # following will need to be updated to use correct case
+ # for headers.
+ if 'location' in headers:
+ newurl = headers.getheaders('location')[0]
+ elif 'uri' in headers:
+ newurl = headers.getheaders('uri')[0]
+ else:
+ return
+ if self._debuglevel >= 1:
+ print 'Redirected to: %s (followed: %r)' % (newurl,
+ req.follow_redirections)
+ if req.follow_redirections is False:
+ req.redirected_to = newurl
+ return fp
+
+ newurl = urlparse.urljoin(req.get_full_url(), newurl)
+
+ # This call succeeds or raises an error. urllib2 returns
+ # if redirect_request returns None, but our
+ # redirect_request never returns None.
+ redirected_req = self.redirect_request(req, fp, code, msg, headers,
+ newurl)
+
+ # loop detection
+ # .redirect_dict has a key url if url was previously visited.
+ if hasattr(req, 'redirect_dict'):
+ visited = redirected_req.redirect_dict = req.redirect_dict
+ if (visited.get(newurl, 0) >= self.max_repeats or
+ len(visited) >= self.max_redirections):
+ raise urllib2.HTTPError(req.get_full_url(), code,
+ self.inf_msg + msg, headers, fp)
+ else:
+ visited = redirected_req.redirect_dict = req.redirect_dict = {}
+ visited[newurl] = visited.get(newurl, 0) + 1
+
+ # We can close the fp now that we are sure that we won't
+ # use it with HTTPError.
+ fp.close()
+ # We have all we need already in the response
+ req.connection.cleanup_pipe()
+
+ return self.parent.open(redirected_req)
+
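+ # 301, 303 and 307 need exactly the same processing as 302 (clean the
+ # connection pipe, then follow or record the redirection).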
+ http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+
+class ProxyHandler(urllib2.ProxyHandler):
+ """Handles proxy setting.
+
+ Copied and modified from urllib2 to be able to modify the request during
+ the request pre-processing instead of modifying it at _open time. As we
+ capture (or create) the connection object during request processing, _open
+ time was too late.
+
+ The main task is to modify the request so that the connection is done to
+ the proxy while the request still refers to the destination host.
+
+ Note: the proxy handling *may* modify the protocol used; the request may be
+ against an https server proxied through an http proxy. So, https_request
+ will be called, but later it's really http_open that will be called. This
+ explains why we don't have to call self.parent.open as urllib2 does.
+ """
+
+ # Proxies must be in front
+ handler_order = 100
+ _debuglevel = DEBUG
+
+ def __init__(self, proxies=None):
+ urllib2.ProxyHandler.__init__(self, proxies)
+ # First, let's get rid of the urllib2 implementation
+ for type, proxy in self.proxies.items():
+ if self._debuglevel >= 3:
+ print 'Will unbind %s_open for %r' % (type, proxy)
+ delattr(self, '%s_open' % type)
+
+ def bind_scheme_request(proxy, scheme):
+ if proxy is None:
+ return
+ scheme_request = scheme + '_request'
+ if self._debuglevel >= 3:
+ print 'Will bind %s for %r' % (scheme_request, proxy)
+ setattr(self, scheme_request,
+ lambda request: self.set_proxy(request, scheme))
+ # We are interested only in the http[s] proxies
+ http_proxy = self.get_proxy_env_var('http')
+ bind_scheme_request(http_proxy, 'http')
+ https_proxy = self.get_proxy_env_var('https')
+ bind_scheme_request(https_proxy, 'https')
+
+ def get_proxy_env_var(self, name, default_to='all'):
+ """Get a proxy env var.
+
+ Note that we indirectly rely on
+ urllib.getproxies_environment taking into account the
+ uppercased values for proxy variables.
+ """
+ try:
+ return self.proxies[name.lower()]
+ except KeyError:
+ if default_to is not None:
+ # Try to get the alternate environment variable
+ try:
+ return self.proxies[default_to]
+ except KeyError:
+ pass
+ return None
+
+ def proxy_bypass(self, host):
+ """Check if host should be proxied or not.
+
+ :returns: True to skip the proxy, False otherwise.
+ """
+ no_proxy = self.get_proxy_env_var('no', default_to=None)
+ bypass = self.evaluate_proxy_bypass(host, no_proxy)
+ if bypass is None:
+ # Nevertheless, there are platform-specific ways to
+ # ignore proxies...
+ return urllib.proxy_bypass(host)
+ else:
+ return bypass
+
+ def evaluate_proxy_bypass(self, host, no_proxy):
+ """Check the host against a comma-separated no_proxy list as a string.
+
+ :param host: ``host:port`` being requested
+
+ :param no_proxy: comma-separated list of hosts to access directly.
+
+ :returns: True to skip the proxy, False not to, or None to
+ leave it to urllib.
+ """
+ if no_proxy is None:
+ # All hosts are proxied
+ return False
+ hhost, hport = urllib.splitport(host)
+ # Does host match any of the domains mentioned in
+ # no_proxy? The rules about what is authorized in no_proxy
+ # are fuzzy (to say the least). We try to allow most
+ # commonly seen values.
+ for domain in no_proxy.split(','):
+ domain = domain.strip()
+ if domain == '':
+ continue
+ dhost, dport = urllib.splitport(domain)
+ if hport == dport or dport is None:
+ # Protect glob chars
+ dhost = dhost.replace(".", r"\.")
+ dhost = dhost.replace("*", r".*")
+ dhost = dhost.replace("?", r".")
+ if re.match(dhost, hhost, re.IGNORECASE):
+ return True
+ # Nothing explicitly avoids the host
+ return None
+
+ def set_proxy(self, request, type):
+ if self.proxy_bypass(request.get_host()):
+ return request
+
+ proxy = self.get_proxy_env_var(type)
+ if self._debuglevel >= 3:
+ print 'set_proxy %s_request for %r' % (type, proxy)
+ # FIXME: python 2.5 urlparse provides a better _parse_proxy which can
+ # grok user:password@host:port as well as
+ # http://user:password@host:port
+
+ parsed_url = transport.ConnectedTransport._split_url(proxy)
+ if not parsed_url.host:
+ raise errors.InvalidURL(proxy, 'No host component')
+
+ if request.proxy_auth == {}:
+ # No proxy auth parameters are available, we are handling the first
+ # proxied request, so initialize them. scheme (the authentication
+ # scheme) and realm will be set by the AuthHandler
+ request.proxy_auth = {
+ 'host': parsed_url.host,
+ 'port': parsed_url.port,
+ 'user': parsed_url.user,
+ 'password': parsed_url.password,
+ 'protocol': parsed_url.scheme,
+ # We ignore path since we connect to a proxy
+ 'path': None}
+ if parsed_url.port is None:
+ phost = parsed_url.host
+ else:
+ phost = parsed_url.host + ':%d' % parsed_url.port
+ request.set_proxy(phost, type)
+ if self._debuglevel >= 3:
+ print 'set_proxy: proxy set to %s://%s' % (type, phost)
+ return request
+
+
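The no_proxy handling above turns each entry into a regular expression by escaping dots and translating the shell-style glob characters before matching it against the requested host. A minimal standalone sketch of that translation, with made-up hostnames, purely to illustrate what evaluate_proxy_bypass accepts:

import re
import urllib

def bypass(host, no_proxy):
    # Mirrors ProxyHandler.evaluate_proxy_bypass: compare ports, then turn
    # each no_proxy entry into a regexp and match it against the host.
    hhost, hport = urllib.splitport(host)
    for domain in no_proxy.split(','):
        domain = domain.strip()
        if domain == '':
            continue
        dhost, dport = urllib.splitport(domain)
        if hport == dport or dport is None:
            dhost = dhost.replace('.', r'\.').replace('*', '.*').replace('?', '.')
            if re.match(dhost, hhost, re.IGNORECASE):
                return True
    return False

print bypass('internal.example.com:8080', '*.example.com, localhost')  # True
print bypass('other.org', '*.example.com, localhost')                  # False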
+class AbstractAuthHandler(urllib2.BaseHandler):
+ """A custom abstract authentication handler for all http authentications.
+
+ Provides the meat to handle authentication errors and
+ preemptively set authentication headers after the first
+ successful authentication.
+
+ This can be used for http and proxy, as well as for basic, negotiate and
+ digest authentications.
+
+ This provides a unified interface for all authentication handlers
+ (urllib2 provides far too many with different policies).
+
+ The interaction between this handler and the urllib2
+ framework is not obvious; it works as follows:
+
+ opener.open(request) is called:
+
+ - that may trigger http_request which will add an authentication header
+ (self.build_header) if enough info is available.
+
+ - the request is sent to the server,
+
+ - if an authentication error is received self.auth_required is called,
+ we acquire the authentication info in the error headers and call
+ self.auth_match to check that we are able to try the
+ authentication and complete the authentication parameters,
+
+ - we call parent.open(request), that may trigger http_request
+ and will add a header (self.build_header), but here we have
+ all the required info (keep in mind that the request and
+ authentication used in the recursive calls are really (and must be)
+ the *same* objects).
+
+ - if the call returns a response, the authentication has been
+ successful and the request authentication parameters have been updated.
+ """
+
+ scheme = None
+ """The scheme as it appears in the server header (lower cased)"""
+
+ _max_retry = 3
+ """We don't want to retry authenticating endlessly"""
+
+ requires_username = True
+ """Whether the auth mechanism requires a username."""
+
+ # The following attributes should be defined by daughter
+ # classes:
+ # - auth_required_header: the header received from the server
+ # - auth_header: the header sent in the request
+
+ def __init__(self):
+ # We want to know when we enter into a try/fail cycle of
+ # authentications so we initialize to None to indicate that we aren't
+ # in such a cycle by default.
+ self._retry_count = None
+
+ def _parse_auth_header(self, server_header):
+ """Parse the authentication header.
+
+ :param server_header: The value of the header sent by the server
+ describing the authentication request.
+
+ :return: A tuple (scheme, remainder), scheme being the first word in the
+ given header (lower cased), remainder may be None.
+ """
+ try:
+ scheme, remainder = server_header.split(None, 1)
+ except ValueError:
+ scheme = server_header
+ remainder = None
+ return (scheme.lower(), remainder)
+
+ def update_auth(self, auth, key, value):
+ """Update a value in auth marking the auth as modified if needed"""
+ old_value = auth.get(key, None)
+ if old_value != value:
+ auth[key] = value
+ auth['modified'] = True
+
+ def auth_required(self, request, headers):
+ """Retry the request if the auth scheme is ours.
+
+ :param request: The request needing authentication.
+ :param headers: The headers for the authentication error response.
+ :return: None or the response for the authenticated request.
+ """
+ # Don't try to authenticate endlessly
+ if self._retry_count is None:
+ # The retries being recursive calls, None identifies the first retry
+ self._retry_count = 1
+ else:
+ self._retry_count += 1
+ if self._retry_count > self._max_retry:
+ # Let's be ready for next round
+ self._retry_count = None
+ return None
+ server_headers = headers.getheaders(self.auth_required_header)
+ if not server_headers:
+ # The http error MUST have the associated
+ # header. This must never happen in production code.
+ raise KeyError('%s not found' % self.auth_required_header)
+
+ auth = self.get_auth(request)
+ auth['modified'] = False
+ # Put some common info in auth if the caller didn't
+ if auth.get('path', None) is None:
+ parsed_url = urlutils.URL.from_string(request.get_full_url())
+ self.update_auth(auth, 'protocol', parsed_url.scheme)
+ self.update_auth(auth, 'host', parsed_url.host)
+ self.update_auth(auth, 'port', parsed_url.port)
+ self.update_auth(auth, 'path', parsed_url.path)
+ # FIXME: the auth handler should be selected at a single place instead
+ # of letting all handlers try to match all headers, but the current
+ # design doesn't allow a simple implementation.
+ for server_header in server_headers:
+ # Several schemes can be proposed by the server, try to match each
+ # one in turn
+ matching_handler = self.auth_match(server_header, auth)
+ if matching_handler:
+ # auth_match may have modified auth (by adding the
+ # password or changing the realm, for example)
+ if (request.get_header(self.auth_header, None) is not None
+ and not auth['modified']):
+ # We already tried that, give up
+ return None
+
+ # Only the most secure scheme proposed by the server should be
+ # used; since the handlers use 'handler_order' to describe that
+ # property, the first handler tried takes precedence and the
+ # others should not attempt to authenticate if the best one
+ # failed.
+ best_scheme = auth.get('best_scheme', None)
+ if best_scheme is None:
+ # At that point, if the current handler doesn't succeed,
+ # the credentials are wrong (or incomplete), but we know
+ # that the associated scheme should be used.
+ best_scheme = auth['best_scheme'] = self.scheme
+ if best_scheme != self.scheme:
+ continue
+
+ if self.requires_username and auth.get('user', None) is None:
+ # Without a known user, we can't authenticate
+ return None
+
+ # Housekeeping
+ request.connection.cleanup_pipe()
+ # Retry the request with an authentication header added
+ response = self.parent.open(request)
+ if response:
+ self.auth_successful(request, response)
+ return response
+ # We are not qualified to handle the authentication.
+ # Note: the authentication error handling will try all
+ # available handlers. If one of them authenticates
+ # successfully, a response will be returned. If none of
+ # them succeeds, None will be returned and the error
+ # handler will raise the 401 'Unauthorized' or the 407
+ # 'Proxy Authentication Required' error.
+ return None
+
+ def add_auth_header(self, request, header):
+ """Add the authentication header to the request"""
+ request.add_unredirected_header(self.auth_header, header)
+
+ def auth_match(self, header, auth):
+ """Check that we are able to handle that authentication scheme.
+
+ The request authentication parameters may need to be
+ updated with info from the server. Some of these
+ parameters, when combined, are considered to be the
+ authentication key; if one of them changes, the
+ authentication result may change. 'user' and 'password'
+ are examples, but some auth schemes may have others
+ (digest's nonce is an example, digest's nonce_count is a
+ *counter-example*). Such parameters must be updated by
+ using the update_auth() method.
+
+ :param header: The authentication header sent by the server.
+ :param auth: The auth parameters already known. They may be
+ updated.
+ :returns: True if we can try to handle the authentication.
+ """
+ raise NotImplementedError(self.auth_match)
+
+ def build_auth_header(self, auth, request):
+ """Build the value of the header used to authenticate.
+
+ :param auth: The auth parameters needed to build the header.
+ :param request: The request needing authentication.
+
+ :return: None or header.
+ """
+ raise NotImplementedError(self.build_auth_header)
+
+ def auth_successful(self, request, response):
+ """The authentification was successful for the request.
+
+ Additional infos may be available in the response.
+
+ :param request: The succesfully authenticated request.
+ :param response: The server response (may contain auth info).
+ """
+ # It may happen that we need to reconnect later, let's be ready
+ self._retry_count = None
+
+ def get_user_password(self, auth):
+ """Ask user for a password if none is already available.
+
+ :param auth: authentication info gathered so far (from the initial url
+ and then during dialog with the server).
+ """
+ auth_conf = config.AuthenticationConfig()
+ user = auth.get('user', None)
+ password = auth.get('password', None)
+ realm = auth['realm']
+ port = auth.get('port', None)
+
+ if user is None:
+ user = auth_conf.get_user(auth['protocol'], auth['host'],
+ port=port, path=auth['path'],
+ realm=realm, ask=True,
+ prompt=self.build_username_prompt(auth))
+ if user is not None and password is None:
+ password = auth_conf.get_password(
+ auth['protocol'], auth['host'], user,
+ port=port,
+ path=auth['path'], realm=realm,
+ prompt=self.build_password_prompt(auth))
+
+ return user, password
+
+ def _build_password_prompt(self, auth):
+ """Build a prompt taking the protocol used into account.
+
+ The AuthHandler is used by http and https, we want that information in
+ the prompt, so we build the prompt from the authentication dict which
+ contains all the needed parts.
+
+ Also, http and proxy AuthHandlers present different prompts to the
+ user. The daughter classes should implement a public
+ build_password_prompt using this method.
+ """
+ prompt = u'%s' % auth['protocol'].upper() + u' %(user)s@%(host)s'
+ realm = auth['realm']
+ if realm is not None:
+ prompt += u", Realm: '%s'" % realm.decode('utf8')
+ prompt += u' password'
+ return prompt
+
+ def _build_username_prompt(self, auth):
+ """Build a prompt taking the protocol used into account.
+
+ The AuthHandler is used by http and https, we want that information in
+ the prompt, so we build the prompt from the authentication dict which
+ contains all the needed parts.
+
+ Also, http and proxy AuthHandlers present different prompts to the
+ user. The daughter classes should implement a public
+ build_username_prompt using this method.
+ """
+ prompt = u'%s' % auth['protocol'].upper() + u' %(host)s'
+ realm = auth['realm']
+ if realm is not None:
+ prompt += u", Realm: '%s'" % realm.decode('utf8')
+ prompt += u' username'
+ return prompt
+
+ def http_request(self, request):
+ """Insert an authentication header if information is available"""
+ auth = self.get_auth(request)
+ if self.auth_params_reusable(auth):
+ self.add_auth_header(request, self.build_auth_header(auth, request))
+ return request
+
+ https_request = http_request # FIXME: Need test
+
+
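Before the concrete schemes, here is a minimal sketch of the contract a daughter class has to fulfil. The 'token' scheme is made up purely for illustration and is not part of bzrlib; in practice the scheme handlers defined below (negotiate, basic, digest) are combined with HTTPAuthHandler or ProxyAuthHandler further down, which supply get_auth()/set_auth() and the http_error_401/407 hooks.

class TokenAuthHandler(AbstractAuthHandler):
    """Hypothetical 'WWW-Authenticate: Token' handler, for illustration only."""

    scheme = 'token'
    handler_order = 510  # urllib2 tries lower handler_order values first
    requires_username = False

    def auth_match(self, header, auth):
        scheme, raw_auth = self._parse_auth_header(header)
        if scheme != self.scheme:
            return False
        self.update_auth(auth, 'scheme', scheme)
        # Only claim the challenge if we actually have a token to send.
        return auth.get('password', None) is not None

    def build_auth_header(self, auth, request):
        return 'Token %s' % auth['password']

    def auth_params_reusable(self, auth):
        # Once a round trip succeeded, keep sending the header preemptively.
        return auth.get('scheme', None) == 'token'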
+class NegotiateAuthHandler(AbstractAuthHandler):
+ """A authentication handler that handles WWW-Authenticate: Negotiate.
+
+ At the moment this handler supports just Kerberos. In the future,
+ NTLM support may also be added.
+ """
+
+ scheme = 'negotiate'
+ handler_order = 480
+ requires_username = False
+
+ def auth_match(self, header, auth):
+ scheme, raw_auth = self._parse_auth_header(header)
+ if scheme != self.scheme:
+ return False
+ self.update_auth(auth, 'scheme', scheme)
+ resp = self._auth_match_kerberos(auth)
+ if resp is None:
+ return False
+ # Optionally should try to authenticate using NTLM here
+ self.update_auth(auth, 'negotiate_response', resp)
+ return True
+
+ def _auth_match_kerberos(self, auth):
+ """Try to create a GSSAPI response for authenticating against a host."""
+ global kerberos, checked_kerberos
+ if kerberos is None and not checked_kerberos:
+ try:
+ import kerberos
+ except ImportError:
+ kerberos = None
+ checked_kerberos = True
+ if kerberos is None:
+ return None
+ ret, vc = kerberos.authGSSClientInit("HTTP@%(host)s" % auth)
+ if ret < 1:
+ trace.warning('Unable to create GSSAPI context for %s: %d',
+ auth['host'], ret)
+ return None
+ ret = kerberos.authGSSClientStep(vc, "")
+ if ret < 0:
+ trace.mutter('authGSSClientStep failed: %d', ret)
+ return None
+ return kerberos.authGSSClientResponse(vc)
+
+ def build_auth_header(self, auth, request):
+ return "Negotiate %s" % auth['negotiate_response']
+
+ def auth_params_reusable(self, auth):
+ # If the auth scheme is known, it means a previous
+ # authentication was successful, all information is
+ # available, no further checks are needed.
+ return (auth.get('scheme', None) == 'negotiate' and
+ auth.get('negotiate_response', None) is not None)
+
+
+class BasicAuthHandler(AbstractAuthHandler):
+ """A custom basic authentication handler."""
+
+ scheme = 'basic'
+ handler_order = 500
+ auth_regexp = re.compile('realm="([^"]*)"', re.I)
+
+ def build_auth_header(self, auth, request):
+ raw = '%s:%s' % (auth['user'], auth['password'])
+ auth_header = 'Basic ' + raw.encode('base64').strip()
+ return auth_header
+
+ def extract_realm(self, header_value):
+ match = self.auth_regexp.search(header_value)
+ realm = None
+ if match:
+ realm = match.group(1)
+ return match, realm
+
+ def auth_match(self, header, auth):
+ scheme, raw_auth = self._parse_auth_header(header)
+ if scheme != self.scheme:
+ return False
+
+ match, realm = self.extract_realm(raw_auth)
+ if match:
+ # Put useful info into auth
+ self.update_auth(auth, 'scheme', scheme)
+ self.update_auth(auth, 'realm', realm)
+ if (auth.get('user', None) is None
+ or auth.get('password', None) is None):
+ user, password = self.get_user_password(auth)
+ self.update_auth(auth, 'user', user)
+ self.update_auth(auth, 'password', password)
+ return match is not None
+
+ def auth_params_reusable(self, auth):
+ # If the auth scheme is known, it means a previous
+ # authentication was successful, all information is
+ # available, no further checks are needed.
+ return auth.get('scheme', None) == 'basic'
+
+
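For reference, the Basic header built by build_auth_header above is just the base64 encoding of user:password. A quick illustration with made-up credentials, using the same Python 2 codec as the handler:

raw = '%s:%s' % ('alice', 'secret')
header = 'Basic ' + raw.encode('base64').strip()
# header == 'Basic YWxpY2U6c2VjcmV0'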
+def get_digest_algorithm_impls(algorithm):
+ H = None
+ KD = None
+ if algorithm == 'MD5':
+ H = lambda x: osutils.md5(x).hexdigest()
+ elif algorithm == 'SHA':
+ H = osutils.sha_string
+ if H is not None:
+ KD = lambda secret, data: H("%s:%s" % (secret, data))
+ return H, KD
+
+
+def get_new_cnonce(nonce, nonce_count):
+ raw = '%s:%d:%s:%s' % (nonce, nonce_count, time.ctime(),
+ urllib2.randombytes(8))
+ return osutils.sha_string(raw)[:16]
+
+
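The DigestAuthHandler below combines these helpers as RFC 2617 prescribes for qop=auth: response = KD(H(A1), nonce:nc:cnonce:qop:H(A2)). A worked example with made-up values, using hashlib directly rather than the osutils wrappers:

import hashlib

H = lambda x: hashlib.md5(x).hexdigest()
KD = lambda secret, data: H('%s:%s' % (secret, data))

user, realm, password = 'alice', 'example', 'secret'
method, uri = 'GET', '/repo/.bzr/smart'
nonce, nc, cnonce, qop = 'abc123', '00000001', 'deadbeef', 'auth'

A1 = '%s:%s:%s' % (user, realm, password)
A2 = '%s:%s' % (method, uri)
response = KD(H(A1), '%s:%s:%s:%s:%s' % (nonce, nc, cnonce, qop, H(A2)))
# 'response' is the value sent in the response="..." field of the header.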
+class DigestAuthHandler(AbstractAuthHandler):
+ """A custom digest authentication handler."""
+
+ scheme = 'digest'
+ # Before basic as digest is a bit more secure and should be preferred
+ handler_order = 490
+
+ def auth_params_reusable(self, auth):
+ # If the auth scheme is known, it means a previous
+ # authentication was successful, all information is
+ # available, no further checks are needed.
+ return auth.get('scheme', None) == 'digest'
+
+ def auth_match(self, header, auth):
+ scheme, raw_auth = self._parse_auth_header(header)
+ if scheme != self.scheme:
+ return False
+
+ # Put the requested authentication info into a dict
+ req_auth = urllib2.parse_keqv_list(urllib2.parse_http_list(raw_auth))
+
+ # Check that we can handle that authentication
+ qop = req_auth.get('qop', None)
+ if qop != 'auth': # No auth-int so far
+ return False
+
+ H, KD = get_digest_algorithm_impls(req_auth.get('algorithm', 'MD5'))
+ if H is None:
+ return False
+
+ realm = req_auth.get('realm', None)
+ # Put useful info into auth
+ self.update_auth(auth, 'scheme', scheme)
+ self.update_auth(auth, 'realm', realm)
+ if auth.get('user', None) is None or auth.get('password', None) is None:
+ user, password = self.get_user_password(auth)
+ self.update_auth(auth, 'user', user)
+ self.update_auth(auth, 'password', password)
+
+ try:
+ if req_auth.get('algorithm', None) is not None:
+ self.update_auth(auth, 'algorithm', req_auth.get('algorithm'))
+ nonce = req_auth['nonce']
+ if auth.get('nonce', None) != nonce:
+ # A new nonce, never used
+ self.update_auth(auth, 'nonce_count', 0)
+ self.update_auth(auth, 'nonce', nonce)
+ self.update_auth(auth, 'qop', qop)
+ auth['opaque'] = req_auth.get('opaque', None)
+ except KeyError:
+ # Some required field is not there
+ return False
+
+ return True
+
+ def build_auth_header(self, auth, request):
+ url_scheme, url_selector = urllib.splittype(request.get_selector())
+ sel_host, uri = urllib.splithost(url_selector)
+
+ A1 = '%s:%s:%s' % (auth['user'], auth['realm'], auth['password'])
+ A2 = '%s:%s' % (request.get_method(), uri)
+
+ nonce = auth['nonce']
+ qop = auth['qop']
+
+ nonce_count = auth['nonce_count'] + 1
+ ncvalue = '%08x' % nonce_count
+ cnonce = get_new_cnonce(nonce, nonce_count)
+
+ H, KD = get_digest_algorithm_impls(auth.get('algorithm', 'MD5'))
+ nonce_data = '%s:%s:%s:%s:%s' % (nonce, ncvalue, cnonce, qop, H(A2))
+ request_digest = KD(H(A1), nonce_data)
+
+ header = 'Digest '
+ header += 'username="%s", realm="%s", nonce="%s"' % (auth['user'],
+ auth['realm'],
+ nonce)
+ header += ', uri="%s"' % uri
+ header += ', cnonce="%s", nc=%s' % (cnonce, ncvalue)
+ header += ', qop="%s"' % qop
+ header += ', response="%s"' % request_digest
+ # Append the optional fields
+ opaque = auth.get('opaque', None)
+ if opaque:
+ header += ', opaque="%s"' % opaque
+ if auth.get('algorithm', None):
+ header += ', algorithm="%s"' % auth.get('algorithm')
+
+ # We have used the nonce once more, update the count
+ auth['nonce_count'] = nonce_count
+
+ return header
+
+
+class HTTPAuthHandler(AbstractAuthHandler):
+ """Custom http authentication handler.
+
+ Send the authentication preemptively to avoid the roundtrip
+ associated with the 401 error and keep the relevant info in
+ the auth request attribute.
+ """
+
+ auth_required_header = 'www-authenticate'
+ auth_header = 'Authorization'
+
+ def get_auth(self, request):
+ """Get the auth params from the request"""
+ return request.auth
+
+ def set_auth(self, request, auth):
+ """Set the auth params for the request"""
+ request.auth = auth
+
+ def build_password_prompt(self, auth):
+ return self._build_password_prompt(auth)
+
+ def build_username_prompt(self, auth):
+ return self._build_username_prompt(auth)
+
+ def http_error_401(self, req, fp, code, msg, headers):
+ return self.auth_required(req, headers)
+
+
+class ProxyAuthHandler(AbstractAuthHandler):
+ """Custom proxy authentication handler.
+
+ Send the authentication preemptively to avoid the roundtrip
+ associated with the 407 error and keep the relevant info in
+ the proxy_auth request attribute.
+ """
+
+ auth_required_header = 'proxy-authenticate'
+ # FIXME: the correct capitalization is Proxy-Authorization,
+ # but python-2.4 urllib2.Request insist on using capitalize()
+ # instead of title().
+ auth_header = 'Proxy-authorization'
+
+ def get_auth(self, request):
+ """Get the auth params from the request"""
+ return request.proxy_auth
+
+ def set_auth(self, request, auth):
+ """Set the auth params for the request"""
+ request.proxy_auth = auth
+
+ def build_password_prompt(self, auth):
+ prompt = self._build_password_prompt(auth)
+ prompt = u'Proxy ' + prompt
+ return prompt
+
+ def build_username_prompt(self, auth):
+ prompt = self._build_username_prompt(auth)
+ prompt = u'Proxy ' + prompt
+ return prompt
+
+ def http_error_407(self, req, fp, code, msg, headers):
+ return self.auth_required(req, headers)
+
+
+class HTTPBasicAuthHandler(BasicAuthHandler, HTTPAuthHandler):
+ """Custom http basic authentication handler"""
+
+
+class ProxyBasicAuthHandler(BasicAuthHandler, ProxyAuthHandler):
+ """Custom proxy basic authentication handler"""
+
+
+class HTTPDigestAuthHandler(DigestAuthHandler, HTTPAuthHandler):
+ """Custom http basic authentication handler"""
+
+
+class ProxyDigestAuthHandler(DigestAuthHandler, ProxyAuthHandler):
+ """Custom proxy basic authentication handler"""
+
+
+class HTTPNegotiateAuthHandler(NegotiateAuthHandler, HTTPAuthHandler):
+ """Custom http negotiate authentication handler"""
+
+
+class ProxyNegotiateAuthHandler(NegotiateAuthHandler, ProxyAuthHandler):
+ """Custom proxy negotiate authentication handler"""
+
+
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ """Process HTTP error responses.
+
+ We don't really process the errors; instead, we let our
+ Transport handle them.
+ """
+
+ accepted_errors = [200, # Ok
+ 206, # Partial content
+ 404, # Not found
+ ]
+ """The error codes the caller will handle.
+
+ This can be specialized in the request on a case-by-case basis, but the
+ common cases are covered here.
+ """
+
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ accepted_errors = request.accepted_errors
+ if accepted_errors is None:
+ accepted_errors = self.accepted_errors
+
+ if code not in accepted_errors:
+ response = self.parent.error('http', request, response,
+ code, msg, hdrs)
+ return response
+
+ https_response = http_response
+
+
+class HTTPDefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
+ """Translate common errors into bzr Exceptions"""
+
+ def http_error_default(self, req, fp, code, msg, hdrs):
+ if code == 403:
+ raise errors.TransportError(
+ 'Server refuses to fulfill the request (403 Forbidden)'
+ ' for %s' % req.get_full_url())
+ else:
+ raise errors.InvalidHttpResponse(req.get_full_url(),
+ 'Unable to handle http code %d: %s'
+ % (code, msg))
+
+
+class Opener(object):
+ """A wrapper around urllib2.build_opener
+
+ Daughter classes can override to build their own specific opener
+ """
+ # TODO: Provide hooks for daughter classes.
+
+ def __init__(self,
+ connection=ConnectionHandler,
+ redirect=HTTPRedirectHandler,
+ error=HTTPErrorProcessor,
+ report_activity=None,
+ ca_certs=None):
+ self._opener = urllib2.build_opener(
+ connection(report_activity=report_activity, ca_certs=ca_certs),
+ redirect, error,
+ ProxyHandler(),
+ HTTPBasicAuthHandler(),
+ HTTPDigestAuthHandler(),
+ HTTPNegotiateAuthHandler(),
+ ProxyBasicAuthHandler(),
+ ProxyDigestAuthHandler(),
+ ProxyNegotiateAuthHandler(),
+ HTTPHandler,
+ HTTPSHandler,
+ HTTPDefaultErrorHandler,
+ )
+
+ self.open = self._opener.open
+ if DEBUG >= 9:
+ # When dealing with handler order, it's easy to mess
+ # things up; the following helps understand which
+ # handler is used, when and for what.
+ import pprint
+ pprint.pprint(self._opener.__dict__)
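A minimal sketch of wiring the opener up, assuming this module is importable as bzrlib.transport.http._urllib2_wrappers (the module name is inferred from the surrounding files). Requests passed to opener.open() must be bzrlib's own Request objects, defined earlier in this module, since the handlers above rely on their auth, proxy_auth and accepted_errors attributes; a plain urllib2.Request would not work here.

from bzrlib.transport.http import _urllib2_wrappers

# Builds the chain of connection, redirect, error, proxy and
# authentication handlers defined above.
opener = _urllib2_wrappers.Opener(report_activity=None, ca_certs=None)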
diff --git a/bzrlib/transport/http/ca_bundle.py b/bzrlib/transport/http/ca_bundle.py
new file mode 100644
index 0000000..900633f
--- /dev/null
+++ b/bzrlib/transport/http/ca_bundle.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Auto-detect of CA bundle for SSL connections"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+from bzrlib.trace import mutter
+
+
+_ca_path = None
+
+
+def get_ca_path(use_cache=True):
+ """Return location of CA bundle"""
+ global _ca_path
+
+ if _ca_path is not None and use_cache:
+ return _ca_path
+
+ # Find CA bundle for SSL
+ # Reimplementation in Python of the magic of the curl command line tool
+ # from "Details on Server SSL Certificates"
+ # http://curl.haxx.se/docs/sslcerts.html
+ #
+ # 4. If you're using the curl command line tool, you can specify your own
+ # CA cert path by setting the environment variable CURL_CA_BUNDLE to the
+ # path of your choice.
+ #
+ # If you're using the curl command line tool on Windows, curl will
+ # search for a CA cert file named "curl-ca-bundle.crt" in these
+ # directories and in this order:
+ # 1. application's directory
+ # 2. current working directory
+ # 3. Windows System directory (e.g. C:\windows\system32)
+ # 4. Windows Directory (e.g. C:\windows)
+ # 5. all directories along %PATH%
+ #
+ # NOTES:
+ # bialix: Windows directories usually listed in PATH env variable
+ # j-a-meinel: bzr should not look in current working dir
+
+ path = os.environ.get('CURL_CA_BUNDLE')
+ if not path and sys.platform == 'win32':
+ dirs = [os.path.realpath(os.path.dirname(sys.argv[0]))] # app dir
+ paths = os.environ.get('PATH')
+ if paths:
+ # don't include the cwd in the search
+ paths = [i for i in paths.split(os.pathsep) if i not in ('', '.')]
+ dirs.extend(paths)
+ for d in dirs:
+ fname = os.path.join(d, "curl-ca-bundle.crt")
+ if os.path.isfile(fname):
+ path = fname
+ break
+ if path:
+ mutter('using CA bundle: %r', path)
+ else:
+ path = ''
+
+ if use_cache:
+ _ca_path = path
+
+ return path
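A minimal sketch of consuming the detected bundle, assuming a Python with ssl.create_default_context (2.7.9 or later); this only illustrates one possible use, not how bzrlib itself wires the path in.

import ssl

from bzrlib.transport.http import ca_bundle

path = ca_bundle.get_ca_path()
if path:
    context = ssl.create_default_context(cafile=path)
else:
    # Fall back to the interpreter's default trust store.
    context = ssl.create_default_context()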
diff --git a/bzrlib/transport/http/response.py b/bzrlib/transport/http/response.py
new file mode 100644
index 0000000..7fc657d
--- /dev/null
+++ b/bzrlib/transport/http/response.py
@@ -0,0 +1,386 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Handlers for HTTP Responses.
+
+The purpose of these classes is to provide a uniform interface for clients
+to standard HTTP responses, single range responses and multipart range
+responses.
+"""
+
+from __future__ import absolute_import
+
+import os
+import httplib
+from cStringIO import StringIO
+import rfc822
+
+from bzrlib import (
+ errors,
+ osutils,
+ )
+
+
+class ResponseFile(object):
+ """A wrapper around the http socket containing the result of a GET request.
+
+ Only read() and seek() (forward) are supported.
+ """
+ def __init__(self, path, infile):
+ """Constructor.
+
+ :param path: File url, for error reports.
+
+ :param infile: File-like socket set at body start.
+ """
+ self._path = path
+ self._file = infile
+ self._pos = 0
+
+ def close(self):
+ """Close this file.
+
+ Dummy implementation for consistency with the 'file' API.
+ """
+
+ def read(self, size=-1):
+ """Read size bytes from the current position in the file.
+
+ :param size: The number of bytes to read. Leave unspecified or pass
+ -1 to read to EOF.
+ """
+ data = self._file.read(size)
+ self._pos += len(data)
+ return data
+
+ def readline(self):
+ data = self._file.readline()
+ self._pos += len(data)
+ return data
+
+ def tell(self):
+ return self._pos
+
+ def seek(self, offset, whence=os.SEEK_SET):
+ if whence == os.SEEK_SET:
+ if offset < self._pos:
+ raise AssertionError(
+ "Can't seek backwards, pos: %s, offset: %s"
+ % (self._pos, offset))
+ to_discard = offset - self._pos
+ elif whence == os.SEEK_CUR:
+ to_discard = offset
+ else:
+ raise AssertionError("Can't seek backwards")
+ if to_discard:
+ # Just discard the unwanted bytes
+ self.read(to_discard)
+
+# A RangeFile expects the following grammar (simplified to outline the
+# assumptions we rely upon).
+
+# file: single_range
+# | multiple_range
+
+# single_range: content_range_header data
+
+# multiple_range: boundary_header boundary (content_range_header data boundary)+
+
+class RangeFile(ResponseFile):
+ """File-like object that allow access to partial available data.
+
+ All accesses should happen sequentially since the acquisition occurs during
+ an http response reception (as sockets can't seek, we simulate the
+ seek by just reading and discarding the data).
+
+ The access pattern is defined by a set of ranges discovered as reading
+ progresses. Only one range is available at a given time, so all accesses
+ should happen with monotonically increasing offsets.
+ """
+
+ # in _checked_read() below, we may have to discard several MB in the worst
+ # case. To avoid buffering that much, we read and discard by chunks
+ # instead. The underlying file is either a socket or a StringIO, so reading
+ # 8k chunks should be fine.
+ _discarded_buf_size = 8192
+
+ # maximum size of read requests -- used to avoid MemoryError issues in recv
+ _max_read_size = 512 * 1024
+
+ def __init__(self, path, infile):
+ """Constructor.
+
+ :param path: File url, for error reports.
+
+ :param infile: File-like socket set at body start.
+ """
+ super(RangeFile, self).__init__(path, infile)
+ self._boundary = None
+ # When using multi parts response, this will be set with the headers
+ # associated with the range currently read.
+ self._headers = None
+ # Default to the whole file of unspecified size
+ self.set_range(0, -1)
+
+ def set_range(self, start, size):
+ """Change the range mapping"""
+ self._start = start
+ self._size = size
+ # Set the new _pos since that's what we want to expose
+ self._pos = self._start
+
+ def set_boundary(self, boundary):
+ """Define the boundary used in a multi parts message.
+
+ The file should be at the beginning of the body; the first range
+ definition is read and taken into account.
+ """
+ self._boundary = boundary
+ # Decode the headers and setup the first range
+ self.read_boundary()
+ self.read_range_definition()
+
+ def read_boundary(self):
+ """Read the boundary headers defining a new range"""
+ boundary_line = '\r\n'
+ while boundary_line == '\r\n':
+ # RFC2616 19.2 Additional CRLFs may precede the first boundary
+ # string entity.
+ # To be on the safe side we allow it before any boundary line
+ boundary_line = self._file.readline()
+
+ if boundary_line == '':
+ # A timeout in the proxy server caused the response to end early.
+ # See launchpad bug 198646.
+ raise errors.HttpBoundaryMissing(
+ self._path,
+ self._boundary)
+
+ if boundary_line != '--' + self._boundary + '\r\n':
+ # rfc822.unquote() incorrectly unquotes strings enclosed in <>
+ # IIS 6 and 7 incorrectly wrap boundary strings in <>
+ # together they make a beautiful bug, which we will be gracious
+ # about here
+ if (self._unquote_boundary(boundary_line) !=
+ '--' + self._boundary + '\r\n'):
+ raise errors.InvalidHttpResponse(
+ self._path,
+ "Expected a boundary (%s) line, got '%s'"
+ % (self._boundary, boundary_line))
+
+ def _unquote_boundary(self, b):
+ return b[:2] + rfc822.unquote(b[2:-2]) + b[-2:]
+
+ def read_range_definition(self):
+ """Read a new range definition in a multi parts message.
+
+ Parse the headers including the empty line following them so that we
+ are ready to read the data itself.
+ """
+ self._headers = httplib.HTTPMessage(self._file, seekable=0)
+ # Extract the range definition
+ content_range = self._headers.getheader('content-range', None)
+ if content_range is None:
+ raise errors.InvalidHttpResponse(
+ self._path,
+ 'Content-Range header missing in a multi-part response')
+ self.set_range_from_header(content_range)
+
+ def set_range_from_header(self, content_range):
+ """Helper to set the new range from its description in the headers"""
+ try:
+ rtype, values = content_range.split()
+ except ValueError:
+ raise errors.InvalidHttpRange(self._path, content_range,
+ 'Malformed header')
+ if rtype != 'bytes':
+ raise errors.InvalidHttpRange(self._path, content_range,
+ "Unsupported range type '%s'" % rtype)
+ try:
+ # We don't need total, but note that it may be either the file size
+ # or '*' if the server can't or doesn't want to return the file
+ # size.
+ start_end, total = values.split('/')
+ start, end = start_end.split('-')
+ start = int(start)
+ end = int(end)
+ except ValueError:
+ raise errors.InvalidHttpRange(self._path, content_range,
+ 'Invalid range values')
+ size = end - start + 1
+ if size <= 0:
+ raise errors.InvalidHttpRange(self._path, content_range,
+ 'Invalid range, size <= 0')
+ self.set_range(start, size)
+
+ def _checked_read(self, size):
+ """Read the file checking for short reads.
+
+ The data read is discarded along the way.
+ """
+ pos = self._pos
+ remaining = size
+ while remaining > 0:
+ data = self._file.read(min(remaining, self._discarded_buf_size))
+ remaining -= len(data)
+ if not data:
+ raise errors.ShortReadvError(self._path, pos, size,
+ size - remaining)
+ self._pos += size
+
+ def _seek_to_next_range(self):
+ # We will cross range boundaries
+ if self._boundary is None:
+ # If we don't have a boundary, we can't find another range
+ raise errors.InvalidRange(self._path, self._pos,
+ "Range (%s, %s) exhausted"
+ % (self._start, self._size))
+ self.read_boundary()
+ self.read_range_definition()
+
+ def read(self, size=-1):
+ """Read size bytes from the current position in the file.
+
+ Reading across ranges is not supported. We rely on the underlying http
+ client to clean the socket if we leave bytes unread. This may occur for
+ the final boundary line of a multipart response or for any range
+ request not entirely consumed by the client (due to offset coalescing)
+
+ :param size: The number of bytes to read. Leave unspecified or pass
+ -1 to read to EOF.
+ """
+ if (self._size > 0
+ and self._pos == self._start + self._size):
+ if size == 0:
+ return ''
+ else:
+ self._seek_to_next_range()
+ elif self._pos < self._start:
+ raise errors.InvalidRange(
+ self._path, self._pos,
+ "Can't read %s bytes before range (%s, %s)"
+ % (size, self._start, self._size))
+ if self._size > 0:
+ if size > 0 and self._pos + size > self._start + self._size:
+ raise errors.InvalidRange(
+ self._path, self._pos,
+ "Can't read %s bytes across range (%s, %s)"
+ % (size, self._start, self._size))
+
+ # read data from file
+ buffer = StringIO()
+ limited = size
+ if self._size > 0:
+ # Don't read past the range definition
+ limited = self._start + self._size - self._pos
+ if size >= 0:
+ limited = min(limited, size)
+ osutils.pumpfile(self._file, buffer, limited, self._max_read_size)
+ data = buffer.getvalue()
+
+ # Update _pos respecting the data effectively read
+ self._pos += len(data)
+ return data
+
+ def seek(self, offset, whence=0):
+ start_pos = self._pos
+ if whence == 0:
+ final_pos = offset
+ elif whence == 1:
+ final_pos = start_pos + offset
+ elif whence == 2:
+ if self._size > 0:
+ final_pos = self._start + self._size + offset # offset < 0
+ else:
+ raise errors.InvalidRange(
+ self._path, self._pos,
+ "RangeFile: can't seek from end while size is unknown")
+ else:
+ raise ValueError("Invalid value %s for whence." % whence)
+
+ if final_pos < self._pos:
+ # Can't seek backwards
+ raise errors.InvalidRange(
+ self._path, self._pos,
+ 'RangeFile: trying to seek backwards to %s' % final_pos)
+
+ if self._size > 0:
+ cur_limit = self._start + self._size
+ while final_pos > cur_limit:
+ # We will cross range boundaries
+ remain = cur_limit - self._pos
+ if remain > 0:
+ # Finish reading the current range
+ self._checked_read(remain)
+ self._seek_to_next_range()
+ cur_limit = self._start + self._size
+
+ size = final_pos - self._pos
+ if size > 0: # size can be < 0 if we crossed a range boundary
+ # We don't need the data, just read it and throw it away
+ self._checked_read(size)
+
+ def tell(self):
+ return self._pos
+
+
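A minimal sketch of driving RangeFile over a multipart body shaped like the grammar sketched above; the boundary, sizes and contents are made up, and the example assumes bzrlib.transport.http.response is importable:

from cStringIO import StringIO

from bzrlib.transport.http.response import RangeFile

# Two ranges of a 25-byte resource, separated by a made-up boundary.
body = ('\r\n--THEBOUNDARY\r\n'
        'Content-Range: bytes 0-4/25\r\n'
        '\r\n'
        '01234'
        '\r\n--THEBOUNDARY\r\n'
        'Content-Range: bytes 10-14/25\r\n'
        '\r\n'
        'ABCDE'
        '\r\n--THEBOUNDARY--\r\n')

f = RangeFile('http://example.com/file', StringIO(body))
f.set_boundary('THEBOUNDARY')
print f.read(5)    # '01234'
f.seek(10)         # discards data up to the next range definition
print f.read(5)    # 'ABCDE'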
+def handle_response(url, code, msg, data):
+ """Interpret the code & headers and wrap the provided data in a RangeFile.
+
+ This is a factory method which returns an appropriate RangeFile based on
+ the code & headers it's given.
+
+ :param url: The url being processed. Mostly for error reporting
+ :param code: The integer HTTP response code
+ :param msg: An HTTPMessage containing the headers for the response
+ :param data: A file-like object that can be read() to get the
+ requested data
+ :return: A file-like object that can seek()+read() the
+ ranges indicated by the headers.
+ """
+ if code == 200:
+ # A whole file
+ rfile = ResponseFile(url, data)
+ elif code == 206:
+ rfile = RangeFile(url, data)
+ content_type = msg.getheader('content-type', None)
+ if content_type is None:
+ # When there is no content-type header we treat the response as
+ # being of type 'application/octet-stream' as per RFC2616 section
+ # 7.2.1.
+ # Therefore it is obviously not multipart
+ content_type = 'application/octet-stream'
+ is_multipart = False
+ else:
+ is_multipart = (msg.getmaintype() == 'multipart'
+ and msg.getsubtype() == 'byteranges')
+
+ if is_multipart:
+ # Full fledged multipart response
+ rfile.set_boundary(msg.getparam('boundary'))
+ else:
+ # A response to a range request, but not multipart
+ content_range = msg.getheader('content-range', None)
+ if content_range is None:
+ raise errors.InvalidHttpResponse(url,
+ 'Missing the Content-Range header in a 206 range response')
+ rfile.set_range_from_header(content_range)
+ else:
+ raise errors.InvalidHttpResponse(url,
+ 'Unknown response code %s' % code)
+
+ return rfile
+
diff --git a/bzrlib/transport/http/wsgi.py b/bzrlib/transport/http/wsgi.py
new file mode 100644
index 0000000..7befd19
--- /dev/null
+++ b/bzrlib/transport/http/wsgi.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""WSGI application for bzr HTTP smart server.
+
+For more information about WSGI, see PEP 333:
+ http://www.python.org/dev/peps/pep-0333/
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+from bzrlib.smart import medium
+from bzrlib.transport import chroot, get_transport
+from bzrlib.urlutils import local_path_to_url
+
+
+def make_app(root, prefix, path_var='REQUEST_URI', readonly=True,
+ load_plugins=True, enable_logging=True):
+ """Convenience function to construct a WSGI bzr smart server.
+
+ :param root: a local path that requests will be relative to.
+ :param prefix: See RelpathSetter.
+ :param path_var: See RelpathSetter.
+ """
+ local_url = local_path_to_url(root)
+ if readonly:
+ base_transport = get_transport('readonly+' + local_url)
+ else:
+ base_transport = get_transport(local_url)
+ if load_plugins:
+ from bzrlib.plugin import load_plugins
+ load_plugins()
+ if enable_logging:
+ import bzrlib.trace
+ bzrlib.trace.enable_default_logging()
+ app = SmartWSGIApp(base_transport, prefix)
+ app = RelpathSetter(app, '', path_var)
+ return app
+
+
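A minimal deployment sketch using the standard library server; the root path and port are made up, and wsgiref populates PATH_INFO rather than REQUEST_URI, hence the path_var override:

from wsgiref.simple_server import make_server

from bzrlib.transport.http import wsgi

app = wsgi.make_app(root='/srv/bzr', prefix='/', path_var='PATH_INFO')
make_server('localhost', 8080, app).serve_forever()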
+class RelpathSetter(object):
+ """WSGI middleware to set 'bzrlib.relpath' in the environ.
+
+ Different servers can invoke a SmartWSGIApp in different ways. This
+ middleware allows an administrator to configure how the SmartWSGIApp will
+ determine what path it should serve for a given request, covering many
+ common situations.
+
+ For example, a request for "/some/prefix/repo/branch/.bzr/smart" received by
+ a typical Apache and mod_fastcgi configuration will set `REQUEST_URI` to
+ "/some/prefix/repo/branch/.bzr/smart". A RelpathSetter with
+ prefix="/some/prefix/" and path_var="REQUEST_URI" will set that request's
+ 'bzrlib.relpath' variable to "repo/branch".
+ """
+
+ def __init__(self, app, prefix='', path_var='REQUEST_URI'):
+ """Constructor.
+
+ :param app: WSGI app to wrap, e.g. a SmartWSGIApp instance.
+ :param path_var: the variable in the WSGI environ to calculate the
+ 'bzrlib.relpath' variable from.
+ :param prefix: a prefix to strip from the variable specified in
+ path_var before setting 'bzrlib.relpath'.
+ """
+ self.app = app
+ self.prefix = prefix
+ self.path_var = path_var
+
+ def __call__(self, environ, start_response):
+ path = environ[self.path_var]
+ suffix = '/.bzr/smart'
+ if not (path.startswith(self.prefix) and path.endswith(suffix)):
+ start_response('404 Not Found', [])
+ return []
+ environ['bzrlib.relpath'] = path[len(self.prefix):-len(suffix)]
+ return self.app(environ, start_response)
+
+
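A toy illustration of the example given in the docstring above, with a wrapped app that just reports what 'bzrlib.relpath' was set to (prefix and URL are the hypothetical values from that docstring, and RelpathSetter is assumed to be in scope, e.g. imported from bzrlib.transport.http.wsgi):

def inner(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [environ['bzrlib.relpath']]

setter = RelpathSetter(inner, prefix='/some/prefix/', path_var='REQUEST_URI')
environ = {'REQUEST_URI': '/some/prefix/repo/branch/.bzr/smart'}
print setter(environ, lambda status, headers: None)  # ['repo/branch']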
+class SmartWSGIApp(object):
+ """A WSGI application for the bzr smart server."""
+
+ def __init__(self, backing_transport, root_client_path='/'):
+ """Constructor.
+
+ :param backing_transport: a transport. Requests will be processed
+ relative to this transport.
+ :param root_client_path: the client path that maps to the root of
+ backing_transport. This is used to interpret relpaths received from
+ the client.
+ """
+ # Use a ChrootServer so that this web application won't
+ # accidentally let people access locations they shouldn't.
+ # e.g. consider a smart server request for "get /etc/passwd" or
+ # something.
+ self.chroot_server = chroot.ChrootServer(backing_transport)
+ self.chroot_server.start_server()
+ self.backing_transport = get_transport(self.chroot_server.get_url())
+ self.root_client_path = root_client_path
+ # While the chroot server can technically be torn down at this point,
+ # as all it does is remove the scheme registration from transport's
+ # protocol dictionary, we don't, *just in case* there are parts of
+ # bzrlib that will invoke 'get_transport' on urls rather than cloning
+ # around the existing transport.
+ #self.chroot_server.stop_server()
+
+ def __call__(self, environ, start_response):
+ """WSGI application callable."""
+ if environ['REQUEST_METHOD'] != 'POST':
+ start_response('405 Method not allowed', [('Allow', 'POST')])
+ return []
+
+ relpath = environ['bzrlib.relpath']
+
+ if not relpath.startswith('/'):
+ relpath = '/' + relpath
+ if not relpath.endswith('/'):
+ relpath += '/'
+
+ # Compare the HTTP path (relpath) and root_client_path, and calculate
+ # new relpath and root_client_path accordingly, to be used to build the
+ # request.
+ if relpath.startswith(self.root_client_path):
+ # The relpath traverses all of the mandatory root client path.
+ # Remove the root_client_path from the relpath, and set
+ # adjusted_rcp to None to tell the request handler that no further
+ # path translation is required.
+ adjusted_rcp = None
+ adjusted_relpath = relpath[len(self.root_client_path):]
+ elif self.root_client_path.startswith(relpath):
+ # The relpath traverses some of the mandatory root client path.
+ # Subtract the relpath from the root_client_path, and set the
+ # relpath to '.'.
+ adjusted_rcp = '/' + self.root_client_path[len(relpath):]
+ adjusted_relpath = '.'
+ else:
+ adjusted_rcp = self.root_client_path
+ adjusted_relpath = relpath
+
+ if adjusted_relpath.startswith('/'):
+ adjusted_relpath = adjusted_relpath[1:]
+ if adjusted_relpath.startswith('/'):
+ raise AssertionError(adjusted_relpath)
+
+ transport = self.backing_transport.clone(adjusted_relpath)
+ out_buffer = StringIO()
+ request_data_length = int(environ['CONTENT_LENGTH'])
+ request_data_bytes = environ['wsgi.input'].read(request_data_length)
+ smart_protocol_request = self.make_request(
+ transport, out_buffer.write, request_data_bytes, adjusted_rcp)
+ if smart_protocol_request.next_read_size() != 0:
+ # The request appears to be incomplete, or perhaps it's just a
+ # newer version we don't understand. Regardless, all we can do
+ # is return an error response in the format of our version of the
+ # protocol.
+ response_data = 'error\x01incomplete request\n'
+ else:
+ response_data = out_buffer.getvalue()
+ headers = [('Content-type', 'application/octet-stream')]
+ headers.append(("Content-Length", str(len(response_data))))
+ start_response('200 OK', headers)
+ return [response_data]
+
+ def make_request(self, transport, write_func, request_bytes, rcp):
+ protocol_factory, unused_bytes = medium._get_protocol_factory_for_bytes(
+ request_bytes)
+ server_protocol = protocol_factory(
+ transport, write_func, rcp, self.backing_transport)
+ server_protocol.accept_bytes(unused_bytes)
+ return server_protocol
diff --git a/bzrlib/transport/local.py b/bzrlib/transport/local.py
new file mode 100644
index 0000000..4bb25c3
--- /dev/null
+++ b/bzrlib/transport/local.py
@@ -0,0 +1,585 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Transport for the local filesystem.
+
+This is a fairly thin wrapper on regular file IO.
+"""
+
+from __future__ import absolute_import
+
+import os
+from stat import ST_MODE, S_ISDIR, S_IMODE
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+import shutil
+
+from bzrlib import (
+ atomicfile,
+ osutils,
+ urlutils,
+ symbol_versioning,
+ )
+from bzrlib.transport import LateReadError
+""")
+
+from bzrlib import transport
+
+
+_append_flags = os.O_CREAT | os.O_APPEND | os.O_WRONLY | osutils.O_BINARY | osutils.O_NOINHERIT
+_put_non_atomic_flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | osutils.O_BINARY | osutils.O_NOINHERIT
+
+
+class LocalTransport(transport.Transport):
+ """This is the transport agent for local filesystem access."""
+
+ def __init__(self, base):
+ """Set the base path where files will be stored."""
+ if not base.startswith('file://'):
+ raise AssertionError("not a file:// url: %r" % base)
+ if base[-1] != '/':
+ base = base + '/'
+
+ # Special case: windows has no "root", but does have
+ # multiple lettered drives inside it. #240910
+ if sys.platform == 'win32' and base == 'file:///':
+ base = ''
+ self._local_base = ''
+ super(LocalTransport, self).__init__(base)
+ return
+
+ super(LocalTransport, self).__init__(base)
+ self._local_base = urlutils.local_path_from_url(base)
+ if self._local_base[-1] != '/':
+ self._local_base = self._local_base + '/'
+
+ def clone(self, offset=None):
+ """Return a new LocalTransport with root at self.base + offset
+ Because the local filesystem does not require a connection,
+ we can just return a new object.
+ """
+ if offset is None:
+ return LocalTransport(self.base)
+ else:
+ abspath = self.abspath(offset)
+ if abspath == 'file://':
+ # fix upwalk for UNC path
+ # when clone from //HOST/path updir recursively
+ # we should stop at least at //HOST part
+ abspath = self.base
+ return LocalTransport(abspath)
+
+ def _abspath(self, relative_reference):
+ """Return a path for use in os calls.
+
+ Several assumptions are made:
+ - relative_reference does not contain '..'
+ - relative_reference is url escaped.
+ """
+ if relative_reference in ('.', ''):
+ # _local_base normally has a trailing slash; strip it so that stat
+ # on a transport pointing to a symlink reads the link not the
+ # referent but be careful of / and c:\
+ return osutils.split(self._local_base)[0]
+ return self._local_base + urlutils.unescape(relative_reference)
+
+ def abspath(self, relpath):
+ """Return the full url to the given relative URL."""
+ # TODO: url escape the result. RBC 20060523.
+ # jam 20060426 Using normpath on the real path, because that ensures
+ # proper handling of stuff like
+ path = osutils.normpath(osutils.pathjoin(
+ self._local_base, urlutils.unescape(relpath)))
+ # on windows, our _local_base may or may not have a drive specified
+ # (ie, it may be "/" or "c:/foo").
+ # If 'relpath' is '/' we *always* get back an abspath without
+ # the drive letter - but if our transport already has a drive letter,
+ # we want our abspaths to have a drive letter too - so handle that
+ # here.
+ if (sys.platform == "win32" and self._local_base[1:2] == ":"
+ and path == '/'):
+ path = self._local_base[:3]
+
+ return urlutils.local_path_to_url(path)
+
+ def local_abspath(self, relpath):
+ """Transform the given relative path URL into the actual path on disk
+
+ This function only exists for the LocalTransport, since it is
+ the only one that has direct local access.
+ This is mostly for stuff like WorkingTree which needs to know
+ the local working directory. The returned path will always contain
+ forward slashes as the path separator, regardless of the platform.
+
+ This function is quite expensive: it calls realpath which resolves
+ symlinks.
+ """
+ absurl = self.abspath(relpath)
+ # mutter(u'relpath %s => base: %s, absurl %s', relpath, self.base, absurl)
+ return urlutils.local_path_from_url(absurl)
+
+ def relpath(self, abspath):
+ """Return the local path portion from a given absolute path.
+ """
+ if abspath is None:
+ abspath = u'.'
+
+ return urlutils.file_relpath(self.base, abspath)
+
+ def has(self, relpath):
+ return os.access(self._abspath(relpath), os.F_OK)
+
+ def get(self, relpath):
+ """Get the file at the given relative path.
+
+ :param relpath: The relative path to the file
+ """
+ canonical_url = self.abspath(relpath)
+ if canonical_url in transport._file_streams:
+ transport._file_streams[canonical_url].flush()
+ try:
+ path = self._abspath(relpath)
+ return osutils.open_file(path, 'rb')
+ except (IOError, OSError),e:
+ if e.errno == errno.EISDIR:
+ return LateReadError(relpath)
+ self._translate_error(e, path)
+
+ def put_file(self, relpath, f, mode=None):
+ """Copy the file-like object into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param f: File-like object.
+ :param mode: The mode for the newly created file,
+ None means just use the default
+ """
+
+ path = relpath
+ try:
+ path = self._abspath(relpath)
+ osutils.check_legal_path(path)
+ fp = atomicfile.AtomicFile(path, 'wb', new_mode=mode)
+ except (IOError, OSError),e:
+ self._translate_error(e, path)
+ try:
+ length = self._pump(f, fp)
+ fp.commit()
+ finally:
+ fp.close()
+ return length
+
+ def put_bytes(self, relpath, bytes, mode=None):
+ """Copy the string into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param bytes: String
+ """
+
+ path = relpath
+ try:
+ path = self._abspath(relpath)
+ osutils.check_legal_path(path)
+ fp = atomicfile.AtomicFile(path, 'wb', new_mode=mode)
+ except (IOError, OSError),e:
+ self._translate_error(e, path)
+ try:
+ if bytes:
+ fp.write(bytes)
+ fp.commit()
+ finally:
+ fp.close()
+
+ def _put_non_atomic_helper(self, relpath, writer,
+ mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ """Common functionality information for the put_*_non_atomic.
+
+ This tracks all the create_parent_dir stuff.
+
+ :param relpath: the path we are putting to.
+ :param writer: A function that takes an os level file descriptor
+ and writes whatever data it needs to write there.
+ :param mode: The final file mode.
+ :param create_parent_dir: Should we be creating the parent directory
+ if it doesn't exist?
+ """
+ abspath = self._abspath(relpath)
+ if mode is None:
+ # os.open() will automatically use the umask
+ local_mode = 0666
+ else:
+ local_mode = mode
+ try:
+ fd = os.open(abspath, _put_non_atomic_flags, local_mode)
+ except (IOError, OSError),e:
+ # We couldn't create the file, maybe we need to create
+ # the parent directory, and try again
+ if (not create_parent_dir
+ or e.errno not in (errno.ENOENT,errno.ENOTDIR)):
+ self._translate_error(e, relpath)
+ parent_dir = os.path.dirname(abspath)
+ if not parent_dir:
+ self._translate_error(e, relpath)
+ self._mkdir(parent_dir, mode=dir_mode)
+ # We created the parent directory, lets try to open the
+ # file again
+ try:
+ fd = os.open(abspath, _put_non_atomic_flags, local_mode)
+ except (IOError, OSError), e:
+ self._translate_error(e, relpath)
+ try:
+ st = os.fstat(fd)
+ if mode is not None and mode != S_IMODE(st.st_mode):
+ # Because of umask, we may still need to chmod the file.
+ # But in the general case, we won't have to
+ osutils.chmod_if_possible(abspath, mode)
+ writer(fd)
+ finally:
+ os.close(fd)
+
+ def put_file_non_atomic(self, relpath, f, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ """Copy the file-like object into the target location.
+
+ This function is not strictly safe to use. It is only meant to
+ be used when you already know that the target does not exist.
+ It is not safe, because it will open and truncate the remote
+ file. So there may be a time when the file has invalid contents.
+
+ :param relpath: The remote location to put the contents.
+ :param f: File-like object.
+ :param mode: Possible access permissions for new file.
+ None means do not set remote permissions.
+ :param create_parent_dir: If we cannot create the target file because
+ the parent directory does not exist, go ahead and
+ create it, and then try again.
+ """
+ def writer(fd):
+ self._pump_to_fd(f, fd)
+ self._put_non_atomic_helper(relpath, writer, mode=mode,
+ create_parent_dir=create_parent_dir,
+ dir_mode=dir_mode)
+
+ def put_bytes_non_atomic(self, relpath, bytes, mode=None,
+ create_parent_dir=False, dir_mode=None):
+ def writer(fd):
+ if bytes:
+ os.write(fd, bytes)
+ self._put_non_atomic_helper(relpath, writer, mode=mode,
+ create_parent_dir=create_parent_dir,
+ dir_mode=dir_mode)
+
+ def iter_files_recursive(self):
+ """Iter the relative paths of files in the transports sub-tree."""
+ queue = list(self.list_dir(u'.'))
+ while queue:
+ relpath = queue.pop(0)
+ st = self.stat(relpath)
+ if S_ISDIR(st[ST_MODE]):
+ for i, basename in enumerate(self.list_dir(relpath)):
+ queue.insert(i, relpath+'/'+basename)
+ else:
+ yield relpath
+
+ def _mkdir(self, abspath, mode=None):
+ """Create a real directory, filtering through mode"""
+ if mode is None:
+ # os.mkdir() will filter through umask
+ local_mode = 0777
+ else:
+ local_mode = mode
+ try:
+ os.mkdir(abspath, local_mode)
+ except (IOError, OSError),e:
+ self._translate_error(e, abspath)
+ if mode is not None:
+ try:
+ osutils.chmod_if_possible(abspath, mode)
+ except (IOError, OSError), e:
+ self._translate_error(e, abspath)
+
+ def mkdir(self, relpath, mode=None):
+ """Create a directory at the given path."""
+ self._mkdir(self._abspath(relpath), mode=mode)
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ abspath = self._abspath(relpath)
+ try:
+ handle = osutils.open_file(abspath, 'wb')
+ except (IOError, OSError),e:
+ self._translate_error(e, abspath)
+ handle.truncate()
+ if mode is not None:
+ self._check_mode_and_size(abspath, handle.fileno(), mode)
+ transport._file_streams[self.abspath(relpath)] = handle
+ return transport.FileFileStream(self, relpath, handle)
+
+ def _get_append_file(self, relpath, mode=None):
+ """Call os.open() for the given relpath"""
+ file_abspath = self._abspath(relpath)
+ if mode is None:
+ # os.open() will automatically use the umask
+ local_mode = 0666
+ else:
+ local_mode = mode
+ try:
+ return file_abspath, os.open(file_abspath, _append_flags, local_mode)
+ except (IOError, OSError),e:
+ self._translate_error(e, relpath)
+
+ def _check_mode_and_size(self, file_abspath, fd, mode=None):
+ """Check the mode of the file, and return the current size"""
+ st = os.fstat(fd)
+ if mode is not None and mode != S_IMODE(st.st_mode):
+ # Because of umask, we may still need to chmod the file.
+ # But in the general case, we won't have to
+ osutils.chmod_if_possible(file_abspath, mode)
+ return st.st_size
+
+ def append_file(self, relpath, f, mode=None):
+ """Append the text in the file-like object into the final location."""
+ file_abspath, fd = self._get_append_file(relpath, mode=mode)
+ try:
+ result = self._check_mode_and_size(file_abspath, fd, mode=mode)
+ self._pump_to_fd(f, fd)
+ finally:
+ os.close(fd)
+ return result
+
+ def append_bytes(self, relpath, bytes, mode=None):
+ """Append the text in the string into the final location."""
+ file_abspath, fd = self._get_append_file(relpath, mode=mode)
+ try:
+ result = self._check_mode_and_size(file_abspath, fd, mode=mode)
+ if bytes:
+ os.write(fd, bytes)
+ finally:
+ os.close(fd)
+ return result
+
+ def _pump_to_fd(self, fromfile, to_fd):
+ """Copy contents of one file to another."""
+ BUFSIZE = 32768
+ while True:
+ b = fromfile.read(BUFSIZE)
+ if not b:
+ break
+ os.write(to_fd, b)
+
+ def copy(self, rel_from, rel_to):
+ """Copy the item at rel_from to the location at rel_to"""
+ path_from = self._abspath(rel_from)
+ path_to = self._abspath(rel_to)
+ try:
+ shutil.copy(path_from, path_to)
+ except (IOError, OSError),e:
+ # TODO: What about path_to?
+ self._translate_error(e, path_from)
+
+ def rename(self, rel_from, rel_to):
+ path_from = self._abspath(rel_from)
+ path_to = self._abspath(rel_to)
+ try:
+ # *don't* call bzrlib.osutils.rename, because we want to
+ # detect conflicting names on rename, and osutils.rename tries to
+ # mask cross-platform differences there
+ os.rename(path_from, path_to)
+ except (IOError, OSError),e:
+ # TODO: What about path_to?
+ self._translate_error(e, path_from)
+
+ def move(self, rel_from, rel_to):
+ """Move the item at rel_from to the location at rel_to"""
+ path_from = self._abspath(rel_from)
+ path_to = self._abspath(rel_to)
+
+ try:
+ # this version will delete the destination if necessary
+ osutils.rename(path_from, path_to)
+ except (IOError, OSError),e:
+ # TODO: What about path_to?
+ self._translate_error(e, path_from)
+
+ def delete(self, relpath):
+ """Delete the item at relpath"""
+ path = relpath
+ try:
+ path = self._abspath(relpath)
+ os.remove(path)
+ except (IOError, OSError),e:
+ self._translate_error(e, path)
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # File URLs are externally usable.
+ return self.base
+
+ def copy_to(self, relpaths, other, mode=None, pb=None):
+ """Copy a set of entries from self into another Transport.
+
+ :param relpaths: A list/generator of entries to be copied.
+ """
+ if isinstance(other, LocalTransport):
+ # Both from & to are on the local filesystem
+ # Unfortunately, I can't think of anything faster than just
+ # copying them across, one by one :(
+ total = self._get_total(relpaths)
+ count = 0
+ for path in relpaths:
+ self._update_pb(pb, 'copy-to', count, total)
+ try:
+ mypath = self._abspath(path)
+ otherpath = other._abspath(path)
+ shutil.copy(mypath, otherpath)
+ if mode is not None:
+ osutils.chmod_if_possible(otherpath, mode)
+ except (IOError, OSError),e:
+ self._translate_error(e, path)
+ count += 1
+ return count
+ else:
+ return super(LocalTransport, self).copy_to(relpaths, other, mode=mode, pb=pb)
+
+ def listable(self):
+ """See Transport.listable."""
+ return True
+
+ def list_dir(self, relpath):
+ """Return a list of all files at the given location.
+ WARNING: many transports do not support this, so try to avoid using
+ it if at all possible.
+ """
+ path = self._abspath(relpath)
+ try:
+ entries = os.listdir(path)
+ except (IOError, OSError), e:
+ self._translate_error(e, path)
+ return [urlutils.escape(entry) for entry in entries]
+
+ def stat(self, relpath):
+ """Return the stat information for a file.
+ """
+ path = relpath
+ try:
+ path = self._abspath(relpath)
+ return os.lstat(path)
+ except (IOError, OSError),e:
+ self._translate_error(e, path)
+
+ def lock_read(self, relpath):
+ """Lock the given file for shared (read) access.
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ from bzrlib.lock import ReadLock
+ path = relpath
+ try:
+ path = self._abspath(relpath)
+ return ReadLock(path)
+ except (IOError, OSError), e:
+ self._translate_error(e, path)
+
+ def lock_write(self, relpath):
+ """Lock the given file for exclusive (write) access.
+ WARNING: many transports do not support this, so try to avoid using it.
+
+ :return: A lock object, which should be passed to Transport.unlock()
+ """
+ from bzrlib.lock import WriteLock
+ return WriteLock(self._abspath(relpath))
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ path = relpath
+ try:
+ path = self._abspath(relpath)
+ os.rmdir(path)
+ except (IOError, OSError),e:
+ self._translate_error(e, path)
+
+ if osutils.host_os_dereferences_symlinks():
+ def readlink(self, relpath):
+ """See Transport.readlink."""
+ return osutils.readlink(self._abspath(relpath))
+
+ if osutils.hardlinks_good():
+ def hardlink(self, source, link_name):
+ """See Transport.link."""
+ try:
+ os.link(self._abspath(source), self._abspath(link_name))
+ except (IOError, OSError), e:
+ self._translate_error(e, source)
+
+ if osutils.has_symlinks():
+ def symlink(self, source, link_name):
+ """See Transport.symlink."""
+ abs_link_dirpath = urlutils.dirname(self.abspath(link_name))
+ source_rel = urlutils.file_relpath(
+ abs_link_dirpath, self.abspath(source))
+
+ try:
+ os.symlink(source_rel, self._abspath(link_name))
+ except (IOError, OSError), e:
+ self._translate_error(e, source_rel)
+
+ def _can_roundtrip_unix_modebits(self):
+ if sys.platform == 'win32':
+ # anyone else?
+ return False
+ else:
+ return True
+
+
+class EmulatedWin32LocalTransport(LocalTransport):
+ """Special transport for testing Win32 [UNC] paths on non-windows"""
+
+ def __init__(self, base):
+ if base[-1] != '/':
+ base = base + '/'
+ super(LocalTransport, self).__init__(base)
+ self._local_base = urlutils._win32_local_path_from_url(base)
+
+ def abspath(self, relpath):
+ path = osutils._win32_normpath(osutils.pathjoin(
+ self._local_base, urlutils.unescape(relpath)))
+ return urlutils._win32_local_path_to_url(path)
+
+ def clone(self, offset=None):
+ """Return a new LocalTransport with root at self.base + offset
+ Because the local filesystem does not require a connection,
+ we can just return a new object.
+ """
+ if offset is None:
+ return EmulatedWin32LocalTransport(self.base)
+ else:
+ abspath = self.abspath(offset)
+ if abspath == 'file://':
+ # fix upwalk for UNC path
+ # when clone from //HOST/path updir recursively
+ # we should stop at least at //HOST part
+ abspath = self.base
+ return EmulatedWin32LocalTransport(abspath)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(LocalTransport, test_server.LocalURLServer),]
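+
+
+# A minimal usage sketch (illustrative only; '/tmp/example' is a hypothetical,
+# pre-existing directory, not part of the original source):
+#
+#   from bzrlib.transport import get_transport
+#   t = get_transport('file:///tmp/example')   # resolves to a LocalTransport
+#   t.mkdir('subdir')
+#   t.put_bytes('subdir/greeting.txt', 'hello\n')
+#   assert t.get_bytes('subdir/greeting.txt') == 'hello\n'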
diff --git a/bzrlib/transport/log.py b/bzrlib/transport/log.py
new file mode 100644
index 0000000..c2d3907
--- /dev/null
+++ b/bzrlib/transport/log.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Transport decorator that logs transport operations to .bzr.log."""
+
+from __future__ import absolute_import
+
+# see also the transportstats plugin, which gives you some summary information
+# in a machine-readable dump
+
+import StringIO
+import cStringIO
+import time
+import types
+
+from bzrlib.trace import mutter
+from bzrlib.transport import decorator
+
+
+class TransportLogDecorator(decorator.TransportDecorator):
+ """Decorator for Transports that logs interesting operations to .bzr.log.
+
+ In general we want to log things that usually take a network round trip
+ and may be slow.
+
+ Not all operations are logged yet.
+
+ See also TransportTraceDecorator, that records a machine-readable log in
+ memory for eg testing.
+ """
+
+ def __init__(self, *args, **kw):
+ super(TransportLogDecorator, self).__init__(*args, **kw)
+ def _make_hook(hookname):
+ def _hook(relpath, *args, **kw):
+ return self._log_and_call(hookname, relpath, *args, **kw)
+ return _hook
+ for methodname in (
+ 'append_bytes',
+ 'append_file',
+ 'copy_to',
+ 'delete',
+ 'get',
+ 'has',
+ 'open_write_stream',
+ 'mkdir',
+ 'move',
+ 'put_bytes', 'put_bytes_non_atomic', 'put_file', 'put_file_non_atomic',
+ 'list_dir', 'lock_read', 'lock_write',
+ 'readv', 'rename', 'rmdir',
+ 'stat',
+ 'unlock',
+ ):
+ setattr(self, methodname, _make_hook(methodname))
+
+ @classmethod
+ def _get_url_prefix(self):
+ return 'log+'
+
+ def iter_files_recursive(self):
+ # needs special handling because it does not have a relpath parameter
+ mutter("%s %s"
+ % ('iter_files_recursive', self._decorated.base))
+ return self._call_and_log_result('iter_files_recursive', (), {})
+
+ def _log_and_call(self, methodname, relpath, *args, **kwargs):
+ if kwargs:
+ kwargs_str = dict(kwargs)
+ else:
+ kwargs_str = ''
+ mutter("%s %s %s %s"
+ % (methodname, self._decorated.abspath(relpath),
+ self._shorten(self._strip_tuple_parens(args)),
+ kwargs_str))
+ return self._call_and_log_result(methodname, (relpath,) + args, kwargs)
+
+ def _call_and_log_result(self, methodname, args, kwargs):
+ before = time.time()
+ try:
+ result = getattr(self._decorated, methodname)(*args, **kwargs)
+ except Exception, e:
+ mutter(" --> %s" % e)
+ mutter(" %.03fs" % (time.time() - before))
+ raise
+ return self._show_result(before, methodname, result)
+
+ def _show_result(self, before, methodname, result):
+ result_len = None
+ if isinstance(result, types.GeneratorType):
+ # We now consume everything from the generator so that we can show
+ # the results and the time it took to get them. However, to keep
+ # compatibility with callers that may specifically expect a result
+ # (see <https://launchpad.net/bugs/340347>) we also return a new
+ # generator, reset to the starting position.
+ result = list(result)
+ return_result = iter(result)
+ else:
+ return_result = result
+ if isinstance(result, (cStringIO.OutputType, StringIO.StringIO)):
+ val = repr(result.getvalue())
+ result_len = len(val)
+ shown_result = "%s(%s) (%d bytes)" % (result.__class__.__name__,
+ self._shorten(val), result_len)
+ elif methodname == 'readv':
+ num_hunks = len(result)
+ total_bytes = sum((len(d) for o,d in result))
+ shown_result = "readv response, %d hunks, %d total bytes" % (
+ num_hunks, total_bytes)
+ result_len = total_bytes
+ else:
+ shown_result = self._shorten(self._strip_tuple_parens(result))
+ mutter(" --> %s" % shown_result)
+ # The log decorator no longer shows the elapsed time or transfer rate
+ # because they're available in the log prefixes and the transport
+ # activity display respectively.
+ if False:
+ elapsed = time.time() - before
+ if result_len and elapsed > 0:
+ # this is the rate of higher-level data, not the raw network
+ # speed using base-10 units (see HACKING.txt).
+ mutter(" %9.03fs %8dkB/s"
+ % (elapsed, result_len/elapsed/1000))
+ else:
+ mutter(" %9.03fs" % (elapsed))
+ return return_result
+
+ def _shorten(self, x):
+ if len(x) > 70:
+ x = x[:67] + '...'
+ return x
+
+ def _strip_tuple_parens(self, t):
+ t = repr(t)
+ if t[0] == '(' and t[-1] == ')':
+ t = t[1:-1]
+ return t
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(TransportLogDecorator, test_server.LogDecoratorServer)]
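+
+
+# A minimal usage sketch (illustrative; assumes the standard 'log+' URL prefix
+# is registered for this decorator and an in-memory backing transport):
+#
+#   from bzrlib.transport import get_transport
+#   t = get_transport('log+memory:///')
+#   t.put_bytes('foo', 'bar')   # the call and its arguments go to .bzr.log
+#   t.get('foo').read()         # the result is summarised in the log as well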
diff --git a/bzrlib/transport/memory.py b/bzrlib/transport/memory.py
new file mode 100644
index 0000000..39fd2f1
--- /dev/null
+++ b/bzrlib/transport/memory.py
@@ -0,0 +1,328 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport that uses memory for its storage.
+
+The contents of the transport will be lost when the object is discarded,
+so this is primarily useful for testing.
+"""
+
+from __future__ import absolute_import
+
+import os
+import errno
+from stat import S_IFREG, S_IFDIR
+from cStringIO import StringIO
+
+from bzrlib import (
+ transport,
+ urlutils,
+ )
+from bzrlib.errors import (
+ FileExists,
+ LockError,
+ InProcessTransport,
+ NoSuchFile,
+ )
+from bzrlib.transport import (
+ AppendBasedFileStream,
+ _file_streams,
+ LateReadError,
+ )
+
+
+
+class MemoryStat(object):
+
+ def __init__(self, size, is_dir, perms):
+ self.st_size = size
+ if not is_dir:
+ if perms is None:
+ perms = 0644
+ self.st_mode = S_IFREG | perms
+ else:
+ if perms is None:
+ perms = 0755
+ self.st_mode = S_IFDIR | perms
+
+
+class MemoryTransport(transport.Transport):
+ """This is an in memory file system for transient data storage."""
+
+ def __init__(self, url=""):
+ """Set the 'base' path where files will be stored."""
+ if url == "":
+ url = "memory:///"
+ if url[-1] != '/':
+ url = url + '/'
+ super(MemoryTransport, self).__init__(url)
+ split = url.find(':') + 3
+ self._scheme = url[:split]
+ self._cwd = url[split:]
+ # dictionaries from absolute path to file mode
+ self._dirs = {'/':None}
+ self._files = {}
+ self._locks = {}
+
+ def clone(self, offset=None):
+ """See Transport.clone()."""
+ path = urlutils.URL._combine_paths(self._cwd, offset)
+ if len(path) == 0 or path[-1] != '/':
+ path += '/'
+ url = self._scheme + path
+ result = self.__class__(url)
+ result._dirs = self._dirs
+ result._files = self._files
+ result._locks = self._locks
+ return result
+
+ def abspath(self, relpath):
+ """See Transport.abspath()."""
+ # while a little slow, this is sufficiently fast to not matter in our
+ # current environment - XXX RBC 20060404 move the clone '..' handling
+ # into here and call abspath from clone
+ temp_t = self.clone(relpath)
+ if temp_t.base.count('/') == 3:
+ return temp_t.base
+ else:
+ return temp_t.base[:-1]
+
+ def append_file(self, relpath, f, mode=None):
+ """See Transport.append_file()."""
+ _abspath = self._abspath(relpath)
+ self._check_parent(_abspath)
+ orig_content, orig_mode = self._files.get(_abspath, ("", None))
+ if mode is None:
+ mode = orig_mode
+ self._files[_abspath] = (orig_content + f.read(), mode)
+ return len(orig_content)
+
+ def _check_parent(self, _abspath):
+ dir = os.path.dirname(_abspath)
+ if dir != '/':
+ if not dir in self._dirs:
+ raise NoSuchFile(_abspath)
+
+ def has(self, relpath):
+ """See Transport.has()."""
+ _abspath = self._abspath(relpath)
+ return (_abspath in self._files) or (_abspath in self._dirs)
+
+ def delete(self, relpath):
+ """See Transport.delete()."""
+ _abspath = self._abspath(relpath)
+ if not _abspath in self._files:
+ raise NoSuchFile(relpath)
+ del self._files[_abspath]
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # MemoryTransports are only accessible in-process
+ # so we raise here
+ raise InProcessTransport(self)
+
+ def get(self, relpath):
+ """See Transport.get()."""
+ _abspath = self._abspath(relpath)
+ if not _abspath in self._files:
+ if _abspath in self._dirs:
+ return LateReadError(relpath)
+ else:
+ raise NoSuchFile(relpath)
+ return StringIO(self._files[_abspath][0])
+
+ def put_file(self, relpath, f, mode=None):
+ """See Transport.put_file()."""
+ _abspath = self._abspath(relpath)
+ self._check_parent(_abspath)
+ bytes = f.read()
+ if type(bytes) is not str:
+ # Although not strictly correct, we raise UnicodeEncodeError to be
+ # compatible with other transports.
+ raise UnicodeEncodeError(
+ 'undefined', bytes, 0, 1,
+ 'put_file must be given a file of bytes, not unicode.')
+ self._files[_abspath] = (bytes, mode)
+ return len(bytes)
+
+ def mkdir(self, relpath, mode=None):
+ """See Transport.mkdir()."""
+ _abspath = self._abspath(relpath)
+ self._check_parent(_abspath)
+ if _abspath in self._dirs:
+ raise FileExists(relpath)
+ self._dirs[_abspath] = mode
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ self.put_bytes(relpath, "", mode)
+ result = AppendBasedFileStream(self, relpath)
+ _file_streams[self.abspath(relpath)] = result
+ return result
+
+ def listable(self):
+ """See Transport.listable."""
+ return True
+
+ def iter_files_recursive(self):
+ for file in self._files:
+ if file.startswith(self._cwd):
+ yield urlutils.escape(file[len(self._cwd):])
+
+ def list_dir(self, relpath):
+ """See Transport.list_dir()."""
+ _abspath = self._abspath(relpath)
+ if _abspath != '/' and _abspath not in self._dirs:
+ raise NoSuchFile(relpath)
+ result = []
+
+ if not _abspath.endswith('/'):
+ _abspath += '/'
+
+ for path_group in self._files, self._dirs:
+ for path in path_group:
+ if path.startswith(_abspath):
+ trailing = path[len(_abspath):]
+ if trailing and '/' not in trailing:
+ result.append(trailing)
+ return map(urlutils.escape, result)
+
+ def rename(self, rel_from, rel_to):
+ """Rename a file or directory; fail if the destination exists"""
+ abs_from = self._abspath(rel_from)
+ abs_to = self._abspath(rel_to)
+ def replace(x):
+ if x == abs_from:
+ x = abs_to
+ elif x.startswith(abs_from + '/'):
+ x = abs_to + x[len(abs_from):]
+ return x
+ def do_renames(container):
+ for path in container:
+ new_path = replace(path)
+ if new_path != path:
+ if new_path in container:
+ raise FileExists(new_path)
+ container[new_path] = container[path]
+ del container[path]
+ do_renames(self._files)
+ do_renames(self._dirs)
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ _abspath = self._abspath(relpath)
+ if _abspath in self._files:
+ self._translate_error(IOError(errno.ENOTDIR, relpath), relpath)
+ for path in self._files:
+ if path.startswith(_abspath + '/'):
+ self._translate_error(IOError(errno.ENOTEMPTY, relpath),
+ relpath)
+ for path in self._dirs:
+ if path.startswith(_abspath + '/') and path != _abspath:
+ self._translate_error(IOError(errno.ENOTEMPTY, relpath), relpath)
+ if not _abspath in self._dirs:
+ raise NoSuchFile(relpath)
+ del self._dirs[_abspath]
+
+ def stat(self, relpath):
+ """See Transport.stat()."""
+ _abspath = self._abspath(relpath)
+ if _abspath in self._files:
+ return MemoryStat(len(self._files[_abspath][0]), False,
+ self._files[_abspath][1])
+ elif _abspath in self._dirs:
+ return MemoryStat(0, True, self._dirs[_abspath])
+ else:
+ raise NoSuchFile(_abspath)
+
+ def lock_read(self, relpath):
+ """See Transport.lock_read()."""
+ return _MemoryLock(self._abspath(relpath), self)
+
+ def lock_write(self, relpath):
+ """See Transport.lock_write()."""
+ return _MemoryLock(self._abspath(relpath), self)
+
+ def _abspath(self, relpath):
+ """Generate an internal absolute path."""
+ relpath = urlutils.unescape(relpath)
+ if relpath[:1] == '/':
+ return relpath
+ cwd_parts = self._cwd.split('/')
+ rel_parts = relpath.split('/')
+ r = []
+ for i in cwd_parts + rel_parts:
+ if i == '..':
+ if not r:
+ raise ValueError("illegal relpath %r under %r"
+ % (relpath, self._cwd))
+ r = r[:-1]
+ elif i == '.' or i == '':
+ pass
+ else:
+ r.append(i)
+ return '/' + '/'.join(r)
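+ # Worked example for _abspath() above (illustrative): with _cwd == '/a/b/',
+ # _abspath('../c') resolves to '/a/c', while an already-absolute path such
+ # as _abspath('/x') is returned unchanged.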
+
+
+class _MemoryLock(object):
+ """This makes a lock."""
+
+ def __init__(self, path, transport):
+ self.path = path
+ self.transport = transport
+ if self.path in self.transport._locks:
+ raise LockError('File %r already locked' % (self.path,))
+ self.transport._locks[self.path] = self
+
+ def unlock(self):
+ del self.transport._locks[self.path]
+ self.transport = None
+
+
+class MemoryServer(transport.Server):
+ """Server for the MemoryTransport for testing with."""
+
+ def start_server(self):
+ self._dirs = {'/':None}
+ self._files = {}
+ self._locks = {}
+ self._scheme = "memory+%s:///" % id(self)
+ def memory_factory(url):
+ from bzrlib.transport import memory
+ result = memory.MemoryTransport(url)
+ result._dirs = self._dirs
+ result._files = self._files
+ result._locks = self._locks
+ return result
+ self._memory_factory = memory_factory
+ transport.register_transport(self._scheme, self._memory_factory)
+
+ def stop_server(self):
+ # unregister this server
+ transport.unregister_transport(self._scheme, self._memory_factory)
+
+ def get_url(self):
+ """See bzrlib.transport.Server.get_url."""
+ return self._scheme
+
+ def get_bogus_url(self):
+ raise NotImplementedError
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ return [(MemoryTransport, MemoryServer),
+ ]
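+
+
+# A minimal usage sketch (illustrative paths, not from the original source):
+#
+#   t = MemoryTransport()
+#   t.mkdir('dir')
+#   t.put_bytes('dir/file', 'contents')
+#   assert t.get_bytes('dir/file') == 'contents'
+#   assert t.list_dir('dir') == ['file']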
diff --git a/bzrlib/transport/nosmart.py b/bzrlib/transport/nosmart.py
new file mode 100644
index 0000000..e2ac9f4
--- /dev/null
+++ b/bzrlib/transport/nosmart.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport that never has a smart medium.
+
+This is mainly useful with HTTP transports, which sometimes have a smart medium
+and sometimes don't. By using this decorator, you can force those transports
+to never have a smart medium.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import errors
+from bzrlib.transport import decorator
+
+
+class NoSmartTransportDecorator(decorator.TransportDecorator):
+ """A decorator for transports that disables get_smart_medium."""
+
+ @classmethod
+ def _get_url_prefix(self):
+ return 'nosmart+'
+
+ def get_smart_medium(self):
+ raise errors.NoSmartMedium(self)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(NoSmartTransportDecorator, test_server.NoSmartTransportServer)]
+
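+# A minimal usage sketch (illustrative; assumes the 'nosmart+' URL prefix is
+# registered for this decorator):
+#
+#   from bzrlib.transport import get_transport
+#   t = get_transport('nosmart+http://example.com/branch/')
+#   t.get_smart_medium()   # always raises errors.NoSmartMedium
+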
diff --git a/bzrlib/transport/pathfilter.py b/bzrlib/transport/pathfilter.py
new file mode 100644
index 0000000..883be82
--- /dev/null
+++ b/bzrlib/transport/pathfilter.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A transport decorator that filters all paths that are passed to it."""
+
+from __future__ import absolute_import
+
+from bzrlib import urlutils
+
+from bzrlib.transport import (
+ register_transport,
+ Server,
+ Transport,
+ unregister_transport,
+ )
+
+
+class PathFilteringServer(Server):
+ """Transport server for PathFilteringTransport.
+
+ It holds the backing_transport and filter_func for PathFilteringTransports.
+ All paths will be passed through filter_func before calling into the
+ backing_transport.
+
+ Note that paths returned from the backing transport are *not* altered in
+ any way. So, depending on the filter_func, PathFilteringTransports might
+ not conform to the usual expectations of Transport behaviour; e.g. 'name'
+ in t.list_dir('dir') might not imply t.has('dir/name') is True! A filter
+ that merely prefixes a constant path segment will be essentially
+ transparent, whereas a filter that does rot13 to paths will break
+ expectations and probably cause confusing errors. So choose your
+ filter_func with care.
+ """
+
+ def __init__(self, backing_transport, filter_func):
+ """Constructor.
+
+ :param backing_transport: a transport
+ :param filter_func: a callable that takes paths, and translates them
+ into paths for use with the backing transport.
+ """
+ self.backing_transport = backing_transport
+ self.filter_func = filter_func
+
+ def _factory(self, url):
+ return PathFilteringTransport(self, url)
+
+ def get_url(self):
+ return self.scheme
+
+ def start_server(self):
+ self.scheme = 'filtered-%d:///' % id(self)
+ register_transport(self.scheme, self._factory)
+
+ def stop_server(self):
+ unregister_transport(self.scheme, self._factory)
+
+
+class PathFilteringTransport(Transport):
+ """A PathFilteringTransport.
+
+ Please see PathFilteringServer for details.
+ """
+
+ def __init__(self, server, base):
+ self.server = server
+ if not base.endswith('/'):
+ base += '/'
+ Transport.__init__(self, base)
+ self.base_path = self.base[len(self.server.scheme)-1:]
+ self.scheme = self.server.scheme
+
+ def _relpath_from_server_root(self, relpath):
+ unfiltered_path = urlutils.URL._combine_paths(self.base_path, relpath)
+ if not unfiltered_path.startswith('/'):
+ raise ValueError(unfiltered_path)
+ return unfiltered_path[1:]
+
+ def _filter(self, relpath):
+ return self.server.filter_func(self._relpath_from_server_root(relpath))
+
+ def _call(self, methodname, relpath, *args):
+ """Helper for Transport methods of the form:
+ operation(path, [other args ...])
+ """
+ backing_method = getattr(self.server.backing_transport, methodname)
+ return backing_method(self._filter(relpath), *args)
+
+ # Transport methods
+ def abspath(self, relpath):
+ # We do *not* want to filter at this point; e.g if the filter is
+ # homedir expansion, self.base == 'this:///' and relpath == '~/foo',
+ # then the abspath should be this:///~/foo (not this:///home/user/foo).
+ # Instead filtering should happen when self's base is passed to the
+ # backing.
+ return self.scheme + self._relpath_from_server_root(relpath)
+
+ def append_file(self, relpath, f, mode=None):
+ return self._call('append_file', relpath, f, mode)
+
+ def _can_roundtrip_unix_modebits(self):
+ return self.server.backing_transport._can_roundtrip_unix_modebits()
+
+ def clone(self, relpath):
+ return self.__class__(self.server, self.abspath(relpath))
+
+ def delete(self, relpath):
+ return self._call('delete', relpath)
+
+ def delete_tree(self, relpath):
+ return self._call('delete_tree', relpath)
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # PathFilteringTransports, like MemoryTransport, depend on in-process
+ # state and thus the base cannot simply be handed out. See the base
+ # class docstring for more details and possible directions. For now we
+ # return the path-filtered URL.
+ return self.server.backing_transport.external_url()
+
+ def get(self, relpath):
+ return self._call('get', relpath)
+
+ def has(self, relpath):
+ return self._call('has', relpath)
+
+ def is_readonly(self):
+ return self.server.backing_transport.is_readonly()
+
+ def iter_files_recursive(self):
+ backing_transport = self.server.backing_transport.clone(
+ self._filter('.'))
+ return backing_transport.iter_files_recursive()
+
+ def listable(self):
+ return self.server.backing_transport.listable()
+
+ def list_dir(self, relpath):
+ return self._call('list_dir', relpath)
+
+ def lock_read(self, relpath):
+ return self._call('lock_read', relpath)
+
+ def lock_write(self, relpath):
+ return self._call('lock_write', relpath)
+
+ def mkdir(self, relpath, mode=None):
+ return self._call('mkdir', relpath, mode)
+
+ def open_write_stream(self, relpath, mode=None):
+ return self._call('open_write_stream', relpath, mode)
+
+ def put_file(self, relpath, f, mode=None):
+ return self._call('put_file', relpath, f, mode)
+
+ def rename(self, rel_from, rel_to):
+ return self._call('rename', rel_from, self._filter(rel_to))
+
+ def rmdir(self, relpath):
+ return self._call('rmdir', relpath)
+
+ def stat(self, relpath):
+ return self._call('stat', relpath)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(PathFilteringTransport, test_server.TestingPathFilteringServer)]
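+
+
+# A minimal usage sketch (illustrative names; 'prefix' is hypothetical): a
+# filter_func that prepends a constant directory on the backing transport.
+#
+#   from bzrlib.transport import get_transport
+#   backing = get_transport('memory:///')
+#   backing.mkdir('prefix')
+#   server = PathFilteringServer(backing, lambda p: 'prefix/' + p)
+#   server.start_server()
+#   t = get_transport(server.get_url())
+#   t.put_bytes('foo', 'data')   # lands at memory:///prefix/foo
+#   server.stop_server()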
diff --git a/bzrlib/transport/readonly.py b/bzrlib/transport/readonly.py
new file mode 100644
index 0000000..bd0de77
--- /dev/null
+++ b/bzrlib/transport/readonly.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2006, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport that adapts another transport to be readonly."""
+
+from __future__ import absolute_import
+
+from bzrlib.errors import TransportNotPossible, NoSmartMedium
+from bzrlib.transport import decorator
+
+
+class ReadonlyTransportDecorator(decorator.TransportDecorator):
+ """A decorator that can convert any transport to be readonly.
+
+ This is requested via the 'readonly+' prefix to get_transport().
+ """
+
+ def append_file(self, relpath, f, mode=None):
+ """See Transport.append_file()."""
+ raise TransportNotPossible('readonly transport')
+
+ def append_bytes(self, relpath, bytes, mode=None):
+ """See Transport.append_bytes()."""
+ raise TransportNotPossible('readonly transport')
+
+ @classmethod
+ def _get_url_prefix(self):
+ """Readonly transport decorators are invoked via 'readonly+'"""
+ return 'readonly+'
+
+ def delete(self, relpath):
+ """See Transport.delete()."""
+ raise TransportNotPossible('readonly transport')
+
+ def delete_tree(self, relpath):
+ """See Transport.delete_tree()."""
+ raise TransportNotPossible('readonly transport')
+
+ def put_file(self, relpath, f, mode=None):
+ """See Transport.put_file()."""
+ raise TransportNotPossible('readonly transport')
+
+ def put_bytes(self, relpath, bytes, mode=None):
+ """See Transport.put_bytes()."""
+ raise TransportNotPossible('readonly transport')
+
+ def mkdir(self, relpath, mode=None):
+ """See Transport.mkdir()."""
+ raise TransportNotPossible('readonly transport')
+
+ def is_readonly(self):
+ """See Transport.is_readonly."""
+ return True
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ raise TransportNotPossible('readonly transport')
+
+ def lock_write(self, relpath):
+ """See Transport.lock_write."""
+ raise TransportNotPossible('readonly transport')
+
+ def get_smart_client(self):
+ raise NoSmartServer(self.base)
+
+ def get_smart_medium(self):
+ raise NoSmartMedium(self)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(ReadonlyTransportDecorator, test_server.ReadonlyServer),]
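+
+
+# A minimal usage sketch (illustrative; assumes the 'readonly+' URL prefix is
+# registered for this decorator):
+#
+#   from bzrlib import errors
+#   from bzrlib.transport import get_transport
+#   t = get_transport('readonly+memory:///')
+#   t.has('foo')                 # read operations pass through
+#   try:
+#       t.put_bytes('foo', 'x')  # any write raises TransportNotPossible
+#   except errors.TransportNotPossible:
+#       pass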
diff --git a/bzrlib/transport/remote.py b/bzrlib/transport/remote.py
new file mode 100644
index 0000000..c697401
--- /dev/null
+++ b/bzrlib/transport/remote.py
@@ -0,0 +1,608 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""RemoteTransport client for the smart-server.
+
+This module shouldn't be accessed directly. The classes defined here should be
+imported from bzrlib.smart.
+"""
+
+from __future__ import absolute_import
+
+__all__ = ['RemoteTransport', 'RemoteTCPTransport', 'RemoteSSHTransport']
+
+from cStringIO import StringIO
+
+from bzrlib import (
+ config,
+ debug,
+ errors,
+ remote,
+ trace,
+ transport,
+ urlutils,
+ )
+from bzrlib.smart import client, medium
+
+
+class _SmartStat(object):
+
+ def __init__(self, size, mode):
+ self.st_size = size
+ self.st_mode = mode
+
+
+class RemoteTransport(transport.ConnectedTransport):
+ """Connection to a smart server.
+
+ The connection holds references to the medium that can be used to send
+ requests to the server.
+
+ The connection has a notion of the current directory to which it's
+ connected; this is incorporated in filenames passed to the server.
+
+ This supports some higher-level RPC operations and can also be treated
+ like a Transport to do file-like operations.
+
+ The connection can be made over a tcp socket, an ssh pipe or a series of
+ http requests. There are concrete subclasses for each type:
+ RemoteTCPTransport, etc.
+ """
+
+ # When making a readv request, cap it at requesting 5MB of data
+ _max_readv_bytes = 5*1024*1024
+
+ # IMPORTANT FOR IMPLEMENTORS: RemoteTransport MUST NOT be given encoding
+ # responsibilities: Put those on SmartClient or similar. This is vital for
+ # the ability to support multiple versions of the smart protocol over time:
+ # RemoteTransport is an adapter from the Transport object model to the
+ # SmartClient model, not an encoder.
+
+ # FIXME: the medium parameter should be private, only the tests require
+ # it. It may be even clearer to define a TestRemoteTransport that handles
+ # the specific cases of providing a _client and/or a _medium, and leave
+ # RemoteTransport as an abstract class.
+ def __init__(self, url, _from_transport=None, medium=None, _client=None):
+ """Constructor.
+
+ :param _from_transport: Another RemoteTransport instance that this
+ one is being cloned from. Attributes such as the medium will
+ be reused.
+
+ :param medium: The medium to use for this RemoteTransport. If None,
+ the medium from the _from_transport is shared. If both this
+ and _from_transport are None, a new medium will be built.
+ _from_transport and medium cannot both be specified.
+
+ :param _client: Override the _SmartClient used by this transport. This
+ should only be used for testing purposes; normally this is
+ determined from the medium.
+ """
+ super(RemoteTransport, self).__init__(
+ url, _from_transport=_from_transport)
+
+ # The medium is the connection, except when we need to share it with
+ # other objects (RemoteBzrDir, RemoteRepository etc). In these cases
+ # what we want to share is really the shared connection.
+
+ if (_from_transport is not None
+ and isinstance(_from_transport, RemoteTransport)):
+ _client = _from_transport._client
+ elif _from_transport is None:
+ # If no _from_transport is specified, we need to initialize the
+ # shared medium.
+ credentials = None
+ if medium is None:
+ medium, credentials = self._build_medium()
+ if 'hpss' in debug.debug_flags:
+ trace.mutter('hpss: Built a new medium: %s',
+ medium.__class__.__name__)
+ self._shared_connection = transport._SharedConnection(medium,
+ credentials,
+ self.base)
+ elif medium is None:
+ # No medium was specified, so share the medium from the
+ # _from_transport.
+ medium = self._shared_connection.connection
+ else:
+ raise AssertionError(
+ "Both _from_transport (%r) and medium (%r) passed to "
+ "RemoteTransport.__init__, but these parameters are mutally "
+ "exclusive." % (_from_transport, medium))
+
+ if _client is None:
+ self._client = client._SmartClient(medium)
+ else:
+ self._client = _client
+
+ def _build_medium(self):
+ """Create the medium if _from_transport does not provide one.
+
+ The medium is analogous to the connection for ConnectedTransport: it
+ allows connection sharing.
+ """
+ # No credentials
+ return None, None
+
+ def _report_activity(self, bytes, direction):
+ """See Transport._report_activity.
+
+ Does nothing; the smart medium will report activity triggered by a
+ RemoteTransport.
+ """
+ pass
+
+ def is_readonly(self):
+ """Smart server transport can do read/write file operations."""
+ try:
+ resp = self._call2('Transport.is_readonly')
+ except errors.UnknownSmartMethod:
+ # XXX: nasty hack: servers before 0.16 don't have a
+ # 'Transport.is_readonly' verb, so we do what clients before 0.16
+ # did: assume False.
+ return False
+ if resp == ('yes', ):
+ return True
+ elif resp == ('no', ):
+ return False
+ else:
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ def get_smart_client(self):
+ return self._get_connection()
+
+ def get_smart_medium(self):
+ return self._get_connection()
+
+ def _remote_path(self, relpath):
+ """Returns the Unicode version of the absolute path for relpath."""
+ return urlutils.URL._combine_paths(self._parsed_url.path, relpath)
+
+ def _call(self, method, *args):
+ resp = self._call2(method, *args)
+ self._ensure_ok(resp)
+
+ def _call2(self, method, *args):
+ """Call a method on the remote server."""
+ try:
+ return self._client.call(method, *args)
+ except errors.ErrorFromSmartServer, err:
+ # The first argument, if present, is always a path.
+ if args:
+ context = {'relpath': args[0]}
+ else:
+ context = {}
+ self._translate_error(err, **context)
+
+ def _call_with_body_bytes(self, method, args, body):
+ """Call a method on the remote server with body bytes."""
+ try:
+ return self._client.call_with_body_bytes(method, args, body)
+ except errors.ErrorFromSmartServer, err:
+ # The first argument, if present, is always a path.
+ if args:
+ context = {'relpath': args[0]}
+ else:
+ context = {}
+ self._translate_error(err, **context)
+
+ def has(self, relpath):
+ """Indicate whether a remote file of the given name exists or not.
+
+ :see: Transport.has()
+ """
+ resp = self._call2('has', self._remote_path(relpath))
+ if resp == ('yes', ):
+ return True
+ elif resp == ('no', ):
+ return False
+ else:
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ def get(self, relpath):
+ """Return file-like object reading the contents of a remote file.
+
+ :see: Transport.get_bytes()/get_file()
+ """
+ return StringIO(self.get_bytes(relpath))
+
+ def get_bytes(self, relpath):
+ remote = self._remote_path(relpath)
+ try:
+ resp, response_handler = self._client.call_expecting_body('get', remote)
+ except errors.ErrorFromSmartServer, err:
+ self._translate_error(err, relpath)
+ if resp != ('ok', ):
+ response_handler.cancel_read_body()
+ raise errors.UnexpectedSmartServerResponse(resp)
+ return response_handler.read_body_bytes()
+
+ def _serialise_optional_mode(self, mode):
+ if mode is None:
+ return ''
+ else:
+ return '%d' % mode
+
+ def mkdir(self, relpath, mode=None):
+ resp = self._call2('mkdir', self._remote_path(relpath),
+ self._serialise_optional_mode(mode))
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ self.put_bytes(relpath, "", mode)
+ result = transport.AppendBasedFileStream(self, relpath)
+ transport._file_streams[self.abspath(relpath)] = result
+ return result
+
+ def put_bytes(self, relpath, upload_contents, mode=None):
+ # FIXME: upload_file is probably not safe for non-ascii characters -
+ # should probably just pass all parameters as length-delimited
+ # strings?
+ if type(upload_contents) is unicode:
+ # Although not strictly correct, we raise UnicodeEncodeError to be
+ # compatible with other transports.
+ raise UnicodeEncodeError(
+ 'undefined', upload_contents, 0, 1,
+ 'put_bytes must be given bytes, not unicode.')
+ resp = self._call_with_body_bytes('put',
+ (self._remote_path(relpath), self._serialise_optional_mode(mode)),
+ upload_contents)
+ self._ensure_ok(resp)
+ return len(upload_contents)
+
+ def put_bytes_non_atomic(self, relpath, bytes, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ """See Transport.put_bytes_non_atomic."""
+ # FIXME: no encoding in the transport!
+ create_parent_str = 'F'
+ if create_parent_dir:
+ create_parent_str = 'T'
+
+ resp = self._call_with_body_bytes(
+ 'put_non_atomic',
+ (self._remote_path(relpath), self._serialise_optional_mode(mode),
+ create_parent_str, self._serialise_optional_mode(dir_mode)),
+ bytes)
+ self._ensure_ok(resp)
+
+ def put_file(self, relpath, upload_file, mode=None):
+ # it's not ideal to seek back, but currently put_file_non_atomic depends
+ # on transports not reading before failing - which is a faulty
+ # assumption I think - RBC 20060915
+ pos = upload_file.tell()
+ try:
+ return self.put_bytes(relpath, upload_file.read(), mode)
+ except:
+ upload_file.seek(pos)
+ raise
+
+ def put_file_non_atomic(self, relpath, f, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ return self.put_bytes_non_atomic(relpath, f.read(), mode=mode,
+ create_parent_dir=create_parent_dir,
+ dir_mode=dir_mode)
+
+ def append_file(self, relpath, from_file, mode=None):
+ return self.append_bytes(relpath, from_file.read(), mode)
+
+ def append_bytes(self, relpath, bytes, mode=None):
+ resp = self._call_with_body_bytes(
+ 'append',
+ (self._remote_path(relpath), self._serialise_optional_mode(mode)),
+ bytes)
+ if resp[0] == 'appended':
+ return int(resp[1])
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ def delete(self, relpath):
+ resp = self._call2('delete', self._remote_path(relpath))
+ self._ensure_ok(resp)
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # the external path for RemoteTransports is the base
+ return self.base
+
+ def recommended_page_size(self):
+ """Return the recommended page size for this transport."""
+ return 64 * 1024
+
+ def _readv(self, relpath, offsets):
+ if not offsets:
+ return
+
+ offsets = list(offsets)
+
+ sorted_offsets = sorted(offsets)
+ coalesced = list(self._coalesce_offsets(sorted_offsets,
+ limit=self._max_readv_combine,
+ fudge_factor=self._bytes_to_read_before_seek,
+ max_size=self._max_readv_bytes))
+
+ # now that we've coalesced things, avoid making enormous requests
+ requests = []
+ cur_request = []
+ cur_len = 0
+ for c in coalesced:
+ if c.length + cur_len > self._max_readv_bytes:
+ requests.append(cur_request)
+ cur_request = [c]
+ cur_len = c.length
+ continue
+ cur_request.append(c)
+ cur_len += c.length
+ if cur_request:
+ requests.append(cur_request)
+ if 'hpss' in debug.debug_flags:
+ trace.mutter('%s.readv %s offsets => %s coalesced'
+ ' => %s requests (%s)',
+ self.__class__.__name__, len(offsets), len(coalesced),
+ len(requests), sum(map(len, requests)))
+ # Cache the results, but only until they have been fulfilled
+ data_map = {}
+ # turn the list of offsets into a single stack to iterate
+ offset_stack = iter(offsets)
+ # using a list so it can be modified when passing down and coming back
+ next_offset = [offset_stack.next()]
+ for cur_request in requests:
+ try:
+ result = self._client.call_with_body_readv_array(
+ ('readv', self._remote_path(relpath),),
+ [(c.start, c.length) for c in cur_request])
+ resp, response_handler = result
+ except errors.ErrorFromSmartServer, err:
+ self._translate_error(err, relpath)
+
+ if resp[0] != 'readv':
+ # This should raise an exception
+ response_handler.cancel_read_body()
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ for res in self._handle_response(offset_stack, cur_request,
+ response_handler,
+ data_map,
+ next_offset):
+ yield res
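+ # Worked example for the request batching above (illustrative): with the
+ # 5MB _max_readv_bytes cap, five coalesced ranges of 2MB each are grouped
+ # into three 'readv' smart calls of [2MB, 2MB], [2MB, 2MB] and [2MB].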
+
+ def _handle_response(self, offset_stack, coalesced, response_handler,
+ data_map, next_offset):
+ cur_offset_and_size = next_offset[0]
+ # FIXME: this should know how many bytes are needed, for clarity.
+ data = response_handler.read_body_bytes()
+ data_offset = 0
+ for c_offset in coalesced:
+ if len(data) < c_offset.length:
+ raise errors.ShortReadvError(relpath, c_offset.start,
+ c_offset.length, actual=len(data))
+ for suboffset, subsize in c_offset.ranges:
+ key = (c_offset.start+suboffset, subsize)
+ this_data = data[data_offset+suboffset:
+ data_offset+suboffset+subsize]
+ # Special case when the data is in-order, rather than packing
+ # into a map and then back out again. Benchmarking shows that
+ # this has 100% hit rate, but leave in the data_map work just
+ # in case.
+ # TODO: Could we get away with using buffer() to avoid the
+ # memory copy? Callers would need to realize they may
+ # not have a real string.
+ if key == cur_offset_and_size:
+ yield cur_offset_and_size[0], this_data
+ cur_offset_and_size = next_offset[0] = offset_stack.next()
+ else:
+ data_map[key] = this_data
+ data_offset += c_offset.length
+
+ # Now that we've read some data, see if we can yield anything back
+ while cur_offset_and_size in data_map:
+ this_data = data_map.pop(cur_offset_and_size)
+ yield cur_offset_and_size[0], this_data
+ cur_offset_and_size = next_offset[0] = offset_stack.next()
+
+ def rename(self, rel_from, rel_to):
+ self._call('rename',
+ self._remote_path(rel_from),
+ self._remote_path(rel_to))
+
+ def move(self, rel_from, rel_to):
+ self._call('move',
+ self._remote_path(rel_from),
+ self._remote_path(rel_to))
+
+ def rmdir(self, relpath):
+ resp = self._call('rmdir', self._remote_path(relpath))
+
+ def _ensure_ok(self, resp):
+ if resp[0] != 'ok':
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ def _translate_error(self, err, relpath=None):
+ remote._translate_error(err, path=relpath)
+
+ def disconnect(self):
+ m = self.get_smart_medium()
+ if m is not None:
+ m.disconnect()
+
+ def stat(self, relpath):
+ resp = self._call2('stat', self._remote_path(relpath))
+ if resp[0] == 'stat':
+ return _SmartStat(int(resp[1]), int(resp[2], 8))
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ ## def lock_read(self, relpath):
+ ## """Lock the given file for shared (read) access.
+ ## :return: A lock object, which should be passed to Transport.unlock()
+ ## """
+ ## # The old RemoteBranch ignore lock for reading, so we will
+ ## # continue that tradition and return a bogus lock object.
+ ## class BogusLock(object):
+ ## def __init__(self, path):
+ ## self.path = path
+ ## def unlock(self):
+ ## pass
+ ## return BogusLock(relpath)
+
+ def listable(self):
+ return True
+
+ def list_dir(self, relpath):
+ resp = self._call2('list_dir', self._remote_path(relpath))
+ if resp[0] == 'names':
+ return [name.encode('ascii') for name in resp[1:]]
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+ def iter_files_recursive(self):
+ resp = self._call2('iter_files_recursive', self._remote_path(''))
+ if resp[0] == 'names':
+ return resp[1:]
+ raise errors.UnexpectedSmartServerResponse(resp)
+
+
+class RemoteTCPTransport(RemoteTransport):
+ """Connection to smart server over plain tcp.
+
+ This is essentially just a factory to get 'RemoteTransport(url,
+ SmartTCPClientMedium)'.
+ """
+
+ def _build_medium(self):
+ client_medium = medium.SmartTCPClientMedium(
+ self._parsed_url.host, self._parsed_url.port, self.base)
+ return client_medium, None
+
+
+class RemoteTCPTransportV2Only(RemoteTransport):
+ """Connection to smart server over plain tcp with the client hard-coded to
+ assume protocol v2 and remote server version <= 1.6.
+
+ This should only be used for testing.
+ """
+
+ def _build_medium(self):
+ client_medium = medium.SmartTCPClientMedium(
+ self._parsed_url.host, self._parsed_url.port, self.base)
+ client_medium._protocol_version = 2
+ client_medium._remember_remote_is_before((1, 6))
+ return client_medium, None
+
+
+class RemoteSSHTransport(RemoteTransport):
+ """Connection to smart server over SSH.
+
+ This is essentially just a factory to get 'RemoteTransport(url,
+ SmartSSHClientMedium)'.
+ """
+
+ def _build_medium(self):
+ location_config = config.LocationConfig(self.base)
+ bzr_remote_path = location_config.get_bzr_remote_path()
+ user = self._parsed_url.user
+ if user is None:
+ auth = config.AuthenticationConfig()
+ user = auth.get_user('ssh', self._parsed_url.host,
+ self._parsed_url.port)
+ ssh_params = medium.SSHParams(self._parsed_url.host,
+ self._parsed_url.port, user, self._parsed_url.password,
+ bzr_remote_path)
+ client_medium = medium.SmartSSHClientMedium(self.base, ssh_params)
+ return client_medium, (user, self._parsed_url.password)
+
+
+class RemoteHTTPTransport(RemoteTransport):
+ """Just a way to connect between a bzr+http:// url and http://.
+
+ This connection operates slightly differently than the RemoteSSHTransport.
+ It uses a plain http:// transport underneath, which defines what remote
+ .bzr/smart URL we are connected to. From there, all paths are sent as
+ relative paths; this way, the remote side can properly de-reference them,
+ since it is likely using rewrite rules to translate an HTTP path into a
+ local path.
+ """
+
+ def __init__(self, base, _from_transport=None, http_transport=None):
+ if http_transport is None:
+ # FIXME: the password may be lost here because it appears in the
+ # url only for an initial construction (when the url came from the
+ # command-line).
+ http_url = base[len('bzr+'):]
+ self._http_transport = transport.get_transport_from_url(http_url)
+ else:
+ self._http_transport = http_transport
+ super(RemoteHTTPTransport, self).__init__(
+ base, _from_transport=_from_transport)
+
+ def _build_medium(self):
+ # We let http_transport take care of the credentials
+ return self._http_transport.get_smart_medium(), None
+
+ def _remote_path(self, relpath):
+ """After connecting, HTTP Transport only deals in relative URLs."""
+ # Adjust the relpath based on which URL this smart transport is
+ # connected to.
+ http_base = urlutils.normalize_url(self.get_smart_medium().base)
+ url = urlutils.join(self.base[len('bzr+'):], relpath)
+ url = urlutils.normalize_url(url)
+ return urlutils.relative_url(http_base, url)
+
+ def clone(self, relative_url):
+ """Make a new RemoteHTTPTransport related to me.
+
+ This is re-implemented rather than using the default
+ RemoteTransport.clone() because we must be careful about the underlying
+ http transport.
+
+ Also, the cloned smart transport will POST to the same .bzr/smart
+ location as this transport (although obviously the relative paths in the
+ smart requests may be different). This is so that the server doesn't
+ have to handle .bzr/smart requests at arbitrary places inside .bzr
+ directories, just at the initial URL the user uses.
+ """
+ if relative_url:
+ abs_url = self.abspath(relative_url)
+ else:
+ abs_url = self.base
+ return RemoteHTTPTransport(abs_url,
+ _from_transport=self,
+ http_transport=self._http_transport)
+
+ def _redirected_to(self, source, target):
+ """See transport._redirected_to"""
+ redirected = self._http_transport._redirected_to(source, target)
+ if (redirected is not None
+ and isinstance(redirected, type(self._http_transport))):
+ return RemoteHTTPTransport('bzr+' + redirected.external_url(),
+ http_transport=redirected)
+ else:
+ # Either None or a transport for a different protocol
+ return redirected
+
+
+class HintingSSHTransport(transport.Transport):
+ """Simple transport that handles ssh:// and points out bzr+ssh://."""
+
+ def __init__(self, url):
+ raise errors.UnsupportedProtocol(url,
+ 'bzr supports bzr+ssh to operate over ssh, use "bzr+%s".' % url)
+
+
+def get_test_permutations():
+ """Return (transport, server) permutations for testing."""
+ ### We may need a little more test framework support to construct an
+ ### appropriate RemoteTransport in the future.
+ from bzrlib.tests import test_server
+ return [(RemoteTCPTransport, test_server.SmartTCPServer_for_testing)]
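+
+
+# A minimal usage sketch (illustrative URL; connecting requires a reachable
+# smart server): the concrete subclass is chosen by the URL scheme when the
+# transport is obtained through bzrlib.transport.get_transport().
+#
+#   from bzrlib.transport import get_transport
+#   t = get_transport('bzr://example.com/repo/')   # a RemoteTCPTransport
+#   t.has('.bzr')   # one 'has' round trip over the smart medium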
diff --git a/bzrlib/transport/sftp.py b/bzrlib/transport/sftp.py
new file mode 100644
index 0000000..edd6b0a
--- /dev/null
+++ b/bzrlib/transport/sftp.py
@@ -0,0 +1,903 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport over SFTP, using paramiko."""
+
+from __future__ import absolute_import
+
+# TODO: Remove the transport-based lock_read and lock_write methods. They'll
+# then raise TransportNotPossible, which will break remote access to any
+# formats which rely on OS-level locks. That should be fine as those formats
+# are pretty old, but these combinations may have to be removed from the test
+# suite. Those formats all date back to 0.7; so we should be able to remove
+# these methods when we officially drop support for those formats.
+
+import bisect
+import errno
+import itertools
+import os
+import random
+import stat
+import sys
+import time
+import warnings
+
+from bzrlib import (
+ config,
+ debug,
+ errors,
+ urlutils,
+ )
+from bzrlib.errors import (FileExists,
+ NoSuchFile,
+ TransportError,
+ LockError,
+ PathError,
+ ParamikoNotPresent,
+ )
+from bzrlib.osutils import fancy_rename
+from bzrlib.trace import mutter, warning
+from bzrlib.transport import (
+ FileFileStream,
+ _file_streams,
+ ssh,
+ ConnectedTransport,
+ )
+
+# Disable one particular warning that comes from paramiko in Python2.5; if
+# this is emitted at the wrong time it tends to cause spurious test failures
+# or at least noise in the test case::
+#
+# [1770/7639 in 86s, 1 known failures, 50 skipped, 2 missing features]
+# test_permissions.TestSftpPermissions.test_new_files
+# /var/lib/python-support/python2.5/paramiko/message.py:226: DeprecationWarning: integer argument expected, got float
+# self.packet.write(struct.pack('>I', n))
+warnings.filterwarnings('ignore',
+ 'integer argument expected, got float',
+ category=DeprecationWarning,
+ module='paramiko.message')
+
+try:
+ import paramiko
+except ImportError, e:
+ raise ParamikoNotPresent(e)
+else:
+ from paramiko.sftp import (SFTP_FLAG_WRITE, SFTP_FLAG_CREATE,
+ SFTP_FLAG_EXCL, SFTP_FLAG_TRUNC,
+ SFTP_OK, CMD_HANDLE, CMD_OPEN)
+ from paramiko.sftp_attr import SFTPAttributes
+ from paramiko.sftp_file import SFTPFile
+
+
+_paramiko_version = getattr(paramiko, '__version_info__', (0, 0, 0))
+# don't use prefetch unless paramiko version >= 1.5.5 (there were bugs earlier)
+_default_do_prefetch = (_paramiko_version >= (1, 5, 5))
+
+
+class SFTPLock(object):
+ """This fakes a lock in a remote location.
+
+ A present lock is indicated just by the existence of a file. This
+ doesn't work well on all transports and they are only used in
+ deprecated storage formats.
+ """
+
+ __slots__ = ['path', 'lock_path', 'lock_file', 'transport']
+
+ def __init__(self, path, transport):
+ self.lock_file = None
+ self.path = path
+ self.lock_path = path + '.write-lock'
+ self.transport = transport
+ try:
+ # RBC 20060103 FIXME should we be using private methods here ?
+ abspath = transport._remote_path(self.lock_path)
+ self.lock_file = transport._sftp_open_exclusive(abspath)
+ except FileExists:
+ raise LockError('File %r already locked' % (self.path,))
+
+ def unlock(self):
+ if not self.lock_file:
+ return
+ self.lock_file.close()
+ self.lock_file = None
+ try:
+ self.transport.delete(self.lock_path)
+ except (NoSuchFile,):
+ # What specific errors should we catch here?
+ pass
+
+
+class _SFTPReadvHelper(object):
+ """A class to help with managing the state of a readv request."""
+
+ # See _get_requests for an explanation.
+ _max_request_size = 32768
+
+ def __init__(self, original_offsets, relpath, _report_activity):
+ """Create a new readv helper.
+
+ :param original_offsets: The original requests given by the caller of
+ readv()
+ :param relpath: The name of the file (if known)
+ :param _report_activity: A Transport._report_activity bound method,
+ to be called as data arrives.
+ """
+ self.original_offsets = list(original_offsets)
+ self.relpath = relpath
+ self._report_activity = _report_activity
+
+ def _get_requests(self):
+ """Break up the offsets into individual requests over sftp.
+
+ The SFTP spec only requires implementers to support 32kB requests. We
+ could try something larger (openssh supports 64kB), but then we have to
+ handle requests that fail.
+        So instead, we just break our coalesced ranges into 32kB chunks and
+        request them asynchronously.
+ Newer versions of paramiko would do the chunking for us, but we want to
+ start processing results right away, so we do it ourselves.
+ """
+ # TODO: Because we issue async requests, we don't 'fudge' any extra
+ # data. I'm not 100% sure that is the best choice.
+
+        # The first thing we do is to coalesce the individual requests as much
+        # as possible, so we don't issue requests <32kB
+ sorted_offsets = sorted(self.original_offsets)
+ coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
+ limit=0, fudge_factor=0))
+ requests = []
+ for c_offset in coalesced:
+ start = c_offset.start
+ size = c_offset.length
+
+ # Break this up into 32kB requests
+ while size > 0:
+ next_size = min(size, self._max_request_size)
+ requests.append((start, next_size))
+ size -= next_size
+ start += next_size
+ if 'sftp' in debug.debug_flags:
+ mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
+ self.relpath, len(sorted_offsets), len(coalesced),
+ len(requests))
+ return requests
+
+ def request_and_yield_offsets(self, fp):
+ """Request the data from the remote machine, yielding the results.
+
+ :param fp: A Paramiko SFTPFile object that supports readv.
+ :return: Yield the data requested by the original readv caller, one by
+ one.
+ """
+ requests = self._get_requests()
+ offset_iter = iter(self.original_offsets)
+ cur_offset, cur_size = offset_iter.next()
+ # paramiko .readv() yields strings that are in the order of the requests
+ # So we track the current request to know where the next data is
+ # being returned from.
+ input_start = None
+ last_end = None
+ buffered_data = []
+ buffered_len = 0
+
+ # This is used to buffer chunks which we couldn't process yet
+ # It is (start, end, data) tuples.
+ data_chunks = []
+ # Create an 'unlimited' data stream, so we stop based on requests,
+ # rather than just because the data stream ended. This lets us detect
+ # short readv.
+ data_stream = itertools.chain(fp.readv(requests),
+ itertools.repeat(None))
+ for (start, length), data in itertools.izip(requests, data_stream):
+ if data is None:
+                # The data stream ended before all the requested ranges were
+                # satisfied.
+                raise errors.ShortReadvError(self.relpath,
+                    start, length, 0)
+ if len(data) != length:
+ raise errors.ShortReadvError(self.relpath,
+ start, length, len(data))
+ self._report_activity(length, 'read')
+ if last_end is None:
+ # This is the first request, just buffer it
+ buffered_data = [data]
+ buffered_len = length
+ input_start = start
+ elif start == last_end:
+ # The data we are reading fits neatly on the previous
+ # buffer, so this is all part of a larger coalesced range.
+ buffered_data.append(data)
+ buffered_len += length
+ else:
+ # We have an 'interrupt' in the data stream. So we know we are
+ # at a request boundary.
+ if buffered_len > 0:
+ # We haven't consumed the buffer so far, so put it into
+ # data_chunks, and continue.
+ buffered = ''.join(buffered_data)
+ data_chunks.append((input_start, buffered))
+ input_start = start
+ buffered_data = [data]
+ buffered_len = length
+ last_end = start + length
+ if input_start == cur_offset and cur_size <= buffered_len:
+ # Simplify the next steps a bit by transforming buffered_data
+ # into a single string. We also have the nice property that
+ # when there is only one string ''.join([x]) == x, so there is
+ # no data copying.
+ buffered = ''.join(buffered_data)
+ # Clean out buffered data so that we keep memory
+ # consumption low
+ del buffered_data[:]
+ buffered_offset = 0
+ # TODO: We *could* also consider the case where cur_offset is in
+            #       the buffered range, even though it doesn't *start*
+ # the buffered range. But for packs we pretty much always
+ # read in order, so you won't get any extra data in the
+ # middle.
+ while (input_start == cur_offset
+ and (buffered_offset + cur_size) <= buffered_len):
+ # We've buffered enough data to process this request, spit it
+ # out
+ cur_data = buffered[buffered_offset:buffered_offset + cur_size]
+ # move the direct pointer into our buffered data
+ buffered_offset += cur_size
+ # Move the start-of-buffer pointer
+ input_start += cur_size
+ # Yield the requested data
+ yield cur_offset, cur_data
+ cur_offset, cur_size = offset_iter.next()
+ # at this point, we've consumed as much of buffered as we can,
+ # so break off the portion that we consumed
+                if buffered_offset == len(buffered):
+ # No tail to leave behind
+ buffered_data = []
+ buffered_len = 0
+ else:
+ buffered = buffered[buffered_offset:]
+ buffered_data = [buffered]
+ buffered_len = len(buffered)
+ # now that the data stream is done, close the handle
+ fp.close()
+ if buffered_len:
+ buffered = ''.join(buffered_data)
+ del buffered_data[:]
+ data_chunks.append((input_start, buffered))
+ if data_chunks:
+ if 'sftp' in debug.debug_flags:
+ mutter('SFTP readv left with %d out-of-order bytes',
+ sum(map(lambda x: len(x[1]), data_chunks)))
+ # We've processed all the readv data, at this point, anything we
+ # couldn't process is in data_chunks. This doesn't happen often, so
+ # this code path isn't optimized
+ # We use an interesting process for data_chunks
+ # Specifically if we have "bisect_left([(start, len, entries)],
+            # (qstart,))"
+ # If start == qstart, then we get the specific node. Otherwise we
+ # get the previous node
+ while True:
+ idx = bisect.bisect_left(data_chunks, (cur_offset,))
+ if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
+ # The data starts here
+ data = data_chunks[idx][1][:cur_size]
+ elif idx > 0:
+ # The data is in a portion of a previous page
+ idx -= 1
+ sub_offset = cur_offset - data_chunks[idx][0]
+ data = data_chunks[idx][1]
+ data = data[sub_offset:sub_offset + cur_size]
+ else:
+ # We are missing the page where the data should be found,
+ # something is wrong
+ data = ''
+ if len(data) != cur_size:
+                raise AssertionError('We must have miscalculated.'
+ ' We expected %d bytes, but only found %d'
+ % (cur_size, len(data)))
+ yield cur_offset, data
+ cur_offset, cur_size = offset_iter.next()
+
+
+class SFTPTransport(ConnectedTransport):
+ """Transport implementation for SFTP access."""
+
+ _do_prefetch = _default_do_prefetch
+ # TODO: jam 20060717 Conceivably these could be configurable, either
+ # by auto-tuning at run-time, or by a configuration (per host??)
+ # but the performance curve is pretty flat, so just going with
+ # reasonable defaults.
+ _max_readv_combine = 200
+ # Having to round trip to the server means waiting for a response,
+ # so it is better to download extra bytes.
+ # 8KiB had good performance for both local and remote network operations
+ _bytes_to_read_before_seek = 8192
+
+ # The sftp spec says that implementations SHOULD allow reads
+ # to be at least 32K. paramiko.readv() does an async request
+ # for the chunks. So we need to keep it within a single request
+ # size for paramiko <= 1.6.1. paramiko 1.6.2 will probably chop
+ # up the request itself, rather than us having to worry about it
+ _max_request_size = 32768
+
+ def _remote_path(self, relpath):
+ """Return the path to be passed along the sftp protocol for relpath.
+
+ :param relpath: is a urlencoded string.
+ """
+ remote_path = self._parsed_url.clone(relpath).path
+ # the initial slash should be removed from the path, and treated as a
+ # homedir relative path (the path begins with a double slash if it is
+ # absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
+ # RBC 20060118 we are not using this as its too user hostile. instead
+ # we are following lftp and using /~/foo to mean '~/foo'
+ # vila--20070602 and leave absolute paths begin with a single slash.
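+        # For example (illustrative URLs): the path portion '/~/repo' maps to
+        # the relative path 'repo', while '/srv/repo' stays as the absolute
+        # path '/srv/repo'.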
+ if remote_path.startswith('/~/'):
+ remote_path = remote_path[3:]
+ elif remote_path == '/~':
+ remote_path = ''
+ return remote_path
+
+ def _create_connection(self, credentials=None):
+ """Create a new connection with the provided credentials.
+
+ :param credentials: The credentials needed to establish the connection.
+
+ :return: The created connection and its associated credentials.
+
+ The credentials are only the password as it may have been entered
+ interactively by the user and may be different from the one provided
+        in the base url at transport creation time.
+ """
+ if credentials is None:
+ password = self._parsed_url.password
+ else:
+ password = credentials
+
+ vendor = ssh._get_ssh_vendor()
+ user = self._parsed_url.user
+ if user is None:
+ auth = config.AuthenticationConfig()
+ user = auth.get_user('ssh', self._parsed_url.host,
+ self._parsed_url.port)
+ connection = vendor.connect_sftp(self._parsed_url.user, password,
+ self._parsed_url.host, self._parsed_url.port)
+ return connection, (user, password)
+
+ def disconnect(self):
+ connection = self._get_connection()
+ if connection is not None:
+ connection.close()
+
+ def _get_sftp(self):
+ """Ensures that a connection is established"""
+ connection = self._get_connection()
+ if connection is None:
+ # First connection ever
+ connection, credentials = self._create_connection()
+ self._set_connection(connection, credentials)
+ return connection
+
+ def has(self, relpath):
+ """
+ Does the target location exist?
+ """
+ try:
+ self._get_sftp().stat(self._remote_path(relpath))
+ # stat result is about 20 bytes, let's say
+ self._report_activity(20, 'read')
+ return True
+ except IOError:
+ return False
+
+ def get(self, relpath):
+ """Get the file at the given relative path.
+
+ :param relpath: The relative path to the file
+ """
+ try:
+ path = self._remote_path(relpath)
+ f = self._get_sftp().file(path, mode='rb')
+ if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
+ f.prefetch()
+ return f
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': error retrieving',
+ failure_exc=errors.ReadError)
+
+ def get_bytes(self, relpath):
+ # reimplement this here so that we can report how many bytes came back
+ f = self.get(relpath)
+ try:
+ bytes = f.read()
+ self._report_activity(len(bytes), 'read')
+ return bytes
+ finally:
+ f.close()
+
+ def _readv(self, relpath, offsets):
+ """See Transport.readv()"""
+ # We overload the default readv() because we want to use a file
+ # that does not have prefetch enabled.
+ # Also, if we have a new paramiko, it implements an async readv()
+ if not offsets:
+ return
+
+ try:
+ path = self._remote_path(relpath)
+ fp = self._get_sftp().file(path, mode='rb')
+ readv = getattr(fp, 'readv', None)
+ if readv:
+ return self._sftp_readv(fp, offsets, relpath)
+ if 'sftp' in debug.debug_flags:
+ mutter('seek and read %s offsets', len(offsets))
+ return self._seek_and_read(fp, offsets, relpath)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': error retrieving')
+
+ def recommended_page_size(self):
+ """See Transport.recommended_page_size().
+
+ For SFTP we suggest a large page size to reduce the overhead
+ introduced by latency.
+ """
+ return 64 * 1024
+
+ def _sftp_readv(self, fp, offsets, relpath):
+ """Use the readv() member of fp to do async readv.
+
+ Then read them using paramiko.readv(). paramiko.readv()
+ does not support ranges > 64K, so it caps the request size, and
+ just reads until it gets all the stuff it wants.
+ """
+ helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
+ return helper.request_and_yield_offsets(fp)
+
+ def put_file(self, relpath, f, mode=None):
+ """
+ Copy the file-like object into the location.
+
+ :param relpath: Location to put the contents, relative to base.
+ :param f: File-like object.
+ :param mode: The final mode for the file
+ """
+ final_path = self._remote_path(relpath)
+ return self._put(final_path, f, mode=mode)
+
+ def _put(self, abspath, f, mode=None):
+ """Helper function so both put() and copy_abspaths can reuse the code"""
+ tmp_abspath = '%s.tmp.%.9f.%d.%d' % (abspath, time.time(),
+ os.getpid(), random.randint(0,0x7FFFFFFF))
+ fout = self._sftp_open_exclusive(tmp_abspath, mode=mode)
+ closed = False
+ try:
+ try:
+ fout.set_pipelined(True)
+ length = self._pump(f, fout)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, tmp_abspath)
+ # XXX: This doesn't truly help like we would like it to.
+ # The problem is that openssh strips sticky bits. So while we
+ # can properly set group write permission, we lose the group
+ # sticky bit. So it is probably best to stop chmodding, and
+ # just tell users that they need to set the umask correctly.
+ # The attr.st_mode = mode, in _sftp_open_exclusive
+ # will handle when the user wants the final mode to be more
+ # restrictive. And then we avoid a round trip. Unless
+ # paramiko decides to expose an async chmod()
+
+ # This is designed to chmod() right before we close.
+ # Because we set_pipelined() earlier, theoretically we might
+ # avoid the round trip for fout.close()
+ if mode is not None:
+ self._get_sftp().chmod(tmp_abspath, mode)
+ fout.close()
+ closed = True
+ self._rename_and_overwrite(tmp_abspath, abspath)
+ return length
+ except Exception, e:
+ # If we fail, try to clean up the temporary file
+ # before we throw the exception
+ # but don't let another exception mess things up
+ # Write out the traceback, because otherwise
+ # the catch and throw destroys it
+ import traceback
+ mutter(traceback.format_exc())
+ try:
+ if not closed:
+ fout.close()
+ self._get_sftp().remove(tmp_abspath)
+ except:
+ # raise the saved except
+ raise e
+ # raise the original with its traceback if we can.
+ raise
+
+ def _put_non_atomic_helper(self, relpath, writer, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ abspath = self._remote_path(relpath)
+
+ # TODO: jam 20060816 paramiko doesn't publicly expose a way to
+ # set the file mode at create time. If it does, use it.
+ # But for now, we just chmod later anyway.
+
+ def _open_and_write_file():
+ """Try to open the target file, raise error on failure"""
+ fout = None
+ try:
+ try:
+ fout = self._get_sftp().file(abspath, mode='wb')
+ fout.set_pipelined(True)
+ writer(fout)
+ except (paramiko.SSHException, IOError), e:
+ self._translate_io_exception(e, abspath,
+ ': unable to open')
+
+ # This is designed to chmod() right before we close.
+ # Because we set_pipelined() earlier, theoretically we might
+ # avoid the round trip for fout.close()
+ if mode is not None:
+ self._get_sftp().chmod(abspath, mode)
+ finally:
+ if fout is not None:
+ fout.close()
+
+ if not create_parent_dir:
+ _open_and_write_file()
+ return
+
+ # Try error handling to create the parent directory if we need to
+ try:
+ _open_and_write_file()
+ except NoSuchFile:
+ # Try to create the parent directory, and then go back to
+ # writing the file
+ parent_dir = os.path.dirname(abspath)
+ self._mkdir(parent_dir, dir_mode)
+ _open_and_write_file()
+
+ def put_file_non_atomic(self, relpath, f, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ """Copy the file-like object into the target location.
+
+ This function is not strictly safe to use. It is only meant to
+ be used when you already know that the target does not exist.
+ It is not safe, because it will open and truncate the remote
+ file. So there may be a time when the file has invalid contents.
+
+ :param relpath: The remote location to put the contents.
+ :param f: File-like object.
+ :param mode: Possible access permissions for new file.
+ None means do not set remote permissions.
+ :param create_parent_dir: If we cannot create the target file because
+ the parent directory does not exist, go ahead and
+ create it, and then try again.
+ """
+ def writer(fout):
+ self._pump(f, fout)
+ self._put_non_atomic_helper(relpath, writer, mode=mode,
+ create_parent_dir=create_parent_dir,
+ dir_mode=dir_mode)
+
+ def put_bytes_non_atomic(self, relpath, bytes, mode=None,
+ create_parent_dir=False,
+ dir_mode=None):
+ def writer(fout):
+ fout.write(bytes)
+ self._put_non_atomic_helper(relpath, writer, mode=mode,
+ create_parent_dir=create_parent_dir,
+ dir_mode=dir_mode)
+
+ def iter_files_recursive(self):
+ """Walk the relative paths of all files in this transport."""
+ # progress is handled by list_dir
+ queue = list(self.list_dir('.'))
+ while queue:
+ relpath = queue.pop(0)
+ st = self.stat(relpath)
+ if stat.S_ISDIR(st.st_mode):
+ for i, basename in enumerate(self.list_dir(relpath)):
+ queue.insert(i, relpath+'/'+basename)
+ else:
+ yield relpath
+
+ def _mkdir(self, abspath, mode=None):
+ if mode is None:
+ local_mode = 0777
+ else:
+ local_mode = mode
+ try:
+ self._report_activity(len(abspath), 'write')
+ self._get_sftp().mkdir(abspath, local_mode)
+ self._report_activity(1, 'read')
+ if mode is not None:
+ # chmod a dir through sftp will erase any sgid bit set
+                # on the server side. So, if the mode bits are already
+                # correct, avoid the chmod. If the mode is not right but
+                # the sgid bit is set, report a warning to the user
+                # suggesting the umask fix.
+ stat = self._get_sftp().lstat(abspath)
+ mode = mode & 0777 # can't set special bits anyway
+ if mode != stat.st_mode & 0777:
+ if stat.st_mode & 06000:
+ warning('About to chmod %s over sftp, which will result'
+ ' in its suid or sgid bits being cleared. If'
+ ' you want to preserve those bits, change your '
+ ' environment on the server to use umask 0%03o.'
+ % (abspath, 0777 - mode))
+ self._get_sftp().chmod(abspath, mode=mode)
+ except (paramiko.SSHException, IOError), e:
+ self._translate_io_exception(e, abspath, ': unable to mkdir',
+ failure_exc=FileExists)
+
+ def mkdir(self, relpath, mode=None):
+ """Create a directory at the given path."""
+ self._mkdir(self._remote_path(relpath), mode=mode)
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ # initialise the file to zero-length
+ # this is three round trips, but we don't use this
+ # api more than once per write_group at the moment so
+ # it is a tolerable overhead. Better would be to truncate
+ # the file after opening. RBC 20070805
+ self.put_bytes_non_atomic(relpath, "", mode)
+ abspath = self._remote_path(relpath)
+ # TODO: jam 20060816 paramiko doesn't publicly expose a way to
+ # set the file mode at create time. If it does, use it.
+ # But for now, we just chmod later anyway.
+ handle = None
+ try:
+ handle = self._get_sftp().file(abspath, mode='wb')
+ handle.set_pipelined(True)
+ except (paramiko.SSHException, IOError), e:
+ self._translate_io_exception(e, abspath,
+ ': unable to open')
+ _file_streams[self.abspath(relpath)] = handle
+ return FileFileStream(self, relpath, handle)
+
+ def _translate_io_exception(self, e, path, more_info='',
+ failure_exc=PathError):
+ """Translate a paramiko or IOError into a friendlier exception.
+
+ :param e: The original exception
+ :param path: The path in question when the error is raised
+ :param more_info: Extra information that can be included,
+ such as what was going on
+ :param failure_exc: Paramiko has the super fun ability to raise completely
+ opaque errors that just set "e.args = ('Failure',)" with
+ no more information.
+ If this parameter is set, it defines the exception
+ to raise in these cases.
+ """
+ # paramiko seems to generate detailless errors.
+ self._translate_error(e, path, raise_generic=False)
+ if getattr(e, 'args', None) is not None:
+ if (e.args == ('No such file or directory',) or
+ e.args == ('No such file',)):
+ raise NoSuchFile(path, str(e) + more_info)
+ if (e.args == ('mkdir failed',) or
+ e.args[0].startswith('syserr: File exists')):
+ raise FileExists(path, str(e) + more_info)
+ # strange but true, for the paramiko server.
+ if (e.args == ('Failure',)):
+ raise failure_exc(path, str(e) + more_info)
+ # Can be something like args = ('Directory not empty:
+ # '/srv/bazaar.launchpad.net/blah...: '
+ # [Errno 39] Directory not empty',)
+ if (e.args[0].startswith('Directory not empty: ')
+ or getattr(e, 'errno', None) == errno.ENOTEMPTY):
+ raise errors.DirectoryNotEmpty(path, str(e))
+ if e.args == ('Operation unsupported',):
+ raise errors.TransportNotPossible()
+ mutter('Raising exception with args %s', e.args)
+ if getattr(e, 'errno', None) is not None:
+ mutter('Raising exception with errno %s', e.errno)
+ raise e
+
+ def append_file(self, relpath, f, mode=None):
+ """
+ Append the text in the file-like object into the final
+ location.
+ """
+ try:
+ path = self._remote_path(relpath)
+ fout = self._get_sftp().file(path, 'ab')
+ if mode is not None:
+ self._get_sftp().chmod(path, mode)
+ result = fout.tell()
+ self._pump(f, fout)
+ return result
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, relpath, ': unable to append')
+
+ def rename(self, rel_from, rel_to):
+ """Rename without special overwriting"""
+ try:
+ self._get_sftp().rename(self._remote_path(rel_from),
+ self._remote_path(rel_to))
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, rel_from,
+ ': unable to rename to %r' % (rel_to))
+
+ def _rename_and_overwrite(self, abs_from, abs_to):
+ """Do a fancy rename on the remote server.
+
+ Using the implementation provided by osutils.
+ """
+ try:
+ sftp = self._get_sftp()
+ fancy_rename(abs_from, abs_to,
+ rename_func=sftp.rename,
+ unlink_func=sftp.remove)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, abs_from,
+ ': unable to rename to %r' % (abs_to))
+
+ def move(self, rel_from, rel_to):
+ """Move the item at rel_from to the location at rel_to"""
+ path_from = self._remote_path(rel_from)
+ path_to = self._remote_path(rel_to)
+ self._rename_and_overwrite(path_from, path_to)
+
+ def delete(self, relpath):
+ """Delete the item at relpath"""
+ path = self._remote_path(relpath)
+ try:
+ self._get_sftp().remove(path)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': unable to delete')
+
+ def external_url(self):
+ """See bzrlib.transport.Transport.external_url."""
+ # the external path for SFTP is the base
+ return self.base
+
+ def listable(self):
+ """Return True if this store supports listing."""
+ return True
+
+ def list_dir(self, relpath):
+ """
+ Return a list of all files at the given location.
+ """
+ # does anything actually use this?
+ # -- Unknown
+ # This is at least used by copy_tree for remote upgrades.
+ # -- David Allouche 2006-08-11
+ path = self._remote_path(relpath)
+ try:
+ entries = self._get_sftp().listdir(path)
+ self._report_activity(sum(map(len, entries)), 'read')
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': failed to list_dir')
+ return [urlutils.escape(entry) for entry in entries]
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ path = self._remote_path(relpath)
+ try:
+ return self._get_sftp().rmdir(path)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': failed to rmdir')
+
+ def stat(self, relpath):
+ """Return the stat information for a file."""
+ path = self._remote_path(relpath)
+ try:
+ return self._get_sftp().lstat(path)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': unable to stat')
+
+ def readlink(self, relpath):
+ """See Transport.readlink."""
+ path = self._remote_path(relpath)
+ try:
+ return self._get_sftp().readlink(path)
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, path, ': unable to readlink')
+
+ def symlink(self, source, link_name):
+ """See Transport.symlink."""
+ try:
+ conn = self._get_sftp()
+ sftp_retval = conn.symlink(source, link_name)
+ if SFTP_OK != sftp_retval:
+ raise TransportError(
+ '%r: unable to create symlink to %r' % (link_name, source),
+ sftp_retval
+ )
+ except (IOError, paramiko.SSHException), e:
+ self._translate_io_exception(e, link_name,
+ ': unable to create symlink to %r' % (source))
+
+ def lock_read(self, relpath):
+ """
+ Lock the given file for shared (read) access.
+ :return: A lock object, which has an unlock() member function
+ """
+ # FIXME: there should be something clever i can do here...
+ class BogusLock(object):
+ def __init__(self, path):
+ self.path = path
+ def unlock(self):
+ pass
+ return BogusLock(relpath)
+
+ def lock_write(self, relpath):
+ """
+ Lock the given file for exclusive (write) access.
+        WARNING: many transports do not support this, so try to avoid using it
+
+ :return: A lock object, which has an unlock() member function
+ """
+ # This is a little bit bogus, but basically, we create a file
+ # which should not already exist, and if it does, we assume
+        # that there is a lock, and if it doesn't, then we assume
+ # that we have taken the lock.
+ return SFTPLock(relpath, self)
+
+ def _sftp_open_exclusive(self, abspath, mode=None):
+ """Open a remote path exclusively.
+
+ SFTP supports O_EXCL (SFTP_FLAG_EXCL), which fails if
+ the file already exists. However it does not expose this
+ at the higher level of SFTPClient.open(), so we have to
+ sneak away with it.
+
+ WARNING: This breaks the SFTPClient abstraction, so it
+ could easily break against an updated version of paramiko.
+
+ :param abspath: The remote absolute path where the file should be opened
+ :param mode: The mode permissions bits for the new file
+ """
+ # TODO: jam 20060816 Paramiko >= 1.6.2 (probably earlier) supports
+ # using the 'x' flag to indicate SFTP_FLAG_EXCL.
+ # However, there is no way to set the permission mode at open
+ # time using the sftp_client.file() functionality.
+ path = self._get_sftp()._adjust_cwd(abspath)
+ # mutter('sftp abspath %s => %s', abspath, path)
+ attr = SFTPAttributes()
+ if mode is not None:
+ attr.st_mode = mode
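+        # The flag combination below is the SFTP equivalent of opening for
+        # write with create + truncate + exclusive semantics, so the open
+        # fails if the file already exists on the server.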
+ omode = (SFTP_FLAG_WRITE | SFTP_FLAG_CREATE
+ | SFTP_FLAG_TRUNC | SFTP_FLAG_EXCL)
+ try:
+ t, msg = self._get_sftp()._request(CMD_OPEN, path, omode, attr)
+ if t != CMD_HANDLE:
+ raise TransportError('Expected an SFTP handle')
+ handle = msg.get_string()
+ return SFTPFile(self._get_sftp(), handle, 'wb', -1)
+ except (paramiko.SSHException, IOError), e:
+ self._translate_io_exception(e, abspath, ': unable to open',
+ failure_exc=FileExists)
+
+ def _can_roundtrip_unix_modebits(self):
+ if sys.platform == 'win32':
+ # anyone else?
+ return False
+ else:
+ return True
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import stub_sftp
+ return [(SFTPTransport, stub_sftp.SFTPAbsoluteServer),
+ (SFTPTransport, stub_sftp.SFTPHomeDirServer),
+ (SFTPTransport, stub_sftp.SFTPSiblingAbsoluteServer),
+ ]
diff --git a/bzrlib/transport/ssh.py b/bzrlib/transport/ssh.py
new file mode 100644
index 0000000..f3882eb
--- /dev/null
+++ b/bzrlib/transport/ssh.py
@@ -0,0 +1,766 @@
+# Copyright (C) 2006-2011 Robey Pointer <robey@lag.net>
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Foundation SSH support for SFTP and smart server."""
+
+from __future__ import absolute_import
+
+import errno
+import getpass
+import logging
+import os
+import socket
+import subprocess
+import sys
+
+from bzrlib import (
+ config,
+ errors,
+ osutils,
+ trace,
+ ui,
+ )
+
+try:
+ import paramiko
+except ImportError, e:
+ # If we have an ssh subprocess, we don't strictly need paramiko for all ssh
+ # access
+ paramiko = None
+else:
+ from paramiko.sftp_client import SFTPClient
+
+
+SYSTEM_HOSTKEYS = {}
+BZR_HOSTKEYS = {}
+
+
+_paramiko_version = getattr(paramiko, '__version_info__', (0, 0, 0))
+
+# Paramiko 1.5 tries to open a socket.AF_UNIX in order to connect
+# to ssh-agent. That attribute doesn't exist on win32 (it does in cygwin)
+# so we get an AttributeError exception. So we will not try to
+# connect to an agent if we are on win32 and using Paramiko older than 1.6
+_use_ssh_agent = (sys.platform != 'win32' or _paramiko_version >= (1, 6, 0))
+
+
+class SSHVendorManager(object):
+ """Manager for manage SSH vendors."""
+
+    # Note, although at first sight the class interface seems similar to
+ # bzrlib.registry.Registry it is not possible/convenient to directly use
+ # the Registry because the class just has "get()" interface instead of the
+ # Registry's "get(key)".
+
+ def __init__(self):
+ self._ssh_vendors = {}
+ self._cached_ssh_vendor = None
+ self._default_ssh_vendor = None
+
+ def register_default_vendor(self, vendor):
+ """Register default SSH vendor."""
+ self._default_ssh_vendor = vendor
+
+ def register_vendor(self, name, vendor):
+ """Register new SSH vendor by name."""
+ self._ssh_vendors[name] = vendor
+
+ def clear_cache(self):
+ """Clear previously cached lookup result."""
+ self._cached_ssh_vendor = None
+
+ def _get_vendor_by_environment(self, environment=None):
+ """Return the vendor or None based on BZR_SSH environment variable.
+
+ :raises UnknownSSH: if the BZR_SSH environment variable contains
+ unknown vendor name
+ """
+ if environment is None:
+ environment = os.environ
+ if 'BZR_SSH' in environment:
+ vendor_name = environment['BZR_SSH']
+ try:
+ vendor = self._ssh_vendors[vendor_name]
+ except KeyError:
+ vendor = self._get_vendor_from_path(vendor_name)
+ if vendor is None:
+ raise errors.UnknownSSH(vendor_name)
+ vendor.executable_path = vendor_name
+ return vendor
+ return None
+
+ def _get_ssh_version_string(self, args):
+ """Return SSH version string from the subprocess."""
+ try:
+ p = subprocess.Popen(args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **os_specific_subprocess_params())
+ stdout, stderr = p.communicate()
+ except OSError:
+ stdout = stderr = ''
+ return stdout + stderr
+
+ def _get_vendor_by_version_string(self, version, progname):
+ """Return the vendor or None based on output from the subprocess.
+
+ :param version: The output of 'ssh -V' like command.
+        :param progname: The name of the program that was run.
+ """
+ vendor = None
+ if 'OpenSSH' in version:
+ trace.mutter('ssh implementation is OpenSSH')
+ vendor = OpenSSHSubprocessVendor()
+ elif 'SSH Secure Shell' in version:
+ trace.mutter('ssh implementation is SSH Corp.')
+ vendor = SSHCorpSubprocessVendor()
+ elif 'lsh' in version:
+ trace.mutter('ssh implementation is GNU lsh.')
+ vendor = LSHSubprocessVendor()
+ # As plink user prompts are not handled currently, don't auto-detect
+ # it by inspection below, but keep this vendor detection for if a path
+ # is given in BZR_SSH. See https://bugs.launchpad.net/bugs/414743
+ elif 'plink' in version and progname == 'plink':
+ # Checking if "plink" was the executed argument as Windows
+ # sometimes reports 'ssh -V' incorrectly with 'plink' in its
+ # version. See https://bugs.launchpad.net/bzr/+bug/107155
+ trace.mutter("ssh implementation is Putty's plink.")
+ vendor = PLinkSubprocessVendor()
+ return vendor
+
+ def _get_vendor_by_inspection(self):
+ """Return the vendor or None by checking for known SSH implementations."""
+ version = self._get_ssh_version_string(['ssh', '-V'])
+ return self._get_vendor_by_version_string(version, "ssh")
+
+ def _get_vendor_from_path(self, path):
+ """Return the vendor or None using the program at the given path"""
+ version = self._get_ssh_version_string([path, '-V'])
+ return self._get_vendor_by_version_string(version,
+ os.path.splitext(os.path.basename(path))[0])
+
+ def get_vendor(self, environment=None):
+ """Find out what version of SSH is on the system.
+
+        :raises SSHVendorNotFound: if no SSH vendor is found
+ :raises UnknownSSH: if the BZR_SSH environment variable contains
+ unknown vendor name
+ """
+ if self._cached_ssh_vendor is None:
+ vendor = self._get_vendor_by_environment(environment)
+ if vendor is None:
+ vendor = self._get_vendor_by_inspection()
+ if vendor is None:
+ trace.mutter('falling back to default implementation')
+ vendor = self._default_ssh_vendor
+ if vendor is None:
+ raise errors.SSHVendorNotFound()
+ self._cached_ssh_vendor = vendor
+ return self._cached_ssh_vendor
+
+_ssh_vendor_manager = SSHVendorManager()
+_get_ssh_vendor = _ssh_vendor_manager.get_vendor
+register_default_ssh_vendor = _ssh_vendor_manager.register_default_vendor
+register_ssh_vendor = _ssh_vendor_manager.register_vendor
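+
+# Usage sketch (hypothetical vendor name): a plugin can make another SSH
+# implementation selectable by registering it and then pointing BZR_SSH at
+# the registered name:
+#
+#   register_ssh_vendor('myssh', MySSHVendor())
+#   # then run bzr with BZR_SSH=myssh in the environment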
+
+
+def _ignore_signals():
+ # TODO: This should possibly ignore SIGHUP as well, but bzr currently
+ # doesn't handle it itself.
+ # <https://launchpad.net/products/bzr/+bug/41433/+index>
+ import signal
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ # GZ 2010-02-19: Perhaps make this check if breakin is installed instead
+ if signal.getsignal(signal.SIGQUIT) != signal.SIG_DFL:
+ signal.signal(signal.SIGQUIT, signal.SIG_IGN)
+
+
+class SocketAsChannelAdapter(object):
+ """Simple wrapper for a socket that pretends to be a paramiko Channel."""
+
+ def __init__(self, sock):
+ self.__socket = sock
+
+ def get_name(self):
+ return "bzr SocketAsChannelAdapter"
+
+ def send(self, data):
+ return self.__socket.send(data)
+
+ def recv(self, n):
+ try:
+ return self.__socket.recv(n)
+ except socket.error, e:
+ if e.args[0] in (errno.EPIPE, errno.ECONNRESET, errno.ECONNABORTED,
+ errno.EBADF):
+ # Connection has closed. Paramiko expects an empty string in
+ # this case, not an exception.
+ return ''
+ raise
+
+ def recv_ready(self):
+ # TODO: jam 20051215 this function is necessary to support the
+ # pipelined() function. In reality, it probably should use
+ # poll() or select() to actually return if there is data
+ # available, otherwise we probably don't get any benefit
+ return True
+
+ def close(self):
+ self.__socket.close()
+
+
+class SSHVendor(object):
+ """Abstract base class for SSH vendor implementations."""
+
+ def connect_sftp(self, username, password, host, port):
+ """Make an SSH connection, and return an SFTPClient.
+
+ :param username: an ascii string
+ :param password: an ascii string
+ :param host: a host name as an ascii string
+ :param port: a port number
+ :type port: int
+
+ :raises: ConnectionError if it cannot connect.
+
+ :rtype: paramiko.sftp_client.SFTPClient
+ """
+ raise NotImplementedError(self.connect_sftp)
+
+ def connect_ssh(self, username, password, host, port, command):
+ """Make an SSH connection.
+
+ :returns: an SSHConnection.
+ """
+ raise NotImplementedError(self.connect_ssh)
+
+ def _raise_connection_error(self, host, port=None, orig_error=None,
+ msg='Unable to connect to SSH host'):
+ """Raise a SocketConnectionError with properly formatted host.
+
+ This just unifies all the locations that try to raise ConnectionError,
+ so that they format things properly.
+ """
+ raise errors.SocketConnectionError(host=host, port=port, msg=msg,
+ orig_error=orig_error)
+
+
+class LoopbackVendor(SSHVendor):
+ """SSH "vendor" that connects over a plain TCP socket, not SSH."""
+
+ def connect_sftp(self, username, password, host, port):
+ sock = socket.socket()
+ try:
+ sock.connect((host, port))
+ except socket.error, e:
+ self._raise_connection_error(host, port=port, orig_error=e)
+ return SFTPClient(SocketAsChannelAdapter(sock))
+
+register_ssh_vendor('loopback', LoopbackVendor())
+
+
+class ParamikoVendor(SSHVendor):
+ """Vendor that uses paramiko."""
+
+ def _connect(self, username, password, host, port):
+ global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
+
+ load_host_keys()
+
+ try:
+ t = paramiko.Transport((host, port or 22))
+ t.set_log_channel('bzr.paramiko')
+ t.start_client()
+ except (paramiko.SSHException, socket.error), e:
+ self._raise_connection_error(host, port=port, orig_error=e)
+
+ server_key = t.get_remote_server_key()
+ server_key_hex = paramiko.util.hexify(server_key.get_fingerprint())
+ keytype = server_key.get_name()
+ if host in SYSTEM_HOSTKEYS and keytype in SYSTEM_HOSTKEYS[host]:
+ our_server_key = SYSTEM_HOSTKEYS[host][keytype]
+ our_server_key_hex = paramiko.util.hexify(
+ our_server_key.get_fingerprint())
+ elif host in BZR_HOSTKEYS and keytype in BZR_HOSTKEYS[host]:
+ our_server_key = BZR_HOSTKEYS[host][keytype]
+ our_server_key_hex = paramiko.util.hexify(
+ our_server_key.get_fingerprint())
+ else:
+ trace.warning('Adding %s host key for %s: %s'
+ % (keytype, host, server_key_hex))
+ add = getattr(BZR_HOSTKEYS, 'add', None)
+ if add is not None: # paramiko >= 1.X.X
+ BZR_HOSTKEYS.add(host, keytype, server_key)
+ else:
+ BZR_HOSTKEYS.setdefault(host, {})[keytype] = server_key
+ our_server_key = server_key
+ our_server_key_hex = paramiko.util.hexify(
+ our_server_key.get_fingerprint())
+ save_host_keys()
+ if server_key != our_server_key:
+ filename1 = os.path.expanduser('~/.ssh/known_hosts')
+ filename2 = osutils.pathjoin(config.config_dir(), 'ssh_host_keys')
+ raise errors.TransportError(
+ 'Host keys for %s do not match! %s != %s' %
+ (host, our_server_key_hex, server_key_hex),
+ ['Try editing %s or %s' % (filename1, filename2)])
+
+ _paramiko_auth(username, password, host, port, t)
+ return t
+
+ def connect_sftp(self, username, password, host, port):
+ t = self._connect(username, password, host, port)
+ try:
+ return t.open_sftp_client()
+ except paramiko.SSHException, e:
+ self._raise_connection_error(host, port=port, orig_error=e,
+ msg='Unable to start sftp client')
+
+ def connect_ssh(self, username, password, host, port, command):
+ t = self._connect(username, password, host, port)
+ try:
+ channel = t.open_session()
+ cmdline = ' '.join(command)
+ channel.exec_command(cmdline)
+ return _ParamikoSSHConnection(channel)
+ except paramiko.SSHException, e:
+ self._raise_connection_error(host, port=port, orig_error=e,
+ msg='Unable to invoke remote bzr')
+
+_ssh_connection_errors = (EOFError, OSError, IOError, socket.error)
+if paramiko is not None:
+ vendor = ParamikoVendor()
+ register_ssh_vendor('paramiko', vendor)
+ register_ssh_vendor('none', vendor)
+ register_default_ssh_vendor(vendor)
+ _ssh_connection_errors += (paramiko.SSHException,)
+ del vendor
+
+
+class SubprocessVendor(SSHVendor):
+ """Abstract base class for vendors that use pipes to a subprocess."""
+
+ # In general stderr should be inherited from the parent process so prompts
+    # are visible on the terminal. This can be overridden to another file for
+ # tests, but beware of using PIPE which may hang due to not being read.
+ _stderr_target = None
+
+ def _connect(self, argv):
+ # Attempt to make a socketpair to use as stdin/stdout for the SSH
+ # subprocess. We prefer sockets to pipes because they support
+ # non-blocking short reads, allowing us to optimistically read 64k (or
+ # whatever) chunks.
+ try:
+ my_sock, subproc_sock = socket.socketpair()
+ osutils.set_fd_cloexec(my_sock)
+ except (AttributeError, socket.error):
+ # This platform doesn't support socketpair(), so just use ordinary
+ # pipes instead.
+ stdin = stdout = subprocess.PIPE
+ my_sock, subproc_sock = None, None
+ else:
+ stdin = stdout = subproc_sock
+ proc = subprocess.Popen(argv, stdin=stdin, stdout=stdout,
+ stderr=self._stderr_target,
+ **os_specific_subprocess_params())
+ if subproc_sock is not None:
+ subproc_sock.close()
+ return SSHSubprocessConnection(proc, sock=my_sock)
+
+ def connect_sftp(self, username, password, host, port):
+ try:
+ argv = self._get_vendor_specific_argv(username, host, port,
+ subsystem='sftp')
+ sock = self._connect(argv)
+ return SFTPClient(SocketAsChannelAdapter(sock))
+ except _ssh_connection_errors, e:
+ self._raise_connection_error(host, port=port, orig_error=e)
+
+ def connect_ssh(self, username, password, host, port, command):
+ try:
+ argv = self._get_vendor_specific_argv(username, host, port,
+ command=command)
+ return self._connect(argv)
+ except _ssh_connection_errors, e:
+ self._raise_connection_error(host, port=port, orig_error=e)
+
+ def _get_vendor_specific_argv(self, username, host, port, subsystem=None,
+ command=None):
+ """Returns the argument list to run the subprocess with.
+
+ Exactly one of 'subsystem' and 'command' must be specified.
+ """
+ raise NotImplementedError(self._get_vendor_specific_argv)
+
+
+class OpenSSHSubprocessVendor(SubprocessVendor):
+ """SSH vendor that uses the 'ssh' executable from OpenSSH."""
+
+ executable_path = 'ssh'
+
+ def _get_vendor_specific_argv(self, username, host, port, subsystem=None,
+ command=None):
+ args = [self.executable_path,
+ '-oForwardX11=no', '-oForwardAgent=no',
+ '-oClearAllForwardings=yes',
+ '-oNoHostAuthenticationForLocalhost=yes']
+ if port is not None:
+ args.extend(['-p', str(port)])
+ if username is not None:
+ args.extend(['-l', username])
+ if subsystem is not None:
+ args.extend(['-s', host, subsystem])
+ else:
+ args.extend([host] + command)
+ return args
+
+register_ssh_vendor('openssh', OpenSSHSubprocessVendor())
+
+
+class SSHCorpSubprocessVendor(SubprocessVendor):
+ """SSH vendor that uses the 'ssh' executable from SSH Corporation."""
+
+ executable_path = 'ssh'
+
+ def _get_vendor_specific_argv(self, username, host, port, subsystem=None,
+ command=None):
+ args = [self.executable_path, '-x']
+ if port is not None:
+ args.extend(['-p', str(port)])
+ if username is not None:
+ args.extend(['-l', username])
+ if subsystem is not None:
+ args.extend(['-s', subsystem, host])
+ else:
+ args.extend([host] + command)
+ return args
+
+register_ssh_vendor('sshcorp', SSHCorpSubprocessVendor())
+
+
+class LSHSubprocessVendor(SubprocessVendor):
+ """SSH vendor that uses the 'lsh' executable from GNU"""
+
+ executable_path = 'lsh'
+
+ def _get_vendor_specific_argv(self, username, host, port, subsystem=None,
+ command=None):
+ args = [self.executable_path]
+ if port is not None:
+ args.extend(['-p', str(port)])
+ if username is not None:
+ args.extend(['-l', username])
+ if subsystem is not None:
+ args.extend(['--subsystem', subsystem, host])
+ else:
+ args.extend([host] + command)
+ return args
+
+register_ssh_vendor('lsh', LSHSubprocessVendor())
+
+
+class PLinkSubprocessVendor(SubprocessVendor):
+ """SSH vendor that uses the 'plink' executable from Putty."""
+
+ executable_path = 'plink'
+
+ def _get_vendor_specific_argv(self, username, host, port, subsystem=None,
+ command=None):
+ args = [self.executable_path, '-x', '-a', '-ssh', '-2', '-batch']
+ if port is not None:
+ args.extend(['-P', str(port)])
+ if username is not None:
+ args.extend(['-l', username])
+ if subsystem is not None:
+ args.extend(['-s', host, subsystem])
+ else:
+ args.extend([host] + command)
+ return args
+
+register_ssh_vendor('plink', PLinkSubprocessVendor())
+
+
+def _paramiko_auth(username, password, host, port, paramiko_transport):
+ auth = config.AuthenticationConfig()
+    # paramiko requires a username, but it might be None if nothing was
+ # supplied. If so, use the local username.
+ if username is None:
+ username = auth.get_user('ssh', host, port=port,
+ default=getpass.getuser())
+ if _use_ssh_agent:
+ agent = paramiko.Agent()
+ for key in agent.get_keys():
+ trace.mutter('Trying SSH agent key %s'
+ % paramiko.util.hexify(key.get_fingerprint()))
+ try:
+ paramiko_transport.auth_publickey(username, key)
+ return
+ except paramiko.SSHException, e:
+ pass
+
+ # okay, try finding id_rsa or id_dss? (posix only)
+ if _try_pkey_auth(paramiko_transport, paramiko.RSAKey, username, 'id_rsa'):
+ return
+ if _try_pkey_auth(paramiko_transport, paramiko.DSSKey, username, 'id_dsa'):
+ return
+
+ # If we have gotten this far, we are about to try for passwords, do an
+ # auth_none check to see if it is even supported.
+ supported_auth_types = []
+ try:
+ # Note that with paramiko <1.7.5 this logs an INFO message:
+ # Authentication type (none) not permitted.
+ # So we explicitly disable the logging level for this action
+ old_level = paramiko_transport.logger.level
+ paramiko_transport.logger.setLevel(logging.WARNING)
+ try:
+ paramiko_transport.auth_none(username)
+ finally:
+ paramiko_transport.logger.setLevel(old_level)
+ except paramiko.BadAuthenticationType, e:
+ # Supported methods are in the exception
+ supported_auth_types = e.allowed_types
+ except paramiko.SSHException, e:
+ # Don't know what happened, but just ignore it
+ pass
+ # We treat 'keyboard-interactive' and 'password' auth methods identically,
+ # because Paramiko's auth_password method will automatically try
+ # 'keyboard-interactive' auth (using the password as the response) if
+ # 'password' auth is not available. Apparently some Debian and Gentoo
+ # OpenSSH servers require this.
+ # XXX: It's possible for a server to require keyboard-interactive auth that
+ # requires something other than a single password, but we currently don't
+ # support that.
+ if ('password' not in supported_auth_types and
+ 'keyboard-interactive' not in supported_auth_types):
+ raise errors.ConnectionError('Unable to authenticate to SSH host as'
+ '\n %s@%s\nsupported auth types: %s'
+ % (username, host, supported_auth_types))
+
+ if password:
+ try:
+ paramiko_transport.auth_password(username, password)
+ return
+ except paramiko.SSHException, e:
+ pass
+
+ # give up and ask for a password
+ password = auth.get_password('ssh', host, username, port=port)
+ # get_password can still return None, which means we should not prompt
+ if password is not None:
+ try:
+ paramiko_transport.auth_password(username, password)
+ except paramiko.SSHException, e:
+ raise errors.ConnectionError(
+ 'Unable to authenticate to SSH host as'
+ '\n %s@%s\n' % (username, host), e)
+ else:
+ raise errors.ConnectionError('Unable to authenticate to SSH host as'
+ ' %s@%s' % (username, host))
+
+
+def _try_pkey_auth(paramiko_transport, pkey_class, username, filename):
+ filename = os.path.expanduser('~/.ssh/' + filename)
+ try:
+ key = pkey_class.from_private_key_file(filename)
+ paramiko_transport.auth_publickey(username, key)
+ return True
+ except paramiko.PasswordRequiredException:
+ password = ui.ui_factory.get_password(
+ prompt=u'SSH %(filename)s password',
+ filename=filename.decode(osutils._fs_enc))
+ try:
+ key = pkey_class.from_private_key_file(filename, password)
+ paramiko_transport.auth_publickey(username, key)
+ return True
+ except paramiko.SSHException:
+ trace.mutter('SSH authentication via %s key failed.'
+ % (os.path.basename(filename),))
+ except paramiko.SSHException:
+ trace.mutter('SSH authentication via %s key failed.'
+ % (os.path.basename(filename),))
+ except IOError:
+ pass
+ return False
+
+
+def load_host_keys():
+ """
+ Load system host keys (probably doesn't work on windows) and any
+ "discovered" keys from previous sessions.
+ """
+ global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
+ try:
+ SYSTEM_HOSTKEYS = paramiko.util.load_host_keys(
+ os.path.expanduser('~/.ssh/known_hosts'))
+ except IOError, e:
+ trace.mutter('failed to load system host keys: ' + str(e))
+ bzr_hostkey_path = osutils.pathjoin(config.config_dir(), 'ssh_host_keys')
+ try:
+ BZR_HOSTKEYS = paramiko.util.load_host_keys(bzr_hostkey_path)
+ except IOError, e:
+ trace.mutter('failed to load bzr host keys: ' + str(e))
+ save_host_keys()
+
+
+def save_host_keys():
+ """
+ Save "discovered" host keys in $(config)/ssh_host_keys/.
+ """
+ global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
+ bzr_hostkey_path = osutils.pathjoin(config.config_dir(), 'ssh_host_keys')
+ config.ensure_config_dir_exists()
+
+ try:
+ f = open(bzr_hostkey_path, 'w')
+ f.write('# SSH host keys collected by bzr\n')
+ for hostname, keys in BZR_HOSTKEYS.iteritems():
+ for keytype, key in keys.iteritems():
+ f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
+ f.close()
+ except IOError, e:
+ trace.mutter('failed to save bzr host keys: ' + str(e))
+
+
+def os_specific_subprocess_params():
+ """Get O/S specific subprocess parameters."""
+ if sys.platform == 'win32':
+ # setting the process group and closing fds is not supported on
+ # win32
+ return {}
+ else:
+ # We close fds other than the pipes as the child process does not need
+ # them to be open.
+ #
+ # We also set the child process to ignore SIGINT. Normally the signal
+ # would be sent to every process in the foreground process group, but
+ # this causes it to be seen only by bzr and not by ssh. Python will
+ # generate a KeyboardInterrupt in bzr, and we will then have a chance
+ # to release locks or do other cleanup over ssh before the connection
+ # goes away.
+ # <https://launchpad.net/products/bzr/+bug/5987>
+ #
+ # Running it in a separate process group is not good because then it
+ # can't get non-echoed input of a password or passphrase.
+ # <https://launchpad.net/products/bzr/+bug/40508>
+ return {'preexec_fn': _ignore_signals,
+ 'close_fds': True,
+ }
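+
+# Usage sketch (illustrative command): the returned dict is passed straight
+# to subprocess.Popen, e.g.
+#   subprocess.Popen(['ssh', '-V'], stdout=subprocess.PIPE,
+#                    stderr=subprocess.PIPE,
+#                    **os_specific_subprocess_params())
+# which on POSIX gives the child close_fds=True and an ignored SIGINT, and
+# adds no extra parameters on win32.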
+
+import weakref
+_subproc_weakrefs = set()
+
+def _close_ssh_proc(proc, sock):
+ """Carefully close stdin/stdout and reap the SSH process.
+
+ If the pipes are already closed and/or the process has already been
+ wait()ed on, that's ok, and no error is raised. The goal is to do our best
+ to clean up (whether or not a clean up was already tried).
+ """
+ funcs = []
+ for closeable in (proc.stdin, proc.stdout, sock):
+ # We expect that either proc (a subprocess.Popen) will have stdin and
+ # stdout streams to close, or that we will have been passed a socket to
+ # close, with the option not in use being None.
+ if closeable is not None:
+ funcs.append(closeable.close)
+ funcs.append(proc.wait)
+ for func in funcs:
+ try:
+ func()
+ except OSError:
+ # It's ok for the pipe to already be closed, or the process to
+ # already be finished.
+ continue
+
+
+class SSHConnection(object):
+ """Abstract base class for SSH connections."""
+
+ def get_sock_or_pipes(self):
+ """Returns a (kind, io_object) pair.
+
+ If kind == 'socket', then io_object is a socket.
+
+ If kind == 'pipes', then io_object is a pair of file-like objects
+ (read_from, write_to).
+ """
+ raise NotImplementedError(self.get_sock_or_pipes)
+
+ def close(self):
+ raise NotImplementedError(self.close)
+
+
+class SSHSubprocessConnection(SSHConnection):
+ """A connection to an ssh subprocess via pipes or a socket.
+
+ This class is also socket-like enough to be used with
+ SocketAsChannelAdapter (it has 'send' and 'recv' methods).
+ """
+
+ def __init__(self, proc, sock=None):
+ """Constructor.
+
+ :param proc: a subprocess.Popen
+ :param sock: if proc.stdin/out is a socket from a socketpair, then sock
+            should be bzrlib's half of that socketpair. If not passed, proc's
+ stdin/out is assumed to be ordinary pipes.
+ """
+ self.proc = proc
+ self._sock = sock
+ # Add a weakref to proc that will attempt to do the same as self.close
+ # to avoid leaving processes lingering indefinitely.
+ def terminate(ref):
+ _subproc_weakrefs.remove(ref)
+ _close_ssh_proc(proc, sock)
+ _subproc_weakrefs.add(weakref.ref(self, terminate))
+
+ def send(self, data):
+ if self._sock is not None:
+ return self._sock.send(data)
+ else:
+ return os.write(self.proc.stdin.fileno(), data)
+
+ def recv(self, count):
+ if self._sock is not None:
+ return self._sock.recv(count)
+ else:
+ return os.read(self.proc.stdout.fileno(), count)
+
+ def close(self):
+ _close_ssh_proc(self.proc, self._sock)
+
+ def get_sock_or_pipes(self):
+ if self._sock is not None:
+ return 'socket', self._sock
+ else:
+ return 'pipes', (self.proc.stdout, self.proc.stdin)
+
+
+class _ParamikoSSHConnection(SSHConnection):
+ """An SSH connection via paramiko."""
+
+ def __init__(self, channel):
+ self.channel = channel
+
+ def get_sock_or_pipes(self):
+ return ('socket', self.channel)
+
+ def close(self):
+ return self.channel.close()
+
+
diff --git a/bzrlib/transport/trace.py b/bzrlib/transport/trace.py
new file mode 100644
index 0000000..f7fa268
--- /dev/null
+++ b/bzrlib/transport/trace.py
@@ -0,0 +1,177 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Implementation of Transport that traces transport operations.
+
+This does not change the transport behaviour at all, merely records every call
+and then delegates it.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib.transport import decorator
+
+
+class TransportTraceDecorator(decorator.TransportDecorator):
+ """A tracing decorator for Transports.
+
+ Calls that potentially perform IO are logged to self._activity. The
+ _activity attribute is shared as the transport is cloned, but not if a new
+ transport is created without cloning.
+
+    Not all operations are logged at this point. If you need an unlogged
+    operation to be logged, please add a test for it to this transport's
+    tests.
+
+    See also TransportLogDecorator, which records a machine-readable log in
+    memory, e.g. for testing.
+ """
+
+ def __init__(self, url, _decorated=None, _from_transport=None):
+ """Set the 'base' path where files will be stored.
+
+ _decorated is a private parameter for cloning.
+ """
+ super(TransportTraceDecorator, self).__init__(url, _decorated)
+ if _from_transport is None:
+ # newly created
+ self._activity = []
+ else:
+ # cloned
+ self._activity = _from_transport._activity
+
+ def append_file(self, relpath, f, mode=None):
+ """See Transport.append_file()."""
+ return self._decorated.append_file(relpath, f, mode=mode)
+
+ def append_bytes(self, relpath, bytes, mode=None):
+ """See Transport.append_bytes()."""
+ return self._decorated.append_bytes(relpath, bytes, mode=mode)
+
+ def delete(self, relpath):
+ """See Transport.delete()."""
+ self._activity.append(('delete', relpath))
+ return self._decorated.delete(relpath)
+
+ def delete_tree(self, relpath):
+ """See Transport.delete_tree()."""
+ return self._decorated.delete_tree(relpath)
+
+ @classmethod
+ def _get_url_prefix(self):
+ """Tracing transports are identified by 'trace+'"""
+ return 'trace+'
+
+ def get(self, relpath):
+ """See Transport.get()."""
+ self._trace(('get', relpath))
+ return self._decorated.get(relpath)
+
+ def get_smart_client(self):
+ return self._decorated.get_smart_client()
+
+ def has(self, relpath):
+ """See Transport.has()."""
+ return self._decorated.has(relpath)
+
+ def is_readonly(self):
+ """See Transport.is_readonly."""
+ return self._decorated.is_readonly()
+
+ def mkdir(self, relpath, mode=None):
+ """See Transport.mkdir()."""
+ self._trace(('mkdir', relpath, mode))
+ return self._decorated.mkdir(relpath, mode)
+
+ def open_write_stream(self, relpath, mode=None):
+ """See Transport.open_write_stream."""
+ return self._decorated.open_write_stream(relpath, mode=mode)
+
+ def put_file(self, relpath, f, mode=None):
+ """See Transport.put_file()."""
+ return self._decorated.put_file(relpath, f, mode)
+
+ def put_bytes(self, relpath, bytes, mode=None):
+ """See Transport.put_bytes()."""
+ self._trace(('put_bytes', relpath, len(bytes), mode))
+ return self._decorated.put_bytes(relpath, bytes, mode)
+
+ def put_bytes_non_atomic(self, relpath, bytes, mode=None,
+ create_parent_dir=False, dir_mode=None):
+ """See Transport.put_bytes_non_atomic."""
+ self._trace(('put_bytes_non_atomic', relpath, len(bytes), mode,
+ create_parent_dir, dir_mode))
+ return self._decorated.put_bytes_non_atomic(relpath, bytes, mode=mode,
+ create_parent_dir=create_parent_dir, dir_mode=dir_mode)
+
+ def listable(self):
+ """See Transport.listable."""
+ return self._decorated.listable()
+
+ def iter_files_recursive(self):
+ """See Transport.iter_files_recursive()."""
+ return self._decorated.iter_files_recursive()
+
+ def list_dir(self, relpath):
+ """See Transport.list_dir()."""
+ return self._decorated.list_dir(relpath)
+
+ def readv(self, relpath, offsets, adjust_for_latency=False,
+ upper_limit=None):
+ # we override at the readv() level rather than _readv() so that any
+ # latency adjustments will be done by the underlying transport
+ self._trace(('readv', relpath, offsets, adjust_for_latency,
+ upper_limit))
+ return self._decorated.readv(relpath, offsets, adjust_for_latency,
+ upper_limit)
+
+ def recommended_page_size(self):
+ """See Transport.recommended_page_size()."""
+ return self._decorated.recommended_page_size()
+
+ def rename(self, rel_from, rel_to):
+ self._activity.append(('rename', rel_from, rel_to))
+ return self._decorated.rename(rel_from, rel_to)
+
+ def rmdir(self, relpath):
+ """See Transport.rmdir."""
+ self._trace(('rmdir', relpath))
+ return self._decorated.rmdir(relpath)
+
+ def stat(self, relpath):
+ """See Transport.stat()."""
+ return self._decorated.stat(relpath)
+
+ def lock_read(self, relpath):
+ """See Transport.lock_read."""
+ return self._decorated.lock_read(relpath)
+
+ def lock_write(self, relpath):
+ """See Transport.lock_write."""
+ return self._decorated.lock_write(relpath)
+
+ def _trace(self, operation_tuple):
+ """Record that a transport operation occured.
+
+ :param operation: Tuple of transport call name and arguments.
+ """
+ self._activity.append(operation_tuple)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(TransportTraceDecorator, test_server.TraceServer)]
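A minimal usage sketch of the decorator above, assuming the 'trace+' URL
prefix is registered against TransportTraceDecorator in this bzrlib tree
(the test permutations above suggest it is wired up for the test suite) and
that a memory transport is available:

    from bzrlib import transport

    t = transport.get_transport('trace+memory:///')
    t.mkdir('dir')                 # recorded as ('mkdir', 'dir', None)
    t.put_bytes('dir/a', 'hello')  # recorded as ('put_bytes', 'dir/a', 5, None)
    t.append_bytes('dir/a', '!')   # append_bytes is not traced (see above)
    print(t._activity)
    # [('mkdir', 'dir', None), ('put_bytes', 'dir/a', 5, None)]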
diff --git a/bzrlib/transport/unlistable.py b/bzrlib/transport/unlistable.py
new file mode 100644
index 0000000..f539726
--- /dev/null
+++ b/bzrlib/transport/unlistable.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Transport implementation that disables listing to simulate HTTP cheaply."""
+
+from __future__ import absolute_import
+
+from bzrlib.transport import Transport
+from bzrlib.transport import decorator
+
+
+class UnlistableTransportDecorator(decorator.TransportDecorator):
+ """A transport that disables file listing for testing."""
+
+ @classmethod
+ def _get_url_prefix(cls):
+ """Unlistable transports are identified by 'unlistable+'."""
+ return 'unlistable+'
+
+ def iter_files_recursive(self):
+ Transport.iter_files_recursive(self)
+
+ def listable(self):
+ return False
+
+ def list_dir(self, relpath):
+ Transport.list_dir(self, relpath)
+
+
+def get_test_permutations():
+ """Return the permutations to be used in testing."""
+ from bzrlib.tests import test_server
+ return [(UnlistableTransportDecorator, test_server.UnlistableServer),]
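A small, hedged sketch of what the decorator above changes, assuming the
'unlistable+' prefix is registered and that the base Transport.list_dir
implementation raises TransportNotPossible (an assumption about the wider
bzrlib API, not shown in this file):

    from bzrlib import errors, transport

    t = transport.get_transport('unlistable+memory:///')
    t.put_bytes('a', 'contents')   # reads and writes pass straight through
    print(t.listable())            # False
    try:
        t.list_dir('.')
    except errors.TransportNotPossible:
        print('listing is disabled, as on a plain HTTP transport')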
diff --git a/bzrlib/tree.py b/bzrlib/tree.py
new file mode 100644
index 0000000..82d52e1
--- /dev/null
+++ b/bzrlib/tree.py
@@ -0,0 +1,1635 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Tree classes, representing directory at point in time.
+"""
+
+from __future__ import absolute_import
+
+import os
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import collections
+
+from bzrlib import (
+ conflicts as _mod_conflicts,
+ debug,
+ delta,
+ errors,
+ filters,
+ inventory,
+ osutils,
+ revision as _mod_revision,
+ rules,
+ trace,
+ )
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib.decorators import needs_read_lock
+from bzrlib.inter import InterObject
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+
+
+class Tree(object):
+ """Abstract file tree.
+
+ There are several subclasses:
+
+ * `WorkingTree` exists as files on disk editable by the user.
+
+ * `RevisionTree` is a tree as recorded at some point in the past.
+
+ Trees can be compared, etc, regardless of whether they are working
+ trees or versioned trees.
+ """
+
+ def has_versioned_directories(self):
+ """Whether this tree can contain explicitly versioned directories.
+
+ This defaults to True, but some implementations may want to override
+ it.
+ """
+ return True
+
+ def changes_from(self, other, want_unchanged=False, specific_files=None,
+ extra_trees=None, require_versioned=False, include_root=False,
+ want_unversioned=False):
+ """Return a TreeDelta of the changes from other to this tree.
+
+ :param other: A tree to compare with.
+ :param specific_files: An optional list of file paths to restrict the
+ comparison to. When mapping filenames to ids, all matches in all
+ trees (including optional extra_trees) are used, and all children of
+ matched directories are included.
+ :param want_unchanged: An optional boolean requesting the inclusion of
+ unchanged entries in the result.
+ :param extra_trees: An optional list of additional trees to use when
+ mapping the contents of specific_files (paths) to file_ids.
+ :param require_versioned: An optional boolean (defaults to False). When
+ supplied and True all the 'specific_files' must be versioned, or
+ a PathsNotVersionedError will be thrown.
+ :param want_unversioned: Scan for unversioned paths.
+
+ The comparison will be performed by an InterTree object looked up on
+ self and other.
+ """
+ # Martin observes that Tree.changes_from returns a TreeDelta and this
+ # may confuse people, because the class name of the returned object is
+ # a synonym of the object referenced in the method name.
+ return InterTree.get(other, self).compare(
+ want_unchanged=want_unchanged,
+ specific_files=specific_files,
+ extra_trees=extra_trees,
+ require_versioned=require_versioned,
+ include_root=include_root,
+ want_unversioned=want_unversioned,
+ )
+
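As an illustration of the convenience method above, a hedged sketch that
compares a working tree with its basis tree; WorkingTree.open, basis_tree()
and the TreeDelta attribute layout (lists of (path, file_id, kind) tuples
for added/removed) are assumptions about the wider bzrlib API, not shown in
this file:

    from bzrlib import workingtree

    wt = workingtree.WorkingTree.open('.')  # assumes a branch exists at '.'
    wt.lock_read()
    try:
        delta = wt.changes_from(wt.basis_tree())
        for path, file_id, kind in delta.added:
            print('added %s' % path)
        for path, file_id, kind in delta.removed:
            print('removed %s' % path)
    finally:
        wt.unlock()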
+ def iter_changes(self, from_tree, include_unchanged=False,
+ specific_files=None, pb=None, extra_trees=None,
+ require_versioned=True, want_unversioned=False):
+ """See InterTree.iter_changes"""
+ intertree = InterTree.get(from_tree, self)
+ return intertree.iter_changes(include_unchanged, specific_files, pb,
+ extra_trees, require_versioned, want_unversioned=want_unversioned)
+
+ def conflicts(self):
+ """Get a list of the conflicts in the tree.
+
+ Each conflict is an instance of bzrlib.conflicts.Conflict.
+ """
+ return _mod_conflicts.ConflictList()
+
+ def extras(self):
+ """For trees that can have unversioned files, return all such paths."""
+ return []
+
+ def get_parent_ids(self):
+ """Get the parent ids for this tree.
+
+ :return: a list of parent ids. [] is returned to indicate
+ a tree with no parents.
+ :raises: BzrError if the parents are not known.
+ """
+ raise NotImplementedError(self.get_parent_ids)
+
+ def has_filename(self, filename):
+ """True if the tree has given filename."""
+ raise NotImplementedError(self.has_filename)
+
+ def has_id(self, file_id):
+ raise NotImplementedError(self.has_id)
+
+ @deprecated_method(deprecated_in((2, 4, 0)))
+ def __contains__(self, file_id):
+ return self.has_id(file_id)
+
+ def has_or_had_id(self, file_id):
+ raise NotImplementedError(self.has_or_had_id)
+
+ def is_ignored(self, filename):
+ """Check whether the filename is ignored by this tree.
+
+ :param filename: The relative filename within the tree.
+ :return: True if the filename is ignored.
+ """
+ return False
+
+ def all_file_ids(self):
+ """Iterate through all file ids, including ids for missing files."""
+ raise NotImplementedError(self.all_file_ids)
+
+ def id2path(self, file_id):
+ """Return the path for a file id.
+
+ :raises NoSuchId:
+ """
+ raise NotImplementedError(self.id2path)
+
+ def iter_entries_by_dir(self, specific_file_ids=None, yield_parents=False):
+ """Walk the tree in 'by_dir' order.
+
+ This will yield each entry in the tree as a (path, entry) tuple.
+ The order in which they are yielded is:
+
+ Directories are walked depth-first in lexicographical order; however,
+ whenever a directory is reached, all of its direct child nodes are
+ yielded in lexicographical order before any of the grandchildren.
+
+ For example, in the tree::
+
+ a/
+ b/
+ c
+ d/
+ e
+ f/
+ g
+
+ The yield order (ignoring root) would be::
+
+ a, f, a/b, a/d, a/b/c, a/d/e, f/g
+
+ :param yield_parents: If True, yield the parents from the root leading
+ down to specific_file_ids that have been requested. This has no
+ impact if specific_file_ids is None.
+ """
+ raise NotImplementedError(self.iter_entries_by_dir)
+
+ def iter_child_entries(self, file_id, path=None):
+ """Iterate over the children of a directory or tree reference.
+
+ :param file_id: File id of the directory/tree-reference
+ :param path: Optional path of the directory
+ :raise NoSuchId: When the file_id does not exist
+ :return: Iterator over entries in the directory
+ """
+ raise NotImplementedError(self.iter_child_entries)
+
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ """List all files in this tree.
+
+ :param include_root: Whether to include the entry for the tree root
+ :param from_dir: Directory under which to list files
+ :param recursive: Whether to list files recursively
+ :return: iterator over tuples of (path, versioned, kind, file_id,
+ inventory entry)
+ """
+ raise NotImplementedError(self.list_files)
+
+ def iter_references(self):
+ if self.supports_tree_reference():
+ for path, entry in self.iter_entries_by_dir():
+ if entry.kind == 'tree-reference':
+ yield path, entry.file_id
+
+ def kind(self, file_id):
+ raise NotImplementedError("Tree subclass %s must implement kind"
+ % self.__class__.__name__)
+
+ def stored_kind(self, file_id):
+ """File kind stored for this file_id.
+
+ May not match kind on disk for working trees. Always available
+ for versioned files, even when the file itself is missing.
+ """
+ return self.kind(file_id)
+
+ def path_content_summary(self, path):
+ """Get a summary of the information about path.
+
+ All the attributes returned are for the canonical form, not the
+ convenient form (if content filters are in use.)
+
+ :param path: A relative path within the tree.
+ :return: A tuple containing kind, size, exec, sha1-or-link.
+ Kind is always present (see tree.kind()).
+ size is present if kind is file and the size of the
+ canonical form can be cheaply determined, None otherwise.
+ exec is None unless kind is file and the platform supports the 'x'
+ bit.
+ sha1-or-link is the link target if kind is symlink, or the sha1 if
+ it can be obtained without reading the file.
+ """
+ raise NotImplementedError(self.path_content_summary)
+
+ def get_reference_revision(self, file_id, path=None):
+ raise NotImplementedError("Tree subclass %s must implement "
+ "get_reference_revision"
+ % self.__class__.__name__)
+
+ def _comparison_data(self, entry, path):
+ """Return a tuple of kind, executable, stat_value for a file.
+
+ entry may be None if there is no inventory entry for the file, but
+ path must always be supplied.
+
+ kind is None if there is no file present (even if an inventory id is
+ present). executable is False for non-file entries.
+ """
+ raise NotImplementedError(self._comparison_data)
+
+ def _file_size(self, entry, stat_value):
+ raise NotImplementedError(self._file_size)
+
+ def get_file(self, file_id, path=None):
+ """Return a file object for the file file_id in the tree.
+
+ If both file_id and path are defined, it is implementation defined as
+ to which one is used.
+ """
+ raise NotImplementedError(self.get_file)
+
+ def get_file_with_stat(self, file_id, path=None):
+ """Get a file handle and stat object for file_id.
+
+ The default implementation returns (self.get_file, None) for backwards
+ compatibility.
+
+ :param file_id: The file id to read.
+ :param path: The path of the file, if it is known.
+ :return: A tuple (file_handle, stat_value_or_None). If the tree has
+ no stat facility, or no need for stat cache feedback during commit,
+ it may return None for the second element of the tuple.
+ """
+ return (self.get_file(file_id, path), None)
+
+ def get_file_text(self, file_id, path=None):
+ """Return the byte content of a file.
+
+ :param file_id: The file_id of the file.
+ :param path: The path of the file.
+
+ If both file_id and path are supplied, an implementation may use
+ either one.
+
+ :returns: A single byte string for the whole file.
+ """
+ my_file = self.get_file(file_id, path)
+ try:
+ return my_file.read()
+ finally:
+ my_file.close()
+
+ def get_file_lines(self, file_id, path=None):
+ """Return the content of a file, as lines.
+
+ :param file_id: The file_id of the file.
+ :param path: The path of the file.
+
+ If both file_id and path are supplied, an implementation may use
+ either one.
+ """
+ return osutils.split_lines(self.get_file_text(file_id, path))
+
+ def get_file_verifier(self, file_id, path=None, stat_value=None):
+ """Return a verifier for a file.
+
+ The default implementation returns a sha1.
+
+ :param file_id: The handle for this file.
+ :param path: The path that this file can be found at.
+ These must point to the same object.
+ :param stat_value: Optional stat value for the object
+ :return: Tuple with verifier name and verifier data
+ """
+ return ("SHA1", self.get_file_sha1(file_id, path=path,
+ stat_value=stat_value))
+
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ """Return the SHA1 file for a file.
+
+ :note: callers should use get_file_verifier instead
+ where possible, as the underlying repository implementation may
+ have quicker access to a non-sha1 verifier.
+
+ :param file_id: The handle for this file.
+ :param path: The path that this file can be found at.
+ These must point to the same object.
+ :param stat_value: Optional stat value for the object
+ """
+ raise NotImplementedError(self.get_file_sha1)
+
+ def get_file_mtime(self, file_id, path=None):
+ """Return the modification time for a file.
+
+ :param file_id: The handle for this file.
+ :param path: The path that this file can be found at.
+ These must point to the same object.
+ """
+ raise NotImplementedError(self.get_file_mtime)
+
+ def get_file_size(self, file_id):
+ """Return the size of a file in bytes.
+
+ This applies only to regular files. If invoked on directories or
+ symlinks, it will return None.
+ :param file_id: The file-id of the file
+ """
+ raise NotImplementedError(self.get_file_size)
+
+ def is_executable(self, file_id, path=None):
+ """Check if a file is executable.
+
+ :param file_id: The handle for this file.
+ :param path: The path that this file can be found at.
+ These must point to the same object.
+ """
+ raise NotImplementedError(self.is_executable)
+
+ def iter_files_bytes(self, desired_files):
+ """Iterate through file contents.
+
+ Files will not necessarily be returned in the order they occur in
+ desired_files. No specific order is guaranteed.
+
+ Yields pairs of identifier, bytes_iterator. identifier is an opaque
+ value supplied by the caller as part of desired_files. It should
+ uniquely identify the file version in the caller's context. (Examples:
+ an index number or a TreeTransform trans_id.)
+
+ bytes_iterator is an iterable of bytestrings for the file. The
+ kind of iterable and length of the bytestrings are unspecified, but for
+ this implementation, it is a tuple containing a single bytestring with
+ the complete text of the file.
+
+ :param desired_files: a list of (file_id, identifier) pairs
+ """
+ for file_id, identifier in desired_files:
+ # We wrap the string in a tuple so that we can return an iterable
+ # of bytestrings. (Technically, a bytestring is also an iterable
+ # of bytestrings, but iterating through each character is not
+ # performant.)
+ cur_file = (self.get_file_text(file_id),)
+ yield identifier, cur_file
+
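A hedged sketch of consuming iter_files_bytes() as documented above; 'tree'
stands for any Tree implementation, and the README/setup.py paths are only
illustrative:

    # Identifiers are opaque, caller-chosen labels; each bytes_iterator has
    # to be joined, since only this default implementation guarantees a
    # single-chunk tuple.
    wanted = [(tree.path2id('README'), 'readme'),
              (tree.path2id('setup.py'), 'setup')]
    texts = {}
    for identifier, chunks in tree.iter_files_bytes(wanted):
        texts[identifier] = ''.join(chunks)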
+ def get_symlink_target(self, file_id, path=None):
+ """Get the target for a given file_id.
+
+ It is assumed that the caller already knows that file_id is referencing
+ a symlink.
+ :param file_id: Handle for the symlink entry.
+ :param path: The path of the file.
+ If both file_id and path are supplied, an implementation may use
+ either one.
+ :return: The path the symlink points to.
+ """
+ raise NotImplementedError(self.get_symlink_target)
+
+ def get_root_id(self):
+ """Return the file_id for the root of this tree."""
+ raise NotImplementedError(self.get_root_id)
+
+ def annotate_iter(self, file_id,
+ default_revision=_mod_revision.CURRENT_REVISION):
+ """Return an iterator of revision_id, line tuples.
+
+ For working trees (and mutable trees in general), the special
+ revision_id 'current:' will be used for lines that are new in this
+ tree, e.g. uncommitted changes.
+ :param file_id: The file to produce an annotated version from
+ :param default_revision: For lines that don't match a basis, mark them
+ with this revision id. Not all implementations will make use of
+ this value.
+ """
+ raise NotImplementedError(self.annotate_iter)
+
+ def _get_plan_merge_data(self, file_id, other, base):
+ from bzrlib import versionedfile
+ vf = versionedfile._PlanMergeVersionedFile(file_id)
+ last_revision_a = self._get_file_revision(file_id, vf, 'this:')
+ last_revision_b = other._get_file_revision(file_id, vf, 'other:')
+ if base is None:
+ last_revision_base = None
+ else:
+ last_revision_base = base._get_file_revision(file_id, vf, 'base:')
+ return vf, last_revision_a, last_revision_b, last_revision_base
+
+ def plan_file_merge(self, file_id, other, base=None):
+ """Generate a merge plan based on annotations.
+
+ If the file contains uncommitted changes in this tree, they will be
+ attributed to the 'current:' pseudo-revision. If the file contains
+ uncommitted changes in the other tree, they will be assigned to the
+ 'other:' pseudo-revision.
+ """
+ data = self._get_plan_merge_data(file_id, other, base)
+ vf, last_revision_a, last_revision_b, last_revision_base = data
+ return vf.plan_merge(last_revision_a, last_revision_b,
+ last_revision_base)
+
+ def plan_file_lca_merge(self, file_id, other, base=None):
+ """Generate a merge plan based lca-newness.
+
+ If the file contains uncommitted changes in this tree, they will be
+ attributed to the 'current:' pseudo-revision. If the file contains
+ uncommitted changes in the other tree, they will be assigned to the
+ 'other:' pseudo-revision.
+ """
+ data = self._get_plan_merge_data(file_id, other, base)
+ vf, last_revision_a, last_revision_b, last_revision_base = data
+ return vf.plan_lca_merge(last_revision_a, last_revision_b,
+ last_revision_base)
+
+ def _iter_parent_trees(self):
+ """Iterate through parent trees, defaulting to Tree.revision_tree."""
+ for revision_id in self.get_parent_ids():
+ try:
+ yield self.revision_tree(revision_id)
+ except errors.NoSuchRevisionInTree:
+ yield self.repository.revision_tree(revision_id)
+
+ def _get_file_revision(self, file_id, vf, tree_revision):
+ """Ensure that file_id, tree_revision is in vf to plan the merge."""
+
+ if getattr(self, '_repository', None) is None:
+ last_revision = tree_revision
+ parent_keys = [(file_id, t.get_file_revision(file_id)) for t in
+ self._iter_parent_trees()]
+ vf.add_lines((file_id, last_revision), parent_keys,
+ self.get_file_lines(file_id))
+ repo = self.branch.repository
+ base_vf = repo.texts
+ else:
+ last_revision = self.get_file_revision(file_id)
+ base_vf = self._repository.texts
+ if base_vf not in vf.fallback_versionedfiles:
+ vf.fallback_versionedfiles.append(base_vf)
+ return last_revision
+
+ def _check_retrieved(self, ie, f):
+ if not __debug__:
+ return
+ fp = osutils.fingerprint_file(f)
+ f.seek(0)
+
+ if ie.text_size is not None:
+ if ie.text_size != fp['size']:
+ raise errors.BzrError(
+ "mismatched size for file %r in %r" %
+ (ie.file_id, self._store),
+ ["inventory expects %d bytes" % ie.text_size,
+ "file is actually %d bytes" % fp['size'],
+ "store is probably damaged/corrupt"])
+
+ if ie.text_sha1 != fp['sha1']:
+ raise errors.BzrError("wrong SHA-1 for file %r in %r" %
+ (ie.file_id, self._store),
+ ["inventory expects %s" % ie.text_sha1,
+ "file is actually %s" % fp['sha1'],
+ "store is probably damaged/corrupt"])
+
+ def path2id(self, path):
+ """Return the id for path in this tree."""
+ raise NotImplementedError(self.path2id)
+
+ def paths2ids(self, paths, trees=[], require_versioned=True):
+ """Return all the ids that can be reached by walking from paths.
+
+ Each path is looked up in this tree and any extras provided in
+ trees, and this is repeated recursively: the children in an extra tree
+ of a directory that has been renamed under a provided path in this tree
+ are all returned, even if none exist under a provided path in this
+ tree, and vice versa.
+
+ :param paths: An iterable of paths to start converting to ids from.
+ Alternatively, if paths is None, no ids should be calculated and None
+ will be returned. This is offered to make calling the API unconditional
+ for code that *might* take a list of files.
+ :param trees: Additional trees to consider.
+ :param require_versioned: If False, do not raise NotVersionedError if
+ an element of paths is not versioned in this tree and all of trees.
+ """
+ return find_ids_across_trees(paths, [self] + list(trees), require_versioned)
+
+ def iter_children(self, file_id):
+ """Iterate over the file ids of the children of an entry.
+
+ :param file_id: File id of the entry
+ :return: Iterator over child file ids.
+ """
+ raise NotImplementedError(self.iter_children)
+
+ def lock_read(self):
+ """Lock this tree for multiple read only operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ pass
+
+ def revision_tree(self, revision_id):
+ """Obtain a revision tree for the revision revision_id.
+
+ The intention of this method is to allow access to possibly cached
+ tree data. Implementors of this method should raise NoSuchRevision if
+ the tree is not locally available, even if they could obtain the
+ tree via a repository or some other means. Callers are responsible
+ for finding the ultimate source for a revision tree.
+
+ :param revision_id: The revision_id of the requested tree.
+ :return: A Tree.
+ :raises: NoSuchRevision if the tree cannot be obtained.
+ """
+ raise errors.NoSuchRevisionInTree(self, revision_id)
+
+ def unknowns(self):
+ """What files are present in this tree and unknown.
+
+ :return: an iterator over the unknown files.
+ """
+ return iter([])
+
+ def unlock(self):
+ pass
+
+ def filter_unversioned_files(self, paths):
+ """Filter out paths that are versioned.
+
+ :return: set of paths.
+ """
+ raise NotImplementedError(self.filter_unversioned_files)
+
+ def walkdirs(self, prefix=""):
+ """Walk the contents of this tree from path down.
+
+ This yields all the data about the contents of a directory at a time.
+ After each directory has been yielded, if the caller has mutated the
+ list to exclude some directories, they are then not descended into.
+
+ The data yielded is of the form:
+ ((directory-relpath, directory-path-from-root, directory-fileid),
+ [(relpath, basename, kind, lstat, path_from_tree_root, file_id,
+ versioned_kind), ...]),
+ - directory-relpath is the containing dir's relpath from prefix
+ - directory-path-from-root is the containing dir's path from /
+ - directory-fileid is the id of the directory if it is versioned.
+ - relpath is the relative path within the subtree being walked.
+ - basename is the basename
+ - kind is the kind of the file now. If unknown then the file is not
+ present within the tree - but it may be recorded as versioned. See
+ versioned_kind.
+ - lstat is the stat data *if* the file was statted.
+ - path_from_tree_root is the path from the root of the tree.
+ - file_id is the file_id if the entry is versioned.
+ - versioned_kind is the kind of the file as last recorded in the
+ versioning system. If 'unknown' the file is not versioned.
+ One of 'kind' and 'versioned_kind' must not be 'unknown'.
+
+ :param prefix: Start walking from prefix within the tree rather than
+ at the root. This allows one to walk a subtree but get paths that are
+ relative to a tree rooted higher up.
+ :return: an iterator over the directory data.
+ """
+ raise NotImplementedError(self.walkdirs)
+
+ def supports_content_filtering(self):
+ return False
+
+ def _content_filter_stack(self, path=None, file_id=None):
+ """The stack of content filters for a path if filtering is supported.
+
+ Readers will be applied in first-to-last order.
+ Writers will be applied in last-to-first order.
+ Either the path or the file-id needs to be provided.
+
+ :param path: path relative to the root of the tree
+ or None if unknown
+ :param file_id: file_id or None if unknown
+ :return: the list of filters - [] if there are none
+ """
+ filter_pref_names = filters._get_registered_names()
+ if len(filter_pref_names) == 0:
+ return []
+ if path is None:
+ path = self.id2path(file_id)
+ prefs = self.iter_search_rules([path], filter_pref_names).next()
+ stk = filters._get_filter_stack_for(prefs)
+ if 'filters' in debug.debug_flags:
+ trace.note(gettext("*** {0} content-filter: {1} => {2!r}").format(path, prefs, stk))
+ return stk
+
+ def _content_filter_stack_provider(self):
+ """A function that returns a stack of ContentFilters.
+
+ The function takes a path (relative to the top of the tree) and a
+ file-id as parameters.
+
+ :return: None if content filtering is not supported by this tree.
+ """
+ if self.supports_content_filtering():
+ return lambda path, file_id: \
+ self._content_filter_stack(path, file_id)
+ else:
+ return None
+
+ def iter_search_rules(self, path_names, pref_names=None,
+ _default_searcher=None):
+ """Find the preferences for filenames in a tree.
+
+ :param path_names: an iterable of paths to find attributes for.
+ Paths are given relative to the root of the tree.
+ :param pref_names: the list of preferences to lookup - None for all
+ :param _default_searcher: private parameter to assist testing - don't use
+ :return: an iterator of tuple sequences, one per path-name.
+ See _RulesSearcher.get_items for details on the tuple sequence.
+ """
+ if _default_searcher is None:
+ _default_searcher = rules._per_user_searcher
+ searcher = self._get_rules_searcher(_default_searcher)
+ if searcher is not None:
+ if pref_names is not None:
+ for path in path_names:
+ yield searcher.get_selected_items(path, pref_names)
+ else:
+ for path in path_names:
+ yield searcher.get_items(path)
+
+ def _get_rules_searcher(self, default_searcher):
+ """Get the RulesSearcher for this tree given the default one."""
+ searcher = default_searcher
+ return searcher
+
+
+class InventoryTree(Tree):
+ """A tree that relies on an inventory for its metadata.
+
+ Trees contain an `Inventory` object, and also know how to retrieve
+ file texts mentioned in the inventory, either from a working
+ directory or from a store.
+
+ It is possible for trees to contain files that are not described
+ in their inventory or vice versa; for this use `filenames()`.
+
+ Subclasses should set the _inventory attribute, which is considered
+ private to external API users.
+ """
+
+ def get_canonical_inventory_paths(self, paths):
+ """Like get_canonical_inventory_path() but works on multiple items.
+
+ :param paths: A sequence of paths relative to the root of the tree.
+ :return: A list of paths, with each item the corresponding input path
+ adjusted to account for existing elements that match case
+ insensitively.
+ """
+ return list(self._yield_canonical_inventory_paths(paths))
+
+ def get_canonical_inventory_path(self, path):
+ """Returns the first inventory item that case-insensitively matches path.
+
+ If a path matches exactly, it is returned. If no path matches exactly
+ but more than one path matches case-insensitively, it is implementation
+ defined which is returned.
+
+ If no path matches case-insensitively, the input path is returned, but
+ with as many of the path entries as do exist changed to their canonical
+ form.
+
+ If you need to resolve many names from the same tree, you should
+ use get_canonical_inventory_paths() to avoid O(N) behaviour.
+
+ :param path: A path relative to the root of the tree.
+ :return: The input path adjusted to account for existing elements
+ that match case insensitively.
+ """
+ return self._yield_canonical_inventory_paths([path]).next()
+
+ def _yield_canonical_inventory_paths(self, paths):
+ for path in paths:
+ # First, if the path as specified exists exactly, just use it.
+ if self.path2id(path) is not None:
+ yield path
+ continue
+ # go walkin...
+ cur_id = self.get_root_id()
+ cur_path = ''
+ bit_iter = iter(path.split("/"))
+ for elt in bit_iter:
+ lelt = elt.lower()
+ new_path = None
+ for child in self.iter_children(cur_id):
+ try:
+ # XXX: it seems like if the child is known to be in the
+ # tree, we shouldn't need to go from its id back to
+ # its path -- mbp 2010-02-11
+ #
+ # XXX: it seems like we could be more efficient
+ # by just directly looking up the original name and
+ # only then searching all children; also by not
+ # chopping paths so much. -- mbp 2010-02-11
+ child_base = os.path.basename(self.id2path(child))
+ if (child_base == elt):
+ # if we found an exact match, we can stop now; if
+ # we found an approximate match we need to keep
+ # searching because there might be an exact match
+ # later.
+ cur_id = child
+ new_path = osutils.pathjoin(cur_path, child_base)
+ break
+ elif child_base.lower() == lelt:
+ cur_id = child
+ new_path = osutils.pathjoin(cur_path, child_base)
+ except errors.NoSuchId:
+ # before a change is committed we can see this error...
+ continue
+ if new_path:
+ cur_path = new_path
+ else:
+ # got to the end of this directory and no entries matched.
+ # Return what matched so far, plus the rest as specified.
+ cur_path = osutils.pathjoin(cur_path, elt, *list(bit_iter))
+ break
+ yield cur_path
+ # all done.
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def _get_inventory(self):
+ return self._inventory
+
+ inventory = property(_get_inventory,
+ doc="Inventory of this Tree")
+
+ def _get_root_inventory(self):
+ return self._inventory
+
+ root_inventory = property(_get_root_inventory,
+ doc="Root inventory of this tree")
+
+ def _unpack_file_id(self, file_id):
+ """Find the inventory and inventory file id for a tree file id.
+
+ :param file_id: The tree file id, as bytestring or tuple
+ :return: Inventory and inventory file id
+ """
+ if isinstance(file_id, tuple):
+ if len(file_id) != 1:
+ raise ValueError("nested trees not yet supported: %r" % file_id)
+ file_id = file_id[0]
+ return self.root_inventory, file_id
+
+ @needs_read_lock
+ def path2id(self, path):
+ """Return the id for path in this tree."""
+ return self._path2inv_file_id(path)[1]
+
+ def _path2inv_file_id(self, path):
+ """Lookup a inventory and inventory file id by path.
+
+ :param path: Path to look up
+ :return: tuple with inventory and inventory file id
+ """
+ # FIXME: Support nested trees
+ return self.root_inventory, self.root_inventory.path2id(path)
+
+ def id2path(self, file_id):
+ """Return the path for a file id.
+
+ :raises NoSuchId:
+ """
+ inventory, file_id = self._unpack_file_id(file_id)
+ return inventory.id2path(file_id)
+
+ def has_id(self, file_id):
+ inventory, file_id = self._unpack_file_id(file_id)
+ return inventory.has_id(file_id)
+
+ def has_or_had_id(self, file_id):
+ inventory, file_id = self._unpack_file_id(file_id)
+ return inventory.has_id(file_id)
+
+ def all_file_ids(self):
+ return set(
+ [entry.file_id for path, entry in self.iter_entries_by_dir()])
+
+ @deprecated_method(deprecated_in((2, 4, 0)))
+ def __iter__(self):
+ return iter(self.all_file_ids())
+
+ def filter_unversioned_files(self, paths):
+ """Filter out paths that are versioned.
+
+ :return: set of paths.
+ """
+ # NB: we specifically *don't* call self.has_filename, because for
+ # WorkingTrees that can indicate files that exist on disk but that
+ # are not versioned.
+ return set((p for p in paths if self.path2id(p) is None))
+
+ @needs_read_lock
+ def iter_entries_by_dir(self, specific_file_ids=None, yield_parents=False):
+ """Walk the tree in 'by_dir' order.
+
+ This will yield each entry in the tree as a (path, entry) tuple.
+ The order that they are yielded is:
+
+ See Tree.iter_entries_by_dir for details.
+
+ :param yield_parents: If True, yield the parents from the root leading
+ down to specific_file_ids that have been requested. This has no
+ impact if specific_file_ids is None.
+ """
+ if specific_file_ids is None:
+ inventory_file_ids = None
+ else:
+ inventory_file_ids = []
+ for tree_file_id in specific_file_ids:
+ inventory, inv_file_id = self._unpack_file_id(tree_file_id)
+ if inventory is not self.root_inventory: # for now
+ raise AssertionError("%r != %r" % (
+ inventory, self.root_inventory))
+ inventory_file_ids.append(inv_file_id)
+ # FIXME: Handle nested trees
+ return self.root_inventory.iter_entries_by_dir(
+ specific_file_ids=inventory_file_ids, yield_parents=yield_parents)
+
+ @needs_read_lock
+ def iter_child_entries(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].children.itervalues()
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def get_file_by_path(self, path):
+ return self.get_file(self.path2id(path), path)
+
+ def iter_children(self, file_id, path=None):
+ """See Tree.iter_children."""
+ entry = self.iter_entries_by_dir([file_id]).next()[1]
+ for child in getattr(entry, 'children', {}).itervalues():
+ yield child.file_id
+
+
+def find_ids_across_trees(filenames, trees, require_versioned=True):
+ """Find the ids corresponding to specified filenames.
+
+ All matches in all trees will be used, and all children of matched
+ directories will be used.
+
+ :param filenames: The filenames to find file_ids for (if None, returns
+ None)
+ :param trees: The trees to find file_ids within
+ :param require_versioned: if true, all specified filenames must occur in
+ at least one tree.
+ :return: a set of file ids for the specified filenames and their children.
+ """
+ if not filenames:
+ return None
+ specified_path_ids = _find_ids_across_trees(filenames, trees,
+ require_versioned)
+ return _find_children_across_trees(specified_path_ids, trees)
+
+
+def _find_ids_across_trees(filenames, trees, require_versioned):
+ """Find the ids corresponding to specified filenames.
+
+ All matches in all trees will be used, but subdirectories are not scanned.
+
+ :param filenames: The filenames to find file_ids for
+ :param trees: The trees to find file_ids within
+ :param require_versioned: if true, all specified filenames must occur in
+ at least one tree.
+ :return: a set of file ids for the specified filenames
+ """
+ not_versioned = []
+ interesting_ids = set()
+ for tree_path in filenames:
+ not_found = True
+ for tree in trees:
+ file_id = tree.path2id(tree_path)
+ if file_id is not None:
+ interesting_ids.add(file_id)
+ not_found = False
+ if not_found:
+ not_versioned.append(tree_path)
+ if len(not_versioned) > 0 and require_versioned:
+ raise errors.PathsNotVersionedError(not_versioned)
+ return interesting_ids
+
+
+def _find_children_across_trees(specified_ids, trees):
+ """Return a set including specified ids and their children.
+
+ All matches in all trees will be used.
+
+ :param trees: The trees to find file_ids within
+ :return: a set containing all specified ids and their children
+ """
+ interesting_ids = set(specified_ids)
+ pending = interesting_ids
+ # now handle children of interesting ids
+ # we loop so that we handle all children of each id in both trees
+ while len(pending) > 0:
+ new_pending = set()
+ for file_id in pending:
+ for tree in trees:
+ if not tree.has_or_had_id(file_id):
+ continue
+ for child_id in tree.iter_children(file_id):
+ if child_id not in interesting_ids:
+ new_pending.add(child_id)
+ interesting_ids.update(new_pending)
+ pending = new_pending
+ return interesting_ids
+
+
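A hedged usage sketch of the helper above; 'wt' and 'basis' stand for two
already-obtained Tree objects and 'doc'/'README' are illustrative paths:

    # Resolve user-supplied paths to file ids across both trees, pulling in
    # the children of any matched directory as documented above.
    ids = find_ids_across_trees(['doc', 'README'], [wt, basis],
                                require_versioned=False)
    # 'ids' contains the ids for 'doc', 'README' and everything under 'doc'
    # in either tree; with require_versioned=False, unversioned paths are
    # silently ignored instead of raising PathsNotVersionedError.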
+class InterTree(InterObject):
+ """This class represents operations taking place between two Trees.
+
+ Its instances have methods like 'compare' and contain references to the
+ source and target trees these operations are to be carried out on.
+
+ Clients of bzrlib should not need to use InterTree directly, rather they
+ should use the convenience methods on Tree such as 'Tree.compare()' which
+ will pass through to InterTree as appropriate.
+ """
+
+ # Formats that will be used to test this InterTree. If both are
+ # None, this InterTree will not be tested (e.g. because a complex
+ # setup is required)
+ _matching_from_tree_format = None
+ _matching_to_tree_format = None
+
+ _optimisers = []
+
+ @classmethod
+ def is_compatible(kls, source, target):
+ # The default implementation is naive and uses the public API, so
+ # it works for all trees.
+ return True
+
+ def _changes_from_entries(self, source_entry, target_entry,
+ source_path=None, target_path=None):
+ """Generate a iter_changes tuple between source_entry and target_entry.
+
+ :param source_entry: An inventory entry from self.source, or None.
+ :param target_entry: An inventory entry from self.target, or None.
+ :param source_path: The path of source_entry, if known. If not known
+ it will be looked up.
+ :param target_path: The path of target_entry, if known. If not known
+ it will be looked up.
+ :return: A tuple, item 0 of which is an iter_changes result tuple, and
+ item 1 is True if there are any changes in the result tuple.
+ """
+ if source_entry is None:
+ if target_entry is None:
+ return None
+ file_id = target_entry.file_id
+ else:
+ file_id = source_entry.file_id
+ if source_entry is not None:
+ source_versioned = True
+ source_name = source_entry.name
+ source_parent = source_entry.parent_id
+ if source_path is None:
+ source_path = self.source.id2path(file_id)
+ source_kind, source_executable, source_stat = \
+ self.source._comparison_data(source_entry, source_path)
+ else:
+ source_versioned = False
+ source_name = None
+ source_parent = None
+ source_kind = None
+ source_executable = None
+ if target_entry is not None:
+ target_versioned = True
+ target_name = target_entry.name
+ target_parent = target_entry.parent_id
+ if target_path is None:
+ target_path = self.target.id2path(file_id)
+ target_kind, target_executable, target_stat = \
+ self.target._comparison_data(target_entry, target_path)
+ else:
+ target_versioned = False
+ target_name = None
+ target_parent = None
+ target_kind = None
+ target_executable = None
+ versioned = (source_versioned, target_versioned)
+ kind = (source_kind, target_kind)
+ changed_content = False
+ if source_kind != target_kind:
+ changed_content = True
+ elif source_kind == 'file':
+ if not self.file_content_matches(file_id, file_id, source_path,
+ target_path, source_stat, target_stat):
+ changed_content = True
+ elif source_kind == 'symlink':
+ if (self.source.get_symlink_target(file_id) !=
+ self.target.get_symlink_target(file_id)):
+ changed_content = True
+ elif source_kind == 'tree-reference':
+ if (self.source.get_reference_revision(file_id, source_path)
+ != self.target.get_reference_revision(file_id, target_path)):
+ changed_content = True
+ parent = (source_parent, target_parent)
+ name = (source_name, target_name)
+ executable = (source_executable, target_executable)
+ if (changed_content is not False or versioned[0] != versioned[1]
+ or parent[0] != parent[1] or name[0] != name[1] or
+ executable[0] != executable[1]):
+ changes = True
+ else:
+ changes = False
+ return (file_id, (source_path, target_path), changed_content,
+ versioned, parent, name, kind, executable), changes
+
+ @needs_read_lock
+ def compare(self, want_unchanged=False, specific_files=None,
+ extra_trees=None, require_versioned=False, include_root=False,
+ want_unversioned=False):
+ """Return the changes from source to target.
+
+ :return: A TreeDelta.
+ :param specific_files: An optional list of file paths to restrict the
+ comparison to. When mapping filenames to ids, all matches in all
+ trees (including optional extra_trees) are used, and all children of
+ matched directories are included.
+ :param want_unchanged: An optional boolean requesting the inclusion of
+ unchanged entries in the result.
+ :param extra_trees: An optional list of additional trees to use when
+ mapping the contents of specific_files (paths) to file_ids.
+ :param require_versioned: An optional boolean (defaults to False). When
+ supplied and True all the 'specific_files' must be versioned, or
+ a PathsNotVersionedError will be thrown.
+ :param want_unversioned: Scan for unversioned paths.
+ """
+ trees = (self.source,)
+ if extra_trees is not None:
+ trees = trees + tuple(extra_trees)
+ # target is usually the newer tree:
+ specific_file_ids = self.target.paths2ids(specific_files, trees,
+ require_versioned=require_versioned)
+ if specific_files and not specific_file_ids:
+ # All files are unversioned, so just return an empty delta
+ # _compare_trees would think we want a complete delta
+ result = delta.TreeDelta()
+ fake_entry = inventory.InventoryFile('unused', 'unused', 'unused')
+ result.unversioned = [(path, None,
+ self.target._comparison_data(fake_entry, path)[0]) for path in
+ specific_files]
+ return result
+ return delta._compare_trees(self.source, self.target, want_unchanged,
+ specific_files, include_root, extra_trees=extra_trees,
+ require_versioned=require_versioned,
+ want_unversioned=want_unversioned)
+
+ def iter_changes(self, include_unchanged=False,
+ specific_files=None, pb=None, extra_trees=[],
+ require_versioned=True, want_unversioned=False):
+ """Generate an iterator of changes between trees.
+
+ A tuple is returned:
+ (file_id, (path_in_source, path_in_target),
+ changed_content, versioned, parent, name, kind,
+ executable)
+
+ Changed_content is True if the file's content has changed. This
+ includes changes to its kind, and to a symlink's target.
+
+ versioned, parent, name, kind, executable are tuples of (from, to).
+ If a file is missing in a tree, its kind is None.
+
+ Iteration is done in parent-to-child order, relative to the target
+ tree.
+
+ There is no guarantee that all paths are in sorted order: the
+ requirement to expand the search due to renames may result in children
+ that should be found early being found late in the search, after
+ lexically later results have been returned.
+ :param require_versioned: Raise errors.PathsNotVersionedError if a
+ path in the specific_files list is not versioned in one of
+ source, target or extra_trees.
+ :param specific_files: An optional list of file paths to restrict the
+ comparison to. When mapping filenames to ids, all matches in all
+ trees (including optional extra_trees) are used, and all children
+ of matched directories are included. The parents in the target tree
+ of the specific files up to and including the root of the tree are
+ always evaluated for changes too.
+ :param want_unversioned: Should unversioned files be returned in the
+ output. An unversioned file is defined as one with (False, False)
+ for the versioned pair.
+ """
+ lookup_trees = [self.source]
+ if extra_trees:
+ lookup_trees.extend(extra_trees)
+ # The ids of items we need to examine to ensure delta consistency.
+ precise_file_ids = set()
+ changed_file_ids = []
+ if specific_files == []:
+ specific_file_ids = []
+ else:
+ specific_file_ids = self.target.paths2ids(specific_files,
+ lookup_trees, require_versioned=require_versioned)
+ if specific_files is not None:
+ # reparented or added entries must have their parents included
+ # so that valid deltas can be created. The seen_parents set
+ # tracks the parents that we need to have.
+ # The seen_dirs set tracks directory entries we've yielded.
+ # After outputting the versioned entries in to_entries we take the set
+ # difference of the two seen sets and start checking parents.
+ seen_parents = set()
+ seen_dirs = set()
+ if want_unversioned:
+ all_unversioned = sorted([(p.split('/'), p) for p in
+ self.target.extras()
+ if specific_files is None or
+ osutils.is_inside_any(specific_files, p)])
+ all_unversioned = collections.deque(all_unversioned)
+ else:
+ all_unversioned = collections.deque()
+ to_paths = {}
+ from_entries_by_dir = list(self.source.iter_entries_by_dir(
+ specific_file_ids=specific_file_ids))
+ from_data = dict((e.file_id, (p, e)) for p, e in from_entries_by_dir)
+ to_entries_by_dir = list(self.target.iter_entries_by_dir(
+ specific_file_ids=specific_file_ids))
+ num_entries = len(from_entries_by_dir) + len(to_entries_by_dir)
+ entry_count = 0
+ # the unversioned path lookup only occurs on real trees - where there
+ # can be extras. So the fake_entry is solely used to look up
+ # executability values when the execute bit is not supported.
+ fake_entry = inventory.InventoryFile('unused', 'unused', 'unused')
+ for target_path, target_entry in to_entries_by_dir:
+ while (all_unversioned and
+ all_unversioned[0][0] < target_path.split('/')):
+ unversioned_path = all_unversioned.popleft()
+ target_kind, target_executable, target_stat = \
+ self.target._comparison_data(fake_entry, unversioned_path[1])
+ yield (None, (None, unversioned_path[1]), True, (False, False),
+ (None, None),
+ (None, unversioned_path[0][-1]),
+ (None, target_kind),
+ (None, target_executable))
+ source_path, source_entry = from_data.get(target_entry.file_id,
+ (None, None))
+ result, changes = self._changes_from_entries(source_entry,
+ target_entry, source_path=source_path, target_path=target_path)
+ to_paths[result[0]] = result[1][1]
+ entry_count += 1
+ if result[3][0]:
+ entry_count += 1
+ if pb is not None:
+ pb.update('comparing files', entry_count, num_entries)
+ if changes or include_unchanged:
+ if specific_file_ids is not None:
+ new_parent_id = result[4][1]
+ precise_file_ids.add(new_parent_id)
+ changed_file_ids.append(result[0])
+ yield result
+ # Ensure correct behaviour for reparented/added specific files.
+ if specific_files is not None:
+ # Record output dirs
+ if result[6][1] == 'directory':
+ seen_dirs.add(result[0])
+ # Record parents of reparented/added entries.
+ versioned = result[3]
+ parents = result[4]
+ if not versioned[0] or parents[0] != parents[1]:
+ seen_parents.add(parents[1])
+ while all_unversioned:
+ # yield any trailing unversioned paths
+ unversioned_path = all_unversioned.popleft()
+ to_kind, to_executable, to_stat = \
+ self.target._comparison_data(fake_entry, unversioned_path[1])
+ yield (None, (None, unversioned_path[1]), True, (False, False),
+ (None, None),
+ (None, unversioned_path[0][-1]),
+ (None, to_kind),
+ (None, to_executable))
+ # Yield all remaining source paths
+ for path, from_entry in from_entries_by_dir:
+ file_id = from_entry.file_id
+ if file_id in to_paths:
+ # already returned
+ continue
+ if not self.target.has_id(file_id):
+ # common case - paths we have not emitted are not present in
+ # target.
+ to_path = None
+ else:
+ to_path = self.target.id2path(file_id)
+ entry_count += 1
+ if pb is not None:
+ pb.update('comparing files', entry_count, num_entries)
+ versioned = (True, False)
+ parent = (from_entry.parent_id, None)
+ name = (from_entry.name, None)
+ from_kind, from_executable, stat_value = \
+ self.source._comparison_data(from_entry, path)
+ kind = (from_kind, None)
+ executable = (from_executable, None)
+ changed_content = from_kind is not None
+ # the parent's path is necessarily known at this point.
+ changed_file_ids.append(file_id)
+ yield(file_id, (path, to_path), changed_content, versioned, parent,
+ name, kind, executable)
+ changed_file_ids = set(changed_file_ids)
+ if specific_file_ids is not None:
+ for result in self._handle_precise_ids(precise_file_ids,
+ changed_file_ids):
+ yield result
+
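To make the tuple layout above concrete, a hedged sketch of consuming the
result through the Tree.iter_changes wrapper defined earlier in this file;
'basis' and 'wt' are assumed, comparable Tree objects:

    for (file_id, (old_path, new_path), changed_content, versioned, parent,
         name, kind, executable) in wt.iter_changes(basis):
        if changed_content:
            print('content changed: %s' % (new_path or old_path))
        if versioned == (False, True):
            print('newly versioned: %s' % new_path)
        elif name[0] != name[1] or parent[0] != parent[1]:
            print('renamed or moved: %s -> %s' % (old_path, new_path))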
+ def _get_entry(self, tree, file_id):
+ """Get an inventory entry from a tree, with missing entries as None.
+
+ If the tree raises NotImplementedError on accessing .inventory, then
+ this is worked around using iter_entries_by_dir on just the file id
+ desired.
+
+ :param tree: The tree to lookup the entry in.
+ :param file_id: The file_id to lookup.
+ """
+ try:
+ inventory = tree.root_inventory
+ except NotImplementedError:
+ # No inventory available.
+ try:
+ iterator = tree.iter_entries_by_dir(specific_file_ids=[file_id])
+ return iterator.next()[1]
+ except StopIteration:
+ return None
+ else:
+ try:
+ return inventory[file_id]
+ except errors.NoSuchId:
+ return None
+
+ def _handle_precise_ids(self, precise_file_ids, changed_file_ids,
+ discarded_changes=None):
+ """Fill out a partial iter_changes to be consistent.
+
+ :param precise_file_ids: The file ids of parents that were seen during
+ the iter_changes.
+ :param changed_file_ids: The file ids of already emitted items.
+ :param discarded_changes: An optional dict of precalculated
+ iter_changes items which the partial iter_changes had not output
+ but had calculated.
+ :return: A generator of iter_changes items to output.
+ """
+ # process parents of things that had changed under the user's
+ # requested paths to prevent incorrect paths or parent ids which
+ # aren't in the tree.
+ while precise_file_ids:
+ precise_file_ids.discard(None)
+ # Don't emit file_ids twice
+ precise_file_ids.difference_update(changed_file_ids)
+ if not precise_file_ids:
+ break
+ # If there was something at a given output path in source, we
+ # have to include the entry from source in the delta, or we would
+ # be putting this entry into a used path.
+ paths = []
+ for parent_id in precise_file_ids:
+ try:
+ paths.append(self.target.id2path(parent_id))
+ except errors.NoSuchId:
+ # This id has been dragged in from the source by delta
+ # expansion and isn't present in target at all: we don't
+ # need to check for path collisions on it.
+ pass
+ for path in paths:
+ old_id = self.source.path2id(path)
+ precise_file_ids.add(old_id)
+ precise_file_ids.discard(None)
+ current_ids = precise_file_ids
+ precise_file_ids = set()
+ # We have to emit all of precise_file_ids that have been altered.
+ # We may have to output the children of some of those ids if any
+ # directories have stopped being directories.
+ for file_id in current_ids:
+ # Examine file_id
+ if discarded_changes:
+ result = discarded_changes.get(file_id)
+ old_entry = None
+ else:
+ result = None
+ if result is None:
+ old_entry = self._get_entry(self.source, file_id)
+ new_entry = self._get_entry(self.target, file_id)
+ result, changes = self._changes_from_entries(
+ old_entry, new_entry)
+ else:
+ changes = True
+ # Get this parent's parent to examine.
+ new_parent_id = result[4][1]
+ precise_file_ids.add(new_parent_id)
+ if changes:
+ if (result[6][0] == 'directory' and
+ result[6][1] != 'directory'):
+ # This stopped being a directory, the old children have
+ # to be included.
+ if old_entry is None:
+ # Reusing a discarded change.
+ old_entry = self._get_entry(self.source, file_id)
+ precise_file_ids.update(
+ self.source.iter_children(file_id))
+ changed_file_ids.add(result[0])
+ yield result
+
+ @needs_read_lock
+ def file_content_matches(self, source_file_id, target_file_id,
+ source_path=None, target_path=None, source_stat=None, target_stat=None):
+ """Check if two files are the same in the source and target trees.
+
+ This only checks that the contents of the files are the same,
+ it does not touch anything else.
+
+ :param source_file_id: File id of the file in the source tree
+ :param target_file_id: File id of the file in the target tree
+ :param source_path: Path of the file in the source tree
+ :param target_path: Path of the file in the target tree
+ :param source_stat: Optional stat value of the file in the source tree
+ :param target_stat: Optional stat value of the file in the target tree
+ :return: Boolean indicating whether the files have the same contents
+ """
+ source_verifier_kind, source_verifier_data = self.source.get_file_verifier(
+ source_file_id, source_path, source_stat)
+ target_verifier_kind, target_verifier_data = self.target.get_file_verifier(
+ target_file_id, target_path, target_stat)
+ if source_verifier_kind == target_verifier_kind:
+ return (source_verifier_data == target_verifier_data)
+ # Fall back to SHA1 for now
+ if source_verifier_kind != "SHA1":
+ source_sha1 = self.source.get_file_sha1(source_file_id,
+ source_path, source_stat)
+ else:
+ source_sha1 = source_verifier_data
+ if target_verifier_kind != "SHA1":
+ target_sha1 = self.target.get_file_sha1(target_file_id,
+ target_path, target_stat)
+ else:
+ target_sha1 = target_verifier_data
+ return (source_sha1 == target_sha1)
+
+InterTree.register_optimiser(InterTree)
+
+
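A hedged sketch of the content check defined just above, via the optimiser
lookup used elsewhere in this file; 'source_tree', 'target_tree' and
'a_file_id' are assumed to exist:

    intertree = InterTree.get(source_tree, target_tree)
    # Compares verifier data directly when both trees report the same
    # verifier kind, and only falls back to SHA1 otherwise.
    if intertree.file_content_matches(a_file_id, a_file_id):
        print('file content is identical in both trees')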
+class MultiWalker(object):
+ """Walk multiple trees simultaneously, getting combined results."""
+
+ # Note: This could be written to not assume you can do out-of-order
+ # lookups. Instead any nodes that don't match in all trees could be
+ # marked as 'deferred', and then returned in the final cleanup loop.
+ # For now, I think it is "nicer" to return things as close to the
+ # "master_tree" order as we can.
+
+ def __init__(self, master_tree, other_trees):
+ """Create a new MultiWalker.
+
+ All trees being walked must implement "iter_entries_by_dir()", such
+ that they yield (path, object) tuples, where that object will have a
+ '.file_id' member, that can be used to check equality.
+
+ :param master_tree: All trees will be 'slaved' to the master_tree such
+ that nodes in master_tree will be used as 'first-pass' sync points.
+ Any nodes that aren't in master_tree will be merged in a second
+ pass.
+ :param other_trees: A list of other trees to walk simultaneously.
+ """
+ self._master_tree = master_tree
+ self._other_trees = other_trees
+
+ # Keep track of any nodes that were properly processed just out of
+ # order, that way we don't return them at the end, we don't have to
+ # track *all* processed file_ids, just the out-of-order ones
+ self._out_of_order_processed = set()
+
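A hedged sketch of driving the walker; 'master_tree' and 'other_tree' are
assumed to be trees that implement iter_entries_by_dir():

    walker = MultiWalker(master_tree, [other_tree])
    for path, file_id, master_ie, other_values in walker.iter_all():
        # other_values holds one (path, inventory_entry) pair per other
        # tree, in the order the trees were passed; (None, None) means the
        # file id is absent from that tree.
        other_path, other_ie = other_values[0]
        if other_ie is None:
            print('%s is only present in the master tree' % path)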
+ @staticmethod
+ def _step_one(iterator):
+ """Step an iter_entries_by_dir iterator.
+
+ :return: (has_more, path, ie)
+ If has_more is False, path and ie will be None.
+ """
+ try:
+ path, ie = iterator.next()
+ except StopIteration:
+ return False, None, None
+ else:
+ return True, path, ie
+
+ @staticmethod
+ def _cmp_path_by_dirblock(path1, path2):
+ """Compare two paths based on what directory they are in.
+
+ This generates a sort order, such that all children of a directory are
+ sorted together, and grandchildren are in the same order as the
+ children appear. But all grandchildren come after all children.
+
+ :param path1: first path
+ :param path2: the second path
+ :return: negative number if ``path1`` comes first,
+ 0 if paths are equal
+ and a positive number if ``path2`` sorts first
+ """
+ # Shortcut this special case
+ if path1 == path2:
+ return 0
+ # This is stolen from _dirstate_helpers_py.py, only switching it to
+ # Unicode objects. Consider using encode_utf8() and then using the
+ # optimized versions, or maybe writing optimized unicode versions.
+ if not isinstance(path1, unicode):
+ raise TypeError("'path1' must be a unicode string, not %s: %r"
+ % (type(path1), path1))
+ if not isinstance(path2, unicode):
+ raise TypeError("'path2' must be a unicode string, not %s: %r"
+ % (type(path2), path2))
+ return cmp(MultiWalker._path_to_key(path1),
+ MultiWalker._path_to_key(path2))
+
+ @staticmethod
+ def _path_to_key(path):
+ dirname, basename = osutils.split(path)
+ return (dirname.split(u'/'), basename)
+
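The key function above can be checked directly against the ordering
documented on Tree.iter_entries_by_dir earlier in this file; a small sketch,
assuming module-level access to MultiWalker from bzrlib.tree:

    paths = [u'a/b/c', u'f/g', u'a/d/e', u'a', u'a/d', u'f', u'a/b']
    print(sorted(paths, key=MultiWalker._path_to_key))
    # [u'a', u'f', u'a/b', u'a/d', u'a/b/c', u'a/d/e', u'f/g']
    # i.e. all children of a directory sort before any of its grandchildren,
    # matching the 'by_dir' order described for iter_entries_by_dir.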
+ def _lookup_by_file_id(self, extra_entries, other_tree, file_id):
+ """Lookup an inventory entry by file_id.
+
+ This is called when an entry is missing in the normal order.
+ Generally this is because a file was either renamed, or it was
+ deleted/added. If the entry was found in the inventory and not in
+ extra_entries, it will be added to self._out_of_order_processed
+
+ :param extra_entries: A dictionary of {file_id: (path, ie)}. This
+ should be filled with entries that were found before they were
+ used. If file_id is present, it will be removed from the
+ dictionary.
+ :param other_tree: The Tree to search, in case we didn't find the entry
+ yet.
+ :param file_id: The file_id to look for
+ :return: (path, ie) if found or (None, None) if not present.
+ """
+ if file_id in extra_entries:
+ return extra_entries.pop(file_id)
+ # TODO: Is id2path better as the first call, or is
+ # inventory[file_id] better as a first check?
+ try:
+ cur_path = other_tree.id2path(file_id)
+ except errors.NoSuchId:
+ cur_path = None
+ if cur_path is None:
+ return (None, None)
+ else:
+ self._out_of_order_processed.add(file_id)
+ cur_ie = other_tree.root_inventory[file_id]
+ return (cur_path, cur_ie)
+
+ def iter_all(self):
+ """Match up the values in the different trees."""
+ for result in self._walk_master_tree():
+ yield result
+ self._finish_others()
+ for result in self._walk_others():
+ yield result
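+
+ # A minimal usage sketch (illustrative; master_tree and other_tree stand
+ # for any Tree objects implementing iter_entries_by_dir()):
+ #
+ # walker = MultiWalker(master_tree, [other_tree])
+ # for path, file_id, master_ie, other_values in walker.iter_all():
+ #     # other_values holds one (path, inventory_entry) pair per tree in
+ #     # other_trees, or (None, None) where the file_id is absent;
+ #     # master_ie is None for entries not present in master_tree.
+ #     ...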
+
+ def _walk_master_tree(self):
+ """First pass, walk all trees in lock-step.
+
+ When we are done, all nodes in the master_tree will have been
+ processed. _other_walkers, _other_entries, and _others_extra will be
+ set on 'self' for future processing.
+ """
+ # This iterator has the most "inlining" done, because it tends to touch
+ # every file in the tree, while the others only hit nodes that don't
+ # match.
+ master_iterator = self._master_tree.iter_entries_by_dir()
+
+ other_walkers = [other.iter_entries_by_dir()
+ for other in self._other_trees]
+ other_entries = [self._step_one(walker) for walker in other_walkers]
+ # Track extra nodes in the other trees
+ others_extra = [{} for i in xrange(len(self._other_trees))]
+
+ master_has_more = True
+ step_one = self._step_one
+ lookup_by_file_id = self._lookup_by_file_id
+ out_of_order_processed = self._out_of_order_processed
+
+ while master_has_more:
+ (master_has_more, path, master_ie) = step_one(master_iterator)
+ if not master_has_more:
+ break
+
+ file_id = master_ie.file_id
+ other_values = []
+ other_values_append = other_values.append
+ next_other_entries = []
+ next_other_entries_append = next_other_entries.append
+ for idx, (other_has_more, other_path, other_ie) in enumerate(other_entries):
+ if not other_has_more:
+ other_values_append(lookup_by_file_id(
+ others_extra[idx], self._other_trees[idx], file_id))
+ next_other_entries_append((False, None, None))
+ elif file_id == other_ie.file_id:
+ # This is the critical code path, as most of the entries
+ # should match between most trees.
+ other_values_append((other_path, other_ie))
+ next_other_entries_append(step_one(other_walkers[idx]))
+ else:
+ # This walker did not match, step it until it either
+ # matches, or we know we are past the current walker.
+ other_walker = other_walkers[idx]
+ other_extra = others_extra[idx]
+ while (other_has_more and
+ self._cmp_path_by_dirblock(other_path, path) < 0):
+ other_file_id = other_ie.file_id
+ if other_file_id not in out_of_order_processed:
+ other_extra[other_file_id] = (other_path, other_ie)
+ other_has_more, other_path, other_ie = \
+ step_one(other_walker)
+ if other_has_more and other_ie.file_id == file_id:
+ # We ended up walking to this point, match and step
+ # again
+ other_values_append((other_path, other_ie))
+ other_has_more, other_path, other_ie = \
+ step_one(other_walker)
+ else:
+ # This record isn't in the normal order, see if it
+ # exists at all.
+ other_values_append(lookup_by_file_id(
+ other_extra, self._other_trees[idx], file_id))
+ next_other_entries_append((other_has_more, other_path,
+ other_ie))
+ other_entries = next_other_entries
+
+ # We've matched all the walkers, yield this datapoint
+ yield path, file_id, master_ie, other_values
+ self._other_walkers = other_walkers
+ self._other_entries = other_entries
+ self._others_extra = others_extra
+
+ def _finish_others(self):
+ """Finish walking the other iterators, so we get all entries."""
+ for idx, info in enumerate(self._other_entries):
+ other_extra = self._others_extra[idx]
+ (other_has_more, other_path, other_ie) = info
+ while other_has_more:
+ other_file_id = other_ie.file_id
+ if other_file_id not in self._out_of_order_processed:
+ other_extra[other_file_id] = (other_path, other_ie)
+ other_has_more, other_path, other_ie = \
+ self._step_one(self._other_walkers[idx])
+ del self._other_entries
+
+ def _walk_others(self):
+ """Finish up by walking all the 'deferred' nodes."""
+ # TODO: One alternative would be to grab all possible unprocessed
+ # file_ids, and then sort by path, and then yield them. That
+ # might ensure better ordering, in case a caller strictly
+ # requires parents before children.
+ for idx, other_extra in enumerate(self._others_extra):
+ others = sorted(other_extra.itervalues(),
+ key=lambda x: self._path_to_key(x[0]))
+ for other_path, other_ie in others:
+ file_id = other_ie.file_id
+ # We don't need to check out_of_order_processed here, because
+ # the lookup_by_file_id will be removing anything processed
+ # from the extras cache
+ other_extra.pop(file_id)
+ other_values = [(None, None) for i in xrange(idx)]
+ other_values.append((other_path, other_ie))
+ for alt_idx, alt_extra in enumerate(self._others_extra[idx+1:]):
+ alt_idx = alt_idx + idx + 1
+ alt_extra = self._others_extra[alt_idx]
+ alt_tree = self._other_trees[alt_idx]
+ other_values.append(self._lookup_by_file_id(
+ alt_extra, alt_tree, file_id))
+ yield other_path, file_id, None, other_values
diff --git a/bzrlib/treebuilder.py b/bzrlib/treebuilder.py
new file mode 100644
index 0000000..7df02f4
--- /dev/null
+++ b/bzrlib/treebuilder.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2006, 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""TreeBuilder helper class.
+
+TreeBuilders are used to build trees of various shapes or properties. This
+can be extremely useful in testing for instance.
+"""
+
+from __future__ import absolute_import
+
+from bzrlib import errors
+
+
+class TreeBuilder(object):
+ """A TreeBuilder allows the creation of specific content in one tree at a
+ time.
+ """
+
+ def __init__(self):
+ """Construct a TreeBuilder."""
+ self._tree = None
+ self._root_done = False
+
+ def build(self, recipe):
+ """Build recipe into the current tree.
+
+ :param recipe: A sequence of paths. For each path, the corresponding
+ path in the current tree is created and added. If the path ends in
+ '/' then a directory is added, otherwise a regular file is added.
+ """
+ self._ensure_building()
+ if not self._root_done:
+ self._tree.add('', 'root-id', 'directory')
+ self._root_done = True
+ for name in recipe:
+ if name[-1] == '/':
+ self._tree.mkdir(name[:-1])
+ else:
+ end = '\n'
+ content = "contents of %s%s" % (name.encode('utf-8'), end)
+ self._tree.add(name, None, 'file')
+ file_id = self._tree.path2id(name)
+ self._tree.put_file_bytes_non_atomic(file_id, content)
+
+ def _ensure_building(self):
+ """Raise NotBuilding if there is no current tree being built."""
+ if self._tree is None:
+ raise errors.NotBuilding
+
+ def finish_tree(self):
+ """Finish building the current tree."""
+ self._ensure_building()
+ tree = self._tree
+ self._tree = None
+ tree.unlock()
+
+ def start_tree(self, tree):
+ """Start building on tree.
+
+ :param tree: A tree to start building on. It must provide the
+ MutableTree interface.
+ """
+ if self._tree is not None:
+ raise errors.AlreadyBuilding
+ self._tree = tree
+ self._tree.lock_tree_write()
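+
+
+# A rough usage sketch (illustrative only; 'tree' stands for any object
+# providing the MutableTree interface, such as a test working tree):
+#
+# builder = TreeBuilder()
+# builder.start_tree(tree)
+# builder.build(['foo', 'dir/', 'dir/bar'])
+# builder.finish_tree()
+#
+# Paths ending in '/' become directories; other paths become files whose
+# content is "contents of <path>\n".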
diff --git a/bzrlib/tsort.py b/bzrlib/tsort.py
new file mode 100644
index 0000000..54c7815
--- /dev/null
+++ b/bzrlib/tsort.py
@@ -0,0 +1,713 @@
+# Copyright (C) 2005, 2006, 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Topological sorting routines."""
+
+from __future__ import absolute_import
+
+
+from bzrlib import (
+ errors,
+ graph as _mod_graph,
+ revision as _mod_revision,
+ )
+
+
+__all__ = ["topo_sort", "TopoSorter", "merge_sort", "MergeSorter"]
+
+
+def topo_sort(graph):
+ """Topological sort a graph.
+
+ graph -- sequence of pairs of node->parents_list.
+
+ The result is a list of node names, such that all parents come before their
+ children.
+
+ node identifiers can be any hashable object, and are typically strings.
+
+ This function has the same purpose as the TopoSorter class, but uses a
+ different algorithm to sort the graph. That means that while both return a
+ list with parents before their child nodes, the exact ordering can be
+ different.
+
+ topo_sort is faster when the whole list is needed; when iterating
+ over only part of the list, TopoSorter.iter_topo_order should be used.
+ """
+ kg = _mod_graph.KnownGraph(dict(graph))
+ return kg.topo_sort()
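+
+# For example (a sketch; the exact ordering can vary, but parents always
+# come before their children):
+#
+# topo_sort([('C', ['B']), ('B', ['A']), ('A', [])])
+# => ['A', 'B', 'C']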
+
+
+class TopoSorter(object):
+
+ def __init__(self, graph):
+ """Topological sorting of a graph.
+
+ :param graph: sequence of pairs of node_name->parent_names_list.
+ i.e. [('C', ['B']), ('B', ['A']), ('A', [])]
+ For this input the output from the sort or
+ iter_topo_order routines will be:
+ 'A', 'B', 'C'
+
+ node identifiers can be any hashable object, and are typically strings.
+
+ If you have a graph like [('a', ['b']), ('a', ['c'])] this will only use
+ one of the two values for 'a'.
+
+ The graph is sorted lazily: until you iterate or sort, the input is
+ not processed other than to create an internal representation.
+
+ iteration or sorting may raise GraphCycleError if a cycle is present
+ in the graph.
+ """
+ # store a dict of the graph.
+ self._graph = dict(graph)
+
+ def sorted(self):
+ """Sort the graph and return as a list.
+
+ After calling this the sorter is empty and you must create a new one.
+ """
+ return list(self.iter_topo_order())
+
+### Useful if fiddling with this code.
+### # cross check
+### sorted_names = list(self.iter_topo_order())
+### for index in range(len(sorted_names)):
+### rev = sorted_names[index]
+### for left_index in range(index):
+### if rev in self.original_graph[sorted_names[left_index]]:
+### print "revision in parent list of earlier revision"
+### import pdb;pdb.set_trace()
+
+ def iter_topo_order(self):
+ """Yield the nodes of the graph in a topological order.
+
+ After finishing iteration the sorter is empty and you cannot continue
+ iteration.
+ """
+ graph = self._graph
+ visitable = set(graph)
+
+ # this is a stack storing the depth first search into the graph.
+ pending_node_stack = []
+ # at each level of 'recursion' we have to check each parent. This
+ # stack stores the parents we have not yet checked for the node at the
+ # matching depth in pending_node_stack
+ pending_parents_stack = []
+
+ # this is a set of the completed nodes for fast checking whether a
+ # parent in a node we are processing on the stack has already been
+ # emitted and thus can be skipped.
+ completed_node_names = set()
+
+ while graph:
+ # now pick a random node in the source graph, and transfer it to the
+ # top of the depth first search stack of pending nodes.
+ node_name, parents = graph.popitem()
+ pending_node_stack.append(node_name)
+ pending_parents_stack.append(list(parents))
+
+ # loop until pending_node_stack is empty
+ while pending_node_stack:
+ parents_to_visit = pending_parents_stack[-1]
+ # if there are no parents left, the revision is done
+ if not parents_to_visit:
+ # append the revision to the topo sorted list
+ # all the node's parents have been added to the output,
+ # now we can add it to the output.
+ popped_node = pending_node_stack.pop()
+ pending_parents_stack.pop()
+ completed_node_names.add(popped_node)
+ yield popped_node
+ else:
+ # recurse depth first into a single parent
+ next_node_name = parents_to_visit.pop()
+
+ if next_node_name in completed_node_names:
+ # parent was already completed by a child, skip it.
+ continue
+ if next_node_name not in visitable:
+ # parent is not a node in the original graph, skip it.
+ continue
+
+ # transfer it along with its parents from the source graph
+ # into the top of the current depth first search stack.
+ try:
+ parents = graph.pop(next_node_name)
+ except KeyError:
+ # if the next node is not in the source graph it has
+ # already been popped from it and placed into the
+ # current search stack (but not completed or we would
+ # have hit the continue 6 lines up). this indicates a
+ # cycle.
+ raise errors.GraphCycleError(pending_node_stack)
+ pending_node_stack.append(next_node_name)
+ pending_parents_stack.append(list(parents))
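+
+ # Note (illustrative): a graph containing a cycle, e.g.
+ # TopoSorter([('a', ['b']), ('b', ['a'])]), raises GraphCycleError from
+ # iter_topo_order() instead of looping forever.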
+
+
+def merge_sort(graph, branch_tip, mainline_revisions=None, generate_revno=False):
+ """Topological sort a graph which groups merges.
+
+ :param graph: sequence of pairs of node->parents_list.
+ :param branch_tip: the tip of the branch to graph. Revisions not
+ reachable from branch_tip are not included in the
+ output.
+ :param mainline_revisions: If not None this forces a mainline to be
+ used rather than synthesised from the graph.
+ This must be a valid path through some part
+ of the graph. If the mainline does not cover all
+ the revisions, output stops at the start of the
+ old revision listed in the mainline revisions
+ list.
+ The order for this parameter is oldest-first.
+ :param generate_revno: Optional parameter controlling the generation of
+ revision number sequences in the output. See the output description of
+ the MergeSorter docstring for details.
+ :result: See the MergeSorter docstring for details.
+
+ Node identifiers can be any hashable object, and are typically strings.
+ """
+ return MergeSorter(graph, branch_tip, mainline_revisions,
+ generate_revno).sorted()
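+
+# A small worked example (a sketch, based on the revno illustration in the
+# MergeSorter docstring below):
+#
+# merge_sort({'A': [], 'B': ['A'], 'C': ['A', 'B']}.items(), 'C',
+#            generate_revno=True)
+#
+# should yield roughly:
+#
+# [(0, 'C', 0, (2,), False),
+#  (1, 'B', 1, (1, 1, 1), True),
+#  (2, 'A', 0, (1,), True)]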
+
+
+class MergeSorter(object):
+
+ __slots__ = ['_node_name_stack',
+ '_node_merge_depth_stack',
+ '_pending_parents_stack',
+ '_first_child_stack',
+ '_left_subtree_pushed_stack',
+ '_generate_revno',
+ '_graph',
+ '_mainline_revisions',
+ '_stop_revision',
+ '_original_graph',
+ '_revnos',
+ '_revno_to_branch_count',
+ '_completed_node_names',
+ '_scheduled_nodes',
+ ]
+
+ def __init__(self, graph, branch_tip, mainline_revisions=None,
+ generate_revno=False):
+ """Merge-aware topological sorting of a graph.
+
+ :param graph: sequence of pairs of node_name->parent_names_list.
+ i.e. [('C', ['B']), ('B', ['A']), ('A', [])]
+ For this input the output from the sort or
+ iter_topo_order routines will be:
+ 'A', 'B', 'C'
+ :param branch_tip: the tip of the branch to graph. Revisions not
+ reachable from branch_tip are not included in the
+ output.
+ :param mainline_revisions: If not None this forces a mainline to be
+ used rather than synthesised from the graph.
+ This must be a valid path through some part
+ of the graph. If the mainline does not cover all
+ the revisions, output stops at the start of the
+ old revision listed in the mainline revisions
+ list.
+ The order for this parameter is oldest-first.
+ :param generate_revno: Optional parameter controlling the generation of
+ revision number sequences in the output. See the output description
+ for more details.
+
+ The result is a list sorted so that all parents come before
+ their children. Each element of the list is a tuple containing:
+ (sequence_number, node_name, merge_depth, end_of_merge)
+ * sequence_number: The sequence of this row in the output. Useful for
+ GUIs.
+ * node_name: The node name: opaque text to the merge routine.
+ * merge_depth: How many levels of merging deep this node has been
+ found.
+ * revno_sequence: When requested this field provides a sequence of
+ revision numbers for all revisions. The format is:
+ (REVNO, BRANCHNUM, BRANCHREVNO). BRANCHNUM is the number of the
+ branch that the revno is on. From left to right the REVNO numbers
+ are the sequence numbers within that branch of the revision.
+ For instance, the graph {A:[], B:['A'], C:['A', 'B']} will get
+ the following revno_sequences assigned: A:(1,), B:(1,1,1), C:(2,).
+ This should be read as 'A is the first commit in the trunk',
+ 'B is the first commit on the first branch made from A', 'C is the
+ second commit in the trunk'.
+ * end_of_merge: When True the next node is part of a different merge.
+
+
+ node identifiers can be any hashable object, and are typically strings.
+
+ If you have a graph like [('a', ['b']), ('a', ['c'])] this will only use
+ one of the two values for 'a'.
+
+ The graph is sorted lazily: until you iterate or sort, the input is
+ not processed other than to create an internal representation.
+
+ iteration or sorting may raise GraphCycleError if a cycle is present
+ in the graph.
+
+ Background information on the design:
+ -------------------------------------
+ definition: the end of any cluster or 'merge' occurs when:
+ 1 - the next revision has a lower merge depth than we do.
+ i.e.
+ A 0
+ B 1
+ C 2
+ D 1
+ E 0
+ C, D are the ends of clusters, E might be but we need more data.
+ 2 - or the next revision at our merge depth is not our left most
+ ancestor.
+ This is required to handle multiple-merges in one commit.
+ i.e.
+ A 0 [F, B, E]
+ B 1 [D, C]
+ C 2 [D]
+ D 1 [F]
+ E 1 [F]
+ F 0
+ C is the end of a cluster due to rule 1.
+ D is not the end of a cluster from rule 1, but is from rule 2: E
+ is not its left most ancestor
+ E is the end of a cluster due to rule 1
+ F might be but we need more data.
+
+ we show connecting lines to a parent when:
+ - The parent is the start of a merge within this cluster.
+ That is, the merge was not done to the mainline before this cluster
+ was merged to the mainline.
+ This can be detected thus:
+ * The parent has a higher merge depth and is the next revision in
+ the list.
+
+ The 'next revision in the list' constraint is needed for this case:
+ A 0 [D, B]
+ B 1 [C, F] # we do not want to show a line to F which is depth 2
+ but not a merge
+ C 1 [H] # note that this is a long line to show back to the
+ ancestor - see the end of merge rules.
+ D 0 [G, E]
+ E 1 [G, F]
+ F 2 [G]
+ G 1 [H]
+ H 0
+ - Part of this merges 'branch':
+ The parent has the same merge depth and is our left most parent and we
+ are not the end of the cluster.
+ A 0 [C, B] lines: [B, C]
+ B 1 [E, C] lines: [C]
+ C 0 [D] lines: [D]
+ D 0 [F, E] lines: [E, F]
+ E 1 [F] lines: [F]
+ F 0
+ - The end of this merge/cluster:
+ we can ONLY have multiple parents at the end of a cluster if this
+ branch was previously merged into the 'mainline'.
+ - if we have one and only one parent, show it
+ Note that this may be to a greater merge depth - for instance if
+ this branch continued from a deeply nested branch to add something
+ to it.
+ - if we have more than one parent - show the second oldest (older ==
+ further down the list) parent with
+ an equal or lower merge depth
+ XXXX revisit when awake. ddaa asks about the relevance of each one
+ - maybe more than one parent is relevant
+ """
+ self._generate_revno = generate_revno
+ # a dict of the graph.
+ self._graph = dict(graph)
+ # if there is an explicit mainline, alter the graph to match. This is
+ # easier than checking at every merge whether we are on the mainline and
+ # if so which path to take.
+ if mainline_revisions is None:
+ self._mainline_revisions = []
+ self._stop_revision = None
+ else:
+ self._mainline_revisions = list(mainline_revisions)
+ self._stop_revision = self._mainline_revisions[0]
+ # skip the first revision; it's what we reach and its parents are
+ # therefore irrelevant
+ for index, revision in enumerate(self._mainline_revisions[1:]):
+ # NB: index 0 means self._mainline_revisions[1]
+ # if the mainline matches the graph, nothing to do.
+ parent = self._mainline_revisions[index]
+ if parent is None:
+ # end of mainline_revisions history
+ continue
+ graph_parent_ids = self._graph[revision]
+ if not graph_parent_ids:
+ # We ran into a ghost, skip over it, this is a workaround for
+ # bug #243536, the _graph has had ghosts stripped, but the
+ # mainline_revisions have not
+ continue
+ if graph_parent_ids[0] == parent:
+ continue
+ # remove it from its prior spot
+ self._graph[revision].remove(parent)
+ # insert it into the start of the mainline
+ self._graph[revision].insert(0, parent)
+ # we need to do a check late in the process to detect end-of-merges
+ # which requires the parents to be accessible: it's easier for now
+ # to just keep the original graph around.
+ self._original_graph = dict(self._graph.items())
+ # we need to know the revision numbers of revisions to determine
+ # the revision numbers of their descendants
+ # this is a graph from node to [revno_tuple, first_child]
+ # where first_child is True if no other children have seen this node
+ # and revno_tuple is the tuple that was assigned to the node.
+ # we don't know revnos to start with, so we start it seeded with
+ # [None, True]
+ self._revnos = dict((revision, [None, True])
+ for revision in self._graph)
+ # Each mainline revision counts how many child branches have spawned from it.
+ self._revno_to_branch_count = {}
+
+ # this is a stack storing the depth first search into the graph.
+ self._node_name_stack = []
+ # at each level of recursion we need the merge depth this node is at:
+ self._node_merge_depth_stack = []
+ # at each level of 'recursion' we have to check each parent. This
+ # stack stores the parents we have not yet checked for the node at the
+ # matching depth in _node_name_stack
+ self._pending_parents_stack = []
+ # When we first look at a node we assign it a sequence number from its
+ # leftmost parent.
+ self._first_child_stack = []
+ # this is a set of the nodes who have been completely analysed for fast
+ # membership checking
+ self._completed_node_names = set()
+ # this is the scheduling of nodes list.
+ # Nodes are scheduled
+ # from the bottom left of the tree: in the tree
+ # A 0 [D, B]
+ # B 1 [C]
+ # C 1 [D]
+ # D 0 [F, E]
+ # E 1 [F]
+ # F 0
+ # the scheduling order is: F, E, D, C, B, A
+ # that is - 'left subtree, right subtree, node'
+ # which would mean that when we schedule A we can emit the entire tree.
+ self._scheduled_nodes = []
+ # This records for each node when we have processed its left most
+ # unmerged subtree. After this subtree is scheduled, all other subtrees
+ # have their merge depth increased by one from this node's merge depth.
+ # It contains one boolean per entry in _node_name_stack.
+ self._left_subtree_pushed_stack = []
+
+ # seed the search with the tip of the branch
+ if (branch_tip is not None and
+ branch_tip != _mod_revision.NULL_REVISION and
+ branch_tip != (_mod_revision.NULL_REVISION,)):
+ parents = self._graph.pop(branch_tip)
+ self._push_node(branch_tip, 0, parents)
+
+ def sorted(self):
+ """Sort the graph and return as a list.
+
+ After calling this the sorter is empty and you must create a new one.
+ """
+ return list(self.iter_topo_order())
+
+ def iter_topo_order(self):
+ """Yield the nodes of the graph in a topological order.
+
+ After finishing iteration the sorter is empty and you cannot continue
+ iteration.
+ """
+ # These are safe to offload to local variables, because they are used
+ # as a stack and modified in place, never assigned to.
+ node_name_stack = self._node_name_stack
+ node_merge_depth_stack = self._node_merge_depth_stack
+ pending_parents_stack = self._pending_parents_stack
+ left_subtree_pushed_stack = self._left_subtree_pushed_stack
+ completed_node_names = self._completed_node_names
+ scheduled_nodes = self._scheduled_nodes
+
+ graph_pop = self._graph.pop
+
+ def push_node(node_name, merge_depth, parents,
+ node_name_stack_append=node_name_stack.append,
+ node_merge_depth_stack_append=node_merge_depth_stack.append,
+ left_subtree_pushed_stack_append=left_subtree_pushed_stack.append,
+ pending_parents_stack_append=pending_parents_stack.append,
+ first_child_stack_append=self._first_child_stack.append,
+ revnos=self._revnos,
+ ):
+ """Add node_name to the pending node stack.
+
+ Names in this stack will get emitted into the output as they are popped
+ off the stack.
+
+ This inlines a lot of self._variable.append functions as local
+ variables.
+ """
+ node_name_stack_append(node_name)
+ node_merge_depth_stack_append(merge_depth)
+ left_subtree_pushed_stack_append(False)
+ pending_parents_stack_append(list(parents))
+ # as we push it, check if it is the first child
+ parent_info = None
+ if parents:
+ # node has parents, assign from the left most parent.
+ try:
+ parent_info = revnos[parents[0]]
+ except KeyError:
+ # Left-hand parent is a ghost, consider it not to exist
+ pass
+ if parent_info is not None:
+ first_child = parent_info[1]
+ parent_info[1] = False
+ else:
+ # We don't use the same algorithm here, but we need to keep the
+ # stack in line
+ first_child = None
+ first_child_stack_append(first_child)
+
+ def pop_node(node_name_stack_pop=node_name_stack.pop,
+ node_merge_depth_stack_pop=node_merge_depth_stack.pop,
+ first_child_stack_pop=self._first_child_stack.pop,
+ left_subtree_pushed_stack_pop=left_subtree_pushed_stack.pop,
+ pending_parents_stack_pop=pending_parents_stack.pop,
+ original_graph=self._original_graph,
+ revnos=self._revnos,
+ completed_node_names_add=self._completed_node_names.add,
+ scheduled_nodes_append=scheduled_nodes.append,
+ revno_to_branch_count=self._revno_to_branch_count,
+ ):
+ """Pop the top node off the stack
+
+ The node is appended to the sorted output.
+ """
+ # we are returning from the flattened call frame:
+ # pop off the local variables
+ node_name = node_name_stack_pop()
+ merge_depth = node_merge_depth_stack_pop()
+ first_child = first_child_stack_pop()
+ # remove this node from the pending lists:
+ left_subtree_pushed_stack_pop()
+ pending_parents_stack_pop()
+
+ parents = original_graph[node_name]
+ parent_revno = None
+ if parents:
+ # node has parents, assign from the left most parent.
+ try:
+ parent_revno = revnos[parents[0]][0]
+ except KeyError:
+ # left-hand parent is a ghost, treat it as not existing
+ pass
+ if parent_revno is not None:
+ if not first_child:
+ # not the first child, make a new branch
+ base_revno = parent_revno[0]
+ branch_count = revno_to_branch_count.get(base_revno, 0)
+ branch_count += 1
+ revno_to_branch_count[base_revno] = branch_count
+ revno = (parent_revno[0], branch_count, 1)
+ # revno = (parent_revno[0], branch_count, parent_revno[-1]+1)
+ else:
+ # as the first child, we just increase the final revision
+ # number
+ revno = parent_revno[:-1] + (parent_revno[-1] + 1,)
+ else:
+ # no parents, use the root sequence
+ root_count = revno_to_branch_count.get(0, -1)
+ root_count += 1
+ if root_count:
+ revno = (0, root_count, 1)
+ else:
+ revno = (1,)
+ revno_to_branch_count[0] = root_count
+
+ # store the revno for this node for future reference
+ revnos[node_name][0] = revno
+ completed_node_names_add(node_name)
+ scheduled_nodes_append((node_name, merge_depth, revno))
+ return node_name
+
+
+ while node_name_stack:
+ # loop until this call completes.
+ parents_to_visit = pending_parents_stack[-1]
+ # if all parents are done, the revision is done
+ if not parents_to_visit:
+ # append the revision to the topo sorted scheduled list:
+ # all the node's parents have been scheduled, now
+ # we can add it to the output.
+ pop_node()
+ else:
+ while pending_parents_stack[-1]:
+ if not left_subtree_pushed_stack[-1]:
+ # recurse depth first into the primary parent
+ next_node_name = pending_parents_stack[-1].pop(0)
+ is_left_subtree = True
+ left_subtree_pushed_stack[-1] = True
+ else:
+ # place any merges in right-to-left order for scheduling
+ # which gives us left-to-right order after we reverse
+ # the scheduled queue. XXX: This has the effect of
+ # allocating common-new revisions to the right-most
+ # subtree rather than the left most, which will
+ # display nicely (you get smaller trees at the top
+ # of the combined merge).
+ next_node_name = pending_parents_stack[-1].pop()
+ is_left_subtree = False
+ if next_node_name in completed_node_names:
+ # this parent was completed by a child on the
+ # call stack. skip it.
+ continue
+ # otherwise transfer it from the source graph into the
+ # top of the current depth first search stack.
+ try:
+ parents = graph_pop(next_node_name)
+ except KeyError:
+ # if the next node is not in the source graph it has
+ # already been popped from it and placed into the
+ # current search stack (but not completed or we would
+ # have hit the continue 4 lines up).
+ # this indicates a cycle.
+ if next_node_name in self._original_graph:
+ raise errors.GraphCycleError(node_name_stack)
+ else:
+ # This is just a ghost parent, ignore it
+ continue
+ next_merge_depth = 0
+ if is_left_subtree:
+ # a new child branch from name_stack[-1]
+ next_merge_depth = 0
+ else:
+ next_merge_depth = 1
+ next_merge_depth = (
+ node_merge_depth_stack[-1] + next_merge_depth)
+ push_node(
+ next_node_name,
+ next_merge_depth,
+ parents)
+ # and do not continue processing parents until this 'call'
+ # has recursed.
+ break
+
+ # We have scheduled the graph. Now deliver the ordered output:
+ sequence_number = 0
+ stop_revision = self._stop_revision
+ generate_revno = self._generate_revno
+ original_graph = self._original_graph
+
+ while scheduled_nodes:
+ node_name, merge_depth, revno = scheduled_nodes.pop()
+ if node_name == stop_revision:
+ return
+ if not len(scheduled_nodes):
+ # last revision is the end of a merge
+ end_of_merge = True
+ elif scheduled_nodes[-1][1] < merge_depth:
+ # the next node is to our left
+ end_of_merge = True
+ elif (scheduled_nodes[-1][1] == merge_depth and
+ (scheduled_nodes[-1][0] not in
+ original_graph[node_name])):
+ # the next node was part of a multiple-merge.
+ end_of_merge = True
+ else:
+ end_of_merge = False
+ if generate_revno:
+ yield (sequence_number, node_name, merge_depth, revno, end_of_merge)
+ else:
+ yield (sequence_number, node_name, merge_depth, end_of_merge)
+ sequence_number += 1
+
+ def _push_node(self, node_name, merge_depth, parents):
+ """Add node_name to the pending node stack.
+
+ Names in this stack will get emitted into the output as they are popped
+ off the stack.
+ """
+ self._node_name_stack.append(node_name)
+ self._node_merge_depth_stack.append(merge_depth)
+ self._left_subtree_pushed_stack.append(False)
+ self._pending_parents_stack.append(list(parents))
+ # as we push it, figure out if this is the first child
+ parent_info = None
+ if parents:
+ # node has parents, assign from the left most parent.
+ try:
+ parent_info = self._revnos[parents[0]]
+ except KeyError:
+ # Left-hand parent is a ghost, consider it not to exist
+ pass
+ if parent_info is not None:
+ first_child = parent_info[1]
+ parent_info[1] = False
+ else:
+ # We don't use the same algorithm here, but we need to keep the
+ # stack in line
+ first_child = None
+ self._first_child_stack.append(first_child)
+
+ def _pop_node(self):
+ """Pop the top node off the stack
+
+ The node is appended to the sorted output.
+ """
+ # we are returning from the flattened call frame:
+ # pop off the local variables
+ node_name = self._node_name_stack.pop()
+ merge_depth = self._node_merge_depth_stack.pop()
+ first_child = self._first_child_stack.pop()
+ # remove this node from the pending lists:
+ self._left_subtree_pushed_stack.pop()
+ self._pending_parents_stack.pop()
+
+ parents = self._original_graph[node_name]
+ parent_revno = None
+ if parents:
+ # node has parents, assign from the left most parent.
+ try:
+ parent_revno = self._revnos[parents[0]][0]
+ except KeyError:
+ # left-hand parent is a ghost, treat it as not existing
+ pass
+ if parent_revno is not None:
+ if not first_child:
+ # not the first child, make a new branch
+ base_revno = parent_revno[0]
+ branch_count = self._revno_to_branch_count.get(base_revno, 0)
+ branch_count += 1
+ self._revno_to_branch_count[base_revno] = branch_count
+ revno = (parent_revno[0], branch_count, 1)
+ # revno = (parent_revno[0], branch_count, parent_revno[-1]+1)
+ else:
+ # as the first child, we just increase the final revision
+ # number
+ revno = parent_revno[:-1] + (parent_revno[-1] + 1,)
+ else:
+ # no parents, use the root sequence
+ root_count = self._revno_to_branch_count.get(0, -1)
+ root_count += 1
+ if root_count:
+ revno = (0, root_count, 1)
+ else:
+ revno = (1,)
+ self._revno_to_branch_count[0] = root_count
+
+ # store the revno for this node for future reference
+ self._revnos[node_name][0] = revno
+ self._completed_node_names.add(node_name)
+ self._scheduled_nodes.append((node_name, merge_depth, self._revnos[node_name][0]))
+ return node_name
diff --git a/bzrlib/tuned_gzip.py b/bzrlib/tuned_gzip.py
new file mode 100644
index 0000000..3af6e58
--- /dev/null
+++ b/bzrlib/tuned_gzip.py
@@ -0,0 +1,415 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+# Written by Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Bzrlib specific gzip tunings. We plan to feed these to the upstream gzip."""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+
+# make GzipFile faster:
+import gzip
+from gzip import FEXTRA, FCOMMENT, FNAME, FHCRC
+import sys
+import struct
+import zlib
+
+ # we want \n preserved: split lines on \n only.
+from bzrlib import symbol_versioning
+
+__all__ = ["GzipFile", "bytes_to_gzip"]
+
+
+def U32(i):
+ """Return i as an unsigned integer, assuming it fits in 32 bits.
+
+ If it's >= 2GB when viewed as a 32-bit unsigned int, return a long.
+ """
+ if i < 0:
+ i += 1L << 32
+ return i
+
+
+def LOWU32(i):
+ """Return the low-order 32 bits of an int, as a non-negative int."""
+ return i & 0xFFFFFFFFL
+
+
+def bytes_to_gzip(bytes, factory=zlib.compressobj,
+ level=zlib.Z_DEFAULT_COMPRESSION, method=zlib.DEFLATED,
+ width=-zlib.MAX_WBITS, mem=zlib.DEF_MEM_LEVEL,
+ crc32=zlib.crc32):
+ """Create a gzip file containing bytes and return its content."""
+ return chunks_to_gzip([bytes])
+
+
+def chunks_to_gzip(chunks, factory=zlib.compressobj,
+ level=zlib.Z_DEFAULT_COMPRESSION, method=zlib.DEFLATED,
+ width=-zlib.MAX_WBITS, mem=zlib.DEF_MEM_LEVEL,
+ crc32=zlib.crc32):
+ """Create a gzip file containing chunks and return its content.
+
+ :param chunks: An iterable of strings. Each string can have arbitrary
+ layout.
+ """
+ result = [
+ '\037\213' # self.fileobj.write('\037\213') # magic header
+ '\010' # self.fileobj.write('\010') # compression method
+ # fname = self.filename[:-3]
+ # flags = 0
+ # if fname:
+ # flags = FNAME
+ '\x00' # self.fileobj.write(chr(flags))
+ '\0\0\0\0' # write32u(self.fileobj, long(time.time()))
+ '\002' # self.fileobj.write('\002')
+ '\377' # self.fileobj.write('\377')
+ # if fname:
+ '' # self.fileobj.write(fname + '\000')
+ ]
+ # using a compressobj avoids a small header and trailer that the compress()
+ # utility function adds.
+ compress = factory(level, method, width, mem, 0)
+ crc = 0
+ total_len = 0
+ for chunk in chunks:
+ crc = crc32(chunk, crc)
+ total_len += len(chunk)
+ zbytes = compress.compress(chunk)
+ if zbytes:
+ result.append(zbytes)
+ result.append(compress.flush())
+ # size may exceed 2GB, or even 4GB
+ result.append(struct.pack("<LL", LOWU32(crc), LOWU32(total_len)))
+ return ''.join(result)
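+
+# A sanity-check sketch (illustrative): the result is an ordinary gzip
+# stream, so the standard library can read it back:
+#
+# data = bytes_to_gzip('hello world\n')
+# gzip.GzipFile(fileobj=StringIO(data)).read() == 'hello world\n'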
+
+
+class GzipFile(gzip.GzipFile):
+ """Knit tuned version of GzipFile.
+
+ This is based on the following lsprof stats:
+ python 2.4 stock GzipFile write:
+ 58971 0 5644.3090 2721.4730 gzip:193(write)
+ +58971 0 1159.5530 1159.5530 +<built-in method compress>
+ +176913 0 987.0320 987.0320 +<len>
+ +58971 0 423.1450 423.1450 +<zlib.crc32>
+ +58971 0 353.1060 353.1060 +<method 'write' of 'cStringIO.
+ StringO' objects>
+ tuned GzipFile write:
+ 58971 0 4477.2590 2103.1120 bzrlib.knit:1250(write)
+ +58971 0 1297.7620 1297.7620 +<built-in method compress>
+ +58971 0 406.2160 406.2160 +<zlib.crc32>
+ +58971 0 341.9020 341.9020 +<method 'write' of 'cStringIO.
+ StringO' objects>
+ +58971 0 328.2670 328.2670 +<len>
+
+
+ Yes, it's only 1.6 seconds, but they add up.
+ """
+
+ def __init__(self, *args, **kwargs):
+ symbol_versioning.warn(
+ symbol_versioning.deprecated_in((2, 3, 0))
+ % 'bzrlib.tuned_gzip.GzipFile',
+ DeprecationWarning, stacklevel=2)
+ gzip.GzipFile.__init__(self, *args, **kwargs)
+
+ def _add_read_data(self, data):
+ # 4169 calls in 183
+ # temp var for len(data) and switch to +='s.
+ # 4169 in 139
+ len_data = len(data)
+ self.crc = zlib.crc32(data, self.crc)
+ self.extrabuf += data
+ self.extrasize += len_data
+ self.size += len_data
+
+ def _write_gzip_header(self):
+ """A tuned version of gzip._write_gzip_header
+
+ We have some extra constraints that plain Gzip does not.
+ 1) We want to write the whole blob at once, rather than multiple
+ calls to fileobj.write().
+ 2) We never have a filename
+ 3) We don't care about the time
+ """
+ self.fileobj.write(
+ '\037\213' # self.fileobj.write('\037\213') # magic header
+ '\010' # self.fileobj.write('\010') # compression method
+ # fname = self.filename[:-3]
+ # flags = 0
+ # if fname:
+ # flags = FNAME
+ '\x00' # self.fileobj.write(chr(flags))
+ '\0\0\0\0' # write32u(self.fileobj, long(time.time()))
+ '\002' # self.fileobj.write('\002')
+ '\377' # self.fileobj.write('\377')
+ # if fname:
+ '' # self.fileobj.write(fname + '\000')
+ )
+
+ def _read(self, size=1024):
+ # various optimisations:
+ # reduces lsprof count from 2500 to
+ # 8337 calls in 1272, 365 internal
+ if self.fileobj is None:
+ raise EOFError, "Reached EOF"
+
+ if self._new_member:
+ # If the _new_member flag is set, we have to
+ # jump to the next member, if there is one.
+ #
+ # First, check if we're at the end of the file;
+ # if so, it's time to stop; no more members to read.
+ next_header_bytes = self.fileobj.read(10)
+ if next_header_bytes == '':
+ raise EOFError, "Reached EOF"
+
+ self._init_read()
+ self._read_gzip_header(next_header_bytes)
+ self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
+ self._new_member = False
+
+ # Read a chunk of data from the file
+ buf = self.fileobj.read(size)
+
+ # If the EOF has been reached, flush the decompression object
+ # and mark this object as finished.
+
+ if buf == "":
+ self._add_read_data(self.decompress.flush())
+ if len(self.decompress.unused_data) < 8:
+ raise AssertionError("what does flush do?")
+ self._gzip_tail = self.decompress.unused_data[0:8]
+ self._read_eof()
+ # tell the driving read() call we have stuffed all the data
+ # in self.extrabuf
+ raise EOFError, 'Reached EOF'
+
+ self._add_read_data(self.decompress.decompress(buf))
+
+ if self.decompress.unused_data != "":
+ # Ending case: we've come to the end of a member in the file,
+ # so seek back to the start of the data for the next member which
+ # is the length of the decompress object's unused data - the first
+ # 8 bytes for the end crc and size records.
+ #
+ # so seek back to the start of the unused data, finish up
+ # this member, and read a new gzip header.
+ # (The number of bytes to seek back is the length of the unused
+ # data, minus 8 because those 8 bytes are part of this member.)
+ seek_length = len(self.decompress.unused_data) - 8
+ if seek_length > 0:
+ # we read too much data
+ self.fileobj.seek(-seek_length, 1)
+ self._gzip_tail = self.decompress.unused_data[0:8]
+ elif seek_length < 0:
+ # we haven't read enough to check the checksum.
+ if not (-8 < seek_length):
+ raise AssertionError("too great a seek")
+ buf = self.fileobj.read(-seek_length)
+ self._gzip_tail = self.decompress.unused_data + buf
+ else:
+ self._gzip_tail = self.decompress.unused_data
+
+ # Check the CRC and file size, and set the flag so we read
+ # a new member on the next call
+ self._read_eof()
+ self._new_member = True
+
+ def _read_eof(self):
+ """tuned to reduce function calls and eliminate file seeking:
+ pass 1:
+ reduces lsprof count from 800 to 288
+ 4168 in 296
+ avoid U32 call by using struct format L
+ 4168 in 200
+ """
+ # We've read to the end of the file, so we should have 8 bytes of
+ # unused data in the decompressor. If we don't, there is a corrupt file.
+ # We use these 8 bytes to calculate the CRC and the recorded file size.
+ # We then check that the computed CRC and size of the
+ # uncompressed data matches the stored values. Note that the size
+ # stored is the true file size mod 2**32.
+ if not (len(self._gzip_tail) == 8):
+ raise AssertionError("gzip trailer is incorrect length.")
+ crc32, isize = struct.unpack("<LL", self._gzip_tail)
+ # note that isize is unsigned - it can exceed 2GB
+ if crc32 != U32(self.crc):
+ raise IOError, "CRC check failed %d %d" % (crc32, U32(self.crc))
+ elif isize != LOWU32(self.size):
+ raise IOError, "Incorrect length of data produced"
+
+ def _read_gzip_header(self, bytes=None):
+ """Supply bytes if the minimum header size is already read.
+
+ :param bytes: 10 bytes of header data.
+ """
+ """starting cost: 300 in 3998
+ 15998 reads from 3998 calls
+ final cost 168
+ """
+ if bytes is None:
+ bytes = self.fileobj.read(10)
+ magic = bytes[0:2]
+ if magic != '\037\213':
+ raise IOError, 'Not a gzipped file'
+ method = ord(bytes[2:3])
+ if method != 8:
+ raise IOError, 'Unknown compression method'
+ flag = ord(bytes[3:4])
+ # modtime = self.fileobj.read(4) (bytes [4:8])
+ # extraflag = self.fileobj.read(1) (bytes[8:9])
+ # os = self.fileobj.read(1) (bytes[9:10])
+ # self.fileobj.read(6)
+
+ if flag & FEXTRA:
+ # Read & discard the extra field, if present
+ xlen = ord(self.fileobj.read(1))
+ xlen = xlen + 256*ord(self.fileobj.read(1))
+ self.fileobj.read(xlen)
+ if flag & FNAME:
+ # Read and discard a null-terminated string containing the filename
+ while True:
+ s = self.fileobj.read(1)
+ if not s or s=='\000':
+ break
+ if flag & FCOMMENT:
+ # Read and discard a null-terminated string containing a comment
+ while True:
+ s = self.fileobj.read(1)
+ if not s or s=='\000':
+ break
+ if flag & FHCRC:
+ self.fileobj.read(2) # Read & discard the 16-bit header CRC
+
+ def readline(self, size=-1):
+ """Tuned to remove buffer length calls in _unread and...
+
+ also removes multiple len(c) calls, inlines _unread,
+ total savings - lsprof 5800 to 5300
+ phase 2:
+ 4168 calls in 2233
+ 8176 calls to read() in 1684
+ changing the min chunk size to 200 halved all the cache misses
+ leading to a drop to:
+ 4168 calls in 1977
+ 4168 call to read() in 1646
+ - i.e. just reduced the function call overhead. May be worth
+ keeping.
+ """
+ if size < 0: size = sys.maxint
+ bufs = []
+ readsize = min(200, size) # Read from the file in small chunks
+ while True:
+ if size == 0:
+ return "".join(bufs) # Return resulting line
+
+ # c is the chunk
+ c = self.read(readsize)
+ # number of bytes read
+ len_c = len(c)
+ i = c.find('\n')
+ if size is not None:
+ # We set i=size to break out of the loop under two
+ # conditions: 1) there's no newline, and the chunk is
+ # larger than size, or 2) there is a newline, but the
+ # resulting line would be longer than 'size'.
+ if i==-1 and len_c > size: i=size-1
+ elif size <= i: i = size -1
+
+ if i >= 0 or c == '':
+ # if i>= 0 we have a newline or have triggered the above
+ # if size is not None condition.
+ # if c == '' its EOF.
+ bufs.append(c[:i+1]) # Add portion of last chunk
+ # -- inlined self._unread --
+ ## self._unread(c[i+1:], len_c - i) # Push back rest of chunk
+ self.extrabuf = c[i+1:] + self.extrabuf
+ self.extrasize = len_c - i + self.extrasize
+ self.offset -= len_c - i
+ # -- end inlined self._unread --
+ return ''.join(bufs) # Return resulting line
+
+ # Append chunk to list, decrease 'size',
+ bufs.append(c)
+ size = size - len_c
+ readsize = min(size, readsize * 2)
+
+ def readlines(self, sizehint=0):
+ # optimise to avoid all the buffer manipulation
+ # lsprof changed from:
+ # 4168 calls in 5472 with 32000 calls to readline()
+ # to :
+ # 4168 calls in 417.
+ # Negative numbers result in reading all the lines
+
+ # python's gzip routine uses sizehint. This is a more efficient way
+ # than python uses to honor it. But it is even more efficient to
+ # just read the entire thing and use cStringIO to split into lines.
+ # if sizehint <= 0:
+ # sizehint = -1
+ # content = self.read(sizehint)
+ # return bzrlib.osutils.split_lines(content)
+ content = StringIO(self.read(-1))
+ return content.readlines()
+
+ def _unread(self, buf, len_buf=None):
+ """tuned to remove unneeded len calls.
+
+ because this is such an inner routine in readline, and readline is
+ in many inner loops, this has been inlined into readline().
+
+ The len_buf parameter combined with the reduction in len calls dropped
+ the lsprof ms count for this routine on my test data from 800 to 200 -
+ a 75% saving.
+ """
+ if len_buf is None:
+ len_buf = len(buf)
+ self.extrabuf = buf + self.extrabuf
+ self.extrasize = len_buf + self.extrasize
+ self.offset -= len_buf
+
+ def write(self, data):
+ if self.mode != gzip.WRITE:
+ import errno
+ raise IOError(errno.EBADF, "write() on read-only GzipFile object")
+
+ if self.fileobj is None:
+ raise ValueError, "write() on closed GzipFile object"
+ data_len = len(data)
+ if data_len > 0:
+ self.size = self.size + data_len
+ self.crc = zlib.crc32(data, self.crc)
+ self.fileobj.write( self.compress.compress(data) )
+ self.offset += data_len
+
+ def writelines(self, lines):
+ # profiling indicated a significant overhead
+ # calling write for each line.
+ # this batch call is a lot faster :).
+ # (4 seconds to 1 seconds for the sample upgrades I was testing).
+ self.write(''.join(lines))
+
+ if sys.version_info > (2, 7):
+ # As of Python 2.7 the crc32 must be positive when close is called
+ def close(self):
+ if self.fileobj is None:
+ return
+ if self.mode == gzip.WRITE:
+ self.crc &= 0xFFFFFFFFL
+ gzip.GzipFile.close(self)
+
diff --git a/bzrlib/ui/__init__.py b/bzrlib/ui/__init__.py
new file mode 100644
index 0000000..f71216d
--- /dev/null
+++ b/bzrlib/ui/__init__.py
@@ -0,0 +1,556 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Abstraction for interacting with the user.
+
+Applications can choose different types of UI, and they deal with displaying
+messages or progress to the user, and with gathering different types of input.
+
+Several levels are supported, and you can also register new factories such as
+for a GUI.
+
+bzrlib.ui.UIFactory
+ Semi-abstract base class
+
+bzrlib.ui.SilentUIFactory
+ Produces no output and cannot take any input; useful for programs using
+ bzrlib in batch mode or for programs such as loggerhead.
+
+bzrlib.ui.CannedInputUIFactory
+ For use in testing; the input values to be returned are provided
+ at construction.
+
+bzrlib.ui.text.TextUIFactory
+ Standard text command-line interface, with stdin, stdout, stderr.
+ May make more or less advanced use of them, eg in drawing progress bars,
+ depending on the detected capabilities of the terminal.
+ GUIs may choose to subclass this so that unimplemented methods fall
+ back to working through the terminal.
+"""
+
+from __future__ import absolute_import
+
+import warnings
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ config,
+ osutils,
+ progress,
+ trace,
+ )
+""")
+
+
+_valid_boolean_strings = dict(yes=True, no=False,
+ y=True, n=False,
+ on=True, off=False,
+ true=True, false=False)
+_valid_boolean_strings['1'] = True
+_valid_boolean_strings['0'] = False
+
+
+def bool_from_string(s, accepted_values=None):
+ """Returns a boolean if the string can be interpreted as such.
+
+ Interpret case-insensitive strings as booleans. The default values
+ include: 'yes', 'no', 'y', 'n', 'true', 'false', '0', '1', 'on',
+ 'off'. Alternative values can be provided with the 'accepted_values'
+ parameter.
+
+ :param s: A string that should be interpreted as a boolean. It should be of
+ type string or unicode.
+
+ :param accepted_values: An optional dict with accepted strings as keys and
+ True/False as values. The strings will be tested against a lowered
+ version of 's'.
+
+ :return: True or False for accepted strings, None otherwise.
+ """
+ if accepted_values is None:
+ accepted_values = _valid_boolean_strings
+ val = None
+ if type(s) in (str, unicode):
+ try:
+ val = accepted_values[s.lower()]
+ except KeyError:
+ pass
+ return val
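+
+# For instance (a quick sketch):
+#
+# bool_from_string('Yes') => True
+# bool_from_string('off') => False
+# bool_from_string('maybe') => None
+# bool_from_string('y', accepted_values={'ja': True}) => None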
+
+
+class ConfirmationUserInterfacePolicy(object):
+ """Wrapper for a UIFactory that allows or denies all confirmed actions."""
+
+ def __init__(self, wrapped_ui, default_answer, specific_answers):
+ """Generate a proxy UI that does no confirmations.
+
+ :param wrapped_ui: Underlying UIFactory.
+ :param default_answer: Bool for whether requests for
+ confirmation from the user should be noninteractively accepted or
+ denied.
+ :param specific_answers: Map from confirmation_id to bool answer.
+ """
+ self.wrapped_ui = wrapped_ui
+ self.default_answer = default_answer
+ self.specific_answers = specific_answers
+
+ def __getattr__(self, name):
+ return getattr(self.wrapped_ui, name)
+
+ def __repr__(self):
+ return '%s(%r, %r, %r)' % (
+ self.__class__.__name__,
+ self.wrapped_ui,
+ self.default_answer,
+ self.specific_answers)
+
+ def confirm_action(self, prompt, confirmation_id, prompt_kwargs):
+ if confirmation_id in self.specific_answers:
+ return self.specific_answers[confirmation_id]
+ elif self.default_answer is not None:
+ return self.default_answer
+ else:
+ return self.wrapped_ui.confirm_action(
+ prompt, confirmation_id, prompt_kwargs)
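+
+ # A usage sketch (illustrative; 'base_ui' is any existing UIFactory and
+ # the confirmation_id below is hypothetical):
+ #
+ # ui = ConfirmationUserInterfacePolicy(base_ui, default_answer=True,
+ #     specific_answers={'some.confirmation.id': False})
+ #
+ # Every confirmation is then accepted without prompting, except the one
+ # with the given confirmation_id, which is always denied.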
+
+
+class UIFactory(object):
+ """UI abstraction.
+
+ This tells the library how to display things to the user. Through this
+ layer different applications can choose the style of UI.
+
+ UI Factories are also context managers, for some syntactic sugar some users
+ need.
+
+ :ivar suppressed_warnings: Identifiers for user warnings that should
+ not be emitted.
+ """
+
+ _user_warning_templates = dict(
+ cross_format_fetch=("Doing on-the-fly conversion from "
+ "%(from_format)s to %(to_format)s.\n"
+ "This may take some time. Upgrade the repositories to the "
+ "same format for better performance."
+ ),
+ experimental_format_fetch=("Fetching into experimental format "
+ "%(to_format)s.\n"
+ "This format may be unreliable or change in the future "
+ "without an upgrade path.\n"),
+ deprecated_command=(
+ "The command 'bzr %(deprecated_name)s' "
+ "has been deprecated in bzr %(deprecated_in_version)s. "
+ "Please use 'bzr %(recommended_name)s' instead."),
+ deprecated_command_option=(
+ "The option '%(deprecated_name)s' to 'bzr %(command)s' "
+ "has been deprecated in bzr %(deprecated_in_version)s. "
+ "Please use '%(recommended_name)s' instead."),
+ recommend_upgrade=("%(current_format_name)s is deprecated "
+ "and a better format is available.\n"
+ "It is recommended that you upgrade by "
+ "running the command\n"
+ " bzr upgrade %(basedir)s"),
+ locks_steal_dead=(
+ u"Stole dead lock %(lock_url)s %(other_holder_info)s."),
+ )
+
+ def __init__(self):
+ self._task_stack = []
+ self.suppressed_warnings = set()
+ self._quiet = False
+
+ def __enter__(self):
+ """Context manager entry support.
+
+ Override in a concrete factory class if initialisation before use is
+ needed.
+ """
+ return self # This is bound to the 'as' clause in a with statement.
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Context manager exit support.
+
+ Override in a concrete factory class if more cleanup than a simple
+ self.clear_term() is needed when the UIFactory is finished with.
+ """
+ self.clear_term()
+ return False # propagate exceptions.
+
+ def be_quiet(self, state):
+ """Tell the UI to be more quiet, or not.
+
+ Typically this suppresses progress bars; the application may also look
+ at ui_factory.is_quiet().
+ """
+ self._quiet = state
+
+ def confirm_action(self, prompt, confirmation_id, prompt_kwargs):
+ """Seek user confirmation for an action.
+
+ If the UI is noninteractive, or the user does not want to be asked
+ about this action, True is returned, indicating bzr should just
+ proceed.
+
+ The confirmation id allows the user to configure certain actions to
+ always be confirmed or always denied, and for UIs to specialize the
+ display of particular confirmations.
+
+ :param prompt: Suggested text to display to the user.
+ :param prompt_kwargs: A dictionary of arguments that can be
+ string-interpolated into the prompt.
+ :param confirmation_id: Unique string identifier for the confirmation.
+ """
+ return self.get_boolean(prompt % prompt_kwargs)
+
+ def get_password(self, prompt=u'', **kwargs):
+ """Prompt the user for a password.
+
+ :param prompt: The prompt to present the user (must be unicode)
+ :param kwargs: Arguments which will be expanded into the prompt.
+ This lets front ends display different things if
+ they so choose.
+
+ :return: The password string, or None if the user canceled the
+ request. Note that we do not touch the encoding; users may
+ use whatever they see fit and the password should be
+ transported as is.
+ """
+ raise NotImplementedError(self.get_password)
+
+ def is_quiet(self):
+ return self._quiet
+
+ def make_output_stream(self, encoding=None, encoding_type=None):
+ """Get a stream for sending out bulk text data.
+
+ This is used for commands that produce bulk text, such as log or diff
+ output, as opposed to user interaction. This should work even for
+ non-interactive user interfaces. Typically this goes to a decorated
+ version of stdout, but in a GUI it might be appropriate to send it to a
+ window displaying the text.
+
+ :param encoding: Unicode encoding for output; if not specified
+ uses the configured 'output_encoding' if any; otherwise the
+ terminal encoding.
+ (See get_terminal_encoding.)
+
+ :param encoding_type: How to handle encoding errors:
+ replace/strict/escape/exact. Default is replace.
+ """
+ # XXX: is the caller supposed to close the resulting object?
+ if encoding is None:
+ encoding = config.GlobalStack().get('output_encoding')
+ if encoding is None:
+ encoding = osutils.get_terminal_encoding(trace=True)
+ if encoding_type is None:
+ encoding_type = 'replace'
+ out_stream = self._make_output_stream_explicit(encoding, encoding_type)
+ return out_stream
+
+ def _make_output_stream_explicit(self, encoding, encoding_type):
+ raise NotImplementedError("%s doesn't support make_output_stream"
+ % (self.__class__.__name__))
+
+ def nested_progress_bar(self):
+ """Return a nested progress bar.
+
+ When the bar has been finished with, it should be released by calling
+ bar.finished().
+ """
+ if self._task_stack:
+ t = progress.ProgressTask(self._task_stack[-1], self)
+ else:
+ t = progress.ProgressTask(None, self)
+ self._task_stack.append(t)
+ return t
+
+ def _progress_finished(self, task):
+ """Called by the ProgressTask when it finishes"""
+ if not self._task_stack:
+ warnings.warn("%r finished but nothing is active"
+ % (task,))
+ if task in self._task_stack:
+ self._task_stack.remove(task)
+ else:
+ warnings.warn("%r is not in active stack %r"
+ % (task, self._task_stack))
+ if not self._task_stack:
+ self._progress_all_finished()
+
+ def _progress_all_finished(self):
+ """Called when the top-level progress task finished"""
+ pass
+
+ def _progress_updated(self, task):
+ """Called by the ProgressTask when it changes.
+
+ Should be specialized to draw the progress.
+ """
+ pass
+
+ def clear_term(self):
+ """Prepare the terminal for output.
+
+ This will, for example, clear text progress bars, and leave the
+ cursor at the leftmost position.
+ """
+ pass
+
+ def format_user_warning(self, warning_id, message_args):
+ try:
+ template = self._user_warning_templates[warning_id]
+ except KeyError:
+ fail = "bzr warning: %r, %r" % (warning_id, message_args)
+ warnings.warn("no template for warning: " + fail) # so tests will fail etc
+ return fail
+ try:
+ return template % message_args
+ except ValueError, e:
+ fail = "bzr unprintable warning: %r, %r, %s" % (
+ warning_id, message_args, e)
+ warnings.warn(fail) # so tests will fail etc
+ return fail
+
+ def choose(self, msg, choices, default=None):
+ """Prompt the user for a list of alternatives.
+
+ :param msg: message to be shown as part of the prompt.
+
+ :param choices: list of choices, with the individual choices separated
+ by '\n', e.g.: choose("Save changes?", "&Yes\n&No\n&Cancel"). The
+ letter after the '&' is the shortcut key for that choice. Thus you
+ can type 'c' to select "Cancel". Shortcuts are case insensitive.
+ The shortcut does not need to be the first letter. If a shortcut key
+ is not provided, the first letter of the choice will be used.
+
+ :param default: default choice (index), returned for example when enter
+ is pressed for the console version.
+
+ :return: the index of the user's choice (so 0, 1 or 2 for
+ yes/no/cancel respectively in the previous example).
+ """
+ raise NotImplementedError(self.choose)
+
+ def get_boolean(self, prompt):
+ """Get a boolean question answered from the user.
+
+ :param prompt: a message to prompt the user with. Should be a single
+ line without terminating \\n.
+ :return: True or False for y/yes or n/no.
+ """
+ choice = self.choose(prompt + '?', '&yes\n&no', default=None)
+ return 0 == choice
+
+ def get_integer(self, prompt):
+ """Get an integer from the user.
+
+ :param prompt: a message to prompt the user with. Could be a multi-line
+ prompt but without a terminating \\n.
+
+ :return: A signed integer.
+ """
+ raise NotImplementedError(self.get_integer)
+
+ def make_progress_view(self):
+ """Construct a new ProgressView object for this UI.
+
+ Application code should normally not call this but instead
+ nested_progress_bar().
+ """
+ return NullProgressView()
+
+ def recommend_upgrade(self, current_format_name, basedir):
+ """Recommend the user upgrade a control directory.
+
+ :param current_format_name: Description of the current format
+ :param basedir: Location of the control dir
+ """
+ self.show_user_warning('recommend_upgrade',
+ current_format_name=current_format_name, basedir=basedir)
+
+ def report_transport_activity(self, transport, byte_count, direction):
+ """Called by transports as they do IO.
+
+ This may update a progress bar, spinner, or similar display.
+ By default it does nothing.
+ """
+ pass
+
+ def log_transport_activity(self, display=False):
+ """Write out whatever transport activity has been measured.
+
+ Implementations are allowed to do nothing, but it is useful if they can
+ write a line to the log file.
+
+ :param display: If False, only log to disk, if True also try to display
+ a message to the user.
+ :return: None
+ """
+ # Default implementation just does nothing
+ pass
+
+ def show_user_warning(self, warning_id, **message_args):
+ """Show a warning to the user.
+
+ This is specifically for things that are under the user's control (eg
+ outdated formats), not for internal program warnings like deprecated
+ APIs.
+
+ This can be overridden by UIFactory subclasses to show it in some
+ appropriate way; the default UIFactory is noninteractive and does
+ nothing. format_user_warning maps it to a string, though other
+ presentations can be used for particular UIs.
+
+ :param warning_id: An identifier like 'cross_format_fetch' used to
+ check if the message is suppressed and to look up the string.
+ :param message_args: Arguments to be interpolated into the message.
+ """
+ pass
+
+ def show_error(self, msg):
+ """Show an error message (not an exception) to the user.
+
+ The message should not have an error prefix or trailing newline. That
+ will be added by the factory if appropriate.
+ """
+ raise NotImplementedError(self.show_error)
+
+ def show_message(self, msg):
+ """Show a message to the user."""
+ raise NotImplementedError(self.show_message)
+
+ def show_warning(self, msg):
+ """Show a warning to the user."""
+ raise NotImplementedError(self.show_warning)
+
+
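+# Illustrative sketch, not part of the original commit: how application code
+# is expected to drive the UIFactory API documented above.  The function name
+# and the 'items' argument are made up for this example; the calls to
+# nested_progress_bar(), ProgressTask.update()/finished() and
+# make_output_stream() follow the docstrings in UIFactory.
+def _example_report_items(items):
+    from bzrlib import ui
+    pb = ui.ui_factory.nested_progress_bar()
+    try:
+        out = ui.ui_factory.make_output_stream()
+        for i, item in enumerate(items):
+            # update(msg, current, total) lets the view repaint the bar
+            pb.update('processing', i, len(items))
+            out.write('%s\n' % (item,))
+    finally:
+        # nested bars must always be released
+        pb.finished()
+
+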
+class NoninteractiveUIFactory(UIFactory):
+ """Base class for UIs with no user."""
+
+ def confirm_action(self, prompt, confirmation_id, prompt_kwargs):
+ return True
+
+ def __repr__(self):
+ return '%s()' % (self.__class__.__name__, )
+
+
+class SilentUIFactory(NoninteractiveUIFactory):
+ """A UI Factory which never prints anything.
+
+ This is the default UI, if another one is never registered by a program
+ using bzrlib, and it's also active for example inside 'bzr serve'.
+
+ Methods that try to read from the user raise an error; methods that do
+ output do nothing.
+ """
+
+ def __init__(self):
+ UIFactory.__init__(self)
+
+ def note(self, msg):
+ pass
+
+ def get_username(self, prompt, **kwargs):
+ return None
+
+ def _make_output_stream_explicit(self, encoding, encoding_type):
+ return NullOutputStream(encoding)
+
+ def show_error(self, msg):
+ pass
+
+ def show_message(self, msg):
+ pass
+
+ def show_warning(self, msg):
+ pass
+
+
+class CannedInputUIFactory(SilentUIFactory):
+ """A silent UI that return canned input."""
+
+ def __init__(self, responses):
+ self.responses = responses
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self.responses)
+
+ def confirm_action(self, prompt, confirmation_id, args):
+ return self.get_boolean(prompt % args)
+
+ def get_boolean(self, prompt):
+ return self.responses.pop(0)
+
+ def get_integer(self, prompt):
+ return self.responses.pop(0)
+
+ def get_password(self, prompt=u'', **kwargs):
+ return self.responses.pop(0)
+
+ def get_username(self, prompt, **kwargs):
+ return self.responses.pop(0)
+
+ def assert_all_input_consumed(self):
+ if self.responses:
+ raise AssertionError("expected all input in %r to be consumed"
+ % (self,))
+
+
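+# Illustrative sketch, not part of the original commit: using
+# CannedInputUIFactory from a test.  The swap-and-restore of ui_factory and
+# the prompt strings are assumptions made for this example; the queued
+# responses are popped in order, one per prompt, as implemented above.
+def _example_canned_input_test():
+    import bzrlib.ui as ui
+    canned = CannedInputUIFactory([True, 42])
+    saved = ui.ui_factory
+    ui.ui_factory = canned
+    try:
+        assert ui.ui_factory.get_boolean('Really do it') is True
+        assert ui.ui_factory.get_integer('How many') == 42
+    finally:
+        ui.ui_factory = saved
+    # raises AssertionError if any canned responses were left unused
+    canned.assert_all_input_consumed()
+
+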
+ui_factory = SilentUIFactory()
+# IMPORTANT: never import this symbol directly. ONLY ever access it as
+# ui.ui_factory, so that you refer to the current value.
+
+
+def make_ui_for_terminal(stdin, stdout, stderr):
+ """Construct and return a suitable UIFactory for a text mode program.
+ """
+ # this is now always TextUIFactory, which in turn decides whether it
+ # should display progress bars etc
+ from bzrlib.ui.text import TextUIFactory
+ return TextUIFactory(stdin, stdout, stderr)
+
+
+class NullProgressView(object):
+ """Soak up and ignore progress information."""
+
+ def clear(self):
+ pass
+
+ def show_progress(self, task):
+ pass
+
+ def show_transport_activity(self, transport, direction, byte_count):
+ pass
+
+ def log_transport_activity(self, display=False):
+ pass
+
+
+class NullOutputStream(object):
+ """Acts like a file, but discard all output."""
+
+ def __init__(self, encoding):
+ self.encoding = encoding
+
+ def write(self, data):
+ pass
+
+ def writelines(self, data):
+ pass
+
+ def close(self):
+ pass
diff --git a/bzrlib/ui/text.py b/bzrlib/ui/text.py
new file mode 100644
index 0000000..ce91c5c
--- /dev/null
+++ b/bzrlib/ui/text.py
@@ -0,0 +1,673 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Text UI, write output to the console.
+"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+import time
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import codecs
+import getpass
+import warnings
+
+from bzrlib import (
+ debug,
+ progress,
+ osutils,
+ trace,
+ )
+
+""")
+
+from bzrlib.ui import (
+ UIFactory,
+ NullProgressView,
+ )
+
+
+class _ChooseUI(object):
+
+ """ Helper class for choose implementation.
+ """
+
+ def __init__(self, ui, msg, choices, default):
+ self.ui = ui
+ self._setup_mode()
+ self._build_alternatives(msg, choices, default)
+
+ def _setup_mode(self):
+ """Setup input mode (line-based, char-based) and echo-back.
+
+ Line-based input is used if the BZR_TEXTUI_INPUT environment
+ variable is set to 'line-based', or if there is no controlling
+ terminal.
+ """
+ if os.environ.get('BZR_TEXTUI_INPUT') != 'line-based' and \
+ self.ui.stdin == sys.stdin and self.ui.stdin.isatty():
+ self.line_based = False
+ self.echo_back = True
+ else:
+ self.line_based = True
+ self.echo_back = not self.ui.stdin.isatty()
+
+ def _build_alternatives(self, msg, choices, default):
+ """Parse choices string.
+
+ Set up the final prompt and the lists of choices and associated
+ shortcuts.
+ """
+ index = 0
+ help_list = []
+ self.alternatives = {}
+ choices = choices.split('\n')
+ if default is not None and default not in range(0, len(choices)):
+ raise ValueError("invalid default index")
+ for c in choices:
+ name = c.replace('&', '').lower()
+ choice = (name, index)
+ if name in self.alternatives:
+ raise ValueError("duplicated choice: %s" % name)
+ self.alternatives[name] = choice
+ shortcut = c.find('&')
+ if -1 != shortcut and (shortcut + 1) < len(c):
+ help = c[:shortcut]
+ help += '[' + c[shortcut + 1] + ']'
+ help += c[(shortcut + 2):]
+ shortcut = c[shortcut + 1]
+ else:
+ c = c.replace('&', '')
+ shortcut = c[0]
+ help = '[%s]%s' % (shortcut, c[1:])
+ shortcut = shortcut.lower()
+ if shortcut in self.alternatives:
+ raise ValueError("duplicated shortcut: %s" % shortcut)
+ self.alternatives[shortcut] = choice
+ # Add redirections for default.
+ if index == default:
+ self.alternatives[''] = choice
+ self.alternatives['\r'] = choice
+ help_list.append(help)
+ index += 1
+
+ self.prompt = u'%s (%s): ' % (msg, ', '.join(help_list))
+
+ def _getline(self):
+ line = self.ui.stdin.readline()
+ if '' == line:
+ raise EOFError
+ return line.strip()
+
+ def _getchar(self):
+ char = osutils.getchar()
+ if char == chr(3): # INTR
+ raise KeyboardInterrupt
+ if char == chr(4): # EOF (^d, C-d)
+ raise EOFError
+ return char
+
+ def interact(self):
+ """Keep asking the user until a valid choice is made.
+ """
+ if self.line_based:
+ getchoice = self._getline
+ else:
+ getchoice = self._getchar
+ iter = 0
+ while True:
+ iter += 1
+ if 1 == iter or self.line_based:
+ self.ui.prompt(self.prompt)
+ try:
+ choice = getchoice()
+ except EOFError:
+ self.ui.stderr.write('\n')
+ return None
+ except KeyboardInterrupt:
+ self.ui.stderr.write('\n')
+ raise KeyboardInterrupt
+ choice = choice.lower()
+ if choice not in self.alternatives:
+ # Not a valid choice, keep on asking.
+ continue
+ name, index = self.alternatives[choice]
+ if self.echo_back:
+ self.ui.stderr.write(name + '\n')
+ return index
+
+
+class TextUIFactory(UIFactory):
+ """A UI factory for Text user interefaces."""
+
+ def __init__(self,
+ stdin=None,
+ stdout=None,
+ stderr=None):
+ """Create a TextUIFactory.
+ """
+ super(TextUIFactory, self).__init__()
+ # TODO: there's no good reason not to pass all three streams, maybe we
+ # should deprecate the default values...
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+ # paints progress, network activity, etc
+ self._progress_view = self.make_progress_view()
+
+ def choose(self, msg, choices, default=None):
+ """Prompt the user for a list of alternatives.
+
+ Support both line-based and char-based editing.
+
+ In line-based mode, both the shortcut and full choice name are valid
+ answers, e.g. for choose('prompt', '&yes\n&no'): 'y', ' Y ', ' yes',
+ 'YES ' are all valid input lines for choosing 'yes'.
+
+ An empty line, when in line-based mode, or pressing enter in char-based
+ mode will select the default choice (if any).
+
+ Choice is echoed back if:
+ - input is char-based; which means a controlling terminal is available,
+ and osutils.getchar is used
+ - input is line-based, and no controlling terminal is available
+ """
+
+ choose_ui = _ChooseUI(self, msg, choices, default)
+ return choose_ui.interact()
+
+ def be_quiet(self, state):
+ if state and not self._quiet:
+ self.clear_term()
+ UIFactory.be_quiet(self, state)
+ self._progress_view = self.make_progress_view()
+
+ def clear_term(self):
+ """Prepare the terminal for output.
+
+ This will clear any progress bars, and leave the cursor at the
+ leftmost position."""
+ # XXX: If this is preparing to write to stdout, but that's for example
+ # directed into a file rather than to the terminal, and the progress
+ # bar _is_ going to the terminal, we shouldn't need
+ # to clear it. We might need to separately check for the case of
+ self._progress_view.clear()
+
+ def get_integer(self, prompt):
+ while True:
+ self.prompt(prompt)
+ line = self.stdin.readline()
+ try:
+ return int(line)
+ except ValueError:
+ pass
+
+ def get_non_echoed_password(self):
+ isatty = getattr(self.stdin, 'isatty', None)
+ if isatty is not None and isatty():
+ # getpass() ensures the password is not echoed and provides other
+ # cross-platform niceties
+ password = getpass.getpass('')
+ else:
+ # echo doesn't make sense without a terminal
+ password = self.stdin.readline()
+ if not password:
+ password = None
+ elif password[-1] == '\n':
+ password = password[:-1]
+ return password
+
+ def get_password(self, prompt=u'', **kwargs):
+ """Prompt the user for a password.
+
+ :param prompt: The prompt to present the user
+ :param kwargs: Arguments which will be expanded into the prompt.
+ This lets front ends display different things if
+ they so choose.
+ :return: The password string, or None if the user
+ canceled the request.
+ """
+ prompt += ': '
+ self.prompt(prompt, **kwargs)
+ # There's currently no way to say 'I decline to enter a password'
+ # as opposed to 'my password is empty' -- does it matter?
+ return self.get_non_echoed_password()
+
+ def get_username(self, prompt, **kwargs):
+ """Prompt the user for a username.
+
+ :param prompt: The prompt to present the user
+ :param kwargs: Arguments which will be expanded into the prompt.
+ This lets front ends display different things if
+ they so choose.
+ :return: The username string, or None if the user
+ canceled the request.
+ """
+ prompt += ': '
+ self.prompt(prompt, **kwargs)
+ username = self.stdin.readline()
+ if not username:
+ username = None
+ elif username[-1] == '\n':
+ username = username[:-1]
+ return username
+
+ def make_progress_view(self):
+ """Construct and return a new ProgressView subclass for this UI.
+ """
+ # with --quiet, never any progress view
+ # <https://bugs.launchpad.net/bzr/+bug/320035>. Otherwise if the
+ # user specifically requests either text or no progress bars, always
+ # do that. Otherwise, guess based on $TERM and tty presence.
+ if self.is_quiet():
+ return NullProgressView()
+ elif os.environ.get('BZR_PROGRESS_BAR') == 'text':
+ return TextProgressView(self.stderr)
+ elif os.environ.get('BZR_PROGRESS_BAR') == 'none':
+ return NullProgressView()
+ elif progress._supports_progress(self.stderr):
+ return TextProgressView(self.stderr)
+ else:
+ return NullProgressView()
+
+ def _make_output_stream_explicit(self, encoding, encoding_type):
+ if encoding_type == 'exact':
+ # force sys.stdout to be binary stream on win32;
+ # NB: this leaves the file set in that mode; may cause problems if
+ # one process tries to do binary and then text output
+ if sys.platform == 'win32':
+ fileno = getattr(self.stdout, 'fileno', None)
+ if fileno:
+ import msvcrt
+ msvcrt.setmode(fileno(), os.O_BINARY)
+ return TextUIOutputStream(self, self.stdout)
+ else:
+ encoded_stdout = codecs.getwriter(encoding)(self.stdout,
+ errors=encoding_type)
+ # For whatever reason codecs.getwriter() does not advertise its encoding;
+ # it just returns the encoding of the wrapped file, which is completely
+ # bogus. So set the attribute ourselves, so we can find the correct encoding later.
+ encoded_stdout.encoding = encoding
+ return TextUIOutputStream(self, encoded_stdout)
+
+ def note(self, msg):
+ """Write an already-formatted message, clearing the progress bar if necessary."""
+ self.clear_term()
+ self.stdout.write(msg + '\n')
+
+ def prompt(self, prompt, **kwargs):
+ """Emit prompt on the CLI.
+
+ :param kwargs: Dictionary of arguments to insert into the prompt,
+ to allow UIs to reformat the prompt.
+ """
+ if type(prompt) != unicode:
+ raise ValueError("prompt %r not a unicode string" % prompt)
+ if kwargs:
+ # See <https://launchpad.net/bugs/365891>
+ prompt = prompt % kwargs
+ prompt = prompt.encode(osutils.get_terminal_encoding(), 'replace')
+ self.clear_term()
+ self.stdout.flush()
+ self.stderr.write(prompt)
+
+ def report_transport_activity(self, transport, byte_count, direction):
+ """Called by transports as they do IO.
+
+ This may update a progress bar, spinner, or similar display.
+ By default it does nothing.
+ """
+ self._progress_view.show_transport_activity(transport,
+ direction, byte_count)
+
+ def log_transport_activity(self, display=False):
+ """See UIFactory.log_transport_activity()"""
+ log = getattr(self._progress_view, 'log_transport_activity', None)
+ if log is not None:
+ log(display=display)
+
+ def show_error(self, msg):
+ self.clear_term()
+ self.stderr.write("bzr: error: %s\n" % msg)
+
+ def show_message(self, msg):
+ self.note(msg)
+
+ def show_warning(self, msg):
+ self.clear_term()
+ if isinstance(msg, unicode):
+ te = osutils.get_terminal_encoding()
+ msg = msg.encode(te, 'replace')
+ self.stderr.write("bzr: warning: %s\n" % msg)
+
+ def _progress_updated(self, task):
+ """A task has been updated and wants to be displayed.
+ """
+ if not self._task_stack:
+ warnings.warn("%r updated but no tasks are active" %
+ (task,))
+ elif task != self._task_stack[-1]:
+ # We used to check it was the top task, but it's hard to always
+ # get this right and it's not necessarily useful: any actual
+ # problems will be evident in use
+ #warnings.warn("%r is not the top progress task %r" %
+ # (task, self._task_stack[-1]))
+ pass
+ self._progress_view.show_progress(task)
+
+ def _progress_all_finished(self):
+ self._progress_view.clear()
+
+ def show_user_warning(self, warning_id, **message_args):
+ """Show a text message to the user.
+
+ Explicitly not for warnings about bzr apis, deprecations or internals.
+ """
+ # eventually trace.warning should migrate here, to avoid logging and
+ # be easier to test; that has a lot of test fallout so for now just
+ # new code can call this
+ if warning_id not in self.suppressed_warnings:
+ self.stderr.write(self.format_user_warning(warning_id, message_args) +
+ '\n')
+
+
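+# Illustrative sketch, not part of the original commit: prompting with
+# choose() on a TextUIFactory.  The stream wiring and the prompt text are
+# assumptions made for this example; the '&' shortcut markers and the
+# returned index follow the choose() docstring above.
+def _example_choose_prompt():
+    import sys
+    factory = TextUIFactory(stdin=sys.stdin, stdout=sys.stdout,
+                            stderr=sys.stderr)
+    # returns 0, 1 or 2; pressing enter selects the default (here 'yes')
+    answer = factory.choose(u'Save changes?', u'&Yes\n&No\n&Cancel', default=0)
+    if answer == 0:
+        factory.note('saving')
+    return answer
+
+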
+class TextProgressView(object):
+ """Display of progress bar and other information on a tty.
+
+ This shows one line of text, including possibly a network indicator, spinner,
+ progress bar, message, etc.
+
+ One instance of this is created and held by the UI, and fed updates when a
+ task wants to be painted.
+
+ Transports feed data to this through the ui_factory object.
+
+ The progress tasks can form a tree with _parent_task pointers, but
+ this only prints the stack from the nominated current task up to the root.
+ """
+
+ def __init__(self, term_file, encoding=None, errors="replace"):
+ self._term_file = term_file
+ if encoding is None:
+ self._encoding = getattr(term_file, "encoding", None) or "ascii"
+ else:
+ self._encoding = encoding
+ self._encoding_errors = errors
+ # true when there's output on the screen we may need to clear
+ self._have_output = False
+ self._last_transport_msg = ''
+ self._spin_pos = 0
+ # time we last repainted the screen
+ self._last_repaint = 0
+ # time we last got information about transport activity
+ self._transport_update_time = 0
+ self._last_task = None
+ self._total_byte_count = 0
+ self._bytes_since_update = 0
+ self._bytes_by_direction = {'unknown': 0, 'read': 0, 'write': 0}
+ self._first_byte_time = None
+ self._fraction = 0
+ # force the progress bar to be off, as at the moment it doesn't
+ # correspond reliably to overall command progress
+ self.enable_bar = False
+
+ def _avail_width(self):
+ # we need one extra space for terminals that wrap on last char
+ w = osutils.terminal_width()
+ if w is None:
+ return None
+ else:
+ return w - 1
+
+ def _show_line(self, u):
+ s = u.encode(self._encoding, self._encoding_errors)
+ width = self._avail_width()
+ if width is not None:
+ # GZ 2012-03-28: Counting bytes is wrong for calculating width of
+ # text but better than counting codepoints.
+ s = '%-*.*s' % (width, width, s)
+ self._term_file.write('\r' + s + '\r')
+
+ def clear(self):
+ if self._have_output:
+ self._show_line('')
+ self._have_output = False
+
+ def _render_bar(self):
+ # return a string for the progress bar itself
+ if self.enable_bar and (
+ (self._last_task is None) or self._last_task.show_bar):
+ # If there's no task object, we show space for the bar anyhow.
+ # That's because most invocations of bzr will end showing progress
+ # at some point, though perhaps only after doing some initial IO.
+ # It looks better to draw the progress bar initially rather than
+ # to have what looks like an incomplete progress bar.
+ spin_str = r'/-\|'[self._spin_pos % 4]
+ self._spin_pos += 1
+ cols = 20
+ if self._last_task is None:
+ completion_fraction = 0
+ self._fraction = 0
+ else:
+ completion_fraction = \
+ self._last_task._overall_completion_fraction() or 0
+ if (completion_fraction < self._fraction and 'progress' in
+ debug.debug_flags):
+ import pdb;pdb.set_trace()
+ self._fraction = completion_fraction
+ markers = int(round(float(cols) * completion_fraction)) - 1
+ bar_str = '[' + ('#' * markers + spin_str).ljust(cols) + '] '
+ return bar_str
+ elif (self._last_task is None) or self._last_task.show_spinner:
+ # The last task wanted just a spinner, no bar
+ spin_str = r'/-\|'[self._spin_pos % 4]
+ self._spin_pos += 1
+ return spin_str + ' '
+ else:
+ return ''
+
+ def _format_task(self, task):
+ """Format task-specific parts of progress bar.
+
+ :returns: (text_part, counter_part) both unicode strings.
+ """
+ if not task.show_count:
+ s = ''
+ elif task.current_cnt is not None and task.total_cnt is not None:
+ s = ' %d/%d' % (task.current_cnt, task.total_cnt)
+ elif task.current_cnt is not None:
+ s = ' %d' % (task.current_cnt)
+ else:
+ s = ''
+ # compose all the parent messages
+ t = task
+ m = task.msg
+ while t._parent_task:
+ t = t._parent_task
+ if t.msg:
+ m = t.msg + ':' + m
+ return m, s
+
+ def _render_line(self):
+ bar_string = self._render_bar()
+ if self._last_task:
+ task_part, counter_part = self._format_task(self._last_task)
+ else:
+ task_part = counter_part = ''
+ if self._last_task and not self._last_task.show_transport_activity:
+ trans = ''
+ else:
+ trans = self._last_transport_msg
+ # the bar separates the transport activity from the message, so even
+ # if there's no bar or spinner, we must show something if both those
+ # fields are present
+ if (task_part or trans) and not bar_string:
+ bar_string = '| '
+ # preferentially truncate the task message if we don't have enough
+ # space
+ avail_width = self._avail_width()
+ if avail_width is not None:
+ # if terminal avail_width is unknown, don't truncate
+ current_len = len(bar_string) + len(trans) + len(task_part) + len(counter_part)
+ gap = current_len - avail_width
+ if gap > 0:
+ task_part = task_part[:-gap-2] + '..'
+ s = trans + bar_string + task_part + counter_part
+ if avail_width is not None:
+ if len(s) < avail_width:
+ s = s.ljust(avail_width)
+ elif len(s) > avail_width:
+ s = s[:avail_width]
+ return s
+
+ def _repaint(self):
+ s = self._render_line()
+ self._show_line(s)
+ self._have_output = True
+
+ def show_progress(self, task):
+ """Called by the task object when it has changed.
+
+ :param task: The top task object; its parents are also included
+ by following links.
+ """
+ must_update = task is not self._last_task
+ self._last_task = task
+ now = time.time()
+ if (not must_update) and (now < self._last_repaint + task.update_latency):
+ return
+ if now > self._transport_update_time + 10:
+ # no recent activity; expire it
+ self._last_transport_msg = ''
+ self._last_repaint = now
+ self._repaint()
+
+ def show_transport_activity(self, transport, direction, byte_count):
+ """Called by transports via the ui_factory, as they do IO.
+
+ This may update a progress bar, spinner, or similar display.
+ By default it does nothing.
+ """
+ # XXX: there should be a transport activity model, and that too should
+ # be seen by the progress view, rather than being poked in here.
+ self._total_byte_count += byte_count
+ self._bytes_since_update += byte_count
+ if self._first_byte_time is None:
+ # Note that this isn't great, as technically it should be the time
+ # when the bytes started transferring, not when they completed.
+ # However, we usually start with a small request anyway.
+ self._first_byte_time = time.time()
+ if direction in self._bytes_by_direction:
+ self._bytes_by_direction[direction] += byte_count
+ else:
+ self._bytes_by_direction['unknown'] += byte_count
+ if 'no_activity' in debug.debug_flags:
+ # Can be used as a workaround if
+ # <https://launchpad.net/bugs/321935> reappears and transport
+ # activity is cluttering other output. However, thanks to
+ # TextUIOutputStream this shouldn't be a problem any more.
+ return
+ now = time.time()
+ if self._total_byte_count < 2000:
+ # a little resistance at first, so it doesn't stay stuck at 0
+ # while connecting...
+ return
+ if self._transport_update_time is None:
+ self._transport_update_time = now
+ elif now >= (self._transport_update_time + 0.5):
+ # guard against clock stepping backwards, and don't update too
+ # often
+ rate = (self._bytes_since_update
+ / (now - self._transport_update_time))
+ # using base-10 units (see HACKING.txt).
+ msg = ("%6dkB %5dkB/s " %
+ (self._total_byte_count / 1000, int(rate) / 1000,))
+ self._transport_update_time = now
+ self._last_repaint = now
+ self._bytes_since_update = 0
+ self._last_transport_msg = msg
+ self._repaint()
+
+ def _format_bytes_by_direction(self):
+ if self._first_byte_time is None:
+ bps = 0.0
+ else:
+ transfer_time = time.time() - self._first_byte_time
+ if transfer_time < 0.001:
+ transfer_time = 0.001
+ bps = self._total_byte_count / transfer_time
+
+ # using base-10 units (see HACKING.txt).
+ msg = ('Transferred: %.0fkB'
+ ' (%.1fkB/s r:%.0fkB w:%.0fkB'
+ % (self._total_byte_count / 1000.,
+ bps / 1000.,
+ self._bytes_by_direction['read'] / 1000.,
+ self._bytes_by_direction['write'] / 1000.,
+ ))
+ if self._bytes_by_direction['unknown'] > 0:
+ msg += ' u:%.0fkB)' % (
+ self._bytes_by_direction['unknown'] / 1000.
+ )
+ else:
+ msg += ')'
+ return msg
+
+ def log_transport_activity(self, display=False):
+ msg = self._format_bytes_by_direction()
+ trace.mutter(msg)
+ if display and self._total_byte_count > 0:
+ self.clear()
+ self._term_file.write(msg + '\n')
+
+
+class TextUIOutputStream(object):
+ """Decorates an output stream so that the terminal is cleared before writing.
+
+ This is supposed to ensure that the progress bar does not conflict with bulk
+ text output.
+ """
+ # XXX: this does not handle the case of writing part of a line, then doing
+ # progress bar output: the progress bar will probably write over it.
+ # one option is just to buffer that text until we have a full line;
+ # another is to save and restore it
+
+ # XXX: might need to wrap more methods
+
+ def __init__(self, ui_factory, wrapped_stream):
+ self.ui_factory = ui_factory
+ self.wrapped_stream = wrapped_stream
+ # this does no transcoding, but it must expose the underlying encoding
+ # because some callers need to know what can be written - see for
+ # example unescape_for_display.
+ self.encoding = getattr(wrapped_stream, 'encoding', None)
+
+ def flush(self):
+ self.ui_factory.clear_term()
+ self.wrapped_stream.flush()
+
+ def write(self, to_write):
+ self.ui_factory.clear_term()
+ self.wrapped_stream.write(to_write)
+
+ def writelines(self, lines):
+ self.ui_factory.clear_term()
+ self.wrapped_stream.writelines(lines)
diff --git a/bzrlib/uncommit.py b/bzrlib/uncommit.py
new file mode 100644
index 0000000..75e3f9f
--- /dev/null
+++ b/bzrlib/uncommit.py
@@ -0,0 +1,144 @@
+# Copyright (C) 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Remove the last revision from the history of the current branch."""
+
+from __future__ import absolute_import
+
+# TODO: make the guts of this into methods on tree and branch.
+
+from bzrlib import (
+ errors,
+ revision as _mod_revision,
+ )
+from bzrlib.branch import Branch
+from bzrlib.errors import BoundBranchOutOfDate
+
+
+def remove_tags(branch, graph, old_tip, parents):
+ """Remove tags on revisions between old_tip and new_tip.
+
+ :param branch: Branch to remove tags from
+ :param graph: Graph object for branch repository
+ :param old_tip: Old branch tip
+ :param parents: New parents
+ :return: Names of the removed tags
+ """
+ reverse_tags = branch.tags.get_reverse_tag_dict()
+ ancestors = graph.find_unique_ancestors(old_tip, parents)
+ removed_tags = []
+ for revid, tags in reverse_tags.iteritems():
+ if not revid in ancestors:
+ continue
+ for tag in tags:
+ branch.tags.delete_tag(tag)
+ removed_tags.append(tag)
+ return removed_tags
+
+
+def uncommit(branch, dry_run=False, verbose=False, revno=None, tree=None,
+ local=False, keep_tags=False):
+ """Remove the last revision from the supplied branch.
+
+ :param dry_run: Don't actually change anything
+ :param verbose: Print each step as you do it
+ :param revno: Remove back to this revision
+ :param local: If this branch is bound, only remove the revisions from the
+ local branch. If this branch is not bound, it is an error to pass
+ local=True.
+ :param keep_tags: Whether to keep tags pointing at the removed revisions
+ around.
+ """
+ unlockable = []
+ try:
+ if tree is not None:
+ tree.lock_write()
+ unlockable.append(tree)
+
+ branch.lock_write()
+ unlockable.append(branch)
+
+ pending_merges = []
+ if tree is not None:
+ pending_merges = tree.get_parent_ids()[1:]
+
+ if local:
+ master = None
+ if branch.get_bound_location() is None:
+ raise errors.LocalRequiresBoundBranch()
+ else:
+ master = branch.get_master_branch()
+ if master is not None:
+ master.lock_write()
+ unlockable.append(master)
+ old_revno, old_tip = branch.last_revision_info()
+ if master is not None and old_tip != master.last_revision():
+ raise BoundBranchOutOfDate(branch, master)
+ if revno is None:
+ revno = old_revno
+ new_revno = revno - 1
+
+ cur_revno = old_revno
+ new_revision_id = old_tip
+ graph = branch.repository.get_graph()
+ for rev_id in graph.iter_lefthand_ancestry(old_tip):
+ if cur_revno == new_revno:
+ new_revision_id = rev_id
+ break
+ if verbose:
+ print 'Removing revno %d: %s' % (cur_revno, rev_id)
+ cur_revno -= 1
+ parents = graph.get_parent_map([rev_id]).get(rev_id, None)
+ if not parents:
+ continue
+ # When we finish popping off the pending merges, we want
+ # them to stay in the order that they used to be.
+ # but we pop from the end, so reverse the order, and
+ # then get the order right at the end
+ pending_merges.extend(reversed(parents[1:]))
+ else:
+ # We ran off the end of revisions, which means we should be trying
+ # to get to NULL_REVISION
+ new_revision_id = _mod_revision.NULL_REVISION
+
+ if not dry_run:
+ if master is not None:
+ master.set_last_revision_info(new_revno, new_revision_id)
+ branch.set_last_revision_info(new_revno, new_revision_id)
+ if master is None:
+ hook_local = None
+ hook_master = branch
+ else:
+ hook_local = branch
+ hook_master = master
+ for hook in Branch.hooks['post_uncommit']:
+ hook_new_tip = new_revision_id
+ if hook_new_tip == _mod_revision.NULL_REVISION:
+ hook_new_tip = None
+ hook(hook_local, hook_master, old_revno, old_tip, new_revno,
+ hook_new_tip)
+ if not _mod_revision.is_null(new_revision_id):
+ parents = [new_revision_id]
+ else:
+ parents = []
+ if tree is not None:
+ parents.extend(reversed(pending_merges))
+ tree.set_parent_ids(parents)
+ if branch.supports_tags() and not keep_tags:
+ remove_tags(branch, graph, old_tip, parents)
+ finally:
+ for item in reversed(unlockable):
+ item.unlock()
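+
+
+# Illustrative sketch, not part of the original commit: removing the last
+# revision of a working tree's branch with uncommit().  The '.' location is
+# an assumption made for this example; the branch/tree/dry_run parameters
+# follow the docstring above.
+def _example_uncommit_last_revision(location='.', dry_run=True):
+    from bzrlib.workingtree import WorkingTree
+    tree = WorkingTree.open_containing(location)[0]
+    # with dry_run=True nothing is changed; pass dry_run=False to really
+    # move the branch tip back by one revision
+    uncommit(tree.branch, tree=tree, dry_run=dry_run)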
diff --git a/bzrlib/upgrade.py b/bzrlib/upgrade.py
new file mode 100644
index 0000000..6bd0732
--- /dev/null
+++ b/bzrlib/upgrade.py
@@ -0,0 +1,311 @@
+# Copyright (C) 2005, 2006, 2008-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""bzr upgrade logic."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ errors,
+ trace,
+ ui,
+ urlutils,
+ )
+from bzrlib.controldir import (
+ ControlDir,
+ format_registry,
+ )
+from bzrlib.i18n import gettext
+from bzrlib.remote import RemoteBzrDir
+
+
+class Convert(object):
+
+ def __init__(self, url=None, format=None, control_dir=None):
+ """Convert a Bazaar control directory to a given format.
+
+ Either the url or control_dir parameter must be given.
+
+ :param url: the URL of the control directory or None if the
+ control_dir is explicitly given instead
+ :param format: the format to convert to or None for the default
+ :param control_dir: the control directory or None if it is
+ specified via the URL parameter instead
+ """
+ self.format = format
+ # XXX: Change to cleanup
+ warning_id = 'cross_format_fetch'
+ saved_warning = warning_id in ui.ui_factory.suppressed_warnings
+ if url is None and control_dir is None:
+ raise AssertionError(
+ "either the url or control_dir parameter must be set.")
+ if control_dir is not None:
+ self.bzrdir = control_dir
+ else:
+ self.bzrdir = ControlDir.open_unsupported(url)
+ if isinstance(self.bzrdir, RemoteBzrDir):
+ self.bzrdir._ensure_real()
+ self.bzrdir = self.bzrdir._real_bzrdir
+ if self.bzrdir.root_transport.is_readonly():
+ raise errors.UpgradeReadonly
+ self.transport = self.bzrdir.root_transport
+ ui.ui_factory.suppressed_warnings.add(warning_id)
+ try:
+ self.convert()
+ finally:
+ if not saved_warning:
+ ui.ui_factory.suppressed_warnings.remove(warning_id)
+
+ def convert(self):
+ try:
+ branch = self.bzrdir.open_branch()
+ if branch.user_url != self.bzrdir.user_url:
+ ui.ui_factory.note(gettext(
+ 'This is a checkout. The branch (%s) needs to be upgraded'
+ ' separately.') % (urlutils.unescape_for_display(
+ branch.user_url, 'utf-8')))
+ del branch
+ except (errors.NotBranchError, errors.IncompatibleRepositories):
+ # might not be a format we can open without upgrading; see e.g.
+ # https://bugs.launchpad.net/bzr/+bug/253891
+ pass
+ if self.format is None:
+ try:
+ rich_root = self.bzrdir.find_repository()._format.rich_root_data
+ except errors.NoRepositoryPresent:
+ rich_root = False # assume no rich roots
+ if rich_root:
+ format_name = "default-rich-root"
+ else:
+ format_name = "default"
+ format = format_registry.make_bzrdir(format_name)
+ else:
+ format = self.format
+ if not self.bzrdir.needs_format_conversion(format):
+ raise errors.UpToDateFormat(self.bzrdir._format)
+ if not self.bzrdir.can_convert_format():
+ raise errors.BzrError(gettext("cannot upgrade from bzrdir format %s") %
+ self.bzrdir._format)
+ self.bzrdir.check_conversion_target(format)
+ ui.ui_factory.note(gettext('starting upgrade of %s') %
+ urlutils.unescape_for_display(self.transport.base, 'utf-8'))
+
+ self.backup_oldpath, self.backup_newpath = self.bzrdir.backup_bzrdir()
+ while self.bzrdir.needs_format_conversion(format):
+ converter = self.bzrdir._format.get_converter(format)
+ self.bzrdir = converter.convert(self.bzrdir, None)
+ ui.ui_factory.note(gettext('finished'))
+
+ def clean_up(self):
+ """Clean-up after a conversion.
+
+ This removes the backup.bzr directory.
+ """
+ transport = self.transport
+ backup_relpath = transport.relpath(self.backup_newpath)
+ child_pb = ui.ui_factory.nested_progress_bar()
+ child_pb.update(gettext('Deleting backup.bzr'))
+ try:
+ transport.delete_tree(backup_relpath)
+ finally:
+ child_pb.finished()
+
+
+def upgrade(url, format=None, clean_up=False, dry_run=False):
+ """Upgrade locations to format.
+
+ This routine wraps the smart_upgrade() routine with a nicer UI.
+ In particular, it ensures all URLs can be opened before starting
+ and reports a summary at the end if more than one upgrade was attempted.
+ This routine is useful for command line tools. Other bzrlib clients
+ probably ought to use smart_upgrade() instead.
+
+ :param url: a URL of the locations to upgrade.
+ :param format: the format to convert to or None for the best default
+ :param clean_up: if True, the backup.bzr directory is removed if the
+ upgrade succeeded for a given repo/branch/tree
+ :param dry_run: show what would happen but don't actually do any upgrades
+ :return: the list of exceptions encountered
+ """
+ control_dirs = [ControlDir.open_unsupported(url)]
+ attempted, succeeded, exceptions = smart_upgrade(control_dirs,
+ format, clean_up=clean_up, dry_run=dry_run)
+ if len(attempted) > 1:
+ attempted_count = len(attempted)
+ succeeded_count = len(succeeded)
+ failed_count = attempted_count - succeeded_count
+ ui.ui_factory.note(
+ gettext('\nSUMMARY: {0} upgrades attempted, {1} succeeded,'\
+ ' {2} failed').format(
+ attempted_count, succeeded_count, failed_count))
+ return exceptions
+
+
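+# Illustrative sketch, not part of the original commit: a programmatic call
+# of upgrade() above.  The location and the '2a' format name are assumptions
+# made for this example; format names are looked up in
+# bzrlib.controldir.format_registry.
+def _example_upgrade_to_2a(location='.', dry_run=True):
+    fmt = format_registry.make_bzrdir('2a')
+    # returns the list of exceptions hit; an empty list means full success
+    return upgrade(location, format=fmt, clean_up=True, dry_run=dry_run)
+
+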
+def smart_upgrade(control_dirs, format, clean_up=False,
+ dry_run=False):
+ """Convert control directories to a new format intelligently.
+
+ If the control directory is a shared repository, dependent branches
+ are also converted provided the repository converted successfully.
+ If the conversion of a branch fails, remaining branches are still tried.
+
+ :param control_dirs: the BzrDirs to upgrade
+ :param format: the format to convert to or None for the best default
+ :param clean_up: if True, the backup.bzr directory is removed if the
+ upgrade succeeded for a given repo/branch/tree
+ :param dry_run: show what would happen but don't actually do any upgrades
+ :return: attempted-control-dirs, succeeded-control-dirs, exceptions
+ """
+ all_attempted = []
+ all_succeeded = []
+ all_exceptions = []
+ for control_dir in control_dirs:
+ attempted, succeeded, exceptions = _smart_upgrade_one(control_dir,
+ format, clean_up=clean_up, dry_run=dry_run)
+ all_attempted.extend(attempted)
+ all_succeeded.extend(succeeded)
+ all_exceptions.extend(exceptions)
+ return all_attempted, all_succeeded, all_exceptions
+
+
+def _smart_upgrade_one(control_dir, format, clean_up=False,
+ dry_run=False):
+ """Convert a control directory to a new format intelligently.
+
+ See smart_upgrade for parameter details.
+ """
+ # If the URL is a shared repository, find the dependent branches
+ dependents = None
+ try:
+ repo = control_dir.open_repository()
+ except errors.NoRepositoryPresent:
+ # A branch or checkout using a shared repository higher up
+ pass
+ else:
+ # The URL is a repository. If it successfully upgrades,
+ # then upgrade the dependent branches as well.
+ if repo.is_shared():
+ dependents = repo.find_branches(using=True)
+
+ # Do the conversions
+ attempted = [control_dir]
+ succeeded, exceptions = _convert_items([control_dir], format, clean_up,
+ dry_run)
+ if succeeded and dependents:
+ ui.ui_factory.note(gettext('Found %d dependent branches - upgrading ...')
+ % (len(dependents),))
+ # Convert dependent branches
+ branch_cdirs = [b.bzrdir for b in dependents]
+ successes, problems = _convert_items(branch_cdirs, format, clean_up,
+ dry_run, label="branch")
+ attempted.extend(branch_cdirs)
+ succeeded.extend(successes)
+ exceptions.extend(problems)
+
+ # Return the result
+ return attempted, succeeded, exceptions
+
+# FIXME: There are several problems below:
+# - RemoteRepository doesn't support _unsupported (really ?)
+# - raising AssertionError is rude and may not be necessary
+# - no tests
+# - the only caller uses only the label
+def _get_object_and_label(control_dir):
+ """Return the primary object and type label for a control directory.
+
+ :return: object, label where:
+ * object is a Branch, Repository or WorkingTree and
+ * label is one of:
+ * branch - a branch
+ * repository - a repository
+ * tree - a lightweight checkout
+ """
+ try:
+ try:
+ br = control_dir.open_branch(unsupported=True,
+ ignore_fallbacks=True)
+ except NotImplementedError:
+ # RemoteRepository doesn't support the unsupported parameter
+ br = control_dir.open_branch(ignore_fallbacks=True)
+ except errors.NotBranchError:
+ pass
+ else:
+ return br, "branch"
+ try:
+ repo = control_dir.open_repository()
+ except errors.NoRepositoryPresent:
+ pass
+ else:
+ return repo, "repository"
+ try:
+ wt = control_dir.open_workingtree()
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ pass
+ else:
+ return wt, "tree"
+ raise AssertionError("unknown type of control directory %s" % (control_dir,))
+
+
+def _convert_items(items, format, clean_up, dry_run, label=None):
+ """Convert a sequence of control directories to the given format.
+
+ :param items: the control directories to upgrade
+ :param format: the format to convert to or None for the best default
+ :param clean_up: if True, the backup.bzr directory is removed if the
+ upgrade succeeded for a given repo/branch/tree
+ :param dry_run: show what would happen but don't actually do any upgrades
+ :param label: the label for these items or None to calculate one
+ :return: items successfully upgraded, exceptions
+ """
+ succeeded = []
+ exceptions = []
+ child_pb = ui.ui_factory.nested_progress_bar()
+ child_pb.update(gettext('Upgrading bzrdirs'), 0, len(items))
+ for i, control_dir in enumerate(items):
+ # Do the conversion
+ location = control_dir.root_transport.base
+ bzr_object, bzr_label = _get_object_and_label(control_dir)
+ type_label = label or bzr_label
+ child_pb.update(gettext("Upgrading %s") % (type_label), i+1, len(items))
+ ui.ui_factory.note(gettext('Upgrading {0} {1} ...').format(type_label,
+ urlutils.unescape_for_display(location, 'utf-8'),))
+ try:
+ if not dry_run:
+ cv = Convert(control_dir=control_dir, format=format)
+ except errors.UpToDateFormat, ex:
+ ui.ui_factory.note(str(ex))
+ succeeded.append(control_dir)
+ continue
+ except Exception, ex:
+ trace.warning('conversion error: %s' % ex)
+ exceptions.append(ex)
+ continue
+
+ # Do any required post processing
+ succeeded.append(control_dir)
+ if clean_up:
+ try:
+ ui.ui_factory.note(gettext('Removing backup ...'))
+ if not dry_run:
+ cv.clean_up()
+ except Exception, ex:
+ trace.warning(gettext('failed to clean-up {0}: {1}').format(location, ex))
+ exceptions.append(ex)
+
+ child_pb.finished()
+
+ # Return the result
+ return succeeded, exceptions
diff --git a/bzrlib/url_policy_open.py b/bzrlib/url_policy_open.py
new file mode 100644
index 0000000..88dfde0
--- /dev/null
+++ b/bzrlib/url_policy_open.py
@@ -0,0 +1,314 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Branch opening with URL-based restrictions."""
+
+from __future__ import absolute_import
+
+import threading
+
+from bzrlib import (
+ errors,
+ urlutils,
+ )
+from bzrlib.branch import Branch
+from bzrlib.controldir import (
+ ControlDir,
+ )
+
+
+class BadUrl(errors.BzrError):
+
+ _fmt = "Tried to access a branch from bad URL %(url)s."
+
+
+class BranchReferenceForbidden(errors.BzrError):
+
+ _fmt = ("Trying to mirror a branch reference and the branch type "
+ "does not allow references.")
+
+
+class BranchLoopError(errors.BzrError):
+ """Encountered a branch cycle.
+
+ A URL may point to a branch reference or it may point to a stacked branch.
+ In either case, it's possible for there to be a cycle in these references,
+ and this exception is raised when we detect such a cycle.
+ """
+
+ _fmt = "Encountered a branch cycle"""
+
+
+class BranchOpenPolicy(object):
+ """Policy on how to open branches.
+
+ In particular, a policy determines which branches are okay to open by
+ checking their URLs and deciding whether or not to follow branch
+ references.
+ """
+
+ def should_follow_references(self):
+ """Whether we traverse references when mirroring.
+
+ Subclasses must override this method.
+
+ If we encounter a branch reference and this returns false, an error is
+ raised.
+
+ :returns: A boolean to indicate whether to follow a branch reference.
+ """
+ raise NotImplementedError(self.should_follow_references)
+
+ def transform_fallback_location(self, branch, url):
+ """Validate, maybe modify, 'url' to be used as a stacked-on location.
+
+ :param branch: The branch that is being opened.
+ :param url: The URL that the branch provides for its stacked-on
+ location.
+ :return: (new_url, check) where 'new_url' is the URL of the branch to
+ actually open and 'check' is true if 'new_url' needs to be
+ validated by check_and_follow_branch_reference.
+ """
+ raise NotImplementedError(self.transform_fallback_location)
+
+ def check_one_url(self, url):
+ """Check a URL.
+
+ Subclasses must override this method.
+
+ :param url: The source URL to check.
+ :raise BadUrl: subclasses are expected to raise this or a subclass
+ when they find a URL they deem to be unacceptable.
+ """
+ raise NotImplementedError(self.check_one_url)
+
+
+class _BlacklistPolicy(BranchOpenPolicy):
+ """Branch policy that forbids certain URLs.
+
+ This doesn't cope with various alternative spellings of URLs,
+ with e.g. url encoding. It's mostly useful for tests.
+ """
+
+ def __init__(self, should_follow_references, bad_urls=None):
+ if bad_urls is None:
+ bad_urls = set()
+ self._bad_urls = bad_urls
+ self._should_follow_references = should_follow_references
+
+ def should_follow_references(self):
+ return self._should_follow_references
+
+ def check_one_url(self, url):
+ if url in self._bad_urls:
+ raise BadUrl(url)
+
+ def transform_fallback_location(self, branch, url):
+ """See `BranchOpenPolicy.transform_fallback_location`.
+
+ This class is not used for testing our smarter stacking features so we
+ just do the simplest thing: return the URL that would be used anyway
+ and don't check it.
+ """
+ return urlutils.join(branch.base, url), False
+
+
+class AcceptAnythingPolicy(_BlacklistPolicy):
+ """Accept anything, to make testing easier."""
+
+ def __init__(self):
+ super(AcceptAnythingPolicy, self).__init__(True, set())
+
+
+class WhitelistPolicy(BranchOpenPolicy):
+ """Branch policy that only allows certain URLs."""
+
+ def __init__(self, should_follow_references, allowed_urls=None,
+ check=False):
+ if allowed_urls is None:
+ allowed_urls = []
+ self._should_follow_references = should_follow_references
+ self.allowed_urls = set(url.rstrip('/') for url in allowed_urls)
+ self.check = check
+
+ def should_follow_references(self):
+ return self._should_follow_references
+
+ def check_one_url(self, url):
+ if url.rstrip('/') not in self.allowed_urls:
+ raise BadUrl(url)
+
+ def transform_fallback_location(self, branch, url):
+ """See `BranchOpenPolicy.transform_fallback_location`.
+
+ Here we return the URL that would be used anyway and optionally check
+ it.
+ """
+ return urlutils.join(branch.base, url), self.check
+
+
+class SingleSchemePolicy(BranchOpenPolicy):
+ """Branch open policy that rejects URLs not on the given scheme."""
+
+ def __init__(self, allowed_scheme):
+ self.allowed_scheme = allowed_scheme
+
+ def should_follow_references(self):
+ return True
+
+ def transform_fallback_location(self, branch, url):
+ return urlutils.join(branch.base, url), True
+
+ def check_one_url(self, url):
+ """Check that `url` is okay to open."""
+ if urlutils.URL.from_string(str(url)).scheme != self.allowed_scheme:
+ raise BadUrl(url)
+
+
+class BranchOpener(object):
+ """Branch opener which uses a URL policy.
+
+ All locations that are opened (stacked-on branches, references) are
+ checked against a policy object.
+
+ The policy object is expected to have the following methods:
+ * check_one_url
+ * should_follow_references
+ * transform_fallback_location
+ """
+
+ _threading_data = threading.local()
+
+ def __init__(self, policy, probers=None):
+ """Create a new BranchOpener.
+
+ :param policy: The opener policy to use.
+ :param probers: Optional list of probers to allow.
+ Defaults to local and remote bzr probers.
+ """
+ self.policy = policy
+ self._seen_urls = set()
+ self.probers = probers
+
+ @classmethod
+ def install_hook(cls):
+ """Install the ``transform_fallback_location`` hook.
+
+ This is done at module import time, but transform_fallback_locationHook
+ doesn't do anything unless the `_threading_data` threading.local
+ object has an 'opener' attribute in this thread.
+
+ This is a classmethod rather than performed at module level so that it
+ can be called in setUp for testing `BranchOpener`, as
+ bzrlib.tests.TestCase.setUp clears hooks.
+ """
+ Branch.hooks.install_named_hook(
+ 'transform_fallback_location',
+ cls.transform_fallback_locationHook,
+ 'BranchOpener.transform_fallback_locationHook')
+
+ def check_and_follow_branch_reference(self, url):
+ """Check URL (and possibly the referenced URL).
+
+ This method checks that `url` passes the policy's `check_one_url`
+ method, and if `url` refers to a branch reference, it checks whether
+ references are allowed and whether the reference's URL passes muster
+ also -- recursively, until a real branch is found.
+
+ :param url: URL to check
+ :raise BranchLoopError: If the branch references form a loop.
+ :raise BranchReferenceForbidden: If this opener forbids branch
+ references.
+ """
+ while True:
+ if url in self._seen_urls:
+ raise BranchLoopError()
+ self._seen_urls.add(url)
+ self.policy.check_one_url(url)
+ next_url = self.follow_reference(url)
+ if next_url is None:
+ return url
+ url = next_url
+ if not self.policy.should_follow_references():
+ raise BranchReferenceForbidden(url)
+
+ @classmethod
+ def transform_fallback_locationHook(cls, branch, url):
+ """Installed as the 'transform_fallback_location' Branch hook.
+
+ This method calls `transform_fallback_location` on the policy object and
+ either returns the url it provides or passes it back to
+ check_and_follow_branch_reference.
+ """
+ try:
+ opener = getattr(cls._threading_data, "opener")
+ except AttributeError:
+ return url
+ new_url, check = opener.policy.transform_fallback_location(branch, url)
+ if check:
+ return opener.check_and_follow_branch_reference(new_url)
+ else:
+ return new_url
+
+ def run_with_transform_fallback_location_hook_installed(
+ self, callable, *args, **kw):
+ if (self.transform_fallback_locationHook not in
+ Branch.hooks['transform_fallback_location']):
+ raise AssertionError("hook not installed")
+ self._threading_data.opener = self
+ try:
+ return callable(*args, **kw)
+ finally:
+ del self._threading_data.opener
+ # We reset _seen_urls here to avoid multiple calls to open giving
+ # spurious loop exceptions.
+ self._seen_urls = set()
+
+ def follow_reference(self, url):
+ """Get the branch-reference value at the specified url.
+
+ This exists as a separate method only to be overridden in unit tests.
+ """
+ bzrdir = ControlDir.open(url, probers=self.probers)
+ return bzrdir.get_branch_reference()
+
+ def open(self, url):
+ """Open the Bazaar branch at url, first checking it.
+
+ Whether a URL is acceptable is defined by the policy's
+ `should_follow_references` and `check_one_url` methods.
+ """
+ if type(url) != str:
+ raise TypeError
+
+ url = self.check_and_follow_branch_reference(url)
+
+ def open_branch(url):
+ dir = ControlDir.open(url, probers=self.probers)
+ return dir.open_branch()
+ return self.run_with_transform_fallback_location_hook_installed(
+ open_branch, url)
+
+
+def open_only_scheme(allowed_scheme, url):
+ """Open the branch at `url`, only accessing URLs on `allowed_scheme`.
+
+ :raises BadUrl: An attempt was made to open a URL that was not on
+ `allowed_scheme`.
+ """
+ return BranchOpener(SingleSchemePolicy(allowed_scheme)).open(url)
+
+
+BranchOpener.install_hook()
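+
+
+# Illustrative sketch, not part of the original commit: restricting branch
+# opening to a single URL scheme.  The example URL is made up; BadUrl is
+# raised if the branch, a branch reference it points at, or a stacked-on
+# location falls outside the allowed scheme.
+def _example_open_http_only(url='http://example.com/branch'):
+    # equivalent to BranchOpener(SingleSchemePolicy('http')).open(url)
+    return open_only_scheme('http', url)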
diff --git a/bzrlib/urlutils.py b/bzrlib/urlutils.py
new file mode 100644
index 0000000..7f6ab1d
--- /dev/null
+++ b/bzrlib/urlutils.py
@@ -0,0 +1,969 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A collection of function for handling URL operations."""
+
+from __future__ import absolute_import
+
+import os
+import re
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from posixpath import split as _posix_split
+import urlparse
+
+from bzrlib import (
+ errors,
+ osutils,
+ )
+""")
+
+
+def basename(url, exclude_trailing_slash=True):
+ """Return the last component of a URL.
+
+ :param url: The URL in question
+ :param exclude_trailing_slash: If the url looks like "path/to/foo/"
+ ignore the final slash and return 'foo' rather than ''
+ :return: Just the final component of the URL. This can return ''
+ if you don't exclude_trailing_slash, or if you are at the
+ root of the URL.
+ """
+ return split(url, exclude_trailing_slash=exclude_trailing_slash)[1]
+
+
+def dirname(url, exclude_trailing_slash=True):
+ """Return the parent directory of the given path.
+
+ :param url: Relative or absolute URL
+ :param exclude_trailing_slash: Remove a final slash
+ (treat http://host/foo/ as http://host/foo, but
+ http://host/ stays http://host/)
+ :return: Everything in the URL except the last path chunk
+ """
+ # TODO: jam 20060502 This was named dirname to be consistent
+ # with the os functions, but maybe "parent" would be better
+ return split(url, exclude_trailing_slash=exclude_trailing_slash)[0]
+
+
+# Private copies of quote and unquote, copied from Python's
+# urllib module because urllib unconditionally imports socket, which imports
+# ssl.
+
+always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ 'abcdefghijklmnopqrstuvwxyz'
+ '0123456789' '_.-')
+_safe_map = {}
+for i, c in zip(xrange(256), str(bytearray(xrange(256)))):
+ _safe_map[c] = c if (i < 128 and c in always_safe) else '%{0:02X}'.format(i)
+_safe_quoters = {}
+
+
+def quote(s, safe='/'):
+ """quote('abc def') -> 'abc%20def'
+
+ Each part of a URL, e.g. the path info, the query, etc., has a
+ different set of reserved characters that must be quoted.
+
+ RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
+ the following reserved characters.
+
+ reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
+ "$" | ","
+
+ Each of these characters is reserved in some component of a URL,
+ but not necessarily in all of them.
+
+ By default, the quote function is intended for quoting the path
+ section of a URL. Thus, it will not encode '/'. This character
+ is reserved, but in typical usage the quote function is being
+ called on a path where the existing slash characters are used as
+ reserved characters.
+ """
+ # fastpath
+ if not s:
+ if s is None:
+ raise TypeError('None object cannot be quoted')
+ return s
+ cachekey = (safe, always_safe)
+ try:
+ (quoter, safe) = _safe_quoters[cachekey]
+ except KeyError:
+ safe_map = _safe_map.copy()
+ safe_map.update([(c, c) for c in safe])
+ quoter = safe_map.__getitem__
+ safe = always_safe + safe
+ _safe_quoters[cachekey] = (quoter, safe)
+ if not s.rstrip(safe):
+ return s
+ return ''.join(map(quoter, s))
+
+
+_hexdig = '0123456789ABCDEFabcdef'
+_hextochr = dict((a + b, chr(int(a + b, 16)))
+ for a in _hexdig for b in _hexdig)
+
+def unquote(s):
+ """unquote('abc%20def') -> 'abc def'."""
+ res = s.split('%')
+ # fastpath
+ if len(res) == 1:
+ return s
+ s = res[0]
+ for item in res[1:]:
+ try:
+ s += _hextochr[item[:2]] + item[2:]
+ except KeyError:
+ s += '%' + item
+ except UnicodeDecodeError:
+ s += unichr(int(item[:2], 16)) + item[2:]
+ return s
+
+
+def escape(relpath):
+ """Escape relpath to be a valid url."""
+ if isinstance(relpath, unicode):
+ relpath = relpath.encode('utf-8')
+ # After quoting and encoding, the path should be perfectly
+ # safe as a plain ASCII string, str() just enforces this
+ return str(quote(relpath, safe='/~'))
+
+
+def file_relpath(base, path):
+ """Compute just the relative sub-portion of a url
+
+ This assumes that both paths are already fully specified file:// URLs.
+ """
+ if len(base) < MIN_ABS_FILEURL_LENGTH:
+ raise ValueError('Length of base (%r) must equal or'
+ ' exceed the platform minimum url length (which is %d)' %
+ (base, MIN_ABS_FILEURL_LENGTH))
+ base = osutils.normpath(local_path_from_url(base))
+ path = osutils.normpath(local_path_from_url(path))
+ return escape(osutils.relpath(base, path))
+
+
+def _find_scheme_and_separator(url):
+ """Find the scheme separator (://) and the first path separator
+
+    This is just a helper function for other path utilities.
+ It could probably be replaced by urlparse
+ """
+ m = _url_scheme_re.match(url)
+ if not m:
+ return None, None
+
+ scheme = m.group('scheme')
+ path = m.group('path')
+
+ # Find the path separating slash
+ # (first slash after the ://)
+ first_path_slash = path.find('/')
+ if first_path_slash == -1:
+ return len(scheme), None
+ return len(scheme), first_path_slash+m.start('path')
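+# Illustrative only (editorial note): for 'http://host/path' this returns
+# (4, 11) -- len('http') and the index of the first path slash, so
+# url[11:] == '/path'; a plain relative path yields (None, None).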
+
+
+def is_url(url):
+ """Tests whether a URL is in actual fact a URL."""
+ return _url_scheme_re.match(url) is not None
+
+
+def join(base, *args):
+ """Create a URL by joining sections.
+
+ This will normalize '..', assuming that paths are absolute
+ (it assumes no symlinks in either path)
+
+ If any of *args is an absolute URL, it will be treated correctly.
+ Example:
+ join('http://foo', 'http://bar') => 'http://bar'
+ join('http://foo', 'bar') => 'http://foo/bar'
+ join('http://foo', 'bar', '../baz') => 'http://foo/baz'
+ """
+ if not args:
+ return base
+ scheme_end, path_start = _find_scheme_and_separator(base)
+ if scheme_end is None and path_start is None:
+ path_start = 0
+ elif path_start is None:
+ path_start = len(base)
+ path = base[path_start:]
+ for arg in args:
+ arg_scheme_end, arg_path_start = _find_scheme_and_separator(arg)
+ if arg_scheme_end is None and arg_path_start is None:
+ arg_path_start = 0
+ elif arg_path_start is None:
+ arg_path_start = len(arg)
+ if arg_scheme_end is not None:
+ base = arg
+ path = arg[arg_path_start:]
+ scheme_end = arg_scheme_end
+ path_start = arg_path_start
+ else:
+ path = joinpath(path, arg)
+ return base[:path_start] + path
+
+
+def joinpath(base, *args):
+ """Join URL path segments to a URL path segment.
+
+ This is somewhat like osutils.joinpath, but intended for URLs.
+
+ XXX: this duplicates some normalisation logic, and also duplicates a lot of
+ path handling logic that already exists in some Transport implementations.
+ We really should try to have exactly one place in the code base responsible
+ for combining paths of URLs.
+ """
+ path = base.split('/')
+ if len(path) > 1 and path[-1] == '':
+        # If the path ends in a trailing /, remove it.
+ path.pop()
+ for arg in args:
+ if arg.startswith('/'):
+ path = []
+ for chunk in arg.split('/'):
+ if chunk == '.':
+ continue
+ elif chunk == '..':
+ if path == ['']:
+ raise errors.InvalidURLJoin('Cannot go above root',
+ base, args)
+ path.pop()
+ else:
+ path.append(chunk)
+ if path == ['']:
+ return '/'
+ else:
+ return '/'.join(path)
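+# A few illustrative joinpath results (editorial sketch; they follow from
+# the chunk handling above):
+#   joinpath('foo/bar/', 'baz')    => 'foo/bar/baz'
+#   joinpath('/foo/bar', '../baz') => '/foo/baz'
+#   joinpath('/', '..')            raises errors.InvalidURLJoin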
+
+
+# jam 20060502 Sorted to 'l' because the final target is 'local_path_from_url'
+def _posix_local_path_from_url(url):
+ """Convert a url like file:///path/to/foo into /path/to/foo"""
+ url = split_segment_parameters_raw(url)[0]
+ file_localhost_prefix = 'file://localhost/'
+ if url.startswith(file_localhost_prefix):
+ path = url[len(file_localhost_prefix) - 1:]
+ elif not url.startswith('file:///'):
+ raise errors.InvalidURL(
+ url, 'local urls must start with file:/// or file://localhost/')
+ else:
+ path = url[len('file://'):]
+ # We only strip off 2 slashes
+ return unescape(path)
+
+
+def _posix_local_path_to_url(path):
+ """Convert a local path like ./foo into a URL like file:///path/to/foo
+
+    This also handles escaping unicode characters, etc.
+ """
+ # importing directly from posixpath allows us to test this
+ # on non-posix platforms
+ return 'file://' + escape(osutils._posix_abspath(path))
+
+
+def _win32_local_path_from_url(url):
+ """Convert a url like file:///C:/path/to/foo into C:/path/to/foo"""
+ if not url.startswith('file://'):
+ raise errors.InvalidURL(url, 'local urls must start with file:///, '
+ 'UNC path urls must start with file://')
+ url = split_segment_parameters_raw(url)[0]
+ # We strip off all 3 slashes
+ win32_url = url[len('file:'):]
+ # check for UNC path: //HOST/path
+ if not win32_url.startswith('///'):
+ if (win32_url[2] == '/'
+ or win32_url[3] in '|:'):
+ raise errors.InvalidURL(url, 'Win32 UNC path urls'
+ ' have form file://HOST/path')
+ return unescape(win32_url)
+
+ # allow empty paths so we can serve all roots
+ if win32_url == '///':
+ return '/'
+
+ # usual local path with drive letter
+ if (len(win32_url) < 6
+ or win32_url[3] not in ('abcdefghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
+ or win32_url[4] not in '|:'
+ or win32_url[5] != '/'):
+ raise errors.InvalidURL(url, 'Win32 file urls start with'
+ ' file:///x:/, where x is a valid drive letter')
+ return win32_url[3].upper() + u':' + unescape(win32_url[5:])
+
+
+def _win32_local_path_to_url(path):
+ """Convert a local path like ./foo into a URL like file:///C:/path/to/foo
+
+    This also handles escaping unicode characters, etc.
+ """
+ # importing directly from ntpath allows us to test this
+    # on non-win32 platforms
+ # FIXME: It turns out that on nt, ntpath.abspath uses nt._getfullpathname
+ # which actually strips trailing space characters.
+ # The worst part is that on linux ntpath.abspath has different
+ # semantics, since 'nt' is not an available module.
+ if path == '/':
+ return 'file:///'
+
+ win32_path = osutils._win32_abspath(path)
+ # check for UNC path \\HOST\path
+ if win32_path.startswith('//'):
+ return 'file:' + escape(win32_path)
+ return ('file:///' + str(win32_path[0].upper()) + ':' +
+ escape(win32_path[2:]))
+
+
+local_path_to_url = _posix_local_path_to_url
+local_path_from_url = _posix_local_path_from_url
+MIN_ABS_FILEURL_LENGTH = len('file:///')
+WIN32_MIN_ABS_FILEURL_LENGTH = len('file:///C:/')
+
+if sys.platform == 'win32':
+ local_path_to_url = _win32_local_path_to_url
+ local_path_from_url = _win32_local_path_from_url
+
+ MIN_ABS_FILEURL_LENGTH = WIN32_MIN_ABS_FILEURL_LENGTH
+
+
+_url_scheme_re = re.compile(r'^(?P<scheme>[^:/]{2,}):(//)?(?P<path>.*)$')
+_url_hex_escapes_re = re.compile(r'(%[0-9a-fA-F]{2})')
+
+
+def _unescape_safe_chars(matchobj):
+ """re.sub callback to convert hex-escapes to plain characters (if safe).
+
+ e.g. '%7E' will be converted to '~'.
+ """
+ hex_digits = matchobj.group(0)[1:]
+ char = chr(int(hex_digits, 16))
+ if char in _url_dont_escape_characters:
+ return char
+ else:
+ return matchobj.group(0).upper()
+
+
+def normalize_url(url):
+ """Make sure that a path string is in fully normalized URL form.
+
+ This handles URLs which have unicode characters, spaces,
+ special characters, etc.
+
+ It has two basic modes of operation, depending on whether the
+ supplied string starts with a url specifier (scheme://) or not.
+ If it does not have a specifier it is considered a local path,
+ and will be converted into a file:/// url. Non-ascii characters
+ will be encoded using utf-8.
+ If it does have a url specifier, it will be treated as a "hybrid"
+ URL. Basically, a URL that should have URL special characters already
+ escaped (like +?&# etc), but may have unicode characters, etc
+ which would not be valid in a real URL.
+
+ :param url: Either a hybrid URL or a local path
+ :return: A normalized URL which only includes 7-bit ASCII characters.
+ """
+ scheme_end, path_start = _find_scheme_and_separator(url)
+ if scheme_end is None:
+ return local_path_to_url(url)
+ prefix = url[:path_start]
+ path = url[path_start:]
+ if not isinstance(url, unicode):
+ for c in url:
+ if c not in _url_safe_characters:
+ raise errors.InvalidURL(url, 'URLs can only contain specific'
+ ' safe characters (not %r)' % c)
+ path = _url_hex_escapes_re.sub(_unescape_safe_chars, path)
+ return str(prefix + ''.join(path))
+
+ # We have a unicode (hybrid) url
+ path_chars = list(path)
+
+ for i in xrange(len(path_chars)):
+ if path_chars[i] not in _url_safe_characters:
+ chars = path_chars[i].encode('utf-8')
+ path_chars[i] = ''.join(
+ ['%%%02X' % ord(c) for c in path_chars[i].encode('utf-8')])
+ path = ''.join(path_chars)
+ path = _url_hex_escapes_re.sub(_unescape_safe_chars, path)
+ return str(prefix + path)
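+# Illustrative only (editorial note): a unicode "hybrid" URL has its unsafe
+# characters utf-8 encoded and percent-escaped, e.g.
+#   normalize_url(u'http://host/a b') => 'http://host/a%20b'
+# while a plain local path is converted via local_path_to_url().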
+
+
+def relative_url(base, other):
+ """Return a path to other from base.
+
+ If other is unrelated to base, return other. Else return a relative path.
+ This assumes no symlinks as part of the url.
+ """
+ dummy, base_first_slash = _find_scheme_and_separator(base)
+ if base_first_slash is None:
+ return other
+
+ dummy, other_first_slash = _find_scheme_and_separator(other)
+ if other_first_slash is None:
+ return other
+
+ # this takes care of differing schemes or hosts
+ base_scheme = base[:base_first_slash]
+ other_scheme = other[:other_first_slash]
+ if base_scheme != other_scheme:
+ return other
+ elif sys.platform == 'win32' and base_scheme == 'file://':
+ base_drive = base[base_first_slash+1:base_first_slash+3]
+ other_drive = other[other_first_slash+1:other_first_slash+3]
+ if base_drive != other_drive:
+ return other
+
+ base_path = base[base_first_slash+1:]
+ other_path = other[other_first_slash+1:]
+
+ if base_path.endswith('/'):
+ base_path = base_path[:-1]
+
+ base_sections = base_path.split('/')
+ other_sections = other_path.split('/')
+
+ if base_sections == ['']:
+ base_sections = []
+ if other_sections == ['']:
+ other_sections = []
+
+ output_sections = []
+ for b, o in zip(base_sections, other_sections):
+ if b != o:
+ break
+ output_sections.append(b)
+
+ match_len = len(output_sections)
+ output_sections = ['..' for x in base_sections[match_len:]]
+ output_sections.extend(other_sections[match_len:])
+
+ return "/".join(output_sections) or "."
+
+
+def _win32_extract_drive_letter(url_base, path):
+ """On win32 the drive letter needs to be added to the url base."""
+ # Strip off the drive letter
+ # path is currently /C:/foo
+ if len(path) < 4 or path[2] not in ':|' or path[3] != '/':
+ raise errors.InvalidURL(url_base + path,
+ 'win32 file:/// paths need a drive letter')
+ url_base += path[0:3] # file:// + /C:
+ path = path[3:] # /foo
+ return url_base, path
+
+
+def split(url, exclude_trailing_slash=True):
+ """Split a URL into its parent directory and a child directory.
+
+ :param url: A relative or absolute URL
+ :param exclude_trailing_slash: Strip off a final '/' if it is part
+ of the path (but not if it is part of the protocol specification)
+
+ :return: (parent_url, child_dir). child_dir may be the empty string if we're at
+ the root.
+ """
+ scheme_loc, first_path_slash = _find_scheme_and_separator(url)
+
+ if first_path_slash is None:
+ # We have either a relative path, or no separating slash
+ if scheme_loc is None:
+ # Relative path
+ if exclude_trailing_slash and url.endswith('/'):
+ url = url[:-1]
+ return _posix_split(url)
+ else:
+ # Scheme with no path
+ return url, ''
+
+ # We have a fully defined path
+ url_base = url[:first_path_slash] # http://host, file://
+ path = url[first_path_slash:] # /file/foo
+
+ if sys.platform == 'win32' and url.startswith('file:///'):
+ # Strip off the drive letter
+ # url_base is currently file://
+ # path is currently /C:/foo
+ url_base, path = _win32_extract_drive_letter(url_base, path)
+ # now it should be file:///C: and /foo
+
+ if exclude_trailing_slash and len(path) > 1 and path.endswith('/'):
+ path = path[:-1]
+ head, tail = _posix_split(path)
+ return url_base + head, tail
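+# Illustrative only (editorial sketch):
+#   split('http://host/path/to/foo') => ('http://host/path/to', 'foo')
+#   split('path/to/foo/')            => ('path/to', 'foo')
+#   split('http://host/')            => ('http://host/', '')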
+
+
+def split_segment_parameters_raw(url):
+ """Split the subsegment of the last segment of a URL.
+
+ :param url: A relative or absolute URL
+ :return: (url, subsegments)
+ """
+ # GZ 2011-11-18: Dodgy removing the terminal slash like this, function
+ # operates on urls not url+segments, and Transport classes
+ # should not be blindly adding slashes in the first place.
+ lurl = strip_trailing_slash(url)
+ # Segments begin at first comma after last forward slash, if one exists
+ segment_start = lurl.find(",", lurl.rfind("/")+1)
+ if segment_start == -1:
+ return (url, [])
+ return (lurl[:segment_start], lurl[segment_start+1:].split(","))
+
+
+def split_segment_parameters(url):
+ """Split the segment parameters of the last segment of a URL.
+
+ :param url: A relative or absolute URL
+ :return: (url, segment_parameters)
+ """
+ (base_url, subsegments) = split_segment_parameters_raw(url)
+ parameters = {}
+ for subsegment in subsegments:
+ (key, value) = subsegment.split("=", 1)
+ parameters[key] = value
+ return (base_url, parameters)
+
+
+def join_segment_parameters_raw(base, *subsegments):
+ """Create a new URL by adding subsegments to an existing one.
+
+ This adds the specified subsegments to the last path in the specified
+ base URL. The subsegments should be bytestrings.
+
+ :note: You probably want to use join_segment_parameters instead.
+ """
+ if not subsegments:
+ return base
+ for subsegment in subsegments:
+ if type(subsegment) is not str:
+ raise TypeError("Subsegment %r is not a bytestring" % subsegment)
+ if "," in subsegment:
+ raise errors.InvalidURLJoin(", exists in subsegments",
+ base, subsegments)
+ return ",".join((base,) + subsegments)
+
+
+def join_segment_parameters(url, parameters):
+ """Create a new URL by adding segment parameters to an existing one.
+
+ The parameters of the last segment in the URL will be updated; if a
+ parameter with the same key already exists it will be overwritten.
+
+ :param url: A URL, as string
+ :param parameters: Dictionary of parameters, keys and values as bytestrings
+ """
+ (base, existing_parameters) = split_segment_parameters(url)
+ new_parameters = {}
+ new_parameters.update(existing_parameters)
+ for key, value in parameters.iteritems():
+ if type(key) is not str:
+ raise TypeError("parameter key %r is not a bytestring" % key)
+ if type(value) is not str:
+ raise TypeError("parameter value %r for %s is not a bytestring" %
+ (key, value))
+ if "=" in key:
+ raise errors.InvalidURLJoin("= exists in parameter key", url,
+ parameters)
+ new_parameters[key] = value
+ return join_segment_parameters_raw(base,
+ *["%s=%s" % item for item in sorted(new_parameters.items())])
+
+
+def _win32_strip_local_trailing_slash(url):
+ """Strip slashes after the drive letter"""
+ if len(url) > WIN32_MIN_ABS_FILEURL_LENGTH:
+ return url[:-1]
+ else:
+ return url
+
+
+def strip_trailing_slash(url):
+ """Strip trailing slash, except for root paths.
+
+ The definition of 'root path' is platform-dependent.
+ This assumes that all URLs are valid netloc urls, such that they
+ form:
+ scheme://host/path
+ It searches for ://, and then refuses to remove the next '/'.
+    It can also handle relative paths.
+ Examples:
+ path/to/foo => path/to/foo
+ path/to/foo/ => path/to/foo
+ http://host/path/ => http://host/path
+ http://host/path => http://host/path
+ http://host/ => http://host/
+ file:/// => file:///
+ file:///foo/ => file:///foo
+ # This is unique on win32 platforms, and is the only URL
+ # format which does it differently.
+ file:///c|/ => file:///c:/
+ """
+ if not url.endswith('/'):
+ # Nothing to do
+ return url
+ if sys.platform == 'win32' and url.startswith('file://'):
+ return _win32_strip_local_trailing_slash(url)
+
+ scheme_loc, first_path_slash = _find_scheme_and_separator(url)
+ if scheme_loc is None:
+ # This is a relative path, as it has no scheme
+ # so just chop off the last character
+ return url[:-1]
+
+ if first_path_slash is None or first_path_slash == len(url)-1:
+ # Don't chop off anything if the only slash is the path
+ # separating slash
+ return url
+
+ return url[:-1]
+
+
+def unescape(url):
+ """Unescape relpath from url format.
+
+ This returns a Unicode path from a URL
+ """
+ # jam 20060427 URLs are supposed to be ASCII only strings
+ # If they are passed in as unicode, unquote
+ # will return a UNICODE string, which actually contains
+ # utf-8 bytes. So we have to ensure that they are
+ # plain ASCII strings, or the final .decode will
+ # try to encode the UNICODE => ASCII, and then decode
+ # it into utf-8.
+ try:
+ url = str(url)
+ except UnicodeError, e:
+ raise errors.InvalidURL(url, 'URL was not a plain ASCII url: %s' % (e,))
+
+ unquoted = unquote(url)
+ try:
+ unicode_path = unquoted.decode('utf-8')
+ except UnicodeError, e:
+ raise errors.InvalidURL(url, 'Unable to encode the URL as utf-8: %s' % (e,))
+ return unicode_path
+
+
+# These are characters that if escaped, should stay that way
+_no_decode_chars = ';/?:@&=+$,#'
+_no_decode_ords = [ord(c) for c in _no_decode_chars]
+_no_decode_hex = (['%02x' % o for o in _no_decode_ords]
+ + ['%02X' % o for o in _no_decode_ords])
+_hex_display_map = dict(([('%02x' % o, chr(o)) for o in range(256)]
+ + [('%02X' % o, chr(o)) for o in range(256)]))
+# These entries get mapped to themselves
+_hex_display_map.update((hex,'%'+hex) for hex in _no_decode_hex)
+
+# These characters shouldn't be percent-encoded, and it's always safe to
+# unencode them if they are.
+_url_dont_escape_characters = set(
+ "abcdefghijklmnopqrstuvwxyz" # Lowercase alpha
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # Uppercase alpha
+ "0123456789" # Numbers
+ "-._~" # Unreserved characters
+)
+
+# These characters should not be escaped
+_url_safe_characters = set(
+ "abcdefghijklmnopqrstuvwxyz" # Lowercase alpha
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # Uppercase alpha
+ "0123456789" # Numbers
+ "_.-!~*'()" # Unreserved characters
+ "/;?:@&=+$," # Reserved characters
+ "%#" # Extra reserved characters
+)
+
+def unescape_for_display(url, encoding):
+ """Decode what you can for a URL, so that we get a nice looking path.
+
+ This will turn file:// urls into local paths, and try to decode
+ any portions of a http:// style url that it can.
+
+ Any sections of the URL which can't be represented in the encoding or
+ need to stay as escapes are left alone.
+
+ :param url: A 7-bit ASCII URL
+ :param encoding: The final output encoding
+
+ :return: A unicode string which can be safely encoded into the
+ specified encoding.
+ """
+ if encoding is None:
+ raise ValueError('you cannot specify None for the display encoding')
+ if url.startswith('file://'):
+ try:
+ path = local_path_from_url(url)
+ path.encode(encoding)
+ return path
+ except UnicodeError:
+ return url
+
+ # Split into sections to try to decode utf-8
+ res = url.split('/')
+ for i in xrange(1, len(res)):
+ escaped_chunks = res[i].split('%')
+ for j in xrange(1, len(escaped_chunks)):
+ item = escaped_chunks[j]
+ try:
+ escaped_chunks[j] = _hex_display_map[item[:2]] + item[2:]
+ except KeyError:
+ # Put back the percent symbol
+ escaped_chunks[j] = '%' + item
+ except UnicodeDecodeError:
+ escaped_chunks[j] = unichr(int(item[:2], 16)) + item[2:]
+ unescaped = ''.join(escaped_chunks)
+ try:
+ decoded = unescaped.decode('utf-8')
+ except UnicodeDecodeError:
+ # If this path segment cannot be properly utf-8 decoded
+ # after doing unescaping we will just leave it alone
+ pass
+ else:
+ try:
+ decoded.encode(encoding)
+ except UnicodeEncodeError:
+ # If this chunk cannot be encoded in the local
+ # encoding, then we should leave it alone
+ pass
+ else:
+ # Otherwise take the url decoded one
+ res[i] = decoded
+ return u'/'.join(res)
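+# Illustrative only (editorial sketch): escapes that decode cleanly in the
+# target encoding are shown decoded, others are left alone, e.g.
+#   unescape_for_display('http://host/my%20file', 'ascii') => u'http://host/my file'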
+
+
+def derive_to_location(from_location):
+ """Derive a TO_LOCATION given a FROM_LOCATION.
+
+ The normal case is a FROM_LOCATION of http://foo/bar => bar.
+ The Right Thing for some logical destinations may differ though
+ because no / may be present at all. In that case, the result is
+ the full name without the scheme indicator, e.g. lp:foo-bar => foo-bar.
+ This latter case also applies when a Windows drive
+ is used without a path, e.g. c:foo-bar => foo-bar.
+ If no /, path separator or : is found, the from_location is returned.
+ """
+ if from_location.find("/") >= 0 or from_location.find(os.sep) >= 0:
+ return os.path.basename(from_location.rstrip("/\\"))
+ else:
+ sep = from_location.find(":")
+ if sep > 0:
+ return from_location[sep+1:]
+ else:
+ return from_location
+
+
+def _is_absolute(url):
+ return (osutils.pathjoin('/foo', url) == url)
+
+
+def rebase_url(url, old_base, new_base):
+ """Convert a relative path from an old base URL to a new base URL.
+
+ The result will be a relative path.
+ Absolute paths and full URLs are returned unaltered.
+ """
+ scheme, separator = _find_scheme_and_separator(url)
+ if scheme is not None:
+ return url
+ if _is_absolute(url):
+ return url
+ old_parsed = urlparse.urlparse(old_base)
+ new_parsed = urlparse.urlparse(new_base)
+ if (old_parsed[:2]) != (new_parsed[:2]):
+ raise errors.InvalidRebaseURLs(old_base, new_base)
+ return determine_relative_path(new_parsed[2],
+ join(old_parsed[2], url))
+
+
+def determine_relative_path(from_path, to_path):
+ """Determine a relative path from from_path to to_path."""
+ from_segments = osutils.splitpath(from_path)
+ to_segments = osutils.splitpath(to_path)
+ count = -1
+ for count, (from_element, to_element) in enumerate(zip(from_segments,
+ to_segments)):
+ if from_element != to_element:
+ break
+ else:
+ count += 1
+ unique_from = from_segments[count:]
+ unique_to = to_segments[count:]
+ segments = (['..'] * len(unique_from) + unique_to)
+ if len(segments) == 0:
+ return '.'
+ return osutils.pathjoin(*segments)
+
+
+class URL(object):
+ """Parsed URL."""
+
+ def __init__(self, scheme, quoted_user, quoted_password, quoted_host,
+ port, quoted_path):
+ self.scheme = scheme
+ self.quoted_host = quoted_host
+ self.host = unquote(self.quoted_host)
+ self.quoted_user = quoted_user
+ if self.quoted_user is not None:
+ self.user = unquote(self.quoted_user)
+ else:
+ self.user = None
+ self.quoted_password = quoted_password
+ if self.quoted_password is not None:
+ self.password = unquote(self.quoted_password)
+ else:
+ self.password = None
+ self.port = port
+ self.quoted_path = _url_hex_escapes_re.sub(_unescape_safe_chars, quoted_path)
+ self.path = unquote(self.quoted_path)
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.scheme == other.scheme and
+ self.host == other.host and
+ self.user == other.user and
+ self.password == other.password and
+ self.path == other.path)
+
+ def __repr__(self):
+ return "<%s(%r, %r, %r, %r, %r, %r)>" % (
+ self.__class__.__name__,
+ self.scheme, self.quoted_user, self.quoted_password,
+ self.quoted_host, self.port, self.quoted_path)
+
+ @classmethod
+ def from_string(cls, url):
+ """Create a URL object from a string.
+
+ :param url: URL as bytestring
+ """
+ if isinstance(url, unicode):
+ raise errors.InvalidURL('should be ascii:\n%r' % url)
+ url = url.encode('utf-8')
+ (scheme, netloc, path, params,
+ query, fragment) = urlparse.urlparse(url, allow_fragments=False)
+ user = password = host = port = None
+ if '@' in netloc:
+ user, host = netloc.rsplit('@', 1)
+ if ':' in user:
+ user, password = user.split(':', 1)
+ else:
+ host = netloc
+
+ if ':' in host and not (host[0] == '[' and host[-1] == ']'):
+ # there *is* port
+ host, port = host.rsplit(':',1)
+ try:
+ port = int(port)
+ except ValueError:
+ raise errors.InvalidURL('invalid port number %s in url:\n%s' %
+ (port, url))
+ if host != "" and host[0] == '[' and host[-1] == ']': #IPv6
+ host = host[1:-1]
+
+ return cls(scheme, user, password, host, port, path)
+
+ def __str__(self):
+ netloc = self.quoted_host
+ if ":" in netloc:
+ netloc = "[%s]" % netloc
+ if self.quoted_user is not None:
+ # Note that we don't put the password back even if we
+ # have one so that it doesn't get accidentally
+ # exposed.
+ netloc = '%s@%s' % (self.quoted_user, netloc)
+ if self.port is not None:
+ netloc = '%s:%d' % (netloc, self.port)
+ return urlparse.urlunparse(
+ (self.scheme, netloc, self.quoted_path, None, None, None))
+
+ @staticmethod
+ def _combine_paths(base_path, relpath):
+ """Transform a Transport-relative path to a remote absolute path.
+
+ This does not handle substitution of ~ but does handle '..' and '.'
+ components.
+
+ Examples::
+
+ t._combine_paths('/home/sarah', 'project/foo')
+ => '/home/sarah/project/foo'
+ t._combine_paths('/home/sarah', '../../etc')
+ => '/etc'
+ t._combine_paths('/home/sarah', '/etc')
+ => '/etc'
+
+ :param base_path: base path
+ :param relpath: relative url string for relative part of remote path.
+ :return: urlencoded string for final path.
+ """
+ if not isinstance(relpath, str):
+ raise errors.InvalidURL(relpath)
+ relpath = _url_hex_escapes_re.sub(_unescape_safe_chars, relpath)
+ if relpath.startswith('/'):
+ base_parts = []
+ else:
+ base_parts = base_path.split('/')
+ if len(base_parts) > 0 and base_parts[-1] == '':
+ base_parts = base_parts[:-1]
+ for p in relpath.split('/'):
+ if p == '..':
+ if len(base_parts) == 0:
+ # In most filesystems, a request for the parent
+ # of root, just returns root.
+ continue
+ base_parts.pop()
+ elif p == '.':
+ continue # No-op
+ elif p != '':
+ base_parts.append(p)
+ path = '/'.join(base_parts)
+ if not path.startswith('/'):
+ path = '/' + path
+ return path
+
+ def clone(self, offset=None):
+ """Return a new URL for a path relative to this URL.
+
+ :param offset: A relative path, already urlencoded
+ :return: `URL` instance
+ """
+ if offset is not None:
+ relative = unescape(offset).encode('utf-8')
+ path = self._combine_paths(self.path, relative)
+ path = quote(path, safe="/~")
+ else:
+ path = self.quoted_path
+ return self.__class__(self.scheme, self.quoted_user,
+ self.quoted_password, self.quoted_host, self.port,
+ path)
+
+
+def parse_url(url):
+ """Extract the server address, the credentials and the path from the url.
+
+ user, password, host and path should be quoted if they contain reserved
+ chars.
+
+    :param url: a quoted url
+ :return: (scheme, user, password, host, port, path) tuple, all fields
+ are unquoted.
+ """
+ parsed_url = URL.from_string(url)
+ return (parsed_url.scheme, parsed_url.user, parsed_url.password,
+ parsed_url.host, parsed_url.port, parsed_url.path)
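+# Illustrative only (editorial sketch):
+#   parse_url('http://user@host:8080/path')
+#     => ('http', 'user', None, 'host', 8080, '/path')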
diff --git a/bzrlib/utextwrap.py b/bzrlib/utextwrap.py
new file mode 100644
index 0000000..14cb0f9
--- /dev/null
+++ b/bzrlib/utextwrap.py
@@ -0,0 +1,266 @@
+# Copyright (C) 2011 Canonical Ltd
+#
+# UTextWrapper._handle_long_word, UTextWrapper._wrap_chunks,
+# UTextWrapper._fix_sentence_endings, wrap and fill are copied from Python's
+# textwrap module (under PSF license) and modified to support CJK.
+# Original Copyright for these functions:
+#
+# Copyright (C) 1999-2001 Gregory P. Ward.
+# Copyright (C) 2002, 2003 Python Software Foundation.
+#
+# Written by Greg Ward <gward@python.net>
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import sys
+import textwrap
+from unicodedata import east_asian_width as _eawidth
+
+from bzrlib import osutils
+
+__all__ = ["UTextWrapper", "fill", "wrap"]
+
+class UTextWrapper(textwrap.TextWrapper):
+ """
+ Extend TextWrapper for Unicode.
+
+    This text wrapper handles East Asian double-width characters, and
+    splits a word that contains double-width characters even if
+    break_long_words is off.
+
+ :param ambiguous_width: (keyword argument) width for character when
+ unicodedata.east_asian_width(c) == 'A'
+ (default: 1)
+
+ Limitations:
+    * expand_tabs is not fixed; it uses len() to calculate the width of
+      the string to the left of a TAB.
+    * Handles one code unit as a single character of width 1 or 2.
+      This is not correct when there are surrogate pairs, combining
+      characters or zero-width characters.
+    * Treats all Asian characters as line breakable, but that is not
+      true because line breaking is prohibited around some characters
+      (for example, breaking before a punctuation mark is prohibited).
+      See UAX #14 "UNICODE LINE BREAKING ALGORITHM".
+ """
+
+ def __init__(self, width=None, **kwargs):
+ if width is None:
+ width = (osutils.terminal_width() or
+ osutils.default_terminal_width) - 1
+
+ ambi_width = kwargs.pop('ambiguous_width', 1)
+ if ambi_width == 1:
+ self._east_asian_doublewidth = 'FW'
+ elif ambi_width == 2:
+ self._east_asian_doublewidth = 'FWA'
+ else:
+ raise ValueError("ambiguous_width should be 1 or 2")
+
+        # No drop_whitespace param before Python 2.6; whitespace was always dropped.
+ if sys.version_info < (2, 6):
+ self.drop_whitespace = kwargs.pop("drop_whitespace", True)
+ if not self.drop_whitespace:
+ raise ValueError("TextWrapper version must drop whitespace")
+ textwrap.TextWrapper.__init__(self, width, **kwargs)
+
+ def _unicode_char_width(self, uc):
+ """Return width of character `uc`.
+
+        :param uc: Single unicode character.
+ """
+        # 'A' means the width of the character cannot be determined.
+        # We assume its width is 2, because wrapping too long may overflow
+        # the terminal width while wrapping shorter is acceptable.
+ return (_eawidth(uc) in self._east_asian_doublewidth and 2) or 1
+
+ def _width(self, s):
+ """Returns width for s.
+
+        When s is unicode, take East Asian width into account.
+        When s is bytes, treat every byte as a single-width character.
+ """
+ charwidth = self._unicode_char_width
+ return sum(charwidth(c) for c in s)
+
+ def _cut(self, s, width):
+ """Returns head and rest of s. (head+rest == s)
+
+        Head is as long as possible while _width(head) <= width.
+ """
+ w = 0
+ charwidth = self._unicode_char_width
+ for pos, c in enumerate(s):
+ w += charwidth(c)
+ if w > width:
+ return s[:pos], s[pos:]
+ return s, u''
+
+ def _fix_sentence_endings(self, chunks):
+ """_fix_sentence_endings(chunks : [string])
+
+ Correct for sentence endings buried in 'chunks'. Eg. when the
+ original text contains "... foo.\nBar ...", munge_whitespace()
+ and split() will convert that to [..., "foo.", " ", "Bar", ...]
+ which has one too few spaces; this method simply changes the one
+ space to two.
+
+ Note: This function is copied from textwrap.TextWrap and modified
+ to use unicode always.
+ """
+ i = 0
+ L = len(chunks)-1
+ patsearch = self.sentence_end_re.search
+ while i < L:
+ if chunks[i+1] == u" " and patsearch(chunks[i]):
+ chunks[i+1] = u" "
+ i += 2
+ else:
+ i += 1
+
+ def _handle_long_word(self, chunks, cur_line, cur_len, width):
+ # Figure out when indent is larger than the specified width, and make
+ # sure at least one character is stripped off on every pass
+ if width < 2:
+ space_left = chunks[-1] and self._width(chunks[-1][0]) or 1
+ else:
+ space_left = width - cur_len
+
+ # If we're allowed to break long words, then do so: put as much
+ # of the next chunk onto the current line as will fit.
+ if self.break_long_words:
+ head, rest = self._cut(chunks[-1], space_left)
+ cur_line.append(head)
+ if rest:
+ chunks[-1] = rest
+ else:
+ del chunks[-1]
+
+ # Otherwise, we have to preserve the long word intact. Only add
+ # it to the current line if there's nothing already there --
+ # that minimizes how much we violate the width constraint.
+ elif not cur_line:
+ cur_line.append(chunks.pop())
+
+ # If we're not allowed to break long words, and there's already
+ # text on the current line, do nothing. Next time through the
+ # main loop of _wrap_chunks(), we'll wind up here again, but
+ # cur_len will be zero, so the next line will be entirely
+ # devoted to the long word that we can't handle right now.
+
+ def _wrap_chunks(self, chunks):
+ lines = []
+ if self.width <= 0:
+ raise ValueError("invalid width %r (must be > 0)" % self.width)
+
+ # Arrange in reverse order so items can be efficiently popped
+        # from a stack of chunks.
+ chunks.reverse()
+
+ while chunks:
+
+ # Start the list of chunks that will make up the current line.
+ # cur_len is just the length of all the chunks in cur_line.
+ cur_line = []
+ cur_len = 0
+
+ # Figure out which static string will prefix this line.
+ if lines:
+ indent = self.subsequent_indent
+ else:
+ indent = self.initial_indent
+
+ # Maximum width for this line.
+ width = self.width - len(indent)
+
+ # First chunk on line is whitespace -- drop it, unless this
+ # is the very beginning of the text (ie. no lines started yet).
+ if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+ del chunks[-1]
+
+ while chunks:
+ # Use _width instead of len for east asian width
+ l = self._width(chunks[-1])
+
+ # Can at least squeeze this chunk onto the current line.
+ if cur_len + l <= width:
+ cur_line.append(chunks.pop())
+ cur_len += l
+
+ # Nope, this line is full.
+ else:
+ break
+
+ # The current line is full, and the next chunk is too big to
+ # fit on *any* line (not just this one).
+ if chunks and self._width(chunks[-1]) > width:
+ self._handle_long_word(chunks, cur_line, cur_len, width)
+
+ # If the last chunk on this line is all whitespace, drop it.
+ if self.drop_whitespace and cur_line and not cur_line[-1].strip():
+ del cur_line[-1]
+
+ # Convert current line back to a string and store it in list
+ # of all lines (return value).
+ if cur_line:
+ lines.append(indent + u''.join(cur_line))
+
+ return lines
+
+ def _split(self, text):
+ chunks = textwrap.TextWrapper._split(self, unicode(text))
+ cjk_split_chunks = []
+ for chunk in chunks:
+ prev_pos = 0
+ for pos, char in enumerate(chunk):
+ if self._unicode_char_width(char) == 2:
+ if prev_pos < pos:
+ cjk_split_chunks.append(chunk[prev_pos:pos])
+ cjk_split_chunks.append(char)
+ prev_pos = pos+1
+ if prev_pos < len(chunk):
+ cjk_split_chunks.append(chunk[prev_pos:])
+ return cjk_split_chunks
+
+ def wrap(self, text):
+ # ensure text is unicode
+ return textwrap.TextWrapper.wrap(self, unicode(text))
+
+# -- Convenience interface ---------------------------------------------
+
+def wrap(text, width=None, **kwargs):
+ """Wrap a single paragraph of text, returning a list of wrapped lines.
+
+ Reformat the single paragraph in 'text' so it fits in lines of no
+ more than 'width' columns, and return a list of wrapped lines. By
+ default, tabs in 'text' are expanded with string.expandtabs(), and
+ all other whitespace characters (including newline) are converted to
+ space. See TextWrapper class for available keyword args to customize
+ wrapping behaviour.
+ """
+ return UTextWrapper(width=width, **kwargs).wrap(text)
+
+def fill(text, width=None, **kwargs):
+ """Fill a single paragraph of text, returning a new string.
+
+ Reformat the single paragraph in 'text' to fit in lines of no more
+ than 'width' columns, and return a new string containing the entire
+ wrapped paragraph. As with wrap(), tabs are expanded and other
+ whitespace characters converted to space. See TextWrapper class for
+ available keyword args to customize wrapping behaviour.
+ """
+ return UTextWrapper(width=width, **kwargs).fill(text)
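+# Illustrative only (editorial sketch): double-width characters count as two
+# columns, so with width=4 the three kana U+3042/U+3044/U+3046 wrap after two:
+#   wrap(u'hello world', width=6)        => [u'hello', u'world']
+#   fill(u'\u3042\u3044\u3046', width=4) => u'\u3042\u3044\n\u3046'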
+
diff --git a/bzrlib/util/__init__.py b/bzrlib/util/__init__.py
new file mode 100644
index 0000000..c396168
--- /dev/null
+++ b/bzrlib/util/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/bzrlib/util/_bencode_py.py b/bzrlib/util/_bencode_py.py
new file mode 100644
index 0000000..1ef81d0
--- /dev/null
+++ b/bzrlib/util/_bencode_py.py
@@ -0,0 +1,174 @@
+# bencode structured encoding
+#
+# Written by Petru Paler
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# Modifications copyright (C) 2008 Canonical Ltd
+
+from __future__ import absolute_import
+
+class BDecoder(object):
+
+ def __init__(self, yield_tuples=False):
+ """Constructor.
+
+ :param yield_tuples: if true, decode "l" elements as tuples rather than
+ lists.
+ """
+ self.yield_tuples = yield_tuples
+ decode_func = {}
+ decode_func['l'] = self.decode_list
+ decode_func['d'] = self.decode_dict
+ decode_func['i'] = self.decode_int
+ decode_func['0'] = self.decode_string
+ decode_func['1'] = self.decode_string
+ decode_func['2'] = self.decode_string
+ decode_func['3'] = self.decode_string
+ decode_func['4'] = self.decode_string
+ decode_func['5'] = self.decode_string
+ decode_func['6'] = self.decode_string
+ decode_func['7'] = self.decode_string
+ decode_func['8'] = self.decode_string
+ decode_func['9'] = self.decode_string
+ self.decode_func = decode_func
+
+ def decode_int(self, x, f):
+ f += 1
+ newf = x.index('e', f)
+ try:
+ n = int(x[f:newf])
+ except (OverflowError, ValueError):
+ n = long(x[f:newf])
+ if x[f] == '-':
+ if x[f + 1] == '0':
+ raise ValueError
+ elif x[f] == '0' and newf != f+1:
+ raise ValueError
+ return (n, newf+1)
+
+ def decode_string(self, x, f):
+ colon = x.index(':', f)
+ try:
+ n = int(x[f:colon])
+ except (OverflowError, ValueError):
+ n = long(x[f:colon])
+ if x[f] == '0' and colon != f+1:
+ raise ValueError
+ colon += 1
+ return (x[colon:colon+n], colon+n)
+
+ def decode_list(self, x, f):
+ r, f = [], f+1
+ while x[f] != 'e':
+ v, f = self.decode_func[x[f]](x, f)
+ r.append(v)
+ if self.yield_tuples:
+ r = tuple(r)
+ return (r, f + 1)
+
+ def decode_dict(self, x, f):
+ r, f = {}, f+1
+ lastkey = None
+ while x[f] != 'e':
+ k, f = self.decode_string(x, f)
+ if lastkey >= k:
+ raise ValueError
+ lastkey = k
+ r[k], f = self.decode_func[x[f]](x, f)
+ return (r, f + 1)
+
+ def bdecode(self, x):
+ if type(x) != str:
+ raise TypeError
+ try:
+ r, l = self.decode_func[x[0]](x, 0)
+ except (IndexError, KeyError, OverflowError), e:
+ import sys
+ raise ValueError, ValueError(str(e)), sys.exc_info()[2]
+ if l != len(x):
+ raise ValueError
+ return r
+
+
+_decoder = BDecoder()
+bdecode = _decoder.bdecode
+
+_tuple_decoder = BDecoder(True)
+bdecode_as_tuple = _tuple_decoder.bdecode
+
+
+from types import StringType, IntType, LongType, DictType, ListType, TupleType
+
+class Bencached(object):
+ __slots__ = ['bencoded']
+
+ def __init__(self, s):
+ self.bencoded = s
+
+def encode_bencached(x,r):
+ r.append(x.bencoded)
+
+def encode_int(x, r):
+ r.extend(('i', str(x), 'e'))
+
+def encode_string(x, r):
+ r.extend((str(len(x)), ':', x))
+
+def encode_list(x, r):
+ r.append('l')
+ for i in x:
+ encode_func[type(i)](i, r)
+ r.append('e')
+
+def encode_dict(x,r):
+ r.append('d')
+ ilist = x.items()
+ ilist.sort()
+ for k, v in ilist:
+ r.extend((str(len(k)), ':', k))
+ encode_func[type(v)](v, r)
+ r.append('e')
+
+encode_func = {}
+encode_func[type(Bencached(0))] = encode_bencached
+encode_func[IntType] = encode_int
+encode_func[LongType] = encode_int
+encode_func[StringType] = encode_string
+encode_func[ListType] = encode_list
+encode_func[TupleType] = encode_list
+encode_func[DictType] = encode_dict
+
+try:
+ from types import BooleanType
+except ImportError:
+ pass
+else:
+ def encode_bool(x,r):
+ encode_int(int(x), r)
+ encode_func[BooleanType] = encode_bool
+
+from bzrlib._static_tuple_py import StaticTuple
+encode_func[StaticTuple] = encode_list
+try:
+ from bzrlib._static_tuple_c import StaticTuple
+except ImportError:
+ pass
+else:
+ encode_func[StaticTuple] = encode_list
+
+
+def bencode(x):
+ r = []
+ encode_func[type(x)](x, r)
+ return ''.join(r)
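+# Illustrative only (editorial sketch): encoding and decoding round-trip, e.g.
+#   bencode({'int': 42, 'spam': ['a', 'b']}) => 'd3:inti42e4:spaml1:a1:bee'
+#   bdecode('d3:inti42e4:spaml1:a1:bee')     => {'int': 42, 'spam': ['a', 'b']}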
+
diff --git a/bzrlib/util/configobj/__init__.py b/bzrlib/util/configobj/__init__.py
new file mode 100644
index 0000000..c396168
--- /dev/null
+++ b/bzrlib/util/configobj/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/bzrlib/util/configobj/configobj.py b/bzrlib/util/configobj/configobj.py
new file mode 100644
index 0000000..697a318
--- /dev/null
+++ b/bzrlib/util/configobj/configobj.py
@@ -0,0 +1,2461 @@
+# configobj.py
+# A config file reader/writer that supports nested sections in config files.
+# Copyright (C) 2005-2009 Michael Foord, Nicola Larosa
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+# nico AT tekNico DOT net
+
+# ConfigObj 4
+# http://www.voidspace.org.uk/python/configobj.html
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# For information about bugfixes, updates and support, please join the
+# ConfigObj mailing list:
+# http://lists.sourceforge.net/lists/listinfo/configobj-develop
+# Comments, suggestions and bug reports welcome.
+
+
+from __future__ import absolute_import
+
+import sys
+import os
+import re
+
+compiler = None
+# Bzr modification: Disabled import of 'compiler' module
+# bzr doesn't use the 'unrepr' feature of configobj, so importing compiler just
+# wastes several milliseconds on every single bzr invocation.
+# -- Andrew Bennetts, 2008-10-14
+#try:
+# import compiler
+#except ImportError:
+# # for IronPython
+# pass
+
+
+try:
+ from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
+except ImportError:
+ # Python 2.2 does not have these
+ # UTF-8
+ BOM_UTF8 = '\xef\xbb\xbf'
+ # UTF-16, little endian
+ BOM_UTF16_LE = '\xff\xfe'
+ # UTF-16, big endian
+ BOM_UTF16_BE = '\xfe\xff'
+ if sys.byteorder == 'little':
+ # UTF-16, native endianness
+ BOM_UTF16 = BOM_UTF16_LE
+ else:
+ # UTF-16, native endianness
+ BOM_UTF16 = BOM_UTF16_BE
+
+# A dictionary mapping BOM to
+# the encoding to decode with, and what to set the
+# encoding attribute to.
+BOMS = {
+ BOM_UTF8: ('utf_8', None),
+ BOM_UTF16_BE: ('utf16_be', 'utf_16'),
+ BOM_UTF16_LE: ('utf16_le', 'utf_16'),
+ BOM_UTF16: ('utf_16', 'utf_16'),
+ }
+# All legal variants of the BOM codecs.
+# TODO: the list of aliases is not meant to be exhaustive, is there a
+# better way ?
+BOM_LIST = {
+ 'utf_16': 'utf_16',
+ 'u16': 'utf_16',
+ 'utf16': 'utf_16',
+ 'utf-16': 'utf_16',
+ 'utf16_be': 'utf16_be',
+ 'utf_16_be': 'utf16_be',
+ 'utf-16be': 'utf16_be',
+ 'utf16_le': 'utf16_le',
+ 'utf_16_le': 'utf16_le',
+ 'utf-16le': 'utf16_le',
+ 'utf_8': 'utf_8',
+ 'u8': 'utf_8',
+ 'utf': 'utf_8',
+ 'utf8': 'utf_8',
+ 'utf-8': 'utf_8',
+ }
+
+# Map of encodings to the BOM to write.
+BOM_SET = {
+ 'utf_8': BOM_UTF8,
+ 'utf_16': BOM_UTF16,
+ 'utf16_be': BOM_UTF16_BE,
+ 'utf16_le': BOM_UTF16_LE,
+ None: BOM_UTF8
+ }
+
+
+def match_utf8(encoding):
+ return BOM_LIST.get(encoding.lower()) == 'utf_8'
+
+
+# Quote strings used for writing values
+squot = "'%s'"
+dquot = '"%s"'
+noquot = "%s"
+wspace_plus = ' \r\n\v\t\'"'
+tsquot = '"""%s"""'
+tdquot = "'''%s'''"
+
+try:
+ enumerate
+except NameError:
+ def enumerate(obj):
+ """enumerate for Python 2.2."""
+ i = -1
+ for item in obj:
+ i += 1
+ yield i, item
+
+# Sentinel for use in getattr calls to replace hasattr
+MISSING = object()
+
+__version__ = '4.6.0'
+
+__revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $'
+
+__docformat__ = "restructuredtext en"
+
+__all__ = (
+ '__version__',
+ 'DEFAULT_INDENT_TYPE',
+ 'DEFAULT_INTERPOLATION',
+ 'ConfigObjError',
+ 'NestingError',
+ 'ParseError',
+ 'DuplicateError',
+ 'ConfigspecError',
+ 'ConfigObj',
+ 'SimpleVal',
+ 'InterpolationError',
+ 'InterpolationLoopError',
+ 'MissingInterpolationOption',
+ 'RepeatSectionError',
+ 'ReloadError',
+ 'UnreprError',
+ 'UnknownType',
+ '__docformat__',
+ 'flatten_errors',
+)
+
+DEFAULT_INTERPOLATION = 'configparser'
+DEFAULT_INDENT_TYPE = ' '
+MAX_INTERPOL_DEPTH = 10
+
+OPTION_DEFAULTS = {
+ 'interpolation': True,
+ 'raise_errors': False,
+ 'list_values': True,
+ 'create_empty': False,
+ 'file_error': False,
+ 'configspec': None,
+ 'stringify': True,
+ # option may be set to one of ('', ' ', '\t')
+ 'indent_type': None,
+ 'encoding': None,
+ 'default_encoding': None,
+ 'unrepr': False,
+ 'write_empty_values': False,
+}
+
+
+
+def getObj(s):
+ s = "a=" + s
+ if compiler is None:
+ raise ImportError('compiler module not available')
+ p = compiler.parse(s)
+ return p.getChildren()[1].getChildren()[0].getChildren()[1]
+
+
+class UnknownType(Exception):
+ pass
+
+
+class Builder(object):
+
+ def build(self, o):
+ m = getattr(self, 'build_' + o.__class__.__name__, None)
+ if m is None:
+ raise UnknownType(o.__class__.__name__)
+ return m(o)
+
+ def build_List(self, o):
+ return map(self.build, o.getChildren())
+
+ def build_Const(self, o):
+ return o.value
+
+ def build_Dict(self, o):
+ d = {}
+ i = iter(map(self.build, o.getChildren()))
+ for el in i:
+ d[el] = i.next()
+ return d
+
+ def build_Tuple(self, o):
+ return tuple(self.build_List(o))
+
+ def build_Name(self, o):
+ if o.name == 'None':
+ return None
+ if o.name == 'True':
+ return True
+ if o.name == 'False':
+ return False
+
+ # An undefined Name
+ raise UnknownType('Undefined Name')
+
+ def build_Add(self, o):
+ real, imag = map(self.build_Const, o.getChildren())
+ try:
+ real = float(real)
+ except TypeError:
+ raise UnknownType('Add')
+ if not isinstance(imag, complex) or imag.real != 0.0:
+ raise UnknownType('Add')
+ return real+imag
+
+ def build_Getattr(self, o):
+ parent = self.build(o.expr)
+ return getattr(parent, o.attrname)
+
+ def build_UnarySub(self, o):
+ return -self.build_Const(o.getChildren()[0])
+
+ def build_UnaryAdd(self, o):
+ return self.build_Const(o.getChildren()[0])
+
+
+_builder = Builder()
+
+
+def unrepr(s):
+ if not s:
+ return s
+ return _builder.build(getObj(s))
+
+
+
+class ConfigObjError(SyntaxError):
+ """
+ This is the base class for all errors that ConfigObj raises.
+ It is a subclass of SyntaxError.
+ """
+ def __init__(self, message='', line_number=None, line=''):
+ self.line = line
+ self.line_number = line_number
+ SyntaxError.__init__(self, message)
+
+
+class NestingError(ConfigObjError):
+ """
+ This error indicates a level of nesting that doesn't match.
+ """
+
+
+class ParseError(ConfigObjError):
+ """
+ This error indicates that a line is badly written.
+ It is neither a valid ``key = value`` line,
+ nor a valid section marker line.
+ """
+
+
+class ReloadError(IOError):
+ """
+ A 'reload' operation failed.
+ This exception is a subclass of ``IOError``.
+ """
+ def __init__(self):
+ IOError.__init__(self, 'reload failed, filename is not set.')
+
+
+class DuplicateError(ConfigObjError):
+ """
+ The keyword or section specified already exists.
+ """
+
+
+class ConfigspecError(ConfigObjError):
+ """
+    An error occurred whilst parsing a configspec.
+ """
+
+
+class InterpolationError(ConfigObjError):
+ """Base class for the two interpolation errors."""
+
+
+class InterpolationLoopError(InterpolationError):
+ """Maximum interpolation depth exceeded in string interpolation."""
+
+ def __init__(self, option):
+ InterpolationError.__init__(
+ self,
+ 'interpolation loop detected in value "%s".' % option)
+
+
+class RepeatSectionError(ConfigObjError):
+ """
+ This error indicates additional sections in a section with a
+ ``__many__`` (repeated) section.
+ """
+
+
+class MissingInterpolationOption(InterpolationError):
+ """A value specified for interpolation was missing."""
+
+ def __init__(self, option):
+ InterpolationError.__init__(
+ self,
+ 'missing option "%s" in interpolation.' % option)
+
+
+class UnreprError(ConfigObjError):
+ """An error parsing in unrepr mode."""
+
+
+
+class InterpolationEngine(object):
+ """
+ A helper class to help perform string interpolation.
+
+ This class is an abstract base class; its descendants perform
+ the actual work.
+ """
+
+ # compiled regexp to use in self.interpolate()
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+
+ def __init__(self, section):
+ # the Section instance that "owns" this engine
+ self.section = section
+
+
+ def interpolate(self, key, value):
+ def recursive_interpolate(key, value, section, backtrail):
+ """The function that does the actual work.
+
+ ``value``: the string we're trying to interpolate.
+ ``section``: the section in which that string was found
+ ``backtrail``: a dict to keep track of where we've been,
+ to detect and prevent infinite recursion loops
+
+ This is similar to a depth-first-search algorithm.
+ """
+ # Have we been here already?
+ if (key, section.name) in backtrail:
+ # Yes - infinite loop detected
+ raise InterpolationLoopError(key)
+ # Place a marker on our backtrail so we won't come back here again
+ backtrail[(key, section.name)] = 1
+
+ # Now start the actual work
+ match = self._KEYCRE.search(value)
+ while match:
+ # The actual parsing of the match is implementation-dependent,
+ # so delegate to our helper function
+ k, v, s = self._parse_match(match)
+ if k is None:
+ # That's the signal that no further interpolation is needed
+ replacement = v
+ else:
+ # Further interpolation may be needed to obtain final value
+ replacement = recursive_interpolate(k, v, s, backtrail)
+ # Replace the matched string with its final value
+ start, end = match.span()
+ value = ''.join((value[:start], replacement, value[end:]))
+ new_search_start = start + len(replacement)
+ # Pick up the next interpolation key, if any, for next time
+ # through the while loop
+ match = self._KEYCRE.search(value, new_search_start)
+
+ # Now safe to come back here again; remove marker from backtrail
+ del backtrail[(key, section.name)]
+
+ return value
+
+ # Back in interpolate(), all we have to do is kick off the recursive
+ # function with appropriate starting values
+ value = recursive_interpolate(key, value, self.section, {})
+ return value
+
+
+ def _fetch(self, key):
+ """Helper function to fetch values from owning section.
+
+ Returns a 2-tuple: the value, and the section where it was found.
+ """
+ # switch off interpolation before we try and fetch anything !
+ save_interp = self.section.main.interpolation
+ self.section.main.interpolation = False
+
+ # Start at section that "owns" this InterpolationEngine
+ current_section = self.section
+ while True:
+ # try the current section first
+ val = current_section.get(key)
+ if val is not None:
+ break
+ # try "DEFAULT" next
+ val = current_section.get('DEFAULT', {}).get(key)
+ if val is not None:
+ break
+ # move up to parent and try again
+ # top-level's parent is itself
+ if current_section.parent is current_section:
+ # reached top level, time to give up
+ break
+ current_section = current_section.parent
+
+ # restore interpolation to previous value before returning
+ self.section.main.interpolation = save_interp
+ if val is None:
+ raise MissingInterpolationOption(key)
+ return val, current_section
+
+
+ def _parse_match(self, match):
+ """Implementation-dependent helper function.
+
+ Will be passed a match object corresponding to the interpolation
+ key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
+ key in the appropriate config file section (using the ``_fetch()``
+ helper function) and return a 3-tuple: (key, value, section)
+
+ ``key`` is the name of the key we're looking for
+ ``value`` is the value found for that key
+ ``section`` is a reference to the section where it was found
+
+ ``key`` and ``section`` should be None if no further
+ interpolation should be performed on the resulting value
+ (e.g., if we interpolated "$$" and returned "$").
+ """
+ raise NotImplementedError()
+
+
+
+class ConfigParserInterpolation(InterpolationEngine):
+ """Behaves like ConfigParser."""
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+
+ def _parse_match(self, match):
+ key = match.group(1)
+ value, section = self._fetch(key)
+ return key, value, section
+
+
+
+class TemplateInterpolation(InterpolationEngine):
+ """Behaves like string.Template."""
+ _delimiter = '$'
+ _KEYCRE = re.compile(r"""
+ \$(?:
+ (?P<escaped>\$) | # Two $ signs
+ (?P<named>[_a-z][_a-z0-9]*) | # $name format
+ {(?P<braced>[^}]*)} # ${name} format
+ )
+ """, re.IGNORECASE | re.VERBOSE)
+
+ def _parse_match(self, match):
+ # Valid name (in or out of braces): fetch value from section
+ key = match.group('named') or match.group('braced')
+ if key is not None:
+ value, section = self._fetch(key)
+ return key, value, section
+ # Escaped delimiter (e.g., $$): return single delimiter
+ if match.group('escaped') is not None:
+ # Return None for key and section to indicate it's time to stop
+ return None, self._delimiter, None
+ # Anything else: ignore completely, just return it unchanged
+ return None, match.group(), None
+
+
+interpolation_engines = {
+ 'configparser': ConfigParserInterpolation,
+ 'template': TemplateInterpolation,
+}
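+# Illustrative only (editorial note): with the default 'configparser' engine,
+# a section containing
+#   home = /home/user
+#   docs = %(home)s/docs
+# interpolates 'docs' to '/home/user/docs'; the 'template' engine would use
+# $home or ${home} instead.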
+
+
+def __newobj__(cls, *args):
+ # Hack for pickle
+ return cls.__new__(cls, *args)
+
+class Section(dict):
+ """
+ A dictionary-like object that represents a section in a config file.
+
+ It does string interpolation if the 'interpolation' attribute
+ of the 'main' object is set to True.
+
+ Interpolation is tried first from this object, then from the 'DEFAULT'
+ section of this object, next from the parent and its 'DEFAULT' section,
+ and so on until the main object is reached.
+
+ A Section will behave like an ordered dictionary - following the
+ order of the ``scalars`` and ``sections`` attributes.
+ You can use this to change the order of members.
+
+ Iteration follows the order: scalars, then sections.
+ """
+
+
+ def __setstate__(self, state):
+ dict.update(self, state[0])
+ self.__dict__.update(state[1])
+
+ def __reduce__(self):
+ state = (dict(self), self.__dict__)
+ return (__newobj__, (self.__class__,), state)
+
+
+ def __init__(self, parent, depth, main, indict=None, name=None):
+ """
+ * parent is the section above
+ * depth is the depth level of this section
+ * main is the main ConfigObj
+ * indict is a dictionary to initialise the section with
+ """
+ if indict is None:
+ indict = {}
+ dict.__init__(self)
+ # used for nesting level *and* interpolation
+ self.parent = parent
+ # used for the interpolation attribute
+ self.main = main
+ # level of nesting depth of this Section
+ self.depth = depth
+ # purely for information
+ self.name = name
+ #
+ self._initialise()
+ # we do this explicitly so that __setitem__ is used properly
+ # (rather than just passing to ``dict.__init__``)
+ for entry, value in indict.iteritems():
+ self[entry] = value
+
+
+ def _initialise(self):
+ # the sequence of scalar values in this Section
+ self.scalars = []
+ # the sequence of sections in this Section
+ self.sections = []
+ # for comments :-)
+ self.comments = {}
+ self.inline_comments = {}
+ # the configspec
+ self.configspec = None
+ # for defaults
+ self.defaults = []
+ self.default_values = {}
+
+
+ def _interpolate(self, key, value):
+ try:
+ # do we already have an interpolation engine?
+ engine = self._interpolation_engine
+ except AttributeError:
+ # not yet: first time running _interpolate(), so pick the engine
+ name = self.main.interpolation
+ if name == True: # note that "if name:" would be incorrect here
+ # backwards-compatibility: interpolation=True means use default
+ name = DEFAULT_INTERPOLATION
+ name = name.lower() # so that "Template", "template", etc. all work
+ class_ = interpolation_engines.get(name, None)
+ if class_ is None:
+ # invalid value for self.main.interpolation
+ self.main.interpolation = False
+ return value
+ else:
+ # save reference to engine so we don't have to do this again
+ engine = self._interpolation_engine = class_(self)
+ # let the engine do the actual work
+ return engine.interpolate(key, value)
+
+
+ def __getitem__(self, key):
+ """Fetch the item and do string interpolation."""
+ val = dict.__getitem__(self, key)
+ if self.main.interpolation and isinstance(val, basestring):
+ return self._interpolate(key, val)
+ return val
+
+
+ def __setitem__(self, key, value, unrepr=False):
+ """
+ Correctly set a value.
+
+ Making dictionary values Section instances.
+ (We have to special case 'Section' instances - which are also dicts)
+
+ Keys must be strings.
+ Values need only be strings (or lists of strings) if
+ ``main.stringify`` is set.
+
+ ``unrepr`` must be set when setting a value to a dictionary, without
+ creating a new sub-section.
+ """
+ if not isinstance(key, basestring):
+ raise ValueError('The key "%s" is not a string.' % key)
+
+ # add the comment
+ if key not in self.comments:
+ self.comments[key] = []
+ self.inline_comments[key] = ''
+ # remove the entry from defaults
+ if key in self.defaults:
+ self.defaults.remove(key)
+ #
+ if isinstance(value, Section):
+ if key not in self:
+ self.sections.append(key)
+ dict.__setitem__(self, key, value)
+ elif isinstance(value, dict) and not unrepr:
+ # First create the new depth level,
+ # then create the section
+ if key not in self:
+ self.sections.append(key)
+ new_depth = self.depth + 1
+ dict.__setitem__(
+ self,
+ key,
+ Section(
+ self,
+ new_depth,
+ self.main,
+ indict=value,
+ name=key))
+ else:
+ if key not in self:
+ self.scalars.append(key)
+ if not self.main.stringify:
+ if isinstance(value, basestring):
+ pass
+ elif isinstance(value, (list, tuple)):
+ for entry in value:
+ if not isinstance(entry, basestring):
+ raise TypeError('Value is not a string "%s".' % entry)
+ else:
+ raise TypeError('Value is not a string "%s".' % value)
+ dict.__setitem__(self, key, value)
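+
+    # Illustrative behaviour of ``__setitem__`` (the key names below are made
+    # up): assigning a plain dict creates a new sub-section, while assigning a
+    # string creates a scalar entry.
+    #
+    #     >>> cfg = ConfigObj()
+    #     >>> cfg['server'] = {'host': 'localhost'}    # becomes a Section
+    #     >>> cfg['port'] = '8080'                     # stays a scalar
+    #     >>> cfg.sections, cfg.scalars
+    #     (['server'], ['port'])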
+
+
+ def __delitem__(self, key):
+ """Remove items from the sequence when deleting."""
+        dict.__delitem__(self, key)
+ if key in self.scalars:
+ self.scalars.remove(key)
+ else:
+ self.sections.remove(key)
+ del self.comments[key]
+ del self.inline_comments[key]
+
+
+ def get(self, key, default=None):
+ """A version of ``get`` that doesn't bypass string interpolation."""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+
+ def update(self, indict):
+ """
+ A version of update that uses our ``__setitem__``.
+ """
+ for entry in indict:
+ self[entry] = indict[entry]
+
+
+ def pop(self, key, *args):
+ """
+        D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+        If key is not found, d is returned if given, otherwise KeyError is raised.
+ """
+ val = dict.pop(self, key, *args)
+ if key in self.scalars:
+ del self.comments[key]
+ del self.inline_comments[key]
+ self.scalars.remove(key)
+ elif key in self.sections:
+ del self.comments[key]
+ del self.inline_comments[key]
+ self.sections.remove(key)
+ if self.main.interpolation and isinstance(val, basestring):
+ return self._interpolate(key, val)
+ return val
+
+
+ def popitem(self):
+ """Pops the first (key,val)"""
+ sequence = (self.scalars + self.sections)
+ if not sequence:
+ raise KeyError(": 'popitem(): dictionary is empty'")
+ key = sequence[0]
+ val = self[key]
+ del self[key]
+ return key, val
+
+
+ def clear(self):
+ """
+        A version of clear that also affects scalars/sections.
+        Also clears comments and configspec.
+
+        Leaves other attributes alone:
+ depth/main/parent are not affected
+ """
+ dict.clear(self)
+ self.scalars = []
+ self.sections = []
+ self.comments = {}
+ self.inline_comments = {}
+ self.configspec = None
+
+
+ def setdefault(self, key, default=None):
+ """A version of setdefault that sets sequence if appropriate."""
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return self[key]
+
+
+ def items(self):
+ """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
+ return zip((self.scalars + self.sections), self.values())
+
+
+ def keys(self):
+ """D.keys() -> list of D's keys"""
+ return (self.scalars + self.sections)
+
+
+ def values(self):
+ """D.values() -> list of D's values"""
+ return [self[key] for key in (self.scalars + self.sections)]
+
+
+ def iteritems(self):
+ """D.iteritems() -> an iterator over the (key, value) items of D"""
+ return iter(self.items())
+
+
+ def iterkeys(self):
+ """D.iterkeys() -> an iterator over the keys of D"""
+ return iter((self.scalars + self.sections))
+
+ __iter__ = iterkeys
+
+
+ def itervalues(self):
+ """D.itervalues() -> an iterator over the values of D"""
+ return iter(self.values())
+
+
+ def __repr__(self):
+ """x.__repr__() <==> repr(x)"""
+ return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
+ for key in (self.scalars + self.sections)])
+
+ __str__ = __repr__
+ __str__.__doc__ = "x.__str__() <==> str(x)"
+
+
+ # Extra methods - not in a normal dictionary
+
+ def dict(self):
+ """
+ Return a deepcopy of self as a dictionary.
+
+ All members that are ``Section`` instances are recursively turned to
+ ordinary dictionaries - by calling their ``dict`` method.
+
+ >>> n = a.dict()
+ >>> n == a
+ 1
+ >>> n is a
+ 0
+ """
+ newdict = {}
+ for entry in self:
+ this_entry = self[entry]
+ if isinstance(this_entry, Section):
+ this_entry = this_entry.dict()
+ elif isinstance(this_entry, list):
+ # create a copy rather than a reference
+ this_entry = list(this_entry)
+ elif isinstance(this_entry, tuple):
+ # create a copy rather than a reference
+ this_entry = tuple(this_entry)
+ newdict[entry] = this_entry
+ return newdict
+
+
+ def merge(self, indict):
+ """
+ A recursive update - useful for merging config files.
+
+ >>> a = '''[section1]
+ ... option1 = True
+ ... [[subsection]]
+ ... more_options = False
+ ... # end of file'''.splitlines()
+ >>> b = '''# File is user.ini
+ ... [section1]
+ ... option1 = False
+ ... # end of file'''.splitlines()
+ >>> c1 = ConfigObj(b)
+ >>> c2 = ConfigObj(a)
+ >>> c2.merge(c1)
+ >>> c2
+ ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
+ """
+ for key, val in indict.items():
+ if (key in self and isinstance(self[key], dict) and
+ isinstance(val, dict)):
+ self[key].merge(val)
+ else:
+ self[key] = val
+
+
+ def rename(self, oldkey, newkey):
+ """
+ Change a keyname to another, without changing position in sequence.
+
+ Implemented so that transformations can be made on keys,
+ as well as on values. (used by encode and decode)
+
+ Also renames comments.
+ """
+ if oldkey in self.scalars:
+ the_list = self.scalars
+ elif oldkey in self.sections:
+ the_list = self.sections
+ else:
+ raise KeyError('Key "%s" not found.' % oldkey)
+ pos = the_list.index(oldkey)
+ #
+ val = self[oldkey]
+ dict.__delitem__(self, oldkey)
+ dict.__setitem__(self, newkey, val)
+ the_list.remove(oldkey)
+ the_list.insert(pos, newkey)
+ comm = self.comments[oldkey]
+ inline_comment = self.inline_comments[oldkey]
+ del self.comments[oldkey]
+ del self.inline_comments[oldkey]
+ self.comments[newkey] = comm
+ self.inline_comments[newkey] = inline_comment
+
+
+ def walk(self, function, raise_errors=True,
+ call_on_sections=False, **keywargs):
+ """
+ Walk every member and call a function on the keyword and value.
+
+ Return a dictionary of the return values
+
+        If the function raises an exception, raise the error
+ unless ``raise_errors=False``, in which case set the return value to
+ ``False``.
+
+        Any unrecognised keyword arguments you pass to walk will be passed on
+        to the function you pass in.
+
+        Note: if ``call_on_sections`` is ``True`` then, on encountering a
+        subsection, the function is *first* called for the *whole* subsection
+        and the walk then recurses into its members. This means your function
+        must be able to handle strings, dictionaries and lists. This allows you
+        to change the keys of subsections as well as of ordinary members. The
+        return value from the call on the whole subsection is discarded.
+
+ See the encode and decode methods for examples, including functions.
+
+ .. admonition:: caution
+
+ You can use ``walk`` to transform the names of members of a section
+ but you mustn't add or delete members.
+
+ >>> config = '''[XXXXsection]
+ ... XXXXkey = XXXXvalue'''.splitlines()
+ >>> cfg = ConfigObj(config)
+ >>> cfg
+ ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
+ >>> def transform(section, key):
+ ... val = section[key]
+ ... newkey = key.replace('XXXX', 'CLIENT1')
+ ... section.rename(key, newkey)
+ ... if isinstance(val, (tuple, list, dict)):
+ ... pass
+ ... else:
+ ... val = val.replace('XXXX', 'CLIENT1')
+ ... section[newkey] = val
+ >>> cfg.walk(transform, call_on_sections=True)
+ {'CLIENT1section': {'CLIENT1key': None}}
+ >>> cfg
+ ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
+ """
+ out = {}
+ # scalars first
+ for i in range(len(self.scalars)):
+ entry = self.scalars[i]
+ try:
+ val = function(self, entry, **keywargs)
+ # bound again in case name has changed
+ entry = self.scalars[i]
+ out[entry] = val
+ except Exception:
+ if raise_errors:
+ raise
+ else:
+ entry = self.scalars[i]
+ out[entry] = False
+ # then sections
+ for i in range(len(self.sections)):
+ entry = self.sections[i]
+ if call_on_sections:
+ try:
+ function(self, entry, **keywargs)
+ except Exception:
+ if raise_errors:
+ raise
+ else:
+ entry = self.sections[i]
+ out[entry] = False
+ # bound again in case name has changed
+ entry = self.sections[i]
+ # previous result is discarded
+ out[entry] = self[entry].walk(
+ function,
+ raise_errors=raise_errors,
+ call_on_sections=call_on_sections,
+ **keywargs)
+ return out
+
+
+ def as_bool(self, key):
+ """
+ Accepts a key as input. The corresponding value must be a string or
+ the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
+ retain compatibility with Python 2.2.
+
+ If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
+ ``True``.
+
+ If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
+ ``False``.
+
+ ``as_bool`` is not case sensitive.
+
+ Any other input will raise a ``ValueError``.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 'fish'
+ >>> a.as_bool('a')
+ Traceback (most recent call last):
+ ValueError: Value "fish" is neither True nor False
+ >>> a['b'] = 'True'
+ >>> a.as_bool('b')
+ 1
+ >>> a['b'] = 'off'
+ >>> a.as_bool('b')
+ 0
+ """
+ val = self[key]
+ if val == True:
+ return True
+ elif val == False:
+ return False
+ else:
+ try:
+ if not isinstance(val, basestring):
+ # TODO: Why do we raise a KeyError here?
+ raise KeyError()
+ else:
+ return self.main._bools[val.lower()]
+ except KeyError:
+ raise ValueError('Value "%s" is neither True nor False' % val)
+
+
+ def as_int(self, key):
+ """
+ A convenience method which coerces the specified value to an integer.
+
+ If the value is an invalid literal for ``int``, a ``ValueError`` will
+ be raised.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 'fish'
+ >>> a.as_int('a')
+ Traceback (most recent call last):
+ ValueError: invalid literal for int() with base 10: 'fish'
+ >>> a['b'] = '1'
+ >>> a.as_int('b')
+ 1
+ >>> a['b'] = '3.2'
+ >>> a.as_int('b')
+ Traceback (most recent call last):
+ ValueError: invalid literal for int() with base 10: '3.2'
+ """
+ return int(self[key])
+
+
+ def as_float(self, key):
+ """
+ A convenience method which coerces the specified value to a float.
+
+ If the value is an invalid literal for ``float``, a ``ValueError`` will
+ be raised.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 'fish'
+ >>> a.as_float('a')
+ Traceback (most recent call last):
+ ValueError: invalid literal for float(): fish
+ >>> a['b'] = '1'
+ >>> a.as_float('b')
+ 1.0
+ >>> a['b'] = '3.2'
+ >>> a.as_float('b')
+ 3.2000000000000002
+ """
+ return float(self[key])
+
+
+ def as_list(self, key):
+ """
+ A convenience method which fetches the specified value, guaranteeing
+ that it is a list.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 1
+ >>> a.as_list('a')
+ [1]
+ >>> a['a'] = (1,)
+ >>> a.as_list('a')
+ [1]
+ >>> a['a'] = [1]
+ >>> a.as_list('a')
+ [1]
+ """
+ result = self[key]
+ if isinstance(result, (tuple, list)):
+ return list(result)
+ return [result]
+
+
+ def restore_default(self, key):
+ """
+ Restore (and return) default value for the specified key.
+
+ This method will only work for a ConfigObj that was created
+ with a configspec and has been validated.
+
+ If there is no default value for this key, ``KeyError`` is raised.
+ """
+ default = self.default_values[key]
+ dict.__setitem__(self, key, default)
+ if key not in self.defaults:
+ self.defaults.append(key)
+ return default
+
+
+ def restore_defaults(self):
+ """
+ Recursively restore default values to all members
+ that have them.
+
+ This method will only work for a ConfigObj that was created
+ with a configspec and has been validated.
+
+ It doesn't delete or modify entries without default values.
+ """
+ for key in self.default_values:
+ self.restore_default(key)
+
+ for section in self.sections:
+ self[section].restore_defaults()
+
+
+class ConfigObj(Section):
+ """An object to read, create, and write config files."""
+
+ _keyword = re.compile(r'''^ # line start
+ (\s*) # indentation
+ ( # keyword
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'"=].*?) # no quotes
+ )
+ \s*=\s* # divider
+ (.*) # value (including list values and comments)
+ $ # line end
+ ''',
+ re.VERBOSE)
+
+ _sectionmarker = re.compile(r'''^
+ (\s*) # 1: indentation
+ ((?:\[\s*)+) # 2: section marker open
+ ( # 3: section name open
+ (?:"\s*\S.*?\s*")| # at least one non-space with double quotes
+ (?:'\s*\S.*?\s*')| # at least one non-space with single quotes
+ (?:[^'"\s].*?) # at least one non-space unquoted
+ ) # section name close
+ ((?:\s*\])+) # 4: section marker close
+ \s*(\#.*)? # 5: optional comment
+ $''',
+ re.VERBOSE)
+
+ # this regexp pulls list values out as a single string
+ # or single values and comments
+ # FIXME: this regex adds a '' to the end of comma terminated lists
+ # workaround in ``_handle_value``
+ _valueexp = re.compile(r'''^
+ (?:
+ (?:
+ (
+ (?:
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\#][^,\#]*?) # unquoted
+ )
+ \s*,\s* # comma
+ )* # match all list items ending in a comma (if any)
+ )
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\#\s][^,]*?)| # unquoted
+ (?:(?<!,)) # Empty value
+ )? # last item in a list - or string value
+ )|
+ (,) # alternatively a single comma - empty list
+ )
+ \s*(\#.*)? # optional comment
+ $''',
+ re.VERBOSE)
+
+ # use findall to get the members of a list value
+ _listvalueexp = re.compile(r'''
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\#].*?) # unquoted
+ )
+ \s*,\s* # comma
+ ''',
+ re.VERBOSE)
+
+ # this regexp is used for the value
+ # when lists are switched off
+ _nolistvalue = re.compile(r'''^
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'"\#].*?)| # unquoted
+ (?:) # Empty value
+ )
+ \s*(\#.*)? # optional comment
+ $''',
+ re.VERBOSE)
+
+ # regexes for finding triple quoted values on one line
+ _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
+ _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
+ _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
+ _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
+
+ _triple_quote = {
+ "'''": (_single_line_single, _multi_line_single),
+ '"""': (_single_line_double, _multi_line_double),
+ }
+
+    # Used by the ``as_bool`` Section method
+ _bools = {
+ 'yes': True, 'no': False,
+ 'on': True, 'off': False,
+ '1': True, '0': False,
+ 'true': True, 'false': False,
+ }
+
+
+ def __init__(self, infile=None, options=None, _inspec=False, **kwargs):
+ """
+ Parse a config file or create a config file object.
+
+ ``ConfigObj(infile=None, options=None, **kwargs)``
+ """
+ self._inspec = _inspec
+ # init the superclass
+ Section.__init__(self, self, 0, self)
+
+ infile = infile or []
+ options = dict(options or {})
+
+ # keyword arguments take precedence over an options dictionary
+ options.update(kwargs)
+ if _inspec:
+ options['list_values'] = False
+
+ defaults = OPTION_DEFAULTS.copy()
+ # TODO: check the values too.
+ for entry in options:
+ if entry not in defaults:
+ raise TypeError('Unrecognised option "%s".' % entry)
+
+ # Add any explicit options to the defaults
+ defaults.update(options)
+ self._initialise(defaults)
+ configspec = defaults['configspec']
+ self._original_configspec = configspec
+ self._load(infile, configspec)
+
+
+ def _load(self, infile, configspec):
+ if isinstance(infile, basestring):
+ self.filename = infile
+ if os.path.isfile(infile):
+ h = open(infile, 'rb')
+ infile = h.read() or []
+ h.close()
+ elif self.file_error:
+ # raise an error if the file doesn't exist
+ raise IOError('Config file not found: "%s".' % self.filename)
+ else:
+ # file doesn't already exist
+ if self.create_empty:
+ # this is a good test that the filename specified
+ # isn't impossible - like on a non-existent device
+ h = open(infile, 'w')
+ h.write('')
+ h.close()
+ infile = []
+
+ elif isinstance(infile, (list, tuple)):
+ infile = list(infile)
+
+ elif isinstance(infile, dict):
+ # initialise self
+ # the Section class handles creating subsections
+ if isinstance(infile, ConfigObj):
+ # get a copy of our ConfigObj
+ infile = infile.dict()
+
+ for entry in infile:
+ self[entry] = infile[entry]
+ del self._errors
+
+ if configspec is not None:
+ self._handle_configspec(configspec)
+ else:
+ self.configspec = None
+ return
+
+ elif getattr(infile, 'read', MISSING) is not MISSING:
+ # This supports file like objects
+ infile = infile.read() or []
+ # needs splitting into lines - but needs doing *after* decoding
+ # in case it's not an 8 bit encoding
+ else:
+ raise TypeError('infile must be a filename, file like object, or list of lines.')
+
+ if infile:
+ # don't do it for the empty ConfigObj
+ infile = self._handle_bom(infile)
+ # infile is now *always* a list
+ #
+ # Set the newlines attribute (first line ending it finds)
+ # and strip trailing '\n' or '\r' from lines
+ for line in infile:
+ if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
+ continue
+ for end in ('\r\n', '\n', '\r'):
+ if line.endswith(end):
+ self.newlines = end
+ break
+ break
+
+ infile = [line.rstrip('\r\n') for line in infile]
+
+ self._parse(infile)
+ # if we had any errors, now is the time to raise them
+ if self._errors:
+ info = "at line %s." % self._errors[0].line_number
+ if len(self._errors) > 1:
+ msg = "Parsing failed with several errors.\nFirst error %s" % info
+ error = ConfigObjError(msg)
+ else:
+ error = self._errors[0]
+ # set the errors attribute; it's a list of tuples:
+ # (error_type, message, line_number)
+ error.errors = self._errors
+ # set the config attribute
+ error.config = self
+ raise error
+ # delete private attributes
+ del self._errors
+
+ if configspec is None:
+ self.configspec = None
+ else:
+ self._handle_configspec(configspec)
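+
+    # A short sketch of the ``infile`` types accepted by ``_load`` (the
+    # filename below is a made-up example; ``StringIO`` is assumed imported):
+    #
+    #     ConfigObj('app.ini')                # filename
+    #     ConfigObj(['key = value'])          # list of lines
+    #     ConfigObj({'key': 'value'})         # plain dictionary
+    #     ConfigObj(StringIO('key = value'))  # any object with a ``read`` method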
+
+
+ def _initialise(self, options=None):
+ if options is None:
+ options = OPTION_DEFAULTS
+
+ # initialise a few variables
+ self.filename = None
+ self._errors = []
+ self.raise_errors = options['raise_errors']
+ self.interpolation = options['interpolation']
+ self.list_values = options['list_values']
+ self.create_empty = options['create_empty']
+ self.file_error = options['file_error']
+ self.stringify = options['stringify']
+ self.indent_type = options['indent_type']
+ self.encoding = options['encoding']
+ self.default_encoding = options['default_encoding']
+ self.BOM = False
+ self.newlines = None
+ self.write_empty_values = options['write_empty_values']
+ self.unrepr = options['unrepr']
+
+ self.initial_comment = []
+ self.final_comment = []
+ self.configspec = None
+
+ if self._inspec:
+ self.list_values = False
+
+ # Clear section attributes as well
+ Section._initialise(self)
+
+
+ def __repr__(self):
+ return ('ConfigObj({%s})' %
+ ', '.join([('%s: %s' % (repr(key), repr(self[key])))
+ for key in (self.scalars + self.sections)]))
+
+
+ def _handle_bom(self, infile):
+ """
+ Handle any BOM, and decode if necessary.
+
+ If an encoding is specified, that *must* be used - but the BOM should
+ still be removed (and the BOM attribute set).
+
+ (If the encoding is wrongly specified, then a BOM for an alternative
+ encoding won't be discovered or removed.)
+
+ If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
+ removed. The BOM attribute will be set. UTF16 will be decoded to
+ unicode.
+
+ NOTE: This method must not be called with an empty ``infile``.
+
+ Specifying the *wrong* encoding is likely to cause a
+ ``UnicodeDecodeError``.
+
+ ``infile`` must always be returned as a list of lines, but may be
+ passed in as a single string.
+ """
+ if ((self.encoding is not None) and
+ (self.encoding.lower() not in BOM_LIST)):
+ # No need to check for a BOM
+ # the encoding specified doesn't have one
+ # just decode
+ return self._decode(infile, self.encoding)
+
+ if isinstance(infile, (list, tuple)):
+ line = infile[0]
+ else:
+ line = infile
+ if self.encoding is not None:
+ # encoding explicitly supplied
+ # And it could have an associated BOM
+ # TODO: if encoding is just UTF16 - we ought to check for both
+ # TODO: big endian and little endian versions.
+ enc = BOM_LIST[self.encoding.lower()]
+ if enc == 'utf_16':
+ # For UTF16 we try big endian and little endian
+ for BOM, (encoding, final_encoding) in BOMS.items():
+ if not final_encoding:
+ # skip UTF8
+ continue
+ if infile.startswith(BOM):
+ ### BOM discovered
+ ##self.BOM = True
+ # Don't need to remove BOM
+ return self._decode(infile, encoding)
+
+ # If we get this far, will *probably* raise a DecodeError
+ # As it doesn't appear to start with a BOM
+ return self._decode(infile, self.encoding)
+
+ # Must be UTF8
+ BOM = BOM_SET[enc]
+ if not line.startswith(BOM):
+ return self._decode(infile, self.encoding)
+
+ newline = line[len(BOM):]
+
+ # BOM removed
+ if isinstance(infile, (list, tuple)):
+ infile[0] = newline
+ else:
+ infile = newline
+ self.BOM = True
+ return self._decode(infile, self.encoding)
+
+ # No encoding specified - so we need to check for UTF8/UTF16
+ for BOM, (encoding, final_encoding) in BOMS.items():
+ if not line.startswith(BOM):
+ continue
+ else:
+ # BOM discovered
+ self.encoding = final_encoding
+ if not final_encoding:
+ self.BOM = True
+ # UTF8
+ # remove BOM
+ newline = line[len(BOM):]
+ if isinstance(infile, (list, tuple)):
+ infile[0] = newline
+ else:
+ infile = newline
+ # UTF8 - don't decode
+ if isinstance(infile, basestring):
+ return infile.splitlines(True)
+ else:
+ return infile
+ # UTF16 - have to decode
+ return self._decode(infile, encoding)
+
+ # No BOM discovered and no encoding specified, just return
+ if isinstance(infile, basestring):
+ # infile read from a file will be a single string
+ return infile.splitlines(True)
+ return infile
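+
+    # Illustrative BOM handling (the content below is made up): a UTF-8 BOM on
+    # the first line is stripped, recorded on ``self.BOM``, and parsing then
+    # proceeds normally.
+    #
+    #     >>> cfg = ConfigObj([BOM_UTF8 + 'key = value'])
+    #     >>> cfg.BOM
+    #     True
+    #     >>> cfg['key']
+    #     'value'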
+
+
+ def _a_to_u(self, aString):
+ """Decode ASCII strings to unicode if a self.encoding is specified."""
+ if self.encoding:
+ return aString.decode('ascii')
+ else:
+ return aString
+
+
+ def _decode(self, infile, encoding):
+ """
+        Decode infile to unicode, using the specified encoding.
+
+        If it is a string, it also needs converting to a list.
+ """
+ if isinstance(infile, basestring):
+ # can't be unicode
+ # NOTE: Could raise a ``UnicodeDecodeError``
+ return infile.decode(encoding).splitlines(True)
+ for i, line in enumerate(infile):
+ if not isinstance(line, unicode):
+ # NOTE: The isinstance test here handles mixed lists of unicode/string
+ # NOTE: But the decode will break on any non-string values
+ # NOTE: Or could raise a ``UnicodeDecodeError``
+ infile[i] = line.decode(encoding)
+ return infile
+
+
+ def _decode_element(self, line):
+ """Decode element to unicode if necessary."""
+ if not self.encoding:
+ return line
+ if isinstance(line, str) and self.default_encoding:
+ return line.decode(self.default_encoding)
+ return line
+
+
+ def _str(self, value):
+ """
+ Used by ``stringify`` within validate, to turn non-string values
+ into strings.
+ """
+ if not isinstance(value, basestring):
+ return str(value)
+ else:
+ return value
+
+
+ def _parse(self, infile):
+ """Actually parse the config file."""
+ temp_list_values = self.list_values
+ if self.unrepr:
+ self.list_values = False
+
+ comment_list = []
+ done_start = False
+ this_section = self
+ maxline = len(infile) - 1
+ cur_index = -1
+ reset_comment = False
+
+ while cur_index < maxline:
+ if reset_comment:
+ comment_list = []
+ cur_index += 1
+ line = infile[cur_index]
+ sline = line.strip()
+ # do we have anything on the line ?
+ if not sline or sline.startswith('#'):
+ reset_comment = False
+ comment_list.append(line)
+ continue
+
+ if not done_start:
+ # preserve initial comment
+ self.initial_comment = comment_list
+ comment_list = []
+ done_start = True
+
+ reset_comment = True
+ # first we check if it's a section marker
+ mat = self._sectionmarker.match(line)
+ if mat is not None:
+ # is a section line
+ (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
+ if indent and (self.indent_type is None):
+ self.indent_type = indent
+ cur_depth = sect_open.count('[')
+ if cur_depth != sect_close.count(']'):
+ self._handle_error("Cannot compute the section depth at line %s.",
+ NestingError, infile, cur_index)
+ continue
+
+ if cur_depth < this_section.depth:
+ # the new section is dropping back to a previous level
+ try:
+ parent = self._match_depth(this_section,
+ cur_depth).parent
+ except SyntaxError:
+ self._handle_error("Cannot compute nesting level at line %s.",
+ NestingError, infile, cur_index)
+ continue
+ elif cur_depth == this_section.depth:
+ # the new section is a sibling of the current section
+ parent = this_section.parent
+ elif cur_depth == this_section.depth + 1:
+                    # the new section is a child of the current section
+ parent = this_section
+ else:
+ self._handle_error("Section too nested at line %s.",
+ NestingError, infile, cur_index)
+
+ sect_name = self._unquote(sect_name)
+ if sect_name in parent:
+ self._handle_error('Duplicate section name at line %s.',
+ DuplicateError, infile, cur_index)
+ continue
+
+ # create the new section
+ this_section = Section(
+ parent,
+ cur_depth,
+ self,
+ name=sect_name)
+ parent[sect_name] = this_section
+ parent.inline_comments[sect_name] = comment
+ parent.comments[sect_name] = comment_list
+ continue
+ #
+ # it's not a section marker,
+ # so it should be a valid ``key = value`` line
+ mat = self._keyword.match(line)
+ if mat is None:
+                # it matched neither as a keyword
+                # nor as a section marker
+ self._handle_error(
+ 'Invalid line at line "%s".',
+ ParseError, infile, cur_index)
+ else:
+ # is a keyword value
+ # value will include any inline comment
+ (indent, key, value) = mat.groups()
+ if indent and (self.indent_type is None):
+ self.indent_type = indent
+ # check for a multiline value
+ if value[:3] in ['"""', "'''"]:
+ try:
+ (value, comment, cur_index) = self._multiline(
+ value, infile, cur_index, maxline)
+ except SyntaxError:
+ self._handle_error(
+ 'Parse error in value at line %s.',
+ ParseError, infile, cur_index)
+ continue
+ else:
+ if self.unrepr:
+ comment = ''
+ try:
+ value = unrepr(value)
+ except Exception, e:
+ if type(e) == UnknownType:
+ msg = 'Unknown name or type in value at line %s.'
+ else:
+ msg = 'Parse error in value at line %s.'
+ self._handle_error(msg, UnreprError, infile,
+ cur_index)
+ continue
+ else:
+ if self.unrepr:
+ comment = ''
+ try:
+ value = unrepr(value)
+ except Exception, e:
+ if isinstance(e, UnknownType):
+ msg = 'Unknown name or type in value at line %s.'
+ else:
+ msg = 'Parse error in value at line %s.'
+ self._handle_error(msg, UnreprError, infile,
+ cur_index)
+ continue
+ else:
+ # extract comment and lists
+ try:
+ (value, comment) = self._handle_value(value)
+ except SyntaxError:
+ self._handle_error(
+ 'Parse error in value at line %s.',
+ ParseError, infile, cur_index)
+ continue
+ #
+ key = self._unquote(key)
+ if key in this_section:
+ self._handle_error(
+ 'Duplicate keyword name at line %s.',
+ DuplicateError, infile, cur_index)
+ continue
+ # add the key.
+ # we set unrepr because if we have got this far we will never
+ # be creating a new section
+ this_section.__setitem__(key, value, unrepr=True)
+ this_section.inline_comments[key] = comment
+ this_section.comments[key] = comment_list
+ continue
+ #
+ if self.indent_type is None:
+ # no indentation used, set the type accordingly
+ self.indent_type = ''
+
+ # preserve the final comment
+ if not self and not self.initial_comment:
+ self.initial_comment = comment_list
+ elif not reset_comment:
+ self.final_comment = comment_list
+ self.list_values = temp_list_values
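+
+    # A small illustrative parse (the content below is made up): comments,
+    # section markers and ``key = value`` lines end up in the structures built
+    # above.
+    #
+    #     >>> cfg = ConfigObj(['# initial comment',
+    #     ...                  '[section]',
+    #     ...                  'key = value  # inline'])
+    #     >>> cfg.initial_comment
+    #     ['# initial comment']
+    #     >>> cfg['section']['key']
+    #     'value'
+    #     >>> cfg['section'].inline_comments['key']
+    #     '# inline'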
+
+
+ def _match_depth(self, sect, depth):
+ """
+        Given a section and a depth level, walk back through the section's
+        parents to see if the depth level matches a previous section.
+
+ Return a reference to the right section,
+ or raise a SyntaxError.
+ """
+ while depth < sect.depth:
+ if sect is sect.parent:
+ # we've reached the top level already
+ raise SyntaxError()
+ sect = sect.parent
+ if sect.depth == depth:
+ return sect
+ # shouldn't get here
+ raise SyntaxError()
+
+
+ def _handle_error(self, text, ErrorClass, infile, cur_index):
+ """
+ Handle an error according to the error settings.
+
+ Either raise the error or store it.
+        The error will have occurred at ``cur_index``.
+ """
+ line = infile[cur_index]
+ cur_index += 1
+ message = text % cur_index
+ error = ErrorClass(message, cur_index, line)
+ if self.raise_errors:
+ # raise the error - parsing stops here
+ raise error
+ # store the error
+ # reraise when parsing has finished
+ self._errors.append(error)
+
+
+ def _unquote(self, value):
+ """Return an unquoted version of a value"""
+ if (value[0] == value[-1]) and (value[0] in ('"', "'")):
+ value = value[1:-1]
+ return value
+
+
+ def _quote(self, value, multiline=True):
+ """
+ Return a safely quoted version of a value.
+
+ Raise a ConfigObjError if the value cannot be safely quoted.
+ If multiline is ``True`` (default) then use triple quotes
+ if necessary.
+
+ * Don't quote values that don't need it.
+ * Recursively quote members of a list and return a comma joined list.
+ * Multiline is ``False`` for lists.
+ * Obey list syntax for empty and single member lists.
+
+ If ``list_values=False`` then the value is only quoted if it contains
+ a ``\\n`` (is multiline) or '#'.
+
+ If ``write_empty_values`` is set, and the value is an empty string, it
+ won't be quoted.
+ """
+ if multiline and self.write_empty_values and value == '':
+ # Only if multiline is set, so that it is used for values not
+ # keys, and not values that are part of a list
+ return ''
+
+ if multiline and isinstance(value, (list, tuple)):
+ if not value:
+ return ','
+ elif len(value) == 1:
+ return self._quote(value[0], multiline=False) + ','
+ return ', '.join([self._quote(val, multiline=False)
+ for val in value])
+ if not isinstance(value, basestring):
+ if self.stringify:
+ value = str(value)
+ else:
+ raise TypeError('Value "%s" is not a string.' % value)
+
+ if not value:
+ return '""'
+
+ no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
+ need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
+ hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
+ check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
+
+ if check_for_single:
+ if not self.list_values:
+ # we don't quote if ``list_values=False``
+ quot = noquot
+ # for normal values either single or double quotes will do
+ elif '\n' in value:
+ # will only happen if multiline is off - e.g. '\n' in key
+ raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
+ elif ((value[0] not in wspace_plus) and
+ (value[-1] not in wspace_plus) and
+ (',' not in value)):
+ quot = noquot
+ else:
+ quot = self._get_single_quote(value)
+ else:
+ # if value has '\n' or "'" *and* '"', it will need triple quotes
+ quot = self._get_triple_quote(value)
+
+ if quot == noquot and '#' in value and self.list_values:
+ quot = self._get_single_quote(value)
+
+ return quot % value
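+
+    # Illustrative quoting decisions (the values below are made up):
+    #
+    #     'plain'          ->  plain                 (no quoting needed)
+    #     'has, comma'     ->  "has, comma"          (contains a comma)
+    #     ['a', 'b']       ->  a, b                  (list joined with ', ')
+    #     it's "quoted"    ->  '''it's "quoted"'''   (contains both quote characters)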
+
+
+ def _get_single_quote(self, value):
+ if ("'" in value) and ('"' in value):
+ raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
+ elif '"' in value:
+ quot = squot
+ else:
+ quot = dquot
+ return quot
+
+
+ def _get_triple_quote(self, value):
+ if (value.find('"""') != -1) and (value.find("'''") != -1):
+ raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
+ # upstream version (up to version 4.7.2) has the bug with incorrect quoting;
+ # fixed in our copy based on the suggestion of ConfigObj's author
+ if value.find('"""') == -1:
+ quot = tsquot
+ else:
+ quot = tdquot
+ return quot
+
+
+ def _handle_value(self, value):
+ """
+ Given a value string, unquote, remove comment,
+ handle lists. (including empty and single member lists)
+ """
+ if self._inspec:
+ # Parsing a configspec so don't handle comments
+ return (value, '')
+ # do we look for lists in values ?
+ if not self.list_values:
+ mat = self._nolistvalue.match(value)
+ if mat is None:
+ raise SyntaxError()
+ # NOTE: we don't unquote here
+ return mat.groups()
+ #
+ mat = self._valueexp.match(value)
+ if mat is None:
+ # the value is badly constructed, probably badly quoted,
+ # or an invalid list
+ raise SyntaxError()
+ (list_values, single, empty_list, comment) = mat.groups()
+ if (list_values == '') and (single is None):
+ # change this if you want to accept empty values
+ raise SyntaxError()
+        # NOTE: there is no error handling from here if the regex
+ # is wrong: then incorrect values will slip through
+ if empty_list is not None:
+ # the single comma - meaning an empty list
+ return ([], comment)
+ if single is not None:
+ # handle empty values
+ if list_values and not single:
+ # FIXME: the '' is a workaround because our regex now matches
+ # '' at the end of a list if it has a trailing comma
+ single = None
+ else:
+ single = single or '""'
+ single = self._unquote(single)
+ if list_values == '':
+ # not a list value
+ return (single, comment)
+ the_list = self._listvalueexp.findall(list_values)
+ the_list = [self._unquote(val) for val in the_list]
+ if single is not None:
+ the_list += [single]
+ return (the_list, comment)
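+
+    # Illustrative results of ``_handle_value`` (the inputs below are made up):
+    #
+    #     'single value'      ->  ('single value', None)
+    #     'a, b, c  # note'   ->  (['a', 'b', 'c'], '# note')
+    #     'a,'                ->  (['a'], None)        (single member list)
+    #     ','                 ->  ([], None)           (empty list)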
+
+
+ def _multiline(self, value, infile, cur_index, maxline):
+ """Extract the value, where we are in a multiline situation."""
+ quot = value[:3]
+ newvalue = value[3:]
+ single_line = self._triple_quote[quot][0]
+ multi_line = self._triple_quote[quot][1]
+ mat = single_line.match(value)
+ if mat is not None:
+ retval = list(mat.groups())
+ retval.append(cur_index)
+ return retval
+ elif newvalue.find(quot) != -1:
+ # somehow the triple quote is missing
+ raise SyntaxError()
+ #
+ while cur_index < maxline:
+ cur_index += 1
+ newvalue += '\n'
+ line = infile[cur_index]
+ if line.find(quot) == -1:
+ newvalue += line
+ else:
+ # end of multiline, process it
+ break
+ else:
+ # we've got to the end of the config, oops...
+ raise SyntaxError()
+ mat = multi_line.match(line)
+ if mat is None:
+ # a badly formed line
+ raise SyntaxError()
+ (value, comment) = mat.groups()
+ return (newvalue + value, comment, cur_index)
+
+
+ def _handle_configspec(self, configspec):
+ """Parse the configspec."""
+ # FIXME: Should we check that the configspec was created with the
+ # correct settings ? (i.e. ``list_values=False``)
+ if not isinstance(configspec, ConfigObj):
+ try:
+ configspec = ConfigObj(configspec,
+ raise_errors=True,
+ file_error=True,
+ _inspec=True)
+ except ConfigObjError, e:
+ # FIXME: Should these errors have a reference
+ # to the already parsed ConfigObj ?
+ raise ConfigspecError('Parsing configspec failed: %s' % e)
+ except IOError, e:
+ raise IOError('Reading configspec failed: %s' % e)
+
+ self.configspec = configspec
+
+
+
+ def _set_configspec(self, section, copy):
+ """
+ Called by validate. Handles setting the configspec on subsections
+ including sections to be validated by __many__
+ """
+ configspec = section.configspec
+ many = configspec.get('__many__')
+ if isinstance(many, dict):
+ for entry in section.sections:
+ if entry not in configspec:
+ section[entry].configspec = many
+
+ for entry in configspec.sections:
+ if entry == '__many__':
+ continue
+ if entry not in section:
+ section[entry] = {}
+ if copy:
+ # copy comments
+ section.comments[entry] = configspec.comments.get(entry, [])
+ section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
+
+ # Could be a scalar when we expect a section
+ if isinstance(section[entry], Section):
+ section[entry].configspec = configspec[entry]
+
+
+ def _write_line(self, indent_string, entry, this_entry, comment):
+ """Write an individual line, for the write method"""
+        # NOTE: the calls to self._quote here handle non-StringType values.
+ if not self.unrepr:
+ val = self._decode_element(self._quote(this_entry))
+ else:
+ val = repr(this_entry)
+ return '%s%s%s%s%s' % (indent_string,
+ self._decode_element(self._quote(entry, multiline=False)),
+ self._a_to_u(' = '),
+ val,
+ self._decode_element(comment))
+
+
+ def _write_marker(self, indent_string, depth, entry, comment):
+ """Write a section marker line"""
+ return '%s%s%s%s%s' % (indent_string,
+ self._a_to_u('[' * depth),
+ self._quote(self._decode_element(entry), multiline=False),
+ self._a_to_u(']' * depth),
+ self._decode_element(comment))
+
+
+ def _handle_comment(self, comment):
+ """Deal with a comment."""
+ if not comment:
+ return ''
+ start = self.indent_type
+ if not comment.startswith('#'):
+ start += self._a_to_u(' # ')
+ return (start + comment)
+
+
+ # Public methods
+
+ def write(self, outfile=None, section=None):
+ """
+ Write the current ConfigObj as a file
+
+ tekNico: FIXME: use StringIO instead of real files
+
+ >>> filename = a.filename
+ >>> a.filename = 'test.ini'
+ >>> a.write()
+ >>> a.filename = filename
+ >>> a == ConfigObj('test.ini', raise_errors=True)
+ 1
+ """
+ if self.indent_type is None:
+ # this can be true if initialised from a dictionary
+ self.indent_type = DEFAULT_INDENT_TYPE
+
+ out = []
+ cs = self._a_to_u('#')
+ csp = self._a_to_u('# ')
+ if section is None:
+ int_val = self.interpolation
+ self.interpolation = False
+ section = self
+ for line in self.initial_comment:
+ line = self._decode_element(line)
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(cs):
+ line = csp + line
+ out.append(line)
+
+ indent_string = self.indent_type * section.depth
+ for entry in (section.scalars + section.sections):
+ if entry in section.defaults:
+ # don't write out default values
+ continue
+ for comment_line in section.comments[entry]:
+ comment_line = self._decode_element(comment_line.lstrip())
+ if comment_line and not comment_line.startswith(cs):
+ comment_line = csp + comment_line
+ out.append(indent_string + comment_line)
+ this_entry = section[entry]
+ comment = self._handle_comment(section.inline_comments[entry])
+
+ if isinstance(this_entry, dict):
+ # a section
+ out.append(self._write_marker(
+ indent_string,
+ this_entry.depth,
+ entry,
+ comment))
+ out.extend(self.write(section=this_entry))
+ else:
+ out.append(self._write_line(
+ indent_string,
+ entry,
+ this_entry,
+ comment))
+
+ if section is self:
+ for line in self.final_comment:
+ line = self._decode_element(line)
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(cs):
+ line = csp + line
+ out.append(line)
+ self.interpolation = int_val
+
+ if section is not self:
+ return out
+
+ if (self.filename is None) and (outfile is None):
+ # output a list of lines
+ # might need to encode
+ # NOTE: This will *screw* UTF16, each line will start with the BOM
+ if self.encoding:
+ out = [l.encode(self.encoding) for l in out]
+ if (self.BOM and ((self.encoding is None) or
+ (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
+ # Add the UTF8 BOM
+ if not out:
+ out.append('')
+ out[0] = BOM_UTF8 + out[0]
+ return out
+
+ # Turn the list to a string, joined with correct newlines
+ newline = self.newlines or os.linesep
+ output = self._a_to_u(newline).join(out)
+ if self.encoding:
+ output = output.encode(self.encoding)
+ if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
+ # Add the UTF8 BOM
+ output = BOM_UTF8 + output
+
+ if not output.endswith(newline):
+ output += newline
+ if outfile is not None:
+ outfile.write(output)
+ else:
+ h = open(self.filename, 'wb')
+ h.write(output)
+ h.close()
+
+
+ def validate(self, validator, preserve_errors=False, copy=False,
+ section=None):
+ """
+ Test the ConfigObj against a configspec.
+
+ It uses the ``validator`` object from *validate.py*.
+
+ To run ``validate`` on the current ConfigObj, call: ::
+
+ test = config.validate(validator)
+
+        (Normally the configspec will have been passed in when the ConfigObj
+        was created, but you can also dynamically assign a dictionary of checks
+        to the ``configspec`` attribute of a section.)
+
+ It returns ``True`` if everything passes, or a dictionary of
+ pass/fails (True/False). If every member of a subsection passes, it
+ will just have the value ``True``. (It also returns ``False`` if all
+ members fail).
+
+ In addition, it converts the values from strings to their native
+ types if their checks pass (and ``stringify`` is set).
+
+ If ``preserve_errors`` is ``True`` (``False`` is default) then instead
+        of marking a fail with ``False``, it will preserve the actual
+ exception object. This can contain info about the reason for failure.
+ For example the ``VdtValueTooSmallError`` indicates that the value
+ supplied was too small. If a value (or section) is missing it will
+ still be marked as ``False``.
+
+ You must have the validate module to use ``preserve_errors=True``.
+
+ You can then use the ``flatten_errors`` function to turn your nested
+ results dictionary into a flattened list of failures - useful for
+ displaying meaningful error messages.
+ """
+ if section is None:
+ if self.configspec is None:
+ raise ValueError('No configspec supplied.')
+ if preserve_errors:
+ # We do this once to remove a top level dependency on the validate module
+ # Which makes importing configobj faster
+ from validate import VdtMissingValue
+ self._vdtMissingValue = VdtMissingValue
+
+ section = self
+
+ if copy:
+ section.initial_comment = section.configspec.initial_comment
+ section.final_comment = section.configspec.final_comment
+ section.encoding = section.configspec.encoding
+ section.BOM = section.configspec.BOM
+ section.newlines = section.configspec.newlines
+ section.indent_type = section.configspec.indent_type
+
+ #
+ configspec = section.configspec
+ self._set_configspec(section, copy)
+
+ def validate_entry(entry, spec, val, missing, ret_true, ret_false):
+ try:
+ check = validator.check(spec,
+ val,
+ missing=missing
+ )
+ except validator.baseErrorClass, e:
+ if not preserve_errors or isinstance(e, self._vdtMissingValue):
+ out[entry] = False
+ else:
+ # preserve the error
+ out[entry] = e
+ ret_false = False
+ ret_true = False
+ else:
+ try:
+ section.default_values.pop(entry, None)
+ except AttributeError:
+ # For Python 2.2 compatibility
+ try:
+ del section.default_values[entry]
+ except KeyError:
+ pass
+
+ try:
+ section.default_values[entry] = validator.get_default_value(configspec[entry])
+ except (KeyError, AttributeError):
+ # No default or validator has no 'get_default_value' (e.g. SimpleVal)
+ pass
+
+ ret_false = False
+ out[entry] = True
+ if self.stringify or missing:
+ # if we are doing type conversion
+ # or the value is a supplied default
+ if not self.stringify:
+ if isinstance(check, (list, tuple)):
+ # preserve lists
+ check = [self._str(item) for item in check]
+ elif missing and check is None:
+ # convert the None from a default to a ''
+ check = ''
+ else:
+ check = self._str(check)
+ if (check != val) or missing:
+ section[entry] = check
+ if not copy and missing and entry not in section.defaults:
+ section.defaults.append(entry)
+ return ret_true, ret_false
+
+ #
+ out = {}
+ ret_true = True
+ ret_false = True
+
+ unvalidated = [k for k in section.scalars if k not in configspec]
+ incorrect_sections = [k for k in configspec.sections if k in section.scalars]
+ incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
+
+ for entry in configspec.scalars:
+ if entry in ('__many__', '___many___'):
+ # reserved names
+ continue
+
+ if (not entry in section.scalars) or (entry in section.defaults):
+ # missing entries
+ # or entries from defaults
+ missing = True
+ val = None
+ if copy and not entry in section.scalars:
+ # copy comments
+ section.comments[entry] = (
+ configspec.comments.get(entry, []))
+ section.inline_comments[entry] = (
+ configspec.inline_comments.get(entry, ''))
+ #
+ else:
+ missing = False
+ val = section[entry]
+
+ ret_true, ret_false = validate_entry(entry, configspec[entry], val,
+ missing, ret_true, ret_false)
+
+ many = None
+ if '__many__' in configspec.scalars:
+ many = configspec['__many__']
+ elif '___many___' in configspec.scalars:
+ many = configspec['___many___']
+
+ if many is not None:
+ for entry in unvalidated:
+ val = section[entry]
+ ret_true, ret_false = validate_entry(entry, many, val, False,
+ ret_true, ret_false)
+
+ for entry in incorrect_scalars:
+ ret_true = False
+ if not preserve_errors:
+ out[entry] = False
+ else:
+ ret_false = False
+ msg = 'Value %r was provided as a section' % entry
+ out[entry] = validator.baseErrorClass(msg)
+ for entry in incorrect_sections:
+ ret_true = False
+ if not preserve_errors:
+ out[entry] = False
+ else:
+ ret_false = False
+ msg = 'Section %r was provided as a single value' % entry
+ out[entry] = validator.baseErrorClass(msg)
+
+ # Missing sections will have been created as empty ones when the
+ # configspec was read.
+ for entry in section.sections:
+ # FIXME: this means DEFAULT is not copied in copy mode
+ if section is self and entry == 'DEFAULT':
+ continue
+ if section[entry].configspec is None:
+ continue
+ if copy:
+ section.comments[entry] = configspec.comments.get(entry, [])
+ section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
+ check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
+ out[entry] = check
+ if check == False:
+ ret_true = False
+ elif check == True:
+ ret_false = False
+ else:
+ ret_true = False
+ ret_false = False
+ #
+ if ret_true:
+ return True
+ elif ret_false:
+ return False
+ return out
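+
+    # A minimal usage sketch for ``validate`` (the spec and values below are
+    # made up; requires the external ``validate`` module):
+    #
+    #     >>> from validate import Validator
+    #     >>> spec = ['port = integer(0, 65535)', 'debug = boolean(default=False)']
+    #     >>> cfg = ConfigObj(['port = 8080'], configspec=spec)
+    #     >>> cfg.validate(Validator())
+    #     True
+    #     >>> cfg['port'], cfg['debug']
+    #     (8080, False)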
+
+
+ def reset(self):
+ """Clear ConfigObj instance and restore to 'freshly created' state."""
+ self.clear()
+ self._initialise()
+ # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
+ # requires an empty dictionary
+ self.configspec = None
+ # Just to be sure ;-)
+ self._original_configspec = None
+
+
+ def reload(self):
+ """
+ Reload a ConfigObj from file.
+
+ This method raises a ``ReloadError`` if the ConfigObj doesn't have
+ a filename attribute pointing to a file.
+ """
+ if not isinstance(self.filename, basestring):
+ raise ReloadError()
+
+ filename = self.filename
+ current_options = {}
+ for entry in OPTION_DEFAULTS:
+ if entry == 'configspec':
+ continue
+ current_options[entry] = getattr(self, entry)
+
+ configspec = self._original_configspec
+ current_options['configspec'] = configspec
+
+ self.clear()
+ self._initialise(current_options)
+ self._load(filename, configspec)
+
+
+
+class SimpleVal(object):
+ """
+ A simple validator.
+ Can be used to check that all members expected are present.
+
+ To use it, provide a configspec with all your members in (the value given
+ will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
+ method of your ``ConfigObj``. ``validate`` will return ``True`` if all
+ members are present, or a dictionary with True/False meaning
+ present/missing. (Whole missing sections will be replaced with ``False``)
+ """
+
+ def __init__(self):
+ self.baseErrorClass = ConfigObjError
+
+ def check(self, check, member, missing=False):
+ """A dummy check method, always returns the value unchanged."""
+ if missing:
+ raise self.baseErrorClass()
+ return member
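+
+# A minimal illustrative use of ``SimpleVal`` (the keys below are made up):
+# every member named in the configspec must be present; values are left
+# unchanged.
+#
+#     cfg = ConfigObj(['name = bzr'],
+#                     configspec=['name = ignored', 'email = ignored'])
+#     cfg.validate(SimpleVal())    ->  {'name': True, 'email': False}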
+
+
+# Check / processing functions for options
+def flatten_errors(cfg, res, levels=None, results=None):
+ """
+ An example function that will turn a nested dictionary of results
+ (as returned by ``ConfigObj.validate``) into a flat list.
+
+ ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
+ dictionary returned by ``validate``.
+
+ (This is a recursive function, so you shouldn't use the ``levels`` or
+ ``results`` arguments - they are used by the function.)
+
+ Returns a list of keys that failed. Each member of the list is a tuple :
+
+ ::
+
+ ([list of sections...], key, result)
+
+ If ``validate`` was called with ``preserve_errors=False`` (the default)
+ then ``result`` will always be ``False``.
+
+ *list of sections* is a flattened list of sections that the key was found
+ in.
+
+ If the section was missing (or a section was expected and a scalar provided
+ - or vice-versa) then key will be ``None``.
+
+ If the value (or section) was missing then ``result`` will be ``False``.
+
+ If ``validate`` was called with ``preserve_errors=True`` and a value
+ was present, but failed the check, then ``result`` will be the exception
+ object returned. You can use this as a string that describes the failure.
+
+ For example *The value "3" is of the wrong type*.
+
+ >>> import validate
+ >>> vtor = validate.Validator()
+ >>> my_ini = '''
+ ... option1 = True
+ ... [section1]
+ ... option1 = True
+ ... [section2]
+ ... another_option = Probably
+ ... [section3]
+ ... another_option = True
+ ... [[section3b]]
+ ... value = 3
+ ... value2 = a
+ ... value3 = 11
+ ... '''
+ >>> my_cfg = '''
+ ... option1 = boolean()
+ ... option2 = boolean()
+ ... option3 = boolean(default=Bad_value)
+ ... [section1]
+ ... option1 = boolean()
+ ... option2 = boolean()
+ ... option3 = boolean(default=Bad_value)
+ ... [section2]
+ ... another_option = boolean()
+ ... [section3]
+ ... another_option = boolean()
+ ... [[section3b]]
+ ... value = integer
+ ... value2 = integer
+ ... value3 = integer(0, 10)
+ ... [[[section3b-sub]]]
+ ... value = string
+ ... [section4]
+ ... another_option = boolean()
+ ... '''
+ >>> cs = my_cfg.split('\\n')
+ >>> ini = my_ini.split('\\n')
+ >>> cfg = ConfigObj(ini, configspec=cs)
+ >>> res = cfg.validate(vtor, preserve_errors=True)
+ >>> errors = []
+ >>> for entry in flatten_errors(cfg, res):
+ ... section_list, key, error = entry
+ ... section_list.insert(0, '[root]')
+ ... if key is not None:
+ ... section_list.append(key)
+ ... else:
+ ... section_list.append('[missing]')
+ ... section_string = ', '.join(section_list)
+ ... errors.append((section_string, ' = ', error))
+ >>> errors.sort()
+ >>> for entry in errors:
+ ... print entry[0], entry[1], (entry[2] or 0)
+ [root], option2 = 0
+ [root], option3 = the value "Bad_value" is of the wrong type.
+ [root], section1, option2 = 0
+ [root], section1, option3 = the value "Bad_value" is of the wrong type.
+ [root], section2, another_option = the value "Probably" is of the wrong type.
+ [root], section3, section3b, section3b-sub, [missing] = 0
+ [root], section3, section3b, value2 = the value "a" is of the wrong type.
+ [root], section3, section3b, value3 = the value "11" is too big.
+ [root], section4, [missing] = 0
+ """
+ if levels is None:
+ # first time called
+ levels = []
+ results = []
+ if res is True:
+ return results
+ if res is False or isinstance(res, Exception):
+ results.append((levels[:], None, res))
+ if levels:
+ levels.pop()
+ return results
+ for (key, val) in res.items():
+ if val == True:
+ continue
+ if isinstance(cfg.get(key), dict):
+ # Go down one level
+ levels.append(key)
+ flatten_errors(cfg[key], val, levels, results)
+ continue
+ results.append((levels[:], key, val))
+ #
+ # Go up one level
+ if levels:
+ levels.pop()
+ #
+ return results
+
+
+"""*A programming language is a medium of expression.* - Paul Graham"""
diff --git a/bzrlib/util/simplemapi.py b/bzrlib/util/simplemapi.py
new file mode 100644
index 0000000..cdd31e7
--- /dev/null
+++ b/bzrlib/util/simplemapi.py
@@ -0,0 +1,259 @@
+"""
+Copyright (c) 2007 Ian Cook and John Popplewell
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Date : 11 August 2007
+Version : 1.0.1
+Contact : John Popplewell
+Email : john@johnnypops.demon.co.uk
+Web : http://www.johnnypops.demon.co.uk/python/
+Origin : Based on the original script by Ian Cook
+ http://www.kirbyfooty.com/simplemapi.py
+Comments: Works (and tested) with:
+ Outlook Express, Outlook 97 and 2000,
+ Eudora, Incredimail and Mozilla Thunderbird (1.5.0.2)
+Thanks : Werner F. Bruhin and Michele Petrazzo on the ctypes list.
+
+If you have any bug-fixes, enhancements or suggestions regarding this
+software, please contact me at the above email address.
+"""
+
+from __future__ import absolute_import
+
+import os
+from ctypes import *
+
+FLAGS = c_ulong
+LHANDLE = c_ulong
+LPLHANDLE = POINTER(LHANDLE)
+
+# Return codes
+SUCCESS_SUCCESS = 0
+MAPI_USER_ABORT = 1
+MAPI_E_USER_ABORT = MAPI_USER_ABORT
+MAPI_E_FAILURE = 2
+MAPI_E_LOGON_FAILURE = 3
+MAPI_E_LOGIN_FAILURE = MAPI_E_LOGON_FAILURE
+MAPI_E_DISK_FULL = 4
+MAPI_E_INSUFFICIENT_MEMORY = 5
+MAPI_E_ACCESS_DENIED = 6
+MAPI_E_TOO_MANY_SESSIONS = 8
+MAPI_E_TOO_MANY_FILES = 9
+MAPI_E_TOO_MANY_RECIPIENTS = 10
+MAPI_E_ATTACHMENT_NOT_FOUND = 11
+MAPI_E_ATTACHMENT_OPEN_FAILURE = 12
+MAPI_E_ATTACHMENT_WRITE_FAILURE = 13
+MAPI_E_UNKNOWN_RECIPIENT = 14
+MAPI_E_BAD_RECIPTYPE = 15
+MAPI_E_NO_MESSAGES = 16
+MAPI_E_INVALID_MESSAGE = 17
+MAPI_E_TEXT_TOO_LARGE = 18
+MAPI_E_INVALID_SESSION = 19
+MAPI_E_TYPE_NOT_SUPPORTED = 20
+MAPI_E_AMBIGUOUS_RECIPIENT = 21
+MAPI_E_AMBIG_RECIP = MAPI_E_AMBIGUOUS_RECIPIENT
+MAPI_E_MESSAGE_IN_USE = 22
+MAPI_E_NETWORK_FAILURE = 23
+MAPI_E_INVALID_EDITFIELDS = 24
+MAPI_E_INVALID_RECIPS = 25
+MAPI_E_NOT_SUPPORTED = 26
+# Recipient class
+MAPI_ORIG = 0
+MAPI_TO = 1
+# Send flags
+MAPI_LOGON_UI = 1
+MAPI_DIALOG = 8
+
+class MapiRecipDesc(Structure):
+ _fields_ = [
+ ('ulReserved', c_ulong),
+ ('ulRecipClass', c_ulong),
+ ('lpszName', c_char_p),
+ ('lpszAddress', c_char_p),
+ ('ulEIDSize', c_ulong),
+ ('lpEntryID', c_void_p),
+ ]
+lpMapiRecipDesc = POINTER(MapiRecipDesc)
+lppMapiRecipDesc = POINTER(lpMapiRecipDesc)
+
+class MapiFileDesc(Structure):
+ _fields_ = [
+ ('ulReserved', c_ulong),
+ ('flFlags', c_ulong),
+ ('nPosition', c_ulong),
+ ('lpszPathName', c_char_p),
+ ('lpszFileName', c_char_p),
+ ('lpFileType', c_void_p),
+ ]
+lpMapiFileDesc = POINTER(MapiFileDesc)
+
+class MapiMessage(Structure):
+ _fields_ = [
+ ('ulReserved', c_ulong),
+ ('lpszSubject', c_char_p),
+ ('lpszNoteText', c_char_p),
+ ('lpszMessageType', c_char_p),
+ ('lpszDateReceived', c_char_p),
+ ('lpszConversationID', c_char_p),
+ ('flFlags', FLAGS),
+ ('lpOriginator', lpMapiRecipDesc),
+ ('nRecipCount', c_ulong),
+ ('lpRecips', lpMapiRecipDesc),
+ ('nFileCount', c_ulong),
+ ('lpFiles', lpMapiFileDesc),
+ ]
+lpMapiMessage = POINTER(MapiMessage)
+
+MAPI = windll.mapi32
+MAPISendMail = MAPI.MAPISendMail
+MAPISendMail.restype = c_ulong
+MAPISendMail.argtypes = (LHANDLE, c_ulong, lpMapiMessage, FLAGS, c_ulong)
+
+MAPIResolveName = MAPI.MAPIResolveName
+MAPIResolveName.restype = c_ulong
+MAPIResolveName.argtypes= (LHANDLE, c_ulong, c_char_p, FLAGS, c_ulong, lppMapiRecipDesc)
+
+MAPIFreeBuffer = MAPI.MAPIFreeBuffer
+MAPIFreeBuffer.restype = c_ulong
+MAPIFreeBuffer.argtypes = (c_void_p, )
+
+MAPILogon = MAPI.MAPILogon
+MAPILogon.restype = c_ulong
+MAPILogon.argtypes = (LHANDLE, c_char_p, c_char_p, FLAGS, c_ulong, LPLHANDLE)
+
+MAPILogoff = MAPI.MAPILogoff
+MAPILogoff.restype = c_ulong
+MAPILogoff.argtypes = (LHANDLE, c_ulong, FLAGS, c_ulong)
+
+
+class MAPIError(WindowsError):
+
+ def __init__(self, code):
+ WindowsError.__init__(self)
+ self.code = code
+
+ def __str__(self):
+ return 'MAPI error %d' % (self.code,)
+
+
+def _logon(profileName=None, password=None):
+ pSession = LHANDLE()
+ rc = MAPILogon(0, profileName, password, MAPI_LOGON_UI, 0, byref(pSession))
+ if rc != SUCCESS_SUCCESS:
+ raise MAPIError, rc
+ return pSession
+
+
+def _logoff(session):
+ rc = MAPILogoff(session, 0, 0, 0)
+ if rc != SUCCESS_SUCCESS:
+ raise MAPIError, rc
+
+
+def _resolveName(session, name):
+ pRecipDesc = lpMapiRecipDesc()
+ rc = MAPIResolveName(session, 0, name, 0, 0, byref(pRecipDesc))
+ if rc != SUCCESS_SUCCESS:
+ raise MAPIError, rc
+ rd = pRecipDesc.contents
+ name, address = rd.lpszName, rd.lpszAddress
+ rc = MAPIFreeBuffer(pRecipDesc)
+ if rc != SUCCESS_SUCCESS:
+ raise MAPIError, rc
+ return name, address
+
+
+def _sendMail(session, recipient, subject, body, attach):
+ nFileCount = len(attach)
+ if attach:
+ MapiFileDesc_A = MapiFileDesc * len(attach)
+ fda = MapiFileDesc_A()
+ for fd, fa in zip(fda, attach):
+ fd.ulReserved = 0
+ fd.flFlags = 0
+ fd.nPosition = -1
+ fd.lpszPathName = fa
+ fd.lpszFileName = None
+ fd.lpFileType = None
+ lpFiles = fda
+ else:
+ lpFiles = lpMapiFileDesc()
+
+ RecipWork = recipient.split(';')
+ RecipCnt = len(RecipWork)
+ MapiRecipDesc_A = MapiRecipDesc * len(RecipWork)
+ rda = MapiRecipDesc_A()
+ for rd, ra in zip(rda, RecipWork):
+ rd.ulReserved = 0
+ rd.ulRecipClass = MAPI_TO
+ try:
+ rd.lpszName, rd.lpszAddress = _resolveName(session, ra)
+ except WindowsError:
+            # workaround for Mozilla Thunderbird
+ rd.lpszName, rd.lpszAddress = None, ra
+ rd.ulEIDSize = 0
+ rd.lpEntryID = None
+ recip = rda
+
+ msg = MapiMessage(0, subject, body, None, None, None, 0, lpMapiRecipDesc(),
+ RecipCnt, recip,
+ nFileCount, lpFiles)
+
+ rc = MAPISendMail(session, 0, byref(msg), MAPI_DIALOG, 0)
+ if rc != SUCCESS_SUCCESS:
+ raise MAPIError, rc
+
+
+def SendMail(recipient, subject="", body="", attachfiles=""):
+ """Post an e-mail message using Simple MAPI
+
+ recipient - string: address to send to (multiple addresses separated with a semicolon)
+ subject - string: subject header
+ body - string: message text
+    attachfiles - string: files to attach (multiple attachments separated with a semicolon)
+ """
+
+ attach = []
+ AttachWork = attachfiles.split(';')
+ for file in AttachWork:
+ if os.path.exists(file):
+ attach.append(file)
+ attach = map(os.path.abspath, attach)
+
+ restore = os.getcwd()
+ try:
+ session = _logon()
+ try:
+ _sendMail(session, recipient, subject, body, attach)
+ finally:
+ _logoff(session)
+ finally:
+ os.chdir(restore)
+
+
+if __name__ == '__main__':
+ import sys
+ recipient = "test@johnnypops.demon.co.uk"
+ subject = "Test Message Subject"
+ body = "Hi,\r\n\r\nthis is a quick test message,\r\n\r\ncheers,\r\nJohn."
+ attachment = sys.argv[0]
+ SendMail(recipient, subject, body, attachment)
+
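A minimal usage sketch for this module, assuming a Windows host with a Simple MAPI
mail client installed; the addresses and file paths are made up:

    from bzrlib.util import simplemapi

    try:
        # Multiple recipients and attachments are separated with semicolons.
        simplemapi.SendMail('alice@example.com;bob@example.com',
                            subject='Nightly build',
                            body='Logs attached.',
                            attachfiles='C:\\logs\\build.log;C:\\logs\\test.log')
    except simplemapi.MAPIError, e:
        print 'MAPI call failed with code %d' % e.code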
diff --git a/bzrlib/util/tests/__init__.py b/bzrlib/util/tests/__init__.py
new file mode 100644
index 0000000..0ca9514
--- /dev/null
+++ b/bzrlib/util/tests/__init__.py
@@ -0,0 +1 @@
+True
diff --git a/bzrlib/version.py b/bzrlib/version.py
new file mode 100644
index 0000000..6090069
--- /dev/null
+++ b/bzrlib/version.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Report on version of bzrlib"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+import bzrlib
+from bzrlib import (
+ config,
+ controldir,
+ errors,
+ osutils,
+ trace,
+ )
+
+
+def show_version(show_config=True, show_copyright=True, to_file=None):
+ import platform
+
+ if to_file is None:
+ to_file = sys.stdout
+ to_file.write("Bazaar (bzr) %s\n" % bzrlib.__version__)
+ # is bzrlib itself in a branch?
+ src_tree = _get_bzr_source_tree()
+ if src_tree:
+ src_revision_id = src_tree.last_revision()
+ revno = src_tree.branch.revision_id_to_revno(src_revision_id)
+ to_file.write(" from bzr checkout %s\n" % (src_tree.basedir,))
+ to_file.write(" revision: %s\n" % (revno,))
+ to_file.write(" revid: %s\n" % (src_revision_id,))
+ to_file.write(" branch nick: %s\n" % (src_tree.branch.nick,))
+
+ to_file.write(" Python interpreter: ")
+ # show path to python interpreter
+    # (bzr.exe uses the python interpreter from pythonXY.dll,
+    # but sys.executable points to bzr.exe itself)
+ # however, sys.frozen exists if running from bzr.exe
+ # see http://www.py2exe.org/index.cgi/Py2exeEnvironment
+ if getattr(sys, 'frozen', None) is None: # if not bzr.exe
+ to_file.write(sys.executable + ' ')
+ else:
+ # pythonXY.dll
+ basedir = os.path.dirname(sys.executable)
+ python_dll = "python%d%d.dll" % sys.version_info[:2]
+ to_file.write(os.path.join(basedir, python_dll) + ' ')
+ # and now version of python interpreter
+ to_file.write(bzrlib._format_version_tuple(sys.version_info))
+ to_file.write('\n')
+
+ to_file.write(" Python standard library:" + ' ')
+ to_file.write(os.path.dirname(os.__file__) + '\n')
+ to_file.write(" Platform: %s\n" % platform.platform(aliased=1))
+ to_file.write(" bzrlib: ")
+ if len(bzrlib.__path__) > 1:
+ # print repr, which is a good enough way of making it clear it's
+ # more than one element (eg ['/foo/bar', '/foo/bzr'])
+ to_file.write(repr(bzrlib.__path__) + '\n')
+ else:
+ to_file.write(bzrlib.__path__[0] + '\n')
+ if show_config:
+ config_dir = osutils.normpath(config.config_dir()) # use native slashes
+ if not isinstance(config_dir, unicode):
+ config_dir = config_dir.decode(osutils.get_user_encoding())
+ to_file.write(" Bazaar configuration: %s\n" % (config_dir,))
+ to_file.write(" Bazaar log file: ")
+ to_file.write(trace._bzr_log_filename + '\n')
+ if show_copyright:
+ to_file.write('\n')
+ to_file.write(bzrlib.__copyright__ + '\n')
+ to_file.write("http://bazaar.canonical.com/\n")
+ to_file.write('\n')
+ to_file.write("bzr comes with ABSOLUTELY NO WARRANTY. bzr is free software, and\n")
+ to_file.write("you may use, modify and redistribute it under the terms of the GNU\n")
+ to_file.write("General Public License version 2 or later.\n")
+ to_file.write("\nBazaar is part of the GNU Project to produce a free operating "
+ "system.\n")
+ to_file.write('\n')
+
+
+def _get_bzr_source_tree():
+ """Return the WorkingTree for bzr source, if any.
+
+ If bzr is not being run from its working tree, returns None.
+ """
+ try:
+ control = controldir.ControlDir.open_containing(__file__)[0]
+ return control.open_workingtree(recommend_upgrade=False)
+ except (errors.NotBranchError, errors.UnknownFormatError,
+ errors.NoWorkingTree):
+ return None
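A minimal sketch of calling show_version programmatically, assuming bzrlib is
importable; capturing the output in a StringIO is only for illustration:

    from cStringIO import StringIO
    from bzrlib import version

    out = StringIO()
    version.show_version(show_config=False, show_copyright=False, to_file=out)
    print out.getvalue()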
diff --git a/bzrlib/version_info_formats/__init__.py b/bzrlib/version_info_formats/__init__.py
new file mode 100644
index 0000000..d1f8653
--- /dev/null
+++ b/bzrlib/version_info_formats/__init__.py
@@ -0,0 +1,207 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Routines for extracting all version information from a bzr branch."""
+
+from __future__ import absolute_import
+
+import time
+
+from bzrlib.osutils import local_time_offset, format_date
+from bzrlib import (
+ registry,
+ revision as _mod_revision,
+ )
+
+
+def create_date_str(timestamp=None, offset=None):
+ """Just a wrapper around format_date to provide the right format.
+
+    We don't want to use '%a' in the time string, because it is locale
+    dependent. We also want to force the timezone to 'original' and to show
+    the offset.
+
+ Without parameters this function yields the current date in the local
+ time zone.
+ """
+ if timestamp is None and offset is None:
+ timestamp = time.time()
+ offset = local_time_offset()
+ return format_date(timestamp, offset, date_fmt='%Y-%m-%d %H:%M:%S',
+ timezone='original', show_offset=True)
+
+
+class VersionInfoBuilder(object):
+ """A class which lets you build up information about a revision."""
+
+ def __init__(self, branch, working_tree=None,
+ check_for_clean=False,
+ include_revision_history=False,
+ include_file_revisions=False,
+ template=None,
+ revision_id=None):
+ """Build up information about the given branch.
+ If working_tree is given, it can be checked for changes.
+
+ :param branch: The branch to work on
+ :param working_tree: If supplied, preferentially check
+ the working tree for changes.
+ :param check_for_clean: If False, we will skip the expense
+ of looking for changes.
+ :param include_revision_history: If True, the output
+ will include the full mainline revision history, including
+ date and message
+        :param include_file_revisions: If True, the output will
+            include the explicit last-changed revision for each file.
+ :param template: Template for the output formatting, not used by
+ all builders.
+ :param revision_id: Revision id to print version for (optional)
+ """
+ self._branch = branch
+ self._check = check_for_clean
+ self._include_history = include_revision_history
+ self._include_file_revs = include_file_revisions
+ self._template = template
+
+ self._clean = None
+ self._file_revisions = {}
+ self._revision_id = revision_id
+
+ if self._revision_id is None:
+ self._tree = working_tree
+ self._working_tree = working_tree
+ else:
+ self._tree = self._branch.repository.revision_tree(self._revision_id)
+ # the working tree is not relevant if an explicit revision was specified
+ self._working_tree = None
+
+ def _extract_file_revisions(self):
+ """Extract the working revisions for all files"""
+
+ # Things seem clean if we never look :)
+ self._clean = True
+
+ if self._working_tree is self._tree:
+ basis_tree = self._working_tree.basis_tree()
+ # TODO: jam 20070215 The working tree should actually be locked at
+ # a higher level, but this will do for now.
+ self._working_tree.lock_read()
+ else:
+ basis_tree = self._branch.repository.revision_tree(self._revision_id)
+
+ basis_tree.lock_read()
+ try:
+ # Build up the list from the basis inventory
+ for info in basis_tree.list_files(include_root=True):
+ self._file_revisions[info[0]] = info[-1].revision
+
+ if not self._check or self._working_tree is not self._tree:
+ return
+
+ delta = self._working_tree.changes_from(basis_tree,
+ include_root=True)
+
+            # Use a 2-pass algorithm for renames: you might have renamed
+            # something out of the way and then created a new file, in which
+            # case we would rather see the 'new' marker; or you might have
+            # removed the target and then renamed, in which case we would
+            # rather see the 'renamed' marker.
+ for (old_path, new_path, file_id,
+ kind, text_mod, meta_mod) in delta.renamed:
+ self._clean = False
+ self._file_revisions[old_path] = u'renamed to %s' % (new_path,)
+ for path, file_id, kind in delta.removed:
+ self._clean = False
+ self._file_revisions[path] = 'removed'
+ for path, file_id, kind in delta.added:
+ self._clean = False
+ self._file_revisions[path] = 'new'
+ for (old_path, new_path, file_id,
+ kind, text_mod, meta_mod) in delta.renamed:
+ self._clean = False
+ self._file_revisions[new_path] = u'renamed from %s' % (old_path,)
+ for path, file_id, kind, text_mod, meta_mod in delta.modified:
+ self._clean = False
+ self._file_revisions[path] = 'modified'
+
+ for path in self._working_tree.unknowns():
+ self._clean = False
+ self._file_revisions[path] = 'unversioned'
+ finally:
+ basis_tree.unlock()
+ if self._working_tree is not None:
+ self._working_tree.unlock()
+
+ def _iter_revision_history(self):
+ """Find the messages for all revisions in history."""
+
+ last_rev = self._get_revision_id()
+
+ repository = self._branch.repository
+ repository.lock_read()
+ try:
+ graph = repository.get_graph()
+ revhistory = list(graph.iter_lefthand_ancestry(
+ last_rev, [_mod_revision.NULL_REVISION]))
+ for revision_id in reversed(revhistory):
+ rev = repository.get_revision(revision_id)
+ yield (rev.revision_id, rev.message,
+ rev.timestamp, rev.timezone)
+ finally:
+ repository.unlock()
+
+ def _get_revision_id(self):
+ """Get the revision id we are working on."""
+ if self._revision_id is not None:
+ return self._revision_id
+ if self._working_tree is not None:
+ return self._working_tree.last_revision()
+ return self._branch.last_revision()
+
+ def _get_revno_str(self, revision_id):
+ numbers = self._branch.revision_id_to_dotted_revno(revision_id)
+ revno_str = '.'.join([str(num) for num in numbers])
+ return revno_str
+
+ def generate(self, to_file):
+ """Output the version information to the supplied file.
+
+ :param to_file: The file to write the stream to. The output
+ will already be encoded, so to_file should not try
+ to change encodings.
+ :return: None
+ """
+ raise NotImplementedError(VersionInfoBuilder.generate)
+
+
+format_registry = registry.Registry()
+
+
+format_registry.register_lazy(
+ 'rio',
+ 'bzrlib.version_info_formats.format_rio',
+ 'RioVersionInfoBuilder',
+ 'Version info in RIO (simple text) format (default).')
+format_registry.default_key = 'rio'
+format_registry.register_lazy(
+ 'python',
+ 'bzrlib.version_info_formats.format_python',
+ 'PythonVersionInfoBuilder',
+ 'Version info in Python format.')
+format_registry.register_lazy(
+ 'custom',
+ 'bzrlib.version_info_formats.format_custom',
+ 'CustomVersionInfoBuilder',
+ 'Version info in Custom template-based format.')
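A minimal sketch of driving a builder through this registry, assuming `branch` is
an already-open bzrlib Branch; writing to sys.stdout is only for illustration:

    import sys
    from bzrlib import version_info_formats

    builder_class = version_info_formats.format_registry.get('rio')
    builder = builder_class(branch, check_for_clean=False)
    builder.generate(sys.stdout)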
diff --git a/bzrlib/version_info_formats/format_custom.py b/bzrlib/version_info_formats/format_custom.py
new file mode 100644
index 0000000..353301d
--- /dev/null
+++ b/bzrlib/version_info_formats/format_custom.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2007 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A generator which creates a template-based output from the current
+ tree info."""
+
+from __future__ import absolute_import
+
+from bzrlib import errors
+from bzrlib.revision import (
+ NULL_REVISION,
+ )
+from bzrlib.lazy_regex import lazy_compile
+from bzrlib.version_info_formats import (
+ create_date_str,
+ VersionInfoBuilder,
+ )
+
+
+class Template(object):
+ """A simple template engine.
+
+ >>> t = Template()
+ >>> t.add('test', 'xxx')
+ >>> print list(t.process('{test}'))
+ ['xxx']
+ >>> print list(t.process('{test} test'))
+ ['xxx', ' test']
+ >>> print list(t.process('test {test}'))
+ ['test ', 'xxx']
+ >>> print list(t.process('test {test} test'))
+ ['test ', 'xxx', ' test']
+ >>> print list(t.process('{test}\\\\n'))
+ ['xxx', '\\n']
+ >>> print list(t.process('{test}\\n'))
+ ['xxx', '\\n']
+ """
+
+ _tag_re = lazy_compile('{(\w+)}')
+
+ def __init__(self):
+ self._data = {}
+
+ def add(self, name, value):
+ self._data[name] = value
+
+ def process(self, tpl):
+ tpl = tpl.decode('string_escape')
+ pos = 0
+ while True:
+ match = self._tag_re.search(tpl, pos)
+ if not match:
+ if pos < len(tpl):
+ yield tpl[pos:]
+ break
+ start, end = match.span()
+ if start > 0:
+ yield tpl[pos:start]
+ pos = end
+ name = match.group(1)
+ try:
+ data = self._data[name]
+ except KeyError:
+ raise errors.MissingTemplateVariable(name)
+ if not isinstance(data, basestring):
+ data = str(data)
+ yield data
+
+
+class CustomVersionInfoBuilder(VersionInfoBuilder):
+ """Create a version file based on a custom template."""
+
+ def generate(self, to_file):
+ if self._template is None:
+ raise errors.NoTemplate()
+
+ info = Template()
+ info.add('build_date', create_date_str())
+ info.add('branch_nick', self._branch.nick)
+
+ revision_id = self._get_revision_id()
+ if revision_id == NULL_REVISION:
+ info.add('revno', 0)
+ else:
+ info.add('revno', self._get_revno_str(revision_id))
+ info.add('revision_id', revision_id)
+ rev = self._branch.repository.get_revision(revision_id)
+ info.add('date', create_date_str(rev.timestamp, rev.timezone))
+
+ if self._check:
+ self._extract_file_revisions()
+
+ if self._check:
+ if self._clean:
+ info.add('clean', 1)
+ else:
+ info.add('clean', 0)
+
+ to_file.writelines(info.process(self._template))
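A minimal sketch of the custom builder, assuming `branch` is an open Branch with at
least one revision; the template string and output file name are made up:

    from bzrlib.version_info_formats.format_custom import CustomVersionInfoBuilder

    template = 'revno: {revno}\nrevision id: {revision_id}\ndate: {date}\n'
    builder = CustomVersionInfoBuilder(branch, check_for_clean=False,
                                       template=template)
    out = open('version.txt', 'wb')
    try:
        builder.generate(out)
    finally:
        out.close()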
diff --git a/bzrlib/version_info_formats/format_python.py b/bzrlib/version_info_formats/format_python.py
new file mode 100644
index 0000000..1ca9ab4
--- /dev/null
+++ b/bzrlib/version_info_formats/format_python.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2006, 2009, 2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A generator which creates a python script from the current tree info"""
+
+from __future__ import absolute_import
+
+import pprint
+
+from bzrlib.revision import (
+ NULL_REVISION,
+ )
+from bzrlib.version_info_formats import (
+ create_date_str,
+ VersionInfoBuilder,
+ )
+
+
+# Header and footer for the python format
+_py_version_header = '''#!/usr/bin/env python
+"""This file is automatically generated by generate_version_info
+It uses the current working tree to determine the revision.
+So don't edit it. :)
+"""
+
+'''
+
+
+_py_version_footer = '''
+if __name__ == '__main__':
+ print 'revision: %(revno)s' % version_info
+ print 'nick: %(branch_nick)s' % version_info
+ print 'revision id: %(revision_id)s' % version_info
+'''
+
+
+class PythonVersionInfoBuilder(VersionInfoBuilder):
+ """Create a version file which is a python source module."""
+
+ def generate(self, to_file):
+ info = {'build_date':create_date_str()
+ , 'revno':None
+ , 'revision_id':None
+ , 'branch_nick':self._branch.nick
+ , 'clean':None
+ , 'date':None
+ }
+ revisions = []
+
+ revision_id = self._get_revision_id()
+ if revision_id == NULL_REVISION:
+ info['revno'] = '0'
+ else:
+ info['revno'] = self._get_revno_str(revision_id)
+ info['revision_id'] = revision_id
+ rev = self._branch.repository.get_revision(revision_id)
+ info['date'] = create_date_str(rev.timestamp, rev.timezone)
+
+ if self._check or self._include_file_revs:
+ self._extract_file_revisions()
+
+ if self._check:
+ if self._clean:
+ info['clean'] = True
+ else:
+ info['clean'] = False
+
+ info_str = pprint.pformat(info)
+ to_file.write(_py_version_header)
+ to_file.write('version_info = ')
+ to_file.write(info_str)
+ to_file.write('\n\n')
+
+ if self._include_history:
+ history = list(self._iter_revision_history())
+ revision_str = pprint.pformat(history)
+ to_file.write('revisions = ')
+ to_file.write(revision_str)
+ to_file.write('\n\n')
+ else:
+ to_file.write('revisions = {}\n\n')
+
+ if self._include_file_revs:
+ file_rev_str = pprint.pformat(self._file_revisions)
+ to_file.write('file_revisions = ')
+ to_file.write(file_rev_str)
+ to_file.write('\n\n')
+ else:
+ to_file.write('file_revisions = {}\n\n')
+
+ to_file.write(_py_version_footer)
+
+
+
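A sketch of how the generated module might be consumed, assuming its output was
saved somewhere importable as `_version` (the module name is made up):

    import _version

    print 'built from revno %s on %s' % (_version.version_info['revno'],
                                         _version.version_info['build_date'])
    if _version.version_info['clean'] is False:
        print 'warning: tree had uncommitted changes'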
diff --git a/bzrlib/version_info_formats/format_rio.py b/bzrlib/version_info_formats/format_rio.py
new file mode 100644
index 0000000..c42580a
--- /dev/null
+++ b/bzrlib/version_info_formats/format_rio.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2006, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""A generator which creates a rio stanza of the current tree info"""
+
+from __future__ import absolute_import
+
+from bzrlib import hooks
+from bzrlib.revision import (
+ NULL_REVISION,
+ )
+from bzrlib.rio import RioWriter, Stanza
+
+from bzrlib.version_info_formats import (
+ create_date_str,
+ VersionInfoBuilder,
+ )
+
+
+class RioVersionInfoBuilder(VersionInfoBuilder):
+ """This writes a rio stream out."""
+
+ def generate(self, to_file):
+ info = Stanza()
+ revision_id = self._get_revision_id()
+ if revision_id != NULL_REVISION:
+ info.add('revision-id', revision_id)
+ rev = self._branch.repository.get_revision(revision_id)
+ info.add('date', create_date_str(rev.timestamp, rev.timezone))
+ revno = self._get_revno_str(revision_id)
+ for hook in RioVersionInfoBuilder.hooks['revision']:
+ hook(rev, info)
+ else:
+ revno = '0'
+
+ info.add('build-date', create_date_str())
+ info.add('revno', revno)
+
+ if self._branch.nick is not None:
+ info.add('branch-nick', self._branch.nick)
+
+ if self._check or self._include_file_revs:
+ self._extract_file_revisions()
+
+ if self._check:
+ if self._clean:
+ info.add('clean', 'True')
+ else:
+ info.add('clean', 'False')
+
+ if self._include_history:
+ log = Stanza()
+ for (revision_id, message,
+ timestamp, timezone) in self._iter_revision_history():
+ log.add('id', revision_id)
+ log.add('message', message)
+ log.add('date', create_date_str(timestamp, timezone))
+ info.add('revisions', log.to_unicode())
+
+ if self._include_file_revs:
+ files = Stanza()
+ for path in sorted(self._file_revisions.keys()):
+ files.add('path', path)
+ files.add('revision', self._file_revisions[path])
+ info.add('file-revisions', files.to_unicode())
+
+ writer = RioWriter(to_file=to_file)
+ writer.write_stanza(info)
+
+
+class RioVersionInfoBuilderHooks(hooks.Hooks):
+ """Hooks for rio-formatted version-info output."""
+
+ def __init__(self):
+ super(RioVersionInfoBuilderHooks, self).__init__(
+ "bzrlib.version_info_formats.format_rio", "RioVersionInfoBuilder.hooks")
+ self.add_hook('revision',
+ "Invoked when adding information about a revision to the"
+ " RIO stanza that is printed. revision is called with a"
+ " revision object and a RIO stanza.", (1, 15))
+
+
+RioVersionInfoBuilder.hooks = RioVersionInfoBuilderHooks()
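A minimal sketch of extending the rio output through the 'revision' hook defined
above; the extra stanza field is made up:

    from bzrlib.version_info_formats.format_rio import RioVersionInfoBuilder

    def add_committer(rev, stanza):
        # rev is the Revision being reported, stanza is the RIO Stanza being built
        stanza.add('committer', rev.committer)

    RioVersionInfoBuilder.hooks.install_named_hook(
        'revision', add_committer, 'record the committer in version-info')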
diff --git a/bzrlib/versionedfile.py b/bzrlib/versionedfile.py
new file mode 100644
index 0000000..84d91bb
--- /dev/null
+++ b/bzrlib/versionedfile.py
@@ -0,0 +1,1963 @@
+# Copyright (C) 2006-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Versioned text file storage api."""
+
+from __future__ import absolute_import
+
+from copy import copy
+from cStringIO import StringIO
+import os
+import struct
+from zlib import adler32
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import (
+ annotate,
+ bencode,
+ errors,
+ graph as _mod_graph,
+ groupcompress,
+ index,
+ knit,
+ osutils,
+ multiparent,
+ tsort,
+ revision,
+ urlutils,
+ )
+""")
+from bzrlib.registry import Registry
+from bzrlib.textmerge import TextMerge
+
+
+adapter_registry = Registry()
+adapter_registry.register_lazy(('knit-delta-gz', 'fulltext'), 'bzrlib.knit',
+ 'DeltaPlainToFullText')
+adapter_registry.register_lazy(('knit-ft-gz', 'fulltext'), 'bzrlib.knit',
+ 'FTPlainToFullText')
+adapter_registry.register_lazy(('knit-annotated-delta-gz', 'knit-delta-gz'),
+ 'bzrlib.knit', 'DeltaAnnotatedToUnannotated')
+adapter_registry.register_lazy(('knit-annotated-delta-gz', 'fulltext'),
+ 'bzrlib.knit', 'DeltaAnnotatedToFullText')
+adapter_registry.register_lazy(('knit-annotated-ft-gz', 'knit-ft-gz'),
+ 'bzrlib.knit', 'FTAnnotatedToUnannotated')
+adapter_registry.register_lazy(('knit-annotated-ft-gz', 'fulltext'),
+ 'bzrlib.knit', 'FTAnnotatedToFullText')
+# adapter_registry.register_lazy(('knit-annotated-ft-gz', 'chunked'),
+# 'bzrlib.knit', 'FTAnnotatedToChunked')
+
+
+class ContentFactory(object):
+ """Abstract interface for insertion and retrieval from a VersionedFile.
+
+ :ivar sha1: None, or the sha1 of the content fulltext.
+ :ivar storage_kind: The native storage kind of this factory. One of
+ 'mpdiff', 'knit-annotated-ft', 'knit-annotated-delta', 'knit-ft',
+ 'knit-delta', 'fulltext', 'knit-annotated-ft-gz',
+ 'knit-annotated-delta-gz', 'knit-ft-gz', 'knit-delta-gz'.
+ :ivar key: The key of this content. Each key is a tuple with a single
+ string in it.
+ :ivar parents: A tuple of parent keys for self.key. If the object has
+ no parent information, None (as opposed to () for an empty list of
+ parents).
+ """
+
+ def __init__(self):
+ """Create a ContentFactory."""
+ self.sha1 = None
+ self.storage_kind = None
+ self.key = None
+ self.parents = None
+
+
+class ChunkedContentFactory(ContentFactory):
+ """Static data content factory.
+
+ This takes a 'chunked' list of strings. The only requirement on 'chunked' is
+    that ''.join(chunks) becomes a valid fulltext. A tuple of a single string
+ satisfies this, as does a list of lines.
+
+ :ivar sha1: None, or the sha1 of the content fulltext.
+ :ivar storage_kind: The native storage kind of this factory. Always
+ 'chunked'
+ :ivar key: The key of this content. Each key is a tuple with a single
+ string in it.
+ :ivar parents: A tuple of parent keys for self.key. If the object has
+ no parent information, None (as opposed to () for an empty list of
+ parents).
+ """
+
+ def __init__(self, key, parents, sha1, chunks):
+ """Create a ContentFactory."""
+ self.sha1 = sha1
+ self.storage_kind = 'chunked'
+ self.key = key
+ self.parents = parents
+ self._chunks = chunks
+
+ def get_bytes_as(self, storage_kind):
+ if storage_kind == 'chunked':
+ return self._chunks
+ elif storage_kind == 'fulltext':
+ return ''.join(self._chunks)
+ raise errors.UnavailableRepresentation(self.key, storage_kind,
+ self.storage_kind)
+
+
+class FulltextContentFactory(ContentFactory):
+ """Static data content factory.
+
+ This takes a fulltext when created and just returns that during
+ get_bytes_as('fulltext').
+
+ :ivar sha1: None, or the sha1 of the content fulltext.
+ :ivar storage_kind: The native storage kind of this factory. Always
+ 'fulltext'.
+ :ivar key: The key of this content. Each key is a tuple with a single
+ string in it.
+ :ivar parents: A tuple of parent keys for self.key. If the object has
+ no parent information, None (as opposed to () for an empty list of
+ parents).
+ """
+
+ def __init__(self, key, parents, sha1, text):
+ """Create a ContentFactory."""
+ self.sha1 = sha1
+ self.storage_kind = 'fulltext'
+ self.key = key
+ self.parents = parents
+ self._text = text
+
+ def get_bytes_as(self, storage_kind):
+ if storage_kind == self.storage_kind:
+ return self._text
+ elif storage_kind == 'chunked':
+ return [self._text]
+ raise errors.UnavailableRepresentation(self.key, storage_kind,
+ self.storage_kind)
+
+
+class AbsentContentFactory(ContentFactory):
+ """A placeholder content factory for unavailable texts.
+
+ :ivar sha1: None.
+ :ivar storage_kind: 'absent'.
+ :ivar key: The key of this content. Each key is a tuple with a single
+ string in it.
+ :ivar parents: None.
+ """
+
+ def __init__(self, key):
+ """Create a ContentFactory."""
+ self.sha1 = None
+ self.storage_kind = 'absent'
+ self.key = key
+ self.parents = None
+
+ def get_bytes_as(self, storage_kind):
+ raise ValueError('A request was made for key: %s, but that'
+ ' content is not available, and the calling'
+ ' code does not handle if it is missing.'
+ % (self.key,))
+
+
+class AdapterFactory(ContentFactory):
+ """A content factory to adapt between key prefix's."""
+
+ def __init__(self, key, parents, adapted):
+ """Create an adapter factory instance."""
+ self.key = key
+ self.parents = parents
+ self._adapted = adapted
+
+ def __getattr__(self, attr):
+ """Return a member from the adapted object."""
+ if attr in ('key', 'parents'):
+ return self.__dict__[attr]
+ else:
+ return getattr(self._adapted, attr)
+
+
+def filter_absent(record_stream):
+ """Adapt a record stream to remove absent records."""
+ for record in record_stream:
+ if record.storage_kind != 'absent':
+ yield record
+
+
+class _MPDiffGenerator(object):
+ """Pull out the functionality for generating mp_diffs."""
+
+ def __init__(self, vf, keys):
+ self.vf = vf
+ # This is the order the keys were requested in
+ self.ordered_keys = tuple(keys)
+ # keys + their parents, what we need to compute the diffs
+ self.needed_keys = ()
+ # Map from key: mp_diff
+ self.diffs = {}
+ # Map from key: parents_needed (may have ghosts)
+ self.parent_map = {}
+ # Parents that aren't present
+ self.ghost_parents = ()
+ # Map from parent_key => number of children for this text
+ self.refcounts = {}
+ # Content chunks that are cached while we still need them
+ self.chunks = {}
+
+ def _find_needed_keys(self):
+ """Find the set of keys we need to request.
+
+ This includes all the original keys passed in, and the non-ghost
+ parents of those keys.
+
+ :return: (needed_keys, refcounts)
+ needed_keys is the set of all texts we need to extract
+ refcounts is a dict of {key: num_children} letting us know when we
+ no longer need to cache a given parent text
+ """
+ # All the keys and their parents
+ needed_keys = set(self.ordered_keys)
+ parent_map = self.vf.get_parent_map(needed_keys)
+ self.parent_map = parent_map
+ # TODO: Should we be using a different construct here? I think this
+ # uses difference_update internally, and we expect the result to
+ # be tiny
+ missing_keys = needed_keys.difference(parent_map)
+ if missing_keys:
+ raise errors.RevisionNotPresent(list(missing_keys)[0], self.vf)
+ # Parents that might be missing. They are allowed to be ghosts, but we
+ # should check for them
+ refcounts = {}
+ setdefault = refcounts.setdefault
+ just_parents = set()
+ for child_key, parent_keys in parent_map.iteritems():
+ if not parent_keys:
+ # parent_keys may be None if a given VersionedFile claims to
+ # not support graph operations.
+ continue
+ just_parents.update(parent_keys)
+ needed_keys.update(parent_keys)
+ for p in parent_keys:
+ refcounts[p] = setdefault(p, 0) + 1
+ just_parents.difference_update(parent_map)
+ # Remove any parents that are actually ghosts from the needed set
+ self.present_parents = set(self.vf.get_parent_map(just_parents))
+ self.ghost_parents = just_parents.difference(self.present_parents)
+ needed_keys.difference_update(self.ghost_parents)
+ self.needed_keys = needed_keys
+ self.refcounts = refcounts
+ return needed_keys, refcounts
+
+ def _compute_diff(self, key, parent_lines, lines):
+ """Compute a single mp_diff, and store it in self._diffs"""
+ if len(parent_lines) > 0:
+ # XXX: _extract_blocks is not usefully defined anywhere...
+ # It was meant to extract the left-parent diff without
+ # having to recompute it for Knit content (pack-0.92,
+ # etc). That seems to have regressed somewhere
+ left_parent_blocks = self.vf._extract_blocks(key,
+ parent_lines[0], lines)
+ else:
+ left_parent_blocks = None
+ diff = multiparent.MultiParent.from_lines(lines,
+ parent_lines, left_parent_blocks)
+ self.diffs[key] = diff
+
+ def _process_one_record(self, key, this_chunks):
+ parent_keys = None
+ if key in self.parent_map:
+ # This record should be ready to diff, since we requested
+ # content in 'topological' order
+ parent_keys = self.parent_map.pop(key)
+ # If a VersionedFile claims 'no-graph' support, then it may return
+ # None for any parent request, so we replace it with an empty tuple
+ if parent_keys is None:
+ parent_keys = ()
+ parent_lines = []
+ for p in parent_keys:
+ # Alternatively we could check p not in self.needed_keys, but
+ # ghost_parents should be tiny versus huge
+ if p in self.ghost_parents:
+ continue
+ refcount = self.refcounts[p]
+ if refcount == 1: # Last child reference
+ self.refcounts.pop(p)
+ parent_chunks = self.chunks.pop(p)
+ else:
+ self.refcounts[p] = refcount - 1
+ parent_chunks = self.chunks[p]
+ p_lines = osutils.chunks_to_lines(parent_chunks)
+ # TODO: Should we cache the line form? We did the
+ # computation to get it, but storing it this way will
+ # be less memory efficient...
+ parent_lines.append(p_lines)
+ del p_lines
+ lines = osutils.chunks_to_lines(this_chunks)
+ # Since we needed the lines, we'll go ahead and cache them this way
+ this_chunks = lines
+ self._compute_diff(key, parent_lines, lines)
+ del lines
+ # Is this content required for any more children?
+ if key in self.refcounts:
+ self.chunks[key] = this_chunks
+
+ def _extract_diffs(self):
+ needed_keys, refcounts = self._find_needed_keys()
+ for record in self.vf.get_record_stream(needed_keys,
+ 'topological', True):
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(record.key, self.vf)
+ self._process_one_record(record.key,
+ record.get_bytes_as('chunked'))
+
+ def compute_diffs(self):
+ self._extract_diffs()
+ dpop = self.diffs.pop
+ return [dpop(k) for k in self.ordered_keys]
+
+
+class VersionedFile(object):
+ """Versioned text file storage.
+
+ A versioned file manages versions of line-based text files,
+ keeping track of the originating version for each line.
+
+ To clients the "lines" of the file are represented as a list of
+ strings. These strings will typically have terminal newline
+ characters, but this is not required. In particular files commonly
+ do not have a newline at the end of the file.
+
+ Texts are identified by a version-id string.
+ """
+
+ @staticmethod
+ def check_not_reserved_id(version_id):
+ revision.check_not_reserved_id(version_id)
+
+ def copy_to(self, name, transport):
+ """Copy this versioned file to name on transport."""
+ raise NotImplementedError(self.copy_to)
+
+ def get_record_stream(self, versions, ordering, include_delta_closure):
+ """Get a stream of records for versions.
+
+ :param versions: The versions to include. Each version is a tuple
+ (version,).
+ :param ordering: Either 'unordered' or 'topological'. A topologically
+ sorted stream has compression parents strictly before their
+ children.
+ :param include_delta_closure: If True then the closure across any
+ compression parents will be included (in the data content of the
+ stream, not in the emitted records). This guarantees that
+ 'fulltext' can be used successfully on every record.
+ :return: An iterator of ContentFactory objects, each of which is only
+ valid until the iterator is advanced.
+ """
+ raise NotImplementedError(self.get_record_stream)
+
+ def has_version(self, version_id):
+ """Returns whether version is present."""
+ raise NotImplementedError(self.has_version)
+
+ def insert_record_stream(self, stream):
+ """Insert a record stream into this versioned file.
+
+ :param stream: A stream of records to insert.
+ :return: None
+ :seealso VersionedFile.get_record_stream:
+ """
+ raise NotImplementedError
+
+ def add_lines(self, version_id, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ """Add a single text on top of the versioned file.
+
+ Must raise RevisionAlreadyPresent if the new version is
+ already present in file history.
+
+ Must raise RevisionNotPresent if any of the given parents are
+ not present in file history.
+
+ :param lines: A list of lines. Each line must be a bytestring. And all
+ of them except the last must be terminated with \n and contain no
+ other \n's. The last line may either contain no \n's or a single
+ terminated \n. If the lines list does meet this constraint the add
+ routine may error or may succeed - but you will be unable to read
+ the data back accurately. (Checking the lines have been split
+ correctly is expensive and extremely unlikely to catch bugs so it
+ is not done at runtime unless check_content is True.)
+ :param parent_texts: An optional dictionary containing the opaque
+ representations of some or all of the parents of version_id to
+ allow delta optimisations. VERY IMPORTANT: the texts must be those
+ returned by add_lines or data corruption can be caused.
+ :param left_matching_blocks: a hint about which areas are common
+ between the text and its left-hand-parent. The format is
+ the SequenceMatcher.get_matching_blocks format.
+ :param nostore_sha: Raise ExistingContent and do not add the lines to
+ the versioned file if the digest of the lines matches this.
+ :param random_id: If True a random id has been selected rather than
+ an id determined by some deterministic process such as a converter
+ from a foreign VCS. When True the backend may choose not to check
+ for uniqueness of the resulting key within the versioned file, so
+ this should only be done when the result is expected to be unique
+ anyway.
+ :param check_content: If True, the lines supplied are verified to be
+ bytestrings that are correctly formed lines.
+ :return: The text sha1, the number of bytes in the text, and an opaque
+ representation of the inserted version which can be provided
+ back to future add_lines calls in the parent_texts dictionary.
+ """
+ self._check_write_ok()
+ return self._add_lines(version_id, parents, lines, parent_texts,
+ left_matching_blocks, nostore_sha, random_id, check_content)
+
+ def _add_lines(self, version_id, parents, lines, parent_texts,
+ left_matching_blocks, nostore_sha, random_id, check_content):
+ """Helper to do the class specific add_lines."""
+ raise NotImplementedError(self.add_lines)
+
+ def add_lines_with_ghosts(self, version_id, parents, lines,
+ parent_texts=None, nostore_sha=None, random_id=False,
+ check_content=True, left_matching_blocks=None):
+ """Add lines to the versioned file, allowing ghosts to be present.
+
+ This takes the same parameters as add_lines and returns the same.
+ """
+ self._check_write_ok()
+ return self._add_lines_with_ghosts(version_id, parents, lines,
+ parent_texts, nostore_sha, random_id, check_content, left_matching_blocks)
+
+ def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts,
+ nostore_sha, random_id, check_content, left_matching_blocks):
+ """Helper to do class specific add_lines_with_ghosts."""
+ raise NotImplementedError(self.add_lines_with_ghosts)
+
+ def check(self, progress_bar=None):
+ """Check the versioned file for integrity."""
+ raise NotImplementedError(self.check)
+
+ def _check_lines_not_unicode(self, lines):
+ """Check that lines being added to a versioned file are not unicode."""
+ for line in lines:
+ if line.__class__ is not str:
+ raise errors.BzrBadParameterUnicode("lines")
+
+ def _check_lines_are_lines(self, lines):
+ """Check that the lines really are full lines without inline EOL."""
+ for line in lines:
+ if '\n' in line[:-1]:
+ raise errors.BzrBadParameterContainsNewline("lines")
+
+ def get_format_signature(self):
+ """Get a text description of the data encoding in this file.
+
+ :since: 0.90
+ """
+ raise NotImplementedError(self.get_format_signature)
+
+ def make_mpdiffs(self, version_ids):
+ """Create multiparent diffs for specified versions."""
+ # XXX: Can't use _MPDiffGenerator just yet. This is because version_ids
+ # is a list of strings, not keys. And while self.get_record_stream
+ # is supported, it takes *keys*, while self.get_parent_map() takes
+ # strings... *sigh*
+ knit_versions = set()
+ knit_versions.update(version_ids)
+ parent_map = self.get_parent_map(version_ids)
+ for version_id in version_ids:
+ try:
+ knit_versions.update(parent_map[version_id])
+ except KeyError:
+ raise errors.RevisionNotPresent(version_id, self)
+ # We need to filter out ghosts, because we can't diff against them.
+ knit_versions = set(self.get_parent_map(knit_versions).keys())
+ lines = dict(zip(knit_versions,
+ self._get_lf_split_line_list(knit_versions)))
+ diffs = []
+ for version_id in version_ids:
+ target = lines[version_id]
+ try:
+ parents = [lines[p] for p in parent_map[version_id] if p in
+ knit_versions]
+ except KeyError:
+ # I don't know how this could ever trigger.
+ # parent_map[version_id] was already triggered in the previous
+ # for loop, and lines[p] has the 'if p in knit_versions' check,
+ # so we again won't have a KeyError.
+ raise errors.RevisionNotPresent(version_id, self)
+ if len(parents) > 0:
+ left_parent_blocks = self._extract_blocks(version_id,
+ parents[0], target)
+ else:
+ left_parent_blocks = None
+ diffs.append(multiparent.MultiParent.from_lines(target, parents,
+ left_parent_blocks))
+ return diffs
+
+ def _extract_blocks(self, version_id, source, target):
+ return None
+
+ def add_mpdiffs(self, records):
+ """Add mpdiffs to this VersionedFile.
+
+ Records should be iterables of version, parents, expected_sha1,
+ mpdiff. mpdiff should be a MultiParent instance.
+ """
+ # Does this need to call self._check_write_ok()? (IanC 20070919)
+ vf_parents = {}
+ mpvf = multiparent.MultiMemoryVersionedFile()
+ versions = []
+ for version, parent_ids, expected_sha1, mpdiff in records:
+ versions.append(version)
+ mpvf.add_diff(mpdiff, version, parent_ids)
+ needed_parents = set()
+ for version, parent_ids, expected_sha1, mpdiff in records:
+ needed_parents.update(p for p in parent_ids
+ if not mpvf.has_version(p))
+ present_parents = set(self.get_parent_map(needed_parents).keys())
+ for parent_id, lines in zip(present_parents,
+ self._get_lf_split_line_list(present_parents)):
+ mpvf.add_version(lines, parent_id, [])
+ for (version, parent_ids, expected_sha1, mpdiff), lines in\
+ zip(records, mpvf.get_line_list(versions)):
+ if len(parent_ids) == 1:
+ left_matching_blocks = list(mpdiff.get_matching_blocks(0,
+ mpvf.get_diff(parent_ids[0]).num_lines()))
+ else:
+ left_matching_blocks = None
+ try:
+ _, _, version_text = self.add_lines_with_ghosts(version,
+ parent_ids, lines, vf_parents,
+ left_matching_blocks=left_matching_blocks)
+ except NotImplementedError:
+ # The vf can't handle ghosts, so add lines normally, which will
+ # (reasonably) fail if there are ghosts in the data.
+ _, _, version_text = self.add_lines(version,
+ parent_ids, lines, vf_parents,
+ left_matching_blocks=left_matching_blocks)
+ vf_parents[version] = version_text
+ sha1s = self.get_sha1s(versions)
+ for version, parent_ids, expected_sha1, mpdiff in records:
+ if expected_sha1 != sha1s[version]:
+ raise errors.VersionedFileInvalidChecksum(version)
+
+ def get_text(self, version_id):
+ """Return version contents as a text string.
+
+ Raises RevisionNotPresent if version is not present in
+ file history.
+ """
+ return ''.join(self.get_lines(version_id))
+ get_string = get_text
+
+ def get_texts(self, version_ids):
+ """Return the texts of listed versions as a list of strings.
+
+ Raises RevisionNotPresent if version is not present in
+ file history.
+ """
+ return [''.join(self.get_lines(v)) for v in version_ids]
+
+ def get_lines(self, version_id):
+ """Return version contents as a sequence of lines.
+
+ Raises RevisionNotPresent if version is not present in
+ file history.
+ """
+ raise NotImplementedError(self.get_lines)
+
+ def _get_lf_split_line_list(self, version_ids):
+ return [StringIO(t).readlines() for t in self.get_texts(version_ids)]
+
+ def get_ancestry(self, version_ids, topo_sorted=True):
+ """Return a list of all ancestors of given version(s). This
+ will not include the null revision.
+
+ This list will not be topologically sorted if topo_sorted=False is
+ passed.
+
+ Must raise RevisionNotPresent if any of the given versions are
+ not present in file history."""
+ if isinstance(version_ids, basestring):
+ version_ids = [version_ids]
+ raise NotImplementedError(self.get_ancestry)
+
+ def get_ancestry_with_ghosts(self, version_ids):
+ """Return a list of all ancestors of given version(s). This
+ will not include the null revision.
+
+ Must raise RevisionNotPresent if any of the given versions are
+ not present in file history.
+
+ Ghosts that are known about will be included in ancestry list,
+ but are not explicitly marked.
+ """
+ raise NotImplementedError(self.get_ancestry_with_ghosts)
+
+ def get_parent_map(self, version_ids):
+ """Get a map of the parents of version_ids.
+
+ :param version_ids: The version ids to look up parents for.
+ :return: A mapping from version id to parents.
+ """
+ raise NotImplementedError(self.get_parent_map)
+
+ def get_parents_with_ghosts(self, version_id):
+ """Return version names for parents of version_id.
+
+ Will raise RevisionNotPresent if version_id is not present
+ in the history.
+
+ Ghosts that are known about will be included in the parent list,
+ but are not explicitly marked.
+ """
+ try:
+ return list(self.get_parent_map([version_id])[version_id])
+ except KeyError:
+ raise errors.RevisionNotPresent(version_id, self)
+
+ def annotate(self, version_id):
+ """Return a list of (version-id, line) tuples for version_id.
+
+ :raise RevisionNotPresent: If the given version is
+ not present in file history.
+ """
+ raise NotImplementedError(self.annotate)
+
+ def iter_lines_added_or_present_in_versions(self, version_ids=None,
+ pb=None):
+ """Iterate over the lines in the versioned file from version_ids.
+
+ This may return lines from other versions. Each item the returned
+ iterator yields is a tuple of a line and a text version that that line
+ is present in (not introduced in).
+
+ Ordering of results is in whatever order is most suitable for the
+ underlying storage format.
+
+ If a progress bar is supplied, it may be used to indicate progress.
+ The caller is responsible for cleaning up progress bars (because this
+ is an iterator).
+
+ NOTES: Lines are normalised: they will all have \n terminators.
+ Lines are returned in arbitrary order.
+
+ :return: An iterator over (line, version_id).
+ """
+ raise NotImplementedError(self.iter_lines_added_or_present_in_versions)
+
+ def plan_merge(self, ver_a, ver_b):
+ """Return pseudo-annotation indicating how the two versions merge.
+
+ This is computed between versions a and b and their common
+ base.
+
+ Weave lines present in none of them are skipped entirely.
+
+ Legend:
+ killed-base Dead in base revision
+ killed-both Killed in each revision
+ killed-a Killed in a
+ killed-b Killed in b
+ unchanged Alive in both a and b (possibly created in both)
+ new-a Created in a
+ new-b Created in b
+ ghost-a Killed in a, unborn in b
+ ghost-b Killed in b, unborn in a
+ irrelevant Not in either revision
+ """
+ raise NotImplementedError(VersionedFile.plan_merge)
+
+ def weave_merge(self, plan, a_marker=TextMerge.A_MARKER,
+ b_marker=TextMerge.B_MARKER):
+ return PlanWeaveMerge(plan, a_marker, b_marker).merge_lines()[0]
+
+
+class RecordingVersionedFilesDecorator(object):
+ """A minimal versioned files that records calls made on it.
+
+ Only enough methods have been added to support tests using it to date.
+
+ :ivar calls: A list of the calls made; can be reset at any time by
+ assigning [] to it.
+ """
+
+ def __init__(self, backing_vf):
+ """Create a RecordingVersionedFilesDecorator decorating backing_vf.
+
+ :param backing_vf: The versioned file to answer all methods.
+ """
+ self._backing_vf = backing_vf
+ self.calls = []
+
+ def add_lines(self, key, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ self.calls.append(("add_lines", key, parents, lines, parent_texts,
+ left_matching_blocks, nostore_sha, random_id, check_content))
+ return self._backing_vf.add_lines(key, parents, lines, parent_texts,
+ left_matching_blocks, nostore_sha, random_id, check_content)
+
+ def check(self):
+ self._backing_vf.check()
+
+ def get_parent_map(self, keys):
+ self.calls.append(("get_parent_map", copy(keys)))
+ return self._backing_vf.get_parent_map(keys)
+
+ def get_record_stream(self, keys, sort_order, include_delta_closure):
+ self.calls.append(("get_record_stream", list(keys), sort_order,
+ include_delta_closure))
+ return self._backing_vf.get_record_stream(keys, sort_order,
+ include_delta_closure)
+
+ def get_sha1s(self, keys):
+ self.calls.append(("get_sha1s", copy(keys)))
+ return self._backing_vf.get_sha1s(keys)
+
+ def iter_lines_added_or_present_in_keys(self, keys, pb=None):
+ self.calls.append(("iter_lines_added_or_present_in_keys", copy(keys)))
+ return self._backing_vf.iter_lines_added_or_present_in_keys(keys, pb=pb)
+
+ def keys(self):
+ self.calls.append(("keys",))
+ return self._backing_vf.keys()
+
+
+class OrderingVersionedFilesDecorator(RecordingVersionedFilesDecorator):
+ """A VF that records calls, and returns keys in specific order.
+
+ :ivar calls: A list of the calls made; can be reset at any time by
+ assigning [] to it.
+ """
+
+ def __init__(self, backing_vf, key_priority):
+ """Create a RecordingVersionedFilesDecorator decorating backing_vf.
+
+ :param backing_vf: The versioned file to answer all methods.
+ :param key_priority: A dictionary defining what order keys should be
+ returned from an 'unordered' get_record_stream request.
+ Keys with lower priority are returned first, keys not present in
+ the map get an implicit priority of 0, and are returned in
+ lexicographical order.
+ """
+ RecordingVersionedFilesDecorator.__init__(self, backing_vf)
+ self._key_priority = key_priority
+
+ def get_record_stream(self, keys, sort_order, include_delta_closure):
+ self.calls.append(("get_record_stream", list(keys), sort_order,
+ include_delta_closure))
+ if sort_order == 'unordered':
+ def sort_key(key):
+ return (self._key_priority.get(key, 0), key)
+ # Use a defined order by asking for the keys one-by-one from the
+ # backing_vf
+ for key in sorted(keys, key=sort_key):
+ for record in self._backing_vf.get_record_stream([key],
+ 'unordered', include_delta_closure):
+ yield record
+ else:
+ for record in self._backing_vf.get_record_stream(keys, sort_order,
+ include_delta_closure):
+ yield record
+
+
+class KeyMapper(object):
+ """KeyMappers map between keys and underlying partitioned storage."""
+
+ def map(self, key):
+ """Map key to an underlying storage identifier.
+
+ :param key: A key tuple e.g. ('file-id', 'revision-id').
+ :return: An underlying storage identifier, specific to the partitioning
+ mechanism.
+ """
+ raise NotImplementedError(self.map)
+
+ def unmap(self, partition_id):
+ """Map a partitioned storage id back to a key prefix.
+
+ :param partition_id: The underlying partition id.
+ :return: As much of a key (or prefix) as is derivable from the partition
+ id.
+ """
+ raise NotImplementedError(self.unmap)
+
+
+class ConstantMapper(KeyMapper):
+ """A key mapper that maps to a constant result."""
+
+ def __init__(self, result):
+ """Create a ConstantMapper which will return result for all maps."""
+ self._result = result
+
+ def map(self, key):
+ """See KeyMapper.map()."""
+ return self._result
+
+
+class URLEscapeMapper(KeyMapper):
+ """Base class for use with transport backed storage.
+
+ This provides a map and unmap wrapper that respectively url escape and
+ unescape their outputs and inputs.
+ """
+
+ def map(self, key):
+ """See KeyMapper.map()."""
+ return urlutils.quote(self._map(key))
+
+ def unmap(self, partition_id):
+ """See KeyMapper.unmap()."""
+ return self._unmap(urlutils.unquote(partition_id))
+
+
+class PrefixMapper(URLEscapeMapper):
+ """A key mapper that extracts the first component of a key.
+
+ This mapper is for use with a transport based backend.
+ """
+
+ def _map(self, key):
+ """See KeyMapper.map()."""
+ return key[0]
+
+ def _unmap(self, partition_id):
+ """See KeyMapper.unmap()."""
+ return (partition_id,)
+
+
+class HashPrefixMapper(URLEscapeMapper):
+ """A key mapper that combines the first component of a key with a hash.
+
+ This mapper is for use with a transport based backend.
+ """
+
+ def _map(self, key):
+ """See KeyMapper.map()."""
+ prefix = self._escape(key[0])
+ return "%02x/%s" % (adler32(prefix) & 0xff, prefix)
+
+ def _escape(self, prefix):
+ """No escaping needed here."""
+ return prefix
+
+ def _unmap(self, partition_id):
+ """See KeyMapper.unmap()."""
+ return (self._unescape(osutils.basename(partition_id)),)
+
+ def _unescape(self, basename):
+ """No unescaping needed for HashPrefixMapper."""
+ return basename
+
+
+class HashEscapedPrefixMapper(HashPrefixMapper):
+ """Combines the escaped first component of a key with a hash.
+
+ This mapper is for use with a transport based backend.
+ """
+
+ _safe = "abcdefghijklmnopqrstuvwxyz0123456789-_@,."
+
+ def _escape(self, prefix):
+ """Turn a key element into a filesystem safe string.
+
+ This is similar to a plain urlutils.quote, except
+ it uses specific safe characters, so that it doesn't
+ have to translate a lot of valid file ids.
+ """
+ # @ does not get escaped. This is because it is a valid
+ # filesystem character we use all the time, and it looks
+ # a lot better than seeing %40 all the time.
+ r = [((c in self._safe) and c or ('%%%02x' % ord(c)))
+ for c in prefix]
+ return ''.join(r)
+
+ def _unescape(self, basename):
+ """Escaped names are easily unescaped by urlutils."""
+ return urlutils.unquote(basename)
+
+
+def make_versioned_files_factory(versioned_file_factory, mapper):
+ """Create a ThunkedVersionedFiles factory.
+
+ This will create a callable which when called creates a
+ ThunkedVersionedFiles on a transport, using mapper to access individual
+ versioned files, and versioned_file_factory to create each individual file.
+ """
+ def factory(transport):
+ return ThunkedVersionedFiles(transport, versioned_file_factory, mapper,
+ lambda:True)
+ return factory
+
+
+class VersionedFiles(object):
+ """Storage for many versioned files.
+
+ This object allows a single keyspace for accessing the history graph and
+ contents of named bytestrings.
+
+ Currently no implementation allows the graph of different key prefixes to
+ intersect, but the API does allow such implementations in the future.
+
+ The keyspace is expressed via simple tuples. Any instance of VersionedFiles
+ may have a different length key-size, but that size will be constant for
+ all texts added to or retrieved from it. For instance, bzrlib uses
+ instances with a key-size of 2 for storing user files in a repository, with
+ the first element the fileid, and the second the version of that file.
+
+ The use of tuples allows a single code base to support several different
+ uses with only the mapping logic changing from instance to instance.
+
+ :ivar _immediate_fallback_vfs: For subclasses that support stacking,
+ this is a list of other VersionedFiles immediately underneath this
+ one. They may in turn each have further fallbacks.
+ """
+
+ def add_lines(self, key, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ """Add a text to the store.
+
+ :param key: The key tuple of the text to add. If the last element is
+ None, a CHK string will be generated during the addition.
+ :param parents: The parents key tuples of the text to add.
+ :param lines: A list of lines. Each line must be a bytestring. And all
+ of them except the last must be terminated with \n and contain no
+ other \n's. The last line may either contain no \n's or a single
+ terminating \n. If the lines list does not meet this constraint the add
+ routine may error or may succeed - but you will be unable to read
+ the data back accurately. (Checking the lines have been split
+ correctly is expensive and extremely unlikely to catch bugs so it
+ is not done at runtime unless check_content is True.)
+ :param parent_texts: An optional dictionary containing the opaque
+ representations of some or all of the parents of version_id to
+ allow delta optimisations. VERY IMPORTANT: the texts must be those
+ returned by add_lines or data corruption can be caused.
+ :param left_matching_blocks: a hint about which areas are common
+ between the text and its left-hand-parent. The format is
+ the SequenceMatcher.get_matching_blocks format.
+ :param nostore_sha: Raise ExistingContent and do not add the lines to
+ the versioned file if the digest of the lines matches this.
+ :param random_id: If True a random id has been selected rather than
+ an id determined by some deterministic process such as a converter
+ from a foreign VCS. When True the backend may choose not to check
+ for uniqueness of the resulting key within the versioned file, so
+ this should only be done when the result is expected to be unique
+ anyway.
+ :param check_content: If True, the lines supplied are verified to be
+ bytestrings that are correctly formed lines.
+ :return: The text sha1, the number of bytes in the text, and an opaque
+ representation of the inserted version which can be provided
+ back to future add_lines calls in the parent_texts dictionary.
+ """
+ raise NotImplementedError(self.add_lines)
+
+ def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
+ """Add a text to the store.
+
+ This is a private function for use by VersionedFileCommitBuilder.
+
+ :param key: The key tuple of the text to add. If the last element is
+ None, a CHK string will be generated during the addition.
+ :param parents: The parents key tuples of the text to add.
+ :param text: A string containing the text to be committed.
+ :param nostore_sha: Raise ExistingContent and do not add the lines to
+ the versioned file if the digest of the lines matches this.
+ :param random_id: If True a random id has been selected rather than
+ an id determined by some deterministic process such as a converter
+ from a foreign VCS. When True the backend may choose not to check
+ for uniqueness of the resulting key within the versioned file, so
+ this should only be done when the result is expected to be unique
+ anyway.
+ :return: The text sha1, the number of bytes in the text, and an opaque
+ representation of the inserted version which can be provided
+ back to future _add_text calls in the parent_texts dictionary.
+ """
+ # The default implementation just thunks over to .add_lines(),
+ # inefficient, but it works.
+ return self.add_lines(key, parents, osutils.split_lines(text),
+ nostore_sha=nostore_sha,
+ random_id=random_id,
+ check_content=True)
+
+ def add_mpdiffs(self, records):
+ """Add mpdiffs to this VersionedFile.
+
+ Records should be iterables of version, parents, expected_sha1,
+ mpdiff. mpdiff should be a MultiParent instance.
+ """
+ vf_parents = {}
+ mpvf = multiparent.MultiMemoryVersionedFile()
+ versions = []
+ for version, parent_ids, expected_sha1, mpdiff in records:
+ versions.append(version)
+ mpvf.add_diff(mpdiff, version, parent_ids)
+ needed_parents = set()
+ for version, parent_ids, expected_sha1, mpdiff in records:
+ needed_parents.update(p for p in parent_ids
+ if not mpvf.has_version(p))
+ # It seems likely that adding all the present parents as fulltexts can
+ # easily exhaust memory.
+ chunks_to_lines = osutils.chunks_to_lines
+ for record in self.get_record_stream(needed_parents, 'unordered',
+ True):
+ if record.storage_kind == 'absent':
+ continue
+ mpvf.add_version(chunks_to_lines(record.get_bytes_as('chunked')),
+ record.key, [])
+ for (key, parent_keys, expected_sha1, mpdiff), lines in\
+ zip(records, mpvf.get_line_list(versions)):
+ if len(parent_keys) == 1:
+ left_matching_blocks = list(mpdiff.get_matching_blocks(0,
+ mpvf.get_diff(parent_keys[0]).num_lines()))
+ else:
+ left_matching_blocks = None
+ version_sha1, _, version_text = self.add_lines(key,
+ parent_keys, lines, vf_parents,
+ left_matching_blocks=left_matching_blocks)
+ if version_sha1 != expected_sha1:
+ raise errors.VersionedFileInvalidChecksum(version)
+ vf_parents[key] = version_text
+
+ def annotate(self, key):
+ """Return a list of (version-key, line) tuples for the text of key.
+
+ :raise RevisionNotPresent: If the key is not present.
+ """
+ raise NotImplementedError(self.annotate)
+
+ def check(self, progress_bar=None, keys=None):
+ """Check this object for integrity.
+
+ :param progress_bar: A progress bar to output as the check progresses.
+ :param keys: Specific keys within the VersionedFiles to check. When
+ this parameter is not None, check() becomes a generator as per
+ get_record_stream. The difference to get_record_stream is that
+ more or deeper checks will be performed.
+ :return: None, or if keys was supplied a generator as per
+ get_record_stream.
+ """
+ raise NotImplementedError(self.check)
+
+ @staticmethod
+ def check_not_reserved_id(version_id):
+ revision.check_not_reserved_id(version_id)
+
+ def clear_cache(self):
+ """Clear whatever caches this VersionedFile holds.
+
+ This is generally called after an operation has been performed, when we
+ don't expect to be using this versioned file again soon.
+ """
+
+ def _check_lines_not_unicode(self, lines):
+ """Check that lines being added to a versioned file are not unicode."""
+ for line in lines:
+ if line.__class__ is not str:
+ raise errors.BzrBadParameterUnicode("lines")
+
+ def _check_lines_are_lines(self, lines):
+ """Check that the lines really are full lines without inline EOL."""
+ for line in lines:
+ if '\n' in line[:-1]:
+ raise errors.BzrBadParameterContainsNewline("lines")
+
+ def get_known_graph_ancestry(self, keys):
+ """Get a KnownGraph instance with the ancestry of keys."""
+ # most basic implementation is a loop around get_parent_map
+ pending = set(keys)
+ parent_map = {}
+ while pending:
+ this_parent_map = self.get_parent_map(pending)
+ parent_map.update(this_parent_map)
+ pending = set()
+ map(pending.update, this_parent_map.itervalues())
+ pending = pending.difference(parent_map)
+ kg = _mod_graph.KnownGraph(parent_map)
+ return kg
+
+ def get_parent_map(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ raise NotImplementedError(self.get_parent_map)
+
+ def get_record_stream(self, keys, ordering, include_delta_closure):
+ """Get a stream of records for keys.
+
+ :param keys: The keys to include.
+ :param ordering: Either 'unordered' or 'topological'. A topologically
+ sorted stream has compression parents strictly before their
+ children.
+ :param include_delta_closure: If True then the closure across any
+ compression parents will be included (in the opaque data).
+ :return: An iterator of ContentFactory objects, each of which is only
+ valid until the iterator is advanced.
+ """
+ raise NotImplementedError(self.get_record_stream)
+
+ def get_sha1s(self, keys):
+ """Get the sha1's of the texts for the given keys.
+
+ :param keys: The names of the keys to lookup
+ :return: a dict from key to sha1 digest. Keys of texts which are not
+ present in the store are not present in the returned
+ dictionary.
+ """
+ raise NotImplementedError(self.get_sha1s)
+
+ has_key = index._has_key_from_parent_map
+
+ def get_missing_compression_parent_keys(self):
+ """Return an iterable of keys of missing compression parents.
+
+ Check this after calling insert_record_stream to find out if there are
+ any missing compression parents. If there are, the records that
+ depend on them are not able to be inserted safely. The precise
+ behaviour depends on the concrete VersionedFiles class in use.
+
+ Classes that do not support this will raise NotImplementedError.
+ """
+ raise NotImplementedError(self.get_missing_compression_parent_keys)
+
+ def insert_record_stream(self, stream):
+ """Insert a record stream into this container.
+
+ :param stream: A stream of records to insert.
+ :return: None
+ :seealso: VersionedFile.get_record_stream
+ """
+ raise NotImplementedError(self.insert_record_stream)
+
+ def iter_lines_added_or_present_in_keys(self, keys, pb=None):
+ """Iterate over the lines in the versioned files from keys.
+
+ This may return lines from other keys. Each item the returned
+ iterator yields is a tuple of a line and a text version that that line
+ is present in (not introduced in).
+
+ Ordering of results is in whatever order is most suitable for the
+ underlying storage format.
+
+ If a progress bar is supplied, it may be used to indicate progress.
+ The caller is responsible for cleaning up progress bars (because this
+ is an iterator).
+
+ NOTES:
+ * Lines are normalised by the underlying store: they will all have \n
+ terminators.
+ * Lines are returned in arbitrary order.
+
+ :return: An iterator over (line, key).
+ """
+ raise NotImplementedError(self.iter_lines_added_or_present_in_keys)
+
+ def keys(self):
+ """Return a iterable of the keys for all the contained texts."""
+ raise NotImplementedError(self.keys)
+
+ def make_mpdiffs(self, keys):
+ """Create multiparent diffs for specified keys."""
+ generator = _MPDiffGenerator(self, keys)
+ return generator.compute_diffs()
+
+ def get_annotator(self):
+ return annotate.Annotator(self)
+
+ missing_keys = index._missing_keys_from_parent_map
+
+ def _extract_blocks(self, version_id, source, target):
+ return None
+
+ def _transitive_fallbacks(self):
+ """Return the whole stack of fallback versionedfiles.
+
+ This VersionedFiles may have a list of fallbacks, but it doesn't
+ necessarily know about the whole stack going down, and it can't know
+ at open time because they may change after the objects are opened.
+ """
+ all_fallbacks = []
+ for a_vfs in self._immediate_fallback_vfs:
+ all_fallbacks.append(a_vfs)
+ all_fallbacks.extend(a_vfs._transitive_fallbacks())
+ return all_fallbacks
+
+
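+# Illustrative sketch, not part of the bzrlib API proper: the abstract class
+# above only needs get_parent_map() to be overridden for the generic
+# get_known_graph_ancestry() walker to work.  _DemoParentsOnlyVF and
+# _demo_known_graph_ancestry are hypothetical names used for illustration.
+def _demo_known_graph_ancestry():
+    class _DemoParentsOnlyVF(VersionedFiles):
+        def __init__(self, parents):
+            self._demo_parents = dict(parents)
+
+        def get_parent_map(self, keys):
+            return dict((key, self._demo_parents[key]) for key in keys
+                        if key in self._demo_parents)
+
+    vf = _DemoParentsOnlyVF({
+        ('file-id', 'rev-a'): (),
+        ('file-id', 'rev-b'): (('file-id', 'rev-a'),),
+        })
+    # The walk starts at rev-b and pulls in rev-a, so the resulting
+    # KnownGraph covers the whole two-revision ancestry.
+    return vf.get_known_graph_ancestry([('file-id', 'rev-b')])
+
+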
+class ThunkedVersionedFiles(VersionedFiles):
+ """Storage for many versioned files thunked onto a 'VersionedFile' class.
+
+ This object allows a single keyspace for accessing the history graph and
+ contents of named bytestrings.
+
+ Currently no implementation allows the graph of different key prefixes to
+ intersect, but the API does allow such implementations in the future.
+ """
+
+ def __init__(self, transport, file_factory, mapper, is_locked):
+ """Create a ThunkedVersionedFiles."""
+ self._transport = transport
+ self._file_factory = file_factory
+ self._mapper = mapper
+ self._is_locked = is_locked
+
+ def add_lines(self, key, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ """See VersionedFiles.add_lines()."""
+ path = self._mapper.map(key)
+ version_id = key[-1]
+ parents = [parent[-1] for parent in parents]
+ vf = self._get_vf(path)
+ try:
+ try:
+ return vf.add_lines_with_ghosts(version_id, parents, lines,
+ parent_texts=parent_texts,
+ left_matching_blocks=left_matching_blocks,
+ nostore_sha=nostore_sha, random_id=random_id,
+ check_content=check_content)
+ except NotImplementedError:
+ return vf.add_lines(version_id, parents, lines,
+ parent_texts=parent_texts,
+ left_matching_blocks=left_matching_blocks,
+ nostore_sha=nostore_sha, random_id=random_id,
+ check_content=check_content)
+ except errors.NoSuchFile:
+ # parent directory may be missing, try again.
+ self._transport.mkdir(osutils.dirname(path))
+ try:
+ return vf.add_lines_with_ghosts(version_id, parents, lines,
+ parent_texts=parent_texts,
+ left_matching_blocks=left_matching_blocks,
+ nostore_sha=nostore_sha, random_id=random_id,
+ check_content=check_content)
+ except NotImplementedError:
+ return vf.add_lines(version_id, parents, lines,
+ parent_texts=parent_texts,
+ left_matching_blocks=left_matching_blocks,
+ nostore_sha=nostore_sha, random_id=random_id,
+ check_content=check_content)
+
+ def annotate(self, key):
+ """Return a list of (version-key, line) tuples for the text of key.
+
+ :raise RevisionNotPresent: If the key is not present.
+ """
+ prefix = key[:-1]
+ path = self._mapper.map(prefix)
+ vf = self._get_vf(path)
+ origins = vf.annotate(key[-1])
+ result = []
+ for origin, line in origins:
+ result.append((prefix + (origin,), line))
+ return result
+
+ def check(self, progress_bar=None, keys=None):
+ """See VersionedFiles.check()."""
+ # XXX: This is over-enthusiastic but as we only thunk for Weaves today
+ # this is tolerable. Ideally we'd pass keys down to check() and
+ # have the older VersionedFile interface updated too.
+ for prefix, vf in self._iter_all_components():
+ vf.check()
+ if keys is not None:
+ return self.get_record_stream(keys, 'unordered', True)
+
+ def get_parent_map(self, keys):
+ """Get a map of the parents of keys.
+
+ :param keys: The keys to look up parents for.
+ :return: A mapping from keys to parents. Absent keys are absent from
+ the mapping.
+ """
+ prefixes = self._partition_keys(keys)
+ result = {}
+ for prefix, suffixes in prefixes.items():
+ path = self._mapper.map(prefix)
+ vf = self._get_vf(path)
+ parent_map = vf.get_parent_map(suffixes)
+ for key, parents in parent_map.items():
+ result[prefix + (key,)] = tuple(
+ prefix + (parent,) for parent in parents)
+ return result
+
+ def _get_vf(self, path):
+ if not self._is_locked():
+ raise errors.ObjectNotLocked(self)
+ return self._file_factory(path, self._transport, create=True,
+ get_scope=lambda:None)
+
+ def _partition_keys(self, keys):
+ """Turn keys into a dict of prefix:suffix_list."""
+ result = {}
+ for key in keys:
+ prefix_keys = result.setdefault(key[:-1], [])
+ prefix_keys.append(key[-1])
+ return result
+
+ def _get_all_prefixes(self):
+ # Identify all key prefixes.
+ # XXX: A bit hacky, needs polish.
+ if type(self._mapper) == ConstantMapper:
+ paths = [self._mapper.map(())]
+ prefixes = [()]
+ else:
+ relpaths = set()
+ for quoted_relpath in self._transport.iter_files_recursive():
+ path, ext = os.path.splitext(quoted_relpath)
+ relpaths.add(path)
+ paths = list(relpaths)
+ prefixes = [self._mapper.unmap(path) for path in paths]
+ return zip(paths, prefixes)
+
+ def get_record_stream(self, keys, ordering, include_delta_closure):
+ """See VersionedFiles.get_record_stream()."""
+ # Ordering will be taken care of by each partitioned store; group keys
+ # by partition.
+ keys = sorted(keys)
+ for prefix, suffixes, vf in self._iter_keys_vf(keys):
+ suffixes = [(suffix,) for suffix in suffixes]
+ for record in vf.get_record_stream(suffixes, ordering,
+ include_delta_closure):
+ if record.parents is not None:
+ record.parents = tuple(
+ prefix + parent for parent in record.parents)
+ record.key = prefix + record.key
+ yield record
+
+ def _iter_keys_vf(self, keys):
+ prefixes = self._partition_keys(keys)
+ for prefix, suffixes in prefixes.items():
+ path = self._mapper.map(prefix)
+ vf = self._get_vf(path)
+ yield prefix, suffixes, vf
+
+ def get_sha1s(self, keys):
+ """See VersionedFiles.get_sha1s()."""
+ sha1s = {}
+ for prefix, suffixes, vf in self._iter_keys_vf(keys):
+ vf_sha1s = vf.get_sha1s(suffixes)
+ for suffix, sha1 in vf_sha1s.iteritems():
+ sha1s[prefix + (suffix,)] = sha1
+ return sha1s
+
+ def insert_record_stream(self, stream):
+ """Insert a record stream into this container.
+
+ :param stream: A stream of records to insert.
+ :return: None
+ :seealso: VersionedFile.get_record_stream
+ """
+ for record in stream:
+ prefix = record.key[:-1]
+ key = record.key[-1:]
+ if record.parents is not None:
+ parents = [parent[-1:] for parent in record.parents]
+ else:
+ parents = None
+ thunk_record = AdapterFactory(key, parents, record)
+ path = self._mapper.map(prefix)
+ # Note that this parses the file many times; we can do better but
+ # as this only impacts weaves in terms of performance, it is
+ # tolerable.
+ vf = self._get_vf(path)
+ vf.insert_record_stream([thunk_record])
+
+ def iter_lines_added_or_present_in_keys(self, keys, pb=None):
+ """Iterate over the lines in the versioned files from keys.
+
+ This may return lines from other keys. Each item the returned
+ iterator yields is a tuple of a line and a text version that that line
+ is present in (not introduced in).
+
+ Ordering of results is in whatever order is most suitable for the
+ underlying storage format.
+
+ If a progress bar is supplied, it may be used to indicate progress.
+ The caller is responsible for cleaning up progress bars (because this
+ is an iterator).
+
+ NOTES:
+ * Lines are normalised by the underlying store: they will all have \n
+ terminators.
+ * Lines are returned in arbitrary order.
+
+ :return: An iterator over (line, key).
+ """
+ for prefix, suffixes, vf in self._iter_keys_vf(keys):
+ for line, version in vf.iter_lines_added_or_present_in_versions(suffixes):
+ yield line, prefix + (version,)
+
+ def _iter_all_components(self):
+ for path, prefix in self._get_all_prefixes():
+ yield prefix, self._get_vf(path)
+
+ def keys(self):
+ """See VersionedFiles.keys()."""
+ result = set()
+ for prefix, vf in self._iter_all_components():
+ for suffix in vf.versions():
+ result.add(prefix + (suffix,))
+ return result
+
+
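+# Illustrative sketch, not part of the bzrlib API proper: before delegating
+# to per-file VersionedFile objects, keys are grouped by their prefix.  The
+# thunk instance below is constructed with placeholder arguments because
+# _partition_keys() does not touch the transport or file factory.
+def _demo_partition_keys():
+    thunk = ThunkedVersionedFiles(None, None, PrefixMapper(), lambda: True)
+    keys = [('f1', 'rev-a'), ('f1', 'rev-b'), ('f2', 'rev-a')]
+    # Result: {('f1',): ['rev-a', 'rev-b'], ('f2',): ['rev-a']}
+    return thunk._partition_keys(keys)
+
+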
+class VersionedFilesWithFallbacks(VersionedFiles):
+
+ def without_fallbacks(self):
+ """Return a clone of this object without any fallbacks configured."""
+ raise NotImplementedError(self.without_fallbacks)
+
+ def add_fallback_versioned_files(self, a_versioned_files):
+ """Add a source of texts for texts not present in this knit.
+
+ :param a_versioned_files: A VersionedFiles object.
+ """
+ raise NotImplementedError(self.add_fallback_versioned_files)
+
+ def get_known_graph_ancestry(self, keys):
+ """Get a KnownGraph instance with the ancestry of keys."""
+ parent_map, missing_keys = self._index.find_ancestry(keys)
+ for fallback in self._transitive_fallbacks():
+ if not missing_keys:
+ break
+ (f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
+ missing_keys)
+ parent_map.update(f_parent_map)
+ missing_keys = f_missing_keys
+ kg = _mod_graph.KnownGraph(parent_map)
+ return kg
+
+
+class _PlanMergeVersionedFile(VersionedFiles):
+ """A VersionedFile for uncommitted and committed texts.
+
+ It is intended to allow merges to be planned with working tree texts.
+ It implements only the small part of the VersionedFiles interface used by
+ PlanMerge. It falls back to multiple versionedfiles for data not stored in
+ _PlanMergeVersionedFile itself.
+
+ :ivar fallback_versionedfiles: a list of VersionedFiles objects that can
+ be queried for missing texts.
+ """
+
+ def __init__(self, file_id):
+ """Create a _PlanMergeVersionedFile.
+
+ :param file_id: Used with _PlanMerge code which is not yet fully
+ tuple-keyspace aware.
+ """
+ self._file_id = file_id
+ # fallback locations
+ self.fallback_versionedfiles = []
+ # Parents for locally held keys.
+ self._parents = {}
+ # line data for locally held keys.
+ self._lines = {}
+ # key lookup providers
+ self._providers = [_mod_graph.DictParentsProvider(self._parents)]
+
+ def plan_merge(self, ver_a, ver_b, base=None):
+ """See VersionedFile.plan_merge"""
+ from bzrlib.merge import _PlanMerge
+ if base is None:
+ return _PlanMerge(ver_a, ver_b, self, (self._file_id,)).plan_merge()
+ old_plan = list(_PlanMerge(ver_a, base, self, (self._file_id,)).plan_merge())
+ new_plan = list(_PlanMerge(ver_a, ver_b, self, (self._file_id,)).plan_merge())
+ return _PlanMerge._subtract_plans(old_plan, new_plan)
+
+ def plan_lca_merge(self, ver_a, ver_b, base=None):
+ from bzrlib.merge import _PlanLCAMerge
+ graph = _mod_graph.Graph(self)
+ new_plan = _PlanLCAMerge(ver_a, ver_b, self, (self._file_id,), graph).plan_merge()
+ if base is None:
+ return new_plan
+ old_plan = _PlanLCAMerge(ver_a, base, self, (self._file_id,), graph).plan_merge()
+ return _PlanLCAMerge._subtract_plans(list(old_plan), list(new_plan))
+
+ def add_lines(self, key, parents, lines):
+ """See VersionedFiles.add_lines
+
+ Lines are added locally, not to fallback versionedfiles. Also, ghosts
+ are permitted. Only reserved ids are permitted.
+ """
+ if type(key) is not tuple:
+ raise TypeError(key)
+ if not revision.is_reserved_id(key[-1]):
+ raise ValueError('Only reserved ids may be used')
+ if parents is None:
+ raise ValueError('Parents may not be None')
+ if lines is None:
+ raise ValueError('Lines may not be None')
+ self._parents[key] = tuple(parents)
+ self._lines[key] = lines
+
+ def get_record_stream(self, keys, ordering, include_delta_closure):
+ pending = set(keys)
+ for key in keys:
+ if key in self._lines:
+ lines = self._lines[key]
+ parents = self._parents[key]
+ pending.remove(key)
+ yield ChunkedContentFactory(key, parents, None, lines)
+ for versionedfile in self.fallback_versionedfiles:
+ for record in versionedfile.get_record_stream(
+ pending, 'unordered', True):
+ if record.storage_kind == 'absent':
+ continue
+ else:
+ pending.remove(record.key)
+ yield record
+ if not pending:
+ return
+ # report absent entries
+ for key in pending:
+ yield AbsentContentFactory(key)
+
+ def get_parent_map(self, keys):
+ """See VersionedFiles.get_parent_map"""
+ # We create a new provider because a fallback may have been added.
+ # If we make fallbacks private we can update a stack list and avoid
+ # object creation thrashing.
+ keys = set(keys)
+ result = {}
+ if revision.NULL_REVISION in keys:
+ keys.remove(revision.NULL_REVISION)
+ result[revision.NULL_REVISION] = ()
+ self._providers = self._providers[:1] + self.fallback_versionedfiles
+ result.update(
+ _mod_graph.StackedParentsProvider(
+ self._providers).get_parent_map(keys))
+ for key, parents in result.iteritems():
+ if parents == ():
+ result[key] = (revision.NULL_REVISION,)
+ return result
+
+
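+# Illustrative sketch, not part of the bzrlib API proper: uncommitted texts
+# are held locally under reserved revision ids (ids ending in ':', such as
+# bzrlib's 'current:'), while anything else would be served from
+# fallback_versionedfiles.  _demo_plan_merge_vf is a hypothetical helper.
+def _demo_plan_merge_vf():
+    vf = _PlanMergeVersionedFile('file-id')
+    key = ('file-id', 'current:')
+    # Ghost parents are permitted; nothing is written to the fallbacks.
+    vf.add_lines(key, [('file-id', 'rev-parent')], ['working tree text\n'])
+    return vf.get_parent_map([key])
+
+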
+class PlanWeaveMerge(TextMerge):
+ """Weave merge that takes a plan as its input.
+
+ This exists so that VersionedFile.plan_merge is implementable.
+ Most callers will want to use WeaveMerge instead.
+ """
+
+ def __init__(self, plan, a_marker=TextMerge.A_MARKER,
+ b_marker=TextMerge.B_MARKER):
+ TextMerge.__init__(self, a_marker, b_marker)
+ self.plan = list(plan)
+
+ def _merge_struct(self):
+ lines_a = []
+ lines_b = []
+ ch_a = ch_b = False
+
+ def outstanding_struct():
+ if not lines_a and not lines_b:
+ return
+ elif ch_a and not ch_b:
+ # one-sided change:
+ yield (lines_a,)
+ elif ch_b and not ch_a:
+ yield (lines_b,)
+ elif lines_a == lines_b:
+ yield (lines_a,)
+ else:
+ yield (lines_a, lines_b)
+
+ # We previously considered either 'unchanged' or 'killed-both' lines
+ # to be possible places to resynchronize. However, assuming agreement
+ # on killed-both lines may be too aggressive. -- mbp 20060324
+ for state, line in self.plan:
+ if state == 'unchanged':
+ # resync and flush any queued conflicting changes
+ for struct in outstanding_struct():
+ yield struct
+ lines_a = []
+ lines_b = []
+ ch_a = ch_b = False
+
+ if state == 'unchanged':
+ if line:
+ yield ([line],)
+ elif state == 'killed-a':
+ ch_a = True
+ lines_b.append(line)
+ elif state == 'killed-b':
+ ch_b = True
+ lines_a.append(line)
+ elif state == 'new-a':
+ ch_a = True
+ lines_a.append(line)
+ elif state == 'new-b':
+ ch_b = True
+ lines_b.append(line)
+ elif state == 'conflicted-a':
+ ch_b = ch_a = True
+ lines_a.append(line)
+ elif state == 'conflicted-b':
+ ch_b = ch_a = True
+ lines_b.append(line)
+ elif state == 'killed-both':
+ # This counts as a change, even though there is no associated
+ # line
+ ch_b = ch_a = True
+ else:
+ if state not in ('irrelevant', 'ghost-a', 'ghost-b',
+ 'killed-base'):
+ raise AssertionError(state)
+ for struct in outstanding_struct():
+ yield struct
+
+ def base_from_plan(self):
+ """Construct a BASE file from the plan text."""
+ base_lines = []
+ for state, line in self.plan:
+ if state in ('killed-a', 'killed-b', 'killed-both', 'unchanged'):
+ # If unchanged, then this line is straight from base. If a or b
+ # or both killed the line, then it *used* to be in base.
+ base_lines.append(line)
+ else:
+ if state not in ('killed-base', 'irrelevant',
+ 'ghost-a', 'ghost-b',
+ 'new-a', 'new-b',
+ 'conflicted-a', 'conflicted-b'):
+ # killed-base, irrelevant means it doesn't apply
+ # ghost-a/ghost-b are harder to say for sure, but they
+ # aren't in the 'inc_c' which means they aren't in the
+ # shared base of a & b. So we don't include them. And
+ # obviously if the line is newly inserted, it isn't in base
+
+ # If 'conflicted-a' or b, then it is new vs one base, but
+ # old versus another base. However, if we make it present
+ # in the base, it will be deleted from the target, and it
+ # seems better to get a line doubled in the merge result,
+ # rather than have it deleted entirely.
+ # Example, each node is the 'text' at that point:
+ # MN
+ # / \
+ # MaN MbN
+ # | X |
+ # MabN MbaN
+ # \ /
+ # ???
+ # There was a criss-cross conflict merge. Both sides
+ # include the other, but put themselves first.
+ # Weave marks this as a 'clean' merge, picking OTHER over
+ # THIS. (Though the details depend on order inserted into
+ # weave, etc.)
+ # LCA generates a plan:
+ # [('unchanged', M),
+ # ('conflicted-b', b),
+ # ('unchanged', a),
+ # ('conflicted-a', b),
+ # ('unchanged', N)]
+ # If you mark 'conflicted-*' as part of BASE, then a 3-way
+ # merge tool will cleanly generate "MaN" (as BASE vs THIS
+ # removes one 'b', and BASE vs OTHER removes the other)
+ # If you include neither, 3-way creates a clean "MbabN" as
+ # THIS adds one 'b', and OTHER does too.
+ # It seems that having the line 2 times is better than
+ # having it omitted. (Easier to manually delete than notice
+ # it needs to be added.)
+ raise AssertionError('Unknown state: %s' % (state,))
+ return base_lines
+
+
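+# Illustrative sketch, not part of the bzrlib API proper: a tiny plan run
+# through base_from_plan().  Lines that were killed by either side still
+# belong to BASE, while newly added lines do not.
+def _demo_base_from_plan():
+    plan = [('unchanged', 'a\n'),
+            ('killed-b', 'b\n'),
+            ('new-a', 'c\n')]
+    # Returns ['a\n', 'b\n']: 'b\n' used to be in BASE before B removed it,
+    # and 'c\n' is new in A so it is not part of BASE.
+    return PlanWeaveMerge(plan).base_from_plan()
+
+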
+class WeaveMerge(PlanWeaveMerge):
+ """Weave merge that takes a VersionedFile and two versions as its input."""
+
+ def __init__(self, versionedfile, ver_a, ver_b,
+ a_marker=PlanWeaveMerge.A_MARKER, b_marker=PlanWeaveMerge.B_MARKER):
+ plan = versionedfile.plan_merge(ver_a, ver_b)
+ PlanWeaveMerge.__init__(self, plan, a_marker, b_marker)
+
+
+class VirtualVersionedFiles(VersionedFiles):
+ """Dummy implementation for VersionedFiles that uses other functions for
+ obtaining fulltexts and parent maps.
+
+ This is always on the bottom of the stack and uses string keys
+ (rather than tuples) internally.
+ """
+
+ def __init__(self, get_parent_map, get_lines):
+ """Create a VirtualVersionedFiles.
+
+ :param get_parent_map: Same signature as Repository.get_parent_map.
+ :param get_lines: Should return lines for specified key or None if
+ not available.
+ """
+ super(VirtualVersionedFiles, self).__init__()
+ self._get_parent_map = get_parent_map
+ self._get_lines = get_lines
+
+ def check(self, progressbar=None):
+ """See VersionedFiles.check.
+
+ :note: Always returns True for VirtualVersionedFiles.
+ """
+ return True
+
+ def add_mpdiffs(self, records):
+ """See VersionedFiles.mpdiffs.
+
+ :note: Not implemented for VirtualVersionedFiles.
+ """
+ raise NotImplementedError(self.add_mpdiffs)
+
+ def get_parent_map(self, keys):
+ """See VersionedFiles.get_parent_map."""
+ return dict([((k,), tuple([(p,) for p in v]))
+ for k,v in self._get_parent_map([k for (k,) in keys]).iteritems()])
+
+ def get_sha1s(self, keys):
+ """See VersionedFiles.get_sha1s."""
+ ret = {}
+ for (k,) in keys:
+ lines = self._get_lines(k)
+ if lines is not None:
+ if not isinstance(lines, list):
+ raise AssertionError
+ ret[(k,)] = osutils.sha_strings(lines)
+ return ret
+
+ def get_record_stream(self, keys, ordering, include_delta_closure):
+ """See VersionedFiles.get_record_stream."""
+ for (k,) in list(keys):
+ lines = self._get_lines(k)
+ if lines is not None:
+ if not isinstance(lines, list):
+ raise AssertionError
+ yield ChunkedContentFactory((k,), None,
+ sha1=osutils.sha_strings(lines),
+ chunks=lines)
+ else:
+ yield AbsentContentFactory((k,))
+
+ def iter_lines_added_or_present_in_keys(self, keys, pb=None):
+ """See VersionedFile.iter_lines_added_or_present_in_versions()."""
+ for i, (key,) in enumerate(keys):
+ if pb is not None:
+ pb.update("Finding changed lines", i, len(keys))
+ for l in self._get_lines(key):
+ yield (l, key)
+
+
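+# Illustrative sketch, not part of the bzrlib API proper: wiring
+# VirtualVersionedFiles to two in-memory callables.  The class wraps the
+# plain string revision ids used by the callables into 1-tuples.
+def _demo_virtual_vf():
+    parents = {'rev-a': (), 'rev-b': ('rev-a',)}
+    texts = {'rev-a': ['old\n'], 'rev-b': ['new\n']}
+    vf = VirtualVersionedFiles(
+        lambda revids: dict((r, parents[r]) for r in revids if r in parents),
+        lambda revid: texts.get(revid))
+    # {('rev-b',): (('rev-a',),)} and the sha1 of 'new\n' respectively.
+    return vf.get_parent_map([('rev-b',)]), vf.get_sha1s([('rev-b',)])
+
+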
+class NoDupeAddLinesDecorator(object):
+ """Decorator for a VersionedFiles that skips doing an add_lines if the key
+ is already present.
+ """
+
+ def __init__(self, store):
+ self._store = store
+
+ def add_lines(self, key, parents, lines, parent_texts=None,
+ left_matching_blocks=None, nostore_sha=None, random_id=False,
+ check_content=True):
+ """See VersionedFiles.add_lines.
+
+ This implementation may return None as the third element of the return
+ value when the original store wouldn't.
+ """
+ if nostore_sha:
+ raise NotImplementedError(
+ "NoDupeAddLinesDecorator.add_lines does not implement the "
+ "nostore_sha behaviour.")
+ if key[-1] is None:
+ sha1 = osutils.sha_strings(lines)
+ key = ("sha1:" + sha1,)
+ else:
+ sha1 = None
+ if key in self._store.get_parent_map([key]):
+ # This key has already been inserted, so don't do it again.
+ if sha1 is None:
+ sha1 = osutils.sha_strings(lines)
+ return sha1, sum(map(len, lines)), None
+ return self._store.add_lines(key, parents, lines,
+ parent_texts=parent_texts,
+ left_matching_blocks=left_matching_blocks,
+ nostore_sha=nostore_sha, random_id=random_id,
+ check_content=check_content)
+
+ def __getattr__(self, name):
+ return getattr(self._store, name)
+
+
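+# Illustrative sketch, not part of the bzrlib API proper: the decorator asks
+# the wrapped store for the key's parents and only forwards add_lines() when
+# the key is new.  _DemoStore is a hypothetical stand-in for a real
+# VersionedFiles implementation.
+def _demo_no_dupe_add_lines():
+    class _DemoStore(object):
+        def __init__(self):
+            self.texts = {}
+
+        def get_parent_map(self, keys):
+            return dict((key, ()) for key in keys if key in self.texts)
+
+        def add_lines(self, key, parents, lines, **kwargs):
+            self.texts[key] = lines
+            return osutils.sha_strings(lines), sum(map(len, lines)), None
+
+    store = _DemoStore()
+    no_dupe = NoDupeAddLinesDecorator(store)
+    no_dupe.add_lines(('rev-a',), [], ['text\n'])   # stored
+    no_dupe.add_lines(('rev-a',), [], ['text\n'])   # skipped, already present
+    return store.texts
+
+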
+def network_bytes_to_kind_and_offset(network_bytes):
+ """Strip of a record kind from the front of network_bytes.
+
+ :param network_bytes: The bytes of a record.
+ :return: A tuple (storage_kind, offset_of_remaining_bytes)
+ """
+ line_end = network_bytes.find('\n')
+ storage_kind = network_bytes[:line_end]
+ return storage_kind, line_end + 1
+
+
+class NetworkRecordStream(object):
+ """A record_stream which reconstitures a serialised stream."""
+
+ def __init__(self, bytes_iterator):
+ """Create a NetworkRecordStream.
+
+ :param bytes_iterator: An iterator of bytes. Each item in this
+ iterator should have been obtained from a record stream's
+ record.get_bytes_as(record.storage_kind) call.
+ """
+ self._bytes_iterator = bytes_iterator
+ self._kind_factory = {
+ 'fulltext': fulltext_network_to_record,
+ 'groupcompress-block': groupcompress.network_block_to_records,
+ 'knit-ft-gz': knit.knit_network_to_record,
+ 'knit-delta-gz': knit.knit_network_to_record,
+ 'knit-annotated-ft-gz': knit.knit_network_to_record,
+ 'knit-annotated-delta-gz': knit.knit_network_to_record,
+ 'knit-delta-closure': knit.knit_delta_closure_to_records,
+ }
+
+ def read(self):
+ """Read the stream.
+
+ :return: An iterator as per VersionedFiles.get_record_stream().
+ """
+ for bytes in self._bytes_iterator:
+ storage_kind, line_end = network_bytes_to_kind_and_offset(bytes)
+ for record in self._kind_factory[storage_kind](
+ storage_kind, bytes, line_end):
+ yield record
+
+
+def fulltext_network_to_record(kind, bytes, line_end):
+ """Convert a network fulltext record to record."""
+ meta_len, = struct.unpack('!L', bytes[line_end:line_end+4])
+ record_meta = bytes[line_end+4:line_end+4+meta_len]
+ key, parents = bencode.bdecode_as_tuple(record_meta)
+ if parents == 'nil':
+ parents = None
+ fulltext = bytes[line_end+4+meta_len:]
+ return [FulltextContentFactory(key, parents, None, fulltext)]
+
+
+def _length_prefix(bytes):
+ return struct.pack('!L', len(bytes))
+
+
+def record_to_fulltext_bytes(record):
+ if record.parents is None:
+ parents = 'nil'
+ else:
+ parents = record.parents
+ record_meta = bencode.bencode((record.key, parents))
+ record_content = record.get_bytes_as('fulltext')
+ return "fulltext\n%s%s%s" % (
+ _length_prefix(record_meta), record_meta, record_content)
+
+
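+# Illustrative sketch, not part of the bzrlib API proper: a fulltext record
+# survives a round trip through the wire format defined above.
+# FulltextContentFactory is the content factory class defined earlier in
+# this module; _demo_fulltext_round_trip is a hypothetical helper.
+def _demo_fulltext_round_trip():
+    record = FulltextContentFactory(('rev-a',), (('rev-parent',),), None,
+        'some text\n')
+    wire_bytes = record_to_fulltext_bytes(record)
+    storage_kind, offset = network_bytes_to_kind_and_offset(wire_bytes)
+    # storage_kind is 'fulltext'; the remaining bytes rebuild the record.
+    rebuilt, = fulltext_network_to_record(storage_kind, wire_bytes, offset)
+    return rebuilt.key, rebuilt.get_bytes_as('fulltext')
+
+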
+def sort_groupcompress(parent_map):
+ """Sort and group the keys in parent_map into groupcompress order.
+
+ groupcompress is defined (currently) as reverse-topological order, grouped
+ by the key prefix.
+
+ :return: A sorted-list of keys
+ """
+ # gc-optimal ordering is approximately reverse topological,
+ # properly grouped by file-id.
+ per_prefix_map = {}
+ for item in parent_map.iteritems():
+ key = item[0]
+ if isinstance(key, str) or len(key) == 1:
+ prefix = ''
+ else:
+ prefix = key[0]
+ try:
+ per_prefix_map[prefix].append(item)
+ except KeyError:
+ per_prefix_map[prefix] = [item]
+
+ present_keys = []
+ for prefix in sorted(per_prefix_map):
+ present_keys.extend(reversed(tsort.topo_sort(per_prefix_map[prefix])))
+ return present_keys
+
+
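+# Illustrative sketch, not part of the bzrlib API proper: keys are grouped
+# by prefix (file id) and each group is emitted in reverse topological
+# order, so a child text comes before its parent.
+def _demo_sort_groupcompress():
+    parent_map = {
+        ('f1', 'rev-b'): (('f1', 'rev-a'),),
+        ('f1', 'rev-a'): (),
+        ('f2', 'rev-a'): (),
+        }
+    # [('f1', 'rev-b'), ('f1', 'rev-a'), ('f2', 'rev-a')]
+    return sort_groupcompress(parent_map)
+
+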
+class _KeyRefs(object):
+
+ def __init__(self, track_new_keys=False):
+ # dict mapping 'key' to 'set of keys referring to that key'
+ self.refs = {}
+ if track_new_keys:
+ # set remembering all new keys
+ self.new_keys = set()
+ else:
+ self.new_keys = None
+
+ def clear(self):
+ if self.refs:
+ self.refs.clear()
+ if self.new_keys:
+ self.new_keys.clear()
+
+ def add_references(self, key, refs):
+ # Record the new references
+ for referenced in refs:
+ try:
+ needed_by = self.refs[referenced]
+ except KeyError:
+ needed_by = self.refs[referenced] = set()
+ needed_by.add(key)
+ # Discard references satisfied by the new key
+ self.add_key(key)
+
+ def get_new_keys(self):
+ return self.new_keys
+
+ def get_unsatisfied_refs(self):
+ return self.refs.iterkeys()
+
+ def _satisfy_refs_for_key(self, key):
+ try:
+ del self.refs[key]
+ except KeyError:
+ # No keys depended on this key. That's ok.
+ pass
+
+ def add_key(self, key):
+ # satisfy refs for key, and remember that we've seen this key.
+ self._satisfy_refs_for_key(key)
+ if self.new_keys is not None:
+ self.new_keys.add(key)
+
+ def satisfy_refs_for_keys(self, keys):
+ for key in keys:
+ self._satisfy_refs_for_key(key)
+
+ def get_referrers(self):
+ result = set()
+ for referrers in self.refs.itervalues():
+ result.update(referrers)
+ return result
+
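+# Illustrative sketch, not part of the bzrlib API proper: the bookkeeping
+# pattern used when inserting record streams -- note which parents a new key
+# needs, then mark keys as they arrive and ask what is still unsatisfied.
+def _demo_key_refs():
+    refs = _KeyRefs(track_new_keys=True)
+    refs.add_references(('rev-b',), [('rev-a',)])
+    missing_before = list(refs.get_unsatisfied_refs())   # [('rev-a',)]
+    refs.add_key(('rev-a',))
+    missing_after = list(refs.get_unsatisfied_refs())    # []
+    return missing_before, missing_after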
+
+
diff --git a/bzrlib/vf_repository.py b/bzrlib/vf_repository.py
new file mode 100644
index 0000000..4ab6dff
--- /dev/null
+++ b/bzrlib/vf_repository.py
@@ -0,0 +1,3202 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Repository formats built around versioned files."""
+
+from __future__ import absolute_import
+
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import itertools
+
+from bzrlib import (
+ check,
+ config as _mod_config,
+ debug,
+ fetch as _mod_fetch,
+ fifo_cache,
+ gpg,
+ graph,
+ inventory_delta,
+ lru_cache,
+ osutils,
+ revision as _mod_revision,
+ serializer as _mod_serializer,
+ static_tuple,
+ symbol_versioning,
+ tsort,
+ ui,
+ versionedfile,
+ vf_search,
+ )
+
+from bzrlib.recordcounter import RecordCounter
+from bzrlib.revisiontree import InventoryRevisionTree
+from bzrlib.testament import Testament
+from bzrlib.i18n import gettext
+""")
+
+from bzrlib import (
+ errors,
+ )
+from bzrlib.decorators import (
+ needs_read_lock,
+ needs_write_lock,
+ only_raises,
+ )
+from bzrlib.inventory import (
+ Inventory,
+ InventoryDirectory,
+ ROOT_ID,
+ entry_factory,
+ )
+
+from bzrlib.repository import (
+ CommitBuilder,
+ InterRepository,
+ MetaDirRepository,
+ RepositoryFormatMetaDir,
+ Repository,
+ RepositoryFormat,
+ )
+
+from bzrlib.trace import (
+ mutter
+ )
+
+
+class VersionedFileRepositoryFormat(RepositoryFormat):
+ """Base class for all repository formats that are VersionedFiles-based."""
+
+ supports_full_versioned_files = True
+ supports_versioned_directories = True
+ supports_unreferenced_revisions = True
+
+ # Should commit add an inventory, or an inventory delta to the repository.
+ _commit_inv_deltas = True
+ # What order should fetch operations request streams in?
+ # The default is unordered as that is the cheapest for an origin to
+ # provide.
+ _fetch_order = 'unordered'
+ # Does this repository format use deltas that can be fetched as-deltas?
+ # (E.g. knits, where the knit deltas can be transplanted intact.)
+ # We default to False, which will ensure that enough data to get
+ # a full text out of any fetch stream will be grabbed.
+ _fetch_uses_deltas = False
+
+
+class VersionedFileCommitBuilder(CommitBuilder):
+ """Commit builder implementation for versioned files based repositories.
+ """
+
+ # this commit builder supports the record_entry_contents interface
+ supports_record_entry_contents = True
+
+ # the default CommitBuilder does not manage trees whose root is versioned.
+ _versioned_root = False
+
+ def __init__(self, repository, parents, config_stack, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ super(VersionedFileCommitBuilder, self).__init__(repository,
+ parents, config_stack, timestamp, timezone, committer, revprops,
+ revision_id, lossy)
+ try:
+ basis_id = self.parents[0]
+ except IndexError:
+ basis_id = _mod_revision.NULL_REVISION
+ self.basis_delta_revision = basis_id
+ self.new_inventory = Inventory(None)
+ self._basis_delta = []
+ self.__heads = graph.HeadsCache(repository.get_graph()).heads
+ # memo'd check for no-op commits.
+ self._any_changes = False
+ # API compatibility, older code that used CommitBuilder did not call
+ # .record_delete(), which means the delta that is computed would not be
+ # valid. Callers that will call record_delete() should call
+ # .will_record_deletes() to indicate that.
+ self._recording_deletes = False
+
+ def will_record_deletes(self):
+ """Tell the commit builder that deletes are being notified.
+
+ This enables the accumulation of an inventory delta; for the resulting
+ commit to be valid, deletes against the basis MUST be recorded via
+ builder.record_delete().
+ """
+ self._recording_deletes = True
+
+ def any_changes(self):
+ """Return True if any entries were changed.
+
+ This includes merge-only changes. It is the core for the --unchanged
+ detection in commit.
+
+ :return: True if any changes have occurred.
+ """
+ return self._any_changes
+
+ def _ensure_fallback_inventories(self):
+ """Ensure that appropriate inventories are available.
+
+ This only applies to repositories that are stacked, and is about
+ ensuring the stacking invariants. Namely, that for any revision that is
+ present, we either have all of the file content, or we have the parent
+ inventory and the delta file content.
+ """
+ if not self.repository._fallback_repositories:
+ return
+ if not self.repository._format.supports_chks:
+ raise errors.BzrError("Cannot commit directly to a stacked branch"
+ " in pre-2a formats. See "
+ "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
+ # This is a stacked repo, we need to make sure we have the parent
+ # inventories for the parents.
+ parent_keys = [(p,) for p in self.parents]
+ parent_map = self.repository.inventories._index.get_parent_map(parent_keys)
+ missing_parent_keys = set([pk for pk in parent_keys
+ if pk not in parent_map])
+ fallback_repos = list(reversed(self.repository._fallback_repositories))
+ missing_keys = [('inventories', pk[0])
+ for pk in missing_parent_keys]
+ resume_tokens = []
+ while missing_keys and fallback_repos:
+ fallback_repo = fallback_repos.pop()
+ source = fallback_repo._get_source(self.repository._format)
+ sink = self.repository._get_sink()
+ stream = source.get_stream_for_missing_keys(missing_keys)
+ missing_keys = sink.insert_stream_without_locking(stream,
+ self.repository._format)
+ if missing_keys:
+ raise errors.BzrError('Unable to fill in parent inventories for a'
+ ' stacked branch')
+
+ def commit(self, message):
+ """Make the actual commit.
+
+ :return: The revision id of the recorded revision.
+ """
+ self._validate_unicode_text(message, 'commit message')
+ rev = _mod_revision.Revision(
+ timestamp=self._timestamp,
+ timezone=self._timezone,
+ committer=self._committer,
+ message=message,
+ inventory_sha1=self.inv_sha1,
+ revision_id=self._new_revision_id,
+ properties=self._revprops)
+ rev.parent_ids = self.parents
+ if self._config_stack.get('create_signatures') == _mod_config.SIGN_ALWAYS:
+ testament = Testament(rev, self.revision_tree())
+ plaintext = testament.as_short_text()
+ self.repository.store_revision_signature(
+ gpg.GPGStrategy(self._config_stack), plaintext,
+ self._new_revision_id)
+ self.repository._add_revision(rev)
+ self._ensure_fallback_inventories()
+ self.repository.commit_write_group()
+ return self._new_revision_id
+
+ def abort(self):
+ """Abort the commit that is being built.
+ """
+ self.repository.abort_write_group()
+
+ def revision_tree(self):
+ """Return the tree that was just committed.
+
+ After calling commit() this can be called to get a
+ RevisionTree representing the newly committed tree. This is
+ preferred to calling Repository.revision_tree() because that may
+ require deserializing the inventory, while we already have a copy in
+ memory.
+ """
+ if self.new_inventory is None:
+ self.new_inventory = self.repository.get_inventory(
+ self._new_revision_id)
+ return InventoryRevisionTree(self.repository, self.new_inventory,
+ self._new_revision_id)
+
+ def finish_inventory(self):
+ """Tell the builder that the inventory is finished.
+
+ :return: The inventory id in the repository, which can be used with
+ repository.get_inventory.
+ """
+ if self.new_inventory is None:
+ # an inventory delta was accumulated without creating a new
+ # inventory.
+ basis_id = self.basis_delta_revision
+ # We ignore the 'inventory' returned by add_inventory_by_delta
+ # because self.new_inventory is used to hint to the rest of the
+ # system what code path was taken
+ self.inv_sha1, _ = self.repository.add_inventory_by_delta(
+ basis_id, self._basis_delta, self._new_revision_id,
+ self.parents)
+ else:
+ if self.new_inventory.root is None:
+ raise AssertionError('Root entry should be supplied to'
+ ' record_entry_contents, as of bzr 0.10.')
+ self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
+ self.new_inventory.revision_id = self._new_revision_id
+ self.inv_sha1 = self.repository.add_inventory(
+ self._new_revision_id,
+ self.new_inventory,
+ self.parents
+ )
+ return self._new_revision_id
+
+ def _check_root(self, ie, parent_invs, tree):
+ """Helper for record_entry_contents.
+
+ :param ie: An entry being added.
+ :param parent_invs: The inventories of the parent revisions of the
+ commit.
+ :param tree: The tree that is being committed.
+ """
+ # In this revision format, root entries have no knit or weave. When
+ # serializing out to disk and back in, root.revision is always
+ # _new_revision_id.
+ ie.revision = self._new_revision_id
+
+ def _require_root_change(self, tree):
+ """Enforce an appropriate root object change.
+
+ This is called once when record_iter_changes is called, if and only if
+ the root was not in the delta calculated by record_iter_changes.
+
+ :param tree: The tree which is being committed.
+ """
+ if len(self.parents) == 0:
+ raise errors.RootMissing()
+ entry = entry_factory['directory'](tree.path2id(''), '',
+ None)
+ entry.revision = self._new_revision_id
+ self._basis_delta.append(('', '', entry.file_id, entry))
+
+ def _get_delta(self, ie, basis_inv, path):
+ """Get a delta against the basis inventory for ie."""
+ if not basis_inv.has_id(ie.file_id):
+ # add
+ result = (None, path, ie.file_id, ie)
+ self._basis_delta.append(result)
+ return result
+ elif ie != basis_inv[ie.file_id]:
+ # common but altered
+ # TODO: avoid this id2path call.
+ result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
+ self._basis_delta.append(result)
+ return result
+ else:
+ # common, unaltered
+ return None
+
+ def _heads(self, file_id, revision_ids):
+ """Calculate the graph heads for revision_ids in the graph of file_id.
+
+ This can use either a per-file graph or a global revision graph as we
+ have an identity relationship between the two graphs.
+ """
+ return self.__heads(revision_ids)
+
+ def get_basis_delta(self):
+ """Return the complete inventory delta versus the basis inventory.
+
+ This has been built up with the calls to record_delete and
+ record_entry_contents. The client must have already called
+ will_record_deletes() to indicate that they will be generating a
+ complete delta.
+
+ :return: An inventory delta, suitable for use with apply_delta, or
+ Repository.add_inventory_by_delta, etc.
+ """
+ if not self._recording_deletes:
+ raise AssertionError("recording deletes not activated.")
+ return self._basis_delta
+
+ def record_delete(self, path, file_id):
+ """Record that a delete occured against a basis tree.
+
+ This is an optional API - when used it adds items to the basis_delta
+ being accumulated by the commit builder. It cannot be called unless the
+ method will_record_deletes() has been called to inform the builder that
+ a delta is being supplied.
+
+ :param path: The path of the thing deleted.
+ :param file_id: The file id that was deleted.
+ """
+ if not self._recording_deletes:
+ raise AssertionError("recording deletes not activated.")
+ delta = (path, None, file_id, None)
+ self._basis_delta.append(delta)
+ self._any_changes = True
+ return delta
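+
+    # Typical use of the delta-recording protocol above, shown as an
+    # illustrative sketch rather than code from this module:
+    #   builder.will_record_deletes()
+    #   builder.record_delete('doomed.txt', 'doomed-file-id')
+    #   ... record the other changes ...
+    #   delta = builder.get_basis_delta()
+    # get_basis_delta() raises AssertionError unless will_record_deletes()
+    # was called first.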
+
+ def record_entry_contents(self, ie, parent_invs, path, tree,
+ content_summary):
+ """Record the content of ie from tree into the commit if needed.
+
+ Side effect: sets ie.revision when unchanged
+
+ :param ie: An inventory entry present in the commit.
+ :param parent_invs: The inventories of the parent revisions of the
+ commit.
+ :param path: The path the entry is at in the tree.
+ :param tree: The tree which contains this entry and should be used to
+ obtain content.
+ :param content_summary: Summary data from the tree about the paths
+ content - stat, length, exec, sha/link target. This is only
+ accessed when the entry has a revision of None - that is when it is
+ a candidate to commit.
+ :return: A tuple (change_delta, version_recorded, fs_hash).
+ change_delta is an inventory_delta change for this entry against
+ the basis tree of the commit, or None if no change occurred against
+ the basis tree.
+ version_recorded is True if a new version of the entry has been
+ recorded. For instance, committing a merge where a file was only
+ changed on the other side will return (delta, False).
+ fs_hash is either None, or the hash details for the path (currently
+ a tuple of the contents sha1 and the statvalue returned by
+ tree.get_file_with_stat()).
+ """
+ if self.new_inventory.root is None:
+ if ie.parent_id is not None:
+ raise errors.RootMissing()
+ self._check_root(ie, parent_invs, tree)
+ if ie.revision is None:
+ kind = content_summary[0]
+ else:
+ # ie is carried over from a prior commit
+ kind = ie.kind
+ # XXX: repository specific check for nested tree support goes here - if
+ # the repo doesn't want nested trees we skip it ?
+ if (kind == 'tree-reference' and
+ not self.repository._format.supports_tree_reference):
+ # mismatch between commit builder logic and repository:
+ # this needs the entry creation pushed down into the builder.
+ raise NotImplementedError('Missing repository subtree support.')
+ self.new_inventory.add(ie)
+
+ # TODO: slow, take it out of the inner loop.
+ try:
+ basis_inv = parent_invs[0]
+ except IndexError:
+ basis_inv = Inventory(root_id=None)
+
+ # ie.revision is always None if the InventoryEntry is considered
+ # for committing. We may record the previous parents revision if the
+ # content is actually unchanged against a sole head.
+ if ie.revision is not None:
+ if not self._versioned_root and path == '':
+ # repositories that do not version the root set the root's
+ # revision to the new commit even when no change occurs (more
+ # specifically, they do not record a revision on the root; and
+ # the rev id is assigned to the root during deserialisation -
+ # this masks when a change may have occurred against the basis).
+ # To match this we always issue a delta, because the revision
+ # of the root will always be changing.
+ if basis_inv.has_id(ie.file_id):
+ delta = (basis_inv.id2path(ie.file_id), path,
+ ie.file_id, ie)
+ else:
+ # add
+ delta = (None, path, ie.file_id, ie)
+ self._basis_delta.append(delta)
+ return delta, False, None
+ else:
+ # we don't need to commit this, because the caller already
+ # determined that an existing revision of this file is
+ # appropriate. If it's not being considered for committing then
+ # it and all its parents to the root must be unaltered so
+ # no-change against the basis.
+ if ie.revision == self._new_revision_id:
+ raise AssertionError("Impossible situation, a skipped "
+ "inventory entry (%r) claims to be modified in this "
+ "commit (%r).", (ie, self._new_revision_id))
+ return None, False, None
+ # XXX: Friction: parent_candidates should return a list not a dict
+ # so that we don't have to walk the inventories again.
+ parent_candidate_entries = ie.parent_candidates(parent_invs)
+ head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
+ heads = []
+ for inv in parent_invs:
+ if inv.has_id(ie.file_id):
+ old_rev = inv[ie.file_id].revision
+ if old_rev in head_set:
+ heads.append(inv[ie.file_id].revision)
+ head_set.remove(inv[ie.file_id].revision)
+
+ store = False
+ # now we check to see if we need to write a new record to the
+ # file-graph.
+ # We write a new entry unless there is one head to the ancestors, and
+ # the kind-derived content is unchanged.
+
+ # Cheapest check first: no ancestors, or more than one head in the
+ # ancestors, we write a new node.
+ if len(heads) != 1:
+ store = True
+ if not store:
+ # There is a single head, look it up for comparison
+ parent_entry = parent_candidate_entries[heads[0]]
+ # if the non-content specific data has changed, we'll be writing a
+ # node:
+ if (parent_entry.parent_id != ie.parent_id or
+ parent_entry.name != ie.name):
+ store = True
+ # now we need to do content specific checks:
+ if not store:
+ # if the kind changed the content obviously has
+ if kind != parent_entry.kind:
+ store = True
+ # Stat cache fingerprint feedback for the caller - None as we usually
+ # don't generate one.
+ fingerprint = None
+ if kind == 'file':
+ if content_summary[2] is None:
+ raise ValueError("Files must not have executable = None")
+ if not store:
+ # We can't trust a check of the file length because of content
+ # filtering...
+ if (# if the exec bit has changed we have to store:
+ parent_entry.executable != content_summary[2]):
+ store = True
+ elif parent_entry.text_sha1 == content_summary[3]:
+ # all meta and content is unchanged (using a hash cache
+ # hit to check the sha)
+ ie.revision = parent_entry.revision
+ ie.text_size = parent_entry.text_size
+ ie.text_sha1 = parent_entry.text_sha1
+ ie.executable = parent_entry.executable
+ return self._get_delta(ie, basis_inv, path), False, None
+ else:
+ # Either there is only a hash change (no hash cache entry,
+ # or a same-size content change), or there is no change on
+ # this file at all.
+ # Provide the parent's hash to the store layer, so that if the
+ # content is unchanged we will not store a new node.
+ nostore_sha = parent_entry.text_sha1
+ if store:
+ # We want to record a new node regardless of the presence or
+ # absence of a content change in the file.
+ nostore_sha = None
+ ie.executable = content_summary[2]
+ file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
+ try:
+ text = file_obj.read()
+ finally:
+ file_obj.close()
+ try:
+ ie.text_sha1, ie.text_size = self._add_text_to_weave(
+ ie.file_id, text, heads, nostore_sha)
+ # Let the caller know we generated a stat fingerprint.
+ fingerprint = (ie.text_sha1, stat_value)
+ except errors.ExistingContent:
+ # Turns out that the file content was unchanged, and we were
+ # only going to store a new node if it was changed. Carry over
+ # the entry.
+ ie.revision = parent_entry.revision
+ ie.text_size = parent_entry.text_size
+ ie.text_sha1 = parent_entry.text_sha1
+ ie.executable = parent_entry.executable
+ return self._get_delta(ie, basis_inv, path), False, None
+ elif kind == 'directory':
+ if not store:
+ # all data is meta here, nothing specific to directory, so
+ # carry over:
+ ie.revision = parent_entry.revision
+ return self._get_delta(ie, basis_inv, path), False, None
+ self._add_text_to_weave(ie.file_id, '', heads, None)
+ elif kind == 'symlink':
+ current_link_target = content_summary[3]
+ if not store:
+ # symlink target is not generic metadata, check if it has
+ # changed.
+ if current_link_target != parent_entry.symlink_target:
+ store = True
+ if not store:
+ # unchanged, carry over.
+ ie.revision = parent_entry.revision
+ ie.symlink_target = parent_entry.symlink_target
+ return self._get_delta(ie, basis_inv, path), False, None
+ ie.symlink_target = current_link_target
+ self._add_text_to_weave(ie.file_id, '', heads, None)
+ elif kind == 'tree-reference':
+ if not store:
+ if content_summary[3] != parent_entry.reference_revision:
+ store = True
+ if not store:
+ # unchanged, carry over.
+ ie.reference_revision = parent_entry.reference_revision
+ ie.revision = parent_entry.revision
+ return self._get_delta(ie, basis_inv, path), False, None
+ ie.reference_revision = content_summary[3]
+ if ie.reference_revision is None:
+ raise AssertionError("invalid content_summary for nested tree: %r"
+ % (content_summary,))
+ self._add_text_to_weave(ie.file_id, '', heads, None)
+ else:
+ raise NotImplementedError('unknown kind')
+ ie.revision = self._new_revision_id
+ # The initial commit adds a root directory, but this in itself is not
+ # a worthwhile commit.
+ if (self.basis_delta_revision != _mod_revision.NULL_REVISION or
+ path != ""):
+ self._any_changes = True
+ return self._get_delta(ie, basis_inv, path), True, fingerprint
+
+ def record_iter_changes(self, tree, basis_revision_id, iter_changes,
+ _entry_factory=entry_factory):
+ """Record a new tree via iter_changes.
+
+ :param tree: The tree to obtain text contents from for changed objects.
+ :param basis_revision_id: The revision id of the tree the iter_changes
+ has been generated against. Currently assumed to be the same
+ as self.parents[0] - if it is not, errors may occur.
+ :param iter_changes: An iter_changes iterator with the changes to apply
+ to basis_revision_id. The iterator must not include any items with
+ a current kind of None - missing items must be either filtered out
+ or errored-on before record_iter_changes sees the item.
+ :param _entry_factory: Private method to bind entry_factory locally for
+ performance.
+ :return: A generator of (file_id, relpath, fs_hash) tuples for use with
+ tree._observed_sha1.
+ """
+ # Create an inventory delta based on deltas between all the parents and
+ # deltas between all the parent inventories. We use inventory deltas
+ # between the inventory objects because iter_changes masks
+ # last-changed-field-only changes.
+ # Working data:
+ # file_id -> change map, change is fileid, paths, changed, versioneds,
+ # parents, names, kinds, executables
+ merged_ids = {}
+ # {file_id -> revision_id -> inventory entry, for entries in parent
+ # trees that are not parents[0]}
+ parent_entries = {}
+ ghost_basis = False
+ try:
+ revtrees = list(self.repository.revision_trees(self.parents))
+ except errors.NoSuchRevision:
+ # one or more ghosts, slow path.
+ revtrees = []
+ for revision_id in self.parents:
+ try:
+ revtrees.append(self.repository.revision_tree(revision_id))
+ except errors.NoSuchRevision:
+ if not revtrees:
+ basis_revision_id = _mod_revision.NULL_REVISION
+ ghost_basis = True
+ revtrees.append(self.repository.revision_tree(
+ _mod_revision.NULL_REVISION))
+ # The basis inventory from a repository
+ if revtrees:
+ basis_tree = revtrees[0]
+ else:
+ basis_tree = self.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ basis_inv = basis_tree.root_inventory
+ if len(self.parents) > 0:
+ if basis_revision_id != self.parents[0] and not ghost_basis:
+ raise Exception(
+ "arbitrary basis parents not yet supported with merges")
+ for revtree in revtrees[1:]:
+ for change in revtree.root_inventory._make_delta(basis_inv):
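+ # Each delta entry from _make_delta is an
+ # (old_path, new_path, file_id, entry) tuple; entry is the
+ # InventoryEntry for the file in this parent tree.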
+ if change[1] is None:
+ # Not present in this parent.
+ continue
+ if change[2] not in merged_ids:
+ if change[0] is not None:
+ basis_entry = basis_inv[change[2]]
+ merged_ids[change[2]] = [
+ # basis revid
+ basis_entry.revision,
+ # new tree revid
+ change[3].revision]
+ parent_entries[change[2]] = {
+ # basis parent
+ basis_entry.revision:basis_entry,
+ # this parent
+ change[3].revision:change[3],
+ }
+ else:
+ merged_ids[change[2]] = [change[3].revision]
+ parent_entries[change[2]] = {change[3].revision:change[3]}
+ else:
+ merged_ids[change[2]].append(change[3].revision)
+ parent_entries[change[2]][change[3].revision] = change[3]
+ else:
+ merged_ids = {}
+ # Setup the changes from the tree:
+ # changes maps file_id -> (change, [parent revision_ids])
+ changes = {}
+ for change in iter_changes:
+ # This probably looks up in basis_inv way too much.
+ if change[1][0] is not None:
+ head_candidate = [basis_inv[change[0]].revision]
+ else:
+ head_candidate = []
+ changes[change[0]] = change, merged_ids.get(change[0],
+ head_candidate)
+ unchanged_merged = set(merged_ids) - set(changes)
+ # Extend the changes dict with synthetic changes to record merges of
+ # texts.
+ for file_id in unchanged_merged:
+ # Record a merged version of these items that did not change vs the
+ # basis. This can be either identical parallel changes, or a revert
+ # of a specific file after a merge. The recorded content will be
+ # that of the current tree (which is the same as the basis), but
+ # the per-file graph will reflect a merge.
+ # NB:XXX: We are reconstructing path information we already had;
+ # it should be preserved instead.
+ # inv delta change: (file_id, (path_in_source, path_in_target),
+ # changed_content, versioned, parent, name, kind,
+ # executable)
+ try:
+ basis_entry = basis_inv[file_id]
+ except errors.NoSuchId:
+ # A change from basis->some_parents, but file_id isn't in basis,
+ # so it was new in the merge, which means it must have changed
+ # from basis -> current; as it hasn't, the add was reverted
+ # by the user. So we discard this change.
+ pass
+ else:
+ change = (file_id,
+ (basis_inv.id2path(file_id), tree.id2path(file_id)),
+ False, (True, True),
+ (basis_entry.parent_id, basis_entry.parent_id),
+ (basis_entry.name, basis_entry.name),
+ (basis_entry.kind, basis_entry.kind),
+ (basis_entry.executable, basis_entry.executable))
+ changes[file_id] = (change, merged_ids[file_id])
+ # changes contains tuples with the change and a set of inventory
+ # candidates for the file.
+ # inv delta is:
+ # old_path, new_path, file_id, new_inventory_entry
+ seen_root = False # Is the root in the basis delta?
+ inv_delta = self._basis_delta
+ modified_rev = self._new_revision_id
+ for change, head_candidates in changes.values():
+ if change[3][1]: # versioned in target.
+ # Several things may be happening here:
+ # We may have a fork in the per-file graph
+ # - record a change with the content from tree
+ # We may have a change against < all trees
+ # - carry over the tree that hasn't changed
+ # We may have a change against all trees
+ # - record the change with the content from tree
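+ # change is an iter_changes tuple: (file_id, (old_path, new_path),
+ # changed_content, versioned, parent, name, kind, executable),
+ # where the paths and the last five fields are (basis, target) pairs.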
+ kind = change[6][1]
+ file_id = change[0]
+ entry = _entry_factory[kind](file_id, change[5][1],
+ change[4][1])
+ head_set = self._heads(change[0], set(head_candidates))
+ heads = []
+ # Preserve ordering.
+ for head_candidate in head_candidates:
+ if head_candidate in head_set:
+ heads.append(head_candidate)
+ head_set.remove(head_candidate)
+ carried_over = False
+ if len(heads) == 1:
+ # Could be a carry-over situation:
+ parent_entry_revs = parent_entries.get(file_id, None)
+ if parent_entry_revs:
+ parent_entry = parent_entry_revs.get(heads[0], None)
+ else:
+ parent_entry = None
+ if parent_entry is None:
+ # The parent iter_changes was called against is the one
+ # that is the per-file head, so any change reported by
+ # iter_changes is relevant and valid.
+ carry_over_possible = False
+ else:
+ # could be a carry over situation
+ # A change against the basis may just indicate a merge,
+ # we need to check the content against the source of the
+ # merge to determine if it was changed after the merge
+ # or carried over.
+ if (parent_entry.kind != entry.kind or
+ parent_entry.parent_id != entry.parent_id or
+ parent_entry.name != entry.name):
+ # Metadata common to all entries has changed
+ # against per-file parent
+ carry_over_possible = False
+ else:
+ carry_over_possible = True
+ # per-type checks for changes against the parent_entry
+ # are done below.
+ else:
+ # Cannot be a carry-over situation
+ carry_over_possible = False
+ # Populate the entry in the delta
+ if kind == 'file':
+ # XXX: There is still a small race here: If someone reverts the content of a file
+ # after iter_changes examines and decides it has changed,
+ # we will unconditionally record a new version even if some
+ # other process reverts it while commit is running (with
+ # the revert happening after iter_changes did its
+ # examination).
+ if change[7][1]:
+ entry.executable = True
+ else:
+ entry.executable = False
+ if (carry_over_possible and
+ parent_entry.executable == entry.executable):
+ # Check the file length, content hash after reading
+ # the file.
+ nostore_sha = parent_entry.text_sha1
+ else:
+ nostore_sha = None
+ file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
+ try:
+ text = file_obj.read()
+ finally:
+ file_obj.close()
+ try:
+ entry.text_sha1, entry.text_size = self._add_text_to_weave(
+ file_id, text, heads, nostore_sha)
+ yield file_id, change[1][1], (entry.text_sha1, stat_value)
+ except errors.ExistingContent:
+ # No content change against a carry_over parent
+ # Perhaps this should also yield a fs hash update?
+ carried_over = True
+ entry.text_size = parent_entry.text_size
+ entry.text_sha1 = parent_entry.text_sha1
+ elif kind == 'symlink':
+ # Wants a path hint?
+ entry.symlink_target = tree.get_symlink_target(file_id)
+ if (carry_over_possible and
+ parent_entry.symlink_target == entry.symlink_target):
+ carried_over = True
+ else:
+ self._add_text_to_weave(change[0], '', heads, None)
+ elif kind == 'directory':
+ if carry_over_possible:
+ carried_over = True
+ else:
+ # Nothing to set on the entry.
+ # XXX: split into the Root and nonRoot versions.
+ if change[1][1] != '' or self.repository.supports_rich_root():
+ self._add_text_to_weave(change[0], '', heads, None)
+ elif kind == 'tree-reference':
+ if not self.repository._format.supports_tree_reference:
+ # This isn't quite sane as an error, but we shouldn't
+ # ever see this code path in practice: trees don't
+ # permit references when the repo doesn't support tree
+ # references.
+ raise errors.UnsupportedOperation(tree.add_reference,
+ self.repository)
+ reference_revision = tree.get_reference_revision(change[0])
+ entry.reference_revision = reference_revision
+ if (carry_over_possible and
+ parent_entry.reference_revision == reference_revision):
+ carried_over = True
+ else:
+ self._add_text_to_weave(change[0], '', heads, None)
+ else:
+ raise AssertionError('unknown kind %r' % kind)
+ if not carried_over:
+ entry.revision = modified_rev
+ else:
+ entry.revision = parent_entry.revision
+ else:
+ entry = None
+ new_path = change[1][1]
+ inv_delta.append((change[1][0], new_path, change[0], entry))
+ if new_path == '':
+ seen_root = True
+ self.new_inventory = None
+ # The initial commit adds a root directory, but this in itself is not
+ # a worthwhile commit.
+ if ((len(inv_delta) > 0 and basis_revision_id != _mod_revision.NULL_REVISION) or
+ (len(inv_delta) > 1 and basis_revision_id == _mod_revision.NULL_REVISION)):
+ # This should perhaps be guarded by a check that the basis we
+ # commit against is the basis for the commit and if not do a delta
+ # against the basis.
+ self._any_changes = True
+ if not seen_root:
+ # housekeeping root entry changes do not affect no-change commits.
+ self._require_root_change(tree)
+ self.basis_delta_revision = basis_revision_id
+
+ def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
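+ """Add new_text for file_id in this revision to the texts store.
+
+ :param parents: Per-file parent revision ids of the new text.
+ :param nostore_sha: If not None and the new text's sha1 matches it,
+ no new node is stored and errors.ExistingContent is raised.
+ :return: The (sha1, length) of the stored text.
+ """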
+ parent_keys = tuple([(file_id, parent) for parent in parents])
+ return self.repository.texts._add_text(
+ (file_id, self._new_revision_id), parent_keys, new_text,
+ nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
+
+
+class VersionedFileRootCommitBuilder(VersionedFileCommitBuilder):
+ """This commitbuilder actually records the root id"""
+
+ # the root entry gets versioned properly by this builder.
+ _versioned_root = True
+
+ def _check_root(self, ie, parent_invs, tree):
+ """Helper for record_entry_contents.
+
+ :param ie: An entry being added.
+ :param parent_invs: The inventories of the parent revisions of the
+ commit.
+ :param tree: The tree that is being committed.
+ """
+
+ def _require_root_change(self, tree):
+ """Enforce an appropriate root object change.
+
+ This is called once when record_iter_changes is called, if and only if
+ the root was not in the delta calculated by record_iter_changes.
+
+ :param tree: The tree which is being committed.
+ """
+ # versioned roots do not change unless the tree found a change.
+
+
+class VersionedFileRepository(Repository):
+ """Repository holding history for one or more branches.
+
+ The repository holds and retrieves historical information including
+ revisions and file history. It's normally accessed only by the Branch,
+ which views a particular line of development through that history.
+
+ The Repository builds on top of some byte storage facilities (the revisions,
+ signatures, inventories, texts and chk_bytes attributes) and a Transport,
+ which respectively provide byte storage and a means to access the (possibly
+ remote) disk.
+
+ The byte storage facilities are addressed via tuples, which we refer to
+ as 'keys' throughout the code base. Revision_keys, inventory_keys and
+ signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
+ (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
+ byte string made up of a hash identifier and a hash value.
+ We use this interface because it allows low friction with the underlying
+ code that implements disk indices, network encoding and other parts of
+ bzrlib.
+
+ :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
+ the serialised revisions for the repository. This can be used to obtain
+ revision graph information or to access raw serialised revisions.
+ The result of trying to insert data into the repository via this store
+ is undefined: it should be considered read-only except for implementors
+ of repositories.
+ :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
+ the serialised signatures for the repository. This can be used to
+ obtain access to raw serialised signatures. The result of trying to
+ insert data into the repository via this store is undefined: it should
+ be considered read-only except for implementors of repositories.
+ :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
+ the serialised inventories for the repository. This can be used to
+ obtain unserialised inventories. The result of trying to insert data
+ into the repository via this store is undefined: it should be
+ considered read-only except for implementors of repositories.
+ :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
+ texts of files and directories for the repository. This can be used to
+ obtain file texts or file graphs. Note that Repository.iter_file_bytes
+ is usually a better interface for accessing file texts.
+ The result of trying to insert data into the repository via this store
+ is undefined: it should be considered read-only except for implementors
+ of repositories.
+ :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
+ any data the repository chooses to store or have indexed by its hash.
+ The result of trying to insert data into the repository via this store
+ is undefined: it should be considered read-only except for implementors
+ of repositories.
+ :ivar _transport: Transport for file access to repository, typically
+ pointing to .bzr/repository.
+ """
+
+ # What class to use for a CommitBuilder. Often it's simpler to change this
+ # in a Repository class subclass rather than to override
+ # get_commit_builder.
+ _commit_builder_class = VersionedFileCommitBuilder
+
+ def add_fallback_repository(self, repository):
+ """Add a repository to use for looking up data not held locally.
+
+ :param repository: A repository.
+ """
+ if not self._format.supports_external_lookups:
+ raise errors.UnstackableRepositoryFormat(self._format, self.base)
+ # This can raise an exception, so should be done before we lock the
+ # fallback repository.
+ self._check_fallback_repository(repository)
+ if self.is_locked():
+ # This repository will call fallback.unlock() when we transition to
+ # the unlocked state, so we make sure to increment the lock count
+ repository.lock_read()
+ self._fallback_repositories.append(repository)
+ self.texts.add_fallback_versioned_files(repository.texts)
+ self.inventories.add_fallback_versioned_files(repository.inventories)
+ self.revisions.add_fallback_versioned_files(repository.revisions)
+ self.signatures.add_fallback_versioned_files(repository.signatures)
+ if self.chk_bytes is not None:
+ self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)
+
+ @only_raises(errors.LockNotHeld, errors.LockBroken)
+ def unlock(self):
+ super(VersionedFileRepository, self).unlock()
+ if self.control_files._lock_count == 0:
+ self._inventory_entry_cache.clear()
+
+ def add_inventory(self, revision_id, inv, parents):
+ """Add the inventory inv to the repository as revision_id.
+
+ :param parents: The revision ids of the parents that revision_id
+ is known to have and are in the repository already.
+
+ :returns: The validator (which is a sha1 digest, though what is sha'd is
+ repository format specific) of the serialized inventory.
+ """
+ if not self.is_in_write_group():
+ raise AssertionError("%r not in write group" % (self,))
+ _mod_revision.check_not_reserved_id(revision_id)
+ if not (inv.revision_id is None or inv.revision_id == revision_id):
+ raise AssertionError(
+ "Mismatch between inventory revision"
+ " id and insertion revid (%r, %r)"
+ % (inv.revision_id, revision_id))
+ if inv.root is None:
+ raise errors.RootMissing()
+ return self._add_inventory_checked(revision_id, inv, parents)
+
+ def _add_inventory_checked(self, revision_id, inv, parents):
+ """Add inv to the repository after checking the inputs.
+
+ This function can be overridden to allow different inventory styles.
+
+ :seealso: add_inventory, for the contract.
+ """
+ inv_lines = self._serializer.write_inventory_to_lines(inv)
+ return self._inventory_add_lines(revision_id, parents,
+ inv_lines, check_content=False)
+
+ def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
+ parents, basis_inv=None, propagate_caches=False):
+ """Add a new inventory expressed as a delta against another revision.
+
+ See the inventory developers documentation for the theory behind
+ inventory deltas.
+
+ :param basis_revision_id: The inventory id the delta was created
+ against. (This does not have to be a direct parent.)
+ :param delta: The inventory delta (see Inventory.apply_delta for
+ details).
+ :param new_revision_id: The revision id that the inventory is being
+ added for.
+ :param parents: The revision ids of the parents that revision_id is
+ known to have and are in the repository already. These are supplied
+ for repositories that depend on the inventory graph for revision
+ graph access, as well as for those that pun ancestry with delta
+ compression.
+ :param basis_inv: The basis inventory if it is already known,
+ otherwise None.
+ :param propagate_caches: If True, the caches for this inventory are
+ copied to and updated for the result if possible.
+
+ :returns: (validator, new_inv)
+ The validator (which is a sha1 digest, though what is sha'd is
+ repository format specific) of the serialized inventory, and the
+ resulting inventory.
+ """
+ if not self.is_in_write_group():
+ raise AssertionError("%r not in write group" % (self,))
+ _mod_revision.check_not_reserved_id(new_revision_id)
+ basis_tree = self.revision_tree(basis_revision_id)
+ basis_tree.lock_read()
+ try:
+ # Note that this mutates the inventory of basis_tree, which not all
+ # inventory implementations may support: A better idiom would be to
+ # return a new inventory, but as there is no revision tree cache in
+ # repository this is safe for now - RBC 20081013
+ if basis_inv is None:
+ basis_inv = basis_tree.root_inventory
+ basis_inv.apply_delta(delta)
+ basis_inv.revision_id = new_revision_id
+ return (self.add_inventory(new_revision_id, basis_inv, parents),
+ basis_inv)
+ finally:
+ basis_tree.unlock()
+
+ def _inventory_add_lines(self, revision_id, parents, lines,
+ check_content=True):
+ """Store lines in inv_vf and return the sha1 of the inventory."""
+ parents = [(parent,) for parent in parents]
+ result = self.inventories.add_lines((revision_id,), parents, lines,
+ check_content=check_content)[0]
+ self.inventories._access.flush()
+ return result
+
+ def add_revision(self, revision_id, rev, inv=None):
+ """Add rev to the revision store as revision_id.
+
+ :param revision_id: the revision id to use.
+ :param rev: The revision object.
+ :param inv: The inventory for the revision. If None, it will be looked
+ up in the inventory store.
+ """
+ # TODO: jam 20070210 Shouldn't we check rev.revision_id and
+ # rev.parent_ids?
+ _mod_revision.check_not_reserved_id(revision_id)
+ # check inventory present
+ if not self.inventories.get_parent_map([(revision_id,)]):
+ if inv is None:
+ raise errors.WeaveRevisionNotPresent(revision_id,
+ self.inventories)
+ else:
+ # yes, this is not suitable for adding with ghosts.
+ rev.inventory_sha1 = self.add_inventory(revision_id, inv,
+ rev.parent_ids)
+ else:
+ key = (revision_id,)
+ rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
+ self._add_revision(rev)
+
+ def _add_revision(self, revision):
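+ """Serialise revision and add it to the revisions store."""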
+ text = self._serializer.write_revision_to_string(revision)
+ key = (revision.revision_id,)
+ parents = tuple((parent,) for parent in revision.parent_ids)
+ self.revisions.add_lines(key, parents, osutils.split_lines(text))
+
+ def _check_inventories(self, checker):
+ """Check the inventories found from the revision scan.
+
+ This is responsible for verifying the sha1 of inventories and
+ creating a pending_keys set that covers data referenced by inventories.
+ """
+ bar = ui.ui_factory.nested_progress_bar()
+ try:
+ self._do_check_inventories(checker, bar)
+ finally:
+ bar.finished()
+
+ def _do_check_inventories(self, checker, bar):
+ """Helper for _check_inventories."""
+ revno = 0
+ keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
+ kinds = ['chk_bytes', 'texts']
+ count = len(checker.pending_keys)
+ bar.update(gettext("inventories"), 0, 2)
+ current_keys = checker.pending_keys
+ checker.pending_keys = {}
+ # Accumulate current checks.
+ for key in current_keys:
+ if key[0] != 'inventories' and key[0] not in kinds:
+ checker._report_items.append('unknown key type %r' % (key,))
+ keys[key[0]].add(key[1:])
+ if keys['inventories']:
+ # NB: output order *should* be roughly sorted - topo or
+ # inverse topo depending on repository - either way decent
+ # to just delta against. However, pre-CHK formats didn't
+ # try to optimise inventory layout on disk. As such the
+ # pre-CHK code path does not use inventory deltas.
+ last_object = None
+ for record in self.inventories.check(keys=keys['inventories']):
+ if record.storage_kind == 'absent':
+ checker._report_items.append(
+ 'Missing inventory {%s}' % (record.key,))
+ else:
+ last_object = self._check_record('inventories', record,
+ checker, last_object,
+ current_keys[('inventories',) + record.key])
+ del keys['inventories']
+ else:
+ return
+ bar.update(gettext("texts"), 1)
+ while (checker.pending_keys or keys['chk_bytes']
+ or keys['texts']):
+ # Something to check.
+ current_keys = checker.pending_keys
+ checker.pending_keys = {}
+ # Accumulate current checks.
+ for key in current_keys:
+ if key[0] not in kinds:
+ checker._report_items.append('unknown key type %r' % (key,))
+ keys[key[0]].add(key[1:])
+ # Check the outermost kind only - inventories || chk_bytes || texts
+ for kind in kinds:
+ if keys[kind]:
+ last_object = None
+ for record in getattr(self, kind).check(keys=keys[kind]):
+ if record.storage_kind == 'absent':
+ checker._report_items.append(
+ 'Missing %s {%s}' % (kind, record.key,))
+ else:
+ last_object = self._check_record(kind, record,
+ checker, last_object, current_keys[(kind,) + record.key])
+ keys[kind] = set()
+ break
+
+ def _check_record(self, kind, record, checker, last_object, item_data):
+ """Check a single text from this repository."""
+ if kind == 'inventories':
+ rev_id = record.key[0]
+ inv = self._deserialise_inventory(rev_id,
+ record.get_bytes_as('fulltext'))
+ if last_object is not None:
+ delta = inv._make_delta(last_object)
+ for old_path, path, file_id, ie in delta:
+ if ie is None:
+ continue
+ ie.check(checker, rev_id, inv)
+ else:
+ for path, ie in inv.iter_entries():
+ ie.check(checker, rev_id, inv)
+ if self._format.fast_deltas:
+ return inv
+ elif kind == 'chk_bytes':
+ # No code written to check chk_bytes for this repo format.
+ checker._report_items.append(
+ 'unsupported key type chk_bytes for %s' % (record.key,))
+ elif kind == 'texts':
+ self._check_text(record, checker, item_data)
+ else:
+ checker._report_items.append(
+ 'unknown key type %s for %s' % (kind, record.key))
+
+ def _check_text(self, record, checker, item_data):
+ """Check a single text."""
+ # Check it is extractable.
+ # TODO: check length.
+ if record.storage_kind == 'chunked':
+ chunks = record.get_bytes_as(record.storage_kind)
+ sha1 = osutils.sha_strings(chunks)
+ length = sum(map(len, chunks))
+ else:
+ content = record.get_bytes_as('fulltext')
+ sha1 = osutils.sha_string(content)
+ length = len(content)
+ if item_data and sha1 != item_data[1]:
+ checker._report_items.append(
+ 'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
+ (record.key, sha1, item_data[1], item_data[2]))
+
+ @needs_read_lock
+ def _eliminate_revisions_not_present(self, revision_ids):
+ """Check every revision id in revision_ids to see if we have it.
+
+ Returns a set of the present revisions.
+ """
+ result = []
+ graph = self.get_graph()
+ parent_map = graph.get_parent_map(revision_ids)
+ # The old API returned a list, should this actually be a set?
+ return parent_map.keys()
+
+ def __init__(self, _format, a_bzrdir, control_files):
+ """Instantiate a VersionedFileRepository.
+
+ :param _format: The format of the repository on disk.
+ :param a_bzrdir: The ControlDir of the repository.
+ :param control_files: Control files to use for locking, etc.
+ """
+ # In the future we will have a single api for all stores for
+ # getting file texts, inventories and revisions, then
+ # this construct will accept instances of those things.
+ super(VersionedFileRepository, self).__init__(_format, a_bzrdir,
+ control_files)
+ self._transport = control_files._transport
+ self.base = self._transport.base
+ # for tests
+ self._reconcile_does_inventory_gc = True
+ self._reconcile_fixes_text_parents = False
+ self._reconcile_backsup_inventory = True
+ # An InventoryEntry cache, used during deserialization
+ self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
+ # Is it safe to return inventory entries directly from the entry cache,
+ # rather than copying them?
+ self._safe_to_return_from_cache = False
+
+ def fetch(self, source, revision_id=None, find_ghosts=False,
+ fetch_spec=None):
+ """Fetch the content required to construct revision_id from source.
+
+ If revision_id is None and fetch_spec is None, then all content is
+ copied.
+
+ fetch() may not be used when the repository is in a write group -
+ either finish the current write group before using fetch, or use
+ fetch before starting the write group.
+
+ :param find_ghosts: Find and copy revisions in the source that are
+ ghosts in the target (and not reachable directly by walking out to
+ the first-present revision in target from revision_id).
+ :param revision_id: If specified, all the content needed for this
+ revision ID will be copied to the target. Fetch will determine for
+ itself which content needs to be copied.
+ :param fetch_spec: If specified, a SearchResult or
+ PendingAncestryResult that describes which revisions to copy. This
+ allows copying multiple heads at once. Mutually exclusive with
+ revision_id.
+ """
+ if fetch_spec is not None and revision_id is not None:
+ raise AssertionError(
+ "fetch_spec and revision_id are mutually exclusive.")
+ if self.is_in_write_group():
+ raise errors.InternalBzrError(
+ "May not fetch while in a write group.")
+ # fast path same-url fetch operations
+ # TODO: lift out to somewhere common with RemoteRepository
+ # <https://bugs.launchpad.net/bzr/+bug/401646>
+ if (self.has_same_location(source)
+ and fetch_spec is None
+ and self._has_same_fallbacks(source)):
+ # check that last_revision is in 'from' and then return a
+ # no-operation.
+ if (revision_id is not None and
+ not _mod_revision.is_null(revision_id)):
+ self.get_revision(revision_id)
+ return 0, []
+ inter = InterRepository.get(source, self)
+ if (fetch_spec is not None and
+ not getattr(inter, "supports_fetch_spec", False)):
+ raise errors.UnsupportedOperation(
+ "fetch_spec not supported for %r" % inter)
+ return inter.fetch(revision_id=revision_id,
+ find_ghosts=find_ghosts, fetch_spec=fetch_spec)
+
+ @needs_read_lock
+ def gather_stats(self, revid=None, committers=None):
+ """See Repository.gather_stats()."""
+ result = super(VersionedFileRepository, self).gather_stats(revid, committers)
+ # now gather global repository information
+ # XXX: This is available for many repos regardless of listability.
+ if self.user_transport.listable():
+ # XXX: do we want to define __len__()?
+ # Maybe the versionedfiles object should provide a different
+ # method to get the number of keys.
+ result['revisions'] = len(self.revisions.keys())
+ # result['size'] = t
+ return result
+
+ def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
+ timezone=None, committer=None, revprops=None,
+ revision_id=None, lossy=False):
+ """Obtain a CommitBuilder for this repository.
+
+ :param branch: Branch to commit to.
+ :param parents: Revision ids of the parents of the new revision.
+ :param config_stack: Configuration stack to use.
+ :param timestamp: Optional timestamp recorded for commit.
+ :param timezone: Optional timezone for timestamp.
+ :param committer: Optional committer to set for commit.
+ :param revprops: Optional dictionary of revision properties.
+ :param revision_id: Optional revision id.
+ :param lossy: Whether to discard data that can not be natively
+ represented, when pushing to a foreign VCS
+ """
+ if self._fallback_repositories and not self._format.supports_chks:
+ raise errors.BzrError("Cannot commit directly to a stacked branch"
+ " in pre-2a formats. See "
+ "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
+ result = self._commit_builder_class(self, parents, config_stack,
+ timestamp, timezone, committer, revprops, revision_id,
+ lossy)
+ self.start_write_group()
+ return result
+
+ def get_missing_parent_inventories(self, check_for_missing_texts=True):
+ """Return the keys of missing inventory parents for revisions added in
+ this write group.
+
+ A revision is not complete if the inventory delta for that revision
+ cannot be calculated. Therefore if the parent inventories of a
+ revision are not present, the revision is incomplete, and e.g. cannot
+ be streamed by a smart server. This method finds missing inventory
+ parents for revisions added in this write group.
+ """
+ if not self._format.supports_external_lookups:
+ # This is only an issue for stacked repositories
+ return set()
+ if not self.is_in_write_group():
+ raise AssertionError('not in a write group')
+
+ # XXX: We assume that every added revision already has its
+ # corresponding inventory, so we only check for parent inventories that
+ # might be missing, rather than all inventories.
+ parents = set(self.revisions._index.get_missing_parents())
+ parents.discard(_mod_revision.NULL_REVISION)
+ unstacked_inventories = self.inventories._index
+ present_inventories = unstacked_inventories.get_parent_map(
+ key[-1:] for key in parents)
+ parents.difference_update(present_inventories)
+ if len(parents) == 0:
+ # No missing parent inventories.
+ return set()
+ if not check_for_missing_texts:
+ return set(('inventories', rev_id) for (rev_id,) in parents)
+ # Ok, now we have a list of missing inventories. But these only matter
+ # if the inventories that reference them are missing some texts they
+ # appear to introduce.
+ # XXX: Texts referenced by all added inventories need to be present,
+ # but at the moment we're only checking for texts referenced by
+ # inventories at the graph's edge.
+ key_deps = self.revisions._index._key_dependencies
+ key_deps.satisfy_refs_for_keys(present_inventories)
+ referrers = frozenset(r[0] for r in key_deps.get_referrers())
+ file_ids = self.fileids_altered_by_revision_ids(referrers)
+ missing_texts = set()
+ for file_id, version_ids in file_ids.iteritems():
+ missing_texts.update(
+ (file_id, version_id) for version_id in version_ids)
+ present_texts = self.texts.get_parent_map(missing_texts)
+ missing_texts.difference_update(present_texts)
+ if not missing_texts:
+ # No texts are missing, so all revisions and their deltas are
+ # reconstructable.
+ return set()
+ # Alternatively the text versions could be returned as the missing
+ # keys, but this is likely to be less data.
+ missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
+ return missing_keys
+
+ @needs_read_lock
+ def has_revisions(self, revision_ids):
+ """Probe to find out the presence of multiple revisions.
+
+ :param revision_ids: An iterable of revision_ids.
+ :return: A set of the revision_ids that were present.
+ """
+ parent_map = self.revisions.get_parent_map(
+ [(rev_id,) for rev_id in revision_ids])
+ result = set()
+ if _mod_revision.NULL_REVISION in revision_ids:
+ result.add(_mod_revision.NULL_REVISION)
+ result.update([key[0] for key in parent_map])
+ return result
+
+ @needs_read_lock
+ def get_revision_reconcile(self, revision_id):
+ """'reconcile' helper routine that allows access to a revision always.
+
+ This variant of get_revision does not cross check the weave graph
+ against the revision one as get_revision does: but it should only
+ be used by reconcile, or reconcile-alike commands that are correcting
+ or testing the revision graph.
+ """
+ return self._get_revisions([revision_id])[0]
+
+ @needs_read_lock
+ def get_revisions(self, revision_ids):
+ """Get many revisions at once.
+
+ Repositories that need to check data on every revision read should
+ subclass this method.
+ """
+ return self._get_revisions(revision_ids)
+
+ @needs_read_lock
+ def _get_revisions(self, revision_ids):
+ """Core work logic to get many revisions without sanity checks."""
+ revs = {}
+ for revid, rev in self._iter_revisions(revision_ids):
+ if rev is None:
+ raise errors.NoSuchRevision(self, revid)
+ revs[revid] = rev
+ return [revs[revid] for revid in revision_ids]
+
+ def _iter_revisions(self, revision_ids):
+ """Iterate over revision objects.
+
+ :param revision_ids: An iterable of revisions to examine. None may be
+ passed to request all revisions known to the repository. Note that
+ not all repositories can find unreferenced revisions; for those
+ repositories only referenced ones will be returned.
+ :return: An iterator of (revid, revision) tuples. Absent revisions (
+ those asked for but not available) are returned as (revid, None).
+ """
+ if revision_ids is None:
+ revision_ids = self.all_revision_ids()
+ else:
+ for rev_id in revision_ids:
+ if not rev_id or not isinstance(rev_id, basestring):
+ raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
+ keys = [(key,) for key in revision_ids]
+ stream = self.revisions.get_record_stream(keys, 'unordered', True)
+ for record in stream:
+ revid = record.key[0]
+ if record.storage_kind == 'absent':
+ yield (revid, None)
+ else:
+ text = record.get_bytes_as('fulltext')
+ rev = self._serializer.read_revision_from_string(text)
+ yield (revid, rev)
+
+ @needs_write_lock
+ def add_signature_text(self, revision_id, signature):
+ """Store a signature text for a revision.
+
+ :param revision_id: Revision id of the revision
+ :param signature: Signature text.
+ """
+ self.signatures.add_lines((revision_id,), (),
+ osutils.split_lines(signature))
+
+ def find_text_key_references(self):
+ """Find the text key references within the repository.
+
+ :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
+ to whether they were referred to by the inventory of the
+ revision_id that they contain. The inventory texts from all present
+ revision ids are assessed to generate this report.
+ """
+ revision_keys = self.revisions.keys()
+ w = self.inventories
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ return self._serializer._find_text_key_references(
+ w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
+ finally:
+ pb.finished()
+
+ def _inventory_xml_lines_for_keys(self, keys):
+ """Get a line iterator of the sort needed for findind references.
+
+ Not relevant for non-xml inventory repositories.
+
+ Ghosts in keys are ignored.
+
+ :param keys: The revision keys for the inventories to inspect.
+ :return: An iterator over (inventory line, revid) for the fulltexts of
+ all of the xml inventories specified by revision_keys.
+ """
+ stream = self.inventories.get_record_stream(keys, 'unordered', True)
+ for record in stream:
+ if record.storage_kind != 'absent':
+ chunks = record.get_bytes_as('chunked')
+ revid = record.key[-1]
+ lines = osutils.chunks_to_lines(chunks)
+ for line in lines:
+ yield line, revid
+
+ def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
+ revision_keys):
+ """Helper routine for fileids_altered_by_revision_ids.
+
+ This performs the translation of xml lines to revision ids.
+
+ :param line_iterator: An iterator of (line, origin_version_id) pairs.
+ :param revision_keys: The revision ids to filter for. This should be a
+ set or other type which supports efficient __contains__ lookups, as
+ the revision key from each parsed line will be looked up in the
+ revision_keys filter.
+ :return: a dictionary mapping altered file-ids to an iterable of
+ revision_ids. Each altered file-id has the exact revision_ids that
+ altered it listed explicitly.
+ """
+ seen = set(self._serializer._find_text_key_references(
+ line_iterator).iterkeys())
+ parent_keys = self._find_parent_keys_of_revisions(revision_keys)
+ parent_seen = set(self._serializer._find_text_key_references(
+ self._inventory_xml_lines_for_keys(parent_keys)))
+ new_keys = seen - parent_seen
+ result = {}
+ setdefault = result.setdefault
+ for key in new_keys:
+ setdefault(key[0], set()).add(key[-1])
+ return result
+
+ def _find_parent_keys_of_revisions(self, revision_keys):
+ """Similar to _find_parent_ids_of_revisions, but used with keys.
+
+ :param revision_keys: An iterable of revision_keys.
+ :return: The parents of all revision_keys that are not already in
+ revision_keys
+ """
+ parent_map = self.revisions.get_parent_map(revision_keys)
+ parent_keys = set()
+ map(parent_keys.update, parent_map.itervalues())
+ parent_keys.difference_update(revision_keys)
+ parent_keys.discard(_mod_revision.NULL_REVISION)
+ return parent_keys
+
+ def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
+ """Find the file ids and versions affected by revisions.
+
+ :param revision_ids: an iterable containing revision ids.
+ :param _inv_weave: The inventory weave from this repository or None.
+ If None, the inventory weave will be opened automatically.
+ :return: a dictionary mapping altered file-ids to an iterable of
+ revision_ids. Each altered file-id has the exact revision_ids that
+ altered it listed explicitly.
+ """
+ selected_keys = set((revid,) for revid in revision_ids)
+ w = _inv_weave or self.inventories
+ return self._find_file_ids_from_xml_inventory_lines(
+ w.iter_lines_added_or_present_in_keys(
+ selected_keys, pb=None),
+ selected_keys)
+
+ def iter_files_bytes(self, desired_files):
+ """Iterate through file versions.
+
+ Files will not necessarily be returned in the order they occur in
+ desired_files. No specific order is guaranteed.
+
+ Yields pairs of identifier, bytes_iterator. identifier is an opaque
+ value supplied by the caller as part of desired_files. It should
+ uniquely identify the file version in the caller's context. (Examples:
+ an index number or a TreeTransform trans_id.)
+
+ bytes_iterator is an iterable of bytestrings for the file. The
+ kind of iterable and length of the bytestrings are unspecified, but for
+ this implementation, it is a list of bytes produced by
+ VersionedFile.get_record_stream().
+
+ :param desired_files: a list of (file_id, revision_id, identifier)
+ triples
+ """
+ text_keys = {}
+ for file_id, revision_id, callable_data in desired_files:
+ text_keys[(file_id, revision_id)] = callable_data
+ for record in self.texts.get_record_stream(text_keys, 'unordered', True):
+ if record.storage_kind == 'absent':
+ raise errors.RevisionNotPresent(record.key[1], record.key[0])
+ yield text_keys[record.key], record.get_bytes_as('chunked')
+
+ def _generate_text_key_index(self, text_key_references=None,
+ ancestors=None):
+ """Generate a new text key index for the repository.
+
+ This is an expensive function that will take considerable time to run.
+
+ :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
+ list of parents, also text keys. When a given key has no parents,
+ the parents list will be [NULL_REVISION].
+ """
+ # All revisions, to find inventory parents.
+ if ancestors is None:
+ graph = self.get_graph()
+ ancestors = graph.get_parent_map(self.all_revision_ids())
+ if text_key_references is None:
+ text_key_references = self.find_text_key_references()
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ return self._do_generate_text_key_index(ancestors,
+ text_key_references, pb)
+ finally:
+ pb.finished()
+
+ def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
+ """Helper for _generate_text_key_index to avoid deep nesting."""
+ revision_order = tsort.topo_sort(ancestors)
+ invalid_keys = set()
+ revision_keys = {}
+ for revision_id in revision_order:
+ revision_keys[revision_id] = set()
+ text_count = len(text_key_references)
+ # a cache of the text keys to allow reuse; costs a dict of all the
+ # keys, but saves a 2-tuple for every child of a given key.
+ text_key_cache = {}
+ for text_key, valid in text_key_references.iteritems():
+ if not valid:
+ invalid_keys.add(text_key)
+ else:
+ revision_keys[text_key[1]].add(text_key)
+ text_key_cache[text_key] = text_key
+ del text_key_references
+ text_index = {}
+ text_graph = graph.Graph(graph.DictParentsProvider(text_index))
+ NULL_REVISION = _mod_revision.NULL_REVISION
+ # Set a cache with a size of 10 - this suffices for bzr.dev but may be
+ # too small for large or very branchy trees. However, for 55K path
+ # trees, it would be trivially easy to use too much memory. Ideally we
+ # could gauge this by looking at available real memory etc, but this is
+ # always a tricky proposition.
+ inventory_cache = lru_cache.LRUCache(10)
+ batch_size = 10 # should be ~150MB on a 55K path tree
+ batch_count = len(revision_order) / batch_size + 1
+ processed_texts = 0
+ pb.update(gettext("Calculating text parents"), processed_texts, text_count)
+ for offset in xrange(batch_count):
+ to_query = revision_order[offset * batch_size:(offset + 1) *
+ batch_size]
+ if not to_query:
+ break
+ for revision_id in to_query:
+ parent_ids = ancestors[revision_id]
+ for text_key in revision_keys[revision_id]:
+ pb.update(gettext("Calculating text parents"), processed_texts)
+ processed_texts += 1
+ candidate_parents = []
+ for parent_id in parent_ids:
+ parent_text_key = (text_key[0], parent_id)
+ try:
+ check_parent = parent_text_key not in \
+ revision_keys[parent_id]
+ except KeyError:
+ # the parent parent_id is a ghost:
+ check_parent = False
+ # truncate the derived graph against this ghost.
+ parent_text_key = None
+ if check_parent:
+ # look at the parent commit details inventories to
+ # determine possible candidates in the per file graph.
+ # TODO: cache here.
+ try:
+ inv = inventory_cache[parent_id]
+ except KeyError:
+ inv = self.revision_tree(parent_id).root_inventory
+ inventory_cache[parent_id] = inv
+ try:
+ parent_entry = inv[text_key[0]]
+ except (KeyError, errors.NoSuchId):
+ parent_entry = None
+ if parent_entry is not None:
+ parent_text_key = (
+ text_key[0], parent_entry.revision)
+ else:
+ parent_text_key = None
+ if parent_text_key is not None:
+ candidate_parents.append(
+ text_key_cache[parent_text_key])
+ parent_heads = text_graph.heads(candidate_parents)
+ new_parents = list(parent_heads)
+ new_parents.sort(key=lambda x:candidate_parents.index(x))
+ if new_parents == []:
+ new_parents = [NULL_REVISION]
+ text_index[text_key] = new_parents
+
+ for text_key in invalid_keys:
+ text_index[text_key] = [NULL_REVISION]
+ return text_index
+
+ def item_keys_introduced_by(self, revision_ids, _files_pb=None):
+ """Get an iterable listing the keys of all the data introduced by a set
+ of revision IDs.
+
+ The keys will be ordered so that the corresponding items can be safely
+ fetched and inserted in that order.
+
+ :returns: An iterable producing tuples of (knit-kind, file-id,
+ versions). knit-kind is one of 'file', 'inventory', 'signatures',
+ 'revisions'. file-id is None unless knit-kind is 'file'.
+ """
+ for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
+ yield result
+ del _files_pb
+ for result in self._find_non_file_keys_to_fetch(revision_ids):
+ yield result
+
+ def _find_file_keys_to_fetch(self, revision_ids, pb):
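+ """Yield ('file', file_id, altered_versions) for texts to fetch."""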
+ # XXX: it's a bit weird to control the inventory weave caching in this
+ # generator. Ideally the caching would be done in fetch.py I think. Or
+ # maybe this generator should explicitly have the contract that it
+ # should not be iterated until the previously yielded item has been
+ # processed?
+ inv_w = self.inventories
+
+ # file ids that changed
+ file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
+ count = 0
+ num_file_ids = len(file_ids)
+ for file_id, altered_versions in file_ids.iteritems():
+ if pb is not None:
+ pb.update(gettext("Fetch texts"), count, num_file_ids)
+ count += 1
+ yield ("file", file_id, altered_versions)
+
+ def _find_non_file_keys_to_fetch(self, revision_ids):
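+ """Yield (knit-kind, None, versions) for the non-file data to fetch."""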
+ # inventory
+ yield ("inventory", None, revision_ids)
+
+ # signatures
+ # XXX: Note that at the moment no callers actually pay attention to
+ # this return; instead they just use the list of revision ids and
+ # ignore missing sigs. Consider removing this work entirely.
+ revisions_with_signatures = set(self.signatures.get_parent_map(
+ [(r,) for r in revision_ids]))
+ revisions_with_signatures = set(
+ [r for (r,) in revisions_with_signatures])
+ revisions_with_signatures.intersection_update(revision_ids)
+ yield ("signatures", None, revisions_with_signatures)
+
+ # revisions
+ yield ("revisions", None, revision_ids)
+
+ @needs_read_lock
+ def get_inventory(self, revision_id):
+ """Get Inventory object by revision id."""
+ return self.iter_inventories([revision_id]).next()
+
+ def iter_inventories(self, revision_ids, ordering=None):
+ """Get many inventories by revision_ids.
+
+ This will buffer some or all of the texts used in constructing the
+ inventories in memory, but will only parse a single inventory at a
+ time.
+
+ :param revision_ids: The expected revision ids of the inventories.
+ :param ordering: optional ordering, e.g. 'topological'. If not
+ specified, the order of revision_ids will be preserved (by
+ buffering if necessary).
+ :return: An iterator of inventories.
+ """
+ if ((None in revision_ids)
+ or (_mod_revision.NULL_REVISION in revision_ids)):
+ raise ValueError('cannot get null revision inventory')
+ for inv, revid in self._iter_inventories(revision_ids, ordering):
+ if inv is None:
+ raise errors.NoSuchRevision(self, revid)
+ yield inv
+
+ def _iter_inventories(self, revision_ids, ordering):
+ """single-document based inventory iteration."""
+ inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
+ for text, revision_id in inv_xmls:
+ if text is None:
+ yield None, revision_id
+ else:
+ yield self._deserialise_inventory(revision_id, text), revision_id
+
+ def _iter_inventory_xmls(self, revision_ids, ordering):
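+ """Yield (inventory text, revision_id) pairs, with None for absent ones.
+
+ If ordering is None, the requested order is preserved by buffering
+ results as needed; otherwise records are yielded as the underlying
+ record stream returns them.
+ """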
+ if ordering is None:
+ order_as_requested = True
+ ordering = 'unordered'
+ else:
+ order_as_requested = False
+ keys = [(revision_id,) for revision_id in revision_ids]
+ if not keys:
+ return
+ if order_as_requested:
+ key_iter = iter(keys)
+ next_key = key_iter.next()
+ stream = self.inventories.get_record_stream(keys, ordering, True)
+ text_chunks = {}
+ for record in stream:
+ if record.storage_kind != 'absent':
+ chunks = record.get_bytes_as('chunked')
+ if order_as_requested:
+ text_chunks[record.key] = chunks
+ else:
+ yield ''.join(chunks), record.key[-1]
+ else:
+ yield None, record.key[-1]
+ if order_as_requested:
+ # Yield as many results as we can while preserving order.
+ while next_key in text_chunks:
+ chunks = text_chunks.pop(next_key)
+ yield ''.join(chunks), next_key[-1]
+ try:
+ next_key = key_iter.next()
+ except StopIteration:
+ # We still want to fully consume the get_record_stream,
+ # just in case it is not actually finished at this point
+ next_key = None
+ break
+
+ def _deserialise_inventory(self, revision_id, xml):
+ """Transform the xml into an inventory object.
+
+ :param revision_id: The expected revision id of the inventory.
+ :param xml: A serialised inventory.
+ """
+ result = self._serializer.read_inventory_from_string(xml, revision_id,
+ entry_cache=self._inventory_entry_cache,
+ return_from_cache=self._safe_to_return_from_cache)
+ if result.revision_id != revision_id:
+ raise AssertionError('revision id mismatch %s != %s' % (
+ result.revision_id, revision_id))
+ return result
+
+ def get_serializer_format(self):
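+ """Return the format number of this repository's serializer."""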
+ return self._serializer.format_num
+
+ @needs_read_lock
+ def _get_inventory_xml(self, revision_id):
+ """Get serialized inventory as a string."""
+ texts = self._iter_inventory_xmls([revision_id], 'unordered')
+ text, revision_id = texts.next()
+ if text is None:
+ raise errors.NoSuchRevision(self, revision_id)
+ return text
+
+ @needs_read_lock
+ def revision_tree(self, revision_id):
+ """Return Tree for a revision on this branch.
+
+ `revision_id` may be NULL_REVISION for the empty tree revision.
+ """
+ revision_id = _mod_revision.ensure_null(revision_id)
+ # TODO: refactor this to use an existing revision object
+ # so we don't need to read it in twice.
+ if revision_id == _mod_revision.NULL_REVISION:
+ return InventoryRevisionTree(self,
+ Inventory(root_id=None), _mod_revision.NULL_REVISION)
+ else:
+ inv = self.get_inventory(revision_id)
+ return InventoryRevisionTree(self, inv, revision_id)
+
+ def revision_trees(self, revision_ids):
+ """Return Trees for revisions in this repository.
+
+ :param revision_ids: a sequence of revision-ids;
+ a revision-id may not be None or 'null:'
+ """
+ inventories = self.iter_inventories(revision_ids)
+ for inv in inventories:
+ yield InventoryRevisionTree(self, inv, inv.revision_id)
+
+ def _filtered_revision_trees(self, revision_ids, file_ids):
+ """Return Tree for a revision on this branch with only some files.
+
+ :param revision_ids: a sequence of revision-ids;
+ a revision-id may not be None or 'null:'
+ :param file_ids: if not None, the result is filtered
+ so that only those file-ids, their parents and their
+ children are included.
+ """
+ inventories = self.iter_inventories(revision_ids)
+ for inv in inventories:
+ # Should we introduce a FilteredRevisionTree class rather
+ # than pre-filter the inventory here?
+ filtered_inv = inv.filter(file_ids)
+ yield InventoryRevisionTree(self, filtered_inv, filtered_inv.revision_id)
+
+ def get_parent_map(self, revision_ids):
+ """See graph.StackedParentsProvider.get_parent_map"""
+ # revisions index works in keys; this just works in revisions
+ # therefore wrap and unwrap
+ query_keys = []
+ result = {}
+ for revision_id in revision_ids:
+ if revision_id == _mod_revision.NULL_REVISION:
+ result[revision_id] = ()
+ elif revision_id is None:
+ raise ValueError('get_parent_map(None) is not valid')
+ else:
+ query_keys.append((revision_id,))
+ for ((revision_id,), parent_keys) in \
+ self.revisions.get_parent_map(query_keys).iteritems():
+ if parent_keys:
+ result[revision_id] = tuple([parent_revid
+ for (parent_revid,) in parent_keys])
+ else:
+ result[revision_id] = (_mod_revision.NULL_REVISION,)
+ return result
+
+ @needs_read_lock
+ def get_known_graph_ancestry(self, revision_ids):
+ """Return the known graph for a set of revision ids and their ancestors.
+ """
+ st = static_tuple.StaticTuple
+ revision_keys = [st(r_id).intern() for r_id in revision_ids]
+ known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
+ return graph.GraphThunkIdsToKeys(known_graph)
+
+ @needs_read_lock
+ def get_file_graph(self):
+ """Return the graph walker for text revisions."""
+ return graph.Graph(self.texts)
+
+ def revision_ids_to_search_result(self, result_set):
+ """Convert a set of revision ids to a graph SearchResult."""
+ result_parents = set()
+ for parents in self.get_graph().get_parent_map(
+ result_set).itervalues():
+ result_parents.update(parents)
+ included_keys = result_set.intersection(result_parents)
+ start_keys = result_set.difference(included_keys)
+ exclude_keys = result_parents.difference(result_set)
+ result = vf_search.SearchResult(start_keys, exclude_keys,
+ len(result_set), result_set)
+ return result
+
+ def _get_versioned_file_checker(self, text_key_references=None,
+ ancestors=None):
+ """Return an object suitable for checking versioned files.
+
+ :param text_key_references: if non-None, an already built
+ dictionary mapping text keys ((fileid, revision_id) tuples)
+ to whether they were referred to by the inventory of the
+ revision_id that they contain. If None, this will be
+ calculated.
+ :param ancestors: Optional result from
+ self.get_graph().get_parent_map(self.all_revision_ids()) if already
+ available.
+ """
+ return _VersionedFileChecker(self,
+ text_key_references=text_key_references, ancestors=ancestors)
+
+ @needs_read_lock
+ def has_signature_for_revision_id(self, revision_id):
+ """Query for a revision signature for revision_id in the repository."""
+ if not self.has_revision(revision_id):
+ raise errors.NoSuchRevision(self, revision_id)
+ sig_present = (1 == len(
+ self.signatures.get_parent_map([(revision_id,)])))
+ return sig_present
+
+ @needs_read_lock
+ def get_signature_text(self, revision_id):
+ """Return the text for a signature."""
+ stream = self.signatures.get_record_stream([(revision_id,)],
+ 'unordered', True)
+ record = stream.next()
+ if record.storage_kind == 'absent':
+ raise errors.NoSuchRevision(self, revision_id)
+ return record.get_bytes_as('fulltext')
+
+ @needs_read_lock
+ def _check(self, revision_ids, callback_refs, check_repo):
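+ """Run a VersionedFileCheck over this repository and return the result."""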
+ result = check.VersionedFileCheck(self, check_repo=check_repo)
+ result.check(callback_refs)
+ return result
+
+ def _find_inconsistent_revision_parents(self, revisions_iterator=None):
+ """Find revisions with different parent lists in the revision object
+ and in the index graph.
+
+ :param revisions_iterator: None, or an iterator of (revid,
+ Revision-or-None). This iterator controls the revisions checked.
+ :returns: an iterator yielding tuples of (revision-id, parents-in-index,
+ parents-in-revision).
+ """
+ if not self.is_locked():
+ raise AssertionError()
+ vf = self.revisions
+ if revisions_iterator is None:
+ revisions_iterator = self._iter_revisions(None)
+ for revid, revision in revisions_iterator:
+ if revision is None:
+ # Absent revision: nothing to compare against, skip it.
+ continue
+ parent_map = vf.get_parent_map([(revid,)])
+ parents_according_to_index = tuple(parent[-1] for parent in
+ parent_map[(revid,)])
+ parents_according_to_revision = tuple(revision.parent_ids)
+ if parents_according_to_index != parents_according_to_revision:
+ yield (revid, parents_according_to_index,
+ parents_according_to_revision)
+
+ def _check_for_inconsistent_revision_parents(self):
+ inconsistencies = list(self._find_inconsistent_revision_parents())
+ if inconsistencies:
+ raise errors.BzrCheckError(
+ "Revision knit has inconsistent parents.")
+
+ def _get_sink(self):
+ """Return a sink for streaming into this repository."""
+ return StreamSink(self)
+
+ def _get_source(self, to_format):
+ """Return a source for streaming from this repository."""
+ return StreamSource(self, to_format)
+
+
+class MetaDirVersionedFileRepository(MetaDirRepository,
+ VersionedFileRepository):
+ """Repositories in a meta-dir, that work via versioned file objects."""
+
+ def __init__(self, _format, a_bzrdir, control_files):
+ super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
+ control_files)
+
+
+class MetaDirVersionedFileRepositoryFormat(RepositoryFormatMetaDir,
+ VersionedFileRepositoryFormat):
+ """Base class for repository formats using versioned files in metadirs."""
+
+
+class StreamSink(object):
+ """An object that can insert a stream into a repository.
+
+ This interface handles the complexity of reserialising inventories and
+ revisions from different formats, and allows unidirectional insertion into
+ stacked repositories without looking for the missing basis parents
+ beforehand.
+ """
+
+ def __init__(self, target_repo):
+ self.target_repo = target_repo
+
+ def insert_stream(self, stream, src_format, resume_tokens):
+ """Insert a stream's content into the target repository.
+
+ :param src_format: a bzr repository format.
+
+ :return: a list of resume tokens and an iterable of keys additional
+ items required before the insertion can be completed.
+ """
+ self.target_repo.lock_write()
+ try:
+ if resume_tokens:
+ self.target_repo.resume_write_group(resume_tokens)
+ is_resume = True
+ else:
+ self.target_repo.start_write_group()
+ is_resume = False
+ try:
+ # locked_insert_stream performs a commit|suspend.
+ missing_keys = self.insert_stream_without_locking(stream,
+ src_format, is_resume)
+ if missing_keys:
+ # suspend the write group and tell the caller what is
+ # missing. We know we can suspend or else we would not have
+ # entered this code path. (All repositories that can handle
+ # missing keys can handle suspending a write group).
+ write_group_tokens = self.target_repo.suspend_write_group()
+ return write_group_tokens, missing_keys
+ hint = self.target_repo.commit_write_group()
+ to_serializer = self.target_repo._format._serializer
+ src_serializer = src_format._serializer
+ if (to_serializer != src_serializer and
+ self.target_repo._format.pack_compresses):
+ self.target_repo.pack(hint=hint)
+ return [], set()
+ except:
+ self.target_repo.abort_write_group(suppress_errors=True)
+ raise
+ finally:
+ self.target_repo.unlock()
+
+ def insert_stream_without_locking(self, stream, src_format,
+ is_resume=False):
+ """Insert a stream's content into the target repository.
+
+ This assumes that you already have a locked repository and an active
+ write group.
+
+ :param src_format: a bzr repository format.
+ :param is_resume: Passed down to get_missing_parent_inventories to
+ indicate if we should be checking for missing texts at the same
+ time.
+
+ :return: A set of keys that are missing.
+ """
+ if not self.target_repo.is_write_locked():
+ raise errors.ObjectNotLocked(self)
+ if not self.target_repo.is_in_write_group():
+ raise errors.BzrError('you must already be in a write group')
+ to_serializer = self.target_repo._format._serializer
+ src_serializer = src_format._serializer
+ new_pack = None
+ if to_serializer == src_serializer:
+ # If serializers match and the target is a pack repository, set the
+ # write cache size on the new pack. This avoids poor performance
+ # on transports where append is unbuffered (such as
+ # RemoteTransport). This is safe to do because nothing should read
+ # back from the target repository while a stream with matching
+ # serialization is being inserted.
+ # The exception is that a delta record from the source that should
+ # be a fulltext may need to be expanded by the target (see
+ # test_fetch_revisions_with_deltas_into_pack); but we take care to
+ # explicitly flush any buffered writes first in that rare case.
+ try:
+ new_pack = self.target_repo._pack_collection._new_pack
+ except AttributeError:
+ # Not a pack repository
+ pass
+ else:
+ new_pack.set_write_cache_size(1024*1024)
+ for substream_type, substream in stream:
+ if 'stream' in debug.debug_flags:
+ mutter('inserting substream: %s', substream_type)
+ if substream_type == 'texts':
+ self.target_repo.texts.insert_record_stream(substream)
+ elif substream_type == 'inventories':
+ if src_serializer == to_serializer:
+ self.target_repo.inventories.insert_record_stream(
+ substream)
+ else:
+ self._extract_and_insert_inventories(
+ substream, src_serializer)
+ elif substream_type == 'inventory-deltas':
+ self._extract_and_insert_inventory_deltas(
+ substream, src_serializer)
+ elif substream_type == 'chk_bytes':
+ # XXX: This doesn't support conversions, as it assumes the
+ # conversion was done in the fetch code.
+ self.target_repo.chk_bytes.insert_record_stream(substream)
+ elif substream_type == 'revisions':
+ # This may fall back to extract-and-insert more often than
+ # required if the serializers are different only in terms of
+ # the inventory.
+ if src_serializer == to_serializer:
+ self.target_repo.revisions.insert_record_stream(substream)
+ else:
+ self._extract_and_insert_revisions(substream,
+ src_serializer)
+ elif substream_type == 'signatures':
+ self.target_repo.signatures.insert_record_stream(substream)
+ else:
+ raise AssertionError('kaboom! %s' % (substream_type,))
+ # Done inserting data, and the missing_keys calculations will try to
+ # read back from the inserted data, so flush the writes to the new pack
+ # (if this is pack format).
+ if new_pack is not None:
+ new_pack._write_data('', flush=True)
+ # Find all the new revisions (including ones from resume_tokens)
+ missing_keys = self.target_repo.get_missing_parent_inventories(
+ check_for_missing_texts=is_resume)
+ try:
+ for prefix, versioned_file in (
+ ('texts', self.target_repo.texts),
+ ('inventories', self.target_repo.inventories),
+ ('revisions', self.target_repo.revisions),
+ ('signatures', self.target_repo.signatures),
+ ('chk_bytes', self.target_repo.chk_bytes),
+ ):
+ if versioned_file is None:
+ continue
+ # TODO: key is often going to be a StaticTuple object
+ # I don't believe we can define a method by which
+ # (prefix,) + StaticTuple will work, though we could
+ # define a StaticTuple.sq_concat that would allow you to
+ # pass in either a tuple or a StaticTuple as the second
+ # object, so instead we could have:
+ # StaticTuple(prefix) + key here...
+ missing_keys.update((prefix,) + key for key in
+ versioned_file.get_missing_compression_parent_keys())
+ except NotImplementedError:
+ # cannot even attempt suspending, and anything that was missing
+ # would already have failed during stream insertion.
+ missing_keys = set()
+ return missing_keys
+
+ def _extract_and_insert_inventory_deltas(self, substream, serializer):
+ target_rich_root = self.target_repo._format.rich_root_data
+ target_tree_refs = self.target_repo._format.supports_tree_reference
+ for record in substream:
+ # Insert the delta directly
+ inventory_delta_bytes = record.get_bytes_as('fulltext')
+ deserialiser = inventory_delta.InventoryDeltaDeserializer()
+ try:
+ parse_result = deserialiser.parse_text_bytes(
+ inventory_delta_bytes)
+ except inventory_delta.IncompatibleInventoryDelta, err:
+ mutter("Incompatible delta: %s", err.msg)
+ raise errors.IncompatibleRevision(self.target_repo._format)
+ basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
+ revision_id = new_id
+ parents = [key[0] for key in record.parents]
+ self.target_repo.add_inventory_by_delta(
+ basis_id, inv_delta, revision_id, parents)
+
+ def _extract_and_insert_inventories(self, substream, serializer,
+ parse_delta=None):
+ """Generate a new inventory versionedfile in target, converting data.
+
+ The inventory is retrieved from the source (deserializing it) and
+ stored in the target (reserializing it in a different format).
+ """
+ target_rich_root = self.target_repo._format.rich_root_data
+ target_tree_refs = self.target_repo._format.supports_tree_reference
+ for record in substream:
+ # It's not a delta, so it must be a fulltext in the source
+ # serializer's format.
+ bytes = record.get_bytes_as('fulltext')
+ revision_id = record.key[0]
+ inv = serializer.read_inventory_from_string(bytes, revision_id)
+ parents = [key[0] for key in record.parents]
+ self.target_repo.add_inventory(revision_id, inv, parents)
+ # No need to keep holding this full inv in memory when the rest of
+ # the substream is likely to be all deltas.
+ del inv
+
+ def _extract_and_insert_revisions(self, substream, serializer):
+ for record in substream:
+ bytes = record.get_bytes_as('fulltext')
+ revision_id = record.key[0]
+ rev = serializer.read_revision_from_string(bytes)
+ if rev.revision_id != revision_id:
+ raise AssertionError('wtf: %s != %s' % (rev, revision_id))
+ self.target_repo.add_revision(revision_id, rev)
+
+ def finished(self):
+ if self.target_repo._format._fetch_reconcile:
+ self.target_repo.reconcile()
+
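+# Illustrative sketch only (not the actual fetch implementation, which lives
+# in bzrlib.fetch): a caller would typically obtain a sink from the target
+# and a source from the origin, push a stream, and retry once with the
+# returned resume tokens if the sink reports missing parent inventories.
+# `search`, `target_repo` and `from_repo` are assumed names here.
+#
+#   sink = target_repo._get_sink()
+#   source = from_repo._get_source(target_repo._format)
+#   tokens, missing = sink.insert_stream(
+#       source.get_stream(search), from_repo._format, [])
+#   if missing:
+#       tokens, missing = sink.insert_stream(
+#           source.get_stream_for_missing_keys(missing),
+#           from_repo._format, tokens)
+#   sink.finished()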
+
+class StreamSource(object):
+ """A source of a stream for fetching between repositories."""
+
+ def __init__(self, from_repository, to_format):
+ """Create a StreamSource streaming from from_repository."""
+ self.from_repository = from_repository
+ self.to_format = to_format
+ self._record_counter = RecordCounter()
+
+ def delta_on_metadata(self):
+ """Return True if delta's are permitted on metadata streams.
+
+ That is on revisions and signatures.
+ """
+ src_serializer = self.from_repository._format._serializer
+ target_serializer = self.to_format._serializer
+ return (self.to_format._fetch_uses_deltas and
+ src_serializer == target_serializer)
+
+ def _fetch_revision_texts(self, revs):
+ # fetch signatures first and then the revision texts
+ # may need to be an InterRevisionStore call here.
+ from_sf = self.from_repository.signatures
+ # A missing signature is just skipped.
+ keys = [(rev_id,) for rev_id in revs]
+ signatures = versionedfile.filter_absent(from_sf.get_record_stream(
+ keys,
+ self.to_format._fetch_order,
+ not self.to_format._fetch_uses_deltas))
+ # If a revision has a delta, this is actually expanded inside the
+ # insert_record_stream code now, which is an alternate fix for
+ # bug #261339
+ from_rf = self.from_repository.revisions
+ revisions = from_rf.get_record_stream(
+ keys,
+ self.to_format._fetch_order,
+ not self.delta_on_metadata())
+ return [('signatures', signatures), ('revisions', revisions)]
+
+ def _generate_root_texts(self, revs):
+ """This will be called by get_stream between fetching weave texts and
+ fetching the inventory weave.
+ """
+ if self._rich_root_upgrade():
+ return _mod_fetch.Inter1and2Helper(
+ self.from_repository).generate_root_texts(revs)
+ else:
+ return []
+
+ def get_stream(self, search):
+ phase = 'file'
+ revs = search.get_keys()
+ graph = self.from_repository.get_graph()
+ revs = tsort.topo_sort(graph.get_parent_map(revs))
+ data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
+ text_keys = []
+ for knit_kind, file_id, revisions in data_to_fetch:
+ if knit_kind != phase:
+ phase = knit_kind
+ # Make a new progress bar for this phase
+ if knit_kind == "file":
+ # Accumulate file texts
+ text_keys.extend([(file_id, revision) for revision in
+ revisions])
+ elif knit_kind == "inventory":
+ # Now copy the file texts.
+ from_texts = self.from_repository.texts
+ yield ('texts', from_texts.get_record_stream(
+ text_keys, self.to_format._fetch_order,
+ not self.to_format._fetch_uses_deltas))
+ # Cause an error if a text occurs after we have done the
+ # copy.
+ text_keys = None
+ # Before we process the inventory we generate the root
+ # texts (if necessary) so that the inventory references
+ # will be valid.
+ for _ in self._generate_root_texts(revs):
+ yield _
+ # we fetch only the referenced inventories because we do not
+ # know for unselected inventories whether all their required
+ # texts are present in the other repository - it could be
+ # corrupt.
+ for info in self._get_inventory_stream(revs):
+ yield info
+ elif knit_kind == "signatures":
+ # Nothing to do here; this will be taken care of when
+ # _fetch_revision_texts happens.
+ pass
+ elif knit_kind == "revisions":
+ for record in self._fetch_revision_texts(revs):
+ yield record
+ else:
+ raise AssertionError("Unknown knit kind %r" % knit_kind)
+
+ def get_stream_for_missing_keys(self, missing_keys):
+ # missing keys can only occur when we are byte copying and not
+ # translating (because translation means we don't send
+ # unreconstructable deltas ever).
+ keys = {}
+ keys['texts'] = set()
+ keys['revisions'] = set()
+ keys['inventories'] = set()
+ keys['chk_bytes'] = set()
+ keys['signatures'] = set()
+ for key in missing_keys:
+ keys[key[0]].add(key[1:])
+ if len(keys['revisions']):
+ # If we allowed copying revisions at this point, we could end up
+ # copying a revision without copying its required texts: a
+ # violation of the requirements for repository integrity.
+ raise AssertionError(
+ 'cannot copy revisions to fill in missing deltas %s' % (
+ keys['revisions'],))
+ for substream_kind, keys in keys.iteritems():
+ vf = getattr(self.from_repository, substream_kind)
+ if vf is None and keys:
+ raise AssertionError(
+ "cannot fill in keys for a versioned file we don't"
+ " have: %s needs %s" % (substream_kind, keys))
+ if not keys:
+ # No need to stream something we don't have
+ continue
+ if substream_kind == 'inventories':
+ # Some missing keys are genuinely ghosts, filter those out.
+ present = self.from_repository.inventories.get_parent_map(keys)
+ revs = [key[0] for key in present]
+ # Get the inventory stream more-or-less as we do for the
+ # original stream; there's no reason to assume that records
+ # direct from the source will be suitable for the sink. (Think
+ # e.g. 2a -> 1.9-rich-root).
+ for info in self._get_inventory_stream(revs, missing=True):
+ yield info
+ continue
+
+ # Ask for full texts always so that we don't need more round trips
+ # after this stream.
+ # Some of the missing keys are genuinely ghosts, so filter absent
+ # records. The Sink is responsible for doing another check to
+ # ensure that ghosts don't introduce missing data for future
+ # fetches.
+ stream = versionedfile.filter_absent(vf.get_record_stream(keys,
+ self.to_format._fetch_order, True))
+ yield substream_kind, stream
+
+ def inventory_fetch_order(self):
+ if self._rich_root_upgrade():
+ return 'topological'
+ else:
+ return self.to_format._fetch_order
+
+ def _rich_root_upgrade(self):
+ return (not self.from_repository._format.rich_root_data and
+ self.to_format.rich_root_data)
+
+ def _get_inventory_stream(self, revision_ids, missing=False):
+ from_format = self.from_repository._format
+ if (from_format.supports_chks and self.to_format.supports_chks and
+ from_format.network_name() == self.to_format.network_name()):
+ raise AssertionError(
+ "this case should be handled by GroupCHKStreamSource")
+ elif 'forceinvdeltas' in debug.debug_flags:
+ return self._get_convertable_inventory_stream(revision_ids,
+ delta_versus_null=missing)
+ elif from_format.network_name() == self.to_format.network_name():
+ # Same format.
+ return self._get_simple_inventory_stream(revision_ids,
+ missing=missing)
+ elif (not from_format.supports_chks and not self.to_format.supports_chks
+ and from_format._serializer == self.to_format._serializer):
+ # Essentially the same format.
+ return self._get_simple_inventory_stream(revision_ids,
+ missing=missing)
+ else:
+ # Any time we switch serializations, we want to use an
+ # inventory-delta based approach.
+ return self._get_convertable_inventory_stream(revision_ids,
+ delta_versus_null=missing)
+
+ def _get_simple_inventory_stream(self, revision_ids, missing=False):
+ # NB: This currently reopens the inventory weave in source;
+ # using a single stream interface instead would avoid this.
+ from_weave = self.from_repository.inventories
+ if missing:
+ delta_closure = True
+ else:
+ delta_closure = not self.delta_on_metadata()
+ yield ('inventories', from_weave.get_record_stream(
+ [(rev_id,) for rev_id in revision_ids],
+ self.inventory_fetch_order(), delta_closure))
+
+ def _get_convertable_inventory_stream(self, revision_ids,
+ delta_versus_null=False):
+ # The two formats are sufficiently different that there is no fast
+ # path, so we need to send just inventory deltas, which any
+ # sufficiently modern client can insert into any repository.
+ # The StreamSink code expects to be able to
+ # convert on the target, so we need to put bytes-on-the-wire that can
+ # be converted. That means inventory deltas (if the remote is <1.19,
+ # RemoteStreamSink will fall back to VFS to insert the deltas).
+ yield ('inventory-deltas',
+ self._stream_invs_as_deltas(revision_ids,
+ delta_versus_null=delta_versus_null))
+
+ def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
+ """Return a stream of inventory-deltas for the given rev ids.
+
+ :param revision_ids: The list of inventories to transmit
+ :param delta_versus_null: Don't try to find a minimal delta for this
+ entry, instead compute the delta versus the NULL_REVISION. This
+ effectively streams a complete inventory. Used for stuff like
+ filling in missing parents, etc.
+ """
+ from_repo = self.from_repository
+ revision_keys = [(rev_id,) for rev_id in revision_ids]
+ parent_map = from_repo.inventories.get_parent_map(revision_keys)
+ # XXX: possibly repos could implement a more efficient iter_inv_deltas
+ # method...
+ inventories = self.from_repository.iter_inventories(
+ revision_ids, 'topological')
+ format = from_repo._format
+ invs_sent_so_far = set([_mod_revision.NULL_REVISION])
+ inventory_cache = lru_cache.LRUCache(50)
+ null_inventory = from_repo.revision_tree(
+ _mod_revision.NULL_REVISION).root_inventory
+ # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
+ # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
+ # repo back into a non-rich-root repo ought to be allowed)
+ serializer = inventory_delta.InventoryDeltaSerializer(
+ versioned_root=format.rich_root_data,
+ tree_references=format.supports_tree_reference)
+ for inv in inventories:
+ key = (inv.revision_id,)
+ parent_keys = parent_map.get(key, ())
+ delta = None
+ if not delta_versus_null and parent_keys:
+ # The caller did not ask for complete inventories and we have
+ # some parents that we can delta against. Make a delta against
+ # each parent so that we can find the smallest.
+ parent_ids = [parent_key[0] for parent_key in parent_keys]
+ for parent_id in parent_ids:
+ if parent_id not in invs_sent_so_far:
+ # We don't know that the remote side has this basis, so
+ # we can't use it.
+ continue
+ if parent_id == _mod_revision.NULL_REVISION:
+ parent_inv = null_inventory
+ else:
+ parent_inv = inventory_cache.get(parent_id, None)
+ if parent_inv is None:
+ parent_inv = from_repo.get_inventory(parent_id)
+ candidate_delta = inv._make_delta(parent_inv)
+ if (delta is None or
+ len(delta) > len(candidate_delta)):
+ delta = candidate_delta
+ basis_id = parent_id
+ if delta is None:
+ # Either none of the parents ended up being suitable, or we
+ # were asked to delta against NULL
+ basis_id = _mod_revision.NULL_REVISION
+ delta = inv._make_delta(null_inventory)
+ invs_sent_so_far.add(inv.revision_id)
+ inventory_cache[inv.revision_id] = inv
+ delta_serialized = ''.join(
+ serializer.delta_to_lines(basis_id, key[-1], delta))
+ yield versionedfile.FulltextContentFactory(
+ key, parent_keys, None, delta_serialized)
+
+
+class _VersionedFileChecker(object):
+
+ def __init__(self, repository, text_key_references=None, ancestors=None):
+ self.repository = repository
+ self.text_index = self.repository._generate_text_key_index(
+ text_key_references=text_key_references, ancestors=ancestors)
+
+ def calculate_file_version_parents(self, text_key):
+ """Calculate the correct parents for a file version according to
+ the inventories.
+ """
+ parent_keys = self.text_index[text_key]
+ if parent_keys == [_mod_revision.NULL_REVISION]:
+ return ()
+ return tuple(parent_keys)
+
+ def check_file_version_parents(self, texts, progress_bar=None):
+ """Check the parents stored in a versioned file are correct.
+
+ It also detects file versions that are not referenced by their
+ corresponding revision's inventory.
+
+ :returns: A tuple of (wrong_parents, dangling_file_versions).
+ wrong_parents is a dict mapping {revision_id: (stored_parents,
+ correct_parents)} for each revision_id where the stored parents
+ are not correct. dangling_file_versions is a set of (file_id,
+ revision_id) tuples for versions that are present in this versioned
+ file, but not used by the corresponding inventory.
+ """
+ local_progress = None
+ if progress_bar is None:
+ local_progress = ui.ui_factory.nested_progress_bar()
+ progress_bar = local_progress
+ try:
+ return self._check_file_version_parents(texts, progress_bar)
+ finally:
+ if local_progress:
+ local_progress.finished()
+
+ def _check_file_version_parents(self, texts, progress_bar):
+ """See check_file_version_parents."""
+ wrong_parents = {}
+ self.file_ids = set([file_id for file_id, _ in
+ self.text_index.iterkeys()])
+ # text keys are now grouped by file_id
+ n_versions = len(self.text_index)
+ progress_bar.update(gettext('loading text store'), 0, n_versions)
+ parent_map = self.repository.texts.get_parent_map(self.text_index)
+ # On unlistable transports this could well be empty/error...
+ text_keys = self.repository.texts.keys()
+ unused_keys = frozenset(text_keys) - set(self.text_index)
+ for num, key in enumerate(self.text_index.iterkeys()):
+ progress_bar.update(gettext('checking text graph'), num, n_versions)
+ correct_parents = self.calculate_file_version_parents(key)
+ try:
+ knit_parents = parent_map[key]
+ except errors.RevisionNotPresent:
+ # Missing text!
+ knit_parents = None
+ if correct_parents != knit_parents:
+ wrong_parents[key] = (knit_parents, correct_parents)
+ return wrong_parents, unused_keys
+
+
+class InterVersionedFileRepository(InterRepository):
+
+ _walk_to_common_revisions_batch_size = 50
+
+ supports_fetch_spec = True
+
+ @needs_write_lock
+ def fetch(self, revision_id=None, find_ghosts=False,
+ fetch_spec=None):
+ """Fetch the content required to construct revision_id.
+
+ The content is copied from self.source to self.target.
+
+ :param revision_id: if None all content is copied, if NULL_REVISION no
+ content is copied.
+ :return: None.
+ """
+ if self.target._format.experimental:
+ ui.ui_factory.show_user_warning('experimental_format_fetch',
+ from_format=self.source._format,
+ to_format=self.target._format)
+ from bzrlib.fetch import RepoFetcher
+ # See <https://launchpad.net/bugs/456077> asking for a warning here
+ if self.source._format.network_name() != self.target._format.network_name():
+ ui.ui_factory.show_user_warning('cross_format_fetch',
+ from_format=self.source._format,
+ to_format=self.target._format)
+ f = RepoFetcher(to_repository=self.target,
+ from_repository=self.source,
+ last_revision=revision_id,
+ fetch_spec=fetch_spec,
+ find_ghosts=find_ghosts)
+
+ def _walk_to_common_revisions(self, revision_ids, if_present_ids=None):
+ """Walk out from revision_ids in source to revisions target has.
+
+ :param revision_ids: The start point for the search.
+ :return: A set of revision ids.
+ """
+ target_graph = self.target.get_graph()
+ revision_ids = frozenset(revision_ids)
+ if if_present_ids:
+ all_wanted_revs = revision_ids.union(if_present_ids)
+ else:
+ all_wanted_revs = revision_ids
+ missing_revs = set()
+ source_graph = self.source.get_graph()
+ # ensure we don't pay silly lookup costs.
+ searcher = source_graph._make_breadth_first_searcher(all_wanted_revs)
+ null_set = frozenset([_mod_revision.NULL_REVISION])
+ searcher_exhausted = False
+ while True:
+ next_revs = set()
+ ghosts = set()
+ # Iterate the searcher until we have enough next_revs
+ while len(next_revs) < self._walk_to_common_revisions_batch_size:
+ try:
+ next_revs_part, ghosts_part = searcher.next_with_ghosts()
+ next_revs.update(next_revs_part)
+ ghosts.update(ghosts_part)
+ except StopIteration:
+ searcher_exhausted = True
+ break
+ # If there are ghosts in the source graph, and the caller asked for
+ # them, make sure that they are present in the target.
+ # We don't care about other ghosts as we can't fetch them and
+ # haven't been asked to.
+ ghosts_to_check = set(revision_ids.intersection(ghosts))
+ revs_to_get = set(next_revs).union(ghosts_to_check)
+ if revs_to_get:
+ have_revs = set(target_graph.get_parent_map(revs_to_get))
+ # we always have NULL_REVISION present.
+ have_revs = have_revs.union(null_set)
+ # Check if the target is missing any ghosts we need.
+ ghosts_to_check.difference_update(have_revs)
+ if ghosts_to_check:
+ # One of the caller's revision_ids is a ghost in both the
+ # source and the target.
+ raise errors.NoSuchRevision(
+ self.source, ghosts_to_check.pop())
+ missing_revs.update(next_revs - have_revs)
+ # Because we may have walked past the original stop point, make
+ # sure everything is stopped
+ stop_revs = searcher.find_seen_ancestors(have_revs)
+ searcher.stop_searching_any(stop_revs)
+ if searcher_exhausted:
+ break
+ (started_keys, excludes, included_keys) = searcher.get_state()
+ return vf_search.SearchResult(started_keys, excludes,
+ len(included_keys), included_keys)
+
+ @needs_read_lock
+ def search_missing_revision_ids(self,
+ revision_id=symbol_versioning.DEPRECATED_PARAMETER,
+ find_ghosts=True, revision_ids=None, if_present_ids=None,
+ limit=None):
+ """Return the revision ids that source has that target does not.
+
+ :param revision_id: only return revision ids included by this
+ revision_id.
+ :param revision_ids: return revision ids included by these
+ revision_ids. NoSuchRevision will be raised if any of these
+ revisions are not present.
+ :param if_present_ids: like revision_ids, but will not cause
+ NoSuchRevision if any of these are absent, instead they will simply
+ not be in the result. This is useful for e.g. finding revisions
+ to fetch for tags, which may reference absent revisions.
+ :param find_ghosts: If True find missing revisions in deep history
+ rather than just finding the surface difference.
+ :return: A bzrlib.graph.SearchResult.
+ """
+ if symbol_versioning.deprecated_passed(revision_id):
+ symbol_versioning.warn(
+ 'search_missing_revision_ids(revision_id=...) was '
+ 'deprecated in 2.4. Use revision_ids=[...] instead.',
+ DeprecationWarning, stacklevel=2)
+ if revision_ids is not None:
+ raise AssertionError(
+ 'revision_ids is mutually exclusive with revision_id')
+ if revision_id is not None:
+ revision_ids = [revision_id]
+ del revision_id
+ # stop searching at found target revisions.
+ if not find_ghosts and (revision_ids is not None or if_present_ids is
+ not None):
+ result = self._walk_to_common_revisions(revision_ids,
+ if_present_ids=if_present_ids)
+ if limit is None:
+ return result
+ result_set = result.get_keys()
+ else:
+ # generic, possibly worst case, slow code path.
+ target_ids = set(self.target.all_revision_ids())
+ source_ids = self._present_source_revisions_for(
+ revision_ids, if_present_ids)
+ result_set = set(source_ids).difference(target_ids)
+ if limit is not None:
+ topo_ordered = self.source.get_graph().iter_topo_order(result_set)
+ result_set = set(itertools.islice(topo_ordered, limit))
+ return self.source.revision_ids_to_search_result(result_set)
+
+ def _present_source_revisions_for(self, revision_ids, if_present_ids=None):
+ """Returns set of all revisions in ancestry of revision_ids present in
+ the source repo.
+
+ :param revision_ids: if None, all revisions in source are returned.
+ :param if_present_ids: like revision_ids, but if any/all of these are
+ absent no error is raised.
+ """
+ if revision_ids is not None or if_present_ids is not None:
+ # First, ensure all specified revisions exist. Callers expect
+ # NoSuchRevision when they pass absent revision_ids here.
+ if revision_ids is None:
+ revision_ids = set()
+ if if_present_ids is None:
+ if_present_ids = set()
+ revision_ids = set(revision_ids)
+ if_present_ids = set(if_present_ids)
+ all_wanted_ids = revision_ids.union(if_present_ids)
+ graph = self.source.get_graph()
+ present_revs = set(graph.get_parent_map(all_wanted_ids))
+ missing = revision_ids.difference(present_revs)
+ if missing:
+ raise errors.NoSuchRevision(self.source, missing.pop())
+ found_ids = all_wanted_ids.intersection(present_revs)
+ source_ids = [rev_id for (rev_id, parents) in
+ graph.iter_ancestry(found_ids)
+ if rev_id != _mod_revision.NULL_REVISION
+ and parents is not None]
+ else:
+ source_ids = self.source.all_revision_ids()
+ return set(source_ids)
+
+ @classmethod
+ def _get_repo_format_to_test(self):
+ return None
+
+ @classmethod
+ def is_compatible(cls, source, target):
+ # The default implementation is compatible with everything
+ return (source._format.supports_full_versioned_files and
+ target._format.supports_full_versioned_files)
+
+
+class InterDifferingSerializer(InterVersionedFileRepository):
+
+ @classmethod
+ def _get_repo_format_to_test(self):
+ return None
+
+ @staticmethod
+ def is_compatible(source, target):
+ if not source._format.supports_full_versioned_files:
+ return False
+ if not target._format.supports_full_versioned_files:
+ return False
+ # This is redundant with format.check_conversion_target(), however that
+ # raises an exception, and we just want to say "False" as in we won't
+ # support converting between these formats.
+ if 'IDS_never' in debug.debug_flags:
+ return False
+ if source.supports_rich_root() and not target.supports_rich_root():
+ return False
+ if (source._format.supports_tree_reference
+ and not target._format.supports_tree_reference):
+ return False
+ if target._fallback_repositories and target._format.supports_chks:
+ # IDS doesn't know how to copy CHKs for the parent inventories it
+ # adds to stacked repos.
+ return False
+ if 'IDS_always' in debug.debug_flags:
+ return True
+ # Only use this code path for local source and target. IDS does far
+ # too much IO (both bandwidth and roundtrips) over a network.
+ if not source.bzrdir.transport.base.startswith('file:///'):
+ return False
+ if not target.bzrdir.transport.base.startswith('file:///'):
+ return False
+ return True
+
+ def _get_trees(self, revision_ids, cache):
+ possible_trees = []
+ for rev_id in revision_ids:
+ if rev_id in cache:
+ possible_trees.append((rev_id, cache[rev_id]))
+ else:
+ # Not cached, but inventory might be present anyway.
+ try:
+ tree = self.source.revision_tree(rev_id)
+ except errors.NoSuchRevision:
+ # Nope, parent is ghost.
+ pass
+ else:
+ cache[rev_id] = tree
+ possible_trees.append((rev_id, tree))
+ return possible_trees
+
+ def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
+ """Get the best delta and base for this revision.
+
+ :return: (basis_id, delta)
+ """
+ deltas = []
+ # Generate deltas against each tree, to find the shortest.
+ # FIXME: Support nested trees
+ texts_possibly_new_in_tree = set()
+ for basis_id, basis_tree in possible_trees:
+ delta = tree.root_inventory._make_delta(basis_tree.root_inventory)
+ for old_path, new_path, file_id, new_entry in delta:
+ if new_path is None:
+ # This file_id isn't present in the new rev, so we don't
+ # care about it.
+ continue
+ if not new_path:
+ # Rich roots are handled elsewhere...
+ continue
+ kind = new_entry.kind
+ if kind != 'directory' and kind != 'file':
+ # No text record associated with this inventory entry.
+ continue
+ # This is a directory or file that has changed somehow.
+ texts_possibly_new_in_tree.add((file_id, new_entry.revision))
+ deltas.append((len(delta), basis_id, delta))
+ deltas.sort()
+ return deltas[0][1:]
+
+ def _fetch_parent_invs_for_stacking(self, parent_map, cache):
+ """Find all parent revisions that are absent, but for which the
+ inventory is present, and copy those inventories.
+
+ This is necessary to preserve correctness when the source is stacked
+ without fallbacks configured. (Note that in cases like upgrade the
+ source may not have _fallback_repositories even though it is
+ stacked.)
+ """
+ parent_revs = set()
+ for parents in parent_map.values():
+ parent_revs.update(parents)
+ present_parents = self.source.get_parent_map(parent_revs)
+ absent_parents = set(parent_revs).difference(present_parents)
+ parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
+ (rev_id,) for rev_id in absent_parents)
+ parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
+ for parent_tree in self.source.revision_trees(parent_inv_ids):
+ current_revision_id = parent_tree.get_revision_id()
+ parents_parents_keys = parent_invs_keys_for_stacking[
+ (current_revision_id,)]
+ parents_parents = [key[-1] for key in parents_parents_keys]
+ basis_id = _mod_revision.NULL_REVISION
+ basis_tree = self.source.revision_tree(basis_id)
+ delta = parent_tree.root_inventory._make_delta(
+ basis_tree.root_inventory)
+ self.target.add_inventory_by_delta(
+ basis_id, delta, current_revision_id, parents_parents)
+ cache[current_revision_id] = parent_tree
+
+ def _fetch_batch(self, revision_ids, basis_id, cache):
+ """Fetch across a few revisions.
+
+ :param revision_ids: The revisions to copy
+ :param basis_id: The revision_id of a tree that must be in cache, used
+ as a basis for delta when no other base is available
+ :param cache: A cache of RevisionTrees that we can use.
+ :return: The revision_id of the last converted tree. The RevisionTree
+ for it will be in cache
+ """
+ # Walk through all revisions; get inventory deltas, copy referenced
+ # texts that delta references, insert the delta, revision and
+ # signature.
+ root_keys_to_create = set()
+ text_keys = set()
+ pending_deltas = []
+ pending_revisions = []
+ parent_map = self.source.get_parent_map(revision_ids)
+ self._fetch_parent_invs_for_stacking(parent_map, cache)
+ self.source._safe_to_return_from_cache = True
+ for tree in self.source.revision_trees(revision_ids):
+ # Find an inventory delta for this revision.
+ # Find text entries that need to be copied, too.
+ current_revision_id = tree.get_revision_id()
+ parent_ids = parent_map.get(current_revision_id, ())
+ parent_trees = self._get_trees(parent_ids, cache)
+ possible_trees = list(parent_trees)
+ if len(possible_trees) == 0:
+ # There either aren't any parents, or the parents are ghosts,
+ # so just use the last converted tree.
+ possible_trees.append((basis_id, cache[basis_id]))
+ basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
+ possible_trees)
+ revision = self.source.get_revision(current_revision_id)
+ pending_deltas.append((basis_id, delta,
+ current_revision_id, revision.parent_ids))
+ if self._converting_to_rich_root:
+ self._revision_id_to_root_id[current_revision_id] = \
+ tree.get_root_id()
+ # Determine which texts are present in this revision but not in
+ # any of the available parents.
+ texts_possibly_new_in_tree = set()
+ for old_path, new_path, file_id, entry in delta:
+ if new_path is None:
+ # This file_id isn't present in the new rev
+ continue
+ if not new_path:
+ # This is the root
+ if not self.target.supports_rich_root():
+ # The target doesn't support rich root, so we don't
+ # copy
+ continue
+ if self._converting_to_rich_root:
+ # This can't be copied normally, we have to insert
+ # it specially
+ root_keys_to_create.add((file_id, entry.revision))
+ continue
+ kind = entry.kind
+ texts_possibly_new_in_tree.add((file_id, entry.revision))
+ for basis_id, basis_tree in possible_trees:
+ basis_inv = basis_tree.root_inventory
+ for file_key in list(texts_possibly_new_in_tree):
+ file_id, file_revision = file_key
+ try:
+ entry = basis_inv[file_id]
+ except errors.NoSuchId:
+ continue
+ if entry.revision == file_revision:
+ texts_possibly_new_in_tree.remove(file_key)
+ text_keys.update(texts_possibly_new_in_tree)
+ pending_revisions.append(revision)
+ cache[current_revision_id] = tree
+ basis_id = current_revision_id
+ self.source._safe_to_return_from_cache = False
+ # Copy file texts
+ from_texts = self.source.texts
+ to_texts = self.target.texts
+ if root_keys_to_create:
+ root_stream = _mod_fetch._new_root_data_stream(
+ root_keys_to_create, self._revision_id_to_root_id, parent_map,
+ self.source)
+ to_texts.insert_record_stream(root_stream)
+ to_texts.insert_record_stream(from_texts.get_record_stream(
+ text_keys, self.target._format._fetch_order,
+ not self.target._format._fetch_uses_deltas))
+ # insert inventory deltas
+ for delta in pending_deltas:
+ self.target.add_inventory_by_delta(*delta)
+ if self.target._fallback_repositories:
+ # Make sure this stacked repository has all the parent inventories
+ # for the new revisions that we are about to insert. We do this
+ # before adding the revisions so that no revision is added until
+ # all the inventories it may depend on are added.
+ # Note that this is overzealous, as we may have fetched these in an
+ # earlier batch.
+ parent_ids = set()
+ revision_ids = set()
+ for revision in pending_revisions:
+ revision_ids.add(revision.revision_id)
+ parent_ids.update(revision.parent_ids)
+ parent_ids.difference_update(revision_ids)
+ parent_ids.discard(_mod_revision.NULL_REVISION)
+ parent_map = self.source.get_parent_map(parent_ids)
+ # we iterate over parent_map and not parent_ids because we don't
+ # want to try copying any revision which is a ghost
+ for parent_tree in self.source.revision_trees(parent_map):
+ current_revision_id = parent_tree.get_revision_id()
+ parents_parents = parent_map[current_revision_id]
+ possible_trees = self._get_trees(parents_parents, cache)
+ if len(possible_trees) == 0:
+ # There either aren't any parents, or the parents are
+ # ghosts, so just use the last converted tree.
+ possible_trees.append((basis_id, cache[basis_id]))
+ basis_id, delta = self._get_delta_for_revision(parent_tree,
+ parents_parents, possible_trees)
+ self.target.add_inventory_by_delta(
+ basis_id, delta, current_revision_id, parents_parents)
+ # insert signatures and revisions
+ for revision in pending_revisions:
+ try:
+ signature = self.source.get_signature_text(
+ revision.revision_id)
+ self.target.add_signature_text(revision.revision_id,
+ signature)
+ except errors.NoSuchRevision:
+ pass
+ self.target.add_revision(revision.revision_id, revision)
+ return basis_id
+
+ def _fetch_all_revisions(self, revision_ids, pb):
+ """Fetch everything for the list of revisions.
+
+ :param revision_ids: The list of revisions to fetch. Must be in
+ topological order.
+ :param pb: A ProgressTask
+ :return: None
+ """
+ basis_id, basis_tree = self._get_basis(revision_ids[0])
+ batch_size = 100
+ cache = lru_cache.LRUCache(100)
+ cache[basis_id] = basis_tree
+ del basis_tree # We don't want to hang on to it here
+ hints = []
+ a_graph = None
+
+ for offset in range(0, len(revision_ids), batch_size):
+ self.target.start_write_group()
+ try:
+ pb.update(gettext('Transferring revisions'), offset,
+ len(revision_ids))
+ batch = revision_ids[offset:offset+batch_size]
+ basis_id = self._fetch_batch(batch, basis_id, cache)
+ except:
+ self.source._safe_to_return_from_cache = False
+ self.target.abort_write_group()
+ raise
+ else:
+ hint = self.target.commit_write_group()
+ if hint:
+ hints.extend(hint)
+ if hints and self.target._format.pack_compresses:
+ self.target.pack(hint=hints)
+ pb.update(gettext('Transferring revisions'), len(revision_ids),
+ len(revision_ids))
+
+ @needs_write_lock
+ def fetch(self, revision_id=None, find_ghosts=False,
+ fetch_spec=None):
+ """See InterRepository.fetch()."""
+ if fetch_spec is not None:
+ revision_ids = fetch_spec.get_keys()
+ else:
+ revision_ids = None
+ if self.source._format.experimental:
+ ui.ui_factory.show_user_warning('experimental_format_fetch',
+ from_format=self.source._format,
+ to_format=self.target._format)
+ if (not self.source.supports_rich_root()
+ and self.target.supports_rich_root()):
+ self._converting_to_rich_root = True
+ self._revision_id_to_root_id = {}
+ else:
+ self._converting_to_rich_root = False
+ # See <https://launchpad.net/bugs/456077> asking for a warning here
+ if self.source._format.network_name() != self.target._format.network_name():
+ ui.ui_factory.show_user_warning('cross_format_fetch',
+ from_format=self.source._format,
+ to_format=self.target._format)
+ if revision_ids is None:
+ if revision_id:
+ search_revision_ids = [revision_id]
+ else:
+ search_revision_ids = None
+ revision_ids = self.target.search_missing_revision_ids(self.source,
+ revision_ids=search_revision_ids,
+ find_ghosts=find_ghosts).get_keys()
+ if not revision_ids:
+ return 0, 0
+ revision_ids = tsort.topo_sort(
+ self.source.get_graph().get_parent_map(revision_ids))
+ if not revision_ids:
+ return 0, 0
+ # Walk through all revisions; get inventory deltas, copy referenced
+ # texts that delta references, insert the delta, revision and
+ # signature.
+ pb = ui.ui_factory.nested_progress_bar()
+ try:
+ self._fetch_all_revisions(revision_ids, pb)
+ finally:
+ pb.finished()
+ return len(revision_ids), 0
+
+ def _get_basis(self, first_revision_id):
+ """Get a revision and tree which exists in the target.
+
+ This assumes that first_revision_id is selected for transmission
+ because all other ancestors are already present. If we can't find an
+ ancestor we fall back to NULL_REVISION since we know that is safe.
+
+ :return: (basis_id, basis_tree)
+ """
+ first_rev = self.source.get_revision(first_revision_id)
+ try:
+ basis_id = first_rev.parent_ids[0]
+ # only valid as a basis if the target has it
+ self.target.get_revision(basis_id)
+ # Try to get a basis tree - if it's a ghost it will hit the
+ # NoSuchRevision case.
+ basis_tree = self.source.revision_tree(basis_id)
+ except (IndexError, errors.NoSuchRevision):
+ basis_id = _mod_revision.NULL_REVISION
+ basis_tree = self.source.revision_tree(basis_id)
+ return basis_id, basis_tree
+
+
+class InterSameDataRepository(InterVersionedFileRepository):
+ """Code for converting between repositories that represent the same data.
+
+ Data format and model must match for this to work.
+ """
+
+ @classmethod
+ def _get_repo_format_to_test(self):
+ """Repository format for testing with.
+
+ InterSameData can pull from subtree to subtree and from non-subtree to
+ non-subtree, so we test this with the richest repository format.
+ """
+ from bzrlib.repofmt import knitrepo
+ return knitrepo.RepositoryFormatKnit3()
+
+ @staticmethod
+ def is_compatible(source, target):
+ return (
+ InterRepository._same_model(source, target) and
+ source._format.supports_full_versioned_files and
+ target._format.supports_full_versioned_files)
+
+
+InterRepository.register_optimiser(InterVersionedFileRepository)
+InterRepository.register_optimiser(InterDifferingSerializer)
+InterRepository.register_optimiser(InterSameDataRepository)
+
+
+def install_revisions(repository, iterable, num_revisions=None, pb=None):
+ """Install all revision data into a repository.
+
+ Accepts an iterable of revision, tree, signature tuples. The signature
+ may be None.
+ """
+ repository.start_write_group()
+ try:
+ inventory_cache = lru_cache.LRUCache(10)
+ for n, (revision, revision_tree, signature) in enumerate(iterable):
+ _install_revision(repository, revision, revision_tree, signature,
+ inventory_cache)
+ if pb is not None:
+ pb.update(gettext('Transferring revisions'), n + 1, num_revisions)
+ except:
+ repository.abort_write_group()
+ raise
+ else:
+ repository.commit_write_group()
+
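+# Illustrative usage sketch (hypothetical source and target repositories):
+# callers supply (Revision, RevisionTree, signature-or-None) tuples, and the
+# repository must already be write-locked because install_revisions opens its
+# own write group.
+#
+#   triples = [(rev, source_repo.revision_tree(rev.revision_id), None)
+#              for rev in revisions_to_copy]
+#   target_repo.lock_write()
+#   try:
+#       install_revisions(target_repo, triples, num_revisions=len(triples))
+#   finally:
+#       target_repo.unlock()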
+
+def _install_revision(repository, rev, revision_tree, signature,
+ inventory_cache):
+ """Install all revision data into a repository."""
+ present_parents = []
+ parent_trees = {}
+ for p_id in rev.parent_ids:
+ if repository.has_revision(p_id):
+ present_parents.append(p_id)
+ parent_trees[p_id] = repository.revision_tree(p_id)
+ else:
+ parent_trees[p_id] = repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+
+ # FIXME: Support nested trees
+ inv = revision_tree.root_inventory
+ entries = inv.iter_entries()
+ # backwards compatibility hack: skip the root id.
+ if not repository.supports_rich_root():
+ path, root = entries.next()
+ if root.revision != rev.revision_id:
+ raise errors.IncompatibleRevision(repr(repository))
+ text_keys = {}
+ for path, ie in entries:
+ text_keys[(ie.file_id, ie.revision)] = ie
+ text_parent_map = repository.texts.get_parent_map(text_keys)
+ missing_texts = set(text_keys) - set(text_parent_map)
+ # Add the texts that are not already present
+ for text_key in missing_texts:
+ ie = text_keys[text_key]
+ text_parents = []
+ # FIXME: TODO: The following loop overlaps/duplicates that done by
+ # commit to determine parents. There is a latent/real bug here where
+ # the parents inserted are not those commit would do - in particular
+ # they are not filtered by heads(). RBC, AB
+ for revision, tree in parent_trees.iteritems():
+ if not tree.has_id(ie.file_id):
+ continue
+ parent_id = tree.get_file_revision(ie.file_id)
+ if parent_id in text_parents:
+ continue
+ text_parents.append((ie.file_id, parent_id))
+ lines = revision_tree.get_file(ie.file_id).readlines()
+ repository.texts.add_lines(text_key, text_parents, lines)
+ try:
+ # install the inventory
+ if repository._format._commit_inv_deltas and len(rev.parent_ids):
+ # Cache this inventory
+ inventory_cache[rev.revision_id] = inv
+ try:
+ basis_inv = inventory_cache[rev.parent_ids[0]]
+ except KeyError:
+ repository.add_inventory(rev.revision_id, inv, present_parents)
+ else:
+ delta = inv._make_delta(basis_inv)
+ repository.add_inventory_by_delta(rev.parent_ids[0], delta,
+ rev.revision_id, present_parents)
+ else:
+ repository.add_inventory(rev.revision_id, inv, present_parents)
+ except errors.RevisionAlreadyPresent:
+ pass
+ if signature is not None:
+ repository.add_signature_text(rev.revision_id, signature)
+ repository.add_revision(rev.revision_id, rev, inv)
+
+
+def install_revision(repository, rev, revision_tree):
+ """Install all revision data into a repository."""
+ install_revisions(repository, [(rev, revision_tree, None)])
diff --git a/bzrlib/vf_search.py b/bzrlib/vf_search.py
new file mode 100644
index 0000000..5cb3357
--- /dev/null
+++ b/bzrlib/vf_search.py
@@ -0,0 +1,511 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Searching in versioned file repositories."""
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ debug,
+ revision,
+ trace,
+ )
+
+from bzrlib.graph import (
+ DictParentsProvider,
+ Graph,
+ invert_parent_map,
+ )
+
+
+class AbstractSearchResult(object):
+ """The result of a search, describing a set of keys.
+
+ Search results are typically used as the 'fetch_spec' parameter when
+ fetching revisions.
+
+ :seealso: AbstractSearch
+ """
+
+ def get_recipe(self):
+ """Return a recipe that can be used to replay this search.
+
+ The recipe allows reconstruction of the same results at a later date.
+
+ :return: A tuple of `(search_kind_str, *details)`. The details vary by
+ kind of search result.
+ """
+ raise NotImplementedError(self.get_recipe)
+
+ def get_network_struct(self):
+ """Return a tuple that can be transmitted via the HPSS protocol."""
+ raise NotImplementedError(self.get_network_struct)
+
+ def get_keys(self):
+ """Return the keys found in this search.
+
+ :return: A set of keys.
+ """
+ raise NotImplementedError(self.get_keys)
+
+ def is_empty(self):
+ """Return false if the search lists 1 or more revisions."""
+ raise NotImplementedError(self.is_empty)
+
+ def refine(self, seen, referenced):
+ """Create a new search by refining this search.
+
+ :param seen: Revisions that have been satisfied.
+ :param referenced: Revision references observed while satisfying some
+ of this search.
+ :return: A search result.
+ """
+ raise NotImplementedError(self.refine)
+
+
+class AbstractSearch(object):
+ """A search that can be executed, producing a search result.
+
+ :seealso: AbstractSearchResult
+ """
+
+ def execute(self):
+ """Construct a network-ready search result from this search description.
+
+ This may take some time to search repositories, etc.
+
+ :return: A search result (an object that implements
+ AbstractSearchResult's API).
+ """
+ raise NotImplementedError(self.execute)
+
+
+class SearchResult(AbstractSearchResult):
+ """The result of a breadth first search.
+
+ A SearchResult provides the ability to reconstruct the search or access a
+ set of the keys the search found.
+ """
+
+ def __init__(self, start_keys, exclude_keys, key_count, keys):
+ """Create a SearchResult.
+
+ :param start_keys: The keys the search started at.
+ :param exclude_keys: The keys the search excludes.
+ :param key_count: The total number of keys (from start to but not
+ including exclude).
+ :param keys: The keys the search found. Note that in future we may get
+ a SearchResult from a smart server, in which case the keys list is
+ not necessarily immediately available.
+ """
+ self._recipe = ('search', start_keys, exclude_keys, key_count)
+ self._keys = frozenset(keys)
+
+ def __repr__(self):
+ kind, start_keys, exclude_keys, key_count = self._recipe
+ if len(start_keys) > 5:
+ start_keys_repr = repr(list(start_keys)[:5])[:-1] + ', ...]'
+ else:
+ start_keys_repr = repr(start_keys)
+ if len(exclude_keys) > 5:
+ exclude_keys_repr = repr(list(exclude_keys)[:5])[:-1] + ', ...]'
+ else:
+ exclude_keys_repr = repr(exclude_keys)
+ return '<%s %s:(%s, %s, %d)>' % (self.__class__.__name__,
+ kind, start_keys_repr, exclude_keys_repr, key_count)
+
+ def get_recipe(self):
+ """Return a recipe that can be used to replay this search.
+
+ The recipe allows reconstruction of the same results at a later date
+ without knowing all the found keys. The essential elements are a list
+ of keys to start and to stop at. In order to give reproducible
+ results when ghosts are encountered by a search they are automatically
+ added to the exclude list (or else ghost filling may alter the
+ results).
+
+ :return: A tuple ('search', start_keys_set, exclude_keys_set,
+ revision_count). To recreate the results of this search, create a
+ breadth first searcher on the same graph starting at start_keys.
+ Then call next() (or next_with_ghosts()) repeatedly, and on every
+ result, call stop_searching_any on any keys from the exclude_keys
+ set. The revision_count value acts as a trivial cross-check - the
+ found revisions of the new search should have as many elements as
+ revision_count. If they do not, then additional revisions have been
+ ghosted between the first time the search was executed and the
+ second time.
+ """
+ return self._recipe
+
+ def get_network_struct(self):
+ start_keys = ' '.join(self._recipe[1])
+ stop_keys = ' '.join(self._recipe[2])
+ count = str(self._recipe[3])
+ return (self._recipe[0], '\n'.join((start_keys, stop_keys, count)))
+
+ def get_keys(self):
+ """Return the keys found in this search.
+
+ :return: A set of keys.
+ """
+ return self._keys
+
+ def is_empty(self):
+ """Return false if the search lists 1 or more revisions."""
+ return self._recipe[3] == 0
+
+ def refine(self, seen, referenced):
+ """Create a new search by refining this search.
+
+ :param seen: Revisions that have been satisfied.
+ :param referenced: Revision references observed while satisfying some
+ of this search.
+ """
+ start = self._recipe[1]
+ exclude = self._recipe[2]
+ count = self._recipe[3]
+ keys = self.get_keys()
+ # New heads = referenced + old heads - seen things - exclude
+ pending_refs = set(referenced)
+ pending_refs.update(start)
+ pending_refs.difference_update(seen)
+ pending_refs.difference_update(exclude)
+ # New exclude = old exclude + satisfied heads
+ seen_heads = start.intersection(seen)
+ exclude.update(seen_heads)
+ # keys gets seen removed
+ keys = keys - seen
+ # length is reduced by len(seen)
+ count -= len(seen)
+ return SearchResult(pending_refs, exclude, count, keys)
+
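+# Worked example (hypothetical revision keys): in the ancestry A <- B <- C, a
+# search started at C and stopped at A finds {B, C}, so its recipe is
+# ('search', {'C'}, {'A'}, 2). To replay it, start a breadth-first searcher
+# at {'C'} and call stop_searching_any({'A'}) on every result, as described
+# in get_recipe above. Refining that result with seen={'C'} and
+# referenced={'B'} gives a result equivalent to
+# SearchResult({'B'}, {'A', 'C'}, 1, {'B'}): B becomes the new head, C joins
+# the exclude set, and the count drops to 1.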
+
+class PendingAncestryResult(AbstractSearchResult):
+ """A search result that will reconstruct the ancestry for some graph heads.
+
+ Unlike SearchResult, this doesn't hold the complete search result in
+ memory, it just holds a description of how to generate it.
+ """
+
+ def __init__(self, heads, repo):
+ """Constructor.
+
+ :param heads: an iterable of graph heads.
+ :param repo: a repository to use to generate the ancestry for the given
+ heads.
+ """
+ self.heads = frozenset(heads)
+ self.repo = repo
+
+ def __repr__(self):
+ if len(self.heads) > 5:
+ heads_repr = repr(list(self.heads)[:5])[:-1]
+ heads_repr += ', <%d more>...]' % (len(self.heads) - 5,)
+ else:
+ heads_repr = repr(self.heads)
+ return '<%s heads:%s repo:%r>' % (
+ self.__class__.__name__, heads_repr, self.repo)
+
+ def get_recipe(self):
+ """Return a recipe that can be used to replay this search.
+
+ The recipe allows reconstruction of the same results at a later date.
+
+ :seealso SearchResult.get_recipe:
+
+ :return: A tuple ('proxy-search', start_keys_set, set(), -1)
+ To recreate this result, create a PendingAncestryResult with the
+ start_keys_set.
+ """
+ return ('proxy-search', self.heads, set(), -1)
+
+ def get_network_struct(self):
+ parts = ['ancestry-of']
+ parts.extend(self.heads)
+ return parts
+
+ def get_keys(self):
+ """See SearchResult.get_keys.
+
+ Returns all the keys for the ancestry of the heads, excluding
+ NULL_REVISION.
+ """
+ return self._get_keys(self.repo.get_graph())
+
+ def _get_keys(self, graph):
+ NULL_REVISION = revision.NULL_REVISION
+ keys = [key for (key, parents) in graph.iter_ancestry(self.heads)
+ if key != NULL_REVISION and parents is not None]
+ return keys
+
+ def is_empty(self):
+ """Return false if the search lists 1 or more revisions."""
+ if revision.NULL_REVISION in self.heads:
+ return len(self.heads) == 1
+ else:
+ return len(self.heads) == 0
+
+ def refine(self, seen, referenced):
+ """Create a new search by refining this search.
+
+ :param seen: Revisions that have been satisfied.
+ :param referenced: Revision references observed while satisfying some
+ of this search.
+ """
+ referenced = self.heads.union(referenced)
+ return PendingAncestryResult(referenced - seen, self.repo)
+
+
+class EmptySearchResult(AbstractSearchResult):
+ """An empty search result."""
+
+ def is_empty(self):
+ return True
+
+
+class EverythingResult(AbstractSearchResult):
+ """A search result that simply requests everything in the repository."""
+
+ def __init__(self, repo):
+ self._repo = repo
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, self._repo)
+
+ def get_recipe(self):
+ raise NotImplementedError(self.get_recipe)
+
+ def get_network_struct(self):
+ return ('everything',)
+
+ def get_keys(self):
+ if 'evil' in debug.debug_flags:
+ from bzrlib import remote
+ if isinstance(self._repo, remote.RemoteRepository):
+ # warn developers (not users) not to do this
+ trace.mutter_callsite(
+ 2, "EverythingResult(RemoteRepository).get_keys() is slow.")
+ return self._repo.all_revision_ids()
+
+ def is_empty(self):
+ # It's ok for this to wrongly return False: the worst that can happen
+ # is that RemoteStreamSource will initiate a get_stream on an empty
+ # repository. And almost all repositories are non-empty.
+ return False
+
+ def refine(self, seen, referenced):
+ heads = set(self._repo.all_revision_ids())
+ heads.difference_update(seen)
+ heads.update(referenced)
+ return PendingAncestryResult(heads, self._repo)
+
+
+class EverythingNotInOther(AbstractSearch):
+ """Find all revisions in that are in one repo but not the other."""
+
+ def __init__(self, to_repo, from_repo, find_ghosts=False):
+ self.to_repo = to_repo
+ self.from_repo = from_repo
+ self.find_ghosts = find_ghosts
+
+ def execute(self):
+ return self.to_repo.search_missing_revision_ids(
+ self.from_repo, find_ghosts=self.find_ghosts)
+
+
+class NotInOtherForRevs(AbstractSearch):
+ """Find all revisions missing in one repo for a some specific heads."""
+
+ def __init__(self, to_repo, from_repo, required_ids, if_present_ids=None,
+ find_ghosts=False, limit=None):
+ """Constructor.
+
+ :param required_ids: revision IDs of heads that must be found, or else
+ the search will fail with NoSuchRevision. All revisions in their
+ ancestry not already in the other repository will be included in
+ the search result.
+ :param if_present_ids: revision IDs of heads that may be absent in the
+ source repository. If present, then their ancestry not already
+ found in other will be included in the search result.
+ :param limit: maximum number of revisions to fetch
+ """
+ self.to_repo = to_repo
+ self.from_repo = from_repo
+ self.find_ghosts = find_ghosts
+ self.required_ids = required_ids
+ self.if_present_ids = if_present_ids
+ self.limit = limit
+
+ def __repr__(self):
+ if len(self.required_ids) > 5:
+ reqd_revs_repr = repr(list(self.required_ids)[:5])[:-1] + ', ...]'
+ else:
+ reqd_revs_repr = repr(self.required_ids)
+ if self.if_present_ids and len(self.if_present_ids) > 5:
+ ifp_revs_repr = repr(list(self.if_present_ids)[:5])[:-1] + ', ...]'
+ else:
+ ifp_revs_repr = repr(self.if_present_ids)
+
+ return ("<%s from:%r to:%r find_ghosts:%r req'd:%r if-present:%r"
+ "limit:%r>") % (
+ self.__class__.__name__, self.from_repo, self.to_repo,
+ self.find_ghosts, reqd_revs_repr, ifp_revs_repr,
+ self.limit)
+
+ def execute(self):
+ return self.to_repo.search_missing_revision_ids(
+ self.from_repo, revision_ids=self.required_ids,
+ if_present_ids=self.if_present_ids, find_ghosts=self.find_ghosts,
+ limit=self.limit)
+
+
+def search_result_from_parent_map(parent_map, missing_keys):
+ """Transform a parent_map into SearchResult information."""
+ if not parent_map:
+ # parent_map is empty or None, simple search result
+ return [], [], 0
+ # start_set is all the keys in the cache
+ start_set = set(parent_map)
+ # result set is all the references to keys in the cache
+ result_parents = set()
+ for parents in parent_map.itervalues():
+ result_parents.update(parents)
+ stop_keys = result_parents.difference(start_set)
+ # We don't need to send ghosts back to the server as a position to
+ # stop either.
+ stop_keys.difference_update(missing_keys)
+ key_count = len(parent_map)
+ if (revision.NULL_REVISION in result_parents
+ and revision.NULL_REVISION in missing_keys):
+ # If we pruned NULL_REVISION from the stop_keys because it's also
+ # in our cache of "missing" keys we need to increment our key count
+ # by 1, because the reconstituted SearchResult on the server will
+ # still consider NULL_REVISION to be an included key.
+ key_count += 1
+ included_keys = start_set.intersection(result_parents)
+ start_set.difference_update(included_keys)
+ return start_set, stop_keys, key_count
+
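+# Illustrative sketch for search_result_from_parent_map() above (not part of
+# the original code): given a client-side cache
+# parent_map = {'rev-2': ('rev-1',), 'rev-3': ('rev-2',)} and no known missing
+# keys, the function returns start_set == {'rev-3'}, stop_keys == {'rev-1'} and
+# key_count == 2, i.e. "walk from rev-3, stop at rev-1, and expect to cover 2
+# revisions".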
+
+def _run_search(parent_map, heads, exclude_keys):
+ """Given a parent map, run a _BreadthFirstSearcher on it.
+
+    Start at heads and walk until you hit exclude_keys. Also watch for any of
+    the given heads that are encountered while walking; those turned out not to
+    be heads of the search.
+
+ This is mostly used to generate a succinct recipe for how to walk through
+ most of parent_map.
+
+ :return: (_BreadthFirstSearcher, set(heads_encountered_by_walking))
+ """
+ g = Graph(DictParentsProvider(parent_map))
+ s = g._make_breadth_first_searcher(heads)
+ found_heads = set()
+ while True:
+ try:
+ next_revs = s.next()
+ except StopIteration:
+ break
+ for parents in s._current_parents.itervalues():
+ f_heads = heads.intersection(parents)
+ if f_heads:
+ found_heads.update(f_heads)
+ stop_keys = exclude_keys.intersection(next_revs)
+ if stop_keys:
+ s.stop_searching_any(stop_keys)
+ for parents in s._current_parents.itervalues():
+ f_heads = heads.intersection(parents)
+ if f_heads:
+ found_heads.update(f_heads)
+ return s, found_heads
+
+
+def _find_possible_heads(parent_map, tip_keys, depth):
+ """Walk backwards (towards children) through the parent_map.
+
+ This finds 'heads' that will hopefully succinctly describe our search
+ graph.
+ """
+ child_map = invert_parent_map(parent_map)
+ heads = set()
+ current_roots = tip_keys
+ walked = set(current_roots)
+ while current_roots and depth > 0:
+ depth -= 1
+ children = set()
+ children_update = children.update
+ for p in current_roots:
+ # Is it better to pre- or post- filter the children?
+ try:
+ children_update(child_map[p])
+ except KeyError:
+ heads.add(p)
+ # If we've seen a key before, we don't want to walk it again. Note that
+ # 'children' stays relatively small while 'walked' grows large. So
+ # don't use 'difference_update' here which has to walk all of 'walked'.
+ # '.difference' is smart enough to walk only children and compare it to
+ # walked.
+ children = children.difference(walked)
+ walked.update(children)
+ current_roots = children
+ if current_roots:
+ # We walked to the end of depth, so these are the new tips.
+ heads.update(current_roots)
+ return heads
+
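+# Illustrative sketch for _find_possible_heads() above (not part of the
+# original code): with parent_map = {'B': ('A',), 'C': ('B',), 'D': ('C',)}
+# and tip_keys = ['B'], the child-wards walk visits B -> C -> D; with a large
+# depth the result is {'D'} (nothing has walked past D), while with depth=1 the
+# walk stops after one step and the result is {'C'}.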
+
+def limited_search_result_from_parent_map(parent_map, missing_keys, tip_keys,
+ depth):
+ """Transform a parent_map that is searching 'tip_keys' into an
+ approximate SearchResult.
+
+ We should be able to generate a SearchResult from a given set of starting
+ keys, that covers a subset of parent_map that has the last step pointing at
+ tip_keys. This is to handle the case that really-long-searches shouldn't be
+ started from scratch on each get_parent_map request, but we *do* want to
+ filter out some of the keys that we've already seen, so we don't get
+ information that we already know about on every request.
+
+ The server will validate the search (that starting at start_keys and
+ stopping at stop_keys yields the exact key_count), so we have to be careful
+ to give an exact recipe.
+
+ Basic algorithm is:
+ 1) Invert parent_map to get child_map (todo: have it cached and pass it
+ in)
+ 2) Starting at tip_keys, walk towards children for 'depth' steps.
+ 3) At that point, we have the 'start' keys.
+ 4) Start walking parent_map from 'start' keys, counting how many keys
+ are seen, and generating stop_keys for anything that would walk
+ outside of the parent_map.
+
+ :param parent_map: A map from {child_id: (parent_ids,)}
+ :param missing_keys: parent_ids that we know are unavailable
+ :param tip_keys: the revision_ids that we are searching
+ :param depth: How far back to walk.
+ """
+ if not parent_map:
+ # No search to send, because we haven't done any searching yet.
+ return [], [], 0
+ heads = _find_possible_heads(parent_map, tip_keys, depth)
+ s, found_heads = _run_search(parent_map, heads, set(tip_keys))
+ start_keys, exclude_keys, keys = s.get_state()
+ if found_heads:
+ # Anything in found_heads are redundant start_keys, we hit them while
+ # walking, so we can exclude them from the start list.
+ start_keys = set(start_keys).difference(found_heads)
+ return start_keys, exclude_keys, len(keys)
diff --git a/bzrlib/views.py b/bzrlib/views.py
new file mode 100644
index 0000000..c179dd3
--- /dev/null
+++ b/bzrlib/views.py
@@ -0,0 +1,285 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""View management.
+
+Views are contained within a working tree and normally constructed
+when first accessed. Clients should do, for example, ...
+
+ tree.views.lookup_view()
+"""
+
+from __future__ import absolute_import
+
+import re
+
+from bzrlib import (
+ errors,
+ osutils,
+ )
+
+
+_VIEWS_FORMAT_MARKER_RE = re.compile(r'Bazaar views format (\d+)')
+_VIEWS_FORMAT1_MARKER = "Bazaar views format 1\n"
+
+
+class _Views(object):
+ """Base class for View managers."""
+
+ def supports_views(self):
+ raise NotImplementedError(self.supports_views)
+
+
+class PathBasedViews(_Views):
+ """View storage in an unversioned tree control file.
+
+ Views are stored in terms of paths relative to the tree root.
+
+ The top line of the control file is a format marker in the format:
+
+ Bazaar views format X
+
+ where X is an integer number. After this top line, version 1 format is
+ stored as follows:
+
+    * optional name-value pairs in the format 'name=value'
+
+ * optional view definitions, one per line in the format
+
+ views:
+ name file1 file2 ...
+ name file1 file2 ...
+
+ where the fields are separated by a nul character (\0). The views file
+ is encoded in utf-8. The only supported keyword in version 1 is
+ 'current' which stores the name of the current view, if any.
+ """
+
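+    # Illustrative example (not part of the original file) of the serialized
+    # form produced by _serialize_view_content() below, with <NUL> standing in
+    # for the '\0' field separator:
+    #
+    #   Bazaar views format 1
+    #   current=my-view
+    #   views:
+    #   my-view<NUL>doc<NUL>src/foo.py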
+ def __init__(self, tree):
+ self.tree = tree
+ self._loaded = False
+ self._current = None
+ self._views = {}
+
+ def supports_views(self):
+ return True
+
+ def get_view_info(self):
+ """Get the current view and dictionary of views.
+
+ :return: current, views where
+ current = the name of the current view or None if no view is enabled
+ views = a map from view name to list of files/directories
+ """
+ self._load_view_info()
+ return self._current, self._views
+
+ def set_view_info(self, current, views):
+ """Set the current view and dictionary of views.
+
+ :param current: the name of the current view or None if no view is
+ enabled
+ :param views: a map from view name to list of files/directories
+ """
+ if current is not None and current not in views:
+ raise errors.NoSuchView(current)
+ self.tree.lock_write()
+ try:
+ self._current = current
+ self._views = views
+ self._save_view_info()
+ finally:
+ self.tree.unlock()
+
+ def lookup_view(self, view_name=None):
+ """Return the contents of a view.
+
+        :param view_name: name of the view or None to look up the current view
+ :return: the list of files/directories in the requested view
+ """
+ self._load_view_info()
+ try:
+ if view_name is None:
+ if self._current:
+ view_name = self._current
+ else:
+ return []
+ return self._views[view_name]
+ except KeyError:
+ raise errors.NoSuchView(view_name)
+
+ def set_view(self, view_name, view_files, make_current=True):
+ """Add or update a view definition.
+
+ :param view_name: the name of the view
+ :param view_files: the list of files/directories in the view
+ :param make_current: make this view the current one or not
+ """
+ self.tree.lock_write()
+ try:
+ self._load_view_info()
+ self._views[view_name] = view_files
+ if make_current:
+ self._current = view_name
+ self._save_view_info()
+ finally:
+ self.tree.unlock()
+
+ def delete_view(self, view_name):
+ """Delete a view definition.
+
+ If the view deleted is the current one, the current view is reset.
+ """
+ self.tree.lock_write()
+ try:
+ self._load_view_info()
+ try:
+ del self._views[view_name]
+ except KeyError:
+ raise errors.NoSuchView(view_name)
+ if view_name == self._current:
+ self._current = None
+ self._save_view_info()
+ finally:
+ self.tree.unlock()
+
+ def _save_view_info(self):
+ """Save the current view and all view definitions.
+
+ Be sure to have initialised self._current and self._views before
+ calling this method.
+ """
+ self.tree.lock_write()
+ try:
+ if self._current is None:
+ keywords = {}
+ else:
+ keywords = {'current': self._current}
+ self.tree._transport.put_bytes('views',
+ self._serialize_view_content(keywords, self._views))
+ finally:
+ self.tree.unlock()
+
+ def _load_view_info(self):
+ """Load the current view and dictionary of view definitions."""
+ if not self._loaded:
+ self.tree.lock_read()
+ try:
+ try:
+ view_content = self.tree._transport.get_bytes('views')
+ except errors.NoSuchFile, e:
+ self._current, self._views = None, {}
+ else:
+ keywords, self._views = \
+ self._deserialize_view_content(view_content)
+ self._current = keywords.get('current')
+ finally:
+ self.tree.unlock()
+ self._loaded = True
+
+ def _serialize_view_content(self, keywords, view_dict):
+ """Convert view keywords and a view dictionary into a stream."""
+ lines = [_VIEWS_FORMAT1_MARKER]
+ for key in keywords:
+ line = "%s=%s\n" % (key,keywords[key])
+ lines.append(line.encode('utf-8'))
+ if view_dict:
+ lines.append("views:\n".encode('utf-8'))
+ for view in sorted(view_dict):
+ view_data = "%s\0%s\n" % (view, "\0".join(view_dict[view]))
+ lines.append(view_data.encode('utf-8'))
+ return "".join(lines)
+
+ def _deserialize_view_content(self, view_content):
+ """Convert a stream into view keywords and a dictionary of views."""
+ # as a special case to make initialization easy, an empty definition
+ # maps to no current view and an empty view dictionary
+ if view_content == '':
+ return {}, {}
+ lines = view_content.splitlines()
+ match = _VIEWS_FORMAT_MARKER_RE.match(lines[0])
+ if not match:
+ raise ValueError(
+ "format marker missing from top of views file")
+ elif match.group(1) != '1':
+ raise ValueError(
+ "cannot decode views format %s" % match.group(1))
+ try:
+ keywords = {}
+ views = {}
+ in_views = False
+ for line in lines[1:]:
+ text = line.decode('utf-8')
+ if in_views:
+ parts = text.split('\0')
+ view = parts.pop(0)
+ views[view] = parts
+ elif text == 'views:':
+ in_views = True
+ continue
+ elif text.find('=') >= 0:
+ # must be a name-value pair
+ keyword, value = text.split('=', 1)
+ keywords[keyword] = value
+ else:
+ raise ValueError("failed to deserialize views line %s",
+ text)
+ return keywords, views
+ except ValueError, e:
+ raise ValueError("failed to deserialize views content %r: %s"
+ % (view_content, e))
+
+
+class DisabledViews(_Views):
+ """View storage that refuses to store anything.
+
+ This is used by older formats that can't store views.
+ """
+
+ def __init__(self, tree):
+ self.tree = tree
+
+ def supports_views(self):
+ return False
+
+ def _not_supported(self, *a, **k):
+ raise errors.ViewsNotSupported(self.tree)
+
+ get_view_info = _not_supported
+ set_view_info = _not_supported
+ lookup_view = _not_supported
+ set_view = _not_supported
+ delete_view = _not_supported
+
+
+def view_display_str(view_files, encoding=None):
+ """Get the display string for a list of view files.
+
+ :param view_files: the list of file names
+ :param encoding: the encoding to display the files in
+ """
+ if encoding is None:
+ return ", ".join(view_files)
+ else:
+ return ", ".join([v.encode(encoding, 'replace') for v in view_files])
+
+
+def check_path_in_view(tree, relpath):
+ """If a working tree has a view enabled, check the path is within it."""
+ if tree.supports_views():
+ view_files = tree.views.lookup_view()
+ if view_files and not osutils.is_inside_any(view_files, relpath):
+ raise errors.FileOutsideView(relpath, view_files)
diff --git a/bzrlib/weave.py b/bzrlib/weave.py
new file mode 100755
index 0000000..b68104e
--- /dev/null
+++ b/bzrlib/weave.py
@@ -0,0 +1,1032 @@
+# Copyright (C) 2005, 2009 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Author: Martin Pool <mbp@canonical.com>
+
+"""Weave - storage of related text file versions"""
+
+from __future__ import absolute_import
+
+# XXX: If we do weaves this way, will a merge still behave the same
+# way if it's done in a different order? That's a pretty desirable
+# property.
+
+# TODO: Nothing here so far assumes the lines are really \n newlines,
+# rather than being split up in some other way. We could accommodate
+# binaries, perhaps by naively splitting on \n or perhaps using
+# something like a rolling checksum.
+
+# TODO: End marker for each version so we can stop reading?
+
+# TODO: Check that no insertion occurs inside a deletion that was
+# active in the version of the insertion.
+
+# TODO: In addition to the SHA-1 check, perhaps have some code that
+# checks structural constraints of the weave: ie that insertions are
+# properly nested, that there is no text outside of an insertion, that
+# insertions or deletions are not repeated, etc.
+
+# TODO: Parallel-extract that passes back each line along with a
+# description of which revisions include it. Nice for checking all
+# shas or calculating stats in parallel.
+
+# TODO: Using a single _extract routine and then processing the output
+# is probably inefficient. It's simple enough that we can afford to
+# have slight specializations for different ways its used: annotate,
+# basis for add, get, etc.
+
+# TODO: Probably the API should work only in names to hide the integer
+# indexes from the user.
+
+# TODO: Is there any potential performance win by having an add()
+# variant that is passed a pre-cooked version of the single basis
+# version?
+
+# TODO: Reweave can possibly be made faster by remembering diffs
+# where the basis and destination are unchanged.
+
+# FIXME: Sometimes we will be given a parents list for a revision
+# that includes some redundant parents (i.e. already a parent of
+# something in the list.) We should eliminate them. This can
+# be done fairly efficiently because the sequence numbers constrain
+# the possible relationships.
+
+# FIXME: the conflict markers should be *7* characters
+
+from copy import copy
+from cStringIO import StringIO
+import os
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bzrlib import tsort
+""")
+from bzrlib import (
+ errors,
+ osutils,
+ )
+from bzrlib.errors import (WeaveError, WeaveFormatError, WeaveParentMismatch,
+ RevisionAlreadyPresent,
+ RevisionNotPresent,
+ UnavailableRepresentation,
+ )
+from bzrlib.osutils import dirname, sha, sha_strings, split_lines
+import bzrlib.patiencediff
+from bzrlib.revision import NULL_REVISION
+from bzrlib.symbol_versioning import *
+from bzrlib.trace import mutter
+from bzrlib.versionedfile import (
+ AbsentContentFactory,
+ adapter_registry,
+ ContentFactory,
+ sort_groupcompress,
+ VersionedFile,
+ )
+from bzrlib.weavefile import _read_weave_v5, write_weave_v5
+
+
+class WeaveContentFactory(ContentFactory):
+ """Content factory for streaming from weaves.
+
+ :seealso ContentFactory:
+ """
+
+ def __init__(self, version, weave):
+ """Create a WeaveContentFactory for version from weave."""
+ ContentFactory.__init__(self)
+ self.sha1 = weave.get_sha1s([version])[version]
+ self.key = (version,)
+ parents = weave.get_parent_map([version])[version]
+ self.parents = tuple((parent,) for parent in parents)
+ self.storage_kind = 'fulltext'
+ self._weave = weave
+
+ def get_bytes_as(self, storage_kind):
+ if storage_kind == 'fulltext':
+ return self._weave.get_text(self.key[-1])
+ elif storage_kind == 'chunked':
+ return self._weave.get_lines(self.key[-1])
+ else:
+ raise UnavailableRepresentation(self.key, storage_kind, 'fulltext')
+
+
+class Weave(VersionedFile):
+ """weave - versioned text file storage.
+
+ A Weave manages versions of line-based text files, keeping track
+ of the originating version for each line.
+
+ To clients the "lines" of the file are represented as a list of strings.
+ These strings will typically have terminal newline characters, but
+ this is not required. In particular files commonly do not have a newline
+ at the end of the file.
+
+ Texts can be identified in either of two ways:
+
+ * a nonnegative index number.
+
+ * a version-id string.
+
+ Typically the index number will be valid only inside this weave and
+ the version-id is used to reference it in the larger world.
+
+ The weave is represented as a list mixing edit instructions and
+ literal text. Each entry in _weave can be either a string (or
+ unicode), or a tuple. If a string, it means that the given line
+ should be output in the currently active revisions.
+
+ If a tuple, it gives a processing instruction saying in which
+ revisions the enclosed lines are active. The tuple has the form
+ (instruction, version).
+
+ The instruction can be '{' or '}' for an insertion block, and '['
+ and ']' for a deletion block respectively. The version is the
+ integer version index. There is no replace operator, only deletes
+ and inserts. For '}', the end of an insertion, there is no
+ version parameter because it always closes the most recently
+ opened insertion.
+
+ Constraints/notes:
+
+ * A later version can delete lines that were introduced by any
+ number of ancestor versions; this implies that deletion
+ instructions can span insertion blocks without regard to the
+ insertion block's nesting.
+
+ * Similarly, deletions need not be properly nested with regard to
+ each other, because they might have been generated by
+ independent revisions.
+
+ * Insertions are always made by inserting a new bracketed block
+ into a single point in the previous weave. This implies they
+ can nest but not overlap, and the nesting must always have later
+ insertions on the inside.
+
+ * It doesn't seem very useful to have an active insertion
+ inside an inactive insertion, but it might happen.
+
+    * Therefore, all instructions are always "considered"; that
+      is, pushed onto and popped off the stack. An outer inactive block
+ doesn't disable an inner block.
+
+ * Lines are enabled if the most recent enclosing insertion is
+ active and none of the enclosing deletions are active.
+
+ * There is no point having a deletion directly inside its own
+ insertion; you might as well just not write it. And there
+ should be no way to get an earlier version deleting a later
+ version.
+
+ _weave
+ Text of the weave; list of control instruction tuples and strings.
+
+ _parents
+ List of parents, indexed by version number.
+ It is only necessary to store the minimal set of parents for
+ each version; the parent's parents are implied.
+
+ _sha1s
+ List of hex SHA-1 of each version.
+
+ _names
+ List of symbolic names for each version. Each should be unique.
+
+ _name_map
+ For each name, the version number.
+
+ _weave_name
+ Descriptive name of this weave; typically the filename if known.
+ Set by read_weave.
+ """
+
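+    # Illustrative sketch (not from the original docstring): a weave holding
+    # version 0 with the single line "hello\n", plus version 1 (whose parent is
+    # version 0) appending "world\n", would have
+    #   _weave   == [('{', 0), 'hello\n', ('}', None),
+    #                ('{', 1), 'world\n', ('}', None)]
+    #   _parents == [[], [0]]
+    # so extracting version 1 activates both insertion blocks.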
+ __slots__ = ['_weave', '_parents', '_sha1s', '_names', '_name_map',
+ '_weave_name', '_matcher', '_allow_reserved']
+
+ def __init__(self, weave_name=None, access_mode='w', matcher=None,
+ get_scope=None, allow_reserved=False):
+ """Create a weave.
+
+ :param get_scope: A callable that returns an opaque object to be used
+ for detecting when this weave goes out of scope (should stop
+ answering requests or allowing mutation).
+ """
+ super(Weave, self).__init__()
+ self._weave = []
+ self._parents = []
+ self._sha1s = []
+ self._names = []
+ self._name_map = {}
+ self._weave_name = weave_name
+ if matcher is None:
+ self._matcher = bzrlib.patiencediff.PatienceSequenceMatcher
+ else:
+ self._matcher = matcher
+ if get_scope is None:
+ get_scope = lambda:None
+ self._get_scope = get_scope
+ self._scope = get_scope()
+ self._access_mode = access_mode
+ self._allow_reserved = allow_reserved
+
+ def __repr__(self):
+ return "Weave(%r)" % self._weave_name
+
+ def _check_write_ok(self):
+ """Is the versioned file marked as 'finished' ? Raise if it is."""
+ if self._get_scope() != self._scope:
+ raise errors.OutSideTransaction()
+ if self._access_mode != 'w':
+ raise errors.ReadOnlyObjectDirtiedError(self)
+
+ def copy(self):
+ """Return a deep copy of self.
+
+ The copy can be modified without affecting the original weave."""
+ other = Weave()
+ other._weave = self._weave[:]
+ other._parents = self._parents[:]
+ other._sha1s = self._sha1s[:]
+ other._names = self._names[:]
+ other._name_map = self._name_map.copy()
+ other._weave_name = self._weave_name
+ return other
+
+ def __eq__(self, other):
+ if not isinstance(other, Weave):
+ return False
+ return self._parents == other._parents \
+ and self._weave == other._weave \
+ and self._sha1s == other._sha1s
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _idx_to_name(self, version):
+ return self._names[version]
+
+ def _lookup(self, name):
+ """Convert symbolic version name to index."""
+ if not self._allow_reserved:
+ self.check_not_reserved_id(name)
+ try:
+ return self._name_map[name]
+ except KeyError:
+ raise RevisionNotPresent(name, self._weave_name)
+
+ def versions(self):
+ """See VersionedFile.versions."""
+ return self._names[:]
+
+ def has_version(self, version_id):
+ """See VersionedFile.has_version."""
+ return (version_id in self._name_map)
+
+ __contains__ = has_version
+
+ def get_record_stream(self, versions, ordering, include_delta_closure):
+ """Get a stream of records for versions.
+
+ :param versions: The versions to include. Each version is a tuple
+ (version,).
+ :param ordering: Either 'unordered' or 'topological'. A topologically
+ sorted stream has compression parents strictly before their
+ children.
+ :param include_delta_closure: If True then the closure across any
+ compression parents will be included (in the opaque data).
+ :return: An iterator of ContentFactory objects, each of which is only
+ valid until the iterator is advanced.
+ """
+ versions = [version[-1] for version in versions]
+ if ordering == 'topological':
+ parents = self.get_parent_map(versions)
+ new_versions = tsort.topo_sort(parents)
+ new_versions.extend(set(versions).difference(set(parents)))
+ versions = new_versions
+ elif ordering == 'groupcompress':
+ parents = self.get_parent_map(versions)
+ new_versions = sort_groupcompress(parents)
+ new_versions.extend(set(versions).difference(set(parents)))
+ versions = new_versions
+ for version in versions:
+ if version in self:
+ yield WeaveContentFactory(version, self)
+ else:
+ yield AbsentContentFactory((version,))
+
+ def get_parent_map(self, version_ids):
+ """See VersionedFile.get_parent_map."""
+ result = {}
+ for version_id in version_ids:
+ if version_id == NULL_REVISION:
+ parents = ()
+ else:
+ try:
+ parents = tuple(
+ map(self._idx_to_name,
+ self._parents[self._lookup(version_id)]))
+ except RevisionNotPresent:
+ continue
+ result[version_id] = parents
+ return result
+
+ def get_parents_with_ghosts(self, version_id):
+ raise NotImplementedError(self.get_parents_with_ghosts)
+
+ def insert_record_stream(self, stream):
+ """Insert a record stream into this versioned file.
+
+ :param stream: A stream of records to insert.
+ :return: None
+ :seealso VersionedFile.get_record_stream:
+ """
+ adapters = {}
+ for record in stream:
+ # Raise an error when a record is missing.
+ if record.storage_kind == 'absent':
+ raise RevisionNotPresent([record.key[0]], self)
+ # adapt to non-tuple interface
+ parents = [parent[0] for parent in record.parents]
+ if (record.storage_kind == 'fulltext'
+ or record.storage_kind == 'chunked'):
+ self.add_lines(record.key[0], parents,
+ osutils.chunks_to_lines(record.get_bytes_as('chunked')))
+ else:
+ adapter_key = record.storage_kind, 'fulltext'
+ try:
+ adapter = adapters[adapter_key]
+ except KeyError:
+ adapter_factory = adapter_registry.get(adapter_key)
+ adapter = adapter_factory(self)
+ adapters[adapter_key] = adapter
+ lines = split_lines(adapter.get_bytes(record))
+ try:
+ self.add_lines(record.key[0], parents, lines)
+ except RevisionAlreadyPresent:
+ pass
+
+ def _check_repeated_add(self, name, parents, text, sha1):
+ """Check that a duplicated add is OK.
+
+ If it is, return the (old) index; otherwise raise an exception.
+ """
+ idx = self._lookup(name)
+ if sorted(self._parents[idx]) != sorted(parents) \
+ or sha1 != self._sha1s[idx]:
+ raise RevisionAlreadyPresent(name, self._weave_name)
+ return idx
+
+ def _add_lines(self, version_id, parents, lines, parent_texts,
+ left_matching_blocks, nostore_sha, random_id, check_content):
+ """See VersionedFile.add_lines."""
+ idx = self._add(version_id, lines, map(self._lookup, parents),
+ nostore_sha=nostore_sha)
+ return sha_strings(lines), sum(map(len, lines)), idx
+
+ def _add(self, version_id, lines, parents, sha1=None, nostore_sha=None):
+ """Add a single text on top of the weave.
+
+ Returns the index number of the newly added version.
+
+ version_id
+ Symbolic name for this version.
+ (Typically the revision-id of the revision that added it.)
+ If None, a name will be allocated based on the hash. (sha1:SHAHASH)
+
+ parents
+ List or set of direct parent version numbers.
+
+ lines
+ Sequence of lines to be added in the new version.
+
+ :param nostore_sha: See VersionedFile.add_lines.
+ """
+ self._check_lines_not_unicode(lines)
+ self._check_lines_are_lines(lines)
+ if not sha1:
+ sha1 = sha_strings(lines)
+ if sha1 == nostore_sha:
+ raise errors.ExistingContent
+ if version_id is None:
+ version_id = "sha1:" + sha1
+ if version_id in self._name_map:
+ return self._check_repeated_add(version_id, parents, lines, sha1)
+
+ self._check_versions(parents)
+ ## self._check_lines(lines)
+ new_version = len(self._parents)
+
+ # if we abort after here the (in-memory) weave will be corrupt because only
+ # some fields are updated
+ # XXX: FIXME implement a succeed-or-fail of the rest of this routine.
+ # - Robert Collins 20060226
+ self._parents.append(parents[:])
+ self._sha1s.append(sha1)
+ self._names.append(version_id)
+ self._name_map[version_id] = new_version
+
+
+ if not parents:
+            # special case; adding a revision with no parents; can do
+ # this more quickly by just appending unconditionally.
+ # even more specially, if we're adding an empty text we
+ # need do nothing at all.
+ if lines:
+ self._weave.append(('{', new_version))
+ self._weave.extend(lines)
+ self._weave.append(('}', None))
+ return new_version
+
+ if len(parents) == 1:
+ pv = list(parents)[0]
+ if sha1 == self._sha1s[pv]:
+ # special case: same as the single parent
+ return new_version
+
+
+ ancestors = self._inclusions(parents)
+
+ l = self._weave
+
+        # basis is a list of (origin, lineno, line)
+ basis_lineno = []
+ basis_lines = []
+ for origin, lineno, line in self._extract(ancestors):
+ basis_lineno.append(lineno)
+ basis_lines.append(line)
+
+ # another small special case: a merge, producing the same text
+ # as auto-merge
+ if lines == basis_lines:
+ return new_version
+
+ # add a sentinel, because we can also match against the final line
+ basis_lineno.append(len(self._weave))
+
+ # XXX: which line of the weave should we really consider
+ # matches the end of the file? the current code says it's the
+ # last line of the weave?
+
+ #print 'basis_lines:', basis_lines
+ #print 'new_lines: ', lines
+
+ s = self._matcher(None, basis_lines, lines)
+
+ # offset gives the number of lines that have been inserted
+ # into the weave up to the current point; if the original edit instruction
+ # says to change line A then we actually change (A+offset)
+ offset = 0
+
+ for tag, i1, i2, j1, j2 in s.get_opcodes():
+ # i1,i2 are given in offsets within basis_lines; we need to map them
+ # back to offsets within the entire weave
+ #print 'raw match', tag, i1, i2, j1, j2
+ if tag == 'equal':
+ continue
+ i1 = basis_lineno[i1]
+ i2 = basis_lineno[i2]
+ # the deletion and insertion are handled separately.
+ # first delete the region.
+ if i1 != i2:
+ self._weave.insert(i1+offset, ('[', new_version))
+ self._weave.insert(i2+offset+1, (']', new_version))
+ offset += 2
+
+ if j1 != j2:
+ # there may have been a deletion spanning up to
+ # i2; we want to insert after this region to make sure
+ # we don't destroy ourselves
+ i = i2 + offset
+ self._weave[i:i] = ([('{', new_version)]
+ + lines[j1:j2]
+ + [('}', None)])
+ offset += 2 + (j2 - j1)
+ return new_version
+
+ def _inclusions(self, versions):
+ """Return set of all ancestors of given version(s)."""
+ if not len(versions):
+ return []
+ i = set(versions)
+ for v in xrange(max(versions), 0, -1):
+ if v in i:
+ # include all its parents
+ i.update(self._parents[v])
+ return i
+ ## except IndexError:
+ ## raise ValueError("version %d not present in weave" % v)
+
+ def get_ancestry(self, version_ids, topo_sorted=True):
+ """See VersionedFile.get_ancestry."""
+ if isinstance(version_ids, basestring):
+ version_ids = [version_ids]
+ i = self._inclusions([self._lookup(v) for v in version_ids])
+ return [self._idx_to_name(v) for v in i]
+
+ def _check_lines(self, text):
+ if not isinstance(text, list):
+ raise ValueError("text should be a list, not %s" % type(text))
+
+ for l in text:
+ if not isinstance(l, basestring):
+ raise ValueError("text line should be a string or unicode, not %s"
+ % type(l))
+
+
+
+ def _check_versions(self, indexes):
+ """Check everything in the sequence of indexes is valid"""
+ for i in indexes:
+ try:
+ self._parents[i]
+ except IndexError:
+ raise IndexError("invalid version number %r" % i)
+
+ def _compatible_parents(self, my_parents, other_parents):
+ """During join check that other_parents are joinable with my_parents.
+
+ Joinable is defined as 'is a subset of' - supersets may require
+ regeneration of diffs, but subsets do not.
+ """
+ return len(other_parents.difference(my_parents)) == 0
+
+ def annotate(self, version_id):
+ """Return a list of (version-id, line) tuples for version_id.
+
+ The index indicates when the line originated in the weave."""
+ incls = [self._lookup(version_id)]
+ return [(self._idx_to_name(origin), text) for origin, lineno, text in
+ self._extract(incls)]
+
+ def iter_lines_added_or_present_in_versions(self, version_ids=None,
+ pb=None):
+ """See VersionedFile.iter_lines_added_or_present_in_versions()."""
+ if version_ids is None:
+ version_ids = self.versions()
+ version_ids = set(version_ids)
+ for lineno, inserted, deletes, line in self._walk_internal(version_ids):
+ if inserted not in version_ids: continue
+ if line[-1] != '\n':
+ yield line + '\n', inserted
+ else:
+ yield line, inserted
+
+ def _walk_internal(self, version_ids=None):
+ """Helper method for weave actions."""
+
+ istack = []
+ dset = set()
+
+ lineno = 0 # line of weave, 0-based
+
+ for l in self._weave:
+ if l.__class__ == tuple:
+ c, v = l
+ isactive = None
+ if c == '{':
+ istack.append(self._names[v])
+ elif c == '}':
+ istack.pop()
+ elif c == '[':
+ dset.add(self._names[v])
+ elif c == ']':
+ dset.remove(self._names[v])
+ else:
+ raise WeaveFormatError('unexpected instruction %r' % v)
+ else:
+ yield lineno, istack[-1], frozenset(dset), l
+ lineno += 1
+
+ if istack:
+ raise WeaveFormatError("unclosed insertion blocks "
+ "at end of weave: %s" % istack)
+ if dset:
+ raise WeaveFormatError("unclosed deletion blocks at end of weave: %s"
+ % dset)
+
+ def plan_merge(self, ver_a, ver_b):
+ """Return pseudo-annotation indicating how the two versions merge.
+
+ This is computed between versions a and b and their common
+ base.
+
+ Weave lines present in none of them are skipped entirely.
+ """
+ inc_a = set(self.get_ancestry([ver_a]))
+ inc_b = set(self.get_ancestry([ver_b]))
+ inc_c = inc_a & inc_b
+
+ for lineno, insert, deleteset, line in self._walk_internal([ver_a, ver_b]):
+ if deleteset & inc_c:
+ # killed in parent; can't be in either a or b
+ # not relevant to our work
+ yield 'killed-base', line
+ elif insert in inc_c:
+ # was inserted in base
+ killed_a = bool(deleteset & inc_a)
+ killed_b = bool(deleteset & inc_b)
+ if killed_a and killed_b:
+ yield 'killed-both', line
+ elif killed_a:
+ yield 'killed-a', line
+ elif killed_b:
+ yield 'killed-b', line
+ else:
+ yield 'unchanged', line
+ elif insert in inc_a:
+ if deleteset & inc_a:
+ yield 'ghost-a', line
+ else:
+ # new in A; not in B
+ yield 'new-a', line
+ elif insert in inc_b:
+ if deleteset & inc_b:
+ yield 'ghost-b', line
+ else:
+ yield 'new-b', line
+ else:
+ # not in either revision
+ yield 'irrelevant', line
+
+ def _extract(self, versions):
+ """Yield annotation of lines in included set.
+
+ Yields a sequence of tuples (origin, lineno, text), where
+ origin is the origin version, lineno the index in the weave,
+ and text the text of the line.
+
+ The set typically but not necessarily corresponds to a version.
+ """
+ for i in versions:
+ if not isinstance(i, int):
+ raise ValueError(i)
+
+ included = self._inclusions(versions)
+
+ istack = []
+ iset = set()
+ dset = set()
+
+ lineno = 0 # line of weave, 0-based
+
+ isactive = None
+
+ result = []
+
+ WFE = WeaveFormatError
+
+ # wow.
+ # 449 0 4474.6820 2356.5590 bzrlib.weave:556(_extract)
+ # +285282 0 1676.8040 1676.8040 +<isinstance>
+ # 1.6 seconds in 'isinstance'.
+ # changing the first isinstance:
+ # 449 0 2814.2660 1577.1760 bzrlib.weave:556(_extract)
+ # +140414 0 762.8050 762.8050 +<isinstance>
+ # note that the inline time actually dropped (less function calls)
+ # and total processing time was halved.
+ # we're still spending ~1/4 of the method in isinstance though.
+ # so lets hard code the acceptable string classes we expect:
+ # 449 0 1202.9420 786.2930 bzrlib.weave:556(_extract)
+ # +71352 0 377.5560 377.5560 +<method 'append' of 'list'
+ # objects>
+ # yay, down to ~1/4 the initial extract time, and our inline time
+ # has shrunk again, with isinstance no longer dominating.
+ # tweaking the stack inclusion test to use a set gives:
+ # 449 0 1122.8030 713.0080 bzrlib.weave:556(_extract)
+ # +71352 0 354.9980 354.9980 +<method 'append' of 'list'
+ # objects>
+ # - a 5% win, or possibly just noise. However with large istacks that
+ # 'in' test could dominate, so I'm leaving this change in place -
+ # when its fast enough to consider profiling big datasets we can review.
+
+
+
+
+ for l in self._weave:
+ if l.__class__ == tuple:
+ c, v = l
+ isactive = None
+ if c == '{':
+ istack.append(v)
+ iset.add(v)
+ elif c == '}':
+ iset.remove(istack.pop())
+ elif c == '[':
+ if v in included:
+ dset.add(v)
+ elif c == ']':
+ if v in included:
+ dset.remove(v)
+ else:
+ raise AssertionError()
+ else:
+ if isactive is None:
+ isactive = (not dset) and istack and (istack[-1] in included)
+ if isactive:
+ result.append((istack[-1], lineno, l))
+ lineno += 1
+ if istack:
+ raise WeaveFormatError("unclosed insertion blocks "
+ "at end of weave: %s" % istack)
+ if dset:
+ raise WeaveFormatError("unclosed deletion blocks at end of weave: %s"
+ % dset)
+ return result
+
+ def _maybe_lookup(self, name_or_index):
+ """Convert possible symbolic name to index, or pass through indexes.
+
+ NOT FOR PUBLIC USE.
+ """
+ if isinstance(name_or_index, (int, long)):
+ return name_or_index
+ else:
+ return self._lookup(name_or_index)
+
+ def get_lines(self, version_id):
+ """See VersionedFile.get_lines()."""
+ int_index = self._maybe_lookup(version_id)
+ result = [line for (origin, lineno, line) in self._extract([int_index])]
+ expected_sha1 = self._sha1s[int_index]
+ measured_sha1 = sha_strings(result)
+ if measured_sha1 != expected_sha1:
+ raise errors.WeaveInvalidChecksum(
+ 'file %s, revision %s, expected: %s, measured %s'
+ % (self._weave_name, version_id,
+ expected_sha1, measured_sha1))
+ return result
+
+ def get_sha1s(self, version_ids):
+ """See VersionedFile.get_sha1s()."""
+ result = {}
+ for v in version_ids:
+ result[v] = self._sha1s[self._lookup(v)]
+ return result
+
+ def num_versions(self):
+ """How many versions are in this weave?"""
+ l = len(self._parents)
+ return l
+
+ __len__ = num_versions
+
+ def check(self, progress_bar=None):
+ # TODO evaluate performance hit of using string sets in this routine.
+ # TODO: check no circular inclusions
+ # TODO: create a nested progress bar
+ for version in range(self.num_versions()):
+ inclusions = list(self._parents[version])
+ if inclusions:
+ inclusions.sort()
+ if inclusions[-1] >= version:
+ raise WeaveFormatError("invalid included version %d for index %d"
+ % (inclusions[-1], version))
+
+ # try extracting all versions; parallel extraction is used
+ nv = self.num_versions()
+ sha1s = {}
+ texts = {}
+ inclusions = {}
+ for i in range(nv):
+ # For creating the ancestry, IntSet is much faster (3.7s vs 0.17s)
+ # The problem is that set membership is much more expensive
+ name = self._idx_to_name(i)
+ sha1s[name] = sha()
+ texts[name] = []
+ new_inc = set([name])
+ for p in self._parents[i]:
+ new_inc.update(inclusions[self._idx_to_name(p)])
+
+ if set(new_inc) != set(self.get_ancestry(name)):
+ raise AssertionError(
+ 'failed %s != %s'
+ % (set(new_inc), set(self.get_ancestry(name))))
+ inclusions[name] = new_inc
+
+ nlines = len(self._weave)
+
+ update_text = 'checking weave'
+ if self._weave_name:
+ short_name = os.path.basename(self._weave_name)
+ update_text = 'checking %s' % (short_name,)
+ update_text = update_text[:25]
+
+ for lineno, insert, deleteset, line in self._walk_internal():
+ if progress_bar:
+ progress_bar.update(update_text, lineno, nlines)
+
+ for name, name_inclusions in inclusions.items():
+ # The active inclusion must be an ancestor,
+ # and no ancestors must have deleted this line,
+ # because we don't support resurrection.
+ if (insert in name_inclusions) and not (deleteset & name_inclusions):
+ sha1s[name].update(line)
+
+ for i in range(nv):
+ version = self._idx_to_name(i)
+ hd = sha1s[version].hexdigest()
+ expected = self._sha1s[i]
+ if hd != expected:
+ raise errors.WeaveInvalidChecksum(
+ "mismatched sha1 for version %s: "
+ "got %s, expected %s"
+ % (version, hd, expected))
+
+ # TODO: check insertions are properly nested, that there are
+ # no lines outside of insertion blocks, that deletions are
+ # properly paired, etc.
+
+ def _imported_parents(self, other, other_idx):
+ """Return list of parents in self corresponding to indexes in other."""
+ new_parents = []
+ for parent_idx in other._parents[other_idx]:
+ parent_name = other._names[parent_idx]
+ if parent_name not in self._name_map:
+ # should not be possible
+ raise WeaveError("missing parent {%s} of {%s} in %r"
+ % (parent_name, other._name_map[other_idx], self))
+ new_parents.append(self._name_map[parent_name])
+ return new_parents
+
+ def _check_version_consistent(self, other, other_idx, name):
+ """Check if a version in consistent in this and other.
+
+ To be consistent it must have:
+
+ * the same text
+ * the same direct parents (by name, not index, and disregarding
+ order)
+
+ If present & correct return True;
+ if not present in self return False;
+ if inconsistent raise error."""
+ this_idx = self._name_map.get(name, -1)
+ if this_idx != -1:
+ if self._sha1s[this_idx] != other._sha1s[other_idx]:
+ raise errors.WeaveTextDiffers(name, self, other)
+ self_parents = self._parents[this_idx]
+ other_parents = other._parents[other_idx]
+ n1 = set([self._names[i] for i in self_parents])
+ n2 = set([other._names[i] for i in other_parents])
+ if not self._compatible_parents(n1, n2):
+ raise WeaveParentMismatch("inconsistent parents "
+ "for version {%s}: %s vs %s" % (name, n1, n2))
+ else:
+ return True # ok!
+ else:
+ return False
+
+ def _reweave(self, other, pb, msg):
+ """Reweave self with other - internal helper for join().
+
+ :param other: The other weave to merge
+ :param pb: An optional progress bar, indicating how far done we are
+ :param msg: An optional message for the progress
+ """
+ new_weave = _reweave(self, other, pb=pb, msg=msg)
+ self._copy_weave_content(new_weave)
+
+ def _copy_weave_content(self, otherweave):
+ """adsorb the content from otherweave."""
+ for attr in self.__slots__:
+ if attr != '_weave_name':
+ setattr(self, attr, copy(getattr(otherweave, attr)))
+
+
+class WeaveFile(Weave):
+ """A WeaveFile represents a Weave on disk and writes on change."""
+
+ WEAVE_SUFFIX = '.weave'
+
+ def __init__(self, name, transport, filemode=None, create=False, access_mode='w', get_scope=None):
+ """Create a WeaveFile.
+
+        :param create: If not True, only open an existing weave file.
+ """
+ super(WeaveFile, self).__init__(name, access_mode, get_scope=get_scope,
+ allow_reserved=False)
+ self._transport = transport
+ self._filemode = filemode
+ try:
+ f = self._transport.get(name + WeaveFile.WEAVE_SUFFIX)
+ _read_weave_v5(StringIO(f.read()), self)
+ except errors.NoSuchFile:
+ if not create:
+ raise
+ # new file, save it
+ self._save()
+
+ def _add_lines(self, version_id, parents, lines, parent_texts,
+ left_matching_blocks, nostore_sha, random_id, check_content):
+ """Add a version and save the weave."""
+ self.check_not_reserved_id(version_id)
+ result = super(WeaveFile, self)._add_lines(version_id, parents, lines,
+ parent_texts, left_matching_blocks, nostore_sha, random_id,
+ check_content)
+ self._save()
+ return result
+
+ def copy_to(self, name, transport):
+ """See VersionedFile.copy_to()."""
+ # as we are all in memory always, just serialise to the new place.
+ sio = StringIO()
+ write_weave_v5(self, sio)
+ sio.seek(0)
+ transport.put_file(name + WeaveFile.WEAVE_SUFFIX, sio, self._filemode)
+
+ def _save(self):
+ """Save the weave."""
+ self._check_write_ok()
+ sio = StringIO()
+ write_weave_v5(self, sio)
+ sio.seek(0)
+ bytes = sio.getvalue()
+ path = self._weave_name + WeaveFile.WEAVE_SUFFIX
+ try:
+ self._transport.put_bytes(path, bytes, self._filemode)
+ except errors.NoSuchFile:
+ self._transport.mkdir(dirname(path))
+ self._transport.put_bytes(path, bytes, self._filemode)
+
+ @staticmethod
+ def get_suffixes():
+ """See VersionedFile.get_suffixes()."""
+ return [WeaveFile.WEAVE_SUFFIX]
+
+ def insert_record_stream(self, stream):
+ super(WeaveFile, self).insert_record_stream(stream)
+ self._save()
+
+
+def _reweave(wa, wb, pb=None, msg=None):
+ """Combine two weaves and return the result.
+
+ This works even if a revision R has different parents in
+ wa and wb. In the resulting weave all the parents are given.
+
+ This is done by just building up a new weave, maintaining ordering
+ of the versions in the two inputs. More efficient approaches
+ might be possible but it should only be necessary to do
+ this operation rarely, when a new previously ghost version is
+ inserted.
+
+ :param pb: An optional progress bar, indicating how far done we are
+ :param msg: An optional message for the progress
+ """
+ wr = Weave()
+ ia = ib = 0
+ queue_a = range(wa.num_versions())
+ queue_b = range(wb.num_versions())
+ # first determine combined parents of all versions
+ # map from version name -> all parent names
+ combined_parents = _reweave_parent_graphs(wa, wb)
+ mutter("combined parents: %r", combined_parents)
+ order = tsort.topo_sort(combined_parents.iteritems())
+ mutter("order to reweave: %r", order)
+
+ if pb and not msg:
+ msg = 'reweave'
+
+ for idx, name in enumerate(order):
+ if pb:
+ pb.update(msg, idx, len(order))
+ if name in wa._name_map:
+ lines = wa.get_lines(name)
+ if name in wb._name_map:
+ lines_b = wb.get_lines(name)
+ if lines != lines_b:
+ mutter('Weaves differ on content. rev_id {%s}', name)
+ mutter('weaves: %s, %s', wa._weave_name, wb._weave_name)
+ import difflib
+ lines = list(difflib.unified_diff(lines, lines_b,
+ wa._weave_name, wb._weave_name))
+ mutter('lines:\n%s', ''.join(lines))
+ raise errors.WeaveTextDiffers(name, wa, wb)
+ else:
+ lines = wb.get_lines(name)
+ wr._add(name, lines, [wr._lookup(i) for i in combined_parents[name]])
+ return wr
+
+
+def _reweave_parent_graphs(wa, wb):
+ """Return combined parent ancestry for two weaves.
+
+    Returned as a dictionary mapping version_name to set(parent_names)."""
+ combined = {}
+ for weave in [wa, wb]:
+ for idx, name in enumerate(weave._names):
+ p = combined.setdefault(name, set())
+ p.update(map(weave._idx_to_name, weave._parents[idx]))
+ return combined
diff --git a/bzrlib/weavefile.py b/bzrlib/weavefile.py
new file mode 100644
index 0000000..4fd59fb
--- /dev/null
+++ b/bzrlib/weavefile.py
@@ -0,0 +1,167 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: Martin Pool <mbp@canonical.com>
+
+"""Store and retrieve weaves in files.
+
+There is one format marker followed by a blank line, followed by a
+series of version headers, followed by the weave itself.
+
+Each version marker has
+
+ 'i' parent version indexes
+ '1' SHA-1 of text
+ 'n' name
+
+The inclusions do not need to list versions included by a parent.
+
+The weave is bracketed by 'w' and 'W' lines, and includes the '{}[]'
+processing instructions. Lines of text are prefixed by '.' if the
+line contains a newline, or ',' if not.
+"""
+
+from __future__ import absolute_import
+
+# TODO: When extracting a single version it'd be enough to just pass
+# an iterator returning the weave lines... We don't really need to
+# deserialize it into memory.
+
+FORMAT_1 = '# bzr weave file v5\n'
+
+
+def write_weave(weave, f, format=None):
+ if format is None or format == 1:
+ return write_weave_v5(weave, f)
+ else:
+ raise ValueError("unknown weave format %r" % format)
+
+
+def write_weave_v5(weave, f):
+ """Write weave to file f."""
+ f.write(FORMAT_1)
+
+ for version, included in enumerate(weave._parents):
+ if included:
+ # mininc = weave.minimal_parents(version)
+ mininc = included
+ f.write('i ')
+ f.write(' '.join(str(i) for i in mininc))
+ f.write('\n')
+ else:
+ f.write('i\n')
+ f.write('1 ' + weave._sha1s[version] + '\n')
+ f.write('n ' + weave._names[version] + '\n')
+ f.write('\n')
+
+ f.write('w\n')
+
+ for l in weave._weave:
+ if isinstance(l, tuple):
+ if l[0] == '}':
+ f.write('}\n')
+ else:
+ f.write('%s %d\n' % l)
+ else: # text line
+ if not l:
+ f.write(', \n')
+ elif l[-1] == '\n':
+ f.write('. ' + l)
+ else:
+ f.write(', ' + l + '\n')
+
+ f.write('W\n')
+
+
+
+def read_weave(f):
+ # FIXME: detect the weave type and dispatch
+ from bzrlib.weave import Weave
+ w = Weave(getattr(f, 'name', None))
+ _read_weave_v5(f, w)
+ return w
+
+
+def _read_weave_v5(f, w):
+ """Private helper routine to read a weave format 5 file into memory.
+
+ This is only to be used by read_weave and WeaveFile.__init__.
+ """
+ # 200 0 2075.5080 1084.0360 bzrlib.weavefile:104(_read_weave_v5)
+ # +60412 0 366.5900 366.5900 +<method 'readline' of 'file' objects>
+ # +59982 0 320.5280 320.5280 +<method 'startswith' of 'str' objects>
+ # +59363 0 297.8080 297.8080 +<method 'append' of 'list' objects>
+ # replace readline call with iter over all lines ->
+ # safe because we already suck on memory.
+ # 200 0 1492.7170 802.6220 bzrlib.weavefile:104(_read_weave_v5)
+ # +59982 0 329.9100 329.9100 +<method 'startswith' of 'str' objects>
+ # +59363 0 320.2980 320.2980 +<method 'append' of 'list' objects>
+ # replaced startswith with slice lookups:
+ # 200 0 851.7250 501.1120 bzrlib.weavefile:104(_read_weave_v5)
+ # +59363 0 311.8780 311.8780 +<method 'append' of 'list' objects>
+ # +200 0 30.2500 30.2500 +<method 'readlines' of 'file' objects>
+
+ from bzrlib.weave import WeaveFormatError
+
+ try:
+ lines = iter(f.readlines())
+ finally:
+ f.close()
+
+ try:
+ l = lines.next()
+ except StopIteration:
+ raise WeaveFormatError('invalid weave file: no header')
+
+ if l != FORMAT_1:
+ raise WeaveFormatError('invalid weave file header: %r' % l)
+
+ ver = 0
+ # read weave header.
+ while True:
+ l = lines.next()
+ if l[0] == 'i':
+ if len(l) > 2:
+ w._parents.append(map(int, l[2:].split(' ')))
+ else:
+ w._parents.append([])
+ l = lines.next()[:-1]
+ w._sha1s.append(l[2:])
+ l = lines.next()
+ name = l[2:-1]
+ w._names.append(name)
+ w._name_map[name] = ver
+ l = lines.next()
+ ver += 1
+ elif l == 'w\n':
+ break
+ else:
+ raise WeaveFormatError('unexpected line %r' % l)
+
+ # read weave body
+ while True:
+ l = lines.next()
+ if l == 'W\n':
+ break
+ elif '. ' == l[0:2]:
+ w._weave.append(l[2:]) # include newline
+ elif ', ' == l[0:2]:
+ w._weave.append(l[2:-1]) # exclude newline
+ elif l == '}\n':
+ w._weave.append(('}', None))
+ else:
+ w._weave.append((intern(l[0]), int(l[2:])))
+ return w
diff --git a/bzrlib/win32utils.py b/bzrlib/win32utils.py
new file mode 100644
index 0000000..a1c3bb5
--- /dev/null
+++ b/bzrlib/win32utils.py
@@ -0,0 +1,646 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Win32-specific helper functions
+
+Only one dependency: ctypes should be installed.
+"""
+
+from __future__ import absolute_import
+
+import glob
+import operator
+import os
+import struct
+import sys
+
+from bzrlib import (
+ cmdline,
+ symbol_versioning,
+ )
+from bzrlib.i18n import gettext
+
+# Windows version
+if sys.platform == 'win32':
+ _major,_minor,_build,_platform,_text = sys.getwindowsversion()
+ # from MSDN:
+ # dwPlatformId
+ # The operating system platform.
+ # This member can be one of the following values.
+ # ========================== ======================================
+ # Value Meaning
+ # -------------------------- --------------------------------------
+ # VER_PLATFORM_WIN32_NT The operating system is Windows Vista,
+ # 2 Windows Server "Longhorn",
+ # Windows Server 2003, Windows XP,
+ # Windows 2000, or Windows NT.
+ #
+ # VER_PLATFORM_WIN32_WINDOWS The operating system is Windows Me,
+ # 1 Windows 98, or Windows 95.
+ # ========================== ======================================
+ if _platform == 2:
+ winver = 'Windows NT'
+ else:
+ # don't care about real Windows name, just to force safe operations
+ winver = 'Windows 98'
+else:
+ winver = None
+
+
+# We can cope without it; use a separate variable to help pyflakes
+try:
+ import ctypes
+ has_ctypes = True
+except ImportError:
+ has_ctypes = False
+else:
+ if winver == 'Windows 98':
+ create_buffer = ctypes.create_string_buffer
+ def extract_buffer(buf):
+ return buf.value.decode("mbcs")
+ suffix = 'A'
+ else:
+ create_buffer = ctypes.create_unicode_buffer
+ extract_buffer = operator.attrgetter("value")
+ suffix = 'W'
+try:
+ import pywintypes
+ has_pywintypes = True
+except ImportError:
+ has_pywintypes = has_win32file = has_win32api = False
+else:
+ try:
+ import win32file
+ has_win32file = True
+ except ImportError:
+ has_win32file = False
+ try:
+ import win32api
+ has_win32api = True
+ except ImportError:
+ has_win32api = False
+
+# pulling in win32com.shell is a bit of overhead, and normally we don't need
+# it as ctypes is preferred and common. lazy_imports and "optional"
+# modules don't work well, so we do our own lazy thing...
+has_win32com_shell = None # Set to True or False once we know for sure...
+
+# Special Win32 API constants
+# Handles of std streams
+WIN32_STDIN_HANDLE = -10
+WIN32_STDOUT_HANDLE = -11
+WIN32_STDERR_HANDLE = -12
+
+# CSIDL constants (from MSDN 2003)
+CSIDL_APPDATA = 0x001A # Application Data folder
+CSIDL_LOCAL_APPDATA = 0x001c  # <user name>\Local Settings\Application Data (non roaming)
+CSIDL_PERSONAL = 0x0005 # My Documents folder
+
+# from winapi C headers
+MAX_PATH = 260
+UNLEN = 256
+MAX_COMPUTERNAME_LENGTH = 31
+
+# Registry data type ids
+REG_SZ = 1
+REG_EXPAND_SZ = 2
+
+
+def debug_memory_win32api(message='', short=True):
+ """Use trace.note() to dump the running memory info."""
+ from bzrlib import trace
+ if has_ctypes:
+ class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
+ """Used by GetProcessMemoryInfo"""
+ _fields_ = [('cb', ctypes.c_ulong),
+ ('PageFaultCount', ctypes.c_ulong),
+ ('PeakWorkingSetSize', ctypes.c_size_t),
+ ('WorkingSetSize', ctypes.c_size_t),
+ ('QuotaPeakPagedPoolUsage', ctypes.c_size_t),
+ ('QuotaPagedPoolUsage', ctypes.c_size_t),
+ ('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t),
+ ('QuotaNonPagedPoolUsage', ctypes.c_size_t),
+ ('PagefileUsage', ctypes.c_size_t),
+ ('PeakPagefileUsage', ctypes.c_size_t),
+ ('PrivateUsage', ctypes.c_size_t),
+ ]
+ cur_process = ctypes.windll.kernel32.GetCurrentProcess()
+ mem_struct = PROCESS_MEMORY_COUNTERS_EX()
+ ret = ctypes.windll.psapi.GetProcessMemoryInfo(cur_process,
+ ctypes.byref(mem_struct),
+ ctypes.sizeof(mem_struct))
+ if not ret:
+ trace.note(gettext('Failed to GetProcessMemoryInfo()'))
+ return
+ info = {'PageFaultCount': mem_struct.PageFaultCount,
+ 'PeakWorkingSetSize': mem_struct.PeakWorkingSetSize,
+ 'WorkingSetSize': mem_struct.WorkingSetSize,
+ 'QuotaPeakPagedPoolUsage': mem_struct.QuotaPeakPagedPoolUsage,
+ 'QuotaPagedPoolUsage': mem_struct.QuotaPagedPoolUsage,
+ 'QuotaPeakNonPagedPoolUsage':
+ mem_struct.QuotaPeakNonPagedPoolUsage,
+ 'QuotaNonPagedPoolUsage': mem_struct.QuotaNonPagedPoolUsage,
+ 'PagefileUsage': mem_struct.PagefileUsage,
+ 'PeakPagefileUsage': mem_struct.PeakPagefileUsage,
+ 'PrivateUsage': mem_struct.PrivateUsage,
+ }
+ elif has_win32api:
+ import win32process
+ # win32process does not return PrivateUsage, because it doesn't use
+ # PROCESS_MEMORY_COUNTERS_EX (it uses the one without _EX).
+ proc = win32process.GetCurrentProcess()
+ info = win32process.GetProcessMemoryInfo(proc)
+ else:
+ trace.note(gettext('Cannot debug memory on win32 without ctypes'
+ ' or win32process'))
+ return
+ if short:
+ # using base-2 units (see HACKING.txt).
+ trace.note(gettext('WorkingSize {0:>7}KiB'
+ '\tPeakWorking {1:>7}KiB\t{2}').format(
+ info['WorkingSetSize'] / 1024,
+ info['PeakWorkingSetSize'] / 1024,
+ message))
+ return
+ if message:
+ trace.note('%s', message)
+ trace.note(gettext('WorkingSize %8d KiB'), info['WorkingSetSize'] / 1024)
+ trace.note(gettext('PeakWorking %8d KiB'), info['PeakWorkingSetSize'] / 1024)
+ trace.note(gettext('PagefileUsage %8d KiB'), info.get('PagefileUsage', 0) / 1024)
+ trace.note(gettext('PeakPagefileUsage %8d KiB'),
+ info.get('PeakPagefileUsage', 0) / 1024)
+ trace.note(gettext('PrivateUsage %8d KiB'), info.get('PrivateUsage', 0) / 1024)
+ trace.note(gettext('PageFaultCount %8d'), info.get('PageFaultCount', 0))
+
+
+def get_console_size(defaultx=80, defaulty=25):
+ """Return size of current console.
+
+    This function tries to determine the actual size of the current console
+    window and returns a tuple (sizex, sizey) on success, or the default
+    size (defaultx, defaulty) otherwise.
+ """
+ if not has_ctypes:
+        # ctypes is not available
+ return (defaultx, defaulty)
+
+    # To avoid problems when output is redirected via a pipe,
+    # we need to use stderr instead of stdout.
+ h = ctypes.windll.kernel32.GetStdHandle(WIN32_STDERR_HANDLE)
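+    # The 22-byte buffer matches CONSOLE_SCREEN_BUFFER_INFO: eleven 16-bit
+    # fields (buffer size, cursor position, attributes, window rectangle and
+    # maximum window size), unpacked with struct below.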
+ csbi = ctypes.create_string_buffer(22)
+ res = ctypes.windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+
+ if res:
+ (bufx, bufy, curx, cury, wattr,
+ left, top, right, bottom, maxx, maxy) = struct.unpack(
+ "hhhhHhhhhhh", csbi.raw)
+ sizex = right - left + 1
+ sizey = bottom - top + 1
+ return (sizex, sizey)
+ else:
+ return (defaultx, defaulty)
+
+
+def _get_sh_special_folder_path(csidl):
+ """Call SHGetSpecialFolderPathW if available, or return None.
+
+ Result is always unicode (or None).
+ """
+ if has_ctypes:
+ try:
+ SHGetSpecialFolderPath = \
+ ctypes.windll.shell32.SHGetSpecialFolderPathW
+ except AttributeError:
+ pass
+ else:
+ buf = ctypes.create_unicode_buffer(MAX_PATH)
+            if SHGetSpecialFolderPath(None, buf, csidl, 0):
+ return buf.value
+
+ global has_win32com_shell
+ if has_win32com_shell is None:
+ try:
+ from win32com.shell import shell
+ has_win32com_shell = True
+ except ImportError:
+ has_win32com_shell = False
+ if has_win32com_shell:
+ # still need to bind the name locally, but this is fast.
+ from win32com.shell import shell
+ try:
+ return shell.SHGetSpecialFolderPath(0, csidl, 0)
+ except shell.error:
+ # possibly E_NOTIMPL meaning we can't load the function pointer,
+ # or E_FAIL meaning the function failed - regardless, just ignore it
+ pass
+ return None
+
+
+def get_appdata_location():
+ """Return Application Data location.
+ Return None if we cannot obtain location.
+
+ Windows defines two 'Application Data' folders per user - a 'roaming'
+    one that moves with the user as they log on to different machines, and
+ a 'local' one that stays local to the machine. This returns the 'roaming'
+ directory, and thus is suitable for storing user-preferences, etc.
+ """
+ appdata = _get_sh_special_folder_path(CSIDL_APPDATA)
+ if appdata:
+ return appdata
+ # Use APPDATA if defined, will return None if not
+ return get_environ_unicode('APPDATA')
+
+
+def get_local_appdata_location():
+ """Return Local Application Data location.
+ Return the same as get_appdata_location() if we cannot obtain location.
+
+ Windows defines two 'Application Data' folders per user - a 'roaming'
+    one that moves with the user as they log on to different machines, and
+ a 'local' one that stays local to the machine. This returns the 'local'
+ directory, and thus is suitable for caches, temp files and other things
+ which don't need to move with the user.
+ """
+ local = _get_sh_special_folder_path(CSIDL_LOCAL_APPDATA)
+ if local:
+ return local
+ # Vista supplies LOCALAPPDATA, but XP and earlier do not.
+ local = get_environ_unicode('LOCALAPPDATA')
+ if local:
+ return local
+ return get_appdata_location()
+
+
+def get_home_location():
+ """Return user's home location.
+    On win32 this is assumed to be the <My Documents> folder.
+    If the location cannot be obtained, the system drive root is returned,
+    i.e. C:/
+ """
+ home = _get_sh_special_folder_path(CSIDL_PERSONAL)
+ if home:
+ return home
+ home = get_environ_unicode('HOME')
+ if home is not None:
+ return home
+ homepath = get_environ_unicode('HOMEPATH')
+ if homepath is not None:
+        return os.path.join(get_environ_unicode('HOMEDRIVE', ''), homepath)
+ # at least return windows root directory
+ windir = get_environ_unicode('WINDIR')
+ if windir:
+ return os.path.splitdrive(windir)[0] + '/'
+ # otherwise C:\ is good enough for 98% users
+ return unicode('C:/')
+
+
+def get_user_name():
+ """Return user name as login name.
+ If name cannot be obtained return None.
+ """
+ if has_ctypes:
+ try:
+ advapi32 = ctypes.windll.advapi32
+ GetUserName = getattr(advapi32, 'GetUserName'+suffix)
+ except AttributeError:
+ pass
+ else:
+ buf = create_buffer(UNLEN+1)
+ n = ctypes.c_int(UNLEN+1)
+ if GetUserName(buf, ctypes.byref(n)):
+ return extract_buffer(buf)
+ # otherwise try env variables
+ return get_environ_unicode('USERNAME')
+
+
+# 1 == ComputerNameDnsHostname, which returns "The DNS host name of the local
+# computer or the cluster associated with the local computer."
+_WIN32_ComputerNameDnsHostname = 1
+
+def get_host_name():
+ """Return host machine name.
+ If name cannot be obtained return None.
+
+ :return: A unicode string representing the host name.
+ """
+ if has_win32api:
+ try:
+ return win32api.GetComputerNameEx(_WIN32_ComputerNameDnsHostname)
+ except (NotImplementedError, win32api.error):
+ # NotImplemented will happen on win9x...
+ pass
+ if has_ctypes:
+ try:
+ kernel32 = ctypes.windll.kernel32
+ except AttributeError:
+ pass # Missing the module we need
+ else:
+ buf = create_buffer(MAX_COMPUTERNAME_LENGTH+1)
+ n = ctypes.c_int(MAX_COMPUTERNAME_LENGTH+1)
+
+ # Try GetComputerNameEx which gives a proper Unicode hostname
+ GetComputerNameEx = getattr(kernel32, 'GetComputerNameEx'+suffix,
+ None)
+ if (GetComputerNameEx is not None
+ and GetComputerNameEx(_WIN32_ComputerNameDnsHostname,
+ buf, ctypes.byref(n))):
+ return extract_buffer(buf)
+
+ # Try GetComputerName in case GetComputerNameEx wasn't found
+ # It returns the NETBIOS name, which isn't as good, but still ok.
+ # The first GetComputerNameEx might have changed 'n', so reset it
+ n = ctypes.c_int(MAX_COMPUTERNAME_LENGTH+1)
+ GetComputerName = getattr(kernel32, 'GetComputerName'+suffix,
+ None)
+ if (GetComputerName is not None
+ and GetComputerName(buf, ctypes.byref(n))):
+ return extract_buffer(buf)
+ return get_environ_unicode('COMPUTERNAME')
+
+
+@symbol_versioning.deprecated_function(
+ symbol_versioning.deprecated_in((2, 5, 0)))
+def _ensure_unicode(s):
+ if s and type(s) != unicode:
+ from bzrlib import osutils
+ s = s.decode(osutils.get_user_encoding())
+ return s
+
+
+get_appdata_location_unicode = symbol_versioning.deprecated_function(
+ symbol_versioning.deprecated_in((2, 5, 0)))(get_appdata_location)
+
+get_home_location_unicode = symbol_versioning.deprecated_function(
+ symbol_versioning.deprecated_in((2, 5, 0)))(get_home_location)
+
+get_user_name_unicode = symbol_versioning.deprecated_function(
+ symbol_versioning.deprecated_in((2, 5, 0)))(get_user_name)
+
+get_host_name_unicode = symbol_versioning.deprecated_function(
+ symbol_versioning.deprecated_in((2, 5, 0)))(get_host_name)
+
+
+def _ensure_with_dir(path):
+ if (not os.path.split(path)[0] or path.startswith(u'*')
+ or path.startswith(u'?')):
+ return u'./' + path, True
+ else:
+ return path, False
+
+def _undo_ensure_with_dir(path, corrected):
+ if corrected:
+ return path[2:]
+ else:
+ return path
+
+
+def glob_one(possible_glob):
+    """Same as glob.glob(), but working around bugs in glob.glob():
+
+ - Python bug #1001604 ("glob doesn't return unicode with ...")
+ - failing expansion for */* with non-iso-8859-* chars
+ """
+ corrected_glob, corrected = _ensure_with_dir(possible_glob)
+ glob_files = glob.glob(corrected_glob)
+
+ if not glob_files:
+ # special case to let the normal code path handle
+ # files that do not exist, etc.
+ glob_files = [possible_glob]
+ elif corrected:
+ glob_files = [_undo_ensure_with_dir(elem, corrected)
+ for elem in glob_files]
+ return [elem.replace(u'\\', u'/') for elem in glob_files]
+
+
+def glob_expand(file_list):
+ """Replacement for glob expansion by the shell.
+
+ Win32's cmd.exe does not do glob expansion (eg ``*.py``), so we do our own
+ here.
+
+ :param file_list: A list of filenames which may include shell globs.
+ :return: An expanded list of filenames.
+
+ Introduced in bzrlib 0.18.
+ """
+ if not file_list:
+ return []
+ expanded_file_list = []
+ for possible_glob in file_list:
+ expanded_file_list.extend(glob_one(possible_glob))
+ return expanded_file_list
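+
+# Illustrative usage (file names are hypothetical): in a directory containing
+# foo.py and bar.py, glob_expand([u'*.py', u'README']) would return something
+# like [u'foo.py', u'bar.py', u'README']; patterns that match nothing are
+# passed through unchanged so later code can report them as missing.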
+
+
+def get_app_path(appname):
+    r"""Look up the full path to an application executable in the Windows registry.
+
+    Typically, applications create a subkey with their basename
+    in HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\
+
+    :param appname: name of the application (if no filename extension
+        is specified, .exe is used)
+    :return: full path to the application executable from the registry,
+        or appname itself if nothing is found.
+ """
+ import _winreg
+
+ basename = appname
+ if not os.path.splitext(basename)[1]:
+ basename = appname + '.exe'
+
+ try:
+ hkey = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
+ 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\' +
+ basename)
+ except EnvironmentError:
+ return appname
+
+ try:
+ try:
+ path, type_id = _winreg.QueryValueEx(hkey, '')
+ except WindowsError:
+ return appname
+ finally:
+ _winreg.CloseKey(hkey)
+
+ if type_id == REG_SZ:
+ return path
+ if type_id == REG_EXPAND_SZ and has_win32api:
+ fullpath = win32api.ExpandEnvironmentStrings(path)
+ if len(fullpath) > 1 and fullpath[0] == '"' and fullpath[-1] == '"':
+ fullpath = fullpath[1:-1] # remove quotes around value
+ return fullpath
+ return appname
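+
+# Illustrative usage: get_app_path('iexplore') would typically return the
+# full path registered under the App Paths key, while an unregistered name
+# such as 'no-such-app' (hypothetical) is returned unchanged.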
+
+
+def set_file_attr_hidden(path):
+ """Set file attributes to hidden if possible"""
+ if has_win32file:
+ if winver != 'Windows 98':
+ SetFileAttributes = win32file.SetFileAttributesW
+ else:
+ SetFileAttributes = win32file.SetFileAttributes
+ try:
+ SetFileAttributes(path, win32file.FILE_ATTRIBUTE_HIDDEN)
+ except pywintypes.error, e:
+ from bzrlib import trace
+ trace.mutter('Unable to set hidden attribute on %r: %s', path, e)
+
+
+def _command_line_to_argv(command_line, argv, single_quotes_allowed=False):
+ """Convert a Unicode command line into a list of argv arguments.
+
+ It performs wildcard expansion to make wildcards act closer to how they
+ work in posix shells, versus how they work by default on Windows. Quoted
+ arguments are left untouched.
+
+ :param command_line: The unicode string to split into an arg list.
+ :param single_quotes_allowed: Whether single quotes are accepted as quoting
+ characters like double quotes. False by
+ default.
+ :return: A list of unicode strings.
+ """
+    # First, split the command line
+ s = cmdline.Splitter(command_line, single_quotes_allowed=single_quotes_allowed)
+
+    # Bug #587868: now make sure that the length of s agrees with sys.argv.
+    # We do this by simply counting the number of arguments in each; the
+    # counts should agree no matter what encoding sys.argv is in (AFAIK).
+    # len(arguments) < len(sys.argv) should be an impossibility, since Python
+    # gets its args from the very same PEB as does GetCommandLineW.
+ arguments = list(s)
+
+ # Now shorten the command line we get from GetCommandLineW to match sys.argv
+ if len(arguments) < len(argv):
+ raise AssertionError("Split command line can't be shorter than argv")
+ arguments = arguments[len(arguments) - len(argv):]
+
+ # Carry on to process globs (metachars) in the command line
+ # expand globs if necessary
+ # TODO: Use 'globbing' instead of 'glob.glob', this gives us stuff like
+ # '**/' style globs
+ args = []
+ for is_quoted, arg in arguments:
+ if is_quoted or not glob.has_magic(arg):
+ args.append(arg)
+ else:
+ args.extend(glob_one(arg))
+ return args
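+
+# Rough illustration (not a doctest): for a command line containing
+#   "*.txt" *.py
+# the quoted "*.txt" is kept as a literal argument, while the unquoted *.py
+# is expanded through glob_one() against the current directory.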
+
+
+if has_ctypes and winver == 'Windows NT':
+ def get_unicode_argv():
+ prototype = ctypes.WINFUNCTYPE(ctypes.c_wchar_p)
+ GetCommandLineW = prototype(("GetCommandLineW",
+ ctypes.windll.kernel32))
+ command_line = GetCommandLineW()
+ if command_line is None:
+ raise ctypes.WinError()
+ # Skip the first argument, since we only care about parameters
+ argv = _command_line_to_argv(command_line, sys.argv)[1:]
+ return argv
+
+
+ def get_environ_unicode(key, default=None):
+ """Get `key` from environment as unicode or `default` if unset
+
+ The environment is natively unicode on modern windows versions but
+ Python 2 only accesses it through the legacy bytestring api.
+
+        Environment variable names are case insensitive on Windows.
+
+ A large enough buffer will be allocated to retrieve the value, though
+ it may take two calls to the underlying library function.
+
+ This needs ctypes because pywin32 does not expose the wide version.
+ """
+ cfunc = getattr(get_environ_unicode, "_c_function", None)
+ if cfunc is None:
+ from ctypes.wintypes import DWORD, LPCWSTR, LPWSTR
+ cfunc = ctypes.WINFUNCTYPE(DWORD, LPCWSTR, LPWSTR, DWORD)(
+ ("GetEnvironmentVariableW", ctypes.windll.kernel32))
+ get_environ_unicode._c_function = cfunc
+ buffer_size = 256 # heuristic, 256 characters often enough
+ while True:
+ buffer = ctypes.create_unicode_buffer(buffer_size)
+ length = cfunc(key, buffer, buffer_size)
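+            # GetEnvironmentVariableW returns the number of characters copied
+            # (excluding the trailing NUL) on success, or the required buffer
+            # size (including the NUL) if the buffer was too small, so a
+            # result >= buffer_size means we need to retry with more space.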
+ if not length:
+ code = ctypes.GetLastError()
+ if code == 203: # ERROR_ENVVAR_NOT_FOUND
+ return default
+ raise ctypes.WinError(code)
+ if buffer_size > length:
+ return buffer[:length]
+ buffer_size = length
+else:
+ get_unicode_argv = None
+ def get_environ_unicode(key, default=None):
+ """Get `key` from environment as unicode or `default` if unset
+
+ Fallback version that should basically never be needed.
+ """
+ from bzrlib import osutils
+ try:
+ return os.environ[key].decode(osutils.get_user_encoding())
+ except KeyError:
+ return default
+
+
+if has_win32api:
+ def _pywin32_is_local_pid_dead(pid):
+        """True if pid doesn't correspond to a live process on this machine"""
+ try:
+ handle = win32api.OpenProcess(1, False, pid) # PROCESS_TERMINATE
+ except pywintypes.error, e:
+ if e[0] == 5: # ERROR_ACCESS_DENIED
+ # Probably something alive we're not allowed to kill
+ return False
+ elif e[0] == 87: # ERROR_INVALID_PARAMETER
+ return True
+ raise
+ handle.close()
+ return False
+ is_local_pid_dead = _pywin32_is_local_pid_dead
+elif has_ctypes and sys.platform == 'win32':
+ from ctypes.wintypes import BOOL, DWORD, HANDLE
+ _kernel32 = ctypes.windll.kernel32
+ _CloseHandle = ctypes.WINFUNCTYPE(BOOL, HANDLE)(
+ ("CloseHandle", _kernel32))
+ _OpenProcess = ctypes.WINFUNCTYPE(HANDLE, DWORD, BOOL, DWORD)(
+ ("OpenProcess", _kernel32))
+ def _ctypes_is_local_pid_dead(pid):
+        """True if pid doesn't correspond to a live process on this machine"""
+ handle = _OpenProcess(1, False, pid) # PROCESS_TERMINATE
+ if not handle:
+ errorcode = ctypes.GetLastError()
+ if errorcode == 5: # ERROR_ACCESS_DENIED
+ # Probably something alive we're not allowed to kill
+ return False
+ elif errorcode == 87: # ERROR_INVALID_PARAMETER
+ return True
+ raise ctypes.WinError(errorcode)
+ _CloseHandle(handle)
+ return False
+ is_local_pid_dead = _ctypes_is_local_pid_dead
+
+
+def _is_pywintypes_error(evalue):
+ """True if exception instance is an error from pywin32"""
+ if has_pywintypes and isinstance(evalue, pywintypes.error):
+ return True
+ return False
diff --git a/bzrlib/workingtree.py b/bzrlib/workingtree.py
new file mode 100644
index 0000000..971c1ca
--- /dev/null
+++ b/bzrlib/workingtree.py
@@ -0,0 +1,3191 @@
+# Copyright (C) 2005-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""WorkingTree object and friends.
+
+A WorkingTree represents the editable working copy of a branch.
+Operations which modify the WorkingTree are also done here,
+such as renaming or adding files. The WorkingTree has an inventory
+which is updated by these operations. A commit produces a
+new revision based on the workingtree and its inventory.
+
+At the moment every WorkingTree has its own branch. Remote
+WorkingTrees aren't supported.
+
+To get a WorkingTree, call bzrdir.open_workingtree() or
+WorkingTree.open(dir).
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import os
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+from bisect import bisect_left
+import collections
+import errno
+import itertools
+import operator
+import stat
+import re
+
+from bzrlib import (
+ branch,
+ conflicts as _mod_conflicts,
+ controldir,
+ errors,
+ filters as _mod_filters,
+ generate_ids,
+ globbing,
+ graph as _mod_graph,
+ ignores,
+ inventory,
+ merge,
+ revision as _mod_revision,
+ revisiontree,
+ rio as _mod_rio,
+ transform,
+ transport,
+ ui,
+ views,
+ xml5,
+ xml7,
+ )
+""")
+
+# Explicitly import bzrlib.bzrdir so that the BzrProber
+# is guaranteed to be registered.
+from bzrlib import (
+ bzrdir,
+ symbol_versioning,
+ )
+
+from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.i18n import gettext
+from bzrlib.lock import LogicalLockResult
+import bzrlib.mutabletree
+from bzrlib.mutabletree import needs_tree_write_lock
+from bzrlib import osutils
+from bzrlib.osutils import (
+ file_kind,
+ isdir,
+ normpath,
+ pathjoin,
+ realpath,
+ safe_unicode,
+ splitpath,
+ )
+from bzrlib.trace import mutter, note
+from bzrlib.revision import CURRENT_REVISION
+from bzrlib.symbol_versioning import (
+ deprecated_passed,
+ DEPRECATED_PARAMETER,
+ )
+
+
+MERGE_MODIFIED_HEADER_1 = "BZR merge-modified list format 1"
+# TODO: Modifying the conflict objects or their type is currently nearly
+# impossible as there is no clear relationship between the working tree format
+# and the conflict list file format.
+CONFLICT_HEADER_1 = "BZR conflict list format 1"
+
+ERROR_PATH_NOT_FOUND = 3 # WindowsError errno code, equivalent to ENOENT
+
+
+class TreeEntry(object):
+ """An entry that implements the minimum interface used by commands.
+
+    This needs further inspection; it may be better to have
+    InventoryEntries without ids - though that seems wrong. For now,
+    this is a parallel hierarchy to InventoryEntry, and needs to become
+    one of several things: decorators of that hierarchy, children of it, or
+    parents of it.
+ Another note is that these objects are currently only used when there is
+ no InventoryEntry available - i.e. for unversioned objects.
+ Perhaps they should be UnversionedEntry et al. ? - RBC 20051003
+ """
+
+ def __eq__(self, other):
+        # yes, this is ugly. TODO: use best-practice __eq__ style.
+ return (isinstance(other, TreeEntry)
+ and other.__class__ == self.__class__)
+
+ def kind_character(self):
+ return "???"
+
+
+class TreeDirectory(TreeEntry):
+ """See TreeEntry. This is a directory in a working tree."""
+
+ def __eq__(self, other):
+ return (isinstance(other, TreeDirectory)
+ and other.__class__ == self.__class__)
+
+ def kind_character(self):
+ return "/"
+
+
+class TreeFile(TreeEntry):
+ """See TreeEntry. This is a regular file in a working tree."""
+
+ def __eq__(self, other):
+ return (isinstance(other, TreeFile)
+ and other.__class__ == self.__class__)
+
+ def kind_character(self):
+ return ''
+
+
+class TreeLink(TreeEntry):
+ """See TreeEntry. This is a symlink in a working tree."""
+
+ def __eq__(self, other):
+ return (isinstance(other, TreeLink)
+ and other.__class__ == self.__class__)
+
+ def kind_character(self):
+ return ''
+
+
+class WorkingTree(bzrlib.mutabletree.MutableTree,
+ controldir.ControlComponent):
+ """Working copy tree.
+
+ :ivar basedir: The root of the tree on disk. This is a unicode path object
+ (as opposed to a URL).
+ """
+
+ # override this to set the strategy for storing views
+ def _make_views(self):
+ return views.DisabledViews(self)
+
+ def __init__(self, basedir='.',
+ branch=DEPRECATED_PARAMETER,
+ _internal=False,
+ _transport=None,
+ _format=None,
+ _bzrdir=None):
+ """Construct a WorkingTree instance. This is not a public API.
+
+ :param branch: A branch to override probing for the branch.
+ """
+ self._format = _format
+ self.bzrdir = _bzrdir
+ if not _internal:
+ raise errors.BzrError("Please use bzrdir.open_workingtree or "
+ "WorkingTree.open() to obtain a WorkingTree.")
+ basedir = safe_unicode(basedir)
+ mutter("opening working tree %r", basedir)
+ if deprecated_passed(branch):
+ self._branch = branch
+ else:
+ self._branch = self.bzrdir.open_branch()
+ self.basedir = realpath(basedir)
+ self._transport = _transport
+ self._rules_searcher = None
+ self.views = self._make_views()
+
+ @property
+ def user_transport(self):
+ return self.bzrdir.user_transport
+
+ @property
+ def control_transport(self):
+ return self._transport
+
+ def is_control_filename(self, filename):
+ """True if filename is the name of a control file in this tree.
+
+ :param filename: A filename within the tree. This is a relative path
+ from the root of this tree.
+
+ This is true IF and ONLY IF the filename is part of the meta data
+ that bzr controls in this tree. I.E. a random .bzr directory placed
+ on disk will not be a control file for this tree.
+ """
+ return self.bzrdir.is_control_filename(filename)
+
+ branch = property(
+ fget=lambda self: self._branch,
+ doc="""The branch this WorkingTree is connected to.
+
+ This cannot be set - it is reflective of the actual disk structure
+ the working tree has been constructed from.
+ """)
+
+ def has_versioned_directories(self):
+ """See `Tree.has_versioned_directories`."""
+ return self._format.supports_versioned_directories
+
+ def _supports_executable(self):
+ if sys.platform == 'win32':
+ return False
+ # FIXME: Ideally this should check the file system
+ return True
+
+ def break_lock(self):
+ """Break a lock if one is present from another instance.
+
+ Uses the ui factory to ask for confirmation if the lock may be from
+ an active process.
+
+ This will probe the repository for its lock as well.
+ """
+ raise NotImplementedError(self.break_lock)
+
+ def requires_rich_root(self):
+ return self._format.requires_rich_root
+
+ def supports_tree_reference(self):
+ return False
+
+ def supports_content_filtering(self):
+ return self._format.supports_content_filtering()
+
+ def supports_views(self):
+ return self.views.supports_views()
+
+ def get_config_stack(self):
+ """Retrieve the config stack for this tree.
+
+ :return: A ``bzrlib.config.Stack``
+ """
+ # For the moment, just provide the branch config stack.
+ return self.branch.get_config_stack()
+
+ @staticmethod
+ def open(path=None, _unsupported=False):
+ """Open an existing working tree at path.
+
+ """
+ if path is None:
+ path = osutils.getcwd()
+ control = controldir.ControlDir.open(path, _unsupported=_unsupported)
+ return control.open_workingtree(unsupported=_unsupported)
+
+ @staticmethod
+ def open_containing(path=None):
+ """Open an existing working tree which has its root about path.
+
+ This probes for a working tree at path and searches upwards from there.
+
+ Basically we keep looking up until we find the control directory or
+ run into /. If there isn't one, raises NotBranchError.
+ TODO: give this a new exception.
+ If there is one, it is returned, along with the unused portion of path.
+
+ :return: The WorkingTree that contains 'path', and the rest of path
+ """
+ if path is None:
+ path = osutils.getcwd()
+ control, relpath = controldir.ControlDir.open_containing(path)
+ return control.open_workingtree(), relpath
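+
+    # Illustrative usage (paths hypothetical): open_containing('/src/proj/lib/a.py')
+    # would return (<WorkingTree of /src/proj>, 'lib/a.py') when /src/proj is
+    # the root of the working tree.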
+
+ @staticmethod
+ def open_containing_paths(file_list, default_directory=None,
+ canonicalize=True, apply_view=True):
+ """Open the WorkingTree that contains a set of paths.
+
+ Fail if the paths given are not all in a single tree.
+
+ This is used for the many command-line interfaces that take a list of
+ any number of files and that require they all be in the same tree.
+ """
+ if default_directory is None:
+ default_directory = u'.'
+ # recommended replacement for builtins.internal_tree_files
+ if file_list is None or len(file_list) == 0:
+ tree = WorkingTree.open_containing(default_directory)[0]
+ # XXX: doesn't really belong here, and seems to have the strange
+ # side effect of making it return a bunch of files, not the whole
+ # tree -- mbp 20100716
+ if tree.supports_views() and apply_view:
+ view_files = tree.views.lookup_view()
+ if view_files:
+ file_list = view_files
+ view_str = views.view_display_str(view_files)
+ note(gettext("Ignoring files outside view. View is %s") % view_str)
+ return tree, file_list
+ if default_directory == u'.':
+ seed = file_list[0]
+ else:
+ seed = default_directory
+ file_list = [osutils.pathjoin(default_directory, f)
+ for f in file_list]
+ tree = WorkingTree.open_containing(seed)[0]
+ return tree, tree.safe_relpath_files(file_list, canonicalize,
+ apply_view=apply_view)
+
+ def safe_relpath_files(self, file_list, canonicalize=True, apply_view=True):
+ """Convert file_list into a list of relpaths in tree.
+
+ :param self: A tree to operate on.
+ :param file_list: A list of user provided paths or None.
+ :param apply_view: if True and a view is set, apply it or check that
+ specified files are within it
+ :return: A list of relative paths.
+        :raises errors.PathNotChild: When a provided path is in a different
+            tree than self.
+ """
+ if file_list is None:
+ return None
+ if self.supports_views() and apply_view:
+ view_files = self.views.lookup_view()
+ else:
+ view_files = []
+ new_list = []
+ # self.relpath exists as a "thunk" to osutils, but canonical_relpath
+ # doesn't - fix that up here before we enter the loop.
+ if canonicalize:
+ fixer = lambda p: osutils.canonical_relpath(self.basedir, p)
+ else:
+ fixer = self.relpath
+ for filename in file_list:
+ relpath = fixer(osutils.dereference_path(filename))
+ if view_files and not osutils.is_inside_any(view_files, relpath):
+ raise errors.FileOutsideView(filename, view_files)
+ new_list.append(relpath)
+ return new_list
+
+ @staticmethod
+ def open_downlevel(path=None):
+ """Open an unsupported working tree.
+
+ Only intended for advanced situations like upgrading part of a bzrdir.
+ """
+ return WorkingTree.open(path, _unsupported=True)
+
+ @staticmethod
+ def find_trees(location):
+ def list_current(transport):
+ return [d for d in transport.list_dir('') if d != '.bzr']
+ def evaluate(bzrdir):
+ try:
+ tree = bzrdir.open_workingtree()
+ except errors.NoWorkingTree:
+ return True, None
+ else:
+ return True, tree
+ t = transport.get_transport(location)
+ iterator = controldir.ControlDir.find_bzrdirs(t, evaluate=evaluate,
+ list_current=list_current)
+ return [tr for tr in iterator if tr is not None]
+
+ def __repr__(self):
+ return "<%s of %s>" % (self.__class__.__name__,
+ getattr(self, 'basedir', None))
+
+ def abspath(self, filename):
+ return pathjoin(self.basedir, filename)
+
+ def basis_tree(self):
+ """Return RevisionTree for the current last revision.
+
+ If the left most parent is a ghost then the returned tree will be an
+ empty tree - one obtained by calling
+ repository.revision_tree(NULL_REVISION).
+ """
+ try:
+ revision_id = self.get_parent_ids()[0]
+ except IndexError:
+ # no parents, return an empty revision tree.
+ # in the future this should return the tree for
+ # 'empty:' - the implicit root empty tree.
+ return self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ try:
+ return self.revision_tree(revision_id)
+ except errors.NoSuchRevision:
+ pass
+ # No cached copy available, retrieve from the repository.
+ # FIXME? RBC 20060403 should we cache the inventory locally
+ # at this point ?
+ try:
+ return self.branch.repository.revision_tree(revision_id)
+ except (errors.RevisionNotPresent, errors.NoSuchRevision):
+ # the basis tree *may* be a ghost or a low level error may have
+            # occurred. If the revision is present, it's a problem; if it's
+            # not, it's a ghost.
+ if self.branch.repository.has_revision(revision_id):
+ raise
+ # the basis tree is a ghost so return an empty tree.
+ return self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+
+ def _cleanup(self):
+ self._flush_ignore_list_cache()
+
+ def relpath(self, path):
+ """Return the local path portion from a given path.
+
+        The path may be absolute or relative. If it's a relative path, it is
+        interpreted relative to the Python current working directory.
+ """
+ return osutils.relpath(self.basedir, path)
+
+ def has_filename(self, filename):
+ return osutils.lexists(self.abspath(filename))
+
+ def get_file(self, file_id, path=None, filtered=True):
+ return self.get_file_with_stat(file_id, path, filtered=filtered)[0]
+
+ def get_file_with_stat(self, file_id, path=None, filtered=True,
+ _fstat=osutils.fstat):
+ """See Tree.get_file_with_stat."""
+ if path is None:
+ path = self.id2path(file_id)
+ file_obj = self.get_file_byname(path, filtered=False)
+ stat_value = _fstat(file_obj.fileno())
+ if filtered and self.supports_content_filtering():
+ filters = self._content_filter_stack(path)
+ file_obj = _mod_filters.filtered_input_file(file_obj, filters)
+ return (file_obj, stat_value)
+
+ def get_file_text(self, file_id, path=None, filtered=True):
+ my_file = self.get_file(file_id, path=path, filtered=filtered)
+ try:
+ return my_file.read()
+ finally:
+ my_file.close()
+
+ def get_file_byname(self, filename, filtered=True):
+ path = self.abspath(filename)
+ f = file(path, 'rb')
+ if filtered and self.supports_content_filtering():
+ filters = self._content_filter_stack(filename)
+ return _mod_filters.filtered_input_file(f, filters)
+ else:
+ return f
+
+ def get_file_lines(self, file_id, path=None, filtered=True):
+ """See Tree.get_file_lines()"""
+ file = self.get_file(file_id, path, filtered=filtered)
+ try:
+ return file.readlines()
+ finally:
+ file.close()
+
+ def get_parent_ids(self):
+ """See Tree.get_parent_ids.
+
+ This implementation reads the pending merges list and last_revision
+ value and uses that to decide what the parents list should be.
+ """
+ last_rev = _mod_revision.ensure_null(self._last_revision())
+ if _mod_revision.NULL_REVISION == last_rev:
+ parents = []
+ else:
+ parents = [last_rev]
+ try:
+ merges_bytes = self._transport.get_bytes('pending-merges')
+ except errors.NoSuchFile:
+ pass
+ else:
+ for l in osutils.split_lines(merges_bytes):
+ revision_id = l.rstrip('\n')
+ parents.append(revision_id)
+ return parents
+
+ def get_root_id(self):
+        """Return the id of this tree's root"""
+ raise NotImplementedError(self.get_root_id)
+
+ @needs_read_lock
+ def clone(self, to_controldir, revision_id=None):
+        """Duplicate this working tree into to_controldir, including all state.
+
+ Specifically modified files are kept as modified, but
+ ignored and unknown files are discarded.
+
+ If you want to make a new line of development, see ControlDir.sprout()
+
+        revision_id
+            If not None, the cloned tree will have its last revision set to
+            revision_id, and the difference between the source tree's last
+            revision and this one merged in.
+ """
+ # assumes the target bzr dir format is compatible.
+ result = to_controldir.create_workingtree()
+ self.copy_content_into(result, revision_id)
+ return result
+
+ @needs_read_lock
+ def copy_content_into(self, tree, revision_id=None):
+ """Copy the current content and user files of this tree into tree."""
+ tree.set_root_id(self.get_root_id())
+ if revision_id is None:
+ merge.transform_tree(tree, self)
+ else:
+ # TODO now merge from tree.last_revision to revision (to preserve
+ # user local changes)
+ merge.transform_tree(tree, self)
+ if revision_id == _mod_revision.NULL_REVISION:
+ new_parents = []
+ else:
+ new_parents = [revision_id]
+ tree.set_parent_ids(new_parents)
+
+ def id2abspath(self, file_id):
+ return self.abspath(self.id2path(file_id))
+
+ def _check_for_tree_references(self, iterator):
+ """See if directories have become tree-references."""
+ blocked_parent_ids = set()
+ for path, ie in iterator:
+ if ie.parent_id in blocked_parent_ids:
+ # This entry was pruned because one of its parents became a
+ # TreeReference. If this is a directory, mark it as blocked.
+ if ie.kind == 'directory':
+ blocked_parent_ids.add(ie.file_id)
+ continue
+ if ie.kind == 'directory' and self._directory_is_tree_reference(path):
+ # This InventoryDirectory needs to be a TreeReference
+ ie = inventory.TreeReference(ie.file_id, ie.name, ie.parent_id)
+ blocked_parent_ids.add(ie.file_id)
+ yield path, ie
+
+ def iter_entries_by_dir(self, specific_file_ids=None, yield_parents=False):
+ """See Tree.iter_entries_by_dir()"""
+ # The only trick here is that if we supports_tree_reference then we
+ # need to detect if a directory becomes a tree-reference.
+ iterator = super(WorkingTree, self).iter_entries_by_dir(
+ specific_file_ids=specific_file_ids,
+ yield_parents=yield_parents)
+ if not self.supports_tree_reference():
+ return iterator
+ else:
+ return self._check_for_tree_references(iterator)
+
+ def get_file_size(self, file_id):
+ """See Tree.get_file_size"""
+ # XXX: this returns the on-disk size; it should probably return the
+ # canonical size
+ try:
+ return os.path.getsize(self.id2abspath(file_id))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ return None
+
+ @needs_tree_write_lock
+ def _gather_kinds(self, files, kinds):
+ """See MutableTree._gather_kinds."""
+ for pos, f in enumerate(files):
+ if kinds[pos] is None:
+ fullpath = normpath(self.abspath(f))
+ try:
+ kinds[pos] = file_kind(fullpath)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise errors.NoSuchFile(fullpath)
+
+ @needs_write_lock
+ def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
+ """Add revision_id as a parent.
+
+ This is equivalent to retrieving the current list of parent ids
+ and setting the list to its value plus revision_id.
+
+ :param revision_id: The revision id to add to the parent list. It may
+            be a ghost revision as long as it's not the first parent to be
+ added, or the allow_leftmost_as_ghost parameter is set True.
+ :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
+ """
+ parents = self.get_parent_ids() + [revision_id]
+ self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
+ or allow_leftmost_as_ghost)
+
+ @needs_tree_write_lock
+ def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
+ """Add revision_id, tree tuple as a parent.
+
+ This is equivalent to retrieving the current list of parent trees
+ and setting the list to its value plus parent_tuple. See also
+ add_parent_tree_id - if you only have a parent id available it will be
+ simpler to use that api. If you have the parent already available, using
+ this api is preferred.
+
+ :param parent_tuple: The (revision id, tree) to add to the parent list.
+ If the revision_id is a ghost, pass None for the tree.
+ :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
+ """
+ parent_ids = self.get_parent_ids() + [parent_tuple[0]]
+ if len(parent_ids) > 1:
+ # the leftmost may have already been a ghost, preserve that if it
+ # was.
+ allow_leftmost_as_ghost = True
+ self.set_parent_ids(parent_ids,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+
+ @needs_tree_write_lock
+ def add_pending_merge(self, *revision_ids):
+ # TODO: Perhaps should check at this point that the
+ # history of the revision is actually present?
+ parents = self.get_parent_ids()
+ updated = False
+ for rev_id in revision_ids:
+ if rev_id in parents:
+ continue
+ parents.append(rev_id)
+ updated = True
+ if updated:
+ self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
+
+ def path_content_summary(self, path, _lstat=os.lstat,
+ _mapper=osutils.file_kind_from_stat_mode):
+ """See Tree.path_content_summary."""
+ abspath = self.abspath(path)
+ try:
+ stat_result = _lstat(abspath)
+ except OSError, e:
+ if getattr(e, 'errno', None) == errno.ENOENT:
+ # no file.
+ return ('missing', None, None, None)
+ # propagate other errors
+ raise
+ kind = _mapper(stat_result.st_mode)
+ if kind == 'file':
+ return self._file_content_summary(path, stat_result)
+ elif kind == 'directory':
+ # perhaps it looks like a plain directory, but it's really a
+ # reference.
+ if self._directory_is_tree_reference(path):
+ kind = 'tree-reference'
+ return kind, None, None, None
+ elif kind == 'symlink':
+ target = osutils.readlink(abspath)
+ return ('symlink', None, None, target)
+ else:
+ return (kind, None, None, None)
+
+ def _file_content_summary(self, path, stat_result):
+ size = stat_result.st_size
+ executable = self._is_executable_from_path_and_stat(path, stat_result)
+ # try for a stat cache lookup
+ return ('file', size, executable, self._sha_from_stat(
+ path, stat_result))
+
+ def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
+ """Common ghost checking functionality from set_parent_*.
+
+ This checks that the left hand-parent exists if there are any
+ revisions present.
+ """
+ if len(revision_ids) > 0:
+ leftmost_id = revision_ids[0]
+ if (not allow_leftmost_as_ghost and not
+ self.branch.repository.has_revision(leftmost_id)):
+ raise errors.GhostRevisionUnusableHere(leftmost_id)
+
+ def _set_merges_from_parent_ids(self, parent_ids):
+ merges = parent_ids[1:]
+ self._transport.put_bytes('pending-merges', '\n'.join(merges),
+ mode=self.bzrdir._get_file_mode())
+
+ def _filter_parent_ids_by_ancestry(self, revision_ids):
+ """Check that all merged revisions are proper 'heads'.
+
+ This will always return the first revision_id, and any merged revisions
+        which are independent heads (not ancestors of other supplied revisions).
+ """
+ if len(revision_ids) == 0:
+ return revision_ids
+ graph = self.branch.repository.get_graph()
+ heads = graph.heads(revision_ids)
+ new_revision_ids = revision_ids[:1]
+ for revision_id in revision_ids[1:]:
+ if revision_id in heads and revision_id not in new_revision_ids:
+ new_revision_ids.append(revision_id)
+ if new_revision_ids != revision_ids:
+ mutter('requested to set revision_ids = %s,'
+ ' but filtered to %s', revision_ids, new_revision_ids)
+ return new_revision_ids
+
+ @needs_tree_write_lock
+ def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
+ """Set the parent ids to revision_ids.
+
+ See also set_parent_trees. This api will try to retrieve the tree data
+ for each element of revision_ids from the trees repository. If you have
+ tree data already available, it is more efficient to use
+ set_parent_trees rather than set_parent_ids. set_parent_ids is however
+ an easier API to use.
+
+ :param revision_ids: The revision_ids to set as the parent ids of this
+ working tree. Any of these may be ghosts.
+ """
+ self._check_parents_for_ghosts(revision_ids,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+ for revision_id in revision_ids:
+ _mod_revision.check_not_reserved_id(revision_id)
+
+ revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)
+
+ if len(revision_ids) > 0:
+ self.set_last_revision(revision_ids[0])
+ else:
+ self.set_last_revision(_mod_revision.NULL_REVISION)
+
+ self._set_merges_from_parent_ids(revision_ids)
+
+ @needs_tree_write_lock
+ def set_pending_merges(self, rev_list):
+ parents = self.get_parent_ids()
+ leftmost = parents[:1]
+ new_parents = leftmost + rev_list
+ self.set_parent_ids(new_parents)
+
+ @needs_tree_write_lock
+ def set_merge_modified(self, modified_hashes):
+ """Set the merge modified hashes."""
+ raise NotImplementedError(self.set_merge_modified)
+
+ def _sha_from_stat(self, path, stat_result):
+ """Get a sha digest from the tree's stat cache.
+
+ The default implementation assumes no stat cache is present.
+
+ :param path: The path.
+ :param stat_result: The stat result being looked up.
+ """
+ return None
+
+ @needs_write_lock # because merge pulls data into the branch.
+ def merge_from_branch(self, branch, to_revision=None, from_revision=None,
+ merge_type=None, force=False):
+ """Merge from a branch into this working tree.
+
+ :param branch: The branch to merge from.
+ :param to_revision: If non-None, the merge will merge to to_revision,
+ but not beyond it. to_revision does not need to be in the history
+ of the branch when it is supplied. If None, to_revision defaults to
+ branch.last_revision().
+ """
+ from bzrlib.merge import Merger, Merge3Merger
+ merger = Merger(self.branch, this_tree=self)
+ # check that there are no local alterations
+ if not force and self.has_changes():
+ raise errors.UncommittedChanges(self)
+ if to_revision is None:
+ to_revision = _mod_revision.ensure_null(branch.last_revision())
+ merger.other_rev_id = to_revision
+ if _mod_revision.is_null(merger.other_rev_id):
+ raise errors.NoCommits(branch)
+ self.branch.fetch(branch, last_revision=merger.other_rev_id)
+ merger.other_basis = merger.other_rev_id
+ merger.other_tree = self.branch.repository.revision_tree(
+ merger.other_rev_id)
+ merger.other_branch = branch
+ if from_revision is None:
+ merger.find_base()
+ else:
+ merger.set_base_revision(from_revision, branch)
+ if merger.base_rev_id == merger.other_rev_id:
+ raise errors.PointlessMerge
+ merger.backup_files = False
+ if merge_type is None:
+ merger.merge_type = Merge3Merger
+ else:
+ merger.merge_type = merge_type
+ merger.set_interesting_files(None)
+ merger.show_base = False
+ merger.reprocess = False
+ conflicts = merger.do_merge()
+ merger.set_pending()
+ return conflicts
+
+ def merge_modified(self):
+ """Return a dictionary of files modified by a merge.
+
+ The list is initialized by WorkingTree.set_merge_modified, which is
+ typically called after we make some automatic updates to the tree
+ because of a merge.
+
+ This returns a map of file_id->sha1, containing only files which are
+ still in the working inventory and have that text hash.
+ """
+ raise NotImplementedError(self.merge_modified)
+
+ @needs_write_lock
+ def mkdir(self, path, file_id=None):
+ """See MutableTree.mkdir()."""
+ if file_id is None:
+ file_id = generate_ids.gen_file_id(os.path.basename(path))
+ os.mkdir(self.abspath(path))
+ self.add(path, file_id, 'directory')
+ return file_id
+
+ def get_symlink_target(self, file_id, path=None):
+ if path is not None:
+ abspath = self.abspath(path)
+ else:
+ abspath = self.id2abspath(file_id)
+ target = osutils.readlink(abspath)
+ return target
+
+ def subsume(self, other_tree):
+ raise NotImplementedError(self.subsume)
+
+ def _setup_directory_is_tree_reference(self):
+ if self._branch.repository._format.supports_tree_reference:
+ self._directory_is_tree_reference = \
+ self._directory_may_be_tree_reference
+ else:
+ self._directory_is_tree_reference = \
+ self._directory_is_never_tree_reference
+
+ def _directory_is_never_tree_reference(self, relpath):
+ return False
+
+ def _directory_may_be_tree_reference(self, relpath):
+ # as a special case, if a directory contains control files then
+ # it's a tree reference, except that the root of the tree is not
+ return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
+ # TODO: We could ask all the control formats whether they
+ # recognize this directory, but at the moment there's no cheap api
+ # to do that. Since we probably can only nest bzr checkouts and
+ # they always use this name it's ok for now. -- mbp 20060306
+ #
+ # FIXME: There is an unhandled case here of a subdirectory
+ # containing .bzr but not a branch; that will probably blow up
+ # when you try to commit it. It might happen if there is a
+ # checkout in a subdirectory. This can be avoided by not adding
+ # it. mbp 20070306
+
+ def extract(self, file_id, format=None):
+ """Extract a subtree from this tree.
+
+ A new branch will be created, relative to the path for this tree.
+ """
+ raise NotImplementedError(self.extract)
+
+ def flush(self):
+ """Write the in memory meta data to disk."""
+ raise NotImplementedError(self.flush)
+
+ def _kind(self, relpath):
+ return osutils.file_kind(self.abspath(relpath))
+
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ """List all files as (path, class, kind, id, entry).
+
+ Lists, but does not descend into unversioned directories.
+ This does not include files that have been deleted in this
+ tree. Skips the control directory.
+
+ :param include_root: if True, return an entry for the root
+ :param from_dir: start from this directory or None for the root
+ :param recursive: whether to recurse into subdirectories or not
+ """
+ raise NotImplementedError(self.list_files)
+
+ def move(self, from_paths, to_dir=None, after=False):
+ """Rename files.
+
+ to_dir must be known to the working tree.
+
+ If to_dir exists and is a directory, the files are moved into
+ it, keeping their old names.
+
+ Note that to_dir is only the last component of the new name;
+ this doesn't change the directory.
+
+ For each entry in from_paths the move mode will be determined
+ independently.
+
+ The first mode moves the file in the filesystem and updates the
+ working tree metadata. The second mode only updates the working tree
+ metadata without touching the file on the filesystem.
+
+ move uses the second mode if 'after == True' and the target is not
+ versioned but present in the working tree.
+
+ move uses the second mode if 'after == False' and the source is
+ versioned but no longer in the working tree, and the target is not
+ versioned but present in the working tree.
+
+ move uses the first mode if 'after == False' and the source is
+ versioned and present in the working tree, and the target is not
+ versioned and not present in the working tree.
+
+ Everything else results in an error.
+
+ This returns a list of (from_path, to_path) pairs for each
+ entry that is moved.
+ """
+ raise NotImplementedError(self.move)
+
+ @needs_tree_write_lock
+ def rename_one(self, from_rel, to_rel, after=False):
+ """Rename one file.
+
+ This can change the directory or the filename or both.
+
+ rename_one has several 'modes' to work. First, it can rename a physical
+ file and change the file_id. That is the normal mode. Second, it can
+ only change the file_id without touching any physical file.
+
+ rename_one uses the second mode if 'after == True' and 'to_rel' is
+ either not versioned or newly added, and present in the working tree.
+
+ rename_one uses the second mode if 'after == False' and 'from_rel' is
+ versioned but no longer in the working tree, and 'to_rel' is not
+ versioned but present in the working tree.
+
+ rename_one uses the first mode if 'after == False' and 'from_rel' is
+ versioned and present in the working tree, and 'to_rel' is not
+ versioned and not present in the working tree.
+
+ Everything else results in an error.
+ """
+ raise NotImplementedError(self.rename_one)
+
+ @needs_read_lock
+ def unknowns(self):
+ """Return all unknown files.
+
+        These are files in the working directory that are not versioned,
+        not control files, and not ignored.
+ """
+ # force the extras method to be fully executed before returning, to
+ # prevent race conditions with the lock
+ return iter(
+ [subp for subp in self.extras() if not self.is_ignored(subp)])
+
+ def unversion(self, file_ids):
+ """Remove the file ids in file_ids from the current versioned set.
+
+ When a file_id is unversioned, all of its children are automatically
+ unversioned.
+
+ :param file_ids: The file ids to stop versioning.
+ :raises: NoSuchId if any fileid is not currently versioned.
+ """
+ raise NotImplementedError(self.unversion)
+
+ @needs_write_lock
+ def pull(self, source, overwrite=False, stop_revision=None,
+ change_reporter=None, possible_transports=None, local=False,
+ show_base=False):
+ source.lock_read()
+ try:
+ old_revision_info = self.branch.last_revision_info()
+ basis_tree = self.basis_tree()
+ count = self.branch.pull(source, overwrite, stop_revision,
+ possible_transports=possible_transports,
+ local=local)
+ new_revision_info = self.branch.last_revision_info()
+ if new_revision_info != old_revision_info:
+ repository = self.branch.repository
+ if repository._format.fast_deltas:
+ parent_ids = self.get_parent_ids()
+ if parent_ids:
+ basis_id = parent_ids[0]
+ basis_tree = repository.revision_tree(basis_id)
+ basis_tree.lock_read()
+ try:
+ new_basis_tree = self.branch.basis_tree()
+ merge.merge_inner(
+ self.branch,
+ new_basis_tree,
+ basis_tree,
+ this_tree=self,
+ pb=None,
+ change_reporter=change_reporter,
+ show_base=show_base)
+ basis_root_id = basis_tree.get_root_id()
+ new_root_id = new_basis_tree.get_root_id()
+ if new_root_id is not None and basis_root_id != new_root_id:
+ self.set_root_id(new_root_id)
+ finally:
+ basis_tree.unlock()
+ # TODO - dedup parents list with things merged by pull ?
+ # reuse the revisiontree we merged against to set the new
+ # tree data.
+ parent_trees = []
+ if self.branch.last_revision() != _mod_revision.NULL_REVISION:
+ parent_trees.append(
+ (self.branch.last_revision(), new_basis_tree))
+ # we have to pull the merge trees out again, because
+ # merge_inner has set the ids. - this corner is not yet
+ # layered well enough to prevent double handling.
+ # XXX TODO: Fix the double handling: telling the tree about
+ # the already known parent data is wasteful.
+ merges = self.get_parent_ids()[1:]
+ parent_trees.extend([
+ (parent, repository.revision_tree(parent)) for
+ parent in merges])
+ self.set_parent_trees(parent_trees)
+ return count
+ finally:
+ source.unlock()
+
+ @needs_write_lock
+ def put_file_bytes_non_atomic(self, file_id, bytes):
+ """See MutableTree.put_file_bytes_non_atomic."""
+ stream = file(self.id2abspath(file_id), 'wb')
+ try:
+ stream.write(bytes)
+ finally:
+ stream.close()
+
+ def extras(self):
+ """Yield all unversioned files in this WorkingTree.
+
+ If there are any unversioned directories then only the directory is
+ returned, not all its children. But if there are unversioned files
+ under a versioned subdirectory, they are returned.
+
+ Currently returned depth-first, sorted by name within directories.
+ This is the same order used by 'osutils.walkdirs'.
+ """
+ raise NotImplementedError(self.extras)
+
+ def ignored_files(self):
+ """Yield list of PATH, IGNORE_PATTERN"""
+ for subp in self.extras():
+ pat = self.is_ignored(subp)
+ if pat is not None:
+ yield subp, pat
+
+ def get_ignore_list(self):
+ """Return list of ignore patterns.
+
+ Cached in the Tree object after the first call.
+ """
+ ignoreset = getattr(self, '_ignoreset', None)
+ if ignoreset is not None:
+ return ignoreset
+
+ ignore_globs = set()
+ ignore_globs.update(ignores.get_runtime_ignores())
+ ignore_globs.update(ignores.get_user_ignores())
+ if self.has_filename(bzrlib.IGNORE_FILENAME):
+ f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
+ try:
+ ignore_globs.update(ignores.parse_ignore_file(f))
+ finally:
+ f.close()
+ self._ignoreset = ignore_globs
+ return ignore_globs
+
+ def _flush_ignore_list_cache(self):
+ """Resets the cached ignore list to force a cache rebuild."""
+ self._ignoreset = None
+ self._ignoreglobster = None
+
+ def is_ignored(self, filename):
+ r"""Check whether the filename matches an ignore pattern.
+
+ Patterns containing '/' or '\' need to match the whole path;
+ others match against only the last component. Patterns starting
+ with '!' are ignore exceptions. Exceptions take precedence
+ over regular patterns and cause the filename to not be ignored.
+
+ If the file is ignored, returns the pattern which caused it to
+ be ignored, otherwise None. So this can simply be used as a
+ boolean if desired."""
+ if getattr(self, '_ignoreglobster', None) is None:
+ self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
+ return self._ignoreglobster.match(filename)
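+
+    # Illustrative (hypothetical) patterns: with an ignore list of
+    # ['*.pyc', '!keep.pyc'], is_ignored('foo.pyc') would return '*.pyc',
+    # while is_ignored('keep.pyc') returns None because '!' exceptions take
+    # precedence over matching patterns.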
+
+ def kind(self, file_id):
+ return file_kind(self.id2abspath(file_id))
+
+ def stored_kind(self, file_id):
+ """See Tree.stored_kind"""
+ raise NotImplementedError(self.stored_kind)
+
+ def _comparison_data(self, entry, path):
+ abspath = self.abspath(path)
+ try:
+ stat_value = os.lstat(abspath)
+ except OSError, e:
+ if getattr(e, 'errno', None) == errno.ENOENT:
+ stat_value = None
+ kind = None
+ executable = False
+ else:
+ raise
+ else:
+ mode = stat_value.st_mode
+ kind = osutils.file_kind_from_stat_mode(mode)
+ if not self._supports_executable():
+ executable = entry is not None and entry.executable
+ else:
+ executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
+ return kind, executable, stat_value
+
+ def _file_size(self, entry, stat_value):
+ return stat_value.st_size
+
+ def last_revision(self):
+ """Return the last revision of the branch for this tree.
+
+ This format tree does not support a separate marker for last-revision
+ compared to the branch.
+
+ See MutableTree.last_revision
+ """
+ return self._last_revision()
+
+ @needs_read_lock
+ def _last_revision(self):
+ """helper for get_parent_ids."""
+ return _mod_revision.ensure_null(self.branch.last_revision())
+
+ def is_locked(self):
+ """Check if this tree is locked."""
+ raise NotImplementedError(self.is_locked)
+
+ def lock_read(self):
+ """Lock the tree for reading.
+
+ This also locks the branch, and can be unlocked via self.unlock().
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ raise NotImplementedError(self.lock_read)
+
+ def lock_tree_write(self):
+ """See MutableTree.lock_tree_write, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ raise NotImplementedError(self.lock_tree_write)
+
+ def lock_write(self):
+ """See MutableTree.lock_write, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ raise NotImplementedError(self.lock_write)
+
+ def get_physical_lock_status(self):
+ raise NotImplementedError(self.get_physical_lock_status)
+
+ def set_last_revision(self, new_revision):
+ """Change the last revision in the working tree."""
+ raise NotImplementedError(self.set_last_revision)
+
+ def _change_last_revision(self, new_revision):
+ """Template method part of set_last_revision to perform the change.
+
+ This is used to allow WorkingTree3 instances to not affect branch
+ when their last revision is set.
+ """
+ if _mod_revision.is_null(new_revision):
+ self.branch.set_last_revision_info(0, new_revision)
+ return False
+ _mod_revision.check_not_reserved_id(new_revision)
+ try:
+ self.branch.generate_revision_history(new_revision)
+ except errors.NoSuchRevision:
+            # not present in the repo - don't try to set it deeper than the tip
+ self.branch._set_revision_history([new_revision])
+ return True
+
+ @needs_tree_write_lock
+ def remove(self, files, verbose=False, to_file=None, keep_files=True,
+ force=False):
+ """Remove nominated files from the working tree metadata.
+
+        :param files: File paths relative to the basedir.
+        :param keep_files: If true, the files will also be kept.
+        :param force: Delete files and directories, even if they are changed
+            and even if the directories are not empty.
+ """
+ if isinstance(files, basestring):
+ files = [files]
+
+ inv_delta = []
+
+ all_files = set() # specified and nested files
+        unknown_nested_files = set()
+ if to_file is None:
+ to_file = sys.stdout
+
+ files_to_backup = []
+
+ def recurse_directory_to_add_files(directory):
+ # Recurse directory and add all files
+ # so we can check if they have changed.
+ for parent_info, file_infos in self.walkdirs(directory):
+ for relpath, basename, kind, lstat, fileid, kind in file_infos:
+ # Is it versioned or ignored?
+ if self.path2id(relpath):
+ # Add nested content for deletion.
+ all_files.add(relpath)
+ else:
+ # Files which are not versioned
+ # should be treated as unknown.
+ files_to_backup.append(relpath)
+
+ for filename in files:
+ # Get file name into canonical form.
+ abspath = self.abspath(filename)
+ filename = self.relpath(abspath)
+ if len(filename) > 0:
+ all_files.add(filename)
+ recurse_directory_to_add_files(filename)
+
+ files = list(all_files)
+
+ if len(files) == 0:
+ return # nothing to do
+
+        # Sort so that directory contents are handled before the directory itself.
+ files.sort(reverse=True)
+
+ # Bail out if we are going to delete files we shouldn't
+ if not keep_files and not force:
+ for (file_id, path, content_change, versioned, parent_id, name,
+ kind, executable) in self.iter_changes(self.basis_tree(),
+ include_unchanged=True, require_versioned=False,
+ want_unversioned=True, specific_files=files):
+ if versioned[0] == False:
+ # The record is unknown or newly added
+ files_to_backup.append(path[1])
+ elif (content_change and (kind[1] is not None) and
+ osutils.is_inside_any(files, path[1])):
+ # Versioned and changed, but not deleted, and still
+ # in one of the dirs to be deleted.
+ files_to_backup.append(path[1])
+
+ def backup(file_to_backup):
+ backup_name = self.bzrdir._available_backup_name(file_to_backup)
+ osutils.rename(abs_path, self.abspath(backup_name))
+ return "removed %s (but kept a copy: %s)" % (file_to_backup,
+ backup_name)
+
+ # Build inv_delta and delete files where applicable,
+ # do this before any modifications to meta data.
+ for f in files:
+ fid = self.path2id(f)
+ message = None
+ if not fid:
+ message = "%s is not versioned." % (f,)
+ else:
+ if verbose:
+ # having removed it, it must be either ignored or unknown
+ if self.is_ignored(f):
+ new_status = 'I'
+ else:
+ new_status = '?'
+ # XXX: Really should be a more abstract reporter interface
+ kind_ch = osutils.kind_marker(self.kind(fid))
+ to_file.write(new_status + ' ' + f + kind_ch + '\n')
+ # Unversion file
+ inv_delta.append((f, None, fid, None))
+ message = "removed %s" % (f,)
+
+ if not keep_files:
+ abs_path = self.abspath(f)
+ if osutils.lexists(abs_path):
+ if (osutils.isdir(abs_path) and
+ len(os.listdir(abs_path)) > 0):
+ if force:
+ osutils.rmtree(abs_path)
+ message = "deleted %s" % (f,)
+ else:
+ message = backup(f)
+ else:
+ if f in files_to_backup:
+ message = backup(f)
+ else:
+ osutils.delete_any(abs_path)
+ message = "deleted %s" % (f,)
+ elif message is not None:
+ # Only care if we haven't done anything yet.
+ message = "%s does not exist." % (f,)
+
+ # Print only one message (if any) per file.
+ if message is not None:
+ note(message)
+ self.apply_inventory_delta(inv_delta)
+
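+    # Illustrative sketch of the API above (hypothetical path, not part of
+    # the bzrlib test suite): unversion a file while keeping it on disk; the
+    # @needs_tree_write_lock decorator acquires the lock for the caller.
+    #
+    #   tree = WorkingTree.open('.')
+    #   tree.remove(['foo.txt'], keep_files=True)
+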
+ @needs_tree_write_lock
+ def revert(self, filenames=None, old_tree=None, backups=True,
+ pb=None, report_changes=False):
+ from bzrlib.conflicts import resolve
+ if old_tree is None:
+ basis_tree = self.basis_tree()
+ basis_tree.lock_read()
+ old_tree = basis_tree
+ else:
+ basis_tree = None
+ try:
+ conflicts = transform.revert(self, old_tree, filenames, backups, pb,
+ report_changes)
+ if filenames is None and len(self.get_parent_ids()) > 1:
+ parent_trees = []
+ last_revision = self.last_revision()
+ if last_revision != _mod_revision.NULL_REVISION:
+ if basis_tree is None:
+ basis_tree = self.basis_tree()
+ basis_tree.lock_read()
+ parent_trees.append((last_revision, basis_tree))
+ self.set_parent_trees(parent_trees)
+ resolve(self)
+ else:
+ resolve(self, filenames, ignore_misses=True, recursive=True)
+ finally:
+ if basis_tree is not None:
+ basis_tree.unlock()
+ return conflicts
+
+ def revision_tree(self, revision_id):
+ """See Tree.revision_tree.
+
+ WorkingTree can supply revision_trees for the basis revision only
+ because there is only one cached inventory in the bzr directory.
+ """
+ raise NotImplementedError(self.revision_tree)
+
+ @needs_tree_write_lock
+ def set_root_id(self, file_id):
+ """Set the root id for this tree."""
+        # for compatibility
+ if file_id is None:
+ raise ValueError(
+ 'WorkingTree.set_root_id with fileid=None')
+ file_id = osutils.safe_file_id(file_id)
+ self._set_root_id(file_id)
+
+ def _set_root_id(self, file_id):
+ """Set the root id for this tree, in a format specific manner.
+
+ :param file_id: The file id to assign to the root. It must not be
+ present in the current inventory or an error will occur. It must
+ not be None, but rather a valid file id.
+ """
+ raise NotImplementedError(self._set_root_id)
+
+ def unlock(self):
+ """See Branch.unlock.
+
+ WorkingTree locking just uses the Branch locking facilities.
+        This is currently the case because all working trees have an embedded
+        branch within them. If, in the future, we were to make branch data shareable
+ between multiple working trees, i.e. via shared storage, then we
+ would probably want to lock both the local tree, and the branch.
+ """
+ raise NotImplementedError(self.unlock)
+
+ _marker = object()
+
+ def update(self, change_reporter=None, possible_transports=None,
+ revision=None, old_tip=_marker, show_base=False):
+ """Update a working tree along its branch.
+
+        This will update the branch if it is bound, too, which means we have
+ multiple trees involved:
+
+ - The new basis tree of the master.
+ - The old basis tree of the branch.
+ - The old basis tree of the working tree.
+ - The current working tree state.
+
+        Pathologically, all of these may be different, and non-ancestors of each
+ other. Conceptually we want to:
+
+ - Preserve the wt.basis->wt.state changes
+ - Transform the wt.basis to the new master basis.
+ - Apply a merge of the old branch basis to get any 'local' changes from
+ it into the tree.
+ - Restore the wt.basis->wt.state changes.
+
+ There isn't a single operation at the moment to do that, so we:
+
+ - Merge current state -> basis tree of the master w.r.t. the old tree
+ basis.
+ - Do a 'normal' merge of the old branch basis if it is relevant.
+
+ :param revision: The target revision to update to. Must be in the
+ revision history.
+ :param old_tip: If branch.update() has already been run, the value it
+ returned (old tip of the branch or None). _marker is used
+ otherwise.
+ """
+ if self.branch.get_bound_location() is not None:
+ self.lock_write()
+ update_branch = (old_tip is self._marker)
+ else:
+ self.lock_tree_write()
+ update_branch = False
+ try:
+ if update_branch:
+ old_tip = self.branch.update(possible_transports)
+ else:
+ if old_tip is self._marker:
+ old_tip = None
+ return self._update_tree(old_tip, change_reporter, revision, show_base)
+ finally:
+ self.unlock()
+
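+    # Illustrative sketch (hypothetical usage): update a bound checkout;
+    # update() returns the number of conflicts left in the tree.
+    #
+    #   tree = WorkingTree.open('.')
+    #   nb_conflicts = tree.update()
+    #   if nb_conflicts:
+    #       pass  # fix the conflicts, then rerun update
+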
+ @needs_tree_write_lock
+ def _update_tree(self, old_tip=None, change_reporter=None, revision=None,
+ show_base=False):
+ """Update a tree to the master branch.
+
+        :param old_tip: if supplied, the previous tip revision of the branch,
+ before it was changed to the master branch's tip.
+ """
+        # here if old_tip is not None, it is the old tip of the branch before
+        # it was updated from the master branch. This should become a pending
+        # merge in the working tree to preserve the user's existing work. We
+        # can't set that until we update the working tree's last revision to be
+        # one from the new branch, because it will just get absorbed by the
+        # parent de-duplication logic.
+        #
+        # We MUST save it even if an error occurs, because otherwise the user's
+        # local work is unreferenced and will appear to have been lost.
+ #
+ nb_conflicts = 0
+ try:
+ last_rev = self.get_parent_ids()[0]
+ except IndexError:
+ last_rev = _mod_revision.NULL_REVISION
+ if revision is None:
+ revision = self.branch.last_revision()
+
+ old_tip = old_tip or _mod_revision.NULL_REVISION
+
+ if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
+ # the branch we are bound to was updated
+ # merge those changes in first
+ base_tree = self.basis_tree()
+ other_tree = self.branch.repository.revision_tree(old_tip)
+ nb_conflicts = merge.merge_inner(self.branch, other_tree,
+ base_tree, this_tree=self,
+ change_reporter=change_reporter,
+ show_base=show_base)
+ if nb_conflicts:
+ self.add_parent_tree((old_tip, other_tree))
+ note(gettext('Rerun update after fixing the conflicts.'))
+ return nb_conflicts
+
+ if last_rev != _mod_revision.ensure_null(revision):
+ # the working tree is up to date with the branch
+ # we can merge the specified revision from master
+ to_tree = self.branch.repository.revision_tree(revision)
+ to_root_id = to_tree.get_root_id()
+
+ basis = self.basis_tree()
+ basis.lock_read()
+ try:
+ if (basis.get_root_id() is None or basis.get_root_id() != to_root_id):
+ self.set_root_id(to_root_id)
+ self.flush()
+ finally:
+ basis.unlock()
+
+ # determine the branch point
+ graph = self.branch.repository.get_graph()
+ base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
+ last_rev)
+ base_tree = self.branch.repository.revision_tree(base_rev_id)
+
+ nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
+ this_tree=self,
+ change_reporter=change_reporter,
+ show_base=show_base)
+ self.set_last_revision(revision)
+ # TODO - dedup parents list with things merged by pull ?
+            # reuse the tree we've updated to, to set the basis:
+ parent_trees = [(revision, to_tree)]
+ merges = self.get_parent_ids()[1:]
+ # Ideally we ask the tree for the trees here, that way the working
+ # tree can decide whether to give us the entire tree or give us a
+ # lazy initialised tree. dirstate for instance will have the trees
+ # in ram already, whereas a last-revision + basis-inventory tree
+ # will not, but also does not need them when setting parents.
+ for parent in merges:
+ parent_trees.append(
+ (parent, self.branch.repository.revision_tree(parent)))
+ if not _mod_revision.is_null(old_tip):
+ parent_trees.append(
+ (old_tip, self.branch.repository.revision_tree(old_tip)))
+ self.set_parent_trees(parent_trees)
+ last_rev = parent_trees[0][0]
+ return nb_conflicts
+
+ def set_conflicts(self, arg):
+ raise errors.UnsupportedOperation(self.set_conflicts, self)
+
+ def add_conflicts(self, arg):
+ raise errors.UnsupportedOperation(self.add_conflicts, self)
+
+ def conflicts(self):
+ raise NotImplementedError(self.conflicts)
+
+ def walkdirs(self, prefix=""):
+ """Walk the directories of this tree.
+
+ returns a generator which yields items in the form:
+        ((current_directory_path, fileid),
+ [(file1_path, file1_name, file1_kind, (lstat), file1_id,
+ file1_kind), ... ])
+
+ This API returns a generator, which is only valid during the current
+ tree transaction - within a single lock_read or lock_write duration.
+
+ If the tree is not locked, it may cause an error to be raised,
+ depending on the tree implementation.
+ """
+ disk_top = self.abspath(prefix)
+ if disk_top.endswith('/'):
+ disk_top = disk_top[:-1]
+ top_strip_len = len(disk_top) + 1
+ inventory_iterator = self._walkdirs(prefix)
+ disk_iterator = osutils.walkdirs(disk_top, prefix)
+ try:
+ current_disk = disk_iterator.next()
+ disk_finished = False
+ except OSError, e:
+ if not (e.errno == errno.ENOENT or
+ (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
+ raise
+ current_disk = None
+ disk_finished = True
+ try:
+ current_inv = inventory_iterator.next()
+ inv_finished = False
+ except StopIteration:
+ current_inv = None
+ inv_finished = True
+ while not inv_finished or not disk_finished:
+ if current_disk:
+ ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
+ cur_disk_dir_content) = current_disk
+ else:
+ ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
+ cur_disk_dir_content) = ((None, None), None)
+ if not disk_finished:
+ # strip out .bzr dirs
+ if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
+ len(cur_disk_dir_content) > 0):
+ # osutils.walkdirs can be made nicer -
+ # yield the path-from-prefix rather than the pathjoined
+ # value.
+ bzrdir_loc = bisect_left(cur_disk_dir_content,
+ ('.bzr', '.bzr'))
+ if (bzrdir_loc < len(cur_disk_dir_content)
+ and self.bzrdir.is_control_filename(
+ cur_disk_dir_content[bzrdir_loc][0])):
+                        # we don't yield the contents of .bzr, or .bzr itself.
+ del cur_disk_dir_content[bzrdir_loc]
+ if inv_finished:
+ # everything is unknown
+ direction = 1
+ elif disk_finished:
+ # everything is missing
+ direction = -1
+ else:
+ direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
+ if direction > 0:
+ # disk is before inventory - unknown
+ dirblock = [(relpath, basename, kind, stat, None, None) for
+ relpath, basename, kind, stat, top_path in
+ cur_disk_dir_content]
+ yield (cur_disk_dir_relpath, None), dirblock
+ try:
+ current_disk = disk_iterator.next()
+ except StopIteration:
+ disk_finished = True
+ elif direction < 0:
+ # inventory is before disk - missing.
+ dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
+ for relpath, basename, dkind, stat, fileid, kind in
+ current_inv[1]]
+ yield (current_inv[0][0], current_inv[0][1]), dirblock
+ try:
+ current_inv = inventory_iterator.next()
+ except StopIteration:
+ inv_finished = True
+ else:
+ # versioned present directory
+ # merge the inventory and disk data together
+ dirblock = []
+ for relpath, subiterator in itertools.groupby(sorted(
+ current_inv[1] + cur_disk_dir_content,
+ key=operator.itemgetter(0)), operator.itemgetter(1)):
+ path_elements = list(subiterator)
+ if len(path_elements) == 2:
+ inv_row, disk_row = path_elements
+ # versioned, present file
+ dirblock.append((inv_row[0],
+ inv_row[1], disk_row[2],
+ disk_row[3], inv_row[4],
+ inv_row[5]))
+ elif len(path_elements[0]) == 5:
+ # unknown disk file
+ dirblock.append((path_elements[0][0],
+ path_elements[0][1], path_elements[0][2],
+ path_elements[0][3], None, None))
+ elif len(path_elements[0]) == 6:
+ # versioned, absent file.
+ dirblock.append((path_elements[0][0],
+ path_elements[0][1], 'unknown', None,
+ path_elements[0][4], path_elements[0][5]))
+ else:
+ raise NotImplementedError('unreachable code')
+ yield current_inv[0], dirblock
+ try:
+ current_inv = inventory_iterator.next()
+ except StopIteration:
+ inv_finished = True
+ try:
+ current_disk = disk_iterator.next()
+ except StopIteration:
+ disk_finished = True
+
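+    # Illustrative sketch (hypothetical usage): iterate the merged
+    # inventory/disk view while holding a read lock, as the docstring
+    # requires.
+    #
+    #   tree.lock_read()
+    #   try:
+    #       for (dir_relpath, dir_id), dirblock in tree.walkdirs():
+    #           for relpath, name, kind, lstat, file_id, v_kind in dirblock:
+    #               pass  # unversioned entries have file_id and v_kind of None
+    #   finally:
+    #       tree.unlock()
+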
+ def _walkdirs(self, prefix=""):
+ """Walk the directories of this tree.
+
+        :param prefix: is used as the directory to start with.
+ :returns: a generator which yields items in the form::
+
+            ((current_directory_path, fileid),
+ [(file1_path, file1_name, file1_kind, None, file1_id,
+ file1_kind), ... ])
+ """
+ raise NotImplementedError(self._walkdirs)
+
+ @needs_tree_write_lock
+ def auto_resolve(self):
+ """Automatically resolve text conflicts according to contents.
+
+ Only text conflicts are auto_resolvable. Files with no conflict markers
+ are considered 'resolved', because bzr always puts conflict markers
+ into files that have text conflicts. The corresponding .THIS .BASE and
+ .OTHER files are deleted, as per 'resolve'.
+
+ :return: a tuple of ConflictLists: (un_resolved, resolved).
+ """
+ un_resolved = _mod_conflicts.ConflictList()
+ resolved = _mod_conflicts.ConflictList()
+ conflict_re = re.compile('^(<{7}|={7}|>{7})')
+ for conflict in self.conflicts():
+ if (conflict.typestring != 'text conflict' or
+ self.kind(conflict.file_id) != 'file'):
+ un_resolved.append(conflict)
+ continue
+ my_file = open(self.id2abspath(conflict.file_id), 'rb')
+ try:
+ for line in my_file:
+ if conflict_re.search(line):
+ un_resolved.append(conflict)
+ break
+ else:
+ resolved.append(conflict)
+ finally:
+ my_file.close()
+ resolved.remove_files(self)
+ self.set_conflicts(un_resolved)
+ return un_resolved, resolved
+
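+    # Illustrative sketch (hypothetical usage): conflicts whose markers are
+    # gone are moved to 'resolved' and their .THIS/.BASE/.OTHER files removed;
+    # the rest stay in 'un_resolved' for manual editing.
+    #
+    #   un_resolved, resolved = tree.auto_resolve()
+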
+ def _validate(self):
+ """Validate internal structures.
+
+        This is meant mostly for the test suite, to give it a chance to detect
+        corruption after actions have occurred. The default implementation is
+        just a no-op.
+
+ :return: None. An exception should be raised if there is an error.
+ """
+ return
+
+ def check_state(self):
+ """Check that the working state is/isn't valid."""
+ raise NotImplementedError(self.check_state)
+
+ def reset_state(self, revision_ids=None):
+ """Reset the state of the working tree.
+
+ This does a hard-reset to a last-known-good state. This is a way to
+ fix if something got corrupted (like the .bzr/checkout/dirstate file)
+ """
+ raise NotImplementedError(self.reset_state)
+
+ def _get_rules_searcher(self, default_searcher):
+ """See Tree._get_rules_searcher."""
+ if self._rules_searcher is None:
+ self._rules_searcher = super(WorkingTree,
+ self)._get_rules_searcher(default_searcher)
+ return self._rules_searcher
+
+ def get_shelf_manager(self):
+ """Return the ShelfManager for this WorkingTree."""
+ from bzrlib.shelf import ShelfManager
+ return ShelfManager(self, self._transport)
+
+
+class InventoryWorkingTree(WorkingTree,
+ bzrlib.mutabletree.MutableInventoryTree):
+ """Base class for working trees that are inventory-oriented.
+
+ The inventory is held in the `Branch` working-inventory, and the
+ files are in a directory on disk.
+
+ It is possible for a `WorkingTree` to have a filename which is
+ not listed in the Inventory and vice versa.
+ """
+
+ def __init__(self, basedir='.',
+ branch=DEPRECATED_PARAMETER,
+ _inventory=None,
+ _control_files=None,
+ _internal=False,
+ _format=None,
+ _bzrdir=None):
+        """Construct an InventoryWorkingTree instance. This is not a public API.
+
+ :param branch: A branch to override probing for the branch.
+ """
+ super(InventoryWorkingTree, self).__init__(basedir=basedir,
+ branch=branch, _transport=_control_files._transport,
+ _internal=_internal, _format=_format, _bzrdir=_bzrdir)
+
+ self._control_files = _control_files
+ self._detect_case_handling()
+
+ if _inventory is None:
+ # This will be acquired on lock_read() or lock_write()
+ self._inventory_is_modified = False
+ self._inventory = None
+ else:
+ # the caller of __init__ has provided an inventory,
+ # we assume they know what they are doing - as its only
+ # the Format factory and creation methods that are
+ # permitted to do this.
+ self._set_inventory(_inventory, dirty=False)
+
+ def _set_inventory(self, inv, dirty):
+ """Set the internal cached inventory.
+
+ :param inv: The inventory to set.
+        :param dirty: A boolean indicating whether the inventory is the same
+            logical inventory as what's on disk. If True the inventory is not
+            the same and should be written to disk or data will be lost; if
+            False then the inventory is the same as that on disk and any
+            serialisation would be unneeded overhead.
+ """
+ self._inventory = inv
+ self._inventory_is_modified = dirty
+
+ def _detect_case_handling(self):
+ wt_trans = self.bzrdir.get_workingtree_transport(None)
+ try:
+ wt_trans.stat(self._format.case_sensitive_filename)
+ except errors.NoSuchFile:
+ self.case_sensitive = True
+ else:
+ self.case_sensitive = False
+
+ self._setup_directory_is_tree_reference()
+
+ def _serialize(self, inventory, out_file):
+ xml5.serializer_v5.write_inventory(self._inventory, out_file,
+ working=True)
+
+    def _deserialize(self, in_file):
+ return xml5.serializer_v5.read_inventory(in_file)
+
+ def break_lock(self):
+ """Break a lock if one is present from another instance.
+
+ Uses the ui factory to ask for confirmation if the lock may be from
+ an active process.
+
+ This will probe the repository for its lock as well.
+ """
+ self._control_files.break_lock()
+ self.branch.break_lock()
+
+ def is_locked(self):
+ return self._control_files.is_locked()
+
+ def _must_be_locked(self):
+ if not self.is_locked():
+ raise errors.ObjectNotLocked(self)
+
+ def lock_read(self):
+ """Lock the tree for reading.
+
+ This also locks the branch, and can be unlocked via self.unlock().
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ if not self.is_locked():
+ self._reset_data()
+ self.branch.lock_read()
+ try:
+ self._control_files.lock_read()
+ return LogicalLockResult(self.unlock)
+ except:
+ self.branch.unlock()
+ raise
+
+ def lock_tree_write(self):
+ """See MutableTree.lock_tree_write, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ if not self.is_locked():
+ self._reset_data()
+ self.branch.lock_read()
+ try:
+ self._control_files.lock_write()
+ return LogicalLockResult(self.unlock)
+ except:
+ self.branch.unlock()
+ raise
+
+ def lock_write(self):
+ """See MutableTree.lock_write, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ if not self.is_locked():
+ self._reset_data()
+ self.branch.lock_write()
+ try:
+ self._control_files.lock_write()
+ return LogicalLockResult(self.unlock)
+ except:
+ self.branch.unlock()
+ raise
+
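+    # Illustrative sketch (hypothetical usage): each lock_* method above
+    # returns a LogicalLockResult whose unlock attribute is self.unlock, so a
+    # caller can pair them like this:
+    #
+    #   lock = tree.lock_write()
+    #   try:
+    #       pass  # mutate the tree
+    #   finally:
+    #       lock.unlock()
+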
+ def get_physical_lock_status(self):
+ return self._control_files.get_physical_lock_status()
+
+ @needs_tree_write_lock
+ def _write_inventory(self, inv):
+ """Write inventory as the current inventory."""
+ self._set_inventory(inv, dirty=True)
+ self.flush()
+
+ # XXX: This method should be deprecated in favour of taking in a proper
+ # new Inventory object.
+ @needs_tree_write_lock
+ def set_inventory(self, new_inventory_list):
+ from bzrlib.inventory import (Inventory,
+ InventoryDirectory,
+ InventoryFile,
+ InventoryLink)
+ inv = Inventory(self.get_root_id())
+ for path, file_id, parent, kind in new_inventory_list:
+ name = os.path.basename(path)
+ if name == "":
+ continue
+            # FIXME: there should be a factory function inv.add_??
+ if kind == 'directory':
+ inv.add(InventoryDirectory(file_id, name, parent))
+ elif kind == 'file':
+ inv.add(InventoryFile(file_id, name, parent))
+ elif kind == 'symlink':
+ inv.add(InventoryLink(file_id, name, parent))
+ else:
+ raise errors.BzrError("unknown kind %r" % kind)
+ self._write_inventory(inv)
+
+ def _write_basis_inventory(self, xml):
+ """Write the basis inventory XML to the basis-inventory file"""
+ path = self._basis_inventory_name()
+ sio = StringIO(xml)
+ self._transport.put_file(path, sio,
+ mode=self.bzrdir._get_file_mode())
+
+ def _reset_data(self):
+ """Reset transient data that cannot be revalidated."""
+ self._inventory_is_modified = False
+ f = self._transport.get('inventory')
+ try:
+ result = self._deserialize(f)
+ finally:
+ f.close()
+ self._set_inventory(result, dirty=False)
+
+ def _set_root_id(self, file_id):
+ """Set the root id for this tree, in a format specific manner.
+
+ :param file_id: The file id to assign to the root. It must not be
+ present in the current inventory or an error will occur. It must
+ not be None, but rather a valid file id.
+ """
+ inv = self._inventory
+ orig_root_id = inv.root.file_id
+ # TODO: it might be nice to exit early if there was nothing
+        # to do, saving us from triggering a sync on unlock.
+ self._inventory_is_modified = True
+ # we preserve the root inventory entry object, but
+        # unlink it from the _byid index
+ del inv._byid[inv.root.file_id]
+ inv.root.file_id = file_id
+ # and link it into the index with the new changed id.
+ inv._byid[inv.root.file_id] = inv.root
+ # and finally update all children to reference the new id.
+ # XXX: this should be safe to just look at the root.children
+ # list, not the WHOLE INVENTORY.
+ for fid in inv:
+ entry = inv[fid]
+ if entry.parent_id == orig_root_id:
+ entry.parent_id = inv.root.file_id
+
+ @needs_tree_write_lock
+ def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
+ """See MutableTree.set_parent_trees."""
+ parent_ids = [rev for (rev, tree) in parents_list]
+ for revision_id in parent_ids:
+ _mod_revision.check_not_reserved_id(revision_id)
+
+ self._check_parents_for_ghosts(parent_ids,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+
+ parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
+
+ if len(parent_ids) == 0:
+ leftmost_parent_id = _mod_revision.NULL_REVISION
+ leftmost_parent_tree = None
+ else:
+ leftmost_parent_id, leftmost_parent_tree = parents_list[0]
+
+ if self._change_last_revision(leftmost_parent_id):
+ if leftmost_parent_tree is None:
+ # If we don't have a tree, fall back to reading the
+ # parent tree from the repository.
+ self._cache_basis_inventory(leftmost_parent_id)
+ else:
+ inv = leftmost_parent_tree.root_inventory
+ xml = self._create_basis_xml_from_inventory(
+ leftmost_parent_id, inv)
+ self._write_basis_inventory(xml)
+ self._set_merges_from_parent_ids(parent_ids)
+
+ def _cache_basis_inventory(self, new_revision):
+ """Cache new_revision as the basis inventory."""
+ # TODO: this should allow the ready-to-use inventory to be passed in,
+ # as commit already has that ready-to-use [while the format is the
+ # same, that is].
+ try:
+ # this double handles the inventory - unpack and repack -
+ # but is easier to understand. We can/should put a conditional
+ # in here based on whether the inventory is in the latest format
+ # - perhaps we should repack all inventories on a repository
+ # upgrade ?
+ # the fast path is to copy the raw xml from the repository. If the
+ # xml contains 'revision_id="', then we assume the right
+ # revision_id is set. We must check for this full string, because a
+ # root node id can legitimately look like 'revision_id' but cannot
+ # contain a '"'.
+ xml = self.branch.repository._get_inventory_xml(new_revision)
+ firstline = xml.split('\n', 1)[0]
+            if ('revision_id="' not in firstline or
+ 'format="7"' not in firstline):
+ inv = self.branch.repository._serializer.read_inventory_from_string(
+ xml, new_revision)
+ xml = self._create_basis_xml_from_inventory(new_revision, inv)
+ self._write_basis_inventory(xml)
+ except (errors.NoSuchRevision, errors.RevisionNotPresent):
+ pass
+
+ def _basis_inventory_name(self):
+ return 'basis-inventory-cache'
+
+ def _create_basis_xml_from_inventory(self, revision_id, inventory):
+ """Create the text that will be saved in basis-inventory"""
+ inventory.revision_id = revision_id
+ return xml7.serializer_v7.write_inventory_to_string(inventory)
+
+ @needs_tree_write_lock
+ def set_conflicts(self, conflicts):
+ self._put_rio('conflicts', conflicts.to_stanzas(),
+ CONFLICT_HEADER_1)
+
+ @needs_tree_write_lock
+ def add_conflicts(self, new_conflicts):
+ conflict_set = set(self.conflicts())
+ conflict_set.update(set(list(new_conflicts)))
+ self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
+ key=_mod_conflicts.Conflict.sort_key)))
+
+ @needs_read_lock
+ def conflicts(self):
+ try:
+ confile = self._transport.get('conflicts')
+ except errors.NoSuchFile:
+ return _mod_conflicts.ConflictList()
+ try:
+ try:
+ if confile.next() != CONFLICT_HEADER_1 + '\n':
+ raise errors.ConflictFormatError()
+ except StopIteration:
+ raise errors.ConflictFormatError()
+ reader = _mod_rio.RioReader(confile)
+ return _mod_conflicts.ConflictList.from_stanzas(reader)
+ finally:
+ confile.close()
+
+ def read_basis_inventory(self):
+ """Read the cached basis inventory."""
+ path = self._basis_inventory_name()
+ return self._transport.get_bytes(path)
+
+ @needs_read_lock
+ def read_working_inventory(self):
+ """Read the working inventory.
+
+ :raises errors.InventoryModified: read_working_inventory will fail
+ when the current in memory inventory has been modified.
+ """
+ # conceptually this should be an implementation detail of the tree.
+ # XXX: Deprecate this.
+ # ElementTree does its own conversion from UTF-8, so open in
+ # binary.
+ if self._inventory_is_modified:
+ raise errors.InventoryModified(self)
+ f = self._transport.get('inventory')
+ try:
+ result = self._deserialize(f)
+ finally:
+ f.close()
+ self._set_inventory(result, dirty=False)
+ return result
+
+ @needs_read_lock
+ def get_root_id(self):
+        """Return the id of this tree's root."""
+ return self._inventory.root.file_id
+
+ def has_id(self, file_id):
+ # files that have been deleted are excluded
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ if not inv.has_id(inv_file_id):
+ return False
+ path = inv.id2path(inv_file_id)
+ return osutils.lexists(self.abspath(path))
+
+ def has_or_had_id(self, file_id):
+ if file_id == self.get_root_id():
+ return True
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv.has_id(inv_file_id)
+
+ def all_file_ids(self):
+ """Iterate through file_ids for this tree.
+
+ file_ids are in a WorkingTree if they are in the working inventory
+ and the working file exists.
+ """
+ ret = set()
+ for path, ie in self.iter_entries_by_dir():
+ ret.add(ie.file_id)
+ return ret
+
+ @needs_tree_write_lock
+ def set_last_revision(self, new_revision):
+ """Change the last revision in the working tree."""
+ if self._change_last_revision(new_revision):
+ self._cache_basis_inventory(new_revision)
+
+ def _get_check_refs(self):
+ """Return the references needed to perform a check of this tree.
+
+ The default implementation returns no refs, and is only suitable for
+ trees that have no local caching and can commit on ghosts at any time.
+
+ :seealso: bzrlib.check for details about check_refs.
+ """
+ return []
+
+ @needs_read_lock
+ def _check(self, references):
+ """Check the tree for consistency.
+
+ :param references: A dict with keys matching the items returned by
+ self._get_check_refs(), and values from looking those keys up in
+ the repository.
+ """
+ tree_basis = self.basis_tree()
+ tree_basis.lock_read()
+ try:
+ repo_basis = references[('trees', self.last_revision())]
+ if len(list(repo_basis.iter_changes(tree_basis))) > 0:
+ raise errors.BzrCheckError(
+ "Mismatched basis inventory content.")
+ self._validate()
+ finally:
+ tree_basis.unlock()
+
+ @needs_read_lock
+ def check_state(self):
+ """Check that the working state is/isn't valid."""
+ check_refs = self._get_check_refs()
+ refs = {}
+ for ref in check_refs:
+ kind, value = ref
+ if kind == 'trees':
+ refs[ref] = self.branch.repository.revision_tree(value)
+ self._check(refs)
+
+ @needs_tree_write_lock
+ def reset_state(self, revision_ids=None):
+ """Reset the state of the working tree.
+
+ This does a hard-reset to a last-known-good state. This is a way to
+ fix if something got corrupted (like the .bzr/checkout/dirstate file)
+ """
+ if revision_ids is None:
+ revision_ids = self.get_parent_ids()
+ if not revision_ids:
+ rt = self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ else:
+ rt = self.branch.repository.revision_tree(revision_ids[0])
+ self._write_inventory(rt.root_inventory)
+ self.set_parent_ids(revision_ids)
+
+ def flush(self):
+ """Write the in memory inventory to disk."""
+ # TODO: Maybe this should only write on dirty ?
+ if self._control_files._lock_mode != 'w':
+ raise errors.NotWriteLocked(self)
+ sio = StringIO()
+ self._serialize(self._inventory, sio)
+ sio.seek(0)
+ self._transport.put_file('inventory', sio,
+ mode=self.bzrdir._get_file_mode())
+ self._inventory_is_modified = False
+
+ def get_file_mtime(self, file_id, path=None):
+ """See Tree.get_file_mtime."""
+ if not path:
+ path = self.id2path(file_id)
+ try:
+ return os.lstat(self.abspath(path)).st_mtime
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise errors.FileTimestampUnavailable(path)
+ raise
+
+ def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
+ inv, file_id = self._path2inv_file_id(path)
+ if file_id is None:
+ # For unversioned files on win32, we just assume they are not
+ # executable
+ return False
+ return inv[file_id].executable
+
+ def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
+ mode = stat_result.st_mode
+ return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
+
+ def is_executable(self, file_id, path=None):
+ if not self._supports_executable():
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].executable
+ else:
+ if not path:
+ path = self.id2path(file_id)
+ mode = os.lstat(self.abspath(path)).st_mode
+ return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
+
+ def _is_executable_from_path_and_stat(self, path, stat_result):
+ if not self._supports_executable():
+ return self._is_executable_from_path_and_stat_from_basis(path, stat_result)
+ else:
+ return self._is_executable_from_path_and_stat_from_stat(path, stat_result)
+
+ @needs_tree_write_lock
+ def _add(self, files, ids, kinds):
+ """See MutableTree._add."""
+ # TODO: Re-adding a file that is removed in the working copy
+ # should probably put it back with the previous ID.
+ # the read and write working inventory should not occur in this
+ # function - they should be part of lock_write and unlock.
+ # FIXME: nested trees
+ inv = self.root_inventory
+ for f, file_id, kind in zip(files, ids, kinds):
+ if file_id is None:
+ inv.add_path(f, kind=kind)
+ else:
+ inv.add_path(f, kind=kind, file_id=file_id)
+ self._inventory_is_modified = True
+
+ def revision_tree(self, revision_id):
+        """See WorkingTree.revision_tree."""
+ if revision_id == self.last_revision():
+ try:
+ xml = self.read_basis_inventory()
+ except errors.NoSuchFile:
+ pass
+ else:
+ try:
+ inv = xml7.serializer_v7.read_inventory_from_string(xml)
+                    # don't use the repository revision_tree api because we want
+ # to supply the inventory.
+ if inv.revision_id == revision_id:
+ return revisiontree.InventoryRevisionTree(
+ self.branch.repository, inv, revision_id)
+ except errors.BadInventoryFormat:
+ pass
+ # raise if there was no inventory, or if we read the wrong inventory.
+ raise errors.NoSuchRevisionInTree(self, revision_id)
+
+ @needs_read_lock
+ def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
+ """See Tree.annotate_iter
+
+ This implementation will use the basis tree implementation if possible.
+ Lines not in the basis are attributed to CURRENT_REVISION
+
+ If there are pending merges, lines added by those merges will be
+ incorrectly attributed to CURRENT_REVISION (but after committing, the
+ attribution will be correct).
+ """
+ maybe_file_parent_keys = []
+ for parent_id in self.get_parent_ids():
+ try:
+ parent_tree = self.revision_tree(parent_id)
+ except errors.NoSuchRevisionInTree:
+ parent_tree = self.branch.repository.revision_tree(parent_id)
+ parent_tree.lock_read()
+ try:
+ try:
+ kind = parent_tree.kind(file_id)
+ except errors.NoSuchId:
+ continue
+ if kind != 'file':
+ # Note: this is slightly unnecessary, because symlinks and
+ # directories have a "text" which is the empty text, and we
+ # know that won't mess up annotations. But it seems cleaner
+ continue
+ parent_text_key = (
+ file_id, parent_tree.get_file_revision(file_id))
+ if parent_text_key not in maybe_file_parent_keys:
+ maybe_file_parent_keys.append(parent_text_key)
+ finally:
+ parent_tree.unlock()
+ graph = _mod_graph.Graph(self.branch.repository.texts)
+ heads = graph.heads(maybe_file_parent_keys)
+ file_parent_keys = []
+ for key in maybe_file_parent_keys:
+ if key in heads:
+ file_parent_keys.append(key)
+
+ # Now we have the parents of this content
+ annotator = self.branch.repository.texts.get_annotator()
+ text = self.get_file_text(file_id)
+        this_key = (file_id, default_revision)
+ annotator.add_special_text(this_key, file_parent_keys, text)
+ annotations = [(key[-1], line)
+ for key, line in annotator.annotate_flat(this_key)]
+ return annotations
+
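+    # Illustrative sketch (hypothetical path): annotate a file that has
+    # uncommitted changes; new lines come back attributed to CURRENT_REVISION.
+    #
+    #   tree.lock_read()
+    #   try:
+    #       file_id = tree.path2id('hello.txt')
+    #       for revision_id, line in tree.annotate_iter(file_id):
+    #           pass
+    #   finally:
+    #       tree.unlock()
+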
+ def _put_rio(self, filename, stanzas, header):
+ self._must_be_locked()
+ my_file = _mod_rio.rio_file(stanzas, header)
+ self._transport.put_file(filename, my_file,
+ mode=self.bzrdir._get_file_mode())
+
+ @needs_tree_write_lock
+ def set_merge_modified(self, modified_hashes):
+ def iter_stanzas():
+ for file_id, hash in modified_hashes.iteritems():
+ yield _mod_rio.Stanza(file_id=file_id.decode('utf8'),
+ hash=hash)
+ self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
+
+ @needs_read_lock
+ def merge_modified(self):
+ """Return a dictionary of files modified by a merge.
+
+ The list is initialized by WorkingTree.set_merge_modified, which is
+ typically called after we make some automatic updates to the tree
+ because of a merge.
+
+ This returns a map of file_id->sha1, containing only files which are
+ still in the working inventory and have that text hash.
+ """
+ try:
+ hashfile = self._transport.get('merge-hashes')
+ except errors.NoSuchFile:
+ return {}
+ try:
+ merge_hashes = {}
+ try:
+ if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
+ raise errors.MergeModifiedFormatError()
+ except StopIteration:
+ raise errors.MergeModifiedFormatError()
+ for s in _mod_rio.RioReader(hashfile):
+ # RioReader reads in Unicode, so convert file_ids back to utf8
+ file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
+ if not self.has_id(file_id):
+ continue
+ text_hash = s.get("hash")
+ if text_hash == self.get_file_sha1(file_id):
+ merge_hashes[file_id] = text_hash
+ return merge_hashes
+ finally:
+ hashfile.close()
+
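+    # Illustrative sketch (hypothetical usage): after a merge, map the
+    # surviving file ids to the text hashes recorded by set_merge_modified().
+    #
+    #   for file_id, sha1 in tree.merge_modified().iteritems():
+    #       pass
+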
+ @needs_write_lock
+ def subsume(self, other_tree):
+ def add_children(inventory, entry):
+ for child_entry in entry.children.values():
+ inventory._byid[child_entry.file_id] = child_entry
+ if child_entry.kind == 'directory':
+ add_children(inventory, child_entry)
+ if other_tree.get_root_id() == self.get_root_id():
+ raise errors.BadSubsumeSource(self, other_tree,
+ 'Trees have the same root')
+ try:
+ other_tree_path = self.relpath(other_tree.basedir)
+ except errors.PathNotChild:
+ raise errors.BadSubsumeSource(self, other_tree,
+ 'Tree is not contained by the other')
+ new_root_parent = self.path2id(osutils.dirname(other_tree_path))
+ if new_root_parent is None:
+ raise errors.BadSubsumeSource(self, other_tree,
+ 'Parent directory is not versioned.')
+ # We need to ensure that the result of a fetch will have a
+ # versionedfile for the other_tree root, and only fetching into
+ # RepositoryKnit2 guarantees that.
+ if not self.branch.repository.supports_rich_root():
+ raise errors.SubsumeTargetNeedsUpgrade(other_tree)
+ other_tree.lock_tree_write()
+ try:
+ new_parents = other_tree.get_parent_ids()
+ other_root = other_tree.root_inventory.root
+ other_root.parent_id = new_root_parent
+ other_root.name = osutils.basename(other_tree_path)
+ self.root_inventory.add(other_root)
+ add_children(self.root_inventory, other_root)
+ self._write_inventory(self.root_inventory)
+            # normally we don't want to fetch whole repositories, but I think
+ # here we really do want to consolidate the whole thing.
+ for parent_id in other_tree.get_parent_ids():
+ self.branch.fetch(other_tree.branch, parent_id)
+ self.add_parent_tree_id(parent_id)
+ finally:
+ other_tree.unlock()
+ other_tree.bzrdir.retire_bzrdir()
+
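+    # Illustrative sketch (hypothetical path): absorb a standalone tree that
+    # lives inside this one, so 'nested' becomes a versioned subdirectory.
+    #
+    #   other = WorkingTree.open('nested')
+    #   tree.subsume(other)
+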
+ @needs_tree_write_lock
+ def extract(self, file_id, format=None):
+ """Extract a subtree from this tree.
+
+ A new branch will be created, relative to the path for this tree.
+ """
+ self.flush()
+ def mkdirs(path):
+ segments = osutils.splitpath(path)
+ transport = self.branch.bzrdir.root_transport
+ for name in segments:
+ transport = transport.clone(name)
+ transport.ensure_base()
+ return transport
+
+ sub_path = self.id2path(file_id)
+ branch_transport = mkdirs(sub_path)
+ if format is None:
+ format = self.bzrdir.cloning_metadir()
+ branch_transport.ensure_base()
+ branch_bzrdir = format.initialize_on_transport(branch_transport)
+ try:
+ repo = branch_bzrdir.find_repository()
+ except errors.NoRepositoryPresent:
+ repo = branch_bzrdir.create_repository()
+ if not repo.supports_rich_root():
+ raise errors.RootNotRich()
+ new_branch = branch_bzrdir.create_branch()
+ new_branch.pull(self.branch)
+ for parent_id in self.get_parent_ids():
+ new_branch.fetch(self.branch, parent_id)
+ tree_transport = self.bzrdir.root_transport.clone(sub_path)
+ if tree_transport.base != branch_transport.base:
+ tree_bzrdir = format.initialize_on_transport(tree_transport)
+ tree_bzrdir.set_branch_reference(new_branch)
+ else:
+ tree_bzrdir = branch_bzrdir
+ wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
+ wt.set_parent_ids(self.get_parent_ids())
+ # FIXME: Support nested trees
+ my_inv = self.root_inventory
+ child_inv = inventory.Inventory(root_id=None)
+ new_root = my_inv[file_id]
+ my_inv.remove_recursive_id(file_id)
+ new_root.parent_id = None
+ child_inv.add(new_root)
+ self._write_inventory(my_inv)
+ wt._write_inventory(child_inv)
+ return wt
+
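+    # Illustrative sketch (hypothetical path): the inverse of subsume(), split
+    # a versioned subdirectory out into its own branch and working tree.
+    #
+    #   sub_tree = tree.extract(tree.path2id('subdir'))
+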
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ """List all files as (path, class, kind, id, entry).
+
+ Lists, but does not descend into unversioned directories.
+ This does not include files that have been deleted in this
+ tree. Skips the control directory.
+
+ :param include_root: if True, return an entry for the root
+ :param from_dir: start from this directory or None for the root
+ :param recursive: whether to recurse into subdirectories or not
+ """
+ # list_files is an iterator, so @needs_read_lock doesn't work properly
+ # with it. So callers should be careful to always read_lock the tree.
+ if not self.is_locked():
+ raise errors.ObjectNotLocked(self)
+
+ if from_dir is None and include_root is True:
+ yield ('', 'V', 'directory', self.get_root_id(), self.root_inventory.root)
+ # Convert these into local objects to save lookup times
+ pathjoin = osutils.pathjoin
+ file_kind = self._kind
+
+ # transport.base ends in a slash, we want the piece
+ # between the last two slashes
+ transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
+
+ fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
+
+ # directory file_id, relative path, absolute path, reverse sorted children
+ if from_dir is not None:
+ inv, from_dir_id = self._path2inv_file_id(from_dir)
+ if from_dir_id is None:
+ # Directory not versioned
+ return
+ from_dir_abspath = pathjoin(self.basedir, from_dir)
+ else:
+ inv = self.root_inventory
+ from_dir_id = inv.root.file_id
+ from_dir_abspath = self.basedir
+ children = os.listdir(from_dir_abspath)
+ children.sort()
+ # jam 20060527 The kernel sized tree seems equivalent whether we
+ # use a deque and popleft to keep them sorted, or if we use a plain
+ # list and just reverse() them.
+ children = collections.deque(children)
+ stack = [(from_dir_id, u'', from_dir_abspath, children)]
+ while stack:
+ from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
+
+ while children:
+ f = children.popleft()
+ ## TODO: If we find a subdirectory with its own .bzr
+ ## directory, then that is a separate tree and we
+ ## should exclude it.
+
+ # the bzrdir for this tree
+ if transport_base_dir == f:
+ continue
+
+                # we know that from_dir_relpath and from_dir_abspath never end
+                # in a slash and 'f' doesn't begin with one, so we can do a
+                # string op rather than the checks of pathjoin(); all relative
+                # paths will have an extra slash at the beginning
+ fp = from_dir_relpath + '/' + f
+
+ # absolute path
+ fap = from_dir_abspath + '/' + f
+
+ dir_ie = inv[from_dir_id]
+ if dir_ie.kind == 'directory':
+ f_ie = dir_ie.children.get(f)
+ else:
+ f_ie = None
+ if f_ie:
+ c = 'V'
+ elif self.is_ignored(fp[1:]):
+ c = 'I'
+ else:
+ # we may not have found this file, because of a unicode
+ # issue, or because the directory was actually a symlink.
+ f_norm, can_access = osutils.normalized_filename(f)
+ if f == f_norm or not can_access:
+ # No change, so treat this file normally
+ c = '?'
+ else:
+ # this file can be accessed by a normalized path
+ # check again if it is versioned
+ # these lines are repeated here for performance
+ f = f_norm
+ fp = from_dir_relpath + '/' + f
+ fap = from_dir_abspath + '/' + f
+ f_ie = inv.get_child(from_dir_id, f)
+ if f_ie:
+ c = 'V'
+ elif self.is_ignored(fp[1:]):
+ c = 'I'
+ else:
+ c = '?'
+
+ fk = file_kind(fap)
+
+ # make a last minute entry
+ if f_ie:
+ yield fp[1:], c, fk, f_ie.file_id, f_ie
+ else:
+ try:
+ yield fp[1:], c, fk, None, fk_entries[fk]()
+ except KeyError:
+ yield fp[1:], c, fk, None, TreeEntry()
+ continue
+
+ if fk != 'directory':
+ continue
+
+ # But do this child first if recursing down
+ if recursive:
+ new_children = os.listdir(fap)
+ new_children.sort()
+ new_children = collections.deque(new_children)
+ stack.append((f_ie.file_id, fp, fap, new_children))
+ # Break out of inner loop,
+ # so that we start outer loop with child
+ break
+ else:
+ # if we finished all children, pop it off the stack
+ stack.pop()
+
+ @needs_tree_write_lock
+ def move(self, from_paths, to_dir=None, after=False):
+ """Rename files.
+
+ to_dir must exist in the inventory.
+
+ If to_dir exists and is a directory, the files are moved into
+ it, keeping their old names.
+
+ Note that to_dir is only the last component of the new name;
+ this doesn't change the directory.
+
+ For each entry in from_paths the move mode will be determined
+ independently.
+
+ The first mode moves the file in the filesystem and updates the
+ inventory. The second mode only updates the inventory without
+ touching the file on the filesystem.
+
+ move uses the second mode if 'after == True' and the target is
+ either not versioned or newly added, and present in the working tree.
+
+ move uses the second mode if 'after == False' and the source is
+ versioned but no longer in the working tree, and the target is not
+ versioned but present in the working tree.
+
+ move uses the first mode if 'after == False' and the source is
+ versioned and present in the working tree, and the target is not
+ versioned and not present in the working tree.
+
+ Everything else results in an error.
+
+ This returns a list of (from_path, to_path) pairs for each
+ entry that is moved.
+ """
+ rename_entries = []
+ rename_tuples = []
+
+ invs_to_write = set()
+
+ # check for deprecated use of signature
+ if to_dir is None:
+ raise TypeError('You must supply a target directory')
+ # check destination directory
+ if isinstance(from_paths, basestring):
+ raise ValueError()
+ to_abs = self.abspath(to_dir)
+ if not isdir(to_abs):
+ raise errors.BzrMoveFailedError('',to_dir,
+ errors.NotADirectory(to_abs))
+ if not self.has_filename(to_dir):
+ raise errors.BzrMoveFailedError('',to_dir,
+ errors.NotInWorkingDirectory(to_dir))
+ to_inv, to_dir_id = self._path2inv_file_id(to_dir)
+ if to_dir_id is None:
+ raise errors.BzrMoveFailedError('',to_dir,
+ errors.NotVersionedError(path=to_dir))
+
+ to_dir_ie = to_inv[to_dir_id]
+ if to_dir_ie.kind != 'directory':
+ raise errors.BzrMoveFailedError('',to_dir,
+ errors.NotADirectory(to_abs))
+
+ # create rename entries and tuples
+ for from_rel in from_paths:
+ from_tail = splitpath(from_rel)[-1]
+ from_inv, from_id = self._path2inv_file_id(from_rel)
+ if from_id is None:
+ raise errors.BzrMoveFailedError(from_rel,to_dir,
+ errors.NotVersionedError(path=from_rel))
+
+ from_entry = from_inv[from_id]
+ from_parent_id = from_entry.parent_id
+ to_rel = pathjoin(to_dir, from_tail)
+ rename_entry = InventoryWorkingTree._RenameEntry(
+ from_rel=from_rel,
+ from_id=from_id,
+ from_tail=from_tail,
+ from_parent_id=from_parent_id,
+ to_rel=to_rel, to_tail=from_tail,
+ to_parent_id=to_dir_id)
+ rename_entries.append(rename_entry)
+ rename_tuples.append((from_rel, to_rel))
+
+ # determine which move mode to use. checks also for movability
+ rename_entries = self._determine_mv_mode(rename_entries, after)
+
+ original_modified = self._inventory_is_modified
+ try:
+ if len(from_paths):
+ self._inventory_is_modified = True
+ self._move(rename_entries)
+ except:
+ # restore the inventory on error
+ self._inventory_is_modified = original_modified
+ raise
+ #FIXME: Should potentially also write the from_invs
+ self._write_inventory(to_inv)
+ return rename_tuples
+
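+    # Illustrative sketch (hypothetical paths): move versioned files into an
+    # already-versioned directory; the return value lists the renames made.
+    #
+    #   tree.move(['a.txt', 'b.txt'], to_dir='lib')
+    #   # -> [('a.txt', 'lib/a.txt'), ('b.txt', 'lib/b.txt')]
+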
+ @needs_tree_write_lock
+ def rename_one(self, from_rel, to_rel, after=False):
+ """Rename one file.
+
+ This can change the directory or the filename or both.
+
+        rename_one has several 'modes' of operation. First, it can rename a
+        physical file and update the inventory. That is the normal mode.
+        Second, it can update only the inventory without touching any physical
+        file.
+
+ rename_one uses the second mode if 'after == True' and 'to_rel' is not
+ versioned but present in the working tree.
+
+ rename_one uses the second mode if 'after == False' and 'from_rel' is
+ versioned but no longer in the working tree, and 'to_rel' is not
+ versioned but present in the working tree.
+
+ rename_one uses the first mode if 'after == False' and 'from_rel' is
+ versioned and present in the working tree, and 'to_rel' is not
+ versioned and not present in the working tree.
+
+ Everything else results in an error.
+ """
+ rename_entries = []
+
+ # create rename entries and tuples
+ from_tail = splitpath(from_rel)[-1]
+ from_inv, from_id = self._path2inv_file_id(from_rel)
+ if from_id is None:
+ # if file is missing in the inventory maybe it's in the basis_tree
+ basis_tree = self.branch.basis_tree()
+ from_id = basis_tree.path2id(from_rel)
+ if from_id is None:
+ raise errors.BzrRenameFailedError(from_rel,to_rel,
+ errors.NotVersionedError(path=from_rel))
+ # put entry back in the inventory so we can rename it
+ from_entry = basis_tree.root_inventory[from_id].copy()
+ from_inv.add(from_entry)
+ else:
+ from_inv, from_inv_id = self._unpack_file_id(from_id)
+ from_entry = from_inv[from_inv_id]
+ from_parent_id = from_entry.parent_id
+ to_dir, to_tail = os.path.split(to_rel)
+ to_inv, to_dir_id = self._path2inv_file_id(to_dir)
+ rename_entry = InventoryWorkingTree._RenameEntry(from_rel=from_rel,
+ from_id=from_id,
+ from_tail=from_tail,
+ from_parent_id=from_parent_id,
+ to_rel=to_rel, to_tail=to_tail,
+ to_parent_id=to_dir_id)
+ rename_entries.append(rename_entry)
+
+ # determine which move mode to use. checks also for movability
+ rename_entries = self._determine_mv_mode(rename_entries, after)
+
+ # check if the target changed directory and if the target directory is
+ # versioned
+ if to_dir_id is None:
+ raise errors.BzrMoveFailedError(from_rel,to_rel,
+ errors.NotVersionedError(path=to_dir))
+
+ # all checks done. now we can continue with our actual work
+ mutter('rename_one:\n'
+ ' from_id {%s}\n'
+ ' from_rel: %r\n'
+ ' to_rel: %r\n'
+ ' to_dir %r\n'
+ ' to_dir_id {%s}\n',
+ from_id, from_rel, to_rel, to_dir, to_dir_id)
+
+ self._move(rename_entries)
+ self._write_inventory(to_inv)
+
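+    # Illustrative sketch (hypothetical paths): record a rename that already
+    # happened on disk by passing after=True.
+    #
+    #   os.rename('old.txt', 'new.txt')               # done outside bzr
+    #   tree.rename_one('old.txt', 'new.txt', after=True)
+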
+ class _RenameEntry(object):
+ def __init__(self, from_rel, from_id, from_tail, from_parent_id,
+ to_rel, to_tail, to_parent_id, only_change_inv=False,
+ change_id=False):
+ self.from_rel = from_rel
+ self.from_id = from_id
+ self.from_tail = from_tail
+ self.from_parent_id = from_parent_id
+ self.to_rel = to_rel
+ self.to_tail = to_tail
+ self.to_parent_id = to_parent_id
+ self.change_id = change_id
+ self.only_change_inv = only_change_inv
+
+ def _determine_mv_mode(self, rename_entries, after=False):
+ """Determines for each from-to pair if both inventory and working tree
+ or only the inventory has to be changed.
+
+        Also does basic plausibility tests.
+ """
+ # FIXME: Handling of nested trees
+ inv = self.root_inventory
+
+ for rename_entry in rename_entries:
+ # store to local variables for easier reference
+ from_rel = rename_entry.from_rel
+ from_id = rename_entry.from_id
+ to_rel = rename_entry.to_rel
+ to_id = inv.path2id(to_rel)
+ only_change_inv = False
+ change_id = False
+
+ # check the inventory for source and destination
+ if from_id is None:
+ raise errors.BzrMoveFailedError(from_rel,to_rel,
+ errors.NotVersionedError(path=from_rel))
+ if to_id is not None:
+ allowed = False
+ # allow it with --after but only if dest is newly added
+ if after:
+ basis = self.basis_tree()
+ basis.lock_read()
+ try:
+ if not basis.has_id(to_id):
+ rename_entry.change_id = True
+ allowed = True
+ finally:
+ basis.unlock()
+ if not allowed:
+ raise errors.BzrMoveFailedError(from_rel,to_rel,
+ errors.AlreadyVersionedError(path=to_rel))
+
+ # try to determine the mode for rename (only change inv or change
+ # inv and file system)
+ if after:
+ if not self.has_filename(to_rel):
+ raise errors.BzrMoveFailedError(from_id,to_rel,
+ errors.NoSuchFile(path=to_rel,
+ extra="New file has not been created yet"))
+ only_change_inv = True
+ elif not self.has_filename(from_rel) and self.has_filename(to_rel):
+ only_change_inv = True
+ elif self.has_filename(from_rel) and not self.has_filename(to_rel):
+ only_change_inv = False
+ elif (not self.case_sensitive
+ and from_rel.lower() == to_rel.lower()
+ and self.has_filename(from_rel)):
+ only_change_inv = False
+ else:
+                # something is wrong, so let's determine what exactly
+ if not self.has_filename(from_rel) and \
+ not self.has_filename(to_rel):
+ raise errors.BzrRenameFailedError(from_rel, to_rel,
+ errors.PathsDoNotExist(paths=(from_rel, to_rel)))
+ else:
+ raise errors.RenameFailedFilesExist(from_rel, to_rel)
+ rename_entry.only_change_inv = only_change_inv
+ return rename_entries
+
+ def _move(self, rename_entries):
+ """Moves a list of files.
+
+ Depending on the value of the flag 'only_change_inv', the
+ file will be moved on the file system or not.
+ """
+ moved = []
+
+ for entry in rename_entries:
+ try:
+ self._move_entry(entry)
+ except:
+ self._rollback_move(moved)
+ raise
+ moved.append(entry)
+
+ def _rollback_move(self, moved):
+        """Try to rollback a previous move in case of a filesystem error."""
+ for entry in moved:
+ try:
+                self._move_entry(self._RenameEntry(
+ entry.to_rel, entry.from_id,
+ entry.to_tail, entry.to_parent_id, entry.from_rel,
+ entry.from_tail, entry.from_parent_id,
+ entry.only_change_inv))
+ except errors.BzrMoveFailedError, e:
+ raise errors.BzrMoveFailedError( '', '', "Rollback failed."
+ " The working tree is in an inconsistent state."
+ " Please consider doing a 'bzr revert'."
+ " Error message is: %s" % e)
+
+ def _move_entry(self, entry):
+ inv = self.root_inventory
+ from_rel_abs = self.abspath(entry.from_rel)
+ to_rel_abs = self.abspath(entry.to_rel)
+ if from_rel_abs == to_rel_abs:
+ raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
+ "Source and target are identical.")
+
+ if not entry.only_change_inv:
+ try:
+ osutils.rename(from_rel_abs, to_rel_abs)
+ except OSError, e:
+ raise errors.BzrMoveFailedError(entry.from_rel,
+ entry.to_rel, e[1])
+ if entry.change_id:
+ to_id = inv.path2id(entry.to_rel)
+ inv.remove_recursive_id(to_id)
+ inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
+
+ @needs_tree_write_lock
+ def unversion(self, file_ids):
+ """Remove the file ids in file_ids from the current versioned set.
+
+ When a file_id is unversioned, all of its children are automatically
+ unversioned.
+
+ :param file_ids: The file ids to stop versioning.
+ :raises: NoSuchId if any fileid is not currently versioned.
+ """
+ for file_id in file_ids:
+ if not self._inventory.has_id(file_id):
+ raise errors.NoSuchId(self, file_id)
+ for file_id in file_ids:
+ if self._inventory.has_id(file_id):
+ self._inventory.remove_recursive_id(file_id)
+ if len(file_ids):
+ # in the future this should just set a dirty bit to wait for the
+ # final unlock. However, until all methods of workingtree start
+            # with the current in-memory inventory rather than triggering
+ # a read, it is more complex - we need to teach read_inventory
+ # to know when to read, and when to not read first... and possibly
+ # to save first when the in memory one may be corrupted.
+ # so for now, we just only write it if it is indeed dirty.
+ # - RBC 20060907
+ self._write_inventory(self._inventory)
+
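+    # Illustrative sketch (hypothetical path): stop versioning a file without
+    # deleting it from disk.
+    #
+    #   tree.unversion([tree.path2id('data.bin')])
+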
+ def stored_kind(self, file_id):
+ """See Tree.stored_kind"""
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].kind
+
+ def extras(self):
+ """Yield all unversioned files in this WorkingTree.
+
+ If there are any unversioned directories then only the directory is
+ returned, not all its children. But if there are unversioned files
+ under a versioned subdirectory, they are returned.
+
+ Currently returned depth-first, sorted by name within directories.
+ This is the same order used by 'osutils.walkdirs'.
+ """
+ ## TODO: Work from given directory downwards
+ for path, dir_entry in self.iter_entries_by_dir():
+ if dir_entry.kind != 'directory':
+ continue
+ # mutter("search for unknowns in %r", path)
+ dirabs = self.abspath(path)
+ if not isdir(dirabs):
+ # e.g. directory deleted
+ continue
+
+ fl = []
+ for subf in os.listdir(dirabs):
+ if self.bzrdir.is_control_filename(subf):
+ continue
+ if subf not in dir_entry.children:
+ try:
+ (subf_norm,
+ can_access) = osutils.normalized_filename(subf)
+ except UnicodeDecodeError:
+ path_os_enc = path.encode(osutils._fs_enc)
+ relpath = path_os_enc + '/' + subf
+ raise errors.BadFilenameEncoding(relpath,
+ osutils._fs_enc)
+ if subf_norm != subf and can_access:
+ if subf_norm not in dir_entry.children:
+ fl.append(subf_norm)
+ else:
+ fl.append(subf)
+
+ fl.sort()
+ for subf in fl:
+ subp = pathjoin(path, subf)
+ yield subp
+
+ def _walkdirs(self, prefix=""):
+ """Walk the directories of this tree.
+
+        :param prefix: is used as the directory to start with.
+ :returns: a generator which yields items in the form::
+
+            ((current_directory_path, fileid),
+ [(file1_path, file1_name, file1_kind, None, file1_id,
+ file1_kind), ... ])
+ """
+ _directory = 'directory'
+ # get the root in the inventory
+ inv, top_id = self._path2inv_file_id(prefix)
+ if top_id is None:
+ pending = []
+ else:
+ pending = [(prefix, '', _directory, None, top_id, None)]
+ while pending:
+ dirblock = []
+ currentdir = pending.pop()
+ # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
+ top_id = currentdir[4]
+ if currentdir[0]:
+ relroot = currentdir[0] + '/'
+ else:
+ relroot = ""
+ # FIXME: stash the node in pending
+ entry = inv[top_id]
+ if entry.kind == 'directory':
+ for name, child in entry.sorted_children():
+ dirblock.append((relroot + name, name, child.kind, None,
+ child.file_id, child.kind
+ ))
+ yield (currentdir[0], entry.file_id), dirblock
+ # push the user specified dirs from dirblock
+ for dir in reversed(dirblock):
+ if dir[2] == _directory:
+ pending.append(dir)
+
+ @needs_write_lock
+ def update_feature_flags(self, updated_flags):
+        """Update the feature flags for this working tree.
+
+        :param updated_flags: Dictionary mapping feature names to necessities.
+            A necessity can be None to indicate the feature should be removed.
+ """
+ self._format._update_feature_flags(updated_flags)
+ self.control_transport.put_bytes('format', self._format.as_string())
+
+
+class WorkingTreeFormatRegistry(controldir.ControlComponentFormatRegistry):
+ """Registry for working tree formats."""
+
+ def __init__(self, other_registry=None):
+ super(WorkingTreeFormatRegistry, self).__init__(other_registry)
+ self._default_format = None
+ self._default_format_key = None
+
+ def get_default(self):
+ """Return the current default format."""
+ if (self._default_format_key is not None and
+ self._default_format is None):
+ self._default_format = self.get(self._default_format_key)
+ return self._default_format
+
+ def set_default(self, format):
+ """Set the default format."""
+ self._default_format = format
+ self._default_format_key = None
+
+ def set_default_key(self, format_string):
+ """Set the default format by its format string."""
+ self._default_format_key = format_string
+ self._default_format = None
+
+
+format_registry = WorkingTreeFormatRegistry()
+
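+# Illustrative sketch (the format string shown is hypothetical): the registry
+# resolves the default lazily, so it is not looked up until get_default() is
+# first called.
+#
+#   format_registry.set_default_key("Bazaar Working Tree Format N (bzr x.y)\n")
+#   fmt = format_registry.get_default()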
+
+class WorkingTreeFormat(controldir.ControlComponentFormat):
+ """An encapsulation of the initialization and open routines for a format.
+
+ Formats provide three things:
+ * An initialization routine,
+ * a format string,
+ * an open routine.
+
+ Formats are placed in a dict by their format string for reference
+ during workingtree opening. It's not required that these be instances, they
+ can be classes themselves with class methods - it simply depends on
+ whether state is needed for a given format or not.
+
+ Once a format is deprecated, just deprecate the initialize and open
+ methods on the format class. Do not deprecate the object, as the
+ object will be created every time regardless.
+ """
+
+ requires_rich_root = False
+
+ upgrade_recommended = False
+
+ requires_normalized_unicode_filenames = False
+
+ case_sensitive_filename = "FoRMaT"
+
+ missing_parent_conflicts = False
+ """If this format supports missing parent conflicts."""
+
+ supports_versioned_directories = None
+
+ def initialize(self, controldir, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """Initialize a new working tree in controldir.
+
+ :param controldir: ControlDir to initialize the working tree in.
+ :param revision_id: allows creating a working tree at a different
+ revision than the branch is at.
+ :param from_branch: Branch to checkout
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ :param hardlink: If true, hard-link files from accelerator_tree,
+ where possible.
+ """
+ raise NotImplementedError(self.initialize)
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def get_format_description(self):
+ """Return the short description for this format."""
+ raise NotImplementedError(self.get_format_description)
+
+ def is_supported(self):
+ """Is this format supported?
+
+ Supported formats can be initialized and opened.
+ Unsupported formats may not support initialization or committing or
+ some other features depending on the reason for not being supported.
+ """
+ return True
+
+ def supports_content_filtering(self):
+ """True if this format supports content filtering."""
+ return False
+
+ def supports_views(self):
+ """True if this format supports stored views."""
+ return False
+
+ def get_controldir_for_branch(self):
+ """Get the control directory format for creating branches.
+
+ This is to support testing of working tree formats that can not exist
+ in the same control directory as a branch.
+ """
+ return self._matchingbzrdir
+
+
+class WorkingTreeFormatMetaDir(bzrdir.BzrFormat, WorkingTreeFormat):
+ """Base class for working trees that live in bzr meta directories."""
+
+ def __init__(self):
+ WorkingTreeFormat.__init__(self)
+ bzrdir.BzrFormat.__init__(self)
+
+ @classmethod
+ def find_format_string(klass, controldir):
+ """Return format name for the working tree object in controldir."""
+ try:
+ transport = controldir.get_workingtree_transport(None)
+ return transport.get_bytes("format")
+ except errors.NoSuchFile:
+ raise errors.NoWorkingTree(base=transport.base)
+
+ @classmethod
+ def find_format(klass, controldir):
+ """Return the format for the working tree object in controldir."""
+ format_string = klass.find_format_string(controldir)
+ return klass._find_format(format_registry, 'working tree',
+ format_string)
+
+ def check_support_status(self, allow_unsupported, recommend_upgrade=True,
+ basedir=None):
+ WorkingTreeFormat.check_support_status(self,
+ allow_unsupported=allow_unsupported, recommend_upgrade=recommend_upgrade,
+ basedir=basedir)
+ bzrdir.BzrFormat.check_support_status(self, allow_unsupported=allow_unsupported,
+ recommend_upgrade=recommend_upgrade, basedir=basedir)
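+
+    # Illustrative usage sketch (not part of the original source):
+    # 'a_controldir' is assumed to be an already-opened ControlDir whose
+    # working tree transport exists; this is, roughly, the probing step
+    # that precedes opening a tree.
+    def _example_probe_format(a_controldir):
+        """Return the registered format for the tree in a_controldir."""
+        fmt = WorkingTreeFormatMetaDir.find_format(a_controldir)
+        fmt.check_support_status(allow_unsupported=False)
+        return fmt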
+
+
+format_registry.register_lazy("Bazaar Working Tree Format 4 (bzr 0.15)\n",
+ "bzrlib.workingtree_4", "WorkingTreeFormat4")
+format_registry.register_lazy("Bazaar Working Tree Format 5 (bzr 1.11)\n",
+ "bzrlib.workingtree_4", "WorkingTreeFormat5")
+format_registry.register_lazy("Bazaar Working Tree Format 6 (bzr 1.14)\n",
+ "bzrlib.workingtree_4", "WorkingTreeFormat6")
+format_registry.register_lazy("Bazaar-NG Working Tree format 3",
+ "bzrlib.workingtree_3", "WorkingTreeFormat3")
+format_registry.set_default_key("Bazaar Working Tree Format 6 (bzr 1.14)\n")
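+
+# Illustrative note (not part of the original source): the default format
+# is resolved lazily. format_registry.get_default() looks up the
+# "Bazaar Working Tree Format 6" entry registered above, importing
+# bzrlib.workingtree_4 on first use, and caches the result until
+# set_default() or set_default_key() is called again.
+# default_wt_format = format_registry.get_default()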
diff --git a/bzrlib/workingtree_3.py b/bzrlib/workingtree_3.py
new file mode 100644
index 0000000..bd3fe7b
--- /dev/null
+++ b/bzrlib/workingtree_3.py
@@ -0,0 +1,267 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""WorkingTree3 format and implementation.
+
+"""
+
+from __future__ import absolute_import
+
+import errno
+
+from bzrlib import (
+ bzrdir,
+ errors,
+ hashcache,
+ inventory,
+ revision as _mod_revision,
+ trace,
+ transform,
+ )
+from bzrlib.decorators import (
+ needs_read_lock,
+ )
+from bzrlib.lockable_files import LockableFiles
+from bzrlib.lockdir import LockDir
+from bzrlib.mutabletree import MutableTree
+from bzrlib.transport.local import LocalTransport
+from bzrlib.workingtree import (
+ InventoryWorkingTree,
+ WorkingTreeFormatMetaDir,
+ )
+
+
+class PreDirStateWorkingTree(InventoryWorkingTree):
+
+ def __init__(self, basedir='.', *args, **kwargs):
+ super(PreDirStateWorkingTree, self).__init__(basedir, *args, **kwargs)
+ # update the whole cache up front and write to disk if anything changed;
+ # in the future we might want to do this more selectively.
+ # two possible ways offer themselves: in self._unlock, write the cache
+ # if needed, or, when the cache sees a change, append it to the hash
+ # cache file, and have the parser take the most recent entry for a
+ # given path only.
+ wt_trans = self.bzrdir.get_workingtree_transport(None)
+ cache_filename = wt_trans.local_abspath('stat-cache')
+ self._hashcache = hashcache.HashCache(basedir, cache_filename,
+ self.bzrdir._get_file_mode(),
+ self._content_filter_stack_provider())
+ hc = self._hashcache
+ hc.read()
+ # is this scan needed? it makes things kinda slow.
+ #hc.scan()
+
+ if hc.needs_write:
+ trace.mutter("write hc")
+ hc.write()
+
+ def _write_hashcache_if_dirty(self):
+ """Write out the hashcache if it is dirty."""
+ if self._hashcache.needs_write:
+ try:
+ self._hashcache.write()
+ except OSError, e:
+ if e.errno not in (errno.EPERM, errno.EACCES):
+ raise
+ # TODO: jam 20061219 Should this be a warning? A single line
+ # warning might be sufficient to let the user know what
+ # is going on.
+ trace.mutter('Could not write hashcache for %s\nError: %s',
+ self._hashcache.cache_file_name(), e)
+
+ @needs_read_lock
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ if not path:
+ path = self._inventory.id2path(file_id)
+ return self._hashcache.get_sha1(path, stat_value)
+
+
+class WorkingTree3(PreDirStateWorkingTree):
+ """This is the Format 3 working tree.
+
+ This differs from the base WorkingTree by:
+ - having its own file lock
+ - having its own last-revision property.
+
+ This is new in bzr 0.8
+ """
+
+ @needs_read_lock
+ def _last_revision(self):
+ """See Mutable.last_revision."""
+ try:
+ return self._transport.get_bytes('last-revision')
+ except errors.NoSuchFile:
+ return _mod_revision.NULL_REVISION
+
+ def _change_last_revision(self, revision_id):
+ """See WorkingTree._change_last_revision."""
+ if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
+ try:
+ self._transport.delete('last-revision')
+ except errors.NoSuchFile:
+ pass
+ return False
+ else:
+ self._transport.put_bytes('last-revision', revision_id,
+ mode=self.bzrdir._get_file_mode())
+ return True
+
+ def _get_check_refs(self):
+ """Return the references needed to perform a check of this tree."""
+ return [('trees', self.last_revision())]
+
+ def unlock(self):
+ if self._control_files._lock_count == 1:
+ # do non-implementation specific cleanup
+ self._cleanup()
+ # _inventory_is_modified is always False during a read lock.
+ if self._inventory_is_modified:
+ self.flush()
+ self._write_hashcache_if_dirty()
+ # reverse order of locking.
+ try:
+ return self._control_files.unlock()
+ finally:
+ self.branch.unlock()
+
+
+class WorkingTreeFormat3(WorkingTreeFormatMetaDir):
+ """The second working tree format updated to record a format marker.
+
+ This format:
+ - exists within a metadir controlling .bzr
+ - includes an explicit version marker for the workingtree control
+ files, separate from the ControlDir format
+ - modifies the hash cache format
+ - is new in bzr 0.8
+ - uses a LockDir to guard access for writes.
+ """
+
+ upgrade_recommended = True
+
+ missing_parent_conflicts = True
+
+ supports_versioned_directories = True
+
+ @classmethod
+ def get_format_string(cls):
+ """See WorkingTreeFormat.get_format_string()."""
+ return "Bazaar-NG Working Tree format 3"
+
+ def get_format_description(self):
+ """See WorkingTreeFormat.get_format_description()."""
+ return "Working tree format 3"
+
+ _tree_class = WorkingTree3
+
+ def __get_matchingbzrdir(self):
+ return bzrdir.BzrDirMetaFormat1()
+
+ _matchingbzrdir = property(__get_matchingbzrdir)
+
+ def _open_control_files(self, a_bzrdir):
+ transport = a_bzrdir.get_workingtree_transport(None)
+ return LockableFiles(transport, 'lock', LockDir)
+
+ def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """See WorkingTreeFormat.initialize().
+
+ :param revision_id: if supplied, create a working tree at a different
+ revision than the branch is at.
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ :param hardlink: If true, hard-link files from accelerator_tree,
+ where possible.
+ """
+ if not isinstance(a_bzrdir.transport, LocalTransport):
+ raise errors.NotLocalUrl(a_bzrdir.transport.base)
+ transport = a_bzrdir.get_workingtree_transport(self)
+ control_files = self._open_control_files(a_bzrdir)
+ control_files.create_lock()
+ control_files.lock_write()
+ transport.put_bytes('format', self.as_string(),
+ mode=a_bzrdir._get_file_mode())
+ if from_branch is not None:
+ branch = from_branch
+ else:
+ branch = a_bzrdir.open_branch()
+ if revision_id is None:
+ revision_id = _mod_revision.ensure_null(branch.last_revision())
+ # WorkingTree3 can handle an inventory which has a unique root id.
+ # as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
+ # those trees. And because there isn't a format bump in between, we
+ # are maintaining compatibility with older clients.
+ # inv = Inventory(root_id=gen_root_id())
+ inv = self._initial_inventory()
+ wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
+ branch,
+ inv,
+ _internal=True,
+ _format=self,
+ _bzrdir=a_bzrdir,
+ _control_files=control_files)
+ wt.lock_tree_write()
+ try:
+ basis_tree = branch.repository.revision_tree(revision_id)
+ # only set an explicit root id if there is one to set.
+ if basis_tree.get_root_id() is not None:
+ wt.set_root_id(basis_tree.get_root_id())
+ if revision_id == _mod_revision.NULL_REVISION:
+ wt.set_parent_trees([])
+ else:
+ wt.set_parent_trees([(revision_id, basis_tree)])
+ transform.build_tree(basis_tree, wt)
+ for hook in MutableTree.hooks['post_build_tree']:
+ hook(wt)
+ finally:
+ # Unlock in this order so that the unlock-triggers-flush in
+ # WorkingTree is given a chance to fire.
+ control_files.unlock()
+ wt.unlock()
+ return wt
+
+ def _initial_inventory(self):
+ return inventory.Inventory()
+
+ def open(self, a_bzrdir, _found=False):
+ """Return the WorkingTree object for a_bzrdir
+
+ _found is a private parameter, do not use it. It is used to indicate
+ if format probing has already been done.
+ """
+ if not _found:
+ # we are being called directly and must probe.
+ raise NotImplementedError
+ if not isinstance(a_bzrdir.transport, LocalTransport):
+ raise errors.NotLocalUrl(a_bzrdir.transport.base)
+ wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
+ return wt
+
+ def _open(self, a_bzrdir, control_files):
+ """Open the tree itself.
+
+ :param a_bzrdir: the dir for the tree.
+ :param control_files: the control files for the tree.
+ """
+ return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
+ _internal=True,
+ _format=self,
+ _bzrdir=a_bzrdir,
+ _control_files=control_files)
diff --git a/bzrlib/workingtree_4.py b/bzrlib/workingtree_4.py
new file mode 100644
index 0000000..6b68377
--- /dev/null
+++ b/bzrlib/workingtree_4.py
@@ -0,0 +1,2354 @@
+# Copyright (C) 2007-2011 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""WorkingTree4 format and implementation.
+
+WorkingTree4 provides the dirstate based working tree logic.
+
+To get a WorkingTree, call bzrdir.open_workingtree() or
+WorkingTree.open(dir).
+"""
+
+from __future__ import absolute_import
+
+from cStringIO import StringIO
+import os
+import sys
+
+from bzrlib.lazy_import import lazy_import
+lazy_import(globals(), """
+import errno
+import stat
+
+from bzrlib import (
+ bzrdir,
+ cache_utf8,
+ config,
+ conflicts as _mod_conflicts,
+ controldir,
+ debug,
+ dirstate,
+ errors,
+ filters as _mod_filters,
+ generate_ids,
+ osutils,
+ revision as _mod_revision,
+ revisiontree,
+ trace,
+ transform,
+ views,
+ )
+""")
+
+from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.inventory import Inventory, ROOT_ID, entry_factory
+from bzrlib.lock import LogicalLockResult
+from bzrlib.lockable_files import LockableFiles
+from bzrlib.lockdir import LockDir
+from bzrlib.mutabletree import (
+ MutableTree,
+ needs_tree_write_lock,
+ )
+from bzrlib.osutils import (
+ file_kind,
+ isdir,
+ pathjoin,
+ realpath,
+ safe_unicode,
+ )
+from bzrlib.symbol_versioning import (
+ deprecated_in,
+ deprecated_method,
+ )
+from bzrlib.transport.local import LocalTransport
+from bzrlib.tree import (
+ InterTree,
+ InventoryTree,
+ )
+from bzrlib.workingtree import (
+ InventoryWorkingTree,
+ WorkingTree,
+ WorkingTreeFormatMetaDir,
+ )
+
+
+class DirStateWorkingTree(InventoryWorkingTree):
+
+ def __init__(self, basedir,
+ branch,
+ _control_files=None,
+ _format=None,
+ _bzrdir=None):
+ """Construct a WorkingTree for basedir.
+
+ If the branch is not supplied, it is opened automatically.
+ If the branch is supplied, it must be the branch for this basedir.
+ (branch.base is not cross checked, because for remote branches that
+ would be meaningless).
+ """
+ self._format = _format
+ self.bzrdir = _bzrdir
+ basedir = safe_unicode(basedir)
+ trace.mutter("opening working tree %r", basedir)
+ self._branch = branch
+ self.basedir = realpath(basedir)
+ # if branch is at our basedir and is a format 6 or less
+ # assume all other formats have their own control files.
+ self._control_files = _control_files
+ self._transport = self._control_files._transport
+ self._dirty = None
+ #-------------
+ # during a read or write lock these objects are set, and are
+ # None the rest of the time.
+ self._dirstate = None
+ self._inventory = None
+ #-------------
+ self._setup_directory_is_tree_reference()
+ self._detect_case_handling()
+ self._rules_searcher = None
+ self.views = self._make_views()
+ #--- allow tests to select the dirstate iter_changes implementation
+ self._iter_changes = dirstate._process_entry
+
+ @needs_tree_write_lock
+ def _add(self, files, ids, kinds):
+ """See MutableTree._add."""
+ state = self.current_dirstate()
+ for f, file_id, kind in zip(files, ids, kinds):
+ f = f.strip('/')
+ if self.path2id(f):
+ # special case tree root handling.
+ if f == '' and self.path2id(f) == ROOT_ID:
+ state.set_path_id('', generate_ids.gen_file_id(f))
+ continue
+ if file_id is None:
+ file_id = generate_ids.gen_file_id(f)
+ # deliberately add the file with no cached stat or sha1
+ # - on the first access it will be gathered, and we can
+ # always change this once tests are all passing.
+ state.add(f, file_id, kind, None, '')
+ self._make_dirty(reset_inventory=True)
+
+ def _get_check_refs(self):
+ """Return the references needed to perform a check of this tree."""
+ return [('trees', self.last_revision())]
+
+ def _make_dirty(self, reset_inventory):
+ """Make the tree state dirty.
+
+ :param reset_inventory: True if the cached inventory should be removed
+ (presuming there is one).
+ """
+ self._dirty = True
+ if reset_inventory and self._inventory is not None:
+ self._inventory = None
+
+ @needs_tree_write_lock
+ def add_reference(self, sub_tree):
+ # use standard implementation, which calls back to self._add
+ #
+ # So we don't store the reference_revision in the working dirstate,
+ # it's just recorded at the moment of commit.
+ self._add_reference(sub_tree)
+
+ def break_lock(self):
+ """Break a lock if one is present from another instance.
+
+ Uses the ui factory to ask for confirmation if the lock may be from
+ an active process.
+
+ This will probe the repository for its lock as well.
+ """
+ # if the dirstate is locked by an active process, reject the break lock
+ # call.
+ try:
+ if self._dirstate is None:
+ clear = True
+ else:
+ clear = False
+ state = self._current_dirstate()
+ if state._lock_token is not None:
+ # we already have it locked. sheesh, can't break our own lock.
+ raise errors.LockActive(self.basedir)
+ else:
+ try:
+ # try for a write lock - need permission to get one anyhow
+ # to break locks.
+ state.lock_write()
+ except errors.LockContention:
+ # oslocks fail when a process is still live: fail.
+ # TODO: get the locked lockdir info and give to the user to
+ # assist in debugging.
+ raise errors.LockActive(self.basedir)
+ else:
+ state.unlock()
+ finally:
+ if clear:
+ self._dirstate = None
+ self._control_files.break_lock()
+ self.branch.break_lock()
+
+ def _comparison_data(self, entry, path):
+ kind, executable, stat_value = \
+ WorkingTree._comparison_data(self, entry, path)
+ # it looks like a plain directory, but it's really a reference -- see
+ # also kind()
+ if (self._repo_supports_tree_reference and kind == 'directory'
+ and entry is not None and entry.kind == 'tree-reference'):
+ kind = 'tree-reference'
+ return kind, executable, stat_value
+
+ @needs_write_lock
+ def commit(self, message=None, revprops=None, *args, **kwargs):
+ # mark the tree as dirty post commit - commit
+ # can change the current versioned list by doing deletes.
+ result = WorkingTree.commit(self, message, revprops, *args, **kwargs)
+ self._make_dirty(reset_inventory=True)
+ return result
+
+ def current_dirstate(self):
+ """Return the current dirstate object.
+
+ This is not part of the tree interface and only exposed for ease of
+ testing.
+
+ :raises errors.ObjectNotLocked: when not in a lock.
+ """
+ self._must_be_locked()
+ return self._current_dirstate()
+
+ def _current_dirstate(self):
+ """Internal function that does not check lock status.
+
+ This is needed for break_lock which also needs the dirstate.
+ """
+ if self._dirstate is not None:
+ return self._dirstate
+ local_path = self.bzrdir.get_workingtree_transport(None
+ ).local_abspath('dirstate')
+ self._dirstate = dirstate.DirState.on_file(local_path,
+ self._sha1_provider(), self._worth_saving_limit())
+ return self._dirstate
+
+ def _sha1_provider(self):
+ """A function that returns a SHA1Provider suitable for this tree.
+
+ :return: None if content filtering is not supported by this tree.
+ Otherwise, a SHA1Provider is returned that sha's the canonical
+ form of files, i.e. after read filters are applied.
+ """
+ if self.supports_content_filtering():
+ return ContentFilterAwareSHA1Provider(self)
+ else:
+ return None
+
+ def _worth_saving_limit(self):
+ """How many hash changes are ok before we must save the dirstate.
+
+ :return: an integer. -1 means never save.
+ """
+ conf = self.get_config_stack()
+ return conf.get('bzr.workingtree.worth_saving_limit')
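+
+    # Illustrative note (not part of the original source): since the limit
+    # comes from the configuration stack, it can be tuned in a bzr
+    # configuration file, e.g. (assumed example values):
+    #
+    #   [DEFAULT]
+    #   bzr.workingtree.worth_saving_limit = 50
+    #
+    # and a value of -1 disables saving the dirstate purely because cached
+    # hashes changed.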
+
+ def filter_unversioned_files(self, paths):
+ """Filter out paths that are versioned.
+
+ :return: set of paths.
+ """
+ # TODO: make a generic multi-bisect routine; roughly, it should list
+ # the paths, then process one half at a time recursively, and feed the
+ # results of each bisect in further still
+ paths = sorted(paths)
+ result = set()
+ state = self.current_dirstate()
+ # TODO we want a paths_to_dirblocks helper I think
+ for path in paths:
+ dirname, basename = os.path.split(path.encode('utf8'))
+ _, _, _, path_is_versioned = state._get_block_entry_index(
+ dirname, basename, 0)
+ if not path_is_versioned:
+ result.add(path)
+ return result
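+
+    # Illustrative usage sketch (not part of the original source): the
+    # example paths are made up.
+    def _example_report_unversioned(tree):
+        """Return which of two example paths are not versioned in tree."""
+        tree.lock_read()
+        try:
+            return tree.filter_unversioned_files(
+                ['README', 'build/output.tmp'])
+        finally:
+            tree.unlock()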
+
+ def flush(self):
+ """Write all cached data to disk."""
+ if self._control_files._lock_mode != 'w':
+ raise errors.NotWriteLocked(self)
+ self.current_dirstate().save()
+ self._inventory = None
+ self._dirty = False
+
+ @needs_tree_write_lock
+ def _gather_kinds(self, files, kinds):
+ """See MutableTree._gather_kinds."""
+ for pos, f in enumerate(files):
+ if kinds[pos] is None:
+ kinds[pos] = self._kind(f)
+
+ def _generate_inventory(self):
+ """Create and set self.inventory from the dirstate object.
+
+ This is relatively expensive: we have to walk the entire dirstate.
+ Ideally we would not, and can deprecate this function.
+ """
+ #: uncomment to trap on inventory requests.
+ # import pdb;pdb.set_trace()
+ state = self.current_dirstate()
+ state._read_dirblocks_if_needed()
+ root_key, current_entry = self._get_entry(path='')
+ current_id = root_key[2]
+ if not (current_entry[0][0] == 'd'): # directory
+ raise AssertionError(current_entry)
+ inv = Inventory(root_id=current_id)
+ # Turn some things into local variables
+ minikind_to_kind = dirstate.DirState._minikind_to_kind
+ factory = entry_factory
+ utf8_decode = cache_utf8._utf8_decode
+ inv_byid = inv._byid
+ # we could do this straight out of the dirstate; it might be fast
+ # and should be profiled - RBC 20070216
+ parent_ies = {'' : inv.root}
+ for block in state._dirblocks[1:]: # skip the root
+ dirname = block[0]
+ try:
+ parent_ie = parent_ies[dirname]
+ except KeyError:
+ # all the paths in this block are not versioned in this tree
+ continue
+ for key, entry in block[1]:
+ minikind, link_or_sha1, size, executable, stat = entry[0]
+ if minikind in ('a', 'r'): # absent, relocated
+ # a parent tree only entry
+ continue
+ name = key[1]
+ name_unicode = utf8_decode(name)[0]
+ file_id = key[2]
+ kind = minikind_to_kind[minikind]
+ inv_entry = factory[kind](file_id, name_unicode,
+ parent_ie.file_id)
+ if kind == 'file':
+ # This is only needed on win32, where this is the only way
+ # we know the executable bit.
+ inv_entry.executable = executable
+ # not strictly needed: working tree
+ #inv_entry.text_size = size
+ #inv_entry.text_sha1 = sha1
+ elif kind == 'directory':
+ # add this entry to the parent map.
+ parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
+ elif kind == 'tree-reference':
+ if not self._repo_supports_tree_reference:
+ raise errors.UnsupportedOperation(
+ self._generate_inventory,
+ self.branch.repository)
+ inv_entry.reference_revision = link_or_sha1 or None
+ elif kind != 'symlink':
+ raise AssertionError("unknown kind %r" % kind)
+ # These checks cost us around 40ms on a 55k entry tree
+ if file_id in inv_byid:
+ raise AssertionError('file_id %s already in'
+ ' inventory as %s' % (file_id, inv_byid[file_id]))
+ if name_unicode in parent_ie.children:
+ raise AssertionError('name %r already in parent'
+ % (name_unicode,))
+ inv_byid[file_id] = inv_entry
+ parent_ie.children[name_unicode] = inv_entry
+ self._inventory = inv
+
+ def _get_entry(self, file_id=None, path=None):
+ """Get the dirstate row for file_id or path.
+
+ If either file_id or path is supplied, it is used as the key to lookup.
+ If both are supplied, the fastest lookup is used, and an error is
+ raised if they do not both point at the same row.
+
+ :param file_id: An optional unicode file_id to be looked up.
+ :param path: An optional unicode path to be looked up.
+ :return: The dirstate row tuple for path/file_id, or (None, None)
+ """
+ if file_id is None and path is None:
+ raise errors.BzrError('must supply file_id or path')
+ state = self.current_dirstate()
+ if path is not None:
+ path = path.encode('utf8')
+ return state._get_entry(0, fileid_utf8=file_id, path_utf8=path)
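+
+    # Illustrative usage sketch (not part of the original source):
+    # 'some/path' is a made-up path used only for the example.
+    def _example_file_id_for_path(tree, path=u'some/path'):
+        """Return the file id recorded for path, or None if unversioned."""
+        tree.lock_read()
+        try:
+            key, details = tree._get_entry(path=path)
+        finally:
+            tree.unlock()
+        if key is None:
+            return None
+        # key is (dirname_utf8, basename_utf8, file_id)
+        return key[2]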
+
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ # check file id is valid unconditionally.
+ entry = self._get_entry(file_id=file_id, path=path)
+ if entry[0] is None:
+ raise errors.NoSuchId(self, file_id)
+ if path is None:
+ path = pathjoin(entry[0][0], entry[0][1]).decode('utf8')
+
+ file_abspath = self.abspath(path)
+ state = self.current_dirstate()
+ if stat_value is None:
+ try:
+ stat_value = osutils.lstat(file_abspath)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ link_or_sha1 = dirstate.update_entry(state, entry, file_abspath,
+ stat_value=stat_value)
+ if entry[1][0][0] == 'f':
+ if link_or_sha1 is None:
+ file_obj, statvalue = self.get_file_with_stat(file_id, path)
+ try:
+ sha1 = osutils.sha_file(file_obj)
+ finally:
+ file_obj.close()
+ self._observed_sha1(file_id, path, (sha1, statvalue))
+ return sha1
+ else:
+ return link_or_sha1
+ return None
+
+ def _get_root_inventory(self):
+ """Get the inventory for the tree. This is only valid within a lock."""
+ if 'evil' in debug.debug_flags:
+ trace.mutter_callsite(2,
+ "accessing .inventory forces a size of tree translation.")
+ if self._inventory is not None:
+ return self._inventory
+ self._must_be_locked()
+ self._generate_inventory()
+ return self._inventory
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def _get_inventory(self):
+ return self.root_inventory
+
+ inventory = property(_get_inventory,
+ doc="Inventory of this Tree")
+
+ root_inventory = property(_get_root_inventory,
+ "Root inventory of this tree")
+
+ @needs_read_lock
+ def get_parent_ids(self):
+ """See Tree.get_parent_ids.
+
+ This implementation requests the ids list from the dirstate file.
+ """
+ return self.current_dirstate().get_parent_ids()
+
+ def get_reference_revision(self, file_id, path=None):
+ # referenced tree's revision is whatever's currently there
+ return self.get_nested_tree(file_id, path).last_revision()
+
+ def get_nested_tree(self, file_id, path=None):
+ if path is None:
+ path = self.id2path(file_id)
+ # else: check file_id is at path?
+ return WorkingTree.open(self.abspath(path))
+
+ @needs_read_lock
+ def get_root_id(self):
+ """Return the id of this trees root"""
+ return self._get_entry(path='')[0][2]
+
+ def has_id(self, file_id):
+ state = self.current_dirstate()
+ row, parents = self._get_entry(file_id=file_id)
+ if row is None:
+ return False
+ return osutils.lexists(pathjoin(
+ self.basedir, row[0].decode('utf8'), row[1].decode('utf8')))
+
+ def has_or_had_id(self, file_id):
+ state = self.current_dirstate()
+ row, parents = self._get_entry(file_id=file_id)
+ return row is not None
+
+ @needs_read_lock
+ def id2path(self, file_id):
+ "Convert a file-id to a path."
+ state = self.current_dirstate()
+ entry = self._get_entry(file_id=file_id)
+ if entry == (None, None):
+ raise errors.NoSuchId(tree=self, file_id=file_id)
+ path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
+ return path_utf8.decode('utf8')
+
+ def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
+ entry = self._get_entry(path=path)
+ if entry == (None, None):
+ return False # Missing entries are not executable
+ return entry[1][0][3] # Executable?
+
+ def is_executable(self, file_id, path=None):
+ """Test if a file is executable or not.
+
+ Note: The caller is expected to take a read-lock before calling this.
+ """
+ if not self._supports_executable():
+ entry = self._get_entry(file_id=file_id, path=path)
+ if entry == (None, None):
+ return False
+ return entry[1][0][3]
+ else:
+ self._must_be_locked()
+ if not path:
+ path = self.id2path(file_id)
+ mode = osutils.lstat(self.abspath(path)).st_mode
+ return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
+
+ def all_file_ids(self):
+ """See Tree.iter_all_file_ids"""
+ self._must_be_locked()
+ result = set()
+ for key, tree_details in self.current_dirstate()._iter_entries():
+ if tree_details[0][0] in ('a', 'r'): # relocated
+ continue
+ result.add(key[2])
+ return result
+
+ @needs_read_lock
+ def __iter__(self):
+ """Iterate through file_ids for this tree.
+
+ file_ids are in a WorkingTree if they are in the working inventory
+ and the working file exists.
+ """
+ result = []
+ for key, tree_details in self.current_dirstate()._iter_entries():
+ if tree_details[0][0] in ('a', 'r'): # absent, relocated
+ # not relevant to the working tree
+ continue
+ path = pathjoin(self.basedir, key[0].decode('utf8'), key[1].decode('utf8'))
+ if osutils.lexists(path):
+ result.append(key[2])
+ return iter(result)
+
+ def iter_references(self):
+ if not self._repo_supports_tree_reference:
+ # When the repo doesn't support references, we will have nothing to
+ # return
+ return
+ for key, tree_details in self.current_dirstate()._iter_entries():
+ if tree_details[0][0] in ('a', 'r'): # absent, relocated
+ # not relevant to the working tree
+ continue
+ if not key[1]:
+ # the root is not a reference.
+ continue
+ relpath = pathjoin(key[0].decode('utf8'), key[1].decode('utf8'))
+ try:
+ if self._kind(relpath) == 'tree-reference':
+ yield relpath, key[2]
+ except errors.NoSuchFile:
+ # path is missing on disk.
+ continue
+
+ def _observed_sha1(self, file_id, path, (sha1, statvalue)):
+ """See MutableTree._observed_sha1."""
+ state = self.current_dirstate()
+ entry = self._get_entry(file_id=file_id, path=path)
+ state._observed_sha1(entry, sha1, statvalue)
+
+ def kind(self, file_id):
+ """Return the kind of a file.
+
+ This is always the actual kind that's on disk, regardless of what it
+ was added as.
+
+ Note: The caller is expected to take a read-lock before calling this.
+ """
+ relpath = self.id2path(file_id)
+ if relpath is None:
+ raise AssertionError(
+ "path for id {%s} is None!" % file_id)
+ return self._kind(relpath)
+
+ def _kind(self, relpath):
+ abspath = self.abspath(relpath)
+ kind = file_kind(abspath)
+ if (self._repo_supports_tree_reference and kind == 'directory'):
+ entry = self._get_entry(path=relpath)
+ if entry[1] is not None:
+ if entry[1][0][0] == 't':
+ kind = 'tree-reference'
+ return kind
+
+ @needs_read_lock
+ def _last_revision(self):
+ """See Mutable.last_revision."""
+ parent_ids = self.current_dirstate().get_parent_ids()
+ if parent_ids:
+ return parent_ids[0]
+ else:
+ return _mod_revision.NULL_REVISION
+
+ def lock_read(self):
+ """See Branch.lock_read, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ self.branch.lock_read()
+ try:
+ self._control_files.lock_read()
+ try:
+ state = self.current_dirstate()
+ if not state._lock_token:
+ state.lock_read()
+ # set our support for tree references from the repository in
+ # use.
+ self._repo_supports_tree_reference = getattr(
+ self.branch.repository._format, "supports_tree_reference",
+ False)
+ except:
+ self._control_files.unlock()
+ raise
+ except:
+ self.branch.unlock()
+ raise
+ return LogicalLockResult(self.unlock)
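+
+    # Illustrative usage sketch (not part of the original source): the
+    # returned LogicalLockResult carries the matching unlock callable, so
+    # it can drive a try/finally directly.
+    def _example_read_locked_parents(tree):
+        """Return tree's parent ids while holding a read lock."""
+        lock = tree.lock_read()
+        try:
+            return tree.get_parent_ids()
+        finally:
+            lock.unlock()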
+
+ def _lock_self_write(self):
+ """This should be called after the branch is locked."""
+ try:
+ self._control_files.lock_write()
+ try:
+ state = self.current_dirstate()
+ if not state._lock_token:
+ state.lock_write()
+ # set our support for tree references from the repository in
+ # use.
+ self._repo_supports_tree_reference = getattr(
+ self.branch.repository._format, "supports_tree_reference",
+ False)
+ except:
+ self._control_files.unlock()
+ raise
+ except:
+ self.branch.unlock()
+ raise
+ return LogicalLockResult(self.unlock)
+
+ def lock_tree_write(self):
+ """See MutableTree.lock_tree_write, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ self.branch.lock_read()
+ return self._lock_self_write()
+
+ def lock_write(self):
+ """See MutableTree.lock_write, and WorkingTree.unlock.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ self.branch.lock_write()
+ return self._lock_self_write()
+
+ @needs_tree_write_lock
+ def move(self, from_paths, to_dir, after=False):
+ """See WorkingTree.move()."""
+ result = []
+ if not from_paths:
+ return result
+ state = self.current_dirstate()
+ if isinstance(from_paths, basestring):
+ raise ValueError()
+ to_dir_utf8 = to_dir.encode('utf8')
+ to_entry_dirname, to_basename = os.path.split(to_dir_utf8)
+ id_index = state._get_id_index()
+ # check destination directory
+ # get the details for it
+ to_entry_block_index, to_entry_entry_index, dir_present, entry_present = \
+ state._get_block_entry_index(to_entry_dirname, to_basename, 0)
+ if not entry_present:
+ raise errors.BzrMoveFailedError('', to_dir,
+ errors.NotVersionedError(to_dir))
+ to_entry = state._dirblocks[to_entry_block_index][1][to_entry_entry_index]
+ # get a handle on the block itself.
+ to_block_index = state._ensure_block(
+ to_entry_block_index, to_entry_entry_index, to_dir_utf8)
+ to_block = state._dirblocks[to_block_index]
+ to_abs = self.abspath(to_dir)
+ if not isdir(to_abs):
+ raise errors.BzrMoveFailedError('',to_dir,
+ errors.NotADirectory(to_abs))
+
+ if to_entry[1][0][0] != 'd':
+ raise errors.BzrMoveFailedError('',to_dir,
+ errors.NotADirectory(to_abs))
+
+ if self._inventory is not None:
+ update_inventory = True
+ inv = self.root_inventory
+ to_dir_id = to_entry[0][2]
+ to_dir_ie = inv[to_dir_id]
+ else:
+ update_inventory = False
+
+ rollbacks = []
+ def move_one(old_entry, from_path_utf8, minikind, executable,
+ fingerprint, packed_stat, size,
+ to_block, to_key, to_path_utf8):
+ state._make_absent(old_entry)
+ from_key = old_entry[0]
+ rollbacks.append(
+ lambda:state.update_minimal(from_key,
+ minikind,
+ executable=executable,
+ fingerprint=fingerprint,
+ packed_stat=packed_stat,
+ size=size,
+ path_utf8=from_path_utf8))
+ state.update_minimal(to_key,
+ minikind,
+ executable=executable,
+ fingerprint=fingerprint,
+ packed_stat=packed_stat,
+ size=size,
+ path_utf8=to_path_utf8)
+ added_entry_index, _ = state._find_entry_index(to_key, to_block[1])
+ new_entry = to_block[1][added_entry_index]
+ rollbacks.append(lambda:state._make_absent(new_entry))
+
+ for from_rel in from_paths:
+ # from_rel is 'pathinroot/foo/bar'
+ from_rel_utf8 = from_rel.encode('utf8')
+ from_dirname, from_tail = osutils.split(from_rel)
+ from_dirname, from_tail_utf8 = osutils.split(from_rel_utf8)
+ from_entry = self._get_entry(path=from_rel)
+ if from_entry == (None, None):
+ raise errors.BzrMoveFailedError(from_rel,to_dir,
+ errors.NotVersionedError(path=from_rel))
+
+ from_id = from_entry[0][2]
+ to_rel = pathjoin(to_dir, from_tail)
+ to_rel_utf8 = pathjoin(to_dir_utf8, from_tail_utf8)
+ item_to_entry = self._get_entry(path=to_rel)
+ if item_to_entry != (None, None):
+ raise errors.BzrMoveFailedError(from_rel, to_rel,
+ "Target is already versioned.")
+
+ if from_rel == to_rel:
+ raise errors.BzrMoveFailedError(from_rel, to_rel,
+ "Source and target are identical.")
+
+ from_missing = not self.has_filename(from_rel)
+ to_missing = not self.has_filename(to_rel)
+ if after:
+ move_file = False
+ else:
+ move_file = True
+ if to_missing:
+ if not move_file:
+ raise errors.BzrMoveFailedError(from_rel, to_rel,
+ errors.NoSuchFile(path=to_rel,
+ extra="New file has not been created yet"))
+ elif from_missing:
+ # neither path exists
+ raise errors.BzrRenameFailedError(from_rel, to_rel,
+ errors.PathsDoNotExist(paths=(from_rel, to_rel)))
+ else:
+ if from_missing: # implicitly just update our path mapping
+ move_file = False
+ elif not after:
+ raise errors.RenameFailedFilesExist(from_rel, to_rel)
+
+ rollbacks = []
+ def rollback_rename():
+ """A single rename has failed, roll it back."""
+ # roll back everything, even if we encounter trouble doing one
+ # of them.
+ #
+ # TODO: at least log the other exceptions rather than just
+ # losing them mbp 20070307
+ exc_info = None
+ for rollback in reversed(rollbacks):
+ try:
+ rollback()
+ except Exception, e:
+ exc_info = sys.exc_info()
+ if exc_info:
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+ # perform the disk move first - it's the most likely failure point.
+ if move_file:
+ from_rel_abs = self.abspath(from_rel)
+ to_rel_abs = self.abspath(to_rel)
+ try:
+ osutils.rename(from_rel_abs, to_rel_abs)
+ except OSError, e:
+ raise errors.BzrMoveFailedError(from_rel, to_rel, e[1])
+ rollbacks.append(lambda: osutils.rename(to_rel_abs, from_rel_abs))
+ try:
+ # perform the rename in the inventory next if needed: it's easy
+ # to rollback
+ if update_inventory:
+ # rename the entry
+ from_entry = inv[from_id]
+ current_parent = from_entry.parent_id
+ inv.rename(from_id, to_dir_id, from_tail)
+ rollbacks.append(
+ lambda: inv.rename(from_id, current_parent, from_tail))
+ # finally do the rename in the dirstate, which is a little
+ # tricky to rollback, but least likely to need it.
+ old_block_index, old_entry_index, dir_present, file_present = \
+ state._get_block_entry_index(from_dirname, from_tail_utf8, 0)
+ old_block = state._dirblocks[old_block_index][1]
+ old_entry = old_block[old_entry_index]
+ from_key, old_entry_details = old_entry
+ cur_details = old_entry_details[0]
+ # remove the old row
+ to_key = ((to_block[0],) + from_key[1:3])
+ minikind = cur_details[0]
+ move_one(old_entry, from_path_utf8=from_rel_utf8,
+ minikind=minikind,
+ executable=cur_details[3],
+ fingerprint=cur_details[1],
+ packed_stat=cur_details[4],
+ size=cur_details[2],
+ to_block=to_block,
+ to_key=to_key,
+ to_path_utf8=to_rel_utf8)
+
+ if minikind == 'd':
+ def update_dirblock(from_dir, to_key, to_dir_utf8):
+ """Recursively update all entries in this dirblock."""
+ if from_dir == '':
+ raise AssertionError("renaming root not supported")
+ from_key = (from_dir, '')
+ from_block_idx, present = \
+ state._find_block_index_from_key(from_key)
+ if not present:
+ # This is the old record, if it isn't present, then
+ # there is theoretically nothing to update.
+ # (Unless it isn't present because of lazy loading,
+ # but we don't do that yet)
+ return
+ from_block = state._dirblocks[from_block_idx]
+ to_block_index, to_entry_index, _, _ = \
+ state._get_block_entry_index(to_key[0], to_key[1], 0)
+ to_block_index = state._ensure_block(
+ to_block_index, to_entry_index, to_dir_utf8)
+ to_block = state._dirblocks[to_block_index]
+
+ # Grab a copy since move_one may update the list.
+ for entry in from_block[1][:]:
+ if not (entry[0][0] == from_dir):
+ raise AssertionError()
+ cur_details = entry[1][0]
+ to_key = (to_dir_utf8, entry[0][1], entry[0][2])
+ from_path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
+ to_path_utf8 = osutils.pathjoin(to_dir_utf8, entry[0][1])
+ minikind = cur_details[0]
+ if minikind in 'ar':
+ # Deleted children of a renamed directory
+ # Do not need to be updated.
+ # Children that have been renamed out of this
+ # directory should also not be updated
+ continue
+ move_one(entry, from_path_utf8=from_path_utf8,
+ minikind=minikind,
+ executable=cur_details[3],
+ fingerprint=cur_details[1],
+ packed_stat=cur_details[4],
+ size=cur_details[2],
+ to_block=to_block,
+ to_key=to_key,
+ to_path_utf8=to_path_utf8)
+ if minikind == 'd':
+ # We need to move all the children of this
+ # entry
+ update_dirblock(from_path_utf8, to_key,
+ to_path_utf8)
+ update_dirblock(from_rel_utf8, to_key, to_rel_utf8)
+ except:
+ rollback_rename()
+ raise
+ result.append((from_rel, to_rel))
+ state._mark_modified()
+ self._make_dirty(reset_inventory=False)
+
+ return result
+
+ def _must_be_locked(self):
+ if not self._control_files._lock_count:
+ raise errors.ObjectNotLocked(self)
+
+ def _new_tree(self):
+ """Initialize the state in this tree to be a new tree."""
+ self._dirty = True
+
+ @needs_read_lock
+ def path2id(self, path):
+ """Return the id for path in this tree."""
+ if isinstance(path, list):
+ if path == []:
+ path = [""]
+ path = osutils.pathjoin(*path)
+ path = path.strip('/')
+ entry = self._get_entry(path=path)
+ if entry == (None, None):
+ return None
+ return entry[0][2]
+
+ def paths2ids(self, paths, trees=[], require_versioned=True):
+ """See Tree.paths2ids().
+
+ This specialisation fast-paths the case where all the trees are in the
+ dirstate.
+ """
+ if paths is None:
+ return None
+ parents = self.get_parent_ids()
+ for tree in trees:
+ if not (isinstance(tree, DirStateRevisionTree) and tree._revision_id in
+ parents):
+ return super(DirStateWorkingTree, self).paths2ids(paths,
+ trees, require_versioned)
+ search_indexes = [0] + [1 + parents.index(tree._revision_id) for tree in trees]
+ # -- make all paths utf8 --
+ paths_utf8 = set()
+ for path in paths:
+ paths_utf8.add(path.encode('utf8'))
+ paths = paths_utf8
+ # -- paths is now a utf8 path set --
+ # -- get the state object and prepare it.
+ state = self.current_dirstate()
+ if False and (state._dirblock_state == dirstate.DirState.NOT_IN_MEMORY
+ and '' not in paths):
+ paths2ids = self._paths2ids_using_bisect
+ else:
+ paths2ids = self._paths2ids_in_memory
+ return paths2ids(paths, search_indexes,
+ require_versioned=require_versioned)
+
+ def _paths2ids_in_memory(self, paths, search_indexes,
+ require_versioned=True):
+ state = self.current_dirstate()
+ state._read_dirblocks_if_needed()
+ def _entries_for_path(path):
+ """Return a list with all the entries that match path for all ids.
+ """
+ dirname, basename = os.path.split(path)
+ key = (dirname, basename, '')
+ block_index, present = state._find_block_index_from_key(key)
+ if not present:
+ # the block which should contain path is absent.
+ return []
+ result = []
+ block = state._dirblocks[block_index][1]
+ entry_index, _ = state._find_entry_index(key, block)
+ # we may need to look at multiple entries at this path: walk while the paths match.
+ while (entry_index < len(block) and
+ block[entry_index][0][0:2] == key[0:2]):
+ result.append(block[entry_index])
+ entry_index += 1
+ return result
+ if require_versioned:
+ # -- check all supplied paths are versioned in a search tree. --
+ all_versioned = True
+ for path in paths:
+ path_entries = _entries_for_path(path)
+ if not path_entries:
+ # this specified path is not present at all: error
+ all_versioned = False
+ break
+ found_versioned = False
+ # for each id at this path
+ for entry in path_entries:
+ # for each tree.
+ for index in search_indexes:
+ if entry[1][index][0] != 'a': # absent
+ found_versioned = True
+ # all good: found a versioned cell
+ break
+ if not found_versioned:
+ # this path was 'absent' in every searched index for
+ # every id, so it is not versioned in any search tree.
+ all_versioned = False
+ break
+ if not all_versioned:
+ raise errors.PathsNotVersionedError(
+ [p.decode('utf-8') for p in paths])
+ # -- remove redundancy in supplied paths to prevent over-scanning --
+ search_paths = osutils.minimum_path_selection(paths)
+ # sketch:
+ # for all search_indexes in each path at or under each element of
+ # search_paths, if the detail is relocated: add the id, and add the
+ # relocated path as one to search if its not searched already. If the
+ # detail is not relocated, add the id.
+ searched_paths = set()
+ found_ids = set()
+ def _process_entry(entry):
+ """Look at search_indexes within entry.
+
+ If a specific tree's details are relocated, add the relocation
+ target to search_paths if not searched already. If it is absent, do
+ nothing. Otherwise add the id to found_ids.
+ """
+ for index in search_indexes:
+ if entry[1][index][0] == 'r': # relocated
+ if not osutils.is_inside_any(searched_paths, entry[1][index][1]):
+ search_paths.add(entry[1][index][1])
+ elif entry[1][index][0] != 'a': # absent
+ found_ids.add(entry[0][2])
+ while search_paths:
+ current_root = search_paths.pop()
+ searched_paths.add(current_root)
+ # process the entries for this containing directory: the rest will be
+ # found by their parents recursively.
+ root_entries = _entries_for_path(current_root)
+ if not root_entries:
+ # this specified path is not present at all, skip it.
+ continue
+ for entry in root_entries:
+ _process_entry(entry)
+ initial_key = (current_root, '', '')
+ block_index, _ = state._find_block_index_from_key(initial_key)
+ while (block_index < len(state._dirblocks) and
+ osutils.is_inside(current_root, state._dirblocks[block_index][0])):
+ for entry in state._dirblocks[block_index][1]:
+ _process_entry(entry)
+ block_index += 1
+ return found_ids
+
+ def _paths2ids_using_bisect(self, paths, search_indexes,
+ require_versioned=True):
+ state = self.current_dirstate()
+ found_ids = set()
+
+ split_paths = sorted(osutils.split(p) for p in paths)
+ found = state._bisect_recursive(split_paths)
+
+ if require_versioned:
+ found_dir_names = set(dir_name_id[:2] for dir_name_id in found)
+ for dir_name in split_paths:
+ if dir_name not in found_dir_names:
+ raise errors.PathsNotVersionedError(
+ [p.decode('utf-8') for p in paths])
+
+ for dir_name_id, trees_info in found.iteritems():
+ for index in search_indexes:
+ if trees_info[index][0] not in ('r', 'a'):
+ found_ids.add(dir_name_id[2])
+ return found_ids
+
+ def read_working_inventory(self):
+ """Read the working inventory.
+
+ This is a meaningless operation for dirstate, but we obey it anyhow.
+ """
+ return self.root_inventory
+
+ @needs_read_lock
+ def revision_tree(self, revision_id):
+ """See Tree.revision_tree.
+
+ WorkingTree4 supplies revision_trees for any basis tree.
+ """
+ dirstate = self.current_dirstate()
+ parent_ids = dirstate.get_parent_ids()
+ if revision_id not in parent_ids:
+ raise errors.NoSuchRevisionInTree(self, revision_id)
+ if revision_id in dirstate.get_ghosts():
+ raise errors.NoSuchRevisionInTree(self, revision_id)
+ return DirStateRevisionTree(dirstate, revision_id,
+ self.branch.repository)
+
+ @needs_tree_write_lock
+ def set_last_revision(self, new_revision):
+ """Change the last revision in the working tree."""
+ parents = self.get_parent_ids()
+ if new_revision in (_mod_revision.NULL_REVISION, None):
+ if len(parents) >= 2:
+ raise AssertionError(
+ "setting the last parent to none with a pending merge is "
+ "unsupported.")
+ self.set_parent_ids([])
+ else:
+ self.set_parent_ids([new_revision] + parents[1:],
+ allow_leftmost_as_ghost=True)
+
+ @needs_tree_write_lock
+ def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
+ """Set the parent ids to revision_ids.
+
+ See also set_parent_trees. This api will try to retrieve the tree data
+ for each element of revision_ids from the trees repository. If you have
+ tree data already available, it is more efficient to use
+ set_parent_trees rather than set_parent_ids. set_parent_ids is however
+ an easier API to use.
+
+ :param revision_ids: The revision_ids to set as the parent ids of this
+ working tree. Any of these may be ghosts.
+ """
+ trees = []
+ for revision_id in revision_ids:
+ try:
+ revtree = self.branch.repository.revision_tree(revision_id)
+ # TODO: jam 20070213 KnitVersionedFile raises
+ # RevisionNotPresent rather than NoSuchRevision if a
+ # given revision_id is not present. Should Repository be
+ # catching it and re-raising NoSuchRevision?
+ except (errors.NoSuchRevision, errors.RevisionNotPresent):
+ revtree = None
+ trees.append((revision_id, revtree))
+ self.set_parent_trees(trees,
+ allow_leftmost_as_ghost=allow_leftmost_as_ghost)
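+
+    # Illustrative usage sketch (not part of the original source):
+    # recording a pending merge by revision id only; set_parent_trees is
+    # preferable when the caller already holds the corresponding trees.
+    def _example_add_pending_merge(tree, merged_revision_id):
+        """Append merged_revision_id to tree's parent ids."""
+        tree.set_parent_ids(tree.get_parent_ids() + [merged_revision_id])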
+
+ @needs_tree_write_lock
+ def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
+ """Set the parents of the working tree.
+
+ :param parents_list: A list of (revision_id, tree) tuples.
+ If tree is None, then that element is treated as an unreachable
+ parent tree - i.e. a ghost.
+ """
+ dirstate = self.current_dirstate()
+ if len(parents_list) > 0:
+ if not allow_leftmost_as_ghost and parents_list[0][1] is None:
+ raise errors.GhostRevisionUnusableHere(parents_list[0][0])
+ real_trees = []
+ ghosts = []
+
+ parent_ids = [rev_id for rev_id, tree in parents_list]
+ graph = self.branch.repository.get_graph()
+ heads = graph.heads(parent_ids)
+ accepted_revisions = set()
+
+ # convert absent trees to the null tree, which we convert back to
+ # missing on access.
+ for rev_id, tree in parents_list:
+ if len(accepted_revisions) > 0:
+ # we always accept the first tree
+ if rev_id in accepted_revisions or rev_id not in heads:
+ # We have already included either this tree, or its
+ # descendent, so we skip it.
+ continue
+ _mod_revision.check_not_reserved_id(rev_id)
+ if tree is not None:
+ real_trees.append((rev_id, tree))
+ else:
+ real_trees.append((rev_id,
+ self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)))
+ ghosts.append(rev_id)
+ accepted_revisions.add(rev_id)
+ updated = False
+ if (len(real_trees) == 1
+ and not ghosts
+ and self.branch.repository._format.fast_deltas
+ and isinstance(real_trees[0][1],
+ revisiontree.InventoryRevisionTree)
+ and self.get_parent_ids()):
+ rev_id, rev_tree = real_trees[0]
+ basis_id = self.get_parent_ids()[0]
+ # There are times when basis_tree won't be in
+ # self.branch.repository (switch, for example)
+ try:
+ basis_tree = self.branch.repository.revision_tree(basis_id)
+ except errors.NoSuchRevision:
+ # Fall back to the set_parent_trees(), since we can't use
+ # _make_delta if we can't get the RevisionTree
+ pass
+ else:
+ delta = rev_tree.root_inventory._make_delta(
+ basis_tree.root_inventory)
+ dirstate.update_basis_by_delta(delta, rev_id)
+ updated = True
+ if not updated:
+ dirstate.set_parent_trees(real_trees, ghosts=ghosts)
+ self._make_dirty(reset_inventory=False)
+
+ def _set_root_id(self, file_id):
+ """See WorkingTree.set_root_id."""
+ state = self.current_dirstate()
+ state.set_path_id('', file_id)
+ if state._dirblock_state == dirstate.DirState.IN_MEMORY_MODIFIED:
+ self._make_dirty(reset_inventory=True)
+
+ def _sha_from_stat(self, path, stat_result):
+ """Get a sha digest from the tree's stat cache.
+
+ The default implementation assumes no stat cache is present.
+
+ :param path: The path.
+ :param stat_result: The stat result being looked up.
+ """
+ return self.current_dirstate().sha1_from_stat(path, stat_result)
+
+ @needs_read_lock
+ def supports_tree_reference(self):
+ return self._repo_supports_tree_reference
+
+ def unlock(self):
+ """Unlock in format 4 trees needs to write the entire dirstate."""
+ if self._control_files._lock_count == 1:
+ # do non-implementation specific cleanup
+ self._cleanup()
+
+ # eventually we should do signature checking during read locks for
+ # dirstate updates.
+ if self._control_files._lock_mode == 'w':
+ if self._dirty:
+ self.flush()
+ if self._dirstate is not None:
+ # This is a no-op if there are no modifications.
+ self._dirstate.save()
+ self._dirstate.unlock()
+ # TODO: jam 20070301 We shouldn't have to wipe the dirstate at this
+ # point. Instead, it could check if the header has been
+ # modified when it is locked, and if not, it can hang on to
+ # the data it has in memory.
+ self._dirstate = None
+ self._inventory = None
+ # reverse order of locking.
+ try:
+ return self._control_files.unlock()
+ finally:
+ self.branch.unlock()
+
+ @needs_tree_write_lock
+ def unversion(self, file_ids):
+ """Remove the file ids in file_ids from the current versioned set.
+
+ When a file_id is unversioned, all of its children are automatically
+ unversioned.
+
+ :param file_ids: The file ids to stop versioning.
+ :raises: NoSuchId if any fileid is not currently versioned.
+ """
+ if not file_ids:
+ return
+ state = self.current_dirstate()
+ state._read_dirblocks_if_needed()
+ ids_to_unversion = set(file_ids)
+ paths_to_unversion = set()
+ # sketch:
+ # check if the root is to be unversioned, if so, assert for now.
+ # walk the state marking unversioned things as absent.
+ # if there are any un-unversioned ids at the end, raise
+ for key, details in state._dirblocks[0][1]:
+ if (details[0][0] not in ('a', 'r') and # absent or relocated
+ key[2] in ids_to_unversion):
+ # I haven't written the code to unversion / yet - it should be
+ # supported.
+ raise errors.BzrError('Unversioning the / is not currently supported')
+ block_index = 0
+ while block_index < len(state._dirblocks):
+ # process one directory at a time.
+ block = state._dirblocks[block_index]
+ # first check: is the path one to remove - it or its children
+ delete_block = False
+ for path in paths_to_unversion:
+ if (block[0].startswith(path) and
+ (len(block[0]) == len(path) or
+ block[0][len(path)] == '/')):
+ # this entire block should be deleted - it's the block for a
+ # path to unversion; or the child of one
+ delete_block = True
+ break
+ # TODO: trim paths_to_unversion as we pass by paths
+ if delete_block:
+ # this block is to be deleted: process it.
+ # TODO: we can special case the no-parents case and
+ # just forget the whole block.
+ entry_index = 0
+ while entry_index < len(block[1]):
+ entry = block[1][entry_index]
+ if entry[1][0][0] in 'ar':
+ # don't remove absent or renamed entries
+ entry_index += 1
+ else:
+ # Mark this file id as having been removed
+ ids_to_unversion.discard(entry[0][2])
+ if not state._make_absent(entry):
+ # The block has not shrunk.
+ entry_index += 1
+ # go to the next block. (At the moment we don't delete empty
+ # dirblocks)
+ block_index += 1
+ continue
+ entry_index = 0
+ while entry_index < len(block[1]):
+ entry = block[1][entry_index]
+ if (entry[1][0][0] in ('a', 'r') or # absent, relocated
+ # ^ some parent row.
+ entry[0][2] not in ids_to_unversion):
+ # ^ not an id to unversion
+ entry_index += 1
+ continue
+ if entry[1][0][0] == 'd':
+ paths_to_unversion.add(pathjoin(entry[0][0], entry[0][1]))
+ if not state._make_absent(entry):
+ entry_index += 1
+ # we have unversioned this id
+ ids_to_unversion.remove(entry[0][2])
+ block_index += 1
+ if ids_to_unversion:
+ raise errors.NoSuchId(self, iter(ids_to_unversion).next())
+ self._make_dirty(reset_inventory=False)
+ # have to change the legacy inventory too.
+ if self._inventory is not None:
+ for file_id in file_ids:
+ if self._inventory.has_id(file_id):
+ self._inventory.remove_recursive_id(file_id)
+
+ @needs_tree_write_lock
+ def rename_one(self, from_rel, to_rel, after=False):
+ """See WorkingTree.rename_one"""
+ self.flush()
+ super(DirStateWorkingTree, self).rename_one(from_rel, to_rel, after)
+
+ @needs_tree_write_lock
+ def apply_inventory_delta(self, changes):
+ """See MutableTree.apply_inventory_delta"""
+ state = self.current_dirstate()
+ state.update_by_delta(changes)
+ self._make_dirty(reset_inventory=True)
+
+ def update_basis_by_delta(self, new_revid, delta):
+ """See MutableTree.update_basis_by_delta."""
+ if self.last_revision() == new_revid:
+ raise AssertionError()
+ self.current_dirstate().update_basis_by_delta(delta, new_revid)
+
+ @needs_read_lock
+ def _validate(self):
+ self._dirstate._validate()
+
+ @needs_tree_write_lock
+ def _write_inventory(self, inv):
+ """Write inventory as the current inventory."""
+ if self._dirty:
+ raise AssertionError("attempting to write an inventory when the "
+ "dirstate is dirty will lose pending changes")
+ had_inventory = self._inventory is not None
+ # Setting self._inventory = None forces the dirstate to regenerate the
+ # working inventory. We do this because self.inventory may be inv, or
+ # may have been modified, and either case would prevent a clean delta
+ # being created.
+ self._inventory = None
+ # generate a delta,
+ delta = inv._make_delta(self.root_inventory)
+ # and apply it.
+ self.apply_inventory_delta(delta)
+ if had_inventory:
+ self._inventory = inv
+ self.flush()
+
+ @needs_tree_write_lock
+ def reset_state(self, revision_ids=None):
+ """Reset the state of the working tree.
+
+ This does a hard-reset to a last-known-good state. This is a way to
+ fix if something got corrupted (like the .bzr/checkout/dirstate file)
+ """
+ if revision_ids is None:
+ revision_ids = self.get_parent_ids()
+ if not revision_ids:
+ base_tree = self.branch.repository.revision_tree(
+ _mod_revision.NULL_REVISION)
+ trees = []
+ else:
+ trees = zip(revision_ids,
+ self.branch.repository.revision_trees(revision_ids))
+ base_tree = trees[0][1]
+ state = self.current_dirstate()
+ # We don't support ghosts yet
+ state.set_state_from_scratch(base_tree.root_inventory, trees, [])
+
+
+class ContentFilterAwareSHA1Provider(dirstate.SHA1Provider):
+
+ def __init__(self, tree):
+ self.tree = tree
+
+ def sha1(self, abspath):
+ """See dirstate.SHA1Provider.sha1()."""
+ filters = self.tree._content_filter_stack(
+ self.tree.relpath(osutils.safe_unicode(abspath)))
+ return _mod_filters.internal_size_sha_file_byname(abspath, filters)[1]
+
+ def stat_and_sha1(self, abspath):
+ """See dirstate.SHA1Provider.stat_and_sha1()."""
+ filters = self.tree._content_filter_stack(
+ self.tree.relpath(osutils.safe_unicode(abspath)))
+ file_obj = file(abspath, 'rb', 65000)
+ try:
+ statvalue = os.fstat(file_obj.fileno())
+ if filters:
+ file_obj = _mod_filters.filtered_input_file(file_obj, filters)
+ sha1 = osutils.size_sha_file(file_obj)[1]
+ finally:
+ file_obj.close()
+ return statvalue, sha1
+
+
+class ContentFilteringDirStateWorkingTree(DirStateWorkingTree):
+ """Dirstate working tree that supports content filtering.
+
+ The dirstate holds the hash and size of the canonical form of the file,
+ and most methods must return that.
+ """
+
+ def _file_content_summary(self, path, stat_result):
+ # This is to support the somewhat obsolete path_content_summary method
+ # with content filtering: see
+ # <https://bugs.launchpad.net/bzr/+bug/415508>.
+ #
+ # If the dirstate cache is up to date and knows the hash and size,
+ # return that.
+ # Otherwise if there are no content filters, return the on-disk size
+ # and leave the hash blank.
+ # Otherwise, read and filter the on-disk file and use its size and
+ # hash.
+ #
+ # The dirstate doesn't store the size of the canonical form so we
+ # can't trust it for content-filtered trees. We just return None.
+ dirstate_sha1 = self._dirstate.sha1_from_stat(path, stat_result)
+ executable = self._is_executable_from_path_and_stat(path, stat_result)
+ return ('file', None, executable, dirstate_sha1)
+
+
+class WorkingTree4(DirStateWorkingTree):
+ """This is the Format 4 working tree.
+
+ This differs from WorkingTree by:
+ - Having a consolidated internal dirstate, stored in a
+ randomly-accessible sorted file on disk.
+ - Not having a regular inventory attribute. One can be synthesized
+ on demand but this is expensive and should be avoided.
+
+ This is new in bzr 0.15.
+ """
+
+
+class WorkingTree5(ContentFilteringDirStateWorkingTree):
+ """This is the Format 5 working tree.
+
+ This differs from WorkingTree4 by:
+ - Supporting content filtering.
+
+ This is new in bzr 1.11.
+ """
+
+
+class WorkingTree6(ContentFilteringDirStateWorkingTree):
+ """This is the Format 6 working tree.
+
+ This differs from WorkingTree5 by:
+ - Supporting a current view that may mask the set of files in a tree
+ impacted by most user operations.
+
+ This is new in bzr 1.14.
+ """
+
+ def _make_views(self):
+ return views.PathBasedViews(self)
+
+
+class DirStateWorkingTreeFormat(WorkingTreeFormatMetaDir):
+
+ missing_parent_conflicts = True
+
+ supports_versioned_directories = True
+
+ _lock_class = LockDir
+ _lock_file_name = 'lock'
+
+ def _open_control_files(self, a_bzrdir):
+ transport = a_bzrdir.get_workingtree_transport(None)
+ return LockableFiles(transport, self._lock_file_name,
+ self._lock_class)
+
+ def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+ accelerator_tree=None, hardlink=False):
+ """See WorkingTreeFormat.initialize().
+
+ :param revision_id: allows creating a working tree at a different
+ revision than the branch is at.
+ :param accelerator_tree: A tree which can be used for retrieving file
+ contents more quickly than the revision tree, i.e. a workingtree.
+ The revision tree will be used for cases where accelerator_tree's
+ content is different.
+ :param hardlink: If true, hard-link files from accelerator_tree,
+ where possible.
+
+ These trees get an initial random root id if their repository supports
+ rich root data, TREE_ROOT otherwise.
+ """
+ if not isinstance(a_bzrdir.transport, LocalTransport):
+ raise errors.NotLocalUrl(a_bzrdir.transport.base)
+ transport = a_bzrdir.get_workingtree_transport(self)
+ control_files = self._open_control_files(a_bzrdir)
+ control_files.create_lock()
+ control_files.lock_write()
+ transport.put_bytes('format', self.as_string(),
+ mode=a_bzrdir._get_file_mode())
+ if from_branch is not None:
+ branch = from_branch
+ else:
+ branch = a_bzrdir.open_branch()
+ if revision_id is None:
+ revision_id = branch.last_revision()
+ local_path = transport.local_abspath('dirstate')
+ # write out new dirstate (must exist when we create the tree)
+ state = dirstate.DirState.initialize(local_path)
+ state.unlock()
+ del state
+ wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
+ branch,
+ _format=self,
+ _bzrdir=a_bzrdir,
+ _control_files=control_files)
+ wt._new_tree()
+ wt.lock_tree_write()
+ try:
+ self._init_custom_control_files(wt)
+ if revision_id in (None, _mod_revision.NULL_REVISION):
+ if branch.repository.supports_rich_root():
+ wt._set_root_id(generate_ids.gen_root_id())
+ else:
+ wt._set_root_id(ROOT_ID)
+ wt.flush()
+ basis = None
+ # frequently, we will get here due to branching. The accelerator
+ # tree will be the tree from the branch, so the desired basis
+ # tree will often be a parent of the accelerator tree.
+ if accelerator_tree is not None:
+ try:
+ basis = accelerator_tree.revision_tree(revision_id)
+ except errors.NoSuchRevision:
+ pass
+ if basis is None:
+ basis = branch.repository.revision_tree(revision_id)
+ if revision_id == _mod_revision.NULL_REVISION:
+ parents_list = []
+ else:
+ parents_list = [(revision_id, basis)]
+ basis.lock_read()
+ try:
+ wt.set_parent_trees(parents_list, allow_leftmost_as_ghost=True)
+ wt.flush()
+ # if the basis has a root id we have to use that; otherwise we
+ # use a new random one
+ basis_root_id = basis.get_root_id()
+ if basis_root_id is not None:
+ wt._set_root_id(basis_root_id)
+ wt.flush()
+ if wt.supports_content_filtering():
+ # The original tree may not have the same content filters
+ # applied so we can't safely build the inventory delta from
+ # the source tree.
+ delta_from_tree = False
+ else:
+ delta_from_tree = True
+ # delta_from_tree is safe even for DirStateRevisionTrees,
+ # because wt4.apply_inventory_delta does not mutate the input
+ # inventory entries.
+ transform.build_tree(basis, wt, accelerator_tree,
+ hardlink=hardlink,
+ delta_from_tree=delta_from_tree)
+ for hook in MutableTree.hooks['post_build_tree']:
+ hook(wt)
+ finally:
+ basis.unlock()
+ finally:
+ control_files.unlock()
+ wt.unlock()
+ return wt
+
+ def _init_custom_control_files(self, wt):
+ """Subclasses with custom control files should override this method.
+
+ The working tree and control files are locked for writing when this
+ method is called.
+
+ :param wt: the WorkingTree object
+ """
+
+ def open(self, a_bzrdir, _found=False):
+ """Return the WorkingTree object for a_bzrdir
+
+ _found is a private parameter, do not use it. It is used to indicate
+ if format probing has already been done.
+ """
+ if not _found:
+ # we are being called directly and must probe.
+ raise NotImplementedError
+ if not isinstance(a_bzrdir.transport, LocalTransport):
+ raise errors.NotLocalUrl(a_bzrdir.transport.base)
+ wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
+ return wt
+
+ def _open(self, a_bzrdir, control_files):
+ """Open the tree itself.
+
+ :param a_bzrdir: the dir for the tree.
+ :param control_files: the control files for the tree.
+ """
+ return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
+ branch=a_bzrdir.open_branch(),
+ _format=self,
+ _bzrdir=a_bzrdir,
+ _control_files=control_files)
+
+ def __get_matchingbzrdir(self):
+ return self._get_matchingbzrdir()
+
+ def _get_matchingbzrdir(self):
+ """Overrideable method to get a bzrdir for testing."""
+ # please test against something that will let us do tree references
+ return controldir.format_registry.make_bzrdir(
+ 'development-subtree')
+
+ _matchingbzrdir = property(__get_matchingbzrdir)
+
+
+class WorkingTreeFormat4(DirStateWorkingTreeFormat):
+ """The first consolidated dirstate working tree format.
+
+ This format:
+ - exists within a metadir controlling .bzr
+ - includes an explicit version marker for the workingtree control
+ files, separate from the ControlDir format
+ - modifies the hash cache format
+ - is new in bzr 0.15
+ - uses a LockDir to guard access to it.
+ """
+
+ upgrade_recommended = False
+
+ _tree_class = WorkingTree4
+
+ @classmethod
+ def get_format_string(cls):
+ """See WorkingTreeFormat.get_format_string()."""
+ return "Bazaar Working Tree Format 4 (bzr 0.15)\n"
+
+ def get_format_description(self):
+ """See WorkingTreeFormat.get_format_description()."""
+ return "Working tree format 4"
+
+
+class WorkingTreeFormat5(DirStateWorkingTreeFormat):
+ """WorkingTree format supporting content filtering.
+ """
+
+ upgrade_recommended = False
+
+ _tree_class = WorkingTree5
+
+ @classmethod
+ def get_format_string(cls):
+ """See WorkingTreeFormat.get_format_string()."""
+ return "Bazaar Working Tree Format 5 (bzr 1.11)\n"
+
+ def get_format_description(self):
+ """See WorkingTreeFormat.get_format_description()."""
+ return "Working tree format 5"
+
+ def supports_content_filtering(self):
+ return True
+
+
+class WorkingTreeFormat6(DirStateWorkingTreeFormat):
+ """WorkingTree format supporting views.
+ """
+
+ upgrade_recommended = False
+
+ _tree_class = WorkingTree6
+
+ @classmethod
+ def get_format_string(cls):
+ """See WorkingTreeFormat.get_format_string()."""
+ return "Bazaar Working Tree Format 6 (bzr 1.14)\n"
+
+ def get_format_description(self):
+ """See WorkingTreeFormat.get_format_description()."""
+ return "Working tree format 6"
+
+ def _init_custom_control_files(self, wt):
+ """Subclasses with custom control files should override this method."""
+ wt._transport.put_bytes('views', '', mode=wt.bzrdir._get_file_mode())
+
+ def supports_content_filtering(self):
+ return True
+
+ def supports_views(self):
+ return True
+
+
+class DirStateRevisionTree(InventoryTree):
+ """A revision tree pulling the inventory from a dirstate.
+
+ Note that this is one of the historical (ie revision) trees cached in the
+ dirstate for easy access, not the workingtree.
+ """
+
+ def __init__(self, dirstate, revision_id, repository):
+ self._dirstate = dirstate
+ self._revision_id = revision_id
+ self._repository = repository
+ self._inventory = None
+ self._locked = 0
+ self._dirstate_locked = False
+ self._repo_supports_tree_reference = getattr(
+ repository._format, "supports_tree_reference",
+ False)
+
+ def __repr__(self):
+ return "<%s of %s in %s>" % \
+ (self.__class__.__name__, self._revision_id, self._dirstate)
+
+ def annotate_iter(self, file_id,
+ default_revision=_mod_revision.CURRENT_REVISION):
+ """See Tree.annotate_iter"""
+ text_key = (file_id, self.get_file_revision(file_id))
+ annotations = self._repository.texts.annotate(text_key)
+ return [(key[-1], line) for (key, line) in annotations]
+
+ def _comparison_data(self, entry, path):
+ """See Tree._comparison_data."""
+ if entry is None:
+ return None, False, None
+ # trust the entry as RevisionTree does, but this may not be
+ # sensible: the entry might not have come from us?
+ return entry.kind, entry.executable, None
+
+ def _file_size(self, entry, stat_value):
+ return entry.text_size
+
+ def filter_unversioned_files(self, paths):
+ """Filter out paths that are not versioned.
+
+ :return: set of paths.
+ """
+ pred = self.has_filename
+ return set((p for p in paths if not pred(p)))
+
+ def get_root_id(self):
+ return self.path2id('')
+
+ def id2path(self, file_id):
+ "Convert a file-id to a path."
+ entry = self._get_entry(file_id=file_id)
+ if entry == (None, None):
+ raise errors.NoSuchId(tree=self, file_id=file_id)
+ path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
+ return path_utf8.decode('utf8')
+
+ def iter_references(self):
+ if not self._repo_supports_tree_reference:
+ # When the repo doesn't support references, we will have nothing to
+ # return
+ return iter([])
+ # Otherwise, fall back to the default implementation
+ return super(DirStateRevisionTree, self).iter_references()
+
+ def _get_parent_index(self):
+ """Return the index in the dirstate referenced by this tree."""
+ return self._dirstate.get_parent_ids().index(self._revision_id) + 1
+
+ def _get_entry(self, file_id=None, path=None):
+ """Get the dirstate row for file_id or path.
+
+ If either file_id or path is supplied, it is used as the key to lookup.
+ If both are supplied, the fastest lookup is used, and an error is
+ raised if they do not both point at the same row.
+
+ :param file_id: An optional unicode file_id to be looked up.
+ :param path: An optional unicode path to be looked up.
+ :return: The dirstate row tuple for path/file_id, or (None, None)
+ """
+ if file_id is None and path is None:
+ raise errors.BzrError('must supply file_id or path')
+ if path is not None:
+ path = path.encode('utf8')
+ parent_index = self._get_parent_index()
+ return self._dirstate._get_entry(parent_index, fileid_utf8=file_id,
+ path_utf8=path)
+
+ def _generate_inventory(self):
+ """Create and set self.inventory from the dirstate object.
+
+ (So this is only called the first time the inventory is requested for
+ this tree; it then remains in memory until it's out of date.)
+
+ This is relatively expensive: we have to walk the entire dirstate.
+ """
+ if not self._locked:
+ raise AssertionError(
+ 'cannot generate inventory of an unlocked '
+ 'dirstate revision tree')
+ # separate call for profiling - makes it clear where the costs are.
+ self._dirstate._read_dirblocks_if_needed()
+ if self._revision_id not in self._dirstate.get_parent_ids():
+ raise AssertionError(
+ 'parent %s has disappeared from %s' % (
+ self._revision_id, self._dirstate.get_parent_ids()))
+ parent_index = self._dirstate.get_parent_ids().index(self._revision_id) + 1
+ # This is identical now to the WorkingTree _generate_inventory except
+ # for the tree index use.
+ root_key, current_entry = self._dirstate._get_entry(parent_index, path_utf8='')
+ current_id = root_key[2]
+ if current_entry[parent_index][0] != 'd':
+ raise AssertionError()
+ inv = Inventory(root_id=current_id, revision_id=self._revision_id)
+ inv.root.revision = current_entry[parent_index][4]
+ # Turn some things into local variables
+ minikind_to_kind = dirstate.DirState._minikind_to_kind
+ factory = entry_factory
+ utf8_decode = cache_utf8._utf8_decode
+ inv_byid = inv._byid
+ # we could do this straight out of the dirstate; it might be fast
+ # and should be profiled - RBC 20070216
+ parent_ies = {'' : inv.root}
+ for block in self._dirstate._dirblocks[1:]: #skip root
+ dirname = block[0]
+ try:
+ parent_ie = parent_ies[dirname]
+ except KeyError:
+ # all the paths in this block are not versioned in this tree
+ continue
+ for key, entry in block[1]:
+ minikind, fingerprint, size, executable, revid = entry[parent_index]
+ if minikind in ('a', 'r'): # absent, relocated
+ # not this tree
+ continue
+ name = key[1]
+ name_unicode = utf8_decode(name)[0]
+ file_id = key[2]
+ kind = minikind_to_kind[minikind]
+ inv_entry = factory[kind](file_id, name_unicode,
+ parent_ie.file_id)
+ inv_entry.revision = revid
+ if kind == 'file':
+ inv_entry.executable = executable
+ inv_entry.text_size = size
+ inv_entry.text_sha1 = fingerprint
+ elif kind == 'directory':
+ parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
+ elif kind == 'symlink':
+ inv_entry.symlink_target = utf8_decode(fingerprint)[0]
+ elif kind == 'tree-reference':
+ inv_entry.reference_revision = fingerprint or None
+ else:
+ raise AssertionError("cannot convert entry %r into an InventoryEntry"
+ % entry)
+ # These checks cost us around 40ms on a 55k entry tree
+ if file_id in inv_byid:
+ raise AssertionError('file_id %s already in'
+ ' inventory as %s' % (file_id, inv_byid[file_id]))
+ if name_unicode in parent_ie.children:
+ raise AssertionError('name %r already in parent'
+ % (name_unicode,))
+ inv_byid[file_id] = inv_entry
+ parent_ie.children[name_unicode] = inv_entry
+ self._inventory = inv
+
+ def get_file_mtime(self, file_id, path=None):
+ """Return the modification time for this record.
+
+ We return the timestamp of the last-changed revision.
+ """
+ # Make sure the file exists
+ entry = self._get_entry(file_id, path=path)
+ if entry == (None, None): # do we raise?
+ raise errors.NoSuchId(self, file_id)
+ parent_index = self._get_parent_index()
+ last_changed_revision = entry[1][parent_index][4]
+ try:
+ rev = self._repository.get_revision(last_changed_revision)
+ except errors.NoSuchRevision:
+ raise errors.FileTimestampUnavailable(self.id2path(file_id))
+ return rev.timestamp
+
+ def get_file_sha1(self, file_id, path=None, stat_value=None):
+ entry = self._get_entry(file_id=file_id, path=path)
+ parent_index = self._get_parent_index()
+ parent_details = entry[1][parent_index]
+ if parent_details[0] == 'f':
+ return parent_details[1]
+ return None
+
+ @needs_read_lock
+ def get_file_revision(self, file_id):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].revision
+
+ def get_file(self, file_id, path=None):
+ return StringIO(self.get_file_text(file_id))
+
+ def get_file_size(self, file_id):
+ """See Tree.get_file_size"""
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].text_size
+
+ def get_file_text(self, file_id, path=None):
+ _, content = list(self.iter_files_bytes([(file_id, None)]))[0]
+ return ''.join(content)
+
+ def get_reference_revision(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ return inv[inv_file_id].reference_revision
+
+ def iter_files_bytes(self, desired_files):
+ """See Tree.iter_files_bytes.
+
+ This version is implemented on top of Repository.iter_files_bytes"""
+ parent_index = self._get_parent_index()
+ repo_desired_files = []
+ for file_id, identifier in desired_files:
+ entry = self._get_entry(file_id)
+ if entry == (None, None):
+ raise errors.NoSuchId(self, file_id)
+ repo_desired_files.append((file_id, entry[1][parent_index][4],
+ identifier))
+ return self._repository.iter_files_bytes(repo_desired_files)
+
+ def get_symlink_target(self, file_id, path=None):
+ entry = self._get_entry(file_id=file_id)
+ parent_index = self._get_parent_index()
+ if entry[1][parent_index][0] != 'l':
+ return None
+ else:
+ target = entry[1][parent_index][1]
+ target = target.decode('utf8')
+ return target
+
+ def get_revision_id(self):
+ """Return the revision id for this tree."""
+ return self._revision_id
+
+ def _get_root_inventory(self):
+ if self._inventory is not None:
+ return self._inventory
+ self._must_be_locked()
+ self._generate_inventory()
+ return self._inventory
+
+ root_inventory = property(_get_root_inventory,
+ doc="Inventory of this Tree")
+
+ @deprecated_method(deprecated_in((2, 5, 0)))
+ def _get_inventory(self):
+ return self.root_inventory
+
+ inventory = property(_get_inventory,
+ doc="Inventory of this Tree")
+
+ def get_parent_ids(self):
+ """The parents of a tree in the dirstate are not cached."""
+ return self._repository.get_revision(self._revision_id).parent_ids
+
+ def has_filename(self, filename):
+ return bool(self.path2id(filename))
+
+ def kind(self, file_id):
+ entry = self._get_entry(file_id=file_id)[1]
+ if entry is None:
+ raise errors.NoSuchId(tree=self, file_id=file_id)
+ parent_index = self._get_parent_index()
+ return dirstate.DirState._minikind_to_kind[entry[parent_index][0]]
+
+ def stored_kind(self, file_id):
+ """See Tree.stored_kind"""
+ return self.kind(file_id)
+
+ def path_content_summary(self, path):
+ """See Tree.path_content_summary."""
+ inv, inv_file_id = self._path2inv_file_id(path)
+ if inv_file_id is None:
+ return ('missing', None, None, None)
+ entry = inv[inv_file_id]
+ kind = entry.kind
+ if kind == 'file':
+ return (kind, entry.text_size, entry.executable, entry.text_sha1)
+ elif kind == 'symlink':
+ return (kind, None, None, entry.symlink_target)
+ else:
+ return (kind, None, None, None)
+
+ def is_executable(self, file_id, path=None):
+ inv, inv_file_id = self._unpack_file_id(file_id)
+ ie = inv[inv_file_id]
+ if ie.kind != "file":
+ return False
+ return ie.executable
+
+ def is_locked(self):
+ return self._locked
+
+ def list_files(self, include_root=False, from_dir=None, recursive=True):
+ # We use a standard implementation, because DirStateRevisionTree is
+ # dealing with one of the parents of the current state
+ if from_dir is None:
+ inv = self.root_inventory
+ from_dir_id = None
+ else:
+ inv, from_dir_id = self._path2inv_file_id(from_dir)
+ if from_dir_id is None:
+ # Directory not versioned
+ return
+ # FIXME: Support nested trees
+ entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive)
+ if inv.root is not None and not include_root and from_dir is None:
+ entries.next()
+ for path, entry in entries:
+ yield path, 'V', entry.kind, entry.file_id, entry
+
+ def lock_read(self):
+ """Lock the tree for a set of operations.
+
+ :return: A bzrlib.lock.LogicalLockResult.
+ """
+ if not self._locked:
+ self._repository.lock_read()
+ if self._dirstate._lock_token is None:
+ self._dirstate.lock_read()
+ self._dirstate_locked = True
+ self._locked += 1
+ return LogicalLockResult(self.unlock)
+
+ def _must_be_locked(self):
+ if not self._locked:
+ raise errors.ObjectNotLocked(self)
+
+ @needs_read_lock
+ def path2id(self, path):
+ """Return the id for path in this tree."""
+ # lookup by path: faster than splitting and walking the inventory.
+ if isinstance(path, list):
+ if path == []:
+ path = [""]
+ path = osutils.pathjoin(*path)
+ entry = self._get_entry(path=path)
+ if entry == (None, None):
+ return None
+ return entry[0][2]
+
+ def unlock(self):
+ """Unlock, freeing any cache memory used during the lock."""
+ # outside of a lock, the inventory is suspect: release it.
+ self._locked -= 1
+ if not self._locked:
+ self._inventory = None
+ self._locked = 0
+ if self._dirstate_locked:
+ self._dirstate.unlock()
+ self._dirstate_locked = False
+ self._repository.unlock()
+
+ @needs_read_lock
+ def supports_tree_reference(self):
+ return self._repo_supports_tree_reference
+
+ def walkdirs(self, prefix=""):
+ # TODO: jam 20070215 This is the lazy way by using the RevisionTree
+ # implementation based on an inventory.
+ # This should be cleaned up to use the much faster Dirstate code
+ # So for now, we just build up the parent inventory, and extract
+ # it the same way RevisionTree does.
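+ # Illustrative yield shape (taken from the loop below): each item is
+ # ((relpath, dir_file_id), dirblock) where every dirblock row is
+ # (path, name, kind, None, file_id, kind); the fourth slot is None
+ # because a revision tree has no lstat data to report.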
+ _directory = 'directory'
+ inv = self._get_root_inventory()
+ top_id = inv.path2id(prefix)
+ if top_id is None:
+ pending = []
+ else:
+ pending = [(prefix, top_id)]
+ while pending:
+ dirblock = []
+ relpath, file_id = pending.pop()
+ # 0 - relpath, 1- file-id
+ if relpath:
+ relroot = relpath + '/'
+ else:
+ relroot = ""
+ # FIXME: stash the node in pending
+ entry = inv[file_id]
+ for name, child in entry.sorted_children():
+ toppath = relroot + name
+ dirblock.append((toppath, name, child.kind, None,
+ child.file_id, child.kind
+ ))
+ yield (relpath, entry.file_id), dirblock
+ # push the user specified dirs from dirblock
+ for dir in reversed(dirblock):
+ if dir[2] == _directory:
+ pending.append((dir[0], dir[4]))
+
+
+class InterDirStateTree(InterTree):
+ """Fast path optimiser for changes_from with dirstate trees.
+
+ This is used only when both trees are in the dirstate working file, and
+ the source is any parent within the dirstate, and the destination is
+ the current working tree of the same dirstate.
+ """
+ # this could be generalized to allow comparisons between any trees in the
+ # dirstate, and possibly between trees stored in different dirstates.
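+ # Illustrative use (sketch; the optimiser is normally selected
+ # automatically via InterTree.get() when trees are compared):
+ #   basis = work_tree.basis_tree()
+ #   for change in work_tree.iter_changes(basis):
+ #       pass  # handle each change tuple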
+
+ def __init__(self, source, target):
+ super(InterDirStateTree, self).__init__(source, target)
+ if not InterDirStateTree.is_compatible(source, target):
+ raise Exception("invalid source %r and target %r" % (source, target))
+
+ @staticmethod
+ def make_source_parent_tree(source, target):
+ """Change the source tree into a parent of the target."""
+ revid = source.commit('record tree')
+ target.branch.fetch(source.branch, revid)
+ target.set_parent_ids([revid])
+ return target.basis_tree(), target
+
+ @classmethod
+ def make_source_parent_tree_python_dirstate(klass, test_case, source, target):
+ result = klass.make_source_parent_tree(source, target)
+ result[1]._iter_changes = dirstate.ProcessEntryPython
+ return result
+
+ @classmethod
+ def make_source_parent_tree_compiled_dirstate(klass, test_case, source,
+ target):
+ from bzrlib.tests.test__dirstate_helpers import \
+ compiled_dirstate_helpers_feature
+ test_case.requireFeature(compiled_dirstate_helpers_feature)
+ from bzrlib._dirstate_helpers_pyx import ProcessEntryC
+ result = klass.make_source_parent_tree(source, target)
+ result[1]._iter_changes = ProcessEntryC
+ return result
+
+ _matching_from_tree_format = WorkingTreeFormat4()
+ _matching_to_tree_format = WorkingTreeFormat4()
+
+ @classmethod
+ def _test_mutable_trees_to_test_trees(klass, test_case, source, target):
+ # This method shouldn't be called, because we have python and C
+ # specific flavours.
+ raise NotImplementedError
+
+ def iter_changes(self, include_unchanged=False,
+ specific_files=None, pb=None, extra_trees=[],
+ require_versioned=True, want_unversioned=False):
+ """Return the changes from source to target.
+
+ :return: An iterator that yields tuples. See InterTree.iter_changes
+ for details.
+ :param specific_files: An optional list of file paths to restrict the
+ comparison to. When mapping filenames to ids, all matches in all
+ trees (including optional extra_trees) are used, and all children of
+ matched directories are included.
+ :param include_unchanged: An optional boolean requesting the inclusion of
+ unchanged entries in the result.
+ :param extra_trees: An optional list of additional trees to use when
+ mapping the contents of specific_files (paths) to file_ids.
+ :param require_versioned: If True, all files in specific_files must be
+ versioned in one of source, target, extra_trees or
+ PathsNotVersionedError is raised.
+ :param want_unversioned: Should unversioned files be returned in the
+ output. An unversioned file is defined as one with (False, False)
+ for the versioned pair.
+ """
+ # TODO: handle extra trees in the dirstate.
+ if (extra_trees or specific_files == []):
+ # we can't fast-path these cases (yet)
+ return super(InterDirStateTree, self).iter_changes(
+ include_unchanged, specific_files, pb, extra_trees,
+ require_versioned, want_unversioned=want_unversioned)
+ parent_ids = self.target.get_parent_ids()
+ if not (self.source._revision_id in parent_ids
+ or self.source._revision_id == _mod_revision.NULL_REVISION):
+ raise AssertionError(
+ "revision {%s} is not stored in {%s}, but %s "
+ "can only be used for trees stored in the dirstate"
+ % (self.source._revision_id, self.target, self.iter_changes))
+ target_index = 0
+ if self.source._revision_id == _mod_revision.NULL_REVISION:
+ source_index = None
+ indices = (target_index,)
+ else:
+ if not (self.source._revision_id in parent_ids):
+ raise AssertionError(
+ "Failure: source._revision_id: %s not in target.parent_ids(%s)" % (
+ self.source._revision_id, parent_ids))
+ source_index = 1 + parent_ids.index(self.source._revision_id)
+ indices = (source_index, target_index)
+ # -- make all specific_files utf8 --
+ if specific_files:
+ specific_files_utf8 = set()
+ for path in specific_files:
+ # Note, if there are many specific files, using cache_utf8
+ # would be good here.
+ specific_files_utf8.add(path.encode('utf8'))
+ specific_files = specific_files_utf8
+ else:
+ specific_files = set([''])
+ # -- specific_files is now a utf8 path set --
+
+ # -- get the state object and prepare it.
+ state = self.target.current_dirstate()
+ state._read_dirblocks_if_needed()
+ if require_versioned:
+ # -- check all supplied paths are versioned in a search tree. --
+ not_versioned = []
+ for path in specific_files:
+ path_entries = state._entries_for_path(path)
+ if not path_entries:
+ # this specified path is not present at all: error
+ not_versioned.append(path.decode('utf-8'))
+ continue
+ found_versioned = False
+ # for each id at this path
+ for entry in path_entries:
+ # for each tree.
+ for index in indices:
+ if entry[1][index][0] != 'a': # absent
+ found_versioned = True
+ # all good: found a versioned cell
+ break
+ if not found_versioned:
+ # every id at this path was 'absent' in all of the tree indices
+ # being searched, so the path is not versioned in any of them.
+ not_versioned.append(path.decode('utf-8'))
+ if len(not_versioned) > 0:
+ raise errors.PathsNotVersionedError(not_versioned)
+ # -- remove redundancy in supplied specific_files to prevent over-scanning --
+ search_specific_files = osutils.minimum_path_selection(specific_files)
+
+ use_filesystem_for_exec = (sys.platform != 'win32')
+ iter_changes = self.target._iter_changes(include_unchanged,
+ use_filesystem_for_exec, search_specific_files, state,
+ source_index, target_index, want_unversioned, self.target)
+ return iter_changes.iter_changes()
+
+ @staticmethod
+ def is_compatible(source, target):
+ # the target must be a dirstate working tree
+ if not isinstance(target, DirStateWorkingTree):
+ return False
+ # the source must be a revtree or dirstate rev tree.
+ if not isinstance(source,
+ (revisiontree.RevisionTree, DirStateRevisionTree)):
+ return False
+ # the source revid must be in the target dirstate
+ if not (source._revision_id == _mod_revision.NULL_REVISION or
+ source._revision_id in target.get_parent_ids()):
+ # TODO: what about ghosts? it may well need to
+ # check for them explicitly.
+ return False
+ return True
+
+InterTree.register_optimiser(InterDirStateTree)
+
+
+class Converter3to4(object):
+ """Perform an in-place upgrade of format 3 to format 4 trees."""
+
+ def __init__(self):
+ self.target_format = WorkingTreeFormat4()
+
+ def convert(self, tree):
+ # lock the control files not the tree, so that we don't get tree
+ # on-unlock behaviours, and so that no-one else diddles with the
+ # tree during upgrade.
+ tree._control_files.lock_write()
+ try:
+ tree.read_working_inventory()
+ self.create_dirstate_data(tree)
+ self.update_format(tree)
+ self.remove_xml_files(tree)
+ finally:
+ tree._control_files.unlock()
+
+ def create_dirstate_data(self, tree):
+ """Create the dirstate based data for tree."""
+ local_path = tree.bzrdir.get_workingtree_transport(None
+ ).local_abspath('dirstate')
+ state = dirstate.DirState.from_tree(tree, local_path)
+ state.save()
+ state.unlock()
+
+ def remove_xml_files(self, tree):
+ """Remove the oldformat 3 data."""
+ transport = tree.bzrdir.get_workingtree_transport(None)
+ for path in ['basis-inventory-cache', 'inventory', 'last-revision',
+ 'pending-merges', 'stat-cache']:
+ try:
+ transport.delete(path)
+ except errors.NoSuchFile:
+ # some files are optional - just deal.
+ pass
+
+ def update_format(self, tree):
+ """Change the format marker."""
+ tree._transport.put_bytes('format',
+ self.target_format.as_string(),
+ mode=tree.bzrdir._get_file_mode())
+
+
+class Converter4to5(object):
+ """Perform an in-place upgrade of format 4 to format 5 trees."""
+
+ def __init__(self):
+ self.target_format = WorkingTreeFormat5()
+
+ def convert(self, tree):
+ # lock the control files not the tree, so that we don't get tree
+ # on-unlock behaviours, and so that no-one else diddles with the
+ # tree during upgrade.
+ tree._control_files.lock_write()
+ try:
+ self.update_format(tree)
+ finally:
+ tree._control_files.unlock()
+
+ def update_format(self, tree):
+ """Change the format marker."""
+ tree._transport.put_bytes('format',
+ self.target_format.as_string(),
+ mode=tree.bzrdir._get_file_mode())
+
+
+class Converter4or5to6(object):
+ """Perform an in-place upgrade of format 4 or 5 to format 6 trees."""
+
+ def __init__(self):
+ self.target_format = WorkingTreeFormat6()
+
+ def convert(self, tree):
+ # lock the control files not the tree, so that we don't get tree
+ # on-unlock behaviours, and so that no-one else diddles with the
+ # tree during upgrade.
+ tree._control_files.lock_write()
+ try:
+ self.init_custom_control_files(tree)
+ self.update_format(tree)
+ finally:
+ tree._control_files.unlock()
+
+ def init_custom_control_files(self, tree):
+ """Initialize custom control files."""
+ tree._transport.put_bytes('views', '',
+ mode=tree.bzrdir._get_file_mode())
+
+ def update_format(self, tree):
+ """Change the format marker."""
+ tree._transport.put_bytes('format',
+ self.target_format.as_string(),
+ mode=tree.bzrdir._get_file_mode())
diff --git a/bzrlib/xml5.py b/bzrlib/xml5.py
new file mode 100644
index 0000000..5d439f7
--- /dev/null
+++ b/bzrlib/xml5.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ cache_utf8,
+ errors,
+ inventory,
+ osutils,
+ xml6,
+ )
+from bzrlib.xml_serializer import (
+ encode_and_escape,
+ get_utf8_or_ascii,
+ unpack_inventory_entry,
+ )
+
+
+class Serializer_v5(xml6.Serializer_v6):
+ """Version 5 serializer
+
+ Packs objects into XML and vice versa.
+ """
+ format_num = '5'
+ root_id = inventory.ROOT_ID
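+ # A serialised format 5 inventory looks roughly like this (illustrative
+ # sketch only; _append_inventory_root and serialize_inventory_flat
+ # produce the exact output):
+ #   <inventory format="5" revision_id="rev-1">
+ #   <file file_id="hello-id" name="hello.txt" revision="rev-1"
+ #         text_sha1="..." text_size="12" />
+ #   </inventory>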
+
+ def _unpack_inventory(self, elt, revision_id, entry_cache=None,
+ return_from_cache=False):
+ """Construct from XML Element
+ """
+ root_id = elt.get('file_id') or inventory.ROOT_ID
+ root_id = get_utf8_or_ascii(root_id)
+
+ format = elt.get('format')
+ if format is not None:
+ if format != '5':
+ raise errors.BzrError("invalid format version %r on inventory"
+ % format)
+ data_revision_id = elt.get('revision_id')
+ if data_revision_id is not None:
+ revision_id = cache_utf8.encode(data_revision_id)
+ inv = inventory.Inventory(root_id, revision_id=revision_id)
+ # Optimizations tested
+ # baseline w/entry cache 2.85s
+ # using inv._byid 2.55s
+ # avoiding attributes 2.46s
+ # adding assertions 2.50s
+ # last_parent cache 2.52s (worse, removed)
+ byid = inv._byid
+ for e in elt:
+ ie = unpack_inventory_entry(e, entry_cache=entry_cache,
+ return_from_cache=return_from_cache)
+ parent_id = ie.parent_id
+ if parent_id is None:
+ ie.parent_id = parent_id = root_id
+ try:
+ parent = byid[parent_id]
+ except KeyError:
+ raise errors.BzrError("parent_id {%s} not in inventory"
+ % (parent_id,))
+ if ie.file_id in byid:
+ raise errors.DuplicateFileId(ie.file_id,
+ byid[ie.file_id])
+ if ie.name in parent.children:
+ raise errors.BzrError("%s is already versioned"
+ % (osutils.pathjoin(inv.id2path(parent_id),
+ ie.name).encode('utf-8'),))
+ parent.children[ie.name] = ie
+ byid[ie.file_id] = ie
+ if revision_id is not None:
+ inv.root.revision = revision_id
+ self._check_cache_size(len(inv), entry_cache)
+ return inv
+
+ def _check_revisions(self, inv):
+ """Extension point for subclasses to check during serialisation.
+
+ In this version, no checking is done.
+
+ :param inv: An inventory about to be serialised, to be checked.
+ :raises: AssertionError if an error has occurred.
+ """
+
+ def _append_inventory_root(self, append, inv):
+ """Append the inventory root to output."""
+ if inv.root.file_id not in (None, inventory.ROOT_ID):
+ fileid1 = ' file_id="'
+ fileid2 = encode_and_escape(inv.root.file_id)
+ else:
+ fileid1 = ""
+ fileid2 = ""
+ if inv.revision_id is not None:
+ revid1 = ' revision_id="'
+ revid2 = encode_and_escape(inv.revision_id)
+ else:
+ revid1 = ""
+ revid2 = ""
+ append('<inventory%s%s format="5"%s%s>\n' % (
+ fileid1, fileid2, revid1, revid2))
+
+
+serializer_v5 = Serializer_v5()
diff --git a/bzrlib/xml6.py b/bzrlib/xml6.py
new file mode 100644
index 0000000..7b3d8e4
--- /dev/null
+++ b/bzrlib/xml6.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2008 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import xml8
+
+
+class Serializer_v6(xml8.Serializer_v8):
+ """This serialiser supports rich roots.
+
+ While its inventory format number is 6, its revision format is 5.
+ Its inventory_sha1 may be inaccurate: the inventory may have been
+ converted from format 5 or 7 without updating the sha1.
+ """
+
+ format_num = '6'
+ # Format 6 & 7 reported their revision format as 5.
+ revision_format_num = '5'
+
+
+serializer_v6 = Serializer_v6()
diff --git a/bzrlib/xml7.py b/bzrlib/xml7.py
new file mode 100644
index 0000000..715dab9
--- /dev/null
+++ b/bzrlib/xml7.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2006-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+from bzrlib import (
+ inventory,
+ xml6,
+ )
+
+
+class Serializer_v7(xml6.Serializer_v6):
+ """A Serializer that supports tree references"""
+
+ # this format is used by BzrBranch6
+
+ supported_kinds = set(['file', 'directory', 'symlink', 'tree-reference'])
+ format_num = '7'
+
+
+serializer_v7 = Serializer_v7()
diff --git a/bzrlib/xml8.py b/bzrlib/xml8.py
new file mode 100644
index 0000000..fc29e2d
--- /dev/null
+++ b/bzrlib/xml8.py
@@ -0,0 +1,371 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+from __future__ import absolute_import
+
+import cStringIO
+
+from bzrlib import (
+ cache_utf8,
+ lazy_regex,
+ revision as _mod_revision,
+ trace,
+ )
+from bzrlib.xml_serializer import (
+ Element,
+ SubElement,
+ XMLSerializer,
+ encode_and_escape,
+ escape_invalid_chars,
+ get_utf8_or_ascii,
+ serialize_inventory_flat,
+ unpack_inventory_entry,
+ unpack_inventory_flat,
+ )
+from bzrlib.revision import Revision
+from bzrlib.errors import BzrError
+
+
+_xml_unescape_map = {
+ 'apos':"'",
+ 'quot':'"',
+ 'amp':'&',
+ 'lt':'<',
+ 'gt':'>'
+}
+
+
+def _unescaper(match, _map=_xml_unescape_map):
+ code = match.group(1)
+ try:
+ return _map[code]
+ except KeyError:
+ if not code.startswith('#'):
+ raise
+ return unichr(int(code[1:])).encode('utf8')
+
+
+_unescape_re = lazy_regex.lazy_compile('\&([^;]*);')
+
+def _unescape_xml(data):
+ """Unescape predefined XML entities in a string of data."""
+ return _unescape_re.sub(_unescaper, data)
+
+
+class Serializer_v8(XMLSerializer):
+ """This serialiser adds rich roots.
+
+ Its revision format number matches its inventory number.
+ """
+
+ __slots__ = []
+
+ root_id = None
+ support_altered_by_hack = True
+ # This format supports the altered-by hack that reads file ids directly out
+ # of the versionedfile, without doing XML parsing.
+
+ supported_kinds = set(['file', 'directory', 'symlink'])
+ format_num = '8'
+ revision_format_num = None
+
+ # The search regex used by xml based repositories to determine what things
+ # were changed in a single commit.
+ _file_ids_altered_regex = lazy_regex.lazy_compile(
+ r'file_id="(?P<file_id>[^"]+)"'
+ r'.* revision="(?P<revision_id>[^"]+)"'
+ )
+
+ def _check_revisions(self, inv):
+ """Extension point for subclasses to check during serialisation.
+
+ :param inv: An inventory about to be serialised, to be checked.
+ :raises: AssertionError if an error has occurred.
+ """
+ if inv.revision_id is None:
+ raise AssertionError("inv.revision_id is None")
+ if inv.root.revision is None:
+ raise AssertionError("inv.root.revision is None")
+
+ def _check_cache_size(self, inv_size, entry_cache):
+ """Check that the entry_cache is large enough.
+
+ We want the cache to be ~2x the size of an inventory. The reason is
+ because we use a FIFO cache, and how Inventory records are likely to
+ change. In general, you have a small number of records which change
+ often, and a lot of records which do not change at all. So when the
+ cache gets full, you actually flush out a lot of the records you are
+ interested in, which means you need to recreate all of those records.
+ An LRU Cache would be better, but the overhead negates the cache
+ coherency benefit.
+
+ One way to look at it: only the part of the cache beyond len(inv) is your
+ 'working' set. And in general, it shouldn't be a problem to hold 2
+ inventories in memory anyway.
+
+ :param inv_size: The number of entries in an inventory.
+ """
+ if entry_cache is None:
+ return
+ # 1.5 times might also be reasonable.
+ recommended_min_cache_size = inv_size * 1.5
+ if entry_cache.cache_size() < recommended_min_cache_size:
+ recommended_cache_size = inv_size * 2
+ trace.mutter('Resizing the inventory entry cache from %d to %d',
+ entry_cache.cache_size(), recommended_cache_size)
+ entry_cache.resize(recommended_cache_size)
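+ # Worked example (from the arithmetic above): for inv_size = 55000 the
+ # recommended minimum is 82500 entries, and a smaller cache is resized
+ # to 2 * 55000 = 110000 entries.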
+
+ def write_inventory_to_lines(self, inv):
+ """Return a list of lines with the encoded inventory."""
+ return self.write_inventory(inv, None)
+
+ def write_inventory_to_string(self, inv, working=False):
+ """Just call write_inventory with a StringIO and return the value.
+
+ :param working: If True skip history data - text_sha1, text_size,
+ reference_revision, symlink_target.
+ """
+ sio = cStringIO.StringIO()
+ self.write_inventory(inv, sio, working)
+ return sio.getvalue()
+
+ def write_inventory(self, inv, f, working=False):
+ """Write inventory to a file.
+
+ :param inv: the inventory to write.
+ :param f: the file to write. (May be None if the lines are the desired
+ output).
+ :param working: If True skip history data - text_sha1, text_size,
+ reference_revision, symlink_target.
+ :return: The inventory as a list of lines.
+ """
+ output = []
+ append = output.append
+ self._append_inventory_root(append, inv)
+ serialize_inventory_flat(inv, append,
+ self.root_id, self.supported_kinds, working)
+ if f is not None:
+ f.writelines(output)
+ # Just to keep the cache from growing without bounds
+ # but we may actually not want to clear the cache
+ #_clear_cache()
+ return output
+
+ def _append_inventory_root(self, append, inv):
+ """Append the inventory root to output."""
+ if inv.revision_id is not None:
+ revid1 = ' revision_id="'
+ revid2 = encode_and_escape(inv.revision_id)
+ else:
+ revid1 = ""
+ revid2 = ""
+ append('<inventory format="%s"%s%s>\n' % (
+ self.format_num, revid1, revid2))
+ append('<directory file_id="%s name="%s revision="%s />\n' % (
+ encode_and_escape(inv.root.file_id),
+ encode_and_escape(inv.root.name),
+ encode_and_escape(inv.root.revision)))
+
+ def _pack_revision(self, rev):
+ """Revision object -> xml tree"""
+ # For the XML format, we need to write them as Unicode rather than as
+ # utf-8 strings. So that cElementTree can handle properly escaping
+ # them.
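+ # Illustrative output (a sketch of what the element serialises to;
+ # attribute order and exact values vary):
+ #   <revision committer="..." format="..." inventory_sha1="..."
+ #             revision_id="..." timestamp="1234567890.123" timezone="0">
+ #   <message>...</message>
+ #   <parents><revision_ref revision_id="..." /></parents>
+ #   <properties>...</properties>
+ #   </revision>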
+ decode_utf8 = cache_utf8.decode
+ revision_id = rev.revision_id
+ if isinstance(revision_id, str):
+ revision_id = decode_utf8(revision_id)
+ format_num = self.format_num
+ if self.revision_format_num is not None:
+ format_num = self.revision_format_num
+ root = Element('revision',
+ committer = rev.committer,
+ timestamp = '%.3f' % rev.timestamp,
+ revision_id = revision_id,
+ inventory_sha1 = rev.inventory_sha1,
+ format=format_num,
+ )
+ if rev.timezone is not None:
+ root.set('timezone', str(rev.timezone))
+ root.text = '\n'
+ msg = SubElement(root, 'message')
+ msg.text = escape_invalid_chars(rev.message)[0]
+ msg.tail = '\n'
+ if rev.parent_ids:
+ pelts = SubElement(root, 'parents')
+ pelts.tail = pelts.text = '\n'
+ for parent_id in rev.parent_ids:
+ _mod_revision.check_not_reserved_id(parent_id)
+ p = SubElement(pelts, 'revision_ref')
+ p.tail = '\n'
+ if isinstance(parent_id, str):
+ parent_id = decode_utf8(parent_id)
+ p.set('revision_id', parent_id)
+ if rev.properties:
+ self._pack_revision_properties(rev, root)
+ return root
+
+ def _pack_revision_properties(self, rev, under_element):
+ top_elt = SubElement(under_element, 'properties')
+ for prop_name, prop_value in sorted(rev.properties.items()):
+ prop_elt = SubElement(top_elt, 'property')
+ prop_elt.set('name', prop_name)
+ prop_elt.text = prop_value
+ prop_elt.tail = '\n'
+ top_elt.tail = '\n'
+
+ def _unpack_entry(self, elt, entry_cache=None, return_from_cache=False):
+ # This is here because it's overridden by xml7
+ return unpack_inventory_entry(elt, entry_cache,
+ return_from_cache)
+
+ def _unpack_inventory(self, elt, revision_id=None, entry_cache=None,
+ return_from_cache=False):
+ """Construct from XML Element"""
+ inv = unpack_inventory_flat(elt, self.format_num, self._unpack_entry,
+ entry_cache, return_from_cache)
+ self._check_cache_size(len(inv), entry_cache)
+ return inv
+
+ def _unpack_revision(self, elt):
+ """XML Element -> Revision object"""
+ format = elt.get('format')
+ format_num = self.format_num
+ if self.revision_format_num is not None:
+ format_num = self.revision_format_num
+ if format is not None:
+ if format != format_num:
+ raise BzrError("invalid format version %r on revision"
+ % format)
+ get_cached = get_utf8_or_ascii
+ rev = Revision(committer = elt.get('committer'),
+ timestamp = float(elt.get('timestamp')),
+ revision_id = get_cached(elt.get('revision_id')),
+ inventory_sha1 = elt.get('inventory_sha1')
+ )
+ parents = elt.find('parents') or []
+ for p in parents:
+ rev.parent_ids.append(get_cached(p.get('revision_id')))
+ self._unpack_revision_properties(elt, rev)
+ v = elt.get('timezone')
+ if v is None:
+ rev.timezone = 0
+ else:
+ rev.timezone = int(v)
+ rev.message = elt.findtext('message') # text of <message>
+ return rev
+
+ def _unpack_revision_properties(self, elt, rev):
+ """Unpack properties onto a revision."""
+ props_elt = elt.find('properties')
+ if not props_elt:
+ return
+ for prop_elt in props_elt:
+ if prop_elt.tag != 'property':
+ raise AssertionError(
+ "bad tag under properties list: %r" % prop_elt.tag)
+ name = prop_elt.get('name')
+ value = prop_elt.text
+ # If a property had an empty value ('') cElementTree reads
+ # that back as None, convert it back to '', so that all
+ # properties have string values
+ if value is None:
+ value = ''
+ if name in rev.properties:
+ raise AssertionError("repeated property %r" % name)
+ rev.properties[name] = value
+
+ def _find_text_key_references(self, line_iterator):
+ """Core routine for extracting references to texts from inventories.
+
+ This performs the translation of xml lines to revision ids.
+
+ :param line_iterator: An iterator of lines, origin_version_id
+ :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
+ to whether they were referred to by the inventory of the
+ revision_id that they contain. Note that if that revision_id was
+ not part of the line_iterator's output then False will be given -
+ even though it may actually refer to that key.
+ """
+ if not self.support_altered_by_hack:
+ raise AssertionError(
+ "_find_text_key_references only "
+ "supported for branches which store inventory as unnested xml"
+ ", not on %r" % self)
+ result = {}
+
+ # this code needs to read every new line in every inventory for the
+ # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
+ # not present in one of those inventories is unnecessary but not
+ # harmful because we are filtering by the revision id marker in the
+ # inventory lines : we only select file ids altered in one of those
+ # revisions. We don't need to see all lines in the inventory because
+ # only those added in an inventory in rev X can contain a revision=X
+ # line.
+ unescape_revid_cache = {}
+ unescape_fileid_cache = {}
+
+ # jam 20061218 In a big fetch, this handles hundreds of thousands
+ # of lines, so it has had a lot of inlining and optimizing done.
+ # Sorry that it is a little bit messy.
+ # Move several functions to be local variables, since this is a long
+ # running loop.
+ search = self._file_ids_altered_regex.search
+ unescape = _unescape_xml
+ setdefault = result.setdefault
+ for line, line_key in line_iterator:
+ match = search(line)
+ if match is None:
+ continue
+ # One call to match.group() returning multiple items is quite a
+ # bit faster than 2 calls to match.group() each returning 1
+ file_id, revision_id = match.group('file_id', 'revision_id')
+
+ # Inlining the cache lookups helps a lot when you make 170,000
+ # lines and 350k ids, versus 8.4 unique ids.
+ # Using a cache helps in 2 ways:
+ # 1) Avoids unnecessary decoding calls
+ # 2) Re-uses cached strings, which helps in future set and
+ # equality checks.
+ # (2) is enough that removing encoding entirely along with
+ # the cache (so we are using plain strings) results in no
+ # performance improvement.
+ try:
+ revision_id = unescape_revid_cache[revision_id]
+ except KeyError:
+ unescaped = unescape(revision_id)
+ unescape_revid_cache[revision_id] = unescaped
+ revision_id = unescaped
+
+ # Note that unconditionally unescaping means that we deserialise
+ # every fileid, which for general 'pull' is not great, but we don't
+ # really want to have some many fulltexts that this matters anyway.
+ # RBC 20071114.
+ try:
+ file_id = unescape_fileid_cache[file_id]
+ except KeyError:
+ unescaped = unescape(file_id)
+ unescape_fileid_cache[file_id] = unescaped
+ file_id = unescaped
+
+ key = (file_id, revision_id)
+ setdefault(key, False)
+ if revision_id == line_key[-1]:
+ result[key] = True
+ return result
+
+
+serializer_v8 = Serializer_v8()
diff --git a/bzrlib/xml_serializer.py b/bzrlib/xml_serializer.py
new file mode 100644
index 0000000..6cb6556
--- /dev/null
+++ b/bzrlib/xml_serializer.py
@@ -0,0 +1,439 @@
+# Copyright (C) 2005-2010 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""XML externalization support."""
+
+from __future__ import absolute_import
+
+# "XML is like violence: if it doesn't solve your problem, you aren't
+# using enough of it." -- various
+
+# importing this module is fairly slow because it has to load several
+# ElementTree bits
+
+import re
+
+try:
+ import xml.etree.cElementTree as elementtree
+ ParseError = getattr(elementtree, "ParseError", SyntaxError)
+except ImportError:
+ # Fall back to pure python implementation if C extension is unavailable
+ import xml.etree.ElementTree as elementtree
+ try:
+ from xml.etree.ElementTree import ParseError
+ except ImportError:
+ from xml.parsers.expat import ExpatError as ParseError
+
+(ElementTree, SubElement, Element, XMLTreeBuilder, fromstring, tostring) = (
+ elementtree.ElementTree, elementtree.SubElement, elementtree.Element,
+ elementtree.XMLTreeBuilder, elementtree.fromstring, elementtree.tostring)
+
+
+from bzrlib import (
+ cache_utf8,
+ errors,
+ inventory,
+ lazy_regex,
+ serializer,
+ )
+
+
+class XMLSerializer(serializer.Serializer):
+ """Abstract XML object serialize/deserialize"""
+
+ squashes_xml_invalid_characters = True
+
+ def read_inventory_from_string(self, xml_string, revision_id=None,
+ entry_cache=None, return_from_cache=False):
+ """Read xml_string into an inventory object.
+
+ :param xml_string: The xml to read.
+ :param revision_id: If not-None, the expected revision id of the
+ inventory. Some serialisers use this to set the results' root
+ revision. This should be supplied for deserialising all
+ from-repository inventories so that xml5 inventories that were
+ serialised without a revision identifier can be given the right
+ revision id (but not for working tree inventories where users can
+ edit the data without triggering checksum errors or anything).
+ :param entry_cache: An optional cache of InventoryEntry objects. If
+ supplied we will look up entries via (file_id, revision_id) which
+ should map to a valid InventoryEntry (File/Directory/etc) object.
+ :param return_from_cache: Return entries directly from the cache,
+ rather than copying them first. This is only safe if the caller
+ promises not to mutate the returned inventory entries, but it can
+ make some operations significantly faster.
+ """
+ try:
+ return self._unpack_inventory(fromstring(xml_string), revision_id,
+ entry_cache=entry_cache,
+ return_from_cache=return_from_cache)
+ except ParseError, e:
+ raise errors.UnexpectedInventoryFormat(e)
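+ # Illustrative use (not part of the original module): concrete
+ # serializers such as bzrlib.xml8.serializer_v8 are typically called as
+ #   inv = serializer_v8.read_inventory_from_string(xml_bytes,
+ #       revision_id, entry_cache=cache, return_from_cache=True)
+ # where `cache` maps (file_id, revision_id) to InventoryEntry objects
+ # (a plain dict works; bzr itself uses a bounded cache).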
+
+ def read_inventory(self, f, revision_id=None):
+ try:
+ try:
+ return self._unpack_inventory(self._read_element(f),
+ revision_id=None)
+ finally:
+ f.close()
+ except ParseError, e:
+ raise errors.UnexpectedInventoryFormat(e)
+
+ def write_revision(self, rev, f):
+ self._write_element(self._pack_revision(rev), f)
+
+ def write_revision_to_string(self, rev):
+ return tostring(self._pack_revision(rev)) + '\n'
+
+ def read_revision(self, f):
+ return self._unpack_revision(self._read_element(f))
+
+ def read_revision_from_string(self, xml_string):
+ return self._unpack_revision(fromstring(xml_string))
+
+ def _write_element(self, elt, f):
+ ElementTree(elt).write(f, 'utf-8')
+ f.write('\n')
+
+ def _read_element(self, f):
+ return ElementTree().parse(f)
+
+
+def escape_invalid_chars(message):
+ """Escape the XML-invalid characters in a commit message.
+
+ :param message: Commit message to escape
+ :return: tuple with escaped message and number of characters escaped
+ """
+ if message is None:
+ return None, 0
+ # Python strings can include characters that can't be
+ # represented in well-formed XML; escape characters that
+ # aren't listed in the XML specification
+ # (http://www.w3.org/TR/REC-xml/#NT-Char).
+ return re.subn(u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
+ lambda match: match.group(0).encode('unicode_escape'),
+ message)
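+# Illustrative example (not part of the original module): a form feed is not
+# in the XML Char production, so it gets rewritten via unicode_escape:
+#   escape_invalid_chars(u'ab\x0ccd') == (u'ab\\x0ccd', 1)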
+
+
+def get_utf8_or_ascii(a_str, _encode_utf8=cache_utf8.encode):
+ """Return a cached version of the string.
+
+ cElementTree will return a plain string if the XML is plain ascii. It only
+ returns Unicode when it needs to. We want to work in utf-8 strings. So if
+ cElementTree returns a plain string, we can just return the cached version.
+ If it is Unicode, then we need to encode it.
+
+ :param a_str: An 8-bit string or Unicode as returned by
+ cElementTree.Element.get()
+ :return: A utf-8 encoded 8-bit string.
+ """
+ # This is fairly optimized because we know what cElementTree does; it is
+ # not meant as a generic function for all cases, because an 8-bit string
+ # may be neither ascii nor valid utf-8.
+ if a_str.__class__ is unicode:
+ return _encode_utf8(a_str)
+ else:
+ return intern(a_str)
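+# Illustrative example (not part of the original module):
+#   get_utf8_or_ascii('TREE_ROOT')   # -> interned str 'TREE_ROOT'
+#   get_utf8_or_ascii(u'caf\xe9')    # -> 'caf\xc3\xa9' (utf-8 bytes)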
+
+
+_utf8_re = lazy_regex.lazy_compile('[&<>\'\"]|[\x80-\xff]+')
+_unicode_re = lazy_regex.lazy_compile(u'[&<>\'\"\u0080-\uffff]')
+
+
+_xml_escape_map = {
+ "&":'&amp;',
+ "'":"&apos;", # FIXME: overkill
+ "\"":"&quot;",
+ "<":"&lt;",
+ ">":"&gt;",
+ }
+
+
+def _unicode_escape_replace(match, _map=_xml_escape_map):
+ """Replace a string of non-ascii, non XML safe characters with their escape
+
+ This will escape both Standard XML escapes, like <>"', etc.
+ As well as escaping non ascii characters, because ElementTree did.
+ This helps us remain compatible to older versions of bzr. We may change
+ our policy in the future, though.
+ """
+ # jam 20060816 Benchmarks show that try/KeyError is faster if you
+ # expect the lookup to rarely miss. There is about a 10% difference
+ # in overall time. But if you miss frequently, then checking for None
+ # (via dict.get) is much faster. For our use case we *rarely* have a
+ # revision id, file id or path name that is unicode, so use try/KeyError.
+ try:
+ return _map[match.group()]
+ except KeyError:
+ return "&#%d;" % ord(match.group())
+
+
+def _utf8_escape_replace(match, _map=_xml_escape_map):
+ """Escape utf8 characters into XML safe ones.
+
+ This uses 2 tricks. It is either escaping "standard" characters, like "&<>,
+ or it is handling characters with the high-bit set. For ascii characters,
+ we just lookup the replacement in the dictionary. For everything else, we
+ decode back into Unicode, and then use the XML escape code.
+ """
+ try:
+ return _map[match.group()]
+ except KeyError:
+ return ''.join('&#%d;' % ord(uni_chr)
+ for uni_chr in match.group().decode('utf8'))
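+# Illustrative example (not part of the original module); '\xc3\xa9' is the
+# utf-8 encoding of U+00E9:
+#   _utf8_re.sub(_utf8_escape_replace, 'a<b\xc3\xa9') == 'a&lt;b&#233;'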
+
+
+_to_escaped_map = {}
+
+def encode_and_escape(unicode_or_utf8_str, _map=_to_escaped_map):
+ """Encode the string into utf8, and escape invalid XML characters"""
+ # We frequently get entities we have not seen before, so it is better
+ # to check for None rather than use try/KeyError
+ text = _map.get(unicode_or_utf8_str)
+ if text is None:
+ if unicode_or_utf8_str.__class__ is unicode:
+ # The alternative policy is to do a regular UTF8 encoding
+ # and then escape only XML meta characters.
+ # Performance is equivalent once you use cache_utf8. *However*
+ # this makes the serialized texts incompatible with old versions
+ # of bzr. So no net gain. (Perhaps the read code would handle utf8
+ # better than entity escapes, but cElementTree seems to do just fine
+ # either way)
+ text = str(_unicode_re.sub(_unicode_escape_replace,
+ unicode_or_utf8_str)) + '"'
+ else:
+ # Plain strings are considered to already be in utf-8 so we do a
+ # slightly different method for escaping.
+ text = _utf8_re.sub(_utf8_escape_replace,
+ unicode_or_utf8_str) + '"'
+ _map[unicode_or_utf8_str] = text
+ return text
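+# Illustrative examples (not part of the original module). Note the trailing
+# double quote that encode_and_escape appends; the serializers below rely on
+# it to close the attribute value in their format strings:
+#   encode_and_escape('a&b') == 'a&amp;b"'
+#   encode_and_escape(u'caf\xe9') == 'caf&#233;"'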
+
+
+def _clear_cache():
+ """Clean out the unicode => escaped map"""
+ _to_escaped_map.clear()
+
+
+def unpack_inventory_entry(elt, entry_cache=None, return_from_cache=False):
+ elt_get = elt.get
+ file_id = elt_get('file_id')
+ revision = elt_get('revision')
+ # Check and see if we have already unpacked this exact entry
+ # Some timings for "repo.revision_trees(last_100_revs)"
+ #               bzr     mysql
+ # unmodified    4.1s    40.8s
+ # using lru     3.5s
+ # using fifo    2.83s   29.1s
+ # lru._cache    2.8s
+ # dict          2.75s   26.8s
+ # inv.add       2.5s    26.0s
+ # no_copy       2.00s   20.5s
+ # no_c,dict     1.95s   18.0s
+ # Note that a cache of 10k nodes is more than sufficient to hold all of
+ # the inventory for the last 100 revs for bzr, but not for mysql (20k
+ # is enough for mysql, which saves the same 2s as using a dict)
+
+ # Breakdown of mysql using time.clock()
+ # 4.1s 2 calls to element.get for file_id, revision_id
+ # 4.5s cache_hit lookup
+ # 7.1s InventoryFile.copy()
+ # 2.4s InventoryDirectory.copy()
+ # 0.4s decoding unique entries
+ # 1.6s decoding entries after FIFO fills up
+ # 0.8s Adding nodes to FIFO (including flushes)
+ # 0.1s cache miss lookups
+ # Using an LRU cache
+ # 4.1s 2 calls to element.get for file_id, revision_id
+ # 9.9s cache_hit lookup
+ # 10.8s InventoryEntry.copy()
+ # 0.3s cache miss lookups
+ # 1.2s decoding entries
+ # 1.0s adding nodes to LRU
+ if entry_cache is not None and revision is not None:
+ key = (file_id, revision)
+ try:
+ # We copy it, because some operations may mutate it
+ cached_ie = entry_cache[key]
+ except KeyError:
+ pass
+ else:
+ # Only copying directory entries drops us 2.85s => 2.35s
+ if return_from_cache:
+ if cached_ie.kind == 'directory':
+ return cached_ie.copy()
+ return cached_ie
+ return cached_ie.copy()
+
+ kind = elt.tag
+ if not inventory.InventoryEntry.versionable_kind(kind):
+ raise AssertionError('unsupported entry kind %s' % kind)
+
+ file_id = get_utf8_or_ascii(file_id)
+ if revision is not None:
+ revision = get_utf8_or_ascii(revision)
+ parent_id = elt_get('parent_id')
+ if parent_id is not None:
+ parent_id = get_utf8_or_ascii(parent_id)
+
+ if kind == 'directory':
+ ie = inventory.InventoryDirectory(file_id,
+ elt_get('name'),
+ parent_id)
+ elif kind == 'file':
+ ie = inventory.InventoryFile(file_id,
+ elt_get('name'),
+ parent_id)
+ ie.text_sha1 = elt_get('text_sha1')
+ if elt_get('executable') == 'yes':
+ ie.executable = True
+ v = elt_get('text_size')
+ ie.text_size = v and int(v)
+ elif kind == 'symlink':
+ ie = inventory.InventoryLink(file_id,
+ elt_get('name'),
+ parent_id)
+ ie.symlink_target = elt_get('symlink_target')
+ elif kind == 'tree-reference':
+ file_id = elt.attrib['file_id']
+ name = elt.attrib['name']
+ parent_id = elt.attrib['parent_id']
+ revision = elt.get('revision')
+ reference_revision = elt.get('reference_revision')
+ ie = inventory.TreeReference(file_id, name, parent_id, revision,
+ reference_revision)
+ else:
+ raise errors.UnsupportedInventoryKind(kind)
+ ie.revision = revision
+ if revision is not None and entry_cache is not None:
+ # We cache a copy() because callers like to mutate objects, and
+ # that would cause the item in cache to mutate as well.
+ # This has a small effect on many-inventory performance, because
+ # the majority fraction is spent in cache hits, not misses.
+ entry_cache[key] = ie.copy()
+
+ return ie
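+# Illustrative example (not part of the original module); the attribute
+# values below are made up:
+#   elt = fromstring('<file file_id="f-1" name="a.txt" parent_id="root-1" '
+#                    'revision="rev-1" text_sha1="0123" text_size="4"/>')
+#   ie = unpack_inventory_entry(elt)
+#   # ie is an inventory.InventoryFile with ie.revision == 'rev-1'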
+
+
+def unpack_inventory_flat(elt, format_num, unpack_entry,
+ entry_cache=None, return_from_cache=False):
+ """Unpack a flat XML inventory.
+
+ :param elt: XML element for the inventory
+ :param format_num: Expected format number
+ :param unpack_entry: Function for unpacking inventory entries
+ :param entry_cache: Optional cache of InventoryEntry objects keyed by
+ (file_id, revision_id)
+ :param return_from_cache: If True, return cached entries directly rather
+ than copies
+ :return: An inventory
+ :raise UnexpectedInventoryFormat: When unexpected elements or data are
+ encountered
+ """
+ if elt.tag != 'inventory':
+ raise errors.UnexpectedInventoryFormat('Root tag is %r' % elt.tag)
+ format = elt.get('format')
+ if format != format_num:
+ raise errors.UnexpectedInventoryFormat('Invalid format version %r'
+ % format)
+ revision_id = elt.get('revision_id')
+ if revision_id is not None:
+ revision_id = cache_utf8.encode(revision_id)
+ inv = inventory.Inventory(root_id=None, revision_id=revision_id)
+ for e in elt:
+ ie = unpack_entry(e, entry_cache, return_from_cache)
+ inv.add(ie)
+ return inv
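+# Illustrative example (not part of the original module): a serializer whose
+# format_num is '8' could delegate to this helper like so:
+#   inv = unpack_inventory_flat(fromstring(xml_bytes), '8',
+#                               unpack_inventory_entry)
+# where xml_bytes is a complete '<inventory format="8" ...>' document.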
+
+
+def serialize_inventory_flat(inv, append, root_id, supported_kinds, working):
+ """Serialize an inventory to a flat XML file.
+
+ :param inv: Inventory to serialize
+ :param append: Function for writing a line of output
+ :param root_id: File id of the root entry; entries parented directly at
+ the root are written without an explicit parent_id attribute
+ :param supported_kinds: Entry kinds supported by the target format
+ :param working: If True, skip history data - text_sha1, text_size,
+ reference_revision and symlink_target
+ """
+ entries = inv.iter_entries()
+ # Skip the root
+ root_path, root_ie = entries.next()
+ for path, ie in entries:
+ if ie.parent_id != root_id:
+ parent_str = ' parent_id="'
+ parent_id = encode_and_escape(ie.parent_id)
+ else:
+ parent_str = ''
+ parent_id = ''
+ if ie.kind == 'file':
+ if ie.executable:
+ executable = ' executable="yes"'
+ else:
+ executable = ''
+ if not working:
+ append('<file%s file_id="%s name="%s%s%s revision="%s '
+ 'text_sha1="%s" text_size="%d" />\n' % (
+ executable, encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name), parent_str, parent_id,
+ encode_and_escape(ie.revision), ie.text_sha1,
+ ie.text_size))
+ else:
+ append('<file%s file_id="%s name="%s%s%s />\n' % (
+ executable, encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name), parent_str, parent_id))
+ elif ie.kind == 'directory':
+ if not working:
+ append('<directory file_id="%s name="%s%s%s revision="%s '
+ '/>\n' % (
+ encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name),
+ parent_str, parent_id,
+ encode_and_escape(ie.revision)))
+ else:
+ append('<directory file_id="%s name="%s%s%s />\n' % (
+ encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name),
+ parent_str, parent_id))
+ elif ie.kind == 'symlink':
+ if not working:
+ append('<symlink file_id="%s name="%s%s%s revision="%s '
+ 'symlink_target="%s />\n' % (
+ encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name),
+ parent_str, parent_id,
+ encode_and_escape(ie.revision),
+ encode_and_escape(ie.symlink_target)))
+ else:
+ append('<symlink file_id="%s name="%s%s%s />\n' % (
+ encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name),
+ parent_str, parent_id))
+ elif ie.kind == 'tree-reference':
+ if ie.kind not in supported_kinds:
+ raise errors.UnsupportedInventoryKind(ie.kind)
+ if not working:
+ append('<tree-reference file_id="%s name="%s%s%s '
+ 'revision="%s reference_revision="%s />\n' % (
+ encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name),
+ parent_str, parent_id,
+ encode_and_escape(ie.revision),
+ encode_and_escape(ie.reference_revision)))
+ else:
+ append('<tree-reference file_id="%s name="%s%s%s />\n' % (
+ encode_and_escape(ie.file_id),
+ encode_and_escape(ie.name),
+ parent_str, parent_id))
+ else:
+ raise errors.UnsupportedInventoryKind(ie.kind)
+ append('</inventory>\n')
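+
+
+# Illustrative example (not part of the original module): callers typically
+# write the opening '<inventory ...>' line themselves, collect the entry
+# lines in a list, and let this helper emit the closing tag:
+#   output = []
+#   output.append('<inventory format="8" revision_id="rev-1">\n')
+#   serialize_inventory_flat(inv, output.append, root_id=inv.root.file_id,
+#       supported_kinds=set(['file', 'directory', 'symlink']), working=False)
+#   xml_bytes = ''.join(output)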